From 7107ff7878165cbbfd36b67d2ee5854f7986f50b Mon Sep 17 00:00:00 2001
From: Ian Cordasco
Date: Wed, 2 Mar 2016 11:28:02 -0600
Subject: [PATCH] Add forgotten templates

---
 templates/api_audit_map.conf.j2       |  26 ++
 templates/barbican-api-paste.ini.j2   |  76 +++++
 templates/barbican-vassals-api.ini.j2 |  11 +
 templates/barbican.conf.j2            | 381 ++++++++++++++++++++++++++
 templates/policy.json.j2              |  78 ++++++
 5 files changed, 572 insertions(+)
 create mode 100644 templates/api_audit_map.conf.j2
 create mode 100644 templates/barbican-api-paste.ini.j2
 create mode 100644 templates/barbican-vassals-api.ini.j2
 create mode 100644 templates/barbican.conf.j2
 create mode 100644 templates/policy.json.j2

diff --git a/templates/api_audit_map.conf.j2 b/templates/api_audit_map.conf.j2
new file mode 100644
index 0000000..3d9ab38
--- /dev/null
+++ b/templates/api_audit_map.conf.j2
@@ -0,0 +1,26 @@
+[DEFAULT]
+# default target endpoint type
+# should match the endpoint type defined in service catalog
+target_endpoint_type = key-manager
+
+# map urls ending with specific text to a unique action
+# Don't need custom mapping for other resource operations
+# Note: action should match action names defined in CADF taxonomy
+[custom_actions]
+acl/get = read
+
+
+# path of api requests for CADF target typeURI
+# Just need to include top resource path to identify class of resources
+[path_keywords]
+secrets=
+containers=
+orders=
+cas=None
+quotas=
+project-quotas=
+
+
+# map endpoint type defined in service catalog to CADF typeURI
+[service_endpoints]
+key-manager = service/security/keymanager
\ No newline at end of file
diff --git a/templates/barbican-api-paste.ini.j2 b/templates/barbican-api-paste.ini.j2
new file mode 100644
index 0000000..6ecbd42
--- /dev/null
+++ b/templates/barbican-api-paste.ini.j2
@@ -0,0 +1,76 @@
+[composite:main]
+use = egg:Paste#urlmap
+/: barbican_version
+/v1: barbican_api
+
+# Use this pipeline for Barbican API - versions no authentication
+[pipeline:barbican_version]
+pipeline = cors versionapp
+
+# Use this pipeline for Barbican API - DEFAULT no authentication
+[pipeline:barbican_api]
+pipeline = cors unauthenticated-context apiapp
+
+#Use this pipeline to activate a repoze.profile middleware and HTTP port,
+# to provide profiling information for the REST API processing.
+[pipeline:barbican-profile]
+pipeline = cors unauthenticated-context egg:Paste#cgitb egg:Paste#httpexceptions profile apiapp
+
+#Use this pipeline for keystone auth
+[pipeline:barbican-api-keystone]
+pipeline = cors keystone_authtoken context apiapp
+
+#Use this pipeline for keystone auth with audit feature
+[pipeline:barbican-api-keystone-audit]
+pipeline = keystone_authtoken context audit apiapp
+
+[app:apiapp]
+paste.app_factory = barbican.api.app:create_main_app
+
+[app:versionapp]
+paste.app_factory = barbican.api.app:create_version_app
+
+[filter:simple]
+paste.filter_factory = barbican.api.middleware.simple:SimpleFilter.factory
+
+[filter:unauthenticated-context]
+paste.filter_factory = barbican.api.middleware.context:UnauthenticatedContextMiddleware.factory
+
+[filter:context]
+paste.filter_factory = barbican.api.middleware.context:ContextMiddleware.factory
+
+[filter:audit]
+paste.filter_factory = keystonemiddleware.audit:filter_factory
+audit_map_file = /etc/barbican/api_audit_map.conf
+
+[filter:keystone_authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+#need ability to re-auth a token, thus admin url
+identity_uri = http://localhost:35357
+admin_tenant_name = service
+admin_user = barbican
+admin_password = orange
+auth_version = v3.0
+#delay failing perhaps to log the unauthorized request in barbican ..
+#delay_auth_decision = true
+# signing_dir is configurable, but the default behavior of the authtoken
+# middleware should be sufficient. It will create a temporary directory
+# for the user the barbican process is running as.
+#signing_dir = /var/barbican/keystone-signing
+
+
+[filter:profile]
+use = egg:repoze.profile
+log_filename = myapp.profile
+cachegrind_filename = cachegrind.out.myapp
+discard_first_request = true
+path = /__profile__
+flush_at_shutdown = true
+unwind = false
+
+[filter:cors]
+paste.filter_factory = oslo_middleware.cors:filter_factory
+oslo_config_project = barbican
+latent_allow_headers = X-Auth-Token, X-Openstack-Request-Id, X-Project-Id, X-Identity-Status, X-User-Id, X-Storage-Token, X-Domain-Id, X-User-Domain-Id, X-Project-Domain-Id, X-Roles
+latent_expose_headers = X-Auth-Token, X-Openstack-Request-Id, X-Project-Id, X-Identity-Status, X-User-Id, X-Storage-Token, X-Domain-Id, X-User-Domain-Id, X-Project-Domain-Id, X-Roles
+latent_allow_methods = GET, PUT, POST, DELETE, PATCH
diff --git a/templates/barbican-vassals-api.ini.j2 b/templates/barbican-vassals-api.ini.j2
new file mode 100644
index 0000000..a4f0bee
--- /dev/null
+++ b/templates/barbican-vassals-api.ini.j2
@@ -0,0 +1,11 @@
+[uwsgi]
+socket = :9311
+protocol = http
+processes = 1
+lazy = true
+vacuum = true
+no-default-app = true
+memory-report = true
+plugins = python
+paste = config:/etc/barbican/barbican-api-paste.ini
+add-header = Connection: close
diff --git a/templates/barbican.conf.j2 b/templates/barbican.conf.j2
new file mode 100644
index 0000000..89e9913
--- /dev/null
+++ b/templates/barbican.conf.j2
@@ -0,0 +1,381 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+verbose = True
+
+# Show debugging output in logs (sets DEBUG log level output)
+#debug = True
+
+# Address to bind the API server
+bind_host = 0.0.0.0
+
+# Port to bind the API server to
+bind_port = 9311
+
+# Host name, for use in HATEOAS-style references
+# Note: Typically this would be the load balanced endpoint that clients would use
+# to communicate back with this service.
+host_href = http://localhost:9311
+
+# Log to this file. Make sure you do not set the same log
+# file for both the API and registry servers!
+#log_file = /var/log/barbican/api.log
+
+# Backlog requests when creating socket
+backlog = 4096
+
+# TCP_KEEPIDLE value in seconds when creating socket.
+# Not supported on OS X.
+#tcp_keepidle = 600
+
+# Maximum allowed http request size against the barbican-api
+max_allowed_secret_in_bytes = 10000
+max_allowed_request_size_in_bytes = 1000000
+
+# SQLAlchemy connection string for the reference implementation
+# registry server. Any valid SQLAlchemy connection string is fine.
+# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
+# Uncomment this for local dev, putting db in project directory:
+#sql_connection = sqlite:///barbican.sqlite
+# Note: For absolute addresses, use '////' slashes after 'sqlite:'
+# Uncomment for a more global development environment
+sql_connection = sqlite:////var/lib/barbican/barbican.sqlite
+
+# Period in seconds after which SQLAlchemy should reestablish its connection
+# to the database.
+#
+# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop
+# idle connections. This can result in 'MySQL Gone Away' exceptions. If you
+# notice this, you can lower this value to ensure that SQLAlchemy reconnects
+# before MySQL can drop the connection.
+sql_idle_timeout = 3600
+
+# Accepts a class imported from the sqlalchemy.pool module, and handles the
+# details of building the pool for you. If commented out, SQLAlchemy
+# will select based on the database dialect. Other options are QueuePool
+# (for SQLAlchemy-managed connections) and NullPool (to disable SQLAlchemy
+# management of connections).
+# See http://docs.sqlalchemy.org/en/latest/core/pooling.html for more details.
+#sql_pool_class = QueuePool
+
+# Show SQLAlchemy pool-related debugging output in logs (sets DEBUG log level
+# output) if specified.
+#sql_pool_logging = True
+
+# Size of pool used by SQLAlchemy. This is the largest number of connections
+# that will be kept persistently in the pool. Can be set to 0 to indicate no
+# size limit. To disable pooling, use a NullPool with sql_pool_class instead.
+# Comment out to allow SQLAlchemy to select the default.
+#sql_pool_size = 5
+
+# The maximum overflow size of the pool used by SQLAlchemy. When the number of
+# checked-out connections reaches the size set in sql_pool_size, additional
+# connections will be returned up to this limit. It follows then that the
+# total number of simultaneous connections the pool will allow is
+# sql_pool_size + sql_pool_max_overflow. Can be set to -1 to indicate no
+# overflow limit, so no limit will be placed on the total number of concurrent
+# connections. Comment out to allow SQLAlchemy to select the default.
+#sql_pool_max_overflow = 10
+
+# Default page size for the 'limit' paging URL parameter.
+default_limit_paging = 10
+
+# Maximum page size for the 'limit' paging URL parameter.
+max_limit_paging = 100
+
+# Role used to identify an authenticated user as administrator
+#admin_role = admin
+
+# Allow unauthenticated users to access the API with read-only
+# privileges. This only applies when using ContextMiddleware.
+#allow_anonymous_access = False
+
+# Allow access to version 1 of barbican api
+#enable_v1_api = True
+
+# Allow access to version 2 of barbican api
+#enable_v2_api = True
+
+# ================= SSL Options ===============================
+
+# Certificate file to use when starting API server securely
+#cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+#key_file = /path/to/keyfile
+
+# CA certificate file to use to verify connecting clients
+#ca_file = /path/to/cafile
+
+# ================= Security Options ==========================
+
+# AES key for encrypting store 'location' metadata, including
+# -- if used -- Swift or S3 credentials
+# Should be set to a random string of length 16, 24 or 32 bytes
+#metadata_encryption_key = <16, 24 or 32 char registry metadata key>
+
+# ================= Queue Options - oslo.messaging ==========================
+
+# Rabbit and HA configuration:
+amqp_durable_queues = True
+rabbit_userid=guest
+rabbit_password=guest
+rabbit_ha_queues = True
+rabbit_port=5672
+
+# For HA, specify queue nodes in cluster, comma delimited:
+# For example: rabbit_hosts=192.168.50.8:5672, 192.168.50.9:5672
+rabbit_hosts=localhost:5672
+
+# For HA, specify queue nodes in cluster as 'user@host:5672', comma delimited, ending with '/offset':
+# For example: transport_url = rabbit://guest@192.168.50.8:5672,guest@192.168.50.9:5672/
+# DO NOT USE THIS, due to '# FIXME(markmc): support multiple hosts' in oslo/messaging/_drivers/amqpdriver.py
+# transport_url = rabbit://guest@localhost:5672/
+
+# oslo notification driver for sending audit events via audit middleware.
+# Meaningful only when middleware is enabled in barbican paste ini file.
+# This is oslo config MultiStrOpt so can be defined multiple times in case
+# there is need to route audit event to messaging as well as log.
+# notification_driver = messagingv2
+# notification_driver = log
+
+# ======== OpenStack policy - oslo_policy ===============
+
+[oslo_policy]
+
+# ======== OpenStack policy integration
+# JSON file representing policy (string value)
+policy_file=/etc/barbican/policy.json
+
+# Rule checked when requested rule is not found (string value)
+policy_default_rule=default
+
+
+# ================= Queue Options - Application ==========================
+
+[queue]
+# Enable queuing asynchronous messaging.
+# Set false to invoke worker tasks synchronously (i.e. no-queue standalone mode)
+enable = False
+
+# Namespace for the queue
+namespace = 'barbican'
+
+# Topic for the queue
+topic = 'barbican.workers'
+
+# Version for the task API
+version = '1.1'
+
+# Server name for RPC service
+server_name = 'barbican.queue'
+
+# Number of asynchronous worker processes.
+# When greater than 1, then that many additional worker processes are
+# created for asynchronous worker functionality.
+asynchronous_workers = 1
+
+# ================= Retry/Scheduler Options ==========================
+
+[retry_scheduler]
+# Seconds (float) to wait before starting the retry scheduler
+initial_delay_seconds = 10.0
+
+# Seconds (float) to wait between periodic schedule events
+periodic_interval_max_seconds = 10.0
+
+
+# ====================== Quota Options ===============================
+
+[quotas]
+# For each resource, the default maximum number that can be used for
+# a project is set below. This value can be overridden for each
+# project through the API. A negative value means no limit. A zero
+# value effectively disables the resource.
+
+# default number of secrets allowed per project
+quota_secrets = -1
+
+# default number of orders allowed per project
+quota_orders = -1
+
+# default number of containers allowed per project
+quota_containers = -1
+
+# default number of consumers allowed per project
+quota_consumers = -1
+
+# default number of CAs allowed per project
+quota_cas = -1
+
+# ================= Keystone Notification Options - Application ===============
+
+[keystone_notifications]
+
+# Keystone notification functionality uses transport related configuration
+# from barbican common configuration as defined under
+# 'Queue Options - oslo.messaging' comments.
+# The HA related configuration is also shared with notification server.
+
+# True enables keystone notification listener functionality.
+enable = False
+
+# The default exchange under which topics are scoped.
+# May be overridden by an exchange name specified in the transport_url option.
+control_exchange = 'openstack'
+
+# Keystone notification queue topic name.
+# This name needs to match one of the values mentioned in Keystone deployment's
+# 'notification_topics' configuration e.g.
+# notification_topics=notifications, barbican_notifications
+# Multiple servers may listen on a topic and messages will be dispatched to one
+# of the servers in a round-robin fashion. That's why the Barbican service should
+# have its own dedicated notification queue so that it receives all of Keystone's
+# notifications.
+topic = 'notifications'
+
+# True enables requeue feature in case of notification processing error.
+# Enable this only when underlying transport supports this feature.
+allow_requeue = False
+
+# Version of tasks invoked via notifications
+version = '1.0'
+
+# Define the number of max threads to be used for notification server
+# processing functionality.
+thread_pool_size = 10
+
+# ================= Secret Store Plugin ===================
+[secretstore]
+namespace = barbican.secretstore.plugin
+enabled_secretstore_plugins = store_crypto
+
+# ================= Crypto plugin ===================
+[crypto]
+namespace = barbican.crypto.plugin
+enabled_crypto_plugins = simple_crypto
+
+[simple_crypto_plugin]
+# the kek should be a 32-byte value which is base64 encoded
+kek = 'YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXoxMjM0NTY='
+
+[dogtag_plugin]
+pem_path = '/etc/barbican/kra_admin_cert.pem'
+dogtag_host = localhost
+dogtag_port = 8443
+nss_db_path = '/etc/barbican/alias'
+nss_db_path_ca = '/etc/barbican/alias-ca'
+nss_password = 'password123'
+simple_cmc_profile = 'caOtherCert'
+ca_expiration_time = 1
+plugin_working_dir = '/etc/barbican/dogtag'
+
+
+[p11_crypto_plugin]
+# Path to vendor PKCS11 library
+library_path = '/usr/lib/libCryptoki2_64.so'
+# Password to login to PKCS11 session
+login = 'mypassword'
+# Label to identify master KEK in the HSM (must not be the same as HMAC label)
+mkek_label = 'an_mkek'
+# Length in bytes of master KEK
+mkek_length = 32
+# Label to identify HMAC key in the HSM (must not be the same as MKEK label)
+hmac_label = 'my_hmac_label'
+# HSM Slot id (Should correspond to a configured PKCS11 slot). Default: 1
+# slot_id = 1
+# Enable Read/Write session with the HSM?
+# rw_session = True
+# Length of Project KEKs to create
+# pkek_length = 32
+# How long to cache unwrapped Project KEKs
+# pkek_cache_ttl = 900
+# Max number of items in pkek cache
+# pkek_cache_limit = 100
+
+
+# ================== KMIP plugin =====================
+[kmip_plugin]
+username = 'admin'
+password = 'password'
+host = localhost
+port = 5696
+keyfile = '/path/to/certs/cert.key'
+certfile = '/path/to/certs/cert.crt'
+ca_certs = '/path/to/certs/LocalCA.crt'
+
+
+# ================= Certificate plugin ===================
+[certificate]
+namespace = barbican.certificate.plugin
+enabled_certificate_plugins = simple_certificate
+enabled_certificate_plugins = snakeoil_ca
+
+[certificate_event]
+namespace = barbican.certificate.event.plugin
+enabled_certificate_event_plugins = simple_certificate
+
+[snakeoil_ca_plugin]
+ca_cert_path = /etc/barbican/snakeoil-ca.crt
+ca_cert_key_path = /etc/barbican/snakeoil-ca.key
+ca_cert_chain_path = /etc/barbican/snakeoil-ca.chain
+ca_cert_pkcs7_path = /etc/barbican/snakeoil-ca.p7b
+subca_cert_key_directory=/etc/barbican/snakeoil-cas
+
+[cors]
+
+#
+# From oslo.middleware.cors
+#
+
+# Indicate whether this resource may be shared with the domain
+# received in the request's "origin" header. (list value)
+#allowed_origin =
+
+# Indicate that the actual request can include user credentials
+# (boolean value)
+#allow_credentials = true
+
+# Indicate which headers are safe to expose to the API. Defaults to
+# HTTP Simple Headers. (list value)
+#expose_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma
+
+# Maximum cache age of CORS preflight requests. (integer value)
+#max_age = 3600
+
+# Indicate which methods can be used during the actual request. (list
+# value)
+#allow_methods = GET,POST,PUT,DELETE,OPTIONS
+
+# Indicate which header field names may be used during the actual
+# request. (list value)
+#allow_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma
+
+
+[cors.subdomain]
+
+#
+# From oslo.middleware.cors
+#
+
+# Indicate whether this resource may be shared with the domain
+# received in the request's "origin" header. (list value)
+#allowed_origin =
+
+# Indicate that the actual request can include user credentials
+# (boolean value)
+#allow_credentials = true
+
+# Indicate which headers are safe to expose to the API. Defaults to
+# HTTP Simple Headers. (list value)
+#expose_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma
+
+# Maximum cache age of CORS preflight requests. (integer value)
+#max_age = 3600
+
+# Indicate which methods can be used during the actual request. (list
+# value)
+#allow_methods = GET,POST,PUT,DELETE,OPTIONS
+
+# Indicate which header field names may be used during the actual
+# request. (list value)
+#allow_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma
diff --git a/templates/policy.json.j2 b/templates/policy.json.j2
new file mode 100644
index 0000000..2e4e53c
--- /dev/null
+++ b/templates/policy.json.j2
@@ -0,0 +1,78 @@
+{
+    "admin": "role:admin",
+    "observer": "role:observer",
+    "creator": "role:creator",
+    "audit": "role:audit",
+    "service_admin": "role:key-manager:service-admin",
+    "admin_or_user_does_not_work": "project_id:%(project_id)s",
+    "admin_or_user": "rule:admin or project_id:%(project_id)s",
+    "admin_or_creator": "rule:admin or rule:creator",
+    "all_but_audit": "rule:admin or rule:observer or rule:creator",
+    "all_users": "rule:admin or rule:observer or rule:creator or rule:audit or rule:service_admin",
+    "secret_project_match": "project:%(target.secret.project_id)s",
+    "secret_acl_read": "'read':%(target.secret.read)s",
+    "secret_private_read": "'False':%(target.secret.read_project_access)s",
+    "secret_creator_user": "user:%(target.secret.creator_id)s",
+    "container_project_match": "project:%(target.container.project_id)s",
+    "container_acl_read": "'read':%(target.container.read)s",
+    "container_private_read": "'False':%(target.container.read_project_access)s",
+    "container_creator_user": "user:%(target.container.creator_id)s",
+
+    "secret_non_private_read": "rule:all_users and rule:secret_project_match and not rule:secret_private_read",
+    "secret_decrypt_non_private_read": "rule:all_but_audit and rule:secret_project_match and not rule:secret_private_read",
+    "container_non_private_read": "rule:all_users and rule:container_project_match and not rule:container_private_read",
+    "secret_project_admin": "rule:admin and rule:secret_project_match",
+    "secret_project_creator": "rule:creator and rule:secret_project_match and rule:secret_creator_user",
+    "container_project_admin": "rule:admin and rule:container_project_match",
+    "container_project_creator": "rule:creator and rule:container_project_match and rule:container_creator_user",
+
+    "version:get": "@",
+    "secret:decrypt": "rule:secret_decrypt_non_private_read or rule:secret_project_creator or rule:secret_project_admin or rule:secret_acl_read",
+    "secret:get": "rule:secret_non_private_read or rule:secret_project_creator or rule:secret_project_admin or rule:secret_acl_read",
+    "secret:put": "rule:admin_or_creator and rule:secret_project_match",
+    "secret:delete": "rule:admin and rule:secret_project_match",
+    "secrets:post": "rule:admin_or_creator",
+    "secrets:get": "rule:all_but_audit",
+    "orders:post": "rule:admin_or_creator",
+    "orders:get": "rule:all_but_audit",
+    "order:get": "rule:all_users",
+    "order:put": "rule:admin_or_creator",
+    "order:delete": "rule:admin",
+    "consumer:get": "rule:all_users",
+    "consumers:get": "rule:all_users",
+    "consumers:post": "rule:admin",
+    "consumers:delete": "rule:admin",
+    "containers:post": "rule:admin_or_creator",
+    "containers:get": "rule:all_but_audit",
+    "container:get": "rule:container_non_private_read or rule:container_project_creator or rule:container_project_admin or rule:container_acl_read",
+    "container:delete": "rule:admin",
+    "transport_key:get": "rule:all_users",
+    "transport_key:delete": "rule:admin",
+    "transport_keys:get": "rule:all_users",
+    "transport_keys:post": "rule:admin",
+    "certificate_authorities:get_limited": "rule:all_users",
+    "certificate_authorities:get_all": "rule:admin",
+    "certificate_authorities:post": "rule:admin",
+    "certificate_authorities:get_preferred_ca": "rule:all_users",
+    "certificate_authorities:get_global_preferred_ca": "rule:service_admin",
"certificate_authorities:get_global_preferred_ca": "rule:service_admin", + "certificate_authorities:unset_global_preferred": "rule:service_admin", + "certificate_authority:delete": "rule:admin", + "certificate_authority:get": "rule:all_users", + "certificate_authority:get_cacert": "rule:all_users", + "certificate_authority:get_ca_cert_chain": "rule:all_users", + "certificate_authority:get_projects": "rule:service_admin", + "certificate_authority:add_to_project": "rule:admin", + "certificate_authority:remove_from_project": "rule:admin", + "certificate_authority:set_preferred": "rule:admin", + "certificate_authority:set_global_preferred": "rule:service_admin", + "secret_acls:put_patch": "rule:secret_project_admin or rule:secret_project_creator", + "secret_acls:delete": "rule:secret_project_admin or rule:secret_project_creator", + "secret_acls:get": "rule:all_but_audit and rule:secret_project_match", + "container_acls:put_patch": "rule:container_project_admin or rule:container_project_creator", + "container_acls:delete": "rule:container_project_admin or rule:container_project_creator", + "container_acls:get": "rule:all_but_audit and rule:container_project_match", + "quotas:get": "rule:all_users", + "project_quotas:get": "rule:service_admin", + "project_quotas:put": "rule:service_admin", + "project_quotas:delete": "rule:service_admin" +}