[DEFAULT]
# Show more verbose log output (sets INFO log level output)
verbose = {{ options.verbose }}

# Show debugging output in logs (sets DEBUG log level output)
debug = {{ options.debug }}

# Address to bind the API server
bind_host = 0.0.0.0

# Port to bind the API server to
bind_port = 9311

# Host name, for use in HATEOAS-style references.
# Note: Typically this would be the load-balanced endpoint that clients
# would use to communicate back with this service.
host_href = http://localhost:9311

# Log to this file. Make sure you do not set the same log
# file for both the API and registry servers!
#log_file = /var/log/barbican/api.log

# Backlog requests when creating socket
backlog = 4096

# TCP_KEEPIDLE value in seconds when creating socket.
# Not supported on OS X.
#tcp_keepidle = 600

# Maximum allowed http request size against the barbican-api
max_allowed_secret_in_bytes = 10000
max_allowed_request_size_in_bytes = 1000000

# SQLAlchemy connection string for the reference implementation
# registry server. Any valid SQLAlchemy connection string is fine.
# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
# Uncomment this for local dev, putting db in project directory:
#sql_connection = sqlite:///barbican.sqlite
# Note: For absolute addresses, use '////' slashes after 'sqlite:'
# Uncomment for a more global development environment

# Period in seconds after which SQLAlchemy should reestablish its connection
# to the database.
#
# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop
# idle connections. This can result in 'MySQL Gone Away' exceptions. If you
# notice this, you can lower this value to ensure that SQLAlchemy reconnects
# before MySQL can drop the connection.
sql_idle_timeout = 3600

# Default page size for the 'limit' paging URL parameter.
default_limit_paging = 10

# Maximum page size for the 'limit' paging URL parameter.
max_limit_paging = 100

# Number of Barbican API worker processes to start.
# On machines with more than one CPU increasing this value
# may improve performance (especially if using SSL with
# compression turned on). It is typically recommended to set
# this value to the number of CPUs present on your machine.
workers = 1

# Role used to identify an authenticated user as administrator
#admin_role = admin

# Allow unauthenticated users to access the API with read-only
# privileges. This only applies when using ContextMiddleware.
#allow_anonymous_access = False

# Allow access to version 1 of barbican api
#enable_v1_api = True

# Allow access to version 2 of barbican api
#enable_v2_api = True

# ================= SSL Options ===============================

# Certificate file to use when starting API server securely
#cert_file = /path/to/certfile

# Private key file to use when starting API server securely
#key_file = /path/to/keyfile

# CA certificate file to use to verify connecting clients
#ca_file = /path/to/cafile

# ================= Security Options ==========================

# AES key for encrypting store 'location' metadata, including
# -- if used -- Swift or S3 credentials
# Should be set to a random string of length 16, 24 or 32 bytes
#metadata_encryption_key = <16, 24 or 32 char registry metadata key>

# ============ Delayed Delete Options =============================

# Turn on/off delayed delete
delayed_delete = False

# Delayed delete time in seconds
scrub_time = 43200

# Directory that the scrubber will use to remind itself of what to delete
# Make sure this is also set in glance-scrubber.conf
scrubber_datadir = /var/lib/barbican/scrubber

# ======== OpenStack policy integration

# JSON file representing policy (string value)
policy_file = /etc/barbican/policy.json

# Rule checked when requested rule is not found (string value)
policy_default_rule = default

{% include "parts/database" %}

# ================= Queue Options - oslo.messaging ==========================

{% include "parts/section-rabbitmq-oslo" %}

# For HA, specify queue nodes in cluster as 'user@host:5672', comma delimited, ending with '/offset':
# For example: transport_url = rabbit://guest@192.168.50.8:5672,guest@192.168.50.9:5672/
# DO NOT USE THIS, due to '# FIXME(markmc): support multiple hosts' in oslo/messaging/_drivers/amqpdriver.py
# transport_url = rabbit://guest@localhost:5672/

# ================= Queue Options - Application ==========================

[queue]
# Enable queuing asynchronous messaging.
# Set false to invoke worker tasks synchronously (i.e. no-queue standalone mode)
enable = False

# Namespace for the queue
namespace = 'barbican'

# Topic for the queue
topic = 'barbican.workers'

# Version for the task API
version = '1.1'

# Server name for RPC service
server_name = 'barbican.queue'

# ================= Keystone Notification Options - Application ===============

[keystone_notifications]
# Keystone notification functionality uses transport related configuration
# from barbican common configuration as defined under
# 'Queue Options - oslo.messaging' comments.
# The HA related configuration is also shared with notification server.

# True enables keystone notification listener functionality.
enable = False

# The default exchange under which topics are scoped.
# May be overridden by an exchange name specified in the transport_url option.
control_exchange = 'openstack'

# Keystone notification queue topic name.
# This name needs to match one of values mentioned in Keystone deployment's
# 'notification_topics' configuration e.g.
#     notification_topics=notifications, barbican_notifications
# Multiple servers may listen on a topic and messages will be dispatched to one
# of the servers in a round-robin fashion. That's why Barbican service should
# have its own dedicated notification queue so that it receives all of Keystone
# notifications.
topic = 'notifications'

# True enables requeue feature in case of notification processing error.
# Enable this only when underlying transport supports this feature.
allow_requeue = False

# Version of tasks invoked via notifications
version = '1.0'

# Define the number of max threads to be used for notification server
# processing functionality.
thread_pool_size = 10

# ================= Secret Store Plugin ===================

[secretstore]
namespace = barbican.secretstore.plugin
enabled_secretstore_plugins = store_crypto

# ================= Crypto plugin ===================

[crypto]
namespace = barbican.crypto.plugin
enabled_crypto_plugins = simple_crypto

[simple_crypto_plugin]
# the kek should be a 32-byte value which is base64 encoded
# NOTE(review): this is the well-known sample KEK ('abcdefghijklmnopqrstuvwxyz123456'
# base64-encoded) — it MUST be replaced with a randomly generated value before any
# non-development deployment.
kek = 'YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXoxMjM0NTY='

[dogtag_plugin]
pem_path = '/etc/barbican/kra_admin_cert.pem'
dogtag_host = localhost
dogtag_port = 8443
nss_db_path = '/etc/barbican/alias'
# NOTE(review): plaintext NSS DB password stored in config — ensure this file has
# restrictive permissions and replace the placeholder before deployment.
nss_password = 'password123'

[p11_crypto_plugin]
# Path to vendor PKCS11 library
library_path = '/usr/lib/libCryptoki2_64.so'
# Password to login to PKCS11 session
# NOTE(review): placeholder HSM credential — replace before deployment.
login = 'mypassword'
# Label to identify master KEK in the HSM (must not be the same as HMAC label)
mkek_label = 'an_mkek'
# Length in bytes of master KEK
mkek_length = 32
# Label to identify HMAC key in the HSM (must not be the same as MKEK label)
hmac_label = 'my_hmac_label'

# ================== KMIP plugin =====================

[kmip_plugin]
# NOTE(review): placeholder KMIP credentials — replace before deployment.
username = 'admin'
password = 'password'
host = localhost
port = 9090

# ================= Certificate plugin ===================

[certificate]
namespace = barbican.certificate.plugin
enabled_certificate_plugins = simple_certificate

[certificate_event]
namespace = barbican.certificate.event.plugin
enabled_certificate_event_plugins = simple_certificate