Use oslo-config-generator to generate barbican.conf.sample
Currently etc/barbican/barbican.conf is maintained by hand and cannot be regenerated based on the config settings defined in the code. A common pattern for OpenStack projects is to use oslo-config-generator for that task. Co-Authored-By: Randall Burt <randall.burt@rackspace.com> Depends-On: I90870dcb49cd96f6bf0fe353fa6e779ffd87a5af Closes-Bug: #1584789 Change-Id: I5f3dcd2fc982f1178ef7dd662c24d3166f91b266
This commit is contained in:
parent
e88d79cae0
commit
06b76aa6e8
2
.gitignore
vendored
2
.gitignore
vendored
@ -67,6 +67,8 @@ ChangeLog
|
|||||||
|
|
||||||
# Rope
|
# Rope
|
||||||
.ropeproject
|
.ropeproject
|
||||||
|
# files created by oslo-config-generator
|
||||||
|
etc/barbican/barbican.conf.sample
|
||||||
|
|
||||||
# Files created by releasenotes build
|
# Files created by releasenotes build
|
||||||
releasenotes/build
|
releasenotes/build
|
||||||
|
@ -44,27 +44,90 @@ context_opts = [
|
|||||||
|
|
||||||
common_opts = [
|
common_opts = [
|
||||||
cfg.IntOpt('max_allowed_request_size_in_bytes',
|
cfg.IntOpt('max_allowed_request_size_in_bytes',
|
||||||
default=MAX_BYTES_REQUEST_INPUT_ACCEPTED),
|
default=MAX_BYTES_REQUEST_INPUT_ACCEPTED,
|
||||||
|
help=u._("Maximum allowed http request size against the "
|
||||||
|
"barbican-api.")),
|
||||||
cfg.IntOpt('max_allowed_secret_in_bytes',
|
cfg.IntOpt('max_allowed_secret_in_bytes',
|
||||||
default=DEFAULT_MAX_SECRET_BYTES),
|
default=DEFAULT_MAX_SECRET_BYTES,
|
||||||
|
help=u._("Maximum allowed secret size in bytes.")),
|
||||||
]
|
]
|
||||||
|
|
||||||
host_opts = [
|
host_opts = [
|
||||||
cfg.StrOpt('host_href', default='http://localhost:9311'),
|
cfg.StrOpt('host_href', default='http://localhost:9311',
|
||||||
|
help=u._("Host name, for use in HATEOAS-style references Note: "
|
||||||
|
"Typically this would be the load balanced endpoint "
|
||||||
|
"that clients would use to communicate back with this "
|
||||||
|
"service. If a deployment wants to derive host from "
|
||||||
|
"wsgi request instead then make this blank. Blank is "
|
||||||
|
"needed to override default config value which is "
|
||||||
|
"'http://localhost:9311'")),
|
||||||
]
|
]
|
||||||
|
|
||||||
db_opts = [
|
db_opts = [
|
||||||
cfg.StrOpt('sql_connection', secret=True),
|
cfg.StrOpt('sql_connection',
|
||||||
cfg.IntOpt('sql_idle_timeout', default=3600),
|
default="sqlite:///barbican.sqlite",
|
||||||
cfg.IntOpt('sql_max_retries', default=60),
|
secret=True,
|
||||||
cfg.IntOpt('sql_retry_interval', default=1),
|
help=u._("SQLAlchemy connection string for the reference "
|
||||||
cfg.BoolOpt('db_auto_create', default=True),
|
"implementation registry server. Any valid "
|
||||||
cfg.IntOpt('max_limit_paging', default=100),
|
"SQLAlchemy connection string is fine. See: "
|
||||||
cfg.IntOpt('default_limit_paging', default=10),
|
"http://www.sqlalchemy.org/docs/05/reference/"
|
||||||
cfg.StrOpt('sql_pool_class'),
|
"sqlalchemy/connections.html#sqlalchemy."
|
||||||
cfg.BoolOpt('sql_pool_logging', default=False),
|
"create_engine. Note: For absolute addresses, use "
|
||||||
cfg.IntOpt('sql_pool_size'),
|
"'////' slashes after 'sqlite:'.")),
|
||||||
cfg.IntOpt('sql_pool_max_overflow'),
|
cfg.IntOpt('sql_idle_timeout', default=3600,
|
||||||
|
help=u._("Period in seconds after which SQLAlchemy should "
|
||||||
|
"reestablish its connection to the database. MySQL "
|
||||||
|
"uses a default `wait_timeout` of 8 hours, after "
|
||||||
|
"which it will drop idle connections. This can result "
|
||||||
|
"in 'MySQL Gone Away' exceptions. If you notice this, "
|
||||||
|
"you can lower this value to ensure that SQLAlchemy "
|
||||||
|
"reconnects before MySQL can drop the connection.")),
|
||||||
|
cfg.IntOpt('sql_max_retries', default=60,
|
||||||
|
help=u._("Maximum number of database connection retries "
|
||||||
|
"during startup. Set to -1 to specify an infinite "
|
||||||
|
"retry count.")),
|
||||||
|
cfg.IntOpt('sql_retry_interval', default=1,
|
||||||
|
help=u._("Interval between retries of opening a SQL "
|
||||||
|
"connection.")),
|
||||||
|
cfg.BoolOpt('db_auto_create', default=True,
|
||||||
|
help=u._("Create the Barbican database on service startup.")),
|
||||||
|
cfg.IntOpt('max_limit_paging', default=100,
|
||||||
|
help=u._("Maximum page size for the 'limit' paging URL "
|
||||||
|
"parameter.")),
|
||||||
|
cfg.IntOpt('default_limit_paging', default=10,
|
||||||
|
help=u._("Default page size for the 'limit' paging URL "
|
||||||
|
"parameter.")),
|
||||||
|
cfg.StrOpt('sql_pool_class', default="QueuePool",
|
||||||
|
help=u._("Accepts a class imported from the sqlalchemy.pool "
|
||||||
|
"module, and handles the details of building the "
|
||||||
|
"pool for you. If commented out, SQLAlchemy will "
|
||||||
|
"select based on the database dialect. Other options "
|
||||||
|
"are QueuePool (for SQLAlchemy-managed connections) "
|
||||||
|
"and NullPool (to disabled SQLAlchemy management of "
|
||||||
|
"connections). See http://docs.sqlalchemy.org/en/"
|
||||||
|
"latest/core/pooling.html for more details")),
|
||||||
|
cfg.BoolOpt('sql_pool_logging', default=False,
|
||||||
|
help=u._("Show SQLAlchemy pool-related debugging output in "
|
||||||
|
"logs (sets DEBUG log level output) if specified.")),
|
||||||
|
cfg.IntOpt('sql_pool_size', default=5,
|
||||||
|
help=u._("Size of pool used by SQLAlchemy. This is the largest "
|
||||||
|
"number of connections that will be kept persistently "
|
||||||
|
"in the pool. Can be set to 0 to indicate no size "
|
||||||
|
"limit. To disable pooling, use a NullPool with "
|
||||||
|
"sql_pool_class instead. Comment out to allow "
|
||||||
|
"SQLAlchemy to select the default.")),
|
||||||
|
cfg.IntOpt('sql_pool_max_overflow', default=10,
|
||||||
|
help=u._("# The maximum overflow size of the pool used by "
|
||||||
|
"SQLAlchemy. When the number of checked-out "
|
||||||
|
"connections reaches the size set in sql_pool_size, "
|
||||||
|
"additional connections will be returned up to this "
|
||||||
|
"limit. It follows then that the total number of "
|
||||||
|
"simultaneous connections the pool will allow is "
|
||||||
|
"sql_pool_size + sql_pool_max_overflow. Can be set "
|
||||||
|
"to -1 to indicate no overflow limit, so no limit "
|
||||||
|
"will be placed on the total number of concurrent "
|
||||||
|
"connections. Comment out to allow SQLAlchemy to "
|
||||||
|
"select the default.")),
|
||||||
]
|
]
|
||||||
|
|
||||||
retry_opt_group = cfg.OptGroup(name='retry_scheduler',
|
retry_opt_group = cfg.OptGroup(name='retry_scheduler',
|
||||||
@ -153,6 +216,19 @@ quota_opts = [
|
|||||||
help=u._('Number of CAs allowed per project'))
|
help=u._('Number of CAs allowed per project'))
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
def list_opts():
|
||||||
|
yield None, context_opts
|
||||||
|
yield None, common_opts
|
||||||
|
yield None, host_opts
|
||||||
|
yield None, db_opts
|
||||||
|
yield None, _options.eventlet_backdoor_opts
|
||||||
|
yield retry_opt_group, retry_opts
|
||||||
|
yield queue_opt_group, queue_opts
|
||||||
|
yield ks_queue_opt_group, ks_queue_opts
|
||||||
|
yield quota_opt_group, quota_opts
|
||||||
|
|
||||||
|
|
||||||
# Flag to indicate barbican configuration is already parsed once or not
|
# Flag to indicate barbican configuration is already parsed once or not
|
||||||
_CONFIG_PARSED_ONCE = False
|
_CONFIG_PARSED_ONCE = False
|
||||||
|
|
||||||
|
@ -50,6 +50,10 @@ config.parse_args(CONF)
|
|||||||
config.set_module_config("crypto", CONF)
|
config.set_module_config("crypto", CONF)
|
||||||
|
|
||||||
|
|
||||||
|
def list_opts():
|
||||||
|
yield crypto_opt_group, crypto_opts
|
||||||
|
|
||||||
|
|
||||||
class _CryptoPluginManager(named.NamedExtensionManager):
|
class _CryptoPluginManager(named.NamedExtensionManager):
|
||||||
def __init__(self, conf=CONF, invoke_args=(), invoke_kwargs={}):
|
def __init__(self, conf=CONF, invoke_args=(), invoke_kwargs={}):
|
||||||
"""Crypto Plugin Manager
|
"""Crypto Plugin Manager
|
||||||
|
@ -78,6 +78,10 @@ CONF.register_opts(p11_crypto_plugin_opts, group=p11_crypto_plugin_group)
|
|||||||
config.parse_args(CONF)
|
config.parse_args(CONF)
|
||||||
|
|
||||||
|
|
||||||
|
def list_opts():
|
||||||
|
yield p11_crypto_plugin_group, p11_crypto_plugin_opts
|
||||||
|
|
||||||
|
|
||||||
def json_dumps_compact(data):
|
def json_dumps_compact(data):
|
||||||
return json.dumps(data, separators=(',', ':'))
|
return json.dumps(data, separators=(',', ':'))
|
||||||
|
|
||||||
|
@ -44,6 +44,10 @@ CONF.register_opts(simple_crypto_plugin_opts, group=simple_crypto_plugin_group)
|
|||||||
config.parse_args(CONF)
|
config.parse_args(CONF)
|
||||||
|
|
||||||
|
|
||||||
|
def list_opts():
|
||||||
|
yield simple_crypto_plugin_group, simple_crypto_plugin_opts
|
||||||
|
|
||||||
|
|
||||||
class SimpleCryptoPlugin(c.CryptoPluginBase):
|
class SimpleCryptoPlugin(c.CryptoPluginBase):
|
||||||
"""Insecure implementation of the crypto plugin."""
|
"""Insecure implementation of the crypto plugin."""
|
||||||
|
|
||||||
|
@ -56,3 +56,7 @@ dogtag_plugin_opts = [
|
|||||||
CONF.register_group(dogtag_plugin_group)
|
CONF.register_group(dogtag_plugin_group)
|
||||||
CONF.register_opts(dogtag_plugin_opts, group=dogtag_plugin_group)
|
CONF.register_opts(dogtag_plugin_opts, group=dogtag_plugin_group)
|
||||||
config.parse_args(CONF)
|
config.parse_args(CONF)
|
||||||
|
|
||||||
|
|
||||||
|
def list_opts():
|
||||||
|
yield dogtag_plugin_group, dogtag_plugin_opts
|
||||||
|
@ -60,6 +60,11 @@ CONF.register_opts(cert_opts, group=cert_opt_group)
|
|||||||
config.parse_args(CONF)
|
config.parse_args(CONF)
|
||||||
|
|
||||||
|
|
||||||
|
def list_opts():
|
||||||
|
yield cert_opt_group, cert_opts
|
||||||
|
yield cert_event_opt_group, cert_event_opts
|
||||||
|
|
||||||
|
|
||||||
# Configuration for certificate eventing plugins:
|
# Configuration for certificate eventing plugins:
|
||||||
DEFAULT_EVENT_PLUGIN_NAMESPACE = 'barbican.certificate.event.plugin'
|
DEFAULT_EVENT_PLUGIN_NAMESPACE = 'barbican.certificate.event.plugin'
|
||||||
DEFAULT_EVENT_PLUGINS = ['simple_certificate_event']
|
DEFAULT_EVENT_PLUGINS = ['simple_certificate_event']
|
||||||
|
@ -61,6 +61,10 @@ config.parse_args(CONF)
|
|||||||
config.set_module_config("secretstore", CONF)
|
config.set_module_config("secretstore", CONF)
|
||||||
|
|
||||||
|
|
||||||
|
def list_opts():
|
||||||
|
yield store_opt_group, store_opts
|
||||||
|
|
||||||
|
|
||||||
class SecretStorePluginNotFound(exception.BarbicanHTTPException):
|
class SecretStorePluginNotFound(exception.BarbicanHTTPException):
|
||||||
"""Raised when no plugins are installed."""
|
"""Raised when no plugins are installed."""
|
||||||
|
|
||||||
|
@ -86,6 +86,11 @@ CONF.register_group(kmip_opt_group)
|
|||||||
CONF.register_opts(kmip_opts, group=kmip_opt_group)
|
CONF.register_opts(kmip_opts, group=kmip_opt_group)
|
||||||
config.parse_args(CONF)
|
config.parse_args(CONF)
|
||||||
|
|
||||||
|
|
||||||
|
def list_opts():
|
||||||
|
yield kmip_opt_group, kmip_opts
|
||||||
|
|
||||||
|
|
||||||
attribute_debug_msg = "Created attribute type %s with value %s"
|
attribute_debug_msg = "Created attribute type %s with value %s"
|
||||||
|
|
||||||
|
|
||||||
|
@ -56,6 +56,10 @@ CONF.register_opts(snakeoil_ca_plugin_opts, group=snakeoil_ca_plugin_group)
|
|||||||
config.parse_args(CONF)
|
config.parse_args(CONF)
|
||||||
|
|
||||||
|
|
||||||
|
def list_opts():
|
||||||
|
yield snakeoil_ca_plugin_group, snakeoil_ca_plugin_opts
|
||||||
|
|
||||||
|
|
||||||
def set_subject_X509Name(target, dn):
|
def set_subject_X509Name(target, dn):
|
||||||
"""Set target X509Name object with parsed dn.
|
"""Set target X509Name object with parsed dn.
|
||||||
|
|
||||||
|
@ -272,7 +272,10 @@ class WhenTestingGetEnginePrivate(utils.BaseTestCase):
|
|||||||
'connection',
|
'connection',
|
||||||
pool_recycle=3600,
|
pool_recycle=3600,
|
||||||
convert_unicode=True,
|
convert_unicode=True,
|
||||||
echo=False
|
echo=False,
|
||||||
|
poolclass=sqlalchemy.pool.QueuePool,
|
||||||
|
pool_size=repositories.CONF.sql_pool_size,
|
||||||
|
max_overflow=repositories.CONF.sql_pool_max_overflow
|
||||||
)
|
)
|
||||||
|
|
||||||
@mock.patch('barbican.model.repositories._create_engine')
|
@mock.patch('barbican.model.repositories._create_engine')
|
||||||
|
@ -3,6 +3,7 @@
|
|||||||
|
|
||||||
mozilla-nss-devel [platform:rpm]
|
mozilla-nss-devel [platform:rpm]
|
||||||
nss-devel [platform:rpm]
|
nss-devel [platform:rpm]
|
||||||
|
libnss3-dev [platform:dpkg]
|
||||||
|
|
||||||
gettext [test]
|
gettext [test]
|
||||||
|
|
||||||
|
@ -94,7 +94,6 @@ function configure_barbican {
|
|||||||
sudo chown $USER $BARBICAN_CONF_DIR
|
sudo chown $USER $BARBICAN_CONF_DIR
|
||||||
|
|
||||||
# Copy the barbican config files to the config dir
|
# Copy the barbican config files to the config dir
|
||||||
cp $BARBICAN_DIR/etc/barbican/barbican.conf $BARBICAN_CONF_DIR
|
|
||||||
cp $BARBICAN_DIR/etc/barbican/barbican-api-paste.ini $BARBICAN_CONF_DIR
|
cp $BARBICAN_DIR/etc/barbican/barbican-api-paste.ini $BARBICAN_CONF_DIR
|
||||||
cp -R $BARBICAN_DIR/etc/barbican/vassals $BARBICAN_CONF_DIR
|
cp -R $BARBICAN_DIR/etc/barbican/vassals $BARBICAN_CONF_DIR
|
||||||
|
|
||||||
|
4
etc/barbican/README.barbican.conf.txt
Normal file
4
etc/barbican/README.barbican.conf.txt
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
To generate the sample barbican.conf file, run the following
|
||||||
|
command from the top level of the barbican directory:
|
||||||
|
|
||||||
|
tox -egenconfig
|
@ -1,558 +0,0 @@
|
|||||||
[DEFAULT]
|
|
||||||
# Show debugging output in logs (sets DEBUG log level output)
|
|
||||||
#debug = True
|
|
||||||
|
|
||||||
# Address to bind the API server
|
|
||||||
bind_host = 0.0.0.0
|
|
||||||
|
|
||||||
# Port to bind the API server to
|
|
||||||
bind_port = 9311
|
|
||||||
|
|
||||||
# Host name, for use in HATEOAS-style references
|
|
||||||
# Note: Typically this would be the load balanced endpoint that clients would use
|
|
||||||
# communicate back with this service.
|
|
||||||
# If a deployment wants to derive host from wsgi request instead then make this
|
|
||||||
# blank. Blank is needed to override default config value which is
|
|
||||||
# 'http://localhost:9311'.
|
|
||||||
host_href = http://localhost:9311
|
|
||||||
|
|
||||||
# Log to this file. Make sure you do not set the same log
|
|
||||||
# file for both the API and registry servers!
|
|
||||||
#log_file = /var/log/barbican/api.log
|
|
||||||
|
|
||||||
# Backlog requests when creating socket
|
|
||||||
backlog = 4096
|
|
||||||
|
|
||||||
# TCP_KEEPIDLE value in seconds when creating socket.
|
|
||||||
# Not supported on OS X.
|
|
||||||
#tcp_keepidle = 600
|
|
||||||
|
|
||||||
# Maximum allowed http request size against the barbican-api
|
|
||||||
max_allowed_secret_in_bytes = 10000
|
|
||||||
max_allowed_request_size_in_bytes = 1000000
|
|
||||||
|
|
||||||
# SQLAlchemy connection string for the reference implementation
|
|
||||||
# registry server. Any valid SQLAlchemy connection string is fine.
|
|
||||||
# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
|
|
||||||
# Uncomment this for local dev, putting db in project directory:
|
|
||||||
#sql_connection = sqlite:///barbican.sqlite
|
|
||||||
# Note: For absolute addresses, use '////' slashes after 'sqlite:'
|
|
||||||
# Uncomment for a more global development environment
|
|
||||||
sql_connection = sqlite:////var/lib/barbican/barbican.sqlite
|
|
||||||
|
|
||||||
# Period in seconds after which SQLAlchemy should reestablish its connection
|
|
||||||
# to the database.
|
|
||||||
#
|
|
||||||
# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop
|
|
||||||
# idle connections. This can result in 'MySQL Gone Away' exceptions. If you
|
|
||||||
# notice this, you can lower this value to ensure that SQLAlchemy reconnects
|
|
||||||
# before MySQL can drop the connection.
|
|
||||||
sql_idle_timeout = 3600
|
|
||||||
|
|
||||||
# Accepts a class imported from the sqlalchemy.pool module, and handles the
|
|
||||||
# details of building the pool for you. If commented out, SQLAlchemy
|
|
||||||
# will select based on the database dialect. Other options are QueuePool
|
|
||||||
# (for SQLAlchemy-managed connections) and NullPool (to disabled SQLAlchemy
|
|
||||||
# management of connections).
|
|
||||||
# See http://docs.sqlalchemy.org/en/latest/core/pooling.html for more details.
|
|
||||||
#sql_pool_class = QueuePool
|
|
||||||
|
|
||||||
# Show SQLAlchemy pool-related debugging output in logs (sets DEBUG log level
|
|
||||||
# output) if specified.
|
|
||||||
#sql_pool_logging = True
|
|
||||||
|
|
||||||
# Size of pool used by SQLAlchemy. This is the largest number of connections
|
|
||||||
# that will be kept persistently in the pool. Can be set to 0 to indicate no
|
|
||||||
# size limit. To disable pooling, use a NullPool with sql_pool_class instead.
|
|
||||||
# Comment out to allow SQLAlchemy to select the default.
|
|
||||||
#sql_pool_size = 5
|
|
||||||
|
|
||||||
# The maximum overflow size of the pool used by SQLAlchemy. When the number of
|
|
||||||
# checked-out connections reaches the size set in sql_pool_size, additional
|
|
||||||
# connections will be returned up to this limit. It follows then that the
|
|
||||||
# total number of simultaneous connections the pool will allow is
|
|
||||||
# sql_pool_size + sql_pool_max_overflow. Can be set to -1 to indicate no
|
|
||||||
# overflow limit, so no limit will be placed on the total number of concurrent
|
|
||||||
# connections. Comment out to allow SQLAlchemy to select the default.
|
|
||||||
#sql_pool_max_overflow = 10
|
|
||||||
|
|
||||||
# Default page size for the 'limit' paging URL parameter.
|
|
||||||
default_limit_paging = 10
|
|
||||||
|
|
||||||
# Maximum page size for the 'limit' paging URL parameter.
|
|
||||||
max_limit_paging = 100
|
|
||||||
|
|
||||||
# Role used to identify an authenticated user as administrator
|
|
||||||
#admin_role = admin
|
|
||||||
|
|
||||||
# Allow unauthenticated users to access the API with read-only
|
|
||||||
# privileges. This only applies when using ContextMiddleware.
|
|
||||||
#allow_anonymous_access = False
|
|
||||||
|
|
||||||
# Allow access to version 1 of barbican api
|
|
||||||
#enable_v1_api = True
|
|
||||||
|
|
||||||
# Allow access to version 2 of barbican api
|
|
||||||
#enable_v2_api = True
|
|
||||||
|
|
||||||
# ================= SSL Options ===============================
|
|
||||||
|
|
||||||
# Certificate file to use when starting API server securely
|
|
||||||
#cert_file = /path/to/certfile
|
|
||||||
|
|
||||||
# Private key file to use when starting API server securely
|
|
||||||
#key_file = /path/to/keyfile
|
|
||||||
|
|
||||||
# CA certificate file to use to verify connecting clients
|
|
||||||
#ca_file = /path/to/cafile
|
|
||||||
|
|
||||||
# ================= Security Options ==========================
|
|
||||||
|
|
||||||
# AES key for encrypting store 'location' metadata, including
|
|
||||||
# -- if used -- Swift or S3 credentials
|
|
||||||
# Should be set to a random string of length 16, 24 or 32 bytes
|
|
||||||
#metadata_encryption_key = <16, 24 or 32 char registry metadata key>
|
|
||||||
|
|
||||||
# ================= Queue Options - oslo.messaging ==========================
|
|
||||||
|
|
||||||
[oslo_messaging_rabbit]
|
|
||||||
|
|
||||||
# Rabbit and HA configuration:
|
|
||||||
amqp_durable_queues = True
|
|
||||||
rabbit_userid=guest
|
|
||||||
rabbit_password=guest
|
|
||||||
rabbit_ha_queues = True
|
|
||||||
rabbit_port=5672
|
|
||||||
|
|
||||||
# For HA, specify queue nodes in cluster, comma delimited:
|
|
||||||
# For example: rabbit_hosts=192.168.50.8:5672, 192.168.50.9:5672
|
|
||||||
rabbit_hosts=localhost:5672
|
|
||||||
|
|
||||||
# For HA, specify queue nodes in cluster as 'user@host:5672', comma delimited, ending with '/offset':
|
|
||||||
# For example: transport_url = rabbit://guest@192.168.50.8:5672,guest@192.168.50.9:5672/
|
|
||||||
# DO NOT USE THIS, due to '# FIXME(markmc): support multiple hosts' in oslo/messaging/_drivers/amqpdriver.py
|
|
||||||
# transport_url = rabbit://guest@localhost:5672/
|
|
||||||
|
|
||||||
|
|
||||||
[oslo_messaging_notifications]
|
|
||||||
# oslo notification driver for sending audit events via audit middleware.
|
|
||||||
# Meaningful only when middleware is enabled in barbican paste ini file.
|
|
||||||
# This is oslo config MultiStrOpt so can be defined multiple times in case
|
|
||||||
# there is need to route audit event to messaging as well as log.
|
|
||||||
# driver = messagingv2
|
|
||||||
# driver = log
|
|
||||||
|
|
||||||
|
|
||||||
# ======== OpenStack policy - oslo_policy ===============
|
|
||||||
|
|
||||||
[oslo_policy]
|
|
||||||
|
|
||||||
# ======== OpenStack policy integration
|
|
||||||
# JSON file representing policy (string value)
|
|
||||||
policy_file=/etc/barbican/policy.json
|
|
||||||
|
|
||||||
# Rule checked when requested rule is not found (string value)
|
|
||||||
policy_default_rule=default
|
|
||||||
|
|
||||||
|
|
||||||
# ================= Queue Options - Application ==========================
|
|
||||||
|
|
||||||
[queue]
|
|
||||||
# Enable queuing asynchronous messaging.
|
|
||||||
# Set false to invoke worker tasks synchronously (i.e. no-queue standalone mode)
|
|
||||||
enable = False
|
|
||||||
|
|
||||||
# Namespace for the queue
|
|
||||||
namespace = 'barbican'
|
|
||||||
|
|
||||||
# Topic for the queue
|
|
||||||
topic = 'barbican.workers'
|
|
||||||
|
|
||||||
# Version for the task API
|
|
||||||
version = '1.1'
|
|
||||||
|
|
||||||
# Server name for RPC service
|
|
||||||
server_name = 'barbican.queue'
|
|
||||||
|
|
||||||
# Number of asynchronous worker processes.
|
|
||||||
# When greater than 1, then that many additional worker processes are
|
|
||||||
# created for asynchronous worker functionality.
|
|
||||||
asynchronous_workers = 1
|
|
||||||
|
|
||||||
# ================= Retry/Scheduler Options ==========================
|
|
||||||
|
|
||||||
[retry_scheduler]
|
|
||||||
# Seconds (float) to wait between starting retry scheduler
|
|
||||||
initial_delay_seconds = 10.0
|
|
||||||
|
|
||||||
# Seconds (float) to wait between starting retry scheduler
|
|
||||||
periodic_interval_max_seconds = 10.0
|
|
||||||
|
|
||||||
|
|
||||||
# ====================== Quota Options ===============================
|
|
||||||
|
|
||||||
[quotas]
|
|
||||||
# For each resource, the default maximum number that can be used for
|
|
||||||
# a project is set below. This value can be overridden for each
|
|
||||||
# project through the API. A negative value means no limit. A zero
|
|
||||||
# value effectively disables the resource.
|
|
||||||
|
|
||||||
# default number of secrets allowed per project
|
|
||||||
quota_secrets = -1
|
|
||||||
|
|
||||||
# default number of orders allowed per project
|
|
||||||
quota_orders = -1
|
|
||||||
|
|
||||||
# default number of containers allowed per project
|
|
||||||
quota_containers = -1
|
|
||||||
|
|
||||||
# default number of consumers allowed per project
|
|
||||||
quota_consumers = -1
|
|
||||||
|
|
||||||
# default number of CAs allowed per project
|
|
||||||
quota_cas = -1
|
|
||||||
|
|
||||||
# ================= Keystone Notification Options - Application ===============
|
|
||||||
|
|
||||||
[keystone_notifications]
|
|
||||||
|
|
||||||
# Keystone notification functionality uses transport related configuration
|
|
||||||
# from barbican common configuration as defined under
|
|
||||||
# 'Queue Options - oslo.messaging' comments.
|
|
||||||
# The HA related configuration is also shared with notification server.
|
|
||||||
|
|
||||||
# True enables keystone notification listener functionality.
|
|
||||||
enable = False
|
|
||||||
|
|
||||||
# The default exchange under which topics are scoped.
|
|
||||||
# May be overridden by an exchange name specified in the transport_url option.
|
|
||||||
control_exchange = 'openstack'
|
|
||||||
|
|
||||||
# Keystone notification queue topic name.
|
|
||||||
# This name needs to match one of values mentioned in Keystone deployment's
|
|
||||||
# 'notification_topics' configuration e.g.
|
|
||||||
# notification_topics=notifications, barbican_notifications
|
|
||||||
# Multiple servers may listen on a topic and messages will be dispatched to one
|
|
||||||
# of the servers in a round-robin fashion. That's why Barbican service should
|
|
||||||
# have its own dedicated notification queue so that it receives all of Keystone
|
|
||||||
# notifications.
|
|
||||||
topic = 'notifications'
|
|
||||||
|
|
||||||
# True enables requeue feature in case of notification processing error.
|
|
||||||
# Enable this only when underlying transport supports this feature.
|
|
||||||
allow_requeue = False
|
|
||||||
|
|
||||||
# Version of tasks invoked via notifications
|
|
||||||
version = '1.0'
|
|
||||||
|
|
||||||
# Define the number of max threads to be used for notification server
|
|
||||||
# processing functionality.
|
|
||||||
thread_pool_size = 10
|
|
||||||
|
|
||||||
# ================= Secret Store Plugin ===================
|
|
||||||
[secretstore]
|
|
||||||
namespace = barbican.secretstore.plugin
|
|
||||||
enabled_secretstore_plugins = store_crypto
|
|
||||||
|
|
||||||
# ================= Crypto plugin ===================
|
|
||||||
[crypto]
|
|
||||||
namespace = barbican.crypto.plugin
|
|
||||||
enabled_crypto_plugins = simple_crypto
|
|
||||||
|
|
||||||
[simple_crypto_plugin]
|
|
||||||
# the kek should be a 32-byte value which is base64 encoded
|
|
||||||
kek = 'YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXoxMjM0NTY='
|
|
||||||
|
|
||||||
# User friendly plugin name
|
|
||||||
# plugin_name = 'Software Only Crypto'
|
|
||||||
|
|
||||||
[dogtag_plugin]
|
|
||||||
pem_path = '/etc/barbican/kra_admin_cert.pem'
|
|
||||||
dogtag_host = localhost
|
|
||||||
dogtag_port = 8443
|
|
||||||
nss_db_path = '/etc/barbican/alias'
|
|
||||||
nss_db_path_ca = '/etc/barbican/alias-ca'
|
|
||||||
nss_password = 'password123'
|
|
||||||
simple_cmc_profile = 'caOtherCert'
|
|
||||||
ca_expiration_time = 1
|
|
||||||
plugin_working_dir = '/etc/barbican/dogtag'
|
|
||||||
|
|
||||||
# User friendly plugin name
|
|
||||||
# plugin_name = 'Dogtag KRA'
|
|
||||||
|
|
||||||
|
|
||||||
[p11_crypto_plugin]
|
|
||||||
# Path to vendor PKCS11 library
|
|
||||||
library_path = '/usr/lib/libCryptoki2_64.so'
|
|
||||||
# Password to login to PKCS11 session
|
|
||||||
login = 'mypassword'
|
|
||||||
# Label to identify master KEK in the HSM (must not be the same as HMAC label)
|
|
||||||
mkek_label = 'an_mkek'
|
|
||||||
# Length in bytes of master KEK
|
|
||||||
mkek_length = 32
|
|
||||||
# Label to identify HMAC key in the HSM (must not be the same as MKEK label)
|
|
||||||
hmac_label = 'my_hmac_label'
|
|
||||||
# HSM Slot id (Should correspond to a configured PKCS11 slot). Default: 1
|
|
||||||
# slot_id = 1
|
|
||||||
# Enable Read/Write session with the HSM?
|
|
||||||
# rw_session = True
|
|
||||||
# Length of Project KEKs to create
|
|
||||||
# pkek_length = 32
|
|
||||||
# How long to cache unwrapped Project KEKs
|
|
||||||
# pkek_cache_ttl = 900
|
|
||||||
# Max number of items in pkek cache
|
|
||||||
# pkek_cache_limit = 100
|
|
||||||
|
|
||||||
# User friendly plugin name
|
|
||||||
# plugin_name = 'PKCS11 HSM'
|
|
||||||
|
|
||||||
|
|
||||||
# ================== KMIP plugin =====================
|
|
||||||
[kmip_plugin]
|
|
||||||
username = 'admin'
|
|
||||||
password = 'password'
|
|
||||||
host = localhost
|
|
||||||
port = 5696
|
|
||||||
keyfile = '/path/to/certs/cert.key'
|
|
||||||
certfile = '/path/to/certs/cert.crt'
|
|
||||||
ca_certs = '/path/to/certs/LocalCA.crt'
|
|
||||||
ssl_version = 'PROTOCOL_TLSv1_2'
|
|
||||||
pkcs1_only = False
|
|
||||||
plugin_name = 'KMIP HSM'
|
|
||||||
|
|
||||||
|
|
||||||
# ================= Certificate plugin ===================
|
|
||||||
|
|
||||||
# DEPRECATION WARNING: The Certificates Plugin has been deprecated
|
|
||||||
# and will be removed in the P release.
|
|
||||||
|
|
||||||
[certificate]
|
|
||||||
namespace = barbican.certificate.plugin
|
|
||||||
enabled_certificate_plugins = simple_certificate
|
|
||||||
enabled_certificate_plugins = snakeoil_ca
|
|
||||||
|
|
||||||
[certificate_event]
|
|
||||||
namespace = barbican.certificate.event.plugin
|
|
||||||
enabled_certificate_event_plugins = simple_certificate_event
|
|
||||||
|
|
||||||
[snakeoil_ca_plugin]
|
|
||||||
ca_cert_path = /etc/barbican/snakeoil-ca.crt
|
|
||||||
ca_cert_key_path = /etc/barbican/snakeoil-ca.key
|
|
||||||
ca_cert_chain_path = /etc/barbican/snakeoil-ca.chain
|
|
||||||
ca_cert_pkcs7_path = /etc/barbican/snakeoil-ca.p7b
|
|
||||||
subca_cert_key_directory=/etc/barbican/snakeoil-cas
|
|
||||||
|
|
||||||
# ========================================================
|
|
||||||
|
|
||||||
[cors]
|
|
||||||
|
|
||||||
#
|
|
||||||
# From oslo.middleware.cors
|
|
||||||
#
|
|
||||||
|
|
||||||
# Indicate whether this resource may be shared with the domain
|
|
||||||
# received in the requests "origin" header. (list value)
|
|
||||||
#allowed_origin = <None>
|
|
||||||
|
|
||||||
# Indicate that the actual request can include user credentials
|
|
||||||
# (boolean value)
|
|
||||||
#allow_credentials = true
|
|
||||||
|
|
||||||
# Indicate which headers are safe to expose to the API. Defaults to
|
|
||||||
# HTTP Simple Headers. (list value)
|
|
||||||
#expose_headers = X-Auth-Token, X-Openstack-Request-Id, X-Project-Id, X-Identity-Status, X-User-Id, X-Storage-Token, X-Domain-Id, X-User-Domain-Id, X-Project-Domain-Id, X-Roles
|
|
||||||
|
|
||||||
# Maximum cache age of CORS preflight requests. (integer value)
|
|
||||||
#max_age = 3600
|
|
||||||
|
|
||||||
# Indicate which methods can be used during the actual request. (list
|
|
||||||
# value)
|
|
||||||
#allow_methods = GET,PUT,POST,DELETE,PATCH
|
|
||||||
|
|
||||||
# Indicate which header field names may be used during the actual
|
|
||||||
# request. (list value)
|
|
||||||
#allow_headers = X-Auth-Token, X-Openstack-Request-Id, X-Project-Id, X-Identity-Status, X-User-Id, X-Storage-Token, X-Domain-Id, X-User-Domain-Id, X-Project-Domain-Id, X-Roles
|
|
||||||
|
|
||||||
|
|
||||||
[cors.subdomain]
|
|
||||||
|
|
||||||
#
|
|
||||||
# From oslo.middleware.cors
|
|
||||||
#
|
|
||||||
|
|
||||||
# Indicate whether this resource may be shared with the domain
|
|
||||||
# received in the requests "origin" header. (list value)
|
|
||||||
#allowed_origin = <None>
|
|
||||||
|
|
||||||
# Indicate that the actual request can include user credentials
|
|
||||||
# (boolean value)
|
|
||||||
#allow_credentials = true
|
|
||||||
|
|
||||||
# Indicate which headers are safe to expose to the API. Defaults to
|
|
||||||
# HTTP Simple Headers. (list value)
|
|
||||||
#expose_headers = X-Auth-Token, X-Openstack-Request-Id, X-Project-Id, X-Identity-Status, X-User-Id, X-Storage-Token, X-Domain-Id, X-User-Domain-Id, X-Project-Domain-Id, X-Roles
|
|
||||||
|
|
||||||
# Maximum cache age of CORS preflight requests. (integer value)
|
|
||||||
#max_age = 3600
|
|
||||||
|
|
||||||
# Indicate which methods can be used during the actual request. (list
|
|
||||||
# value)
|
|
||||||
#allow_methods = GET,PUT,POST,DELETE,PATCH
|
|
||||||
|
|
||||||
# Indicate which header field names may be used during the actual
|
|
||||||
# request. (list value)
|
|
||||||
#allow_headers = X-Auth-Token, X-Openstack-Request-Id, X-Project-Id, X-Identity-Status, X-User-Id, X-Storage-Token, X-Domain-Id, X-User-Domain-Id, X-Project-Domain-Id, X-Roles
|
|
||||||
|
|
||||||
|
|
||||||
[oslo_middleware]
|
|
||||||
|
|
||||||
#
|
|
||||||
# From oslo.middleware.http_proxy_to_wsgi
|
|
||||||
#
|
|
||||||
|
|
||||||
# Wether the application is behind a proxy or not. This determines if
|
|
||||||
# the middleware should parse the headers or not. (boolean value)
|
|
||||||
#enable_proxy_headers_parsing = false
|
|
||||||
|
|
||||||
|
|
||||||
[keystone_authtoken]
|
|
||||||
|
|
||||||
#
|
|
||||||
# From keystonemiddleware.auth_token
|
|
||||||
#
|
|
||||||
|
|
||||||
# Complete "public" Identity API endpoint. This endpoint should not be an
|
|
||||||
# "admin" endpoint, as it should be accessible by all end users. Unauthenticated
|
|
||||||
# clients are redirected to this endpoint to authenticate. Although this
|
|
||||||
# endpoint should ideally be unversioned, client support in the wild varies.
|
|
||||||
# If you're using a versioned v2 endpoint here, then this should *not* be the
|
|
||||||
# same endpoint the service user utilizes for validating tokens, because normal
|
|
||||||
# end users may not be able to reach that endpoint. (string value)
|
|
||||||
#auth_uri = <None>
|
|
||||||
|
|
||||||
# API version of the admin Identity API endpoint. (string value)
|
|
||||||
#auth_version = <None>
|
|
||||||
|
|
||||||
# Do not handle authorization requests within the middleware, but delegate the
|
|
||||||
# authorization decision to downstream WSGI components. (boolean value)
|
|
||||||
#delay_auth_decision = false
|
|
||||||
|
|
||||||
# Request timeout value for communicating with Identity API server. (integer
|
|
||||||
# value)
|
|
||||||
#http_connect_timeout = <None>
|
|
||||||
|
|
||||||
# How many times are we trying to reconnect when communicating with Identity API
|
|
||||||
# Server. (integer value)
|
|
||||||
#http_request_max_retries = 3
|
|
||||||
|
|
||||||
# Request environment key where the Swift cache object is stored. When
|
|
||||||
# auth_token middleware is deployed with a Swift cache, use this option to have
|
|
||||||
# the middleware share a caching backend with swift. Otherwise, use the
|
|
||||||
# ``memcached_servers`` option instead. (string value)
|
|
||||||
#cache = <None>
|
|
||||||
|
|
||||||
# Required if identity server requires client certificate (string value)
|
|
||||||
#certfile = <None>
|
|
||||||
|
|
||||||
# Required if identity server requires client certificate (string value)
|
|
||||||
#keyfile = <None>
|
|
||||||
|
|
||||||
# A PEM encoded Certificate Authority to use when verifying HTTPs connections.
|
|
||||||
# Defaults to system CAs. (string value)
|
|
||||||
#cafile = <None>
|
|
||||||
|
|
||||||
# Verify HTTPS connections. (boolean value)
|
|
||||||
#insecure = false
|
|
||||||
|
|
||||||
# The region in which the identity server can be found. (string value)
|
|
||||||
#region_name = <None>
|
|
||||||
|
|
||||||
# Directory used to cache files related to PKI tokens. (string value)
|
|
||||||
#signing_dir = <None>
|
|
||||||
|
|
||||||
# Optionally specify a list of memcached server(s) to use for caching. If left
|
|
||||||
# undefined, tokens will instead be cached in-process. (list value)
|
|
||||||
# Deprecated group/name - [keystone_authtoken]/memcache_servers
|
|
||||||
#memcached_servers = <None>
|
|
||||||
|
|
||||||
# In order to prevent excessive effort spent validating tokens, the middleware
|
|
||||||
# caches previously-seen tokens for a configurable duration (in seconds). Set to
|
|
||||||
# -1 to disable caching completely. (integer value)
|
|
||||||
#token_cache_time = 300
|
|
||||||
|
|
||||||
# Determines the frequency at which the list of revoked tokens is retrieved from
|
|
||||||
# the Identity service (in seconds). A high number of revocation events combined
|
|
||||||
# with a low cache duration may significantly reduce performance. Only valid for
|
|
||||||
# PKI tokens. (integer value)
|
|
||||||
#revocation_cache_time = 10
|
|
||||||
|
|
||||||
# (Optional) If defined, indicate whether token data should be authenticated or
|
|
||||||
# authenticated and encrypted. If MAC, token data is authenticated (with HMAC)
|
|
||||||
# in the cache. If ENCRYPT, token data is encrypted and authenticated in the
|
|
||||||
# cache. If the value is not one of these options or empty, auth_token will
|
|
||||||
# raise an exception on initialization. (string value)
|
|
||||||
# Allowed values: None, MAC, ENCRYPT
|
|
||||||
#memcache_security_strategy = None
|
|
||||||
|
|
||||||
# (Optional, mandatory if memcache_security_strategy is defined) This string is
|
|
||||||
# used for key derivation. (string value)
|
|
||||||
#memcache_secret_key = <None>
|
|
||||||
|
|
||||||
# (Optional) Number of seconds memcached server is considered dead before it is
|
|
||||||
# tried again. (integer value)
|
|
||||||
#memcache_pool_dead_retry = 300
|
|
||||||
|
|
||||||
# (Optional) Maximum total number of open connections to every memcached server.
|
|
||||||
# (integer value)
|
|
||||||
#memcache_pool_maxsize = 10
|
|
||||||
|
|
||||||
# (Optional) Socket timeout in seconds for communicating with a memcached
|
|
||||||
# server. (integer value)
|
|
||||||
#memcache_pool_socket_timeout = 3
|
|
||||||
|
|
||||||
# (Optional) Number of seconds a connection to memcached is held unused in the
|
|
||||||
# pool before it is closed. (integer value)
|
|
||||||
#memcache_pool_unused_timeout = 60
|
|
||||||
|
|
||||||
# (Optional) Number of seconds that an operation will wait to get a memcached
|
|
||||||
# client connection from the pool. (integer value)
|
|
||||||
#memcache_pool_conn_get_timeout = 10
|
|
||||||
|
|
||||||
# (Optional) Use the advanced (eventlet safe) memcached client pool. The
|
|
||||||
# advanced pool will only work under python 2.x. (boolean value)
|
|
||||||
#memcache_use_advanced_pool = false
|
|
||||||
|
|
||||||
# (Optional) Indicate whether to set the X-Service-Catalog header. If False,
|
|
||||||
# middleware will not ask for service catalog on token validation and will not
|
|
||||||
# set the X-Service-Catalog header. (boolean value)
|
|
||||||
#include_service_catalog = true
|
|
||||||
|
|
||||||
# Used to control the use and type of token binding. Can be set to: "disabled"
|
|
||||||
# to not check token binding. "permissive" (default) to validate binding
|
|
||||||
# information if the bind type is of a form known to the server and ignore it if
|
|
||||||
# not. "strict" like "permissive" but if the bind type is unknown the token will
|
|
||||||
# be rejected. "required" any form of token binding is needed to be allowed.
|
|
||||||
# Finally the name of a binding method that must be present in tokens. (string
|
|
||||||
# value)
|
|
||||||
#enforce_token_bind = permissive
|
|
||||||
|
|
||||||
# If true, the revocation list will be checked for cached tokens. This requires
|
|
||||||
# that PKI tokens are configured on the identity server. (boolean value)
|
|
||||||
#check_revocations_for_cached = false
|
|
||||||
|
|
||||||
# Hash algorithms to use for hashing PKI tokens. This may be a single algorithm
|
|
||||||
# or multiple. The algorithms are those supported by Python standard
|
|
||||||
# hashlib.new(). The hashes will be tried in the order given, so put the
|
|
||||||
# preferred one first for performance. The result of the first hash will be
|
|
||||||
# stored in the cache. This will typically be set to multiple values only while
|
|
||||||
# migrating from a less secure algorithm to a more secure one. Once all the old
|
|
||||||
# tokens are expired this option should be set to a single value for better
|
|
||||||
# performance. (list value)
|
|
||||||
#hash_algorithms = md5
|
|
||||||
|
|
||||||
# Authentication type to load (string value)
|
|
||||||
# Deprecated group/name - [keystone_authtoken]/auth_plugin
|
|
||||||
#auth_type = <None>
|
|
||||||
|
|
||||||
# Config Section from which to load plugin specific options (string value)
|
|
||||||
#auth_section = <None>
|
|
21
etc/oslo-config-generator/barbican.conf
Normal file
21
etc/oslo-config-generator/barbican.conf
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
[DEFAULT]
|
||||||
|
output_file = etc/barbican/barbican.conf.sample
|
||||||
|
namespace = barbican
|
||||||
|
namespace = barbican.certificate.plugin
|
||||||
|
namespace = barbican.certificate.plugin.snakeoil
|
||||||
|
namespace = barbican.common.config
|
||||||
|
namespace = barbican.plugin.crypto
|
||||||
|
namespace = barbican.plugin.crypto.p11
|
||||||
|
namespace = barbican.plugin.crypto.simple
|
||||||
|
namespace = barbican.plugin.dogtag
|
||||||
|
namespace = barbican.plugin.secret_store
|
||||||
|
namespace = barbican.plugin.secret_store.kmip
|
||||||
|
namespace = keystonemiddleware.auth_token
|
||||||
|
namespace = oslo.log
|
||||||
|
namespace = oslo.messaging
|
||||||
|
namespace = oslo.middleware.cors
|
||||||
|
namespace = oslo.middleware.http_proxy_to_wsgi
|
||||||
|
namespace = oslo.policy
|
||||||
|
namespace = oslo.service.periodic_task
|
||||||
|
namespace = oslo.service.sslutils
|
||||||
|
namespace = oslo.service.wsgi
|
@ -0,0 +1,4 @@
|
|||||||
|
---
|
||||||
|
other:
|
||||||
|
- oslo-config-generator is now used to generate a
|
||||||
|
barbican.conf.sample file
|
13
setup.cfg
13
setup.cfg
@ -53,7 +53,18 @@ barbican.certificate.event.plugin =
|
|||||||
simple_certificate_event = barbican.plugin.simple_certificate_manager:SimpleCertificateEventPlugin
|
simple_certificate_event = barbican.plugin.simple_certificate_manager:SimpleCertificateEventPlugin
|
||||||
barbican.test.crypto.plugin =
|
barbican.test.crypto.plugin =
|
||||||
test_crypto = barbican.tests.crypto.test_plugin:TestCryptoPlugin
|
test_crypto = barbican.tests.crypto.test_plugin:TestCryptoPlugin
|
||||||
|
oslo.config.opts =
|
||||||
|
barbican.common.config = barbican.common.config:list_opts
|
||||||
|
barbican.plugin.secret_store = barbican.plugin.interface.secret_store:list_opts
|
||||||
|
barbican.plugin.crypto = barbican.plugin.crypto.manager:list_opts
|
||||||
|
barbican.plugin.crypto.simple = barbican.plugin.crypto.simple_crypto:list_opts
|
||||||
|
barbican.plugin.dogtag_config_opts = barbican.plugin.dogtag:list_opts
|
||||||
|
barbican.plugin.crypto.p11 = barbican.plugin.crypto.p11_crypto:list_opts
|
||||||
|
barbican.plugin.secret_store.kmip = barbican.plugin.kmip_secret_store:list_opts
|
||||||
|
barbican.certificate.plugin = barbican.plugin.interface.certificate_manager:list_opts
|
||||||
|
barbican.certificate.plugin.snakeoil = barbican.plugin.snakeoil_ca:list_opts
|
||||||
|
oslo.config.opts.defaults =
|
||||||
|
barbican.common.config = barbican.common.config:set_middleware_defaults
|
||||||
[build_sphinx]
|
[build_sphinx]
|
||||||
all_files = 1
|
all_files = 1
|
||||||
build-dir = doc/build
|
build-dir = doc/build
|
||||||
|
6
tox.ini
6
tox.ini
@ -10,6 +10,7 @@ deps = -r{toxinidir}/requirements.txt
|
|||||||
-r{toxinidir}/test-requirements.txt
|
-r{toxinidir}/test-requirements.txt
|
||||||
|
|
||||||
commands =
|
commands =
|
||||||
|
oslo-config-generator --config-file etc/oslo-config-generator/barbican.conf --output-file etc/barbican/barbican.conf
|
||||||
/usr/bin/find . -type f -name "*.py[c|o]" -delete
|
/usr/bin/find . -type f -name "*.py[c|o]" -delete
|
||||||
coverage erase
|
coverage erase
|
||||||
python setup.py testr --coverage --testr-args='{posargs}'
|
python setup.py testr --coverage --testr-args='{posargs}'
|
||||||
@ -35,6 +36,11 @@ commands =
|
|||||||
# Run security linter
|
# Run security linter
|
||||||
bandit -r barbican -x tests -n5
|
bandit -r barbican -x tests -n5
|
||||||
|
|
||||||
|
[testenv:genconfig]
|
||||||
|
whitelist_externals = bash
|
||||||
|
commands =
|
||||||
|
oslo-config-generator --config-file etc/oslo-config-generator/barbican.conf
|
||||||
|
|
||||||
[testenv:venv]
|
[testenv:venv]
|
||||||
commands = {posargs}
|
commands = {posargs}
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user