Update the devstack plugin
This updates various parts of the devstack plugin:

* The fetcher is now configurable via the ``CLOUDKITTY_FETCHER`` variable
  and defaults to gnocchi, so that the user gets immediate results.
* The ``CLOUDKITTY_SERVICES`` variable was removed, as it is no longer used.
* The storage backend now defaults to ``influxdb``. If this storage backend
  is selected, InfluxDB is installed; this is supported on Fedora and Ubuntu.
  The storage backend to use in devstack can be configured through the
  ``CLOUDKITTY_STORAGE_BACKEND`` and ``CLOUDKITTY_STORAGE_VERSION`` variables.
* Some details about the available variables have been added to the devstack
  documentation. Since the "admin/quick_deployment" section only contained
  the devstack documentation, it has been removed for now.
* Given that the "ceilometer-low" archive policy (the default in devstack)
  only provides the "mean" aggregation method, "mean" is now the aggregation
  method used in the default metrics.yml file.

Change-Id: I37452772de163b5fafc502917af870c86a3d38b2
parent 5c6964469b
commit 87ab314fcf
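For illustration, the new defaults can be overridden from ``local.conf``; the variable names below come from this change, while the values are only examples (``keystone`` is the fetcher the tempest job in this change pins):

    [[local|localrc]]
    enable_plugin cloudkitty https://git.openstack.org/openstack/cloudkitty.git master
    enable_service ck-api,ck-proc

    # Optional overrides; the new defaults are gnocchi, influxdb and version 2.
    CLOUDKITTY_FETCHER=keystone
    CLOUDKITTY_STORAGE_BACKEND=sqlalchemy
    CLOUDKITTY_STORAGE_VERSION=1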
@@ -27,6 +27,7 @@
       tempest_test_regex: cloudkitty_tempest_plugin.*
       tox_envlist: all
       devstack_localrc:
+        CLOUDKITTY_FETCHER: keystone
         TEMPEST_PLUGINS: /opt/stack/cloudkitty-tempest-plugin

 - job:
devstack/files/influxdb.conf (new file, 571 lines)

@@ -0,0 +1,571 @@
+## Modified version of: https://github.com/influxdata/influxdb/blob/master/etc/config.sample.toml
+
+### Welcome to the InfluxDB configuration file.
+
+# The values in this file override the default values used by the system if
+# a config option is not specified. The commented out lines are the configuration
+# field and the default value used. Uncommenting a line and changing the value
+# will change the value used at runtime when the process is restarted.
+
+# Once every 24 hours InfluxDB will report usage data to usage.influxdata.com
+# The data includes a random ID, os, arch, version, the number of series and other
+# usage data. No data from user databases is ever transmitted.
+# Change this option to true to disable reporting.
+# reporting-disabled = true
+
+# Bind address to use for the RPC service for backup and restore.
+# bind-address = "127.0.0.1:8088"
+
+###
+### [meta]
+###
+### Controls the parameters for the Raft consensus group that stores metadata
+### about the InfluxDB cluster.
+###
+
+[meta]
+  # Where the metadata/raft database is stored
+  dir = "/var/lib/influxdb/meta"
+
+  # Automatically create a default retention policy when creating a database.
+  # retention-autocreate = true
+
+  # If log messages are printed for the meta service
+  # logging-enabled = true
+
+###
+### [data]
+###
+### Controls where the actual shard data for InfluxDB lives and how it is
+### flushed from the WAL. "dir" may need to be changed to a suitable place
+### for your system, but the WAL settings are an advanced configuration. The
+### defaults should work for most systems.
+###
+
+[data]
+  # The directory where the TSM storage engine stores TSM files.
+  dir = "/var/lib/influxdb/data"
+
+  # The directory where the TSM storage engine stores WAL files.
+  wal-dir = "/var/lib/influxdb/wal"
+
+  # The amount of time that a write will wait before fsyncing. A duration
+  # greater than 0 can be used to batch up multiple fsync calls. This is useful for slower
+  # disks or when WAL write contention is seen. A value of 0s fsyncs every write to the WAL.
+  # Values in the range of 0-100ms are recommended for non-SSD disks.
+  # wal-fsync-delay = "0s"
+
+  # The type of shard index to use for new shards. The default is an in-memory index that is
+  # recreated at startup. A value of "tsi1" will use a disk based index that supports higher
+  # cardinality datasets.
+  # index-version = "inmem"
+
+  # Trace logging provides more verbose output around the tsm engine. Turning
+  # this on can provide more useful output for debugging tsm engine issues.
+  # trace-logging-enabled = false
+
+  # Whether queries should be logged before execution. Very useful for troubleshooting, but will
+  # log any sensitive data contained within a query.
+  # query-log-enabled = true
+
+  # Validates incoming writes to ensure keys only have valid unicode characters.
+  # This setting will incur a small overhead because every key must be checked.
+  # validate-keys = false
+
+  # Settings for the TSM engine
+
+  # CacheMaxMemorySize is the maximum size a shard's cache can
+  # reach before it starts rejecting writes.
+  # Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k).
+  # Values without a size suffix are in bytes.
+  # cache-max-memory-size = "1g"
+
+  # CacheSnapshotMemorySize is the size at which the engine will
+  # snapshot the cache and write it to a TSM file, freeing up memory
+  # Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k).
+  # Values without a size suffix are in bytes.
+  # cache-snapshot-memory-size = "25m"
+
+  # CacheSnapshotWriteColdDuration is the length of time at
+  # which the engine will snapshot the cache and write it to
+  # a new TSM file if the shard hasn't received writes or deletes
+  # cache-snapshot-write-cold-duration = "10m"
+
+  # CompactFullWriteColdDuration is the duration at which the engine
+  # will compact all TSM files in a shard if it hasn't received a
+  # write or delete
+  # compact-full-write-cold-duration = "4h"
+
+  # The maximum number of concurrent full and level compactions that can run at one time. A
+  # value of 0 results in 50% of runtime.GOMAXPROCS(0) used at runtime. Any number greater
+  # than 0 limits compactions to that value. This setting does not apply
+  # to cache snapshotting.
+  # max-concurrent-compactions = 0
+
+  # CompactThroughput is the rate limit in bytes per second that we
+  # will allow TSM compactions to write to disk. Note that short bursts are allowed
+  # to happen at a possibly larger value, set by CompactThroughputBurst
+  # compact-throughput = "48m"
+
+  # CompactThroughputBurst is the rate limit in bytes per second that we
+  # will allow TSM compactions to write to disk.
+  # compact-throughput-burst = "48m"
+
+  # The threshold, in bytes, when an index write-ahead log file will compact
+  # into an index file. Lower sizes will cause log files to be compacted more
+  # quickly and result in lower heap usage at the expense of write throughput.
+  # Higher sizes will be compacted less frequently, store more series in-memory,
+  # and provide higher write throughput.
+  # Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k).
+  # Values without a size suffix are in bytes.
+  # max-index-log-file-size = "1m"
+
+  # The maximum series allowed per database before writes are dropped. This limit can prevent
+  # high cardinality issues at the database level. This limit can be disabled by setting it to
+  # 0.
+  # max-series-per-database = 1000000
+
+  # The maximum number of tag values per tag that are allowed before writes are dropped. This limit
+  # can prevent high cardinality tag values from being written to a measurement. This limit can be
+  # disabled by setting it to 0.
+  # max-values-per-tag = 100000
+
+  # If true, then the mmap advise value MADV_WILLNEED will be provided to the kernel with respect to
+  # TSM files. This setting has been found to be problematic on some kernels, and defaults to off.
+  # It might help users who have slow disks in some cases.
+  # tsm-use-madv-willneed = false
+
+###
+### [coordinator]
+###
+### Controls the clustering service configuration.
+###
+
+[coordinator]
+  # The default time a write request will wait until a "timeout" error is returned to the caller.
+  # write-timeout = "10s"
+
+  # The maximum number of concurrent queries allowed to be executing at one time. If a query is
+  # executed and exceeds this limit, an error is returned to the caller. This limit can be disabled
+  # by setting it to 0.
+  # max-concurrent-queries = 0
+
+  # The maximum time a query will is allowed to execute before being killed by the system. This limit
+  # can help prevent run away queries. Setting the value to 0 disables the limit.
+  # query-timeout = "0s"
+
+  # The time threshold when a query will be logged as a slow query. This limit can be set to help
+  # discover slow or resource intensive queries. Setting the value to 0 disables the slow query logging.
+  # log-queries-after = "0s"
+
+  # The maximum number of points a SELECT can process. A value of 0 will make
+  # the maximum point count unlimited. This will only be checked every second so queries will not
+  # be aborted immediately when hitting the limit.
+  # max-select-point = 0
+
+  # The maximum number of series a SELECT can run. A value of 0 will make the maximum series
+  # count unlimited.
+  # max-select-series = 0
+
+  # The maxium number of group by time bucket a SELECT can create. A value of zero will max the maximum
+  # number of buckets unlimited.
+  # max-select-buckets = 0
+
+###
+### [retention]
+###
+### Controls the enforcement of retention policies for evicting old data.
+###
+
+[retention]
+  # Determines whether retention policy enforcement enabled.
+  # enabled = true
+
+  # The interval of time when retention policy enforcement checks run.
+  # check-interval = "30m"
+
+###
+### [shard-precreation]
+###
+### Controls the precreation of shards, so they are available before data arrives.
+### Only shards that, after creation, will have both a start- and end-time in the
+### future, will ever be created. Shards are never precreated that would be wholly
+### or partially in the past.
+
+[shard-precreation]
+  # Determines whether shard pre-creation service is enabled.
+  # enabled = true
+
+  # The interval of time when the check to pre-create new shards runs.
+  # check-interval = "10m"
+
+  # The default period ahead of the endtime of a shard group that its successor
+  # group is created.
+  # advance-period = "30m"
+
+###
+### Controls the system self-monitoring, statistics and diagnostics.
+###
+### The internal database for monitoring data is created automatically if
+### if it does not already exist. The target retention within this database
+### is called 'monitor' and is also created with a retention period of 7 days
+### and a replication factor of 1, if it does not exist. In all cases the
+### this retention policy is configured as the default for the database.
+
+[monitor]
+  # Whether to record statistics internally.
+  # store-enabled = true
+
+  # The destination database for recorded statistics
+  # store-database = "_internal"
+
+  # The interval at which to record statistics
+  # store-interval = "10s"
+
+###
+### [http]
+###
+### Controls how the HTTP endpoints are configured. These are the primary
+### mechanism for getting data into and out of InfluxDB.
+###
+
+[http]
+  # Determines whether HTTP endpoint is enabled.
+  enabled = true
+
+  # The bind address used by the HTTP service.
+  bind-address = ":8086"
+
+  # Determines whether user authentication is enabled over HTTP/HTTPS.
+  # auth-enabled = false
+
+  # The default realm sent back when issuing a basic auth challenge.
+  # realm = "InfluxDB"
+
+  # Determines whether HTTP request logging is enabled.
+  # log-enabled = true
+
+  # Determines whether the HTTP write request logs should be suppressed when the log is enabled.
+  # suppress-write-log = false
+
+  # When HTTP request logging is enabled, this option specifies the path where
+  # log entries should be written. If unspecified, the default is to write to stderr, which
+  # intermingles HTTP logs with internal InfluxDB logging.
+  #
+  # If influxd is unable to access the specified path, it will log an error and fall back to writing
+  # the request log to stderr.
+  # access-log-path = ""
+
+  # Determines whether detailed write logging is enabled.
+  # write-tracing = false
+
+  # Determines whether the pprof endpoint is enabled. This endpoint is used for
+  # troubleshooting and monitoring.
+  # pprof-enabled = true
+
+  # Enables a pprof endpoint that binds to localhost:6060 immediately on startup.
+  # This is only needed to debug startup issues.
+  # debug-pprof-enabled = false
+
+  # Determines whether HTTPS is enabled.
+  # https-enabled = false
+
+  # The SSL certificate to use when HTTPS is enabled.
+  # https-certificate = "/etc/ssl/influxdb.pem"
+
+  # Use a separate private key location.
+  # https-private-key = ""
+
+  # The JWT auth shared secret to validate requests using JSON web tokens.
+  # shared-secret = ""
+
+  # The default chunk size for result sets that should be chunked.
+  # max-row-limit = 0
+
+  # The maximum number of HTTP connections that may be open at once. New connections that
+  # would exceed this limit are dropped. Setting this value to 0 disables the limit.
+  # max-connection-limit = 0
+
+  # Enable http service over unix domain socket
+  # unix-socket-enabled = false
+
+  # The path of the unix domain socket.
+  # bind-socket = "/var/run/influxdb.sock"
+
+  # The maximum size of a client request body, in bytes. Setting this value to 0 disables the limit.
+  # max-body-size = 25000000
+
+  # The maximum number of writes processed concurrently.
+  # Setting this to 0 disables the limit.
+  # max-concurrent-write-limit = 0
+
+  # The maximum number of writes queued for processing.
+  # Setting this to 0 disables the limit.
+  # max-enqueued-write-limit = 0
+
+  # The maximum duration for a write to wait in the queue to be processed.
+  # Setting this to 0 or setting max-concurrent-write-limit to 0 disables the limit.
+  # enqueued-write-timeout = 0
+
+
+###
+### [ifql]
+###
+### Configures the ifql RPC API.
+###
+
+[ifql]
+  # Determines whether the RPC service is enabled.
+  # enabled = true
+
+  # Determines whether additional logging is enabled.
+  # log-enabled = true
+
+  # The bind address used by the ifql RPC service.
+  # bind-address = ":8082"
+
+
+###
+### [logging]
+###
+### Controls how the logger emits logs to the output.
+###
+
+[logging]
+  # Determines which log encoder to use for logs. Available options
+  # are auto, logfmt, and json. auto will use a more a more user-friendly
+  # output format if the output terminal is a TTY, but the format is not as
+  # easily machine-readable. When the output is a non-TTY, auto will use
+  # logfmt.
+  # format = "auto"
+
+  # Determines which level of logs will be emitted. The available levels
+  # are error, warn, info, and debug. Logs that are equal to or above the
+  # specified level will be emitted.
+  # level = "info"
+
+  # Suppresses the logo output that is printed when the program is started.
+  # The logo is always suppressed if STDOUT is not a TTY.
+  # suppress-logo = false
+
+###
+### [subscriber]
+###
+### Controls the subscriptions, which can be used to fork a copy of all data
+### received by the InfluxDB host.
+###
+
+[subscriber]
+  # Determines whether the subscriber service is enabled.
+  # enabled = true
+
+  # The default timeout for HTTP writes to subscribers.
+  # http-timeout = "30s"
+
+  # Allows insecure HTTPS connections to subscribers. This is useful when testing with self-
+  # signed certificates.
+  # insecure-skip-verify = false
+
+  # The path to the PEM encoded CA certs file. If the empty string, the default system certs will be used
+  # ca-certs = ""
+
+  # The number of writer goroutines processing the write channel.
+  # write-concurrency = 40
+
+  # The number of in-flight writes buffered in the write channel.
+  # write-buffer-size = 1000
+
+
+###
+### [[graphite]]
+###
+### Controls one or many listeners for Graphite data.
+###
+
+[[graphite]]
+  # Determines whether the graphite endpoint is enabled.
+  # enabled = false
+  # database = "graphite"
+  # retention-policy = ""
+  # bind-address = ":2003"
+  # protocol = "tcp"
+  # consistency-level = "one"
+
+  # These next lines control how batching works. You should have this enabled
+  # otherwise you could get dropped metrics or poor performance. Batching
+  # will buffer points in memory if you have many coming in.
+
+  # Flush if this many points get buffered
+  # batch-size = 5000
+
+  # number of batches that may be pending in memory
+  # batch-pending = 10
+
+  # Flush at least this often even if we haven't hit buffer limit
+  # batch-timeout = "1s"
+
+  # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
+  # udp-read-buffer = 0
+
+  ### This string joins multiple matching 'measurement' values providing more control over the final measurement name.
+  # separator = "."
+
+  ### Default tags that will be added to all metrics. These can be overridden at the template level
+  ### or by tags extracted from metric
+  # tags = ["region=us-east", "zone=1c"]
+
+  ### Each template line requires a template pattern. It can have an optional
+  ### filter before the template and separated by spaces. It can also have optional extra
+  ### tags following the template. Multiple tags should be separated by commas and no spaces
+  ### similar to the line protocol format. There can be only one default template.
+  # templates = [
+  #   "*.app env.service.resource.measurement",
+  #   # Default template
+  #   "server.*",
+  # ]
+
+###
+### [collectd]
+###
+### Controls one or many listeners for collectd data.
+###
+
+[[collectd]]
+  # enabled = false
+  # bind-address = ":25826"
+  # database = "collectd"
+  # retention-policy = ""
+  #
+  # The collectd service supports either scanning a directory for multiple types
+  # db files, or specifying a single db file.
+  # typesdb = "/usr/local/share/collectd"
+  #
+  # security-level = "none"
+  # auth-file = "/etc/collectd/auth_file"
+
+  # These next lines control how batching works. You should have this enabled
+  # otherwise you could get dropped metrics or poor performance. Batching
+  # will buffer points in memory if you have many coming in.
+
+  # Flush if this many points get buffered
+  # batch-size = 5000
+
+  # Number of batches that may be pending in memory
+  # batch-pending = 10
+
+  # Flush at least this often even if we haven't hit buffer limit
+  # batch-timeout = "10s"
+
+  # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
+  # read-buffer = 0
+
+  # Multi-value plugins can be handled two ways.
+  # "split" will parse and store the multi-value plugin data into separate measurements
+  # "join" will parse and store the multi-value plugin as a single multi-value measurement.
+  # "split" is the default behavior for backward compatability with previous versions of influxdb.
+  # parse-multivalue-plugin = "split"
+
+###
+### [opentsdb]
+###
+### Controls one or many listeners for OpenTSDB data.
+###
+
+[[opentsdb]]
+  # enabled = false
+  # bind-address = ":4242"
+  # database = "opentsdb"
+  # retention-policy = ""
+  # consistency-level = "one"
+  # tls-enabled = false
+  # certificate= "/etc/ssl/influxdb.pem"
+
+  # Log an error for every malformed point.
+  # log-point-errors = true
+
+  # These next lines control how batching works. You should have this enabled
+  # otherwise you could get dropped metrics or poor performance. Only points
+  # metrics received over the telnet protocol undergo batching.
+
+  # Flush if this many points get buffered
+  # batch-size = 1000
+
+  # Number of batches that may be pending in memory
+  # batch-pending = 5
+
+  # Flush at least this often even if we haven't hit buffer limit
+  # batch-timeout = "1s"
+
+###
+### [[udp]]
+###
+### Controls the listeners for InfluxDB line protocol data via UDP.
+###
+
+[[udp]]
+  # enabled = false
+  # bind-address = ":8089"
+  # database = "udp"
+  # retention-policy = ""
+
+  # InfluxDB precision for timestamps on received points ("" or "n", "u", "ms", "s", "m", "h")
+  # precision = ""
+
+  # These next lines control how batching works. You should have this enabled
+  # otherwise you could get dropped metrics or poor performance. Batching
+  # will buffer points in memory if you have many coming in.
+
+  # Flush if this many points get buffered
+  # batch-size = 5000
+
+  # Number of batches that may be pending in memory
+  # batch-pending = 10
+
+  # Will flush at least this often even if we haven't hit buffer limit
+  # batch-timeout = "1s"
+
+  # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
+  # read-buffer = 0
+
+###
+### [continuous_queries]
+###
+### Controls how continuous queries are run within InfluxDB.
+###
+
+[continuous_queries]
+  # Determines whether the continuous query service is enabled.
+  # enabled = true
+
+  # Controls whether queries are logged when executed by the CQ service.
+  # log-enabled = true
+
+  # Controls whether queries are logged to the self-monitoring data store.
+  # query-stats-enabled = false
+
+  # interval for how often continuous queries will be checked if they need to run
+  # run-interval = "1s"
+
+###
+### [tls]
+###
+### Global configuration settings for TLS in InfluxDB.
+###
+
+[tls]
+  # Determines the available set of cipher suites. See https://golang.org/pkg/crypto/tls/#pkg-constants
+  # for a list of available ciphers, which depends on the version of Go (use the query
+  # SHOW DIAGNOSTICS to see the version of Go used to build InfluxDB). If not specified, uses
+  # the default settings from Go's crypto/tls package.
+  # ciphers = [
+  #   "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
+  #   "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+  # ]
+
+  # Minimum version of the tls protocol that will be negotiated. If not specified, uses the
+  # default settings from Go's crypto/tls package.
+  # min-version = "tls1.2"
+
+  # Maximum version of the tls protocol that will be negotiated. If not specified, uses the
+  # default settings from Go's crypto/tls package.
+  # max-version = "tls1.2"
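The configuration above leaves the HTTP endpoint enabled on the default ``:8086`` bind address. As a quick sanity check on a deployed node (a sketch, not part of this change), InfluxDB 1.x answers its ``/ping`` endpoint with ``204 No Content``:

    curl -sI http://localhost:8086/ping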
@@ -159,13 +159,23 @@ function configure_cloudkitty {
     iniset $CLOUDKITTY_CONF authinfos project_domain_name default
     iniset $CLOUDKITTY_CONF authinfos debug "$ENABLE_DEBUG_LOG_LEVEL"

-    iniset $CLOUDKITTY_CONF fetcher_keystone auth_section authinfos
-    iniset $CLOUDKITTY_CONF fetcher_keystone keystone_version 3
+    iniset $CLOUDKITTY_CONF fetcher backend $CLOUDKITTY_FETCHER
+    iniset $CLOUDKITTY_CONF "fetcher_$CLOUDKITTY_FETCHER" auth_section authinfos
+    if [[ "$CLOUDKITTY_FETCHER" == "keystone" ]]; then
+        iniset $CLOUDKITTY_CONF fetcher_keystone keystone_version 3
+    fi
+
+    if [ "$CLOUDKITTY_STORAGE_BACKEND" == "influxdb" ]; then
+        iniset $CLOUDKITTY_CONF storage_${CLOUDKITTY_STORAGE_BACKEND} user ${CLOUDKITTY_INFLUXDB_USER}
+        iniset $CLOUDKITTY_CONF storage_${CLOUDKITTY_STORAGE_BACKEND} password ${CLOUDKITTY_INFLUXDB_PASSWORD}
+        iniset $CLOUDKITTY_CONF storage_${CLOUDKITTY_STORAGE_BACKEND} database ${CLOUDKITTY_INFLUXDB_DATABASE}
+        iniset $CLOUDKITTY_CONF storage_${CLOUDKITTY_STORAGE_BACKEND} host ${CLOUDKITTY_INFLUXDB_HOST}
+        iniset $CLOUDKITTY_CONF storage_${CLOUDKITTY_STORAGE_BACKEND} port ${CLOUDKITTY_INFLUXDB_PORT}
+    fi

     # collect
     iniset $CLOUDKITTY_CONF collect collector $CLOUDKITTY_COLLECTOR
-    iniset $CLOUDKITTY_CONF ${CLOUDKITTY_COLLECTOR}_collector auth_section authinfos
-    iniset $CLOUDKITTY_CONF collect services $CLOUDKITTY_SERVICES
+    iniset $CLOUDKITTY_CONF "collector_${CLOUDKITTY_COLLECTOR}" auth_section authinfos
     iniset $CLOUDKITTY_CONF collect metrics_conf $CLOUDKITTY_CONF_DIR/$CLOUDKITTY_METRICS_CONF

     # output
@@ -176,9 +186,6 @@ function configure_cloudkitty {
     # storage
     iniset $CLOUDKITTY_CONF storage backend $CLOUDKITTY_STORAGE_BACKEND
     iniset $CLOUDKITTY_CONF storage version $CLOUDKITTY_STORAGE_VERSION
-    if [ "$CLOUDKITTY_STORAGE_BACKEND" != "sqlalchemy" ]; then
-        iniset $CLOUDKITTY_CONF storage_${CLOUDKITTY_STORAGE_BACKEND} auth_section authinfos
-    fi

     # database
     local dburl=`database_connection_url cloudkitty`
@@ -221,6 +228,12 @@ function create_cloudkitty_data_dir {
     sudo chown $STACK_USER $CLOUDKITTY_DATA_DIR/locks
 }

+function create_influxdb_database {
+    if [ "$CLOUDKITTY_STORAGE_BACKEND" == "influxdb" ]; then
+        influx -execute "CREATE DATABASE ${CLOUDKITTY_INFLUXDB_DATABASE}"
+    fi
+}
+
 # init_cloudkitty() - Initialize CloudKitty database
 function init_cloudkitty {
     # Delete existing cache
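Once ``create_influxdb_database`` has run, the same ``influx`` CLI it relies on can confirm the database exists; a hedged check, assuming the default ``cloudkitty`` database name from the settings below:

    influx -execute 'SHOW DATABASES'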
@@ -236,6 +249,8 @@ function init_cloudkitty {
     # (Re)create cloudkitty database
     recreate_database cloudkitty utf8

+    create_influxdb_database
+
     # Migrate cloudkitty database
     $CLOUDKITTY_BIN_DIR/cloudkitty-dbsync upgrade
@@ -249,10 +264,36 @@ function init_cloudkitty {
     create_cloudkitty_data_dir
 }

+function install_influx_ubuntu {
+    local influxdb_file=$(get_extra_file https://dl.influxdata.com/influxdb/releases/influxdb_1.6.3_amd64.deb)
+    sudo dpkg -i --skip-same-version ${influxdb_file}
+}
+
+function install_influx_fedora {
+    local influxdb_file=$(get_extra_file https://dl.influxdata.com/influxdb/releases/influxdb-1.6.3.x86_64.rpm)
+    sudo yum localinstall -y ${influxdb_file}
+}
+
+function install_influx {
+    if is_ubuntu; then
+        install_influx_ubuntu
+    elif is_fedora; then
+        install_influx_fedora
+    else
+        die $LINENO "Distribution must be Debian or Fedora-based"
+    fi
+    sudo cp -f "${CLOUDKITTY_DIR}"/devstack/files/influxdb.conf /etc/influxdb/influxdb.conf
+    sudo systemctl start influxdb || sudo systemctl restart influxdb
+}
+
 # install_cloudkitty() - Collect source and prepare
 function install_cloudkitty {
     git_clone $CLOUDKITTY_REPO $CLOUDKITTY_DIR $CLOUDKITTY_BRANCH
     setup_develop $CLOUDKITTY_DIR
+
+    if [ $CLOUDKITTY_STORAGE_BACKEND == 'influxdb' ]; then
+        install_influx
+    fi
 }

 # start_cloudkitty() - Start running processes, including screen
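Since ``install_influx`` pins InfluxDB 1.6.3 from upstream packages, a quick way to confirm what was installed (a sketch using standard distro tooling, not part of this change):

    dpkg -s influxdb | grep Version    # Ubuntu
    rpm -q influxdb                    # Fedora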
@@ -40,14 +40,16 @@ CLOUDKITTY_PRICING_USER=${CLOUDKITTY_PRICING_USER:-"admin"}
 CLOUDKITTY_PRICING_PASSWORD=${CLOUDKITTY_PRICING_PASSWORD:-$ADMIN_PASSWORD}
 CLOUDKITTY_PRICING_TENANT=${CLOUDKITTY_PRICING_TENANT:-"demo"}

+# Set CloudKitty fetcher info
+CLOUDKITTY_FETCHER=${CLOUDKITTY_FETCHER:-gnocchi}
+
 # Set CloudKitty collect info
 CLOUDKITTY_COLLECTOR=${CLOUDKITTY_COLLECTOR:-gnocchi}
-CLOUDKITTY_SERVICES=${CLOUDKITTY_SERVICES:-compute}
 CLOUDKITTY_METRICS_CONF=metrics.yml

 # Set CloudKitty storage info
-CLOUDKITTY_STORAGE_BACKEND=${CLOUDKITTY_STORAGE_BACKEND:-"sqlalchemy"}
-CLOUDKITTY_STORAGE_VERSION=${CLOUDKITTY_STORAGE_VERSION:-"1"}
+CLOUDKITTY_STORAGE_BACKEND=${CLOUDKITTY_STORAGE_BACKEND:-"influxdb"}
+CLOUDKITTY_STORAGE_VERSION=${CLOUDKITTY_STORAGE_VERSION:-"2"}

 # Set CloudKitty output info
 CLOUDKITTY_OUTPUT_BACKEND=${CLOUDKITTY_OUTPUT_BACKEND:-"cloudkitty.backend.file.FileBackend"}

@@ -63,3 +65,10 @@ GITBRANCH["python-cloudkittyclient"]=${CLOUDKITTYCLIENT_BRANCH:-master}
 GITREPO["cloudkitty-dashboard"]=${CLOUDKITTYDASHBOARD_REPO:-${GIT_BASE}/openstack/cloudkitty-dashboard.git}
 GITDIR["cloudkitty-dashboard"]=$DEST/cloudkitty-dashboard
 GITBRANCH["cloudkitty-dashboard"]=${CLOUDKITTYDASHBOARD_BRANCH:-master}
+
+# Set influxdb info
+CLOUDKITTY_INFLUXDB_USER=${CLOUDKITTY_INFLUXDB_USER:-cloudkitty}
+CLOUDKITTY_INFLUXDB_PASSWORD=${CLOUDKITTY_INFLUXDB_PASSWORD:-cloudkitty}
+CLOUDKITTY_INFLUXDB_HOST=${CLOUDKITTY_INFLUXDB_HOST:-"localhost"}
+CLOUDKITTY_INFLUXDB_PORT=${CLOUDKITTY_INFLUXDB_PORT:-"8086"}
+CLOUDKITTY_INFLUXDB_DATABASE=${CLOUDKITTY_INFLUXDB_DATABASE:-"cloudkitty"}
doc/source/admin/devstack.rst (new file, 25 lines)

@@ -0,0 +1,25 @@
+DevStack installation
+=====================
+
+Add the following lines in your ``local.conf`` file to enable CloudKitty,
+Ceilometer and Gnocchi. By default, the fetcher will be ``gnocchi``
+(configurable via the ``CLOUDKITTY_FETCHER`` variable), the collector will be
+``gnocchi`` (configurable via the ``CLOUDKITTY_COLLECTOR`` variable), and the
+storage backend will be ``influxdb`` (configurable via the
+``CLOUDKITTY_STORAGE_BACKEND`` and ``CLOUDKITTY_STORAGE_VERSION`` variables).
+
+.. code-block:: ini
+
+   [[local|localrc]]
+   # ceilometer
+   enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer.git master
+
+   # cloudkitty
+   enable_plugin cloudkitty https://git.openstack.org/openstack/cloudkitty.git master
+   enable_service ck-api,ck-proc
+
+Then start devstack:
+
+.. code-block:: console
+
+   ./stack.sh
@@ -6,7 +6,7 @@ Administration Guide
    :glob:

    architecture
-   quick_deployment/index
+   devstack
    install/index
    configuration/index
    cli/index
@@ -1,18 +0,0 @@
-DevStack installation
-=====================
-
-Add the following lines in your ``local.conf`` file to enable CloudKitty with
-gnocchi collector::
-
-    [[local|localrc]]
-    # ceilometer
-    enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer.git master
-
-    # cloudkitty
-    enable_plugin cloudkitty https://git.openstack.org/openstack/cloudkitty.git master
-    enable_service ck-api,ck-proc
-    CLOUDKITTY_COLLECTOR=gnocchi
-
-Then start devstack::
-
-    ./stack.sh
@@ -1,6 +0,0 @@
-Quick Deployment
-================
-
-.. toctree::
-
-   devstack
@@ -11,7 +11,7 @@ metrics:
       - vcpus
     mutate: NUMBOOL
     extra_args:
-      aggregation_method: max
+      aggregation_method: mean
       resource_type: instance

   image.size:
@@ -24,7 +24,7 @@ metrics:
       - container_format
       - disk_format
     extra_args:
-      aggregation_method: max
+      aggregation_method: mean
       resource_type: image

   volume.size:
@@ -35,7 +35,7 @@ metrics:
     metadata:
       - volume_type
     extra_args:
-      aggregation_method: max
+      aggregation_method: mean
       resource_type: volume

   network.outgoing.bytes:
@@ -47,7 +47,7 @@ metrics:
     metadata:
       - instance_id
     extra_args:
-      aggregation_method: max
+      aggregation_method: mean
       resource_type: instance_network_interface

   network.incoming.bytes:
@@ -59,7 +59,7 @@ metrics:
     metadata:
      - instance_id
     extra_args:
-      aggregation_method: max
+      aggregation_method: mean
       resource_type: instance_network_interface

   ip.floating:
@@ -71,7 +71,7 @@ metrics:
       - state
     mutate: NUMBOOL
     extra_args:
-      aggregation_method: max
+      aggregation_method: mean
       resource_type: network

   radosgw.objects.size:
@@ -81,5 +81,5 @@ metrics:
       - project_id
     factor: 1/1073741824
     extra_args:
-      aggregation_method: max
+      aggregation_method: mean
       resource_type: ceph_account
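The switch from ``max`` to ``mean`` matches what the ``ceilometer-low`` archive policy actually exposes. One way to double-check the aggregation methods available on a given deployment (a sketch using the standard gnocchi client):

    gnocchi archive-policy show ceilometer-low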