Browse Source

Sample tacker.conf generator

It also makes the required keystone config changes as part
of the devstack plugin, which helps the functional test cases
to complete successfully and brings the generated file in line
with the existing tacker conf.

Closes-bug: #1592247
Change-Id: I545ceb2c9e61c22dbd8a0e1ee39a6e4fa24f8e88
changes/61/327661/15
Kanagaraj Manickam 5 years ago
parent
commit
e575760d3c
  1. 2
      .gitignore
  2. 17
      devstack/lib/tacker
  3. 4
      doc/source/install/manual_installation.rst
  4. 15
      etc/config-generator.conf
  5. 9
      etc/tacker/README.txt
  6. 422
      etc/tacker/tacker.conf
  7. 8
      releasenotes/notes/sample-config-file-f653eac0183bfa92.yaml
  8. 14
      setup.cfg
  9. 6
      tacker/common/config.py
  10. 4
      tacker/nfvo/drivers/vim/openstack_driver.py
  11. 4
      tacker/nfvo/nfvo_plugin.py
  12. 5
      tacker/service.py
  13. 5
      tacker/vm/infra_drivers/heat/heat.py
  14. 4
      tacker/vm/mgmt_drivers/openwrt/openwrt.py
  15. 4
      tacker/vm/monitor.py
  16. 4
      tacker/vm/monitor_drivers/http_ping/http_ping.py
  17. 4
      tacker/vm/monitor_drivers/ping/ping.py
  18. 5
      tacker/vm/plugin.py
  19. 4
      tacker/vm/vim_client.py
  20. 4
      tacker/wsgi.py
  21. 26
      tools/generate_config_file_sample.sh
  22. 4
      tox.ini

2
.gitignore

@ -25,5 +25,5 @@ subunit.log
*.sw?
*~
# Files created by releasenotes build
releasenotes/build
etc/tacker/tacker.conf.sample

17
devstack/lib/tacker

@ -192,12 +192,17 @@ function configure_tacker {
iniset "/$Q_PLUGIN_CONF_FILE" ml2 extension_drivers port_security
iniset "/$Q_PLUGIN_CONF_FILE" ml2_type_flat flat_networks $PUBLIC_PHYSICAL_NETWORK,$MGMT_PHYS_NET
iniset "/$Q_PLUGIN_CONF_FILE" ovs bridge_mappings $PUBLIC_PHYSICAL_NETWORK:$PUBLIC_BRIDGE,$MGMT_PHYS_NET:$BR_MGMT
_create_tacker_conf_dir
iniset_rpc_backend tacker $TACKER_CONF
cp $TACKER_DIR/etc/tacker/tacker.conf $TACKER_CONF
cd $TACKER_DIR
./tools/generate_config_file_sample.sh
cd -
cp $TACKER_DIR/etc/tacker/tacker.conf.sample $TACKER_CONF
iniset_rpc_backend tacker $TACKER_CONF
# If needed, move config file from ``$TACKER_DIR/etc/tacker`` to ``TACKER_CONF_DIR``
iniset $TACKER_CONF database connection `database_connection_url $TACKER_DB_NAME`
iniset $TACKER_CONF DEFAULT state_path $DATA_DIR/tacker
iniset $TACKER_CONF DEFAULT use_syslog $SYSLOG
@ -261,6 +266,12 @@ function configure_tacker {
iniset $TACKER_CONF tacker_heat heat_uri http://$SERVICE_HOST:8004/v1
iniset $TACKER_CONF tacker_heat stack_retries 60
iniset $TACKER_CONF tacker_heat stack_retry_wait 5
iniset $TACKER_CONF keystone_authtoken identity_uri http://127.0.0.1:5000
iniset $TACKER_CONF keystone_authtoken project_domain_id default
iniset $TACKER_CONF keystone_authtoken user_domain_id default
iniset $TACKER_CONF keystone_authtoken auth_plugin password
iniset $TACKER_CONF nfvo_vim default_vim VIM0
_tacker_setup_rootwrap
echo "Creating bridge"

4
doc/source/install/manual_installation.rst

@ -169,7 +169,9 @@ d). Provide an endpoint to tacker service.
..
7). Edit tacker.conf to ensure the below entries:
7). Generate the tacker.conf.sample using tools/generate_config_file_sample.sh
or 'tox -e config-gen' command and rename it to tacker.conf. Then edit it
to ensure the below entries:
.. note::

15
etc/config-generator.conf

@ -0,0 +1,15 @@
[DEFAULT]
output_file = etc/tacker/tacker.conf.sample
wrap_width = 79
namespace = tacker.common.config
namespace = tacker.wsgi
namespace = tacker.service
namespace = tacker.nfvo.nfvo_plugin
namespace = tacker.nfvo.drivers.vim.openstack_driver
namespace = tacker.vm.monitor
namespace = tacker.vm.plugin
namespace = tacker.vm.vim_client
namespace = tacker.vm.infra_drivers.heat.heat
namespace = tacker.vm.mgmt_drivers.openwrt.openwrt
namespace = tacker.vm.monitor_drivers.http_ping.http_ping
namespace = tacker.vm.monitor_drivers.ping.ping

9
etc/tacker/README.txt

@ -0,0 +1,9 @@
To generate the sample tacker configuration files, run the following
command from the top level of the tacker directory:
tox -e config-gen
If a 'tox' environment is unavailable, then you can run the following script
instead to generate the configuration files:
./tools/generate_config_file_sample.sh

422
etc/tacker/tacker.conf

@ -1,422 +0,0 @@
[DEFAULT]
# Print debugging output (set logging level to DEBUG instead of default WARNING level).
# debug = False
# Where to store Tacker state files. This directory must be writable by the
# user executing the agent.
# state_path = /var/lib/tacker
# Where to store lock files
lock_path = $state_path/lock
# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
# log_date_format = %Y-%m-%d %H:%M:%S
# use_syslog -> syslog
# log_file and log_dir -> log_dir/log_file
# (not log_file) and log_dir -> log_dir/{binary_name}.log
# use_stderr -> stderr
# (not use_stderr) and (not log_file) -> stdout
# publish_errors -> notification system
# use_syslog = False
# syslog_log_facility = LOG_USER
# use_stderr = True
# log_file =
# log_dir =
# publish_errors = False
# Address to bind the API server to
# bind_host = 0.0.0.0
# Port to bind the API server to
# bind_port = 9890
# Path to the extensions. Note that this can be a colon-separated list of
# paths. For example:
# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
# The __path__ of tacker.extensions is appended to this, so if your
# extensions are in there you don't need to specify them here
# api_extensions_path =
# (StrOpt) Tacker core plugin entrypoint to be loaded from the
# tacker.core_plugins namespace. See setup.cfg for the entrypoint names of the
# plugins included in the tacker source distribution. For compatibility with
# previous versions, the class name of a plugin can be specified instead of its
# entrypoint name.
#
# core_plugin =
# Example: core_plugin = ml2
# (ListOpt) List of service plugin entrypoints to be loaded from the
# tacker.service_plugins namespace. See setup.cfg for the entrypoint names of
# the plugins included in the tacker source distribution. For compatibility
# with previous versions, the class name of a plugin can be specified instead
# of its entrypoint name.
#
# service_plugins =
# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
service_plugins = vnfm,nfvo
# Paste configuration file
# api_paste_config = api-paste.ini
# The strategy to be used for auth.
# Supported values are 'keystone'(default), 'noauth'.
# auth_strategy = keystone
# Allow sending resource operation notification to DHCP agent
# dhcp_agent_notification = True
# Enable or disable bulk create/update/delete operations
# allow_bulk = True
# Enable or disable pagination
# allow_pagination = False
# Enable or disable sorting
# allow_sorting = False
# Enable or disable overlapping IPs for subnets
# Attention: the following parameter MUST be set to False if Tacker is
# being used in conjunction with nova security groups
# allow_overlapping_ips = False
# Ensure that configured gateway is on subnet
# force_gateway_on_subnet = False
# RPC configuration options. Defined in rpc __init__
# The messaging module to use, defaults to kombu.
# rpc_backend = tacker.openstack.common.rpc.impl_kombu
# Size of RPC thread pool
# rpc_thread_pool_size = 64
# Size of RPC connection pool
# rpc_conn_pool_size = 30
# Seconds to wait for a response from call or multicall
# rpc_response_timeout = 60
# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
# rpc_cast_timeout = 30
# Modules of exceptions that are permitted to be recreated
# upon receiving exception data from an rpc call.
# allowed_rpc_exception_modules = tacker.openstack.common.exception, nova.exception
# AMQP exchange to connect to if using RabbitMQ or QPID
# control_exchange = tacker
# If passed, use a fake RabbitMQ provider
# fake_rabbit = False
# Configuration options if sending notifications via kombu rpc (these are
# the defaults)
# SSL version to use (valid only if SSL enabled)
# kombu_ssl_version =
# SSL key file (valid only if SSL enabled)
# kombu_ssl_keyfile =
# SSL cert file (valid only if SSL enabled)
# kombu_ssl_certfile =
# SSL certification authority file (valid only if SSL enabled)
# kombu_ssl_ca_certs =
# IP address of the RabbitMQ installation
# rabbit_host = localhost
# Password of the RabbitMQ server
# rabbit_password = guest
# Port where RabbitMQ server is running/listening
# rabbit_port = 5672
# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
# rabbit_hosts = localhost:5672
# User ID used for RabbitMQ connections
# rabbit_userid = guest
# Location of a virtual RabbitMQ installation.
# rabbit_virtual_host = /
# Maximum retries with trying to connect to RabbitMQ
# (the default of 0 implies an infinite retry count)
# rabbit_max_retries = 0
# RabbitMQ connection retry interval
# rabbit_retry_interval = 1
# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
# wipe RabbitMQ database when changing this option. (boolean value)
# rabbit_ha_queues = false
# QPID
# rpc_backend=tacker.openstack.common.rpc.impl_qpid
# Qpid broker hostname
# qpid_hostname = localhost
# Qpid broker port
# qpid_port = 5672
# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
# qpid_hosts = localhost:5672
# Username for qpid connection
# qpid_username = ''
# Password for qpid connection
# qpid_password = ''
# Space separated list of SASL mechanisms to use for auth
# qpid_sasl_mechanisms = ''
# Seconds between connection keepalive heartbeats
# qpid_heartbeat = 60
# Transport to use, either 'tcp' or 'ssl'
# qpid_protocol = tcp
# Disable Nagle algorithm
# qpid_tcp_nodelay = True
# ZMQ
# rpc_backend=tacker.openstack.common.rpc.impl_zmq
# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
# The "host" option should point or resolve to this address.
# rpc_zmq_bind_address = *
# ============ Notification System Options =====================
# Notifications can be sent when network/subnet/port are created, updated or deleted.
# There are three methods of sending notifications: logging (via the
# log_file directive), rpc (via a message queue) and
# noop (no notifications sent, the default)
# Notification_driver can be defined multiple times
# Do nothing driver
# notification_driver = tacker.openstack.common.notifier.no_op_notifier
# Logging driver
# notification_driver = tacker.openstack.common.notifier.log_notifier
# RPC driver.
notification_driver = tacker.openstack.common.notifier.rpc_notifier
# default_notification_level is used to form actual topic name(s) or to set logging level
# default_notification_level = INFO
# default_publisher_id is a part of the notification payload
# host = myhost.com
# default_publisher_id = $host
# Defined in rpc_notifier, can be comma separated values.
# The actual topic names will be %s.%(default_notification_level)s
# notification_topics = notifications
# Default maximum number of items returned in a single response,
# value == infinite and value < 0 means no max limit, and value must
# be greater than 0. If the number of items requested is greater than
# pagination_max_limit, server will just return pagination_max_limit
# of number of items.
# pagination_max_limit = -1
# Maximum number of DNS nameservers per subnet
# max_dns_nameservers = 5
# Maximum number of host routes per subnet
# max_subnet_host_routes = 20
# Maximum number of fixed ips per port
# max_fixed_ips_per_port = 5
# =========== items for agent management extension =============
# Seconds to regard the agent as down; should be at least twice
# report_interval, to be sure the agent is down for good
# agent_down_time = 75
# =========== end of items for agent management extension =====
# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
# networks to first DHCP agent which sends get_active_networks message to
# tacker server
# network_auto_schedule = True
# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
# routers to first L3 agent which sends sync_routers message to tacker server
# router_auto_schedule = True
# Number of DHCP agents scheduled to host a network. This enables redundant
# DHCP agents for configured networks.
# dhcp_agents_per_network = 1
# =========== end of items for agent scheduler extension =====
# =========== WSGI parameters related to the API server ==============
# Number of separate worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as workers. The parent process manages them.
# api_workers = 0
# Number of separate RPC worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as RPC workers. The parent process manages them.
# This feature is experimental until issues are addressed and testing has been
# enabled for various plugins for compatibility.
# rpc_workers = 0
# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
# starting API server. Not supported on OS X.
# tcp_keepidle = 600
# Number of seconds to keep retrying to listen
# retry_until_window = 30
# Number of backlog requests to configure the socket with.
# backlog = 4096
# Max header line to accommodate large tokens
# max_header_line = 16384
# Enable SSL on the API server
# use_ssl = False
# Certificate file to use when starting API server securely
# ssl_cert_file = /path/to/certfile
# Private key file to use when starting API server securely
# ssl_key_file = /path/to/keyfile
# CA certificate file to use when starting API server securely to
# verify connecting clients. This is an optional parameter only required if
# API clients need to authenticate to the API server using SSL certificates
# signed by a trusted CA
# ssl_ca_file = /path/to/cafile
# ======== end of WSGI parameters related to the API server ==========
# ======== tacker nova interactions ==========
# Send notification to nova when port status is active.
# notify_nova_on_port_status_changes = True
# Send notifications to nova when port data (fixed_ips/floatingips) change
# so nova can update its cache.
# notify_nova_on_port_data_changes = True
# URL for connection to nova (Only supports one nova region currently).
# nova_url = http://127.0.0.1:8774/v2
# Name of nova region to use. Useful if keystone manages more than one region
# nova_region_name =
# Username for connection to nova in admin context
# nova_admin_username =
# The uuid of the admin nova tenant
# nova_admin_tenant_id =
# Password for connection to nova in admin context.
# nova_admin_password =
# Authorization URL for connection to nova in admin context.
# nova_admin_auth_url =
# CA file for novaclient to verify server certificates
# nova_ca_certificates_file =
# Boolean to control ignoring SSL errors on the nova url
# nova_api_insecure = False
# Number of seconds between sending events to nova if there are any events to send
# send_events_interval = 2
# ======== end of tacker nova interactions ==========
[agent]
# Use "sudo tacker-rootwrap /etc/tacker/rootwrap.conf" to use the real
# root filter facility.
# Change to "sudo" to skip the filtering and just run the command directly
# root_helper = sudo
# =========== items for agent management extension =============
# seconds between nodes reporting state to server; should be less than
# agent_down_time, best if it is half or less than agent_down_time
# report_interval = 30
# =========== end of items for agent management extension =====
[keystone_authtoken]
signing_dir = /var/cache/tacker
cafile = /opt/stack/data/ca-bundle.pem
project_domain_id = default
project_name = service
user_domain_id = default
password = service-password
username = tacker
auth_url = http://127.0.0.1:35357
auth_plugin = password
identity_uri = http://127.0.0.1:5000
auth_uri = http://127.0.0.1:5000
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# connection = mysql://root:pass@127.0.0.1:3306/tacker
# Replace 127.0.0.1 above with the IP address of the database used by the
# main tacker server. (Leave it as is if the database runs on this host.)
# connection = sqlite://
# NOTE: In deployment the [database] section and its connection attribute may
# be set in the corresponding core plugin '.ini' file. However, it is suggested
# to put the [database] section and its connection attribute in this
# configuration file.
# Database engine for which script will be generated when using offline
# migration
# engine =
# The SQLAlchemy connection string used to connect to the slave database
# slave_connection =
# Database reconnection retry times - in event connectivity is lost
# set to -1 implies an infinite retry count
# max_retries = 10
# Database reconnection interval in seconds - if the initial connection to the
# database fails
# retry_interval = 10
# Minimum number of SQL connections to keep open in a pool
# min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
# max_pool_size = 10
# Timeout in seconds before idle sql connections are reaped
# idle_timeout = 3600
# If set, use this value for max_overflow with sqlalchemy
# max_overflow = 20
# Verbosity of SQL debugging information. 0=None, 100=Everything
# connection_debug = 0
# Add python stack traces to SQL as comment strings
# connection_trace = False
# If set, use this value for pool_timeout with sqlalchemy
# pool_timeout = 10
[tacker]
# Specify drivers for hosting device
# infra_driver nova deprecated for Mitaka. Will be removed in Newton cycle.
# infra_driver = heat,nova,noop
# Specify drivers for mgmt
# mgmt_driver = noop,openwrt
# Specify drivers for monitoring
# monitor_driver = ping, http_ping
[nfvo_vim]
# Supported VIM drivers, resource orchestration controllers such as OpenStack, kvm
#Default VIM driver is OpenStack
#vim_drivers = openstack
#Default VIM placement if vim id is not provided
default_vim = VIM0
[vim_keys]
#openstack = /etc/tacker/vim/fernet_keys
#Deprecated for Mitaka. Will be removed in Newton cycle.
[tacker_nova]
# parameters for novaclient to talk to nova
region_name = RegionOne
project_domain_id = default
project_name = service
user_domain_id = default
password = service-password
username = nova
auth_url = http://127.0.0.1:35357
auth_plugin = password
[tacker_heat]
heat_uri = http://localhost:8004/v1
stack_retries = 60
stack_retry_wait = 5

8
releasenotes/notes/sample-config-file-f653eac0183bfa92.yaml

@ -0,0 +1,8 @@
---
prelude: >
Core configuration file is automatically generated.
features:
- Tacker no longer includes static example configuration file. Instead,
use tools/generate_config_file_sample.sh to generate it. The file is
generated with a .sample extension under etc/tacker directory.

14
setup.cfg

@ -57,6 +57,20 @@ tacker.tacker.mgmt.drivers =
tacker.tacker.monitor.drivers =
ping = tacker.vm.monitor_drivers.ping.ping:VNFMonitorPing
http_ping = tacker.vm.monitor_drivers.http_ping.http_ping:VNFMonitorHTTPPing
oslo.config.opts =
tacker.common.config = tacker.common.config:config_opts
tacker.wsgi = tacker.wsgi:config_opts
tacker.service = tacker.service:config_opts
tacker.nfvo.nfvo_plugin = tacker.nfvo.nfvo_plugin:config_opts
tacker.nfvo.drivers.vim.openstack_driver = tacker.nfvo.drivers.vim.openstack_driver:config_opts
tacker.vm.monitor = tacker.vm.monitor:config_opts
tacker.vm.plugin = tacker.vm.plugin:config_opts
tacker.vm.vim_client = tacker.vm.vim_client:config_opts
tacker.vm.infra_drivers.heat.heat= tacker.vm.infra_drivers.heat.heat:config_opts
tacker.vm.mgmt_drivers.openwrt.openwrt = tacker.vm.mgmt_drivers.openwrt.openwrt:config_opts
tacker.vm.monitor_drivers.http_ping.http_ping = tacker.vm.monitor_drivers.http_ping.http_ping:config_opts
tacker.vm.monitor_drivers.ping.ping = tacker.vm.monitor_drivers.ping.ping:config_opts
[build_sphinx]

6
tacker/common/config.py

@ -40,7 +40,7 @@ core_opts = [
help=_("The API paste config file to use")),
cfg.StrOpt('api_extensions_path', default="",
help=_("The path for API extensions")),
cfg.ListOpt('service_plugins', default=[],
cfg.ListOpt('service_plugins', default=['nfvo', 'vnfm'],
help=_("The service plugins Tacker will use")),
cfg.StrOpt('policy_file', default="policy.json",
help=_("The policy file to use")),
@ -93,6 +93,10 @@ logging.register_options(cfg.CONF)
cfg.CONF.register_opts(core_opts)
cfg.CONF.register_cli_opts(core_cli_opts)
def config_opts():
return [(None, core_opts), (None, core_cli_opts)]
# Ensure that the control exchange is set correctly
oslo_messaging.set_transport_defaults(control_exchange='tacker')

4
tacker/nfvo/drivers/vim/openstack_driver.py

@ -47,6 +47,10 @@ cfg.CONF.register_opts(OPTS, 'vim_keys')
cfg.CONF.register_opts(OPENSTACK_OPTS, 'vim_monitor')
def config_opts():
return [('vim_keys', OPTS), ('vim_monitor', OPENSTACK_OPTS)]
class OpenStack_Driver(abstract_vim_driver.VimAbstractDriver):
"""Driver for OpenStack VIM

4
tacker/nfvo/nfvo_plugin.py

@ -31,6 +31,10 @@ from tacker.db.nfvo import nfvo_db
LOG = logging.getLogger(__name__)
def config_opts():
return [('nfvo', NfvoPlugin.OPTS)]
class NfvoPlugin(nfvo_db.NfvoPluginDb):
"""NFVO reference plugin for NFVO extension

5
tacker/service.py

@ -40,6 +40,11 @@ service_opts = [
CONF = cfg.CONF
CONF.register_opts(service_opts)
def config_opts():
return [(None, service_opts)]
LOG = logging.getLogger(__name__)

5
tacker/vm/infra_drivers/heat/heat.py

@ -49,6 +49,11 @@ OPTS = [
help=_("Flavor Extra Specs")),
]
CONF.register_opts(OPTS, group='tacker_heat')
def config_opts():
return [('tacker_heat', OPTS)]
STACK_RETRIES = cfg.CONF.tacker_heat.stack_retries
STACK_RETRY_WAIT = cfg.CONF.tacker_heat.stack_retry_wait
STACK_FLAVOR_EXTRA = cfg.CONF.tacker_heat.flavor_extra_specs

4
tacker/vm/mgmt_drivers/openwrt/openwrt.py

@ -34,6 +34,10 @@ OPTS = [
cfg.CONF.register_opts(OPTS, 'openwrt')
def config_opts():
return [('openwrt', OPTS)]
class DeviceMgmtOpenWRT(abstract_driver.DeviceMGMTAbstractDriver):
def get_type(self):
return 'openwrt'

4
tacker/vm/monitor.py

@ -41,6 +41,10 @@ OPTS = [
CONF.register_opts(OPTS, group='monitor')
def config_opts():
return [('monitor', OPTS), ('tacker', VNFMonitor.OPTS)]
class VNFMonitor(object):
"""VNF Monitor."""

4
tacker/vm/monitor_drivers/http_ping/http_ping.py

@ -34,6 +34,10 @@ OPTS = [
cfg.CONF.register_opts(OPTS, 'monitor_http_ping')
def config_opts():
return [('monitor_http_ping', OPTS)]
class VNFMonitorHTTPPing(abstract_driver.VNFMonitorAbstractDriver):
def get_type(self):
return 'http_ping'

4
tacker/vm/monitor_drivers/ping/ping.py

@ -33,6 +33,10 @@ OPTS = [
cfg.CONF.register_opts(OPTS, 'monitor_ping')
def config_opts():
return [('monitor_ping', OPTS)]
class VNFMonitorPing(abstract_driver.VNFMonitorAbstractDriver):
def get_type(self):
return 'ping'

5
tacker/vm/plugin.py

@ -39,6 +39,11 @@ LOG = logging.getLogger(__name__)
CONF = cfg.CONF
def config_opts():
return [('tacker', VNFMMgmtMixin.OPTS),
('tacker', VNFMPlugin.OPTS)]
class VNFMMgmtMixin(object):
OPTS = [
cfg.ListOpt(

4
tacker/vm/vim_client.py

@ -33,6 +33,10 @@ OPTS = [
cfg.CONF.register_opts(OPTS, 'nfvo_vim')
def config_opts():
return [('nfvo_vim', OPTS)]
class VimClient(object):
def get_vim(self, context, vim_id=None, region_name=None):
"""Get Vim information for provided VIM id

4
tacker/wsgi.py

@ -79,6 +79,10 @@ socket_opts = [
CONF = cfg.CONF
CONF.register_opts(socket_opts)
def config_opts():
return [(None, socket_opts)]
LOG = logging.getLogger(__name__)

26
tools/generate_config_file_sample.sh

@ -0,0 +1,26 @@
#!/bin/sh
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -e
GEN_CMD=oslo-config-generator
if ! type "$GEN_CMD" > /dev/null; then
echo "ERROR: $GEN_CMD not installed on the system."
exit 1
fi
$GEN_CMD --config-file=etc/config-generator.conf
set -x

4
tox.ini

@ -95,3 +95,7 @@ exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,to
[hacking]
import_exceptions = tacker._i18n
local-check-factory = tacker.hacking.checks.factory
[testenv:config-gen]
commands =
oslo-config-generator --config-file=etc/config-generator.conf

Loading…
Cancel
Save