[ADMIN_API] Migrate to oslo.config

Major configuration change. See examples in etc/ directory.

Change-Id: I4360529cc50edcb0cbcd4261373e57cb09ea7d6f
This commit is contained in:
David Shrewsbury
2013-10-18 14:19:19 -04:00
parent 2a28c63ea9
commit 6e601de80d
16 changed files with 423 additions and 751 deletions

View File

@@ -1,72 +0,0 @@
########################################################################
# A sample configuration file read by the Libra pool manager utility.
########################################################################
#-----------------------------------------------------------------------
# The [DEFAULT] section contains options common to the various Libra
# utilities (worker, mgm, etc).
#-----------------------------------------------------------------------
[DEFAULT]
# Options to enable more verbose output
#verbose = false
#debug = false
# Daemon process options
#daemon = true
#user = libra
#group = libra
# Other logging options
#syslog = false
#syslog_socket = /dev/log
#syslog_faciltiy = local7
#logstash = HOST:PORT
#-----------------------------------------------------------------------
# Options for utilities that are Gearman workers or clients.
#-----------------------------------------------------------------------
[gearman]
#servers = localhost:4730, HOST:PORT
#keepalive = false
#keepcnt = COUNT
#keepidle = SECONDS
#keepintvl = SECONDS
#poll = 1
#reconnect_sleep = 60
#ssl_ca = /path/to/ssl_ca
#ssl_cert = /path/to/ssl_cert
#ssl_key = /path/to/ssl_key
#-----------------------------------------------------------------------
# The [api] section is specific to the libra_api utility.
#-----------------------------------------------------------------------
[api]
# Options with defaults
#disable_keystone=False
#host=0.0.0.0
#port=443
#keystone_module=keystoneclient.middleware.auth_token:AuthProtocol
#logfile=/var/log/libra/libra_api.log
#pid=/var/run/libra/libra_api.pid
# Required options
db_sections=mysql1
swift_basepath=lbaaslogs
swift_endpoint=https://host.com:443/v1/
# Others
ssl_certfile=certfile.crt
ssl_keyfile=keyfile.key
ip_filters=192.168.0.0/24
[mysql1]
username=root
password=
schema=lbaas
host=localhost
# Keystone options go here
[keystone]

174
etc/libra.cfg Normal file
View File

@@ -0,0 +1,174 @@
########################################################################
# A sample configuration file read by the Libra utilities.
########################################################################
#-----------------------------------------------------------------------
# The [DEFAULT] section contains options common to the various Libra
# utilities (worker, mgm, etc).
#-----------------------------------------------------------------------
[DEFAULT]
# Options to enable more verbose output
#verbose = false
#debug = false
# Daemon process options
#daemon = true
#user = libra
#group = libra
# Other logging options
#syslog = false
#syslog_socket = /dev/log
#syslog_faciltiy = local7
#logstash = HOST:PORT
#-----------------------------------------------------------------------
# Options for utilities that are Gearman workers or clients.
#-----------------------------------------------------------------------
[gearman]
#servers = localhost:4730, HOST:PORT
#keepalive = false
#keepcnt = COUNT
#keepidle = SECONDS
#keepintvl = SECONDS
#poll = 1
#reconnect_sleep = 60
#ssl_ca = /path/to/ssl_ca
#ssl_cert = /path/to/ssl_cert
#ssl_key = /path/to/ssl_key
#-----------------------------------------------------------------------
# [worker] and [worker:*] sections are specific to the Libra worker.
#-----------------------------------------------------------------------
[worker]
#driver = haproxy
#pid = /var/run/libra/libra_worker.pid
#logfile = /var/log/libra/libra_worker.log
# HAProxy driver options for the worker
[worker:haproxy]
#service = ubuntu
#logfile = /var/log/haproxy.log
#-----------------------------------------------------------------------
# The [mgm] section is specific to the libra_mgm utility.
#-----------------------------------------------------------------------
[mgm]
# Options with defaults
#pid = /var/run/libra/libra_mgm.pid
#logfile = /var/log/libra/libra_mgm.log
#threads = 4
#rm_fip_ignore_500 = false
#nova_insecure = false
# Required options
az = 1
nova_auth_url = https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/
nova_keyname = default
nova_region = region
nova_secgroup = default
nova_user = username
nova_pass = password
nova_image = 12345
nova_image_size = standard.medium
# Others
node_basename = BASENAME
nova_az_name = NAME
nova_bypass_url = URL
nova_net_id = ID
nova_tenant = TENANT
nova_tenant_id = TENANTID
#-----------------------------------------------------------------------
# The [admin_api] section is specific to the libra_admin_api utility.
#-----------------------------------------------------------------------
[admin_api]
# Options with defaults
#host = 0.0.0.0
#port = 8889
#logfile = /var/log/libra/libra_admin_api.log
#pid = /var/run/libra/libra_admin_api.pid
#expire_days = 0
#node_pool_size = 10
#number_of_servers = 1
#server_id = 0
#stats_device_error_limit = 5
#stats_driver = dummy
#stats_offline_ping_limit = 10
#stats_poll_timeout = 5
#stats_poll_timeout_retry = 30
#vip_pool_size = 10
# Required options
db_sections = mysql1
ssl_certfile = certfile.crt
ssl_keyfile = keyfile.key
# Datadog plugin options
#datadog_env = unknown
datadog_api_key = KEY
datadog_app_key = KEY2
datadog_message_tail = MSG
datadog_tags = service:lbaas
# Others
#-----------------------------------------------------------------------
# The [api] section is specific to the libra_api utility.
#-----------------------------------------------------------------------
[api]
# Options with defaults
#disable_keystone=False
#host = 0.0.0.0
#port = 443
#keystone_module = keystoneclient.middleware.auth_token:AuthProtocol
#logfile = /var/log/libra/libra_api.log
#pid = /var/run/libra/libra_api.pid
# Required options
db_sections = mysql1
swift_basepath = lbaaslogs
swift_endpoint = https://host.com:443/v1/
# Others
ssl_certfile = certfile.crt
ssl_keyfile = keyfile.key
ip_filters = 192.168.0.0/24
#-----------------------------------------------------------------------
# The [mysql*] sections are referenced by admin_api and api by the
# db_sections values.
#-----------------------------------------------------------------------
[mysql1]
username = root
password =
schema = lbaas
host = localhost
#-----------------------------------------------------------------------
# The API will reference keystone options here
#-----------------------------------------------------------------------
[keystone]

View File

@@ -1,71 +0,0 @@
########################################################################
# A sample configuration file read by the Libra pool manager utility.
########################################################################
#-----------------------------------------------------------------------
# The [DEFAULT] section contains options common to the various Libra
# utilities (worker, mgm, etc).
#-----------------------------------------------------------------------
[DEFAULT]
# Options to enable more verbose output
#verbose = false
#debug = false
# Daemon process options
#daemon = true
#user = libra
#group = libra
# Other logging options
#syslog = false
#syslog_socket = /dev/log
#syslog_faciltiy = local7
#logstash = HOST:PORT
#-----------------------------------------------------------------------
# Options for utilities that are Gearman workers or clients.
#-----------------------------------------------------------------------
[gearman]
#servers = localhost:4730, HOST:PORT
#keepalive = false
#keepcnt = COUNT
#keepidle = SECONDS
#keepintvl = SECONDS
#poll = 1
#reconnect_sleep = 60
#ssl_ca = /path/to/ssl_ca
#ssl_cert = /path/to/ssl_cert
#ssl_key = /path/to/ssl_key
#-----------------------------------------------------------------------
# The [mgm] section is specific to the libra_mgm utility.
#-----------------------------------------------------------------------
[mgm]
# Options with defaults
#pid = /var/run/libra/libra_mgm.pid
#logfile = /var/log/libra/libra_mgm.log
#threads = 4
#rm_fip_ignore_500 = false
#nova_insecure = false
# Required options
az = 1
nova_auth_url = https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/
nova_keyname = default
nova_region = region
nova_secgroup = default
nova_user = username
nova_pass = password
nova_image = 12345
nova_image_size = standard.medium
# Others
node_basename = BASENAME
nova_az_name = NAME
nova_bypass_url = URL
nova_net_id = ID
nova_tenant = TENANT
nova_tenant_id = TENANTID

View File

@@ -1,88 +0,0 @@
########################################################################
# A sample configuration file read by the Libra utilities that use the
# Options class from the libra/common/options.py module.
#
# Options are expressed in one of two forms:
# key = value
# key : value
#
# Boolean options should be given either a 'true' or 'false' value.
# Some options can contain multiple values (see 'server' option in the
# [worker] section).
#
# Options given on the command line will override any options set in
# the configuration file.
########################################################################
# The [global] section contains options common to the various Libra
# utilities (worker, mgm, etc). This section is read before any other
# section, so values may be overridden by the other sections.
[global]
verbose = true
# The [worker] section is specific to the libra_worker utility.
[worker]
user = libra
group = libra
driver = haproxy
reconnect_sleep = 60
gearman_poll = 60
server = 10.0.0.1:4730 10.0.0.2:4730
pid = /var/run/libra/libra_worker.pid
logfile = /var/log/libra/libra_worker.log
haproxy_logfile = /var/log/haproxy.log
# The [mgm] section is specific to the libra_mgm utility.
[mgm]
user = libra
group = libra
pid = /var/run/libra/libra_mgm.pid
logfile = /var/log/libra/libra_mgm.log
datadir = /var/run/libra/
nova_auth_url = https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/
nova_user = username
nova_pass = password
nova_tenant = tenant
nova_region = region
nova_keyname = default
nova_secgroup = default
nova_image = 12345
nova_image_size = standard.medium
node_basename = 'libra'
az = 1
gearman=127.0.0.1:4730
[admin_api]
db_sections=mysql1
ssl_certfile=certfile.crt
ssl_keyfile=keyfile.key
expire_days=7
stats_driver=dummy datadog database
datadog_api_key=KEY
datadog_app_key=KEY2
datadog_tags=service:lbaas
node_pool_size=50
[api]
host=0.0.0.0
port=8080
disable_keystone=False
db_sections=mysql1
gearman=127.0.0.1:4730
swift_basepath=lbaaslogs
swift_endpoint=https://host.com:443/v1/
ssl_certfile=certfile.crt
ssl_keyfile=keyfile.key
ip_filters=192.168.0.0/24
[mysql1]
username=root
password=
schema=lbaas
host=localhost
# Keystone options go here
[keystone]

View File

@@ -1,53 +0,0 @@
########################################################################
# A sample configuration file read by the Libra worker utility.
########################################################################
#-----------------------------------------------------------------------
# The [DEFAULT] section contains options common to the various Libra
# utilities (worker, mgm, etc).
#-----------------------------------------------------------------------
[DEFAULT]
# Options to enable more verbose output
#verbose = false
#debug = false
# Daemon process options
#daemon = true
#user = libra
#group = libra
# Other logging options
#syslog = false
#syslog_socket = /dev/log
#syslog_faciltiy = local7
#logstash = HOST:PORT
#-----------------------------------------------------------------------
# Options for utilities that are Gearman workers or clients.
#-----------------------------------------------------------------------
[gearman]
#servers = localhost:4730, HOST:PORT
#keepalive = false
#keepcnt = COUNT
#keepidle = SECONDS
#keepintvl = SECONDS
#poll = 1
#reconnect_sleep = 60
#ssl_ca = /path/to/ssl_ca
#ssl_cert = /path/to/ssl_cert
#ssl_key = /path/to/ssl_key
#-----------------------------------------------------------------------
# [worker] and [worker:*] sections are specific to the Libra worker.
#-----------------------------------------------------------------------
[worker]
#driver = haproxy
#pid = /var/run/libra/libra_worker.pid
#logfile = /var/log/libra/libra_worker.log
# HAProxy driver options for the worker
[worker:haproxy]
#service = ubuntu
#logfile = /var/log/haproxy.log

View File

@@ -11,3 +11,84 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg

# Option group for the Libra Admin API server. Every option registered
# below is read from the [admin_api] section of the config file.
adminapi_group = cfg.OptGroup('admin_api', 'Libra Admin API options')
cfg.CONF.register_group(adminapi_group)

cfg.CONF.register_opts(
    [
        cfg.StrOpt('datadog_api_key',
                   help='API key for datadog alerting'),
        cfg.StrOpt('datadog_app_key',
                   help='Application key for datadog alerting'),
        cfg.StrOpt('datadog_env',
                   default='unknown',
                   # fixed typo: was 'Server enironment'
                   help='Server environment'),
        cfg.StrOpt('datadog_message_tail',
                   help='Text to add at the end of a Datadog alert'),
        cfg.StrOpt('datadog_tags',
                   help='A space separated list of tags for Datadog alerts'),
        cfg.ListOpt('db_sections',
                    required=True,
                    help='MySQL config sections in the config file'),
        cfg.IntOpt('expire_days',
                   default=0,
                   help='Number of days until deleted load balancers '
                        'are expired'),
        cfg.StrOpt('host',
                   default='0.0.0.0',
                   help='IP address to bind to, 0.0.0.0 for all IPs'),
        cfg.StrOpt('logfile',
                   default='/var/log/libra/libra_admin_api.log',
                   help='Log file'),
        cfg.IntOpt('node_pool_size',
                   default=10,
                   help='Number of hot spare devices to keep in the pool'),
        cfg.IntOpt('number_of_servers',
                   default=1,
                   help='number of Admin API servers, used to calculate '
                        'which Admin API server should stats ping next'),
        cfg.StrOpt('pid',
                   default='/var/run/libra/libra_admin_api.pid',
                   help='PID file'),
        cfg.IntOpt('port',
                   default=8889,
                   help='Port number for API server'),
        cfg.IntOpt('server_id',
                   default=0,
                   help='server ID of this server, used to calculate which '
                        'Admin API server should stats ping next '
                        '(start at 0)'),
        cfg.StrOpt('ssl_certfile',
                   help='Path to an SSL certificate file'),
        cfg.StrOpt('ssl_keyfile',
                   help='Path to an SSL key file'),
        cfg.IntOpt('stats_device_error_limit',
                   default=5,
                   help='Max number of simultaneous device failures to allow '
                        'recovery on'),
        cfg.ListOpt('stats_driver',
                    default=['dummy'],
                    help='type of stats device to use'),
        cfg.IntOpt('stats_offline_ping_limit',
                   default=10,
                   help='Number of failed pings to an OFFLINE device before '
                        'deleting it'),
        cfg.IntOpt('stats_poll_timeout',
                   default=5,
                   help='gearman timeout value for initial ping request '
                        '(in seconds)'),
        cfg.IntOpt('stats_poll_timeout_retry',
                   default=30,
                   help='gearman timeout value for retry ping request '
                        '(in seconds)'),
        cfg.IntOpt('vip_pool_size',
                   default=10,
                   help='Number of hot spare vips to keep in the pool'),
    ],
    group=adminapi_group
)

View File

@@ -11,6 +11,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
eventlet.monkey_patch()
import daemon
@@ -21,17 +22,19 @@ import pwd
import pecan
import sys
import signal
import os
from eventlet import wsgi
from libra import __version__
from libra.common.api import server
from libra.admin_api.stats.drivers.base import known_drivers
from libra.admin_api.stats.scheduler import Stats
from libra.admin_api.device_pool.manage_pool import Pool
from libra.admin_api.expunge.expunge import ExpungeScheduler
from libra.admin_api import config as api_config
from libra.admin_api import model
from libra.admin_api.stats.drivers.base import known_drivers
from libra.openstack.common import importutils
from libra.common.options import Options, setup_logging
from eventlet import wsgi
from libra.common.options import add_common_opts, libra_logging, CONF
def get_pecan_config():
@@ -40,26 +43,25 @@ def get_pecan_config():
return pecan.configuration.conf_from_file(filename)
def setup_app(pecan_config, args):
def setup_app(pecan_config):
model.init_model()
if not pecan_config:
pecan_config = get_pecan_config()
config = dict(pecan_config)
config['database'] = args.db_sections
config['database'] = CONF['admin_api']['db_sections']
config['gearman'] = {
'server': args.gearman,
'ssl_key': args.gearman_ssl_key,
'ssl_cert': args.gearman_ssl_cert,
'ssl_ca': args.gearman_ssl_ca,
'keepalive': args.gearman_keepalive,
'keepcnt': args.gearman_keepcnt,
'keepidle': args.gearman_keepidle,
'keepintvl': args.gearman_keepintvl
'server': CONF['gearman']['servers'],
'ssl_key': CONF['gearman']['ssl_key'],
'ssl_cert': CONF['gearman']['ssl_cert'],
'ssl_ca': CONF['gearman']['ssl_ca'],
'keepalive': CONF['gearman']['keepalive'],
'keepcnt': CONF['gearman']['keepcnt'],
'keepidle': CONF['gearman']['keepidle'],
'keepintvl': CONF['gearman']['keepintvl']
}
config['conffile'] = args.config
if args.debug:
if CONF['debug']:
config['wsme'] = {'debug': True}
config['app']['debug'] = True
@@ -81,19 +83,18 @@ def setup_app(pecan_config, args):
class MaintThreads(object):
def __init__(self, logger, args, drivers):
def __init__(self, logger, drivers):
self.classes = []
self.logger = logger
self.args = args
self.drivers = drivers
signal.signal(signal.SIGINT, self.exit_handler)
signal.signal(signal.SIGTERM, self.exit_handler)
self.run_threads()
def run_threads(self):
stats = Stats(self.logger, self.args, self.drivers)
pool = Pool(self.logger, self.args)
expunge = ExpungeScheduler(self.logger, self.args)
stats = Stats(self.logger, self.drivers)
pool = Pool(self.logger)
expunge = ExpungeScheduler(self.logger)
self.classes.append(stats)
self.classes.append(pool)
self.classes.append(expunge)
@@ -117,156 +118,15 @@ class LogStdout(object):
def main():
options = Options('admin_api', 'Admin API Server')
options.parser.add_argument(
'--host', help='IP address to bind to, 0.0.0.0 for all IPs',
default='0.0.0.0'
)
options.parser.add_argument(
'--port', help='Port number for API server', type=int, default=8889
)
options.parser.add_argument(
'--db_sections', action='append', default=[],
help='MySQL config sections in the config file'
)
options.parser.add_argument(
'--ssl_certfile',
help='Path to an SSL certificate file'
)
options.parser.add_argument(
'--ssl_keyfile',
help='Path to an SSL key file'
)
options.parser.add_argument(
'--gearman', action='append', metavar='HOST:PORT', default=[],
help='Gearman job servers'
)
options.parser.add_argument(
'--gearman_keepalive', action="store_true",
help='use KEEPALIVE to Gearman server'
)
options.parser.add_argument(
'--gearman_keepcnt', type=int, metavar='COUNT',
help='max keepalive probes to send before killing connection'
)
options.parser.add_argument(
'--gearman_keepidle', type=int, metavar='SECONDS',
help='seconds of idle time before sending keepalive probes'
)
options.parser.add_argument(
'--gearman_keepintvl', type=int, metavar='SECONDS',
help='seconds between TCP keepalive probes'
)
options.parser.add_argument(
'--gearman_ssl_ca', metavar='FILE',
help='Gearman SSL certificate authority'
)
options.parser.add_argument(
'--gearman_ssl_cert', metavar='FILE',
help='Gearman SSL certificate'
)
options.parser.add_argument(
'--gearman_ssl_key', metavar='FILE',
help='Gearman SSL key'
)
options.parser.add_argument(
'--stats_driver',
choices=known_drivers.keys(), default='dummy',
help='type of stats device to use'
)
options.parser.add_argument(
'--stats_poll_timeout', type=int, default=5,
help='gearman timeout value for initial ping request (in seconds)'
)
options.parser.add_argument(
'--stats_poll_timeout_retry', type=int, default=30,
help='gearman timeout value for retry ping request (in seconds)'
)
options.parser.add_argument(
'--stats_offline_ping_limit', type=int, default=10,
help='Number of failed pings to an OFFLINE device before deleting it'
)
options.parser.add_argument(
'--stats_device_error_limit', type=int, default=5,
help='Max number of simultaneous device failures to allow recovery on'
)
options.parser.add_argument(
'--number_of_servers', type=int, default=1,
help='number of Admin API servers, used to calculate which Admin API '
'server should stats ping next'
)
options.parser.add_argument(
'--server_id', type=int, default=0,
help='server ID of this server, used to calculate which Admin API '
'server should stats ping next (start at 0)'
)
# Datadog plugin options
options.parser.add_argument(
'--datadog_api_key', help='API key for datadog alerting'
)
options.parser.add_argument(
'--datadog_app_key', help='Application key for datadog alerting'
)
options.parser.add_argument(
'--datadog_message_tail',
help='Text to add at the end of a Datadog alert'
)
options.parser.add_argument(
'--datadog_tags',
help='A space separated list of tags for Datadog alerts'
)
options.parser.add_argument(
'--datadog_env', default='unknown',
help='Server enironment'
)
options.parser.add_argument(
'--node_pool_size', default=10, type=int,
help='Number of hot spare devices to keep in the pool'
)
options.parser.add_argument(
'--vip_pool_size', default=10, type=int,
help='Number of hot spare vips to keep in the pool'
)
options.parser.add_argument(
'--expire_days', default=0,
help='Number of days until deleted load balancers are expired'
)
args = options.run()
add_common_opts()
CONF(project='libra', version=__version__)
drivers = []
if not args.gearman:
# NOTE(shrews): Can't set a default in argparse method because the
# value is appended to the specified default.
args.gearman.append('localhost:4730')
elif not isinstance(args.gearman, list):
# NOTE(shrews): The Options object cannot intelligently handle
# creating a list from an option that may have multiple values.
# We convert it to the expected type here.
svr_list = args.gearman.split()
args.gearman = svr_list
required_args = ['db_sections', 'ssl_certfile', 'ssl_keyfile']
missing_args = 0
for req in required_args:
test_var = getattr(args, req)
if test_var is None:
missing_args += 1
sys.stderr.write(
'{app}: error: argument --{test_var} is required\n'
.format(app=os.path.basename(sys.argv[0]), test_var=req))
if missing_args:
return 2
if not isinstance(args.db_sections, list):
db_list = args.db_sections.split()
args.db_sections = db_list
pc = get_pecan_config()
if not args.nodaemon:
pidfile = daemon.pidfile.TimeoutPIDLockFile(args.pid, 10)
if CONF['daemon']:
pidfile = daemon.pidfile.TimeoutPIDLockFile(CONF['admin_api']['pid'],
10)
if daemon.runner.is_pidfile_stale(pidfile):
pidfile.break_lock()
context = daemon.DaemonContext(
@@ -274,26 +134,28 @@ def main():
umask=0o022,
pidfile=pidfile
)
if args.user:
context.uid = pwd.getpwnam(args.user).pw_uid
if args.group:
context.gid = grp.getgrnam(args.group).gr_gid
if CONF['user']:
context.uid = pwd.getpwnam(CONF['user']).pw_uid
if CONF['group']:
context.gid = grp.getgrnam(CONF['group']).gr_gid
context.open()
# Use the root logger due to lots of services using logger
logger = setup_logging('', args)
logger.info('Starting on {0}:{1}'.format(args.host, args.port))
api = setup_app(pc, args)
if not isinstance(args.stats_driver, list):
args.stats_driver = args.stats_driver.split()
for driver in args.stats_driver:
drivers.append(importutils.import_class(
known_drivers[driver]
))
MaintThreads(logger, args, drivers)
logger = libra_logging('', 'admin_api')
logger.info('Starting on {0}:{1}'.format(CONF['admin_api']['host'],
CONF['admin_api']['port']))
api = setup_app(pc)
for driver in CONF['admin_api']['stats_driver']:
drivers.append(importutils.import_class(known_drivers[driver]))
MaintThreads(logger, drivers)
sys.stderr = LogStdout(logger)
sock = server.make_socket(args.host, args.port,
args.ssl_keyfile, args.ssl_certfile)
sock = server.make_socket(CONF['admin_api']['host'],
CONF['admin_api']['port'],
CONF['admin_api']['ssl_keyfile'],
CONF['admin_api']['ssl_certfile'])
wsgi.server(sock, api, keepalive=False)
return 0

View File

@@ -12,13 +12,16 @@
# License for the specific language governing permissions and limitations
# under the License.
import threading
import ipaddress
import threading
from datetime import datetime
from libra.common.json_gearman import JSONGearmanClient
from gearman.constants import JOB_UNKNOWN
from oslo.config import cfg
from sqlalchemy import func
from libra.common.api.lbaas import Device, PoolBuilding, Vip, db_session
from libra.common.json_gearman import JSONGearmanClient
#TODO: Lots of duplication of code here, need to cleanup
@@ -29,12 +32,15 @@ class Pool(object):
PROBE_SECONDS = 30
VIPS_SECONDS = 50
def __init__(self, logger, args):
def __init__(self, logger):
self.logger = logger
self.args = args
self.probe_timer = None
self.delete_timer = None
self.vips_time = None
self.server_id = cfg.CONF['admin_api']['server_id']
self.number_of_servers = cfg.CONF['admin_api']['number_of_servers']
self.vip_pool_size = cfg.CONF['admin_api']['vip_pool_size']
self.node_pool_size = cfg.CONF['admin_api']['node_pool_size']
self.start_delete_sched()
self.start_probe_sched()
@@ -51,7 +57,7 @@ class Pool(object):
def delete_devices(self):
""" Searches for all devices in the DELETED state and removes them """
minute = datetime.now().minute
if self.args.server_id != minute % self.args.number_of_servers:
if self.server_id != minute % self.number_of_servers:
self.logger.info('Not our turn to run delete check, sleeping')
self.start_delete_sched()
return
@@ -72,7 +78,7 @@ class Pool(object):
if not message:
self.logger.info("No devices to delete")
else:
gear = GearmanWork(self.args, self.logger)
gear = GearmanWork(self.logger)
gear.send_delete_message(message)
except:
self.logger.exception("Exception when deleting devices")
@@ -81,7 +87,7 @@ class Pool(object):
def probe_vips(self):
minute = datetime.now().minute
if self.args.server_id != minute % self.args.number_of_servers:
if self.server_id != minute % self.number_of_servers:
self.logger.info('Not our turn to run vips check, sleeping')
self.start_vips_sched()
return
@@ -91,13 +97,13 @@ class Pool(object):
NULL = None # For pep8
vip_count = session.query(Vip).\
filter(Vip.device == NULL).count()
if vip_count >= self.args.vip_pool_size:
if vip_count >= self.vip_pool_size:
self.logger.info("Enough vips exist, no work to do")
session.commit()
self.start_vips_sched()
return
build_count = self.args.vip_pool_size - vip_count
build_count = self.vip_pool_size - vip_count
self._build_vips(build_count)
except:
self.logger.exception(
@@ -107,7 +113,7 @@ class Pool(object):
def probe_devices(self):
minute = datetime.now().minute
if self.args.server_id != minute % self.args.number_of_servers:
if self.server_id != minute % self.number_of_servers:
self.logger.info('Not our turn to run probe check, sleeping')
self.start_probe_sched()
return
@@ -116,18 +122,18 @@ class Pool(object):
with db_session() as session:
# Double check we have no outstanding builds assigned to us
session.query(PoolBuilding).\
filter(PoolBuilding.server_id == self.args.server_id).\
filter(PoolBuilding.server_id == self.server_id).\
delete()
session.flush()
dev_count = session.query(Device).\
filter(Device.status == 'OFFLINE').count()
if dev_count >= self.args.node_pool_size:
if dev_count >= self.node_pool_size:
self.logger.info("Enough devices exist, no work to do")
session.commit()
self.start_probe_sched()
return
build_count = self.args.node_pool_size - dev_count
build_count = self.node_pool_size - dev_count
built = session.query(func.sum(PoolBuilding.qty)).first()
if not built[0]:
built = 0
@@ -142,7 +148,7 @@ class Pool(object):
return
build_count -= built
building = PoolBuilding()
building.server_id = self.args.server_id
building.server_id = self.server_id
building.qty = build_count
session.add(building)
session.commit()
@@ -152,7 +158,7 @@ class Pool(object):
self._build_nodes(build_count)
with db_session() as session:
session.query(PoolBuilding).\
filter(PoolBuilding.server_id == self.args.server_id).\
filter(PoolBuilding.server_id == self.server_id).\
delete()
session.commit()
except:
@@ -166,7 +172,7 @@ class Pool(object):
while it < count:
message.append(dict(task='libra_pool_mgm', data=job_data))
it += 1
gear = GearmanWork(self.args, self.logger)
gear = GearmanWork(self.logger)
gear.send_create_message(message)
def _build_vips(self, count):
@@ -176,7 +182,7 @@ class Pool(object):
while it < count:
message.append(dict(task='libra_pool_mgm', data=job_data))
it += 1
gear = GearmanWork(self.args, self.logger)
gear = GearmanWork(self.logger)
gear.send_vips_message(message)
def start_probe_sched(self):
@@ -218,22 +224,22 @@ class Pool(object):
class GearmanWork(object):
def __init__(self, args, logger):
def __init__(self, logger):
self.logger = logger
if all([args.gearman_ssl_key, args.gearman_ssl_cert,
args.gearman_ssl_ca]):
# Use SSL connections to each Gearman job server.
ssl_server_list = []
for server in args.gearman:
ghost, gport = server.split(':')
ssl_server_list.append({'host': ghost,
'port': int(gport),
'keyfile': args.gearman_ssl_key,
'certfile': args.gearman_ssl_cert,
'ca_certs': args.gearman_ssl_ca})
self.gearman_client = JSONGearmanClient(ssl_server_list)
else:
self.gearman_client = JSONGearmanClient(args.gearman)
server_list = []
for server in cfg.CONF['gearman']['servers']:
host, port = server.split(':')
server_list.append({'host': host,
'port': int(port),
'keyfile': cfg.CONF['gearman']['ssl_key'],
'certfile': cfg.CONF['gearman']['ssl_cert'],
'ca_certs': cfg.CONF['gearman']['ssl_ca'],
'keepalive': cfg.CONF['gearman']['keepalive'],
'keepcnt': cfg.CONF['gearman']['keepcnt'],
'keepidle': cfg.CONF['gearman']['keepidle'],
'keepintvl': cfg.CONF['gearman']['keepintvl']
})
self.gearman_client = JSONGearmanClient(server_list)
def send_delete_message(self, message):
self.logger.info("Sending {0} gearman messages".format(len(message)))

View File

@@ -13,18 +13,23 @@
# under the License.
import threading
from datetime import datetime, timedelta
from oslo.config import cfg
from libra.common.api.lbaas import LoadBalancer, db_session
class ExpungeScheduler(object):
def __init__(self, logger, args):
def __init__(self, logger):
self.expunge_timer = None
if not args.expire_days:
self.expire_days = cfg.CONF['admin_api']['expire_days']
if not self.expire_days:
logger.info('Expunge not configured, disabled')
return
self.logger = logger
self.args = args
self.server_id = cfg.CONF['admin_api']['server_id']
self.number_of_servers = cfg.CONF['admin_api']['number_of_servers']
self.run_expunge()
def shutdown(self):
@@ -33,7 +38,7 @@ class ExpungeScheduler(object):
def run_expunge(self):
day = datetime.now().day
if self.args.server_id != day % self.args.number_of_servers:
if self.server_id != day % self.number_of_servers:
self.logger.info('Not our turn to run expunge check, sleeping')
self.expunge_timer = threading.Timer(
24 * 60 * 60, self.run_expunge, ()
@@ -41,7 +46,7 @@ class ExpungeScheduler(object):
with db_session() as session:
try:
exp = datetime.now() - timedelta(
days=int(self.args.expire_days)
days=int(self.expire_days)
)
exp_time = exp.strftime('%Y-%m-%d %H:%M:%S')
self.logger.info(

View File

@@ -19,9 +19,8 @@ known_drivers = {
class AlertDriver(object):
def __init__(self, logger, args):
def __init__(self, logger):
self.logger = logger
self.args = args
def send_alert(self, message, device_id):
raise NotImplementedError()

View File

@@ -11,22 +11,27 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
from libra.admin_api.stats.drivers.base import AlertDriver
from dogapi import dog_http_api as api
from oslo.config import cfg
from libra.admin_api.stats.drivers.base import AlertDriver
class DatadogDriver(AlertDriver):
    """Alert driver that reports device events to Datadog."""

    def __init__(self, logger):
        super(DatadogDriver, self).__init__(logger)
        # Credentials and event metadata now come from oslo.config
        # instead of the old argparse 'args' object (diff residue of
        # both versions collapsed to the post-migration code).
        api.api_key = cfg.CONF['admin_api']['datadog_api_key']
        api.application_key = cfg.CONF['admin_api']['datadog_app_key']
        self.dd_env = cfg.CONF['admin_api']['datadog_env']
        self.dd_tags = cfg.CONF['admin_api']['datadog_tags']
        self.dd_message_tail = cfg.CONF['admin_api']['datadog_message_tail']
def send_alert(self, message, device_id):
    """Emit a Datadog 'error' event for a failed load balancer."""
    title = 'Load balancer failure in {0}'.format(self.dd_env)
    text = 'Load balancer failed with message {0} {1}'.format(
        message, self.dd_message_tail
    )
    # datadog_tags is a whitespace-separated string of tag names.
    tags = self.dd_tags.split()
    resp = api.event_with_response(
        title, text, tags=tags, alert_type='error'
    )
    # NOTE(review): handling of 'resp' continues in lines elided by
    # the diff view (@@ -34,11 hunk) -- confirm against full file.
@@ -34,11 +39,11 @@ class DatadogDriver(AlertDriver):
def send_delete(self, message, device_id):
    """Emit a Datadog 'success' event for an unreachable/deleted device."""
    title = 'Load balancer unreachable in {0}'.\
        format(self.dd_env)
    text = 'Load balancer unreachable with message {0} {1}'.format(
        message, self.dd_message_tail
    )
    tags = self.dd_tags.split()
    resp = api.event_with_response(
        title, text, tags=tags, alert_type='success'
    )
    # NOTE(review): handling of 'resp' continues in lines elided by
    # the diff view -- confirm against full file.

View File

@@ -13,7 +13,10 @@
# under the License.
import threading
from datetime import datetime
from oslo.config import cfg
from libra.common.api.lbaas import LoadBalancer, Device, Node, db_session
from libra.admin_api.stats.stats_gearman import GearJobs
@@ -27,16 +30,17 @@ class Stats(object):
# Interval constants (seconds) used by the ping/offline schedulers.
PING_SECONDS = 15
OFFLINE_SECONDS = 45

def __init__(self, logger, drivers):
    """Stats poller: schedules device pings and offline checks.

    NOTE(review): collapsed the duplicated pre-/post-migration lines;
    all tunables now come from cfg.CONF['admin_api'].
    """
    self.logger = logger
    self.drivers = drivers
    self.ping_timer = None
    self.offline_timer = None
    self.ping_limit = cfg.CONF['admin_api']['stats_offline_ping_limit']
    self.error_limit = cfg.CONF['admin_api']['stats_device_error_limit']
    # server_id / number_of_servers decide whose turn it is to run
    # each scheduled check (see ping_lbs / check_offline_lbs).
    self.server_id = cfg.CONF['admin_api']['server_id']
    self.number_of_servers = cfg.CONF['admin_api']['number_of_servers']
    self.stats_driver = cfg.CONF['admin_api']['stats_driver']
    logger.info("Selected stats drivers: {0}".format(self.stats_driver))
    self.start_ping_sched()
    self.start_offline_sched()
@@ -50,7 +54,7 @@ class Stats(object):
def check_offline_lbs(self):
# Work out if it is our turn to run
minute = datetime.now().minute
if self.args.server_id != minute % self.args.number_of_servers:
if self.server_id != minute % self.number_of_servers:
self.logger.info('Not our turn to run OFFLINE check, sleeping')
self.start_offline_sched()
return
@@ -70,7 +74,7 @@ class Stats(object):
def ping_lbs(self):
# Work out if it is our turn to run
minute = datetime.now().minute
if self.args.server_id != minute % self.args.number_of_servers:
if self.server_id != minute % self.number_of_servers:
self.logger.info('Not our turn to run ping check, sleeping')
self.start_ping_sched()
return
@@ -100,7 +104,7 @@ class Stats(object):
return (0, 0)
for lb in devices:
node_list.append(lb.name)
gearman = GearJobs(self.logger, self.args)
gearman = GearJobs(self.logger)
failed_lbs, node_status = gearman.send_pings(node_list)
failed = len(failed_lbs)
if failed > self.error_limit:
@@ -136,7 +140,7 @@ class Stats(object):
return (0, 0)
for lb in devices:
node_list.append(lb.name)
gearman = GearJobs(self.logger, self.args)
gearman = GearJobs(self.logger)
failed_lbs = gearman.offline_check(node_list)
failed = len(failed_lbs)
if failed > self.error_limit:

View File

@@ -13,29 +13,30 @@
# under the License.
from gearman.constants import JOB_UNKNOWN
from oslo.config import cfg
from libra.common.json_gearman import JSONGearmanClient
class GearJobs(object):
    """Wrapper around a JSON Gearman client used for device polling.

    NOTE(review): the span contained the whole old args-based
    implementation (conditional SSL client) interleaved with the new
    cfg.CONF-based one; kept only the post-migration version, which
    always builds a per-server connection dict.
    """

    def __init__(self, logger):
        self.logger = logger
        self.poll_timeout = cfg.CONF['admin_api']['stats_poll_timeout']
        self.poll_retry = cfg.CONF['admin_api']['stats_poll_timeout_retry']

        # One connection entry per configured Gearman server, carrying
        # the shared SSL and TCP keepalive settings from [gearman].
        server_list = []
        for server in cfg.CONF['gearman']['servers']:
            host, port = server.split(':')
            server_list.append({'host': host,
                                'port': int(port),
                                'keyfile': cfg.CONF['gearman']['ssl_key'],
                                'certfile': cfg.CONF['gearman']['ssl_cert'],
                                'ca_certs': cfg.CONF['gearman']['ssl_ca'],
                                'keepalive': cfg.CONF['gearman']['keepalive'],
                                'keepcnt': cfg.CONF['gearman']['keepcnt'],
                                'keepidle': cfg.CONF['gearman']['keepidle'],
                                'keepintvl': cfg.CONF['gearman']['keepintvl']
                                })
        self.gm_client = JSONGearmanClient(server_list)
def send_pings(self, node_list):
# TODO: lots of duplicated code that needs cleanup
@@ -81,7 +82,7 @@ class GearJobs(object):
list_of_jobs.append(dict(task=str(node), data=job_data))
submitted_pings = self.gm_client.submit_multiple_jobs(
list_of_jobs, background=False, wait_until_complete=True,
poll_timeout=self.poll_timeout_retry
poll_timeout=self.poll_retry
)
for ping in submitted_pings:
if ping.state == JOB_UNKNOWN:

View File

@@ -51,8 +51,6 @@ def setup_app(pecan_config):
pecan_config = get_pecan_config()
config = dict(pecan_config)
config['database'] = CONF['api']['db_sections']
# NOTE: We support only 1 config file
config['conffile'] = CONF['config-file'][0]
config['swift'] = {
'swift_basepath': CONF['api']['swift_basepath'],
'swift_endpoint': CONF['api']['swift_endpoint']

View File

@@ -12,15 +12,17 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import sqlalchemy.types as types
import time
from oslo.config import cfg
from pecan import conf
from sqlalchemy import Table, Column, Integer, ForeignKey, create_engine
from sqlalchemy import INTEGER, VARCHAR, BIGINT
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, backref, sessionmaker, Session
import sqlalchemy.types as types
import time
import ConfigParser
from pecan import conf
import logging
DeclarativeBase = declarative_base()
@@ -170,15 +172,13 @@ class RoutingSession(Session):
return engine
def _build_engines(self):
config = ConfigParser.SafeConfigParser()
config.read([conf.conffile])
if 'debug' in conf.app and conf.app.debug:
echo = True
else:
echo = False
for section in conf.database:
db_conf = config._sections[section]
db_conf = cfg.CONF[section]
conn_string = '''mysql+mysqlconnector://%s:%s@%s:%d/%s''' % (
db_conf['username'],

View File

@@ -11,18 +11,13 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
import argparse
import logging
import logging.handlers
import logstash
import os
import os.path
import sys
import ConfigParser
from oslo.config import cfg
from libra import __version__, __release__
from logging_handler import CompressedTimedRotatingFileHandler
from logging_handler import NewlineFormatter
@@ -105,196 +100,22 @@ def add_common_opts():
CONF.register_cli_opts(common_cli_opts)
"""
Common options parser.
Options can come from either the command line or a configuration file
in INI format. Command line options will override options from the config
file.
The following sections of the config file will be parsed:
[global]
[shortname]
The [global] section can be used for options common to any program using
this class. It is optional and does not need to be present in the file.
The [shortname] section can be used for program-specific options.
The value of 'shortname' comes from the Options.__init__() method.
For example, this Options object:
options = Options('worker', 'Worker Daemon')
Will read the [global] and [worker] config file sections. All other
sections will be ignored.
Boolean values in the configuration file must be given a true/false
value.
"""
class Options(object):
def __init__(self, shortname, title):
# 'shortname' selects the [shortname] config-file section;
# 'title' appears in the generated --help description.
self.title = title
self.shortname = shortname
self._arg_defaults = dict()
# Builds self.parser and loads config-file defaults immediately.
self._parse_args()
def _get_defaults_from_config(self, parser):
"""
Use the config file to get the defaults. This should be called
immediately after the option for the config file is defined, but
before all other options are defined.
"""
# NOTE(review): remaining_args is unused; parse_known_args is used
# only to pick up -c/--config before the real parser exists.
args, remaining_args = parser.parse_known_args()
if args.config and os.path.exists(args.config):
# Python 2 ConfigParser module (see file imports).
config = ConfigParser.SafeConfigParser()
config.read([args.config])
# global section not required, so don't error
try:
global_items = config.items('global')
except ConfigParser.NoSectionError:
global_items = []
# program-specific section not required, so don't error
try:
section_items = config.items(self.shortname)
except ConfigParser.NoSectionError:
section_items = []
# [shortname] entries win over [global] on key collisions.
self._arg_defaults.update(dict(global_items + section_items))
# Convert booleans to correct type
for k, v in self._arg_defaults.items():
if v.upper() == 'FALSE':
self._arg_defaults[k] = False
elif v.upper() == 'TRUE':
self._arg_defaults[k] = True
def _parse_args(self):
# We use a temporary parser to get the config file and read those
# options in as defaults, then continue parsing the rest.
tmp_parser = argparse.ArgumentParser(add_help=False)
tmp_parser.add_argument(
'-c', '--config', dest='config', default='/etc/libra/libra.ini'
)
self._get_defaults_from_config(tmp_parser)
# Real parser; defaults from the config file are applied later in
# run() via set_defaults().
self.parser = argparse.ArgumentParser(
description='Libra {title}'.format(title=self.title)
)
# Config repeated here just so it will show up in the automatically
# generated help from ArgumentParser.
self.parser.add_argument(
'-c', '--config', dest='config', default='/etc/libra/libra.ini',
metavar='FILE', help='configuration file'
)
self.parser.add_argument(
'--version', dest='version', action='store_true',
help='print version and exit'
)
self.parser.add_argument(
'--release', dest='release', action='store_true',
help='print full release version info and exit'
)
self.parser.add_argument(
'-n', '--nodaemon', dest='nodaemon', action='store_true',
help='do not run in daemon mode'
)
self.parser.add_argument(
'--syslog', dest='syslog', action='store_true',
help='use syslog for logging output'
)
self.parser.add_argument(
'--syslog-socket', dest='syslog_socket',
default='/dev/log',
help='socket to use for syslog connection (default: /dev/log)'
)
self.parser.add_argument(
'--syslog-facility', dest='syslog_facility',
default='local7',
help='syslog logging facility (default: local7)'
)
self.parser.add_argument(
'-d', '--debug', dest='debug', action='store_true',
help='log debugging output'
)
self.parser.add_argument(
'-v', '--verbose', dest='verbose', action='store_true',
help='log more verbose output'
)
# PID/log file defaults embed the program's shortname.
self.parser.add_argument(
'-p', '--pid', dest='pid',
default='/var/run/libra/libra_{name}.pid'.format(
name=self.shortname
),
help='PID file to use (ignored with --nodaemon)'
)
self.parser.add_argument(
'-l', '--logfile', dest='logfile',
default='/var/log/libra/libra_{name}.log'.format(
name=self.shortname
),
help='log file to use (ignored with --nodaemon)'
)
self.parser.add_argument(
'--logstash', dest='logstash', metavar="HOST:PORT",
help='send logs to logstash at "host:port"'
)
self.parser.add_argument(
'--user', dest='user',
help='user to use for daemon mode'
)
self.parser.add_argument(
'--group', dest='group',
help='group to use for daemon mode'
)
def run(self):
# We have to set defaults from the config AFTER all add_argument()
# calls because otherwise, the default= value used in those calls
# would take precedence.
if self._arg_defaults:
self.parser.set_defaults(**self._arg_defaults)
args = self.parser.parse_args()
# --version and --release print and exit before the caller sees args.
if args.version:
print("Libra toolset version %s" % __version__)
sys.exit(0)
if args.release:
print("Libra toolset release %s" % __release__)
sys.exit(0)
return args
def libra_logging(name, section):
"""
Temporary conversion function for utilities using oslo.config.
"""
# Shim object exposing oslo.config values under the attribute names
# that setup_logging() historically read from the argparse result.
# 'section' names the program's config group (e.g. 'admin_api').
class args(object):
debug = CONF['debug']
verbose = CONF['verbose']
logfile = CONF[section]['logfile']
# setup_logging expects 'nodaemon'; oslo.config stores 'daemon'.
nodaemon = not CONF['daemon']
syslog = CONF['syslog']
syslog_socket = CONF['syslog_socket']
syslog_facility = CONF['syslog_facility']
logstash = CONF['logstash']
return setup_logging(name, args)
def setup_logging(name, args):
"""
Shared routine for setting up logging. Depends on some common options
(nodaemon, logfile, debug, verbose) being set.
"""
logfile = args.logfile
if args.nodaemon:
debug = CONF['debug']
verbose = CONF['verbose']
logfile = CONF[section]['logfile']
daemon = CONF['daemon']
syslog = CONF['syslog']
syslog_socket = CONF['syslog_socket']
syslog_facility = CONF['syslog_facility']
logstash_opt = CONF['logstash']
if not daemon:
logfile = None
# Timestamped formatter
@@ -309,12 +130,12 @@ def setup_logging(name, args):
'%(name)s - %(levelname)s - %(message)s'
)
if args.syslog and not args.nodaemon:
handler = logging.handlers.SysLogHandler(address=args.syslog_socket,
facility=args.syslog_facility)
if syslog and daemon:
handler = logging.handlers.SysLogHandler(address=syslog_socket,
facility=syslog_facility)
handler.setFormatter(simple_formatter)
elif args.logstash:
logstash_host, logstash_port = args.logstash.split(':')
elif logstash_opt:
logstash_host, logstash_port = logstash_opt.split(':')
handler = logstash.LogstashHandler(logstash_host, int(logstash_port))
handler.setFormatter(ts_formatter)
elif logfile:
@@ -329,9 +150,9 @@ def setup_logging(name, args):
logger = logging.getLogger(name)
logger.addHandler(handler)
if args.debug:
if debug:
logger.setLevel(level=logging.DEBUG)
elif args.verbose:
elif verbose:
logger.setLevel(level=logging.INFO)
return logger