Initial directory skeleton
Implements: blueprint initial-skeleton Change-Id: If6559f5e1adf4aee70f5d92b5dfcf9fead86b205
This commit is contained in:
parent
507ff77b29
commit
74eee09e4b
@ -1,4 +1,5 @@
|
|||||||
[DEFAULT]
|
[DEFAULT]
|
||||||
test_command=${PYTHON:-python} -m subunit.run discover octavia $LISTOPT $IDOPTION
|
#test_command=${PYTHON:-python} -m subunit.run discover octavia $LISTOPT $IDOPTION
|
||||||
|
test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./octavia/tests} $LISTOPT $IDOPTION
|
||||||
test_id_option=--load-list $IDFILE
|
test_id_option=--load-list $IDFILE
|
||||||
test_list_option=--list
|
test_list_option=--list
|
||||||
|
0
client/__init__.py
Normal file
0
client/__init__.py
Normal file
19
etc/octavia.conf
Normal file
19
etc/octavia.conf
Normal file
@ -0,0 +1,19 @@
|
|||||||
|
[DEFAULT]
|
||||||
|
# Print more verbose output (set logging level to INFO instead of default WARNING level).
|
||||||
|
# verbose = False
|
||||||
|
# Print debugging output (set logging level to DEBUG instead of default WARNING level).
|
||||||
|
# debug = False
|
||||||
|
|
||||||
|
[database]
|
||||||
|
# This line MUST be changed to actually run the plugin.
|
||||||
|
# Example:
|
||||||
|
# connection = mysql://root:pass@127.0.0.1:3306/octavia
|
||||||
|
# Replace 127.0.0.1 above with the IP address of the database used by the
|
||||||
|
# main octavia server. (Leave it as is if the database runs on this host.)
|
||||||
|
|
||||||
|
# connection = sqlite://
|
||||||
|
|
||||||
|
# NOTE: In deployment the [database] section and its connection attribute may
|
||||||
|
# be set in the corresponding core plugin '.ini' file. However, it is suggested
|
||||||
|
# to put the [database] section and its connection attribute in this
|
||||||
|
# configuration file.
|
@ -0,0 +1,19 @@
|
|||||||
|
# Copyright 2011-2014 OpenStack Foundation
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import gettext
|
||||||
|
|
||||||
|
|
||||||
|
gettext.install('octavia', unicode=1)
|
0
octavia/amphorae/__init__.py
Normal file
0
octavia/amphorae/__init__.py
Normal file
0
octavia/amphorae/backends/__init__.py
Normal file
0
octavia/amphorae/backends/__init__.py
Normal file
0
octavia/amphorae/drivers/__init__.py
Normal file
0
octavia/amphorae/drivers/__init__.py
Normal file
0
octavia/amphorae/drivers/base.py
Normal file
0
octavia/amphorae/drivers/base.py
Normal file
0
octavia/amphorae/drivers/haproxy-simple/__init__.py
Normal file
0
octavia/amphorae/drivers/haproxy-simple/__init__.py
Normal file
0
octavia/common/__init__.py
Normal file
0
octavia/common/__init__.py
Normal file
135
octavia/common/config.py
Normal file
135
octavia/common/config.py
Normal file
@ -0,0 +1,135 @@
|
|||||||
|
# Copyright 2011 VMware, Inc., 2014 A10 Networks
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
Routines for configuring Octavia
|
||||||
|
"""
|
||||||
|
|
||||||
|
from oslo.config import cfg
|
||||||
|
from oslo.db import options as db_options
|
||||||
|
from oslo import messaging
|
||||||
|
# from paste import deploy
|
||||||
|
|
||||||
|
from octavia.common import utils
|
||||||
|
from octavia.openstack.common import log as logging
|
||||||
|
from octavia import version
|
||||||
|
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
core_opts = [
|
||||||
|
cfg.StrOpt('bind_host', default='0.0.0.0',
|
||||||
|
help=_("The host IP to bind to")),
|
||||||
|
cfg.IntOpt('bind_port', default=9696,
|
||||||
|
help=_("The port to bind to")),
|
||||||
|
cfg.StrOpt('api_paste_config', default="api-paste.ini",
|
||||||
|
help=_("The API paste config file to use")),
|
||||||
|
cfg.StrOpt('api_extensions_path', default="",
|
||||||
|
help=_("The path for API extensions")),
|
||||||
|
cfg.StrOpt('auth_strategy', default='keystone',
|
||||||
|
help=_("The type of authentication to use")),
|
||||||
|
cfg.BoolOpt('allow_bulk', default=True,
|
||||||
|
help=_("Allow the usage of the bulk API")),
|
||||||
|
cfg.BoolOpt('allow_pagination', default=False,
|
||||||
|
help=_("Allow the usage of the pagination")),
|
||||||
|
cfg.BoolOpt('allow_sorting', default=False,
|
||||||
|
help=_("Allow the usage of the sorting")),
|
||||||
|
cfg.StrOpt('pagination_max_limit', default="-1",
|
||||||
|
help=_("The maximum number of items returned in a single "
|
||||||
|
"response, value was 'infinite' or negative integer "
|
||||||
|
"means no limit")),
|
||||||
|
cfg.StrOpt('host', default=utils.get_hostname(),
|
||||||
|
help=_("The hostname Octavia is running on")),
|
||||||
|
cfg.StrOpt('nova_url',
|
||||||
|
default='http://127.0.0.1:8774/v2',
|
||||||
|
help=_('URL for connection to nova')),
|
||||||
|
cfg.StrOpt('nova_admin_username',
|
||||||
|
help=_('Username for connecting to nova in admin context')),
|
||||||
|
cfg.StrOpt('nova_admin_password',
|
||||||
|
help=_('Password for connection to nova in admin context'),
|
||||||
|
secret=True),
|
||||||
|
cfg.StrOpt('nova_admin_tenant_id',
|
||||||
|
help=_('The uuid of the admin nova tenant')),
|
||||||
|
cfg.StrOpt('nova_admin_auth_url',
|
||||||
|
default='http://localhost:5000/v2.0',
|
||||||
|
help=_('Authorization URL for connecting to nova in admin '
|
||||||
|
'context')),
|
||||||
|
cfg.StrOpt('nova_ca_certificates_file',
|
||||||
|
help=_('CA file for novaclient to verify server certificates')),
|
||||||
|
cfg.BoolOpt('nova_api_insecure', default=False,
|
||||||
|
help=_("If True, ignore any SSL validation issues")),
|
||||||
|
cfg.StrOpt('nova_region_name',
|
||||||
|
help=_('Name of nova region to use. Useful if keystone manages'
|
||||||
|
' more than one region.')),
|
||||||
|
]
|
||||||
|
|
||||||
|
core_cli_opts = []
|
||||||
|
|
||||||
|
# Register the configuration options
|
||||||
|
cfg.CONF.register_opts(core_opts)
|
||||||
|
cfg.CONF.register_cli_opts(core_cli_opts)
|
||||||
|
|
||||||
|
# Ensure that the control exchange is set correctly
|
||||||
|
messaging.set_transport_defaults(control_exchange='octavia')
|
||||||
|
_SQL_CONNECTION_DEFAULT = 'sqlite://'
|
||||||
|
# Update the default QueuePool parameters. These can be tweaked by the
|
||||||
|
# configuration variables - max_pool_size, max_overflow and pool_timeout
|
||||||
|
db_options.set_defaults(cfg.CONF,
|
||||||
|
connection=_SQL_CONNECTION_DEFAULT,
|
||||||
|
sqlite_db='', max_pool_size=10,
|
||||||
|
max_overflow=20, pool_timeout=10)
|
||||||
|
|
||||||
|
|
||||||
|
def init(args, **kwargs):
|
||||||
|
cfg.CONF(args=args, project='octavia',
|
||||||
|
version='%%prog %s' % version.version_info.release_string(),
|
||||||
|
**kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
def setup_logging(conf):
|
||||||
|
"""Sets up the logging options for a log with supplied name.
|
||||||
|
|
||||||
|
:param conf: a cfg.ConfOpts object
|
||||||
|
"""
|
||||||
|
product_name = "octavia"
|
||||||
|
logging.setup(product_name)
|
||||||
|
LOG.info(_("Logging enabled!"))
|
||||||
|
|
||||||
|
|
||||||
|
# def load_paste_app(app_name):
|
||||||
|
# """Builds and returns a WSGI app from a paste config file.
|
||||||
|
|
||||||
|
# :param app_name: Name of the application to load
|
||||||
|
# :raises ConfigFilesNotFoundError when config file cannot be located
|
||||||
|
# :raises RuntimeError when application cannot be loaded from config file
|
||||||
|
# """
|
||||||
|
|
||||||
|
# config_path = cfg.CONF.find_file(cfg.CONF.api_paste_config)
|
||||||
|
# if not config_path:
|
||||||
|
# raise cfg.ConfigFilesNotFoundError(
|
||||||
|
# config_files=[cfg.CONF.api_paste_config])
|
||||||
|
# config_path = os.path.abspath(config_path)
|
||||||
|
# LOG.info(_("Config paste file: %s"), config_path)
|
||||||
|
|
||||||
|
# try:
|
||||||
|
# app = deploy.loadapp("config:%s" % config_path, name=app_name)
|
||||||
|
# except (LookupError, ImportError):
|
||||||
|
# msg = (_("Unable to load %(app_name)s from "
|
||||||
|
# "configuration file %(config_path)s.") %
|
||||||
|
# {'app_name': app_name,
|
||||||
|
# 'config_path': config_path})
|
||||||
|
# LOG.exception(msg)
|
||||||
|
# raise RuntimeError(msg)
|
||||||
|
# return app
|
28
octavia/common/constants.py
Normal file
28
octavia/common/constants.py
Normal file
@ -0,0 +1,28 @@
|
|||||||
|
# Copyright (c) 2012-2014 OpenStack Foundation.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
# implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
LB_METHOD_ROUND_ROBIN = 'ROUND_ROBIN'
|
||||||
|
LB_METHOD_LEAST_CONNECTIONS = 'LEAST_CONNECTIONS'
|
||||||
|
LB_METHOD_SOURCE_IP = 'SOURCE_IP'
|
||||||
|
|
||||||
|
PROTOCOL_TCP = 'TCP'
|
||||||
|
PROTOCOL_HTTP = 'HTTP'
|
||||||
|
PROTOCOL_HTTPS = 'HTTPS'
|
||||||
|
PROTOCOL_UDP = 'UDP'
|
||||||
|
|
||||||
|
HEALTH_MONITOR_PING = 'PING'
|
||||||
|
HEALTH_MONITOR_TCP = 'TCP'
|
||||||
|
HEALTH_MONITOR_HTTP = 'HTTP'
|
||||||
|
HEALTH_MONITOR_HTTPS = 'HTTPS'
|
59
octavia/common/exceptions.py
Normal file
59
octavia/common/exceptions.py
Normal file
@ -0,0 +1,59 @@
|
|||||||
|
# Copyright 2011 VMware, Inc, 2014 A10 Networks
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
Octavia base exception handling.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from octavia.openstack.common import excutils
|
||||||
|
|
||||||
|
|
||||||
|
class OctaviaException(Exception):
|
||||||
|
"""Base Octavia Exception.
|
||||||
|
|
||||||
|
To correctly use this class, inherit from it and define
|
||||||
|
a 'message' property. That message will get printf'd
|
||||||
|
with the keyword arguments provided to the constructor.
|
||||||
|
"""
|
||||||
|
message = _("An unknown exception occurred.")
|
||||||
|
|
||||||
|
def __init__(self, **kwargs):
|
||||||
|
try:
|
||||||
|
super(OctaviaException, self).__init__(self.message % kwargs)
|
||||||
|
self.msg = self.message % kwargs
|
||||||
|
except Exception:
|
||||||
|
with excutils.save_and_reraise_exception() as ctxt:
|
||||||
|
if not self.use_fatal_exceptions():
|
||||||
|
ctxt.reraise = False
|
||||||
|
# at least get the core message out if something happened
|
||||||
|
super(OctaviaException, self).__init__(self.message)
|
||||||
|
|
||||||
|
def __unicode__(self):
|
||||||
|
return unicode(self.msg)
|
||||||
|
|
||||||
|
def use_fatal_exceptions(self):
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
class BadRequest(OctaviaException):
|
||||||
|
message = _('Bad %(resource)s request: %(msg)s')
|
||||||
|
|
||||||
|
|
||||||
|
class NotFound(OctaviaException):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class NotAuthorized(OctaviaException):
|
||||||
|
message = _("Not authorized.")
|
76
octavia/common/utils.py
Normal file
76
octavia/common/utils.py
Normal file
@ -0,0 +1,76 @@
|
|||||||
|
# Copyright 2011, VMware, Inc., 2014 A10 Networks
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
#
|
||||||
|
# Borrowed from nova code base, more utilities will be added/borrowed as and
|
||||||
|
# when needed.
|
||||||
|
|
||||||
|
"""Utilities and helper functions."""
|
||||||
|
|
||||||
|
import datetime
|
||||||
|
import hashlib
|
||||||
|
import random
|
||||||
|
import socket
|
||||||
|
|
||||||
|
# from eventlet.green import subprocess
|
||||||
|
# from oslo.config import cfg
|
||||||
|
|
||||||
|
from octavia.openstack.common import excutils
|
||||||
|
from octavia.openstack.common import log as logging
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def get_hostname():
|
||||||
|
return socket.gethostname()
|
||||||
|
|
||||||
|
|
||||||
|
def get_random_string(length):
|
||||||
|
"""Get a random hex string of the specified length.
|
||||||
|
|
||||||
|
based on Cinder library
|
||||||
|
cinder/transfer/api.py
|
||||||
|
"""
|
||||||
|
rndstr = ""
|
||||||
|
random.seed(datetime.datetime.now().microsecond)
|
||||||
|
while len(rndstr) < length:
|
||||||
|
rndstr += hashlib.sha224(str(random.random())).hexdigest()
|
||||||
|
|
||||||
|
return rndstr[0:length]
|
||||||
|
|
||||||
|
|
||||||
|
class exception_logger(object):
|
||||||
|
"""Wrap a function and log raised exception
|
||||||
|
|
||||||
|
:param logger: the logger to log the exception default is LOG.exception
|
||||||
|
|
||||||
|
:returns: origin value if no exception raised; re-raise the exception if
|
||||||
|
any occurred
|
||||||
|
|
||||||
|
"""
|
||||||
|
def __init__(self, logger=None):
|
||||||
|
self.logger = logger
|
||||||
|
|
||||||
|
def __call__(self, func):
|
||||||
|
if self.logger is None:
|
||||||
|
LOG = logging.getLogger(func.__module__)
|
||||||
|
self.logger = LOG.exception
|
||||||
|
|
||||||
|
def call(*args, **kwargs):
|
||||||
|
try:
|
||||||
|
return func(*args, **kwargs)
|
||||||
|
except Exception as e:
|
||||||
|
with excutils.save_and_reraise_exception():
|
||||||
|
self.logger(e)
|
||||||
|
return call
|
0
octavia/controller/__init__.py
Normal file
0
octavia/controller/__init__.py
Normal file
0
octavia/db/__init__.py
Normal file
0
octavia/db/__init__.py
Normal file
0
octavia/db/migration/__init__.py
Normal file
0
octavia/db/migration/__init__.py
Normal file
0
octavia/network/__init__.py
Normal file
0
octavia/network/__init__.py
Normal file
0
octavia/network/base.py
Normal file
0
octavia/network/base.py
Normal file
0
octavia/network/drivers/__init__.py
Normal file
0
octavia/network/drivers/__init__.py
Normal file
3
octavia/network/drivers/neutron/README
Normal file
3
octavia/network/drivers/neutron/README
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
|
||||||
|
This is the only module in all of octavia that is allowed to import neutron directly.
|
||||||
|
|
0
octavia/network/drivers/neutron/__init__.py
Normal file
0
octavia/network/drivers/neutron/__init__.py
Normal file
0
octavia/network/drivers/nova-network/__init__.py
Normal file
0
octavia/network/drivers/nova-network/__init__.py
Normal file
0
octavia/openstack/__init__.py
Normal file
0
octavia/openstack/__init__.py
Normal file
17
octavia/openstack/common/__init__.py
Normal file
17
octavia/openstack/common/__init__.py
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import six
|
||||||
|
|
||||||
|
|
||||||
|
six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox'))
|
0
octavia/openstack/common/cache/__init__.py
vendored
Normal file
0
octavia/openstack/common/cache/__init__.py
vendored
Normal file
250
octavia/openstack/common/cache/backends.py
vendored
Normal file
250
octavia/openstack/common/cache/backends.py
vendored
Normal file
@ -0,0 +1,250 @@
|
|||||||
|
# Copyright 2013 Red Hat, Inc.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import abc
|
||||||
|
|
||||||
|
import six
|
||||||
|
|
||||||
|
|
||||||
|
NOTSET = object()
|
||||||
|
|
||||||
|
|
||||||
|
@six.add_metaclass(abc.ABCMeta)
|
||||||
|
class BaseCache(object):
|
||||||
|
"""Base Cache Abstraction
|
||||||
|
|
||||||
|
:params parsed_url: Parsed url object.
|
||||||
|
:params options: A dictionary with configuration parameters
|
||||||
|
for the cache. For example:
|
||||||
|
|
||||||
|
- default_ttl: An integer defining the default ttl for keys.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, parsed_url, options=None):
|
||||||
|
self._parsed_url = parsed_url
|
||||||
|
self._options = options or {}
|
||||||
|
self._default_ttl = int(self._options.get('default_ttl', 0))
|
||||||
|
|
||||||
|
@abc.abstractmethod
|
||||||
|
def _set(self, key, value, ttl, not_exists=False):
|
||||||
|
"""Implementations of this class have to override this method."""
|
||||||
|
|
||||||
|
def set(self, key, value, ttl, not_exists=False):
|
||||||
|
"""Sets or updates a cache entry
|
||||||
|
|
||||||
|
.. note:: Thread-safety is required and has to be guaranteed by the
|
||||||
|
backend implementation.
|
||||||
|
|
||||||
|
:params key: Item key as string.
|
||||||
|
:type key: `unicode string`
|
||||||
|
:params value: Value to assign to the key. This can be anything that
|
||||||
|
is handled by the current backend.
|
||||||
|
:params ttl: Key's timeout in seconds. 0 means no timeout.
|
||||||
|
:type ttl: int
|
||||||
|
:params not_exists: If True, the key will be set if it doesn't exist.
|
||||||
|
Otherwise, it'll always be set.
|
||||||
|
:type not_exists: bool
|
||||||
|
|
||||||
|
:returns: True if the operation succeeds, False otherwise.
|
||||||
|
"""
|
||||||
|
if ttl is None:
|
||||||
|
ttl = self._default_ttl
|
||||||
|
|
||||||
|
return self._set(key, value, ttl, not_exists)
|
||||||
|
|
||||||
|
def __setitem__(self, key, value):
|
||||||
|
self.set(key, value, self._default_ttl)
|
||||||
|
|
||||||
|
def setdefault(self, key, value):
|
||||||
|
"""Sets the key value to `value` if it doesn't exist
|
||||||
|
|
||||||
|
:params key: Item key as string.
|
||||||
|
:type key: `unicode string`
|
||||||
|
:params value: Value to assign to the key. This can be anything that
|
||||||
|
is handled by the current backend.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
return self[key]
|
||||||
|
except KeyError:
|
||||||
|
self[key] = value
|
||||||
|
return value
|
||||||
|
|
||||||
|
@abc.abstractmethod
|
||||||
|
def _get(self, key, default):
|
||||||
|
"""Implementations of this class have to override this method."""
|
||||||
|
|
||||||
|
def get(self, key, default=None):
|
||||||
|
"""Gets one item from the cache
|
||||||
|
|
||||||
|
.. note:: Thread-safety is required and it has to be guaranteed
|
||||||
|
by the backend implementation.
|
||||||
|
|
||||||
|
:params key: Key for the item to retrieve from the cache.
|
||||||
|
:params default: The default value to return.
|
||||||
|
|
||||||
|
:returns: `key`'s value in the cache if it exists, otherwise
|
||||||
|
`default` should be returned.
|
||||||
|
"""
|
||||||
|
return self._get(key, default)
|
||||||
|
|
||||||
|
def __getitem__(self, key):
|
||||||
|
value = self.get(key, NOTSET)
|
||||||
|
|
||||||
|
if value is NOTSET:
|
||||||
|
raise KeyError
|
||||||
|
|
||||||
|
return value
|
||||||
|
|
||||||
|
@abc.abstractmethod
|
||||||
|
def __delitem__(self, key):
|
||||||
|
"""Removes an item from cache.
|
||||||
|
|
||||||
|
.. note:: Thread-safety is required and it has to be guaranteed by
|
||||||
|
the backend implementation.
|
||||||
|
|
||||||
|
:params key: The key to remove.
|
||||||
|
|
||||||
|
:returns: The key value if there's one
|
||||||
|
"""
|
||||||
|
|
||||||
|
@abc.abstractmethod
|
||||||
|
def _clear(self):
|
||||||
|
"""Implementations of this class have to override this method."""
|
||||||
|
|
||||||
|
def clear(self):
|
||||||
|
"""Removes all items from the cache.
|
||||||
|
|
||||||
|
.. note:: Thread-safety is required and it has to be guaranteed by
|
||||||
|
the backend implementation.
|
||||||
|
"""
|
||||||
|
return self._clear()
|
||||||
|
|
||||||
|
@abc.abstractmethod
|
||||||
|
def _incr(self, key, delta):
|
||||||
|
"""Implementations of this class have to override this method."""
|
||||||
|
|
||||||
|
def incr(self, key, delta=1):
|
||||||
|
"""Increments the value for a key
|
||||||
|
|
||||||
|
:params key: The key for the value to be incremented
|
||||||
|
:params delta: Number of units by which to increment the value.
|
||||||
|
Pass a negative number to decrement the value.
|
||||||
|
|
||||||
|
:returns: The new value
|
||||||
|
"""
|
||||||
|
return self._incr(key, delta)
|
||||||
|
|
||||||
|
@abc.abstractmethod
|
||||||
|
def _append_tail(self, key, tail):
|
||||||
|
"""Implementations of this class have to override this method."""
|
||||||
|
|
||||||
|
def append_tail(self, key, tail):
|
||||||
|
"""Appends `tail` to `key`'s value.
|
||||||
|
|
||||||
|
:params key: The key of the value to which `tail` should be appended.
|
||||||
|
:params tail: The list of values to append to the original.
|
||||||
|
|
||||||
|
:returns: The new value
|
||||||
|
"""
|
||||||
|
|
||||||
|
if not hasattr(tail, "__iter__"):
|
||||||
|
raise TypeError('Tail must be an iterable')
|
||||||
|
|
||||||
|
if not isinstance(tail, list):
|
||||||
|
# NOTE(flaper87): Make sure we pass a list
|
||||||
|
# down to the implementation. Not all drivers
|
||||||
|
# have support for generators, sets or other
|
||||||
|
# iterables.
|
||||||
|
tail = list(tail)
|
||||||
|
|
||||||
|
return self._append_tail(key, tail)
|
||||||
|
|
||||||
|
def append(self, key, value):
|
||||||
|
"""Appends `value` to `key`'s value.
|
||||||
|
|
||||||
|
:params key: The key of the value to which `tail` should be appended.
|
||||||
|
:params value: The value to append to the original.
|
||||||
|
|
||||||
|
:returns: The new value
|
||||||
|
"""
|
||||||
|
return self.append_tail(key, [value])
|
||||||
|
|
||||||
|
@abc.abstractmethod
|
||||||
|
def __contains__(self, key):
|
||||||
|
"""Verifies that a key exists.
|
||||||
|
|
||||||
|
:params key: The key to verify.
|
||||||
|
|
||||||
|
:returns: True if the key exists, otherwise False.
|
||||||
|
"""
|
||||||
|
|
||||||
|
@abc.abstractmethod
|
||||||
|
def _get_many(self, keys, default):
|
||||||
|
"""Implementations of this class have to override this method."""
|
||||||
|
return ((k, self.get(k, default=default)) for k in keys)
|
||||||
|
|
||||||
|
def get_many(self, keys, default=NOTSET):
|
||||||
|
"""Gets keys' value from cache
|
||||||
|
|
||||||
|
:params keys: List of keys to retrieve.
|
||||||
|
:params default: The default value to return for each key that is not
|
||||||
|
in the cache.
|
||||||
|
|
||||||
|
:returns: A generator of (key, value)
|
||||||
|
"""
|
||||||
|
return self._get_many(keys, default)
|
||||||
|
|
||||||
|
@abc.abstractmethod
|
||||||
|
def _set_many(self, data, ttl):
|
||||||
|
"""Implementations of this class have to override this method."""
|
||||||
|
|
||||||
|
for key, value in data.items():
|
||||||
|
self.set(key, value, ttl=ttl)
|
||||||
|
|
||||||
|
def set_many(self, data, ttl=None):
|
||||||
|
"""Puts several items into the cache at once
|
||||||
|
|
||||||
|
Depending on the backend, this operation may or may not be efficient.
|
||||||
|
The default implementation calls set for each (key, value) pair
|
||||||
|
passed, other backends support set_many operations as part of their
|
||||||
|
protocols.
|
||||||
|
|
||||||
|
:params data: A dictionary like {key: val} to store in the cache.
|
||||||
|
:params ttl: Key's timeout in seconds.
|
||||||
|
"""
|
||||||
|
|
||||||
|
if ttl is None:
|
||||||
|
ttl = self._default_ttl
|
||||||
|
|
||||||
|
self._set_many(data, ttl)
|
||||||
|
|
||||||
|
def update(self, **kwargs):
|
||||||
|
"""Sets several (key, value) paris.
|
||||||
|
|
||||||
|
Refer to the `set_many` docstring.
|
||||||
|
"""
|
||||||
|
self.set_many(kwargs, ttl=self._default_ttl)
|
||||||
|
|
||||||
|
@abc.abstractmethod
|
||||||
|
def _unset_many(self, keys):
|
||||||
|
"""Implementations of this class have to override this method."""
|
||||||
|
for key in keys:
|
||||||
|
del self[key]
|
||||||
|
|
||||||
|
def unset_many(self, keys):
|
||||||
|
"""Removes several keys from the cache at once
|
||||||
|
|
||||||
|
:params keys: List of keys to unset.
|
||||||
|
"""
|
||||||
|
self._unset_many(keys)
|
78
octavia/openstack/common/cache/cache.py
vendored
Normal file
78
octavia/openstack/common/cache/cache.py
vendored
Normal file
@ -0,0 +1,78 @@
|
|||||||
|
# Copyright 2013 Red Hat, Inc.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""Cache library.
|
||||||
|
|
||||||
|
Supported configuration options:
|
||||||
|
|
||||||
|
`default_backend`: Name of the cache backend to use.
|
||||||
|
`key_namespace`: Namespace under which keys will be created.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from six.moves.urllib import parse
|
||||||
|
from stevedore import driver
|
||||||
|
|
||||||
|
|
||||||
|
def _get_olso_configs():
|
||||||
|
"""Returns the oslo.config options to register."""
|
||||||
|
# NOTE(flaper87): Oslo config should be
|
||||||
|
# optional. Instead of doing try / except
|
||||||
|
# at the top of this file, lets import cfg
|
||||||
|
# here and assume that the caller of this
|
||||||
|
# function already took care of this dependency.
|
||||||
|
from oslo.config import cfg
|
||||||
|
|
||||||
|
return [
|
||||||
|
cfg.StrOpt('cache_url', default='memory://',
|
||||||
|
help='URL to connect to the cache back end.')
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
def register_oslo.configs(conf):
|
||||||
|
"""Registers a cache configuration options
|
||||||
|
|
||||||
|
:params conf: Config object.
|
||||||
|
:type conf: `cfg.ConfigOptions`
|
||||||
|
"""
|
||||||
|
conf.register_opts(_get_olso_configs())
|
||||||
|
|
||||||
|
|
||||||
|
def get_cache(url='memory://'):
|
||||||
|
"""Loads the cache backend
|
||||||
|
|
||||||
|
This function loads the cache backend
|
||||||
|
specified in the given configuration.
|
||||||
|
|
||||||
|
:param conf: Configuration instance to use
|
||||||
|
"""
|
||||||
|
|
||||||
|
parsed = parse.urlparse(url)
|
||||||
|
backend = parsed.scheme
|
||||||
|
|
||||||
|
query = parsed.query
|
||||||
|
# NOTE(flaper87): We need the following hack
|
||||||
|
# for python versions < 2.7.5. Previous versions
|
||||||
|
# of python parsed query params just for 'known'
|
||||||
|
# schemes. This was changed in this patch:
|
||||||
|
# http://hg.python.org/cpython/rev/79e6ff3d9afd
|
||||||
|
if not query and '?' in parsed.path:
|
||||||
|
query = parsed.path.split('?', 1)[-1]
|
||||||
|
parameters = parse.parse_qsl(query)
|
||||||
|
kwargs = {'options': dict(parameters)}
|
||||||
|
|
||||||
|
mgr = driver.DriverManager('octavia.openstack.common.cache.backends', backend,
|
||||||
|
invoke_on_load=True,
|
||||||
|
invoke_args=[parsed],
|
||||||
|
invoke_kwds=kwargs)
|
||||||
|
return mgr.driver
|
126
octavia/openstack/common/context.py
Normal file
126
octavia/openstack/common/context.py
Normal file
@ -0,0 +1,126 @@
|
|||||||
|
# Copyright 2011 OpenStack Foundation.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
Simple class that stores security context information in the web request.
|
||||||
|
|
||||||
|
Projects should subclass this class if they wish to enhance the request
|
||||||
|
context or provide additional information in their specific WSGI pipeline.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import itertools
|
||||||
|
import uuid
|
||||||
|
|
||||||
|
|
||||||
|
def generate_request_id():
    """Return a new unique request id as ASCII bytes, e.g. b'req-<uuid4>'."""
    return ('req-' + str(uuid.uuid4())).encode('ascii')
|
||||||
|
|
||||||
|
|
||||||
|
class RequestContext(object):
    """Carries security-context information for a single web request.

    Stores the identity under which the caller accesses the system
    (user/tenant/domain) together with auxiliary request metadata
    (auth token, request id, admin/read-only flags, instance uuid).

    Projects should subclass this class if they wish to enhance the
    request context or provide additional information in their specific
    WSGI pipeline.
    """

    # Template for the compact 'user_identity' string emitted by to_dict().
    user_idt_format = '{user} {tenant} {domain} {user_domain} {p_domain}'

    def __init__(self, auth_token=None, user=None, tenant=None, domain=None,
                 user_domain=None, project_domain=None, is_admin=False,
                 read_only=False, show_deleted=False, request_id=None,
                 instance_uuid=None):
        self.auth_token = auth_token
        self.user = user
        self.tenant = tenant
        self.domain = domain
        self.user_domain = user_domain
        self.project_domain = project_domain
        self.is_admin = is_admin
        self.read_only = read_only
        self.show_deleted = show_deleted
        self.instance_uuid = instance_uuid
        # Mint a fresh request id only when the caller did not supply one.
        self.request_id = request_id or generate_request_id()

    def to_dict(self):
        """Return a plain-dict rendering of this context.

        Includes a formatted 'user_identity' summary in addition to the
        raw attributes.
        """
        identity = self.user_idt_format.format(
            user=self.user or '-',
            tenant=self.tenant or '-',
            domain=self.domain or '-',
            user_domain=self.user_domain or '-',
            p_domain=self.project_domain or '-')

        rendered = {
            'user': self.user,
            'tenant': self.tenant,
            'domain': self.domain,
            'user_domain': self.user_domain,
            'project_domain': self.project_domain,
            'is_admin': self.is_admin,
            'read_only': self.read_only,
            'show_deleted': self.show_deleted,
            'auth_token': self.auth_token,
            'request_id': self.request_id,
            'instance_uuid': self.instance_uuid,
            'user_identity': identity,
        }
        return rendered

    @classmethod
    def from_dict(cls, ctx):
        """Rebuild a context from a dict such as the one to_dict() emits.

        Missing keys fall back to the constructor defaults.
        """
        return cls(
            auth_token=ctx.get("auth_token"),
            user=ctx.get("user"),
            tenant=ctx.get("tenant"),
            domain=ctx.get("domain"),
            user_domain=ctx.get("user_domain"),
            project_domain=ctx.get("project_domain"),
            is_admin=ctx.get("is_admin", False),
            read_only=ctx.get("read_only", False),
            show_deleted=ctx.get("show_deleted", False),
            request_id=ctx.get("request_id"),
            instance_uuid=ctx.get("instance_uuid"))
|
||||||
|
|
||||||
|
|
||||||
|
def get_admin_context(show_deleted=False):
    """Build a RequestContext with admin rights and no user identity.

    :param show_deleted: whether the context should expose deleted records.
    :returns: an admin-flagged RequestContext.
    """
    admin_ctx = RequestContext(None,
                               tenant=None,
                               is_admin=True,
                               show_deleted=show_deleted)
    return admin_ctx
|
||||||
|
|
||||||
|
|
||||||
|
def get_context_from_function_and_args(function, args, kwargs):
    """Find an arg of type RequestContext and return it.

    This is useful in a couple of decorators where we don't
    know much about the function we're wrapping.

    Keyword-argument values are searched before positional arguments;
    returns None when no RequestContext is present.
    """
    for candidate in itertools.chain(kwargs.values(), args):
        if isinstance(candidate, RequestContext):
            return candidate
    return None
|
||||||
|
|
||||||
|
|
||||||
|
def is_user_context(context):
    """Indicates if the request context is a normal (non-admin) user.

    :param context: a RequestContext-like object, or None.
    :returns: True only when a context is present, is not admin, and
        carries both a user and a project identity.
    """
    if not context:
        return False
    if context.is_admin:
        return False
    # BUG FIX: the original read ``context.user_id``/``context.project_id``,
    # attributes that RequestContext does not define (it exposes ``user``
    # and ``tenant``), so it raised AttributeError for such contexts.
    # Accept either naming, preferring the *_id spelling when present.
    user = getattr(context, 'user_id', None) or getattr(context, 'user', None)
    project = (getattr(context, 'project_id', None) or
               getattr(context, 'tenant', None))
    if not user or not project:
        return False
    return True
|
145
octavia/openstack/common/eventlet_backdoor.py
Normal file
145
octavia/openstack/common/eventlet_backdoor.py
Normal file
@ -0,0 +1,145 @@
|
|||||||
|
# Copyright (c) 2012 OpenStack Foundation.
|
||||||
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from __future__ import print_function
|
||||||
|
|
||||||
|
import errno
|
||||||
|
import gc
|
||||||
|
import os
|
||||||
|
import pprint
|
||||||
|
import socket
|
||||||
|
import sys
|
||||||
|
import traceback
|
||||||
|
|
||||||
|
import eventlet
|
||||||
|
import eventlet.backdoor
|
||||||
|
import greenlet
|
||||||
|
from oslo.config import cfg
|
||||||
|
|
||||||
|
from octavia.openstack.common.gettextutils import _LI
|
||||||
|
from octavia.openstack.common import log as logging
|
||||||
|
|
||||||
|
# Help text shared by the backdoor_port option definition and by the
# error raised when the configured value cannot be parsed.
help_for_backdoor_port = (
    "Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
    "in listening on a random tcp port number; <port> results in listening "
    "on the specified port number (and not enabling backdoor if that port "
    "is in use); and <start>:<end> results in listening on the smallest "
    "unused port number within the specified range of port numbers. The "
    "chosen port is displayed in the service's log file.")
# Option controlling the eventlet backdoor REPL port; None disables it.
eventlet_backdoor_opts = [
    cfg.StrOpt('backdoor_port',
               help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
]

# Register the option on the global config object at import time so the
# option is available before initialize_if_enabled() is called.
CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class EventletBackdoorConfigValueError(Exception):
    """Raised when the configured backdoor_port value cannot be parsed."""

    def __init__(self, port_range, help_msg, ex):
        details = {'range': port_range, 'ex': ex, 'help': help_msg}
        message = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
                   '%(help)s' % details)
        super(EventletBackdoorConfigValueError, self).__init__(message)
        # Keep the offending value around for callers that want to report it.
        self.port_range = port_range
|
||||||
|
|
||||||
|
|
||||||
|
def _dont_use_this():
|
||||||
|
print("Don't use this, just disconnect instead")
|
||||||
|
|
||||||
|
|
||||||
|
def _find_objects(t):
|
||||||
|
return [o for o in gc.get_objects() if isinstance(o, t)]
|
||||||
|
|
||||||
|
|
||||||
|
def _print_greenthreads():
    """Print the index, object and stack of every live greenthread."""
    for index, gthread in enumerate(_find_objects(greenlet.greenlet)):
        print(index, gthread)
        traceback.print_stack(gthread.gr_frame)
        print()
|
||||||
|
|
||||||
|
|
||||||
|
def _print_nativethreads():
|
||||||
|
for threadId, stack in sys._current_frames().items():
|
||||||
|
print(threadId)
|
||||||
|
traceback.print_stack(stack)
|
||||||
|
print()
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_port_range(port_range):
|
||||||
|
if ':' not in port_range:
|
||||||
|
start, end = port_range, port_range
|
||||||
|
else:
|
||||||
|
start, end = port_range.split(':', 1)
|
||||||
|
try:
|
||||||
|
start, end = int(start), int(end)
|
||||||
|
if end < start:
|
||||||
|
raise ValueError
|
||||||
|
return start, end
|
||||||
|
except ValueError as ex:
|
||||||
|
raise EventletBackdoorConfigValueError(port_range, ex,
|
||||||
|
help_for_backdoor_port)
|
||||||
|
|
||||||
|
|
||||||
|
def _listen(host, start_port, end_port, listen_func):
|
||||||
|
try_port = start_port
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
return listen_func((host, try_port))
|
||||||
|
except socket.error as exc:
|
||||||
|
if (exc.errno != errno.EADDRINUSE or
|
||||||
|
try_port >= end_port):
|
||||||
|
raise
|
||||||
|
try_port += 1
|
||||||
|
|
||||||
|
|
||||||
|
def initialize_if_enabled():
    """Start the eventlet backdoor REPL server if backdoor_port is set.

    :returns: the TCP port actually bound (useful when backdoor_port is 0
        or a range), or None when CONF.backdoor_port is unset.
    :raises EventletBackdoorConfigValueError: if the configured value
        cannot be parsed (via _parse_port_range).
    """
    # Convenience helpers exposed inside the backdoor Python shell.
    backdoor_locals = {
        'exit': _dont_use_this,      # So we don't exit the entire process
        'quit': _dont_use_this,      # So we don't exit the entire process
        'fo': _find_objects,
        'pgt': _print_greenthreads,
        'pnt': _print_nativethreads,
    }

    if CONF.backdoor_port is None:
        return None

    start_port, end_port = _parse_port_range(str(CONF.backdoor_port))

    # NOTE(johannes): The standard sys.displayhook will print the value of
    # the last expression and set it to __builtin__._, which overwrites
    # the __builtin__._ that gettext sets. Let's switch to using pprint
    # since it won't interact poorly with gettext, and it's easier to
    # read the output too.
    def displayhook(val):
        if val is not None:
            pprint.pprint(val)
    sys.displayhook = displayhook

    sock = _listen('localhost', start_port, end_port, eventlet.listen)

    # In the case of backdoor port being zero, a port number is assigned by
    # listen().  In any case, pull the port number out here.
    port = sock.getsockname()[1]
    LOG.info(
        _LI('Eventlet backdoor listening on %(port)s for process %(pid)d') %
        {'port': port, 'pid': os.getpid()}
    )
    # Serve the backdoor in a greenthread so this call returns immediately.
    eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
                     locals=backdoor_locals)
    return port
|
113
octavia/openstack/common/excutils.py
Normal file
113
octavia/openstack/common/excutils.py
Normal file
@ -0,0 +1,113 @@
|
|||||||
|
# Copyright 2011 OpenStack Foundation.
|
||||||
|
# Copyright 2012, Red Hat, Inc.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
Exception related utilities.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
import traceback
|
||||||
|
|
||||||
|
import six
|
||||||
|
|
||||||
|
from octavia.openstack.common.gettextutils import _LE
|
||||||
|
|
||||||
|
|
||||||
|
class save_and_reraise_exception(object):
    """Save current exception, run some code and then re-raise.

    In some cases the exception context can be cleared, resulting in None
    being attempted to be re-raised after an exception handler is run. This
    can happen when eventlet switches greenthreads or when running an
    exception handler, code raises and catches an exception. In both
    cases the exception context will be cleared.

    To work around this, we save the exception state, run handler code, and
    then re-raise the original exception. If another exception occurs, the
    saved exception is logged and the new exception is re-raised.

    In some cases the caller may not want to re-raise the exception, and
    for those circumstances this context provides a reraise flag that
    can be used to suppress the exception. For example::

      except Exception:
          with save_and_reraise_exception() as ctxt:
              decide_if_need_reraise()
              if not should_be_reraised:
                  ctxt.reraise = False

    If another exception occurs and reraise flag is False,
    the saved exception will not be logged.

    If the caller wants to raise new exception during exception handling
    he/she sets reraise to False initially with an ability to set it back to
    True if needed::

      except Exception:
          with save_and_reraise_exception(reraise=False) as ctxt:
              [if statements to determine whether to raise a new exception]
              # Not raising a new exception, so reraise
              ctxt.reraise = True
    """
    def __init__(self, reraise=True):
        # When True (the default), __exit__ re-raises the saved exception.
        self.reraise = reraise

    def __enter__(self):
        # Capture the exception that is active as the block is entered;
        # code running inside the block may clear the interpreter's
        # exception state, so we keep our own copy.
        self.type_, self.value, self.tb, = sys.exc_info()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            # A *new* exception escaped the block: log the one we saved
            # (unless suppression was requested) and return False so the
            # new exception propagates.
            if self.reraise:
                logging.error(_LE('Original exception being dropped: %s'),
                              traceback.format_exception(self.type_,
                                                         self.value,
                                                         self.tb))
            return False
        if self.reraise:
            # Block completed cleanly: re-raise the originally saved
            # exception together with its traceback.
            six.reraise(self.type_, self.value, self.tb)
|
||||||
|
|
||||||
|
|
||||||
|
def forever_retry_uncaught_exceptions(infunc):
    """Decorator that restarts *infunc* whenever it raises.

    The wrapped callable is re-invoked in an endless loop; it only returns
    if *infunc* returns. Every uncaught exception is logged, but repeats of
    the same exception message are logged at most once per minute to avoid
    flooding the logs.
    """
    def inner_func(*args, **kwargs):
        # Timestamp of the last log entry, the message it reported, and
        # how many identical exceptions have been seen since then.
        last_log_time = 0
        last_exc_message = None
        exc_count = 0
        while True:
            try:
                return infunc(*args, **kwargs)
            except Exception as exc:
                this_exc_message = six.u(str(exc))
                if this_exc_message == last_exc_message:
                    exc_count += 1
                else:
                    exc_count = 1
                # Do not log any more frequently than once a minute unless
                # the exception message changes
                cur_time = int(time.time())
                if (cur_time - last_log_time > 60 or
                        this_exc_message != last_exc_message):
                    logging.exception(
                        _LE('Unexpected exception occurred %d time(s)... '
                            'retrying.') % exc_count)
                    last_log_time = cur_time
                    last_exc_message = this_exc_message
                    exc_count = 0
                # This should be a very rare event. In case it isn't, do
                # a sleep.
                time.sleep(1)
    return inner_func
|
146
octavia/openstack/common/fileutils.py
Normal file
146
octavia/openstack/common/fileutils.py
Normal file
@ -0,0 +1,146 @@
|
|||||||
|
# Copyright 2011 OpenStack Foundation.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import contextlib
|
||||||
|
import errno
|
||||||
|
import os
|
||||||
|
import tempfile
|
||||||
|
|
||||||
|
from octavia.openstack.common import excutils
|
||||||
|
from octavia.openstack.common import log as logging
|
||||||
|
|
||||||
|
# Module-level logger.
LOG = logging.getLogger(__name__)

# Cache mapping filename -> {'data': file contents, 'mtime': mtime at read},
# maintained by read_cached_file()/delete_cached_file() below.
_FILE_CACHE = {}
|
||||||
|
|
||||||
|
|
||||||
|
def ensure_tree(path):
    """Create a directory (and any ancestor directories required).

    Idempotent: an already-existing directory is not an error, but a
    non-directory occupying *path* re-raises the original OSError.

    :param path: Directory to create
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        already_a_dir = (exc.errno == errno.EEXIST and os.path.isdir(path))
        if not already_a_dir:
            raise
|
||||||
|
|
||||||
|
|
||||||
|
def read_cached_file(filename, force_reload=False):
    """Read from a file if it has been modified.

    :param force_reload: Whether to reload the file.
    :returns: A tuple (reloaded, data); *reloaded* is True when the cached
        copy was refreshed on this call.
    """
    global _FILE_CACHE

    if force_reload:
        delete_cached_file(filename)

    mtime = os.path.getmtime(filename)
    entry = _FILE_CACHE.setdefault(filename, {})

    # Refresh when there is no cached copy yet or the file is newer
    # than what we last read.
    stale = not entry or mtime > entry.get('mtime', 0)
    if stale:
        LOG.debug("Reloading cached file %s" % filename)
        with open(filename) as fap:
            entry['data'] = fap.read()
        entry['mtime'] = mtime
    return (stale, entry['data'])
|
||||||
|
|
||||||
|
|
||||||
|
def delete_cached_file(filename):
    """Delete cached file if present.

    :param filename: filename to delete
    """
    global _FILE_CACHE

    # Drop the cache entry; a missing key is not an error.
    _FILE_CACHE.pop(filename, None)
|
||||||
|
|
||||||
|
|
||||||
|
def delete_if_exists(path, remove=os.unlink):
    """Delete a file, but ignore file not found error.

    :param path: File to delete
    :param remove: Optional function to remove passed path
    """
    try:
        remove(path)
    except OSError as err:
        # Only "file not found" is tolerated; everything else propagates.
        if err.errno != errno.ENOENT:
            raise
|
||||||
|
|
||||||
|
|
||||||
|
@contextlib.contextmanager
def remove_path_on_error(path, remove=delete_if_exists):
    """Protect code that wants to operate on PATH atomically.
    Any exception will cause PATH to be removed.

    The original exception is preserved and re-raised after the removal
    (via excutils.save_and_reraise_exception).

    :param path: File to work with
    :param remove: Optional function to remove passed path
    """

    try:
        yield
    except Exception:
        with excutils.save_and_reraise_exception():
            remove(path)
|
||||||
|
|
||||||
|
|
||||||
|
def file_open(*args, **kwargs):
    """Open file

    see built-in open() documentation for more details

    Note: The reason this is kept in a separate module is to easily
    be able to provide a stub module that doesn't alter system
    state at all (for unit tests)
    """
    handle = open(*args, **kwargs)
    return handle
|
||||||
|
|
||||||
|
|
||||||
|
def write_to_tempfile(content, path=None, suffix='', prefix='tmp'):
    """Create temporary file or use existing file.

    This util is needed for creating temporary file with
    specified content, suffix and prefix. If path is not None,
    it will be used for writing content. If the path doesn't
    exist it'll be created.

    :param content: content for temporary file.
    :param path: same as parameter 'dir' for mkstemp
    :param suffix: same as parameter 'suffix' for mkstemp
    :param prefix: same as parameter 'prefix' for mkstemp

    For example: it can be used in database tests for creating
    configuration files.
    """
    if path:
        ensure_tree(path)

    fd, abspath = tempfile.mkstemp(suffix=suffix, dir=path, prefix=prefix)
    try:
        os.write(fd, content)
    finally:
        # Always release the descriptor, even if the write fails.
        os.close(fd)
    return abspath
|
0
octavia/openstack/common/fixture/__init__.py
Normal file
0
octavia/openstack/common/fixture/__init__.py
Normal file
85
octavia/openstack/common/fixture/config.py
Normal file
85
octavia/openstack/common/fixture/config.py
Normal file
@ -0,0 +1,85 @@
|
|||||||
|
#
|
||||||
|
# Copyright 2013 Mirantis, Inc.
|
||||||
|
# Copyright 2013 OpenStack Foundation
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import fixtures
|
||||||
|
from oslo.config import cfg
|
||||||
|
import six
|
||||||
|
|
||||||
|
|
||||||
|
class Config(fixtures.Fixture):
    """Fixture that lets a test override oslo.config settings.

    All overrides are reverted and all options registered through the
    fixture are unregistered when the fixture is cleaned up; `conf` is
    reset on cleanup.
    """

    def __init__(self, conf=cfg.CONF):
        self.conf = conf

    def setUp(self):
        super(Config, self).setUp()
        # NOTE(morganfainberg): unregister must be added to cleanup before
        # reset is because cleanup works in reverse order of registered items,
        # and a reset must occur before unregistering options can occur.
        self.addCleanup(self._unregister_config_opts)
        self.addCleanup(self.conf.reset)
        self._registered_config_opts = {}

    def config(self, **kw):
        """Override configuration values.

        The keyword arguments are the names of configuration options to
        override and their values.

        If a `group` argument is supplied, the overrides are applied to
        the specified configuration option group, otherwise the overrides
        are applied to the ``default`` group.
        """
        group = kw.pop('group', None)
        for opt_name, value in six.iteritems(kw):
            self.conf.set_override(opt_name, value, group)

    def _unregister_config_opts(self):
        # Undo every registration recorded by register_opt().
        for group, opts in self._registered_config_opts.items():
            self.conf.unregister_opts(opts, group=group)

    def register_opt(self, opt, group=None):
        """Register a single option for the test run.

        Options registered in this manner will automatically be unregistered
        during cleanup.

        If a `group` argument is supplied, it will register the new option
        to that group, otherwise the option is registered to the ``default``
        group.
        """
        self.conf.register_opt(opt, group=group)
        bucket = self._registered_config_opts.setdefault(group, set())
        bucket.add(opt)

    def register_opts(self, opts, group=None):
        """Register multiple options for the test run.

        This works in the same manner as register_opt() but takes a list of
        options as the first argument. All arguments will be registered to the
        same group if the ``group`` argument is supplied, otherwise all options
        will be registered to the ``default`` group.
        """
        for opt in opts:
            self.register_opt(opt, group=group)
|
51
octavia/openstack/common/fixture/lockutils.py
Normal file
51
octavia/openstack/common/fixture/lockutils.py
Normal file
@ -0,0 +1,51 @@
|
|||||||
|
# Copyright 2011 OpenStack Foundation.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import fixtures
|
||||||
|
|
||||||
|
from octavia.openstack.common import lockutils
|
||||||
|
|
||||||
|
|
||||||
|
class LockFixture(fixtures.Fixture):
    """External locking fixture.

    This fixture is basically an alternative to the synchronized decorator with
    the external flag so that tearDowns and addCleanups will be included in
    the lock context for locking between tests. The fixture is recommended to
    be the first line in a test method, like so::

        def test_method(self):
            self.useFixture(LockFixture)
                ...

    or the first line in setUp if all the test methods in the class are
    required to be serialized. Something like::

        class TestCase(testtools.testcase):
            def setUp(self):
                self.useFixture(LockFixture)
                super(TestCase, self).setUp()
                    ...

    This is because addCleanups are put on a LIFO queue that gets run after the
    test method exits. (either by completing or raising an exception)
    """
    def __init__(self, name, lock_file_prefix=None):
        # Build (but do not yet enter) the external lock context manager.
        self.mgr = lockutils.lock(name, lock_file_prefix, True)

    def setUp(self):
        super(LockFixture, self).setUp()
        # Register the release *before* acquiring: cleanups run LIFO, so
        # the lock is released only after every later-registered cleanup
        # (including the test's own tearDown work) has completed.
        self.addCleanup(self.mgr.__exit__, None, None, None)
        self.lock = self.mgr.__enter__()
|
34
octavia/openstack/common/fixture/logging.py
Normal file
34
octavia/openstack/common/fixture/logging.py
Normal file
@ -0,0 +1,34 @@
|
|||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import fixtures
|
||||||
|
|
||||||
|
|
||||||
|
def get_logging_handle_error_fixture():
    """returns a fixture to make logging raise formatting exceptions.

    Usage:
       self.useFixture(logging.get_logging_handle_error_fixture())
    """
    target = 'logging.Handler.handleError'
    return fixtures.MonkeyPatch(target, _handleError)
|
||||||
|
|
||||||
|
|
||||||
|
def _handleError(self, record):
|
||||||
|
"""Monkey patch for logging.Handler.handleError.
|
||||||
|
|
||||||
|
The default handleError just logs the error to stderr but we want
|
||||||
|
the option of actually raising an exception.
|
||||||
|
"""
|
||||||
|
raise
|
62
octavia/openstack/common/fixture/mockpatch.py
Normal file
62
octavia/openstack/common/fixture/mockpatch.py
Normal file
@ -0,0 +1,62 @@
|
|||||||
|
# Copyright 2010 United States Government as represented by the
|
||||||
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
##############################################################################
|
||||||
|
##############################################################################
|
||||||
|
#
|
||||||
|
# DO NOT MODIFY THIS FILE
|
||||||
|
#
|
||||||
|
# This file is being graduated to the oslotest library. Please make all
|
||||||
|
# changes there, and only backport critical fixes here. - dhellmann
|
||||||
|
#
|
||||||
|
##############################################################################
|
||||||
|
##############################################################################
|
||||||
|
|
||||||
|
import fixtures
|
||||||
|
import mock
|
||||||
|
|
||||||
|
|
||||||
|
class PatchObject(fixtures.Fixture):
    """Deal with code around mock.

    Fixture wrapper around mock.patch.object: the patch is started in
    setUp and automatically stopped on cleanup; the created mock is
    exposed as ``self.mock``.
    """

    def __init__(self, obj, attr, new=mock.DEFAULT, **kwargs):
        # Target object and attribute name to patch, plus extra kwargs
        # forwarded verbatim to mock.patch.object.
        self.obj = obj
        self.attr = attr
        self.kwargs = kwargs
        self.new = new

    def setUp(self):
        super(PatchObject, self).setUp()
        _p = mock.patch.object(self.obj, self.attr, self.new, **self.kwargs)
        self.mock = _p.start()
        # Cleanup restores the original attribute when the fixture ends.
        self.addCleanup(_p.stop)
|
||||||
|
|
||||||
|
|
||||||
|
class Patch(fixtures.Fixture):

    """Deal with code around mock.patch.

    Fixture wrapper around mock.patch (dotted-name form): the patch is
    started in setUp and automatically stopped on cleanup; the created
    mock is exposed as ``self.mock``.
    """

    def __init__(self, obj, new=mock.DEFAULT, **kwargs):
        # Dotted-path target string for mock.patch, plus extra kwargs
        # forwarded verbatim.
        self.obj = obj
        self.kwargs = kwargs
        self.new = new

    def setUp(self):
        super(Patch, self).setUp()
        _p = mock.patch(self.obj, self.new, **self.kwargs)
        self.mock = _p.start()
        # Cleanup restores the original target when the fixture ends.
        self.addCleanup(_p.stop)
|
43
octavia/openstack/common/fixture/moxstubout.py
Normal file
43
octavia/openstack/common/fixture/moxstubout.py
Normal file
@ -0,0 +1,43 @@
|
|||||||
|
# Copyright 2010 United States Government as represented by the
|
||||||
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
##############################################################################
|
||||||
|
##############################################################################
|
||||||
|
#
|
||||||
|
# DO NOT MODIFY THIS FILE
|
||||||
|
#
|
||||||
|
# This file is being graduated to the oslotest library. Please make all
|
||||||
|
# changes there, and only backport critical fixes here. - dhellmann
|
||||||
|
#
|
||||||
|
##############################################################################
|
||||||
|
##############################################################################
|
||||||
|
|
||||||
|
import fixtures
|
||||||
|
from six.moves import mox
|
||||||
|
|
||||||
|
|
||||||
|
class MoxStubout(fixtures.Fixture):
    """Deal with code around mox and stubout as a fixture."""

    def setUp(self):
        """Create a Mox instance and register verification/unstub cleanups.

        After setUp, ``self.mox`` is a fresh ``mox.Mox`` and ``self.stubs``
        is its stub manager.  Cleanups run in reverse registration order, so
        ``VerifyAll`` runs before ``UnsetStubs`` at teardown.
        """
        super(MoxStubout, self).setUp()
        # emulate some of the mox stuff, we can't use the metaclass
        # because it screws with our generators
        self.mox = mox.Mox()
        self.stubs = self.mox.stubs
        self.addCleanup(self.mox.UnsetStubs)
        self.addCleanup(self.mox.VerifyAll)
|
479
octavia/openstack/common/gettextutils.py
Normal file
479
octavia/openstack/common/gettextutils.py
Normal file
@ -0,0 +1,479 @@
|
|||||||
|
# Copyright 2012 Red Hat, Inc.
|
||||||
|
# Copyright 2013 IBM Corp.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
gettext for openstack-common modules.
|
||||||
|
|
||||||
|
Usual usage in an openstack.common module:
|
||||||
|
|
||||||
|
from octavia.openstack.common.gettextutils import _
|
||||||
|
"""
|
||||||
|
|
||||||
|
import copy
|
||||||
|
import gettext
|
||||||
|
import locale
|
||||||
|
from logging import handlers
|
||||||
|
import os
|
||||||
|
|
||||||
|
from babel import localedata
|
||||||
|
import six
|
||||||
|
|
||||||
|
_AVAILABLE_LANGUAGES = {}
|
||||||
|
|
||||||
|
# FIXME(dhellmann): Remove this when moving to oslo.i18n.
|
||||||
|
USE_LAZY = False
|
||||||
|
|
||||||
|
|
||||||
|
class TranslatorFactory(object):
    """Create translator functions for a single gettext domain.

    Instances produce plain translation callables (``primary``) and
    per-log-level callables (``log_info`` etc.) that consult the
    module-level ``USE_LAZY`` flag at call time.
    """

    def __init__(self, domain, localedir=None):
        """Establish a set of translation functions for the domain.

        :param domain: Name of translation domain,
                       specifying a message catalog.
        :type domain: str
        :param localedir: Directory with translation catalogs.  When None,
                          the ``<DOMAIN>_LOCALEDIR`` environment variable
                          is consulted instead.
        :type localedir: str
        """
        self.domain = domain
        if localedir is None:
            localedir = os.environ.get(domain.upper() + '_LOCALEDIR')
        self.localedir = localedir

    def _make_translation_func(self, domain=None):
        """Return a new translation function ready for use.

        Takes into account whether or not lazy translation is being
        done.

        The domain can be specified to override the default from the
        factory, but the localedir from the factory is always used
        because we assume the log-level translation catalogs are
        installed in the same directory as the main application
        catalog.

        """
        if domain is None:
            domain = self.domain
        t = gettext.translation(domain,
                                localedir=self.localedir,
                                fallback=True)
        # Use the appropriate method of the translation object based
        # on the python version.
        m = t.gettext if six.PY3 else t.ugettext

        def f(msg):
            """oslo.i18n.gettextutils translation function."""
            # USE_LAZY is read at call time (not closure-capture time), so
            # enable_lazy() affects functions created before it was called.
            if USE_LAZY:
                return Message(msg, domain=domain)
            return m(msg)
        return f

    @property
    def primary(self):
        "The default translation function."
        return self._make_translation_func()

    def _make_log_translation_func(self, level):
        # Log-level catalogs live in per-level domains, e.g. 'octavia-log-info'.
        return self._make_translation_func(self.domain + '-log-' + level)

    @property
    def log_info(self):
        "Translate info-level log messages."
        return self._make_log_translation_func('info')

    @property
    def log_warning(self):
        "Translate warning-level log messages."
        return self._make_log_translation_func('warning')

    @property
    def log_error(self):
        "Translate error-level log messages."
        return self._make_log_translation_func('error')

    @property
    def log_critical(self):
        "Translate critical-level log messages."
        return self._make_log_translation_func('critical')
|
||||||
|
|
||||||
|
|
||||||
|
# NOTE(dhellmann): When this module moves out of the incubator into
# oslo.i18n, these global variables can be moved to an integration
# module within each application.

# Create the global translation functions.
_translators = TranslatorFactory('octavia')

# The primary translation function using the well-known name "_"
_ = _translators.primary

# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
|
||||||
|
|
||||||
|
# NOTE(dhellmann): End of globals that will move to the application's
|
||||||
|
# integration module.
|
||||||
|
|
||||||
|
|
||||||
|
def enable_lazy():
    """Convenience function for configuring _() to use lazy gettext

    Call this at the start of execution to enable the gettextutils._
    function to use lazy gettext functionality. This is useful if
    your project is importing _ directly instead of using the
    gettextutils.install() way of importing the _ function.
    """
    # The translation functions created by TranslatorFactory read this
    # module-level flag on every call, so flipping it here takes effect
    # for already-created translators too.
    global USE_LAZY
    USE_LAZY = True
|
||||||
|
|
||||||
|
|
||||||
|
def install(domain):
    """Install a _() function using the given translation domain.

    Given a translation domain, install a _() function using gettext's
    install() function.

    The main difference from gettext.install() is that we allow
    overriding the default localedir (e.g. /usr/share/locale) using
    a translation-domain-specific environment variable (e.g.
    NOVA_LOCALEDIR).

    Note that to enable lazy translation, enable_lazy must be
    called.

    :param domain: the translation domain
    """
    from six import moves
    tf = TranslatorFactory(domain)
    # Inject '_' into builtins so every module sees it without importing.
    moves.builtins.__dict__['_'] = tf.primary
|
||||||
|
|
||||||
|
|
||||||
|
class Message(six.text_type):
    """A Message object is a unicode object that can be translated.

    Translation of Message is done explicitly using the translate() method.
    For all non-translation intents and purposes, a Message is simply unicode,
    and can be treated as such.
    """

    def __new__(cls, msgid, msgtext=None, params=None,
                domain='octavia', *args):
        """Create a new Message object.

        In order for translation to work gettext requires a message ID, this
        msgid will be used as the base unicode text. It is also possible
        for the msgid and the base unicode text to be different by passing
        the msgtext parameter.
        """
        # If the base msgtext is not given, we use the default translation
        # of the msgid (which is in English) just in case the system locale is
        # not English, so that the base text will be in that locale by default.
        if not msgtext:
            msgtext = Message._translate_msgid(msgid, domain)
        # We want to initialize the parent unicode with the actual object that
        # would have been plain unicode if 'Message' was not enabled.
        msg = super(Message, cls).__new__(cls, msgtext)
        msg.msgid = msgid
        msg.domain = domain
        msg.params = params
        return msg

    def translate(self, desired_locale=None):
        """Translate this message to the desired locale.

        :param desired_locale: The desired locale to translate the message to,
                               if no locale is provided the message will be
                               translated to the system's default locale.

        :returns: the translated message in unicode
        """

        translated_message = Message._translate_msgid(self.msgid,
                                                      self.domain,
                                                      desired_locale)
        if self.params is None:
            # No need for more translation
            return translated_message

        # This Message object may have been formatted with one or more
        # Message objects as substitution arguments, given either as a single
        # argument, part of a tuple, or as one or more values in a dictionary.
        # When translating this Message we need to translate those Messages too
        translated_params = _translate_args(self.params, desired_locale)

        translated_message = translated_message % translated_params

        return translated_message

    @staticmethod
    def _translate_msgid(msgid, domain, desired_locale=None):
        """Look up *msgid* in *domain*'s catalog for *desired_locale*.

        Falls back to the system default locale, then to 'en_US' when the
        system locale cannot be determined; falls back to the msgid itself
        when no catalog is found (gettext fallback=True).
        """
        if not desired_locale:
            system_locale = locale.getdefaultlocale()
            # If the system locale is not available to the runtime use English
            if not system_locale[0]:
                desired_locale = 'en_US'
            else:
                desired_locale = system_locale[0]

        locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
        lang = gettext.translation(domain,
                                   localedir=locale_dir,
                                   languages=[desired_locale],
                                   fallback=True)
        if six.PY3:
            translator = lang.gettext
        else:
            translator = lang.ugettext

        translated_message = translator(msgid)
        return translated_message

    def __mod__(self, other):
        """Return a new Message with *other* formatted in via '%'."""
        # When we mod a Message we want the actual operation to be performed
        # by the parent class (i.e. unicode()), the only thing we do here is
        # save the original msgid and the parameters in case of a translation
        params = self._sanitize_mod_params(other)
        unicode_mod = super(Message, self).__mod__(params)
        modded = Message(self.msgid,
                         msgtext=unicode_mod,
                         params=params,
                         domain=self.domain)
        return modded

    def _sanitize_mod_params(self, other):
        """Sanitize the object being modded with this Message.

        - Add support for modding 'None' so translation supports it
        - Trim the modded object, which can be a large dictionary, to only
          those keys that would actually be used in a translation
        - Snapshot the object being modded, in case the message is
          translated, it will be used as it was when the Message was created
        """
        if other is None:
            params = (other,)
        elif isinstance(other, dict):
            # Merge the dictionaries
            # Copy each item in case one does not support deep copy.
            params = {}
            if isinstance(self.params, dict):
                for key, val in self.params.items():
                    params[key] = self._copy_param(val)
            for key, val in other.items():
                params[key] = self._copy_param(val)
        else:
            params = self._copy_param(other)
        return params

    def _copy_param(self, param):
        """Snapshot a format parameter; fall back to its unicode form."""
        try:
            return copy.deepcopy(param)
        except Exception:
            # Fallback to casting to unicode this will handle the
            # python code-like objects that can't be deep-copied
            return six.text_type(param)

    def __add__(self, other):
        """Concatenation is disallowed: it would lose the msgid."""
        msg = _('Message objects do not support addition.')
        raise TypeError(msg)

    def __radd__(self, other):
        return self.__add__(other)

    if six.PY2:
        def __str__(self):
            # NOTE(luisg): Logging in python 2.6 tries to str() log records,
            # and it expects specifically a UnicodeError in order to proceed.
            msg = _('Message objects do not support str() because they may '
                    'contain non-ascii characters. '
                    'Please use unicode() or translate() instead.')
            raise UnicodeError(msg)
|
||||||
|
|
||||||
|
|
||||||
|
def get_available_languages(domain):
    """Lists the available languages for the given translation domain.

    :param domain: the domain to get languages for
    :returns: a new list of locale identifiers; the result is cached per
              domain in ``_AVAILABLE_LANGUAGES`` and copied on return so
              callers cannot mutate the cache.
    """
    if domain in _AVAILABLE_LANGUAGES:
        return copy.copy(_AVAILABLE_LANGUAGES[domain])

    localedir = '%s_LOCALEDIR' % domain.upper()
    find = lambda x: gettext.find(domain,
                                  localedir=os.environ.get(localedir),
                                  languages=[x])

    # NOTE(mrodden): en_US should always be available (and first in case
    # order matters) since our in-line message strings are en_US
    language_list = ['en_US']
    # NOTE(luisg): Babel <1.0 used a function called list(), which was
    # renamed to locale_identifiers() in >=1.0, the requirements master list
    # requires >=0.9.6, uncapped, so defensively work with both. We can remove
    # this check when the master list updates to >=1.0, and update all projects
    list_identifiers = (getattr(localedata, 'list', None) or
                        getattr(localedata, 'locale_identifiers'))
    locale_identifiers = list_identifiers()

    for i in locale_identifiers:
        if find(i) is not None:
            language_list.append(i)

    # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
    # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
    # are perfectly legitimate locales:
    # https://github.com/mitsuhiko/babel/issues/37
    # In Babel 1.3 they fixed the bug and they support these locales, but
    # they are still not explicitly "listed" by locale_identifiers().
    # That is why we add the locales here explicitly if necessary so that
    # they are listed as supported.
    aliases = {'zh': 'zh_CN',
               'zh_Hant_HK': 'zh_HK',
               'zh_Hant': 'zh_TW',
               'fil': 'tl_PH'}
    for (locale_, alias) in six.iteritems(aliases):
        if locale_ in language_list and alias not in language_list:
            language_list.append(alias)

    _AVAILABLE_LANGUAGES[domain] = language_list
    return copy.copy(language_list)
|
||||||
|
|
||||||
|
|
||||||
|
def translate(obj, desired_locale=None):
    """Gets the translated unicode representation of the given object.

    If the object is not translatable it is returned as-is.
    If the locale is None the object is translated to the system locale.

    :param obj: the object to translate
    :param desired_locale: the locale to translate the message to, if None the
                           default system locale will be used
    :returns: the translated object in unicode, or the original object if
              it could not be translated
    """
    message = obj
    if not isinstance(message, Message):
        # If the object to translate is not already translatable,
        # let's first get its unicode representation
        message = six.text_type(obj)
    if isinstance(message, Message):
        # Even after unicoding() we still need to check if we are
        # running with translatable unicode before translating
        return message.translate(desired_locale)
    # Note: the original object (not its unicode form) is returned when no
    # translation applies.
    return obj
|
||||||
|
|
||||||
|
|
||||||
|
def _translate_args(args, desired_locale=None):
    """Translates all the translatable elements of the given arguments object.

    This method is used for translating the translatable values in method
    arguments which include values of tuples or dictionaries.
    If the object is not a tuple or a dictionary the object itself is
    translated if it is translatable.

    If the locale is None the object is translated to the system locale.

    :param args: the args to translate
    :param desired_locale: the locale to translate the args to, if None the
                           default system locale will be used
    :returns: a new args object with the translated contents of the original
    """
    if isinstance(args, tuple):
        return tuple(translate(v, desired_locale) for v in args)
    if isinstance(args, dict):
        return dict((k, translate(v, desired_locale))
                    for k, v in six.iteritems(args))
    return translate(args, desired_locale)
|
||||||
|
|
||||||
|
|
||||||
|
class TranslationHandler(handlers.MemoryHandler):
    """Handler that translates records before logging them.

    The TranslationHandler takes a locale and a target logging.Handler object
    to forward LogRecord objects to after translating them. This handler
    depends on Message objects being logged, instead of regular strings.

    The handler can be configured declaratively in the logging.conf as follows:

        [handlers]
        keys = translatedlog, translator

        [handler_translatedlog]
        class = handlers.WatchedFileHandler
        args = ('/var/log/api-localized.log',)
        formatter = context

        [handler_translator]
        class = openstack.common.log.TranslationHandler
        target = translatedlog
        args = ('zh_CN',)

    If the specified locale is not available in the system, the handler will
    log in the default locale.
    """

    def __init__(self, locale=None, target=None):
        """Initialize a TranslationHandler

        :param locale: locale to use for translating messages
        :param target: logging.Handler object to forward
                       LogRecord objects to after translation
        """
        # NOTE(luisg): In order to allow this handler to be a wrapper for
        # other handlers, such as a FileHandler, and still be able to
        # configure it using logging.conf, this handler has to extend
        # MemoryHandler because only the MemoryHandlers' logging.conf
        # parsing is implemented such that it accepts a target handler.
        handlers.MemoryHandler.__init__(self, capacity=0, target=target)
        self.locale = locale

    def setFormatter(self, fmt):
        """Delegate formatting to the wrapped target handler."""
        self.target.setFormatter(fmt)

    def emit(self, record):
        """Translate the record, emit it on the target, then restore it."""
        # We save the message from the original record to restore it
        # after translation, so other handlers are not affected by this
        original_msg = record.msg
        original_args = record.args

        try:
            self._translate_and_log_record(record)
        finally:
            record.msg = original_msg
            record.args = original_args

    def _translate_and_log_record(self, record):
        """Translate msg and args in place and forward to the target."""
        record.msg = translate(record.msg, self.locale)

        # In addition to translating the message, we also need to translate
        # arguments that were passed to the log method that were not part
        # of the main message e.g., log.info(_('Some message %s'), this_one))
        record.args = _translate_args(record.args, self.locale)

        self.target.emit(record)
|
73
octavia/openstack/common/importutils.py
Normal file
73
octavia/openstack/common/importutils.py
Normal file
@ -0,0 +1,73 @@
|
|||||||
|
# Copyright 2011 OpenStack Foundation.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
Import related utilities and helper functions.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import traceback
|
||||||
|
|
||||||
|
|
||||||
|
def import_class(import_str):
    """Returns a class from a string including module and class.

    :param import_str: dotted path, e.g. ``'package.module.ClassName'``
    :returns: the attribute named by the final path segment
    :raises ImportError: when the module imports but lacks the attribute
    """
    mod_str, _sep, class_str = import_str.rpartition('.')
    __import__(mod_str)
    module = sys.modules[mod_str]
    try:
        return getattr(module, class_str)
    except AttributeError:
        # Preserve the underlying traceback text in the error message so the
        # caller can tell a missing attribute from a missing module.
        detail = traceback.format_exception(*sys.exc_info())
        raise ImportError('Class %s cannot be found (%s)' %
                          (class_str, detail))
|
||||||
|
|
||||||
|
|
||||||
|
def import_object(import_str, *args, **kwargs):
    """Import a class and return an instance of it."""
    cls = import_class(import_str)
    return cls(*args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
def import_object_ns(name_space, import_str, *args, **kwargs):
    """Tries to import object from default namespace.

    Imports a class and return an instance of it, first by trying
    to find the class in a default namespace, then failing back to
    a full path if not found in the default namespace.
    """
    namespaced = "%s.%s" % (name_space, import_str)
    try:
        return import_class(namespaced)(*args, **kwargs)
    except ImportError:
        # Not found under the namespace; fall back to the literal path.
        return import_class(import_str)(*args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
def import_module(import_str):
    """Import a module."""
    # __import__ returns the top-level package for dotted names, so the
    # fully-qualified module is pulled from sys.modules instead.
    __import__(import_str)
    module = sys.modules[import_str]
    return module
|
||||||
|
|
||||||
|
|
||||||
|
def import_versioned_module(version, submodule=None):
    """Import ``octavia.v<version>`` or ``octavia.v<version>.<submodule>``."""
    name = 'octavia.v%s' % version
    if submodule:
        name = '%s.%s' % (name, submodule)
    return import_module(name)
|
||||||
|
|
||||||
|
|
||||||
|
def try_import(import_str, default=None):
    """Try to import a module and if it fails return default."""
    try:
        module = import_module(import_str)
    except ImportError:
        return default
    return module
|
196
octavia/openstack/common/jsonutils.py
Normal file
196
octavia/openstack/common/jsonutils.py
Normal file
@ -0,0 +1,196 @@
|
|||||||
|
# Copyright 2010 United States Government as represented by the
|
||||||
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
|
# Copyright 2011 Justin Santa Barbara
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
'''
|
||||||
|
JSON related utilities.
|
||||||
|
|
||||||
|
This module provides a few things:
|
||||||
|
|
||||||
|
1) A handy function for getting an object down to something that can be
|
||||||
|
JSON serialized. See to_primitive().
|
||||||
|
|
||||||
|
2) Wrappers around loads() and dumps(). The dumps() wrapper will
|
||||||
|
automatically use to_primitive() for you if needed.
|
||||||
|
|
||||||
|
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
|
||||||
|
is available.
|
||||||
|
'''
|
||||||
|
|
||||||
|
|
||||||
|
import codecs
|
||||||
|
import datetime
|
||||||
|
import functools
|
||||||
|
import inspect
|
||||||
|
import itertools
|
||||||
|
import sys
|
||||||
|
|
||||||
|
is_simplejson = False
|
||||||
|
if sys.version_info < (2, 7):
|
||||||
|
# On Python <= 2.6, json module is not C boosted, so try to use
|
||||||
|
# simplejson module if available
|
||||||
|
try:
|
||||||
|
import simplejson as json
|
||||||
|
is_simplejson = True
|
||||||
|
except ImportError:
|
||||||
|
import json
|
||||||
|
else:
|
||||||
|
import json
|
||||||
|
|
||||||
|
import six
|
||||||
|
import six.moves.xmlrpc_client as xmlrpclib
|
||||||
|
|
||||||
|
from octavia.openstack.common import gettextutils
|
||||||
|
from octavia.openstack.common import importutils
|
||||||
|
from octavia.openstack.common import strutils
|
||||||
|
from octavia.openstack.common import timeutils
|
||||||
|
|
||||||
|
netaddr = importutils.try_import("netaddr")
|
||||||
|
|
||||||
|
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
|
||||||
|
inspect.isfunction, inspect.isgeneratorfunction,
|
||||||
|
inspect.isgenerator, inspect.istraceback, inspect.isframe,
|
||||||
|
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
|
||||||
|
inspect.isabstract]
|
||||||
|
|
||||||
|
_simple_types = (six.string_types + six.integer_types
|
||||||
|
+ (type(None), bool, float))
|
||||||
|
|
||||||
|
|
||||||
|
def to_primitive(value, convert_instances=False, convert_datetime=True,
                 level=0, max_depth=3):
    """Convert a complex object into primitives.

    Handy for JSON serialization. We can optionally handle instances,
    but since this is a recursive function, we could have cyclical
    data structures.

    To handle cyclical data structures we could track the actual objects
    visited in a set, but not all objects are hashable. Instead we just
    track the depth of the object inspections and don't go too deep.

    Therefore, convert_instances=True is lossy ... be aware.

    :param value: object to convert
    :param convert_instances: recurse into arbitrary instances' __dict__
    :param convert_datetime: render datetimes via timeutils.strtime()
    :param level: current recursion depth (internal)
    :param max_depth: depth past which '?' is returned instead of recursing
    """
    # handle obvious types first - order of basic types determined by running
    # full tests on nova project, resulting in the following counts:
    # 572754 <type 'NoneType'>
    # 460353 <type 'int'>
    # 379632 <type 'unicode'>
    # 274610 <type 'str'>
    # 199918 <type 'dict'>
    # 114200 <type 'datetime.datetime'>
    #  51817 <type 'bool'>
    #  26164 <type 'list'>
    #   6491 <type 'float'>
    #    283 <type 'tuple'>
    #     19 <type 'long'>
    if isinstance(value, _simple_types):
        return value

    if isinstance(value, datetime.datetime):
        if convert_datetime:
            return timeutils.strtime(value)
        else:
            return value

    # value of itertools.count doesn't get caught by nasty_type_tests
    # and results in infinite loop when list(value) is called.
    if type(value) == itertools.count:
        return six.text_type(value)

    # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
    #              tests that raise an exception in a mocked method that
    #              has a @wrap_exception with a notifier will fail. If
    #              we up the dependency to 0.5.4 (when it is released) we
    #              can remove this workaround.
    if getattr(value, '__module__', None) == 'mox':
        return 'mock'

    if level > max_depth:
        return '?'

    # The try block may not be necessary after the class check above,
    # but just in case ...
    try:
        recursive = functools.partial(to_primitive,
                                      convert_instances=convert_instances,
                                      convert_datetime=convert_datetime,
                                      level=level,
                                      max_depth=max_depth)
        if isinstance(value, dict):
            return dict((k, recursive(v)) for k, v in six.iteritems(value))
        elif isinstance(value, (list, tuple)):
            return [recursive(lv) for lv in value]

        # It's not clear why xmlrpclib created their own DateTime type, but
        # for our purposes, make it a datetime type which is explicitly
        # handled
        if isinstance(value, xmlrpclib.DateTime):
            value = datetime.datetime(*tuple(value.timetuple())[:6])

        if convert_datetime and isinstance(value, datetime.datetime):
            return timeutils.strtime(value)
        elif isinstance(value, gettextutils.Message):
            # NOTE: gettextutils.Message is a six.text_type subclass with no
            # 'data' attribute (returning value.data raised AttributeError);
            # normally the string fast-path above already handles Messages,
            # but degrade to plain unicode here for safety.
            return six.text_type(value)
        elif hasattr(value, 'iteritems'):
            return recursive(dict(value.iteritems()), level=level + 1)
        elif hasattr(value, '__iter__'):
            return recursive(list(value))
        elif convert_instances and hasattr(value, '__dict__'):
            # Likely an instance of something. Watch for cycles.
            # Ignore class member vars.
            return recursive(value.__dict__, level=level + 1)
        elif netaddr and isinstance(value, netaddr.IPAddress):
            return six.text_type(value)
        else:
            if any(test(value) for test in _nasty_type_tests):
                return six.text_type(value)
            return value
    except TypeError:
        # Class objects are tricky since they may define something like
        # __iter__ defined but it isn't callable as list().
        return six.text_type(value)
|
||||||
|
|
||||||
|
|
||||||
|
def dumps(value, default=to_primitive, **kwargs):
    """Serialize ``value`` to a JSON-formatted string.

    :param value: object to serialize
    :param default: fallback serializer for non-JSON-native objects,
                    ``to_primitive`` by default
    """
    if is_simplejson:
        # simplejson would otherwise serialize namedtuples as dicts,
        # diverging from the stdlib json behavior.
        kwargs['namedtuple_as_object'] = False
    return json.dumps(value, default=default, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
def dump(obj, fp, *args, **kwargs):
    """Serialize ``obj`` as JSON to the file-like object ``fp``."""
    if is_simplejson:
        # Keep simplejson's namedtuple handling consistent with stdlib json.
        kwargs['namedtuple_as_object'] = False
    return json.dump(obj, fp, *args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
def loads(s, encoding='utf-8', **kwargs):
    """Deserialize JSON from ``s``, safely decoding bytes first."""
    return json.loads(strutils.safe_decode(s, encoding), **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
def load(fp, encoding='utf-8', **kwargs):
    """Deserialize JSON from the (possibly binary) file-like ``fp``."""
    return json.load(codecs.getreader(encoding)(fp), **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
# Register this module as an anyjson implementation (when anyjson is
# installed) so anyjson users get our to_primitive-aware dumps/loads.
try:
    import anyjson
except ImportError:
    pass
else:
    anyjson._modules.append((__name__, 'dumps', TypeError,
                             'loads', ValueError, 'load'))
    anyjson.force_implementation(__name__)
|
45
octavia/openstack/common/local.py
Normal file
45
octavia/openstack/common/local.py
Normal file
@ -0,0 +1,45 @@
|
|||||||
|
# Copyright 2011 OpenStack Foundation.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""Local storage of variables using weak references"""
|
||||||
|
|
||||||
|
import threading
|
||||||
|
import weakref
|
||||||
|
|
||||||
|
|
||||||
|
class WeakLocal(threading.local):
    """Thread-local storage that keeps only weak references to its values.

    Attribute values are wrapped in ``weakref.ref`` on assignment and
    dereferenced on access, so stored objects can still be garbage
    collected once the rest of the code drops them (reads then yield
    ``None``).
    """

    def __getattribute__(self, attr):
        stored = super(WeakLocal, self).__getattribute__(attr)
        # What is stored is a weak reference, not the value itself, so the
        # reference must be called to recover the underlying object.
        return stored() if stored else stored

    def __setattr__(self, attr, value):
        return super(WeakLocal, self).__setattr__(attr, weakref.ref(value))
|
||||||
|
|
||||||
|
|
||||||
|
# NOTE(mikal): the name "store" should be deprecated in the future
store = WeakLocal()

# A "weak" store uses weak references and allows an object to fall out of scope
# when it falls out of scope in the code that uses the thread local storage. A
# "strong" store will hold a reference to the object so that it never falls out
# of scope.
weak_store = WeakLocal()
strong_store = threading.local()
|
377
octavia/openstack/common/lockutils.py
Normal file
377
octavia/openstack/common/lockutils.py
Normal file
@ -0,0 +1,377 @@
|
|||||||
|
# Copyright 2011 OpenStack Foundation.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import contextlib
|
||||||
|
import errno
|
||||||
|
import functools
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import shutil
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
import tempfile
|
||||||
|
import threading
|
||||||
|
import time
|
||||||
|
import weakref
|
||||||
|
|
||||||
|
from oslo.config import cfg
|
||||||
|
|
||||||
|
from octavia.openstack.common import fileutils
|
||||||
|
from octavia.openstack.common.gettextutils import _, _LE, _LI
|
||||||
|
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
# Configuration options controlling external (inter-process) locking.
util_opts = [
    cfg.BoolOpt('disable_process_locking', default=False,
                help='Enables or disables inter-process locks.'),
    cfg.StrOpt('lock_path',
               # Default comes from the environment so `python -m ... lockutils`
               # (see main() below) can hand a temp dir to child processes.
               default=os.environ.get("OCTAVIA_LOCK_PATH"),
               help='Directory to use for lock files.')
]


CONF = cfg.CONF
CONF.register_opts(util_opts)
|
||||||
|
|
||||||
|
|
||||||
|
def set_defaults(lock_path):
    """Override the default value of the ``lock_path`` option."""
    cfg.set_defaults(util_opts, lock_path=lock_path)
|
||||||
|
|
||||||
|
|
||||||
|
class _FileLock(object):
    """Lock implementation which allows multiple locks, working around
    issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
    not require any cleanup. Since the lock is always held on a file
    descriptor rather than outside of the process, the lock gets dropped
    automatically if the process crashes, even if __exit__ is not executed.

    There are no guarantees regarding usage by multiple green threads in a
    single process here. This lock works only between processes. Exclusive
    access between local threads should be achieved using the semaphores
    in the @synchronized decorator.

    Note these locks are released when the descriptor is closed, so it's not
    safe to close the file descriptor while another green thread holds the
    lock. Just opening and closing the lock file can break synchronisation,
    so lock files must be accessed only using this abstraction.
    """

    def __init__(self, name):
        # ``name`` is the full path of the lock file; the file object is
        # opened lazily in acquire().
        self.lockfile = None
        self.fname = name

    def acquire(self):
        # Make sure the directory holding the lock file exists before
        # opening the file for locking.
        basedir = os.path.dirname(self.fname)

        if not os.path.exists(basedir):
            fileutils.ensure_tree(basedir)
            LOG.info(_LI('Created lock path: %s'), basedir)

        self.lockfile = open(self.fname, 'w')

        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                LOG.debug('Got file lock "%s"', self.fname)
                return True
            except IOError as e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise threading.ThreadError(_("Unable to acquire lock on"
                                                  " `%(filename)s` due to"
                                                  " %(exception)s") %
                                                {'filename': self.fname,
                                                 'exception': e})

    def __enter__(self):
        self.acquire()
        return self

    def release(self):
        try:
            self.unlock()
            self.lockfile.close()
            LOG.debug('Released file lock "%s"', self.fname)
        except IOError:
            # Best effort: log and carry on rather than crash the caller
            # during cleanup.
            LOG.exception(_LE("Could not release the acquired lock `%s`"),
                          self.fname)

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.release()

    def exists(self):
        # True if the lock file is present on disk (not whether it is held).
        return os.path.exists(self.fname)

    def trylock(self):
        # Platform-specific non-blocking lock attempt; see subclasses.
        raise NotImplementedError()

    def unlock(self):
        raise NotImplementedError()
|
||||||
|
|
||||||
|
|
||||||
|
class _WindowsLock(_FileLock):
    # Windows file lock: msvcrt.locking() takes/releases a lock on one byte
    # of the open file; LK_NBLCK is the non-blocking exclusive attempt that
    # _FileLock.acquire() polls on.
    def trylock(self):
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)

    def unlock(self):
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
|
||||||
|
|
||||||
|
|
||||||
|
class _FcntlLock(_FileLock):
    # POSIX advisory file lock: LOCK_NB makes the exclusive lock attempt
    # non-blocking so _FileLock.acquire() can poll.
    def trylock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)

    def unlock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
|
||||||
|
|
||||||
|
|
||||||
|
class _PosixLock(object):
    """Inter-process lock backed by a named POSIX semaphore."""

    def __init__(self, name):
        # Hash the name because it's not valid to have POSIX semaphore
        # names with things like / in them. Then use base64 to encode
        # the digest() instead taking the hexdigest() because the
        # result is shorter and most systems can't have shm sempahore
        # names longer than 31 characters.
        h = hashlib.sha1()
        h.update(name.encode('ascii'))
        self.name = str((b'/' + base64.urlsafe_b64encode(
            h.digest())).decode('ascii'))

    def acquire(self, timeout=None):
        # O_CREAT creates the semaphore on first use; initial_value=1 makes
        # it behave as a mutex shared across processes.
        self.semaphore = posix_ipc.Semaphore(self.name,
                                             flags=posix_ipc.O_CREAT,
                                             initial_value=1)
        self.semaphore.acquire(timeout)
        return self

    def __enter__(self):
        self.acquire()
        return self

    def release(self):
        self.semaphore.release()
        self.semaphore.close()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.release()

    def exists(self):
        # Probe for the named semaphore without creating it.
        try:
            semaphore = posix_ipc.Semaphore(self.name)
        except posix_ipc.ExistentialError:
            return False
        else:
            semaphore.close()
            return True
|
||||||
|
|
||||||
|
|
||||||
|
# Select platform-appropriate lock implementations. InterProcessLock is the
# default cross-process lock; FileLock always locks via an on-disk file
# (needed when callers depend on file-based locking behavior).
if os.name == 'nt':
    import msvcrt
    InterProcessLock = _WindowsLock
    FileLock = _WindowsLock
else:
    import base64
    import fcntl
    import hashlib

    import posix_ipc
    InterProcessLock = _PosixLock
    FileLock = _FcntlLock

# In-process semaphore registry keyed by lock name. Weak values let unused
# semaphores be garbage collected; _semaphores_lock guards mutation of the
# registry itself.
_semaphores = weakref.WeakValueDictionary()
_semaphores_lock = threading.Lock()
|
||||||
|
|
||||||
|
|
||||||
|
def _get_lock_path(name, lock_file_prefix, lock_path=None):
    """Build the full path of the lock file for ``name``.

    Directory separators in the name are flattened, the optional prefix is
    prepended, and the result is joined onto the configured lock directory.

    :raises: cfg.RequiredOptError when no lock directory is configured and
             file-based locking is in use.
    """
    # Lock names must not contain directory separators.
    safe_name = name.replace(os.sep, '_')
    if lock_file_prefix:
        if lock_file_prefix.endswith('-'):
            safe_name = lock_file_prefix + safe_name
        else:
            safe_name = '%s-%s' % (lock_file_prefix, safe_name)

    base_dir = lock_path or CONF.lock_path

    if not base_dir:
        if InterProcessLock is not _PosixLock:
            # File-based locks genuinely need an on-disk directory.
            raise cfg.RequiredOptError('lock_path')
        # NOTE(bnemec): POSIX semaphore locks only use the path as a name,
        # so a fake path avoids raising RequiredOptError unnecessarily.
        base_dir = 'posixlock:/'

    return os.path.join(base_dir, safe_name)
|
||||||
|
|
||||||
|
|
||||||
|
def external_lock(name, lock_file_prefix=None, lock_path=None):
    """Return an inter-process lock object for ``name`` (not yet acquired)."""
    LOG.debug('Attempting to grab external lock "%(lock)s"',
              {'lock': name})

    lock_file_path = _get_lock_path(name, lock_file_prefix, lock_path)

    # NOTE(bnemec): If an explicit lock_path was passed to us then it
    # means the caller is relying on file-based locking behavior, so
    # we can't use posix locks for those calls.
    if lock_path:
        return FileLock(lock_file_path)
    return InterProcessLock(lock_file_path)
|
||||||
|
|
||||||
|
|
||||||
|
def remove_external_lock_file(name, lock_file_prefix=None):
    """Remove an external lock file when it's not used anymore

    This will be helpful when we have a lot of lock files
    """
    # Hold the matching in-process semaphore so another thread in this
    # process cannot be using the file while it is removed.
    with internal_lock(name):
        lock_file_path = _get_lock_path(name, lock_file_prefix)
        try:
            os.remove(lock_file_path)
        except OSError:
            # Best effort: the file may never have been created, or may be
            # owned by another process.
            LOG.info(_LI('Failed to remove file %(file)s'),
                     {'file': lock_file_path})
|
||||||
|
|
||||||
|
|
||||||
|
def internal_lock(name):
    """Return the per-process semaphore registered under ``name``.

    A semaphore is created and cached on first use; lookups and inserts in
    the shared registry are serialized by _semaphores_lock.
    """
    with _semaphores_lock:
        try:
            sem = _semaphores[name]
        except KeyError:
            sem = threading.Semaphore()
            _semaphores[name] = sem

    LOG.debug('Got semaphore "%(lock)s"', {'lock': name})
    return sem
|
||||||
|
|
||||||
|
|
||||||
|
@contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
    """Context based lock

    This function yields a `threading.Semaphore` instance (if we don't use
    eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
    True, in which case, it'll yield an InterProcessLock instance.

    :param lock_file_prefix: The lock_file_prefix argument is used to provide
    lock files on disk with a meaningful prefix.

    :param external: The external keyword argument denotes whether this lock
    should work across multiple processes. This means that if two different
    workers both run a method decorated with @synchronized('mylock',
    external=True), only one of them will execute at a time.
    """
    int_lock = internal_lock(name)
    with int_lock:
        # The in-process semaphore is always held; the inter-process lock is
        # layered on top only when requested and not globally disabled.
        if external and not CONF.disable_process_locking:
            ext_lock = external_lock(name, lock_file_prefix, lock_path)
            with ext_lock:
                yield ext_lock
        else:
            yield int_lock
    LOG.debug('Released semaphore "%(lock)s"', {'lock': name})
|
||||||
|
|
||||||
|
|
||||||
|
def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
    """Synchronization decorator.

    Decorating a method like so::

        @synchronized('mylock')
        def foo(self, *args):
           ...

    ensures that only one thread will execute the foo method at a time.

    Different methods can share the same lock::

        @synchronized('mylock')
        def foo(self, *args):
           ...

        @synchronized('mylock')
        def bar(self, *args):
           ...

    This way only one of either foo or bar can be executing at a time.
    """

    def wrap(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            try:
                with lock(name, lock_file_prefix, external, lock_path):
                    LOG.debug('Got semaphore / lock "%(function)s"',
                              {'function': f.__name__})
                    return f(*args, **kwargs)
            finally:
                # Logged whether f() returned or raised; the lock itself is
                # released by the context manager above.
                LOG.debug('Semaphore / lock released "%(function)s"',
                          {'function': f.__name__})
        return inner
    return wrap
|
||||||
|
|
||||||
|
|
||||||
|
def synchronized_with_prefix(lock_file_prefix):
    """Partial object generator for the synchronization decorator.

    Redefine @synchronized in each project like so::

        (in nova/utils.py)
        from nova.openstack.common import lockutils

        synchronized = lockutils.synchronized_with_prefix('nova-')


        (in nova/foo.py)
        from nova import utils

        @utils.synchronized('mylock')
        def bar(self, *args):
           ...

    The lock_file_prefix argument is used to provide lock files on disk with a
    meaningful prefix.
    """

    return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
|
||||||
|
|
||||||
|
|
||||||
|
def main(argv):
    """Create a dir for locks and pass it to command from arguments

    If you run this:
    python -m openstack.common.lockutils python setup.py testr <etc>

    a temporary directory will be created for all your locks and passed to all
    your tests in an environment variable. The temporary dir will be deleted
    afterwards and the return value will be preserved.
    """
    lock_dir = tempfile.mkdtemp()
    # Child processes pick the lock directory up via the environment; this
    # matches the default of the lock_path option above.
    os.environ["OCTAVIA_LOCK_PATH"] = lock_dir
    try:
        return subprocess.call(argv[1:])
    finally:
        # Best-effort cleanup; the command's return code is preserved.
        shutil.rmtree(lock_dir, ignore_errors=True)


if __name__ == '__main__':
    sys.exit(main(sys.argv))
|
713
octavia/openstack/common/log.py
Normal file
713
octavia/openstack/common/log.py
Normal file
@ -0,0 +1,713 @@
|
|||||||
|
# Copyright 2011 OpenStack Foundation.
|
||||||
|
# Copyright 2010 United States Government as represented by the
|
||||||
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""OpenStack logging handler.
|
||||||
|
|
||||||
|
This module adds to logging functionality by adding the option to specify
|
||||||
|
a context object when calling the various log methods. If the context object
|
||||||
|
is not specified, default formatting is used. Additionally, an instance uuid
|
||||||
|
may be passed as part of the log message, which is intended to make it easier
|
||||||
|
for admins to find messages related to a specific instance.
|
||||||
|
|
||||||
|
It also allows setting of formatting information through conf.
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
import inspect
|
||||||
|
import itertools
|
||||||
|
import logging
|
||||||
|
import logging.config
|
||||||
|
import logging.handlers
|
||||||
|
import os
|
||||||
|
import socket
|
||||||
|
import sys
|
||||||
|
import traceback
|
||||||
|
|
||||||
|
from oslo.config import cfg
|
||||||
|
import six
|
||||||
|
from six import moves
|
||||||
|
|
||||||
|
_PY26 = sys.version_info[0:2] == (2, 6)
|
||||||
|
|
||||||
|
from octavia.openstack.common.gettextutils import _
|
||||||
|
from octavia.openstack.common import importutils
|
||||||
|
from octavia.openstack.common import jsonutils
|
||||||
|
from octavia.openstack.common import local
|
||||||
|
# NOTE(flaper87): Pls, remove when graduating this module
|
||||||
|
# from the incubator.
|
||||||
|
from octavia.openstack.common.strutils import mask_password # noqa
|
||||||
|
|
||||||
|
|
||||||
|
# Default strftime format used for %(asctime)s in log records.
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"


# CLI options shared by most OpenStack services.
common_cli_opts = [
    cfg.BoolOpt('debug',
                short='d',
                default=False,
                help='Print debugging output (set logging level to '
                     'DEBUG instead of default WARNING level).'),
    cfg.BoolOpt('verbose',
                short='v',
                default=False,
                help='Print more verbose output (set logging level to '
                     'INFO instead of default WARNING level).'),
]

# CLI options controlling log destinations and formats.
logging_cli_opts = [
    cfg.StrOpt('log-config-append',
               metavar='PATH',
               deprecated_name='log-config',
               help='The name of a logging configuration file. This file '
                    'is appended to any existing logging configuration '
                    'files. For details about logging configuration files, '
                    'see the Python logging module documentation.'),
    cfg.StrOpt('log-format',
               metavar='FORMAT',
               help='DEPRECATED. '
                    'A logging.Formatter log message format string which may '
                    'use any of the available logging.LogRecord attributes. '
                    'This option is deprecated. Please use '
                    'logging_context_format_string and '
                    'logging_default_format_string instead.'),
    cfg.StrOpt('log-date-format',
               default=_DEFAULT_LOG_DATE_FORMAT,
               metavar='DATE_FORMAT',
               help='Format string for %%(asctime)s in log records. '
                    'Default: %(default)s .'),
    cfg.StrOpt('log-file',
               metavar='PATH',
               deprecated_name='logfile',
               help='(Optional) Name of log file to output to. '
                    'If no default is set, logging will go to stdout.'),
    cfg.StrOpt('log-dir',
               deprecated_name='logdir',
               help='(Optional) The base directory used for relative '
                    '--log-file paths.'),
    cfg.BoolOpt('use-syslog',
                default=False,
                help='Use syslog for logging. '
                     'Existing syslog format is DEPRECATED during I, '
                     'and will change in J to honor RFC5424.'),
    cfg.BoolOpt('use-syslog-rfc-format',
                # TODO(bogdando) remove or use True after existing
                # syslog format deprecation in J
                default=False,
                help='(Optional) Enables or disables syslog rfc5424 format '
                     'for logging. If enabled, prefixes the MSG part of the '
                     'syslog message with APP-NAME (RFC5424). The '
                     'format without the APP-NAME is deprecated in I, '
                     'and will be removed in J.'),
    cfg.StrOpt('syslog-log-facility',
               default='LOG_USER',
               help='Syslog facility to receive log lines.')
]

generic_log_opts = [
    cfg.BoolOpt('use_stderr',
                default=True,
                help='Log output to standard error.')
]

# Third-party loggers that are too noisy at their default levels.
DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN',
                      'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO',
                      'oslo.messaging=INFO', 'iso8601=WARN',
                      'requests.packages.urllib3.connectionpool=WARN',
                      'urllib3.connectionpool=WARN', 'websocket=WARN',
                      "keystonemiddleware=WARN", "routes.middleware=WARN",
                      "stevedore=WARN"]

# Non-CLI options controlling record format strings.
log_opts = [
    cfg.StrOpt('logging_context_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [%(request_id)s %(user_identity)s] '
                       '%(instance)s%(message)s',
               help='Format string to use for log messages with context.'),
    cfg.StrOpt('logging_default_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [-] %(instance)s%(message)s',
               help='Format string to use for log messages without context.'),
    cfg.StrOpt('logging_debug_format_suffix',
               default='%(funcName)s %(pathname)s:%(lineno)d',
               help='Data to append to log format when level is DEBUG.'),
    cfg.StrOpt('logging_exception_prefix',
               default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
                       '%(instance)s',
               help='Prefix each line of exception output with this format.'),
    cfg.ListOpt('default_log_levels',
                default=DEFAULT_LOG_LEVELS,
                help='List of logger=LEVEL pairs.'),
    cfg.BoolOpt('publish_errors',
                default=False,
                help='Enables or disables publication of error events.'),
    cfg.BoolOpt('fatal_deprecations',
                default=False,
                help='Enables or disables fatal status of deprecations.'),

    # NOTE(mikal): there are two options here because sometimes we are handed
    # a full instance (and could include more information), and other times we
    # are just handed a UUID for the instance.
    cfg.StrOpt('instance_format',
               default='[instance: %(uuid)s] ',
               help='The format for an instance that is passed with the log '
                    'message.'),
    cfg.StrOpt('instance_uuid_format',
               default='[instance: %(uuid)s] ',
               help='The format for an instance UUID that is passed with the '
                    'log message.'),
]

CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)

# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
|
||||||
|
|
||||||
|
|
||||||
|
try:
    NullHandler = logging.NullHandler
except AttributeError:  # NOTE(jkoelker) NullHandler added in Python 2.7
    # Minimal fallback for Python 2.6: a handler that discards every record
    # and needs no lock.
    class NullHandler(logging.Handler):
        def handle(self, record):
            pass

        def emit(self, record):
            pass

        def createLock(self):
            self.lock = None
|
||||||
|
|
||||||
|
|
||||||
|
def _dictify_context(context):
|
||||||
|
if context is None:
|
||||||
|
return None
|
||||||
|
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
|
||||||
|
context = context.to_dict()
|
||||||
|
return context
|
||||||
|
|
||||||
|
|
||||||
|
def _get_binary_name():
|
||||||
|
return os.path.basename(inspect.stack()[-1][1])
|
||||||
|
|
||||||
|
|
||||||
|
def _get_log_file_path(binary=None):
    """Resolve the destination log file from CONF.log_file / CONF.log_dir.

    Returns None when neither option is set.
    """
    logfile = CONF.log_file
    logdir = CONF.log_dir

    if logfile:
        return os.path.join(logdir, logfile) if logdir else logfile

    if logdir:
        # No explicit file name: derive one from the binary name.
        binary = binary or _get_binary_name()
        return '%s.log' % (os.path.join(logdir, binary),)

    return None
|
||||||
|
|
||||||
|
|
||||||
|
class BaseLoggerAdapter(logging.LoggerAdapter):
    """LoggerAdapter base class adding the synthesized AUDIT level."""

    def audit(self, msg, *args, **kwargs):
        self.log(logging.AUDIT, msg, *args, **kwargs)

    def isEnabledFor(self, level):
        if _PY26:
            # This method was added in python 2.7 (and it does the exact
            # same logic, so we need to do the exact same logic so that
            # python 2.6 has this capability as well).
            return self.logger.isEnabledFor(level)
        else:
            return super(BaseLoggerAdapter, self).isEnabledFor(level)
|
||||||
|
|
||||||
|
|
||||||
|
class LazyAdapter(BaseLoggerAdapter):
    """Adapter that defers creating the underlying logger until first use."""

    def __init__(self, name='unknown', version='unknown'):
        # Real logger is built lazily by the ``logger`` property below.
        self._logger = None
        self.extra = {}
        self.name = name
        self.version = version

    @property
    def logger(self):
        if not self._logger:
            self._logger = getLogger(self.name, self.version)
            if six.PY3:
                # In Python 3, the code fails because the 'manager' attribute
                # cannot be found when using a LoggerAdapter as the
                # underlying logger. Work around this issue.
                self._logger.manager = self._logger.logger.manager
        return self._logger
|
||||||
|
|
||||||
|
|
||||||
|
class ContextAdapter(BaseLoggerAdapter):
    """Adapter that injects request-context data into every log record."""

    warn = logging.LoggerAdapter.warning

    def __init__(self, logger, project_name, version_string):
        self.logger = logger
        self.project = project_name
        self.version = version_string
        # msg -> list of args tuples already emitted by deprecated(), so
        # each deprecation warning is logged only once per args value.
        self._deprecated_messages_sent = dict()

    @property
    def handlers(self):
        return self.logger.handlers

    def deprecated(self, msg, *args, **kwargs):
        """Call this method when a deprecated feature is used.

        If the system is configured for fatal deprecations then the message
        is logged at the 'critical' level and :class:`DeprecatedConfig` will
        be raised.

        Otherwise, the message will be logged (once) at the 'warn' level.

        :raises: :class:`DeprecatedConfig` if the system is configured for
                 fatal deprecations.

        """
        stdmsg = _("Deprecated: %s") % msg
        if CONF.fatal_deprecations:
            self.critical(stdmsg, *args, **kwargs)
            raise DeprecatedConfig(msg=stdmsg)

        # Using a list because a tuple with dict can't be stored in a set.
        sent_args = self._deprecated_messages_sent.setdefault(msg, list())

        if args in sent_args:
            # Already logged this message, so don't log it again.
            return

        sent_args.append(args)
        self.warn(stdmsg, *args, **kwargs)

    def process(self, msg, kwargs):
        # NOTE(jecarey): If msg is not unicode, coerce it into unicode
        #                before it can get to the python logging and
        #                possibly cause string encoding trouble
        if not isinstance(msg, six.text_type):
            msg = six.text_type(msg)

        if 'extra' not in kwargs:
            kwargs['extra'] = {}
        extra = kwargs['extra']

        # Prefer an explicitly passed context; otherwise fall back to the
        # thread-local store.
        context = kwargs.pop('context', None)
        if not context:
            context = getattr(local.store, 'context', None)
        if context:
            extra.update(_dictify_context(context))

        instance = kwargs.pop('instance', None)
        instance_uuid = (extra.get('instance_uuid') or
                         kwargs.pop('instance_uuid', None))
        instance_extra = ''
        if instance:
            instance_extra = CONF.instance_format % instance
        elif instance_uuid:
            instance_extra = (CONF.instance_uuid_format
                              % {'uuid': instance_uuid})
        extra['instance'] = instance_extra

        extra.setdefault('user_identity', kwargs.pop('user_identity', None))

        extra['project'] = self.project
        extra['version'] = self.version
        # Keep a copy of everything under 'extra' as well so formatters
        # (e.g. JSONFormatter) can serialize the whole context in one key.
        extra['extra'] = extra.copy()
        return msg, kwargs
|
||||||
|
|
||||||
|
|
||||||
|
class JSONFormatter(logging.Formatter):
    """Format log records as JSON documents."""

    def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but its still there
        #                since logging.config.fileConfig passes it.
        self.datefmt = datefmt

    def formatException(self, ei, strip_newlines=True):
        # Return the traceback as a list of lines; with strip_newlines the
        # multi-line strings from format_exception are split and flattened.
        lines = traceback.format_exception(*ei)
        if strip_newlines:
            lines = [moves.filter(
                lambda x: x,
                line.rstrip().splitlines()) for line in lines]
            lines = list(itertools.chain(*lines))
        return lines

    def format(self, record):
        message = {'message': record.getMessage(),
                   'asctime': self.formatTime(record, self.datefmt),
                   'name': record.name,
                   'msg': record.msg,
                   'args': record.args,
                   'levelname': record.levelname,
                   'levelno': record.levelno,
                   'pathname': record.pathname,
                   'filename': record.filename,
                   'module': record.module,
                   'lineno': record.lineno,
                   'funcname': record.funcName,
                   'created': record.created,
                   'msecs': record.msecs,
                   'relative_created': record.relativeCreated,
                   'thread': record.thread,
                   'thread_name': record.threadName,
                   'process_name': record.processName,
                   'process': record.process,
                   'traceback': None}

        # Context data attached by ContextAdapter.process(), if present.
        if hasattr(record, 'extra'):
            message['extra'] = record.extra

        if record.exc_info:
            message['traceback'] = self.formatException(record.exc_info)

        return jsonutils.dumps(message)
|
||||||
|
|
||||||
|
|
||||||
|
def _create_logging_excepthook(product_name):
|
||||||
|
def logging_excepthook(exc_type, value, tb):
|
||||||
|
extra = {'exc_info': (exc_type, value, tb)}
|
||||||
|
getLogger(product_name).critical(
|
||||||
|
"".join(traceback.format_exception_only(exc_type, value)),
|
||||||
|
**extra)
|
||||||
|
return logging_excepthook
|
||||||
|
|
||||||
|
|
||||||
|
class LogConfigError(Exception):
    """Raised when a logging configuration file cannot be loaded."""

    message = _('Error loading logging config %(log_config)s: %(err_msg)s')

    def __init__(self, log_config, err_msg):
        # Keep both pieces so callers can inspect them individually.
        self.log_config = log_config
        self.err_msg = err_msg

    def __str__(self):
        return self.message % {'log_config': self.log_config,
                               'err_msg': self.err_msg}
|
||||||
|
|
||||||
|
|
||||||
|
def _load_log_config(log_config_append):
    """Apply a logging config file without disabling existing loggers.

    :param log_config_append: path to a fileConfig-style ini file
    :raises LogConfigError: when the file is missing sections or malformed
    """
    try:
        logging.config.fileConfig(log_config_append,
                                  disable_existing_loggers=False)
    except (moves.configparser.Error, KeyError) as err:
        # Normalize parser failures into the module's own exception type.
        raise LogConfigError(log_config_append, six.text_type(err))
|
||||||
|
|
||||||
|
|
||||||
|
def setup(product_name, version='unknown'):
    """Setup logging."""
    # An explicit config file takes precedence over CONF-driven setup.
    log_config = CONF.log_config_append
    if log_config:
        _load_log_config(log_config)
    else:
        _setup_logging_from_conf(product_name, version)
    # Route uncaught exceptions through the product logger.
    sys.excepthook = _create_logging_excepthook(product_name)
|
||||||
|
|
||||||
|
|
||||||
|
def set_defaults(logging_context_format_string=None,
                 default_log_levels=None):
    """Override the built-in defaults of this module's logging options.

    Both parameters are optional; only the ones supplied are overridden.
    """
    # NOTE: default_log_levels may legitimately be omitted — it was added
    # later in a backwards-incompatible change, so callers predating it
    # only pass the format string.
    if default_log_levels is not None:
        cfg.set_defaults(log_opts,
                         default_log_levels=default_log_levels)
    if logging_context_format_string is not None:
        cfg.set_defaults(
            log_opts,
            logging_context_format_string=logging_context_format_string)
|
||||||
|
|
||||||
|
|
||||||
|
def _find_facility_from_conf():
    """Resolve CONF.syslog_log_facility to a SysLogHandler facility code.

    Accepts either a handler attribute name (e.g. ``LOG_USER``) or an entry
    of ``SysLogHandler.facility_names`` (e.g. ``user``).

    :returns: the numeric facility code
    :raises TypeError: when the configured value matches neither form
    """
    facility_names = logging.handlers.SysLogHandler.facility_names
    # First try the constant-style spelling (LOG_USER, LOG_LOCAL0, ...).
    facility = getattr(logging.handlers.SysLogHandler,
                       CONF.syslog_log_facility,
                       None)

    # Fall back to the lowercase facility_names mapping ('user', 'local0').
    if facility is None and CONF.syslog_log_facility in facility_names:
        facility = facility_names.get(CONF.syslog_log_facility)

    if facility is None:
        # BUG FIX: dict.keys() returns a view on Python 3, which has no
        # .extend(); copy to a list before appending the constant names.
        valid_facilities = list(facility_names.keys())
        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
                  'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
        valid_facilities.extend(consts)
        raise TypeError(_('syslog facility must be one of: %s') %
                        ', '.join("'%s'" % fac
                                  for fac in valid_facilities))

    return facility
|
||||||
|
|
||||||
|
|
||||||
|
class RFCSysLogHandler(logging.handlers.SysLogHandler):
    """SysLogHandler that prefixes each message with the binary name.

    Prepending the process name approximates the RFC 5424 APP-NAME field
    in the emitted syslog line.
    """

    def __init__(self, *args, **kwargs):
        # _get_binary_name() is defined elsewhere in this module.
        self.binary_name = _get_binary_name()
        # Do not use super() unless type(logging.handlers.SysLogHandler)
        # is 'type' (Python 2.7).
        # Use old style calls, if the type is 'classobj' (Python 2.6)
        logging.handlers.SysLogHandler.__init__(self, *args, **kwargs)

    def format(self, record):
        # Do not use super() unless type(logging.handlers.SysLogHandler)
        # is 'type' (Python 2.7).
        # Use old style calls, if the type is 'classobj' (Python 2.6)
        msg = logging.handlers.SysLogHandler.format(self, record)
        # Prefix the formatted message with "<binary_name> ".
        msg = self.binary_name + ' ' + msg
        return msg
|
||||||
|
|
||||||
|
|
||||||
|
def _setup_logging_from_conf(project, version):
    """Wire up the root logger's handlers/formatters/levels from CONF.

    Replaces all existing root handlers with handlers derived from the
    oslo config options (file, stderr/stdout, publish_errors, syslog),
    then applies the configured formatter and log levels.
    """
    # getLogger() returns a ContextAdapter; .logger is the wrapped
    # stdlib logger (here: the root logger).
    log_root = getLogger(None).logger
    for handler in log_root.handlers:
        log_root.removeHandler(handler)

    logpath = _get_log_file_path()
    if logpath:
        # WatchedFileHandler reopens the file if it is rotated externally.
        filelog = logging.handlers.WatchedFileHandler(logpath)
        log_root.addHandler(filelog)

    if CONF.use_stderr:
        streamlog = ColorHandler()
        log_root.addHandler(streamlog)

    elif not logpath:
        # No file and no stderr requested: fall back to stdout.
        # pass sys.stdout as a positional argument
        # python2.6 calls the argument strm, in 2.7 it's stream
        streamlog = logging.StreamHandler(sys.stdout)
        log_root.addHandler(streamlog)

    if CONF.publish_errors:
        # Prefer the incubator copy of PublishErrorsHandler; fall back to
        # the oslo.messaging implementation when it is not present.
        try:
            handler = importutils.import_object(
                "octavia.openstack.common.log_handler.PublishErrorsHandler",
                logging.ERROR)
        except ImportError:
            handler = importutils.import_object(
                "oslo.messaging.notify.log_handler.PublishErrorsHandler",
                logging.ERROR)
        log_root.addHandler(handler)

    datefmt = CONF.log_date_format
    for handler in log_root.handlers:
        # NOTE(alaski): CONF.log_format overrides everything currently. This
        # should be deprecated in favor of context aware formatting.
        if CONF.log_format:
            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                   datefmt=datefmt))
            log_root.info('Deprecated: log_format is now deprecated and will '
                          'be removed in the next release')
        else:
            handler.setFormatter(ContextFormatter(project=project,
                                                  version=version,
                                                  datefmt=datefmt))

    # debug wins over verbose; default is WARNING.
    if CONF.debug:
        log_root.setLevel(logging.DEBUG)
    elif CONF.verbose:
        log_root.setLevel(logging.INFO)
    else:
        log_root.setLevel(logging.WARNING)

    # default_log_levels entries look like "module=LEVEL".
    for pair in CONF.default_log_levels:
        mod, _sep, level_name = pair.partition('=')
        logger = logging.getLogger(mod)
        # NOTE(AAzza) in python2.6 Logger.setLevel doesn't convert string name
        # to integer code.
        if sys.version_info < (2, 7):
            level = logging.getLevelName(level_name)
            logger.setLevel(level)
        else:
            logger.setLevel(level_name)

    if CONF.use_syslog:
        try:
            facility = _find_facility_from_conf()
            # TODO(bogdando) use the format provided by RFCSysLogHandler
            # after existing syslog format deprecation in J
            if CONF.use_syslog_rfc_format:
                syslog = RFCSysLogHandler(facility=facility)
            else:
                syslog = logging.handlers.SysLogHandler(facility=facility)
            log_root.addHandler(syslog)
        except socket.error:
            # SysLogHandler opens a socket to the syslog daemon; report
            # (but do not fail) if it is unreachable.
            # NOTE(review): the two message halves concatenate without a
            # space ("syslogis running") — pre-existing typo left as-is.
            log_root.error('Unable to add syslog handler. Verify that syslog'
                           'is running.')
|
||||||
|
|
||||||
|
|
||||||
|
# Cache of ContextAdapter instances, keyed by logger name.
_loggers = {}


def getLogger(name='unknown', version='unknown'):
    """Return the cached ContextAdapter for *name*, creating it on demand.

    ``version`` is only consulted the first time a given name is seen.
    """
    try:
        return _loggers[name]
    except KeyError:
        adapter = ContextAdapter(logging.getLogger(name), name, version)
        _loggers[name] = adapter
        return adapter
|
||||||
|
|
||||||
|
|
||||||
|
def getLazyLogger(name='unknown', version='unknown'):
    """Return a lazily-initialized logger adapter.

    The returned LazyAdapter defers creating the real logger until first
    use, then forwards every call to it.
    """
    return LazyAdapter(name, version)
|
||||||
|
|
||||||
|
|
||||||
|
class WritableLogger(object):
    """A thin wrapper that responds to `write` and logs.

    Lets a logger stand in for a file-like object (e.g. as a WSGI server's
    log stream): each write() becomes one log call at the configured level.
    """

    def __init__(self, logger, level=logging.INFO):
        self.logger = logger
        self.level = level

    def write(self, msg):
        # Trailing newlines would double-space the log; strip them.
        self.logger.log(self.level, msg.rstrip())
|
||||||
|
|
||||||
|
|
||||||
|
class ContextFormatter(logging.Formatter):
    """A context.RequestContext aware formatter configured through flags.

    The flags used to set format strings are: logging_context_format_string
    and logging_default_format_string. You can also specify
    logging_debug_format_suffix to append extra formatting if the log level is
    debug.

    For information about what variables are available for the formatter see:
    http://docs.python.org/library/logging.html#formatter

    If available, uses the context value stored in TLS - local.store.context

    """

    def __init__(self, *args, **kwargs):
        """Initialize ContextFormatter instance

        Takes additional keyword arguments which can be used in the message
        format string.

        :keyword project: project name
        :type project: string
        :keyword version: project version
        :type version: string

        """

        self.project = kwargs.pop('project', 'unknown')
        self.version = kwargs.pop('version', 'unknown')

        logging.Formatter.__init__(self, *args, **kwargs)

    def format(self, record):
        """Uses contextstring if request_id is set, otherwise default."""

        # NOTE(jecarey): If msg is not unicode, coerce it into unicode
        # before it can get to the python logging and
        # possibly cause string encoding trouble
        if not isinstance(record.msg, six.text_type):
            record.msg = six.text_type(record.msg)

        # store project info
        record.project = self.project
        record.version = self.version

        # store request info: copy the thread-local request context's
        # fields onto the record so format strings can reference them.
        context = getattr(local.store, 'context', None)
        if context:
            d = _dictify_context(context)
            for k, v in d.items():
                setattr(record, k, v)

        # NOTE(sdague): default the fancier formatting params
        # to an empty string so we don't throw an exception if
        # they get used
        for key in ('instance', 'color', 'user_identity'):
            if key not in record.__dict__:
                record.__dict__[key] = ''

        # request_id present -> context-aware format; otherwise default.
        if record.__dict__.get('request_id'):
            fmt = CONF.logging_context_format_string
        else:
            fmt = CONF.logging_default_format_string

        if (record.levelno == logging.DEBUG and
                CONF.logging_debug_format_suffix):
            fmt += " " + CONF.logging_debug_format_suffix

        # Formatter stores its format differently before/after Python 3.2
        # (plain _fmt vs. a style object); mutate the matching attribute.
        # NOTE(review): logging.PercentStyle is an undocumented internal —
        # kept as-is since the whole module targets this vendored snapshot.
        if sys.version_info < (3, 2):
            self._fmt = fmt
        else:
            self._style = logging.PercentStyle(fmt)
            self._fmt = self._style._fmt
        # Cache this on the record, Logger will respect our formatted copy
        if record.exc_info:
            record.exc_text = self.formatException(record.exc_info, record)
        return logging.Formatter.format(self, record)

    def formatException(self, exc_info, record=None):
        """Format exception output with CONF.logging_exception_prefix."""
        # Without a record we cannot interpolate the prefix; fall back to
        # the stock formatting.
        if not record:
            return logging.Formatter.formatException(self, exc_info)

        stringbuffer = moves.StringIO()
        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
                                  None, stringbuffer)
        lines = stringbuffer.getvalue().split('\n')
        stringbuffer.close()

        # The prefix may reference %(asctime)s; make sure it exists.
        if CONF.logging_exception_prefix.find('%(asctime)') != -1:
            record.asctime = self.formatTime(record, self.datefmt)

        # Prepend the interpolated prefix to every traceback line.
        formatted_lines = []
        for line in lines:
            pl = CONF.logging_exception_prefix % record.__dict__
            fl = '%s%s' % (pl, line)
            formatted_lines.append(fl)
        return '\n'.join(formatted_lines)
|
||||||
|
|
||||||
|
|
||||||
|
class ColorHandler(logging.StreamHandler):
    """StreamHandler that exposes an ANSI color code as %(color)s.

    Format strings rendered through this handler may reference
    ``%(color)s`` to colorize output by severity.
    """

    # NOTE(review): logging.AUDIT is not a stdlib level; it is presumably
    # registered elsewhere in this module — confirm before reusing this
    # class standalone.
    LEVEL_COLORS = {
        logging.DEBUG: '\033[00;32m',  # GREEN
        logging.INFO: '\033[00;36m',  # CYAN
        logging.AUDIT: '\033[01;36m',  # BOLD CYAN
        logging.WARN: '\033[01;33m',  # BOLD YELLOW
        logging.ERROR: '\033[01;31m',  # BOLD RED
        logging.CRITICAL: '\033[01;31m',  # BOLD RED
    }

    def format(self, record):
        # Attach the escape sequence for this record's level, then defer
        # to the normal StreamHandler formatting.
        record.color = self.LEVEL_COLORS[record.levelno]
        return logging.StreamHandler.format(self, record)
|
||||||
|
|
||||||
|
|
||||||
|
class DeprecatedConfig(Exception):
    """Raised on a fatal use of a deprecated configuration option.

    :param msg: description of the deprecated usage, interpolated into
        the class-level message template.
    """

    message = _("Fatal call to deprecated config: %(msg)s")

    def __init__(self, msg):
        # BUG FIX: the original called super(Exception, self).__init__,
        # which starts the MRO walk *after* Exception and thus skips
        # Exception's own initializer; anchor super() at this class.
        super(DeprecatedConfig, self).__init__(self.message % dict(msg=msg))
|
147
octavia/openstack/common/loopingcall.py
Normal file
147
octavia/openstack/common/loopingcall.py
Normal file
@ -0,0 +1,147 @@
|
|||||||
|
# Copyright 2010 United States Government as represented by the
|
||||||
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
|
# Copyright 2011 Justin Santa Barbara
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
|
||||||
|
from eventlet import event
|
||||||
|
from eventlet import greenthread
|
||||||
|
|
||||||
|
from octavia.openstack.common.gettextutils import _LE, _LW
|
||||||
|
from octavia.openstack.common import log as logging
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
# NOTE(zyluo): This lambda function was declared to avoid mocking collisions
|
||||||
|
# with time.time() called in the standard logging module
|
||||||
|
# during unittests.
|
||||||
|
_ts = lambda: time.time()
|
||||||
|
|
||||||
|
|
||||||
|
class LoopingCallDone(Exception):
    """Exception to break out and stop a LoopingCallBase.

    The poll-function passed to LoopingCallBase can raise this exception to
    break out of the loop normally. This is somewhat analogous to
    StopIteration.

    An optional return-value can be included as the argument to the exception;
    this return-value will be returned by LoopingCallBase.wait()

    """

    def __init__(self, retvalue=True):
        """:param retvalue: Value that LoopingCallBase.wait() should return."""
        self.retvalue = retvalue
|
||||||
|
|
||||||
|
|
||||||
|
class LoopingCallBase(object):
    """Common state/plumbing shared by the looping-call variants.

    Holds the callable plus its arguments, the running flag, and the
    completion event (`done`) that subclasses' start() methods create.
    """

    def __init__(self, f=None, *args, **kw):
        self.f = f
        self.args = args
        self.kw = kw
        self._running = False
        # Set by start(); wait() blocks on it until the loop finishes.
        self.done = None

    def stop(self):
        """Ask the loop to exit after the current iteration."""
        self._running = False

    def wait(self):
        """Block until the loop completes; return its final value."""
        return self.done.wait()
|
||||||
|
|
||||||
|
|
||||||
|
class FixedIntervalLoopingCall(LoopingCallBase):
    """A fixed interval looping call."""

    def start(self, interval, initial_delay=None):
        """Spawn a greenthread calling self.f every *interval* seconds.

        :param interval: target seconds between invocations of self.f
        :param initial_delay: optional seconds to sleep before the first call
        :returns: the eventlet Event stored on self.done; it fires with
            True (normal stop), the LoopingCallDone retvalue, or the
            callee's exception.
        """
        self._running = True
        done = event.Event()

        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    start = _ts()
                    self.f(*self.args, **self.kw)
                    end = _ts()
                    if not self._running:
                        break
                    # Positive delay means the call overran the interval.
                    delay = end - start - interval
                    if delay > 0:
                        LOG.warn(_LW('task %(func_name)s run outlasted '
                                     'interval by %(delay).2f sec'),
                                 {'func_name': repr(self.f), 'delay': delay})
                    # Sleep the remainder of the interval; 0 on overrun so
                    # other greenthreads still get a chance to run.
                    greenthread.sleep(-delay if delay < 0 else 0)
            except LoopingCallDone as e:
                # Callee requested a clean stop; forward its return value.
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_LE('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)

        self.done = done

        greenthread.spawn_n(_inner)
        return self.done
|
||||||
|
|
||||||
|
|
||||||
|
class DynamicLoopingCall(LoopingCallBase):
    """A looping call which sleeps until the next known event.

    The function called should return how long to sleep for before being
    called again.
    """

    def start(self, initial_delay=None, periodic_interval_max=None):
        """Spawn a greenthread running self.f with callee-chosen sleeps.

        :param initial_delay: optional seconds to sleep before the first call
        :param periodic_interval_max: optional upper bound applied to the
            sleep duration returned by self.f
        :returns: the eventlet Event stored on self.done (see
            FixedIntervalLoopingCall.start for its semantics)
        """
        self._running = True
        done = event.Event()

        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    # The callee decides how long to idle before the next run.
                    idle = self.f(*self.args, **self.kw)
                    if not self._running:
                        break

                    if periodic_interval_max is not None:
                        idle = min(idle, periodic_interval_max)
                    LOG.debug('Dynamic looping call %(func_name)s sleeping '
                              'for %(idle).02f seconds',
                              {'func_name': repr(self.f), 'idle': idle})
                    greenthread.sleep(idle)
            except LoopingCallDone as e:
                # Callee requested a clean stop; forward its return value.
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_LE('in dynamic looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)

        self.done = done

        greenthread.spawn(_inner)
        return self.done
|
0
octavia/openstack/common/middleware/__init__.py
Normal file
0
octavia/openstack/common/middleware/__init__.py
Normal file
56
octavia/openstack/common/middleware/base.py
Normal file
56
octavia/openstack/common/middleware/base.py
Normal file
@ -0,0 +1,56 @@
|
|||||||
|
# Copyright 2011 OpenStack Foundation.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""Base class(es) for WSGI Middleware."""
|
||||||
|
|
||||||
|
import webob.dec
|
||||||
|
|
||||||
|
|
||||||
|
class Middleware(object):
    """Base WSGI middleware wrapper.

    These classes require an application to be initialized that will be called
    next. By default the middleware will simply call its wrapped app, or you
    can override __call__ to customize its behavior.
    """

    @classmethod
    def factory(cls, global_conf, **local_conf):
        """Factory method for paste.deploy."""
        return cls

    def __init__(self, application):
        self.application = application

    def process_request(self, req):
        """Called on each request.

        If this returns None, the next application down the stack will be
        executed. If it returns a response then that response will be returned
        and execution will stop here.
        """
        return None

    def process_response(self, response):
        """Do whatever you'd like to the response."""
        return response

    @webob.dec.wsgify
    def __call__(self, req):
        # Short-circuit when the request hook produced a response itself.
        early = self.process_request(req)
        if early:
            return early
        return self.process_response(req.get_response(self.application))
|
46
octavia/openstack/common/middleware/catch_errors.py
Normal file
46
octavia/openstack/common/middleware/catch_errors.py
Normal file
@ -0,0 +1,46 @@
|
|||||||
|
# Copyright (c) 2013 NEC Corporation
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""Middleware that provides high-level error handling.
|
||||||
|
|
||||||
|
It catches all exceptions from subsequent applications in WSGI pipeline
|
||||||
|
to hide internal errors from API response.
|
||||||
|
"""
|
||||||
|
import logging
|
||||||
|
|
||||||
|
import webob.dec
|
||||||
|
import webob.exc
|
||||||
|
|
||||||
|
from octavia.openstack.common.gettextutils import _LE
|
||||||
|
from octavia.openstack.common.middleware import base
|
||||||
|
from octavia.openstack.common import versionutils
|
||||||
|
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
@versionutils.deprecated(as_of=versionutils.deprecated.JUNO,
                         in_favor_of='octavia.middleware.CatchErrors')
class CatchErrorsMiddleware(base.Middleware):
    """Catch all exceptions from the wrapped WSGI app and return a 500.

    Hides internal errors from the API response; the full traceback is
    still written to the log via LOG.exception.
    """

    @webob.dec.wsgify
    def __call__(self, req):
        try:
            response = req.get_response(self.application)
        except Exception:
            # BUG FIX: the message contained a bare '%s' placeholder with no
            # argument, so a literal '%s' was logged; supply the request.
            LOG.exception(_LE('An error occurred during '
                              'processing the request: %s'), req)
            response = webob.exc.HTTPInternalServerError()
        return response
|
31
octavia/openstack/common/middleware/correlation_id.py
Normal file
31
octavia/openstack/common/middleware/correlation_id.py
Normal file
@ -0,0 +1,31 @@
|
|||||||
|
# Copyright (c) 2013 Rackspace Hosting
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""Middleware that attaches a correlation id to WSGI request"""
|
||||||
|
|
||||||
|
import uuid
|
||||||
|
|
||||||
|
from octavia.openstack.common.middleware import base
|
||||||
|
from octavia.openstack.common import versionutils
|
||||||
|
|
||||||
|
|
||||||
|
@versionutils.deprecated(as_of=versionutils.deprecated.JUNO,
                         in_favor_of='octavia.middleware.CorrelationId')
class CorrelationIdMiddleware(base.Middleware):
    """Ensure every request carries an X_CORRELATION_ID header."""

    def process_request(self, req):
        # Keep a caller-supplied id; otherwise mint a fresh UUID4.
        cid = req.headers.get("X_CORRELATION_ID")
        if not cid:
            cid = str(uuid.uuid4())
        req.headers['X_CORRELATION_ID'] = cid
|
63
octavia/openstack/common/middleware/debug.py
Normal file
63
octavia/openstack/common/middleware/debug.py
Normal file
@ -0,0 +1,63 @@
|
|||||||
|
# Copyright 2011 OpenStack Foundation.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""Debug middleware"""
|
||||||
|
|
||||||
|
from __future__ import print_function
|
||||||
|
|
||||||
|
import sys
|
||||||
|
|
||||||
|
import six
|
||||||
|
import webob.dec
|
||||||
|
|
||||||
|
from octavia.openstack.common.middleware import base
|
||||||
|
from octavia.openstack.common import versionutils
|
||||||
|
|
||||||
|
|
||||||
|
@versionutils.deprecated(as_of=versionutils.deprecated.JUNO,
                         in_favor_of='octavia.middleware.Debug')
class Debug(base.Middleware):
    """Helper class that returns debug information.

    Can be inserted into any WSGI application chain to get information about
    the request and response.
    """

    @webob.dec.wsgify
    def __call__(self, req):
        # Dump the full WSGI environ before handing the request on.
        print(("*" * 40) + " REQUEST ENVIRON")
        for key, value in req.environ.items():
            print(key, "=", value)
        print()
        resp = req.get_response(self.application)

        # Dump the response headers.
        print(("*" * 40) + " RESPONSE HEADERS")
        for (key, value) in six.iteritems(resp.headers):
            print(key, "=", value)
        print()

        # Wrap the body iterator so the body is printed as it is served,
        # without buffering it here.
        resp.app_iter = self.print_generator(resp.app_iter)

        return resp

    @staticmethod
    def print_generator(app_iter):
        """Prints the contents of a wrapper string iterator when iterated."""
        print(("*" * 40) + " BODY")
        for part in app_iter:
            # Echo each chunk to stdout while still yielding it downstream.
            sys.stdout.write(part)
            sys.stdout.flush()
            yield part
        print()
|
44
octavia/openstack/common/middleware/request_id.py
Normal file
44
octavia/openstack/common/middleware/request_id.py
Normal file
@ -0,0 +1,44 @@
|
|||||||
|
# Copyright (c) 2013 NEC Corporation
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""Middleware that ensures request ID.
|
||||||
|
|
||||||
|
It ensures to assign request ID for each API request and set it to
|
||||||
|
request environment. The request ID is also added to API response.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import webob.dec
|
||||||
|
|
||||||
|
from octavia.openstack.common import context
|
||||||
|
from octavia.openstack.common.middleware import base
|
||||||
|
from octavia.openstack.common import versionutils
|
||||||
|
|
||||||
|
|
||||||
|
ENV_REQUEST_ID = 'openstack.request_id'
|
||||||
|
HTTP_RESP_HEADER_REQUEST_ID = 'x-openstack-request-id'
|
||||||
|
|
||||||
|
|
||||||
|
@versionutils.deprecated(as_of=versionutils.deprecated.JUNO,
                         in_favor_of='octavia.middleware.RequestId')
class RequestIdMiddleware(base.Middleware):
    """Assign a request ID to each request and echo it in the response."""

    @webob.dec.wsgify
    def __call__(self, req):
        rid = context.generate_request_id()
        # Expose the id to downstream apps via the WSGI environ.
        req.environ[ENV_REQUEST_ID] = rid
        resp = req.get_response(self.application)
        # Only add the response header if nothing downstream set it.
        if HTTP_RESP_HEADER_REQUEST_ID not in resp.headers:
            resp.headers.add(HTTP_RESP_HEADER_REQUEST_ID, rid)
        return resp
|
85
octavia/openstack/common/middleware/sizelimit.py
Normal file
85
octavia/openstack/common/middleware/sizelimit.py
Normal file
@ -0,0 +1,85 @@
|
|||||||
|
# Copyright (c) 2012 Red Hat, Inc.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
Request Body limiting middleware.
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
from oslo.config import cfg
|
||||||
|
import webob.dec
|
||||||
|
import webob.exc
|
||||||
|
|
||||||
|
from octavia.openstack.common.gettextutils import _
|
||||||
|
from octavia.openstack.common.middleware import base
|
||||||
|
from octavia.openstack.common import versionutils
|
||||||
|
|
||||||
|
|
||||||
|
# default request size is 112k
# Registered on the global CONF so RequestBodySizeLimiter below can read
# CONF.max_request_body_size.
max_req_body_size = cfg.IntOpt('max_request_body_size',
                               deprecated_name='osapi_max_request_body_size',
                               default=114688,
                               help='The maximum body size for each '
                                    ' request, in bytes.')

CONF = cfg.CONF
CONF.register_opt(max_req_body_size)
|
||||||
|
|
||||||
|
|
||||||
|
class LimitingReader(object):
    """Reader to limit the size of an incoming request."""

    def __init__(self, data, limit):
        """Initiates LimitingReader object.

        :param data: Underlying data object
        :param limit: maximum number of bytes the reader should allow
        """
        self.data = data
        self.limit = limit
        self.bytes_read = 0

    def _account(self, nbytes):
        # Track cumulative consumption; fail once the limit is exceeded.
        self.bytes_read += nbytes
        if self.bytes_read > self.limit:
            msg = _("Request is too large.")
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)

    def __iter__(self):
        for chunk in self.data:
            self._account(len(chunk))
            yield chunk

    def read(self, i=None):
        result = self.data.read(i)
        self._account(len(result))
        return result
|
||||||
|
|
||||||
|
|
||||||
|
@versionutils.deprecated(as_of=versionutils.deprecated.JUNO,
                         in_favor_of='octavia.middleware.RequestBodySizeLimiter')
class RequestBodySizeLimiter(base.Middleware):
    """Limit the size of incoming requests."""

    @webob.dec.wsgify
    def __call__(self, req):
        # Fast path: a declared Content-Length over the limit is rejected
        # outright with 413 before the body is read.
        if (req.content_length is not None and
                req.content_length > CONF.max_request_body_size):
            msg = _("Request is too large.")
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
        # No declared length (e.g. chunked encoding): wrap the body stream so
        # the limit is enforced lazily as the application reads it.
        if req.content_length is None and req.is_body_readable:
            limiter = LimitingReader(req.body_file,
                                     CONF.max_request_body_size)
            req.body_file = limiter
        return self.application
|
163
octavia/openstack/common/network_utils.py
Normal file
163
octavia/openstack/common/network_utils.py
Normal file
@ -0,0 +1,163 @@
|
|||||||
|
# Copyright 2012 OpenStack Foundation.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
Network-related utilities and helper functions.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import socket
|
||||||
|
|
||||||
|
from six.moves.urllib import parse
|
||||||
|
|
||||||
|
from octavia.openstack.common.gettextutils import _LW
|
||||||
|
|
||||||
|
# Module-level logger, named after this module per logging convention.
LOG = logging.getLogger(__name__)
||||||
|
def parse_host_port(address, default_port=None):
    """Interpret a string as a host:port pair.

    An IPv6 address MUST be escaped if accompanied by a port,
    because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334
    means both [2001:db8:85a3::8a2e:370:7334] and
    [2001:db8:85a3::8a2e:370]:7334.

    >>> parse_host_port('server01:80')
    ('server01', 80)
    >>> parse_host_port('server01')
    ('server01', None)
    >>> parse_host_port('server01', default_port=1234)
    ('server01', 1234)
    >>> parse_host_port('[::1]:80')
    ('::1', 80)
    >>> parse_host_port('[::1]')
    ('::1', None)
    >>> parse_host_port('[::1]', default_port=1234)
    ('::1', 1234)
    >>> parse_host_port('2001:db8:85a3::8a2e:370:7334', default_port=1234)
    ('2001:db8:85a3::8a2e:370:7334', 1234)
    >>> parse_host_port(None)
    (None, None)
    """
    if not address:
        return (None, None)

    if address[0] == '[':
        # Escaped (bracketed) ipv6: split the host from the optional
        # ":port" trailer after the closing bracket.
        inner, trailer = address[1:].split(']')
        host = inner
        port = trailer.split(':')[1] if ':' in trailer else default_port
    else:
        if address.count(':') == 1:
            # Exactly one colon: host:port.
            host, port = address.split(':')
        else:
            # 0 colons means ipv4/hostname; >1 means an unescaped ipv6
            # address, for which a port is prohibited.
            host = address
            port = default_port

    return (host, None if port is None else int(port))
|
||||||
|
|
||||||
|
|
||||||
|
class ModifiedSplitResult(parse.SplitResult):
    """Split results class for urlsplit."""

    # NOTE(dims): The functions below are needed for Python 2.6.x.
    # We can remove these when we drop support for 2.6.x.
    @property
    def hostname(self):
        # Strip any "user:pass@" prefix before parsing host/port.
        netloc = self.netloc.split('@', 1)[-1]
        host, port = parse_host_port(netloc)
        return host

    @property
    def port(self):
        # Strip any "user:pass@" prefix before parsing host/port.
        netloc = self.netloc.split('@', 1)[-1]
        host, port = parse_host_port(netloc)
        return port
|
||||||
|
|
||||||
|
|
||||||
|
def urlsplit(url, scheme='', allow_fragments=True):
    """Parse a URL using urlparse.urlsplit(), splitting query and fragments.
    This function papers over Python issue9374 when needed.

    The parameters are the same as urlparse.urlsplit.
    """
    scheme, netloc, path, query, fragment = parse.urlsplit(
        url, scheme, allow_fragments)
    # Work around issue9374: older urlsplit may leave the fragment and the
    # query string glued onto the path, so peel them off manually.
    if allow_fragments and '#' in path:
        path, fragment = path.split('#', 1)
    if '?' in path:
        path, query = path.split('?', 1)
    return ModifiedSplitResult(scheme, netloc, path, query, fragment)
|
||||||
|
|
||||||
|
|
||||||
|
def set_tcp_keepalive(sock, tcp_keepalive=True,
                      tcp_keepidle=None,
                      tcp_keepalive_interval=None,
                      tcp_keepalive_count=None):
    """Set values for tcp keepalive parameters

    This function configures tcp keepalive parameters if users wish to do
    so.

    :param tcp_keepalive: Boolean, turn on or off tcp_keepalive. If users are
        not sure, this should be True, and default values will be used.

    :param tcp_keepidle: time to wait before starting to send keepalive probes
    :param tcp_keepalive_interval: time between successive probes, once the
        initial wait time is over
    :param tcp_keepalive_count: number of probes to send before the connection
        is killed
    """

    # NOTE(praneshp): Despite keepalive being a tcp concept, the level is
    # still SOL_SOCKET. This is a quirk.
    if isinstance(tcp_keepalive, bool):
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, tcp_keepalive)
    else:
        raise TypeError("tcp_keepalive must be a boolean")

    # The fine-grained tunables below only make sense when keepalive is on.
    if not tcp_keepalive:
        return

    # These options aren't available in the OS X version of eventlet,
    # Idle + Count * Interval effectively gives you the total timeout.
    if tcp_keepidle is not None:
        if hasattr(socket, 'TCP_KEEPIDLE'):
            sock.setsockopt(socket.IPPROTO_TCP,
                            socket.TCP_KEEPIDLE,
                            tcp_keepidle)
        else:
            LOG.warning(_LW('tcp_keepidle not available on your system'))
    if tcp_keepalive_interval is not None:
        if hasattr(socket, 'TCP_KEEPINTVL'):
            sock.setsockopt(socket.IPPROTO_TCP,
                            socket.TCP_KEEPINTVL,
                            tcp_keepalive_interval)
        else:
            LOG.warning(_LW('tcp_keepintvl not available on your system'))
    if tcp_keepalive_count is not None:
        if hasattr(socket, 'TCP_KEEPCNT'):
            sock.setsockopt(socket.IPPROTO_TCP,
                            socket.TCP_KEEPCNT,
                            tcp_keepalive_count)
        else:
            # BUG FIX: the warning previously said 'tcp_keepknt', which is
            # not the name of any option; the option being probed here is
            # TCP_KEEPCNT (tcp_keepcnt).
            LOG.warning(_LW('tcp_keepcnt not available on your system'))
|
206
octavia/openstack/common/periodic_task.py
Normal file
206
octavia/openstack/common/periodic_task.py
Normal file
@ -0,0 +1,206 @@
|
|||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import random
|
||||||
|
import time
|
||||||
|
|
||||||
|
from oslo.config import cfg
|
||||||
|
import six
|
||||||
|
|
||||||
|
from octavia.openstack.common.gettextutils import _, _LE, _LI
|
||||||
|
from octavia.openstack.common import log as logging
|
||||||
|
|
||||||
|
|
||||||
|
# Config knob: operators that run periodic tasks in a dedicated process can
# disable the externally-runnable ones here.
periodic_opts = [
    cfg.BoolOpt('run_external_periodic_tasks',
                default=True,
                help='Some periodic tasks can be run in a separate process. '
                     'Should we run them here?'),
]

CONF = cfg.CONF
CONF.register_opts(periodic_opts)

LOG = logging.getLogger(__name__)

# Interval (seconds) used for tasks decorated without an explicit spacing.
DEFAULT_INTERVAL = 60.0
|
||||||
|
class InvalidPeriodicTaskArg(Exception):
    # Raised when the @periodic_task decorator receives an obsolete or
    # unknown keyword argument (e.g. the old 'ticks_between_runs').
    message = _("Unexpected argument for periodic task creation: %(arg)s.")
|
|
||||||
|
|
||||||
|
def periodic_task(*args, **kwargs):
    """Decorator to indicate that a method is a periodic task.

    This decorator can be used in two ways:

    1. Without arguments '@periodic_task', this will be run on the default
       interval of 60 seconds.

    2. With arguments:
       @periodic_task(spacing=N [, run_immediately=[True|False]])
       this will be run on approximately every N seconds. If this number is
       negative the periodic task will be disabled. If the run_immediately
       argument is provided and has a value of 'True', the first run of the
       task will be shortly after task scheduler starts. If
       run_immediately is omitted or set to 'False', the first time the
       task runs will be approximately N seconds after the task scheduler
       starts.
    """
    def decorator(f):
        # Reject the obsolete pre-spacing keyword up front.
        if 'ticks_between_runs' in kwargs:
            raise InvalidPeriodicTaskArg(arg='ticks_between_runs')

        # Mark the function so the metaclass can collect it, and record
        # whether an external process is allowed to run it instead.
        f._periodic_task = True
        f._periodic_external_ok = kwargs.pop('external_process_ok', False)
        if f._periodic_external_ok and not CONF.run_external_periodic_tasks:
            f._periodic_enabled = False
        else:
            f._periodic_enabled = kwargs.pop('enabled', True)

        # Scheduling parameters: 0 spacing means "use the default interval";
        # a None last-run timestamp triggers an immediate first run.
        f._periodic_spacing = kwargs.pop('spacing', 0)
        f._periodic_immediate = kwargs.pop('run_immediately', False)
        f._periodic_last_run = None if f._periodic_immediate else time.time()
        return f

    # NOTE(sirp): The `if` is necessary to allow the decorator to be used with
    # and without parenthesis: with kwargs we were called as a decorator
    # factory (periodic_task(...)(f)); without, the function itself is the
    # sole positional argument (periodic_task(f)).
    return decorator if kwargs else decorator(args[0])
||||||
|
|
||||||
|
|
||||||
|
class _PeriodicTasksMeta(type):
    def __init__(cls, names, bases, dict_):
        """Metaclass that allows us to collect decorated periodic tasks."""
        super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_)

        # NOTE(sirp): if the attribute is not present then we must be the base
        # class, so, go ahead an initialize it. If the attribute is present,
        # then we're a subclass so make a copy of it so we don't step on our
        # parent's toes.
        try:
            cls._periodic_tasks = cls._periodic_tasks[:]
        except AttributeError:
            cls._periodic_tasks = []

        try:
            cls._periodic_spacing = cls._periodic_spacing.copy()
        except AttributeError:
            cls._periodic_spacing = {}

        # Collect functions marked by the @periodic_task decorator.  Only
        # attributes defined directly on this class are scanned; inherited
        # tasks were already collected when the parent class was created.
        for value in cls.__dict__.values():
            if getattr(value, '_periodic_task', False):
                task = value
                name = task.__name__

                if task._periodic_spacing < 0:
                    LOG.info(_LI('Skipping periodic task %(task)s because '
                                 'its interval is negative'),
                             {'task': name})
                    continue
                if not task._periodic_enabled:
                    LOG.info(_LI('Skipping periodic task %(task)s because '
                                 'it is disabled'),
                             {'task': name})
                    continue

                # A periodic spacing of zero indicates that this task should
                # be run on the default interval to avoid running too
                # frequently.
                # NOTE(review): this mutates the function attribute in place,
                # so the normalized spacing is shared by all classes that use
                # the same decorated function.
                if task._periodic_spacing == 0:
                    task._periodic_spacing = DEFAULT_INTERVAL

                cls._periodic_tasks.append((name, task))
                cls._periodic_spacing[name] = task._periodic_spacing
||||||
|
|
||||||
|
|
||||||
|
def _nearest_boundary(last_run, spacing):
|
||||||
|
"""Find nearest boundary which is in the past, which is a multiple of the
|
||||||
|
spacing with the last run as an offset.
|
||||||
|
|
||||||
|
Eg if last run was 10 and spacing was 7, the new last run could be: 17, 24,
|
||||||
|
31, 38...
|
||||||
|
|
||||||
|
0% to 5% of the spacing value will be added to this value to ensure tasks
|
||||||
|
do not synchronize. This jitter is rounded to the nearest second, this
|
||||||
|
means that spacings smaller than 20 seconds will not have jitter.
|
||||||
|
"""
|
||||||
|
current_time = time.time()
|
||||||
|
if last_run is None:
|
||||||
|
return current_time
|
||||||
|
delta = current_time - last_run
|
||||||
|
offset = delta % spacing
|
||||||
|
# Add up to 5% jitter
|
||||||
|
jitter = int(spacing * (random.random() / 20))
|
||||||
|
return current_time - offset + jitter
|
||||||
|
|
||||||
|
|
||||||
|
@six.add_metaclass(_PeriodicTasksMeta)
class PeriodicTasks(object):
    def __init__(self):
        super(PeriodicTasks, self).__init__()
        # Per-instance last-run timestamps, seeded from the decorator's
        # per-function values collected by the metaclass.
        self._periodic_last_run = {}
        for name, task in self._periodic_tasks:
            self._periodic_last_run[name] = task._periodic_last_run

    def run_periodic_tasks(self, context, raise_on_error=False):
        """Tasks to be run at a periodic interval."""
        # Returns how long the caller may sleep before any task is due again.
        idle_for = DEFAULT_INTERVAL
        for task_name, task in self._periodic_tasks:
            full_task_name = '.'.join([self.__class__.__name__, task_name])

            spacing = self._periodic_spacing[task_name]
            last_run = self._periodic_last_run[task_name]

            # Check if due, if not skip
            idle_for = min(idle_for, spacing)
            if last_run is not None:
                delta = last_run + spacing - time.time()
                if delta > 0:
                    idle_for = min(idle_for, delta)
                    continue

            LOG.debug("Running periodic task %(full_task_name)s",
                      {"full_task_name": full_task_name})
            # Record the run time *before* invoking the task so a slow task
            # doesn't shift its own schedule.
            self._periodic_last_run[task_name] = _nearest_boundary(
                last_run, spacing)

            try:
                task(self, context)
            except Exception as e:
                if raise_on_error:
                    raise
                LOG.exception(_LE("Error during %(full_task_name)s: %(e)s"),
                              {"full_task_name": full_task_name, "e": e})
            # Yield to other greenthreads/threads between tasks.
            time.sleep(0)

        return idle_for
|
895
octavia/openstack/common/policy.py
Normal file
895
octavia/openstack/common/policy.py
Normal file
@ -0,0 +1,895 @@
|
|||||||
|
# Copyright (c) 2012 OpenStack Foundation.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
Common Policy Engine Implementation
|
||||||
|
|
||||||
|
Policies can be expressed in one of two forms: A list of lists, or a
|
||||||
|
string written in the new policy language.
|
||||||
|
|
||||||
|
In the list-of-lists representation, each check inside the innermost
|
||||||
|
list is combined as with an "and" conjunction--for that check to pass,
|
||||||
|
all the specified checks must pass. These innermost lists are then
|
||||||
|
combined as with an "or" conjunction. This is the original way of
|
||||||
|
expressing policies, but there now exists a new way: the policy
|
||||||
|
language.
|
||||||
|
|
||||||
|
In the policy language, each check is specified the same way as in the
|
||||||
|
list-of-lists representation: a simple "a:b" pair that is matched to
|
||||||
|
the correct code to perform that check. However, conjunction
|
||||||
|
operators are available, allowing for more expressiveness in crafting
|
||||||
|
policies.
|
||||||
|
|
||||||
|
As an example, take the following rule, expressed in the list-of-lists
|
||||||
|
representation::
|
||||||
|
|
||||||
|
[["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]
|
||||||
|
|
||||||
|
In the policy language, this becomes::
|
||||||
|
|
||||||
|
role:admin or (project_id:%(project_id)s and role:projectadmin)
|
||||||
|
|
||||||
|
The policy language also has the "not" operator, allowing a richer
|
||||||
|
policy rule::
|
||||||
|
|
||||||
|
project_id:%(project_id)s and not role:dunce
|
||||||
|
|
||||||
|
It is possible to perform policy checks on the following user
|
||||||
|
attributes (obtained through the token): user_id, domain_id or
|
||||||
|
project_id::
|
||||||
|
|
||||||
|
domain_id:<some_value>
|
||||||
|
|
||||||
|
Attributes sent along with API calls can be used by the policy engine
|
||||||
|
(on the right side of the expression), by using the following syntax::
|
||||||
|
|
||||||
|
<some_value>:user.id
|
||||||
|
|
||||||
|
Contextual attributes of objects identified by their IDs are loaded
|
||||||
|
from the database. They are also available to the policy engine and
|
||||||
|
can be checked through the `target` keyword::
|
||||||
|
|
||||||
|
<some_value>:target.role.name
|
||||||
|
|
||||||
|
All these attributes (related to users, API calls, and context) can be
|
||||||
|
checked against each other or against constants, be it literals (True,
|
||||||
|
<a_number>) or strings.
|
||||||
|
|
||||||
|
Finally, two special policy checks should be mentioned; the policy
|
||||||
|
check "@" will always accept an access, and the policy check "!" will
|
||||||
|
always reject an access. (Note that if a rule is either the empty
|
||||||
|
list ("[]") or the empty string, this is equivalent to the "@" policy
|
||||||
|
check.) Of these, the "!" policy check is probably the most useful,
|
||||||
|
as it allows particular rules to be explicitly disabled.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import abc
|
||||||
|
import ast
|
||||||
|
import re
|
||||||
|
|
||||||
|
from oslo.config import cfg
|
||||||
|
import six
|
||||||
|
import six.moves.urllib.parse as urlparse
|
||||||
|
import six.moves.urllib.request as urlrequest
|
||||||
|
|
||||||
|
from octavia.openstack.common import fileutils
|
||||||
|
from octavia.openstack.common.gettextutils import _, _LE
|
||||||
|
from octavia.openstack.common import jsonutils
|
||||||
|
from octavia.openstack.common import log as logging
|
||||||
|
|
||||||
|
|
||||||
|
# Configuration of the policy engine: which JSON file holds the rules and
# which rule to fall back on when a requested rule is missing.
policy_opts = [
    cfg.StrOpt('policy_file',
               default='policy.json',
               help=_('The JSON file that defines policies.')),
    cfg.StrOpt('policy_default_rule',
               default='default',
               help=_('Default rule. Enforced when a requested rule is not '
                      'found.')),
]

CONF = cfg.CONF
CONF.register_opts(policy_opts)

LOG = logging.getLogger(__name__)

# Registry mapping check kinds (the part before ':') to Check classes.
_checks = {}
|
|
||||||
|
class PolicyNotAuthorized(Exception):
    # Default exception raised by Enforcer.enforce(do_raise=True) when no
    # custom exception class is supplied.

    def __init__(self, rule):
        msg = _("Policy doesn't allow %s to be performed.") % rule
        super(PolicyNotAuthorized, self).__init__(msg)
||||||
|
|
||||||
|
|
||||||
|
class Rules(dict):
    """A store for rules. Handles the default_rule setting directly."""

    @classmethod
    def load_json(cls, data, default_rule=None):
        """Allow loading of JSON rule data."""

        # Suck in the JSON data and parse the rules
        rules = dict((k, parse_rule(v)) for k, v in
                     jsonutils.loads(data).items())

        return cls(rules, default_rule)

    def __init__(self, rules=None, default_rule=None):
        """Initialize the Rules store."""

        super(Rules, self).__init__(rules or {})
        self.default_rule = default_rule

    def __missing__(self, key):
        """Implements the default rule handling."""

        if isinstance(self.default_rule, dict):
            raise KeyError(key)

        # If the default rule isn't actually defined, do something
        # reasonably intelligent
        if not self.default_rule:
            raise KeyError(key)

        if isinstance(self.default_rule, BaseCheck):
            return self.default_rule

        # We need to check this or we can get infinite recursion
        if self.default_rule not in self:
            raise KeyError(key)

        # NOTE(review): this elif binds to the `if` above, so it is only
        # reached when the default rule IS present in the dict; a non-string
        # default that passes the earlier checks falls through and returns
        # None implicitly — confirm that is intended upstream.
        elif isinstance(self.default_rule, six.string_types):
            return self[self.default_rule]

    def __str__(self):
        """Dumps a string representation of the rules."""

        # Start by building the canonical strings for the rules
        out_rules = {}
        for key, value in self.items():
            # Use empty string for singleton TrueCheck instances
            if isinstance(value, TrueCheck):
                out_rules[key] = ''
            else:
                out_rules[key] = str(value)

        # Dump a pretty-printed JSON representation
        return jsonutils.dumps(out_rules, indent=4)
||||||
|
|
||||||
|
|
||||||
|
class Enforcer(object):
    """Responsible for loading and enforcing rules.

    :param policy_file: Custom policy file to use, if none is
                        specified, `CONF.policy_file` will be
                        used.
    :param rules: Default dictionary / Rules to use. It will be
                  considered just in the first instantiation. If
                  `load_rules(True)`, `clear()` or `set_rules(True)`
                  is called this will be overwritten.
    :param default_rule: Default rule to use, CONF.default_rule will
                         be used if none is specified.
    :param use_conf: Whether to load rules from cache or config file.
    """

    def __init__(self, policy_file=None, rules=None,
                 default_rule=None, use_conf=True):
        self.rules = Rules(rules, default_rule)
        self.default_rule = default_rule or CONF.policy_default_rule

        # Resolved lazily by load_rules()/_get_policy_path().
        self.policy_path = None
        self.policy_file = policy_file or CONF.policy_file
        self.use_conf = use_conf

    def set_rules(self, rules, overwrite=True, use_conf=False):
        """Create a new Rules object based on the provided dict of rules.

        :param rules: New rules to use. It should be an instance of dict.
        :param overwrite: Whether to overwrite current rules or update them
                          with the new rules.
        :param use_conf: Whether to reload rules from cache or config file.
        """

        if not isinstance(rules, dict):
            raise TypeError(_("Rules must be an instance of dict or Rules, "
                              "got %s instead") % type(rules))
        self.use_conf = use_conf
        if overwrite:
            self.rules = Rules(rules, self.default_rule)
        else:
            self.rules.update(rules)

    def clear(self):
        """Clears Enforcer rules, policy's cache and policy's path."""
        self.set_rules({})
        fileutils.delete_cached_file(self.policy_path)
        self.default_rule = None
        self.policy_path = None

    def load_rules(self, force_reload=False):
        """Loads policy_path's rules.

        Policy file is cached and will be reloaded if modified.

        :param force_reload: Whether to overwrite current rules.
        """

        if force_reload:
            self.use_conf = force_reload

        if self.use_conf:
            if not self.policy_path:
                self.policy_path = self._get_policy_path()

            # read_cached_file only re-reads the file when its mtime changed
            # (or when force_reload is set).
            reloaded, data = fileutils.read_cached_file(
                self.policy_path, force_reload=force_reload)
            if reloaded or not self.rules:
                rules = Rules.load_json(data, self.default_rule)
                self.set_rules(rules)
                LOG.debug("Rules successfully reloaded")

    def _get_policy_path(self):
        """Locate the policy json data file.

        :param policy_file: Custom policy file to locate.

        :returns: The policy path

        :raises: ConfigFilesNotFoundError if the file couldn't
                 be located.
        """
        policy_file = CONF.find_file(self.policy_file)

        if policy_file:
            return policy_file

        raise cfg.ConfigFilesNotFoundError((self.policy_file,))

    def enforce(self, rule, target, creds, do_raise=False,
                exc=None, *args, **kwargs):
        """Checks authorization of a rule against the target and credentials.

        :param rule: A string or BaseCheck instance specifying the rule
                     to evaluate.
        :param target: As much information about the object being operated
                       on as possible, as a dictionary.
        :param creds: As much information about the user performing the
                      action as possible, as a dictionary.
        :param do_raise: Whether to raise an exception or not if check
                         fails.
        :param exc: Class of the exception to raise if the check fails.
                    Any remaining arguments passed to check() (both
                    positional and keyword arguments) will be passed to
                    the exception class. If not specified, PolicyNotAuthorized
                    will be used.

        :return: Returns False if the policy does not allow the action and
                 exc is not provided; otherwise, returns a value that
                 evaluates to True. Note: for rules using the "case"
                 expression, this True value will be the specified string
                 from the expression.
        """

        self.load_rules()

        # Allow the rule to be a Check tree
        if isinstance(rule, BaseCheck):
            result = rule(target, creds, self)
        elif not self.rules:
            # No rules to reference means we're going to fail closed
            result = False
        else:
            try:
                # Evaluate the rule
                result = self.rules[rule](target, creds, self)
            except KeyError:
                LOG.debug("Rule [%s] doesn't exist" % rule)
                # If the rule doesn't exist, fail closed
                result = False

        # If it is False, raise the exception if requested
        if do_raise and not result:
            if exc:
                raise exc(*args, **kwargs)

            raise PolicyNotAuthorized(rule)

        return result
||||||
|
|
||||||
|
|
||||||
|
@six.add_metaclass(abc.ABCMeta)
class BaseCheck(object):
    """Abstract base class for Check classes."""

    @abc.abstractmethod
    def __str__(self):
        """String representation of the Check tree rooted at this node."""

        pass

    @abc.abstractmethod
    def __call__(self, target, cred, enforcer):
        """Triggers if instance of the class is called.

        Performs the check. Returns False to reject the access or a
        true value (not necessary True) to accept the access.
        """

        pass
||||||
|
|
||||||
|
|
||||||
|
class FalseCheck(BaseCheck):
    """A policy check that always returns False (disallow)."""

    def __str__(self):
        """The canonical spelling of the always-deny check."""
        return "!"

    def __call__(self, target, cred, enforcer):
        """Deny access unconditionally."""
        return False
||||||
|
|
||||||
|
|
||||||
|
class TrueCheck(BaseCheck):
    """A policy check that always returns True (allow)."""

    def __str__(self):
        """The canonical spelling of the always-allow check."""
        return "@"

    def __call__(self, target, cred, enforcer):
        """Grant access unconditionally."""
        return True
||||||
|
|
||||||
|
|
||||||
|
class Check(BaseCheck):
    """A base class to allow for user-defined policy checks."""

    def __init__(self, kind, match):
        """Store the two halves of a "kind:match" policy check.

        :param kind: The kind of the check, i.e., the field before the
                     ':'.
        :param match: The match of the check, i.e., the field after
                      the ':'.
        """
        self.kind = kind
        self.match = match

    def __str__(self):
        """Render the check back into its "kind:match" form."""
        return "%s:%s" % (self.kind, self.match)
||||||
|
|
||||||
|
|
||||||
|
class NotCheck(BaseCheck):
    """Implements the "not" logical operator.

    A policy check that inverts the result of another policy check.
    """

    def __init__(self, rule):
        """Wrap the check whose result will be negated.

        :param rule: The rule to negate. Must be a Check.
        """
        self.rule = rule

    def __str__(self):
        """Render as "not <inner-check>"."""
        return "not %s" % self.rule

    def __call__(self, target, cred, enforcer):
        """Evaluate the wrapped check and return its logical inverse."""
        return not self.rule(target, cred, enforcer)
||||||
|
|
||||||
|
|
||||||
|
class AndCheck(BaseCheck):
    """Implements the "and" logical operator.

    A policy check that requires that a list of other checks all return True.
    """

    def __init__(self, rules):
        """Record the sub-checks to be conjoined.

        :param rules: A list of rules that will be tested.
        """
        self.rules = rules

    def __str__(self):
        """Render as "(a and b and ...)"."""
        return "(%s)" % ' and '.join(str(r) for r in self.rules)

    def __call__(self, target, cred, enforcer):
        """Accept only if every sub-check accepts (short-circuits)."""
        return all(rule(target, cred, enforcer) for rule in self.rules)

    def add_check(self, rule):
        """Append another sub-check to the conjunction.

        Returns the AndCheck object for convenience so calls can be chained.
        """
        self.rules.append(rule)
        return self
||||||
|
|
||||||
|
|
||||||
|
class OrCheck(BaseCheck):
    """Implements the "or" operator.

    A policy check that requires that at least one of a list of other
    checks returns True.
    """

    def __init__(self, rules):
        """Initialize the 'or' check.

        :param rules: A list of rules that will be tested.
        """
        self.rules = rules

    def __str__(self):
        """Return a string representation of this check."""
        return "(%s)" % ' or '.join(str(rule) for rule in self.rules)

    def __call__(self, target, cred, enforcer):
        """Check the policy.

        Requires that at least one rule accept in order to return True;
        any() short-circuits on the first accepting rule, matching the
        original loop behaviour (an empty rule list rejects).
        """
        return any(rule(target, cred, enforcer) for rule in self.rules)

    def add_check(self, rule):
        """Adds rule to be tested.

        Allows addition of another rule to the list of rules that will
        be tested.

        :returns: self, for convenience when chaining.
        """
        self.rules.append(rule)
        return self
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_check(rule):
    """Parse a single base check rule into an appropriate Check object.

    :param rule: A rule string such as "role:admin", or one of the
                 special rules "!" (always false) and "@" (always true).
    :returns: A Check instance.  An unparseable rule fails closed and
              yields a FalseCheck.
    """

    # Handle the special checks
    if rule == '!':
        return FalseCheck()
    elif rule == '@':
        return TrueCheck()

    try:
        kind, match = rule.split(':', 1)
    except Exception:
        # NOTE: pass the rule as a lazy logging argument instead of
        # pre-interpolating with %, so formatting only happens when the
        # record is actually emitted.
        LOG.exception(_LE("Failed to understand rule %s"), rule)
        # If the rule is invalid, we'll fail closed
        return FalseCheck()

    # Find what implements the check
    if kind in _checks:
        return _checks[kind](kind, match)
    elif None in _checks:
        # Fall back to the registered default (generic) check handler.
        return _checks[None](kind, match)
    else:
        LOG.error(_LE("No handler for matches of kind %s"), kind)
        # Unknown kind: fail closed
        return FalseCheck()
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_list_rule(rule):
    """Translates the old list-of-lists syntax into a tree of Check objects.

    Provided for backwards compatibility.  The outer list is joined by
    "or"; each inner list is joined by "and".
    """

    # An empty rule always accepts.
    if not rule:
        return TrueCheck()

    outer = []
    for inner in rule:
        # Elide empty inner lists.
        if not inner:
            continue

        # A bare string behaves like a one-element inner list.
        if isinstance(inner, six.string_types):
            inner = [inner]

        # Parse the inner fragments into Check objects; a single check
        # needs no AndCheck wrapper.
        parsed = [_parse_check(fragment) for fragment in inner]
        outer.append(parsed[0] if len(parsed) == 1 else AndCheck(parsed))

    # Collapse degenerate outer lists: nothing usable fails closed,
    # and a single check needs no OrCheck wrapper.
    if not outer:
        return FalseCheck()
    if len(outer) == 1:
        return outer[0]
    return OrCheck(outer)
|
||||||
|
|
||||||
|
|
||||||
|
# Used for tokenizing the policy language: any run of whitespace
# separates tokens (parentheses are handled separately in _parse_tokenize).
_tokenize_re = re.compile(r'\s+')
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_tokenize(rule):
    """Tokenizer for the policy language.

    Yields (token_type, value) pairs, where token_type is one of
    '(', ')', 'and', 'or', 'not', 'string', or 'check'.

    Most of the single-character tokens are specified in the
    _tokenize_re; however, parentheses need to be handled specially,
    because they can appear inside a check string. Thankfully, those
    parentheses that appear inside a check string can never occur at
    the very beginning or end ("%(variable)s" is the correct syntax).
    """

    for tok in _tokenize_re.split(rule):
        # Skip empty tokens
        if not tok or tok.isspace():
            continue

        # Handle leading parens on the token: emit one '(' token per
        # stripped character.
        clean = tok.lstrip('(')
        for i in range(len(tok) - len(clean)):
            yield '(', '('

        # If it was only parentheses, continue
        if not clean:
            continue
        else:
            tok = clean

        # Handle trailing parens on the token; they are emitted *after*
        # the token itself, below.
        clean = tok.rstrip(')')
        trail = len(tok) - len(clean)

        # Yield the cleaned token
        lowered = clean.lower()
        if lowered in ('and', 'or', 'not'):
            # Special tokens (operators); value keeps original casing.
            yield lowered, clean
        elif clean:
            # Not a special token, but not composed solely of ')'
            if len(tok) >= 2 and ((tok[0], tok[-1]) in
                                  [('"', '"'), ("'", "'")]):
                # It's a quoted string; strip the quotes.
                yield 'string', tok[1:-1]
            else:
                # Parse the fragment into a Check object immediately.
                yield 'check', _parse_check(clean)

        # Yield the trailing parens
        for i in range(trail):
            yield ')', ')'
|
||||||
|
|
||||||
|
|
||||||
|
class ParseStateMeta(type):
    """Metaclass for the ParseState class.

    Facilitates identifying reduction methods: any attribute carrying a
    'reducers' list (attached by the @reducer decorator) is collected
    into a class-level 'reducers' registry.
    """

    def __new__(mcs, name, bases, cls_dict):
        """Create the class.

        Injects the 'reducers' list, a list of (token_sequence,
        method_name) tuples mapping token patterns to the reduction
        methods that handle them.
        """
        collected = [(reduction, attr)
                     for attr, value in cls_dict.items()
                     if hasattr(value, 'reducers')
                     for reduction in value.reducers]

        cls_dict['reducers'] = collected

        return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict)
|
||||||
|
|
||||||
|
|
||||||
|
def reducer(*tokens):
    """Decorator for reduction methods.

    Arguments are a sequence of tokens, in order, which should trigger running
    this reduction method.  A method may carry several such sequences.
    """

    def decorator(func):
        # Lazily create the per-function registry of trigger sequences.
        sequences = getattr(func, 'reducers', None)
        if sequences is None:
            sequences = func.reducers = []

        # Record this token sequence.
        sequences.append(list(tokens))

        return func

    return decorator
|
||||||
|
|
||||||
|
|
||||||
|
@six.add_metaclass(ParseStateMeta)
class ParseState(object):
    """Implement the core of parsing the policy language.

    Uses a greedy reduction algorithm to reduce a sequence of tokens into
    a single terminal, the value of which will be the root of the Check tree.

    Note: error reporting is rather lacking. The best we can get with
    this parser formulation is an overall "parse failed" error.
    Fortunately, the policy language is simple enough that this
    shouldn't be that big a problem.
    """

    def __init__(self):
        """Initialize the ParseState."""

        # Parallel stacks: tokens[i] is the token type, values[i] its value.
        self.tokens = []
        self.values = []

    def reduce(self):
        """Perform a greedy reduction of the token stream.

        If a reducer method matches, it will be executed, then the
        reduce() method will be called recursively to search for any more
        possible reductions.
        """

        for reduction, methname in self.reducers:
            # A reducer fires when the tail of the token stack matches
            # its token sequence exactly.
            if (len(self.tokens) >= len(reduction) and
                    self.tokens[-len(reduction):] == reduction):
                # Get the reduction method
                meth = getattr(self, methname)

                # Reduce the token stream
                results = meth(*self.values[-len(reduction):])

                # Update the tokens and values: the matched tail is
                # replaced by the reducer's (token, value) results.
                self.tokens[-len(reduction):] = [r[0] for r in results]
                self.values[-len(reduction):] = [r[1] for r in results]

                # Check for any more reductions
                return self.reduce()

    def shift(self, tok, value):
        """Adds one more token to the state. Calls reduce()."""

        self.tokens.append(tok)
        self.values.append(value)

        # Do a greedy reduce...
        self.reduce()

    @property
    def result(self):
        """Obtain the final result of the parse.

        :raises ValueError: if the parse failed to reduce to a single result.
        """

        if len(self.values) != 1:
            raise ValueError("Could not parse rule")
        return self.values[0]

    @reducer('(', 'check', ')')
    @reducer('(', 'and_expr', ')')
    @reducer('(', 'or_expr', ')')
    def _wrap_check(self, _p1, check, _p2):
        """Turn parenthesized expressions into a 'check' token."""

        return [('check', check)]

    @reducer('check', 'and', 'check')
    def _make_and_expr(self, check1, _and, check2):
        """Create an 'and_expr'.

        Join two checks by the 'and' operator.
        """

        return [('and_expr', AndCheck([check1, check2]))]

    @reducer('and_expr', 'and', 'check')
    def _extend_and_expr(self, and_expr, _and, check):
        """Extend an 'and_expr' by adding one more check."""

        return [('and_expr', and_expr.add_check(check))]

    @reducer('check', 'or', 'check')
    def _make_or_expr(self, check1, _or, check2):
        """Create an 'or_expr'.

        Join two checks by the 'or' operator.
        """

        return [('or_expr', OrCheck([check1, check2]))]

    @reducer('or_expr', 'or', 'check')
    def _extend_or_expr(self, or_expr, _or, check):
        """Extend an 'or_expr' by adding one more check."""

        return [('or_expr', or_expr.add_check(check))]

    @reducer('not', 'check')
    def _make_not_expr(self, _not, check):
        """Invert the result of another check."""

        return [('check', NotCheck(check))]
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_text_rule(rule):
    """Parses policy to the tree.

    Translates a policy written in the policy language into a tree of
    Check objects.

    :param rule: The policy string to parse.
    :returns: The root Check of the parsed tree; an empty rule yields a
              TrueCheck and an unparseable rule fails closed with a
              FalseCheck.
    """

    # Empty rule means always accept
    if not rule:
        return TrueCheck()

    # Parse the token stream
    state = ParseState()
    for tok, value in _parse_tokenize(rule):
        state.shift(tok, value)

    try:
        return state.result
    except ValueError:
        # Couldn't parse the rule.  NOTE: pass the rule as a lazy logging
        # argument instead of pre-interpolating with %, so formatting only
        # happens when the record is actually emitted.
        LOG.exception(_LE("Failed to understand rule %r"), rule)

        # Fail closed
        return FalseCheck()
|
||||||
|
|
||||||
|
|
||||||
|
def parse_rule(rule):
    """Parses a policy rule into a tree of Check objects."""

    if not isinstance(rule, six.string_types):
        # Anything other than a string uses the legacy list-of-lists syntax.
        return _parse_list_rule(rule)
    # Strings are written in the policy language.
    return _parse_text_rule(rule)
|
||||||
|
|
||||||
|
|
||||||
|
def register(name, func=None):
    """Register a function or Check class as a policy check.

    :param name: Gives the name of the check type, e.g., 'rule',
                 'role', etc. If name is None, a default check type
                 will be registered.
    :param func: If given, provides the function or class to register.
                 If not given, returns a function taking one argument
                 to specify the function or class to register,
                 allowing use as a decorator.
    """

    def decorator(impl):
        # Record the implementation under the given check kind and hand
        # it back unchanged, per the decorator protocol.
        _checks[name] = impl
        return impl

    # Direct-call form: register(name, func).
    if func:
        return decorator(func)

    # Decorator form: @register(name).
    return decorator
|
||||||
|
|
||||||
|
|
||||||
|
@register("rule")
|
||||||
|
class RuleCheck(Check):
|
||||||
|
def __call__(self, target, creds, enforcer):
|
||||||
|
"""Recursively checks credentials based on the defined rules."""
|
||||||
|
|
||||||
|
try:
|
||||||
|
return enforcer.rules[self.match](target, creds, enforcer)
|
||||||
|
except KeyError:
|
||||||
|
# We don't have any matching rule; fail closed
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
@register("role")
|
||||||
|
class RoleCheck(Check):
|
||||||
|
def __call__(self, target, creds, enforcer):
|
||||||
|
"""Check that there is a matching role in the cred dict."""
|
||||||
|
|
||||||
|
return self.match.lower() in [x.lower() for x in creds['roles']]
|
||||||
|
|
||||||
|
|
||||||
|
@register('http')
class HttpCheck(Check):
    """Check that POSTs target/credentials to a remote policy server."""

    def __call__(self, target, creds, enforcer):
        """Check http: rules by calling to a remote server.

        This example implementation simply verifies that the response
        is exactly 'True'.
        """

        url = ('http:' + self.match) % target
        data = {'target': jsonutils.dumps(target),
                'credentials': jsonutils.dumps(creds)}
        # NOTE: on Python 3 urlopen requires bytes for the POST body and
        # read() returns bytes (so comparing to the str "True" was always
        # False); encode/decode fixes that and leaves Python 2 unchanged.
        post_data = urlparse.urlencode(data).encode('utf-8')
        f = urlrequest.urlopen(url, post_data)
        return f.read().decode('utf-8') == "True"
|
||||||
|
|
||||||
|
|
||||||
|
@register(None)
class GenericCheck(Check):
    """Fallback check used when no handler is registered for a kind.

    Registered under None, which _parse_check consults when the rule's
    kind has no dedicated implementation.
    """

    def __call__(self, target, creds, enforcer):
        """Check an individual match.

        Matches look like:

            tenant:%(tenant_id)s
            role:compute:admin
            True:%(user.enabled)s
            'Member':%(role.name)s
        """

        # TODO(termie): do dict inspection via dot syntax
        try:
            # Interpolate the right-hand side against the target dict,
            # e.g. "%(tenant_id)s" -> target['tenant_id'].
            match = self.match % target
        except KeyError:
            # While doing GenericCheck if key not
            # present in Target return false
            return False

        try:
            # Try to interpret self.kind as a literal
            leftval = ast.literal_eval(self.kind)
        except ValueError:
            # Not a literal; fall back to a credentials lookup,
            # failing closed when the credential is absent.
            try:
                leftval = creds[self.kind]
            except KeyError:
                return False
        # Compare as text; the left-hand value is stringified first.
        return match == six.text_type(leftval)
|
285
octavia/openstack/common/processutils.py
Normal file
285
octavia/openstack/common/processutils.py
Normal file
@ -0,0 +1,285 @@
|
|||||||
|
# Copyright 2011 OpenStack Foundation.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
System-level utilities and helper functions.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import errno
|
||||||
|
import logging
|
||||||
|
import multiprocessing
|
||||||
|
import os
|
||||||
|
import random
|
||||||
|
import shlex
|
||||||
|
import signal
|
||||||
|
|
||||||
|
from eventlet.green import subprocess
|
||||||
|
from eventlet import greenthread
|
||||||
|
import six
|
||||||
|
|
||||||
|
from octavia.openstack.common.gettextutils import _
|
||||||
|
from octavia.openstack.common import strutils
|
||||||
|
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class InvalidArgumentError(Exception):
    """Raised when an argument passed to an execution helper is invalid."""

    def __init__(self, message=None):
        super(InvalidArgumentError, self).__init__(message)
|
||||||
|
|
||||||
|
|
||||||
|
class UnknownArgumentError(Exception):
    """Raised when execute() receives keyword arguments it does not know."""

    def __init__(self, message=None):
        super(UnknownArgumentError, self).__init__(message)
|
||||||
|
|
||||||
|
|
||||||
|
class ProcessExecutionError(Exception):
    """Raised when a spawned command exits with a disallowed status.

    Carries the command, its exit code, and captured stdout/stderr.
    """

    def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
                 description=None):
        # NOTE(review): the attributes keep the caller-supplied values
        # (possibly None); only the rendered message below substitutes
        # the defaults ('-' for exit_code, a generic description).
        self.exit_code = exit_code
        self.stderr = stderr
        self.stdout = stdout
        self.cmd = cmd
        self.description = description

        if description is None:
            description = _("Unexpected error while running command.")
        if exit_code is None:
            exit_code = '-'
        message = _('%(description)s\n'
                    'Command: %(cmd)s\n'
                    'Exit code: %(exit_code)s\n'
                    'Stdout: %(stdout)r\n'
                    'Stderr: %(stderr)r') % {'description': description,
                                             'cmd': cmd,
                                             'exit_code': exit_code,
                                             'stdout': stdout,
                                             'stderr': stderr}
        super(ProcessExecutionError, self).__init__(message)
|
||||||
|
|
||||||
|
|
||||||
|
class NoRootWrapSpecified(Exception):
    """Raised when run_as_root is requested without a root_helper."""

    def __init__(self, message=None):
        super(NoRootWrapSpecified, self).__init__(message)
|
||||||
|
|
||||||
|
|
||||||
|
def _subprocess_setup():
    """preexec_fn for Popen: restore default SIGPIPE handling in the child."""
    # Python installs a SIGPIPE handler by default. This is usually not what
    # non-Python subprocesses expect.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
|
||||||
|
|
||||||
|
|
||||||
|
def execute(*cmd, **kwargs):
    """Helper method to shell out and execute a command through subprocess.

    Allows optional retry.

    :param cmd: Passed to subprocess.Popen.
    :type cmd: string
    :param process_input: Send to opened process.
    :type process_input: string
    :param env_variables: Environment variables and their values that
                          will be set for the process.
    :type env_variables: dict
    :param check_exit_code: Single bool, int, or list of allowed exit
                            codes.  Defaults to [0].  Raise
                            :class:`ProcessExecutionError` unless
                            program exits with one of these code.
    :type check_exit_code: boolean, int, or [int]
    :param delay_on_retry: True | False. Defaults to True. If set to True,
                           wait a short amount of time before retrying.
    :type delay_on_retry: boolean
    :param attempts: How many times to retry cmd.
    :type attempts: int
    :param run_as_root: True | False. Defaults to False. If set to True,
                        the command is prefixed by the command specified
                        in the root_helper kwarg.
    :type run_as_root: boolean
    :param root_helper: command to prefix to commands called with
                        run_as_root=True
    :type root_helper: string
    :param shell: whether or not there should be a shell used to
                  execute this command. Defaults to false.
    :type shell: boolean
    :param loglevel: log level for execute commands.
    :type loglevel: int.  (Should be logging.DEBUG or logging.INFO)
    :returns: (stdout, stderr) from process execution
    :raises: :class:`UnknownArgumentError` on
             receiving unknown arguments
    :raises: :class:`ProcessExecutionError`
    """

    process_input = kwargs.pop('process_input', None)
    env_variables = kwargs.pop('env_variables', None)
    check_exit_code = kwargs.pop('check_exit_code', [0])
    ignore_exit_code = False
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    run_as_root = kwargs.pop('run_as_root', False)
    root_helper = kwargs.pop('root_helper', '')
    shell = kwargs.pop('shell', False)
    loglevel = kwargs.pop('loglevel', logging.DEBUG)

    # Normalize check_exit_code to a list of allowed codes.
    if isinstance(check_exit_code, bool):
        ignore_exit_code = not check_exit_code
        check_exit_code = [0]
    elif isinstance(check_exit_code, int):
        check_exit_code = [check_exit_code]

    if kwargs:
        raise UnknownArgumentError(_('Got unknown keyword args: %r') % kwargs)

    if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
        if not root_helper:
            raise NoRootWrapSpecified(
                message=_('Command requested root, but did not '
                          'specify a root helper.'))
        cmd = shlex.split(root_helper) + list(cmd)

    # NOTE: must be a real list, not map().  On Python 3, map() returns a
    # one-shot iterator that ' '.join() below would exhaust, leaving Popen
    # with an empty command.
    cmd = [str(c) for c in cmd]
    sanitized_cmd = strutils.mask_password(' '.join(cmd))

    while attempts > 0:
        attempts -= 1
        try:
            LOG.log(loglevel, _('Running cmd (subprocess): %s'), sanitized_cmd)
            _PIPE = subprocess.PIPE  # pylint: disable=E1101

            if os.name == 'nt':
                # On Windows there is no fork-based preexec hook and
                # close_fds cannot be combined with redirected stdio.
                preexec_fn = None
                close_fds = False
            else:
                preexec_fn = _subprocess_setup
                close_fds = True

            obj = subprocess.Popen(cmd,
                                   stdin=_PIPE,
                                   stdout=_PIPE,
                                   stderr=_PIPE,
                                   close_fds=close_fds,
                                   preexec_fn=preexec_fn,
                                   shell=shell,
                                   env=env_variables)
            result = None
            for _i in six.moves.range(20):
                # NOTE(russellb) 20 is an arbitrary number of retries to
                # prevent any chance of looping forever here.
                try:
                    if process_input is not None:
                        result = obj.communicate(process_input)
                    else:
                        result = obj.communicate()
                except OSError as e:
                    # Retry only on transient interruptions.
                    if e.errno in (errno.EAGAIN, errno.EINTR):
                        continue
                    raise
                break
            obj.stdin.close()  # pylint: disable=E1101
            _returncode = obj.returncode  # pylint: disable=E1101
            # Interpolate lazily so the work is skipped when the record
            # is filtered out by the log level.
            LOG.log(loglevel, 'Result was %s', _returncode)
            if not ignore_exit_code and _returncode not in check_exit_code:
                (stdout, stderr) = result
                sanitized_stdout = strutils.mask_password(stdout)
                sanitized_stderr = strutils.mask_password(stderr)
                raise ProcessExecutionError(exit_code=_returncode,
                                            stdout=sanitized_stdout,
                                            stderr=sanitized_stderr,
                                            cmd=sanitized_cmd)
            return result
        except ProcessExecutionError:
            if not attempts:
                raise
            else:
                LOG.log(loglevel, _('%r failed. Retrying.'), sanitized_cmd)
                if delay_on_retry:
                    greenthread.sleep(random.randint(20, 200) / 100.0)
        finally:
            # NOTE(termie): this appears to be necessary to let the subprocess
            #               call clean something up in between calls, without
            #               it two execute calls in a row hangs the second one
            greenthread.sleep(0)
|
||||||
|
|
||||||
|
|
||||||
|
def trycmd(*args, **kwargs):
    """A wrapper around execute() to more easily handle warnings and errors.

    Returns an (out, err) tuple of strings containing the output of
    the command's stdout and stderr. If 'err' is not empty then the
    command can be considered to have failed.

    :discard_warnings True | False. Defaults to False. If set to True,
                      then for succeeding commands, stderr is cleared
    """
    discard_warnings = kwargs.pop('discard_warnings', False)

    failed = False
    try:
        out, err = execute(*args, **kwargs)
    except ProcessExecutionError as exn:
        # Surface the failure as the err string rather than raising.
        out = ''
        err = six.text_type(exn)
        failed = True

    # Successful commands may still chatter on stderr; optionally drop it.
    if discard_warnings and not failed and err:
        err = ''

    return out, err
|
||||||
|
|
||||||
|
|
||||||
|
def ssh_execute(ssh, cmd, process_input=None,
                addl_env=None, check_exit_code=True):
    """Run a command over an already-connected SSH client.

    :param ssh: A connected client object (its exec_command is used;
                presumably a paramiko SSHClient -- confirm at call site).
    :param cmd: Command string to run remotely.
    :param process_input: Unsupported; passing a truthy value raises.
    :param addl_env: Unsupported; passing a truthy value raises.
    :param check_exit_code: When True, raise ProcessExecutionError on a
                            non-zero remote exit status.
    :returns: (stdout, stderr) of the remote command.
    :raises: :class:`InvalidArgumentError` for unsupported arguments
    :raises: :class:`ProcessExecutionError` on disallowed exit status
    """
    # NOTE: pass cmd/exit_status as lazy logging arguments instead of
    # pre-interpolating with %, so formatting only happens when the
    # record is actually emitted.
    LOG.debug('Running cmd (SSH): %s', cmd)
    if addl_env:
        raise InvalidArgumentError(_('Environment not supported over SSH'))

    if process_input:
        # This is (probably) fixable if we need it...
        raise InvalidArgumentError(_('process_input not supported over SSH'))

    stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
    channel = stdout_stream.channel

    # NOTE(justinsb): This seems suspicious...
    # ...other SSH clients have buffering issues with this approach
    stdout = stdout_stream.read()
    stderr = stderr_stream.read()
    stdin_stream.close()

    exit_status = channel.recv_exit_status()

    # exit_status == -1 if no exit code was returned
    if exit_status != -1:
        LOG.debug('Result was %s', exit_status)
        if check_exit_code and exit_status != 0:
            raise ProcessExecutionError(exit_code=exit_status,
                                        stdout=stdout,
                                        stderr=stderr,
                                        cmd=cmd)

    return (stdout, stderr)
|
||||||
|
|
||||||
|
|
||||||
|
def get_worker_count():
    """Utility to get the default worker count.

    @return: The number of CPUs if that can be determined, else a default
             worker count of 1 is returned.
    """
    try:
        workers = multiprocessing.cpu_count()
    except NotImplementedError:
        # Platform cannot report its CPU count; fall back to one worker.
        workers = 1
    return workers
|
512
octavia/openstack/common/service.py
Normal file
512
octavia/openstack/common/service.py
Normal file
@ -0,0 +1,512 @@
|
|||||||
|
# Copyright 2010 United States Government as represented by the
|
||||||
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
|
# Copyright 2011 Justin Santa Barbara
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""Generic Node base class for all workers that run on hosts."""
|
||||||
|
|
||||||
|
import errno
|
||||||
|
import logging as std_logging
|
||||||
|
import os
|
||||||
|
import random
|
||||||
|
import signal
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Importing just the symbol here because the io module does not
|
||||||
|
# exist in Python 2.6.
|
||||||
|
from io import UnsupportedOperation # noqa
|
||||||
|
except ImportError:
|
||||||
|
# Python 2.6
|
||||||
|
UnsupportedOperation = None
|
||||||
|
|
||||||
|
import eventlet
|
||||||
|
from eventlet import event
|
||||||
|
from oslo.config import cfg
|
||||||
|
|
||||||
|
from octavia.openstack.common import eventlet_backdoor
|
||||||
|
from octavia.openstack.common.gettextutils import _LE, _LI, _LW
|
||||||
|
from octavia.openstack.common import importutils
|
||||||
|
from octavia.openstack.common import log as logging
|
||||||
|
from octavia.openstack.common import systemd
|
||||||
|
from octavia.openstack.common import threadgroup
|
||||||
|
|
||||||
|
|
||||||
|
rpc = importutils.try_import('octavia.openstack.common.rpc')
|
||||||
|
CONF = cfg.CONF
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def _sighup_supported():
    """Return True when this platform defines the SIGHUP signal."""
    return hasattr(signal, 'SIGHUP')
|
||||||
|
|
||||||
|
|
||||||
|
def _is_daemon():
    """Best-effort detection of whether this process runs as a daemon."""
    # The process group for a foreground process will match the
    # process group of the controlling terminal. If those values do
    # not match, or ioctl() fails on the stdout file handle, we assume
    # the process is running in the background as a daemon.
    # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics
    try:
        is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno())
    except OSError as err:
        if err.errno == errno.ENOTTY:
            # Assume we are a daemon because there is no terminal.
            is_daemon = True
        else:
            raise
    except UnsupportedOperation:
        # Could not get the fileno for stdout, so we must be a daemon.
        # NOTE(review): on Python 2.6 UnsupportedOperation is None (see the
        # module-level import fallback) and this clause would itself raise
        # TypeError -- confirm only >= 2.7 is supported.
        is_daemon = True
    return is_daemon
|
||||||
|
|
||||||
|
|
||||||
|
def _is_sighup_and_daemon(signo):
    """Return True when signo is SIGHUP and the process runs as a daemon."""
    if _sighup_supported() and signo == signal.SIGHUP:
        return _is_daemon()
    # Not a SIGHUP (or the platform has no SIGHUP); no daemon check needed.
    return False
|
||||||
|
|
||||||
|
|
||||||
|
def _signo_to_signame(signo):
    """Map a handled signal number to its conventional name.

    Raises KeyError for signals outside the handled set
    (SIGTERM, SIGINT and, where supported, SIGHUP).
    """
    names = {signal.SIGTERM: 'SIGTERM',
             signal.SIGINT: 'SIGINT'}
    if _sighup_supported():
        names[signal.SIGHUP] = 'SIGHUP'
    return names[signo]
|
||||||
|
|
||||||
|
|
||||||
|
def _set_signals_handler(handler):
    """Install handler for SIGTERM, SIGINT and (when available) SIGHUP."""
    handled = [signal.SIGTERM, signal.SIGINT]
    if _sighup_supported():
        handled.append(signal.SIGHUP)
    for signo in handled:
        signal.signal(signo, handler)
|
||||||
|
|
||||||
|
|
||||||
|
class Launcher(object):
    """Launch one or more services and wait for them to complete.

    Thin facade over a Services collection; also exposes the eventlet
    backdoor port (if enabled) to every launched service.
    """

    def __init__(self):
        """Initialize the service launcher.

        :returns: None

        """
        self.services = Services()
        self.backdoor_port = eventlet_backdoor.initialize_if_enabled()

    def launch_service(self, service):
        """Load and start the given service.

        :param service: The service you would like to start.
        :returns: None

        """
        # Hand the backdoor port to the service before tracking it.
        service.backdoor_port = self.backdoor_port
        self.services.add(service)

    def stop(self):
        """Stop all services which are currently running.

        :returns: None

        """
        self.services.stop()

    def wait(self):
        """Waits until all services have been stopped, and then returns.

        :returns: None

        """
        self.services.wait()

    def restart(self):
        """Reload config files and restart service.

        :returns: None

        """
        # Re-read configuration before restarting the managed services.
        cfg.CONF.reload_config_files()
        self.services.restart()
|
||||||
|
|
||||||
|
|
||||||
|
class SignalExit(SystemExit):
    """SystemExit variant that records which signal triggered the exit."""

    def __init__(self, signo, exccode=1):
        # Remember the originating signal; the base class stores
        # *exccode* as ``self.code``.
        self.signo = signo
        super(SignalExit, self).__init__(exccode)
|
||||||
|
|
||||||
|
class ServiceLauncher(Launcher):
    """Launcher that runs services in the current process.

    Adds signal handling on top of Launcher: SIGTERM/SIGINT (and SIGHUP
    where supported) are turned into SignalExit so wait() can shut down
    cleanly; a SIGHUP received while daemonized restarts the services.
    """

    def _handle_signal(self, signo, frame):
        """Signal handler: convert the signal into a SignalExit exception."""
        # Allow the process to be killed again and die from natural causes
        _set_signals_handler(signal.SIG_DFL)
        raise SignalExit(signo)

    def handle_signal(self):
        """Install this launcher's handler for the managed signals."""
        _set_signals_handler(self._handle_signal)

    def _wait_for_exit_or_signal(self, ready_callback=None):
        """Wait until the services stop or a signal arrives.

        :param ready_callback: optional callable invoked before waiting
        :returns: (status, signo) -- signo is 0 when exit was not caused
                  by a managed signal
        """
        status = None
        signo = 0

        LOG.debug('Full set of CONF:')
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        try:
            if ready_callback:
                ready_callback()
            super(ServiceLauncher, self).wait()
        except SignalExit as exc:
            signame = _signo_to_signame(exc.signo)
            LOG.info(_LI('Caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        finally:
            # Always stop services, even on an unexpected exit path.
            self.stop()
            if rpc:
                try:
                    rpc.cleanup()
                except Exception:
                    # We're shutting down, so it doesn't matter at this point.
                    LOG.exception(_LE('Exception during rpc cleanup.'))

        return status, signo

    def wait(self, ready_callback=None):
        """Run until exit, restarting on SIGHUP when daemonized.

        :param ready_callback: optional callable invoked each iteration
        :returns: the process exit status
        """
        systemd.notify_once()
        while True:
            self.handle_signal()
            status, signo = self._wait_for_exit_or_signal(ready_callback)
            # Any signal other than a daemonized SIGHUP terminates the loop.
            if not _is_sighup_and_daemon(signo):
                return status
            self.restart()
|
|
||||||
|
class ServiceWrapper(object):
    """Bookkeeping record pairing a service with its worker processes."""

    def __init__(self, service, workers):
        self.service = service
        # Desired number of worker processes for this service.
        self.workers = workers
        # PIDs of currently-running children.
        self.children = set()
        # Recent fork timestamps, used for fork-rate throttling.
        self.forktimes = []
|
|
||||||
|
class ProcessLauncher(object):
    """Launcher that forks worker processes and respawns them on death."""

    def __init__(self, wait_interval=0.01):
        """Constructor.

        :param wait_interval: The interval to sleep for between checks
                              of child process exit.
        """
        # Map of child pid -> ServiceWrapper.
        self.children = {}
        # Last signal caught by _handle_signal, or None.
        self.sigcaught = None
        self.running = True
        self.wait_interval = wait_interval
        # Pipe used by children to detect parent death: children hold the
        # read end open; EOF means the parent's write end was closed.
        rfd, self.writepipe = os.pipe()
        self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
        self.handle_signal()

    def handle_signal(self):
        """Install the parent-side handler for the managed signals."""
        _set_signals_handler(self._handle_signal)

    def _handle_signal(self, signo, frame):
        """Parent signal handler: record the signal and stop the main loop."""
        self.sigcaught = signo
        self.running = False

        # Allow the process to be killed again and die from natural causes
        _set_signals_handler(signal.SIG_DFL)

    def _pipe_watcher(self):
        """Child greenthread: exit when the parent closes its pipe end."""
        # This will block until the write end is closed when the parent
        # dies unexpectedly
        self.readpipe.read()

        LOG.info(_LI('Parent process has died unexpectedly, exiting'))

        sys.exit(1)

    def _child_process_handle_signal(self):
        """Replace inherited parent handlers with child-specific ones."""
        # Setup child signal handlers differently
        def _sigterm(*args):
            signal.signal(signal.SIGTERM, signal.SIG_DFL)
            raise SignalExit(signal.SIGTERM)

        def _sighup(*args):
            signal.signal(signal.SIGHUP, signal.SIG_DFL)
            raise SignalExit(signal.SIGHUP)

        signal.signal(signal.SIGTERM, _sigterm)
        if _sighup_supported():
            signal.signal(signal.SIGHUP, _sighup)
        # Block SIGINT and let the parent send us a SIGTERM
        signal.signal(signal.SIGINT, signal.SIG_IGN)

    def _child_wait_for_exit_or_signal(self, launcher):
        """Run *launcher* in the child until it exits or a signal arrives.

        :returns: (status, signo) -- signo is 0 when exit was not caused
                  by a managed signal
        """
        status = 0
        signo = 0

        # NOTE(johannes): All exceptions are caught to ensure this
        # doesn't fallback into the loop spawning children. It would
        # be bad for a child to spawn more children.
        try:
            launcher.wait()
        except SignalExit as exc:
            signame = _signo_to_signame(exc.signo)
            LOG.info(_LI('Child caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        except BaseException:
            LOG.exception(_LE('Unhandled exception'))
            status = 2
        finally:
            launcher.stop()

        return status, signo

    def _child_process(self, service):
        """Initialize the forked child and launch *service* in it.

        :returns: the child's Launcher instance
        """
        self._child_process_handle_signal()

        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        eventlet.hubs.use_hub()

        # Close write to ensure only parent has it open
        os.close(self.writepipe)
        # Create greenthread to watch for parent to close pipe
        eventlet.spawn_n(self._pipe_watcher)

        # Reseed random number generator
        random.seed()

        launcher = Launcher()
        launcher.launch_service(service)
        return launcher

    def _start_child(self, wrap):
        """Fork one worker for *wrap*, throttling the fork rate.

        :returns: the child's pid (in the parent); the child never returns
        """
        if len(wrap.forktimes) > wrap.workers:
            # Limit ourselves to one process a second (over the period of
            # number of workers * 1 second). This will allow workers to
            # start up quickly but ensure we don't fork off children that
            # die instantly too quickly.
            if time.time() - wrap.forktimes[0] < wrap.workers:
                LOG.info(_LI('Forking too fast, sleeping'))
                time.sleep(1)

            wrap.forktimes.pop(0)

        wrap.forktimes.append(time.time())

        pid = os.fork()
        if pid == 0:
            # Child: run the service, restarting on daemonized SIGHUP,
            # then exit without returning into the parent's loop.
            launcher = self._child_process(wrap.service)
            while True:
                self._child_process_handle_signal()
                status, signo = self._child_wait_for_exit_or_signal(launcher)
                if not _is_sighup_and_daemon(signo):
                    break
                launcher.restart()

            os._exit(status)

        LOG.info(_LI('Started child %d'), pid)

        wrap.children.add(pid)
        self.children[pid] = wrap

        return pid

    def launch_service(self, service, workers=1):
        """Fork *workers* child processes each running *service*."""
        wrap = ServiceWrapper(service, workers)

        LOG.info(_LI('Starting %d workers'), wrap.workers)
        while self.running and len(wrap.children) < wrap.workers:
            self._start_child(wrap)

    def _wait_child(self):
        """Reap one exited child, if any.

        :returns: the child's ServiceWrapper, or None when no known child
                  has exited
        """
        try:
            # Don't block if no child processes have exited
            pid, status = os.waitpid(0, os.WNOHANG)
            if not pid:
                return None
        except OSError as exc:
            if exc.errno not in (errno.EINTR, errno.ECHILD):
                raise
            return None

        if os.WIFSIGNALED(status):
            sig = os.WTERMSIG(status)
            LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
                     dict(pid=pid, sig=sig))
        else:
            code = os.WEXITSTATUS(status)
            LOG.info(_LI('Child %(pid)s exited with status %(code)d'),
                     dict(pid=pid, code=code))

        if pid not in self.children:
            LOG.warning(_LW('pid %d not in child list'), pid)
            return None

        wrap = self.children.pop(pid)
        wrap.children.remove(pid)
        return wrap

    def _respawn_children(self):
        """Reap dead children and fork replacements while running."""
        while self.running:
            wrap = self._wait_child()
            if not wrap:
                # Yield to other threads if no children have exited
                # Sleep for a short time to avoid excessive CPU usage
                # (see bug #1095346)
                eventlet.greenthread.sleep(self.wait_interval)
                continue
            while self.running and len(wrap.children) < wrap.workers:
                self._start_child(wrap)

    def wait(self):
        """Loop waiting on children to die and respawning as necessary."""

        systemd.notify_once()
        LOG.debug('Full set of CONF:')
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        try:
            while True:
                self.handle_signal()
                self._respawn_children()
                # No signal means that stop was called. Don't clean up here.
                if not self.sigcaught:
                    return

                signame = _signo_to_signame(self.sigcaught)
                LOG.info(_LI('Caught %s, stopping children'), signame)
                if not _is_sighup_and_daemon(self.sigcaught):
                    break

                # Daemonized SIGHUP: forward it to children and keep going.
                for pid in self.children:
                    os.kill(pid, signal.SIGHUP)
                self.running = True
                self.sigcaught = None
        except eventlet.greenlet.GreenletExit:
            LOG.info(_LI("Wait called after thread killed. Cleaning up."))

        self.stop()

    def stop(self):
        """Terminate child processes and wait on each."""
        self.running = False
        for pid in self.children:
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as exc:
                # ESRCH: the child is already gone -- that's fine.
                if exc.errno != errno.ESRCH:
                    raise

        # Wait for children to die
        if self.children:
            LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
            while self.children:
                self._wait_child()
||||||
|
|
||||||
|
class Service(object):
    """Service object for binaries running on hosts."""

    def __init__(self, threads=1000):
        # Green-thread pool this service schedules its work on.
        self.tg = threadgroup.ThreadGroup(threads)

        # signal that the service is done shutting itself down:
        self._done = event.Event()

    def reset(self):
        """Re-arm the shutdown event so the service can run again."""
        # NOTE(Fengqian): docs for Event.reset() recommend against using it
        self._done = event.Event()

    def start(self):
        """Start the service; subclasses override with real work."""
        pass

    def stop(self):
        """Stop the thread group and mark shutdown as complete."""
        self.tg.stop()
        self.tg.wait()
        # Signal that service cleanup is done:
        done = self._done
        if not done.ready():
            done.send()

    def wait(self):
        """Block until stop() has finished shutting the service down."""
        self._done.wait()
||||||
|
|
||||||
|
class Services(object):
    """Container that runs each added service in its own green thread."""

    def __init__(self):
        self.services = []
        self.tg = threadgroup.ThreadGroup()
        # Shared event: sent once all services have cleaned up, releasing
        # every run_service wrapper thread.
        self.done = event.Event()

    def add(self, service):
        """Register *service* and spawn a wrapper thread running it."""
        self.services.append(service)
        self.tg.add_thread(self.run_service, service, self.done)

    def stop(self):
        """Gracefully stop every service, then reap the wrapper threads."""
        # wait for graceful shutdown of services:
        for svc in self.services:
            svc.stop()
            svc.wait()

        # Each service has performed cleanup, now signal that the run_service
        # wrapper threads can now die:
        if not self.done.ready():
            self.done.send()

        # reap threads:
        self.tg.stop()

    def wait(self):
        """Block until every wrapper thread has finished."""
        self.tg.wait()

    def restart(self):
        """Stop everything, then relaunch each service on a fresh event."""
        self.stop()
        self.done = event.Event()
        for svc in self.services:
            svc.reset()
            self.tg.add_thread(self.run_service, svc, self.done)

    @staticmethod
    def run_service(service, done):
        """Service start wrapper.

        :param service: service to run
        :param done: event to wait on until a shutdown is triggered
        :returns: None

        """
        service.start()
        done.wait()
||||||
|
|
||||||
|
def launch(service, workers=1):
    """Launch *service*, forking worker processes when workers > 1.

    :param service: the service to run
    :param workers: number of worker processes; None or 1 runs in-process
    :returns: the launcher driving the service
    """
    run_in_process = workers is None or workers == 1
    if run_in_process:
        launcher = ServiceLauncher()
        launcher.launch_service(service)
    else:
        launcher = ProcessLauncher()
        launcher.launch_service(service, workers=workers)

    return launcher
95
octavia/openstack/common/sslutils.py
Normal file
95
octavia/openstack/common/sslutils.py
Normal file
@ -0,0 +1,95 @@
|
|||||||
|
# Copyright 2013 IBM Corp.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import os
|
||||||
|
import ssl
|
||||||
|
|
||||||
|
from oslo.config import cfg
|
||||||
|
|
||||||
|
from octavia.openstack.common.gettextutils import _
|
||||||
|
|
||||||
|
|
||||||
|
# Configuration options for serving over SSL; registered under the
# [ssl] section of the service configuration file.
ssl_opts = [
    cfg.StrOpt('ca_file',
               help="CA certificate file to use to verify "
                    "connecting clients."),
    cfg.StrOpt('cert_file',
               help="Certificate file to use when starting "
                    "the server securely."),
    cfg.StrOpt('key_file',
               help="Private key file to use when starting "
                    "the server securely."),
]


CONF = cfg.CONF
CONF.register_opts(ssl_opts, "ssl")
|
||||||
|
def is_enabled():
    """Validate the [ssl] config and report whether SSL should be used.

    :returns: truthy when a cert/key pair is configured, falsy otherwise
    :raises RuntimeError: when a configured file is missing on disk or
                          the cert/key pair is incomplete
    """
    ca_file = CONF.ssl.ca_file
    cert_file = CONF.ssl.cert_file
    key_file = CONF.ssl.key_file
    enabled = cert_file or key_file

    # Each configured path must exist; check cert, then CA, then key.
    if cert_file and not os.path.exists(cert_file):
        raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)

    if ca_file and not os.path.exists(ca_file):
        raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)

    if key_file and not os.path.exists(key_file):
        raise RuntimeError(_("Unable to find key_file : %s") % key_file)

    # A cert without a key (or vice versa) cannot serve SSL.
    if enabled and (not cert_file or not key_file):
        raise RuntimeError(_("When running server in SSL mode, you must "
                             "specify both a cert_file and key_file "
                             "option value in your configuration file"))

    return enabled
|
|
||||||
|
def wrap(sock):
    """Wrap *sock* as a server-side SSL socket per the [ssl] config.

    Client certificates are required only when a CA file is configured.
    """
    kwargs = dict(server_side=True,
                  certfile=CONF.ssl.cert_file,
                  keyfile=CONF.ssl.key_file,
                  cert_reqs=ssl.CERT_NONE)

    ca_file = CONF.ssl.ca_file
    if ca_file:
        kwargs['ca_certs'] = ca_file
        kwargs['cert_reqs'] = ssl.CERT_REQUIRED

    return ssl.wrap_socket(sock, **kwargs)
|
|
||||||
|
# Map of user-facing protocol names to ssl module constants, used by
# validate_ssl_version().
_SSL_PROTOCOLS = {
    "tlsv1": ssl.PROTOCOL_TLSv1,
    "sslv23": ssl.PROTOCOL_SSLv23,
    "sslv3": ssl.PROTOCOL_SSLv3
}

# SSLv2 support is compile-time optional in the ssl module; only offer
# it when this interpreter's build provides the constant.
try:
    _SSL_PROTOCOLS["sslv2"] = ssl.PROTOCOL_SSLv2
except AttributeError:
    pass
|
||||||
|
def validate_ssl_version(version):
    """Translate a protocol name into its ssl.PROTOCOL_* constant.

    :param version: case-insensitive protocol name, e.g. "tlsv1"
    :raises RuntimeError: when the name is not a supported protocol
    """
    protocol = _SSL_PROTOCOLS.get(version.lower())
    if protocol is None:
        raise RuntimeError(_("Invalid SSL version : %s") % version)
    return protocol
311
octavia/openstack/common/strutils.py
Normal file
311
octavia/openstack/common/strutils.py
Normal file
@ -0,0 +1,311 @@
|
|||||||
|
# Copyright 2011 OpenStack Foundation.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
System-level utilities and helper functions.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import math
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
import unicodedata
|
||||||
|
|
||||||
|
import six
|
||||||
|
|
||||||
|
from octavia.openstack.common.gettextutils import _
|
||||||
|
|
||||||
|
|
||||||
|
# Exponent applied to the unit-system base (1024 or 1000) for each size
# prefix accepted by string_to_bytes().
UNIT_PREFIX_EXPONENT = {
    'k': 1,
    'K': 1,
    'Ki': 1,
    'M': 2,
    'Mi': 2,
    'G': 3,
    'Gi': 3,
    'T': 4,
    'Ti': 4,
}
# Per unit system: (base, regex); the regex captures the magnitude, an
# optional size prefix, and the bit/byte suffix.
UNIT_SYSTEM_INFO = {
    'IEC': (1024, re.compile(r'(^[-+]?\d*\.?\d+)([KMGT]i?)?(b|bit|B)$')),
    'SI': (1000, re.compile(r'(^[-+]?\d*\.?\d+)([kMGT])?(b|bit|B)$')),
}

# Values recognized by bool_from_string() after lowercasing.
TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')

# to_slug(): characters to strip, and whitespace/hyphen runs to collapse.
SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]")
SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+")


# NOTE(flaper87): The following globals are used by `mask_password`
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']

# NOTE(ldbragst): Let's build a list of regex objects using the list of
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
# to the list of _SANITIZE_KEYS and we can generate regular expressions
# for XML and JSON automatically.
_SANITIZE_PATTERNS_2 = []
_SANITIZE_PATTERNS_1 = []

# NOTE(amrith): Some regular expressions have only one parameter, some
# have two parameters. Use different lists of patterns here.
_FORMAT_PATTERNS_1 = [r'(%(key)s\s*[=]\s*)[^\s^\'^\"]+']
_FORMAT_PATTERNS_2 = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
                      r'(%(key)s\s+[\"\']).*?([\"\'])',
                      r'([-]{2}%(key)s\s+)[^\'^\"^=^\s]+([\s]*)',
                      r'(<%(key)s>).*?(</%(key)s>)',
                      r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
                      r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])',
                      r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?'
                      '[\'"]).*?([\'"])',
                      r'(%(key)s\s*--?[A-z]+\s*)\S+(\s*)']

# Compile one regex per (key, pattern) combination once, at import time.
for key in _SANITIZE_KEYS:
    for pattern in _FORMAT_PATTERNS_2:
        reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
        _SANITIZE_PATTERNS_2.append(reg_ex)

    for pattern in _FORMAT_PATTERNS_1:
        reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
        _SANITIZE_PATTERNS_1.append(reg_ex)
||||||
|
|
||||||
|
def int_from_bool_as_string(subject):
    """Interpret a string as a boolean and return either 1 or 0.

    Any string value in:

        ('True', 'true', 'On', 'on', '1')

    is interpreted as a boolean True.

    Useful for JSON-decoded stuff and config file parsing
    """
    # Use a conditional expression instead of the fragile `X and 1 or 0`
    # and/or idiom (which silently misbehaves whenever the truthy branch
    # value is itself falsy).
    return 1 if bool_from_string(subject) else 0
||||||
|
|
||||||
|
def bool_from_string(subject, strict=False, default=False):
    """Interpret a string as a boolean.

    A case-insensitive match is performed such that strings matching 't',
    'true', 'on', 'y', 'yes', or '1' are considered True and, when
    `strict=False`, anything else returns the value specified by 'default'.

    Useful for JSON-decoded stuff and config file parsing.

    If `strict=True`, unrecognized values, including None, will raise a
    ValueError which is useful when parsing values passed in from an API call.
    Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
    """
    # Coerce non-string input (including None) to text first.
    if not isinstance(subject, six.string_types):
        subject = six.text_type(subject)

    lowered = subject.strip().lower()

    if lowered in TRUE_STRINGS:
        return True
    if lowered in FALSE_STRINGS:
        return False
    if strict:
        acceptable = ', '.join(
            "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
        msg = _("Unrecognized value '%(val)s', acceptable values are:"
                " %(acceptable)s") % {'val': subject,
                                      'acceptable': acceptable}
        raise ValueError(msg)
    return default
||||||
|
|
||||||
|
def safe_decode(text, incoming=None, errors='strict'):
    """Decodes incoming text/bytes string using `incoming` if they're not
    already unicode.

    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a unicode `incoming` encoded
        representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, (six.string_types, six.binary_type)):
        raise TypeError("%s can't be decoded" % type(text))

    # Already unicode -- nothing to decode.
    if isinstance(text, six.text_type):
        return text

    if not incoming:
        # Fall back to whatever the interpreter believes stdin uses.
        incoming = (sys.stdin.encoding or
                    sys.getdefaultencoding())

    try:
        return text.decode(incoming, errors)
    except UnicodeDecodeError:
        # Note(flaper87) If we get here, it means that
        # sys.stdin.encoding / sys.getdefaultencoding
        # didn't return a suitable encoding to decode
        # text. This happens mostly when global LANG
        # var is not set correctly and there's no
        # default encoding. In this case, most likely
        # python will use ASCII or ANSI encoders as
        # default encodings but they won't be capable
        # of decoding non-ASCII characters.
        #
        # Also, UTF-8 is being used since it's an ASCII
        # extension.
        return text.decode('utf-8', errors)
|
|
||||||
|
def safe_encode(text, incoming=None,
                encoding='utf-8', errors='strict'):
    """Encodes incoming text/bytes string using `encoding`.

    If incoming is not specified, text is expected to be encoded with
    current python's default encoding. (`sys.getdefaultencoding`)

    :param incoming: Text's current encoding
    :param encoding: Expected encoding for text (Default UTF-8)
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a bytestring `encoding` encoded
        representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, (six.string_types, six.binary_type)):
        raise TypeError("%s can't be encoded" % type(text))

    if not incoming:
        # Fall back to whatever the interpreter believes stdin uses.
        incoming = (sys.stdin.encoding or
                    sys.getdefaultencoding())

    if isinstance(text, six.text_type):
        return text.encode(encoding, errors)
    elif text and encoding != incoming:
        # Decode text before encoding it with `encoding`
        text = safe_decode(text, incoming, errors)
        return text.encode(encoding, errors)
    else:
        # Bytes already in the requested encoding (or empty): return as-is.
        return text
|
|
||||||
|
def string_to_bytes(text, unit_system='IEC', return_int=False):
    """Converts a string into a float representation of bytes.

    The units supported for IEC ::

        Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it)
        KB, KiB, MB, MiB, GB, GiB, TB, TiB

    The units supported for SI ::

        kb(it), Mb(it), Gb(it), Tb(it)
        kB, MB, GB, TB

    Note that the SI unit system does not support capital letter 'K'

    :param text: String input for bytes size conversion.
    :param unit_system: Unit system for byte size conversion.
    :param return_int: If True, returns integer representation of text
                       in bytes. (default: decimal)
    :returns: Numerical representation of text in bytes.
    :raises ValueError: If text has an invalid value.

    """
    try:
        base, reg_ex = UNIT_SYSTEM_INFO[unit_system]
    except KeyError:
        msg = _('Invalid unit system: "%s"') % unit_system
        raise ValueError(msg)
    match = reg_ex.match(text)
    if match:
        magnitude = float(match.group(1))
        unit_prefix = match.group(2)
        if match.group(3) in ['b', 'bit']:
            # A bit suffix: convert bits to bytes.
            magnitude /= 8
    else:
        msg = _('Invalid string format: %s') % text
        raise ValueError(msg)
    if not unit_prefix:
        res = magnitude
    else:
        # Scale by the unit-system base raised to the prefix's exponent.
        res = magnitude * pow(base, UNIT_PREFIX_EXPONENT[unit_prefix])
    if return_int:
        # Round up so the integer result never understates the size.
        return int(math.ceil(res))
    return res
|
|
||||||
|
def to_slug(value, incoming=None, errors="strict"):
    """Normalize string.

    Convert to lowercase, remove non-word characters, and convert spaces
    to hyphens.

    Inspired by Django's `slugify` filter.

    :param value: Text to slugify
    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: slugified unicode representation of `value`
    :raises TypeError: If text is not an instance of str
    """
    decoded = safe_decode(value, incoming, errors)
    # NOTE(aababilov): no need to use safe_(encode|decode) here:
    # encodings are always "ascii", error handling is always "ignore"
    # and types are always known (first: unicode; second: str)
    ascii_text = unicodedata.normalize("NFKD", decoded)
    ascii_text = ascii_text.encode("ascii", "ignore").decode("ascii")
    stripped = SLUGIFY_STRIP_RE.sub("", ascii_text).strip().lower()
    return SLUGIFY_HYPHENATE_RE.sub("-", stripped)
|
|
||||||
|
def mask_password(message, secret="***"):
    """Replace password with 'secret' in message.

    :param message: The string which includes security information.
    :param secret: value with which to replace passwords.
    :returns: The unicode value of message with the password fields masked.

    For example:

    >>> mask_password("'adminPass' : 'aaaaa'")
    "'adminPass' : '***'"
    >>> mask_password("'admin_pass' : 'aaaaa'")
    "'admin_pass' : '***'"
    >>> mask_password('"password" : "aaaaa"')
    '"password" : "***"'
    >>> mask_password("'original_password' : 'aaaaa'")
    "'original_password' : '***'"
    >>> mask_password("u'original_password' : u'aaaaa'")
    "u'original_password' : u'***'"
    """
    message = six.text_type(message)

    # NOTE(ldbragst): Check to see if anything in message contains any key
    # specified in _SANITIZE_KEYS, if not then just return the message since
    # we don't have to mask any passwords.
    if not any(key in message for key in _SANITIZE_KEYS):
        return message

    # Two-group patterns keep the text surrounding the secret on both sides.
    substitute = r'\g<1>' + secret + r'\g<2>'
    for pattern in _SANITIZE_PATTERNS_2:
        message = re.sub(pattern, substitute, message)

    # One-group patterns keep only the leading context.
    substitute = r'\g<1>' + secret
    for pattern in _SANITIZE_PATTERNS_1:
        message = re.sub(pattern, substitute, message)

    return message
106
octavia/openstack/common/systemd.py
Normal file
106
octavia/openstack/common/systemd.py
Normal file
@ -0,0 +1,106 @@
|
|||||||
|
# Copyright 2012-2014 Red Hat, Inc.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
Helper module for systemd service readiness notification.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import socket
|
||||||
|
import sys
|
||||||
|
|
||||||
|
from octavia.openstack.common import log as logging
|
||||||
|
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def _abstractify(socket_name):
|
||||||
|
if socket_name.startswith('@'):
|
||||||
|
# abstract namespace socket
|
||||||
|
socket_name = '\0%s' % socket_name[1:]
|
||||||
|
return socket_name
|
||||||
|
|
||||||
|
|
||||||
|
def _sd_notify(unset_env, msg):
    """Send *msg* to the systemd notification socket, if one is set.

    :param unset_env: when True, remove NOTIFY_SOCKET from the environment
                      after a successful send so no further notifications
                      can be delivered
    :param msg: notification payload, e.g. 'READY=1'
    """
    notify_socket = os.getenv('NOTIFY_SOCKET')
    if notify_socket:
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        try:
            sock.connect(_abstractify(notify_socket))
            # NOTE(review): sendall() requires bytes on Python 3, but msg
            # arrives as a str literal -- presumably this module targets
            # Python 2; confirm before running under Python 3.
            sock.sendall(msg)
            if unset_env:
                del os.environ['NOTIFY_SOCKET']
        except EnvironmentError:
            # Best-effort: notification failure must not break the service.
            LOG.debug("Systemd notification failed", exc_info=True)
        finally:
            sock.close()
|
|
||||||
|
def notify():
|
||||||
|
"""Send notification to Systemd that service is ready.
|
||||||
|
|
||||||
|
For details see
|
||||||
|
http://www.freedesktop.org/software/systemd/man/sd_notify.html
|
||||||
|
"""
|
||||||
|
_sd_notify(False, 'READY=1')
|
||||||
|
|
||||||
|
|
||||||
|
def notify_once():
|
||||||
|
"""Send notification once to Systemd that service is ready.
|
||||||
|
|
||||||
|
Systemd sets NOTIFY_SOCKET environment variable with the name of the
|
||||||
|
socket listening for notifications from services.
|
||||||
|
This method removes the NOTIFY_SOCKET environment variable to ensure
|
||||||
|
notification is sent only once.
|
||||||
|
"""
|
||||||
|
_sd_notify(True, 'READY=1')
|
||||||
|
|
||||||
|
|
||||||
|
def onready(notify_socket, timeout):
|
||||||
|
"""Wait for systemd style notification on the socket.
|
||||||
|
|
||||||
|
:param notify_socket: local socket address
|
||||||
|
:type notify_socket: string
|
||||||
|
:param timeout: socket timeout
|
||||||
|
:type timeout: float
|
||||||
|
:returns: 0 service ready
|
||||||
|
1 service not ready
|
||||||
|
2 timeout occurred
|
||||||
|
"""
|
||||||
|
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
|
||||||
|
sock.settimeout(timeout)
|
||||||
|
sock.bind(_abstractify(notify_socket))
|
||||||
|
try:
|
||||||
|
msg = sock.recv(512)
|
||||||
|
except socket.timeout:
|
||||||
|
return 2
|
||||||
|
finally:
|
||||||
|
sock.close()
|
||||||
|
if 'READY=1' in msg:
|
||||||
|
return 0
|
||||||
|
else:
|
||||||
|
return 1
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
# simple CLI for testing
|
||||||
|
if len(sys.argv) == 1:
|
||||||
|
notify()
|
||||||
|
elif len(sys.argv) >= 2:
|
||||||
|
timeout = float(sys.argv[1])
|
||||||
|
notify_socket = os.getenv('NOTIFY_SOCKET')
|
||||||
|
if notify_socket:
|
||||||
|
retval = onready(notify_socket, timeout)
|
||||||
|
sys.exit(retval)
|
147
octavia/openstack/common/threadgroup.py
Normal file
147
octavia/openstack/common/threadgroup.py
Normal file
@ -0,0 +1,147 @@
|
|||||||
|
# Copyright 2012 Red Hat, Inc.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
import threading
|
||||||
|
|
||||||
|
import eventlet
|
||||||
|
from eventlet import greenpool
|
||||||
|
|
||||||
|
from octavia.openstack.common import log as logging
|
||||||
|
from octavia.openstack.common import loopingcall
|
||||||
|
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def _thread_done(gt, *args, **kwargs):
|
||||||
|
"""Callback function to be passed to GreenThread.link() when we spawn()
|
||||||
|
Calls the :class:`ThreadGroup` to notify if.
|
||||||
|
|
||||||
|
"""
|
||||||
|
kwargs['group'].thread_done(kwargs['thread'])
|
||||||
|
|
||||||
|
|
||||||
|
class Thread(object):
|
||||||
|
"""Wrapper around a greenthread, that holds a reference to the
|
||||||
|
:class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when
|
||||||
|
it has done so it can be removed from the threads list.
|
||||||
|
"""
|
||||||
|
def __init__(self, thread, group):
|
||||||
|
self.thread = thread
|
||||||
|
self.thread.link(_thread_done, group=group, thread=self)
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
self.thread.kill()
|
||||||
|
|
||||||
|
def wait(self):
|
||||||
|
return self.thread.wait()
|
||||||
|
|
||||||
|
def link(self, func, *args, **kwargs):
|
||||||
|
self.thread.link(func, *args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
class ThreadGroup(object):
|
||||||
|
"""The point of the ThreadGroup class is to:
|
||||||
|
|
||||||
|
* keep track of timers and greenthreads (making it easier to stop them
|
||||||
|
when need be).
|
||||||
|
* provide an easy API to add timers.
|
||||||
|
"""
|
||||||
|
def __init__(self, thread_pool_size=10):
|
||||||
|
self.pool = greenpool.GreenPool(thread_pool_size)
|
||||||
|
self.threads = []
|
||||||
|
self.timers = []
|
||||||
|
|
||||||
|
def add_dynamic_timer(self, callback, initial_delay=None,
|
||||||
|
periodic_interval_max=None, *args, **kwargs):
|
||||||
|
timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs)
|
||||||
|
timer.start(initial_delay=initial_delay,
|
||||||
|
periodic_interval_max=periodic_interval_max)
|
||||||
|
self.timers.append(timer)
|
||||||
|
|
||||||
|
def add_timer(self, interval, callback, initial_delay=None,
|
||||||
|
*args, **kwargs):
|
||||||
|
pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs)
|
||||||
|
pulse.start(interval=interval,
|
||||||
|
initial_delay=initial_delay)
|
||||||
|
self.timers.append(pulse)
|
||||||
|
|
||||||
|
def add_thread(self, callback, *args, **kwargs):
|
||||||
|
gt = self.pool.spawn(callback, *args, **kwargs)
|
||||||
|
th = Thread(gt, self)
|
||||||
|
self.threads.append(th)
|
||||||
|
return th
|
||||||
|
|
||||||
|
def thread_done(self, thread):
|
||||||
|
self.threads.remove(thread)
|
||||||
|
|
||||||
|
def _stop_threads(self):
|
||||||
|
current = threading.current_thread()
|
||||||
|
|
||||||
|
# Iterate over a copy of self.threads so thread_done doesn't
|
||||||
|
# modify the list while we're iterating
|
||||||
|
for x in self.threads[:]:
|
||||||
|
if x is current:
|
||||||
|
# don't kill the current thread.
|
||||||
|
continue
|
||||||
|
try:
|
||||||
|
x.stop()
|
||||||
|
except Exception as ex:
|
||||||
|
LOG.exception(ex)
|
||||||
|
|
||||||
|
def stop_timers(self):
|
||||||
|
for x in self.timers:
|
||||||
|
try:
|
||||||
|
x.stop()
|
||||||
|
except Exception as ex:
|
||||||
|
LOG.exception(ex)
|
||||||
|
self.timers = []
|
||||||
|
|
||||||
|
def stop(self, graceful=False):
|
||||||
|
"""stop function has the option of graceful=True/False.
|
||||||
|
|
||||||
|
* In case of graceful=True, wait for all threads to be finished.
|
||||||
|
Never kill threads.
|
||||||
|
* In case of graceful=False, kill threads immediately.
|
||||||
|
"""
|
||||||
|
self.stop_timers()
|
||||||
|
if graceful:
|
||||||
|
# In case of graceful=True, wait for all threads to be
|
||||||
|
# finished, never kill threads
|
||||||
|
self.wait()
|
||||||
|
else:
|
||||||
|
# In case of graceful=False(Default), kill threads
|
||||||
|
# immediately
|
||||||
|
self._stop_threads()
|
||||||
|
|
||||||
|
def wait(self):
|
||||||
|
for x in self.timers:
|
||||||
|
try:
|
||||||
|
x.wait()
|
||||||
|
except eventlet.greenlet.GreenletExit:
|
||||||
|
pass
|
||||||
|
except Exception as ex:
|
||||||
|
LOG.exception(ex)
|
||||||
|
current = threading.current_thread()
|
||||||
|
|
||||||
|
# Iterate over a copy of self.threads so thread_done doesn't
|
||||||
|
# modify the list while we're iterating
|
||||||
|
for x in self.threads[:]:
|
||||||
|
if x is current:
|
||||||
|
continue
|
||||||
|
try:
|
||||||
|
x.wait()
|
||||||
|
except eventlet.greenlet.GreenletExit:
|
||||||
|
pass
|
||||||
|
except Exception as ex:
|
||||||
|
LOG.exception(ex)
|
210
octavia/openstack/common/timeutils.py
Normal file
210
octavia/openstack/common/timeutils.py
Normal file
@ -0,0 +1,210 @@
|
|||||||
|
# Copyright 2011 OpenStack Foundation.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
Time related utilities and helper functions.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import calendar
|
||||||
|
import datetime
|
||||||
|
import time
|
||||||
|
|
||||||
|
import iso8601
|
||||||
|
import six
|
||||||
|
|
||||||
|
|
||||||
|
# ISO 8601 extended time format with microseconds
|
||||||
|
_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
|
||||||
|
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
|
||||||
|
PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND
|
||||||
|
|
||||||
|
|
||||||
|
def isotime(at=None, subsecond=False):
|
||||||
|
"""Stringify time in ISO 8601 format."""
|
||||||
|
if not at:
|
||||||
|
at = utcnow()
|
||||||
|
st = at.strftime(_ISO8601_TIME_FORMAT
|
||||||
|
if not subsecond
|
||||||
|
else _ISO8601_TIME_FORMAT_SUBSECOND)
|
||||||
|
tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
|
||||||
|
st += ('Z' if tz == 'UTC' else tz)
|
||||||
|
return st
|
||||||
|
|
||||||
|
|
||||||
|
def parse_isotime(timestr):
|
||||||
|
"""Parse time from ISO 8601 format."""
|
||||||
|
try:
|
||||||
|
return iso8601.parse_date(timestr)
|
||||||
|
except iso8601.ParseError as e:
|
||||||
|
raise ValueError(six.text_type(e))
|
||||||
|
except TypeError as e:
|
||||||
|
raise ValueError(six.text_type(e))
|
||||||
|
|
||||||
|
|
||||||
|
def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
|
||||||
|
"""Returns formatted utcnow."""
|
||||||
|
if not at:
|
||||||
|
at = utcnow()
|
||||||
|
return at.strftime(fmt)
|
||||||
|
|
||||||
|
|
||||||
|
def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
|
||||||
|
"""Turn a formatted time back into a datetime."""
|
||||||
|
return datetime.datetime.strptime(timestr, fmt)
|
||||||
|
|
||||||
|
|
||||||
|
def normalize_time(timestamp):
|
||||||
|
"""Normalize time in arbitrary timezone to UTC naive object."""
|
||||||
|
offset = timestamp.utcoffset()
|
||||||
|
if offset is None:
|
||||||
|
return timestamp
|
||||||
|
return timestamp.replace(tzinfo=None) - offset
|
||||||
|
|
||||||
|
|
||||||
|
def is_older_than(before, seconds):
|
||||||
|
"""Return True if before is older than seconds."""
|
||||||
|
if isinstance(before, six.string_types):
|
||||||
|
before = parse_strtime(before).replace(tzinfo=None)
|
||||||
|
else:
|
||||||
|
before = before.replace(tzinfo=None)
|
||||||
|
|
||||||
|
return utcnow() - before > datetime.timedelta(seconds=seconds)
|
||||||
|
|
||||||
|
|
||||||
|
def is_newer_than(after, seconds):
|
||||||
|
"""Return True if after is newer than seconds."""
|
||||||
|
if isinstance(after, six.string_types):
|
||||||
|
after = parse_strtime(after).replace(tzinfo=None)
|
||||||
|
else:
|
||||||
|
after = after.replace(tzinfo=None)
|
||||||
|
|
||||||
|
return after - utcnow() > datetime.timedelta(seconds=seconds)
|
||||||
|
|
||||||
|
|
||||||
|
def utcnow_ts():
|
||||||
|
"""Timestamp version of our utcnow function."""
|
||||||
|
if utcnow.override_time is None:
|
||||||
|
# NOTE(kgriffs): This is several times faster
|
||||||
|
# than going through calendar.timegm(...)
|
||||||
|
return int(time.time())
|
||||||
|
|
||||||
|
return calendar.timegm(utcnow().timetuple())
|
||||||
|
|
||||||
|
|
||||||
|
def utcnow():
|
||||||
|
"""Overridable version of utils.utcnow."""
|
||||||
|
if utcnow.override_time:
|
||||||
|
try:
|
||||||
|
return utcnow.override_time.pop(0)
|
||||||
|
except AttributeError:
|
||||||
|
return utcnow.override_time
|
||||||
|
return datetime.datetime.utcnow()
|
||||||
|
|
||||||
|
|
||||||
|
def iso8601_from_timestamp(timestamp):
|
||||||
|
"""Returns an iso8601 formatted date from timestamp."""
|
||||||
|
return isotime(datetime.datetime.utcfromtimestamp(timestamp))
|
||||||
|
|
||||||
|
|
||||||
|
utcnow.override_time = None
|
||||||
|
|
||||||
|
|
||||||
|
def set_time_override(override_time=None):
|
||||||
|
"""Overrides utils.utcnow.
|
||||||
|
|
||||||
|
Make it return a constant time or a list thereof, one at a time.
|
||||||
|
|
||||||
|
:param override_time: datetime instance or list thereof. If not
|
||||||
|
given, defaults to the current UTC time.
|
||||||
|
"""
|
||||||
|
utcnow.override_time = override_time or datetime.datetime.utcnow()
|
||||||
|
|
||||||
|
|
||||||
|
def advance_time_delta(timedelta):
|
||||||
|
"""Advance overridden time using a datetime.timedelta."""
|
||||||
|
assert utcnow.override_time is not None
|
||||||
|
try:
|
||||||
|
for dt in utcnow.override_time:
|
||||||
|
dt += timedelta
|
||||||
|
except TypeError:
|
||||||
|
utcnow.override_time += timedelta
|
||||||
|
|
||||||
|
|
||||||
|
def advance_time_seconds(seconds):
|
||||||
|
"""Advance overridden time by seconds."""
|
||||||
|
advance_time_delta(datetime.timedelta(0, seconds))
|
||||||
|
|
||||||
|
|
||||||
|
def clear_time_override():
|
||||||
|
"""Remove the overridden time."""
|
||||||
|
utcnow.override_time = None
|
||||||
|
|
||||||
|
|
||||||
|
def marshall_now(now=None):
|
||||||
|
"""Make an rpc-safe datetime with microseconds.
|
||||||
|
|
||||||
|
Note: tzinfo is stripped, but not required for relative times.
|
||||||
|
"""
|
||||||
|
if not now:
|
||||||
|
now = utcnow()
|
||||||
|
return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
|
||||||
|
minute=now.minute, second=now.second,
|
||||||
|
microsecond=now.microsecond)
|
||||||
|
|
||||||
|
|
||||||
|
def unmarshall_time(tyme):
|
||||||
|
"""Unmarshall a datetime dict."""
|
||||||
|
return datetime.datetime(day=tyme['day'],
|
||||||
|
month=tyme['month'],
|
||||||
|
year=tyme['year'],
|
||||||
|
hour=tyme['hour'],
|
||||||
|
minute=tyme['minute'],
|
||||||
|
second=tyme['second'],
|
||||||
|
microsecond=tyme['microsecond'])
|
||||||
|
|
||||||
|
|
||||||
|
def delta_seconds(before, after):
|
||||||
|
"""Return the difference between two timing objects.
|
||||||
|
|
||||||
|
Compute the difference in seconds between two date, time, or
|
||||||
|
datetime objects (as a float, to microsecond resolution).
|
||||||
|
"""
|
||||||
|
delta = after - before
|
||||||
|
return total_seconds(delta)
|
||||||
|
|
||||||
|
|
||||||
|
def total_seconds(delta):
|
||||||
|
"""Return the total seconds of datetime.timedelta object.
|
||||||
|
|
||||||
|
Compute total seconds of datetime.timedelta, datetime.timedelta
|
||||||
|
doesn't have method total_seconds in Python2.6, calculate it manually.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
return delta.total_seconds()
|
||||||
|
except AttributeError:
|
||||||
|
return ((delta.days * 24 * 3600) + delta.seconds +
|
||||||
|
float(delta.microseconds) / (10 ** 6))
|
||||||
|
|
||||||
|
|
||||||
|
def is_soon(dt, window):
|
||||||
|
"""Determines if time is going to happen in the next window seconds.
|
||||||
|
|
||||||
|
:param dt: the time
|
||||||
|
:param window: minimum seconds to remain to consider the time not soon
|
||||||
|
|
||||||
|
:return: True if expiration is within the given duration
|
||||||
|
"""
|
||||||
|
soon = (utcnow() + datetime.timedelta(seconds=window))
|
||||||
|
return normalize_time(dt) <= soon
|
37
octavia/openstack/common/uuidutils.py
Normal file
37
octavia/openstack/common/uuidutils.py
Normal file
@ -0,0 +1,37 @@
|
|||||||
|
# Copyright (c) 2012 Intel Corporation.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
UUID related utilities and helper functions.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import uuid
|
||||||
|
|
||||||
|
|
||||||
|
def generate_uuid():
|
||||||
|
return str(uuid.uuid4())
|
||||||
|
|
||||||
|
|
||||||
|
def is_uuid_like(val):
|
||||||
|
"""Returns validation of a value as a UUID.
|
||||||
|
|
||||||
|
For our purposes, a UUID is a canonical form string:
|
||||||
|
aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
|
||||||
|
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
return str(uuid.UUID(val)) == val
|
||||||
|
except (TypeError, ValueError, AttributeError):
|
||||||
|
return False
|
203
octavia/openstack/common/versionutils.py
Normal file
203
octavia/openstack/common/versionutils.py
Normal file
@ -0,0 +1,203 @@
|
|||||||
|
# Copyright (c) 2013 OpenStack Foundation
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
Helpers for comparing version strings.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import functools
|
||||||
|
import inspect
|
||||||
|
|
||||||
|
import pkg_resources
|
||||||
|
import six
|
||||||
|
|
||||||
|
from octavia.openstack.common.gettextutils import _
|
||||||
|
from octavia.openstack.common import log as logging
|
||||||
|
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class deprecated(object):
|
||||||
|
"""A decorator to mark callables as deprecated.
|
||||||
|
|
||||||
|
This decorator logs a deprecation message when the callable it decorates is
|
||||||
|
used. The message will include the release where the callable was
|
||||||
|
deprecated, the release where it may be removed and possibly an optional
|
||||||
|
replacement.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
|
||||||
|
1. Specifying the required deprecated release
|
||||||
|
|
||||||
|
>>> @deprecated(as_of=deprecated.ICEHOUSE)
|
||||||
|
... def a(): pass
|
||||||
|
|
||||||
|
2. Specifying a replacement:
|
||||||
|
|
||||||
|
>>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()')
|
||||||
|
... def b(): pass
|
||||||
|
|
||||||
|
3. Specifying the release where the functionality may be removed:
|
||||||
|
|
||||||
|
>>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1)
|
||||||
|
... def c(): pass
|
||||||
|
|
||||||
|
4. Specifying the deprecated functionality will not be removed:
|
||||||
|
>>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=0)
|
||||||
|
... def d(): pass
|
||||||
|
|
||||||
|
5. Specifying a replacement, deprecated functionality will not be removed:
|
||||||
|
>>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()', remove_in=0)
|
||||||
|
... def e(): pass
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
# NOTE(morganfainberg): Bexar is used for unit test purposes, it is
|
||||||
|
# expected we maintain a gap between Bexar and Folsom in this list.
|
||||||
|
BEXAR = 'B'
|
||||||
|
FOLSOM = 'F'
|
||||||
|
GRIZZLY = 'G'
|
||||||
|
HAVANA = 'H'
|
||||||
|
ICEHOUSE = 'I'
|
||||||
|
JUNO = 'J'
|
||||||
|
KILO = 'K'
|
||||||
|
|
||||||
|
_RELEASES = {
|
||||||
|
# NOTE(morganfainberg): Bexar is used for unit test purposes, it is
|
||||||
|
# expected we maintain a gap between Bexar and Folsom in this list.
|
||||||
|
'B': 'Bexar',
|
||||||
|
'F': 'Folsom',
|
||||||
|
'G': 'Grizzly',
|
||||||
|
'H': 'Havana',
|
||||||
|
'I': 'Icehouse',
|
||||||
|
'J': 'Juno',
|
||||||
|
'K': 'Kilo',
|
||||||
|
}
|
||||||
|
|
||||||
|
_deprecated_msg_with_alternative = _(
|
||||||
|
'%(what)s is deprecated as of %(as_of)s in favor of '
|
||||||
|
'%(in_favor_of)s and may be removed in %(remove_in)s.')
|
||||||
|
|
||||||
|
_deprecated_msg_no_alternative = _(
|
||||||
|
'%(what)s is deprecated as of %(as_of)s and may be '
|
||||||
|
'removed in %(remove_in)s. It will not be superseded.')
|
||||||
|
|
||||||
|
_deprecated_msg_with_alternative_no_removal = _(
|
||||||
|
'%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s.')
|
||||||
|
|
||||||
|
_deprecated_msg_with_no_alternative_no_removal = _(
|
||||||
|
'%(what)s is deprecated as of %(as_of)s. It will not be superseded.')
|
||||||
|
|
||||||
|
def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None):
|
||||||
|
"""Initialize decorator
|
||||||
|
|
||||||
|
:param as_of: the release deprecating the callable. Constants
|
||||||
|
are define in this class for convenience.
|
||||||
|
:param in_favor_of: the replacement for the callable (optional)
|
||||||
|
:param remove_in: an integer specifying how many releases to wait
|
||||||
|
before removing (default: 2)
|
||||||
|
:param what: name of the thing being deprecated (default: the
|
||||||
|
callable's name)
|
||||||
|
|
||||||
|
"""
|
||||||
|
self.as_of = as_of
|
||||||
|
self.in_favor_of = in_favor_of
|
||||||
|
self.remove_in = remove_in
|
||||||
|
self.what = what
|
||||||
|
|
||||||
|
def __call__(self, func_or_cls):
|
||||||
|
if not self.what:
|
||||||
|
self.what = func_or_cls.__name__ + '()'
|
||||||
|
msg, details = self._build_message()
|
||||||
|
|
||||||
|
if inspect.isfunction(func_or_cls):
|
||||||
|
|
||||||
|
@six.wraps(func_or_cls)
|
||||||
|
def wrapped(*args, **kwargs):
|
||||||
|
LOG.deprecated(msg, details)
|
||||||
|
return func_or_cls(*args, **kwargs)
|
||||||
|
return wrapped
|
||||||
|
elif inspect.isclass(func_or_cls):
|
||||||
|
orig_init = func_or_cls.__init__
|
||||||
|
|
||||||
|
# TODO(tsufiev): change `functools` module to `six` as
|
||||||
|
# soon as six 1.7.4 (with fix for passing `assigned`
|
||||||
|
# argument to underlying `functools.wraps`) is released
|
||||||
|
# and added to the octavia-incubator requrements
|
||||||
|
@functools.wraps(orig_init, assigned=('__name__', '__doc__'))
|
||||||
|
def new_init(self, *args, **kwargs):
|
||||||
|
LOG.deprecated(msg, details)
|
||||||
|
orig_init(self, *args, **kwargs)
|
||||||
|
func_or_cls.__init__ = new_init
|
||||||
|
return func_or_cls
|
||||||
|
else:
|
||||||
|
raise TypeError('deprecated can be used only with functions or '
|
||||||
|
'classes')
|
||||||
|
|
||||||
|
def _get_safe_to_remove_release(self, release):
|
||||||
|
# TODO(dstanek): this method will have to be reimplemented once
|
||||||
|
# when we get to the X release because once we get to the Y
|
||||||
|
# release, what is Y+2?
|
||||||
|
new_release = chr(ord(release) + self.remove_in)
|
||||||
|
if new_release in self._RELEASES:
|
||||||
|
return self._RELEASES[new_release]
|
||||||
|
else:
|
||||||
|
return new_release
|
||||||
|
|
||||||
|
def _build_message(self):
|
||||||
|
details = dict(what=self.what,
|
||||||
|
as_of=self._RELEASES[self.as_of],
|
||||||
|
remove_in=self._get_safe_to_remove_release(self.as_of))
|
||||||
|
|
||||||
|
if self.in_favor_of:
|
||||||
|
details['in_favor_of'] = self.in_favor_of
|
||||||
|
if self.remove_in > 0:
|
||||||
|
msg = self._deprecated_msg_with_alternative
|
||||||
|
else:
|
||||||
|
# There are no plans to remove this function, but it is
|
||||||
|
# now deprecated.
|
||||||
|
msg = self._deprecated_msg_with_alternative_no_removal
|
||||||
|
else:
|
||||||
|
if self.remove_in > 0:
|
||||||
|
msg = self._deprecated_msg_no_alternative
|
||||||
|
else:
|
||||||
|
# There are no plans to remove this function, but it is
|
||||||
|
# now deprecated.
|
||||||
|
msg = self._deprecated_msg_with_no_alternative_no_removal
|
||||||
|
return msg, details
|
||||||
|
|
||||||
|
|
||||||
|
def is_compatible(requested_version, current_version, same_major=True):
|
||||||
|
"""Determine whether `requested_version` is satisfied by
|
||||||
|
`current_version`; in other words, `current_version` is >=
|
||||||
|
`requested_version`.
|
||||||
|
|
||||||
|
:param requested_version: version to check for compatibility
|
||||||
|
:param current_version: version to check against
|
||||||
|
:param same_major: if True, the major version must be identical between
|
||||||
|
`requested_version` and `current_version`. This is used when a
|
||||||
|
major-version difference indicates incompatibility between the two
|
||||||
|
versions. Since this is the common-case in practice, the default is
|
||||||
|
True.
|
||||||
|
:returns: True if compatible, False if not
|
||||||
|
"""
|
||||||
|
requested_parts = pkg_resources.parse_version(requested_version)
|
||||||
|
current_parts = pkg_resources.parse_version(current_version)
|
||||||
|
|
||||||
|
if same_major and (requested_parts[0] != current_parts[0]):
|
||||||
|
return False
|
||||||
|
|
||||||
|
return current_parts >= requested_parts
|
0
octavia/tests/unit/__init__.py
Normal file
0
octavia/tests/unit/__init__.py
Normal file
0
octavia/tests/unit/amphorae/__init__.py
Normal file
0
octavia/tests/unit/amphorae/__init__.py
Normal file
0
octavia/tests/unit/amphorae/drivers/__init__.py
Normal file
0
octavia/tests/unit/amphorae/drivers/__init__.py
Normal file
19
octavia/tests/unit/base.py
Normal file
19
octavia/tests/unit/base.py
Normal file
@ -0,0 +1,19 @@
|
|||||||
|
# Copyright 2014, Doug Wiegley, A10 Networks.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import testtools
|
||||||
|
|
||||||
|
|
||||||
|
class TestCase(testtools.TestCase):
|
||||||
|
pass
|
0
octavia/tests/unit/common/__init__.py
Normal file
0
octavia/tests/unit/common/__init__.py
Normal file
25
octavia/tests/unit/common/test_config.py
Normal file
25
octavia/tests/unit/common/test_config.py
Normal file
@ -0,0 +1,25 @@
|
|||||||
|
# Copyright 2014, Doug Wiegley, A10 Networks.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from oslo.config import cfg
|
||||||
|
|
||||||
|
import octavia.common.config as config
|
||||||
|
import octavia.tests.unit.base as base
|
||||||
|
|
||||||
|
|
||||||
|
class TestConfig(base.TestCase):
|
||||||
|
|
||||||
|
def test_sanity(self):
|
||||||
|
config.init([])
|
||||||
|
config.setup_logging(cfg.CONF)
|
23
octavia/tests/unit/common/test_constants.py
Normal file
23
octavia/tests/unit/common/test_constants.py
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
# Copyright 2014, Doug Wiegley, A10 Networks.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import octavia.common.constants as constants
|
||||||
|
import octavia.tests.unit.base as base
|
||||||
|
|
||||||
|
|
||||||
|
class TestConstants(base.TestCase):
|
||||||
|
# Rough sanity test of module import; not meant to be exhaustive
|
||||||
|
|
||||||
|
def test_import(self):
|
||||||
|
self.assertEqual(constants.PROTOCOL_UDP, 'UDP')
|
28
octavia/tests/unit/common/test_exceptions.py
Normal file
28
octavia/tests/unit/common/test_exceptions.py
Normal file
@ -0,0 +1,28 @@
|
|||||||
|
# Copyright 2014, Doug Wiegley, A10 Networks.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import octavia.common.exceptions as exc
|
||||||
|
import octavia.tests.unit.base as base
|
||||||
|
|
||||||
|
|
||||||
|
class TestExceptions(base.TestCase):
|
||||||
|
# Rough sanity test of module import; not meant to be exhaustive
|
||||||
|
|
||||||
|
def test_exception(self):
|
||||||
|
try:
|
||||||
|
raise exc.NotFound()
|
||||||
|
except exc.NotFound:
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
raise Exception()
|
25
octavia/tests/unit/common/test_utils.py
Normal file
25
octavia/tests/unit/common/test_utils.py
Normal file
@ -0,0 +1,25 @@
|
|||||||
|
# Copyright 2014, Doug Wiegley, A10 Networks.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import octavia.common.utils as utils
|
||||||
|
import octavia.tests.unit.base as base
|
||||||
|
|
||||||
|
|
||||||
|
class TestConfig(base.TestCase):
|
||||||
|
|
||||||
|
def test_get_hostname(self):
|
||||||
|
self.assertNotEqual(utils.get_hostname(), '')
|
||||||
|
|
||||||
|
def test_random_string(self):
|
||||||
|
self.assertNotEqual(utils.get_random_string(10), '')
|
0
octavia/tests/unit/controller/__init__.py
Normal file
0
octavia/tests/unit/controller/__init__.py
Normal file
0
octavia/tests/unit/db/__init__.py
Normal file
0
octavia/tests/unit/db/__init__.py
Normal file
0
octavia/tests/unit/network/__init__.py
Normal file
0
octavia/tests/unit/network/__init__.py
Normal file
0
octavia/tests/unit/network/drivers/__init__.py
Normal file
0
octavia/tests/unit/network/drivers/__init__.py
Normal file
0
octavia/tests/unit/openstack/__init__.py
Normal file
0
octavia/tests/unit/openstack/__init__.py
Normal file
0
octavia/tests/unit/openstack/common/__init__.py
Normal file
0
octavia/tests/unit/openstack/common/__init__.py
Normal file
45
octavia/tests/unit/openstack/common/test_common.py
Normal file
45
octavia/tests/unit/openstack/common/test_common.py
Normal file
@ -0,0 +1,45 @@
|
|||||||
|
# Copyright 2014, Doug Wiegley, A10 Networks.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import octavia.tests.unit.base as base
|
||||||
|
import octavia.openstack.common.cache
|
||||||
|
import octavia.openstack.common.context
|
||||||
|
import octavia.openstack.common.excutils
|
||||||
|
import octavia.openstack.common.fixture
|
||||||
|
import octavia.openstack.common.gettextutils
|
||||||
|
import octavia.openstack.common.importutils
|
||||||
|
import octavia.openstack.common.jsonutils
|
||||||
|
import octavia.openstack.common.local
|
||||||
|
import octavia.openstack.common.lockutils
|
||||||
|
import octavia.openstack.common.log
|
||||||
|
import octavia.openstack.common.loopingcall
|
||||||
|
import octavia.openstack.common.middleware
|
||||||
|
import octavia.openstack.common.network_utils
|
||||||
|
import octavia.openstack.common.periodic_task
|
||||||
|
import octavia.openstack.common.policy
|
||||||
|
import octavia.openstack.common.processutils
|
||||||
|
import octavia.openstack.common.service
|
||||||
|
import octavia.openstack.common.sslutils
|
||||||
|
import octavia.openstack.common.strutils
|
||||||
|
import octavia.openstack.common.systemd
|
||||||
|
import octavia.openstack.common.threadgroup
|
||||||
|
import octavia.openstack.common.timeutils
|
||||||
|
import octavia.openstack.common.uuidutils
|
||||||
|
import octavia.openstack.common.versionutils
|
||||||
|
|
||||||
|
|
||||||
|
class TestCommon(base.TestCase):
|
||||||
|
def test_openstack_common(self):
|
||||||
|
# The test is the imports
|
||||||
|
pass
|
17
octavia/version.py
Normal file
17
octavia/version.py
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
# Copyright 2011-2014 OpenStack Foundation
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import pbr.version
|
||||||
|
|
||||||
|
version_info = pbr.version.VersionInfo('octavia')
|
37
openstack-common.conf
Normal file
37
openstack-common.conf
Normal file
@ -0,0 +1,37 @@
|
|||||||
|
[DEFAULT]
|
||||||
|
# The list of modules to copy from oslo-incubator.git
|
||||||
|
module=cache
|
||||||
|
module=context
|
||||||
|
module=eventlet_backdoor
|
||||||
|
module=excutils
|
||||||
|
module=fileutils
|
||||||
|
module=fixture
|
||||||
|
module=gettextutils
|
||||||
|
module=importutils
|
||||||
|
module=install_venv_common
|
||||||
|
module=jsonutils
|
||||||
|
module=local
|
||||||
|
module=lockutils
|
||||||
|
module=log
|
||||||
|
module=loopingcall
|
||||||
|
module=middleware.base
|
||||||
|
module=middleware.catch_errors
|
||||||
|
module=middleware.correlation_id
|
||||||
|
module=middleware.debug
|
||||||
|
module=middleware.request_id
|
||||||
|
module=middleware.sizelimit
|
||||||
|
module=network_utils
|
||||||
|
module=periodic_task
|
||||||
|
module=policy
|
||||||
|
module=processutils
|
||||||
|
module=service
|
||||||
|
module=sslutils
|
||||||
|
module=strutils
|
||||||
|
module=systemd
|
||||||
|
module=threadgroup
|
||||||
|
module=timeutils
|
||||||
|
module=uuidutils
|
||||||
|
module=versionutils
|
||||||
|
|
||||||
|
# The base module to hold the copy of openstack.common
|
||||||
|
base=octavia
|
@ -11,3 +11,21 @@ sphinxcontrib-blockdiag
|
|||||||
sphinxcontrib-nwdiag
|
sphinxcontrib-nwdiag
|
||||||
sphinxcontrib-seqdiag
|
sphinxcontrib-seqdiag
|
||||||
graphviz
|
graphviz
|
||||||
|
|
||||||
|
anyjson>=0.3.3
|
||||||
|
Babel>=1.3
|
||||||
|
eventlet>=0.13.0
|
||||||
|
requests>=1.2.1
|
||||||
|
jsonrpclib
|
||||||
|
netaddr>=0.7.6
|
||||||
|
python-neutronclient>=2.3.6,<3
|
||||||
|
SQLAlchemy>=0.8.4,<=0.8.99,>=0.9.7,<=0.9.99
|
||||||
|
WebOb>=1.2.3
|
||||||
|
alembic>=0.6.4
|
||||||
|
six>=1.7.0
|
||||||
|
oslo.config>=1.4.0.0a3
|
||||||
|
oslo.db>=0.4.0 # Apache-2.0
|
||||||
|
oslo.messaging>=1.4.0.0a3
|
||||||
|
oslo.rootwrap>=1.3.0.0a1
|
||||||
|
python-novaclient>=2.17.0
|
||||||
|
posix_ipc
|
||||||
|
@ -1,5 +1,6 @@
|
|||||||
[metadata]
|
[metadata]
|
||||||
name = octavia
|
name = octavia
|
||||||
|
version = 0.1
|
||||||
summary = OpenStack Octavia Scalable Load Balancer as as Service
|
summary = OpenStack Octavia Scalable Load Balancer as as Service
|
||||||
description-file =
|
description-file =
|
||||||
README.rst
|
README.rst
|
||||||
@ -7,6 +8,7 @@ author = OpenStack
|
|||||||
author-email = openstack-dev@lists.openstack.org
|
author-email = openstack-dev@lists.openstack.org
|
||||||
home-page = http://www.openstack.org/
|
home-page = http://www.openstack.org/
|
||||||
classifier =
|
classifier =
|
||||||
|
Development Status :: 2 - Pre-Alpha
|
||||||
Environment :: OpenStack
|
Environment :: OpenStack
|
||||||
Intended Audience :: Developers
|
Intended Audience :: Developers
|
||||||
Intended Audience :: Information Technology
|
Intended Audience :: Information Technology
|
||||||
@ -14,8 +16,8 @@ classifier =
|
|||||||
License :: OSI Approved :: Apache Software License
|
License :: OSI Approved :: Apache Software License
|
||||||
Operating System :: POSIX :: Linux
|
Operating System :: POSIX :: Linux
|
||||||
Programming Language :: Python
|
Programming Language :: Python
|
||||||
|
Programming Language :: Python :: 2.6
|
||||||
Programming Language :: Python :: 2.7
|
Programming Language :: Python :: 2.7
|
||||||
Programming Language :: Python :: 3.3
|
|
||||||
|
|
||||||
[build_sphinx]
|
[build_sphinx]
|
||||||
all_files = 1
|
all_files = 1
|
||||||
|
172
tools/install_venv_common.py
Normal file
172
tools/install_venv_common.py
Normal file
@ -0,0 +1,172 @@
|
|||||||
|
# Copyright 2013 OpenStack Foundation
|
||||||
|
# Copyright 2013 IBM Corp.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""Provides methods needed by installation script for OpenStack development
|
||||||
|
virtual environments.
|
||||||
|
|
||||||
|
Since this script is used to bootstrap a virtualenv from the system's Python
|
||||||
|
environment, it should be kept strictly compatible with Python 2.6.
|
||||||
|
|
||||||
|
Synced in from openstack-common
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import print_function
|
||||||
|
|
||||||
|
import optparse
|
||||||
|
import os
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
|
||||||
|
|
||||||
|
class InstallVenv(object):
|
||||||
|
|
||||||
|
def __init__(self, root, venv, requirements,
|
||||||
|
test_requirements, py_version,
|
||||||
|
project):
|
||||||
|
self.root = root
|
||||||
|
self.venv = venv
|
||||||
|
self.requirements = requirements
|
||||||
|
self.test_requirements = test_requirements
|
||||||
|
self.py_version = py_version
|
||||||
|
self.project = project
|
||||||
|
|
||||||
|
def die(self, message, *args):
|
||||||
|
print(message % args, file=sys.stderr)
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
def check_python_version(self):
|
||||||
|
if sys.version_info < (2, 6):
|
||||||
|
self.die("Need Python Version >= 2.6")
|
||||||
|
|
||||||
|
def run_command_with_code(self, cmd, redirect_output=True,
|
||||||
|
check_exit_code=True):
|
||||||
|
"""Runs a command in an out-of-process shell.
|
||||||
|
|
||||||
|
Returns the output of that command. Working directory is self.root.
|
||||||
|
"""
|
||||||
|
if redirect_output:
|
||||||
|
stdout = subprocess.PIPE
|
||||||
|
else:
|
||||||
|
stdout = None
|
||||||
|
|
||||||
|
proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout)
|
||||||
|
output = proc.communicate()[0]
|
||||||
|
if check_exit_code and proc.returncode != 0:
|
||||||
|
self.die('Command "%s" failed.\n%s', ' '.join(cmd), output)
|
||||||
|
return (output, proc.returncode)
|
||||||
|
|
||||||
|
def run_command(self, cmd, redirect_output=True, check_exit_code=True):
|
||||||
|
return self.run_command_with_code(cmd, redirect_output,
|
||||||
|
check_exit_code)[0]
|
||||||
|
|
||||||
|
def get_distro(self):
|
||||||
|
if (os.path.exists('/etc/fedora-release') or
|
||||||
|
os.path.exists('/etc/redhat-release')):
|
||||||
|
return Fedora(
|
||||||
|
self.root, self.venv, self.requirements,
|
||||||
|
self.test_requirements, self.py_version, self.project)
|
||||||
|
else:
|
||||||
|
return Distro(
|
||||||
|
self.root, self.venv, self.requirements,
|
||||||
|
self.test_requirements, self.py_version, self.project)
|
||||||
|
|
||||||
|
def check_dependencies(self):
|
||||||
|
self.get_distro().install_virtualenv()
|
||||||
|
|
||||||
|
def create_virtualenv(self, no_site_packages=True):
|
||||||
|
"""Creates the virtual environment and installs PIP.
|
||||||
|
|
||||||
|
Creates the virtual environment and installs PIP only into the
|
||||||
|
virtual environment.
|
||||||
|
"""
|
||||||
|
if not os.path.isdir(self.venv):
|
||||||
|
print('Creating venv...', end=' ')
|
||||||
|
if no_site_packages:
|
||||||
|
self.run_command(['virtualenv', '-q', '--no-site-packages',
|
||||||
|
self.venv])
|
||||||
|
else:
|
||||||
|
self.run_command(['virtualenv', '-q', self.venv])
|
||||||
|
print('done.')
|
||||||
|
else:
|
||||||
|
print("venv already exists...")
|
||||||
|
pass
|
||||||
|
|
||||||
|
def pip_install(self, *args):
|
||||||
|
self.run_command(['tools/with_venv.sh',
|
||||||
|
'pip', 'install', '--upgrade'] + list(args),
|
||||||
|
redirect_output=False)
|
||||||
|
|
||||||
|
def install_dependencies(self):
|
||||||
|
print('Installing dependencies with pip (this can take a while)...')
|
||||||
|
|
||||||
|
# First things first, make sure our venv has the latest pip and
|
||||||
|
# setuptools and pbr
|
||||||
|
self.pip_install('pip>=1.4')
|
||||||
|
self.pip_install('setuptools')
|
||||||
|
self.pip_install('pbr')
|
||||||
|
|
||||||
|
self.pip_install('-r', self.requirements, '-r', self.test_requirements)
|
||||||
|
|
||||||
|
def parse_args(self, argv):
|
||||||
|
"""Parses command-line arguments."""
|
||||||
|
parser = optparse.OptionParser()
|
||||||
|
parser.add_option('-n', '--no-site-packages',
|
||||||
|
action='store_true',
|
||||||
|
help="Do not inherit packages from global Python "
|
||||||
|
"install.")
|
||||||
|
return parser.parse_args(argv[1:])[0]
|
||||||
|
|
||||||
|
|
||||||
|
class Distro(InstallVenv):
|
||||||
|
|
||||||
|
def check_cmd(self, cmd):
|
||||||
|
return bool(self.run_command(['which', cmd],
|
||||||
|
check_exit_code=False).strip())
|
||||||
|
|
||||||
|
def install_virtualenv(self):
|
||||||
|
if self.check_cmd('virtualenv'):
|
||||||
|
return
|
||||||
|
|
||||||
|
if self.check_cmd('easy_install'):
|
||||||
|
print('Installing virtualenv via easy_install...', end=' ')
|
||||||
|
if self.run_command(['easy_install', 'virtualenv']):
|
||||||
|
print('Succeeded')
|
||||||
|
return
|
||||||
|
else:
|
||||||
|
print('Failed')
|
||||||
|
|
||||||
|
self.die('ERROR: virtualenv not found.\n\n%s development'
|
||||||
|
' requires virtualenv, please install it using your'
|
||||||
|
' favorite package management tool' % self.project)
|
||||||
|
|
||||||
|
|
||||||
|
class Fedora(Distro):
|
||||||
|
"""This covers all Fedora-based distributions.
|
||||||
|
|
||||||
|
Includes: Fedora, RHEL, CentOS, Scientific Linux
|
||||||
|
"""
|
||||||
|
|
||||||
|
def check_pkg(self, pkg):
|
||||||
|
return self.run_command_with_code(['rpm', '-q', pkg],
|
||||||
|
check_exit_code=False)[1] == 0
|
||||||
|
|
||||||
|
def install_virtualenv(self):
|
||||||
|
if self.check_cmd('virtualenv'):
|
||||||
|
return
|
||||||
|
|
||||||
|
if not self.check_pkg('python-virtualenv'):
|
||||||
|
self.die("Please install 'python-virtualenv'.")
|
||||||
|
|
||||||
|
super(Fedora, self).install_virtualenv()
|
5
tox.ini
5
tox.ini
@ -1,6 +1,6 @@
|
|||||||
[tox]
|
[tox]
|
||||||
minversion = 1.6
|
minversion = 1.6
|
||||||
envlist = docs,py27,py33,pep8
|
envlist = docs,py26,py27,pep8
|
||||||
skipsdist = True
|
skipsdist = True
|
||||||
|
|
||||||
[testenv]
|
[testenv]
|
||||||
@ -21,8 +21,9 @@ commands = python setup.py build_sphinx
|
|||||||
|
|
||||||
[flake8]
|
[flake8]
|
||||||
ignore = None
|
ignore = None
|
||||||
exclude = .tox,doc
|
|
||||||
show-source = true
|
show-source = true
|
||||||
|
builtins = _
|
||||||
|
exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools,.ropeproject,rally-scenarios
|
||||||
|
|
||||||
[doc8]
|
[doc8]
|
||||||
max-line-length = 79
|
max-line-length = 79
|
||||||
|
Loading…
x
Reference in New Issue
Block a user