Remove log translations from octavia
Log messages are no longer being translated. This removes all use of the
_LE, _LI, and _LW translation markers to simplify logging and to avoid
confusion with new contributions. This patch also adds hacking rules for
the translation tags.

See:
http://lists.openstack.org/pipermail/openstack-i18n/2016-November/002574.html
http://lists.openstack.org/pipermail/openstack-dev/2017-March/113365.html

Co-Authored-By: Michael Johnson <johnsomor@gmail.com>
Change-Id: Ic95111d09e38b3f44fd6c85d0bcf0355c21ef545
parent 7829ed96f6
commit dc882e9d27
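The change itself is mechanical; a representative before/after pair, taken
from the listener diff below (LOG is the module-level oslo_log logger):

    # Before: log message wrapped in an oslo.i18n marker function
    from octavia.i18n import _LE
    LOG.error(_LE("Failed to verify haproxy file: %s"), e)

    # After: plain string passed straight to the logger
    LOG.error("Failed to verify haproxy file: %s", e)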
@@ -13,7 +13,6 @@ Octavia Specific Commandments
 - [O318] Change assert(Not)Equal(A, None) or assert(Not)Equal(None, A)
   by optimal assert like assertIs(Not)None(A).
 - [O319] Validate that debug level logs are not translated.
-- [O320] Validate that LOG messages, except debug ones, have translations
 - [O321] Validate that jsonutils module is used instead of json
 - [O322] Don't use author tags
 - [O323] Change assertEqual(True, A) or assertEqual(False, A) to the more
@@ -24,6 +23,8 @@ Octavia Specific Commandments
   specific assertIn/NotIn(A, B)
 - [O339] LOG.warn() is not allowed. Use LOG.warning()
 - [O340] Don't use xrange()
+- [O341] Don't translate logs.
+- [O342] Exception messages should be translated

 Creating Unit Tests
 -------------------
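The new O341 rule is enforced through a hacking (flake8 plugin) check. The
check itself is not part of this excerpt; a minimal sketch of what such a
check could look like, assuming a pycodestyle-style function that is fed each
logical line (names here are illustrative, not the project's actual
implementation):

    import re

    # Matches LOG calls whose message is wrapped in _() or a
    # _LE/_LI/_LW/_LC marker, e.g. LOG.error(_LE("...")).
    _TRANSLATED_LOG_RE = re.compile(
        r"LOG\.(debug|info|warning|warn|error|exception|critical)"
        r"\(\s*_(L[EIWC])?\(")

    def no_translate_logs(logical_line):
        """O341 - Don't translate logs."""
        if _TRANSLATED_LOG_RE.search(logical_line):
            yield 0, "O341 Don't translate logs"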
@@ -33,7 +33,6 @@ from octavia.amphorae.backends.agent.api_server import util
 from octavia.amphorae.backends.utils import haproxy_query as query
 from octavia.common import constants as consts
 from octavia.common import utils as octavia_utils
-from octavia.i18n import _LE

 LOG = logging.getLogger(__name__)
 BUFFER = 100
@@ -136,7 +135,7 @@ class Listener(object):
         try:
             subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
         except subprocess.CalledProcessError as e:
-            LOG.error(_LE("Failed to verify haproxy file: %s"), e)
+            LOG.error("Failed to verify haproxy file: %s", e)
             os.remove(name)  # delete file
             return flask.make_response(flask.jsonify(dict(
                 message="Invalid request",
@@ -166,7 +165,7 @@ class Listener(object):
                 raise util.UnknownInitError()

         except util.UnknownInitError:
-            LOG.error(_LE("Unknown init system found."))
+            LOG.error("Unknown init system found.")
             return flask.make_response(flask.jsonify(dict(
                 message="Unknown init system in amphora",
                 details="The amphora image is running an unknown init "
@@ -203,8 +202,7 @@ class Listener(object):
             subprocess.check_output(init_enable_cmd.split(),
                                     stderr=subprocess.STDOUT)
         except subprocess.CalledProcessError as e:
-            LOG.error(_LE("Failed to enable haproxy-%(list)s "
-                          "service: %(err)s"),
+            LOG.error("Failed to enable haproxy-%(list)s service: %(err)s",
                       {'list': listener_id, 'err': e})
             return flask.make_response(flask.jsonify(dict(
                 message="Error enabling haproxy-{0} service".format(
@@ -276,7 +274,7 @@ class Listener(object):
         try:
             subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
         except subprocess.CalledProcessError as e:
-            LOG.error(_LE("Failed to stop HAProxy service: %s"), e)
+            LOG.error("Failed to stop HAProxy service: %s", e)
             return flask.make_response(flask.jsonify(dict(
                 message="Error stopping haproxy",
                 details=e.output)), 500)
@@ -311,9 +309,8 @@ class Listener(object):
             subprocess.check_output(init_disable_cmd.split(),
                                     stderr=subprocess.STDOUT)
         except subprocess.CalledProcessError as e:
-            LOG.error(_LE("Failed to disable haproxy-%(list)s "
-                          "service: %(err)s"),
-                      {'list': listener_id, 'err': e})
+            LOG.error("Failed to disable haproxy-%(list)s service: "
+                      "%(err)s", {'list': listener_id, 'err': e})
             return flask.make_response(flask.jsonify(dict(
                 message="Error disabling haproxy-{0} service".format(
                     listener_id), details=e.output)), 500)
@@ -29,7 +29,6 @@ from werkzeug import exceptions
 from octavia.common import constants as consts
 from octavia.common import exceptions as octavia_exceptions
 from octavia.common import utils
-from octavia.i18n import _LE

 CONF = cfg.CONF

@@ -144,12 +143,11 @@ class BaseOS(object):
         try:
             ip_addr = fixed_ip['ip_address']
             cidr = fixed_ip['subnet_cidr']
-            ip = ipaddress.ip_address(
-                ip_addr if six.text_type == type(
-                    ip_addr) else six.u(ip_addr))
+            ip = ipaddress.ip_address(ip_addr if isinstance(
+                ip_addr, six.text_type) else six.u(ip_addr))
             network = ipaddress.ip_network(
-                cidr if six.text_type == type(
-                    cidr) else six.u(cidr))
+                cidr if isinstance(
+                    cidr, six.text_type) else six.u(cidr))
             broadcast = network.broadcast_address.exploded
             netmask = (network.prefixlen if ip.version is 6
                        else network.netmask.exploded)
@@ -186,8 +184,8 @@ class BaseOS(object):
         try:
             subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
         except subprocess.CalledProcessError as e:
-            LOG.error(_LE('Failed to if up {0} due to '
-                          'error: {1}').format(interface, str(e)))
+            LOG.error('Failed to if up {0} due to '
+                      'error: {1}'.format(interface, str(e)))
             raise exceptions.HTTPException(
                 response=flask.make_response(flask.jsonify(dict(
                     message='Error plugging {0}'.format(what),
@@ -29,7 +29,6 @@ import six
 from werkzeug import exceptions

 from octavia.common import constants as consts
-from octavia.i18n import _LI


 CONF = cfg.CONF
@@ -55,9 +54,9 @@ class Plug(object):
         try:
             render_host_routes = []
             ip = ipaddress.ip_address(
-                vip if six.text_type == type(vip) else six.u(vip))
+                vip if isinstance(vip, six.text_type) else six.u(vip))
             network = ipaddress.ip_network(
-                subnet_cidr if six.text_type == type(subnet_cidr)
+                subnet_cidr if isinstance(subnet_cidr, six.text_type)
                 else six.u(subnet_cidr))
             vip = ip.exploded
             broadcast = network.broadcast_address.exploded
@@ -66,7 +65,7 @@ class Plug(object):
         vrrp_version = None
         if vrrp_ip:
             vrrp_ip_obj = ipaddress.ip_address(
-                vrrp_ip if six.text_type == type(vrrp_ip)
+                vrrp_ip if isinstance(vrrp_ip, six.text_type)
                 else six.u(vrrp_ip)
             )
             vrrp_version = vrrp_ip_obj.version
@@ -184,10 +183,10 @@ class Plug(object):
         # Note, eth0 is skipped because that is the VIP interface
         netns_interface = 'eth{0}'.format(len(netns.get_links()))

-        LOG.info(_LI('Plugged interface {0} will become {1} in the '
-                     'namespace {2}').format(default_netns_interface,
-                                             netns_interface,
-                                             consts.AMPHORA_NAMESPACE))
+        LOG.info('Plugged interface {0} will become {1} in the '
+                 'namespace {2}'.format(default_netns_interface,
+                                        netns_interface,
+                                        consts.AMPHORA_NAMESPACE))
         interface_file_path = self._osutils.get_network_interface_file(
             netns_interface)
         self._osutils.write_port_interface_file(
@@ -24,7 +24,6 @@ import six
 from octavia.amphorae.backends.agent.api_server import util
 from octavia.amphorae.backends.health_daemon import health_sender
 from octavia.amphorae.backends.utils import haproxy_query
-from octavia.i18n import _LI

 if six.PY2:
     import Queue as queue
@@ -48,7 +47,7 @@ def list_sock_stat_files(hadir=None):


 def run_sender(cmd_queue):
-    LOG.info(_LI('Health Manager Sender starting.'))
+    LOG.info('Health Manager Sender starting.')
     sender = health_sender.UDPStatusSender()
     while True:
         message = build_stats_message()
@@ -56,10 +55,10 @@ def run_sender(cmd_queue):
         try:
             cmd = cmd_queue.get_nowait()
             if cmd is 'reload':
-                LOG.info(_LI('Reloading configuration'))
+                LOG.info('Reloading configuration')
                 CONF.reload_config_files()
             elif cmd is 'shutdown':
-                LOG.info(_LI('Health Manager Sender shutting down.'))
+                LOG.info('Health Manager Sender shutting down.')
                 break
         except queue.Empty:
             pass
@@ -18,7 +18,6 @@ from oslo_config import cfg
 from oslo_log import log as logging

 from octavia.amphorae.backends.health_daemon import status_message
-from octavia.i18n import _LE

 CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
@@ -39,9 +38,8 @@ class UDPStatusSender(object):
             try:
                 ip, port = ipport.rsplit(':', 1)
             except ValueError:
-                LOG.error(_LE("Invalid ip and port '%s' in "
-                              "health_manager controller_ip_port_list"),
-                          ipport)
+                LOG.error("Invalid ip and port '%s' in health_manager "
+                          "controller_ip_port_list", ipport)
                 break
             self.update(ip, port)
         self.v4sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
@@ -62,8 +60,7 @@ class UDPStatusSender(object):
         # dest = (family, socktype, proto, canonname, sockaddr)
         # e.g. 0 = sock family, 4 = sockaddr - what we actually need
         if addrinfo is None:
-            LOG.error(_LE('No controller address found. '
-                          'Unable to send heartbeat.'))
+            LOG.error('No controller address found. Unable to send heartbeat.')
             return
         try:
             if addrinfo[0] == socket.AF_INET:
@@ -22,7 +22,6 @@ from oslo_log import log as logging
 from oslo_utils import secretutils

 from octavia.common import exceptions
-from octavia.i18n import _LW

 LOG = logging.getLogger(__name__)

@@ -58,9 +57,9 @@ def unwrap_envelope(envelope, key):
     expected_hmc = envelope[-hash_len:]
     calculated_hmc = get_hmac(payload, key)
     if not secretutils.constant_time_compare(expected_hmc, calculated_hmc):
-        LOG.warning(_LW('calculated hmac: %(s1)s not equal to msg hmac: '
-                        '%(s2)s dropping packet'), {'s1': to_hex(calculated_hmc),
-                                                    's2': to_hex(expected_hmc)})
+        LOG.warning('calculated hmac: %(s1)s not equal to msg hmac: '
+                    '%(s2)s dropping packet', {'s1': to_hex(calculated_hmc),
+                                               's2': to_hex(expected_hmc)})
         fmt = 'calculated hmac: {0} not equal to msg hmac: {1} dropping packet'
         raise exceptions.InvalidHMACException(fmt.format(
             to_hex(calculated_hmc), to_hex(expected_hmc)))
@@ -48,7 +48,7 @@ class HAProxyQuery(object):
         try:
             sock.connect(self.socket)
         except socket.error:
-            raise Exception("HAProxy '{0}' query failed.".format(query))
+            raise Exception(_("HAProxy '{0}' query failed.").format(query))

         try:
             sock.send(six.b(query + '\n'))
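Note the direction of this hunk: it is the one place the commit adds a
translation call rather than removing one, because exception messages (unlike
log messages) should still be translated (rule O342). The resulting
convention, sketched with the _ function that octavia.i18n re-exports from
oslo.i18n:

    from octavia.i18n import _

    # Log messages: plain strings, never translated (O341).
    LOG.error("Failed to stop HAProxy service: %s", e)

    # Exception messages: wrapped in _() so they can be translated (O342).
    raise Exception(_("HAProxy '{0}' query failed.").format(query))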
@@ -32,7 +32,6 @@ from octavia.common import constants as consts
 from octavia.common.jinja.haproxy import jinja_cfg
 from octavia.common.tls_utils import cert_parser
 from octavia.common import utils
-from octavia.i18n import _LE, _LW

 LOG = logging.getLogger(__name__)
 API_VERSION = consts.API_VERSION
@@ -135,9 +134,9 @@ class HaproxyAmphoraLoadBalancerDriver(
                     load_balancer.vip.ip_address,
                     net_info)
             except exc.Conflict:
-                LOG.warning(_LW('VIP with MAC {mac} already exists on '
-                                'amphora, skipping post_vip_plug').format(
-                    mac=port.mac_address))
+                LOG.warning(('VIP with MAC {mac} already exists on '
+                             'amphora, skipping post_vip_plug').format(
+                    mac=port.mac_address))

     def post_network_plug(self, amphora, port):
         fixed_ips = []
@@ -155,9 +154,9 @@ class HaproxyAmphoraLoadBalancerDriver(
         try:
             self.client.plug_network(amphora, port_info)
         except exc.Conflict:
-            LOG.warning(_LW('Network with MAC {mac} already exists on '
-                            'amphora, skipping post_network_plug').format(
-                mac=port.mac_address))
+            LOG.warning(('Network with MAC {mac} already exists on '
+                         'amphora, skipping post_network_plug').format(
+                mac=port.mac_address))

     def get_vrrp_interface(self, amphora):
         return self.client.get_interface(amphora, amphora.vrrp_ip)['interface']
@@ -288,12 +287,12 @@ class AmphoraAPIClient(object):
                 return r
             except (requests.ConnectionError, requests.Timeout) as e:
                 exception = e
-                LOG.warning(_LW("Could not connect to instance. Retrying."))
+                LOG.warning("Could not connect to instance. Retrying.")
                 time.sleep(CONF.haproxy_amphora.connection_retry_interval)

-        LOG.error(_LE("Connection retries (currently set to %(max_retries)s) "
-                      "exhausted. The amphora is unavailable. Reason: "
-                      "%(exception)s"),
+        LOG.error("Connection retries (currently set to %(max_retries)s) "
+                  "exhausted. The amphora is unavailable. Reason: "
+                  "%(exception)s",
                   {'max_retries': CONF.haproxy_amphora.connection_max_retries,
                    'exception': exception})
         raise driver_except.TimeOutException()
@@ -21,8 +21,6 @@ from oslo_log import log as logging
 from octavia.amphorae.backends.health_daemon import status_message
 from octavia.common import exceptions
 from octavia.db import repositories
-from octavia.i18n import _LI
-

 UDP_MAX_SIZE = 64 * 1024
 LOG = logging.getLogger(__name__)
@@ -41,7 +39,7 @@ class UDPStatusGetter(object):
         self.ip = cfg.CONF.health_manager.bind_ip
         self.port = cfg.CONF.health_manager.bind_port
         self.sockaddr = None
-        LOG.info(_LI('attempting to listen on %(ip)s port %(port)s'),
+        LOG.info('attempting to listen on %(ip)s port %(port)s',
                  {'ip': self.ip, 'port': self.port})
         self.sock = None
         self.update(self.key, self.ip, self.port)
@@ -68,7 +66,7 @@ class UDPStatusGetter(object):
             self.sock.bind(self.sockaddr)
             if cfg.CONF.health_manager.sock_rlimit > 0:
                 rlimit = cfg.CONF.health_manager.sock_rlimit
-                LOG.info(_LI("setting sock rlimit to %s"), rlimit)
+                LOG.info("setting sock rlimit to %s", rlimit)
                 self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF,
                                      rlimit)
             break  # just used the first addr getaddrinfo finds
@@ -18,7 +18,6 @@ import six
 from octavia.amphorae.drivers import driver_base as driver_base
 from octavia.amphorae.drivers.keepalived.jinja import jinja_cfg
 from octavia.common import constants
-from octavia.i18n import _LI

 LOG = logging.getLogger(__name__)
 API_VERSION = constants.API_VERSION
@@ -53,7 +52,7 @@ class KeepalivedAmphoraDriverMixin(driver_base.VRRPDriverMixin):

         :param loadbalancer: loadbalancer object
         """
-        LOG.info(_LI("Stop loadbalancer %s amphora VRRP Service."),
+        LOG.info("Stop loadbalancer %s amphora VRRP Service.",
                  loadbalancer.id)

         for amp in six.moves.filter(
@@ -67,7 +66,7 @@ class KeepalivedAmphoraDriverMixin(driver_base.VRRPDriverMixin):

         :param loadbalancer: loadbalancer object
         """
-        LOG.info(_LI("Start loadbalancer %s amphora VRRP Service."),
+        LOG.info("Start loadbalancer %s amphora VRRP Service.",
                  loadbalancer.id)

         for amp in six.moves.filter(
@@ -82,7 +81,7 @@ class KeepalivedAmphoraDriverMixin(driver_base.VRRPDriverMixin):

         :param loadbalancer: loadbalancer object
         """
-        LOG.info(_LI("Reload loadbalancer %s amphora VRRP Service."),
+        LOG.info("Reload loadbalancer %s amphora VRRP Service.",
                  loadbalancer.id)

         for amp in six.moves.filter(
@@ -29,8 +29,6 @@ from octavia.common import constants
 from octavia.common import data_models
 from octavia.db import api as db_api
 import octavia.db.repositories as repos
-from octavia.i18n import _LI
-

 LOG = logging.getLogger(__name__)
 ASYNC_TIME = 1
@@ -51,7 +49,7 @@ def simulate_controller(data_model, delete=False, update=False, create=False):

     def member_controller(member, delete=False, update=False, create=False):
         time.sleep(ASYNC_TIME)
-        LOG.info(_LI("Simulating controller operation for member..."))
+        LOG.info("Simulating controller operation for member...")

         db_mem = None
         if delete:
@@ -83,12 +81,12 @@ def simulate_controller(data_model, delete=False, update=False, create=False):
                 member.pool.load_balancer.id,
                 operating_status=constants.ONLINE,
                 provisioning_status=constants.ACTIVE)
-        LOG.info(_LI("Simulated Controller Handler Thread Complete"))
+        LOG.info("Simulated Controller Handler Thread Complete")

     def l7policy_controller(l7policy, delete=False, update=False,
                             create=False):
         time.sleep(ASYNC_TIME)
-        LOG.info(_LI("Simulating controller operation for l7policy..."))
+        LOG.info("Simulating controller operation for l7policy...")

         db_l7policy = None
         if delete:
@@ -110,11 +108,11 @@ def simulate_controller(data_model, delete=False, update=False, create=False):
             db_l7policy.listener.load_balancer.id,
             operating_status=constants.ONLINE,
             provisioning_status=constants.ACTIVE)
-        LOG.info(_LI("Simulated Controller Handler Thread Complete"))
+        LOG.info("Simulated Controller Handler Thread Complete")

     def l7rule_controller(l7rule, delete=False, update=False, create=False):
         time.sleep(ASYNC_TIME)
-        LOG.info(_LI("Simulating controller operation for l7rule..."))
+        LOG.info("Simulating controller operation for l7rule...")

         db_l7rule = None
         if delete:
@@ -135,12 +133,12 @@ def simulate_controller(data_model, delete=False, update=False, create=False):
                 listener.load_balancer.id,
                 operating_status=constants.ONLINE,
                 provisioning_status=constants.ACTIVE)
-        LOG.info(_LI("Simulated Controller Handler Thread Complete"))
+        LOG.info("Simulated Controller Handler Thread Complete")

     def health_monitor_controller(health_monitor, delete=False, update=False,
                                   create=False):
         time.sleep(ASYNC_TIME)
-        LOG.info(_LI("Simulating controller operation for health monitor..."))
+        LOG.info("Simulating controller operation for health monitor...")

         db_hm = None
         if delete:
@@ -182,11 +180,11 @@ def simulate_controller(data_model, delete=False, update=False, create=False):
                 health_monitor.pool.load_balancer.id,
                 operating_status=constants.ONLINE,
                 provisioning_status=constants.ACTIVE)
-        LOG.info(_LI("Simulated Controller Handler Thread Complete"))
+        LOG.info("Simulated Controller Handler Thread Complete")

     def pool_controller(pool, delete=False, update=False, create=False):
         time.sleep(ASYNC_TIME)
-        LOG.info(_LI("Simulating controller operation for pool..."))
+        LOG.info("Simulating controller operation for pool...")

         db_pool = None
         if delete:
@@ -218,12 +216,12 @@ def simulate_controller(data_model, delete=False, update=False, create=False):
                 pool.load_balancer.id,
                 operating_status=constants.ONLINE,
                 provisioning_status=constants.ACTIVE)
-        LOG.info(_LI("Simulated Controller Handler Thread Complete"))
+        LOG.info("Simulated Controller Handler Thread Complete")

     def listener_controller(listener, delete=False, update=False,
                             create=False):
         time.sleep(ASYNC_TIME)
-        LOG.info(_LI("Simulating controller operation for listener..."))
+        LOG.info("Simulating controller operation for listener...")

         if delete:
             repo.listener.update(db_api.get_session(), listener.id,
@@ -244,12 +242,12 @@ def simulate_controller(data_model, delete=False, update=False, create=False):
                 listener.load_balancer.id,
                 operating_status=constants.ONLINE,
                 provisioning_status=constants.ACTIVE)
-        LOG.info(_LI("Simulated Controller Handler Thread Complete"))
+        LOG.info("Simulated Controller Handler Thread Complete")

     def loadbalancer_controller(loadbalancer, delete=False, update=False,
                                 create=False):
         time.sleep(ASYNC_TIME)
-        LOG.info(_LI("Simulating controller operation for loadbalancer..."))
+        LOG.info("Simulating controller operation for loadbalancer...")

         if delete:
             repo.load_balancer.update(
@@ -266,7 +264,7 @@ def simulate_controller(data_model, delete=False, update=False, create=False):
         repo.load_balancer.update(db_api.get_session(), id=loadbalancer.id,
                                   operating_status=constants.ONLINE,
                                   provisioning_status=constants.ACTIVE)
-        LOG.info(_LI("Simulated Controller Handler Thread Complete"))
+        LOG.info("Simulated Controller Handler Thread Complete")

     controller = loadbalancer_controller
     if isinstance(data_model, data_models.Member):
@@ -294,22 +292,19 @@ class InvalidHandlerInputObject(Exception):
 class LoadBalancerHandler(abstract_handler.BaseObjectHandler):

     def create(self, load_balancer_id):
-        LOG.info(_LI("%(entity)s handling the creation of "
-                     "load balancer %(id)s"),
+        LOG.info("%(entity)s handling the creation of load balancer %(id)s",
                  {"entity": self.__class__.__name__, "id": load_balancer_id})
         simulate_controller(load_balancer_id, create=True)

     def update(self, old_lb, load_balancer):
         validate_input(data_models.LoadBalancer, load_balancer)
-        LOG.info(_LI("%(entity)s handling the update of "
-                     "load balancer %(id)s"),
+        LOG.info("%(entity)s handling the update of load balancer %(id)s",
                  {"entity": self.__class__.__name__, "id": old_lb.id})
         load_balancer.id = old_lb.id
         simulate_controller(load_balancer, update=True)

     def delete(self, load_balancer_id):
-        LOG.info(_LI("%(entity)s handling the deletion of "
-                     "load balancer %(id)s"),
+        LOG.info("%(entity)s handling the deletion of load balancer %(id)s",
                  {"entity": self.__class__.__name__, "id": load_balancer_id})
         simulate_controller(load_balancer_id, delete=True)

@@ -317,19 +312,19 @@ class LoadBalancerHandler(abstract_handler.BaseObjectHandler):
 class ListenerHandler(abstract_handler.BaseObjectHandler):

     def create(self, listener_id):
-        LOG.info(_LI("%(entity)s handling the creation of listener %(id)s"),
+        LOG.info("%(entity)s handling the creation of listener %(id)s",
                  {"entity": self.__class__.__name__, "id": listener_id})
         simulate_controller(listener_id, create=True)

     def update(self, old_listener, listener):
         validate_input(data_models.Listener, listener)
-        LOG.info(_LI("%(entity)s handling the update of listener %(id)s"),
+        LOG.info("%(entity)s handling the update of listener %(id)s",
                  {"entity": self.__class__.__name__, "id": old_listener.id})
         listener.id = old_listener.id
         simulate_controller(listener, update=True)

     def delete(self, listener_id):
-        LOG.info(_LI("%(entity)s handling the deletion of listener %(id)s"),
+        LOG.info("%(entity)s handling the deletion of listener %(id)s",
                  {"entity": self.__class__.__name__, "id": listener_id})
         simulate_controller(listener_id, delete=True)

@@ -337,19 +332,19 @@ class ListenerHandler(abstract_handler.BaseObjectHandler):
 class PoolHandler(abstract_handler.BaseObjectHandler):

     def create(self, pool_id):
-        LOG.info(_LI("%(entity)s handling the creation of pool %(id)s"),
+        LOG.info("%(entity)s handling the creation of pool %(id)s",
                  {"entity": self.__class__.__name__, "id": pool_id})
         simulate_controller(pool_id, create=True)

     def update(self, old_pool, pool):
         validate_input(data_models.Pool, pool)
-        LOG.info(_LI("%(entity)s handling the update of pool %(id)s"),
+        LOG.info("%(entity)s handling the update of pool %(id)s",
                  {"entity": self.__class__.__name__, "id": old_pool.id})
         pool.id = old_pool.id
         simulate_controller(pool, update=True)

     def delete(self, pool_id):
-        LOG.info(_LI("%(entity)s handling the deletion of pool %(id)s"),
+        LOG.info("%(entity)s handling the deletion of pool %(id)s",
                  {"entity": self.__class__.__name__, "id": pool_id})
         simulate_controller(pool_id, delete=True)

@@ -357,23 +352,23 @@ class PoolHandler(abstract_handler.BaseObjectHandler):
 class HealthMonitorHandler(abstract_handler.BaseObjectHandler):

     def create(self, pool_id):
-        LOG.info(_LI("%(entity)s handling the creation of health monitor "
-                     "on pool %(id)s"),
+        LOG.info("%(entity)s handling the creation of health monitor "
+                 "on pool %(id)s",
                  {"entity": self.__class__.__name__, "id": pool_id})
         simulate_controller(pool_id, create=True)

     def update(self, old_health_monitor, health_monitor):
         validate_input(data_models.HealthMonitor, health_monitor)
-        LOG.info(_LI("%(entity)s handling the update of health monitor "
-                     "on pool %(id)s"),
+        LOG.info("%(entity)s handling the update of health monitor "
+                 "on pool %(id)s",
                  {"entity": self.__class__.__name__,
                   "id": old_health_monitor.pool_id})
         health_monitor.pool_id = old_health_monitor.pool_id
         simulate_controller(health_monitor, update=True)

     def delete(self, pool_id):
-        LOG.info(_LI("%(entity)s handling the deletion of health monitor "
-                     "on pool %(id)s"),
+        LOG.info("%(entity)s handling the deletion of health monitor "
+                 "on pool %(id)s",
                  {"entity": self.__class__.__name__, "id": pool_id})
         simulate_controller(pool_id, delete=True)

@@ -381,19 +376,19 @@ class HealthMonitorHandler(abstract_handler.BaseObjectHandler):
 class MemberHandler(abstract_handler.BaseObjectHandler):

     def create(self, member_id):
-        LOG.info(_LI("%(entity)s handling the creation of member %(id)s"),
+        LOG.info("%(entity)s handling the creation of member %(id)s",
                  {"entity": self.__class__.__name__, "id": member_id})
         simulate_controller(member_id, create=True)

     def update(self, old_member, member):
         validate_input(data_models.Member, member)
-        LOG.info(_LI("%(entity)s handling the update of member %(id)s"),
+        LOG.info("%(entity)s handling the update of member %(id)s",
                  {"entity": self.__class__.__name__, "id": old_member.id})
         member.id = old_member.id
         simulate_controller(member, update=True)

     def delete(self, member_id):
-        LOG.info(_LI("%(entity)s handling the deletion of member %(id)s"),
+        LOG.info("%(entity)s handling the deletion of member %(id)s",
                  {"entity": self.__class__.__name__, "id": member_id})
         simulate_controller(member_id, delete=True)

@@ -401,19 +396,19 @@ class MemberHandler(abstract_handler.BaseObjectHandler):
 class L7PolicyHandler(abstract_handler.BaseObjectHandler):

     def create(self, l7policy_id):
-        LOG.info(_LI("%(entity)s handling the creation of l7policy %(id)s"),
+        LOG.info("%(entity)s handling the creation of l7policy %(id)s",
                  {"entity": self.__class__.__name__, "id": l7policy_id})
         simulate_controller(l7policy_id, create=True)

     def update(self, old_l7policy, l7policy):
         validate_input(data_models.L7Policy, l7policy)
-        LOG.info(_LI("%(entity)s handling the update of l7policy %(id)s"),
+        LOG.info("%(entity)s handling the update of l7policy %(id)s",
                  {"entity": self.__class__.__name__, "id": old_l7policy.id})
         l7policy.id = old_l7policy.id
         simulate_controller(l7policy, update=True)

     def delete(self, l7policy_id):
-        LOG.info(_LI("%(entity)s handling the deletion of l7policy %(id)s"),
+        LOG.info("%(entity)s handling the deletion of l7policy %(id)s",
                  {"entity": self.__class__.__name__, "id": l7policy_id})
         simulate_controller(l7policy_id, delete=True)

@@ -421,19 +416,19 @@ class L7PolicyHandler(abstract_handler.BaseObjectHandler):
 class L7RuleHandler(abstract_handler.BaseObjectHandler):

     def create(self, l7rule):
-        LOG.info(_LI("%(entity)s handling the creation of l7rule %(id)s"),
+        LOG.info("%(entity)s handling the creation of l7rule %(id)s",
                  {"entity": self.__class__.__name__, "id": l7rule.id})
         simulate_controller(l7rule, create=True)

     def update(self, old_l7rule, l7rule):
         validate_input(data_models.L7Rule, l7rule)
-        LOG.info(_LI("%(entity)s handling the update of l7rule %(id)s"),
+        LOG.info("%(entity)s handling the update of l7rule %(id)s",
                  {"entity": self.__class__.__name__, "id": old_l7rule.id})
         l7rule.id = old_l7rule.id
         simulate_controller(l7rule, update=True)

     def delete(self, l7rule):
-        LOG.info(_LI("%(entity)s handling the deletion of l7rule %(id)s"),
+        LOG.info("%(entity)s handling the deletion of l7rule %(id)s",
                  {"entity": self.__class__.__name__, "id": l7rule.id})
         simulate_controller(l7rule, delete=True)
@@ -21,7 +21,6 @@ from stevedore import driver as stevedore_driver
 from octavia.common import data_models
 from octavia.common import exceptions
 from octavia.db import repositories
-from octavia.i18n import _LE

 CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
@@ -61,7 +60,7 @@ class BaseController(rest.RestController):
         """Gets an object from the database and returns it."""
         db_obj = repo.get(session, id=id)
         if not db_obj:
-            LOG.exception(_LE("{name} {id} not found").format(
+            LOG.exception("{name} {id} not found".format(
                 name=data_model._name(), id=id))
             raise exceptions.NotFound(
                 resource=data_model._name(), id=id)
@@ -27,8 +27,6 @@ from octavia.common import data_models
 from octavia.common import exceptions
 from octavia.db import api as db_api
 from octavia.db import prepare as db_prepare
-from octavia.i18n import _LI
-

 LOG = logging.getLogger(__name__)

@@ -47,7 +45,7 @@ class HealthMonitorController(base.BaseController):
         db_hm = self.repositories.health_monitor.get(
             session, pool_id=self.pool_id)
         if not db_hm:
-            LOG.info(_LI("Health Monitor for Pool %s was not found"),
+            LOG.info("Health Monitor for Pool %s was not found",
                      self.pool_id)
             raise exceptions.NotFound(
                 resource=data_models.HealthMonitor._name(),
@@ -83,8 +81,8 @@ class HealthMonitorController(base.BaseController):
                 session, self.load_balancer_id,
                 constants.PENDING_UPDATE, constants.PENDING_UPDATE,
                 listener_ids=self._get_affected_listener_ids(session, hm)):
-            LOG.info(_LI("Health Monitor cannot be created or modified "
-                         "because the Load Balancer is in an immutable state"))
+            LOG.info("Health Monitor cannot be created or modified "
+                     "because the Load Balancer is in an immutable state")
             lb_repo = self.repositories.load_balancer
             db_lb = lb_repo.get(session, id=self.load_balancer_id)
             raise exceptions.ImmutableObject(resource=db_lb._name(),
@@ -134,8 +132,8 @@ class HealthMonitorController(base.BaseController):
             lock_session.rollback()

         try:
-            LOG.info(_LI("Sending Creation of Health Monitor for Pool %s to "
-                         "handler"), self.pool_id)
+            LOG.info("Sending Creation of Health Monitor for Pool %s to "
+                     "handler", self.pool_id)
             self.handler.create(db_hm)
         except Exception:
             for listener_id in self._get_affected_listener_ids(
@@ -161,8 +159,8 @@ class HealthMonitorController(base.BaseController):
         self._test_lb_and_listener_statuses(context.session, hm=db_hm)

         try:
-            LOG.info(_LI("Sending Update of Health Monitor for Pool %s to "
-                         "handler"), self.pool_id)
+            LOG.info("Sending Update of Health Monitor for Pool %s to handler",
+                     self.pool_id)
             self.handler.update(db_hm, health_monitor)
         except Exception:
             with excutils.save_and_reraise_exception(reraise=False):
@@ -182,8 +180,8 @@ class HealthMonitorController(base.BaseController):
         self._test_lb_and_listener_statuses(context.session, hm=db_hm)

         try:
-            LOG.info(_LI("Sending Deletion of Health Monitor for Pool %s to "
-                         "handler"), self.pool_id)
+            LOG.info("Sending Deletion of Health Monitor for Pool %s to "
+                     "handler", self.pool_id)
             self.handler.delete(db_hm)
         except Exception:
             with excutils.save_and_reraise_exception(reraise=False):
@@ -28,8 +28,6 @@ from octavia.common import data_models
 from octavia.common import exceptions
 from octavia.common import validate
 from octavia.db import prepare as db_prepare
-from octavia.i18n import _LI
-

 LOG = logging.getLogger(__name__)

@@ -65,8 +63,8 @@ class L7PolicyController(base.BaseController):
                 session, self.load_balancer_id,
                 constants.PENDING_UPDATE, constants.PENDING_UPDATE,
                 listener_ids=[self.listener_id]):
-            LOG.info(_LI("L7Policy cannot be created or modified because the "
-                         "Load Balancer is in an immutable state"))
+            LOG.info("L7Policy cannot be created or modified because the "
+                     "Load Balancer is in an immutable state")
             lb_repo = self.repositories.load_balancer
             db_lb = lb_repo.get(session, id=self.load_balancer_id)
             raise exceptions.ImmutableObject(resource=db_lb._name(),
@@ -104,7 +102,7 @@ class L7PolicyController(base.BaseController):
             if ['id'] == de.columns:
                 raise exceptions.IDAlreadyExists()
         try:
-            LOG.info(_LI("Sending Creation of L7Policy %s to handler"),
+            LOG.info("Sending Creation of L7Policy %s to handler",
                      db_l7policy.id)
             self.handler.create(db_l7policy)
         except Exception:
@@ -132,7 +130,7 @@ class L7PolicyController(base.BaseController):
         self._test_lb_and_listener_statuses(context.session)

         try:
-            LOG.info(_LI("Sending Update of L7Policy %s to handler"), id)
+            LOG.info("Sending Update of L7Policy %s to handler", id)
             self.handler.update(
                 db_l7policy, l7policy_types.L7PolicyPUT(**l7policy_dict))
         except Exception:
@@ -152,7 +150,7 @@ class L7PolicyController(base.BaseController):
         self._test_lb_and_listener_statuses(context.session)

         try:
-            LOG.info(_LI("Sending Deletion of L7Policy %s to handler"),
+            LOG.info("Sending Deletion of L7Policy %s to handler",
                      db_l7policy.id)
             self.handler.delete(db_l7policy)
         except Exception:
@@ -177,7 +175,7 @@ class L7PolicyController(base.BaseController):
         db_l7policy = self.repositories.l7policy.get(
             context.session, id=l7policy_id)
         if not db_l7policy:
-            LOG.info(_LI("L7Policy %s not found."), l7policy_id)
+            LOG.info("L7Policy %s not found.", l7policy_id)
             raise exceptions.NotFound(
                 resource=data_models.L7Policy._name(), id=l7policy_id)
         return l7rule.L7RuleController(
@@ -27,8 +27,6 @@ from octavia.common import data_models
 from octavia.common import exceptions
 from octavia.common import validate
 from octavia.db import prepare as db_prepare
-from octavia.i18n import _LI
-

 LOG = logging.getLogger(__name__)

@@ -65,8 +63,8 @@ class L7RuleController(base.BaseController):
                 session, self.load_balancer_id,
                 constants.PENDING_UPDATE, constants.PENDING_UPDATE,
                 listener_ids=[self.listener_id]):
-            LOG.info(_LI("L7Rule cannot be created or modified because the "
-                         "Load Balancer is in an immutable state"))
+            LOG.info("L7Rule cannot be created or modified because the "
+                     "Load Balancer is in an immutable state")
             lb_repo = self.repositories.load_balancer
             db_lb = lb_repo.get(session, id=self.load_balancer_id)
             raise exceptions.ImmutableObject(resource=db_lb._name(),
@@ -109,7 +107,7 @@ class L7RuleController(base.BaseController):
             if ['id'] == de.columns:
                 raise exceptions.IDAlreadyExists()
         try:
-            LOG.info(_LI("Sending Creation of L7Rule %s to handler"),
+            LOG.info("Sending Creation of L7Rule %s to handler",
                      db_l7rule.id)
             self.handler.create(db_l7rule)
         except Exception:
@@ -138,7 +136,7 @@ class L7RuleController(base.BaseController):
         self._test_lb_and_listener_statuses(context.session)

         try:
-            LOG.info(_LI("Sending Update of L7Rule %s to handler"), id)
+            LOG.info("Sending Update of L7Rule %s to handler", id)
             self.handler.update(db_l7rule, l7rule)
         except Exception:
             with excutils.save_and_reraise_exception(reraise=False):
@@ -157,7 +155,7 @@ class L7RuleController(base.BaseController):
         self._test_lb_and_listener_statuses(context.session)

         try:
-            LOG.info(_LI("Sending Deletion of L7Rule %s to handler"),
+            LOG.info("Sending Deletion of L7Rule %s to handler",
                      db_l7rule.id)
             self.handler.delete(db_l7rule)
         except Exception:
@@ -31,8 +31,6 @@ from octavia.common import data_models
 from octavia.common import exceptions
 from octavia.db import api as db_api
 from octavia.db import prepare as db_prepare
-from octavia.i18n import _LI
-

 LOG = logging.getLogger(__name__)

@@ -55,7 +53,7 @@ class ListenersController(base.BaseController):
         db_listener = self.repositories.listener.get(
             session, load_balancer_id=self.load_balancer_id, id=id)
         if not db_listener:
-            LOG.info(_LI("Listener %s not found."), id)
+            LOG.info("Listener %s not found.", id)
             raise exceptions.NotFound(
                 resource=data_models.Listener._name(), id=id)
         return db_listener
@@ -85,7 +83,7 @@ class ListenersController(base.BaseController):
         if not self.repositories.test_and_set_lb_and_listeners_prov_status(
                 session, self.load_balancer_id, constants.PENDING_UPDATE,
                 listener_status, listener_ids=[id]):
-            LOG.info(_LI("Load Balancer %s is immutable."),
+            LOG.info("Load Balancer %s is immutable.",
                      self.load_balancer_id)
             db_lb = lb_repo.get(session, id=self.load_balancer_id)
             raise exceptions.ImmutableObject(resource=db_lb._name(),
@@ -94,7 +92,7 @@ class ListenersController(base.BaseController):
         if not lb_repo.test_and_set_provisioning_status(
                 session, self.load_balancer_id, constants.PENDING_UPDATE):
             db_lb = lb_repo.get(session, id=self.load_balancer_id)
-            LOG.info(_LI("Load Balancer %s is immutable."), db_lb.id)
+            LOG.info("Load Balancer %s is immutable.", db_lb.id)
             raise exceptions.ImmutableObject(resource=db_lb._name(),
                                              id=self.load_balancer_id)

@@ -144,7 +142,7 @@ class ListenersController(base.BaseController):

     def _send_listener_to_handler(self, session, db_listener):
         try:
-            LOG.info(_LI("Sending Creation of Listener %s to handler"),
+            LOG.info("Sending Creation of Listener %s to handler",
                      db_listener.id)
             self.handler.create(db_listener)
         except Exception:
@@ -210,7 +208,7 @@ class ListenersController(base.BaseController):
         self._test_lb_and_listener_statuses(context.session, id=id)

         try:
-            LOG.info(_LI("Sending Update of Listener %s to handler"), id)
+            LOG.info("Sending Update of Listener %s to handler", id)
             self.handler.update(db_listener, listener)
         except Exception:
             with excutils.save_and_reraise_exception(reraise=False):
@@ -229,7 +227,7 @@ class ListenersController(base.BaseController):
             context.session, id=id, listener_status=constants.PENDING_DELETE)

         try:
-            LOG.info(_LI("Sending Deletion of Listener %s to handler"),
+            LOG.info("Sending Deletion of Listener %s to handler",
                      db_listener.id)
             self.handler.delete(db_listener)
         except Exception:
@@ -258,7 +256,7 @@ class ListenersController(base.BaseController):
         db_listener = self.repositories.listener.get(
             context.session, id=listener_id)
         if not db_listener:
-            LOG.info(_LI("Listener %s not found."), listener_id)
+            LOG.info("Listener %s not found.", listener_id)
             raise exceptions.NotFound(
                 resource=data_models.Listener._name(), id=listener_id)
         if controller == 'pools':
@@ -33,7 +33,7 @@ from octavia.common import utils
 import octavia.common.validate as validate
 from octavia.db import api as db_api
 from octavia.db import prepare as db_prepare
-from octavia.i18n import _, _LI
+from octavia.i18n import _

 CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
@@ -72,7 +72,7 @@ class LoadBalancersController(base.BaseController):
         lb_repo = self.repositories.load_balancer
         if not lb_repo.test_and_set_provisioning_status(
                 session, id, lb_status):
-            LOG.info(_LI("Load Balancer %s is immutable."), id)
+            LOG.info("Load Balancer %s is immutable.", id)
             db_lb = lb_repo.get(session, id=id)
             raise exceptions.ImmutableObject(resource=db_lb._name(),
                                              id=id)
@@ -90,8 +90,8 @@ class LoadBalancersController(base.BaseController):

     def _load_balancer_graph_to_handler(self, context, db_lb):
         try:
-            LOG.info(_LI("Sending full load balancer configuration %s to "
-                         "the handler"), db_lb.id)
+            LOG.info("Sending full load balancer configuration %s to "
+                     "the handler", db_lb.id)
             self.handler.create(db_lb)
         except Exception:
             with excutils.save_and_reraise_exception(reraise=False):
@@ -197,7 +197,7 @@ class LoadBalancersController(base.BaseController):

         # Handler will be responsible for sending to controller
         try:
-            LOG.info(_LI("Sending created Load Balancer %s to the handler"),
+            LOG.info("Sending created Load Balancer %s to the handler",
                      db_lb.id)
             self.handler.create(db_lb)
         except Exception:
@@ -217,7 +217,7 @@ class LoadBalancersController(base.BaseController):
         self._test_lb_status(context.session, id)

         try:
-            LOG.info(_LI("Sending updated Load Balancer %s to the handler"),
+            LOG.info("Sending updated Load Balancer %s to the handler",
                      id)
             self.handler.update(db_lb, load_balancer)
         except Exception:
@@ -239,7 +239,7 @@ class LoadBalancersController(base.BaseController):
             raise exceptions.ValidationException(detail=msg)

         try:
-            LOG.info(_LI("Sending deleted Load Balancer %s to the handler"),
+            LOG.info("Sending deleted Load Balancer %s to the handler",
                      db_lb.id)
             self.handler.delete(db_lb, cascade)
         except Exception:
@@ -270,7 +270,7 @@ class LoadBalancersController(base.BaseController):
         db_lb = self.repositories.load_balancer.get(context.session,
                                                     id=lb_id)
         if not db_lb:
-            LOG.info(_LI("Load Balancer %s was not found."), lb_id)
+            LOG.info("Load Balancer %s was not found.", lb_id)
             raise exceptions.NotFound(
                 resource=data_models.LoadBalancer._name(), id=lb_id)
         if controller == 'listeners':
@@ -287,11 +287,11 @@ class LoadBalancersController(base.BaseController):


 class LBCascadeDeleteController(LoadBalancersController):
-    def __init__(self, lb_id):
-        super(LBCascadeDeleteController, self).__init__()
-        self.lb_id = lb_id
+    def __init__(self, lb_id):
+        super(LBCascadeDeleteController, self).__init__()
+        self.lb_id = lb_id

-    @wsme_pecan.wsexpose(None, status_code=202)
-    def delete(self):
-        """Deletes a load balancer."""
-        return self._delete(self.lb_id, cascade=True)
+    @wsme_pecan.wsexpose(None, status_code=202)
+    def delete(self):
+        """Deletes a load balancer."""
+        return self._delete(self.lb_id, cascade=True)
@@ -29,8 +29,6 @@ from octavia.common import exceptions
 import octavia.common.validate as validate
 from octavia.db import api as db_api
 from octavia.db import prepare as db_prepare
-from octavia.i18n import _LI
-

 LOG = logging.getLogger(__name__)

@@ -82,8 +80,8 @@ class MembersController(base.BaseController):
                 session, self.load_balancer_id,
                 constants.PENDING_UPDATE, constants.PENDING_UPDATE,
                 listener_ids=self._get_affected_listener_ids(session, member)):
-            LOG.info(_LI("Member cannot be created or modified because the "
-                         "Load Balancer is in an immutable state"))
+            LOG.info("Member cannot be created or modified because the "
+                     "Load Balancer is in an immutable state")
             lb_repo = self.repositories.load_balancer
             db_lb = lb_repo.get(session, id=self.load_balancer_id)
             raise exceptions.ImmutableObject(resource=db_lb._name(),
@@ -133,7 +131,7 @@ class MembersController(base.BaseController):
             lock_session.rollback()

         try:
-            LOG.info(_LI("Sending Creation of Member %s to handler"),
+            LOG.info("Sending Creation of Member %s to handler",
                      db_member.id)
             self.handler.create(db_member)
         except Exception:
@@ -156,7 +154,7 @@ class MembersController(base.BaseController):
         self._test_lb_and_listener_statuses(context.session, member=db_member)

         try:
-            LOG.info(_LI("Sending Update of Member %s to handler"), id)
+            LOG.info("Sending Update of Member %s to handler", id)
             self.handler.update(db_member, member)
         except Exception:
             with excutils.save_and_reraise_exception(reraise=False):
@@ -176,7 +174,7 @@ class MembersController(base.BaseController):
         self._test_lb_and_listener_statuses(context.session, member=db_member)

         try:
-            LOG.info(_LI("Sending Deletion of Member %s to handler"),
+            LOG.info("Sending Deletion of Member %s to handler",
                      db_member.id)
             self.handler.delete(db_member)
         except Exception:
@@ -30,8 +30,6 @@ from octavia.common import data_models
 from octavia.common import exceptions
 from octavia.db import api as db_api
 from octavia.db import prepare as db_prepare
-from octavia.i18n import _LI
-

 LOG = logging.getLogger(__name__)

@@ -82,8 +80,8 @@ class PoolsController(base.BaseController):
                 session, self.load_balancer_id,
                 constants.PENDING_UPDATE, constants.PENDING_UPDATE,
                 listener_ids=self._get_affected_listener_ids(session, pool)):
-            LOG.info(_LI("Pool cannot be created or modified because the Load "
-                         "Balancer is in an immutable state"))
+            LOG.info("Pool cannot be created or modified because the Load "
+                     "Balancer is in an immutable state")
             lb_repo = self.repositories.load_balancer
             db_lb = lb_repo.get(session, id=self.load_balancer_id)
             raise exceptions.ImmutableObject(resource=db_lb._name(),
@@ -109,8 +107,7 @@ class PoolsController(base.BaseController):

     def _send_pool_to_handler(self, session, db_pool):
         try:
-            LOG.info(_LI("Sending Creation of Pool %s to handler"),
-                     db_pool.id)
+            LOG.info("Sending Creation of Pool %s to handler", db_pool.id)
             self.handler.create(db_pool)
         except Exception:
             for listener_id in self._get_affected_listener_ids(session):
@@ -180,7 +177,7 @@ class PoolsController(base.BaseController):
         self._test_lb_and_listener_statuses(context.session, pool=db_pool)

         try:
-            LOG.info(_LI("Sending Update of Pool %s to handler"), id)
+            LOG.info("Sending Update of Pool %s to handler", id)
             self.handler.update(db_pool, pool)
         except Exception:
             with excutils.save_and_reraise_exception(reraise=False):
@@ -205,8 +202,7 @@ class PoolsController(base.BaseController):
         self._test_lb_and_listener_statuses(context.session, pool=db_pool)

         try:
-            LOG.info(_LI("Sending Deletion of Pool %s to handler"),
-                     db_pool.id)
+            LOG.info("Sending Deletion of Pool %s to handler", db_pool.id)
             self.handler.delete(db_pool)
         except Exception:
             with excutils.save_and_reraise_exception(reraise=False):
@@ -234,7 +230,7 @@ class PoolsController(base.BaseController):
         remainder = remainder[1:]
         db_pool = self.repositories.pool.get(context.session, id=pool_id)
         if not db_pool:
-            LOG.info(_LI("Pool %s not found."), pool_id)
+            LOG.info("Pool %s not found.", pool_id)
             raise exceptions.NotFound(resource=data_models.Pool._name(),
                                       id=pool_id)
         if controller == 'members':
@@ -21,7 +21,6 @@ from stevedore import driver as stevedore_driver
 from octavia.common import data_models
 from octavia.common import exceptions
 from octavia.db import repositories
-from octavia.i18n import _LE

 CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
@@ -61,7 +60,7 @@ class BaseController(rest.RestController):
         """Gets an object from the database and returns it."""
         db_obj = repo.get(session, id=id)
         if not db_obj:
-            LOG.exception(_LE("{name} {id} not found").format(
+            LOG.exception("{name} {id} not found".format(
                 name=data_model._name(), id=id))
             raise exceptions.NotFound(
                 resource=data_model._name(), id=id)
@@ -29,7 +29,6 @@ from octavia.common import data_models
 from octavia.common import exceptions
 from octavia.db import api as db_api
 from octavia.db import prepare as db_prepare
-from octavia.i18n import _LI


 CONF = cfg.CONF
@@ -47,7 +46,7 @@ class HealthMonitorController(base.BaseController):
         db_hm = self.repositories.health_monitor.get(
             session, id=hm_id)
         if not db_hm:
-            LOG.info(_LI("Health Monitor %s was not found"), hm_id)
+            LOG.info("Health Monitor %s was not found", hm_id)
             raise exceptions.NotFound(
                 resource=data_models.HealthMonitor._name(),
                 id=hm_id)
@@ -100,8 +99,8 @@ class HealthMonitorController(base.BaseController):
                 constants.PENDING_UPDATE, constants.PENDING_UPDATE,
                 listener_ids=self._get_affected_listener_ids(session, hm),
                 pool_id=hm.pool_id):
-            LOG.info(_LI("Health Monitor cannot be created or modified "
-                         "because the Load Balancer is in an immutable state"))
+            LOG.info("Health Monitor cannot be created or modified because "
+                     "the Load Balancer is in an immutable state")
             raise exceptions.ImmutableObject(resource='Load Balancer',
                                              id=load_balancer_id)

@@ -136,7 +135,7 @@ class HealthMonitorController(base.BaseController):

     def _send_hm_to_handler(self, session, db_hm):
         try:
-            LOG.info(_LI("Sending Creation of Health Monitor %s to handler"),
+            LOG.info("Sending Creation of Health Monitor %s to handler",
                      db_hm.id)
             self.handler.create(db_hm)
         except Exception:
@@ -191,8 +190,8 @@ class HealthMonitorController(base.BaseController):
             provisioning_status=constants.PENDING_UPDATE)

         try:
-            LOG.info(_LI("Sending Update of Health Monitor for Pool %s to "
-                         "handler"), id)
+            LOG.info("Sending Update of Health Monitor for Pool %s to "
+                     "handler", id)
             self.handler.update(db_hm, health_monitor)
         except Exception:
             with excutils.save_and_reraise_exception(
@@ -220,8 +219,8 @@ class HealthMonitorController(base.BaseController):
             provisioning_status=constants.PENDING_DELETE)

         try:
-            LOG.info(_LI("Sending Deletion of Health Monitor for Pool %s to "
-                         "handler"), id)
+            LOG.info("Sending Deletion of Health Monitor for Pool %s to "
+                     "handler", id)
             self.handler.delete(db_hm)
         except Exception:
             with excutils.save_and_reraise_exception(
@@ -30,7 +30,6 @@ from octavia.common import exceptions
 from octavia.common import validate
 from octavia.db import api as db_api
 from octavia.db import prepare as db_prepare
-from octavia.i18n import _LI


 CONF = cfg.CONF
@@ -76,8 +75,8 @@ class L7PolicyController(base.BaseController):
                 session, lb_id,
                 constants.PENDING_UPDATE, constants.PENDING_UPDATE,
                 listener_ids=listener_ids):
-            LOG.info(_LI("L7Policy cannot be created or modified because the "
-                         "Load Balancer is in an immutable state"))
+            LOG.info("L7Policy cannot be created or modified because the "
+                     "Load Balancer is in an immutable state")
             raise exceptions.ImmutableObject(resource='Load Balancer',
                                              id=lb_id)

@@ -112,7 +111,7 @@ class L7PolicyController(base.BaseController):

     def _send_l7policy_to_handler(self, session, db_l7policy, lb_id):
         try:
-            LOG.info(_LI("Sending Creation of L7Policy %s to handler"),
+            LOG.info("Sending Creation of L7Policy %s to handler",
                      db_l7policy.id)
             self.handler.create(db_l7policy)
         except Exception:
@@ -203,7 +202,7 @@ class L7PolicyController(base.BaseController):
             provisioning_status=constants.PENDING_UPDATE)

         try:
-            LOG.info(_LI("Sending Update of L7Policy %s to handler"), id)
+            LOG.info("Sending Update of L7Policy %s to handler", id)
             self.handler.update(
                 db_l7policy, sanitized_l7policy)
         except Exception:
@@ -236,7 +235,7 @@ class L7PolicyController(base.BaseController):
             provisioning_status=constants.PENDING_DELETE)

         try:
-            LOG.info(_LI("Sending Deletion of L7Policy %s to handler"),
+            LOG.info("Sending Deletion of L7Policy %s to handler",
                      db_l7policy.id)
             self.handler.delete(db_l7policy)
         except Exception:
@@ -267,7 +266,7 @@ class L7PolicyController(base.BaseController):
         db_l7policy = self.repositories.l7policy.get(
             context.session, id=l7policy_id)
         if not db_l7policy:
-            LOG.info(_LI("L7Policy %s not found."), l7policy_id)
+            LOG.info("L7Policy %s not found.", l7policy_id)
             raise exceptions.NotFound(
                 resource='L7Policy', id=l7policy_id)
         return l7rule.L7RuleController(
@@ -28,7 +28,6 @@ from octavia.common import exceptions
 from octavia.common import validate
 from octavia.db import api as db_api
 from octavia.db import prepare as db_prepare
-from octavia.i18n import _LI


 LOG = logging.getLogger(__name__)
@@ -69,8 +68,8 @@ class L7RuleController(base.BaseController):
                 session, load_balancer_id,
                 constants.PENDING_UPDATE, constants.PENDING_UPDATE,
                 listener_ids=[listener_id], l7policy_id=self.l7policy_id):
-            LOG.info(_LI("L7Rule cannot be created or modified because the "
-                         "Load Balancer is in an immutable state"))
+            LOG.info("L7Rule cannot be created or modified because the "
+                     "Load Balancer is in an immutable state")
             raise exceptions.ImmutableObject(resource='Load Balancer',
                                              id=load_balancer_id)

@@ -111,8 +110,7 @@ class L7RuleController(base.BaseController):

     def _send_l7rule_to_handler(self, session, db_l7rule):
         try:
-            LOG.info(_LI("Sending Creation of L7Rule %s to handler"),
-                     db_l7rule.id)
+            LOG.info("Sending Creation of L7Rule %s to handler", db_l7rule.id)
             self.handler.create(db_l7rule)
         except Exception:
             with excutils.save_and_reraise_exception(
@@ -185,7 +183,7 @@ class L7RuleController(base.BaseController):
             provisioning_status=constants.PENDING_UPDATE)

         try:
-            LOG.info(_LI("Sending Update of L7Rule %s to handler"), id)
+            LOG.info("Sending Update of L7Rule %s to handler", id)
             self.handler.update(db_l7rule, l7rule)
         except Exception:
             with excutils.save_and_reraise_exception(
@@ -212,8 +210,7 @@ class L7RuleController(base.BaseController):
             provisioning_status=constants.PENDING_DELETE)

         try:
-            LOG.info(_LI("Sending Deletion of L7Rule %s to handler"),
-                     db_l7rule.id)
+            LOG.info("Sending Deletion of L7Rule %s to handler", db_l7rule.id)
             self.handler.delete(db_l7rule)
         except Exception:
             with excutils.save_and_reraise_exception(
@ -29,7 +29,6 @@ from octavia.common import data_models
from octavia.common import exceptions
from octavia.db import api as db_api
from octavia.db import prepare as db_prepare
from octavia.i18n import _LI


CONF = cfg.CONF
@ -50,7 +49,7 @@ class ListenersController(base.BaseController):
db_listener = self.repositories.listener.get(
session, load_balancer_id=load_balancer_id, id=id)
if not db_listener:
LOG.info(_LI("Listener %s not found."), id)
LOG.info("Listener %s not found.", id)
raise exceptions.NotFound(
resource=data_models.Listener._name(), id=id)
return db_listener
@ -91,8 +90,7 @@ class ListenersController(base.BaseController):
if not self.repositories.test_and_set_lb_and_listeners_prov_status(
session, lb_id, constants.PENDING_UPDATE,
listener_status, listener_ids=[id]):
LOG.info(_LI("Load Balancer %s is immutable."),
lb_id)
LOG.info("Load Balancer %s is immutable.", lb_id)
db_lb = lb_repo.get(session, id=lb_id)
raise exceptions.ImmutableObject(resource=db_lb._name(),
id=lb_id)
@ -100,7 +98,7 @@ class ListenersController(base.BaseController):
if not lb_repo.test_and_set_provisioning_status(
session, lb_id, constants.PENDING_UPDATE):
db_lb = lb_repo.get(session, id=lb_id)
LOG.info(_LI("Load Balancer %s is immutable."), db_lb.id)
LOG.info("Load Balancer %s is immutable.", db_lb.id)
raise exceptions.ImmutableObject(resource=db_lb._name(),
id=lb_id)

@ -168,7 +166,7 @@ class ListenersController(base.BaseController):

def _send_listener_to_handler(self, session, db_listener):
try:
LOG.info(_LI("Sending Creation of Listener %s to handler"),
LOG.info("Sending Creation of Listener %s to handler",
db_listener.id)
self.handler.create(db_listener)
except Exception:
@ -230,7 +228,7 @@ class ListenersController(base.BaseController):
id=id)

try:
LOG.info(_LI("Sending Update of Listener %s to handler"), id)
LOG.info("Sending Update of Listener %s to handler", id)
self.handler.update(db_listener, listener)
except Exception:
with excutils.save_and_reraise_exception(
@ -257,7 +255,7 @@ class ListenersController(base.BaseController):
id=id, listener_status=constants.PENDING_DELETE)

try:
LOG.info(_LI("Sending Deletion of Listener %s to handler"),
LOG.info("Sending Deletion of Listener %s to handler",
db_listener.id)
self.handler.delete(db_listener)
except Exception:

@ -30,7 +30,7 @@ from octavia.common import utils
import octavia.common.validate as validate
from octavia.db import api as db_api
from octavia.db import prepare as db_prepare
from octavia.i18n import _, _LI
from octavia.i18n import _


CONF = cfg.CONF
@ -79,9 +79,8 @@ class LoadBalancersController(base.BaseController):
if not lb_repo.test_and_set_provisioning_status(
session, id, lb_status):
prov_status = lb_repo.get(session, id=id).provisioning_status
LOG.info(_LI(
"Invalid state %(state)s of loadbalancer resource %(id)s"),
{"state": prov_status, "id": id})
LOG.info("Invalid state %(state)s of loadbalancer resource %(id)s",
{"state": prov_status, "id": id})
raise exceptions.LBPendingStateError(
state=prov_status, id=id)

@ -174,7 +173,7 @@ class LoadBalancersController(base.BaseController):

# Handler will be responsible for sending to controller
try:
LOG.info(_LI("Sending created Load Balancer %s to the handler"),
LOG.info("Sending created Load Balancer %s to the handler",
db_lb.id)
self.handler.create(db_lb)
except Exception:
@ -195,8 +194,7 @@ class LoadBalancersController(base.BaseController):
db_lb = self._get_db_lb(context.session, id)
self._test_lb_status(context.session, id)
try:
LOG.info(_LI("Sending updated Load Balancer %s to the handler"),
id)
LOG.info("Sending updated Load Balancer %s to the handler", id)
self.handler.update(db_lb, load_balancer)
except Exception:
with excutils.save_and_reraise_exception(reraise=False):
@ -214,7 +212,7 @@ class LoadBalancersController(base.BaseController):
lb_status=constants.PENDING_DELETE)

try:
LOG.info(_LI("Sending deleted Load Balancer %s to the handler"),
LOG.info("Sending deleted Load Balancer %s to the handler",
db_lb.id)
self.handler.delete(db_lb, cascade)
except Exception:

@ -29,7 +29,6 @@ from octavia.common import exceptions
import octavia.common.validate as validate
from octavia.db import api as db_api
from octavia.db import prepare as db_prepare
from octavia.i18n import _LI


LOG = logging.getLogger(__name__)
@ -81,8 +80,8 @@ class MembersController(base.BaseController):
constants.PENDING_UPDATE, constants.PENDING_UPDATE,
listener_ids=self._get_affected_listener_ids(session, member),
pool_id=self.pool_id):
LOG.info(_LI("Member cannot be created or modified because the "
"Load Balancer is in an immutable state"))
LOG.info("Member cannot be created or modified because the "
"Load Balancer is in an immutable state")
raise exceptions.ImmutableObject(resource='Load Balancer',
id=load_balancer_id)

@ -123,8 +122,7 @@ class MembersController(base.BaseController):

def _send_member_to_handler(self, session, db_member):
try:
LOG.info(_LI("Sending Creation of Pool %s to handler"),
|
||||
db_member.id)
|
||||
LOG.info("Sending Creation of Pool %s to handler", db_member.id)
|
||||
self.handler.create(db_member)
except Exception:
with excutils.save_and_reraise_exception(
@ -191,7 +189,7 @@ class MembersController(base.BaseController):
provisioning_status=constants.PENDING_UPDATE)

try:
LOG.info(_LI("Sending Update of Member %s to handler"), id)
LOG.info("Sending Update of Member %s to handler", id)
self.handler.update(db_member, member)
except Exception:
with excutils.save_and_reraise_exception(
@ -219,8 +217,7 @@ class MembersController(base.BaseController):
provisioning_status=constants.PENDING_DELETE)

try:
LOG.info(_LI("Sending Deletion of Member %s to handler"),
db_member.id)
LOG.info("Sending Deletion of Member %s to handler", db_member.id)
self.handler.delete(db_member)
except Exception:
with excutils.save_and_reraise_exception(

@ -31,7 +31,6 @@ from octavia.common import data_models
from octavia.common import exceptions
from octavia.db import api as db_api
from octavia.db import prepare as db_prepare
from octavia.i18n import _LI


CONF = cfg.CONF
@ -82,8 +81,8 @@ class PoolsController(base.BaseController):
session, lb_id,
constants.PENDING_UPDATE, constants.PENDING_UPDATE,
listener_ids=listener_ids):
LOG.info(_LI("Pool cannot be created or modified because the Load "
"Balancer is in an immutable state"))
LOG.info("Pool cannot be created or modified because the Load "
"Balancer is in an immutable state")
raise exceptions.ImmutableObject(resource=_('Load Balancer'),
id=lb_id)

@ -119,8 +118,7 @@ class PoolsController(base.BaseController):

def _send_pool_to_handler(self, session, db_pool, listener_id):
try:
LOG.info(_LI("Sending Creation of Pool %s to handler"),
db_pool.id)
LOG.info("Sending Creation of Pool %s to handler", db_pool.id)
self.handler.create(db_pool)
except Exception:
with excutils.save_and_reraise_exception(
@ -212,7 +210,7 @@ class PoolsController(base.BaseController):
context.session, db_pool.id,
provisioning_status=constants.PENDING_UPDATE)
try:
LOG.info(_LI("Sending Update of Pool %s to handler"), id)
LOG.info("Sending Update of Pool %s to handler", id)
self.handler.update(db_pool, pool)
except Exception:
with excutils.save_and_reraise_exception(
@ -244,8 +242,7 @@ class PoolsController(base.BaseController):
provisioning_status=constants.PENDING_DELETE)

try:
LOG.info(_LI("Sending Deletion of Pool %s to handler"),
db_pool.id)
LOG.info("Sending Deletion of Pool %s to handler", db_pool.id)
self.handler.delete(db_pool)
except Exception:
with excutils.save_and_reraise_exception(
@ -275,7 +272,7 @@ class PoolsController(base.BaseController):
remainder = remainder[1:]
db_pool = self.repositories.pool.get(context.session, id=pool_id)
if not db_pool:
LOG.info(_LI("Pool %s not found."), pool_id)
LOG.info("Pool %s not found.", pool_id)
raise exceptions.NotFound(resource=data_models.Pool._name(),
id=pool_id)
if controller == 'members':

@ -23,8 +23,6 @@ from oslo_utils import excutils

from octavia.certificates.common import barbican as barbican_common
from octavia.common import keystone
from octavia.i18n import _LE


LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@ -45,5 +43,5 @@ class BarbicanACLAuth(barbican_common.BarbicanAuth):
)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Error creating Barbican client"))
LOG.exception("Error creating Barbican client")
return cls._barbican_client

@ -25,7 +25,7 @@ from oslo_utils import encodeutils

from octavia.certificates.common import cert
from octavia.common.tls_utils import cert_parser
from octavia.i18n import _LE
from octavia.i18n import _


class BarbicanCert(cert.Cert):
@ -33,9 +33,8 @@ class BarbicanCert(cert.Cert):
def __init__(self, cert_container):
if not isinstance(cert_container,
barbican_client.containers.CertificateContainer):
raise TypeError(_LE(
"Retrieved Barbican Container is not of the correct type "
"(certificate)."))
raise TypeError(_("Retrieved Barbican Container is not of the "
"correct type (certificate)."))
self._cert_container = cert_container

def get_certificate(self):

@ -19,8 +19,6 @@ import requests

from octavia.certificates.generator import local
from octavia.common import exceptions
from octavia.i18n import _LE


LOG = logging.getLogger(__name__)

@ -57,11 +55,11 @@ class AnchorCertGenerator(local.LocalCertGenerator):

if r.status_code != 200:
LOG.debug('Anchor returned: %s', r.content)
raise AnchorException("Anchor returned Status Code : " +
str(r.status_code))
raise AnchorException(_("Anchor returned Status Code : "
"{0}").format(str(r.status_code)))

return r.content

except Exception as e:
LOG.error(_LE("Unable to sign certificate."))
LOG.error("Unable to sign certificate.")
raise exceptions.CertificateGenerationException(msg=e)

@ -28,7 +28,6 @@ import six
from octavia.certificates.common import local as local_common
from octavia.certificates.generator import cert_gen
from octavia.common import exceptions
from octavia.i18n import _LE, _LI

LOG = logging.getLogger(__name__)

@ -45,7 +44,7 @@ class LocalCertGenerator(cert_gen.CertGenerator):
@classmethod
def _validate_cert(cls, ca_cert, ca_key, ca_key_pass):
if not ca_cert:
LOG.info(_LI("Using CA Certificate from config."))
LOG.info("Using CA Certificate from config.")
try:
ca_cert = open(CONF.certificates.ca_certificate, 'rb').read()
except IOError:
@ -54,7 +53,7 @@ class LocalCertGenerator(cert_gen.CertGenerator):
.format(CONF.certificates.ca_certificate)
)
if not ca_key:
LOG.info(_LI("Using CA Private Key from config."))
LOG.info("Using CA Private Key from config.")
try:
ca_key = open(CONF.certificates.ca_private_key, 'rb').read()
except IOError:
@ -65,13 +64,10 @@ class LocalCertGenerator(cert_gen.CertGenerator):
if not ca_key_pass:
ca_key_pass = CONF.certificates.ca_private_key_passphrase
if ca_key_pass:
LOG.info(_LI(
"Using CA Private Key Passphrase from config."
))
LOG.info("Using CA Private Key Passphrase from config.")
else:
LOG.info(_LI(
"No Passphrase found for CA Private Key, not using one."
))
LOG.info("No Passphrase found for CA Private Key, not using "
"one.")

@classmethod
def sign_cert(cls, csr, validity, ca_cert=None, ca_key=None,
@ -91,9 +87,7 @@ class LocalCertGenerator(cert_gen.CertGenerator):
:return: Signed certificate
:raises Exception: if certificate signing fails
"""
LOG.info(_LI(
"Signing a certificate request using OpenSSL locally."
))
LOG.info("Signing a certificate request using OpenSSL locally.")
cls._validate_cert(ca_cert, ca_key, ca_key_pass)
if not ca_digest:
ca_digest = CONF.certificates.signing_digest
@ -169,7 +163,7 @@ class LocalCertGenerator(cert_gen.CertGenerator):
return signed_cert.public_bytes(
encoding=serialization.Encoding.PEM)
except Exception as e:
LOG.error(_LE("Unable to sign certificate."))
LOG.error("Unable to sign certificate.")
raise exceptions.CertificateGenerationException(msg=e)

@classmethod

@ -23,8 +23,6 @@ from stevedore import driver as stevedore_driver

from octavia.certificates.common import barbican as barbican_common
from octavia.certificates.manager import cert_mgr
from octavia.i18n import _LE, _LI, _LW


LOG = logging.getLogger(__name__)

@ -57,9 +55,8 @@ class BarbicanCertManager(cert_mgr.CertManager):
"""
connection = self.auth.get_barbican_client(project_id)

LOG.info(_LI(
"Storing certificate container '{0}' in Barbican."
).format(name))
LOG.info("Storing certificate container '{0}' in "
"Barbican.".format(name))

certificate_secret = None
private_key_secret = None
@ -106,18 +103,14 @@ class BarbicanCertManager(cert_mgr.CertManager):
old_ref = i.secret_ref
try:
i.delete()
LOG.info(_LI(
"Deleted secret {0} ({1}) during rollback."
).format(i.name, old_ref))
LOG.info("Deleted secret {0} ({1}) during "
"rollback.".format(i.name, old_ref))
except Exception:
LOG.warning(_LW(
"Failed to delete {0} ({1}) during rollback. This "
"might not be a problem."
).format(i.name, old_ref))
LOG.warning("Failed to delete {0} ({1}) during "
"rollback. This might not be a "
"problem.".format(i.name, old_ref))
with excutils.save_and_reraise_exception():
LOG.error(_LE(
"Error storing certificate data: {0}"
).format(str(e)))
LOG.error("Error storing certificate data: {0}".format(str(e)))

def get_cert(self, project_id, cert_ref, resource_ref=None,
check_only=False, service_name='Octavia'):
@ -134,9 +127,8 @@ class BarbicanCertManager(cert_mgr.CertManager):
"""
connection = self.auth.get_barbican_client(project_id)

LOG.info(_LI(
"Loading certificate container {0} from Barbican."
).format(cert_ref))
LOG.info("Loading certificate container {0} from "
"Barbican.".format(cert_ref))
try:
if check_only:
cert_container = connection.containers.get(
@ -151,9 +143,7 @@ class BarbicanCertManager(cert_mgr.CertManager):
return barbican_common.BarbicanCert(cert_container)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE(
"Error getting {0}: {1}"
).format(cert_ref, str(e)))
LOG.error("Error getting {0}: {1}".format(cert_ref, str(e)))

def delete_cert(self, project_id, cert_ref, resource_ref=None,
service_name='Octavia'):
@ -167,9 +157,8 @@ class BarbicanCertManager(cert_mgr.CertManager):
"""
connection = self.auth.get_barbican_client(project_id)

LOG.info(_LI(
"Deregistering as a consumer of {0} in Barbican."
).format(cert_ref))
LOG.info("Deregistering as a consumer of {0} in "
"Barbican.".format(cert_ref))
try:
connection.containers.remove_consumer(
container_ref=cert_ref,
@ -178,6 +167,5 @@ class BarbicanCertManager(cert_mgr.CertManager):
)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE(
"Error deregistering as a consumer of {0}: {1}"
).format(cert_ref, str(e)))
LOG.error("Error deregistering as a consumer of {0}: "
"{1}".format(cert_ref, str(e)))

@ -20,7 +20,6 @@ from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr

from octavia.api import app as api_app
from octavia.i18n import _LI
from octavia import version


@ -33,7 +32,7 @@ def main():
app = api_app.setup_app(argv=sys.argv)

host, port = cfg.CONF.bind_host, cfg.CONF.bind_port
LOG.info(_LI("Starting API server on %(host)s:%(port)s"),
LOG.info("Starting API server on %(host)s:%(port)s",
{"host": host, "port": port})
srv = simple_server.make_server(host, port, app)

@ -23,7 +23,6 @@ from octavia.amphorae.drivers.health import heartbeat_udp
from octavia.common import service
from octavia.controller.healthmanager import health_manager
from octavia.controller.healthmanager import update_db
from octavia.i18n import _LI
from octavia import version


@ -59,15 +58,15 @@ def main():
hm_health_check_proc = multiprocessing.Process(name='HM_health_check',
target=hm_health_check)
processes.append(hm_health_check_proc)
LOG.info(_LI("Health Manager listener process starts:"))
LOG.info("Health Manager listener process starts:")
hm_listener_proc.start()
LOG.info(_LI("Health manager check process starts:"))
LOG.info("Health manager check process starts:")
hm_health_check_proc.start()

try:
for process in processes:
process.join()
except KeyboardInterrupt:
LOG.info(_LI("Health Manager existing due to signal"))
|
||||
LOG.info("Health Manager existing due to signal")
|
||||
hm_listener_proc.terminate()
hm_health_check_proc.terminate()

@ -24,10 +24,8 @@ from oslo_reports import guru_meditation_report as gmr

from octavia.common import service
from octavia.controller.housekeeping import house_keeping
from octavia.i18n import _LI
from octavia import version


LOG = logging.getLogger(__name__)
CONF = cfg.CONF

@ -41,7 +39,7 @@ def spare_amphora_check():

# Read the interval from CONF
interval = CONF.house_keeping.spare_check_interval
LOG.info(_LI("Spare check interval is set to %d sec"), interval)
LOG.info("Spare check interval is set to %d sec", interval)

spare_amp = house_keeping.SpareAmphora()
while not spare_amp_thread_event.is_set():
@ -54,10 +52,10 @@ def db_cleanup():
"""Perform db cleanup for old resources."""
# Read the interval from CONF
interval = CONF.house_keeping.cleanup_interval
LOG.info(_LI("DB cleanup interval is set to %d sec"), interval)
LOG.info(_LI('Amphora expiry age is %s seconds'),
LOG.info("DB cleanup interval is set to %d sec", interval)
LOG.info('Amphora expiry age is %s seconds',
CONF.house_keeping.amphora_expiry_age)
LOG.info(_LI('Load balancer expiry age is %s seconds'),
LOG.info('Load balancer expiry age is %s seconds',
CONF.house_keeping.load_balancer_expiry_age)

db_cleanup = house_keeping.DatabaseCleanup()
@ -72,7 +70,7 @@ def cert_rotation():
"""Perform certificate rotation."""
interval = CONF.house_keeping.cert_interval
LOG.info(
_LI("Expiring certificate check interval is set to %d sec"), interval)
"Expiring certificate check interval is set to %d sec", interval)
cert_rotate = house_keeping.CertRotation()
while not cert_rotate_thread_event.is_set():
LOG.debug("Initiating certification rotation ...")
@ -86,7 +84,7 @@ def main():
gmr.TextGuruMeditation.setup_autorun(version)

timestamp = str(datetime.datetime.utcnow())
LOG.info(_LI("Starting house keeping at %s"), timestamp)
LOG.info("Starting house keeping at %s", timestamp)

# Thread to perform spare amphora check
spare_amp_thread = threading.Thread(target=spare_amphora_check)
@ -108,11 +106,11 @@ def main():
while True:
time.sleep(1)
except KeyboardInterrupt:
LOG.info(_LI("Attempting to gracefully terminate House-Keeping"))
LOG.info("Attempting to gracefully terminate House-Keeping")
spare_amp_thread_event.set()
db_cleanup_thread_event.set()
cert_rotate_thread_event.set()
spare_amp_thread.join()
db_cleanup_thread.join()
cert_rotate_thread.join()
LOG.info(_LI("House-Keeping process terminated"))
LOG.info("House-Keeping process terminated")

@ -19,7 +19,6 @@ from oslo_log import log as logging
from oslo_utils import excutils

from octavia.common import keystone
from octavia.i18n import _LE

LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@ -64,7 +63,7 @@ class NovaAuth(object):
version=api_versions.APIVersion(NOVA_VERSION), **kwargs)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Error creating Nova client."))
LOG.exception("Error creating Nova client.")
return cls.nova_client


@ -103,7 +102,7 @@ class NeutronAuth(object):
NEUTRON_VERSION, **kwargs)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Error creating Neutron client."))
LOG.exception("Error creating Neutron client.")
return cls.neutron_client


@ -142,5 +141,5 @@ class GlanceAuth(object):
GLANCE_VERSION, **kwargs)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Error creating Glance client."))
LOG.exception("Error creating Glance client.")
return cls.glance_client

@ -25,7 +25,6 @@ import oslo_messaging as messaging

from octavia.common import constants
from octavia.common import utils
from octavia.i18n import _LI
from octavia import version

LOG = logging.getLogger(__name__)
@ -522,4 +521,4 @@ def setup_logging(conf):
"""
product_name = "octavia"
logging.setup(conf, product_name)
LOG.info(_LI("Logging enabled!"))
LOG.info("Logging enabled!")

@ -22,8 +22,6 @@ import six
from oslo_utils import excutils
from webob import exc

from octavia.i18n import _LE


class OctaviaException(Exception):
"""Base Octavia Exception.
@ -167,7 +165,7 @@ class ComputeBuildException(OctaviaException):


class ComputeBuildQueueTimeoutException(OctaviaException):
message = _LE('Failed to get an amphora build slot.')
message = _('Failed to get an amphora build slot.')


class ComputeDeleteException(OctaviaException):

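Note: the exceptions hunk above draws the line this patch enforces — log messages lose their markers, but user-facing exception text keeps _() so API responses remain translatable. A simplified sketch of the convention (the TranslatorFactory wiring is shown inline here for self-containment; Octavia itself imports _ from octavia.i18n, and its real exceptions subclass OctaviaException):

    import logging

    import oslo_i18n

    _ = oslo_i18n.TranslatorFactory(domain='octavia').primary
    LOG = logging.getLogger(__name__)

    class ComputeBuildQueueTimeoutException(Exception):
        # Exception text can surface to an API user, so it stays wrapped
        # in the translation marker.
        message = _('Failed to get an amphora build slot.')

    def report_build_slot_failure():
        # Operator-facing log text is left untranslated.
        LOG.error("Failed to get an amphora build slot.")
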
@ -19,7 +19,6 @@ from oslo_policy import policy as oslo_policy
from oslo_utils import excutils

from octavia.common import exceptions
from octavia.i18n import _LE
from octavia import policies


@ -93,7 +92,7 @@ class Policy(oslo_policy.Enforcer):
action, target, credentials, do_raise=do_raise, exc=exc)
except oslo_policy.PolicyNotRegistered:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Policy not registered'))
LOG.exception('Policy not registered')
except Exception:
credentials.pop('auth_token', None)
with excutils.save_and_reraise_exception():

@ -17,8 +17,6 @@ import logging
from octavia.common import constants
from octavia.common import data_models
from octavia.db import repositories as repo
from octavia.i18n import _LW


LOG = logging.getLogger(__name__)

@ -36,9 +34,8 @@ class StatsMixin(object):
db_ls = self.listener_stats_repo.get_all(
session, listener_id=listener_id)
if not db_ls:
LOG.warning(
_LW("Listener Statistics for Listener %s was not found"),
listener_id)
LOG.warning("Listener Statistics for Listener %s was not found",
listener_id)

statistics = data_models.ListenerStatistics(listener_id=listener_id)

@ -26,8 +26,6 @@ import six

from octavia.common import data_models as data_models
import octavia.common.exceptions as exceptions
from octavia.i18n import _LE


X509_BEG = b'-----BEGIN CERTIFICATE-----'
X509_END = b'-----END CERTIFICATE-----'
@ -81,7 +79,7 @@ def _read_private_key(private_key_pem, passphrase=None):
return serialization.load_pem_private_key(private_key_pem, passphrase,
backends.default_backend())
except Exception:
LOG.exception(_LE("Passphrase required."))
LOG.exception("Passphrase required.")
raise exceptions.NeedsPassphrase


@ -162,7 +160,7 @@ def _parse_pkcs7_bundle(pkcs7):
for cert in _get_certs_from_pkcs7_substrate(substrate):
yield cert
except Exception:
LOG.exception(_LE('Unreadable Certificate.'))
LOG.exception('Unreadable Certificate.')
raise exceptions.UnreadableCert

# If no PEM encoding, assume this is DER encoded and try to decode
@ -221,10 +219,10 @@ def _get_certs_from_pkcs7_substrate(substrate):
asn1Spec=rfc2315.ContentInfo())
contentType = contentInfo.getComponentByName('contentType')
except Exception:
LOG.exception(_LE('Unreadable Certificate.'))
LOG.exception('Unreadable Certificate.')
raise exceptions.UnreadableCert
if contentType != rfc2315.signedData:
LOG.exception(_LE('Unreadable Certificate.'))
LOG.exception('Unreadable Certificate.')
raise exceptions.UnreadableCert

try:
@ -232,7 +230,7 @@ def _get_certs_from_pkcs7_substrate(substrate):
contentInfo.getComponentByName('content'),
asn1Spec=rfc2315.SignedData())
except Exception:
LOG.exception(_LE('Unreadable Certificate.'))
LOG.exception('Unreadable Certificate.')
raise exceptions.UnreadableCert

for cert in content.getComponentByName('certificates'):
@ -269,7 +267,7 @@ def get_host_names(certificate):

return host_names
except Exception:
LOG.exception(_LE('Unreadable Certificate.'))
LOG.exception('Unreadable Certificate.')
raise exceptions.UnreadableCert


@ -284,7 +282,7 @@ def get_cert_expiration(certificate_pem):
backends.default_backend())
return cert.not_valid_after
except Exception:
LOG.exception(_LE('Unreadable Certificate.'))
LOG.exception('Unreadable Certificate.')
raise exceptions.UnreadableCert


@ -300,7 +298,7 @@ def _get_x509_from_pem_bytes(certificate_pem):
x509cert = x509.load_pem_x509_certificate(certificate_pem,
backends.default_backend())
except Exception:
LOG.exception(_LE('Unreadable Certificate.'))
LOG.exception('Unreadable Certificate.')
raise exceptions.UnreadableCert
return x509cert

@ -315,7 +313,7 @@ def _get_x509_from_der_bytes(certificate_der):
x509cert = x509.load_der_x509_certificate(certificate_der,
backends.default_backend())
except Exception:
LOG.exception(_LE('Unreadable Certificate.'))
LOG.exception('Unreadable Certificate.')
raise exceptions.UnreadableCert
return x509cert

@ -24,7 +24,6 @@ from octavia.common import constants
from octavia.common import data_models as models
from octavia.common import exceptions
from octavia.compute import compute_base
from octavia.i18n import _LE, _LW

LOG = logging.getLogger(__name__)

@ -51,20 +50,17 @@ def _extract_amp_image_id_by_tag(client, image_tag, image_owner):
image_id = images[0]['id']
num_images = len(images)
if num_images > 1:
LOG.warning(
_LW("A single Glance image should be tagged with %(tag)s tag, "
"but at least two were found. Using %(image_id)s."),
{'tag': image_tag, 'image_id': image_id}
)
LOG.warning("A single Glance image should be tagged with %(tag)s tag, "
"but at least two were found. Using %(image_id)s.",
{'tag': image_tag, 'image_id': image_id})
return image_id


def _get_image_uuid(client, image_id, image_tag, image_owner):
if image_id:
if image_tag:
LOG.warning(
_LW("Both amp_image_id and amp_image_tag options defined. "
"Using the amp_image_id."))
LOG.warning("Both amp_image_id and amp_image_tag options defined. "
"Using the amp_image_id.")
return image_id

return _extract_amp_image_id_by_tag(client, image_tag, image_owner)
@ -157,7 +153,7 @@ class VirtualMachineManager(compute_base.ComputeBase):

return amphora.id
except Exception:
LOG.exception(_LE("Error building nova virtual machine."))
LOG.exception("Error building nova virtual machine.")
raise exceptions.ComputeBuildException()

def delete(self, compute_id):
@ -168,10 +164,10 @@ class VirtualMachineManager(compute_base.ComputeBase):
try:
self.manager.delete(server=compute_id)
except nova_exceptions.NotFound:
LOG.warning(_LW("Nova instance with id: %s not found. "
"Assuming already deleted."), compute_id)
LOG.warning("Nova instance with id: %s not found. "
"Assuming already deleted.", compute_id)
except Exception:
LOG.exception(_LE("Error deleting nova virtual machine."))
LOG.exception("Error deleting nova virtual machine.")
raise exceptions.ComputeDeleteException()

def status(self, compute_id):
@ -185,7 +181,7 @@ class VirtualMachineManager(compute_base.ComputeBase):
if amphora and amphora.status == 'ACTIVE':
return constants.UP
except Exception:
LOG.exception(_LE("Error retrieving nova virtual machine status."))
LOG.exception("Error retrieving nova virtual machine status.")
raise exceptions.ComputeStatusException()
return constants.DOWN

@ -199,7 +195,7 @@ class VirtualMachineManager(compute_base.ComputeBase):
try:
amphora = self.manager.get(compute_id)
except Exception:
LOG.exception(_LE("Error retrieving nova virtual machine."))
LOG.exception("Error retrieving nova virtual machine.")
raise exceptions.ComputeGetException()
return self._translate_amphora(amphora)

@ -246,7 +242,7 @@ class VirtualMachineManager(compute_base.ComputeBase):
server_group_obj = self.server_groups.create(**kwargs)
return server_group_obj
except Exception:
LOG.exception(_LE("Error create server group instance."))
LOG.exception("Error create server group instance.")
raise exceptions.ServerGroupObjectCreateException()

def delete_server_group(self, server_group_id):
@ -259,8 +255,8 @@ class VirtualMachineManager(compute_base.ComputeBase):
self.server_groups.delete(server_group_id)

except nova_exceptions.NotFound:
LOG.warning(_LW("Server group instance with id: %s not found. "
"Assuming already deleted."), server_group_id)
LOG.warning("Server group instance with id: %s not found. "
"Assuming already deleted.", server_group_id)
except Exception:
LOG.exception(_LE("Error delete server group instance."))
LOG.exception("Error delete server group instance.")
raise exceptions.ServerGroupObjectDeleteException()

@ -21,7 +21,6 @@ from oslo_log import log as logging
from octavia.controller.worker import controller_worker as cw
from octavia.db import api as db_api
from octavia.db import repositories as repo
from octavia.i18n import _LI

CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@ -50,12 +49,12 @@ class HealthManager(object):
if amp is None:
break
failover_count += 1
LOG.info(_LI("Stale amphora's id is: %s"),
LOG.info("Stale amphora's id is: %s",
amp.amphora_id)
executor.submit(self.cw.failover_amphora,
amp.amphora_id)
if failover_count > 0:
LOG.info(_LI("Failed over %s amphora"),
LOG.info("Failed over %s amphora",
failover_count)
time.sleep(CONF.health_manager.health_check_interval)
finally:

@ -25,8 +25,6 @@ from octavia.controller.healthmanager import update_serializer
from octavia.controller.queue import event_queue
from octavia.db import api as db_api
from octavia.db import repositories as repo
from octavia.i18n import _LE, _LW


LOG = logging.getLogger(__name__)

@ -107,10 +105,9 @@ class UpdateHealthDb(object):
last_update=(datetime.
datetime.utcnow()))
else:
LOG.warning(_LW('Amphora %(id)s health message reports %(found)i '
'listeners when %(expected)i expected'),
{'id': health['id'],
'found': len(listeners),
LOG.warning('Amphora %(id)s health message reports %(found)i '
'listeners when %(expected)i expected',
{'id': health['id'], 'found': len(listeners),
'expected': expected_listener_count})

# We got a heartbeat so lb is healthy until proven otherwise
@ -129,8 +126,8 @@ class UpdateHealthDb(object):
if lb_status == constants.ONLINE:
lb_status = constants.DEGRADED
else:
LOG.warning(_LW('Listener %(list)s reported status of '
'%(status)s'), {'list': listener_id,
LOG.warning(('Listener %(list)s reported status of '
'%(status)s'), {'list': listener_id,
'status': listener.get('status')})

try:
@ -140,7 +137,7 @@ class UpdateHealthDb(object):
listener_id, listener_status
)
except sqlalchemy.orm.exc.NoResultFound:
LOG.error(_LE("Listener %s is not in DB"), listener_id)
LOG.error("Listener %s is not in DB", listener_id)

pools = listener['pools']
for pool_id, pool in pools.items():
@ -154,8 +151,8 @@ class UpdateHealthDb(object):
pool_status = constants.ERROR
lb_status = constants.ERROR
else:
LOG.warning(_LW('Pool %(pool)s reported status of '
'%(status)s'), {'pool': pool_id,
LOG.warning(('Pool %(pool)s reported status of '
'%(status)s'), {'pool': pool_id,
'status': pool.get('status')})

members = pool['members']
@ -173,9 +170,9 @@ class UpdateHealthDb(object):
elif status == constants.NO_CHECK:
member_status = constants.NO_MONITOR
else:
LOG.warning(_LW('Member %(mem)s reported status of '
'%(status)s'), {'mem': member_id,
'status': status})
LOG.warning('Member %(mem)s reported status of '
'%(status)s', {'mem': member_id,
'status': status})

try:
if member_status is not None:
@ -184,8 +181,8 @@ class UpdateHealthDb(object):
member_id, member_status
)
except sqlalchemy.orm.exc.NoResultFound:
LOG.error(_LE("Member %s is not able to update "
"in DB"), member_id)
LOG.error("Member %s is not able to update "
"in DB", member_id)

try:
if pool_status is not None:
@ -194,7 +191,7 @@ class UpdateHealthDb(object):
pool_id, pool_status
)
except sqlalchemy.orm.exc.NoResultFound:
LOG.error(_LE("Pool %s is not in DB"), pool_id)
LOG.error("Pool %s is not in DB", pool_id)

# Update the load balancer status last
# TODO(sbalukoff): This logic will need to be adjusted if we
@ -208,7 +205,7 @@ class UpdateHealthDb(object):
constants.LOADBALANCER, lb_id, lb_status
)
except sqlalchemy.orm.exc.NoResultFound:
LOG.error(_LE("Load balancer %s is not in DB"), lb_id)
LOG.error("Load balancer %s is not in DB", lb_id)


class UpdateStatsDb(stats.StatsMixin):

@ -22,7 +22,6 @@ from octavia.common import constants
from octavia.controller.worker import controller_worker as cw
from octavia.db import api as db_api
from octavia.db import repositories as repo
from octavia.i18n import _LI

LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@ -47,8 +46,7 @@ class SpareAmphora(object):

# When the current spare amphora is less than required
if diff_count > 0:
LOG.info(_LI("Initiating creation of %d spare amphora.") %
diff_count)
LOG.info("Initiating creation of %d spare amphora." % diff_count)

# Call Amphora Create Flow diff_count times
for i in range(1, diff_count + 1):
@ -56,8 +54,7 @@ class SpareAmphora(object):
self.cw.create_amphora()

else:
LOG.debug(_LI("Current spare amphora count satisfies the "
"requirement"))
LOG.debug("Current spare amphora count satisfies the requirement")


class DatabaseCleanup(object):
@ -77,9 +74,9 @@ class DatabaseCleanup(object):
for amp in amphora:
if self.amp_health_repo.check_amphora_expired(session, amp.id,
exp_age):
LOG.info(_LI('Attempting to delete Amphora id : %s'), amp.id)
LOG.info('Attempting to delete Amphora id : %s', amp.id)
self.amp_repo.delete(session, id=amp.id)
LOG.info(_LI('Deleted Amphora id : %s') % amp.id)
LOG.info('Deleted Amphora id : %s' % amp.id)

def cleanup_load_balancers(self):
"""Checks the DB for old load balancers and triggers their removal."""
@ -93,10 +90,9 @@ class DatabaseCleanup(object):
for lb in load_balancers:
if self.lb_repo.check_load_balancer_expired(session, lb.id,
exp_age):
LOG.info(_LI('Attempting to delete load balancer id : %s'),
lb.id)
LOG.info('Attempting to delete load balancer id : %s', lb.id)
self.lb_repo.delete(session, id=lb.id)
LOG.info(_LI('Deleted load balancer id : %s') % lb.id)
LOG.info('Deleted load balancer id : %s' % lb.id)


class CertRotation(object):
@ -120,7 +116,7 @@ class CertRotation(object):
LOG.debug("Cert expired amphora's id is: %s", amp.id)
executor.submit(self.cw.amphora_cert_rotation, amp.id)
if rotation_count > 0:
LOG.info(_LI("Rotated certificates for %s amphora") %
LOG.info("Rotated certificates for %s amphora" %
rotation_count)
finally:
executor.shutdown(wait=True)

@ -18,7 +18,6 @@ import oslo_messaging as messaging
from oslo_messaging.rpc import dispatcher

from octavia.controller.queue import endpoint
from octavia.i18n import _LI

LOG = logging.getLogger(__name__)

@ -35,7 +34,7 @@ class ConsumerService(cotyledon.Service):
self.message_listener = None

def run(self):
LOG.info(_LI('Starting consumer...'))
LOG.info('Starting consumer...')
transport = messaging.get_transport(self.conf)
target = messaging.Target(topic=self.topic, server=self.server,
fanout=False)
@ -47,15 +46,14 @@ class ConsumerService(cotyledon.Service):

def terminate(self, graceful=False):
if self.message_listener:
LOG.info(_LI('Stopping consumer...'))
LOG.info('Stopping consumer...')
self.message_listener.stop()
if graceful:
LOG.info(
_LI('Consumer successfully stopped. Waiting for final '
'messages to be processed...'))
LOG.info('Consumer successfully stopped. Waiting for final '
'messages to be processed...')
self.message_listener.wait()
if self.endpoints:
LOG.info(_LI('Shutting down endpoint worker executors...'))
LOG.info('Shutting down endpoint worker executors...')
for e in self.endpoints:
try:
e.worker.executor.shutdown()

@ -18,7 +18,6 @@ import oslo_messaging as messaging
from stevedore import driver as stevedore_driver

from octavia.common import constants
from octavia.i18n import _LI

CONF = cfg.CONF

@ -41,87 +40,87 @@ class Endpoint(object):
).driver

def create_load_balancer(self, context, load_balancer_id):
LOG.info(_LI('Creating load balancer \'%s\'...'), load_balancer_id)
LOG.info('Creating load balancer \'%s\'...', load_balancer_id)
self.worker.create_load_balancer(load_balancer_id)

def update_load_balancer(self, context, load_balancer_id,
load_balancer_updates):
LOG.info(_LI('Updating load balancer \'%s\'...'), load_balancer_id)
LOG.info('Updating load balancer \'%s\'...', load_balancer_id)
self.worker.update_load_balancer(load_balancer_id,
load_balancer_updates)

def delete_load_balancer(self, context, load_balancer_id, cascade=False):
LOG.info(_LI('Deleting load balancer \'%s\'...'), load_balancer_id)
LOG.info('Deleting load balancer \'%s\'...', load_balancer_id)
self.worker.delete_load_balancer(load_balancer_id, cascade)

def create_listener(self, context, listener_id):
LOG.info(_LI('Creating listener \'%s\'...'), listener_id)
LOG.info('Creating listener \'%s\'...', listener_id)
self.worker.create_listener(listener_id)

def update_listener(self, context, listener_id, listener_updates):
LOG.info(_LI('Updating listener \'%s\'...'), listener_id)
LOG.info('Updating listener \'%s\'...', listener_id)
self.worker.update_listener(listener_id, listener_updates)

def delete_listener(self, context, listener_id):
LOG.info(_LI('Deleting listener \'%s\'...'), listener_id)
LOG.info('Deleting listener \'%s\'...', listener_id)
self.worker.delete_listener(listener_id)

def create_pool(self, context, pool_id):
LOG.info(_LI('Creating pool \'%s\'...'), pool_id)
LOG.info('Creating pool \'%s\'...', pool_id)
self.worker.create_pool(pool_id)

def update_pool(self, context, pool_id, pool_updates):
LOG.info(_LI('Updating pool \'%s\'...'), pool_id)
LOG.info('Updating pool \'%s\'...', pool_id)
self.worker.update_pool(pool_id, pool_updates)

def delete_pool(self, context, pool_id):
LOG.info(_LI('Deleting pool \'%s\'...'), pool_id)
LOG.info('Deleting pool \'%s\'...', pool_id)
self.worker.delete_pool(pool_id)

def create_health_monitor(self, context, pool_id):
LOG.info(_LI('Creating health monitor on pool \'%s\'...'), pool_id)
LOG.info('Creating health monitor on pool \'%s\'...', pool_id)
self.worker.create_health_monitor(pool_id)

def update_health_monitor(self, context, pool_id, health_monitor_updates):
LOG.info(_LI('Updating health monitor on pool \'%s\'...'), pool_id)
LOG.info('Updating health monitor on pool \'%s\'...', pool_id)
self.worker.update_health_monitor(pool_id, health_monitor_updates)

def delete_health_monitor(self, context, pool_id):
LOG.info(_LI('Deleting health monitor on pool \'%s\'...'), pool_id)
LOG.info('Deleting health monitor on pool \'%s\'...', pool_id)
self.worker.delete_health_monitor(pool_id)

def create_member(self, context, member_id):
LOG.info(_LI('Creating member \'%s\'...') % member_id)
LOG.info('Creating member \'%s\'...' % member_id)
self.worker.create_member(member_id)

def update_member(self, context, member_id, member_updates):
LOG.info(_LI('Updating member \'%s\'...') % member_id)
LOG.info('Updating member \'%s\'...' % member_id)
self.worker.update_member(member_id, member_updates)

def delete_member(self, context, member_id):
LOG.info(_LI('Deleting member \'%s\'...') % member_id)
LOG.info('Deleting member \'%s\'...' % member_id)
self.worker.delete_member(member_id)

def create_l7policy(self, context, l7policy_id):
LOG.info(_LI('Creating l7policy \'%s\'...') % l7policy_id)
LOG.info('Creating l7policy \'%s\'...' % l7policy_id)
self.worker.create_l7policy(l7policy_id)

def update_l7policy(self, context, l7policy_id, l7policy_updates):
LOG.info(_LI('Updating l7policy \'%s\'...') % l7policy_id)
LOG.info('Updating l7policy \'%s\'...' % l7policy_id)
self.worker.update_l7policy(l7policy_id, l7policy_updates)

def delete_l7policy(self, context, l7policy_id):
LOG.info(_LI('Deleting l7policy \'%s\'...') % l7policy_id)
LOG.info('Deleting l7policy \'%s\'...' % l7policy_id)
self.worker.delete_l7policy(l7policy_id)

def create_l7rule(self, context, l7rule_id):
LOG.info(_LI('Creating l7rule \'%s\'...') % l7rule_id)
LOG.info('Creating l7rule \'%s\'...' % l7rule_id)
self.worker.create_l7rule(l7rule_id)

def update_l7rule(self, context, l7rule_id, l7rule_updates):
LOG.info(_LI('Updating l7rule \'%s\'...') % l7rule_id)
LOG.info('Updating l7rule \'%s\'...' % l7rule_id)
self.worker.update_l7rule(l7rule_id, l7rule_updates)

def delete_l7rule(self, context, l7rule_id):
LOG.info(_LI('Deleting l7rule \'%s\'...') % l7rule_id)
LOG.info('Deleting l7rule \'%s\'...' % l7rule_id)
self.worker.delete_l7rule(l7rule_id)

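Note: the rewritten calls above keep whatever interpolation style the old code used, so eager %-formatting such as ('Creating member \'%s\'...' % member_id) survives alongside the lazy argument form. Both log the same text; the lazy form is generally preferred in OpenStack code because formatting is deferred until the record is actually emitted. A small sketch of the two forms (illustrative function names only):

    import logging

    LOG = logging.getLogger(__name__)

    def log_eager(member_id):
        # The string is formatted before LOG.info is called, even when the
        # INFO level is disabled.
        LOG.info('Creating member \'%s\'...' % member_id)

    def log_lazy(member_id):
        # The value is passed as a logging argument; formatting happens
        # inside the logging machinery only when the record is emitted.
        LOG.info('Creating member \'%s\'...', member_id)
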
@ -27,7 +27,6 @@ from octavia.controller.worker.flows import member_flows
from octavia.controller.worker.flows import pool_flows
from octavia.db import api as db_apis
from octavia.db import repositories as repo
from octavia.i18n import _LE, _LI

from oslo_config import cfg
from oslo_utils import excutils
@ -653,7 +652,7 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):

except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failover exception: %s") % e)
LOG.error("Failover exception: %s" % e)

def amphora_cert_rotation(self, amphora_id):
"""Perform cert rotation for an amphora.
@ -665,8 +664,7 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):

amp = self._amphora_repo.get(db_apis.get_session(),
id=amphora_id)
LOG.info(_LI("Start amphora cert rotation, amphora's id is: %s")
% amp.id)
LOG.info("Start amphora cert rotation, amphora's id is: %s" % amp.id)

certrotation_amphora_tf = self._taskflow_load(
self._amphora_flows.cert_rotate_amphora_flow(),

@ -30,8 +30,6 @@ from octavia.controller.worker.tasks import database_tasks
from octavia.controller.worker.tasks import lifecycle_tasks
from octavia.controller.worker.tasks import model_tasks
from octavia.controller.worker.tasks import network_tasks
from octavia.i18n import _LE


CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@ -63,8 +61,8 @@ class LoadBalancerFlows(object):
elif topology == constants.TOPOLOGY_SINGLE:
lb_create_flow.add(*self._create_single_topology())
else:
LOG.error(_LE("Unknown topology: %s. Unable to build load "
"balancer."), topology)
LOG.error("Unknown topology: %s. Unable to build load balancer.",
topology)
raise exceptions.InvalidTopology(topology=topology)

post_amp_prefix = constants.POST_LB_AMP_ASSOCIATION_SUBFLOW

@ -19,7 +19,6 @@ import logging
from octavia.common import constants
from octavia.db import api as db_apis
from octavia.db import repositories as repo
from octavia.i18n import _LE

LOG = logging.getLogger(__name__)

@ -51,9 +50,9 @@ class TaskUtils(object):
id=amphora_id,
status=constants.ERROR)
except Exception as e:
LOG.error(_LE("Failed to update amphora %(amp)s "
"status to ERROR due to: "
"%(except)s"), {'amp': amphora_id, 'except': e})
LOG.error("Failed to update amphora %(amp)s "
"status to ERROR due to: "
"%(except)s", {'amp': amphora_id, 'except': e})

def mark_health_mon_prov_status_error(self, health_mon_id):
"""Sets a health monitor provisioning status to ERROR.
@ -67,10 +66,9 @@ class TaskUtils(object):
pool_id=health_mon_id,
provisioning_status=constants.ERROR)
except Exception as e:
LOG.error(_LE("Failed to update health monitor %(health)s "
"provisioning status to ERROR due to: "
"%(except)s"), {'health': health_mon_id,
'except': e})
LOG.error("Failed to update health monitor %(health)s "
"provisioning status to ERROR due to: "
"%(except)s", {'health': health_mon_id, 'except': e})

def mark_l7policy_prov_status_error(self, l7policy_id):
"""Sets a L7 policy provisioning status to ERROR.
@ -84,9 +82,9 @@ class TaskUtils(object):
id=l7policy_id,
provisioning_status=constants.ERROR)
except Exception as e:
LOG.error(_LE("Failed to update l7policy %(l7p)s "
"provisioning status to ERROR due to: "
"%(except)s"), {'l7p': l7policy_id, 'except': e})
LOG.error("Failed to update l7policy %(l7p)s "
"provisioning status to ERROR due to: "
"%(except)s", {'l7p': l7policy_id, 'except': e})

def mark_l7rule_prov_status_error(self, l7rule_id):
"""Sets a L7 rule provisioning status to ERROR.
@ -100,9 +98,9 @@ class TaskUtils(object):
id=l7rule_id,
provisioning_status=constants.ERROR)
except Exception as e:
LOG.error(_LE("Failed to update l7rule %(l7r)s "
"provisioning status to ERROR due to: "
"%(except)s"), {'l7r': l7rule_id, 'except': e})
LOG.error("Failed to update l7rule %(l7r)s "
"provisioning status to ERROR due to: "
"%(except)s", {'l7r': l7rule_id, 'except': e})

def mark_listener_prov_status_error(self, listener_id):
"""Sets a listener provisioning status to ERROR.
@ -116,9 +114,9 @@ class TaskUtils(object):
id=listener_id,
provisioning_status=constants.ERROR)
except Exception as e:
LOG.error(_LE("Failed to update listener %(list)s "
"provisioning status to ERROR due to: "
"%(except)s"), {'list': listener_id, 'except': e})
LOG.error("Failed to update listener %(list)s "
"provisioning status to ERROR due to: "
"%(except)s", {'list': listener_id, 'except': e})

def mark_loadbalancer_prov_status_error(self, loadbalancer_id):
"""Sets a load balancer provisioning status to ERROR.
@ -133,9 +131,9 @@ class TaskUtils(object):
id=loadbalancer_id,
provisioning_status=constants.ERROR)
except Exception as e:
LOG.error(_LE("Failed to update load balancer %(lb)s "
"provisioning status to ERROR due to: "
"%(except)s"), {'lb': loadbalancer_id, 'except': e})
LOG.error("Failed to update load balancer %(lb)s "
"provisioning status to ERROR due to: "
"%(except)s", {'lb': loadbalancer_id, 'except': e})

def mark_listener_prov_status_active(self, listener_id):
"""Sets a listener provisioning status to ACTIVE.
@ -150,9 +148,9 @@ class TaskUtils(object):
id=listener_id,
provisioning_status=constants.ACTIVE)
except Exception as e:
LOG.error(_LE("Failed to update listener %(list)s "
"provisioning status to ACTIVE due to: "
"%(except)s"), {'list': listener_id, 'except': e})
LOG.error("Failed to update listener %(list)s "
"provisioning status to ACTIVE due to: "
"%(except)s", {'list': listener_id, 'except': e})

def mark_pool_prov_status_active(self, pool_id):
"""Sets a pool provisioning status to ACTIVE.
@ -166,9 +164,9 @@ class TaskUtils(object):
id=pool_id,
provisioning_status=constants.ACTIVE)
except Exception as e:
LOG.error(_LE("Failed to update pool %(pool)s "
"provisioning status to ACTIVE due to: "
"%(except)s"), {'pool': pool_id, 'except': e})
LOG.error("Failed to update pool %(pool)s provisioning status "
"to ACTIVE due to: %(except)s", {'pool': pool_id,
'except': e})

def mark_loadbalancer_prov_status_active(self, loadbalancer_id):
"""Sets a load balancer provisioning status to ACTIVE.
@ -183,9 +181,9 @@ class TaskUtils(object):
id=loadbalancer_id,
provisioning_status=constants.ACTIVE)
except Exception as e:
LOG.error(_LE("Failed to update load balancer %(lb)s "
"provisioning status to ACTIVE due to: "
"%(except)s"), {'lb': loadbalancer_id, 'except': e})
LOG.error("Failed to update load balancer %(lb)s "
"provisioning status to ACTIVE due to: "
"%(except)s", {'lb': loadbalancer_id, 'except': e})

def mark_member_prov_status_error(self, member_id):
"""Sets a member provisioning status to ERROR.
@ -199,9 +197,9 @@ class TaskUtils(object):
id=member_id,
provisioning_status=constants.ERROR)
except Exception as e:
LOG.error(_LE("Failed to update member %(member)s "
"provisioning status to ERROR due to: "
"%(except)s"), {'member': member_id, 'except': e})
LOG.error("Failed to update member %(member)s "
"provisioning status to ERROR due to: "
"%(except)s", {'member': member_id, 'except': e})

def mark_pool_prov_status_error(self, pool_id):
"""Sets a pool provisioning status to ERROR.
@ -215,6 +213,6 @@ class TaskUtils(object):
id=pool_id,
provisioning_status=constants.ERROR)
except Exception as e:
LOG.error(_LE("Failed to update pool %(pool)s "
"provisioning status to ERROR due to: "
"%(except)s"), {'pool': pool_id, 'except': e})
LOG.error("Failed to update pool %(pool)s "
"provisioning status to ERROR due to: "
"%(except)s", {'pool': pool_id, 'except': e})

@ -25,7 +25,6 @@ from octavia.common import constants
from octavia.controller.worker import task_utils as task_utilities
from octavia.db import api as db_apis
from octavia.db import repositories as repo
from octavia.i18n import _LE, _LW

CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@ -59,7 +58,7 @@ class ListenersUpdate(BaseAmphoraTask):
def revert(self, loadbalancer, *args, **kwargs):
"""Handle failed listeners updates."""

LOG.warning(_LW("Reverting listeners updates."))
LOG.warning("Reverting listeners updates.")

for listener in loadbalancer.listeners:
self.task_utils.mark_listener_prov_status_error(listener.id)
@ -78,7 +77,7 @@ class ListenerStop(BaseAmphoraTask):
def revert(self, listener, *args, **kwargs):
"""Handle a failed listener stop."""

LOG.warning(_LW("Reverting listener stop."))
LOG.warning("Reverting listener stop.")

self.task_utils.mark_listener_prov_status_error(listener.id)

@ -96,7 +95,7 @@ class ListenerStart(BaseAmphoraTask):
def revert(self, listener, *args, **kwargs):
"""Handle a failed listener start."""

LOG.warning(_LW("Reverting listener start."))
LOG.warning("Reverting listener start.")

self.task_utils.mark_listener_prov_status_error(listener.id)

@ -115,7 +114,7 @@ class ListenersStart(BaseAmphoraTask):
def revert(self, listeners, *args, **kwargs):
"""Handle failed listeners starts."""

LOG.warning(_LW("Reverting listeners starts."))
LOG.warning("Reverting listeners starts.")
for listener in listeners:
self.task_utils.mark_listener_prov_status_error(listener.id)

@ -133,7 +132,7 @@ class ListenerDelete(BaseAmphoraTask):
def revert(self, listener, *args, **kwargs):
"""Handle a failed listener delete."""

LOG.warning(_LW("Reverting listener delete."))
LOG.warning("Reverting listener delete.")

self.task_utils.mark_listener_prov_status_error(listener.id)

@ -166,7 +165,7 @@ class AmphoraFinalize(BaseAmphoraTask):
"""Handle a failed amphora finalize."""
if isinstance(result, failure.Failure):
return
LOG.warning(_LW("Reverting amphora finalize."))
LOG.warning("Reverting amphora finalize.")
self.task_utils.mark_amphora_status_error(amphora.id)


@ -185,7 +184,7 @@ class AmphoraPostNetworkPlug(BaseAmphoraTask):
"""Handle a failed post network plug."""
if isinstance(result, failure.Failure):
return
LOG.warning(_LW("Reverting post network plug."))
LOG.warning("Reverting post network plug.")
self.task_utils.mark_amphora_status_error(amphora.id)


@ -203,7 +202,7 @@ class AmphoraePostNetworkPlug(BaseAmphoraTask):
"""Handle a failed post network plug."""
if isinstance(result, failure.Failure):
return
LOG.warning(_LW("Reverting post network plug."))
LOG.warning("Reverting post network plug.")
for amphora in six.moves.filter(
lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
loadbalancer.amphorae):
@ -224,7 +223,7 @@ class AmphoraPostVIPPlug(BaseAmphoraTask):
"""Handle a failed amphora vip plug notification."""
if isinstance(result, failure.Failure):
return
LOG.warning(_LW("Reverting post vip plug."))
LOG.warning("Reverting post vip plug.")
self.task_utils.mark_amphora_status_error(amphora.id)
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)

@ -244,7 +243,7 @@ class AmphoraePostVIPPlug(BaseAmphoraTask):
"""Handle a failed amphora vip plug notification."""
if isinstance(result, failure.Failure):
return
LOG.warning(_LW("Reverting amphorae post vip plug."))
LOG.warning("Reverting amphorae post vip plug.")
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)


@ -266,12 +265,12 @@ class AmphoraUpdateVRRPInterface(BaseAmphoraTask):
for amp in six.moves.filter(
lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
loadbalancer.amphorae):
# Currently this is supported only with REST Driver
interface = self.amphora_driver.get_vrrp_interface(amp)
self.amphora_repo.update(db_apis.get_session(), amp.id,
vrrp_interface=interface)
amps.append(self.amphora_repo.get(db_apis.get_session(),
id=amp.id))
# Currently this is supported only with REST Driver
interface = self.amphora_driver.get_vrrp_interface(amp)
self.amphora_repo.update(db_apis.get_session(), amp.id,
vrrp_interface=interface)
amps.append(self.amphora_repo.get(db_apis.get_session(),
id=amp.id))
loadbalancer.amphorae = amps
return loadbalancer

@ -279,7 +278,7 @@ class AmphoraUpdateVRRPInterface(BaseAmphoraTask):
"""Handle a failed amphora vip plug notification."""
if isinstance(result, failure.Failure):
return
LOG.warning(_LW("Reverting Get Amphora VRRP Interface."))
LOG.warning("Reverting Get Amphora VRRP Interface.")
for amp in six.moves.filter(
lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
loadbalancer.amphorae):
@ -288,10 +287,9 @@ class AmphoraUpdateVRRPInterface(BaseAmphoraTask):
self.amphora_repo.update(db_apis.get_session(), amp.id,
vrrp_interface=None)
except Exception as e:
LOG.error(_LE("Failed to update amphora %(amp)s "
"VRRP interface to None due to: "
"%(except)s"), {'amp': amp.id,
'except': e})
LOG.error("Failed to update amphora %(amp)s "
"VRRP interface to None due to: %(except)s",
{'amp': amp.id, 'except': e})


class AmphoraVRRPUpdate(BaseAmphoraTask):
@ -27,7 +27,6 @@ from octavia.common import constants
from octavia.common import exceptions
from octavia.common.jinja import user_data_jinja_cfg
from octavia.controller.worker import amphora_rate_limit
from octavia.i18n import _LE, _LW

CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@ -101,7 +100,7 @@ class ComputeCreate(BaseComputeTask):
return compute_id

except Exception:
LOG.exception(_LE("Compute create for amphora id: %s failed"),
LOG.exception("Compute create for amphora id: %s failed",
amphora_id)
raise

@ -113,13 +112,13 @@ class ComputeCreate(BaseComputeTask):
if isinstance(result, failure.Failure):
return
compute_id = result
LOG.warning(_LW("Reverting compute create for amphora with id"
"%(amp)s and compute id: %(comp)s"),
LOG.warning("Reverting compute create for amphora with id"
"%(amp)s and compute id: %(comp)s",
{'amp': amphora_id, 'comp': compute_id})
try:
self.compute.delete(compute_id)
except Exception:
LOG.exception(_LE("Reverting compute create failed"))
LOG.exception("Reverting compute create failed")


class CertComputeCreate(ComputeCreate):
@ -157,7 +156,7 @@ class DeleteAmphoraeOnLoadBalancer(BaseComputeTask):
try:
self.compute.delete(amp.compute_id)
except Exception:
LOG.exception(_LE("Compute delete for amphora id: %s failed"),
LOG.exception("Compute delete for amphora id: %s failed",
amp.id)
raise

@ -169,7 +168,7 @@ class ComputeDelete(BaseComputeTask):
try:
self.compute.delete(amphora.compute_id)
except Exception:
LOG.exception(_LE("Compute delete for amphora id: %s failed"),
LOG.exception("Compute delete for amphora id: %s failed",
amphora.id)
raise

@ -219,14 +218,14 @@ class NovaServerGroupCreate(BaseComputeTask):
:param result: here it refers to server group id
"""
server_group_id = result
LOG.warning(_LW("Reverting server group create with id:%s"),
LOG.warning("Reverting server group create with id:%s",
server_group_id)
try:
self.compute.delete_server_group(server_group_id)
except Exception as e:
LOG.error(_LE("Failed to delete server group. Resources may "
"still be in use for server group: %(sg)s due to "
"error: %(except)s"),
LOG.error("Failed to delete server group. Resources may "
"still be in use for server group: %(sg)s due to "
"error: %(except)s",
{'sg': server_group_id, 'except': e})
@ -31,7 +31,6 @@ import octavia.common.tls_utils.cert_parser as cert_parser
from octavia.controller.worker import task_utils as task_utilities
from octavia.db import api as db_apis
from octavia.db import repositories as repo
from octavia.i18n import _LE, _LI, _LW

CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@ -98,7 +97,7 @@ class CreateAmphoraInDB(BaseDatabaseTask):
status=constants.PENDING_CREATE,
cert_busy=False)

LOG.info(_LI("Created Amphora in DB with id %s"), amphora.id)
LOG.info("Created Amphora in DB with id %s", amphora.id)
return amphora.id

def revert(self, result, *args, **kwargs):
@ -120,17 +119,15 @@ class CreateAmphoraInDB(BaseDatabaseTask):
# executed after this failed so we will need to do something and
# result is the amphora's id

LOG.warning(_LW("Reverting create amphora in DB for amp id %s "),
result)
LOG.warning("Reverting create amphora in DB for amp id %s ", result)

# Delete the amphora for now. May want to just update status later
try:
self.amphora_repo.delete(db_apis.get_session(), id=result)
except Exception as e:
LOG.error(_LE("Failed to delete amphora %(amp)s "
"in the database due to: "
"%(except)s"), {'amp': result,
'except': e})
LOG.error("Failed to delete amphora %(amp)s "
"in the database due to: "
"%(except)s", {'amp': result, 'except': e})


class MarkLBAmphoraeDeletedInDB(BaseDatabaseTask):
@ -176,8 +173,8 @@ class DeleteHealthMonitorInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark health monitor delete in DB "
"for health monitor on pool with id %s"), pool_id)
LOG.warning("Reverting mark health monitor delete in DB "
"for health monitor on pool with id %s", pool_id)
# TODO(johnsom) fix this
# self.health_mon_repo.update(db_apis.get_session(), health_mon.id,
# provisioning_status=constants.ERROR)
@ -230,8 +227,8 @@ class DeleteMemberInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting delete in DB "
"for member id %s"), member.id)
LOG.warning("Reverting delete in DB for member id %s", member.id)
# TODO(johnsom) fix this
# self.member_repo.update(db_apis.get_session(), member.id,
# operating_status=constants.ERROR)
@ -256,8 +252,8 @@ class DeleteListenerInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark listener delete in DB "
"for listener id %s"), listener.id)
LOG.warning("Reverting mark listener delete in DB for listener id %s",
listener.id)


class DeletePoolInDB(BaseDatabaseTask):
@ -283,8 +279,7 @@ class DeletePoolInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting delete in DB "
"for pool id %s"), pool.id)
LOG.warning("Reverting delete in DB for pool id %s", pool.id)
# TODO(johnsom) Fix this
# self.pool_repo.update(db_apis.get_session(), pool.id,
# operating_status=constants.ERROR)
@ -313,8 +308,7 @@ class DeleteL7PolicyInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting delete in DB "
"for l7policy id %s"), l7policy.id)
LOG.warning("Reverting delete in DB for l7policy id %s", l7policy.id)
# TODO(sbalukoff) Fix this
# self.listener_repo.update(db_apis.get_session(), l7policy.listener.id,
# operating_status=constants.ERROR)
@ -343,8 +337,7 @@ class DeleteL7RuleInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting delete in DB "
"for l7rule id %s"), l7rule.id)
LOG.warning("Reverting delete in DB for l7rule id %s", l7rule.id)
# TODO(sbalukoff) Fix this
# self.listener_repo.update(db_apis.get_session(),
# l7rule.l7policy.listener.id,
@ -462,10 +455,9 @@ class AssociateFailoverAmphoraWithLBID(BaseDatabaseTask):
self.repos.amphora.update(db_apis.get_session(), amphora_id,
loadbalancer_id=None)
except Exception as e:
LOG.error(_LE("Failed to update amphora %(amp)s "
"load balancer id to None due to: "
"%(except)s"), {'amp': amphora_id,
'except': e})
LOG.error("Failed to update amphora %(amp)s "
"load balancer id to None due to: "
"%(except)s", {'amp': amphora_id, 'except': e})


class MapLoadbalancerToAmphora(BaseDatabaseTask):
@ -496,8 +488,8 @@ class MapLoadbalancerToAmphora(BaseDatabaseTask):
return amp.id

def revert(self, result, loadbalancer_id, *args, **kwargs):
LOG.warning(_LW("Reverting Amphora allocation for the load "
"balancer %s in the database."), loadbalancer_id)
LOG.warning("Reverting Amphora allocation for the load "
"balancer %s in the database.", loadbalancer_id)
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer_id)


@ -530,18 +522,16 @@ class _MarkAmphoraRoleAndPriorityInDB(BaseDatabaseTask):
if isinstance(result, failure.Failure):
return

LOG.warning(_LW("Reverting amphora role in DB for amp "
"id %(amp)s"),
LOG.warning("Reverting amphora role in DB for amp id %(amp)s",
{'amp': amphora.id})
try:
self.amphora_repo.update(db_apis.get_session(), amphora.id,
role=None,
vrrp_priority=None)
except Exception as e:
LOG.error(_LE("Failed to update amphora %(amp)s "
"role and vrrp_priority to None due to: "
"%(except)s"), {'amp': amphora.id,
'except': e})
LOG.error("Failed to update amphora %(amp)s "
"role and vrrp_priority to None due to: "
"%(except)s", {'amp': amphora.id, 'except': e})


class MarkAmphoraMasterInDB(_MarkAmphoraRoleAndPriorityInDB):
@ -623,10 +613,10 @@ class MarkAmphoraAllocatedInDB(BaseDatabaseTask):
:returns: None
"""

LOG.info(_LI("Mark ALLOCATED in DB for amphora: %(amp)s with "
"compute id %(comp)s for load balancer: %(lb)s"),
LOG.info(("Mark ALLOCATED in DB for amphora: %(amp)s with "
"compute id %(comp)s for load balancer: %(lb)s"),
{"amp": amphora.id, "comp": amphora.compute_id,
"lb": loadbalancer_id})
"lb": loadbalancer_id})
self.amphora_repo.update(db_apis.get_session(), amphora.id,
status=constants.AMPHORA_ALLOCATED,
compute_id=amphora.compute_id,
@ -646,8 +636,8 @@ class MarkAmphoraAllocatedInDB(BaseDatabaseTask):
if isinstance(result, failure.Failure):
return

LOG.warning(_LW("Reverting mark amphora ready in DB for amp "
"id %(amp)s and compute id %(comp)s"),
LOG.warning("Reverting mark amphora ready in DB for amp "
"id %(amp)s and compute id %(comp)s",
{'amp': amphora.id, 'comp': amphora.compute_id})
self.task_utils.mark_amphora_status_error(amphora.id)

@ -681,18 +671,17 @@ class MarkAmphoraBootingInDB(BaseDatabaseTask):
if isinstance(result, failure.Failure):
return

LOG.warning(_LW("Reverting mark amphora booting in DB for amp "
"id %(amp)s and compute id %(comp)s"),
LOG.warning("Reverting mark amphora booting in DB for amp "
"id %(amp)s and compute id %(comp)s",
{'amp': amphora_id, 'comp': compute_id})
try:
self.amphora_repo.update(db_apis.get_session(), amphora_id,
status=constants.ERROR,
compute_id=compute_id)
except Exception as e:
LOG.error(_LE("Failed to update amphora %(amp)s "
"status to ERROR due to: "
"%(except)s"), {'amp': amphora_id,
'except': e})
LOG.error("Failed to update amphora %(amp)s "
"status to ERROR due to: "
"%(except)s", {'amp': amphora_id, 'except': e})


class MarkAmphoraDeletedInDB(BaseDatabaseTask):
@ -721,8 +710,8 @@ class MarkAmphoraDeletedInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark amphora deleted in DB "
"for amp id %(amp)s and compute id %(comp)s"),
LOG.warning("Reverting mark amphora deleted in DB "
"for amp id %(amp)s and compute id %(comp)s",
{'amp': amphora.id, 'comp': amphora.compute_id})
self.task_utils.mark_amphora_status_error(amphora.id)

@ -753,8 +742,8 @@ class MarkAmphoraPendingDeleteInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark amphora pending delete in DB "
"for amp id %(amp)s and compute id %(comp)s"),
LOG.warning("Reverting mark amphora pending delete in DB "
"for amp id %(amp)s and compute id %(comp)s",
{'amp': amphora.id, 'comp': amphora.compute_id})
self.task_utils.mark_amphora_status_error(amphora.id)

@ -785,8 +774,8 @@ class MarkAmphoraPendingUpdateInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark amphora pending update in DB "
"for amp id %(amp)s and compute id %(comp)s"),
LOG.warning("Reverting mark amphora pending update in DB "
"for amp id %(amp)s and compute id %(comp)s",
{'amp': amphora.id, 'comp': amphora.compute_id})
self.task_utils.mark_amphora_status_error(amphora.id)

@ -805,8 +794,8 @@ class MarkAmphoraReadyInDB(BaseDatabaseTask):
:returns: None
"""

LOG.info(_LI("Mark READY in DB for amphora: %(amp)s with compute "
"id %(comp)s"),
LOG.info("Mark READY in DB for amphora: %(amp)s with compute "
"id %(comp)s",
{"amp": amphora.id, "comp": amphora.compute_id})
self.amphora_repo.update(db_apis.get_session(), amphora.id,
status=constants.AMPHORA_READY,
@ -820,8 +809,8 @@ class MarkAmphoraReadyInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark amphora ready in DB for amp "
"id %(amp)s and compute id %(comp)s"),
LOG.warning("Reverting mark amphora ready in DB for amp "
"id %(amp)s and compute id %(comp)s",
{'amp': amphora.id, 'comp': amphora.compute_id})
try:
self.amphora_repo.update(db_apis.get_session(), amphora.id,
@ -829,10 +818,9 @@ class MarkAmphoraReadyInDB(BaseDatabaseTask):
compute_id=amphora.compute_id,
lb_network_ip=amphora.lb_network_ip)
except Exception as e:
LOG.error(_LE("Failed to update amphora %(amp)s "
"status to ERROR due to: "
"%(except)s"), {'amp': amphora.id,
'except': e})
LOG.error("Failed to update amphora %(amp)s "
"status to ERROR due to: "
"%(except)s", {'amp': amphora.id, 'except': e})

class UpdateAmphoraComputeId(BaseDatabaseTask):
@ -927,7 +915,7 @@ class MarkLBActiveInDB(BaseDatabaseTask):
listener.id,
provisioning_status=constants.ACTIVE)

LOG.info(_LI("Mark ACTIVE in DB for load balancer id: %s"),
LOG.info("Mark ACTIVE in DB for load balancer id: %s",
loadbalancer.id)
self.loadbalancer_repo.update(db_apis.get_session(),
loadbalancer.id,
@ -952,11 +940,11 @@ class MarkLBActiveInDB(BaseDatabaseTask):
db_apis.get_session(), listener.id,
provisioning_status=constants.ERROR)
except Exception:
LOG.warning(_LW("Error updating listener %s provisioning "
"status"), listener.id)
LOG.warning("Error updating listener %s provisioning "
"status", listener.id)

LOG.warning(_LW("Reverting mark load balancer deleted in DB "
"for load balancer id %s"), loadbalancer.id)
LOG.warning("Reverting mark load balancer deleted in DB "
"for load balancer id %s", loadbalancer.id)
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)


@ -986,18 +974,17 @@ class UpdateLBServerGroupInDB(BaseDatabaseTask):
associated with the load balancer
:returns: None
"""
LOG.warning(_LW('Reverting Server Group updated with id: %(s1)s for '
'load balancer id: %(s2)s '),
LOG.warning('Reverting Server Group updated with id: %(s1)s for '
'load balancer id: %(s2)s ',
{'s1': server_group_id, 's2': loadbalancer_id})
try:
self.loadbalancer_repo.update(db_apis.get_session(),
id=loadbalancer_id,
server_group_id=None)
except Exception as e:
LOG.error(_LE("Failed to update load balancer %(lb)s "
"server_group_id to None due to: "
"%(except)s"), {'lb': loadbalancer_id,
'except': e})
LOG.error("Failed to update load balancer %(lb)s "
"server_group_id to None due to: "
"%(except)s", {'lb': loadbalancer_id, 'except': e})


class MarkLBDeletedInDB(BaseDatabaseTask):
@ -1026,8 +1013,8 @@ class MarkLBDeletedInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark load balancer deleted in DB "
"for load balancer id %s"), loadbalancer.id)
LOG.warning("Reverting mark load balancer deleted in DB "
"for load balancer id %s", loadbalancer.id)
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)


@ -1058,8 +1045,8 @@ class MarkLBPendingDeleteInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark load balancer pending delete in DB "
"for load balancer id %s"), loadbalancer.id)
LOG.warning("Reverting mark load balancer pending delete in DB "
"for load balancer id %s", loadbalancer.id)
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)


@ -1095,10 +1082,8 @@ class MarkLBAndListenersActiveInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark load balancer "
"and listeners active in DB "
"for load balancer id %(LB)s and "
"listener ids: %(list)s"),
LOG.warning("Reverting mark load balancer and listeners active in DB "
"for load balancer id %(LB)s and listener ids: %(list)s",
{'LB': loadbalancer.id,
'list': ', '.join([l.id for l in listeners])})
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)
@ -1130,8 +1115,8 @@ class MarkListenerActiveInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark listener active in DB "
"for listener id %s"), listener.id)
LOG.warning("Reverting mark listener active in DB "
"for listener id %s", listener.id)
self.task_utils.mark_listener_prov_status_error(listener.id)


@ -1159,8 +1144,8 @@ class MarkListenerDeletedInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark listener deleted in DB "
"for listener id %s"), listener.id)
LOG.warning("Reverting mark listener deleted in DB "
"for listener id %s", listener.id)
self.task_utils.mark_listener_prov_status_error(listener.id)


@ -1189,8 +1174,8 @@ class MarkListenerPendingDeleteInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark listener pending delete in DB "
"for listener id %s"), listener.id)
LOG.warning("Reverting mark listener pending delete in DB "
"for listener id %s", listener.id)
self.task_utils.mark_listener_prov_status_error(listener.id)


@ -1219,8 +1204,8 @@ class UpdateLoadbalancerInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting update loadbalancer in DB "
"for loadbalancer id %s"), loadbalancer.id)
LOG.warning("Reverting update loadbalancer in DB "
"for loadbalancer id %s", loadbalancer.id)

self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)

@ -1250,18 +1235,17 @@ class UpdateHealthMonInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting update health monitor in DB "
"for health monitor id %s"), health_mon.pool_id)
LOG.warning("Reverting update health monitor in DB "
"for health monitor id %s", health_mon.pool_id)
# TODO(johnsom) fix this to set the upper ojects to ERROR
try:
self.health_mon_repo.update(db_apis.get_session(),
health_mon.pool_id,
enabled=0)
except Exception as e:
LOG.error(_LE("Failed to update health monitor %(hm)s "
"enabled to 0 due to: "
"%(except)s"), {'hm': health_mon.pool_id,
'except': e})
LOG.error("Failed to update health monitor %(hm)s "
"enabled to 0 due to: %(except)s",
{'hm': health_mon.pool_id, 'except': e})


class UpdateListenerInDB(BaseDatabaseTask):
@ -1289,8 +1273,8 @@ class UpdateListenerInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting update listener in DB "
"for listener id %s"), listener.id)
LOG.warning("Reverting update listener in DB "
"for listener id %s", listener.id)
self.task_utils.mark_listener_prov_status_error(listener.id)


@ -1319,17 +1303,15 @@ class UpdateMemberInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting update member in DB "
"for member id %s"), member.id)
LOG.warning("Reverting update member in DB "
"for member id %s", member.id)
# TODO(johnsom) fix this to set the upper objects to ERROR
try:
self.member_repo.update(db_apis.get_session(), member.id,
enabled=0)
except Exception as e:
LOG.error(_LE("Failed to update member %(member)s "
"enabled to 0 due to: "
"%(except)s"), {'member': member.id,
'except': e})
LOG.error("Failed to update member %(member)s enabled to 0 due "
"to: %(except)s", {'member': member.id, 'except': e})

class UpdatePoolInDB(BaseDatabaseTask):
@ -1357,17 +1339,14 @@ class UpdatePoolInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting update pool in DB "
"for pool id %s"), pool.id)
LOG.warning("Reverting update pool in DB for pool id %s", pool.id)
# TODO(johnsom) fix this to set the upper objects to ERROR
try:
self.repos.update_pool_and_sp(db_apis.get_session(),
pool.id, enabled=0)
except Exception as e:
LOG.error(_LE("Failed to update pool %(pool)s "
"enabled 0 due to: "
"%(except)s"), {'pool': pool.id,
'except': e})
LOG.error("Failed to update pool %(pool)s enabled 0 due to: "
"%(except)s", {'pool': pool.id, 'except': e})


class UpdateL7PolicyInDB(BaseDatabaseTask):
@ -1395,17 +1374,15 @@ class UpdateL7PolicyInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting update l7policy in DB "
"for l7policy id %s"), l7policy.id)
LOG.warning("Reverting update l7policy in DB "
"for l7policy id %s", l7policy.id)
# TODO(sbalukoff) fix this to set the upper objects to ERROR
try:
self.l7policy_repo.update(db_apis.get_session(), l7policy.id,
enabled=0)
except Exception as e:
LOG.error(_LE("Failed to update l7policy %(l7p)s "
"enabled to 0 due to: "
"%(except)s"), {'l7p': l7policy.id,
'except': e})
LOG.error("Failed to update l7policy %(l7p)s enabled to 0 due "
"to: %(except)s", {'l7p': l7policy.id, 'except': e})


class UpdateL7RuleInDB(BaseDatabaseTask):
@ -1433,18 +1410,16 @@ class UpdateL7RuleInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting update l7rule in DB "
"for l7rule id %s"), l7rule.id)
LOG.warning("Reverting update l7rule in DB "
"for l7rule id %s", l7rule.id)
# TODO(sbalukoff) fix this to set appropriate upper objects to ERROR
try:
self.l7policy_repo.update(db_apis.get_session(),
l7rule.l7policy.id,
enabled=0)
except Exception as e:
LOG.error(_LE("Failed to update L7rule %(l7r)s "
"enabled to 0 due to: "
"%(except)s"), {'l7r': l7rule.l7policy.id,
'except': e})
LOG.error("Failed to update L7rule %(l7r)s enabled to 0 due to: "
"%(except)s", {'l7r': l7rule.l7policy.id, 'except': e})


class GetAmphoraDetails(BaseDatabaseTask):
@ -1614,8 +1589,8 @@ class MarkHealthMonitorActiveInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark health montor ACTIVE in DB "
"for health monitor id %s"), health_mon.pool_id)
LOG.warning("Reverting mark health montor ACTIVE in DB "
"for health monitor id %s", health_mon.pool_id)
self.task_utils.mark_health_mon_prov_status_error(health_mon.pool_id)


@ -1646,8 +1621,8 @@ class MarkHealthMonitorPendingCreateInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark health monitor pending create in DB "
"for health monitor id %s"), health_mon.pool_id)
LOG.warning("Reverting mark health monitor pending create in DB "
"for health monitor id %s", health_mon.pool_id)
self.task_utils.mark_health_mon_prov_status_error(health_mon.pool_id)


@ -1678,8 +1653,8 @@ class MarkHealthMonitorPendingDeleteInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark health monitor pending delete in DB "
"for health monitor id %s"), health_mon.pool_id)
LOG.warning("Reverting mark health monitor pending delete in DB "
"for health monitor id %s", health_mon.pool_id)
self.task_utils.mark_health_mon_prov_status_error(health_mon.pool_id)


@ -1710,8 +1685,8 @@ class MarkHealthMonitorPendingUpdateInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark health monitor pending update in DB "
"for health monitor id %s"), health_mon.pool_id)
LOG.warning("Reverting mark health monitor pending update in DB "
"for health monitor id %s", health_mon.pool_id)
self.task_utils.mark_health_mon_prov_status_error(health_mon.pool_id)


@ -1744,8 +1719,8 @@ class MarkL7PolicyActiveInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark l7policy ACTIVE in DB "
"for l7policy id %s"), l7policy.id)
LOG.warning("Reverting mark l7policy ACTIVE in DB "
"for l7policy id %s", l7policy.id)
self.task_utils.mark_l7policy_prov_status_error(l7policy.id)


@ -1775,8 +1750,8 @@ class MarkL7PolicyPendingCreateInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark l7policy pending create in DB "
"for l7policy id %s"), l7policy.id)
LOG.warning("Reverting mark l7policy pending create in DB "
"for l7policy id %s", l7policy.id)
self.task_utils.mark_l7policy_prov_status_error(l7policy.id)


@ -1806,8 +1781,8 @@ class MarkL7PolicyPendingDeleteInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark l7policy pending delete in DB "
"for l7policy id %s"), l7policy.id)
LOG.warning("Reverting mark l7policy pending delete in DB "
"for l7policy id %s", l7policy.id)
self.task_utils.mark_l7policy_prov_status_error(l7policy.id)


@ -1838,8 +1813,8 @@ class MarkL7PolicyPendingUpdateInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark l7policy pending update in DB "
"for l7policy id %s"), l7policy.id)
LOG.warning("Reverting mark l7policy pending update in DB "
"for l7policy id %s", l7policy.id)
self.task_utils.mark_l7policy_prov_status_error(l7policy.id)


@ -1871,8 +1846,8 @@ class MarkL7RuleActiveInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark l7rule ACTIVE in DB "
"for l7rule id %s"), l7rule.id)
LOG.warning("Reverting mark l7rule ACTIVE in DB "
"for l7rule id %s", l7rule.id)
self.task_utils.mark_l7rule_prov_status_error(l7rule.id)


@ -1902,8 +1877,8 @@ class MarkL7RulePendingCreateInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark l7rule pending create in DB "
"for l7rule id %s"), l7rule.id)
LOG.warning("Reverting mark l7rule pending create in DB "
"for l7rule id %s", l7rule.id)
self.task_utils.mark_l7rule_prov_status_error(l7rule.id)


@ -1933,8 +1908,8 @@ class MarkL7RulePendingDeleteInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark l7rule pending delete in DB "
"for l7rule id %s"), l7rule.id)
LOG.warning("Reverting mark l7rule pending delete in DB "
"for l7rule id %s", l7rule.id)
self.task_utils.mark_l7rule_prov_status_error(l7rule.id)


@ -1964,8 +1939,8 @@ class MarkL7RulePendingUpdateInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark l7rule pending update in DB "
"for l7rule id %s"), l7rule.id)
LOG.warning("Reverting mark l7rule pending update in DB "
"for l7rule id %s", l7rule.id)
self.task_utils.mark_l7rule_prov_status_error(l7rule.id)


@ -1994,8 +1969,8 @@ class MarkMemberActiveInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark member ACTIVE in DB "
"for member id %s"), member.id)
LOG.warning("Reverting mark member ACTIVE in DB "
"for member id %s", member.id)
self.task_utils.mark_member_prov_status_error(member.id)


@ -2024,8 +1999,8 @@ class MarkMemberPendingCreateInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark member pending create in DB "
"for member id %s"), member.id)
LOG.warning("Reverting mark member pending create in DB "
"for member id %s", member.id)
self.task_utils.mark_member_prov_status_error(member.id)


@ -2054,8 +2029,8 @@ class MarkMemberPendingDeleteInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark member pending delete in DB "
"for member id %s"), member.id)
LOG.warning("Reverting mark member pending delete in DB "
"for member id %s", member.id)
self.task_utils.mark_member_prov_status_error(member.id)


@ -2085,8 +2060,8 @@ class MarkMemberPendingUpdateInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark member pending update in DB "
"for member id %s"), member.id)
LOG.warning("Reverting mark member pending update in DB "
"for member id %s", member.id)
self.task_utils.mark_member_prov_status_error(member.id)


@ -2116,8 +2091,7 @@ class MarkPoolActiveInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark pool ACTIVE in DB "
"for pool id %s"), pool.id)
LOG.warning("Reverting mark pool ACTIVE in DB for pool id %s", pool.id)
self.task_utils.mark_pool_prov_status_error(pool.id)


@ -2147,8 +2121,8 @@ class MarkPoolPendingCreateInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark pool pending create in DB "
"for pool id %s"), pool.id)
LOG.warning("Reverting mark pool pending create in DB "
"for pool id %s", pool.id)
self.task_utils.mark_pool_prov_status_error(pool.id)


@ -2178,8 +2152,8 @@ class MarkPoolPendingDeleteInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark pool pending delete in DB "
"for pool id %s"), pool.id)
LOG.warning("Reverting mark pool pending delete in DB "
"for pool id %s", pool.id)
self.task_utils.mark_pool_prov_status_error(pool.id)


@ -2209,8 +2183,8 @@ class MarkPoolPendingUpdateInDB(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW("Reverting mark pool pending update in DB "
"for pool id %s"), pool.id)
LOG.warning("Reverting mark pool pending update in DB "
"for pool id %s", pool.id)
self.task_utils.mark_pool_prov_status_error(pool.id)

@ -2238,10 +2212,9 @@ class DecrementHealthMonitorQuota(BaseDatabaseTask):
lock_session.commit()
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to decrement health monitor quota for '
'project: {proj} the project may have excess '
'quota in use.').format(
proj=health_mon.project_id))
LOG.error('Failed to decrement health monitor quota for '
'project: {proj} the project may have excess '
'quota in use.'.format(proj=health_mon.project_id))
lock_session.rollback()

def revert(self, health_mon, result, *args, **kwargs):
@ -2251,9 +2224,9 @@ class DecrementHealthMonitorQuota(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW('Reverting decrement quota for health monitor '
'on project {proj} Project quota counts may be '
'incorrect.').format(proj=health_mon.project_id))
LOG.warning('Reverting decrement quota for health monitor '
'on project {proj} Project quota counts may be '
'incorrect.'.format(proj=health_mon.project_id))

# Increment the quota back if this task wasn't the failure
if not isinstance(result, failure.Failure):
@ -2298,10 +2271,9 @@ class DecrementListenerQuota(BaseDatabaseTask):
lock_session.commit()
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to decrement listener quota for '
'project: {proj} the project may have excess '
'quota in use.').format(
proj=listener.project_id))
LOG.error('Failed to decrement listener quota for '
'project: {proj} the project may have excess '
'quota in use.'.format(proj=listener.project_id))
lock_session.rollback()

def revert(self, listener, result, *args, **kwargs):
@ -2311,9 +2283,9 @@ class DecrementListenerQuota(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW('Reverting decrement quota for listener '
'on project {proj} Project quota counts may be '
'incorrect.').format(proj=listener.project_id))
LOG.warning('Reverting decrement quota for listener '
'on project {proj} Project quota counts may be '
'incorrect.'.format(proj=listener.project_id))

# Increment the quota back if this task wasn't the failure
if not isinstance(result, failure.Failure):
@ -2358,10 +2330,9 @@ class DecrementLoadBalancerQuota(BaseDatabaseTask):
lock_session.commit()
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to decrement load balancer quota for '
'project: {proj} the project may have excess '
'quota in use.').format(
proj=loadbalancer.project_id))
LOG.error('Failed to decrement load balancer quota for '
'project: {proj} the project may have excess '
'quota in use.'.format(proj=loadbalancer.project_id))
lock_session.rollback()

def revert(self, loadbalancer, result, *args, **kwargs):
@ -2371,9 +2342,9 @@ class DecrementLoadBalancerQuota(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW('Reverting decrement quota for load balancer '
'on project {proj} Project quota counts may be '
'incorrect.').format(proj=loadbalancer.project_id))
LOG.warning('Reverting decrement quota for load balancer '
'on project {proj} Project quota counts may be '
'incorrect.'.format(proj=loadbalancer.project_id))

# Increment the quota back if this task wasn't the failure
if not isinstance(result, failure.Failure):
@ -2418,10 +2389,9 @@ class DecrementMemberQuota(BaseDatabaseTask):
lock_session.commit()
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to decrement member quota for '
'project: {proj} the project may have excess '
'quota in use.').format(
proj=member.project_id))
LOG.error('Failed to decrement member quota for '
'project: {proj} the project may have excess '
'quota in use.'.format(proj=member.project_id))
lock_session.rollback()

def revert(self, member, result, *args, **kwargs):
@ -2431,9 +2401,9 @@ class DecrementMemberQuota(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW('Reverting decrement quota for member '
'on project {proj} Project quota counts may be '
'incorrect.').format(proj=member.project_id))
LOG.warning('Reverting decrement quota for member '
'on project {proj} Project quota counts may be '
'incorrect.'.format(proj=member.project_id))

# Increment the quota back if this task wasn't the failure
if not isinstance(result, failure.Failure):
@ -2490,10 +2460,9 @@ class DecrementPoolQuota(BaseDatabaseTask):
lock_session.commit()
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to decrement pool quota for '
'project: {proj} the project may have excess '
'quota in use.').format(
proj=pool.project_id))
LOG.error('Failed to decrement pool quota for '
'project: {proj} the project may have excess '
'quota in use.'.format(proj=pool.project_id))
lock_session.rollback()

def revert(self, pool, pool_child_count, result, *args, **kwargs):
@ -2503,9 +2472,9 @@ class DecrementPoolQuota(BaseDatabaseTask):
:returns: None
"""

LOG.warning(_LW('Reverting decrement quota for pool '
'on project {proj} Project quota counts may be '
'incorrect.').format(proj=pool.project_id))
LOG.warning('Reverting decrement quota for pool '
'on project {proj} Project quota counts may be '
'incorrect.'.format(proj=pool.project_id))

# Increment the quota back if this task wasn't the failure
if not isinstance(result, failure.Failure):
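Note that the quota tasks above keep their eager str.format() calls, while most other messages in this patch pass a mapping for lazy %-interpolation; after this change both forms are simply untranslated. A side-by-side sketch of the two styles, with a made-up project id:

import logging

LOG = logging.getLogger(__name__)
project_id = 'demo-project'  # illustrative value only

# Lazy form: the mapping is interpolated only if the record is emitted.
LOG.error('Failed to decrement quota for project: %(proj)s',
          {'proj': project_id})

# Eager form, as in the quota tasks: the string is built before the call.
LOG.warning('Reverting decrement quota for project {proj} Project quota '
            'counts may be incorrect.'.format(proj=project_id))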
@ -22,7 +22,6 @@ from taskflow.types import failure

from octavia.common import constants
from octavia.common import utils
from octavia.i18n import _LW, _LE
from octavia.network import base
from octavia.network import data_models as n_data_models

@ -148,7 +147,7 @@ class PlugNetworks(BaseNetworkTask):
def revert(self, amphora, delta, *args, **kwargs):
"""Handle a failed network plug by removing all nics added."""

LOG.warning(_LW("Unable to plug networks for amp id %s"), amphora.id)
LOG.warning("Unable to plug networks for amp id %s", amphora.id)
if not delta:
return

@ -183,7 +182,7 @@ class UnPlugNetworks(BaseNetworkTask):
LOG.debug("Network %d not found", nic.network_id)
pass
except Exception:
LOG.exception(_LE("Unable to unplug network"))
LOG.exception("Unable to unplug network")
pass  # Todo(german) follow up if that makes sense


@ -238,7 +237,7 @@ class HandleNetworkDeltas(BaseNetworkTask):
except base.NetworkNotFound:
LOG.debug("Network %d not found ", nic.network_id)
except Exception:
LOG.exception(_LE("Unable to unplug network"))
LOG.exception("Unable to unplug network")
return added_ports

def revert(self, result, deltas, *args, **kwargs):
@ -247,7 +246,7 @@ class HandleNetworkDeltas(BaseNetworkTask):
if isinstance(result, failure.Failure):
return
for amp_id, delta in six.iteritems(deltas):
LOG.warning(_LW("Unable to plug networks for amp id %s"),
LOG.warning("Unable to plug networks for amp id %s",
delta.amphora_id)
if not delta:
return
@ -277,7 +276,7 @@ class PlugVIP(BaseNetworkTask):

if isinstance(result, failure.Failure):
return
LOG.warning(_LW("Unable to plug VIP for loadbalancer id %s"),
LOG.warning("Unable to plug VIP for loadbalancer id %s",
loadbalancer.id)

try:
@ -291,9 +290,8 @@ class PlugVIP(BaseNetworkTask):

self.network_driver.unplug_vip(loadbalancer, loadbalancer.vip)
except Exception as e:
LOG.error(_LE("Failed to unplug VIP. Resources may still "
"be in use from vip: %(vip)s due to "
"error: %(except)s"),
LOG.error("Failed to unplug VIP. Resources may still "
"be in use from vip: %(vip)s due to error: %(except)s",
{'vip': loadbalancer.vip.ip_address, 'except': e})


@ -307,7 +305,7 @@ class UnplugVIP(BaseNetworkTask):
try:
self.network_driver.unplug_vip(loadbalancer, loadbalancer.vip)
except Exception:
LOG.exception(_LE("Unable to unplug vip from load balancer %s"),
LOG.exception("Unable to unplug vip from load balancer %s",
loadbalancer.id)


@ -328,16 +326,15 @@ class AllocateVIP(BaseNetworkTask):
"""Handle a failure to allocate vip."""

if isinstance(result, failure.Failure):
LOG.exception(_LE("Unable to allocate VIP"))
LOG.exception("Unable to allocate VIP")
return
vip = result
LOG.warning(_LW("Deallocating vip %s"), vip.ip_address)
LOG.warning("Deallocating vip %s", vip.ip_address)
try:
self.network_driver.deallocate_vip(vip)
except Exception as e:
LOG.error(_LE("Failed to deallocate VIP. Resources may still "
"be in use from vip: %(vip)s due to "
"error: %(except)s"),
LOG.error("Failed to deallocate VIP. Resources may still "
"be in use from vip: %(vip)s due to error: %(except)s",
{'vip': vip.ip_address, 'except': e})


@ -438,9 +435,8 @@ class PlugVIPPort(BaseNetworkTask):
vrrp_port = amphorae_network_config.get(amphora.id).vrrp_port
self.network_driver.unplug_port(amphora, vrrp_port)
except Exception:
LOG.warning(_LW('Failed to unplug vrrp port: {port} '
'from amphora: {amp}').format(port=vrrp_port.id,
amp=amphora.id))
LOG.warning(('Failed to unplug vrrp port: {port} from amphora: '
'{amp}').format(port=vrrp_port.id, amp=amphora.id))


class WaitForPortDetach(BaseNetworkTask):
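The network task revert paths above rely on LOG.exception, which logs at ERROR severity and appends the active traceback, so no explicit exception argument (and, after this patch, no translation marker) is needed. A small runnable sketch, with a made-up failure:

import logging

logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)

def unplug_network():
    raise OSError("nic busy")  # illustrative failure

try:
    unplug_network()
except Exception:
    # Must be called from an exception handler; the traceback is implicit.
    LOG.exception("Unable to unplug network")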
@ -42,7 +42,7 @@ def add_alembic_subparser(sub, cmd):

def do_upgrade(config, cmd):
if not CONF.command.revision and not CONF.command.delta:
raise SystemExit('You must provide a revision or relative delta')
raise SystemExit(_('You must provide a revision or relative delta'))

revision = CONF.command.revision or ''
if '-' in revision:
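The migration CLI hunk shows the flip side of the policy: log messages lose their markers, but user-facing exception text gains _() wrapping, which the new O342 check enforces. A sketch using gettext as a stand-in for octavia's _ helper:

import gettext

_ = gettext.gettext  # stand-in for octavia.i18n._

def do_upgrade(revision=None, delta=None):
    if not revision and not delta:
        # Exception messages stay translatable, unlike log messages.
        raise SystemExit(_('You must provide a revision or relative delta'))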
@ -31,8 +31,6 @@ from octavia.common import data_models
from octavia.common import exceptions
from octavia.common import validate
from octavia.db import models
from octavia.i18n import _LE, _LW


CONF = cfg.CONF

@ -407,8 +405,8 @@ class Repositories(object):
else:
return True
except db_exception.DBDeadlock:
LOG.warning(_LW('Quota project lock timed out for project: '
'{proj}').format(proj=project_id))
LOG.warning(('Quota project lock timed out for project: '
'{proj}').format(proj=project_id))
raise exceptions.ProjectBusyException()
return False

@ -431,10 +429,10 @@ class Repositories(object):
project_id=project_id).with_for_update().first()
if not quotas:
if not CONF.auth_strategy == consts.NOAUTH:
LOG.error(_LE(
'Quota decrement on {clss} called on project: {proj} '
'with no quota record in the database.').format(
clss=type(_class), proj=project_id))
LOG.error('Quota decrement on {clss} called on project: '
'{proj} with no quota record in the '
'database.'.format(clss=type(_class),
proj=project_id))
return
if _class == data_models.LoadBalancer:
if (quotas.in_use_load_balancer is not None and
@ -443,11 +441,10 @@ class Repositories(object):
quotas.in_use_load_balancer - quantity)
else:
if not CONF.auth_strategy == consts.NOAUTH:
LOG.warning(_LW(
'Quota decrement on {clss} called on project: '
'{proj} that would cause a negative '
'quota.').format(clss=type(_class),
proj=project_id))
LOG.warning('Quota decrement on {clss} called on '
'project: {proj} that would cause a '
'negative quota.'.format(clss=type(_class),
proj=project_id))
if _class == data_models.Listener:
if (quotas.in_use_listener is not None and
quotas.in_use_listener > 0):
@ -455,11 +452,10 @@ class Repositories(object):
quotas.in_use_listener - quantity)
else:
if not CONF.auth_strategy == consts.NOAUTH:
LOG.warning(_LW(
'Quota decrement on {clss} called on project: '
'{proj} that would cause a negative '
'quota.').format(clss=type(_class),
proj=project_id))
LOG.warning('Quota decrement on {clss} called on '
'project: {proj} that would cause a '
'negative quota.'.format(clss=type(_class),
proj=project_id))
if _class == data_models.Pool:
if (quotas.in_use_pool is not None and
quotas.in_use_pool > 0):
@ -467,11 +463,10 @@ class Repositories(object):
quotas.in_use_pool - quantity)
else:
if not CONF.auth_strategy == consts.NOAUTH:
LOG.warning(_LW(
'Quota decrement on {clss} called on project: '
'{proj} that would cause a negative '
'quota.').format(clss=type(_class),
proj=project_id))
LOG.warning('Quota decrement on {clss} called on '
'project: {proj} that would cause a '
'negative quota.'.format(clss=type(_class),
proj=project_id))
if _class == data_models.HealthMonitor:
if (quotas.in_use_health_monitor is not None and
quotas.in_use_health_monitor > 0):
@ -479,11 +474,10 @@ class Repositories(object):
quotas.in_use_health_monitor - quantity)
else:
if not CONF.auth_strategy == consts.NOAUTH:
LOG.warning(_LW(
'Quota decrement on {clss} called on project: '
'{proj} that would cause a negative '
'quota.').format(clss=type(_class),
proj=project_id))
LOG.warning('Quota decrement on {clss} called on '
'project: {proj} that would cause a '
'negative quota.'.format(clss=type(_class),
proj=project_id))
if _class == data_models.Member:
if (quotas.in_use_member is not None and
quotas.in_use_member > 0):
@ -491,14 +485,13 @@ class Repositories(object):
quotas.in_use_member - quantity)
else:
if not CONF.auth_strategy == consts.NOAUTH:
LOG.warning(_LW(
'Quota decrement on {clss} called on project: '
'{proj} that would cause a negative '
'quota.').format(clss=type(_class),
proj=project_id))
LOG.warning('Quota decrement on {clss} called on '
'project: {proj} that would cause a '
'negative quota.'.format(clss=type(_class),
proj=project_id))
except db_exception.DBDeadlock:
LOG.warning(_LW('Quota project lock timed out for project: '
'{proj}').format(proj=project_id))
LOG.warning(('Quota project lock timed out for project: '
'{proj}').format(proj=project_id))
raise exceptions.ProjectBusyException()

def create_load_balancer_tree(self, session, lock_session, lb_dict):
@ -14,7 +14,6 @@
|
||||
|
||||
import re
|
||||
|
||||
import pep8
|
||||
|
||||
"""
|
||||
Guidelines for writing new hacking checks
|
||||
@ -31,28 +30,17 @@ Guidelines for writing new hacking checks
|
||||
|
||||
"""
|
||||
|
||||
log_translation = re.compile(
|
||||
r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)\(\s*('|\")")
|
||||
author_tag_re = (re.compile("^\s*#\s*@?(a|A)uthor"),
|
||||
re.compile("^\.\.\s+moduleauthor::"))
|
||||
_all_hints = set(['_', '_LI', '_LE', '_LW', '_LC'])
|
||||
_all_log_levels = {
|
||||
# NOTE(yamamoto): Following nova which uses _() for audit.
|
||||
'audit': '_',
|
||||
'error': '_LE',
|
||||
'info': '_LI',
|
||||
'warn': '_LW',
|
||||
'warning': '_LW',
|
||||
'critical': '_LC',
|
||||
'exception': '_LE',
|
||||
}
|
||||
log_translation_hints = []
|
||||
for level, hint in _all_log_levels.items():
|
||||
r = "(.)*LOG\.%(level)s\(\s*((%(wrong_hints)s)\(|'|\")" % {
|
||||
'level': level,
|
||||
'wrong_hints': '|'.join(_all_hints - set([hint])),
|
||||
}
|
||||
log_translation_hints.append(re.compile(r))
|
||||
|
||||
_all_log_levels = {'critical', 'error', 'exception', 'info', 'warning'}
|
||||
_all_hints = {'_LC', '_LE', '_LI', '_', '_LW'}
|
||||
|
||||
_log_translation_hint = re.compile(
|
||||
r".*LOG\.(%(levels)s)\(\s*(%(hints)s)\(" % {
|
||||
'levels': '|'.join(_all_log_levels),
|
||||
'hints': '|'.join(_all_hints),
|
||||
})
|
||||
|
||||
assert_trueinst_re = re.compile(
|
||||
r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, "
|
||||
@ -76,8 +64,9 @@ assert_no_xrange_re = re.compile(
|
||||
r"\s*xrange\s*\(")
|
||||
|
||||
|
||||
def _directory_to_check_translation(filename):
|
||||
return True
|
||||
def _translation_checks_not_enforced(filename):
|
||||
# Do not do these validations on tests
|
||||
return any(pat in filename for pat in ["/tests/", "rally-jobs/plugins/"])
|
||||
|
||||
|
||||
def assert_true_instance(logical_line):
|
@ -106,38 +95,6 @@ def assert_equal_or_not_none(logical_line):
        yield (0, msg)


def no_translate_debug_logs(logical_line, filename):
    """Check for 'LOG.debug(_('

    As per our translation policy,
    https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
    we shouldn't translate debug level logs.

    * This check assumes that 'LOG' is a logger.
    O319
    """
    if _directory_to_check_translation(filename) and logical_line.startswith(
            "LOG.debug(_("):
        yield(0, "O319 Don't translate debug level logs")


def validate_log_translations(logical_line, physical_line, filename):
    # Translations are not required in the test directory
    if "octavia/tests" in filename:
        return
    if pep8.noqa(physical_line):
        return
    msg = "O320: Log messages require translations!"
    if log_translation.match(logical_line):
        yield (0, msg)

    if _directory_to_check_translation(filename):
        msg = "O320: Log messages require translation hints!"
        for log_translation_hint in log_translation_hints:
            if log_translation_hint.match(logical_line):
                yield (0, msg)
def use_jsonutils(logical_line, filename):
    msg = "O321: jsonutils.%(fun)s must be used instead of json.%(fun)s"

@ -219,11 +176,57 @@ def no_xrange(logical_line):
        yield(0, "O340: Do not use xrange().")
def no_translate_logs(logical_line, filename):
    """O341 - Don't translate logs.

    Check for 'LOG.*(_(' and 'LOG.*(_Lx('

    Translators don't provide translations for log messages, and operators
    asked not to translate them.

    * This check assumes that 'LOG' is a logger.

    :param logical_line: The logical line to check.
    :param filename: The file name where the logical line exists.
    :returns: None if the logical line passes the check, otherwise a tuple
        is yielded that contains the offending index in logical line and a
        message describing the check validation failure.
    """
    if _translation_checks_not_enforced(filename):
        return

    msg = "O341: Log messages should not be translated!"
    match = _log_translation_hint.match(logical_line)
    if match:
        yield (logical_line.index(match.group()), msg)
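As a rough usage sketch, the generator-style check can be driven by hand; the log line and file paths here are hypothetical:

# Driving the O341 check directly (inputs invented for illustration).
print(list(no_translate_logs(
    "LOG.warning(_LW('disk full'))", "octavia/common/utils.py")))
# -> [(0, "O341: Log messages should not be translated!")]

# Paths matching /tests/ are exempt via _translation_checks_not_enforced().
assert not list(no_translate_logs(
    "LOG.warning(_LW('disk full'))", "octavia/tests/unit/test_foo.py"))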
def check_raised_localized_exceptions(logical_line, filename):
    """O342 - Untranslated exception message.

    :param logical_line: The logical line to check.
    :param filename: The file name where the logical line exists.
    :returns: None if the logical line passes the check, otherwise a tuple
        is yielded that contains the offending index in logical line and a
        message describing the check validation failure.
    """
    if _translation_checks_not_enforced(filename):
        return

    logical_line = logical_line.strip()
    raised_search = re.compile(
        r"raise (?:\w*)\((.*)\)").match(logical_line)
    if raised_search:
        exception_msg = raised_search.groups()[0]
        if exception_msg.startswith("\"") or exception_msg.startswith("\'"):
            msg = "O342: Untranslated exception message."
            yield (logical_line.index(exception_msg), msg)
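Similarly, a small hand check shows what O342 flags; the exception class and file name are invented:

# A bare string literal in a raise is flagged ...
assert list(check_raised_localized_exceptions(
    "raise DriverError('it broke')", "octavia/network/driver.py"))
# ... while a _()-wrapped message (or no message at all) passes.
assert not list(check_raised_localized_exceptions(
    "raise DriverError(_('it broke'))", "octavia/network/driver.py"))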
def factory(register):
    register(assert_true_instance)
    register(assert_equal_or_not_none)
    register(no_translate_debug_logs)
    register(validate_log_translations)
    register(no_translate_logs)
    register(use_jsonutils)
    register(no_author_tags)
    register(assert_equal_true_or_false)
@ -231,3 +234,4 @@ def factory(register):
    register(assert_equal_in)
    register(no_log_warn)
    register(no_xrange)
    register(check_raised_localized_exceptions)
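These checks only take effect when flake8 picks up the factory; in OpenStack projects that is conventionally wired through the hacking plugin's local-check-factory hook in tox.ini (the Octavia configuration itself is outside this diff). As a minimal sketch of the contract, factory() simply hands every check to a register callback:

# Stub register callback: collect the checks the factory would register.
registered = []
factory(registered.append)
print(sorted(fn.__name__ for fn in registered))
# Expect 'no_translate_logs' and 'check_raised_localized_exceptions' in the
# output, in place of the removed O319/O320 validators.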
@ -23,7 +23,6 @@ import six
from octavia.common import clients
from octavia.common import constants
from octavia.common import data_models
from octavia.i18n import _LE, _LI, _LW
from octavia.network import base
from octavia.network import data_models as n_data_models
from octavia.network.drivers.neutron import base as neutron_base
@ -112,7 +111,7 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
        try:
            self._add_allowed_address_pair_to_port(port_id, vip_address)
        except neutron_client_exceptions.PortNotFoundClient as e:
            raise base.PortNotFound(e.message)
        except Exception:
            message = _('Error adding allowed address pair {ip} '
                        'to port {port_id}.').format(ip=vip_address,
@ -128,7 +127,7 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
    def _get_ethertype_for_ip(self, ip):
        address = ipaddress.ip_address(
            ip if six.text_type == type(ip) else six.u(ip))
            ip if isinstance(ip, six.text_type) else six.u(ip))
        return 'IPv6' if address.version is 6 else 'IPv4'

    def _update_security_group_rules(self, load_balancer, sec_grp_id):
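The switch to isinstance() also accepts subclasses of six.text_type, which the old type() equality test rejected; a minimal illustration, assuming six is importable:

# isinstance() matches subclasses; type() equality does not.
import six

class MyText(six.text_type):
    pass

assert isinstance(MyText(u'::1'), six.text_type)
assert type(MyText(u'::1')) != six.text_type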
@ -222,15 +221,15 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
        while attempts <= CONF.networking.max_retries:
            try:
                self.neutron_client.delete_security_group(sec_grp)
                LOG.info(_LI("Deleted security group %s"), sec_grp)
                LOG.info("Deleted security group %s", sec_grp)
                return
            except neutron_client_exceptions.NotFound:
                LOG.info(_LI("Security group %s not found, will assume it is "
                             "already deleted"), sec_grp)
                LOG.info("Security group %s not found, will assume it is "
                         "already deleted", sec_grp)
                return
            except Exception:
                LOG.warning(_LW("Attempt %(attempt)s to remove security group "
                                "%(sg)s failed."),
                LOG.warning("Attempt %(attempt)s to remove security group "
                            "%(sg)s failed.",
                            {'attempt': attempts + 1, 'sg': sec_grp})
            attempts += 1
            time.sleep(CONF.networking.retry_interval)
@ -249,7 +248,7 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
        if sec_grp:
            sec_grp = sec_grp.get('id')
            LOG.info(
                _LI("Removing security group %(sg)s from port %(port)s"),
                "Removing security group %(sg)s from port %(port)s",
                {'sg': sec_grp, 'port': vip.port_id})
            raw_port = self.neutron_client.show_port(port.id)
            sec_grps = raw_port.get('port', {}).get('security_groups', [])
@ -291,8 +290,8 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
            LOG.exception(message)
            raise base.DeallocateVIPException(message)
        else:
            LOG.info(_LI("Port %s will not be deleted by Octavia as it was "
                         "not created by Octavia."), vip.port_id)
            LOG.info("Port %s will not be deleted by Octavia as it was "
                     "not created by Octavia.", vip.port_id)

    def plug_vip(self, load_balancer, vip):
        if self.sec_grp_enabled:
@ -330,7 +329,7 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
    def allocate_vip(self, load_balancer):
        if load_balancer.vip.port_id:
            LOG.info(_LI('Port %s already exists. Nothing to be done.'),
            LOG.info('Port %s already exists. Nothing to be done.',
                     load_balancer.vip.port_id)
            port = self.get_port(load_balancer.vip.port_id)
            return self._port_to_vip(port, load_balancer)
@ -356,8 +355,8 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
        try:
            subnet = self.get_subnet(vip.subnet_id)
        except base.SubnetNotFound:
            msg = _LE("Can't unplug vip because vip subnet {0} was not "
                      "found").format(vip.subnet_id)
            msg = ("Can't unplug vip because vip subnet {0} was not "
                   "found").format(vip.subnet_id)
            LOG.exception(msg)
            raise base.PluggedVIPNotFound(msg)
        for amphora in six.moves.filter(
@ -369,7 +368,7 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
            if not interface:
                # Thought about raising PluggedVIPNotFound exception but
                # then that wouldn't evaluate all amphorae, so just continue
                LOG.debug(_LI('Cannot get amphora %s interface, skipped'),
                LOG.debug('Cannot get amphora %s interface, skipped',
                          amphora.compute_id)
                continue
            try:
@ -397,9 +396,8 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
        except base.PortNotFound:
            pass
        except Exception as e:
            LOG.error(_LE('Failed to delete port. Resources may still '
                          'be in use for port: %(port)s due to '
                          'error: %s(except)s'),
            LOG.error('Failed to delete port. Resources may still be in '
                      'use for port: %(port)s due to error: %s(except)s',
                      {'port': amphora.vrrp_port_id, 'except': e})

    def plug_network(self, compute_id, network_id, ip_address=None):
@ -501,8 +499,8 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
            else:
                raise base.PlugNetworkException(e.message)
        except nova_client_exceptions.Conflict:
            LOG.info(_LI('Port %(portid)s is already plugged, '
                         'skipping') % {'portid': port.id})
            LOG.info('Port %(portid)s is already plugged, '
                     'skipping' % {'portid': port.id})
            plugged_interface = n_data_models.Interface(
                compute_id=amphora.compute_id,
                network_id=port.network_id,
@ -594,4 +592,4 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):

        except (neutron_client_exceptions.NotFound,
                neutron_client_exceptions.PortNotFoundClient):
            pass