Remove log translations from octavia
Log messages are no longer being translated. This removes all use of the _LE, _LI, and _LW translation markers to simplify logging and to avoid confusion with new contributions. This patch also adds hacking rules for the translation tags. See: http://lists.openstack.org/pipermail/openstack-i18n/2016-November/002574.html http://lists.openstack.org/pipermail/openstack-dev/2017-March/113365.html Co-Authored-By: Michael Johnson <johnsomor@gmail.com> Change-Id: Ic95111d09e38b3f44fd6c85d0bcf0355c21ef545
This commit is contained in:
parent
7829ed96f6
commit
dc882e9d27
@ -13,7 +13,6 @@ Octavia Specific Commandments
|
|||||||
- [O318] Change assert(Not)Equal(A, None) or assert(Not)Equal(None, A)
|
- [O318] Change assert(Not)Equal(A, None) or assert(Not)Equal(None, A)
|
||||||
by optimal assert like assertIs(Not)None(A).
|
by optimal assert like assertIs(Not)None(A).
|
||||||
- [O319] Validate that debug level logs are not translated.
|
- [O319] Validate that debug level logs are not translated.
|
||||||
- [O320] Validate that LOG messages, except debug ones, have translations
|
|
||||||
- [O321] Validate that jsonutils module is used instead of json
|
- [O321] Validate that jsonutils module is used instead of json
|
||||||
- [O322] Don't use author tags
|
- [O322] Don't use author tags
|
||||||
- [O323] Change assertEqual(True, A) or assertEqual(False, A) to the more
|
- [O323] Change assertEqual(True, A) or assertEqual(False, A) to the more
|
||||||
@ -24,6 +23,8 @@ Octavia Specific Commandments
|
|||||||
specific assertIn/NotIn(A, B)
|
specific assertIn/NotIn(A, B)
|
||||||
- [O339] LOG.warn() is not allowed. Use LOG.warning()
|
- [O339] LOG.warn() is not allowed. Use LOG.warning()
|
||||||
- [O340] Don't use xrange()
|
- [O340] Don't use xrange()
|
||||||
|
- [O341] Don't translate logs.
|
||||||
|
- [O342] Exception messages should be translated
|
||||||
|
|
||||||
Creating Unit Tests
|
Creating Unit Tests
|
||||||
-------------------
|
-------------------
|
||||||
|
@ -33,7 +33,6 @@ from octavia.amphorae.backends.agent.api_server import util
|
|||||||
from octavia.amphorae.backends.utils import haproxy_query as query
|
from octavia.amphorae.backends.utils import haproxy_query as query
|
||||||
from octavia.common import constants as consts
|
from octavia.common import constants as consts
|
||||||
from octavia.common import utils as octavia_utils
|
from octavia.common import utils as octavia_utils
|
||||||
from octavia.i18n import _LE
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
BUFFER = 100
|
BUFFER = 100
|
||||||
@ -136,7 +135,7 @@ class Listener(object):
|
|||||||
try:
|
try:
|
||||||
subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
|
subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
|
||||||
except subprocess.CalledProcessError as e:
|
except subprocess.CalledProcessError as e:
|
||||||
LOG.error(_LE("Failed to verify haproxy file: %s"), e)
|
LOG.error("Failed to verify haproxy file: %s", e)
|
||||||
os.remove(name) # delete file
|
os.remove(name) # delete file
|
||||||
return flask.make_response(flask.jsonify(dict(
|
return flask.make_response(flask.jsonify(dict(
|
||||||
message="Invalid request",
|
message="Invalid request",
|
||||||
@ -166,7 +165,7 @@ class Listener(object):
|
|||||||
raise util.UnknownInitError()
|
raise util.UnknownInitError()
|
||||||
|
|
||||||
except util.UnknownInitError:
|
except util.UnknownInitError:
|
||||||
LOG.error(_LE("Unknown init system found."))
|
LOG.error("Unknown init system found.")
|
||||||
return flask.make_response(flask.jsonify(dict(
|
return flask.make_response(flask.jsonify(dict(
|
||||||
message="Unknown init system in amphora",
|
message="Unknown init system in amphora",
|
||||||
details="The amphora image is running an unknown init "
|
details="The amphora image is running an unknown init "
|
||||||
@ -203,8 +202,7 @@ class Listener(object):
|
|||||||
subprocess.check_output(init_enable_cmd.split(),
|
subprocess.check_output(init_enable_cmd.split(),
|
||||||
stderr=subprocess.STDOUT)
|
stderr=subprocess.STDOUT)
|
||||||
except subprocess.CalledProcessError as e:
|
except subprocess.CalledProcessError as e:
|
||||||
LOG.error(_LE("Failed to enable haproxy-%(list)s "
|
LOG.error("Failed to enable haproxy-%(list)s service: %(err)s",
|
||||||
"service: %(err)s"),
|
|
||||||
{'list': listener_id, 'err': e})
|
{'list': listener_id, 'err': e})
|
||||||
return flask.make_response(flask.jsonify(dict(
|
return flask.make_response(flask.jsonify(dict(
|
||||||
message="Error enabling haproxy-{0} service".format(
|
message="Error enabling haproxy-{0} service".format(
|
||||||
@ -276,7 +274,7 @@ class Listener(object):
|
|||||||
try:
|
try:
|
||||||
subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
|
subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
|
||||||
except subprocess.CalledProcessError as e:
|
except subprocess.CalledProcessError as e:
|
||||||
LOG.error(_LE("Failed to stop HAProxy service: %s"), e)
|
LOG.error("Failed to stop HAProxy service: %s", e)
|
||||||
return flask.make_response(flask.jsonify(dict(
|
return flask.make_response(flask.jsonify(dict(
|
||||||
message="Error stopping haproxy",
|
message="Error stopping haproxy",
|
||||||
details=e.output)), 500)
|
details=e.output)), 500)
|
||||||
@ -311,9 +309,8 @@ class Listener(object):
|
|||||||
subprocess.check_output(init_disable_cmd.split(),
|
subprocess.check_output(init_disable_cmd.split(),
|
||||||
stderr=subprocess.STDOUT)
|
stderr=subprocess.STDOUT)
|
||||||
except subprocess.CalledProcessError as e:
|
except subprocess.CalledProcessError as e:
|
||||||
LOG.error(_LE("Failed to disable haproxy-%(list)s "
|
LOG.error("Failed to disable haproxy-%(list)s service: "
|
||||||
"service: %(err)s"),
|
"%(err)s", {'list': listener_id, 'err': e})
|
||||||
{'list': listener_id, 'err': e})
|
|
||||||
return flask.make_response(flask.jsonify(dict(
|
return flask.make_response(flask.jsonify(dict(
|
||||||
message="Error disabling haproxy-{0} service".format(
|
message="Error disabling haproxy-{0} service".format(
|
||||||
listener_id), details=e.output)), 500)
|
listener_id), details=e.output)), 500)
|
||||||
|
@ -29,7 +29,6 @@ from werkzeug import exceptions
|
|||||||
from octavia.common import constants as consts
|
from octavia.common import constants as consts
|
||||||
from octavia.common import exceptions as octavia_exceptions
|
from octavia.common import exceptions as octavia_exceptions
|
||||||
from octavia.common import utils
|
from octavia.common import utils
|
||||||
from octavia.i18n import _LE
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
|
|
||||||
@ -144,12 +143,11 @@ class BaseOS(object):
|
|||||||
try:
|
try:
|
||||||
ip_addr = fixed_ip['ip_address']
|
ip_addr = fixed_ip['ip_address']
|
||||||
cidr = fixed_ip['subnet_cidr']
|
cidr = fixed_ip['subnet_cidr']
|
||||||
ip = ipaddress.ip_address(
|
ip = ipaddress.ip_address(ip_addr if isinstance(
|
||||||
ip_addr if six.text_type == type(
|
ip_addr, six.text_type) else six.u(ip_addr))
|
||||||
ip_addr) else six.u(ip_addr))
|
|
||||||
network = ipaddress.ip_network(
|
network = ipaddress.ip_network(
|
||||||
cidr if six.text_type == type(
|
cidr if isinstance(
|
||||||
cidr) else six.u(cidr))
|
cidr, six.text_type) else six.u(cidr))
|
||||||
broadcast = network.broadcast_address.exploded
|
broadcast = network.broadcast_address.exploded
|
||||||
netmask = (network.prefixlen if ip.version is 6
|
netmask = (network.prefixlen if ip.version is 6
|
||||||
else network.netmask.exploded)
|
else network.netmask.exploded)
|
||||||
@ -186,8 +184,8 @@ class BaseOS(object):
|
|||||||
try:
|
try:
|
||||||
subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
|
subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
|
||||||
except subprocess.CalledProcessError as e:
|
except subprocess.CalledProcessError as e:
|
||||||
LOG.error(_LE('Failed to if up {0} due to '
|
LOG.error('Failed to if up {0} due to '
|
||||||
'error: {1}').format(interface, str(e)))
|
'error: {1}'.format(interface, str(e)))
|
||||||
raise exceptions.HTTPException(
|
raise exceptions.HTTPException(
|
||||||
response=flask.make_response(flask.jsonify(dict(
|
response=flask.make_response(flask.jsonify(dict(
|
||||||
message='Error plugging {0}'.format(what),
|
message='Error plugging {0}'.format(what),
|
||||||
|
@ -29,7 +29,6 @@ import six
|
|||||||
from werkzeug import exceptions
|
from werkzeug import exceptions
|
||||||
|
|
||||||
from octavia.common import constants as consts
|
from octavia.common import constants as consts
|
||||||
from octavia.i18n import _LI
|
|
||||||
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
@ -55,9 +54,9 @@ class Plug(object):
|
|||||||
try:
|
try:
|
||||||
render_host_routes = []
|
render_host_routes = []
|
||||||
ip = ipaddress.ip_address(
|
ip = ipaddress.ip_address(
|
||||||
vip if six.text_type == type(vip) else six.u(vip))
|
vip if isinstance(vip, six.text_type) else six.u(vip))
|
||||||
network = ipaddress.ip_network(
|
network = ipaddress.ip_network(
|
||||||
subnet_cidr if six.text_type == type(subnet_cidr)
|
subnet_cidr if isinstance(subnet_cidr, six.text_type)
|
||||||
else six.u(subnet_cidr))
|
else six.u(subnet_cidr))
|
||||||
vip = ip.exploded
|
vip = ip.exploded
|
||||||
broadcast = network.broadcast_address.exploded
|
broadcast = network.broadcast_address.exploded
|
||||||
@ -66,7 +65,7 @@ class Plug(object):
|
|||||||
vrrp_version = None
|
vrrp_version = None
|
||||||
if vrrp_ip:
|
if vrrp_ip:
|
||||||
vrrp_ip_obj = ipaddress.ip_address(
|
vrrp_ip_obj = ipaddress.ip_address(
|
||||||
vrrp_ip if six.text_type == type(vrrp_ip)
|
vrrp_ip if isinstance(vrrp_ip, six.text_type)
|
||||||
else six.u(vrrp_ip)
|
else six.u(vrrp_ip)
|
||||||
)
|
)
|
||||||
vrrp_version = vrrp_ip_obj.version
|
vrrp_version = vrrp_ip_obj.version
|
||||||
@ -184,10 +183,10 @@ class Plug(object):
|
|||||||
# Note, eth0 is skipped because that is the VIP interface
|
# Note, eth0 is skipped because that is the VIP interface
|
||||||
netns_interface = 'eth{0}'.format(len(netns.get_links()))
|
netns_interface = 'eth{0}'.format(len(netns.get_links()))
|
||||||
|
|
||||||
LOG.info(_LI('Plugged interface {0} will become {1} in the '
|
LOG.info('Plugged interface {0} will become {1} in the '
|
||||||
'namespace {2}').format(default_netns_interface,
|
'namespace {2}'.format(default_netns_interface,
|
||||||
netns_interface,
|
netns_interface,
|
||||||
consts.AMPHORA_NAMESPACE))
|
consts.AMPHORA_NAMESPACE))
|
||||||
interface_file_path = self._osutils.get_network_interface_file(
|
interface_file_path = self._osutils.get_network_interface_file(
|
||||||
netns_interface)
|
netns_interface)
|
||||||
self._osutils.write_port_interface_file(
|
self._osutils.write_port_interface_file(
|
||||||
|
@ -24,7 +24,6 @@ import six
|
|||||||
from octavia.amphorae.backends.agent.api_server import util
|
from octavia.amphorae.backends.agent.api_server import util
|
||||||
from octavia.amphorae.backends.health_daemon import health_sender
|
from octavia.amphorae.backends.health_daemon import health_sender
|
||||||
from octavia.amphorae.backends.utils import haproxy_query
|
from octavia.amphorae.backends.utils import haproxy_query
|
||||||
from octavia.i18n import _LI
|
|
||||||
|
|
||||||
if six.PY2:
|
if six.PY2:
|
||||||
import Queue as queue
|
import Queue as queue
|
||||||
@ -48,7 +47,7 @@ def list_sock_stat_files(hadir=None):
|
|||||||
|
|
||||||
|
|
||||||
def run_sender(cmd_queue):
|
def run_sender(cmd_queue):
|
||||||
LOG.info(_LI('Health Manager Sender starting.'))
|
LOG.info('Health Manager Sender starting.')
|
||||||
sender = health_sender.UDPStatusSender()
|
sender = health_sender.UDPStatusSender()
|
||||||
while True:
|
while True:
|
||||||
message = build_stats_message()
|
message = build_stats_message()
|
||||||
@ -56,10 +55,10 @@ def run_sender(cmd_queue):
|
|||||||
try:
|
try:
|
||||||
cmd = cmd_queue.get_nowait()
|
cmd = cmd_queue.get_nowait()
|
||||||
if cmd is 'reload':
|
if cmd is 'reload':
|
||||||
LOG.info(_LI('Reloading configuration'))
|
LOG.info('Reloading configuration')
|
||||||
CONF.reload_config_files()
|
CONF.reload_config_files()
|
||||||
elif cmd is 'shutdown':
|
elif cmd is 'shutdown':
|
||||||
LOG.info(_LI('Health Manager Sender shutting down.'))
|
LOG.info('Health Manager Sender shutting down.')
|
||||||
break
|
break
|
||||||
except queue.Empty:
|
except queue.Empty:
|
||||||
pass
|
pass
|
||||||
|
@ -18,7 +18,6 @@ from oslo_config import cfg
|
|||||||
from oslo_log import log as logging
|
from oslo_log import log as logging
|
||||||
|
|
||||||
from octavia.amphorae.backends.health_daemon import status_message
|
from octavia.amphorae.backends.health_daemon import status_message
|
||||||
from octavia.i18n import _LE
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
@ -39,9 +38,8 @@ class UDPStatusSender(object):
|
|||||||
try:
|
try:
|
||||||
ip, port = ipport.rsplit(':', 1)
|
ip, port = ipport.rsplit(':', 1)
|
||||||
except ValueError:
|
except ValueError:
|
||||||
LOG.error(_LE("Invalid ip and port '%s' in "
|
LOG.error("Invalid ip and port '%s' in health_manager "
|
||||||
"health_manager controller_ip_port_list"),
|
"controller_ip_port_list", ipport)
|
||||||
ipport)
|
|
||||||
break
|
break
|
||||||
self.update(ip, port)
|
self.update(ip, port)
|
||||||
self.v4sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
|
self.v4sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
|
||||||
@ -62,8 +60,7 @@ class UDPStatusSender(object):
|
|||||||
# dest = (family, socktype, proto, canonname, sockaddr)
|
# dest = (family, socktype, proto, canonname, sockaddr)
|
||||||
# e.g. 0 = sock family, 4 = sockaddr - what we actually need
|
# e.g. 0 = sock family, 4 = sockaddr - what we actually need
|
||||||
if addrinfo is None:
|
if addrinfo is None:
|
||||||
LOG.error(_LE('No controller address found. '
|
LOG.error('No controller address found. Unable to send heartbeat.')
|
||||||
'Unable to send heartbeat.'))
|
|
||||||
return
|
return
|
||||||
try:
|
try:
|
||||||
if addrinfo[0] == socket.AF_INET:
|
if addrinfo[0] == socket.AF_INET:
|
||||||
|
@ -22,7 +22,6 @@ from oslo_log import log as logging
|
|||||||
from oslo_utils import secretutils
|
from oslo_utils import secretutils
|
||||||
|
|
||||||
from octavia.common import exceptions
|
from octavia.common import exceptions
|
||||||
from octavia.i18n import _LW
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
@ -58,9 +57,9 @@ def unwrap_envelope(envelope, key):
|
|||||||
expected_hmc = envelope[-hash_len:]
|
expected_hmc = envelope[-hash_len:]
|
||||||
calculated_hmc = get_hmac(payload, key)
|
calculated_hmc = get_hmac(payload, key)
|
||||||
if not secretutils.constant_time_compare(expected_hmc, calculated_hmc):
|
if not secretutils.constant_time_compare(expected_hmc, calculated_hmc):
|
||||||
LOG.warning(_LW('calculated hmac: %(s1)s not equal to msg hmac: '
|
LOG.warning('calculated hmac: %(s1)s not equal to msg hmac: '
|
||||||
'%(s2)s dropping packet'), {'s1': to_hex(calculated_hmc),
|
'%(s2)s dropping packet', {'s1': to_hex(calculated_hmc),
|
||||||
's2': to_hex(expected_hmc)})
|
's2': to_hex(expected_hmc)})
|
||||||
fmt = 'calculated hmac: {0} not equal to msg hmac: {1} dropping packet'
|
fmt = 'calculated hmac: {0} not equal to msg hmac: {1} dropping packet'
|
||||||
raise exceptions.InvalidHMACException(fmt.format(
|
raise exceptions.InvalidHMACException(fmt.format(
|
||||||
to_hex(calculated_hmc), to_hex(expected_hmc)))
|
to_hex(calculated_hmc), to_hex(expected_hmc)))
|
||||||
|
@ -48,7 +48,7 @@ class HAProxyQuery(object):
|
|||||||
try:
|
try:
|
||||||
sock.connect(self.socket)
|
sock.connect(self.socket)
|
||||||
except socket.error:
|
except socket.error:
|
||||||
raise Exception("HAProxy '{0}' query failed.".format(query))
|
raise Exception(_("HAProxy '{0}' query failed.").format(query))
|
||||||
|
|
||||||
try:
|
try:
|
||||||
sock.send(six.b(query + '\n'))
|
sock.send(six.b(query + '\n'))
|
||||||
|
@ -32,7 +32,6 @@ from octavia.common import constants as consts
|
|||||||
from octavia.common.jinja.haproxy import jinja_cfg
|
from octavia.common.jinja.haproxy import jinja_cfg
|
||||||
from octavia.common.tls_utils import cert_parser
|
from octavia.common.tls_utils import cert_parser
|
||||||
from octavia.common import utils
|
from octavia.common import utils
|
||||||
from octavia.i18n import _LE, _LW
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
API_VERSION = consts.API_VERSION
|
API_VERSION = consts.API_VERSION
|
||||||
@ -135,9 +134,9 @@ class HaproxyAmphoraLoadBalancerDriver(
|
|||||||
load_balancer.vip.ip_address,
|
load_balancer.vip.ip_address,
|
||||||
net_info)
|
net_info)
|
||||||
except exc.Conflict:
|
except exc.Conflict:
|
||||||
LOG.warning(_LW('VIP with MAC {mac} already exists on '
|
LOG.warning(('VIP with MAC {mac} already exists on '
|
||||||
'amphora, skipping post_vip_plug').format(
|
'amphora, skipping post_vip_plug').format(
|
||||||
mac=port.mac_address))
|
mac=port.mac_address))
|
||||||
|
|
||||||
def post_network_plug(self, amphora, port):
|
def post_network_plug(self, amphora, port):
|
||||||
fixed_ips = []
|
fixed_ips = []
|
||||||
@ -155,9 +154,9 @@ class HaproxyAmphoraLoadBalancerDriver(
|
|||||||
try:
|
try:
|
||||||
self.client.plug_network(amphora, port_info)
|
self.client.plug_network(amphora, port_info)
|
||||||
except exc.Conflict:
|
except exc.Conflict:
|
||||||
LOG.warning(_LW('Network with MAC {mac} already exists on '
|
LOG.warning(('Network with MAC {mac} already exists on '
|
||||||
'amphora, skipping post_network_plug').format(
|
'amphora, skipping post_network_plug').format(
|
||||||
mac=port.mac_address))
|
mac=port.mac_address))
|
||||||
|
|
||||||
def get_vrrp_interface(self, amphora):
|
def get_vrrp_interface(self, amphora):
|
||||||
return self.client.get_interface(amphora, amphora.vrrp_ip)['interface']
|
return self.client.get_interface(amphora, amphora.vrrp_ip)['interface']
|
||||||
@ -288,12 +287,12 @@ class AmphoraAPIClient(object):
|
|||||||
return r
|
return r
|
||||||
except (requests.ConnectionError, requests.Timeout) as e:
|
except (requests.ConnectionError, requests.Timeout) as e:
|
||||||
exception = e
|
exception = e
|
||||||
LOG.warning(_LW("Could not connect to instance. Retrying."))
|
LOG.warning("Could not connect to instance. Retrying.")
|
||||||
time.sleep(CONF.haproxy_amphora.connection_retry_interval)
|
time.sleep(CONF.haproxy_amphora.connection_retry_interval)
|
||||||
|
|
||||||
LOG.error(_LE("Connection retries (currently set to %(max_retries)s) "
|
LOG.error("Connection retries (currently set to %(max_retries)s) "
|
||||||
"exhausted. The amphora is unavailable. Reason: "
|
"exhausted. The amphora is unavailable. Reason: "
|
||||||
"%(exception)s"),
|
"%(exception)s",
|
||||||
{'max_retries': CONF.haproxy_amphora.connection_max_retries,
|
{'max_retries': CONF.haproxy_amphora.connection_max_retries,
|
||||||
'exception': exception})
|
'exception': exception})
|
||||||
raise driver_except.TimeOutException()
|
raise driver_except.TimeOutException()
|
||||||
|
@ -21,8 +21,6 @@ from oslo_log import log as logging
|
|||||||
from octavia.amphorae.backends.health_daemon import status_message
|
from octavia.amphorae.backends.health_daemon import status_message
|
||||||
from octavia.common import exceptions
|
from octavia.common import exceptions
|
||||||
from octavia.db import repositories
|
from octavia.db import repositories
|
||||||
from octavia.i18n import _LI
|
|
||||||
|
|
||||||
|
|
||||||
UDP_MAX_SIZE = 64 * 1024
|
UDP_MAX_SIZE = 64 * 1024
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
@ -41,7 +39,7 @@ class UDPStatusGetter(object):
|
|||||||
self.ip = cfg.CONF.health_manager.bind_ip
|
self.ip = cfg.CONF.health_manager.bind_ip
|
||||||
self.port = cfg.CONF.health_manager.bind_port
|
self.port = cfg.CONF.health_manager.bind_port
|
||||||
self.sockaddr = None
|
self.sockaddr = None
|
||||||
LOG.info(_LI('attempting to listen on %(ip)s port %(port)s'),
|
LOG.info('attempting to listen on %(ip)s port %(port)s',
|
||||||
{'ip': self.ip, 'port': self.port})
|
{'ip': self.ip, 'port': self.port})
|
||||||
self.sock = None
|
self.sock = None
|
||||||
self.update(self.key, self.ip, self.port)
|
self.update(self.key, self.ip, self.port)
|
||||||
@ -68,7 +66,7 @@ class UDPStatusGetter(object):
|
|||||||
self.sock.bind(self.sockaddr)
|
self.sock.bind(self.sockaddr)
|
||||||
if cfg.CONF.health_manager.sock_rlimit > 0:
|
if cfg.CONF.health_manager.sock_rlimit > 0:
|
||||||
rlimit = cfg.CONF.health_manager.sock_rlimit
|
rlimit = cfg.CONF.health_manager.sock_rlimit
|
||||||
LOG.info(_LI("setting sock rlimit to %s"), rlimit)
|
LOG.info("setting sock rlimit to %s", rlimit)
|
||||||
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF,
|
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF,
|
||||||
rlimit)
|
rlimit)
|
||||||
break # just used the first addr getaddrinfo finds
|
break # just used the first addr getaddrinfo finds
|
||||||
|
@ -18,7 +18,6 @@ import six
|
|||||||
from octavia.amphorae.drivers import driver_base as driver_base
|
from octavia.amphorae.drivers import driver_base as driver_base
|
||||||
from octavia.amphorae.drivers.keepalived.jinja import jinja_cfg
|
from octavia.amphorae.drivers.keepalived.jinja import jinja_cfg
|
||||||
from octavia.common import constants
|
from octavia.common import constants
|
||||||
from octavia.i18n import _LI
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
API_VERSION = constants.API_VERSION
|
API_VERSION = constants.API_VERSION
|
||||||
@ -53,7 +52,7 @@ class KeepalivedAmphoraDriverMixin(driver_base.VRRPDriverMixin):
|
|||||||
|
|
||||||
:param loadbalancer: loadbalancer object
|
:param loadbalancer: loadbalancer object
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI("Stop loadbalancer %s amphora VRRP Service."),
|
LOG.info("Stop loadbalancer %s amphora VRRP Service.",
|
||||||
loadbalancer.id)
|
loadbalancer.id)
|
||||||
|
|
||||||
for amp in six.moves.filter(
|
for amp in six.moves.filter(
|
||||||
@ -67,7 +66,7 @@ class KeepalivedAmphoraDriverMixin(driver_base.VRRPDriverMixin):
|
|||||||
|
|
||||||
:param loadbalancer: loadbalancer object
|
:param loadbalancer: loadbalancer object
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI("Start loadbalancer %s amphora VRRP Service."),
|
LOG.info("Start loadbalancer %s amphora VRRP Service.",
|
||||||
loadbalancer.id)
|
loadbalancer.id)
|
||||||
|
|
||||||
for amp in six.moves.filter(
|
for amp in six.moves.filter(
|
||||||
@ -82,7 +81,7 @@ class KeepalivedAmphoraDriverMixin(driver_base.VRRPDriverMixin):
|
|||||||
|
|
||||||
:param loadbalancer: loadbalancer object
|
:param loadbalancer: loadbalancer object
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI("Reload loadbalancer %s amphora VRRP Service."),
|
LOG.info("Reload loadbalancer %s amphora VRRP Service.",
|
||||||
loadbalancer.id)
|
loadbalancer.id)
|
||||||
|
|
||||||
for amp in six.moves.filter(
|
for amp in six.moves.filter(
|
||||||
|
@ -29,8 +29,6 @@ from octavia.common import constants
|
|||||||
from octavia.common import data_models
|
from octavia.common import data_models
|
||||||
from octavia.db import api as db_api
|
from octavia.db import api as db_api
|
||||||
import octavia.db.repositories as repos
|
import octavia.db.repositories as repos
|
||||||
from octavia.i18n import _LI
|
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
ASYNC_TIME = 1
|
ASYNC_TIME = 1
|
||||||
@ -51,7 +49,7 @@ def simulate_controller(data_model, delete=False, update=False, create=False):
|
|||||||
|
|
||||||
def member_controller(member, delete=False, update=False, create=False):
|
def member_controller(member, delete=False, update=False, create=False):
|
||||||
time.sleep(ASYNC_TIME)
|
time.sleep(ASYNC_TIME)
|
||||||
LOG.info(_LI("Simulating controller operation for member..."))
|
LOG.info("Simulating controller operation for member...")
|
||||||
|
|
||||||
db_mem = None
|
db_mem = None
|
||||||
if delete:
|
if delete:
|
||||||
@ -83,12 +81,12 @@ def simulate_controller(data_model, delete=False, update=False, create=False):
|
|||||||
member.pool.load_balancer.id,
|
member.pool.load_balancer.id,
|
||||||
operating_status=constants.ONLINE,
|
operating_status=constants.ONLINE,
|
||||||
provisioning_status=constants.ACTIVE)
|
provisioning_status=constants.ACTIVE)
|
||||||
LOG.info(_LI("Simulated Controller Handler Thread Complete"))
|
LOG.info("Simulated Controller Handler Thread Complete")
|
||||||
|
|
||||||
def l7policy_controller(l7policy, delete=False, update=False,
|
def l7policy_controller(l7policy, delete=False, update=False,
|
||||||
create=False):
|
create=False):
|
||||||
time.sleep(ASYNC_TIME)
|
time.sleep(ASYNC_TIME)
|
||||||
LOG.info(_LI("Simulating controller operation for l7policy..."))
|
LOG.info("Simulating controller operation for l7policy...")
|
||||||
|
|
||||||
db_l7policy = None
|
db_l7policy = None
|
||||||
if delete:
|
if delete:
|
||||||
@ -110,11 +108,11 @@ def simulate_controller(data_model, delete=False, update=False, create=False):
|
|||||||
db_l7policy.listener.load_balancer.id,
|
db_l7policy.listener.load_balancer.id,
|
||||||
operating_status=constants.ONLINE,
|
operating_status=constants.ONLINE,
|
||||||
provisioning_status=constants.ACTIVE)
|
provisioning_status=constants.ACTIVE)
|
||||||
LOG.info(_LI("Simulated Controller Handler Thread Complete"))
|
LOG.info("Simulated Controller Handler Thread Complete")
|
||||||
|
|
||||||
def l7rule_controller(l7rule, delete=False, update=False, create=False):
|
def l7rule_controller(l7rule, delete=False, update=False, create=False):
|
||||||
time.sleep(ASYNC_TIME)
|
time.sleep(ASYNC_TIME)
|
||||||
LOG.info(_LI("Simulating controller operation for l7rule..."))
|
LOG.info("Simulating controller operation for l7rule...")
|
||||||
|
|
||||||
db_l7rule = None
|
db_l7rule = None
|
||||||
if delete:
|
if delete:
|
||||||
@ -135,12 +133,12 @@ def simulate_controller(data_model, delete=False, update=False, create=False):
|
|||||||
listener.load_balancer.id,
|
listener.load_balancer.id,
|
||||||
operating_status=constants.ONLINE,
|
operating_status=constants.ONLINE,
|
||||||
provisioning_status=constants.ACTIVE)
|
provisioning_status=constants.ACTIVE)
|
||||||
LOG.info(_LI("Simulated Controller Handler Thread Complete"))
|
LOG.info("Simulated Controller Handler Thread Complete")
|
||||||
|
|
||||||
def health_monitor_controller(health_monitor, delete=False, update=False,
|
def health_monitor_controller(health_monitor, delete=False, update=False,
|
||||||
create=False):
|
create=False):
|
||||||
time.sleep(ASYNC_TIME)
|
time.sleep(ASYNC_TIME)
|
||||||
LOG.info(_LI("Simulating controller operation for health monitor..."))
|
LOG.info("Simulating controller operation for health monitor...")
|
||||||
|
|
||||||
db_hm = None
|
db_hm = None
|
||||||
if delete:
|
if delete:
|
||||||
@ -182,11 +180,11 @@ def simulate_controller(data_model, delete=False, update=False, create=False):
|
|||||||
health_monitor.pool.load_balancer.id,
|
health_monitor.pool.load_balancer.id,
|
||||||
operating_status=constants.ONLINE,
|
operating_status=constants.ONLINE,
|
||||||
provisioning_status=constants.ACTIVE)
|
provisioning_status=constants.ACTIVE)
|
||||||
LOG.info(_LI("Simulated Controller Handler Thread Complete"))
|
LOG.info("Simulated Controller Handler Thread Complete")
|
||||||
|
|
||||||
def pool_controller(pool, delete=False, update=False, create=False):
|
def pool_controller(pool, delete=False, update=False, create=False):
|
||||||
time.sleep(ASYNC_TIME)
|
time.sleep(ASYNC_TIME)
|
||||||
LOG.info(_LI("Simulating controller operation for pool..."))
|
LOG.info("Simulating controller operation for pool...")
|
||||||
|
|
||||||
db_pool = None
|
db_pool = None
|
||||||
if delete:
|
if delete:
|
||||||
@ -218,12 +216,12 @@ def simulate_controller(data_model, delete=False, update=False, create=False):
|
|||||||
pool.load_balancer.id,
|
pool.load_balancer.id,
|
||||||
operating_status=constants.ONLINE,
|
operating_status=constants.ONLINE,
|
||||||
provisioning_status=constants.ACTIVE)
|
provisioning_status=constants.ACTIVE)
|
||||||
LOG.info(_LI("Simulated Controller Handler Thread Complete"))
|
LOG.info("Simulated Controller Handler Thread Complete")
|
||||||
|
|
||||||
def listener_controller(listener, delete=False, update=False,
|
def listener_controller(listener, delete=False, update=False,
|
||||||
create=False):
|
create=False):
|
||||||
time.sleep(ASYNC_TIME)
|
time.sleep(ASYNC_TIME)
|
||||||
LOG.info(_LI("Simulating controller operation for listener..."))
|
LOG.info("Simulating controller operation for listener...")
|
||||||
|
|
||||||
if delete:
|
if delete:
|
||||||
repo.listener.update(db_api.get_session(), listener.id,
|
repo.listener.update(db_api.get_session(), listener.id,
|
||||||
@ -244,12 +242,12 @@ def simulate_controller(data_model, delete=False, update=False, create=False):
|
|||||||
listener.load_balancer.id,
|
listener.load_balancer.id,
|
||||||
operating_status=constants.ONLINE,
|
operating_status=constants.ONLINE,
|
||||||
provisioning_status=constants.ACTIVE)
|
provisioning_status=constants.ACTIVE)
|
||||||
LOG.info(_LI("Simulated Controller Handler Thread Complete"))
|
LOG.info("Simulated Controller Handler Thread Complete")
|
||||||
|
|
||||||
def loadbalancer_controller(loadbalancer, delete=False, update=False,
|
def loadbalancer_controller(loadbalancer, delete=False, update=False,
|
||||||
create=False):
|
create=False):
|
||||||
time.sleep(ASYNC_TIME)
|
time.sleep(ASYNC_TIME)
|
||||||
LOG.info(_LI("Simulating controller operation for loadbalancer..."))
|
LOG.info("Simulating controller operation for loadbalancer...")
|
||||||
|
|
||||||
if delete:
|
if delete:
|
||||||
repo.load_balancer.update(
|
repo.load_balancer.update(
|
||||||
@ -266,7 +264,7 @@ def simulate_controller(data_model, delete=False, update=False, create=False):
|
|||||||
repo.load_balancer.update(db_api.get_session(), id=loadbalancer.id,
|
repo.load_balancer.update(db_api.get_session(), id=loadbalancer.id,
|
||||||
operating_status=constants.ONLINE,
|
operating_status=constants.ONLINE,
|
||||||
provisioning_status=constants.ACTIVE)
|
provisioning_status=constants.ACTIVE)
|
||||||
LOG.info(_LI("Simulated Controller Handler Thread Complete"))
|
LOG.info("Simulated Controller Handler Thread Complete")
|
||||||
|
|
||||||
controller = loadbalancer_controller
|
controller = loadbalancer_controller
|
||||||
if isinstance(data_model, data_models.Member):
|
if isinstance(data_model, data_models.Member):
|
||||||
@ -294,22 +292,19 @@ class InvalidHandlerInputObject(Exception):
|
|||||||
class LoadBalancerHandler(abstract_handler.BaseObjectHandler):
|
class LoadBalancerHandler(abstract_handler.BaseObjectHandler):
|
||||||
|
|
||||||
def create(self, load_balancer_id):
|
def create(self, load_balancer_id):
|
||||||
LOG.info(_LI("%(entity)s handling the creation of "
|
LOG.info("%(entity)s handling the creation of load balancer %(id)s",
|
||||||
"load balancer %(id)s"),
|
|
||||||
{"entity": self.__class__.__name__, "id": load_balancer_id})
|
{"entity": self.__class__.__name__, "id": load_balancer_id})
|
||||||
simulate_controller(load_balancer_id, create=True)
|
simulate_controller(load_balancer_id, create=True)
|
||||||
|
|
||||||
def update(self, old_lb, load_balancer):
|
def update(self, old_lb, load_balancer):
|
||||||
validate_input(data_models.LoadBalancer, load_balancer)
|
validate_input(data_models.LoadBalancer, load_balancer)
|
||||||
LOG.info(_LI("%(entity)s handling the update of "
|
LOG.info("%(entity)s handling the update of load balancer %(id)s",
|
||||||
"load balancer %(id)s"),
|
|
||||||
{"entity": self.__class__.__name__, "id": old_lb.id})
|
{"entity": self.__class__.__name__, "id": old_lb.id})
|
||||||
load_balancer.id = old_lb.id
|
load_balancer.id = old_lb.id
|
||||||
simulate_controller(load_balancer, update=True)
|
simulate_controller(load_balancer, update=True)
|
||||||
|
|
||||||
def delete(self, load_balancer_id):
|
def delete(self, load_balancer_id):
|
||||||
LOG.info(_LI("%(entity)s handling the deletion of "
|
LOG.info("%(entity)s handling the deletion of load balancer %(id)s",
|
||||||
"load balancer %(id)s"),
|
|
||||||
{"entity": self.__class__.__name__, "id": load_balancer_id})
|
{"entity": self.__class__.__name__, "id": load_balancer_id})
|
||||||
simulate_controller(load_balancer_id, delete=True)
|
simulate_controller(load_balancer_id, delete=True)
|
||||||
|
|
||||||
@ -317,19 +312,19 @@ class LoadBalancerHandler(abstract_handler.BaseObjectHandler):
|
|||||||
class ListenerHandler(abstract_handler.BaseObjectHandler):
|
class ListenerHandler(abstract_handler.BaseObjectHandler):
|
||||||
|
|
||||||
def create(self, listener_id):
|
def create(self, listener_id):
|
||||||
LOG.info(_LI("%(entity)s handling the creation of listener %(id)s"),
|
LOG.info("%(entity)s handling the creation of listener %(id)s",
|
||||||
{"entity": self.__class__.__name__, "id": listener_id})
|
{"entity": self.__class__.__name__, "id": listener_id})
|
||||||
simulate_controller(listener_id, create=True)
|
simulate_controller(listener_id, create=True)
|
||||||
|
|
||||||
def update(self, old_listener, listener):
|
def update(self, old_listener, listener):
|
||||||
validate_input(data_models.Listener, listener)
|
validate_input(data_models.Listener, listener)
|
||||||
LOG.info(_LI("%(entity)s handling the update of listener %(id)s"),
|
LOG.info("%(entity)s handling the update of listener %(id)s",
|
||||||
{"entity": self.__class__.__name__, "id": old_listener.id})
|
{"entity": self.__class__.__name__, "id": old_listener.id})
|
||||||
listener.id = old_listener.id
|
listener.id = old_listener.id
|
||||||
simulate_controller(listener, update=True)
|
simulate_controller(listener, update=True)
|
||||||
|
|
||||||
def delete(self, listener_id):
|
def delete(self, listener_id):
|
||||||
LOG.info(_LI("%(entity)s handling the deletion of listener %(id)s"),
|
LOG.info("%(entity)s handling the deletion of listener %(id)s",
|
||||||
{"entity": self.__class__.__name__, "id": listener_id})
|
{"entity": self.__class__.__name__, "id": listener_id})
|
||||||
simulate_controller(listener_id, delete=True)
|
simulate_controller(listener_id, delete=True)
|
||||||
|
|
||||||
@ -337,19 +332,19 @@ class ListenerHandler(abstract_handler.BaseObjectHandler):
|
|||||||
class PoolHandler(abstract_handler.BaseObjectHandler):
|
class PoolHandler(abstract_handler.BaseObjectHandler):
|
||||||
|
|
||||||
def create(self, pool_id):
|
def create(self, pool_id):
|
||||||
LOG.info(_LI("%(entity)s handling the creation of pool %(id)s"),
|
LOG.info("%(entity)s handling the creation of pool %(id)s",
|
||||||
{"entity": self.__class__.__name__, "id": pool_id})
|
{"entity": self.__class__.__name__, "id": pool_id})
|
||||||
simulate_controller(pool_id, create=True)
|
simulate_controller(pool_id, create=True)
|
||||||
|
|
||||||
def update(self, old_pool, pool):
|
def update(self, old_pool, pool):
|
||||||
validate_input(data_models.Pool, pool)
|
validate_input(data_models.Pool, pool)
|
||||||
LOG.info(_LI("%(entity)s handling the update of pool %(id)s"),
|
LOG.info("%(entity)s handling the update of pool %(id)s",
|
||||||
{"entity": self.__class__.__name__, "id": old_pool.id})
|
{"entity": self.__class__.__name__, "id": old_pool.id})
|
||||||
pool.id = old_pool.id
|
pool.id = old_pool.id
|
||||||
simulate_controller(pool, update=True)
|
simulate_controller(pool, update=True)
|
||||||
|
|
||||||
def delete(self, pool_id):
|
def delete(self, pool_id):
|
||||||
LOG.info(_LI("%(entity)s handling the deletion of pool %(id)s"),
|
LOG.info("%(entity)s handling the deletion of pool %(id)s",
|
||||||
{"entity": self.__class__.__name__, "id": pool_id})
|
{"entity": self.__class__.__name__, "id": pool_id})
|
||||||
simulate_controller(pool_id, delete=True)
|
simulate_controller(pool_id, delete=True)
|
||||||
|
|
||||||
@ -357,23 +352,23 @@ class PoolHandler(abstract_handler.BaseObjectHandler):
|
|||||||
class HealthMonitorHandler(abstract_handler.BaseObjectHandler):
|
class HealthMonitorHandler(abstract_handler.BaseObjectHandler):
|
||||||
|
|
||||||
def create(self, pool_id):
|
def create(self, pool_id):
|
||||||
LOG.info(_LI("%(entity)s handling the creation of health monitor "
|
LOG.info("%(entity)s handling the creation of health monitor "
|
||||||
"on pool %(id)s"),
|
"on pool %(id)s",
|
||||||
{"entity": self.__class__.__name__, "id": pool_id})
|
{"entity": self.__class__.__name__, "id": pool_id})
|
||||||
simulate_controller(pool_id, create=True)
|
simulate_controller(pool_id, create=True)
|
||||||
|
|
||||||
def update(self, old_health_monitor, health_monitor):
|
def update(self, old_health_monitor, health_monitor):
|
||||||
validate_input(data_models.HealthMonitor, health_monitor)
|
validate_input(data_models.HealthMonitor, health_monitor)
|
||||||
LOG.info(_LI("%(entity)s handling the update of health monitor "
|
LOG.info("%(entity)s handling the update of health monitor "
|
||||||
"on pool %(id)s"),
|
"on pool %(id)s",
|
||||||
{"entity": self.__class__.__name__,
|
{"entity": self.__class__.__name__,
|
||||||
"id": old_health_monitor.pool_id})
|
"id": old_health_monitor.pool_id})
|
||||||
health_monitor.pool_id = old_health_monitor.pool_id
|
health_monitor.pool_id = old_health_monitor.pool_id
|
||||||
simulate_controller(health_monitor, update=True)
|
simulate_controller(health_monitor, update=True)
|
||||||
|
|
||||||
def delete(self, pool_id):
|
def delete(self, pool_id):
|
||||||
LOG.info(_LI("%(entity)s handling the deletion of health monitor "
|
LOG.info("%(entity)s handling the deletion of health monitor "
|
||||||
"on pool %(id)s"),
|
"on pool %(id)s",
|
||||||
{"entity": self.__class__.__name__, "id": pool_id})
|
{"entity": self.__class__.__name__, "id": pool_id})
|
||||||
simulate_controller(pool_id, delete=True)
|
simulate_controller(pool_id, delete=True)
|
||||||
|
|
||||||
@ -381,19 +376,19 @@ class HealthMonitorHandler(abstract_handler.BaseObjectHandler):
|
|||||||
class MemberHandler(abstract_handler.BaseObjectHandler):
|
class MemberHandler(abstract_handler.BaseObjectHandler):
|
||||||
|
|
||||||
def create(self, member_id):
|
def create(self, member_id):
|
||||||
LOG.info(_LI("%(entity)s handling the creation of member %(id)s"),
|
LOG.info("%(entity)s handling the creation of member %(id)s",
|
||||||
{"entity": self.__class__.__name__, "id": member_id})
|
{"entity": self.__class__.__name__, "id": member_id})
|
||||||
simulate_controller(member_id, create=True)
|
simulate_controller(member_id, create=True)
|
||||||
|
|
||||||
def update(self, old_member, member):
|
def update(self, old_member, member):
|
||||||
validate_input(data_models.Member, member)
|
validate_input(data_models.Member, member)
|
||||||
LOG.info(_LI("%(entity)s handling the update of member %(id)s"),
|
LOG.info("%(entity)s handling the update of member %(id)s",
|
||||||
{"entity": self.__class__.__name__, "id": old_member.id})
|
{"entity": self.__class__.__name__, "id": old_member.id})
|
||||||
member.id = old_member.id
|
member.id = old_member.id
|
||||||
simulate_controller(member, update=True)
|
simulate_controller(member, update=True)
|
||||||
|
|
||||||
def delete(self, member_id):
|
def delete(self, member_id):
|
||||||
LOG.info(_LI("%(entity)s handling the deletion of member %(id)s"),
|
LOG.info("%(entity)s handling the deletion of member %(id)s",
|
||||||
{"entity": self.__class__.__name__, "id": member_id})
|
{"entity": self.__class__.__name__, "id": member_id})
|
||||||
simulate_controller(member_id, delete=True)
|
simulate_controller(member_id, delete=True)
|
||||||
|
|
||||||
@ -401,19 +396,19 @@ class MemberHandler(abstract_handler.BaseObjectHandler):
|
|||||||
class L7PolicyHandler(abstract_handler.BaseObjectHandler):
|
class L7PolicyHandler(abstract_handler.BaseObjectHandler):
|
||||||
|
|
||||||
def create(self, l7policy_id):
|
def create(self, l7policy_id):
|
||||||
LOG.info(_LI("%(entity)s handling the creation of l7policy %(id)s"),
|
LOG.info("%(entity)s handling the creation of l7policy %(id)s",
|
||||||
{"entity": self.__class__.__name__, "id": l7policy_id})
|
{"entity": self.__class__.__name__, "id": l7policy_id})
|
||||||
simulate_controller(l7policy_id, create=True)
|
simulate_controller(l7policy_id, create=True)
|
||||||
|
|
||||||
def update(self, old_l7policy, l7policy):
|
def update(self, old_l7policy, l7policy):
|
||||||
validate_input(data_models.L7Policy, l7policy)
|
validate_input(data_models.L7Policy, l7policy)
|
||||||
LOG.info(_LI("%(entity)s handling the update of l7policy %(id)s"),
|
LOG.info("%(entity)s handling the update of l7policy %(id)s",
|
||||||
{"entity": self.__class__.__name__, "id": old_l7policy.id})
|
{"entity": self.__class__.__name__, "id": old_l7policy.id})
|
||||||
l7policy.id = old_l7policy.id
|
l7policy.id = old_l7policy.id
|
||||||
simulate_controller(l7policy, update=True)
|
simulate_controller(l7policy, update=True)
|
||||||
|
|
||||||
def delete(self, l7policy_id):
|
def delete(self, l7policy_id):
|
||||||
LOG.info(_LI("%(entity)s handling the deletion of l7policy %(id)s"),
|
LOG.info("%(entity)s handling the deletion of l7policy %(id)s",
|
||||||
{"entity": self.__class__.__name__, "id": l7policy_id})
|
{"entity": self.__class__.__name__, "id": l7policy_id})
|
||||||
simulate_controller(l7policy_id, delete=True)
|
simulate_controller(l7policy_id, delete=True)
|
||||||
|
|
||||||
@ -421,19 +416,19 @@ class L7PolicyHandler(abstract_handler.BaseObjectHandler):
|
|||||||
class L7RuleHandler(abstract_handler.BaseObjectHandler):
|
class L7RuleHandler(abstract_handler.BaseObjectHandler):
|
||||||
|
|
||||||
def create(self, l7rule):
|
def create(self, l7rule):
|
||||||
LOG.info(_LI("%(entity)s handling the creation of l7rule %(id)s"),
|
LOG.info("%(entity)s handling the creation of l7rule %(id)s",
|
||||||
{"entity": self.__class__.__name__, "id": l7rule.id})
|
{"entity": self.__class__.__name__, "id": l7rule.id})
|
||||||
simulate_controller(l7rule, create=True)
|
simulate_controller(l7rule, create=True)
|
||||||
|
|
||||||
def update(self, old_l7rule, l7rule):
|
def update(self, old_l7rule, l7rule):
|
||||||
validate_input(data_models.L7Rule, l7rule)
|
validate_input(data_models.L7Rule, l7rule)
|
||||||
LOG.info(_LI("%(entity)s handling the update of l7rule %(id)s"),
|
LOG.info("%(entity)s handling the update of l7rule %(id)s",
|
||||||
{"entity": self.__class__.__name__, "id": old_l7rule.id})
|
{"entity": self.__class__.__name__, "id": old_l7rule.id})
|
||||||
l7rule.id = old_l7rule.id
|
l7rule.id = old_l7rule.id
|
||||||
simulate_controller(l7rule, update=True)
|
simulate_controller(l7rule, update=True)
|
||||||
|
|
||||||
def delete(self, l7rule):
|
def delete(self, l7rule):
|
||||||
LOG.info(_LI("%(entity)s handling the deletion of l7rule %(id)s"),
|
LOG.info("%(entity)s handling the deletion of l7rule %(id)s",
|
||||||
{"entity": self.__class__.__name__, "id": l7rule.id})
|
{"entity": self.__class__.__name__, "id": l7rule.id})
|
||||||
simulate_controller(l7rule, delete=True)
|
simulate_controller(l7rule, delete=True)
|
||||||
|
|
||||||
|
@ -21,7 +21,6 @@ from stevedore import driver as stevedore_driver
|
|||||||
from octavia.common import data_models
|
from octavia.common import data_models
|
||||||
from octavia.common import exceptions
|
from octavia.common import exceptions
|
||||||
from octavia.db import repositories
|
from octavia.db import repositories
|
||||||
from octavia.i18n import _LE
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
@ -61,7 +60,7 @@ class BaseController(rest.RestController):
|
|||||||
"""Gets an object from the database and returns it."""
|
"""Gets an object from the database and returns it."""
|
||||||
db_obj = repo.get(session, id=id)
|
db_obj = repo.get(session, id=id)
|
||||||
if not db_obj:
|
if not db_obj:
|
||||||
LOG.exception(_LE("{name} {id} not found").format(
|
LOG.exception("{name} {id} not found".format(
|
||||||
name=data_model._name(), id=id))
|
name=data_model._name(), id=id))
|
||||||
raise exceptions.NotFound(
|
raise exceptions.NotFound(
|
||||||
resource=data_model._name(), id=id)
|
resource=data_model._name(), id=id)
|
||||||
|
@ -27,8 +27,6 @@ from octavia.common import data_models
|
|||||||
from octavia.common import exceptions
|
from octavia.common import exceptions
|
||||||
from octavia.db import api as db_api
|
from octavia.db import api as db_api
|
||||||
from octavia.db import prepare as db_prepare
|
from octavia.db import prepare as db_prepare
|
||||||
from octavia.i18n import _LI
|
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
@ -47,7 +45,7 @@ class HealthMonitorController(base.BaseController):
|
|||||||
db_hm = self.repositories.health_monitor.get(
|
db_hm = self.repositories.health_monitor.get(
|
||||||
session, pool_id=self.pool_id)
|
session, pool_id=self.pool_id)
|
||||||
if not db_hm:
|
if not db_hm:
|
||||||
LOG.info(_LI("Health Monitor for Pool %s was not found"),
|
LOG.info("Health Monitor for Pool %s was not found",
|
||||||
self.pool_id)
|
self.pool_id)
|
||||||
raise exceptions.NotFound(
|
raise exceptions.NotFound(
|
||||||
resource=data_models.HealthMonitor._name(),
|
resource=data_models.HealthMonitor._name(),
|
||||||
@ -83,8 +81,8 @@ class HealthMonitorController(base.BaseController):
|
|||||||
session, self.load_balancer_id,
|
session, self.load_balancer_id,
|
||||||
constants.PENDING_UPDATE, constants.PENDING_UPDATE,
|
constants.PENDING_UPDATE, constants.PENDING_UPDATE,
|
||||||
listener_ids=self._get_affected_listener_ids(session, hm)):
|
listener_ids=self._get_affected_listener_ids(session, hm)):
|
||||||
LOG.info(_LI("Health Monitor cannot be created or modified "
|
LOG.info("Health Monitor cannot be created or modified "
|
||||||
"because the Load Balancer is in an immutable state"))
|
"because the Load Balancer is in an immutable state")
|
||||||
lb_repo = self.repositories.load_balancer
|
lb_repo = self.repositories.load_balancer
|
||||||
db_lb = lb_repo.get(session, id=self.load_balancer_id)
|
db_lb = lb_repo.get(session, id=self.load_balancer_id)
|
||||||
raise exceptions.ImmutableObject(resource=db_lb._name(),
|
raise exceptions.ImmutableObject(resource=db_lb._name(),
|
||||||
@ -134,8 +132,8 @@ class HealthMonitorController(base.BaseController):
|
|||||||
lock_session.rollback()
|
lock_session.rollback()
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Creation of Health Monitor for Pool %s to "
|
LOG.info("Sending Creation of Health Monitor for Pool %s to "
|
||||||
"handler"), self.pool_id)
|
"handler", self.pool_id)
|
||||||
self.handler.create(db_hm)
|
self.handler.create(db_hm)
|
||||||
except Exception:
|
except Exception:
|
||||||
for listener_id in self._get_affected_listener_ids(
|
for listener_id in self._get_affected_listener_ids(
|
||||||
@ -161,8 +159,8 @@ class HealthMonitorController(base.BaseController):
|
|||||||
self._test_lb_and_listener_statuses(context.session, hm=db_hm)
|
self._test_lb_and_listener_statuses(context.session, hm=db_hm)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Update of Health Monitor for Pool %s to "
|
LOG.info("Sending Update of Health Monitor for Pool %s to handler",
|
||||||
"handler"), self.pool_id)
|
self.pool_id)
|
||||||
self.handler.update(db_hm, health_monitor)
|
self.handler.update(db_hm, health_monitor)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception(reraise=False):
|
with excutils.save_and_reraise_exception(reraise=False):
|
||||||
@ -182,8 +180,8 @@ class HealthMonitorController(base.BaseController):
|
|||||||
self._test_lb_and_listener_statuses(context.session, hm=db_hm)
|
self._test_lb_and_listener_statuses(context.session, hm=db_hm)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Deletion of Health Monitor for Pool %s to "
|
LOG.info("Sending Deletion of Health Monitor for Pool %s to "
|
||||||
"handler"), self.pool_id)
|
"handler", self.pool_id)
|
||||||
self.handler.delete(db_hm)
|
self.handler.delete(db_hm)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception(reraise=False):
|
with excutils.save_and_reraise_exception(reraise=False):
|
||||||
|
@ -28,8 +28,6 @@ from octavia.common import data_models
|
|||||||
from octavia.common import exceptions
|
from octavia.common import exceptions
|
||||||
from octavia.common import validate
|
from octavia.common import validate
|
||||||
from octavia.db import prepare as db_prepare
|
from octavia.db import prepare as db_prepare
|
||||||
from octavia.i18n import _LI
|
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
@ -65,8 +63,8 @@ class L7PolicyController(base.BaseController):
|
|||||||
session, self.load_balancer_id,
|
session, self.load_balancer_id,
|
||||||
constants.PENDING_UPDATE, constants.PENDING_UPDATE,
|
constants.PENDING_UPDATE, constants.PENDING_UPDATE,
|
||||||
listener_ids=[self.listener_id]):
|
listener_ids=[self.listener_id]):
|
||||||
LOG.info(_LI("L7Policy cannot be created or modified because the "
|
LOG.info("L7Policy cannot be created or modified because the "
|
||||||
"Load Balancer is in an immutable state"))
|
"Load Balancer is in an immutable state")
|
||||||
lb_repo = self.repositories.load_balancer
|
lb_repo = self.repositories.load_balancer
|
||||||
db_lb = lb_repo.get(session, id=self.load_balancer_id)
|
db_lb = lb_repo.get(session, id=self.load_balancer_id)
|
||||||
raise exceptions.ImmutableObject(resource=db_lb._name(),
|
raise exceptions.ImmutableObject(resource=db_lb._name(),
|
||||||
@ -104,7 +102,7 @@ class L7PolicyController(base.BaseController):
|
|||||||
if ['id'] == de.columns:
|
if ['id'] == de.columns:
|
||||||
raise exceptions.IDAlreadyExists()
|
raise exceptions.IDAlreadyExists()
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Creation of L7Policy %s to handler"),
|
LOG.info("Sending Creation of L7Policy %s to handler",
|
||||||
db_l7policy.id)
|
db_l7policy.id)
|
||||||
self.handler.create(db_l7policy)
|
self.handler.create(db_l7policy)
|
||||||
except Exception:
|
except Exception:
|
||||||
@ -132,7 +130,7 @@ class L7PolicyController(base.BaseController):
|
|||||||
self._test_lb_and_listener_statuses(context.session)
|
self._test_lb_and_listener_statuses(context.session)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Update of L7Policy %s to handler"), id)
|
LOG.info("Sending Update of L7Policy %s to handler", id)
|
||||||
self.handler.update(
|
self.handler.update(
|
||||||
db_l7policy, l7policy_types.L7PolicyPUT(**l7policy_dict))
|
db_l7policy, l7policy_types.L7PolicyPUT(**l7policy_dict))
|
||||||
except Exception:
|
except Exception:
|
||||||
@ -152,7 +150,7 @@ class L7PolicyController(base.BaseController):
|
|||||||
self._test_lb_and_listener_statuses(context.session)
|
self._test_lb_and_listener_statuses(context.session)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Deletion of L7Policy %s to handler"),
|
LOG.info("Sending Deletion of L7Policy %s to handler",
|
||||||
db_l7policy.id)
|
db_l7policy.id)
|
||||||
self.handler.delete(db_l7policy)
|
self.handler.delete(db_l7policy)
|
||||||
except Exception:
|
except Exception:
|
||||||
@ -177,7 +175,7 @@ class L7PolicyController(base.BaseController):
|
|||||||
db_l7policy = self.repositories.l7policy.get(
|
db_l7policy = self.repositories.l7policy.get(
|
||||||
context.session, id=l7policy_id)
|
context.session, id=l7policy_id)
|
||||||
if not db_l7policy:
|
if not db_l7policy:
|
||||||
LOG.info(_LI("L7Policy %s not found."), l7policy_id)
|
LOG.info("L7Policy %s not found.", l7policy_id)
|
||||||
raise exceptions.NotFound(
|
raise exceptions.NotFound(
|
||||||
resource=data_models.L7Policy._name(), id=l7policy_id)
|
resource=data_models.L7Policy._name(), id=l7policy_id)
|
||||||
return l7rule.L7RuleController(
|
return l7rule.L7RuleController(
|
||||||
|
@ -27,8 +27,6 @@ from octavia.common import data_models
|
|||||||
from octavia.common import exceptions
|
from octavia.common import exceptions
|
||||||
from octavia.common import validate
|
from octavia.common import validate
|
||||||
from octavia.db import prepare as db_prepare
|
from octavia.db import prepare as db_prepare
|
||||||
from octavia.i18n import _LI
|
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
@ -65,8 +63,8 @@ class L7RuleController(base.BaseController):
|
|||||||
session, self.load_balancer_id,
|
session, self.load_balancer_id,
|
||||||
constants.PENDING_UPDATE, constants.PENDING_UPDATE,
|
constants.PENDING_UPDATE, constants.PENDING_UPDATE,
|
||||||
listener_ids=[self.listener_id]):
|
listener_ids=[self.listener_id]):
|
||||||
LOG.info(_LI("L7Rule cannot be created or modified because the "
|
LOG.info("L7Rule cannot be created or modified because the "
|
||||||
"Load Balancer is in an immutable state"))
|
"Load Balancer is in an immutable state")
|
||||||
lb_repo = self.repositories.load_balancer
|
lb_repo = self.repositories.load_balancer
|
||||||
db_lb = lb_repo.get(session, id=self.load_balancer_id)
|
db_lb = lb_repo.get(session, id=self.load_balancer_id)
|
||||||
raise exceptions.ImmutableObject(resource=db_lb._name(),
|
raise exceptions.ImmutableObject(resource=db_lb._name(),
|
||||||
@ -109,7 +107,7 @@ class L7RuleController(base.BaseController):
|
|||||||
if ['id'] == de.columns:
|
if ['id'] == de.columns:
|
||||||
raise exceptions.IDAlreadyExists()
|
raise exceptions.IDAlreadyExists()
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Creation of L7Rule %s to handler"),
|
LOG.info("Sending Creation of L7Rule %s to handler",
|
||||||
db_l7rule.id)
|
db_l7rule.id)
|
||||||
self.handler.create(db_l7rule)
|
self.handler.create(db_l7rule)
|
||||||
except Exception:
|
except Exception:
|
||||||
@ -138,7 +136,7 @@ class L7RuleController(base.BaseController):
|
|||||||
self._test_lb_and_listener_statuses(context.session)
|
self._test_lb_and_listener_statuses(context.session)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Update of L7Rule %s to handler"), id)
|
LOG.info("Sending Update of L7Rule %s to handler", id)
|
||||||
self.handler.update(db_l7rule, l7rule)
|
self.handler.update(db_l7rule, l7rule)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception(reraise=False):
|
with excutils.save_and_reraise_exception(reraise=False):
|
||||||
@ -157,7 +155,7 @@ class L7RuleController(base.BaseController):
|
|||||||
self._test_lb_and_listener_statuses(context.session)
|
self._test_lb_and_listener_statuses(context.session)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Deletion of L7Rule %s to handler"),
|
LOG.info("Sending Deletion of L7Rule %s to handler",
|
||||||
db_l7rule.id)
|
db_l7rule.id)
|
||||||
self.handler.delete(db_l7rule)
|
self.handler.delete(db_l7rule)
|
||||||
except Exception:
|
except Exception:
|
||||||
|
@ -31,8 +31,6 @@ from octavia.common import data_models
|
|||||||
from octavia.common import exceptions
|
from octavia.common import exceptions
|
||||||
from octavia.db import api as db_api
|
from octavia.db import api as db_api
|
||||||
from octavia.db import prepare as db_prepare
|
from octavia.db import prepare as db_prepare
|
||||||
from octavia.i18n import _LI
|
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
@ -55,7 +53,7 @@ class ListenersController(base.BaseController):
|
|||||||
db_listener = self.repositories.listener.get(
|
db_listener = self.repositories.listener.get(
|
||||||
session, load_balancer_id=self.load_balancer_id, id=id)
|
session, load_balancer_id=self.load_balancer_id, id=id)
|
||||||
if not db_listener:
|
if not db_listener:
|
||||||
LOG.info(_LI("Listener %s not found."), id)
|
LOG.info("Listener %s not found.", id)
|
||||||
raise exceptions.NotFound(
|
raise exceptions.NotFound(
|
||||||
resource=data_models.Listener._name(), id=id)
|
resource=data_models.Listener._name(), id=id)
|
||||||
return db_listener
|
return db_listener
|
||||||
@ -85,7 +83,7 @@ class ListenersController(base.BaseController):
|
|||||||
if not self.repositories.test_and_set_lb_and_listeners_prov_status(
|
if not self.repositories.test_and_set_lb_and_listeners_prov_status(
|
||||||
session, self.load_balancer_id, constants.PENDING_UPDATE,
|
session, self.load_balancer_id, constants.PENDING_UPDATE,
|
||||||
listener_status, listener_ids=[id]):
|
listener_status, listener_ids=[id]):
|
||||||
LOG.info(_LI("Load Balancer %s is immutable."),
|
LOG.info("Load Balancer %s is immutable.",
|
||||||
self.load_balancer_id)
|
self.load_balancer_id)
|
||||||
db_lb = lb_repo.get(session, id=self.load_balancer_id)
|
db_lb = lb_repo.get(session, id=self.load_balancer_id)
|
||||||
raise exceptions.ImmutableObject(resource=db_lb._name(),
|
raise exceptions.ImmutableObject(resource=db_lb._name(),
|
||||||
@ -94,7 +92,7 @@ class ListenersController(base.BaseController):
|
|||||||
if not lb_repo.test_and_set_provisioning_status(
|
if not lb_repo.test_and_set_provisioning_status(
|
||||||
session, self.load_balancer_id, constants.PENDING_UPDATE):
|
session, self.load_balancer_id, constants.PENDING_UPDATE):
|
||||||
db_lb = lb_repo.get(session, id=self.load_balancer_id)
|
db_lb = lb_repo.get(session, id=self.load_balancer_id)
|
||||||
LOG.info(_LI("Load Balancer %s is immutable."), db_lb.id)
|
LOG.info("Load Balancer %s is immutable.", db_lb.id)
|
||||||
raise exceptions.ImmutableObject(resource=db_lb._name(),
|
raise exceptions.ImmutableObject(resource=db_lb._name(),
|
||||||
id=self.load_balancer_id)
|
id=self.load_balancer_id)
|
||||||
|
|
||||||
@ -144,7 +142,7 @@ class ListenersController(base.BaseController):
|
|||||||
|
|
||||||
def _send_listener_to_handler(self, session, db_listener):
|
def _send_listener_to_handler(self, session, db_listener):
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Creation of Listener %s to handler"),
|
LOG.info("Sending Creation of Listener %s to handler",
|
||||||
db_listener.id)
|
db_listener.id)
|
||||||
self.handler.create(db_listener)
|
self.handler.create(db_listener)
|
||||||
except Exception:
|
except Exception:
|
||||||
@ -210,7 +208,7 @@ class ListenersController(base.BaseController):
|
|||||||
self._test_lb_and_listener_statuses(context.session, id=id)
|
self._test_lb_and_listener_statuses(context.session, id=id)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Update of Listener %s to handler"), id)
|
LOG.info("Sending Update of Listener %s to handler", id)
|
||||||
self.handler.update(db_listener, listener)
|
self.handler.update(db_listener, listener)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception(reraise=False):
|
with excutils.save_and_reraise_exception(reraise=False):
|
||||||
@ -229,7 +227,7 @@ class ListenersController(base.BaseController):
|
|||||||
context.session, id=id, listener_status=constants.PENDING_DELETE)
|
context.session, id=id, listener_status=constants.PENDING_DELETE)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Deletion of Listener %s to handler"),
|
LOG.info("Sending Deletion of Listener %s to handler",
|
||||||
db_listener.id)
|
db_listener.id)
|
||||||
self.handler.delete(db_listener)
|
self.handler.delete(db_listener)
|
||||||
except Exception:
|
except Exception:
|
||||||
@ -258,7 +256,7 @@ class ListenersController(base.BaseController):
|
|||||||
db_listener = self.repositories.listener.get(
|
db_listener = self.repositories.listener.get(
|
||||||
context.session, id=listener_id)
|
context.session, id=listener_id)
|
||||||
if not db_listener:
|
if not db_listener:
|
||||||
LOG.info(_LI("Listener %s not found."), listener_id)
|
LOG.info("Listener %s not found.", listener_id)
|
||||||
raise exceptions.NotFound(
|
raise exceptions.NotFound(
|
||||||
resource=data_models.Listener._name(), id=listener_id)
|
resource=data_models.Listener._name(), id=listener_id)
|
||||||
if controller == 'pools':
|
if controller == 'pools':
|
||||||
|
@ -33,7 +33,7 @@ from octavia.common import utils
|
|||||||
import octavia.common.validate as validate
|
import octavia.common.validate as validate
|
||||||
from octavia.db import api as db_api
|
from octavia.db import api as db_api
|
||||||
from octavia.db import prepare as db_prepare
|
from octavia.db import prepare as db_prepare
|
||||||
from octavia.i18n import _, _LI
|
from octavia.i18n import _
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
@ -72,7 +72,7 @@ class LoadBalancersController(base.BaseController):
|
|||||||
lb_repo = self.repositories.load_balancer
|
lb_repo = self.repositories.load_balancer
|
||||||
if not lb_repo.test_and_set_provisioning_status(
|
if not lb_repo.test_and_set_provisioning_status(
|
||||||
session, id, lb_status):
|
session, id, lb_status):
|
||||||
LOG.info(_LI("Load Balancer %s is immutable."), id)
|
LOG.info("Load Balancer %s is immutable.", id)
|
||||||
db_lb = lb_repo.get(session, id=id)
|
db_lb = lb_repo.get(session, id=id)
|
||||||
raise exceptions.ImmutableObject(resource=db_lb._name(),
|
raise exceptions.ImmutableObject(resource=db_lb._name(),
|
||||||
id=id)
|
id=id)
|
||||||
@ -90,8 +90,8 @@ class LoadBalancersController(base.BaseController):
|
|||||||
|
|
||||||
def _load_balancer_graph_to_handler(self, context, db_lb):
|
def _load_balancer_graph_to_handler(self, context, db_lb):
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending full load balancer configuration %s to "
|
LOG.info("Sending full load balancer configuration %s to "
|
||||||
"the handler"), db_lb.id)
|
"the handler", db_lb.id)
|
||||||
self.handler.create(db_lb)
|
self.handler.create(db_lb)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception(reraise=False):
|
with excutils.save_and_reraise_exception(reraise=False):
|
||||||
@ -197,7 +197,7 @@ class LoadBalancersController(base.BaseController):
|
|||||||
|
|
||||||
# Handler will be responsible for sending to controller
|
# Handler will be responsible for sending to controller
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending created Load Balancer %s to the handler"),
|
LOG.info("Sending created Load Balancer %s to the handler",
|
||||||
db_lb.id)
|
db_lb.id)
|
||||||
self.handler.create(db_lb)
|
self.handler.create(db_lb)
|
||||||
except Exception:
|
except Exception:
|
||||||
@ -217,7 +217,7 @@ class LoadBalancersController(base.BaseController):
|
|||||||
self._test_lb_status(context.session, id)
|
self._test_lb_status(context.session, id)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending updated Load Balancer %s to the handler"),
|
LOG.info("Sending updated Load Balancer %s to the handler",
|
||||||
id)
|
id)
|
||||||
self.handler.update(db_lb, load_balancer)
|
self.handler.update(db_lb, load_balancer)
|
||||||
except Exception:
|
except Exception:
|
||||||
@ -239,7 +239,7 @@ class LoadBalancersController(base.BaseController):
|
|||||||
raise exceptions.ValidationException(detail=msg)
|
raise exceptions.ValidationException(detail=msg)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending deleted Load Balancer %s to the handler"),
|
LOG.info("Sending deleted Load Balancer %s to the handler",
|
||||||
db_lb.id)
|
db_lb.id)
|
||||||
self.handler.delete(db_lb, cascade)
|
self.handler.delete(db_lb, cascade)
|
||||||
except Exception:
|
except Exception:
|
||||||
@ -270,7 +270,7 @@ class LoadBalancersController(base.BaseController):
|
|||||||
db_lb = self.repositories.load_balancer.get(context.session,
|
db_lb = self.repositories.load_balancer.get(context.session,
|
||||||
id=lb_id)
|
id=lb_id)
|
||||||
if not db_lb:
|
if not db_lb:
|
||||||
LOG.info(_LI("Load Balancer %s was not found."), lb_id)
|
LOG.info("Load Balancer %s was not found.", lb_id)
|
||||||
raise exceptions.NotFound(
|
raise exceptions.NotFound(
|
||||||
resource=data_models.LoadBalancer._name(), id=lb_id)
|
resource=data_models.LoadBalancer._name(), id=lb_id)
|
||||||
if controller == 'listeners':
|
if controller == 'listeners':
|
||||||
@ -287,11 +287,11 @@ class LoadBalancersController(base.BaseController):
|
|||||||
|
|
||||||
|
|
||||||
class LBCascadeDeleteController(LoadBalancersController):
|
class LBCascadeDeleteController(LoadBalancersController):
|
||||||
def __init__(self, lb_id):
|
def __init__(self, lb_id):
|
||||||
super(LBCascadeDeleteController, self).__init__()
|
super(LBCascadeDeleteController, self).__init__()
|
||||||
self.lb_id = lb_id
|
self.lb_id = lb_id
|
||||||
|
|
||||||
@wsme_pecan.wsexpose(None, status_code=202)
|
@wsme_pecan.wsexpose(None, status_code=202)
|
||||||
def delete(self):
|
def delete(self):
|
||||||
"""Deletes a load balancer."""
|
"""Deletes a load balancer."""
|
||||||
return self._delete(self.lb_id, cascade=True)
|
return self._delete(self.lb_id, cascade=True)
|
||||||
|
@ -29,8 +29,6 @@ from octavia.common import exceptions
|
|||||||
import octavia.common.validate as validate
|
import octavia.common.validate as validate
|
||||||
from octavia.db import api as db_api
|
from octavia.db import api as db_api
|
||||||
from octavia.db import prepare as db_prepare
|
from octavia.db import prepare as db_prepare
|
||||||
from octavia.i18n import _LI
|
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
@ -82,8 +80,8 @@ class MembersController(base.BaseController):
|
|||||||
session, self.load_balancer_id,
|
session, self.load_balancer_id,
|
||||||
constants.PENDING_UPDATE, constants.PENDING_UPDATE,
|
constants.PENDING_UPDATE, constants.PENDING_UPDATE,
|
||||||
listener_ids=self._get_affected_listener_ids(session, member)):
|
listener_ids=self._get_affected_listener_ids(session, member)):
|
||||||
LOG.info(_LI("Member cannot be created or modified because the "
|
LOG.info("Member cannot be created or modified because the "
|
||||||
"Load Balancer is in an immutable state"))
|
"Load Balancer is in an immutable state")
|
||||||
lb_repo = self.repositories.load_balancer
|
lb_repo = self.repositories.load_balancer
|
||||||
db_lb = lb_repo.get(session, id=self.load_balancer_id)
|
db_lb = lb_repo.get(session, id=self.load_balancer_id)
|
||||||
raise exceptions.ImmutableObject(resource=db_lb._name(),
|
raise exceptions.ImmutableObject(resource=db_lb._name(),
|
||||||
@ -133,7 +131,7 @@ class MembersController(base.BaseController):
|
|||||||
lock_session.rollback()
|
lock_session.rollback()
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Creation of Member %s to handler"),
|
LOG.info("Sending Creation of Member %s to handler",
|
||||||
db_member.id)
|
db_member.id)
|
||||||
self.handler.create(db_member)
|
self.handler.create(db_member)
|
||||||
except Exception:
|
except Exception:
|
||||||
@ -156,7 +154,7 @@ class MembersController(base.BaseController):
|
|||||||
self._test_lb_and_listener_statuses(context.session, member=db_member)
|
self._test_lb_and_listener_statuses(context.session, member=db_member)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Update of Member %s to handler"), id)
|
LOG.info("Sending Update of Member %s to handler", id)
|
||||||
self.handler.update(db_member, member)
|
self.handler.update(db_member, member)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception(reraise=False):
|
with excutils.save_and_reraise_exception(reraise=False):
|
||||||
@ -176,7 +174,7 @@ class MembersController(base.BaseController):
|
|||||||
self._test_lb_and_listener_statuses(context.session, member=db_member)
|
self._test_lb_and_listener_statuses(context.session, member=db_member)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Deletion of Member %s to handler"),
|
LOG.info("Sending Deletion of Member %s to handler",
|
||||||
db_member.id)
|
db_member.id)
|
||||||
self.handler.delete(db_member)
|
self.handler.delete(db_member)
|
||||||
except Exception:
|
except Exception:
|
||||||
|
@ -30,8 +30,6 @@ from octavia.common import data_models
|
|||||||
from octavia.common import exceptions
|
from octavia.common import exceptions
|
||||||
from octavia.db import api as db_api
|
from octavia.db import api as db_api
|
||||||
from octavia.db import prepare as db_prepare
|
from octavia.db import prepare as db_prepare
|
||||||
from octavia.i18n import _LI
|
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
@ -82,8 +80,8 @@ class PoolsController(base.BaseController):
|
|||||||
session, self.load_balancer_id,
|
session, self.load_balancer_id,
|
||||||
constants.PENDING_UPDATE, constants.PENDING_UPDATE,
|
constants.PENDING_UPDATE, constants.PENDING_UPDATE,
|
||||||
listener_ids=self._get_affected_listener_ids(session, pool)):
|
listener_ids=self._get_affected_listener_ids(session, pool)):
|
||||||
LOG.info(_LI("Pool cannot be created or modified because the Load "
|
LOG.info("Pool cannot be created or modified because the Load "
|
||||||
"Balancer is in an immutable state"))
|
"Balancer is in an immutable state")
|
||||||
lb_repo = self.repositories.load_balancer
|
lb_repo = self.repositories.load_balancer
|
||||||
db_lb = lb_repo.get(session, id=self.load_balancer_id)
|
db_lb = lb_repo.get(session, id=self.load_balancer_id)
|
||||||
raise exceptions.ImmutableObject(resource=db_lb._name(),
|
raise exceptions.ImmutableObject(resource=db_lb._name(),
|
||||||
@ -109,8 +107,7 @@ class PoolsController(base.BaseController):
|
|||||||
|
|
||||||
def _send_pool_to_handler(self, session, db_pool):
|
def _send_pool_to_handler(self, session, db_pool):
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Creation of Pool %s to handler"),
|
LOG.info("Sending Creation of Pool %s to handler", db_pool.id)
|
||||||
db_pool.id)
|
|
||||||
self.handler.create(db_pool)
|
self.handler.create(db_pool)
|
||||||
except Exception:
|
except Exception:
|
||||||
for listener_id in self._get_affected_listener_ids(session):
|
for listener_id in self._get_affected_listener_ids(session):
|
||||||
@ -180,7 +177,7 @@ class PoolsController(base.BaseController):
|
|||||||
self._test_lb_and_listener_statuses(context.session, pool=db_pool)
|
self._test_lb_and_listener_statuses(context.session, pool=db_pool)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Update of Pool %s to handler"), id)
|
LOG.info("Sending Update of Pool %s to handler", id)
|
||||||
self.handler.update(db_pool, pool)
|
self.handler.update(db_pool, pool)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception(reraise=False):
|
with excutils.save_and_reraise_exception(reraise=False):
|
||||||
@ -205,8 +202,7 @@ class PoolsController(base.BaseController):
|
|||||||
self._test_lb_and_listener_statuses(context.session, pool=db_pool)
|
self._test_lb_and_listener_statuses(context.session, pool=db_pool)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Deletion of Pool %s to handler"),
|
LOG.info("Sending Deletion of Pool %s to handler", db_pool.id)
|
||||||
db_pool.id)
|
|
||||||
self.handler.delete(db_pool)
|
self.handler.delete(db_pool)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception(reraise=False):
|
with excutils.save_and_reraise_exception(reraise=False):
|
||||||
@ -234,7 +230,7 @@ class PoolsController(base.BaseController):
|
|||||||
remainder = remainder[1:]
|
remainder = remainder[1:]
|
||||||
db_pool = self.repositories.pool.get(context.session, id=pool_id)
|
db_pool = self.repositories.pool.get(context.session, id=pool_id)
|
||||||
if not db_pool:
|
if not db_pool:
|
||||||
LOG.info(_LI("Pool %s not found."), pool_id)
|
LOG.info("Pool %s not found.", pool_id)
|
||||||
raise exceptions.NotFound(resource=data_models.Pool._name(),
|
raise exceptions.NotFound(resource=data_models.Pool._name(),
|
||||||
id=pool_id)
|
id=pool_id)
|
||||||
if controller == 'members':
|
if controller == 'members':
|
||||||
|
@ -21,7 +21,6 @@ from stevedore import driver as stevedore_driver
|
|||||||
from octavia.common import data_models
|
from octavia.common import data_models
|
||||||
from octavia.common import exceptions
|
from octavia.common import exceptions
|
||||||
from octavia.db import repositories
|
from octavia.db import repositories
|
||||||
from octavia.i18n import _LE
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
@ -61,7 +60,7 @@ class BaseController(rest.RestController):
|
|||||||
"""Gets an object from the database and returns it."""
|
"""Gets an object from the database and returns it."""
|
||||||
db_obj = repo.get(session, id=id)
|
db_obj = repo.get(session, id=id)
|
||||||
if not db_obj:
|
if not db_obj:
|
||||||
LOG.exception(_LE("{name} {id} not found").format(
|
LOG.exception("{name} {id} not found".format(
|
||||||
name=data_model._name(), id=id))
|
name=data_model._name(), id=id))
|
||||||
raise exceptions.NotFound(
|
raise exceptions.NotFound(
|
||||||
resource=data_model._name(), id=id)
|
resource=data_model._name(), id=id)
|
||||||
|
@ -29,7 +29,6 @@ from octavia.common import data_models
|
|||||||
from octavia.common import exceptions
|
from octavia.common import exceptions
|
||||||
from octavia.db import api as db_api
|
from octavia.db import api as db_api
|
||||||
from octavia.db import prepare as db_prepare
|
from octavia.db import prepare as db_prepare
|
||||||
from octavia.i18n import _LI
|
|
||||||
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
@ -47,7 +46,7 @@ class HealthMonitorController(base.BaseController):
|
|||||||
db_hm = self.repositories.health_monitor.get(
|
db_hm = self.repositories.health_monitor.get(
|
||||||
session, id=hm_id)
|
session, id=hm_id)
|
||||||
if not db_hm:
|
if not db_hm:
|
||||||
LOG.info(_LI("Health Monitor %s was not found"), hm_id)
|
LOG.info("Health Monitor %s was not found", hm_id)
|
||||||
raise exceptions.NotFound(
|
raise exceptions.NotFound(
|
||||||
resource=data_models.HealthMonitor._name(),
|
resource=data_models.HealthMonitor._name(),
|
||||||
id=hm_id)
|
id=hm_id)
|
||||||
@ -100,8 +99,8 @@ class HealthMonitorController(base.BaseController):
|
|||||||
constants.PENDING_UPDATE, constants.PENDING_UPDATE,
|
constants.PENDING_UPDATE, constants.PENDING_UPDATE,
|
||||||
listener_ids=self._get_affected_listener_ids(session, hm),
|
listener_ids=self._get_affected_listener_ids(session, hm),
|
||||||
pool_id=hm.pool_id):
|
pool_id=hm.pool_id):
|
||||||
LOG.info(_LI("Health Monitor cannot be created or modified "
|
LOG.info("Health Monitor cannot be created or modified because "
|
||||||
"because the Load Balancer is in an immutable state"))
|
"the Load Balancer is in an immutable state")
|
||||||
raise exceptions.ImmutableObject(resource='Load Balancer',
|
raise exceptions.ImmutableObject(resource='Load Balancer',
|
||||||
id=load_balancer_id)
|
id=load_balancer_id)
|
||||||
|
|
||||||
@ -136,7 +135,7 @@ class HealthMonitorController(base.BaseController):
|
|||||||
|
|
||||||
def _send_hm_to_handler(self, session, db_hm):
|
def _send_hm_to_handler(self, session, db_hm):
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Creation of Health Monitor %s to handler"),
|
LOG.info("Sending Creation of Health Monitor %s to handler",
|
||||||
db_hm.id)
|
db_hm.id)
|
||||||
self.handler.create(db_hm)
|
self.handler.create(db_hm)
|
||||||
except Exception:
|
except Exception:
|
||||||
@ -191,8 +190,8 @@ class HealthMonitorController(base.BaseController):
|
|||||||
provisioning_status=constants.PENDING_UPDATE)
|
provisioning_status=constants.PENDING_UPDATE)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Update of Health Monitor for Pool %s to "
|
LOG.info("Sending Update of Health Monitor for Pool %s to "
|
||||||
"handler"), id)
|
"handler", id)
|
||||||
self.handler.update(db_hm, health_monitor)
|
self.handler.update(db_hm, health_monitor)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception(
|
with excutils.save_and_reraise_exception(
|
||||||
@ -220,8 +219,8 @@ class HealthMonitorController(base.BaseController):
|
|||||||
provisioning_status=constants.PENDING_DELETE)
|
provisioning_status=constants.PENDING_DELETE)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Deletion of Health Monitor for Pool %s to "
|
LOG.info("Sending Deletion of Health Monitor for Pool %s to "
|
||||||
"handler"), id)
|
"handler", id)
|
||||||
self.handler.delete(db_hm)
|
self.handler.delete(db_hm)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception(
|
with excutils.save_and_reraise_exception(
|
||||||
|
@ -30,7 +30,6 @@ from octavia.common import exceptions
|
|||||||
from octavia.common import validate
|
from octavia.common import validate
|
||||||
from octavia.db import api as db_api
|
from octavia.db import api as db_api
|
||||||
from octavia.db import prepare as db_prepare
|
from octavia.db import prepare as db_prepare
|
||||||
from octavia.i18n import _LI
|
|
||||||
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
@ -76,8 +75,8 @@ class L7PolicyController(base.BaseController):
|
|||||||
session, lb_id,
|
session, lb_id,
|
||||||
constants.PENDING_UPDATE, constants.PENDING_UPDATE,
|
constants.PENDING_UPDATE, constants.PENDING_UPDATE,
|
||||||
listener_ids=listener_ids):
|
listener_ids=listener_ids):
|
||||||
LOG.info(_LI("L7Policy cannot be created or modified because the "
|
LOG.info("L7Policy cannot be created or modified because the "
|
||||||
"Load Balancer is in an immutable state"))
|
"Load Balancer is in an immutable state")
|
||||||
raise exceptions.ImmutableObject(resource='Load Balancer',
|
raise exceptions.ImmutableObject(resource='Load Balancer',
|
||||||
id=lb_id)
|
id=lb_id)
|
||||||
|
|
||||||
@ -112,7 +111,7 @@ class L7PolicyController(base.BaseController):
|
|||||||
|
|
||||||
def _send_l7policy_to_handler(self, session, db_l7policy, lb_id):
|
def _send_l7policy_to_handler(self, session, db_l7policy, lb_id):
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Creation of L7Policy %s to handler"),
|
LOG.info("Sending Creation of L7Policy %s to handler",
|
||||||
db_l7policy.id)
|
db_l7policy.id)
|
||||||
self.handler.create(db_l7policy)
|
self.handler.create(db_l7policy)
|
||||||
except Exception:
|
except Exception:
|
||||||
@ -203,7 +202,7 @@ class L7PolicyController(base.BaseController):
|
|||||||
provisioning_status=constants.PENDING_UPDATE)
|
provisioning_status=constants.PENDING_UPDATE)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Update of L7Policy %s to handler"), id)
|
LOG.info("Sending Update of L7Policy %s to handler", id)
|
||||||
self.handler.update(
|
self.handler.update(
|
||||||
db_l7policy, sanitized_l7policy)
|
db_l7policy, sanitized_l7policy)
|
||||||
except Exception:
|
except Exception:
|
||||||
@ -236,7 +235,7 @@ class L7PolicyController(base.BaseController):
|
|||||||
provisioning_status=constants.PENDING_DELETE)
|
provisioning_status=constants.PENDING_DELETE)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Deletion of L7Policy %s to handler"),
|
LOG.info("Sending Deletion of L7Policy %s to handler",
|
||||||
db_l7policy.id)
|
db_l7policy.id)
|
||||||
self.handler.delete(db_l7policy)
|
self.handler.delete(db_l7policy)
|
||||||
except Exception:
|
except Exception:
|
||||||
@ -267,7 +266,7 @@ class L7PolicyController(base.BaseController):
|
|||||||
db_l7policy = self.repositories.l7policy.get(
|
db_l7policy = self.repositories.l7policy.get(
|
||||||
context.session, id=l7policy_id)
|
context.session, id=l7policy_id)
|
||||||
if not db_l7policy:
|
if not db_l7policy:
|
||||||
LOG.info(_LI("L7Policy %s not found."), l7policy_id)
|
LOG.info("L7Policy %s not found.", l7policy_id)
|
||||||
raise exceptions.NotFound(
|
raise exceptions.NotFound(
|
||||||
resource='L7Policy', id=l7policy_id)
|
resource='L7Policy', id=l7policy_id)
|
||||||
return l7rule.L7RuleController(
|
return l7rule.L7RuleController(
|
||||||
|
@ -28,7 +28,6 @@ from octavia.common import exceptions
|
|||||||
from octavia.common import validate
|
from octavia.common import validate
|
||||||
from octavia.db import api as db_api
|
from octavia.db import api as db_api
|
||||||
from octavia.db import prepare as db_prepare
|
from octavia.db import prepare as db_prepare
|
||||||
from octavia.i18n import _LI
|
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
@ -69,8 +68,8 @@ class L7RuleController(base.BaseController):
|
|||||||
session, load_balancer_id,
|
session, load_balancer_id,
|
||||||
constants.PENDING_UPDATE, constants.PENDING_UPDATE,
|
constants.PENDING_UPDATE, constants.PENDING_UPDATE,
|
||||||
listener_ids=[listener_id], l7policy_id=self.l7policy_id):
|
listener_ids=[listener_id], l7policy_id=self.l7policy_id):
|
||||||
LOG.info(_LI("L7Rule cannot be created or modified because the "
|
LOG.info("L7Rule cannot be created or modified because the "
|
||||||
"Load Balancer is in an immutable state"))
|
"Load Balancer is in an immutable state")
|
||||||
raise exceptions.ImmutableObject(resource='Load Balancer',
|
raise exceptions.ImmutableObject(resource='Load Balancer',
|
||||||
id=load_balancer_id)
|
id=load_balancer_id)
|
||||||
|
|
||||||
@ -111,8 +110,7 @@ class L7RuleController(base.BaseController):
|
|||||||
|
|
||||||
def _send_l7rule_to_handler(self, session, db_l7rule):
|
def _send_l7rule_to_handler(self, session, db_l7rule):
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Creation of L7Rule %s to handler"),
|
LOG.info("Sending Creation of L7Rule %s to handler", db_l7rule.id)
|
||||||
db_l7rule.id)
|
|
||||||
self.handler.create(db_l7rule)
|
self.handler.create(db_l7rule)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception(
|
with excutils.save_and_reraise_exception(
|
||||||
@ -185,7 +183,7 @@ class L7RuleController(base.BaseController):
|
|||||||
provisioning_status=constants.PENDING_UPDATE)
|
provisioning_status=constants.PENDING_UPDATE)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Update of L7Rule %s to handler"), id)
|
LOG.info("Sending Update of L7Rule %s to handler", id)
|
||||||
self.handler.update(db_l7rule, l7rule)
|
self.handler.update(db_l7rule, l7rule)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception(
|
with excutils.save_and_reraise_exception(
|
||||||
@ -212,8 +210,7 @@ class L7RuleController(base.BaseController):
|
|||||||
provisioning_status=constants.PENDING_DELETE)
|
provisioning_status=constants.PENDING_DELETE)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Deletion of L7Rule %s to handler"),
|
LOG.info("Sending Deletion of L7Rule %s to handler", db_l7rule.id)
|
||||||
db_l7rule.id)
|
|
||||||
self.handler.delete(db_l7rule)
|
self.handler.delete(db_l7rule)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception(
|
with excutils.save_and_reraise_exception(
|
||||||
|
@ -29,7 +29,6 @@ from octavia.common import data_models
|
|||||||
from octavia.common import exceptions
|
from octavia.common import exceptions
|
||||||
from octavia.db import api as db_api
|
from octavia.db import api as db_api
|
||||||
from octavia.db import prepare as db_prepare
|
from octavia.db import prepare as db_prepare
|
||||||
from octavia.i18n import _LI
|
|
||||||
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
@ -50,7 +49,7 @@ class ListenersController(base.BaseController):
|
|||||||
db_listener = self.repositories.listener.get(
|
db_listener = self.repositories.listener.get(
|
||||||
session, load_balancer_id=load_balancer_id, id=id)
|
session, load_balancer_id=load_balancer_id, id=id)
|
||||||
if not db_listener:
|
if not db_listener:
|
||||||
LOG.info(_LI("Listener %s not found."), id)
|
LOG.info("Listener %s not found.", id)
|
||||||
raise exceptions.NotFound(
|
raise exceptions.NotFound(
|
||||||
resource=data_models.Listener._name(), id=id)
|
resource=data_models.Listener._name(), id=id)
|
||||||
return db_listener
|
return db_listener
|
||||||
@ -91,8 +90,7 @@ class ListenersController(base.BaseController):
|
|||||||
if not self.repositories.test_and_set_lb_and_listeners_prov_status(
|
if not self.repositories.test_and_set_lb_and_listeners_prov_status(
|
||||||
session, lb_id, constants.PENDING_UPDATE,
|
session, lb_id, constants.PENDING_UPDATE,
|
||||||
listener_status, listener_ids=[id]):
|
listener_status, listener_ids=[id]):
|
||||||
LOG.info(_LI("Load Balancer %s is immutable."),
|
LOG.info("Load Balancer %s is immutable.", lb_id)
|
||||||
lb_id)
|
|
||||||
db_lb = lb_repo.get(session, id=lb_id)
|
db_lb = lb_repo.get(session, id=lb_id)
|
||||||
raise exceptions.ImmutableObject(resource=db_lb._name(),
|
raise exceptions.ImmutableObject(resource=db_lb._name(),
|
||||||
id=lb_id)
|
id=lb_id)
|
||||||
@ -100,7 +98,7 @@ class ListenersController(base.BaseController):
|
|||||||
if not lb_repo.test_and_set_provisioning_status(
|
if not lb_repo.test_and_set_provisioning_status(
|
||||||
session, lb_id, constants.PENDING_UPDATE):
|
session, lb_id, constants.PENDING_UPDATE):
|
||||||
db_lb = lb_repo.get(session, id=lb_id)
|
db_lb = lb_repo.get(session, id=lb_id)
|
||||||
LOG.info(_LI("Load Balancer %s is immutable."), db_lb.id)
|
LOG.info("Load Balancer %s is immutable.", db_lb.id)
|
||||||
raise exceptions.ImmutableObject(resource=db_lb._name(),
|
raise exceptions.ImmutableObject(resource=db_lb._name(),
|
||||||
id=lb_id)
|
id=lb_id)
|
||||||
|
|
||||||
@ -168,7 +166,7 @@ class ListenersController(base.BaseController):
|
|||||||
|
|
||||||
def _send_listener_to_handler(self, session, db_listener):
|
def _send_listener_to_handler(self, session, db_listener):
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Creation of Listener %s to handler"),
|
LOG.info("Sending Creation of Listener %s to handler",
|
||||||
db_listener.id)
|
db_listener.id)
|
||||||
self.handler.create(db_listener)
|
self.handler.create(db_listener)
|
||||||
except Exception:
|
except Exception:
|
||||||
@ -230,7 +228,7 @@ class ListenersController(base.BaseController):
|
|||||||
id=id)
|
id=id)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Update of Listener %s to handler"), id)
|
LOG.info("Sending Update of Listener %s to handler", id)
|
||||||
self.handler.update(db_listener, listener)
|
self.handler.update(db_listener, listener)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception(
|
with excutils.save_and_reraise_exception(
|
||||||
@ -257,7 +255,7 @@ class ListenersController(base.BaseController):
|
|||||||
id=id, listener_status=constants.PENDING_DELETE)
|
id=id, listener_status=constants.PENDING_DELETE)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Deletion of Listener %s to handler"),
|
LOG.info("Sending Deletion of Listener %s to handler",
|
||||||
db_listener.id)
|
db_listener.id)
|
||||||
self.handler.delete(db_listener)
|
self.handler.delete(db_listener)
|
||||||
except Exception:
|
except Exception:
|
||||||
|
@ -30,7 +30,7 @@ from octavia.common import utils
|
|||||||
import octavia.common.validate as validate
|
import octavia.common.validate as validate
|
||||||
from octavia.db import api as db_api
|
from octavia.db import api as db_api
|
||||||
from octavia.db import prepare as db_prepare
|
from octavia.db import prepare as db_prepare
|
||||||
from octavia.i18n import _, _LI
|
from octavia.i18n import _
|
||||||
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
@ -79,9 +79,8 @@ class LoadBalancersController(base.BaseController):
|
|||||||
if not lb_repo.test_and_set_provisioning_status(
|
if not lb_repo.test_and_set_provisioning_status(
|
||||||
session, id, lb_status):
|
session, id, lb_status):
|
||||||
prov_status = lb_repo.get(session, id=id).provisioning_status
|
prov_status = lb_repo.get(session, id=id).provisioning_status
|
||||||
LOG.info(_LI(
|
LOG.info("Invalid state %(state)s of loadbalancer resource %(id)s",
|
||||||
"Invalid state %(state)s of loadbalancer resource %(id)s"),
|
{"state": prov_status, "id": id})
|
||||||
{"state": prov_status, "id": id})
|
|
||||||
raise exceptions.LBPendingStateError(
|
raise exceptions.LBPendingStateError(
|
||||||
state=prov_status, id=id)
|
state=prov_status, id=id)
|
||||||
|
|
||||||
@ -174,7 +173,7 @@ class LoadBalancersController(base.BaseController):
|
|||||||
|
|
||||||
# Handler will be responsible for sending to controller
|
# Handler will be responsible for sending to controller
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending created Load Balancer %s to the handler"),
|
LOG.info("Sending created Load Balancer %s to the handler",
|
||||||
db_lb.id)
|
db_lb.id)
|
||||||
self.handler.create(db_lb)
|
self.handler.create(db_lb)
|
||||||
except Exception:
|
except Exception:
|
||||||
@ -195,8 +194,7 @@ class LoadBalancersController(base.BaseController):
|
|||||||
db_lb = self._get_db_lb(context.session, id)
|
db_lb = self._get_db_lb(context.session, id)
|
||||||
self._test_lb_status(context.session, id)
|
self._test_lb_status(context.session, id)
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending updated Load Balancer %s to the handler"),
|
LOG.info("Sending updated Load Balancer %s to the handler", id)
|
||||||
id)
|
|
||||||
self.handler.update(db_lb, load_balancer)
|
self.handler.update(db_lb, load_balancer)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception(reraise=False):
|
with excutils.save_and_reraise_exception(reraise=False):
|
||||||
@ -214,7 +212,7 @@ class LoadBalancersController(base.BaseController):
|
|||||||
lb_status=constants.PENDING_DELETE)
|
lb_status=constants.PENDING_DELETE)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending deleted Load Balancer %s to the handler"),
|
LOG.info("Sending deleted Load Balancer %s to the handler",
|
||||||
db_lb.id)
|
db_lb.id)
|
||||||
self.handler.delete(db_lb, cascade)
|
self.handler.delete(db_lb, cascade)
|
||||||
except Exception:
|
except Exception:
|
||||||
|
@ -29,7 +29,6 @@ from octavia.common import exceptions
|
|||||||
import octavia.common.validate as validate
|
import octavia.common.validate as validate
|
||||||
from octavia.db import api as db_api
|
from octavia.db import api as db_api
|
||||||
from octavia.db import prepare as db_prepare
|
from octavia.db import prepare as db_prepare
|
||||||
from octavia.i18n import _LI
|
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
@ -81,8 +80,8 @@ class MembersController(base.BaseController):
|
|||||||
constants.PENDING_UPDATE, constants.PENDING_UPDATE,
|
constants.PENDING_UPDATE, constants.PENDING_UPDATE,
|
||||||
listener_ids=self._get_affected_listener_ids(session, member),
|
listener_ids=self._get_affected_listener_ids(session, member),
|
||||||
pool_id=self.pool_id):
|
pool_id=self.pool_id):
|
||||||
LOG.info(_LI("Member cannot be created or modified because the "
|
LOG.info("Member cannot be created or modified because the "
|
||||||
"Load Balancer is in an immutable state"))
|
"Load Balancer is in an immutable state")
|
||||||
raise exceptions.ImmutableObject(resource='Load Balancer',
|
raise exceptions.ImmutableObject(resource='Load Balancer',
|
||||||
id=load_balancer_id)
|
id=load_balancer_id)
|
||||||
|
|
||||||
@ -123,8 +122,7 @@ class MembersController(base.BaseController):
|
|||||||
|
|
||||||
def _send_member_to_handler(self, session, db_member):
|
def _send_member_to_handler(self, session, db_member):
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Creation of Pool %s to handler"),
|
LOG.info("Sending Creation of Pool %s to handler", db_member.id)
|
||||||
db_member.id)
|
|
||||||
self.handler.create(db_member)
|
self.handler.create(db_member)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception(
|
with excutils.save_and_reraise_exception(
|
||||||
@ -191,7 +189,7 @@ class MembersController(base.BaseController):
|
|||||||
provisioning_status=constants.PENDING_UPDATE)
|
provisioning_status=constants.PENDING_UPDATE)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Update of Member %s to handler"), id)
|
LOG.info("Sending Update of Member %s to handler", id)
|
||||||
self.handler.update(db_member, member)
|
self.handler.update(db_member, member)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception(
|
with excutils.save_and_reraise_exception(
|
||||||
@ -219,8 +217,7 @@ class MembersController(base.BaseController):
|
|||||||
provisioning_status=constants.PENDING_DELETE)
|
provisioning_status=constants.PENDING_DELETE)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Deletion of Member %s to handler"),
|
LOG.info("Sending Deletion of Member %s to handler", db_member.id)
|
||||||
db_member.id)
|
|
||||||
self.handler.delete(db_member)
|
self.handler.delete(db_member)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception(
|
with excutils.save_and_reraise_exception(
|
||||||
|
@ -31,7 +31,6 @@ from octavia.common import data_models
|
|||||||
from octavia.common import exceptions
|
from octavia.common import exceptions
|
||||||
from octavia.db import api as db_api
|
from octavia.db import api as db_api
|
||||||
from octavia.db import prepare as db_prepare
|
from octavia.db import prepare as db_prepare
|
||||||
from octavia.i18n import _LI
|
|
||||||
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
@ -82,8 +81,8 @@ class PoolsController(base.BaseController):
|
|||||||
session, lb_id,
|
session, lb_id,
|
||||||
constants.PENDING_UPDATE, constants.PENDING_UPDATE,
|
constants.PENDING_UPDATE, constants.PENDING_UPDATE,
|
||||||
listener_ids=listener_ids):
|
listener_ids=listener_ids):
|
||||||
LOG.info(_LI("Pool cannot be created or modified because the Load "
|
LOG.info("Pool cannot be created or modified because the Load "
|
||||||
"Balancer is in an immutable state"))
|
"Balancer is in an immutable state")
|
||||||
raise exceptions.ImmutableObject(resource=_('Load Balancer'),
|
raise exceptions.ImmutableObject(resource=_('Load Balancer'),
|
||||||
id=lb_id)
|
id=lb_id)
|
||||||
|
|
||||||
@ -119,8 +118,7 @@ class PoolsController(base.BaseController):
|
|||||||
|
|
||||||
def _send_pool_to_handler(self, session, db_pool, listener_id):
|
def _send_pool_to_handler(self, session, db_pool, listener_id):
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Creation of Pool %s to handler"),
|
LOG.info("Sending Creation of Pool %s to handler", db_pool.id)
|
||||||
db_pool.id)
|
|
||||||
self.handler.create(db_pool)
|
self.handler.create(db_pool)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception(
|
with excutils.save_and_reraise_exception(
|
||||||
@ -212,7 +210,7 @@ class PoolsController(base.BaseController):
|
|||||||
context.session, db_pool.id,
|
context.session, db_pool.id,
|
||||||
provisioning_status=constants.PENDING_UPDATE)
|
provisioning_status=constants.PENDING_UPDATE)
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Update of Pool %s to handler"), id)
|
LOG.info("Sending Update of Pool %s to handler", id)
|
||||||
self.handler.update(db_pool, pool)
|
self.handler.update(db_pool, pool)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception(
|
with excutils.save_and_reraise_exception(
|
||||||
@ -244,8 +242,7 @@ class PoolsController(base.BaseController):
|
|||||||
provisioning_status=constants.PENDING_DELETE)
|
provisioning_status=constants.PENDING_DELETE)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI("Sending Deletion of Pool %s to handler"),
|
LOG.info("Sending Deletion of Pool %s to handler", db_pool.id)
|
||||||
db_pool.id)
|
|
||||||
self.handler.delete(db_pool)
|
self.handler.delete(db_pool)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception(
|
with excutils.save_and_reraise_exception(
|
||||||
@ -275,7 +272,7 @@ class PoolsController(base.BaseController):
|
|||||||
remainder = remainder[1:]
|
remainder = remainder[1:]
|
||||||
db_pool = self.repositories.pool.get(context.session, id=pool_id)
|
db_pool = self.repositories.pool.get(context.session, id=pool_id)
|
||||||
if not db_pool:
|
if not db_pool:
|
||||||
LOG.info(_LI("Pool %s not found."), pool_id)
|
LOG.info("Pool %s not found.", pool_id)
|
||||||
raise exceptions.NotFound(resource=data_models.Pool._name(),
|
raise exceptions.NotFound(resource=data_models.Pool._name(),
|
||||||
id=pool_id)
|
id=pool_id)
|
||||||
if controller == 'members':
|
if controller == 'members':
|
||||||
|
@ -23,8 +23,6 @@ from oslo_utils import excutils
|
|||||||
|
|
||||||
from octavia.certificates.common import barbican as barbican_common
|
from octavia.certificates.common import barbican as barbican_common
|
||||||
from octavia.common import keystone
|
from octavia.common import keystone
|
||||||
from octavia.i18n import _LE
|
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
@ -45,5 +43,5 @@ class BarbicanACLAuth(barbican_common.BarbicanAuth):
|
|||||||
)
|
)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.exception(_LE("Error creating Barbican client"))
|
LOG.exception("Error creating Barbican client")
|
||||||
return cls._barbican_client
|
return cls._barbican_client
|
||||||
|
@ -25,7 +25,7 @@ from oslo_utils import encodeutils
|
|||||||
|
|
||||||
from octavia.certificates.common import cert
|
from octavia.certificates.common import cert
|
||||||
from octavia.common.tls_utils import cert_parser
|
from octavia.common.tls_utils import cert_parser
|
||||||
from octavia.i18n import _LE
|
from octavia.i18n import _
|
||||||
|
|
||||||
|
|
||||||
class BarbicanCert(cert.Cert):
|
class BarbicanCert(cert.Cert):
|
||||||
@ -33,9 +33,8 @@ class BarbicanCert(cert.Cert):
|
|||||||
def __init__(self, cert_container):
|
def __init__(self, cert_container):
|
||||||
if not isinstance(cert_container,
|
if not isinstance(cert_container,
|
||||||
barbican_client.containers.CertificateContainer):
|
barbican_client.containers.CertificateContainer):
|
||||||
raise TypeError(_LE(
|
raise TypeError(_("Retrieved Barbican Container is not of the "
|
||||||
"Retrieved Barbican Container is not of the correct type "
|
"correct type (certificate)."))
|
||||||
"(certificate)."))
|
|
||||||
self._cert_container = cert_container
|
self._cert_container = cert_container
|
||||||
|
|
||||||
def get_certificate(self):
|
def get_certificate(self):
|
||||||
|
@ -19,8 +19,6 @@ import requests
|
|||||||
|
|
||||||
from octavia.certificates.generator import local
|
from octavia.certificates.generator import local
|
||||||
from octavia.common import exceptions
|
from octavia.common import exceptions
|
||||||
from octavia.i18n import _LE
|
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
@ -57,11 +55,11 @@ class AnchorCertGenerator(local.LocalCertGenerator):
|
|||||||
|
|
||||||
if r.status_code != 200:
|
if r.status_code != 200:
|
||||||
LOG.debug('Anchor returned: %s', r.content)
|
LOG.debug('Anchor returned: %s', r.content)
|
||||||
raise AnchorException("Anchor returned Status Code : " +
|
raise AnchorException(_("Anchor returned Status Code : "
|
||||||
str(r.status_code))
|
"{0}").format(str(r.status_code)))
|
||||||
|
|
||||||
return r.content
|
return r.content
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("Unable to sign certificate."))
|
LOG.error("Unable to sign certificate.")
|
||||||
raise exceptions.CertificateGenerationException(msg=e)
|
raise exceptions.CertificateGenerationException(msg=e)
|
||||||
|
@ -28,7 +28,6 @@ import six
|
|||||||
from octavia.certificates.common import local as local_common
|
from octavia.certificates.common import local as local_common
|
||||||
from octavia.certificates.generator import cert_gen
|
from octavia.certificates.generator import cert_gen
|
||||||
from octavia.common import exceptions
|
from octavia.common import exceptions
|
||||||
from octavia.i18n import _LE, _LI
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
@ -45,7 +44,7 @@ class LocalCertGenerator(cert_gen.CertGenerator):
|
|||||||
@classmethod
|
@classmethod
|
||||||
def _validate_cert(cls, ca_cert, ca_key, ca_key_pass):
|
def _validate_cert(cls, ca_cert, ca_key, ca_key_pass):
|
||||||
if not ca_cert:
|
if not ca_cert:
|
||||||
LOG.info(_LI("Using CA Certificate from config."))
|
LOG.info("Using CA Certificate from config.")
|
||||||
try:
|
try:
|
||||||
ca_cert = open(CONF.certificates.ca_certificate, 'rb').read()
|
ca_cert = open(CONF.certificates.ca_certificate, 'rb').read()
|
||||||
except IOError:
|
except IOError:
|
||||||
@ -54,7 +53,7 @@ class LocalCertGenerator(cert_gen.CertGenerator):
|
|||||||
.format(CONF.certificates.ca_certificate)
|
.format(CONF.certificates.ca_certificate)
|
||||||
)
|
)
|
||||||
if not ca_key:
|
if not ca_key:
|
||||||
LOG.info(_LI("Using CA Private Key from config."))
|
LOG.info("Using CA Private Key from config.")
|
||||||
try:
|
try:
|
||||||
ca_key = open(CONF.certificates.ca_private_key, 'rb').read()
|
ca_key = open(CONF.certificates.ca_private_key, 'rb').read()
|
||||||
except IOError:
|
except IOError:
|
||||||
@ -65,13 +64,10 @@ class LocalCertGenerator(cert_gen.CertGenerator):
|
|||||||
if not ca_key_pass:
|
if not ca_key_pass:
|
||||||
ca_key_pass = CONF.certificates.ca_private_key_passphrase
|
ca_key_pass = CONF.certificates.ca_private_key_passphrase
|
||||||
if ca_key_pass:
|
if ca_key_pass:
|
||||||
LOG.info(_LI(
|
LOG.info("Using CA Private Key Passphrase from config.")
|
||||||
"Using CA Private Key Passphrase from config."
|
|
||||||
))
|
|
||||||
else:
|
else:
|
||||||
LOG.info(_LI(
|
LOG.info("No Passphrase found for CA Private Key, not using "
|
||||||
"No Passphrase found for CA Private Key, not using one."
|
"one.")
|
||||||
))
|
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def sign_cert(cls, csr, validity, ca_cert=None, ca_key=None,
|
def sign_cert(cls, csr, validity, ca_cert=None, ca_key=None,
|
||||||
@ -91,9 +87,7 @@ class LocalCertGenerator(cert_gen.CertGenerator):
|
|||||||
:return: Signed certificate
|
:return: Signed certificate
|
||||||
:raises Exception: if certificate signing fails
|
:raises Exception: if certificate signing fails
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI(
|
LOG.info("Signing a certificate request using OpenSSL locally.")
|
||||||
"Signing a certificate request using OpenSSL locally."
|
|
||||||
))
|
|
||||||
cls._validate_cert(ca_cert, ca_key, ca_key_pass)
|
cls._validate_cert(ca_cert, ca_key, ca_key_pass)
|
||||||
if not ca_digest:
|
if not ca_digest:
|
||||||
ca_digest = CONF.certificates.signing_digest
|
ca_digest = CONF.certificates.signing_digest
|
||||||
@ -169,7 +163,7 @@ class LocalCertGenerator(cert_gen.CertGenerator):
|
|||||||
return signed_cert.public_bytes(
|
return signed_cert.public_bytes(
|
||||||
encoding=serialization.Encoding.PEM)
|
encoding=serialization.Encoding.PEM)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("Unable to sign certificate."))
|
LOG.error("Unable to sign certificate.")
|
||||||
raise exceptions.CertificateGenerationException(msg=e)
|
raise exceptions.CertificateGenerationException(msg=e)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
|
@ -23,8 +23,6 @@ from stevedore import driver as stevedore_driver
|
|||||||
|
|
||||||
from octavia.certificates.common import barbican as barbican_common
|
from octavia.certificates.common import barbican as barbican_common
|
||||||
from octavia.certificates.manager import cert_mgr
|
from octavia.certificates.manager import cert_mgr
|
||||||
from octavia.i18n import _LE, _LI, _LW
|
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
@ -57,9 +55,8 @@ class BarbicanCertManager(cert_mgr.CertManager):
|
|||||||
"""
|
"""
|
||||||
connection = self.auth.get_barbican_client(project_id)
|
connection = self.auth.get_barbican_client(project_id)
|
||||||
|
|
||||||
LOG.info(_LI(
|
LOG.info("Storing certificate container '{0}' in "
|
||||||
"Storing certificate container '{0}' in Barbican."
|
"Barbican.".format(name))
|
||||||
).format(name))
|
|
||||||
|
|
||||||
certificate_secret = None
|
certificate_secret = None
|
||||||
private_key_secret = None
|
private_key_secret = None
|
||||||
@ -106,18 +103,14 @@ class BarbicanCertManager(cert_mgr.CertManager):
|
|||||||
old_ref = i.secret_ref
|
old_ref = i.secret_ref
|
||||||
try:
|
try:
|
||||||
i.delete()
|
i.delete()
|
||||||
LOG.info(_LI(
|
LOG.info("Deleted secret {0} ({1}) during "
|
||||||
"Deleted secret {0} ({1}) during rollback."
|
"rollback.".format(i.name, old_ref))
|
||||||
).format(i.name, old_ref))
|
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.warning(_LW(
|
LOG.warning("Failed to delete {0} ({1}) during "
|
||||||
"Failed to delete {0} ({1}) during rollback. This "
|
"rollback. This might not be a "
|
||||||
"might not be a problem."
|
"problem.".format(i.name, old_ref))
|
||||||
).format(i.name, old_ref))
|
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE(
|
LOG.error("Error storing certificate data: {0}".format(str(e)))
|
||||||
"Error storing certificate data: {0}"
|
|
||||||
).format(str(e)))
|
|
||||||
|
|
||||||
def get_cert(self, project_id, cert_ref, resource_ref=None,
|
def get_cert(self, project_id, cert_ref, resource_ref=None,
|
||||||
check_only=False, service_name='Octavia'):
|
check_only=False, service_name='Octavia'):
|
||||||
@ -134,9 +127,8 @@ class BarbicanCertManager(cert_mgr.CertManager):
|
|||||||
"""
|
"""
|
||||||
connection = self.auth.get_barbican_client(project_id)
|
connection = self.auth.get_barbican_client(project_id)
|
||||||
|
|
||||||
LOG.info(_LI(
|
LOG.info("Loading certificate container {0} from "
|
||||||
"Loading certificate container {0} from Barbican."
|
"Barbican.".format(cert_ref))
|
||||||
).format(cert_ref))
|
|
||||||
try:
|
try:
|
||||||
if check_only:
|
if check_only:
|
||||||
cert_container = connection.containers.get(
|
cert_container = connection.containers.get(
|
||||||
@ -151,9 +143,7 @@ class BarbicanCertManager(cert_mgr.CertManager):
|
|||||||
return barbican_common.BarbicanCert(cert_container)
|
return barbican_common.BarbicanCert(cert_container)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE(
|
LOG.error("Error getting {0}: {1}".format(cert_ref, str(e)))
|
||||||
"Error getting {0}: {1}"
|
|
||||||
).format(cert_ref, str(e)))
|
|
||||||
|
|
||||||
def delete_cert(self, project_id, cert_ref, resource_ref=None,
|
def delete_cert(self, project_id, cert_ref, resource_ref=None,
|
||||||
service_name='Octavia'):
|
service_name='Octavia'):
|
||||||
@ -167,9 +157,8 @@ class BarbicanCertManager(cert_mgr.CertManager):
|
|||||||
"""
|
"""
|
||||||
connection = self.auth.get_barbican_client(project_id)
|
connection = self.auth.get_barbican_client(project_id)
|
||||||
|
|
||||||
LOG.info(_LI(
|
LOG.info("Deregistering as a consumer of {0} in "
|
||||||
"Deregistering as a consumer of {0} in Barbican."
|
"Barbican.".format(cert_ref))
|
||||||
).format(cert_ref))
|
|
||||||
try:
|
try:
|
||||||
connection.containers.remove_consumer(
|
connection.containers.remove_consumer(
|
||||||
container_ref=cert_ref,
|
container_ref=cert_ref,
|
||||||
@ -178,6 +167,5 @@ class BarbicanCertManager(cert_mgr.CertManager):
|
|||||||
)
|
)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE(
|
LOG.error("Error deregistering as a consumer of {0}: "
|
||||||
"Error deregistering as a consumer of {0}: {1}"
|
"{1}".format(cert_ref, str(e)))
|
||||||
).format(cert_ref, str(e)))
|
|
||||||
|
@ -20,7 +20,6 @@ from oslo_log import log as logging
|
|||||||
from oslo_reports import guru_meditation_report as gmr
|
from oslo_reports import guru_meditation_report as gmr
|
||||||
|
|
||||||
from octavia.api import app as api_app
|
from octavia.api import app as api_app
|
||||||
from octavia.i18n import _LI
|
|
||||||
from octavia import version
|
from octavia import version
|
||||||
|
|
||||||
|
|
||||||
@ -33,7 +32,7 @@ def main():
|
|||||||
app = api_app.setup_app(argv=sys.argv)
|
app = api_app.setup_app(argv=sys.argv)
|
||||||
|
|
||||||
host, port = cfg.CONF.bind_host, cfg.CONF.bind_port
|
host, port = cfg.CONF.bind_host, cfg.CONF.bind_port
|
||||||
LOG.info(_LI("Starting API server on %(host)s:%(port)s"),
|
LOG.info("Starting API server on %(host)s:%(port)s",
|
||||||
{"host": host, "port": port})
|
{"host": host, "port": port})
|
||||||
srv = simple_server.make_server(host, port, app)
|
srv = simple_server.make_server(host, port, app)
|
||||||
|
|
||||||
|
@ -23,7 +23,6 @@ from octavia.amphorae.drivers.health import heartbeat_udp
|
|||||||
from octavia.common import service
|
from octavia.common import service
|
||||||
from octavia.controller.healthmanager import health_manager
|
from octavia.controller.healthmanager import health_manager
|
||||||
from octavia.controller.healthmanager import update_db
|
from octavia.controller.healthmanager import update_db
|
||||||
from octavia.i18n import _LI
|
|
||||||
from octavia import version
|
from octavia import version
|
||||||
|
|
||||||
|
|
||||||
@ -59,15 +58,15 @@ def main():
|
|||||||
hm_health_check_proc = multiprocessing.Process(name='HM_health_check',
|
hm_health_check_proc = multiprocessing.Process(name='HM_health_check',
|
||||||
target=hm_health_check)
|
target=hm_health_check)
|
||||||
processes.append(hm_health_check_proc)
|
processes.append(hm_health_check_proc)
|
||||||
LOG.info(_LI("Health Manager listener process starts:"))
|
LOG.info("Health Manager listener process starts:")
|
||||||
hm_listener_proc.start()
|
hm_listener_proc.start()
|
||||||
LOG.info(_LI("Health manager check process starts:"))
|
LOG.info("Health manager check process starts:")
|
||||||
hm_health_check_proc.start()
|
hm_health_check_proc.start()
|
||||||
|
|
||||||
try:
|
try:
|
||||||
for process in processes:
|
for process in processes:
|
||||||
process.join()
|
process.join()
|
||||||
except KeyboardInterrupt:
|
except KeyboardInterrupt:
|
||||||
LOG.info(_LI("Health Manager existing due to signal"))
|
LOG.info("Health Manager existing due to signal")
|
||||||
hm_listener_proc.terminate()
|
hm_listener_proc.terminate()
|
||||||
hm_health_check_proc.terminate()
|
hm_health_check_proc.terminate()
|
||||||
|
@ -24,10 +24,8 @@ from oslo_reports import guru_meditation_report as gmr
|
|||||||
|
|
||||||
from octavia.common import service
|
from octavia.common import service
|
||||||
from octavia.controller.housekeeping import house_keeping
|
from octavia.controller.housekeeping import house_keeping
|
||||||
from octavia.i18n import _LI
|
|
||||||
from octavia import version
|
from octavia import version
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
|
|
||||||
@ -41,7 +39,7 @@ def spare_amphora_check():
|
|||||||
|
|
||||||
# Read the interval from CONF
|
# Read the interval from CONF
|
||||||
interval = CONF.house_keeping.spare_check_interval
|
interval = CONF.house_keeping.spare_check_interval
|
||||||
LOG.info(_LI("Spare check interval is set to %d sec"), interval)
|
LOG.info("Spare check interval is set to %d sec", interval)
|
||||||
|
|
||||||
spare_amp = house_keeping.SpareAmphora()
|
spare_amp = house_keeping.SpareAmphora()
|
||||||
while not spare_amp_thread_event.is_set():
|
while not spare_amp_thread_event.is_set():
|
||||||
@ -54,10 +52,10 @@ def db_cleanup():
|
|||||||
"""Perform db cleanup for old resources."""
|
"""Perform db cleanup for old resources."""
|
||||||
# Read the interval from CONF
|
# Read the interval from CONF
|
||||||
interval = CONF.house_keeping.cleanup_interval
|
interval = CONF.house_keeping.cleanup_interval
|
||||||
LOG.info(_LI("DB cleanup interval is set to %d sec"), interval)
|
LOG.info("DB cleanup interval is set to %d sec", interval)
|
||||||
LOG.info(_LI('Amphora expiry age is %s seconds'),
|
LOG.info('Amphora expiry age is %s seconds',
|
||||||
CONF.house_keeping.amphora_expiry_age)
|
CONF.house_keeping.amphora_expiry_age)
|
||||||
LOG.info(_LI('Load balancer expiry age is %s seconds'),
|
LOG.info('Load balancer expiry age is %s seconds',
|
||||||
CONF.house_keeping.load_balancer_expiry_age)
|
CONF.house_keeping.load_balancer_expiry_age)
|
||||||
|
|
||||||
db_cleanup = house_keeping.DatabaseCleanup()
|
db_cleanup = house_keeping.DatabaseCleanup()
|
||||||
@ -72,7 +70,7 @@ def cert_rotation():
|
|||||||
"""Perform certificate rotation."""
|
"""Perform certificate rotation."""
|
||||||
interval = CONF.house_keeping.cert_interval
|
interval = CONF.house_keeping.cert_interval
|
||||||
LOG.info(
|
LOG.info(
|
||||||
_LI("Expiring certificate check interval is set to %d sec"), interval)
|
"Expiring certificate check interval is set to %d sec", interval)
|
||||||
cert_rotate = house_keeping.CertRotation()
|
cert_rotate = house_keeping.CertRotation()
|
||||||
while not cert_rotate_thread_event.is_set():
|
while not cert_rotate_thread_event.is_set():
|
||||||
LOG.debug("Initiating certification rotation ...")
|
LOG.debug("Initiating certification rotation ...")
|
||||||
@ -86,7 +84,7 @@ def main():
|
|||||||
gmr.TextGuruMeditation.setup_autorun(version)
|
gmr.TextGuruMeditation.setup_autorun(version)
|
||||||
|
|
||||||
timestamp = str(datetime.datetime.utcnow())
|
timestamp = str(datetime.datetime.utcnow())
|
||||||
LOG.info(_LI("Starting house keeping at %s"), timestamp)
|
LOG.info("Starting house keeping at %s", timestamp)
|
||||||
|
|
||||||
# Thread to perform spare amphora check
|
# Thread to perform spare amphora check
|
||||||
spare_amp_thread = threading.Thread(target=spare_amphora_check)
|
spare_amp_thread = threading.Thread(target=spare_amphora_check)
|
||||||
@ -108,11 +106,11 @@ def main():
|
|||||||
while True:
|
while True:
|
||||||
time.sleep(1)
|
time.sleep(1)
|
||||||
except KeyboardInterrupt:
|
except KeyboardInterrupt:
|
||||||
LOG.info(_LI("Attempting to gracefully terminate House-Keeping"))
|
LOG.info("Attempting to gracefully terminate House-Keeping")
|
||||||
spare_amp_thread_event.set()
|
spare_amp_thread_event.set()
|
||||||
db_cleanup_thread_event.set()
|
db_cleanup_thread_event.set()
|
||||||
cert_rotate_thread_event.set()
|
cert_rotate_thread_event.set()
|
||||||
spare_amp_thread.join()
|
spare_amp_thread.join()
|
||||||
db_cleanup_thread.join()
|
db_cleanup_thread.join()
|
||||||
cert_rotate_thread.join()
|
cert_rotate_thread.join()
|
||||||
LOG.info(_LI("House-Keeping process terminated"))
|
LOG.info("House-Keeping process terminated")
|
||||||
|
@ -19,7 +19,6 @@ from oslo_log import log as logging
|
|||||||
from oslo_utils import excutils
|
from oslo_utils import excutils
|
||||||
|
|
||||||
from octavia.common import keystone
|
from octavia.common import keystone
|
||||||
from octavia.i18n import _LE
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
@ -64,7 +63,7 @@ class NovaAuth(object):
|
|||||||
version=api_versions.APIVersion(NOVA_VERSION), **kwargs)
|
version=api_versions.APIVersion(NOVA_VERSION), **kwargs)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.exception(_LE("Error creating Nova client."))
|
LOG.exception("Error creating Nova client.")
|
||||||
return cls.nova_client
|
return cls.nova_client
|
||||||
|
|
||||||
|
|
||||||
@ -103,7 +102,7 @@ class NeutronAuth(object):
|
|||||||
NEUTRON_VERSION, **kwargs)
|
NEUTRON_VERSION, **kwargs)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.exception(_LE("Error creating Neutron client."))
|
LOG.exception("Error creating Neutron client.")
|
||||||
return cls.neutron_client
|
return cls.neutron_client
|
||||||
|
|
||||||
|
|
||||||
@ -142,5 +141,5 @@ class GlanceAuth(object):
|
|||||||
GLANCE_VERSION, **kwargs)
|
GLANCE_VERSION, **kwargs)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.exception(_LE("Error creating Glance client."))
|
LOG.exception("Error creating Glance client.")
|
||||||
return cls.glance_client
|
return cls.glance_client
|
||||||
|
@ -25,7 +25,6 @@ import oslo_messaging as messaging
|
|||||||
|
|
||||||
from octavia.common import constants
|
from octavia.common import constants
|
||||||
from octavia.common import utils
|
from octavia.common import utils
|
||||||
from octavia.i18n import _LI
|
|
||||||
from octavia import version
|
from octavia import version
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
@ -522,4 +521,4 @@ def setup_logging(conf):
|
|||||||
"""
|
"""
|
||||||
product_name = "octavia"
|
product_name = "octavia"
|
||||||
logging.setup(conf, product_name)
|
logging.setup(conf, product_name)
|
||||||
LOG.info(_LI("Logging enabled!"))
|
LOG.info("Logging enabled!")
|
||||||
|
@ -22,8 +22,6 @@ import six
|
|||||||
from oslo_utils import excutils
|
from oslo_utils import excutils
|
||||||
from webob import exc
|
from webob import exc
|
||||||
|
|
||||||
from octavia.i18n import _LE
|
|
||||||
|
|
||||||
|
|
||||||
class OctaviaException(Exception):
|
class OctaviaException(Exception):
|
||||||
"""Base Octavia Exception.
|
"""Base Octavia Exception.
|
||||||
@ -167,7 +165,7 @@ class ComputeBuildException(OctaviaException):
|
|||||||
|
|
||||||
|
|
||||||
class ComputeBuildQueueTimeoutException(OctaviaException):
|
class ComputeBuildQueueTimeoutException(OctaviaException):
|
||||||
message = _LE('Failed to get an amphora build slot.')
|
message = _('Failed to get an amphora build slot.')
|
||||||
|
|
||||||
|
|
||||||
class ComputeDeleteException(OctaviaException):
|
class ComputeDeleteException(OctaviaException):
|
||||||
|
@ -19,7 +19,6 @@ from oslo_policy import policy as oslo_policy
|
|||||||
from oslo_utils import excutils
|
from oslo_utils import excutils
|
||||||
|
|
||||||
from octavia.common import exceptions
|
from octavia.common import exceptions
|
||||||
from octavia.i18n import _LE
|
|
||||||
from octavia import policies
|
from octavia import policies
|
||||||
|
|
||||||
|
|
||||||
@ -93,7 +92,7 @@ class Policy(oslo_policy.Enforcer):
|
|||||||
action, target, credentials, do_raise=do_raise, exc=exc)
|
action, target, credentials, do_raise=do_raise, exc=exc)
|
||||||
except oslo_policy.PolicyNotRegistered:
|
except oslo_policy.PolicyNotRegistered:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.exception(_LE('Policy not registered'))
|
LOG.exception('Policy not registered')
|
||||||
except Exception:
|
except Exception:
|
||||||
credentials.pop('auth_token', None)
|
credentials.pop('auth_token', None)
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
|
@ -17,8 +17,6 @@ import logging
|
|||||||
from octavia.common import constants
|
from octavia.common import constants
|
||||||
from octavia.common import data_models
|
from octavia.common import data_models
|
||||||
from octavia.db import repositories as repo
|
from octavia.db import repositories as repo
|
||||||
from octavia.i18n import _LW
|
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
@ -36,9 +34,8 @@ class StatsMixin(object):
|
|||||||
db_ls = self.listener_stats_repo.get_all(
|
db_ls = self.listener_stats_repo.get_all(
|
||||||
session, listener_id=listener_id)
|
session, listener_id=listener_id)
|
||||||
if not db_ls:
|
if not db_ls:
|
||||||
LOG.warning(
|
LOG.warning("Listener Statistics for Listener %s was not found",
|
||||||
_LW("Listener Statistics for Listener %s was not found"),
|
listener_id)
|
||||||
listener_id)
|
|
||||||
|
|
||||||
statistics = data_models.ListenerStatistics(listener_id=listener_id)
|
statistics = data_models.ListenerStatistics(listener_id=listener_id)
|
||||||
|
|
||||||
|
@ -26,8 +26,6 @@ import six
|
|||||||
|
|
||||||
from octavia.common import data_models as data_models
|
from octavia.common import data_models as data_models
|
||||||
import octavia.common.exceptions as exceptions
|
import octavia.common.exceptions as exceptions
|
||||||
from octavia.i18n import _LE
|
|
||||||
|
|
||||||
|
|
||||||
X509_BEG = b'-----BEGIN CERTIFICATE-----'
|
X509_BEG = b'-----BEGIN CERTIFICATE-----'
|
||||||
X509_END = b'-----END CERTIFICATE-----'
|
X509_END = b'-----END CERTIFICATE-----'
|
||||||
@ -81,7 +79,7 @@ def _read_private_key(private_key_pem, passphrase=None):
|
|||||||
return serialization.load_pem_private_key(private_key_pem, passphrase,
|
return serialization.load_pem_private_key(private_key_pem, passphrase,
|
||||||
backends.default_backend())
|
backends.default_backend())
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE("Passphrase required."))
|
LOG.exception("Passphrase required.")
|
||||||
raise exceptions.NeedsPassphrase
|
raise exceptions.NeedsPassphrase
|
||||||
|
|
||||||
|
|
||||||
@ -162,7 +160,7 @@ def _parse_pkcs7_bundle(pkcs7):
|
|||||||
for cert in _get_certs_from_pkcs7_substrate(substrate):
|
for cert in _get_certs_from_pkcs7_substrate(substrate):
|
||||||
yield cert
|
yield cert
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE('Unreadable Certificate.'))
|
LOG.exception('Unreadable Certificate.')
|
||||||
raise exceptions.UnreadableCert
|
raise exceptions.UnreadableCert
|
||||||
|
|
||||||
# If no PEM encoding, assume this is DER encoded and try to decode
|
# If no PEM encoding, assume this is DER encoded and try to decode
|
||||||
@ -221,10 +219,10 @@ def _get_certs_from_pkcs7_substrate(substrate):
|
|||||||
asn1Spec=rfc2315.ContentInfo())
|
asn1Spec=rfc2315.ContentInfo())
|
||||||
contentType = contentInfo.getComponentByName('contentType')
|
contentType = contentInfo.getComponentByName('contentType')
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE('Unreadable Certificate.'))
|
LOG.exception('Unreadable Certificate.')
|
||||||
raise exceptions.UnreadableCert
|
raise exceptions.UnreadableCert
|
||||||
if contentType != rfc2315.signedData:
|
if contentType != rfc2315.signedData:
|
||||||
LOG.exception(_LE('Unreadable Certificate.'))
|
LOG.exception('Unreadable Certificate.')
|
||||||
raise exceptions.UnreadableCert
|
raise exceptions.UnreadableCert
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@ -232,7 +230,7 @@ def _get_certs_from_pkcs7_substrate(substrate):
|
|||||||
contentInfo.getComponentByName('content'),
|
contentInfo.getComponentByName('content'),
|
||||||
asn1Spec=rfc2315.SignedData())
|
asn1Spec=rfc2315.SignedData())
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE('Unreadable Certificate.'))
|
LOG.exception('Unreadable Certificate.')
|
||||||
raise exceptions.UnreadableCert
|
raise exceptions.UnreadableCert
|
||||||
|
|
||||||
for cert in content.getComponentByName('certificates'):
|
for cert in content.getComponentByName('certificates'):
|
||||||
@ -269,7 +267,7 @@ def get_host_names(certificate):
|
|||||||
|
|
||||||
return host_names
|
return host_names
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE('Unreadable Certificate.'))
|
LOG.exception('Unreadable Certificate.')
|
||||||
raise exceptions.UnreadableCert
|
raise exceptions.UnreadableCert
|
||||||
|
|
||||||
|
|
||||||
@ -284,7 +282,7 @@ def get_cert_expiration(certificate_pem):
|
|||||||
backends.default_backend())
|
backends.default_backend())
|
||||||
return cert.not_valid_after
|
return cert.not_valid_after
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE('Unreadable Certificate.'))
|
LOG.exception('Unreadable Certificate.')
|
||||||
raise exceptions.UnreadableCert
|
raise exceptions.UnreadableCert
|
||||||
|
|
||||||
|
|
||||||
@ -300,7 +298,7 @@ def _get_x509_from_pem_bytes(certificate_pem):
|
|||||||
x509cert = x509.load_pem_x509_certificate(certificate_pem,
|
x509cert = x509.load_pem_x509_certificate(certificate_pem,
|
||||||
backends.default_backend())
|
backends.default_backend())
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE('Unreadable Certificate.'))
|
LOG.exception('Unreadable Certificate.')
|
||||||
raise exceptions.UnreadableCert
|
raise exceptions.UnreadableCert
|
||||||
return x509cert
|
return x509cert
|
||||||
|
|
||||||
@ -315,7 +313,7 @@ def _get_x509_from_der_bytes(certificate_der):
|
|||||||
x509cert = x509.load_der_x509_certificate(certificate_der,
|
x509cert = x509.load_der_x509_certificate(certificate_der,
|
||||||
backends.default_backend())
|
backends.default_backend())
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE('Unreadable Certificate.'))
|
LOG.exception('Unreadable Certificate.')
|
||||||
raise exceptions.UnreadableCert
|
raise exceptions.UnreadableCert
|
||||||
return x509cert
|
return x509cert
|
||||||
|
|
||||||
|
@ -24,7 +24,6 @@ from octavia.common import constants
|
|||||||
from octavia.common import data_models as models
|
from octavia.common import data_models as models
|
||||||
from octavia.common import exceptions
|
from octavia.common import exceptions
|
||||||
from octavia.compute import compute_base
|
from octavia.compute import compute_base
|
||||||
from octavia.i18n import _LE, _LW
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
@ -51,20 +50,17 @@ def _extract_amp_image_id_by_tag(client, image_tag, image_owner):
|
|||||||
image_id = images[0]['id']
|
image_id = images[0]['id']
|
||||||
num_images = len(images)
|
num_images = len(images)
|
||||||
if num_images > 1:
|
if num_images > 1:
|
||||||
LOG.warning(
|
LOG.warning("A single Glance image should be tagged with %(tag)s tag, "
|
||||||
_LW("A single Glance image should be tagged with %(tag)s tag, "
|
"but at least two were found. Using %(image_id)s.",
|
||||||
"but at least two were found. Using %(image_id)s."),
|
{'tag': image_tag, 'image_id': image_id})
|
||||||
{'tag': image_tag, 'image_id': image_id}
|
|
||||||
)
|
|
||||||
return image_id
|
return image_id
|
||||||
|
|
||||||
|
|
||||||
def _get_image_uuid(client, image_id, image_tag, image_owner):
|
def _get_image_uuid(client, image_id, image_tag, image_owner):
|
||||||
if image_id:
|
if image_id:
|
||||||
if image_tag:
|
if image_tag:
|
||||||
LOG.warning(
|
LOG.warning("Both amp_image_id and amp_image_tag options defined. "
|
||||||
_LW("Both amp_image_id and amp_image_tag options defined. "
|
"Using the amp_image_id.")
|
||||||
"Using the amp_image_id."))
|
|
||||||
return image_id
|
return image_id
|
||||||
|
|
||||||
return _extract_amp_image_id_by_tag(client, image_tag, image_owner)
|
return _extract_amp_image_id_by_tag(client, image_tag, image_owner)
|
||||||
@ -157,7 +153,7 @@ class VirtualMachineManager(compute_base.ComputeBase):
|
|||||||
|
|
||||||
return amphora.id
|
return amphora.id
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE("Error building nova virtual machine."))
|
LOG.exception("Error building nova virtual machine.")
|
||||||
raise exceptions.ComputeBuildException()
|
raise exceptions.ComputeBuildException()
|
||||||
|
|
||||||
def delete(self, compute_id):
|
def delete(self, compute_id):
|
||||||
@ -168,10 +164,10 @@ class VirtualMachineManager(compute_base.ComputeBase):
|
|||||||
try:
|
try:
|
||||||
self.manager.delete(server=compute_id)
|
self.manager.delete(server=compute_id)
|
||||||
except nova_exceptions.NotFound:
|
except nova_exceptions.NotFound:
|
||||||
LOG.warning(_LW("Nova instance with id: %s not found. "
|
LOG.warning("Nova instance with id: %s not found. "
|
||||||
"Assuming already deleted."), compute_id)
|
"Assuming already deleted.", compute_id)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE("Error deleting nova virtual machine."))
|
LOG.exception("Error deleting nova virtual machine.")
|
||||||
raise exceptions.ComputeDeleteException()
|
raise exceptions.ComputeDeleteException()
|
||||||
|
|
||||||
def status(self, compute_id):
|
def status(self, compute_id):
|
||||||
@ -185,7 +181,7 @@ class VirtualMachineManager(compute_base.ComputeBase):
|
|||||||
if amphora and amphora.status == 'ACTIVE':
|
if amphora and amphora.status == 'ACTIVE':
|
||||||
return constants.UP
|
return constants.UP
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE("Error retrieving nova virtual machine status."))
|
LOG.exception("Error retrieving nova virtual machine status.")
|
||||||
raise exceptions.ComputeStatusException()
|
raise exceptions.ComputeStatusException()
|
||||||
return constants.DOWN
|
return constants.DOWN
|
||||||
|
|
||||||
@ -199,7 +195,7 @@ class VirtualMachineManager(compute_base.ComputeBase):
|
|||||||
try:
|
try:
|
||||||
amphora = self.manager.get(compute_id)
|
amphora = self.manager.get(compute_id)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE("Error retrieving nova virtual machine."))
|
LOG.exception("Error retrieving nova virtual machine.")
|
||||||
raise exceptions.ComputeGetException()
|
raise exceptions.ComputeGetException()
|
||||||
return self._translate_amphora(amphora)
|
return self._translate_amphora(amphora)
|
||||||
|
|
||||||
@ -246,7 +242,7 @@ class VirtualMachineManager(compute_base.ComputeBase):
|
|||||||
server_group_obj = self.server_groups.create(**kwargs)
|
server_group_obj = self.server_groups.create(**kwargs)
|
||||||
return server_group_obj
|
return server_group_obj
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE("Error create server group instance."))
|
LOG.exception("Error create server group instance.")
|
||||||
raise exceptions.ServerGroupObjectCreateException()
|
raise exceptions.ServerGroupObjectCreateException()
|
||||||
|
|
||||||
def delete_server_group(self, server_group_id):
|
def delete_server_group(self, server_group_id):
|
||||||
@ -259,8 +255,8 @@ class VirtualMachineManager(compute_base.ComputeBase):
|
|||||||
self.server_groups.delete(server_group_id)
|
self.server_groups.delete(server_group_id)
|
||||||
|
|
||||||
except nova_exceptions.NotFound:
|
except nova_exceptions.NotFound:
|
||||||
LOG.warning(_LW("Server group instance with id: %s not found. "
|
LOG.warning("Server group instance with id: %s not found. "
|
||||||
"Assuming already deleted."), server_group_id)
|
"Assuming already deleted.", server_group_id)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE("Error delete server group instance."))
|
LOG.exception("Error delete server group instance.")
|
||||||
raise exceptions.ServerGroupObjectDeleteException()
|
raise exceptions.ServerGroupObjectDeleteException()
|
||||||
|
@ -21,7 +21,6 @@ from oslo_log import log as logging
|
|||||||
from octavia.controller.worker import controller_worker as cw
|
from octavia.controller.worker import controller_worker as cw
|
||||||
from octavia.db import api as db_api
|
from octavia.db import api as db_api
|
||||||
from octavia.db import repositories as repo
|
from octavia.db import repositories as repo
|
||||||
from octavia.i18n import _LI
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
@ -50,12 +49,12 @@ class HealthManager(object):
|
|||||||
if amp is None:
|
if amp is None:
|
||||||
break
|
break
|
||||||
failover_count += 1
|
failover_count += 1
|
||||||
LOG.info(_LI("Stale amphora's id is: %s"),
|
LOG.info("Stale amphora's id is: %s",
|
||||||
amp.amphora_id)
|
amp.amphora_id)
|
||||||
executor.submit(self.cw.failover_amphora,
|
executor.submit(self.cw.failover_amphora,
|
||||||
amp.amphora_id)
|
amp.amphora_id)
|
||||||
if failover_count > 0:
|
if failover_count > 0:
|
||||||
LOG.info(_LI("Failed over %s amphora"),
|
LOG.info("Failed over %s amphora",
|
||||||
failover_count)
|
failover_count)
|
||||||
time.sleep(CONF.health_manager.health_check_interval)
|
time.sleep(CONF.health_manager.health_check_interval)
|
||||||
finally:
|
finally:
|
||||||
|
@ -25,8 +25,6 @@ from octavia.controller.healthmanager import update_serializer
|
|||||||
from octavia.controller.queue import event_queue
|
from octavia.controller.queue import event_queue
|
||||||
from octavia.db import api as db_api
|
from octavia.db import api as db_api
|
||||||
from octavia.db import repositories as repo
|
from octavia.db import repositories as repo
|
||||||
from octavia.i18n import _LE, _LW
|
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
@ -107,10 +105,9 @@ class UpdateHealthDb(object):
|
|||||||
last_update=(datetime.
|
last_update=(datetime.
|
||||||
datetime.utcnow()))
|
datetime.utcnow()))
|
||||||
else:
|
else:
|
||||||
LOG.warning(_LW('Amphora %(id)s health message reports %(found)i '
|
LOG.warning('Amphora %(id)s health message reports %(found)i '
|
||||||
'listeners when %(expected)i expected'),
|
'listeners when %(expected)i expected',
|
||||||
{'id': health['id'],
|
{'id': health['id'], 'found': len(listeners),
|
||||||
'found': len(listeners),
|
|
||||||
'expected': expected_listener_count})
|
'expected': expected_listener_count})
|
||||||
|
|
||||||
# We got a heartbeat so lb is healthy until proven otherwise
|
# We got a heartbeat so lb is healthy until proven otherwise
|
||||||
@ -129,8 +126,8 @@ class UpdateHealthDb(object):
|
|||||||
if lb_status == constants.ONLINE:
|
if lb_status == constants.ONLINE:
|
||||||
lb_status = constants.DEGRADED
|
lb_status = constants.DEGRADED
|
||||||
else:
|
else:
|
||||||
LOG.warning(_LW('Listener %(list)s reported status of '
|
LOG.warning(('Listener %(list)s reported status of '
|
||||||
'%(status)s'), {'list': listener_id,
|
'%(status)s'), {'list': listener_id,
|
||||||
'status': listener.get('status')})
|
'status': listener.get('status')})
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@ -140,7 +137,7 @@ class UpdateHealthDb(object):
|
|||||||
listener_id, listener_status
|
listener_id, listener_status
|
||||||
)
|
)
|
||||||
except sqlalchemy.orm.exc.NoResultFound:
|
except sqlalchemy.orm.exc.NoResultFound:
|
||||||
LOG.error(_LE("Listener %s is not in DB"), listener_id)
|
LOG.error("Listener %s is not in DB", listener_id)
|
||||||
|
|
||||||
pools = listener['pools']
|
pools = listener['pools']
|
||||||
for pool_id, pool in pools.items():
|
for pool_id, pool in pools.items():
|
||||||
@ -154,8 +151,8 @@ class UpdateHealthDb(object):
|
|||||||
pool_status = constants.ERROR
|
pool_status = constants.ERROR
|
||||||
lb_status = constants.ERROR
|
lb_status = constants.ERROR
|
||||||
else:
|
else:
|
||||||
LOG.warning(_LW('Pool %(pool)s reported status of '
|
LOG.warning(('Pool %(pool)s reported status of '
|
||||||
'%(status)s'), {'pool': pool_id,
|
'%(status)s'), {'pool': pool_id,
|
||||||
'status': pool.get('status')})
|
'status': pool.get('status')})
|
||||||
|
|
||||||
members = pool['members']
|
members = pool['members']
|
||||||
@ -173,9 +170,9 @@ class UpdateHealthDb(object):
|
|||||||
elif status == constants.NO_CHECK:
|
elif status == constants.NO_CHECK:
|
||||||
member_status = constants.NO_MONITOR
|
member_status = constants.NO_MONITOR
|
||||||
else:
|
else:
|
||||||
LOG.warning(_LW('Member %(mem)s reported status of '
|
LOG.warning('Member %(mem)s reported status of '
|
||||||
'%(status)s'), {'mem': member_id,
|
'%(status)s', {'mem': member_id,
|
||||||
'status': status})
|
'status': status})
|
||||||
|
|
||||||
try:
|
try:
|
||||||
if member_status is not None:
|
if member_status is not None:
|
||||||
@ -184,8 +181,8 @@ class UpdateHealthDb(object):
|
|||||||
member_id, member_status
|
member_id, member_status
|
||||||
)
|
)
|
||||||
except sqlalchemy.orm.exc.NoResultFound:
|
except sqlalchemy.orm.exc.NoResultFound:
|
||||||
LOG.error(_LE("Member %s is not able to update "
|
LOG.error("Member %s is not able to update "
|
||||||
"in DB"), member_id)
|
"in DB", member_id)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
if pool_status is not None:
|
if pool_status is not None:
|
||||||
@ -194,7 +191,7 @@ class UpdateHealthDb(object):
|
|||||||
pool_id, pool_status
|
pool_id, pool_status
|
||||||
)
|
)
|
||||||
except sqlalchemy.orm.exc.NoResultFound:
|
except sqlalchemy.orm.exc.NoResultFound:
|
||||||
LOG.error(_LE("Pool %s is not in DB"), pool_id)
|
LOG.error("Pool %s is not in DB", pool_id)
|
||||||
|
|
||||||
# Update the load balancer status last
|
# Update the load balancer status last
|
||||||
# TODO(sbalukoff): This logic will need to be adjusted if we
|
# TODO(sbalukoff): This logic will need to be adjusted if we
|
||||||
@ -208,7 +205,7 @@ class UpdateHealthDb(object):
|
|||||||
constants.LOADBALANCER, lb_id, lb_status
|
constants.LOADBALANCER, lb_id, lb_status
|
||||||
)
|
)
|
||||||
except sqlalchemy.orm.exc.NoResultFound:
|
except sqlalchemy.orm.exc.NoResultFound:
|
||||||
LOG.error(_LE("Load balancer %s is not in DB"), lb_id)
|
LOG.error("Load balancer %s is not in DB", lb_id)
|
||||||
|
|
||||||
|
|
||||||
class UpdateStatsDb(stats.StatsMixin):
|
class UpdateStatsDb(stats.StatsMixin):
|
||||||
|
@ -22,7 +22,6 @@ from octavia.common import constants
|
|||||||
from octavia.controller.worker import controller_worker as cw
|
from octavia.controller.worker import controller_worker as cw
|
||||||
from octavia.db import api as db_api
|
from octavia.db import api as db_api
|
||||||
from octavia.db import repositories as repo
|
from octavia.db import repositories as repo
|
||||||
from octavia.i18n import _LI
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
@ -47,8 +46,7 @@ class SpareAmphora(object):
|
|||||||
|
|
||||||
# When the current spare amphora is less than required
|
# When the current spare amphora is less than required
|
||||||
if diff_count > 0:
|
if diff_count > 0:
|
||||||
LOG.info(_LI("Initiating creation of %d spare amphora.") %
|
LOG.info("Initiating creation of %d spare amphora." % diff_count)
|
||||||
diff_count)
|
|
||||||
|
|
||||||
# Call Amphora Create Flow diff_count times
|
# Call Amphora Create Flow diff_count times
|
||||||
for i in range(1, diff_count + 1):
|
for i in range(1, diff_count + 1):
|
||||||
@ -56,8 +54,7 @@ class SpareAmphora(object):
|
|||||||
self.cw.create_amphora()
|
self.cw.create_amphora()
|
||||||
|
|
||||||
else:
|
else:
|
||||||
LOG.debug(_LI("Current spare amphora count satisfies the "
|
LOG.debug("Current spare amphora count satisfies the requirement")
|
||||||
"requirement"))
|
|
||||||
|
|
||||||
|
|
||||||
class DatabaseCleanup(object):
|
class DatabaseCleanup(object):
|
||||||
@ -77,9 +74,9 @@ class DatabaseCleanup(object):
|
|||||||
for amp in amphora:
|
for amp in amphora:
|
||||||
if self.amp_health_repo.check_amphora_expired(session, amp.id,
|
if self.amp_health_repo.check_amphora_expired(session, amp.id,
|
||||||
exp_age):
|
exp_age):
|
||||||
LOG.info(_LI('Attempting to delete Amphora id : %s'), amp.id)
|
LOG.info('Attempting to delete Amphora id : %s', amp.id)
|
||||||
self.amp_repo.delete(session, id=amp.id)
|
self.amp_repo.delete(session, id=amp.id)
|
||||||
LOG.info(_LI('Deleted Amphora id : %s') % amp.id)
|
LOG.info('Deleted Amphora id : %s' % amp.id)
|
||||||
|
|
||||||
def cleanup_load_balancers(self):
|
def cleanup_load_balancers(self):
|
||||||
"""Checks the DB for old load balancers and triggers their removal."""
|
"""Checks the DB for old load balancers and triggers their removal."""
|
||||||
@ -93,10 +90,9 @@ class DatabaseCleanup(object):
|
|||||||
for lb in load_balancers:
|
for lb in load_balancers:
|
||||||
if self.lb_repo.check_load_balancer_expired(session, lb.id,
|
if self.lb_repo.check_load_balancer_expired(session, lb.id,
|
||||||
exp_age):
|
exp_age):
|
||||||
LOG.info(_LI('Attempting to delete load balancer id : %s'),
|
LOG.info('Attempting to delete load balancer id : %s', lb.id)
|
||||||
lb.id)
|
|
||||||
self.lb_repo.delete(session, id=lb.id)
|
self.lb_repo.delete(session, id=lb.id)
|
||||||
LOG.info(_LI('Deleted load balancer id : %s') % lb.id)
|
LOG.info('Deleted load balancer id : %s' % lb.id)
|
||||||
|
|
||||||
|
|
||||||
class CertRotation(object):
|
class CertRotation(object):
|
||||||
@ -120,7 +116,7 @@ class CertRotation(object):
|
|||||||
LOG.debug("Cert expired amphora's id is: %s", amp.id)
|
LOG.debug("Cert expired amphora's id is: %s", amp.id)
|
||||||
executor.submit(self.cw.amphora_cert_rotation, amp.id)
|
executor.submit(self.cw.amphora_cert_rotation, amp.id)
|
||||||
if rotation_count > 0:
|
if rotation_count > 0:
|
||||||
LOG.info(_LI("Rotated certificates for %s amphora") %
|
LOG.info("Rotated certificates for %s amphora" %
|
||||||
rotation_count)
|
rotation_count)
|
||||||
finally:
|
finally:
|
||||||
executor.shutdown(wait=True)
|
executor.shutdown(wait=True)
|
||||||
|
@ -18,7 +18,6 @@ import oslo_messaging as messaging
|
|||||||
from oslo_messaging.rpc import dispatcher
|
from oslo_messaging.rpc import dispatcher
|
||||||
|
|
||||||
from octavia.controller.queue import endpoint
|
from octavia.controller.queue import endpoint
|
||||||
from octavia.i18n import _LI
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
@ -35,7 +34,7 @@ class ConsumerService(cotyledon.Service):
|
|||||||
self.message_listener = None
|
self.message_listener = None
|
||||||
|
|
||||||
def run(self):
|
def run(self):
|
||||||
LOG.info(_LI('Starting consumer...'))
|
LOG.info('Starting consumer...')
|
||||||
transport = messaging.get_transport(self.conf)
|
transport = messaging.get_transport(self.conf)
|
||||||
target = messaging.Target(topic=self.topic, server=self.server,
|
target = messaging.Target(topic=self.topic, server=self.server,
|
||||||
fanout=False)
|
fanout=False)
|
||||||
@ -47,15 +46,14 @@ class ConsumerService(cotyledon.Service):
|
|||||||
|
|
||||||
def terminate(self, graceful=False):
|
def terminate(self, graceful=False):
|
||||||
if self.message_listener:
|
if self.message_listener:
|
||||||
LOG.info(_LI('Stopping consumer...'))
|
LOG.info('Stopping consumer...')
|
||||||
self.message_listener.stop()
|
self.message_listener.stop()
|
||||||
if graceful:
|
if graceful:
|
||||||
LOG.info(
|
LOG.info('Consumer successfully stopped. Waiting for final '
|
||||||
_LI('Consumer successfully stopped. Waiting for final '
|
'messages to be processed...')
|
||||||
'messages to be processed...'))
|
|
||||||
self.message_listener.wait()
|
self.message_listener.wait()
|
||||||
if self.endpoints:
|
if self.endpoints:
|
||||||
LOG.info(_LI('Shutting down endpoint worker executors...'))
|
LOG.info('Shutting down endpoint worker executors...')
|
||||||
for e in self.endpoints:
|
for e in self.endpoints:
|
||||||
try:
|
try:
|
||||||
e.worker.executor.shutdown()
|
e.worker.executor.shutdown()
|
||||||
|
@ -18,7 +18,6 @@ import oslo_messaging as messaging
|
|||||||
from stevedore import driver as stevedore_driver
|
from stevedore import driver as stevedore_driver
|
||||||
|
|
||||||
from octavia.common import constants
|
from octavia.common import constants
|
||||||
from octavia.i18n import _LI
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
|
|
||||||
@ -41,87 +40,87 @@ class Endpoint(object):
|
|||||||
).driver
|
).driver
|
||||||
|
|
||||||
def create_load_balancer(self, context, load_balancer_id):
|
def create_load_balancer(self, context, load_balancer_id):
|
||||||
LOG.info(_LI('Creating load balancer \'%s\'...'), load_balancer_id)
|
LOG.info('Creating load balancer \'%s\'...'), load_balancer_id
|
||||||
self.worker.create_load_balancer(load_balancer_id)
|
self.worker.create_load_balancer(load_balancer_id)
|
||||||
|
|
||||||
def update_load_balancer(self, context, load_balancer_id,
|
def update_load_balancer(self, context, load_balancer_id,
|
||||||
load_balancer_updates):
|
load_balancer_updates):
|
||||||
LOG.info(_LI('Updating load balancer \'%s\'...'), load_balancer_id)
|
LOG.info('Updating load balancer \'%s\'...', load_balancer_id)
|
||||||
self.worker.update_load_balancer(load_balancer_id,
|
self.worker.update_load_balancer(load_balancer_id,
|
||||||
load_balancer_updates)
|
load_balancer_updates)
|
||||||
|
|
||||||
def delete_load_balancer(self, context, load_balancer_id, cascade=False):
|
def delete_load_balancer(self, context, load_balancer_id, cascade=False):
|
||||||
LOG.info(_LI('Deleting load balancer \'%s\'...'), load_balancer_id)
|
LOG.info('Deleting load balancer \'%s\'...', load_balancer_id)
|
||||||
self.worker.delete_load_balancer(load_balancer_id, cascade)
|
self.worker.delete_load_balancer(load_balancer_id, cascade)
|
||||||
|
|
||||||
def create_listener(self, context, listener_id):
|
def create_listener(self, context, listener_id):
|
||||||
LOG.info(_LI('Creating listener \'%s\'...'), listener_id)
|
LOG.info('Creating listener \'%s\'...', listener_id)
|
||||||
self.worker.create_listener(listener_id)
|
self.worker.create_listener(listener_id)
|
||||||
|
|
||||||
def update_listener(self, context, listener_id, listener_updates):
|
def update_listener(self, context, listener_id, listener_updates):
|
||||||
LOG.info(_LI('Updating listener \'%s\'...'), listener_id)
|
LOG.info('Updating listener \'%s\'...', listener_id)
|
||||||
self.worker.update_listener(listener_id, listener_updates)
|
self.worker.update_listener(listener_id, listener_updates)
|
||||||
|
|
||||||
def delete_listener(self, context, listener_id):
|
def delete_listener(self, context, listener_id):
|
||||||
LOG.info(_LI('Deleting listener \'%s\'...'), listener_id)
|
LOG.info('Deleting listener \'%s\'...', listener_id)
|
||||||
self.worker.delete_listener(listener_id)
|
self.worker.delete_listener(listener_id)
|
||||||
|
|
||||||
def create_pool(self, context, pool_id):
|
def create_pool(self, context, pool_id):
|
||||||
LOG.info(_LI('Creating pool \'%s\'...'), pool_id)
|
LOG.info('Creating pool \'%s\'...', pool_id)
|
||||||
self.worker.create_pool(pool_id)
|
self.worker.create_pool(pool_id)
|
||||||
|
|
||||||
def update_pool(self, context, pool_id, pool_updates):
|
def update_pool(self, context, pool_id, pool_updates):
|
||||||
LOG.info(_LI('Updating pool \'%s\'...'), pool_id)
|
LOG.info('Updating pool \'%s\'...', pool_id)
|
||||||
self.worker.update_pool(pool_id, pool_updates)
|
self.worker.update_pool(pool_id, pool_updates)
|
||||||
|
|
||||||
def delete_pool(self, context, pool_id):
|
def delete_pool(self, context, pool_id):
|
||||||
LOG.info(_LI('Deleting pool \'%s\'...'), pool_id)
|
LOG.info('Deleting pool \'%s\'...', pool_id)
|
||||||
self.worker.delete_pool(pool_id)
|
self.worker.delete_pool(pool_id)
|
||||||
|
|
||||||
def create_health_monitor(self, context, pool_id):
|
def create_health_monitor(self, context, pool_id):
|
||||||
LOG.info(_LI('Creating health monitor on pool \'%s\'...'), pool_id)
|
LOG.info('Creating health monitor on pool \'%s\'...', pool_id)
|
||||||
self.worker.create_health_monitor(pool_id)
|
self.worker.create_health_monitor(pool_id)
|
||||||
|
|
||||||
def update_health_monitor(self, context, pool_id, health_monitor_updates):
|
def update_health_monitor(self, context, pool_id, health_monitor_updates):
|
||||||
LOG.info(_LI('Updating health monitor on pool \'%s\'...'), pool_id)
|
LOG.info('Updating health monitor on pool \'%s\'...', pool_id)
|
||||||
self.worker.update_health_monitor(pool_id, health_monitor_updates)
|
self.worker.update_health_monitor(pool_id, health_monitor_updates)
|
||||||
|
|
||||||
def delete_health_monitor(self, context, pool_id):
|
def delete_health_monitor(self, context, pool_id):
|
||||||
LOG.info(_LI('Deleting health monitor on pool \'%s\'...'), pool_id)
|
LOG.info('Deleting health monitor on pool \'%s\'...', pool_id)
|
||||||
self.worker.delete_health_monitor(pool_id)
|
self.worker.delete_health_monitor(pool_id)
|
||||||
|
|
||||||
def create_member(self, context, member_id):
|
def create_member(self, context, member_id):
|
||||||
LOG.info(_LI('Creating member \'%s\'...') % member_id)
|
LOG.info('Creating member \'%s\'...' % member_id)
|
||||||
self.worker.create_member(member_id)
|
self.worker.create_member(member_id)
|
||||||
|
|
||||||
def update_member(self, context, member_id, member_updates):
|
def update_member(self, context, member_id, member_updates):
|
||||||
LOG.info(_LI('Updating member \'%s\'...') % member_id)
|
LOG.info('Updating member \'%s\'...' % member_id)
|
||||||
self.worker.update_member(member_id, member_updates)
|
self.worker.update_member(member_id, member_updates)
|
||||||
|
|
||||||
def delete_member(self, context, member_id):
|
def delete_member(self, context, member_id):
|
||||||
LOG.info(_LI('Deleting member \'%s\'...') % member_id)
|
LOG.info('Deleting member \'%s\'...' % member_id)
|
||||||
self.worker.delete_member(member_id)
|
self.worker.delete_member(member_id)
|
||||||
|
|
||||||
def create_l7policy(self, context, l7policy_id):
|
def create_l7policy(self, context, l7policy_id):
|
||||||
LOG.info(_LI('Creating l7policy \'%s\'...') % l7policy_id)
|
LOG.info('Creating l7policy \'%s\'...' % l7policy_id)
|
||||||
self.worker.create_l7policy(l7policy_id)
|
self.worker.create_l7policy(l7policy_id)
|
||||||
|
|
||||||
def update_l7policy(self, context, l7policy_id, l7policy_updates):
|
def update_l7policy(self, context, l7policy_id, l7policy_updates):
|
||||||
LOG.info(_LI('Updating l7policy \'%s\'...') % l7policy_id)
|
LOG.info('Updating l7policy \'%s\'...' % l7policy_id)
|
||||||
self.worker.update_l7policy(l7policy_id, l7policy_updates)
|
self.worker.update_l7policy(l7policy_id, l7policy_updates)
|
||||||
|
|
||||||
def delete_l7policy(self, context, l7policy_id):
|
def delete_l7policy(self, context, l7policy_id):
|
||||||
LOG.info(_LI('Deleting l7policy \'%s\'...') % l7policy_id)
|
LOG.info('Deleting l7policy \'%s\'...' % l7policy_id)
|
||||||
self.worker.delete_l7policy(l7policy_id)
|
self.worker.delete_l7policy(l7policy_id)
|
||||||
|
|
||||||
def create_l7rule(self, context, l7rule_id):
|
def create_l7rule(self, context, l7rule_id):
|
||||||
LOG.info(_LI('Creating l7rule \'%s\'...') % l7rule_id)
|
LOG.info('Creating l7rule \'%s\'...' % l7rule_id)
|
||||||
self.worker.create_l7rule(l7rule_id)
|
self.worker.create_l7rule(l7rule_id)
|
||||||
|
|
||||||
def update_l7rule(self, context, l7rule_id, l7rule_updates):
|
def update_l7rule(self, context, l7rule_id, l7rule_updates):
|
||||||
LOG.info(_LI('Updating l7rule \'%s\'...') % l7rule_id)
|
LOG.info('Updating l7rule \'%s\'...' % l7rule_id)
|
||||||
self.worker.update_l7rule(l7rule_id, l7rule_updates)
|
self.worker.update_l7rule(l7rule_id, l7rule_updates)
|
||||||
|
|
||||||
def delete_l7rule(self, context, l7rule_id):
|
def delete_l7rule(self, context, l7rule_id):
|
||||||
LOG.info(_LI('Deleting l7rule \'%s\'...') % l7rule_id)
|
LOG.info('Deleting l7rule \'%s\'...' % l7rule_id)
|
||||||
self.worker.delete_l7rule(l7rule_id)
|
self.worker.delete_l7rule(l7rule_id)
|
||||||
|
@ -27,7 +27,6 @@ from octavia.controller.worker.flows import member_flows
|
|||||||
from octavia.controller.worker.flows import pool_flows
|
from octavia.controller.worker.flows import pool_flows
|
||||||
from octavia.db import api as db_apis
|
from octavia.db import api as db_apis
|
||||||
from octavia.db import repositories as repo
|
from octavia.db import repositories as repo
|
||||||
from octavia.i18n import _LE, _LI
|
|
||||||
|
|
||||||
from oslo_config import cfg
|
from oslo_config import cfg
|
||||||
from oslo_utils import excutils
|
from oslo_utils import excutils
|
||||||
@ -653,7 +652,7 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
|
|||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE("Failover exception: %s") % e)
|
LOG.error("Failover exception: %s" % e)
|
||||||
|
|
||||||
def amphora_cert_rotation(self, amphora_id):
|
def amphora_cert_rotation(self, amphora_id):
|
||||||
"""Perform cert rotation for an amphora.
|
"""Perform cert rotation for an amphora.
|
||||||
@ -665,8 +664,7 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
|
|||||||
|
|
||||||
amp = self._amphora_repo.get(db_apis.get_session(),
|
amp = self._amphora_repo.get(db_apis.get_session(),
|
||||||
id=amphora_id)
|
id=amphora_id)
|
||||||
LOG.info(_LI("Start amphora cert rotation, amphora's id is: %s")
|
LOG.info("Start amphora cert rotation, amphora's id is: %s" % amp.id)
|
||||||
% amp.id)
|
|
||||||
|
|
||||||
certrotation_amphora_tf = self._taskflow_load(
|
certrotation_amphora_tf = self._taskflow_load(
|
||||||
self._amphora_flows.cert_rotate_amphora_flow(),
|
self._amphora_flows.cert_rotate_amphora_flow(),
|
||||||
|
@ -30,8 +30,6 @@ from octavia.controller.worker.tasks import database_tasks
|
|||||||
from octavia.controller.worker.tasks import lifecycle_tasks
|
from octavia.controller.worker.tasks import lifecycle_tasks
|
||||||
from octavia.controller.worker.tasks import model_tasks
|
from octavia.controller.worker.tasks import model_tasks
|
||||||
from octavia.controller.worker.tasks import network_tasks
|
from octavia.controller.worker.tasks import network_tasks
|
||||||
from octavia.i18n import _LE
|
|
||||||
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
@ -63,8 +61,8 @@ class LoadBalancerFlows(object):
|
|||||||
elif topology == constants.TOPOLOGY_SINGLE:
|
elif topology == constants.TOPOLOGY_SINGLE:
|
||||||
lb_create_flow.add(*self._create_single_topology())
|
lb_create_flow.add(*self._create_single_topology())
|
||||||
else:
|
else:
|
||||||
LOG.error(_LE("Unknown topology: %s. Unable to build load "
|
LOG.error("Unknown topology: %s. Unable to build load balancer.",
|
||||||
"balancer."), topology)
|
topology)
|
||||||
raise exceptions.InvalidTopology(topology=topology)
|
raise exceptions.InvalidTopology(topology=topology)
|
||||||
|
|
||||||
post_amp_prefix = constants.POST_LB_AMP_ASSOCIATION_SUBFLOW
|
post_amp_prefix = constants.POST_LB_AMP_ASSOCIATION_SUBFLOW
|
||||||
|
@ -19,7 +19,6 @@ import logging
|
|||||||
from octavia.common import constants
|
from octavia.common import constants
|
||||||
from octavia.db import api as db_apis
|
from octavia.db import api as db_apis
|
||||||
from octavia.db import repositories as repo
|
from octavia.db import repositories as repo
|
||||||
from octavia.i18n import _LE
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
@ -51,9 +50,9 @@ class TaskUtils(object):
|
|||||||
id=amphora_id,
|
id=amphora_id,
|
||||||
status=constants.ERROR)
|
status=constants.ERROR)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("Failed to update amphora %(amp)s "
|
LOG.error("Failed to update amphora %(amp)s "
|
||||||
"status to ERROR due to: "
|
"status to ERROR due to: "
|
||||||
"%(except)s"), {'amp': amphora_id, 'except': e})
|
"%(except)s", {'amp': amphora_id, 'except': e})
|
||||||
|
|
||||||
def mark_health_mon_prov_status_error(self, health_mon_id):
|
def mark_health_mon_prov_status_error(self, health_mon_id):
|
||||||
"""Sets a health monitor provisioning status to ERROR.
|
"""Sets a health monitor provisioning status to ERROR.
|
||||||
@ -67,10 +66,9 @@ class TaskUtils(object):
|
|||||||
pool_id=health_mon_id,
|
pool_id=health_mon_id,
|
||||||
provisioning_status=constants.ERROR)
|
provisioning_status=constants.ERROR)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("Failed to update health monitor %(health)s "
|
LOG.error("Failed to update health monitor %(health)s "
|
||||||
"provisioning status to ERROR due to: "
|
"provisioning status to ERROR due to: "
|
||||||
"%(except)s"), {'health': health_mon_id,
|
"%(except)s", {'health': health_mon_id, 'except': e})
|
||||||
'except': e})
|
|
||||||
|
|
||||||
def mark_l7policy_prov_status_error(self, l7policy_id):
|
def mark_l7policy_prov_status_error(self, l7policy_id):
|
||||||
"""Sets a L7 policy provisioning status to ERROR.
|
"""Sets a L7 policy provisioning status to ERROR.
|
||||||
@ -84,9 +82,9 @@ class TaskUtils(object):
|
|||||||
id=l7policy_id,
|
id=l7policy_id,
|
||||||
provisioning_status=constants.ERROR)
|
provisioning_status=constants.ERROR)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("Failed to update l7policy %(l7p)s "
|
LOG.error("Failed to update l7policy %(l7p)s "
|
||||||
"provisioning status to ERROR due to: "
|
"provisioning status to ERROR due to: "
|
||||||
"%(except)s"), {'l7p': l7policy_id, 'except': e})
|
"%(except)s", {'l7p': l7policy_id, 'except': e})
|
||||||
|
|
||||||
def mark_l7rule_prov_status_error(self, l7rule_id):
|
def mark_l7rule_prov_status_error(self, l7rule_id):
|
||||||
"""Sets a L7 rule provisioning status to ERROR.
|
"""Sets a L7 rule provisioning status to ERROR.
|
||||||
@ -100,9 +98,9 @@ class TaskUtils(object):
|
|||||||
id=l7rule_id,
|
id=l7rule_id,
|
||||||
provisioning_status=constants.ERROR)
|
provisioning_status=constants.ERROR)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("Failed to update l7rule %(l7r)s "
|
LOG.error("Failed to update l7rule %(l7r)s "
|
||||||
"provisioning status to ERROR due to: "
|
"provisioning status to ERROR due to: "
|
||||||
"%(except)s"), {'l7r': l7rule_id, 'except': e})
|
"%(except)s", {'l7r': l7rule_id, 'except': e})
|
||||||
|
|
||||||
def mark_listener_prov_status_error(self, listener_id):
|
def mark_listener_prov_status_error(self, listener_id):
|
||||||
"""Sets a listener provisioning status to ERROR.
|
"""Sets a listener provisioning status to ERROR.
|
||||||
@ -116,9 +114,9 @@ class TaskUtils(object):
|
|||||||
id=listener_id,
|
id=listener_id,
|
||||||
provisioning_status=constants.ERROR)
|
provisioning_status=constants.ERROR)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("Failed to update listener %(list)s "
|
LOG.error("Failed to update listener %(list)s "
|
||||||
"provisioning status to ERROR due to: "
|
"provisioning status to ERROR due to: "
|
||||||
"%(except)s"), {'list': listener_id, 'except': e})
|
"%(except)s", {'list': listener_id, 'except': e})
|
||||||
|
|
||||||
def mark_loadbalancer_prov_status_error(self, loadbalancer_id):
|
def mark_loadbalancer_prov_status_error(self, loadbalancer_id):
|
||||||
"""Sets a load balancer provisioning status to ERROR.
|
"""Sets a load balancer provisioning status to ERROR.
|
||||||
@ -133,9 +131,9 @@ class TaskUtils(object):
|
|||||||
id=loadbalancer_id,
|
id=loadbalancer_id,
|
||||||
provisioning_status=constants.ERROR)
|
provisioning_status=constants.ERROR)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("Failed to update load balancer %(lb)s "
|
LOG.error("Failed to update load balancer %(lb)s "
|
||||||
"provisioning status to ERROR due to: "
|
"provisioning status to ERROR due to: "
|
||||||
"%(except)s"), {'lb': loadbalancer_id, 'except': e})
|
"%(except)s", {'lb': loadbalancer_id, 'except': e})
|
||||||
|
|
||||||
def mark_listener_prov_status_active(self, listener_id):
|
def mark_listener_prov_status_active(self, listener_id):
|
||||||
"""Sets a listener provisioning status to ACTIVE.
|
"""Sets a listener provisioning status to ACTIVE.
|
||||||
@ -150,9 +148,9 @@ class TaskUtils(object):
|
|||||||
id=listener_id,
|
id=listener_id,
|
||||||
provisioning_status=constants.ACTIVE)
|
provisioning_status=constants.ACTIVE)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("Failed to update listener %(list)s "
|
LOG.error("Failed to update listener %(list)s "
|
||||||
"provisioning status to ACTIVE due to: "
|
"provisioning status to ACTIVE due to: "
|
||||||
"%(except)s"), {'list': listener_id, 'except': e})
|
"%(except)s", {'list': listener_id, 'except': e})
|
||||||
|
|
||||||
def mark_pool_prov_status_active(self, pool_id):
|
def mark_pool_prov_status_active(self, pool_id):
|
||||||
"""Sets a pool provisioning status to ACTIVE.
|
"""Sets a pool provisioning status to ACTIVE.
|
||||||
@ -166,9 +164,9 @@ class TaskUtils(object):
|
|||||||
id=pool_id,
|
id=pool_id,
|
||||||
provisioning_status=constants.ACTIVE)
|
provisioning_status=constants.ACTIVE)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("Failed to update pool %(pool)s "
|
LOG.error("Failed to update pool %(pool)s provisioning status "
|
||||||
"provisioning status to ACTIVE due to: "
|
"to ACTIVE due to: %(except)s", {'pool': pool_id,
|
||||||
"%(except)s"), {'pool': pool_id, 'except': e})
|
'except': e})
|
||||||
|
|
||||||
def mark_loadbalancer_prov_status_active(self, loadbalancer_id):
|
def mark_loadbalancer_prov_status_active(self, loadbalancer_id):
|
||||||
"""Sets a load balancer provisioning status to ACTIVE.
|
"""Sets a load balancer provisioning status to ACTIVE.
|
||||||
@ -183,9 +181,9 @@ class TaskUtils(object):
|
|||||||
id=loadbalancer_id,
|
id=loadbalancer_id,
|
||||||
provisioning_status=constants.ACTIVE)
|
provisioning_status=constants.ACTIVE)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("Failed to update load balancer %(lb)s "
|
LOG.error("Failed to update load balancer %(lb)s "
|
||||||
"provisioning status to ACTIVE due to: "
|
"provisioning status to ACTIVE due to: "
|
||||||
"%(except)s"), {'lb': loadbalancer_id, 'except': e})
|
"%(except)s", {'lb': loadbalancer_id, 'except': e})
|
||||||
|
|
||||||
def mark_member_prov_status_error(self, member_id):
|
def mark_member_prov_status_error(self, member_id):
|
||||||
"""Sets a member provisioning status to ERROR.
|
"""Sets a member provisioning status to ERROR.
|
||||||
@ -199,9 +197,9 @@ class TaskUtils(object):
|
|||||||
id=member_id,
|
id=member_id,
|
||||||
provisioning_status=constants.ERROR)
|
provisioning_status=constants.ERROR)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("Failed to update member %(member)s "
|
LOG.error("Failed to update member %(member)s "
|
||||||
"provisioning status to ERROR due to: "
|
"provisioning status to ERROR due to: "
|
||||||
"%(except)s"), {'member': member_id, 'except': e})
|
"%(except)s", {'member': member_id, 'except': e})
|
||||||
|
|
||||||
def mark_pool_prov_status_error(self, pool_id):
|
def mark_pool_prov_status_error(self, pool_id):
|
||||||
"""Sets a pool provisioning status to ERROR.
|
"""Sets a pool provisioning status to ERROR.
|
||||||
@ -215,6 +213,6 @@ class TaskUtils(object):
|
|||||||
id=pool_id,
|
id=pool_id,
|
||||||
provisioning_status=constants.ERROR)
|
provisioning_status=constants.ERROR)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("Failed to update pool %(pool)s "
|
LOG.error("Failed to update pool %(pool)s "
|
||||||
"provisioning status to ERROR due to: "
|
"provisioning status to ERROR due to: "
|
||||||
"%(except)s"), {'pool': pool_id, 'except': e})
|
"%(except)s", {'pool': pool_id, 'except': e})
|
||||||
|
@ -25,7 +25,6 @@ from octavia.common import constants
|
|||||||
from octavia.controller.worker import task_utils as task_utilities
|
from octavia.controller.worker import task_utils as task_utilities
|
||||||
from octavia.db import api as db_apis
|
from octavia.db import api as db_apis
|
||||||
from octavia.db import repositories as repo
|
from octavia.db import repositories as repo
|
||||||
from octavia.i18n import _LE, _LW
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
@ -59,7 +58,7 @@ class ListenersUpdate(BaseAmphoraTask):
|
|||||||
def revert(self, loadbalancer, *args, **kwargs):
|
def revert(self, loadbalancer, *args, **kwargs):
|
||||||
"""Handle failed listeners updates."""
|
"""Handle failed listeners updates."""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting listeners updates."))
|
LOG.warning("Reverting listeners updates.")
|
||||||
|
|
||||||
for listener in loadbalancer.listeners:
|
for listener in loadbalancer.listeners:
|
||||||
self.task_utils.mark_listener_prov_status_error(listener.id)
|
self.task_utils.mark_listener_prov_status_error(listener.id)
|
||||||
@ -78,7 +77,7 @@ class ListenerStop(BaseAmphoraTask):
|
|||||||
def revert(self, listener, *args, **kwargs):
|
def revert(self, listener, *args, **kwargs):
|
||||||
"""Handle a failed listener stop."""
|
"""Handle a failed listener stop."""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting listener stop."))
|
LOG.warning("Reverting listener stop.")
|
||||||
|
|
||||||
self.task_utils.mark_listener_prov_status_error(listener.id)
|
self.task_utils.mark_listener_prov_status_error(listener.id)
|
||||||
|
|
||||||
@ -96,7 +95,7 @@ class ListenerStart(BaseAmphoraTask):
|
|||||||
def revert(self, listener, *args, **kwargs):
|
def revert(self, listener, *args, **kwargs):
|
||||||
"""Handle a failed listener start."""
|
"""Handle a failed listener start."""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting listener start."))
|
LOG.warning("Reverting listener start.")
|
||||||
|
|
||||||
self.task_utils.mark_listener_prov_status_error(listener.id)
|
self.task_utils.mark_listener_prov_status_error(listener.id)
|
||||||
|
|
||||||
@ -115,7 +114,7 @@ class ListenersStart(BaseAmphoraTask):
|
|||||||
def revert(self, listeners, *args, **kwargs):
|
def revert(self, listeners, *args, **kwargs):
|
||||||
"""Handle failed listeners starts."""
|
"""Handle failed listeners starts."""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting listeners starts."))
|
LOG.warning("Reverting listeners starts.")
|
||||||
for listener in listeners:
|
for listener in listeners:
|
||||||
self.task_utils.mark_listener_prov_status_error(listener.id)
|
self.task_utils.mark_listener_prov_status_error(listener.id)
|
||||||
|
|
||||||
@ -133,7 +132,7 @@ class ListenerDelete(BaseAmphoraTask):
|
|||||||
def revert(self, listener, *args, **kwargs):
|
def revert(self, listener, *args, **kwargs):
|
||||||
"""Handle a failed listener delete."""
|
"""Handle a failed listener delete."""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting listener delete."))
|
LOG.warning("Reverting listener delete.")
|
||||||
|
|
||||||
self.task_utils.mark_listener_prov_status_error(listener.id)
|
self.task_utils.mark_listener_prov_status_error(listener.id)
|
||||||
|
|
||||||
@ -166,7 +165,7 @@ class AmphoraFinalize(BaseAmphoraTask):
|
|||||||
"""Handle a failed amphora finalize."""
|
"""Handle a failed amphora finalize."""
|
||||||
if isinstance(result, failure.Failure):
|
if isinstance(result, failure.Failure):
|
||||||
return
|
return
|
||||||
LOG.warning(_LW("Reverting amphora finalize."))
|
LOG.warning("Reverting amphora finalize.")
|
||||||
self.task_utils.mark_amphora_status_error(amphora.id)
|
self.task_utils.mark_amphora_status_error(amphora.id)
|
||||||
|
|
||||||
|
|
||||||
@ -185,7 +184,7 @@ class AmphoraPostNetworkPlug(BaseAmphoraTask):
|
|||||||
"""Handle a failed post network plug."""
|
"""Handle a failed post network plug."""
|
||||||
if isinstance(result, failure.Failure):
|
if isinstance(result, failure.Failure):
|
||||||
return
|
return
|
||||||
LOG.warning(_LW("Reverting post network plug."))
|
LOG.warning("Reverting post network plug.")
|
||||||
self.task_utils.mark_amphora_status_error(amphora.id)
|
self.task_utils.mark_amphora_status_error(amphora.id)
|
||||||
|
|
||||||
|
|
||||||
@ -203,7 +202,7 @@ class AmphoraePostNetworkPlug(BaseAmphoraTask):
|
|||||||
"""Handle a failed post network plug."""
|
"""Handle a failed post network plug."""
|
||||||
if isinstance(result, failure.Failure):
|
if isinstance(result, failure.Failure):
|
||||||
return
|
return
|
||||||
LOG.warning(_LW("Reverting post network plug."))
|
LOG.warning("Reverting post network plug.")
|
||||||
for amphora in six.moves.filter(
|
for amphora in six.moves.filter(
|
||||||
lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
|
lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
|
||||||
loadbalancer.amphorae):
|
loadbalancer.amphorae):
|
||||||
@ -224,7 +223,7 @@ class AmphoraPostVIPPlug(BaseAmphoraTask):
|
|||||||
"""Handle a failed amphora vip plug notification."""
|
"""Handle a failed amphora vip plug notification."""
|
||||||
if isinstance(result, failure.Failure):
|
if isinstance(result, failure.Failure):
|
||||||
return
|
return
|
||||||
LOG.warning(_LW("Reverting post vip plug."))
|
LOG.warning("Reverting post vip plug.")
|
||||||
self.task_utils.mark_amphora_status_error(amphora.id)
|
self.task_utils.mark_amphora_status_error(amphora.id)
|
||||||
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)
|
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)
|
||||||
|
|
||||||
@ -244,7 +243,7 @@ class AmphoraePostVIPPlug(BaseAmphoraTask):
|
|||||||
"""Handle a failed amphora vip plug notification."""
|
"""Handle a failed amphora vip plug notification."""
|
||||||
if isinstance(result, failure.Failure):
|
if isinstance(result, failure.Failure):
|
||||||
return
|
return
|
||||||
LOG.warning(_LW("Reverting amphorae post vip plug."))
|
LOG.warning("Reverting amphorae post vip plug.")
|
||||||
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)
|
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)
|
||||||
|
|
||||||
|
|
||||||
@ -266,12 +265,12 @@ class AmphoraUpdateVRRPInterface(BaseAmphoraTask):
|
|||||||
for amp in six.moves.filter(
|
for amp in six.moves.filter(
|
||||||
lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
|
lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
|
||||||
loadbalancer.amphorae):
|
loadbalancer.amphorae):
|
||||||
# Currently this is supported only with REST Driver
|
# Currently this is supported only with REST Driver
|
||||||
interface = self.amphora_driver.get_vrrp_interface(amp)
|
interface = self.amphora_driver.get_vrrp_interface(amp)
|
||||||
self.amphora_repo.update(db_apis.get_session(), amp.id,
|
self.amphora_repo.update(db_apis.get_session(), amp.id,
|
||||||
vrrp_interface=interface)
|
vrrp_interface=interface)
|
||||||
amps.append(self.amphora_repo.get(db_apis.get_session(),
|
amps.append(self.amphora_repo.get(db_apis.get_session(),
|
||||||
id=amp.id))
|
id=amp.id))
|
||||||
loadbalancer.amphorae = amps
|
loadbalancer.amphorae = amps
|
||||||
return loadbalancer
|
return loadbalancer
|
||||||
|
|
||||||
@ -279,7 +278,7 @@ class AmphoraUpdateVRRPInterface(BaseAmphoraTask):
|
|||||||
"""Handle a failed amphora vip plug notification."""
|
"""Handle a failed amphora vip plug notification."""
|
||||||
if isinstance(result, failure.Failure):
|
if isinstance(result, failure.Failure):
|
||||||
return
|
return
|
||||||
LOG.warning(_LW("Reverting Get Amphora VRRP Interface."))
|
LOG.warning("Reverting Get Amphora VRRP Interface.")
|
||||||
for amp in six.moves.filter(
|
for amp in six.moves.filter(
|
||||||
lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
|
lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
|
||||||
loadbalancer.amphorae):
|
loadbalancer.amphorae):
|
||||||
@ -288,10 +287,9 @@ class AmphoraUpdateVRRPInterface(BaseAmphoraTask):
|
|||||||
self.amphora_repo.update(db_apis.get_session(), amp.id,
|
self.amphora_repo.update(db_apis.get_session(), amp.id,
|
||||||
vrrp_interface=None)
|
vrrp_interface=None)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("Failed to update amphora %(amp)s "
|
LOG.error("Failed to update amphora %(amp)s "
|
||||||
"VRRP interface to None due to: "
|
"VRRP interface to None due to: %(except)s",
|
||||||
"%(except)s"), {'amp': amp.id,
|
{'amp': amp.id, 'except': e})
|
||||||
'except': e})
|
|
||||||
|
|
||||||
|
|
||||||
class AmphoraVRRPUpdate(BaseAmphoraTask):
|
class AmphoraVRRPUpdate(BaseAmphoraTask):
|
||||||
|
@ -27,7 +27,6 @@ from octavia.common import constants
|
|||||||
from octavia.common import exceptions
|
from octavia.common import exceptions
|
||||||
from octavia.common.jinja import user_data_jinja_cfg
|
from octavia.common.jinja import user_data_jinja_cfg
|
||||||
from octavia.controller.worker import amphora_rate_limit
|
from octavia.controller.worker import amphora_rate_limit
|
||||||
from octavia.i18n import _LE, _LW
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
@ -101,7 +100,7 @@ class ComputeCreate(BaseComputeTask):
|
|||||||
return compute_id
|
return compute_id
|
||||||
|
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE("Compute create for amphora id: %s failed"),
|
LOG.exception("Compute create for amphora id: %s failed",
|
||||||
amphora_id)
|
amphora_id)
|
||||||
raise
|
raise
|
||||||
|
|
||||||
@ -113,13 +112,13 @@ class ComputeCreate(BaseComputeTask):
|
|||||||
if isinstance(result, failure.Failure):
|
if isinstance(result, failure.Failure):
|
||||||
return
|
return
|
||||||
compute_id = result
|
compute_id = result
|
||||||
LOG.warning(_LW("Reverting compute create for amphora with id"
|
LOG.warning("Reverting compute create for amphora with id"
|
||||||
"%(amp)s and compute id: %(comp)s"),
|
"%(amp)s and compute id: %(comp)s",
|
||||||
{'amp': amphora_id, 'comp': compute_id})
|
{'amp': amphora_id, 'comp': compute_id})
|
||||||
try:
|
try:
|
||||||
self.compute.delete(compute_id)
|
self.compute.delete(compute_id)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE("Reverting compute create failed"))
|
LOG.exception("Reverting compute create failed")
|
||||||
|
|
||||||
|
|
||||||
class CertComputeCreate(ComputeCreate):
|
class CertComputeCreate(ComputeCreate):
|
||||||
@ -157,7 +156,7 @@ class DeleteAmphoraeOnLoadBalancer(BaseComputeTask):
|
|||||||
try:
|
try:
|
||||||
self.compute.delete(amp.compute_id)
|
self.compute.delete(amp.compute_id)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE("Compute delete for amphora id: %s failed"),
|
LOG.exception("Compute delete for amphora id: %s failed",
|
||||||
amp.id)
|
amp.id)
|
||||||
raise
|
raise
|
||||||
|
|
||||||
@ -169,7 +168,7 @@ class ComputeDelete(BaseComputeTask):
|
|||||||
try:
|
try:
|
||||||
self.compute.delete(amphora.compute_id)
|
self.compute.delete(amphora.compute_id)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE("Compute delete for amphora id: %s failed"),
|
LOG.exception("Compute delete for amphora id: %s failed",
|
||||||
amphora.id)
|
amphora.id)
|
||||||
raise
|
raise
|
||||||
|
|
||||||
@ -219,14 +218,14 @@ class NovaServerGroupCreate(BaseComputeTask):
|
|||||||
:param result: here it refers to server group id
|
:param result: here it refers to server group id
|
||||||
"""
|
"""
|
||||||
server_group_id = result
|
server_group_id = result
|
||||||
LOG.warning(_LW("Reverting server group create with id:%s"),
|
LOG.warning("Reverting server group create with id:%s",
|
||||||
server_group_id)
|
server_group_id)
|
||||||
try:
|
try:
|
||||||
self.compute.delete_server_group(server_group_id)
|
self.compute.delete_server_group(server_group_id)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("Failed to delete server group. Resources may "
|
LOG.error("Failed to delete server group. Resources may "
|
||||||
"still be in use for server group: %(sg)s due to "
|
"still be in use for server group: %(sg)s due to "
|
||||||
"error: %(except)s"),
|
"error: %(except)s",
|
||||||
{'sg': server_group_id, 'except': e})
|
{'sg': server_group_id, 'except': e})
|
||||||
|
|
||||||
|
|
||||||
|
@ -31,7 +31,6 @@ import octavia.common.tls_utils.cert_parser as cert_parser
|
|||||||
from octavia.controller.worker import task_utils as task_utilities
|
from octavia.controller.worker import task_utils as task_utilities
|
||||||
from octavia.db import api as db_apis
|
from octavia.db import api as db_apis
|
||||||
from octavia.db import repositories as repo
|
from octavia.db import repositories as repo
|
||||||
from octavia.i18n import _LE, _LI, _LW
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
@ -98,7 +97,7 @@ class CreateAmphoraInDB(BaseDatabaseTask):
|
|||||||
status=constants.PENDING_CREATE,
|
status=constants.PENDING_CREATE,
|
||||||
cert_busy=False)
|
cert_busy=False)
|
||||||
|
|
||||||
LOG.info(_LI("Created Amphora in DB with id %s"), amphora.id)
|
LOG.info("Created Amphora in DB with id %s", amphora.id)
|
||||||
return amphora.id
|
return amphora.id
|
||||||
|
|
||||||
def revert(self, result, *args, **kwargs):
|
def revert(self, result, *args, **kwargs):
|
||||||
@ -120,17 +119,15 @@ class CreateAmphoraInDB(BaseDatabaseTask):
|
|||||||
# executed after this failed so we will need to do something and
|
# executed after this failed so we will need to do something and
|
||||||
# result is the amphora's id
|
# result is the amphora's id
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting create amphora in DB for amp id %s "),
|
LOG.warning("Reverting create amphora in DB for amp id %s ", result)
|
||||||
result)
|
|
||||||
|
|
||||||
# Delete the amphora for now. May want to just update status later
|
# Delete the amphora for now. May want to just update status later
|
||||||
try:
|
try:
|
||||||
self.amphora_repo.delete(db_apis.get_session(), id=result)
|
self.amphora_repo.delete(db_apis.get_session(), id=result)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("Failed to delete amphora %(amp)s "
|
LOG.error("Failed to delete amphora %(amp)s "
|
||||||
"in the database due to: "
|
"in the database due to: "
|
||||||
"%(except)s"), {'amp': result,
|
"%(except)s", {'amp': result, 'except': e})
|
||||||
'except': e})
|
|
||||||
|
|
||||||
|
|
||||||
class MarkLBAmphoraeDeletedInDB(BaseDatabaseTask):
|
class MarkLBAmphoraeDeletedInDB(BaseDatabaseTask):
|
||||||
@ -176,8 +173,8 @@ class DeleteHealthMonitorInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark health monitor delete in DB "
|
LOG.warning("Reverting mark health monitor delete in DB "
|
||||||
"for health monitor on pool with id %s"), pool_id)
|
"for health monitor on pool with id %s", pool_id)
|
||||||
# TODO(johnsom) fix this
|
# TODO(johnsom) fix this
|
||||||
# self.health_mon_repo.update(db_apis.get_session(), health_mon.id,
|
# self.health_mon_repo.update(db_apis.get_session(), health_mon.id,
|
||||||
# provisioning_status=constants.ERROR)
|
# provisioning_status=constants.ERROR)
|
||||||
@ -230,8 +227,7 @@ class DeleteMemberInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting delete in DB "
|
LOG.warning("Reverting delete in DB for member id %s", member.id)
|
||||||
"for member id %s"), member.id)
|
|
||||||
# TODO(johnsom) fix this
|
# TODO(johnsom) fix this
|
||||||
# self.member_repo.update(db_apis.get_session(), member.id,
|
# self.member_repo.update(db_apis.get_session(), member.id,
|
||||||
# operating_status=constants.ERROR)
|
# operating_status=constants.ERROR)
|
||||||
@ -256,8 +252,8 @@ class DeleteListenerInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark listener delete in DB "
|
LOG.warning("Reverting mark listener delete in DB for listener id %s",
|
||||||
"for listener id %s"), listener.id)
|
listener.id)
|
||||||
|
|
||||||
|
|
||||||
class DeletePoolInDB(BaseDatabaseTask):
|
class DeletePoolInDB(BaseDatabaseTask):
|
||||||
@ -283,8 +279,7 @@ class DeletePoolInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting delete in DB "
|
LOG.warning("Reverting delete in DB for pool id %s", pool.id)
|
||||||
"for pool id %s"), pool.id)
|
|
||||||
# TODO(johnsom) Fix this
|
# TODO(johnsom) Fix this
|
||||||
# self.pool_repo.update(db_apis.get_session(), pool.id,
|
# self.pool_repo.update(db_apis.get_session(), pool.id,
|
||||||
# operating_status=constants.ERROR)
|
# operating_status=constants.ERROR)
|
||||||
@ -313,8 +308,7 @@ class DeleteL7PolicyInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting delete in DB "
|
LOG.warning("Reverting delete in DB for l7policy id %s", l7policy.id)
|
||||||
"for l7policy id %s"), l7policy.id)
|
|
||||||
# TODO(sbalukoff) Fix this
|
# TODO(sbalukoff) Fix this
|
||||||
# self.listener_repo.update(db_apis.get_session(), l7policy.listener.id,
|
# self.listener_repo.update(db_apis.get_session(), l7policy.listener.id,
|
||||||
# operating_status=constants.ERROR)
|
# operating_status=constants.ERROR)
|
||||||
@ -343,8 +337,7 @@ class DeleteL7RuleInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting delete in DB "
|
LOG.warning("Reverting delete in DB for l7rule id %s", l7rule.id)
|
||||||
"for l7rule id %s"), l7rule.id)
|
|
||||||
# TODO(sbalukoff) Fix this
|
# TODO(sbalukoff) Fix this
|
||||||
# self.listener_repo.update(db_apis.get_session(),
|
# self.listener_repo.update(db_apis.get_session(),
|
||||||
# l7rule.l7policy.listener.id,
|
# l7rule.l7policy.listener.id,
|
||||||
@ -462,10 +455,9 @@ class AssociateFailoverAmphoraWithLBID(BaseDatabaseTask):
|
|||||||
self.repos.amphora.update(db_apis.get_session(), amphora_id,
|
self.repos.amphora.update(db_apis.get_session(), amphora_id,
|
||||||
loadbalancer_id=None)
|
loadbalancer_id=None)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("Failed to update amphora %(amp)s "
|
LOG.error("Failed to update amphora %(amp)s "
|
||||||
"load balancer id to None due to: "
|
"load balancer id to None due to: "
|
||||||
"%(except)s"), {'amp': amphora_id,
|
"%(except)s", {'amp': amphora_id, 'except': e})
|
||||||
'except': e})
|
|
||||||
|
|
||||||
|
|
||||||
class MapLoadbalancerToAmphora(BaseDatabaseTask):
|
class MapLoadbalancerToAmphora(BaseDatabaseTask):
|
||||||
@ -496,8 +488,8 @@ class MapLoadbalancerToAmphora(BaseDatabaseTask):
|
|||||||
return amp.id
|
return amp.id
|
||||||
|
|
||||||
def revert(self, result, loadbalancer_id, *args, **kwargs):
|
def revert(self, result, loadbalancer_id, *args, **kwargs):
|
||||||
LOG.warning(_LW("Reverting Amphora allocation for the load "
|
LOG.warning("Reverting Amphora allocation for the load "
|
||||||
"balancer %s in the database."), loadbalancer_id)
|
"balancer %s in the database.", loadbalancer_id)
|
||||||
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer_id)
|
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer_id)
|
||||||
|
|
||||||
|
|
||||||
@ -530,18 +522,16 @@ class _MarkAmphoraRoleAndPriorityInDB(BaseDatabaseTask):
|
|||||||
if isinstance(result, failure.Failure):
|
if isinstance(result, failure.Failure):
|
||||||
return
|
return
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting amphora role in DB for amp "
|
LOG.warning("Reverting amphora role in DB for amp id %(amp)s",
|
||||||
"id %(amp)s"),
|
|
||||||
{'amp': amphora.id})
|
{'amp': amphora.id})
|
||||||
try:
|
try:
|
||||||
self.amphora_repo.update(db_apis.get_session(), amphora.id,
|
self.amphora_repo.update(db_apis.get_session(), amphora.id,
|
||||||
role=None,
|
role=None,
|
||||||
vrrp_priority=None)
|
vrrp_priority=None)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("Failed to update amphora %(amp)s "
|
LOG.error("Failed to update amphora %(amp)s "
|
||||||
"role and vrrp_priority to None due to: "
|
"role and vrrp_priority to None due to: "
|
||||||
"%(except)s"), {'amp': amphora.id,
|
"%(except)s", {'amp': amphora.id, 'except': e})
|
||||||
'except': e})
|
|
||||||
|
|
||||||
|
|
||||||
class MarkAmphoraMasterInDB(_MarkAmphoraRoleAndPriorityInDB):
|
class MarkAmphoraMasterInDB(_MarkAmphoraRoleAndPriorityInDB):
|
||||||
@ -623,10 +613,10 @@ class MarkAmphoraAllocatedInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.info(_LI("Mark ALLOCATED in DB for amphora: %(amp)s with "
|
LOG.info(("Mark ALLOCATED in DB for amphora: %(amp)s with "
|
||||||
"compute id %(comp)s for load balancer: %(lb)s"),
|
"compute id %(comp)s for load balancer: %(lb)s"),
|
||||||
{"amp": amphora.id, "comp": amphora.compute_id,
|
{"amp": amphora.id, "comp": amphora.compute_id,
|
||||||
"lb": loadbalancer_id})
|
"lb": loadbalancer_id})
|
||||||
self.amphora_repo.update(db_apis.get_session(), amphora.id,
|
self.amphora_repo.update(db_apis.get_session(), amphora.id,
|
||||||
status=constants.AMPHORA_ALLOCATED,
|
status=constants.AMPHORA_ALLOCATED,
|
||||||
compute_id=amphora.compute_id,
|
compute_id=amphora.compute_id,
|
||||||
@ -646,8 +636,8 @@ class MarkAmphoraAllocatedInDB(BaseDatabaseTask):
|
|||||||
if isinstance(result, failure.Failure):
|
if isinstance(result, failure.Failure):
|
||||||
return
|
return
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark amphora ready in DB for amp "
|
LOG.warning("Reverting mark amphora ready in DB for amp "
|
||||||
"id %(amp)s and compute id %(comp)s"),
|
"id %(amp)s and compute id %(comp)s",
|
||||||
{'amp': amphora.id, 'comp': amphora.compute_id})
|
{'amp': amphora.id, 'comp': amphora.compute_id})
|
||||||
self.task_utils.mark_amphora_status_error(amphora.id)
|
self.task_utils.mark_amphora_status_error(amphora.id)
|
||||||
|
|
||||||
@ -681,18 +671,17 @@ class MarkAmphoraBootingInDB(BaseDatabaseTask):
|
|||||||
if isinstance(result, failure.Failure):
|
if isinstance(result, failure.Failure):
|
||||||
return
|
return
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark amphora booting in DB for amp "
|
LOG.warning("Reverting mark amphora booting in DB for amp "
|
||||||
"id %(amp)s and compute id %(comp)s"),
|
"id %(amp)s and compute id %(comp)s",
|
||||||
{'amp': amphora_id, 'comp': compute_id})
|
{'amp': amphora_id, 'comp': compute_id})
|
||||||
try:
|
try:
|
||||||
self.amphora_repo.update(db_apis.get_session(), amphora_id,
|
self.amphora_repo.update(db_apis.get_session(), amphora_id,
|
||||||
status=constants.ERROR,
|
status=constants.ERROR,
|
||||||
compute_id=compute_id)
|
compute_id=compute_id)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("Failed to update amphora %(amp)s "
|
LOG.error("Failed to update amphora %(amp)s "
|
||||||
"status to ERROR due to: "
|
"status to ERROR due to: "
|
||||||
"%(except)s"), {'amp': amphora_id,
|
"%(except)s", {'amp': amphora_id, 'except': e})
|
||||||
'except': e})
|
|
||||||
|
|
||||||
|
|
||||||
class MarkAmphoraDeletedInDB(BaseDatabaseTask):
|
class MarkAmphoraDeletedInDB(BaseDatabaseTask):
|
||||||
@ -721,8 +710,8 @@ class MarkAmphoraDeletedInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark amphora deleted in DB "
|
LOG.warning("Reverting mark amphora deleted in DB "
|
||||||
"for amp id %(amp)s and compute id %(comp)s"),
|
"for amp id %(amp)s and compute id %(comp)s",
|
||||||
{'amp': amphora.id, 'comp': amphora.compute_id})
|
{'amp': amphora.id, 'comp': amphora.compute_id})
|
||||||
self.task_utils.mark_amphora_status_error(amphora.id)
|
self.task_utils.mark_amphora_status_error(amphora.id)
|
||||||
|
|
||||||
@ -753,8 +742,8 @@ class MarkAmphoraPendingDeleteInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark amphora pending delete in DB "
|
LOG.warning("Reverting mark amphora pending delete in DB "
|
||||||
"for amp id %(amp)s and compute id %(comp)s"),
|
"for amp id %(amp)s and compute id %(comp)s",
|
||||||
{'amp': amphora.id, 'comp': amphora.compute_id})
|
{'amp': amphora.id, 'comp': amphora.compute_id})
|
||||||
self.task_utils.mark_amphora_status_error(amphora.id)
|
self.task_utils.mark_amphora_status_error(amphora.id)
|
||||||
|
|
||||||
@ -785,8 +774,8 @@ class MarkAmphoraPendingUpdateInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark amphora pending update in DB "
|
LOG.warning("Reverting mark amphora pending update in DB "
|
||||||
"for amp id %(amp)s and compute id %(comp)s"),
|
"for amp id %(amp)s and compute id %(comp)s",
|
||||||
{'amp': amphora.id, 'comp': amphora.compute_id})
|
{'amp': amphora.id, 'comp': amphora.compute_id})
|
||||||
self.task_utils.mark_amphora_status_error(amphora.id)
|
self.task_utils.mark_amphora_status_error(amphora.id)
|
||||||
|
|
||||||
@ -805,8 +794,8 @@ class MarkAmphoraReadyInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.info(_LI("Mark READY in DB for amphora: %(amp)s with compute "
|
LOG.info("Mark READY in DB for amphora: %(amp)s with compute "
|
||||||
"id %(comp)s"),
|
"id %(comp)s",
|
||||||
{"amp": amphora.id, "comp": amphora.compute_id})
|
{"amp": amphora.id, "comp": amphora.compute_id})
|
||||||
self.amphora_repo.update(db_apis.get_session(), amphora.id,
|
self.amphora_repo.update(db_apis.get_session(), amphora.id,
|
||||||
status=constants.AMPHORA_READY,
|
status=constants.AMPHORA_READY,
|
||||||
@ -820,8 +809,8 @@ class MarkAmphoraReadyInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark amphora ready in DB for amp "
|
LOG.warning("Reverting mark amphora ready in DB for amp "
|
||||||
"id %(amp)s and compute id %(comp)s"),
|
"id %(amp)s and compute id %(comp)s",
|
||||||
{'amp': amphora.id, 'comp': amphora.compute_id})
|
{'amp': amphora.id, 'comp': amphora.compute_id})
|
||||||
try:
|
try:
|
||||||
self.amphora_repo.update(db_apis.get_session(), amphora.id,
|
self.amphora_repo.update(db_apis.get_session(), amphora.id,
|
||||||
@ -829,10 +818,9 @@ class MarkAmphoraReadyInDB(BaseDatabaseTask):
|
|||||||
compute_id=amphora.compute_id,
|
compute_id=amphora.compute_id,
|
||||||
lb_network_ip=amphora.lb_network_ip)
|
lb_network_ip=amphora.lb_network_ip)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("Failed to update amphora %(amp)s "
|
LOG.error("Failed to update amphora %(amp)s "
|
||||||
"status to ERROR due to: "
|
"status to ERROR due to: "
|
||||||
"%(except)s"), {'amp': amphora.id,
|
"%(except)s", {'amp': amphora.id, 'except': e})
|
||||||
'except': e})
|
|
||||||
|
|
||||||
|
|
||||||
class UpdateAmphoraComputeId(BaseDatabaseTask):
|
class UpdateAmphoraComputeId(BaseDatabaseTask):
|
||||||
@ -927,7 +915,7 @@ class MarkLBActiveInDB(BaseDatabaseTask):
|
|||||||
listener.id,
|
listener.id,
|
||||||
provisioning_status=constants.ACTIVE)
|
provisioning_status=constants.ACTIVE)
|
||||||
|
|
||||||
LOG.info(_LI("Mark ACTIVE in DB for load balancer id: %s"),
|
LOG.info("Mark ACTIVE in DB for load balancer id: %s",
|
||||||
loadbalancer.id)
|
loadbalancer.id)
|
||||||
self.loadbalancer_repo.update(db_apis.get_session(),
|
self.loadbalancer_repo.update(db_apis.get_session(),
|
||||||
loadbalancer.id,
|
loadbalancer.id,
|
||||||
@ -952,11 +940,11 @@ class MarkLBActiveInDB(BaseDatabaseTask):
|
|||||||
db_apis.get_session(), listener.id,
|
db_apis.get_session(), listener.id,
|
||||||
provisioning_status=constants.ERROR)
|
provisioning_status=constants.ERROR)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.warning(_LW("Error updating listener %s provisioning "
|
LOG.warning("Error updating listener %s provisioning "
|
||||||
"status"), listener.id)
|
"status", listener.id)
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark load balancer deleted in DB "
|
LOG.warning("Reverting mark load balancer deleted in DB "
|
||||||
"for load balancer id %s"), loadbalancer.id)
|
"for load balancer id %s", loadbalancer.id)
|
||||||
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)
|
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)
|
||||||
|
|
||||||
|
|
||||||
@ -986,18 +974,17 @@ class UpdateLBServerGroupInDB(BaseDatabaseTask):
|
|||||||
associated with the load balancer
|
associated with the load balancer
|
||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
LOG.warning(_LW('Reverting Server Group updated with id: %(s1)s for '
|
LOG.warning('Reverting Server Group updated with id: %(s1)s for '
|
||||||
'load balancer id: %(s2)s '),
|
'load balancer id: %(s2)s ',
|
||||||
{'s1': server_group_id, 's2': loadbalancer_id})
|
{'s1': server_group_id, 's2': loadbalancer_id})
|
||||||
try:
|
try:
|
||||||
self.loadbalancer_repo.update(db_apis.get_session(),
|
self.loadbalancer_repo.update(db_apis.get_session(),
|
||||||
id=loadbalancer_id,
|
id=loadbalancer_id,
|
||||||
server_group_id=None)
|
server_group_id=None)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("Failed to update load balancer %(lb)s "
|
LOG.error("Failed to update load balancer %(lb)s "
|
||||||
"server_group_id to None due to: "
|
"server_group_id to None due to: "
|
||||||
"%(except)s"), {'lb': loadbalancer_id,
|
"%(except)s", {'lb': loadbalancer_id, 'except': e})
|
||||||
'except': e})
|
|
||||||
|
|
||||||
|
|
||||||
class MarkLBDeletedInDB(BaseDatabaseTask):
|
class MarkLBDeletedInDB(BaseDatabaseTask):
|
||||||
@ -1026,8 +1013,8 @@ class MarkLBDeletedInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark load balancer deleted in DB "
|
LOG.warning("Reverting mark load balancer deleted in DB "
|
||||||
"for load balancer id %s"), loadbalancer.id)
|
"for load balancer id %s", loadbalancer.id)
|
||||||
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)
|
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)
|
||||||
|
|
||||||
|
|
||||||
@ -1058,8 +1045,8 @@ class MarkLBPendingDeleteInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark load balancer pending delete in DB "
|
LOG.warning("Reverting mark load balancer pending delete in DB "
|
||||||
"for load balancer id %s"), loadbalancer.id)
|
"for load balancer id %s", loadbalancer.id)
|
||||||
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)
|
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)
|
||||||
|
|
||||||
|
|
||||||
@ -1095,10 +1082,8 @@ class MarkLBAndListenersActiveInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark load balancer "
|
LOG.warning("Reverting mark load balancer and listeners active in DB "
|
||||||
"and listeners active in DB "
|
"for load balancer id %(LB)s and listener ids: %(list)s",
|
||||||
"for load balancer id %(LB)s and "
|
|
||||||
"listener ids: %(list)s"),
|
|
||||||
{'LB': loadbalancer.id,
|
{'LB': loadbalancer.id,
|
||||||
'list': ', '.join([l.id for l in listeners])})
|
'list': ', '.join([l.id for l in listeners])})
|
||||||
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)
|
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)
|
||||||
@ -1130,8 +1115,8 @@ class MarkListenerActiveInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark listener active in DB "
|
LOG.warning("Reverting mark listener active in DB "
|
||||||
"for listener id %s"), listener.id)
|
"for listener id %s", listener.id)
|
||||||
self.task_utils.mark_listener_prov_status_error(listener.id)
|
self.task_utils.mark_listener_prov_status_error(listener.id)
|
||||||
|
|
||||||
|
|
||||||
@ -1159,8 +1144,8 @@ class MarkListenerDeletedInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark listener deleted in DB "
|
LOG.warning("Reverting mark listener deleted in DB "
|
||||||
"for listener id %s"), listener.id)
|
"for listener id %s", listener.id)
|
||||||
self.task_utils.mark_listener_prov_status_error(listener.id)
|
self.task_utils.mark_listener_prov_status_error(listener.id)
|
||||||
|
|
||||||
|
|
||||||
@ -1189,8 +1174,8 @@ class MarkListenerPendingDeleteInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark listener pending delete in DB "
|
LOG.warning("Reverting mark listener pending delete in DB "
|
||||||
"for listener id %s"), listener.id)
|
"for listener id %s", listener.id)
|
||||||
self.task_utils.mark_listener_prov_status_error(listener.id)
|
self.task_utils.mark_listener_prov_status_error(listener.id)
|
||||||
|
|
||||||
|
|
||||||
@ -1219,8 +1204,8 @@ class UpdateLoadbalancerInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting update loadbalancer in DB "
|
LOG.warning("Reverting update loadbalancer in DB "
|
||||||
"for loadbalancer id %s"), loadbalancer.id)
|
"for loadbalancer id %s", loadbalancer.id)
|
||||||
|
|
||||||
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)
|
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)
|
||||||
|
|
||||||
@ -1250,18 +1235,17 @@ class UpdateHealthMonInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting update health monitor in DB "
|
LOG.warning("Reverting update health monitor in DB "
|
||||||
"for health monitor id %s"), health_mon.pool_id)
|
"for health monitor id %s", health_mon.pool_id)
|
||||||
# TODO(johnsom) fix this to set the upper ojects to ERROR
|
# TODO(johnsom) fix this to set the upper ojects to ERROR
|
||||||
try:
|
try:
|
||||||
self.health_mon_repo.update(db_apis.get_session(),
|
self.health_mon_repo.update(db_apis.get_session(),
|
||||||
health_mon.pool_id,
|
health_mon.pool_id,
|
||||||
enabled=0)
|
enabled=0)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("Failed to update health monitor %(hm)s "
|
LOG.error("Failed to update health monitor %(hm)s "
|
||||||
"enabled to 0 due to: "
|
"enabled to 0 due to: %(except)s",
|
||||||
"%(except)s"), {'hm': health_mon.pool_id,
|
{'hm': health_mon.pool_id, 'except': e})
|
||||||
'except': e})
|
|
||||||
|
|
||||||
|
|
||||||
class UpdateListenerInDB(BaseDatabaseTask):
|
class UpdateListenerInDB(BaseDatabaseTask):
|
||||||
@ -1289,8 +1273,8 @@ class UpdateListenerInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting update listener in DB "
|
LOG.warning("Reverting update listener in DB "
|
||||||
"for listener id %s"), listener.id)
|
"for listener id %s", listener.id)
|
||||||
self.task_utils.mark_listener_prov_status_error(listener.id)
|
self.task_utils.mark_listener_prov_status_error(listener.id)
|
||||||
|
|
||||||
|
|
||||||
@ -1319,17 +1303,15 @@ class UpdateMemberInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting update member in DB "
|
LOG.warning("Reverting update member in DB "
|
||||||
"for member id %s"), member.id)
|
"for member id %s", member.id)
|
||||||
# TODO(johnsom) fix this to set the upper objects to ERROR
|
# TODO(johnsom) fix this to set the upper objects to ERROR
|
||||||
try:
|
try:
|
||||||
self.member_repo.update(db_apis.get_session(), member.id,
|
self.member_repo.update(db_apis.get_session(), member.id,
|
||||||
enabled=0)
|
enabled=0)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("Failed to update member %(member)s "
|
LOG.error("Failed to update member %(member)s enabled to 0 due "
|
||||||
"enabled to 0 due to: "
|
"to: %(except)s", {'member': member.id, 'except': e})
|
||||||
"%(except)s"), {'member': member.id,
|
|
||||||
'except': e})
|
|
||||||
|
|
||||||
|
|
||||||
class UpdatePoolInDB(BaseDatabaseTask):
|
class UpdatePoolInDB(BaseDatabaseTask):
|
||||||
@ -1357,17 +1339,14 @@ class UpdatePoolInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting update pool in DB "
|
LOG.warning("Reverting update pool in DB for pool id %s", pool.id)
|
||||||
"for pool id %s"), pool.id)
|
|
||||||
# TODO(johnsom) fix this to set the upper objects to ERROR
|
# TODO(johnsom) fix this to set the upper objects to ERROR
|
||||||
try:
|
try:
|
||||||
self.repos.update_pool_and_sp(db_apis.get_session(),
|
self.repos.update_pool_and_sp(db_apis.get_session(),
|
||||||
pool.id, enabled=0)
|
pool.id, enabled=0)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("Failed to update pool %(pool)s "
|
LOG.error("Failed to update pool %(pool)s enabled 0 due to: "
|
||||||
"enabled 0 due to: "
|
"%(except)s", {'pool': pool.id, 'except': e})
|
||||||
"%(except)s"), {'pool': pool.id,
|
|
||||||
'except': e})
|
|
||||||
|
|
||||||
|
|
||||||
class UpdateL7PolicyInDB(BaseDatabaseTask):
|
class UpdateL7PolicyInDB(BaseDatabaseTask):
|
||||||
@ -1395,17 +1374,15 @@ class UpdateL7PolicyInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting update l7policy in DB "
|
LOG.warning("Reverting update l7policy in DB "
|
||||||
"for l7policy id %s"), l7policy.id)
|
"for l7policy id %s", l7policy.id)
|
||||||
# TODO(sbalukoff) fix this to set the upper objects to ERROR
|
# TODO(sbalukoff) fix this to set the upper objects to ERROR
|
||||||
try:
|
try:
|
||||||
self.l7policy_repo.update(db_apis.get_session(), l7policy.id,
|
self.l7policy_repo.update(db_apis.get_session(), l7policy.id,
|
||||||
enabled=0)
|
enabled=0)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("Failed to update l7policy %(l7p)s "
|
LOG.error("Failed to update l7policy %(l7p)s enabled to 0 due "
|
||||||
"enabled to 0 due to: "
|
"to: %(except)s", {'l7p': l7policy.id, 'except': e})
|
||||||
"%(except)s"), {'l7p': l7policy.id,
|
|
||||||
'except': e})
|
|
||||||
|
|
||||||
|
|
||||||
class UpdateL7RuleInDB(BaseDatabaseTask):
|
class UpdateL7RuleInDB(BaseDatabaseTask):
|
||||||
@ -1433,18 +1410,16 @@ class UpdateL7RuleInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting update l7rule in DB "
|
LOG.warning("Reverting update l7rule in DB "
|
||||||
"for l7rule id %s"), l7rule.id)
|
"for l7rule id %s", l7rule.id)
|
||||||
# TODO(sbalukoff) fix this to set appropriate upper objects to ERROR
|
# TODO(sbalukoff) fix this to set appropriate upper objects to ERROR
|
||||||
try:
|
try:
|
||||||
self.l7policy_repo.update(db_apis.get_session(),
|
self.l7policy_repo.update(db_apis.get_session(),
|
||||||
l7rule.l7policy.id,
|
l7rule.l7policy.id,
|
||||||
enabled=0)
|
enabled=0)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("Failed to update L7rule %(l7r)s "
|
LOG.error("Failed to update L7rule %(l7r)s enabled to 0 due to: "
|
||||||
"enabled to 0 due to: "
|
"%(except)s", {'l7r': l7rule.l7policy.id, 'except': e})
|
||||||
"%(except)s"), {'l7r': l7rule.l7policy.id,
|
|
||||||
'except': e})
|
|
||||||
|
|
||||||
|
|
||||||
class GetAmphoraDetails(BaseDatabaseTask):
|
class GetAmphoraDetails(BaseDatabaseTask):
|
||||||
@ -1614,8 +1589,8 @@ class MarkHealthMonitorActiveInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark health montor ACTIVE in DB "
|
LOG.warning("Reverting mark health montor ACTIVE in DB "
|
||||||
"for health monitor id %s"), health_mon.pool_id)
|
"for health monitor id %s", health_mon.pool_id)
|
||||||
self.task_utils.mark_health_mon_prov_status_error(health_mon.pool_id)
|
self.task_utils.mark_health_mon_prov_status_error(health_mon.pool_id)
|
||||||
|
|
||||||
|
|
||||||
@ -1646,8 +1621,8 @@ class MarkHealthMonitorPendingCreateInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark health monitor pending create in DB "
|
LOG.warning("Reverting mark health monitor pending create in DB "
|
||||||
"for health monitor id %s"), health_mon.pool_id)
|
"for health monitor id %s", health_mon.pool_id)
|
||||||
self.task_utils.mark_health_mon_prov_status_error(health_mon.pool_id)
|
self.task_utils.mark_health_mon_prov_status_error(health_mon.pool_id)
|
||||||
|
|
||||||
|
|
||||||
@ -1678,8 +1653,8 @@ class MarkHealthMonitorPendingDeleteInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark health monitor pending delete in DB "
|
LOG.warning("Reverting mark health monitor pending delete in DB "
|
||||||
"for health monitor id %s"), health_mon.pool_id)
|
"for health monitor id %s", health_mon.pool_id)
|
||||||
self.task_utils.mark_health_mon_prov_status_error(health_mon.pool_id)
|
self.task_utils.mark_health_mon_prov_status_error(health_mon.pool_id)
|
||||||
|
|
||||||
|
|
||||||
@ -1710,8 +1685,8 @@ class MarkHealthMonitorPendingUpdateInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark health monitor pending update in DB "
|
LOG.warning("Reverting mark health monitor pending update in DB "
|
||||||
"for health monitor id %s"), health_mon.pool_id)
|
"for health monitor id %s", health_mon.pool_id)
|
||||||
self.task_utils.mark_health_mon_prov_status_error(health_mon.pool_id)
|
self.task_utils.mark_health_mon_prov_status_error(health_mon.pool_id)
|
||||||
|
|
||||||
|
|
||||||
@ -1744,8 +1719,8 @@ class MarkL7PolicyActiveInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark l7policy ACTIVE in DB "
|
LOG.warning("Reverting mark l7policy ACTIVE in DB "
|
||||||
"for l7policy id %s"), l7policy.id)
|
"for l7policy id %s", l7policy.id)
|
||||||
self.task_utils.mark_l7policy_prov_status_error(l7policy.id)
|
self.task_utils.mark_l7policy_prov_status_error(l7policy.id)
|
||||||
|
|
||||||
|
|
||||||
@ -1775,8 +1750,8 @@ class MarkL7PolicyPendingCreateInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark l7policy pending create in DB "
|
LOG.warning("Reverting mark l7policy pending create in DB "
|
||||||
"for l7policy id %s"), l7policy.id)
|
"for l7policy id %s", l7policy.id)
|
||||||
self.task_utils.mark_l7policy_prov_status_error(l7policy.id)
|
self.task_utils.mark_l7policy_prov_status_error(l7policy.id)
|
||||||
|
|
||||||
|
|
||||||
@ -1806,8 +1781,8 @@ class MarkL7PolicyPendingDeleteInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark l7policy pending delete in DB "
|
LOG.warning("Reverting mark l7policy pending delete in DB "
|
||||||
"for l7policy id %s"), l7policy.id)
|
"for l7policy id %s", l7policy.id)
|
||||||
self.task_utils.mark_l7policy_prov_status_error(l7policy.id)
|
self.task_utils.mark_l7policy_prov_status_error(l7policy.id)
|
||||||
|
|
||||||
|
|
||||||
@ -1838,8 +1813,8 @@ class MarkL7PolicyPendingUpdateInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark l7policy pending update in DB "
|
LOG.warning("Reverting mark l7policy pending update in DB "
|
||||||
"for l7policy id %s"), l7policy.id)
|
"for l7policy id %s", l7policy.id)
|
||||||
self.task_utils.mark_l7policy_prov_status_error(l7policy.id)
|
self.task_utils.mark_l7policy_prov_status_error(l7policy.id)
|
||||||
|
|
||||||
|
|
||||||
@ -1871,8 +1846,8 @@ class MarkL7RuleActiveInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark l7rule ACTIVE in DB "
|
LOG.warning("Reverting mark l7rule ACTIVE in DB "
|
||||||
"for l7rule id %s"), l7rule.id)
|
"for l7rule id %s", l7rule.id)
|
||||||
self.task_utils.mark_l7rule_prov_status_error(l7rule.id)
|
self.task_utils.mark_l7rule_prov_status_error(l7rule.id)
|
||||||
|
|
||||||
|
|
||||||
@ -1902,8 +1877,8 @@ class MarkL7RulePendingCreateInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark l7rule pending create in DB "
|
LOG.warning("Reverting mark l7rule pending create in DB "
|
||||||
"for l7rule id %s"), l7rule.id)
|
"for l7rule id %s", l7rule.id)
|
||||||
self.task_utils.mark_l7rule_prov_status_error(l7rule.id)
|
self.task_utils.mark_l7rule_prov_status_error(l7rule.id)
|
||||||
|
|
||||||
|
|
||||||
@ -1933,8 +1908,8 @@ class MarkL7RulePendingDeleteInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark l7rule pending delete in DB "
|
LOG.warning("Reverting mark l7rule pending delete in DB "
|
||||||
"for l7rule id %s"), l7rule.id)
|
"for l7rule id %s", l7rule.id)
|
||||||
self.task_utils.mark_l7rule_prov_status_error(l7rule.id)
|
self.task_utils.mark_l7rule_prov_status_error(l7rule.id)
|
||||||
|
|
||||||
|
|
||||||
@ -1964,8 +1939,8 @@ class MarkL7RulePendingUpdateInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark l7rule pending update in DB "
|
LOG.warning("Reverting mark l7rule pending update in DB "
|
||||||
"for l7rule id %s"), l7rule.id)
|
"for l7rule id %s", l7rule.id)
|
||||||
self.task_utils.mark_l7rule_prov_status_error(l7rule.id)
|
self.task_utils.mark_l7rule_prov_status_error(l7rule.id)
|
||||||
|
|
||||||
|
|
||||||
@ -1994,8 +1969,8 @@ class MarkMemberActiveInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark member ACTIVE in DB "
|
LOG.warning("Reverting mark member ACTIVE in DB "
|
||||||
"for member id %s"), member.id)
|
"for member id %s", member.id)
|
||||||
self.task_utils.mark_member_prov_status_error(member.id)
|
self.task_utils.mark_member_prov_status_error(member.id)
|
||||||
|
|
||||||
|
|
||||||
@ -2024,8 +1999,8 @@ class MarkMemberPendingCreateInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark member pending create in DB "
|
LOG.warning("Reverting mark member pending create in DB "
|
||||||
"for member id %s"), member.id)
|
"for member id %s", member.id)
|
||||||
self.task_utils.mark_member_prov_status_error(member.id)
|
self.task_utils.mark_member_prov_status_error(member.id)
|
||||||
|
|
||||||
|
|
||||||
@ -2054,8 +2029,8 @@ class MarkMemberPendingDeleteInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark member pending delete in DB "
|
LOG.warning("Reverting mark member pending delete in DB "
|
||||||
"for member id %s"), member.id)
|
"for member id %s", member.id)
|
||||||
self.task_utils.mark_member_prov_status_error(member.id)
|
self.task_utils.mark_member_prov_status_error(member.id)
|
||||||
|
|
||||||
|
|
||||||
@ -2085,8 +2060,8 @@ class MarkMemberPendingUpdateInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark member pending update in DB "
|
LOG.warning("Reverting mark member pending update in DB "
|
||||||
"for member id %s"), member.id)
|
"for member id %s", member.id)
|
||||||
self.task_utils.mark_member_prov_status_error(member.id)
|
self.task_utils.mark_member_prov_status_error(member.id)
|
||||||
|
|
||||||
|
|
||||||
@ -2116,8 +2091,7 @@ class MarkPoolActiveInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark pool ACTIVE in DB "
|
LOG.warning("Reverting mark pool ACTIVE in DB for pool id %s", pool.id)
|
||||||
"for pool id %s"), pool.id)
|
|
||||||
self.task_utils.mark_pool_prov_status_error(pool.id)
|
self.task_utils.mark_pool_prov_status_error(pool.id)
|
||||||
|
|
||||||
|
|
||||||
@ -2147,8 +2121,8 @@ class MarkPoolPendingCreateInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark pool pending create in DB "
|
LOG.warning("Reverting mark pool pending create in DB "
|
||||||
"for pool id %s"), pool.id)
|
"for pool id %s", pool.id)
|
||||||
self.task_utils.mark_pool_prov_status_error(pool.id)
|
self.task_utils.mark_pool_prov_status_error(pool.id)
|
||||||
|
|
||||||
|
|
||||||
@ -2178,8 +2152,8 @@ class MarkPoolPendingDeleteInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark pool pending delete in DB "
|
LOG.warning("Reverting mark pool pending delete in DB "
|
||||||
"for pool id %s"), pool.id)
|
"for pool id %s", pool.id)
|
||||||
self.task_utils.mark_pool_prov_status_error(pool.id)
|
self.task_utils.mark_pool_prov_status_error(pool.id)
|
||||||
|
|
||||||
|
|
||||||
@ -2209,8 +2183,8 @@ class MarkPoolPendingUpdateInDB(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW("Reverting mark pool pending update in DB "
|
LOG.warning("Reverting mark pool pending update in DB "
|
||||||
"for pool id %s"), pool.id)
|
"for pool id %s", pool.id)
|
||||||
self.task_utils.mark_pool_prov_status_error(pool.id)
|
self.task_utils.mark_pool_prov_status_error(pool.id)
|
||||||
|
|
||||||
|
|
||||||
@ -2238,10 +2212,9 @@ class DecrementHealthMonitorQuota(BaseDatabaseTask):
|
|||||||
lock_session.commit()
|
lock_session.commit()
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Failed to decrement health monitor quota for '
|
LOG.error('Failed to decrement health monitor quota for '
|
||||||
'project: {proj} the project may have excess '
|
'project: {proj} the project may have excess '
|
||||||
'quota in use.').format(
|
'quota in use.'.format(proj=health_mon.project_id))
|
||||||
proj=health_mon.project_id))
|
|
||||||
lock_session.rollback()
|
lock_session.rollback()
|
||||||
|
|
||||||
def revert(self, health_mon, result, *args, **kwargs):
|
def revert(self, health_mon, result, *args, **kwargs):
|
||||||
@ -2251,9 +2224,9 @@ class DecrementHealthMonitorQuota(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW('Reverting decrement quota for health monitor '
|
LOG.warning('Reverting decrement quota for health monitor '
|
||||||
'on project {proj} Project quota counts may be '
|
'on project {proj} Project quota counts may be '
|
||||||
'incorrect.').format(proj=health_mon.project_id))
|
'incorrect.'.format(proj=health_mon.project_id))
|
||||||
|
|
||||||
# Increment the quota back if this task wasn't the failure
|
# Increment the quota back if this task wasn't the failure
|
||||||
if not isinstance(result, failure.Failure):
|
if not isinstance(result, failure.Failure):
|
||||||
@ -2298,10 +2271,9 @@ class DecrementListenerQuota(BaseDatabaseTask):
|
|||||||
lock_session.commit()
|
lock_session.commit()
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Failed to decrement listener quota for '
|
LOG.error('Failed to decrement listener quota for '
|
||||||
'project: {proj} the project may have excess '
|
'project: {proj} the project may have excess '
|
||||||
'quota in use.').format(
|
'quota in use.'.format(proj=listener.project_id))
|
||||||
proj=listener.project_id))
|
|
||||||
lock_session.rollback()
|
lock_session.rollback()
|
||||||
|
|
||||||
def revert(self, listener, result, *args, **kwargs):
|
def revert(self, listener, result, *args, **kwargs):
|
||||||
@ -2311,9 +2283,9 @@ class DecrementListenerQuota(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW('Reverting decrement quota for listener '
|
LOG.warning('Reverting decrement quota for listener '
|
||||||
'on project {proj} Project quota counts may be '
|
'on project {proj} Project quota counts may be '
|
||||||
'incorrect.').format(proj=listener.project_id))
|
'incorrect.'.format(proj=listener.project_id))
|
||||||
|
|
||||||
# Increment the quota back if this task wasn't the failure
|
# Increment the quota back if this task wasn't the failure
|
||||||
if not isinstance(result, failure.Failure):
|
if not isinstance(result, failure.Failure):
|
||||||
@ -2358,10 +2330,9 @@ class DecrementLoadBalancerQuota(BaseDatabaseTask):
|
|||||||
lock_session.commit()
|
lock_session.commit()
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Failed to decrement load balancer quota for '
|
LOG.error('Failed to decrement load balancer quota for '
|
||||||
'project: {proj} the project may have excess '
|
'project: {proj} the project may have excess '
|
||||||
'quota in use.').format(
|
'quota in use.'.format(proj=loadbalancer.project_id))
|
||||||
proj=loadbalancer.project_id))
|
|
||||||
lock_session.rollback()
|
lock_session.rollback()
|
||||||
|
|
||||||
def revert(self, loadbalancer, result, *args, **kwargs):
|
def revert(self, loadbalancer, result, *args, **kwargs):
|
||||||
@ -2371,9 +2342,9 @@ class DecrementLoadBalancerQuota(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW('Reverting decrement quota for load balancer '
|
LOG.warning('Reverting decrement quota for load balancer '
|
||||||
'on project {proj} Project quota counts may be '
|
'on project {proj} Project quota counts may be '
|
||||||
'incorrect.').format(proj=loadbalancer.project_id))
|
'incorrect.'.format(proj=loadbalancer.project_id))
|
||||||
|
|
||||||
# Increment the quota back if this task wasn't the failure
|
# Increment the quota back if this task wasn't the failure
|
||||||
if not isinstance(result, failure.Failure):
|
if not isinstance(result, failure.Failure):
|
||||||
@ -2418,10 +2389,9 @@ class DecrementMemberQuota(BaseDatabaseTask):
|
|||||||
lock_session.commit()
|
lock_session.commit()
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Failed to decrement member quota for '
|
LOG.error('Failed to decrement member quota for '
|
||||||
'project: {proj} the project may have excess '
|
'project: {proj} the project may have excess '
|
||||||
'quota in use.').format(
|
'quota in use.'.format(proj=member.project_id))
|
||||||
proj=member.project_id))
|
|
||||||
lock_session.rollback()
|
lock_session.rollback()
|
||||||
|
|
||||||
def revert(self, member, result, *args, **kwargs):
|
def revert(self, member, result, *args, **kwargs):
|
||||||
@ -2431,9 +2401,9 @@ class DecrementMemberQuota(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW('Reverting decrement quota for member '
|
LOG.warning('Reverting decrement quota for member '
|
||||||
'on project {proj} Project quota counts may be '
|
'on project {proj} Project quota counts may be '
|
||||||
'incorrect.').format(proj=member.project_id))
|
'incorrect.'.format(proj=member.project_id))
|
||||||
|
|
||||||
# Increment the quota back if this task wasn't the failure
|
# Increment the quota back if this task wasn't the failure
|
||||||
if not isinstance(result, failure.Failure):
|
if not isinstance(result, failure.Failure):
|
||||||
@ -2490,10 +2460,9 @@ class DecrementPoolQuota(BaseDatabaseTask):
|
|||||||
lock_session.commit()
|
lock_session.commit()
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Failed to decrement pool quota for '
|
LOG.error('Failed to decrement pool quota for '
|
||||||
'project: {proj} the project may have excess '
|
'project: {proj} the project may have excess '
|
||||||
'quota in use.').format(
|
'quota in use.'.format(proj=pool.project_id))
|
||||||
proj=pool.project_id))
|
|
||||||
lock_session.rollback()
|
lock_session.rollback()
|
||||||
|
|
||||||
def revert(self, pool, pool_child_count, result, *args, **kwargs):
|
def revert(self, pool, pool_child_count, result, *args, **kwargs):
|
||||||
@ -2503,9 +2472,9 @@ class DecrementPoolQuota(BaseDatabaseTask):
|
|||||||
:returns: None
|
:returns: None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warning(_LW('Reverting decrement quota for pool '
|
LOG.warning('Reverting decrement quota for pool '
|
||||||
'on project {proj} Project quota counts may be '
|
'on project {proj} Project quota counts may be '
|
||||||
'incorrect.').format(proj=pool.project_id))
|
'incorrect.'.format(proj=pool.project_id))
|
||||||
|
|
||||||
# Increment the quota back if this task wasn't the failure
|
# Increment the quota back if this task wasn't the failure
|
||||||
if not isinstance(result, failure.Failure):
|
if not isinstance(result, failure.Failure):
|
||||||
|
@ -22,7 +22,6 @@ from taskflow.types import failure
|
|||||||
|
|
||||||
from octavia.common import constants
|
from octavia.common import constants
|
||||||
from octavia.common import utils
|
from octavia.common import utils
|
||||||
from octavia.i18n import _LW, _LE
|
|
||||||
from octavia.network import base
|
from octavia.network import base
|
||||||
from octavia.network import data_models as n_data_models
|
from octavia.network import data_models as n_data_models
|
||||||
|
|
||||||
@ -148,7 +147,7 @@ class PlugNetworks(BaseNetworkTask):
|
|||||||
def revert(self, amphora, delta, *args, **kwargs):
|
def revert(self, amphora, delta, *args, **kwargs):
|
||||||
"""Handle a failed network plug by removing all nics added."""
|
"""Handle a failed network plug by removing all nics added."""
|
||||||
|
|
||||||
LOG.warning(_LW("Unable to plug networks for amp id %s"), amphora.id)
|
LOG.warning("Unable to plug networks for amp id %s", amphora.id)
|
||||||
if not delta:
|
if not delta:
|
||||||
return
|
return
|
||||||
|
|
||||||
@ -183,7 +182,7 @@ class UnPlugNetworks(BaseNetworkTask):
|
|||||||
LOG.debug("Network %d not found", nic.network_id)
|
LOG.debug("Network %d not found", nic.network_id)
|
||||||
pass
|
pass
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE("Unable to unplug network"))
|
LOG.exception("Unable to unplug network")
|
||||||
pass # Todo(german) follow up if that makes sense
|
pass # Todo(german) follow up if that makes sense
|
||||||
|
|
||||||
|
|
||||||
@ -238,7 +237,7 @@ class HandleNetworkDeltas(BaseNetworkTask):
|
|||||||
except base.NetworkNotFound:
|
except base.NetworkNotFound:
|
||||||
LOG.debug("Network %d not found ", nic.network_id)
|
LOG.debug("Network %d not found ", nic.network_id)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE("Unable to unplug network"))
|
LOG.exception("Unable to unplug network")
|
||||||
return added_ports
|
return added_ports
|
||||||
|
|
||||||
def revert(self, result, deltas, *args, **kwargs):
|
def revert(self, result, deltas, *args, **kwargs):
|
||||||
@ -247,7 +246,7 @@ class HandleNetworkDeltas(BaseNetworkTask):
|
|||||||
if isinstance(result, failure.Failure):
|
if isinstance(result, failure.Failure):
|
||||||
return
|
return
|
||||||
for amp_id, delta in six.iteritems(deltas):
|
for amp_id, delta in six.iteritems(deltas):
|
||||||
LOG.warning(_LW("Unable to plug networks for amp id %s"),
|
LOG.warning("Unable to plug networks for amp id %s",
|
||||||
delta.amphora_id)
|
delta.amphora_id)
|
||||||
if not delta:
|
if not delta:
|
||||||
return
|
return
|
||||||
@ -277,7 +276,7 @@ class PlugVIP(BaseNetworkTask):
|
|||||||
|
|
||||||
if isinstance(result, failure.Failure):
|
if isinstance(result, failure.Failure):
|
||||||
return
|
return
|
||||||
LOG.warning(_LW("Unable to plug VIP for loadbalancer id %s"),
|
LOG.warning("Unable to plug VIP for loadbalancer id %s",
|
||||||
loadbalancer.id)
|
loadbalancer.id)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@ -291,9 +290,8 @@ class PlugVIP(BaseNetworkTask):
|
|||||||
|
|
||||||
self.network_driver.unplug_vip(loadbalancer, loadbalancer.vip)
|
self.network_driver.unplug_vip(loadbalancer, loadbalancer.vip)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("Failed to unplug VIP. Resources may still "
|
LOG.error("Failed to unplug VIP. Resources may still "
|
||||||
"be in use from vip: %(vip)s due to "
|
"be in use from vip: %(vip)s due to error: %(except)s",
|
||||||
"error: %(except)s"),
|
|
||||||
{'vip': loadbalancer.vip.ip_address, 'except': e})
|
{'vip': loadbalancer.vip.ip_address, 'except': e})
|
||||||
|
|
||||||
|
|
||||||
@ -307,7 +305,7 @@ class UnplugVIP(BaseNetworkTask):
|
|||||||
try:
|
try:
|
||||||
self.network_driver.unplug_vip(loadbalancer, loadbalancer.vip)
|
self.network_driver.unplug_vip(loadbalancer, loadbalancer.vip)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE("Unable to unplug vip from load balancer %s"),
|
LOG.exception("Unable to unplug vip from load balancer %s",
|
||||||
loadbalancer.id)
|
loadbalancer.id)
|
||||||
|
|
||||||
|
|
||||||
@ -328,16 +326,15 @@ class AllocateVIP(BaseNetworkTask):
|
|||||||
"""Handle a failure to allocate vip."""
|
"""Handle a failure to allocate vip."""
|
||||||
|
|
||||||
if isinstance(result, failure.Failure):
|
if isinstance(result, failure.Failure):
|
||||||
LOG.exception(_LE("Unable to allocate VIP"))
|
LOG.exception("Unable to allocate VIP")
|
||||||
return
|
return
|
||||||
vip = result
|
vip = result
|
||||||
LOG.warning(_LW("Deallocating vip %s"), vip.ip_address)
|
LOG.warning("Deallocating vip %s", vip.ip_address)
|
||||||
try:
|
try:
|
||||||
self.network_driver.deallocate_vip(vip)
|
self.network_driver.deallocate_vip(vip)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("Failed to deallocate VIP. Resources may still "
|
LOG.error("Failed to deallocate VIP. Resources may still "
|
||||||
"be in use from vip: %(vip)s due to "
|
"be in use from vip: %(vip)s due to error: %(except)s",
|
||||||
"error: %(except)s"),
|
|
||||||
{'vip': vip.ip_address, 'except': e})
|
{'vip': vip.ip_address, 'except': e})
|
||||||
|
|
||||||
|
|
||||||
@ -438,9 +435,8 @@ class PlugVIPPort(BaseNetworkTask):
|
|||||||
vrrp_port = amphorae_network_config.get(amphora.id).vrrp_port
|
vrrp_port = amphorae_network_config.get(amphora.id).vrrp_port
|
||||||
self.network_driver.unplug_port(amphora, vrrp_port)
|
self.network_driver.unplug_port(amphora, vrrp_port)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.warning(_LW('Failed to unplug vrrp port: {port} '
|
LOG.warning(('Failed to unplug vrrp port: {port} from amphora: '
|
||||||
'from amphora: {amp}').format(port=vrrp_port.id,
|
'{amp}').format(port=vrrp_port.id, amp=amphora.id))
|
||||||
amp=amphora.id))
|
|
||||||
|
|
||||||
|
|
||||||
class WaitForPortDetach(BaseNetworkTask):
|
class WaitForPortDetach(BaseNetworkTask):
|
||||||
|
@ -42,7 +42,7 @@ def add_alembic_subparser(sub, cmd):
|
|||||||
|
|
||||||
def do_upgrade(config, cmd):
|
def do_upgrade(config, cmd):
|
||||||
if not CONF.command.revision and not CONF.command.delta:
|
if not CONF.command.revision and not CONF.command.delta:
|
||||||
raise SystemExit('You must provide a revision or relative delta')
|
raise SystemExit(_('You must provide a revision or relative delta'))
|
||||||
|
|
||||||
revision = CONF.command.revision or ''
|
revision = CONF.command.revision or ''
|
||||||
if '-' in revision:
|
if '-' in revision:
|
||||||
|
@ -31,8 +31,6 @@ from octavia.common import data_models
|
|||||||
from octavia.common import exceptions
|
from octavia.common import exceptions
|
||||||
from octavia.common import validate
|
from octavia.common import validate
|
||||||
from octavia.db import models
|
from octavia.db import models
|
||||||
from octavia.i18n import _LE, _LW
|
|
||||||
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
|
|
||||||
@ -407,8 +405,8 @@ class Repositories(object):
|
|||||||
else:
|
else:
|
||||||
return True
|
return True
|
||||||
except db_exception.DBDeadlock:
|
except db_exception.DBDeadlock:
|
||||||
LOG.warning(_LW('Quota project lock timed out for project: '
|
LOG.warning(('Quota project lock timed out for project: '
|
||||||
'{proj}').format(proj=project_id))
|
'{proj}').format(proj=project_id))
|
||||||
raise exceptions.ProjectBusyException()
|
raise exceptions.ProjectBusyException()
|
||||||
return False
|
return False
|
||||||
|
|
||||||
@ -431,10 +429,10 @@ class Repositories(object):
|
|||||||
project_id=project_id).with_for_update().first()
|
project_id=project_id).with_for_update().first()
|
||||||
if not quotas:
|
if not quotas:
|
||||||
if not CONF.auth_strategy == consts.NOAUTH:
|
if not CONF.auth_strategy == consts.NOAUTH:
|
||||||
LOG.error(_LE(
|
LOG.error('Quota decrement on {clss} called on project: '
|
||||||
'Quota decrement on {clss} called on project: {proj} '
|
'{proj} with no quota record in the '
|
||||||
'with no quota record in the database.').format(
|
'database.'.format(clss=type(_class),
|
||||||
clss=type(_class), proj=project_id))
|
proj=project_id))
|
||||||
return
|
return
|
||||||
if _class == data_models.LoadBalancer:
|
if _class == data_models.LoadBalancer:
|
||||||
if (quotas.in_use_load_balancer is not None and
|
if (quotas.in_use_load_balancer is not None and
|
||||||
@ -443,11 +441,10 @@ class Repositories(object):
|
|||||||
quotas.in_use_load_balancer - quantity)
|
quotas.in_use_load_balancer - quantity)
|
||||||
else:
|
else:
|
||||||
if not CONF.auth_strategy == consts.NOAUTH:
|
if not CONF.auth_strategy == consts.NOAUTH:
|
||||||
LOG.warning(_LW(
|
LOG.warning('Quota decrement on {clss} called on '
|
||||||
'Quota decrement on {clss} called on project: '
|
'project: {proj} that would cause a '
|
||||||
'{proj} that would cause a negative '
|
'negative quota.'.format(clss=type(_class),
|
||||||
'quota.').format(clss=type(_class),
|
proj=project_id))
|
||||||
proj=project_id))
|
|
||||||
if _class == data_models.Listener:
|
if _class == data_models.Listener:
|
||||||
if (quotas.in_use_listener is not None and
|
if (quotas.in_use_listener is not None and
|
||||||
quotas.in_use_listener > 0):
|
quotas.in_use_listener > 0):
|
||||||
@ -455,11 +452,10 @@ class Repositories(object):
|
|||||||
quotas.in_use_listener - quantity)
|
quotas.in_use_listener - quantity)
|
||||||
else:
|
else:
|
||||||
if not CONF.auth_strategy == consts.NOAUTH:
|
if not CONF.auth_strategy == consts.NOAUTH:
|
||||||
LOG.warning(_LW(
|
LOG.warning('Quota decrement on {clss} called on '
|
||||||
'Quota decrement on {clss} called on project: '
|
'project: {proj} that would cause a '
|
||||||
'{proj} that would cause a negative '
|
'negative quota.'.format(clss=type(_class),
|
||||||
'quota.').format(clss=type(_class),
|
proj=project_id))
|
||||||
proj=project_id))
|
|
||||||
if _class == data_models.Pool:
|
if _class == data_models.Pool:
|
||||||
if (quotas.in_use_pool is not None and
|
if (quotas.in_use_pool is not None and
|
||||||
quotas.in_use_pool > 0):
|
quotas.in_use_pool > 0):
|
||||||
@ -467,11 +463,10 @@ class Repositories(object):
|
|||||||
quotas.in_use_pool - quantity)
|
quotas.in_use_pool - quantity)
|
||||||
else:
|
else:
|
||||||
if not CONF.auth_strategy == consts.NOAUTH:
|
if not CONF.auth_strategy == consts.NOAUTH:
|
||||||
LOG.warning(_LW(
|
LOG.warning('Quota decrement on {clss} called on '
|
||||||
'Quota decrement on {clss} called on project: '
|
'project: {proj} that would cause a '
|
||||||
'{proj} that would cause a negative '
|
'negative quota.'.format(clss=type(_class),
|
||||||
'quota.').format(clss=type(_class),
|
proj=project_id))
|
||||||
proj=project_id))
|
|
||||||
if _class == data_models.HealthMonitor:
|
if _class == data_models.HealthMonitor:
|
||||||
if (quotas.in_use_health_monitor is not None and
|
if (quotas.in_use_health_monitor is not None and
|
||||||
quotas.in_use_health_monitor > 0):
|
quotas.in_use_health_monitor > 0):
|
||||||
@ -479,11 +474,10 @@ class Repositories(object):
|
|||||||
quotas.in_use_health_monitor - quantity)
|
quotas.in_use_health_monitor - quantity)
|
||||||
else:
|
else:
|
||||||
if not CONF.auth_strategy == consts.NOAUTH:
|
if not CONF.auth_strategy == consts.NOAUTH:
|
||||||
LOG.warning(_LW(
|
LOG.warning('Quota decrement on {clss} called on '
|
||||||
'Quota decrement on {clss} called on project: '
|
'project: {proj} that would cause a '
|
||||||
'{proj} that would cause a negative '
|
'negative quota.'.format(clss=type(_class),
|
||||||
'quota.').format(clss=type(_class),
|
proj=project_id))
|
||||||
proj=project_id))
|
|
||||||
if _class == data_models.Member:
|
if _class == data_models.Member:
|
||||||
if (quotas.in_use_member is not None and
|
if (quotas.in_use_member is not None and
|
||||||
quotas.in_use_member > 0):
|
quotas.in_use_member > 0):
|
||||||
@ -491,14 +485,13 @@ class Repositories(object):
|
|||||||
quotas.in_use_member - quantity)
|
quotas.in_use_member - quantity)
|
||||||
else:
|
else:
|
||||||
if not CONF.auth_strategy == consts.NOAUTH:
|
if not CONF.auth_strategy == consts.NOAUTH:
|
||||||
LOG.warning(_LW(
|
LOG.warning('Quota decrement on {clss} called on '
|
||||||
'Quota decrement on {clss} called on project: '
|
'project: {proj} that would cause a '
|
||||||
'{proj} that would cause a negative '
|
'negative quota.'.format(clss=type(_class),
|
||||||
'quota.').format(clss=type(_class),
|
proj=project_id))
|
||||||
proj=project_id))
|
|
||||||
except db_exception.DBDeadlock:
|
except db_exception.DBDeadlock:
|
||||||
LOG.warning(_LW('Quota project lock timed out for project: '
|
LOG.warning(('Quota project lock timed out for project: '
|
||||||
'{proj}').format(proj=project_id))
|
'{proj}').format(proj=project_id))
|
||||||
raise exceptions.ProjectBusyException()
|
raise exceptions.ProjectBusyException()
|
||||||
|
|
||||||
def create_load_balancer_tree(self, session, lock_session, lb_dict):
|
def create_load_balancer_tree(self, session, lock_session, lb_dict):
|
||||||
|
@ -14,7 +14,6 @@
|
|||||||
|
|
||||||
import re
|
import re
|
||||||
|
|
||||||
import pep8
|
|
||||||
|
|
||||||
"""
|
"""
|
||||||
Guidelines for writing new hacking checks
|
Guidelines for writing new hacking checks
|
||||||
@ -31,28 +30,17 @@ Guidelines for writing new hacking checks
|
|||||||
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
log_translation = re.compile(
|
|
||||||
r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)\(\s*('|\")")
|
|
||||||
author_tag_re = (re.compile("^\s*#\s*@?(a|A)uthor"),
|
author_tag_re = (re.compile("^\s*#\s*@?(a|A)uthor"),
|
||||||
re.compile("^\.\.\s+moduleauthor::"))
|
re.compile("^\.\.\s+moduleauthor::"))
|
||||||
_all_hints = set(['_', '_LI', '_LE', '_LW', '_LC'])
|
|
||||||
_all_log_levels = {
|
_all_log_levels = {'critical', 'error', 'exception', 'info', 'warning'}
|
||||||
# NOTE(yamamoto): Following nova which uses _() for audit.
|
_all_hints = {'_LC', '_LE', '_LI', '_', '_LW'}
|
||||||
'audit': '_',
|
|
||||||
'error': '_LE',
|
_log_translation_hint = re.compile(
|
||||||
'info': '_LI',
|
r".*LOG\.(%(levels)s)\(\s*(%(hints)s)\(" % {
|
||||||
'warn': '_LW',
|
'levels': '|'.join(_all_log_levels),
|
||||||
'warning': '_LW',
|
'hints': '|'.join(_all_hints),
|
||||||
'critical': '_LC',
|
})
|
||||||
'exception': '_LE',
|
|
||||||
}
|
|
||||||
log_translation_hints = []
|
|
||||||
for level, hint in _all_log_levels.items():
|
|
||||||
r = "(.)*LOG\.%(level)s\(\s*((%(wrong_hints)s)\(|'|\")" % {
|
|
||||||
'level': level,
|
|
||||||
'wrong_hints': '|'.join(_all_hints - set([hint])),
|
|
||||||
}
|
|
||||||
log_translation_hints.append(re.compile(r))
|
|
||||||
|
|
||||||
assert_trueinst_re = re.compile(
|
assert_trueinst_re = re.compile(
|
||||||
r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, "
|
r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, "
|
||||||
@ -76,8 +64,9 @@ assert_no_xrange_re = re.compile(
|
|||||||
r"\s*xrange\s*\(")
|
r"\s*xrange\s*\(")
|
||||||
|
|
||||||
|
|
||||||
def _directory_to_check_translation(filename):
|
def _translation_checks_not_enforced(filename):
|
||||||
return True
|
# Do not do these validations on tests
|
||||||
|
return any(pat in filename for pat in ["/tests/", "rally-jobs/plugins/"])
|
||||||
|
|
||||||
|
|
||||||
def assert_true_instance(logical_line):
|
def assert_true_instance(logical_line):
|
||||||
@ -106,38 +95,6 @@ def assert_equal_or_not_none(logical_line):
|
|||||||
yield (0, msg)
|
yield (0, msg)
|
||||||
|
|
||||||
|
|
||||||
def no_translate_debug_logs(logical_line, filename):
|
|
||||||
"""Check for 'LOG.debug(_('
|
|
||||||
|
|
||||||
As per our translation policy,
|
|
||||||
https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
|
|
||||||
we shouldn't translate debug level logs.
|
|
||||||
|
|
||||||
* This check assumes that 'LOG' is a logger.
|
|
||||||
O319
|
|
||||||
"""
|
|
||||||
if _directory_to_check_translation(filename) and logical_line.startswith(
|
|
||||||
"LOG.debug(_("):
|
|
||||||
yield(0, "O319 Don't translate debug level logs")
|
|
||||||
|
|
||||||
|
|
||||||
def validate_log_translations(logical_line, physical_line, filename):
|
|
||||||
# Translations are not required in the test directory
|
|
||||||
if "octavia/tests" in filename:
|
|
||||||
return
|
|
||||||
if pep8.noqa(physical_line):
|
|
||||||
return
|
|
||||||
msg = "O320: Log messages require translations!"
|
|
||||||
if log_translation.match(logical_line):
|
|
||||||
yield (0, msg)
|
|
||||||
|
|
||||||
if _directory_to_check_translation(filename):
|
|
||||||
msg = "O320: Log messages require translation hints!"
|
|
||||||
for log_translation_hint in log_translation_hints:
|
|
||||||
if log_translation_hint.match(logical_line):
|
|
||||||
yield (0, msg)
|
|
||||||
|
|
||||||
|
|
||||||
def use_jsonutils(logical_line, filename):
|
def use_jsonutils(logical_line, filename):
|
||||||
msg = "O321: jsonutils.%(fun)s must be used instead of json.%(fun)s"
|
msg = "O321: jsonutils.%(fun)s must be used instead of json.%(fun)s"
|
||||||
|
|
||||||
@ -219,11 +176,57 @@ def no_xrange(logical_line):
|
|||||||
yield(0, "O340: Do not use xrange().")
|
yield(0, "O340: Do not use xrange().")
|
||||||
|
|
||||||
|
|
||||||
|
def no_translate_logs(logical_line, filename):
|
||||||
|
"""O341 - Don't translate logs.
|
||||||
|
|
||||||
|
Check for 'LOG.*(_(' and 'LOG.*(_Lx('
|
||||||
|
|
||||||
|
Translators don't provide translations for log messages, and operators
|
||||||
|
asked not to translate them.
|
||||||
|
|
||||||
|
* This check assumes that 'LOG' is a logger.
|
||||||
|
|
||||||
|
:param logical_line: The logical line to check.
|
||||||
|
:param filename: The file name where the logical line exists.
|
||||||
|
:returns: None if the logical line passes the check, otherwise a tuple
|
||||||
|
is yielded that contains the offending index in logical line and a
|
||||||
|
message describe the check validation failure.
|
||||||
|
"""
|
||||||
|
if _translation_checks_not_enforced(filename):
|
||||||
|
return
|
||||||
|
|
||||||
|
msg = "O341: Log messages should not be translated!"
|
||||||
|
match = _log_translation_hint.match(logical_line)
|
||||||
|
if match:
|
||||||
|
yield (logical_line.index(match.group()), msg)
|
||||||
|
|
||||||
|
|
||||||
|
def check_raised_localized_exceptions(logical_line, filename):
|
||||||
|
"""O342 - Untranslated exception message.
|
||||||
|
|
||||||
|
:param logical_line: The logical line to check.
|
||||||
|
:param filename: The file name where the logical line exists.
|
||||||
|
:returns: None if the logical line passes the check, otherwise a tuple
|
||||||
|
is yielded that contains the offending index in logical line and a
|
||||||
|
message describe the check validation failure.
|
||||||
|
"""
|
||||||
|
if _translation_checks_not_enforced(filename):
|
||||||
|
return
|
||||||
|
|
||||||
|
logical_line = logical_line.strip()
|
||||||
|
raised_search = re.compile(
|
||||||
|
r"raise (?:\w*)\((.*)\)").match(logical_line)
|
||||||
|
if raised_search:
|
||||||
|
exception_msg = raised_search.groups()[0]
|
||||||
|
if exception_msg.startswith("\"") or exception_msg.startswith("\'"):
|
||||||
|
msg = "O342: Untranslated exception message."
|
||||||
|
yield (logical_line.index(exception_msg), msg)
|
||||||
|
|
||||||
|
|
||||||
def factory(register):
|
def factory(register):
|
||||||
register(assert_true_instance)
|
register(assert_true_instance)
|
||||||
register(assert_equal_or_not_none)
|
register(assert_equal_or_not_none)
|
||||||
register(no_translate_debug_logs)
|
register(no_translate_logs)
|
||||||
register(validate_log_translations)
|
|
||||||
register(use_jsonutils)
|
register(use_jsonutils)
|
||||||
register(no_author_tags)
|
register(no_author_tags)
|
||||||
register(assert_equal_true_or_false)
|
register(assert_equal_true_or_false)
|
||||||
@ -231,3 +234,4 @@ def factory(register):
|
|||||||
register(assert_equal_in)
|
register(assert_equal_in)
|
||||||
register(no_log_warn)
|
register(no_log_warn)
|
||||||
register(no_xrange)
|
register(no_xrange)
|
||||||
|
register(check_raised_localized_exceptions)
|
||||||
|
@ -23,7 +23,6 @@ import six
|
|||||||
from octavia.common import clients
|
from octavia.common import clients
|
||||||
from octavia.common import constants
|
from octavia.common import constants
|
||||||
from octavia.common import data_models
|
from octavia.common import data_models
|
||||||
from octavia.i18n import _LE, _LI, _LW
|
|
||||||
from octavia.network import base
|
from octavia.network import base
|
||||||
from octavia.network import data_models as n_data_models
|
from octavia.network import data_models as n_data_models
|
||||||
from octavia.network.drivers.neutron import base as neutron_base
|
from octavia.network.drivers.neutron import base as neutron_base
|
||||||
@ -112,7 +111,7 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
|
|||||||
try:
|
try:
|
||||||
self._add_allowed_address_pair_to_port(port_id, vip_address)
|
self._add_allowed_address_pair_to_port(port_id, vip_address)
|
||||||
except neutron_client_exceptions.PortNotFoundClient as e:
|
except neutron_client_exceptions.PortNotFoundClient as e:
|
||||||
raise base.PortNotFound(e.message)
|
raise base.PortNotFound(e.message)
|
||||||
except Exception:
|
except Exception:
|
||||||
message = _('Error adding allowed address pair {ip} '
|
message = _('Error adding allowed address pair {ip} '
|
||||||
'to port {port_id}.').format(ip=vip_address,
|
'to port {port_id}.').format(ip=vip_address,
|
||||||
@ -128,7 +127,7 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
|
|||||||
|
|
||||||
def _get_ethertype_for_ip(self, ip):
|
def _get_ethertype_for_ip(self, ip):
|
||||||
address = ipaddress.ip_address(
|
address = ipaddress.ip_address(
|
||||||
ip if six.text_type == type(ip) else six.u(ip))
|
ip if isinstance(ip, six.text_type) else six.u(ip))
|
||||||
return 'IPv6' if address.version is 6 else 'IPv4'
|
return 'IPv6' if address.version is 6 else 'IPv4'
|
||||||
|
|
||||||
def _update_security_group_rules(self, load_balancer, sec_grp_id):
|
def _update_security_group_rules(self, load_balancer, sec_grp_id):
|
||||||
@ -222,15 +221,15 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
|
|||||||
while attempts <= CONF.networking.max_retries:
|
while attempts <= CONF.networking.max_retries:
|
||||||
try:
|
try:
|
||||||
self.neutron_client.delete_security_group(sec_grp)
|
self.neutron_client.delete_security_group(sec_grp)
|
||||||
LOG.info(_LI("Deleted security group %s"), sec_grp)
|
LOG.info("Deleted security group %s", sec_grp)
|
||||||
return
|
return
|
||||||
except neutron_client_exceptions.NotFound:
|
except neutron_client_exceptions.NotFound:
|
||||||
LOG.info(_LI("Security group %s not found, will assume it is "
|
LOG.info("Security group %s not found, will assume it is "
|
||||||
"already deleted"), sec_grp)
|
"already deleted", sec_grp)
|
||||||
return
|
return
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.warning(_LW("Attempt %(attempt)s to remove security group "
|
LOG.warning("Attempt %(attempt)s to remove security group "
|
||||||
"%(sg)s failed."),
|
"%(sg)s failed.",
|
||||||
{'attempt': attempts + 1, 'sg': sec_grp})
|
{'attempt': attempts + 1, 'sg': sec_grp})
|
||||||
attempts += 1
|
attempts += 1
|
||||||
time.sleep(CONF.networking.retry_interval)
|
time.sleep(CONF.networking.retry_interval)
|
||||||
@ -249,7 +248,7 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
|
|||||||
if sec_grp:
|
if sec_grp:
|
||||||
sec_grp = sec_grp.get('id')
|
sec_grp = sec_grp.get('id')
|
||||||
LOG.info(
|
LOG.info(
|
||||||
_LI("Removing security group %(sg)s from port %(port)s"),
|
"Removing security group %(sg)s from port %(port)s",
|
||||||
{'sg': sec_grp, 'port': vip.port_id})
|
{'sg': sec_grp, 'port': vip.port_id})
|
||||||
raw_port = self.neutron_client.show_port(port.id)
|
raw_port = self.neutron_client.show_port(port.id)
|
||||||
sec_grps = raw_port.get('port', {}).get('security_groups', [])
|
sec_grps = raw_port.get('port', {}).get('security_groups', [])
|
||||||
@ -291,8 +290,8 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
|
|||||||
LOG.exception(message)
|
LOG.exception(message)
|
||||||
raise base.DeallocateVIPException(message)
|
raise base.DeallocateVIPException(message)
|
||||||
else:
|
else:
|
||||||
LOG.info(_LI("Port %s will not be deleted by Octavia as it was "
|
LOG.info("Port %s will not be deleted by Octavia as it was "
|
||||||
"not created by Octavia."), vip.port_id)
|
"not created by Octavia.", vip.port_id)
|
||||||
|
|
||||||
def plug_vip(self, load_balancer, vip):
|
def plug_vip(self, load_balancer, vip):
|
||||||
if self.sec_grp_enabled:
|
if self.sec_grp_enabled:
|
||||||
@ -330,7 +329,7 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
|
|||||||
|
|
||||||
def allocate_vip(self, load_balancer):
|
def allocate_vip(self, load_balancer):
|
||||||
if load_balancer.vip.port_id:
|
if load_balancer.vip.port_id:
|
||||||
LOG.info(_LI('Port %s already exists. Nothing to be done.'),
|
LOG.info('Port %s already exists. Nothing to be done.',
|
||||||
load_balancer.vip.port_id)
|
load_balancer.vip.port_id)
|
||||||
port = self.get_port(load_balancer.vip.port_id)
|
port = self.get_port(load_balancer.vip.port_id)
|
||||||
return self._port_to_vip(port, load_balancer)
|
return self._port_to_vip(port, load_balancer)
|
||||||
@ -356,8 +355,8 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
|
|||||||
try:
|
try:
|
||||||
subnet = self.get_subnet(vip.subnet_id)
|
subnet = self.get_subnet(vip.subnet_id)
|
||||||
except base.SubnetNotFound:
|
except base.SubnetNotFound:
|
||||||
msg = _LE("Can't unplug vip because vip subnet {0} was not "
|
msg = ("Can't unplug vip because vip subnet {0} was not "
|
||||||
"found").format(vip.subnet_id)
|
"found").format(vip.subnet_id)
|
||||||
LOG.exception(msg)
|
LOG.exception(msg)
|
||||||
raise base.PluggedVIPNotFound(msg)
|
raise base.PluggedVIPNotFound(msg)
|
||||||
for amphora in six.moves.filter(
|
for amphora in six.moves.filter(
|
||||||
@ -369,7 +368,7 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
|
|||||||
if not interface:
|
if not interface:
|
||||||
# Thought about raising PluggedVIPNotFound exception but
|
# Thought about raising PluggedVIPNotFound exception but
|
||||||
# then that wouldn't evaluate all amphorae, so just continue
|
# then that wouldn't evaluate all amphorae, so just continue
|
||||||
LOG.debug(_LI('Cannot get amphora %s interface, skipped'),
|
LOG.debug('Cannot get amphora %s interface, skipped',
|
||||||
amphora.compute_id)
|
amphora.compute_id)
|
||||||
continue
|
continue
|
||||||
try:
|
try:
|
||||||
@ -397,9 +396,8 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
|
|||||||
except base.PortNotFound:
|
except base.PortNotFound:
|
||||||
pass
|
pass
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE('Failed to delete port. Resources may still '
|
LOG.error('Failed to delete port. Resources may still be in '
|
||||||
'be in use for port: %(port)s due to '
|
'use for port: %(port)s due to error: %s(except)s',
|
||||||
'error: %s(except)s'),
|
|
||||||
{'port': amphora.vrrp_port_id, 'except': e})
|
{'port': amphora.vrrp_port_id, 'except': e})
|
||||||
|
|
||||||
def plug_network(self, compute_id, network_id, ip_address=None):
|
def plug_network(self, compute_id, network_id, ip_address=None):
|
||||||
@ -501,8 +499,8 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
|
|||||||
else:
|
else:
|
||||||
raise base.PlugNetworkException(e.message)
|
raise base.PlugNetworkException(e.message)
|
||||||
except nova_client_exceptions.Conflict:
|
except nova_client_exceptions.Conflict:
|
||||||
LOG.info(_LI('Port %(portid)s is already plugged, '
|
LOG.info('Port %(portid)s is already plugged, '
|
||||||
'skipping') % {'portid': port.id})
|
'skipping' % {'portid': port.id})
|
||||||
plugged_interface = n_data_models.Interface(
|
plugged_interface = n_data_models.Interface(
|
||||||
compute_id=amphora.compute_id,
|
compute_id=amphora.compute_id,
|
||||||
network_id=port.network_id,
|
network_id=port.network_id,
|
||||||
@ -594,4 +592,4 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
|
|||||||
|
|
||||||
except (neutron_client_exceptions.NotFound,
|
except (neutron_client_exceptions.NotFound,
|
||||||
neutron_client_exceptions.PortNotFoundClient):
|
neutron_client_exceptions.PortNotFoundClient):
|
||||||
pass
|
pass
|
||||||
|
Loading…
Reference in New Issue
Block a user