Drop log translations
Log messages are no longer being translated. This removes all use of the
_LE, _LI, and _LW translation markers to simplify logging and to avoid
confusion with new contributions.

See:
http://lists.openstack.org/pipermail/openstack-i18n/2016-November/002574.html
http://lists.openstack.org/pipermail/openstack-dev/2017-March/113365.html

Change-Id: I9d37ae28a3fecbe910e60dc7f22e229a7b65940c
This commit is contained in:
parent 72af9f1b34
commit 6228a06399
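The change is mechanical throughout the diff below: each log call loses its
`_L*` wrapper while keeping lazy `%`-style argument passing. A minimal sketch
of the before/after pattern, using a line from the diff itself (`rid` is a
stand-in request id added here for illustration):

```python
from oslo_log import log as logging

LOG = logging.getLogger(__name__)
rid = 42  # stand-in request id, not part of the original modules

# Before: the message was wrapped in a log-level translation marker.
#   from vmware_nsx._i18n import _LW
#   LOG.warning(_LW("[%d] no API providers currently available."), rid)

# After: the marker is dropped; arguments are still passed separately,
# so string interpolation stays deferred until the record is emitted.
LOG.warning("[%d] no API providers currently available.", rid)
```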
tox.ini
@@ -97,7 +97,8 @@ commands = sphinx-build -W -b html doc/source doc/build/html
 # H904 Wrap long lines in parentheses instead of a backslash
 # TODO(dougwig) -- uncomment this to test for remaining linkages
 # N530 direct neutron imports not allowed
-ignore = E125,E126,E128,E129,E265,H305,H307,H404,H405,H904,N530
+# N531 translations hints
+ignore = E125,E126,E128,E129,E265,H305,H307,H404,H405,H904,N530,N531
 show-source = true
 builtins = _
 exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build,.ropeproject
@@ -27,16 +27,6 @@ _C = _translators.contextual_form
 # The plural translation function using the name "_P"
 _P = _translators.plural_form
 
-# Translators for log levels.
-#
-# The abbreviated names are meant to reflect the usual use of a short
-# name like '_'. The "L" is for "log" and the other letter comes from
-# the level.
-_LI = _translators.log_info
-_LW = _translators.log_warning
-_LE = _translators.log_error
-_LC = _translators.log_critical
-
 
 def get_available_languages():
     return oslo_i18n.get_available_languages(DOMAIN)
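Only the log-level translators disappear from `_i18n`; the user-facing `_`
remains, which is why later hunks trim imports such as
`from vmware_nsx._i18n import _, _LI, _LW` down to
`from vmware_nsx._i18n import _`. A hedged sketch of the surviving
convention (the `DOMAIN` value is assumed for illustration; the real module
defines its own):

```python
import oslo_i18n

DOMAIN = "vmware-nsx"  # assumed translation domain, for illustration only
_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)

# Kept: "_" still translates user-facing text such as exception messages.
_ = _translators.primary

# Dropped: the _LI/_LW/_LE/_LC log-level translators are no longer defined.

def fail(status):
    # Exception text stays translatable; log text (per this commit) does not.
    raise Exception(_("Server error return: %s") % status)
```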
@@ -22,7 +22,6 @@ from oslo_log import log as logging
 import six
 from six.moves import http_client as httplib
 
-from vmware_nsx._i18n import _LE, _LI, _LW
 from vmware_nsx import api_client
 
 LOG = logging.getLogger(__name__)
@@ -101,15 +100,15 @@ class ApiClientBase(object):
         api_providers are configured.
         '''
         if not self._api_providers:
-            LOG.warning(_LW("[%d] no API providers currently available."), rid)
+            LOG.warning("[%d] no API providers currently available.", rid)
             return None
         if self._conn_pool.empty():
             LOG.debug("[%d] Waiting to acquire API client connection.", rid)
         priority, conn = self._conn_pool.get()
         now = time.time()
         if getattr(conn, 'last_used', now) < now - cfg.CONF.conn_idle_timeout:
-            LOG.info(_LI("[%(rid)d] Connection %(conn)s idle for %(sec)0.2f "
-                         "seconds; reconnecting."),
+            LOG.info("[%(rid)d] Connection %(conn)s idle for %(sec)0.2f "
+                     "seconds; reconnecting.",
                      {'rid': rid, 'conn': api_client.ctrl_conn_to_str(conn),
                       'sec': now - conn.last_used})
             conn = self._create_connection(*self._conn_params(conn))
@@ -149,8 +148,8 @@ class ApiClientBase(object):
         priority = http_conn.priority
         if bad_state:
             # Reconnect to provider.
-            LOG.warning(_LW("[%(rid)d] Connection returned in bad state, "
-                            "reconnecting to %(conn)s"),
+            LOG.warning("[%(rid)d] Connection returned in bad state, "
+                        "reconnecting to %(conn)s",
                         {'rid': rid,
                          'conn': api_client.ctrl_conn_to_str(http_conn)})
             http_conn = self._create_connection(*self._conn_params(http_conn))
@@ -181,7 +180,7 @@ class ApiClientBase(object):
 
         data = self._get_provider_data(conn)
         if data is None:
-            LOG.error(_LE("Login request for an invalid connection: '%s'"),
+            LOG.error("Login request for an invalid connection: '%s'",
                       api_client.ctrl_conn_to_str(conn))
             return
         provider_sem = data[0]
@@ -19,7 +19,6 @@
 from oslo_log import log as logging
 from six.moves import http_client as httplib
 
-from vmware_nsx._i18n import _LE
 from vmware_nsx.api_client import base
 from vmware_nsx.api_client import eventlet_client
 from vmware_nsx.api_client import eventlet_request
@@ -101,7 +100,7 @@ class NsxApiClient(eventlet_client.EventletApiClient):
 
         if response is None:
             # Timeout.
-            LOG.error(_LE('Request timed out: %(method)s to %(url)s'),
+            LOG.error('Request timed out: %(method)s to %(url)s',
                       {'method': method, 'url': url})
             raise exception.RequestTimeout()
 
@@ -112,15 +111,15 @@ class NsxApiClient(eventlet_client.EventletApiClient):
         # Fail-fast: Check for exception conditions and raise the
         # appropriate exceptions for known error codes.
         if status in exception.ERROR_MAPPINGS:
-            LOG.error(_LE("Received error code: %s"), status)
-            LOG.error(_LE("Server Error Message: %s"), response.body)
+            LOG.error("Received error code: %s", status)
+            LOG.error("Server Error Message: %s", response.body)
             exception.ERROR_MAPPINGS[status](response)
 
         # Continue processing for non-error condition.
         if (status != httplib.OK and status != httplib.CREATED
                 and status != httplib.NO_CONTENT):
-            LOG.error(_LE("%(method)s to %(url)s, unexpected response code: "
-                          "%(status)d (content = '%(body)s')"),
+            LOG.error("%(method)s to %(url)s, unexpected response code: "
+                      "%(status)d (content = '%(body)s')",
                       {'method': method, 'url': url,
                        'status': response.status, 'body': response.body})
             return None
@@ -136,6 +135,6 @@ class NsxApiClient(eventlet_client.EventletApiClient):
         # one of the server that responds.
         self.request('GET', '/ws.v1/control-cluster/node')
         if not self._version:
-            LOG.error(_LE('Unable to determine NSX version. '
-                          'Plugin might not work as expected.'))
+            LOG.error('Unable to determine NSX version. '
+                      'Plugin might not work as expected.')
         return self._version
@@ -21,7 +21,6 @@ import eventlet
 eventlet.monkey_patch()
 from oslo_log import log as logging
 
-from vmware_nsx._i18n import _LE
 from vmware_nsx.api_client import base
 from vmware_nsx.api_client import eventlet_request
 
@@ -143,7 +142,7 @@ class EventletApiClient(base.ApiClientBase):
         ret = g.join()
         if ret:
             if isinstance(ret, Exception):
-                LOG.error(_LE('Login error "%s"'), ret)
+                LOG.error('Login error "%s"', ret)
                 raise ret
 
             cookie = ret.getheader("Set-Cookie")
@@ -21,7 +21,7 @@ from oslo_serialization import jsonutils
 from six.moves import http_client as httplib
 from six.moves.urllib import parse
 
-from vmware_nsx._i18n import _, _LI, _LW
+from vmware_nsx._i18n import _
 from vmware_nsx.api_client import request
 
 LOG = logging.getLogger(__name__)
@@ -119,7 +119,7 @@ class EventletApiRequest(request.ApiRequest):
             with eventlet.timeout.Timeout(self._request_timeout, False):
                 return self._handle_request()
 
-            LOG.info(_LI('[%d] Request timeout.'), self._rid())
+            LOG.info('[%d] Request timeout.', self._rid())
             self._request_error = Exception(_('Request timeout'))
             return None
         else:
@@ -153,8 +153,8 @@ class EventletApiRequest(request.ApiRequest):
                 self._request_error = None
                 response = req
             else:
-                LOG.info(_LI('[%(rid)d] Error while handling request: '
-                             '%(req)s'),
+                LOG.info('[%(rid)d] Error while handling request: '
+                         '%(req)s',
                          {'rid': self._rid(), 'req': req})
                 self._request_error = req
                 response = None
@@ -210,7 +210,7 @@ class GetApiProvidersRequestEventlet(EventletApiRequest):
                     ret.append(_provider_from_listen_addr(addr))
             return ret
         except Exception as e:
-            LOG.warning(_LW("[%(rid)d] Failed to parse API provider: %(e)s"),
+            LOG.warning("[%(rid)d] Failed to parse API provider: %(e)s",
                         {'rid': self._rid(), 'e': e})
            # intentionally fall through
        return None
@@ -27,7 +27,7 @@ import six
 from six.moves import http_client as httplib
 import six.moves.urllib.parse as urlparse
 
-from vmware_nsx._i18n import _, _LI, _LW
+from vmware_nsx._i18n import _
 from vmware_nsx import api_client
 
 LOG = logging.getLogger(__name__)
@@ -122,8 +122,8 @@ class ApiRequest(object):
             conn.request(self._method, url, self._body, headers)
         except Exception as e:
             with excutils.save_and_reraise_exception():
-                LOG.warning(_LW("[%(rid)d] Exception issuing request: "
-                                "%(e)s"),
+                LOG.warning("[%(rid)d] Exception issuing request: "
+                            "%(e)s",
                             {'rid': self._rid(), 'e': e})
 
         response = conn.getresponse()
@@ -158,8 +158,8 @@ class ApiRequest(object):
                                            httplib.TEMPORARY_REDIRECT]:
                     break
                 elif redirects >= self._redirects:
-                    LOG.info(_LI("[%d] Maximum redirects exceeded, aborting "
-                                 "request"), self._rid())
+                    LOG.info("[%d] Maximum redirects exceeded, aborting "
+                             "request", self._rid())
                     break
                 redirects += 1
 
@@ -168,7 +168,7 @@ class ApiRequest(object):
                 if url is None:
                     response.status = httplib.INTERNAL_SERVER_ERROR
                     break
-                LOG.info(_LI("[%(rid)d] Redirecting request to: %(conn)s"),
+                LOG.info("[%(rid)d] Redirecting request to: %(conn)s",
                          {'rid': self._rid(),
                           'conn': self._request_str(conn, url)})
                 # yield here, just in case we are not out of the loop yet
@@ -181,8 +181,8 @@ class ApiRequest(object):
             # queue.
             if (response.status == httplib.INTERNAL_SERVER_ERROR and
                     response.status > httplib.NOT_IMPLEMENTED):
-                LOG.warning(_LW("[%(rid)d] Request '%(method)s %(url)s' "
-                                "received: %(status)s"),
+                LOG.warning("[%(rid)d] Request '%(method)s %(url)s' "
+                            "received: %(status)s",
                             {'rid': self._rid(), 'method': self._method,
                              'url': self._url, 'status': response.status})
                 raise Exception(_('Server error return: %s'), response.status)
@@ -197,8 +197,8 @@ class ApiRequest(object):
             msg = str(e)
             if response is None:
                 elapsed_time = time.time() - issued_time
-                LOG.warning(_LW("[%(rid)d] Failed request '%(conn)s': '%(msg)s' "
-                                "(%(elapsed)s seconds)"),
+                LOG.warning("[%(rid)d] Failed request '%(conn)s': '%(msg)s' "
+                            "(%(elapsed)s seconds)",
                             {'rid': self._rid(),
                              'conn': self._request_str(conn, url),
                              'msg': msg, 'elapsed': elapsed_time})
@@ -232,8 +232,8 @@ class ApiRequest(object):
                 url = value
                 break
         if not url:
-            LOG.warning(_LW("[%d] Received redirect status without location "
-                            "header field"), self._rid())
+            LOG.warning("[%d] Received redirect status without location "
+                        "header field", self._rid())
            return (conn, None)
        # Accept location with the following format:
        # 1. /path, redirect to same node
@@ -249,13 +249,13 @@ class ApiRequest(object):
                 url = result.path
                 return (conn, url)  # case 1
             else:
-                LOG.warning(_LW("[%(rid)d] Received invalid redirect "
-                                "location: '%(url)s'"),
+                LOG.warning("[%(rid)d] Received invalid redirect "
+                            "location: '%(url)s'",
                             {'rid': self._rid(), 'url': url})
                 return (conn, None)  # case 3
         elif result.scheme not in ["http", "https"] or not result.hostname:
-            LOG.warning(_LW("[%(rid)d] Received malformed redirect "
-                            "location: %(url)s"),
+            LOG.warning("[%(rid)d] Received malformed redirect "
+                        "location: %(url)s",
                         {'rid': self._rid(), 'url': url})
             return (conn, None)  # case 3
         # case 2, redirect location includes a scheme
@@ -17,9 +17,6 @@
 
 from oslo_log import log as logging
 
-from vmware_nsx._i18n import _LW
-
-
 LOG = logging.getLogger(__name__)
 
 
@@ -30,8 +27,8 @@ def find_version(headers):
             if header_name == 'server':
                 return Version(header_value.split('/')[1])
     except IndexError:
-        LOG.warning(_LW("Unable to fetch NSX version from response "
-                        "headers :%s"), headers)
+        LOG.warning("Unable to fetch NSX version from response "
+                    "headers :%s", headers)
 
 
 class Version(object):
@@ -18,7 +18,7 @@ from oslo_log import log as logging
 
 from neutron.db import l3_hamode_db
 
-from vmware_nsx._i18n import _, _LW
+from vmware_nsx._i18n import _
 from vmware_nsx.common import exceptions as nsx_exc
 from vmware_nsx.dvs import dvs_utils
 from vmware_nsx.extensions import routersize
@@ -843,9 +843,9 @@ def validate_nsxv_config_options():
         error = _("manager_uri, user, and password must be configured!")
         raise nsx_exc.NsxPluginException(err_msg=error)
     if cfg.CONF.nsxv.dvs_id is None:
-        LOG.warning(_LW("dvs_id must be configured to support VLANs!"))
+        LOG.warning("dvs_id must be configured to support VLANs!")
     if cfg.CONF.nsxv.vdn_scope_id is None:
-        LOG.warning(_LW("vdn_scope_id must be configured to support VXLANs!"))
+        LOG.warning("vdn_scope_id must be configured to support VXLANs!")
     if cfg.CONF.nsxv.use_dvs_features and not dvs_utils.dvs_is_enabled(
             dvs_id=cfg.CONF.nsxv.dvs_id):
         error = _("dvs host/vcenter credentials must be defined to use "
@@ -18,8 +18,6 @@ from oslo_log import log
 from oslo_utils import excutils
 import stevedore
 
-from vmware_nsx._i18n import _LE, _LI
-
 LOG = log.getLogger(__name__)
 
 
@@ -31,13 +29,13 @@ class ExtensionManager(stevedore.named.NamedExtensionManager):
         # the order in which the drivers are called.
         self.ordered_ext_drivers = []
 
-        LOG.info(_LI("Configured extension driver names: %s"),
+        LOG.info("Configured extension driver names: %s",
                  cfg.CONF.nsx_extension_drivers)
         super(ExtensionManager, self).__init__('vmware_nsx.extension_drivers',
                                                cfg.CONF.nsx_extension_drivers,
                                                invoke_on_load=True,
                                                name_order=True)
-        LOG.info(_LI("Loaded extension driver names: %s"), self.names())
+        LOG.info("Loaded extension driver names: %s", self.names())
         self._register_drivers()
 
     def _register_drivers(self):
@@ -48,13 +46,13 @@ class ExtensionManager(stevedore.named.NamedExtensionManager):
         """
         for ext in self:
             self.ordered_ext_drivers.append(ext)
-        LOG.info(_LI("Registered extension drivers: %s"),
+        LOG.info("Registered extension drivers: %s",
                  [driver.name for driver in self.ordered_ext_drivers])
 
     def initialize(self):
         # Initialize each driver in the list.
         for driver in self.ordered_ext_drivers:
-            LOG.info(_LI("Initializing extension driver '%s'"), driver.name)
+            LOG.info("Initializing extension driver '%s'", driver.name)
             driver.obj.initialize()
 
     def extension_aliases(self):
@@ -63,7 +61,7 @@ class ExtensionManager(stevedore.named.NamedExtensionManager):
             alias = driver.obj.extension_alias
             if alias:
                 exts.append(alias)
-                LOG.info(_LI("Got %(alias)s extension from driver '%(drv)s'"),
+                LOG.info("Got %(alias)s extension from driver '%(drv)s'",
                          {'alias': alias, 'drv': driver.name})
         return exts
 
@@ -74,8 +72,8 @@ class ExtensionManager(stevedore.named.NamedExtensionManager):
                 getattr(driver.obj, method_name)(plugin_context, data, result)
             except Exception:
                 with excutils.save_and_reraise_exception():
-                    LOG.info(_LI("Extension driver '%(name)s' failed in "
-                                 "%(method)s"),
+                    LOG.info("Extension driver '%(name)s' failed in "
+                             "%(method)s",
                              {'name': driver.name, 'method': method_name})
 
     def process_create_network(self, plugin_context, data, result):
@@ -113,8 +111,8 @@ class ExtensionManager(stevedore.named.NamedExtensionManager):
             try:
                 getattr(driver.obj, method_name)(session, base_model, result)
             except Exception:
-                LOG.error(_LE("Extension driver '%(name)s' failed in "
-                              "%(method)s"),
+                LOG.error("Extension driver '%(name)s' failed in "
+                          "%(method)s",
                           {'name': driver.name, 'method': method_name})
                 raise
 
@@ -21,7 +21,6 @@ from neutron_lib import exceptions as n_exc
 from oslo_log import log
 import six
 
-from vmware_nsx._i18n import _LW
 from vmware_nsx.api_client import client
 from vmware_nsx.api_client import exception as api_exc
 from vmware_nsx.common import utils as vmw_utils
@@ -68,8 +67,8 @@ def get_nsx_switch_ids(session, cluster, neutron_network_id):
         # more than once for each network in Neutron's lifetime
         nsx_switches = switchlib.get_lswitches(cluster, neutron_network_id)
         if not nsx_switches:
-            LOG.warning(_LW("Unable to find NSX switches for Neutron network "
-                            "%s"), neutron_network_id)
+            LOG.warning("Unable to find NSX switches for Neutron network "
+                        "%s", neutron_network_id)
             return
         nsx_switch_ids = []
         with session.begin(subtransactions=True):
@@ -115,7 +114,7 @@ def get_nsx_switch_and_port_id(session, cluster, neutron_port_id):
         # NOTE(salv-orlando): Not handling the case where more than one
         # port is found with the same neutron port tag
         if not nsx_ports:
-            LOG.warning(_LW("Unable to find NSX port for Neutron port %s"),
+            LOG.warning("Unable to find NSX port for Neutron port %s",
                         neutron_port_id)
             # This method is supposed to return a tuple
             return None, None
@@ -155,12 +154,12 @@ def get_nsx_security_group_id(session, cluster, neutron_id):
         # NOTE(salv-orlando): Not handling the case where more than one
         # security profile is found with the same neutron port tag
         if not nsx_sec_profiles:
-            LOG.warning(_LW("Unable to find NSX security profile for Neutron "
-                            "security group %s"), neutron_id)
+            LOG.warning("Unable to find NSX security profile for Neutron "
+                        "security group %s", neutron_id)
             return
         elif len(nsx_sec_profiles) > 1:
-            LOG.warning(_LW("Multiple NSX security profiles found for Neutron "
-                            "security group %s"), neutron_id)
+            LOG.warning("Multiple NSX security profiles found for Neutron "
+                        "security group %s", neutron_id)
         nsx_sec_profile = nsx_sec_profiles[0]
         nsx_id = nsx_sec_profile['uuid']
         with session.begin(subtransactions=True):
@@ -192,7 +191,7 @@ def get_nsx_router_id(session, cluster, neutron_router_id):
         # NOTE(salv-orlando): Not handling the case where more than one
         # port is found with the same neutron port tag
         if not nsx_routers:
-            LOG.warning(_LW("Unable to find NSX router for Neutron router %s"),
+            LOG.warning("Unable to find NSX router for Neutron router %s",
                         neutron_router_id)
             return
         nsx_router = nsx_routers[0]
@@ -249,12 +248,12 @@ def get_nsx_device_statuses(cluster, tenant_id):
     except api_exc.NsxApiException:
         # Do not make a NSX API exception fatal
         if tenant_id:
-            LOG.warning(_LW("Unable to retrieve operational status for "
-                            "gateway devices belonging to tenant: %s"),
+            LOG.warning("Unable to retrieve operational status for "
+                        "gateway devices belonging to tenant: %s",
                         tenant_id)
         else:
-            LOG.warning(_LW("Unable to retrieve operational status for "
-                            "gateway devices"))
+            LOG.warning("Unable to retrieve operational status for "
+                        "gateway devices")
 
 
 def _convert_bindings_to_nsx_transport_zones(bindings):
@@ -30,7 +30,7 @@ from neutron.db.models import l3 as l3_db
 from neutron.db import models_v2
 from neutron.extensions import l3
 
-from vmware_nsx._i18n import _, _LE, _LI, _LW
+from vmware_nsx._i18n import _
 from vmware_nsx.api_client import exception as api_exc
 from vmware_nsx.common import exceptions as nsx_exc
 from vmware_nsx.common import nsx_utils
@@ -273,8 +273,8 @@ class NsxSynchronizer(object):
             # TODO(salv-orlando): We should be catching
             # api_exc.ResourceNotFound here
             # The logical switch was not found
-            LOG.warning(_LW("Logical switch for neutron network %s not "
-                            "found on NSX."), neutron_network_data['id'])
+            LOG.warning("Logical switch for neutron network %s not "
+                        "found on NSX.", neutron_network_data['id'])
             lswitches = []
         else:
             for lswitch in lswitches:
@@ -360,8 +360,8 @@ class NsxSynchronizer(object):
             # NOTE(salv-orlando): We should be catching
             # api_exc.ResourceNotFound here
             # The logical router was not found
-            LOG.warning(_LW("Logical router for neutron router %s not "
-                            "found on NSX."), neutron_router_data['id'])
+            LOG.warning("Logical router for neutron router %s not "
+                        "found on NSX.", neutron_router_data['id'])
         if lrouter:
             # Update the cache
             self._nsx_cache.update_lrouter(lrouter)
@@ -410,8 +410,8 @@ class NsxSynchronizer(object):
                 neutron_router_mappings[neutron_router_id] = (
                     self._nsx_cache[lr_uuid])
             else:
-                LOG.warning(_LW("Unable to find Neutron router id for "
-                                "NSX logical router: %s"), lr_uuid)
+                LOG.warning("Unable to find Neutron router id for "
+                            "NSX logical router: %s", lr_uuid)
         # Fetch neutron routers from database
         filters = ({} if scan_missing else
                    {'id': neutron_router_mappings.keys()})
@@ -452,8 +452,8 @@ class NsxSynchronizer(object):
             # api_exc.ResourceNotFound here instead
             # of PortNotFoundOnNetwork when the id exists but
             # the logical switch port was not found
-            LOG.warning(_LW("Logical switch port for neutron port %s "
-                            "not found on NSX."), neutron_port_data['id'])
+            LOG.warning("Logical switch port for neutron port %s "
+                        "not found on NSX.", neutron_port_data['id'])
             lswitchport = None
         else:
             # If lswitchport is not None, update the cache.
@@ -545,11 +545,11 @@ class NsxSynchronizer(object):
         # be emitted.
         num_requests = page_size // (MAX_PAGE_SIZE + 1) + 1
         if num_requests > 1:
-            LOG.warning(_LW("Requested page size is %(cur_chunk_size)d. "
-                            "It might be necessary to do %(num_requests)d "
-                            "round-trips to NSX for fetching data. Please "
-                            "tune sync parameters to ensure chunk size "
-                            "is less than %(max_page_size)d"),
+            LOG.warning("Requested page size is %(cur_chunk_size)d. "
+                        "It might be necessary to do %(num_requests)d "
+                        "round-trips to NSX for fetching data. Please "
+                        "tune sync parameters to ensure chunk size "
+                        "is less than %(max_page_size)d",
                         {'cur_chunk_size': page_size,
                          'num_requests': num_requests,
                          'max_page_size': MAX_PAGE_SIZE})
@@ -578,8 +578,8 @@ class NsxSynchronizer(object):
     def _fetch_nsx_data_chunk(self, sp):
         base_chunk_size = sp.chunk_size
         chunk_size = base_chunk_size + sp.extra_chunk_size
-        LOG.info(_LI("Fetching up to %s resources "
-                     "from NSX backend"), chunk_size)
+        LOG.info("Fetching up to %s resources "
+                 "from NSX backend", chunk_size)
         fetched = ls_count = lr_count = lp_count = 0
         lswitches = lrouters = lswitchports = []
         if sp.ls_cursor or sp.ls_cursor == 'start':
@@ -618,7 +618,7 @@ class NsxSynchronizer(object):
         # Reset page cursor variables if necessary
         if sp.current_chunk == 0:
             sp.ls_cursor = sp.lr_cursor = sp.lp_cursor = 'start'
-        LOG.info(_LI("Running state synchronization task. Chunk: %s"),
+        LOG.info("Running state synchronization task. Chunk: %s",
                  sp.current_chunk)
         # Fetch chunk_size data from NSX
         try:
@@ -628,9 +628,9 @@ class NsxSynchronizer(object):
             sleep_interval = self._sync_backoff
             # Cap max back off to 64 seconds
             self._sync_backoff = min(self._sync_backoff * 2, 64)
-            LOG.exception(_LE("An error occurred while communicating with "
-                              "NSX backend. Will retry synchronization "
-                              "in %d seconds"), sleep_interval)
+            LOG.exception("An error occurred while communicating with "
+                          "NSX backend. Will retry synchronization "
+                          "in %d seconds", sleep_interval)
             return sleep_interval
         LOG.debug("Time elapsed querying NSX: %s",
                   timeutils.utcnow() - start)
@@ -669,8 +669,8 @@ class NsxSynchronizer(object):
             self._synchronize_lswitchports(ctx, lp_uuids,
                                            scan_missing=scan_missing)
             # Increase chunk counter
-            LOG.info(_LI("Synchronization for chunk %(chunk_num)d of "
-                         "%(total_chunks)d performed"),
+            LOG.info("Synchronization for chunk %(chunk_num)d of "
+                     "%(total_chunks)d performed",
                      {'chunk_num': sp.current_chunk + 1,
                       'total_chunks': num_chunks})
             sp.current_chunk = (sp.current_chunk + 1) % num_chunks
@@ -28,8 +28,6 @@ from neutron_lib import constants
 from oslo_context import context as common_context
 from oslo_log import log
 
-from vmware_nsx._i18n import _LE
-
 LOG = log.getLogger(__name__)
 
 MAX_DISPLAY_NAME_LEN = 40
@@ -162,8 +160,8 @@ def read_file(path):
         with open(path) as file:
             return file.read().strip()
     except IOError as e:
-        LOG.error(_LE("Error while opening file "
-                      "%(path)s: %(err)s"), {'path': path, 'err': str(e)})
+        LOG.error("Error while opening file "
+                  "%(path)s: %(err)s", {'path': path, 'err': str(e)})
 
 
 def get_name_and_uuid(name, uuid, tag=None, maxlen=80):
@@ -34,7 +34,6 @@ from neutron_lib import constants as n_constants
 from neutron_lib.db import model_base
 from neutron_lib.utils import helpers
 
-from vmware_nsx._i18n import _LW
 from vmware_nsx.extensions import providersecuritygroup as provider_sg
 from vmware_nsx.extensions import securitygrouplogging as sg_logging
 from vmware_nsx.extensions import securitygrouppolicy as sg_policy
@@ -204,8 +203,8 @@ class ExtendedSecurityGroupPropertiesMixin(object):
             if self._is_provider_security_group(context, sg):
                 if only_warn:
                     LOG.warning(
-                        _LW("Ignored provider security group %(sg)s in "
-                            "security groups list for port %(id)s"),
+                        "Ignored provider security group %(sg)s in "
+                        "security groups list for port %(id)s",
                         {'sg': sg, 'id': port['id']})
                 else:
                     raise provider_sg.SecurityGroupIsProvider(id=sg)
@@ -27,7 +27,7 @@ from sqlalchemy import func
 from sqlalchemy.orm import exc
 from sqlalchemy.sql import expression as expr
 
-from vmware_nsx._i18n import _, _LE, _LW
+from vmware_nsx._i18n import _
 from vmware_nsx.common import exceptions as nsx_exc
 from vmware_nsx.common import nsxv_constants
 from vmware_nsx.db import db as nsx_db
@@ -70,10 +70,10 @@ def warn_on_binding_status_error(f, *args, **kwargs):
 
     for binding in bindings:
         if binding and binding['status'] == neutron_const.ERROR:
-            LOG.warning(_LW("Found NSXV router binding entry with status "
-                            "%(status)s: router %(router)s, "
-                            "edge %(edge)s, lswitch %(lswitch)s, "
-                            "status description: %(desc)s "),
+            LOG.warning("Found NSXV router binding entry with status "
+                        "%(status)s: router %(router)s, "
+                        "edge %(edge)s, lswitch %(lswitch)s, "
+                        "status description: %(desc)s ",
                         {'status': binding['status'],
                          'router': binding['router_id'],
                          'edge': binding['edge_id'],
@@ -318,8 +318,8 @@ def create_edge_dhcp_static_binding(session, edge_id, mac_address, binding_id):
         return _create_edge_dhcp_static_binding(session, edge_id, mac_address,
                                                 binding_id)
     except db_exc.DBDuplicateEntry:
-        LOG.warning(_LW('Conflicting DHCP binding entry for '
-                        '%(edge_id)s:%(mac_address)s. Overwriting!'),
+        LOG.warning('Conflicting DHCP binding entry for '
+                    '%(edge_id)s:%(mac_address)s. Overwriting!',
                     {'edge_id': edge_id, 'mac_address': mac_address})
         delete_edge_dhcp_static_binding(session, edge_id, mac_address)
         return _create_edge_dhcp_static_binding(session, edge_id, mac_address,
@@ -373,8 +373,8 @@ def create_nsxv_internal_network(session, network_purpose,
             session.add(network)
     except db_exc.DBDuplicateEntry:
         with excutils.save_and_reraise_exception():
-            LOG.exception(_LE("Duplicate internal network for purpose "
-                              "%(p)s and availabiltiy zone %(az)s"),
+            LOG.exception("Duplicate internal network for purpose "
+                          "%(p)s and availabiltiy zone %(az)s",
                           {'p': network_purpose,
                            'az': availability_zone})
 
@@ -412,7 +412,7 @@ def create_nsxv_internal_edge(session, ext_ip_address, purpose, router_id):
             session.add(internal_edge)
     except db_exc.DBDuplicateEntry:
         with excutils.save_and_reraise_exception():
-            LOG.exception(_LE("Duplicate internal Edge IP %s"),
+            LOG.exception("Duplicate internal Edge IP %s",
                           ext_ip_address)
 
 
@@ -23,7 +23,6 @@ from neutron.db import models_v2
 from oslo_log import log
 from oslo_utils import uuidutils
 
-from vmware_nsx._i18n import _LI
 from vmware_nsx.db import nsx_models
 from vmware_nsx.extensions import qos_queue as qos
 
@@ -254,8 +253,8 @@ class QoSDbMixin(qos.QueuePluginBase):
             if dscp:
                 # must raise because a non-zero dscp was provided
                 raise qos.QueueInvalidMarking()
-            LOG.info(_LI("DSCP value (%s) will be ignored with 'trusted' "
-                         "marking"), dscp)
+            LOG.info("DSCP value (%s) will be ignored with 'trusted' "
+                     "marking", dscp)
         max = qos_queue.get('max')
         min = qos_queue.get('min')
         # Max can be None
@@ -21,7 +21,7 @@ from oslo_db import exception as db_exc
 from oslo_log import log as logging
 from oslo_utils import excutils
 
-from vmware_nsx._i18n import _, _LE, _LW
+from vmware_nsx._i18n import _
 from vmware_nsx.api_client import exception as api_exc
 from vmware_nsx.common import exceptions as p_exc
 from vmware_nsx.common import nsx_utils
@@ -69,14 +69,14 @@ class LsnManager(object):
             return lsn_api.lsn_for_network_get(self.cluster, network_id)
         except (n_exc.NotFound, api_exc.NsxApiException):
             if raise_on_err:
-                LOG.error(_LE('Unable to find Logical Service Node for '
-                              'network %s.'),
+                LOG.error('Unable to find Logical Service Node for '
+                          'network %s.',
                           network_id)
                 raise p_exc.LsnNotFound(entity='network',
                                         entity_id=network_id)
             else:
-                LOG.warning(_LW('Unable to find Logical Service Node for '
-                                'the requested network %s.'),
+                LOG.warning('Unable to find Logical Service Node for '
+                            'the requested network %s.',
                             network_id)
 
     def lsn_create(self, context, network_id):
@@ -92,7 +92,7 @@ class LsnManager(object):
         try:
             lsn_api.lsn_delete(self.cluster, lsn_id)
         except (n_exc.NotFound, api_exc.NsxApiException):
-            LOG.warning(_LW('Unable to delete Logical Service Node %s'),
+            LOG.warning('Unable to delete Logical Service Node %s',
                         lsn_id)
 
     def lsn_delete_by_network(self, context, network_id):
@@ -110,17 +110,17 @@ class LsnManager(object):
                 self.cluster, lsn_id, subnet_id)
         except (n_exc.NotFound, api_exc.NsxApiException):
             if raise_on_err:
-                LOG.error(_LE('Unable to find Logical Service Node Port '
-                              'for LSN %(lsn_id)s and subnet '
-                              '%(subnet_id)s'),
+                LOG.error('Unable to find Logical Service Node Port '
+                          'for LSN %(lsn_id)s and subnet '
+                          '%(subnet_id)s',
                           {'lsn_id': lsn_id, 'subnet_id': subnet_id})
                 raise p_exc.LsnPortNotFound(lsn_id=lsn_id,
                                             entity='subnet',
                                             entity_id=subnet_id)
             else:
-                LOG.warning(_LW('Unable to find Logical Service Node Port '
-                                'for LSN %(lsn_id)s and subnet '
-                                '%(subnet_id)s'),
+                LOG.warning('Unable to find Logical Service Node Port '
+                            'for LSN %(lsn_id)s and subnet '
+                            '%(subnet_id)s',
                             {'lsn_id': lsn_id, 'subnet_id': subnet_id})
                 return (lsn_id, None)
         else:
@@ -137,17 +137,17 @@ class LsnManager(object):
                 self.cluster, lsn_id, mac)
         except (n_exc.NotFound, api_exc.NsxApiException):
             if raise_on_err:
-                LOG.error(_LE('Unable to find Logical Service Node Port '
-                              'for LSN %(lsn_id)s and mac address '
-                              '%(mac)s'),
+                LOG.error('Unable to find Logical Service Node Port '
+                          'for LSN %(lsn_id)s and mac address '
+                          '%(mac)s',
                           {'lsn_id': lsn_id, 'mac': mac})
                 raise p_exc.LsnPortNotFound(lsn_id=lsn_id,
                                             entity='MAC',
                                             entity_id=mac)
             else:
-                LOG.warning(_LW('Unable to find Logical Service Node '
-                                'Port for LSN %(lsn_id)s and mac address '
-                                '%(mac)s'),
+                LOG.warning('Unable to find Logical Service Node '
+                            'Port for LSN %(lsn_id)s and mac address '
+                            '%(mac)s',
                             {'lsn_id': lsn_id, 'mac': mac})
                 return (lsn_id, None)
         else:
@@ -170,7 +170,7 @@ class LsnManager(object):
         try:
             lsn_api.lsn_port_delete(self.cluster, lsn_id, lsn_port_id)
         except (n_exc.NotFound, api_exc.NsxApiException):
-            LOG.warning(_LW('Unable to delete LSN Port %s'), lsn_port_id)
+            LOG.warning('Unable to delete LSN Port %s', lsn_port_id)
 
     def lsn_port_dispose(self, context, network_id, mac_address):
         """Delete a LSN port given the network and the mac address."""
@@ -187,12 +187,12 @@ class LsnManager(object):
                         self.cluster, network_id, lswitch_port_id)
                 except (n_exc.PortNotFoundOnNetwork,
                         api_exc.NsxApiException):
-                    LOG.warning(_LW("Metadata port not found while attempting "
-                                    "to delete it from network %s"),
+                    LOG.warning("Metadata port not found while attempting "
+                                "to delete it from network %s",
                                 network_id)
         else:
-            LOG.warning(_LW("Unable to find Logical Services Node "
-                            "Port with MAC %s"), mac_address)
+            LOG.warning("Unable to find Logical Services Node "
+                        "Port with MAC %s", mac_address)
 
     def lsn_port_dhcp_setup(
             self, context, network_id, port_id, port_data, subnet_config=None):
@@ -319,8 +319,8 @@ class LsnManager(object):
             if lsn_id and lsn_port_id:
                 hdlr(self.cluster, lsn_id, lsn_port_id, data)
         except (n_exc.NotFound, api_exc.NsxApiException):
-            LOG.error(_LE('Error while configuring LSN '
-                          'port %s'), lsn_port_id)
+            LOG.error('Error while configuring LSN '
+                      'port %s', lsn_port_id)
             raise p_exc.PortConfigurationError(
                 net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id)
 
@@ -20,7 +20,7 @@ from neutron_lib import constants as const
 from neutron_lib import exceptions as n_exc
 from oslo_log import log as logging
 
-from vmware_nsx._i18n import _, _LE
+from vmware_nsx._i18n import _
 from vmware_nsx.common import exceptions as p_exc
 from vmware_nsx.dhcp_meta import nsx
 from vmware_nsx.dhcp_meta import rpc
@@ -80,7 +80,7 @@ class DhcpMetadataBuilder(object):
         try:
             self.plugin.delete_port(context, port['id'])
         except n_exc.PortNotFound:
-            LOG.error(_LE('Port %s is already gone'), port['id'])
+            LOG.error('Port %s is already gone', port['id'])
 
     def dhcp_allocate(self, context, network_id, subnet):
         """Allocate dhcp resources for the subnet."""
@@ -28,7 +28,7 @@ from neutron.common import rpc as n_rpc
 from neutron.common import topics
 from neutron.db import agents_db
 
-from vmware_nsx._i18n import _, _LW
+from vmware_nsx._i18n import _
 from vmware_nsx.common import config
 from vmware_nsx.common import exceptions as nsx_exc
 from vmware_nsx.dhcp_meta import combined
@@ -121,7 +121,7 @@ class DhcpMetadataAccess(object):
             # This becomes ineffective, as all new networks creations
             # are handled by Logical Services Nodes in NSX
             cfg.CONF.set_override('network_auto_schedule', False)
-            LOG.warning(_LW('network_auto_schedule has been disabled'))
+            LOG.warning('network_auto_schedule has been disabled')
             notifier = combined.DhcpAgentNotifyAPI(self.safe_reference,
                                                    lsn_manager)
             self.supported_extension_aliases.append(lsn.EXT_ALIAS)
@@ -25,7 +25,7 @@ from neutron.db import db_base_plugin_v2
 from neutron.db import l3_db
 from neutron.extensions import external_net
 
-from vmware_nsx._i18n import _, _LE, _LI
+from vmware_nsx._i18n import _
 from vmware_nsx.common import exceptions as p_exc
 from vmware_nsx.dhcp_meta import constants as d_const
 from vmware_nsx.nsxlib.mh import lsn as lsn_api
@@ -138,9 +138,9 @@ class DhcpAgentNotifyAPI(object):
             # down below as well as handle_port_metadata_access
             self.plugin.create_port(context, {'port': dhcp_port})
         except p_exc.PortConfigurationError as e:
-            LOG.error(_LE("Error while creating subnet %(cidr)s for "
-                          "network %(network)s. Please, contact "
-                          "administrator"),
+            LOG.error("Error while creating subnet %(cidr)s for "
+                      "network %(network)s. Please, contact "
+                      "administrator",
                       {"cidr": subnet["cidr"],
                        "network": network_id})
             db_base_plugin_v2.NeutronDbPluginV2.delete_port(
@@ -207,12 +207,12 @@ def check_services_requirements(cluster):
 
 
 def handle_network_dhcp_access(plugin, context, network, action):
-    LOG.info(_LI("Performing DHCP %(action)s for resource: %(resource)s"),
+    LOG.info("Performing DHCP %(action)s for resource: %(resource)s",
              {"action": action, "resource": network})
     if action == 'create_network':
         network_id = network['id']
         if network.get(external_net.EXTERNAL):
-            LOG.info(_LI("Network %s is external: no LSN to create"),
+            LOG.info("Network %s is external: no LSN to create",
                      network_id)
             return
         plugin.lsn_manager.lsn_create(context, network_id)
@@ -221,12 +221,12 @@ def handle_network_dhcp_access(plugin, context, network, action):
         # is just the network id
         network_id = network
        plugin.lsn_manager.lsn_delete_by_network(context, network_id)
-        LOG.info(_LI("Logical Services Node for network "
-                     "%s configured successfully"), network_id)
+        LOG.info("Logical Services Node for network "
+                 "%s configured successfully", network_id)
 
 
 def handle_port_dhcp_access(plugin, context, port, action):
-    LOG.info(_LI("Performing DHCP %(action)s for resource: %(resource)s"),
+    LOG.info("Performing DHCP %(action)s for resource: %(resource)s",
             {"action": action, "resource": port})
    if port["device_owner"] == const.DEVICE_OWNER_DHCP:
        network_id = port["network_id"]
@@ -243,8 +243,8 @@ def handle_port_dhcp_access(plugin, context, port, action):
                 plugin.lsn_manager.lsn_port_dhcp_setup(
                     context, network_id, port['id'], subnet_data, subnet)
             except p_exc.PortConfigurationError:
-                LOG.error(_LE("Error while configuring DHCP for "
-                              "port %s"), port['id'])
+                LOG.error("Error while configuring DHCP for "
+                          "port %s", port['id'])
                 raise n_exc.NeutronException()
     elif action == "delete_port":
         plugin.lsn_manager.lsn_port_dispose(context, network_id,
@@ -254,8 +254,8 @@ def handle_port_dhcp_access(plugin, context, port, action):
             # do something only if there are IP's and dhcp is enabled
             subnet_id = port["fixed_ips"][0]['subnet_id']
             if not plugin.get_subnet(context, subnet_id)['enable_dhcp']:
-                LOG.info(_LI("DHCP is disabled for subnet %s: nothing "
-                             "to do"), subnet_id)
+                LOG.info("DHCP is disabled for subnet %s: nothing "
+                         "to do", subnet_id)
                 return
             host_data = {
                 "mac_address": port["mac_address"],
@@ -273,7 +273,7 @@ def handle_port_dhcp_access(plugin, context, port, action):
             if action == 'create_port':
                 db_base_plugin_v2.NeutronDbPluginV2.delete_port(
                     plugin, context, port['id'])
-    LOG.info(_LI("DHCP for port %s configured successfully"), port['id'])
+    LOG.info("DHCP for port %s configured successfully", port['id'])
 
 
 def handle_port_metadata_access(plugin, context, port, is_delete=False):
@@ -281,7 +281,7 @@ def handle_port_metadata_access(plugin, context, port, is_delete=False):
         network_id = port["network_id"]
         network = plugin.get_network(context, network_id)
         if network[external_net.EXTERNAL]:
-            LOG.info(_LI("Network %s is external: nothing to do"),
+            LOG.info("Network %s is external: nothing to do",
                      network_id)
             return
         subnet_id = port["fixed_ips"][0]['subnet_id']
@@ -290,7 +290,7 @@ def handle_port_metadata_access(plugin, context, port, is_delete=False):
             "tenant_id": port["tenant_id"],
             "ip_address": port["fixed_ips"][0]['ip_address']
         }
-        LOG.info(_LI("Configuring metadata entry for port %s"), port)
+        LOG.info("Configuring metadata entry for port %s", port)
         if not is_delete:
             handler = plugin.lsn_manager.lsn_port_meta_host_add
         else:
@@ -302,13 +302,13 @@ def handle_port_metadata_access(plugin, context, port, is_delete=False):
                 if not is_delete:
                     db_base_plugin_v2.NeutronDbPluginV2.delete_port(
                         plugin, context, port['id'])
-        LOG.info(_LI("Metadata for port %s configured successfully"),
+        LOG.info("Metadata for port %s configured successfully",
                  port['id'])
 
 
 def handle_router_metadata_access(plugin, context, router_id, interface=None):
-    LOG.info(_LI("Handle metadata access via router: %(r)s and "
-                 "interface %(i)s"), {'r': router_id, 'i': interface})
+    LOG.info("Handle metadata access via router: %(r)s and "
+             "interface %(i)s", {'r': router_id, 'i': interface})
     if interface:
         try:
             plugin.get_port(context, interface['port_id'])
@@ -324,4 +324,4 @@ def handle_router_metadata_access(plugin, context, router_id, interface=None):
         if is_enabled:
             l3_db.L3_NAT_db_mixin.remove_router_interface(
                 plugin, context, router_id, interface)
-    LOG.info(_LI("Metadata for router %s handled successfully"), router_id)
+    LOG.info("Metadata for router %s handled successfully", router_id)
@@ -24,7 +24,6 @@ from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
 from neutron.db import db_base_plugin_v2
 from neutron.db import models_v2
 
-from vmware_nsx._i18n import _LE, _LI, _LW
 from vmware_nsx.api_client import exception as api_exc
 from vmware_nsx.common import config
 from vmware_nsx.common import exceptions as nsx_exc
@@ -55,7 +54,7 @@ def handle_port_metadata_access(plugin, context, port, is_delete=False):
     if not port.get('fixed_ips'):
         # If port does not have an IP, the associated subnet is in
         # deleting state.
-        LOG.info(_LI('Port %s has no IP due to subnet in deleting state'),
+        LOG.info('Port %s has no IP due to subnet in deleting state',
                  port['id'])
         return
     fixed_ip = port['fixed_ips'][0]
@@ -66,8 +65,8 @@ def handle_port_metadata_access(plugin, context, port, is_delete=False):
         # route. This is done via the enable_isolated_metadata
         # option if desired.
         if not subnet.get('gateway_ip'):
-            LOG.info(_LI('Subnet %s does not have a gateway, the '
-                         'metadata route will not be created'),
+            LOG.info('Subnet %s does not have a gateway, the '
+                     'metadata route will not be created',
                      subnet['id'])
             return
         metadata_routes = [r for r in subnet.routes
@@ -99,8 +98,8 @@ def handle_router_metadata_access(plugin, context, router_id, interface=None):
         LOG.debug("Metadata access network is disabled")
         return
     if not cfg.CONF.allow_overlapping_ips:
-        LOG.warning(_LW("Overlapping IPs must be enabled in order to setup "
-                        "the metadata access network"))
+        LOG.warning("Overlapping IPs must be enabled in order to setup "
+                    "the metadata access network")
         return
     ctx_elevated = context.elevated()
     on_demand = getattr(plugin_cfg, 'metadata_on_demand', False)
@@ -138,8 +137,8 @@ def handle_router_metadata_access(plugin, context, router_id, interface=None):
         except (ntn_exc.NeutronException, nsx_exc.NsxPluginException,
                 api_exc.NsxApiException):
             # Any exception here should be regarded as non-fatal
-            LOG.exception(_LE("An error occurred while operating on the "
-                              "metadata access network for router:'%s'"),
+            LOG.exception("An error occurred while operating on the "
+                          "metadata access network for router:'%s'",
                           router_id)
 
 
@@ -17,7 +17,6 @@ from oslo_log import log as logging
 from oslo_utils import excutils
 from oslo_vmware import vim_util
 
-from vmware_nsx._i18n import _LE, _LI
 from vmware_nsx.common import exceptions as nsx_exc
 from vmware_nsx.dvs import dvs_utils
 
@@ -154,10 +153,10 @@ class DvsManager(VCManagerBase):
         except Exception:
             # NOTE(garyk): handle more specific exceptions
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE('Failed to create port group for '
-                                  '%(net_id)s with tag %(tag)s.'),
+                LOG.exception('Failed to create port group for '
+                              '%(net_id)s with tag %(tag)s.',
                               {'net_id': net_id, 'tag': vlan_tag})
-        LOG.info(_LI("%(net_id)s with tag %(vlan_tag)s created on %(dvs)s."),
+        LOG.info("%(net_id)s with tag %(vlan_tag)s created on %(dvs)s.",
                  {'net_id': net_id,
                   'vlan_tag': vlan_tag,
                   'dvs': dvs_moref.value})
@@ -282,7 +281,7 @@ class DvsManager(VCManagerBase):
             self._session.vim,
             pg_moref, ['config'])
         if len(pg_spec) == 0 or len(pg_spec[0].propSet[0]) == 0:
-            LOG.error(_LE('Failed to get object properties of %s'), pg_moref)
+            LOG.error('Failed to get object properties of %s', pg_moref)