Merge "Correct i18n message for nicira plugin"
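The change applies one pattern throughout the plugin: every user-facing log or exception message is wrapped in the _() translation marker, and interpolation is deferred by passing the arguments to the logging call instead of formatting eagerly with %; messages with more than one placeholder move to named %(...)s placeholders fed from a dict (or locals()). A minimal sketch of the pattern, assuming a gettext-style _() installed via gettext.install (the plugin itself gets _() from Quantum's own i18n setup); the variable values below are illustrative only:

    import gettext
    import logging

    gettext.install("quantum")      # installs _() as a builtin; a no-op without catalogs
    LOG = logging.getLogger(__name__)

    method, url, status = "GET", "/ws.v1/lswitch", 500   # illustrative values

    # Before: the literal is not marked for translation and is interpolated
    # even when the log level is disabled.
    LOG.error("%s to %s, unexpected response code: %d" % (method, url, status))

    # After: the literal is wrapped in _() so it can be extracted into a
    # message catalog, and the logger interpolates the named placeholders
    # from a dict only if the record is actually emitted.
    LOG.error(_("%(method)s to %(url)s, unexpected response code: %(status)d"),
              {'method': method, 'url': url, 'status': status})

Deferring interpolation this way also skips the formatting cost when the level is disabled, and translators see whole sentences with named placeholders rather than fragments.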
@@ -96,7 +96,7 @@ class NVPApiHelper(client_eventlet.NvpApiClientEventlet):
 retries=self._retries, redirects=self._redirects)
 g.start()
 response = g.join()
-LOG.debug('NVPApiHelper.request() returns "%s"' % response)
+LOG.debug(_('NVPApiHelper.request() returns "%s"'), response)

 # response is a modified HTTPResponse object or None.
 # response.read() will not work on response as the underlying library
@@ -109,7 +109,7 @@ class NVPApiHelper(client_eventlet.NvpApiClientEventlet):

 if response is None:
 # Timeout.
-LOG.error('Request timed out: %s to %s' % (method, url))
+LOG.error(_('Request timed out: %(method)s to %(url)s'), locals())
 raise RequestTimeout()

 status = response.status
@@ -119,15 +119,17 @@ class NVPApiHelper(client_eventlet.NvpApiClientEventlet):
 # Fail-fast: Check for exception conditions and raise the
 # appropriate exceptions for known error codes.
 if status in self.error_codes:
-LOG.error("Received error code: %s" % status)
-LOG.error("Server Error Message: %s" % response.body)
+LOG.error(_("Received error code: %s"), status)
+LOG.error(_("Server Error Message: %s"), response.body)
 self.error_codes[status](self)

 # Continue processing for non-error condition.
 if (status != httplib.OK and status != httplib.CREATED
 and status != httplib.NO_CONTENT):
-LOG.error("%s to %s, unexpected response code: %d (content = '%s')"
-% (method, url, response.status, response.body))
+LOG.error(_("%(method)s to %(url)s, unexpected response code: "
+"%(status)d (content = '%(body)s')"),
+{'method': method, 'url': url,
+'status': response.status, 'body': response.body})
 return None

 return response.body

@@ -94,7 +94,7 @@ def parse_config():
 nvp_conf[cluster_name].nova_zone_id,
 'nvp_controller_connection':
 nvp_conf[cluster_name].nvp_controller_connection, })
-LOG.debug("cluster options:%s", clusters_options)
+LOG.debug(_("Cluster options: %s"), clusters_options)
 return db_options, nvp_options, clusters_options


@@ -251,10 +251,10 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
 c_opts['nova_zone_id']])
 cluster.add_controller(*args)
 except Exception:
-LOG.exception("Invalid connection parameters for "
-"controller %s in cluster %s",
-controller_connection,
-c_opts['name'])
+LOG.exception(_("Invalid connection parameters for "
+"controller %(conn)s in cluster %(name)s"),
+{'conn': controller_connection,
+'name': c_opts['name']})
 raise

 api_providers = [(x['ip'], x['port'], True)
@@ -311,8 +311,8 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
 networks = []
 for c in self.clusters:
 networks.extend(nvplib.get_all_networks(c, tenant_id, networks))
-LOG.debug("get_all_networks() completed for tenant %s: %s" % (
-tenant_id, networks))
+LOG.debug(_("get_all_networks() completed for tenant "
+"%(tenant_id)s: %(networks)s"), locals())
 return networks

 def create_network(self, context, network):
@@ -335,9 +335,9 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
 """
 # FIXME(arosen) implement admin_state_up = False in NVP
 if network['network']['admin_state_up'] is False:
-LOG.warning("Network with admin_state_up=False are not yet "
-"supported by this plugin. Ignoring setting for "
-"network %s",
+LOG.warning(_("Network with admin_state_up=False are not yet "
+"supported by this plugin. Ignoring setting for "
+"network %s"),
 network['network'].get('name', '<unknown>'))

 tenant_id = self._get_tenant_id_for_create(context, network)
@@ -367,7 +367,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
 for (cluster, switches) in pairs:
 nvplib.delete_networks(cluster, id, switches)

-LOG.debug("delete_network() completed for tenant: %s" %
+LOG.debug(_("delete_network() completed for tenant: %s"),
 context.tenant_id)

 def _get_lswitch_cluster_pairs(self, netw_id, tenant_id):
@@ -384,7 +384,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
 pairs.append((c, lswitches))
 if len(pairs) == 0:
 raise exception.NetworkNotFound(net_id=netw_id)
-LOG.debug("Returning pairs for network: %s" % (pairs))
+LOG.debug(_("Returning pairs for network: %s"), pairs)
 return pairs

 def get_network(self, context, id, fields=None):
@@ -440,7 +440,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
 lswitch_results[0]['display_name'])
 break
 except Exception:
-LOG.error("Unable to get switches: %s" % traceback.format_exc())
+LOG.error(_("Unable to get switches: %s"), traceback.format_exc())
 raise exception.QuantumException()

 if 'lswitch-display-name' not in result:
@@ -456,8 +456,8 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
 'shared': network['shared'],
 'subnets': quantum_db.get('subnets', [])}

-LOG.debug("get_network() completed for tenant %s: %s" % (
-context.tenant_id, d))
+LOG.debug(_("get_network() completed for tenant %(tenant_id)s: %(d)s"),
+{'tenant_id': context.tenant_id, 'd': d})
 return d

 def get_networks(self, context, filters=None, fields=None):
@@ -508,7 +508,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):

 nvp_lswitches.extend(res)
 except Exception:
-LOG.error("Unable to get switches: %s" % traceback.format_exc())
+LOG.error(_("Unable to get switches: %s"), traceback.format_exc())
 raise exception.QuantumException()

 # TODO (Aaron) This can be optimized
@@ -535,15 +535,17 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
 break

 if not Found:
-raise Exception("Quantum and NVP Databases are out of Sync!")
+raise Exception(_("Quantum and NVP Databases are out of "
+"Sync!"))
 # do not make the case in which switches are found in NVP
 # but not in Quantum catastrophic.
 if len(nvp_lswitches):
-LOG.warning("Found %s logical switches not bound "
+LOG.warning(_("Found %s logical switches not bound "
 "to Quantum networks. Quantum and NVP are "
-"potentially out of sync", len(nvp_lswitches))
+"potentially out of sync"), len(nvp_lswitches))

-LOG.debug("get_networks() completed for tenant %s" % context.tenant_id)
+LOG.debug(_("get_networks() completed for tenant %s"),
+context.tenant_id)

 if fields:
 ret_fields = []
@@ -594,7 +596,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
 for switch in switches:
 result = nvplib.update_network(cluster, switch, **params)

-LOG.debug("update_network() completed for tenant: %s" %
+LOG.debug(_("update_network() completed for tenant: %s"),
 context.tenant_id)
 return super(NvpPluginV2, self).update_network(context, id, network)

@@ -661,7 +663,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
 nvp_lports[tag["tag"]] = port

 except Exception:
-LOG.error("Unable to get ports: %s" % traceback.format_exc())
+LOG.error(_("Unable to get ports: %s"), traceback.format_exc())
 raise exception.QuantumException()

 lports = []
@@ -684,13 +686,14 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
 del nvp_lports[quantum_lport["id"]]
 lports.append(quantum_lport)
 except KeyError:
-raise Exception("Quantum and NVP Databases are out of Sync!")
+raise Exception(_("Quantum and NVP Databases are out of "
+"Sync!"))
 # do not make the case in which ports are found in NVP
 # but not in Quantum catastrophic.
 if len(nvp_lports):
-LOG.warning("Found %s logical ports not bound "
-"to Quantum ports. Quantum and NVP are "
-"potentially out of sync", len(nvp_lports))
+LOG.warning(_("Found %s logical ports not bound "
+"to Quantum ports. Quantum and NVP are "
+"potentially out of sync"), len(nvp_lports))

 if fields:
 ret_fields = []
@@ -761,8 +764,8 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
 d = {"port-id": port["port"]["id"],
 "port-op-status": port["port"]["status"]}

-LOG.debug("create_port() completed for tenant %s: %s" %
-(tenant_id, d))
+LOG.debug(_("create_port() completed for tenant %(tenant_id)s: %(d)s"),
+locals())

 # update port with admin_state_up True
 port_update = {"port": {"admin_state_up": True}}
@@ -802,7 +805,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
 nvplib.get_port_by_quantum_tag(self.clusters,
 quantum_db["network_id"], id))

-LOG.debug("Update port request: %s" % (params))
+LOG.debug(_("Update port request: %s"), params)

 params["cluster"] = cluster
 params["port"] = port["port"]
@@ -810,7 +813,8 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
 params["port"]["tenant_id"] = quantum_db["tenant_id"]
 result = nvplib.update_port(quantum_db["network_id"],
 port_nvp["uuid"], **params)
-LOG.debug("update_port() completed for tenant: %s" % context.tenant_id)
+LOG.debug(_("update_port() completed for tenant: %s"),
+context.tenant_id)

 return super(NvpPluginV2, self).update_port(context, id, port)

@@ -836,7 +840,8 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
 # the lswitch.
 nvplib.delete_port(cluster, port)

-LOG.debug("delete_port() completed for tenant: %s" % context.tenant_id)
+LOG.debug(_("delete_port() completed for tenant: %s"),
+context.tenant_id)
 return super(NvpPluginV2, self).delete_port(context, id)

 def get_port(self, context, id, fields=None):
@@ -868,8 +873,8 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
 else:
 quantum_db["status"] = constants.PORT_STATUS_DOWN

-LOG.debug("Port details for tenant %s: %s" %
-(context.tenant_id, quantum_db))
+LOG.debug(_("Port details for tenant %(tenant_id)s: %(quantum_db)s"),
+{'tenant_id': context.tenant_id, 'quantum_db': quantum_db})
 return quantum_db

 def get_plugin_version(self):

@@ -110,7 +110,7 @@ class NvpApiClientEventlet(object):
 return (http_conn.host, http_conn.port, is_ssl)

 def update_providers(self, api_providers):
-raise Exception('update_providers() not implemented.')
+raise Exception(_('update_providers() not implemented.'))

 @property
 def user(self):
@@ -156,19 +156,19 @@ class NvpApiClientEventlet(object):
 api_providers are configured.
 '''
 if not self._api_providers:
-lg.warn("[%d] no API providers currently available." % rid)
+lg.warn(_("[%d] no API providers currently available."), rid)
 return None

 # The sleep time is to give controllers time to become consistent after
 # there has been a change in the controller used as the api_provider.
 now = time.time()
 if now < getattr(self, '_issue_conn_barrier', now):
-lg.warn("[%d] Waiting for failover timer to expire." % rid)
+lg.warn(_("[%d] Waiting for failover timer to expire."), rid)
 time.sleep(self._issue_conn_barrier - now)

 # Print out a warning if all connections are in use.
 if self._conn_pool[self._active_conn_pool_idx].empty():
-lg.debug("[%d] Waiting to acquire client connection." % rid)
+lg.debug(_("[%d] Waiting to acquire client connection."), rid)

 # Try to acquire a connection (block in get() until connection
 # available or timeout occurs).
@@ -178,16 +178,19 @@ class NvpApiClientEventlet(object):
 if active_conn_pool_idx != self._active_conn_pool_idx:
 # active_conn_pool became inactive while we were waiting.
 # Put connection back on old pool and try again.
-lg.warn("[%d] Active pool expired while waiting for connection: %s"
-% (rid, _conn_str(conn)))
+lg.warn(_("[%(rid)d] Active pool expired while waiting for "
+"connection: %(conn)s"),
+{'rid': rid, 'conn': _conn_str(conn)})
 self._conn_pool[active_conn_pool_idx].put(conn)
 return self.acquire_connection(rid=rid)

 # Check if the connection has been idle too long.
 now = time.time()
 if getattr(conn, 'last_used', now) < now - self.CONN_IDLE_TIMEOUT:
-lg.info("[%d] Connection %s idle for %0.2f seconds; reconnecting."
-% (rid, _conn_str(conn), now - conn.last_used))
+lg.info(_("[%(rid)d] Connection %(conn)s idle for %(sec)0.2f "
+"seconds; reconnecting."),
+{'rid': rid, 'conn': _conn_str(conn),
+'sec': now - conn.last_used})
 conn = self._create_connection(*self._conn_params(conn))

 # Stash conn pool so conn knows where to go when it releases.
@@ -195,8 +198,9 @@ class NvpApiClientEventlet(object):

 conn.last_used = now
 qsize = self._conn_pool[self._active_conn_pool_idx].qsize()
-lg.debug("[%d] Acquired connection %s. %d connection(s) available."
-% (rid, _conn_str(conn), qsize))
+lg.debug(_("[%(rid)d] Acquired connection %(conn)s. %(qsize)d "
+"connection(s) available."),
+{'rid': rid, 'conn': _conn_str(conn), 'qsize': qsize})
 return conn

 def release_connection(self, http_conn, bad_state=False, rid=-1):
@@ -209,8 +213,9 @@ class NvpApiClientEventlet(object):
 :param rid: request id passed in from request eventlet.
 '''
 if self._conn_params(http_conn) not in self._api_providers:
-lg.warn("[%d] Released connection '%s' is not an API provider "
-"for the cluster" % (rid, _conn_str(http_conn)))
+lg.warn(_("[%(rid)d] Released connection '%(conn)s' is not an "
+"API provider for the cluster"),
+{'rid': rid, 'conn': _conn_str(http_conn)})
 return

 # Retrieve "home" connection pool.
@@ -218,8 +223,9 @@ class NvpApiClientEventlet(object):
 conn_pool = self._conn_pool[conn_pool_idx]
 if bad_state:
 # Reconnect to provider.
-lg.warn("[%d] Connection returned in bad state, reconnecting to %s"
-% (rid, _conn_str(http_conn)))
+lg.warn(_("[%(rid)d] Connection returned in bad state, "
+"reconnecting to %(conn)s"),
+{'rid': rid, 'conn': _conn_str(http_conn)})
 http_conn = self._create_connection(*self._conn_params(http_conn))
 http_conn.idx = conn_pool_idx

@@ -227,16 +233,20 @@ class NvpApiClientEventlet(object):
 # This pool is no longer in a good state. Switch to next pool.
 self._active_conn_pool_idx += 1
 self._active_conn_pool_idx %= len(self._conn_pool)
-lg.warn("[%d] Switched active_conn_pool from %d to %d."
-% (rid, http_conn.idx, self._active_conn_pool_idx))
+lg.warn(_("[%(rid)d] Switched active_conn_pool from "
+"%(idx)d to %(pool_idx)d."),
+{'rid': rid, 'idx': http_conn.idx,
+'pool_idx': self._active_conn_pool_idx})

 # No connections to the new provider allowed until after this
 # timer has expired (allow time for synchronization).
 self._issue_conn_barrier = time.time() + self._failover_time

 conn_pool.put(http_conn)
-lg.debug("[%d] Released connection %s. %d connection(s) available."
-% (rid, _conn_str(http_conn), conn_pool.qsize()))
+lg.debug(_("[%(rid)d] Released connection %(conn)s. "
+"%(qsize)d connection(s) available."),
+{'rid': rid, 'conn': _conn_str(http_conn),
+'qsize': conn_pool.qsize()})

 @property
 def need_login(self):
@@ -253,7 +263,7 @@ class NvpApiClientEventlet(object):
 self.login()
 self._doing_login_sem.release()
 else:
-lg.debug("Waiting for auth to complete")
+lg.debug(_("Waiting for auth to complete"))
 self._doing_login_sem.acquire()
 self._doing_login_sem.release()
 return self._cookie
@@ -267,13 +277,13 @@ class NvpApiClientEventlet(object):

 if ret:
 if isinstance(ret, Exception):
-lg.error('NvpApiClient: login error "%s"' % ret)
+lg.error(_('NvpApiClient: login error "%s"'), ret)
 raise ret

 self._cookie = None
 cookie = ret.getheader("Set-Cookie")
 if cookie:
-lg.debug("Saving new authentication cookie '%s'" % cookie)
+lg.debug(_("Saving new authentication cookie '%s'"), cookie)
 self._cookie = cookie
 self._need_login = False

@@ -28,6 +28,7 @@ def _conn_str(conn):
 elif isinstance(conn, mock.Mock):
 proto = "http://"
 else:
-raise TypeError('_conn_str() invalid connection type: %s' % type(conn))
+raise TypeError(_('_conn_str() invalid connection type: %s') %
+type(conn))

 return "%s%s:%s" % (proto, conn.host, conn.port)
@@ -139,7 +139,7 @@ class NvpApiRequestEventlet:
 '''Wait for instance green thread to complete.'''
 if self._green_thread is not None:
 return self._green_thread.wait()
-return Exception('Joining an invalid green thread')
+return Exception(_('Joining an invalid green thread'))

 def start(self):
 '''Start request processing.'''
@@ -164,8 +164,8 @@ class NvpApiRequestEventlet:
 with timeout.Timeout(self._request_timeout, False):
 return self._handle_request()

-lg.info('[%d] Request timeout.' % self._rid())
-self._request_error = Exception('Request timeout')
+lg.info(_('[%d] Request timeout.'), self._rid())
+self._request_error = Exception(_('Request timeout'))
 return None
 else:
 return self._handle_request()
@@ -178,7 +178,7 @@ class NvpApiRequestEventlet:
 '''Issue a request to a provider.'''
 conn = self._api_client.acquire_connection(rid=self._rid())
 if conn is None:
-error = Exception("No API connections available")
+error = Exception(_("No API connections available"))
 self._request_error = error
 return error

@@ -187,9 +187,9 @@ class NvpApiRequestEventlet:
 acquired_conn = conn

 url = self._url
-lg.debug("[%d] Issuing - request '%s'" %
-(self._rid(),
-self._request_str(conn, url)))
+lg.debug(_("[%(rid)d] Issuing - request '%(req)s'"),
+{'rid': self._rid(),
+'req': self._request_str(conn, url)})
 issued_time = time.time()
 is_conn_error = False
 try:
@@ -208,26 +208,31 @@ class NvpApiRequestEventlet:
 gen = self._api_client.nvp_config_gen
 if gen:
 headers["X-Nvp-Wait-For-Config-Generation"] = gen
-lg.debug("Setting %s request header: %s" %
-('X-Nvp-Wait-For-Config-Generation', gen))
+lg.debug(_("Setting %(header)s request header: %(gen)s"),
+{'header': 'X-Nvp-Wait-For-Config-Generation',
+'gen': gen})
 try:
 conn.request(self._method, url, self._body, headers)
 except Exception as e:
-lg.warn('[%d] Exception issuing request: %s' %
-(self._rid(), e))
+lg.warn(_('[%(rid)d] Exception issuing request: %(e)s'),
+{'rid': self._rid(), 'e': e})
 raise e

 response = conn.getresponse()
 response.body = response.read()
 response.headers = response.getheaders()
-lg.debug("[%d] Completed request '%s': %s (%0.2f seconds)"
-% (self._rid(), self._request_str(conn, url),
-response.status, time.time() - issued_time))
+lg.debug(_("[%(rid)d] Completed request '%(req)s': %(status)s "
+"(%(time)0.2f seconds)"),
+{'rid': self._rid(),
+'req': self._request_str(conn, url),
+'status': response.status,
+'time': time.time() - issued_time})

 new_gen = response.getheader('X-Nvp-Config-Generation', None)
 if new_gen:
-lg.debug("Reading %s response header: %s" %
-('X-Nvp-config-Generation', new_gen))
+lg.debug(_("Reading %(header)s response header: %(gen)s"),
+{'header': 'X-Nvp-config-Generation',
+'gen': new_gen})
 if (self._api_client.nvp_config_gen is None or
 self._api_client.nvp_config_gen < int(new_gen)):
 self._api_client.nvp_config_gen = int(new_gen)
@@ -236,8 +241,8 @@ class NvpApiRequestEventlet:
 httplib.TEMPORARY_REDIRECT]:
 break
 elif redirects >= self._redirects:
-lg.info("[%d] Maximum redirects exceeded, aborting request"
-% self._rid())
+lg.info(_("[%d] Maximum redirects exceeded, aborting "
+"request"), self._rid())
 break
 redirects += 1

@@ -247,8 +252,9 @@ class NvpApiRequestEventlet:
 if url is None:
 response.status = httplib.INTERNAL_SERVER_ERROR
 break
-lg.info("[%d] Redirecting request to: %s" %
-(self._rid(), self._request_str(conn, url)))
+lg.info(_("[%(rid)d] Redirecting request to: %(req)s"),
+{'rid': self._rid(),
+'req': self._request_str(conn, url)})

 # FIX for #9415. If we receive any of these responses, then
 # our server did not process our request and may be in an
@@ -257,20 +263,24 @@ class NvpApiRequestEventlet:
 # which puts the conn on the back of the client's priority
 # queue.
 if response.status >= 500:
-lg.warn("[%d] Request '%s %s' received: %s"
-% (self._rid(), self._method, self._url,
-response.status))
-raise Exception('Server error return: %s' %
+lg.warn(_("[%(rid)d] Request '%(method)s %(url)s' "
+"received: %(status)s"),
+{'rid': self._rid(), 'method': self._method,
+'url': self._url,
+'status': response.status})
+raise Exception(_('Server error return: %s') %
 response.status)
 return response
 except Exception as e:
 if isinstance(e, httplib.BadStatusLine):
-msg = "Invalid server response"
+msg = _("Invalid server response")
 else:
 msg = unicode(e)
-lg.warn("[%d] Failed request '%s': %s (%0.2f seconds)"
-% (self._rid(), self._request_str(conn, url), msg,
-time.time() - issued_time))
+lg.warn(_("[%(rid)d] Failed request '%(req)s': %(msg)s "
+"(%(time)0.2f seconds)"),
+{'rid': self._rid(), 'req': self._request_str(conn, url),
+'msg': msg,
+'time': time.time() - issued_time})
 self._request_error = e
 is_conn_error = True
 return e
@@ -288,8 +298,8 @@ class NvpApiRequestEventlet:
 url = value
 break
 if not url:
-lg.warn("[%d] Received redirect status without location header"
-" field" % self._rid())
+lg.warn(_("[%d] Received redirect status without location header "
+"field"), self._rid())
 return (conn, None)
 # Accept location with the following format:
 # 1. /path, redirect to same node
@@ -305,12 +315,14 @@ class NvpApiRequestEventlet:
 url = result.path
 return (conn, url) # case 1
 else:
-lg.warn("[%d] Received invalid redirect location: %s" %
-(self._rid(), url))
+lg.warn(_("[%(rid)d] Received invalid redirect location: "
+"%(url)s"),
+{'rid': self._rid(), 'url': url})
 return (conn, None) # case 3
 elif result.scheme not in ["http", "https"] or not result.hostname:
-lg.warn("[%d] Received malformed redirect location: %s" %
-(self._rid(), url))
+lg.warn(_("[%(rid)d] Received malformed redirect location: "
+"%(url)s"),
+{'rid': self._rid(), 'url': url})
 return (conn, None) # case 3
 # case 2, redirect location includes a scheme
 # so setup a new connection and authenticate
@@ -354,13 +366,15 @@ class NvpApiRequestEventlet:
 continue
 # else fall through to return the error code

-lg.debug("[%d] Completed request '%s %s': %s"
-% (self._rid(), self._method, self._url, req.status))
+lg.debug(_("[%(rid)d] Completed request '%(method)s %(url)s'"
+": %(status)s"),
+{'rid': self._rid(), 'method': self._method,
+'url': self._url, 'status': req.status})
 self._request_error = None
 response = req
 else:
-lg.info('[%d] Error while handling request: %s' % (self._rid(),
-req))
+lg.info(_('[%(rid)d] Error while handling request: %(req)s'),
+{'rid': self._rid(), 'req': req})
 self._request_error = req
 response = None

@@ -413,7 +427,8 @@ class NvpGetApiProvidersRequestEventlet(NvpApiRequestEventlet):
 ret.append(_provider_from_listen_addr(addr))
 return ret
 except Exception as e:
-lg.warn("[%d] Failed to parse API provider: %s" % (self._rid(), e))
+lg.warn(_("[%(rid)d] Failed to parse API provider: %(e)s"),
+{'rid': self._rid(), 'e': e})
 # intentionally fall through
 return None

@@ -85,7 +85,7 @@ def get_cluster_version(cluster):
 raise exception.QuantumException()
 version_parts = res["version"].split(".")
 version = "%s.%s" % tuple(version_parts[:2])
-LOG.info("NVP controller cluster version: %s" % version)
+LOG.info(_("NVP controller cluster version: %s"), version)
 return version


@@ -120,7 +120,7 @@ def do_multi_request(*args, **kwargs):
 results = []
 clusters = kwargs["clusters"]
 for x in clusters:
-LOG.debug("Issuing request to cluster: %s" % x.name)
+LOG.debug(_("Issuing request to cluster: %s"), x.name)
 rv = x.api_client.request(*args)
 results.append(rv)
 return results
@@ -134,12 +134,12 @@ def find_port_and_cluster(clusters, port_id):
 """
 for c in clusters:
 query = "/ws.v1/lswitch/*/lport?uuid=%s&fields=*" % port_id
-LOG.debug("Looking for lswitch with port id \"%s\" on: %s"
-% (port_id, c))
+LOG.debug(_("Looking for lswitch with port id "
+"'%(port_id)s' on: %(c)s"), locals())
 try:
 res = do_single_request('GET', query, cluster=c)
 except Exception as e:
-LOG.error("get_port_cluster_and_url, exception: %s" % str(e))
+LOG.error(_("get_port_cluster_and_url, exception: %s"), str(e))
 continue
 res = json.loads(res)
 if len(res["results"]) == 1:
@@ -160,22 +160,22 @@ def get_network(cluster, net_id):
 try:
 resp_obj = do_single_request("GET", path, cluster=cluster)
 network = json.loads(resp_obj)
-LOG.warning("### nw:%s", network)
+LOG.warning(_("### nw:%s"), network)
 except NvpApiClient.ResourceNotFound:
 raise exception.NetworkNotFound(net_id=net_id)
 except NvpApiClient.NvpApiException:
 raise exception.QuantumException()
-LOG.debug("Got network \"%s\": %s" % (net_id, network))
+LOG.debug(_("Got network '%(net_id)s': %(network)s"), locals())
 return network


 def create_lswitch(cluster, lswitch_obj):
-LOG.info("Creating lswitch: %s" % lswitch_obj)
+LOG.info(_("Creating lswitch: %s"), lswitch_obj)
 # Warn if no tenant is specified
 found = "os_tid" in [x["scope"] for x in lswitch_obj["tags"]]
 if not found:
-LOG.warn("No tenant-id tag specified in logical switch: %s" % (
-lswitch_obj))
+LOG.warn(_("No tenant-id tag specified in logical switch: %s"),
+lswitch_obj)
 uri = "/ws.v1/lswitch"
 try:
 resp_obj = do_single_request("POST", uri,
@@ -188,7 +188,7 @@ def create_lswitch(cluster, lswitch_obj):
 d = {}
 d["net-id"] = r['uuid']
 d["net-name"] = r['display_name']
-LOG.debug("Created logical switch: %s" % d["net-id"])
+LOG.debug(_("Created logical switch: %s"), d["net-id"])
 return d


@@ -201,7 +201,7 @@ def update_network(cluster, lswitch_id, **params):
 resp_obj = do_single_request("PUT", uri, json.dumps(lswitch_obj),
 cluster=cluster)
 except NvpApiClient.ResourceNotFound as e:
-LOG.error("Network not found, Error: %s" % str(e))
+LOG.error(_("Network not found, Error: %s"), str(e))
 raise exception.NetworkNotFound(net_id=lswitch_id)
 except NvpApiClient.NvpApiException as e:
 raise exception.QuantumException()
@@ -256,7 +256,7 @@ def delete_networks(cluster, net_id, lswitch_ids):
 try:
 do_single_request("DELETE", path, cluster=cluster)
 except NvpApiClient.ResourceNotFound as e:
-LOG.error("Network not found, Error: %s" % str(e))
+LOG.error(_("Network not found, Error: %s"), str(e))
 raise exception.NetworkNotFound(net_id=ls_id)
 except NvpApiClient.NvpApiException as e:
 raise exception.QuantumException()
@@ -292,7 +292,7 @@ def query_ports(cluster, network, relations=None, fields="*", filters=None):
 try:
 resp_obj = do_single_request("GET", uri, cluster=cluster)
 except NvpApiClient.ResourceNotFound as e:
-LOG.error("Network not found, Error: %s" % str(e))
+LOG.error(_("Network not found, Error: %s"), str(e))
 raise exception.NetworkNotFound(net_id=network)
 except NvpApiClient.NvpApiException as e:
 raise exception.QuantumException()
@@ -303,7 +303,7 @@ def delete_port(cluster, port):
 try:
 do_single_request("DELETE", port['_href'], cluster=cluster)
 except NvpApiClient.ResourceNotFound as e:
-LOG.error("Port or Network not found, Error: %s" % str(e))
+LOG.error(_("Port or Network not found, Error: %s"), str(e))
 raise exception.PortNotFound(port_id=port['uuid'])
 except NvpApiClient.NvpApiException as e:
 raise exception.QuantumException()
@@ -316,8 +316,9 @@ def get_port_by_quantum_tag(clusters, lswitch, quantum_tag):
 "fabric_status_up,uuid&tag=%s&tag_scope=q_port_id"
 "&relations=LogicalPortStatus" % (lswitch, quantum_tag))

-LOG.debug("Looking for port with q_tag \"%s\" on: %s"
-% (quantum_tag, lswitch))
+LOG.debug(_("Looking for port with q_tag '%(quantum_tag)s' "
+"on: %(lswitch)s"),
+locals())
 for c in clusters:
 try:
 res_obj = do_single_request('GET', query, cluster=c)
@@ -327,7 +328,7 @@ def get_port_by_quantum_tag(clusters, lswitch, quantum_tag):
 if len(res["results"]) == 1:
 return (res["results"][0], c)

-LOG.error("Port or Network not found, Error: %s" % str(e))
+LOG.error(_("Port or Network not found, Error: %s"), str(e))
 raise exception.PortNotFound(port_id=quantum_tag, net_id=lswitch)


@@ -336,8 +337,8 @@ def get_port_by_display_name(clusters, lswitch, display_name):
 """
 query = ("/ws.v1/lswitch/%s/lport?display_name=%s&fields=*" %
 (lswitch, display_name))
-LOG.debug("Looking for port with display_name \"%s\" on: %s"
-% (display_name, lswitch))
+LOG.debug(_("Looking for port with display_name "
+"'%(display_name)s' on: %(lswitch)s"), locals())
 for c in clusters:
 try:
 res_obj = do_single_request('GET', query, cluster=c)
@@ -347,12 +348,12 @@ def get_port_by_display_name(clusters, lswitch, display_name):
 if len(res["results"]) == 1:
 return (res["results"][0], c)

-LOG.error("Port or Network not found, Error: %s" % str(e))
+LOG.error(_("Port or Network not found, Error: %s"), str(e))
 raise exception.PortNotFound(port_id=display_name, net_id=lswitch)


 def get_port(cluster, network, port, relations=None):
-LOG.info("get_port() %s %s" % (network, port))
+LOG.info(_("get_port() %(network)s %(port)s"), locals())
 uri = "/ws.v1/lswitch/" + network + "/lport/" + port + "?"
 if relations:
 uri += "relations=%s" % relations
@@ -360,7 +361,7 @@ def get_port(cluster, network, port, relations=None):
 resp_obj = do_single_request("GET", uri, cluster=cluster)
 port = json.loads(resp_obj)
 except NvpApiClient.ResourceNotFound as e:
-LOG.error("Port or Network not found, Error: %s" % str(e))
+LOG.error(_("Port or Network not found, Error: %s"), str(e))
 raise exception.PortNotFound(port_id=port, net_id=network)
 except NvpApiClient.NvpApiException as e:
 raise exception.QuantumException()
@@ -392,7 +393,7 @@ def update_port(network, port_id, **params):
 resp_obj = do_single_request("PUT", uri, json.dumps(lport_obj),
 cluster=cluster)
 except NvpApiClient.ResourceNotFound as e:
-LOG.error("Port or Network not found, Error: %s" % str(e))
+LOG.error(_("Port or Network not found, Error: %s"), str(e))
 raise exception.PortNotFound(port_id=port_id, net_id=network)
 except NvpApiClient.NvpApiException as e:
 raise exception.QuantumException()
@@ -444,7 +445,7 @@ def get_port_status(cluster, lswitch_id, port_id):
 (lswitch_id, port_id), cluster=cluster)
 r = json.loads(r)
 except NvpApiClient.ResourceNotFound as e:
-LOG.error("Port not found, Error: %s" % str(e))
+LOG.error(_("Port not found, Error: %s"), str(e))
 raise exception.PortNotFound(port_id=port_id, net_id=lswitch_id)
 except NvpApiClient.NvpApiException as e:
 raise exception.QuantumException()
@@ -467,11 +468,11 @@ def plug_interface(clusters, lswitch_id, port, type, attachment=None):
 resp_obj = do_single_request("PUT", uri, json.dumps(lport_obj),
 cluster=dest_cluster)
 except NvpApiClient.ResourceNotFound as e:
-LOG.error("Port or Network not found, Error: %s" % str(e))
+LOG.error(_("Port or Network not found, Error: %s"), str(e))
 raise exception.PortNotFound(port_id=port, net_id=lswitch_id)
 except NvpApiClient.Conflict as e:
-LOG.error("Conflict while making attachment to port, "
-"Error: %s" % str(e))
+LOG.error(_("Conflict while making attachment to port, "
+"Error: %s"), str(e))
 raise exception.AlreadyAttached(att_id=attachment,
 port_id=port,
 net_id=lswitch_id,