2011-08-03 17:41:33 -04:00
|
|
|
# Copyright 2010 Jacob Kaplan-Moss
|
2013-03-13 18:09:17 -04:00
|
|
|
# Copyright 2011 OpenStack Foundation
|
2011-08-08 13:41:29 -07:00
|
|
|
# Copyright 2011 Piston Cloud Computing, Inc.
|
2011-08-05 23:17:35 -07:00
|
|
|
|
2011-09-26 12:28:43 -07:00
|
|
|
# All Rights Reserved.
|
2013-12-06 10:47:41 +10:30
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
|
|
# not use this file except in compliance with the License. You may obtain
|
|
|
|
# a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
|
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
|
|
# License for the specific language governing permissions and limitations
|
|
|
|
# under the License.
|
|
|
|
|
2011-08-03 17:41:33 -04:00
|
|
|
"""
|
|
|
|
OpenStack Client interface. Handles the REST calls and responses.
|
|
|
|
"""
|
|
|
|
|
2014-07-15 18:25:57 +08:00
|
|
|
import copy
|
2014-04-08 11:40:41 +10:00
|
|
|
import functools
|
2015-04-07 15:23:14 +08:00
|
|
|
import glob
|
2014-06-09 10:20:31 -04:00
|
|
|
import hashlib
|
2015-04-07 15:23:14 +08:00
|
|
|
import imp
|
|
|
|
import itertools
|
2011-08-03 17:41:33 -04:00
|
|
|
import logging
|
2015-04-07 15:23:14 +08:00
|
|
|
import os
|
2015-03-31 18:09:36 +03:00
|
|
|
import pkgutil
|
2014-07-08 10:56:36 +09:00
|
|
|
import re
|
2015-09-09 17:18:14 +03:00
|
|
|
import warnings
|
2011-08-03 17:41:33 -04:00
|
|
|
|
2014-08-07 20:18:58 +10:00
|
|
|
from keystoneclient import adapter
|
2015-07-22 15:13:01 -05:00
|
|
|
from keystoneclient import session
|
2015-01-26 16:37:53 +02:00
|
|
|
from oslo_utils import importutils
|
|
|
|
from oslo_utils import netutils
|
2015-04-07 15:23:14 +08:00
|
|
|
import pkg_resources
|
2012-12-18 14:05:29 -06:00
|
|
|
import requests
|
2012-08-02 16:41:47 +02:00
|
|
|
|
2011-08-03 17:41:33 -04:00
|
|
|
try:
|
|
|
|
import json
|
|
|
|
except ImportError:
|
|
|
|
import simplejson as json
|
|
|
|
|
2014-02-14 08:43:12 +08:00
|
|
|
from six.moves.urllib import parse
|
|
|
|
|
2015-04-02 16:37:59 +03:00
|
|
|
from novaclient import api_versions
|
2011-08-03 17:41:33 -04:00
|
|
|
from novaclient import exceptions
|
2015-04-07 15:23:14 +08:00
|
|
|
from novaclient import extension as ext
|
2015-09-09 17:18:14 +03:00
|
|
|
from novaclient.i18n import _, _LW
|
2011-12-29 15:37:05 -05:00
|
|
|
from novaclient import service_catalog
|
2015-03-18 17:37:54 +08:00
|
|
|
from novaclient import utils
|
2011-08-03 17:41:33 -04:00
|
|
|
|
|
|
|
|
2014-03-26 15:22:03 +04:00
|
|
|
class _ClientConnectionPool(object):
    """Process-wide cache of one HTTP adapter per service URL.

    Reusing the adapters (and therefore their underlying connection
    pools) between requests avoids opening a fresh keep-alive
    connection for every call.
    """

    def __init__(self):
        # Maps "scheme://netloc" -> keystoneclient TCPKeepAliveAdapter.
        self._adapters = {}

    def get(self, url):
        """Return the adapter for *url*, creating it on first use."""
        try:
            return self._adapters[url]
        except KeyError:
            new_adapter = session.TCPKeepAliveAdapter()
            self._adapters[url] = new_adapter
            return new_adapter
|
Fix in in novaclient, to avoid excessive conns
The current client creates new .Session() on each request,
but since Horizon is a stateless app, each Session creates
new HttpAdapter, which itself has its own connection pool,
and each connection there is used (almost) once and then
is being kept in the pool(with Keep-Alive) for a certain
amount of time(waiting for inactivity timeout). The problem
is that the connection cannot be used anymore from next Django
calls - they create new connection pool with new connections, etc.
This keeps lots of open connections on the server.
Now the client will store an HTTPAdapter for each URL into
a singleton object, and will reuse its connections between
Django calls, but still taking advantage of Sessions during
a single page load(although we do not fully use this).
Note: the default pool behavior is non-blocking, which means
that if the max_pool_size is reached, a new connection will
still be opened, and when released - will be discarded.
It could be useful to add max_pool_size param into settings,
for performance fine-tuning. The default max_pool_size is 10.
Since python-novaclient is also used from non-Django projects,
I'd expect feedback from more people on the impact this change
could have over other projects.
Patch Set 3: Removed explicit connection closing, leaving
connections open in the pool.
Change-Id: Icc9dc2fa2863d0e0e26a86c8180f2e0fbcd1fcff
Closes-Bug: #1247056
2014-02-20 23:11:34 +02:00
|
|
|
|
2011-08-03 17:41:33 -04:00
|
|
|
|
2014-08-07 20:18:58 +10:00
|
|
|
class SessionClient(adapter.LegacyJsonAdapter):
    """HTTP client built on a keystoneclient session adapter.

    Adds optional per-request timing records and translation of HTTP
    errors into novaclient exception types.
    """

    def __init__(self, *args, **kwargs):
        # [("METHOD URL", start, end), ...] accumulated when timings is on.
        self.times = []
        self.timings = kwargs.pop('timings', False)
        self.api_version = kwargs.pop('api_version', None)
        self.api_version = self.api_version or api_versions.APIVersion()
        super(SessionClient, self).__init__(*args, **kwargs)

    def request(self, url, method, **kwargs):
        """Send a request through the session adapter.

        :param url: request URL (relative to the endpoint)
        :param method: HTTP method name
        :returns: (response, decoded body) tuple
        :raises: a novaclient exception (via exceptions.from_response)
            for status >= 400, unless raise_exc=False was passed
        """
        # Fix: was kwargs.setdefault('headers', kwargs.get('headers', {})) --
        # the inner get() was redundant; setdefault alone is equivalent.
        kwargs.setdefault('headers', {})
        api_versions.update_headers(kwargs["headers"], self.api_version)
        # NOTE(jamielennox): The standard call raises errors from
        # keystoneclient, where we need to raise the novaclient errors.
        raise_exc = kwargs.pop('raise_exc', True)
        with utils.record_time(self.times, self.timings, method, url):
            resp, body = super(SessionClient, self).request(url,
                                                            method,
                                                            raise_exc=False,
                                                            **kwargs)
        if raise_exc and resp.status_code >= 400:
            raise exceptions.from_response(resp, body, url, method)

        return resp, body

    def get_timings(self):
        """Return the accumulated timing records."""
        return self.times

    def reset_timings(self):
        """Discard all accumulated timing records."""
        self.times = []
|
|
|
|
|
2014-04-08 11:40:41 +10:00
|
|
|
|
|
|
|
def _original_only(f):
    """Restrict *f* to instances backed by the original HTTPClient.

    Decorating a method with this enforces that, when the newer
    Session-based HTTP client is in use, the operation must be performed
    on the session object itself rather than through this wrapper.
    """
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        if not isinstance(self.client, SessionClient):
            return f(self, *args, **kwargs)

        msg = ('This call is no longer available. The operation should '
               'be performed on the session object instead.')
        raise exceptions.InvalidUsage(msg)

    return wrapper
|
|
|
|
|
|
|
|
|
Fix in in novaclient, to avoid excessive conns
The current client creates new .Session() on each request,
but since Horizon is a stateless app, each Session creates
new HttpAdapter, which itself has its own connection pool,
and each connection there is used (almost) once and then
is being kept in the pool(with Keep-Alive) for a certain
amount of time(waiting for inactivity timeout). The problem
is that the connection cannot be used anymore from next Django
calls - they create new connection pool with new connections, etc.
This keeps lots of open connections on the server.
Now the client will store an HTTPAdapter for each URL into
a singleton object, and will reuse its connections between
Django calls, but still taking advantage of Sessions during
a single page load(although we do not fully use this).
Note: the default pool behavior is non-blocking, which means
that if the max_pool_size is reached, a new connection will
still be opened, and when released - will be discarded.
It could be useful to add max_pool_size param into settings,
for performance fine-tuning. The default max_pool_size is 10.
Since python-novaclient is also used from non-Django projects,
I'd expect feedback from more people on the impact this change
could have over other projects.
Patch Set 3: Removed explicit connection closing, leaving
connections open in the pool.
Change-Id: Icc9dc2fa2863d0e0e26a86c8180f2e0fbcd1fcff
Closes-Bug: #1247056
2014-02-20 23:11:34 +02:00
|
|
|
class HTTPClient(object):
|
2011-08-03 17:41:33 -04:00
|
|
|
USER_AGENT = 'python-novaclient'
|
|
|
|
|
2013-06-27 22:57:10 +01:00
|
|
|
    def __init__(self, user, password, projectid=None, auth_url=None,
                 insecure=False, timeout=None, proxy_tenant_id=None,
                 proxy_token=None, region_name=None,
                 endpoint_type='publicURL', service_type=None,
                 service_name=None, volume_service_name=None,
                 timings=False, bypass_url=None,
                 os_cache=False, no_cache=True,
                 http_log_debug=False, auth_system='keystone',
                 auth_plugin=None, auth_token=None,
                 cacert=None, tenant_id=None, user_id=None,
                 connection_pool=False, api_version=None):
        """Initialize the legacy (non-Session) nova HTTP client.

        :param user: user name for authentication
        :param password: password; may be None when ``auth_token`` is set
        :param projectid: project (tenant) name
        :param auth_url: identity service endpoint (trailing '/' stripped)
        :param insecure: if True, disable TLS certificate verification
        :param timeout: socket timeout in seconds (stored as float)
        :param endpoint_type: service-catalog endpoint type to use
        :param timings: if True, record per-request timings in ``times``
        :param bypass_url: endpoint to use instead of the service catalog
        :param os_cache: if True (or ``no_cache`` is False), allow use of
            the cached auth token
        :param http_log_debug: if True, log requests/responses in curl form
        :param auth_system: auth backend name; values other than
            'keystone' require ``auth_plugin``
        :param cacert: CA bundle path used for TLS verification
        :param connection_pool: if True, share HTTP adapters between
            requests via _ClientConnectionPool
        :param api_version: api_versions.APIVersion (defaults to the
            null version)
        :raises exceptions.AuthSystemNotFound: non-keystone auth_system
            without a plugin
        :raises exceptions.EndpointNotFound: plugin yields no auth URL
        """
        self.user = user
        self.user_id = user_id
        self.password = password
        self.projectid = projectid
        self.tenant_id = tenant_id
        self.api_version = api_version or api_versions.APIVersion()

        # Shared adapter cache; None means one plain Session per client.
        self._connection_pool = (_ClientConnectionPool()
                                 if connection_pool else None)

        # This will be called by #_get_password if self.password is None.
        # EG if a password can only be obtained by prompting the user, but a
        # token is available, you don't want to prompt until the token has
        # been proven invalid
        self.password_func = None

        if auth_system and auth_system != 'keystone' and not auth_plugin:
            raise exceptions.AuthSystemNotFound(auth_system)

        # Non-keystone plugins may supply their own auth endpoint.
        if not auth_url and auth_system and auth_system != 'keystone':
            auth_url = auth_plugin.get_auth_url()
            if not auth_url:
                raise exceptions.EndpointNotFound()
        self.auth_url = auth_url.rstrip('/') if auth_url else auth_url
        self.version = 'v1.1'
        self.region_name = region_name
        self.endpoint_type = endpoint_type
        self.service_type = service_type
        self.service_name = service_name
        self.volume_service_name = volume_service_name
        self.timings = timings
        self.bypass_url = bypass_url.rstrip('/') if bypass_url else bypass_url
        self.os_cache = os_cache or not no_cache
        self.http_log_debug = http_log_debug
        if timeout is not None:
            self.timeout = float(timeout)
        else:
            self.timeout = None

        self.times = []  # [("item", starttime, endtime), ...]

        # bypass_url, when given, pre-seeds the management endpoint so no
        # catalog lookup is needed.
        self.management_url = self.bypass_url or None
        self.auth_token = auth_token
        self.proxy_token = proxy_token
        self.proxy_tenant_id = proxy_tenant_id
        self.keyring_saver = None
        self.keyring_saved = False

        # verify_cert is passed straight to requests' ``verify`` kwarg:
        # False (skip), a CA bundle path, or True (default verification).
        if insecure:
            self.verify_cert = False
        else:
            if cacert:
                self.verify_cert = cacert
            else:
                self.verify_cert = True

        self.auth_system = auth_system
        self.auth_plugin = auth_plugin
        self._session = None
        # Tracks which service endpoint the current session is mounted on.
        self._current_url = None
        self._logger = logging.getLogger(__name__)

        if self.http_log_debug and not self._logger.handlers:
            # Logging level is already set on the root logger
            ch = logging.StreamHandler()
            self._logger.addHandler(ch)
            self._logger.propagate = False
            if hasattr(requests, 'logging'):
                rql = requests.logging.getLogger(requests.__name__)
                rql.addHandler(ch)
                # Since we have already setup the root logger on debug, we
                # have to set it up here on WARNING (its original level)
                # otherwise we will get all the requests logging messages
                rql.setLevel(logging.WARNING)

        self.service_catalog = None
        self.services_url = {}
        self.last_request_id = None
|
2015-03-14 23:39:05 +00:00
|
|
|
|
2012-07-05 09:45:46 -03:00
|
|
|
    def use_token_cache(self, use_it):
        # Toggle use of the cached auth token (same flag as os_cache).
        self.os_cache = use_it
|
2012-07-05 09:45:46 -03:00
|
|
|
|
|
|
|
def unauthenticate(self):
|
|
|
|
"""Forget all of our authentication information."""
|
|
|
|
self.management_url = None
|
|
|
|
self.auth_token = None
|
|
|
|
|
2012-06-15 15:12:23 -03:00
|
|
|
    def set_management_url(self, url):
        # Set the endpoint used for subsequent API requests.
        self.management_url = url
|
|
|
|
|
|
|
|
    def get_timings(self):
        # Accumulated timing records: [("item", starttime, endtime), ...]
        # (populated by _time_request when self.timings is enabled).
        return self.times
|
|
|
|
|
2012-06-28 17:00:47 -03:00
|
|
|
    def reset_timings(self):
        # Discard all accumulated timing records.
        self.times = []
|
|
|
|
|
2014-07-15 18:25:57 +08:00
|
|
|
def _redact(self, target, path, text=None):
|
|
|
|
"""Replace the value of a key in `target`.
|
|
|
|
|
|
|
|
The key can be at the top level by specifying a list with a single
|
|
|
|
key as the path. Nested dictionaries are also supported by passing a
|
|
|
|
list of keys to be navigated to find the one that should be replaced.
|
|
|
|
In this case the last one is the one that will be replaced.
|
|
|
|
|
|
|
|
:param dict target: the dictionary that may have a key to be redacted;
|
|
|
|
modified in place
|
|
|
|
:param list path: a list representing the nested structure in `target`
|
|
|
|
that should be redacted; modified in place
|
|
|
|
:param string text: optional text to use as a replacement for the
|
|
|
|
redacted key. if text is not specified, the
|
|
|
|
default text will be sha1 hash of the value being
|
|
|
|
redacted
|
|
|
|
"""
|
|
|
|
|
|
|
|
key = path.pop()
|
|
|
|
|
|
|
|
# move to the most nested dict
|
|
|
|
for p in path:
|
|
|
|
try:
|
|
|
|
target = target[p]
|
|
|
|
except KeyError:
|
|
|
|
return
|
|
|
|
|
|
|
|
if key in target:
|
|
|
|
if text:
|
|
|
|
target[key] = text
|
2015-08-24 19:45:53 +03:00
|
|
|
elif target[key] is not None:
|
2014-07-15 18:25:57 +08:00
|
|
|
# because in python3 byte string handling is ... ug
|
|
|
|
value = target[key].encode('utf-8')
|
|
|
|
sha1sum = hashlib.sha1(value)
|
|
|
|
target[key] = "{SHA1}%s" % sha1sum.hexdigest()
|
2014-06-09 10:20:31 -04:00
|
|
|
|
2013-10-24 14:56:20 +11:00
|
|
|
def http_log_req(self, method, url, kwargs):
|
2012-07-02 15:22:59 -05:00
|
|
|
if not self.http_log_debug:
|
2011-08-03 17:41:33 -04:00
|
|
|
return
|
|
|
|
|
2014-11-14 14:26:53 +00:00
|
|
|
string_parts = ['curl -g -i']
|
2013-10-24 14:52:34 +11:00
|
|
|
|
|
|
|
if not kwargs.get('verify', True):
|
|
|
|
string_parts.append(' --insecure')
|
|
|
|
|
2013-10-24 14:56:20 +11:00
|
|
|
string_parts.append(" '%s'" % url)
|
|
|
|
string_parts.append(' -X %s' % method)
|
2011-08-03 17:41:33 -04:00
|
|
|
|
2014-07-15 18:25:57 +08:00
|
|
|
headers = copy.deepcopy(kwargs['headers'])
|
|
|
|
self._redact(headers, ['X-Auth-Token'])
|
2014-06-09 10:20:31 -04:00
|
|
|
# because dict ordering changes from 2 to 3
|
2014-07-15 18:25:57 +08:00
|
|
|
keys = sorted(headers.keys())
|
2014-06-09 10:20:31 -04:00
|
|
|
for name in keys:
|
2014-07-15 18:25:57 +08:00
|
|
|
value = headers[name]
|
|
|
|
header = ' -H "%s: %s"' % (name, value)
|
2011-08-03 17:41:33 -04:00
|
|
|
string_parts.append(header)
|
|
|
|
|
2013-01-10 11:20:07 -05:00
|
|
|
if 'data' in kwargs:
|
2014-07-15 18:25:57 +08:00
|
|
|
data = json.loads(kwargs['data'])
|
|
|
|
self._redact(data, ['auth', 'passwordCredentials', 'password'])
|
|
|
|
string_parts.append(" -d '%s'" % json.dumps(data))
|
2014-06-09 10:20:31 -04:00
|
|
|
self._logger.debug("REQ: %s" % "".join(string_parts))
|
2012-08-16 17:57:09 -07:00
|
|
|
|
2012-12-18 14:05:29 -06:00
|
|
|
def http_log_resp(self, resp):
|
2012-08-16 17:57:09 -07:00
|
|
|
if not self.http_log_debug:
|
|
|
|
return
|
2014-07-15 18:25:57 +08:00
|
|
|
|
|
|
|
if resp.text and resp.status_code != 400:
|
|
|
|
try:
|
|
|
|
body = json.loads(resp.text)
|
|
|
|
self._redact(body, ['access', 'token', 'id'])
|
|
|
|
except ValueError:
|
|
|
|
body = None
|
|
|
|
else:
|
|
|
|
body = None
|
|
|
|
|
2014-05-12 19:09:50 +02:00
|
|
|
self._logger.debug("RESP: [%(status)s] %(headers)s\nRESP BODY: "
|
|
|
|
"%(text)s\n", {'status': resp.status_code,
|
|
|
|
'headers': resp.headers,
|
2014-07-15 18:25:57 +08:00
|
|
|
'text': json.dumps(body)})
|
2011-08-03 17:41:33 -04:00
|
|
|
|
2014-03-26 15:22:03 +04:00
|
|
|
def open_session(self):
|
|
|
|
if not self._connection_pool:
|
|
|
|
self._session = requests.Session()
|
|
|
|
|
|
|
|
def close_session(self):
|
|
|
|
if self._session and not self._connection_pool:
|
|
|
|
self._session.close()
|
|
|
|
self._session = None
|
|
|
|
|
|
|
|
def _get_session(self, url):
|
|
|
|
if self._connection_pool:
|
|
|
|
magic_tuple = parse.urlsplit(url)
|
|
|
|
scheme, netloc, path, query, frag = magic_tuple
|
|
|
|
service_url = '%s://%s' % (scheme, netloc)
|
|
|
|
if self._current_url != service_url:
|
|
|
|
# Invalidate Session object in case the url is somehow changed
|
|
|
|
if self._session:
|
|
|
|
self._session.close()
|
|
|
|
self._current_url = service_url
|
|
|
|
self._logger.debug(
|
2014-09-24 23:20:58 +03:00
|
|
|
"New session created for: (%s)" % service_url)
|
2014-03-26 15:22:03 +04:00
|
|
|
self._session = requests.Session()
|
|
|
|
self._session.mount(service_url,
|
2014-09-25 20:05:18 +03:00
|
|
|
self._connection_pool.get(service_url))
|
2014-03-26 15:22:03 +04:00
|
|
|
return self._session
|
|
|
|
elif self._session:
|
|
|
|
return self._session
|
Fix in in novaclient, to avoid excessive conns
The current client creates new .Session() on each request,
but since Horizon is a stateless app, each Session creates
new HttpAdapter, which itself has its own connection pool,
and each connection there is used (almost) once and then
is being kept in the pool(with Keep-Alive) for a certain
amount of time(waiting for inactivity timeout). The problem
is that the connection cannot be used anymore from next Django
calls - they create new connection pool with new connections, etc.
This keeps lots of open connections on the server.
Now the client will store an HTTPAdapter for each URL into
a singleton object, and will reuse its connections between
Django calls, but still taking advantage of Sessions during
a single page load(although we do not fully use this).
Note: the default pool behavior is non-blocking, which means
that if the max_pool_size is reached, a new connection will
still be opened, and when released - will be discarded.
It could be useful to add max_pool_size param into settings,
for performance fine-tuning. The default max_pool_size is 10.
Since python-novaclient is also used from non-Django projects,
I'd expect feedback from more people on the impact this change
could have over other projects.
Patch Set 3: Removed explicit connection closing, leaving
connections open in the pool.
Change-Id: Icc9dc2fa2863d0e0e26a86c8180f2e0fbcd1fcff
Closes-Bug: #1247056
2014-02-20 23:11:34 +02:00
|
|
|
|
2012-12-18 14:05:29 -06:00
|
|
|
def request(self, url, method, **kwargs):
|
2011-09-09 06:33:38 -07:00
|
|
|
kwargs.setdefault('headers', kwargs.get('headers', {}))
|
2011-08-03 17:41:33 -04:00
|
|
|
kwargs['headers']['User-Agent'] = self.USER_AGENT
|
2012-02-02 16:37:55 -08:00
|
|
|
kwargs['headers']['Accept'] = 'application/json'
|
2011-08-03 17:41:33 -04:00
|
|
|
if 'body' in kwargs:
|
|
|
|
kwargs['headers']['Content-Type'] = 'application/json'
|
2012-12-18 14:05:29 -06:00
|
|
|
kwargs['data'] = json.dumps(kwargs['body'])
|
|
|
|
del kwargs['body']
|
2015-04-02 16:37:59 +03:00
|
|
|
api_versions.update_headers(kwargs["headers"], self.api_version)
|
2013-01-11 21:44:56 -08:00
|
|
|
if self.timeout is not None:
|
|
|
|
kwargs.setdefault('timeout', self.timeout)
|
2013-10-24 14:52:34 +11:00
|
|
|
kwargs['verify'] = self.verify_cert
|
2012-12-18 14:05:29 -06:00
|
|
|
|
2013-10-24 14:56:20 +11:00
|
|
|
self.http_log_req(method, url, kwargs)
|
2014-03-26 15:22:03 +04:00
|
|
|
|
|
|
|
request_func = requests.request
|
|
|
|
session = self._get_session(url)
|
|
|
|
if session:
|
|
|
|
request_func = session.request
|
|
|
|
|
|
|
|
resp = request_func(
|
2012-12-18 14:05:29 -06:00
|
|
|
method,
|
|
|
|
url,
|
|
|
|
**kwargs)
|
Fix in in novaclient, to avoid excessive conns
The current client creates new .Session() on each request,
but since Horizon is a stateless app, each Session creates
new HttpAdapter, which itself has its own connection pool,
and each connection there is used (almost) once and then
is being kept in the pool(with Keep-Alive) for a certain
amount of time(waiting for inactivity timeout). The problem
is that the connection cannot be used anymore from next Django
calls - they create new connection pool with new connections, etc.
This keeps lots of open connections on the server.
Now the client will store an HTTPAdapter for each URL into
a singleton object, and will reuse its connections between
Django calls, but still taking advantage of Sessions during
a single page load(although we do not fully use this).
Note: the default pool behavior is non-blocking, which means
that if the max_pool_size is reached, a new connection will
still be opened, and when released - will be discarded.
It could be useful to add max_pool_size param into settings,
for performance fine-tuning. The default max_pool_size is 10.
Since python-novaclient is also used from non-Django projects,
I'd expect feedback from more people on the impact this change
could have over other projects.
Patch Set 3: Removed explicit connection closing, leaving
connections open in the pool.
Change-Id: Icc9dc2fa2863d0e0e26a86c8180f2e0fbcd1fcff
Closes-Bug: #1247056
2014-02-20 23:11:34 +02:00
|
|
|
|
2012-12-18 14:05:29 -06:00
|
|
|
self.http_log_resp(resp)
|
|
|
|
|
|
|
|
if resp.text:
|
|
|
|
# TODO(dtroyer): verify the note below in a requests context
|
2012-09-07 15:59:42 -04:00
|
|
|
# NOTE(alaski): Because force_exceptions_to_status_code=True
|
|
|
|
# httplib2 returns a connection refused event as a 400 response.
|
|
|
|
# To determine if it is a bad request or refused connection we need
|
|
|
|
# to check the body. httplib2 tests check for 'Connection refused'
|
|
|
|
# or 'actively refused' in the body, so that's what we'll do.
|
2012-12-18 14:05:29 -06:00
|
|
|
if resp.status_code == 400:
|
|
|
|
if ('Connection refused' in resp.text or
|
2014-09-24 23:55:54 +03:00
|
|
|
'actively refused' in resp.text):
|
2012-12-18 14:05:29 -06:00
|
|
|
raise exceptions.ConnectionRefused(resp.text)
|
2011-08-03 17:41:33 -04:00
|
|
|
try:
|
2012-12-18 14:05:29 -06:00
|
|
|
body = json.loads(resp.text)
|
2012-04-02 12:27:30 -05:00
|
|
|
except ValueError:
|
2012-12-18 14:05:29 -06:00
|
|
|
body = None
|
2011-08-03 17:41:33 -04:00
|
|
|
else:
|
|
|
|
body = None
|
|
|
|
|
2015-06-07 10:11:13 -04:00
|
|
|
self.last_request_id = (resp.headers.get('x-openstack-request-id')
|
|
|
|
if resp.headers else None)
|
2012-12-18 14:05:29 -06:00
|
|
|
if resp.status_code >= 400:
|
2013-01-23 11:14:41 -06:00
|
|
|
raise exceptions.from_response(resp, body, url, method)
|
2011-08-03 17:41:33 -04:00
|
|
|
|
|
|
|
return resp, body
|
|
|
|
|
2012-06-15 15:12:23 -03:00
|
|
|
def _time_request(self, url, method, **kwargs):
    """Issue a request and record its wall-clock duration.

    The timing is captured by ``utils.record_time`` into ``self.times``
    and ``self.timings``; the request itself is delegated to
    ``self.request``.
    """
    with utils.record_time(self.times, self.timings, method, url):
        response, payload = self.request(url, method, **kwargs)
    return response, payload
|
|
|
|
|
2011-08-03 17:41:33 -04:00
|
|
|
def _cs_request(self, url, method, **kwargs):
    """Send an authenticated request, re-authenticating once on 401.

    A ``None`` *url* targets the bare nova endpoint (management URL with
    the trailing "v2/<tenant-id>" stripped), which is how API version
    information is fetched.  Otherwise *url* is appended to the service
    catalog endpoint (or the management URL when bypassing the catalog).
    """
    if not self.management_url:
        self.authenticate()

    if url is None:
        # To get API version information, it is necessary to GET
        # a nova endpoint directly without "v2/<tenant-id>".
        pieces = parse.urlsplit(self.management_url)
        trimmed_path = re.sub(r'v[1-9](\.[1-9][0-9]*)?/[a-z0-9]+$', '',
                              pieces.path)
        url = parse.urlunsplit(
            (pieces.scheme, pieces.netloc, trimmed_path, None, None))
    elif self.service_catalog and not self.bypass_url:
        url = self.get_service_url(self.service_type) + url
    else:
        url = self.management_url + url

    # Perform the request once.  A 401 may just mean the auth token
    # expired, so re-authenticate and retry a single time; if that also
    # fails, re-raise the original Unauthorized.
    headers = kwargs.setdefault('headers', {})
    headers['X-Auth-Token'] = self.auth_token
    if self.projectid:
        headers['X-Auth-Project-Id'] = self.projectid
    try:
        return self._time_request(url, method, **kwargs)
    except exceptions.Unauthorized as first_error:
        try:
            # Discard the possibly-expired token so it is not re-used
            # in the re-authentication attempt, and allow the keyring
            # entry to be overwritten with the fresh token.
            self.unauthenticate()
            self.keyring_saved = False
            self.authenticate()
            kwargs['headers']['X-Auth-Token'] = self.auth_token
            return self._time_request(url, method, **kwargs)
        except exceptions.Unauthorized:
            raise first_error
|
2011-08-03 17:41:33 -04:00
|
|
|
|
2013-12-19 19:26:06 +00:00
|
|
|
def _get_password(self):
|
|
|
|
if not self.password and self.password_func:
|
|
|
|
self.password = self.password_func()
|
|
|
|
return self.password
|
|
|
|
|
2011-08-03 17:41:33 -04:00
|
|
|
def get(self, url, **kwargs):
    """Send an authenticated GET request to *url*."""
    method = 'GET'
    return self._cs_request(url, method, **kwargs)
|
|
|
|
|
|
|
|
def post(self, url, **kwargs):
    """Send an authenticated POST request to *url*."""
    method = 'POST'
    return self._cs_request(url, method, **kwargs)
|
|
|
|
|
|
|
|
def put(self, url, **kwargs):
    """Send an authenticated PUT request to *url*."""
    method = 'PUT'
    return self._cs_request(url, method, **kwargs)
|
|
|
|
|
|
|
|
def delete(self, url, **kwargs):
    """Send an authenticated DELETE request to *url*."""
    method = 'DELETE'
    return self._cs_request(url, method, **kwargs)
|
|
|
|
|
2015-03-14 23:39:05 +00:00
|
|
|
def get_service_url(self, service_type):
    """Return (and cache) the endpoint URL for *service_type*.

    The first lookup consults the service catalog with this client's
    region/endpoint-type/name filters; subsequent calls are served from
    the ``services_url`` cache.  Trailing slashes are stripped.
    """
    try:
        return self.services_url[service_type]
    except KeyError:
        pass
    endpoint = self.service_catalog.url_for(
        attr='region',
        filter_value=self.region_name,
        endpoint_type=self.endpoint_type,
        service_type=service_type,
        service_name=self.service_name,
        volume_service_name=self.volume_service_name,)
    self.services_url[service_type] = endpoint.rstrip('/')
    return self.services_url[service_type]
|
|
|
|
|
2011-10-25 06:28:30 -07:00
|
|
|
def _extract_service_catalog(self, url, resp, body, extract_token=True):
    """See what the auth service told us and process the response.

    We may get redirected to another site, fail or actually get
    back a service catalog with a token and our endpoints.
    """
    status = resp.status_code
    # content must always present
    if status in (200, 201):
        try:
            self.auth_url = url
            self.service_catalog = service_catalog.ServiceCatalog(body)
            if extract_token:
                self.auth_token = self.service_catalog.get_token()
                self.tenant_id = self.service_catalog.get_tenant_id()
            self.management_url = self.get_service_url(self.service_type)
            return None
        except exceptions.AmbiguousEndpoints:
            print(_("Found more than one valid endpoint. Use a more "
                    "restrictive filter"))
            raise
        except KeyError:
            # A catalog body missing expected keys means auth failed.
            raise exceptions.AuthorizationFailure()
        except exceptions.EndpointNotFound:
            print(_("Could not find any suitable endpoint. Correct "
                    "region?"))
            raise
    elif status == 305:
        # Redirected: the replacement endpoint is in the headers.
        return resp.headers['location']
    else:
        raise exceptions.from_response(resp, body, url)
|
2011-09-09 06:33:38 -07:00
|
|
|
|
|
|
|
def _fetch_endpoints_from_auth(self, url):
|
|
|
|
"""We have a token, but don't know the final endpoint for
|
|
|
|
the region. We have to go back to the auth service and
|
|
|
|
ask again. This request requires an admin-level token
|
|
|
|
to work. The proxy token supplied could be from a low-level enduser.
|
|
|
|
|
2011-09-12 06:39:11 -07:00
|
|
|
We can't get this from the keystone service endpoint, we have to use
|
|
|
|
the admin endpoint.
|
|
|
|
|
2011-09-09 06:33:38 -07:00
|
|
|
This will overwrite our admin token with the user token.
|
|
|
|
"""
|
|
|
|
|
2011-09-12 06:39:11 -07:00
|
|
|
# GET ...:5001/v2.0/tokens/#####/endpoints
|
2012-03-06 22:40:28 -06:00
|
|
|
url = '/'.join([url, 'tokens', '%s?belongsTo=%s'
|
|
|
|
% (self.proxy_token, self.proxy_tenant_id)])
|
2014-05-12 19:09:50 +02:00
|
|
|
self._logger.debug("Using Endpoint URL: %s" % url)
|
2012-08-16 17:57:09 -07:00
|
|
|
resp, body = self._time_request(
|
2013-04-02 15:43:47 -04:00
|
|
|
url, "GET", headers={'X-Auth-Token': self.auth_token})
|
2011-10-25 09:34:23 -07:00
|
|
|
return self._extract_service_catalog(url, resp, body,
|
|
|
|
extract_token=False)
|
2011-09-09 06:33:38 -07:00
|
|
|
|
2011-08-03 17:41:33 -04:00
|
|
|
def authenticate(self):
    """Obtain an auth token and management URL.

    Detects the auth version from ``auth_url``, dispatches to the v2
    (keystone or plugin) or v1 flow, honours ``bypass_url`` and proxy
    tokens, and finally persists the credentials via ``_save_keys``.

    :raises exceptions.AuthorizationFailure: when ``auth_url`` is unset.
    :raises exceptions.Unauthorized: when no management URL results.
    """
    if not self.auth_url:
        msg = _("Authentication requires 'auth_url', which should be "
                "specified in '%s'") % self.__class__.__name__
        raise exceptions.AuthorizationFailure(msg)

    split_url = netutils.urlsplit(self.auth_url)
    scheme, netloc, path, query, frag = split_url
    port = split_url.port
    if port is None:
        port = 80

    # The first path segment starting with 'v' names the auth version.
    for segment in path.split('/'):
        if segment.startswith('v'):
            self.version = segment
            break

    # Already authenticated: just make sure the keys are saved.
    if self.auth_token and self.management_url:
        self._save_keys()
        return

    # TODO(sandy): Assume admin endpoint is 35357 for now.
    # Ideally this is going to have to be provided by the service catalog.
    admin_netloc = netloc.replace(':%d' % port, ':%d' % (35357,))
    admin_url = parse.urlunsplit(
        (scheme, admin_netloc, path, query, frag))

    auth_url = self.auth_url
    if self.version == "v2.0":  # FIXME(chris): This should be better.
        while auth_url:
            if not self.auth_system or self.auth_system == 'keystone':
                auth_url = self._v2_auth(auth_url)
            else:
                auth_url = self._plugin_auth(auth_url)

        # Are we acting on behalf of another user via an existing
        # token?  If so, our actual endpoints may be different than
        # that of the admin token.
        if self.proxy_token:
            if self.bypass_url:
                self.set_management_url(self.bypass_url)
            else:
                self._fetch_endpoints_from_auth(admin_url)
            # Since keystone no longer returns the user token with the
            # endpoints any more, we need to replace our service
            # account token with the user token.
            self.auth_token = self.proxy_token
    else:
        try:
            while auth_url:
                auth_url = self._v1_auth(auth_url)
        # In some configurations nova makes redirection to v2.0
        # keystone endpoint.  Also, new location does not contain the
        # real endpoint, only hostname and port.
        except exceptions.AuthorizationFailure:
            if auth_url.find('v2.0') < 0:
                auth_url = auth_url + '/v2.0'
            self._v2_auth(auth_url)

    if self.bypass_url:
        self.set_management_url(self.bypass_url)
    elif not self.management_url:
        raise exceptions.Unauthorized('Nova Client')

    self._save_keys()
|
|
|
|
|
|
|
|
def _save_keys(self):
|
2012-06-18 17:37:45 -03:00
|
|
|
# Store the token/mgmt url in the keyring for later requests.
|
2013-12-19 19:26:06 +00:00
|
|
|
if (self.keyring_saver and self.os_cache and not self.keyring_saved
|
|
|
|
and self.auth_token and self.management_url
|
|
|
|
and self.tenant_id):
|
2013-01-31 11:51:29 -08:00
|
|
|
self.keyring_saver.save(self.auth_token,
|
|
|
|
self.management_url,
|
|
|
|
self.tenant_id)
|
2013-01-30 12:53:35 -08:00
|
|
|
# Don't save it again
|
|
|
|
self.keyring_saved = True
|
2012-06-18 17:37:45 -03:00
|
|
|
|
2011-08-09 01:41:51 +04:00
|
|
|
def _v1_auth(self, url):
    """Authenticate against a v1 endpoint via ``X-Auth-*`` headers.

    Returns the redirect location on a 305, ``None`` on success (after
    storing the token, management URL and auth URL), and raises on any
    other response.
    """
    if self.proxy_token:
        raise exceptions.NoTokenLookupException()

    headers = {'X-Auth-User': self.user,
               'X-Auth-Key': self._get_password()}
    if self.projectid:
        headers['X-Auth-Project-Id'] = self.projectid

    resp, body = self._time_request(url, 'GET', headers=headers)
    status = resp.status_code
    if status in (200, 204):  # in some cases we get No Content
        try:
            self.management_url = \
                resp.headers['x-server-management-url'].rstrip('/')
            self.auth_token = resp.headers['x-auth-token']
            self.auth_url = url
        except (KeyError, TypeError):
            raise exceptions.AuthorizationFailure()
    elif status == 305:
        return resp.headers['location']
    else:
        raise exceptions.from_response(resp, body, url)
|
2011-08-05 23:17:35 -07:00
|
|
|
|
2012-08-02 16:41:47 +02:00
|
|
|
def _plugin_auth(self, auth_url):
|
2013-07-02 16:49:03 -07:00
|
|
|
return self.auth_plugin.authenticate(self, auth_url)
|
2012-08-02 16:41:47 +02:00
|
|
|
|
2011-08-09 01:41:51 +04:00
|
|
|
def _v2_auth(self, url):
|
2011-09-09 06:33:38 -07:00
|
|
|
"""Authenticate against a v2.0 auth service."""
|
2013-01-30 12:53:35 -08:00
|
|
|
if self.auth_token:
|
2013-10-10 06:01:26 +00:00
|
|
|
body = {"auth": {
|
|
|
|
"token": {"id": self.auth_token}}}
|
2014-03-06 12:37:12 +00:00
|
|
|
elif self.user_id:
|
|
|
|
body = {"auth": {
|
|
|
|
"passwordCredentials": {"userId": self.user_id,
|
|
|
|
"password": self._get_password()}}}
|
2013-01-30 12:53:35 -08:00
|
|
|
else:
|
2013-10-10 06:01:26 +00:00
|
|
|
body = {"auth": {
|
|
|
|
"passwordCredentials": {"username": self.user,
|
2013-12-19 19:26:06 +00:00
|
|
|
"password": self._get_password()}}}
|
2011-08-05 23:17:35 -07:00
|
|
|
|
2013-06-27 22:57:10 +01:00
|
|
|
if self.tenant_id:
|
|
|
|
body['auth']['tenantId'] = self.tenant_id
|
|
|
|
elif self.projectid:
|
2011-09-28 11:00:15 -07:00
|
|
|
body['auth']['tenantName'] = self.projectid
|
2011-08-05 23:17:35 -07:00
|
|
|
|
2013-10-10 06:01:26 +00:00
|
|
|
return self._authenticate(url, body)
|
2011-12-13 15:54:09 -06:00
|
|
|
|
2013-03-06 16:41:46 +01:00
|
|
|
def _authenticate(self, url, body, **kwargs):
|
2011-12-13 15:54:09 -06:00
|
|
|
"""Authenticate and extract the service catalog."""
|
2013-07-03 11:08:41 -07:00
|
|
|
method = "POST"
|
2012-02-03 14:48:51 -06:00
|
|
|
token_url = url + "/tokens"
|
2011-11-09 16:54:58 -06:00
|
|
|
|
|
|
|
# Make sure we follow redirects when trying to reach Keystone
|
2013-07-03 11:08:41 -07:00
|
|
|
resp, respbody = self._time_request(
|
2012-12-18 14:05:29 -06:00
|
|
|
token_url,
|
2013-07-03 11:08:41 -07:00
|
|
|
method,
|
2012-12-18 14:05:29 -06:00
|
|
|
body=body,
|
2013-03-06 16:41:46 +01:00
|
|
|
allow_redirects=True,
|
|
|
|
**kwargs)
|
2011-11-09 16:54:58 -06:00
|
|
|
|
2013-07-03 11:08:41 -07:00
|
|
|
return self._extract_service_catalog(url, resp, respbody)
|
2011-12-29 15:37:05 -05:00
|
|
|
|
|
|
|
|
2014-04-08 11:40:41 +10:00
|
|
|
def _construct_http_client(username=None, password=None, project_id=None,
                           auth_url=None, insecure=False, timeout=None,
                           proxy_tenant_id=None, proxy_token=None,
                           region_name=None, endpoint_type='publicURL',
                           extensions=None, service_type='compute',
                           service_name=None, volume_service_name=None,
                           timings=False, bypass_url=None, os_cache=False,
                           no_cache=True, http_log_debug=False,
                           auth_system='keystone', auth_plugin=None,
                           auth_token=None, cacert=None, tenant_id=None,
                           user_id=None, connection_pool=False, session=None,
                           auth=None, user_agent='python-novaclient',
                           interface=None, api_version=None, **kwargs):
    """Build the transport client.

    Returns a ``SessionClient`` when a keystoneclient *session* is
    supplied, otherwise falls back to the legacy ``HTTPClient`` driven
    by the individual credential arguments.
    """
    if session:
        return SessionClient(session=session,
                             auth=auth,
                             interface=interface or endpoint_type,
                             service_type=service_type,
                             region_name=region_name,
                             service_name=service_name,
                             user_agent=user_agent,
                             timings=timings,
                             api_version=api_version,
                             **kwargs)

    # FIXME(jamielennox): username and password are now optional. Need
    # to test that they were provided in this mode.
    return HTTPClient(username,
                      password,
                      user_id=user_id,
                      projectid=project_id,
                      tenant_id=tenant_id,
                      auth_url=auth_url,
                      auth_token=auth_token,
                      insecure=insecure,
                      timeout=timeout,
                      auth_system=auth_system,
                      auth_plugin=auth_plugin,
                      proxy_token=proxy_token,
                      proxy_tenant_id=proxy_tenant_id,
                      region_name=region_name,
                      endpoint_type=endpoint_type,
                      service_type=service_type,
                      service_name=service_name,
                      volume_service_name=volume_service_name,
                      timings=timings,
                      bypass_url=bypass_url,
                      os_cache=os_cache,
                      http_log_debug=http_log_debug,
                      cacert=cacert,
                      connection_pool=connection_pool,
                      api_version=api_version)
|
2014-04-08 11:40:41 +10:00
|
|
|
|
|
|
|
|
2015-04-07 15:23:14 +08:00
|
|
|
def discover_extensions(version):
    """Return ``ext.Extension`` objects for every discoverable extension.

    Extensions are found on the python path, in the versioned contrib
    directory, and via 'novaclient.extension' entry points.  A plain
    *version* value is coerced to an ``api_versions.APIVersion`` first.
    """
    if not isinstance(version, api_versions.APIVersion):
        version = api_versions.get_api_version(version)
    discovered = itertools.chain(
        _discover_via_python_path(),
        _discover_via_contrib_path(version),
        _discover_via_entry_points())
    return [ext.Extension(name, module) for name, module in discovered]
|
|
|
|
|
|
|
|
|
|
|
|
def _discover_via_python_path():
|
|
|
|
for (module_loader, name, _ispkg) in pkgutil.iter_modules():
|
|
|
|
if name.endswith('_python_novaclient_ext'):
|
|
|
|
if not hasattr(module_loader, 'load_module'):
|
|
|
|
# Python 2.6 compat: actually get an ImpImporter obj
|
|
|
|
module_loader = module_loader.find_module(name)
|
|
|
|
|
|
|
|
module = module_loader.load_module(name)
|
|
|
|
if hasattr(module, 'extension_name'):
|
|
|
|
name = module.extension_name
|
|
|
|
|
|
|
|
yield name, module
|
|
|
|
|
|
|
|
|
|
|
|
def _discover_via_contrib_path(version):
|
|
|
|
module_path = os.path.dirname(os.path.abspath(__file__))
|
2015-04-02 16:37:59 +03:00
|
|
|
ext_path = os.path.join(module_path, "v%s" % version.ver_major, 'contrib')
|
2015-04-07 15:23:14 +08:00
|
|
|
ext_glob = os.path.join(ext_path, "*.py")
|
|
|
|
|
|
|
|
for ext_path in glob.iglob(ext_glob):
|
|
|
|
name = os.path.basename(ext_path)[:-3]
|
|
|
|
|
|
|
|
if name == "__init__":
|
|
|
|
continue
|
|
|
|
|
|
|
|
module = imp.load_source(name, ext_path)
|
|
|
|
yield name, module
|
|
|
|
|
|
|
|
|
|
|
|
def _discover_via_entry_points():
    """Yield (name, module) for each 'novaclient.extension' entry point."""
    for entry_point in pkg_resources.iter_entry_points('novaclient.extension'):
        yield entry_point.name, entry_point.load()
|
|
|
|
|
|
|
|
|
2015-04-02 16:37:59 +03:00
|
|
|
def _get_client_class_and_version(version):
    """Normalize *version* and resolve the matching Client class.

    :returns: a ``(APIVersion, class)`` tuple.
    :raises exceptions.UnsupportedVersion: for the 'latest' pseudo
        version, which must be made explicit by the caller.
    """
    if isinstance(version, api_versions.APIVersion):
        api_versions.check_major_version(version)
    else:
        version = api_versions.get_api_version(version)
    if version.is_latest():
        raise exceptions.UnsupportedVersion(
            _("The version should be explicit, not latest."))
    client_class = importutils.import_class(
        "novaclient.v%s.client.Client" % version.ver_major)
    return version, client_class
|
2015-06-10 13:05:18 +01:00
|
|
|
|
|
|
|
|
2011-12-29 15:37:05 -05:00
|
|
|
def get_client_class(version):
    """Returns Client class based on given version.

    Deprecated: use ``novaclient.client.Client`` instead.
    """
    warnings.warn(_LW("'get_client_class' is deprecated. "
                      "Please use `novaclient.client.Client` instead."))
    _unused_version, client_class = _get_client_class_and_version(version)
    return client_class
|
2011-12-29 15:37:05 -05:00
|
|
|
|
|
|
|
|
|
|
|
def Client(version, *args, **kwargs):
    """Initialize client object based on given version.

    HOW-TO:
    The simplest way to create a client instance is initialization with
    your credentials::

        >>> from novaclient import client
        >>> nova = client.Client(VERSION, USERNAME, PASSWORD,
        ...                      PROJECT_ID, AUTH_URL)

    ``VERSION`` may be a string or a
    ``novaclient.api_versions.APIVersion`` object.  As a string, use
    ``1.1`` (deprecated now), ``2`` or ``2.X`` (where X is a
    microversion).

    Alternatively, you can create a client instance using the
    keystoneclient session API.  See "The novaclient Python API" page
    at python-novaclient's doc.
    """
    api_version, client_class = _get_client_class_and_version(version)
    # Any caller-supplied 'direct_use' is discarded; constructing through
    # this factory always passes direct_use=False.
    kwargs.pop("direct_use", None)
    return client_class(api_version=api_version, direct_use=False,
                        *args, **kwargs)
|