Make OpenStackCloud a subclass of Connection
The shade code lives in openstacksdk now and is a part of Connection. Start making shade a subclass of that for backwards compat. Mostly only deal with the constructor for now, but go ahead and do list_flavors and get_flavor_by_id while we're looking at the extra config override. Also remove list_projects because of its use of the _normalize module. keystoneauth lower-constraint needs to be bumped because of status_code_retries. Change-Id: Ibbe6e167d6285b30a9adbd0c5a89bc679c5645f3
This commit is contained in:
parent
7460ad36d8
commit
ab3f400064
@ -67,13 +67,13 @@ Returned Resources
|
||||
==================
|
||||
|
||||
Complex objects returned to the caller must be a `munch.Munch` type. The
|
||||
`shade._adapter.Adapter` class makes resources into `munch.Munch`.
|
||||
`openstack._adapter.Adapter` class makes resources into `munch.Munch`.
|
||||
|
||||
All objects should be normalized. It is shade's purpose in life to make
|
||||
OpenStack consistent for end users, and this means not trusting the clouds
|
||||
to return consistent objects. There should be a normalize function in
|
||||
`shade/_normalize.py` that is applied to objects before returning them to
|
||||
the user. See :doc:`../user/model` for further details on object model
|
||||
`openstack/cloud/_normalize.py` that is applied to objects before returning
|
||||
them to the user. See :doc:`../user/model` for further details on object model
|
||||
requirements.
|
||||
|
||||
Fields should not be in the normalization contract if we cannot commit to
|
||||
|
@ -13,13 +13,13 @@ iso8601==0.1.12
|
||||
jmespath==0.9.3
|
||||
jsonpatch==1.21
|
||||
jsonpointer==2.0
|
||||
keystoneauth1==3.4.0
|
||||
keystoneauth1==3.8.0
|
||||
linecache2==1.0.0
|
||||
mock==2.0.0
|
||||
mox3==0.20.0
|
||||
munch==2.2.0
|
||||
netifaces==0.10.6
|
||||
openstacksdk==0.13.0
|
||||
openstacksdk==0.15.0
|
||||
os-client-config==1.28.0
|
||||
os-service-types==1.2.0
|
||||
oslotest==3.2.0
|
||||
|
@ -3,5 +3,7 @@
|
||||
# process, which may cause wedges in the gate later.
|
||||
pbr!=2.1.0,>=2.0.0 # Apache-2.0
|
||||
|
||||
# shade depends on os-client-config in addition to openstacksdk so that it
|
||||
# can continue to provide the make_legacy_client functions.
|
||||
os-client-config>=1.28.0 # Apache-2.0
|
||||
openstacksdk>=0.13.0 # Apache-2.0
|
||||
openstacksdk>=0.15.0 # Apache-2.0
|
||||
|
@ -1,164 +0,0 @@
|
||||
# Copyright (c) 2016 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
''' Wrapper around keystoneauth Session to wrap calls in TaskManager '''
|
||||
|
||||
import functools
|
||||
from keystoneauth1 import adapter
|
||||
from six.moves import urllib
|
||||
|
||||
from shade import _log
|
||||
from shade import exc
|
||||
from shade import task_manager
|
||||
|
||||
|
||||
def extract_name(url):
    '''Produce a key name to use in logging/metrics from the URL path.

    We want to be able to log/metric sane general things, so we pull
    the url apart to generate names. The function returns a list because
    there are two different ways in which the elements want to be combined
    below (one for logging, one for statsd).

    Some examples are likely useful:

    /servers -> ['servers']
    /servers/{id} -> ['servers']
    /servers/{id}/os-security-groups -> ['servers', 'os-security-groups']
    /v2.0/networks.json -> ['networks']
    '''
    path = urllib.parse.urlparse(url).path.strip()
    # Drop a single leading / so list indexes of interesting things stay
    # consistent.
    if path.startswith('/'):
        path = path[1:]

    # Neutron historically appends .json to its URLs.
    if path.endswith('.json'):
        path = path[:-len('.json')]

    segments = path.split('/')
    if segments[-1] == 'detail':
        # Detail calls keep their trailing marker:
        # GET /servers/detail -> ['servers', 'detail']
        name_parts = segments[-2:]
    else:
        # Drop a leading version segment so that
        # GET /v2.0/networks -> ['networks']
        if segments[0] in ('v1', 'v2', 'v2.0'):
            segments = segments[1:]
        # Every other segment is a resource name; the segments between
        # them are ids, e.g. GET /servers/{id}/os-security-groups
        #   -> ['servers', 'os-security-groups']
        name_parts = [seg for seg in segments[::2] if seg]

    # Keystone token fetching is a special case; name it "tokens".
    if path.endswith('tokens'):
        name_parts = ['tokens']

    # Hitting the root of an endpoint is version discovery.
    if not name_parts:
        name_parts = ['discovery']

    # Strip out anything that's empty or None.
    return [part for part in name_parts if part]
|
||||
|
||||
|
||||
class ShadeAdapter(adapter.Adapter):
    """keystoneauth Adapter that routes requests through a TaskManager.

    Each request is wrapped in a dynamically named Task and submitted to
    the manager; responses are checked for errors, decoded to JSON where
    appropriate, and their request ids logged on 'shade.request_ids'.
    """

    def __init__(self, shade_logger, manager, *args, **kwargs):
        super(ShadeAdapter, self).__init__(*args, **kwargs)
        self.shade_logger = shade_logger
        self.manager = manager
        self.request_log = _log.setup_logging('shade.request_ids')

    def _log_request_id(self, response, obj=None):
        # Log the request id and object id in a specific logger. This way
        # someone can turn it on if they're interested in this kind of
        # tracing.
        request_id = response.headers.get('x-openstack-request-id')
        if not request_id:
            return response
        tmpl = "{meth} call to {service} for {url} used request id {req}"
        kwargs = dict(
            meth=response.request.method,
            service=self.service_type,
            url=response.request.url,
            req=request_id)

        if isinstance(obj, dict):
            obj_id = obj.get('id', obj.get('uuid'))
            if obj_id:
                kwargs['obj_id'] = obj_id
                tmpl += " returning object {obj_id}"
        self.request_log.debug(tmpl.format(**kwargs))
        return response

    def _munch_response(self, response, result_key=None, error_message=None):
        # Raise a shade exception for HTTP error statuses before decoding.
        exc.raise_from_response(response, error_message=error_message)

        if not response.content:
            # This doesn't have any content
            return self._log_request_id(response)

        # Some REST calls do not return json content. Don't decode it.
        # BUG FIX: default to '' — a missing Content-Type header returned
        # None, and `'application/json' not in None` raises TypeError.
        if 'application/json' not in response.headers.get('Content-Type', ''):
            return self._log_request_id(response)

        try:
            result_json = response.json()
            self._log_request_id(response, result_json)
        except Exception:
            # Body claimed to be JSON but didn't decode; hand back the
            # raw response rather than failing.
            return self._log_request_id(response)
        return result_json

    def request(
            self, url, method, run_async=False, error_message=None,
            *args, **kwargs):
        """Submit the request as a named task; return the munched result,
        or the raw task response when run_async is True."""
        name_parts = extract_name(url)
        name = '.'.join([self.service_type, method] + name_parts)
        class_name = "".join([
            part.lower().capitalize() for part in name.split('.')])

        request_method = functools.partial(
            super(ShadeAdapter, self).request, url, method)

        class RequestTask(task_manager.BaseTask):

            def __init__(self, **kw):
                super(RequestTask, self).__init__(**kw)
                self.name = name
                # Rename the class so task logging/metrics show a
                # per-endpoint name instead of "RequestTask".
                self.__class__.__name__ = str(class_name)
                self.run_async = run_async

            def main(self, client):
                # Let _munch_response decide how to raise, not requests.
                self.args.setdefault('raise_exc', False)
                return request_method(**self.args)

        response = self.manager.submit_task(RequestTask(**kwargs))
        if run_async:
            return response
        else:
            return self._munch_response(response, error_message=error_message)

    def _version_matches(self, version):
        # True when the discovered major API version equals `version`.
        api_version = self.get_api_major_version()
        if api_version:
            return api_version[0] == version
        return False
|
@ -13,9 +13,9 @@ import importlib
|
||||
import warnings
|
||||
|
||||
from keystoneauth1 import plugin
|
||||
from openstack.cloud import _utils
|
||||
from os_client_config import constructors
|
||||
|
||||
from shade import _utils
|
||||
from shade import exc
|
||||
|
||||
|
||||
|
1109
shade/_normalize.py
1109
shade/_normalize.py
File diff suppressed because it is too large
Load Diff
759
shade/_utils.py
759
shade/_utils.py
@ -1,759 +0,0 @@
|
||||
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import contextlib
|
||||
import fnmatch
|
||||
import inspect
|
||||
import jmespath
|
||||
import munch
|
||||
import netifaces
|
||||
import re
|
||||
import six
|
||||
import sre_constants
|
||||
import sys
|
||||
import time
|
||||
import uuid
|
||||
|
||||
from decorator import decorator
|
||||
|
||||
from shade import _log
|
||||
from shade import exc
|
||||
from shade import meta
|
||||
|
||||
_decorated_methods = []
|
||||
|
||||
|
||||
def _exc_clear():
|
||||
"""Because sys.exc_clear is gone in py3 and is not in six."""
|
||||
if sys.version_info[0] == 2:
|
||||
sys.exc_clear()
|
||||
|
||||
|
||||
def _iterate_timeout(timeout, message, wait=2):
    """Generator that yields until a timeout expires, then raises.

    Yields an incrementing counter, sleeping ``wait`` seconds between
    yields. Once ``timeout`` seconds have elapsed (``None`` means never),
    raises OpenStackCloudTimeout carrying ``message``.
    """
    log = _log.setup_logging('shade.iterate_timeout')

    try:
        # None flows in naturally from the per-resource cache settings;
        # treating it as "no value given" here is friendlier than making
        # every caller normalize it.
        if wait is None:
            wait = 2
        elif wait == 0:
            # A zero wait would busy-loop; sleep briefly instead, capped
            # below the timeout when one is set.
            wait = 0.1 if timeout is None else min(0.1, timeout)
        wait = float(wait)
    except ValueError:
        raise exc.OpenStackCloudException(
            "Wait value must be an int or float value. {wait} given"
            " instead".format(wait=wait))

    deadline = None if timeout is None else time.time() + timeout
    count = 0
    while deadline is None or time.time() < deadline:
        count += 1
        yield count
        log.debug('Waiting %s seconds', wait)
        time.sleep(wait)
    raise exc.OpenStackCloudTimeout(message)
|
||||
|
||||
|
||||
def _make_unicode(input):
|
||||
"""Turn an input into unicode unconditionally
|
||||
|
||||
:param input:
|
||||
A unicode, string or other object
|
||||
"""
|
||||
try:
|
||||
if isinstance(input, unicode):
|
||||
return input
|
||||
if isinstance(input, str):
|
||||
return input.decode('utf-8')
|
||||
else:
|
||||
# int, for example
|
||||
return unicode(input)
|
||||
except NameError:
|
||||
# python3!
|
||||
return str(input)
|
||||
|
||||
|
||||
def _dictify_resource(resource):
|
||||
if isinstance(resource, list):
|
||||
return [_dictify_resource(r) for r in resource]
|
||||
else:
|
||||
if hasattr(resource, 'toDict'):
|
||||
return resource.toDict()
|
||||
else:
|
||||
return resource
|
||||
|
||||
|
||||
def _filter_list(data, name_or_id, filters):
    """Filter a list by name/ID and arbitrary meta data.

    :param list data:
        The list of dictionary data to filter. It is expected that
        each dictionary contains an 'id' and 'name'
        key if a value for name_or_id is given.
    :param string name_or_id:
        The name or ID of the entity being filtered. Can be a glob pattern,
        such as 'nb01*'.
    :param filters:
        A dictionary of meta data to use for further filtering. Elements
        of this dictionary may, themselves, be dictionaries. Example::

            {
              'last_name': 'Smith',
              'other': {
                  'gender': 'Female'
              }
            }
        OR
        A string containing a jmespath expression for further filtering.
    """
    # The logger is shade.fnmatch to allow a user/operator to configure
    # logging not to communicate about fnmatch misses (they shouldn't be
    # too spammy, but one never knows)
    log = _log.setup_logging('shade.fnmatch')
    if name_or_id:
        # name_or_id might already be unicode
        name_or_id = _make_unicode(name_or_id)
        identifier_matches = []
        bad_pattern = False
        try:
            fn_reg = re.compile(fnmatch.translate(name_or_id))
        except sre_constants.error:
            # If the fnmatch re doesn't compile, then we don't care,
            # but log it in case the user DID pass a pattern but did
            # it poorly and wants to know what went wrong with their
            # search
            fn_reg = None
        for e in data:
            e_id = _make_unicode(e.get('id', None))
            e_name = _make_unicode(e.get('name', None))

            if ((e_id and e_id == name_or_id) or
                    (e_name and e_name == name_or_id)):
                identifier_matches.append(e)
            else:
                # Only try fnmatch if we don't match exactly
                if not fn_reg:
                    # If we don't have a pattern, skip this, but set the
                    # flag so that we log the bad pattern
                    bad_pattern = True
                    continue
                if ((e_id and fn_reg.match(e_id)) or
                        (e_name and fn_reg.match(e_name))):
                    identifier_matches.append(e)
        if not identifier_matches and bad_pattern:
            log.debug("Bad pattern passed to fnmatch", exc_info=True)
        data = identifier_matches

    if not filters:
        return data

    # A string filter is treated as a jmespath expression.
    if isinstance(filters, six.string_types):
        return jmespath.search(filters, data)

    def _dict_filter(f, d):
        # Recursive subset match: every key in f must match in d.
        if not d:
            return False
        for key in f.keys():
            if isinstance(f[key], dict):
                if not _dict_filter(f[key], d.get(key, None)):
                    return False
            elif d.get(key, None) != f[key]:
                return False
        return True

    filtered = []
    for e in data:
        filtered.append(e)
        for key in filters.keys():
            if isinstance(filters[key], dict):
                if not _dict_filter(filters[key], e.get(key, None)):
                    filtered.pop()
                    break
            elif e.get(key, None) != filters[key]:
                filtered.pop()
                break
    return filtered
|
||||
|
||||
|
||||
def _get_entity(cloud, resource, name_or_id, filters, **kwargs):
|
||||
"""Return a single entity from the list returned by a given method.
|
||||
|
||||
:param object cloud:
|
||||
The controller class (Example: the main OpenStackCloud object) .
|
||||
:param string or callable resource:
|
||||
The string that identifies the resource to use to lookup the
|
||||
get_<>_by_id or search_<resource>s methods(Example: network)
|
||||
or a callable to invoke.
|
||||
:param string name_or_id:
|
||||
The name or ID of the entity being filtered or an object or dict.
|
||||
If this is an object/dict with an 'id' attr/key, we return it and
|
||||
bypass resource lookup.
|
||||
:param filters:
|
||||
A dictionary of meta data to use for further filtering.
|
||||
OR
|
||||
A string containing a jmespath expression for further filtering.
|
||||
Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]"
|
||||
"""
|
||||
|
||||
# Sometimes in the control flow of shade, we already have an object
|
||||
# fetched. Rather than then needing to pull the name or id out of that
|
||||
# object, pass it in here and rely on caching to prevent us from making
|
||||
# an additional call, it's simple enough to test to see if we got an
|
||||
# object and just short-circuit return it.
|
||||
|
||||
if (hasattr(name_or_id, 'id') or
|
||||
(isinstance(name_or_id, dict) and 'id' in name_or_id)):
|
||||
return name_or_id
|
||||
|
||||
# If a uuid is passed short-circuit it calling the
|
||||
# get_<resorce_name>_by_id method
|
||||
if getattr(cloud, 'use_direct_get', False) and _is_uuid_like(name_or_id):
|
||||
get_resource = getattr(cloud, 'get_%s_by_id' % resource, None)
|
||||
if get_resource:
|
||||
return get_resource(name_or_id)
|
||||
|
||||
search = resource if callable(resource) else getattr(
|
||||
cloud, 'search_%ss' % resource, None)
|
||||
if search:
|
||||
entities = search(name_or_id, filters, **kwargs)
|
||||
if entities:
|
||||
if len(entities) > 1:
|
||||
raise exc.OpenStackCloudException(
|
||||
"Multiple matches found for %s" % name_or_id)
|
||||
return entities[0]
|
||||
return None
|
||||
|
||||
|
||||
def normalize_keystone_services(services):
    """Normalize the structure of keystone services

    In keystone v2, there is a field called "service_type". In v3, it's
    "type". Just make the returned dict have both.

    :param list services: A list of keystone service dicts

    :returns: A list of normalized dicts.
    """
    normalized = []
    for service in services:
        # Prefer the v3 spelling, fall back to v2.
        service_type = service.get('type', service.get('service_type'))
        normalized.append({
            'id': service['id'],
            'name': service['name'],
            'description': service.get('description', None),
            'type': service_type,
            'service_type': service_type,
            'enabled': service['enabled'],
        })
    return meta.obj_list_to_munch(normalized)
|
||||
|
||||
|
||||
def localhost_supports_ipv6():
    """Determine whether the local host supports IPv6

    We look for a default route that supports the IPv6 address family,
    and assume that if it is present, this host has globally routable
    IPv6 connectivity.
    """
    try:
        return netifaces.AF_INET6 in netifaces.gateways()['default']
    except AttributeError:
        # netifaces without AF_INET6 / gateways support
        return False
|
||||
|
||||
|
||||
def normalize_users(users):
    """Flatten raw keystone user dicts to the fields shade exposes."""
    normalized = []
    for user in users:
        normalized.append(dict(
            id=user.get('id'),
            email=user.get('email'),
            name=user.get('name'),
            username=user.get('username'),
            # keystone v2 spells the default project "tenantId"
            default_project_id=user.get('default_project_id',
                                        user.get('tenantId')),
            domain_id=user.get('domain_id'),
            enabled=user.get('enabled'),
            description=user.get('description'),
        ))
    return meta.obj_list_to_munch(normalized)
|
||||
|
||||
|
||||
def normalize_domains(domains):
    """Flatten raw keystone domain dicts to the fields shade exposes."""
    normalized = []
    for domain in domains:
        normalized.append(dict(
            id=domain.get('id'),
            name=domain.get('name'),
            description=domain.get('description'),
            enabled=domain.get('enabled'),
        ))
    return meta.obj_list_to_munch(normalized)
|
||||
|
||||
|
||||
def normalize_groups(domains):
    """Normalize Identity groups."""
    # NOTE: the parameter is historically named "domains" though it
    # carries group dicts; kept for caller compatibility.
    normalized = []
    for group in domains:
        normalized.append(dict(
            id=group.get('id'),
            name=group.get('name'),
            description=group.get('description'),
            domain_id=group.get('domain_id'),
        ))
    return meta.obj_list_to_munch(normalized)
|
||||
|
||||
|
||||
def normalize_role_assignments(assignments):
    """Put role_assignments into a form that works with search/get interface.

    Role assignments have the structure::

        [
            {
                "role": {"id": "--role-id--"},
                "scope": {"domain": {"id": "--domain-id--"}},
                "user": {"id": "--user-id--"}
            },
        ]

    Which is hard to work with in the rest of our interface. Map this to be::

        [
            {
                "id": "--role-id--",
                "domain": "--domain-id--",
                "user": "--user-id--",
            }
        ]

    Scope can be "domain" or "project" and "user" can also be "group".

    :param list assignments: A list of dictionaries of role assignments.

    :returns: A list of flattened/normalized role assignment dicts.
    """
    flattened = []
    for assignment in assignments:
        entry = munch.Munch({'id': assignment['role']['id']})
        # Hoist the scope id (project or domain) to the top level.
        for scope in ('project', 'domain'):
            if scope in assignment['scope']:
                entry[scope] = assignment['scope'][scope]['id']
        # Hoist the assignee id (user or group) to the top level.
        for assignee in ('user', 'group'):
            if assignee in assignment:
                entry[assignee] = assignment[assignee]['id']
        flattened.append(entry)
    return flattened
|
||||
|
||||
|
||||
def normalize_flavor_accesses(flavor_accesses):
    """Normalize Flavor access list."""
    return [
        munch.Munch(
            flavor_id=acl.get('flavor_id'),
            # older clouds report the project under "tenant_id"
            project_id=acl.get('project_id') or acl.get('tenant_id'),
        )
        for acl in flavor_accesses
    ]
|
||||
|
||||
|
||||
def valid_kwargs(*valid_args):
    # Decorator validating that every **kwargs key passed to the wrapped
    # function is either a named parameter or listed in valid_args.
    #
    # Useful when the API must distinguish "argument omitted" from
    # "argument passed as None" while still rejecting typo'd keywords.
    #
    # Example usage:
    #
    # @valid_kwargs('opt_arg1', 'opt_arg2')
    # def my_func(self, mandatory_arg1, mandatory_arg2, **kwargs):
    #     ...
    #
    @decorator
    def func_wrapper(func, *args, **kwargs):
        argspec = inspect.getargspec(func)
        for k in kwargs:
            if k not in argspec.args[1:] and k not in valid_args:
                raise TypeError(
                    "{f}() got an unexpected keyword argument "
                    "'{arg}'".format(f=inspect.stack()[1][3], arg=k))
        return func(*args, **kwargs)
    return func_wrapper
|
||||
|
||||
|
||||
def cache_on_arguments(*cache_on_args, **cache_on_kwargs):
    # Decorator factory wiring a method into the cache region selected by
    # the optional ``resource`` keyword (via obj._get_cache).
    _cache_name = cache_on_kwargs.pop('resource', None)

    def _inner_cache_on_arguments(func):
        def _cache_decorator(obj, *args, **kwargs):
            # Bind the cache-backed version of the method lazily so the
            # per-resource cache region is resolved on the instance at
            # call time.
            cached = obj._get_cache(_cache_name).cache_on_arguments(
                *cache_on_args, **cache_on_kwargs)(
                    func.__get__(obj, type(obj)))
            return cached(*args, **kwargs)

        def invalidate(obj, *args, **kwargs):
            # Drop the cached value for these arguments.
            return obj._get_cache(
                _cache_name).cache_on_arguments()(func).invalidate(
                    *args, **kwargs)

        _cache_decorator.invalidate = invalidate
        _cache_decorator.func = func
        _decorated_methods.append(func.__name__)

        return _cache_decorator
    return _inner_cache_on_arguments
|
||||
|
||||
|
||||
@contextlib.contextmanager
def shade_exceptions(error_message=None):
    """Context manager for dealing with shade exceptions.

    :param string error_message: String to use for the exception message
        content on non-OpenStackCloudExceptions.

    OpenStackCloudException errors pass through untouched, so code in the
    context may raise them without double-wrapping. Any other exception
    is re-raised as OpenStackCloudException, carrying error_message (or
    the original exception's text when no message was given).
    """
    try:
        yield
    except exc.OpenStackCloudException:
        raise
    except Exception as e:
        raise exc.OpenStackCloudException(
            str(e) if error_message is None else error_message)
|
||||
|
||||
|
||||
def safe_dict_min(key, data):
    """Safely find the minimum for a given key in a list of dict objects.

    This will find the minimum integer value for specific dictionary key
    across a list of dictionaries. The values for the given key MUST be
    integers, or string representations of an integer.

    The dictionary key does not have to be present in all (or any)
    of the elements/dicts within the data set.

    :param string key: The dictionary key to search for the minimum value.
    :param list data: List of dicts to use for the data set.

    :returns: None if the field was not found in any elements, or
        the minimum value for the field otherwise.
    """
    min_value = None
    for d in data:
        # Skip entries without the key or with an explicit None.
        if key not in d or d[key] is None:
            continue
        try:
            val = int(d[key])
        except ValueError:
            raise exc.OpenStackCloudException(
                "Search for minimum value failed. "
                "Value for {key} is not an integer: {value}".format(
                    key=key, value=d[key])
            )
        if min_value is None or val < min_value:
            min_value = val
    return min_value
|
||||
|
||||
|
||||
def safe_dict_max(key, data):
    """Safely find the maximum for a given key in a list of dict objects.

    This will find the maximum integer value for specific dictionary key
    across a list of dictionaries. The values for the given key MUST be
    integers, or string representations of an integer.

    The dictionary key does not have to be present in all (or any)
    of the elements/dicts within the data set.

    :param string key: The dictionary key to search for the maximum value.
    :param list data: List of dicts to use for the data set.

    :returns: None if the field was not found in any elements, or
        the maximum value for the field otherwise.
    """
    max_value = None
    for d in data:
        # Skip entries without the key or with an explicit None.
        if key not in d or d[key] is None:
            continue
        try:
            val = int(d[key])
        except ValueError:
            raise exc.OpenStackCloudException(
                "Search for maximum value failed. "
                "Value for {key} is not an integer: {value}".format(
                    key=key, value=d[key])
            )
        if max_value is None or val > max_value:
            max_value = val
    return max_value
|
||||
|
||||
|
||||
def _call_client_and_retry(client, url, retry_on=None,
                           call_retries=3, retry_wait=2,
                           **kwargs):
    """Method to provide retry operations.

    Some APIs utilize HTTP errors on certain operations to indicate that
    the resource is presently locked, and as such this mechanism provides
    the ability to retry upon known error codes.

    :param object client: The client method, such as:
                          ``self.baremetal_client.post``
    :param string url: The URL to perform the operation upon.
    :param integer retry_on: A list of error codes that can be retried on.
                             The method also supports a single integer to be
                             defined.
    :param integer call_retries: The number of times to retry the call upon
                                 the error code defined by the 'retry_on'
                                 parameter. Default: 3
    :param integer retry_wait: The time in seconds to wait between retry
                               attempts. Default: 2

    :returns: The object returned by the client call.
    """
    # NOTE(TheJulia): This method, as of this note, does not have direct
    # unit tests, although is fairly well tested by the tests checking
    # retry logic in test_baremetal_node.py.
    log = _log.setup_logging('shade.http')

    if isinstance(retry_on, int):
        retry_on = [retry_on]

    count = 0
    while count < call_retries:
        count += 1
        try:
            ret_val = client(url, **kwargs)
        except exc.OpenStackCloudHTTPError as e:
            if (retry_on is not None and
                    e.response.status_code in retry_on):
                # BUG FIX: this message used {}-style placeholders with
                # logging's %-interpolation, so the code and wait time
                # never appeared in the output.
                log.debug('Received retryable error %(err)s, waiting '
                          '%(wait)s seconds to retry',
                          {'err': e.response.status_code,
                           'wait': retry_wait})
                time.sleep(retry_wait)
                continue
            else:
                raise
        # Break out of the loop, since the loop should only continue
        # when we encounter a known connection error.
        return ret_val
    # NOTE(review): if every attempt hits a retryable error, control
    # falls off the loop and None is returned -- confirm callers expect
    # this rather than an exception.
|
||||
|
||||
|
||||
def parse_range(value):
    """Parse a numerical range string.

    Breakdown a range expression into its operator and numerical parts.
    This expression must be a string. Valid values must be an integer
    string, optionally preceded by one of the following operators::

        - "<"  : Less than
        - ">"  : Greater than
        - "<=" : Less than or equal to
        - ">=" : Greater than or equal to

    Some examples of valid values and function return values::

        - "1024"  : returns (None, 1024)
        - "<5"    : returns ("<", 5)
        - ">=100" : returns (">=", 100)

    :param string value: The range expression to be parsed.

    :returns: A tuple with the operator string (or None if no operator
        was given) and the integer value. None is returned if parsing
        failed.
    """
    if value is None:
        return None

    # BUG FIX: use a raw string -- '\d' in a plain string is an invalid
    # escape sequence (DeprecationWarning, SyntaxWarning on newer Pythons).
    range_exp = re.match(r'(<|>|<=|>=){0,1}(\d+)$', value)
    if range_exp is None:
        return None

    op = range_exp.group(1)
    num = int(range_exp.group(2))
    return (op, num)
|
||||
|
||||
|
||||
def range_filter(data, key, range_exp):
    """Filter a list by a single range expression.

    :param list data: List of dictionaries to be searched.
    :param string key: Key name to search within the data set.
    :param string range_exp: The expression describing the range of values.

    :returns: A list subset of the original data set.
    :raises: OpenStackCloudException on invalid range expressions.
    """
    range_exp = str(range_exp).upper()

    # "MIN"/"MAX" select every entry holding the extreme value.
    if range_exp == "MIN":
        key_min = safe_dict_min(key, data)
        if key_min is None:
            return []
        return [d for d in data if int(d[key]) == key_min]
    if range_exp == "MAX":
        key_max = safe_dict_max(key, data)
        if key_max is None:
            return []
        return [d for d in data if int(d[key]) == key_max]

    # Not looking for a min or max, so a range or exact value must
    # have been supplied.
    val_range = parse_range(range_exp)

    # If parsing the range fails, it must be a bad value.
    if val_range is None:
        raise exc.OpenStackCloudException(
            "Invalid range value: {value}".format(value=range_exp))

    op, bound = val_range
    if not op:
        # Exact number match
        return [d for d in data if int(d[key]) == bound]

    # Range matching via a comparator table.
    comparators = {
        '<': lambda lhs, rhs: lhs < rhs,
        '>': lambda lhs, rhs: lhs > rhs,
        '<=': lambda lhs, rhs: lhs <= rhs,
        '>=': lambda lhs, rhs: lhs >= rhs,
    }
    compare = comparators[op]
    return [d for d in data if compare(int(d[key]), bound)]
|
||||
|
||||
|
||||
def generate_patches_from_kwargs(operation, **kwargs):
    """Given a set of parameters, returns a list with the
    valid patch values.

    :param string operation: The operation to perform.
    :param list kwargs: Dict of parameters.

    :returns: A list with the right patch values, ordered by path.
    """
    patches = [
        {'op': operation,
         'value': value,
         'path': '/%s' % field}
        for field, value in kwargs.items()
    ]
    # Sort with an explicit key: sorting dicts directly relies on dict
    # ordering comparisons, which raise TypeError on Python 3 as soon as
    # more than one patch is present. 'path' is unique per kwarg, so this
    # also keeps the output deterministic.
    return sorted(patches, key=lambda patch: patch['path'])
|
||||
|
||||
|
||||
class FileSegment(object):
    """File-like object exposing a byte range of a file, to pass to requests.

    ``read`` only ever returns bytes from ``[offset, offset + length)`` of
    the backing file; ``reset`` rewinds to the start of the segment so the
    same bytes can be re-sent (e.g. on a retried upload).
    """

    def __init__(self, filename, offset, length):
        # Path of the backing file (kept as a public attribute).
        self.filename = filename
        # Byte offset in the file where this segment starts.
        self.offset = offset
        # Number of bytes in this segment.
        self.length = length
        # Read position relative to the start of the segment, tracked
        # separately from the underlying file position.
        self.pos = 0
        self._file = open(filename, 'rb')
        self.seek(0)

    def tell(self):
        # Report position relative to the start of the segment.
        return self._file.tell() - self.offset

    def seek(self, offset, whence=0):
        if whence == 0:
            # Absolute position within the segment.
            self._file.seek(self.offset + offset, whence)
        elif whence == 1:
            # Relative to the current position.
            self._file.seek(offset, whence)
        elif whence == 2:
            # ``offset`` bytes back from the end of the segment.
            self._file.seek(self.offset + self.length - offset, 0)
        # BUG FIX: keep the segment-relative read position in sync with the
        # file position; previously ``pos`` went stale after any seek, so
        # ``read`` computed the wrong number of remaining bytes.
        self.pos = self._file.tell() - self.offset

    def read(self, size=-1):
        remaining = self.length - self.pos
        if remaining <= 0:
            return b''

        to_read = remaining if size < 0 else min(size, remaining)
        chunk = self._file.read(to_read)
        self.pos += len(chunk)

        return chunk

    def reset(self):
        self._file.seek(self.offset, 0)
        # BUG FIX: without resetting ``pos`` a reset segment still reported
        # itself exhausted and every subsequent read() returned b''.
        self.pos = 0
|
||||
|
||||
|
||||
def _format_uuid_string(string):
|
||||
return (string.replace('urn:', '')
|
||||
.replace('uuid:', '')
|
||||
.strip('{}')
|
||||
.replace('-', '')
|
||||
.lower())
|
||||
|
||||
|
||||
def _is_uuid_like(val):
    """Returns validation of a value as a UUID.

    :param val: Value to verify
    :type val: string
    :returns: bool

    .. versionchanged:: 1.1.1
       Support non-lowercase UUIDs.
    """
    try:
        canonical = str(uuid.UUID(val)).replace('-', '')
        return canonical == _format_uuid_string(val)
    except (TypeError, ValueError, AttributeError):
        return False
|
160
shade/exc.py
160
shade/exc.py
@ -12,162 +12,4 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import sys
|
||||
import json
|
||||
|
||||
import munch
|
||||
from requests import exceptions as _rex
|
||||
|
||||
from shade import _log
|
||||
|
||||
|
||||
class OpenStackCloudException(Exception):
    """Base exception for shade errors.

    Carries an optional ``extra_data`` payload, which is appended to the
    exception args as a second "Extra: ..." message.
    """

    # Kept as a public attribute for backwards compat; nothing reads it.
    log_inner_exceptions = False

    def __init__(self, message, extra_data=None, **kwargs):
        # Only touch munch when there is actually extra data to convert.
        if extra_data and isinstance(extra_data, munch.Munch):
            extra_data = extra_data.toDict()
        args = [message]
        if extra_data:
            args.append("Extra: {0}".format(str(extra_data)))
        super(OpenStackCloudException, self).__init__(*args, **kwargs)
        self.orig_message = message
        self.extra_data = extra_data
        # NOTE(mordred) inner_exception is not used for anything, but it
        # is a public attribute so we keep it around.
        self.inner_exception = sys.exc_info()

    def log_error(self, logger=None):
        # NOTE(mordred) This method is here for backwards compat. As shade
        # no longer wraps any exceptions, this doesn't do anything.
        pass
|
||||
|
||||
|
||||
class OpenStackCloudCreateException(OpenStackCloudException):
    """Raised when creating a resource failed.

    Remembers the id of the resource whose creation failed so callers can
    inspect or clean it up.
    """

    def __init__(self, resource, resource_id, extra_data=None, **kwargs):
        message = "Error creating {resource}: {resource_id}".format(
            resource=resource, resource_id=resource_id)
        super(OpenStackCloudCreateException, self).__init__(
            message=message, extra_data=extra_data, **kwargs)
        self.resource_id = resource_id
|
||||
|
||||
|
||||
class OpenStackCloudTimeout(OpenStackCloudException):
    """Raised when an operation did not complete in the allotted time."""
    pass
|
||||
|
||||
|
||||
class OpenStackCloudUnavailableExtension(OpenStackCloudException):
    """Raised when a required API extension is not available."""
    pass
|
||||
|
||||
|
||||
class OpenStackCloudUnavailableFeature(OpenStackCloudException):
    """Raised when a required feature is not available."""
    pass
|
||||
|
||||
|
||||
class OpenStackCloudHTTPError(OpenStackCloudException, _rex.HTTPError):
    """A shade exception that is also a ``requests`` HTTPError.

    Lets callers catch HTTP failures via either the shade exception
    hierarchy or the requests one.
    """

    def __init__(self, *args, **kwargs):
        # The two bases do not cooperate via super(), so initialize each
        # explicitly with the original arguments.
        OpenStackCloudException.__init__(self, *args, **kwargs)
        _rex.HTTPError.__init__(self, *args, **kwargs)
|
||||
|
||||
|
||||
class OpenStackCloudBadRequest(OpenStackCloudHTTPError):
    """There is something wrong with the request payload (HTTP 400).

    Possible reasons can include malformed json or invalid values to
    parameters such as flavorRef to a server create.
    """
|
||||
|
||||
|
||||
class OpenStackCloudURINotFound(OpenStackCloudHTTPError):
    """Raised for HTTP 404 responses (see raise_from_response)."""
    pass

# Backwards compat
OpenStackCloudResourceNotFound = OpenStackCloudURINotFound
|
||||
|
||||
|
||||
def _log_response_extras(response):
|
||||
# Sometimes we get weird HTML errors. This is usually from load balancers
|
||||
# or other things. Log them to a special logger so that they can be
|
||||
# toggled indepdently - and at debug level so that a person logging
|
||||
# shade.* only gets them at debug.
|
||||
if response.headers.get('content-type') != 'text/html':
|
||||
return
|
||||
try:
|
||||
if int(response.headers.get('content-length', 0)) == 0:
|
||||
return
|
||||
except Exception:
|
||||
return
|
||||
logger = _log.setup_logging('shade.http')
|
||||
if response.reason:
|
||||
logger.debug(
|
||||
"Non-standard error '{reason}' returned from {url}:".format(
|
||||
reason=response.reason,
|
||||
url=response.url))
|
||||
else:
|
||||
logger.debug(
|
||||
"Non-standard error returned from {url}:".format(
|
||||
url=response.url))
|
||||
for response_line in response.text.split('\n'):
|
||||
logger.debug(response_line)
|
||||
|
||||
|
||||
# Logic shamelessly stolen from requests
|
||||
def raise_from_response(response, error_message=None):
    """Raise a shade exception for an HTTP error ``response``.

    For 4xx/5xx status codes, builds an error message from the response
    body and raises the matching OpenStackCloud* exception; returns
    silently for any other status code.

    :param response: A ``requests`` Response object.
    :param str error_message: Optional caller-supplied prefix for the
        exception message.
    :raises OpenStackCloudURINotFound: for 404 responses.
    :raises OpenStackCloudBadRequest: for 400 responses.
    :raises OpenStackCloudHTTPError: for any other 4xx/5xx response.
    """
    msg = ''
    if 400 <= response.status_code < 500:
        source = "Client"
    elif 500 <= response.status_code < 600:
        source = "Server"
    else:
        # Not an error response; nothing to raise.
        return

    remote_error = "Error for url: {url}".format(url=response.url)
    try:
        details = response.json()
        # Nova returns documents that look like
        # {statusname: 'message': message, 'code': code}
        detail_keys = list(details.keys())
        if len(detail_keys) == 1:
            detail_key = detail_keys[0]
            detail_message = details[detail_key].get('message')
            if detail_message:
                remote_error += " {message}".format(message=detail_message)
    except ValueError:
        # Body was not JSON; fall back to the HTTP reason phrase.
        if response.reason:
            remote_error += " {reason}".format(reason=response.reason)
    except AttributeError:
        # The decoded JSON was not shaped as expected (e.g. a value was a
        # string rather than a dict); fall back to the reason phrase, then
        # try treating the inner value as doubly-encoded JSON carrying a
        # heat-style 'faultstring'.
        if response.reason:
            remote_error += " {reason}".format(reason=response.reason)
        # NOTE(review): 'details'/'detail_key' may be unbound here if the
        # AttributeError happened early; the broad except below absorbs
        # the resulting NameError deliberately (best-effort enrichment).
        try:
            json_resp = json.loads(details[detail_key])
            fault_string = json_resp.get('faultstring')
            if fault_string:
                remote_error += " {fault}".format(fault=fault_string)
        except Exception:
            pass

    _log_response_extras(response)

    if error_message:
        msg = '{error_message}. ({code}) {source} {remote_error}'.format(
            error_message=error_message,
            source=source,
            code=response.status_code,
            remote_error=remote_error)
    else:
        msg = '({code}) {source} {remote_error}'.format(
            code=response.status_code,
            source=source,
            remote_error=remote_error)

    # Special case 404 since we raised a specific one for neutron exceptions
    # before
    if response.status_code == 404:
        raise OpenStackCloudURINotFound(msg, response=response)
    elif response.status_code == 400:
        raise OpenStackCloudBadRequest(msg, response=response)
    if msg:
        raise OpenStackCloudHTTPError(msg, response=response)
|
||||
from openstack.cloud.exc import * # noqa
|
||||
|
@ -15,10 +15,10 @@
|
||||
import functools
|
||||
|
||||
from openstack import exceptions
|
||||
from openstack.cloud import _utils
|
||||
from openstack.config import loader
|
||||
|
||||
import shade
|
||||
from shade import _utils
|
||||
|
||||
|
||||
class OpenStackInventory(object):
|
||||
|
@ -22,31 +22,28 @@ import json
|
||||
import jsonpatch
|
||||
import operator
|
||||
import six
|
||||
import threading
|
||||
import time
|
||||
import warnings
|
||||
|
||||
import dogpile.cache
|
||||
import munch
|
||||
import requestsexceptions
|
||||
from six.moves import urllib
|
||||
|
||||
import keystoneauth1.exceptions
|
||||
import keystoneauth1.session
|
||||
import os
|
||||
from openstack.cloud import _utils
|
||||
from openstack.config import loader
|
||||
from openstack import connection
|
||||
from openstack import task_manager
|
||||
from openstack import utils
|
||||
|
||||
import shade
|
||||
from shade import _adapter
|
||||
from shade import exc
|
||||
from shade._heat import event_utils
|
||||
from shade._heat import template_utils
|
||||
from shade import _log
|
||||
from shade import _legacy_clients
|
||||
from shade import _normalize
|
||||
from shade import meta
|
||||
from shade import task_manager
|
||||
from shade import _utils
|
||||
|
||||
OBJECT_MD5_KEY = 'x-object-meta-x-shade-md5'
|
||||
OBJECT_SHA256_KEY = 'x-object-meta-x-shade-sha256'
|
||||
@ -98,7 +95,7 @@ def _no_pending_stacks(stacks):
|
||||
|
||||
|
||||
class OpenStackCloud(
|
||||
_normalize.Normalizer,
|
||||
connection.Connection,
|
||||
_legacy_clients.LegacyClientFactoryMixin):
|
||||
"""Represent a connection to an OpenStack Cloud.
|
||||
|
||||
@ -135,166 +132,29 @@ class OpenStackCloud(
|
||||
app_version=None,
|
||||
use_direct_get=False,
|
||||
**kwargs):
|
||||
super(OpenStackCloud, self).__init__(
|
||||
config=cloud_config,
|
||||
strict=strict,
|
||||
task_manager=manager,
|
||||
app_name=app_name,
|
||||
app_version=app_version,
|
||||
use_direct_get=use_direct_get,
|
||||
**kwargs)
|
||||
|
||||
# Logging in shade is based on 'shade' not 'openstack'
|
||||
self.log = _log.setup_logging('shade')
|
||||
|
||||
if not cloud_config:
|
||||
config = loader.OpenStackConfig(
|
||||
app_name=app_name, app_version=app_version)
|
||||
# shade has this as cloud_config, but sdk has config
|
||||
self.cloud_config = self.config
|
||||
|
||||
cloud_config = config.get_one(**kwargs)
|
||||
cloud_region = cloud_config
|
||||
|
||||
self.name = cloud_region.name
|
||||
self.auth = cloud_region.get_auth_args()
|
||||
self.region_name = cloud_region.region_name
|
||||
self.default_interface = cloud_region.get_interface()
|
||||
self.private = cloud_region.config.get('private', False)
|
||||
self.api_timeout = cloud_region.config['api_timeout']
|
||||
self.image_api_use_tasks = cloud_region.config['image_api_use_tasks']
|
||||
self.secgroup_source = cloud_region.config['secgroup_source']
|
||||
self.force_ipv4 = cloud_region.force_ipv4
|
||||
self.strict_mode = strict
|
||||
self._extra_config = cloud_region.get_client_config(
|
||||
# Backwards compat for get_extra behavior
|
||||
self._extra_config = self.config.get_client_config(
|
||||
'shade', {
|
||||
'get_flavor_extra_specs': True,
|
||||
})
|
||||
|
||||
if manager is not None:
|
||||
self.manager = manager
|
||||
else:
|
||||
self.manager = task_manager.TaskManager(
|
||||
name=':'.join([self.name, self.region_name]), client=self)
|
||||
|
||||
self._external_ipv4_names = cloud_region.get_external_ipv4_networks()
|
||||
self._internal_ipv4_names = cloud_region.get_internal_ipv4_networks()
|
||||
self._external_ipv6_names = cloud_region.get_external_ipv6_networks()
|
||||
self._internal_ipv6_names = cloud_region.get_internal_ipv6_networks()
|
||||
self._nat_destination = cloud_region.get_nat_destination()
|
||||
self._default_network = cloud_region.get_default_network()
|
||||
|
||||
self._floating_ip_source = cloud_region.config.get(
|
||||
'floating_ip_source')
|
||||
if self._floating_ip_source:
|
||||
if self._floating_ip_source.lower() == 'none':
|
||||
self._floating_ip_source = None
|
||||
else:
|
||||
self._floating_ip_source = self._floating_ip_source.lower()
|
||||
|
||||
self._use_external_network = cloud_region.config.get(
|
||||
'use_external_network', True)
|
||||
self._use_internal_network = cloud_region.config.get(
|
||||
'use_internal_network', True)
|
||||
|
||||
# Work around older TaskManager objects that don't have submit_task
|
||||
if not hasattr(self.manager, 'submit_task'):
|
||||
self.manager.submit_task = self.manager.submitTask
|
||||
|
||||
(self.verify, self.cert) = cloud_region.get_requests_verify_args()
|
||||
# Turn off urllib3 warnings about insecure certs if we have
|
||||
# explicitly configured requests to tell it we do not want
|
||||
# cert verification
|
||||
if not self.verify:
|
||||
self.log.debug(
|
||||
"Turning off Insecure SSL warnings since verify=False")
|
||||
category = requestsexceptions.InsecureRequestWarning
|
||||
if category:
|
||||
# InsecureRequestWarning references a Warning class or is None
|
||||
warnings.filterwarnings('ignore', category=category)
|
||||
|
||||
self._disable_warnings = {}
|
||||
self.use_direct_get = use_direct_get
|
||||
|
||||
self._servers = None
|
||||
self._servers_time = 0
|
||||
self._servers_lock = threading.Lock()
|
||||
|
||||
self._ports = None
|
||||
self._ports_time = 0
|
||||
self._ports_lock = threading.Lock()
|
||||
|
||||
self._floating_ips = None
|
||||
self._floating_ips_time = 0
|
||||
self._floating_ips_lock = threading.Lock()
|
||||
|
||||
self._floating_network_by_router = None
|
||||
self._floating_network_by_router_run = False
|
||||
self._floating_network_by_router_lock = threading.Lock()
|
||||
|
||||
self._networks_lock = threading.Lock()
|
||||
self._reset_network_caches()
|
||||
|
||||
cache_expiration_time = int(cloud_region.get_cache_expiration_time())
|
||||
cache_class = cloud_region.get_cache_class()
|
||||
cache_arguments = cloud_region.get_cache_arguments()
|
||||
|
||||
self._resource_caches = {}
|
||||
|
||||
if cache_class != 'dogpile.cache.null':
|
||||
self.cache_enabled = True
|
||||
self._cache = self._make_cache(
|
||||
cache_class, cache_expiration_time, cache_arguments)
|
||||
expirations = cloud_region.get_cache_expirations()
|
||||
for expire_key in expirations.keys():
|
||||
# Only build caches for things we have list operations for
|
||||
if getattr(
|
||||
self, 'list_{0}'.format(expire_key), None):
|
||||
self._resource_caches[expire_key] = self._make_cache(
|
||||
cache_class, expirations[expire_key], cache_arguments)
|
||||
|
||||
self._SERVER_AGE = DEFAULT_SERVER_AGE
|
||||
self._PORT_AGE = DEFAULT_PORT_AGE
|
||||
self._FLOAT_AGE = DEFAULT_FLOAT_AGE
|
||||
else:
|
||||
self.cache_enabled = False
|
||||
|
||||
def _fake_invalidate(unused):
|
||||
pass
|
||||
|
||||
class _FakeCache(object):
|
||||
def invalidate(self):
|
||||
pass
|
||||
|
||||
# Don't cache list_servers if we're not caching things.
|
||||
# Replace this with a more specific cache configuration
|
||||
# soon.
|
||||
self._SERVER_AGE = 0
|
||||
self._PORT_AGE = 0
|
||||
self._FLOAT_AGE = 0
|
||||
self._cache = _FakeCache()
|
||||
# Undecorate cache decorated methods. Otherwise the call stacks
|
||||
# wind up being stupidly long and hard to debug
|
||||
for method in _utils._decorated_methods:
|
||||
meth_obj = getattr(self, method, None)
|
||||
if not meth_obj:
|
||||
continue
|
||||
if (hasattr(meth_obj, 'invalidate')
|
||||
and hasattr(meth_obj, 'func')):
|
||||
new_func = functools.partial(meth_obj.func, self)
|
||||
new_func.invalidate = _fake_invalidate
|
||||
setattr(self, method, new_func)
|
||||
|
||||
# If server expiration time is set explicitly, use that. Otherwise
|
||||
# fall back to whatever it was before
|
||||
self._SERVER_AGE = cloud_region.get_cache_resource_expiration(
|
||||
'server', self._SERVER_AGE)
|
||||
self._PORT_AGE = cloud_region.get_cache_resource_expiration(
|
||||
'port', self._PORT_AGE)
|
||||
self._FLOAT_AGE = cloud_region.get_cache_resource_expiration(
|
||||
'floating_ip', self._FLOAT_AGE)
|
||||
|
||||
self._container_cache = dict()
|
||||
self._file_hash_cache = dict()
|
||||
|
||||
self._keystone_session = None
|
||||
|
||||
# Place to store legacy client objects
|
||||
self._legacy_clients = {}
|
||||
self._raw_clients = {}
|
||||
|
||||
self._local_ipv6 = (
|
||||
_utils.localhost_supports_ipv6() if not self.force_ipv4 else False)
|
||||
|
||||
self.cloud_config = cloud_region
|
||||
|
||||
def connect_as(self, **kwargs):
|
||||
"""Make a new OpenStackCloud object with new auth context.
|
||||
@ -465,98 +325,6 @@ class OpenStackCloud(
|
||||
return int(version[0])
|
||||
return version
|
||||
|
||||
def _get_versioned_client(
|
||||
self, service_type, min_version=None, max_version=None):
|
||||
config_version = self.cloud_config.get_api_version(service_type)
|
||||
config_major = self._get_major_version_id(config_version)
|
||||
max_major = self._get_major_version_id(max_version)
|
||||
min_major = self._get_major_version_id(min_version)
|
||||
# NOTE(mordred) The shade logic for versions is slightly different
|
||||
# than the ksa Adapter constructor logic. shade knows the versions
|
||||
# it knows, and uses them when it detects them. However, if a user
|
||||
# requests a version, and it's not found, and a different one shade
|
||||
# does know about it found, that's a warning in shade.
|
||||
if config_version:
|
||||
if min_major and config_major < min_major:
|
||||
raise exc.OpenStackCloudException(
|
||||
"Version {config_version} requested for {service_type}"
|
||||
" but shade understands a minimum of {min_version}".format(
|
||||
config_version=config_version,
|
||||
service_type=service_type,
|
||||
min_version=min_version))
|
||||
elif max_major and config_major > max_major:
|
||||
raise exc.OpenStackCloudException(
|
||||
"Version {config_version} requested for {service_type}"
|
||||
" but shade understands a maximum of {max_version}".format(
|
||||
config_version=config_version,
|
||||
service_type=service_type,
|
||||
max_version=max_version))
|
||||
request_min_version = config_version
|
||||
request_max_version = '{version}.latest'.format(
|
||||
version=config_major)
|
||||
adapter = _adapter.ShadeAdapter(
|
||||
session=self.keystone_session,
|
||||
manager=self.manager,
|
||||
service_type=self.cloud_config.get_service_type(service_type),
|
||||
service_name=self.cloud_config.get_service_name(service_type),
|
||||
interface=self.cloud_config.get_interface(service_type),
|
||||
endpoint_override=self.cloud_config.get_endpoint(service_type),
|
||||
region_name=self.cloud_config.region,
|
||||
min_version=request_min_version,
|
||||
max_version=request_max_version,
|
||||
shade_logger=self.log)
|
||||
if adapter.get_endpoint():
|
||||
return adapter
|
||||
|
||||
adapter = _adapter.ShadeAdapter(
|
||||
session=self.keystone_session,
|
||||
manager=self.manager,
|
||||
service_type=self.cloud_config.get_service_type(service_type),
|
||||
|