From ab3f400064f93391bfe444fe64f184be06c7dbe6 Mon Sep 17 00:00:00 2001
From: Monty Taylor
Date: Sat, 23 Jun 2018 08:47:47 -0500
Subject: [PATCH] Make OpenStackCloud a subclass of Connection

The shade code lives in openstacksdk now and is a part of Connection.
Start making shade a subclass of that for backwards compat. Mostly only
deal with the constructor for now, but go ahead and do list_flavors and
get_flavor_by_id while we're looking at the extra config override. Also
remove list_projects because of its use of the _normalize module.

keystoneauth lower-constraint needs to be bumped because of
status_code_retries.

Change-Id: Ibbe6e167d6285b30a9adbd0c5a89bc679c5645f3
---
 doc/source/contributor/coding.rst | 6 +-
 lower-constraints.txt | 4 +-
 requirements.txt | 4 +-
 shade/_adapter.py | 164 ---
 shade/_legacy_clients.py | 2 +-
 shade/_normalize.py | 1109 --------------------
 shade/_utils.py | 759 --------------
 shade/exc.py | 160 +--
 shade/inventory.py | 2 +-
 shade/openstackcloud.py | 447 ++------
 shade/tests/functional/test_compute.py | 5 +-
 shade/tests/functional/test_floating_ip.py | 6 +-
 shade/tests/functional/test_volume.py | 4 +-
 shade/tests/unit/test__adapter.py | 38 -
 shade/tests/unit/test__utils.py | 385 -------
 shade/tests/unit/test_baremetal_node.py | 4 +-
 shade/tests/unit/test_domains.py | 3 +-
 shade/tests/unit/test_project.py | 1 -
 shade/tests/unit/test_shade.py | 35 -
 shade/tests/unit/test_shade_operator.py | 2 +-
 20 files changed, 89 insertions(+), 3051 deletions(-)
 delete mode 100644 shade/_adapter.py
 delete mode 100644 shade/_normalize.py
 delete mode 100644 shade/_utils.py
 delete mode 100644 shade/tests/unit/test__adapter.py
 delete mode 100644 shade/tests/unit/test__utils.py

diff --git a/doc/source/contributor/coding.rst b/doc/source/contributor/coding.rst
index 3909e1082..26aa50cc7 100644
--- a/doc/source/contributor/coding.rst
+++ b/doc/source/contributor/coding.rst
@@ -67,13 +67,13 @@ Returned Resources
 ==================
 
 Complex objects returned to the caller must be a `munch.Munch` type. The
-`shade._adapter.Adapter` class makes resources into `munch.Munch`.
+`openstack._adapter.Adapter` class makes resources into `munch.Munch`.
 
 All objects should be normalized. It is shade's purpose in life to make
 OpenStack consistent for end users, and this means not trusting the clouds
 to return consistent objects. There should be a normalize function in
-`shade/_normalize.py` that is applied to objects before returning them to
-the user. See :doc:`../user/model` for further details on object model
+`openstack/cloud/_normalize.py` that is applied to objects before returning
+them to the user. See :doc:`../user/model` for further details on object model
 requirements.
 
 Fields should not be in the normalization contract if we cannot commit to
diff --git a/lower-constraints.txt b/lower-constraints.txt
index e16995536..e80714fa8 100644
--- a/lower-constraints.txt
+++ b/lower-constraints.txt
@@ -13,13 +13,13 @@ iso8601==0.1.12
 jmespath==0.9.3
 jsonpatch==1.21
 jsonpointer==2.0
-keystoneauth1==3.4.0
+keystoneauth1==3.8.0
 linecache2==1.0.0
 mock==2.0.0
 mox3==0.20.0
 munch==2.2.0
 netifaces==0.10.6
-openstacksdk==0.13.0
+openstacksdk==0.15.0
 os-client-config==1.28.0
 os-service-types==1.2.0
 oslotest==3.2.0
diff --git a/requirements.txt b/requirements.txt
index 0e970cc1e..45e77114c 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,5 +3,7 @@
 # process, which may cause wedges in the gate later.
pbr!=2.1.0,>=2.0.0 # Apache-2.0 +# shade depends on os-client-config in addition to openstacksdk so that it +# can continue to provide the make_legacy_client functions. os-client-config>=1.28.0 # Apache-2.0 -openstacksdk>=0.13.0 # Apache-2.0 +openstacksdk>=0.15.0 # Apache-2.0 diff --git a/shade/_adapter.py b/shade/_adapter.py deleted file mode 100644 index bd88d6fd6..000000000 --- a/shade/_adapter.py +++ /dev/null @@ -1,164 +0,0 @@ -# Copyright (c) 2016 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -''' Wrapper around keystoneauth Session to wrap calls in TaskManager ''' - -import functools -from keystoneauth1 import adapter -from six.moves import urllib - -from shade import _log -from shade import exc -from shade import task_manager - - -def extract_name(url): - '''Produce a key name to use in logging/metrics from the URL path. - - We want to be able to logic/metric sane general things, so we pull - the url apart to generate names. The function returns a list because - there are two different ways in which the elements want to be combined - below (one for logging, one for statsd) - - Some examples are likely useful: - - /servers -> ['servers'] - /servers/{id} -> ['servers'] - /servers/{id}/os-security-groups -> ['servers', 'os-security-groups'] - /v2.0/networks.json -> ['networks'] - ''' - - url_path = urllib.parse.urlparse(url).path.strip() - # Remove / from the beginning to keep the list indexes of interesting - # things consistent - if url_path.startswith('/'): - url_path = url_path[1:] - - # Special case for neutron, which puts .json on the end of urls - if url_path.endswith('.json'): - url_path = url_path[:-len('.json')] - - url_parts = url_path.split('/') - if url_parts[-1] == 'detail': - # Special case detail calls - # GET /servers/detail - # returns ['servers', 'detail'] - name_parts = url_parts[-2:] - else: - # Strip leading version piece so that - # GET /v2.0/networks - # returns ['networks'] - if url_parts[0] in ('v1', 'v2', 'v2.0'): - url_parts = url_parts[1:] - name_parts = [] - # Pull out every other URL portion - so that - # GET /servers/{id}/os-security-groups - # returns ['servers', 'os-security-groups'] - for idx in range(0, len(url_parts)): - if not idx % 2 and url_parts[idx]: - name_parts.append(url_parts[idx]) - - # Keystone Token fetching is a special case, so we name it "tokens" - if url_path.endswith('tokens'): - name_parts = ['tokens'] - - # Getting the root of an endpoint is doing version discovery - if not name_parts: - name_parts = ['discovery'] - - # Strip out anything that's empty or None - return [part for part in name_parts if part] - - -class ShadeAdapter(adapter.Adapter): - - def __init__(self, shade_logger, manager, *args, **kwargs): - super(ShadeAdapter, self).__init__(*args, **kwargs) - self.shade_logger = shade_logger - self.manager = manager - self.request_log = _log.setup_logging('shade.request_ids') - - def _log_request_id(self, response, obj=None): - # Log the request id and object id in a specific logger. 
This way - # someone can turn it on if they're interested in this kind of tracing. - request_id = response.headers.get('x-openstack-request-id') - if not request_id: - return response - tmpl = "{meth} call to {service} for {url} used request id {req}" - kwargs = dict( - meth=response.request.method, - service=self.service_type, - url=response.request.url, - req=request_id) - - if isinstance(obj, dict): - obj_id = obj.get('id', obj.get('uuid')) - if obj_id: - kwargs['obj_id'] = obj_id - tmpl += " returning object {obj_id}" - self.request_log.debug(tmpl.format(**kwargs)) - return response - - def _munch_response(self, response, result_key=None, error_message=None): - exc.raise_from_response(response, error_message=error_message) - - if not response.content: - # This doens't have any content - return self._log_request_id(response) - - # Some REST calls do not return json content. Don't decode it. - if 'application/json' not in response.headers.get('Content-Type'): - return self._log_request_id(response) - - try: - result_json = response.json() - self._log_request_id(response, result_json) - except Exception: - return self._log_request_id(response) - return result_json - - def request( - self, url, method, run_async=False, error_message=None, - *args, **kwargs): - name_parts = extract_name(url) - name = '.'.join([self.service_type, method] + name_parts) - class_name = "".join([ - part.lower().capitalize() for part in name.split('.')]) - - request_method = functools.partial( - super(ShadeAdapter, self).request, url, method) - - class RequestTask(task_manager.BaseTask): - - def __init__(self, **kw): - super(RequestTask, self).__init__(**kw) - self.name = name - self.__class__.__name__ = str(class_name) - self.run_async = run_async - - def main(self, client): - self.args.setdefault('raise_exc', False) - return request_method(**self.args) - - response = self.manager.submit_task(RequestTask(**kwargs)) - if run_async: - return response - else: - return self._munch_response(response, error_message=error_message) - - def _version_matches(self, version): - api_version = self.get_api_major_version() - if api_version: - return api_version[0] == version - return False diff --git a/shade/_legacy_clients.py b/shade/_legacy_clients.py index a3aaae9cf..f46e393a9 100644 --- a/shade/_legacy_clients.py +++ b/shade/_legacy_clients.py @@ -13,9 +13,9 @@ import importlib import warnings from keystoneauth1 import plugin +from openstack.cloud import _utils from os_client_config import constructors -from shade import _utils from shade import exc diff --git a/shade/_normalize.py b/shade/_normalize.py deleted file mode 100644 index 72f91ae2e..000000000 --- a/shade/_normalize.py +++ /dev/null @@ -1,1109 +0,0 @@ -# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. -# Copyright (c) 2016 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
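The module removed here moved wholesale into openstacksdk as
openstack/cloud/_normalize.py, so normalization now reaches shade through the
Connection base class and the caller-facing contract is unchanged. A hedged
sketch of that contract, where 'my-cloud' is a hypothetical clouds.yaml entry
and 'f1' a hypothetical flavor id:

import shade

cloud = shade.openstack_cloud(cloud='my-cloud')
flavor = cloud.get_flavor_by_id('f1')  # now serviced via the SDK base class
# Returned resources are still munch.Munch, so attribute access and
# dict-style access both keep working after the move.
print(flavor.name, flavor['ram'])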
-import datetime -import munch -import six - -_IMAGE_FIELDS = ( - 'checksum', - 'container_format', - 'direct_url', - 'disk_format', - 'file', - 'id', - 'name', - 'owner', - 'virtual_size', -) - -_SERVER_FIELDS = ( - 'accessIPv4', - 'accessIPv6', - 'addresses', - 'adminPass', - 'created', - 'key_name', - 'metadata', - 'networks', - 'private_v4', - 'public_v4', - 'public_v6', - 'status', - 'updated', - 'user_id', -) - -_KEYPAIR_FIELDS = ( - 'fingerprint', - 'name', - 'private_key', - 'public_key', - 'user_id', -) - -_KEYPAIR_USELESS_FIELDS = ( - 'deleted', - 'deleted_at', - 'id', - 'updated_at', -) - -_COMPUTE_LIMITS_FIELDS = ( - ('maxPersonality', 'max_personality'), - ('maxPersonalitySize', 'max_personality_size'), - ('maxServerGroupMembers', 'max_server_group_members'), - ('maxServerGroups', 'max_server_groups'), - ('maxServerMeta', 'max_server_meta'), - ('maxTotalCores', 'max_total_cores'), - ('maxTotalInstances', 'max_total_instances'), - ('maxTotalKeypairs', 'max_total_keypairs'), - ('maxTotalRAMSize', 'max_total_ram_size'), - ('totalCoresUsed', 'total_cores_used'), - ('totalInstancesUsed', 'total_instances_used'), - ('totalRAMUsed', 'total_ram_used'), - ('totalServerGroupsUsed', 'total_server_groups_used'), -) - - -_pushdown_fields = { - 'project': [ - 'domain_id' - ] -} - - -def _split_filters(obj_name='', filters=None, **kwargs): - # Handle jmsepath filters - if not filters: - filters = {} - if not isinstance(filters, dict): - return {}, filters - # Filter out None values from extra kwargs, because those are - # defaults. If you want to search for things with None values, - # they're going to need to go into the filters dict - for (key, value) in kwargs.items(): - if value is not None: - filters[key] = value - pushdown = {} - client = {} - for (key, value) in filters.items(): - if key in _pushdown_fields.get(obj_name, {}): - pushdown[key] = value - else: - client[key] = value - return pushdown, client - - -def _to_bool(value): - if isinstance(value, six.string_types): - if not value: - return False - prospective = value.lower().capitalize() - return prospective == 'True' - return bool(value) - - -def _pop_int(resource, key): - return int(resource.pop(key, 0) or 0) - - -def _pop_float(resource, key): - return float(resource.pop(key, 0) or 0) - - -def _pop_or_get(resource, key, default, strict): - if strict: - return resource.pop(key, default) - else: - return resource.get(key, default) - - -class Normalizer(object): - '''Mix-in class to provide the normalization functions. - - This is in a separate class just for on-disk source code organization - reasons. - ''' - - def _normalize_compute_limits(self, limits, project_id=None): - """ Normalize a limits object. - - Limits modified in this method and shouldn't be modified afterwards. 
- """ - - # Copy incoming limits because of shared dicts in unittests - limits = limits['absolute'].copy() - - new_limits = munch.Munch() - new_limits['location'] = self._get_current_location( - project_id=project_id) - - for field in _COMPUTE_LIMITS_FIELDS: - new_limits[field[1]] = limits.pop(field[0], None) - - new_limits['properties'] = limits.copy() - - return new_limits - - def _remove_novaclient_artifacts(self, item): - # Remove novaclient artifacts - item.pop('links', None) - item.pop('NAME_ATTR', None) - item.pop('HUMAN_ID', None) - item.pop('human_id', None) - item.pop('request_ids', None) - item.pop('x_openstack_request_ids', None) - - def _normalize_flavors(self, flavors): - """ Normalize a list of flavor objects """ - ret = [] - for flavor in flavors: - ret.append(self._normalize_flavor(flavor)) - return ret - - def _normalize_flavor(self, flavor): - """ Normalize a flavor object """ - new_flavor = munch.Munch() - - # Copy incoming group because of shared dicts in unittests - flavor = flavor.copy() - - # Discard noise - self._remove_novaclient_artifacts(flavor) - flavor.pop('links', None) - - ephemeral = int(_pop_or_get( - flavor, 'OS-FLV-EXT-DATA:ephemeral', 0, self.strict_mode)) - ephemeral = flavor.pop('ephemeral', ephemeral) - is_public = _to_bool(_pop_or_get( - flavor, 'os-flavor-access:is_public', True, self.strict_mode)) - is_public = _to_bool(flavor.pop('is_public', is_public)) - is_disabled = _to_bool(_pop_or_get( - flavor, 'OS-FLV-DISABLED:disabled', False, self.strict_mode)) - extra_specs = _pop_or_get( - flavor, 'OS-FLV-WITH-EXT-SPECS:extra_specs', {}, self.strict_mode) - extra_specs = flavor.pop('extra_specs', extra_specs) - extra_specs = munch.Munch(extra_specs) - - new_flavor['location'] = self.current_location - new_flavor['id'] = flavor.pop('id') - new_flavor['name'] = flavor.pop('name') - new_flavor['is_public'] = is_public - new_flavor['is_disabled'] = is_disabled - new_flavor['ram'] = _pop_int(flavor, 'ram') - new_flavor['vcpus'] = _pop_int(flavor, 'vcpus') - new_flavor['disk'] = _pop_int(flavor, 'disk') - new_flavor['ephemeral'] = ephemeral - new_flavor['swap'] = _pop_int(flavor, 'swap') - new_flavor['rxtx_factor'] = _pop_float(flavor, 'rxtx_factor') - - new_flavor['properties'] = flavor.copy() - new_flavor['extra_specs'] = extra_specs - - # Backwards compat with nova - passthrough values - if not self.strict_mode: - for (k, v) in new_flavor['properties'].items(): - new_flavor.setdefault(k, v) - - return new_flavor - - def _normalize_keypairs(self, keypairs): - """Normalize Nova Keypairs""" - ret = [] - for keypair in keypairs: - ret.append(self._normalize_keypair(keypair)) - return ret - - def _normalize_keypair(self, keypair): - """Normalize Ironic Machine""" - - new_keypair = munch.Munch() - keypair = keypair.copy() - - # Discard noise - self._remove_novaclient_artifacts(keypair) - - new_keypair['location'] = self.current_location - for key in _KEYPAIR_FIELDS: - new_keypair[key] = keypair.pop(key, None) - # These are completely meaningless fields - for key in _KEYPAIR_USELESS_FIELDS: - keypair.pop(key, None) - new_keypair['type'] = keypair.pop('type', 'ssh') - # created_at isn't returned from the keypair creation. (what?) 
- new_keypair['created_at'] = keypair.pop( - 'created_at', datetime.datetime.now().isoformat()) - # Don't even get me started on this - new_keypair['id'] = new_keypair['name'] - - new_keypair['properties'] = keypair.copy() - - return new_keypair - - def _normalize_images(self, images): - ret = [] - for image in images: - ret.append(self._normalize_image(image)) - return ret - - def _normalize_image(self, image): - new_image = munch.Munch( - location=self._get_current_location(project_id=image.get('owner'))) - - # This copy is to keep things from getting epically weird in tests - image = image.copy() - - # Discard noise - self._remove_novaclient_artifacts(image) - - # If someone made a property called "properties" that contains a - # string (this has happened at least one time in the wild), the - # the rest of the normalization here goes belly up. - properties = image.pop('properties', {}) - if not isinstance(properties, dict): - properties = {'properties': properties} - - visibility = image.pop('visibility', None) - protected = _to_bool(image.pop('protected', False)) - - if visibility: - is_public = (visibility == 'public') - else: - is_public = image.pop('is_public', False) - visibility = 'public' if is_public else 'private' - - new_image['size'] = image.pop('OS-EXT-IMG-SIZE:size', 0) - new_image['size'] = image.pop('size', new_image['size']) - - new_image['min_ram'] = image.pop('minRam', 0) - new_image['min_ram'] = image.pop('min_ram', new_image['min_ram']) - - new_image['min_disk'] = image.pop('minDisk', 0) - new_image['min_disk'] = image.pop('min_disk', new_image['min_disk']) - - new_image['created_at'] = image.pop('created', '') - new_image['created_at'] = image.pop( - 'created_at', new_image['created_at']) - - new_image['updated_at'] = image.pop('updated', '') - new_image['updated_at'] = image.pop( - 'updated_at', new_image['updated_at']) - - for field in _IMAGE_FIELDS: - new_image[field] = image.pop(field, None) - - new_image['tags'] = image.pop('tags', []) - new_image['status'] = image.pop('status').lower() - for field in ('min_ram', 'min_disk', 'size', 'virtual_size'): - new_image[field] = _pop_int(new_image, field) - new_image['is_protected'] = protected - new_image['locations'] = image.pop('locations', []) - - metadata = image.pop('metadata', {}) - for key, val in metadata.items(): - properties.setdefault(key, val) - - for key, val in image.items(): - properties.setdefault(key, val) - new_image['properties'] = properties - new_image['is_public'] = is_public - new_image['visibility'] = visibility - - # Backwards compat with glance - if not self.strict_mode: - for key, val in properties.items(): - if key != 'properties': - new_image[key] = val - new_image['protected'] = protected - new_image['metadata'] = properties - new_image['created'] = new_image['created_at'] - new_image['updated'] = new_image['updated_at'] - new_image['minDisk'] = new_image['min_disk'] - new_image['minRam'] = new_image['min_ram'] - return new_image - - def _normalize_secgroups(self, groups): - """Normalize the structure of security groups - - This makes security group dicts, as returned from nova, look like the - security group dicts as returned from neutron. This does not make them - look exactly the same, but it's pretty close. - - :param list groups: A list of security group dicts. - - :returns: A list of normalized dicts. 
- """ - ret = [] - for group in groups: - ret.append(self._normalize_secgroup(group)) - return ret - - def _normalize_secgroup(self, group): - - ret = munch.Munch() - # Copy incoming group because of shared dicts in unittests - group = group.copy() - - # Discard noise - self._remove_novaclient_artifacts(group) - - rules = self._normalize_secgroup_rules( - group.pop('security_group_rules', group.pop('rules', []))) - project_id = group.pop('tenant_id', '') - project_id = group.pop('project_id', project_id) - - ret['location'] = self._get_current_location(project_id=project_id) - ret['id'] = group.pop('id') - ret['name'] = group.pop('name') - ret['security_group_rules'] = rules - ret['description'] = group.pop('description') - ret['properties'] = group - - # Backwards compat with Neutron - if not self.strict_mode: - ret['tenant_id'] = project_id - ret['project_id'] = project_id - for key, val in ret['properties'].items(): - ret.setdefault(key, val) - - return ret - - def _normalize_secgroup_rules(self, rules): - """Normalize the structure of nova security group rules - - Note that nova uses -1 for non-specific port values, but neutron - represents these with None. - - :param list rules: A list of security group rule dicts. - - :returns: A list of normalized dicts. - """ - ret = [] - for rule in rules: - ret.append(self._normalize_secgroup_rule(rule)) - return ret - - def _normalize_secgroup_rule(self, rule): - ret = munch.Munch() - # Copy incoming rule because of shared dicts in unittests - rule = rule.copy() - - ret['id'] = rule.pop('id') - ret['direction'] = rule.pop('direction', 'ingress') - ret['ethertype'] = rule.pop('ethertype', 'IPv4') - port_range_min = rule.get( - 'port_range_min', rule.pop('from_port', None)) - if port_range_min == -1: - port_range_min = None - if port_range_min is not None: - port_range_min = int(port_range_min) - ret['port_range_min'] = port_range_min - port_range_max = rule.pop( - 'port_range_max', rule.pop('to_port', None)) - if port_range_max == -1: - port_range_max = None - if port_range_min is not None: - port_range_min = int(port_range_min) - ret['port_range_max'] = port_range_max - ret['protocol'] = rule.pop('protocol', rule.pop('ip_protocol', None)) - ret['remote_ip_prefix'] = rule.pop( - 'remote_ip_prefix', rule.pop('ip_range', {}).get('cidr', None)) - ret['security_group_id'] = rule.pop( - 'security_group_id', rule.pop('parent_group_id', None)) - ret['remote_group_id'] = rule.pop('remote_group_id', None) - project_id = rule.pop('tenant_id', '') - project_id = rule.pop('project_id', project_id) - ret['location'] = self._get_current_location(project_id=project_id) - ret['properties'] = rule - - # Backwards compat with Neutron - if not self.strict_mode: - ret['tenant_id'] = project_id - ret['project_id'] = project_id - for key, val in ret['properties'].items(): - ret.setdefault(key, val) - return ret - - def _normalize_servers(self, servers): - # Here instead of _utils because we need access to region and cloud - # name from the cloud object - ret = [] - for server in servers: - ret.append(self._normalize_server(server)) - return ret - - def _normalize_server(self, server): - ret = munch.Munch() - # Copy incoming server because of shared dicts in unittests - server = server.copy() - - self._remove_novaclient_artifacts(server) - - ret['id'] = server.pop('id') - ret['name'] = server.pop('name') - - server['flavor'].pop('links', None) - ret['flavor'] = server.pop('flavor') - - # OpenStack can return image as a string when you've booted - # from volume - if 
str(server['image']) != server['image']: - server['image'].pop('links', None) - ret['image'] = server.pop('image') - - project_id = server.pop('tenant_id', '') - project_id = server.pop('project_id', project_id) - - az = _pop_or_get( - server, 'OS-EXT-AZ:availability_zone', None, self.strict_mode) - ret['location'] = self._get_current_location( - project_id=project_id, zone=az) - - # Ensure volumes is always in the server dict, even if empty - ret['volumes'] = _pop_or_get( - server, 'os-extended-volumes:volumes_attached', - [], self.strict_mode) - - config_drive = server.pop('config_drive', False) - ret['has_config_drive'] = _to_bool(config_drive) - - host_id = server.pop('hostId', None) - ret['host_id'] = host_id - - ret['progress'] = _pop_int(server, 'progress') - - # Leave these in so that the general properties handling works - ret['disk_config'] = _pop_or_get( - server, 'OS-DCF:diskConfig', None, self.strict_mode) - for key in ( - 'OS-EXT-STS:power_state', - 'OS-EXT-STS:task_state', - 'OS-EXT-STS:vm_state', - 'OS-SRV-USG:launched_at', - 'OS-SRV-USG:terminated_at'): - short_key = key.split(':')[1] - ret[short_key] = _pop_or_get(server, key, None, self.strict_mode) - - # Protect against security_groups being None - ret['security_groups'] = server.pop('security_groups', None) or [] - - # NOTE(mnaser): The Nova API returns the creation date in `created` - # however the Shade contract returns `created_at` for - # all resources. - ret['created_at'] = server.get('created') - - for field in _SERVER_FIELDS: - ret[field] = server.pop(field, None) - if not ret['networks']: - ret['networks'] = {} - - ret['interface_ip'] = '' - - ret['properties'] = server.copy() - - # Backwards compat - if not self.strict_mode: - ret['hostId'] = host_id - ret['config_drive'] = config_drive - ret['project_id'] = project_id - ret['tenant_id'] = project_id - ret['region'] = self.region_name - ret['cloud'] = self.name - ret['az'] = az - for key, val in ret['properties'].items(): - ret.setdefault(key, val) - return ret - - def _normalize_floating_ips(self, ips): - """Normalize the structure of floating IPs - - Unfortunately, not all the Neutron floating_ip attributes are available - with Nova and not all Nova floating_ip attributes are available with - Neutron. - This function extract attributes that are common to Nova and Neutron - floating IP resource. - If the whole structure is needed inside shade, shade provides private - methods that returns "original" objects (e.g. - _neutron_allocate_floating_ip) - - :param list ips: A list of Neutron floating IPs. - - :returns: - A list of normalized dicts with the following attributes:: - - [ - { - "id": "this-is-a-floating-ip-id", - "fixed_ip_address": "192.0.2.10", - "floating_ip_address": "198.51.100.10", - "network": "this-is-a-net-or-pool-id", - "attached": True, - "status": "ACTIVE" - }, ... 
- ] - - """ - return [ - self._normalize_floating_ip(ip) for ip in ips - ] - - def _normalize_floating_ip(self, ip): - ret = munch.Munch() - - # Copy incoming floating ip because of shared dicts in unittests - ip = ip.copy() - - fixed_ip_address = ip.pop('fixed_ip_address', ip.pop('fixed_ip', None)) - floating_ip_address = ip.pop('floating_ip_address', ip.pop('ip', None)) - network_id = ip.pop( - 'floating_network_id', ip.pop('network', ip.pop('pool', None))) - project_id = ip.pop('tenant_id', '') - project_id = ip.pop('project_id', project_id) - - instance_id = ip.pop('instance_id', None) - router_id = ip.pop('router_id', None) - id = ip.pop('id') - port_id = ip.pop('port_id', None) - created_at = ip.pop('created_at', None) - updated_at = ip.pop('updated_at', None) - # Note - description may not always be on the underlying cloud. - # Normalizing it here is easy - what do we do when people want to - # set a description? - description = ip.pop('description', '') - revision_number = ip.pop('revision_number', None) - - if self._use_neutron_floating(): - attached = bool(port_id) - status = ip.pop('status', 'UNKNOWN') - else: - attached = bool(instance_id) - # In neutron's terms, Nova floating IPs are always ACTIVE - status = 'ACTIVE' - - ret = munch.Munch( - attached=attached, - fixed_ip_address=fixed_ip_address, - floating_ip_address=floating_ip_address, - id=id, - location=self._get_current_location(project_id=project_id), - network=network_id, - port=port_id, - router=router_id, - status=status, - created_at=created_at, - updated_at=updated_at, - description=description, - revision_number=revision_number, - properties=ip.copy(), - ) - # Backwards compat - if not self.strict_mode: - ret['port_id'] = port_id - ret['router_id'] = router_id - ret['project_id'] = project_id - ret['tenant_id'] = project_id - ret['floating_network_id'] = network_id - for key, val in ret['properties'].items(): - ret.setdefault(key, val) - - return ret - - def _normalize_projects(self, projects): - """Normalize the structure of projects - - This makes tenants from keystone v2 look like projects from v3. - - :param list projects: A list of projects to normalize - - :returns: A list of normalized dicts. 
- """ - ret = [] - for project in projects: - ret.append(self._normalize_project(project)) - return ret - - def _normalize_project(self, project): - - # Copy incoming project because of shared dicts in unittests - project = project.copy() - - # Discard noise - self._remove_novaclient_artifacts(project) - - # In both v2 and v3 - project_id = project.pop('id') - name = project.pop('name', '') - description = project.pop('description', '') - is_enabled = project.pop('enabled', True) - - # v3 additions - domain_id = project.pop('domain_id', 'default') - parent_id = project.pop('parent_id', None) - is_domain = project.pop('is_domain', False) - - # Projects have a special relationship with location - location = self._get_identity_location() - location['project']['domain_id'] = domain_id - location['project']['id'] = parent_id - - ret = munch.Munch( - location=location, - id=project_id, - name=name, - description=description, - is_enabled=is_enabled, - is_domain=is_domain, - domain_id=domain_id, - properties=project.copy() - ) - - # Backwards compat - if not self.strict_mode: - ret['enabled'] = is_enabled - ret['parent_id'] = parent_id - for key, val in ret['properties'].items(): - ret.setdefault(key, val) - - return ret - - def _normalize_volume_type_access(self, volume_type_access): - - volume_type_access = volume_type_access.copy() - - volume_type_id = volume_type_access.pop('volume_type_id') - project_id = volume_type_access.pop('project_id') - ret = munch.Munch( - location=self.current_location, - project_id=project_id, - volume_type_id=volume_type_id, - properties=volume_type_access.copy(), - ) - return ret - - def _normalize_volume_type_accesses(self, volume_type_accesses): - ret = [] - for volume_type_access in volume_type_accesses: - ret.append(self._normalize_volume_type_access(volume_type_access)) - return ret - - def _normalize_volume_type(self, volume_type): - - volume_type = volume_type.copy() - - volume_id = volume_type.pop('id') - description = volume_type.pop('description', None) - name = volume_type.pop('name', None) - old_is_public = volume_type.pop('os-volume-type-access:is_public', - False) - is_public = volume_type.pop('is_public', old_is_public) - qos_specs_id = volume_type.pop('qos_specs_id', None) - extra_specs = volume_type.pop('extra_specs', {}) - ret = munch.Munch( - location=self.current_location, - is_public=is_public, - id=volume_id, - name=name, - description=description, - qos_specs_id=qos_specs_id, - extra_specs=extra_specs, - properties=volume_type.copy(), - ) - return ret - - def _normalize_volume_types(self, volume_types): - ret = [] - for volume in volume_types: - ret.append(self._normalize_volume_type(volume)) - return ret - - def _normalize_volumes(self, volumes): - """Normalize the structure of volumes - - This makes tenants from cinder v1 look like volumes from v2. - - :param list projects: A list of volumes to normalize - - :returns: A list of normalized dicts. 
- """ - ret = [] - for volume in volumes: - ret.append(self._normalize_volume(volume)) - return ret - - def _normalize_volume(self, volume): - - volume = volume.copy() - - # Discard noise - self._remove_novaclient_artifacts(volume) - - volume_id = volume.pop('id') - - name = volume.pop('display_name', None) - name = volume.pop('name', name) - - description = volume.pop('display_description', None) - description = volume.pop('description', description) - - is_bootable = _to_bool(volume.pop('bootable', True)) - is_encrypted = _to_bool(volume.pop('encrypted', False)) - can_multiattach = _to_bool(volume.pop('multiattach', False)) - - project_id = _pop_or_get( - volume, 'os-vol-tenant-attr:tenant_id', None, self.strict_mode) - az = volume.pop('availability_zone', None) - - location = self._get_current_location(project_id=project_id, zone=az) - - host = _pop_or_get( - volume, 'os-vol-host-attr:host', None, self.strict_mode) - replication_extended_status = _pop_or_get( - volume, 'os-volume-replication:extended_status', - None, self.strict_mode) - - migration_status = _pop_or_get( - volume, 'os-vol-mig-status-attr:migstat', None, self.strict_mode) - migration_status = volume.pop('migration_status', migration_status) - _pop_or_get(volume, 'user_id', None, self.strict_mode) - source_volume_id = _pop_or_get( - volume, 'source_volid', None, self.strict_mode) - replication_driver = _pop_or_get( - volume, 'os-volume-replication:driver_data', - None, self.strict_mode) - - ret = munch.Munch( - location=location, - id=volume_id, - name=name, - description=description, - size=_pop_int(volume, 'size'), - attachments=volume.pop('attachments', []), - status=volume.pop('status'), - migration_status=migration_status, - host=host, - replication_driver=replication_driver, - replication_status=volume.pop('replication_status', None), - replication_extended_status=replication_extended_status, - snapshot_id=volume.pop('snapshot_id', None), - created_at=volume.pop('created_at'), - updated_at=volume.pop('updated_at', None), - source_volume_id=source_volume_id, - consistencygroup_id=volume.pop('consistencygroup_id', None), - volume_type=volume.pop('volume_type', None), - metadata=volume.pop('metadata', {}), - is_bootable=is_bootable, - is_encrypted=is_encrypted, - can_multiattach=can_multiattach, - properties=volume.copy(), - ) - - # Backwards compat - if not self.strict_mode: - ret['display_name'] = name - ret['display_description'] = description - ret['bootable'] = is_bootable - ret['encrypted'] = is_encrypted - ret['multiattach'] = can_multiattach - ret['availability_zone'] = az - for key, val in ret['properties'].items(): - ret.setdefault(key, val) - return ret - - def _normalize_volume_attachment(self, attachment): - """ Normalize a volume attachment object""" - - attachment = attachment.copy() - - # Discard noise - self._remove_novaclient_artifacts(attachment) - return munch.Munch(**attachment) - - def _normalize_volume_backups(self, backups): - ret = [] - for backup in backups: - ret.append(self._normalize_volume_backup(backup)) - return ret - - def _normalize_volume_backup(self, backup): - """ Normalize a valume backup object""" - - backup = backup.copy() - # Discard noise - self._remove_novaclient_artifacts(backup) - return munch.Munch(**backup) - - def _normalize_compute_usage(self, usage): - """ Normalize a compute usage object """ - - usage = usage.copy() - - # Discard noise - self._remove_novaclient_artifacts(usage) - project_id = usage.pop('tenant_id', None) - - ret = munch.Munch( - 
location=self._get_current_location(project_id=project_id), - ) - for key in ( - 'max_personality', - 'max_personality_size', - 'max_server_group_members', - 'max_server_groups', - 'max_server_meta', - 'max_total_cores', - 'max_total_instances', - 'max_total_keypairs', - 'max_total_ram_size', - 'total_cores_used', - 'total_hours', - 'total_instances_used', - 'total_local_gb_usage', - 'total_memory_mb_usage', - 'total_ram_used', - 'total_server_groups_used', - 'total_vcpus_usage'): - ret[key] = usage.pop(key, 0) - ret['started_at'] = usage.pop('start', None) - ret['stopped_at'] = usage.pop('stop', None) - ret['server_usages'] = self._normalize_server_usages( - usage.pop('server_usages', [])) - ret['properties'] = usage - return ret - - def _normalize_server_usage(self, server_usage): - """ Normalize a server usage object """ - - server_usage = server_usage.copy() - # TODO(mordred) Right now there is already a location on the usage - # object. Including one here seems verbose. - server_usage.pop('tenant_id') - ret = munch.Munch() - - ret['ended_at'] = server_usage.pop('ended_at', None) - ret['started_at'] = server_usage.pop('started_at', None) - for key in ( - 'flavor', - 'instance_id', - 'name', - 'state'): - ret[key] = server_usage.pop(key, '') - for key in ( - 'hours', - 'local_gb', - 'memory_mb', - 'uptime', - 'vcpus'): - ret[key] = server_usage.pop(key, 0) - ret['properties'] = server_usage - return ret - - def _normalize_server_usages(self, server_usages): - ret = [] - for server_usage in server_usages: - ret.append(self._normalize_server_usage(server_usage)) - return ret - - def _normalize_cluster_templates(self, cluster_templates): - ret = [] - for cluster_template in cluster_templates: - ret.append(self._normalize_cluster_template(cluster_template)) - return ret - - def _normalize_cluster_template(self, cluster_template): - """Normalize Magnum cluster_templates.""" - cluster_template = cluster_template.copy() - - # Discard noise - cluster_template.pop('links', None) - cluster_template.pop('human_id', None) - # model_name is a magnumclient-ism - cluster_template.pop('model_name', None) - - ct_id = cluster_template.pop('uuid') - - ret = munch.Munch( - id=ct_id, - location=self._get_current_location(), - ) - ret['is_public'] = cluster_template.pop('public') - ret['is_registry_enabled'] = cluster_template.pop('registry_enabled') - ret['is_tls_disabled'] = cluster_template.pop('tls_disabled') - # pop floating_ip_enabled since we want to hide it in a future patch - fip_enabled = cluster_template.pop('floating_ip_enabled', None) - if not self.strict_mode: - ret['uuid'] = ct_id - if fip_enabled is not None: - ret['floating_ip_enabled'] = fip_enabled - ret['public'] = ret['is_public'] - ret['registry_enabled'] = ret['is_registry_enabled'] - ret['tls_disabled'] = ret['is_tls_disabled'] - - # Optional keys - for (key, default) in ( - ('fixed_network', None), - ('fixed_subnet', None), - ('http_proxy', None), - ('https_proxy', None), - ('labels', {}), - ('master_flavor_id', None), - ('no_proxy', None)): - if key in cluster_template: - ret[key] = cluster_template.pop(key, default) - - for key in ( - 'apiserver_port', - 'cluster_distro', - 'coe', - 'created_at', - 'dns_nameserver', - 'docker_volume_size', - 'external_network_id', - 'flavor_id', - 'image_id', - 'insecure_registry', - 'keypair_id', - 'name', - 'network_driver', - 'server_type', - 'updated_at', - 'volume_driver'): - ret[key] = cluster_template.pop(key) - - ret['properties'] = cluster_template - return ret - - def 
_normalize_magnum_services(self, magnum_services): - ret = [] - for magnum_service in magnum_services: - ret.append(self._normalize_magnum_service(magnum_service)) - return ret - - def _normalize_magnum_service(self, magnum_service): - """Normalize Magnum magnum_services.""" - magnum_service = magnum_service.copy() - - # Discard noise - magnum_service.pop('links', None) - magnum_service.pop('human_id', None) - # model_name is a magnumclient-ism - magnum_service.pop('model_name', None) - - ret = munch.Munch(location=self._get_current_location()) - - for key in ( - 'binary', - 'created_at', - 'disabled_reason', - 'host', - 'id', - 'report_count', - 'state', - 'updated_at'): - ret[key] = magnum_service.pop(key) - ret['properties'] = magnum_service - return ret - - def _normalize_stacks(self, stacks): - """Normalize Heat Stacks""" - ret = [] - for stack in stacks: - ret.append(self._normalize_stack(stack)) - return ret - - def _normalize_stack(self, stack): - """Normalize Heat Stack""" - stack = stack.copy() - - # Discard noise - self._remove_novaclient_artifacts(stack) - - # Discard things heatclient adds that aren't in the REST - stack.pop('action', None) - stack.pop('status', None) - stack.pop('identifier', None) - - stack_status = stack.pop('stack_status') - (action, status) = stack_status.split('_', 1) - - ret = munch.Munch( - id=stack.pop('id'), - location=self._get_current_location(), - action=action, - status=status, - ) - if not self.strict_mode: - ret['stack_status'] = stack_status - - for (new_name, old_name) in ( - ('name', 'stack_name'), - ('created_at', 'creation_time'), - ('deleted_at', 'deletion_time'), - ('updated_at', 'updated_time'), - ('description', 'description'), - ('is_rollback_enabled', 'disable_rollback'), - ('parent', 'parent'), - ('notification_topics', 'notification_topics'), - ('parameters', 'parameters'), - ('outputs', 'outputs'), - ('owner', 'stack_owner'), - ('status_reason', 'stack_status_reason'), - ('stack_user_project_id', 'stack_user_project_id'), - ('tempate_description', 'template_description'), - ('timeout_mins', 'timeout_mins'), - ('tags', 'tags')): - value = stack.pop(old_name, None) - ret[new_name] = value - if not self.strict_mode: - ret[old_name] = value - ret['identifier'] = '{name}/{id}'.format( - name=ret['name'], id=ret['id']) - ret['properties'] = stack - return ret - - def _normalize_machines(self, machines): - """Normalize Ironic Machines""" - ret = [] - for machine in machines: - ret.append(self._normalize_machine(machine)) - return ret - - def _normalize_machine(self, machine): - """Normalize Ironic Machine""" - machine = machine.copy() - - # Discard noise - self._remove_novaclient_artifacts(machine) - - # TODO(mordred) Normalize this resource - - return machine - - def _normalize_roles(self, roles): - """Normalize Keystone roles""" - ret = [] - for role in roles: - ret.append(self._normalize_role(role)) - return ret - - def _normalize_role(self, role): - """Normalize Identity roles.""" - - return munch.Munch( - id=role.get('id'), - name=role.get('name'), - domain_id=role.get('domain_id'), - location=self._get_identity_location(), - properties={}, - ) diff --git a/shade/_utils.py b/shade/_utils.py deleted file mode 100644 index 3ca54d128..000000000 --- a/shade/_utils.py +++ /dev/null @@ -1,759 +0,0 @@ -# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import contextlib -import fnmatch -import inspect -import jmespath -import munch -import netifaces -import re -import six -import sre_constants -import sys -import time -import uuid - -from decorator import decorator - -from shade import _log -from shade import exc -from shade import meta - -_decorated_methods = [] - - -def _exc_clear(): - """Because sys.exc_clear is gone in py3 and is not in six.""" - if sys.version_info[0] == 2: - sys.exc_clear() - - -def _iterate_timeout(timeout, message, wait=2): - """Iterate and raise an exception on timeout. - - This is a generator that will continually yield and sleep for - wait seconds, and if the timeout is reached, will raise an exception - with . - - """ - log = _log.setup_logging('shade.iterate_timeout') - - try: - # None as a wait winds up flowing well in the per-resource cache - # flow. We could spread this logic around to all of the calling - # points, but just having this treat None as "I don't have a value" - # seems friendlier - if wait is None: - wait = 2 - elif wait == 0: - # wait should be < timeout, unless timeout is None - wait = 0.1 if timeout is None else min(0.1, timeout) - wait = float(wait) - except ValueError: - raise exc.OpenStackCloudException( - "Wait value must be an int or float value. {wait} given" - " instead".format(wait=wait)) - - start = time.time() - count = 0 - while (timeout is None) or (time.time() < start + timeout): - count += 1 - yield count - log.debug('Waiting %s seconds', wait) - time.sleep(wait) - raise exc.OpenStackCloudTimeout(message) - - -def _make_unicode(input): - """Turn an input into unicode unconditionally - - :param input: - A unicode, string or other object - """ - try: - if isinstance(input, unicode): - return input - if isinstance(input, str): - return input.decode('utf-8') - else: - # int, for example - return unicode(input) - except NameError: - # python3! - return str(input) - - -def _dictify_resource(resource): - if isinstance(resource, list): - return [_dictify_resource(r) for r in resource] - else: - if hasattr(resource, 'toDict'): - return resource.toDict() - else: - return resource - - -def _filter_list(data, name_or_id, filters): - """Filter a list by name/ID and arbitrary meta data. - - :param list data: - The list of dictionary data to filter. It is expected that - each dictionary contains an 'id' and 'name' - key if a value for name_or_id is given. - :param string name_or_id: - The name or ID of the entity being filtered. Can be a glob pattern, - such as 'nb01*'. - :param filters: - A dictionary of meta data to use for further filtering. Elements - of this dictionary may, themselves, be dictionaries. Example:: - - { - 'last_name': 'Smith', - 'other': { - 'gender': 'Female' - } - } - OR - A string containing a jmespath expression for further filtering. 
- """ - # The logger is shade.fmmatch to allow a user/operator to configure logging - # not to communicate about fnmatch misses (they shouldn't be too spammy, - # but one never knows) - log = _log.setup_logging('shade.fnmatch') - if name_or_id: - # name_or_id might already be unicode - name_or_id = _make_unicode(name_or_id) - identifier_matches = [] - bad_pattern = False - try: - fn_reg = re.compile(fnmatch.translate(name_or_id)) - except sre_constants.error: - # If the fnmatch re doesn't compile, then we don't care, - # but log it in case the user DID pass a pattern but did - # it poorly and wants to know what went wrong with their - # search - fn_reg = None - for e in data: - e_id = _make_unicode(e.get('id', None)) - e_name = _make_unicode(e.get('name', None)) - - if ((e_id and e_id == name_or_id) or - (e_name and e_name == name_or_id)): - identifier_matches.append(e) - else: - # Only try fnmatch if we don't match exactly - if not fn_reg: - # If we don't have a pattern, skip this, but set the flag - # so that we log the bad pattern - bad_pattern = True - continue - if ((e_id and fn_reg.match(e_id)) or - (e_name and fn_reg.match(e_name))): - identifier_matches.append(e) - if not identifier_matches and bad_pattern: - log.debug("Bad pattern passed to fnmatch", exc_info=True) - data = identifier_matches - - if not filters: - return data - - if isinstance(filters, six.string_types): - return jmespath.search(filters, data) - - def _dict_filter(f, d): - if not d: - return False - for key in f.keys(): - if isinstance(f[key], dict): - if not _dict_filter(f[key], d.get(key, None)): - return False - elif d.get(key, None) != f[key]: - return False - return True - - filtered = [] - for e in data: - filtered.append(e) - for key in filters.keys(): - if isinstance(filters[key], dict): - if not _dict_filter(filters[key], e.get(key, None)): - filtered.pop() - break - elif e.get(key, None) != filters[key]: - filtered.pop() - break - return filtered - - -def _get_entity(cloud, resource, name_or_id, filters, **kwargs): - """Return a single entity from the list returned by a given method. - - :param object cloud: - The controller class (Example: the main OpenStackCloud object) . - :param string or callable resource: - The string that identifies the resource to use to lookup the - get_<>_by_id or search_s methods(Example: network) - or a callable to invoke. - :param string name_or_id: - The name or ID of the entity being filtered or an object or dict. - If this is an object/dict with an 'id' attr/key, we return it and - bypass resource lookup. - :param filters: - A dictionary of meta data to use for further filtering. - OR - A string containing a jmespath expression for further filtering. - Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" - """ - - # Sometimes in the control flow of shade, we already have an object - # fetched. Rather than then needing to pull the name or id out of that - # object, pass it in here and rely on caching to prevent us from making - # an additional call, it's simple enough to test to see if we got an - # object and just short-circuit return it. 
- - if (hasattr(name_or_id, 'id') or - (isinstance(name_or_id, dict) and 'id' in name_or_id)): - return name_or_id - - # If a uuid is passed short-circuit it calling the - # get__by_id method - if getattr(cloud, 'use_direct_get', False) and _is_uuid_like(name_or_id): - get_resource = getattr(cloud, 'get_%s_by_id' % resource, None) - if get_resource: - return get_resource(name_or_id) - - search = resource if callable(resource) else getattr( - cloud, 'search_%ss' % resource, None) - if search: - entities = search(name_or_id, filters, **kwargs) - if entities: - if len(entities) > 1: - raise exc.OpenStackCloudException( - "Multiple matches found for %s" % name_or_id) - return entities[0] - return None - - -def normalize_keystone_services(services): - """Normalize the structure of keystone services - - In keystone v2, there is a field called "service_type". In v3, it's - "type". Just make the returned dict have both. - - :param list services: A list of keystone service dicts - - :returns: A list of normalized dicts. - """ - ret = [] - for service in services: - service_type = service.get('type', service.get('service_type')) - new_service = { - 'id': service['id'], - 'name': service['name'], - 'description': service.get('description', None), - 'type': service_type, - 'service_type': service_type, - 'enabled': service['enabled'] - } - ret.append(new_service) - return meta.obj_list_to_munch(ret) - - -def localhost_supports_ipv6(): - """Determine whether the local host supports IPv6 - - We look for a default route that supports the IPv6 address family, - and assume that if it is present, this host has globally routable - IPv6 connectivity. - """ - - try: - return netifaces.AF_INET6 in netifaces.gateways()['default'] - except AttributeError: - return False - - -def normalize_users(users): - ret = [ - dict( - id=user.get('id'), - email=user.get('email'), - name=user.get('name'), - username=user.get('username'), - default_project_id=user.get('default_project_id', - user.get('tenantId')), - domain_id=user.get('domain_id'), - enabled=user.get('enabled'), - description=user.get('description') - ) for user in users - ] - return meta.obj_list_to_munch(ret) - - -def normalize_domains(domains): - ret = [ - dict( - id=domain.get('id'), - name=domain.get('name'), - description=domain.get('description'), - enabled=domain.get('enabled'), - ) for domain in domains - ] - return meta.obj_list_to_munch(ret) - - -def normalize_groups(domains): - """Normalize Identity groups.""" - ret = [ - dict( - id=domain.get('id'), - name=domain.get('name'), - description=domain.get('description'), - domain_id=domain.get('domain_id'), - ) for domain in domains - ] - return meta.obj_list_to_munch(ret) - - -def normalize_role_assignments(assignments): - """Put role_assignments into a form that works with search/get interface. - - Role assignments have the structure:: - - [ - { - "role": { - "id": "--role-id--" - }, - "scope": { - "domain": { - "id": "--domain-id--" - } - }, - "user": { - "id": "--user-id--" - } - }, - ] - - Which is hard to work with in the rest of our interface. Map this to be:: - - [ - { - "id": "--role-id--", - "domain": "--domain-id--", - "user": "--user-id--", - } - ] - - Scope can be "domain" or "project" and "user" can also be "group". - - :param list assignments: A list of dictionaries of role assignments. - - :returns: A list of flattened/normalized role assignment dicts. 
- """ - new_assignments = [] - for assignment in assignments: - new_val = munch.Munch({'id': assignment['role']['id']}) - for scope in ('project', 'domain'): - if scope in assignment['scope']: - new_val[scope] = assignment['scope'][scope]['id'] - for assignee in ('user', 'group'): - if assignee in assignment: - new_val[assignee] = assignment[assignee]['id'] - new_assignments.append(new_val) - return new_assignments - - -def normalize_flavor_accesses(flavor_accesses): - """Normalize Flavor access list.""" - return [munch.Munch( - dict( - flavor_id=acl.get('flavor_id'), - project_id=acl.get('project_id') or acl.get('tenant_id'), - ) - ) for acl in flavor_accesses - ] - - -def valid_kwargs(*valid_args): - # This decorator checks if argument passed as **kwargs to a function are - # present in valid_args. - # - # Typically, valid_kwargs is used when we want to distinguish between - # None and omitted arguments and we still want to validate the argument - # list. - # - # Example usage: - # - # @valid_kwargs('opt_arg1', 'opt_arg2') - # def my_func(self, mandatory_arg1, mandatory_arg2, **kwargs): - # ... - # - @decorator - def func_wrapper(func, *args, **kwargs): - argspec = inspect.getargspec(func) - for k in kwargs: - if k not in argspec.args[1:] and k not in valid_args: - raise TypeError( - "{f}() got an unexpected keyword argument " - "'{arg}'".format(f=inspect.stack()[1][3], arg=k)) - return func(*args, **kwargs) - return func_wrapper - - -def cache_on_arguments(*cache_on_args, **cache_on_kwargs): - _cache_name = cache_on_kwargs.pop('resource', None) - - def _inner_cache_on_arguments(func): - def _cache_decorator(obj, *args, **kwargs): - the_method = obj._get_cache(_cache_name).cache_on_arguments( - *cache_on_args, **cache_on_kwargs)( - func.__get__(obj, type(obj))) - return the_method(*args, **kwargs) - - def invalidate(obj, *args, **kwargs): - return obj._get_cache( - _cache_name).cache_on_arguments()(func).invalidate( - *args, **kwargs) - - _cache_decorator.invalidate = invalidate - _cache_decorator.func = func - _decorated_methods.append(func.__name__) - - return _cache_decorator - return _inner_cache_on_arguments - - -@contextlib.contextmanager -def shade_exceptions(error_message=None): - """Context manager for dealing with shade exceptions. - - :param string error_message: String to use for the exception message - content on non-OpenStackCloudExceptions. - - Useful for avoiding wrapping shade OpenStackCloudException exceptions - within themselves. Code called from within the context may throw such - exceptions without having to catch and reraise them. - - Non-OpenStackCloudException exceptions thrown within the context will - be wrapped and the exception message will be appended to the given error - message. - """ - try: - yield - except exc.OpenStackCloudException: - raise - except Exception as e: - if error_message is None: - error_message = str(e) - raise exc.OpenStackCloudException(error_message) - - -def safe_dict_min(key, data): - """Safely find the minimum for a given key in a list of dict objects. - - This will find the minimum integer value for specific dictionary key - across a list of dictionaries. The values for the given key MUST be - integers, or string representations of an integer. - - The dictionary key does not have to be present in all (or any) - of the elements/dicts within the data set. - - :param string key: The dictionary key to search for the minimum value. - :param list data: List of dicts to use for the data set. 
- - :returns: None if the field was not not found in any elements, or - the minimum value for the field otherwise. - """ - min_value = None - for d in data: - if (key in d) and (d[key] is not None): - try: - val = int(d[key]) - except ValueError: - raise exc.OpenStackCloudException( - "Search for minimum value failed. " - "Value for {key} is not an integer: {value}".format( - key=key, value=d[key]) - ) - if (min_value is None) or (val < min_value): - min_value = val - return min_value - - -def safe_dict_max(key, data): - """Safely find the maximum for a given key in a list of dict objects. - - This will find the maximum integer value for specific dictionary key - across a list of dictionaries. The values for the given key MUST be - integers, or string representations of an integer. - - The dictionary key does not have to be present in all (or any) - of the elements/dicts within the data set. - - :param string key: The dictionary key to search for the maximum value. - :param list data: List of dicts to use for the data set. - - :returns: None if the field was not not found in any elements, or - the maximum value for the field otherwise. - """ - max_value = None - for d in data: - if (key in d) and (d[key] is not None): - try: - val = int(d[key]) - except ValueError: - raise exc.OpenStackCloudException( - "Search for maximum value failed. " - "Value for {key} is not an integer: {value}".format( - key=key, value=d[key]) - ) - if (max_value is None) or (val > max_value): - max_value = val - return max_value - - -def _call_client_and_retry(client, url, retry_on=None, - call_retries=3, retry_wait=2, - **kwargs): - """Method to provide retry operations. - - Some APIs utilize HTTP errors on certian operations to indicate that - the resource is presently locked, and as such this mechanism provides - the ability to retry upon known error codes. - - :param object client: The client method, such as: - ``self.baremetal_client.post`` - :param string url: The URL to perform the operation upon. - :param integer retry_on: A list of error codes that can be retried on. - The method also supports a single integer to be - defined. - :param integer call_retries: The number of times to retry the call upon - the error code defined by the 'retry_on' - parameter. Default: 3 - :param integer retry_wait: The time in seconds to wait between retry - attempts. Default: 2 - - :returns: The object returned by the client call. - """ - - # NOTE(TheJulia): This method, as of this note, does not have direct - # unit tests, although is fairly well tested by the tests checking - # retry logic in test_baremetal_node.py. - log = _log.setup_logging('shade.http') - - if isinstance(retry_on, int): - retry_on = [retry_on] - - count = 0 - while (count < call_retries): - count += 1 - try: - ret_val = client(url, **kwargs) - except exc.OpenStackCloudHTTPError as e: - if (retry_on is not None and - e.response.status_code in retry_on): - log.debug('Received retryable error {err}, waiting ' - '{wait} seconds to retry', { - 'err': e.response.status_code, - 'wait': retry_wait - }) - time.sleep(retry_wait) - continue - else: - raise - # Break out of the loop, since the loop should only continue - # when we encounter a known connection error. - return ret_val - - -def parse_range(value): - """Parse a numerical range string. - - Breakdown a range expression into its operater and numerical parts. - This expression must be a string. 
Valid values must be an integer string,
-    optionally preceded by one of the following operators::
-
-    - "<"  : Less than
-    - ">"  : Greater than
-    - "<=" : Less than or equal to
-    - ">=" : Greater than or equal to
-
-    Some examples of valid values and function return values::
-
-    - "1024"  : returns (None, 1024)
-    - "<5"    : returns ("<", 5)
-    - ">=100" : returns (">=", 100)
-
-    :param string value: The range expression to be parsed.
-
-    :returns: A tuple with the operator string (or None if no operator
-        was given) and the integer value. None is returned if parsing failed.
-    """
-    if value is None:
-        return None
-
-    range_exp = re.match('(<|>|<=|>=){0,1}(\d+)$', value)
-    if range_exp is None:
-        return None
-
-    op = range_exp.group(1)
-    num = int(range_exp.group(2))
-    return (op, num)
-
-
-def range_filter(data, key, range_exp):
-    """Filter a list by a single range expression.
-
-    :param list data: List of dictionaries to be searched.
-    :param string key: Key name to search within the data set.
-    :param string range_exp: The expression describing the range of values.
-
-    :returns: A list subset of the original data set.
-    :raises: OpenStackCloudException on invalid range expressions.
-    """
-    filtered = []
-    range_exp = str(range_exp).upper()
-
-    if range_exp == "MIN":
-        key_min = safe_dict_min(key, data)
-        if key_min is None:
-            return []
-        for d in data:
-            if int(d[key]) == key_min:
-                filtered.append(d)
-        return filtered
-    elif range_exp == "MAX":
-        key_max = safe_dict_max(key, data)
-        if key_max is None:
-            return []
-        for d in data:
-            if int(d[key]) == key_max:
-                filtered.append(d)
-        return filtered
-
-    # Not looking for a min or max, so a range or exact value must
-    # have been supplied.
-    val_range = parse_range(range_exp)
-
-    # If parsing the range fails, it must be a bad value.
-    if val_range is None:
-        raise exc.OpenStackCloudException(
-            "Invalid range value: {value}".format(value=range_exp))
-
-    op = val_range[0]
-    if op:
-        # Range matching
-        for d in data:
-            d_val = int(d[key])
-            if op == '<':
-                if d_val < val_range[1]:
-                    filtered.append(d)
-            elif op == '>':
-                if d_val > val_range[1]:
-                    filtered.append(d)
-            elif op == '<=':
-                if d_val <= val_range[1]:
-                    filtered.append(d)
-            elif op == '>=':
-                if d_val >= val_range[1]:
-                    filtered.append(d)
-        return filtered
-    else:
-        # Exact number match
-        for d in data:
-            if int(d[key]) == val_range[1]:
-                filtered.append(d)
-        return filtered
-
-
-def generate_patches_from_kwargs(operation, **kwargs):
-    """Given a set of parameters, returns a list with the
-    valid patch values.
-
-    :param string operation: The operation to perform.
-    :param dict kwargs: Dict of parameters.
-
-    :returns: A list with the right patch values. 
- """ - patches = [] - for k, v in kwargs.items(): - patch = {'op': operation, - 'value': v, - 'path': '/%s' % k} - patches.append(patch) - return sorted(patches) - - -class FileSegment(object): - """File-like object to pass to requests.""" - - def __init__(self, filename, offset, length): - self.filename = filename - self.offset = offset - self.length = length - self.pos = 0 - self._file = open(filename, 'rb') - self.seek(0) - - def tell(self): - return self._file.tell() - self.offset - - def seek(self, offset, whence=0): - if whence == 0: - self._file.seek(self.offset + offset, whence) - elif whence == 1: - self._file.seek(offset, whence) - elif whence == 2: - self._file.seek(self.offset + self.length - offset, 0) - - def read(self, size=-1): - remaining = self.length - self.pos - if remaining <= 0: - return b'' - - to_read = remaining if size < 0 else min(size, remaining) - chunk = self._file.read(to_read) - self.pos += len(chunk) - - return chunk - - def reset(self): - self._file.seek(self.offset, 0) - - -def _format_uuid_string(string): - return (string.replace('urn:', '') - .replace('uuid:', '') - .strip('{}') - .replace('-', '') - .lower()) - - -def _is_uuid_like(val): - """Returns validation of a value as a UUID. - - :param val: Value to verify - :type val: string - :returns: bool - - .. versionchanged:: 1.1.1 - Support non-lowercase UUIDs. - """ - try: - return str(uuid.UUID(val)).replace('-', '') == _format_uuid_string(val) - except (TypeError, ValueError, AttributeError): - return False diff --git a/shade/exc.py b/shade/exc.py index 0091f838c..4093fd858 100644 --- a/shade/exc.py +++ b/shade/exc.py @@ -12,162 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys -import json - -import munch -from requests import exceptions as _rex - -from shade import _log - - -class OpenStackCloudException(Exception): - - log_inner_exceptions = False - - def __init__(self, message, extra_data=None, **kwargs): - args = [message] - if extra_data: - if isinstance(extra_data, munch.Munch): - extra_data = extra_data.toDict() - args.append("Extra: {0}".format(str(extra_data))) - super(OpenStackCloudException, self).__init__(*args, **kwargs) - self.extra_data = extra_data - # NOTE(mordred) The next two are not used for anything, but - # they are public attributes so we keep them around. - self.inner_exception = sys.exc_info() - self.orig_message = message - - def log_error(self, logger=None): - # NOTE(mordred) This method is here for backwards compat. As shade - # no longer wraps any exceptions, this doesn't do anything. 
-        pass
-
-
-class OpenStackCloudCreateException(OpenStackCloudException):
-
-    def __init__(self, resource, resource_id, extra_data=None, **kwargs):
-        super(OpenStackCloudCreateException, self).__init__(
-            message="Error creating {resource}: {resource_id}".format(
-                resource=resource, resource_id=resource_id),
-            extra_data=extra_data, **kwargs)
-        self.resource_id = resource_id
-
-
-class OpenStackCloudTimeout(OpenStackCloudException):
-    pass
-
-
-class OpenStackCloudUnavailableExtension(OpenStackCloudException):
-    pass
-
-
-class OpenStackCloudUnavailableFeature(OpenStackCloudException):
-    pass
-
-
-class OpenStackCloudHTTPError(OpenStackCloudException, _rex.HTTPError):
-
-    def __init__(self, *args, **kwargs):
-        OpenStackCloudException.__init__(self, *args, **kwargs)
-        _rex.HTTPError.__init__(self, *args, **kwargs)
-
-
-class OpenStackCloudBadRequest(OpenStackCloudHTTPError):
-    """There is something wrong with the request payload.
-
-    Possible reasons can include malformed json or invalid values for
-    parameters such as flavorRef to a server create.
-    """
-
-
-class OpenStackCloudURINotFound(OpenStackCloudHTTPError):
-    pass
-
-# Backwards compat
-OpenStackCloudResourceNotFound = OpenStackCloudURINotFound
-
-
-def _log_response_extras(response):
-    # Sometimes we get weird HTML errors. This is usually from load balancers
-    # or other things. Log them to a special logger so that they can be
-    # toggled independently - and at debug level so that a person logging
-    # shade.* only gets them at debug.
-    if response.headers.get('content-type') != 'text/html':
-        return
-    try:
-        if int(response.headers.get('content-length', 0)) == 0:
-            return
-    except Exception:
-        return
-    logger = _log.setup_logging('shade.http')
-    if response.reason:
-        logger.debug(
-            "Non-standard error '{reason}' returned from {url}:".format(
-                reason=response.reason,
-                url=response.url))
-    else:
-        logger.debug(
-            "Non-standard error returned from {url}:".format(
-                url=response.url))
-    for response_line in response.text.split('\n'):
-        logger.debug(response_line)
-
-
-# Logic shamelessly stolen from requests
-def raise_from_response(response, error_message=None):
-    msg = ''
-    if 400 <= response.status_code < 500:
-        source = "Client"
-    elif 500 <= response.status_code < 600:
-        source = "Server"
-    else:
-        return
-
-    remote_error = "Error for url: {url}".format(url=response.url)
-    try:
-        details = response.json()
-        # Nova returns documents that look like
-        # {statusname: {'message': message, 'code': code}}
-        detail_keys = list(details.keys())
-        if len(detail_keys) == 1:
-            detail_key = detail_keys[0]
-            detail_message = details[detail_key].get('message')
-            if detail_message:
-                remote_error += " {message}".format(message=detail_message)
-    except ValueError:
-        if response.reason:
-            remote_error += " {reason}".format(reason=response.reason)
-    except AttributeError:
-        if response.reason:
-            remote_error += " {reason}".format(reason=response.reason)
-    try:
-        json_resp = json.loads(details[detail_key])
-        fault_string = json_resp.get('faultstring')
-        if fault_string:
-            remote_error += " {fault}".format(fault=fault_string)
-    except Exception:
-        pass
-
-    _log_response_extras(response)
-
-    if error_message:
-        msg = '{error_message}. 
({code}) {source} {remote_error}'.format( - error_message=error_message, - source=source, - code=response.status_code, - remote_error=remote_error) - else: - msg = '({code}) {source} {remote_error}'.format( - code=response.status_code, - source=source, - remote_error=remote_error) - - # Special case 404 since we raised a specific one for neutron exceptions - # before - if response.status_code == 404: - raise OpenStackCloudURINotFound(msg, response=response) - elif response.status_code == 400: - raise OpenStackCloudBadRequest(msg, response=response) - if msg: - raise OpenStackCloudHTTPError(msg, response=response) +from openstack.cloud.exc import * # noqa diff --git a/shade/inventory.py b/shade/inventory.py index 1559ba08e..f0246eb5f 100644 --- a/shade/inventory.py +++ b/shade/inventory.py @@ -15,10 +15,10 @@ import functools from openstack import exceptions +from openstack.cloud import _utils from openstack.config import loader import shade -from shade import _utils class OpenStackInventory(object): diff --git a/shade/openstackcloud.py b/shade/openstackcloud.py index 59fcd50cf..34e6c5b9a 100644 --- a/shade/openstackcloud.py +++ b/shade/openstackcloud.py @@ -22,31 +22,28 @@ import json import jsonpatch import operator import six -import threading import time import warnings import dogpile.cache import munch -import requestsexceptions from six.moves import urllib import keystoneauth1.exceptions import keystoneauth1.session import os +from openstack.cloud import _utils from openstack.config import loader +from openstack import connection +from openstack import task_manager +from openstack import utils -import shade -from shade import _adapter from shade import exc from shade._heat import event_utils from shade._heat import template_utils from shade import _log from shade import _legacy_clients -from shade import _normalize from shade import meta -from shade import task_manager -from shade import _utils OBJECT_MD5_KEY = 'x-object-meta-x-shade-md5' OBJECT_SHA256_KEY = 'x-object-meta-x-shade-sha256' @@ -98,7 +95,7 @@ def _no_pending_stacks(stacks): class OpenStackCloud( - _normalize.Normalizer, + connection.Connection, _legacy_clients.LegacyClientFactoryMixin): """Represent a connection to an OpenStack Cloud. 
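
For orientation, a minimal sketch of what this inheritance change means for
callers once it lands (the 'devstack' cloud name and its clouds.yaml entry
are illustrative assumptions, not part of the patch):

    import shade
    from openstack import connection

    # shade.openstack_cloud() still returns an OpenStackCloud, but that
    # object is now also an openstack.connection.Connection, so the sdk
    # surface and the shade surface share one session, one task manager
    # and one config object.
    cloud = shade.openstack_cloud(cloud='devstack')
    assert isinstance(cloud, connection.Connection)

    # Existing shade calls keep working unchanged.
    flavors = cloud.list_flavors(get_extra=False)
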
@@ -135,166 +132,29 @@ class OpenStackCloud( app_version=None, use_direct_get=False, **kwargs): + super(OpenStackCloud, self).__init__( + config=cloud_config, + strict=strict, + task_manager=manager, + app_name=app_name, + app_version=app_version, + use_direct_get=use_direct_get, + **kwargs) + # Logging in shade is based on 'shade' not 'openstack' self.log = _log.setup_logging('shade') - if not cloud_config: - config = loader.OpenStackConfig( - app_name=app_name, app_version=app_version) + # shade has this as cloud_config, but sdk has config + self.cloud_config = self.config - cloud_config = config.get_one(**kwargs) - cloud_region = cloud_config - - self.name = cloud_region.name - self.auth = cloud_region.get_auth_args() - self.region_name = cloud_region.region_name - self.default_interface = cloud_region.get_interface() - self.private = cloud_region.config.get('private', False) - self.api_timeout = cloud_region.config['api_timeout'] - self.image_api_use_tasks = cloud_region.config['image_api_use_tasks'] - self.secgroup_source = cloud_region.config['secgroup_source'] - self.force_ipv4 = cloud_region.force_ipv4 - self.strict_mode = strict - self._extra_config = cloud_region.get_client_config( + # Backwards compat for get_extra behavior + self._extra_config = self.config.get_client_config( 'shade', { 'get_flavor_extra_specs': True, }) - if manager is not None: - self.manager = manager - else: - self.manager = task_manager.TaskManager( - name=':'.join([self.name, self.region_name]), client=self) - - self._external_ipv4_names = cloud_region.get_external_ipv4_networks() - self._internal_ipv4_names = cloud_region.get_internal_ipv4_networks() - self._external_ipv6_names = cloud_region.get_external_ipv6_networks() - self._internal_ipv6_names = cloud_region.get_internal_ipv6_networks() - self._nat_destination = cloud_region.get_nat_destination() - self._default_network = cloud_region.get_default_network() - - self._floating_ip_source = cloud_region.config.get( - 'floating_ip_source') - if self._floating_ip_source: - if self._floating_ip_source.lower() == 'none': - self._floating_ip_source = None - else: - self._floating_ip_source = self._floating_ip_source.lower() - - self._use_external_network = cloud_region.config.get( - 'use_external_network', True) - self._use_internal_network = cloud_region.config.get( - 'use_internal_network', True) - - # Work around older TaskManager objects that don't have submit_task - if not hasattr(self.manager, 'submit_task'): - self.manager.submit_task = self.manager.submitTask - - (self.verify, self.cert) = cloud_region.get_requests_verify_args() - # Turn off urllib3 warnings about insecure certs if we have - # explicitly configured requests to tell it we do not want - # cert verification - if not self.verify: - self.log.debug( - "Turning off Insecure SSL warnings since verify=False") - category = requestsexceptions.InsecureRequestWarning - if category: - # InsecureRequestWarning references a Warning class or is None - warnings.filterwarnings('ignore', category=category) - - self._disable_warnings = {} - self.use_direct_get = use_direct_get - - self._servers = None - self._servers_time = 0 - self._servers_lock = threading.Lock() - - self._ports = None - self._ports_time = 0 - self._ports_lock = threading.Lock() - - self._floating_ips = None - self._floating_ips_time = 0 - self._floating_ips_lock = threading.Lock() - - self._floating_network_by_router = None - self._floating_network_by_router_run = False - self._floating_network_by_router_lock = threading.Lock() - - 
self._networks_lock = threading.Lock()
-        self._reset_network_caches()
-
-        cache_expiration_time = int(cloud_region.get_cache_expiration_time())
-        cache_class = cloud_region.get_cache_class()
-        cache_arguments = cloud_region.get_cache_arguments()
-
-        self._resource_caches = {}
-
-        if cache_class != 'dogpile.cache.null':
-            self.cache_enabled = True
-            self._cache = self._make_cache(
-                cache_class, cache_expiration_time, cache_arguments)
-            expirations = cloud_region.get_cache_expirations()
-            for expire_key in expirations.keys():
-                # Only build caches for things we have list operations for
-                if getattr(
-                        self, 'list_{0}'.format(expire_key), None):
-                    self._resource_caches[expire_key] = self._make_cache(
-                        cache_class, expirations[expire_key], cache_arguments)
-
-            self._SERVER_AGE = DEFAULT_SERVER_AGE
-            self._PORT_AGE = DEFAULT_PORT_AGE
-            self._FLOAT_AGE = DEFAULT_FLOAT_AGE
-        else:
-            self.cache_enabled = False
-
-            def _fake_invalidate(unused):
-                pass
-
-            class _FakeCache(object):
-                def invalidate(self):
-                    pass
-
-            # Don't cache list_servers if we're not caching things.
-            # Replace this with a more specific cache configuration
-            # soon.
-            self._SERVER_AGE = 0
-            self._PORT_AGE = 0
-            self._FLOAT_AGE = 0
-            self._cache = _FakeCache()
-            # Undecorate cache decorated methods. Otherwise the call stacks
-            # wind up being stupidly long and hard to debug
-            for method in _utils._decorated_methods:
-                meth_obj = getattr(self, method, None)
-                if not meth_obj:
-                    continue
-                if (hasattr(meth_obj, 'invalidate')
-                        and hasattr(meth_obj, 'func')):
-                    new_func = functools.partial(meth_obj.func, self)
-                    new_func.invalidate = _fake_invalidate
-                    setattr(self, method, new_func)
-
-        # If server expiration time is set explicitly, use that. Otherwise
-        # fall back to whatever it was before
-        self._SERVER_AGE = cloud_region.get_cache_resource_expiration(
-            'server', self._SERVER_AGE)
-        self._PORT_AGE = cloud_region.get_cache_resource_expiration(
-            'port', self._PORT_AGE)
-        self._FLOAT_AGE = cloud_region.get_cache_resource_expiration(
-            'floating_ip', self._FLOAT_AGE)
-
-        self._container_cache = dict()
-        self._file_hash_cache = dict()
-
-        self._keystone_session = None
-
+        # Place to store legacy client objects
         self._legacy_clients = {}
-        self._raw_clients = {}
-
-        self._local_ipv6 = (
-            _utils.localhost_supports_ipv6() if not self.force_ipv4 else False)
-
-        self.cloud_config = cloud_region
 
     def connect_as(self, **kwargs):
         """Make a new OpenStackCloud object with new auth context.
@@ -465,98 +325,6 @@ class OpenStackCloud(
             return int(version[0])
         return version
 
-    def _get_versioned_client(
-            self, service_type, min_version=None, max_version=None):
-        config_version = self.cloud_config.get_api_version(service_type)
-        config_major = self._get_major_version_id(config_version)
-        max_major = self._get_major_version_id(max_version)
-        min_major = self._get_major_version_id(min_version)
-        # NOTE(mordred) The shade logic for versions is slightly different
-        # than the ksa Adapter constructor logic. shade knows the versions
-        # it knows, and uses them when it detects them. However, if a user
-        # requests a version, and it's not found, and a different one shade
-        # does know about is found, that's a warning in shade. 
-        if config_version:
-            if min_major and config_major < min_major:
-                raise exc.OpenStackCloudException(
-                    "Version {config_version} requested for {service_type}"
-                    " but shade understands a minimum of {min_version}".format(
-                        config_version=config_version,
-                        service_type=service_type,
-                        min_version=min_version))
-            elif max_major and config_major > max_major:
-                raise exc.OpenStackCloudException(
-                    "Version {config_version} requested for {service_type}"
-                    " but shade understands a maximum of {max_version}".format(
-                        config_version=config_version,
-                        service_type=service_type,
-                        max_version=max_version))
-            request_min_version = config_version
-            request_max_version = '{version}.latest'.format(
-                version=config_major)
-            adapter = _adapter.ShadeAdapter(
-                session=self.keystone_session,
-                manager=self.manager,
-                service_type=self.cloud_config.get_service_type(service_type),
-                service_name=self.cloud_config.get_service_name(service_type),
-                interface=self.cloud_config.get_interface(service_type),
-                endpoint_override=self.cloud_config.get_endpoint(service_type),
-                region_name=self.cloud_config.region,
-                min_version=request_min_version,
-                max_version=request_max_version,
-                shade_logger=self.log)
-            if adapter.get_endpoint():
-                return adapter
-
-        adapter = _adapter.ShadeAdapter(
-            session=self.keystone_session,
-            manager=self.manager,
-            service_type=self.cloud_config.get_service_type(service_type),
-            service_name=self.cloud_config.get_service_name(service_type),
-            interface=self.cloud_config.get_interface(service_type),
-            endpoint_override=self.cloud_config.get_endpoint(service_type),
-            region_name=self.cloud_config.region,
-            min_version=min_version,
-            max_version=max_version,
-            shade_logger=self.log)
-
-        # data.api_version can be None if no version was detected, such
-        # as with neutron
-        api_version = adapter.get_api_major_version(
-            endpoint_override=self.cloud_config.get_endpoint(service_type))
-        api_major = self._get_major_version_id(api_version)
-
-        # If we detect a different version than was configured, warn the
-        # user. shade still knows what to do - but if the user gave us an
-        # explicit version and we couldn't find it, they may want to
-        # investigate.
-        if api_version and (api_major != config_major):
-            warning_msg = (
-                '{service_type} is configured for {config_version}'
-                ' but only {api_version} is available. shade is happy'
-                ' with this version, but if you were trying to force an'
-                ' override, that did not happen. 
You may want to check' - ' your cloud, or remove the version specification from' - ' your config.'.format( - service_type=service_type, - config_version=config_version, - api_version='.'.join([str(f) for f in api_version]))) - self.log.debug(warning_msg) - warnings.warn(warning_msg) - return adapter - - def _get_raw_client( - self, service_type, api_version=None, endpoint_override=None): - return _adapter.ShadeAdapter( - session=self.keystone_session, - manager=self.manager, - service_type=self.cloud_config.get_service_type(service_type), - service_name=self.cloud_config.get_service_name(service_type), - interface=self.cloud_config.get_interface(service_type), - endpoint_override=self.cloud_config.get_endpoint( - service_type) or endpoint_override, - region_name=self.cloud_config.region, - shade_logger=self.log) - def _is_client_version(self, client, version): client_name = '_{client}_client'.format(client=client) client = getattr(self, client_name) @@ -686,16 +454,7 @@ class OpenStackCloud( @property def keystone_session(self): - if self._keystone_session is None: - try: - self._keystone_session = self.cloud_config.get_session() - if hasattr(self._keystone_session, 'additional_user_agent'): - self._keystone_session.additional_user_agent.append( - ('shade', shade.__version__)) - except Exception as e: - raise exc.OpenStackCloudException( - "Error authenticating to keystone: %s " % str(e)) - return self._keystone_session + return self.session @property def _keystone_catalog(self): @@ -774,7 +533,7 @@ class OpenStackCloud( def _get_current_location(self, project_id=None, zone=None): return munch.Munch( cloud=self.name, - region_name=self.region_name, + region_name=self.config.region_name, zone=zone, project=self._get_project_info(project_id), ) @@ -888,46 +647,6 @@ class OpenStackCloud( """ return meta.get_and_munchify(key, data) - @_utils.cache_on_arguments() - def list_projects(self, domain_id=None, name_or_id=None, filters=None): - """List projects. - - With no parameters, returns a full listing of all visible projects. - - :param domain_id: domain ID to scope the searched projects. - :param name_or_id: project name or ID. - :param filters: a dict containing additional filters to use - OR - A string containing a jmespath expression for further filtering. - Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" - - :returns: a list of ``munch.Munch`` containing the projects - - :raises: ``OpenStackCloudException``: if something goes wrong during - the OpenStack API call. 
- """ - kwargs = dict( - filters=filters, - domain_id=domain_id) - if self._is_client_version('identity', 3): - kwargs['obj_name'] = 'project' - - pushdown, filters = _normalize._split_filters(**kwargs) - - try: - if self._is_client_version('identity', 3): - key = 'projects' - else: - key = 'tenants' - data = self._identity_client.get( - '/{endpoint}'.format(endpoint=key), params=pushdown) - projects = self._normalize_projects( - self._get_and_munchify(key, data)) - except Exception as e: - self.log.debug("Failed to list projects", exc_info=True) - raise exc.OpenStackCloudException(str(e)) - return _utils._filter_list(projects, name_or_id, filters) - def search_projects(self, name_or_id=None, filters=None, domain_id=None): '''Backwards compatibility method for search_projects @@ -1438,7 +1157,11 @@ class OpenStackCloud( return self.name def get_region(self): - return self.region_name + return self.config.region_name + + @property + def region_name(self): + return self.config.region_name def get_flavor_name(self, flavor_id): flavor = self.get_flavor(flavor_id, get_extra=False) @@ -1481,7 +1204,7 @@ class OpenStackCloud( " {error}".format( service=service_key, cloud=self.name, - region=self.region_name, + region=self.config.region_name, error=str(e))) return endpoint @@ -1964,29 +1687,11 @@ class OpenStackCloud( """ if get_extra is None: get_extra = self._extra_config['get_flavor_extra_specs'] - data = self._compute_client.get( - '/flavors/detail', params=dict(is_public='None'), - error_message="Error fetching flavor list") - flavors = self._normalize_flavors( - self._get_and_munchify('flavors', data)) - for flavor in flavors: - if not flavor.extra_specs and get_extra: - endpoint = "/flavors/{id}/os-extra_specs".format( - id=flavor.id) - try: - data = self._compute_client.get( - endpoint, - error_message="Error fetching flavor extra specs") - flavor.extra_specs = self._get_and_munchify( - 'extra_specs', data) - except exc.OpenStackCloudHTTPError as e: - flavor.extra_specs = {} - self.log.debug( - 'Fetching extra specs for flavor failed:' - ' %(msg)s', {'msg': str(e)}) - - return flavors + # This method is already cache-decorated. We don't want to call the + # decorated inner-method, we want to call the method it is decorating. + return connection.Connection.list_flavors.func( + self, get_extra=get_extra) @_utils.cache_on_arguments(should_cache_fn=_no_pending_stacks) def list_stacks(self): @@ -2210,7 +1915,7 @@ class OpenStackCloud( filters=None): error_msg = "Error fetching server list on {cloud}:{region}:".format( cloud=self.name, - region=self.region_name) + region=self.config.region_name) params = filters or {} if all_projects: params['all_tenants'] = True @@ -3039,32 +2744,10 @@ class OpenStackCloud( specs. :returns: A flavor ``munch.Munch``. 
""" - data = self._compute_client.get( - '/flavors/{id}'.format(id=id), - error_message="Error getting flavor with ID {id}".format(id=id) - ) - flavor = self._normalize_flavor( - self._get_and_munchify('flavor', data)) - if get_extra is None: get_extra = self._extra_config['get_flavor_extra_specs'] - - if not flavor.extra_specs and get_extra: - endpoint = "/flavors/{id}/os-extra_specs".format( - id=flavor.id) - try: - data = self._compute_client.get( - endpoint, - error_message="Error fetching flavor extra specs") - flavor.extra_specs = self._get_and_munchify( - 'extra_specs', data) - except exc.OpenStackCloudHTTPError as e: - flavor.extra_specs = {} - self.log.debug( - 'Fetching extra specs for flavor failed:' - ' %(msg)s', {'msg': str(e)}) - - return flavor + return super(OpenStackCloud, self).get_flavor_by_id( + id, get_extra=get_extra) def get_security_group(self, name_or_id, filters=None): """Get a security group by name or ID. @@ -4551,7 +4234,7 @@ class OpenStackCloud( def wait_for_image(self, image, timeout=3600): image_id = image['id'] - for count in _utils._iterate_timeout( + for count in utils.iterate_timeout( timeout, "Timeout waiting for image to snapshot"): self.list_images.invalidate(self) image = self.get_image(image_id) @@ -4590,7 +4273,7 @@ class OpenStackCloud( self.delete_object(container=container, name=objname) if wait: - for count in _utils._iterate_timeout( + for count in utils.iterate_timeout( timeout, "Timeout waiting for the image to be deleted."): self._get_cache(None).invalidate() @@ -4820,7 +4503,7 @@ class OpenStackCloud( if not wait: return self.get_image(response['image_id']) try: - for count in _utils._iterate_timeout( + for count in utils.iterate_timeout( timeout, "Timeout waiting for the image to finish."): image_obj = self.get_image(response['image_id']) @@ -4914,7 +4597,7 @@ class OpenStackCloud( if not wait: return image try: - for count in _utils._iterate_timeout( + for count in utils.iterate_timeout( timeout, "Timeout waiting for the image to finish."): image_obj = self.get_image(image.id) @@ -4954,7 +4637,7 @@ class OpenStackCloud( if wait: start = time.time() image_id = None - for count in _utils._iterate_timeout( + for count in utils.iterate_timeout( timeout, "Timeout waiting for the image to import."): try: @@ -5117,7 +4800,7 @@ class OpenStackCloud( if wait: vol_id = volume['id'] - for count in _utils._iterate_timeout( + for count in utils.iterate_timeout( timeout, "Timeout waiting for the volume to be available."): volume = self.get_volume(vol_id) @@ -5204,7 +4887,7 @@ class OpenStackCloud( self.list_volumes.invalidate(self) if wait: - for count in _utils._iterate_timeout( + for count in utils.iterate_timeout( timeout, "Timeout waiting for the volume to be deleted."): @@ -5292,7 +4975,7 @@ class OpenStackCloud( volume=volume['id'], server=server['id']))) if wait: - for count in _utils._iterate_timeout( + for count in utils.iterate_timeout( timeout, "Timeout waiting for volume %s to detach." % volume['id']): try: @@ -5360,7 +5043,7 @@ class OpenStackCloud( server_id=server['id'])) if wait: - for count in _utils._iterate_timeout( + for count in utils.iterate_timeout( timeout, "Timeout waiting for volume %s to attach." % volume['id']): try: @@ -5435,7 +5118,7 @@ class OpenStackCloud( snapshot = self._get_and_munchify('snapshot', data) if wait: snapshot_id = snapshot['id'] - for count in _utils._iterate_timeout( + for count in utils.iterate_timeout( timeout, "Timeout waiting for the volume snapshot to be available." 
): @@ -5531,7 +5214,7 @@ class OpenStackCloud( backup_id = backup['id'] msg = ("Timeout waiting for the volume backup {} to be " "available".format(backup_id)) - for _ in _utils._iterate_timeout(timeout, msg): + for _ in utils.iterate_timeout(timeout, msg): backup = self.get_volume_backup(backup_id) if backup['status'] == 'available': @@ -5622,7 +5305,7 @@ class OpenStackCloud( error_message=msg) if wait: msg = "Timeout waiting for the volume backup to be deleted." - for count in _utils._iterate_timeout(timeout, msg): + for count in utils.iterate_timeout(timeout, msg): if not self.get_volume_backup(volume_backup['id']): break @@ -5652,7 +5335,7 @@ class OpenStackCloud( error_message="Error in deleting volume snapshot") if wait: - for count in _utils._iterate_timeout( + for count in utils.iterate_timeout( timeout, "Timeout waiting for the volume snapshot to be deleted."): if not self.get_volume_snapshot(volumesnapshot['id']): @@ -5953,7 +5636,7 @@ class OpenStackCloud( # if we've provided a port as a parameter if wait: try: - for count in _utils._iterate_timeout( + for count in utils.iterate_timeout( timeout, "Timeout waiting for the floating IP" " to be ACTIVE", @@ -6159,7 +5842,7 @@ class OpenStackCloud( if wait: # Wait for the address to be assigned to the server server_id = server['id'] - for _ in _utils._iterate_timeout( + for _ in utils.iterate_timeout( timeout, "Timeout waiting for the floating IP to be attached.", wait=self._SERVER_AGE): @@ -6191,7 +5874,7 @@ class OpenStackCloud( timeout = self._PORT_AGE * 2 else: timeout = None - for count in _utils._iterate_timeout( + for count in utils.iterate_timeout( timeout, "Timeout waiting for port to show up in list", wait=self._PORT_AGE): @@ -6622,7 +6305,7 @@ class OpenStackCloud( 'Volume {boot_volume} is not a valid volume' ' in {cloud}:{region}'.format( boot_volume=boot_volume, - cloud=self.name, region=self.region_name)) + cloud=self.name, region=self.config.region_name)) block_mapping = { 'boot_index': '0', 'delete_on_termination': terminate_volume, @@ -6643,7 +6326,7 @@ class OpenStackCloud( 'Image {image} is not a valid image in' ' {cloud}:{region}'.format( image=image, - cloud=self.name, region=self.region_name)) + cloud=self.name, region=self.config.region_name)) block_mapping = { 'boot_index': '0', @@ -6673,7 +6356,7 @@ class OpenStackCloud( 'Volume {volume} is not a valid volume' ' in {cloud}:{region}'.format( volume=volume, - cloud=self.name, region=self.region_name)) + cloud=self.name, region=self.config.region_name)) block_mapping = { 'boot_index': '-1', 'delete_on_termination': False, @@ -6865,7 +6548,7 @@ class OpenStackCloud( 'Network {network} is not a valid network in' ' {cloud}:{region}'.format( network=network, - cloud=self.name, region=self.region_name)) + cloud=self.name, region=self.config.region_name)) nics.append({'net-id': network_obj['id']}) kwargs['nics'] = nics @@ -6977,7 +6660,7 @@ class OpenStackCloud( start_time = time.time() # There is no point in iterating faster than the list_servers cache - for count in _utils._iterate_timeout( + for count in utils.iterate_timeout( timeout, timeout_message, # if _SERVER_AGE is 0 we still want to wait a bit @@ -7067,7 +6750,7 @@ class OpenStackCloud( self._normalize_server(server), bare=bare, detailed=detailed) admin_pass = server.get('adminPass') or admin_pass - for count in _utils._iterate_timeout( + for count in utils.iterate_timeout( timeout, "Timeout waiting for server {0} to " "rebuild.".format(server_id), @@ -7223,7 +6906,7 @@ class OpenStackCloud( and 
self.get_volumes(server)): reset_volume_cache = True - for count in _utils._iterate_timeout( + for count in utils.iterate_timeout( timeout, "Timed out waiting for server to get deleted.", # if _SERVER_AGE is 0 we still want to wait a bit @@ -9057,7 +8740,7 @@ class OpenStackCloud( with _utils.shade_exceptions("Error inspecting machine"): machine = self.node_set_provision_state(machine['uuid'], 'inspect') if wait: - for count in _utils._iterate_timeout( + for count in utils.iterate_timeout( timeout, "Timeout waiting for node transition to " "target state of 'inspect'"): @@ -9176,7 +8859,7 @@ class OpenStackCloud( with _utils.shade_exceptions( "Error transitioning node to available state"): if wait: - for count in _utils._iterate_timeout( + for count in utils.iterate_timeout( timeout, "Timeout waiting for node transition to " "available state"): @@ -9212,7 +8895,7 @@ class OpenStackCloud( # Note(TheJulia): We need to wait for the lock to clear # before we attempt to set the machine into provide state # which allows for the transition to available. - for count in _utils._iterate_timeout( + for count in utils.iterate_timeout( lock_timeout, "Timeout waiting for reservation to clear " "before setting provide state"): @@ -9311,7 +8994,7 @@ class OpenStackCloud( microversion=version) if wait: - for count in _utils._iterate_timeout( + for count in utils.iterate_timeout( timeout, "Timeout waiting for machine to be deleted"): if not self.get_machine(uuid): @@ -9552,7 +9235,7 @@ class OpenStackCloud( error_message=msg, microversion=version) if wait: - for count in _utils._iterate_timeout( + for count in utils.iterate_timeout( timeout, "Timeout waiting for node transition to " "target state of '%s'" % state): @@ -9776,7 +9459,7 @@ class OpenStackCloud( else: msg = 'Waiting for lock to be released for node {node}'.format( node=node['uuid']) - for count in _utils._iterate_timeout(timeout, msg, 2): + for count in utils.iterate_timeout(timeout, msg, 2): current_node = self.get_machine(node['uuid']) if current_node['reservation'] is None: return @@ -10924,7 +10607,7 @@ class OpenStackCloud( self._identity_client.put(url, error_message=error_msg) if wait: - for count in _utils._iterate_timeout( + for count in utils.iterate_timeout( timeout, "Timeout waiting for role to be granted"): if self.list_role_assignments(filters=filters): @@ -11003,7 +10686,7 @@ class OpenStackCloud( self._identity_client.delete(url, error_message=error_msg) if wait: - for count in _utils._iterate_timeout( + for count in utils.iterate_timeout( timeout, "Timeout waiting for role to be revoked"): if not self.list_role_assignments(filters=filters): diff --git a/shade/tests/functional/test_compute.py b/shade/tests/functional/test_compute.py index c5b0eb6ca..efb8ae333 100644 --- a/shade/tests/functional/test_compute.py +++ b/shade/tests/functional/test_compute.py @@ -20,10 +20,11 @@ Functional tests for `shade` compute methods. from fixtures import TimeoutException import six +from openstack import utils + from shade import exc from shade.tests.functional import base from shade.tests.functional.util import pick_flavor -from shade import _utils class TestCompute(base.BaseFunctionalTestCase): @@ -291,7 +292,7 @@ class TestCompute(base.BaseFunctionalTestCase): # Volumes do not show up as unattached for a bit immediately after # deleting a server that had had a volume attached. Yay for eventual # consistency! 
- for count in _utils._iterate_timeout( + for count in utils.iterate_timeout( 60, 'Timeout waiting for volume {volume_id} to detach'.format( volume_id=volume_id)): diff --git a/shade/tests/functional/test_floating_ip.py b/shade/tests/functional/test_floating_ip.py index e74a23f5a..e835df160 100644 --- a/shade/tests/functional/test_floating_ip.py +++ b/shade/tests/functional/test_floating_ip.py @@ -21,9 +21,9 @@ Functional tests for floating IP resource. import pprint +from openstack import utils from testtools import content -from shade import _utils from shade import meta from shade.exc import OpenStackCloudException from shade.tests.functional import base @@ -193,7 +193,7 @@ class TestFloatingIP(base.BaseFunctionalTestCase): # ToDo: remove the following iteration when create_server waits for # the IP to be attached ip = None - for _ in _utils._iterate_timeout( + for _ in utils.iterate_timeout( self.timeout, "Timeout waiting for IP address to be attached"): ip = meta.get_server_external_ipv4(self.user_cloud, new_server) if ip is not None: @@ -213,7 +213,7 @@ class TestFloatingIP(base.BaseFunctionalTestCase): # ToDo: remove the following iteration when create_server waits for # the IP to be attached ip = None - for _ in _utils._iterate_timeout( + for _ in utils.iterate_timeout( self.timeout, "Timeout waiting for IP address to be attached"): ip = meta.get_server_external_ipv4(self.user_cloud, new_server) if ip is not None: diff --git a/shade/tests/functional/test_volume.py b/shade/tests/functional/test_volume.py index 5425b7146..4a1883291 100644 --- a/shade/tests/functional/test_volume.py +++ b/shade/tests/functional/test_volume.py @@ -18,9 +18,9 @@ Functional tests for `shade` block storage methods. """ from fixtures import TimeoutException +from openstack import utils from testtools import content -from shade import _utils from shade import exc from shade.tests.functional import base @@ -107,7 +107,7 @@ class TestVolume(base.BaseFunctionalTestCase): for v in volume: self.user_cloud.delete_volume(v, wait=False) try: - for count in _utils._iterate_timeout( + for count in utils.iterate_timeout( 180, "Timeout waiting for volume cleanup"): found = False for existing in self.user_cloud.list_volumes(): diff --git a/shade/tests/unit/test__adapter.py b/shade/tests/unit/test__adapter.py deleted file mode 100644 index 68063d65f..000000000 --- a/shade/tests/unit/test__adapter.py +++ /dev/null @@ -1,38 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from testscenarios import load_tests_apply_scenarios as load_tests # noqa - -from shade import _adapter -from shade.tests.unit import base - - -class TestExtractName(base.TestCase): - - scenarios = [ - ('slash_servers_bare', dict(url='/servers', parts=['servers'])), - ('slash_servers_arg', dict(url='/servers/1', parts=['servers'])), - ('servers_bare', dict(url='servers', parts=['servers'])), - ('servers_arg', dict(url='servers/1', parts=['servers'])), - ('networks_bare', dict(url='/v2.0/networks', parts=['networks'])), - ('networks_arg', dict(url='/v2.0/networks/1', parts=['networks'])), - ('tokens', dict(url='/v3/tokens', parts=['tokens'])), - ('discovery', dict(url='/', parts=['discovery'])), - ('secgroups', dict( - url='/servers/1/os-security-groups', - parts=['servers', 'os-security-groups'])), - ] - - def test_extract_name(self): - - results = _adapter.extract_name(self.url) - self.assertEqual(self.parts, results) diff --git a/shade/tests/unit/test__utils.py b/shade/tests/unit/test__utils.py deleted file mode 100644 index a67fdf30d..000000000 --- a/shade/tests/unit/test__utils.py +++ /dev/null @@ -1,385 +0,0 @@ -# -*- coding: utf-8 -*- - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import random -import string -import tempfile -from uuid import uuid4 - -import mock -import testtools - -from shade import _utils -from shade import exc -from shade.tests.unit import base - - -RANGE_DATA = [ - dict(id=1, key1=1, key2=5), - dict(id=2, key1=1, key2=20), - dict(id=3, key1=2, key2=10), - dict(id=4, key1=2, key2=30), - dict(id=5, key1=3, key2=40), - dict(id=6, key1=3, key2=40), -] - - -class TestUtils(base.TestCase): - - def test__filter_list_name_or_id(self): - el1 = dict(id=100, name='donald') - el2 = dict(id=200, name='pluto') - data = [el1, el2] - ret = _utils._filter_list(data, 'donald', None) - self.assertEqual([el1], ret) - - def test__filter_list_name_or_id_special(self): - el1 = dict(id=100, name='donald') - el2 = dict(id=200, name='pluto[2017-01-10]') - data = [el1, el2] - ret = _utils._filter_list(data, 'pluto[2017-01-10]', None) - self.assertEqual([el2], ret) - - def test__filter_list_name_or_id_partial_bad(self): - el1 = dict(id=100, name='donald') - el2 = dict(id=200, name='pluto[2017-01-10]') - data = [el1, el2] - ret = _utils._filter_list(data, 'pluto[2017-01]', None) - self.assertEqual([], ret) - - def test__filter_list_name_or_id_partial_glob(self): - el1 = dict(id=100, name='donald') - el2 = dict(id=200, name='pluto[2017-01-10]') - data = [el1, el2] - ret = _utils._filter_list(data, 'pluto*', None) - self.assertEqual([el2], ret) - - def test__filter_list_name_or_id_non_glob_glob(self): - el1 = dict(id=100, name='donald') - el2 = dict(id=200, name='pluto[2017-01-10]') - data = [el1, el2] - ret = _utils._filter_list(data, 'pluto', None) - self.assertEqual([], ret) - - def test__filter_list_name_or_id_glob(self): - el1 = dict(id=100, name='donald') - el2 = dict(id=200, name='pluto') - el3 = dict(id=200, name='pluto-2') - data = [el1, el2, el3] - ret = _utils._filter_list(data, 
'pluto*', None) - self.assertEqual([el2, el3], ret) - - def test__filter_list_name_or_id_glob_not_found(self): - el1 = dict(id=100, name='donald') - el2 = dict(id=200, name='pluto') - el3 = dict(id=200, name='pluto-2') - data = [el1, el2, el3] - ret = _utils._filter_list(data, 'q*', None) - self.assertEqual([], ret) - - def test__filter_list_unicode(self): - el1 = dict(id=100, name=u'中文', last='duck', - other=dict(category='duck', financial=dict(status='poor'))) - el2 = dict(id=200, name=u'中文', last='trump', - other=dict(category='human', financial=dict(status='rich'))) - el3 = dict(id=300, name='donald', last='ronald mac', - other=dict(category='clown', financial=dict(status='rich'))) - data = [el1, el2, el3] - ret = _utils._filter_list( - data, u'中文', - {'other': { - 'financial': {'status': 'rich'} - }}) - self.assertEqual([el2], ret) - - def test__filter_list_filter(self): - el1 = dict(id=100, name='donald', other='duck') - el2 = dict(id=200, name='donald', other='trump') - data = [el1, el2] - ret = _utils._filter_list(data, 'donald', {'other': 'duck'}) - self.assertEqual([el1], ret) - - def test__filter_list_filter_jmespath(self): - el1 = dict(id=100, name='donald', other='duck') - el2 = dict(id=200, name='donald', other='trump') - data = [el1, el2] - ret = _utils._filter_list(data, 'donald', "[?other == `duck`]") - self.assertEqual([el1], ret) - - def test__filter_list_dict1(self): - el1 = dict(id=100, name='donald', last='duck', - other=dict(category='duck')) - el2 = dict(id=200, name='donald', last='trump', - other=dict(category='human')) - el3 = dict(id=300, name='donald', last='ronald mac', - other=dict(category='clown')) - data = [el1, el2, el3] - ret = _utils._filter_list( - data, 'donald', {'other': {'category': 'clown'}}) - self.assertEqual([el3], ret) - - def test__filter_list_dict2(self): - el1 = dict(id=100, name='donald', last='duck', - other=dict(category='duck', financial=dict(status='poor'))) - el2 = dict(id=200, name='donald', last='trump', - other=dict(category='human', financial=dict(status='rich'))) - el3 = dict(id=300, name='donald', last='ronald mac', - other=dict(category='clown', financial=dict(status='rich'))) - data = [el1, el2, el3] - ret = _utils._filter_list( - data, 'donald', - {'other': { - 'financial': {'status': 'rich'} - }}) - self.assertEqual([el2, el3], ret) - - def test_safe_dict_min_ints(self): - """Test integer comparison""" - data = [{'f1': 3}, {'f1': 2}, {'f1': 1}] - retval = _utils.safe_dict_min('f1', data) - self.assertEqual(1, retval) - - def test_safe_dict_min_strs(self): - """Test integer as strings comparison""" - data = [{'f1': '3'}, {'f1': '2'}, {'f1': '1'}] - retval = _utils.safe_dict_min('f1', data) - self.assertEqual(1, retval) - - def test_safe_dict_min_None(self): - """Test None values""" - data = [{'f1': 3}, {'f1': None}, {'f1': 1}] - retval = _utils.safe_dict_min('f1', data) - self.assertEqual(1, retval) - - def test_safe_dict_min_key_missing(self): - """Test missing key for an entry still works""" - data = [{'f1': 3}, {'x': 2}, {'f1': 1}] - retval = _utils.safe_dict_min('f1', data) - self.assertEqual(1, retval) - - def test_safe_dict_min_key_not_found(self): - """Test key not found in any elements returns None""" - data = [{'f1': 3}, {'f1': 2}, {'f1': 1}] - retval = _utils.safe_dict_min('doesnotexist', data) - self.assertIsNone(retval) - - def test_safe_dict_min_not_int(self): - """Test non-integer key value raises OSCE""" - data = [{'f1': 3}, {'f1': "aaa"}, {'f1': 1}] - with testtools.ExpectedException( - 
            exc.OpenStackCloudException,
-            "Search for minimum value failed. "
-            "Value for f1 is not an integer: aaa"
-        ):
-            _utils.safe_dict_min('f1', data)
-
-    def test_safe_dict_max_ints(self):
-        """Test integer comparison"""
-        data = [{'f1': 3}, {'f1': 2}, {'f1': 1}]
-        retval = _utils.safe_dict_max('f1', data)
-        self.assertEqual(3, retval)
-
-    def test_safe_dict_max_strs(self):
-        """Test integer as strings comparison"""
-        data = [{'f1': '3'}, {'f1': '2'}, {'f1': '1'}]
-        retval = _utils.safe_dict_max('f1', data)
-        self.assertEqual(3, retval)
-
-    def test_safe_dict_max_None(self):
-        """Test None values"""
-        data = [{'f1': 3}, {'f1': None}, {'f1': 1}]
-        retval = _utils.safe_dict_max('f1', data)
-        self.assertEqual(3, retval)
-
-    def test_safe_dict_max_key_missing(self):
-        """Test missing key for an entry still works"""
-        data = [{'f1': 3}, {'x': 2}, {'f1': 1}]
-        retval = _utils.safe_dict_max('f1', data)
-        self.assertEqual(3, retval)
-
-    def test_safe_dict_max_key_not_found(self):
-        """Test key not found in any elements returns None"""
-        data = [{'f1': 3}, {'f1': 2}, {'f1': 1}]
-        retval = _utils.safe_dict_max('doesnotexist', data)
-        self.assertIsNone(retval)
-
-    def test_safe_dict_max_not_int(self):
-        """Test non-integer key value raises OSCE"""
-        data = [{'f1': 3}, {'f1': "aaa"}, {'f1': 1}]
-        with testtools.ExpectedException(
-            exc.OpenStackCloudException,
-            "Search for maximum value failed. "
-            "Value for f1 is not an integer: aaa"
-        ):
-            _utils.safe_dict_max('f1', data)
-
-    def test_parse_range_None(self):
-        self.assertIsNone(_utils.parse_range(None))
-
-    def test_parse_range_invalid(self):
-        self.assertIsNone(_utils.parse_range("<invalid"))
-
-    def test_parse_range_int_only(self):
-        retval = _utils.parse_range("1024")
-        self.assertIsInstance(retval, tuple)
-        self.assertIsNone(retval[0])
-        self.assertEqual(1024, retval[1])
-
-    def test_parse_range_lt(self):
-        retval = _utils.parse_range("<1024")
-        self.assertIsInstance(retval, tuple)
-        self.assertEqual("<", retval[0])
-        self.assertEqual(1024, retval[1])
-
-    def test_parse_range_gt(self):
-        retval = _utils.parse_range(">1024")
-        self.assertIsInstance(retval, tuple)
-        self.assertEqual(">", retval[0])
-        self.assertEqual(1024, retval[1])
-
-    def test_parse_range_le(self):
-        retval = _utils.parse_range("<=1024")
-        self.assertIsInstance(retval, tuple)
-        self.assertEqual("<=", retval[0])
-        self.assertEqual(1024, retval[1])
-
-    def test_parse_range_ge(self):
-        retval = _utils.parse_range(">=1024")
-        self.assertIsInstance(retval, tuple)
-        self.assertEqual(">=", retval[0])
-        self.assertEqual(1024, retval[1])
-
-    def test_range_filter_min(self):
-        retval = _utils.range_filter(RANGE_DATA, "key1", "min")
-        self.assertIsInstance(retval, list)
-        self.assertEqual(2, len(retval))
-        self.assertEqual(RANGE_DATA[:2], retval)
-
-    def test_range_filter_max(self):
-        retval = _utils.range_filter(RANGE_DATA, "key1", "max")
-        self.assertIsInstance(retval, list)
-        self.assertEqual(2, len(retval))
-        self.assertEqual(RANGE_DATA[-2:], retval)
-
-    def test_range_filter_range(self):
-        retval = _utils.range_filter(RANGE_DATA, "key1", "<3")
-        self.assertIsInstance(retval, list)
-        self.assertEqual(4, len(retval))
-        self.assertEqual(RANGE_DATA[:4], retval)
-
-    def test_range_filter_exact(self):
-        retval = _utils.range_filter(RANGE_DATA, "key1", "2")
-        self.assertIsInstance(retval, list)
-        self.assertEqual(2, len(retval))
-        self.assertEqual(RANGE_DATA[2:4], retval)
-
-    def test_range_filter_invalid_int(self):
-        with testtools.ExpectedException(
-            exc.OpenStackCloudException,
-            "Invalid range value: <1A0"
-        ):
-            _utils.range_filter(RANGE_DATA, "key1", "<1A0")
-
-    def test_range_filter_invalid_op(self):
-        with testtools.ExpectedException(
-            exc.OpenStackCloudException,
-            "Invalid range value: <>100"
-        ):
-            _utils.range_filter(RANGE_DATA, "key1", "<>100")
-
-    def test_file_segment(self):
-        file_size = 4200
-        content = ''.join(random.SystemRandom().choice(
-            string.ascii_uppercase + string.digits)
-            for _ in 
range(file_size)).encode('latin-1') - self.imagefile = tempfile.NamedTemporaryFile(delete=False) - self.imagefile.write(content) - self.imagefile.close() - - segments = self.cloud._get_file_segments( - endpoint='test_container/test_image', - filename=self.imagefile.name, - file_size=file_size, - segment_size=1000) - self.assertEqual(len(segments), 5) - segment_content = b'' - for (index, (name, segment)) in enumerate(segments.items()): - self.assertEqual( - 'test_container/test_image/{index:0>6}'.format(index=index), - name) - segment_content += segment.read() - self.assertEqual(content, segment_content) - - def test_get_entity_pass_object(self): - obj = mock.Mock(id=uuid4().hex) - self.cloud.use_direct_get = True - self.assertEqual(obj, _utils._get_entity(self.cloud, '', obj, {})) - - def test_get_entity_pass_dict(self): - d = dict(id=uuid4().hex) - self.cloud.use_direct_get = True - self.assertEqual(d, _utils._get_entity(self.cloud, '', d, {})) - - def test_get_entity_no_use_direct_get(self): - # test we are defaulting to the search_ methods - # if the use_direct_get flag is set to False(default). - uuid = uuid4().hex - resource = 'network' - func = 'search_%ss' % resource - filters = {} - with mock.patch.object(self.cloud, func) as search: - _utils._get_entity(self.cloud, resource, uuid, filters) - search.assert_called_once_with(uuid, filters) - - def test_get_entity_no_uuid_like(self): - # test we are defaulting to the search_ methods - # if the name_or_id param is a name(string) but not a uuid. - self.cloud.use_direct_get = True - name = 'name_no_uuid' - resource = 'network' - func = 'search_%ss' % resource - filters = {} - with mock.patch.object(self.cloud, func) as search: - _utils._get_entity(self.cloud, resource, name, filters) - search.assert_called_once_with(name, filters) - - def test_get_entity_pass_uuid(self): - uuid = uuid4().hex - self.cloud.use_direct_get = True - resources = ['flavor', 'image', 'volume', 'network', - 'subnet', 'port', 'floating_ip', 'security_group'] - for r in resources: - f = 'get_%s_by_id' % r - with mock.patch.object(self.cloud, f) as get: - _utils._get_entity(self.cloud, r, uuid, {}) - get.assert_called_once_with(uuid) - - def test_get_entity_pass_search_methods(self): - self.cloud.use_direct_get = True - resources = ['flavor', 'image', 'volume', 'network', - 'subnet', 'port', 'floating_ip', 'security_group'] - filters = {} - name = 'name_no_uuid' - for r in resources: - f = 'search_%ss' % r - with mock.patch.object(self.cloud, f) as search: - _utils._get_entity(self.cloud, r, name, {}) - search.assert_called_once_with(name, filters) - - def test_get_entity_get_and_search(self): - resources = ['flavor', 'image', 'volume', 'network', - 'subnet', 'port', 'floating_ip', 'security_group'] - for r in resources: - self.assertTrue(hasattr(self.cloud, 'get_%s_by_id' % r)) - self.assertTrue(hasattr(self.cloud, 'search_%ss' % r)) diff --git a/shade/tests/unit/test_baremetal_node.py b/shade/tests/unit/test_baremetal_node.py index 42f852a4d..1a25d5f25 100644 --- a/shade/tests/unit/test_baremetal_node.py +++ b/shade/tests/unit/test_baremetal_node.py @@ -871,7 +871,7 @@ class TestBaremetalNode(base.IronicTestCase): ]) self.assertRaisesRegexp( exc.OpenStackCloudException, - '^Baremetal .* to dummy.*/states/provision invalid state$', + '^Baremetal .* to dummy.*/states/provision.*invalid state', self.op_cloud.node_set_provision_state, self.fake_baremetal_node['uuid'], 'dummy') @@ -891,7 +891,7 @@ class TestBaremetalNode(base.IronicTestCase): ]) 
self.assertRaisesRegexp( exc.OpenStackCloudException, - '^Baremetal .* to dummy.*/states/provision$', + '^Baremetal .* to dummy.*/states/provision', self.op_cloud.node_set_provision_state, self.fake_baremetal_node['uuid'], 'dummy') diff --git a/shade/tests/unit/test_domains.py b/shade/tests/unit/test_domains.py index b1996c1c5..1059b00b7 100644 --- a/shade/tests/unit/test_domains.py +++ b/shade/tests/unit/test_domains.py @@ -15,6 +15,7 @@ import uuid +import openstack.exceptions import testtools from testtools import matchers @@ -203,7 +204,7 @@ class TestDomains(base.RequestsMockTestCase): json=domain_data.json_response, validate=dict(json={'domain': {'enabled': False}}))]) with testtools.ExpectedException( - shade.OpenStackCloudHTTPError, + openstack.exceptions.ConflictException, "Error in updating domain %s" % domain_data.domain_id ): self.op_cloud.delete_domain(domain_data.domain_id) diff --git a/shade/tests/unit/test_project.py b/shade/tests/unit/test_project.py index f836bc0d2..d31064065 100644 --- a/shade/tests/unit/test_project.py +++ b/shade/tests/unit/test_project.py @@ -14,7 +14,6 @@ import testtools from testtools import matchers import shade -import shade._utils from shade.tests.unit import base diff --git a/shade/tests/unit/test_shade.py b/shade/tests/unit/test_shade.py index 19356cccf..d40ff000b 100644 --- a/shade/tests/unit/test_shade.py +++ b/shade/tests/unit/test_shade.py @@ -16,7 +16,6 @@ import uuid import testtools import shade -from shade import _utils from shade import exc from shade.tests import fakes from shade.tests.unit import base @@ -379,40 +378,6 @@ class TestShade(base.RequestsMockTestCase): self.assert_calls() - def test_iterate_timeout_bad_wait(self): - with testtools.ExpectedException( - exc.OpenStackCloudException, - "Wait value must be an int or float value."): - for count in _utils._iterate_timeout( - 1, "test_iterate_timeout_bad_wait", wait="timeishard"): - pass - - @mock.patch('time.sleep') - def test_iterate_timeout_str_wait(self, mock_sleep): - iter = _utils._iterate_timeout( - 10, "test_iterate_timeout_str_wait", wait="1.6") - next(iter) - next(iter) - mock_sleep.assert_called_with(1.6) - - @mock.patch('time.sleep') - def test_iterate_timeout_int_wait(self, mock_sleep): - iter = _utils._iterate_timeout( - 10, "test_iterate_timeout_int_wait", wait=1) - next(iter) - next(iter) - mock_sleep.assert_called_with(1.0) - - @mock.patch('time.sleep') - def test_iterate_timeout_timeout(self, mock_sleep): - message = "timeout test" - with testtools.ExpectedException( - exc.OpenStackCloudTimeout, - message): - for count in _utils._iterate_timeout(0.1, message, wait=1): - pass - mock_sleep.assert_called_with(1.0) - def test__nova_extensions(self): body = [ { diff --git a/shade/tests/unit/test_shade_operator.py b/shade/tests/unit/test_shade_operator.py index 98296a841..0a7bc794e 100644 --- a/shade/tests/unit/test_shade_operator.py +++ b/shade/tests/unit/test_shade_operator.py @@ -83,7 +83,7 @@ class TestShadeOperator(base.RequestsMockTestCase): session_mock.get_endpoint.side_effect = side_effect get_session_mock.return_value = session_mock self.op_cloud.name = 'testcloud' - self.op_cloud.region_name = 'testregion' + self.op_cloud.config.region_name = 'testregion' with testtools.ExpectedException( exc.OpenStackCloudException, "Error getting image endpoint on testcloud:testregion:"
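
The test fix above follows from the accessor change in openstackcloud.py:
region_name now lives on the openstack.config object, and the class keeps a
read-only property for backwards compatibility. A minimal sketch of the
resulting behavior (the cloud and region names are illustrative assumptions):

    import shade

    cloud = shade.openstack_cloud(cloud='devstack', region_name='RegionOne')

    # Both spellings resolve to the same value; shade's attribute is now
    # just a property delegating to the sdk config object.
    assert cloud.region_name == cloud.config.region_name == 'RegionOne'
    assert cloud.get_region() == 'RegionOne'

    # Because the property has no setter, code that previously assigned to
    # cloud.region_name (as the old test did) must now assign through the
    # config object instead:
    cloud.config.region_name = 'RegionTwo'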