Replace catalog-v3.json with keystoneauth fixture
As a first step toward a reusable fixture, replace our use of catalog-v3.json with a fixture built on top of the keystoneauth token fixtures, and generate a catalog containing all of the OpenStack services.

A handful of hardcoded values change with the auto-generated catalog - for example, bare-metal becomes baremetal. Those are harmless. There are also places where the test code requests an admin URL, but in the old catalog the admin and non-admin endpoints were the same, so we were not catching that this was incorrect. We started to fix this, but it got hairy, and it is only a keystone v2 issue, which is on life support. For ease of landing this patch, keep the admin and public URLs the same; we can make them different and audit all the keystone v2 code paths later if anyone decides to care about keystone v2.

There were also a number of things hardcoded for block-storage that break once we actually use the proxies, which in turn breaks when we try to properly use a better catalog fixture. As a follow-up, we should stop forcing block_storage_api_version=2, because that is absurd, but this is a big enough patch as it is.

Also, the volume quota calls were in the image file. Oops.

Change-Id: I308cd159a5b71c94511f86c9d46bdbc589580c6d
commit 5534590861
parent 658cd3a7f1
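The heart of the change is that the service catalog in test tokens is now generated with keystoneauth's token fixtures rather than read from a static JSON file. A minimal sketch of that idea, using an illustrative service name and URLs (the real generation lives in the new openstack/fixture/connection.py further down):

    # Sketch only; service type, name and URLs here are example assumptions.
    from keystoneauth1.fixture import v3

    token = v3.Token(project_id='example-project-id')
    svc = token.add_service('block-storage', name='cinder')
    svc.add_standard_endpoints(
        region='RegionOne',
        public='https://block-storage.example.com',
        internal='https://internal.block-storage.example.com',
        admin='https://block-storage.example.com')
    # `token` is dict-like and can be returned as the JSON body of a mocked
    # POST /v3/auth/tokens response.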
@@ -33,13 +33,6 @@ def _no_pending_volumes(volumes):
 
 class BlockStorageCloudMixin(_normalize.Normalizer):
 
-    @property
-    def _volume_client(self):
-        if 'block-storage' not in self._raw_clients:
-            client = self._get_raw_client('block-storage')
-            self._raw_clients['block-storage'] = client
-        return self._raw_clients['block-storage']
-
     @_utils.cache_on_arguments(should_cache_fn=_no_pending_volumes)
     def list_volumes(self, cache=True):
         """List all available volumes.
@@ -56,7 +49,8 @@ class BlockStorageCloudMixin(_normalize.Normalizer):
                     break
             if endpoint:
                 try:
-                    _list(self._volume_client.get(endpoint))
+                    _list(proxy._json_response(
+                        self.block_storage.get(endpoint)))
                 except exc.OpenStackCloudURINotFound:
                     # Catch and re-raise here because we are making recursive
                     # calls and we just have context for the log here
@@ -75,7 +69,8 @@ class BlockStorageCloudMixin(_normalize.Normalizer):
         attempts = 5
         for _ in range(attempts):
             volumes = []
-            data = self._volume_client.get('/volumes/detail')
+            data = proxy._json_response(
+                self.block_storage.get('/volumes/detail'))
             if 'volumes_links' not in data:
                 # no pagination needed
                 volumes.extend(data.get('volumes', []))
@@ -103,9 +98,11 @@ class BlockStorageCloudMixin(_normalize.Normalizer):
         :returns: A list of volume ``munch.Munch``.
 
         """
-        data = self._volume_client.get(
+        resp = self.block_storage.get(
             '/types',
-            params=dict(is_public='None'),
+            params=dict(is_public='None'))
+        data = proxy._json_response(
+            resp,
             error_message='Error fetching volume_type list')
         return self._normalize_volume_types(
             self._get_and_munchify('volume_types', data))
@@ -141,8 +138,9 @@ class BlockStorageCloudMixin(_normalize.Normalizer):
         :param id: ID of the volume.
         :returns: A volume ``munch.Munch``.
         """
-        data = self._volume_client.get(
-            '/volumes/{id}'.format(id=id),
+        resp = self.block_storage.get('/volumes/{id}'.format(id=id))
+        data = proxy._json_response(
+            resp,
             error_message="Error getting volume with ID {id}".format(id=id)
         )
         volume = self._normalize_volume(
@@ -214,9 +212,11 @@ class BlockStorageCloudMixin(_normalize.Normalizer):
         if 'scheduler_hints' in kwargs:
             payload['OS-SCH-HNT:scheduler_hints'] = kwargs.pop(
                 'scheduler_hints', None)
-        data = self._volume_client.post(
+        resp = self.block_storage.post(
             '/volumes',
-            json=dict(payload),
+            json=dict(payload))
+        data = proxy._json_response(
+            resp,
             error_message='Error in creating volume')
         volume = self._get_and_munchify('volume', data)
         self.list_volumes.invalidate(self)
@@ -254,9 +254,11 @@ class BlockStorageCloudMixin(_normalize.Normalizer):
             raise exc.OpenStackCloudException(
                 "Volume %s not found." % name_or_id)
 
-        data = self._volume_client.put(
+        resp = self.block_storage.put(
             '/volumes/{volume_id}'.format(volume_id=volume.id),
-            json=dict({'volume': kwargs}),
+            json=dict({'volume': kwargs}))
+        data = proxy._json_response(
+            resp,
             error_message='Error updating volume')
 
         self.list_volumes.invalidate(self)
@@ -281,9 +283,11 @@ class BlockStorageCloudMixin(_normalize.Normalizer):
                 "Volume {name_or_id} does not exist".format(
                     name_or_id=name_or_id))
 
-        self._volume_client.post(
+        resp = self.block_storage.post(
             'volumes/{id}/action'.format(id=volume['id']),
-            json={'os-set_bootable': {'bootable': bootable}},
+            json={'os-set_bootable': {'bootable': bootable}})
+        proxy._json_response(
+            resp,
             error_message="Error setting bootable on volume {volume}".format(
                 volume=volume['id'])
         )
@@ -315,12 +319,12 @@ class BlockStorageCloudMixin(_normalize.Normalizer):
         with _utils.shade_exceptions("Error in deleting volume"):
             try:
                 if force:
-                    self._volume_client.post(
+                    proxy._json_response(self.block_storage.post(
                         'volumes/{id}/action'.format(id=volume['id']),
-                        json={'os-force_delete': None})
+                        json={'os-force_delete': None}))
                 else:
-                    self._volume_client.delete(
-                        'volumes/{id}'.format(id=volume['id']))
+                    proxy._json_response(self.block_storage.delete(
+                        'volumes/{id}'.format(id=volume['id'])))
             except exc.OpenStackCloudURINotFound:
                 self.log.debug(
                     "Volume {id} not found when deleting. Ignoring.".format(
@@ -368,7 +372,8 @@ class BlockStorageCloudMixin(_normalize.Normalizer):
             error_msg = "{msg} for the project: {project} ".format(
                 msg=error_msg, project=name_or_id)
 
-        data = self._volume_client.get('/limits', params=params)
+        data = proxy._json_response(
+            self.block_storage.get('/limits', params=params))
         limits = self._get_and_munchify('limits', data)
         return limits
 
@@ -516,12 +521,12 @@ class BlockStorageCloudMixin(_normalize.Normalizer):
         description = kwargs.pop('description',
                                  kwargs.pop('display_description', None))
         if name:
-            if self._is_client_version('volume', 2):
+            if self.block_storage._version_matches(2):
                 kwargs['name'] = name
             else:
                 kwargs['display_name'] = name
         if description:
-            if self._is_client_version('volume', 2):
+            if self.block_storage._version_matches(2):
                 kwargs['description'] = description
             else:
                 kwargs['display_description'] = description
@@ -553,9 +558,11 @@ class BlockStorageCloudMixin(_normalize.Normalizer):
         kwargs = self._get_volume_kwargs(kwargs)
         payload = {'volume_id': volume_id, 'force': force}
         payload.update(kwargs)
-        data = self._volume_client.post(
+        resp = self.block_storage.post(
             '/snapshots',
-            json=dict(snapshot=payload),
+            json=dict(snapshot=payload))
+        data = proxy._json_response(
+            resp,
             error_message="Error creating snapshot of volume "
                           "{volume_id}".format(volume_id=volume_id))
         snapshot = self._get_and_munchify('snapshot', data)
@@ -588,8 +595,10 @@ class BlockStorageCloudMixin(_normalize.Normalizer):
         param: snapshot_id: ID of the volume snapshot.
 
         """
-        data = self._volume_client.get(
-            '/snapshots/{snapshot_id}'.format(snapshot_id=snapshot_id),
+        resp = self.block_storage.get(
+            '/snapshots/{snapshot_id}'.format(snapshot_id=snapshot_id))
+        data = proxy._json_response(
+            resp,
             error_message="Error getting snapshot "
                           "{snapshot_id}".format(snapshot_id=snapshot_id))
         return self._normalize_volume(
@@ -647,8 +656,10 @@ class BlockStorageCloudMixin(_normalize.Normalizer):
             'force': force,
         }
 
-        data = self._volume_client.post(
-            '/backups', json=dict(backup=payload),
+        resp = self.block_storage.post(
+            '/backups', json=dict(backup=payload))
+        data = proxy._json_response(
+            resp,
             error_message="Error creating backup of volume "
                           "{volume_id}".format(volume_id=volume_id))
         backup = self._get_and_munchify('backup', data)
@@ -686,9 +697,11 @@ class BlockStorageCloudMixin(_normalize.Normalizer):
 
         """
         endpoint = '/snapshots/detail' if detailed else '/snapshots'
-        data = self._volume_client.get(
+        resp = self.block_storage.get(
             endpoint,
-            params=search_opts,
+            params=search_opts)
+        data = proxy._json_response(
+            resp,
             error_message="Error getting a list of snapshots")
         return self._get_and_munchify('snapshots', data)
 
@@ -710,8 +723,10 @@ class BlockStorageCloudMixin(_normalize.Normalizer):
         :returns: A list of volume backups ``munch.Munch``.
         """
         endpoint = '/backups/detail' if detailed else '/backups'
-        data = self._volume_client.get(
-            endpoint, params=search_opts,
+        resp = self.block_storage.get(
+            endpoint, params=search_opts)
+        data = proxy._json_response(
+            resp,
             error_message="Error getting a list of backups")
         return self._get_and_munchify('backups', data)
 
@@ -736,16 +751,15 @@ class BlockStorageCloudMixin(_normalize.Normalizer):
 
         msg = "Error in deleting volume backup"
         if force:
-            self._volume_client.post(
+            resp = self.block_storage.post(
                 '/backups/{backup_id}/action'.format(
                     backup_id=volume_backup['id']),
-                json={'os-force_delete': None},
-                error_message=msg)
+                json={'os-force_delete': None})
         else:
-            self._volume_client.delete(
+            resp = self.block_storage.delete(
                 '/backups/{backup_id}'.format(
-                    backup_id=volume_backup['id']),
-                error_message=msg)
+                    backup_id=volume_backup['id']))
+        proxy._json_response(resp, error_message=msg)
         if wait:
             msg = "Timeout waiting for the volume backup to be deleted."
             for count in utils.iterate_timeout(timeout, msg):
@@ -772,9 +786,11 @@ class BlockStorageCloudMixin(_normalize.Normalizer):
         if not volumesnapshot:
             return False
 
-        self._volume_client.delete(
+        resp = self.block_storage.delete(
             '/snapshots/{snapshot_id}'.format(
-                snapshot_id=volumesnapshot['id']),
+                snapshot_id=volumesnapshot['id']))
+        proxy._json_response(
+            resp,
             error_message="Error in deleting volume snapshot")
 
         if wait:
@@ -818,8 +834,10 @@ class BlockStorageCloudMixin(_normalize.Normalizer):
             raise exc.OpenStackCloudException(
                 "VolumeType not found: %s" % name_or_id)
 
-        data = self._volume_client.get(
-            '/types/{id}/os-volume-type-access'.format(id=volume_type.id),
+        resp = self.block_storage.get(
+            '/types/{id}/os-volume-type-access'.format(id=volume_type.id))
+        data = proxy._json_response(
+            resp,
             error_message="Unable to get volume type access"
                           " {name}".format(name=name_or_id))
         return self._normalize_volume_type_accesses(
@@ -839,14 +857,15 @@ class BlockStorageCloudMixin(_normalize.Normalizer):
         if not volume_type:
             raise exc.OpenStackCloudException(
                 "VolumeType not found: %s" % name_or_id)
-        with _utils.shade_exceptions():
-            payload = {'project': project_id}
-            self._volume_client.post(
-                '/types/{id}/action'.format(id=volume_type.id),
-                json=dict(addProjectAccess=payload),
-                error_message="Unable to authorize {project} "
-                              "to use volume type {name}".format(
-                                  name=name_or_id, project=project_id))
+        payload = {'project': project_id}
+        resp = self.block_storage.post(
+            '/types/{id}/action'.format(id=volume_type.id),
+            json=dict(addProjectAccess=payload))
+        proxy._json_response(
+            resp,
+            error_message="Unable to authorize {project} "
+                          "to use volume type {name}".format(
+                              name=name_or_id, project=project_id))
 
     def remove_volume_type_access(self, name_or_id, project_id):
         """Revoke access on a volume_type to a project.
@@ -860,11 +879,72 @@ class BlockStorageCloudMixin(_normalize.Normalizer):
         if not volume_type:
             raise exc.OpenStackCloudException(
                 "VolumeType not found: %s" % name_or_id)
-        with _utils.shade_exceptions():
-            payload = {'project': project_id}
-            self._volume_client.post(
-                '/types/{id}/action'.format(id=volume_type.id),
-                json=dict(removeProjectAccess=payload),
-                error_message="Unable to revoke {project} "
-                              "to use volume type {name}".format(
-                                  name=name_or_id, project=project_id))
+        payload = {'project': project_id}
+        resp = self.block_storage.post(
+            '/types/{id}/action'.format(id=volume_type.id),
+            json=dict(removeProjectAccess=payload))
+        proxy._json_response(
+            resp,
+            error_message="Unable to revoke {project} "
+                          "to use volume type {name}".format(
+                              name=name_or_id, project=project_id))
+
+    def set_volume_quotas(self, name_or_id, **kwargs):
+        """ Set a volume quota in a project
+
+        :param name_or_id: project name or id
+        :param kwargs: key/value pairs of quota name and quota value
+
+        :raises: OpenStackCloudException if the resource to set the
+            quota does not exist.
+        """
+
+        proj = self.get_project(name_or_id)
+        if not proj:
+            raise exc.OpenStackCloudException("project does not exist")
+
+        kwargs['tenant_id'] = proj.id
+        resp = self.block_storage.put(
+            '/os-quota-sets/{tenant_id}'.format(tenant_id=proj.id),
+            json={'quota_set': kwargs})
+        proxy._json_response(
+            resp,
+            error_message="No valid quota or resource")
+
+    def get_volume_quotas(self, name_or_id):
+        """ Get volume quotas for a project
+
+        :param name_or_id: project name or id
+        :raises: OpenStackCloudException if it's not a valid project
+
+        :returns: Munch object with the quotas
+        """
+        proj = self.get_project(name_or_id)
+        if not proj:
+            raise exc.OpenStackCloudException("project does not exist")
+
+        resp = self.block_storage.get(
+            '/os-quota-sets/{tenant_id}'.format(tenant_id=proj.id))
+        data = proxy._json_response(
+            resp,
+            error_message="cinder client call failed")
+        return self._get_and_munchify('quota_set', data)
+
+    def delete_volume_quotas(self, name_or_id):
+        """ Delete volume quotas for a project
+
+        :param name_or_id: project name or id
+        :raises: OpenStackCloudException if it's not a valid project or the
+            cinder client call failed
+
+        :returns: dict with the quotas
+        """
+        proj = self.get_project(name_or_id)
+        if not proj:
+            raise exc.OpenStackCloudException("project does not exist")
+
+        resp = self.block_storage.delete(
+            '/os-quota-sets/{tenant_id}'.format(tenant_id=proj.id))
+        return proxy._json_response(
+            resp,
+            error_message="cinder client call failed")
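Every converted call above follows the same shape: issue the request through the block_storage proxy, then pass the raw response to proxy._json_response, which raises on HTTP errors (using the given error_message) and returns the decoded JSON body. A small illustration of that pattern, not code from the patch itself (`conn` is assumed to be an openstack.connection.Connection):

    from openstack import proxy

    def fetch_volume(conn, volume_id):
        # Issue the request through the block-storage proxy from the catalog,
        resp = conn.block_storage.get('/volumes/{id}'.format(id=volume_id))
        # then let the helper raise on errors and decode the JSON body.
        data = proxy._json_response(
            resp, error_message="Error getting volume {id}".format(id=volume_id))
        return data.get('volume')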
@@ -328,57 +328,3 @@ class ImageCloudMixin(_normalize.Normalizer):
         image = image or name_or_id
         return self.image.update_image_properties(
             image=image, meta=meta, **properties)
-
-    def set_volume_quotas(self, name_or_id, **kwargs):
-        """ Set a volume quota in a project
-
-        :param name_or_id: project name or id
-        :param kwargs: key/value pairs of quota name and quota value
-
-        :raises: OpenStackCloudException if the resource to set the
-            quota does not exist.
-        """
-
-        proj = self.get_project(name_or_id)
-        if not proj:
-            raise exc.OpenStackCloudException("project does not exist")
-
-        kwargs['tenant_id'] = proj.id
-        self._volume_client.put(
-            '/os-quota-sets/{tenant_id}'.format(tenant_id=proj.id),
-            json={'quota_set': kwargs},
-            error_message="No valid quota or resource")
-
-    def get_volume_quotas(self, name_or_id):
-        """ Get volume quotas for a project
-
-        :param name_or_id: project name or id
-        :raises: OpenStackCloudException if it's not a valid project
-
-        :returns: Munch object with the quotas
-        """
-        proj = self.get_project(name_or_id)
-        if not proj:
-            raise exc.OpenStackCloudException("project does not exist")
-
-        data = self._volume_client.get(
-            '/os-quota-sets/{tenant_id}'.format(tenant_id=proj.id),
-            error_message="cinder client call failed")
-        return self._get_and_munchify('quota_set', data)
-
-    def delete_volume_quotas(self, name_or_id):
-        """ Delete volume quotas for a project
-
-        :param name_or_id: project name or id
-        :raises: OpenStackCloudException if it's not a valid project or the
-            cinder client call failed
-
-        :returns: dict with the quotas
-        """
-        proj = self.get_project(name_or_id)
-        if not proj:
-            raise exc.OpenStackCloudException("project does not exist")
-
-        return self._volume_client.delete(
-            '/os-quota-sets/{tenant_id}'.format(tenant_id=proj.id),
-            error_message="cinder client call failed")
@@ -453,7 +453,8 @@ class _OpenStackCloudMixin(object):
             region_name=self.config.get_region_name(service_type))
 
     def _is_client_version(self, client, version):
-        client_name = '_{client}_client'.format(client=client)
+        client_name = '_{client}_client'.format(
+            client=client.replace('-', '_'))
         client = getattr(self, client_name)
         return client._version_matches(version)
 
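The fix above matters because official service types such as block-storage contain hyphens, which cannot appear in Python attribute names. A quick illustration of the resulting mapping (hypothetical helper, not from the patch):

    def _client_attr(client):
        # 'block-storage' -> '_block_storage_client'
        return '_{client}_client'.format(client=client.replace('-', '_'))

    assert _client_attr('block-storage') == '_block_storage_client'
    assert _client_attr('volume') == '_volume_client'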
openstack/fixture/__init__.py (new file, 0 lines)
openstack/fixture/connection.py (new file, 107 lines)
@@ -0,0 +1,107 @@
+# Copyright 2019 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+import fixtures
+from keystoneauth1.fixture import v2
+from keystoneauth1.fixture import v3
+import os_service_types
+
+_service_type_manager = os_service_types.ServiceTypes()
+
+_SUBURL_TEMPLATES = {
+    'public': 'https://example.com/{service_type}',
+    'internal': 'https://internal.example.com/{service_type}',
+    'admin': 'https://example.com/{service_type}',
+}
+_ENDPOINT_TEMPLATES = {
+    'public': 'https://{service_type}.example.com',
+    'internal': 'https://internal.{service_type}.example.com',
+    'admin': 'https://{service_type}.example.com',
+}
+
+
+class ConnectionFixture(fixtures.Fixture):
+
+    _suffixes = {
+        'baremetal': '/',
+        'block-storage': '/{project_id}',
+        'compute': '/v2.1/',
+        'container-infrastructure-management': '/v1',
+        'object-store': '/v1/{project_id}',
+        'orchestration': '/v1/{project_id}',
+        'volumev2': '/v2/{project_id}',
+        'volumev3': '/v3/{project_id}',
+    }
+
+    def __init__(self, suburl=False, project_id=None, *args, **kwargs):
+        super(ConnectionFixture, self).__init__(*args, **kwargs)
+        self._endpoint_templates = _ENDPOINT_TEMPLATES
+        if suburl:
+            self.use_suburl()
+        self.project_id = project_id or uuid.uuid4().hex.replace('-', '')
+        self.build_tokens()
+
+    def use_suburl(self):
+        self._endpoint_templates = _SUBURL_TEMPLATES
+
+    def _get_endpoint_templates(self, service_type, alias=None, v2=False):
+        templates = {}
+        for k, v in self._endpoint_templates.items():
+            suffix = self._suffixes.get(
+                alias, self._suffixes.get(service_type, ''))
+            # For a keystone v2 catalog, we want to list the
+            # versioned endpoint in the catalog, because that's
+            # more likely how those were deployed.
+            if v2:
+                suffix = '/v2.0'
+            templates[k] = (v + suffix).format(
+                service_type=service_type,
+                project_id=self.project_id,
+            )
+        return templates
+
+    def _setUp(self):
+        pass
+
+    def clear_tokens(self):
+        self.v2_token = v2.Token(tenant_id=self.project_id)
+        self.v3_token = v3.Token(project_id=self.project_id)
+
+    def build_tokens(self):
+        self.clear_tokens()
+        for service in _service_type_manager.services:
+            service_type = service['service_type']
+            if service_type == 'ec2-api':
+                continue
+            service_name = service['project']
+            ets = self._get_endpoint_templates(service_type)
+            v3_svc = self.v3_token.add_service(
+                service_type, name=service_name)
+            v2_svc = self.v2_token.add_service(
+                service_type, name=service_name)
+            v3_svc.add_standard_endpoints(region='RegionOne', **ets)
+            if service_type == 'identity':
+                ets = self._get_endpoint_templates(service_type, v2=True)
+            v2_svc.add_endpoint(region='RegionOne', **ets)
+            for alias in service.get('aliases', []):
+                ets = self._get_endpoint_templates(service_type, alias=alias)
+                v3_svc = self.v3_token.add_service(alias, name=service_name)
+                v2_svc = self.v2_token.add_service(alias, name=service_name)
+                v3_svc.add_standard_endpoints(region='RegionOne', **ets)
+                v2_svc.add_endpoint(region='RegionOne', **ets)
+
+    def _cleanup(self):
+        pass
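As a rough sketch of how the fixture is meant to be consumed in a test (the project id is an arbitrary example value, and the assertion only relies on the dict-like shape of the keystoneauth V3 token fixture):

    import testtools

    from openstack.fixture import connection as os_fixture


    class ExampleTest(testtools.TestCase):

        def setUp(self):
            super(ExampleTest, self).setUp()
            # The fixture pre-builds v2 and v3 tokens with a full catalog.
            self.os_fixture = self.useFixture(
                os_fixture.ConnectionFixture(project_id='abc123'))

        def test_catalog_contains_block_storage(self):
            catalog = self.os_fixture.v3_token['token']['catalog']
            self.assertIn(
                'block-storage', [entry['type'] for entry in catalog])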
@@ -38,13 +38,13 @@ class TestVolumeType(base.BaseFunctionalTest):
             "name": 'test-volume-type',
             "description": None,
             "os-volume-type-access:is_public": False}
-        self.operator_cloud._volume_client.post(
+        self.operator_cloud.block_storage.post(
             '/types', json={'volume_type': volume_type})
 
     def tearDown(self):
         ret = self.operator_cloud.get_volume_type('test-volume-type')
         if ret.get('id'):
-            self.operator_cloud._volume_client.delete(
+            self.operator_cloud.block_storage.delete(
                 '/types/{volume_type_id}'.format(volume_type_id=ret.id))
         super(TestVolumeType, self).tearDown()
 
@@ -29,6 +29,8 @@ import tempfile
 
 import openstack.cloud
 import openstack.connection
+from openstack.tests import fakes
+from openstack.fixture import connection as os_fixture
 from openstack.tests import base
 
 
@@ -101,6 +103,8 @@ class TestCase(base.TestCase):
                 'time.sleep',
                 _nosleep))
         self.fixtures_directory = 'openstack/tests/unit/fixtures'
+        self.os_fixture = self.useFixture(
+            os_fixture.ConnectionFixture(project_id=fakes.PROJECT_ID))
 
         # Isolate openstack.config from test environment
         config = tempfile.NamedTemporaryFile(delete=False)
@@ -422,45 +426,42 @@ class TestCase(base.TestCase):
 
     def get_keystone_v3_token(
             self,
-            catalog='catalog-v3.json',
             project_name='admin',
     ):
-        catalog_file = os.path.join(self.fixtures_directory, catalog)
-        with open(catalog_file, 'r') as tokens_file:
-            return dict(
-                method='POST',
-                uri='https://identity.example.com/v3/auth/tokens',
-                headers={
-                    'X-Subject-Token': self.getUniqueString('KeystoneToken')
-                },
-                text=tokens_file.read(),
-                validate=dict(json={
-                    'auth': {
-                        'identity': {
-                            'methods': ['password'],
-                            'password': {
-                                'user': {
-                                    'domain': {
-                                        'name': 'default',
-                                    },
-                                    'name': 'admin',
-                                    'password': 'password'
-                                }
-                            }
-                        },
-                        'scope': {
-                            'project': {
-                                'domain': {
-                                    'name': 'default'
-                                },
-                                'name': project_name
-                            }
-                        }
-                    }
-                }),
-            )
+        return dict(
+            method='POST',
+            uri='https://identity.example.com/v3/auth/tokens',
+            headers={
+                'X-Subject-Token': self.getUniqueString('KeystoneToken')
+            },
+            json=self.os_fixture.v3_token,
+            validate=dict(json={
+                'auth': {
+                    'identity': {
+                        'methods': ['password'],
+                        'password': {
+                            'user': {
+                                'domain': {
+                                    'name': 'default',
+                                },
+                                'name': 'admin',
+                                'password': 'password'
+                            }
+                        }
+                    },
+                    'scope': {
+                        'project': {
+                            'domain': {
+                                'name': 'default'
+                            },
+                            'name': project_name
+                        }
+                    }
+                }
+            }),
+        )
 
-    def get_keystone_v3_discovery(self):
+    def get_keystone_discovery(self):
         with open(self.discovery_json, 'r') as discovery_file:
             return dict(
                 method='GET',
@@ -468,13 +469,13 @@ class TestCase(base.TestCase):
                 text=discovery_file.read(),
             )
 
-    def use_keystone_v3(self, catalog='catalog-v3.json'):
+    def use_keystone_v3(self):
         self.adapter = self.useFixture(rm_fixture.Fixture())
         self.calls = []
         self._uri_registry.clear()
         self.__do_register_uris([
-            self.get_keystone_v3_discovery(),
-            self.get_keystone_v3_token(catalog),
+            self.get_keystone_discovery(),
+            self.get_keystone_v3_token(),
         ])
         self._make_test_cloud(identity_api_version='3')
 
@@ -483,18 +484,13 @@ class TestCase(base.TestCase):
         self.calls = []
         self._uri_registry.clear()
 
-        with open(self.discovery_json, 'r') as discovery_file, \
-                open(os.path.join(
-                    self.fixtures_directory,
-                    'catalog-v2.json'), 'r') as tokens_file:
-            self.__do_register_uris([
-                dict(method='GET', uri='https://identity.example.com/',
-                     text=discovery_file.read()),
-                dict(method='POST',
-                     uri='https://identity.example.com/v2.0/tokens',
-                     text=tokens_file.read()
-                     ),
-            ])
+        self.__do_register_uris([
+            self.get_keystone_discovery(),
+            dict(method='POST',
+                 uri='https://identity.example.com/v2.0/tokens',
+                 json=self.os_fixture.v2_token,
+                 ),
+        ])
 
         self._make_test_cloud(cloud_name='_test_cloud_v2_',
                               identity_api_version='2.0')
@@ -509,7 +505,7 @@ class TestCase(base.TestCase):
     def get_cinder_discovery_mock_dict(
             self,
             block_storage_version_json='block-storage-version.json',
-            block_storage_discovery_url='https://volume.example.com/'):
+            block_storage_discovery_url='https://block-storage.example.com/'):
         discovery_fixture = os.path.join(
             self.fixtures_directory, block_storage_version_json)
         return dict(method='GET', uri=block_storage_discovery_url,
@@ -551,7 +547,7 @@ class TestCase(base.TestCase):
     def get_ironic_discovery_mock_dict(self):
         discovery_fixture = os.path.join(
             self.fixtures_directory, "baremetal.json")
-        return dict(method='GET', uri="https://bare-metal.example.com/",
+        return dict(method='GET', uri="https://baremetal.example.com/",
                     text=open(discovery_fixture, 'r').read())
 
     def get_senlin_discovery_mock_dict(self):
@@ -580,6 +576,10 @@ class TestCase(base.TestCase):
                 self.get_glance_discovery_mock_dict(
                     image_version_json, image_discovery_url)])
 
+    def use_cinder(self):
+        self.__do_register_uris([
+            self.get_cinder_discovery_mock_dict()])
+
     def use_placement(self):
         self.__do_register_uris([
             self.get_placement_discovery_mock_dict()])
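Because the keystoneauth token fixtures behave like plain dicts, they can be handed straight to requests_mock as a JSON body; this is what get_keystone_v3_token above now does. A hedged, self-contained sketch of the same wiring (the URL matches the test base, the token value and project id are arbitrary examples):

    import requests_mock

    from openstack.fixture import connection as os_fixture

    fixture = os_fixture.ConnectionFixture(project_id='abc123')
    adapter = requests_mock.Adapter()
    adapter.register_uri(
        'POST', 'https://identity.example.com/v3/auth/tokens',
        json=fixture.v3_token,
        headers={'X-Subject-Token': 'example-token'})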
@@ -117,7 +117,7 @@ class TestMemoryCache(base.TestCase):
                 for p in project_list]}
 
         mock_uri = self.get_mock_url(
-            service_type='identity', interface='admin', resource='projects',
+            service_type='identity', resource='projects',
             base_url_append='v3')
 
         self.register_uris([
@@ -206,6 +206,7 @@ class TestMemoryCache(base.TestCase):
                                        'Volume 2 Display Name')
         fake_volume2_dict = meta.obj_to_munch(fake_volume2)
         self.register_uris([
+            self.get_cinder_discovery_mock_dict(),
             dict(method='GET',
                  uri=self.get_mock_url(
                      'volumev2', 'public', append=['volumes', 'detail']),
@@ -236,6 +237,7 @@ class TestMemoryCache(base.TestCase):
                                        'Volume 2 Display Name')
         fake_volume2_dict = meta.obj_to_munch(fake_volume2)
         self.register_uris([
+            self.get_cinder_discovery_mock_dict(),
             dict(method='GET',
                  uri=self.get_mock_url(
                      'volumev2', 'public', append=['volumes', 'detail']),
@@ -266,6 +268,7 @@ class TestMemoryCache(base.TestCase):
         fake_vol_avail['status'] = 'deleting'
 
         self.register_uris([
+            self.get_cinder_discovery_mock_dict(),
             dict(method='GET',
                  uri=self.get_mock_url(
                      'volumev2', 'public', append=['volumes', 'detail']),
@@ -323,7 +326,6 @@ class TestMemoryCache(base.TestCase):
             dict(method='GET',
                  uri=self.get_mock_url(
                      service_type='identity',
-                     interface='admin',
                      resource='users',
                      base_url_append='v3'),
                  status_code=200,
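The extra get_cinder_discovery_mock_dict() entries above are needed because the block-storage calls now go through the real proxy, which performs version discovery against the service root before any /volumes request. Conceptually, the registered mock is just a dict like the following (illustrative shape only; the real body comes from the block-storage-version.json fixture):

    # Placeholder version document; the URI matches the generated catalog.
    discovery_mock = dict(
        method='GET',
        uri='https://block-storage.example.com/',
        text='{"versions": []}',
    )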
@@ -51,16 +51,24 @@ cluster_template_obj = munch.Munch(
 
 class TestClusterTemplates(base.TestCase):
 
+    def get_mock_url(
+            self,
+            service_type='container-infrastructure-management',
+            base_url_append=None, append=None, resource=None):
+        return super(TestClusterTemplates, self).get_mock_url(
+            service_type=service_type, resource=resource,
+            append=append, base_url_append=base_url_append)
+
     def test_list_cluster_templates_without_detail(self):
 
         self.register_uris([
             dict(
                 method='GET',
-                uri='https://container-infra.example.com/v1/clustertemplates',
+                uri=self.get_mock_url(resource='clustertemplates'),
                 status_code=404),
             dict(
                 method='GET',
-                uri='https://container-infra.example.com/v1/baymodels/detail',
+                uri=self.get_mock_url(resource='baymodels/detail'),
                 json=dict(baymodels=[cluster_template_obj.toDict()]))])
         cluster_templates_list = self.cloud.list_cluster_templates()
         self.assertEqual(
@@ -72,11 +80,11 @@ class TestClusterTemplates(base.TestCase):
         self.register_uris([
             dict(
                 method='GET',
-                uri='https://container-infra.example.com/v1/clustertemplates',
+                uri=self.get_mock_url(resource='clustertemplates'),
                 status_code=404),
             dict(
                 method='GET',
-                uri='https://container-infra.example.com/v1/baymodels/detail',
+                uri=self.get_mock_url(resource='baymodels/detail'),
                 json=dict(baymodels=[cluster_template_obj.toDict()]))])
         cluster_templates_list = self.cloud.list_cluster_templates(detail=True)
         self.assertEqual(
@@ -88,11 +96,11 @@ class TestClusterTemplates(base.TestCase):
         self.register_uris([
             dict(
                 method='GET',
-                uri='https://container-infra.example.com/v1/clustertemplates',
+                uri=self.get_mock_url(resource='clustertemplates'),
                 status_code=404),
             dict(
                 method='GET',
-                uri='https://container-infra.example.com/v1/baymodels/detail',
+                uri=self.get_mock_url(resource='baymodels/detail'),
                 json=dict(baymodels=[cluster_template_obj.toDict()]))])
 
         cluster_templates = self.cloud.search_cluster_templates(
@@ -107,11 +115,11 @@ class TestClusterTemplates(base.TestCase):
         self.register_uris([
             dict(
                 method='GET',
-                uri='https://container-infra.example.com/v1/clustertemplates',
+                uri=self.get_mock_url(resource='clustertemplates'),
                 status_code=404),
             dict(
                 method='GET',
-                uri='https://container-infra.example.com/v1/baymodels/detail',
+                uri=self.get_mock_url(resource='baymodels/detail'),
                 json=dict(baymodels=[cluster_template_obj.toDict()]))])
 
         cluster_templates = self.cloud.search_cluster_templates(
@@ -124,11 +132,11 @@ class TestClusterTemplates(base.TestCase):
         self.register_uris([
             dict(
                 method='GET',
-                uri='https://container-infra.example.com/v1/clustertemplates',
+                uri=self.get_mock_url(resource='clustertemplates'),
                 status_code=404),
             dict(
                 method='GET',
-                uri='https://container-infra.example.com/v1/baymodels/detail',
+                uri=self.get_mock_url(resource='baymodels/detail'),
                 json=dict(baymodels=[cluster_template_obj.toDict()]))])
 
         r = self.cloud.get_cluster_template('fake-cluster-template')
@@ -141,11 +149,11 @@ class TestClusterTemplates(base.TestCase):
         self.register_uris([
             dict(
                 method='GET',
-                uri='https://container-infra.example.com/v1/clustertemplates',
+                uri=self.get_mock_url(resource='clustertemplates'),
                 status_code=404),
             dict(
                 method='GET',
-                uri='https://container-infra.example.com/v1/baymodels/detail',
+                uri=self.get_mock_url(resource='baymodels/detail'),
                 json=dict(baymodels=[]))])
         r = self.cloud.get_cluster_template('doesNotExist')
         self.assertIsNone(r)
@@ -155,11 +163,11 @@ class TestClusterTemplates(base.TestCase):
         self.register_uris([
             dict(
                 method='POST',
-                uri='https://container-infra.example.com/v1/clustertemplates',
+                uri=self.get_mock_url(resource='clustertemplates'),
                 status_code=404),
             dict(
                 method='POST',
-                uri='https://container-infra.example.com/v1/baymodels',
+                uri=self.get_mock_url(resource='baymodels'),
                 json=dict(baymodels=[cluster_template_obj.toDict()]),
                 validate=dict(json={
                     'coe': 'fake-coe',
@@ -177,11 +185,11 @@ class TestClusterTemplates(base.TestCase):
         self.register_uris([
             dict(
                 method='POST',
-                uri='https://container-infra.example.com/v1/clustertemplates',
+                uri=self.get_mock_url(resource='clustertemplates'),
                 status_code=404),
             dict(
                 method='POST',
-                uri='https://container-infra.example.com/v1/baymodels',
+                uri=self.get_mock_url(resource='baymodels'),
                 status_code=403)])
         # TODO(mordred) requests here doens't give us a great story
         # for matching the old error message text. Investigate plumbing
@@ -196,37 +204,35 @@ class TestClusterTemplates(base.TestCase):
         self.assert_calls()
 
     def test_delete_cluster_template(self):
-        uri = 'https://container-infra.example.com/v1/baymodels/fake-uuid'
         self.register_uris([
             dict(
                 method='GET',
-                uri='https://container-infra.example.com/v1/clustertemplates',
+                uri=self.get_mock_url(resource='clustertemplates'),
                 status_code=404),
             dict(
                 method='GET',
-                uri='https://container-infra.example.com/v1/baymodels/detail',
+                uri=self.get_mock_url(resource='baymodels/detail'),
                 json=dict(baymodels=[cluster_template_obj.toDict()])),
             dict(
                 method='DELETE',
-                uri=uri),
+                uri=self.get_mock_url(resource='baymodels/fake-uuid')),
         ])
         self.cloud.delete_cluster_template('fake-uuid')
         self.assert_calls()
 
     def test_update_cluster_template(self):
-        uri = 'https://container-infra.example.com/v1/baymodels/fake-uuid'
         self.register_uris([
             dict(
                 method='GET',
-                uri='https://container-infra.example.com/v1/clustertemplates',
+                uri=self.get_mock_url(resource='clustertemplates'),
                 status_code=404),
            dict(
                 method='GET',
-                uri='https://container-infra.example.com/v1/baymodels/detail',
+                uri=self.get_mock_url(resource='baymodels/detail'),
                 json=dict(baymodels=[cluster_template_obj.toDict()])),
             dict(
                 method='PATCH',
-                uri=uri,
+                uri=self.get_mock_url(resource='baymodels/fake-uuid'),
                 status_code=200,
                 validate=dict(
                     json=[{
@@ -237,7 +243,7 @@ class TestClusterTemplates(base.TestCase):
                 )),
             dict(
                 method='GET',
-                uri='https://container-infra.example.com/v1/clustertemplates',
+                uri=self.get_mock_url(resource='clustertemplates'),
                 # This json value is not meaningful to the test - it just has
                 # to be valid.
                 json=dict(baymodels=[cluster_template_obj.toDict()])),
@@ -251,7 +257,7 @@ class TestClusterTemplates(base.TestCase):
         self.register_uris([
             dict(
                 method='GET',
-                uri='https://container-infra.example.com/v1/clustertemplates',
+                uri=self.get_mock_url(resource='clustertemplates'),
                 json=dict(clustertemplates=[cluster_template_obj.toDict()]))])
 
         r = self.cloud.get_coe_cluster_template('fake-cluster-template')
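These tests address magnum via service_type='container-infrastructure-management'. The fixture builds catalog entries for every official service type plus all of its aliases as reported by os-service-types, which is why either name resolves against the generated catalog; the exact hostnames the mocked URIs expand to therefore come from the fixture rather than being hardcoded. A quick way to see what the fixture will include (illustrative, not part of the patch):

    import os_service_types

    manager = os_service_types.ServiceTypes()
    for service in manager.services:
        # Prints each official service type and its aliases.
        print(service['service_type'], service.get('aliases', []))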
@@ -39,11 +39,19 @@ coe_cluster_obj = munch.Munch(
 
 class TestCOEClusters(base.TestCase):
 
+    def get_mock_url(
+            self,
+            service_type='container-infrastructure-management',
+            base_url_append=None, append=None, resource=None):
+        return super(TestCOEClusters, self).get_mock_url(
+            service_type=service_type, resource=resource,
+            append=append, base_url_append=base_url_append)
+
     def test_list_coe_clusters(self):
 
         self.register_uris([dict(
             method='GET',
-            uri='https://container-infra.example.com/v1/clusters',
+            uri=self.get_mock_url(resource='clusters'),
             json=dict(clusters=[coe_cluster_obj.toDict()]))])
         cluster_list = self.cloud.list_coe_clusters()
         self.assertEqual(
@@ -54,7 +62,7 @@ class TestCOEClusters(base.TestCase):
     def test_create_coe_cluster(self):
         self.register_uris([dict(
             method='POST',
-            uri='https://container-infra.example.com/v1/clusters',
+            uri=self.get_mock_url(resource='clusters'),
             json=dict(baymodels=[coe_cluster_obj.toDict()]),
             validate=dict(json={
                 'name': 'k8s',
@@ -72,7 +80,7 @@ class TestCOEClusters(base.TestCase):
     def test_search_coe_cluster_by_name(self):
         self.register_uris([dict(
             method='GET',
-            uri='https://container-infra.example.com/v1/clusters',
+            uri=self.get_mock_url(resource='clusters'),
             json=dict(clusters=[coe_cluster_obj.toDict()]))])
 
         coe_clusters = self.cloud.search_coe_clusters(
@@ -86,7 +94,7 @@ class TestCOEClusters(base.TestCase):
 
         self.register_uris([dict(
             method='GET',
-            uri='https://container-infra.example.com/v1/clusters',
+            uri=self.get_mock_url(resource='clusters'),
             json=dict(clusters=[coe_cluster_obj.toDict()]))])
 
         coe_clusters = self.cloud.search_coe_clusters(
@@ -98,7 +106,7 @@ class TestCOEClusters(base.TestCase):
     def test_get_coe_cluster(self):
         self.register_uris([dict(
             method='GET',
-            uri='https://container-infra.example.com/v1/clusters',
+            uri=self.get_mock_url(resource='clusters'),
             json=dict(clusters=[coe_cluster_obj.toDict()]))])
 
         r = self.cloud.get_coe_cluster(coe_cluster_obj.name)
@@ -110,38 +118,38 @@ class TestCOEClusters(base.TestCase):
     def test_get_coe_cluster_not_found(self):
         self.register_uris([dict(
             method='GET',
-            uri='https://container-infra.example.com/v1/clusters',
+            uri=self.get_mock_url(resource='clusters'),
             json=dict(clusters=[]))])
         r = self.cloud.get_coe_cluster('doesNotExist')
         self.assertIsNone(r)
         self.assert_calls()
 
     def test_delete_coe_cluster(self):
-        uri = ('https://container-infra.example.com/v1/clusters/%s' %
-               coe_cluster_obj.uuid)
         self.register_uris([
             dict(
                 method='GET',
-                uri='https://container-infra.example.com/v1/clusters',
+                uri=self.get_mock_url(resource='clusters'),
                 json=dict(clusters=[coe_cluster_obj.toDict()])),
             dict(
                 method='DELETE',
-                uri=uri),
+                uri=self.get_mock_url(
+                    resource='clusters',
+                    append=[coe_cluster_obj.uuid])),
         ])
         self.cloud.delete_coe_cluster(coe_cluster_obj.uuid)
         self.assert_calls()
 
     def test_update_coe_cluster(self):
-        uri = ('https://container-infra.example.com/v1/clusters/%s' %
-               coe_cluster_obj.uuid)
         self.register_uris([
             dict(
                 method='GET',
-                uri='https://container-infra.example.com/v1/clusters',
+                uri=self.get_mock_url(resource='clusters'),
                 json=dict(clusters=[coe_cluster_obj.toDict()])),
             dict(
                 method='PATCH',
-                uri=uri,
+                uri=self.get_mock_url(
+                    resource='clusters',
+                    append=[coe_cluster_obj.uuid]),
                 status_code=200,
                 validate=dict(
                     json=[{
@@ -152,7 +160,7 @@ class TestCOEClusters(base.TestCase):
                 )),
             dict(
                 method='GET',
-                uri='https://container-infra.example.com/v1/clusters',
+                uri=self.get_mock_url(resource='clusters'),
                 # This json value is not meaningful to the test - it just has
                 # to be valid.
                 json=dict(clusters=[coe_cluster_obj.toDict()])),
@@ -34,11 +34,20 @@ coe_cluster_signed_cert_obj = munch.Munch(
 
 class TestCOEClusters(base.TestCase):
 
+    def get_mock_url(
+            self,
+            service_type='container-infrastructure-management',
+            base_url_append=None, append=None, resource=None):
+        return super(TestCOEClusters, self).get_mock_url(
+            service_type=service_type, resource=resource,
+            append=append, base_url_append=base_url_append)
+
     def test_get_coe_cluster_certificate(self):
         self.register_uris([dict(
             method='GET',
-            uri=('https://container-infra.example.com/v1/certificates/%s' %
-                 coe_cluster_ca_obj.cluster_uuid),
+            uri=self.get_mock_url(
+                resource='certificates',
+                append=[coe_cluster_ca_obj.cluster_uuid]),
             json=coe_cluster_ca_obj)
         ])
         ca_cert = self.cloud.get_coe_cluster_certificate(
@@ -51,7 +60,7 @@ class TestCOEClusters(base.TestCase):
     def test_sign_coe_cluster_certificate(self):
         self.register_uris([dict(
             method='POST',
-            uri='https://container-infra.example.com/v1/certificates',
+            uri=self.get_mock_url(resource='certificates'),
            json={"cluster_uuid": coe_cluster_signed_cert_obj.cluster_uuid,
                  "csr": coe_cluster_signed_cert_obj.csr}
            )])
@@ -25,6 +25,10 @@ from openstack.tests.unit import base
 
 class TestCreateVolumeSnapshot(base.TestCase):
 
+    def setUp(self):
+        super(TestCreateVolumeSnapshot, self).setUp()
+        self.use_cinder()
+
     def test_create_volume_snapshot_wait(self):
         """
         Test that create_volume_snapshot with a wait returns the volume
@@ -25,6 +25,10 @@ from openstack.tests.unit import base
 
 class TestDeleteVolumeSnapshot(base.TestCase):
 
+    def setUp(self):
+        super(TestDeleteVolumeSnapshot, self).setUp()
+        self.use_cinder()
+
     def test_delete_volume_snapshot(self):
         """
         Test that delete_volume_snapshot without a wait returns True instance
@@ -25,10 +25,10 @@ from openstack.tests.unit import base
 class TestDomains(base.TestCase):
 
     def get_mock_url(self, service_type='identity',
-                     interface='admin', resource='domains',
+                     resource='domains',
                      append=None, base_url_append='v3'):
         return super(TestDomains, self).get_mock_url(
-            service_type=service_type, interface=interface, resource=resource,
+            service_type=service_type, resource=resource,
             append=append, base_url_append=base_url_append)
 
     def test_list_domains(self):
@@ -29,7 +29,7 @@ from testtools import matchers
 
 class TestCloudEndpoints(base.TestCase):
 
-    def get_mock_url(self, service_type='identity', interface='admin',
+    def get_mock_url(self, service_type='identity', interface='public',
                      resource='endpoints', append=None, base_url_append='v3'):
         return super(TestCloudEndpoints, self).get_mock_url(
             service_type, interface, resource, append, base_url_append)
@@ -20,10 +20,10 @@ class TestGroups(base.TestCase):
             cloud_config_fixture=cloud_config_fixture)
         self.addCleanup(self.assert_calls)
 
-    def get_mock_url(self, service_type='identity', interface='admin',
+    def get_mock_url(self, service_type='identity', interface='public',
                      resource='groups', append=None, base_url_append='v3'):
         return super(TestGroups, self).get_mock_url(
-            service_type='identity', interface='admin', resource=resource,
+            service_type='identity', interface=interface, resource=resource,
             append=append, base_url_append=base_url_append)
 
     def test_list_groups(self):
@@ -36,7 +36,7 @@ RAW_ROLE_ASSIGNMENTS = [
 
 class TestIdentityRoles(base.TestCase):
 
-    def get_mock_url(self, service_type='identity', interface='admin',
+    def get_mock_url(self, service_type='identity', interface='public',
                      resource='roles', append=None, base_url_append='v3',
                      qs_elements=None):
         return super(TestIdentityRoles, self).get_mock_url(