Blackify openstack.cloud

Black used with the '-l 79 -S' flags.

A future change will ignore this commit in git-blame history by adding a
'git-blame-ignore-revs' file.

Change-Id: Ib58bb45ce8c29e5347ffc36d40d6f5d52b140c6b
Signed-off-by: Stephen Finucane <stephenfin@redhat.com>
This commit is contained in:
Stephen Finucane 2023-05-05 11:22:56 +01:00
parent c2ff7336ce
commit 004c7352d0
128 changed files with 26600 additions and 16255 deletions

View File

@ -70,7 +70,7 @@ class AcceleratorCloudMixin:
"""
device_profile = self.accelerator.get_device_profile(
name_or_id,
filters
filters,
)
if device_profile is None:
self.log.debug(
@ -104,7 +104,7 @@ class AcceleratorCloudMixin:
"""
accelerator_request = self.accelerator.get_accelerator_request(
name_or_id,
filters
filters,
)
if accelerator_request is None:
self.log.debug(

View File

@ -39,7 +39,8 @@ def _normalize_port_list(nics):
except KeyError:
raise TypeError(
"Either 'address' or 'mac' must be provided "
"for port %s" % row)
"for port %s" % row
)
ports.append(dict(row, address=address))
return ports
@ -136,32 +137,34 @@ class BaremetalCloudMixin:
raise exc.OpenStackCloudException(
"Refusing to inspect available machine %(node)s "
"which is associated with an instance "
"(instance_uuid %(inst)s)" %
{'node': node.id, 'inst': node.instance_id})
"(instance_uuid %(inst)s)"
% {'node': node.id, 'inst': node.instance_id}
)
return_to_available = True
# NOTE(TheJulia): Changing available machine to manageable state
# and due to state transitions we need to wait until that transition has
# completed.
node = self.baremetal.set_node_provision_state(node, 'manage',
wait=True,
timeout=timeout)
node = self.baremetal.set_node_provision_state(
node, 'manage', wait=True, timeout=timeout
)
if node.provision_state not in ('manageable', 'inspect failed'):
raise exc.OpenStackCloudException(
"Machine %(node)s must be in 'manageable', 'inspect failed' "
"or 'available' provision state to start inspection, the "
"current state is %(state)s" %
{'node': node.id, 'state': node.provision_state})
"current state is %(state)s"
% {'node': node.id, 'state': node.provision_state}
)
node = self.baremetal.set_node_provision_state(node, 'inspect',
wait=True,
timeout=timeout)
node = self.baremetal.set_node_provision_state(
node, 'inspect', wait=True, timeout=timeout
)
if return_to_available:
node = self.baremetal.set_node_provision_state(node, 'provide',
wait=True,
timeout=timeout)
node = self.baremetal.set_node_provision_state(
node, 'provide', wait=True, timeout=timeout
)
return node
@ -170,19 +173,27 @@ class BaremetalCloudMixin:
try:
yield
except Exception as exc:
self.log.debug("cleaning up node %s because of an error: %s",
node.id, exc)
self.log.debug(
"cleaning up node %s because of an error: %s", node.id, exc
)
tb = sys.exc_info()[2]
try:
self.baremetal.delete_node(node)
except Exception:
self.log.debug("could not remove node %s", node.id,
exc_info=True)
self.log.debug(
"could not remove node %s", node.id, exc_info=True
)
raise exc.with_traceback(tb)
def register_machine(self, nics, wait=False, timeout=3600,
lock_timeout=600, provision_state='available',
**kwargs):
def register_machine(
self,
nics,
wait=False,
timeout=3600,
lock_timeout=600,
provision_state='available',
**kwargs
):
"""Register Baremetal with Ironic
Allows for the registration of Baremetal nodes with Ironic
@ -233,9 +244,10 @@ class BaremetalCloudMixin:
:returns: Current state of the node.
"""
if provision_state not in ('enroll', 'manageable', 'available'):
raise ValueError('Initial provision state must be enroll, '
'manageable or available, got %s'
% provision_state)
raise ValueError(
'Initial provision state must be enroll, '
'manageable or available, got %s' % provision_state
)
# Available is tricky: it cannot be directly requested on newer API
# versions, we need to go through cleaning. But we cannot go through
@ -246,19 +258,24 @@ class BaremetalCloudMixin:
with self._delete_node_on_error(machine):
# Making a node at least manageable
if (machine.provision_state == 'enroll'
and provision_state != 'enroll'):
if (
machine.provision_state == 'enroll'
and provision_state != 'enroll'
):
machine = self.baremetal.set_node_provision_state(
machine, 'manage', wait=True, timeout=timeout)
machine, 'manage', wait=True, timeout=timeout
)
machine = self.baremetal.wait_for_node_reservation(
machine, timeout=lock_timeout)
machine, timeout=lock_timeout
)
# Create NICs before trying to run cleaning
created_nics = []
try:
for port in _normalize_port_list(nics):
nic = self.baremetal.create_port(node_id=machine.id,
**port)
nic = self.baremetal.create_port(
node_id=machine.id, **port
)
created_nics.append(nic.id)
except Exception:
@ -269,10 +286,13 @@ class BaremetalCloudMixin:
pass
raise
if (machine.provision_state != 'available'
and provision_state == 'available'):
if (
machine.provision_state != 'available'
and provision_state == 'available'
):
machine = self.baremetal.set_node_provision_state(
machine, 'provide', wait=wait, timeout=timeout)
machine, 'provide', wait=wait, timeout=timeout
)
return machine
@ -295,15 +315,18 @@ class BaremetalCloudMixin:
:raises: OpenStackCloudException on operation failure.
"""
if wait is not None:
warnings.warn("wait argument is deprecated and has no effect",
DeprecationWarning)
warnings.warn(
"wait argument is deprecated and has no effect",
DeprecationWarning,
)
machine = self.get_machine(uuid)
invalid_states = ['active', 'cleaning', 'clean wait', 'clean failed']
if machine['provision_state'] in invalid_states:
raise exc.OpenStackCloudException(
"Error unregistering node '%s' due to current provision "
"state '%s'" % (uuid, machine['provision_state']))
"state '%s'" % (uuid, machine['provision_state'])
)
# NOTE(TheJulia) There is a high possibility of a lock being present
# if the machine was just moved through the state machine. This was
@ -314,7 +337,8 @@ class BaremetalCloudMixin:
except exc.OpenStackCloudException as e:
raise exc.OpenStackCloudException(
"Error unregistering node '%s': Exception occured while"
" waiting to be able to proceed: %s" % (machine['uuid'], e))
" waiting to be able to proceed: %s" % (machine['uuid'], e)
)
for nic in _normalize_port_list(nics):
try:
@ -381,32 +405,28 @@ class BaremetalCloudMixin:
machine = self.get_machine(name_or_id)
if not machine:
raise exc.OpenStackCloudException(
"Machine update failed to find Machine: %s. " % name_or_id)
"Machine update failed to find Machine: %s. " % name_or_id
)
new_config = dict(machine._to_munch(), **attrs)
try:
patch = jsonpatch.JsonPatch.from_diff(
machine._to_munch(),
new_config)
machine._to_munch(), new_config
)
except Exception as e:
raise exc.OpenStackCloudException(
"Machine update failed - Error generating JSON patch object "
"for submission to the API. Machine: %s Error: %s"
% (name_or_id, e))
% (name_or_id, e)
)
if not patch:
return dict(
node=machine,
changes=None
)
return dict(node=machine, changes=None)
change_list = [change['path'] for change in patch]
node = self.baremetal.update_node(machine, **attrs)
return dict(
node=node,
changes=change_list
)
return dict(node=node, changes=change_list)
def attach_port_to_machine(self, name_or_id, port_name_or_id):
"""Attach a virtual port to the bare metal machine.
@ -459,16 +479,16 @@ class BaremetalCloudMixin:
self.baremetal.validate_node(name_or_id, required=ifaces)
def validate_node(self, uuid):
warnings.warn('validate_node is deprecated, please use '
'validate_machine instead', DeprecationWarning)
warnings.warn(
'validate_node is deprecated, please use '
'validate_machine instead',
DeprecationWarning,
)
self.baremetal.validate_node(uuid)
def node_set_provision_state(self,
name_or_id,
state,
configdrive=None,
wait=False,
timeout=3600):
def node_set_provision_state(
self, name_or_id, state, configdrive=None, wait=False, timeout=3600
):
"""Set Node Provision State
Enables a user to provision a Machine and optionally define a
@ -495,15 +515,17 @@ class BaremetalCloudMixin:
:rtype: :class:`~openstack.baremetal.v1.node.Node`.
"""
node = self.baremetal.set_node_provision_state(
name_or_id, target=state, config_drive=configdrive,
wait=wait, timeout=timeout)
name_or_id,
target=state,
config_drive=configdrive,
wait=wait,
timeout=timeout,
)
return node
def set_machine_maintenance_state(
self,
name_or_id,
state=True,
reason=None):
self, name_or_id, state=True, reason=None
):
"""Set Baremetal Machine Maintenance State
Sets Baremetal maintenance state and maintenance reason.
@ -587,28 +609,33 @@ class BaremetalCloudMixin:
"""
self.baremetal.set_node_power_state(name_or_id, 'rebooting')
def activate_node(self, uuid, configdrive=None,
wait=False, timeout=1200):
def activate_node(self, uuid, configdrive=None, wait=False, timeout=1200):
self.node_set_provision_state(
uuid, 'active', configdrive, wait=wait, timeout=timeout)
uuid, 'active', configdrive, wait=wait, timeout=timeout
)
def deactivate_node(self, uuid, wait=False,
timeout=1200):
def deactivate_node(self, uuid, wait=False, timeout=1200):
self.node_set_provision_state(
uuid, 'deleted', wait=wait, timeout=timeout)
uuid, 'deleted', wait=wait, timeout=timeout
)
def set_node_instance_info(self, uuid, patch):
warnings.warn("The set_node_instance_info call is deprecated, "
"use patch_machine or update_machine instead",
DeprecationWarning)
warnings.warn(
"The set_node_instance_info call is deprecated, "
"use patch_machine or update_machine instead",
DeprecationWarning,
)
return self.patch_machine(uuid, patch)
def purge_node_instance_info(self, uuid):
warnings.warn("The purge_node_instance_info call is deprecated, "
"use patch_machine or update_machine instead",
DeprecationWarning)
return self.patch_machine(uuid,
dict(path='/instance_info', op='remove'))
warnings.warn(
"The purge_node_instance_info call is deprecated, "
"use patch_machine or update_machine instead",
DeprecationWarning,
)
return self.patch_machine(
uuid, dict(path='/instance_info', op='remove')
)
def wait_for_baremetal_node_lock(self, node, timeout=30):
"""Wait for a baremetal node to have no lock.
@ -618,7 +645,10 @@ class BaremetalCloudMixin:
:raises: OpenStackCloudException upon client failure.
:returns: None
"""
warnings.warn("The wait_for_baremetal_node_lock call is deprecated "
"in favor of wait_for_node_reservation on the baremetal "
"proxy", DeprecationWarning)
warnings.warn(
"The wait_for_baremetal_node_lock call is deprecated "
"in favor of wait_for_node_reservation on the baremetal "
"proxy",
DeprecationWarning,
)
self.baremetal.wait_for_node_reservation(node, timeout)

View File

@ -127,8 +127,7 @@ class BlockStorageCloudMixin:
:returns: A volume ``Type`` object if found, else None.
"""
return _utils._get_entity(
self, 'volume_type', name_or_id, filters)
return _utils._get_entity(self, 'volume_type', name_or_id, filters)
def create_volume(
self,
@ -162,7 +161,9 @@ class BlockStorageCloudMixin:
raise exc.OpenStackCloudException(
"Image {image} was requested as the basis for a new"
" volume, but was not found on the cloud".format(
image=image))
image=image
)
)
kwargs['imageRef'] = image_obj['id']
kwargs = self._get_volume_kwargs(kwargs)
kwargs['size'] = size
@ -193,10 +194,10 @@ class BlockStorageCloudMixin:
volume = self.get_volume(name_or_id)
if not volume:
raise exc.OpenStackCloudException(
"Volume %s not found." % name_or_id)
"Volume %s not found." % name_or_id
)
volume = self.block_storage.update_volume(
volume, **kwargs)
volume = self.block_storage.update_volume(volume, **kwargs)
self.list_volumes.invalidate(self)
@ -219,7 +220,9 @@ class BlockStorageCloudMixin:
if not volume:
raise exc.OpenStackCloudException(
"Volume {name_or_id} does not exist".format(
name_or_id=name_or_id))
name_or_id=name_or_id
)
)
self.block_storage.set_volume_bootable_status(volume, bootable)
@ -249,7 +252,8 @@ class BlockStorageCloudMixin:
self.log.debug(
"Volume %(name_or_id)s does not exist",
{'name_or_id': name_or_id},
exc_info=True)
exc_info=True,
)
return False
try:
self.block_storage.delete_volume(volume, force=force)
@ -297,10 +301,12 @@ class BlockStorageCloudMixin:
project_id = proj.id
params['tenant_id'] = project_id
error_msg = "{msg} for the project: {project} ".format(
msg=error_msg, project=name_or_id)
msg=error_msg, project=name_or_id
)
data = proxy._json_response(
self.block_storage.get('/limits', params=params))
self.block_storage.get('/limits', params=params)
)
limits = self._get_and_munchify('limits', data)
return limits
@ -413,22 +419,23 @@ class BlockStorageCloudMixin:
# If we got volume as dict we need to re-fetch it to be able to
# use wait_for_status.
volume = self.block_storage.get_volume(volume['id'])
self.block_storage.wait_for_status(
volume, 'in-use', wait=timeout)
self.block_storage.wait_for_status(volume, 'in-use', wait=timeout)
return attachment
def _get_volume_kwargs(self, kwargs):
name = kwargs.pop('name', kwargs.pop('display_name', None))
description = kwargs.pop('description',
kwargs.pop('display_description', None))
description = kwargs.pop(
'description', kwargs.pop('display_description', None)
)
if name:
kwargs['name'] = name
if description:
kwargs['description'] = description
return kwargs
@_utils.valid_kwargs('name', 'display_name',
'description', 'display_description')
@_utils.valid_kwargs(
'name', 'display_name', 'description', 'display_description'
)
def create_volume_snapshot(
self,
volume_id,
@ -459,7 +466,8 @@ class BlockStorageCloudMixin:
snapshot = self.block_storage.create_snapshot(**payload)
if wait:
snapshot = self.block_storage.wait_for_status(
snapshot, wait=timeout)
snapshot, wait=timeout
)
return snapshot
@ -499,8 +507,7 @@ class BlockStorageCloudMixin:
:returns: A volume ``Snapshot`` object if found, else None.
"""
return _utils._get_entity(self, 'volume_snapshot', name_or_id,
filters)
return _utils._get_entity(self, 'volume_snapshot', name_or_id, filters)
def create_volume_backup(
self,
@ -572,8 +579,7 @@ class BlockStorageCloudMixin:
:returns: A volume ``Backup`` object if found, else None.
"""
return _utils._get_entity(self, 'volume_backup', name_or_id,
filters)
return _utils._get_entity(self, 'volume_backup', name_or_id, filters)
def list_volume_snapshots(self, detailed=True, filters=None):
"""List all volume snapshots.
@ -615,8 +621,9 @@ class BlockStorageCloudMixin:
return list(self.block_storage.backups(details=detailed, **filters))
def delete_volume_backup(self, name_or_id=None, force=False, wait=False,
timeout=None):
def delete_volume_backup(
self, name_or_id=None, force=False, wait=False, timeout=None
):
"""Delete a volume backup.
:param name_or_id: Name or unique ID of the volume backup.
@ -635,7 +642,8 @@ class BlockStorageCloudMixin:
return False
self.block_storage.delete_backup(
volume_backup, ignore_missing=False, force=force)
volume_backup, ignore_missing=False, force=force
)
if wait:
self.block_storage.wait_for_delete(volume_backup, wait=timeout)
@ -663,7 +671,8 @@ class BlockStorageCloudMixin:
return False
self.block_storage.delete_snapshot(
volumesnapshot, ignore_missing=False)
volumesnapshot, ignore_missing=False
)
if wait:
self.block_storage.wait_for_delete(volumesnapshot, wait=timeout)
@ -695,8 +704,7 @@ class BlockStorageCloudMixin:
:returns: A list of volume ``Volume`` objects, if any are found.
"""
volumes = self.list_volumes()
return _utils._filter_list(
volumes, name_or_id, filters)
return _utils._filter_list(volumes, name_or_id, filters)
def search_volume_snapshots(self, name_or_id=None, filters=None):
"""Search for one or more volume snapshots.
@ -723,8 +731,7 @@ class BlockStorageCloudMixin:
:returns: A list of volume ``Snapshot`` objects, if any are found.
"""
volumesnapshots = self.list_volume_snapshots()
return _utils._filter_list(
volumesnapshots, name_or_id, filters)
return _utils._filter_list(volumesnapshots, name_or_id, filters)
def search_volume_backups(self, name_or_id=None, filters=None):
"""Search for one or more volume backups.
@ -751,8 +758,7 @@ class BlockStorageCloudMixin:
:returns: A list of volume ``Backup`` objects, if any are found.
"""
volume_backups = self.list_volume_backups()
return _utils._filter_list(
volume_backups, name_or_id, filters)
return _utils._filter_list(volume_backups, name_or_id, filters)
# TODO(stephenfin): Remove 'get_extra' in a future major version
def search_volume_types(
@ -797,7 +803,8 @@ class BlockStorageCloudMixin:
volume_type = self.get_volume_type(name_or_id)
if not volume_type:
raise exc.OpenStackCloudException(
"VolumeType not found: %s" % name_or_id)
"VolumeType not found: %s" % name_or_id
)
return self.block_storage.get_type_access(volume_type)
@ -814,7 +821,8 @@ class BlockStorageCloudMixin:
volume_type = self.get_volume_type(name_or_id)
if not volume_type:
raise exc.OpenStackCloudException(
"VolumeType not found: %s" % name_or_id)
"VolumeType not found: %s" % name_or_id
)
self.block_storage.add_type_access(volume_type, project_id)
@ -829,7 +837,8 @@ class BlockStorageCloudMixin:
volume_type = self.get_volume_type(name_or_id)
if not volume_type:
raise exc.OpenStackCloudException(
"VolumeType not found: %s" % name_or_id)
"VolumeType not found: %s" % name_or_id
)
self.block_storage.remove_type_access(volume_type, project_id)
def set_volume_quotas(self, name_or_id, **kwargs):
@ -842,12 +851,11 @@ class BlockStorageCloudMixin:
quota does not exist.
"""
proj = self.identity.find_project(
name_or_id, ignore_missing=False)
proj = self.identity.find_project(name_or_id, ignore_missing=False)
self.block_storage.update_quota_set(
_qs.QuotaSet(project_id=proj.id),
**kwargs)
_qs.QuotaSet(project_id=proj.id), **kwargs
)
def get_volume_quotas(self, name_or_id):
"""Get volume quotas for a project

View File

@ -23,10 +23,12 @@ class ClusteringCloudMixin:
def _clustering_client(self):
if 'clustering' not in self._raw_clients:
clustering_client = self._get_versioned_client(
'clustering', min_version=1, max_version='1.latest')
'clustering', min_version=1, max_version='1.latest'
)
self._raw_clients['clustering'] = clustering_client
return self._raw_clients['clustering']
# NOTE(gtema): work on getting rid of direct API calls showed that this
# implementation never worked properly and tests in reality verifying wrong
# things. Unless someone is really interested in this piece of code this will

View File

@ -19,7 +19,6 @@ from openstack.cloud import exc
class CoeCloudMixin:
@_utils.cache_on_arguments()
def list_coe_clusters(self):
"""List COE (Container Orchestration Engine) cluster.
@ -72,7 +71,10 @@ class CoeCloudMixin:
return _utils._get_entity(self, 'coe_cluster', name_or_id, filters)
def create_coe_cluster(
self, name, cluster_template_id, **kwargs,
self,
name,
cluster_template_id,
**kwargs,
):
"""Create a COE cluster based on given cluster template.
@ -133,11 +135,11 @@ class CoeCloudMixin:
cluster = self.get_coe_cluster(name_or_id)
if not cluster:
raise exc.OpenStackCloudException(
"COE cluster %s not found." % name_or_id)
"COE cluster %s not found." % name_or_id
)
cluster = self.container_infrastructure_management.update_cluster(
cluster,
**kwargs
cluster, **kwargs
)
return cluster
@ -149,8 +151,11 @@ class CoeCloudMixin:
:returns: Details about the CA certificate for the given cluster.
"""
return self.container_infrastructure_management\
.get_cluster_certificate(cluster_id)
return (
self.container_infrastructure_management.get_cluster_certificate(
cluster_id
)
)
def sign_coe_cluster_certificate(self, cluster_id, csr):
"""Sign client key and generate the CA certificate for a cluster
@ -164,10 +169,9 @@ class CoeCloudMixin:
:raises: OpenStackCloudException on operation error.
"""
return self.container_infrastructure_management\
.create_cluster_certificate(
cluster_uuid=cluster_id,
csr=csr)
return self.container_infrastructure_management.create_cluster_certificate( # noqa: E501
cluster_uuid=cluster_id, csr=csr
)
@_utils.cache_on_arguments()
def list_cluster_templates(self, detail=False):
@ -182,10 +186,12 @@ class CoeCloudMixin:
the OpenStack API call.
"""
return list(
self.container_infrastructure_management.cluster_templates())
self.container_infrastructure_management.cluster_templates()
)
def search_cluster_templates(
self, name_or_id=None, filters=None, detail=False):
self, name_or_id=None, filters=None, detail=False
):
"""Search cluster templates.
:param name_or_id: cluster template name or ID.
@ -199,8 +205,7 @@ class CoeCloudMixin:
the OpenStack API call.
"""
cluster_templates = self.list_cluster_templates(detail=detail)
return _utils._filter_list(
cluster_templates, name_or_id, filters)
return _utils._filter_list(cluster_templates, name_or_id, filters)
def get_cluster_template(self, name_or_id, filters=None, detail=False):
"""Get a cluster template by name or ID.
@ -225,11 +230,16 @@ class CoeCloudMixin:
cluster template is found.
"""
return _utils._get_entity(
self, 'cluster_template', name_or_id,
filters=filters, detail=detail)
self,
'cluster_template',
name_or_id,
filters=filters,
detail=detail,
)
def create_cluster_template(
self, name, image_id=None, keypair_id=None, coe=None, **kwargs):
self, name, image_id=None, keypair_id=None, coe=None, **kwargs
):
"""Create a cluster template.
:param string name: Name of the cluster template.
@ -243,14 +253,15 @@ class CoeCloudMixin:
:raises: ``OpenStackCloudException`` if something goes wrong during
the OpenStack API call
"""
cluster_template = self.container_infrastructure_management \
.create_cluster_template(
cluster_template = (
self.container_infrastructure_management.create_cluster_template(
name=name,
image_id=image_id,
keypair_id=keypair_id,
coe=coe,
**kwargs,
)
)
return cluster_template
@ -270,11 +281,13 @@ class CoeCloudMixin:
self.log.debug(
"Cluster template %(name_or_id)s does not exist",
{'name_or_id': name_or_id},
exc_info=True)
exc_info=True,
)
return False
self.container_infrastructure_management.delete_cluster_template(
cluster_template)
cluster_template
)
return True
def update_cluster_template(self, name_or_id, **kwargs):
@ -289,14 +302,15 @@ class CoeCloudMixin:
cluster_template = self.get_cluster_template(name_or_id)
if not cluster_template:
raise exc.OpenStackCloudException(
"Cluster template %s not found." % name_or_id)
cluster_template = self.container_infrastructure_management \
.update_cluster_template(
cluster_template,
**kwargs
"Cluster template %s not found." % name_or_id
)
cluster_template = (
self.container_infrastructure_management.update_cluster_template(
cluster_template, **kwargs
)
)
return cluster_template
def list_magnum_services(self):

View File

@ -114,12 +114,15 @@ class ComputeCloudMixin:
"""
flavors = self.list_flavors(get_extra=get_extra)
for flavor in sorted(flavors, key=operator.itemgetter('ram')):
if (flavor['ram'] >= ram
and (not include or include in flavor['name'])):
if flavor['ram'] >= ram and (
not include or include in flavor['name']
):
return flavor
raise exc.OpenStackCloudException(
"Could not find a flavor with {ram} and '{include}'".format(
ram=ram, include=include))
ram=ram, include=include
)
)
@_utils.cache_on_arguments()
def _nova_extensions(self):
@ -155,8 +158,12 @@ class ComputeCloudMixin:
return _utils._filter_list(flavors, name_or_id, filters)
def search_servers(
self, name_or_id=None, filters=None, detailed=False,
all_projects=False, bare=False,
self,
name_or_id=None,
filters=None,
detailed=False,
all_projects=False,
bare=False,
):
"""Search servers.
@ -169,7 +176,8 @@ class ComputeCloudMixin:
criteria.
"""
servers = self.list_servers(
detailed=detailed, all_projects=all_projects, bare=bare)
detailed=detailed, all_projects=all_projects, bare=bare
)
return _utils._filter_list(servers, name_or_id, filters)
def search_server_groups(self, name_or_id=None, filters=None):
@ -213,8 +221,8 @@ class ComputeCloudMixin:
return ret
except exceptions.SDKException:
self.log.debug(
"Availability zone list could not be fetched",
exc_info=True)
"Availability zone list could not be fetched", exc_info=True
)
return []
@_utils.cache_on_arguments()
@ -226,8 +234,9 @@ class ComputeCloudMixin:
clouds.yaml by setting openstack.cloud.get_extra_specs to False.
:returns: A list of compute ``Flavor`` objects.
"""
return list(self.compute.flavors(
details=True, get_extra_specs=get_extra))
return list(
self.compute.flavors(details=True, get_extra_specs=get_extra)
)
def list_server_security_groups(self, server):
"""List all security groups associated with the given server.
@ -268,8 +277,9 @@ class ComputeCloudMixin:
sg = self.get_security_group(sg)
if sg is None:
self.log.debug('Security group %s not found for adding',
sg)
self.log.debug(
'Security group %s not found for adding', sg
)
return None, None
@ -288,7 +298,8 @@ class ComputeCloudMixin:
:raises: ``OpenStackCloudException``, on operation error.
"""
server, security_groups = self._get_server_security_groups(
server, security_groups)
server, security_groups
)
if not (server and security_groups):
return False
@ -310,7 +321,8 @@ class ComputeCloudMixin:
:raises: ``OpenStackCloudException``, on operation error.
"""
server, security_groups = self._get_server_security_groups(
server, security_groups)
server, security_groups
)
if not (server and security_groups):
return False
@ -327,7 +339,10 @@ class ComputeCloudMixin:
# error? Nova returns ok if you try to add a group twice.
self.log.debug(
"The security group %s was not present on server %s so "
"no action was performed", sg.name, server.name)
"no action was performed",
sg.name,
server.name,
)
ret = False
return ret
@ -377,7 +392,8 @@ class ComputeCloudMixin:
self._servers = self._list_servers(
detailed=detailed,
all_projects=all_projects,
bare=bare)
bare=bare,
)
self._servers_time = time.time()
finally:
self._servers_lock.release()
@ -386,14 +402,15 @@ class ComputeCloudMixin:
# list from the cloud, we still return a filtered list.
return _utils._filter_list(self._servers, None, filters)
def _list_servers(self, detailed=False, all_projects=False, bare=False,
filters=None):
def _list_servers(
self, detailed=False, all_projects=False, bare=False, filters=None
):
filters = filters or {}
return [
self._expand_server(server, detailed, bare)
for server in self.compute.servers(
all_projects=all_projects,
**filters)
all_projects=all_projects, **filters
)
]
def list_server_groups(self):
@ -472,12 +489,15 @@ class ComputeCloudMixin:
if not filters:
filters = {}
flavor = self.compute.find_flavor(
name_or_id, get_extra_specs=get_extra,
ignore_missing=True, **filters)
name_or_id,
get_extra_specs=get_extra,
ignore_missing=True,
**filters,
)
return flavor
def get_flavor_by_id(self, id, get_extra=False):
""" Get a flavor by ID
"""Get a flavor by ID
:param id: ID of the flavor.
:param get_extra: Whether or not the list_flavors call should get the
@ -505,7 +525,8 @@ class ComputeCloudMixin:
if not server:
raise exc.OpenStackCloudException(
"Console log requested for invalid server")
"Console log requested for invalid server"
)
try:
return self._get_server_console_output(server['id'], length)
@ -514,8 +535,7 @@ class ComputeCloudMixin:
def _get_server_console_output(self, server_id, length=None):
output = self.compute.get_server_console_output(
server=server_id,
length=length
server=server_id, length=length
)
if 'output' in output:
return output['output']
@ -555,9 +575,12 @@ class ComputeCloudMixin:
the current auth scoped project.
:returns: A compute ``Server`` object if found, else None.
"""
searchfunc = functools.partial(self.search_servers,
detailed=detailed, bare=True,
all_projects=all_projects)
searchfunc = functools.partial(
self.search_servers,
detailed=detailed,
bare=True,
all_projects=all_projects,
)
server = _utils._get_entity(self, searchfunc, name_or_id, filters)
return self._expand_server(server, detailed, bare)
@ -600,8 +623,7 @@ class ComputeCloudMixin:
:returns: A compute ``ServerGroup`` object if found, else None.
"""
return _utils._get_entity(self, 'server_group', name_or_id,
filters)
return _utils._get_entity(self, 'server_group', name_or_id, filters)
def create_keypair(self, name, public_key=None):
"""Create a new keypair.
@ -664,10 +686,12 @@ class ComputeCloudMixin:
if not server_obj:
raise exc.OpenStackCloudException(
"Server {server} could not be found and therefore"
" could not be snapshotted.".format(server=server))
" could not be snapshotted.".format(server=server)
)
server = server_obj
image = self.compute.create_server_image(
server, name=name, metadata=metadata, wait=wait, timeout=timeout)
server, name=name, metadata=metadata, wait=wait, timeout=timeout
)
return image
def get_server_id(self, name_or_id):
@ -709,12 +733,25 @@ class ComputeCloudMixin:
return dict(server_vars=server_vars, groups=groups)
@_utils.valid_kwargs(
'meta', 'files', 'userdata', 'description',
'reservation_id', 'return_raw', 'min_count',
'max_count', 'security_groups', 'key_name',
'availability_zone', 'block_device_mapping',
'block_device_mapping_v2', 'nics', 'scheduler_hints',
'config_drive', 'admin_pass', 'disk_config')
'meta',
'files',
'userdata',
'description',
'reservation_id',
'return_raw',
'min_count',
'max_count',
'security_groups',
'key_name',
'availability_zone',
'block_device_mapping',
'block_device_mapping_v2',
'nics',
'scheduler_hints',
'config_drive',
'admin_pass',
'disk_config',
)
def create_server(
self,
name,
@ -818,10 +855,12 @@ class ComputeCloudMixin:
# after image in the argument list. Doh.
if not flavor:
raise TypeError(
"create_server() missing 1 required argument: 'flavor'")
"create_server() missing 1 required argument: 'flavor'"
)
if not image and not boot_volume:
raise TypeError(
"create_server() requires either 'image' or 'boot_volume'")
"create_server() requires either 'image' or 'boot_volume'"
)
# TODO(mordred) Add support for description starting in 2.19
security_groups = kwargs.get('security_groups', [])
@ -836,11 +875,12 @@ class ComputeCloudMixin:
if user_data:
kwargs['user_data'] = self._encode_server_userdata(user_data)
for (desired, given) in (
('OS-DCF:diskConfig', 'disk_config'),
('config_drive', 'config_drive'),
('key_name', 'key_name'),
('metadata', 'meta'),
('adminPass', 'admin_pass')):
('OS-DCF:diskConfig', 'disk_config'),
('config_drive', 'config_drive'),
('key_name', 'key_name'),
('metadata', 'meta'),
('adminPass', 'admin_pass'),
):
value = kwargs.pop(given, None)
if value:
kwargs[desired] = value
@ -850,7 +890,8 @@ class ComputeCloudMixin:
if not group_obj:
raise exc.OpenStackCloudException(
"Server Group {group} was requested but was not found"
" on the cloud".format(group=group))
" on the cloud".format(group=group)
)
if 'scheduler_hints' not in kwargs:
kwargs['scheduler_hints'] = {}
kwargs['scheduler_hints']['group'] = group_obj['id']
@ -865,7 +906,8 @@ class ComputeCloudMixin:
else:
raise exc.OpenStackCloudException(
'nics parameter to create_server takes a list of dicts.'
' Got: {nics}'.format(nics=kwargs['nics']))
' Got: {nics}'.format(nics=kwargs['nics'])
)
if network and ('nics' not in kwargs or not kwargs['nics']):
nics = []
@ -881,7 +923,10 @@ class ComputeCloudMixin:
'Network {network} is not a valid network in'
' {cloud}:{region}'.format(
network=network,
cloud=self.name, region=self._compute_region))
cloud=self.name,
region=self._compute_region,
)
)
nics.append({'net-id': network_obj['id']})
kwargs['nics'] = nics
@ -904,14 +949,17 @@ class ComputeCloudMixin:
if not nic_net:
raise exc.OpenStackCloudException(
"Requested network {net} could not be found.".format(
net=net_name))
net=net_name
)
)
net['uuid'] = nic_net['id']
for ip_key in ('v4-fixed-ip', 'v6-fixed-ip', 'fixed_ip'):
fixed_ip = nic.pop(ip_key, None)
if fixed_ip and net.get('fixed_ip'):
raise exc.OpenStackCloudException(
"Only one of v4-fixed-ip, v6-fixed-ip or fixed_ip"
" may be given")
" may be given"
)
if fixed_ip:
net['fixed_ip'] = fixed_ip
for key in ('port', 'port-id'):
@ -920,13 +968,13 @@ class ComputeCloudMixin:
# A tag supported only in server microversion 2.32-2.36 or >= 2.42
# Bumping the version to 2.42 to support the 'tag' implementation
if 'tag' in nic:
utils.require_microversion(
self.compute, '2.42')
utils.require_microversion(self.compute, '2.42')
net['tag'] = nic.pop('tag')
if nic:
raise exc.OpenStackCloudException(
"Additional unsupported keys given for server network"
" creation: {keys}".format(keys=nic.keys()))
" creation: {keys}".format(keys=nic.keys())
)
networks.append(net)
if networks:
kwargs['networks'] = networks
@ -954,10 +1002,14 @@ class ComputeCloudMixin:
boot_volume = root_volume
kwargs = self._get_boot_from_volume_kwargs(
image=image, boot_from_volume=boot_from_volume,
boot_volume=boot_volume, volume_size=str(volume_size),
image=image,
boot_from_volume=boot_from_volume,
boot_volume=boot_volume,
volume_size=str(volume_size),
terminate_volume=terminate_volume,
volumes=volumes, kwargs=kwargs)
volumes=volumes,
kwargs=kwargs,
)
kwargs['name'] = name
@ -977,14 +1029,18 @@ class ComputeCloudMixin:
server = self.compute.get_server(server.id)
if server.status == 'ERROR':
raise exc.OpenStackCloudCreateException(
resource='server', resource_id=server.id)
resource='server', resource_id=server.id
)
server = meta.add_server_interfaces(self, server)
else:
server = self.wait_for_server(
server,
auto_ip=auto_ip, ips=ips, ip_pool=ip_pool,
reuse=reuse_ips, timeout=timeout,
auto_ip=auto_ip,
ips=ips,
ip_pool=ip_pool,
reuse=reuse_ips,
timeout=timeout,
nat_destination=nat_destination,
)
@ -992,8 +1048,15 @@ class ComputeCloudMixin:
return server
def _get_boot_from_volume_kwargs(
self, image, boot_from_volume, boot_volume, volume_size,
terminate_volume, volumes, kwargs):
self,
image,
boot_from_volume,
boot_volume,
volume_size,
terminate_volume,
volumes,
kwargs,
):
"""Return block device mappings
:param image: Image dict, name or id to boot with.
@ -1015,7 +1078,10 @@ class ComputeCloudMixin:
'Volume {boot_volume} is not a valid volume'
' in {cloud}:{region}'.format(
boot_volume=boot_volume,
cloud=self.name, region=self._compute_region))
cloud=self.name,
region=self._compute_region,
)
)
block_mapping = {
'boot_index': '0',
'delete_on_termination': terminate_volume,
@ -1036,7 +1102,10 @@ class ComputeCloudMixin:
'Image {image} is not a valid image in'
' {cloud}:{region}'.format(
image=image,
cloud=self.name, region=self._compute_region))
cloud=self.name,
region=self._compute_region,
)
)
block_mapping = {
'boot_index': '0',
@ -1066,7 +1135,10 @@ class ComputeCloudMixin:
'Volume {volume} is not a valid volume'
' in {cloud}:{region}'.format(
volume=volume,
cloud=self.name, region=self._compute_region))
cloud=self.name,
region=self._compute_region,
)
)
block_mapping = {
'boot_index': '-1',
'delete_on_termination': False,
@ -1080,8 +1152,15 @@ class ComputeCloudMixin:
return kwargs
def wait_for_server(
self, server, auto_ip=True, ips=None, ip_pool=None,
reuse=True, timeout=180, nat_destination=None):
self,
server,
auto_ip=True,
ips=None,
ip_pool=None,
reuse=True,
timeout=180,
nat_destination=None,
):
"""
Wait for a server to reach ACTIVE status.
"""
@ -1094,11 +1173,12 @@ class ComputeCloudMixin:
# There is no point in iterating faster than the list_servers cache
for count in utils.iterate_timeout(
timeout,
timeout_message,
# if _SERVER_AGE is 0 we still want to wait a bit
# to be friendly with the server.
wait=self._SERVER_AGE or 2):
timeout,
timeout_message,
# if _SERVER_AGE is 0 we still want to wait a bit
# to be friendly with the server.
wait=self._SERVER_AGE or 2,
):
try:
# Use the get_server call so that the list_servers
# cache can be leveraged
@ -1116,10 +1196,15 @@ class ComputeCloudMixin:
raise exc.OpenStackCloudTimeout(timeout_message)
server = self.get_active_server(
server=server, reuse=reuse,
auto_ip=auto_ip, ips=ips, ip_pool=ip_pool,
wait=True, timeout=remaining_timeout,
nat_destination=nat_destination)
server=server,
reuse=reuse,
auto_ip=auto_ip,
ips=ips,
ip_pool=ip_pool,
wait=True,
timeout=remaining_timeout,
nat_destination=nat_destination,
)
if server is not None and server['status'] == 'ACTIVE':
return server
@ -1136,43 +1221,58 @@ class ComputeCloudMixin:
nat_destination=None,
):
if server['status'] == 'ERROR':
if ('fault' in server and server['fault'] is not None
and 'message' in server['fault']):
if (
'fault' in server
and server['fault'] is not None
and 'message' in server['fault']
):
raise exc.OpenStackCloudException(
"Error in creating the server."
" Compute service reports fault: {reason}".format(
reason=server['fault']['message']),
extra_data=dict(server=server))
reason=server['fault']['message']
),
extra_data=dict(server=server),
)
raise exc.OpenStackCloudException(
"Error in creating the server"
" (no further information available)",
extra_data=dict(server=server))
extra_data=dict(server=server),
)
if server['status'] == 'ACTIVE':
if 'addresses' in server and server['addresses']:
return self.add_ips_to_server(
server, auto_ip, ips, ip_pool, reuse=reuse,
server,
auto_ip,
ips,
ip_pool,
reuse=reuse,
nat_destination=nat_destination,
wait=wait, timeout=timeout)
wait=wait,
timeout=timeout,
)
self.log.debug(
'Server %(server)s reached ACTIVE state without'
' being allocated an IP address.'
' Deleting server.', {'server': server['id']})
' Deleting server.',
{'server': server['id']},
)
try:
self._delete_server(
server=server, wait=wait, timeout=timeout)
self._delete_server(server=server, wait=wait, timeout=timeout)
except Exception as e:
raise exc.OpenStackCloudException(
'Server reached ACTIVE state without being'
' allocated an IP address AND then could not'
' be deleted: {0}'.format(e),
extra_data=dict(server=server))
extra_data=dict(server=server),
)
raise exc.OpenStackCloudException(
'Server reached ACTIVE state without being'
' allocated an IP address.',
extra_data=dict(server=server))
extra_data=dict(server=server),
)
return None
def rebuild_server(
@ -1202,17 +1302,12 @@ class ComputeCloudMixin:
if admin_pass:
kwargs['admin_password'] = admin_pass
server = self.compute.rebuild_server(
server_id,
**kwargs
)
server = self.compute.rebuild_server(server_id, **kwargs)
if not wait:
return self._expand_server(
server, bare=bare, detailed=detailed)
return self._expand_server(server, bare=bare, detailed=detailed)
admin_pass = server.get('adminPass') or admin_pass
server = self.compute.wait_for_server(
server, wait=timeout)
server = self.compute.wait_for_server(server, wait=timeout)
if server['status'] == 'ACTIVE':
server.adminPass = admin_pass
@ -1231,7 +1326,8 @@ class ComputeCloudMixin:
server = self.get_server(name_or_id, bare=True)
if not server:
raise exc.OpenStackCloudException(
'Invalid Server {server}'.format(server=name_or_id))
'Invalid Server {server}'.format(server=name_or_id)
)
self.compute.set_server_metadata(server=server.id, **metadata)
@ -1248,10 +1344,12 @@ class ComputeCloudMixin:
server = self.get_server(name_or_id, bare=True)
if not server:
raise exc.OpenStackCloudException(
'Invalid Server {server}'.format(server=name_or_id))
'Invalid Server {server}'.format(server=name_or_id)
)
self.compute.delete_server_metadata(server=server.id,
keys=metadata_keys)
self.compute.delete_server_metadata(
server=server.id, keys=metadata_keys
)
def delete_server(
self,
@ -1275,8 +1373,7 @@ class ComputeCloudMixin:
:raises: OpenStackCloudException on operation error.
"""
# If delete_ips is True, we need the server to not be bare.
server = self.compute.find_server(
name_or_id, ignore_missing=True)
server = self.compute.find_server(name_or_id, ignore_missing=True)
if not server:
return False
@ -1284,18 +1381,24 @@ class ComputeCloudMixin:
# private method in order to avoid an unnecessary API call to get
# a server we already have.
return self._delete_server(
server, wait=wait, timeout=timeout, delete_ips=delete_ips,
delete_ip_retry=delete_ip_retry)
server,
wait=wait,
timeout=timeout,
delete_ips=delete_ips,
delete_ip_retry=delete_ip_retry,
)
def _delete_server_floating_ips(self, server, delete_ip_retry):
# Does the server have floating ips in its
# addresses dict? If not, skip this.
server_floats = meta.find_nova_interfaces(
server['addresses'], ext_tag='floating')
server['addresses'], ext_tag='floating'
)
for fip in server_floats:
try:
ip = self.get_floating_ip(id=None, filters={
'floating_ip_address': fip['addr']})
ip = self.get_floating_ip(
id=None, filters={'floating_ip_address': fip['addr']}
)
except exc.OpenStackCloudURINotFound:
# We're deleting. If it doesn't exist - awesome
# NOTE(mordred) If the cloud is a nova FIP cloud but
@ -1304,19 +1407,24 @@ class ComputeCloudMixin:
continue
if not ip:
continue
deleted = self.delete_floating_ip(
ip['id'], retry=delete_ip_retry)
deleted = self.delete_floating_ip(ip['id'], retry=delete_ip_retry)
if not deleted:
raise exc.OpenStackCloudException(
"Tried to delete floating ip {floating_ip}"
" associated with server {id} but there was"
" an error deleting it. Not deleting server.".format(
floating_ip=ip['floating_ip_address'],
id=server['id']))
floating_ip=ip['floating_ip_address'], id=server['id']
)
)
def _delete_server(
self, server, wait=False, timeout=180, delete_ips=False,
delete_ip_retry=1):
self,
server,
wait=False,
timeout=180,
delete_ips=False,
delete_ip_retry=1,
):
if not server:
return False
@ -1324,8 +1432,7 @@ class ComputeCloudMixin:
self._delete_server_floating_ips(server, delete_ip_retry)
try:
self.compute.delete_server(
server)
self.compute.delete_server(server)
except exceptions.ResourceNotFound:
return False
except Exception:
@ -1339,9 +1446,11 @@ class ComputeCloudMixin:
# need to invalidate the cache. Avoid the extra API call if
# caching is not enabled.
reset_volume_cache = False
if (self.cache_enabled
and self.has_service('volume')
and self.get_volumes(server)):
if (
self.cache_enabled
and self.has_service('volume')
and self.get_volumes(server)
):
reset_volume_cache = True
if not isinstance(server, _server.Server):
@ -1349,8 +1458,7 @@ class ComputeCloudMixin:
# If this is the case - convert it into real server to be able to
# use wait_for_delete
server = _server.Server(id=server['id'])
self.compute.wait_for_delete(
server, wait=timeout)
self.compute.wait_for_delete(server, wait=timeout)
if reset_volume_cache:
self.list_volumes.invalidate(self)
@ -1360,8 +1468,7 @@ class ComputeCloudMixin:
self._servers_time = self._servers_time - self._SERVER_AGE
return True
@_utils.valid_kwargs(
'name', 'description')
@_utils.valid_kwargs('name', 'description')
def update_server(self, name_or_id, detailed=False, bare=False, **kwargs):
"""Update a server.
@ -1377,13 +1484,9 @@ class ComputeCloudMixin:
:returns: The updated compute ``Server`` object.
:raises: OpenStackCloudException on operation error.
"""
server = self.compute.find_server(
name_or_id,
ignore_missing=False
)
server = self.compute.find_server(name_or_id, ignore_missing=False)
server = self.compute.update_server(
server, **kwargs)
server = self.compute.update_server(server, **kwargs)
return self._expand_server(server, bare=bare, detailed=detailed)
@ -1395,16 +1498,12 @@ class ComputeCloudMixin:
:returns: The created compute ``ServerGroup`` object.
:raises: OpenStackCloudException on operation error.
"""
sg_attrs = {
'name': name
}
sg_attrs = {'name': name}
if policies:
sg_attrs['policies'] = policies
if policy:
sg_attrs['policy'] = policy
return self.compute.create_server_group(
**sg_attrs
)
return self.compute.create_server_group(**sg_attrs)
def delete_server_group(self, name_or_id):
"""Delete a server group.
@ -1415,8 +1514,9 @@ class ComputeCloudMixin:
"""
server_group = self.get_server_group(name_or_id)
if not server_group:
self.log.debug("Server group %s not found for deleting",
name_or_id)
self.log.debug(
"Server group %s not found for deleting", name_or_id
)
return False
self.compute.delete_server_group(server_group, ignore_missing=False)
@ -1477,14 +1577,14 @@ class ComputeCloudMixin:
try:
flavor = self.compute.find_flavor(name_or_id)
if not flavor:
self.log.debug(
"Flavor %s not found for deleting", name_or_id)
self.log.debug("Flavor %s not found for deleting", name_or_id)
return False
self.compute.delete_flavor(flavor)
return True
except exceptions.SDKException:
raise exceptions.OpenStackCloudException(
"Unable to delete flavor {name}".format(name=name_or_id))
"Unable to delete flavor {name}".format(name=name_or_id)
)
def set_flavor_specs(self, flavor_id, extra_specs):
"""Add extra specs to a flavor
@ -1545,9 +1645,7 @@ class ComputeCloudMixin:
:returns: A list of compute ``Hypervisor`` objects.
"""
return list(self.compute.hypervisors(
details=True,
**filters))
return list(self.compute.hypervisors(details=True, **filters))
def search_aggregates(self, name_or_id=None, filters=None):
"""Seach host aggregates.
@ -1587,8 +1685,7 @@ class ComputeCloudMixin:
:returns: An aggregate dict or None if no matching aggregate is
found.
"""
return self.compute.find_aggregate(
name_or_id, ignore_missing=True)
return self.compute.find_aggregate(name_or_id, ignore_missing=True)
def create_aggregate(self, name, availability_zone=None):
"""Create a new host aggregate.
@ -1599,8 +1696,7 @@ class ComputeCloudMixin:
:raises: OpenStackCloudException on operation error.
"""
return self.compute.create_aggregate(
name=name,
availability_zone=availability_zone
name=name, availability_zone=availability_zone
)
@_utils.valid_kwargs('name', 'availability_zone')
@ -1623,14 +1719,12 @@ class ComputeCloudMixin:
:returns: True if delete succeeded, False otherwise.
:raises: OpenStackCloudException on operation error.
"""
if (
isinstance(name_or_id, (str, bytes))
and not name_or_id.isdigit()
):
if isinstance(name_or_id, (str, bytes)) and not name_or_id.isdigit():
aggregate = self.get_aggregate(name_or_id)
if not aggregate:
self.log.debug(
"Aggregate %s not found for deleting", name_or_id)
"Aggregate %s not found for deleting", name_or_id
)
return False
name_or_id = aggregate.id
try:
@ -1654,7 +1748,8 @@ class ComputeCloudMixin:
aggregate = self.get_aggregate(name_or_id)
if not aggregate:
raise exc.OpenStackCloudException(
"Host aggregate %s not found." % name_or_id)
"Host aggregate %s not found." % name_or_id
)
return self.compute.set_aggregate_metadata(aggregate, metadata)
@ -1669,7 +1764,8 @@ class ComputeCloudMixin:
aggregate = self.get_aggregate(name_or_id)
if not aggregate:
raise exc.OpenStackCloudException(
"Host aggregate %s not found." % name_or_id)
"Host aggregate %s not found." % name_or_id
)
return self.compute.add_host_to_aggregate(aggregate, host_name)
@ -1684,12 +1780,13 @@ class ComputeCloudMixin:
aggregate = self.get_aggregate(name_or_id)
if not aggregate:
raise exc.OpenStackCloudException(
"Host aggregate %s not found." % name_or_id)
"Host aggregate %s not found." % name_or_id
)
return self.compute.remove_host_from_aggregate(aggregate, host_name)
def set_compute_quotas(self, name_or_id, **kwargs):
""" Set a quota in a project
"""Set a quota in a project
:param name_or_id: project name or id
:param kwargs: key/value pairs of quota name and quota value
@ -1697,39 +1794,35 @@ class ComputeCloudMixin:
:raises: OpenStackCloudException if the resource to set the
quota does not exist.
"""
proj = self.identity.find_project(
name_or_id, ignore_missing=False)
proj = self.identity.find_project(name_or_id, ignore_missing=False)
kwargs['force'] = True
self.compute.update_quota_set(
_qs.QuotaSet(project_id=proj.id),
**kwargs
_qs.QuotaSet(project_id=proj.id), **kwargs
)
def get_compute_quotas(self, name_or_id):
""" Get quota for a project
"""Get quota for a project
:param name_or_id: project name or id
:returns: A compute ``QuotaSet`` object if found, else None.
:raises: OpenStackCloudException if it's not a valid project
"""
proj = self.identity.find_project(
name_or_id, ignore_missing=False)
proj = self.identity.find_project(name_or_id, ignore_missing=False)
return self.compute.get_quota_set(proj)
def delete_compute_quotas(self, name_or_id):
""" Delete quota for a project
"""Delete quota for a project
:param name_or_id: project name or id
:raises: OpenStackCloudException if it's not a valid project or the
nova client call failed
:returns: None
"""
proj = self.identity.find_project(
name_or_id, ignore_missing=False)
proj = self.identity.find_project(name_or_id, ignore_missing=False)
self.compute.revert_quota_set(proj)
def get_compute_usage(self, name_or_id, start=None, end=None):
""" Get usage for a specific project
"""Get usage for a specific project
:param name_or_id: project name or id
:param start: :class:`datetime.datetime` or string. Start date in UTC
@ -1741,6 +1834,7 @@ class ComputeCloudMixin:
:returns: A :class:`~openstack.compute.v2.usage.Usage` object
"""
def parse_date(date):
try:
return iso8601.parse_date(date)
@ -1751,8 +1845,8 @@ class ComputeCloudMixin:
raise exc.OpenStackCloudException(
"Date given, {date}, is invalid. Please pass in a date"
" string in ISO 8601 format -"
" YYYY-MM-DDTHH:MM:SS".format(
date=date))
" YYYY-MM-DDTHH:MM:SS".format(date=date)
)
if isinstance(start, str):
start = parse_date(start)
@ -1762,7 +1856,8 @@ class ComputeCloudMixin:
proj = self.get_project(name_or_id)
if not proj:
raise exc.OpenStackCloudException(
"project does not exist: {name}".format(name=proj.id))
"project does not exist: {name}".format(name=proj.id)
)
return self.compute.get_usage(proj, start, end)
@ -1830,22 +1925,28 @@ class ComputeCloudMixin:
project_id = server.pop('project_id', project_id)
az = _pop_or_get(
server, 'OS-EXT-AZ:availability_zone', None, self.strict_mode)
server, 'OS-EXT-AZ:availability_zone', None, self.strict_mode
)
# the server resource has this already, but it's missing az info
# from the resource.
# TODO(mordred) create_server is still normalizing servers that aren't
# from the resource layer.
ret['location'] = server.pop(
'location', self._get_current_location(
project_id=project_id, zone=az))
'location',
self._get_current_location(project_id=project_id, zone=az),
)
# Ensure volumes is always in the server dict, even if empty
ret['volumes'] = _pop_or_get(
server, 'os-extended-volumes:volumes_attached',
[], self.strict_mode)
server,
'os-extended-volumes:volumes_attached',
[],
self.strict_mode,
)
config_drive = server.pop(
'has_config_drive', server.pop('config_drive', False))
'has_config_drive', server.pop('config_drive', False)
)
ret['has_config_drive'] = _to_bool(config_drive)
host_id = server.pop('hostId', server.pop('host_id', None))
@ -1855,24 +1956,25 @@ class ComputeCloudMixin:
# Leave these in so that the general properties handling works
ret['disk_config'] = _pop_or_get(
server, 'OS-DCF:diskConfig', None, self.strict_mode)
server, 'OS-DCF:diskConfig', None, self.strict_mode
)
for key in (
'OS-EXT-STS:power_state',
'OS-EXT-STS:task_state',
'OS-EXT-STS:vm_state',
'OS-SRV-USG:launched_at',
'OS-SRV-USG:terminated_at',
'OS-EXT-SRV-ATTR:hypervisor_hostname',
'OS-EXT-SRV-ATTR:instance_name',
'OS-EXT-SRV-ATTR:user_data',
'OS-EXT-SRV-ATTR:host',
'OS-EXT-SRV-ATTR:hostname',
'OS-EXT-SRV-ATTR:kernel_id',
'OS-EXT-SRV-ATTR:launch_index',
'OS-EXT-SRV-ATTR:ramdisk_id',
'OS-EXT-SRV-ATTR:reservation_id',
'OS-EXT-SRV-ATTR:root_device_name',
'OS-SCH-HNT:scheduler_hints',
'OS-EXT-STS:power_state',
'OS-EXT-STS:task_state',
'OS-EXT-STS:vm_state',
'OS-SRV-USG:launched_at',
'OS-SRV-USG:terminated_at',
'OS-EXT-SRV-ATTR:hypervisor_hostname',
'OS-EXT-SRV-ATTR:instance_name',
'OS-EXT-SRV-ATTR:user_data',
'OS-EXT-SRV-ATTR:host',
'OS-EXT-SRV-ATTR:hostname',
'OS-EXT-SRV-ATTR:kernel_id',
'OS-EXT-SRV-ATTR:launch_index',
'OS-EXT-SRV-ATTR:ramdisk_id',
'OS-EXT-SRV-ATTR:reservation_id',
'OS-EXT-SRV-ATTR:root_device_name',
'OS-SCH-HNT:scheduler_hints',
):
short_key = key.split(':')[1]
ret[short_key] = _pop_or_get(server, key, None, self.strict_mode)

View File

@ -33,8 +33,7 @@ class DnsCloudMixin:
"""
if not filters:
filters = {}
return list(self.dns.zones(allow_unknown_params=True,
**filters))
return list(self.dns.zones(allow_unknown_params=True, **filters))
def get_zone(self, name_or_id, filters=None):
"""Get a zone by name or ID.
@ -49,7 +48,8 @@ class DnsCloudMixin:
if not filters:
filters = {}
zone = self.dns.find_zone(
name_or_id=name_or_id, ignore_missing=True, **filters)
name_or_id=name_or_id, ignore_missing=True, **filters
)
if not zone:
return None
return zone
@ -58,8 +58,15 @@ class DnsCloudMixin:
zones = self.list_zones(filters)
return _utils._filter_list(zones, name_or_id, filters)
def create_zone(self, name, zone_type=None, email=None, description=None,
ttl=None, masters=None):
def create_zone(
self,
name,
zone_type=None,
email=None,
description=None,
ttl=None,
masters=None,
):
"""Create a new zone.
:param name: Name of the zone being created.
@ -82,8 +89,9 @@ class DnsCloudMixin:
zone_type = zone_type.upper()
if zone_type not in ('PRIMARY', 'SECONDARY'):
raise exc.OpenStackCloudException(
"Invalid type %s, valid choices are PRIMARY or SECONDARY" %
zone_type)
"Invalid type %s, valid choices are PRIMARY or SECONDARY"
% zone_type
)
zone = {
"name": name,
@ -125,7 +133,8 @@ class DnsCloudMixin:
zone = self.get_zone(name_or_id)
if not zone:
raise exc.OpenStackCloudException(
"Zone %s not found." % name_or_id)
"Zone %s not found." % name_or_id
)
return self.dns.update_zone(zone['id'], **kwargs)
@ -162,8 +171,7 @@ class DnsCloudMixin:
else:
zone_obj = self.get_zone(zone)
if zone_obj is None:
raise exc.OpenStackCloudException(
"Zone %s not found." % zone)
raise exc.OpenStackCloudException("Zone %s not found." % zone)
return list(self.dns.recordsets(zone_obj))
def get_recordset(self, zone, name_or_id):
@ -182,11 +190,11 @@ class DnsCloudMixin:
else:
zone_obj = self.get_zone(zone)
if not zone_obj:
raise exc.OpenStackCloudException(
"Zone %s not found." % zone)
raise exc.OpenStackCloudException("Zone %s not found." % zone)
try:
return self.dns.find_recordset(
zone=zone_obj, name_or_id=name_or_id, ignore_missing=False)
zone=zone_obj, name_or_id=name_or_id, ignore_missing=False
)
except Exception:
return None
@ -194,8 +202,9 @@ class DnsCloudMixin:
recordsets = self.list_recordsets(zone=zone)
return _utils._filter_list(recordsets, name_or_id, filters)
def create_recordset(self, zone, name, recordset_type, records,
description=None, ttl=None):
def create_recordset(
self, zone, name, recordset_type, records, description=None, ttl=None
):
"""Create a recordset.
:param zone: Name, ID or :class:`openstack.dns.v2.zone.Zone` instance
@ -216,17 +225,12 @@ class DnsCloudMixin:
else:
zone_obj = self.get_zone(zone)
if not zone_obj:
raise exc.OpenStackCloudException(
"Zone %s not found." % zone)
raise exc.OpenStackCloudException("Zone %s not found." % zone)
# We capitalize the type in case the user sends in lowercase
recordset_type = recordset_type.upper()
body = {
'name': name,
'type': recordset_type,
'records': records
}
body = {'name': name, 'type': recordset_type, 'records': records}
if description:
body['description'] = description
@ -255,7 +259,8 @@ class DnsCloudMixin:
rs = self.get_recordset(zone, name_or_id)
if not rs:
raise exc.OpenStackCloudException(
"Recordset %s not found." % name_or_id)
"Recordset %s not found." % name_or_id
)
rs = self.dns.update_recordset(recordset=rs, **kwargs)

View File

@ -14,7 +14,6 @@
# We can't just use list, because sphinx gets confused by
# openstack.resource.Resource.list and openstack.resource2.Resource.list
import ipaddress
# import jsonpatch
import threading
import time
import types # noqa
@ -30,7 +29,8 @@ from openstack import utils
_CONFIG_DOC_URL = (
"https://docs.openstack.org/openstacksdk/latest/"
"user/config/configuration.html")
"user/config/configuration.html"
)
class FloatingIPCloudMixin:
@ -39,8 +39,7 @@ class FloatingIPCloudMixin:
def __init__(self):
self.private = self.config.config.get('private', False)
self._floating_ip_source = self.config.config.get(
'floating_ip_source')
self._floating_ip_source = self.config.config.get('floating_ip_source')
if self._floating_ip_source:
if self._floating_ip_source.lower() == 'none':
self._floating_ip_source = None
@ -68,7 +67,8 @@ class FloatingIPCloudMixin:
# understand, obviously.
warnings.warn(
"search_floating_ips is deprecated. "
"Use search_resource instead.")
"Use search_resource instead."
)
if self._use_neutron_floating() and isinstance(filters, dict):
return list(self.network.ips(**filters))
else:
@ -83,8 +83,7 @@ class FloatingIPCloudMixin:
def _nova_list_floating_ips(self):
try:
data = proxy._json_response(
self.compute.get('/os-floating-ips'))
data = proxy._json_response(self.compute.get('/os-floating-ips'))
except exc.OpenStackCloudURINotFound:
return []
return self._get_and_munchify('floating_ips', data)
@ -137,10 +136,11 @@ class FloatingIPCloudMixin:
" using clouds.yaml to configure settings for your"
" cloud(s), and you want to configure this setting,"
" you will need a clouds.yaml file. For more"
" information, please see %(doc_url)s", {
" information, please see %(doc_url)s",
{
'cloud': self.name,
'doc_url': _CONFIG_DOC_URL,
}
},
)
# We can't fallback to nova because we push-down filters.
# We got a 404 which means neutron doesn't exist. If the
@ -148,7 +148,9 @@ class FloatingIPCloudMixin:
return []
self.log.debug(
"Something went wrong talking to neutron API: "
"'%(msg)s'. Trying with Nova.", {'msg': str(e)})
"'%(msg)s'. Trying with Nova.",
{'msg': str(e)},
)
# Fall-through, trying with Nova
else:
if filters:
@ -174,11 +176,13 @@ class FloatingIPCloudMixin:
"""
if not self._has_nova_extension('os-floating-ip-pools'):
raise exc.OpenStackCloudUnavailableExtension(
'Floating IP pools extension is not available on target cloud')
'Floating IP pools extension is not available on target cloud'
)
data = proxy._json_response(
self.compute.get('os-floating-ip-pools'),
error_message="Error fetching floating IP pool list")
error_message="Error fetching floating IP pool list",
)
pools = self._get_and_munchify('floating_ip_pools', data)
return [{'name': p['name']} for p in pools]
@ -217,7 +221,7 @@ class FloatingIPCloudMixin:
return _utils._filter_list(self._floating_ips, None, filters)
def get_floating_ip_by_id(self, id):
""" Get a floating ip by ID
"""Get a floating ip by ID
:param id: ID of the floating ip.
:returns: A floating ip
@ -231,12 +235,15 @@ class FloatingIPCloudMixin:
else:
data = proxy._json_response(
self.compute.get('/os-floating-ips/{id}'.format(id=id)),
error_message=error_message)
error_message=error_message,
)
return self._normalize_floating_ip(
self._get_and_munchify('floating_ip', data))
self._get_and_munchify('floating_ip', data)
)
def _neutron_available_floating_ips(
self, network=None, project_id=None, server=None):
self, network=None, project_id=None, server=None
):
"""Get a floating IP from a network.
Return a list of available floating IPs or allocate a new one and
@ -271,8 +278,7 @@ class FloatingIPCloudMixin:
if floating_network_id is None:
raise exc.OpenStackCloudResourceNotFound(
"unable to find external network {net}".format(
net=network)
"unable to find external network {net}".format(net=network)
)
else:
floating_network_id = self._get_floating_network_id()
@ -285,14 +291,16 @@ class FloatingIPCloudMixin:
floating_ips = self._list_floating_ips()
available_ips = _utils._filter_list(
floating_ips, name_or_id=None, filters=filters)
floating_ips, name_or_id=None, filters=filters
)
if available_ips:
return available_ips
# No available IP found or we didn't try
# allocate a new Floating IP
f_ip = self._neutron_create_floating_ip(
network_id=floating_network_id, server=server)
network_id=floating_network_id, server=server
)
return [f_ip]
@ -311,23 +319,22 @@ class FloatingIPCloudMixin:
"""
with _utils.shade_exceptions(
"Unable to create floating IP in pool {pool}".format(
pool=pool)):
"Unable to create floating IP in pool {pool}".format(pool=pool)
):
if pool is None:
pools = self.list_floating_ip_pools()
if not pools:
raise exc.OpenStackCloudResourceNotFound(
"unable to find a floating ip pool")
"unable to find a floating ip pool"
)
pool = pools[0]['name']
filters = {
'instance_id': None,
'pool': pool
}
filters = {'instance_id': None, 'pool': pool}
floating_ips = self._nova_list_floating_ips()
available_ips = _utils._filter_list(
floating_ips, name_or_id=None, filters=filters)
floating_ips, name_or_id=None, filters=filters
)
if available_ips:
return available_ips
@ -341,7 +348,8 @@ class FloatingIPCloudMixin:
"""Find the network providing floating ips by looking at routers."""
if self._floating_network_by_router_lock.acquire(
not self._floating_network_by_router_run):
not self._floating_network_by_router_run
):
if self._floating_network_by_router_run:
self._floating_network_by_router_lock.release()
return self._floating_network_by_router
@ -349,7 +357,8 @@ class FloatingIPCloudMixin:
for router in self.list_routers():
if router['admin_state_up']:
network_id = router.get(
'external_gateway_info', {}).get('network_id')
'external_gateway_info', {}
).get('network_id')
if network_id:
self._floating_network_by_router = network_id
finally:
@ -371,12 +380,15 @@ class FloatingIPCloudMixin:
if self._use_neutron_floating():
try:
f_ips = self._neutron_available_floating_ips(
network=network, server=server)
network=network, server=server
)
return f_ips[0]
except exc.OpenStackCloudURINotFound as e:
self.log.debug(
"Something went wrong talking to neutron API: "
"'%(msg)s'. Trying with Nova.", {'msg': str(e)})
"'%(msg)s'. Trying with Nova.",
{'msg': str(e)},
)
# Fall-through, trying with Nova
f_ips = self._normalize_floating_ips(
@ -395,12 +407,20 @@ class FloatingIPCloudMixin:
floating_network_id = floating_network
else:
raise exc.OpenStackCloudResourceNotFound(
"unable to find an external network")
"unable to find an external network"
)
return floating_network_id
def create_floating_ip(self, network=None, server=None,
fixed_address=None, nat_destination=None,
port=None, wait=False, timeout=60):
def create_floating_ip(
self,
network=None,
server=None,
fixed_address=None,
nat_destination=None,
port=None,
wait=False,
timeout=60,
):
"""Allocate a new floating IP from a network or a pool.
:param network: Name or ID of the network
@ -430,15 +450,20 @@ class FloatingIPCloudMixin:
if self._use_neutron_floating():
try:
return self._neutron_create_floating_ip(
network_name_or_id=network, server=server,
network_name_or_id=network,
server=server,
fixed_address=fixed_address,
nat_destination=nat_destination,
port=port,
wait=wait, timeout=timeout)
wait=wait,
timeout=timeout,
)
except exc.OpenStackCloudURINotFound as e:
self.log.debug(
"Something went wrong talking to neutron API: "
"'%(msg)s'. Trying with Nova.", {'msg': str(e)})
"'%(msg)s'. Trying with Nova.",
{'msg': str(e)},
)
# Fall-through, trying with Nova
if port:
@ -447,10 +472,12 @@ class FloatingIPCloudMixin:
" arbitrary floating-ip/port mappings. Please nudge"
" your cloud provider to upgrade the networking stack"
" to neutron, or alternately provide the server,"
" fixed_address and nat_destination arguments as appropriate")
" fixed_address and nat_destination arguments as appropriate"
)
# Else, we are using Nova network
f_ips = self._normalize_floating_ips(
[self._nova_create_floating_ip(pool=network)])
[self._nova_create_floating_ip(pool=network)]
)
return f_ips[0]
def _submit_create_fip(self, kwargs):
@ -458,10 +485,16 @@ class FloatingIPCloudMixin:
return self.network.create_ip(**kwargs)
def _neutron_create_floating_ip(
self, network_name_or_id=None, server=None,
fixed_address=None, nat_destination=None,
port=None,
wait=False, timeout=60, network_id=None):
self,
network_name_or_id=None,
server=None,
fixed_address=None,
nat_destination=None,
port=None,
wait=False,
timeout=60,
network_id=None,
):
if not network_id:
if network_name_or_id:
@ -470,7 +503,8 @@ class FloatingIPCloudMixin:
except exceptions.ResourceNotFound:
raise exc.OpenStackCloudResourceNotFound(
"unable to find network for floating ips with ID "
"{0}".format(network_name_or_id))
"{0}".format(network_name_or_id)
)
network_id = network['id']
else:
network_id = self._get_floating_network_id()
@ -480,8 +514,10 @@ class FloatingIPCloudMixin:
if not port:
if server:
(port_obj, fixed_ip_address) = self._nat_destination_port(
server, fixed_address=fixed_address,
nat_destination=nat_destination)
server,
fixed_address=fixed_address,
nat_destination=nat_destination,
)
if port_obj:
port = port_obj['id']
if fixed_ip_address:
@ -499,57 +535,68 @@ class FloatingIPCloudMixin:
if wait:
try:
for count in utils.iterate_timeout(
timeout,
"Timeout waiting for the floating IP"
" to be ACTIVE",
wait=self._FLOAT_AGE):
timeout,
"Timeout waiting for the floating IP" " to be ACTIVE",
wait=self._FLOAT_AGE,
):
fip = self.get_floating_ip(fip_id)
if fip and fip['status'] == 'ACTIVE':
break
except exc.OpenStackCloudTimeout:
self.log.error(
"Timed out on floating ip %(fip)s becoming active."
" Deleting", {'fip': fip_id})
" Deleting",
{'fip': fip_id},
)
try:
self.delete_floating_ip(fip_id)
except Exception as e:
self.log.error(
"FIP LEAK: Attempted to delete floating ip "
"%(fip)s but received %(exc)s exception: "
"%(err)s", {'fip': fip_id, 'exc': e.__class__,
'err': str(e)})
"%(err)s",
{'fip': fip_id, 'exc': e.__class__, 'err': str(e)},
)
raise
if fip['port_id'] != port:
if server:
raise exc.OpenStackCloudException(
"Attempted to create FIP on port {port} for server"
" {server} but FIP has port {port_id}".format(
port=port, port_id=fip['port_id'],
server=server['id']))
port=port,
port_id=fip['port_id'],
server=server['id'],
)
)
else:
raise exc.OpenStackCloudException(
"Attempted to create FIP on port {port}"
" but something went wrong".format(port=port))
" but something went wrong".format(port=port)
)
return fip
def _nova_create_floating_ip(self, pool=None):
with _utils.shade_exceptions(
"Unable to create floating IP in pool {pool}".format(
pool=pool)):
"Unable to create floating IP in pool {pool}".format(pool=pool)
):
if pool is None:
pools = self.list_floating_ip_pools()
if not pools:
raise exc.OpenStackCloudResourceNotFound(
"unable to find a floating ip pool")
"unable to find a floating ip pool"
)
pool = pools[0]['name']
data = proxy._json_response(self.compute.post(
'/os-floating-ips', json=dict(pool=pool)))
data = proxy._json_response(
self.compute.post('/os-floating-ips', json=dict(pool=pool))
)
pool_ip = self._get_and_munchify('floating_ip', data)
# TODO(mordred) Remove this - it's just for compat
data = proxy._json_response(
self.compute.get('/os-floating-ips/{id}'.format(
id=pool_ip['id'])))
self.compute.get(
'/os-floating-ips/{id}'.format(id=pool_ip['id'])
)
)
return self._get_and_munchify('floating_ip', data)
def delete_floating_ip(self, floating_ip_id, retry=1):
@ -589,8 +636,11 @@ class FloatingIPCloudMixin:
" {retry} times. Although the cloud did not indicate any errors"
" the floating ip is still in existence. Aborting further"
" operations.".format(
id=floating_ip_id, ip=f_ip['floating_ip_address'],
retry=retry + 1))
id=floating_ip_id,
ip=f_ip['floating_ip_address'],
retry=retry + 1,
)
)
def _delete_floating_ip(self, floating_ip_id):
if self._use_neutron_floating():
@ -599,14 +649,14 @@ class FloatingIPCloudMixin:
except exc.OpenStackCloudURINotFound as e:
self.log.debug(
"Something went wrong talking to neutron API: "
"'%(msg)s'. Trying with Nova.", {'msg': str(e)})
"'%(msg)s'. Trying with Nova.",
{'msg': str(e)},
)
return self._nova_delete_floating_ip(floating_ip_id)
def _neutron_delete_floating_ip(self, floating_ip_id):
try:
self.network.delete_ip(
floating_ip_id, ignore_missing=False
)
self.network.delete_ip(floating_ip_id, ignore_missing=False)
except exceptions.ResourceNotFound:
return False
return True
@ -615,9 +665,12 @@ class FloatingIPCloudMixin:
try:
proxy._json_response(
self.compute.delete(
'/os-floating-ips/{id}'.format(id=floating_ip_id)),
'/os-floating-ips/{id}'.format(id=floating_ip_id)
),
error_message='Unable to delete floating IP {fip_id}'.format(
fip_id=floating_ip_id))
fip_id=floating_ip_id
),
)
except exc.OpenStackCloudURINotFound:
return False
return True
@ -648,14 +701,23 @@ class FloatingIPCloudMixin:
if self._use_neutron_floating():
for ip in self.list_floating_ips():
if not bool(ip.port_id):
processed.append(self.delete_floating_ip(
floating_ip_id=ip['id'], retry=retry))
processed.append(
self.delete_floating_ip(
floating_ip_id=ip['id'], retry=retry
)
)
return len(processed) if all(processed) else False
def _attach_ip_to_server(
self, server, floating_ip,
fixed_address=None, wait=False,
timeout=60, skip_attach=False, nat_destination=None):
self,
server,
floating_ip,
fixed_address=None,
wait=False,
timeout=60,
skip_attach=False,
nat_destination=None,
):
"""Attach a floating IP to a server.
:param server: Server dict
@ -685,8 +747,9 @@ class FloatingIPCloudMixin:
# the server data and try again. There are some clouds, which
# explicitely forbids FIP assign call if it is already assigned.
server = self.get_server_by_id(server['id'])
ext_ip = meta.get_server_ip(server, ext_tag='floating',
public=True)
ext_ip = meta.get_server_ip(
server, ext_tag='floating', public=True
)
if ext_ip == floating_ip['floating_ip_address']:
return server
@ -694,74 +757,84 @@ class FloatingIPCloudMixin:
if not skip_attach:
try:
self._neutron_attach_ip_to_server(
server=server, floating_ip=floating_ip,
server=server,
floating_ip=floating_ip,
fixed_address=fixed_address,
nat_destination=nat_destination)
nat_destination=nat_destination,
)
except exc.OpenStackCloudURINotFound as e:
self.log.debug(
"Something went wrong talking to neutron API: "
"'%(msg)s'. Trying with Nova.", {'msg': str(e)})
"'%(msg)s'. Trying with Nova.",
{'msg': str(e)},
)
# Fall-through, trying with Nova
else:
# Nova network
self._nova_attach_ip_to_server(
server_id=server['id'], floating_ip_id=floating_ip['id'],
fixed_address=fixed_address)
server_id=server['id'],
floating_ip_id=floating_ip['id'],
fixed_address=fixed_address,
)
if wait:
# Wait for the address to be assigned to the server
server_id = server['id']
for _ in utils.iterate_timeout(
timeout,
"Timeout waiting for the floating IP to be attached.",
wait=self._SERVER_AGE):
timeout,
"Timeout waiting for the floating IP to be attached.",
wait=self._SERVER_AGE,
):
server = self.get_server_by_id(server_id)
ext_ip = meta.get_server_ip(
server, ext_tag='floating', public=True)
server, ext_tag='floating', public=True
)
if ext_ip == floating_ip['floating_ip_address']:
return server
return server
def _neutron_attach_ip_to_server(
self, server, floating_ip, fixed_address=None,
nat_destination=None):
self, server, floating_ip, fixed_address=None, nat_destination=None
):
# Find an available port
(port, fixed_address) = self._nat_destination_port(
server, fixed_address=fixed_address,
nat_destination=nat_destination)
server,
fixed_address=fixed_address,
nat_destination=nat_destination,
)
if not port:
raise exc.OpenStackCloudException(
"unable to find a port for server {0}".format(
server['id']))
"unable to find a port for server {0}".format(server['id'])
)
floating_ip_args = {'port_id': port['id']}
if fixed_address is not None:
floating_ip_args['fixed_ip_address'] = fixed_address
return self.network.update_ip(
floating_ip,
**floating_ip_args)
return self.network.update_ip(floating_ip, **floating_ip_args)
def _nova_attach_ip_to_server(self, server_id, floating_ip_id,
fixed_address=None):
f_ip = self.get_floating_ip(
id=floating_ip_id)
def _nova_attach_ip_to_server(
self, server_id, floating_ip_id, fixed_address=None
):
f_ip = self.get_floating_ip(id=floating_ip_id)
if f_ip is None:
raise exc.OpenStackCloudException(
"unable to find floating IP {0}".format(floating_ip_id))
"unable to find floating IP {0}".format(floating_ip_id)
)
error_message = "Error attaching IP {ip} to instance {id}".format(
ip=floating_ip_id, id=server_id)
body = {
'address': f_ip['floating_ip_address']
}
ip=floating_ip_id, id=server_id
)
body = {'address': f_ip['floating_ip_address']}
if fixed_address:
body['fixed_address'] = fixed_address
return proxy._json_response(
self.compute.post(
'/servers/{server_id}/action'.format(server_id=server_id),
json=dict(addFloatingIp=body)),
error_message=error_message)
json=dict(addFloatingIp=body),
),
error_message=error_message,
)
def detach_ip_from_server(self, server_id, floating_ip_id):
"""Detach a floating IP from a server.
@ -777,31 +850,36 @@ class FloatingIPCloudMixin:
if self._use_neutron_floating():
try:
return self._neutron_detach_ip_from_server(
server_id=server_id, floating_ip_id=floating_ip_id)
server_id=server_id, floating_ip_id=floating_ip_id
)
except exc.OpenStackCloudURINotFound as e:
self.log.debug(
"Something went wrong talking to neutron API: "
"'%(msg)s'. Trying with Nova.", {'msg': str(e)})
"'%(msg)s'. Trying with Nova.",
{'msg': str(e)},
)
# Fall-through, trying with Nova
# Nova network
self._nova_detach_ip_from_server(
server_id=server_id, floating_ip_id=floating_ip_id)
server_id=server_id, floating_ip_id=floating_ip_id
)
def _neutron_detach_ip_from_server(self, server_id, floating_ip_id):
f_ip = self.get_floating_ip(id=floating_ip_id)
if f_ip is None or not bool(f_ip.port_id):
return False
try:
self.network.update_ip(
floating_ip_id,
port_id=None
)
self.network.update_ip(floating_ip_id, port_id=None)
except exceptions.SDKException:
raise exceptions.SDKException(
("Error detaching IP {ip} from "
"server {server_id}".format(
ip=floating_ip_id, server_id=server_id)))
(
"Error detaching IP {ip} from "
"server {server_id}".format(
ip=floating_ip_id, server_id=server_id
)
)
)
return True
@ -810,21 +888,33 @@ class FloatingIPCloudMixin:
f_ip = self.get_floating_ip(id=floating_ip_id)
if f_ip is None:
raise exc.OpenStackCloudException(
"unable to find floating IP {0}".format(floating_ip_id))
"unable to find floating IP {0}".format(floating_ip_id)
)
error_message = "Error detaching IP {ip} from instance {id}".format(
ip=floating_ip_id, id=server_id)
ip=floating_ip_id, id=server_id
)
return proxy._json_response(
self.compute.post(
'/servers/{server_id}/action'.format(server_id=server_id),
json=dict(removeFloatingIp=dict(
address=f_ip['floating_ip_address']))),
error_message=error_message)
json=dict(
removeFloatingIp=dict(address=f_ip['floating_ip_address'])
),
),
error_message=error_message,
)
return True
def _add_ip_from_pool(
self, server, network, fixed_address=None, reuse=True,
wait=False, timeout=60, nat_destination=None):
self,
server,
network,
fixed_address=None,
reuse=True,
wait=False,
timeout=60,
nat_destination=None,
):
"""Add a floating IP to a server from a given pool
This method reuses available IPs, when possible, or allocate new IPs
@ -851,9 +941,12 @@ class FloatingIPCloudMixin:
start_time = time.time()
f_ip = self.create_floating_ip(
server=server,
network=network, nat_destination=nat_destination,
network=network,
nat_destination=nat_destination,
fixed_address=fixed_address,
wait=wait, timeout=timeout)
wait=wait,
timeout=timeout,
)
timeout = timeout - (time.time() - start_time)
# Wait for cache invalidation time so that we don't try
# to attach the FIP a second time below
@ -866,12 +959,23 @@ class FloatingIPCloudMixin:
# the attach function below to get back the server dict refreshed
# with the FIP information.
return self._attach_ip_to_server(
server=server, floating_ip=f_ip, fixed_address=fixed_address,
wait=wait, timeout=timeout, nat_destination=nat_destination)
server=server,
floating_ip=f_ip,
fixed_address=fixed_address,
wait=wait,
timeout=timeout,
nat_destination=nat_destination,
)
def add_ip_list(
self, server, ips, wait=False, timeout=60,
fixed_address=None, nat_destination=None):
self,
server,
ips,
wait=False,
timeout=60,
fixed_address=None,
nat_destination=None,
):
"""Attach a list of IPs to a server.
:param server: a server object
@ -896,10 +1000,16 @@ class FloatingIPCloudMixin:
for ip in ips:
f_ip = self.get_floating_ip(
id=None, filters={'floating_ip_address': ip})
id=None, filters={'floating_ip_address': ip}
)
server = self._attach_ip_to_server(
server=server, floating_ip=f_ip, wait=wait, timeout=timeout,
fixed_address=fixed_address, nat_destination=nat_destination)
server=server,
floating_ip=f_ip,
wait=wait,
timeout=timeout,
fixed_address=fixed_address,
nat_destination=nat_destination,
)
return server
def add_auto_ip(self, server, wait=False, timeout=60, reuse=True):
@ -925,7 +1035,8 @@ class FloatingIPCloudMixin:
"""
server = self._add_auto_ip(
server, wait=wait, timeout=timeout, reuse=reuse)
server, wait=wait, timeout=timeout, reuse=reuse
)
return server['interface_ip'] or None
def _add_auto_ip(self, server, wait=False, timeout=60, reuse=True):
@ -936,7 +1047,8 @@ class FloatingIPCloudMixin:
else:
start_time = time.time()
f_ip = self.create_floating_ip(
server=server, wait=wait, timeout=timeout)
server=server, wait=wait, timeout=timeout
)
timeout = timeout - (time.time() - start_time)
if server:
# This gets passed in for both nova and neutron
@ -951,8 +1063,12 @@ class FloatingIPCloudMixin:
# the attach function below to get back the server dict refreshed
# with the FIP information.
return self._attach_ip_to_server(
server=server, floating_ip=f_ip, wait=wait, timeout=timeout,
skip_attach=skip_attach)
server=server,
floating_ip=f_ip,
wait=wait,
timeout=timeout,
skip_attach=skip_attach,
)
except exc.OpenStackCloudTimeout:
if self._use_neutron_floating() and created:
# We are here because we created an IP on the port
@ -962,36 +1078,60 @@ class FloatingIPCloudMixin:
"Timeout waiting for floating IP to become"
" active. Floating IP %(ip)s:%(id)s was created for"
" server %(server)s but is being deleted due to"
" activation failure.", {
" activation failure.",
{
'ip': f_ip['floating_ip_address'],
'id': f_ip['id'],
'server': server['id']})
'server': server['id'],
},
)
try:
self.delete_floating_ip(f_ip['id'])
except Exception as e:
self.log.error(
"FIP LEAK: Attempted to delete floating ip "
"%(fip)s but received %(exc)s exception: %(err)s",
{'fip': f_ip['id'], 'exc': e.__class__, 'err': str(e)})
{'fip': f_ip['id'], 'exc': e.__class__, 'err': str(e)},
)
raise e
raise
def add_ips_to_server(
self, server, auto_ip=True, ips=None, ip_pool=None,
wait=False, timeout=60, reuse=True, fixed_address=None,
nat_destination=None):
self,
server,
auto_ip=True,
ips=None,
ip_pool=None,
wait=False,
timeout=60,
reuse=True,
fixed_address=None,
nat_destination=None,
):
if ip_pool:
server = self._add_ip_from_pool(
server, ip_pool, reuse=reuse, wait=wait, timeout=timeout,
fixed_address=fixed_address, nat_destination=nat_destination)
server,
ip_pool,
reuse=reuse,
wait=wait,
timeout=timeout,
fixed_address=fixed_address,
nat_destination=nat_destination,
)
elif ips:
server = self.add_ip_list(
server, ips, wait=wait, timeout=timeout,
fixed_address=fixed_address, nat_destination=nat_destination)
server,
ips,
wait=wait,
timeout=timeout,
fixed_address=fixed_address,
nat_destination=nat_destination,
)
elif auto_ip:
if self._needs_floating_ip(server, nat_destination):
server = self._add_auto_ip(
server, wait=wait, timeout=timeout, reuse=reuse)
server, wait=wait, timeout=timeout, reuse=reuse
)
return server
def _needs_floating_ip(self, server, nat_destination):
@ -1026,18 +1166,30 @@ class FloatingIPCloudMixin:
# meta.add_server_interfaces() was not called
server = self.compute.get_server(server)
if server['public_v4'] \
or any([any([address['OS-EXT-IPS:type'] == 'floating'
for address in addresses])
for addresses
in (server['addresses'] or {}).values()]):
if server['public_v4'] or any(
[
any(
[
address['OS-EXT-IPS:type'] == 'floating'
for address in addresses
]
)
for addresses in (server['addresses'] or {}).values()
]
):
return False
if not server['private_v4'] \
and not any([any([address['OS-EXT-IPS:type'] == 'fixed'
for address in addresses])
for addresses
in (server['addresses'] or {}).values()]):
if not server['private_v4'] and not any(
[
any(
[
address['OS-EXT-IPS:type'] == 'fixed'
for address in addresses
]
)
for addresses in (server['addresses'] or {}).values()
]
):
return False
if self.private:
@ -1053,7 +1205,8 @@ class FloatingIPCloudMixin:
return False
(port_obj, fixed_ip_address) = self._nat_destination_port(
server, nat_destination=nat_destination)
server, nat_destination=nat_destination
)
if not port_obj or not fixed_ip_address:
return False
@ -1061,7 +1214,8 @@ class FloatingIPCloudMixin:
return True
def _nat_destination_port(
self, server, fixed_address=None, nat_destination=None):
self, server, fixed_address=None, nat_destination=None
):
"""Returns server port that is on a nat_destination network
Find a port attached to the server which is on a network which
@ -1082,9 +1236,10 @@ class FloatingIPCloudMixin:
else:
timeout = None
for count in utils.iterate_timeout(
timeout,
"Timeout waiting for port to show up in list",
wait=self._PORT_AGE):
timeout,
"Timeout waiting for port to show up in list",
wait=self._PORT_AGE,
):
try:
port_filter = {'device_id': server['id']}
ports = self.search_ports(filters=port_filter)
@ -1103,7 +1258,9 @@ class FloatingIPCloudMixin:
'NAT Destination {nat_destination} was configured'
' but not found on the cloud. Please check your'
' config and your cloud and try again.'.format(
nat_destination=nat_destination))
nat_destination=nat_destination
)
)
else:
nat_network = self.get_nat_destination()
@ -1118,7 +1275,8 @@ class FloatingIPCloudMixin:
' nat_destination property of the networks list in'
' your clouds.yaml file. If you do not have a'
' clouds.yaml file, please make one - your setup'
' is complicated.'.format(server=server['id']))
' is complicated.'.format(server=server['id'])
)
maybe_ports = []
for maybe_port in ports:
@ -1129,7 +1287,9 @@ class FloatingIPCloudMixin:
'No port on server {server} was found matching'
' your NAT destination network {dest}. Please '
' check your config'.format(
server=server['id'], dest=nat_network['name']))
server=server['id'], dest=nat_network['name']
)
)
ports = maybe_ports
# Select the most recent available IPv4 address
@ -1139,9 +1299,8 @@ class FloatingIPCloudMixin:
# if there are more than one, will be the arbitrary port we
# select.
for port in sorted(
ports,
key=lambda p: p.get('created_at', 0),
reverse=True):
ports, key=lambda p: p.get('created_at', 0), reverse=True
):
for address in port.get('fixed_ips', list()):
try:
ip = ipaddress.ip_address(address['ip_address'])
@ -1152,7 +1311,8 @@ class FloatingIPCloudMixin:
return port, fixed_address
raise exc.OpenStackCloudException(
"unable to find a free fixed IPv4 address for server "
"{0}".format(server['id']))
"{0}".format(server['id'])
)
# unfortunately a port can have more than one fixed IP:
# we can't use the search_ports filtering for fixed_address as
# they are contained in a list. e.g.
@ -1178,8 +1338,10 @@ class FloatingIPCloudMixin:
return self._floating_ip_source in ('nova', 'neutron')
def _use_neutron_floating(self):
return (self.has_service('network')
and self._floating_ip_source == 'neutron')
return (
self.has_service('network')
and self._floating_ip_source == 'neutron'
)
def _normalize_floating_ips(self, ips):
"""Normalize the structure of floating IPs
@ -1210,16 +1372,13 @@ class FloatingIPCloudMixin:
]
"""
return [
self._normalize_floating_ip(ip) for ip in ips
]
return [self._normalize_floating_ip(ip) for ip in ips]
def _normalize_floating_ip(self, ip):
# Copy incoming floating ip because of shared dicts in unittests
# Only import munch when we really need it
location = self._get_current_location(
project_id=ip.get('owner'))
location = self._get_current_location(project_id=ip.get('owner'))
# This copy is to keep things from getting epically weird in tests
ip = ip.copy()
@ -1228,7 +1387,8 @@ class FloatingIPCloudMixin:
fixed_ip_address = ip.pop('fixed_ip_address', ip.pop('fixed_ip', None))
floating_ip_address = ip.pop('floating_ip_address', ip.pop('ip', None))
network_id = ip.pop(
'floating_network_id', ip.pop('network', ip.pop('pool', None)))
'floating_network_id', ip.pop('network', ip.pop('pool', None))
)
project_id = ip.pop('tenant_id', '')
project_id = ip.pop('project_id', project_id)

View File

@ -28,7 +28,8 @@ class IdentityCloudMixin:
def _identity_client(self):
if 'identity' not in self._raw_clients:
self._raw_clients['identity'] = self._get_versioned_client(
'identity', min_version=2, max_version='3.latest')
'identity', min_version=2, max_version='3.latest'
)
return self._raw_clients['identity']
@_utils.cache_on_arguments()
@ -129,8 +130,9 @@ class IdentityCloudMixin:
:raises: ``OpenStackCloudException`` if something goes wrong during
the OpenStack API call.
"""
return _utils._get_entity(self, 'project', name_or_id, filters,
domain_id=domain_id)
return _utils._get_entity(
self, 'project', name_or_id, filters, domain_id=domain_id
)
def update_project(
self,
@ -178,7 +180,7 @@ class IdentityCloudMixin:
name=name,
description=description,
domain_id=domain_id,
is_enabled=enabled
is_enabled=enabled,
)
if kwargs:
attrs.update(kwargs)
@ -195,19 +197,19 @@ class IdentityCloudMixin:
"""
try:
project = self.identity.find_project(
name_or_id=name_or_id,
ignore_missing=True,
domain_id=domain_id
name_or_id=name_or_id, ignore_missing=True, domain_id=domain_id
)
if not project:
self.log.debug(
"Project %s not found for deleting", name_or_id)
self.log.debug("Project %s not found for deleting", name_or_id)
return False
self.identity.delete_project(project)
return True
except exceptions.SDKException:
self.log.exception("Error in deleting project {project}".format(
project=name_or_id))
self.log.exception(
"Error in deleting project {project}".format(
project=name_or_id
)
)
return False
@_utils.valid_kwargs('domain_id', 'name')
@ -299,8 +301,15 @@ class IdentityCloudMixin:
"""
return self.identity.get_user(user_id)
@_utils.valid_kwargs('name', 'email', 'enabled', 'domain_id', 'password',
'description', 'default_project')
@_utils.valid_kwargs(
'name',
'email',
'enabled',
'domain_id',
'password',
'description',
'default_project',
)
def update_user(self, name_or_id, **kwargs):
self.list_users.invalidate(self)
user_kwargs = {}
@ -351,7 +360,8 @@ class IdentityCloudMixin:
user = self.get_user(name_or_id, **kwargs)
if not user:
self.log.debug(
"User {0} not found for deleting".format(name_or_id))
"User {0} not found for deleting".format(name_or_id)
)
return False
self.identity.delete_user(user)
@ -359,21 +369,23 @@ class IdentityCloudMixin:
return True
except exceptions.SDKException:
self.log.exception("Error in deleting user {user}".format(
user=name_or_id
))
self.log.exception(
"Error in deleting user {user}".format(user=name_or_id)
)
return False
def _get_user_and_group(self, user_name_or_id, group_name_or_id):
user = self.get_user(user_name_or_id)
if not user:
raise exc.OpenStackCloudException(
'User {user} not found'.format(user=user_name_or_id))
'User {user} not found'.format(user=user_name_or_id)
)
group = self.get_group(group_name_or_id)
if not group:
raise exc.OpenStackCloudException(
'Group {user} not found'.format(user=group_name_or_id))
'Group {user} not found'.format(user=group_name_or_id)
)
return (user, group)
@ -438,8 +450,9 @@ class IdentityCloudMixin:
return self.identity.create_service(**kwargs)
@_utils.valid_kwargs('name', 'enabled', 'type', 'service_type',
'description')
@_utils.valid_kwargs(
'name', 'enabled', 'type', 'service_type', 'description'
)
def update_service(self, name_or_id, **kwargs):
# NOTE(SamYaple): Keystone v3 only accepts 'type' but shade accepts
@ -519,7 +532,8 @@ class IdentityCloudMixin:
return True
except exceptions.SDKException:
self.log.exception(
'Failed to delete service {id}'.format(id=service['id']))
'Failed to delete service {id}'.format(id=service['id'])
)
return False
@_utils.valid_kwargs('public_url', 'internal_url', 'admin_url')
@ -560,31 +574,42 @@ class IdentityCloudMixin:
if service is None:
raise exc.OpenStackCloudException(
"service {service} not found".format(
service=service_name_or_id))
service=service_name_or_id
)
)
endpoints_args = []
if url:
# v3 in use, v3-like arguments, one endpoint created
endpoints_args.append(
{'url': url, 'interface': interface,
'service_id': service['id'], 'enabled': enabled,
'region_id': region})
{
'url': url,
'interface': interface,
'service_id': service['id'],
'enabled': enabled,
'region_id': region,
}
)
else:
# v3 in use, v2.0-like arguments, one endpoint created for each
# interface url provided
endpoint_args = {'region_id': region, 'enabled': enabled,
'service_id': service['id']}
endpoint_args = {
'region_id': region,
'enabled': enabled,
'service_id': service['id'],
}
if public_url:
endpoint_args.update({'url': public_url,
'interface': 'public'})
endpoint_args.update(
{'url': public_url, 'interface': 'public'}
)
endpoints_args.append(endpoint_args.copy())
if internal_url:
endpoint_args.update({'url': internal_url,
'interface': 'internal'})
endpoint_args.update(
{'url': internal_url, 'interface': 'internal'}
)
endpoints_args.append(endpoint_args.copy())
if admin_url:
endpoint_args.update({'url': admin_url,
'interface': 'admin'})
endpoint_args.update({'url': admin_url, 'interface': 'admin'})
endpoints_args.append(endpoint_args.copy())
endpoints = []
@ -592,8 +617,9 @@ class IdentityCloudMixin:
endpoints.append(self.identity.create_endpoint(**args))
return endpoints
@_utils.valid_kwargs('enabled', 'service_name_or_id', 'url', 'interface',
'region')
@_utils.valid_kwargs(
'enabled', 'service_name_or_id', 'url', 'interface', 'region'
)
def update_endpoint(self, endpoint_id, **kwargs):
service_name_or_id = kwargs.pop('service_name_or_id', None)
if service_name_or_id is not None:
@ -670,8 +696,7 @@ class IdentityCloudMixin:
self.identity.delete_endpoint(id)
return True
except exceptions.SDKException:
self.log.exception(
"Failed to delete endpoint {id}".format(id=id))
self.log.exception("Failed to delete endpoint {id}".format(id=id))
return False
def create_domain(self, name, description=None, enabled=True):
@ -746,7 +771,8 @@ class IdentityCloudMixin:
dom = self.get_domain(name_or_id=name_or_id)
if dom is None:
self.log.debug(
"Domain %s not found for deleting", name_or_id)
"Domain %s not found for deleting", name_or_id
)
return False
domain_id = dom['id']
@ -963,8 +989,7 @@ class IdentityCloudMixin:
try:
group = self.identity.find_group(name_or_id)
if group is None:
self.log.debug(
"Group %s not found for deleting", name_or_id)
self.log.debug("Group %s not found for deleting", name_or_id)
return False
self.identity.delete_group(group)
@ -974,7 +999,8 @@ class IdentityCloudMixin:
except exceptions.SDKException:
self.log.exception(
"Unable to delete group {name}".format(name=name_or_id))
"Unable to delete group {name}".format(name=name_or_id)
)
return False
def list_roles(self, **kwargs):
@ -1051,8 +1077,9 @@ class IdentityCloudMixin:
filters['scope.' + k + '.id'] = filters[k]
del filters[k]
if 'os_inherit_extension_inherited_to' in filters:
filters['scope.OS-INHERIT:inherited_to'] = (
filters['os_inherit_extension_inherited_to'])
filters['scope.OS-INHERIT:inherited_to'] = filters[
'os_inherit_extension_inherited_to'
]
del filters['os_inherit_extension_inherited_to']
return list(self.identity.role_assignments(**filters))
@ -1138,8 +1165,7 @@ class IdentityCloudMixin:
"""
role = self.get_role(name_or_id, **kwargs)
if role is None:
self.log.debug(
"Role %s not found for updating", name_or_id)
self.log.debug("Role %s not found for updating", name_or_id)
return False
return self.identity.update_role(role, name=name, **kwargs)
@ -1156,8 +1182,7 @@ class IdentityCloudMixin:
"""
role = self.get_role(name_or_id, **kwargs)
if role is None:
self.log.debug(
"Role %s not found for deleting", name_or_id)
self.log.debug("Role %s not found for deleting", name_or_id)
return False
try:
@ -1165,17 +1190,25 @@ class IdentityCloudMixin:
return True
except exceptions.SDKExceptions:
self.log.exception(
"Unable to delete role {name}".format(
name=name_or_id))
"Unable to delete role {name}".format(name=name_or_id)
)
raise
def _get_grant_revoke_params(self, role, user=None, group=None,
project=None, domain=None, system=None):
def _get_grant_revoke_params(
self,
role,
user=None,
group=None,
project=None,
domain=None,
system=None,
):
data = {}
search_args = {}
if domain:
data['domain'] = self.identity.find_domain(
domain, ignore_missing=False)
domain, ignore_missing=False
)
# We have domain. We should use it for further searching user,
# group, role, project
search_args['domain_id'] = data['domain'].id
@ -1183,33 +1216,47 @@ class IdentityCloudMixin:
data['role'] = self.identity.find_role(name_or_id=role)
if not data['role']:
raise exc.OpenStackCloudException(
'Role {0} not found.'.format(role))
'Role {0} not found.'.format(role)
)
if user:
# use cloud.get_user to save us from bad searching by name
data['user'] = self.get_user(user, filters=search_args)
if group:
data['group'] = self.identity.find_group(
group, ignore_missing=False, **search_args)
group, ignore_missing=False, **search_args
)
if data.get('user') and data.get('group'):
raise exc.OpenStackCloudException(
'Specify either a group or a user, not both')
'Specify either a group or a user, not both'
)
if data.get('user') is None and data.get('group') is None:
raise exc.OpenStackCloudException(
'Must specify either a user or a group')
'Must specify either a user or a group'
)
if project is None and domain is None and system is None:
raise exc.OpenStackCloudException(
'Must specify either a domain, project or system')
'Must specify either a domain, project or system'
)
if project:
data['project'] = self.identity.find_project(
project, ignore_missing=False, **search_args)
project, ignore_missing=False, **search_args
)
return data
def grant_role(self, name_or_id, user=None, group=None,
project=None, domain=None, system=None, wait=False,
timeout=60):
def grant_role(
self,
name_or_id,
user=None,
group=None,
project=None,
domain=None,
system=None,
wait=False,
timeout=60,
):
"""Grant a role to a user.
:param string name_or_id: Name or unique ID of the role.
@ -1236,8 +1283,13 @@ class IdentityCloudMixin:
:raise OpenStackCloudException: if the role cannot be granted
"""
data = self._get_grant_revoke_params(
name_or_id, user=user, group=group,
project=project, domain=domain, system=system)
name_or_id,
user=user,
group=group,
project=project,
domain=domain,
system=system,
)
user = data.get('user')
group = data.get('group')
@ -1249,63 +1301,73 @@ class IdentityCloudMixin:
# Proceed with project - precedence over domain and system
if user:
has_role = self.identity.validate_user_has_project_role(
project, user, role)
project, user, role
)
if has_role:
self.log.debug('Assignment already exists')
return False
self.identity.assign_project_role_to_user(
project, user, role)
self.identity.assign_project_role_to_user(project, user, role)
else:
has_role = self.identity.validate_group_has_project_role(
project, group, role)
project, group, role
)
if has_role:
self.log.debug('Assignment already exists')
return False
self.identity.assign_project_role_to_group(
project, group, role)
project, group, role
)
elif domain:
# Proceed with domain - precedence over system
if user:
has_role = self.identity.validate_user_has_domain_role(
domain, user, role)
domain, user, role
)
if has_role:
self.log.debug('Assignment already exists')
return False
self.identity.assign_domain_role_to_user(
domain, user, role)
self.identity.assign_domain_role_to_user(domain, user, role)
else:
has_role = self.identity.validate_group_has_domain_role(
domain, group, role)
domain, group, role
)
if has_role:
self.log.debug('Assignment already exists')
return False
self.identity.assign_domain_role_to_group(
domain, group, role)
self.identity.assign_domain_role_to_group(domain, group, role)
else:
# Proceed with system
# System name must be 'all' due to checks performed in
# _get_grant_revoke_params
if user:
has_role = self.identity.validate_user_has_system_role(
user, role, system)
user, role, system
)
if has_role:
self.log.debug('Assignment already exists')
return False
self.identity.assign_system_role_to_user(
user, role, system)
self.identity.assign_system_role_to_user(user, role, system)
else:
has_role = self.identity.validate_group_has_system_role(
group, role, system)
group, role, system
)
if has_role:
self.log.debug('Assignment already exists')
return False
self.identity.assign_system_role_to_group(
group, role, system)
self.identity.assign_system_role_to_group(group, role, system)
return True
def revoke_role(self, name_or_id, user=None, group=None,
project=None, domain=None, system=None,
wait=False, timeout=60):
def revoke_role(
self,
name_or_id,
user=None,
group=None,
project=None,
domain=None,
system=None,
wait=False,
timeout=60,
):
"""Revoke a role from a user.
:param string name_or_id: Name or unique ID of the role.
@ -1329,8 +1391,13 @@ class IdentityCloudMixin:
:raise OpenStackCloudException: if the role cannot be removed
"""
data = self._get_grant_revoke_params(
name_or_id, user=user, group=group,
project=project, domain=domain, system=system)
name_or_id,
user=user,
group=group,
project=project,
domain=domain,
system=system,
)
user = data.get('user')
group = data.get('group')
@ -1342,58 +1409,70 @@ class IdentityCloudMixin:
# Proceed with project - precedence over domain and system
if user:
has_role = self.identity.validate_user_has_project_role(
project, user, role)
project, user, role
)
if not has_role:
self.log.debug('Assignment does not exists')
return False
self.identity.unassign_project_role_from_user(
project, user, role)
project, user, role
)
else:
has_role = self.identity.validate_group_has_project_role(
project, group, role)
project, group, role
)
if not has_role:
self.log.debug('Assignment does not exists')
return False
self.identity.unassign_project_role_from_group(
project, group, role)
project, group, role
)
elif domain:
# Proceed with domain - precedence over system
if user:
has_role = self.identity.validate_user_has_domain_role(
domain, user, role)
domain, user, role
)
if not has_role:
self.log.debug('Assignment does not exists')
return False
self.identity.unassign_domain_role_from_user(
domain, user, role)
domain, user, role
)
else:
has_role = self.identity.validate_group_has_domain_role(
domain, group, role)
domain, group, role
)
if not has_role:
self.log.debug('Assignment does not exists')
return False
self.identity.unassign_domain_role_from_group(
domain, group, role)
domain, group, role
)
else:
# Proceed with system
# System name must be 'all' due to checks performed in
# _get_grant_revoke_params
if user:
has_role = self.identity.validate_user_has_system_role(
user, role, system)
user, role, system
)
if not has_role:
self.log.debug('Assignment does not exist')
return False
self.identity.unassign_system_role_from_user(
user, role, system)
user, role, system
)
else:
has_role = self.identity.validate_group_has_system_role(
group, role, system)
group, role, system
)
if not has_role:
self.log.debug('Assignment does not exist')
return False
self.identity.unassign_system_role_from_group(
group, role, system)
group, role, system
)
return True
def _get_identity_params(self, domain_id=None, project=None):
@ -1406,7 +1485,8 @@ class IdentityCloudMixin:
if not domain_id:
raise exc.OpenStackCloudException(
"User or project creation requires an explicit"
" domain_id argument.")
" domain_id argument."
)
else:
ret.update({'domain_id': domain_id})

View File

@ -46,7 +46,8 @@ class ImageCloudMixin:
def _image_client(self):
if 'image' not in self._raw_clients:
self._raw_clients['image'] = self._get_versioned_client(
'image', min_version=1, max_version='2.latest')
'image', min_version=1, max_version='2.latest'
)
return self._raw_clients['image']
def search_images(self, name_or_id=None, filters=None):
@ -108,7 +109,7 @@ class ImageCloudMixin:
return _utils._get_entity(self, 'image', name_or_id, filters)
def get_image_by_id(self, id):
""" Get a image by ID
"""Get a image by ID
:param id: ID of the image.
:returns: An image :class:`openstack.image.v2.image.Image` object.
@ -145,20 +146,23 @@ class ImageCloudMixin:
if output_path is None and output_file is None:
raise exc.OpenStackCloudException(
'No output specified, an output path or file object'
' is necessary to write the image data to')
' is necessary to write the image data to'
)
elif output_path is not None and output_file is not None:
raise exc.OpenStackCloudException(
'Both an output path and file object were provided,'
' however only one can be used at once')
' however only one can be used at once'
)
image = self.image.find_image(name_or_id)
if not image:
raise exc.OpenStackCloudResourceNotFound(
"No images with name or ID %s were found" % name_or_id, None)
"No images with name or ID %s were found" % name_or_id, None
)
return self.image.download_image(
image, output=output_file or output_path,
chunk_size=chunk_size)
image, output=output_file or output_path, chunk_size=chunk_size
)
def get_image_exclude(self, name_or_id, exclude):
for image in self.search_images(name_or_id):
@ -184,7 +188,8 @@ class ImageCloudMixin:
def wait_for_image(self, image, timeout=3600):
image_id = image['id']
for count in utils.iterate_timeout(
timeout, "Timeout waiting for image to snapshot"):
timeout, "Timeout waiting for image to snapshot"
):
self.list_images.invalidate(self)
image = self.get_image(image_id)
if not image:
@ -193,7 +198,8 @@ class ImageCloudMixin:
return image
elif image['status'] == 'error':
raise exc.OpenStackCloudException(
'Image {image} hit error state'.format(image=image_id))
'Image {image} hit error state'.format(image=image_id)
)
def delete_image(
self,
@ -222,17 +228,19 @@ class ImageCloudMixin:
# Task API means an image was uploaded to swift
# TODO(gtema) does it make sense to move this into proxy?
if self.image_api_use_tasks and (
self.image._IMAGE_OBJECT_KEY in image.properties
or self.image._SHADE_IMAGE_OBJECT_KEY in image.properties):
self.image._IMAGE_OBJECT_KEY in image.properties
or self.image._SHADE_IMAGE_OBJECT_KEY in image.properties
):
(container, objname) = image.properties.get(
self.image._IMAGE_OBJECT_KEY, image.properties.get(
self.image._SHADE_IMAGE_OBJECT_KEY)).split('/', 1)
self.image._IMAGE_OBJECT_KEY,
image.properties.get(self.image._SHADE_IMAGE_OBJECT_KEY),
).split('/', 1)
self.delete_object(container=container, name=objname)
if wait:
for count in utils.iterate_timeout(
timeout,
"Timeout waiting for the image to be deleted."):
timeout, "Timeout waiting for the image to be deleted."
):
self._get_cache(None).invalidate()
if self.get_image(image.id) is None:
break
@ -307,38 +315,53 @@ class ImageCloudMixin:
"""
if volume:
image = self.block_storage.create_image(
name=name, volume=volume,
name=name,
volume=volume,
allow_duplicates=allow_duplicates,
container_format=container_format, disk_format=disk_format,
wait=wait, timeout=timeout)
container_format=container_format,
disk_format=disk_format,
wait=wait,
timeout=timeout,
)
else:
image = self.image.create_image(
name, filename=filename,
name,
filename=filename,
container=container,
md5=md5, sha256=sha256,
disk_format=disk_format, container_format=container_format,
md5=md5,
sha256=sha256,
disk_format=disk_format,
container_format=container_format,
disable_vendor_agent=disable_vendor_agent,
wait=wait, timeout=timeout, tags=tags,
allow_duplicates=allow_duplicates, meta=meta, **kwargs)
wait=wait,
timeout=timeout,
tags=tags,
allow_duplicates=allow_duplicates,
meta=meta,
**kwargs,
)
self._get_cache(None).invalidate()
if not wait:
return image
try:
for count in utils.iterate_timeout(
timeout,
"Timeout waiting for the image to finish."):
timeout, "Timeout waiting for the image to finish."
):
image_obj = self.get_image(image.id)
if image_obj and image_obj.status not in ('queued', 'saving'):
return image_obj
except exc.OpenStackCloudTimeout:
self.log.debug(
"Timeout waiting for image to become ready. Deleting.")
"Timeout waiting for image to become ready. Deleting."
)
self.delete_image(image.id, wait=True)
raise
def update_image_properties(
self, image=None, name_or_id=None, meta=None, **properties):
self, image=None, name_or_id=None, meta=None, **properties
):
image = image or name_or_id
return self.image.update_image_properties(
image=image, meta=meta, **properties)
image=image, meta=meta, **properties
)

File diff suppressed because it is too large Load Diff

View File

@ -20,8 +20,8 @@ from openstack.cloud import exc
class NetworkCommonCloudMixin:
"""Shared networking functions used by FloatingIP, Network, Compute classes
"""
"""Shared networking functions used by FloatingIP, Network, Compute
classes."""
def __init__(self):
self._external_ipv4_names = self.config.get_external_ipv4_networks()
@ -33,9 +33,11 @@ class NetworkCommonCloudMixin:
self._default_network = self.config.get_default_network()
self._use_external_network = self.config.config.get(
'use_external_network', True)
'use_external_network', True
)
self._use_internal_network = self.config.config.get(
'use_internal_network', True)
'use_internal_network', True
)
self._networks_lock = threading.Lock()
self._reset_network_caches()
@ -90,46 +92,63 @@ class NetworkCommonCloudMixin:
for network in all_networks:
# External IPv4 networks
if (network['name'] in self._external_ipv4_names
or network['id'] in self._external_ipv4_names):
if (
network['name'] in self._external_ipv4_names
or network['id'] in self._external_ipv4_names
):
external_ipv4_networks.append(network)
elif ((network.is_router_external
or network.provider_physical_network)
and network['name'] not in self._internal_ipv4_names
and network['id'] not in self._internal_ipv4_names):
elif (
(
network.is_router_external
or network.provider_physical_network
)
and network['name'] not in self._internal_ipv4_names
and network['id'] not in self._internal_ipv4_names
):
external_ipv4_networks.append(network)
# Internal networks
if (network['name'] in self._internal_ipv4_names
or network['id'] in self._internal_ipv4_names):
if (
network['name'] in self._internal_ipv4_names
or network['id'] in self._internal_ipv4_names
):
internal_ipv4_networks.append(network)
elif (not network.is_router_external
and not network.provider_physical_network
and network['name'] not in self._external_ipv4_names
and network['id'] not in self._external_ipv4_names):
elif (
not network.is_router_external
and not network.provider_physical_network
and network['name'] not in self._external_ipv4_names
and network['id'] not in self._external_ipv4_names
):
internal_ipv4_networks.append(network)
# External networks
if (network['name'] in self._external_ipv6_names
or network['id'] in self._external_ipv6_names):
if (
network['name'] in self._external_ipv6_names
or network['id'] in self._external_ipv6_names
):
external_ipv6_networks.append(network)
elif (network.is_router_external
and network['name'] not in self._internal_ipv6_names
and network['id'] not in self._internal_ipv6_names):
elif (
network.is_router_external
and network['name'] not in self._internal_ipv6_names
and network['id'] not in self._internal_ipv6_names
):
external_ipv6_networks.append(network)
# Internal networks
if (network['name'] in self._internal_ipv6_names
or network['id'] in self._internal_ipv6_names):
if (
network['name'] in self._internal_ipv6_names
or network['id'] in self._internal_ipv6_names
):
internal_ipv6_networks.append(network)
elif (not network.is_router_external
and network['name'] not in self._external_ipv6_names
and network['id'] not in self._external_ipv6_names):
elif (
not network.is_router_external
and network['name'] not in self._external_ipv6_names
and network['id'] not in self._external_ipv6_names
):
internal_ipv6_networks.append(network)
# External Floating IPv4 networks
if self._nat_source in (
network['name'], network['id']):
if self._nat_source in (network['name'], network['id']):
if nat_source:
raise exc.OpenStackCloudException(
'Multiple networks were found matching'
@ -137,8 +156,8 @@ class NetworkCommonCloudMixin:
' to be the NAT source. Please check your'
' cloud resources. It is probably a good idea'
' to configure this network by ID rather than'
' by name.'.format(
nat_net=self._nat_source))
' by name.'.format(nat_net=self._nat_source)
)
external_ipv4_floating_networks.append(network)
nat_source = network
elif self._nat_source is None:
@ -147,8 +166,7 @@ class NetworkCommonCloudMixin:
nat_source = nat_source or network
# NAT Destination
if self._nat_destination in (
network['name'], network['id']):
if self._nat_destination in (network['name'], network['id']):
if nat_destination:
raise exc.OpenStackCloudException(
'Multiple networks were found matching'
@ -156,8 +174,8 @@ class NetworkCommonCloudMixin:
' to be the NAT destination. Please check your'
' cloud resources. It is probably a good idea'
' to configure this network by ID rather than'
' by name.'.format(
nat_net=self._nat_destination))
' by name.'.format(nat_net=self._nat_destination)
)
nat_destination = network
elif self._nat_destination is None:
# TODO(mordred) need a config value for floating
@ -174,14 +192,16 @@ class NetworkCommonCloudMixin:
for subnet in all_subnets:
# TODO(mordred) trap for detecting more than
# one network with a gateway_ip without a config
if ('gateway_ip' in subnet and subnet['gateway_ip']
and network['id'] == subnet['network_id']):
if (
'gateway_ip' in subnet
and subnet['gateway_ip']
and network['id'] == subnet['network_id']
):
nat_destination = network
break
# Default network
if self._default_network in (
network['name'], network['id']):
if self._default_network in (network['name'], network['id']):
if default_network:
raise exc.OpenStackCloudException(
'Multiple networks were found matching'
@ -190,8 +210,8 @@ class NetworkCommonCloudMixin:
' network. Please check your cloud resources.'
' It is probably a good idea'
' to configure this network by ID rather than'
' by name.'.format(
default_net=self._default_network))
' by name.'.format(default_net=self._default_network)
)
default_network = network
# Validate config vs. reality
@ -200,49 +220,57 @@ class NetworkCommonCloudMixin:
raise exc.OpenStackCloudException(
"Networks: {network} was provided for external IPv4"
" access and those networks could not be found".format(
network=net_name))
network=net_name
)
)
for net_name in self._internal_ipv4_names:
if net_name not in [net['name'] for net in internal_ipv4_networks]:
raise exc.OpenStackCloudException(
"Networks: {network} was provided for internal IPv4"
" access and those networks could not be found".format(
network=net_name))
network=net_name
)
)
for net_name in self._external_ipv6_names:
if net_name not in [net['name'] for net in external_ipv6_networks]:
raise exc.OpenStackCloudException(
"Networks: {network} was provided for external IPv6"
" access and those networks could not be found".format(
network=net_name))
network=net_name
)
)
for net_name in self._internal_ipv6_names:
if net_name not in [net['name'] for net in internal_ipv6_networks]:
raise exc.OpenStackCloudException(
"Networks: {network} was provided for internal IPv6"
" access and those networks could not be found".format(
network=net_name))
network=net_name
)
)
if self._nat_destination and not nat_destination:
raise exc.OpenStackCloudException(
'Network {network} was configured to be the'
' destination for inbound NAT but it could not be'
' found'.format(
network=self._nat_destination))
' found'.format(network=self._nat_destination)
)
if self._nat_source and not nat_source:
raise exc.OpenStackCloudException(
'Network {network} was configured to be the'
' source for inbound NAT but it could not be'
' found'.format(
network=self._nat_source))
' found'.format(network=self._nat_source)
)
if self._default_network and not default_network:
raise exc.OpenStackCloudException(
'Network {network} was configured to be the'
' default network interface but it could not be'
' found'.format(
network=self._default_network))
' found'.format(network=self._default_network)
)
self._external_ipv4_networks = external_ipv4_networks
self._external_ipv4_floating_networks = external_ipv4_floating_networks
@ -304,9 +332,8 @@ class NetworkCommonCloudMixin:
:returns: A list of network ``Network`` objects if any are found
"""
self._find_interesting_networks()
return (
list(self._external_ipv4_networks)
+ list(self._external_ipv6_networks)
return list(self._external_ipv4_networks) + list(
self._external_ipv6_networks
)
def get_internal_networks(self):
@ -318,9 +345,8 @@ class NetworkCommonCloudMixin:
:returns: A list of network ``Network`` objects if any are found
"""
self._find_interesting_networks()
return (
list(self._internal_ipv4_networks)
+ list(self._internal_ipv6_networks)
return list(self._internal_ipv4_networks) + list(
self._internal_ipv6_networks
)
def get_external_ipv4_networks(self):

View File

@ -105,9 +105,7 @@ class ObjectStoreCloudMixin:
container = self.get_container(name)
if container:
return container
attrs = dict(
name=name
)
attrs = dict(name=name)
if public:
attrs['read_ACL'] = OBJECT_CONTAINER_ACLS['public']
container = self.object_store.create_container(**attrs)
@ -129,7 +127,9 @@ class ObjectStoreCloudMixin:
'Attempt to delete container {container} failed. The'
' container is not empty. Please delete the objects'
' inside it before deleting the container'.format(
container=name))
container=name
)
)
def update_container(self, name, headers):
"""Update the metadata in a container.
@ -138,7 +138,8 @@ class ObjectStoreCloudMixin:
:param dict headers: Key/Value headers to set on the container.
"""
self.object_store.set_container_metadata(
name, refresh=False, **headers)
name, refresh=False, **headers
)
def set_container_access(self, name, access, refresh=False):
"""Set the access control list on a container.
@ -152,11 +153,10 @@ class ObjectStoreCloudMixin:
if access not in OBJECT_CONTAINER_ACLS:
raise exc.OpenStackCloudException(
"Invalid container access specified: %s. Must be one of %s"
% (access, list(OBJECT_CONTAINER_ACLS.keys())))
% (access, list(OBJECT_CONTAINER_ACLS.keys()))
)
return self.object_store.set_container_metadata(
name,
read_ACL=OBJECT_CONTAINER_ACLS[access],
refresh=refresh
name, read_ACL=OBJECT_CONTAINER_ACLS[access], refresh=refresh
)
def get_container_access(self, name):
@ -179,7 +179,8 @@ class ObjectStoreCloudMixin:
if str(acl) == str(value):
return key
raise exc.OpenStackCloudException(
"Could not determine container access for ACL: %s." % acl)
"Could not determine container access for ACL: %s." % acl
)
@_utils.cache_on_arguments()
def get_object_capabilities(self):
@ -201,7 +202,8 @@ class ObjectStoreCloudMixin:
return self.object_store.get_object_segment_size(segment_size)
def is_object_stale(
self, container, name, filename, file_md5=None, file_sha256=None):
self, container, name, filename, file_md5=None, file_sha256=None
):
"""Check to see if an object matches the hashes of a file.
:param container: Name of the container.
@ -213,8 +215,11 @@ class ObjectStoreCloudMixin:
Defaults to None which means calculate locally.
"""
return self.object_store.is_object_stale(
container, name, filename,
file_md5=file_md5, file_sha256=file_sha256
container,
name,
filename,
file_md5=file_md5,
file_sha256=file_sha256,
)
def create_directory_marker_object(self, container, name, **headers):
@ -241,11 +246,8 @@ class ObjectStoreCloudMixin:
headers['content-type'] = 'application/directory'
return self.create_object(
container,
name,
data='',
generate_checksums=False,
**headers)
container, name, data='', generate_checksums=False, **headers
)
def create_object(
self,
@ -295,12 +297,16 @@ class ObjectStoreCloudMixin:
:raises: ``OpenStackCloudException`` on operation error.
"""
return self.object_store.create_object(
container, name,
filename=filename, data=data,
md5=md5, sha256=sha256, use_slo=use_slo,
container,
name,
filename=filename,
data=data,
md5=md5,
sha256=sha256,
use_slo=use_slo,
generate_checksums=generate_checksums,
metadata=metadata,
**headers
**headers,
)
def update_object(self, container, name, metadata=None, **headers):
@ -317,8 +323,7 @@ class ObjectStoreCloudMixin:
"""
meta = metadata.copy() or {}
meta.update(**headers)
self.object_store.set_object_metadata(
name, container, **meta)
self.object_store.set_object_metadata(name, container, **meta)
def list_objects(self, container, full_listing=True, prefix=None):
"""List objects.
@ -330,10 +335,9 @@ class ObjectStoreCloudMixin:
:returns: A list of object store ``Object`` objects.
:raises: OpenStackCloudException on operation error.
"""
return list(self.object_store.objects(
container=container,
prefix=prefix
))
return list(
self.object_store.objects(container=container, prefix=prefix)
)
def search_objects(self, container, name=None, filters=None):
"""Search objects.
@ -364,7 +368,9 @@ class ObjectStoreCloudMixin:
"""
try:
self.object_store.delete_object(
name, ignore_missing=False, container=container,
name,
ignore_missing=False,
container=container,
)
return True
except exceptions.SDKException:
@ -400,9 +406,7 @@ class ObjectStoreCloudMixin:
:param name:
:returns: The object metadata.
"""
return self.object_store.get_object_metadata(
name, container
).metadata
return self.object_store.get_object_metadata(name, container).metadata
def get_object_raw(self, container, obj, query_string=None, stream=False):
"""Get a raw response object for an object.
@ -422,12 +426,12 @@ class ObjectStoreCloudMixin:
endpoint = urllib.parse.quote(container)
if obj:
endpoint = '{endpoint}/{object}'.format(
endpoint=endpoint,
object=urllib.parse.quote(obj)
endpoint=endpoint, object=urllib.parse.quote(obj)
)
if query_string:
endpoint = '{endpoint}?{query_string}'.format(
endpoint=endpoint, query_string=query_string)
endpoint=endpoint, query_string=query_string
)
return endpoint
def stream_object(
@ -451,13 +455,21 @@ class ObjectStoreCloudMixin:
"""
try:
for ret in self.object_store.stream_object(
obj, container, chunk_size=resp_chunk_size):
obj, container, chunk_size=resp_chunk_size
):
yield ret
except exceptions.ResourceNotFound:
return
def get_object(self, container, obj, query_string=None,
resp_chunk_size=1024, outfile=None, stream=False):
def get_object(
self,
container,
obj,
query_string=None,
resp_chunk_size=1024,
outfile=None,
stream=False,
):
"""Get the headers and body of an object
:param string container: Name of the container.
@ -477,13 +489,13 @@ class ObjectStoreCloudMixin:
"""
try:
obj = self.object_store.get_object(
obj, container=container,
obj,
container=container,
resp_chunk_size=resp_chunk_size,
outfile=outfile,
remember_content=(outfile is None)
remember_content=(outfile is None),
)
headers = {
k.lower(): v for k, v in obj._last_headers.items()}
headers = {k.lower(): v for k, v in obj._last_headers.items()}
return (headers, obj.data)
except exceptions.ResourceNotFound:
@ -500,10 +512,13 @@ class ObjectStoreCloudMixin:
result = completed.result()
exceptions.raise_from_response(result)
results.append(result)
except (keystoneauth1.exceptions.RetriableConnectionFailure,
exceptions.HttpException) as e:
except (
keystoneauth1.exceptions.RetriableConnectionFailure,
exceptions.HttpException,
) as e:
error_text = "Exception processing async task: {}".format(
str(e))
str(e)
)
if raise_on_error:
self.log.exception(error_text)
raise

View File

@ -41,20 +41,33 @@ class OrchestrationCloudMixin:
return self._raw_clients['orchestration']
def get_template_contents(
self, template_file=None, template_url=None,
template_object=None, files=None):
self,
template_file=None,
template_url=None,
template_object=None,
files=None,
):
return self.orchestration.get_template_contents(
template_file=template_file, template_url=template_url,
template_object=template_object, files=files)
template_file=template_file,
template_url=template_url,
template_object=template_object,
files=files,
)
def create_stack(
self, name, tags=None,
template_file=None, template_url=None,
template_object=None, files=None,
rollback=True,
wait=False, timeout=3600,
environment_files=None,
**parameters):
self,
name,
tags=None,
template_file=None,
template_url=None,
template_object=None,
files=None,
rollback=True,
wait=False,
timeout=3600,
environment_files=None,
**parameters
):
"""Create a stack.
:param string name: Name of the stack.
@ -83,27 +96,36 @@ class OrchestrationCloudMixin:
tags=tags,
is_rollback_disabled=not rollback,
timeout_mins=timeout // 60,
parameters=parameters
parameters=parameters,
)
params.update(
self.orchestration.read_env_and_templates(
template_file=template_file,
template_url=template_url,
template_object=template_object,
files=files,
environment_files=environment_files,
)
)
params.update(self.orchestration.read_env_and_templates(
template_file=template_file, template_url=template_url,
template_object=template_object, files=files,
environment_files=environment_files
))
self.orchestration.create_stack(name=name, **params)
if wait:
event_utils.poll_for_events(self, stack_name=name,
action='CREATE')
event_utils.poll_for_events(self, stack_name=name, action='CREATE')
return self.get_stack(name)
def update_stack(
self, name_or_id,
template_file=None, template_url=None,
template_object=None, files=None,
rollback=True, tags=None,
wait=False, timeout=3600,
environment_files=None,
**parameters):
self,
name_or_id,
template_file=None,
template_url=None,
template_object=None,
files=None,
rollback=True,
tags=None,
wait=False,
timeout=3600,
environment_files=None,
**parameters
):
"""Update a stack.
:param string name_or_id: Name or ID of the stack to update.
@ -131,27 +153,31 @@ class OrchestrationCloudMixin:
tags=tags,
is_rollback_disabled=not rollback,
timeout_mins=timeout // 60,
parameters=parameters
parameters=parameters,
)
params.update(
self.orchestration.read_env_and_templates(
template_file=template_file,
template_url=template_url,
template_object=template_object,
files=files,
environment_files=environment_files,
)
)
params.update(self.orchestration.read_env_and_templates(
template_file=template_file, template_url=template_url,
template_object=template_object, files=files,
environment_files=environment_files
))
if wait:
# find the last event to use as the marker
events = event_utils.get_events(
self, name_or_id, event_args={'sort_dir': 'desc', 'limit': 1})
self, name_or_id, event_args={'sort_dir': 'desc', 'limit': 1}
)
marker = events[0].id if events else None
# Not to cause update of ID field pass stack as dict
self.orchestration.update_stack(stack={'id': name_or_id}, **params)
if wait:
event_utils.poll_for_events(self,
name_or_id,
action='UPDATE',
marker=marker)
event_utils.poll_for_events(
self, name_or_id, action='UPDATE', marker=marker
)
return self.get_stack(name_or_id)
def delete_stack(self, name_or_id, wait=False):
@ -173,24 +199,26 @@ class OrchestrationCloudMixin:
if wait:
# find the last event to use as the marker
events = event_utils.get_events(
self, name_or_id, event_args={'sort_dir': 'desc', 'limit': 1})
self, name_or_id, event_args={'sort_dir': 'desc', 'limit': 1}
)
marker = events[0].id if events else None
self.orchestration.delete_stack(stack)
if wait:
try:
event_utils.poll_for_events(self,
stack_name=name_or_id,
action='DELETE',
marker=marker)
event_utils.poll_for_events(
self, stack_name=name_or_id, action='DELETE', marker=marker
)
except exc.OpenStackCloudHTTPError:
pass
stack = self.get_stack(name_or_id, resolve_outputs=False)
if stack and stack['stack_status'] == 'DELETE_FAILED':
raise exc.OpenStackCloudException(
"Failed to delete stack {id}: {reason}".format(
id=name_or_id, reason=stack['stack_status_reason']))
id=name_or_id, reason=stack['stack_status_reason']
)
)
return True
@ -246,12 +274,12 @@ class OrchestrationCloudMixin:
stack = self.orchestration.find_stack(
name_or_id,
ignore_missing=False,
resolve_outputs=resolve_outputs)
resolve_outputs=resolve_outputs,
)
if stack.status == 'DELETE_COMPLETE':
return []
except exc.OpenStackCloudURINotFound:
return []
return _utils._filter_list([stack], name_or_id, filters)
return _utils._get_entity(
self, _search_one_stack, name_or_id, filters)
return _utils._get_entity(self, _search_one_stack, name_or_id, filters)

View File

@ -59,15 +59,16 @@ class SecurityGroupCloudMixin:
if self._use_neutron_secgroups():
# pass filters dict to the list to filter as much as possible on
# the server side
return list(
self.network.security_groups(**filters))
return list(self.network.security_groups(**filters))
# Handle nova security groups
else:
data = proxy._json_response(self.compute.get(
'/os-security-groups', params=filters))
data = proxy._json_response(
self.compute.get('/os-security-groups', params=filters)
)
return self._normalize_secgroups(
self._get_and_munchify('security_groups', data))
self._get_and_munchify('security_groups', data)
)
def get_security_group(self, name_or_id, filters=None):
"""Get a security group by name or ID.
@ -93,11 +94,10 @@ class SecurityGroupCloudMixin:
or None if no matching security group is found.
"""
return _utils._get_entity(
self, 'security_group', name_or_id, filters)
return _utils._get_entity(self, 'security_group', name_or_id, filters)
def get_security_group_by_id(self, id):
""" Get a security group by ID
"""Get a security group by ID
:param id: ID of the security group.
:returns: A security group
@ -107,20 +107,23 @@ class SecurityGroupCloudMixin:
raise exc.OpenStackCloudUnavailableFeature(
"Unavailable feature: security groups"
)
error_message = ("Error getting security group with"
" ID {id}".format(id=id))
error_message = "Error getting security group with" " ID {id}".format(
id=id
)
if self._use_neutron_secgroups():
return self.network.get_security_group(id)
else:
data = proxy._json_response(
self.compute.get(
'/os-security-groups/{id}'.format(id=id)),
error_message=error_message)
self.compute.get('/os-security-groups/{id}'.format(id=id)),
error_message=error_message,
)
return self._normalize_secgroup(
self._get_and_munchify('security_group', data))
self._get_and_munchify('security_group', data)
)
def create_security_group(self, name, description,
project_id=None, stateful=None):
def create_security_group(
self, name, description, project_id=None, stateful=None
):
"""Create a new security group
:param string name: A name for the security group.
@ -145,22 +148,23 @@ class SecurityGroupCloudMixin:
)
data = []
security_group_json = {
'name': name, 'description': description
}
security_group_json = {'name': name, 'description': description}
if stateful is not None:
security_group_json['stateful'] = stateful
if project_id is not None:
security_group_json['tenant_id'] = project_id
if self._use_neutron_secgroups():
return self.network.create_security_group(
**security_group_json)
return self.network.create_security_group(**security_group_json)
else:
data = proxy._json_response(self.compute.post(
'/os-security-groups',
json={'security_group': security_group_json}))
data = proxy._json_response(
self.compute.post(
'/os-security-groups',
json={'security_group': security_group_json},
)
)
return self._normalize_secgroup(
self._get_and_munchify('security_group', data))
self._get_and_munchify('security_group', data)
)
def delete_security_group(self, name_or_id):
"""Delete a security group
@ -183,18 +187,23 @@ class SecurityGroupCloudMixin:
# the delete.
secgroup = self.get_security_group(name_or_id)
if secgroup is None:
self.log.debug('Security group %s not found for deleting',
name_or_id)
self.log.debug(
'Security group %s not found for deleting', name_or_id
)
return False
if self._use_neutron_secgroups():
self.network.delete_security_group(
secgroup['id'], ignore_missing=False)
secgroup['id'], ignore_missing=False
)
return True
else:
proxy._json_response(self.compute.delete(
'/os-security-groups/{id}'.format(id=secgroup['id'])))
proxy._json_response(
self.compute.delete(
'/os-security-groups/{id}'.format(id=secgroup['id'])
)
)
return True
@_utils.valid_kwargs('name', 'description', 'stateful')
@ -220,35 +229,38 @@ class SecurityGroupCloudMixin:
if group is None:
raise exc.OpenStackCloudException(
"Security group %s not found." % name_or_id)
"Security group %s not found." % name_or_id
)
if self._use_neutron_secgroups():
return self.network.update_security_group(
group['id'],
**kwargs
)
return self.network.update_security_group(group['id'], **kwargs)
else:
for key in ('name', 'description'):
kwargs.setdefault(key, group[key])
data = proxy._json_response(
self.compute.put(
'/os-security-groups/{id}'.format(id=group['id']),
json={'security_group': kwargs}))
json={'security_group': kwargs},
)
)
return self._normalize_secgroup(
self._get_and_munchify('security_group', data))
self._get_and_munchify('security_group', data)
)
def create_security_group_rule(self,
secgroup_name_or_id,
port_range_min=None,
port_range_max=None,
protocol=None,
remote_ip_prefix=None,
remote_group_id=None,
remote_address_group_id=None,
direction='ingress',
ethertype='IPv4',
project_id=None,
description=None):
def create_security_group_rule(
self,
secgroup_name_or_id,
port_range_min=None,
port_range_max=None,
protocol=None,
remote_ip_prefix=None,
remote_group_id=None,
remote_address_group_id=None,
direction='ingress',
ethertype='IPv4',
project_id=None,
description=None,
):
"""Create a new security group rule
:param string secgroup_name_or_id:
@ -308,31 +320,32 @@ class SecurityGroupCloudMixin:
secgroup = self.get_security_group(secgroup_name_or_id)
if not secgroup:
raise exc.OpenStackCloudException(
"Security group %s not found." % secgroup_name_or_id)
"Security group %s not found." % secgroup_name_or_id
)
if self._use_neutron_secgroups():
# NOTE: Nova accepts -1 port numbers, but Neutron accepts None
# as the equivalent value.
rule_def = {
'security_group_id': secgroup['id'],
'port_range_min':
None if port_range_min == -1 else port_range_min,
'port_range_max':
None if port_range_max == -1 else port_range_max,
'port_range_min': None
if port_range_min == -1
else port_range_min,
'port_range_max': None
if port_range_max == -1
else port_range_max,
'protocol': protocol,
'remote_ip_prefix': remote_ip_prefix,
'remote_group_id': remote_group_id,
'remote_address_group_id': remote_address_group_id,
'direction': direction,
'ethertype': ethertype
'ethertype': ethertype,
}
if project_id is not None:
rule_def['tenant_id'] = project_id
if description is not None:
rule_def["description"] = description
return self.network.create_security_group_rule(
**rule_def
)
return self.network.create_security_group_rule(**rule_def)
else:
# NOTE: Neutron accepts None for protocol. Nova does not.
if protocol is None:
@ -343,7 +356,8 @@ class SecurityGroupCloudMixin:
'Rule creation failed: Nova does not support egress rules'
)
raise exc.OpenStackCloudException(
'No support for egress rules')
'No support for egress rules'
)
# NOTE: Neutron accepts None for ports, but Nova requires -1
# as the equivalent value for ICMP.
@ -363,24 +377,28 @@ class SecurityGroupCloudMixin:
port_range_min = 1
port_range_max = 65535
security_group_rule_dict = dict(security_group_rule=dict(
parent_group_id=secgroup['id'],
ip_protocol=protocol,
from_port=port_range_min,
to_port=port_range_max,
cidr=remote_ip_prefix,
group_id=remote_group_id
))
security_group_rule_dict = dict(
security_group_rule=dict(
parent_group_id=secgroup['id'],
ip_protocol=protocol,
from_port=port_range_min,
to_port=port_range_max,
cidr=remote_ip_prefix,
group_id=remote_group_id,
)
)
if project_id is not None:
security_group_rule_dict[
'security_group_rule']['tenant_id'] = project_id
security_group_rule_dict['security_group_rule'][
'tenant_id'
] = project_id
data = proxy._json_response(
self.compute.post(
'/os-security-group-rules',
json=security_group_rule_dict
))
'/os-security-group-rules', json=security_group_rule_dict
)
)
return self._normalize_secgroup_rule(
self._get_and_munchify('security_group_rule', data))
self._get_and_munchify('security_group_rule', data)
)
def delete_security_group_rule(self, rule_id):
"""Delete a security group rule
@ -401,8 +419,7 @@ class SecurityGroupCloudMixin:
if self._use_neutron_secgroups():
self.network.delete_security_group_rule(
rule_id,
ignore_missing=False
rule_id, ignore_missing=False
)
return True
@ -410,7 +427,9 @@ class SecurityGroupCloudMixin:
try:
exceptions.raise_from_response(
self.compute.delete(
'/os-security-group-rules/{id}'.format(id=rule_id)))
'/os-security-group-rules/{id}'.format(id=rule_id)
)
)
except exc.OpenStackCloudResourceNotFound:
return False
@ -423,8 +442,9 @@ class SecurityGroupCloudMixin:
return self.secgroup_source.lower() in ('nova', 'neutron')
def _use_neutron_secgroups(self):
return (self.has_service('network')
and self.secgroup_source == 'neutron')
return (
self.has_service('network') and self.secgroup_source == 'neutron'
)
def _normalize_secgroups(self, groups):
"""Normalize the structure of security groups
@ -454,7 +474,8 @@ class SecurityGroupCloudMixin:
self._remove_novaclient_artifacts(group)
rules = self._normalize_secgroup_rules(
group.pop('security_group_rules', group.pop('rules', [])))
group.pop('security_group_rules', group.pop('rules', []))
)
project_id = group.pop('tenant_id', '')
project_id = group.pop('project_id', project_id)
@ -506,14 +527,14 @@ class SecurityGroupCloudMixin:
ret['direction'] = rule.pop('direction', 'ingress')
ret['ethertype'] = rule.pop('ethertype', 'IPv4')
port_range_min = rule.get(
'port_range_min', rule.pop('from_port', None))
'port_range_min', rule.pop('from_port', None)
)
if port_range_min == -1:
port_range_min = None
if port_range_min is not None:
port_range_min = int(port_range_min)
ret['port_range_min'] = port_range_min
port_range_max = rule.pop(
'port_range_max', rule.pop('to_port', None))
port_range_max = rule.pop('port_range_max', rule.pop('to_port', None))
if port_range_max == -1:
port_range_max = None
if port_range_min is not None:
@ -521,9 +542,11 @@ class SecurityGroupCloudMixin:
ret['port_range_max'] = port_range_max
ret['protocol'] = rule.pop('protocol', rule.pop('ip_protocol', None))
ret['remote_ip_prefix'] = rule.pop(
'remote_ip_prefix', rule.pop('ip_range', {}).get('cidr', None))
'remote_ip_prefix', rule.pop('ip_range', {}).get('cidr', None)
)
ret['security_group_id'] = rule.pop(
'security_group_id', rule.pop('parent_group_id', None))
'security_group_id', rule.pop('parent_group_id', None)
)
ret['remote_group_id'] = rule.pop('remote_group_id', None)
project_id = rule.pop('tenant_id', '')
project_id = rule.pop('project_id', project_id)

View File

@ -102,8 +102,9 @@ def _filter_list(data, name_or_id, filters):
e_id = _make_unicode(e.get('id', None))
e_name = _make_unicode(e.get('name', None))
if ((e_id and e_id == name_or_id)
or (e_name and e_name == name_or_id)):
if (e_id and e_id == name_or_id) or (
e_name and e_name == name_or_id
):
identifier_matches.append(e)
else:
# Only try fnmatch if we don't match exactly
@ -112,8 +113,9 @@ def _filter_list(data, name_or_id, filters):
# so that we log the bad pattern
bad_pattern = True
continue
if ((e_id and fn_reg.match(e_id))
or (e_name and fn_reg.match(e_name))):
if (e_id and fn_reg.match(e_id)) or (
e_name and fn_reg.match(e_name)
):
identifier_matches.append(e)
if not identifier_matches and bad_pattern:
log.debug("Bad pattern passed to fnmatch", exc_info=True)
@ -172,8 +174,9 @@ def _get_entity(cloud, resource, name_or_id, filters, **kwargs):
# an additional call, it's simple enough to test to see if we got an
# object and just short-circuit return it.
if (hasattr(name_or_id, 'id')
or (isinstance(name_or_id, dict) and 'id' in name_or_id)):
if hasattr(name_or_id, 'id') or (
isinstance(name_or_id, dict) and 'id' in name_or_id
):
return name_or_id
# If a uuid is passed short-circuit it calling the
@ -183,14 +186,18 @@ def _get_entity(cloud, resource, name_or_id, filters, **kwargs):
if get_resource:
return get_resource(name_or_id)
search = resource if callable(resource) else getattr(
cloud, 'search_%ss' % resource, None)
search = (
resource
if callable(resource)
else getattr(cloud, 'search_%ss' % resource, None)
)
if search:
entities = search(name_or_id, filters, **kwargs)
if entities:
if len(entities) > 1:
raise exc.OpenStackCloudException(
"Multiple matches found for %s" % name_or_id)
"Multiple matches found for %s" % name_or_id
)
return entities[0]
return None
@ -230,8 +237,10 @@ def valid_kwargs(*valid_args):
if k not in argspec.args[1:] and k not in valid_args:
raise TypeError(
"{f}() got an unexpected keyword argument "
"'{arg}'".format(f=inspect.stack()[1][3], arg=k))
"'{arg}'".format(f=inspect.stack()[1][3], arg=k)
)
return func(*args, **kwargs)
return func_wrapper
@ -244,6 +253,7 @@ def _func_wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
return f(*args, **kwargs)
return inner
@ -253,20 +263,23 @@ def cache_on_arguments(*cache_on_args, **cache_on_kwargs):
def _inner_cache_on_arguments(func):
def _cache_decorator(obj, *args, **kwargs):
the_method = obj._get_cache(_cache_name).cache_on_arguments(
*cache_on_args, **cache_on_kwargs)(
_func_wrap(func.__get__(obj, type(obj))))
*cache_on_args, **cache_on_kwargs
)(_func_wrap(func.__get__(obj, type(obj))))
return the_method(*args, **kwargs)
def invalidate(obj, *args, **kwargs):
return obj._get_cache(
_cache_name).cache_on_arguments()(func).invalidate(
*args, **kwargs)
return (
obj._get_cache(_cache_name)
.cache_on_arguments()(func)
.invalidate(*args, **kwargs)
)
_cache_decorator.invalidate = invalidate
_cache_decorator.func = func
_decorated_methods.append(func.__name__)
return _cache_decorator
return _inner_cache_on_arguments
@ -320,7 +333,8 @@ def safe_dict_min(key, data):
raise exc.OpenStackCloudException(
"Search for minimum value failed. "
"Value for {key} is not an integer: {value}".format(
key=key, value=d[key])
key=key, value=d[key]
)
)
if (min_value is None) or (val < min_value):
min_value = val
@ -352,16 +366,17 @@ def safe_dict_max(key, data):
raise exc.OpenStackCloudException(
"Search for maximum value failed. "
"Value for {key} is not an integer: {value}".format(
key=key, value=d[key])
key=key, value=d[key]
)
)
if (max_value is None) or (val > max_value):
max_value = val
return max_value
def _call_client_and_retry(client, url, retry_on=None,
call_retries=3, retry_wait=2,
**kwargs):
def _call_client_and_retry(
client, url, retry_on=None, call_retries=3, retry_wait=2, **kwargs
):
"""Method to provide retry operations.
Some APIs utilize HTTP errors on certain operations to indicate that
@ -391,18 +406,17 @@ def _call_client_and_retry(client, url, retry_on=None,
retry_on = [retry_on]
count = 0
while (count < call_retries):
while count < call_retries:
count += 1
try:
ret_val = client(url, **kwargs)
except exc.OpenStackCloudHTTPError as e:
if (retry_on is not None
and e.response.status_code in retry_on):
log.debug('Received retryable error %(err)s, waiting '
'%(wait)s seconds to retry', {
'err': e.response.status_code,
'wait': retry_wait
})
if retry_on is not None and e.response.status_code in retry_on:
log.debug(
'Received retryable error %(err)s, waiting '
'%(wait)s seconds to retry',
{'err': e.response.status_code, 'wait': retry_wait},
)
time.sleep(retry_wait)
continue
else:
@ -484,7 +498,8 @@ def range_filter(data, key, range_exp):
# If parsing the range fails, it must be a bad value.
if val_range is None:
raise exc.OpenStackCloudException(
"Invalid range value: {value}".format(value=range_exp))
"Invalid range value: {value}".format(value=range_exp)
)
op = val_range[0]
if op:
@ -523,9 +538,7 @@ def generate_patches_from_kwargs(operation, **kwargs):
"""
patches = []
for k, v in kwargs.items():
patch = {'op': operation,
'value': v,
'path': '/%s' % k}
patch = {'op': operation, 'value': v, 'path': '/%s' % k}
patches.append(patch)
return sorted(patches)
@ -568,11 +581,13 @@ class FileSegment:
def _format_uuid_string(string):
return (string.replace('urn:', '')
.replace('uuid:', '')
.strip('{}')
.replace('-', '')
.lower())
return (
string.replace('urn:', '')
.replace('uuid:', '')
.strip('{}')
.replace('-', '')
.lower()
)
def _is_uuid_like(val):

View File

@ -32,20 +32,35 @@ def output_format_dict(data, use_yaml):
def parse_args():
parser = argparse.ArgumentParser(description='OpenStack Inventory Module')
parser.add_argument('--refresh', action='store_true',
help='Refresh cached information')
parser.add_argument(
'--refresh', action='store_true', help='Refresh cached information'
)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--list', action='store_true',
help='List active servers')
group.add_argument(
'--list', action='store_true', help='List active servers'
)
group.add_argument('--host', help='List details about the specific host')
parser.add_argument('--private', action='store_true', default=False,
help='Use private IPs for interface_ip')
parser.add_argument('--cloud', default=None,
help='Return data for one cloud only')
parser.add_argument('--yaml', action='store_true', default=False,
help='Output data in nicely readable yaml')
parser.add_argument('--debug', action='store_true', default=False,
help='Enable debug output')
parser.add_argument(
'--private',
action='store_true',
default=False,
help='Use private IPs for interface_ip',
)
parser.add_argument(
'--cloud', default=None, help='Return data for one cloud only'
)
parser.add_argument(
'--yaml',
action='store_true',
default=False,
help='Output data in nicely readable yaml',
)
parser.add_argument(
'--debug',
action='store_true',
default=False,
help='Enable debug output',
)
return parser.parse_args()
@ -54,8 +69,8 @@ def main():
try:
openstack.enable_logging(debug=args.debug)
inventory = openstack.cloud.inventory.OpenStackInventory(
refresh=args.refresh, private=args.private,
cloud=args.cloud)
refresh=args.refresh, private=args.private, cloud=args.cloud
)
if args.list:
output = inventory.list_hosts()
elif args.host:

View File

@ -19,12 +19,14 @@ OpenStackCloudTimeout = exceptions.ResourceTimeout
class OpenStackCloudCreateException(OpenStackCloudException):
def __init__(self, resource, resource_id, extra_data=None, **kwargs):
super(OpenStackCloudCreateException, self).__init__(
message="Error creating {resource}: {resource_id}".format(
resource=resource, resource_id=resource_id),
extra_data=extra_data, **kwargs)
resource=resource, resource_id=resource_id
),
extra_data=extra_data,
**kwargs
)
self.resource_id = resource_id

View File

@ -28,15 +28,23 @@ class OpenStackInventory:
extra_config = None
def __init__(
self, config_files=None, refresh=False, private=False,
config_key=None, config_defaults=None, cloud=None,
use_direct_get=False):
self,
config_files=None,
refresh=False,
private=False,
config_key=None,
config_defaults=None,
cloud=None,
use_direct_get=False,
):
if config_files is None:
config_files = []
config = loader.OpenStackConfig(
config_files=loader.CONFIG_FILES + config_files)
config_files=loader.CONFIG_FILES + config_files
)
self.extra_config = config.get_extra_config(
config_key, config_defaults)
config_key, config_defaults
)
if cloud is None:
self.clouds = [
@ -44,9 +52,7 @@ class OpenStackInventory:
for cloud_region in config.get_all()
]
else:
self.clouds = [
connection.Connection(config=config.get_one(cloud))
]
self.clouds = [connection.Connection(config=config.get_one(cloud))]
if private:
for cloud in self.clouds:
@ -57,15 +63,17 @@ class OpenStackInventory:
for cloud in self.clouds:
cloud._cache.invalidate()
def list_hosts(self, expand=True, fail_on_cloud_config=True,
all_projects=False):
def list_hosts(
self, expand=True, fail_on_cloud_config=True, all_projects=False
):
hostvars = []
for cloud in self.clouds:
try:
# Cycle on servers
for server in cloud.list_servers(detailed=expand,
all_projects=all_projects):
for server in cloud.list_servers(
detailed=expand, all_projects=all_projects
):
hostvars.append(server)
except exceptions.OpenStackCloudException:
# Don't fail on one particular cloud as others may work

View File

@ -23,8 +23,9 @@ from openstack import utils
NON_CALLABLES = (str, bool, dict, int, float, list, type(None))
def find_nova_interfaces(addresses, ext_tag=None, key_name=None, version=4,
mac_addr=None):
def find_nova_interfaces(
addresses, ext_tag=None, key_name=None, version=4, mac_addr=None
):
ret = []
for (k, v) in iter(addresses.items()):
if key_name is not None and k != key_name:
@ -64,10 +65,12 @@ def find_nova_interfaces(addresses, ext_tag=None, key_name=None, version=4,
return ret
def find_nova_addresses(addresses, ext_tag=None, key_name=None, version=4,
mac_addr=None):
interfaces = find_nova_interfaces(addresses, ext_tag, key_name, version,
mac_addr)
def find_nova_addresses(
addresses, ext_tag=None, key_name=None, version=4, mac_addr=None
):
interfaces = find_nova_interfaces(
addresses, ext_tag, key_name, version, mac_addr
)
floating_addrs = []
fixed_addrs = []
for i in interfaces:
@ -91,8 +94,7 @@ def get_server_ip(server, public=False, cloud_public=True, **kwargs):
private ip we expect shade to be able to reach
"""
addrs = find_nova_addresses(server['addresses'], **kwargs)
return find_best_address(
addrs, public=public, cloud_public=cloud_public)
return find_best_address(addrs, public=public, cloud_public=cloud_public)
def get_server_private_ip(server, cloud=None):
@ -126,30 +128,34 @@ def get_server_private_ip(server, cloud=None):
int_nets = cloud.get_internal_ipv4_networks()
for int_net in int_nets:
int_ip = get_server_ip(
server, key_name=int_net['name'],
server,
key_name=int_net['name'],
ext_tag='fixed',
cloud_public=not cloud.private,
mac_addr=fip_mac)
mac_addr=fip_mac,
)
if int_ip is not None:
return int_ip
# Try a second time without the fixed tag. This is for old nova-network
# results that do not have the fixed/floating tag.
for int_net in int_nets:
int_ip = get_server_ip(
server, key_name=int_net['name'],
server,
key_name=int_net['name'],
cloud_public=not cloud.private,
mac_addr=fip_mac)
mac_addr=fip_mac,
)
if int_ip is not None:
return int_ip
ip = get_server_ip(
server, ext_tag='fixed', key_name='private', mac_addr=fip_mac)
server, ext_tag='fixed', key_name='private', mac_addr=fip_mac
)
if ip:
return ip
# Last resort, and Rackspace
return get_server_ip(
server, key_name='private')
return get_server_ip(server, key_name='private')
def get_server_external_ipv4(cloud, server):
@ -183,8 +189,11 @@ def get_server_external_ipv4(cloud, server):
ext_nets = cloud.get_external_ipv4_networks()
for ext_net in ext_nets:
ext_ip = get_server_ip(
server, key_name=ext_net['name'], public=True,
cloud_public=not cloud.private)
server,
key_name=ext_net['name'],
public=True,
cloud_public=not cloud.private,
)
if ext_ip is not None:
return ext_ip
@ -192,8 +201,8 @@ def get_server_external_ipv4(cloud, server):
# Much as I might find floating IPs annoying, if it has one, that's
# almost certainly the one that wants to be used
ext_ip = get_server_ip(
server, ext_tag='floating', public=True,
cloud_public=not cloud.private)
server, ext_tag='floating', public=True, cloud_public=not cloud.private
)
if ext_ip is not None:
return ext_ip
@ -203,8 +212,8 @@ def get_server_external_ipv4(cloud, server):
# Try to get an address from a network named 'public'
ext_ip = get_server_ip(
server, key_name='public', public=True,
cloud_public=not cloud.private)
server, key_name='public', public=True, cloud_public=not cloud.private
)
if ext_ip is not None:
return ext_ip
@ -238,15 +247,21 @@ def find_best_address(addresses, public=False, cloud_public=True):
for address in addresses:
try:
for count in utils.iterate_timeout(
5, "Timeout waiting for %s" % address, wait=0.1):
5, "Timeout waiting for %s" % address, wait=0.1
):
# Return the first one that is reachable
try:
for res in socket.getaddrinfo(
address, 22, socket.AF_UNSPEC,
socket.SOCK_STREAM, 0):
address,
22,
socket.AF_UNSPEC,
socket.SOCK_STREAM,
0,
):
family, socktype, proto, _, sa = res
connect_socket = socket.socket(
family, socktype, proto)
family, socktype, proto
)
connect_socket.settimeout(1)
connect_socket.connect(sa)
return address
@ -265,12 +280,13 @@ def find_best_address(addresses, public=False, cloud_public=True):
"The cloud returned multiple addresses %s:, and we could not "
"connect to port 22 on either. That might be what you wanted, "
"but we have no clue what's going on, so we picked the first one "
"%s" % (addresses, addresses[0]))
"%s" % (addresses, addresses[0])
)
return addresses[0]
def get_server_external_ipv6(server):
""" Get an IPv6 address reachable from outside the cloud.
"""Get an IPv6 address reachable from outside the cloud.
This function assumes that if a server has an IPv6 address, that address
is reachable from outside the cloud.
@ -286,7 +302,7 @@ def get_server_external_ipv6(server):
def get_server_default_ip(cloud, server):
""" Get the configured 'default' address
"""Get the configured 'default' address
It is possible in clouds.yaml to configure for a cloud a network that
is the 'default_interface'. This is the network that should be used
@ -299,22 +315,26 @@ def get_server_default_ip(cloud, server):
"""
ext_net = cloud.get_default_network()
if ext_net:
if (cloud._local_ipv6 and not cloud.force_ipv4):
if cloud._local_ipv6 and not cloud.force_ipv4:
# try 6 first, fall back to four
versions = [6, 4]
else:
versions = [4]
for version in versions:
ext_ip = get_server_ip(
server, key_name=ext_net['name'], version=version, public=True,
cloud_public=not cloud.private)
server,
key_name=ext_net['name'],
version=version,
public=True,
cloud_public=not cloud.private,
)
if ext_ip is not None:
return ext_ip
return None
def _get_interface_ip(cloud, server):
""" Get the interface IP for the server
"""Get the interface IP for the server
Interface IP is the IP that should be used for communicating with the
server. It is:
@ -329,7 +349,7 @@ def _get_interface_ip(cloud, server):
if cloud.private and server['private_v4']:
return server['private_v4']
if (server['public_v6'] and cloud._local_ipv6 and not cloud.force_ipv4):
if server['public_v6'] and cloud._local_ipv6 and not cloud.force_ipv4:
return server['public_v6']
else:
return server['public_v4']
@ -404,15 +424,19 @@ def _get_supplemental_addresses(cloud, server):
try:
# Don't bother doing this before the server is active, it's a waste
# of an API call while polling for a server to come up
if (cloud.has_service('network')
and cloud._has_floating_ips()
and server['status'] == 'ACTIVE'):
if (
cloud.has_service('network')
and cloud._has_floating_ips()
and server['status'] == 'ACTIVE'
):
for port in cloud.search_ports(
filters=dict(device_id=server['id'])):
filters=dict(device_id=server['id'])
):
# This SHOULD return one and only one FIP - but doing it as a
# search/list lets the logic work regardless
for fip in cloud.search_floating_ips(
filters=dict(port_id=port['id'])):
filters=dict(port_id=port['id'])
):
fixed_net = fixed_ip_mapping.get(fip['fixed_ip_address'])
if fixed_net is None:
log = _log.setup_logging('openstack')
@ -422,10 +446,12 @@ def _get_supplemental_addresses(cloud, server):
" with the floating ip in the neutron listing"
" does not exist in the nova listing. Something"
" is exceptionally broken.",
dict(fip=fip['id'], server=server['id']))
dict(fip=fip['id'], server=server['id']),
)
else:
server['addresses'][fixed_net].append(
_make_address_dict(fip, port))
_make_address_dict(fip, port)
)
except exc.OpenStackCloudException:
# If something goes wrong with a cloud call, that's cool - this is
# an attempt to provide additional data and should not block forward
@ -485,8 +511,7 @@ def get_hostvars_from_server(cloud, server, mounts=None):
expand_server_vars if caching is not set up. If caching is set up,
the extra cost should be minimal.
"""
server_vars = obj_to_munch(
add_server_interfaces(cloud, server))
server_vars = obj_to_munch(add_server_interfaces(cloud, server))
flavor_id = server['flavor'].get('id')
if flavor_id:
@ -539,7 +564,7 @@ def get_hostvars_from_server(cloud, server, mounts=None):
def obj_to_munch(obj):
""" Turn an object with attributes into a dict suitable for serializing.
"""Turn an object with attributes into a dict suitable for serializing.
Some of the things that are returned in OpenStack are objects with
attributes. That's awesome - except when you want to expose them as JSON

View File

@ -12,6 +12,7 @@
import copy
import functools
import queue
# import types so that we can reference ListType in sphinx param declarations.
# We can't just use list, because sphinx gets confused by
# openstack.resource.Resource.list and openstack.resource2.Resource.list
@ -60,6 +61,7 @@ class _OpenStackCloudMixin:
:param bool strict: Only return documented attributes for each resource
as per the Data Model contract. (Default False)
"""
_OBJECT_MD5_KEY = 'x-sdk-md5'
_OBJECT_SHA256_KEY = 'x-sdk-sha256'
_OBJECT_AUTOCREATE_KEY = 'x-sdk-autocreated'
@ -90,7 +92,8 @@ class _OpenStackCloudMixin:
# cert verification
if not self.verify:
self.log.debug(
"Turning off Insecure SSL warnings since verify=False")
"Turning off Insecure SSL warnings since verify=False"
)
category = requestsexceptions.InsecureRequestWarning
if category:
# InsecureRequestWarning references a Warning class or is None
@ -131,19 +134,20 @@ class _OpenStackCloudMixin:
meth_obj = getattr(self, method, None)
if not meth_obj:
continue
if (hasattr(meth_obj, 'invalidate')
and hasattr(meth_obj, 'func')):
if hasattr(meth_obj, 'invalidate') and hasattr(
meth_obj, 'func'
):
new_func = functools.partial(meth_obj.func, self)
new_func.invalidate = _fake_invalidate
setattr(self, method, new_func)
# Uncoditionally create cache even with a "null" backend
self._cache = self._make_cache(
cache_class, cache_expiration_time, cache_arguments)
cache_class, cache_expiration_time, cache_arguments
)
expirations = self.config.get_cache_expirations()
for expire_key in expirations.keys():
self._cache_expirations[expire_key] = \
expirations[expire_key]
self._cache_expirations[expire_key] = expirations[expire_key]
# TODO(gtema): delete in next change
self._SERVER_AGE = 0
@ -159,7 +163,8 @@ class _OpenStackCloudMixin:
self._raw_clients = {}
self._local_ipv6 = (
_utils.localhost_supports_ipv6() if not self.force_ipv4 else False)
_utils.localhost_supports_ipv6() if not self.force_ipv4 else False
)
def connect_as(self, **kwargs):
"""Make a new OpenStackCloud object with new auth context.
@ -191,7 +196,8 @@ class _OpenStackCloudMixin:
config = openstack.config.OpenStackConfig(
app_name=self.config._app_name,
app_version=self.config._app_version,
load_yaml_config=False)
load_yaml_config=False,
)
params = copy.deepcopy(self.config.config)
# Remove profile from current cloud so that overridding works
params.pop('profile', None)
@ -298,7 +304,8 @@ class _OpenStackCloudMixin:
app_name=self.config._app_name,
app_version=self.config._app_version,
discovery_cache=self.session._discovery_cache,
**params)
**params
)
# Override the cloud name so that logging/location work right
cloud_region._name = self.name
@ -313,9 +320,8 @@ class _OpenStackCloudMixin:
return dogpile.cache.make_region(
function_key_generator=self._make_cache_key
).configure(
cache_class,
expiration_time=expiration_time,
arguments=arguments)
cache_class, expiration_time=expiration_time, arguments=arguments
)
def _make_cache_key(self, namespace, fn):
fname = fn.__name__
@ -329,10 +335,11 @@ class _OpenStackCloudMixin:
arg_key = ''
kw_keys = sorted(kwargs.keys())
kwargs_key = ','.join(
['%s:%s' % (k, kwargs[k]) for k in kw_keys if k != 'cache'])
ans = "_".join(
[str(name_key), fname, arg_key, kwargs_key])
['%s:%s' % (k, kwargs[k]) for k in kw_keys if k != 'cache']
)
ans = "_".join([str(name_key), fname, arg_key, kwargs_key])
return ans
return generate_key
def _get_cache(self, resource_name):
@ -349,7 +356,8 @@ class _OpenStackCloudMixin:
return version
def _get_versioned_client(
self, service_type, min_version=None, max_version=None):
self, service_type, min_version=None, max_version=None
):
config_version = self.config.get_api_version(service_type)
config_major = self._get_major_version_id(config_version)
max_major = self._get_major_version_id(max_version)
@ -372,7 +380,9 @@ class _OpenStackCloudMixin:
" but shade understands a minimum of {min_version}".format(
config_version=config_version,
service_type=service_type,
min_version=min_version))
min_version=min_version,
)
)
elif max_major and config_major > max_major:
raise exc.OpenStackCloudException(
"Version {config_version} requested for {service_type}"
@ -380,10 +390,13 @@ class _OpenStackCloudMixin:
" {max_version}".format(
config_version=config_version,
service_type=service_type,
max_version=max_version))
max_version=max_version,
)
)
request_min_version = config_version
request_max_version = '{version}.latest'.format(
version=config_major)
version=config_major
)
adapter = proxy._ShadeAdapter(
session=self.session,
service_type=self.config.get_service_type(service_type),
@ -397,7 +410,8 @@ class _OpenStackCloudMixin:
prometheus_histogram=self.config.get_prometheus_histogram(),
influxdb_client=self.config.get_influxdb_client(),
min_version=request_min_version,
max_version=request_max_version)
max_version=request_max_version,
)
if adapter.get_endpoint():
return adapter
@ -409,12 +423,14 @@ class _OpenStackCloudMixin:
endpoint_override=self.config.get_endpoint(service_type),
region_name=self.config.get_region_name(service_type),
min_version=min_version,
max_version=max_version)
max_version=max_version,
)
# data.api_version can be None if no version was detected, such
# as with neutron
api_version = adapter.get_api_major_version(
endpoint_override=self.config.get_endpoint(service_type))
endpoint_override=self.config.get_endpoint(service_type)
)
api_major = self._get_major_version_id(api_version)
# If we detect a different version that was configured, warn the user.
@ -430,7 +446,9 @@ class _OpenStackCloudMixin:
' your config.'.format(
service_type=service_type,
config_version=config_version,
api_version='.'.join([str(f) for f in api_version])))
api_version='.'.join([str(f) for f in api_version]),
)
)
self.log.debug(warning_msg)
warnings.warn(warning_msg)
return adapter
@ -438,19 +456,22 @@ class _OpenStackCloudMixin:
# TODO(shade) This should be replaced with using openstack Connection
# object.
def _get_raw_client(
self, service_type, api_version=None, endpoint_override=None):
self, service_type, api_version=None, endpoint_override=None
):
return proxy._ShadeAdapter(
session=self.session,
service_type=self.config.get_service_type(service_type),
service_name=self.config.get_service_name(service_type),
interface=self.config.get_interface(service_type),
endpoint_override=self.config.get_endpoint(
service_type) or endpoint_override,
region_name=self.config.get_region_name(service_type))
endpoint_override=self.config.get_endpoint(service_type)
or endpoint_override,
region_name=self.config.get_region_name(service_type),
)
def _is_client_version(self, client, version):
client_name = '_{client}_client'.format(
client=client.replace('-', '_'))
client=client.replace('-', '_')
)
client = getattr(self, client_name)
return client._version_matches(version)
@ -458,7 +479,8 @@ class _OpenStackCloudMixin:
def _application_catalog_client(self):
if 'application-catalog' not in self._raw_clients:
self._raw_clients['application-catalog'] = self._get_raw_client(
'application-catalog')
'application-catalog'
)
return self._raw_clients['application-catalog']
@property
@ -478,6 +500,7 @@ class _OpenStackCloudMixin:
"""Wrapper around pprint that groks munch objects"""
# import late since this is a utility function
import pprint
new_resource = _utils._dictify_resource(resource)
pprint.pprint(new_resource)
@ -485,6 +508,7 @@ class _OpenStackCloudMixin:
"""Wrapper around pformat that groks munch objects"""
# import late since this is a utility function
import pprint
new_resource = _utils._dictify_resource(resource)
return pprint.pformat(new_resource)
@ -521,7 +545,8 @@ class _OpenStackCloudMixin:
return self.config.get_endpoint_from_catalog(
service_type=service_type,
interface=interface,
region_name=region_name)
region_name=region_name,
)
@property
def auth_token(self):
@ -600,10 +625,9 @@ class _OpenStackCloudMixin:
region_name=None,
zone=None,
project=utils.Munch(
id=None,
name=None,
domain_id=None,
domain_name=None))
id=None, name=None, domain_id=None, domain_name=None
),
)
def _get_project_id_param_dict(self, name_or_id):
if name_or_id:
@ -628,7 +652,8 @@ class _OpenStackCloudMixin:
if not domain_id:
raise exc.OpenStackCloudException(
"User or project creation requires an explicit"
" domain_id argument.")
" domain_id argument."
)
else:
return {'domain_id': domain_id}
else:
@ -714,7 +739,8 @@ class _OpenStackCloudMixin:
return self.config.get_session_endpoint(service_key, **kwargs)
except keystoneauth1.exceptions.catalog.EndpointNotFound as e:
self.log.debug(
"Endpoint not found in %s cloud: %s", self.name, str(e))
"Endpoint not found in %s cloud: %s", self.name, str(e)
)
endpoint = None
except exc.OpenStackCloudException:
raise
@ -725,17 +751,22 @@ class _OpenStackCloudMixin:
service=service_key,
cloud=self.name,
region=self.config.get_region_name(service_key),
error=str(e)))
error=str(e),
)
)
return endpoint
def has_service(self, service_key, version=None):
if not self.config.has_service(service_key):
# TODO(mordred) add a stamp here so that we only report this once
if not (service_key in self._disable_warnings
and self._disable_warnings[service_key]):
if not (
service_key in self._disable_warnings
and self._disable_warnings[service_key]
):
self.log.debug(
"Disabling %(service_key)s entry in catalog"
" per config", {'service_key': service_key})
"Disabling %(service_key)s entry in catalog" " per config",
{'service_key': service_key},
)
self._disable_warnings[service_key] = True
return False
try:
@ -786,26 +817,23 @@ class _OpenStackCloudMixin:
(service_name, resource_name) = resource_type.split('.')
if not hasattr(self, service_name):
raise exceptions.SDKException(
"service %s is not existing/enabled" %
service_name
"service %s is not existing/enabled" % service_name
)
service_proxy = getattr(self, service_name)
try:
resource_type = service_proxy._resource_registry[resource_name]
except KeyError:
raise exceptions.SDKException(
"Resource %s is not known in service %s" %
(resource_name, service_name)
"Resource %s is not known in service %s"
% (resource_name, service_name)
)
if name_or_id:
# name_or_id is definitely not None
try:
resource_by_id = service_proxy._get(
resource_type,
name_or_id,
*get_args,
**get_kwargs)
resource_type, name_or_id, *get_args, **get_kwargs
)
return [resource_by_id]
except exceptions.ResourceNotFound:
pass
@ -817,11 +845,9 @@ class _OpenStackCloudMixin:
filters["name"] = name_or_id
list_kwargs.update(filters)
return list(service_proxy._list(
resource_type,
*list_args,
**list_kwargs
))
return list(
service_proxy._list(resource_type, *list_args, **list_kwargs)
)
def project_cleanup(
self,
@ -829,7 +855,7 @@ class _OpenStackCloudMixin:
wait_timeout=120,
status_queue=None,
filters=None,
resource_evaluation_fn=None
resource_evaluation_fn=None,
):
"""Cleanup the project resources.
@ -866,7 +892,7 @@ class _OpenStackCloudMixin:
dependencies.update(deps)
except (
exceptions.NotSupported,
exceptions.ServiceDisabledException
exceptions.ServiceDisabledException,
):
# Cloud may include endpoint in catalog but not
# implement the service or disable it
@ -895,7 +921,7 @@ class _OpenStackCloudMixin:
client_status_queue=status_queue,
identified_resources=cleanup_resources,
filters=filters,
resource_evaluation_fn=resource_evaluation_fn
resource_evaluation_fn=resource_evaluation_fn,
)
except exceptions.ServiceDisabledException:
# same reason as above
@ -908,9 +934,10 @@ class _OpenStackCloudMixin:
dep_graph.node_done(service)
for count in utils.iterate_timeout(
timeout=wait_timeout,
message="Timeout waiting for cleanup to finish",
wait=1):
timeout=wait_timeout,
message="Timeout waiting for cleanup to finish",
wait=1,
):
if dep_graph.is_complete():
return

View File

@ -21,7 +21,6 @@ from openstack.tests.functional import base
class TestAggregate(base.BaseFunctionalTest):
def test_aggregates(self):
if not self.operator_cloud:
self.skipTest("Operator cloud is required for this test")
@ -30,31 +29,28 @@ class TestAggregate(base.BaseFunctionalTest):
self.addCleanup(self.cleanup, aggregate_name)
aggregate = self.operator_cloud.create_aggregate(aggregate_name)
aggregate_ids = [v['id']
for v in self.operator_cloud.list_aggregates()]
aggregate_ids = [
v['id'] for v in self.operator_cloud.list_aggregates()
]
self.assertIn(aggregate['id'], aggregate_ids)
aggregate = self.operator_cloud.update_aggregate(
aggregate_name,
availability_zone=availability_zone
aggregate_name, availability_zone=availability_zone
)
self.assertEqual(availability_zone, aggregate['availability_zone'])
aggregate = self.operator_cloud.set_aggregate_metadata(
aggregate_name,
{'key': 'value'}
aggregate_name, {'key': 'value'}
)
self.assertIn('key', aggregate['metadata'])
aggregate = self.operator_cloud.set_aggregate_metadata(
aggregate_name,
{'key': None}
aggregate_name, {'key': None}
)
self.assertNotIn('key', aggregate['metadata'])
# Validate that we can delete by name
self.assertTrue(
self.operator_cloud.delete_aggregate(aggregate_name))
self.assertTrue(self.operator_cloud.delete_aggregate(aggregate_name))
def cleanup(self, aggregate_name):
aggregate = self.operator_cloud.get_aggregate(aggregate_name)

View File

@ -26,7 +26,6 @@ from openstack.tests.functional import base
class TestClusterTemplate(base.BaseFunctionalTest):
def setUp(self):
super(TestClusterTemplate, self).setUp()
if not self.user_cloud.has_service(
@ -52,8 +51,16 @@ class TestClusterTemplate(base.BaseFunctionalTest):
# generate a keypair to add to nova
subprocess.call(
['ssh-keygen', '-t', 'rsa', '-N', '', '-f',
'%s/id_rsa_sdk' % self.ssh_directory])
[
'ssh-keygen',
'-t',
'rsa',
'-N',
'',
'-f',
'%s/id_rsa_sdk' % self.ssh_directory,
]
)
# add keypair to nova
with open('%s/id_rsa_sdk.pub' % self.ssh_directory) as f:
@ -62,8 +69,8 @@ class TestClusterTemplate(base.BaseFunctionalTest):
# Test we can create a cluster_template and we get it returned
self.ct = self.user_cloud.create_cluster_template(
name=name, image_id=image_id,
keypair_id=keypair_id, coe=coe)
name=name, image_id=image_id, keypair_id=keypair_id, coe=coe
)
self.assertEqual(self.ct['name'], name)
self.assertEqual(self.ct['image_id'], image_id)
self.assertEqual(self.ct['keypair_id'], keypair_id)
@ -80,7 +87,8 @@ class TestClusterTemplate(base.BaseFunctionalTest):
# Test we get the same cluster_template with the
# get_cluster_template method
cluster_template_get = self.user_cloud.get_cluster_template(
self.ct['uuid'])
self.ct['uuid']
)
self.assertEqual(cluster_template_get['uuid'], self.ct['uuid'])
# Test the get method also works by name
@ -90,14 +98,15 @@ class TestClusterTemplate(base.BaseFunctionalTest):
# Test we can update a field on the cluster_template and only that
# field is updated
cluster_template_update = self.user_cloud.update_cluster_template(
self.ct, tls_disabled=True)
self.assertEqual(
cluster_template_update['uuid'], self.ct['uuid'])
self.ct, tls_disabled=True
)
self.assertEqual(cluster_template_update['uuid'], self.ct['uuid'])
self.assertTrue(cluster_template_update['tls_disabled'])
# Test we can delete and get True returned
cluster_template_delete = self.user_cloud.delete_cluster_template(
self.ct['uuid'])
self.ct['uuid']
)
self.assertTrue(cluster_template_delete)
def cleanup(self, name):

File diff suppressed because it is too large Load Diff

View File

@ -59,7 +59,8 @@ class TestCompute(base.BaseFunctionalTest):
self.user_cloud.delete_server(server.name)
for volume in volumes:
self.operator_cloud.delete_volume(
volume.id, wait=False, force=True)
volume.id, wait=False, force=True
)
def test_create_and_delete_server(self):
self.addCleanup(self._cleanup_servers_and_volumes, self.server_name)
@ -67,13 +68,15 @@ class TestCompute(base.BaseFunctionalTest):
name=self.server_name,
image=self.image,
flavor=self.flavor,
wait=True)
wait=True,
)
self.assertEqual(self.server_name, server['name'])
self.assertEqual(self.image.id, server['image']['id'])
self.assertEqual(self.flavor.name, server['flavor']['original_name'])
self.assertIsNotNone(server['adminPass'])
self.assertTrue(
self.user_cloud.delete_server(self.server_name, wait=True))
self.user_cloud.delete_server(self.server_name, wait=True)
)
srv = self.user_cloud.get_server(self.server_name)
self.assertTrue(srv is None or srv.status.lower() == 'deleted')
@ -84,14 +87,17 @@ class TestCompute(base.BaseFunctionalTest):
image=self.image,
flavor=self.flavor,
auto_ip=True,
wait=True)
wait=True,
)
self.assertEqual(self.server_name, server['name'])
self.assertEqual(self.image.id, server['image']['id'])
self.assertEqual(self.flavor.name, server['flavor']['original_name'])
self.assertIsNotNone(server['adminPass'])
self.assertTrue(
self.user_cloud.delete_server(
self.server_name, wait=True, delete_ips=True))
self.server_name, wait=True, delete_ips=True
)
)
srv = self.user_cloud.get_server(self.server_name)
self.assertTrue(srv is None or srv.status.lower() == 'deleted')
@ -100,8 +106,8 @@ class TestCompute(base.BaseFunctionalTest):
server_name = self.getUniqueString()
self.addCleanup(self._cleanup_servers_and_volumes, server_name)
server = self.user_cloud.create_server(
name=server_name, image=self.image, flavor=self.flavor,
wait=True)
name=server_name, image=self.image, flavor=self.flavor, wait=True
)
volume = self.user_cloud.create_volume(1)
vol_attachment = self.user_cloud.attach_volume(server, volume)
for key in ('device', 'serverId', 'volumeId'):
@ -116,14 +122,16 @@ class TestCompute(base.BaseFunctionalTest):
image=self.image,
flavor=self.flavor,
config_drive=True,
wait=True)
wait=True,
)
self.assertEqual(self.server_name, server['name'])
self.assertEqual(self.image.id, server['image']['id'])
self.assertEqual(self.flavor.name, server['flavor']['original_name'])
self.assertTrue(server['has_config_drive'])
self.assertIsNotNone(server['adminPass'])
self.assertTrue(
self.user_cloud.delete_server(self.server_name, wait=True))
self.user_cloud.delete_server(self.server_name, wait=True)
)
srv = self.user_cloud.get_server(self.server_name)
self.assertTrue(srv is None or srv.status.lower() == 'deleted')
@ -137,15 +145,16 @@ class TestCompute(base.BaseFunctionalTest):
image=self.image,
flavor=self.flavor,
config_drive=None,
wait=True)
wait=True,
)
self.assertEqual(self.server_name, server['name'])
self.assertEqual(self.image.id, server['image']['id'])
self.assertEqual(self.flavor.name, server['flavor']['original_name'])
self.assertFalse(server['has_config_drive'])
self.assertIsNotNone(server['adminPass'])
self.assertTrue(
self.user_cloud.delete_server(
self.server_name, wait=True))
self.user_cloud.delete_server(self.server_name, wait=True)
)
srv = self.user_cloud.get_server(self.server_name)
self.assertTrue(srv is None or srv.status.lower() == 'deleted')
@ -157,7 +166,8 @@ class TestCompute(base.BaseFunctionalTest):
name=self.server_name,
image=self.image,
flavor=self.flavor,
wait=True)
wait=True,
)
# We're going to get servers from other tests, but that's ok, as long
# as we get the server we created with the demo user.
found_server = False
@ -171,7 +181,8 @@ class TestCompute(base.BaseFunctionalTest):
self.assertRaises(
exc.OpenStackCloudException,
self.user_cloud.list_servers,
all_projects=True)
all_projects=True,
)
def test_create_server_image_flavor_dict(self):
self.addCleanup(self._cleanup_servers_and_volumes, self.server_name)
@ -179,13 +190,15 @@ class TestCompute(base.BaseFunctionalTest):
name=self.server_name,
image={'id': self.image.id},
flavor={'id': self.flavor.id},
wait=True)
wait=True,
)
self.assertEqual(self.server_name, server['name'])
self.assertEqual(self.image.id, server['image']['id'])
self.assertEqual(self.flavor.name, server['flavor']['original_name'])
self.assertIsNotNone(server['adminPass'])
self.assertTrue(
self.user_cloud.delete_server(self.server_name, wait=True))
self.user_cloud.delete_server(self.server_name, wait=True)
)
srv = self.user_cloud.get_server(self.server_name)
self.assertTrue(srv is None or srv.status.lower() == 'deleted')
@ -195,7 +208,8 @@ class TestCompute(base.BaseFunctionalTest):
name=self.server_name,
image=self.image,
flavor=self.flavor,
wait=True)
wait=True,
)
# _get_server_console_output does not trap HTTP exceptions, so this
# returning a string tests that the call is correct. Testing that
# the cloud returns actual data in the output is out of scope.
@ -208,19 +222,22 @@ class TestCompute(base.BaseFunctionalTest):
name=self.server_name,
image=self.image,
flavor=self.flavor,
wait=True)
wait=True,
)
log = self.user_cloud.get_server_console(server=self.server_name)
self.assertIsInstance(log, str)
def test_list_availability_zone_names(self):
self.assertEqual(
['nova'], self.user_cloud.list_availability_zone_names())
['nova'], self.user_cloud.list_availability_zone_names()
)
def test_get_server_console_bad_server(self):
self.assertRaises(
exc.OpenStackCloudException,
self.user_cloud.get_server_console,
server=self.server_name)
server=self.server_name,
)
def test_create_and_delete_server_with_admin_pass(self):
self.addCleanup(self._cleanup_servers_and_volumes, self.server_name)
@ -229,27 +246,33 @@ class TestCompute(base.BaseFunctionalTest):
image=self.image,
flavor=self.flavor,
admin_pass='sheiqu9loegahSh',
wait=True)
wait=True,
)
self.assertEqual(self.server_name, server['name'])
self.assertEqual(self.image.id, server['image']['id'])
self.assertEqual(self.flavor.name, server['flavor']['original_name'])
self.assertEqual(server['adminPass'], 'sheiqu9loegahSh')
self.assertTrue(
self.user_cloud.delete_server(self.server_name, wait=True))
self.user_cloud.delete_server(self.server_name, wait=True)
)
srv = self.user_cloud.get_server(self.server_name)
self.assertTrue(srv is None or srv.status.lower() == 'deleted')
def test_get_image_id(self):
self.assertEqual(
self.image.id, self.user_cloud.get_image_id(self.image.id))
self.image.id, self.user_cloud.get_image_id(self.image.id)
)
self.assertEqual(
self.image.id, self.user_cloud.get_image_id(self.image.name))
self.image.id, self.user_cloud.get_image_id(self.image.name)
)
def test_get_image_name(self):
self.assertEqual(
self.image.name, self.user_cloud.get_image_name(self.image.id))
self.image.name, self.user_cloud.get_image_name(self.image.id)
)
self.assertEqual(
self.image.name, self.user_cloud.get_image_name(self.image.name))
self.image.name, self.user_cloud.get_image_name(self.image.name)
)
def _assert_volume_attach(self, server, volume_id=None, image=''):
self.assertEqual(self.server_name, server['name'])
@ -277,7 +300,8 @@ class TestCompute(base.BaseFunctionalTest):
flavor=self.flavor,
boot_from_volume=True,
volume_size=1,
wait=True)
wait=True,
)
volume_id = self._assert_volume_attach(server)
volume = self.user_cloud.get_volume(volume_id)
self.assertIsNotNone(volume)
@ -296,13 +320,18 @@ class TestCompute(base.BaseFunctionalTest):
# deleting a server that had had a volume attached. Yay for eventual
# consistency!
for count in utils.iterate_timeout(
60,
'Timeout waiting for volume {volume_id} to detach'.format(
volume_id=volume_id)):
60,
'Timeout waiting for volume {volume_id} to detach'.format(
volume_id=volume_id
),
):
volume = self.user_cloud.get_volume(volume_id)
if volume.status in (
'available', 'error',
'error_restoring', 'error_extending'):
'available',
'error',
'error_restoring',
'error_extending',
):
return
def test_create_terminate_volume_image(self):
@ -317,10 +346,12 @@ class TestCompute(base.BaseFunctionalTest):
boot_from_volume=True,
terminate_volume=True,
volume_size=1,
wait=True)
wait=True,
)
volume_id = self._assert_volume_attach(server)
self.assertTrue(
self.user_cloud.delete_server(self.server_name, wait=True))
self.user_cloud.delete_server(self.server_name, wait=True)
)
volume = self.user_cloud.get_volume(volume_id)
# We can either get None (if the volume delete was quick), or a volume
# that is in the process of being deleted.
@ -335,7 +366,8 @@ class TestCompute(base.BaseFunctionalTest):
self.skipTest('volume service not supported by cloud')
self.addCleanup(self._cleanup_servers_and_volumes, self.server_name)
volume = self.user_cloud.create_volume(
size=1, name=self.server_name, image=self.image, wait=True)
size=1, name=self.server_name, image=self.image, wait=True
)
self.addCleanup(self.user_cloud.delete_volume, volume.id)
server = self.user_cloud.create_server(
name=self.server_name,
@ -343,10 +375,12 @@ class TestCompute(base.BaseFunctionalTest):
flavor=self.flavor,
boot_volume=volume,
volume_size=1,
wait=True)
wait=True,
)
volume_id = self._assert_volume_attach(server, volume_id=volume['id'])
self.assertTrue(
self.user_cloud.delete_server(self.server_name, wait=True))
self.user_cloud.delete_server(self.server_name, wait=True)
)
volume = self.user_cloud.get_volume(volume_id)
self.assertIsNotNone(volume)
self.assertEqual(volume['name'], volume['display_name'])
@ -364,7 +398,8 @@ class TestCompute(base.BaseFunctionalTest):
self.skipTest('volume service not supported by cloud')
self.addCleanup(self._cleanup_servers_and_volumes, self.server_name)
volume = self.user_cloud.create_volume(
size=1, name=self.server_name, image=self.image, wait=True)
size=1, name=self.server_name, image=self.image, wait=True
)
self.addCleanup(self.user_cloud.delete_volume, volume['id'])
server = self.user_cloud.create_server(
name=self.server_name,
@ -372,11 +407,14 @@ class TestCompute(base.BaseFunctionalTest):
image=self.image,
boot_from_volume=False,
volumes=[volume],
wait=True)
wait=True,
)
volume_id = self._assert_volume_attach(
server, volume_id=volume['id'], image={'id': self.image['id']})
server, volume_id=volume['id'], image={'id': self.image['id']}
)
self.assertTrue(
self.user_cloud.delete_server(self.server_name, wait=True))
self.user_cloud.delete_server(self.server_name, wait=True)
)
volume = self.user_cloud.get_volume(volume_id)
self.assertIsNotNone(volume)
self.assertEqual(volume['name'], volume['display_name'])
@ -393,7 +431,8 @@ class TestCompute(base.BaseFunctionalTest):
self.skipTest('volume service not supported by cloud')
self.addCleanup(self._cleanup_servers_and_volumes, self.server_name)
volume = self.user_cloud.create_volume(
size=1, name=self.server_name, image=self.image, wait=True)
size=1, name=self.server_name, image=self.image, wait=True
)
server = self.user_cloud.create_server(
name=self.server_name,
image=None,
@ -401,10 +440,12 @@ class TestCompute(base.BaseFunctionalTest):
boot_volume=volume,
terminate_volume=True,
volume_size=1,
wait=True)
wait=True,
)
volume_id = self._assert_volume_attach(server, volume_id=volume['id'])
self.assertTrue(
self.user_cloud.delete_server(self.server_name, wait=True))
self.user_cloud.delete_server(self.server_name, wait=True)
)
volume = self.user_cloud.get_volume(volume_id)
# We can either get None (if the volume delete was quick), or a volume
# that is in the process of being deleted.
@ -420,9 +461,11 @@ class TestCompute(base.BaseFunctionalTest):
image=self.image,
flavor=self.flavor,
admin_pass='sheiqu9loegahSh',
wait=True)
image = self.user_cloud.create_image_snapshot('test-snapshot', server,
wait=True)
wait=True,
)
image = self.user_cloud.create_image_snapshot(
'test-snapshot', server, wait=True
)
self.addCleanup(self.user_cloud.delete_image, image['id'])
self.assertEqual('active', image['status'])
@ -432,24 +475,32 @@ class TestCompute(base.BaseFunctionalTest):
name=self.server_name,
image=self.image,
flavor=self.flavor,
wait=True)
self.user_cloud.set_server_metadata(self.server_name,
{'key1': 'value1',
'key2': 'value2'})
wait=True,
)
self.user_cloud.set_server_metadata(
self.server_name, {'key1': 'value1', 'key2': 'value2'}
)
updated_server = self.user_cloud.get_server(self.server_name)
self.assertEqual(set(updated_server.metadata.items()),
set({'key1': 'value1', 'key2': 'value2'}.items()))
self.assertEqual(
set(updated_server.metadata.items()),
set({'key1': 'value1', 'key2': 'value2'}.items()),
)
self.user_cloud.set_server_metadata(self.server_name,
{'key2': 'value3'})
self.user_cloud.set_server_metadata(
self.server_name, {'key2': 'value3'}
)
updated_server = self.user_cloud.get_server(self.server_name)
self.assertEqual(set(updated_server.metadata.items()),
set({'key1': 'value1', 'key2': 'value3'}.items()))
self.assertEqual(
set(updated_server.metadata.items()),
set({'key1': 'value1', 'key2': 'value3'}.items()),
)
self.user_cloud.delete_server_metadata(self.server_name, ['key2'])
updated_server = self.user_cloud.get_server(self.server_name)
self.assertEqual(set(updated_server.metadata.items()),
set({'key1': 'value1'}.items()))
self.assertEqual(
set(updated_server.metadata.items()),
set({'key1': 'value1'}.items()),
)
self.user_cloud.delete_server_metadata(self.server_name, ['key1'])
updated_server = self.user_cloud.get_server(self.server_name)
@ -458,7 +509,9 @@ class TestCompute(base.BaseFunctionalTest):
self.assertRaises(
exc.OpenStackCloudURINotFound,
self.user_cloud.delete_server_metadata,
self.server_name, ['key1'])
self.server_name,
['key1'],
)
def test_update_server(self):
self.addCleanup(self._cleanup_servers_and_volumes, self.server_name)
@ -466,10 +519,10 @@ class TestCompute(base.BaseFunctionalTest):
name=self.server_name,
image=self.image,
flavor=self.flavor,
wait=True)
wait=True,
)
server_updated = self.user_cloud.update_server(
self.server_name,
name='new_name'
self.server_name, name='new_name'
)
self.assertEqual('new_name', server_updated['name'])
@ -484,7 +537,8 @@ class TestCompute(base.BaseFunctionalTest):
name=self.server_name,
image=self.image,
flavor=self.flavor,
wait=True)
wait=True,
)
start = datetime.datetime.now() - datetime.timedelta(seconds=5)
usage = self.operator_cloud.get_compute_usage('demo', start)
self.add_info_on_exception('usage', usage)

View File

@ -30,16 +30,18 @@ class TestDevstack(base.BaseFunctionalTest):
scenarios = [
('designate', dict(env='DESIGNATE', service='dns')),
('heat', dict(env='HEAT', service='orchestration')),
('magnum', dict(
env='MAGNUM',
service='container-infrastructure-management'
)),
(
'magnum',
dict(env='MAGNUM', service='container-infrastructure-management'),
),
('neutron', dict(env='NEUTRON', service='network')),
('octavia', dict(env='OCTAVIA', service='load-balancer')),
('swift', dict(env='SWIFT', service='object-store')),
]
def test_has_service(self):
if os.environ.get(
'OPENSTACKSDK_HAS_{env}'.format(env=self.env), '0') == '1':
if (
os.environ.get('OPENSTACKSDK_HAS_{env}'.format(env=self.env), '0')
== '1'
):
self.assertTrue(self.user_cloud.has_service(self.service))

View File

@ -22,7 +22,6 @@ from openstack.tests.functional import base
class TestDomain(base.BaseFunctionalTest):
def setUp(self):
super(TestDomain, self).setUp()
if not self.operator_cloud:
@ -47,14 +46,16 @@ class TestDomain(base.BaseFunctionalTest):
# Raise an error: we must make users aware that something went
# wrong
raise openstack.cloud.OpenStackCloudException(
'\n'.join(exception_list))
'\n'.join(exception_list)
)
def test_search_domains(self):
domain_name = self.domain_prefix + '_search'
# Shouldn't find any domain with this name yet
results = self.operator_cloud.search_domains(
filters=dict(name=domain_name))
filters=dict(name=domain_name)
)
self.assertEqual(0, len(results))
# Now create a new domain
@ -63,7 +64,8 @@ class TestDomain(base.BaseFunctionalTest):
# Now we should find only the new domain
results = self.operator_cloud.search_domains(
filters=dict(name=domain_name))
filters=dict(name=domain_name)
)
self.assertEqual(1, len(results))
self.assertEqual(domain_name, results[0]['name'])
@ -74,13 +76,17 @@ class TestDomain(base.BaseFunctionalTest):
def test_update_domain(self):
domain = self.operator_cloud.create_domain(
self.domain_prefix, 'description')
self.domain_prefix, 'description'
)
self.assertEqual(self.domain_prefix, domain['name'])
self.assertEqual('description', domain['description'])
self.assertTrue(domain['enabled'])
updated = self.operator_cloud.update_domain(
domain['id'], name='updated name',
description='updated description', enabled=False)
domain['id'],
name='updated name',
description='updated description',
enabled=False,
)
self.assertEqual('updated name', updated['name'])
self.assertEqual('updated description', updated['description'])
self.assertFalse(updated['enabled'])
@ -91,14 +97,16 @@ class TestDomain(base.BaseFunctionalTest):
name_or_id='updated name',
name='updated name 2',
description='updated description 2',
enabled=True)
enabled=True,
)
self.assertEqual('updated name 2', updated['name'])
self.assertEqual('updated description 2', updated['description'])
self.assertTrue(updated['enabled'])
def test_delete_domain(self):
domain = self.operator_cloud.create_domain(self.domain_prefix,
'description')
domain = self.operator_cloud.create_domain(
self.domain_prefix, 'description'
)
self.assertEqual(self.domain_prefix, domain['name'])
self.assertEqual('description', domain['description'])
self.assertTrue(domain['enabled'])
@ -107,7 +115,8 @@ class TestDomain(base.BaseFunctionalTest):
# Now we delete domain by name with name_or_id
domain = self.operator_cloud.create_domain(
self.domain_prefix, 'description')
self.domain_prefix, 'description'
)
self.assertEqual(self.domain_prefix, domain['name'])
self.assertEqual('description', domain['description'])
self.assertTrue(domain['enabled'])
@ -117,7 +126,8 @@ class TestDomain(base.BaseFunctionalTest):
# Finally, we assert we get False from delete_domain if domain does
# not exist
domain = self.operator_cloud.create_domain(
self.domain_prefix, 'description')
self.domain_prefix, 'description'
)
self.assertEqual(self.domain_prefix, domain['name'])
self.assertEqual('description', domain['description'])
self.assertTrue(domain['enabled'])

View File

@ -29,8 +29,14 @@ from openstack.tests.functional import base
class TestEndpoints(base.KeystoneBaseFunctionalTest):
endpoint_attributes = ['id', 'region', 'publicurl', 'internalurl',
'service_id', 'adminurl']
endpoint_attributes = [
'id',
'region',
'publicurl',
'internalurl',
'service_id',
'adminurl',
]
def setUp(self):
super(TestEndpoints, self).setUp()
@ -39,7 +45,8 @@ class TestEndpoints(base.KeystoneBaseFunctionalTest):
# Generate a random name for services and regions in this test
self.new_item_name = 'test_' + ''.join(
random.choice(string.ascii_lowercase) for _ in range(5))
random.choice(string.ascii_lowercase) for _ in range(5)
)
self.addCleanup(self._cleanup_services)
self.addCleanup(self._cleanup_endpoints)
@ -47,8 +54,9 @@ class TestEndpoints(base.KeystoneBaseFunctionalTest):
def _cleanup_endpoints(self):
exception_list = list()
for e in self.operator_cloud.list_endpoints():
if e.get('region') is not None and \
e['region'].startswith(self.new_item_name):
if e.get('region') is not None and e['region'].startswith(
self.new_item_name
):
try:
self.operator_cloud.delete_endpoint(id=e['id'])
except Exception as e:
@ -63,8 +71,9 @@ class TestEndpoints(base.KeystoneBaseFunctionalTest):
def _cleanup_services(self):
exception_list = list()
for s in self.operator_cloud.list_services():
if s['name'] is not None and \
s['name'].startswith(self.new_item_name):
if s['name'] is not None and s['name'].startswith(
self.new_item_name
):
try:
self.operator_cloud.delete_service(name_or_id=s['id'])
except Exception as e:
@ -82,15 +91,18 @@ class TestEndpoints(base.KeystoneBaseFunctionalTest):
region = list(self.operator_cloud.identity.regions())[0].id
service = self.operator_cloud.create_service(
name=service_name, type='test_type',
description='this is a test description')
name=service_name,
type='test_type',
description='this is a test description',
)
endpoints = self.operator_cloud.create_endpoint(
service_name_or_id=service['id'],
public_url='http://public.test/',
internal_url='http://internal.test/',
admin_url='http://admin.url/',
region=region)
region=region,
)
self.assertNotEqual([], endpoints)
self.assertIsNotNone(endpoints[0].get('id'))
@ -99,7 +111,8 @@ class TestEndpoints(base.KeystoneBaseFunctionalTest):
endpoints = self.operator_cloud.create_endpoint(
service_name_or_id=service['id'],
public_url='http://public.test/',
region=region)
region=region,
)
self.assertNotEqual([], endpoints)
self.assertIsNotNone(endpoints[0].get('id'))
@ -108,32 +121,38 @@ class TestEndpoints(base.KeystoneBaseFunctionalTest):
ver = self.operator_cloud.config.get_api_version('identity')
if ver.startswith('2'):
# NOTE(SamYaple): Update endpoint only works with v3 api
self.assertRaises(OpenStackCloudUnavailableFeature,
self.operator_cloud.update_endpoint,
'endpoint_id1')
self.assertRaises(
OpenStackCloudUnavailableFeature,
self.operator_cloud.update_endpoint,
'endpoint_id1',
)
else:
# service operations require existing region. Do not test updating
# region for now
region = list(self.operator_cloud.identity.regions())[0].id
service = self.operator_cloud.create_service(
name='service1', type='test_type')
name='service1', type='test_type'
)
endpoint = self.operator_cloud.create_endpoint(
service_name_or_id=service['id'],
url='http://admin.url/',
interface='admin',
region=region,
enabled=False)[0]
enabled=False,
)[0]
new_service = self.operator_cloud.create_service(
name='service2', type='test_type')
name='service2', type='test_type'
)
new_endpoint = self.operator_cloud.update_endpoint(
endpoint.id,
service_name_or_id=new_service.id,
url='http://public.url/',
interface='public',
region=region,
enabled=True)
enabled=True,
)
self.assertEqual(new_endpoint.url, 'http://public.url/')
self.assertEqual(new_endpoint.interface, 'public')
@ -147,14 +166,17 @@ class TestEndpoints(base.KeystoneBaseFunctionalTest):
region = list(self.operator_cloud.identity.regions())[0].id
service = self.operator_cloud.create_service(
name=service_name, type='test_type',
description='this is a test description')
name=service_name,
type='test_type',
description='this is a test description',
)
endpoints = self.operator_cloud.create_endpoint(
service_name_or_id=service['id'],
public_url='http://public.test/',
internal_url='http://internal.test/',
region=region)
region=region,
)
observed_endpoints = self.operator_cloud.list_endpoints()
found = False
@ -170,10 +192,10 @@ class TestEndpoints(base.KeystoneBaseFunctionalTest):
elif e['interface'] == 'public':
self.assertEqual('http://public.test/', e['url'])
else:
self.assertEqual('http://public.test/',
e['publicurl'])
self.assertEqual('http://internal.test/',
e['internalurl'])
self.assertEqual('http://public.test/', e['publicurl'])
self.assertEqual(
'http://internal.test/', e['internalurl']
)
self.assertEqual(region, e['region_id'])
self.assertTrue(found, msg='new endpoint not found in endpoints list!')
@ -184,14 +206,17 @@ class TestEndpoints(base.KeystoneBaseFunctionalTest):
region = list(self.operator_cloud.identity.regions())[0].id
service = self.operator_cloud.create_service(
name=service_name, type='test_type',
description='this is a test description')
name=service_name,
type='test_type',
description='this is a test description',
)
endpoints = self.operator_cloud.create_endpoint(
service_name_or_id=service['id'],
public_url='http://public.test/',
internal_url='http://internal.test/',
region=region)
region=region,
)
self.assertNotEqual([], endpoints)
for endpoint in endpoints:
@ -204,5 +229,4 @@ class TestEndpoints(base.KeystoneBaseFunctionalTest):
if e['id'] == endpoint['id']:
found = True
break
self.assertEqual(
False, found, message='new endpoint was not deleted!')
self.assertEqual(False, found, message='new endpoint was not deleted!')

View File

@ -24,7 +24,6 @@ from openstack.tests.functional import base
class TestFlavor(base.BaseFunctionalTest):
def setUp(self):
super(TestFlavor, self).setUp()
@ -56,8 +55,14 @@ class TestFlavor(base.BaseFunctionalTest):
flavor_name = self.new_item_name + '_create'
flavor_kwargs = dict(
name=flavor_name, ram=1024, vcpus=2, disk=10, ephemeral=5,
swap=100, rxtx_factor=1.5, is_public=True
name=flavor_name,
ram=1024,
vcpus=2,
disk=10,
ephemeral=5,
swap=100,
rxtx_factor=1.5,
is_public=True,
)
flavor = self.operator_cloud.create_flavor(**flavor_kwargs)
@ -144,8 +149,9 @@ class TestFlavor(base.BaseFunctionalTest):
self.assertEqual(project['id'], acls[0]['tenant_id'])
# Now revoke the access and make sure we can't find it
self.operator_cloud.remove_flavor_access(new_flavor['id'],
project['id'])
self.operator_cloud.remove_flavor_access(
new_flavor['id'], project['id']
)
flavors = self.user_cloud.search_flavors(priv_flavor_name)
self.assertEqual(0, len(flavors))
@ -157,9 +163,7 @@ class TestFlavor(base.BaseFunctionalTest):
self.skipTest("Operator cloud is required for this test")
flavor_name = self.new_item_name + '_spec_test'
kwargs = dict(
name=flavor_name, ram=1024, vcpus=2, disk=10
)
kwargs = dict(name=flavor_name, ram=1024, vcpus=2, disk=10)
new_flavor = self.operator_cloud.create_flavor(**kwargs)
# Expect no extra_specs
@ -169,7 +173,8 @@ class TestFlavor(base.BaseFunctionalTest):
extra_specs = {'foo': 'aaa', 'bar': 'bbb'}
self.operator_cloud.set_flavor_specs(new_flavor['id'], extra_specs)
mod_flavor = self.operator_cloud.get_flavor(
new_flavor['id'], get_extra=True)
new_flavor['id'], get_extra=True
)
# Verify extra_specs were set
self.assertIn('extra_specs', mod_flavor)
@ -178,7 +183,8 @@ class TestFlavor(base.BaseFunctionalTest):
# Unset the 'foo' value
self.operator_cloud.unset_flavor_specs(mod_flavor['id'], ['foo'])
mod_flavor = self.operator_cloud.get_flavor_by_id(
new_flavor['id'], get_extra=True)
new_flavor['id'], get_extra=True
)
# Verify 'foo' is unset and 'bar' is still set
self.assertEqual({'bar': 'bbb'}, mod_flavor['extra_specs'])

View File

@ -54,12 +54,14 @@ class TestFloatingIP(base.BaseFunctionalTest):
try:
if r['name'].startswith(self.new_item_name):
self.user_cloud.update_router(
r, ext_gateway_net_id=None)
r, ext_gateway_net_id=None
)
for s in self.user_cloud.list_subnets():
if s['name'].startswith(self.new_item_name):
try:
self.user_cloud.remove_router_interface(
r, subnet_id=s['id'])
r, subnet_id=s['id']
)
except Exception:
pass
self.user_cloud.delete_router(r.id)
@ -93,7 +95,9 @@ class TestFloatingIP(base.BaseFunctionalTest):
self.addDetail(
'exceptions',
content.text_content(
'\n'.join([str(ex) for ex in exception_list])))
'\n'.join([str(ex) for ex in exception_list])
),
)
exc = exception_list[0]
raise exc
@ -121,8 +125,10 @@ class TestFloatingIP(base.BaseFunctionalTest):
fixed_ip = meta.get_server_private_ip(server)
for ip in self.user_cloud.list_floating_ips():
if (ip.get('fixed_ip', None) == fixed_ip
or ip.get('fixed_ip_address', None) == fixed_ip):
if (
ip.get('fixed_ip', None) == fixed_ip
or ip.get('fixed_ip_address', None) == fixed_ip
):
try:
self.user_cloud.delete_floating_ip(ip.id)
except Exception as e:
@ -138,42 +144,49 @@ class TestFloatingIP(base.BaseFunctionalTest):
if self.user_cloud.has_service('network'):
# Create a network
self.test_net = self.user_cloud.create_network(
name=self.new_item_name + '_net')
name=self.new_item_name + '_net'
)
# Create a subnet on it
self.test_subnet = self.user_cloud.create_subnet(
subnet_name=self.new_item_name + '_subnet',
network_name_or_id=self.test_net['id'],
cidr='10.24.4.0/24',
enable_dhcp=True
enable_dhcp=True,
)
# Create a router
self.test_router = self.user_cloud.create_router(
name=self.new_item_name + '_router')
name=self.new_item_name + '_router'
)
# Attach the router to an external network
ext_nets = self.user_cloud.search_networks(
filters={'router:external': True})
filters={'router:external': True}
)
self.user_cloud.update_router(
name_or_id=self.test_router['id'],
ext_gateway_net_id=ext_nets[0]['id'])
ext_gateway_net_id=ext_nets[0]['id'],
)
# Attach the router to the internal subnet
self.user_cloud.add_router_interface(
self.test_router, subnet_id=self.test_subnet['id'])
self.test_router, subnet_id=self.test_subnet['id']
)
# Select the network for creating new servers
self.nic = {'net-id': self.test_net['id']}
self.addDetail(
'networks-neutron',
content.text_content(pprint.pformat(
self.user_cloud.list_networks())))
content.text_content(
pprint.pformat(self.user_cloud.list_networks())
),
)
else:
# Find network names for nova-net
data = proxy._json_response(
self.user_cloud._conn.compute.get('/os-tenant-networks'))
self.user_cloud._conn.compute.get('/os-tenant-networks')
)
nets = meta.get_and_munchify('networks', data)
self.addDetail(
'networks-nova',
content.text_content(pprint.pformat(
nets)))
'networks-nova', content.text_content(pprint.pformat(nets))
)
self.nic = {'net-id': nets[0].id}
def test_private_ip(self):
@ -181,27 +194,36 @@ class TestFloatingIP(base.BaseFunctionalTest):
new_server = self.user_cloud.get_openstack_vars(
self.user_cloud.create_server(
wait=True, name=self.new_item_name + '_server',
wait=True,
name=self.new_item_name + '_server',
image=self.image,
flavor=self.flavor, nics=[self.nic]))
flavor=self.flavor,
nics=[self.nic],
)
)
self.addDetail(
'server', content.text_content(pprint.pformat(new_server)))
'server', content.text_content(pprint.pformat(new_server))
)
self.assertNotEqual(new_server['private_v4'], '')
def test_add_auto_ip(self):
self._setup_networks()
new_server = self.user_cloud.create_server(
wait=True, name=self.new_item_name + '_server',
wait=True,
name=self.new_item_name + '_server',
image=self.image,
flavor=self.flavor, nics=[self.nic])
flavor=self.flavor,
nics=[self.nic],
)
# ToDo: remove the following iteration when create_server waits for
# the IP to be attached
ip = None
for _ in utils.iterate_timeout(
self.timeout, "Timeout waiting for IP address to be attached"):
self.timeout, "Timeout waiting for IP address to be attached"
):
ip = meta.get_server_external_ipv4(self.user_cloud, new_server)
if ip is not None:
break
@ -213,15 +235,19 @@ class TestFloatingIP(base.BaseFunctionalTest):
self._setup_networks()
new_server = self.user_cloud.create_server(
wait=True, name=self.new_item_name + '_server',
wait=True,
name=self.new_item_name + '_server',
image=self.image,
flavor=self.flavor, nics=[self.nic])
flavor=self.flavor,
nics=[self.nic],
)
# ToDo: remove the following iteration when create_server waits for
# the IP to be attached
ip = None
for _ in utils.iterate_timeout(
self.timeout, "Timeout waiting for IP address to be attached"):
self.timeout, "Timeout waiting for IP address to be attached"
):
ip = meta.get_server_external_ipv4(self.user_cloud, new_server)
if ip is not None:
break
@ -230,15 +256,18 @@ class TestFloatingIP(base.BaseFunctionalTest):
self.addCleanup(self._cleanup_ips, new_server)
f_ip = self.user_cloud.get_floating_ip(
id=None, filters={'floating_ip_address': ip})
id=None, filters={'floating_ip_address': ip}
)
self.user_cloud.detach_ip_from_server(
server_id=new_server.id, floating_ip_id=f_ip['id'])
server_id=new_server.id, floating_ip_id=f_ip['id']
)
def test_list_floating_ips(self):
if self.operator_cloud:
fip_admin = self.operator_cloud.create_floating_ip()
self.addCleanup(
self.operator_cloud.delete_floating_ip, fip_admin.id)
self.operator_cloud.delete_floating_ip, fip_admin.id
)
fip_user = self.user_cloud.create_floating_ip()
self.addCleanup(self.user_cloud.delete_floating_ip, fip_user.id)
@ -260,7 +289,8 @@ class TestFloatingIP(base.BaseFunctionalTest):
# Ask Neutron for only a subset of all the FIPs.
if self.operator_cloud:
filtered_fip_id_list = [
fip.id for fip in self.operator_cloud.list_floating_ips(
fip.id
for fip in self.operator_cloud.list_floating_ips(
{'tenant_id': self.user_cloud.current_project_id}
)
]
@ -275,9 +305,10 @@ class TestFloatingIP(base.BaseFunctionalTest):
if self.operator_cloud:
self.assertNotIn(fip_user.id, fip_op_id_list)
self.assertRaisesRegex(
ValueError, "Nova-network don't support server-side.*",
ValueError,
"Nova-network don't support server-side.*",
self.operator_cloud.list_floating_ips,
filters={'foo': 'bar'}
filters={'foo': 'bar'},
)
def test_search_floating_ips(self):
@ -286,7 +317,7 @@ class TestFloatingIP(base.BaseFunctionalTest):
self.assertIn(
fip_user['id'],
[fip.id for fip in self.user_cloud.search_floating_ips()]
[fip.id for fip in self.user_cloud.search_floating_ips()],
)
def test_get_floating_ip_by_id(self):

View File

@ -38,8 +38,7 @@ class TestFloatingIPPool(base.BaseFunctionalTest):
if not self.user_cloud._has_nova_extension('os-floating-ip-pools'):
# Skipping this test is floating-ip-pool extension is not
# available on the testing cloud
self.skip(
'Floating IP pools extension is not available')
self.skip('Floating IP pools extension is not available')
def test_list_floating_ip_pools(self):
pools = self.user_cloud.list_floating_ip_pools()

View File

@ -22,7 +22,6 @@ from openstack.tests.functional import base
class TestGroup(base.BaseFunctionalTest):
def setUp(self):
super(TestGroup, self).setUp()
if not self.operator_cloud:
@ -48,7 +47,8 @@ class TestGroup(base.BaseFunctionalTest):
# Raise an error: we must make users aware that something went
# wrong
raise openstack.cloud.OpenStackCloudException(
'\n'.join(exception_list))
'\n'.join(exception_list)
)
def test_create_group(self):
group_name = self.group_prefix + '_create'
@ -68,7 +68,8 @@ class TestGroup(base.BaseFunctionalTest):
self.assertTrue(self.operator_cloud.delete_group(group_name))
results = self.operator_cloud.search_groups(
filters=dict(name=group_name))
filters=dict(name=group_name)
)
self.assertEqual(0, len(results))
def test_delete_group_not_exists(self):
@ -79,7 +80,8 @@ class TestGroup(base.BaseFunctionalTest):
# Shouldn't find any group with this name yet
results = self.operator_cloud.search_groups(
filters=dict(name=group_name))
filters=dict(name=group_name)
)
self.assertEqual(0, len(results))
# Now create a new group
@ -88,7 +90,8 @@ class TestGroup(base.BaseFunctionalTest):
# Now we should find only the new group
results = self.operator_cloud.search_groups(
filters=dict(name=group_name))
filters=dict(name=group_name)
)
self.assertEqual(1, len(results))
self.assertEqual(group_name, results[0]['name'])
@ -103,8 +106,7 @@ class TestGroup(base.BaseFunctionalTest):
updated_group_name = group_name + '_xyz'
updated_group_desc = group_desc + ' updated'
updated_group = self.operator_cloud.update_group(
group_name,
name=updated_group_name,
description=updated_group_desc)
group_name, name=updated_group_name, description=updated_group_desc
)
self.assertEqual(updated_group_name, updated_group['name'])
self.assertEqual(updated_group_desc, updated_group['description'])

View File

@ -30,7 +30,8 @@ class TestIdentity(base.KeystoneBaseFunctionalTest):
if not self.operator_cloud:
self.skipTest("Operator cloud is required for this test")
self.role_prefix = 'test_role' + ''.join(
random.choice(string.ascii_lowercase) for _ in range(5))
random.choice(string.ascii_lowercase) for _ in range(5)
)
self.user_prefix = self.getUniqueString('user')
self.group_prefix = self.getUniqueString('group')
@ -133,7 +134,8 @@ class TestIdentity(base.KeystoneBaseFunctionalTest):
user = self.operator_cloud.get_user('demo')
project = self.operator_cloud.get_project('demo')
assignments = self.operator_cloud.list_role_assignments(
filters={'user': user['id'], 'project': project['id']})
filters={'user': user['id'], 'project': project['id']}
)
self.assertIsInstance(assignments, list)
self.assertGreater(len(assignments), 0)
@ -142,25 +144,35 @@ class TestIdentity(base.KeystoneBaseFunctionalTest):
user_email = 'nobody@nowhere.com'
role_name = self.role_prefix + '_grant_user_project'
role = self.operator_cloud.create_role(role_name)
user = self._create_user(name=user_name,
email=user_email,
default_project='demo')
self.assertTrue(self.operator_cloud.grant_role(
role_name, user=user['id'], project='demo', wait=True))
assignments = self.operator_cloud.list_role_assignments({
'role': role['id'],
'user': user['id'],
'project': self.operator_cloud.get_project('demo')['id']
})
user = self._create_user(
name=user_name, email=user_email, default_project='demo'
)
self.assertTrue(
self.operator_cloud.grant_role(
role_name, user=user['id'], project='demo', wait=True
)
)
assignments = self.operator_cloud.list_role_assignments(
{
'role': role['id'],
'user': user['id'],
'project': self.operator_cloud.get_project('demo')['id'],
}
)
self.assertIsInstance(assignments, list)
self.assertEqual(1, len(assignments))
self.assertTrue(self.operator_cloud.revoke_role(
role_name, user=user['id'], project='demo', wait=True))
assignments = self.operator_cloud.list_role_assignments({
'role': role['id'],
'user': user['id'],
'project': self.operator_cloud.get_project('demo')['id']
})
self.assertTrue(
self.operator_cloud.revoke_role(
role_name, user=user['id'], project='demo', wait=True
)
)
assignments = self.operator_cloud.list_role_assignments(
{
'role': role['id'],
'user': user['id'],
'project': self.operator_cloud.get_project('demo')['id'],
}
)
self.assertIsInstance(assignments, list)
self.assertEqual(0, len(assignments))
@ -171,25 +183,34 @@ class TestIdentity(base.KeystoneBaseFunctionalTest):
role = self.operator_cloud.create_role(role_name)
group_name = self.group_prefix + '_group_project'
group = self.operator_cloud.create_group(
name=group_name,
description='test group',
domain='default')
self.assertTrue(self.operator_cloud.grant_role(
role_name, group=group['id'], project='demo'))
assignments = self.operator_cloud.list_role_assignments({
'role': role['id'],
'group': group['id'],
'project': self.operator_cloud.get_project('demo')['id']
})
name=group_name, description='test group', domain='default'
)
self.assertTrue(
self.operator_cloud.grant_role(
role_name, group=group['id'], project='demo'
)
)
assignments = self.operator_cloud.list_role_assignments(
{
'role': role['id'],
'group': group['id'],
'project': self.operator_cloud.get_project('demo')['id'],
}
)
self.assertIsInstance(assignments, list)
self.assertEqual(1, len(assignments))
self.assertTrue(self.operator_cloud.revoke_role(
role_name, group=group['id'], project='demo'))
assignments = self.operator_cloud.list_role_assignments({
'role': role['id'],
'group': group['id'],
'project': self.operator_cloud.get_project('demo')['id']
})
self.assertTrue(
self.operator_cloud.revoke_role(
role_name, group=group['id'], project='demo'
)
)
assignments = self.operator_cloud.list_role_assignments(
{
'role': role['id'],
'group': group['id'],
'project': self.operator_cloud.get_project('demo')['id'],
}
)
self.assertIsInstance(assignments, list)
self.assertEqual(0, len(assignments))
@ -200,25 +221,35 @@ class TestIdentity(base.KeystoneBaseFunctionalTest):
role = self.operator_cloud.create_role(role_name)
user_name = self.user_prefix + '_user_domain'
user_email = 'nobody@nowhere.com'
user = self._create_user(name=user_name,
email=user_email,
default_project='demo')
self.assertTrue(self.operator_cloud.grant_role(
role_name, user=user['id'], domain='default'))
assignments = self.operator_cloud.list_role_assignments({
'role': role['id'],
'user': user['id'],
'domain': self.operator_cloud.get_domain('default')['id']
})
user = self._create_user(
name=user_name, email=user_email, default_project='demo'
)
self.assertTrue(
self.operator_cloud.grant_role(
role_name, user=user['id'], domain='default'
)
)
assignments = self.operator_cloud.list_role_assignments(
{
'role': role['id'],
'user': user['id'],
'domain': self.operator_cloud.get_domain('default')['id'],
}
)
self.assertIsInstance(assignments, list)
self.assertEqual(1, len(assignments))
self.assertTrue(self.operator_cloud.revoke_role(
role_name, user=user['id'], domain='default'))
assignments = self.operator_cloud.list_role_assignments({
'role': role['id'],
'user': user['id'],
'domain': self.operator_cloud.get_domain('default')['id']
})
self.assertTrue(
self.operator_cloud.revoke_role(
role_name, user=user['id'], domain='default'
)
)
assignments = self.operator_cloud.list_role_assignments(
{
'role': role['id'],
'user': user['id'],
'domain': self.operator_cloud.get_domain('default')['id'],
}
)
self.assertIsInstance(assignments, list)
self.assertEqual(0, len(assignments))
@ -229,25 +260,34 @@ class TestIdentity(base.KeystoneBaseFunctionalTest):
role = self.operator_cloud.create_role(role_name)
group_name = self.group_prefix + '_group_domain'
group = self.operator_cloud.create_group(
name=group_name,
description='test group',
domain='default')
self.assertTrue(self.operator_cloud.grant_role(
role_name, group=group['id'], domain='default'))
assignments = self.operator_cloud.list_role_assignments({
'role': role['id'],
'group': group['id'],
'domain': self.operator_cloud.get_domain('default')['id']
})
name=group_name, description='test group', domain='default'
)
self.assertTrue(
self.operator_cloud.grant_role(
role_name, group=group['id'], domain='default'
)
)
assignments = self.operator_cloud.list_role_assignments(
{
'role': role['id'],
'group': group['id'],
'domain': self.operator_cloud.get_domain('default')['id'],
}
)
self.assertIsInstance(assignments, list)
self.assertEqual(1, len(assignments))
self.assertTrue(self.operator_cloud.revoke_role(
role_name, group=group['id'], domain='default'))
assignments = self.operator_cloud.list_role_assignments({
'role': role['id'],
'group': group['id'],
'domain': self.operator_cloud.get_domain('default')['id']
})
self.assertTrue(
self.operator_cloud.revoke_role(
role_name, group=group['id'], domain='default'
)
)
assignments = self.operator_cloud.list_role_assignments(
{
'role': role['id'],
'group': group['id'],
'domain': self.operator_cloud.get_domain('default')['id'],
}
)
self.assertIsInstance(assignments, list)
self.assertEqual(0, len(assignments))
@ -256,25 +296,27 @@ class TestIdentity(base.KeystoneBaseFunctionalTest):
role = self.operator_cloud.create_role(role_name)
user_name = self.user_prefix + '_user_system'
user_email = 'nobody@nowhere.com'
user = self._create_user(name=user_name,
email=user_email,
default_project='demo')
self.assertTrue(self.operator_cloud.grant_role(
role_name, user=user['id'], system='all'))
assignments = self.operator_cloud.list_role_assignments({
'role': role['id'],
'user': user['id'],
'system': 'all'
})
user = self._create_user(
name=user_name, email=user_email, default_project='demo'
)
self.assertTrue(
self.operator_cloud.grant_role(
role_name, user=user['id'], system='all'
)
)
assignments = self.operator_cloud.list_role_assignments(
{'role': role['id'], 'user': user['id'], 'system': 'all'}
)
self.assertIsInstance(assignments, list)
self.assertEqual(1, len(assignments))
self.assertTrue(self.operator_cloud.revoke_role(
role_name, user=user['id'], system='all'))
assignments = self.operator_cloud.list_role_assignments({
'role': role['id'],
'user': user['id'],
'system': 'all'
})
self.assertTrue(
self.operator_cloud.revoke_role(
role_name, user=user['id'], system='all'
)
)
assignments = self.operator_cloud.list_role_assignments(
{'role': role['id'], 'user': user['id'], 'system': 'all'}
)
self.assertIsInstance(assignments, list)
self.assertEqual(0, len(assignments))
@ -285,23 +327,25 @@ class TestIdentity(base.KeystoneBaseFunctionalTest):
role = self.operator_cloud.create_role(role_name)
group_name = self.group_prefix + '_group_system'
group = self.operator_cloud.create_group(
name=group_name,
description='test group')
self.assertTrue(self.operator_cloud.grant_role(
role_name, group=group['id'], system='all'))
assignments = self.operator_cloud.list_role_assignments({
'role': role['id'],
'group': group['id'],
'system': 'all'
})
name=group_name, description='test group'
)
self.assertTrue(
self.operator_cloud.grant_role(
role_name, group=group['id'], system='all'
)
)
assignments = self.operator_cloud.list_role_assignments(
{'role': role['id'], 'group': group['id'], 'system': 'all'}
)
self.assertIsInstance(assignments, list)
self.assertEqual(1, len(assignments))
self.assertTrue(self.operator_cloud.revoke_role(
role_name, group=group['id'], system='all'))
assignments = self.operator_cloud.list_role_assignments({
'role': role['id'],
'group': group['id'],
'system': 'all'
})
self.assertTrue(
self.operator_cloud.revoke_role(
role_name, group=group['id'], system='all'
)
)
assignments = self.operator_cloud.list_role_assignments(
{'role': role['id'], 'group': group['id'], 'system': 'all'}
)
self.assertIsInstance(assignments, list)
self.assertEqual(0, len(assignments))

View File

@ -25,7 +25,6 @@ from openstack.tests.functional import base
class TestImage(base.BaseFunctionalTest):
def test_create_image(self):
test_image = tempfile.NamedTemporaryFile(delete=False)
test_image.write(b'\0' * 1024 * 1024)
@ -40,7 +39,8 @@ class TestImage(base.BaseFunctionalTest):
min_disk=10,
min_ram=1024,
tags=['custom'],
wait=True)
wait=True,
)
finally:
self.user_cloud.delete_image(image_name, wait=True)
@ -57,13 +57,16 @@ class TestImage(base.BaseFunctionalTest):
container_format='bare',
min_disk=10,
min_ram=1024,
wait=True)
wait=True,
)
self.addCleanup(self.user_cloud.delete_image, image_name, wait=True)
output = os.path.join(tempfile.gettempdir(), self.getUniqueString())
self.user_cloud.download_image(image_name, output)
self.addCleanup(os.remove, output)
self.assertTrue(filecmp.cmp(test_image.name, output),
"Downloaded contents don't match created image")
self.assertTrue(
filecmp.cmp(test_image.name, output),
"Downloaded contents don't match created image",
)
def test_create_image_skip_duplicate(self):
test_image = tempfile.NamedTemporaryFile(delete=False)
@ -79,7 +82,8 @@ class TestImage(base.BaseFunctionalTest):
min_disk=10,
min_ram=1024,
validate_checksum=True,
wait=True)
wait=True,
)
second_image = self.user_cloud.create_image(
name=image_name,
filename=test_image.name,
@ -88,7 +92,8 @@ class TestImage(base.BaseFunctionalTest):
min_disk=10,
min_ram=1024,
validate_checksum=True,
wait=True)
wait=True,
)
self.assertEqual(first_image.id, second_image.id)
finally:
self.user_cloud.delete_image(image_name, wait=True)
@ -108,7 +113,8 @@ class TestImage(base.BaseFunctionalTest):
container_format='bare',
min_disk=10,
min_ram=1024,
wait=True)
wait=True,
)
second_image = self.user_cloud.create_image(
name=image_name,
filename=test_image.name,
@ -117,7 +123,8 @@ class TestImage(base.BaseFunctionalTest):
min_disk=10,
min_ram=1024,
allow_duplicates=True,
wait=True)
wait=True,
)
self.assertNotEqual(first_image.id, second_image.id)
finally:
if first_image:
@ -138,11 +145,11 @@ class TestImage(base.BaseFunctionalTest):
container_format='bare',
min_disk=10,
min_ram=1024,
wait=True)
wait=True,
)
self.user_cloud.update_image_properties(
image=image,
name=image_name,
foo='bar')
image=image, name=image_name, foo='bar'
)
image = self.user_cloud.get_image(image_name)
self.assertIn('foo', image.properties)
self.assertEqual(image.properties['foo'], 'bar')
@ -158,7 +165,8 @@ class TestImage(base.BaseFunctionalTest):
min_disk=10,
min_ram=1024,
allow_duplicates=True,
wait=False)
wait=False,
)
self.assertEqual(image_name, image.name)
self.user_cloud.delete_image(image.id, wait=True)
@ -175,7 +183,8 @@ class TestImage(base.BaseFunctionalTest):
container_format='bare',
min_disk=10,
min_ram=1024,
wait=True)
wait=True,
)
image = self.user_cloud.get_image_by_id(image.id)
self.assertEqual(image_name, image.name)
self.assertEqual('raw', image.disk_format)

View File

@ -35,8 +35,13 @@ class TestInventory(base.BaseFunctionalTest):
self.server_name = self.getUniqueString('inventory')
self.addCleanup(self._cleanup_server)
server = self.operator_cloud.create_server(
name=self.server_name, image=self.image, flavor=self.flavor,
wait=True, auto_ip=True, network='public')
name=self.server_name,
image=self.image,
flavor=self.flavor,
wait=True,
auto_ip=True,
network='public',
)
self.server_id = server['id']
def _cleanup_server(self):

View File

@ -21,7 +21,6 @@ from openstack.tests.functional import base
class TestKeypairs(base.BaseFunctionalTest):
def test_create_and_delete(self):
'''Test creating and deleting keypairs functionality'''
name = self.getUniqueString('keypair')
@ -46,7 +45,8 @@ class TestKeypairs(base.BaseFunctionalTest):
name = self.getUniqueString('keypair')
self.addCleanup(self.user_cloud.delete_keypair, name)
keypair = self.user_cloud.create_keypair(
name=name, public_key=fakes.FAKE_PUBLIC_KEY)
name=name, public_key=fakes.FAKE_PUBLIC_KEY
)
self.assertEqual(keypair['name'], name)
self.assertIsNotNone(keypair['public_key'])
self.assertIsNone(keypair['private_key'])

View File

@ -21,7 +21,6 @@ from openstack.tests.functional import base
class TestUsage(base.BaseFunctionalTest):
def test_get_our_compute_limits(self):
'''Test quotas functionality'''
limits = self.user_cloud.get_compute_limits()

View File

@ -21,7 +21,6 @@ from openstack.tests.functional import base
class TestMagnumServices(base.BaseFunctionalTest):
def setUp(self):
super(TestMagnumServices, self).setUp()
if not self.user_cloud.has_service(

View File

@ -84,7 +84,8 @@ class TestNetwork(base.BaseFunctionalTest):
def test_create_network_provider_flat(self):
existing_public = self.operator_cloud.search_networks(
filters={'provider:network_type': 'flat'})
filters={'provider:network_type': 'flat'}
)
if existing_public:
self.skipTest('Physical network already allocated')
net1 = self.operator_cloud.create_network(
@ -93,7 +94,7 @@ class TestNetwork(base.BaseFunctionalTest):
provider={
'physical_network': 'public',
'network_type': 'flat',
}
},
)
self.assertIn('id', net1)
self.assertEqual(self.network_name, net1['name'])
@ -117,10 +118,12 @@ class TestNetwork(base.BaseFunctionalTest):
net1 = self.operator_cloud.create_network(name=self.network_name)
self.assertIsNotNone(net1)
net2 = self.operator_cloud.create_network(
name=self.network_name + 'other')
name=self.network_name + 'other'
)
self.assertIsNotNone(net2)
match = self.operator_cloud.list_networks(
filters=dict(name=self.network_name))
filters=dict(name=self.network_name)
)
self.assertEqual(1, len(match))
self.assertEqual(net1['name'], match[0]['name'])

View File

@ -28,7 +28,6 @@ from openstack.tests.functional import base
class TestObject(base.BaseFunctionalTest):
def setUp(self):
super(TestObject, self).setUp()
if not self.user_cloud.has_service('object-store'):
@ -41,69 +40,84 @@ class TestObject(base.BaseFunctionalTest):
self.addCleanup(self.user_cloud.delete_container, container_name)
self.user_cloud.create_container(container_name)
container = self.user_cloud.get_container(container_name)
self.assertEqual(container_name, container.name)
self.assertEqual(
container_name, container.name)
self.assertEqual(
[],
self.user_cloud.list_containers(prefix='somethin'))
[], self.user_cloud.list_containers(prefix='somethin')
)
sizes = (
(64 * 1024, 1), # 64K, one segment
(64 * 1024, 5) # 64MB, 5 segments
(64 * 1024, 5), # 64MB, 5 segments
)
for size, nseg in sizes:
segment_size = int(round(size / nseg))
with tempfile.NamedTemporaryFile() as fake_file:
fake_content = ''.join(random.SystemRandom().choice(
string.ascii_uppercase + string.digits)
for _ in range(size)).encode('latin-1')
fake_content = ''.join(
random.SystemRandom().choice(
string.ascii_uppercase + string.digits
)
for _ in range(size)
).encode('latin-1')
fake_file.write(fake_content)
fake_file.flush()
name = 'test-%d' % size
self.addCleanup(
self.user_cloud.delete_object, container_name, name)
self.user_cloud.delete_object, container_name, name
)
self.user_cloud.create_object(
container_name, name,
container_name,
name,
fake_file.name,
segment_size=segment_size,
metadata={'foo': 'bar'})
self.assertFalse(self.user_cloud.is_object_stale(
container_name, name,
fake_file.name
))
metadata={'foo': 'bar'},
)
self.assertFalse(
self.user_cloud.is_object_stale(
container_name, name, fake_file.name
)
)
self.assertEqual(
'bar', self.user_cloud.get_object_metadata(
container_name, name)['foo']
'bar',
self.user_cloud.get_object_metadata(container_name, name)[
'foo'
],
)
self.user_cloud.update_object(
container=container_name,
name=name,
metadata={'testk': 'testv'},
)
self.user_cloud.update_object(container=container_name, name=name,
metadata={'testk': 'testv'})
self.assertEqual(
'testv', self.user_cloud.get_object_metadata(
container_name, name)['testk']
'testv',
self.user_cloud.get_object_metadata(container_name, name)[
'testk'
],
)
try:
self.assertIsNotNone(
self.user_cloud.get_object(container_name, name))
self.user_cloud.get_object(container_name, name)
)
except exc.OpenStackCloudException as e:
self.addDetail(
'failed_response',
content.text_content(str(e.response.headers)))
content.text_content(str(e.response.headers)),
)
self.addDetail(
'failed_response',
content.text_content(e.response.text))
'failed_response', content.text_content(e.response.text)
)
self.assertEqual(
name,
self.user_cloud.list_objects(container_name)[0]['name'])
name, self.user_cloud.list_objects(container_name)[0]['name']
)
self.assertEqual(
[],
self.user_cloud.list_objects(container_name,
prefix='abc'))
[], self.user_cloud.list_objects(container_name, prefix='abc')
)
self.assertTrue(
self.user_cloud.delete_object(container_name, name))
self.user_cloud.delete_object(container_name, name)
)
self.assertEqual([], self.user_cloud.list_objects(container_name))
self.assertEqual(
container_name,
self.user_cloud.get_container(container_name).name)
container_name, self.user_cloud.get_container(container_name).name
)
self.user_cloud.delete_container(container_name)
def test_download_object_to_file(self):
@ -112,64 +126,83 @@ class TestObject(base.BaseFunctionalTest):
self.addDetail('container', content.text_content(container_name))
self.addCleanup(self.user_cloud.delete_container, container_name)
self.user_cloud.create_container(container_name)
self.assertEqual(container_name,
self.user_cloud.list_containers()[0]['name'])
self.assertEqual(
container_name, self.user_cloud.list_containers()[0]['name']
)
sizes = (
(64 * 1024, 1), # 64K, one segment
(64 * 1024, 5) # 64MB, 5 segments
(64 * 1024, 5), # 64MB, 5 segments
)
for size, nseg in sizes:
fake_content = ''
segment_size = int(round(size / nseg))
with tempfile.NamedTemporaryFile() as fake_file:
fake_content = ''.join(random.SystemRandom().choice(
string.ascii_uppercase + string.digits)
for _ in range(size)).encode('latin-1')
fake_content = ''.join(
random.SystemRandom().choice(
string.ascii_uppercase + string.digits
)
for _ in range(size)
).encode('latin-1')
fake_file.write(fake_content)
fake_file.flush()
name = 'test-%d' % size
self.addCleanup(
self.user_cloud.delete_object, container_name, name)
self.user_cloud.delete_object, container_name, name
)
self.user_cloud.create_object(
container_name, name,
container_name,
name,
fake_file.name,
segment_size=segment_size,
metadata={'foo': 'bar'})
self.assertFalse(self.user_cloud.is_object_stale(
container_name, name,
fake_file.name
))
metadata={'foo': 'bar'},
)
self.assertFalse(
self.user_cloud.is_object_stale(
container_name, name, fake_file.name
)
)
self.assertEqual(
'bar', self.user_cloud.get_object_metadata(
container_name, name)['foo']
'bar',
self.user_cloud.get_object_metadata(container_name, name)[
'foo'
],
)
self.user_cloud.update_object(
container=container_name,
name=name,
metadata={'testk': 'testv'},
)
self.user_cloud.update_object(container=container_name, name=name,
metadata={'testk': 'testv'})
self.assertEqual(
'testv', self.user_cloud.get_object_metadata(
container_name, name)['testk']
'testv',
self.user_cloud.get_object_metadata(container_name, name)[
'testk'
],
)
try:
with tempfile.NamedTemporaryFile() as fake_file:
self.user_cloud.get_object(
container_name, name, outfile=fake_file.name)
container_name, name, outfile=fake_file.name
)
downloaded_content = open(fake_file.name, 'rb').read()
self.assertEqual(fake_content, downloaded_content)
except exc.OpenStackCloudException as e:
self.addDetail(
'failed_response',
content.text_content(str(e.response.headers)))
content.text_content(str(e.response.headers)),
)
self.addDetail(
'failed_response',
content.text_content(e.response.text))
'failed_response', content.text_content(e.response.text)
)
raise
self.assertEqual(
name,
self.user_cloud.list_objects(container_name)[0]['name'])
name, self.user_cloud.list_objects(container_name)[0]['name']
)
self.assertTrue(
self.user_cloud.delete_object(container_name, name))
self.user_cloud.delete_object(container_name, name)
)
self.assertEqual([], self.user_cloud.list_objects(container_name))
self.assertEqual(container_name,
self.user_cloud.list_containers()[0]['name'])
self.assertEqual(
container_name, self.user_cloud.list_containers()[0]['name']
)
self.user_cloud.delete_container(container_name)

View File

@ -27,7 +27,6 @@ from openstack.tests.functional import base
class TestPort(base.BaseFunctionalTest):
def setUp(self):
super(TestPort, self).setUp()
# Skip Neutron tests if neutron is not present
@ -40,7 +39,8 @@ class TestPort(base.BaseFunctionalTest):
# Generate a unique port name to allow concurrent tests
self.new_port_name = 'test_' + ''.join(
random.choice(string.ascii_lowercase) for _ in range(5))
random.choice(string.ascii_lowercase) for _ in range(5)
)
self.addCleanup(self._cleanup_ports)
@ -65,7 +65,8 @@ class TestPort(base.BaseFunctionalTest):
port_name = self.new_port_name + '_create'
port = self.user_cloud.create_port(
network_id=self.net.id, name=port_name)
network_id=self.net.id, name=port_name
)
self.assertIsInstance(port, dict)
self.assertIn('id', port)
self.assertEqual(port.get('name'), port_name)
@ -74,7 +75,8 @@ class TestPort(base.BaseFunctionalTest):
port_name = self.new_port_name + '_get'
port = self.user_cloud.create_port(
network_id=self.net.id, name=port_name)
network_id=self.net.id, name=port_name
)
self.assertIsInstance(port, dict)
self.assertIn('id', port)
self.assertEqual(port.get('name'), port_name)
@ -89,7 +91,8 @@ class TestPort(base.BaseFunctionalTest):
port_name = self.new_port_name + '_get_by_id'
port = self.user_cloud.create_port(
network_id=self.net.id, name=port_name)
network_id=self.net.id, name=port_name
)
self.assertIsInstance(port, dict)
self.assertIn('id', port)
self.assertEqual(port.get('name'), port_name)
@ -104,11 +107,11 @@ class TestPort(base.BaseFunctionalTest):
port_name = self.new_port_name + '_update'
new_port_name = port_name + '_new'
self.user_cloud.create_port(
network_id=self.net.id, name=port_name)
self.user_cloud.create_port(network_id=self.net.id, name=port_name)
port = self.user_cloud.update_port(
name_or_id=port_name, name=new_port_name)
name_or_id=port_name, name=new_port_name
)
self.assertIsInstance(port, dict)
self.assertEqual(port.get('name'), new_port_name)
@ -129,7 +132,8 @@ class TestPort(base.BaseFunctionalTest):
port_name = self.new_port_name + '_delete'
port = self.user_cloud.create_port(
network_id=self.net.id, name=port_name)
network_id=self.net.id, name=port_name
)
self.assertIsInstance(port, dict)
self.assertIn('id', port)
self.assertEqual(port.get('name'), port_name)

View File

@ -25,7 +25,6 @@ from openstack.tests.functional import base
class TestProject(base.KeystoneBaseFunctionalTest):
def setUp(self):
super(TestProject, self).setUp()
if not self.operator_cloud:
@ -54,8 +53,9 @@ class TestProject(base.KeystoneBaseFunctionalTest):
'description': 'test_create_project',
}
if self.identity_version == '3':
params['domain_id'] = \
self.operator_cloud.get_domain('default')['id']
params['domain_id'] = self.operator_cloud.get_domain('default')[
'id'
]
project = self.operator_cloud.create_project(**params)
@ -66,15 +66,23 @@ class TestProject(base.KeystoneBaseFunctionalTest):
user_id = self.operator_cloud.current_user_id
# Grant the current user access to the project
self.assertTrue(self.operator_cloud.grant_role(
'member', user=user_id, project=project['id'], wait=True))
self.assertTrue(
self.operator_cloud.grant_role(
'member', user=user_id, project=project['id'], wait=True
)
)
self.addCleanup(
self.operator_cloud.revoke_role,
'member', user=user_id, project=project['id'], wait=True)
'member',
user=user_id,
project=project['id'],
wait=True,
)
new_cloud = self.operator_cloud.connect_as_project(project)
self.add_info_on_exception(
'new_cloud_config', pprint.pformat(new_cloud.config.config))
'new_cloud_config', pprint.pformat(new_cloud.config.config)
)
location = new_cloud.current_location
self.assertEqual(project_name, location['project']['name'])
@ -84,15 +92,17 @@ class TestProject(base.KeystoneBaseFunctionalTest):
params = {
'name': project_name,
'description': 'test_update_project',
'enabled': True
'enabled': True,
}
if self.identity_version == '3':
params['domain_id'] = \
self.operator_cloud.get_domain('default')['id']
params['domain_id'] = self.operator_cloud.get_domain('default')[
'id'
]
project = self.operator_cloud.create_project(**params)
updated_project = self.operator_cloud.update_project(
project_name, enabled=False, description='new')
project_name, enabled=False, description='new'
)
self.assertIsNotNone(updated_project)
self.assertEqual(project['id'], updated_project['id'])
self.assertEqual(project['name'], updated_project['name'])
@ -102,12 +112,14 @@ class TestProject(base.KeystoneBaseFunctionalTest):
# Revert the description and verify the project is still disabled
updated_project = self.operator_cloud.update_project(
project_name, description=params['description'])
project_name, description=params['description']
)
self.assertIsNotNone(updated_project)
self.assertEqual(project['id'], updated_project['id'])
self.assertEqual(project['name'], updated_project['name'])
self.assertEqual(project['description'],
updated_project['description'])
self.assertEqual(
project['description'], updated_project['description']
)
self.assertTrue(project['enabled'])
self.assertFalse(updated_project['enabled'])
@ -115,8 +127,9 @@ class TestProject(base.KeystoneBaseFunctionalTest):
project_name = self.new_project_name + '_delete'
params = {'name': project_name}
if self.identity_version == '3':
params['domain_id'] = \
self.operator_cloud.get_domain('default')['id']
params['domain_id'] = self.operator_cloud.get_domain('default')[
'id'
]
project = self.operator_cloud.create_project(**params)
self.assertIsNotNone(project)
self.assertTrue(self.operator_cloud.delete_project(project['id']))

View File

@ -48,8 +48,8 @@ class TestProjectCleanup(base.BaseFunctionalTest):
name=self.getUniqueString('router')
)
conn.network.add_interface_to_router(
self.router.id,
subnet_id=self.subnet.id)
self.router.id, subnet_id=self.subnet.id
)
def test_cleanup(self):
self._create_network_resources()
@ -60,7 +60,8 @@ class TestProjectCleanup(base.BaseFunctionalTest):
dry_run=True,
wait_timeout=120,
status_queue=status_queue,
filters={'created_at': '2000-01-01'})
filters={'created_at': '2000-01-01'},
)
self.assertTrue(status_queue.empty())
@ -71,7 +72,8 @@ class TestProjectCleanup(base.BaseFunctionalTest):
wait_timeout=120,
status_queue=status_queue,
filters={'created_at': '2200-01-01'},
resource_evaluation_fn=lambda x, y, z: False)
resource_evaluation_fn=lambda x, y, z: False,
)
self.assertTrue(status_queue.empty())
@ -80,7 +82,8 @@ class TestProjectCleanup(base.BaseFunctionalTest):
dry_run=True,
wait_timeout=120,
status_queue=status_queue,
filters={'created_at': '2200-01-01'})
filters={'created_at': '2200-01-01'},
)
objects = []
while not status_queue.empty():
@ -92,9 +95,8 @@ class TestProjectCleanup(base.BaseFunctionalTest):
# Fourth round - dry run with no filters, ensure everything identified
self.conn.project_cleanup(
dry_run=True,
wait_timeout=120,
status_queue=status_queue)
dry_run=True, wait_timeout=120, status_queue=status_queue
)
objects = []
while not status_queue.empty():
@ -109,9 +111,8 @@ class TestProjectCleanup(base.BaseFunctionalTest):
# Last round - do a real cleanup
self.conn.project_cleanup(
dry_run=False,
wait_timeout=600,
status_queue=status_queue)
dry_run=False, wait_timeout=600, status_queue=status_queue
)
objects = []
while not status_queue.empty():
@ -136,10 +137,12 @@ class TestProjectCleanup(base.BaseFunctionalTest):
b1 = self.conn.block_storage.create_backup(volume_id=vol.id)
self.conn.block_storage.wait_for_status(b1)
b2 = self.conn.block_storage.create_backup(
volume_id=vol.id, is_incremental=True, snapshot_id=s1.id)
volume_id=vol.id, is_incremental=True, snapshot_id=s1.id
)
self.conn.block_storage.wait_for_status(b2)
b3 = self.conn.block_storage.create_backup(
volume_id=vol.id, is_incremental=True, snapshot_id=s1.id)
volume_id=vol.id, is_incremental=True, snapshot_id=s1.id
)
self.conn.block_storage.wait_for_status(b3)
# First round - check no resources are old enough
@ -147,7 +150,8 @@ class TestProjectCleanup(base.BaseFunctionalTest):
dry_run=True,
wait_timeout=120,
status_queue=status_queue,
filters={'created_at': '2000-01-01'})
filters={'created_at': '2000-01-01'},
)
self.assertTrue(status_queue.empty())
@ -158,7 +162,8 @@ class TestProjectCleanup(base.BaseFunctionalTest):
wait_timeout=120,
status_queue=status_queue,
filters={'created_at': '2200-01-01'},
resource_evaluation_fn=lambda x, y, z: False)
resource_evaluation_fn=lambda x, y, z: False,
)
self.assertTrue(status_queue.empty())
@ -167,7 +172,8 @@ class TestProjectCleanup(base.BaseFunctionalTest):
dry_run=True,
wait_timeout=120,
status_queue=status_queue,
filters={'created_at': '2200-01-01'})
filters={'created_at': '2200-01-01'},
)
objects = []
while not status_queue.empty():
@ -179,9 +185,8 @@ class TestProjectCleanup(base.BaseFunctionalTest):
# Fourth round - dry run with no filters, ensure everything identified
self.conn.project_cleanup(
dry_run=True,
wait_timeout=120,
status_queue=status_queue)
dry_run=True, wait_timeout=120, status_queue=status_queue
)
objects = []
while not status_queue.empty():
@ -196,9 +201,8 @@ class TestProjectCleanup(base.BaseFunctionalTest):
# Last round - do a real cleanup
self.conn.project_cleanup(
dry_run=False,
wait_timeout=600,
status_queue=status_queue)
dry_run=False, wait_timeout=600, status_queue=status_queue
)
# Ensure no backups remain
self.assertEqual(0, len(list(self.conn.block_storage.backups())))
# Ensure no snapshots remain
@ -212,14 +216,16 @@ class TestProjectCleanup(base.BaseFunctionalTest):
self.conn.object_store.create_container('test_cleanup')
for i in range(1, 10):
self.conn.object_store.create_object(
"test_cleanup", f"test{i}", data="test{i}")
"test_cleanup", f"test{i}", data="test{i}"
)
# First round - check no resources are old enough
self.conn.project_cleanup(
dry_run=True,
wait_timeout=120,
status_queue=status_queue,
filters={'updated_at': '2000-01-01'})
filters={'updated_at': '2000-01-01'},
)
self.assertTrue(status_queue.empty())
@ -228,7 +234,8 @@ class TestProjectCleanup(base.BaseFunctionalTest):
dry_run=True,
wait_timeout=120,
status_queue=status_queue,
filters={'updated_at': '2200-01-01'})
filters={'updated_at': '2200-01-01'},
)
objects = []
while not status_queue.empty():
objects.append(status_queue.get())
@ -238,19 +245,15 @@ class TestProjectCleanup(base.BaseFunctionalTest):
self.assertIn('test1', obj_names)
# Ensure object still exists
obj = self.conn.object_store.get_object(
"test1", "test_cleanup")
obj = self.conn.object_store.get_object("test1", "test_cleanup")
self.assertIsNotNone(obj)
# Last round - do a real cleanup
self.conn.project_cleanup(
dry_run=False,
wait_timeout=600,
status_queue=status_queue)
dry_run=False, wait_timeout=600, status_queue=status_queue
)
objects.clear()
while not status_queue.empty():
objects.append(status_queue.get())
self.assertIsNone(
self.conn.get_container('test_container')
)
self.assertIsNone(self.conn.get_container('test_container'))

View File

@ -50,59 +50,61 @@ class TestQosBandwidthLimitRule(base.BaseFunctionalTest):
# Create bw limit rule
rule = self.operator_cloud.create_qos_bandwidth_limit_rule(
self.policy['id'],
max_kbps=max_kbps,
max_burst_kbps=max_burst_kbps)
self.policy['id'], max_kbps=max_kbps, max_burst_kbps=max_burst_kbps
)
self.assertIn('id', rule)
self.assertEqual(max_kbps, rule['max_kbps'])
self.assertEqual(max_burst_kbps, rule['max_burst_kbps'])
# Now try to update rule
updated_rule = self.operator_cloud.update_qos_bandwidth_limit_rule(
self.policy['id'],
rule['id'],
max_kbps=updated_max_kbps)
self.policy['id'], rule['id'], max_kbps=updated_max_kbps
)
self.assertIn('id', updated_rule)
self.assertEqual(updated_max_kbps, updated_rule['max_kbps'])
self.assertEqual(max_burst_kbps, updated_rule['max_burst_kbps'])
# List rules from policy
policy_rules = self.operator_cloud.list_qos_bandwidth_limit_rules(
self.policy['id'])
self.policy['id']
)
self.assertEqual([updated_rule], policy_rules)
# Delete rule
self.operator_cloud.delete_qos_bandwidth_limit_rule(
self.policy['id'], updated_rule['id'])
self.policy['id'], updated_rule['id']
)
# Check if there is no rules in policy
policy_rules = self.operator_cloud.list_qos_bandwidth_limit_rules(
self.policy['id'])
self.policy['id']
)
self.assertEqual([], policy_rules)
def test_create_qos_bandwidth_limit_rule_direction(self):
if not self.operator_cloud._has_neutron_extension(
'qos-bw-limit-direction'):
self.skipTest("'qos-bw-limit-direction' network extension "
"not supported by cloud")
'qos-bw-limit-direction'
):
self.skipTest(
"'qos-bw-limit-direction' network extension "
"not supported by cloud"
)
max_kbps = 1500
direction = "ingress"
updated_direction = "egress"
# Create bw limit rule
rule = self.operator_cloud.create_qos_bandwidth_limit_rule(
self.policy['id'],
max_kbps=max_kbps,
direction=direction)
self.policy['id'], max_kbps=max_kbps, direction=direction
)
self.assertIn('id', rule)
self.assertEqual(max_kbps, rule['max_kbps'])
self.assertEqual(direction, rule['direction'])
# Now try to update direction in rule
updated_rule = self.operator_cloud.update_qos_bandwidth_limit_rule(
self.policy['id'],
rule['id'],
direction=updated_direction)
self.policy['id'], rule['id'], direction=updated_direction
)
self.assertIn('id', updated_rule)
self.assertEqual(max_kbps, updated_rule['max_kbps'])
self.assertEqual(updated_direction, updated_rule['direction'])

View File

@ -49,29 +49,31 @@ class TestQosDscpMarkingRule(base.BaseFunctionalTest):
# Create DSCP marking rule
rule = self.operator_cloud.create_qos_dscp_marking_rule(
self.policy['id'],
dscp_mark=dscp_mark)
self.policy['id'], dscp_mark=dscp_mark
)
self.assertIn('id', rule)
self.assertEqual(dscp_mark, rule['dscp_mark'])
# Now try to update rule
updated_rule = self.operator_cloud.update_qos_dscp_marking_rule(
self.policy['id'],
rule['id'],
dscp_mark=updated_dscp_mark)
self.policy['id'], rule['id'], dscp_mark=updated_dscp_mark
)
self.assertIn('id', updated_rule)
self.assertEqual(updated_dscp_mark, updated_rule['dscp_mark'])
# List rules from policy
policy_rules = self.operator_cloud.list_qos_dscp_marking_rules(
self.policy['id'])
self.policy['id']
)
self.assertEqual([updated_rule], policy_rules)
# Delete rule
self.operator_cloud.delete_qos_dscp_marking_rule(
self.policy['id'], updated_rule['id'])
self.policy['id'], updated_rule['id']
)
# Check if there is no rules in policy
policy_rules = self.operator_cloud.list_qos_dscp_marking_rules(
self.policy['id'])
self.policy['id']
)
self.assertEqual([], policy_rules)

View File

@ -49,29 +49,31 @@ class TestQosMinimumBandwidthRule(base.BaseFunctionalTest):
# Create min bw rule
rule = self.operator_cloud.create_qos_minimum_bandwidth_rule(
self.policy['id'],
min_kbps=min_kbps)
self.policy['id'], min_kbps=min_kbps
)
self.assertIn('id', rule)
self.assertEqual(min_kbps, rule['min_kbps'])
# Now try to update rule
updated_rule = self.operator_cloud.update_qos_minimum_bandwidth_rule(
self.policy['id'],
rule['id'],
min_kbps=updated_min_kbps)
self.policy['id'], rule['id'], min_kbps=updated_min_kbps
)
self.assertIn('id', updated_rule)
self.assertEqual(updated_min_kbps, updated_rule['min_kbps'])
# List rules from policy
policy_rules = self.operator_cloud.list_qos_minimum_bandwidth_rules(
self.policy['id'])
self.policy['id']
)
self.assertEqual([updated_rule], policy_rules)
# Delete rule
self.operator_cloud.delete_qos_minimum_bandwidth_rule(
self.policy['id'], updated_rule['id'])
self.policy['id'], updated_rule['id']
)
# Check if there is no rules in policy
policy_rules = self.operator_cloud.list_qos_minimum_bandwidth_rules(
self.policy['id'])
self.policy['id']
)
self.assertEqual([], policy_rules)

View File

@ -56,7 +56,8 @@ class TestQosPolicy(base.BaseFunctionalTest):
def test_create_qos_policy_shared(self):
policy = self.operator_cloud.create_qos_policy(
name=self.policy_name, shared=True)
name=self.policy_name, shared=True
)
self.assertIn('id', policy)
self.assertEqual(self.policy_name, policy['name'])
self.assertTrue(policy['is_shared'])
@ -64,10 +65,12 @@ class TestQosPolicy(base.BaseFunctionalTest):
def test_create_qos_policy_default(self):
if not self.operator_cloud._has_neutron_extension('qos-default'):
self.skipTest("'qos-default' network extension not supported "
"by cloud")
self.skipTest(
"'qos-default' network extension not supported " "by cloud"
)
policy = self.operator_cloud.create_qos_policy(
name=self.policy_name, default=True)
name=self.policy_name, default=True
)
self.assertIn('id', policy)
self.assertEqual(self.policy_name, policy['name'])
self.assertFalse(policy['is_shared'])
@ -80,7 +83,8 @@ class TestQosPolicy(base.BaseFunctionalTest):
self.assertFalse(policy['is_default'])
updated_policy = self.operator_cloud.update_qos_policy(
policy['id'], shared=True, default=True)
policy['id'], shared=True, default=True
)
self.assertEqual(self.policy_name, updated_policy['name'])
self.assertTrue(updated_policy['is_shared'])
self.assertTrue(updated_policy['is_default'])
@ -89,9 +93,11 @@ class TestQosPolicy(base.BaseFunctionalTest):
policy1 = self.operator_cloud.create_qos_policy(name=self.policy_name)
self.assertIsNotNone(policy1)
policy2 = self.operator_cloud.create_qos_policy(
name=self.policy_name + 'other')
name=self.policy_name + 'other'
)
self.assertIsNotNone(policy2)
match = self.operator_cloud.list_qos_policies(
filters=dict(name=self.policy_name))
filters=dict(name=self.policy_name)
)
self.assertEqual(1, len(match))
self.assertEqual(policy1['name'], match[0]['name'])

View File

@ -21,11 +21,9 @@ from openstack.tests.functional import base
class TestComputeQuotas(base.BaseFunctionalTest):
def test_get_quotas(self):
'''Test quotas functionality'''
self.user_cloud.get_compute_quotas(
self.user_cloud.current_project_id)
self.user_cloud.get_compute_quotas(self.user_cloud.current_project_id)
def test_set_quotas(self):
'''Test quotas functionality'''
@ -36,15 +34,15 @@ class TestComputeQuotas(base.BaseFunctionalTest):
cores = quotas['cores']
self.operator_cloud.set_compute_quotas('demo', cores=cores + 1)
self.assertEqual(
cores + 1,
self.operator_cloud.get_compute_quotas('demo')['cores'])
cores + 1, self.operator_cloud.get_compute_quotas('demo')['cores']
)
self.operator_cloud.delete_compute_quotas('demo')
self.assertEqual(
cores, self.operator_cloud.get_compute_quotas('demo')['cores'])
cores, self.operator_cloud.get_compute_quotas('demo')['cores']
)
class TestVolumeQuotas(base.BaseFunctionalTest):
def setUp(self):
super(TestVolumeQuotas, self).setUp()
if not self.user_cloud.has_service('volume'):
@ -52,9 +50,7 @@ class TestVolumeQuotas(base.BaseFunctionalTest):
def test_get_quotas(self):
'''Test get quotas functionality'''
self.user_cloud.get_volume_quotas(
self.user_cloud.current_project_id
)
self.user_cloud.get_volume_quotas(self.user_cloud.current_project_id)
def test_set_quotas(self):
'''Test set quotas functionality'''
@ -66,19 +62,18 @@ class TestVolumeQuotas(base.BaseFunctionalTest):
self.operator_cloud.set_volume_quotas('demo', volumes=volumes + 1)
self.assertEqual(
volumes + 1,
self.operator_cloud.get_volume_quotas('demo')['volumes'])
self.operator_cloud.get_volume_quotas('demo')['volumes'],
)
self.operator_cloud.delete_volume_quotas('demo')
self.assertEqual(
volumes,
self.operator_cloud.get_volume_quotas('demo')['volumes'])
volumes, self.operator_cloud.get_volume_quotas('demo')['volumes']
)
class TestNetworkQuotas(base.BaseFunctionalTest):
def test_get_quotas(self):
'''Test get quotas functionality'''
self.user_cloud.get_network_quotas(
self.user_cloud.current_project_id)
self.user_cloud.get_network_quotas(self.user_cloud.current_project_id)
def test_quotas(self):
'''Test quotas functionality'''
@ -92,11 +87,12 @@ class TestNetworkQuotas(base.BaseFunctionalTest):
self.operator_cloud.set_network_quotas('demo', networks=network + 1)
self.assertEqual(
network + 1,
self.operator_cloud.get_network_quotas('demo')['networks'])
self.operator_cloud.get_network_quotas('demo')['networks'],
)
self.operator_cloud.delete_network_quotas('demo')
self.assertEqual(
network,
self.operator_cloud.get_network_quotas('demo')['networks'])
network, self.operator_cloud.get_network_quotas('demo')['networks']
)
def test_get_quotas_details(self):
if not self.operator_cloud:
@ -105,14 +101,21 @@ class TestNetworkQuotas(base.BaseFunctionalTest):
self.skipTest('network service not supported by cloud')
quotas = [
'floating_ips', 'networks', 'ports',
'rbac_policies', 'routers', 'subnets',
'subnet_pools', 'security_group_rules',
'security_groups']
'floating_ips',
'networks',
'ports',
'rbac_policies',
'routers',
'subnets',
'subnet_pools',
'security_group_rules',
'security_groups',
]
expected_keys = ['limit', 'used', 'reserved']
'''Test getting details about quota usage'''
quota_details = self.operator_cloud.get_network_quotas(
'demo', details=True)
'demo', details=True
)
for quota in quotas:
quota_val = quota_details[quota]
if quota_val:

View File

@ -17,7 +17,6 @@ from openstack.tests.functional import base
class TestRangeSearch(base.BaseFunctionalTest):
def _filter_m1_flavors(self, results):
"""The m1 flavors are the original devstack flavors"""
new_results = []
@ -30,7 +29,10 @@ class TestRangeSearch(base.BaseFunctionalTest):
flavors = self.user_cloud.list_flavors(get_extra=False)
self.assertRaises(
exc.OpenStackCloudException,
self.user_cloud.range_search, flavors, {"ram": "<1a0"})
self.user_cloud.range_search,
flavors,
{"ram": "<1a0"},
)
def test_range_search_exact(self):
flavors = self.user_cloud.list_flavors(get_extra=False)
@ -103,7 +105,8 @@ class TestRangeSearch(base.BaseFunctionalTest):
def test_range_search_multi_1(self):
flavors = self.user_cloud.list_flavors(get_extra=False)
result = self.user_cloud.range_search(
flavors, {"ram": "MIN", "vcpus": "MIN"})
flavors, {"ram": "MIN", "vcpus": "MIN"}
)
self.assertIsInstance(result, list)
self.assertEqual(1, len(result))
# older devstack does not have cirros256
@ -112,7 +115,8 @@ class TestRangeSearch(base.BaseFunctionalTest):
def test_range_search_multi_2(self):
flavors = self.user_cloud.list_flavors(get_extra=False)
result = self.user_cloud.range_search(
flavors, {"ram": "<1024", "vcpus": "MIN"})
flavors, {"ram": "<1024", "vcpus": "MIN"}
)
self.assertIsInstance(result, list)
result = self._filter_m1_flavors(result)
self.assertEqual(1, len(result))
@ -122,7 +126,8 @@ class TestRangeSearch(base.BaseFunctionalTest):
def test_range_search_multi_3(self):
flavors = self.user_cloud.list_flavors(get_extra=False)
result = self.user_cloud.range_search(
flavors, {"ram": ">=4096", "vcpus": "<6"})
flavors, {"ram": ">=4096", "vcpus": "<6"}
)
self.assertIsInstance(result, list)
result = self._filter_m1_flavors(result)
self.assertEqual(2, len(result))
@ -133,7 +138,8 @@ class TestRangeSearch(base.BaseFunctionalTest):
def test_range_search_multi_4(self):
flavors = self.user_cloud.list_flavors(get_extra=False)
result = self.user_cloud.range_search(
flavors, {"ram": ">=4096", "vcpus": "MAX"})
flavors, {"ram": ">=4096", "vcpus": "MAX"}
)
self.assertIsInstance(result, list)
self.assertEqual(1, len(result))
# This is the only result that should have max vcpu

View File

@ -25,7 +25,6 @@ from openstack.tests.functional import base
class TestRecordset(base.BaseFunctionalTest):
def setUp(self):
super(TestRecordset, self).setUp()
if not self.user_cloud.has_service('dns'):
@ -50,11 +49,9 @@ class TestRecordset(base.BaseFunctionalTest):
zone_obj = self.user_cloud.create_zone(name=zone, email=email)
# Test we can create a recordset and we get it returned
created_recordset = self.user_cloud.create_recordset(zone_obj['id'],
name,
type_,
records,
description, ttl)
created_recordset = self.user_cloud.create_recordset(
zone_obj['id'], name, type_, records, description, ttl
)
self.addCleanup(self.cleanup, zone, created_recordset['id'])
self.assertEqual(created_recordset['zone_id'], zone_obj['id'])
@ -65,20 +62,22 @@ class TestRecordset(base.BaseFunctionalTest):
self.assertEqual(created_recordset['ttl'], ttl)
# Test that we can list recordsets
recordsets = self.user_cloud.list_recordsets(zone_obj['id'],)
recordsets = self.user_cloud.list_recordsets(
zone_obj['id'],
)
self.assertIsNotNone(recordsets)
# Test we get the same recordset with the get_recordset method
get_recordset = self.user_cloud.get_recordset(zone_obj['id'],
created_recordset['id'])
get_recordset = self.user_cloud.get_recordset(
zone_obj['id'], created_recordset['id']
)
self.assertEqual(get_recordset['id'], created_recordset['id'])
# Test we can update a field on the recordset and only that field
# is updated
updated_recordset = self.user_cloud.update_recordset(
zone_obj['id'],
created_recordset['id'],
ttl=7200)
zone_obj['id'], created_recordset['id'], ttl=7200
)
self.assertEqual(updated_recordset['id'], created_recordset['id'])
self.assertEqual(updated_recordset['name'], name)
self.assertEqual(updated_recordset['type'], type_.upper())
@ -88,7 +87,8 @@ class TestRecordset(base.BaseFunctionalTest):
# Test we can delete and get True returned
deleted_recordset = self.user_cloud.delete_recordset(
zone, created_recordset['id'])
zone, created_recordset['id']
)
self.assertTrue(deleted_recordset)
def test_recordsets_with_zone_name(self):
@ -110,9 +110,9 @@ class TestRecordset(base.BaseFunctionalTest):
zone_obj = self.user_cloud.create_zone(name=zone, email=email)
# Test we can create a recordset and we get it returned
created_recordset = self.user_cloud.create_recordset(zone, name, type_,
records,
description, ttl)
created_recordset = self.user_cloud.create_recordset(
zone, name, type_, records, description, ttl
)
self.addCleanup(self.cleanup, zone, created_recordset['id'])
self.assertEqual(created_recordset['zone_id'], zone_obj['id'])
@ -127,16 +127,16 @@ class TestRecordset(base.BaseFunctionalTest):
self.assertIsNotNone(recordsets)
# Test we get the same recordset with the get_recordset method
get_recordset = self.user_cloud.get_recordset(zone,
created_recordset['id'])
get_recordset = self.user_cloud.get_recordset(
zone, created_recordset['id']
)
self.assertEqual(get_recordset['id'], created_recordset['id'])
# Test we can update a field on the recordset and only that field
# is updated
updated_recordset = self.user_cloud.update_recordset(
zone_obj['id'],
created_recordset['id'],
ttl=7200)
zone_obj['id'], created_recordset['id'], ttl=7200
)
self.assertEqual(updated_recordset['id'], created_recordset['id'])
self.assertEqual(updated_recordset['name'], name)
self.assertEqual(updated_recordset['type'], type_.upper())
@ -146,10 +146,10 @@ class TestRecordset(base.BaseFunctionalTest):
# Test we can delete and get True returned
deleted_recordset = self.user_cloud.delete_recordset(
zone, created_recordset['id'])
zone, created_recordset['id']
)
self.assertTrue(deleted_recordset)
def cleanup(self, zone_name, recordset_id):
self.user_cloud.delete_recordset(
zone_name, recordset_id)
self.user_cloud.delete_recordset(zone_name, recordset_id)
self.user_cloud.delete_zone(zone_name)

View File

@ -24,8 +24,13 @@ from openstack.tests.functional import base
EXPECTED_TOPLEVEL_FIELDS = (
'id', 'name', 'is_admin_state_up', 'external_gateway_info',
'project_id', 'routes', 'status'
'id',
'name',
'is_admin_state_up',
'external_gateway_info',
'project_id',
'routes',
'status',
)
EXPECTED_GW_INFO_FIELDS = ('network_id', 'enable_snat', 'external_fixed_ips')
@ -90,7 +95,8 @@ class TestRouter(base.BaseFunctionalTest):
def test_create_router_basic(self):
net1_name = self.network_prefix + '_net1'
net1 = self.operator_cloud.create_network(
name=net1_name, external=True)
name=net1_name, external=True
)
router_name = self.router_prefix + '_create_basic'
router = self.operator_cloud.create_router(
@ -117,14 +123,15 @@ class TestRouter(base.BaseFunctionalTest):
proj_id = project['id']
net1_name = self.network_prefix + '_net1'
net1 = self.operator_cloud.create_network(
name=net1_name, external=True, project_id=proj_id)
name=net1_name, external=True, project_id=proj_id
)
router_name = self.router_prefix + '_create_project'
router = self.operator_cloud.create_router(
name=router_name,
admin_state_up=True,
ext_gateway_net_id=net1['id'],
project_id=proj_id
project_id=proj_id,
)
for field in EXPECTED_TOPLEVEL_FIELDS:
@ -140,9 +147,9 @@ class TestRouter(base.BaseFunctionalTest):
self.assertEqual(net1['id'], ext_gw_info['network_id'])
self.assertTrue(ext_gw_info['enable_snat'])
def _create_and_verify_advanced_router(self,
external_cidr,
external_gateway_ip=None):
def _create_and_verify_advanced_router(
self, external_cidr, external_gateway_ip=None
):
# external_cidr must be passed in as unicode (u'')
# NOTE(Shrews): The arguments are needed because these tests
# will run in parallel and we want to make sure that each test
@ -150,10 +157,13 @@ class TestRouter(base.BaseFunctionalTest):
net1_name = self.network_prefix + '_net1'
sub1_name = self.subnet_prefix + '_sub1'
net1 = self.operator_cloud.create_network(
name=net1_name, external=True)
name=net1_name, external=True
)
sub1 = self.operator_cloud.create_subnet(
net1['id'], external_cidr, subnet_name=sub1_name,
gateway_ip=external_gateway_ip
net1['id'],
external_cidr,
subnet_name=sub1_name,
gateway_ip=external_gateway_ip,
)
ip_net = ipaddress.IPv4Network(external_cidr)
@ -165,9 +175,7 @@ class TestRouter(base.BaseFunctionalTest):
admin_state_up=False,
ext_gateway_net_id=net1['id'],
enable_snat=False,
ext_fixed_ips=[
{'subnet_id': sub1['id'], 'ip_address': last_ip}
]
ext_fixed_ips=[{'subnet_id': sub1['id'], 'ip_address': last_ip}],
)
for field in EXPECTED_TOPLEVEL_FIELDS:
@ -183,12 +191,10 @@ class TestRouter(base.BaseFunctionalTest):
self.assertEqual(1, len(ext_gw_info['external_fixed_ips']))
self.assertEqual(
sub1['id'],
ext_gw_info['external_fixed_ips'][0]['subnet_id']
sub1['id'], ext_gw_info['external_fixed_ips'][0]['subnet_id']
)
self.assertEqual(
last_ip,
ext_gw_info['external_fixed_ips'][0]['ip_address']
last_ip, ext_gw_info['external_fixed_ips'][0]['ip_address']
)
return router
@ -198,20 +204,25 @@ class TestRouter(base.BaseFunctionalTest):
def test_add_remove_router_interface(self):
router = self._create_and_verify_advanced_router(
external_cidr=u'10.3.3.0/24')
external_cidr=u'10.3.3.0/24'
)
net_name = self.network_prefix + '_intnet1'
sub_name = self.subnet_prefix + '_intsub1'
net = self.operator_cloud.create_network(name=net_name)
sub = self.operator_cloud.create_subnet(
net['id'], '10.4.4.0/24', subnet_name=sub_name,
gateway_ip='10.4.4.1'
net['id'],
'10.4.4.0/24',
subnet_name=sub_name,
gateway_ip='10.4.4.1',
)
iface = self.operator_cloud.add_router_interface(
router, subnet_id=sub['id'])
router, subnet_id=sub['id']
)
self.assertIsNone(
self.operator_cloud.remove_router_interface(
router, subnet_id=sub['id'])
router, subnet_id=sub['id']
)
)
# Test return values *after* the interface is detached so the
@ -224,25 +235,32 @@ class TestRouter(base.BaseFunctionalTest):
def test_list_router_interfaces(self):
router = self._create_and_verify_advanced_router(
external_cidr=u'10.5.5.0/24')
external_cidr=u'10.5.5.0/24'
)
net_name = self.network_prefix + '_intnet1'
sub_name = self.subnet_prefix + '_intsub1'
net = self.operator_cloud.create_network(name=net_name)
sub = self.operator_cloud.create_subnet(
net['id'], '10.6.6.0/24', subnet_name=sub_name,
gateway_ip='10.6.6.1'
net['id'],
'10.6.6.0/24',
subnet_name=sub_name,
gateway_ip='10.6.6.1',
)
iface = self.operator_cloud.add_router_interface(
router, subnet_id=sub['id'])
router, subnet_id=sub['id']
)
all_ifaces = self.operator_cloud.list_router_interfaces(router)
int_ifaces = self.operator_cloud.list_router_interfaces(
router, interface_type='internal')
router, interface_type='internal'
)
ext_ifaces = self.operator_cloud.list_router_interfaces(
router, interface_type='external')
router, interface_type='external'
)
self.assertIsNone(
self.operator_cloud.remove_router_interface(
router, subnet_id=sub['id'])
router, subnet_id=sub['id']
)
)
# Test return values *after* the interface is detached so the
@ -253,17 +271,21 @@ class TestRouter(base.BaseFunctionalTest):
self.assertEqual(1, len(ext_ifaces))
ext_fixed_ips = router['external_gateway_info']['external_fixed_ips']
self.assertEqual(ext_fixed_ips[0]['subnet_id'],
ext_ifaces[0]['fixed_ips'][0]['subnet_id'])
self.assertEqual(
ext_fixed_ips[0]['subnet_id'],
ext_ifaces[0]['fixed_ips'][0]['subnet_id'],
)
self.assertEqual(sub['id'], int_ifaces[0]['fixed_ips'][0]['subnet_id'])
def test_update_router_name(self):
router = self._create_and_verify_advanced_router(
external_cidr=u'10.7.7.0/24')
external_cidr=u'10.7.7.0/24'
)
new_name = self.router_prefix + '_update_name'
updated = self.operator_cloud.update_router(
router['id'], name=new_name)
router['id'], name=new_name
)
self.assertIsNotNone(updated)
for field in EXPECTED_TOPLEVEL_FIELDS:
@ -275,20 +297,20 @@ class TestRouter(base.BaseFunctionalTest):
# Validate nothing else changed
self.assertEqual(router['status'], updated['status'])
self.assertEqual(router['admin_state_up'], updated['admin_state_up'])
self.assertEqual(router['external_gateway_info'],
updated['external_gateway_info'])
self.assertEqual(
router['external_gateway_info'], updated['external_gateway_info']
)
def test_update_router_routes(self):
router = self._create_and_verify_advanced_router(
external_cidr=u'10.7.7.0/24')
external_cidr=u'10.7.7.0/24'
)
routes = [{
"destination": "10.7.7.0/24",
"nexthop": "10.7.7.99"
}]
routes = [{"destination": "10.7.7.0/24", "nexthop": "10.7.7.99"}]
updated = self.operator_cloud.update_router(
router['id'], routes=routes)
router['id'], routes=routes
)
self.assertIsNotNone(updated)
for field in EXPECTED_TOPLEVEL_FIELDS:
@ -300,15 +322,18 @@ class TestRouter(base.BaseFunctionalTest):
# Validate nothing else changed
self.assertEqual(router['status'], updated['status'])
self.assertEqual(router['admin_state_up'], updated['admin_state_up'])
self.assertEqual(router['external_gateway_info'],
updated['external_gateway_info'])
self.assertEqual(
router['external_gateway_info'], updated['external_gateway_info']
)
def test_update_router_admin_state(self):
router = self._create_and_verify_advanced_router(
external_cidr=u'10.8.8.0/24')
external_cidr=u'10.8.8.0/24'
)
updated = self.operator_cloud.update_router(
router['id'], admin_state_up=True)
router['id'], admin_state_up=True
)
self.assertIsNotNone(updated)
for field in EXPECTED_TOPLEVEL_FIELDS:
@ -316,25 +341,30 @@ class TestRouter(base.BaseFunctionalTest):
# admin_state_up is the only change we expect
self.assertTrue(updated['admin_state_up'])
self.assertNotEqual(router['admin_state_up'],
updated['admin_state_up'])
self.assertNotEqual(
router['admin_state_up'], updated['admin_state_up']
)
# Validate nothing else changed
self.assertEqual(router['status'], updated['status'])
self.assertEqual(router['name'], updated['name'])
self.assertEqual(router['external_gateway_info'],
updated['external_gateway_info'])
self.assertEqual(
router['external_gateway_info'], updated['external_gateway_info']
)
def test_update_router_ext_gw_info(self):
router = self._create_and_verify_advanced_router(
external_cidr=u'10.9.9.0/24')
external_cidr=u'10.9.9.0/24'
)
# create a new subnet
existing_net_id = router['external_gateway_info']['network_id']
sub_name = self.subnet_prefix + '_update'
sub = self.operator_cloud.create_subnet(
existing_net_id, '10.10.10.0/24', subnet_name=sub_name,
gateway_ip='10.10.10.1'
existing_net_id,
'10.10.10.0/24',
subnet_name=sub_name,
gateway_ip='10.10.10.1',
)
updated = self.operator_cloud.update_router(
@ -342,7 +372,7 @@ class TestRouter(base.BaseFunctionalTest):
ext_gateway_net_id=existing_net_id,
ext_fixed_ips=[
{'subnet_id': sub['id'], 'ip_address': '10.10.10.77'}
]
],
)
self.assertIsNotNone(updated)
@ -353,12 +383,10 @@ class TestRouter(base.BaseFunctionalTest):
ext_gw_info = updated['external_gateway_info']
self.assertEqual(1, len(ext_gw_info['external_fixed_ips']))
self.assertEqual(
sub['id'],
ext_gw_info['external_fixed_ips'][0]['subnet_id']
sub['id'], ext_gw_info['external_fixed_ips'][0]['subnet_id']
)
self.assertEqual(
'10.10.10.77',
ext_gw_info['external_fixed_ips'][0]['ip_address']
'10.10.10.77', ext_gw_info['external_fixed_ips'][0]['ip_address']
)
# Validate nothing else changed

View File

@ -23,7 +23,8 @@ from openstack.tests.functional import base
class TestSecurityGroups(base.BaseFunctionalTest):
def test_create_list_security_groups(self):
sg1 = self.user_cloud.create_security_group(
name="sg1", description="sg1")
name="sg1", description="sg1"
)
self.addCleanup(self.user_cloud.delete_security_group, sg1['id'])
if self.user_cloud.has_service('network'):
# Neutron defaults to all_tenants=1 when admin
@ -39,10 +40,12 @@ class TestSecurityGroups(base.BaseFunctionalTest):
self.skipTest("Operator cloud is required for this test")
sg1 = self.user_cloud.create_security_group(
name="sg1", description="sg1")
name="sg1", description="sg1"
)
self.addCleanup(self.user_cloud.delete_security_group, sg1['id'])
sg2 = self.operator_cloud.create_security_group(
name="sg2", description="sg2")
name="sg2", description="sg2"
)
self.addCleanup(self.operator_cloud.delete_security_group, sg2['id'])
if self.user_cloud.has_service('network'):
@ -53,7 +56,8 @@ class TestSecurityGroups(base.BaseFunctionalTest):
# Filter by tenant_id (filtering by project_id won't work with
# Keystone V2)
sg_list = self.operator_cloud.list_security_groups(
filters={'tenant_id': self.user_cloud.current_project_id})
filters={'tenant_id': self.user_cloud.current_project_id}
)
self.assertIn(sg1['id'], [sg['id'] for sg in sg_list])
self.assertNotIn(sg2['id'], [sg['id'] for sg in sg_list])
@ -64,7 +68,8 @@ class TestSecurityGroups(base.BaseFunctionalTest):
self.assertNotIn(sg1['id'], [sg['id'] for sg in sg_list])
sg_list = self.operator_cloud.list_security_groups(
filters={'all_tenants': 1})
filters={'all_tenants': 1}
)
self.assertIn(sg1['id'], [sg['id'] for sg in sg_list])
def test_get_security_group_by_id(self):

View File

@ -21,15 +21,16 @@ from openstack.tests.functional import base
class TestServerGroup(base.BaseFunctionalTest):
def test_server_group(self):
server_group_name = self.getUniqueString()
self.addCleanup(self.cleanup, server_group_name)
server_group = self.user_cloud.create_server_group(
server_group_name, ['affinity'])
server_group_name, ['affinity']
)
server_group_ids = [v['id']
for v in self.user_cloud.list_server_groups()]
server_group_ids = [
v['id'] for v in self.user_cloud.list_server_groups()
]
self.assertIn(server_group['id'], server_group_ids)
self.user_cloud.delete_server_group(server_group_name)

View File

@ -38,15 +38,17 @@ class TestServices(base.KeystoneBaseFunctionalTest):
# Generate a random name for services in this test
self.new_service_name = 'test_' + ''.join(
random.choice(string.ascii_lowercase) for _ in range(5))
random.choice(string.ascii_lowercase) for _ in range(5)
)
self.addCleanup(self._cleanup_services)
def _cleanup_services(self):
exception_list = list()
for s in self.operator_cloud.list_services():
if s['name'] is not None and \
s['name'].startswith(self.new_service_name):
if s['name'] is not None and s['name'].startswith(
self.new_service_name
):
try:
self.operator_cloud.delete_service(name_or_id=s['id'])
except Exception as e:
@ -60,45 +62,57 @@ class TestServices(base.KeystoneBaseFunctionalTest):
def test_create_service(self):
service = self.operator_cloud.create_service(
name=self.new_service_name + '_create', type='test_type',
description='this is a test description')
name=self.new_service_name + '_create',
type='test_type',
description='this is a test description',
)
self.assertIsNotNone(service.get('id'))
def test_update_service(self):
ver = self.operator_cloud.config.get_api_version('identity')
if ver.startswith('2'):
# NOTE(SamYaple): Update service only works with v3 api
self.assertRaises(OpenStackCloudUnavailableFeature,
self.operator_cloud.update_service,
'service_id', name='new name')
self.assertRaises(
OpenStackCloudUnavailableFeature,
self.operator_cloud.update_service,
'service_id',
name='new name',
)
else:
service = self.operator_cloud.create_service(
name=self.new_service_name + '_create', type='test_type',
description='this is a test description', enabled=True)
name=self.new_service_name + '_create',
type='test_type',
description='this is a test description',
enabled=True,
)
new_service = self.operator_cloud.update_service(
service.id,
name=self.new_service_name + '_update',
description='this is an updated description',
enabled=False
enabled=False,
)
self.assertEqual(
new_service.name, self.new_service_name + '_update'
)
self.assertEqual(
new_service.description, 'this is an updated description'
)
self.assertEqual(new_service.name,
self.new_service_name + '_update')
self.assertEqual(new_service.description,
'this is an updated description')
self.assertFalse(new_service.is_enabled)
self.assertEqual(service.id, new_service.id)
def test_list_services(self):
service = self.operator_cloud.create_service(
name=self.new_service_name + '_list', type='test_type')
name=self.new_service_name + '_list', type='test_type'
)
observed_services = self.operator_cloud.list_services()
self.assertIsInstance(observed_services, list)
found = False
for s in observed_services:
# Test all attributes are returned
if s['id'] == service['id']:
self.assertEqual(self.new_service_name + '_list',
s.get('name'))
self.assertEqual(
self.new_service_name + '_list', s.get('name')
)
self.assertEqual('test_type', s.get('type'))
found = True
self.assertTrue(found, msg='new service not found in service list!')
@ -106,8 +120,8 @@ class TestServices(base.KeystoneBaseFunctionalTest):
def test_delete_service_by_name(self):
# Test delete by name
service = self.operator_cloud.create_service(
name=self.new_service_name + '_delete_by_name',
type='test_type')
name=self.new_service_name + '_delete_by_name', type='test_type'
)
self.operator_cloud.delete_service(name_or_id=service['name'])
observed_services = self.operator_cloud.list_services()
found = False
@ -120,8 +134,8 @@ class TestServices(base.KeystoneBaseFunctionalTest):
def test_delete_service_by_id(self):
# Test delete by id
service = self.operator_cloud.create_service(
name=self.new_service_name + '_delete_by_id',
type='test_type')
name=self.new_service_name + '_delete_by_id', type='test_type'
)
self.operator_cloud.delete_service(name_or_id=service['id'])
observed_services = self.operator_cloud.list_services()
found = False

View File

@ -73,7 +73,6 @@ validate_template = '''heat_template_version: asdf-no-such-version '''
class TestStack(base.BaseFunctionalTest):
def setUp(self):
super(TestStack, self).setUp()
if not self.user_cloud.has_service('orchestration'):
@ -88,10 +87,12 @@ class TestStack(base.BaseFunctionalTest):
test_template.write(validate_template.encode('utf-8'))
test_template.close()
stack_name = self.getUniqueString('validate_template')
self.assertRaises(exc.OpenStackCloudException,
self.user_cloud.create_stack,
name=stack_name,
template_file=test_template.name)
self.assertRaises(
exc.OpenStackCloudException,
self.user_cloud.create_stack,
name=stack_name,
template_file=test_template.name,
)
def test_stack_simple(self):
test_template = tempfile.NamedTemporaryFile(delete=False)
@ -100,9 +101,8 @@ class TestStack(base.BaseFunctionalTest):
self.stack_name = self.getUniqueString('simple_stack')
self.addCleanup(self._cleanup_stack)
stack = self.user_cloud.create_stack(
name=self.stack_name,
template_file=test_template.name,
wait=True)
name=self.stack_name, template_file=test_template.name, wait=True
)
# assert expected values in stack
self.assertEqual('CREATE_COMPLETE', stack['stack_status'])
@ -121,9 +121,8 @@ class TestStack(base.BaseFunctionalTest):
# update with no changes
stack = self.user_cloud.update_stack(
self.stack_name,
template_file=test_template.name,
wait=True)
self.stack_name, template_file=test_template.name, wait=True
)
# assert no change in updated stack
self.assertEqual('UPDATE_COMPLETE', stack['stack_status'])
@ -135,7 +134,8 @@ class TestStack(base.BaseFunctionalTest):
self.stack_name,
template_file=test_template.name,
wait=True,
length=12)
length=12,
)
# assert changed output in updated stack
stack = self.user_cloud.get_stack(self.stack_name)
@ -147,7 +147,8 @@ class TestStack(base.BaseFunctionalTest):
def test_stack_nested(self):
test_template = tempfile.NamedTemporaryFile(
suffix='.yaml', delete=False)
suffix='.yaml', delete=False
)
test_template.write(root_template.encode('utf-8'))
test_template.close()
@ -166,7 +167,8 @@ class TestStack(base.BaseFunctionalTest):
name=self.stack_name,
template_file=test_template.name,
environment_files=[env.name],
wait=True)
wait=True,
)
# assert expected values in stack
self.assertEqual('CREATE_COMPLETE', stack['stack_status'])

View File

@ -105,7 +105,8 @@ class TestUsers(base.KeystoneBaseFunctionalTest):
email='somebody@nowhere.com',
enabled=False,
password='secret',
description='')
description='',
)
self.assertIsNotNone(new_user)
self.assertEqual(user['id'], new_user['id'])
self.assertEqual(user_name + '2', new_user['name'])
@ -115,30 +116,37 @@ class TestUsers(base.KeystoneBaseFunctionalTest):
def test_update_user_password(self):
user_name = self.user_prefix + '_password'
user_email = 'nobody@nowhere.com'
user = self._create_user(name=user_name,
email=user_email,
password='old_secret')
user = self._create_user(
name=user_name, email=user_email, password='old_secret'
)
self.assertIsNotNone(user)
self.assertTrue(user['enabled'])
# This should work for both v2 and v3
new_user = self.operator_cloud.update_user(
user['id'], password='new_secret')
user['id'], password='new_secret'
)
self.assertIsNotNone(new_user)
self.assertEqual(user['id'], new_user['id'])
self.assertEqual(user_name, new_user['name'])
self.assertEqual(user_email, new_user['email'])
self.assertTrue(new_user['enabled'])
self.assertTrue(self.operator_cloud.grant_role(
'member', user=user['id'], project='demo', wait=True))
self.assertTrue(
self.operator_cloud.grant_role(
'member', user=user['id'], project='demo', wait=True
)
)
self.addCleanup(
self.operator_cloud.revoke_role,
'member', user=user['id'], project='demo', wait=True)
'member',
user=user['id'],
project='demo',
wait=True,
)
new_cloud = self.operator_cloud.connect_as(
user_id=user['id'],
password='new_secret',
project_name='demo')
user_id=user['id'], password='new_secret', project_name='demo'
)
self.assertIsNotNone(new_cloud)
location = new_cloud.current_location
@ -166,9 +174,11 @@ class TestUsers(base.KeystoneBaseFunctionalTest):
# Add the user to the group
self.operator_cloud.add_user_to_group(user_name, group_name)
self.assertTrue(
self.operator_cloud.is_user_in_group(user_name, group_name))
self.operator_cloud.is_user_in_group(user_name, group_name)
)
# Remove them from the group
self.operator_cloud.remove_user_from_group(user_name, group_name)
self.assertFalse(
self.operator_cloud.is_user_in_group(user_name, group_name))
self.operator_cloud.is_user_in_group(user_name, group_name)
)

View File

@ -43,10 +43,10 @@ class TestVolume(base.BaseFunctionalTest):
self.addDetail('volume', content.text_content(volume_name))
self.addCleanup(self.cleanup, volume_name, snapshot_name=snapshot_name)
volume = self.user_cloud.create_volume(
display_name=volume_name, size=1)
display_name=volume_name, size=1
)
snapshot = self.user_cloud.create_volume_snapshot(
volume['id'],
display_name=snapshot_name
volume['id'], display_name=snapshot_name
)
ret_volume = self.user_cloud.get_volume_by_id(volume['id'])
@ -60,7 +60,8 @@ class TestVolume(base.BaseFunctionalTest):
self.assertIn(snapshot['id'], snapshot_ids)
ret_snapshot = self.user_cloud.get_volume_snapshot_by_id(
snapshot['id'])
snapshot['id']
)
self.assertEqual(snapshot['id'], ret_snapshot['id'])
self.user_cloud.delete_volume_snapshot(snapshot_name, wait=True)
@ -73,9 +74,11 @@ class TestVolume(base.BaseFunctionalTest):
self.addDetail('volume', content.text_content(volume_name))
self.addCleanup(self.cleanup, volume_name, image_name=image_name)
volume = self.user_cloud.create_volume(
display_name=volume_name, size=1)
display_name=volume_name, size=1
)
image = self.user_cloud.create_image(
image_name, volume=volume, wait=True)
image_name, volume=volume, wait=True
)
volume_ids = [v['id'] for v in self.user_cloud.list_volumes()]
self.assertIn(volume['id'], volume_ids)
@ -93,7 +96,8 @@ class TestVolume(base.BaseFunctionalTest):
snapshot = self.user_cloud.get_volume_snapshot(snapshot_name)
if snapshot:
self.user_cloud.delete_volume_snapshot(
snapshot_name, wait=True)
snapshot_name, wait=True
)
if image_name:
image = self.user_cloud.get_image(image_name)
if image:
@ -108,7 +112,8 @@ class TestVolume(base.BaseFunctionalTest):
self.user_cloud.delete_volume(v, wait=False)
try:
for count in utils.iterate_timeout(
180, "Timeout waiting for volume cleanup"):
180, "Timeout waiting for volume cleanup"
):
found = False
for existing in self.user_cloud.list_volumes():
for v in volume:
@ -127,7 +132,8 @@ class TestVolume(base.BaseFunctionalTest):
for v in volume:
if v['id'] == existing['id']:
self.operator_cloud.delete_volume(
v, wait=False, force=True)
v, wait=False, force=True
)
def test_list_volumes_pagination(self):
'''Test pagination for list volumes functionality'''
@ -146,9 +152,7 @@ class TestVolume(base.BaseFunctionalTest):
for i in self.user_cloud.list_volumes():
if i['name'] and i['name'].startswith(self.id()):
result.append(i['id'])
self.assertEqual(
sorted([i['id'] for i in volumes]),
sorted(result))
self.assertEqual(sorted([i['id'] for i in volumes]), sorted(result))
def test_update_volume(self):
name, desc = self.getUniqueString('name'), self.getUniqueString('desc')

View File

@ -27,14 +27,18 @@ class TestVolume(base.BaseFunctionalTest):
def test_create_get_delete_volume_backup(self):
volume = self.user_cloud.create_volume(
display_name=self.getUniqueString(), size=1)
display_name=self.getUniqueString(), size=1
)
self.addCleanup(self.user_cloud.delete_volume, volume['id'])
backup_name_1 = self.getUniqueString()
backup_desc_1 = self.getUniqueString()
backup = self.user_cloud.create_volume_backup(
volume_id=volume['id'], name=backup_name_1,
description=backup_desc_1, wait=True)
volume_id=volume['id'],
name=backup_name_1,
description=backup_desc_1,
wait=True,
)
self.assertEqual(backup_name_1, backup['name'])
backup = self.user_cloud.get_volume_backup(backup['id'])
@ -48,11 +52,13 @@ class TestVolume(base.BaseFunctionalTest):
volume = self.user_cloud.create_volume(size=1)
snapshot = self.user_cloud.create_volume_snapshot(volume['id'])
self.addCleanup(self.user_cloud.delete_volume, volume['id'])
self.addCleanup(self.user_cloud.delete_volume_snapshot, snapshot['id'],
wait=True)
self.addCleanup(
self.user_cloud.delete_volume_snapshot, snapshot['id'], wait=True
)
backup = self.user_cloud.create_volume_backup(
volume_id=volume['id'], snapshot_id=snapshot['id'], wait=True)
volume_id=volume['id'], snapshot_id=snapshot['id'], wait=True
)
backup = self.user_cloud.get_volume_backup(backup['id'])
self.assertEqual(backup['snapshot_id'], snapshot['id'])
@ -65,9 +71,11 @@ class TestVolume(base.BaseFunctionalTest):
self.addCleanup(self.user_cloud.delete_volume, volume['id'])
full_backup = self.user_cloud.create_volume_backup(
volume_id=volume['id'], wait=True)
volume_id=volume['id'], wait=True
)
incr_backup = self.user_cloud.create_volume_backup(
volume_id=volume['id'], incremental=True, wait=True)
volume_id=volume['id'], incremental=True, wait=True
)
full_backup = self.user_cloud.get_volume_backup(full_backup['id'])
incr_backup = self.user_cloud.get_volume_backup(incr_backup['id'])
@ -81,7 +89,8 @@ class TestVolume(base.BaseFunctionalTest):
def test_list_volume_backups(self):
vol1 = self.user_cloud.create_volume(
display_name=self.getUniqueString(), size=1)
display_name=self.getUniqueString(), size=1
)
self.addCleanup(self.user_cloud.delete_volume, vol1['id'])
# We create 2 volumes to create 2 backups. We could have created 2
@ -89,12 +98,14 @@ class TestVolume(base.BaseFunctionalTest):
# to be race-condition prone. And I didn't want to use an ugly sleep()
# here.
vol2 = self.user_cloud.create_volume(
display_name=self.getUniqueString(), size=1)
display_name=self.getUniqueString(), size=1
)
self.addCleanup(self.user_cloud.delete_volume, vol2['id'])
backup_name_1 = self.getUniqueString()
backup = self.user_cloud.create_volume_backup(
volume_id=vol1['id'], name=backup_name_1)
volume_id=vol1['id'], name=backup_name_1
)
self.addCleanup(self.user_cloud.delete_volume_backup, backup['id'])
backup = self.user_cloud.create_volume_backup(volume_id=vol2['id'])
@ -104,6 +115,7 @@ class TestVolume(base.BaseFunctionalTest):
self.assertEqual(2, len(backups))
backups = self.user_cloud.list_volume_backups(
search_opts={"name": backup_name_1})
search_opts={"name": backup_name_1}
)
self.assertEqual(1, len(backups))
self.assertEqual(backup_name_1, backups[0]['name'])

View File

@ -25,7 +25,6 @@ from openstack.tests.functional import base
class TestVolumeType(base.BaseFunctionalTest):
def _assert_project(self, volume_name_or_id, project_id, allowed=True):
acls = self.operator_cloud.get_volume_type_access(volume_name_or_id)
allowed_projects = [x.get('project_id') for x in acls]
@ -40,83 +39,87 @@ class TestVolumeType(base.BaseFunctionalTest):
volume_type = {
"name": 'test-volume-type',
"description": None,
"os-volume-type-access:is_public": False}
"os-volume-type-access:is_public": False,
}
self.operator_cloud.block_storage.post(
'/types', json={'volume_type': volume_type})
'/types', json={'volume_type': volume_type}
)
def tearDown(self):
ret = self.operator_cloud.get_volume_type('test-volume-type')
if ret.get('id'):
self.operator_cloud.block_storage.delete(
'/types/{volume_type_id}'.format(volume_type_id=ret.id))
'/types/{volume_type_id}'.format(volume_type_id=ret.id)
)
super(TestVolumeType, self).tearDown()
def test_list_volume_types(self):
volume_types = self.operator_cloud.list_volume_types()
self.assertTrue(volume_types)
self.assertTrue(any(
x for x in volume_types if x.name == 'test-volume-type'))
self.assertTrue(
any(x for x in volume_types if x.name == 'test-volume-type')
)
def test_add_remove_volume_type_access(self):
volume_type = self.operator_cloud.get_volume_type('test-volume-type')
self.assertEqual('test-volume-type', volume_type.name)
self.operator_cloud.add_volume_type_access(
'test-volume-type',
self.operator_cloud.current_project_id)
'test-volume-type', self.operator_cloud.current_project_id
)
self._assert_project(
'test-volume-type', self.operator_cloud.current_project_id,
allowed=True)
'test-volume-type',
self.operator_cloud.current_project_id,
allowed=True,
)
self.operator_cloud.remove_volume_type_access(
'test-volume-type',
self.operator_cloud.current_project_id)
'test-volume-type', self.operator_cloud.current_project_id
)
self._assert_project(
'test-volume-type', self.operator_cloud.current_project_id,
allowed=False)
'test-volume-type',
self.operator_cloud.current_project_id,
allowed=False,
)
def test_add_volume_type_access_missing_project(self):
# Project id is not valitaded and it may not exist.
self.operator_cloud.add_volume_type_access(
'test-volume-type',
'00000000000000000000000000000000')
'test-volume-type', '00000000000000000000000000000000'
)
self.operator_cloud.remove_volume_type_access(
'test-volume-type',
'00000000000000000000000000000000')
'test-volume-type', '00000000000000000000000000000000'
)
def test_add_volume_type_access_missing_volume(self):
with testtools.ExpectedException(
exc.OpenStackCloudException,
"VolumeType not found.*"
exc.OpenStackCloudException, "VolumeType not found.*"
):
self.operator_cloud.add_volume_type_access(
'MISSING_VOLUME_TYPE',
self.operator_cloud.current_project_id)
'MISSING_VOLUME_TYPE', self.operator_cloud.current_project_id
)
def test_remove_volume_type_access_missing_volume(self):
with testtools.ExpectedException(
exc.OpenStackCloudException,
"VolumeType not found.*"
exc.OpenStackCloudException, "VolumeType not found.*"
):
self.operator_cloud.remove_volume_type_access(
'MISSING_VOLUME_TYPE',
self.operator_cloud.current_project_id)
'MISSING_VOLUME_TYPE', self.operator_cloud.current_project_id
)
def test_add_volume_type_access_bad_project(self):
with testtools.ExpectedException(
exc.OpenStackCloudBadRequest,
"Unable to authorize.*"
exc.OpenStackCloudBadRequest, "Unable to authorize.*"
):
self.operator_cloud.add_volume_type_access(
'test-volume-type',
'BAD_PROJECT_ID')
'test-volume-type', 'BAD_PROJECT_ID'
)
def test_remove_volume_type_access_missing_project(self):
with testtools.ExpectedException(
exc.OpenStackCloudURINotFound,
"Unable to revoke.*"
exc.OpenStackCloudURINotFound, "Unable to revoke.*"
):
self.operator_cloud.remove_volume_type_access(
'test-volume-type',
'00000000000000000000000000000000')
'test-volume-type', '00000000000000000000000000000000'
)

View File

@ -23,7 +23,6 @@ from openstack.tests.functional import base
class TestZone(base.BaseFunctionalTest):
def setUp(self):
super(TestZone, self).setUp()
if not self.user_cloud.has_service('dns'):
@ -43,9 +42,13 @@ class TestZone(base.BaseFunctionalTest):
# Test we can create a zone and we get it returned
zone = self.user_cloud.create_zone(
name=name, zone_type=zone_type, email=email,
description=description, ttl=ttl,
masters=masters)
name=name,
zone_type=zone_type,
email=email,
description=description,
ttl=ttl,
masters=masters,
)
self.assertEqual(zone['name'], name)
self.assertEqual(zone['type'], zone_type.upper())
self.assertEqual(zone['email'], email)

View File

@ -32,7 +32,6 @@ RANGE_DATA = [
class TestUtils(base.TestCase):
def test__filter_list_name_or_id(self):
el1 = dict(id=100, name='donald')
el2 = dict(id=200, name='pluto')
@ -85,18 +84,28 @@ class TestUtils(base.TestCase):
self.assertEqual([], ret)
def test__filter_list_unicode(self):
el1 = dict(id=100, name=u'中文', last='duck',
other=dict(category='duck', financial=dict(status='poor')))
el2 = dict(id=200, name=u'中文', last='trump',
other=dict(category='human', financial=dict(status='rich')))
el3 = dict(id=300, name='donald', last='ronald mac',
other=dict(category='clown', financial=dict(status='rich')))
el1 = dict(
id=100,
name=u'中文',
last='duck',
other=dict(category='duck', financial=dict(status='poor')),
)
el2 = dict(
id=200,
name=u'中文',
last='trump',
other=dict(category='human', financial=dict(status='rich')),
)
el3 = dict(
id=300,
name='donald',
last='ronald mac',
other=dict(category='clown', financial=dict(status='rich')),
)
data = [el1, el2, el3]
ret = _utils._filter_list(
data, u'中文',
{'other': {
'financial': {'status': 'rich'}
}})
data, u'中文', {'other': {'financial': {'status': 'rich'}}}
)
self.assertEqual([el2], ret)
def test__filter_list_filter(self):
@ -114,30 +123,47 @@ class TestUtils(base.TestCase):
self.assertEqual([el1], ret)
def test__filter_list_dict1(self):
el1 = dict(id=100, name='donald', last='duck',
other=dict(category='duck'))
el2 = dict(id=200, name='donald', last='trump',
other=dict(category='human'))
el3 = dict(id=300, name='donald', last='ronald mac',
other=dict(category='clown'))
el1 = dict(
id=100, name='donald', last='duck', other=dict(category='duck')
)
el2 = dict(
id=200, name='donald', last='trump', other=dict(category='human')
)
el3 = dict(
id=300,
name='donald',
last='ronald mac',
other=dict(category='clown'),
)
data = [el1, el2, el3]
ret = _utils._filter_list(
data, 'donald', {'other': {'category': 'clown'}})
data, 'donald', {'other': {'category': 'clown'}}
)
self.assertEqual([el3], ret)
def test__filter_list_dict2(self):
el1 = dict(id=100, name='donald', last='duck',
other=dict(category='duck', financial=dict(status='poor')))
el2 = dict(id=200, name='donald', last='trump',
other=dict(category='human', financial=dict(status='rich')))
el3 = dict(id=300, name='donald', last='ronald mac',
other=dict(category='clown', financial=dict(status='rich')))
el1 = dict(
id=100,
name='donald',
last='duck',
other=dict(category='duck', financial=dict(status='poor')),
)
el2 = dict(
id=200,
name='donald',
last='trump',
other=dict(category='human', financial=dict(status='rich')),
)
el3 = dict(
id=300,
name='donald',
last='ronald mac',
other=dict(category='clown', financial=dict(status='rich')),
)
data = [el1, el2, el3]
ret = _utils._filter_list(
data, 'donald',
{'other': {
'financial': {'status': 'rich'}
}})
data, 'donald', {'other': {'financial': {'status': 'rich'}}}
)
self.assertEqual([el2, el3], ret)
def test_safe_dict_min_ints(self):
@ -176,7 +202,7 @@ class TestUtils(base.TestCase):
with testtools.ExpectedException(
exc.OpenStackCloudException,
"Search for minimum value failed. "
"Value for f1 is not an integer: aaa"
"Value for f1 is not an integer: aaa",
):
_utils.safe_dict_min('f1', data)
@ -216,7 +242,7 @@ class TestUtils(base.TestCase):
with testtools.ExpectedException(
exc.OpenStackCloudException,
"Search for maximum value failed. "
"Value for f1 is not an integer: aaa"
"Value for f1 is not an integer: aaa",
):
_utils.safe_dict_max('f1', data)
@ -282,15 +308,13 @@ class TestUtils(base.TestCase):
def test_range_filter_invalid_int(self):
with testtools.ExpectedException(
exc.OpenStackCloudException,
"Invalid range value: <1A0"
exc.OpenStackCloudException, "Invalid range value: <1A0"
):
_utils.range_filter(RANGE_DATA, "key1", "<1A0")
def test_range_filter_invalid_op(self):
with testtools.ExpectedException(
exc.OpenStackCloudException,
"Invalid range value: <>100"
exc.OpenStackCloudException, "Invalid range value: <>100"
):
_utils.range_filter(RANGE_DATA, "key1", "<>100")
@ -330,8 +354,16 @@ class TestUtils(base.TestCase):
def test_get_entity_pass_uuid(self):
uuid = uuid4().hex
self.cloud.use_direct_get = True
resources = ['flavor', 'image', 'volume', 'network',
'subnet', 'port', 'floating_ip', 'security_group']
resources = [
'flavor',
'image',
'volume',
'network',
'subnet',
'port',
'floating_ip',
'security_group',
]
for r in resources:
f = 'get_%s_by_id' % r
with mock.patch.object(self.cloud, f) as get:
@ -340,8 +372,16 @@ class TestUtils(base.TestCase):
def test_get_entity_pass_search_methods(self):
self.cloud.use_direct_get = True
resources = ['flavor', 'image', 'volume', 'network',
'subnet', 'port', 'floating_ip', 'security_group']
resources = [
'flavor',
'image',
'volume',
'network',
'subnet',
'port',
'floating_ip',
'security_group',
]
filters = {}
name = 'name_no_uuid'
for r in resources:
@ -351,8 +391,16 @@ class TestUtils(base.TestCase):
search.assert_called_once_with(name, filters)
def test_get_entity_get_and_search(self):
resources = ['flavor', 'image', 'volume', 'network',
'subnet', 'port', 'floating_ip', 'security_group']
resources = [
'flavor',
'image',
'volume',
'network',
'subnet',
'port',
'floating_ip',
'security_group',
]
for r in resources:
self.assertTrue(hasattr(self.cloud, 'get_%s_by_id' % r))
self.assertTrue(hasattr(self.cloud, 'search_%ss' % r))

View File

@ -23,7 +23,7 @@ DEP_DICT = {
'parent_id': None,
'root_id': 1,
'num_accelerators': 4,
'device_id': 0
'device_id': 0,
}
DEV_UUID = uuid.uuid4().hex
@ -40,14 +40,16 @@ DEV_DICT = {
DEV_PROF_UUID = uuid.uuid4().hex
DEV_PROF_GROUPS = [
{"resources:ACCELERATOR_FPGA": "1",
"trait:CUSTOM_FPGA_INTEL_PAC_ARRIA10": "required",
"trait:CUSTOM_FUNCTION_ID_3AFB": "required",
},
{"resources:CUSTOM_ACCELERATOR_FOO": "2",
"resources:CUSTOM_MEMORY": "200",
"trait:CUSTOM_TRAIT_ALWAYS": "required",
}
{
"resources:ACCELERATOR_FPGA": "1",
"trait:CUSTOM_FPGA_INTEL_PAC_ARRIA10": "required",
"trait:CUSTOM_FUNCTION_ID_3AFB": "required",
},
{
"resources:CUSTOM_ACCELERATOR_FOO": "2",
"resources:CUSTOM_MEMORY": "200",
"trait:CUSTOM_TRAIT_ALWAYS": "required",
},
]
DEV_PROF_DICT = {
"id": 1,
@ -61,10 +63,9 @@ NEW_DEV_PROF_DICT = copy.copy(DEV_PROF_DICT)
ARQ_UUID = uuid.uuid4().hex
ARQ_DEV_RP_UUID = uuid.uuid4().hex
ARQ_INSTANCE_UUID = uuid.uuid4().hex
ARQ_ATTACH_INFO_STR = '{"bus": "5e", '\
'"device": "00", '\
'"domain": "0000", '\
'"function": "1"}'
ARQ_ATTACH_INFO_STR = (
'{"bus": "5e", ' '"device": "00", ' '"domain": "0000", ' '"function": "1"}'
)
ARQ_DICT = {
'uuid': ARQ_UUID,
'hostname': 'test_hostname',
@ -85,36 +86,41 @@ class TestAccelerator(base.TestCase):
self.use_cyborg()
def test_list_deployables(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'accelerator',
'public',
append=['v2', 'deployables']),
json={'deployables': [DEP_DICT]}
),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'accelerator', 'public', append=['v2', 'deployables']
),
json={'deployables': [DEP_DICT]},
),
]
)
dep_list = self.cloud.list_deployables()
self.assertEqual(len(dep_list), 1)
self.assertEqual(dep_list[0].id, DEP_DICT['uuid'])
self.assertEqual(dep_list[0].name, DEP_DICT['name'])
self.assertEqual(dep_list[0].parent_id, DEP_DICT['parent_id'])
self.assertEqual(dep_list[0].root_id, DEP_DICT['root_id'])
self.assertEqual(dep_list[0].num_accelerators,
DEP_DICT['num_accelerators'])
self.assertEqual(
dep_list[0].num_accelerators, DEP_DICT['num_accelerators']
)
self.assertEqual(dep_list[0].device_id, DEP_DICT['device_id'])
self.assert_calls()
def test_list_devices(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'accelerator',
'public',
append=['v2', 'devices']),
json={'devices': [DEV_DICT]}
),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'accelerator', 'public', append=['v2', 'devices']
),
json={'devices': [DEV_DICT]},
),
]
)
dev_list = self.cloud.list_devices()
self.assertEqual(len(dev_list), 1)
self.assertEqual(dev_list[0].id, DEV_DICT['id'])
@ -123,22 +129,28 @@ class TestAccelerator(base.TestCase):
self.assertEqual(dev_list[0].type, DEV_DICT['type'])
self.assertEqual(dev_list[0].vendor, DEV_DICT['vendor'])
self.assertEqual(dev_list[0].model, DEV_DICT['model'])
self.assertEqual(dev_list[0].std_board_info,
DEV_DICT['std_board_info'])
self.assertEqual(dev_list[0].vendor_board_info,
DEV_DICT['vendor_board_info'])
self.assertEqual(
dev_list[0].std_board_info, DEV_DICT['std_board_info']
)
self.assertEqual(
dev_list[0].vendor_board_info, DEV_DICT['vendor_board_info']
)
self.assert_calls()
def test_list_device_profiles(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'accelerator',
'public',
append=['v2', 'device_profiles']),
json={'device_profiles': [DEV_PROF_DICT]}
),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'accelerator',
'public',
append=['v2', 'device_profiles'],
),
json={'device_profiles': [DEV_PROF_DICT]},
),
]
)
dev_prof_list = self.cloud.list_device_profiles()
self.assertEqual(len(dev_prof_list), 1)
self.assertEqual(dev_prof_list[0].id, DEV_PROF_DICT['id'])
@ -148,183 +160,248 @@ class TestAccelerator(base.TestCase):
self.assert_calls()
def test_create_device_profile(self):
self.register_uris([
dict(method='POST',
uri=self.get_mock_url(
'accelerator',
'public',
append=['v2', 'device_profiles']),
json=NEW_DEV_PROF_DICT)
])
self.register_uris(
[
dict(
method='POST',
uri=self.get_mock_url(
'accelerator',
'public',
append=['v2', 'device_profiles'],
),
json=NEW_DEV_PROF_DICT,
)
]
)
attrs = {
'name': NEW_DEV_PROF_DICT['name'],
'groups': NEW_DEV_PROF_DICT['groups']
'groups': NEW_DEV_PROF_DICT['groups'],
}
self.assertTrue(
self.cloud.create_device_profile(
attrs
)
)
self.assertTrue(self.cloud.create_device_profile(attrs))
self.assert_calls()
def test_delete_device_profile(self, filters=None):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'accelerator',
'public',
append=['v2', 'device_profiles', DEV_PROF_DICT['name']]),
json={"device_profiles": [DEV_PROF_DICT]}),
dict(method='DELETE',
uri=self.get_mock_url(
'accelerator',
'public',
append=['v2', 'device_profiles', DEV_PROF_DICT['name']]),
json=DEV_PROF_DICT)
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'accelerator',
'public',
append=[
'v2',
'device_profiles',
DEV_PROF_DICT['name'],
],
),
json={"device_profiles": [DEV_PROF_DICT]},
),
dict(
method='DELETE',
uri=self.get_mock_url(
'accelerator',
'public',
append=[
'v2',
'device_profiles',
DEV_PROF_DICT['name'],
],
),
json=DEV_PROF_DICT,
),
]
)
self.assertTrue(
self.cloud.delete_device_profile(
DEV_PROF_DICT['name'],
filters
)
self.cloud.delete_device_profile(DEV_PROF_DICT['name'], filters)
)
self.assert_calls()
def test_list_accelerator_requests(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'accelerator',
'public',
append=['v2', 'accelerator_requests']),
json={'arqs': [ARQ_DICT]}
),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'accelerator',
'public',
append=['v2', 'accelerator_requests'],
),
json={'arqs': [ARQ_DICT]},
),
]
)
arq_list = self.cloud.list_accelerator_requests()
self.assertEqual(len(arq_list), 1)
self.assertEqual(arq_list[0].uuid, ARQ_DICT['uuid'])
self.assertEqual(arq_list[0].device_profile_name,
ARQ_DICT['device_profile_name'])
self.assertEqual(arq_list[0].device_profile_group_id,
ARQ_DICT['device_profile_group_id'])
self.assertEqual(arq_list[0].device_rp_uuid,
ARQ_DICT['device_rp_uuid'])
self.assertEqual(arq_list[0].instance_uuid,
ARQ_DICT['instance_uuid'])
self.assertEqual(arq_list[0].attach_handle_type,
ARQ_DICT['attach_handle_type'])
self.assertEqual(arq_list[0].attach_handle_info,
ARQ_DICT['attach_handle_info'])
self.assert_calls()
def test_create_accelerator_request(self):
self.register_uris([
dict(method='POST',
uri=self.get_mock_url(
'accelerator',
'public',
append=['v2', 'accelerator_requests']),
json=NEW_ARQ_DICT
),
])
attrs = {
'device_profile_name': NEW_ARQ_DICT['device_profile_name'],
'device_profile_group_id': NEW_ARQ_DICT['device_profile_group_id']
}
self.assertTrue(
self.cloud.create_accelerator_request(
attrs
)
self.assertEqual(
arq_list[0].device_profile_name, ARQ_DICT['device_profile_name']
)
self.assertEqual(
arq_list[0].device_profile_group_id,
ARQ_DICT['device_profile_group_id'],
)
self.assertEqual(
arq_list[0].device_rp_uuid, ARQ_DICT['device_rp_uuid']
)
self.assertEqual(arq_list[0].instance_uuid, ARQ_DICT['instance_uuid'])
self.assertEqual(
arq_list[0].attach_handle_type, ARQ_DICT['attach_handle_type']
)
self.assertEqual(
arq_list[0].attach_handle_info, ARQ_DICT['attach_handle_info']
)
self.assert_calls()
def test_delete_accelerator_request(self, filters=None):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'accelerator',
'public',
append=['v2', 'accelerator_requests', ARQ_DICT['uuid']]),
json={"accelerator_requests": [ARQ_DICT]}),
dict(method='DELETE',
uri=self.get_mock_url(
'accelerator',
'public',
append=['v2', 'accelerator_requests', ARQ_DICT['uuid']]),
json=ARQ_DICT)
def test_create_accelerator_request(self):
self.register_uris(
[
dict(
method='POST',
uri=self.get_mock_url(
'accelerator',
'public',
append=['v2', 'accelerator_requests'],
),
json=NEW_ARQ_DICT,
),
]
)
])
attrs = {
'device_profile_name': NEW_ARQ_DICT['device_profile_name'],
'device_profile_group_id': NEW_ARQ_DICT['device_profile_group_id'],
}
self.assertTrue(self.cloud.create_accelerator_request(attrs))
self.assert_calls()
def test_delete_accelerator_request(self, filters=None):
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'accelerator',
'public',
append=[
'v2',
'accelerator_requests',
ARQ_DICT['uuid'],
],
),
json={"accelerator_requests": [ARQ_DICT]},
),
dict(
method='DELETE',
uri=self.get_mock_url(
'accelerator',
'public',
append=[
'v2',
'accelerator_requests',
ARQ_DICT['uuid'],
],
),
json=ARQ_DICT,
),
]
)
self.assertTrue(
self.cloud.delete_accelerator_request(
ARQ_DICT['uuid'],
filters
)
self.cloud.delete_accelerator_request(ARQ_DICT['uuid'], filters)
)
self.assert_calls()
def test_bind_accelerator_request(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'accelerator',
'public',
append=['v2', 'accelerator_requests', ARQ_DICT['uuid']]),
json={"accelerator_requests": [ARQ_DICT]}),
dict(method='PATCH',
uri=self.get_mock_url(
'accelerator',
'public',
append=['v2', 'accelerator_requests', ARQ_DICT['uuid']]),
json=ARQ_DICT)
])
properties = [{'path': '/hostname',
'value': ARQ_DICT['hostname'],
'op': 'add'},
{'path': '/instance_uuid',
'value': ARQ_DICT['instance_uuid'],
'op': 'add'},
{'path': '/device_rp_uuid',
'value': ARQ_DICT['device_rp_uuid'],
'op': 'add'}]
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'accelerator',
'public',
append=[
'v2',
'accelerator_requests',
ARQ_DICT['uuid'],
],
),
json={"accelerator_requests": [ARQ_DICT]},
),
dict(
method='PATCH',
uri=self.get_mock_url(
'accelerator',
'public',
append=[
'v2',
'accelerator_requests',
ARQ_DICT['uuid'],
],
),
json=ARQ_DICT,
),
]
)
properties = [
{'path': '/hostname', 'value': ARQ_DICT['hostname'], 'op': 'add'},
{
'path': '/instance_uuid',
'value': ARQ_DICT['instance_uuid'],
'op': 'add',
},
{
'path': '/device_rp_uuid',
'value': ARQ_DICT['device_rp_uuid'],
'op': 'add',
},
]
self.assertTrue(
self.cloud.bind_accelerator_request(
ARQ_DICT['uuid'], properties
)
self.cloud.bind_accelerator_request(ARQ_DICT['uuid'], properties)
)
self.assert_calls()
def test_unbind_accelerator_request(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'accelerator',
'public',
append=['v2', 'accelerator_requests', ARQ_DICT['uuid']]),
json={"accelerator_requests": [ARQ_DICT]}),
dict(method='PATCH',
uri=self.get_mock_url(
'accelerator',
'public',
append=['v2', 'accelerator_requests', ARQ_DICT['uuid']]),
json=ARQ_DICT)
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'accelerator',
'public',
append=[
'v2',
'accelerator_requests',
ARQ_DICT['uuid'],
],
),
json={"accelerator_requests": [ARQ_DICT]},
),
dict(
method='PATCH',
uri=self.get_mock_url(
'accelerator',
'public',
append=[
'v2',
'accelerator_requests',
ARQ_DICT['uuid'],
],
),
json=ARQ_DICT,
),
]
)
properties = [{'path': '/hostname',
'op': 'remove'},
{'path': '/instance_uuid',
'op': 'remove'},
{'path': '/device_rp_uuid',
'op': 'remove'}]
properties = [
{'path': '/hostname', 'op': 'remove'},
{'path': '/instance_uuid', 'op': 'remove'},
{'path': '/device_rp_uuid', 'op': 'remove'},
]
self.assertTrue(
self.cloud.unbind_accelerator_request(
ARQ_DICT['uuid'], properties
)
self.cloud.unbind_accelerator_request(ARQ_DICT['uuid'], properties)
)
self.assert_calls()

View File

@ -15,7 +15,6 @@ from openstack.tests.unit import base
class TestAggregate(base.TestCase):
def setUp(self):
super(TestAggregate, self).setUp()
self.aggregate_name = self.getUniqueString('aggregate')
@ -27,17 +26,25 @@ class TestAggregate(base.TestCase):
del create_aggregate['metadata']
del create_aggregate['hosts']
self.register_uris([
dict(method='POST',
uri=self.get_mock_url(
'compute', 'public', append=['os-aggregates']),
json={'aggregate': create_aggregate},
validate=dict(json={
'aggregate': {
'name': self.aggregate_name,
'availability_zone': None,
}})),
])
self.register_uris(
[
dict(
method='POST',
uri=self.get_mock_url(
'compute', 'public', append=['os-aggregates']
),
json={'aggregate': create_aggregate},
validate=dict(
json={
'aggregate': {
'name': self.aggregate_name,
'availability_zone': None,
}
}
),
),
]
)
self.cloud.create_aggregate(name=self.aggregate_name)
self.assert_calls()
@ -45,100 +52,144 @@ class TestAggregate(base.TestCase):
def test_create_aggregate_with_az(self):
availability_zone = 'az1'
az_aggregate = fakes.make_fake_aggregate(
1, self.aggregate_name, availability_zone=availability_zone)
1, self.aggregate_name, availability_zone=availability_zone
)
create_aggregate = az_aggregate.copy()
del create_aggregate['metadata']
del create_aggregate['hosts']
self.register_uris([
dict(method='POST',
uri=self.get_mock_url(
'compute', 'public', append=['os-aggregates']),
json={'aggregate': create_aggregate},
validate=dict(json={
'aggregate': {
'name': self.aggregate_name,
'availability_zone': availability_zone,
}})),
])
self.register_uris(
[
dict(
method='POST',
uri=self.get_mock_url(
'compute', 'public', append=['os-aggregates']
),
json={'aggregate': create_aggregate},
validate=dict(
json={
'aggregate': {
'name': self.aggregate_name,
'availability_zone': availability_zone,
}
}
),
),
]
)
self.cloud.create_aggregate(
name=self.aggregate_name, availability_zone=availability_zone)
name=self.aggregate_name, availability_zone=availability_zone
)
self.assert_calls()
def test_delete_aggregate(self):
self.register_uris([
dict(method='DELETE',
uri=self.get_mock_url(
'compute', 'public', append=['os-aggregates', '1'])),
])
self.register_uris(
[
dict(
method='DELETE',
uri=self.get_mock_url(
'compute', 'public', append=['os-aggregates', '1']
),
),
]
)
self.assertTrue(self.cloud.delete_aggregate('1'))
self.assert_calls()
def test_delete_aggregate_by_name(self):
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['os-aggregates',
self.aggregate_name]
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'compute',
'public',
append=['os-aggregates', self.aggregate_name],
),
status_code=404,
),
status_code=404,
),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['os-aggregates']),
json={'aggregates': [self.fake_aggregate]}),
dict(method='DELETE',
uri=self.get_mock_url(
'compute', 'public', append=['os-aggregates', '1'])),
])
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['os-aggregates']
),
json={'aggregates': [self.fake_aggregate]},
),
dict(
method='DELETE',
uri=self.get_mock_url(
'compute', 'public', append=['os-aggregates', '1']
),
),
]
)
self.assertTrue(self.cloud.delete_aggregate(self.aggregate_name))
self.assert_calls()
def test_update_aggregate_set_az(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['os-aggregates', '1']),
json=self.fake_aggregate),
dict(method='PUT',
uri=self.get_mock_url(
'compute', 'public', append=['os-aggregates', '1']),
json={'aggregate': self.fake_aggregate},
validate=dict(
json={
'aggregate': {
'availability_zone': 'az',
}})),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['os-aggregates', '1']
),
json=self.fake_aggregate,
),
dict(
method='PUT',
uri=self.get_mock_url(
'compute', 'public', append=['os-aggregates', '1']
),
json={'aggregate': self.fake_aggregate},
validate=dict(
json={
'aggregate': {
'availability_zone': 'az',
}
}
),
),
]
)
self.cloud.update_aggregate(1, availability_zone='az')
self.assert_calls()
def test_update_aggregate_unset_az(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['os-aggregates', '1']),
json=self.fake_aggregate),
dict(method='PUT',
uri=self.get_mock_url(
'compute', 'public', append=['os-aggregates', '1']),
json={'aggregate': self.fake_aggregate},
validate=dict(
json={
'aggregate': {
'availability_zone': None,
}})),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['os-aggregates', '1']
),
json=self.fake_aggregate,
),
dict(
method='PUT',
uri=self.get_mock_url(
'compute', 'public', append=['os-aggregates', '1']
),
json={'aggregate': self.fake_aggregate},
validate=dict(
json={
'aggregate': {
'availability_zone': None,
}
}
),
),
]
)
self.cloud.update_aggregate(1, availability_zone=None)
@ -146,57 +197,83 @@ class TestAggregate(base.TestCase):
def test_set_aggregate_metadata(self):
metadata = {'key': 'value'}
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['os-aggregates', '1']),
json=self.fake_aggregate),
dict(method='POST',
uri=self.get_mock_url(
'compute', 'public',
append=['os-aggregates', '1', 'action']),
json={'aggregate': self.fake_aggregate},
validate=dict(
json={'set_metadata': {'metadata': metadata}})),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['os-aggregates', '1']
),
json=self.fake_aggregate,
),
dict(
method='POST',
uri=self.get_mock_url(
'compute',
'public',
append=['os-aggregates', '1', 'action'],
),
json={'aggregate': self.fake_aggregate},
validate=dict(
json={'set_metadata': {'metadata': metadata}}
),
),
]
)
self.cloud.set_aggregate_metadata('1', metadata)
self.assert_calls()
def test_add_host_to_aggregate(self):
hostname = 'host1'
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['os-aggregates', '1']),
json=self.fake_aggregate),
dict(method='POST',
uri=self.get_mock_url(
'compute', 'public',
append=['os-aggregates', '1', 'action']),
json={'aggregate': self.fake_aggregate},
validate=dict(
json={'add_host': {'host': hostname}})),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['os-aggregates', '1']
),
json=self.fake_aggregate,
),
dict(
method='POST',
uri=self.get_mock_url(
'compute',
'public',
append=['os-aggregates', '1', 'action'],
),
json={'aggregate': self.fake_aggregate},
validate=dict(json={'add_host': {'host': hostname}}),
),
]
)
self.cloud.add_host_to_aggregate('1', hostname)
self.assert_calls()
def test_remove_host_from_aggregate(self):
hostname = 'host1'
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['os-aggregates', '1']),
json=self.fake_aggregate),
dict(method='POST',
uri=self.get_mock_url(
'compute', 'public',
append=['os-aggregates', '1', 'action']),
json={'aggregate': self.fake_aggregate},
validate=dict(
json={'remove_host': {'host': hostname}})),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['os-aggregates', '1']
),
json=self.fake_aggregate,
),
dict(
method='POST',
uri=self.get_mock_url(
'compute',
'public',
append=['os-aggregates', '1', 'action'],
),
json={'aggregate': self.fake_aggregate},
validate=dict(json={'remove_host': {'host': hostname}}),
),
]
)
self.cloud.remove_host_from_aggregate('1', hostname)
self.assert_calls()

View File

@ -17,62 +17,63 @@ from openstack.tests.unit import base
_fake_zone_list = {
"availabilityZoneInfo": [
{
"hosts": None,
"zoneName": "az1",
"zoneState": {
"available": True
}
},
{
"hosts": None,
"zoneName": "nova",
"zoneState": {
"available": False
}
}
{"hosts": None, "zoneName": "az1", "zoneState": {"available": True}},
{"hosts": None, "zoneName": "nova", "zoneState": {"available": False}},
]
}
class TestAvailabilityZoneNames(base.TestCase):
def test_list_availability_zone_names(self):
self.register_uris([
dict(method='GET',
uri='{endpoint}/os-availability-zone'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
json=_fake_zone_list),
])
self.register_uris(
[
dict(
method='GET',
uri='{endpoint}/os-availability-zone'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
json=_fake_zone_list,
),
]
)
self.assertEqual(
['az1'], self.cloud.list_availability_zone_names())
self.assertEqual(['az1'], self.cloud.list_availability_zone_names())
self.assert_calls()
def test_unauthorized_availability_zone_names(self):
self.register_uris([
dict(method='GET',
uri='{endpoint}/os-availability-zone'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
status_code=403),
])
self.register_uris(
[
dict(
method='GET',
uri='{endpoint}/os-availability-zone'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
status_code=403,
),
]
)
self.assertEqual(
[], self.cloud.list_availability_zone_names())
self.assertEqual([], self.cloud.list_availability_zone_names())
self.assert_calls()
def test_list_all_availability_zone_names(self):
self.register_uris([
dict(method='GET',
uri='{endpoint}/os-availability-zone'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
json=_fake_zone_list),
])
self.register_uris(
[
dict(
method='GET',
uri='{endpoint}/os-availability-zone'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
json=_fake_zone_list,
),
]
)
self.assertEqual(
['az1', 'nova'],
self.cloud.list_availability_zone_names(unavailable=True))
self.cloud.list_availability_zone_names(unavailable=True),
)
self.assert_calls()

File diff suppressed because it is too large Load Diff

View File

@ -25,28 +25,36 @@ from openstack.tests.unit import base
class TestBaremetalPort(base.IronicTestCase):
def setUp(self):
super(TestBaremetalPort, self).setUp()
self.fake_baremetal_node = fakes.make_fake_machine(
self.name, self.uuid)
self.name, self.uuid
)
# TODO(TheJulia): Some tests below have fake ports,
# since they are required in some processes. Lets refactor
# them at some point to use self.fake_baremetal_port.
self.fake_baremetal_port = fakes.make_fake_port(
'00:01:02:03:04:05',
node_id=self.uuid)
'00:01:02:03:04:05', node_id=self.uuid
)
self.fake_baremetal_port2 = fakes.make_fake_port(
'0a:0b:0c:0d:0e:0f',
node_id=self.uuid)
'0a:0b:0c:0d:0e:0f', node_id=self.uuid
)
def test_list_nics(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(resource='ports', append=['detail']),
json={'ports': [self.fake_baremetal_port,
self.fake_baremetal_port2]}),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(resource='ports', append=['detail']),
json={
'ports': [
self.fake_baremetal_port,
self.fake_baremetal_port2,
]
},
),
]
)
return_value = self.cloud.list_nics()
self.assertEqual(2, len(return_value))
@ -54,59 +62,86 @@ class TestBaremetalPort(base.IronicTestCase):
self.assert_calls()
def test_list_nics_failure(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(resource='ports', append=['detail']),
status_code=400)
])
self.assertRaises(exc.OpenStackCloudException,
self.cloud.list_nics)
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(resource='ports', append=['detail']),
status_code=400,
)
]
)
self.assertRaises(exc.OpenStackCloudException, self.cloud.list_nics)
self.assert_calls()
def test_list_nics_for_machine(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
resource='ports',
append=['detail'],
qs_elements=['node_uuid=%s' %
self.fake_baremetal_node['uuid']]),
json={'ports': [self.fake_baremetal_port,
self.fake_baremetal_port2]}),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
resource='ports',
append=['detail'],
qs_elements=[
'node_uuid=%s' % self.fake_baremetal_node['uuid']
],
),
json={
'ports': [
self.fake_baremetal_port,
self.fake_baremetal_port2,
]
},
),
]
)
return_value = self.cloud.list_nics_for_machine(
self.fake_baremetal_node['uuid'])
self.fake_baremetal_node['uuid']
)
self.assertEqual(2, len(return_value))
self.assertSubdict(self.fake_baremetal_port, return_value[0])
self.assert_calls()
def test_list_nics_for_machine_failure(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
resource='ports',
append=['detail'],
qs_elements=['node_uuid=%s' %
self.fake_baremetal_node['uuid']]),
status_code=400)
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
resource='ports',
append=['detail'],
qs_elements=[
'node_uuid=%s' % self.fake_baremetal_node['uuid']
],
),
status_code=400,
)
]
)
self.assertRaises(exc.OpenStackCloudException,
self.cloud.list_nics_for_machine,
self.fake_baremetal_node['uuid'])
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.list_nics_for_machine,
self.fake_baremetal_node['uuid'],
)
self.assert_calls()
def test_get_nic_by_mac(self):
mac = self.fake_baremetal_port['address']
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
resource='ports',
append=['detail'],
qs_elements=['address=%s' % mac]),
json={'ports': [self.fake_baremetal_port]}),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
resource='ports',
append=['detail'],
qs_elements=['address=%s' % mac],
),
json={'ports': [self.fake_baremetal_port]},
),
]
)
return_value = self.cloud.get_nic_by_mac(mac)
@ -115,14 +150,19 @@ class TestBaremetalPort(base.IronicTestCase):
def test_get_nic_by_mac_failure(self):
mac = self.fake_baremetal_port['address']
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
resource='ports',
append=['detail'],
qs_elements=['address=%s' % mac]),
json={'ports': []}),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
resource='ports',
append=['detail'],
qs_elements=['address=%s' % mac],
),
json={'ports': []},
),
]
)
self.assertIsNone(self.cloud.get_nic_by_mac(mac))

View File

@ -39,26 +39,23 @@ def _(msg):
_TASK_PROPERTIES = {
"id": {
"description": _("An identifier for the task"),
"pattern": _('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
'-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'),
"type": "string"
"pattern": _(
'^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
'-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'
),
"type": "string",
},
"type": {
"description": _("The type of task represented by this content"),
"enum": [
"import",
],
"type": "string"
"type": "string",
},
"status": {
"description": _("The current status of this task"),
"enum": [
"pending",
"processing",
"success",
"failure"
],
"type": "string"
"enum": ["pending", "processing", "success", "failure"],
"type": "string",
},
"input": {
"description": _("The parameters required by task, JSON blob"),
@ -70,50 +67,55 @@ _TASK_PROPERTIES = {
},
"owner": {
"description": _("An identifier for the owner of this task"),
"type": "string"
"type": "string",
},
"message": {
"description": _("Human-readable informative message only included"
" when appropriate (usually on failure)"),
"description": _(
"Human-readable informative message only included"
" when appropriate (usually on failure)"
),
"type": "string",
},
"expires_at": {
"description": _("Datetime when this resource would be"
" subject to removal"),
"type": ["null", "string"]
"description": _(
"Datetime when this resource would be" " subject to removal"
),
"type": ["null", "string"],
},
"created_at": {
"description": _("Datetime when this resource was created"),
"type": "string"
"type": "string",
},
"updated_at": {
"description": _("Datetime when this resource was updated"),
"type": "string"
"type": "string",
},
'self': {'type': 'string'},
'schema': {'type': 'string'}
'schema': {'type': 'string'},
}
_TASK_SCHEMA = dict(
name='Task', properties=_TASK_PROPERTIES,
name='Task',
properties=_TASK_PROPERTIES,
additionalProperties=False,
)
class TestMemoryCache(base.TestCase):
def setUp(self):
super(TestMemoryCache, self).setUp(
cloud_config_fixture='clouds_cache.yaml')
cloud_config_fixture='clouds_cache.yaml'
)
def _compare_images(self, exp, real):
self.assertDictEqual(
_image.Image(**exp).to_dict(computed=False),
real.to_dict(computed=False))
real.to_dict(computed=False),
)
def _compare_volumes(self, exp, real):
self.assertDictEqual(
_volume.Volume(**exp).to_dict(computed=False),
real.to_dict(computed=False)
real.to_dict(computed=False),
)
def test_openstack_cloud(self):
@ -122,13 +124,13 @@ class TestMemoryCache(base.TestCase):
def _compare_projects(self, exp, real):
self.assertDictEqual(
_project.Project(**exp).to_dict(computed=False),
real.to_dict(computed=False)
real.to_dict(computed=False),
)
def _compare_users(self, exp, real):
self.assertDictEqual(
_user.User(**exp).to_dict(computed=False),
real.to_dict(computed=False)
real.to_dict(computed=False),
)
def test_list_projects_v3(self):
@ -137,28 +139,42 @@ class TestMemoryCache(base.TestCase):
project_list = [project_one, project_two]
first_response = {'projects': [project_one.json_response['project']]}
second_response = {'projects': [p.json_response['project']
for p in project_list]}
second_response = {
'projects': [p.json_response['project'] for p in project_list]
}
mock_uri = self.get_mock_url(
service_type='identity', resource='projects',
base_url_append='v3')
service_type='identity', resource='projects', base_url_append='v3'
)
self.register_uris([
dict(method='GET', uri=mock_uri, status_code=200,
json=first_response),
dict(method='GET', uri=mock_uri, status_code=200,
json=second_response)])
self.register_uris(
[
dict(
method='GET',
uri=mock_uri,
status_code=200,
json=first_response,
),
dict(
method='GET',
uri=mock_uri,
status_code=200,
json=second_response,
),
]
)
for a, b in zip(first_response['projects'],
self.cloud.list_projects()):
for a, b in zip(
first_response['projects'], self.cloud.list_projects()
):
self._compare_projects(a, b)
# invalidate the list_projects cache
self.cloud.list_projects.invalidate(self.cloud)
for a, b in zip(second_response['projects'],
self.cloud.list_projects()):
for a, b in zip(
second_response['projects'], self.cloud.list_projects()
):
self._compare_projects(a, b)
self.assert_calls()
@ -166,13 +182,18 @@ class TestMemoryCache(base.TestCase):
def test_list_servers_no_herd(self):
self.cloud._SERVER_AGE = 2
fake_server = fakes.make_fake_server('1234', 'name')
self.register_uris([
self.get_nova_discovery_mock_dict(),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail']),
json={'servers': [fake_server]}),
])
self.register_uris(
[
self.get_nova_discovery_mock_dict(),
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail']
),
json={'servers': [fake_server]},
),
]
)
with concurrent.futures.ThreadPoolExecutor(16) as pool:
for i in range(16):
pool.submit(lambda: self.cloud.list_servers(bare=True))
@ -183,125 +204,180 @@ class TestMemoryCache(base.TestCase):
self.assert_calls()
def test_list_volumes(self):
fake_volume = fakes.FakeVolume('volume1', 'available',
'Volume 1 Display Name')
fake_volume = fakes.FakeVolume(
'volume1', 'available', 'Volume 1 Display Name'
)
fake_volume_dict = meta.obj_to_munch(fake_volume)
fake_volume2 = fakes.FakeVolume('volume2', 'available',
'Volume 2 Display Name')
fake_volume2 = fakes.FakeVolume(
'volume2', 'available', 'Volume 2 Display Name'
)
fake_volume2_dict = meta.obj_to_munch(fake_volume2)
self.register_uris([
self.get_cinder_discovery_mock_dict(),
dict(method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', 'detail']),
json={'volumes': [fake_volume_dict]}),
dict(method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', 'detail']),
json={'volumes': [fake_volume_dict, fake_volume2_dict]})])
self.register_uris(
[
self.get_cinder_discovery_mock_dict(),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', 'detail']
),
json={'volumes': [fake_volume_dict]},
),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', 'detail']
),
json={'volumes': [fake_volume_dict, fake_volume2_dict]},
),
]
)
for a, b in zip([fake_volume_dict],
self.cloud.list_volumes()):
for a, b in zip([fake_volume_dict], self.cloud.list_volumes()):
self._compare_volumes(a, b)
# this call should hit the cache
for a, b in zip([fake_volume_dict],
self.cloud.list_volumes()):
for a, b in zip([fake_volume_dict], self.cloud.list_volumes()):
self._compare_volumes(a, b)
self.cloud.list_volumes.invalidate(self.cloud)
for a, b in zip([fake_volume_dict, fake_volume2_dict],
self.cloud.list_volumes()):
for a, b in zip(
[fake_volume_dict, fake_volume2_dict], self.cloud.list_volumes()
):
self._compare_volumes(a, b)
self.assert_calls()
def test_list_volumes_creating_invalidates(self):
fake_volume = fakes.FakeVolume('volume1', 'creating',
'Volume 1 Display Name')
fake_volume = fakes.FakeVolume(
'volume1', 'creating', 'Volume 1 Display Name'
)
fake_volume_dict = meta.obj_to_munch(fake_volume)
fake_volume2 = fakes.FakeVolume('volume2', 'available',
'Volume 2 Display Name')
fake_volume2 = fakes.FakeVolume(
'volume2', 'available', 'Volume 2 Display Name'
)
fake_volume2_dict = meta.obj_to_munch(fake_volume2)
self.register_uris([
self.get_cinder_discovery_mock_dict(),
dict(method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', 'detail']),
json={'volumes': [fake_volume_dict]}),
dict(method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', 'detail']),
json={'volumes': [fake_volume_dict, fake_volume2_dict]})])
for a, b in zip([fake_volume_dict],
self.cloud.list_volumes()):
self.register_uris(
[
self.get_cinder_discovery_mock_dict(),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', 'detail']
),
json={'volumes': [fake_volume_dict]},
),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', 'detail']
),
json={'volumes': [fake_volume_dict, fake_volume2_dict]},
),
]
)
for a, b in zip([fake_volume_dict], self.cloud.list_volumes()):
self._compare_volumes(a, b)
for a, b in zip([fake_volume_dict, fake_volume2_dict],
self.cloud.list_volumes()):
for a, b in zip(
[fake_volume_dict, fake_volume2_dict], self.cloud.list_volumes()
):
self._compare_volumes(a, b)
self.assert_calls()
def test_create_volume_invalidates(self):
fake_volb4 = meta.obj_to_munch(
fakes.FakeVolume('volume1', 'available', ''))
fakes.FakeVolume('volume1', 'available', '')
)
_id = '12345'
fake_vol_creating = meta.obj_to_munch(
fakes.FakeVolume(_id, 'creating', ''))
fakes.FakeVolume(_id, 'creating', '')
)
fake_vol_avail = meta.obj_to_munch(
fakes.FakeVolume(_id, 'available', ''))
fakes.FakeVolume(_id, 'available', '')
)
def now_deleting(request, context):
fake_vol_avail['status'] = 'deleting'
self.register_uris([
self.get_cinder_discovery_mock_dict(),
dict(method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', 'detail']),
json={'volumes': [fake_volb4]}),
dict(method='POST',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes']),
json={'volume': fake_vol_creating}),
dict(method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', _id]),
json={'volume': fake_vol_creating}),
dict(method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', _id]),
json={'volume': fake_vol_avail}),
dict(method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', 'detail']),
json={'volumes': [fake_volb4, fake_vol_avail]}),
dict(method='GET',
uri=self.get_mock_url(
'volumev3', 'public',
append=['volumes', _id]),
json={'volume': fake_vol_avail}),
dict(method='DELETE',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', _id]),
json=now_deleting),
dict(method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', _id]),
status_code=404),
dict(method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', 'detail']),
json={'volumes': [fake_volb4, fake_vol_avail]}),
])
self.register_uris(
[
self.get_cinder_discovery_mock_dict(),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', 'detail']
),
json={'volumes': [fake_volb4]},
),
dict(
method='POST',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes']
),
json={'volume': fake_vol_creating},
),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', _id]
),
json={'volume': fake_vol_creating},
),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', _id]
),
json={'volume': fake_vol_avail},
),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', 'detail']
),
json={'volumes': [fake_volb4, fake_vol_avail]},
),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', _id]
),
json={'volume': fake_vol_avail},
),
dict(
method='DELETE',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', _id]
),
json=now_deleting,
),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', _id]
),
status_code=404,
),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', 'detail']
),
json={'volumes': [fake_volb4, fake_vol_avail]},
),
]
)
for a, b in zip([fake_volb4], self.cloud.list_volumes()):
self._compare_volumes(a, b)
volume = dict(display_name='junk_vol',
size=1,
display_description='test junk volume')
volume = dict(
display_name='junk_vol',
size=1,
display_description='test junk volume',
)
self.cloud.create_volume(wait=True, timeout=2, **volume)
# If cache was not invalidated, we would not see our own volume here
# because the first volume was available and thus would already be
# cached.
for a, b in zip([fake_volb4, fake_vol_avail],
self.cloud.list_volumes()):
for a, b in zip(
[fake_volb4, fake_vol_avail], self.cloud.list_volumes()
):
self._compare_volumes(a, b)
self.cloud.delete_volume(_id)
# And now delete and check same thing since list is cached as all
@ -312,14 +388,20 @@ class TestMemoryCache(base.TestCase):
def test_list_users(self):
user_data = self._get_user_data(email='test@example.com')
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
service_type='identity',
resource='users',
base_url_append='v3'),
status_code=200,
json={'users': [user_data.json_response['user']]})])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
service_type='identity',
resource='users',
base_url_append='v3',
),
status_code=200,
json={'users': [user_data.json_response['user']]},
)
]
)
users = self.cloud.list_users()
self.assertEqual(1, len(users))
self.assertEqual(user_data.user_id, users[0]['id'])
@ -336,14 +418,14 @@ class TestMemoryCache(base.TestCase):
new_req = {'user': {'email': new_resp['user']['email']}}
mock_users_url = self.get_mock_url(
service_type='identity',
interface='admin',
resource='users')
service_type='identity', interface='admin', resource='users'
)
mock_user_resource_url = self.get_mock_url(
service_type='identity',
interface='admin',
resource='users',
append=[user_data.user_id])
append=[user_data.user_id],
)
empty_user_list_resp = {'users': []}
users_list_resp = {'users': [user_data.json_response['user']]}
@ -354,35 +436,68 @@ class TestMemoryCache(base.TestCase):
uris_to_mock = [
# Inital User List is Empty
dict(method='GET', uri=mock_users_url, status_code=200,
json=empty_user_list_resp),
dict(
method='GET',
uri=mock_users_url,
status_code=200,
json=empty_user_list_resp,
),
# POST to create the user
# GET to get the user data after POST
dict(method='POST', uri=mock_users_url, status_code=200,
json=user_data.json_response,
validate=dict(json=user_data.json_request)),
dict(
method='POST',
uri=mock_users_url,
status_code=200,
json=user_data.json_response,
validate=dict(json=user_data.json_request),
),
# List Users Call
dict(method='GET', uri=mock_users_url, status_code=200,
json=users_list_resp),
dict(
method='GET',
uri=mock_users_url,
status_code=200,
json=users_list_resp,
),
# List users to get ID for update
# Get user using user_id from list
# Update user
# Get updated user
dict(method='GET', uri=mock_users_url, status_code=200,
json=users_list_resp),
dict(method='PUT', uri=mock_user_resource_url, status_code=200,
json=new_resp, validate=dict(json=new_req)),
dict(
method='GET',
uri=mock_users_url,
status_code=200,
json=users_list_resp,
),
dict(
method='PUT',
uri=mock_user_resource_url,
status_code=200,
json=new_resp,
validate=dict(json=new_req),
),
# List Users Call
dict(method='GET', uri=mock_users_url, status_code=200,
json=updated_users_list_resp),
dict(
method='GET',
uri=mock_users_url,
status_code=200,
json=updated_users_list_resp,
),
# List User to get ID for delete
# delete user
dict(method='GET', uri=mock_users_url, status_code=200,
json=updated_users_list_resp),
dict(
method='GET',
uri=mock_users_url,
status_code=200,
json=updated_users_list_resp,
),
dict(method='DELETE', uri=mock_user_resource_url, status_code=204),
# List Users Call (empty post delete)
dict(method='GET', uri=mock_users_url, status_code=200,
json=empty_user_list_resp)
dict(
method='GET',
uri=mock_users_url,
status_code=200,
json=empty_user_list_resp,
),
]
self.register_uris(uris_to_mock)
@ -391,8 +506,9 @@ class TestMemoryCache(base.TestCase):
self.assertEqual([], self.cloud.list_users())
# now add one
created = self.cloud.create_user(name=user_data.name,
email=user_data.email)
created = self.cloud.create_user(
name=user_data.name, email=user_data.email
)
self.assertEqual(user_data.user_id, created['id'])
self.assertEqual(user_data.name, created['name'])
self.assertEqual(user_data.email, created['email'])
@ -403,8 +519,9 @@ class TestMemoryCache(base.TestCase):
self.assertEqual(user_data.email, users[0]['email'])
# Update and check to see if it is updated
updated = self.cloud.update_user(user_data.user_id,
email=new_resp['user']['email'])
updated = self.cloud.update_user(
user_data.user_id, email=new_resp['user']['email']
)
self.assertEqual(user_data.user_id, updated.id)
self.assertEqual(user_data.name, updated.name)
self.assertEqual(new_resp['user']['email'], updated.email)
@ -420,17 +537,26 @@ class TestMemoryCache(base.TestCase):
def test_list_flavors(self):
mock_uri = '{endpoint}/flavors/detail?is_public=None'.format(
endpoint=fakes.COMPUTE_ENDPOINT)
endpoint=fakes.COMPUTE_ENDPOINT
)
uris_to_mock = [
dict(method='GET', uri=mock_uri,
validate=dict(
headers={'OpenStack-API-Version': 'compute 2.53'}),
json={'flavors': []}),
dict(method='GET', uri=mock_uri,
validate=dict(
headers={'OpenStack-API-Version': 'compute 2.53'}),
json={'flavors': fakes.FAKE_FLAVOR_LIST})
dict(
method='GET',
uri=mock_uri,
validate=dict(
headers={'OpenStack-API-Version': 'compute 2.53'}
),
json={'flavors': []},
),
dict(
method='GET',
uri=mock_uri,
validate=dict(
headers={'OpenStack-API-Version': 'compute 2.53'}
),
json={'flavors': fakes.FAKE_FLAVOR_LIST},
),
]
self.use_compute_discovery()
@ -442,9 +568,7 @@ class TestMemoryCache(base.TestCase):
self.cloud.list_flavors.invalidate(self.cloud)
self.assertResourceListEqual(
self.cloud.list_flavors(),
fakes.FAKE_FLAVOR_LIST,
_flavor.Flavor
self.cloud.list_flavors(), fakes.FAKE_FLAVOR_LIST, _flavor.Flavor
)
self.assert_calls()
@ -454,23 +578,32 @@ class TestMemoryCache(base.TestCase):
self.use_glance()
fake_image = fakes.make_fake_image(image_id='42')
self.register_uris([
dict(method='GET',
uri=self.get_mock_url('image', 'public',
append=['v2', 'images']),
json={'images': []}),
dict(method='GET',
uri=self.get_mock_url('image', 'public',
append=['v2', 'images']),
json={'images': [fake_image]}),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'image', 'public', append=['v2', 'images']
),
json={'images': []},
),
dict(
method='GET',
uri=self.get_mock_url(
'image', 'public', append=['v2', 'images']
),
json={'images': [fake_image]},
),
]
)
self.assertEqual([], self.cloud.list_images())
self.assertEqual([], self.cloud.list_images())
self.cloud.list_images.invalidate(self.cloud)
[self._compare_images(a, b) for a, b in zip(
[fake_image],
self.cloud.list_images())]
[
self._compare_images(a, b)
for a, b in zip([fake_image], self.cloud.list_images())
]
self.assert_calls()
@ -479,23 +612,30 @@ class TestMemoryCache(base.TestCase):
deleted_image_id = self.getUniqueString()
deleted_image = fakes.make_fake_image(
image_id=deleted_image_id, status='deleted')
image_id=deleted_image_id, status='deleted'
)
active_image_id = self.getUniqueString()
active_image = fakes.make_fake_image(image_id=active_image_id)
list_return = {'images': [active_image, deleted_image]}
self.register_uris([
dict(method='GET',
uri='https://image.example.com/v2/images',
json=list_return),
])
self.register_uris(
[
dict(
method='GET',
uri='https://image.example.com/v2/images',
json=list_return,
),
]
)
[self._compare_images(a, b) for a, b in zip(
[active_image],
self.cloud.list_images())]
[
self._compare_images(a, b)
for a, b in zip([active_image], self.cloud.list_images())
]
[self._compare_images(a, b) for a, b in zip(
[active_image],
self.cloud.list_images())]
[
self._compare_images(a, b)
for a, b in zip([active_image], self.cloud.list_images())
]
# We should only have one call
self.assert_calls()
@ -507,29 +647,38 @@ class TestMemoryCache(base.TestCase):
fi = fakes.make_fake_image(image_id=self.getUniqueString())
fi2 = fakes.make_fake_image(image_id=self.getUniqueString())
self.register_uris([
dict(method='GET',
uri='https://image.example.com/v2/images',
json={'images': [fi]}),
dict(method='GET',
uri='https://image.example.com/v2/images',
json={'images': [fi, fi2]}),
])
self.register_uris(
[
dict(
method='GET',
uri='https://image.example.com/v2/images',
json={'images': [fi]},
),
dict(
method='GET',
uri='https://image.example.com/v2/images',
json={'images': [fi, fi2]},
),
]
)
[self._compare_images(a, b) for a, b in zip(
[fi],
self.cloud.list_images())]
[
self._compare_images(a, b)
for a, b in zip([fi], self.cloud.list_images())
]
# Now test that the list was cached
[self._compare_images(a, b) for a, b in zip(
[fi],
self.cloud.list_images())]
[
self._compare_images(a, b)
for a, b in zip([fi], self.cloud.list_images())
]
# Invalidation too
self.cloud.list_images.invalidate(self.cloud)
[self._compare_images(a, b) for a, b in zip(
[fi, fi2],
self.cloud.list_images())]
[
self._compare_images(a, b)
for a, b in zip([fi, fi2], self.cloud.list_images())
]
def test_list_ports_filtered(self):
down_port = test_port.TestPort.mock_neutron_port_create_rep['port']
@ -537,21 +686,31 @@ class TestMemoryCache(base.TestCase):
active_port['status'] = 'ACTIVE'
# We're testing to make sure a query string is passed when we're
# caching (cache by url), and that the results are still filtered.
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports'],
qs_elements=['status=DOWN']),
json={'ports': [
down_port,
active_port,
]}),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'ports'],
qs_elements=['status=DOWN'],
),
json={
'ports': [
down_port,
active_port,
]
},
),
]
)
ports = self.cloud.list_ports(filters={'status': 'DOWN'})
for a, b in zip([down_port], ports):
self.assertDictEqual(
_port.Port(**a).to_dict(computed=False),
b.to_dict(computed=False))
b.to_dict(computed=False),
)
self.assert_calls()
@ -565,41 +724,56 @@ class TestCacheIgnoresQueuedStatus(base.TestCase):
def setUp(self):
super(TestCacheIgnoresQueuedStatus, self).setUp(
cloud_config_fixture='clouds_cache.yaml')
cloud_config_fixture='clouds_cache.yaml'
)
self.use_glance()
active_image_id = self.getUniqueString()
self.active_image = fakes.make_fake_image(
image_id=active_image_id, status=self.status)
image_id=active_image_id, status=self.status
)
self.active_list_return = {'images': [self.active_image]}
steady_image_id = self.getUniqueString()
self.steady_image = fakes.make_fake_image(image_id=steady_image_id)
self.steady_list_return = {
'images': [self.active_image, self.steady_image]}
'images': [self.active_image, self.steady_image]
}
def _compare_images(self, exp, real):
self.assertDictEqual(
_image.Image(**exp).to_dict(computed=False),
real.to_dict(computed=False))
real.to_dict(computed=False),
)
def test_list_images_ignores_pending_status(self):
self.register_uris([
dict(method='GET',
uri='https://image.example.com/v2/images',
json=self.active_list_return),
dict(method='GET',
uri='https://image.example.com/v2/images',
json=self.steady_list_return),
])
self.register_uris(
[
dict(
method='GET',
uri='https://image.example.com/v2/images',
json=self.active_list_return,
),
dict(
method='GET',
uri='https://image.example.com/v2/images',
json=self.steady_list_return,
),
]
)
[self._compare_images(a, b) for a, b in zip(
[self.active_image],
self.cloud.list_images())]
[
self._compare_images(a, b)
for a, b in zip([self.active_image], self.cloud.list_images())
]
# Should expect steady_image to appear if active wasn't cached
[self._compare_images(a, b) for a, b in zip(
[self.active_image, self.steady_image],
self.cloud.list_images())]
[
self._compare_images(a, b)
for a, b in zip(
[self.active_image, self.steady_image],
self.cloud.list_images(),
)
]
class TestCacheSteadyStatus(base.TestCase):
@ -611,45 +785,53 @@ class TestCacheSteadyStatus(base.TestCase):
def setUp(self):
super(TestCacheSteadyStatus, self).setUp(
cloud_config_fixture='clouds_cache.yaml')
cloud_config_fixture='clouds_cache.yaml'
)
self.use_glance()
active_image_id = self.getUniqueString()
self.active_image = fakes.make_fake_image(
image_id=active_image_id, status=self.status)
image_id=active_image_id, status=self.status
)
self.active_list_return = {'images': [self.active_image]}
def _compare_images(self, exp, real):
self.assertDictEqual(
_image.Image(**exp).to_dict(computed=False),
real.to_dict(computed=False))
real.to_dict(computed=False),
)
def test_list_images_caches_steady_status(self):
self.register_uris([
dict(method='GET',
uri='https://image.example.com/v2/images',
json=self.active_list_return),
])
self.register_uris(
[
dict(
method='GET',
uri='https://image.example.com/v2/images',
json=self.active_list_return,
),
]
)
[self._compare_images(a, b) for a, b in zip(
[self.active_image],
self.cloud.list_images())]
[
self._compare_images(a, b)
for a, b in zip([self.active_image], self.cloud.list_images())
]
[self._compare_images(a, b) for a, b in zip(
[self.active_image],
self.cloud.list_images())]
[
self._compare_images(a, b)
for a, b in zip([self.active_image], self.cloud.list_images())
]
# We should only have one call
self.assert_calls()
class TestBogusAuth(base.TestCase):
def setUp(self):
super(TestBogusAuth, self).setUp(
cloud_config_fixture='clouds_cache.yaml')
cloud_config_fixture='clouds_cache.yaml'
)
def test_get_auth_bogus(self):
with testtools.ExpectedException(exceptions.ConfigException):
openstack.connect(
cloud='_bogus_test_', config=self.config)
openstack.connect(cloud='_bogus_test_', config=self.config)

View File

@ -49,7 +49,6 @@ cluster_template_obj = dict(
class TestClusterTemplates(base.TestCase):
def _compare_clustertemplates(self, exp, real):
self.assertDictEqual(
cluster_template.ClusterTemplate(**exp).to_dict(computed=False),
@ -57,20 +56,30 @@ class TestClusterTemplates(base.TestCase):
)
def get_mock_url(
self,
service_type='container-infrastructure-management',
base_url_append=None, append=None, resource=None):
self,
service_type='container-infrastructure-management',
base_url_append=None,
append=None,
resource=None,
):
return super(TestClusterTemplates, self).get_mock_url(
service_type=service_type, resource=resource,
append=append, base_url_append=base_url_append)
service_type=service_type,
resource=resource,
append=append,
base_url_append=base_url_append,
)
def test_list_cluster_templates_without_detail(self):
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(resource='clustertemplates'),
json=dict(clustertemplates=[cluster_template_obj]))])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(resource='clustertemplates'),
json=dict(clustertemplates=[cluster_template_obj]),
)
]
)
cluster_templates_list = self.cloud.list_cluster_templates()
self._compare_clustertemplates(
cluster_template_obj,
@ -79,11 +88,15 @@ class TestClusterTemplates(base.TestCase):
self.assert_calls()
def test_list_cluster_templates_with_detail(self):
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(resource='clustertemplates'),
json=dict(clustertemplates=[cluster_template_obj]))])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(resource='clustertemplates'),
json=dict(clustertemplates=[cluster_template_obj]),
)
]
)
cluster_templates_list = self.cloud.list_cluster_templates(detail=True)
self._compare_clustertemplates(
cluster_template_obj,
@ -92,14 +105,19 @@ class TestClusterTemplates(base.TestCase):
self.assert_calls()
def test_search_cluster_templates_by_name(self):
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(resource='clustertemplates'),
json=dict(clustertemplates=[cluster_template_obj]))])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(resource='clustertemplates'),
json=dict(clustertemplates=[cluster_template_obj]),
)
]
)
cluster_templates = self.cloud.search_cluster_templates(
name_or_id='fake-cluster-template')
name_or_id='fake-cluster-template'
)
self.assertEqual(1, len(cluster_templates))
self.assertEqual('fake-uuid', cluster_templates[0]['uuid'])
@ -107,24 +125,33 @@ class TestClusterTemplates(base.TestCase):
def test_search_cluster_templates_not_found(self):
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(resource='clustertemplates'),
json=dict(clustertemplates=[cluster_template_obj]))])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(resource='clustertemplates'),
json=dict(clustertemplates=[cluster_template_obj]),
)
]
)
cluster_templates = self.cloud.search_cluster_templates(
name_or_id='non-existent')
name_or_id='non-existent'
)
self.assertEqual(0, len(cluster_templates))
self.assert_calls()
def test_get_cluster_template(self):
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(resource='clustertemplates'),
json=dict(clustertemplates=[cluster_template_obj]))])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(resource='clustertemplates'),
json=dict(clustertemplates=[cluster_template_obj]),
)
]
)
r = self.cloud.get_cluster_template('fake-cluster-template')
self.assertIsNotNone(r)
@ -135,41 +162,52 @@ class TestClusterTemplates(base.TestCase):
self.assert_calls()
def test_get_cluster_template_not_found(self):
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(resource='clustertemplates'),
json=dict(clustertemplates=[]))])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(resource='clustertemplates'),
json=dict(clustertemplates=[]),
)
]
)
r = self.cloud.get_cluster_template('doesNotExist')
self.assertIsNone(r)
self.assert_calls()
def test_create_cluster_template(self):
json_response = cluster_template_obj.copy()
kwargs = dict(name=cluster_template_obj['name'],
image_id=cluster_template_obj['image_id'],
keypair_id=cluster_template_obj['keypair_id'],
coe=cluster_template_obj['coe'])
self.register_uris([
dict(
method='POST',
uri=self.get_mock_url(resource='clustertemplates'),
json=json_response,
validate=dict(json=kwargs))])
response = self.cloud.create_cluster_template(**kwargs)
self._compare_clustertemplates(
json_response,
response
kwargs = dict(
name=cluster_template_obj['name'],
image_id=cluster_template_obj['image_id'],
keypair_id=cluster_template_obj['keypair_id'],
coe=cluster_template_obj['coe'],
)
self.register_uris(
[
dict(
method='POST',
uri=self.get_mock_url(resource='clustertemplates'),
json=json_response,
validate=dict(json=kwargs),
)
]
)
response = self.cloud.create_cluster_template(**kwargs)
self._compare_clustertemplates(json_response, response)
self.assert_calls()
def test_create_cluster_template_exception(self):
self.register_uris([
dict(
method='POST',
uri=self.get_mock_url(resource='clustertemplates'),
status_code=403)])
self.register_uris(
[
dict(
method='POST',
uri=self.get_mock_url(resource='clustertemplates'),
status_code=403,
)
]
)
# TODO(mordred) requests here doens't give us a great story
# for matching the old error message text. Investigate plumbing
# an error message in to the adapter call so that we can give a
@ -177,54 +215,72 @@ class TestClusterTemplates(base.TestCase):
# OpenStackCloudException - but for some reason testtools will not
# match the more specific HTTPError, even though it's a subclass
# of OpenStackCloudException.
with testtools.ExpectedException(
exceptions.ForbiddenException):
with testtools.ExpectedException(exceptions.ForbiddenException):
self.cloud.create_cluster_template('fake-cluster-template')
self.assert_calls()
def test_delete_cluster_template(self):
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(resource='clustertemplates'),
json=dict(clustertemplates=[cluster_template_obj])),
dict(
method='DELETE',
uri=self.get_mock_url(resource='clustertemplates/fake-uuid')),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(resource='clustertemplates'),
json=dict(clustertemplates=[cluster_template_obj]),
),
dict(
method='DELETE',
uri=self.get_mock_url(
resource='clustertemplates/fake-uuid'
),
),
]
)
self.cloud.delete_cluster_template('fake-uuid')
self.assert_calls()
def test_update_cluster_template(self):
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(resource='clustertemplates'),
json=dict(clustertemplates=[cluster_template_obj])),
dict(
method='PATCH',
uri=self.get_mock_url(resource='clustertemplates/fake-uuid'),
status_code=200,
validate=dict(
json=[{
'op': 'replace',
'path': '/name',
'value': 'new-cluster-template'
}]
)),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(resource='clustertemplates'),
json=dict(clustertemplates=[cluster_template_obj]),
),
dict(
method='PATCH',
uri=self.get_mock_url(
resource='clustertemplates/fake-uuid'
),
status_code=200,
validate=dict(
json=[
{
'op': 'replace',
'path': '/name',
'value': 'new-cluster-template',
}
]
),
),
]
)
new_name = 'new-cluster-template'
updated = self.cloud.update_cluster_template(
'fake-uuid', name=new_name)
'fake-uuid', name=new_name
)
self.assertEqual(new_name, updated.name)
self.assert_calls()
def test_coe_get_cluster_template(self):
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(resource='clustertemplates'),
json=dict(clustertemplates=[cluster_template_obj]))])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(resource='clustertemplates'),
json=dict(clustertemplates=[cluster_template_obj]),
)
]
)
r = self.cloud.get_cluster_template('fake-cluster-template')
self.assertIsNotNone(r)

View File

@ -24,14 +24,10 @@ CLUSTERING_DICT = {
'max_size': 1,
'min_size': 1,
'timeout': 100,
'metadata': {}
'metadata': {},
}
PROFILE_DICT = {
'name': 'fake-profile-name',
'spec': {},
'metadata': {}
}
PROFILE_DICT = {'name': 'fake-profile-name', 'spec': {}, 'metadata': {}}
POLICY_DICT = {
'name': 'fake-profile-name',
@ -43,7 +39,7 @@ RECEIVER_DICT = {
'cluster_id': 'fake-cluster-id',
'name': 'fake-receiver-name',
'params': {},
'type': 'webhook'
'type': 'webhook',
}
NEW_CLUSTERING_DICT = copy.copy(CLUSTERING_DICT)
@ -57,7 +53,6 @@ NEW_RECEIVER_DICT['id'] = '1'
class TestClustering(base.TestCase):
def assertAreInstances(self, elements, elem_type):
for e in elements:
self.assertIsInstance(e, elem_type)
@ -65,12 +60,14 @@ class TestClustering(base.TestCase):
def _compare_clusters(self, exp, real):
self.assertEqual(
cluster.Cluster(**exp).to_dict(computed=False),
real.to_dict(computed=False))
real.to_dict(computed=False),
)
def setUp(self):
super(TestClustering, self).setUp()
self.use_senlin()
# def test_create_cluster(self):
# self.register_uris([
# dict(method='GET',

View File

@ -212,7 +212,5 @@ class TestCOEClusters(base.TestCase):
),
]
)
self.cloud.update_coe_cluster(
coe_cluster_obj["uuid"], node_count=3
)
self.cloud.update_coe_cluster(coe_cluster_obj["uuid"], node_count=3)
self.assert_calls()

View File

@ -12,7 +12,7 @@
from openstack.container_infrastructure_management.v1 import (
cluster_certificate
cluster_certificate,
)
from openstack.tests.unit import base
@ -20,7 +20,7 @@ coe_cluster_ca_obj = dict(
cluster_uuid="43e305ce-3a5f-412a-8a14-087834c34c8c",
pem="-----BEGIN CERTIFICATE-----\nMIIDAO\n-----END CERTIFICATE-----\n",
bay_uuid="43e305ce-3a5f-412a-8a14-087834c34c8c",
links=[]
links=[],
)
coe_cluster_signed_cert_obj = dict(
@ -28,50 +28,72 @@ coe_cluster_signed_cert_obj = dict(
pem='-----BEGIN CERTIFICATE-----\nMIIDAO\n-----END CERTIFICATE-----',
bay_uuid='43e305ce-3a5f-412a-8a14-087834c34c8c',
links=[],
csr=('-----BEGIN CERTIFICATE REQUEST-----\nMIICfz=='
'\n-----END CERTIFICATE REQUEST-----\n')
csr=(
'-----BEGIN CERTIFICATE REQUEST-----\nMIICfz=='
'\n-----END CERTIFICATE REQUEST-----\n'
),
)
class TestCOEClusters(base.TestCase):
def _compare_cluster_certs(self, exp, real):
self.assertDictEqual(
cluster_certificate.ClusterCertificate(
**exp).to_dict(computed=False),
cluster_certificate.ClusterCertificate(**exp).to_dict(
computed=False
),
real.to_dict(computed=False),
)
def get_mock_url(
self,
service_type='container-infrastructure-management',
base_url_append=None, append=None, resource=None):
self,
service_type='container-infrastructure-management',
base_url_append=None,
append=None,
resource=None,
):
return super(TestCOEClusters, self).get_mock_url(
service_type=service_type, resource=resource,
append=append, base_url_append=base_url_append)
service_type=service_type,
resource=resource,
append=append,
base_url_append=base_url_append,
)
def test_get_coe_cluster_certificate(self):
self.register_uris([dict(
method='GET',
uri=self.get_mock_url(
resource='certificates',
append=[coe_cluster_ca_obj['cluster_uuid']]),
json=coe_cluster_ca_obj)
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
resource='certificates',
append=[coe_cluster_ca_obj['cluster_uuid']],
),
json=coe_cluster_ca_obj,
)
]
)
ca_cert = self.cloud.get_coe_cluster_certificate(
coe_cluster_ca_obj['cluster_uuid'])
self._compare_cluster_certs(
coe_cluster_ca_obj,
ca_cert)
coe_cluster_ca_obj['cluster_uuid']
)
self._compare_cluster_certs(coe_cluster_ca_obj, ca_cert)
self.assert_calls()
def test_sign_coe_cluster_certificate(self):
self.register_uris([dict(
method='POST',
uri=self.get_mock_url(resource='certificates'),
json={"cluster_uuid": coe_cluster_signed_cert_obj['cluster_uuid'],
"csr": coe_cluster_signed_cert_obj['csr']}
)])
self.register_uris(
[
dict(
method='POST',
uri=self.get_mock_url(resource='certificates'),
json={
"cluster_uuid": coe_cluster_signed_cert_obj[
'cluster_uuid'
],
"csr": coe_cluster_signed_cert_obj['csr'],
},
)
]
)
self.cloud.sign_coe_cluster_certificate(
coe_cluster_signed_cert_obj['cluster_uuid'],
coe_cluster_signed_cert_obj['csr'])
coe_cluster_signed_cert_obj['csr'],
)
self.assert_calls()

File diff suppressed because it is too large Load Diff

View File

@ -25,7 +25,6 @@ from openstack.tests.unit import base
class TestCreateVolumeSnapshot(base.TestCase):
def setUp(self):
super(TestCreateVolumeSnapshot, self).setUp()
self.use_cinder()
@ -33,7 +32,8 @@ class TestCreateVolumeSnapshot(base.TestCase):
def _compare_snapshots(self, exp, real):
self.assertDictEqual(
snapshot.Snapshot(**exp).to_dict(computed=False),
real.to_dict(computed=False))
real.to_dict(computed=False),
)
def test_create_volume_snapshot_wait(self):
"""
@ -42,32 +42,46 @@ class TestCreateVolumeSnapshot(base.TestCase):
"""
snapshot_id = '5678'
volume_id = '1234'
build_snapshot = fakes.FakeVolumeSnapshot(snapshot_id, 'creating',
'foo', 'derpysnapshot')
build_snapshot = fakes.FakeVolumeSnapshot(
snapshot_id, 'creating', 'foo', 'derpysnapshot'
)
build_snapshot_dict = meta.obj_to_munch(build_snapshot)
fake_snapshot = fakes.FakeVolumeSnapshot(snapshot_id, 'available',
'foo', 'derpysnapshot')
fake_snapshot = fakes.FakeVolumeSnapshot(
snapshot_id, 'available', 'foo', 'derpysnapshot'
)
fake_snapshot_dict = meta.obj_to_munch(fake_snapshot)
self.register_uris([
dict(method='POST',
uri=self.get_mock_url(
'volumev3', 'public', append=['snapshots']),
json={'snapshot': build_snapshot_dict},
validate=dict(json={
'snapshot': {'volume_id': '1234'}})),
dict(method='GET',
uri=self.get_mock_url('volumev3', 'public',
append=['snapshots', snapshot_id]),
json={'snapshot': build_snapshot_dict}),
dict(method='GET',
uri=self.get_mock_url('volumev3', 'public',
append=['snapshots', snapshot_id]),
json={'snapshot': fake_snapshot_dict})])
self.register_uris(
[
dict(
method='POST',
uri=self.get_mock_url(
'volumev3', 'public', append=['snapshots']
),
json={'snapshot': build_snapshot_dict},
validate=dict(json={'snapshot': {'volume_id': '1234'}}),
),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['snapshots', snapshot_id]
),
json={'snapshot': build_snapshot_dict},
),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['snapshots', snapshot_id]
),
json={'snapshot': fake_snapshot_dict},
),
]
)
self._compare_snapshots(
fake_snapshot_dict,
self.cloud.create_volume_snapshot(volume_id=volume_id, wait=True))
self.cloud.create_volume_snapshot(volume_id=volume_id, wait=True),
)
self.assert_calls()
def test_create_volume_snapshot_with_timeout(self):
@ -77,26 +91,38 @@ class TestCreateVolumeSnapshot(base.TestCase):
"""
snapshot_id = '5678'
volume_id = '1234'
build_snapshot = fakes.FakeVolumeSnapshot(snapshot_id, 'creating',
'foo', 'derpysnapshot')
build_snapshot = fakes.FakeVolumeSnapshot(
snapshot_id, 'creating', 'foo', 'derpysnapshot'
)
build_snapshot_dict = meta.obj_to_munch(build_snapshot)
self.register_uris([
dict(method='POST',
uri=self.get_mock_url(
'volumev3', 'public', append=['snapshots']),
json={'snapshot': build_snapshot_dict},
validate=dict(json={
'snapshot': {'volume_id': '1234'}})),
dict(method='GET',
uri=self.get_mock_url('volumev3', 'public',
append=['snapshots', snapshot_id]),
json={'snapshot': build_snapshot_dict})])
self.register_uris(
[
dict(
method='POST',
uri=self.get_mock_url(
'volumev3', 'public', append=['snapshots']
),
json={'snapshot': build_snapshot_dict},
validate=dict(json={'snapshot': {'volume_id': '1234'}}),
),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['snapshots', snapshot_id]
),
json={'snapshot': build_snapshot_dict},
),
]
)
self.assertRaises(
exc.OpenStackCloudTimeout,
self.cloud.create_volume_snapshot, volume_id=volume_id,
wait=True, timeout=0.01)
self.cloud.create_volume_snapshot,
volume_id=volume_id,
wait=True,
timeout=0.01,
)
self.assert_calls(do_count=False)
def test_create_volume_snapshot_with_error(self):
@ -106,31 +132,47 @@ class TestCreateVolumeSnapshot(base.TestCase):
"""
snapshot_id = '5678'
volume_id = '1234'
build_snapshot = fakes.FakeVolumeSnapshot(snapshot_id, 'creating',
'bar', 'derpysnapshot')
build_snapshot = fakes.FakeVolumeSnapshot(
snapshot_id, 'creating', 'bar', 'derpysnapshot'
)
build_snapshot_dict = meta.obj_to_munch(build_snapshot)
error_snapshot = fakes.FakeVolumeSnapshot(snapshot_id, 'error',
'blah', 'derpysnapshot')
error_snapshot = fakes.FakeVolumeSnapshot(
snapshot_id, 'error', 'blah', 'derpysnapshot'
)
error_snapshot_dict = meta.obj_to_munch(error_snapshot)
self.register_uris([
dict(method='POST',
uri=self.get_mock_url(
'volumev3', 'public', append=['snapshots']),
json={'snapshot': build_snapshot_dict},
validate=dict(json={
'snapshot': {'volume_id': '1234'}})),
dict(method='GET',
uri=self.get_mock_url('volumev3', 'public',
append=['snapshots', snapshot_id]),
json={'snapshot': build_snapshot_dict}),
dict(method='GET',
uri=self.get_mock_url('volumev3', 'public',
append=['snapshots', snapshot_id]),
json={'snapshot': error_snapshot_dict})])
self.register_uris(
[
dict(
method='POST',
uri=self.get_mock_url(
'volumev3', 'public', append=['snapshots']
),
json={'snapshot': build_snapshot_dict},
validate=dict(json={'snapshot': {'volume_id': '1234'}}),
),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['snapshots', snapshot_id]
),
json={'snapshot': build_snapshot_dict},
),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['snapshots', snapshot_id]
),
json={'snapshot': error_snapshot_dict},
),
]
)
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.create_volume_snapshot, volume_id=volume_id,
wait=True, timeout=5)
self.cloud.create_volume_snapshot,
volume_id=volume_id,
wait=True,
timeout=5,
)
self.assert_calls()

View File

@ -24,27 +24,39 @@ from openstack.tests.unit import base
class TestDeleteServer(base.TestCase):
def test_delete_server(self):
"""
Test that server delete is called when wait=False
"""
server = fakes.make_fake_server('1234', 'daffy', 'ACTIVE')
self.register_uris([
self.get_nova_discovery_mock_dict(),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'daffy']),
status_code=404),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail'],
qs_elements=['name=daffy']),
json={'servers': [server]}),
dict(method='DELETE',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234'])),
])
self.register_uris(
[
self.get_nova_discovery_mock_dict(),
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'daffy']
),
status_code=404,
),
dict(
method='GET',
uri=self.get_mock_url(
'compute',
'public',
append=['servers', 'detail'],
qs_elements=['name=daffy'],
),
json={'servers': [server]},
),
dict(
method='DELETE',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']
),
),
]
)
self.assertTrue(self.cloud.delete_server('daffy', wait=False))
self.assert_calls()
@ -53,35 +65,55 @@ class TestDeleteServer(base.TestCase):
"""
Test that we return immediately when server is already gone
"""
self.register_uris([
self.get_nova_discovery_mock_dict(),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'tweety']),
status_code=404),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail'],
qs_elements=['name=tweety']),
json={'servers': []}),
])
self.register_uris(
[
self.get_nova_discovery_mock_dict(),
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'tweety']
),
status_code=404,
),
dict(
method='GET',
uri=self.get_mock_url(
'compute',
'public',
append=['servers', 'detail'],
qs_elements=['name=tweety'],
),
json={'servers': []},
),
]
)
self.assertFalse(self.cloud.delete_server('tweety', wait=False))
self.assert_calls()
def test_delete_server_already_gone_wait(self):
self.register_uris([
self.get_nova_discovery_mock_dict(),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'speedy']),
status_code=404),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail'],
qs_elements=['name=speedy']),
json={'servers': []}),
])
self.register_uris(
[
self.get_nova_discovery_mock_dict(),
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'speedy']
),
status_code=404,
),
dict(
method='GET',
uri=self.get_mock_url(
'compute',
'public',
append=['servers', 'detail'],
qs_elements=['name=speedy'],
),
json={'servers': []},
),
]
)
self.assertFalse(self.cloud.delete_server('speedy', wait=True))
self.assert_calls()
@ -90,29 +122,48 @@ class TestDeleteServer(base.TestCase):
Test that delete_server waits for the server to be gone
"""
server = fakes.make_fake_server('9999', 'wily', 'ACTIVE')
self.register_uris([
self.get_nova_discovery_mock_dict(),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'wily']),
status_code=404),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail'],
qs_elements=['name=wily']),
json={'servers': [server]}),
dict(method='DELETE',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '9999'])),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '9999']),
json={'server': server}),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '9999']),
status_code=404),
])
self.register_uris(
[
self.get_nova_discovery_mock_dict(),
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'wily']
),
status_code=404,
),
dict(
method='GET',
uri=self.get_mock_url(
'compute',
'public',
append=['servers', 'detail'],
qs_elements=['name=wily'],
),
json={'servers': [server]},
),
dict(
method='DELETE',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '9999']
),
),
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '9999']
),
json={'server': server},
),
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '9999']
),
status_code=404,
),
]
)
self.assertTrue(self.cloud.delete_server('wily', wait=True))
self.assert_calls()
@ -122,27 +173,42 @@ class TestDeleteServer(base.TestCase):
Test that delete_server raises non-404 exceptions
"""
server = fakes.make_fake_server('1212', 'speedy', 'ACTIVE')
self.register_uris([
self.get_nova_discovery_mock_dict(),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'speedy']),
status_code=404),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail'],
qs_elements=['name=speedy']),
json={'servers': [server]}),
dict(method='DELETE',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1212']),
status_code=400),
])
self.register_uris(
[
self.get_nova_discovery_mock_dict(),
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'speedy']
),
status_code=404,
),
dict(
method='GET',
uri=self.get_mock_url(
'compute',
'public',
append=['servers', 'detail'],
qs_elements=['name=speedy'],
),
json={'servers': [server]},
),
dict(
method='DELETE',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1212']
),
status_code=400,
),
]
)
self.assertRaises(
shade_exc.OpenStackCloudException,
self.cloud.delete_server, 'speedy',
wait=False)
self.cloud.delete_server,
'speedy',
wait=False,
)
self.assert_calls()
@ -156,24 +222,38 @@ class TestDeleteServer(base.TestCase):
if service_type == 'volume':
return False
return orig_has_service(service_type)
self.cloud.has_service = fake_has_service
server = fakes.make_fake_server('1234', 'porky', 'ACTIVE')
self.register_uris([
self.get_nova_discovery_mock_dict(),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'porky']),
status_code=404),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail'],
qs_elements=['name=porky']),
json={'servers': [server]}),
dict(method='DELETE',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234'])),
])
self.register_uris(
[
self.get_nova_discovery_mock_dict(),
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'porky']
),
status_code=404,
),
dict(
method='GET',
uri=self.get_mock_url(
'compute',
'public',
append=['servers', 'detail'],
qs_elements=['name=porky'],
),
json={'servers': [server]},
),
dict(
method='DELETE',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']
),
),
]
)
self.assertTrue(self.cloud.delete_server('porky', wait=False))
self.assert_calls()
@ -185,50 +265,84 @@ class TestDeleteServer(base.TestCase):
server = fakes.make_fake_server('1234', 'porky', 'ACTIVE')
fip_id = uuid.uuid4().hex
self.register_uris([
self.get_nova_discovery_mock_dict(),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'porky']),
status_code=404),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail'],
qs_elements=['name=porky']),
json={'servers': [server]}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'floatingips'],
qs_elements=['floating_ip_address=172.24.5.5']),
complete_qs=True,
json={'floatingips': [{
'router_id': 'd23abc8d-2991-4a55-ba98-2aaea84cc72f',
'tenant_id': '4969c491a3c74ee4af974e6d800c62de',
'floating_network_id': '376da547-b977-4cfe-9cba7',
'fixed_ip_address': '10.0.0.4',
'floating_ip_address': '172.24.5.5',
'port_id': 'ce705c24-c1ef-408a-bda3-7bbd946164ac',
'id': fip_id,
'status': 'ACTIVE'}]}),
dict(method='DELETE',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'floatingips', fip_id])),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'floatingips']),
complete_qs=True,
json={'floatingips': []}),
dict(method='DELETE',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234'])),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']),
status_code=404),
])
self.assertTrue(self.cloud.delete_server(
'porky', wait=True, delete_ips=True))
self.register_uris(
[
self.get_nova_discovery_mock_dict(),
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'porky']
),
status_code=404,
),
dict(
method='GET',
uri=self.get_mock_url(
'compute',
'public',
append=['servers', 'detail'],
qs_elements=['name=porky'],
),
json={'servers': [server]},
),
dict(
method='GET',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'floatingips'],
qs_elements=['floating_ip_address=172.24.5.5'],
),
complete_qs=True,
json={
'floatingips': [
{
'router_id': 'd23abc8d-2991-4a55-ba98-2aaea84cc72f', # noqa: E501
'tenant_id': '4969c491a3c74ee4af974e6d800c62de', # noqa: E501
'floating_network_id': '376da547-b977-4cfe-9cba7', # noqa: E501
'fixed_ip_address': '10.0.0.4',
'floating_ip_address': '172.24.5.5',
'port_id': 'ce705c24-c1ef-408a-bda3-7bbd946164ac', # noqa: E501
'id': fip_id,
'status': 'ACTIVE',
}
]
},
),
dict(
method='DELETE',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'floatingips', fip_id],
),
),
dict(
method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'floatingips']
),
complete_qs=True,
json={'floatingips': []},
),
dict(
method='DELETE',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']
),
),
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']
),
status_code=404,
),
]
)
self.assertTrue(
self.cloud.delete_server('porky', wait=True, delete_ips=True)
)
self.assert_calls()
@ -238,33 +352,55 @@ class TestDeleteServer(base.TestCase):
"""
server = fakes.make_fake_server('1234', 'porky', 'ACTIVE')
self.register_uris([
self.get_nova_discovery_mock_dict(),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'porky']),
status_code=404),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail'],
qs_elements=['name=porky']),
json={'servers': [server]}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'floatingips'],
qs_elements=['floating_ip_address=172.24.5.5']),
complete_qs=True,
status_code=404),
dict(method='DELETE',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234'])),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']),
status_code=404),
])
self.assertTrue(self.cloud.delete_server(
'porky', wait=True, delete_ips=True))
self.register_uris(
[
self.get_nova_discovery_mock_dict(),
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'porky']
),
status_code=404,
),
dict(
method='GET',
uri=self.get_mock_url(
'compute',
'public',
append=['servers', 'detail'],
qs_elements=['name=porky'],
),
json={'servers': [server]},
),
dict(
method='GET',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'floatingips'],
qs_elements=['floating_ip_address=172.24.5.5'],
),
complete_qs=True,
status_code=404,
),
dict(
method='DELETE',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']
),
),
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']
),
status_code=404,
),
]
)
self.assertTrue(
self.cloud.delete_server('porky', wait=True, delete_ips=True)
)
self.assert_calls()
@ -275,44 +411,73 @@ class TestDeleteServer(base.TestCase):
self.cloud._floating_ip_source = 'nova'
server = fakes.make_fake_server('1234', 'porky', 'ACTIVE')
self.register_uris([
self.get_nova_discovery_mock_dict(),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'porky']),
status_code=404),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail'],
qs_elements=['name=porky']),
json={'servers': [server]}),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['os-floating-ips']),
json={'floating_ips': [
{
'fixed_ip': None,
'id': 1,
'instance_id': None,
'ip': '172.24.5.5',
'pool': 'nova'
}]}),
dict(method='DELETE',
uri=self.get_mock_url(
'compute', 'public', append=['os-floating-ips', '1'])),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['os-floating-ips']),
json={'floating_ips': []}),
dict(method='DELETE',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234'])),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']),
status_code=404),
])
self.assertTrue(self.cloud.delete_server(
'porky', wait=True, delete_ips=True))
self.register_uris(
[
self.get_nova_discovery_mock_dict(),
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'porky']
),
status_code=404,
),
dict(
method='GET',
uri=self.get_mock_url(
'compute',
'public',
append=['servers', 'detail'],
qs_elements=['name=porky'],
),
json={'servers': [server]},
),
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['os-floating-ips']
),
json={
'floating_ips': [
{
'fixed_ip': None,
'id': 1,
'instance_id': None,
'ip': '172.24.5.5',
'pool': 'nova',
}
]
},
),
dict(
method='DELETE',
uri=self.get_mock_url(
'compute', 'public', append=['os-floating-ips', '1']
),
),
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['os-floating-ips']
),
json={'floating_ips': []},
),
dict(
method='DELETE',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']
),
),
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']
),
status_code=404,
),
]
)
self.assertTrue(
self.cloud.delete_server('porky', wait=True, delete_ips=True)
)
self.assert_calls()

View File

@ -24,7 +24,6 @@ from openstack.tests.unit import base
class TestDeleteVolumeSnapshot(base.TestCase):
def setUp(self):
super(TestDeleteVolumeSnapshot, self).setUp()
self.use_cinder()
@ -34,23 +33,34 @@ class TestDeleteVolumeSnapshot(base.TestCase):
Test that delete_volume_snapshot without a wait returns True instance
when the volume snapshot deletes.
"""
fake_snapshot = fakes.FakeVolumeSnapshot('1234', 'available',
'foo', 'derpysnapshot')
fake_snapshot = fakes.FakeVolumeSnapshot(
'1234', 'available', 'foo', 'derpysnapshot'
)
fake_snapshot_dict = meta.obj_to_munch(fake_snapshot)
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'volumev3', 'public',
append=['snapshots', 'detail']),
json={'snapshots': [fake_snapshot_dict]}),
dict(method='DELETE',
uri=self.get_mock_url(
'volumev3', 'public',
append=['snapshots', fake_snapshot_dict['id']]))])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['snapshots', 'detail']
),
json={'snapshots': [fake_snapshot_dict]},
),
dict(
method='DELETE',
uri=self.get_mock_url(
'volumev3',
'public',
append=['snapshots', fake_snapshot_dict['id']],
),
),
]
)
self.assertTrue(
self.cloud.delete_volume_snapshot(name_or_id='1234', wait=False))
self.cloud.delete_volume_snapshot(name_or_id='1234', wait=False)
)
self.assert_calls()
def test_delete_volume_snapshot_with_error(self):
@ -58,24 +68,36 @@ class TestDeleteVolumeSnapshot(base.TestCase):
Test that a exception while deleting a volume snapshot will cause an
OpenStackCloudException.
"""
fake_snapshot = fakes.FakeVolumeSnapshot('1234', 'available',
'foo', 'derpysnapshot')
fake_snapshot = fakes.FakeVolumeSnapshot(
'1234', 'available', 'foo', 'derpysnapshot'
)
fake_snapshot_dict = meta.obj_to_munch(fake_snapshot)
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'volumev3', 'public',
append=['snapshots', 'detail']),
json={'snapshots': [fake_snapshot_dict]}),
dict(method='DELETE',
uri=self.get_mock_url(
'volumev3', 'public',
append=['snapshots', fake_snapshot_dict['id']]),
status_code=404)])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['snapshots', 'detail']
),
json={'snapshots': [fake_snapshot_dict]},
),
dict(
method='DELETE',
uri=self.get_mock_url(
'volumev3',
'public',
append=['snapshots', fake_snapshot_dict['id']],
),
status_code=404,
),
]
)
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.delete_volume_snapshot, name_or_id='1234')
self.cloud.delete_volume_snapshot,
name_or_id='1234',
)
self.assert_calls()
def test_delete_volume_snapshot_with_timeout(self):
@ -83,29 +105,43 @@ class TestDeleteVolumeSnapshot(base.TestCase):
Test that a timeout while waiting for the volume snapshot to delete
raises an exception in delete_volume_snapshot.
"""
fake_snapshot = fakes.FakeVolumeSnapshot('1234', 'available',
'foo', 'derpysnapshot')
fake_snapshot = fakes.FakeVolumeSnapshot(
'1234', 'available', 'foo', 'derpysnapshot'
)
fake_snapshot_dict = meta.obj_to_munch(fake_snapshot)
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'volumev3', 'public',
append=['snapshots', 'detail']),
json={'snapshots': [fake_snapshot_dict]}),
dict(method='DELETE',
uri=self.get_mock_url(
'volumev3', 'public',
append=['snapshots', fake_snapshot_dict['id']])),
dict(method='GET',
uri=self.get_mock_url(
'volumev3', 'public',
append=['snapshots', '1234']),
json={'snapshot': fake_snapshot_dict}),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['snapshots', 'detail']
),
json={'snapshots': [fake_snapshot_dict]},
),
dict(
method='DELETE',
uri=self.get_mock_url(
'volumev3',
'public',
append=['snapshots', fake_snapshot_dict['id']],
),
),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['snapshots', '1234']
),
json={'snapshot': fake_snapshot_dict},
),
]
)
self.assertRaises(
exc.OpenStackCloudTimeout,
self.cloud.delete_volume_snapshot, name_or_id='1234',
wait=True, timeout=0.01)
self.cloud.delete_volume_snapshot,
name_or_id='1234',
wait=True,
timeout=0.01,
)
self.assert_calls(do_count=False)

View File

@ -15,17 +15,23 @@ from openstack.tests.unit import base
class TestDomainParams(base.TestCase):
def test_identity_params_v3(self):
project_data = self._get_project_data(v3=True)
self.register_uris([
dict(method='GET',
uri='https://identity.example.com/v3/projects',
json=dict(projects=[project_data.json_response['project']]))
])
self.register_uris(
[
dict(
method='GET',
uri='https://identity.example.com/v3/projects',
json=dict(
projects=[project_data.json_response['project']]
),
)
]
)
ret = self.cloud._get_identity_params(
domain_id='5678', project=project_data.project_name)
domain_id='5678', project=project_data.project_name
)
self.assertIn('default_project_id', ret)
self.assertEqual(ret['default_project_id'], project_data.project_id)
self.assertIn('domain_id', ret)
@ -39,6 +45,8 @@ class TestDomainParams(base.TestCase):
self.assertRaises(
exc.OpenStackCloudException,
self.cloud._get_identity_params,
domain_id=None, project=project_data.project_name)
domain_id=None,
project=project_data.project_name,
)
self.assert_calls()

View File

@ -23,36 +23,54 @@ from openstack.tests.unit import base
class TestDomains(base.TestCase):
def get_mock_url(self, service_type='identity',
resource='domains',
append=None, base_url_append='v3',
qs_elements=None):
def get_mock_url(
self,
service_type='identity',
resource='domains',
append=None,
base_url_append='v3',
qs_elements=None,
):
return super(TestDomains, self).get_mock_url(
service_type=service_type, resource=resource,
append=append, base_url_append=base_url_append,
qs_elements=qs_elements)
service_type=service_type,
resource=resource,
append=append,
base_url_append=base_url_append,
qs_elements=qs_elements,
)
def test_list_domains(self):
domain_data = self._get_domain_data()
self.register_uris([
dict(method='GET', uri=self.get_mock_url(), status_code=200,
json={'domains': [domain_data.json_response['domain']]})])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(),
status_code=200,
json={'domains': [domain_data.json_response['domain']]},
)
]
)
domains = self.cloud.list_domains()
self.assertThat(len(domains), matchers.Equals(1))
self.assertThat(domains[0].name,
matchers.Equals(domain_data.domain_name))
self.assertThat(domains[0].id,
matchers.Equals(domain_data.domain_id))
self.assertThat(
domains[0].name, matchers.Equals(domain_data.domain_name)
)
self.assertThat(domains[0].id, matchers.Equals(domain_data.domain_id))
self.assert_calls()
def test_get_domain(self):
domain_data = self._get_domain_data()
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(append=[domain_data.domain_id]),
status_code=200,
json=domain_data.json_response)])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(append=[domain_data.domain_id]),
status_code=200,
json=domain_data.json_response,
)
]
)
domain = self.cloud.get_domain(domain_id=domain_data.domain_id)
self.assertThat(domain.id, matchers.Equals(domain_data.domain_id))
self.assertThat(domain.name, matchers.Equals(domain_data.domain_name))
@ -61,57 +79,86 @@ class TestDomains(base.TestCase):
def test_get_domain_with_name_or_id(self):
domain_data = self._get_domain_data()
response = {'domains': [domain_data.json_response['domain']]}
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(append=[domain_data.domain_id]),
status_code=200,
json=domain_data.json_response),
dict(method='GET',
uri=self.get_mock_url(append=[domain_data.domain_name]),
status_code=404),
dict(method='GET',
uri=self.get_mock_url(
qs_elements=['name=' + domain_data.domain_name]
),
status_code=200,
json=response),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(append=[domain_data.domain_id]),
status_code=200,
json=domain_data.json_response,
),
dict(
method='GET',
uri=self.get_mock_url(append=[domain_data.domain_name]),
status_code=404,
),
dict(
method='GET',
uri=self.get_mock_url(
qs_elements=['name=' + domain_data.domain_name]
),
status_code=200,
json=response,
),
]
)
domain = self.cloud.get_domain(name_or_id=domain_data.domain_id)
domain_by_name = self.cloud.get_domain(
name_or_id=domain_data.domain_name)
self.assertThat(domain.id, matchers.Equals(domain_data.domain_id))
self.assertThat(domain.name, matchers.Equals(domain_data.domain_name))
self.assertThat(domain_by_name.id,
matchers.Equals(domain_data.domain_id))
self.assertThat(domain_by_name.name,
matchers.Equals(domain_data.domain_name))
self.assert_calls()
def test_create_domain(self):
domain_data = self._get_domain_data(description=uuid.uuid4().hex,
enabled=True)
self.register_uris([
dict(method='POST', uri=self.get_mock_url(), status_code=200,
json=domain_data.json_response,
validate=dict(json=domain_data.json_request))])
domain = self.cloud.create_domain(
domain_data.domain_name, domain_data.description)
name_or_id=domain_data.domain_name
)
self.assertThat(domain.id, matchers.Equals(domain_data.domain_id))
self.assertThat(domain.name, matchers.Equals(domain_data.domain_name))
self.assertThat(
domain.description, matchers.Equals(domain_data.description))
domain_by_name.id, matchers.Equals(domain_data.domain_id)
)
self.assertThat(
domain_by_name.name, matchers.Equals(domain_data.domain_name)
)
self.assert_calls()
def test_create_domain(self):
domain_data = self._get_domain_data(
description=uuid.uuid4().hex, enabled=True
)
self.register_uris(
[
dict(
method='POST',
uri=self.get_mock_url(),
status_code=200,
json=domain_data.json_response,
validate=dict(json=domain_data.json_request),
)
]
)
domain = self.cloud.create_domain(
domain_data.domain_name, domain_data.description
)
self.assertThat(domain.id, matchers.Equals(domain_data.domain_id))
self.assertThat(domain.name, matchers.Equals(domain_data.domain_name))
self.assertThat(
domain.description, matchers.Equals(domain_data.description)
)
self.assert_calls()
def test_create_domain_exception(self):
domain_data = self._get_domain_data(domain_name='domain_name',
enabled=True)
domain_data = self._get_domain_data(
domain_name='domain_name', enabled=True
)
with testtools.ExpectedException(
openstack.cloud.OpenStackCloudBadRequest
):
self.register_uris([
dict(method='POST', uri=self.get_mock_url(), status_code=400,
json=domain_data.json_response,
validate=dict(json=domain_data.json_request))])
self.register_uris(
[
dict(
method='POST',
uri=self.get_mock_url(),
status_code=400,
json=domain_data.json_response,
validate=dict(json=domain_data.json_request),
)
]
)
self.cloud.create_domain('domain_name')
self.assert_calls()
@ -120,11 +167,20 @@ class TestDomains(base.TestCase):
new_resp = domain_data.json_response.copy()
new_resp['domain']['enabled'] = False
domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id])
self.register_uris([
dict(method='PATCH', uri=domain_resource_uri, status_code=200,
json=new_resp,
validate=dict(json={'domain': {'enabled': False}})),
dict(method='DELETE', uri=domain_resource_uri, status_code=204)])
self.register_uris(
[
dict(
method='PATCH',
uri=domain_resource_uri,
status_code=200,
json=new_resp,
validate=dict(json={'domain': {'enabled': False}}),
),
dict(
method='DELETE', uri=domain_resource_uri, status_code=204
),
]
)
self.cloud.delete_domain(domain_data.domain_id)
self.assert_calls()
@ -134,15 +190,26 @@ class TestDomains(base.TestCase):
new_resp['domain']['enabled'] = False
domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id])
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(append=[domain_data.domain_id]),
status_code=200,
json={'domain': domain_data.json_response['domain']}),
dict(method='PATCH', uri=domain_resource_uri, status_code=200,
json=new_resp,
validate=dict(json={'domain': {'enabled': False}})),
dict(method='DELETE', uri=domain_resource_uri, status_code=204)])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(append=[domain_data.domain_id]),
status_code=200,
json={'domain': domain_data.json_response['domain']},
),
dict(
method='PATCH',
uri=domain_resource_uri,
status_code=200,
json=new_resp,
validate=dict(json={'domain': {'enabled': False}}),
),
dict(
method='DELETE', uri=domain_resource_uri, status_code=204
),
]
)
self.cloud.delete_domain(name_or_id=domain_data.domain_id)
self.assert_calls()
@ -156,11 +223,20 @@ class TestDomains(base.TestCase):
new_resp = domain_data.json_response.copy()
new_resp['domain']['enabled'] = False
domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id])
self.register_uris([
dict(method='PATCH', uri=domain_resource_uri, status_code=200,
json=new_resp,
validate=dict(json={'domain': {'enabled': False}})),
dict(method='DELETE', uri=domain_resource_uri, status_code=404)])
self.register_uris(
[
dict(
method='PATCH',
uri=domain_resource_uri,
status_code=200,
json=new_resp,
validate=dict(json={'domain': {'enabled': False}}),
),
dict(
method='DELETE', uri=domain_resource_uri, status_code=404
),
]
)
with testtools.ExpectedException(
openstack.exceptions.ResourceNotFound
):
@ -169,53 +245,81 @@ class TestDomains(base.TestCase):
def test_update_domain(self):
domain_data = self._get_domain_data(
description=self.getUniqueString('domainDesc'))
description=self.getUniqueString('domainDesc')
)
domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id])
self.register_uris([
dict(method='PATCH', uri=domain_resource_uri, status_code=200,
json=domain_data.json_response,
validate=dict(json=domain_data.json_request))])
self.register_uris(
[
dict(
method='PATCH',
uri=domain_resource_uri,
status_code=200,
json=domain_data.json_response,
validate=dict(json=domain_data.json_request),
)
]
)
domain = self.cloud.update_domain(
domain_data.domain_id,
name=domain_data.domain_name,
description=domain_data.description)
description=domain_data.description,
)
self.assertThat(domain.id, matchers.Equals(domain_data.domain_id))
self.assertThat(domain.name, matchers.Equals(domain_data.domain_name))
self.assertThat(
domain.description, matchers.Equals(domain_data.description))
domain.description, matchers.Equals(domain_data.description)
)
self.assert_calls()
def test_update_domain_name_or_id(self):
domain_data = self._get_domain_data(
description=self.getUniqueString('domainDesc'))
description=self.getUniqueString('domainDesc')
)
domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id])
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(append=[domain_data.domain_id]),
status_code=200,
json={'domain': domain_data.json_response['domain']}),
dict(method='PATCH', uri=domain_resource_uri, status_code=200,
json=domain_data.json_response,
validate=dict(json=domain_data.json_request))])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(append=[domain_data.domain_id]),
status_code=200,
json={'domain': domain_data.json_response['domain']},
),
dict(
method='PATCH',
uri=domain_resource_uri,
status_code=200,
json=domain_data.json_response,
validate=dict(json=domain_data.json_request),
),
]
)
domain = self.cloud.update_domain(
name_or_id=domain_data.domain_id,
name=domain_data.domain_name,
description=domain_data.description)
description=domain_data.description,
)
self.assertThat(domain.id, matchers.Equals(domain_data.domain_id))
self.assertThat(domain.name, matchers.Equals(domain_data.domain_name))
self.assertThat(
domain.description, matchers.Equals(domain_data.description))
domain.description, matchers.Equals(domain_data.description)
)
self.assert_calls()
def test_update_domain_exception(self):
domain_data = self._get_domain_data(
description=self.getUniqueString('domainDesc'))
self.register_uris([
dict(method='PATCH',
uri=self.get_mock_url(append=[domain_data.domain_id]),
status_code=409,
json=domain_data.json_response,
validate=dict(json={'domain': {'enabled': False}}))])
description=self.getUniqueString('domainDesc')
)
self.register_uris(
[
dict(
method='PATCH',
uri=self.get_mock_url(append=[domain_data.domain_id]),
status_code=409,
json=domain_data.json_response,
validate=dict(json={'domain': {'enabled': False}}),
)
]
)
with testtools.ExpectedException(
openstack.exceptions.ConflictException
):

View File

@ -27,11 +27,17 @@ from openstack.tests.unit import base
class TestCloudEndpoints(base.TestCase):
def get_mock_url(self, service_type='identity', interface='public',
resource='endpoints', append=None, base_url_append='v3'):
def get_mock_url(
self,
service_type='identity',
interface='public',
resource='endpoints',
append=None,
base_url_append='v3',
):
return super(TestCloudEndpoints, self).get_mock_url(
service_type, interface, resource, append, base_url_append)
service_type, interface, resource, append, base_url_append
)
def _dummy_url(self):
return 'https://%s.example.com/' % uuid.uuid4().hex
@ -39,148 +45,207 @@ class TestCloudEndpoints(base.TestCase):
def test_create_endpoint_v3(self):
service_data = self._get_service_data()
public_endpoint_data = self._get_endpoint_v3_data(
service_id=service_data.service_id, interface='public',
url=self._dummy_url())
service_id=service_data.service_id,
interface='public',
url=self._dummy_url(),
)
public_endpoint_data_disabled = self._get_endpoint_v3_data(
service_id=service_data.service_id, interface='public',
url=self._dummy_url(), enabled=False)
service_id=service_data.service_id,
interface='public',
url=self._dummy_url(),
enabled=False,
)
admin_endpoint_data = self._get_endpoint_v3_data(
service_id=service_data.service_id, interface='admin',
url=self._dummy_url(), region=public_endpoint_data.region_id)
service_id=service_data.service_id,
interface='admin',
url=self._dummy_url(),
region=public_endpoint_data.region_id,
)
internal_endpoint_data = self._get_endpoint_v3_data(
service_id=service_data.service_id, interface='internal',
url=self._dummy_url(), region=public_endpoint_data.region_id)
service_id=service_data.service_id,
interface='internal',
url=self._dummy_url(),
region=public_endpoint_data.region_id,
)
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(resource='services'),
status_code=200,
json={'services': [
service_data.json_response_v3['service']]}),
dict(method='POST',
uri=self.get_mock_url(),
status_code=200,
json=public_endpoint_data_disabled.json_response,
validate=dict(
json=public_endpoint_data_disabled.json_request)),
dict(method='GET',
uri=self.get_mock_url(resource='services'),
status_code=200,
json={'services': [
service_data.json_response_v3['service']]}),
dict(method='POST',
uri=self.get_mock_url(),
status_code=200,
json=public_endpoint_data.json_response,
validate=dict(json=public_endpoint_data.json_request)),
dict(method='POST',
uri=self.get_mock_url(),
status_code=200,
json=internal_endpoint_data.json_response,
validate=dict(json=internal_endpoint_data.json_request)),
dict(method='POST',
uri=self.get_mock_url(),
status_code=200,
json=admin_endpoint_data.json_response,
validate=dict(json=admin_endpoint_data.json_request)),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(resource='services'),
status_code=200,
json={
'services': [service_data.json_response_v3['service']]
},
),
dict(
method='POST',
uri=self.get_mock_url(),
status_code=200,
json=public_endpoint_data_disabled.json_response,
validate=dict(
json=public_endpoint_data_disabled.json_request
),
),
dict(
method='GET',
uri=self.get_mock_url(resource='services'),
status_code=200,
json={
'services': [service_data.json_response_v3['service']]
},
),
dict(
method='POST',
uri=self.get_mock_url(),
status_code=200,
json=public_endpoint_data.json_response,
validate=dict(json=public_endpoint_data.json_request),
),
dict(
method='POST',
uri=self.get_mock_url(),
status_code=200,
json=internal_endpoint_data.json_response,
validate=dict(json=internal_endpoint_data.json_request),
),
dict(
method='POST',
uri=self.get_mock_url(),
status_code=200,
json=admin_endpoint_data.json_response,
validate=dict(json=admin_endpoint_data.json_request),
),
]
)
endpoints = self.cloud.create_endpoint(
service_name_or_id=service_data.service_id,
region=public_endpoint_data_disabled.region_id,
url=public_endpoint_data_disabled.url,
interface=public_endpoint_data_disabled.interface,
enabled=False)
enabled=False,
)
# Test endpoint values
self.assertThat(
endpoints[0].id,
matchers.Equals(public_endpoint_data_disabled.endpoint_id))
self.assertThat(endpoints[0].url,
matchers.Equals(public_endpoint_data_disabled.url))
matchers.Equals(public_endpoint_data_disabled.endpoint_id),
)
self.assertThat(
endpoints[0].url,
matchers.Equals(public_endpoint_data_disabled.url),
)
self.assertThat(
endpoints[0].interface,
matchers.Equals(public_endpoint_data_disabled.interface))
matchers.Equals(public_endpoint_data_disabled.interface),
)
self.assertThat(
endpoints[0].region_id,
matchers.Equals(public_endpoint_data_disabled.region_id))
matchers.Equals(public_endpoint_data_disabled.region_id),
)
self.assertThat(
endpoints[0].region_id,
matchers.Equals(public_endpoint_data_disabled.region_id))
self.assertThat(endpoints[0].is_enabled,
matchers.Equals(public_endpoint_data_disabled.enabled))
matchers.Equals(public_endpoint_data_disabled.region_id),
)
self.assertThat(
endpoints[0].is_enabled,
matchers.Equals(public_endpoint_data_disabled.enabled),
)
endpoints_2on3 = self.cloud.create_endpoint(
service_name_or_id=service_data.service_id,
region=public_endpoint_data.region_id,
public_url=public_endpoint_data.url,
internal_url=internal_endpoint_data.url,
admin_url=admin_endpoint_data.url)
admin_url=admin_endpoint_data.url,
)
# Three endpoints should be returned, public, internal, and admin
self.assertThat(len(endpoints_2on3), matchers.Equals(3))
# test keys and values are correct for each endpoint created
for result, reference in zip(
endpoints_2on3, [public_endpoint_data,
internal_endpoint_data,
admin_endpoint_data]
endpoints_2on3,
[
public_endpoint_data,
internal_endpoint_data,
admin_endpoint_data,
],
):
self.assertThat(result.id, matchers.Equals(reference.endpoint_id))
self.assertThat(result.url, matchers.Equals(reference.url))
self.assertThat(result.interface,
matchers.Equals(reference.interface))
self.assertThat(result.region_id,
matchers.Equals(reference.region_id))
self.assertThat(result.is_enabled,
matchers.Equals(reference.enabled))
self.assertThat(
result.interface, matchers.Equals(reference.interface)
)
self.assertThat(
result.region_id, matchers.Equals(reference.region_id)
)
self.assertThat(
result.is_enabled, matchers.Equals(reference.enabled)
)
self.assert_calls()
def test_update_endpoint_v3(self):
service_data = self._get_service_data()
dummy_url = self._dummy_url()
endpoint_data = self._get_endpoint_v3_data(
service_id=service_data.service_id, interface='admin',
enabled=False)
service_id=service_data.service_id,
interface='admin',
enabled=False,
)
reference_request = endpoint_data.json_request.copy()
reference_request['endpoint']['url'] = dummy_url
self.register_uris([
dict(method='PATCH',
uri=self.get_mock_url(append=[endpoint_data.endpoint_id]),
status_code=200,
json=endpoint_data.json_response,
validate=dict(json=reference_request))
])
self.register_uris(
[
dict(
method='PATCH',
uri=self.get_mock_url(append=[endpoint_data.endpoint_id]),
status_code=200,
json=endpoint_data.json_response,
validate=dict(json=reference_request),
)
]
)
endpoint = self.cloud.update_endpoint(
endpoint_data.endpoint_id,
service_name_or_id=service_data.service_id,
region=endpoint_data.region_id,
url=dummy_url,
interface=endpoint_data.interface,
enabled=False
enabled=False,
)
# test keys and values are correct
self.assertThat(endpoint.id,
matchers.Equals(endpoint_data.endpoint_id))
self.assertThat(endpoint.service_id,
matchers.Equals(service_data.service_id))
self.assertThat(endpoint.url,
matchers.Equals(endpoint_data.url))
self.assertThat(endpoint.interface,
matchers.Equals(endpoint_data.interface))
self.assertThat(
endpoint.id, matchers.Equals(endpoint_data.endpoint_id)
)
self.assertThat(
endpoint.service_id, matchers.Equals(service_data.service_id)
)
self.assertThat(endpoint.url, matchers.Equals(endpoint_data.url))
self.assertThat(
endpoint.interface, matchers.Equals(endpoint_data.interface)
)
self.assert_calls()
def test_list_endpoints(self):
endpoints_data = [self._get_endpoint_v3_data() for e in range(1, 10)]
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(),
status_code=200,
json={'endpoints': [e.json_response['endpoint']
for e in endpoints_data]})
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(),
status_code=200,
json={
'endpoints': [
e.json_response['endpoint'] for e in endpoints_data
]
},
)
]
)
endpoints = self.cloud.list_endpoints()
# test we are getting exactly len(self.mock_endpoints) elements
@ -188,58 +253,89 @@ class TestCloudEndpoints(base.TestCase):
# test keys and values are correct
for i, ep in enumerate(endpoints_data):
self.assertThat(endpoints[i].id,
matchers.Equals(ep.endpoint_id))
self.assertThat(endpoints[i].service_id,
matchers.Equals(ep.service_id))
self.assertThat(endpoints[i].url,
matchers.Equals(ep.url))
self.assertThat(endpoints[i].interface,
matchers.Equals(ep.interface))
self.assertThat(endpoints[i].id, matchers.Equals(ep.endpoint_id))
self.assertThat(
endpoints[i].service_id, matchers.Equals(ep.service_id)
)
self.assertThat(endpoints[i].url, matchers.Equals(ep.url))
self.assertThat(
endpoints[i].interface, matchers.Equals(ep.interface)
)
self.assert_calls()
def test_search_endpoints(self):
endpoints_data = [self._get_endpoint_v3_data(region='region1')
for e in range(0, 2)]
endpoints_data.extend([self._get_endpoint_v3_data()
for e in range(1, 8)])
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(),
status_code=200,
json={'endpoints': [e.json_response['endpoint']
for e in endpoints_data]}),
dict(method='GET',
uri=self.get_mock_url(),
status_code=200,
json={'endpoints': [e.json_response['endpoint']
for e in endpoints_data]}),
dict(method='GET',
uri=self.get_mock_url(),
status_code=200,
json={'endpoints': [e.json_response['endpoint']
for e in endpoints_data]}),
dict(method='GET',
uri=self.get_mock_url(),
status_code=200,
json={'endpoints': [e.json_response['endpoint']
for e in endpoints_data]})
])
endpoints_data = [
self._get_endpoint_v3_data(region='region1') for e in range(0, 2)
]
endpoints_data.extend(
[self._get_endpoint_v3_data() for e in range(1, 8)]
)
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(),
status_code=200,
json={
'endpoints': [
e.json_response['endpoint'] for e in endpoints_data
]
},
),
dict(
method='GET',
uri=self.get_mock_url(),
status_code=200,
json={
'endpoints': [
e.json_response['endpoint'] for e in endpoints_data
]
},
),
dict(
method='GET',
uri=self.get_mock_url(),
status_code=200,
json={
'endpoints': [
e.json_response['endpoint'] for e in endpoints_data
]
},
),
dict(
method='GET',
uri=self.get_mock_url(),
status_code=200,
json={
'endpoints': [
e.json_response['endpoint'] for e in endpoints_data
]
},
),
]
)
# Search by id
endpoints = self.cloud.search_endpoints(
id=endpoints_data[-1].endpoint_id)
id=endpoints_data[-1].endpoint_id
)
# # test we are getting exactly 1 element
self.assertEqual(1, len(endpoints))
self.assertThat(endpoints[0].id,
matchers.Equals(endpoints_data[-1].endpoint_id))
self.assertThat(endpoints[0].service_id,
matchers.Equals(endpoints_data[-1].service_id))
self.assertThat(endpoints[0].url,
matchers.Equals(endpoints_data[-1].url))
self.assertThat(endpoints[0].interface,
matchers.Equals(endpoints_data[-1].interface))
self.assertThat(
endpoints[0].id, matchers.Equals(endpoints_data[-1].endpoint_id)
)
self.assertThat(
endpoints[0].service_id,
matchers.Equals(endpoints_data[-1].service_id),
)
self.assertThat(
endpoints[0].url, matchers.Equals(endpoints_data[-1].url)
)
self.assertThat(
endpoints[0].interface,
matchers.Equals(endpoints_data[-1].interface),
)
# Not found
endpoints = self.cloud.search_endpoints(id='!invalid!')
@ -247,13 +343,15 @@ class TestCloudEndpoints(base.TestCase):
# Multiple matches
endpoints = self.cloud.search_endpoints(
filters={'region_id': 'region1'})
filters={'region_id': 'region1'}
)
# # test we are getting exactly 2 elements
self.assertEqual(2, len(endpoints))
# test we are getting the correct response for region/region_id compat
endpoints = self.cloud.search_endpoints(
filters={'region_id': 'region1'})
filters={'region_id': 'region1'}
)
# # test we are getting exactly 2 elements, this is v3
self.assertEqual(2, len(endpoints))
@ -261,16 +359,23 @@ class TestCloudEndpoints(base.TestCase):
def test_delete_endpoint(self):
endpoint_data = self._get_endpoint_v3_data()
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(),
status_code=200,
json={'endpoints': [
endpoint_data.json_response['endpoint']]}),
dict(method='DELETE',
uri=self.get_mock_url(append=[endpoint_data.endpoint_id]),
status_code=204)
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(),
status_code=200,
json={
'endpoints': [endpoint_data.json_response['endpoint']]
},
),
dict(
method='DELETE',
uri=self.get_mock_url(append=[endpoint_data.endpoint_id]),
status_code=204,
),
]
)
# Delete by id
self.cloud.delete_endpoint(id=endpoint_data.endpoint_id)

View File

@ -17,7 +17,6 @@ from openstack.tests.unit import base
class TestFlavors(base.TestCase):
def setUp(self):
super(TestFlavors, self).setUp()
# self.use_compute_discovery()
@ -25,55 +24,85 @@ class TestFlavors(base.TestCase):
def test_create_flavor(self):
self.use_compute_discovery()
self.register_uris([
dict(method='POST',
uri='{endpoint}/flavors'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
json={'flavor': fakes.FAKE_FLAVOR},
validate=dict(
json={
'flavor': {
"name": "vanilla",
"description": None,
"ram": 65536,
"vcpus": 24,
"swap": 0,
"os-flavor-access:is_public": True,
"rxtx_factor": 1.0,
"OS-FLV-EXT-DATA:ephemeral": 0,
"disk": 1600,
"id": None}}))])
self.register_uris(
[
dict(
method='POST',
uri='{endpoint}/flavors'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
json={'flavor': fakes.FAKE_FLAVOR},
validate=dict(
json={
'flavor': {
"name": "vanilla",
"description": None,
"ram": 65536,
"vcpus": 24,
"swap": 0,
"os-flavor-access:is_public": True,
"rxtx_factor": 1.0,
"OS-FLV-EXT-DATA:ephemeral": 0,
"disk": 1600,
"id": None,
}
}
),
)
]
)
self.cloud.create_flavor(
'vanilla', ram=65536, disk=1600, vcpus=24,
'vanilla',
ram=65536,
disk=1600,
vcpus=24,
)
self.assert_calls()
def test_delete_flavor(self):
self.use_compute_discovery()
self.register_uris([
dict(method='GET',
uri='{endpoint}/flavors/vanilla'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
json=fakes.FAKE_FLAVOR),
dict(method='DELETE',
uri='{endpoint}/flavors/{id}'.format(
endpoint=fakes.COMPUTE_ENDPOINT, id=fakes.FLAVOR_ID))])
self.register_uris(
[
dict(
method='GET',
uri='{endpoint}/flavors/vanilla'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
json=fakes.FAKE_FLAVOR,
),
dict(
method='DELETE',
uri='{endpoint}/flavors/{id}'.format(
endpoint=fakes.COMPUTE_ENDPOINT, id=fakes.FLAVOR_ID
),
),
]
)
self.assertTrue(self.cloud.delete_flavor('vanilla'))
self.assert_calls()
def test_delete_flavor_not_found(self):
self.use_compute_discovery()
self.register_uris([
dict(method='GET',
uri='{endpoint}/flavors/invalid'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
status_code=404),
dict(method='GET',
uri='{endpoint}/flavors/detail?is_public=None'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
json={'flavors': fakes.FAKE_FLAVOR_LIST})])
self.register_uris(
[
dict(
method='GET',
uri='{endpoint}/flavors/invalid'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
status_code=404,
),
dict(
method='GET',
uri='{endpoint}/flavors/detail?is_public=None'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
json={'flavors': fakes.FAKE_FLAVOR_LIST},
),
]
)
self.assertFalse(self.cloud.delete_flavor('invalid'))
@ -81,30 +110,48 @@ class TestFlavors(base.TestCase):
def test_delete_flavor_exception(self):
self.use_compute_discovery()
self.register_uris([
dict(method='GET',
uri='{endpoint}/flavors/vanilla'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
json=fakes.FAKE_FLAVOR),
dict(method='GET',
uri='{endpoint}/flavors/detail?is_public=None'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
json={'flavors': fakes.FAKE_FLAVOR_LIST}),
dict(method='DELETE',
uri='{endpoint}/flavors/{id}'.format(
endpoint=fakes.COMPUTE_ENDPOINT, id=fakes.FLAVOR_ID),
status_code=503)])
self.register_uris(
[
dict(
method='GET',
uri='{endpoint}/flavors/vanilla'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
json=fakes.FAKE_FLAVOR,
),
dict(
method='GET',
uri='{endpoint}/flavors/detail?is_public=None'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
json={'flavors': fakes.FAKE_FLAVOR_LIST},
),
dict(
method='DELETE',
uri='{endpoint}/flavors/{id}'.format(
endpoint=fakes.COMPUTE_ENDPOINT, id=fakes.FLAVOR_ID
),
status_code=503,
),
]
)
self.assertRaises(openstack.cloud.OpenStackCloudException,
self.cloud.delete_flavor, 'vanilla')
self.assertRaises(
openstack.cloud.OpenStackCloudException,
self.cloud.delete_flavor,
'vanilla',
)
def test_list_flavors(self):
self.use_compute_discovery()
uris_to_mock = [
dict(method='GET',
uri='{endpoint}/flavors/detail?is_public=None'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
json={'flavors': fakes.FAKE_FLAVOR_LIST}),
dict(
method='GET',
uri='{endpoint}/flavors/detail?is_public=None'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
json={'flavors': fakes.FAKE_FLAVOR_LIST},
),
]
self.register_uris(uris_to_mock)
@ -126,17 +173,26 @@ class TestFlavors(base.TestCase):
def test_list_flavors_with_extra(self):
self.use_compute_discovery()
uris_to_mock = [
dict(method='GET',
uri='{endpoint}/flavors/detail?is_public=None'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
json={'flavors': fakes.FAKE_FLAVOR_LIST}),
dict(
method='GET',
uri='{endpoint}/flavors/detail?is_public=None'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
json={'flavors': fakes.FAKE_FLAVOR_LIST},
),
]
uris_to_mock.extend([
dict(method='GET',
uri='{endpoint}/flavors/{id}/os-extra_specs'.format(
endpoint=fakes.COMPUTE_ENDPOINT, id=flavor['id']),
json={'extra_specs': {}})
for flavor in fakes.FAKE_FLAVOR_LIST])
uris_to_mock.extend(
[
dict(
method='GET',
uri='{endpoint}/flavors/{id}/os-extra_specs'.format(
endpoint=fakes.COMPUTE_ENDPOINT, id=flavor['id']
),
json={'extra_specs': {}},
)
for flavor in fakes.FAKE_FLAVOR_LIST
]
)
self.register_uris(uris_to_mock)
flavors = self.cloud.list_flavors(get_extra=True)
@ -157,17 +213,26 @@ class TestFlavors(base.TestCase):
def test_get_flavor_by_ram(self):
self.use_compute_discovery()
uris_to_mock = [
dict(method='GET',
uri='{endpoint}/flavors/detail?is_public=None'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
json={'flavors': fakes.FAKE_FLAVOR_LIST}),
dict(
method='GET',
uri='{endpoint}/flavors/detail?is_public=None'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
json={'flavors': fakes.FAKE_FLAVOR_LIST},
),
]
uris_to_mock.extend([
dict(method='GET',
uri='{endpoint}/flavors/{id}/os-extra_specs'.format(
endpoint=fakes.COMPUTE_ENDPOINT, id=flavor['id']),
json={'extra_specs': {}})
for flavor in fakes.FAKE_FLAVOR_LIST])
uris_to_mock.extend(
[
dict(
method='GET',
uri='{endpoint}/flavors/{id}/os-extra_specs'.format(
endpoint=fakes.COMPUTE_ENDPOINT, id=flavor['id']
),
json={'extra_specs': {}},
)
for flavor in fakes.FAKE_FLAVOR_LIST
]
)
self.register_uris(uris_to_mock)
flavor = self.cloud.get_flavor_by_ram(ram=250)
@ -176,47 +241,69 @@ class TestFlavors(base.TestCase):
def test_get_flavor_by_ram_and_include(self):
self.use_compute_discovery()
uris_to_mock = [
dict(method='GET',
uri='{endpoint}/flavors/detail?is_public=None'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
json={'flavors': fakes.FAKE_FLAVOR_LIST}),
dict(
method='GET',
uri='{endpoint}/flavors/detail?is_public=None'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
json={'flavors': fakes.FAKE_FLAVOR_LIST},
),
]
uris_to_mock.extend([
dict(method='GET',
uri='{endpoint}/flavors/{id}/os-extra_specs'.format(
endpoint=fakes.COMPUTE_ENDPOINT, id=flavor['id']),
json={'extra_specs': {}})
for flavor in fakes.FAKE_FLAVOR_LIST])
uris_to_mock.extend(
[
dict(
method='GET',
uri='{endpoint}/flavors/{id}/os-extra_specs'.format(
endpoint=fakes.COMPUTE_ENDPOINT, id=flavor['id']
),
json={'extra_specs': {}},
)
for flavor in fakes.FAKE_FLAVOR_LIST
]
)
self.register_uris(uris_to_mock)
flavor = self.cloud.get_flavor_by_ram(ram=150, include='strawberry')
self.assertEqual(fakes.STRAWBERRY_FLAVOR_ID, flavor['id'])
def test_get_flavor_by_ram_not_found(self):
self.use_compute_discovery()
self.register_uris([
dict(method='GET',
uri='{endpoint}/flavors/detail?is_public=None'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
json={'flavors': []})])
self.register_uris(
[
dict(
method='GET',
uri='{endpoint}/flavors/detail?is_public=None'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
json={'flavors': []},
)
]
)
self.assertRaises(
openstack.cloud.OpenStackCloudException,
self.cloud.get_flavor_by_ram,
ram=100)
ram=100,
)
def test_get_flavor_string_and_int(self):
self.use_compute_discovery()
flavor_resource_uri = '{endpoint}/flavors/1/os-extra_specs'.format(
endpoint=fakes.COMPUTE_ENDPOINT)
endpoint=fakes.COMPUTE_ENDPOINT
)
flavor = fakes.make_fake_flavor('1', 'vanilla')
flavor_json = {'extra_specs': {}}
self.register_uris([
dict(method='GET',
uri='{endpoint}/flavors/1'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
json=flavor),
dict(method='GET', uri=flavor_resource_uri, json=flavor_json),
])
self.register_uris(
[
dict(
method='GET',
uri='{endpoint}/flavors/1'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
json=flavor,
),
dict(method='GET', uri=flavor_resource_uri, json=flavor_json),
]
)
flavor1 = self.cloud.get_flavor('1')
self.assertEqual('1', flavor1['id'])
@ -226,11 +313,17 @@ class TestFlavors(base.TestCase):
def test_set_flavor_specs(self):
self.use_compute_discovery()
extra_specs = dict(key1='value1')
self.register_uris([
dict(method='POST',
uri='{endpoint}/flavors/{id}/os-extra_specs'.format(
endpoint=fakes.COMPUTE_ENDPOINT, id=1),
json=dict(extra_specs=extra_specs))])
self.register_uris(
[
dict(
method='POST',
uri='{endpoint}/flavors/{id}/os-extra_specs'.format(
endpoint=fakes.COMPUTE_ENDPOINT, id=1
),
json=dict(extra_specs=extra_specs),
)
]
)
self.cloud.set_flavor_specs(1, extra_specs)
self.assert_calls()
@ -238,62 +331,97 @@ class TestFlavors(base.TestCase):
def test_unset_flavor_specs(self):
self.use_compute_discovery()
keys = ['key1', 'key2']
self.register_uris([
dict(method='DELETE',
uri='{endpoint}/flavors/{id}/os-extra_specs/{key}'.format(
endpoint=fakes.COMPUTE_ENDPOINT, id=1, key=key))
for key in keys])
self.register_uris(
[
dict(
method='DELETE',
uri='{endpoint}/flavors/{id}/os-extra_specs/{key}'.format(
endpoint=fakes.COMPUTE_ENDPOINT, id=1, key=key
),
)
for key in keys
]
)
self.cloud.unset_flavor_specs(1, keys)
self.assert_calls()
def test_add_flavor_access(self):
self.register_uris([
dict(method='POST',
uri='{endpoint}/flavors/{id}/action'.format(
endpoint=fakes.COMPUTE_ENDPOINT, id='flavor_id'),
json={
'flavor_access': [{
'flavor_id': 'flavor_id', 'tenant_id': 'tenant_id'}]},
validate=dict(
json={'addTenantAccess': {'tenant': 'tenant_id'}}))])
self.register_uris(
[
dict(
method='POST',
uri='{endpoint}/flavors/{id}/action'.format(
endpoint=fakes.COMPUTE_ENDPOINT, id='flavor_id'
),
json={
'flavor_access': [
{
'flavor_id': 'flavor_id',
'tenant_id': 'tenant_id',
}
]
},
validate=dict(
json={'addTenantAccess': {'tenant': 'tenant_id'}}
),
)
]
)
self.cloud.add_flavor_access('flavor_id', 'tenant_id')
self.assert_calls()
def test_remove_flavor_access(self):
self.register_uris([
dict(method='POST',
uri='{endpoint}/flavors/{id}/action'.format(
endpoint=fakes.COMPUTE_ENDPOINT, id='flavor_id'),
json={'flavor_access': []},
validate=dict(
json={'removeTenantAccess': {'tenant': 'tenant_id'}}))])
self.register_uris(
[
dict(
method='POST',
uri='{endpoint}/flavors/{id}/action'.format(
endpoint=fakes.COMPUTE_ENDPOINT, id='flavor_id'
),
json={'flavor_access': []},
validate=dict(
json={'removeTenantAccess': {'tenant': 'tenant_id'}}
),
)
]
)
self.cloud.remove_flavor_access('flavor_id', 'tenant_id')
self.assert_calls()
def test_list_flavor_access(self):
self.register_uris([
dict(method='GET',
uri='{endpoint}/flavors/vanilla/os-flavor-access'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
json={
'flavor_access': [
{'flavor_id': 'vanilla', 'tenant_id': 'tenant_id'}]})
])
self.register_uris(
[
dict(
method='GET',
uri='{endpoint}/flavors/vanilla/os-flavor-access'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
json={
'flavor_access': [
{'flavor_id': 'vanilla', 'tenant_id': 'tenant_id'}
]
},
)
]
)
self.cloud.list_flavor_access('vanilla')
self.assert_calls()
def test_get_flavor_by_id(self):
self.use_compute_discovery()
flavor_uri = '{endpoint}/flavors/1'.format(
endpoint=fakes.COMPUTE_ENDPOINT)
endpoint=fakes.COMPUTE_ENDPOINT
)
flavor_json = {'flavor': fakes.make_fake_flavor('1', 'vanilla')}
self.register_uris([
dict(method='GET', uri=flavor_uri, json=flavor_json),
])
self.register_uris(
[
dict(method='GET', uri=flavor_uri, json=flavor_json),
]
)
flavor1 = self.cloud.get_flavor_by_id('1')
self.assertEqual('1', flavor1['id'])
@ -305,16 +433,22 @@ class TestFlavors(base.TestCase):
def test_get_flavor_with_extra_specs(self):
self.use_compute_discovery()
flavor_uri = '{endpoint}/flavors/1'.format(
endpoint=fakes.COMPUTE_ENDPOINT)
endpoint=fakes.COMPUTE_ENDPOINT
)
flavor_extra_uri = '{endpoint}/flavors/1/os-extra_specs'.format(
endpoint=fakes.COMPUTE_ENDPOINT)
endpoint=fakes.COMPUTE_ENDPOINT
)
flavor_json = {'flavor': fakes.make_fake_flavor('1', 'vanilla')}
flavor_extra_json = {'extra_specs': {'name': 'test'}}
self.register_uris([
dict(method='GET', uri=flavor_uri, json=flavor_json),
dict(method='GET', uri=flavor_extra_uri, json=flavor_extra_json),
])
self.register_uris(
[
dict(method='GET', uri=flavor_uri, json=flavor_json),
dict(
method='GET', uri=flavor_extra_uri, json=flavor_extra_json
),
]
)
flavor1 = self.cloud.get_flavor_by_id('1', get_extra=True)
self.assertEqual('1', flavor1['id'])

View File

@ -29,16 +29,20 @@ from openstack.tests.unit import base
class TestFloatingIP(base.TestCase):
@patch.object(connection.Connection, 'get_floating_ip')
@patch.object(connection.Connection, '_attach_ip_to_server')
@patch.object(connection.Connection, 'available_floating_ip')
def test_add_auto_ip(
self, mock_available_floating_ip, mock_attach_ip_to_server,
mock_get_floating_ip):
self,
mock_available_floating_ip,
mock_attach_ip_to_server,
mock_get_floating_ip,
):
server_dict = fakes.make_fake_server(
server_id='server-id', name='test-server', status="ACTIVE",
addresses={}
server_id='server-id',
name='test-server',
status="ACTIVE",
addresses={},
)
floating_ip_dict = {
"id": "this-is-a-floating-ip-id",
@ -47,7 +51,7 @@ class TestFloatingIP(base.TestCase):
"floating_ip_address": "203.0.113.29",
"network": "this-is-a-net-or-pool-id",
"attached": False,
"status": "ACTIVE"
"status": "ACTIVE",
}
mock_available_floating_ip.return_value = floating_ip_dict
@ -55,51 +59,63 @@ class TestFloatingIP(base.TestCase):
self.cloud.add_auto_ip(server=server_dict)
mock_attach_ip_to_server.assert_called_with(
timeout=60, wait=False, server=server_dict,
floating_ip=floating_ip_dict, skip_attach=False)
timeout=60,
wait=False,
server=server_dict,
floating_ip=floating_ip_dict,
skip_attach=False,
)
@patch.object(connection.Connection, '_add_ip_from_pool')
def test_add_ips_to_server_pool(self, mock_add_ip_from_pool):
server_dict = fakes.make_fake_server(
server_id='romeo', name='test-server', status="ACTIVE",
addresses={})
server_id='romeo',
name='test-server',
status="ACTIVE",
addresses={},
)
pool = 'nova'
self.cloud.add_ips_to_server(server_dict, ip_pool=pool)
mock_add_ip_from_pool.assert_called_with(
server_dict, pool, reuse=True, wait=False, timeout=60,
fixed_address=None, nat_destination=None)
server_dict,
pool,
reuse=True,
wait=False,
timeout=60,
fixed_address=None,
nat_destination=None,
)
@patch.object(connection.Connection, 'has_service')
@patch.object(connection.Connection, 'get_floating_ip')
@patch.object(connection.Connection, '_add_auto_ip')
def test_add_ips_to_server_ipv6_only(
self, mock_add_auto_ip,
mock_get_floating_ip,
mock_has_service):
self, mock_add_auto_ip, mock_get_floating_ip, mock_has_service
):
self.cloud._floating_ip_source = None
self.cloud.force_ipv4 = False
self.cloud._local_ipv6 = True
mock_has_service.return_value = False
server = fakes.make_fake_server(
server_id='server-id', name='test-server', status="ACTIVE",
server_id='server-id',
name='test-server',
status="ACTIVE",
addresses={
'private': [{
'addr': "10.223.160.141",
'version': 4
}],
'public': [{
u'OS-EXT-IPS-MAC:mac_addr': u'fa:16:3e:ae:7d:42',
u'OS-EXT-IPS:type': u'fixed',
'addr': "2001:4800:7819:103:be76:4eff:fe05:8525",
'version': 6
}]
}
'private': [{'addr': "10.223.160.141", 'version': 4}],
'public': [
{
u'OS-EXT-IPS-MAC:mac_addr': u'fa:16:3e:ae:7d:42',
u'OS-EXT-IPS:type': u'fixed',
'addr': "2001:4800:7819:103:be76:4eff:fe05:8525",
'version': 6,
}
],
},
)
server_dict = meta.add_server_interfaces(
self.cloud,
_server.Server(**server)
self.cloud, _server.Server(**server)
)
new_server = self.cloud.add_ips_to_server(server=server_dict)
@ -107,80 +123,79 @@ class TestFloatingIP(base.TestCase):
mock_add_auto_ip.assert_not_called()
self.assertEqual(
new_server['interface_ip'],
'2001:4800:7819:103:be76:4eff:fe05:8525')
'2001:4800:7819:103:be76:4eff:fe05:8525',
)
self.assertEqual(new_server['private_v4'], '10.223.160.141')
self.assertEqual(new_server['public_v4'], '')
self.assertEqual(
new_server['public_v6'], '2001:4800:7819:103:be76:4eff:fe05:8525')
new_server['public_v6'], '2001:4800:7819:103:be76:4eff:fe05:8525'
)
@patch.object(connection.Connection, 'has_service')
@patch.object(connection.Connection, 'get_floating_ip')
@patch.object(connection.Connection, '_add_auto_ip')
def test_add_ips_to_server_rackspace(
self, mock_add_auto_ip,
mock_get_floating_ip,
mock_has_service):
self, mock_add_auto_ip, mock_get_floating_ip, mock_has_service
):
self.cloud._floating_ip_source = None
self.cloud.force_ipv4 = False
self.cloud._local_ipv6 = True
mock_has_service.return_value = False
server = fakes.make_fake_server(
server_id='server-id', name='test-server', status="ACTIVE",
server_id='server-id',
name='test-server',
status="ACTIVE",
addresses={
'private': [{
'addr': "10.223.160.141",
'version': 4
}],
'public': [{
'addr': "104.130.246.91",
'version': 4
}, {
'addr': "2001:4800:7819:103:be76:4eff:fe05:8525",
'version': 6
}]
}
'private': [{'addr': "10.223.160.141", 'version': 4}],
'public': [
{'addr': "104.130.246.91", 'version': 4},
{
'addr': "2001:4800:7819:103:be76:4eff:fe05:8525",
'version': 6,
},
],
},
)
server_dict = meta.add_server_interfaces(
self.cloud,
_server.Server(**server))
self.cloud, _server.Server(**server)
)
new_server = self.cloud.add_ips_to_server(server=server_dict)
mock_get_floating_ip.assert_not_called()
mock_add_auto_ip.assert_not_called()
self.assertEqual(
new_server['interface_ip'],
'2001:4800:7819:103:be76:4eff:fe05:8525')
'2001:4800:7819:103:be76:4eff:fe05:8525',
)
@patch.object(connection.Connection, 'has_service')
@patch.object(connection.Connection, 'get_floating_ip')
@patch.object(connection.Connection, '_add_auto_ip')
def test_add_ips_to_server_rackspace_local_ipv4(
self, mock_add_auto_ip,
mock_get_floating_ip,
mock_has_service):
self, mock_add_auto_ip, mock_get_floating_ip, mock_has_service
):
self.cloud._floating_ip_source = None
self.cloud.force_ipv4 = False
self.cloud._local_ipv6 = False
mock_has_service.return_value = False
server = fakes.make_fake_server(
server_id='server-id', name='test-server', status="ACTIVE",
server_id='server-id',
name='test-server',
status="ACTIVE",
addresses={
'private': [{
'addr': "10.223.160.141",
'version': 4
}],
'public': [{
'addr': "104.130.246.91",
'version': 4
}, {
'addr': "2001:4800:7819:103:be76:4eff:fe05:8525",
'version': 6
}]
}
'private': [{'addr': "10.223.160.141", 'version': 4}],
'public': [
{'addr': "104.130.246.91", 'version': 4},
{
'addr': "2001:4800:7819:103:be76:4eff:fe05:8525",
'version': 6,
},
],
},
)
server_dict = meta.add_server_interfaces(
self.cloud,
_server.Server(**server))
self.cloud, _server.Server(**server)
)
new_server = self.cloud.add_ips_to_server(server=server_dict)
mock_get_floating_ip.assert_not_called()
@ -190,24 +205,35 @@ class TestFloatingIP(base.TestCase):
@patch.object(connection.Connection, 'add_ip_list')
def test_add_ips_to_server_ip_list(self, mock_add_ip_list):
server_dict = fakes.make_fake_server(
server_id='server-id', name='test-server', status="ACTIVE",
addresses={})
server_id='server-id',
name='test-server',
status="ACTIVE",
addresses={},
)
ips = ['203.0.113.29', '172.24.4.229']
self.cloud.add_ips_to_server(server_dict, ips=ips)
mock_add_ip_list.assert_called_with(
server_dict, ips, wait=False, timeout=60,
server_dict,
ips,
wait=False,
timeout=60,
fixed_address=None,
nat_destination=None)
nat_destination=None,
)
@patch.object(connection.Connection, '_needs_floating_ip')
@patch.object(connection.Connection, '_add_auto_ip')
def test_add_ips_to_server_auto_ip(
self, mock_add_auto_ip, mock_needs_floating_ip):
self, mock_add_auto_ip, mock_needs_floating_ip
):
server_dict = fakes.make_fake_server(
server_id='server-id', name='test-server', status="ACTIVE",
addresses={})
server_id='server-id',
name='test-server',
status="ACTIVE",
addresses={},
)
# TODO(mordred) REMOVE THIS MOCK WHEN THE NEXT PATCH LANDS
# SERIOUSLY THIS TIME. NEXT PATCH - WHICH SHOULD ADD MOCKS FOR
@ -218,4 +244,5 @@ class TestFloatingIP(base.TestCase):
self.cloud.add_ips_to_server(server_dict)
mock_add_auto_ip.assert_called_with(
server_dict, wait=False, timeout=60, reuse=True)
server_dict, wait=False, timeout=60, reuse=True
)

File diff suppressed because it is too large Load Diff

View File

@ -28,6 +28,7 @@ def get_fake_has_service(has_service):
if s == 'network':
return False
return has_service(s)
return fake_has_service
@ -38,27 +39,28 @@ class TestFloatingIP(base.TestCase):
'id': 1,
'instance_id': None,
'ip': '203.0.113.1',
'pool': 'nova'
'pool': 'nova',
},
{
'fixed_ip': None,
'id': 2,
'instance_id': None,
'ip': '203.0.113.2',
'pool': 'nova'
'pool': 'nova',
},
{
'fixed_ip': '192.0.2.3',
'id': 29,
'instance_id': 'myself',
'ip': '198.51.100.29',
'pool': 'black_hole'
}
'pool': 'black_hole',
},
]
mock_floating_ip_pools = [
{'id': 'pool1_id', 'name': 'nova'},
{'id': 'pool2_id', 'name': 'pool2'}]
{'id': 'pool2_id', 'name': 'pool2'},
]
def assertAreInstances(self, elements, elem_type):
for e in elements:
@ -68,23 +70,36 @@ class TestFloatingIP(base.TestCase):
super(TestFloatingIP, self).setUp()
self.fake_server = fakes.make_fake_server(
'server-id', '', 'ACTIVE',
addresses={u'test_pnztt_net': [{
u'OS-EXT-IPS:type': u'fixed',
u'addr': '192.0.2.129',
u'version': 4,
u'OS-EXT-IPS-MAC:mac_addr':
u'fa:16:3e:ae:7d:42'}]})
'server-id',
'',
'ACTIVE',
addresses={
u'test_pnztt_net': [
{
u'OS-EXT-IPS:type': u'fixed',
u'addr': '192.0.2.129',
u'version': 4,
u'OS-EXT-IPS-MAC:mac_addr': u'fa:16:3e:ae:7d:42',
}
]
},
)
self.cloud.has_service = get_fake_has_service(self.cloud.has_service)
def test_list_floating_ips(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url('compute', append=['os-floating-ips']),
json={'floating_ips': self.mock_floating_ip_list_rep}),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'compute', append=['os-floating-ips']
),
json={'floating_ips': self.mock_floating_ip_list_rep},
),
]
)
floating_ips = self.cloud.list_floating_ips()
self.assertIsInstance(floating_ips, list)
@ -95,19 +110,28 @@ class TestFloatingIP(base.TestCase):
def test_list_floating_ips_with_filters(self):
self.assertRaisesRegex(
ValueError, "Nova-network don't support server-side",
self.cloud.list_floating_ips, filters={'Foo': 42}
ValueError,
"Nova-network don't support server-side",
self.cloud.list_floating_ips,
filters={'Foo': 42},
)
def test_search_floating_ips(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url('compute', append=['os-floating-ips']),
json={'floating_ips': self.mock_floating_ip_list_rep}),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'compute', append=['os-floating-ips']
),
json={'floating_ips': self.mock_floating_ip_list_rep},
),
]
)
floating_ips = self.cloud.search_floating_ips(
filters={'attached': False})
filters={'attached': False}
)
self.assertIsInstance(floating_ips, list)
self.assertEqual(2, len(floating_ips))
@ -116,11 +140,17 @@ class TestFloatingIP(base.TestCase):
self.assert_calls()
def test_get_floating_ip(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url('compute', append=['os-floating-ips']),
json={'floating_ips': self.mock_floating_ip_list_rep}),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'compute', append=['os-floating-ips']
),
json={'floating_ips': self.mock_floating_ip_list_rep},
),
]
)
floating_ip = self.cloud.get_floating_ip(id='29')
@ -130,11 +160,17 @@ class TestFloatingIP(base.TestCase):
self.assert_calls()
def test_get_floating_ip_not_found(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url('compute', append=['os-floating-ips']),
json={'floating_ips': self.mock_floating_ip_list_rep}),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'compute', append=['os-floating-ips']
),
json={'floating_ips': self.mock_floating_ip_list_rep},
),
]
)
floating_ip = self.cloud.get_floating_ip(id='666')
@ -143,12 +179,17 @@ class TestFloatingIP(base.TestCase):
self.assert_calls()
def test_get_floating_ip_by_id(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url('compute', append=['os-floating-ips',
'1']),
json={'floating_ip': self.mock_floating_ip_list_rep[0]}),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'compute', append=['os-floating-ips', '1']
),
json={'floating_ip': self.mock_floating_ip_list_rep[0]},
),
]
)
floating_ip = self.cloud.get_floating_ip_by_id(id='1')
@ -157,161 +198,240 @@ class TestFloatingIP(base.TestCase):
self.assert_calls()
def test_create_floating_ip(self):
self.register_uris([
dict(method='POST',
uri=self.get_mock_url('compute', append=['os-floating-ips']),
json={'floating_ip': self.mock_floating_ip_list_rep[1]},
validate=dict(
json={'pool': 'nova'})),
dict(method='GET',
uri=self.get_mock_url(
'compute',
append=['os-floating-ips', '2']),
json={'floating_ip': self.mock_floating_ip_list_rep[1]}),
])
self.register_uris(
[
dict(
method='POST',
uri=self.get_mock_url(
'compute', append=['os-floating-ips']
),
json={'floating_ip': self.mock_floating_ip_list_rep[1]},
validate=dict(json={'pool': 'nova'}),
),
dict(
method='GET',
uri=self.get_mock_url(
'compute', append=['os-floating-ips', '2']
),
json={'floating_ip': self.mock_floating_ip_list_rep[1]},
),
]
)
self.cloud.create_floating_ip(network='nova')
self.assert_calls()
def test_available_floating_ip_existing(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url('compute', append=['os-floating-ips']),
json={'floating_ips': self.mock_floating_ip_list_rep[:1]}),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'compute', append=['os-floating-ips']
),
json={'floating_ips': self.mock_floating_ip_list_rep[:1]},
),
]
)
ip = self.cloud.available_floating_ip(network='nova')
self.assertEqual(self.mock_floating_ip_list_rep[0]['ip'],
ip['floating_ip_address'])
self.assertEqual(
self.mock_floating_ip_list_rep[0]['ip'], ip['floating_ip_address']
)
self.assert_calls()
def test_available_floating_ip_new(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url('compute', append=['os-floating-ips']),
json={'floating_ips': []}),
dict(method='POST',
uri=self.get_mock_url('compute', append=['os-floating-ips']),
json={'floating_ip': self.mock_floating_ip_list_rep[0]},
validate=dict(
json={'pool': 'nova'})),
dict(method='GET',
uri=self.get_mock_url(
'compute',
append=['os-floating-ips', '1']),
json={'floating_ip': self.mock_floating_ip_list_rep[0]}),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'compute', append=['os-floating-ips']
),
json={'floating_ips': []},
),
dict(
method='POST',
uri=self.get_mock_url(
'compute', append=['os-floating-ips']
),
json={'floating_ip': self.mock_floating_ip_list_rep[0]},
validate=dict(json={'pool': 'nova'}),
),
dict(
method='GET',
uri=self.get_mock_url(
'compute', append=['os-floating-ips', '1']
),
json={'floating_ip': self.mock_floating_ip_list_rep[0]},
),
]
)
ip = self.cloud.available_floating_ip(network='nova')
self.assertEqual(self.mock_floating_ip_list_rep[0]['ip'],
ip['floating_ip_address'])
self.assertEqual(
self.mock_floating_ip_list_rep[0]['ip'], ip['floating_ip_address']
)
self.assert_calls()
def test_delete_floating_ip_existing(self):
self.register_uris([
dict(method='DELETE',
uri=self.get_mock_url(
'compute',
append=['os-floating-ips', 'a-wild-id-appears'])),
dict(method='GET',
uri=self.get_mock_url('compute', append=['os-floating-ips']),
json={'floating_ips': []}),
])
self.register_uris(
[
dict(
method='DELETE',
uri=self.get_mock_url(
'compute',
append=['os-floating-ips', 'a-wild-id-appears'],
),
),
dict(
method='GET',
uri=self.get_mock_url(
'compute', append=['os-floating-ips']
),
json={'floating_ips': []},
),
]
)
ret = self.cloud.delete_floating_ip(
floating_ip_id='a-wild-id-appears')
ret = self.cloud.delete_floating_ip(floating_ip_id='a-wild-id-appears')
self.assertTrue(ret)
self.assert_calls()
def test_delete_floating_ip_not_found(self):
self.register_uris([
dict(method='DELETE',
uri=self.get_mock_url(
'compute',
append=['os-floating-ips', 'a-wild-id-appears']),
status_code=404),
])
self.register_uris(
[
dict(
method='DELETE',
uri=self.get_mock_url(
'compute',
append=['os-floating-ips', 'a-wild-id-appears'],
),
status_code=404,
),
]
)
ret = self.cloud.delete_floating_ip(
floating_ip_id='a-wild-id-appears')
ret = self.cloud.delete_floating_ip(floating_ip_id='a-wild-id-appears')
self.assertFalse(ret)
self.assert_calls()
def test_attach_ip_to_server(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url('compute', append=['os-floating-ips']),
json={'floating_ips': self.mock_floating_ip_list_rep}),
dict(method='POST',
uri=self.get_mock_url(
'compute',
append=['servers', self.fake_server['id'], 'action']),
validate=dict(
json={
"addFloatingIp": {
"address": "203.0.113.1",
"fixed_address": "192.0.2.129",
}})),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'compute', append=['os-floating-ips']
),
json={'floating_ips': self.mock_floating_ip_list_rep},
),
dict(
method='POST',
uri=self.get_mock_url(
'compute',
append=['servers', self.fake_server['id'], 'action'],
),
validate=dict(
json={
"addFloatingIp": {
"address": "203.0.113.1",
"fixed_address": "192.0.2.129",
}
}
),
),
]
)
self.cloud._attach_ip_to_server(
server=self.fake_server,
floating_ip=self.cloud._normalize_floating_ip(
self.mock_floating_ip_list_rep[0]),
fixed_address='192.0.2.129')
self.mock_floating_ip_list_rep[0]
),
fixed_address='192.0.2.129',
)
self.assert_calls()
def test_detach_ip_from_server(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url('compute', append=['os-floating-ips']),
json={'floating_ips': self.mock_floating_ip_list_rep}),
dict(method='POST',
uri=self.get_mock_url(
'compute',
append=['servers', self.fake_server['id'], 'action']),
validate=dict(
json={
"removeFloatingIp": {
"address": "203.0.113.1",
}})),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'compute', append=['os-floating-ips']
),
json={'floating_ips': self.mock_floating_ip_list_rep},
),
dict(
method='POST',
uri=self.get_mock_url(
'compute',
append=['servers', self.fake_server['id'], 'action'],
),
validate=dict(
json={
"removeFloatingIp": {
"address": "203.0.113.1",
}
}
),
),
]
)
self.cloud.detach_ip_from_server(
server_id='server-id', floating_ip_id=1)
server_id='server-id', floating_ip_id=1
)
self.assert_calls()
def test_add_ip_from_pool(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url('compute', append=['os-floating-ips']),
json={'floating_ips': self.mock_floating_ip_list_rep}),
dict(method='GET',
uri=self.get_mock_url('compute', append=['os-floating-ips']),
json={'floating_ips': self.mock_floating_ip_list_rep}),
dict(method='POST',
uri=self.get_mock_url(
'compute',
append=['servers', self.fake_server['id'], 'action']),
validate=dict(
json={
"addFloatingIp": {
"address": "203.0.113.1",
"fixed_address": "192.0.2.129",
}})),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'compute', append=['os-floating-ips']
),
json={'floating_ips': self.mock_floating_ip_list_rep},
),
dict(
method='GET',
uri=self.get_mock_url(
'compute', append=['os-floating-ips']
),
json={'floating_ips': self.mock_floating_ip_list_rep},
),
dict(
method='POST',
uri=self.get_mock_url(
'compute',
append=['servers', self.fake_server['id'], 'action'],
),
validate=dict(
json={
"addFloatingIp": {
"address": "203.0.113.1",
"fixed_address": "192.0.2.129",
}
}
),
),
]
)
server = self.cloud._add_ip_from_pool(
server=self.fake_server,
network='nova',
fixed_address='192.0.2.129')
fixed_address='192.0.2.129',
)
self.assertEqual(server, self.fake_server)
self.assert_calls()

View File

@ -25,27 +25,39 @@ from openstack.tests.unit import base
class TestFloatingIPPool(base.TestCase):
pools = [{'name': u'public'}]
pools = [{'name': 'public'}]
def test_list_floating_ip_pools(self):
self.register_uris([
dict(method='GET',
uri='{endpoint}/extensions'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
json={'extensions': [{
u'alias': u'os-floating-ip-pools',
u'updated': u'2014-12-03T00:00:00Z',
u'name': u'FloatingIpPools',
u'links': [],
u'namespace':
u'http://docs.openstack.org/compute/ext/fake_xml',
u'description': u'Floating IPs support.'}]}),
dict(method='GET',
uri='{endpoint}/os-floating-ip-pools'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
json={"floating_ip_pools": [{"name": "public"}]})
])
self.register_uris(
[
dict(
method='GET',
uri='{endpoint}/extensions'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
json={
'extensions': [
{
'alias': 'os-floating-ip-pools',
'updated': '2014-12-03T00:00:00Z',
'name': 'FloatingIpPools',
'links': [],
'namespace': 'http://docs.openstack.org/compute/ext/fake_xml', # noqa: E501
'description': 'Floating IPs support.',
}
]
},
),
dict(
method='GET',
uri='{endpoint}/os-floating-ip-pools'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
json={"floating_ip_pools": [{"name": "public"}]},
),
]
)
floating_ip_pools = self.cloud.list_floating_ip_pools()
@ -55,24 +67,38 @@ class TestFloatingIPPool(base.TestCase):
def test_list_floating_ip_pools_exception(self):
self.register_uris([
dict(method='GET',
uri='{endpoint}/extensions'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
json={'extensions': [{
u'alias': u'os-floating-ip-pools',
u'updated': u'2014-12-03T00:00:00Z',
u'name': u'FloatingIpPools',
u'links': [],
u'namespace':
u'http://docs.openstack.org/compute/ext/fake_xml',
u'description': u'Floating IPs support.'}]}),
dict(method='GET',
uri='{endpoint}/os-floating-ip-pools'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
status_code=404)])
self.register_uris(
[
dict(
method='GET',
uri='{endpoint}/extensions'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
json={
'extensions': [
{
'alias': 'os-floating-ip-pools',
'updated': '2014-12-03T00:00:00Z',
'name': 'FloatingIpPools',
'links': [],
'namespace': 'http://docs.openstack.org/compute/ext/fake_xml', # noqa: E501
'description': 'Floating IPs support.',
}
]
},
),
dict(
method='GET',
uri='{endpoint}/os-floating-ip-pools'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
status_code=404,
),
]
)
self.assertRaises(
OpenStackCloudException, self.cloud.list_floating_ip_pools)
OpenStackCloudException, self.cloud.list_floating_ip_pools
)
self.assert_calls()

File diff suppressed because it is too large Load Diff

View File

@ -17,86 +17,129 @@ from openstack.tests.unit import base
class TestGroups(base.TestCase):
def setUp(self, cloud_config_fixture='clouds.yaml'):
super(TestGroups, self).setUp(
cloud_config_fixture=cloud_config_fixture)
cloud_config_fixture=cloud_config_fixture
)
self.addCleanup(self.assert_calls)
def get_mock_url(self, service_type='identity', interface='public',
resource='groups', append=None, base_url_append='v3'):
def get_mock_url(
self,
service_type='identity',
interface='public',
resource='groups',
append=None,
base_url_append='v3',
):
return super(TestGroups, self).get_mock_url(
service_type='identity', interface=interface, resource=resource,
append=append, base_url_append=base_url_append)
service_type='identity',
interface=interface,
resource=resource,
append=append,
base_url_append=base_url_append,
)
def test_list_groups(self):
group_data = self._get_group_data()
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(),
status_code=200,
json={'groups': [group_data.json_response['group']]})
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(),
status_code=200,
json={'groups': [group_data.json_response['group']]},
)
]
)
self.cloud.list_groups()
def test_get_group(self):
group_data = self._get_group_data()
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(),
status_code=200,
json={'groups': [group_data.json_response['group']]}),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(),
status_code=200,
json={'groups': [group_data.json_response['group']]},
),
]
)
self.cloud.get_group(group_data.group_id)
def test_delete_group(self):
group_data = self._get_group_data()
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
append=[group_data.group_id]),
status_code=200,
json={'group': group_data.json_response['group']}),
dict(method='DELETE',
uri=self.get_mock_url(append=[group_data.group_id]),
status_code=204),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(append=[group_data.group_id]),
status_code=200,
json={'group': group_data.json_response['group']},
),
dict(
method='DELETE',
uri=self.get_mock_url(append=[group_data.group_id]),
status_code=204,
),
]
)
self.assertTrue(self.cloud.delete_group(group_data.group_id))
def test_create_group(self):
domain_data = self._get_domain_data()
group_data = self._get_group_data(domain_id=domain_data.domain_id)
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(resource='domains',
append=[domain_data.domain_id]),
status_code=200,
json=domain_data.json_response),
dict(method='POST',
uri=self.get_mock_url(),
status_code=200,
json=group_data.json_response,
validate=dict(json=group_data.json_request))
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
resource='domains', append=[domain_data.domain_id]
),
status_code=200,
json=domain_data.json_response,
),
dict(
method='POST',
uri=self.get_mock_url(),
status_code=200,
json=group_data.json_response,
validate=dict(json=group_data.json_request),
),
]
)
self.cloud.create_group(
name=group_data.group_name, description=group_data.description,
domain=group_data.domain_id)
name=group_data.group_name,
description=group_data.description,
domain=group_data.domain_id,
)
def test_update_group(self):
group_data = self._get_group_data()
# Domain ID is not sent
group_data.json_request['group'].pop('domain_id')
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
append=[group_data.group_id]),
status_code=200,
json={'group': group_data.json_response['group']}),
dict(method='PATCH',
uri=self.get_mock_url(
append=[group_data.group_id]),
status_code=200,
json=group_data.json_response,
validate=dict(json={
'group': {'name': 'new_name', 'description':
'new_description'}}))
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(append=[group_data.group_id]),
status_code=200,
json={'group': group_data.json_response['group']},
),
dict(
method='PATCH',
uri=self.get_mock_url(append=[group_data.group_id]),
status_code=200,
json=group_data.json_response,
validate=dict(
json={
'group': {
'name': 'new_name',
'description': 'new_description',
}
}
),
),
]
)
self.cloud.update_group(
group_data.group_id, 'new_name', 'new_description')
group_data.group_id, 'new_name', 'new_description'
)

View File

@ -23,46 +23,65 @@ RAW_ROLE_ASSIGNMENTS = [
"links": {"assignment": "http://example"},
"role": {"id": "123456"},
"scope": {"domain": {"id": "161718"}},
"user": {"id": "313233"}
"user": {"id": "313233"},
},
{
"links": {"assignment": "http://example"},
"group": {"id": "101112"},
"role": {"id": "123456"},
"scope": {"project": {"id": "456789"}}
}
"scope": {"project": {"id": "456789"}},
},
]
class TestIdentityRoles(base.TestCase):
def get_mock_url(self, service_type='identity', interface='public',
resource='roles', append=None, base_url_append='v3',
qs_elements=None):
def get_mock_url(
self,
service_type='identity',
interface='public',
resource='roles',
append=None,
base_url_append='v3',
qs_elements=None,
):
return super(TestIdentityRoles, self).get_mock_url(
service_type, interface, resource, append, base_url_append,
qs_elements)
service_type,
interface,
resource,
append,
base_url_append,
qs_elements,
)
def test_list_roles(self):
role_data = self._get_role_data()
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(),
status_code=200,
json={'roles': [role_data.json_response['role']]})
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(),
status_code=200,
json={'roles': [role_data.json_response['role']]},
)
]
)
self.cloud.list_roles()
self.assert_calls()
def test_list_role_by_name(self):
role_data = self._get_role_data()
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
qs_elements=['name={0}'.format(role_data.role_name)]),
status_code=200,
json={'roles': [role_data.json_response['role']]})
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
qs_elements=['name={0}'.format(role_data.role_name)]
),
status_code=200,
json={'roles': [role_data.json_response['role']]},
)
]
)
role = self.cloud.list_roles(name=role_data.role_name)[0]
self.assertIsNotNone(role)
@ -72,12 +91,16 @@ class TestIdentityRoles(base.TestCase):
def test_get_role_by_name(self):
role_data = self._get_role_data()
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(),
status_code=200,
json={'roles': [role_data.json_response['role']]})
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(),
status_code=200,
json={'roles': [role_data.json_response['role']]},
)
]
)
role = self.cloud.get_role(role_data.role_name)
self.assertIsNotNone(role)
@ -87,12 +110,16 @@ class TestIdentityRoles(base.TestCase):
def test_get_role_by_id(self):
role_data = self._get_role_data()
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(),
status_code=200,
json={'roles': [role_data.json_response['role']]})
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(),
status_code=200,
json={'roles': [role_data.json_response['role']]},
)
]
)
role = self.cloud.get_role(role_data.role_id)
self.assertIsNotNone(role)
@ -102,13 +129,17 @@ class TestIdentityRoles(base.TestCase):
def test_create_role(self):
role_data = self._get_role_data()
self.register_uris([
dict(method='POST',
uri=self.get_mock_url(),
status_code=200,
json=role_data.json_response,
validate=dict(json=role_data.json_request))
])
self.register_uris(
[
dict(
method='POST',
uri=self.get_mock_url(),
status_code=200,
json=role_data.json_response,
validate=dict(json=role_data.json_request),
)
]
)
role = self.cloud.create_role(role_data.role_name)
@ -120,20 +151,25 @@ class TestIdentityRoles(base.TestCase):
def test_update_role(self):
role_data = self._get_role_data()
req = {'role': {'name': 'new_name'}}
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(),
status_code=200,
json={'roles': [role_data.json_response['role']]}),
dict(method='PATCH',
uri=self.get_mock_url(append=[role_data.role_id]),
status_code=200,
json=role_data.json_response,
validate=dict(json=req))
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(),
status_code=200,
json={'roles': [role_data.json_response['role']]},
),
dict(
method='PATCH',
uri=self.get_mock_url(append=[role_data.role_id]),
status_code=200,
json=role_data.json_response,
validate=dict(json=req),
),
]
)
role = self.cloud.update_role(
role_data.role_id, 'new_name')
role = self.cloud.update_role(role_data.role_id, 'new_name')
self.assertIsNotNone(role)
self.assertThat(role.name, matchers.Equals(role_data.role_name))
@ -142,30 +178,42 @@ class TestIdentityRoles(base.TestCase):
def test_delete_role_by_id(self):
role_data = self._get_role_data()
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(),
status_code=200,
json={'roles': [role_data.json_response['role']]}),
dict(method='DELETE',
uri=self.get_mock_url(append=[role_data.role_id]),
status_code=204)
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(),
status_code=200,
json={'roles': [role_data.json_response['role']]},
),
dict(
method='DELETE',
uri=self.get_mock_url(append=[role_data.role_id]),
status_code=204,
),
]
)
role = self.cloud.delete_role(role_data.role_id)
self.assertThat(role, matchers.Equals(True))
self.assert_calls()
def test_delete_role_by_name(self):
role_data = self._get_role_data()
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(),
status_code=200,
json={'roles': [role_data.json_response['role']]}),
dict(method='DELETE',
uri=self.get_mock_url(append=[role_data.role_id]),
status_code=204)
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(),
status_code=200,
json={'roles': [role_data.json_response['role']]},
),
dict(
method='DELETE',
uri=self.get_mock_url(append=[role_data.role_id]),
status_code=204,
),
]
)
role = self.cloud.delete_role(role_data.role_name)
self.assertThat(role, matchers.Equals(True))
self.assert_calls()
@ -177,78 +225,102 @@ class TestIdentityRoles(base.TestCase):
project_data = self._get_project_data(domain_id=domain_data.domain_id)
role_data = self._get_role_data()
response = [
{'links': 'https://example.com',
'role': {'id': role_data.role_id},
'scope': {'domain': {'id': domain_data.domain_id}},
'user': {'id': user_data.user_id}},
{'links': 'https://example.com',
'role': {'id': role_data.role_id},
'scope': {'project': {'id': project_data.project_id}},
'group': {'id': group_data.group_id}},
{
'links': 'https://example.com',
'role': {'id': role_data.role_id},
'scope': {'domain': {'id': domain_data.domain_id}},
'user': {'id': user_data.user_id},
},
{
'links': 'https://example.com',
'role': {'id': role_data.role_id},
'scope': {'project': {'id': project_data.project_id}},
'group': {'id': group_data.group_id},
},
]
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
resource='role_assignments'),
status_code=200,
json={'role_assignments': response},
complete_qs=True)
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(resource='role_assignments'),
status_code=200,
json={'role_assignments': response},
complete_qs=True,
)
]
)
ret = self.cloud.list_role_assignments()
self.assertThat(len(ret), matchers.Equals(2))
self.assertThat(ret[0].user['id'], matchers.Equals(user_data.user_id))
self.assertThat(ret[0].role['id'], matchers.Equals(role_data.role_id))
self.assertThat(
ret[0].scope['domain']['id'],
matchers.Equals(domain_data.domain_id))
matchers.Equals(domain_data.domain_id),
)
self.assertThat(
ret[1].group['id'],
matchers.Equals(group_data.group_id))
ret[1].group['id'], matchers.Equals(group_data.group_id)
)
self.assertThat(ret[1].role['id'], matchers.Equals(role_data.role_id))
self.assertThat(
ret[1].scope['project']['id'],
matchers.Equals(project_data.project_id))
matchers.Equals(project_data.project_id),
)
def test_list_role_assignments_filters(self):
domain_data = self._get_domain_data()
user_data = self._get_user_data(domain_id=domain_data.domain_id)
role_data = self._get_role_data()
response = [
{'links': 'https://example.com',
'role': {'id': role_data.role_id},
'scope': {'domain': {'id': domain_data.domain_id}},
'user': {'id': user_data.user_id}}
{
'links': 'https://example.com',
'role': {'id': role_data.role_id},
'scope': {'domain': {'id': domain_data.domain_id}},
'user': {'id': user_data.user_id},
}
]
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
resource='role_assignments',
qs_elements=['scope.domain.id=%s' % domain_data.domain_id,
'user.id=%s' % user_data.user_id,
'effective=True']),
status_code=200,
json={'role_assignments': response},
complete_qs=True)
])
params = dict(user=user_data.user_id, domain=domain_data.domain_id,
effective=True)
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
resource='role_assignments',
qs_elements=[
'scope.domain.id=%s' % domain_data.domain_id,
'user.id=%s' % user_data.user_id,
'effective=True',
],
),
status_code=200,
json={'role_assignments': response},
complete_qs=True,
)
]
)
params = dict(
user=user_data.user_id,
domain=domain_data.domain_id,
effective=True,
)
ret = self.cloud.list_role_assignments(filters=params)
self.assertThat(len(ret), matchers.Equals(1))
self.assertThat(ret[0].user['id'], matchers.Equals(user_data.user_id))
self.assertThat(ret[0].role['id'], matchers.Equals(role_data.role_id))
self.assertThat(
ret[0].scope['domain']['id'],
matchers.Equals(domain_data.domain_id))
matchers.Equals(domain_data.domain_id),
)
def test_list_role_assignments_exception(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(resource='role_assignments'),
status_code=403)
])
with testtools.ExpectedException(
exceptions.ForbiddenException
):
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(resource='role_assignments'),
status_code=403,
)
]
)
with testtools.ExpectedException(exceptions.ForbiddenException):
self.cloud.list_role_assignments()
self.assert_calls()

View File

@ -16,29 +16,46 @@ from openstack.tests.unit import base
class TestIdentityUsers(base.TestCase):
def get_mock_url(self, service_type='identity', interface='public',
resource='users', append=None, base_url_append='v3',
qs_elements=None):
def get_mock_url(
self,
service_type='identity',
interface='public',
resource='users',
append=None,
base_url_append='v3',
qs_elements=None,
):
return super(TestIdentityUsers, self).get_mock_url(
service_type, interface, resource, append, base_url_append,
qs_elements)
service_type,
interface,
resource,
append,
base_url_append,
qs_elements,
)
def test_create_user(self):
domain_data = self._get_domain_data()
user_data = self._get_user_data("myusername", "mypassword",
domain_id=domain_data.domain_id)
self.register_uris([
dict(method='POST',
uri=self.get_mock_url(),
status_code=200,
json=user_data.json_response,
validate=dict(json=user_data.json_request))
])
user_data = self._get_user_data(
"myusername", "mypassword", domain_id=domain_data.domain_id
)
self.register_uris(
[
dict(
method='POST',
uri=self.get_mock_url(),
status_code=200,
json=user_data.json_response,
validate=dict(json=user_data.json_request),
)
]
)
user = self.cloud.create_user(user_data.name,
password=user_data.password,
domain_id=domain_data.domain_id)
user = self.cloud.create_user(
user_data.name,
password=user_data.password,
domain_id=domain_data.domain_id,
)
self.assertIsNotNone(user)
self.assertThat(user.name, matchers.Equals(user_data.name))
@ -46,22 +63,29 @@ class TestIdentityUsers(base.TestCase):
def test_create_user_without_password(self):
domain_data = self._get_domain_data()
user_data = self._get_user_data("myusername",
domain_id=domain_data.domain_id)
user_data = self._get_user_data(
"myusername", domain_id=domain_data.domain_id
)
user_data._replace(
password=None,
json_request=user_data.json_request["user"].pop("password"))
json_request=user_data.json_request["user"].pop("password"),
)
self.register_uris([
dict(method='POST',
uri=self.get_mock_url(),
status_code=200,
json=user_data.json_response,
validate=dict(json=user_data.json_request))
])
self.register_uris(
[
dict(
method='POST',
uri=self.get_mock_url(),
status_code=200,
json=user_data.json_response,
validate=dict(json=user_data.json_request),
)
]
)
user = self.cloud.create_user(user_data.name,
domain_id=domain_data.domain_id)
user = self.cloud.create_user(
user_data.name, domain_id=domain_data.domain_id
)
self.assertIsNotNone(user)
self.assertThat(user.name, matchers.Equals(user_data.name))

File diff suppressed because it is too large Load Diff

View File

@ -20,47 +20,59 @@ from openstack.tests.unit import base
class TestImageSnapshot(base.TestCase):
def setUp(self):
super(TestImageSnapshot, self).setUp()
self.server_id = str(uuid.uuid4())
self.image_id = str(uuid.uuid4())
self.server_name = self.getUniqueString('name')
self.fake_server = fakes.make_fake_server(
self.server_id, self.server_name)
self.server_id, self.server_name
)
def test_create_image_snapshot_wait_until_active_never_active(self):
snapshot_name = 'test-snapshot'
fake_image = fakes.make_fake_image(self.image_id, status='pending')
self.register_uris([
self.get_nova_discovery_mock_dict(),
dict(
method='POST',
uri='{endpoint}/servers/{server_id}/action'.format(
endpoint=fakes.COMPUTE_ENDPOINT,
server_id=self.server_id),
headers=dict(
Location='{endpoint}/images/{image_id}'.format(
endpoint='https://images.example.com',
image_id=self.image_id)),
validate=dict(
json={
"createImage": {
"name": snapshot_name,
"metadata": {},
}})),
self.get_glance_discovery_mock_dict(),
dict(
method='GET',
uri='https://image.example.com/v2/images',
json=dict(images=[fake_image])),
])
self.register_uris(
[
self.get_nova_discovery_mock_dict(),
dict(
method='POST',
uri='{endpoint}/servers/{server_id}/action'.format(
endpoint=fakes.COMPUTE_ENDPOINT,
server_id=self.server_id,
),
headers=dict(
Location='{endpoint}/images/{image_id}'.format(
endpoint='https://images.example.com',
image_id=self.image_id,
)
),
validate=dict(
json={
"createImage": {
"name": snapshot_name,
"metadata": {},
}
}
),
),
self.get_glance_discovery_mock_dict(),
dict(
method='GET',
uri='https://image.example.com/v2/images',
json=dict(images=[fake_image]),
),
]
)
self.assertRaises(
exc.OpenStackCloudTimeout,
self.cloud.create_image_snapshot,
snapshot_name, dict(id=self.server_id),
wait=True, timeout=0.01)
snapshot_name,
dict(id=self.server_id),
wait=True,
timeout=0.01,
)
# After the fifth call, we just keep polling get images for status.
# Due to mocking sleep, we have no clue how many times we'll call it.
@ -70,35 +82,46 @@ class TestImageSnapshot(base.TestCase):
snapshot_name = 'test-snapshot'
pending_image = fakes.make_fake_image(self.image_id, status='pending')
fake_image = fakes.make_fake_image(self.image_id)
self.register_uris([
self.get_nova_discovery_mock_dict(),
dict(
method='POST',
uri='{endpoint}/servers/{server_id}/action'.format(
endpoint=fakes.COMPUTE_ENDPOINT,
server_id=self.server_id),
headers=dict(
Location='{endpoint}/images/{image_id}'.format(
endpoint='https://images.example.com',
image_id=self.image_id)),
validate=dict(
json={
"createImage": {
"name": snapshot_name,
"metadata": {},
}})),
self.get_glance_discovery_mock_dict(),
dict(
method='GET',
uri='https://image.example.com/v2/images',
json=dict(images=[pending_image])),
dict(
method='GET',
uri='https://image.example.com/v2/images',
json=dict(images=[fake_image])),
])
self.register_uris(
[
self.get_nova_discovery_mock_dict(),
dict(
method='POST',
uri='{endpoint}/servers/{server_id}/action'.format(
endpoint=fakes.COMPUTE_ENDPOINT,
server_id=self.server_id,
),
headers=dict(
Location='{endpoint}/images/{image_id}'.format(
endpoint='https://images.example.com',
image_id=self.image_id,
)
),
validate=dict(
json={
"createImage": {
"name": snapshot_name,
"metadata": {},
}
}
),
),
self.get_glance_discovery_mock_dict(),
dict(
method='GET',
uri='https://image.example.com/v2/images',
json=dict(images=[pending_image]),
),
dict(
method='GET',
uri='https://image.example.com/v2/images',
json=dict(images=[fake_image]),
),
]
)
image = self.cloud.create_image_snapshot(
'test-snapshot', dict(id=self.server_id), wait=True, timeout=2)
'test-snapshot', dict(id=self.server_id), wait=True, timeout=2
)
self.assertEqual(image['id'], self.image_id)
self.assert_calls()

View File

@ -19,7 +19,6 @@ from openstack.tests.unit import base
class TestInventory(base.TestCase):
def setUp(self):
super(TestInventory, self).setUp()
@ -50,8 +49,7 @@ class TestInventory(base.TestCase):
self.assertIsInstance(inv.clouds, list)
self.assertEqual(1, len(inv.clouds))
self.assertFalse(mock_config.return_value.get_all.called)
mock_config.return_value.get_one.assert_called_once_with(
'supercloud')
mock_config.return_value.get_one.assert_called_once_with('supercloud')
@mock.patch("openstack.config.loader.OpenStackConfig")
@mock.patch("openstack.connection.Connection")
@ -68,8 +66,9 @@ class TestInventory(base.TestCase):
ret = inv.list_hosts()
inv.clouds[0].list_servers.assert_called_once_with(detailed=True,
all_projects=False)
inv.clouds[0].list_servers.assert_called_once_with(
detailed=True, all_projects=False
)
self.assertFalse(inv.clouds[0].get_openstack_vars.called)
self.assertEqual([server], ret)
@ -81,16 +80,17 @@ class TestInventory(base.TestCase):
inv = inventory.OpenStackInventory()
server = self.cloud._normalize_server(
fakes.make_fake_server(
'1234', 'test', 'ACTIVE', addresses={}))
fakes.make_fake_server('1234', 'test', 'ACTIVE', addresses={})
)
self.assertIsInstance(inv.clouds, list)
self.assertEqual(1, len(inv.clouds))
inv.clouds[0].list_servers.return_value = [server]
inv.list_hosts(expand=False)
inv.clouds[0].list_servers.assert_called_once_with(detailed=False,
all_projects=False)
inv.clouds[0].list_servers.assert_called_once_with(
detailed=False, all_projects=False
)
self.assertFalse(inv.clouds[0].get_openstack_vars.called)
@mock.patch("openstack.config.loader.OpenStackConfig")
@ -108,8 +108,9 @@ class TestInventory(base.TestCase):
ret = inv.list_hosts(all_projects=True)
inv.clouds[0].list_servers.assert_called_once_with(detailed=True,
all_projects=True)
inv.clouds[0].list_servers.assert_called_once_with(
detailed=True, all_projects=True
)
self.assertFalse(inv.clouds[0].get_openstack_vars.called)
self.assertEqual([server], ret)

View File

@ -19,29 +19,41 @@ from openstack.tests.unit import base
class TestKeypair(base.TestCase):
def setUp(self):
super(TestKeypair, self).setUp()
self.keyname = self.getUniqueString('key')
self.key = fakes.make_fake_keypair(self.keyname)
self.useFixture(fixtures.MonkeyPatch(
'openstack.utils.maximum_supported_microversion',
lambda *args, **kwargs: '2.10'))
self.useFixture(
fixtures.MonkeyPatch(
'openstack.utils.maximum_supported_microversion',
lambda *args, **kwargs: '2.10',
)
)
def test_create_keypair(self):
self.register_uris([
dict(method='POST',
uri=self.get_mock_url(
'compute', 'public', append=['os-keypairs']),
json={'keypair': self.key},
validate=dict(json={
'keypair': {
'name': self.key['name'],
'public_key': self.key['public_key']}})),
])
self.register_uris(
[
dict(
method='POST',
uri=self.get_mock_url(
'compute', 'public', append=['os-keypairs']
),
json={'keypair': self.key},
validate=dict(
json={
'keypair': {
'name': self.key['name'],
'public_key': self.key['public_key'],
}
}
),
),
]
)
new_key = self.cloud.create_keypair(
self.keyname, self.key['public_key'])
self.keyname, self.key['public_key']
)
new_key_cmp = new_key.to_dict(ignore_none=True)
new_key_cmp.pop('location')
new_key_cmp.pop('id')
@ -50,97 +62,140 @@ class TestKeypair(base.TestCase):
self.assert_calls()
def test_create_keypair_exception(self):
self.register_uris([
dict(method='POST',
uri=self.get_mock_url(
'compute', 'public', append=['os-keypairs']),
status_code=400,
validate=dict(json={
'keypair': {
'name': self.key['name'],
'public_key': self.key['public_key']}})),
])
self.register_uris(
[
dict(
method='POST',
uri=self.get_mock_url(
'compute', 'public', append=['os-keypairs']
),
status_code=400,
validate=dict(
json={
'keypair': {
'name': self.key['name'],
'public_key': self.key['public_key'],
}
}
),
),
]
)
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.create_keypair,
self.keyname, self.key['public_key'])
self.keyname,
self.key['public_key'],
)
self.assert_calls()
def test_delete_keypair(self):
self.register_uris([
dict(method='DELETE',
uri=self.get_mock_url(
'compute', 'public',
append=['os-keypairs', self.keyname]),
status_code=202),
])
self.register_uris(
[
dict(
method='DELETE',
uri=self.get_mock_url(
'compute',
'public',
append=['os-keypairs', self.keyname],
),
status_code=202,
),
]
)
self.assertTrue(self.cloud.delete_keypair(self.keyname))
self.assert_calls()
def test_delete_keypair_not_found(self):
self.register_uris([
dict(method='DELETE',
uri=self.get_mock_url(
'compute', 'public',
append=['os-keypairs', self.keyname]),
status_code=404),
])
self.register_uris(
[
dict(
method='DELETE',
uri=self.get_mock_url(
'compute',
'public',
append=['os-keypairs', self.keyname],
),
status_code=404,
),
]
)
self.assertFalse(self.cloud.delete_keypair(self.keyname))
self.assert_calls()
def test_list_keypairs(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['os-keypairs']),
json={'keypairs': [{'keypair': self.key}]}),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['os-keypairs']
),
json={'keypairs': [{'keypair': self.key}]},
),
]
)
keypairs = self.cloud.list_keypairs()
self.assertEqual(len(keypairs), 1)
self.assertEqual(keypairs[0].name, self.key['name'])
self.assert_calls()
def test_list_keypairs_empty_filters(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['os-keypairs']),
json={'keypairs': [{'keypair': self.key}]}),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['os-keypairs']
),
json={'keypairs': [{'keypair': self.key}]},
),
]
)
keypairs = self.cloud.list_keypairs(filters=None)
self.assertEqual(len(keypairs), 1)
self.assertEqual(keypairs[0].name, self.key['name'])
self.assert_calls()
def test_list_keypairs_notempty_filters(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['os-keypairs'],
qs_elements=['user_id=b']),
json={'keypairs': [{'keypair': self.key}]}),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'compute',
'public',
append=['os-keypairs'],
qs_elements=['user_id=b'],
),
json={'keypairs': [{'keypair': self.key}]},
),
]
)
keypairs = self.cloud.list_keypairs(
filters={'user_id': 'b', 'fake': 'dummy'})
filters={'user_id': 'b', 'fake': 'dummy'}
)
self.assertEqual(len(keypairs), 1)
self.assertEqual(keypairs[0].name, self.key['name'])
self.assert_calls()
def test_list_keypairs_exception(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['os-keypairs']),
status_code=400),
])
self.assertRaises(exc.OpenStackCloudException,
self.cloud.list_keypairs)
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['os-keypairs']
),
status_code=400,
),
]
)
self.assertRaises(
exc.OpenStackCloudException, self.cloud.list_keypairs
)
self.assert_calls()

View File

@ -14,81 +14,93 @@ from openstack.tests.unit import base
class TestLimits(base.TestCase):
def test_get_compute_limits(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['limits']),
json={
"limits": {
"absolute": {
"maxImageMeta": 128,
"maxPersonality": 5,
"maxPersonalitySize": 10240,
"maxSecurityGroupRules": 20,
"maxSecurityGroups": 10,
"maxServerMeta": 128,
"maxTotalCores": 20,
"maxTotalFloatingIps": 10,
"maxTotalInstances": 10,
"maxTotalKeypairs": 100,
"maxTotalRAMSize": 51200,
"maxServerGroups": 10,
"maxServerGroupMembers": 10,
"totalCoresUsed": 0,
"totalInstancesUsed": 0,
"totalRAMUsed": 0,
"totalSecurityGroupsUsed": 0,
"totalFloatingIpsUsed": 0,
"totalServerGroupsUsed": 0
},
"rate": []
}
}),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['limits']
),
json={
"limits": {
"absolute": {
"maxImageMeta": 128,
"maxPersonality": 5,
"maxPersonalitySize": 10240,
"maxSecurityGroupRules": 20,
"maxSecurityGroups": 10,
"maxServerMeta": 128,
"maxTotalCores": 20,
"maxTotalFloatingIps": 10,
"maxTotalInstances": 10,
"maxTotalKeypairs": 100,
"maxTotalRAMSize": 51200,
"maxServerGroups": 10,
"maxServerGroupMembers": 10,
"totalCoresUsed": 0,
"totalInstancesUsed": 0,
"totalRAMUsed": 0,
"totalSecurityGroupsUsed": 0,
"totalFloatingIpsUsed": 0,
"totalServerGroupsUsed": 0,
},
"rate": [],
}
},
),
]
)
self.cloud.get_compute_limits()
self.assert_calls()
def test_other_get_compute_limits(self):
project = self.mock_for_keystone_projects(project_count=1,
list_get=True)[0]
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['limits'],
qs_elements=[
'tenant_id={id}'.format(id=project.project_id)
]),
json={
"limits": {
"absolute": {
"maxImageMeta": 128,
"maxPersonality": 5,
"maxPersonalitySize": 10240,
"maxSecurityGroupRules": 20,
"maxSecurityGroups": 10,
"maxServerMeta": 128,
"maxTotalCores": 20,
"maxTotalFloatingIps": 10,
"maxTotalInstances": 10,
"maxTotalKeypairs": 100,
"maxTotalRAMSize": 51200,
"maxServerGroups": 10,
"maxServerGroupMembers": 10,
"totalCoresUsed": 0,
"totalInstancesUsed": 0,
"totalRAMUsed": 0,
"totalSecurityGroupsUsed": 0,
"totalFloatingIpsUsed": 0,
"totalServerGroupsUsed": 0
},
"rate": []
}
}),
])
project = self.mock_for_keystone_projects(
project_count=1, list_get=True
)[0]
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'compute',
'public',
append=['limits'],
qs_elements=[
'tenant_id={id}'.format(id=project.project_id)
],
),
json={
"limits": {
"absolute": {
"maxImageMeta": 128,
"maxPersonality": 5,
"maxPersonalitySize": 10240,
"maxSecurityGroupRules": 20,
"maxSecurityGroups": 10,
"maxServerMeta": 128,
"maxTotalCores": 20,
"maxTotalFloatingIps": 10,
"maxTotalInstances": 10,
"maxTotalKeypairs": 100,
"maxTotalRAMSize": 51200,
"maxServerGroups": 10,
"maxServerGroupMembers": 10,
"totalCoresUsed": 0,
"totalInstancesUsed": 0,
"totalRAMUsed": 0,
"totalSecurityGroupsUsed": 0,
"totalFloatingIpsUsed": 0,
"totalServerGroupsUsed": 0,
},
"rate": [],
}
},
),
]
)
self.cloud.get_compute_limits(project.project_id)

View File

@ -27,14 +27,19 @@ magnum_service_obj = dict(
class TestMagnumServices(base.TestCase):
def test_list_magnum_services(self):
self.register_uris([dict(
method='GET',
uri=self.get_mock_url(
service_type='container-infrastructure-management',
resource='mservices'),
json=dict(mservices=[magnum_service_obj]))])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
service_type='container-infrastructure-management',
resource='mservices',
),
json=dict(mservices=[magnum_service_obj]),
)
]
)
mservices_list = self.cloud.list_magnum_services()
self.assertEqual(
mservices_list[0].to_dict(computed=False),

File diff suppressed because it is too large Load Diff

View File

@ -57,7 +57,7 @@ class TestNetwork(base.TestCase):
"updated": "2015-01-01T10:00:00-00:00",
"description": "Availability zone support for router.",
"links": [],
"name": "Network Availability Zone"
"name": "Network Availability Zone",
}
enabled_neutron_extensions = [network_availability_zone_extension]
@ -65,66 +65,99 @@ class TestNetwork(base.TestCase):
def _compare_networks(self, exp, real):
self.assertDictEqual(
_network.Network(**exp).to_dict(computed=False),
real.to_dict(computed=False))
real.to_dict(computed=False),
)
def test_list_networks(self):
net1 = {'id': '1', 'name': 'net1'}
net2 = {'id': '2', 'name': 'net2'}
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']),
json={'networks': [net1, net2]})
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']
),
json={'networks': [net1, net2]},
)
]
)
nets = self.cloud.list_networks()
self.assertEqual(
[_network.Network(**i).to_dict(computed=False) for i in [
net1, net2]],
[i.to_dict(computed=False) for i in nets])
[
_network.Network(**i).to_dict(computed=False)
for i in [net1, net2]
],
[i.to_dict(computed=False) for i in nets],
)
self.assert_calls()
def test_list_networks_filtered(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks'],
qs_elements=["name=test"]),
json={'networks': []})
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'networks'],
qs_elements=["name=test"],
),
json={'networks': []},
)
]
)
self.cloud.list_networks(filters={'name': 'test'})
self.assert_calls()
def test_create_network(self):
self.register_uris([
dict(method='POST',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']),
json={'network': self.mock_new_network_rep},
validate=dict(
json={'network': {
'admin_state_up': True,
'name': 'netname'}}))
])
self.register_uris(
[
dict(
method='POST',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']
),
json={'network': self.mock_new_network_rep},
validate=dict(
json={
'network': {
'admin_state_up': True,
'name': 'netname',
}
}
),
)
]
)
network = self.cloud.create_network("netname")
self._compare_networks(
self.mock_new_network_rep, network)
self._compare_networks(self.mock_new_network_rep, network)
self.assert_calls()
def test_create_network_specific_tenant(self):
project_id = "project_id_value"
mock_new_network_rep = copy.copy(self.mock_new_network_rep)
mock_new_network_rep['project_id'] = project_id
self.register_uris([
dict(method='POST',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']),
json={'network': mock_new_network_rep},
validate=dict(
json={'network': {
'admin_state_up': True,
'name': 'netname',
'project_id': project_id}}))
])
self.register_uris(
[
dict(
method='POST',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']
),
json={'network': mock_new_network_rep},
validate=dict(
json={
'network': {
'admin_state_up': True,
'name': 'netname',
'project_id': project_id,
}
}
),
)
]
)
network = self.cloud.create_network("netname", project_id=project_id)
self._compare_networks(mock_new_network_rep, network)
self.assert_calls()
@ -132,45 +165,57 @@ class TestNetwork(base.TestCase):
def test_create_network_external(self):
mock_new_network_rep = copy.copy(self.mock_new_network_rep)
mock_new_network_rep['router:external'] = True
self.register_uris([
dict(method='POST',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']),
json={'network': mock_new_network_rep},
validate=dict(
json={'network': {
'admin_state_up': True,
'name': 'netname',
'router:external': True}}))
])
self.register_uris(
[
dict(
method='POST',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']
),
json={'network': mock_new_network_rep},
validate=dict(
json={
'network': {
'admin_state_up': True,
'name': 'netname',
'router:external': True,
}
}
),
)
]
)
network = self.cloud.create_network("netname", external=True)
self._compare_networks(mock_new_network_rep, network)
self.assert_calls()
def test_create_network_provider(self):
provider_opts = {'physical_network': 'mynet',
'network_type': 'vlan',
'segmentation_id': 'vlan1'}
provider_opts = {
'physical_network': 'mynet',
'network_type': 'vlan',
'segmentation_id': 'vlan1',
}
new_network_provider_opts = {
'provider:physical_network': 'mynet',
'provider:network_type': 'vlan',
'provider:segmentation_id': 'vlan1'
'provider:segmentation_id': 'vlan1',
}
mock_new_network_rep = copy.copy(self.mock_new_network_rep)
mock_new_network_rep.update(new_network_provider_opts)
expected_send_params = {
'admin_state_up': True,
'name': 'netname'
}
expected_send_params = {'admin_state_up': True, 'name': 'netname'}
expected_send_params.update(new_network_provider_opts)
self.register_uris([
dict(method='POST',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']),
json={'network': mock_new_network_rep},
validate=dict(
json={'network': expected_send_params}))
])
self.register_uris(
[
dict(
method='POST',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']
),
json={'network': mock_new_network_rep},
validate=dict(json={'network': expected_send_params}),
)
]
)
network = self.cloud.create_network("netname", provider=provider_opts)
self._compare_networks(mock_new_network_rep, network)
self.assert_calls()
@ -179,89 +224,122 @@ class TestNetwork(base.TestCase):
network_id = "test-net-id"
network_name = "network"
network = {'id': network_id, 'name': network_name}
provider_opts = {'physical_network': 'mynet',
'network_type': 'vlan',
'segmentation_id': 'vlan1',
'should_not_be_passed': 1}
provider_opts = {
'physical_network': 'mynet',
'network_type': 'vlan',
'segmentation_id': 'vlan1',
'should_not_be_passed': 1,
}
update_network_provider_opts = {
'provider:physical_network': 'mynet',
'provider:network_type': 'vlan',
'provider:segmentation_id': 'vlan1'
'provider:segmentation_id': 'vlan1',
}
mock_update_rep = copy.copy(self.mock_new_network_rep)
mock_update_rep.update(update_network_provider_opts)
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'networks', network_name]),
status_code=404),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks'],
qs_elements=['name=%s' % network_name]),
json={'networks': [network]}),
dict(method='PUT',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'networks', network_id]),
json={'network': mock_update_rep},
validate=dict(
json={'network': update_network_provider_opts}))
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'networks', network_name],
),
status_code=404,
),
dict(
method='GET',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'networks'],
qs_elements=['name=%s' % network_name],
),
json={'networks': [network]},
),
dict(
method='PUT',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'networks', network_id],
),
json={'network': mock_update_rep},
validate=dict(
json={'network': update_network_provider_opts}
),
),
]
)
network = self.cloud.update_network(
network_name,
provider=provider_opts
network_name, provider=provider_opts
)
self._compare_networks(mock_update_rep, network)
self.assert_calls()
def test_create_network_with_availability_zone_hints(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'extensions']),
json={'extensions': self.enabled_neutron_extensions}),
dict(method='POST',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']),
json={'network': self.mock_new_network_rep},
validate=dict(
json={'network': {
'admin_state_up': True,
'name': 'netname',
'availability_zone_hints': ['nova']}}))
])
network = self.cloud.create_network("netname",
availability_zone_hints=['nova'])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'extensions']
),
json={'extensions': self.enabled_neutron_extensions},
),
dict(
method='POST',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']
),
json={'network': self.mock_new_network_rep},
validate=dict(
json={
'network': {
'admin_state_up': True,
'name': 'netname',
'availability_zone_hints': ['nova'],
}
}
),
),
]
)
network = self.cloud.create_network(
"netname", availability_zone_hints=['nova']
)
self._compare_networks(self.mock_new_network_rep, network)
self.assert_calls()
def test_create_network_provider_ignored_value(self):
provider_opts = {'physical_network': 'mynet',
'network_type': 'vlan',
'segmentation_id': 'vlan1',
'should_not_be_passed': 1}
provider_opts = {
'physical_network': 'mynet',
'network_type': 'vlan',
'segmentation_id': 'vlan1',
'should_not_be_passed': 1,
}
new_network_provider_opts = {
'provider:physical_network': 'mynet',
'provider:network_type': 'vlan',
'provider:segmentation_id': 'vlan1'
'provider:segmentation_id': 'vlan1',
}
mock_new_network_rep = copy.copy(self.mock_new_network_rep)
mock_new_network_rep.update(new_network_provider_opts)
expected_send_params = {
'admin_state_up': True,
'name': 'netname'
}
expected_send_params = {'admin_state_up': True, 'name': 'netname'}
expected_send_params.update(new_network_provider_opts)
self.register_uris([
dict(method='POST',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']),
json={'network': mock_new_network_rep},
validate=dict(
json={'network': expected_send_params}))
])
self.register_uris(
[
dict(
method='POST',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']
),
json={'network': mock_new_network_rep},
validate=dict(json={'network': expected_send_params}),
)
]
)
network = self.cloud.create_network("netname", provider=provider_opts)
self._compare_networks(mock_new_network_rep, network)
self.assert_calls()
@ -270,16 +348,17 @@ class TestNetwork(base.TestCase):
azh_opts = "invalid"
with testtools.ExpectedException(
openstack.cloud.OpenStackCloudException,
"Parameter 'availability_zone_hints' must be a list"
"Parameter 'availability_zone_hints' must be a list",
):
self.cloud.create_network("netname",
availability_zone_hints=azh_opts)
self.cloud.create_network(
"netname", availability_zone_hints=azh_opts
)
def test_create_network_provider_wrong_type(self):
provider_opts = "invalid"
with testtools.ExpectedException(
openstack.cloud.OpenStackCloudException,
"Parameter 'provider' must be a dict"
"Parameter 'provider' must be a dict",
):
self.cloud.create_network("netname", provider=provider_opts)
@ -287,20 +366,28 @@ class TestNetwork(base.TestCase):
port_security_state = False
mock_new_network_rep = copy.copy(self.mock_new_network_rep)
mock_new_network_rep['port_security_enabled'] = port_security_state
self.register_uris([
dict(method='POST',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']),
json={'network': mock_new_network_rep},
validate=dict(
json={'network': {
'admin_state_up': True,
'name': 'netname',
'port_security_enabled': port_security_state}}))
])
self.register_uris(
[
dict(
method='POST',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']
),
json={'network': mock_new_network_rep},
validate=dict(
json={
'network': {
'admin_state_up': True,
'name': 'netname',
'port_security_enabled': port_security_state,
}
}
),
)
]
)
network = self.cloud.create_network(
"netname",
port_security_enabled=port_security_state
"netname", port_security_enabled=port_security_state
)
self._compare_networks(mock_new_network_rep, network)
self.assert_calls()
@ -309,34 +396,41 @@ class TestNetwork(base.TestCase):
mtu_size = 1500
mock_new_network_rep = copy.copy(self.mock_new_network_rep)
mock_new_network_rep['mtu'] = mtu_size
self.register_uris([
dict(method='POST',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']),
json={'network': mock_new_network_rep},
validate=dict(
json={'network': {
'admin_state_up': True,
'name': 'netname',
'mtu': mtu_size}}))
])
network = self.cloud.create_network("netname",
mtu_size=mtu_size
)
self.register_uris(
[
dict(
method='POST',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']
),
json={'network': mock_new_network_rep},
validate=dict(
json={
'network': {
'admin_state_up': True,
'name': 'netname',
'mtu': mtu_size,
}
}
),
)
]
)
network = self.cloud.create_network("netname", mtu_size=mtu_size)
self._compare_networks(mock_new_network_rep, network)
self.assert_calls()
def test_create_network_with_wrong_mtu_size(self):
with testtools.ExpectedException(
openstack.cloud.OpenStackCloudException,
"Parameter 'mtu_size' must be greater than 67."
openstack.cloud.OpenStackCloudException,
"Parameter 'mtu_size' must be greater than 67.",
):
self.cloud.create_network("netname", mtu_size=42)
def test_create_network_with_wrong_mtu_type(self):
with testtools.ExpectedException(
openstack.cloud.OpenStackCloudException,
"Parameter 'mtu_size' must be an integer."
openstack.cloud.OpenStackCloudException,
"Parameter 'mtu_size' must be an integer.",
):
self.cloud.create_network("netname", mtu_size="fourty_two")
@ -344,39 +438,65 @@ class TestNetwork(base.TestCase):
network_id = "test-net-id"
network_name = "network"
network = {'id': network_id, 'name': network_name}
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'networks', network_name]),
status_code=404),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks'],
qs_elements=['name=%s' % network_name]),
json={'networks': [network]}),
dict(method='DELETE',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'networks', network_id]),
json={})
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'networks', network_name],
),
status_code=404,
),
dict(
method='GET',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'networks'],
qs_elements=['name=%s' % network_name],
),
json={'networks': [network]},
),
dict(
method='DELETE',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'networks', network_id],
),
json={},
),
]
)
self.assertTrue(self.cloud.delete_network(network_name))
self.assert_calls()
def test_delete_network_not_found(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'networks', 'test-net']),
status_code=404),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks'],
qs_elements=['name=test-net']),
json={'networks': []}),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'networks', 'test-net'],
),
status_code=404,
),
dict(
method='GET',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'networks'],
qs_elements=['name=test-net'],
),
json={'networks': []},
),
]
)
self.assertFalse(self.cloud.delete_network('test-net'))
self.assert_calls()
@ -384,37 +504,61 @@ class TestNetwork(base.TestCase):
network_id = "test-net-id"
network_name = "network"
network = {'id': network_id, 'name': network_name}
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'networks', network_name]),
status_code=404),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks'],
qs_elements=['name=%s' % network_name]),
json={'networks': [network]}),
dict(method='DELETE',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'networks', network_id]),
status_code=503)
])
self.assertRaises(openstack.cloud.OpenStackCloudException,
self.cloud.delete_network, network_name)
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'networks', network_name],
),
status_code=404,
),
dict(
method='GET',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'networks'],
qs_elements=['name=%s' % network_name],
),
json={'networks': [network]},
),
dict(
method='DELETE',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'networks', network_id],
),
status_code=503,
),
]
)
self.assertRaises(
openstack.cloud.OpenStackCloudException,
self.cloud.delete_network,
network_name,
)
self.assert_calls()
def test_get_network_by_id(self):
network_id = "test-net-id"
network_name = "network"
network = {'id': network_id, 'name': network_name}
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'networks', network_id]),
json={'network': network})
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'networks', network_id],
),
json={'network': network},
)
]
)
self.assertTrue(self.cloud.get_network_by_id(network_id))
self.assert_calls()

File diff suppressed because it is too large Load Diff

View File

@ -19,7 +19,6 @@ from openstack.tests.unit import base
class TestSearch(base.TestCase):
class FakeResource(resource.Resource):
allow_fetch = True
allow_list = True
@ -33,9 +32,7 @@ class TestSearch(base.TestCase):
self.session._sdk_connection = self.cloud
self.session._get = mock.Mock()
self.session._list = mock.Mock()
self.session._resource_registry = dict(
fake=self.FakeResource
)
self.session._resource_registry = dict(fake=self.FakeResource)
# Set the mock into the cloud connection
setattr(self.cloud, "mock_session", self.session)
@ -44,7 +41,7 @@ class TestSearch(base.TestCase):
exceptions.SDKException,
self.cloud.search_resources,
"wrong_service.wrong_resource",
"name"
"name",
)
def test_raises_unknown_resource(self):
@ -52,44 +49,33 @@ class TestSearch(base.TestCase):
exceptions.SDKException,
self.cloud.search_resources,
"mock_session.wrong_resource",
"name"
"name",
)
def test_search_resources_get_finds(self):
self.session._get.return_value = self.FakeResource(foo="bar")
ret = self.cloud.search_resources(
"mock_session.fake",
"fake_name"
)
self.session._get.assert_called_with(
self.FakeResource, "fake_name")
ret = self.cloud.search_resources("mock_session.fake", "fake_name")
self.session._get.assert_called_with(self.FakeResource, "fake_name")
self.assertEqual(1, len(ret))
self.assertEqual(
self.FakeResource(foo="bar").to_dict(),
ret[0].to_dict()
self.FakeResource(foo="bar").to_dict(), ret[0].to_dict()
)
def test_search_resources_list(self):
self.session._get.side_effect = exceptions.ResourceNotFound
self.session._list.return_value = [
self.FakeResource(foo="bar")
]
self.session._list.return_value = [self.FakeResource(foo="bar")]
ret = self.cloud.search_resources(
"mock_session.fake",
"fake_name"
)
self.session._get.assert_called_with(
self.FakeResource, "fake_name")
ret = self.cloud.search_resources("mock_session.fake", "fake_name")
self.session._get.assert_called_with(self.FakeResource, "fake_name")
self.session._list.assert_called_with(
self.FakeResource, name="fake_name")
self.FakeResource, name="fake_name"
)
self.assertEqual(1, len(ret))
self.assertEqual(
self.FakeResource(foo="bar").to_dict(),
ret[0].to_dict()
self.FakeResource(foo="bar").to_dict(), ret[0].to_dict()
)
def test_search_resources_args(self):
@ -103,33 +89,27 @@ class TestSearch(base.TestCase):
get_kwargs={"getkwarg1": "1"},
list_args=["listarg1"],
list_kwargs={"listkwarg1": "1"},
filter1="foo"
filter1="foo",
)
self.session._get.assert_called_with(
self.FakeResource, "fake_name",
"getarg1", getkwarg1="1")
self.FakeResource, "fake_name", "getarg1", getkwarg1="1"
)
self.session._list.assert_called_with(
self.FakeResource,
"listarg1", listkwarg1="1",
name="fake_name", filter1="foo"
"listarg1",
listkwarg1="1",
name="fake_name",
filter1="foo",
)
def test_search_resources_name_empty(self):
self.session._list.return_value = [
self.FakeResource(foo="bar")
]
self.session._list.return_value = [self.FakeResource(foo="bar")]
ret = self.cloud.search_resources(
"mock_session.fake",
None,
foo="bar"
)
ret = self.cloud.search_resources("mock_session.fake", None, foo="bar")
self.session._get.assert_not_called()
self.session._list.assert_called_with(
self.FakeResource, foo="bar")
self.session._list.assert_called_with(self.FakeResource, foo="bar")
self.assertEqual(1, len(ret))
self.assertEqual(
self.FakeResource(foo="bar").to_dict(),
ret[0].to_dict()
self.FakeResource(foo="bar").to_dict(), ret[0].to_dict()
)

View File

@ -22,7 +22,6 @@ from openstack.tests.unit import base
class TestOperatorCloud(base.TestCase):
def test_get_image_name(self):
self.use_glance()
@ -30,14 +29,20 @@ class TestOperatorCloud(base.TestCase):
fake_image = fakes.make_fake_image(image_id=image_id)
list_return = {'images': [fake_image]}
self.register_uris([
dict(method='GET',
uri='https://image.example.com/v2/images',
json=list_return),
dict(method='GET',
uri='https://image.example.com/v2/images',
json=list_return),
])
self.register_uris(
[
dict(
method='GET',
uri='https://image.example.com/v2/images',
json=list_return,
),
dict(
method='GET',
uri='https://image.example.com/v2/images',
json=list_return,
),
]
)
self.assertEqual('fake_image', self.cloud.get_image_name(image_id))
self.assertEqual('fake_image', self.cloud.get_image_name('fake_image'))
@ -51,14 +56,20 @@ class TestOperatorCloud(base.TestCase):
fake_image = fakes.make_fake_image(image_id=image_id)
list_return = {'images': [fake_image]}
self.register_uris([
dict(method='GET',
uri='https://image.example.com/v2/images',
json=list_return),
dict(method='GET',
uri='https://image.example.com/v2/images',
json=list_return),
])
self.register_uris(
[
dict(
method='GET',
uri='https://image.example.com/v2/images',
json=list_return,
),
dict(
method='GET',
uri='https://image.example.com/v2/images',
json=list_return,
),
]
)
self.assertEqual(image_id, self.cloud.get_image_id(image_id))
self.assertEqual(image_id, self.cloud.get_image_id('fake_image'))
@ -72,15 +83,17 @@ class TestOperatorCloud(base.TestCase):
def side_effect(*args, **kwargs):
raise FakeException("No service")
session_mock = mock.Mock()
session_mock.get_endpoint.side_effect = side_effect
get_session_mock.return_value = session_mock
self.cloud.name = 'testcloud'
self.cloud.config.config['region_name'] = 'testregion'
with testtools.ExpectedException(
exc.OpenStackCloudException,
"Error getting image endpoint on testcloud:testregion:"
" No service"):
exc.OpenStackCloudException,
"Error getting image endpoint on testcloud:testregion:"
" No service",
):
self.cloud.get_session_endpoint("image")
@mock.patch.object(cloud_region.CloudRegion, 'get_session')
@ -97,8 +110,11 @@ class TestOperatorCloud(base.TestCase):
get_session_mock.return_value = session_mock
self.cloud.get_session_endpoint('identity')
kwargs = dict(
interface='public', region_name='RegionOne',
service_name=None, service_type='identity')
interface='public',
region_name='RegionOne',
service_name=None,
service_type='identity',
)
session_mock.get_endpoint.assert_called_with(**kwargs)
@ -122,23 +138,23 @@ class TestOperatorCloud(base.TestCase):
uuid1 = uuid.uuid4().hex
uuid2 = uuid.uuid4().hex
self.use_compute_discovery()
self.register_uris([
dict(
method='GET',
uri='https://compute.example.com/v2.1/os-hypervisors/detail',
json={
'hypervisors': [
fakes.make_fake_hypervisor(uuid1, 'testserver1'),
fakes.make_fake_hypervisor(uuid2, 'testserver2'),
]
},
validate={
'headers': {
'OpenStack-API-Version': 'compute 2.53'
}
}
),
])
self.register_uris(
[
dict(
method='GET',
uri='https://compute.example.com/v2.1/os-hypervisors/detail', # noqa: E501
json={
'hypervisors': [
fakes.make_fake_hypervisor(uuid1, 'testserver1'),
fakes.make_fake_hypervisor(uuid2, 'testserver2'),
]
},
validate={
'headers': {'OpenStack-API-Version': 'compute 2.53'}
},
),
]
)
r = self.cloud.list_hypervisors()
@ -154,19 +170,22 @@ class TestOperatorCloud(base.TestCase):
'''This test verifies that calling list_hypervisors on a pre-2.53 cloud
calls the old version.'''
self.use_compute_discovery(
compute_version_json='old-compute-version.json')
self.register_uris([
dict(
method='GET',
uri='https://compute.example.com/v2.1/os-hypervisors/detail',
json={
'hypervisors': [
fakes.make_fake_hypervisor('1', 'testserver1'),
fakes.make_fake_hypervisor('2', 'testserver2'),
]
}
),
])
compute_version_json='old-compute-version.json'
)
self.register_uris(
[
dict(
method='GET',
uri='https://compute.example.com/v2.1/os-hypervisors/detail', # noqa: E501
json={
'hypervisors': [
fakes.make_fake_hypervisor('1', 'testserver1'),
fakes.make_fake_hypervisor('2', 'testserver2'),
]
},
),
]
)
r = self.cloud.list_hypervisors()

View File

@ -32,19 +32,34 @@ class TestOpenStackCloudOperatorNoAuth(base.TestCase):
# By clearing the URI registry, we remove all calls to a keystone
# catalog or getting a token
self._uri_registry.clear()
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
service_type='baremetal', base_url_append='v1'),
json={'id': 'v1',
'links': [{"href": "https://baremetal.example.com/v1",
"rel": "self"}]}),
dict(method='GET',
uri=self.get_mock_url(
service_type='baremetal', base_url_append='v1',
resource='nodes'),
json={'nodes': []}),
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
service_type='baremetal', base_url_append='v1'
),
json={
'id': 'v1',
'links': [
{
"href": "https://baremetal.example.com/v1",
"rel": "self",
}
],
},
),
dict(
method='GET',
uri=self.get_mock_url(
service_type='baremetal',
base_url_append='v1',
resource='nodes',
),
json={'nodes': []},
),
]
)
def test_ironic_noauth_none_auth_type(self):
"""Test noauth selection for Ironic in OpenStackCloud
@ -58,7 +73,8 @@ class TestOpenStackCloudOperatorNoAuth(base.TestCase):
# client library.
self.cloud_noauth = openstack.connect(
auth_type='none',
baremetal_endpoint_override="https://baremetal.example.com/v1")
baremetal_endpoint_override="https://baremetal.example.com/v1",
)
self.cloud_noauth.list_machines()
@ -92,8 +108,9 @@ class TestOpenStackCloudOperatorNoAuth(base.TestCase):
self.cloud_noauth = openstack.connect(
auth_type='admin_token',
auth=dict(
endpoint='https://baremetal.example.com/v1',
token='ignored'))
endpoint='https://baremetal.example.com/v1', token='ignored'
),
)
self.cloud_noauth.list_machines()
@ -116,65 +133,94 @@ class TestOpenStackCloudOperatorNoAuthUnversioned(base.TestCase):
# By clearing the URI registry, we remove all calls to a keystone
# catalog or getting a token
self._uri_registry.clear()
self.register_uris([
dict(method='GET',
uri='https://baremetal.example.com/',
json={
"default_version": {
"status": "CURRENT",
"min_version": "1.1",
"version": "1.46",
"id": "v1",
"links": [{
"href": "https://baremetal.example.com/v1",
"rel": "self"
}]},
"versions": [{
"status": "CURRENT",
"min_version": "1.1",
"version": "1.46",
"id": "v1",
"links": [{
"href": "https://baremetal.example.com/v1",
"rel": "self"
}]}],
"name": "OpenStack Ironic API",
"description": "Ironic is an OpenStack project."
}),
dict(method='GET',
uri=self.get_mock_url(
service_type='baremetal', base_url_append='v1'),
json={
"media_types": [{
"base": "application/json",
"type": "application/vnd.openstack.ironic.v1+json"
}],
"links": [{
"href": "https://baremetal.example.com/v1",
"rel": "self"
}],
"ports": [{
"href": "https://baremetal.example.com/v1/ports/",
"rel": "self"
}, {
"href": "https://baremetal.example.com/ports/",
"rel": "bookmark"
}],
"nodes": [{
"href": "https://baremetal.example.com/v1/nodes/",
"rel": "self"
}, {
"href": "https://baremetal.example.com/nodes/",
"rel": "bookmark"
}],
"id": "v1"
}),
dict(method='GET',
uri=self.get_mock_url(
service_type='baremetal', base_url_append='v1',
resource='nodes'),
json={'nodes': []}),
])
self.register_uris(
[
dict(
method='GET',
uri='https://baremetal.example.com/',
json={
"default_version": {
"status": "CURRENT",
"min_version": "1.1",
"version": "1.46",
"id": "v1",
"links": [
{
"href": "https://baremetal.example.com/v1",
"rel": "self",
}
],
},
"versions": [
{
"status": "CURRENT",
"min_version": "1.1",
"version": "1.46",
"id": "v1",
"links": [
{
"href": "https://baremetal.example.com/v1", # noqa: E501
"rel": "self",
}
],
}
],
"name": "OpenStack Ironic API",
"description": "Ironic is an OpenStack project.",
},
),
dict(
method='GET',
uri=self.get_mock_url(
service_type='baremetal', base_url_append='v1'
),
json={
"media_types": [
{
"base": "application/json",
"type": "application/vnd.openstack.ironic.v1+json", # noqa: E501
}
],
"links": [
{
"href": "https://baremetal.example.com/v1",
"rel": "self",
}
],
"ports": [
{
"href": "https://baremetal.example.com/v1/ports/", # noqa: E501
"rel": "self",
},
{
"href": "https://baremetal.example.com/ports/",
"rel": "bookmark",
},
],
"nodes": [
{
"href": "https://baremetal.example.com/v1/nodes/", # noqa: E501
"rel": "self",
},
{
"href": "https://baremetal.example.com/nodes/",
"rel": "bookmark",
},
],
"id": "v1",
},
),
dict(
method='GET',
uri=self.get_mock_url(
service_type='baremetal',
base_url_append='v1',
resource='nodes',
),
json={'nodes': []},
),
]
)
def test_ironic_noauth_none_auth_type(self):
"""Test noauth selection for Ironic in OpenStackCloud
@ -188,7 +234,8 @@ class TestOpenStackCloudOperatorNoAuthUnversioned(base.TestCase):
# client library.
self.cloud_noauth = openstack.connect(
auth_type='none',
baremetal_endpoint_override="https://baremetal.example.com")
baremetal_endpoint_override="https://baremetal.example.com",
)
self.cloud_noauth.list_machines()

View File

@ -42,14 +42,11 @@ class TestPort(base.TestCase):
'mac_address': '50:1c:0d:e4:f0:0d',
'binding:profile': {},
'fixed_ips': [
{
'subnet_id': 'test-subnet-id',
'ip_address': '29.29.29.29'
}
{'subnet_id': 'test-subnet-id', 'ip_address': '29.29.29.29'}
],
'id': 'test-port-id',
'security_groups': [],
'device_id': ''
'device_id': '',
}
}
@ -70,14 +67,11 @@ class TestPort(base.TestCase):
'mac_address': '50:1c:0d:e4:f0:0d',
'binding:profile': {},
'fixed_ips': [
{
'subnet_id': 'test-subnet-id',
'ip_address': '29.29.29.29'
}
{'subnet_id': 'test-subnet-id', 'ip_address': '29.29.29.29'}
],
'id': 'test-port-id',
'security_groups': [],
'device_id': ''
'device_id': '',
}
}
@ -94,7 +88,7 @@ class TestPort(base.TestCase):
'extra_dhcp_opts': [],
'binding:vif_details': {
'port_filter': True,
'ovs_hybrid_plug': True
'ovs_hybrid_plug': True,
},
'binding:vif_type': 'ovs',
'device_owner': 'network:router_gateway',
@ -104,12 +98,12 @@ class TestPort(base.TestCase):
'fixed_ips': [
{
'subnet_id': '008ba151-0b8c-4a67-98b5-0d2b87666062',
'ip_address': '172.24.4.2'
'ip_address': '172.24.4.2',
}
],
'id': 'd80b1a3b-4fc1-49f3-952e-1e2ab7081d8b',
'security_groups': [],
'device_id': '9ae135f4-b6e0-4dad-9e91-3c223e385824'
'device_id': '9ae135f4-b6e0-4dad-9e91-3c223e385824',
},
{
'status': 'ACTIVE',
@ -122,7 +116,7 @@ class TestPort(base.TestCase):
'extra_dhcp_opts': [],
'binding:vif_details': {
'port_filter': True,
'ovs_hybrid_plug': True
'ovs_hybrid_plug': True,
},
'binding:vif_type': 'ovs',
'device_owner': 'network:router_interface',
@ -132,104 +126,155 @@ class TestPort(base.TestCase):
'fixed_ips': [
{
'subnet_id': '288bf4a1-51ba-43b6-9d0a-520e9005db17',
'ip_address': '10.0.0.1'
'ip_address': '10.0.0.1',
}
],
'id': 'f71a6703-d6de-4be1-a91a-a570ede1d159',
'security_groups': [],
'device_id': '9ae135f4-b6e0-4dad-9e91-3c223e385824'
}
'device_id': '9ae135f4-b6e0-4dad-9e91-3c223e385824',
},
]
}
def _compare_ports(self, exp, real):
self.assertDictEqual(
_port.Port(**exp).to_dict(computed=False),
real.to_dict(computed=False))
real.to_dict(computed=False),
)
def test_create_port(self):
self.register_uris([
dict(method="POST",
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports']),
json=self.mock_neutron_port_create_rep,
validate=dict(
json={'port': {
'network_id': 'test-net-id',
'name': 'test-port-name',
'admin_state_up': True}}))
])
self.register_uris(
[
dict(
method="POST",
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports']
),
json=self.mock_neutron_port_create_rep,
validate=dict(
json={
'port': {
'network_id': 'test-net-id',
'name': 'test-port-name',
'admin_state_up': True,
}
}
),
)
]
)
port = self.cloud.create_port(
network_id='test-net-id', name='test-port-name',
admin_state_up=True)
network_id='test-net-id',
name='test-port-name',
admin_state_up=True,
)
self._compare_ports(self.mock_neutron_port_create_rep['port'], port)
self.assert_calls()
def test_create_port_parameters(self):
"""Test that we detect invalid arguments passed to create_port"""
self.assertRaises(
TypeError, self.cloud.create_port,
network_id='test-net-id', nome='test-port-name',
stato_amministrativo_porta=True)
TypeError,
self.cloud.create_port,
network_id='test-net-id',
nome='test-port-name',
stato_amministrativo_porta=True,
)
def test_create_port_exception(self):
self.register_uris([
dict(method="POST",
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports']),
status_code=500,
validate=dict(
json={'port': {
'network_id': 'test-net-id',
'name': 'test-port-name',
'admin_state_up': True}}))
])
self.register_uris(
[
dict(
method="POST",
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports']
),
status_code=500,
validate=dict(
json={
'port': {
'network_id': 'test-net-id',
'name': 'test-port-name',
'admin_state_up': True,
}
}
),
)
]
)
self.assertRaises(
OpenStackCloudException, self.cloud.create_port,
network_id='test-net-id', name='test-port-name',
admin_state_up=True)
OpenStackCloudException,
self.cloud.create_port,
network_id='test-net-id',
name='test-port-name',
admin_state_up=True,
)
self.assert_calls()
def test_create_port_with_project(self):
self.mock_neutron_port_create_rep["port"].update(
{
'project_id': 'test-project-id',
})
self.register_uris([
dict(method="POST",
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports']),
json=self.mock_neutron_port_create_rep,
validate=dict(
json={'port': {
'network_id': 'test-net-id',
'project_id': 'test-project-id',
'name': 'test-port-name',
'admin_state_up': True}}))
])
}
)
self.register_uris(
[
dict(
method="POST",
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports']
),
json=self.mock_neutron_port_create_rep,
validate=dict(
json={
'port': {
'network_id': 'test-net-id',
'project_id': 'test-project-id',
'name': 'test-port-name',
'admin_state_up': True,
}
}
),
)
]
)
port = self.cloud.create_port(
network_id='test-net-id', name='test-port-name',
admin_state_up=True, project_id='test-project-id')
network_id='test-net-id',
name='test-port-name',
admin_state_up=True,
project_id='test-project-id',
)
self._compare_ports(self.mock_neutron_port_create_rep['port'], port)
self.assert_calls()
def test_update_port(self):
port_id = 'd80b1a3b-4fc1-49f3-952e-1e2ab7081d8b'
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports', port_id]),
json=dict(port=self.mock_neutron_port_list_rep['ports'][0])),
dict(method='PUT',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'ports', port_id]),
json=self.mock_neutron_port_update_rep,
validate=dict(
json={'port': {'name': 'test-port-name-updated'}}))
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports', port_id]
),
json=dict(
port=self.mock_neutron_port_list_rep['ports'][0]
),
),
dict(
method='PUT',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports', port_id]
),
json=self.mock_neutron_port_update_rep,
validate=dict(
json={'port': {'name': 'test-port-name-updated'}}
),
),
]
)
port = self.cloud.update_port(
name_or_id=port_id, name='test-port-name-updated')
name_or_id=port_id, name='test-port-name-updated'
)
self._compare_ports(self.mock_neutron_port_update_rep['port'], port)
self.assert_calls()
@ -237,72 +282,107 @@ class TestPort(base.TestCase):
def test_update_port_parameters(self):
"""Test that we detect invalid arguments passed to update_port"""
self.assertRaises(
TypeError, self.cloud.update_port,
name_or_id='test-port-id', nome='test-port-name-updated')
TypeError,
self.cloud.update_port,
name_or_id='test-port-id',
nome='test-port-name-updated',
)
def test_update_port_exception(self):
port_id = 'd80b1a3b-4fc1-49f3-952e-1e2ab7081d8b'
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports', port_id]),
json=self.mock_neutron_port_list_rep),
dict(method='PUT',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'ports', port_id]),
status_code=500,
validate=dict(
json={'port': {'name': 'test-port-name-updated'}}))
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports', port_id]
),
json=self.mock_neutron_port_list_rep,
),
dict(
method='PUT',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports', port_id]
),
status_code=500,
validate=dict(
json={'port': {'name': 'test-port-name-updated'}}
),
),
]
)
self.assertRaises(
OpenStackCloudException, self.cloud.update_port,
OpenStackCloudException,
self.cloud.update_port,
name_or_id='d80b1a3b-4fc1-49f3-952e-1e2ab7081d8b',
name='test-port-name-updated')
name='test-port-name-updated',
)
self.assert_calls()
def test_list_ports(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports']),
json=self.mock_neutron_port_list_rep)
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports']
),
json=self.mock_neutron_port_list_rep,
)
]
)
ports = self.cloud.list_ports()
for a, b in zip(self.mock_neutron_port_list_rep['ports'], ports):
self._compare_ports(a, b)
self.assert_calls()
def test_list_ports_filtered(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports'],
qs_elements=['status=DOWN']),
json=self.mock_neutron_port_list_rep)
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'ports'],
qs_elements=['status=DOWN'],
),
json=self.mock_neutron_port_list_rep,
)
]
)
ports = self.cloud.list_ports(filters={'status': 'DOWN'})
for a, b in zip(self.mock_neutron_port_list_rep['ports'], ports):
self._compare_ports(a, b)
self.assert_calls()
def test_list_ports_exception(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports']),
status_code=500)
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports']
),
status_code=500,
)
]
)
self.assertRaises(OpenStackCloudException, self.cloud.list_ports)
def test_search_ports_by_id(self):
port_id = 'f71a6703-d6de-4be1-a91a-a570ede1d159'
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports']),
json=self.mock_neutron_port_list_rep)
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports']
),
json=self.mock_neutron_port_list_rep,
)
]
)
ports = self.cloud.search_ports(name_or_id=port_id)
self.assertEqual(1, len(ports))
@ -311,12 +391,17 @@ class TestPort(base.TestCase):
def test_search_ports_by_name(self):
port_name = "first-port"
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports']),
json=self.mock_neutron_port_list_rep)
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports']
),
json=self.mock_neutron_port_list_rep,
)
]
)
ports = self.cloud.search_ports(name_or_id=port_name)
self.assertEqual(1, len(ports))
@ -324,51 +409,80 @@ class TestPort(base.TestCase):
self.assert_calls()
def test_search_ports_not_found(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports']),
json=self.mock_neutron_port_list_rep)
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports']
),
json=self.mock_neutron_port_list_rep,
)
]
)
ports = self.cloud.search_ports(name_or_id='non-existent')
self.assertEqual(0, len(ports))
self.assert_calls()
def test_delete_port(self):
port_id = 'd80b1a3b-4fc1-49f3-952e-1e2ab7081d8b'
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'ports', 'first-port']),
status_code=404),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports'],
qs_elements=['name=first-port']),
json=self.mock_neutron_port_list_rep),
dict(method='DELETE',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'ports', port_id]),
json={})
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'ports', 'first-port'],
),
status_code=404,
),
dict(
method='GET',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'ports'],
qs_elements=['name=first-port'],
),
json=self.mock_neutron_port_list_rep,
),
dict(
method='DELETE',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports', port_id]
),
json={},
),
]
)
self.assertTrue(self.cloud.delete_port(name_or_id='first-port'))
def test_delete_port_not_found(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports',
'non-existent']),
status_code=404),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports'],
qs_elements=['name=non-existent']),
json={'ports': []})
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'ports', 'non-existent'],
),
status_code=404,
),
dict(
method='GET',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'ports'],
qs_elements=['name=non-existent'],
),
json={'ports': []},
),
]
)
self.assertFalse(self.cloud.delete_port(name_or_id='non-existent'))
self.assert_calls()
@ -376,50 +490,78 @@ class TestPort(base.TestCase):
port_name = "port-name"
port1 = dict(id='123', name=port_name)
port2 = dict(id='456', name=port_name)
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports', port_name]),
status_code=404),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports'],
qs_elements=['name=%s' % port_name]),
json={'ports': [port1, port2]})
])
self.assertRaises(OpenStackCloudException,
self.cloud.delete_port, port_name)
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'ports', port_name],
),
status_code=404,
),
dict(
method='GET',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'ports'],
qs_elements=['name=%s' % port_name],
),
json={'ports': [port1, port2]},
),
]
)
self.assertRaises(
OpenStackCloudException, self.cloud.delete_port, port_name
)
self.assert_calls()
def test_delete_subnet_multiple_using_id(self):
port_name = "port-name"
port1 = dict(id='123', name=port_name)
port2 = dict(id='456', name=port_name)
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'ports', port1['id']]),
json={'ports': [port1, port2]}),
dict(method='DELETE',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'ports', port1['id']]),
json={})
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'ports', port1['id']],
),
json={'ports': [port1, port2]},
),
dict(
method='DELETE',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'ports', port1['id']],
),
json={},
),
]
)
self.assertTrue(self.cloud.delete_port(name_or_id=port1['id']))
self.assert_calls()
def test_get_port_by_id(self):
fake_port = dict(id='123', name='456')
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0',
'ports',
fake_port['id']]),
json={'port': fake_port})
])
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'ports', fake_port['id']],
),
json={'port': fake_port},
)
]
)
r = self.cloud.get_port_by_id(fake_port['id'])
self.assertIsNotNone(r)
self._compare_ports(fake_port, r)

Some files were not shown because too many files have changed in this diff Show More