Blackify openstack.cloud

Black used with the '-l 79 -S' flags.

A future change will ignore this commit in git-blame history by adding a
'git-blame-ignore-revs' file.

Change-Id: Ib58bb45ce8c29e5347ffc36d40d6f5d52b140c6b
Signed-off-by: Stephen Finucane <stephenfin@redhat.com>
This commit is contained in:
Stephen Finucane 2023-05-05 11:22:56 +01:00
parent c2ff7336ce
commit 004c7352d0
128 changed files with 26600 additions and 16255 deletions

View File

@ -70,7 +70,7 @@ class AcceleratorCloudMixin:
""" """
device_profile = self.accelerator.get_device_profile( device_profile = self.accelerator.get_device_profile(
name_or_id, name_or_id,
filters filters,
) )
if device_profile is None: if device_profile is None:
self.log.debug( self.log.debug(
@ -104,7 +104,7 @@ class AcceleratorCloudMixin:
""" """
accelerator_request = self.accelerator.get_accelerator_request( accelerator_request = self.accelerator.get_accelerator_request(
name_or_id, name_or_id,
filters filters,
) )
if accelerator_request is None: if accelerator_request is None:
self.log.debug( self.log.debug(

View File

@ -39,7 +39,8 @@ def _normalize_port_list(nics):
except KeyError: except KeyError:
raise TypeError( raise TypeError(
"Either 'address' or 'mac' must be provided " "Either 'address' or 'mac' must be provided "
"for port %s" % row) "for port %s" % row
)
ports.append(dict(row, address=address)) ports.append(dict(row, address=address))
return ports return ports
@ -136,32 +137,34 @@ class BaremetalCloudMixin:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Refusing to inspect available machine %(node)s " "Refusing to inspect available machine %(node)s "
"which is associated with an instance " "which is associated with an instance "
"(instance_uuid %(inst)s)" % "(instance_uuid %(inst)s)"
{'node': node.id, 'inst': node.instance_id}) % {'node': node.id, 'inst': node.instance_id}
)
return_to_available = True return_to_available = True
# NOTE(TheJulia): Changing available machine to managedable state # NOTE(TheJulia): Changing available machine to managedable state
# and due to state transitions we need to until that transition has # and due to state transitions we need to until that transition has
# completed. # completed.
node = self.baremetal.set_node_provision_state(node, 'manage', node = self.baremetal.set_node_provision_state(
wait=True, node, 'manage', wait=True, timeout=timeout
timeout=timeout) )
if node.provision_state not in ('manageable', 'inspect failed'): if node.provision_state not in ('manageable', 'inspect failed'):
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Machine %(node)s must be in 'manageable', 'inspect failed' " "Machine %(node)s must be in 'manageable', 'inspect failed' "
"or 'available' provision state to start inspection, the " "or 'available' provision state to start inspection, the "
"current state is %(state)s" % "current state is %(state)s"
{'node': node.id, 'state': node.provision_state}) % {'node': node.id, 'state': node.provision_state}
)
node = self.baremetal.set_node_provision_state(node, 'inspect', node = self.baremetal.set_node_provision_state(
wait=True, node, 'inspect', wait=True, timeout=timeout
timeout=timeout) )
if return_to_available: if return_to_available:
node = self.baremetal.set_node_provision_state(node, 'provide', node = self.baremetal.set_node_provision_state(
wait=True, node, 'provide', wait=True, timeout=timeout
timeout=timeout) )
return node return node
@ -170,19 +173,27 @@ class BaremetalCloudMixin:
try: try:
yield yield
except Exception as exc: except Exception as exc:
self.log.debug("cleaning up node %s because of an error: %s", self.log.debug(
node.id, exc) "cleaning up node %s because of an error: %s", node.id, exc
)
tb = sys.exc_info()[2] tb = sys.exc_info()[2]
try: try:
self.baremetal.delete_node(node) self.baremetal.delete_node(node)
except Exception: except Exception:
self.log.debug("could not remove node %s", node.id, self.log.debug(
exc_info=True) "could not remove node %s", node.id, exc_info=True
)
raise exc.with_traceback(tb) raise exc.with_traceback(tb)
def register_machine(self, nics, wait=False, timeout=3600, def register_machine(
lock_timeout=600, provision_state='available', self,
**kwargs): nics,
wait=False,
timeout=3600,
lock_timeout=600,
provision_state='available',
**kwargs
):
"""Register Baremetal with Ironic """Register Baremetal with Ironic
Allows for the registration of Baremetal nodes with Ironic Allows for the registration of Baremetal nodes with Ironic
@ -233,9 +244,10 @@ class BaremetalCloudMixin:
:returns: Current state of the node. :returns: Current state of the node.
""" """
if provision_state not in ('enroll', 'manageable', 'available'): if provision_state not in ('enroll', 'manageable', 'available'):
raise ValueError('Initial provision state must be enroll, ' raise ValueError(
'manageable or available, got %s' 'Initial provision state must be enroll, '
% provision_state) 'manageable or available, got %s' % provision_state
)
# Available is tricky: it cannot be directly requested on newer API # Available is tricky: it cannot be directly requested on newer API
# versions, we need to go through cleaning. But we cannot go through # versions, we need to go through cleaning. But we cannot go through
@ -246,19 +258,24 @@ class BaremetalCloudMixin:
with self._delete_node_on_error(machine): with self._delete_node_on_error(machine):
# Making a node at least manageable # Making a node at least manageable
if (machine.provision_state == 'enroll' if (
and provision_state != 'enroll'): machine.provision_state == 'enroll'
and provision_state != 'enroll'
):
machine = self.baremetal.set_node_provision_state( machine = self.baremetal.set_node_provision_state(
machine, 'manage', wait=True, timeout=timeout) machine, 'manage', wait=True, timeout=timeout
)
machine = self.baremetal.wait_for_node_reservation( machine = self.baremetal.wait_for_node_reservation(
machine, timeout=lock_timeout) machine, timeout=lock_timeout
)
# Create NICs before trying to run cleaning # Create NICs before trying to run cleaning
created_nics = [] created_nics = []
try: try:
for port in _normalize_port_list(nics): for port in _normalize_port_list(nics):
nic = self.baremetal.create_port(node_id=machine.id, nic = self.baremetal.create_port(
**port) node_id=machine.id, **port
)
created_nics.append(nic.id) created_nics.append(nic.id)
except Exception: except Exception:
@ -269,10 +286,13 @@ class BaremetalCloudMixin:
pass pass
raise raise
if (machine.provision_state != 'available' if (
and provision_state == 'available'): machine.provision_state != 'available'
and provision_state == 'available'
):
machine = self.baremetal.set_node_provision_state( machine = self.baremetal.set_node_provision_state(
machine, 'provide', wait=wait, timeout=timeout) machine, 'provide', wait=wait, timeout=timeout
)
return machine return machine
@ -295,15 +315,18 @@ class BaremetalCloudMixin:
:raises: OpenStackCloudException on operation failure. :raises: OpenStackCloudException on operation failure.
""" """
if wait is not None: if wait is not None:
warnings.warn("wait argument is deprecated and has no effect", warnings.warn(
DeprecationWarning) "wait argument is deprecated and has no effect",
DeprecationWarning,
)
machine = self.get_machine(uuid) machine = self.get_machine(uuid)
invalid_states = ['active', 'cleaning', 'clean wait', 'clean failed'] invalid_states = ['active', 'cleaning', 'clean wait', 'clean failed']
if machine['provision_state'] in invalid_states: if machine['provision_state'] in invalid_states:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Error unregistering node '%s' due to current provision " "Error unregistering node '%s' due to current provision "
"state '%s'" % (uuid, machine['provision_state'])) "state '%s'" % (uuid, machine['provision_state'])
)
# NOTE(TheJulia) There is a high possibility of a lock being present # NOTE(TheJulia) There is a high possibility of a lock being present
# if the machine was just moved through the state machine. This was # if the machine was just moved through the state machine. This was
@ -314,7 +337,8 @@ class BaremetalCloudMixin:
except exc.OpenStackCloudException as e: except exc.OpenStackCloudException as e:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Error unregistering node '%s': Exception occured while" "Error unregistering node '%s': Exception occured while"
" waiting to be able to proceed: %s" % (machine['uuid'], e)) " waiting to be able to proceed: %s" % (machine['uuid'], e)
)
for nic in _normalize_port_list(nics): for nic in _normalize_port_list(nics):
try: try:
@ -381,32 +405,28 @@ class BaremetalCloudMixin:
machine = self.get_machine(name_or_id) machine = self.get_machine(name_or_id)
if not machine: if not machine:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Machine update failed to find Machine: %s. " % name_or_id) "Machine update failed to find Machine: %s. " % name_or_id
)
new_config = dict(machine._to_munch(), **attrs) new_config = dict(machine._to_munch(), **attrs)
try: try:
patch = jsonpatch.JsonPatch.from_diff( patch = jsonpatch.JsonPatch.from_diff(
machine._to_munch(), machine._to_munch(), new_config
new_config) )
except Exception as e: except Exception as e:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Machine update failed - Error generating JSON patch object " "Machine update failed - Error generating JSON patch object "
"for submission to the API. Machine: %s Error: %s" "for submission to the API. Machine: %s Error: %s"
% (name_or_id, e)) % (name_or_id, e)
)
if not patch: if not patch:
return dict( return dict(node=machine, changes=None)
node=machine,
changes=None
)
change_list = [change['path'] for change in patch] change_list = [change['path'] for change in patch]
node = self.baremetal.update_node(machine, **attrs) node = self.baremetal.update_node(machine, **attrs)
return dict( return dict(node=node, changes=change_list)
node=node,
changes=change_list
)
def attach_port_to_machine(self, name_or_id, port_name_or_id): def attach_port_to_machine(self, name_or_id, port_name_or_id):
"""Attach a virtual port to the bare metal machine. """Attach a virtual port to the bare metal machine.
@ -459,16 +479,16 @@ class BaremetalCloudMixin:
self.baremetal.validate_node(name_or_id, required=ifaces) self.baremetal.validate_node(name_or_id, required=ifaces)
def validate_node(self, uuid): def validate_node(self, uuid):
warnings.warn('validate_node is deprecated, please use ' warnings.warn(
'validate_machine instead', DeprecationWarning) 'validate_node is deprecated, please use '
'validate_machine instead',
DeprecationWarning,
)
self.baremetal.validate_node(uuid) self.baremetal.validate_node(uuid)
def node_set_provision_state(self, def node_set_provision_state(
name_or_id, self, name_or_id, state, configdrive=None, wait=False, timeout=3600
state, ):
configdrive=None,
wait=False,
timeout=3600):
"""Set Node Provision State """Set Node Provision State
Enables a user to provision a Machine and optionally define a Enables a user to provision a Machine and optionally define a
@ -495,15 +515,17 @@ class BaremetalCloudMixin:
:rtype: :class:`~openstack.baremetal.v1.node.Node`. :rtype: :class:`~openstack.baremetal.v1.node.Node`.
""" """
node = self.baremetal.set_node_provision_state( node = self.baremetal.set_node_provision_state(
name_or_id, target=state, config_drive=configdrive, name_or_id,
wait=wait, timeout=timeout) target=state,
config_drive=configdrive,
wait=wait,
timeout=timeout,
)
return node return node
def set_machine_maintenance_state( def set_machine_maintenance_state(
self, self, name_or_id, state=True, reason=None
name_or_id, ):
state=True,
reason=None):
"""Set Baremetal Machine Maintenance State """Set Baremetal Machine Maintenance State
Sets Baremetal maintenance state and maintenance reason. Sets Baremetal maintenance state and maintenance reason.
@ -587,28 +609,33 @@ class BaremetalCloudMixin:
""" """
self.baremetal.set_node_power_state(name_or_id, 'rebooting') self.baremetal.set_node_power_state(name_or_id, 'rebooting')
def activate_node(self, uuid, configdrive=None, def activate_node(self, uuid, configdrive=None, wait=False, timeout=1200):
wait=False, timeout=1200):
self.node_set_provision_state( self.node_set_provision_state(
uuid, 'active', configdrive, wait=wait, timeout=timeout) uuid, 'active', configdrive, wait=wait, timeout=timeout
)
def deactivate_node(self, uuid, wait=False, def deactivate_node(self, uuid, wait=False, timeout=1200):
timeout=1200):
self.node_set_provision_state( self.node_set_provision_state(
uuid, 'deleted', wait=wait, timeout=timeout) uuid, 'deleted', wait=wait, timeout=timeout
)
def set_node_instance_info(self, uuid, patch): def set_node_instance_info(self, uuid, patch):
warnings.warn("The set_node_instance_info call is deprecated, " warnings.warn(
"use patch_machine or update_machine instead", "The set_node_instance_info call is deprecated, "
DeprecationWarning) "use patch_machine or update_machine instead",
DeprecationWarning,
)
return self.patch_machine(uuid, patch) return self.patch_machine(uuid, patch)
def purge_node_instance_info(self, uuid): def purge_node_instance_info(self, uuid):
warnings.warn("The purge_node_instance_info call is deprecated, " warnings.warn(
"use patch_machine or update_machine instead", "The purge_node_instance_info call is deprecated, "
DeprecationWarning) "use patch_machine or update_machine instead",
return self.patch_machine(uuid, DeprecationWarning,
dict(path='/instance_info', op='remove')) )
return self.patch_machine(
uuid, dict(path='/instance_info', op='remove')
)
def wait_for_baremetal_node_lock(self, node, timeout=30): def wait_for_baremetal_node_lock(self, node, timeout=30):
"""Wait for a baremetal node to have no lock. """Wait for a baremetal node to have no lock.
@ -618,7 +645,10 @@ class BaremetalCloudMixin:
:raises: OpenStackCloudException upon client failure. :raises: OpenStackCloudException upon client failure.
:returns: None :returns: None
""" """
warnings.warn("The wait_for_baremetal_node_lock call is deprecated " warnings.warn(
"in favor of wait_for_node_reservation on the baremetal " "The wait_for_baremetal_node_lock call is deprecated "
"proxy", DeprecationWarning) "in favor of wait_for_node_reservation on the baremetal "
"proxy",
DeprecationWarning,
)
self.baremetal.wait_for_node_reservation(node, timeout) self.baremetal.wait_for_node_reservation(node, timeout)

View File

@ -127,8 +127,7 @@ class BlockStorageCloudMixin:
:returns: A volume ``Type`` object if found, else None. :returns: A volume ``Type`` object if found, else None.
""" """
return _utils._get_entity( return _utils._get_entity(self, 'volume_type', name_or_id, filters)
self, 'volume_type', name_or_id, filters)
def create_volume( def create_volume(
self, self,
@ -162,7 +161,9 @@ class BlockStorageCloudMixin:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Image {image} was requested as the basis for a new" "Image {image} was requested as the basis for a new"
" volume, but was not found on the cloud".format( " volume, but was not found on the cloud".format(
image=image)) image=image
)
)
kwargs['imageRef'] = image_obj['id'] kwargs['imageRef'] = image_obj['id']
kwargs = self._get_volume_kwargs(kwargs) kwargs = self._get_volume_kwargs(kwargs)
kwargs['size'] = size kwargs['size'] = size
@ -193,10 +194,10 @@ class BlockStorageCloudMixin:
volume = self.get_volume(name_or_id) volume = self.get_volume(name_or_id)
if not volume: if not volume:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Volume %s not found." % name_or_id) "Volume %s not found." % name_or_id
)
volume = self.block_storage.update_volume( volume = self.block_storage.update_volume(volume, **kwargs)
volume, **kwargs)
self.list_volumes.invalidate(self) self.list_volumes.invalidate(self)
@ -219,7 +220,9 @@ class BlockStorageCloudMixin:
if not volume: if not volume:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Volume {name_or_id} does not exist".format( "Volume {name_or_id} does not exist".format(
name_or_id=name_or_id)) name_or_id=name_or_id
)
)
self.block_storage.set_volume_bootable_status(volume, bootable) self.block_storage.set_volume_bootable_status(volume, bootable)
@ -249,7 +252,8 @@ class BlockStorageCloudMixin:
self.log.debug( self.log.debug(
"Volume %(name_or_id)s does not exist", "Volume %(name_or_id)s does not exist",
{'name_or_id': name_or_id}, {'name_or_id': name_or_id},
exc_info=True) exc_info=True,
)
return False return False
try: try:
self.block_storage.delete_volume(volume, force=force) self.block_storage.delete_volume(volume, force=force)
@ -297,10 +301,12 @@ class BlockStorageCloudMixin:
project_id = proj.id project_id = proj.id
params['tenant_id'] = project_id params['tenant_id'] = project_id
error_msg = "{msg} for the project: {project} ".format( error_msg = "{msg} for the project: {project} ".format(
msg=error_msg, project=name_or_id) msg=error_msg, project=name_or_id
)
data = proxy._json_response( data = proxy._json_response(
self.block_storage.get('/limits', params=params)) self.block_storage.get('/limits', params=params)
)
limits = self._get_and_munchify('limits', data) limits = self._get_and_munchify('limits', data)
return limits return limits
@ -413,22 +419,23 @@ class BlockStorageCloudMixin:
# If we got volume as dict we need to re-fetch it to be able to # If we got volume as dict we need to re-fetch it to be able to
# use wait_for_status. # use wait_for_status.
volume = self.block_storage.get_volume(volume['id']) volume = self.block_storage.get_volume(volume['id'])
self.block_storage.wait_for_status( self.block_storage.wait_for_status(volume, 'in-use', wait=timeout)
volume, 'in-use', wait=timeout)
return attachment return attachment
def _get_volume_kwargs(self, kwargs): def _get_volume_kwargs(self, kwargs):
name = kwargs.pop('name', kwargs.pop('display_name', None)) name = kwargs.pop('name', kwargs.pop('display_name', None))
description = kwargs.pop('description', description = kwargs.pop(
kwargs.pop('display_description', None)) 'description', kwargs.pop('display_description', None)
)
if name: if name:
kwargs['name'] = name kwargs['name'] = name
if description: if description:
kwargs['description'] = description kwargs['description'] = description
return kwargs return kwargs
@_utils.valid_kwargs('name', 'display_name', @_utils.valid_kwargs(
'description', 'display_description') 'name', 'display_name', 'description', 'display_description'
)
def create_volume_snapshot( def create_volume_snapshot(
self, self,
volume_id, volume_id,
@ -459,7 +466,8 @@ class BlockStorageCloudMixin:
snapshot = self.block_storage.create_snapshot(**payload) snapshot = self.block_storage.create_snapshot(**payload)
if wait: if wait:
snapshot = self.block_storage.wait_for_status( snapshot = self.block_storage.wait_for_status(
snapshot, wait=timeout) snapshot, wait=timeout
)
return snapshot return snapshot
@ -499,8 +507,7 @@ class BlockStorageCloudMixin:
:returns: A volume ``Snapshot`` object if found, else None. :returns: A volume ``Snapshot`` object if found, else None.
""" """
return _utils._get_entity(self, 'volume_snapshot', name_or_id, return _utils._get_entity(self, 'volume_snapshot', name_or_id, filters)
filters)
def create_volume_backup( def create_volume_backup(
self, self,
@ -572,8 +579,7 @@ class BlockStorageCloudMixin:
:returns: A volume ``Backup`` object if found, else None. :returns: A volume ``Backup`` object if found, else None.
""" """
return _utils._get_entity(self, 'volume_backup', name_or_id, return _utils._get_entity(self, 'volume_backup', name_or_id, filters)
filters)
def list_volume_snapshots(self, detailed=True, filters=None): def list_volume_snapshots(self, detailed=True, filters=None):
"""List all volume snapshots. """List all volume snapshots.
@ -615,8 +621,9 @@ class BlockStorageCloudMixin:
return list(self.block_storage.backups(details=detailed, **filters)) return list(self.block_storage.backups(details=detailed, **filters))
def delete_volume_backup(self, name_or_id=None, force=False, wait=False, def delete_volume_backup(
timeout=None): self, name_or_id=None, force=False, wait=False, timeout=None
):
"""Delete a volume backup. """Delete a volume backup.
:param name_or_id: Name or unique ID of the volume backup. :param name_or_id: Name or unique ID of the volume backup.
@ -635,7 +642,8 @@ class BlockStorageCloudMixin:
return False return False
self.block_storage.delete_backup( self.block_storage.delete_backup(
volume_backup, ignore_missing=False, force=force) volume_backup, ignore_missing=False, force=force
)
if wait: if wait:
self.block_storage.wait_for_delete(volume_backup, wait=timeout) self.block_storage.wait_for_delete(volume_backup, wait=timeout)
@ -663,7 +671,8 @@ class BlockStorageCloudMixin:
return False return False
self.block_storage.delete_snapshot( self.block_storage.delete_snapshot(
volumesnapshot, ignore_missing=False) volumesnapshot, ignore_missing=False
)
if wait: if wait:
self.block_storage.wait_for_delete(volumesnapshot, wait=timeout) self.block_storage.wait_for_delete(volumesnapshot, wait=timeout)
@ -695,8 +704,7 @@ class BlockStorageCloudMixin:
:returns: A list of volume ``Volume`` objects, if any are found. :returns: A list of volume ``Volume`` objects, if any are found.
""" """
volumes = self.list_volumes() volumes = self.list_volumes()
return _utils._filter_list( return _utils._filter_list(volumes, name_or_id, filters)
volumes, name_or_id, filters)
def search_volume_snapshots(self, name_or_id=None, filters=None): def search_volume_snapshots(self, name_or_id=None, filters=None):
"""Search for one or more volume snapshots. """Search for one or more volume snapshots.
@ -723,8 +731,7 @@ class BlockStorageCloudMixin:
:returns: A list of volume ``Snapshot`` objects, if any are found. :returns: A list of volume ``Snapshot`` objects, if any are found.
""" """
volumesnapshots = self.list_volume_snapshots() volumesnapshots = self.list_volume_snapshots()
return _utils._filter_list( return _utils._filter_list(volumesnapshots, name_or_id, filters)
volumesnapshots, name_or_id, filters)
def search_volume_backups(self, name_or_id=None, filters=None): def search_volume_backups(self, name_or_id=None, filters=None):
"""Search for one or more volume backups. """Search for one or more volume backups.
@ -751,8 +758,7 @@ class BlockStorageCloudMixin:
:returns: A list of volume ``Backup`` objects, if any are found. :returns: A list of volume ``Backup`` objects, if any are found.
""" """
volume_backups = self.list_volume_backups() volume_backups = self.list_volume_backups()
return _utils._filter_list( return _utils._filter_list(volume_backups, name_or_id, filters)
volume_backups, name_or_id, filters)
# TODO(stephenfin): Remove 'get_extra' in a future major version # TODO(stephenfin): Remove 'get_extra' in a future major version
def search_volume_types( def search_volume_types(
@ -797,7 +803,8 @@ class BlockStorageCloudMixin:
volume_type = self.get_volume_type(name_or_id) volume_type = self.get_volume_type(name_or_id)
if not volume_type: if not volume_type:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"VolumeType not found: %s" % name_or_id) "VolumeType not found: %s" % name_or_id
)
return self.block_storage.get_type_access(volume_type) return self.block_storage.get_type_access(volume_type)
@ -814,7 +821,8 @@ class BlockStorageCloudMixin:
volume_type = self.get_volume_type(name_or_id) volume_type = self.get_volume_type(name_or_id)
if not volume_type: if not volume_type:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"VolumeType not found: %s" % name_or_id) "VolumeType not found: %s" % name_or_id
)
self.block_storage.add_type_access(volume_type, project_id) self.block_storage.add_type_access(volume_type, project_id)
@ -829,7 +837,8 @@ class BlockStorageCloudMixin:
volume_type = self.get_volume_type(name_or_id) volume_type = self.get_volume_type(name_or_id)
if not volume_type: if not volume_type:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"VolumeType not found: %s" % name_or_id) "VolumeType not found: %s" % name_or_id
)
self.block_storage.remove_type_access(volume_type, project_id) self.block_storage.remove_type_access(volume_type, project_id)
def set_volume_quotas(self, name_or_id, **kwargs): def set_volume_quotas(self, name_or_id, **kwargs):
@ -842,12 +851,11 @@ class BlockStorageCloudMixin:
quota does not exist. quota does not exist.
""" """
proj = self.identity.find_project( proj = self.identity.find_project(name_or_id, ignore_missing=False)
name_or_id, ignore_missing=False)
self.block_storage.update_quota_set( self.block_storage.update_quota_set(
_qs.QuotaSet(project_id=proj.id), _qs.QuotaSet(project_id=proj.id), **kwargs
**kwargs) )
def get_volume_quotas(self, name_or_id): def get_volume_quotas(self, name_or_id):
"""Get volume quotas for a project """Get volume quotas for a project

View File

@ -23,10 +23,12 @@ class ClusteringCloudMixin:
def _clustering_client(self): def _clustering_client(self):
if 'clustering' not in self._raw_clients: if 'clustering' not in self._raw_clients:
clustering_client = self._get_versioned_client( clustering_client = self._get_versioned_client(
'clustering', min_version=1, max_version='1.latest') 'clustering', min_version=1, max_version='1.latest'
)
self._raw_clients['clustering'] = clustering_client self._raw_clients['clustering'] = clustering_client
return self._raw_clients['clustering'] return self._raw_clients['clustering']
# NOTE(gtema): work on getting rid of direct API calls showed that this # NOTE(gtema): work on getting rid of direct API calls showed that this
# implementation never worked properly and tests in reality verifying wrong # implementation never worked properly and tests in reality verifying wrong
# things. Unless someone is really interested in this piece of code this will # things. Unless someone is really interested in this piece of code this will

View File

@ -19,7 +19,6 @@ from openstack.cloud import exc
class CoeCloudMixin: class CoeCloudMixin:
@_utils.cache_on_arguments() @_utils.cache_on_arguments()
def list_coe_clusters(self): def list_coe_clusters(self):
"""List COE (Container Orchestration Engine) cluster. """List COE (Container Orchestration Engine) cluster.
@ -72,7 +71,10 @@ class CoeCloudMixin:
return _utils._get_entity(self, 'coe_cluster', name_or_id, filters) return _utils._get_entity(self, 'coe_cluster', name_or_id, filters)
def create_coe_cluster( def create_coe_cluster(
self, name, cluster_template_id, **kwargs, self,
name,
cluster_template_id,
**kwargs,
): ):
"""Create a COE cluster based on given cluster template. """Create a COE cluster based on given cluster template.
@ -133,11 +135,11 @@ class CoeCloudMixin:
cluster = self.get_coe_cluster(name_or_id) cluster = self.get_coe_cluster(name_or_id)
if not cluster: if not cluster:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"COE cluster %s not found." % name_or_id) "COE cluster %s not found." % name_or_id
)
cluster = self.container_infrastructure_management.update_cluster( cluster = self.container_infrastructure_management.update_cluster(
cluster, cluster, **kwargs
**kwargs
) )
return cluster return cluster
@ -149,8 +151,11 @@ class CoeCloudMixin:
:returns: Details about the CA certificate for the given cluster. :returns: Details about the CA certificate for the given cluster.
""" """
return self.container_infrastructure_management\ return (
.get_cluster_certificate(cluster_id) self.container_infrastructure_management.get_cluster_certificate(
cluster_id
)
)
def sign_coe_cluster_certificate(self, cluster_id, csr): def sign_coe_cluster_certificate(self, cluster_id, csr):
"""Sign client key and generate the CA certificate for a cluster """Sign client key and generate the CA certificate for a cluster
@ -164,10 +169,9 @@ class CoeCloudMixin:
:raises: OpenStackCloudException on operation error. :raises: OpenStackCloudException on operation error.
""" """
return self.container_infrastructure_management\ return self.container_infrastructure_management.create_cluster_certificate( # noqa: E501
.create_cluster_certificate( cluster_uuid=cluster_id, csr=csr
cluster_uuid=cluster_id, )
csr=csr)
@_utils.cache_on_arguments() @_utils.cache_on_arguments()
def list_cluster_templates(self, detail=False): def list_cluster_templates(self, detail=False):
@ -182,10 +186,12 @@ class CoeCloudMixin:
the OpenStack API call. the OpenStack API call.
""" """
return list( return list(
self.container_infrastructure_management.cluster_templates()) self.container_infrastructure_management.cluster_templates()
)
def search_cluster_templates( def search_cluster_templates(
self, name_or_id=None, filters=None, detail=False): self, name_or_id=None, filters=None, detail=False
):
"""Search cluster templates. """Search cluster templates.
:param name_or_id: cluster template name or ID. :param name_or_id: cluster template name or ID.
@ -199,8 +205,7 @@ class CoeCloudMixin:
the OpenStack API call. the OpenStack API call.
""" """
cluster_templates = self.list_cluster_templates(detail=detail) cluster_templates = self.list_cluster_templates(detail=detail)
return _utils._filter_list( return _utils._filter_list(cluster_templates, name_or_id, filters)
cluster_templates, name_or_id, filters)
def get_cluster_template(self, name_or_id, filters=None, detail=False): def get_cluster_template(self, name_or_id, filters=None, detail=False):
"""Get a cluster template by name or ID. """Get a cluster template by name or ID.
@ -225,11 +230,16 @@ class CoeCloudMixin:
cluster template is found. cluster template is found.
""" """
return _utils._get_entity( return _utils._get_entity(
self, 'cluster_template', name_or_id, self,
filters=filters, detail=detail) 'cluster_template',
name_or_id,
filters=filters,
detail=detail,
)
def create_cluster_template( def create_cluster_template(
self, name, image_id=None, keypair_id=None, coe=None, **kwargs): self, name, image_id=None, keypair_id=None, coe=None, **kwargs
):
"""Create a cluster template. """Create a cluster template.
:param string name: Name of the cluster template. :param string name: Name of the cluster template.
@ -243,14 +253,15 @@ class CoeCloudMixin:
:raises: ``OpenStackCloudException`` if something goes wrong during :raises: ``OpenStackCloudException`` if something goes wrong during
the OpenStack API call the OpenStack API call
""" """
cluster_template = self.container_infrastructure_management \ cluster_template = (
.create_cluster_template( self.container_infrastructure_management.create_cluster_template(
name=name, name=name,
image_id=image_id, image_id=image_id,
keypair_id=keypair_id, keypair_id=keypair_id,
coe=coe, coe=coe,
**kwargs, **kwargs,
) )
)
return cluster_template return cluster_template
@ -270,11 +281,13 @@ class CoeCloudMixin:
self.log.debug( self.log.debug(
"Cluster template %(name_or_id)s does not exist", "Cluster template %(name_or_id)s does not exist",
{'name_or_id': name_or_id}, {'name_or_id': name_or_id},
exc_info=True) exc_info=True,
)
return False return False
self.container_infrastructure_management.delete_cluster_template( self.container_infrastructure_management.delete_cluster_template(
cluster_template) cluster_template
)
return True return True
def update_cluster_template(self, name_or_id, **kwargs): def update_cluster_template(self, name_or_id, **kwargs):
@ -289,14 +302,15 @@ class CoeCloudMixin:
cluster_template = self.get_cluster_template(name_or_id) cluster_template = self.get_cluster_template(name_or_id)
if not cluster_template: if not cluster_template:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Cluster template %s not found." % name_or_id) "Cluster template %s not found." % name_or_id
cluster_template = self.container_infrastructure_management \
.update_cluster_template(
cluster_template,
**kwargs
) )
cluster_template = (
self.container_infrastructure_management.update_cluster_template(
cluster_template, **kwargs
)
)
return cluster_template return cluster_template
def list_magnum_services(self): def list_magnum_services(self):

View File

@ -114,12 +114,15 @@ class ComputeCloudMixin:
""" """
flavors = self.list_flavors(get_extra=get_extra) flavors = self.list_flavors(get_extra=get_extra)
for flavor in sorted(flavors, key=operator.itemgetter('ram')): for flavor in sorted(flavors, key=operator.itemgetter('ram')):
if (flavor['ram'] >= ram if flavor['ram'] >= ram and (
and (not include or include in flavor['name'])): not include or include in flavor['name']
):
return flavor return flavor
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Could not find a flavor with {ram} and '{include}'".format( "Could not find a flavor with {ram} and '{include}'".format(
ram=ram, include=include)) ram=ram, include=include
)
)
@_utils.cache_on_arguments() @_utils.cache_on_arguments()
def _nova_extensions(self): def _nova_extensions(self):
@ -155,8 +158,12 @@ class ComputeCloudMixin:
return _utils._filter_list(flavors, name_or_id, filters) return _utils._filter_list(flavors, name_or_id, filters)
def search_servers( def search_servers(
self, name_or_id=None, filters=None, detailed=False, self,
all_projects=False, bare=False, name_or_id=None,
filters=None,
detailed=False,
all_projects=False,
bare=False,
): ):
"""Search servers. """Search servers.
@ -169,7 +176,8 @@ class ComputeCloudMixin:
criteria. criteria.
""" """
servers = self.list_servers( servers = self.list_servers(
detailed=detailed, all_projects=all_projects, bare=bare) detailed=detailed, all_projects=all_projects, bare=bare
)
return _utils._filter_list(servers, name_or_id, filters) return _utils._filter_list(servers, name_or_id, filters)
def search_server_groups(self, name_or_id=None, filters=None): def search_server_groups(self, name_or_id=None, filters=None):
@ -213,8 +221,8 @@ class ComputeCloudMixin:
return ret return ret
except exceptions.SDKException: except exceptions.SDKException:
self.log.debug( self.log.debug(
"Availability zone list could not be fetched", "Availability zone list could not be fetched", exc_info=True
exc_info=True) )
return [] return []
@_utils.cache_on_arguments() @_utils.cache_on_arguments()
@ -226,8 +234,9 @@ class ComputeCloudMixin:
clouds.yaml by setting openstack.cloud.get_extra_specs to False. clouds.yaml by setting openstack.cloud.get_extra_specs to False.
:returns: A list of compute ``Flavor`` objects. :returns: A list of compute ``Flavor`` objects.
""" """
return list(self.compute.flavors( return list(
details=True, get_extra_specs=get_extra)) self.compute.flavors(details=True, get_extra_specs=get_extra)
)
def list_server_security_groups(self, server): def list_server_security_groups(self, server):
"""List all security groups associated with the given server. """List all security groups associated with the given server.
@ -268,8 +277,9 @@ class ComputeCloudMixin:
sg = self.get_security_group(sg) sg = self.get_security_group(sg)
if sg is None: if sg is None:
self.log.debug('Security group %s not found for adding', self.log.debug(
sg) 'Security group %s not found for adding', sg
)
return None, None return None, None
@ -288,7 +298,8 @@ class ComputeCloudMixin:
:raises: ``OpenStackCloudException``, on operation error. :raises: ``OpenStackCloudException``, on operation error.
""" """
server, security_groups = self._get_server_security_groups( server, security_groups = self._get_server_security_groups(
server, security_groups) server, security_groups
)
if not (server and security_groups): if not (server and security_groups):
return False return False
@ -310,7 +321,8 @@ class ComputeCloudMixin:
:raises: ``OpenStackCloudException``, on operation error. :raises: ``OpenStackCloudException``, on operation error.
""" """
server, security_groups = self._get_server_security_groups( server, security_groups = self._get_server_security_groups(
server, security_groups) server, security_groups
)
if not (server and security_groups): if not (server and security_groups):
return False return False
@ -327,7 +339,10 @@ class ComputeCloudMixin:
# error? Nova returns ok if you try to add a group twice. # error? Nova returns ok if you try to add a group twice.
self.log.debug( self.log.debug(
"The security group %s was not present on server %s so " "The security group %s was not present on server %s so "
"no action was performed", sg.name, server.name) "no action was performed",
sg.name,
server.name,
)
ret = False ret = False
return ret return ret
@ -377,7 +392,8 @@ class ComputeCloudMixin:
self._servers = self._list_servers( self._servers = self._list_servers(
detailed=detailed, detailed=detailed,
all_projects=all_projects, all_projects=all_projects,
bare=bare) bare=bare,
)
self._servers_time = time.time() self._servers_time = time.time()
finally: finally:
self._servers_lock.release() self._servers_lock.release()
@ -386,14 +402,15 @@ class ComputeCloudMixin:
# list from the cloud, we still return a filtered list. # list from the cloud, we still return a filtered list.
return _utils._filter_list(self._servers, None, filters) return _utils._filter_list(self._servers, None, filters)
def _list_servers(self, detailed=False, all_projects=False, bare=False, def _list_servers(
filters=None): self, detailed=False, all_projects=False, bare=False, filters=None
):
filters = filters or {} filters = filters or {}
return [ return [
self._expand_server(server, detailed, bare) self._expand_server(server, detailed, bare)
for server in self.compute.servers( for server in self.compute.servers(
all_projects=all_projects, all_projects=all_projects, **filters
**filters) )
] ]
def list_server_groups(self): def list_server_groups(self):
@ -472,12 +489,15 @@ class ComputeCloudMixin:
if not filters: if not filters:
filters = {} filters = {}
flavor = self.compute.find_flavor( flavor = self.compute.find_flavor(
name_or_id, get_extra_specs=get_extra, name_or_id,
ignore_missing=True, **filters) get_extra_specs=get_extra,
ignore_missing=True,
**filters,
)
return flavor return flavor
def get_flavor_by_id(self, id, get_extra=False): def get_flavor_by_id(self, id, get_extra=False):
""" Get a flavor by ID """Get a flavor by ID
:param id: ID of the flavor. :param id: ID of the flavor.
:param get_extra: Whether or not the list_flavors call should get the :param get_extra: Whether or not the list_flavors call should get the
@ -505,7 +525,8 @@ class ComputeCloudMixin:
if not server: if not server:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Console log requested for invalid server") "Console log requested for invalid server"
)
try: try:
return self._get_server_console_output(server['id'], length) return self._get_server_console_output(server['id'], length)
@ -514,8 +535,7 @@ class ComputeCloudMixin:
def _get_server_console_output(self, server_id, length=None): def _get_server_console_output(self, server_id, length=None):
output = self.compute.get_server_console_output( output = self.compute.get_server_console_output(
server=server_id, server=server_id, length=length
length=length
) )
if 'output' in output: if 'output' in output:
return output['output'] return output['output']
@ -555,9 +575,12 @@ class ComputeCloudMixin:
the current auth scoped project. the current auth scoped project.
:returns: A compute ``Server`` object if found, else None. :returns: A compute ``Server`` object if found, else None.
""" """
searchfunc = functools.partial(self.search_servers, searchfunc = functools.partial(
detailed=detailed, bare=True, self.search_servers,
all_projects=all_projects) detailed=detailed,
bare=True,
all_projects=all_projects,
)
server = _utils._get_entity(self, searchfunc, name_or_id, filters) server = _utils._get_entity(self, searchfunc, name_or_id, filters)
return self._expand_server(server, detailed, bare) return self._expand_server(server, detailed, bare)
@ -600,8 +623,7 @@ class ComputeCloudMixin:
:returns: A compute ``ServerGroup`` object if found, else None. :returns: A compute ``ServerGroup`` object if found, else None.
""" """
return _utils._get_entity(self, 'server_group', name_or_id, return _utils._get_entity(self, 'server_group', name_or_id, filters)
filters)
def create_keypair(self, name, public_key=None): def create_keypair(self, name, public_key=None):
"""Create a new keypair. """Create a new keypair.
@ -664,10 +686,12 @@ class ComputeCloudMixin:
if not server_obj: if not server_obj:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Server {server} could not be found and therefore" "Server {server} could not be found and therefore"
" could not be snapshotted.".format(server=server)) " could not be snapshotted.".format(server=server)
)
server = server_obj server = server_obj
image = self.compute.create_server_image( image = self.compute.create_server_image(
server, name=name, metadata=metadata, wait=wait, timeout=timeout) server, name=name, metadata=metadata, wait=wait, timeout=timeout
)
return image return image
def get_server_id(self, name_or_id): def get_server_id(self, name_or_id):
@ -709,12 +733,25 @@ class ComputeCloudMixin:
return dict(server_vars=server_vars, groups=groups) return dict(server_vars=server_vars, groups=groups)
@_utils.valid_kwargs( @_utils.valid_kwargs(
'meta', 'files', 'userdata', 'description', 'meta',
'reservation_id', 'return_raw', 'min_count', 'files',
'max_count', 'security_groups', 'key_name', 'userdata',
'availability_zone', 'block_device_mapping', 'description',
'block_device_mapping_v2', 'nics', 'scheduler_hints', 'reservation_id',
'config_drive', 'admin_pass', 'disk_config') 'return_raw',
'min_count',
'max_count',
'security_groups',
'key_name',
'availability_zone',
'block_device_mapping',
'block_device_mapping_v2',
'nics',
'scheduler_hints',
'config_drive',
'admin_pass',
'disk_config',
)
def create_server( def create_server(
self, self,
name, name,
@ -818,10 +855,12 @@ class ComputeCloudMixin:
# after image in the argument list. Doh. # after image in the argument list. Doh.
if not flavor: if not flavor:
raise TypeError( raise TypeError(
"create_server() missing 1 required argument: 'flavor'") "create_server() missing 1 required argument: 'flavor'"
)
if not image and not boot_volume: if not image and not boot_volume:
raise TypeError( raise TypeError(
"create_server() requires either 'image' or 'boot_volume'") "create_server() requires either 'image' or 'boot_volume'"
)
# TODO(mordred) Add support for description starting in 2.19 # TODO(mordred) Add support for description starting in 2.19
security_groups = kwargs.get('security_groups', []) security_groups = kwargs.get('security_groups', [])
@ -836,11 +875,12 @@ class ComputeCloudMixin:
if user_data: if user_data:
kwargs['user_data'] = self._encode_server_userdata(user_data) kwargs['user_data'] = self._encode_server_userdata(user_data)
for (desired, given) in ( for (desired, given) in (
('OS-DCF:diskConfig', 'disk_config'), ('OS-DCF:diskConfig', 'disk_config'),
('config_drive', 'config_drive'), ('config_drive', 'config_drive'),
('key_name', 'key_name'), ('key_name', 'key_name'),
('metadata', 'meta'), ('metadata', 'meta'),
('adminPass', 'admin_pass')): ('adminPass', 'admin_pass'),
):
value = kwargs.pop(given, None) value = kwargs.pop(given, None)
if value: if value:
kwargs[desired] = value kwargs[desired] = value
@ -850,7 +890,8 @@ class ComputeCloudMixin:
if not group_obj: if not group_obj:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Server Group {group} was requested but was not found" "Server Group {group} was requested but was not found"
" on the cloud".format(group=group)) " on the cloud".format(group=group)
)
if 'scheduler_hints' not in kwargs: if 'scheduler_hints' not in kwargs:
kwargs['scheduler_hints'] = {} kwargs['scheduler_hints'] = {}
kwargs['scheduler_hints']['group'] = group_obj['id'] kwargs['scheduler_hints']['group'] = group_obj['id']
@ -865,7 +906,8 @@ class ComputeCloudMixin:
else: else:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
'nics parameter to create_server takes a list of dicts.' 'nics parameter to create_server takes a list of dicts.'
' Got: {nics}'.format(nics=kwargs['nics'])) ' Got: {nics}'.format(nics=kwargs['nics'])
)
if network and ('nics' not in kwargs or not kwargs['nics']): if network and ('nics' not in kwargs or not kwargs['nics']):
nics = [] nics = []
@ -881,7 +923,10 @@ class ComputeCloudMixin:
'Network {network} is not a valid network in' 'Network {network} is not a valid network in'
' {cloud}:{region}'.format( ' {cloud}:{region}'.format(
network=network, network=network,
cloud=self.name, region=self._compute_region)) cloud=self.name,
region=self._compute_region,
)
)
nics.append({'net-id': network_obj['id']}) nics.append({'net-id': network_obj['id']})
kwargs['nics'] = nics kwargs['nics'] = nics
@ -904,14 +949,17 @@ class ComputeCloudMixin:
if not nic_net: if not nic_net:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Requested network {net} could not be found.".format( "Requested network {net} could not be found.".format(
net=net_name)) net=net_name
)
)
net['uuid'] = nic_net['id'] net['uuid'] = nic_net['id']
for ip_key in ('v4-fixed-ip', 'v6-fixed-ip', 'fixed_ip'): for ip_key in ('v4-fixed-ip', 'v6-fixed-ip', 'fixed_ip'):
fixed_ip = nic.pop(ip_key, None) fixed_ip = nic.pop(ip_key, None)
if fixed_ip and net.get('fixed_ip'): if fixed_ip and net.get('fixed_ip'):
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Only one of v4-fixed-ip, v6-fixed-ip or fixed_ip" "Only one of v4-fixed-ip, v6-fixed-ip or fixed_ip"
" may be given") " may be given"
)
if fixed_ip: if fixed_ip:
net['fixed_ip'] = fixed_ip net['fixed_ip'] = fixed_ip
for key in ('port', 'port-id'): for key in ('port', 'port-id'):
@ -920,13 +968,13 @@ class ComputeCloudMixin:
# A tag supported only in server microversion 2.32-2.36 or >= 2.42 # A tag supported only in server microversion 2.32-2.36 or >= 2.42
# Bumping the version to 2.42 to support the 'tag' implementation # Bumping the version to 2.42 to support the 'tag' implementation
if 'tag' in nic: if 'tag' in nic:
utils.require_microversion( utils.require_microversion(self.compute, '2.42')
self.compute, '2.42')
net['tag'] = nic.pop('tag') net['tag'] = nic.pop('tag')
if nic: if nic:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Additional unsupported keys given for server network" "Additional unsupported keys given for server network"
" creation: {keys}".format(keys=nic.keys())) " creation: {keys}".format(keys=nic.keys())
)
networks.append(net) networks.append(net)
if networks: if networks:
kwargs['networks'] = networks kwargs['networks'] = networks
@ -954,10 +1002,14 @@ class ComputeCloudMixin:
boot_volume = root_volume boot_volume = root_volume
kwargs = self._get_boot_from_volume_kwargs( kwargs = self._get_boot_from_volume_kwargs(
image=image, boot_from_volume=boot_from_volume, image=image,
boot_volume=boot_volume, volume_size=str(volume_size), boot_from_volume=boot_from_volume,
boot_volume=boot_volume,
volume_size=str(volume_size),
terminate_volume=terminate_volume, terminate_volume=terminate_volume,
volumes=volumes, kwargs=kwargs) volumes=volumes,
kwargs=kwargs,
)
kwargs['name'] = name kwargs['name'] = name
@ -977,14 +1029,18 @@ class ComputeCloudMixin:
server = self.compute.get_server(server.id) server = self.compute.get_server(server.id)
if server.status == 'ERROR': if server.status == 'ERROR':
raise exc.OpenStackCloudCreateException( raise exc.OpenStackCloudCreateException(
resource='server', resource_id=server.id) resource='server', resource_id=server.id
)
server = meta.add_server_interfaces(self, server) server = meta.add_server_interfaces(self, server)
else: else:
server = self.wait_for_server( server = self.wait_for_server(
server, server,
auto_ip=auto_ip, ips=ips, ip_pool=ip_pool, auto_ip=auto_ip,
reuse=reuse_ips, timeout=timeout, ips=ips,
ip_pool=ip_pool,
reuse=reuse_ips,
timeout=timeout,
nat_destination=nat_destination, nat_destination=nat_destination,
) )
@ -992,8 +1048,15 @@ class ComputeCloudMixin:
return server return server
def _get_boot_from_volume_kwargs( def _get_boot_from_volume_kwargs(
self, image, boot_from_volume, boot_volume, volume_size, self,
terminate_volume, volumes, kwargs): image,
boot_from_volume,
boot_volume,
volume_size,
terminate_volume,
volumes,
kwargs,
):
"""Return block device mappings """Return block device mappings
:param image: Image dict, name or id to boot with. :param image: Image dict, name or id to boot with.
@ -1015,7 +1078,10 @@ class ComputeCloudMixin:
'Volume {boot_volume} is not a valid volume' 'Volume {boot_volume} is not a valid volume'
' in {cloud}:{region}'.format( ' in {cloud}:{region}'.format(
boot_volume=boot_volume, boot_volume=boot_volume,
cloud=self.name, region=self._compute_region)) cloud=self.name,
region=self._compute_region,
)
)
block_mapping = { block_mapping = {
'boot_index': '0', 'boot_index': '0',
'delete_on_termination': terminate_volume, 'delete_on_termination': terminate_volume,
@ -1036,7 +1102,10 @@ class ComputeCloudMixin:
'Image {image} is not a valid image in' 'Image {image} is not a valid image in'
' {cloud}:{region}'.format( ' {cloud}:{region}'.format(
image=image, image=image,
cloud=self.name, region=self._compute_region)) cloud=self.name,
region=self._compute_region,
)
)
block_mapping = { block_mapping = {
'boot_index': '0', 'boot_index': '0',
@ -1066,7 +1135,10 @@ class ComputeCloudMixin:
'Volume {volume} is not a valid volume' 'Volume {volume} is not a valid volume'
' in {cloud}:{region}'.format( ' in {cloud}:{region}'.format(
volume=volume, volume=volume,
cloud=self.name, region=self._compute_region)) cloud=self.name,
region=self._compute_region,
)
)
block_mapping = { block_mapping = {
'boot_index': '-1', 'boot_index': '-1',
'delete_on_termination': False, 'delete_on_termination': False,
@ -1080,8 +1152,15 @@ class ComputeCloudMixin:
return kwargs return kwargs
def wait_for_server( def wait_for_server(
self, server, auto_ip=True, ips=None, ip_pool=None, self,
reuse=True, timeout=180, nat_destination=None): server,
auto_ip=True,
ips=None,
ip_pool=None,
reuse=True,
timeout=180,
nat_destination=None,
):
""" """
Wait for a server to reach ACTIVE status. Wait for a server to reach ACTIVE status.
""" """
@ -1094,11 +1173,12 @@ class ComputeCloudMixin:
# There is no point in iterating faster than the list_servers cache # There is no point in iterating faster than the list_servers cache
for count in utils.iterate_timeout( for count in utils.iterate_timeout(
timeout, timeout,
timeout_message, timeout_message,
# if _SERVER_AGE is 0 we still want to wait a bit # if _SERVER_AGE is 0 we still want to wait a bit
# to be friendly with the server. # to be friendly with the server.
wait=self._SERVER_AGE or 2): wait=self._SERVER_AGE or 2,
):
try: try:
# Use the get_server call so that the list_servers # Use the get_server call so that the list_servers
# cache can be leveraged # cache can be leveraged
@ -1116,10 +1196,15 @@ class ComputeCloudMixin:
raise exc.OpenStackCloudTimeout(timeout_message) raise exc.OpenStackCloudTimeout(timeout_message)
server = self.get_active_server( server = self.get_active_server(
server=server, reuse=reuse, server=server,
auto_ip=auto_ip, ips=ips, ip_pool=ip_pool, reuse=reuse,
wait=True, timeout=remaining_timeout, auto_ip=auto_ip,
nat_destination=nat_destination) ips=ips,
ip_pool=ip_pool,
wait=True,
timeout=remaining_timeout,
nat_destination=nat_destination,
)
if server is not None and server['status'] == 'ACTIVE': if server is not None and server['status'] == 'ACTIVE':
return server return server
@ -1136,43 +1221,58 @@ class ComputeCloudMixin:
nat_destination=None, nat_destination=None,
): ):
if server['status'] == 'ERROR': if server['status'] == 'ERROR':
if ('fault' in server and server['fault'] is not None if (
and 'message' in server['fault']): 'fault' in server
and server['fault'] is not None
and 'message' in server['fault']
):
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Error in creating the server." "Error in creating the server."
" Compute service reports fault: {reason}".format( " Compute service reports fault: {reason}".format(
reason=server['fault']['message']), reason=server['fault']['message']
extra_data=dict(server=server)) ),
extra_data=dict(server=server),
)
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Error in creating the server" "Error in creating the server"
" (no further information available)", " (no further information available)",
extra_data=dict(server=server)) extra_data=dict(server=server),
)
if server['status'] == 'ACTIVE': if server['status'] == 'ACTIVE':
if 'addresses' in server and server['addresses']: if 'addresses' in server and server['addresses']:
return self.add_ips_to_server( return self.add_ips_to_server(
server, auto_ip, ips, ip_pool, reuse=reuse, server,
auto_ip,
ips,
ip_pool,
reuse=reuse,
nat_destination=nat_destination, nat_destination=nat_destination,
wait=wait, timeout=timeout) wait=wait,
timeout=timeout,
)
self.log.debug( self.log.debug(
'Server %(server)s reached ACTIVE state without' 'Server %(server)s reached ACTIVE state without'
' being allocated an IP address.' ' being allocated an IP address.'
' Deleting server.', {'server': server['id']}) ' Deleting server.',
{'server': server['id']},
)
try: try:
self._delete_server( self._delete_server(server=server, wait=wait, timeout=timeout)
server=server, wait=wait, timeout=timeout)
except Exception as e: except Exception as e:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
'Server reached ACTIVE state without being' 'Server reached ACTIVE state without being'
' allocated an IP address AND then could not' ' allocated an IP address AND then could not'
' be deleted: {0}'.format(e), ' be deleted: {0}'.format(e),
extra_data=dict(server=server)) extra_data=dict(server=server),
)
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
'Server reached ACTIVE state without being' 'Server reached ACTIVE state without being'
' allocated an IP address.', ' allocated an IP address.',
extra_data=dict(server=server)) extra_data=dict(server=server),
)
return None return None
def rebuild_server( def rebuild_server(
@ -1202,17 +1302,12 @@ class ComputeCloudMixin:
if admin_pass: if admin_pass:
kwargs['admin_password'] = admin_pass kwargs['admin_password'] = admin_pass
server = self.compute.rebuild_server( server = self.compute.rebuild_server(server_id, **kwargs)
server_id,
**kwargs
)
if not wait: if not wait:
return self._expand_server( return self._expand_server(server, bare=bare, detailed=detailed)
server, bare=bare, detailed=detailed)
admin_pass = server.get('adminPass') or admin_pass admin_pass = server.get('adminPass') or admin_pass
server = self.compute.wait_for_server( server = self.compute.wait_for_server(server, wait=timeout)
server, wait=timeout)
if server['status'] == 'ACTIVE': if server['status'] == 'ACTIVE':
server.adminPass = admin_pass server.adminPass = admin_pass
@ -1231,7 +1326,8 @@ class ComputeCloudMixin:
server = self.get_server(name_or_id, bare=True) server = self.get_server(name_or_id, bare=True)
if not server: if not server:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
'Invalid Server {server}'.format(server=name_or_id)) 'Invalid Server {server}'.format(server=name_or_id)
)
self.compute.set_server_metadata(server=server.id, **metadata) self.compute.set_server_metadata(server=server.id, **metadata)
@ -1248,10 +1344,12 @@ class ComputeCloudMixin:
server = self.get_server(name_or_id, bare=True) server = self.get_server(name_or_id, bare=True)
if not server: if not server:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
'Invalid Server {server}'.format(server=name_or_id)) 'Invalid Server {server}'.format(server=name_or_id)
)
self.compute.delete_server_metadata(server=server.id, self.compute.delete_server_metadata(
keys=metadata_keys) server=server.id, keys=metadata_keys
)
def delete_server( def delete_server(
self, self,
@ -1275,8 +1373,7 @@ class ComputeCloudMixin:
:raises: OpenStackCloudException on operation error. :raises: OpenStackCloudException on operation error.
""" """
# If delete_ips is True, we need the server to not be bare. # If delete_ips is True, we need the server to not be bare.
server = self.compute.find_server( server = self.compute.find_server(name_or_id, ignore_missing=True)
name_or_id, ignore_missing=True)
if not server: if not server:
return False return False
@ -1284,18 +1381,24 @@ class ComputeCloudMixin:
# private method in order to avoid an unnecessary API call to get # private method in order to avoid an unnecessary API call to get
# a server we already have. # a server we already have.
return self._delete_server( return self._delete_server(
server, wait=wait, timeout=timeout, delete_ips=delete_ips, server,
delete_ip_retry=delete_ip_retry) wait=wait,
timeout=timeout,
delete_ips=delete_ips,
delete_ip_retry=delete_ip_retry,
)
def _delete_server_floating_ips(self, server, delete_ip_retry): def _delete_server_floating_ips(self, server, delete_ip_retry):
# Does the server have floating ips in its # Does the server have floating ips in its
# addresses dict? If not, skip this. # addresses dict? If not, skip this.
server_floats = meta.find_nova_interfaces( server_floats = meta.find_nova_interfaces(
server['addresses'], ext_tag='floating') server['addresses'], ext_tag='floating'
)
for fip in server_floats: for fip in server_floats:
try: try:
ip = self.get_floating_ip(id=None, filters={ ip = self.get_floating_ip(
'floating_ip_address': fip['addr']}) id=None, filters={'floating_ip_address': fip['addr']}
)
except exc.OpenStackCloudURINotFound: except exc.OpenStackCloudURINotFound:
# We're deleting. If it doesn't exist - awesome # We're deleting. If it doesn't exist - awesome
# NOTE(mordred) If the cloud is a nova FIP cloud but # NOTE(mordred) If the cloud is a nova FIP cloud but
@ -1304,19 +1407,24 @@ class ComputeCloudMixin:
continue continue
if not ip: if not ip:
continue continue
deleted = self.delete_floating_ip( deleted = self.delete_floating_ip(ip['id'], retry=delete_ip_retry)
ip['id'], retry=delete_ip_retry)
if not deleted: if not deleted:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Tried to delete floating ip {floating_ip}" "Tried to delete floating ip {floating_ip}"
" associated with server {id} but there was" " associated with server {id} but there was"
" an error deleting it. Not deleting server.".format( " an error deleting it. Not deleting server.".format(
floating_ip=ip['floating_ip_address'], floating_ip=ip['floating_ip_address'], id=server['id']
id=server['id'])) )
)
def _delete_server( def _delete_server(
self, server, wait=False, timeout=180, delete_ips=False, self,
delete_ip_retry=1): server,
wait=False,
timeout=180,
delete_ips=False,
delete_ip_retry=1,
):
if not server: if not server:
return False return False
@ -1324,8 +1432,7 @@ class ComputeCloudMixin:
self._delete_server_floating_ips(server, delete_ip_retry) self._delete_server_floating_ips(server, delete_ip_retry)
try: try:
self.compute.delete_server( self.compute.delete_server(server)
server)
except exceptions.ResourceNotFound: except exceptions.ResourceNotFound:
return False return False
except Exception: except Exception:
@ -1339,9 +1446,11 @@ class ComputeCloudMixin:
# need to invalidate the cache. Avoid the extra API call if # need to invalidate the cache. Avoid the extra API call if
# caching is not enabled. # caching is not enabled.
reset_volume_cache = False reset_volume_cache = False
if (self.cache_enabled if (
and self.has_service('volume') self.cache_enabled
and self.get_volumes(server)): and self.has_service('volume')
and self.get_volumes(server)
):
reset_volume_cache = True reset_volume_cache = True
if not isinstance(server, _server.Server): if not isinstance(server, _server.Server):
@ -1349,8 +1458,7 @@ class ComputeCloudMixin:
# If this is the case - convert it into real server to be able to # If this is the case - convert it into real server to be able to
# use wait_for_delete # use wait_for_delete
server = _server.Server(id=server['id']) server = _server.Server(id=server['id'])
self.compute.wait_for_delete( self.compute.wait_for_delete(server, wait=timeout)
server, wait=timeout)
if reset_volume_cache: if reset_volume_cache:
self.list_volumes.invalidate(self) self.list_volumes.invalidate(self)
@ -1360,8 +1468,7 @@ class ComputeCloudMixin:
self._servers_time = self._servers_time - self._SERVER_AGE self._servers_time = self._servers_time - self._SERVER_AGE
return True return True
@_utils.valid_kwargs( @_utils.valid_kwargs('name', 'description')
'name', 'description')
def update_server(self, name_or_id, detailed=False, bare=False, **kwargs): def update_server(self, name_or_id, detailed=False, bare=False, **kwargs):
"""Update a server. """Update a server.
@ -1377,13 +1484,9 @@ class ComputeCloudMixin:
:returns: The updated compute ``Server`` object. :returns: The updated compute ``Server`` object.
:raises: OpenStackCloudException on operation error. :raises: OpenStackCloudException on operation error.
""" """
server = self.compute.find_server( server = self.compute.find_server(name_or_id, ignore_missing=False)
name_or_id,
ignore_missing=False
)
server = self.compute.update_server( server = self.compute.update_server(server, **kwargs)
server, **kwargs)
return self._expand_server(server, bare=bare, detailed=detailed) return self._expand_server(server, bare=bare, detailed=detailed)
@ -1395,16 +1498,12 @@ class ComputeCloudMixin:
:returns: The created compute ``ServerGroup`` object. :returns: The created compute ``ServerGroup`` object.
:raises: OpenStackCloudException on operation error. :raises: OpenStackCloudException on operation error.
""" """
sg_attrs = { sg_attrs = {'name': name}
'name': name
}
if policies: if policies:
sg_attrs['policies'] = policies sg_attrs['policies'] = policies
if policy: if policy:
sg_attrs['policy'] = policy sg_attrs['policy'] = policy
return self.compute.create_server_group( return self.compute.create_server_group(**sg_attrs)
**sg_attrs
)
def delete_server_group(self, name_or_id): def delete_server_group(self, name_or_id):
"""Delete a server group. """Delete a server group.
@ -1415,8 +1514,9 @@ class ComputeCloudMixin:
""" """
server_group = self.get_server_group(name_or_id) server_group = self.get_server_group(name_or_id)
if not server_group: if not server_group:
self.log.debug("Server group %s not found for deleting", self.log.debug(
name_or_id) "Server group %s not found for deleting", name_or_id
)
return False return False
self.compute.delete_server_group(server_group, ignore_missing=False) self.compute.delete_server_group(server_group, ignore_missing=False)
@ -1477,14 +1577,14 @@ class ComputeCloudMixin:
try: try:
flavor = self.compute.find_flavor(name_or_id) flavor = self.compute.find_flavor(name_or_id)
if not flavor: if not flavor:
self.log.debug( self.log.debug("Flavor %s not found for deleting", name_or_id)
"Flavor %s not found for deleting", name_or_id)
return False return False
self.compute.delete_flavor(flavor) self.compute.delete_flavor(flavor)
return True return True
except exceptions.SDKException: except exceptions.SDKException:
raise exceptions.OpenStackCloudException( raise exceptions.OpenStackCloudException(
"Unable to delete flavor {name}".format(name=name_or_id)) "Unable to delete flavor {name}".format(name=name_or_id)
)
def set_flavor_specs(self, flavor_id, extra_specs): def set_flavor_specs(self, flavor_id, extra_specs):
"""Add extra specs to a flavor """Add extra specs to a flavor
@ -1545,9 +1645,7 @@ class ComputeCloudMixin:
:returns: A list of compute ``Hypervisor`` objects. :returns: A list of compute ``Hypervisor`` objects.
""" """
return list(self.compute.hypervisors( return list(self.compute.hypervisors(details=True, **filters))
details=True,
**filters))
def search_aggregates(self, name_or_id=None, filters=None): def search_aggregates(self, name_or_id=None, filters=None):
"""Seach host aggregates. """Seach host aggregates.
@ -1587,8 +1685,7 @@ class ComputeCloudMixin:
:returns: An aggregate dict or None if no matching aggregate is :returns: An aggregate dict or None if no matching aggregate is
found. found.
""" """
return self.compute.find_aggregate( return self.compute.find_aggregate(name_or_id, ignore_missing=True)
name_or_id, ignore_missing=True)
def create_aggregate(self, name, availability_zone=None): def create_aggregate(self, name, availability_zone=None):
"""Create a new host aggregate. """Create a new host aggregate.
@ -1599,8 +1696,7 @@ class ComputeCloudMixin:
:raises: OpenStackCloudException on operation error. :raises: OpenStackCloudException on operation error.
""" """
return self.compute.create_aggregate( return self.compute.create_aggregate(
name=name, name=name, availability_zone=availability_zone
availability_zone=availability_zone
) )
@_utils.valid_kwargs('name', 'availability_zone') @_utils.valid_kwargs('name', 'availability_zone')
@ -1623,14 +1719,12 @@ class ComputeCloudMixin:
:returns: True if delete succeeded, False otherwise. :returns: True if delete succeeded, False otherwise.
:raises: OpenStackCloudException on operation error. :raises: OpenStackCloudException on operation error.
""" """
if ( if isinstance(name_or_id, (str, bytes)) and not name_or_id.isdigit():
isinstance(name_or_id, (str, bytes))
and not name_or_id.isdigit()
):
aggregate = self.get_aggregate(name_or_id) aggregate = self.get_aggregate(name_or_id)
if not aggregate: if not aggregate:
self.log.debug( self.log.debug(
"Aggregate %s not found for deleting", name_or_id) "Aggregate %s not found for deleting", name_or_id
)
return False return False
name_or_id = aggregate.id name_or_id = aggregate.id
try: try:
@ -1654,7 +1748,8 @@ class ComputeCloudMixin:
aggregate = self.get_aggregate(name_or_id) aggregate = self.get_aggregate(name_or_id)
if not aggregate: if not aggregate:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Host aggregate %s not found." % name_or_id) "Host aggregate %s not found." % name_or_id
)
return self.compute.set_aggregate_metadata(aggregate, metadata) return self.compute.set_aggregate_metadata(aggregate, metadata)
@ -1669,7 +1764,8 @@ class ComputeCloudMixin:
aggregate = self.get_aggregate(name_or_id) aggregate = self.get_aggregate(name_or_id)
if not aggregate: if not aggregate:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Host aggregate %s not found." % name_or_id) "Host aggregate %s not found." % name_or_id
)
return self.compute.add_host_to_aggregate(aggregate, host_name) return self.compute.add_host_to_aggregate(aggregate, host_name)
@ -1684,12 +1780,13 @@ class ComputeCloudMixin:
aggregate = self.get_aggregate(name_or_id) aggregate = self.get_aggregate(name_or_id)
if not aggregate: if not aggregate:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Host aggregate %s not found." % name_or_id) "Host aggregate %s not found." % name_or_id
)
return self.compute.remove_host_from_aggregate(aggregate, host_name) return self.compute.remove_host_from_aggregate(aggregate, host_name)
def set_compute_quotas(self, name_or_id, **kwargs): def set_compute_quotas(self, name_or_id, **kwargs):
""" Set a quota in a project """Set a quota in a project
:param name_or_id: project name or id :param name_or_id: project name or id
:param kwargs: key/value pairs of quota name and quota value :param kwargs: key/value pairs of quota name and quota value
@ -1697,39 +1794,35 @@ class ComputeCloudMixin:
:raises: OpenStackCloudException if the resource to set the :raises: OpenStackCloudException if the resource to set the
quota does not exist. quota does not exist.
""" """
proj = self.identity.find_project( proj = self.identity.find_project(name_or_id, ignore_missing=False)
name_or_id, ignore_missing=False)
kwargs['force'] = True kwargs['force'] = True
self.compute.update_quota_set( self.compute.update_quota_set(
_qs.QuotaSet(project_id=proj.id), _qs.QuotaSet(project_id=proj.id), **kwargs
**kwargs
) )
def get_compute_quotas(self, name_or_id): def get_compute_quotas(self, name_or_id):
""" Get quota for a project """Get quota for a project
:param name_or_id: project name or id :param name_or_id: project name or id
:returns: A compute ``QuotaSet`` object if found, else None. :returns: A compute ``QuotaSet`` object if found, else None.
:raises: OpenStackCloudException if it's not a valid project :raises: OpenStackCloudException if it's not a valid project
""" """
proj = self.identity.find_project( proj = self.identity.find_project(name_or_id, ignore_missing=False)
name_or_id, ignore_missing=False)
return self.compute.get_quota_set(proj) return self.compute.get_quota_set(proj)
def delete_compute_quotas(self, name_or_id): def delete_compute_quotas(self, name_or_id):
""" Delete quota for a project """Delete quota for a project
:param name_or_id: project name or id :param name_or_id: project name or id
:raises: OpenStackCloudException if it's not a valid project or the :raises: OpenStackCloudException if it's not a valid project or the
nova client call failed nova client call failed
:returns: None :returns: None
""" """
proj = self.identity.find_project( proj = self.identity.find_project(name_or_id, ignore_missing=False)
name_or_id, ignore_missing=False)
self.compute.revert_quota_set(proj) self.compute.revert_quota_set(proj)
def get_compute_usage(self, name_or_id, start=None, end=None): def get_compute_usage(self, name_or_id, start=None, end=None):
""" Get usage for a specific project """Get usage for a specific project
:param name_or_id: project name or id :param name_or_id: project name or id
:param start: :class:`datetime.datetime` or string. Start date in UTC :param start: :class:`datetime.datetime` or string. Start date in UTC
@ -1741,6 +1834,7 @@ class ComputeCloudMixin:
:returns: A :class:`~openstack.compute.v2.usage.Usage` object :returns: A :class:`~openstack.compute.v2.usage.Usage` object
""" """
def parse_date(date): def parse_date(date):
try: try:
return iso8601.parse_date(date) return iso8601.parse_date(date)
@ -1751,8 +1845,8 @@ class ComputeCloudMixin:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Date given, {date}, is invalid. Please pass in a date" "Date given, {date}, is invalid. Please pass in a date"
" string in ISO 8601 format -" " string in ISO 8601 format -"
" YYYY-MM-DDTHH:MM:SS".format( " YYYY-MM-DDTHH:MM:SS".format(date=date)
date=date)) )
if isinstance(start, str): if isinstance(start, str):
start = parse_date(start) start = parse_date(start)
@ -1762,7 +1856,8 @@ class ComputeCloudMixin:
proj = self.get_project(name_or_id) proj = self.get_project(name_or_id)
if not proj: if not proj:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"project does not exist: {name}".format(name=proj.id)) "project does not exist: {name}".format(name=proj.id)
)
return self.compute.get_usage(proj, start, end) return self.compute.get_usage(proj, start, end)
@ -1830,22 +1925,28 @@ class ComputeCloudMixin:
project_id = server.pop('project_id', project_id) project_id = server.pop('project_id', project_id)
az = _pop_or_get( az = _pop_or_get(
server, 'OS-EXT-AZ:availability_zone', None, self.strict_mode) server, 'OS-EXT-AZ:availability_zone', None, self.strict_mode
)
# the server resource has this already, but it's missing az info # the server resource has this already, but it's missing az info
# from the resource. # from the resource.
# TODO(mordred) create_server is still normalizing servers that aren't # TODO(mordred) create_server is still normalizing servers that aren't
# from the resource layer. # from the resource layer.
ret['location'] = server.pop( ret['location'] = server.pop(
'location', self._get_current_location( 'location',
project_id=project_id, zone=az)) self._get_current_location(project_id=project_id, zone=az),
)
# Ensure volumes is always in the server dict, even if empty # Ensure volumes is always in the server dict, even if empty
ret['volumes'] = _pop_or_get( ret['volumes'] = _pop_or_get(
server, 'os-extended-volumes:volumes_attached', server,
[], self.strict_mode) 'os-extended-volumes:volumes_attached',
[],
self.strict_mode,
)
config_drive = server.pop( config_drive = server.pop(
'has_config_drive', server.pop('config_drive', False)) 'has_config_drive', server.pop('config_drive', False)
)
ret['has_config_drive'] = _to_bool(config_drive) ret['has_config_drive'] = _to_bool(config_drive)
host_id = server.pop('hostId', server.pop('host_id', None)) host_id = server.pop('hostId', server.pop('host_id', None))
@ -1855,24 +1956,25 @@ class ComputeCloudMixin:
# Leave these in so that the general properties handling works # Leave these in so that the general properties handling works
ret['disk_config'] = _pop_or_get( ret['disk_config'] = _pop_or_get(
server, 'OS-DCF:diskConfig', None, self.strict_mode) server, 'OS-DCF:diskConfig', None, self.strict_mode
)
for key in ( for key in (
'OS-EXT-STS:power_state', 'OS-EXT-STS:power_state',
'OS-EXT-STS:task_state', 'OS-EXT-STS:task_state',
'OS-EXT-STS:vm_state', 'OS-EXT-STS:vm_state',
'OS-SRV-USG:launched_at', 'OS-SRV-USG:launched_at',
'OS-SRV-USG:terminated_at', 'OS-SRV-USG:terminated_at',
'OS-EXT-SRV-ATTR:hypervisor_hostname', 'OS-EXT-SRV-ATTR:hypervisor_hostname',
'OS-EXT-SRV-ATTR:instance_name', 'OS-EXT-SRV-ATTR:instance_name',
'OS-EXT-SRV-ATTR:user_data', 'OS-EXT-SRV-ATTR:user_data',
'OS-EXT-SRV-ATTR:host', 'OS-EXT-SRV-ATTR:host',
'OS-EXT-SRV-ATTR:hostname', 'OS-EXT-SRV-ATTR:hostname',
'OS-EXT-SRV-ATTR:kernel_id', 'OS-EXT-SRV-ATTR:kernel_id',
'OS-EXT-SRV-ATTR:launch_index', 'OS-EXT-SRV-ATTR:launch_index',
'OS-EXT-SRV-ATTR:ramdisk_id', 'OS-EXT-SRV-ATTR:ramdisk_id',
'OS-EXT-SRV-ATTR:reservation_id', 'OS-EXT-SRV-ATTR:reservation_id',
'OS-EXT-SRV-ATTR:root_device_name', 'OS-EXT-SRV-ATTR:root_device_name',
'OS-SCH-HNT:scheduler_hints', 'OS-SCH-HNT:scheduler_hints',
): ):
short_key = key.split(':')[1] short_key = key.split(':')[1]
ret[short_key] = _pop_or_get(server, key, None, self.strict_mode) ret[short_key] = _pop_or_get(server, key, None, self.strict_mode)

View File

@ -33,8 +33,7 @@ class DnsCloudMixin:
""" """
if not filters: if not filters:
filters = {} filters = {}
return list(self.dns.zones(allow_unknown_params=True, return list(self.dns.zones(allow_unknown_params=True, **filters))
**filters))
def get_zone(self, name_or_id, filters=None): def get_zone(self, name_or_id, filters=None):
"""Get a zone by name or ID. """Get a zone by name or ID.
@ -49,7 +48,8 @@ class DnsCloudMixin:
if not filters: if not filters:
filters = {} filters = {}
zone = self.dns.find_zone( zone = self.dns.find_zone(
name_or_id=name_or_id, ignore_missing=True, **filters) name_or_id=name_or_id, ignore_missing=True, **filters
)
if not zone: if not zone:
return None return None
return zone return zone
@ -58,8 +58,15 @@ class DnsCloudMixin:
zones = self.list_zones(filters) zones = self.list_zones(filters)
return _utils._filter_list(zones, name_or_id, filters) return _utils._filter_list(zones, name_or_id, filters)
def create_zone(self, name, zone_type=None, email=None, description=None, def create_zone(
ttl=None, masters=None): self,
name,
zone_type=None,
email=None,
description=None,
ttl=None,
masters=None,
):
"""Create a new zone. """Create a new zone.
:param name: Name of the zone being created. :param name: Name of the zone being created.
@ -82,8 +89,9 @@ class DnsCloudMixin:
zone_type = zone_type.upper() zone_type = zone_type.upper()
if zone_type not in ('PRIMARY', 'SECONDARY'): if zone_type not in ('PRIMARY', 'SECONDARY'):
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Invalid type %s, valid choices are PRIMARY or SECONDARY" % "Invalid type %s, valid choices are PRIMARY or SECONDARY"
zone_type) % zone_type
)
zone = { zone = {
"name": name, "name": name,
@ -125,7 +133,8 @@ class DnsCloudMixin:
zone = self.get_zone(name_or_id) zone = self.get_zone(name_or_id)
if not zone: if not zone:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Zone %s not found." % name_or_id) "Zone %s not found." % name_or_id
)
return self.dns.update_zone(zone['id'], **kwargs) return self.dns.update_zone(zone['id'], **kwargs)
@ -162,8 +171,7 @@ class DnsCloudMixin:
else: else:
zone_obj = self.get_zone(zone) zone_obj = self.get_zone(zone)
if zone_obj is None: if zone_obj is None:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException("Zone %s not found." % zone)
"Zone %s not found." % zone)
return list(self.dns.recordsets(zone_obj)) return list(self.dns.recordsets(zone_obj))
def get_recordset(self, zone, name_or_id): def get_recordset(self, zone, name_or_id):
@ -182,11 +190,11 @@ class DnsCloudMixin:
else: else:
zone_obj = self.get_zone(zone) zone_obj = self.get_zone(zone)
if not zone_obj: if not zone_obj:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException("Zone %s not found." % zone)
"Zone %s not found." % zone)
try: try:
return self.dns.find_recordset( return self.dns.find_recordset(
zone=zone_obj, name_or_id=name_or_id, ignore_missing=False) zone=zone_obj, name_or_id=name_or_id, ignore_missing=False
)
except Exception: except Exception:
return None return None
@ -194,8 +202,9 @@ class DnsCloudMixin:
recordsets = self.list_recordsets(zone=zone) recordsets = self.list_recordsets(zone=zone)
return _utils._filter_list(recordsets, name_or_id, filters) return _utils._filter_list(recordsets, name_or_id, filters)
def create_recordset(self, zone, name, recordset_type, records, def create_recordset(
description=None, ttl=None): self, zone, name, recordset_type, records, description=None, ttl=None
):
"""Create a recordset. """Create a recordset.
:param zone: Name, ID or :class:`openstack.dns.v2.zone.Zone` instance :param zone: Name, ID or :class:`openstack.dns.v2.zone.Zone` instance
@ -216,17 +225,12 @@ class DnsCloudMixin:
else: else:
zone_obj = self.get_zone(zone) zone_obj = self.get_zone(zone)
if not zone_obj: if not zone_obj:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException("Zone %s not found." % zone)
"Zone %s not found." % zone)
# We capitalize the type in case the user sends in lowercase # We capitalize the type in case the user sends in lowercase
recordset_type = recordset_type.upper() recordset_type = recordset_type.upper()
body = { body = {'name': name, 'type': recordset_type, 'records': records}
'name': name,
'type': recordset_type,
'records': records
}
if description: if description:
body['description'] = description body['description'] = description
@ -255,7 +259,8 @@ class DnsCloudMixin:
rs = self.get_recordset(zone, name_or_id) rs = self.get_recordset(zone, name_or_id)
if not rs: if not rs:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Recordset %s not found." % name_or_id) "Recordset %s not found." % name_or_id
)
rs = self.dns.update_recordset(recordset=rs, **kwargs) rs = self.dns.update_recordset(recordset=rs, **kwargs)

View File

@ -14,7 +14,6 @@
# We can't just use list, because sphinx gets confused by # We can't just use list, because sphinx gets confused by
# openstack.resource.Resource.list and openstack.resource2.Resource.list # openstack.resource.Resource.list and openstack.resource2.Resource.list
import ipaddress import ipaddress
# import jsonpatch
import threading import threading
import time import time
import types # noqa import types # noqa
@ -30,7 +29,8 @@ from openstack import utils
_CONFIG_DOC_URL = ( _CONFIG_DOC_URL = (
"https://docs.openstack.org/openstacksdk/latest/" "https://docs.openstack.org/openstacksdk/latest/"
"user/config/configuration.html") "user/config/configuration.html"
)
class FloatingIPCloudMixin: class FloatingIPCloudMixin:
@ -39,8 +39,7 @@ class FloatingIPCloudMixin:
def __init__(self): def __init__(self):
self.private = self.config.config.get('private', False) self.private = self.config.config.get('private', False)
self._floating_ip_source = self.config.config.get( self._floating_ip_source = self.config.config.get('floating_ip_source')
'floating_ip_source')
if self._floating_ip_source: if self._floating_ip_source:
if self._floating_ip_source.lower() == 'none': if self._floating_ip_source.lower() == 'none':
self._floating_ip_source = None self._floating_ip_source = None
@ -68,7 +67,8 @@ class FloatingIPCloudMixin:
# understand, obviously. # understand, obviously.
warnings.warn( warnings.warn(
"search_floating_ips is deprecated. " "search_floating_ips is deprecated. "
"Use search_resource instead.") "Use search_resource instead."
)
if self._use_neutron_floating() and isinstance(filters, dict): if self._use_neutron_floating() and isinstance(filters, dict):
return list(self.network.ips(**filters)) return list(self.network.ips(**filters))
else: else:
@ -83,8 +83,7 @@ class FloatingIPCloudMixin:
def _nova_list_floating_ips(self): def _nova_list_floating_ips(self):
try: try:
data = proxy._json_response( data = proxy._json_response(self.compute.get('/os-floating-ips'))
self.compute.get('/os-floating-ips'))
except exc.OpenStackCloudURINotFound: except exc.OpenStackCloudURINotFound:
return [] return []
return self._get_and_munchify('floating_ips', data) return self._get_and_munchify('floating_ips', data)
@ -137,10 +136,11 @@ class FloatingIPCloudMixin:
" using clouds.yaml to configure settings for your" " using clouds.yaml to configure settings for your"
" cloud(s), and you want to configure this setting," " cloud(s), and you want to configure this setting,"
" you will need a clouds.yaml file. For more" " you will need a clouds.yaml file. For more"
" information, please see %(doc_url)s", { " information, please see %(doc_url)s",
{
'cloud': self.name, 'cloud': self.name,
'doc_url': _CONFIG_DOC_URL, 'doc_url': _CONFIG_DOC_URL,
} },
) )
# We can't fallback to nova because we push-down filters. # We can't fallback to nova because we push-down filters.
# We got a 404 which means neutron doesn't exist. If the # We got a 404 which means neutron doesn't exist. If the
@ -148,7 +148,9 @@ class FloatingIPCloudMixin:
return [] return []
self.log.debug( self.log.debug(
"Something went wrong talking to neutron API: " "Something went wrong talking to neutron API: "
"'%(msg)s'. Trying with Nova.", {'msg': str(e)}) "'%(msg)s'. Trying with Nova.",
{'msg': str(e)},
)
# Fall-through, trying with Nova # Fall-through, trying with Nova
else: else:
if filters: if filters:
@ -174,11 +176,13 @@ class FloatingIPCloudMixin:
""" """
if not self._has_nova_extension('os-floating-ip-pools'): if not self._has_nova_extension('os-floating-ip-pools'):
raise exc.OpenStackCloudUnavailableExtension( raise exc.OpenStackCloudUnavailableExtension(
'Floating IP pools extension is not available on target cloud') 'Floating IP pools extension is not available on target cloud'
)
data = proxy._json_response( data = proxy._json_response(
self.compute.get('os-floating-ip-pools'), self.compute.get('os-floating-ip-pools'),
error_message="Error fetching floating IP pool list") error_message="Error fetching floating IP pool list",
)
pools = self._get_and_munchify('floating_ip_pools', data) pools = self._get_and_munchify('floating_ip_pools', data)
return [{'name': p['name']} for p in pools] return [{'name': p['name']} for p in pools]
@ -217,7 +221,7 @@ class FloatingIPCloudMixin:
return _utils._filter_list(self._floating_ips, None, filters) return _utils._filter_list(self._floating_ips, None, filters)
def get_floating_ip_by_id(self, id): def get_floating_ip_by_id(self, id):
""" Get a floating ip by ID """Get a floating ip by ID
:param id: ID of the floating ip. :param id: ID of the floating ip.
:returns: A floating ip :returns: A floating ip
@ -231,12 +235,15 @@ class FloatingIPCloudMixin:
else: else:
data = proxy._json_response( data = proxy._json_response(
self.compute.get('/os-floating-ips/{id}'.format(id=id)), self.compute.get('/os-floating-ips/{id}'.format(id=id)),
error_message=error_message) error_message=error_message,
)
return self._normalize_floating_ip( return self._normalize_floating_ip(
self._get_and_munchify('floating_ip', data)) self._get_and_munchify('floating_ip', data)
)
def _neutron_available_floating_ips( def _neutron_available_floating_ips(
self, network=None, project_id=None, server=None): self, network=None, project_id=None, server=None
):
"""Get a floating IP from a network. """Get a floating IP from a network.
Return a list of available floating IPs or allocate a new one and Return a list of available floating IPs or allocate a new one and
@ -271,8 +278,7 @@ class FloatingIPCloudMixin:
if floating_network_id is None: if floating_network_id is None:
raise exc.OpenStackCloudResourceNotFound( raise exc.OpenStackCloudResourceNotFound(
"unable to find external network {net}".format( "unable to find external network {net}".format(net=network)
net=network)
) )
else: else:
floating_network_id = self._get_floating_network_id() floating_network_id = self._get_floating_network_id()
@ -285,14 +291,16 @@ class FloatingIPCloudMixin:
floating_ips = self._list_floating_ips() floating_ips = self._list_floating_ips()
available_ips = _utils._filter_list( available_ips = _utils._filter_list(
floating_ips, name_or_id=None, filters=filters) floating_ips, name_or_id=None, filters=filters
)
if available_ips: if available_ips:
return available_ips return available_ips
# No available IP found or we didn't try # No available IP found or we didn't try
# allocate a new Floating IP # allocate a new Floating IP
f_ip = self._neutron_create_floating_ip( f_ip = self._neutron_create_floating_ip(
network_id=floating_network_id, server=server) network_id=floating_network_id, server=server
)
return [f_ip] return [f_ip]
@ -311,23 +319,22 @@ class FloatingIPCloudMixin:
""" """
with _utils.shade_exceptions( with _utils.shade_exceptions(
"Unable to create floating IP in pool {pool}".format( "Unable to create floating IP in pool {pool}".format(pool=pool)
pool=pool)): ):
if pool is None: if pool is None:
pools = self.list_floating_ip_pools() pools = self.list_floating_ip_pools()
if not pools: if not pools:
raise exc.OpenStackCloudResourceNotFound( raise exc.OpenStackCloudResourceNotFound(
"unable to find a floating ip pool") "unable to find a floating ip pool"
)
pool = pools[0]['name'] pool = pools[0]['name']
filters = { filters = {'instance_id': None, 'pool': pool}
'instance_id': None,
'pool': pool
}
floating_ips = self._nova_list_floating_ips() floating_ips = self._nova_list_floating_ips()
available_ips = _utils._filter_list( available_ips = _utils._filter_list(
floating_ips, name_or_id=None, filters=filters) floating_ips, name_or_id=None, filters=filters
)
if available_ips: if available_ips:
return available_ips return available_ips
@ -341,7 +348,8 @@ class FloatingIPCloudMixin:
"""Find the network providing floating ips by looking at routers.""" """Find the network providing floating ips by looking at routers."""
if self._floating_network_by_router_lock.acquire( if self._floating_network_by_router_lock.acquire(
not self._floating_network_by_router_run): not self._floating_network_by_router_run
):
if self._floating_network_by_router_run: if self._floating_network_by_router_run:
self._floating_network_by_router_lock.release() self._floating_network_by_router_lock.release()
return self._floating_network_by_router return self._floating_network_by_router
@ -349,7 +357,8 @@ class FloatingIPCloudMixin:
for router in self.list_routers(): for router in self.list_routers():
if router['admin_state_up']: if router['admin_state_up']:
network_id = router.get( network_id = router.get(
'external_gateway_info', {}).get('network_id') 'external_gateway_info', {}
).get('network_id')
if network_id: if network_id:
self._floating_network_by_router = network_id self._floating_network_by_router = network_id
finally: finally:
@ -371,12 +380,15 @@ class FloatingIPCloudMixin:
if self._use_neutron_floating(): if self._use_neutron_floating():
try: try:
f_ips = self._neutron_available_floating_ips( f_ips = self._neutron_available_floating_ips(
network=network, server=server) network=network, server=server
)
return f_ips[0] return f_ips[0]
except exc.OpenStackCloudURINotFound as e: except exc.OpenStackCloudURINotFound as e:
self.log.debug( self.log.debug(
"Something went wrong talking to neutron API: " "Something went wrong talking to neutron API: "
"'%(msg)s'. Trying with Nova.", {'msg': str(e)}) "'%(msg)s'. Trying with Nova.",
{'msg': str(e)},
)
# Fall-through, trying with Nova # Fall-through, trying with Nova
f_ips = self._normalize_floating_ips( f_ips = self._normalize_floating_ips(
@ -395,12 +407,20 @@ class FloatingIPCloudMixin:
floating_network_id = floating_network floating_network_id = floating_network
else: else:
raise exc.OpenStackCloudResourceNotFound( raise exc.OpenStackCloudResourceNotFound(
"unable to find an external network") "unable to find an external network"
)
return floating_network_id return floating_network_id
def create_floating_ip(self, network=None, server=None, def create_floating_ip(
fixed_address=None, nat_destination=None, self,
port=None, wait=False, timeout=60): network=None,
server=None,
fixed_address=None,
nat_destination=None,
port=None,
wait=False,
timeout=60,
):
"""Allocate a new floating IP from a network or a pool. """Allocate a new floating IP from a network or a pool.
:param network: Name or ID of the network :param network: Name or ID of the network
@ -430,15 +450,20 @@ class FloatingIPCloudMixin:
if self._use_neutron_floating(): if self._use_neutron_floating():
try: try:
return self._neutron_create_floating_ip( return self._neutron_create_floating_ip(
network_name_or_id=network, server=server, network_name_or_id=network,
server=server,
fixed_address=fixed_address, fixed_address=fixed_address,
nat_destination=nat_destination, nat_destination=nat_destination,
port=port, port=port,
wait=wait, timeout=timeout) wait=wait,
timeout=timeout,
)
except exc.OpenStackCloudURINotFound as e: except exc.OpenStackCloudURINotFound as e:
self.log.debug( self.log.debug(
"Something went wrong talking to neutron API: " "Something went wrong talking to neutron API: "
"'%(msg)s'. Trying with Nova.", {'msg': str(e)}) "'%(msg)s'. Trying with Nova.",
{'msg': str(e)},
)
# Fall-through, trying with Nova # Fall-through, trying with Nova
if port: if port:
@ -447,10 +472,12 @@ class FloatingIPCloudMixin:
" arbitrary floating-ip/port mappings. Please nudge" " arbitrary floating-ip/port mappings. Please nudge"
" your cloud provider to upgrade the networking stack" " your cloud provider to upgrade the networking stack"
" to neutron, or alternately provide the server," " to neutron, or alternately provide the server,"
" fixed_address and nat_destination arguments as appropriate") " fixed_address and nat_destination arguments as appropriate"
)
# Else, we are using Nova network # Else, we are using Nova network
f_ips = self._normalize_floating_ips( f_ips = self._normalize_floating_ips(
[self._nova_create_floating_ip(pool=network)]) [self._nova_create_floating_ip(pool=network)]
)
return f_ips[0] return f_ips[0]
def _submit_create_fip(self, kwargs): def _submit_create_fip(self, kwargs):
@ -458,10 +485,16 @@ class FloatingIPCloudMixin:
return self.network.create_ip(**kwargs) return self.network.create_ip(**kwargs)
def _neutron_create_floating_ip( def _neutron_create_floating_ip(
self, network_name_or_id=None, server=None, self,
fixed_address=None, nat_destination=None, network_name_or_id=None,
port=None, server=None,
wait=False, timeout=60, network_id=None): fixed_address=None,
nat_destination=None,
port=None,
wait=False,
timeout=60,
network_id=None,
):
if not network_id: if not network_id:
if network_name_or_id: if network_name_or_id:
@ -470,7 +503,8 @@ class FloatingIPCloudMixin:
except exceptions.ResourceNotFound: except exceptions.ResourceNotFound:
raise exc.OpenStackCloudResourceNotFound( raise exc.OpenStackCloudResourceNotFound(
"unable to find network for floating ips with ID " "unable to find network for floating ips with ID "
"{0}".format(network_name_or_id)) "{0}".format(network_name_or_id)
)
network_id = network['id'] network_id = network['id']
else: else:
network_id = self._get_floating_network_id() network_id = self._get_floating_network_id()
@ -480,8 +514,10 @@ class FloatingIPCloudMixin:
if not port: if not port:
if server: if server:
(port_obj, fixed_ip_address) = self._nat_destination_port( (port_obj, fixed_ip_address) = self._nat_destination_port(
server, fixed_address=fixed_address, server,
nat_destination=nat_destination) fixed_address=fixed_address,
nat_destination=nat_destination,
)
if port_obj: if port_obj:
port = port_obj['id'] port = port_obj['id']
if fixed_ip_address: if fixed_ip_address:
@ -499,57 +535,68 @@ class FloatingIPCloudMixin:
if wait: if wait:
try: try:
for count in utils.iterate_timeout( for count in utils.iterate_timeout(
timeout, timeout,
"Timeout waiting for the floating IP" "Timeout waiting for the floating IP" " to be ACTIVE",
" to be ACTIVE", wait=self._FLOAT_AGE,
wait=self._FLOAT_AGE): ):
fip = self.get_floating_ip(fip_id) fip = self.get_floating_ip(fip_id)
if fip and fip['status'] == 'ACTIVE': if fip and fip['status'] == 'ACTIVE':
break break
except exc.OpenStackCloudTimeout: except exc.OpenStackCloudTimeout:
self.log.error( self.log.error(
"Timed out on floating ip %(fip)s becoming active." "Timed out on floating ip %(fip)s becoming active."
" Deleting", {'fip': fip_id}) " Deleting",
{'fip': fip_id},
)
try: try:
self.delete_floating_ip(fip_id) self.delete_floating_ip(fip_id)
except Exception as e: except Exception as e:
self.log.error( self.log.error(
"FIP LEAK: Attempted to delete floating ip " "FIP LEAK: Attempted to delete floating ip "
"%(fip)s but received %(exc)s exception: " "%(fip)s but received %(exc)s exception: "
"%(err)s", {'fip': fip_id, 'exc': e.__class__, "%(err)s",
'err': str(e)}) {'fip': fip_id, 'exc': e.__class__, 'err': str(e)},
)
raise raise
if fip['port_id'] != port: if fip['port_id'] != port:
if server: if server:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Attempted to create FIP on port {port} for server" "Attempted to create FIP on port {port} for server"
" {server} but FIP has port {port_id}".format( " {server} but FIP has port {port_id}".format(
port=port, port_id=fip['port_id'], port=port,
server=server['id'])) port_id=fip['port_id'],
server=server['id'],
)
)
else: else:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Attempted to create FIP on port {port}" "Attempted to create FIP on port {port}"
" but something went wrong".format(port=port)) " but something went wrong".format(port=port)
)
return fip return fip
def _nova_create_floating_ip(self, pool=None): def _nova_create_floating_ip(self, pool=None):
with _utils.shade_exceptions( with _utils.shade_exceptions(
"Unable to create floating IP in pool {pool}".format( "Unable to create floating IP in pool {pool}".format(pool=pool)
pool=pool)): ):
if pool is None: if pool is None:
pools = self.list_floating_ip_pools() pools = self.list_floating_ip_pools()
if not pools: if not pools:
raise exc.OpenStackCloudResourceNotFound( raise exc.OpenStackCloudResourceNotFound(
"unable to find a floating ip pool") "unable to find a floating ip pool"
)
pool = pools[0]['name'] pool = pools[0]['name']
data = proxy._json_response(self.compute.post( data = proxy._json_response(
'/os-floating-ips', json=dict(pool=pool))) self.compute.post('/os-floating-ips', json=dict(pool=pool))
)
pool_ip = self._get_and_munchify('floating_ip', data) pool_ip = self._get_and_munchify('floating_ip', data)
# TODO(mordred) Remove this - it's just for compat # TODO(mordred) Remove this - it's just for compat
data = proxy._json_response( data = proxy._json_response(
self.compute.get('/os-floating-ips/{id}'.format( self.compute.get(
id=pool_ip['id']))) '/os-floating-ips/{id}'.format(id=pool_ip['id'])
)
)
return self._get_and_munchify('floating_ip', data) return self._get_and_munchify('floating_ip', data)
def delete_floating_ip(self, floating_ip_id, retry=1): def delete_floating_ip(self, floating_ip_id, retry=1):
@ -589,8 +636,11 @@ class FloatingIPCloudMixin:
" {retry} times. Although the cloud did not indicate any errors" " {retry} times. Although the cloud did not indicate any errors"
" the floating ip is still in existence. Aborting further" " the floating ip is still in existence. Aborting further"
" operations.".format( " operations.".format(
id=floating_ip_id, ip=f_ip['floating_ip_address'], id=floating_ip_id,
retry=retry + 1)) ip=f_ip['floating_ip_address'],
retry=retry + 1,
)
)
def _delete_floating_ip(self, floating_ip_id): def _delete_floating_ip(self, floating_ip_id):
if self._use_neutron_floating(): if self._use_neutron_floating():
@ -599,14 +649,14 @@ class FloatingIPCloudMixin:
except exc.OpenStackCloudURINotFound as e: except exc.OpenStackCloudURINotFound as e:
self.log.debug( self.log.debug(
"Something went wrong talking to neutron API: " "Something went wrong talking to neutron API: "
"'%(msg)s'. Trying with Nova.", {'msg': str(e)}) "'%(msg)s'. Trying with Nova.",
{'msg': str(e)},
)
return self._nova_delete_floating_ip(floating_ip_id) return self._nova_delete_floating_ip(floating_ip_id)
def _neutron_delete_floating_ip(self, floating_ip_id): def _neutron_delete_floating_ip(self, floating_ip_id):
try: try:
self.network.delete_ip( self.network.delete_ip(floating_ip_id, ignore_missing=False)
floating_ip_id, ignore_missing=False
)
except exceptions.ResourceNotFound: except exceptions.ResourceNotFound:
return False return False
return True return True
@ -615,9 +665,12 @@ class FloatingIPCloudMixin:
try: try:
proxy._json_response( proxy._json_response(
self.compute.delete( self.compute.delete(
'/os-floating-ips/{id}'.format(id=floating_ip_id)), '/os-floating-ips/{id}'.format(id=floating_ip_id)
),
error_message='Unable to delete floating IP {fip_id}'.format( error_message='Unable to delete floating IP {fip_id}'.format(
fip_id=floating_ip_id)) fip_id=floating_ip_id
),
)
except exc.OpenStackCloudURINotFound: except exc.OpenStackCloudURINotFound:
return False return False
return True return True
@ -648,14 +701,23 @@ class FloatingIPCloudMixin:
if self._use_neutron_floating(): if self._use_neutron_floating():
for ip in self.list_floating_ips(): for ip in self.list_floating_ips():
if not bool(ip.port_id): if not bool(ip.port_id):
processed.append(self.delete_floating_ip( processed.append(
floating_ip_id=ip['id'], retry=retry)) self.delete_floating_ip(
floating_ip_id=ip['id'], retry=retry
)
)
return len(processed) if all(processed) else False return len(processed) if all(processed) else False
def _attach_ip_to_server( def _attach_ip_to_server(
self, server, floating_ip, self,
fixed_address=None, wait=False, server,
timeout=60, skip_attach=False, nat_destination=None): floating_ip,
fixed_address=None,
wait=False,
timeout=60,
skip_attach=False,
nat_destination=None,
):
"""Attach a floating IP to a server. """Attach a floating IP to a server.
:param server: Server dict :param server: Server dict
@ -685,8 +747,9 @@ class FloatingIPCloudMixin:
# the server data and try again. There are some clouds, which # the server data and try again. There are some clouds, which
# explicitely forbids FIP assign call if it is already assigned. # explicitely forbids FIP assign call if it is already assigned.
server = self.get_server_by_id(server['id']) server = self.get_server_by_id(server['id'])
ext_ip = meta.get_server_ip(server, ext_tag='floating', ext_ip = meta.get_server_ip(
public=True) server, ext_tag='floating', public=True
)
if ext_ip == floating_ip['floating_ip_address']: if ext_ip == floating_ip['floating_ip_address']:
return server return server
@ -694,74 +757,84 @@ class FloatingIPCloudMixin:
if not skip_attach: if not skip_attach:
try: try:
self._neutron_attach_ip_to_server( self._neutron_attach_ip_to_server(
server=server, floating_ip=floating_ip, server=server,
floating_ip=floating_ip,
fixed_address=fixed_address, fixed_address=fixed_address,
nat_destination=nat_destination) nat_destination=nat_destination,
)
except exc.OpenStackCloudURINotFound as e: except exc.OpenStackCloudURINotFound as e:
self.log.debug( self.log.debug(
"Something went wrong talking to neutron API: " "Something went wrong talking to neutron API: "
"'%(msg)s'. Trying with Nova.", {'msg': str(e)}) "'%(msg)s'. Trying with Nova.",
{'msg': str(e)},
)
# Fall-through, trying with Nova # Fall-through, trying with Nova
else: else:
# Nova network # Nova network
self._nova_attach_ip_to_server( self._nova_attach_ip_to_server(
server_id=server['id'], floating_ip_id=floating_ip['id'], server_id=server['id'],
fixed_address=fixed_address) floating_ip_id=floating_ip['id'],
fixed_address=fixed_address,
)
if wait: if wait:
# Wait for the address to be assigned to the server # Wait for the address to be assigned to the server
server_id = server['id'] server_id = server['id']
for _ in utils.iterate_timeout( for _ in utils.iterate_timeout(
timeout, timeout,
"Timeout waiting for the floating IP to be attached.", "Timeout waiting for the floating IP to be attached.",
wait=self._SERVER_AGE): wait=self._SERVER_AGE,
):
server = self.get_server_by_id(server_id) server = self.get_server_by_id(server_id)
ext_ip = meta.get_server_ip( ext_ip = meta.get_server_ip(
server, ext_tag='floating', public=True) server, ext_tag='floating', public=True
)
if ext_ip == floating_ip['floating_ip_address']: if ext_ip == floating_ip['floating_ip_address']:
return server return server
return server return server
def _neutron_attach_ip_to_server( def _neutron_attach_ip_to_server(
self, server, floating_ip, fixed_address=None, self, server, floating_ip, fixed_address=None, nat_destination=None
nat_destination=None): ):
# Find an available port # Find an available port
(port, fixed_address) = self._nat_destination_port( (port, fixed_address) = self._nat_destination_port(
server, fixed_address=fixed_address, server,
nat_destination=nat_destination) fixed_address=fixed_address,
nat_destination=nat_destination,
)
if not port: if not port:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"unable to find a port for server {0}".format( "unable to find a port for server {0}".format(server['id'])
server['id'])) )
floating_ip_args = {'port_id': port['id']} floating_ip_args = {'port_id': port['id']}
if fixed_address is not None: if fixed_address is not None:
floating_ip_args['fixed_ip_address'] = fixed_address floating_ip_args['fixed_ip_address'] = fixed_address
return self.network.update_ip( return self.network.update_ip(floating_ip, **floating_ip_args)
floating_ip,
**floating_ip_args)
def _nova_attach_ip_to_server(self, server_id, floating_ip_id, def _nova_attach_ip_to_server(
fixed_address=None): self, server_id, floating_ip_id, fixed_address=None
f_ip = self.get_floating_ip( ):
id=floating_ip_id) f_ip = self.get_floating_ip(id=floating_ip_id)
if f_ip is None: if f_ip is None:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"unable to find floating IP {0}".format(floating_ip_id)) "unable to find floating IP {0}".format(floating_ip_id)
)
error_message = "Error attaching IP {ip} to instance {id}".format( error_message = "Error attaching IP {ip} to instance {id}".format(
ip=floating_ip_id, id=server_id) ip=floating_ip_id, id=server_id
body = { )
'address': f_ip['floating_ip_address'] body = {'address': f_ip['floating_ip_address']}
}
if fixed_address: if fixed_address:
body['fixed_address'] = fixed_address body['fixed_address'] = fixed_address
return proxy._json_response( return proxy._json_response(
self.compute.post( self.compute.post(
'/servers/{server_id}/action'.format(server_id=server_id), '/servers/{server_id}/action'.format(server_id=server_id),
json=dict(addFloatingIp=body)), json=dict(addFloatingIp=body),
error_message=error_message) ),
error_message=error_message,
)
def detach_ip_from_server(self, server_id, floating_ip_id): def detach_ip_from_server(self, server_id, floating_ip_id):
"""Detach a floating IP from a server. """Detach a floating IP from a server.
@ -777,31 +850,36 @@ class FloatingIPCloudMixin:
if self._use_neutron_floating(): if self._use_neutron_floating():
try: try:
return self._neutron_detach_ip_from_server( return self._neutron_detach_ip_from_server(
server_id=server_id, floating_ip_id=floating_ip_id) server_id=server_id, floating_ip_id=floating_ip_id
)
except exc.OpenStackCloudURINotFound as e: except exc.OpenStackCloudURINotFound as e:
self.log.debug( self.log.debug(
"Something went wrong talking to neutron API: " "Something went wrong talking to neutron API: "
"'%(msg)s'. Trying with Nova.", {'msg': str(e)}) "'%(msg)s'. Trying with Nova.",
{'msg': str(e)},
)
# Fall-through, trying with Nova # Fall-through, trying with Nova
# Nova network # Nova network
self._nova_detach_ip_from_server( self._nova_detach_ip_from_server(
server_id=server_id, floating_ip_id=floating_ip_id) server_id=server_id, floating_ip_id=floating_ip_id
)
def _neutron_detach_ip_from_server(self, server_id, floating_ip_id): def _neutron_detach_ip_from_server(self, server_id, floating_ip_id):
f_ip = self.get_floating_ip(id=floating_ip_id) f_ip = self.get_floating_ip(id=floating_ip_id)
if f_ip is None or not bool(f_ip.port_id): if f_ip is None or not bool(f_ip.port_id):
return False return False
try: try:
self.network.update_ip( self.network.update_ip(floating_ip_id, port_id=None)
floating_ip_id,
port_id=None
)
except exceptions.SDKException: except exceptions.SDKException:
raise exceptions.SDKException( raise exceptions.SDKException(
("Error detaching IP {ip} from " (
"server {server_id}".format( "Error detaching IP {ip} from "
ip=floating_ip_id, server_id=server_id))) "server {server_id}".format(
ip=floating_ip_id, server_id=server_id
)
)
)
return True return True
@ -810,21 +888,33 @@ class FloatingIPCloudMixin:
f_ip = self.get_floating_ip(id=floating_ip_id) f_ip = self.get_floating_ip(id=floating_ip_id)
if f_ip is None: if f_ip is None:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"unable to find floating IP {0}".format(floating_ip_id)) "unable to find floating IP {0}".format(floating_ip_id)
)
error_message = "Error detaching IP {ip} from instance {id}".format( error_message = "Error detaching IP {ip} from instance {id}".format(
ip=floating_ip_id, id=server_id) ip=floating_ip_id, id=server_id
)
return proxy._json_response( return proxy._json_response(
self.compute.post( self.compute.post(
'/servers/{server_id}/action'.format(server_id=server_id), '/servers/{server_id}/action'.format(server_id=server_id),
json=dict(removeFloatingIp=dict( json=dict(
address=f_ip['floating_ip_address']))), removeFloatingIp=dict(address=f_ip['floating_ip_address'])
error_message=error_message) ),
),
error_message=error_message,
)
return True return True
def _add_ip_from_pool( def _add_ip_from_pool(
self, server, network, fixed_address=None, reuse=True, self,
wait=False, timeout=60, nat_destination=None): server,
network,
fixed_address=None,
reuse=True,
wait=False,
timeout=60,
nat_destination=None,
):
"""Add a floating IP to a server from a given pool """Add a floating IP to a server from a given pool
This method reuses available IPs, when possible, or allocate new IPs This method reuses available IPs, when possible, or allocate new IPs
@ -851,9 +941,12 @@ class FloatingIPCloudMixin:
start_time = time.time() start_time = time.time()
f_ip = self.create_floating_ip( f_ip = self.create_floating_ip(
server=server, server=server,
network=network, nat_destination=nat_destination, network=network,
nat_destination=nat_destination,
fixed_address=fixed_address, fixed_address=fixed_address,
wait=wait, timeout=timeout) wait=wait,
timeout=timeout,
)
timeout = timeout - (time.time() - start_time) timeout = timeout - (time.time() - start_time)
# Wait for cache invalidation time so that we don't try # Wait for cache invalidation time so that we don't try
# to attach the FIP a second time below # to attach the FIP a second time below
@ -866,12 +959,23 @@ class FloatingIPCloudMixin:
# the attach function below to get back the server dict refreshed # the attach function below to get back the server dict refreshed
# with the FIP information. # with the FIP information.
return self._attach_ip_to_server( return self._attach_ip_to_server(
server=server, floating_ip=f_ip, fixed_address=fixed_address, server=server,
wait=wait, timeout=timeout, nat_destination=nat_destination) floating_ip=f_ip,
fixed_address=fixed_address,
wait=wait,
timeout=timeout,
nat_destination=nat_destination,
)
def add_ip_list( def add_ip_list(
self, server, ips, wait=False, timeout=60, self,
fixed_address=None, nat_destination=None): server,
ips,
wait=False,
timeout=60,
fixed_address=None,
nat_destination=None,
):
"""Attach a list of IPs to a server. """Attach a list of IPs to a server.
:param server: a server object :param server: a server object
@ -896,10 +1000,16 @@ class FloatingIPCloudMixin:
for ip in ips: for ip in ips:
f_ip = self.get_floating_ip( f_ip = self.get_floating_ip(
id=None, filters={'floating_ip_address': ip}) id=None, filters={'floating_ip_address': ip}
)
server = self._attach_ip_to_server( server = self._attach_ip_to_server(
server=server, floating_ip=f_ip, wait=wait, timeout=timeout, server=server,
fixed_address=fixed_address, nat_destination=nat_destination) floating_ip=f_ip,
wait=wait,
timeout=timeout,
fixed_address=fixed_address,
nat_destination=nat_destination,
)
return server return server
def add_auto_ip(self, server, wait=False, timeout=60, reuse=True): def add_auto_ip(self, server, wait=False, timeout=60, reuse=True):
@ -925,7 +1035,8 @@ class FloatingIPCloudMixin:
""" """
server = self._add_auto_ip( server = self._add_auto_ip(
server, wait=wait, timeout=timeout, reuse=reuse) server, wait=wait, timeout=timeout, reuse=reuse
)
return server['interface_ip'] or None return server['interface_ip'] or None
def _add_auto_ip(self, server, wait=False, timeout=60, reuse=True): def _add_auto_ip(self, server, wait=False, timeout=60, reuse=True):
@ -936,7 +1047,8 @@ class FloatingIPCloudMixin:
else: else:
start_time = time.time() start_time = time.time()
f_ip = self.create_floating_ip( f_ip = self.create_floating_ip(
server=server, wait=wait, timeout=timeout) server=server, wait=wait, timeout=timeout
)
timeout = timeout - (time.time() - start_time) timeout = timeout - (time.time() - start_time)
if server: if server:
# This gets passed in for both nova and neutron # This gets passed in for both nova and neutron
@ -951,8 +1063,12 @@ class FloatingIPCloudMixin:
# the attach function below to get back the server dict refreshed # the attach function below to get back the server dict refreshed
# with the FIP information. # with the FIP information.
return self._attach_ip_to_server( return self._attach_ip_to_server(
server=server, floating_ip=f_ip, wait=wait, timeout=timeout, server=server,
skip_attach=skip_attach) floating_ip=f_ip,
wait=wait,
timeout=timeout,
skip_attach=skip_attach,
)
except exc.OpenStackCloudTimeout: except exc.OpenStackCloudTimeout:
if self._use_neutron_floating() and created: if self._use_neutron_floating() and created:
# We are here because we created an IP on the port # We are here because we created an IP on the port
@ -962,36 +1078,60 @@ class FloatingIPCloudMixin:
"Timeout waiting for floating IP to become" "Timeout waiting for floating IP to become"
" active. Floating IP %(ip)s:%(id)s was created for" " active. Floating IP %(ip)s:%(id)s was created for"
" server %(server)s but is being deleted due to" " server %(server)s but is being deleted due to"
" activation failure.", { " activation failure.",
{
'ip': f_ip['floating_ip_address'], 'ip': f_ip['floating_ip_address'],
'id': f_ip['id'], 'id': f_ip['id'],
'server': server['id']}) 'server': server['id'],
},
)
try: try:
self.delete_floating_ip(f_ip['id']) self.delete_floating_ip(f_ip['id'])
except Exception as e: except Exception as e:
self.log.error( self.log.error(
"FIP LEAK: Attempted to delete floating ip " "FIP LEAK: Attempted to delete floating ip "
"%(fip)s but received %(exc)s exception: %(err)s", "%(fip)s but received %(exc)s exception: %(err)s",
{'fip': f_ip['id'], 'exc': e.__class__, 'err': str(e)}) {'fip': f_ip['id'], 'exc': e.__class__, 'err': str(e)},
)
raise e raise e
raise raise
def add_ips_to_server( def add_ips_to_server(
self, server, auto_ip=True, ips=None, ip_pool=None, self,
wait=False, timeout=60, reuse=True, fixed_address=None, server,
nat_destination=None): auto_ip=True,
ips=None,
ip_pool=None,
wait=False,
timeout=60,
reuse=True,
fixed_address=None,
nat_destination=None,
):
if ip_pool: if ip_pool:
server = self._add_ip_from_pool( server = self._add_ip_from_pool(
server, ip_pool, reuse=reuse, wait=wait, timeout=timeout, server,
fixed_address=fixed_address, nat_destination=nat_destination) ip_pool,
reuse=reuse,
wait=wait,
timeout=timeout,
fixed_address=fixed_address,
nat_destination=nat_destination,
)
elif ips: elif ips:
server = self.add_ip_list( server = self.add_ip_list(
server, ips, wait=wait, timeout=timeout, server,
fixed_address=fixed_address, nat_destination=nat_destination) ips,
wait=wait,
timeout=timeout,
fixed_address=fixed_address,
nat_destination=nat_destination,
)
elif auto_ip: elif auto_ip:
if self._needs_floating_ip(server, nat_destination): if self._needs_floating_ip(server, nat_destination):
server = self._add_auto_ip( server = self._add_auto_ip(
server, wait=wait, timeout=timeout, reuse=reuse) server, wait=wait, timeout=timeout, reuse=reuse
)
return server return server
def _needs_floating_ip(self, server, nat_destination): def _needs_floating_ip(self, server, nat_destination):
@ -1026,18 +1166,30 @@ class FloatingIPCloudMixin:
# meta.add_server_interfaces() was not called # meta.add_server_interfaces() was not called
server = self.compute.get_server(server) server = self.compute.get_server(server)
if server['public_v4'] \ if server['public_v4'] or any(
or any([any([address['OS-EXT-IPS:type'] == 'floating' [
for address in addresses]) any(
for addresses [
in (server['addresses'] or {}).values()]): address['OS-EXT-IPS:type'] == 'floating'
for address in addresses
]
)
for addresses in (server['addresses'] or {}).values()
]
):
return False return False
if not server['private_v4'] \ if not server['private_v4'] and not any(
and not any([any([address['OS-EXT-IPS:type'] == 'fixed' [
for address in addresses]) any(
for addresses [
in (server['addresses'] or {}).values()]): address['OS-EXT-IPS:type'] == 'fixed'
for address in addresses
]
)
for addresses in (server['addresses'] or {}).values()
]
):
return False return False
if self.private: if self.private:
@ -1053,7 +1205,8 @@ class FloatingIPCloudMixin:
return False return False
(port_obj, fixed_ip_address) = self._nat_destination_port( (port_obj, fixed_ip_address) = self._nat_destination_port(
server, nat_destination=nat_destination) server, nat_destination=nat_destination
)
if not port_obj or not fixed_ip_address: if not port_obj or not fixed_ip_address:
return False return False
@ -1061,7 +1214,8 @@ class FloatingIPCloudMixin:
return True return True
def _nat_destination_port( def _nat_destination_port(
self, server, fixed_address=None, nat_destination=None): self, server, fixed_address=None, nat_destination=None
):
"""Returns server port that is on a nat_destination network """Returns server port that is on a nat_destination network
Find a port attached to the server which is on a network which Find a port attached to the server which is on a network which
@ -1082,9 +1236,10 @@ class FloatingIPCloudMixin:
else: else:
timeout = None timeout = None
for count in utils.iterate_timeout( for count in utils.iterate_timeout(
timeout, timeout,
"Timeout waiting for port to show up in list", "Timeout waiting for port to show up in list",
wait=self._PORT_AGE): wait=self._PORT_AGE,
):
try: try:
port_filter = {'device_id': server['id']} port_filter = {'device_id': server['id']}
ports = self.search_ports(filters=port_filter) ports = self.search_ports(filters=port_filter)
@ -1103,7 +1258,9 @@ class FloatingIPCloudMixin:
'NAT Destination {nat_destination} was configured' 'NAT Destination {nat_destination} was configured'
' but not found on the cloud. Please check your' ' but not found on the cloud. Please check your'
' config and your cloud and try again.'.format( ' config and your cloud and try again.'.format(
nat_destination=nat_destination)) nat_destination=nat_destination
)
)
else: else:
nat_network = self.get_nat_destination() nat_network = self.get_nat_destination()
@ -1118,7 +1275,8 @@ class FloatingIPCloudMixin:
' nat_destination property of the networks list in' ' nat_destination property of the networks list in'
' your clouds.yaml file. If you do not have a' ' your clouds.yaml file. If you do not have a'
' clouds.yaml file, please make one - your setup' ' clouds.yaml file, please make one - your setup'
' is complicated.'.format(server=server['id'])) ' is complicated.'.format(server=server['id'])
)
maybe_ports = [] maybe_ports = []
for maybe_port in ports: for maybe_port in ports:
@ -1129,7 +1287,9 @@ class FloatingIPCloudMixin:
'No port on server {server} was found matching' 'No port on server {server} was found matching'
' your NAT destination network {dest}. Please ' ' your NAT destination network {dest}. Please '
' check your config'.format( ' check your config'.format(
server=server['id'], dest=nat_network['name'])) server=server['id'], dest=nat_network['name']
)
)
ports = maybe_ports ports = maybe_ports
# Select the most recent available IPv4 address # Select the most recent available IPv4 address
@ -1139,9 +1299,8 @@ class FloatingIPCloudMixin:
# if there are more than one, will be the arbitrary port we # if there are more than one, will be the arbitrary port we
# select. # select.
for port in sorted( for port in sorted(
ports, ports, key=lambda p: p.get('created_at', 0), reverse=True
key=lambda p: p.get('created_at', 0), ):
reverse=True):
for address in port.get('fixed_ips', list()): for address in port.get('fixed_ips', list()):
try: try:
ip = ipaddress.ip_address(address['ip_address']) ip = ipaddress.ip_address(address['ip_address'])
@ -1152,7 +1311,8 @@ class FloatingIPCloudMixin:
return port, fixed_address return port, fixed_address
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"unable to find a free fixed IPv4 address for server " "unable to find a free fixed IPv4 address for server "
"{0}".format(server['id'])) "{0}".format(server['id'])
)
# unfortunately a port can have more than one fixed IP: # unfortunately a port can have more than one fixed IP:
# we can't use the search_ports filtering for fixed_address as # we can't use the search_ports filtering for fixed_address as
# they are contained in a list. e.g. # they are contained in a list. e.g.
@ -1178,8 +1338,10 @@ class FloatingIPCloudMixin:
return self._floating_ip_source in ('nova', 'neutron') return self._floating_ip_source in ('nova', 'neutron')
def _use_neutron_floating(self): def _use_neutron_floating(self):
return (self.has_service('network') return (
and self._floating_ip_source == 'neutron') self.has_service('network')
and self._floating_ip_source == 'neutron'
)
def _normalize_floating_ips(self, ips): def _normalize_floating_ips(self, ips):
"""Normalize the structure of floating IPs """Normalize the structure of floating IPs
@ -1210,16 +1372,13 @@ class FloatingIPCloudMixin:
] ]
""" """
return [ return [self._normalize_floating_ip(ip) for ip in ips]
self._normalize_floating_ip(ip) for ip in ips
]
def _normalize_floating_ip(self, ip): def _normalize_floating_ip(self, ip):
# Copy incoming floating ip because of shared dicts in unittests # Copy incoming floating ip because of shared dicts in unittests
# Only import munch when we really need it # Only import munch when we really need it
location = self._get_current_location( location = self._get_current_location(project_id=ip.get('owner'))
project_id=ip.get('owner'))
# This copy is to keep things from getting epically weird in tests # This copy is to keep things from getting epically weird in tests
ip = ip.copy() ip = ip.copy()
@ -1228,7 +1387,8 @@ class FloatingIPCloudMixin:
fixed_ip_address = ip.pop('fixed_ip_address', ip.pop('fixed_ip', None)) fixed_ip_address = ip.pop('fixed_ip_address', ip.pop('fixed_ip', None))
floating_ip_address = ip.pop('floating_ip_address', ip.pop('ip', None)) floating_ip_address = ip.pop('floating_ip_address', ip.pop('ip', None))
network_id = ip.pop( network_id = ip.pop(
'floating_network_id', ip.pop('network', ip.pop('pool', None))) 'floating_network_id', ip.pop('network', ip.pop('pool', None))
)
project_id = ip.pop('tenant_id', '') project_id = ip.pop('tenant_id', '')
project_id = ip.pop('project_id', project_id) project_id = ip.pop('project_id', project_id)

View File

@ -28,7 +28,8 @@ class IdentityCloudMixin:
def _identity_client(self): def _identity_client(self):
if 'identity' not in self._raw_clients: if 'identity' not in self._raw_clients:
self._raw_clients['identity'] = self._get_versioned_client( self._raw_clients['identity'] = self._get_versioned_client(
'identity', min_version=2, max_version='3.latest') 'identity', min_version=2, max_version='3.latest'
)
return self._raw_clients['identity'] return self._raw_clients['identity']
@_utils.cache_on_arguments() @_utils.cache_on_arguments()
@ -129,8 +130,9 @@ class IdentityCloudMixin:
:raises: ``OpenStackCloudException`` if something goes wrong during :raises: ``OpenStackCloudException`` if something goes wrong during
the OpenStack API call. the OpenStack API call.
""" """
return _utils._get_entity(self, 'project', name_or_id, filters, return _utils._get_entity(
domain_id=domain_id) self, 'project', name_or_id, filters, domain_id=domain_id
)
def update_project( def update_project(
self, self,
@ -178,7 +180,7 @@ class IdentityCloudMixin:
name=name, name=name,
description=description, description=description,
domain_id=domain_id, domain_id=domain_id,
is_enabled=enabled is_enabled=enabled,
) )
if kwargs: if kwargs:
attrs.update(kwargs) attrs.update(kwargs)
@ -195,19 +197,19 @@ class IdentityCloudMixin:
""" """
try: try:
project = self.identity.find_project( project = self.identity.find_project(
name_or_id=name_or_id, name_or_id=name_or_id, ignore_missing=True, domain_id=domain_id
ignore_missing=True,
domain_id=domain_id
) )
if not project: if not project:
self.log.debug( self.log.debug("Project %s not found for deleting", name_or_id)
"Project %s not found for deleting", name_or_id)
return False return False
self.identity.delete_project(project) self.identity.delete_project(project)
return True return True
except exceptions.SDKException: except exceptions.SDKException:
self.log.exception("Error in deleting project {project}".format( self.log.exception(
project=name_or_id)) "Error in deleting project {project}".format(
project=name_or_id
)
)
return False return False
@_utils.valid_kwargs('domain_id', 'name') @_utils.valid_kwargs('domain_id', 'name')
@ -299,8 +301,15 @@ class IdentityCloudMixin:
""" """
return self.identity.get_user(user_id) return self.identity.get_user(user_id)
@_utils.valid_kwargs('name', 'email', 'enabled', 'domain_id', 'password', @_utils.valid_kwargs(
'description', 'default_project') 'name',
'email',
'enabled',
'domain_id',
'password',
'description',
'default_project',
)
def update_user(self, name_or_id, **kwargs): def update_user(self, name_or_id, **kwargs):
self.list_users.invalidate(self) self.list_users.invalidate(self)
user_kwargs = {} user_kwargs = {}
@ -351,7 +360,8 @@ class IdentityCloudMixin:
user = self.get_user(name_or_id, **kwargs) user = self.get_user(name_or_id, **kwargs)
if not user: if not user:
self.log.debug( self.log.debug(
"User {0} not found for deleting".format(name_or_id)) "User {0} not found for deleting".format(name_or_id)
)
return False return False
self.identity.delete_user(user) self.identity.delete_user(user)
@ -359,21 +369,23 @@ class IdentityCloudMixin:
return True return True
except exceptions.SDKException: except exceptions.SDKException:
self.log.exception("Error in deleting user {user}".format( self.log.exception(
user=name_or_id "Error in deleting user {user}".format(user=name_or_id)
)) )
return False return False
def _get_user_and_group(self, user_name_or_id, group_name_or_id): def _get_user_and_group(self, user_name_or_id, group_name_or_id):
user = self.get_user(user_name_or_id) user = self.get_user(user_name_or_id)
if not user: if not user:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
'User {user} not found'.format(user=user_name_or_id)) 'User {user} not found'.format(user=user_name_or_id)
)
group = self.get_group(group_name_or_id) group = self.get_group(group_name_or_id)
if not group: if not group:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
'Group {user} not found'.format(user=group_name_or_id)) 'Group {user} not found'.format(user=group_name_or_id)
)
return (user, group) return (user, group)
@ -438,8 +450,9 @@ class IdentityCloudMixin:
return self.identity.create_service(**kwargs) return self.identity.create_service(**kwargs)
@_utils.valid_kwargs('name', 'enabled', 'type', 'service_type', @_utils.valid_kwargs(
'description') 'name', 'enabled', 'type', 'service_type', 'description'
)
def update_service(self, name_or_id, **kwargs): def update_service(self, name_or_id, **kwargs):
# NOTE(SamYaple): Keystone v3 only accepts 'type' but shade accepts # NOTE(SamYaple): Keystone v3 only accepts 'type' but shade accepts
@ -519,7 +532,8 @@ class IdentityCloudMixin:
return True return True
except exceptions.SDKException: except exceptions.SDKException:
self.log.exception( self.log.exception(
'Failed to delete service {id}'.format(id=service['id'])) 'Failed to delete service {id}'.format(id=service['id'])
)
return False return False
@_utils.valid_kwargs('public_url', 'internal_url', 'admin_url') @_utils.valid_kwargs('public_url', 'internal_url', 'admin_url')
@ -560,31 +574,42 @@ class IdentityCloudMixin:
if service is None: if service is None:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"service {service} not found".format( "service {service} not found".format(
service=service_name_or_id)) service=service_name_or_id
)
)
endpoints_args = [] endpoints_args = []
if url: if url:
# v3 in use, v3-like arguments, one endpoint created # v3 in use, v3-like arguments, one endpoint created
endpoints_args.append( endpoints_args.append(
{'url': url, 'interface': interface, {
'service_id': service['id'], 'enabled': enabled, 'url': url,
'region_id': region}) 'interface': interface,
'service_id': service['id'],
'enabled': enabled,
'region_id': region,
}
)
else: else:
# v3 in use, v2.0-like arguments, one endpoint created for each # v3 in use, v2.0-like arguments, one endpoint created for each
# interface url provided # interface url provided
endpoint_args = {'region_id': region, 'enabled': enabled, endpoint_args = {
'service_id': service['id']} 'region_id': region,
'enabled': enabled,
'service_id': service['id'],
}
if public_url: if public_url:
endpoint_args.update({'url': public_url, endpoint_args.update(
'interface': 'public'}) {'url': public_url, 'interface': 'public'}
)
endpoints_args.append(endpoint_args.copy()) endpoints_args.append(endpoint_args.copy())
if internal_url: if internal_url:
endpoint_args.update({'url': internal_url, endpoint_args.update(
'interface': 'internal'}) {'url': internal_url, 'interface': 'internal'}
)
endpoints_args.append(endpoint_args.copy()) endpoints_args.append(endpoint_args.copy())
if admin_url: if admin_url:
endpoint_args.update({'url': admin_url, endpoint_args.update({'url': admin_url, 'interface': 'admin'})
'interface': 'admin'})
endpoints_args.append(endpoint_args.copy()) endpoints_args.append(endpoint_args.copy())
endpoints = [] endpoints = []
@ -592,8 +617,9 @@ class IdentityCloudMixin:
endpoints.append(self.identity.create_endpoint(**args)) endpoints.append(self.identity.create_endpoint(**args))
return endpoints return endpoints
@_utils.valid_kwargs('enabled', 'service_name_or_id', 'url', 'interface', @_utils.valid_kwargs(
'region') 'enabled', 'service_name_or_id', 'url', 'interface', 'region'
)
def update_endpoint(self, endpoint_id, **kwargs): def update_endpoint(self, endpoint_id, **kwargs):
service_name_or_id = kwargs.pop('service_name_or_id', None) service_name_or_id = kwargs.pop('service_name_or_id', None)
if service_name_or_id is not None: if service_name_or_id is not None:
@ -670,8 +696,7 @@ class IdentityCloudMixin:
self.identity.delete_endpoint(id) self.identity.delete_endpoint(id)
return True return True
except exceptions.SDKException: except exceptions.SDKException:
self.log.exception( self.log.exception("Failed to delete endpoint {id}".format(id=id))
"Failed to delete endpoint {id}".format(id=id))
return False return False
def create_domain(self, name, description=None, enabled=True): def create_domain(self, name, description=None, enabled=True):
@ -746,7 +771,8 @@ class IdentityCloudMixin:
dom = self.get_domain(name_or_id=name_or_id) dom = self.get_domain(name_or_id=name_or_id)
if dom is None: if dom is None:
self.log.debug( self.log.debug(
"Domain %s not found for deleting", name_or_id) "Domain %s not found for deleting", name_or_id
)
return False return False
domain_id = dom['id'] domain_id = dom['id']
@ -963,8 +989,7 @@ class IdentityCloudMixin:
try: try:
group = self.identity.find_group(name_or_id) group = self.identity.find_group(name_or_id)
if group is None: if group is None:
self.log.debug( self.log.debug("Group %s not found for deleting", name_or_id)
"Group %s not found for deleting", name_or_id)
return False return False
self.identity.delete_group(group) self.identity.delete_group(group)
@ -974,7 +999,8 @@ class IdentityCloudMixin:
except exceptions.SDKException: except exceptions.SDKException:
self.log.exception( self.log.exception(
"Unable to delete group {name}".format(name=name_or_id)) "Unable to delete group {name}".format(name=name_or_id)
)
return False return False
def list_roles(self, **kwargs): def list_roles(self, **kwargs):
@ -1051,8 +1077,9 @@ class IdentityCloudMixin:
filters['scope.' + k + '.id'] = filters[k] filters['scope.' + k + '.id'] = filters[k]
del filters[k] del filters[k]
if 'os_inherit_extension_inherited_to' in filters: if 'os_inherit_extension_inherited_to' in filters:
filters['scope.OS-INHERIT:inherited_to'] = ( filters['scope.OS-INHERIT:inherited_to'] = filters[
filters['os_inherit_extension_inherited_to']) 'os_inherit_extension_inherited_to'
]
del filters['os_inherit_extension_inherited_to'] del filters['os_inherit_extension_inherited_to']
return list(self.identity.role_assignments(**filters)) return list(self.identity.role_assignments(**filters))
@ -1138,8 +1165,7 @@ class IdentityCloudMixin:
""" """
role = self.get_role(name_or_id, **kwargs) role = self.get_role(name_or_id, **kwargs)
if role is None: if role is None:
self.log.debug( self.log.debug("Role %s not found for updating", name_or_id)
"Role %s not found for updating", name_or_id)
return False return False
return self.identity.update_role(role, name=name, **kwargs) return self.identity.update_role(role, name=name, **kwargs)
@ -1156,8 +1182,7 @@ class IdentityCloudMixin:
""" """
role = self.get_role(name_or_id, **kwargs) role = self.get_role(name_or_id, **kwargs)
if role is None: if role is None:
self.log.debug( self.log.debug("Role %s not found for deleting", name_or_id)
"Role %s not found for deleting", name_or_id)
return False return False
try: try:
@ -1165,17 +1190,25 @@ class IdentityCloudMixin:
return True return True
except exceptions.SDKExceptions: except exceptions.SDKExceptions:
self.log.exception( self.log.exception(
"Unable to delete role {name}".format( "Unable to delete role {name}".format(name=name_or_id)
name=name_or_id)) )
raise raise
def _get_grant_revoke_params(self, role, user=None, group=None, def _get_grant_revoke_params(
project=None, domain=None, system=None): self,
role,
user=None,
group=None,
project=None,
domain=None,
system=None,
):
data = {} data = {}
search_args = {} search_args = {}
if domain: if domain:
data['domain'] = self.identity.find_domain( data['domain'] = self.identity.find_domain(
domain, ignore_missing=False) domain, ignore_missing=False
)
# We have domain. We should use it for further searching user, # We have domain. We should use it for further searching user,
# group, role, project # group, role, project
search_args['domain_id'] = data['domain'].id search_args['domain_id'] = data['domain'].id
@ -1183,33 +1216,47 @@ class IdentityCloudMixin:
data['role'] = self.identity.find_role(name_or_id=role) data['role'] = self.identity.find_role(name_or_id=role)
if not data['role']: if not data['role']:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
'Role {0} not found.'.format(role)) 'Role {0} not found.'.format(role)
)
if user: if user:
# use cloud.get_user to save us from bad searching by name # use cloud.get_user to save us from bad searching by name
data['user'] = self.get_user(user, filters=search_args) data['user'] = self.get_user(user, filters=search_args)
if group: if group:
data['group'] = self.identity.find_group( data['group'] = self.identity.find_group(
group, ignore_missing=False, **search_args) group, ignore_missing=False, **search_args
)
if data.get('user') and data.get('group'): if data.get('user') and data.get('group'):
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
'Specify either a group or a user, not both') 'Specify either a group or a user, not both'
)
if data.get('user') is None and data.get('group') is None: if data.get('user') is None and data.get('group') is None:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
'Must specify either a user or a group') 'Must specify either a user or a group'
)
if project is None and domain is None and system is None: if project is None and domain is None and system is None:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
'Must specify either a domain, project or system') 'Must specify either a domain, project or system'
)
if project: if project:
data['project'] = self.identity.find_project( data['project'] = self.identity.find_project(
project, ignore_missing=False, **search_args) project, ignore_missing=False, **search_args
)
return data return data
def grant_role(self, name_or_id, user=None, group=None, def grant_role(
project=None, domain=None, system=None, wait=False, self,
timeout=60): name_or_id,
user=None,
group=None,
project=None,
domain=None,
system=None,
wait=False,
timeout=60,
):
"""Grant a role to a user. """Grant a role to a user.
:param string name_or_id: Name or unique ID of the role. :param string name_or_id: Name or unique ID of the role.
@ -1236,8 +1283,13 @@ class IdentityCloudMixin:
:raise OpenStackCloudException: if the role cannot be granted :raise OpenStackCloudException: if the role cannot be granted
""" """
data = self._get_grant_revoke_params( data = self._get_grant_revoke_params(
name_or_id, user=user, group=group, name_or_id,
project=project, domain=domain, system=system) user=user,
group=group,
project=project,
domain=domain,
system=system,
)
user = data.get('user') user = data.get('user')
group = data.get('group') group = data.get('group')
@ -1249,63 +1301,73 @@ class IdentityCloudMixin:
# Proceed with project - precedence over domain and system # Proceed with project - precedence over domain and system
if user: if user:
has_role = self.identity.validate_user_has_project_role( has_role = self.identity.validate_user_has_project_role(
project, user, role) project, user, role
)
if has_role: if has_role:
self.log.debug('Assignment already exists') self.log.debug('Assignment already exists')
return False return False
self.identity.assign_project_role_to_user( self.identity.assign_project_role_to_user(project, user, role)
project, user, role)
else: else:
has_role = self.identity.validate_group_has_project_role( has_role = self.identity.validate_group_has_project_role(
project, group, role) project, group, role
)
if has_role: if has_role:
self.log.debug('Assignment already exists') self.log.debug('Assignment already exists')
return False return False
self.identity.assign_project_role_to_group( self.identity.assign_project_role_to_group(
project, group, role) project, group, role
)
elif domain: elif domain:
# Proceed with domain - precedence over system # Proceed with domain - precedence over system
if user: if user:
has_role = self.identity.validate_user_has_domain_role( has_role = self.identity.validate_user_has_domain_role(
domain, user, role) domain, user, role
)
if has_role: if has_role:
self.log.debug('Assignment already exists') self.log.debug('Assignment already exists')
return False return False
self.identity.assign_domain_role_to_user( self.identity.assign_domain_role_to_user(domain, user, role)
domain, user, role)
else: else:
has_role = self.identity.validate_group_has_domain_role( has_role = self.identity.validate_group_has_domain_role(
domain, group, role) domain, group, role
)
if has_role: if has_role:
self.log.debug('Assignment already exists') self.log.debug('Assignment already exists')
return False return False
self.identity.assign_domain_role_to_group( self.identity.assign_domain_role_to_group(domain, group, role)
domain, group, role)
else: else:
# Proceed with system # Proceed with system
# System name must be 'all' due to checks performed in # System name must be 'all' due to checks performed in
# _get_grant_revoke_params # _get_grant_revoke_params
if user: if user:
has_role = self.identity.validate_user_has_system_role( has_role = self.identity.validate_user_has_system_role(
user, role, system) user, role, system
)
if has_role: if has_role:
self.log.debug('Assignment already exists') self.log.debug('Assignment already exists')
return False return False
self.identity.assign_system_role_to_user( self.identity.assign_system_role_to_user(user, role, system)
user, role, system)
else: else:
has_role = self.identity.validate_group_has_system_role( has_role = self.identity.validate_group_has_system_role(
group, role, system) group, role, system
)
if has_role: if has_role:
self.log.debug('Assignment already exists') self.log.debug('Assignment already exists')
return False return False
self.identity.assign_system_role_to_group( self.identity.assign_system_role_to_group(group, role, system)
group, role, system)
return True return True
def revoke_role(self, name_or_id, user=None, group=None, def revoke_role(
project=None, domain=None, system=None, self,
wait=False, timeout=60): name_or_id,
user=None,
group=None,
project=None,
domain=None,
system=None,
wait=False,
timeout=60,
):
"""Revoke a role from a user. """Revoke a role from a user.
:param string name_or_id: Name or unique ID of the role. :param string name_or_id: Name or unique ID of the role.
@ -1329,8 +1391,13 @@ class IdentityCloudMixin:
:raise OpenStackCloudException: if the role cannot be removed :raise OpenStackCloudException: if the role cannot be removed
""" """
data = self._get_grant_revoke_params( data = self._get_grant_revoke_params(
name_or_id, user=user, group=group, name_or_id,
project=project, domain=domain, system=system) user=user,
group=group,
project=project,
domain=domain,
system=system,
)
user = data.get('user') user = data.get('user')
group = data.get('group') group = data.get('group')
@ -1342,58 +1409,70 @@ class IdentityCloudMixin:
# Proceed with project - precedence over domain and system # Proceed with project - precedence over domain and system
if user: if user:
has_role = self.identity.validate_user_has_project_role( has_role = self.identity.validate_user_has_project_role(
project, user, role) project, user, role
)
if not has_role: if not has_role:
self.log.debug('Assignment does not exists') self.log.debug('Assignment does not exists')
return False return False
self.identity.unassign_project_role_from_user( self.identity.unassign_project_role_from_user(
project, user, role) project, user, role
)
else: else:
has_role = self.identity.validate_group_has_project_role( has_role = self.identity.validate_group_has_project_role(
project, group, role) project, group, role
)
if not has_role: if not has_role:
self.log.debug('Assignment does not exists') self.log.debug('Assignment does not exists')
return False return False
self.identity.unassign_project_role_from_group( self.identity.unassign_project_role_from_group(
project, group, role) project, group, role
)
elif domain: elif domain:
# Proceed with domain - precedence over system # Proceed with domain - precedence over system
if user: if user:
has_role = self.identity.validate_user_has_domain_role( has_role = self.identity.validate_user_has_domain_role(
domain, user, role) domain, user, role
)
if not has_role: if not has_role:
self.log.debug('Assignment does not exists') self.log.debug('Assignment does not exists')
return False return False
self.identity.unassign_domain_role_from_user( self.identity.unassign_domain_role_from_user(
domain, user, role) domain, user, role
)
else: else:
has_role = self.identity.validate_group_has_domain_role( has_role = self.identity.validate_group_has_domain_role(
domain, group, role) domain, group, role
)
if not has_role: if not has_role:
self.log.debug('Assignment does not exists') self.log.debug('Assignment does not exists')
return False return False
self.identity.unassign_domain_role_from_group( self.identity.unassign_domain_role_from_group(
domain, group, role) domain, group, role
)
else: else:
# Proceed with system # Proceed with system
# System name must be 'all' due to checks performed in # System name must be 'all' due to checks performed in
# _get_grant_revoke_params # _get_grant_revoke_params
if user: if user:
has_role = self.identity.validate_user_has_system_role( has_role = self.identity.validate_user_has_system_role(
user, role, system) user, role, system
)
if not has_role: if not has_role:
self.log.debug('Assignment does not exist') self.log.debug('Assignment does not exist')
return False return False
self.identity.unassign_system_role_from_user( self.identity.unassign_system_role_from_user(
user, role, system) user, role, system
)
else: else:
has_role = self.identity.validate_group_has_system_role( has_role = self.identity.validate_group_has_system_role(
group, role, system) group, role, system
)
if not has_role: if not has_role:
self.log.debug('Assignment does not exist') self.log.debug('Assignment does not exist')
return False return False
self.identity.unassign_system_role_from_group( self.identity.unassign_system_role_from_group(
group, role, system) group, role, system
)
return True return True
def _get_identity_params(self, domain_id=None, project=None): def _get_identity_params(self, domain_id=None, project=None):
@ -1406,7 +1485,8 @@ class IdentityCloudMixin:
if not domain_id: if not domain_id:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"User or project creation requires an explicit" "User or project creation requires an explicit"
" domain_id argument.") " domain_id argument."
)
else: else:
ret.update({'domain_id': domain_id}) ret.update({'domain_id': domain_id})

View File

@ -46,7 +46,8 @@ class ImageCloudMixin:
def _image_client(self): def _image_client(self):
if 'image' not in self._raw_clients: if 'image' not in self._raw_clients:
self._raw_clients['image'] = self._get_versioned_client( self._raw_clients['image'] = self._get_versioned_client(
'image', min_version=1, max_version='2.latest') 'image', min_version=1, max_version='2.latest'
)
return self._raw_clients['image'] return self._raw_clients['image']
def search_images(self, name_or_id=None, filters=None): def search_images(self, name_or_id=None, filters=None):
@ -108,7 +109,7 @@ class ImageCloudMixin:
return _utils._get_entity(self, 'image', name_or_id, filters) return _utils._get_entity(self, 'image', name_or_id, filters)
def get_image_by_id(self, id): def get_image_by_id(self, id):
""" Get a image by ID """Get a image by ID
:param id: ID of the image. :param id: ID of the image.
:returns: An image :class:`openstack.image.v2.image.Image` object. :returns: An image :class:`openstack.image.v2.image.Image` object.
@ -145,20 +146,23 @@ class ImageCloudMixin:
if output_path is None and output_file is None: if output_path is None and output_file is None:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
'No output specified, an output path or file object' 'No output specified, an output path or file object'
' is necessary to write the image data to') ' is necessary to write the image data to'
)
elif output_path is not None and output_file is not None: elif output_path is not None and output_file is not None:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
'Both an output path and file object were provided,' 'Both an output path and file object were provided,'
' however only one can be used at once') ' however only one can be used at once'
)
image = self.image.find_image(name_or_id) image = self.image.find_image(name_or_id)
if not image: if not image:
raise exc.OpenStackCloudResourceNotFound( raise exc.OpenStackCloudResourceNotFound(
"No images with name or ID %s were found" % name_or_id, None) "No images with name or ID %s were found" % name_or_id, None
)
return self.image.download_image( return self.image.download_image(
image, output=output_file or output_path, image, output=output_file or output_path, chunk_size=chunk_size
chunk_size=chunk_size) )
def get_image_exclude(self, name_or_id, exclude): def get_image_exclude(self, name_or_id, exclude):
for image in self.search_images(name_or_id): for image in self.search_images(name_or_id):
@ -184,7 +188,8 @@ class ImageCloudMixin:
def wait_for_image(self, image, timeout=3600): def wait_for_image(self, image, timeout=3600):
image_id = image['id'] image_id = image['id']
for count in utils.iterate_timeout( for count in utils.iterate_timeout(
timeout, "Timeout waiting for image to snapshot"): timeout, "Timeout waiting for image to snapshot"
):
self.list_images.invalidate(self) self.list_images.invalidate(self)
image = self.get_image(image_id) image = self.get_image(image_id)
if not image: if not image:
@ -193,7 +198,8 @@ class ImageCloudMixin:
return image return image
elif image['status'] == 'error': elif image['status'] == 'error':
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
'Image {image} hit error state'.format(image=image_id)) 'Image {image} hit error state'.format(image=image_id)
)
def delete_image( def delete_image(
self, self,
@ -222,17 +228,19 @@ class ImageCloudMixin:
# Task API means an image was uploaded to swift # Task API means an image was uploaded to swift
# TODO(gtema) does it make sense to move this into proxy? # TODO(gtema) does it make sense to move this into proxy?
if self.image_api_use_tasks and ( if self.image_api_use_tasks and (
self.image._IMAGE_OBJECT_KEY in image.properties self.image._IMAGE_OBJECT_KEY in image.properties
or self.image._SHADE_IMAGE_OBJECT_KEY in image.properties): or self.image._SHADE_IMAGE_OBJECT_KEY in image.properties
):
(container, objname) = image.properties.get( (container, objname) = image.properties.get(
self.image._IMAGE_OBJECT_KEY, image.properties.get( self.image._IMAGE_OBJECT_KEY,
self.image._SHADE_IMAGE_OBJECT_KEY)).split('/', 1) image.properties.get(self.image._SHADE_IMAGE_OBJECT_KEY),
).split('/', 1)
self.delete_object(container=container, name=objname) self.delete_object(container=container, name=objname)
if wait: if wait:
for count in utils.iterate_timeout( for count in utils.iterate_timeout(
timeout, timeout, "Timeout waiting for the image to be deleted."
"Timeout waiting for the image to be deleted."): ):
self._get_cache(None).invalidate() self._get_cache(None).invalidate()
if self.get_image(image.id) is None: if self.get_image(image.id) is None:
break break
@ -307,38 +315,53 @@ class ImageCloudMixin:
""" """
if volume: if volume:
image = self.block_storage.create_image( image = self.block_storage.create_image(
name=name, volume=volume, name=name,
volume=volume,
allow_duplicates=allow_duplicates, allow_duplicates=allow_duplicates,
container_format=container_format, disk_format=disk_format, container_format=container_format,
wait=wait, timeout=timeout) disk_format=disk_format,
wait=wait,
timeout=timeout,
)
else: else:
image = self.image.create_image( image = self.image.create_image(
name, filename=filename, name,
filename=filename,
container=container, container=container,
md5=md5, sha256=sha256, md5=md5,
disk_format=disk_format, container_format=container_format, sha256=sha256,
disk_format=disk_format,
container_format=container_format,
disable_vendor_agent=disable_vendor_agent, disable_vendor_agent=disable_vendor_agent,
wait=wait, timeout=timeout, tags=tags, wait=wait,
allow_duplicates=allow_duplicates, meta=meta, **kwargs) timeout=timeout,
tags=tags,
allow_duplicates=allow_duplicates,
meta=meta,
**kwargs,
)
self._get_cache(None).invalidate() self._get_cache(None).invalidate()
if not wait: if not wait:
return image return image
try: try:
for count in utils.iterate_timeout( for count in utils.iterate_timeout(
timeout, timeout, "Timeout waiting for the image to finish."
"Timeout waiting for the image to finish."): ):
image_obj = self.get_image(image.id) image_obj = self.get_image(image.id)
if image_obj and image_obj.status not in ('queued', 'saving'): if image_obj and image_obj.status not in ('queued', 'saving'):
return image_obj return image_obj
except exc.OpenStackCloudTimeout: except exc.OpenStackCloudTimeout:
self.log.debug( self.log.debug(
"Timeout waiting for image to become ready. Deleting.") "Timeout waiting for image to become ready. Deleting."
)
self.delete_image(image.id, wait=True) self.delete_image(image.id, wait=True)
raise raise
def update_image_properties( def update_image_properties(
self, image=None, name_or_id=None, meta=None, **properties): self, image=None, name_or_id=None, meta=None, **properties
):
image = image or name_or_id image = image or name_or_id
return self.image.update_image_properties( return self.image.update_image_properties(
image=image, meta=meta, **properties) image=image, meta=meta, **properties
)

File diff suppressed because it is too large Load Diff

View File

@ -20,8 +20,8 @@ from openstack.cloud import exc
class NetworkCommonCloudMixin: class NetworkCommonCloudMixin:
"""Shared networking functions used by FloatingIP, Network, Compute classes """Shared networking functions used by FloatingIP, Network, Compute
""" classes."""
def __init__(self): def __init__(self):
self._external_ipv4_names = self.config.get_external_ipv4_networks() self._external_ipv4_names = self.config.get_external_ipv4_networks()
@ -33,9 +33,11 @@ class NetworkCommonCloudMixin:
self._default_network = self.config.get_default_network() self._default_network = self.config.get_default_network()
self._use_external_network = self.config.config.get( self._use_external_network = self.config.config.get(
'use_external_network', True) 'use_external_network', True
)
self._use_internal_network = self.config.config.get( self._use_internal_network = self.config.config.get(
'use_internal_network', True) 'use_internal_network', True
)
self._networks_lock = threading.Lock() self._networks_lock = threading.Lock()
self._reset_network_caches() self._reset_network_caches()
@ -90,46 +92,63 @@ class NetworkCommonCloudMixin:
for network in all_networks: for network in all_networks:
# External IPv4 networks # External IPv4 networks
if (network['name'] in self._external_ipv4_names if (
or network['id'] in self._external_ipv4_names): network['name'] in self._external_ipv4_names
or network['id'] in self._external_ipv4_names
):
external_ipv4_networks.append(network) external_ipv4_networks.append(network)
elif ((network.is_router_external elif (
or network.provider_physical_network) (
and network['name'] not in self._internal_ipv4_names network.is_router_external
and network['id'] not in self._internal_ipv4_names): or network.provider_physical_network
)
and network['name'] not in self._internal_ipv4_names
and network['id'] not in self._internal_ipv4_names
):
external_ipv4_networks.append(network) external_ipv4_networks.append(network)
# Internal networks # Internal networks
if (network['name'] in self._internal_ipv4_names if (
or network['id'] in self._internal_ipv4_names): network['name'] in self._internal_ipv4_names
or network['id'] in self._internal_ipv4_names
):
internal_ipv4_networks.append(network) internal_ipv4_networks.append(network)
elif (not network.is_router_external elif (
and not network.provider_physical_network not network.is_router_external
and network['name'] not in self._external_ipv4_names and not network.provider_physical_network
and network['id'] not in self._external_ipv4_names): and network['name'] not in self._external_ipv4_names
and network['id'] not in self._external_ipv4_names
):
internal_ipv4_networks.append(network) internal_ipv4_networks.append(network)
# External networks # External networks
if (network['name'] in self._external_ipv6_names if (
or network['id'] in self._external_ipv6_names): network['name'] in self._external_ipv6_names
or network['id'] in self._external_ipv6_names
):
external_ipv6_networks.append(network) external_ipv6_networks.append(network)
elif (network.is_router_external elif (
and network['name'] not in self._internal_ipv6_names network.is_router_external
and network['id'] not in self._internal_ipv6_names): and network['name'] not in self._internal_ipv6_names
and network['id'] not in self._internal_ipv6_names
):
external_ipv6_networks.append(network) external_ipv6_networks.append(network)
# Internal networks # Internal networks
if (network['name'] in self._internal_ipv6_names if (
or network['id'] in self._internal_ipv6_names): network['name'] in self._internal_ipv6_names
or network['id'] in self._internal_ipv6_names
):
internal_ipv6_networks.append(network) internal_ipv6_networks.append(network)
elif (not network.is_router_external elif (
and network['name'] not in self._external_ipv6_names not network.is_router_external
and network['id'] not in self._external_ipv6_names): and network['name'] not in self._external_ipv6_names
and network['id'] not in self._external_ipv6_names
):
internal_ipv6_networks.append(network) internal_ipv6_networks.append(network)
# External Floating IPv4 networks # External Floating IPv4 networks
if self._nat_source in ( if self._nat_source in (network['name'], network['id']):
network['name'], network['id']):
if nat_source: if nat_source:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
'Multiple networks were found matching' 'Multiple networks were found matching'
@ -137,8 +156,8 @@ class NetworkCommonCloudMixin:
' to be the NAT source. Please check your' ' to be the NAT source. Please check your'
' cloud resources. It is probably a good idea' ' cloud resources. It is probably a good idea'
' to configure this network by ID rather than' ' to configure this network by ID rather than'
' by name.'.format( ' by name.'.format(nat_net=self._nat_source)
nat_net=self._nat_source)) )
external_ipv4_floating_networks.append(network) external_ipv4_floating_networks.append(network)
nat_source = network nat_source = network
elif self._nat_source is None: elif self._nat_source is None:
@ -147,8 +166,7 @@ class NetworkCommonCloudMixin:
nat_source = nat_source or network nat_source = nat_source or network
# NAT Destination # NAT Destination
if self._nat_destination in ( if self._nat_destination in (network['name'], network['id']):
network['name'], network['id']):
if nat_destination: if nat_destination:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
'Multiple networks were found matching' 'Multiple networks were found matching'
@ -156,8 +174,8 @@ class NetworkCommonCloudMixin:
' to be the NAT destination. Please check your' ' to be the NAT destination. Please check your'
' cloud resources. It is probably a good idea' ' cloud resources. It is probably a good idea'
' to configure this network by ID rather than' ' to configure this network by ID rather than'
' by name.'.format( ' by name.'.format(nat_net=self._nat_destination)
nat_net=self._nat_destination)) )
nat_destination = network nat_destination = network
elif self._nat_destination is None: elif self._nat_destination is None:
# TODO(mordred) need a config value for floating # TODO(mordred) need a config value for floating
@ -174,14 +192,16 @@ class NetworkCommonCloudMixin:
for subnet in all_subnets: for subnet in all_subnets:
# TODO(mordred) trap for detecting more than # TODO(mordred) trap for detecting more than
# one network with a gateway_ip without a config # one network with a gateway_ip without a config
if ('gateway_ip' in subnet and subnet['gateway_ip'] if (
and network['id'] == subnet['network_id']): 'gateway_ip' in subnet
and subnet['gateway_ip']
and network['id'] == subnet['network_id']
):
nat_destination = network nat_destination = network
break break
# Default network # Default network
if self._default_network in ( if self._default_network in (network['name'], network['id']):
network['name'], network['id']):
if default_network: if default_network:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
'Multiple networks were found matching' 'Multiple networks were found matching'
@ -190,8 +210,8 @@ class NetworkCommonCloudMixin:
' network. Please check your cloud resources.' ' network. Please check your cloud resources.'
' It is probably a good idea' ' It is probably a good idea'
' to configure this network by ID rather than' ' to configure this network by ID rather than'
' by name.'.format( ' by name.'.format(default_net=self._default_network)
default_net=self._default_network)) )
default_network = network default_network = network
# Validate config vs. reality # Validate config vs. reality
@ -200,49 +220,57 @@ class NetworkCommonCloudMixin:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Networks: {network} was provided for external IPv4" "Networks: {network} was provided for external IPv4"
" access and those networks could not be found".format( " access and those networks could not be found".format(
network=net_name)) network=net_name
)
)
for net_name in self._internal_ipv4_names: for net_name in self._internal_ipv4_names:
if net_name not in [net['name'] for net in internal_ipv4_networks]: if net_name not in [net['name'] for net in internal_ipv4_networks]:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Networks: {network} was provided for internal IPv4" "Networks: {network} was provided for internal IPv4"
" access and those networks could not be found".format( " access and those networks could not be found".format(
network=net_name)) network=net_name
)
)
for net_name in self._external_ipv6_names: for net_name in self._external_ipv6_names:
if net_name not in [net['name'] for net in external_ipv6_networks]: if net_name not in [net['name'] for net in external_ipv6_networks]:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Networks: {network} was provided for external IPv6" "Networks: {network} was provided for external IPv6"
" access and those networks could not be found".format( " access and those networks could not be found".format(
network=net_name)) network=net_name
)
)
for net_name in self._internal_ipv6_names: for net_name in self._internal_ipv6_names:
if net_name not in [net['name'] for net in internal_ipv6_networks]: if net_name not in [net['name'] for net in internal_ipv6_networks]:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Networks: {network} was provided for internal IPv6" "Networks: {network} was provided for internal IPv6"
" access and those networks could not be found".format( " access and those networks could not be found".format(
network=net_name)) network=net_name
)
)
if self._nat_destination and not nat_destination: if self._nat_destination and not nat_destination:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
'Network {network} was configured to be the' 'Network {network} was configured to be the'
' destination for inbound NAT but it could not be' ' destination for inbound NAT but it could not be'
' found'.format( ' found'.format(network=self._nat_destination)
network=self._nat_destination)) )
if self._nat_source and not nat_source: if self._nat_source and not nat_source:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
'Network {network} was configured to be the' 'Network {network} was configured to be the'
' source for inbound NAT but it could not be' ' source for inbound NAT but it could not be'
' found'.format( ' found'.format(network=self._nat_source)
network=self._nat_source)) )
if self._default_network and not default_network: if self._default_network and not default_network:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
'Network {network} was configured to be the' 'Network {network} was configured to be the'
' default network interface but it could not be' ' default network interface but it could not be'
' found'.format( ' found'.format(network=self._default_network)
network=self._default_network)) )
self._external_ipv4_networks = external_ipv4_networks self._external_ipv4_networks = external_ipv4_networks
self._external_ipv4_floating_networks = external_ipv4_floating_networks self._external_ipv4_floating_networks = external_ipv4_floating_networks
@ -304,9 +332,8 @@ class NetworkCommonCloudMixin:
:returns: A list of network ``Network`` objects if any are found :returns: A list of network ``Network`` objects if any are found
""" """
self._find_interesting_networks() self._find_interesting_networks()
return ( return list(self._external_ipv4_networks) + list(
list(self._external_ipv4_networks) self._external_ipv6_networks
+ list(self._external_ipv6_networks)
) )
def get_internal_networks(self): def get_internal_networks(self):
@ -318,9 +345,8 @@ class NetworkCommonCloudMixin:
:returns: A list of network ``Network`` objects if any are found :returns: A list of network ``Network`` objects if any are found
""" """
self._find_interesting_networks() self._find_interesting_networks()
return ( return list(self._internal_ipv4_networks) + list(
list(self._internal_ipv4_networks) self._internal_ipv6_networks
+ list(self._internal_ipv6_networks)
) )
def get_external_ipv4_networks(self): def get_external_ipv4_networks(self):

View File

@ -105,9 +105,7 @@ class ObjectStoreCloudMixin:
container = self.get_container(name) container = self.get_container(name)
if container: if container:
return container return container
attrs = dict( attrs = dict(name=name)
name=name
)
if public: if public:
attrs['read_ACL'] = OBJECT_CONTAINER_ACLS['public'] attrs['read_ACL'] = OBJECT_CONTAINER_ACLS['public']
container = self.object_store.create_container(**attrs) container = self.object_store.create_container(**attrs)
@ -129,7 +127,9 @@ class ObjectStoreCloudMixin:
'Attempt to delete container {container} failed. The' 'Attempt to delete container {container} failed. The'
' container is not empty. Please delete the objects' ' container is not empty. Please delete the objects'
' inside it before deleting the container'.format( ' inside it before deleting the container'.format(
container=name)) container=name
)
)
def update_container(self, name, headers): def update_container(self, name, headers):
"""Update the metadata in a container. """Update the metadata in a container.
@ -138,7 +138,8 @@ class ObjectStoreCloudMixin:
:param dict headers: Key/Value headers to set on the container. :param dict headers: Key/Value headers to set on the container.
""" """
self.object_store.set_container_metadata( self.object_store.set_container_metadata(
name, refresh=False, **headers) name, refresh=False, **headers
)
def set_container_access(self, name, access, refresh=False): def set_container_access(self, name, access, refresh=False):
"""Set the access control list on a container. """Set the access control list on a container.
@ -152,11 +153,10 @@ class ObjectStoreCloudMixin:
if access not in OBJECT_CONTAINER_ACLS: if access not in OBJECT_CONTAINER_ACLS:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Invalid container access specified: %s. Must be one of %s" "Invalid container access specified: %s. Must be one of %s"
% (access, list(OBJECT_CONTAINER_ACLS.keys()))) % (access, list(OBJECT_CONTAINER_ACLS.keys()))
)
return self.object_store.set_container_metadata( return self.object_store.set_container_metadata(
name, name, read_ACL=OBJECT_CONTAINER_ACLS[access], refresh=refresh
read_ACL=OBJECT_CONTAINER_ACLS[access],
refresh=refresh
) )
def get_container_access(self, name): def get_container_access(self, name):
@ -179,7 +179,8 @@ class ObjectStoreCloudMixin:
if str(acl) == str(value): if str(acl) == str(value):
return key return key
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Could not determine container access for ACL: %s." % acl) "Could not determine container access for ACL: %s." % acl
)
@_utils.cache_on_arguments() @_utils.cache_on_arguments()
def get_object_capabilities(self): def get_object_capabilities(self):
@ -201,7 +202,8 @@ class ObjectStoreCloudMixin:
return self.object_store.get_object_segment_size(segment_size) return self.object_store.get_object_segment_size(segment_size)
def is_object_stale( def is_object_stale(
self, container, name, filename, file_md5=None, file_sha256=None): self, container, name, filename, file_md5=None, file_sha256=None
):
"""Check to see if an object matches the hashes of a file. """Check to see if an object matches the hashes of a file.
:param container: Name of the container. :param container: Name of the container.
@ -213,8 +215,11 @@ class ObjectStoreCloudMixin:
Defaults to None which means calculate locally. Defaults to None which means calculate locally.
""" """
return self.object_store.is_object_stale( return self.object_store.is_object_stale(
container, name, filename, container,
file_md5=file_md5, file_sha256=file_sha256 name,
filename,
file_md5=file_md5,
file_sha256=file_sha256,
) )
def create_directory_marker_object(self, container, name, **headers): def create_directory_marker_object(self, container, name, **headers):
@ -241,11 +246,8 @@ class ObjectStoreCloudMixin:
headers['content-type'] = 'application/directory' headers['content-type'] = 'application/directory'
return self.create_object( return self.create_object(
container, container, name, data='', generate_checksums=False, **headers
name, )
data='',
generate_checksums=False,
**headers)
def create_object( def create_object(
self, self,
@ -295,12 +297,16 @@ class ObjectStoreCloudMixin:
:raises: ``OpenStackCloudException`` on operation error. :raises: ``OpenStackCloudException`` on operation error.
""" """
return self.object_store.create_object( return self.object_store.create_object(
container, name, container,
filename=filename, data=data, name,
md5=md5, sha256=sha256, use_slo=use_slo, filename=filename,
data=data,
md5=md5,
sha256=sha256,
use_slo=use_slo,
generate_checksums=generate_checksums, generate_checksums=generate_checksums,
metadata=metadata, metadata=metadata,
**headers **headers,
) )
def update_object(self, container, name, metadata=None, **headers): def update_object(self, container, name, metadata=None, **headers):
@ -317,8 +323,7 @@ class ObjectStoreCloudMixin:
""" """
meta = metadata.copy() or {} meta = metadata.copy() or {}
meta.update(**headers) meta.update(**headers)
self.object_store.set_object_metadata( self.object_store.set_object_metadata(name, container, **meta)
name, container, **meta)
def list_objects(self, container, full_listing=True, prefix=None): def list_objects(self, container, full_listing=True, prefix=None):
"""List objects. """List objects.
@ -330,10 +335,9 @@ class ObjectStoreCloudMixin:
:returns: A list of object store ``Object`` objects. :returns: A list of object store ``Object`` objects.
:raises: OpenStackCloudException on operation error. :raises: OpenStackCloudException on operation error.
""" """
return list(self.object_store.objects( return list(
container=container, self.object_store.objects(container=container, prefix=prefix)
prefix=prefix )
))
def search_objects(self, container, name=None, filters=None): def search_objects(self, container, name=None, filters=None):
"""Search objects. """Search objects.
@ -364,7 +368,9 @@ class ObjectStoreCloudMixin:
""" """
try: try:
self.object_store.delete_object( self.object_store.delete_object(
name, ignore_missing=False, container=container, name,
ignore_missing=False,
container=container,
) )
return True return True
except exceptions.SDKException: except exceptions.SDKException:
@ -400,9 +406,7 @@ class ObjectStoreCloudMixin:
:param name: :param name:
:returns: The object metadata. :returns: The object metadata.
""" """
return self.object_store.get_object_metadata( return self.object_store.get_object_metadata(name, container).metadata
name, container
).metadata
def get_object_raw(self, container, obj, query_string=None, stream=False): def get_object_raw(self, container, obj, query_string=None, stream=False):
"""Get a raw response object for an object. """Get a raw response object for an object.
@ -422,12 +426,12 @@ class ObjectStoreCloudMixin:
endpoint = urllib.parse.quote(container) endpoint = urllib.parse.quote(container)
if obj: if obj:
endpoint = '{endpoint}/{object}'.format( endpoint = '{endpoint}/{object}'.format(
endpoint=endpoint, endpoint=endpoint, object=urllib.parse.quote(obj)
object=urllib.parse.quote(obj)
) )
if query_string: if query_string:
endpoint = '{endpoint}?{query_string}'.format( endpoint = '{endpoint}?{query_string}'.format(
endpoint=endpoint, query_string=query_string) endpoint=endpoint, query_string=query_string
)
return endpoint return endpoint
def stream_object( def stream_object(
@ -451,13 +455,21 @@ class ObjectStoreCloudMixin:
""" """
try: try:
for ret in self.object_store.stream_object( for ret in self.object_store.stream_object(
obj, container, chunk_size=resp_chunk_size): obj, container, chunk_size=resp_chunk_size
):
yield ret yield ret
except exceptions.ResourceNotFound: except exceptions.ResourceNotFound:
return return
def get_object(self, container, obj, query_string=None, def get_object(
resp_chunk_size=1024, outfile=None, stream=False): self,
container,
obj,
query_string=None,
resp_chunk_size=1024,
outfile=None,
stream=False,
):
"""Get the headers and body of an object """Get the headers and body of an object
:param string container: Name of the container. :param string container: Name of the container.
@ -477,13 +489,13 @@ class ObjectStoreCloudMixin:
""" """
try: try:
obj = self.object_store.get_object( obj = self.object_store.get_object(
obj, container=container, obj,
container=container,
resp_chunk_size=resp_chunk_size, resp_chunk_size=resp_chunk_size,
outfile=outfile, outfile=outfile,
remember_content=(outfile is None) remember_content=(outfile is None),
) )
headers = { headers = {k.lower(): v for k, v in obj._last_headers.items()}
k.lower(): v for k, v in obj._last_headers.items()}
return (headers, obj.data) return (headers, obj.data)
except exceptions.ResourceNotFound: except exceptions.ResourceNotFound:
@ -500,10 +512,13 @@ class ObjectStoreCloudMixin:
result = completed.result() result = completed.result()
exceptions.raise_from_response(result) exceptions.raise_from_response(result)
results.append(result) results.append(result)
except (keystoneauth1.exceptions.RetriableConnectionFailure, except (
exceptions.HttpException) as e: keystoneauth1.exceptions.RetriableConnectionFailure,
exceptions.HttpException,
) as e:
error_text = "Exception processing async task: {}".format( error_text = "Exception processing async task: {}".format(
str(e)) str(e)
)
if raise_on_error: if raise_on_error:
self.log.exception(error_text) self.log.exception(error_text)
raise raise

View File

@ -41,20 +41,33 @@ class OrchestrationCloudMixin:
return self._raw_clients['orchestration'] return self._raw_clients['orchestration']
def get_template_contents( def get_template_contents(
self, template_file=None, template_url=None, self,
template_object=None, files=None): template_file=None,
template_url=None,
template_object=None,
files=None,
):
return self.orchestration.get_template_contents( return self.orchestration.get_template_contents(
template_file=template_file, template_url=template_url, template_file=template_file,
template_object=template_object, files=files) template_url=template_url,
template_object=template_object,
files=files,
)
def create_stack( def create_stack(
self, name, tags=None, self,
template_file=None, template_url=None, name,
template_object=None, files=None, tags=None,
rollback=True, template_file=None,
wait=False, timeout=3600, template_url=None,
environment_files=None, template_object=None,
**parameters): files=None,
rollback=True,
wait=False,
timeout=3600,
environment_files=None,
**parameters
):
"""Create a stack. """Create a stack.
:param string name: Name of the stack. :param string name: Name of the stack.
@ -83,27 +96,36 @@ class OrchestrationCloudMixin:
tags=tags, tags=tags,
is_rollback_disabled=not rollback, is_rollback_disabled=not rollback,
timeout_mins=timeout // 60, timeout_mins=timeout // 60,
parameters=parameters parameters=parameters,
)
params.update(
self.orchestration.read_env_and_templates(
template_file=template_file,
template_url=template_url,
template_object=template_object,
files=files,
environment_files=environment_files,
)
) )
params.update(self.orchestration.read_env_and_templates(
template_file=template_file, template_url=template_url,
template_object=template_object, files=files,
environment_files=environment_files
))
self.orchestration.create_stack(name=name, **params) self.orchestration.create_stack(name=name, **params)
if wait: if wait:
event_utils.poll_for_events(self, stack_name=name, event_utils.poll_for_events(self, stack_name=name, action='CREATE')
action='CREATE')
return self.get_stack(name) return self.get_stack(name)
def update_stack( def update_stack(
self, name_or_id, self,
template_file=None, template_url=None, name_or_id,
template_object=None, files=None, template_file=None,
rollback=True, tags=None, template_url=None,
wait=False, timeout=3600, template_object=None,
environment_files=None, files=None,
**parameters): rollback=True,
tags=None,
wait=False,
timeout=3600,
environment_files=None,
**parameters
):
"""Update a stack. """Update a stack.
:param string name_or_id: Name or ID of the stack to update. :param string name_or_id: Name or ID of the stack to update.
@ -131,27 +153,31 @@ class OrchestrationCloudMixin:
tags=tags, tags=tags,
is_rollback_disabled=not rollback, is_rollback_disabled=not rollback,
timeout_mins=timeout // 60, timeout_mins=timeout // 60,
parameters=parameters parameters=parameters,
)
params.update(
self.orchestration.read_env_and_templates(
template_file=template_file,
template_url=template_url,
template_object=template_object,
files=files,
environment_files=environment_files,
)
) )
params.update(self.orchestration.read_env_and_templates(
template_file=template_file, template_url=template_url,
template_object=template_object, files=files,
environment_files=environment_files
))
if wait: if wait:
# find the last event to use as the marker # find the last event to use as the marker
events = event_utils.get_events( events = event_utils.get_events(
self, name_or_id, event_args={'sort_dir': 'desc', 'limit': 1}) self, name_or_id, event_args={'sort_dir': 'desc', 'limit': 1}
)
marker = events[0].id if events else None marker = events[0].id if events else None
# Not to cause update of ID field pass stack as dict # Not to cause update of ID field pass stack as dict
self.orchestration.update_stack(stack={'id': name_or_id}, **params) self.orchestration.update_stack(stack={'id': name_or_id}, **params)
if wait: if wait:
event_utils.poll_for_events(self, event_utils.poll_for_events(
name_or_id, self, name_or_id, action='UPDATE', marker=marker
action='UPDATE', )
marker=marker)
return self.get_stack(name_or_id) return self.get_stack(name_or_id)
def delete_stack(self, name_or_id, wait=False): def delete_stack(self, name_or_id, wait=False):
@ -173,24 +199,26 @@ class OrchestrationCloudMixin:
if wait: if wait:
# find the last event to use as the marker # find the last event to use as the marker
events = event_utils.get_events( events = event_utils.get_events(
self, name_or_id, event_args={'sort_dir': 'desc', 'limit': 1}) self, name_or_id, event_args={'sort_dir': 'desc', 'limit': 1}
)
marker = events[0].id if events else None marker = events[0].id if events else None
self.orchestration.delete_stack(stack) self.orchestration.delete_stack(stack)
if wait: if wait:
try: try:
event_utils.poll_for_events(self, event_utils.poll_for_events(
stack_name=name_or_id, self, stack_name=name_or_id, action='DELETE', marker=marker
action='DELETE', )
marker=marker)
except exc.OpenStackCloudHTTPError: except exc.OpenStackCloudHTTPError:
pass pass
stack = self.get_stack(name_or_id, resolve_outputs=False) stack = self.get_stack(name_or_id, resolve_outputs=False)
if stack and stack['stack_status'] == 'DELETE_FAILED': if stack and stack['stack_status'] == 'DELETE_FAILED':
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Failed to delete stack {id}: {reason}".format( "Failed to delete stack {id}: {reason}".format(
id=name_or_id, reason=stack['stack_status_reason'])) id=name_or_id, reason=stack['stack_status_reason']
)
)
return True return True
@ -246,12 +274,12 @@ class OrchestrationCloudMixin:
stack = self.orchestration.find_stack( stack = self.orchestration.find_stack(
name_or_id, name_or_id,
ignore_missing=False, ignore_missing=False,
resolve_outputs=resolve_outputs) resolve_outputs=resolve_outputs,
)
if stack.status == 'DELETE_COMPLETE': if stack.status == 'DELETE_COMPLETE':
return [] return []
except exc.OpenStackCloudURINotFound: except exc.OpenStackCloudURINotFound:
return [] return []
return _utils._filter_list([stack], name_or_id, filters) return _utils._filter_list([stack], name_or_id, filters)
return _utils._get_entity( return _utils._get_entity(self, _search_one_stack, name_or_id, filters)
self, _search_one_stack, name_or_id, filters)

View File

@ -59,15 +59,16 @@ class SecurityGroupCloudMixin:
if self._use_neutron_secgroups(): if self._use_neutron_secgroups():
# pass filters dict to the list to filter as much as possible on # pass filters dict to the list to filter as much as possible on
# the server side # the server side
return list( return list(self.network.security_groups(**filters))
self.network.security_groups(**filters))
# Handle nova security groups # Handle nova security groups
else: else:
data = proxy._json_response(self.compute.get( data = proxy._json_response(
'/os-security-groups', params=filters)) self.compute.get('/os-security-groups', params=filters)
)
return self._normalize_secgroups( return self._normalize_secgroups(
self._get_and_munchify('security_groups', data)) self._get_and_munchify('security_groups', data)
)
def get_security_group(self, name_or_id, filters=None): def get_security_group(self, name_or_id, filters=None):
"""Get a security group by name or ID. """Get a security group by name or ID.
@ -93,11 +94,10 @@ class SecurityGroupCloudMixin:
or None if no matching security group is found. or None if no matching security group is found.
""" """
return _utils._get_entity( return _utils._get_entity(self, 'security_group', name_or_id, filters)
self, 'security_group', name_or_id, filters)
def get_security_group_by_id(self, id): def get_security_group_by_id(self, id):
""" Get a security group by ID """Get a security group by ID
:param id: ID of the security group. :param id: ID of the security group.
:returns: A security group :returns: A security group
@ -107,20 +107,23 @@ class SecurityGroupCloudMixin:
raise exc.OpenStackCloudUnavailableFeature( raise exc.OpenStackCloudUnavailableFeature(
"Unavailable feature: security groups" "Unavailable feature: security groups"
) )
error_message = ("Error getting security group with" error_message = "Error getting security group with" " ID {id}".format(
" ID {id}".format(id=id)) id=id
)
if self._use_neutron_secgroups(): if self._use_neutron_secgroups():
return self.network.get_security_group(id) return self.network.get_security_group(id)
else: else:
data = proxy._json_response( data = proxy._json_response(
self.compute.get( self.compute.get('/os-security-groups/{id}'.format(id=id)),
'/os-security-groups/{id}'.format(id=id)), error_message=error_message,
error_message=error_message) )
return self._normalize_secgroup( return self._normalize_secgroup(
self._get_and_munchify('security_group', data)) self._get_and_munchify('security_group', data)
)
def create_security_group(self, name, description, def create_security_group(
project_id=None, stateful=None): self, name, description, project_id=None, stateful=None
):
"""Create a new security group """Create a new security group
:param string name: A name for the security group. :param string name: A name for the security group.
@ -145,22 +148,23 @@ class SecurityGroupCloudMixin:
) )
data = [] data = []
security_group_json = { security_group_json = {'name': name, 'description': description}
'name': name, 'description': description
}
if stateful is not None: if stateful is not None:
security_group_json['stateful'] = stateful security_group_json['stateful'] = stateful
if project_id is not None: if project_id is not None:
security_group_json['tenant_id'] = project_id security_group_json['tenant_id'] = project_id
if self._use_neutron_secgroups(): if self._use_neutron_secgroups():
return self.network.create_security_group( return self.network.create_security_group(**security_group_json)
**security_group_json)
else: else:
data = proxy._json_response(self.compute.post( data = proxy._json_response(
'/os-security-groups', self.compute.post(
json={'security_group': security_group_json})) '/os-security-groups',
json={'security_group': security_group_json},
)
)
return self._normalize_secgroup( return self._normalize_secgroup(
self._get_and_munchify('security_group', data)) self._get_and_munchify('security_group', data)
)
def delete_security_group(self, name_or_id): def delete_security_group(self, name_or_id):
"""Delete a security group """Delete a security group
@ -183,18 +187,23 @@ class SecurityGroupCloudMixin:
# the delete. # the delete.
secgroup = self.get_security_group(name_or_id) secgroup = self.get_security_group(name_or_id)
if secgroup is None: if secgroup is None:
self.log.debug('Security group %s not found for deleting', self.log.debug(
name_or_id) 'Security group %s not found for deleting', name_or_id
)
return False return False
if self._use_neutron_secgroups(): if self._use_neutron_secgroups():
self.network.delete_security_group( self.network.delete_security_group(
secgroup['id'], ignore_missing=False) secgroup['id'], ignore_missing=False
)
return True return True
else: else:
proxy._json_response(self.compute.delete( proxy._json_response(
'/os-security-groups/{id}'.format(id=secgroup['id']))) self.compute.delete(
'/os-security-groups/{id}'.format(id=secgroup['id'])
)
)
return True return True
@_utils.valid_kwargs('name', 'description', 'stateful') @_utils.valid_kwargs('name', 'description', 'stateful')
@ -220,35 +229,38 @@ class SecurityGroupCloudMixin:
if group is None: if group is None:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Security group %s not found." % name_or_id) "Security group %s not found." % name_or_id
)
if self._use_neutron_secgroups(): if self._use_neutron_secgroups():
return self.network.update_security_group( return self.network.update_security_group(group['id'], **kwargs)
group['id'],
**kwargs
)
else: else:
for key in ('name', 'description'): for key in ('name', 'description'):
kwargs.setdefault(key, group[key]) kwargs.setdefault(key, group[key])
data = proxy._json_response( data = proxy._json_response(
self.compute.put( self.compute.put(
'/os-security-groups/{id}'.format(id=group['id']), '/os-security-groups/{id}'.format(id=group['id']),
json={'security_group': kwargs})) json={'security_group': kwargs},
)
)
return self._normalize_secgroup( return self._normalize_secgroup(
self._get_and_munchify('security_group', data)) self._get_and_munchify('security_group', data)
)
def create_security_group_rule(self, def create_security_group_rule(
secgroup_name_or_id, self,
port_range_min=None, secgroup_name_or_id,
port_range_max=None, port_range_min=None,
protocol=None, port_range_max=None,
remote_ip_prefix=None, protocol=None,
remote_group_id=None, remote_ip_prefix=None,
remote_address_group_id=None, remote_group_id=None,
direction='ingress', remote_address_group_id=None,
ethertype='IPv4', direction='ingress',
project_id=None, ethertype='IPv4',
description=None): project_id=None,
description=None,
):
"""Create a new security group rule """Create a new security group rule
:param string secgroup_name_or_id: :param string secgroup_name_or_id:
@ -308,31 +320,32 @@ class SecurityGroupCloudMixin:
secgroup = self.get_security_group(secgroup_name_or_id) secgroup = self.get_security_group(secgroup_name_or_id)
if not secgroup: if not secgroup:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Security group %s not found." % secgroup_name_or_id) "Security group %s not found." % secgroup_name_or_id
)
if self._use_neutron_secgroups(): if self._use_neutron_secgroups():
# NOTE: Nova accepts -1 port numbers, but Neutron accepts None # NOTE: Nova accepts -1 port numbers, but Neutron accepts None
# as the equivalent value. # as the equivalent value.
rule_def = { rule_def = {
'security_group_id': secgroup['id'], 'security_group_id': secgroup['id'],
'port_range_min': 'port_range_min': None
None if port_range_min == -1 else port_range_min, if port_range_min == -1
'port_range_max': else port_range_min,
None if port_range_max == -1 else port_range_max, 'port_range_max': None
if port_range_max == -1
else port_range_max,
'protocol': protocol, 'protocol': protocol,
'remote_ip_prefix': remote_ip_prefix, 'remote_ip_prefix': remote_ip_prefix,
'remote_group_id': remote_group_id, 'remote_group_id': remote_group_id,
'remote_address_group_id': remote_address_group_id, 'remote_address_group_id': remote_address_group_id,
'direction': direction, 'direction': direction,
'ethertype': ethertype 'ethertype': ethertype,
} }
if project_id is not None: if project_id is not None:
rule_def['tenant_id'] = project_id rule_def['tenant_id'] = project_id
if description is not None: if description is not None:
rule_def["description"] = description rule_def["description"] = description
return self.network.create_security_group_rule( return self.network.create_security_group_rule(**rule_def)
**rule_def
)
else: else:
# NOTE: Neutron accepts None for protocol. Nova does not. # NOTE: Neutron accepts None for protocol. Nova does not.
if protocol is None: if protocol is None:
@ -343,7 +356,8 @@ class SecurityGroupCloudMixin:
'Rule creation failed: Nova does not support egress rules' 'Rule creation failed: Nova does not support egress rules'
) )
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
'No support for egress rules') 'No support for egress rules'
)
# NOTE: Neutron accepts None for ports, but Nova requires -1 # NOTE: Neutron accepts None for ports, but Nova requires -1
# as the equivalent value for ICMP. # as the equivalent value for ICMP.
@ -363,24 +377,28 @@ class SecurityGroupCloudMixin:
port_range_min = 1 port_range_min = 1
port_range_max = 65535 port_range_max = 65535
security_group_rule_dict = dict(security_group_rule=dict( security_group_rule_dict = dict(
parent_group_id=secgroup['id'], security_group_rule=dict(
ip_protocol=protocol, parent_group_id=secgroup['id'],
from_port=port_range_min, ip_protocol=protocol,
to_port=port_range_max, from_port=port_range_min,
cidr=remote_ip_prefix, to_port=port_range_max,
group_id=remote_group_id cidr=remote_ip_prefix,
)) group_id=remote_group_id,
)
)
if project_id is not None: if project_id is not None:
security_group_rule_dict[ security_group_rule_dict['security_group_rule'][
'security_group_rule']['tenant_id'] = project_id 'tenant_id'
] = project_id
data = proxy._json_response( data = proxy._json_response(
self.compute.post( self.compute.post(
'/os-security-group-rules', '/os-security-group-rules', json=security_group_rule_dict
json=security_group_rule_dict )
)) )
return self._normalize_secgroup_rule( return self._normalize_secgroup_rule(
self._get_and_munchify('security_group_rule', data)) self._get_and_munchify('security_group_rule', data)
)
def delete_security_group_rule(self, rule_id): def delete_security_group_rule(self, rule_id):
"""Delete a security group rule """Delete a security group rule
@ -401,8 +419,7 @@ class SecurityGroupCloudMixin:
if self._use_neutron_secgroups(): if self._use_neutron_secgroups():
self.network.delete_security_group_rule( self.network.delete_security_group_rule(
rule_id, rule_id, ignore_missing=False
ignore_missing=False
) )
return True return True
@ -410,7 +427,9 @@ class SecurityGroupCloudMixin:
try: try:
exceptions.raise_from_response( exceptions.raise_from_response(
self.compute.delete( self.compute.delete(
'/os-security-group-rules/{id}'.format(id=rule_id))) '/os-security-group-rules/{id}'.format(id=rule_id)
)
)
except exc.OpenStackCloudResourceNotFound: except exc.OpenStackCloudResourceNotFound:
return False return False
@ -423,8 +442,9 @@ class SecurityGroupCloudMixin:
return self.secgroup_source.lower() in ('nova', 'neutron') return self.secgroup_source.lower() in ('nova', 'neutron')
def _use_neutron_secgroups(self): def _use_neutron_secgroups(self):
return (self.has_service('network') return (
and self.secgroup_source == 'neutron') self.has_service('network') and self.secgroup_source == 'neutron'
)
def _normalize_secgroups(self, groups): def _normalize_secgroups(self, groups):
"""Normalize the structure of security groups """Normalize the structure of security groups
@ -454,7 +474,8 @@ class SecurityGroupCloudMixin:
self._remove_novaclient_artifacts(group) self._remove_novaclient_artifacts(group)
rules = self._normalize_secgroup_rules( rules = self._normalize_secgroup_rules(
group.pop('security_group_rules', group.pop('rules', []))) group.pop('security_group_rules', group.pop('rules', []))
)
project_id = group.pop('tenant_id', '') project_id = group.pop('tenant_id', '')
project_id = group.pop('project_id', project_id) project_id = group.pop('project_id', project_id)
@ -506,14 +527,14 @@ class SecurityGroupCloudMixin:
ret['direction'] = rule.pop('direction', 'ingress') ret['direction'] = rule.pop('direction', 'ingress')
ret['ethertype'] = rule.pop('ethertype', 'IPv4') ret['ethertype'] = rule.pop('ethertype', 'IPv4')
port_range_min = rule.get( port_range_min = rule.get(
'port_range_min', rule.pop('from_port', None)) 'port_range_min', rule.pop('from_port', None)
)
if port_range_min == -1: if port_range_min == -1:
port_range_min = None port_range_min = None
if port_range_min is not None: if port_range_min is not None:
port_range_min = int(port_range_min) port_range_min = int(port_range_min)
ret['port_range_min'] = port_range_min ret['port_range_min'] = port_range_min
port_range_max = rule.pop( port_range_max = rule.pop('port_range_max', rule.pop('to_port', None))
'port_range_max', rule.pop('to_port', None))
if port_range_max == -1: if port_range_max == -1:
port_range_max = None port_range_max = None
if port_range_min is not None: if port_range_min is not None:
@ -521,9 +542,11 @@ class SecurityGroupCloudMixin:
ret['port_range_max'] = port_range_max ret['port_range_max'] = port_range_max
ret['protocol'] = rule.pop('protocol', rule.pop('ip_protocol', None)) ret['protocol'] = rule.pop('protocol', rule.pop('ip_protocol', None))
ret['remote_ip_prefix'] = rule.pop( ret['remote_ip_prefix'] = rule.pop(
'remote_ip_prefix', rule.pop('ip_range', {}).get('cidr', None)) 'remote_ip_prefix', rule.pop('ip_range', {}).get('cidr', None)
)
ret['security_group_id'] = rule.pop( ret['security_group_id'] = rule.pop(
'security_group_id', rule.pop('parent_group_id', None)) 'security_group_id', rule.pop('parent_group_id', None)
)
ret['remote_group_id'] = rule.pop('remote_group_id', None) ret['remote_group_id'] = rule.pop('remote_group_id', None)
project_id = rule.pop('tenant_id', '') project_id = rule.pop('tenant_id', '')
project_id = rule.pop('project_id', project_id) project_id = rule.pop('project_id', project_id)

View File

@ -102,8 +102,9 @@ def _filter_list(data, name_or_id, filters):
e_id = _make_unicode(e.get('id', None)) e_id = _make_unicode(e.get('id', None))
e_name = _make_unicode(e.get('name', None)) e_name = _make_unicode(e.get('name', None))
if ((e_id and e_id == name_or_id) if (e_id and e_id == name_or_id) or (
or (e_name and e_name == name_or_id)): e_name and e_name == name_or_id
):
identifier_matches.append(e) identifier_matches.append(e)
else: else:
# Only try fnmatch if we don't match exactly # Only try fnmatch if we don't match exactly
@ -112,8 +113,9 @@ def _filter_list(data, name_or_id, filters):
# so that we log the bad pattern # so that we log the bad pattern
bad_pattern = True bad_pattern = True
continue continue
if ((e_id and fn_reg.match(e_id)) if (e_id and fn_reg.match(e_id)) or (
or (e_name and fn_reg.match(e_name))): e_name and fn_reg.match(e_name)
):
identifier_matches.append(e) identifier_matches.append(e)
if not identifier_matches and bad_pattern: if not identifier_matches and bad_pattern:
log.debug("Bad pattern passed to fnmatch", exc_info=True) log.debug("Bad pattern passed to fnmatch", exc_info=True)
@ -172,8 +174,9 @@ def _get_entity(cloud, resource, name_or_id, filters, **kwargs):
# an additional call, it's simple enough to test to see if we got an # an additional call, it's simple enough to test to see if we got an
# object and just short-circuit return it. # object and just short-circuit return it.
if (hasattr(name_or_id, 'id') if hasattr(name_or_id, 'id') or (
or (isinstance(name_or_id, dict) and 'id' in name_or_id)): isinstance(name_or_id, dict) and 'id' in name_or_id
):
return name_or_id return name_or_id
# If a uuid is passed short-circuit it calling the # If a uuid is passed short-circuit it calling the
@ -183,14 +186,18 @@ def _get_entity(cloud, resource, name_or_id, filters, **kwargs):
if get_resource: if get_resource:
return get_resource(name_or_id) return get_resource(name_or_id)
search = resource if callable(resource) else getattr( search = (
cloud, 'search_%ss' % resource, None) resource
if callable(resource)
else getattr(cloud, 'search_%ss' % resource, None)
)
if search: if search:
entities = search(name_or_id, filters, **kwargs) entities = search(name_or_id, filters, **kwargs)
if entities: if entities:
if len(entities) > 1: if len(entities) > 1:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Multiple matches found for %s" % name_or_id) "Multiple matches found for %s" % name_or_id
)
return entities[0] return entities[0]
return None return None
@ -230,8 +237,10 @@ def valid_kwargs(*valid_args):
if k not in argspec.args[1:] and k not in valid_args: if k not in argspec.args[1:] and k not in valid_args:
raise TypeError( raise TypeError(
"{f}() got an unexpected keyword argument " "{f}() got an unexpected keyword argument "
"'{arg}'".format(f=inspect.stack()[1][3], arg=k)) "'{arg}'".format(f=inspect.stack()[1][3], arg=k)
)
return func(*args, **kwargs) return func(*args, **kwargs)
return func_wrapper return func_wrapper
@ -244,6 +253,7 @@ def _func_wrap(f):
@functools.wraps(f) @functools.wraps(f)
def inner(*args, **kwargs): def inner(*args, **kwargs):
return f(*args, **kwargs) return f(*args, **kwargs)
return inner return inner
@ -253,20 +263,23 @@ def cache_on_arguments(*cache_on_args, **cache_on_kwargs):
def _inner_cache_on_arguments(func): def _inner_cache_on_arguments(func):
def _cache_decorator(obj, *args, **kwargs): def _cache_decorator(obj, *args, **kwargs):
the_method = obj._get_cache(_cache_name).cache_on_arguments( the_method = obj._get_cache(_cache_name).cache_on_arguments(
*cache_on_args, **cache_on_kwargs)( *cache_on_args, **cache_on_kwargs
_func_wrap(func.__get__(obj, type(obj)))) )(_func_wrap(func.__get__(obj, type(obj))))
return the_method(*args, **kwargs) return the_method(*args, **kwargs)
def invalidate(obj, *args, **kwargs): def invalidate(obj, *args, **kwargs):
return obj._get_cache( return (
_cache_name).cache_on_arguments()(func).invalidate( obj._get_cache(_cache_name)
*args, **kwargs) .cache_on_arguments()(func)
.invalidate(*args, **kwargs)
)
_cache_decorator.invalidate = invalidate _cache_decorator.invalidate = invalidate
_cache_decorator.func = func _cache_decorator.func = func
_decorated_methods.append(func.__name__) _decorated_methods.append(func.__name__)
return _cache_decorator return _cache_decorator
return _inner_cache_on_arguments return _inner_cache_on_arguments
@ -320,7 +333,8 @@ def safe_dict_min(key, data):
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Search for minimum value failed. " "Search for minimum value failed. "
"Value for {key} is not an integer: {value}".format( "Value for {key} is not an integer: {value}".format(
key=key, value=d[key]) key=key, value=d[key]
)
) )
if (min_value is None) or (val < min_value): if (min_value is None) or (val < min_value):
min_value = val min_value = val
@ -352,16 +366,17 @@ def safe_dict_max(key, data):
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Search for maximum value failed. " "Search for maximum value failed. "
"Value for {key} is not an integer: {value}".format( "Value for {key} is not an integer: {value}".format(
key=key, value=d[key]) key=key, value=d[key]
)
) )
if (max_value is None) or (val > max_value): if (max_value is None) or (val > max_value):
max_value = val max_value = val
return max_value return max_value
def _call_client_and_retry(client, url, retry_on=None, def _call_client_and_retry(
call_retries=3, retry_wait=2, client, url, retry_on=None, call_retries=3, retry_wait=2, **kwargs
**kwargs): ):
"""Method to provide retry operations. """Method to provide retry operations.
Some APIs utilize HTTP errors on certain operations to indicate that Some APIs utilize HTTP errors on certain operations to indicate that
@ -391,18 +406,17 @@ def _call_client_and_retry(client, url, retry_on=None,
retry_on = [retry_on] retry_on = [retry_on]
count = 0 count = 0
while (count < call_retries): while count < call_retries:
count += 1 count += 1
try: try:
ret_val = client(url, **kwargs) ret_val = client(url, **kwargs)
except exc.OpenStackCloudHTTPError as e: except exc.OpenStackCloudHTTPError as e:
if (retry_on is not None if retry_on is not None and e.response.status_code in retry_on:
and e.response.status_code in retry_on): log.debug(
log.debug('Received retryable error %(err)s, waiting ' 'Received retryable error %(err)s, waiting '
'%(wait)s seconds to retry', { '%(wait)s seconds to retry',
'err': e.response.status_code, {'err': e.response.status_code, 'wait': retry_wait},
'wait': retry_wait )
})
time.sleep(retry_wait) time.sleep(retry_wait)
continue continue
else: else:
@ -484,7 +498,8 @@ def range_filter(data, key, range_exp):
# If parsing the range fails, it must be a bad value. # If parsing the range fails, it must be a bad value.
if val_range is None: if val_range is None:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Invalid range value: {value}".format(value=range_exp)) "Invalid range value: {value}".format(value=range_exp)
)
op = val_range[0] op = val_range[0]
if op: if op:
@ -523,9 +538,7 @@ def generate_patches_from_kwargs(operation, **kwargs):
""" """
patches = [] patches = []
for k, v in kwargs.items(): for k, v in kwargs.items():
patch = {'op': operation, patch = {'op': operation, 'value': v, 'path': '/%s' % k}
'value': v,
'path': '/%s' % k}
patches.append(patch) patches.append(patch)
return sorted(patches) return sorted(patches)
@ -568,11 +581,13 @@ class FileSegment:
def _format_uuid_string(string): def _format_uuid_string(string):
return (string.replace('urn:', '') return (
.replace('uuid:', '') string.replace('urn:', '')
.strip('{}') .replace('uuid:', '')
.replace('-', '') .strip('{}')
.lower()) .replace('-', '')
.lower()
)
def _is_uuid_like(val): def _is_uuid_like(val):

View File

@ -32,20 +32,35 @@ def output_format_dict(data, use_yaml):
def parse_args(): def parse_args():
parser = argparse.ArgumentParser(description='OpenStack Inventory Module') parser = argparse.ArgumentParser(description='OpenStack Inventory Module')
parser.add_argument('--refresh', action='store_true', parser.add_argument(
help='Refresh cached information') '--refresh', action='store_true', help='Refresh cached information'
)
group = parser.add_mutually_exclusive_group(required=True) group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--list', action='store_true', group.add_argument(
help='List active servers') '--list', action='store_true', help='List active servers'
)
group.add_argument('--host', help='List details about the specific host') group.add_argument('--host', help='List details about the specific host')
parser.add_argument('--private', action='store_true', default=False, parser.add_argument(
help='Use private IPs for interface_ip') '--private',
parser.add_argument('--cloud', default=None, action='store_true',
help='Return data for one cloud only') default=False,
parser.add_argument('--yaml', action='store_true', default=False, help='Use private IPs for interface_ip',
help='Output data in nicely readable yaml') )
parser.add_argument('--debug', action='store_true', default=False, parser.add_argument(
help='Enable debug output') '--cloud', default=None, help='Return data for one cloud only'
)
parser.add_argument(
'--yaml',
action='store_true',
default=False,
help='Output data in nicely readable yaml',
)
parser.add_argument(
'--debug',
action='store_true',
default=False,
help='Enable debug output',
)
return parser.parse_args() return parser.parse_args()
@ -54,8 +69,8 @@ def main():
try: try:
openstack.enable_logging(debug=args.debug) openstack.enable_logging(debug=args.debug)
inventory = openstack.cloud.inventory.OpenStackInventory( inventory = openstack.cloud.inventory.OpenStackInventory(
refresh=args.refresh, private=args.private, refresh=args.refresh, private=args.private, cloud=args.cloud
cloud=args.cloud) )
if args.list: if args.list:
output = inventory.list_hosts() output = inventory.list_hosts()
elif args.host: elif args.host:

View File

@ -19,12 +19,14 @@ OpenStackCloudTimeout = exceptions.ResourceTimeout
class OpenStackCloudCreateException(OpenStackCloudException): class OpenStackCloudCreateException(OpenStackCloudException):
def __init__(self, resource, resource_id, extra_data=None, **kwargs): def __init__(self, resource, resource_id, extra_data=None, **kwargs):
super(OpenStackCloudCreateException, self).__init__( super(OpenStackCloudCreateException, self).__init__(
message="Error creating {resource}: {resource_id}".format( message="Error creating {resource}: {resource_id}".format(
resource=resource, resource_id=resource_id), resource=resource, resource_id=resource_id
extra_data=extra_data, **kwargs) ),
extra_data=extra_data,
**kwargs
)
self.resource_id = resource_id self.resource_id = resource_id

View File

@ -28,15 +28,23 @@ class OpenStackInventory:
extra_config = None extra_config = None
def __init__( def __init__(
self, config_files=None, refresh=False, private=False, self,
config_key=None, config_defaults=None, cloud=None, config_files=None,
use_direct_get=False): refresh=False,
private=False,
config_key=None,
config_defaults=None,
cloud=None,
use_direct_get=False,
):
if config_files is None: if config_files is None:
config_files = [] config_files = []
config = loader.OpenStackConfig( config = loader.OpenStackConfig(
config_files=loader.CONFIG_FILES + config_files) config_files=loader.CONFIG_FILES + config_files
)
self.extra_config = config.get_extra_config( self.extra_config = config.get_extra_config(
config_key, config_defaults) config_key, config_defaults
)
if cloud is None: if cloud is None:
self.clouds = [ self.clouds = [
@ -44,9 +52,7 @@ class OpenStackInventory:
for cloud_region in config.get_all() for cloud_region in config.get_all()
] ]
else: else:
self.clouds = [ self.clouds = [connection.Connection(config=config.get_one(cloud))]
connection.Connection(config=config.get_one(cloud))
]
if private: if private:
for cloud in self.clouds: for cloud in self.clouds:
@ -57,15 +63,17 @@ class OpenStackInventory:
for cloud in self.clouds: for cloud in self.clouds:
cloud._cache.invalidate() cloud._cache.invalidate()
def list_hosts(self, expand=True, fail_on_cloud_config=True, def list_hosts(
all_projects=False): self, expand=True, fail_on_cloud_config=True, all_projects=False
):
hostvars = [] hostvars = []
for cloud in self.clouds: for cloud in self.clouds:
try: try:
# Cycle on servers # Cycle on servers
for server in cloud.list_servers(detailed=expand, for server in cloud.list_servers(
all_projects=all_projects): detailed=expand, all_projects=all_projects
):
hostvars.append(server) hostvars.append(server)
except exceptions.OpenStackCloudException: except exceptions.OpenStackCloudException:
# Don't fail on one particular cloud as others may work # Don't fail on one particular cloud as others may work

View File

@ -23,8 +23,9 @@ from openstack import utils
NON_CALLABLES = (str, bool, dict, int, float, list, type(None)) NON_CALLABLES = (str, bool, dict, int, float, list, type(None))
def find_nova_interfaces(addresses, ext_tag=None, key_name=None, version=4, def find_nova_interfaces(
mac_addr=None): addresses, ext_tag=None, key_name=None, version=4, mac_addr=None
):
ret = [] ret = []
for (k, v) in iter(addresses.items()): for (k, v) in iter(addresses.items()):
if key_name is not None and k != key_name: if key_name is not None and k != key_name:
@ -64,10 +65,12 @@ def find_nova_interfaces(addresses, ext_tag=None, key_name=None, version=4,
return ret return ret
def find_nova_addresses(addresses, ext_tag=None, key_name=None, version=4, def find_nova_addresses(
mac_addr=None): addresses, ext_tag=None, key_name=None, version=4, mac_addr=None
interfaces = find_nova_interfaces(addresses, ext_tag, key_name, version, ):
mac_addr) interfaces = find_nova_interfaces(
addresses, ext_tag, key_name, version, mac_addr
)
floating_addrs = [] floating_addrs = []
fixed_addrs = [] fixed_addrs = []
for i in interfaces: for i in interfaces:
@ -91,8 +94,7 @@ def get_server_ip(server, public=False, cloud_public=True, **kwargs):
private ip we expect shade to be able to reach private ip we expect shade to be able to reach
""" """
addrs = find_nova_addresses(server['addresses'], **kwargs) addrs = find_nova_addresses(server['addresses'], **kwargs)
return find_best_address( return find_best_address(addrs, public=public, cloud_public=cloud_public)
addrs, public=public, cloud_public=cloud_public)
def get_server_private_ip(server, cloud=None): def get_server_private_ip(server, cloud=None):
@ -126,30 +128,34 @@ def get_server_private_ip(server, cloud=None):
int_nets = cloud.get_internal_ipv4_networks() int_nets = cloud.get_internal_ipv4_networks()
for int_net in int_nets: for int_net in int_nets:
int_ip = get_server_ip( int_ip = get_server_ip(
server, key_name=int_net['name'], server,
key_name=int_net['name'],
ext_tag='fixed', ext_tag='fixed',
cloud_public=not cloud.private, cloud_public=not cloud.private,
mac_addr=fip_mac) mac_addr=fip_mac,
)
if int_ip is not None: if int_ip is not None:
return int_ip return int_ip
# Try a second time without the fixed tag. This is for old nova-network # Try a second time without the fixed tag. This is for old nova-network
# results that do not have the fixed/floating tag. # results that do not have the fixed/floating tag.
for int_net in int_nets: for int_net in int_nets:
int_ip = get_server_ip( int_ip = get_server_ip(
server, key_name=int_net['name'], server,
key_name=int_net['name'],
cloud_public=not cloud.private, cloud_public=not cloud.private,
mac_addr=fip_mac) mac_addr=fip_mac,
)
if int_ip is not None: if int_ip is not None:
return int_ip return int_ip
ip = get_server_ip( ip = get_server_ip(
server, ext_tag='fixed', key_name='private', mac_addr=fip_mac) server, ext_tag='fixed', key_name='private', mac_addr=fip_mac
)
if ip: if ip:
return ip return ip
# Last resort, and Rackspace # Last resort, and Rackspace
return get_server_ip( return get_server_ip(server, key_name='private')
server, key_name='private')
def get_server_external_ipv4(cloud, server): def get_server_external_ipv4(cloud, server):
@ -183,8 +189,11 @@ def get_server_external_ipv4(cloud, server):
ext_nets = cloud.get_external_ipv4_networks() ext_nets = cloud.get_external_ipv4_networks()
for ext_net in ext_nets: for ext_net in ext_nets:
ext_ip = get_server_ip( ext_ip = get_server_ip(
server, key_name=ext_net['name'], public=True, server,
cloud_public=not cloud.private) key_name=ext_net['name'],
public=True,
cloud_public=not cloud.private,
)
if ext_ip is not None: if ext_ip is not None:
return ext_ip return ext_ip
@ -192,8 +201,8 @@ def get_server_external_ipv4(cloud, server):
# Much as I might find floating IPs annoying, if it has one, that's # Much as I might find floating IPs annoying, if it has one, that's
# almost certainly the one that wants to be used # almost certainly the one that wants to be used
ext_ip = get_server_ip( ext_ip = get_server_ip(
server, ext_tag='floating', public=True, server, ext_tag='floating', public=True, cloud_public=not cloud.private
cloud_public=not cloud.private) )
if ext_ip is not None: if ext_ip is not None:
return ext_ip return ext_ip
@ -203,8 +212,8 @@ def get_server_external_ipv4(cloud, server):
# Try to get an address from a network named 'public' # Try to get an address from a network named 'public'
ext_ip = get_server_ip( ext_ip = get_server_ip(
server, key_name='public', public=True, server, key_name='public', public=True, cloud_public=not cloud.private
cloud_public=not cloud.private) )
if ext_ip is not None: if ext_ip is not None:
return ext_ip return ext_ip
@ -238,15 +247,21 @@ def find_best_address(addresses, public=False, cloud_public=True):
for address in addresses: for address in addresses:
try: try:
for count in utils.iterate_timeout( for count in utils.iterate_timeout(
5, "Timeout waiting for %s" % address, wait=0.1): 5, "Timeout waiting for %s" % address, wait=0.1
):
# Return the first one that is reachable # Return the first one that is reachable
try: try:
for res in socket.getaddrinfo( for res in socket.getaddrinfo(
address, 22, socket.AF_UNSPEC, address,
socket.SOCK_STREAM, 0): 22,
socket.AF_UNSPEC,
socket.SOCK_STREAM,
0,
):
family, socktype, proto, _, sa = res family, socktype, proto, _, sa = res
connect_socket = socket.socket( connect_socket = socket.socket(
family, socktype, proto) family, socktype, proto
)
connect_socket.settimeout(1) connect_socket.settimeout(1)
connect_socket.connect(sa) connect_socket.connect(sa)
return address return address
@ -265,12 +280,13 @@ def find_best_address(addresses, public=False, cloud_public=True):
"The cloud returned multiple addresses %s:, and we could not " "The cloud returned multiple addresses %s:, and we could not "
"connect to port 22 on either. That might be what you wanted, " "connect to port 22 on either. That might be what you wanted, "
"but we have no clue what's going on, so we picked the first one " "but we have no clue what's going on, so we picked the first one "
"%s" % (addresses, addresses[0])) "%s" % (addresses, addresses[0])
)
return addresses[0] return addresses[0]
def get_server_external_ipv6(server): def get_server_external_ipv6(server):
""" Get an IPv6 address reachable from outside the cloud. """Get an IPv6 address reachable from outside the cloud.
This function assumes that if a server has an IPv6 address, that address This function assumes that if a server has an IPv6 address, that address
is reachable from outside the cloud. is reachable from outside the cloud.
@ -286,7 +302,7 @@ def get_server_external_ipv6(server):
def get_server_default_ip(cloud, server): def get_server_default_ip(cloud, server):
""" Get the configured 'default' address """Get the configured 'default' address
It is possible in clouds.yaml to configure for a cloud a network that It is possible in clouds.yaml to configure for a cloud a network that
is the 'default_interface'. This is the network that should be used is the 'default_interface'. This is the network that should be used
@ -299,22 +315,26 @@ def get_server_default_ip(cloud, server):
""" """
ext_net = cloud.get_default_network() ext_net = cloud.get_default_network()
if ext_net: if ext_net:
if (cloud._local_ipv6 and not cloud.force_ipv4): if cloud._local_ipv6 and not cloud.force_ipv4:
# try 6 first, fall back to four # try 6 first, fall back to four
versions = [6, 4] versions = [6, 4]
else: else:
versions = [4] versions = [4]
for version in versions: for version in versions:
ext_ip = get_server_ip( ext_ip = get_server_ip(
server, key_name=ext_net['name'], version=version, public=True, server,
cloud_public=not cloud.private) key_name=ext_net['name'],
version=version,
public=True,
cloud_public=not cloud.private,
)
if ext_ip is not None: if ext_ip is not None:
return ext_ip return ext_ip
return None return None
def _get_interface_ip(cloud, server): def _get_interface_ip(cloud, server):
""" Get the interface IP for the server """Get the interface IP for the server
Interface IP is the IP that should be used for communicating with the Interface IP is the IP that should be used for communicating with the
server. It is: server. It is:
@ -329,7 +349,7 @@ def _get_interface_ip(cloud, server):
if cloud.private and server['private_v4']: if cloud.private and server['private_v4']:
return server['private_v4'] return server['private_v4']
if (server['public_v6'] and cloud._local_ipv6 and not cloud.force_ipv4): if server['public_v6'] and cloud._local_ipv6 and not cloud.force_ipv4:
return server['public_v6'] return server['public_v6']
else: else:
return server['public_v4'] return server['public_v4']
@ -404,15 +424,19 @@ def _get_supplemental_addresses(cloud, server):
try: try:
# Don't bother doing this before the server is active, it's a waste # Don't bother doing this before the server is active, it's a waste
# of an API call while polling for a server to come up # of an API call while polling for a server to come up
if (cloud.has_service('network') if (
and cloud._has_floating_ips() cloud.has_service('network')
and server['status'] == 'ACTIVE'): and cloud._has_floating_ips()
and server['status'] == 'ACTIVE'
):
for port in cloud.search_ports( for port in cloud.search_ports(
filters=dict(device_id=server['id'])): filters=dict(device_id=server['id'])
):
# This SHOULD return one and only one FIP - but doing it as a # This SHOULD return one and only one FIP - but doing it as a
# search/list lets the logic work regardless # search/list lets the logic work regardless
for fip in cloud.search_floating_ips( for fip in cloud.search_floating_ips(
filters=dict(port_id=port['id'])): filters=dict(port_id=port['id'])
):
fixed_net = fixed_ip_mapping.get(fip['fixed_ip_address']) fixed_net = fixed_ip_mapping.get(fip['fixed_ip_address'])
if fixed_net is None: if fixed_net is None:
log = _log.setup_logging('openstack') log = _log.setup_logging('openstack')
@ -422,10 +446,12 @@ def _get_supplemental_addresses(cloud, server):
" with the floating ip in the neutron listing" " with the floating ip in the neutron listing"
" does not exist in the nova listing. Something" " does not exist in the nova listing. Something"
" is exceptionally broken.", " is exceptionally broken.",
dict(fip=fip['id'], server=server['id'])) dict(fip=fip['id'], server=server['id']),
)
else: else:
server['addresses'][fixed_net].append( server['addresses'][fixed_net].append(
_make_address_dict(fip, port)) _make_address_dict(fip, port)
)
except exc.OpenStackCloudException: except exc.OpenStackCloudException:
# If something goes wrong with a cloud call, that's cool - this is # If something goes wrong with a cloud call, that's cool - this is
# an attempt to provide additional data and should not block forward # an attempt to provide additional data and should not block forward
@ -485,8 +511,7 @@ def get_hostvars_from_server(cloud, server, mounts=None):
expand_server_vars if caching is not set up. If caching is set up, expand_server_vars if caching is not set up. If caching is set up,
the extra cost should be minimal. the extra cost should be minimal.
""" """
server_vars = obj_to_munch( server_vars = obj_to_munch(add_server_interfaces(cloud, server))
add_server_interfaces(cloud, server))
flavor_id = server['flavor'].get('id') flavor_id = server['flavor'].get('id')
if flavor_id: if flavor_id:
@ -539,7 +564,7 @@ def get_hostvars_from_server(cloud, server, mounts=None):
def obj_to_munch(obj): def obj_to_munch(obj):
""" Turn an object with attributes into a dict suitable for serializing. """Turn an object with attributes into a dict suitable for serializing.
Some of the things that are returned in OpenStack are objects with Some of the things that are returned in OpenStack are objects with
attributes. That's awesome - except when you want to expose them as JSON attributes. That's awesome - except when you want to expose them as JSON

View File

@ -12,6 +12,7 @@
import copy import copy
import functools import functools
import queue import queue
# import types so that we can reference ListType in sphinx param declarations. # import types so that we can reference ListType in sphinx param declarations.
# We can't just use list, because sphinx gets confused by # We can't just use list, because sphinx gets confused by
# openstack.resource.Resource.list and openstack.resource2.Resource.list # openstack.resource.Resource.list and openstack.resource2.Resource.list
@ -60,6 +61,7 @@ class _OpenStackCloudMixin:
:param bool strict: Only return documented attributes for each resource :param bool strict: Only return documented attributes for each resource
as per the Data Model contract. (Default False) as per the Data Model contract. (Default False)
""" """
_OBJECT_MD5_KEY = 'x-sdk-md5' _OBJECT_MD5_KEY = 'x-sdk-md5'
_OBJECT_SHA256_KEY = 'x-sdk-sha256' _OBJECT_SHA256_KEY = 'x-sdk-sha256'
_OBJECT_AUTOCREATE_KEY = 'x-sdk-autocreated' _OBJECT_AUTOCREATE_KEY = 'x-sdk-autocreated'
@ -90,7 +92,8 @@ class _OpenStackCloudMixin:
# cert verification # cert verification
if not self.verify: if not self.verify:
self.log.debug( self.log.debug(
"Turning off Insecure SSL warnings since verify=False") "Turning off Insecure SSL warnings since verify=False"
)
category = requestsexceptions.InsecureRequestWarning category = requestsexceptions.InsecureRequestWarning
if category: if category:
# InsecureRequestWarning references a Warning class or is None # InsecureRequestWarning references a Warning class or is None
@ -131,19 +134,20 @@ class _OpenStackCloudMixin:
meth_obj = getattr(self, method, None) meth_obj = getattr(self, method, None)
if not meth_obj: if not meth_obj:
continue continue
if (hasattr(meth_obj, 'invalidate') if hasattr(meth_obj, 'invalidate') and hasattr(
and hasattr(meth_obj, 'func')): meth_obj, 'func'
):
new_func = functools.partial(meth_obj.func, self) new_func = functools.partial(meth_obj.func, self)
new_func.invalidate = _fake_invalidate new_func.invalidate = _fake_invalidate
setattr(self, method, new_func) setattr(self, method, new_func)
# Uncoditionally create cache even with a "null" backend # Uncoditionally create cache even with a "null" backend
self._cache = self._make_cache( self._cache = self._make_cache(
cache_class, cache_expiration_time, cache_arguments) cache_class, cache_expiration_time, cache_arguments
)
expirations = self.config.get_cache_expirations() expirations = self.config.get_cache_expirations()
for expire_key in expirations.keys(): for expire_key in expirations.keys():
self._cache_expirations[expire_key] = \ self._cache_expirations[expire_key] = expirations[expire_key]
expirations[expire_key]
# TODO(gtema): delete in next change # TODO(gtema): delete in next change
self._SERVER_AGE = 0 self._SERVER_AGE = 0
@ -159,7 +163,8 @@ class _OpenStackCloudMixin:
self._raw_clients = {} self._raw_clients = {}
self._local_ipv6 = ( self._local_ipv6 = (
_utils.localhost_supports_ipv6() if not self.force_ipv4 else False) _utils.localhost_supports_ipv6() if not self.force_ipv4 else False
)
def connect_as(self, **kwargs): def connect_as(self, **kwargs):
"""Make a new OpenStackCloud object with new auth context. """Make a new OpenStackCloud object with new auth context.
@ -191,7 +196,8 @@ class _OpenStackCloudMixin:
config = openstack.config.OpenStackConfig( config = openstack.config.OpenStackConfig(
app_name=self.config._app_name, app_name=self.config._app_name,
app_version=self.config._app_version, app_version=self.config._app_version,
load_yaml_config=False) load_yaml_config=False,
)
params = copy.deepcopy(self.config.config) params = copy.deepcopy(self.config.config)
# Remove profile from current cloud so that overridding works # Remove profile from current cloud so that overridding works
params.pop('profile', None) params.pop('profile', None)
@ -298,7 +304,8 @@ class _OpenStackCloudMixin:
app_name=self.config._app_name, app_name=self.config._app_name,
app_version=self.config._app_version, app_version=self.config._app_version,
discovery_cache=self.session._discovery_cache, discovery_cache=self.session._discovery_cache,
**params) **params
)
# Override the cloud name so that logging/location work right # Override the cloud name so that logging/location work right
cloud_region._name = self.name cloud_region._name = self.name
@ -313,9 +320,8 @@ class _OpenStackCloudMixin:
return dogpile.cache.make_region( return dogpile.cache.make_region(
function_key_generator=self._make_cache_key function_key_generator=self._make_cache_key
).configure( ).configure(
cache_class, cache_class, expiration_time=expiration_time, arguments=arguments
expiration_time=expiration_time, )
arguments=arguments)
def _make_cache_key(self, namespace, fn): def _make_cache_key(self, namespace, fn):
fname = fn.__name__ fname = fn.__name__
@ -329,10 +335,11 @@ class _OpenStackCloudMixin:
arg_key = '' arg_key = ''
kw_keys = sorted(kwargs.keys()) kw_keys = sorted(kwargs.keys())
kwargs_key = ','.join( kwargs_key = ','.join(
['%s:%s' % (k, kwargs[k]) for k in kw_keys if k != 'cache']) ['%s:%s' % (k, kwargs[k]) for k in kw_keys if k != 'cache']
ans = "_".join( )
[str(name_key), fname, arg_key, kwargs_key]) ans = "_".join([str(name_key), fname, arg_key, kwargs_key])
return ans return ans
return generate_key return generate_key
def _get_cache(self, resource_name): def _get_cache(self, resource_name):
@ -349,7 +356,8 @@ class _OpenStackCloudMixin:
return version return version
def _get_versioned_client( def _get_versioned_client(
self, service_type, min_version=None, max_version=None): self, service_type, min_version=None, max_version=None
):
config_version = self.config.get_api_version(service_type) config_version = self.config.get_api_version(service_type)
config_major = self._get_major_version_id(config_version) config_major = self._get_major_version_id(config_version)
max_major = self._get_major_version_id(max_version) max_major = self._get_major_version_id(max_version)
@ -372,7 +380,9 @@ class _OpenStackCloudMixin:
" but shade understands a minimum of {min_version}".format( " but shade understands a minimum of {min_version}".format(
config_version=config_version, config_version=config_version,
service_type=service_type, service_type=service_type,
min_version=min_version)) min_version=min_version,
)
)
elif max_major and config_major > max_major: elif max_major and config_major > max_major:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"Version {config_version} requested for {service_type}" "Version {config_version} requested for {service_type}"
@ -380,10 +390,13 @@ class _OpenStackCloudMixin:
" {max_version}".format( " {max_version}".format(
config_version=config_version, config_version=config_version,
service_type=service_type, service_type=service_type,
max_version=max_version)) max_version=max_version,
)
)
request_min_version = config_version request_min_version = config_version
request_max_version = '{version}.latest'.format( request_max_version = '{version}.latest'.format(
version=config_major) version=config_major
)
adapter = proxy._ShadeAdapter( adapter = proxy._ShadeAdapter(
session=self.session, session=self.session,
service_type=self.config.get_service_type(service_type), service_type=self.config.get_service_type(service_type),
@ -397,7 +410,8 @@ class _OpenStackCloudMixin:
prometheus_histogram=self.config.get_prometheus_histogram(), prometheus_histogram=self.config.get_prometheus_histogram(),
influxdb_client=self.config.get_influxdb_client(), influxdb_client=self.config.get_influxdb_client(),
min_version=request_min_version, min_version=request_min_version,
max_version=request_max_version) max_version=request_max_version,
)
if adapter.get_endpoint(): if adapter.get_endpoint():
return adapter return adapter
@ -409,12 +423,14 @@ class _OpenStackCloudMixin:
endpoint_override=self.config.get_endpoint(service_type), endpoint_override=self.config.get_endpoint(service_type),
region_name=self.config.get_region_name(service_type), region_name=self.config.get_region_name(service_type),
min_version=min_version, min_version=min_version,
max_version=max_version) max_version=max_version,
)
# data.api_version can be None if no version was detected, such # data.api_version can be None if no version was detected, such
# as with neutron # as with neutron
api_version = adapter.get_api_major_version( api_version = adapter.get_api_major_version(
endpoint_override=self.config.get_endpoint(service_type)) endpoint_override=self.config.get_endpoint(service_type)
)
api_major = self._get_major_version_id(api_version) api_major = self._get_major_version_id(api_version)
# If we detect a different version that was configured, warn the user. # If we detect a different version that was configured, warn the user.
@ -430,7 +446,9 @@ class _OpenStackCloudMixin:
' your config.'.format( ' your config.'.format(
service_type=service_type, service_type=service_type,
config_version=config_version, config_version=config_version,
api_version='.'.join([str(f) for f in api_version]))) api_version='.'.join([str(f) for f in api_version]),
)
)
self.log.debug(warning_msg) self.log.debug(warning_msg)
warnings.warn(warning_msg) warnings.warn(warning_msg)
return adapter return adapter
@ -438,19 +456,22 @@ class _OpenStackCloudMixin:
# TODO(shade) This should be replaced with using openstack Connection # TODO(shade) This should be replaced with using openstack Connection
# object. # object.
def _get_raw_client( def _get_raw_client(
self, service_type, api_version=None, endpoint_override=None): self, service_type, api_version=None, endpoint_override=None
):
return proxy._ShadeAdapter( return proxy._ShadeAdapter(
session=self.session, session=self.session,
service_type=self.config.get_service_type(service_type), service_type=self.config.get_service_type(service_type),
service_name=self.config.get_service_name(service_type), service_name=self.config.get_service_name(service_type),
interface=self.config.get_interface(service_type), interface=self.config.get_interface(service_type),
endpoint_override=self.config.get_endpoint( endpoint_override=self.config.get_endpoint(service_type)
service_type) or endpoint_override, or endpoint_override,
region_name=self.config.get_region_name(service_type)) region_name=self.config.get_region_name(service_type),
)
def _is_client_version(self, client, version): def _is_client_version(self, client, version):
client_name = '_{client}_client'.format( client_name = '_{client}_client'.format(
client=client.replace('-', '_')) client=client.replace('-', '_')
)
client = getattr(self, client_name) client = getattr(self, client_name)
return client._version_matches(version) return client._version_matches(version)
@ -458,7 +479,8 @@ class _OpenStackCloudMixin:
def _application_catalog_client(self): def _application_catalog_client(self):
if 'application-catalog' not in self._raw_clients: if 'application-catalog' not in self._raw_clients:
self._raw_clients['application-catalog'] = self._get_raw_client( self._raw_clients['application-catalog'] = self._get_raw_client(
'application-catalog') 'application-catalog'
)
return self._raw_clients['application-catalog'] return self._raw_clients['application-catalog']
@property @property
@ -478,6 +500,7 @@ class _OpenStackCloudMixin:
"""Wrapper around pprint that groks munch objects""" """Wrapper around pprint that groks munch objects"""
# import late since this is a utility function # import late since this is a utility function
import pprint import pprint
new_resource = _utils._dictify_resource(resource) new_resource = _utils._dictify_resource(resource)
pprint.pprint(new_resource) pprint.pprint(new_resource)
@ -485,6 +508,7 @@ class _OpenStackCloudMixin:
"""Wrapper around pformat that groks munch objects""" """Wrapper around pformat that groks munch objects"""
# import late since this is a utility function # import late since this is a utility function
import pprint import pprint
new_resource = _utils._dictify_resource(resource) new_resource = _utils._dictify_resource(resource)
return pprint.pformat(new_resource) return pprint.pformat(new_resource)
@ -521,7 +545,8 @@ class _OpenStackCloudMixin:
return self.config.get_endpoint_from_catalog( return self.config.get_endpoint_from_catalog(
service_type=service_type, service_type=service_type,
interface=interface, interface=interface,
region_name=region_name) region_name=region_name,
)
@property @property
def auth_token(self): def auth_token(self):
@ -600,10 +625,9 @@ class _OpenStackCloudMixin:
region_name=None, region_name=None,
zone=None, zone=None,
project=utils.Munch( project=utils.Munch(
id=None, id=None, name=None, domain_id=None, domain_name=None
name=None, ),
domain_id=None, )
domain_name=None))
def _get_project_id_param_dict(self, name_or_id): def _get_project_id_param_dict(self, name_or_id):
if name_or_id: if name_or_id:
@ -628,7 +652,8 @@ class _OpenStackCloudMixin:
if not domain_id: if not domain_id:
raise exc.OpenStackCloudException( raise exc.OpenStackCloudException(
"User or project creation requires an explicit" "User or project creation requires an explicit"
" domain_id argument.") " domain_id argument."
)
else: else:
return {'domain_id': domain_id} return {'domain_id': domain_id}
else: else:
@ -714,7 +739,8 @@ class _OpenStackCloudMixin:
return self.config.get_session_endpoint(service_key, **kwargs) return self.config.get_session_endpoint(service_key, **kwargs)
except keystoneauth1.exceptions.catalog.EndpointNotFound as e: except keystoneauth1.exceptions.catalog.EndpointNotFound as e:
self.log.debug( self.log.debug(
"Endpoint not found in %s cloud: %s", self.name, str(e)) "Endpoint not found in %s cloud: %s", self.name, str(e)
)
endpoint = None endpoint = None
except exc.OpenStackCloudException: except exc.OpenStackCloudException:
raise raise
@ -725,17 +751,22 @@ class _OpenStackCloudMixin:
service=service_key, service=service_key,
cloud=self.name, cloud=self.name,
region=self.config.get_region_name(service_key), region=self.config.get_region_name(service_key),
error=str(e))) error=str(e),
)
)
return endpoint return endpoint
def has_service(self, service_key, version=None): def has_service(self, service_key, version=None):
if not self.config.has_service(service_key): if not self.config.has_service(service_key):
# TODO(mordred) add a stamp here so that we only report this once # TODO(mordred) add a stamp here so that we only report this once
if not (service_key in self._disable_warnings if not (
and self._disable_warnings[service_key]): service_key in self._disable_warnings
and self._disable_warnings[service_key]
):
self.log.debug( self.log.debug(
"Disabling %(service_key)s entry in catalog" "Disabling %(service_key)s entry in catalog" " per config",
" per config", {'service_key': service_key}) {'service_key': service_key},
)
self._disable_warnings[service_key] = True self._disable_warnings[service_key] = True
return False return False
try: try:
@ -786,26 +817,23 @@ class _OpenStackCloudMixin:
(service_name, resource_name) = resource_type.split('.') (service_name, resource_name) = resource_type.split('.')
if not hasattr(self, service_name): if not hasattr(self, service_name):
raise exceptions.SDKException( raise exceptions.SDKException(
"service %s is not existing/enabled" % "service %s is not existing/enabled" % service_name
service_name
) )
service_proxy = getattr(self, service_name) service_proxy = getattr(self, service_name)
try: try:
resource_type = service_proxy._resource_registry[resource_name] resource_type = service_proxy._resource_registry[resource_name]
except KeyError: except KeyError:
raise exceptions.SDKException( raise exceptions.SDKException(
"Resource %s is not known in service %s" % "Resource %s is not known in service %s"
(resource_name, service_name) % (resource_name, service_name)
) )
if name_or_id: if name_or_id:
# name_or_id is definitely not None # name_or_id is definitely not None
try: try:
resource_by_id = service_proxy._get( resource_by_id = service_proxy._get(
resource_type, resource_type, name_or_id, *get_args, **get_kwargs
name_or_id, )
*get_args,
**get_kwargs)
return [resource_by_id] return [resource_by_id]
except exceptions.ResourceNotFound: except exceptions.ResourceNotFound:
pass pass
@ -817,11 +845,9 @@ class _OpenStackCloudMixin:
filters["name"] = name_or_id filters["name"] = name_or_id
list_kwargs.update(filters) list_kwargs.update(filters)
return list(service_proxy._list( return list(
resource_type, service_proxy._list(resource_type, *list_args, **list_kwargs)
*list_args, )
**list_kwargs
))
def project_cleanup( def project_cleanup(
self, self,
@ -829,7 +855,7 @@ class _OpenStackCloudMixin:
wait_timeout=120, wait_timeout=120,
status_queue=None, status_queue=None,
filters=None, filters=None,
resource_evaluation_fn=None resource_evaluation_fn=None,
): ):
"""Cleanup the project resources. """Cleanup the project resources.
@ -866,7 +892,7 @@ class _OpenStackCloudMixin:
dependencies.update(deps) dependencies.update(deps)
except ( except (
exceptions.NotSupported, exceptions.NotSupported,
exceptions.ServiceDisabledException exceptions.ServiceDisabledException,
): ):
# Cloud may include endpoint in catalog but not # Cloud may include endpoint in catalog but not
# implement the service or disable it # implement the service or disable it
@ -895,7 +921,7 @@ class _OpenStackCloudMixin:
client_status_queue=status_queue, client_status_queue=status_queue,
identified_resources=cleanup_resources, identified_resources=cleanup_resources,
filters=filters, filters=filters,
resource_evaluation_fn=resource_evaluation_fn resource_evaluation_fn=resource_evaluation_fn,
) )
except exceptions.ServiceDisabledException: except exceptions.ServiceDisabledException:
# same reason as above # same reason as above
@ -908,9 +934,10 @@ class _OpenStackCloudMixin:
dep_graph.node_done(service) dep_graph.node_done(service)
for count in utils.iterate_timeout( for count in utils.iterate_timeout(
timeout=wait_timeout, timeout=wait_timeout,
message="Timeout waiting for cleanup to finish", message="Timeout waiting for cleanup to finish",
wait=1): wait=1,
):
if dep_graph.is_complete(): if dep_graph.is_complete():
return return

View File

@ -21,7 +21,6 @@ from openstack.tests.functional import base
class TestAggregate(base.BaseFunctionalTest): class TestAggregate(base.BaseFunctionalTest):
def test_aggregates(self): def test_aggregates(self):
if not self.operator_cloud: if not self.operator_cloud:
self.skipTest("Operator cloud is required for this test") self.skipTest("Operator cloud is required for this test")
@ -30,31 +29,28 @@ class TestAggregate(base.BaseFunctionalTest):
self.addCleanup(self.cleanup, aggregate_name) self.addCleanup(self.cleanup, aggregate_name)
aggregate = self.operator_cloud.create_aggregate(aggregate_name) aggregate = self.operator_cloud.create_aggregate(aggregate_name)
aggregate_ids = [v['id'] aggregate_ids = [
for v in self.operator_cloud.list_aggregates()] v['id'] for v in self.operator_cloud.list_aggregates()
]
self.assertIn(aggregate['id'], aggregate_ids) self.assertIn(aggregate['id'], aggregate_ids)
aggregate = self.operator_cloud.update_aggregate( aggregate = self.operator_cloud.update_aggregate(
aggregate_name, aggregate_name, availability_zone=availability_zone
availability_zone=availability_zone
) )
self.assertEqual(availability_zone, aggregate['availability_zone']) self.assertEqual(availability_zone, aggregate['availability_zone'])
aggregate = self.operator_cloud.set_aggregate_metadata( aggregate = self.operator_cloud.set_aggregate_metadata(
aggregate_name, aggregate_name, {'key': 'value'}
{'key': 'value'}
) )
self.assertIn('key', aggregate['metadata']) self.assertIn('key', aggregate['metadata'])
aggregate = self.operator_cloud.set_aggregate_metadata( aggregate = self.operator_cloud.set_aggregate_metadata(
aggregate_name, aggregate_name, {'key': None}
{'key': None}
) )
self.assertNotIn('key', aggregate['metadata']) self.assertNotIn('key', aggregate['metadata'])
# Validate that we can delete by name # Validate that we can delete by name
self.assertTrue( self.assertTrue(self.operator_cloud.delete_aggregate(aggregate_name))
self.operator_cloud.delete_aggregate(aggregate_name))
def cleanup(self, aggregate_name): def cleanup(self, aggregate_name):
aggregate = self.operator_cloud.get_aggregate(aggregate_name) aggregate = self.operator_cloud.get_aggregate(aggregate_name)

View File

@ -26,7 +26,6 @@ from openstack.tests.functional import base
class TestClusterTemplate(base.BaseFunctionalTest): class TestClusterTemplate(base.BaseFunctionalTest):
def setUp(self): def setUp(self):
super(TestClusterTemplate, self).setUp() super(TestClusterTemplate, self).setUp()
if not self.user_cloud.has_service( if not self.user_cloud.has_service(
@ -52,8 +51,16 @@ class TestClusterTemplate(base.BaseFunctionalTest):
# generate a keypair to add to nova # generate a keypair to add to nova
subprocess.call( subprocess.call(
['ssh-keygen', '-t', 'rsa', '-N', '', '-f', [
'%s/id_rsa_sdk' % self.ssh_directory]) 'ssh-keygen',
'-t',
'rsa',
'-N',
'',
'-f',
'%s/id_rsa_sdk' % self.ssh_directory,
]
)
# add keypair to nova # add keypair to nova
with open('%s/id_rsa_sdk.pub' % self.ssh_directory) as f: with open('%s/id_rsa_sdk.pub' % self.ssh_directory) as f:
@ -62,8 +69,8 @@ class TestClusterTemplate(base.BaseFunctionalTest):
# Test we can create a cluster_template and we get it returned # Test we can create a cluster_template and we get it returned
self.ct = self.user_cloud.create_cluster_template( self.ct = self.user_cloud.create_cluster_template(
name=name, image_id=image_id, name=name, image_id=image_id, keypair_id=keypair_id, coe=coe
keypair_id=keypair_id, coe=coe) )
self.assertEqual(self.ct['name'], name) self.assertEqual(self.ct['name'], name)
self.assertEqual(self.ct['image_id'], image_id) self.assertEqual(self.ct['image_id'], image_id)
self.assertEqual(self.ct['keypair_id'], keypair_id) self.assertEqual(self.ct['keypair_id'], keypair_id)
@ -80,7 +87,8 @@ class TestClusterTemplate(base.BaseFunctionalTest):
# Test we get the same cluster_template with the # Test we get the same cluster_template with the
# get_cluster_template method # get_cluster_template method
cluster_template_get = self.user_cloud.get_cluster_template( cluster_template_get = self.user_cloud.get_cluster_template(
self.ct['uuid']) self.ct['uuid']
)
self.assertEqual(cluster_template_get['uuid'], self.ct['uuid']) self.assertEqual(cluster_template_get['uuid'], self.ct['uuid'])
# Test the get method also works by name # Test the get method also works by name
@ -90,14 +98,15 @@ class TestClusterTemplate(base.BaseFunctionalTest):
# Test we can update a field on the cluster_template and only that # Test we can update a field on the cluster_template and only that
# field is updated # field is updated
cluster_template_update = self.user_cloud.update_cluster_template( cluster_template_update = self.user_cloud.update_cluster_template(
self.ct, tls_disabled=True) self.ct, tls_disabled=True
self.assertEqual( )
cluster_template_update['uuid'], self.ct['uuid']) self.assertEqual(cluster_template_update['uuid'], self.ct['uuid'])
self.assertTrue(cluster_template_update['tls_disabled']) self.assertTrue(cluster_template_update['tls_disabled'])
# Test we can delete and get True returned # Test we can delete and get True returned
cluster_template_delete = self.user_cloud.delete_cluster_template( cluster_template_delete = self.user_cloud.delete_cluster_template(
self.ct['uuid']) self.ct['uuid']
)
self.assertTrue(cluster_template_delete) self.assertTrue(cluster_template_delete)
def cleanup(self, name): def cleanup(self, name):

File diff suppressed because it is too large Load Diff

View File

@ -59,7 +59,8 @@ class TestCompute(base.BaseFunctionalTest):
self.user_cloud.delete_server(server.name) self.user_cloud.delete_server(server.name)
for volume in volumes: for volume in volumes:
self.operator_cloud.delete_volume( self.operator_cloud.delete_volume(
volume.id, wait=False, force=True) volume.id, wait=False, force=True
)
def test_create_and_delete_server(self): def test_create_and_delete_server(self):
self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) self.addCleanup(self._cleanup_servers_and_volumes, self.server_name)
@ -67,13 +68,15 @@ class TestCompute(base.BaseFunctionalTest):
name=self.server_name, name=self.server_name,
image=self.image, image=self.image,
flavor=self.flavor, flavor=self.flavor,
wait=True) wait=True,
)
self.assertEqual(self.server_name, server['name']) self.assertEqual(self.server_name, server['name'])
self.assertEqual(self.image.id, server['image']['id']) self.assertEqual(self.image.id, server['image']['id'])
self.assertEqual(self.flavor.name, server['flavor']['original_name']) self.assertEqual(self.flavor.name, server['flavor']['original_name'])
self.assertIsNotNone(server['adminPass']) self.assertIsNotNone(server['adminPass'])
self.assertTrue( self.assertTrue(
self.user_cloud.delete_server(self.server_name, wait=True)) self.user_cloud.delete_server(self.server_name, wait=True)
)
srv = self.user_cloud.get_server(self.server_name) srv = self.user_cloud.get_server(self.server_name)
self.assertTrue(srv is None or srv.status.lower() == 'deleted') self.assertTrue(srv is None or srv.status.lower() == 'deleted')
@ -84,14 +87,17 @@ class TestCompute(base.BaseFunctionalTest):
image=self.image, image=self.image,
flavor=self.flavor, flavor=self.flavor,
auto_ip=True, auto_ip=True,
wait=True) wait=True,
)
self.assertEqual(self.server_name, server['name']) self.assertEqual(self.server_name, server['name'])
self.assertEqual(self.image.id, server['image']['id']) self.assertEqual(self.image.id, server['image']['id'])
self.assertEqual(self.flavor.name, server['flavor']['original_name']) self.assertEqual(self.flavor.name, server['flavor']['original_name'])
self.assertIsNotNone(server['adminPass']) self.assertIsNotNone(server['adminPass'])
self.assertTrue( self.assertTrue(
self.user_cloud.delete_server( self.user_cloud.delete_server(
self.server_name, wait=True, delete_ips=True)) self.server_name, wait=True, delete_ips=True
)
)
srv = self.user_cloud.get_server(self.server_name) srv = self.user_cloud.get_server(self.server_name)
self.assertTrue(srv is None or srv.status.lower() == 'deleted') self.assertTrue(srv is None or srv.status.lower() == 'deleted')
@ -100,8 +106,8 @@ class TestCompute(base.BaseFunctionalTest):
server_name = self.getUniqueString() server_name = self.getUniqueString()
self.addCleanup(self._cleanup_servers_and_volumes, server_name) self.addCleanup(self._cleanup_servers_and_volumes, server_name)
server = self.user_cloud.create_server( server = self.user_cloud.create_server(
name=server_name, image=self.image, flavor=self.flavor, name=server_name, image=self.image, flavor=self.flavor, wait=True
wait=True) )
volume = self.user_cloud.create_volume(1) volume = self.user_cloud.create_volume(1)
vol_attachment = self.user_cloud.attach_volume(server, volume) vol_attachment = self.user_cloud.attach_volume(server, volume)
for key in ('device', 'serverId', 'volumeId'): for key in ('device', 'serverId', 'volumeId'):
@ -116,14 +122,16 @@ class TestCompute(base.BaseFunctionalTest):
image=self.image, image=self.image,
flavor=self.flavor, flavor=self.flavor,
config_drive=True, config_drive=True,
wait=True) wait=True,
)
self.assertEqual(self.server_name, server['name']) self.assertEqual(self.server_name, server['name'])
self.assertEqual(self.image.id, server['image']['id']) self.assertEqual(self.image.id, server['image']['id'])
self.assertEqual(self.flavor.name, server['flavor']['original_name']) self.assertEqual(self.flavor.name, server['flavor']['original_name'])
self.assertTrue(server['has_config_drive']) self.assertTrue(server['has_config_drive'])
self.assertIsNotNone(server['adminPass']) self.assertIsNotNone(server['adminPass'])
self.assertTrue( self.assertTrue(
self.user_cloud.delete_server(self.server_name, wait=True)) self.user_cloud.delete_server(self.server_name, wait=True)
)
srv = self.user_cloud.get_server(self.server_name) srv = self.user_cloud.get_server(self.server_name)
self.assertTrue(srv is None or srv.status.lower() == 'deleted') self.assertTrue(srv is None or srv.status.lower() == 'deleted')
@ -137,15 +145,16 @@ class TestCompute(base.BaseFunctionalTest):
image=self.image, image=self.image,
flavor=self.flavor, flavor=self.flavor,
config_drive=None, config_drive=None,
wait=True) wait=True,
)
self.assertEqual(self.server_name, server['name']) self.assertEqual(self.server_name, server['name'])
self.assertEqual(self.image.id, server['image']['id']) self.assertEqual(self.image.id, server['image']['id'])
self.assertEqual(self.flavor.name, server['flavor']['original_name']) self.assertEqual(self.flavor.name, server['flavor']['original_name'])
self.assertFalse(server['has_config_drive']) self.assertFalse(server['has_config_drive'])
self.assertIsNotNone(server['adminPass']) self.assertIsNotNone(server['adminPass'])
self.assertTrue( self.assertTrue(
self.user_cloud.delete_server( self.user_cloud.delete_server(self.server_name, wait=True)
self.server_name, wait=True)) )
srv = self.user_cloud.get_server(self.server_name) srv = self.user_cloud.get_server(self.server_name)
self.assertTrue(srv is None or srv.status.lower() == 'deleted') self.assertTrue(srv is None or srv.status.lower() == 'deleted')
@ -157,7 +166,8 @@ class TestCompute(base.BaseFunctionalTest):
name=self.server_name, name=self.server_name,
image=self.image, image=self.image,
flavor=self.flavor, flavor=self.flavor,
wait=True) wait=True,
)
# We're going to get servers from other tests, but that's ok, as long # We're going to get servers from other tests, but that's ok, as long
# as we get the server we created with the demo user. # as we get the server we created with the demo user.
found_server = False found_server = False
@ -171,7 +181,8 @@ class TestCompute(base.BaseFunctionalTest):
self.assertRaises( self.assertRaises(
exc.OpenStackCloudException, exc.OpenStackCloudException,
self.user_cloud.list_servers, self.user_cloud.list_servers,
all_projects=True) all_projects=True,
)
def test_create_server_image_flavor_dict(self): def test_create_server_image_flavor_dict(self):
self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) self.addCleanup(self._cleanup_servers_and_volumes, self.server_name)
@ -179,13 +190,15 @@ class TestCompute(base.BaseFunctionalTest):
name=self.server_name, name=self.server_name,
image={'id': self.image.id}, image={'id': self.image.id},
flavor={'id': self.flavor.id}, flavor={'id': self.flavor.id},
wait=True) wait=True,
)
self.assertEqual(self.server_name, server['name']) self.assertEqual(self.server_name, server['name'])
self.assertEqual(self.image.id, server['image']['id']) self.assertEqual(self.image.id, server['image']['id'])
self.assertEqual(self.flavor.name, server['flavor']['original_name']) self.assertEqual(self.flavor.name, server['flavor']['original_name'])
self.assertIsNotNone(server['adminPass']) self.assertIsNotNone(server['adminPass'])
self.assertTrue( self.assertTrue(
self.user_cloud.delete_server(self.server_name, wait=True)) self.user_cloud.delete_server(self.server_name, wait=True)
)
srv = self.user_cloud.get_server(self.server_name) srv = self.user_cloud.get_server(self.server_name)
self.assertTrue(srv is None or srv.status.lower() == 'deleted') self.assertTrue(srv is None or srv.status.lower() == 'deleted')
@ -195,7 +208,8 @@ class TestCompute(base.BaseFunctionalTest):
name=self.server_name, name=self.server_name,
image=self.image, image=self.image,
flavor=self.flavor, flavor=self.flavor,
wait=True) wait=True,
)
# _get_server_console_output does not trap HTTP exceptions, so this # _get_server_console_output does not trap HTTP exceptions, so this
# returning a string tests that the call is correct. Testing that # returning a string tests that the call is correct. Testing that
# the cloud returns actual data in the output is out of scope. # the cloud returns actual data in the output is out of scope.
@ -208,19 +222,22 @@ class TestCompute(base.BaseFunctionalTest):
name=self.server_name, name=self.server_name,
image=self.image, image=self.image,
flavor=self.flavor, flavor=self.flavor,
wait=True) wait=True,
)
log = self.user_cloud.get_server_console(server=self.server_name) log = self.user_cloud.get_server_console(server=self.server_name)
self.assertIsInstance(log, str) self.assertIsInstance(log, str)
def test_list_availability_zone_names(self): def test_list_availability_zone_names(self):
self.assertEqual( self.assertEqual(
['nova'], self.user_cloud.list_availability_zone_names()) ['nova'], self.user_cloud.list_availability_zone_names()
)
def test_get_server_console_bad_server(self): def test_get_server_console_bad_server(self):
self.assertRaises( self.assertRaises(
exc.OpenStackCloudException, exc.OpenStackCloudException,
self.user_cloud.get_server_console, self.user_cloud.get_server_console,
server=self.server_name) server=self.server_name,
)
def test_create_and_delete_server_with_admin_pass(self): def test_create_and_delete_server_with_admin_pass(self):
self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) self.addCleanup(self._cleanup_servers_and_volumes, self.server_name)
@ -229,27 +246,33 @@ class TestCompute(base.BaseFunctionalTest):
image=self.image, image=self.image,
flavor=self.flavor, flavor=self.flavor,
admin_pass='sheiqu9loegahSh', admin_pass='sheiqu9loegahSh',
wait=True) wait=True,
)
self.assertEqual(self.server_name, server['name']) self.assertEqual(self.server_name, server['name'])
self.assertEqual(self.image.id, server['image']['id']) self.assertEqual(self.image.id, server['image']['id'])
self.assertEqual(self.flavor.name, server['flavor']['original_name']) self.assertEqual(self.flavor.name, server['flavor']['original_name'])
self.assertEqual(server['adminPass'], 'sheiqu9loegahSh') self.assertEqual(server['adminPass'], 'sheiqu9loegahSh')
self.assertTrue( self.assertTrue(
self.user_cloud.delete_server(self.server_name, wait=True)) self.user_cloud.delete_server(self.server_name, wait=True)
)
srv = self.user_cloud.get_server(self.server_name) srv = self.user_cloud.get_server(self.server_name)
self.assertTrue(srv is None or srv.status.lower() == 'deleted') self.assertTrue(srv is None or srv.status.lower() == 'deleted')
def test_get_image_id(self): def test_get_image_id(self):
self.assertEqual( self.assertEqual(
self.image.id, self.user_cloud.get_image_id(self.image.id)) self.image.id, self.user_cloud.get_image_id(self.image.id)
)
self.assertEqual( self.assertEqual(
self.image.id, self.user_cloud.get_image_id(self.image.name)) self.image.id, self.user_cloud.get_image_id(self.image.name)
)
def test_get_image_name(self): def test_get_image_name(self):
self.assertEqual( self.assertEqual(
self.image.name, self.user_cloud.get_image_name(self.image.id)) self.image.name, self.user_cloud.get_image_name(self.image.id)
)
self.assertEqual( self.assertEqual(
self.image.name, self.user_cloud.get_image_name(self.image.name)) self.image.name, self.user_cloud.get_image_name(self.image.name)
)
def _assert_volume_attach(self, server, volume_id=None, image=''): def _assert_volume_attach(self, server, volume_id=None, image=''):
self.assertEqual(self.server_name, server['name']) self.assertEqual(self.server_name, server['name'])
@ -277,7 +300,8 @@ class TestCompute(base.BaseFunctionalTest):
flavor=self.flavor, flavor=self.flavor,
boot_from_volume=True, boot_from_volume=True,
volume_size=1, volume_size=1,
wait=True) wait=True,
)
volume_id = self._assert_volume_attach(server) volume_id = self._assert_volume_attach(server)
volume = self.user_cloud.get_volume(volume_id) volume = self.user_cloud.get_volume(volume_id)
self.assertIsNotNone(volume) self.assertIsNotNone(volume)
@ -296,13 +320,18 @@ class TestCompute(base.BaseFunctionalTest):
# deleting a server that had had a volume attached. Yay for eventual # deleting a server that had had a volume attached. Yay for eventual
# consistency! # consistency!
for count in utils.iterate_timeout( for count in utils.iterate_timeout(
60, 60,
'Timeout waiting for volume {volume_id} to detach'.format( 'Timeout waiting for volume {volume_id} to detach'.format(
volume_id=volume_id)): volume_id=volume_id
),
):
volume = self.user_cloud.get_volume(volume_id) volume = self.user_cloud.get_volume(volume_id)
if volume.status in ( if volume.status in (
'available', 'error', 'available',
'error_restoring', 'error_extending'): 'error',
'error_restoring',
'error_extending',
):
return return
def test_create_terminate_volume_image(self): def test_create_terminate_volume_image(self):
@ -317,10 +346,12 @@ class TestCompute(base.BaseFunctionalTest):
boot_from_volume=True, boot_from_volume=True,
terminate_volume=True, terminate_volume=True,
volume_size=1, volume_size=1,
wait=True) wait=True,
)
volume_id = self._assert_volume_attach(server) volume_id = self._assert_volume_attach(server)
self.assertTrue( self.assertTrue(
self.user_cloud.delete_server(self.server_name, wait=True)) self.user_cloud.delete_server(self.server_name, wait=True)
)
volume = self.user_cloud.get_volume(volume_id) volume = self.user_cloud.get_volume(volume_id)
# We can either get None (if the volume delete was quick), or a volume # We can either get None (if the volume delete was quick), or a volume
# that is in the process of being deleted. # that is in the process of being deleted.
@ -335,7 +366,8 @@ class TestCompute(base.BaseFunctionalTest):
self.skipTest('volume service not supported by cloud') self.skipTest('volume service not supported by cloud')
self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) self.addCleanup(self._cleanup_servers_and_volumes, self.server_name)
volume = self.user_cloud.create_volume( volume = self.user_cloud.create_volume(
size=1, name=self.server_name, image=self.image, wait=True) size=1, name=self.server_name, image=self.image, wait=True
)
self.addCleanup(self.user_cloud.delete_volume, volume.id) self.addCleanup(self.user_cloud.delete_volume, volume.id)
server = self.user_cloud.create_server( server = self.user_cloud.create_server(
name=self.server_name, name=self.server_name,
@ -343,10 +375,12 @@ class TestCompute(base.BaseFunctionalTest):
flavor=self.flavor, flavor=self.flavor,
boot_volume=volume, boot_volume=volume,
volume_size=1, volume_size=1,
wait=True) wait=True,
)
volume_id = self._assert_volume_attach(server, volume_id=volume['id']) volume_id = self._assert_volume_attach(server, volume_id=volume['id'])
self.assertTrue( self.assertTrue(
self.user_cloud.delete_server(self.server_name, wait=True)) self.user_cloud.delete_server(self.server_name, wait=True)
)
volume = self.user_cloud.get_volume(volume_id) volume = self.user_cloud.get_volume(volume_id)
self.assertIsNotNone(volume) self.assertIsNotNone(volume)
self.assertEqual(volume['name'], volume['display_name']) self.assertEqual(volume['name'], volume['display_name'])
@ -364,7 +398,8 @@ class TestCompute(base.BaseFunctionalTest):
self.skipTest('volume service not supported by cloud') self.skipTest('volume service not supported by cloud')
self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) self.addCleanup(self._cleanup_servers_and_volumes, self.server_name)
volume = self.user_cloud.create_volume( volume = self.user_cloud.create_volume(
size=1, name=self.server_name, image=self.image, wait=True) size=1, name=self.server_name, image=self.image, wait=True
)
self.addCleanup(self.user_cloud.delete_volume, volume['id']) self.addCleanup(self.user_cloud.delete_volume, volume['id'])
server = self.user_cloud.create_server( server = self.user_cloud.create_server(
name=self.server_name, name=self.server_name,
@ -372,11 +407,14 @@ class TestCompute(base.BaseFunctionalTest):
image=self.image, image=self.image,
boot_from_volume=False, boot_from_volume=False,
volumes=[volume], volumes=[volume],
wait=True) wait=True,
)
volume_id = self._assert_volume_attach( volume_id = self._assert_volume_attach(
server, volume_id=volume['id'], image={'id': self.image['id']}) server, volume_id=volume['id'], image={'id': self.image['id']}
)
self.assertTrue( self.assertTrue(
self.user_cloud.delete_server(self.server_name, wait=True)) self.user_cloud.delete_server(self.server_name, wait=True)
)
volume = self.user_cloud.get_volume(volume_id) volume = self.user_cloud.get_volume(volume_id)
self.assertIsNotNone(volume) self.assertIsNotNone(volume)
self.assertEqual(volume['name'], volume['display_name']) self.assertEqual(volume['name'], volume['display_name'])
@ -393,7 +431,8 @@ class TestCompute(base.BaseFunctionalTest):
self.skipTest('volume service not supported by cloud') self.skipTest('volume service not supported by cloud')
self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) self.addCleanup(self._cleanup_servers_and_volumes, self.server_name)
volume = self.user_cloud.create_volume( volume = self.user_cloud.create_volume(
size=1, name=self.server_name, image=self.image, wait=True) size=1, name=self.server_name, image=self.image, wait=True
)
server = self.user_cloud.create_server( server = self.user_cloud.create_server(
name=self.server_name, name=self.server_name,
image=None, image=None,
@ -401,10 +440,12 @@ class TestCompute(base.BaseFunctionalTest):
boot_volume=volume, boot_volume=volume,
terminate_volume=True, terminate_volume=True,
volume_size=1, volume_size=1,
wait=True) wait=True,
)
volume_id = self._assert_volume_attach(server, volume_id=volume['id']) volume_id = self._assert_volume_attach(server, volume_id=volume['id'])
self.assertTrue( self.assertTrue(
self.user_cloud.delete_server(self.server_name, wait=True)) self.user_cloud.delete_server(self.server_name, wait=True)
)
volume = self.user_cloud.get_volume(volume_id) volume = self.user_cloud.get_volume(volume_id)
# We can either get None (if the volume delete was quick), or a volume # We can either get None (if the volume delete was quick), or a volume
# that is in the process of being deleted. # that is in the process of being deleted.
@ -420,9 +461,11 @@ class TestCompute(base.BaseFunctionalTest):
image=self.image, image=self.image,
flavor=self.flavor, flavor=self.flavor,
admin_pass='sheiqu9loegahSh', admin_pass='sheiqu9loegahSh',
wait=True) wait=True,
image = self.user_cloud.create_image_snapshot('test-snapshot', server, )
wait=True) image = self.user_cloud.create_image_snapshot(
'test-snapshot', server, wait=True
)
self.addCleanup(self.user_cloud.delete_image, image['id']) self.addCleanup(self.user_cloud.delete_image, image['id'])
self.assertEqual('active', image['status']) self.assertEqual('active', image['status'])
@ -432,24 +475,32 @@ class TestCompute(base.BaseFunctionalTest):
name=self.server_name, name=self.server_name,
image=self.image, image=self.image,
flavor=self.flavor, flavor=self.flavor,
wait=True) wait=True,
self.user_cloud.set_server_metadata(self.server_name, )
{'key1': 'value1', self.user_cloud.set_server_metadata(
'key2': 'value2'}) self.server_name, {'key1': 'value1', 'key2': 'value2'}
)
updated_server = self.user_cloud.get_server(self.server_name) updated_server = self.user_cloud.get_server(self.server_name)
self.assertEqual(set(updated_server.metadata.items()), self.assertEqual(
set({'key1': 'value1', 'key2': 'value2'}.items())) set(updated_server.metadata.items()),
set({'key1': 'value1', 'key2': 'value2'}.items()),
)
self.user_cloud.set_server_metadata(self.server_name, self.user_cloud.set_server_metadata(
{'key2': 'value3'}) self.server_name, {'key2': 'value3'}
)
updated_server = self.user_cloud.get_server(self.server_name) updated_server = self.user_cloud.get_server(self.server_name)
self.assertEqual(set(updated_server.metadata.items()), self.assertEqual(
set({'key1': 'value1', 'key2': 'value3'}.items())) set(updated_server.metadata.items()),
set({'key1': 'value1', 'key2': 'value3'}.items()),
)
self.user_cloud.delete_server_metadata(self.server_name, ['key2']) self.user_cloud.delete_server_metadata(self.server_name, ['key2'])
updated_server = self.user_cloud.get_server(self.server_name) updated_server = self.user_cloud.get_server(self.server_name)
self.assertEqual(set(updated_server.metadata.items()), self.assertEqual(
set({'key1': 'value1'}.items())) set(updated_server.metadata.items()),
set({'key1': 'value1'}.items()),
)
self.user_cloud.delete_server_metadata(self.server_name, ['key1']) self.user_cloud.delete_server_metadata(self.server_name, ['key1'])
updated_server = self.user_cloud.get_server(self.server_name) updated_server = self.user_cloud.get_server(self.server_name)
@ -458,7 +509,9 @@ class TestCompute(base.BaseFunctionalTest):
self.assertRaises( self.assertRaises(
exc.OpenStackCloudURINotFound, exc.OpenStackCloudURINotFound,
self.user_cloud.delete_server_metadata, self.user_cloud.delete_server_metadata,
self.server_name, ['key1']) self.server_name,
['key1'],
)
def test_update_server(self): def test_update_server(self):
self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) self.addCleanup(self._cleanup_servers_and_volumes, self.server_name)
@ -466,10 +519,10 @@ class TestCompute(base.BaseFunctionalTest):
name=self.server_name, name=self.server_name,
image=self.image, image=self.image,
flavor=self.flavor, flavor=self.flavor,
wait=True) wait=True,
)
server_updated = self.user_cloud.update_server( server_updated = self.user_cloud.update_server(
self.server_name, self.server_name, name='new_name'
name='new_name'
) )
self.assertEqual('new_name', server_updated['name']) self.assertEqual('new_name', server_updated['name'])
@ -484,7 +537,8 @@ class TestCompute(base.BaseFunctionalTest):
name=self.server_name, name=self.server_name,
image=self.image, image=self.image,
flavor=self.flavor, flavor=self.flavor,
wait=True) wait=True,
)
start = datetime.datetime.now() - datetime.timedelta(seconds=5) start = datetime.datetime.now() - datetime.timedelta(seconds=5)
usage = self.operator_cloud.get_compute_usage('demo', start) usage = self.operator_cloud.get_compute_usage('demo', start)
self.add_info_on_exception('usage', usage) self.add_info_on_exception('usage', usage)

View File

@ -30,16 +30,18 @@ class TestDevstack(base.BaseFunctionalTest):
scenarios = [ scenarios = [
('designate', dict(env='DESIGNATE', service='dns')), ('designate', dict(env='DESIGNATE', service='dns')),
('heat', dict(env='HEAT', service='orchestration')), ('heat', dict(env='HEAT', service='orchestration')),
('magnum', dict( (
env='MAGNUM', 'magnum',
service='container-infrastructure-management' dict(env='MAGNUM', service='container-infrastructure-management'),
)), ),
('neutron', dict(env='NEUTRON', service='network')), ('neutron', dict(env='NEUTRON', service='network')),
('octavia', dict(env='OCTAVIA', service='load-balancer')), ('octavia', dict(env='OCTAVIA', service='load-balancer')),
('swift', dict(env='SWIFT', service='object-store')), ('swift', dict(env='SWIFT', service='object-store')),
] ]
def test_has_service(self): def test_has_service(self):
if os.environ.get( if (
'OPENSTACKSDK_HAS_{env}'.format(env=self.env), '0') == '1': os.environ.get('OPENSTACKSDK_HAS_{env}'.format(env=self.env), '0')
== '1'
):
self.assertTrue(self.user_cloud.has_service(self.service)) self.assertTrue(self.user_cloud.has_service(self.service))

View File

@ -22,7 +22,6 @@ from openstack.tests.functional import base
class TestDomain(base.BaseFunctionalTest): class TestDomain(base.BaseFunctionalTest):
def setUp(self): def setUp(self):
super(TestDomain, self).setUp() super(TestDomain, self).setUp()
if not self.operator_cloud: if not self.operator_cloud:
@ -47,14 +46,16 @@ class TestDomain(base.BaseFunctionalTest):
# Raise an error: we must make users aware that something went # Raise an error: we must make users aware that something went
# wrong # wrong
raise openstack.cloud.OpenStackCloudException( raise openstack.cloud.OpenStackCloudException(
'\n'.join(exception_list)) '\n'.join(exception_list)
)
def test_search_domains(self): def test_search_domains(self):
domain_name = self.domain_prefix + '_search' domain_name = self.domain_prefix + '_search'
# Shouldn't find any domain with this name yet # Shouldn't find any domain with this name yet
results = self.operator_cloud.search_domains( results = self.operator_cloud.search_domains(
filters=dict(name=domain_name)) filters=dict(name=domain_name)
)
self.assertEqual(0, len(results)) self.assertEqual(0, len(results))
# Now create a new domain # Now create a new domain
@ -63,7 +64,8 @@ class TestDomain(base.BaseFunctionalTest):
# Now we should find only the new domain # Now we should find only the new domain
results = self.operator_cloud.search_domains( results = self.operator_cloud.search_domains(
filters=dict(name=domain_name)) filters=dict(name=domain_name)
)
self.assertEqual(1, len(results)) self.assertEqual(1, len(results))
self.assertEqual(domain_name, results[0]['name']) self.assertEqual(domain_name, results[0]['name'])
@ -74,13 +76,17 @@ class TestDomain(base.BaseFunctionalTest):
def test_update_domain(self): def test_update_domain(self):
domain = self.operator_cloud.create_domain( domain = self.operator_cloud.create_domain(
self.domain_prefix, 'description') self.domain_prefix, 'description'
)
self.assertEqual(self.domain_prefix, domain['name']) self.assertEqual(self.domain_prefix, domain['name'])
self.assertEqual('description', domain['description']) self.assertEqual('description', domain['description'])
self.assertTrue(domain['enabled']) self.assertTrue(domain['enabled'])
updated = self.operator_cloud.update_domain( updated = self.operator_cloud.update_domain(
domain['id'], name='updated name', domain['id'],
description='updated description', enabled=False) name='updated name',
description='updated description',
enabled=False,
)
self.assertEqual('updated name', updated['name']) self.assertEqual('updated name', updated['name'])
self.assertEqual('updated description', updated['description']) self.assertEqual('updated description', updated['description'])
self.assertFalse(updated['enabled']) self.assertFalse(updated['enabled'])
@ -91,14 +97,16 @@ class TestDomain(base.BaseFunctionalTest):
name_or_id='updated name', name_or_id='updated name',
name='updated name 2', name='updated name 2',
description='updated description 2', description='updated description 2',
enabled=True) enabled=True,
)
self.assertEqual('updated name 2', updated['name']) self.assertEqual('updated name 2', updated['name'])
self.assertEqual('updated description 2', updated['description']) self.assertEqual('updated description 2', updated['description'])
self.assertTrue(updated['enabled']) self.assertTrue(updated['enabled'])
def test_delete_domain(self): def test_delete_domain(self):
domain = self.operator_cloud.create_domain(self.domain_prefix, domain = self.operator_cloud.create_domain(
'description') self.domain_prefix, 'description'
)
self.assertEqual(self.domain_prefix, domain['name']) self.assertEqual(self.domain_prefix, domain['name'])
self.assertEqual('description', domain['description']) self.assertEqual('description', domain['description'])
self.assertTrue(domain['enabled']) self.assertTrue(domain['enabled'])
@ -107,7 +115,8 @@ class TestDomain(base.BaseFunctionalTest):
# Now we delete domain by name with name_or_id # Now we delete domain by name with name_or_id
domain = self.operator_cloud.create_domain( domain = self.operator_cloud.create_domain(
self.domain_prefix, 'description') self.domain_prefix, 'description'
)
self.assertEqual(self.domain_prefix, domain['name']) self.assertEqual(self.domain_prefix, domain['name'])
self.assertEqual('description', domain['description']) self.assertEqual('description', domain['description'])
self.assertTrue(domain['enabled']) self.assertTrue(domain['enabled'])
@ -117,7 +126,8 @@ class TestDomain(base.BaseFunctionalTest):
# Finally, we assert we get False from delete_domain if domain does # Finally, we assert we get False from delete_domain if domain does
# not exist # not exist
domain = self.operator_cloud.create_domain( domain = self.operator_cloud.create_domain(
self.domain_prefix, 'description') self.domain_prefix, 'description'
)
self.assertEqual(self.domain_prefix, domain['name']) self.assertEqual(self.domain_prefix, domain['name'])
self.assertEqual('description', domain['description']) self.assertEqual('description', domain['description'])
self.assertTrue(domain['enabled']) self.assertTrue(domain['enabled'])

View File

@ -29,8 +29,14 @@ from openstack.tests.functional import base
class TestEndpoints(base.KeystoneBaseFunctionalTest): class TestEndpoints(base.KeystoneBaseFunctionalTest):
endpoint_attributes = ['id', 'region', 'publicurl', 'internalurl', endpoint_attributes = [
'service_id', 'adminurl'] 'id',
'region',
'publicurl',
'internalurl',
'service_id',
'adminurl',
]
def setUp(self): def setUp(self):
super(TestEndpoints, self).setUp() super(TestEndpoints, self).setUp()
@ -39,7 +45,8 @@ class TestEndpoints(base.KeystoneBaseFunctionalTest):
# Generate a random name for services and regions in this test # Generate a random name for services and regions in this test
self.new_item_name = 'test_' + ''.join( self.new_item_name = 'test_' + ''.join(
random.choice(string.ascii_lowercase) for _ in range(5)) random.choice(string.ascii_lowercase) for _ in range(5)
)
self.addCleanup(self._cleanup_services) self.addCleanup(self._cleanup_services)
self.addCleanup(self._cleanup_endpoints) self.addCleanup(self._cleanup_endpoints)
@ -47,8 +54,9 @@ class TestEndpoints(base.KeystoneBaseFunctionalTest):
def _cleanup_endpoints(self): def _cleanup_endpoints(self):
exception_list = list() exception_list = list()
for e in self.operator_cloud.list_endpoints(): for e in self.operator_cloud.list_endpoints():
if e.get('region') is not None and \ if e.get('region') is not None and e['region'].startswith(
e['region'].startswith(self.new_item_name): self.new_item_name
):
try: try:
self.operator_cloud.delete_endpoint(id=e['id']) self.operator_cloud.delete_endpoint(id=e['id'])
except Exception as e: except Exception as e:
@ -63,8 +71,9 @@ class TestEndpoints(base.KeystoneBaseFunctionalTest):
def _cleanup_services(self): def _cleanup_services(self):
exception_list = list() exception_list = list()
for s in self.operator_cloud.list_services(): for s in self.operator_cloud.list_services():
if s['name'] is not None and \ if s['name'] is not None and s['name'].startswith(
s['name'].startswith(self.new_item_name): self.new_item_name
):
try: try:
self.operator_cloud.delete_service(name_or_id=s['id']) self.operator_cloud.delete_service(name_or_id=s['id'])
except Exception as e: except Exception as e:
@ -82,15 +91,18 @@ class TestEndpoints(base.KeystoneBaseFunctionalTest):
region = list(self.operator_cloud.identity.regions())[0].id region = list(self.operator_cloud.identity.regions())[0].id
service = self.operator_cloud.create_service( service = self.operator_cloud.create_service(
name=service_name, type='test_type', name=service_name,
description='this is a test description') type='test_type',
description='this is a test description',
)
endpoints = self.operator_cloud.create_endpoint( endpoints = self.operator_cloud.create_endpoint(
service_name_or_id=service['id'], service_name_or_id=service['id'],
public_url='http://public.test/', public_url='http://public.test/',
internal_url='http://internal.test/', internal_url='http://internal.test/',
admin_url='http://admin.url/', admin_url='http://admin.url/',
region=region) region=region,
)
self.assertNotEqual([], endpoints) self.assertNotEqual([], endpoints)
self.assertIsNotNone(endpoints[0].get('id')) self.assertIsNotNone(endpoints[0].get('id'))
@ -99,7 +111,8 @@ class TestEndpoints(base.KeystoneBaseFunctionalTest):
endpoints = self.operator_cloud.create_endpoint( endpoints = self.operator_cloud.create_endpoint(
service_name_or_id=service['id'], service_name_or_id=service['id'],
public_url='http://public.test/', public_url='http://public.test/',
region=region) region=region,
)
self.assertNotEqual([], endpoints) self.assertNotEqual([], endpoints)
self.assertIsNotNone(endpoints[0].get('id')) self.assertIsNotNone(endpoints[0].get('id'))
@ -108,32 +121,38 @@ class TestEndpoints(base.KeystoneBaseFunctionalTest):
ver = self.operator_cloud.config.get_api_version('identity') ver = self.operator_cloud.config.get_api_version('identity')
if ver.startswith('2'): if ver.startswith('2'):
# NOTE(SamYaple): Update endpoint only works with v3 api # NOTE(SamYaple): Update endpoint only works with v3 api
self.assertRaises(OpenStackCloudUnavailableFeature, self.assertRaises(
self.operator_cloud.update_endpoint, OpenStackCloudUnavailableFeature,
'endpoint_id1') self.operator_cloud.update_endpoint,
'endpoint_id1',
)
else: else:
# service operations require existing region. Do not test updating # service operations require existing region. Do not test updating
# region for now # region for now
region = list(self.operator_cloud.identity.regions())[0].id region = list(self.operator_cloud.identity.regions())[0].id
service = self.operator_cloud.create_service( service = self.operator_cloud.create_service(
name='service1', type='test_type') name='service1', type='test_type'
)
endpoint = self.operator_cloud.create_endpoint( endpoint = self.operator_cloud.create_endpoint(
service_name_or_id=service['id'], service_name_or_id=service['id'],
url='http://admin.url/', url='http://admin.url/',
interface='admin', interface='admin',
region=region, region=region,
enabled=False)[0] enabled=False,
)[0]
new_service = self.operator_cloud.create_service( new_service = self.operator_cloud.create_service(
name='service2', type='test_type') name='service2', type='test_type'
)
new_endpoint = self.operator_cloud.update_endpoint( new_endpoint = self.operator_cloud.update_endpoint(
endpoint.id, endpoint.id,
service_name_or_id=new_service.id, service_name_or_id=new_service.id,
url='http://public.url/', url='http://public.url/',
interface='public', interface='public',
region=region, region=region,
enabled=True) enabled=True,
)
self.assertEqual(new_endpoint.url, 'http://public.url/') self.assertEqual(new_endpoint.url, 'http://public.url/')
self.assertEqual(new_endpoint.interface, 'public') self.assertEqual(new_endpoint.interface, 'public')
@ -147,14 +166,17 @@ class TestEndpoints(base.KeystoneBaseFunctionalTest):
region = list(self.operator_cloud.identity.regions())[0].id region = list(self.operator_cloud.identity.regions())[0].id
service = self.operator_cloud.create_service( service = self.operator_cloud.create_service(
name=service_name, type='test_type', name=service_name,
description='this is a test description') type='test_type',
description='this is a test description',
)
endpoints = self.operator_cloud.create_endpoint( endpoints = self.operator_cloud.create_endpoint(
service_name_or_id=service['id'], service_name_or_id=service['id'],
public_url='http://public.test/', public_url='http://public.test/',
internal_url='http://internal.test/', internal_url='http://internal.test/',
region=region) region=region,
)
observed_endpoints = self.operator_cloud.list_endpoints() observed_endpoints = self.operator_cloud.list_endpoints()
found = False found = False
@ -170,10 +192,10 @@ class TestEndpoints(base.KeystoneBaseFunctionalTest):
elif e['interface'] == 'public': elif e['interface'] == 'public':
self.assertEqual('http://public.test/', e['url']) self.assertEqual('http://public.test/', e['url'])
else: else:
self.assertEqual('http://public.test/', self.assertEqual('http://public.test/', e['publicurl'])
e['publicurl']) self.assertEqual(
self.assertEqual('http://internal.test/', 'http://internal.test/', e['internalurl']
e['internalurl']) )
self.assertEqual(region, e['region_id']) self.assertEqual(region, e['region_id'])
self.assertTrue(found, msg='new endpoint not found in endpoints list!') self.assertTrue(found, msg='new endpoint not found in endpoints list!')
@ -184,14 +206,17 @@ class TestEndpoints(base.KeystoneBaseFunctionalTest):
region = list(self.operator_cloud.identity.regions())[0].id region = list(self.operator_cloud.identity.regions())[0].id
service = self.operator_cloud.create_service( service = self.operator_cloud.create_service(
name=service_name, type='test_type', name=service_name,
description='this is a test description') type='test_type',
description='this is a test description',
)
endpoints = self.operator_cloud.create_endpoint( endpoints = self.operator_cloud.create_endpoint(
service_name_or_id=service['id'], service_name_or_id=service['id'],
public_url='http://public.test/', public_url='http://public.test/',
internal_url='http://internal.test/', internal_url='http://internal.test/',
region=region) region=region,
)
self.assertNotEqual([], endpoints) self.assertNotEqual([], endpoints)
for endpoint in endpoints: for endpoint in endpoints:
@ -204,5 +229,4 @@ class TestEndpoints(base.KeystoneBaseFunctionalTest):
if e['id'] == endpoint['id']: if e['id'] == endpoint['id']:
found = True found = True
break break
self.assertEqual( self.assertEqual(False, found, message='new endpoint was not deleted!')
False, found, message='new endpoint was not deleted!')

View File

@ -24,7 +24,6 @@ from openstack.tests.functional import base
class TestFlavor(base.BaseFunctionalTest): class TestFlavor(base.BaseFunctionalTest):
def setUp(self): def setUp(self):
super(TestFlavor, self).setUp() super(TestFlavor, self).setUp()
@ -56,8 +55,14 @@ class TestFlavor(base.BaseFunctionalTest):
flavor_name = self.new_item_name + '_create' flavor_name = self.new_item_name + '_create'
flavor_kwargs = dict( flavor_kwargs = dict(
name=flavor_name, ram=1024, vcpus=2, disk=10, ephemeral=5, name=flavor_name,
swap=100, rxtx_factor=1.5, is_public=True ram=1024,
vcpus=2,
disk=10,
ephemeral=5,
swap=100,
rxtx_factor=1.5,
is_public=True,
) )
flavor = self.operator_cloud.create_flavor(**flavor_kwargs) flavor = self.operator_cloud.create_flavor(**flavor_kwargs)
@ -144,8 +149,9 @@ class TestFlavor(base.BaseFunctionalTest):
self.assertEqual(project['id'], acls[0]['tenant_id']) self.assertEqual(project['id'], acls[0]['tenant_id'])
# Now revoke the access and make sure we can't find it # Now revoke the access and make sure we can't find it
self.operator_cloud.remove_flavor_access(new_flavor['id'], self.operator_cloud.remove_flavor_access(
project['id']) new_flavor['id'], project['id']
)
flavors = self.user_cloud.search_flavors(priv_flavor_name) flavors = self.user_cloud.search_flavors(priv_flavor_name)
self.assertEqual(0, len(flavors)) self.assertEqual(0, len(flavors))
@ -157,9 +163,7 @@ class TestFlavor(base.BaseFunctionalTest):
self.skipTest("Operator cloud is required for this test") self.skipTest("Operator cloud is required for this test")
flavor_name = self.new_item_name + '_spec_test' flavor_name = self.new_item_name + '_spec_test'
kwargs = dict( kwargs = dict(name=flavor_name, ram=1024, vcpus=2, disk=10)
name=flavor_name, ram=1024, vcpus=2, disk=10
)
new_flavor = self.operator_cloud.create_flavor(**kwargs) new_flavor = self.operator_cloud.create_flavor(**kwargs)
# Expect no extra_specs # Expect no extra_specs
@ -169,7 +173,8 @@ class TestFlavor(base.BaseFunctionalTest):
extra_specs = {'foo': 'aaa', 'bar': 'bbb'} extra_specs = {'foo': 'aaa', 'bar': 'bbb'}
self.operator_cloud.set_flavor_specs(new_flavor['id'], extra_specs) self.operator_cloud.set_flavor_specs(new_flavor['id'], extra_specs)
mod_flavor = self.operator_cloud.get_flavor( mod_flavor = self.operator_cloud.get_flavor(
new_flavor['id'], get_extra=True) new_flavor['id'], get_extra=True
)
# Verify extra_specs were set # Verify extra_specs were set
self.assertIn('extra_specs', mod_flavor) self.assertIn('extra_specs', mod_flavor)
@ -178,7 +183,8 @@ class TestFlavor(base.BaseFunctionalTest):
# Unset the 'foo' value # Unset the 'foo' value
self.operator_cloud.unset_flavor_specs(mod_flavor['id'], ['foo']) self.operator_cloud.unset_flavor_specs(mod_flavor['id'], ['foo'])
mod_flavor = self.operator_cloud.get_flavor_by_id( mod_flavor = self.operator_cloud.get_flavor_by_id(
new_flavor['id'], get_extra=True) new_flavor['id'], get_extra=True
)
# Verify 'foo' is unset and 'bar' is still set # Verify 'foo' is unset and 'bar' is still set
self.assertEqual({'bar': 'bbb'}, mod_flavor['extra_specs']) self.assertEqual({'bar': 'bbb'}, mod_flavor['extra_specs'])

View File

@ -54,12 +54,14 @@ class TestFloatingIP(base.BaseFunctionalTest):
try: try:
if r['name'].startswith(self.new_item_name): if r['name'].startswith(self.new_item_name):
self.user_cloud.update_router( self.user_cloud.update_router(
r, ext_gateway_net_id=None) r, ext_gateway_net_id=None
)
for s in self.user_cloud.list_subnets(): for s in self.user_cloud.list_subnets():
if s['name'].startswith(self.new_item_name): if s['name'].startswith(self.new_item_name):
try: try:
self.user_cloud.remove_router_interface( self.user_cloud.remove_router_interface(
r, subnet_id=s['id']) r, subnet_id=s['id']
)
except Exception: except Exception:
pass pass
self.user_cloud.delete_router(r.id) self.user_cloud.delete_router(r.id)
@ -93,7 +95,9 @@ class TestFloatingIP(base.BaseFunctionalTest):
self.addDetail( self.addDetail(
'exceptions', 'exceptions',
content.text_content( content.text_content(
'\n'.join([str(ex) for ex in exception_list]))) '\n'.join([str(ex) for ex in exception_list])
),
)
exc = exception_list[0] exc = exception_list[0]
raise exc raise exc
@ -121,8 +125,10 @@ class TestFloatingIP(base.BaseFunctionalTest):
fixed_ip = meta.get_server_private_ip(server) fixed_ip = meta.get_server_private_ip(server)
for ip in self.user_cloud.list_floating_ips(): for ip in self.user_cloud.list_floating_ips():
if (ip.get('fixed_ip', None) == fixed_ip if (
or ip.get('fixed_ip_address', None) == fixed_ip): ip.get('fixed_ip', None) == fixed_ip
or ip.get('fixed_ip_address', None) == fixed_ip
):
try: try:
self.user_cloud.delete_floating_ip(ip.id) self.user_cloud.delete_floating_ip(ip.id)
except Exception as e: except Exception as e:
@ -138,42 +144,49 @@ class TestFloatingIP(base.BaseFunctionalTest):
if self.user_cloud.has_service('network'): if self.user_cloud.has_service('network'):
# Create a network # Create a network
self.test_net = self.user_cloud.create_network( self.test_net = self.user_cloud.create_network(
name=self.new_item_name + '_net') name=self.new_item_name + '_net'
)
# Create a subnet on it # Create a subnet on it
self.test_subnet = self.user_cloud.create_subnet( self.test_subnet = self.user_cloud.create_subnet(
subnet_name=self.new_item_name + '_subnet', subnet_name=self.new_item_name + '_subnet',
network_name_or_id=self.test_net['id'], network_name_or_id=self.test_net['id'],
cidr='10.24.4.0/24', cidr='10.24.4.0/24',
enable_dhcp=True enable_dhcp=True,
) )
# Create a router # Create a router
self.test_router = self.user_cloud.create_router( self.test_router = self.user_cloud.create_router(
name=self.new_item_name + '_router') name=self.new_item_name + '_router'
)
# Attach the router to an external network # Attach the router to an external network
ext_nets = self.user_cloud.search_networks( ext_nets = self.user_cloud.search_networks(
filters={'router:external': True}) filters={'router:external': True}
)
self.user_cloud.update_router( self.user_cloud.update_router(
name_or_id=self.test_router['id'], name_or_id=self.test_router['id'],
ext_gateway_net_id=ext_nets[0]['id']) ext_gateway_net_id=ext_nets[0]['id'],
)
# Attach the router to the internal subnet # Attach the router to the internal subnet
self.user_cloud.add_router_interface( self.user_cloud.add_router_interface(
self.test_router, subnet_id=self.test_subnet['id']) self.test_router, subnet_id=self.test_subnet['id']
)
# Select the network for creating new servers # Select the network for creating new servers
self.nic = {'net-id': self.test_net['id']} self.nic = {'net-id': self.test_net['id']}
self.addDetail( self.addDetail(
'networks-neutron', 'networks-neutron',
content.text_content(pprint.pformat( content.text_content(
self.user_cloud.list_networks()))) pprint.pformat(self.user_cloud.list_networks())
),
)
else: else:
# Find network names for nova-net # Find network names for nova-net
data = proxy._json_response( data = proxy._json_response(
self.user_cloud._conn.compute.get('/os-tenant-networks')) self.user_cloud._conn.compute.get('/os-tenant-networks')
)
nets = meta.get_and_munchify('networks', data) nets = meta.get_and_munchify('networks', data)
self.addDetail( self.addDetail(
'networks-nova', 'networks-nova', content.text_content(pprint.pformat(nets))
content.text_content(pprint.pformat( )
nets)))
self.nic = {'net-id': nets[0].id} self.nic = {'net-id': nets[0].id}
def test_private_ip(self): def test_private_ip(self):
@ -181,27 +194,36 @@ class TestFloatingIP(base.BaseFunctionalTest):
new_server = self.user_cloud.get_openstack_vars( new_server = self.user_cloud.get_openstack_vars(
self.user_cloud.create_server( self.user_cloud.create_server(
wait=True, name=self.new_item_name + '_server', wait=True,
name=self.new_item_name + '_server',
image=self.image, image=self.image,
flavor=self.flavor, nics=[self.nic])) flavor=self.flavor,
nics=[self.nic],
)
)
self.addDetail( self.addDetail(
'server', content.text_content(pprint.pformat(new_server))) 'server', content.text_content(pprint.pformat(new_server))
)
self.assertNotEqual(new_server['private_v4'], '') self.assertNotEqual(new_server['private_v4'], '')
def test_add_auto_ip(self): def test_add_auto_ip(self):
self._setup_networks() self._setup_networks()
new_server = self.user_cloud.create_server( new_server = self.user_cloud.create_server(
wait=True, name=self.new_item_name + '_server', wait=True,
name=self.new_item_name + '_server',
image=self.image, image=self.image,
flavor=self.flavor, nics=[self.nic]) flavor=self.flavor,
nics=[self.nic],
)
# ToDo: remove the following iteration when create_server waits for # ToDo: remove the following iteration when create_server waits for
# the IP to be attached # the IP to be attached
ip = None ip = None
for _ in utils.iterate_timeout( for _ in utils.iterate_timeout(
self.timeout, "Timeout waiting for IP address to be attached"): self.timeout, "Timeout waiting for IP address to be attached"
):
ip = meta.get_server_external_ipv4(self.user_cloud, new_server) ip = meta.get_server_external_ipv4(self.user_cloud, new_server)
if ip is not None: if ip is not None:
break break
@ -213,15 +235,19 @@ class TestFloatingIP(base.BaseFunctionalTest):
self._setup_networks() self._setup_networks()
new_server = self.user_cloud.create_server( new_server = self.user_cloud.create_server(
wait=True, name=self.new_item_name + '_server', wait=True,
name=self.new_item_name + '_server',
image=self.image, image=self.image,
flavor=self.flavor, nics=[self.nic]) flavor=self.flavor,
nics=[self.nic],
)
# ToDo: remove the following iteration when create_server waits for # ToDo: remove the following iteration when create_server waits for
# the IP to be attached # the IP to be attached
ip = None ip = None
for _ in utils.iterate_timeout( for _ in utils.iterate_timeout(
self.timeout, "Timeout waiting for IP address to be attached"): self.timeout, "Timeout waiting for IP address to be attached"
):
ip = meta.get_server_external_ipv4(self.user_cloud, new_server) ip = meta.get_server_external_ipv4(self.user_cloud, new_server)
if ip is not None: if ip is not None:
break break
@ -230,15 +256,18 @@ class TestFloatingIP(base.BaseFunctionalTest):
self.addCleanup(self._cleanup_ips, new_server) self.addCleanup(self._cleanup_ips, new_server)
f_ip = self.user_cloud.get_floating_ip( f_ip = self.user_cloud.get_floating_ip(
id=None, filters={'floating_ip_address': ip}) id=None, filters={'floating_ip_address': ip}
)
self.user_cloud.detach_ip_from_server( self.user_cloud.detach_ip_from_server(
server_id=new_server.id, floating_ip_id=f_ip['id']) server_id=new_server.id, floating_ip_id=f_ip['id']
)
def test_list_floating_ips(self): def test_list_floating_ips(self):
if self.operator_cloud: if self.operator_cloud:
fip_admin = self.operator_cloud.create_floating_ip() fip_admin = self.operator_cloud.create_floating_ip()
self.addCleanup( self.addCleanup(
self.operator_cloud.delete_floating_ip, fip_admin.id) self.operator_cloud.delete_floating_ip, fip_admin.id
)
fip_user = self.user_cloud.create_floating_ip() fip_user = self.user_cloud.create_floating_ip()
self.addCleanup(self.user_cloud.delete_floating_ip, fip_user.id) self.addCleanup(self.user_cloud.delete_floating_ip, fip_user.id)
@ -260,7 +289,8 @@ class TestFloatingIP(base.BaseFunctionalTest):
# Ask Neutron for only a subset of all the FIPs. # Ask Neutron for only a subset of all the FIPs.
if self.operator_cloud: if self.operator_cloud:
filtered_fip_id_list = [ filtered_fip_id_list = [
fip.id for fip in self.operator_cloud.list_floating_ips( fip.id
for fip in self.operator_cloud.list_floating_ips(
{'tenant_id': self.user_cloud.current_project_id} {'tenant_id': self.user_cloud.current_project_id}
) )
] ]
@ -275,9 +305,10 @@ class TestFloatingIP(base.BaseFunctionalTest):
if self.operator_cloud: if self.operator_cloud:
self.assertNotIn(fip_user.id, fip_op_id_list) self.assertNotIn(fip_user.id, fip_op_id_list)
self.assertRaisesRegex( self.assertRaisesRegex(
ValueError, "Nova-network don't support server-side.*", ValueError,
"Nova-network don't support server-side.*",
self.operator_cloud.list_floating_ips, self.operator_cloud.list_floating_ips,
filters={'foo': 'bar'} filters={'foo': 'bar'},
) )
def test_search_floating_ips(self): def test_search_floating_ips(self):
@ -286,7 +317,7 @@ class TestFloatingIP(base.BaseFunctionalTest):
self.assertIn( self.assertIn(
fip_user['id'], fip_user['id'],
[fip.id for fip in self.user_cloud.search_floating_ips()] [fip.id for fip in self.user_cloud.search_floating_ips()],
) )
def test_get_floating_ip_by_id(self): def test_get_floating_ip_by_id(self):

View File

@ -38,8 +38,7 @@ class TestFloatingIPPool(base.BaseFunctionalTest):
if not self.user_cloud._has_nova_extension('os-floating-ip-pools'): if not self.user_cloud._has_nova_extension('os-floating-ip-pools'):
# Skipping this test is floating-ip-pool extension is not # Skipping this test is floating-ip-pool extension is not
# available on the testing cloud # available on the testing cloud
self.skip( self.skip('Floating IP pools extension is not available')
'Floating IP pools extension is not available')
def test_list_floating_ip_pools(self): def test_list_floating_ip_pools(self):
pools = self.user_cloud.list_floating_ip_pools() pools = self.user_cloud.list_floating_ip_pools()

View File

@ -22,7 +22,6 @@ from openstack.tests.functional import base
class TestGroup(base.BaseFunctionalTest): class TestGroup(base.BaseFunctionalTest):
def setUp(self): def setUp(self):
super(TestGroup, self).setUp() super(TestGroup, self).setUp()
if not self.operator_cloud: if not self.operator_cloud:
@ -48,7 +47,8 @@ class TestGroup(base.BaseFunctionalTest):
# Raise an error: we must make users aware that something went # Raise an error: we must make users aware that something went
# wrong # wrong
raise openstack.cloud.OpenStackCloudException( raise openstack.cloud.OpenStackCloudException(
'\n'.join(exception_list)) '\n'.join(exception_list)
)
def test_create_group(self): def test_create_group(self):
group_name = self.group_prefix + '_create' group_name = self.group_prefix + '_create'
@ -68,7 +68,8 @@ class TestGroup(base.BaseFunctionalTest):
self.assertTrue(self.operator_cloud.delete_group(group_name)) self.assertTrue(self.operator_cloud.delete_group(group_name))
results = self.operator_cloud.search_groups( results = self.operator_cloud.search_groups(
filters=dict(name=group_name)) filters=dict(name=group_name)
)
self.assertEqual(0, len(results)) self.assertEqual(0, len(results))
def test_delete_group_not_exists(self): def test_delete_group_not_exists(self):
@ -79,7 +80,8 @@ class TestGroup(base.BaseFunctionalTest):
# Shouldn't find any group with this name yet # Shouldn't find any group with this name yet
results = self.operator_cloud.search_groups( results = self.operator_cloud.search_groups(
filters=dict(name=group_name)) filters=dict(name=group_name)
)
self.assertEqual(0, len(results)) self.assertEqual(0, len(results))
# Now create a new group # Now create a new group
@ -88,7 +90,8 @@ class TestGroup(base.BaseFunctionalTest):
# Now we should find only the new group # Now we should find only the new group
results = self.operator_cloud.search_groups( results = self.operator_cloud.search_groups(
filters=dict(name=group_name)) filters=dict(name=group_name)
)
self.assertEqual(1, len(results)) self.assertEqual(1, len(results))
self.assertEqual(group_name, results[0]['name']) self.assertEqual(group_name, results[0]['name'])
@ -103,8 +106,7 @@ class TestGroup(base.BaseFunctionalTest):
updated_group_name = group_name + '_xyz' updated_group_name = group_name + '_xyz'
updated_group_desc = group_desc + ' updated' updated_group_desc = group_desc + ' updated'
updated_group = self.operator_cloud.update_group( updated_group = self.operator_cloud.update_group(
group_name, group_name, name=updated_group_name, description=updated_group_desc
name=updated_group_name, )
description=updated_group_desc)
self.assertEqual(updated_group_name, updated_group['name']) self.assertEqual(updated_group_name, updated_group['name'])
self.assertEqual(updated_group_desc, updated_group['description']) self.assertEqual(updated_group_desc, updated_group['description'])

View File

@ -30,7 +30,8 @@ class TestIdentity(base.KeystoneBaseFunctionalTest):
if not self.operator_cloud: if not self.operator_cloud:
self.skipTest("Operator cloud is required for this test") self.skipTest("Operator cloud is required for this test")
self.role_prefix = 'test_role' + ''.join( self.role_prefix = 'test_role' + ''.join(
random.choice(string.ascii_lowercase) for _ in range(5)) random.choice(string.ascii_lowercase) for _ in range(5)
)
self.user_prefix = self.getUniqueString('user') self.user_prefix = self.getUniqueString('user')
self.group_prefix = self.getUniqueString('group') self.group_prefix = self.getUniqueString('group')
@ -133,7 +134,8 @@ class TestIdentity(base.KeystoneBaseFunctionalTest):
user = self.operator_cloud.get_user('demo') user = self.operator_cloud.get_user('demo')
project = self.operator_cloud.get_project('demo') project = self.operator_cloud.get_project('demo')
assignments = self.operator_cloud.list_role_assignments( assignments = self.operator_cloud.list_role_assignments(
filters={'user': user['id'], 'project': project['id']}) filters={'user': user['id'], 'project': project['id']}
)
self.assertIsInstance(assignments, list) self.assertIsInstance(assignments, list)
self.assertGreater(len(assignments), 0) self.assertGreater(len(assignments), 0)
@ -142,25 +144,35 @@ class TestIdentity(base.KeystoneBaseFunctionalTest):
user_email = 'nobody@nowhere.com' user_email = 'nobody@nowhere.com'
role_name = self.role_prefix + '_grant_user_project' role_name = self.role_prefix + '_grant_user_project'
role = self.operator_cloud.create_role(role_name) role = self.operator_cloud.create_role(role_name)
user = self._create_user(name=user_name, user = self._create_user(
email=user_email, name=user_name, email=user_email, default_project='demo'
default_project='demo') )
self.assertTrue(self.operator_cloud.grant_role( self.assertTrue(
role_name, user=user['id'], project='demo', wait=True)) self.operator_cloud.grant_role(
assignments = self.operator_cloud.list_role_assignments({ role_name, user=user['id'], project='demo', wait=True
'role': role['id'], )
'user': user['id'], )
'project': self.operator_cloud.get_project('demo')['id'] assignments = self.operator_cloud.list_role_assignments(
}) {
'role': role['id'],
'user': user['id'],
'project': self.operator_cloud.get_project('demo')['id'],
}
)
self.assertIsInstance(assignments, list) self.assertIsInstance(assignments, list)
self.assertEqual(1, len(assignments)) self.assertEqual(1, len(assignments))
self.assertTrue(self.operator_cloud.revoke_role( self.assertTrue(
role_name, user=user['id'], project='demo', wait=True)) self.operator_cloud.revoke_role(
assignments = self.operator_cloud.list_role_assignments({ role_name, user=user['id'], project='demo', wait=True
'role': role['id'], )
'user': user['id'], )
'project': self.operator_cloud.get_project('demo')['id'] assignments = self.operator_cloud.list_role_assignments(
}) {
'role': role['id'],
'user': user['id'],
'project': self.operator_cloud.get_project('demo')['id'],
}
)
self.assertIsInstance(assignments, list) self.assertIsInstance(assignments, list)
self.assertEqual(0, len(assignments)) self.assertEqual(0, len(assignments))
@ -171,25 +183,34 @@ class TestIdentity(base.KeystoneBaseFunctionalTest):
role = self.operator_cloud.create_role(role_name) role = self.operator_cloud.create_role(role_name)
group_name = self.group_prefix + '_group_project' group_name = self.group_prefix + '_group_project'
group = self.operator_cloud.create_group( group = self.operator_cloud.create_group(
name=group_name, name=group_name, description='test group', domain='default'
description='test group', )
domain='default') self.assertTrue(
self.assertTrue(self.operator_cloud.grant_role( self.operator_cloud.grant_role(
role_name, group=group['id'], project='demo')) role_name, group=group['id'], project='demo'
assignments = self.operator_cloud.list_role_assignments({ )
'role': role['id'], )
'group': group['id'], assignments = self.operator_cloud.list_role_assignments(
'project': self.operator_cloud.get_project('demo')['id'] {
}) 'role': role['id'],
'group': group['id'],
'project': self.operator_cloud.get_project('demo')['id'],
}
)
self.assertIsInstance(assignments, list) self.assertIsInstance(assignments, list)
self.assertEqual(1, len(assignments)) self.assertEqual(1, len(assignments))
self.assertTrue(self.operator_cloud.revoke_role( self.assertTrue(
role_name, group=group['id'], project='demo')) self.operator_cloud.revoke_role(
assignments = self.operator_cloud.list_role_assignments({ role_name, group=group['id'], project='demo'
'role': role['id'], )
'group': group['id'], )
'project': self.operator_cloud.get_project('demo')['id'] assignments = self.operator_cloud.list_role_assignments(
}) {
'role': role['id'],
'group': group['id'],
'project': self.operator_cloud.get_project('demo')['id'],
}
)
self.assertIsInstance(assignments, list) self.assertIsInstance(assignments, list)
self.assertEqual(0, len(assignments)) self.assertEqual(0, len(assignments))
@ -200,25 +221,35 @@ class TestIdentity(base.KeystoneBaseFunctionalTest):
role = self.operator_cloud.create_role(role_name) role = self.operator_cloud.create_role(role_name)
user_name = self.user_prefix + '_user_domain' user_name = self.user_prefix + '_user_domain'
user_email = 'nobody@nowhere.com' user_email = 'nobody@nowhere.com'
user = self._create_user(name=user_name, user = self._create_user(
email=user_email, name=user_name, email=user_email, default_project='demo'
default_project='demo') )
self.assertTrue(self.operator_cloud.grant_role( self.assertTrue(
role_name, user=user['id'], domain='default')) self.operator_cloud.grant_role(
assignments = self.operator_cloud.list_role_assignments({ role_name, user=user['id'], domain='default'
'role': role['id'], )
'user': user['id'], )
'domain': self.operator_cloud.get_domain('default')['id'] assignments = self.operator_cloud.list_role_assignments(
}) {
'role': role['id'],
'user': user['id'],
'domain': self.operator_cloud.get_domain('default')['id'],
}
)
self.assertIsInstance(assignments, list) self.assertIsInstance(assignments, list)
self.assertEqual(1, len(assignments)) self.assertEqual(1, len(assignments))
self.assertTrue(self.operator_cloud.revoke_role( self.assertTrue(
role_name, user=user['id'], domain='default')) self.operator_cloud.revoke_role(
assignments = self.operator_cloud.list_role_assignments({ role_name, user=user['id'], domain='default'
'role': role['id'], )
'user': user['id'], )
'domain': self.operator_cloud.get_domain('default')['id'] assignments = self.operator_cloud.list_role_assignments(
}) {
'role': role['id'],
'user': user['id'],
'domain': self.operator_cloud.get_domain('default')['id'],
}
)
self.assertIsInstance(assignments, list) self.assertIsInstance(assignments, list)
self.assertEqual(0, len(assignments)) self.assertEqual(0, len(assignments))
@ -229,25 +260,34 @@ class TestIdentity(base.KeystoneBaseFunctionalTest):
role = self.operator_cloud.create_role(role_name) role = self.operator_cloud.create_role(role_name)
group_name = self.group_prefix + '_group_domain' group_name = self.group_prefix + '_group_domain'
group = self.operator_cloud.create_group( group = self.operator_cloud.create_group(
name=group_name, name=group_name, description='test group', domain='default'
description='test group', )
domain='default') self.assertTrue(
self.assertTrue(self.operator_cloud.grant_role( self.operator_cloud.grant_role(
role_name, group=group['id'], domain='default')) role_name, group=group['id'], domain='default'
assignments = self.operator_cloud.list_role_assignments({ )
'role': role['id'], )
'group': group['id'], assignments = self.operator_cloud.list_role_assignments(
'domain': self.operator_cloud.get_domain('default')['id'] {
}) 'role': role['id'],
'group': group['id'],
'domain': self.operator_cloud.get_domain('default')['id'],
}
)
self.assertIsInstance(assignments, list) self.assertIsInstance(assignments, list)
self.assertEqual(1, len(assignments)) self.assertEqual(1, len(assignments))
self.assertTrue(self.operator_cloud.revoke_role( self.assertTrue(
role_name, group=group['id'], domain='default')) self.operator_cloud.revoke_role(
assignments = self.operator_cloud.list_role_assignments({ role_name, group=group['id'], domain='default'
'role': role['id'], )
'group': group['id'], )
'domain': self.operator_cloud.get_domain('default')['id'] assignments = self.operator_cloud.list_role_assignments(
}) {
'role': role['id'],
'group': group['id'],
'domain': self.operator_cloud.get_domain('default')['id'],
}
)
self.assertIsInstance(assignments, list) self.assertIsInstance(assignments, list)
self.assertEqual(0, len(assignments)) self.assertEqual(0, len(assignments))
@ -256,25 +296,27 @@ class TestIdentity(base.KeystoneBaseFunctionalTest):
role = self.operator_cloud.create_role(role_name) role = self.operator_cloud.create_role(role_name)
user_name = self.user_prefix + '_user_system' user_name = self.user_prefix + '_user_system'
user_email = 'nobody@nowhere.com' user_email = 'nobody@nowhere.com'
user = self._create_user(name=user_name, user = self._create_user(
email=user_email, name=user_name, email=user_email, default_project='demo'
default_project='demo') )
self.assertTrue(self.operator_cloud.grant_role( self.assertTrue(
role_name, user=user['id'], system='all')) self.operator_cloud.grant_role(
assignments = self.operator_cloud.list_role_assignments({ role_name, user=user['id'], system='all'
'role': role['id'], )
'user': user['id'], )
'system': 'all' assignments = self.operator_cloud.list_role_assignments(
}) {'role': role['id'], 'user': user['id'], 'system': 'all'}
)
self.assertIsInstance(assignments, list) self.assertIsInstance(assignments, list)
self.assertEqual(1, len(assignments)) self.assertEqual(1, len(assignments))
self.assertTrue(self.operator_cloud.revoke_role( self.assertTrue(
role_name, user=user['id'], system='all')) self.operator_cloud.revoke_role(
assignments = self.operator_cloud.list_role_assignments({ role_name, user=user['id'], system='all'
'role': role['id'], )
'user': user['id'], )
'system': 'all' assignments = self.operator_cloud.list_role_assignments(
}) {'role': role['id'], 'user': user['id'], 'system': 'all'}
)
self.assertIsInstance(assignments, list) self.assertIsInstance(assignments, list)
self.assertEqual(0, len(assignments)) self.assertEqual(0, len(assignments))
@ -285,23 +327,25 @@ class TestIdentity(base.KeystoneBaseFunctionalTest):
role = self.operator_cloud.create_role(role_name) role = self.operator_cloud.create_role(role_name)
group_name = self.group_prefix + '_group_system' group_name = self.group_prefix + '_group_system'
group = self.operator_cloud.create_group( group = self.operator_cloud.create_group(
name=group_name, name=group_name, description='test group'
description='test group') )
self.assertTrue(self.operator_cloud.grant_role( self.assertTrue(
role_name, group=group['id'], system='all')) self.operator_cloud.grant_role(
assignments = self.operator_cloud.list_role_assignments({ role_name, group=group['id'], system='all'
'role': role['id'], )
'group': group['id'], )
'system': 'all' assignments = self.operator_cloud.list_role_assignments(
}) {'role': role['id'], 'group': group['id'], 'system': 'all'}
)
self.assertIsInstance(assignments, list) self.assertIsInstance(assignments, list)
self.assertEqual(1, len(assignments)) self.assertEqual(1, len(assignments))
self.assertTrue(self.operator_cloud.revoke_role( self.assertTrue(
role_name, group=group['id'], system='all')) self.operator_cloud.revoke_role(
assignments = self.operator_cloud.list_role_assignments({ role_name, group=group['id'], system='all'
'role': role['id'], )
'group': group['id'], )
'system': 'all' assignments = self.operator_cloud.list_role_assignments(
}) {'role': role['id'], 'group': group['id'], 'system': 'all'}
)
self.assertIsInstance(assignments, list) self.assertIsInstance(assignments, list)
self.assertEqual(0, len(assignments)) self.assertEqual(0, len(assignments))

View File

@ -25,7 +25,6 @@ from openstack.tests.functional import base
class TestImage(base.BaseFunctionalTest): class TestImage(base.BaseFunctionalTest):
def test_create_image(self): def test_create_image(self):
test_image = tempfile.NamedTemporaryFile(delete=False) test_image = tempfile.NamedTemporaryFile(delete=False)
test_image.write(b'\0' * 1024 * 1024) test_image.write(b'\0' * 1024 * 1024)
@ -40,7 +39,8 @@ class TestImage(base.BaseFunctionalTest):
min_disk=10, min_disk=10,
min_ram=1024, min_ram=1024,
tags=['custom'], tags=['custom'],
wait=True) wait=True,
)
finally: finally:
self.user_cloud.delete_image(image_name, wait=True) self.user_cloud.delete_image(image_name, wait=True)
@ -57,13 +57,16 @@ class TestImage(base.BaseFunctionalTest):
container_format='bare', container_format='bare',
min_disk=10, min_disk=10,
min_ram=1024, min_ram=1024,
wait=True) wait=True,
)
self.addCleanup(self.user_cloud.delete_image, image_name, wait=True) self.addCleanup(self.user_cloud.delete_image, image_name, wait=True)
output = os.path.join(tempfile.gettempdir(), self.getUniqueString()) output = os.path.join(tempfile.gettempdir(), self.getUniqueString())
self.user_cloud.download_image(image_name, output) self.user_cloud.download_image(image_name, output)
self.addCleanup(os.remove, output) self.addCleanup(os.remove, output)
self.assertTrue(filecmp.cmp(test_image.name, output), self.assertTrue(
"Downloaded contents don't match created image") filecmp.cmp(test_image.name, output),
"Downloaded contents don't match created image",
)
def test_create_image_skip_duplicate(self): def test_create_image_skip_duplicate(self):
test_image = tempfile.NamedTemporaryFile(delete=False) test_image = tempfile.NamedTemporaryFile(delete=False)
@ -79,7 +82,8 @@ class TestImage(base.BaseFunctionalTest):
min_disk=10, min_disk=10,
min_ram=1024, min_ram=1024,
validate_checksum=True, validate_checksum=True,
wait=True) wait=True,
)
second_image = self.user_cloud.create_image( second_image = self.user_cloud.create_image(
name=image_name, name=image_name,
filename=test_image.name, filename=test_image.name,
@ -88,7 +92,8 @@ class TestImage(base.BaseFunctionalTest):
min_disk=10, min_disk=10,
min_ram=1024, min_ram=1024,
validate_checksum=True, validate_checksum=True,
wait=True) wait=True,
)
self.assertEqual(first_image.id, second_image.id) self.assertEqual(first_image.id, second_image.id)
finally: finally:
self.user_cloud.delete_image(image_name, wait=True) self.user_cloud.delete_image(image_name, wait=True)
@ -108,7 +113,8 @@ class TestImage(base.BaseFunctionalTest):
container_format='bare', container_format='bare',
min_disk=10, min_disk=10,
min_ram=1024, min_ram=1024,
wait=True) wait=True,
)
second_image = self.user_cloud.create_image( second_image = self.user_cloud.create_image(
name=image_name, name=image_name,
filename=test_image.name, filename=test_image.name,
@ -117,7 +123,8 @@ class TestImage(base.BaseFunctionalTest):
min_disk=10, min_disk=10,
min_ram=1024, min_ram=1024,
allow_duplicates=True, allow_duplicates=True,
wait=True) wait=True,
)
self.assertNotEqual(first_image.id, second_image.id) self.assertNotEqual(first_image.id, second_image.id)
finally: finally:
if first_image: if first_image:
@ -138,11 +145,11 @@ class TestImage(base.BaseFunctionalTest):
container_format='bare', container_format='bare',
min_disk=10, min_disk=10,
min_ram=1024, min_ram=1024,
wait=True) wait=True,
)
self.user_cloud.update_image_properties( self.user_cloud.update_image_properties(
image=image, image=image, name=image_name, foo='bar'
name=image_name, )
foo='bar')
image = self.user_cloud.get_image(image_name) image = self.user_cloud.get_image(image_name)
self.assertIn('foo', image.properties) self.assertIn('foo', image.properties)
self.assertEqual(image.properties['foo'], 'bar') self.assertEqual(image.properties['foo'], 'bar')
@ -158,7 +165,8 @@ class TestImage(base.BaseFunctionalTest):
min_disk=10, min_disk=10,
min_ram=1024, min_ram=1024,
allow_duplicates=True, allow_duplicates=True,
wait=False) wait=False,
)
self.assertEqual(image_name, image.name) self.assertEqual(image_name, image.name)
self.user_cloud.delete_image(image.id, wait=True) self.user_cloud.delete_image(image.id, wait=True)
@ -175,7 +183,8 @@ class TestImage(base.BaseFunctionalTest):
container_format='bare', container_format='bare',
min_disk=10, min_disk=10,
min_ram=1024, min_ram=1024,
wait=True) wait=True,
)
image = self.user_cloud.get_image_by_id(image.id) image = self.user_cloud.get_image_by_id(image.id)
self.assertEqual(image_name, image.name) self.assertEqual(image_name, image.name)
self.assertEqual('raw', image.disk_format) self.assertEqual('raw', image.disk_format)

View File

@ -35,8 +35,13 @@ class TestInventory(base.BaseFunctionalTest):
self.server_name = self.getUniqueString('inventory') self.server_name = self.getUniqueString('inventory')
self.addCleanup(self._cleanup_server) self.addCleanup(self._cleanup_server)
server = self.operator_cloud.create_server( server = self.operator_cloud.create_server(
name=self.server_name, image=self.image, flavor=self.flavor, name=self.server_name,
wait=True, auto_ip=True, network='public') image=self.image,
flavor=self.flavor,
wait=True,
auto_ip=True,
network='public',
)
self.server_id = server['id'] self.server_id = server['id']
def _cleanup_server(self): def _cleanup_server(self):

View File

@ -21,7 +21,6 @@ from openstack.tests.functional import base
class TestKeypairs(base.BaseFunctionalTest): class TestKeypairs(base.BaseFunctionalTest):
def test_create_and_delete(self): def test_create_and_delete(self):
'''Test creating and deleting keypairs functionality''' '''Test creating and deleting keypairs functionality'''
name = self.getUniqueString('keypair') name = self.getUniqueString('keypair')
@ -46,7 +45,8 @@ class TestKeypairs(base.BaseFunctionalTest):
name = self.getUniqueString('keypair') name = self.getUniqueString('keypair')
self.addCleanup(self.user_cloud.delete_keypair, name) self.addCleanup(self.user_cloud.delete_keypair, name)
keypair = self.user_cloud.create_keypair( keypair = self.user_cloud.create_keypair(
name=name, public_key=fakes.FAKE_PUBLIC_KEY) name=name, public_key=fakes.FAKE_PUBLIC_KEY
)
self.assertEqual(keypair['name'], name) self.assertEqual(keypair['name'], name)
self.assertIsNotNone(keypair['public_key']) self.assertIsNotNone(keypair['public_key'])
self.assertIsNone(keypair['private_key']) self.assertIsNone(keypair['private_key'])

View File

@ -21,7 +21,6 @@ from openstack.tests.functional import base
class TestUsage(base.BaseFunctionalTest): class TestUsage(base.BaseFunctionalTest):
def test_get_our_compute_limits(self): def test_get_our_compute_limits(self):
'''Test quotas functionality''' '''Test quotas functionality'''
limits = self.user_cloud.get_compute_limits() limits = self.user_cloud.get_compute_limits()

View File

@ -21,7 +21,6 @@ from openstack.tests.functional import base
class TestMagnumServices(base.BaseFunctionalTest): class TestMagnumServices(base.BaseFunctionalTest):
def setUp(self): def setUp(self):
super(TestMagnumServices, self).setUp() super(TestMagnumServices, self).setUp()
if not self.user_cloud.has_service( if not self.user_cloud.has_service(

View File

@ -84,7 +84,8 @@ class TestNetwork(base.BaseFunctionalTest):
def test_create_network_provider_flat(self): def test_create_network_provider_flat(self):
existing_public = self.operator_cloud.search_networks( existing_public = self.operator_cloud.search_networks(
filters={'provider:network_type': 'flat'}) filters={'provider:network_type': 'flat'}
)
if existing_public: if existing_public:
self.skipTest('Physical network already allocated') self.skipTest('Physical network already allocated')
net1 = self.operator_cloud.create_network( net1 = self.operator_cloud.create_network(
@ -93,7 +94,7 @@ class TestNetwork(base.BaseFunctionalTest):
provider={ provider={
'physical_network': 'public', 'physical_network': 'public',
'network_type': 'flat', 'network_type': 'flat',
} },
) )
self.assertIn('id', net1) self.assertIn('id', net1)
self.assertEqual(self.network_name, net1['name']) self.assertEqual(self.network_name, net1['name'])
@ -117,10 +118,12 @@ class TestNetwork(base.BaseFunctionalTest):
net1 = self.operator_cloud.create_network(name=self.network_name) net1 = self.operator_cloud.create_network(name=self.network_name)
self.assertIsNotNone(net1) self.assertIsNotNone(net1)
net2 = self.operator_cloud.create_network( net2 = self.operator_cloud.create_network(
name=self.network_name + 'other') name=self.network_name + 'other'
)
self.assertIsNotNone(net2) self.assertIsNotNone(net2)
match = self.operator_cloud.list_networks( match = self.operator_cloud.list_networks(
filters=dict(name=self.network_name)) filters=dict(name=self.network_name)
)
self.assertEqual(1, len(match)) self.assertEqual(1, len(match))
self.assertEqual(net1['name'], match[0]['name']) self.assertEqual(net1['name'], match[0]['name'])

View File

@ -28,7 +28,6 @@ from openstack.tests.functional import base
class TestObject(base.BaseFunctionalTest): class TestObject(base.BaseFunctionalTest):
def setUp(self): def setUp(self):
super(TestObject, self).setUp() super(TestObject, self).setUp()
if not self.user_cloud.has_service('object-store'): if not self.user_cloud.has_service('object-store'):
@ -41,69 +40,84 @@ class TestObject(base.BaseFunctionalTest):
self.addCleanup(self.user_cloud.delete_container, container_name) self.addCleanup(self.user_cloud.delete_container, container_name)
self.user_cloud.create_container(container_name) self.user_cloud.create_container(container_name)
container = self.user_cloud.get_container(container_name) container = self.user_cloud.get_container(container_name)
self.assertEqual(container_name, container.name)
self.assertEqual( self.assertEqual(
container_name, container.name) [], self.user_cloud.list_containers(prefix='somethin')
self.assertEqual( )
[],
self.user_cloud.list_containers(prefix='somethin'))
sizes = ( sizes = (
(64 * 1024, 1), # 64K, one segment (64 * 1024, 1), # 64K, one segment
(64 * 1024, 5) # 64MB, 5 segments (64 * 1024, 5), # 64MB, 5 segments
) )
for size, nseg in sizes: for size, nseg in sizes:
segment_size = int(round(size / nseg)) segment_size = int(round(size / nseg))
with tempfile.NamedTemporaryFile() as fake_file: with tempfile.NamedTemporaryFile() as fake_file:
fake_content = ''.join(random.SystemRandom().choice( fake_content = ''.join(
string.ascii_uppercase + string.digits) random.SystemRandom().choice(
for _ in range(size)).encode('latin-1') string.ascii_uppercase + string.digits
)
for _ in range(size)
).encode('latin-1')
fake_file.write(fake_content) fake_file.write(fake_content)
fake_file.flush() fake_file.flush()
name = 'test-%d' % size name = 'test-%d' % size
self.addCleanup( self.addCleanup(
self.user_cloud.delete_object, container_name, name) self.user_cloud.delete_object, container_name, name
)
self.user_cloud.create_object( self.user_cloud.create_object(
container_name, name, container_name,
name,
fake_file.name, fake_file.name,
segment_size=segment_size, segment_size=segment_size,
metadata={'foo': 'bar'}) metadata={'foo': 'bar'},
self.assertFalse(self.user_cloud.is_object_stale( )
container_name, name, self.assertFalse(
fake_file.name self.user_cloud.is_object_stale(
)) container_name, name, fake_file.name
)
)
self.assertEqual( self.assertEqual(
'bar', self.user_cloud.get_object_metadata( 'bar',
container_name, name)['foo'] self.user_cloud.get_object_metadata(container_name, name)[
'foo'
],
)
self.user_cloud.update_object(
container=container_name,
name=name,
metadata={'testk': 'testv'},
) )
self.user_cloud.update_object(container=container_name, name=name,
metadata={'testk': 'testv'})
self.assertEqual( self.assertEqual(
'testv', self.user_cloud.get_object_metadata( 'testv',
container_name, name)['testk'] self.user_cloud.get_object_metadata(container_name, name)[
'testk'
],
) )
try: try:
self.assertIsNotNone( self.assertIsNotNone(
self.user_cloud.get_object(container_name, name)) self.user_cloud.get_object(container_name, name)
)
except exc.OpenStackCloudException as e: except exc.OpenStackCloudException as e:
self.addDetail( self.addDetail(
'failed_response', 'failed_response',
content.text_content(str(e.response.headers))) content.text_content(str(e.response.headers)),
)
self.addDetail( self.addDetail(
'failed_response', 'failed_response', content.text_content(e.response.text)
content.text_content(e.response.text)) )
self.assertEqual( self.assertEqual(
name, name, self.user_cloud.list_objects(container_name)[0]['name']
self.user_cloud.list_objects(container_name)[0]['name']) )
self.assertEqual( self.assertEqual(
[], [], self.user_cloud.list_objects(container_name, prefix='abc')
self.user_cloud.list_objects(container_name, )
prefix='abc'))
self.assertTrue( self.assertTrue(
self.user_cloud.delete_object(container_name, name)) self.user_cloud.delete_object(container_name, name)
)
self.assertEqual([], self.user_cloud.list_objects(container_name)) self.assertEqual([], self.user_cloud.list_objects(container_name))
self.assertEqual( self.assertEqual(
container_name, container_name, self.user_cloud.get_container(container_name).name
self.user_cloud.get_container(container_name).name) )
self.user_cloud.delete_container(container_name) self.user_cloud.delete_container(container_name)
def test_download_object_to_file(self): def test_download_object_to_file(self):
@ -112,64 +126,83 @@ class TestObject(base.BaseFunctionalTest):
self.addDetail('container', content.text_content(container_name)) self.addDetail('container', content.text_content(container_name))
self.addCleanup(self.user_cloud.delete_container, container_name) self.addCleanup(self.user_cloud.delete_container, container_name)
self.user_cloud.create_container(container_name) self.user_cloud.create_container(container_name)
self.assertEqual(container_name, self.assertEqual(
self.user_cloud.list_containers()[0]['name']) container_name, self.user_cloud.list_containers()[0]['name']
)
sizes = ( sizes = (
(64 * 1024, 1), # 64K, one segment (64 * 1024, 1), # 64K, one segment
(64 * 1024, 5) # 64MB, 5 segments (64 * 1024, 5), # 64MB, 5 segments
) )
for size, nseg in sizes: for size, nseg in sizes:
fake_content = '' fake_content = ''
segment_size = int(round(size / nseg)) segment_size = int(round(size / nseg))
with tempfile.NamedTemporaryFile() as fake_file: with tempfile.NamedTemporaryFile() as fake_file:
fake_content = ''.join(random.SystemRandom().choice( fake_content = ''.join(
string.ascii_uppercase + string.digits) random.SystemRandom().choice(
for _ in range(size)).encode('latin-1') string.ascii_uppercase + string.digits
)
for _ in range(size)
).encode('latin-1')
fake_file.write(fake_content) fake_file.write(fake_content)
fake_file.flush() fake_file.flush()
name = 'test-%d' % size name = 'test-%d' % size
self.addCleanup( self.addCleanup(
self.user_cloud.delete_object, container_name, name) self.user_cloud.delete_object, container_name, name
)
self.user_cloud.create_object( self.user_cloud.create_object(
container_name, name, container_name,
name,
fake_file.name, fake_file.name,
segment_size=segment_size, segment_size=segment_size,
metadata={'foo': 'bar'}) metadata={'foo': 'bar'},
self.assertFalse(self.user_cloud.is_object_stale( )
container_name, name, self.assertFalse(
fake_file.name self.user_cloud.is_object_stale(
)) container_name, name, fake_file.name
)
)
self.assertEqual( self.assertEqual(
'bar', self.user_cloud.get_object_metadata( 'bar',
container_name, name)['foo'] self.user_cloud.get_object_metadata(container_name, name)[
'foo'
],
)
self.user_cloud.update_object(
container=container_name,
name=name,
metadata={'testk': 'testv'},
) )
self.user_cloud.update_object(container=container_name, name=name,
metadata={'testk': 'testv'})
self.assertEqual( self.assertEqual(
'testv', self.user_cloud.get_object_metadata( 'testv',
container_name, name)['testk'] self.user_cloud.get_object_metadata(container_name, name)[
'testk'
],
) )
try: try:
with tempfile.NamedTemporaryFile() as fake_file: with tempfile.NamedTemporaryFile() as fake_file:
self.user_cloud.get_object( self.user_cloud.get_object(
container_name, name, outfile=fake_file.name) container_name, name, outfile=fake_file.name
)
downloaded_content = open(fake_file.name, 'rb').read() downloaded_content = open(fake_file.name, 'rb').read()
self.assertEqual(fake_content, downloaded_content) self.assertEqual(fake_content, downloaded_content)
except exc.OpenStackCloudException as e: except exc.OpenStackCloudException as e:
self.addDetail( self.addDetail(
'failed_response', 'failed_response',
content.text_content(str(e.response.headers))) content.text_content(str(e.response.headers)),
)
self.addDetail( self.addDetail(
'failed_response', 'failed_response', content.text_content(e.response.text)
content.text_content(e.response.text)) )
raise raise
self.assertEqual( self.assertEqual(
name, name, self.user_cloud.list_objects(container_name)[0]['name']
self.user_cloud.list_objects(container_name)[0]['name']) )
self.assertTrue( self.assertTrue(
self.user_cloud.delete_object(container_name, name)) self.user_cloud.delete_object(container_name, name)
)
self.assertEqual([], self.user_cloud.list_objects(container_name)) self.assertEqual([], self.user_cloud.list_objects(container_name))
self.assertEqual(container_name, self.assertEqual(
self.user_cloud.list_containers()[0]['name']) container_name, self.user_cloud.list_containers()[0]['name']
)
self.user_cloud.delete_container(container_name) self.user_cloud.delete_container(container_name)

View File

@ -27,7 +27,6 @@ from openstack.tests.functional import base
class TestPort(base.BaseFunctionalTest): class TestPort(base.BaseFunctionalTest):
def setUp(self): def setUp(self):
super(TestPort, self).setUp() super(TestPort, self).setUp()
# Skip Neutron tests if neutron is not present # Skip Neutron tests if neutron is not present
@ -40,7 +39,8 @@ class TestPort(base.BaseFunctionalTest):
# Generate a unique port name to allow concurrent tests # Generate a unique port name to allow concurrent tests
self.new_port_name = 'test_' + ''.join( self.new_port_name = 'test_' + ''.join(
random.choice(string.ascii_lowercase) for _ in range(5)) random.choice(string.ascii_lowercase) for _ in range(5)
)
self.addCleanup(self._cleanup_ports) self.addCleanup(self._cleanup_ports)
@ -65,7 +65,8 @@ class TestPort(base.BaseFunctionalTest):
port_name = self.new_port_name + '_create' port_name = self.new_port_name + '_create'
port = self.user_cloud.create_port( port = self.user_cloud.create_port(
network_id=self.net.id, name=port_name) network_id=self.net.id, name=port_name
)
self.assertIsInstance(port, dict) self.assertIsInstance(port, dict)
self.assertIn('id', port) self.assertIn('id', port)
self.assertEqual(port.get('name'), port_name) self.assertEqual(port.get('name'), port_name)
@ -74,7 +75,8 @@ class TestPort(base.BaseFunctionalTest):
port_name = self.new_port_name + '_get' port_name = self.new_port_name + '_get'
port = self.user_cloud.create_port( port = self.user_cloud.create_port(
network_id=self.net.id, name=port_name) network_id=self.net.id, name=port_name
)
self.assertIsInstance(port, dict) self.assertIsInstance(port, dict)
self.assertIn('id', port) self.assertIn('id', port)
self.assertEqual(port.get('name'), port_name) self.assertEqual(port.get('name'), port_name)
@ -89,7 +91,8 @@ class TestPort(base.BaseFunctionalTest):
port_name = self.new_port_name + '_get_by_id' port_name = self.new_port_name + '_get_by_id'
port = self.user_cloud.create_port( port = self.user_cloud.create_port(
network_id=self.net.id, name=port_name) network_id=self.net.id, name=port_name
)
self.assertIsInstance(port, dict) self.assertIsInstance(port, dict)
self.assertIn('id', port) self.assertIn('id', port)
self.assertEqual(port.get('name'), port_name) self.assertEqual(port.get('name'), port_name)
@ -104,11 +107,11 @@ class TestPort(base.BaseFunctionalTest):
port_name = self.new_port_name + '_update' port_name = self.new_port_name + '_update'
new_port_name = port_name + '_new' new_port_name = port_name + '_new'
self.user_cloud.create_port( self.user_cloud.create_port(network_id=self.net.id, name=port_name)
network_id=self.net.id, name=port_name)
port = self.user_cloud.update_port( port = self.user_cloud.update_port(
name_or_id=port_name, name=new_port_name) name_or_id=port_name, name=new_port_name
)
self.assertIsInstance(port, dict) self.assertIsInstance(port, dict)
self.assertEqual(port.get('name'), new_port_name) self.assertEqual(port.get('name'), new_port_name)
@ -129,7 +132,8 @@ class TestPort(base.BaseFunctionalTest):
port_name = self.new_port_name + '_delete' port_name = self.new_port_name + '_delete'
port = self.user_cloud.create_port( port = self.user_cloud.create_port(
network_id=self.net.id, name=port_name) network_id=self.net.id, name=port_name
)
self.assertIsInstance(port, dict) self.assertIsInstance(port, dict)
self.assertIn('id', port) self.assertIn('id', port)
self.assertEqual(port.get('name'), port_name) self.assertEqual(port.get('name'), port_name)

View File

@ -25,7 +25,6 @@ from openstack.tests.functional import base
class TestProject(base.KeystoneBaseFunctionalTest): class TestProject(base.KeystoneBaseFunctionalTest):
def setUp(self): def setUp(self):
super(TestProject, self).setUp() super(TestProject, self).setUp()
if not self.operator_cloud: if not self.operator_cloud:
@ -54,8 +53,9 @@ class TestProject(base.KeystoneBaseFunctionalTest):
'description': 'test_create_project', 'description': 'test_create_project',
} }
if self.identity_version == '3': if self.identity_version == '3':
params['domain_id'] = \ params['domain_id'] = self.operator_cloud.get_domain('default')[
self.operator_cloud.get_domain('default')['id'] 'id'
]
project = self.operator_cloud.create_project(**params) project = self.operator_cloud.create_project(**params)
@ -66,15 +66,23 @@ class TestProject(base.KeystoneBaseFunctionalTest):
user_id = self.operator_cloud.current_user_id user_id = self.operator_cloud.current_user_id
# Grant the current user access to the project # Grant the current user access to the project
self.assertTrue(self.operator_cloud.grant_role( self.assertTrue(
'member', user=user_id, project=project['id'], wait=True)) self.operator_cloud.grant_role(
'member', user=user_id, project=project['id'], wait=True
)
)
self.addCleanup( self.addCleanup(
self.operator_cloud.revoke_role, self.operator_cloud.revoke_role,
'member', user=user_id, project=project['id'], wait=True) 'member',
user=user_id,
project=project['id'],
wait=True,
)
new_cloud = self.operator_cloud.connect_as_project(project) new_cloud = self.operator_cloud.connect_as_project(project)
self.add_info_on_exception( self.add_info_on_exception(
'new_cloud_config', pprint.pformat(new_cloud.config.config)) 'new_cloud_config', pprint.pformat(new_cloud.config.config)
)
location = new_cloud.current_location location = new_cloud.current_location
self.assertEqual(project_name, location['project']['name']) self.assertEqual(project_name, location['project']['name'])
@ -84,15 +92,17 @@ class TestProject(base.KeystoneBaseFunctionalTest):
params = { params = {
'name': project_name, 'name': project_name,
'description': 'test_update_project', 'description': 'test_update_project',
'enabled': True 'enabled': True,
} }
if self.identity_version == '3': if self.identity_version == '3':
params['domain_id'] = \ params['domain_id'] = self.operator_cloud.get_domain('default')[
self.operator_cloud.get_domain('default')['id'] 'id'
]
project = self.operator_cloud.create_project(**params) project = self.operator_cloud.create_project(**params)
updated_project = self.operator_cloud.update_project( updated_project = self.operator_cloud.update_project(
project_name, enabled=False, description='new') project_name, enabled=False, description='new'
)
self.assertIsNotNone(updated_project) self.assertIsNotNone(updated_project)
self.assertEqual(project['id'], updated_project['id']) self.assertEqual(project['id'], updated_project['id'])
self.assertEqual(project['name'], updated_project['name']) self.assertEqual(project['name'], updated_project['name'])
@ -102,12 +112,14 @@ class TestProject(base.KeystoneBaseFunctionalTest):
# Revert the description and verify the project is still disabled # Revert the description and verify the project is still disabled
updated_project = self.operator_cloud.update_project( updated_project = self.operator_cloud.update_project(
project_name, description=params['description']) project_name, description=params['description']
)
self.assertIsNotNone(updated_project) self.assertIsNotNone(updated_project)
self.assertEqual(project['id'], updated_project['id']) self.assertEqual(project['id'], updated_project['id'])
self.assertEqual(project['name'], updated_project['name']) self.assertEqual(project['name'], updated_project['name'])
self.assertEqual(project['description'], self.assertEqual(
updated_project['description']) project['description'], updated_project['description']
)
self.assertTrue(project['enabled']) self.assertTrue(project['enabled'])
self.assertFalse(updated_project['enabled']) self.assertFalse(updated_project['enabled'])
@ -115,8 +127,9 @@ class TestProject(base.KeystoneBaseFunctionalTest):
project_name = self.new_project_name + '_delete' project_name = self.new_project_name + '_delete'
params = {'name': project_name} params = {'name': project_name}
if self.identity_version == '3': if self.identity_version == '3':
params['domain_id'] = \ params['domain_id'] = self.operator_cloud.get_domain('default')[
self.operator_cloud.get_domain('default')['id'] 'id'
]
project = self.operator_cloud.create_project(**params) project = self.operator_cloud.create_project(**params)
self.assertIsNotNone(project) self.assertIsNotNone(project)
self.assertTrue(self.operator_cloud.delete_project(project['id'])) self.assertTrue(self.operator_cloud.delete_project(project['id']))

View File

@ -48,8 +48,8 @@ class TestProjectCleanup(base.BaseFunctionalTest):
name=self.getUniqueString('router') name=self.getUniqueString('router')
) )
conn.network.add_interface_to_router( conn.network.add_interface_to_router(
self.router.id, self.router.id, subnet_id=self.subnet.id
subnet_id=self.subnet.id) )
def test_cleanup(self): def test_cleanup(self):
self._create_network_resources() self._create_network_resources()
@ -60,7 +60,8 @@ class TestProjectCleanup(base.BaseFunctionalTest):
dry_run=True, dry_run=True,
wait_timeout=120, wait_timeout=120,
status_queue=status_queue, status_queue=status_queue,
filters={'created_at': '2000-01-01'}) filters={'created_at': '2000-01-01'},
)
self.assertTrue(status_queue.empty()) self.assertTrue(status_queue.empty())
@ -71,7 +72,8 @@ class TestProjectCleanup(base.BaseFunctionalTest):
wait_timeout=120, wait_timeout=120,
status_queue=status_queue, status_queue=status_queue,
filters={'created_at': '2200-01-01'}, filters={'created_at': '2200-01-01'},
resource_evaluation_fn=lambda x, y, z: False) resource_evaluation_fn=lambda x, y, z: False,
)
self.assertTrue(status_queue.empty()) self.assertTrue(status_queue.empty())
@ -80,7 +82,8 @@ class TestProjectCleanup(base.BaseFunctionalTest):
dry_run=True, dry_run=True,
wait_timeout=120, wait_timeout=120,
status_queue=status_queue, status_queue=status_queue,
filters={'created_at': '2200-01-01'}) filters={'created_at': '2200-01-01'},
)
objects = [] objects = []
while not status_queue.empty(): while not status_queue.empty():
@ -92,9 +95,8 @@ class TestProjectCleanup(base.BaseFunctionalTest):
# Fourth round - dry run with no filters, ensure everything identified # Fourth round - dry run with no filters, ensure everything identified
self.conn.project_cleanup( self.conn.project_cleanup(
dry_run=True, dry_run=True, wait_timeout=120, status_queue=status_queue
wait_timeout=120, )
status_queue=status_queue)
objects = [] objects = []
while not status_queue.empty(): while not status_queue.empty():
@ -109,9 +111,8 @@ class TestProjectCleanup(base.BaseFunctionalTest):
# Last round - do a real cleanup # Last round - do a real cleanup
self.conn.project_cleanup( self.conn.project_cleanup(
dry_run=False, dry_run=False, wait_timeout=600, status_queue=status_queue
wait_timeout=600, )
status_queue=status_queue)
objects = [] objects = []
while not status_queue.empty(): while not status_queue.empty():
@ -136,10 +137,12 @@ class TestProjectCleanup(base.BaseFunctionalTest):
b1 = self.conn.block_storage.create_backup(volume_id=vol.id) b1 = self.conn.block_storage.create_backup(volume_id=vol.id)
self.conn.block_storage.wait_for_status(b1) self.conn.block_storage.wait_for_status(b1)
b2 = self.conn.block_storage.create_backup( b2 = self.conn.block_storage.create_backup(
volume_id=vol.id, is_incremental=True, snapshot_id=s1.id) volume_id=vol.id, is_incremental=True, snapshot_id=s1.id
)
self.conn.block_storage.wait_for_status(b2) self.conn.block_storage.wait_for_status(b2)
b3 = self.conn.block_storage.create_backup( b3 = self.conn.block_storage.create_backup(
volume_id=vol.id, is_incremental=True, snapshot_id=s1.id) volume_id=vol.id, is_incremental=True, snapshot_id=s1.id
)
self.conn.block_storage.wait_for_status(b3) self.conn.block_storage.wait_for_status(b3)
# First round - check no resources are old enough # First round - check no resources are old enough
@ -147,7 +150,8 @@ class TestProjectCleanup(base.BaseFunctionalTest):
dry_run=True, dry_run=True,
wait_timeout=120, wait_timeout=120,
status_queue=status_queue, status_queue=status_queue,
filters={'created_at': '2000-01-01'}) filters={'created_at': '2000-01-01'},
)
self.assertTrue(status_queue.empty()) self.assertTrue(status_queue.empty())
@ -158,7 +162,8 @@ class TestProjectCleanup(base.BaseFunctionalTest):
wait_timeout=120, wait_timeout=120,
status_queue=status_queue, status_queue=status_queue,
filters={'created_at': '2200-01-01'}, filters={'created_at': '2200-01-01'},
resource_evaluation_fn=lambda x, y, z: False) resource_evaluation_fn=lambda x, y, z: False,
)
self.assertTrue(status_queue.empty()) self.assertTrue(status_queue.empty())
@ -167,7 +172,8 @@ class TestProjectCleanup(base.BaseFunctionalTest):
dry_run=True, dry_run=True,
wait_timeout=120, wait_timeout=120,
status_queue=status_queue, status_queue=status_queue,
filters={'created_at': '2200-01-01'}) filters={'created_at': '2200-01-01'},
)
objects = [] objects = []
while not status_queue.empty(): while not status_queue.empty():
@ -179,9 +185,8 @@ class TestProjectCleanup(base.BaseFunctionalTest):
# Fourth round - dry run with no filters, ensure everything identified # Fourth round - dry run with no filters, ensure everything identified
self.conn.project_cleanup( self.conn.project_cleanup(
dry_run=True, dry_run=True, wait_timeout=120, status_queue=status_queue
wait_timeout=120, )
status_queue=status_queue)
objects = [] objects = []
while not status_queue.empty(): while not status_queue.empty():
@ -196,9 +201,8 @@ class TestProjectCleanup(base.BaseFunctionalTest):
# Last round - do a real cleanup # Last round - do a real cleanup
self.conn.project_cleanup( self.conn.project_cleanup(
dry_run=False, dry_run=False, wait_timeout=600, status_queue=status_queue
wait_timeout=600, )
status_queue=status_queue)
# Ensure no backups remain # Ensure no backups remain
self.assertEqual(0, len(list(self.conn.block_storage.backups()))) self.assertEqual(0, len(list(self.conn.block_storage.backups())))
# Ensure no snapshots remain # Ensure no snapshots remain
@ -212,14 +216,16 @@ class TestProjectCleanup(base.BaseFunctionalTest):
self.conn.object_store.create_container('test_cleanup') self.conn.object_store.create_container('test_cleanup')
for i in range(1, 10): for i in range(1, 10):
self.conn.object_store.create_object( self.conn.object_store.create_object(
"test_cleanup", f"test{i}", data="test{i}") "test_cleanup", f"test{i}", data="test{i}"
)
# First round - check no resources are old enough # First round - check no resources are old enough
self.conn.project_cleanup( self.conn.project_cleanup(
dry_run=True, dry_run=True,
wait_timeout=120, wait_timeout=120,
status_queue=status_queue, status_queue=status_queue,
filters={'updated_at': '2000-01-01'}) filters={'updated_at': '2000-01-01'},
)
self.assertTrue(status_queue.empty()) self.assertTrue(status_queue.empty())
@ -228,7 +234,8 @@ class TestProjectCleanup(base.BaseFunctionalTest):
dry_run=True, dry_run=True,
wait_timeout=120, wait_timeout=120,
status_queue=status_queue, status_queue=status_queue,
filters={'updated_at': '2200-01-01'}) filters={'updated_at': '2200-01-01'},
)
objects = [] objects = []
while not status_queue.empty(): while not status_queue.empty():
objects.append(status_queue.get()) objects.append(status_queue.get())
@ -238,19 +245,15 @@ class TestProjectCleanup(base.BaseFunctionalTest):
self.assertIn('test1', obj_names) self.assertIn('test1', obj_names)
# Ensure object still exists # Ensure object still exists
obj = self.conn.object_store.get_object( obj = self.conn.object_store.get_object("test1", "test_cleanup")
"test1", "test_cleanup")
self.assertIsNotNone(obj) self.assertIsNotNone(obj)
# Last round - do a real cleanup # Last round - do a real cleanup
self.conn.project_cleanup( self.conn.project_cleanup(
dry_run=False, dry_run=False, wait_timeout=600, status_queue=status_queue
wait_timeout=600, )
status_queue=status_queue)
objects.clear() objects.clear()
while not status_queue.empty(): while not status_queue.empty():
objects.append(status_queue.get()) objects.append(status_queue.get())
self.assertIsNone( self.assertIsNone(self.conn.get_container('test_container'))
self.conn.get_container('test_container')
)

View File

@ -50,59 +50,61 @@ class TestQosBandwidthLimitRule(base.BaseFunctionalTest):
# Create bw limit rule # Create bw limit rule
rule = self.operator_cloud.create_qos_bandwidth_limit_rule( rule = self.operator_cloud.create_qos_bandwidth_limit_rule(
self.policy['id'], self.policy['id'], max_kbps=max_kbps, max_burst_kbps=max_burst_kbps
max_kbps=max_kbps, )
max_burst_kbps=max_burst_kbps)
self.assertIn('id', rule) self.assertIn('id', rule)
self.assertEqual(max_kbps, rule['max_kbps']) self.assertEqual(max_kbps, rule['max_kbps'])
self.assertEqual(max_burst_kbps, rule['max_burst_kbps']) self.assertEqual(max_burst_kbps, rule['max_burst_kbps'])
# Now try to update rule # Now try to update rule
updated_rule = self.operator_cloud.update_qos_bandwidth_limit_rule( updated_rule = self.operator_cloud.update_qos_bandwidth_limit_rule(
self.policy['id'], self.policy['id'], rule['id'], max_kbps=updated_max_kbps
rule['id'], )
max_kbps=updated_max_kbps)
self.assertIn('id', updated_rule) self.assertIn('id', updated_rule)
self.assertEqual(updated_max_kbps, updated_rule['max_kbps']) self.assertEqual(updated_max_kbps, updated_rule['max_kbps'])
self.assertEqual(max_burst_kbps, updated_rule['max_burst_kbps']) self.assertEqual(max_burst_kbps, updated_rule['max_burst_kbps'])
# List rules from policy # List rules from policy
policy_rules = self.operator_cloud.list_qos_bandwidth_limit_rules( policy_rules = self.operator_cloud.list_qos_bandwidth_limit_rules(
self.policy['id']) self.policy['id']
)
self.assertEqual([updated_rule], policy_rules) self.assertEqual([updated_rule], policy_rules)
# Delete rule # Delete rule
self.operator_cloud.delete_qos_bandwidth_limit_rule( self.operator_cloud.delete_qos_bandwidth_limit_rule(
self.policy['id'], updated_rule['id']) self.policy['id'], updated_rule['id']
)
# Check if there is no rules in policy # Check if there is no rules in policy
policy_rules = self.operator_cloud.list_qos_bandwidth_limit_rules( policy_rules = self.operator_cloud.list_qos_bandwidth_limit_rules(
self.policy['id']) self.policy['id']
)
self.assertEqual([], policy_rules) self.assertEqual([], policy_rules)
def test_create_qos_bandwidth_limit_rule_direction(self): def test_create_qos_bandwidth_limit_rule_direction(self):
if not self.operator_cloud._has_neutron_extension( if not self.operator_cloud._has_neutron_extension(
'qos-bw-limit-direction'): 'qos-bw-limit-direction'
self.skipTest("'qos-bw-limit-direction' network extension " ):
"not supported by cloud") self.skipTest(
"'qos-bw-limit-direction' network extension "
"not supported by cloud"
)
max_kbps = 1500 max_kbps = 1500
direction = "ingress" direction = "ingress"
updated_direction = "egress" updated_direction = "egress"
# Create bw limit rule # Create bw limit rule
rule = self.operator_cloud.create_qos_bandwidth_limit_rule( rule = self.operator_cloud.create_qos_bandwidth_limit_rule(
self.policy['id'], self.policy['id'], max_kbps=max_kbps, direction=direction
max_kbps=max_kbps, )
direction=direction)
self.assertIn('id', rule) self.assertIn('id', rule)
self.assertEqual(max_kbps, rule['max_kbps']) self.assertEqual(max_kbps, rule['max_kbps'])
self.assertEqual(direction, rule['direction']) self.assertEqual(direction, rule['direction'])
# Now try to update direction in rule # Now try to update direction in rule
updated_rule = self.operator_cloud.update_qos_bandwidth_limit_rule( updated_rule = self.operator_cloud.update_qos_bandwidth_limit_rule(
self.policy['id'], self.policy['id'], rule['id'], direction=updated_direction
rule['id'], )
direction=updated_direction)
self.assertIn('id', updated_rule) self.assertIn('id', updated_rule)
self.assertEqual(max_kbps, updated_rule['max_kbps']) self.assertEqual(max_kbps, updated_rule['max_kbps'])
self.assertEqual(updated_direction, updated_rule['direction']) self.assertEqual(updated_direction, updated_rule['direction'])

View File

@ -49,29 +49,31 @@ class TestQosDscpMarkingRule(base.BaseFunctionalTest):
# Create DSCP marking rule # Create DSCP marking rule
rule = self.operator_cloud.create_qos_dscp_marking_rule( rule = self.operator_cloud.create_qos_dscp_marking_rule(
self.policy['id'], self.policy['id'], dscp_mark=dscp_mark
dscp_mark=dscp_mark) )
self.assertIn('id', rule) self.assertIn('id', rule)
self.assertEqual(dscp_mark, rule['dscp_mark']) self.assertEqual(dscp_mark, rule['dscp_mark'])
# Now try to update rule # Now try to update rule
updated_rule = self.operator_cloud.update_qos_dscp_marking_rule( updated_rule = self.operator_cloud.update_qos_dscp_marking_rule(
self.policy['id'], self.policy['id'], rule['id'], dscp_mark=updated_dscp_mark
rule['id'], )
dscp_mark=updated_dscp_mark)
self.assertIn('id', updated_rule) self.assertIn('id', updated_rule)
self.assertEqual(updated_dscp_mark, updated_rule['dscp_mark']) self.assertEqual(updated_dscp_mark, updated_rule['dscp_mark'])
# List rules from policy # List rules from policy
policy_rules = self.operator_cloud.list_qos_dscp_marking_rules( policy_rules = self.operator_cloud.list_qos_dscp_marking_rules(
self.policy['id']) self.policy['id']
)
self.assertEqual([updated_rule], policy_rules) self.assertEqual([updated_rule], policy_rules)
# Delete rule # Delete rule
self.operator_cloud.delete_qos_dscp_marking_rule( self.operator_cloud.delete_qos_dscp_marking_rule(
self.policy['id'], updated_rule['id']) self.policy['id'], updated_rule['id']
)
# Check if there is no rules in policy # Check if there is no rules in policy
policy_rules = self.operator_cloud.list_qos_dscp_marking_rules( policy_rules = self.operator_cloud.list_qos_dscp_marking_rules(
self.policy['id']) self.policy['id']
)
self.assertEqual([], policy_rules) self.assertEqual([], policy_rules)

View File

@ -49,29 +49,31 @@ class TestQosMinimumBandwidthRule(base.BaseFunctionalTest):
# Create min bw rule # Create min bw rule
rule = self.operator_cloud.create_qos_minimum_bandwidth_rule( rule = self.operator_cloud.create_qos_minimum_bandwidth_rule(
self.policy['id'], self.policy['id'], min_kbps=min_kbps
min_kbps=min_kbps) )
self.assertIn('id', rule) self.assertIn('id', rule)
self.assertEqual(min_kbps, rule['min_kbps']) self.assertEqual(min_kbps, rule['min_kbps'])
# Now try to update rule # Now try to update rule
updated_rule = self.operator_cloud.update_qos_minimum_bandwidth_rule( updated_rule = self.operator_cloud.update_qos_minimum_bandwidth_rule(
self.policy['id'], self.policy['id'], rule['id'], min_kbps=updated_min_kbps
rule['id'], )
min_kbps=updated_min_kbps)
self.assertIn('id', updated_rule) self.assertIn('id', updated_rule)
self.assertEqual(updated_min_kbps, updated_rule['min_kbps']) self.assertEqual(updated_min_kbps, updated_rule['min_kbps'])
# List rules from policy # List rules from policy
policy_rules = self.operator_cloud.list_qos_minimum_bandwidth_rules( policy_rules = self.operator_cloud.list_qos_minimum_bandwidth_rules(
self.policy['id']) self.policy['id']
)
self.assertEqual([updated_rule], policy_rules) self.assertEqual([updated_rule], policy_rules)
# Delete rule # Delete rule
self.operator_cloud.delete_qos_minimum_bandwidth_rule( self.operator_cloud.delete_qos_minimum_bandwidth_rule(
self.policy['id'], updated_rule['id']) self.policy['id'], updated_rule['id']
)
# Check if there is no rules in policy # Check if there is no rules in policy
policy_rules = self.operator_cloud.list_qos_minimum_bandwidth_rules( policy_rules = self.operator_cloud.list_qos_minimum_bandwidth_rules(
self.policy['id']) self.policy['id']
)
self.assertEqual([], policy_rules) self.assertEqual([], policy_rules)

View File

@ -56,7 +56,8 @@ class TestQosPolicy(base.BaseFunctionalTest):
def test_create_qos_policy_shared(self): def test_create_qos_policy_shared(self):
policy = self.operator_cloud.create_qos_policy( policy = self.operator_cloud.create_qos_policy(
name=self.policy_name, shared=True) name=self.policy_name, shared=True
)
self.assertIn('id', policy) self.assertIn('id', policy)
self.assertEqual(self.policy_name, policy['name']) self.assertEqual(self.policy_name, policy['name'])
self.assertTrue(policy['is_shared']) self.assertTrue(policy['is_shared'])
@ -64,10 +65,12 @@ class TestQosPolicy(base.BaseFunctionalTest):
def test_create_qos_policy_default(self): def test_create_qos_policy_default(self):
if not self.operator_cloud._has_neutron_extension('qos-default'): if not self.operator_cloud._has_neutron_extension('qos-default'):
self.skipTest("'qos-default' network extension not supported " self.skipTest(
"by cloud") "'qos-default' network extension not supported " "by cloud"
)
policy = self.operator_cloud.create_qos_policy( policy = self.operator_cloud.create_qos_policy(
name=self.policy_name, default=True) name=self.policy_name, default=True
)
self.assertIn('id', policy) self.assertIn('id', policy)
self.assertEqual(self.policy_name, policy['name']) self.assertEqual(self.policy_name, policy['name'])
self.assertFalse(policy['is_shared']) self.assertFalse(policy['is_shared'])
@ -80,7 +83,8 @@ class TestQosPolicy(base.BaseFunctionalTest):
self.assertFalse(policy['is_default']) self.assertFalse(policy['is_default'])
updated_policy = self.operator_cloud.update_qos_policy( updated_policy = self.operator_cloud.update_qos_policy(
policy['id'], shared=True, default=True) policy['id'], shared=True, default=True
)
self.assertEqual(self.policy_name, updated_policy['name']) self.assertEqual(self.policy_name, updated_policy['name'])
self.assertTrue(updated_policy['is_shared']) self.assertTrue(updated_policy['is_shared'])
self.assertTrue(updated_policy['is_default']) self.assertTrue(updated_policy['is_default'])
@ -89,9 +93,11 @@ class TestQosPolicy(base.BaseFunctionalTest):
policy1 = self.operator_cloud.create_qos_policy(name=self.policy_name) policy1 = self.operator_cloud.create_qos_policy(name=self.policy_name)
self.assertIsNotNone(policy1) self.assertIsNotNone(policy1)
policy2 = self.operator_cloud.create_qos_policy( policy2 = self.operator_cloud.create_qos_policy(
name=self.policy_name + 'other') name=self.policy_name + 'other'
)
self.assertIsNotNone(policy2) self.assertIsNotNone(policy2)
match = self.operator_cloud.list_qos_policies( match = self.operator_cloud.list_qos_policies(
filters=dict(name=self.policy_name)) filters=dict(name=self.policy_name)
)
self.assertEqual(1, len(match)) self.assertEqual(1, len(match))
self.assertEqual(policy1['name'], match[0]['name']) self.assertEqual(policy1['name'], match[0]['name'])

View File

@ -21,11 +21,9 @@ from openstack.tests.functional import base
class TestComputeQuotas(base.BaseFunctionalTest): class TestComputeQuotas(base.BaseFunctionalTest):
def test_get_quotas(self): def test_get_quotas(self):
'''Test quotas functionality''' '''Test quotas functionality'''
self.user_cloud.get_compute_quotas( self.user_cloud.get_compute_quotas(self.user_cloud.current_project_id)
self.user_cloud.current_project_id)
def test_set_quotas(self): def test_set_quotas(self):
'''Test quotas functionality''' '''Test quotas functionality'''
@ -36,15 +34,15 @@ class TestComputeQuotas(base.BaseFunctionalTest):
cores = quotas['cores'] cores = quotas['cores']
self.operator_cloud.set_compute_quotas('demo', cores=cores + 1) self.operator_cloud.set_compute_quotas('demo', cores=cores + 1)
self.assertEqual( self.assertEqual(
cores + 1, cores + 1, self.operator_cloud.get_compute_quotas('demo')['cores']
self.operator_cloud.get_compute_quotas('demo')['cores']) )
self.operator_cloud.delete_compute_quotas('demo') self.operator_cloud.delete_compute_quotas('demo')
self.assertEqual( self.assertEqual(
cores, self.operator_cloud.get_compute_quotas('demo')['cores']) cores, self.operator_cloud.get_compute_quotas('demo')['cores']
)
class TestVolumeQuotas(base.BaseFunctionalTest): class TestVolumeQuotas(base.BaseFunctionalTest):
def setUp(self): def setUp(self):
super(TestVolumeQuotas, self).setUp() super(TestVolumeQuotas, self).setUp()
if not self.user_cloud.has_service('volume'): if not self.user_cloud.has_service('volume'):
@ -52,9 +50,7 @@ class TestVolumeQuotas(base.BaseFunctionalTest):
def test_get_quotas(self): def test_get_quotas(self):
'''Test get quotas functionality''' '''Test get quotas functionality'''
self.user_cloud.get_volume_quotas( self.user_cloud.get_volume_quotas(self.user_cloud.current_project_id)
self.user_cloud.current_project_id
)
def test_set_quotas(self): def test_set_quotas(self):
'''Test set quotas functionality''' '''Test set quotas functionality'''
@ -66,19 +62,18 @@ class TestVolumeQuotas(base.BaseFunctionalTest):
self.operator_cloud.set_volume_quotas('demo', volumes=volumes + 1) self.operator_cloud.set_volume_quotas('demo', volumes=volumes + 1)
self.assertEqual( self.assertEqual(
volumes + 1, volumes + 1,
self.operator_cloud.get_volume_quotas('demo')['volumes']) self.operator_cloud.get_volume_quotas('demo')['volumes'],
)
self.operator_cloud.delete_volume_quotas('demo') self.operator_cloud.delete_volume_quotas('demo')
self.assertEqual( self.assertEqual(
volumes, volumes, self.operator_cloud.get_volume_quotas('demo')['volumes']
self.operator_cloud.get_volume_quotas('demo')['volumes']) )
class TestNetworkQuotas(base.BaseFunctionalTest): class TestNetworkQuotas(base.BaseFunctionalTest):
def test_get_quotas(self): def test_get_quotas(self):
'''Test get quotas functionality''' '''Test get quotas functionality'''
self.user_cloud.get_network_quotas( self.user_cloud.get_network_quotas(self.user_cloud.current_project_id)
self.user_cloud.current_project_id)
def test_quotas(self): def test_quotas(self):
'''Test quotas functionality''' '''Test quotas functionality'''
@ -92,11 +87,12 @@ class TestNetworkQuotas(base.BaseFunctionalTest):
self.operator_cloud.set_network_quotas('demo', networks=network + 1) self.operator_cloud.set_network_quotas('demo', networks=network + 1)
self.assertEqual( self.assertEqual(
network + 1, network + 1,
self.operator_cloud.get_network_quotas('demo')['networks']) self.operator_cloud.get_network_quotas('demo')['networks'],
)
self.operator_cloud.delete_network_quotas('demo') self.operator_cloud.delete_network_quotas('demo')
self.assertEqual( self.assertEqual(
network, network, self.operator_cloud.get_network_quotas('demo')['networks']
self.operator_cloud.get_network_quotas('demo')['networks']) )
def test_get_quotas_details(self): def test_get_quotas_details(self):
if not self.operator_cloud: if not self.operator_cloud:
@ -105,14 +101,21 @@ class TestNetworkQuotas(base.BaseFunctionalTest):
self.skipTest('network service not supported by cloud') self.skipTest('network service not supported by cloud')
quotas = [ quotas = [
'floating_ips', 'networks', 'ports', 'floating_ips',
'rbac_policies', 'routers', 'subnets', 'networks',
'subnet_pools', 'security_group_rules', 'ports',
'security_groups'] 'rbac_policies',
'routers',
'subnets',
'subnet_pools',
'security_group_rules',
'security_groups',
]
expected_keys = ['limit', 'used', 'reserved'] expected_keys = ['limit', 'used', 'reserved']
'''Test getting details about quota usage''' '''Test getting details about quota usage'''
quota_details = self.operator_cloud.get_network_quotas( quota_details = self.operator_cloud.get_network_quotas(
'demo', details=True) 'demo', details=True
)
for quota in quotas: for quota in quotas:
quota_val = quota_details[quota] quota_val = quota_details[quota]
if quota_val: if quota_val:

View File

@ -17,7 +17,6 @@ from openstack.tests.functional import base
class TestRangeSearch(base.BaseFunctionalTest): class TestRangeSearch(base.BaseFunctionalTest):
def _filter_m1_flavors(self, results): def _filter_m1_flavors(self, results):
"""The m1 flavors are the original devstack flavors""" """The m1 flavors are the original devstack flavors"""
new_results = [] new_results = []
@ -30,7 +29,10 @@ class TestRangeSearch(base.BaseFunctionalTest):
flavors = self.user_cloud.list_flavors(get_extra=False) flavors = self.user_cloud.list_flavors(get_extra=False)
self.assertRaises( self.assertRaises(
exc.OpenStackCloudException, exc.OpenStackCloudException,
self.user_cloud.range_search, flavors, {"ram": "<1a0"}) self.user_cloud.range_search,
flavors,
{"ram": "<1a0"},
)
def test_range_search_exact(self): def test_range_search_exact(self):
flavors = self.user_cloud.list_flavors(get_extra=False) flavors = self.user_cloud.list_flavors(get_extra=False)
@ -103,7 +105,8 @@ class TestRangeSearch(base.BaseFunctionalTest):
def test_range_search_multi_1(self): def test_range_search_multi_1(self):
flavors = self.user_cloud.list_flavors(get_extra=False) flavors = self.user_cloud.list_flavors(get_extra=False)
result = self.user_cloud.range_search( result = self.user_cloud.range_search(
flavors, {"ram": "MIN", "vcpus": "MIN"}) flavors, {"ram": "MIN", "vcpus": "MIN"}
)
self.assertIsInstance(result, list) self.assertIsInstance(result, list)
self.assertEqual(1, len(result)) self.assertEqual(1, len(result))
# older devstack does not have cirros256 # older devstack does not have cirros256
@ -112,7 +115,8 @@ class TestRangeSearch(base.BaseFunctionalTest):
def test_range_search_multi_2(self): def test_range_search_multi_2(self):
flavors = self.user_cloud.list_flavors(get_extra=False) flavors = self.user_cloud.list_flavors(get_extra=False)
result = self.user_cloud.range_search( result = self.user_cloud.range_search(
flavors, {"ram": "<1024", "vcpus": "MIN"}) flavors, {"ram": "<1024", "vcpus": "MIN"}
)
self.assertIsInstance(result, list) self.assertIsInstance(result, list)
result = self._filter_m1_flavors(result) result = self._filter_m1_flavors(result)
self.assertEqual(1, len(result)) self.assertEqual(1, len(result))
@ -122,7 +126,8 @@ class TestRangeSearch(base.BaseFunctionalTest):
def test_range_search_multi_3(self): def test_range_search_multi_3(self):
flavors = self.user_cloud.list_flavors(get_extra=False) flavors = self.user_cloud.list_flavors(get_extra=False)
result = self.user_cloud.range_search( result = self.user_cloud.range_search(
flavors, {"ram": ">=4096", "vcpus": "<6"}) flavors, {"ram": ">=4096", "vcpus": "<6"}
)
self.assertIsInstance(result, list) self.assertIsInstance(result, list)
result = self._filter_m1_flavors(result) result = self._filter_m1_flavors(result)
self.assertEqual(2, len(result)) self.assertEqual(2, len(result))
@ -133,7 +138,8 @@ class TestRangeSearch(base.BaseFunctionalTest):
def test_range_search_multi_4(self): def test_range_search_multi_4(self):
flavors = self.user_cloud.list_flavors(get_extra=False) flavors = self.user_cloud.list_flavors(get_extra=False)
result = self.user_cloud.range_search( result = self.user_cloud.range_search(
flavors, {"ram": ">=4096", "vcpus": "MAX"}) flavors, {"ram": ">=4096", "vcpus": "MAX"}
)
self.assertIsInstance(result, list) self.assertIsInstance(result, list)
self.assertEqual(1, len(result)) self.assertEqual(1, len(result))
# This is the only result that should have max vcpu # This is the only result that should have max vcpu

View File

@ -25,7 +25,6 @@ from openstack.tests.functional import base
class TestRecordset(base.BaseFunctionalTest): class TestRecordset(base.BaseFunctionalTest):
def setUp(self): def setUp(self):
super(TestRecordset, self).setUp() super(TestRecordset, self).setUp()
if not self.user_cloud.has_service('dns'): if not self.user_cloud.has_service('dns'):
@ -50,11 +49,9 @@ class TestRecordset(base.BaseFunctionalTest):
zone_obj = self.user_cloud.create_zone(name=zone, email=email) zone_obj = self.user_cloud.create_zone(name=zone, email=email)
# Test we can create a recordset and we get it returned # Test we can create a recordset and we get it returned
created_recordset = self.user_cloud.create_recordset(zone_obj['id'], created_recordset = self.user_cloud.create_recordset(
name, zone_obj['id'], name, type_, records, description, ttl
type_, )
records,
description, ttl)
self.addCleanup(self.cleanup, zone, created_recordset['id']) self.addCleanup(self.cleanup, zone, created_recordset['id'])
self.assertEqual(created_recordset['zone_id'], zone_obj['id']) self.assertEqual(created_recordset['zone_id'], zone_obj['id'])
@ -65,20 +62,22 @@ class TestRecordset(base.BaseFunctionalTest):
self.assertEqual(created_recordset['ttl'], ttl) self.assertEqual(created_recordset['ttl'], ttl)
# Test that we can list recordsets # Test that we can list recordsets
recordsets = self.user_cloud.list_recordsets(zone_obj['id'],) recordsets = self.user_cloud.list_recordsets(
zone_obj['id'],
)
self.assertIsNotNone(recordsets) self.assertIsNotNone(recordsets)
# Test we get the same recordset with the get_recordset method # Test we get the same recordset with the get_recordset method
get_recordset = self.user_cloud.get_recordset(zone_obj['id'], get_recordset = self.user_cloud.get_recordset(
created_recordset['id']) zone_obj['id'], created_recordset['id']
)
self.assertEqual(get_recordset['id'], created_recordset['id']) self.assertEqual(get_recordset['id'], created_recordset['id'])
# Test we can update a field on the recordset and only that field # Test we can update a field on the recordset and only that field
# is updated # is updated
updated_recordset = self.user_cloud.update_recordset( updated_recordset = self.user_cloud.update_recordset(
zone_obj['id'], zone_obj['id'], created_recordset['id'], ttl=7200
created_recordset['id'], )
ttl=7200)
self.assertEqual(updated_recordset['id'], created_recordset['id']) self.assertEqual(updated_recordset['id'], created_recordset['id'])
self.assertEqual(updated_recordset['name'], name) self.assertEqual(updated_recordset['name'], name)
self.assertEqual(updated_recordset['type'], type_.upper()) self.assertEqual(updated_recordset['type'], type_.upper())
@ -88,7 +87,8 @@ class TestRecordset(base.BaseFunctionalTest):
# Test we can delete and get True returned # Test we can delete and get True returned
deleted_recordset = self.user_cloud.delete_recordset( deleted_recordset = self.user_cloud.delete_recordset(
zone, created_recordset['id']) zone, created_recordset['id']
)
self.assertTrue(deleted_recordset) self.assertTrue(deleted_recordset)
def test_recordsets_with_zone_name(self): def test_recordsets_with_zone_name(self):
@ -110,9 +110,9 @@ class TestRecordset(base.BaseFunctionalTest):
zone_obj = self.user_cloud.create_zone(name=zone, email=email) zone_obj = self.user_cloud.create_zone(name=zone, email=email)
# Test we can create a recordset and we get it returned # Test we can create a recordset and we get it returned
created_recordset = self.user_cloud.create_recordset(zone, name, type_, created_recordset = self.user_cloud.create_recordset(
records, zone, name, type_, records, description, ttl
description, ttl) )
self.addCleanup(self.cleanup, zone, created_recordset['id']) self.addCleanup(self.cleanup, zone, created_recordset['id'])
self.assertEqual(created_recordset['zone_id'], zone_obj['id']) self.assertEqual(created_recordset['zone_id'], zone_obj['id'])
@ -127,16 +127,16 @@ class TestRecordset(base.BaseFunctionalTest):
self.assertIsNotNone(recordsets) self.assertIsNotNone(recordsets)
# Test we get the same recordset with the get_recordset method # Test we get the same recordset with the get_recordset method
get_recordset = self.user_cloud.get_recordset(zone, get_recordset = self.user_cloud.get_recordset(
created_recordset['id']) zone, created_recordset['id']
)
self.assertEqual(get_recordset['id'], created_recordset['id']) self.assertEqual(get_recordset['id'], created_recordset['id'])
# Test we can update a field on the recordset and only that field # Test we can update a field on the recordset and only that field
# is updated # is updated
updated_recordset = self.user_cloud.update_recordset( updated_recordset = self.user_cloud.update_recordset(
zone_obj['id'], zone_obj['id'], created_recordset['id'], ttl=7200
created_recordset['id'], )
ttl=7200)
self.assertEqual(updated_recordset['id'], created_recordset['id']) self.assertEqual(updated_recordset['id'], created_recordset['id'])
self.assertEqual(updated_recordset['name'], name) self.assertEqual(updated_recordset['name'], name)
self.assertEqual(updated_recordset['type'], type_.upper()) self.assertEqual(updated_recordset['type'], type_.upper())
@ -146,10 +146,10 @@ class TestRecordset(base.BaseFunctionalTest):
# Test we can delete and get True returned # Test we can delete and get True returned
deleted_recordset = self.user_cloud.delete_recordset( deleted_recordset = self.user_cloud.delete_recordset(
zone, created_recordset['id']) zone, created_recordset['id']
)
self.assertTrue(deleted_recordset) self.assertTrue(deleted_recordset)
def cleanup(self, zone_name, recordset_id): def cleanup(self, zone_name, recordset_id):
self.user_cloud.delete_recordset( self.user_cloud.delete_recordset(zone_name, recordset_id)
zone_name, recordset_id)
self.user_cloud.delete_zone(zone_name) self.user_cloud.delete_zone(zone_name)

View File

@ -24,8 +24,13 @@ from openstack.tests.functional import base
EXPECTED_TOPLEVEL_FIELDS = ( EXPECTED_TOPLEVEL_FIELDS = (
'id', 'name', 'is_admin_state_up', 'external_gateway_info', 'id',
'project_id', 'routes', 'status' 'name',
'is_admin_state_up',
'external_gateway_info',
'project_id',
'routes',
'status',
) )
EXPECTED_GW_INFO_FIELDS = ('network_id', 'enable_snat', 'external_fixed_ips') EXPECTED_GW_INFO_FIELDS = ('network_id', 'enable_snat', 'external_fixed_ips')
@ -90,7 +95,8 @@ class TestRouter(base.BaseFunctionalTest):
def test_create_router_basic(self): def test_create_router_basic(self):
net1_name = self.network_prefix + '_net1' net1_name = self.network_prefix + '_net1'
net1 = self.operator_cloud.create_network( net1 = self.operator_cloud.create_network(
name=net1_name, external=True) name=net1_name, external=True
)
router_name = self.router_prefix + '_create_basic' router_name = self.router_prefix + '_create_basic'
router = self.operator_cloud.create_router( router = self.operator_cloud.create_router(
@ -117,14 +123,15 @@ class TestRouter(base.BaseFunctionalTest):
proj_id = project['id'] proj_id = project['id']
net1_name = self.network_prefix + '_net1' net1_name = self.network_prefix + '_net1'
net1 = self.operator_cloud.create_network( net1 = self.operator_cloud.create_network(
name=net1_name, external=True, project_id=proj_id) name=net1_name, external=True, project_id=proj_id
)
router_name = self.router_prefix + '_create_project' router_name = self.router_prefix + '_create_project'
router = self.operator_cloud.create_router( router = self.operator_cloud.create_router(
name=router_name, name=router_name,
admin_state_up=True, admin_state_up=True,
ext_gateway_net_id=net1['id'], ext_gateway_net_id=net1['id'],
project_id=proj_id project_id=proj_id,
) )
for field in EXPECTED_TOPLEVEL_FIELDS: for field in EXPECTED_TOPLEVEL_FIELDS:
@ -140,9 +147,9 @@ class TestRouter(base.BaseFunctionalTest):
self.assertEqual(net1['id'], ext_gw_info['network_id']) self.assertEqual(net1['id'], ext_gw_info['network_id'])
self.assertTrue(ext_gw_info['enable_snat']) self.assertTrue(ext_gw_info['enable_snat'])
def _create_and_verify_advanced_router(self, def _create_and_verify_advanced_router(
external_cidr, self, external_cidr, external_gateway_ip=None
external_gateway_ip=None): ):
# external_cidr must be passed in as unicode (u'') # external_cidr must be passed in as unicode (u'')
# NOTE(Shrews): The arguments are needed because these tests # NOTE(Shrews): The arguments are needed because these tests
# will run in parallel and we want to make sure that each test # will run in parallel and we want to make sure that each test
@ -150,10 +157,13 @@ class TestRouter(base.BaseFunctionalTest):
net1_name = self.network_prefix + '_net1' net1_name = self.network_prefix + '_net1'
sub1_name = self.subnet_prefix + '_sub1' sub1_name = self.subnet_prefix + '_sub1'
net1 = self.operator_cloud.create_network( net1 = self.operator_cloud.create_network(
name=net1_name, external=True) name=net1_name, external=True
)
sub1 = self.operator_cloud.create_subnet( sub1 = self.operator_cloud.create_subnet(
net1['id'], external_cidr, subnet_name=sub1_name, net1['id'],
gateway_ip=external_gateway_ip external_cidr,
subnet_name=sub1_name,
gateway_ip=external_gateway_ip,
) )
ip_net = ipaddress.IPv4Network(external_cidr) ip_net = ipaddress.IPv4Network(external_cidr)
@ -165,9 +175,7 @@ class TestRouter(base.BaseFunctionalTest):
admin_state_up=False, admin_state_up=False,
ext_gateway_net_id=net1['id'], ext_gateway_net_id=net1['id'],
enable_snat=False, enable_snat=False,
ext_fixed_ips=[ ext_fixed_ips=[{'subnet_id': sub1['id'], 'ip_address': last_ip}],
{'subnet_id': sub1['id'], 'ip_address': last_ip}
]
) )
for field in EXPECTED_TOPLEVEL_FIELDS: for field in EXPECTED_TOPLEVEL_FIELDS:
@ -183,12 +191,10 @@ class TestRouter(base.BaseFunctionalTest):
self.assertEqual(1, len(ext_gw_info['external_fixed_ips'])) self.assertEqual(1, len(ext_gw_info['external_fixed_ips']))
self.assertEqual( self.assertEqual(
sub1['id'], sub1['id'], ext_gw_info['external_fixed_ips'][0]['subnet_id']
ext_gw_info['external_fixed_ips'][0]['subnet_id']
) )
self.assertEqual( self.assertEqual(
last_ip, last_ip, ext_gw_info['external_fixed_ips'][0]['ip_address']
ext_gw_info['external_fixed_ips'][0]['ip_address']
) )
return router return router
@ -198,20 +204,25 @@ class TestRouter(base.BaseFunctionalTest):
def test_add_remove_router_interface(self): def test_add_remove_router_interface(self):
router = self._create_and_verify_advanced_router( router = self._create_and_verify_advanced_router(
external_cidr=u'10.3.3.0/24') external_cidr=u'10.3.3.0/24'
)
net_name = self.network_prefix + '_intnet1' net_name = self.network_prefix + '_intnet1'
sub_name = self.subnet_prefix + '_intsub1' sub_name = self.subnet_prefix + '_intsub1'
net = self.operator_cloud.create_network(name=net_name) net = self.operator_cloud.create_network(name=net_name)
sub = self.operator_cloud.create_subnet( sub = self.operator_cloud.create_subnet(
net['id'], '10.4.4.0/24', subnet_name=sub_name, net['id'],
gateway_ip='10.4.4.1' '10.4.4.0/24',
subnet_name=sub_name,
gateway_ip='10.4.4.1',
) )
iface = self.operator_cloud.add_router_interface( iface = self.operator_cloud.add_router_interface(
router, subnet_id=sub['id']) router, subnet_id=sub['id']
)
self.assertIsNone( self.assertIsNone(
self.operator_cloud.remove_router_interface( self.operator_cloud.remove_router_interface(
router, subnet_id=sub['id']) router, subnet_id=sub['id']
)
) )
# Test return values *after* the interface is detached so the # Test return values *after* the interface is detached so the
@ -224,25 +235,32 @@ class TestRouter(base.BaseFunctionalTest):
def test_list_router_interfaces(self): def test_list_router_interfaces(self):
router = self._create_and_verify_advanced_router( router = self._create_and_verify_advanced_router(
external_cidr=u'10.5.5.0/24') external_cidr=u'10.5.5.0/24'
)
net_name = self.network_prefix + '_intnet1' net_name = self.network_prefix + '_intnet1'
sub_name = self.subnet_prefix + '_intsub1' sub_name = self.subnet_prefix + '_intsub1'
net = self.operator_cloud.create_network(name=net_name) net = self.operator_cloud.create_network(name=net_name)
sub = self.operator_cloud.create_subnet( sub = self.operator_cloud.create_subnet(
net['id'], '10.6.6.0/24', subnet_name=sub_name, net['id'],
gateway_ip='10.6.6.1' '10.6.6.0/24',
subnet_name=sub_name,
gateway_ip='10.6.6.1',
) )
iface = self.operator_cloud.add_router_interface( iface = self.operator_cloud.add_router_interface(
router, subnet_id=sub['id']) router, subnet_id=sub['id']
)
all_ifaces = self.operator_cloud.list_router_interfaces(router) all_ifaces = self.operator_cloud.list_router_interfaces(router)
int_ifaces = self.operator_cloud.list_router_interfaces( int_ifaces = self.operator_cloud.list_router_interfaces(
router, interface_type='internal') router, interface_type='internal'
)
ext_ifaces = self.operator_cloud.list_router_interfaces( ext_ifaces = self.operator_cloud.list_router_interfaces(
router, interface_type='external') router, interface_type='external'
)
self.assertIsNone( self.assertIsNone(
self.operator_cloud.remove_router_interface( self.operator_cloud.remove_router_interface(
router, subnet_id=sub['id']) router, subnet_id=sub['id']
)
) )
# Test return values *after* the interface is detached so the # Test return values *after* the interface is detached so the
@ -253,17 +271,21 @@ class TestRouter(base.BaseFunctionalTest):
self.assertEqual(1, len(ext_ifaces)) self.assertEqual(1, len(ext_ifaces))
ext_fixed_ips = router['external_gateway_info']['external_fixed_ips'] ext_fixed_ips = router['external_gateway_info']['external_fixed_ips']
self.assertEqual(ext_fixed_ips[0]['subnet_id'], self.assertEqual(
ext_ifaces[0]['fixed_ips'][0]['subnet_id']) ext_fixed_ips[0]['subnet_id'],
ext_ifaces[0]['fixed_ips'][0]['subnet_id'],
)
self.assertEqual(sub['id'], int_ifaces[0]['fixed_ips'][0]['subnet_id']) self.assertEqual(sub['id'], int_ifaces[0]['fixed_ips'][0]['subnet_id'])
def test_update_router_name(self): def test_update_router_name(self):
router = self._create_and_verify_advanced_router( router = self._create_and_verify_advanced_router(
external_cidr=u'10.7.7.0/24') external_cidr=u'10.7.7.0/24'
)
new_name = self.router_prefix + '_update_name' new_name = self.router_prefix + '_update_name'
updated = self.operator_cloud.update_router( updated = self.operator_cloud.update_router(
router['id'], name=new_name) router['id'], name=new_name
)
self.assertIsNotNone(updated) self.assertIsNotNone(updated)
for field in EXPECTED_TOPLEVEL_FIELDS: for field in EXPECTED_TOPLEVEL_FIELDS:
@ -275,20 +297,20 @@ class TestRouter(base.BaseFunctionalTest):
# Validate nothing else changed # Validate nothing else changed
self.assertEqual(router['status'], updated['status']) self.assertEqual(router['status'], updated['status'])
self.assertEqual(router['admin_state_up'], updated['admin_state_up']) self.assertEqual(router['admin_state_up'], updated['admin_state_up'])
self.assertEqual(router['external_gateway_info'], self.assertEqual(
updated['external_gateway_info']) router['external_gateway_info'], updated['external_gateway_info']
)
def test_update_router_routes(self): def test_update_router_routes(self):
router = self._create_and_verify_advanced_router( router = self._create_and_verify_advanced_router(
external_cidr=u'10.7.7.0/24') external_cidr=u'10.7.7.0/24'
)
routes = [{ routes = [{"destination": "10.7.7.0/24", "nexthop": "10.7.7.99"}]
"destination": "10.7.7.0/24",
"nexthop": "10.7.7.99"
}]
updated = self.operator_cloud.update_router( updated = self.operator_cloud.update_router(
router['id'], routes=routes) router['id'], routes=routes
)
self.assertIsNotNone(updated) self.assertIsNotNone(updated)
for field in EXPECTED_TOPLEVEL_FIELDS: for field in EXPECTED_TOPLEVEL_FIELDS:
@ -300,15 +322,18 @@ class TestRouter(base.BaseFunctionalTest):
# Validate nothing else changed # Validate nothing else changed
self.assertEqual(router['status'], updated['status']) self.assertEqual(router['status'], updated['status'])
self.assertEqual(router['admin_state_up'], updated['admin_state_up']) self.assertEqual(router['admin_state_up'], updated['admin_state_up'])
self.assertEqual(router['external_gateway_info'], self.assertEqual(
updated['external_gateway_info']) router['external_gateway_info'], updated['external_gateway_info']
)
def test_update_router_admin_state(self): def test_update_router_admin_state(self):
router = self._create_and_verify_advanced_router( router = self._create_and_verify_advanced_router(
external_cidr=u'10.8.8.0/24') external_cidr=u'10.8.8.0/24'
)
updated = self.operator_cloud.update_router( updated = self.operator_cloud.update_router(
router['id'], admin_state_up=True) router['id'], admin_state_up=True
)
self.assertIsNotNone(updated) self.assertIsNotNone(updated)
for field in EXPECTED_TOPLEVEL_FIELDS: for field in EXPECTED_TOPLEVEL_FIELDS:
@ -316,25 +341,30 @@ class TestRouter(base.BaseFunctionalTest):
# admin_state_up is the only change we expect # admin_state_up is the only change we expect
self.assertTrue(updated['admin_state_up']) self.assertTrue(updated['admin_state_up'])
self.assertNotEqual(router['admin_state_up'], self.assertNotEqual(
updated['admin_state_up']) router['admin_state_up'], updated['admin_state_up']
)
# Validate nothing else changed # Validate nothing else changed
self.assertEqual(router['status'], updated['status']) self.assertEqual(router['status'], updated['status'])
self.assertEqual(router['name'], updated['name']) self.assertEqual(router['name'], updated['name'])
self.assertEqual(router['external_gateway_info'], self.assertEqual(
updated['external_gateway_info']) router['external_gateway_info'], updated['external_gateway_info']
)
def test_update_router_ext_gw_info(self): def test_update_router_ext_gw_info(self):
router = self._create_and_verify_advanced_router( router = self._create_and_verify_advanced_router(
external_cidr=u'10.9.9.0/24') external_cidr=u'10.9.9.0/24'
)
# create a new subnet # create a new subnet
existing_net_id = router['external_gateway_info']['network_id'] existing_net_id = router['external_gateway_info']['network_id']
sub_name = self.subnet_prefix + '_update' sub_name = self.subnet_prefix + '_update'
sub = self.operator_cloud.create_subnet( sub = self.operator_cloud.create_subnet(
existing_net_id, '10.10.10.0/24', subnet_name=sub_name, existing_net_id,
gateway_ip='10.10.10.1' '10.10.10.0/24',
subnet_name=sub_name,
gateway_ip='10.10.10.1',
) )
updated = self.operator_cloud.update_router( updated = self.operator_cloud.update_router(
@ -342,7 +372,7 @@ class TestRouter(base.BaseFunctionalTest):
ext_gateway_net_id=existing_net_id, ext_gateway_net_id=existing_net_id,
ext_fixed_ips=[ ext_fixed_ips=[
{'subnet_id': sub['id'], 'ip_address': '10.10.10.77'} {'subnet_id': sub['id'], 'ip_address': '10.10.10.77'}
] ],
) )
self.assertIsNotNone(updated) self.assertIsNotNone(updated)
@ -353,12 +383,10 @@ class TestRouter(base.BaseFunctionalTest):
ext_gw_info = updated['external_gateway_info'] ext_gw_info = updated['external_gateway_info']
self.assertEqual(1, len(ext_gw_info['external_fixed_ips'])) self.assertEqual(1, len(ext_gw_info['external_fixed_ips']))
self.assertEqual( self.assertEqual(
sub['id'], sub['id'], ext_gw_info['external_fixed_ips'][0]['subnet_id']
ext_gw_info['external_fixed_ips'][0]['subnet_id']
) )
self.assertEqual( self.assertEqual(
'10.10.10.77', '10.10.10.77', ext_gw_info['external_fixed_ips'][0]['ip_address']
ext_gw_info['external_fixed_ips'][0]['ip_address']
) )
# Validate nothing else changed # Validate nothing else changed

View File

@ -23,7 +23,8 @@ from openstack.tests.functional import base
class TestSecurityGroups(base.BaseFunctionalTest): class TestSecurityGroups(base.BaseFunctionalTest):
def test_create_list_security_groups(self): def test_create_list_security_groups(self):
sg1 = self.user_cloud.create_security_group( sg1 = self.user_cloud.create_security_group(
name="sg1", description="sg1") name="sg1", description="sg1"
)
self.addCleanup(self.user_cloud.delete_security_group, sg1['id']) self.addCleanup(self.user_cloud.delete_security_group, sg1['id'])
if self.user_cloud.has_service('network'): if self.user_cloud.has_service('network'):
# Neutron defaults to all_tenants=1 when admin # Neutron defaults to all_tenants=1 when admin
@ -39,10 +40,12 @@ class TestSecurityGroups(base.BaseFunctionalTest):
self.skipTest("Operator cloud is required for this test") self.skipTest("Operator cloud is required for this test")
sg1 = self.user_cloud.create_security_group( sg1 = self.user_cloud.create_security_group(
name="sg1", description="sg1") name="sg1", description="sg1"
)
self.addCleanup(self.user_cloud.delete_security_group, sg1['id']) self.addCleanup(self.user_cloud.delete_security_group, sg1['id'])
sg2 = self.operator_cloud.create_security_group( sg2 = self.operator_cloud.create_security_group(
name="sg2", description="sg2") name="sg2", description="sg2"
)
self.addCleanup(self.operator_cloud.delete_security_group, sg2['id']) self.addCleanup(self.operator_cloud.delete_security_group, sg2['id'])
if self.user_cloud.has_service('network'): if self.user_cloud.has_service('network'):
@ -53,7 +56,8 @@ class TestSecurityGroups(base.BaseFunctionalTest):
# Filter by tenant_id (filtering by project_id won't work with # Filter by tenant_id (filtering by project_id won't work with
# Keystone V2) # Keystone V2)
sg_list = self.operator_cloud.list_security_groups( sg_list = self.operator_cloud.list_security_groups(
filters={'tenant_id': self.user_cloud.current_project_id}) filters={'tenant_id': self.user_cloud.current_project_id}
)
self.assertIn(sg1['id'], [sg['id'] for sg in sg_list]) self.assertIn(sg1['id'], [sg['id'] for sg in sg_list])
self.assertNotIn(sg2['id'], [sg['id'] for sg in sg_list]) self.assertNotIn(sg2['id'], [sg['id'] for sg in sg_list])
@ -64,7 +68,8 @@ class TestSecurityGroups(base.BaseFunctionalTest):
self.assertNotIn(sg1['id'], [sg['id'] for sg in sg_list]) self.assertNotIn(sg1['id'], [sg['id'] for sg in sg_list])
sg_list = self.operator_cloud.list_security_groups( sg_list = self.operator_cloud.list_security_groups(
filters={'all_tenants': 1}) filters={'all_tenants': 1}
)
self.assertIn(sg1['id'], [sg['id'] for sg in sg_list]) self.assertIn(sg1['id'], [sg['id'] for sg in sg_list])
def test_get_security_group_by_id(self): def test_get_security_group_by_id(self):

View File

@ -21,15 +21,16 @@ from openstack.tests.functional import base
class TestServerGroup(base.BaseFunctionalTest): class TestServerGroup(base.BaseFunctionalTest):
def test_server_group(self): def test_server_group(self):
server_group_name = self.getUniqueString() server_group_name = self.getUniqueString()
self.addCleanup(self.cleanup, server_group_name) self.addCleanup(self.cleanup, server_group_name)
server_group = self.user_cloud.create_server_group( server_group = self.user_cloud.create_server_group(
server_group_name, ['affinity']) server_group_name, ['affinity']
)
server_group_ids = [v['id'] server_group_ids = [
for v in self.user_cloud.list_server_groups()] v['id'] for v in self.user_cloud.list_server_groups()
]
self.assertIn(server_group['id'], server_group_ids) self.assertIn(server_group['id'], server_group_ids)
self.user_cloud.delete_server_group(server_group_name) self.user_cloud.delete_server_group(server_group_name)

View File

@ -38,15 +38,17 @@ class TestServices(base.KeystoneBaseFunctionalTest):
# Generate a random name for services in this test # Generate a random name for services in this test
self.new_service_name = 'test_' + ''.join( self.new_service_name = 'test_' + ''.join(
random.choice(string.ascii_lowercase) for _ in range(5)) random.choice(string.ascii_lowercase) for _ in range(5)
)
self.addCleanup(self._cleanup_services) self.addCleanup(self._cleanup_services)
def _cleanup_services(self): def _cleanup_services(self):
exception_list = list() exception_list = list()
for s in self.operator_cloud.list_services(): for s in self.operator_cloud.list_services():
if s['name'] is not None and \ if s['name'] is not None and s['name'].startswith(
s['name'].startswith(self.new_service_name): self.new_service_name
):
try: try:
self.operator_cloud.delete_service(name_or_id=s['id']) self.operator_cloud.delete_service(name_or_id=s['id'])
except Exception as e: except Exception as e:
@ -60,45 +62,57 @@ class TestServices(base.KeystoneBaseFunctionalTest):
def test_create_service(self): def test_create_service(self):
service = self.operator_cloud.create_service( service = self.operator_cloud.create_service(
name=self.new_service_name + '_create', type='test_type', name=self.new_service_name + '_create',
description='this is a test description') type='test_type',
description='this is a test description',
)
self.assertIsNotNone(service.get('id')) self.assertIsNotNone(service.get('id'))
def test_update_service(self): def test_update_service(self):
ver = self.operator_cloud.config.get_api_version('identity') ver = self.operator_cloud.config.get_api_version('identity')
if ver.startswith('2'): if ver.startswith('2'):
# NOTE(SamYaple): Update service only works with v3 api # NOTE(SamYaple): Update service only works with v3 api
self.assertRaises(OpenStackCloudUnavailableFeature, self.assertRaises(
self.operator_cloud.update_service, OpenStackCloudUnavailableFeature,
'service_id', name='new name') self.operator_cloud.update_service,
'service_id',
name='new name',
)
else: else:
service = self.operator_cloud.create_service( service = self.operator_cloud.create_service(
name=self.new_service_name + '_create', type='test_type', name=self.new_service_name + '_create',
description='this is a test description', enabled=True) type='test_type',
description='this is a test description',
enabled=True,
)
new_service = self.operator_cloud.update_service( new_service = self.operator_cloud.update_service(
service.id, service.id,
name=self.new_service_name + '_update', name=self.new_service_name + '_update',
description='this is an updated description', description='this is an updated description',
enabled=False enabled=False,
)
self.assertEqual(
new_service.name, self.new_service_name + '_update'
)
self.assertEqual(
new_service.description, 'this is an updated description'
) )
self.assertEqual(new_service.name,
self.new_service_name + '_update')
self.assertEqual(new_service.description,
'this is an updated description')
self.assertFalse(new_service.is_enabled) self.assertFalse(new_service.is_enabled)
self.assertEqual(service.id, new_service.id) self.assertEqual(service.id, new_service.id)
def test_list_services(self): def test_list_services(self):
service = self.operator_cloud.create_service( service = self.operator_cloud.create_service(
name=self.new_service_name + '_list', type='test_type') name=self.new_service_name + '_list', type='test_type'
)
observed_services = self.operator_cloud.list_services() observed_services = self.operator_cloud.list_services()
self.assertIsInstance(observed_services, list) self.assertIsInstance(observed_services, list)
found = False found = False
for s in observed_services: for s in observed_services:
# Test all attributes are returned # Test all attributes are returned
if s['id'] == service['id']: if s['id'] == service['id']:
self.assertEqual(self.new_service_name + '_list', self.assertEqual(
s.get('name')) self.new_service_name + '_list', s.get('name')
)
self.assertEqual('test_type', s.get('type')) self.assertEqual('test_type', s.get('type'))
found = True found = True
self.assertTrue(found, msg='new service not found in service list!') self.assertTrue(found, msg='new service not found in service list!')
@ -106,8 +120,8 @@ class TestServices(base.KeystoneBaseFunctionalTest):
def test_delete_service_by_name(self): def test_delete_service_by_name(self):
# Test delete by name # Test delete by name
service = self.operator_cloud.create_service( service = self.operator_cloud.create_service(
name=self.new_service_name + '_delete_by_name', name=self.new_service_name + '_delete_by_name', type='test_type'
type='test_type') )
self.operator_cloud.delete_service(name_or_id=service['name']) self.operator_cloud.delete_service(name_or_id=service['name'])
observed_services = self.operator_cloud.list_services() observed_services = self.operator_cloud.list_services()
found = False found = False
@ -120,8 +134,8 @@ class TestServices(base.KeystoneBaseFunctionalTest):
def test_delete_service_by_id(self): def test_delete_service_by_id(self):
# Test delete by id # Test delete by id
service = self.operator_cloud.create_service( service = self.operator_cloud.create_service(
name=self.new_service_name + '_delete_by_id', name=self.new_service_name + '_delete_by_id', type='test_type'
type='test_type') )
self.operator_cloud.delete_service(name_or_id=service['id']) self.operator_cloud.delete_service(name_or_id=service['id'])
observed_services = self.operator_cloud.list_services() observed_services = self.operator_cloud.list_services()
found = False found = False

View File

@ -73,7 +73,6 @@ validate_template = '''heat_template_version: asdf-no-such-version '''
class TestStack(base.BaseFunctionalTest): class TestStack(base.BaseFunctionalTest):
def setUp(self): def setUp(self):
super(TestStack, self).setUp() super(TestStack, self).setUp()
if not self.user_cloud.has_service('orchestration'): if not self.user_cloud.has_service('orchestration'):
@ -88,10 +87,12 @@ class TestStack(base.BaseFunctionalTest):
test_template.write(validate_template.encode('utf-8')) test_template.write(validate_template.encode('utf-8'))
test_template.close() test_template.close()
stack_name = self.getUniqueString('validate_template') stack_name = self.getUniqueString('validate_template')
self.assertRaises(exc.OpenStackCloudException, self.assertRaises(
self.user_cloud.create_stack, exc.OpenStackCloudException,
name=stack_name, self.user_cloud.create_stack,
template_file=test_template.name) name=stack_name,
template_file=test_template.name,
)
def test_stack_simple(self): def test_stack_simple(self):
test_template = tempfile.NamedTemporaryFile(delete=False) test_template = tempfile.NamedTemporaryFile(delete=False)
@ -100,9 +101,8 @@ class TestStack(base.BaseFunctionalTest):
self.stack_name = self.getUniqueString('simple_stack') self.stack_name = self.getUniqueString('simple_stack')
self.addCleanup(self._cleanup_stack) self.addCleanup(self._cleanup_stack)
stack = self.user_cloud.create_stack( stack = self.user_cloud.create_stack(
name=self.stack_name, name=self.stack_name, template_file=test_template.name, wait=True
template_file=test_template.name, )
wait=True)
# assert expected values in stack # assert expected values in stack
self.assertEqual('CREATE_COMPLETE', stack['stack_status']) self.assertEqual('CREATE_COMPLETE', stack['stack_status'])
@ -121,9 +121,8 @@ class TestStack(base.BaseFunctionalTest):
# update with no changes # update with no changes
stack = self.user_cloud.update_stack( stack = self.user_cloud.update_stack(
self.stack_name, self.stack_name, template_file=test_template.name, wait=True
template_file=test_template.name, )
wait=True)
# assert no change in updated stack # assert no change in updated stack
self.assertEqual('UPDATE_COMPLETE', stack['stack_status']) self.assertEqual('UPDATE_COMPLETE', stack['stack_status'])
@ -135,7 +134,8 @@ class TestStack(base.BaseFunctionalTest):
self.stack_name, self.stack_name,
template_file=test_template.name, template_file=test_template.name,
wait=True, wait=True,
length=12) length=12,
)
# assert changed output in updated stack # assert changed output in updated stack
stack = self.user_cloud.get_stack(self.stack_name) stack = self.user_cloud.get_stack(self.stack_name)
@ -147,7 +147,8 @@ class TestStack(base.BaseFunctionalTest):
def test_stack_nested(self): def test_stack_nested(self):
test_template = tempfile.NamedTemporaryFile( test_template = tempfile.NamedTemporaryFile(
suffix='.yaml', delete=False) suffix='.yaml', delete=False
)
test_template.write(root_template.encode('utf-8')) test_template.write(root_template.encode('utf-8'))
test_template.close() test_template.close()
@ -166,7 +167,8 @@ class TestStack(base.BaseFunctionalTest):
name=self.stack_name, name=self.stack_name,
template_file=test_template.name, template_file=test_template.name,
environment_files=[env.name], environment_files=[env.name],
wait=True) wait=True,
)
# assert expected values in stack # assert expected values in stack
self.assertEqual('CREATE_COMPLETE', stack['stack_status']) self.assertEqual('CREATE_COMPLETE', stack['stack_status'])

View File

@ -105,7 +105,8 @@ class TestUsers(base.KeystoneBaseFunctionalTest):
email='somebody@nowhere.com', email='somebody@nowhere.com',
enabled=False, enabled=False,
password='secret', password='secret',
description='') description='',
)
self.assertIsNotNone(new_user) self.assertIsNotNone(new_user)
self.assertEqual(user['id'], new_user['id']) self.assertEqual(user['id'], new_user['id'])
self.assertEqual(user_name + '2', new_user['name']) self.assertEqual(user_name + '2', new_user['name'])
@ -115,30 +116,37 @@ class TestUsers(base.KeystoneBaseFunctionalTest):
def test_update_user_password(self): def test_update_user_password(self):
user_name = self.user_prefix + '_password' user_name = self.user_prefix + '_password'
user_email = 'nobody@nowhere.com' user_email = 'nobody@nowhere.com'
user = self._create_user(name=user_name, user = self._create_user(
email=user_email, name=user_name, email=user_email, password='old_secret'
password='old_secret') )
self.assertIsNotNone(user) self.assertIsNotNone(user)
self.assertTrue(user['enabled']) self.assertTrue(user['enabled'])
# This should work for both v2 and v3 # This should work for both v2 and v3
new_user = self.operator_cloud.update_user( new_user = self.operator_cloud.update_user(
user['id'], password='new_secret') user['id'], password='new_secret'
)
self.assertIsNotNone(new_user) self.assertIsNotNone(new_user)
self.assertEqual(user['id'], new_user['id']) self.assertEqual(user['id'], new_user['id'])
self.assertEqual(user_name, new_user['name']) self.assertEqual(user_name, new_user['name'])
self.assertEqual(user_email, new_user['email']) self.assertEqual(user_email, new_user['email'])
self.assertTrue(new_user['enabled']) self.assertTrue(new_user['enabled'])
self.assertTrue(self.operator_cloud.grant_role( self.assertTrue(
'member', user=user['id'], project='demo', wait=True)) self.operator_cloud.grant_role(
'member', user=user['id'], project='demo', wait=True
)
)
self.addCleanup( self.addCleanup(
self.operator_cloud.revoke_role, self.operator_cloud.revoke_role,
'member', user=user['id'], project='demo', wait=True) 'member',
user=user['id'],
project='demo',
wait=True,
)
new_cloud = self.operator_cloud.connect_as( new_cloud = self.operator_cloud.connect_as(
user_id=user['id'], user_id=user['id'], password='new_secret', project_name='demo'
password='new_secret', )
project_name='demo')
self.assertIsNotNone(new_cloud) self.assertIsNotNone(new_cloud)
location = new_cloud.current_location location = new_cloud.current_location
@ -166,9 +174,11 @@ class TestUsers(base.KeystoneBaseFunctionalTest):
# Add the user to the group # Add the user to the group
self.operator_cloud.add_user_to_group(user_name, group_name) self.operator_cloud.add_user_to_group(user_name, group_name)
self.assertTrue( self.assertTrue(
self.operator_cloud.is_user_in_group(user_name, group_name)) self.operator_cloud.is_user_in_group(user_name, group_name)
)
# Remove them from the group # Remove them from the group
self.operator_cloud.remove_user_from_group(user_name, group_name) self.operator_cloud.remove_user_from_group(user_name, group_name)
self.assertFalse( self.assertFalse(
self.operator_cloud.is_user_in_group(user_name, group_name)) self.operator_cloud.is_user_in_group(user_name, group_name)
)

View File

@ -43,10 +43,10 @@ class TestVolume(base.BaseFunctionalTest):
self.addDetail('volume', content.text_content(volume_name)) self.addDetail('volume', content.text_content(volume_name))
self.addCleanup(self.cleanup, volume_name, snapshot_name=snapshot_name) self.addCleanup(self.cleanup, volume_name, snapshot_name=snapshot_name)
volume = self.user_cloud.create_volume( volume = self.user_cloud.create_volume(
display_name=volume_name, size=1) display_name=volume_name, size=1
)
snapshot = self.user_cloud.create_volume_snapshot( snapshot = self.user_cloud.create_volume_snapshot(
volume['id'], volume['id'], display_name=snapshot_name
display_name=snapshot_name
) )
ret_volume = self.user_cloud.get_volume_by_id(volume['id']) ret_volume = self.user_cloud.get_volume_by_id(volume['id'])
@ -60,7 +60,8 @@ class TestVolume(base.BaseFunctionalTest):
self.assertIn(snapshot['id'], snapshot_ids) self.assertIn(snapshot['id'], snapshot_ids)
ret_snapshot = self.user_cloud.get_volume_snapshot_by_id( ret_snapshot = self.user_cloud.get_volume_snapshot_by_id(
snapshot['id']) snapshot['id']
)
self.assertEqual(snapshot['id'], ret_snapshot['id']) self.assertEqual(snapshot['id'], ret_snapshot['id'])
self.user_cloud.delete_volume_snapshot(snapshot_name, wait=True) self.user_cloud.delete_volume_snapshot(snapshot_name, wait=True)
@ -73,9 +74,11 @@ class TestVolume(base.BaseFunctionalTest):
self.addDetail('volume', content.text_content(volume_name)) self.addDetail('volume', content.text_content(volume_name))
self.addCleanup(self.cleanup, volume_name, image_name=image_name) self.addCleanup(self.cleanup, volume_name, image_name=image_name)
volume = self.user_cloud.create_volume( volume = self.user_cloud.create_volume(
display_name=volume_name, size=1) display_name=volume_name, size=1
)
image = self.user_cloud.create_image( image = self.user_cloud.create_image(
image_name, volume=volume, wait=True) image_name, volume=volume, wait=True
)
volume_ids = [v['id'] for v in self.user_cloud.list_volumes()] volume_ids = [v['id'] for v in self.user_cloud.list_volumes()]
self.assertIn(volume['id'], volume_ids) self.assertIn(volume['id'], volume_ids)
@ -93,7 +96,8 @@ class TestVolume(base.BaseFunctionalTest):
snapshot = self.user_cloud.get_volume_snapshot(snapshot_name) snapshot = self.user_cloud.get_volume_snapshot(snapshot_name)
if snapshot: if snapshot:
self.user_cloud.delete_volume_snapshot( self.user_cloud.delete_volume_snapshot(
snapshot_name, wait=True) snapshot_name, wait=True
)
if image_name: if image_name:
image = self.user_cloud.get_image(image_name) image = self.user_cloud.get_image(image_name)
if image: if image:
@ -108,7 +112,8 @@ class TestVolume(base.BaseFunctionalTest):
self.user_cloud.delete_volume(v, wait=False) self.user_cloud.delete_volume(v, wait=False)
try: try:
for count in utils.iterate_timeout( for count in utils.iterate_timeout(
180, "Timeout waiting for volume cleanup"): 180, "Timeout waiting for volume cleanup"
):
found = False found = False
for existing in self.user_cloud.list_volumes(): for existing in self.user_cloud.list_volumes():
for v in volume: for v in volume:
@ -127,7 +132,8 @@ class TestVolume(base.BaseFunctionalTest):
for v in volume: for v in volume:
if v['id'] == existing['id']: if v['id'] == existing['id']:
self.operator_cloud.delete_volume( self.operator_cloud.delete_volume(
v, wait=False, force=True) v, wait=False, force=True
)
def test_list_volumes_pagination(self): def test_list_volumes_pagination(self):
'''Test pagination for list volumes functionality''' '''Test pagination for list volumes functionality'''
@ -146,9 +152,7 @@ class TestVolume(base.BaseFunctionalTest):
for i in self.user_cloud.list_volumes(): for i in self.user_cloud.list_volumes():
if i['name'] and i['name'].startswith(self.id()): if i['name'] and i['name'].startswith(self.id()):
result.append(i['id']) result.append(i['id'])
self.assertEqual( self.assertEqual(sorted([i['id'] for i in volumes]), sorted(result))
sorted([i['id'] for i in volumes]),
sorted(result))
def test_update_volume(self): def test_update_volume(self):
name, desc = self.getUniqueString('name'), self.getUniqueString('desc') name, desc = self.getUniqueString('name'), self.getUniqueString('desc')

View File

@ -27,14 +27,18 @@ class TestVolume(base.BaseFunctionalTest):
def test_create_get_delete_volume_backup(self): def test_create_get_delete_volume_backup(self):
volume = self.user_cloud.create_volume( volume = self.user_cloud.create_volume(
display_name=self.getUniqueString(), size=1) display_name=self.getUniqueString(), size=1
)
self.addCleanup(self.user_cloud.delete_volume, volume['id']) self.addCleanup(self.user_cloud.delete_volume, volume['id'])
backup_name_1 = self.getUniqueString() backup_name_1 = self.getUniqueString()
backup_desc_1 = self.getUniqueString() backup_desc_1 = self.getUniqueString()
backup = self.user_cloud.create_volume_backup( backup = self.user_cloud.create_volume_backup(
volume_id=volume['id'], name=backup_name_1, volume_id=volume['id'],
description=backup_desc_1, wait=True) name=backup_name_1,
description=backup_desc_1,
wait=True,
)
self.assertEqual(backup_name_1, backup['name']) self.assertEqual(backup_name_1, backup['name'])
backup = self.user_cloud.get_volume_backup(backup['id']) backup = self.user_cloud.get_volume_backup(backup['id'])
@ -48,11 +52,13 @@ class TestVolume(base.BaseFunctionalTest):
volume = self.user_cloud.create_volume(size=1) volume = self.user_cloud.create_volume(size=1)
snapshot = self.user_cloud.create_volume_snapshot(volume['id']) snapshot = self.user_cloud.create_volume_snapshot(volume['id'])
self.addCleanup(self.user_cloud.delete_volume, volume['id']) self.addCleanup(self.user_cloud.delete_volume, volume['id'])
self.addCleanup(self.user_cloud.delete_volume_snapshot, snapshot['id'], self.addCleanup(
wait=True) self.user_cloud.delete_volume_snapshot, snapshot['id'], wait=True
)
backup = self.user_cloud.create_volume_backup( backup = self.user_cloud.create_volume_backup(
volume_id=volume['id'], snapshot_id=snapshot['id'], wait=True) volume_id=volume['id'], snapshot_id=snapshot['id'], wait=True
)
backup = self.user_cloud.get_volume_backup(backup['id']) backup = self.user_cloud.get_volume_backup(backup['id'])
self.assertEqual(backup['snapshot_id'], snapshot['id']) self.assertEqual(backup['snapshot_id'], snapshot['id'])
@ -65,9 +71,11 @@ class TestVolume(base.BaseFunctionalTest):
self.addCleanup(self.user_cloud.delete_volume, volume['id']) self.addCleanup(self.user_cloud.delete_volume, volume['id'])
full_backup = self.user_cloud.create_volume_backup( full_backup = self.user_cloud.create_volume_backup(
volume_id=volume['id'], wait=True) volume_id=volume['id'], wait=True
)
incr_backup = self.user_cloud.create_volume_backup( incr_backup = self.user_cloud.create_volume_backup(
volume_id=volume['id'], incremental=True, wait=True) volume_id=volume['id'], incremental=True, wait=True
)
full_backup = self.user_cloud.get_volume_backup(full_backup['id']) full_backup = self.user_cloud.get_volume_backup(full_backup['id'])
incr_backup = self.user_cloud.get_volume_backup(incr_backup['id']) incr_backup = self.user_cloud.get_volume_backup(incr_backup['id'])
@ -81,7 +89,8 @@ class TestVolume(base.BaseFunctionalTest):
def test_list_volume_backups(self): def test_list_volume_backups(self):
vol1 = self.user_cloud.create_volume( vol1 = self.user_cloud.create_volume(
display_name=self.getUniqueString(), size=1) display_name=self.getUniqueString(), size=1
)
self.addCleanup(self.user_cloud.delete_volume, vol1['id']) self.addCleanup(self.user_cloud.delete_volume, vol1['id'])
# We create 2 volumes to create 2 backups. We could have created 2 # We create 2 volumes to create 2 backups. We could have created 2
@ -89,12 +98,14 @@ class TestVolume(base.BaseFunctionalTest):
# to be race-condition prone. And I didn't want to use an ugly sleep() # to be race-condition prone. And I didn't want to use an ugly sleep()
# here. # here.
vol2 = self.user_cloud.create_volume( vol2 = self.user_cloud.create_volume(
display_name=self.getUniqueString(), size=1) display_name=self.getUniqueString(), size=1
)
self.addCleanup(self.user_cloud.delete_volume, vol2['id']) self.addCleanup(self.user_cloud.delete_volume, vol2['id'])
backup_name_1 = self.getUniqueString() backup_name_1 = self.getUniqueString()
backup = self.user_cloud.create_volume_backup( backup = self.user_cloud.create_volume_backup(
volume_id=vol1['id'], name=backup_name_1) volume_id=vol1['id'], name=backup_name_1
)
self.addCleanup(self.user_cloud.delete_volume_backup, backup['id']) self.addCleanup(self.user_cloud.delete_volume_backup, backup['id'])
backup = self.user_cloud.create_volume_backup(volume_id=vol2['id']) backup = self.user_cloud.create_volume_backup(volume_id=vol2['id'])
@ -104,6 +115,7 @@ class TestVolume(base.BaseFunctionalTest):
self.assertEqual(2, len(backups)) self.assertEqual(2, len(backups))
backups = self.user_cloud.list_volume_backups( backups = self.user_cloud.list_volume_backups(
search_opts={"name": backup_name_1}) search_opts={"name": backup_name_1}
)
self.assertEqual(1, len(backups)) self.assertEqual(1, len(backups))
self.assertEqual(backup_name_1, backups[0]['name']) self.assertEqual(backup_name_1, backups[0]['name'])

View File

@ -25,7 +25,6 @@ from openstack.tests.functional import base
class TestVolumeType(base.BaseFunctionalTest): class TestVolumeType(base.BaseFunctionalTest):
def _assert_project(self, volume_name_or_id, project_id, allowed=True): def _assert_project(self, volume_name_or_id, project_id, allowed=True):
acls = self.operator_cloud.get_volume_type_access(volume_name_or_id) acls = self.operator_cloud.get_volume_type_access(volume_name_or_id)
allowed_projects = [x.get('project_id') for x in acls] allowed_projects = [x.get('project_id') for x in acls]
@ -40,83 +39,87 @@ class TestVolumeType(base.BaseFunctionalTest):
volume_type = { volume_type = {
"name": 'test-volume-type', "name": 'test-volume-type',
"description": None, "description": None,
"os-volume-type-access:is_public": False} "os-volume-type-access:is_public": False,
}
self.operator_cloud.block_storage.post( self.operator_cloud.block_storage.post(
'/types', json={'volume_type': volume_type}) '/types', json={'volume_type': volume_type}
)
def tearDown(self): def tearDown(self):
ret = self.operator_cloud.get_volume_type('test-volume-type') ret = self.operator_cloud.get_volume_type('test-volume-type')
if ret.get('id'): if ret.get('id'):
self.operator_cloud.block_storage.delete( self.operator_cloud.block_storage.delete(
'/types/{volume_type_id}'.format(volume_type_id=ret.id)) '/types/{volume_type_id}'.format(volume_type_id=ret.id)
)
super(TestVolumeType, self).tearDown() super(TestVolumeType, self).tearDown()
def test_list_volume_types(self): def test_list_volume_types(self):
volume_types = self.operator_cloud.list_volume_types() volume_types = self.operator_cloud.list_volume_types()
self.assertTrue(volume_types) self.assertTrue(volume_types)
self.assertTrue(any( self.assertTrue(
x for x in volume_types if x.name == 'test-volume-type')) any(x for x in volume_types if x.name == 'test-volume-type')
)
def test_add_remove_volume_type_access(self): def test_add_remove_volume_type_access(self):
volume_type = self.operator_cloud.get_volume_type('test-volume-type') volume_type = self.operator_cloud.get_volume_type('test-volume-type')
self.assertEqual('test-volume-type', volume_type.name) self.assertEqual('test-volume-type', volume_type.name)
self.operator_cloud.add_volume_type_access( self.operator_cloud.add_volume_type_access(
'test-volume-type', 'test-volume-type', self.operator_cloud.current_project_id
self.operator_cloud.current_project_id) )
self._assert_project( self._assert_project(
'test-volume-type', self.operator_cloud.current_project_id, 'test-volume-type',
allowed=True) self.operator_cloud.current_project_id,
allowed=True,
)
self.operator_cloud.remove_volume_type_access( self.operator_cloud.remove_volume_type_access(
'test-volume-type', 'test-volume-type', self.operator_cloud.current_project_id
self.operator_cloud.current_project_id) )
self._assert_project( self._assert_project(
'test-volume-type', self.operator_cloud.current_project_id, 'test-volume-type',
allowed=False) self.operator_cloud.current_project_id,
allowed=False,
)
def test_add_volume_type_access_missing_project(self): def test_add_volume_type_access_missing_project(self):
        # Project id is not validated and it may not exist.         # Project id is not validated and it may not exist.
self.operator_cloud.add_volume_type_access( self.operator_cloud.add_volume_type_access(
'test-volume-type', 'test-volume-type', '00000000000000000000000000000000'
'00000000000000000000000000000000') )
self.operator_cloud.remove_volume_type_access( self.operator_cloud.remove_volume_type_access(
'test-volume-type', 'test-volume-type', '00000000000000000000000000000000'
'00000000000000000000000000000000') )
def test_add_volume_type_access_missing_volume(self): def test_add_volume_type_access_missing_volume(self):
with testtools.ExpectedException( with testtools.ExpectedException(
exc.OpenStackCloudException, exc.OpenStackCloudException, "VolumeType not found.*"
"VolumeType not found.*"
): ):
self.operator_cloud.add_volume_type_access( self.operator_cloud.add_volume_type_access(
'MISSING_VOLUME_TYPE', 'MISSING_VOLUME_TYPE', self.operator_cloud.current_project_id
self.operator_cloud.current_project_id) )
def test_remove_volume_type_access_missing_volume(self): def test_remove_volume_type_access_missing_volume(self):
with testtools.ExpectedException( with testtools.ExpectedException(
exc.OpenStackCloudException, exc.OpenStackCloudException, "VolumeType not found.*"
"VolumeType not found.*"
): ):
self.operator_cloud.remove_volume_type_access( self.operator_cloud.remove_volume_type_access(
'MISSING_VOLUME_TYPE', 'MISSING_VOLUME_TYPE', self.operator_cloud.current_project_id
self.operator_cloud.current_project_id) )
def test_add_volume_type_access_bad_project(self): def test_add_volume_type_access_bad_project(self):
with testtools.ExpectedException( with testtools.ExpectedException(
exc.OpenStackCloudBadRequest, exc.OpenStackCloudBadRequest, "Unable to authorize.*"
"Unable to authorize.*"
): ):
self.operator_cloud.add_volume_type_access( self.operator_cloud.add_volume_type_access(
'test-volume-type', 'test-volume-type', 'BAD_PROJECT_ID'
'BAD_PROJECT_ID') )
def test_remove_volume_type_access_missing_project(self): def test_remove_volume_type_access_missing_project(self):
with testtools.ExpectedException( with testtools.ExpectedException(
exc.OpenStackCloudURINotFound, exc.OpenStackCloudURINotFound, "Unable to revoke.*"
"Unable to revoke.*"
): ):
self.operator_cloud.remove_volume_type_access( self.operator_cloud.remove_volume_type_access(
'test-volume-type', 'test-volume-type', '00000000000000000000000000000000'
'00000000000000000000000000000000') )

View File

@ -23,7 +23,6 @@ from openstack.tests.functional import base
class TestZone(base.BaseFunctionalTest): class TestZone(base.BaseFunctionalTest):
def setUp(self): def setUp(self):
super(TestZone, self).setUp() super(TestZone, self).setUp()
if not self.user_cloud.has_service('dns'): if not self.user_cloud.has_service('dns'):
@ -43,9 +42,13 @@ class TestZone(base.BaseFunctionalTest):
# Test we can create a zone and we get it returned # Test we can create a zone and we get it returned
zone = self.user_cloud.create_zone( zone = self.user_cloud.create_zone(
name=name, zone_type=zone_type, email=email, name=name,
description=description, ttl=ttl, zone_type=zone_type,
masters=masters) email=email,
description=description,
ttl=ttl,
masters=masters,
)
self.assertEqual(zone['name'], name) self.assertEqual(zone['name'], name)
self.assertEqual(zone['type'], zone_type.upper()) self.assertEqual(zone['type'], zone_type.upper())
self.assertEqual(zone['email'], email) self.assertEqual(zone['email'], email)

View File

@ -32,7 +32,6 @@ RANGE_DATA = [
class TestUtils(base.TestCase): class TestUtils(base.TestCase):
def test__filter_list_name_or_id(self): def test__filter_list_name_or_id(self):
el1 = dict(id=100, name='donald') el1 = dict(id=100, name='donald')
el2 = dict(id=200, name='pluto') el2 = dict(id=200, name='pluto')
@ -85,18 +84,28 @@ class TestUtils(base.TestCase):
self.assertEqual([], ret) self.assertEqual([], ret)
def test__filter_list_unicode(self): def test__filter_list_unicode(self):
el1 = dict(id=100, name=u'中文', last='duck', el1 = dict(
other=dict(category='duck', financial=dict(status='poor'))) id=100,
el2 = dict(id=200, name=u'中文', last='trump', name=u'中文',
other=dict(category='human', financial=dict(status='rich'))) last='duck',
el3 = dict(id=300, name='donald', last='ronald mac', other=dict(category='duck', financial=dict(status='poor')),
other=dict(category='clown', financial=dict(status='rich'))) )
el2 = dict(
id=200,
name=u'中文',
last='trump',
other=dict(category='human', financial=dict(status='rich')),
)
el3 = dict(
id=300,
name='donald',
last='ronald mac',
other=dict(category='clown', financial=dict(status='rich')),
)
data = [el1, el2, el3] data = [el1, el2, el3]
ret = _utils._filter_list( ret = _utils._filter_list(
data, u'中文', data, u'中文', {'other': {'financial': {'status': 'rich'}}}
{'other': { )
'financial': {'status': 'rich'}
}})
self.assertEqual([el2], ret) self.assertEqual([el2], ret)
def test__filter_list_filter(self): def test__filter_list_filter(self):
@ -114,30 +123,47 @@ class TestUtils(base.TestCase):
self.assertEqual([el1], ret) self.assertEqual([el1], ret)
def test__filter_list_dict1(self): def test__filter_list_dict1(self):
el1 = dict(id=100, name='donald', last='duck', el1 = dict(
other=dict(category='duck')) id=100, name='donald', last='duck', other=dict(category='duck')
el2 = dict(id=200, name='donald', last='trump', )
other=dict(category='human')) el2 = dict(
el3 = dict(id=300, name='donald', last='ronald mac', id=200, name='donald', last='trump', other=dict(category='human')
other=dict(category='clown')) )
el3 = dict(
id=300,
name='donald',
last='ronald mac',
other=dict(category='clown'),
)
data = [el1, el2, el3] data = [el1, el2, el3]
ret = _utils._filter_list( ret = _utils._filter_list(
data, 'donald', {'other': {'category': 'clown'}}) data, 'donald', {'other': {'category': 'clown'}}
)
self.assertEqual([el3], ret) self.assertEqual([el3], ret)
def test__filter_list_dict2(self): def test__filter_list_dict2(self):
el1 = dict(id=100, name='donald', last='duck', el1 = dict(
other=dict(category='duck', financial=dict(status='poor'))) id=100,
el2 = dict(id=200, name='donald', last='trump', name='donald',
other=dict(category='human', financial=dict(status='rich'))) last='duck',
el3 = dict(id=300, name='donald', last='ronald mac', other=dict(category='duck', financial=dict(status='poor')),
other=dict(category='clown', financial=dict(status='rich'))) )
el2 = dict(
id=200,
name='donald',
last='trump',
other=dict(category='human', financial=dict(status='rich')),
)
el3 = dict(
id=300,
name='donald',
last='ronald mac',
other=dict(category='clown', financial=dict(status='rich')),
)
data = [el1, el2, el3] data = [el1, el2, el3]
ret = _utils._filter_list( ret = _utils._filter_list(
data, 'donald', data, 'donald', {'other': {'financial': {'status': 'rich'}}}
{'other': { )
'financial': {'status': 'rich'}
}})
self.assertEqual([el2, el3], ret) self.assertEqual([el2, el3], ret)
def test_safe_dict_min_ints(self): def test_safe_dict_min_ints(self):
@ -176,7 +202,7 @@ class TestUtils(base.TestCase):
with testtools.ExpectedException( with testtools.ExpectedException(
exc.OpenStackCloudException, exc.OpenStackCloudException,
"Search for minimum value failed. " "Search for minimum value failed. "
"Value for f1 is not an integer: aaa" "Value for f1 is not an integer: aaa",
): ):
_utils.safe_dict_min('f1', data) _utils.safe_dict_min('f1', data)
@ -216,7 +242,7 @@ class TestUtils(base.TestCase):
with testtools.ExpectedException( with testtools.ExpectedException(
exc.OpenStackCloudException, exc.OpenStackCloudException,
"Search for maximum value failed. " "Search for maximum value failed. "
"Value for f1 is not an integer: aaa" "Value for f1 is not an integer: aaa",
): ):
_utils.safe_dict_max('f1', data) _utils.safe_dict_max('f1', data)
@ -282,15 +308,13 @@ class TestUtils(base.TestCase):
def test_range_filter_invalid_int(self): def test_range_filter_invalid_int(self):
with testtools.ExpectedException( with testtools.ExpectedException(
exc.OpenStackCloudException, exc.OpenStackCloudException, "Invalid range value: <1A0"
"Invalid range value: <1A0"
): ):
_utils.range_filter(RANGE_DATA, "key1", "<1A0") _utils.range_filter(RANGE_DATA, "key1", "<1A0")
def test_range_filter_invalid_op(self): def test_range_filter_invalid_op(self):
with testtools.ExpectedException( with testtools.ExpectedException(
exc.OpenStackCloudException, exc.OpenStackCloudException, "Invalid range value: <>100"
"Invalid range value: <>100"
): ):
_utils.range_filter(RANGE_DATA, "key1", "<>100") _utils.range_filter(RANGE_DATA, "key1", "<>100")
@ -330,8 +354,16 @@ class TestUtils(base.TestCase):
def test_get_entity_pass_uuid(self): def test_get_entity_pass_uuid(self):
uuid = uuid4().hex uuid = uuid4().hex
self.cloud.use_direct_get = True self.cloud.use_direct_get = True
resources = ['flavor', 'image', 'volume', 'network', resources = [
'subnet', 'port', 'floating_ip', 'security_group'] 'flavor',
'image',
'volume',
'network',
'subnet',
'port',
'floating_ip',
'security_group',
]
for r in resources: for r in resources:
f = 'get_%s_by_id' % r f = 'get_%s_by_id' % r
with mock.patch.object(self.cloud, f) as get: with mock.patch.object(self.cloud, f) as get:
@ -340,8 +372,16 @@ class TestUtils(base.TestCase):
def test_get_entity_pass_search_methods(self): def test_get_entity_pass_search_methods(self):
self.cloud.use_direct_get = True self.cloud.use_direct_get = True
resources = ['flavor', 'image', 'volume', 'network', resources = [
'subnet', 'port', 'floating_ip', 'security_group'] 'flavor',
'image',
'volume',
'network',
'subnet',
'port',
'floating_ip',
'security_group',
]
filters = {} filters = {}
name = 'name_no_uuid' name = 'name_no_uuid'
for r in resources: for r in resources:
@ -351,8 +391,16 @@ class TestUtils(base.TestCase):
search.assert_called_once_with(name, filters) search.assert_called_once_with(name, filters)
def test_get_entity_get_and_search(self): def test_get_entity_get_and_search(self):
resources = ['flavor', 'image', 'volume', 'network', resources = [
'subnet', 'port', 'floating_ip', 'security_group'] 'flavor',
'image',
'volume',
'network',
'subnet',
'port',
'floating_ip',
'security_group',
]
for r in resources: for r in resources:
self.assertTrue(hasattr(self.cloud, 'get_%s_by_id' % r)) self.assertTrue(hasattr(self.cloud, 'get_%s_by_id' % r))
self.assertTrue(hasattr(self.cloud, 'search_%ss' % r)) self.assertTrue(hasattr(self.cloud, 'search_%ss' % r))

View File

@ -23,7 +23,7 @@ DEP_DICT = {
'parent_id': None, 'parent_id': None,
'root_id': 1, 'root_id': 1,
'num_accelerators': 4, 'num_accelerators': 4,
'device_id': 0 'device_id': 0,
} }
DEV_UUID = uuid.uuid4().hex DEV_UUID = uuid.uuid4().hex
@ -40,14 +40,16 @@ DEV_DICT = {
DEV_PROF_UUID = uuid.uuid4().hex DEV_PROF_UUID = uuid.uuid4().hex
DEV_PROF_GROUPS = [ DEV_PROF_GROUPS = [
{"resources:ACCELERATOR_FPGA": "1", {
"trait:CUSTOM_FPGA_INTEL_PAC_ARRIA10": "required", "resources:ACCELERATOR_FPGA": "1",
"trait:CUSTOM_FUNCTION_ID_3AFB": "required", "trait:CUSTOM_FPGA_INTEL_PAC_ARRIA10": "required",
}, "trait:CUSTOM_FUNCTION_ID_3AFB": "required",
{"resources:CUSTOM_ACCELERATOR_FOO": "2", },
"resources:CUSTOM_MEMORY": "200", {
"trait:CUSTOM_TRAIT_ALWAYS": "required", "resources:CUSTOM_ACCELERATOR_FOO": "2",
} "resources:CUSTOM_MEMORY": "200",
"trait:CUSTOM_TRAIT_ALWAYS": "required",
},
] ]
DEV_PROF_DICT = { DEV_PROF_DICT = {
"id": 1, "id": 1,
@ -61,10 +63,9 @@ NEW_DEV_PROF_DICT = copy.copy(DEV_PROF_DICT)
ARQ_UUID = uuid.uuid4().hex ARQ_UUID = uuid.uuid4().hex
ARQ_DEV_RP_UUID = uuid.uuid4().hex ARQ_DEV_RP_UUID = uuid.uuid4().hex
ARQ_INSTANCE_UUID = uuid.uuid4().hex ARQ_INSTANCE_UUID = uuid.uuid4().hex
ARQ_ATTACH_INFO_STR = '{"bus": "5e", '\ ARQ_ATTACH_INFO_STR = (
'"device": "00", '\ '{"bus": "5e", ' '"device": "00", ' '"domain": "0000", ' '"function": "1"}'
'"domain": "0000", '\ )
'"function": "1"}'
ARQ_DICT = { ARQ_DICT = {
'uuid': ARQ_UUID, 'uuid': ARQ_UUID,
'hostname': 'test_hostname', 'hostname': 'test_hostname',
@ -85,36 +86,41 @@ class TestAccelerator(base.TestCase):
self.use_cyborg() self.use_cyborg()
def test_list_deployables(self): def test_list_deployables(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'accelerator', method='GET',
'public', uri=self.get_mock_url(
append=['v2', 'deployables']), 'accelerator', 'public', append=['v2', 'deployables']
json={'deployables': [DEP_DICT]} ),
), json={'deployables': [DEP_DICT]},
]) ),
]
)
dep_list = self.cloud.list_deployables() dep_list = self.cloud.list_deployables()
self.assertEqual(len(dep_list), 1) self.assertEqual(len(dep_list), 1)
self.assertEqual(dep_list[0].id, DEP_DICT['uuid']) self.assertEqual(dep_list[0].id, DEP_DICT['uuid'])
self.assertEqual(dep_list[0].name, DEP_DICT['name']) self.assertEqual(dep_list[0].name, DEP_DICT['name'])
self.assertEqual(dep_list[0].parent_id, DEP_DICT['parent_id']) self.assertEqual(dep_list[0].parent_id, DEP_DICT['parent_id'])
self.assertEqual(dep_list[0].root_id, DEP_DICT['root_id']) self.assertEqual(dep_list[0].root_id, DEP_DICT['root_id'])
self.assertEqual(dep_list[0].num_accelerators, self.assertEqual(
DEP_DICT['num_accelerators']) dep_list[0].num_accelerators, DEP_DICT['num_accelerators']
)
self.assertEqual(dep_list[0].device_id, DEP_DICT['device_id']) self.assertEqual(dep_list[0].device_id, DEP_DICT['device_id'])
self.assert_calls() self.assert_calls()
def test_list_devices(self): def test_list_devices(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'accelerator', method='GET',
'public', uri=self.get_mock_url(
append=['v2', 'devices']), 'accelerator', 'public', append=['v2', 'devices']
json={'devices': [DEV_DICT]} ),
), json={'devices': [DEV_DICT]},
]) ),
]
)
dev_list = self.cloud.list_devices() dev_list = self.cloud.list_devices()
self.assertEqual(len(dev_list), 1) self.assertEqual(len(dev_list), 1)
self.assertEqual(dev_list[0].id, DEV_DICT['id']) self.assertEqual(dev_list[0].id, DEV_DICT['id'])
@ -123,22 +129,28 @@ class TestAccelerator(base.TestCase):
self.assertEqual(dev_list[0].type, DEV_DICT['type']) self.assertEqual(dev_list[0].type, DEV_DICT['type'])
self.assertEqual(dev_list[0].vendor, DEV_DICT['vendor']) self.assertEqual(dev_list[0].vendor, DEV_DICT['vendor'])
self.assertEqual(dev_list[0].model, DEV_DICT['model']) self.assertEqual(dev_list[0].model, DEV_DICT['model'])
self.assertEqual(dev_list[0].std_board_info, self.assertEqual(
DEV_DICT['std_board_info']) dev_list[0].std_board_info, DEV_DICT['std_board_info']
self.assertEqual(dev_list[0].vendor_board_info, )
DEV_DICT['vendor_board_info']) self.assertEqual(
dev_list[0].vendor_board_info, DEV_DICT['vendor_board_info']
)
self.assert_calls() self.assert_calls()
def test_list_device_profiles(self): def test_list_device_profiles(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'accelerator', method='GET',
'public', uri=self.get_mock_url(
append=['v2', 'device_profiles']), 'accelerator',
json={'device_profiles': [DEV_PROF_DICT]} 'public',
), append=['v2', 'device_profiles'],
]) ),
json={'device_profiles': [DEV_PROF_DICT]},
),
]
)
dev_prof_list = self.cloud.list_device_profiles() dev_prof_list = self.cloud.list_device_profiles()
self.assertEqual(len(dev_prof_list), 1) self.assertEqual(len(dev_prof_list), 1)
self.assertEqual(dev_prof_list[0].id, DEV_PROF_DICT['id']) self.assertEqual(dev_prof_list[0].id, DEV_PROF_DICT['id'])
@ -148,183 +160,248 @@ class TestAccelerator(base.TestCase):
self.assert_calls() self.assert_calls()
def test_create_device_profile(self): def test_create_device_profile(self):
self.register_uris([ self.register_uris(
dict(method='POST', [
uri=self.get_mock_url( dict(
'accelerator', method='POST',
'public', uri=self.get_mock_url(
append=['v2', 'device_profiles']), 'accelerator',
json=NEW_DEV_PROF_DICT) 'public',
]) append=['v2', 'device_profiles'],
),
json=NEW_DEV_PROF_DICT,
)
]
)
attrs = { attrs = {
'name': NEW_DEV_PROF_DICT['name'], 'name': NEW_DEV_PROF_DICT['name'],
'groups': NEW_DEV_PROF_DICT['groups'] 'groups': NEW_DEV_PROF_DICT['groups'],
} }
self.assertTrue( self.assertTrue(self.cloud.create_device_profile(attrs))
self.cloud.create_device_profile(
attrs
)
)
self.assert_calls() self.assert_calls()
def test_delete_device_profile(self, filters=None): def test_delete_device_profile(self, filters=None):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'accelerator', method='GET',
'public', uri=self.get_mock_url(
append=['v2', 'device_profiles', DEV_PROF_DICT['name']]), 'accelerator',
json={"device_profiles": [DEV_PROF_DICT]}), 'public',
dict(method='DELETE', append=[
uri=self.get_mock_url( 'v2',
'accelerator', 'device_profiles',
'public', DEV_PROF_DICT['name'],
append=['v2', 'device_profiles', DEV_PROF_DICT['name']]), ],
json=DEV_PROF_DICT) ),
json={"device_profiles": [DEV_PROF_DICT]},
]) ),
dict(
method='DELETE',
uri=self.get_mock_url(
'accelerator',
'public',
append=[
'v2',
'device_profiles',
DEV_PROF_DICT['name'],
],
),
json=DEV_PROF_DICT,
),
]
)
self.assertTrue( self.assertTrue(
self.cloud.delete_device_profile( self.cloud.delete_device_profile(DEV_PROF_DICT['name'], filters)
DEV_PROF_DICT['name'],
filters
)
) )
self.assert_calls() self.assert_calls()
def test_list_accelerator_requests(self): def test_list_accelerator_requests(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'accelerator', method='GET',
'public', uri=self.get_mock_url(
append=['v2', 'accelerator_requests']), 'accelerator',
json={'arqs': [ARQ_DICT]} 'public',
), append=['v2', 'accelerator_requests'],
]) ),
json={'arqs': [ARQ_DICT]},
),
]
)
arq_list = self.cloud.list_accelerator_requests() arq_list = self.cloud.list_accelerator_requests()
self.assertEqual(len(arq_list), 1) self.assertEqual(len(arq_list), 1)
self.assertEqual(arq_list[0].uuid, ARQ_DICT['uuid']) self.assertEqual(arq_list[0].uuid, ARQ_DICT['uuid'])
self.assertEqual(arq_list[0].device_profile_name, self.assertEqual(
ARQ_DICT['device_profile_name']) arq_list[0].device_profile_name, ARQ_DICT['device_profile_name']
self.assertEqual(arq_list[0].device_profile_group_id, )
ARQ_DICT['device_profile_group_id']) self.assertEqual(
self.assertEqual(arq_list[0].device_rp_uuid, arq_list[0].device_profile_group_id,
ARQ_DICT['device_rp_uuid']) ARQ_DICT['device_profile_group_id'],
self.assertEqual(arq_list[0].instance_uuid, )
ARQ_DICT['instance_uuid']) self.assertEqual(
self.assertEqual(arq_list[0].attach_handle_type, arq_list[0].device_rp_uuid, ARQ_DICT['device_rp_uuid']
ARQ_DICT['attach_handle_type']) )
self.assertEqual(arq_list[0].attach_handle_info, self.assertEqual(arq_list[0].instance_uuid, ARQ_DICT['instance_uuid'])
ARQ_DICT['attach_handle_info']) self.assertEqual(
self.assert_calls() arq_list[0].attach_handle_type, ARQ_DICT['attach_handle_type']
)
def test_create_accelerator_request(self): self.assertEqual(
self.register_uris([ arq_list[0].attach_handle_info, ARQ_DICT['attach_handle_info']
dict(method='POST',
uri=self.get_mock_url(
'accelerator',
'public',
append=['v2', 'accelerator_requests']),
json=NEW_ARQ_DICT
),
])
attrs = {
'device_profile_name': NEW_ARQ_DICT['device_profile_name'],
'device_profile_group_id': NEW_ARQ_DICT['device_profile_group_id']
}
self.assertTrue(
self.cloud.create_accelerator_request(
attrs
)
) )
self.assert_calls() self.assert_calls()
def test_delete_accelerator_request(self, filters=None): def test_create_accelerator_request(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'accelerator', method='POST',
'public', uri=self.get_mock_url(
append=['v2', 'accelerator_requests', ARQ_DICT['uuid']]), 'accelerator',
json={"accelerator_requests": [ARQ_DICT]}), 'public',
dict(method='DELETE', append=['v2', 'accelerator_requests'],
uri=self.get_mock_url( ),
'accelerator', json=NEW_ARQ_DICT,
'public', ),
append=['v2', 'accelerator_requests', ARQ_DICT['uuid']]), ]
json=ARQ_DICT) )
]) attrs = {
'device_profile_name': NEW_ARQ_DICT['device_profile_name'],
'device_profile_group_id': NEW_ARQ_DICT['device_profile_group_id'],
}
self.assertTrue(self.cloud.create_accelerator_request(attrs))
self.assert_calls()
def test_delete_accelerator_request(self, filters=None):
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'accelerator',
'public',
append=[
'v2',
'accelerator_requests',
ARQ_DICT['uuid'],
],
),
json={"accelerator_requests": [ARQ_DICT]},
),
dict(
method='DELETE',
uri=self.get_mock_url(
'accelerator',
'public',
append=[
'v2',
'accelerator_requests',
ARQ_DICT['uuid'],
],
),
json=ARQ_DICT,
),
]
)
self.assertTrue( self.assertTrue(
self.cloud.delete_accelerator_request( self.cloud.delete_accelerator_request(ARQ_DICT['uuid'], filters)
ARQ_DICT['uuid'],
filters
)
) )
self.assert_calls() self.assert_calls()
def test_bind_accelerator_request(self): def test_bind_accelerator_request(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'accelerator', method='GET',
'public', uri=self.get_mock_url(
append=['v2', 'accelerator_requests', ARQ_DICT['uuid']]), 'accelerator',
json={"accelerator_requests": [ARQ_DICT]}), 'public',
dict(method='PATCH', append=[
uri=self.get_mock_url( 'v2',
'accelerator', 'accelerator_requests',
'public', ARQ_DICT['uuid'],
append=['v2', 'accelerator_requests', ARQ_DICT['uuid']]), ],
json=ARQ_DICT) ),
]) json={"accelerator_requests": [ARQ_DICT]},
properties = [{'path': '/hostname', ),
'value': ARQ_DICT['hostname'], dict(
'op': 'add'}, method='PATCH',
{'path': '/instance_uuid', uri=self.get_mock_url(
'value': ARQ_DICT['instance_uuid'], 'accelerator',
'op': 'add'}, 'public',
{'path': '/device_rp_uuid', append=[
'value': ARQ_DICT['device_rp_uuid'], 'v2',
'op': 'add'}] 'accelerator_requests',
ARQ_DICT['uuid'],
],
),
json=ARQ_DICT,
),
]
)
properties = [
{'path': '/hostname', 'value': ARQ_DICT['hostname'], 'op': 'add'},
{
'path': '/instance_uuid',
'value': ARQ_DICT['instance_uuid'],
'op': 'add',
},
{
'path': '/device_rp_uuid',
'value': ARQ_DICT['device_rp_uuid'],
'op': 'add',
},
]
self.assertTrue( self.assertTrue(
self.cloud.bind_accelerator_request( self.cloud.bind_accelerator_request(ARQ_DICT['uuid'], properties)
ARQ_DICT['uuid'], properties
)
) )
self.assert_calls() self.assert_calls()
def test_unbind_accelerator_request(self): def test_unbind_accelerator_request(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'accelerator', method='GET',
'public', uri=self.get_mock_url(
append=['v2', 'accelerator_requests', ARQ_DICT['uuid']]), 'accelerator',
json={"accelerator_requests": [ARQ_DICT]}), 'public',
dict(method='PATCH', append=[
uri=self.get_mock_url( 'v2',
'accelerator', 'accelerator_requests',
'public', ARQ_DICT['uuid'],
append=['v2', 'accelerator_requests', ARQ_DICT['uuid']]), ],
json=ARQ_DICT) ),
]) json={"accelerator_requests": [ARQ_DICT]},
),
dict(
method='PATCH',
uri=self.get_mock_url(
'accelerator',
'public',
append=[
'v2',
'accelerator_requests',
ARQ_DICT['uuid'],
],
),
json=ARQ_DICT,
),
]
)
properties = [{'path': '/hostname', properties = [
'op': 'remove'}, {'path': '/hostname', 'op': 'remove'},
{'path': '/instance_uuid', {'path': '/instance_uuid', 'op': 'remove'},
'op': 'remove'}, {'path': '/device_rp_uuid', 'op': 'remove'},
{'path': '/device_rp_uuid', ]
'op': 'remove'}]
self.assertTrue( self.assertTrue(
self.cloud.unbind_accelerator_request( self.cloud.unbind_accelerator_request(ARQ_DICT['uuid'], properties)
ARQ_DICT['uuid'], properties
)
) )
self.assert_calls() self.assert_calls()

View File

@ -15,7 +15,6 @@ from openstack.tests.unit import base
class TestAggregate(base.TestCase): class TestAggregate(base.TestCase):
def setUp(self): def setUp(self):
super(TestAggregate, self).setUp() super(TestAggregate, self).setUp()
self.aggregate_name = self.getUniqueString('aggregate') self.aggregate_name = self.getUniqueString('aggregate')
@ -27,17 +26,25 @@ class TestAggregate(base.TestCase):
del create_aggregate['metadata'] del create_aggregate['metadata']
del create_aggregate['hosts'] del create_aggregate['hosts']
self.register_uris([ self.register_uris(
dict(method='POST', [
uri=self.get_mock_url( dict(
'compute', 'public', append=['os-aggregates']), method='POST',
json={'aggregate': create_aggregate}, uri=self.get_mock_url(
validate=dict(json={ 'compute', 'public', append=['os-aggregates']
'aggregate': { ),
'name': self.aggregate_name, json={'aggregate': create_aggregate},
'availability_zone': None, validate=dict(
}})), json={
]) 'aggregate': {
'name': self.aggregate_name,
'availability_zone': None,
}
}
),
),
]
)
self.cloud.create_aggregate(name=self.aggregate_name) self.cloud.create_aggregate(name=self.aggregate_name)
self.assert_calls() self.assert_calls()
@ -45,100 +52,144 @@ class TestAggregate(base.TestCase):
def test_create_aggregate_with_az(self): def test_create_aggregate_with_az(self):
availability_zone = 'az1' availability_zone = 'az1'
az_aggregate = fakes.make_fake_aggregate( az_aggregate = fakes.make_fake_aggregate(
1, self.aggregate_name, availability_zone=availability_zone) 1, self.aggregate_name, availability_zone=availability_zone
)
create_aggregate = az_aggregate.copy() create_aggregate = az_aggregate.copy()
del create_aggregate['metadata'] del create_aggregate['metadata']
del create_aggregate['hosts'] del create_aggregate['hosts']
self.register_uris([ self.register_uris(
dict(method='POST', [
uri=self.get_mock_url( dict(
'compute', 'public', append=['os-aggregates']), method='POST',
json={'aggregate': create_aggregate}, uri=self.get_mock_url(
validate=dict(json={ 'compute', 'public', append=['os-aggregates']
'aggregate': { ),
'name': self.aggregate_name, json={'aggregate': create_aggregate},
'availability_zone': availability_zone, validate=dict(
}})), json={
]) 'aggregate': {
'name': self.aggregate_name,
'availability_zone': availability_zone,
}
}
),
),
]
)
self.cloud.create_aggregate( self.cloud.create_aggregate(
name=self.aggregate_name, availability_zone=availability_zone) name=self.aggregate_name, availability_zone=availability_zone
)
self.assert_calls() self.assert_calls()
def test_delete_aggregate(self): def test_delete_aggregate(self):
self.register_uris([ self.register_uris(
dict(method='DELETE', [
uri=self.get_mock_url( dict(
'compute', 'public', append=['os-aggregates', '1'])), method='DELETE',
]) uri=self.get_mock_url(
'compute', 'public', append=['os-aggregates', '1']
),
),
]
)
self.assertTrue(self.cloud.delete_aggregate('1')) self.assertTrue(self.cloud.delete_aggregate('1'))
self.assert_calls() self.assert_calls()
def test_delete_aggregate_by_name(self): def test_delete_aggregate_by_name(self):
self.register_uris([ self.register_uris(
dict( [
method='GET', dict(
uri=self.get_mock_url( method='GET',
'compute', 'public', append=['os-aggregates', uri=self.get_mock_url(
self.aggregate_name] 'compute',
'public',
append=['os-aggregates', self.aggregate_name],
),
status_code=404,
), ),
status_code=404, dict(
), method='GET',
dict(method='GET', uri=self.get_mock_url(
uri=self.get_mock_url( 'compute', 'public', append=['os-aggregates']
'compute', 'public', append=['os-aggregates']), ),
json={'aggregates': [self.fake_aggregate]}), json={'aggregates': [self.fake_aggregate]},
dict(method='DELETE', ),
uri=self.get_mock_url( dict(
'compute', 'public', append=['os-aggregates', '1'])), method='DELETE',
]) uri=self.get_mock_url(
'compute', 'public', append=['os-aggregates', '1']
),
),
]
)
self.assertTrue(self.cloud.delete_aggregate(self.aggregate_name)) self.assertTrue(self.cloud.delete_aggregate(self.aggregate_name))
self.assert_calls() self.assert_calls()
def test_update_aggregate_set_az(self): def test_update_aggregate_set_az(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'compute', 'public', append=['os-aggregates', '1']), method='GET',
json=self.fake_aggregate), uri=self.get_mock_url(
dict(method='PUT', 'compute', 'public', append=['os-aggregates', '1']
uri=self.get_mock_url( ),
'compute', 'public', append=['os-aggregates', '1']), json=self.fake_aggregate,
json={'aggregate': self.fake_aggregate}, ),
validate=dict( dict(
json={ method='PUT',
'aggregate': { uri=self.get_mock_url(
'availability_zone': 'az', 'compute', 'public', append=['os-aggregates', '1']
}})), ),
]) json={'aggregate': self.fake_aggregate},
validate=dict(
json={
'aggregate': {
'availability_zone': 'az',
}
}
),
),
]
)
self.cloud.update_aggregate(1, availability_zone='az') self.cloud.update_aggregate(1, availability_zone='az')
self.assert_calls() self.assert_calls()
def test_update_aggregate_unset_az(self): def test_update_aggregate_unset_az(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'compute', 'public', append=['os-aggregates', '1']), method='GET',
json=self.fake_aggregate), uri=self.get_mock_url(
dict(method='PUT', 'compute', 'public', append=['os-aggregates', '1']
uri=self.get_mock_url( ),
'compute', 'public', append=['os-aggregates', '1']), json=self.fake_aggregate,
json={'aggregate': self.fake_aggregate}, ),
validate=dict( dict(
json={ method='PUT',
'aggregate': { uri=self.get_mock_url(
'availability_zone': None, 'compute', 'public', append=['os-aggregates', '1']
}})), ),
]) json={'aggregate': self.fake_aggregate},
validate=dict(
json={
'aggregate': {
'availability_zone': None,
}
}
),
),
]
)
self.cloud.update_aggregate(1, availability_zone=None) self.cloud.update_aggregate(1, availability_zone=None)
@ -146,57 +197,83 @@ class TestAggregate(base.TestCase):
def test_set_aggregate_metadata(self): def test_set_aggregate_metadata(self):
metadata = {'key': 'value'} metadata = {'key': 'value'}
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'compute', 'public', append=['os-aggregates', '1']), method='GET',
json=self.fake_aggregate), uri=self.get_mock_url(
dict(method='POST', 'compute', 'public', append=['os-aggregates', '1']
uri=self.get_mock_url( ),
'compute', 'public', json=self.fake_aggregate,
append=['os-aggregates', '1', 'action']), ),
json={'aggregate': self.fake_aggregate}, dict(
validate=dict( method='POST',
json={'set_metadata': {'metadata': metadata}})), uri=self.get_mock_url(
]) 'compute',
'public',
append=['os-aggregates', '1', 'action'],
),
json={'aggregate': self.fake_aggregate},
validate=dict(
json={'set_metadata': {'metadata': metadata}}
),
),
]
)
self.cloud.set_aggregate_metadata('1', metadata) self.cloud.set_aggregate_metadata('1', metadata)
self.assert_calls() self.assert_calls()
def test_add_host_to_aggregate(self): def test_add_host_to_aggregate(self):
hostname = 'host1' hostname = 'host1'
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'compute', 'public', append=['os-aggregates', '1']), method='GET',
json=self.fake_aggregate), uri=self.get_mock_url(
dict(method='POST', 'compute', 'public', append=['os-aggregates', '1']
uri=self.get_mock_url( ),
'compute', 'public', json=self.fake_aggregate,
append=['os-aggregates', '1', 'action']), ),
json={'aggregate': self.fake_aggregate}, dict(
validate=dict( method='POST',
json={'add_host': {'host': hostname}})), uri=self.get_mock_url(
]) 'compute',
'public',
append=['os-aggregates', '1', 'action'],
),
json={'aggregate': self.fake_aggregate},
validate=dict(json={'add_host': {'host': hostname}}),
),
]
)
self.cloud.add_host_to_aggregate('1', hostname) self.cloud.add_host_to_aggregate('1', hostname)
self.assert_calls() self.assert_calls()
def test_remove_host_from_aggregate(self): def test_remove_host_from_aggregate(self):
hostname = 'host1' hostname = 'host1'
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'compute', 'public', append=['os-aggregates', '1']), method='GET',
json=self.fake_aggregate), uri=self.get_mock_url(
dict(method='POST', 'compute', 'public', append=['os-aggregates', '1']
uri=self.get_mock_url( ),
'compute', 'public', json=self.fake_aggregate,
append=['os-aggregates', '1', 'action']), ),
json={'aggregate': self.fake_aggregate}, dict(
validate=dict( method='POST',
json={'remove_host': {'host': hostname}})), uri=self.get_mock_url(
]) 'compute',
'public',
append=['os-aggregates', '1', 'action'],
),
json={'aggregate': self.fake_aggregate},
validate=dict(json={'remove_host': {'host': hostname}}),
),
]
)
self.cloud.remove_host_from_aggregate('1', hostname) self.cloud.remove_host_from_aggregate('1', hostname)
self.assert_calls() self.assert_calls()

View File

@ -17,62 +17,63 @@ from openstack.tests.unit import base
_fake_zone_list = { _fake_zone_list = {
"availabilityZoneInfo": [ "availabilityZoneInfo": [
{ {"hosts": None, "zoneName": "az1", "zoneState": {"available": True}},
"hosts": None, {"hosts": None, "zoneName": "nova", "zoneState": {"available": False}},
"zoneName": "az1",
"zoneState": {
"available": True
}
},
{
"hosts": None,
"zoneName": "nova",
"zoneState": {
"available": False
}
}
] ]
} }
class TestAvailabilityZoneNames(base.TestCase): class TestAvailabilityZoneNames(base.TestCase):
def test_list_availability_zone_names(self): def test_list_availability_zone_names(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri='{endpoint}/os-availability-zone'.format( dict(
endpoint=fakes.COMPUTE_ENDPOINT), method='GET',
json=_fake_zone_list), uri='{endpoint}/os-availability-zone'.format(
]) endpoint=fakes.COMPUTE_ENDPOINT
),
json=_fake_zone_list,
),
]
)
self.assertEqual( self.assertEqual(['az1'], self.cloud.list_availability_zone_names())
['az1'], self.cloud.list_availability_zone_names())
self.assert_calls() self.assert_calls()
def test_unauthorized_availability_zone_names(self): def test_unauthorized_availability_zone_names(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri='{endpoint}/os-availability-zone'.format( dict(
endpoint=fakes.COMPUTE_ENDPOINT), method='GET',
status_code=403), uri='{endpoint}/os-availability-zone'.format(
]) endpoint=fakes.COMPUTE_ENDPOINT
),
status_code=403,
),
]
)
self.assertEqual( self.assertEqual([], self.cloud.list_availability_zone_names())
[], self.cloud.list_availability_zone_names())
self.assert_calls() self.assert_calls()
def test_list_all_availability_zone_names(self): def test_list_all_availability_zone_names(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri='{endpoint}/os-availability-zone'.format( dict(
endpoint=fakes.COMPUTE_ENDPOINT), method='GET',
json=_fake_zone_list), uri='{endpoint}/os-availability-zone'.format(
]) endpoint=fakes.COMPUTE_ENDPOINT
),
json=_fake_zone_list,
),
]
)
self.assertEqual( self.assertEqual(
['az1', 'nova'], ['az1', 'nova'],
self.cloud.list_availability_zone_names(unavailable=True)) self.cloud.list_availability_zone_names(unavailable=True),
)
self.assert_calls() self.assert_calls()

File diff suppressed because it is too large Load Diff

View File

@ -25,28 +25,36 @@ from openstack.tests.unit import base
class TestBaremetalPort(base.IronicTestCase): class TestBaremetalPort(base.IronicTestCase):
def setUp(self): def setUp(self):
super(TestBaremetalPort, self).setUp() super(TestBaremetalPort, self).setUp()
self.fake_baremetal_node = fakes.make_fake_machine( self.fake_baremetal_node = fakes.make_fake_machine(
self.name, self.uuid) self.name, self.uuid
)
# TODO(TheJulia): Some tests below have fake ports, # TODO(TheJulia): Some tests below have fake ports,
# since they are required in some processes. Lets refactor # since they are required in some processes. Lets refactor
# them at some point to use self.fake_baremetal_port. # them at some point to use self.fake_baremetal_port.
self.fake_baremetal_port = fakes.make_fake_port( self.fake_baremetal_port = fakes.make_fake_port(
'00:01:02:03:04:05', '00:01:02:03:04:05', node_id=self.uuid
node_id=self.uuid) )
self.fake_baremetal_port2 = fakes.make_fake_port( self.fake_baremetal_port2 = fakes.make_fake_port(
'0a:0b:0c:0d:0e:0f', '0a:0b:0c:0d:0e:0f', node_id=self.uuid
node_id=self.uuid) )
def test_list_nics(self): def test_list_nics(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url(resource='ports', append=['detail']), dict(
json={'ports': [self.fake_baremetal_port, method='GET',
self.fake_baremetal_port2]}), uri=self.get_mock_url(resource='ports', append=['detail']),
]) json={
'ports': [
self.fake_baremetal_port,
self.fake_baremetal_port2,
]
},
),
]
)
return_value = self.cloud.list_nics() return_value = self.cloud.list_nics()
self.assertEqual(2, len(return_value)) self.assertEqual(2, len(return_value))
@ -54,59 +62,86 @@ class TestBaremetalPort(base.IronicTestCase):
self.assert_calls() self.assert_calls()
def test_list_nics_failure(self): def test_list_nics_failure(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url(resource='ports', append=['detail']), dict(
status_code=400) method='GET',
]) uri=self.get_mock_url(resource='ports', append=['detail']),
self.assertRaises(exc.OpenStackCloudException, status_code=400,
self.cloud.list_nics) )
]
)
self.assertRaises(exc.OpenStackCloudException, self.cloud.list_nics)
self.assert_calls() self.assert_calls()
def test_list_nics_for_machine(self): def test_list_nics_for_machine(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
resource='ports', method='GET',
append=['detail'], uri=self.get_mock_url(
qs_elements=['node_uuid=%s' % resource='ports',
self.fake_baremetal_node['uuid']]), append=['detail'],
json={'ports': [self.fake_baremetal_port, qs_elements=[
self.fake_baremetal_port2]}), 'node_uuid=%s' % self.fake_baremetal_node['uuid']
]) ],
),
json={
'ports': [
self.fake_baremetal_port,
self.fake_baremetal_port2,
]
},
),
]
)
return_value = self.cloud.list_nics_for_machine( return_value = self.cloud.list_nics_for_machine(
self.fake_baremetal_node['uuid']) self.fake_baremetal_node['uuid']
)
self.assertEqual(2, len(return_value)) self.assertEqual(2, len(return_value))
self.assertSubdict(self.fake_baremetal_port, return_value[0]) self.assertSubdict(self.fake_baremetal_port, return_value[0])
self.assert_calls() self.assert_calls()
def test_list_nics_for_machine_failure(self): def test_list_nics_for_machine_failure(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
resource='ports', method='GET',
append=['detail'], uri=self.get_mock_url(
qs_elements=['node_uuid=%s' % resource='ports',
self.fake_baremetal_node['uuid']]), append=['detail'],
status_code=400) qs_elements=[
]) 'node_uuid=%s' % self.fake_baremetal_node['uuid']
],
),
status_code=400,
)
]
)
self.assertRaises(exc.OpenStackCloudException, self.assertRaises(
self.cloud.list_nics_for_machine, exc.OpenStackCloudException,
self.fake_baremetal_node['uuid']) self.cloud.list_nics_for_machine,
self.fake_baremetal_node['uuid'],
)
self.assert_calls() self.assert_calls()
def test_get_nic_by_mac(self): def test_get_nic_by_mac(self):
mac = self.fake_baremetal_port['address'] mac = self.fake_baremetal_port['address']
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
resource='ports', method='GET',
append=['detail'], uri=self.get_mock_url(
qs_elements=['address=%s' % mac]), resource='ports',
json={'ports': [self.fake_baremetal_port]}), append=['detail'],
]) qs_elements=['address=%s' % mac],
),
json={'ports': [self.fake_baremetal_port]},
),
]
)
return_value = self.cloud.get_nic_by_mac(mac) return_value = self.cloud.get_nic_by_mac(mac)
@ -115,14 +150,19 @@ class TestBaremetalPort(base.IronicTestCase):
def test_get_nic_by_mac_failure(self): def test_get_nic_by_mac_failure(self):
mac = self.fake_baremetal_port['address'] mac = self.fake_baremetal_port['address']
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
resource='ports', method='GET',
append=['detail'], uri=self.get_mock_url(
qs_elements=['address=%s' % mac]), resource='ports',
json={'ports': []}), append=['detail'],
]) qs_elements=['address=%s' % mac],
),
json={'ports': []},
),
]
)
self.assertIsNone(self.cloud.get_nic_by_mac(mac)) self.assertIsNone(self.cloud.get_nic_by_mac(mac))

View File

@ -39,26 +39,23 @@ def _(msg):
_TASK_PROPERTIES = { _TASK_PROPERTIES = {
"id": { "id": {
"description": _("An identifier for the task"), "description": _("An identifier for the task"),
"pattern": _('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}' "pattern": _(
'-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'), '^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
"type": "string" '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'
),
"type": "string",
}, },
"type": { "type": {
"description": _("The type of task represented by this content"), "description": _("The type of task represented by this content"),
"enum": [ "enum": [
"import", "import",
], ],
"type": "string" "type": "string",
}, },
"status": { "status": {
"description": _("The current status of this task"), "description": _("The current status of this task"),
"enum": [ "enum": ["pending", "processing", "success", "failure"],
"pending", "type": "string",
"processing",
"success",
"failure"
],
"type": "string"
}, },
"input": { "input": {
"description": _("The parameters required by task, JSON blob"), "description": _("The parameters required by task, JSON blob"),
@ -70,50 +67,55 @@ _TASK_PROPERTIES = {
}, },
"owner": { "owner": {
"description": _("An identifier for the owner of this task"), "description": _("An identifier for the owner of this task"),
"type": "string" "type": "string",
}, },
"message": { "message": {
"description": _("Human-readable informative message only included" "description": _(
" when appropriate (usually on failure)"), "Human-readable informative message only included"
" when appropriate (usually on failure)"
),
"type": "string", "type": "string",
}, },
"expires_at": { "expires_at": {
"description": _("Datetime when this resource would be" "description": _(
" subject to removal"), "Datetime when this resource would be" " subject to removal"
"type": ["null", "string"] ),
"type": ["null", "string"],
}, },
"created_at": { "created_at": {
"description": _("Datetime when this resource was created"), "description": _("Datetime when this resource was created"),
"type": "string" "type": "string",
}, },
"updated_at": { "updated_at": {
"description": _("Datetime when this resource was updated"), "description": _("Datetime when this resource was updated"),
"type": "string" "type": "string",
}, },
'self': {'type': 'string'}, 'self': {'type': 'string'},
'schema': {'type': 'string'} 'schema': {'type': 'string'},
} }
_TASK_SCHEMA = dict( _TASK_SCHEMA = dict(
name='Task', properties=_TASK_PROPERTIES, name='Task',
properties=_TASK_PROPERTIES,
additionalProperties=False, additionalProperties=False,
) )
class TestMemoryCache(base.TestCase): class TestMemoryCache(base.TestCase):
def setUp(self): def setUp(self):
super(TestMemoryCache, self).setUp( super(TestMemoryCache, self).setUp(
cloud_config_fixture='clouds_cache.yaml') cloud_config_fixture='clouds_cache.yaml'
)
def _compare_images(self, exp, real): def _compare_images(self, exp, real):
self.assertDictEqual( self.assertDictEqual(
_image.Image(**exp).to_dict(computed=False), _image.Image(**exp).to_dict(computed=False),
real.to_dict(computed=False)) real.to_dict(computed=False),
)
def _compare_volumes(self, exp, real): def _compare_volumes(self, exp, real):
self.assertDictEqual( self.assertDictEqual(
_volume.Volume(**exp).to_dict(computed=False), _volume.Volume(**exp).to_dict(computed=False),
real.to_dict(computed=False) real.to_dict(computed=False),
) )
def test_openstack_cloud(self): def test_openstack_cloud(self):
@ -122,13 +124,13 @@ class TestMemoryCache(base.TestCase):
def _compare_projects(self, exp, real): def _compare_projects(self, exp, real):
self.assertDictEqual( self.assertDictEqual(
_project.Project(**exp).to_dict(computed=False), _project.Project(**exp).to_dict(computed=False),
real.to_dict(computed=False) real.to_dict(computed=False),
) )
def _compare_users(self, exp, real): def _compare_users(self, exp, real):
self.assertDictEqual( self.assertDictEqual(
_user.User(**exp).to_dict(computed=False), _user.User(**exp).to_dict(computed=False),
real.to_dict(computed=False) real.to_dict(computed=False),
) )
def test_list_projects_v3(self): def test_list_projects_v3(self):
@ -137,28 +139,42 @@ class TestMemoryCache(base.TestCase):
project_list = [project_one, project_two] project_list = [project_one, project_two]
first_response = {'projects': [project_one.json_response['project']]} first_response = {'projects': [project_one.json_response['project']]}
second_response = {'projects': [p.json_response['project'] second_response = {
for p in project_list]} 'projects': [p.json_response['project'] for p in project_list]
}
mock_uri = self.get_mock_url( mock_uri = self.get_mock_url(
service_type='identity', resource='projects', service_type='identity', resource='projects', base_url_append='v3'
base_url_append='v3') )
self.register_uris([ self.register_uris(
dict(method='GET', uri=mock_uri, status_code=200, [
json=first_response), dict(
dict(method='GET', uri=mock_uri, status_code=200, method='GET',
json=second_response)]) uri=mock_uri,
status_code=200,
json=first_response,
),
dict(
method='GET',
uri=mock_uri,
status_code=200,
json=second_response,
),
]
)
for a, b in zip(first_response['projects'], for a, b in zip(
self.cloud.list_projects()): first_response['projects'], self.cloud.list_projects()
):
self._compare_projects(a, b) self._compare_projects(a, b)
# invalidate the list_projects cache # invalidate the list_projects cache
self.cloud.list_projects.invalidate(self.cloud) self.cloud.list_projects.invalidate(self.cloud)
for a, b in zip(second_response['projects'], for a, b in zip(
self.cloud.list_projects()): second_response['projects'], self.cloud.list_projects()
):
self._compare_projects(a, b) self._compare_projects(a, b)
self.assert_calls() self.assert_calls()
@ -166,13 +182,18 @@ class TestMemoryCache(base.TestCase):
def test_list_servers_no_herd(self): def test_list_servers_no_herd(self):
self.cloud._SERVER_AGE = 2 self.cloud._SERVER_AGE = 2
fake_server = fakes.make_fake_server('1234', 'name') fake_server = fakes.make_fake_server('1234', 'name')
self.register_uris([ self.register_uris(
self.get_nova_discovery_mock_dict(), [
dict(method='GET', self.get_nova_discovery_mock_dict(),
uri=self.get_mock_url( dict(
'compute', 'public', append=['servers', 'detail']), method='GET',
json={'servers': [fake_server]}), uri=self.get_mock_url(
]) 'compute', 'public', append=['servers', 'detail']
),
json={'servers': [fake_server]},
),
]
)
with concurrent.futures.ThreadPoolExecutor(16) as pool: with concurrent.futures.ThreadPoolExecutor(16) as pool:
for i in range(16): for i in range(16):
pool.submit(lambda: self.cloud.list_servers(bare=True)) pool.submit(lambda: self.cloud.list_servers(bare=True))
@ -183,125 +204,180 @@ class TestMemoryCache(base.TestCase):
self.assert_calls() self.assert_calls()
def test_list_volumes(self): def test_list_volumes(self):
fake_volume = fakes.FakeVolume('volume1', 'available', fake_volume = fakes.FakeVolume(
'Volume 1 Display Name') 'volume1', 'available', 'Volume 1 Display Name'
)
fake_volume_dict = meta.obj_to_munch(fake_volume) fake_volume_dict = meta.obj_to_munch(fake_volume)
fake_volume2 = fakes.FakeVolume('volume2', 'available', fake_volume2 = fakes.FakeVolume(
'Volume 2 Display Name') 'volume2', 'available', 'Volume 2 Display Name'
)
fake_volume2_dict = meta.obj_to_munch(fake_volume2) fake_volume2_dict = meta.obj_to_munch(fake_volume2)
self.register_uris([ self.register_uris(
self.get_cinder_discovery_mock_dict(), [
dict(method='GET', self.get_cinder_discovery_mock_dict(),
uri=self.get_mock_url( dict(
'volumev3', 'public', append=['volumes', 'detail']), method='GET',
json={'volumes': [fake_volume_dict]}), uri=self.get_mock_url(
dict(method='GET', 'volumev3', 'public', append=['volumes', 'detail']
uri=self.get_mock_url( ),
'volumev3', 'public', append=['volumes', 'detail']), json={'volumes': [fake_volume_dict]},
json={'volumes': [fake_volume_dict, fake_volume2_dict]})]) ),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', 'detail']
),
json={'volumes': [fake_volume_dict, fake_volume2_dict]},
),
]
)
for a, b in zip([fake_volume_dict], for a, b in zip([fake_volume_dict], self.cloud.list_volumes()):
self.cloud.list_volumes()):
self._compare_volumes(a, b) self._compare_volumes(a, b)
# this call should hit the cache # this call should hit the cache
for a, b in zip([fake_volume_dict], for a, b in zip([fake_volume_dict], self.cloud.list_volumes()):
self.cloud.list_volumes()):
self._compare_volumes(a, b) self._compare_volumes(a, b)
self.cloud.list_volumes.invalidate(self.cloud) self.cloud.list_volumes.invalidate(self.cloud)
for a, b in zip([fake_volume_dict, fake_volume2_dict], for a, b in zip(
self.cloud.list_volumes()): [fake_volume_dict, fake_volume2_dict], self.cloud.list_volumes()
):
self._compare_volumes(a, b) self._compare_volumes(a, b)
self.assert_calls() self.assert_calls()
def test_list_volumes_creating_invalidates(self): def test_list_volumes_creating_invalidates(self):
fake_volume = fakes.FakeVolume('volume1', 'creating', fake_volume = fakes.FakeVolume(
'Volume 1 Display Name') 'volume1', 'creating', 'Volume 1 Display Name'
)
fake_volume_dict = meta.obj_to_munch(fake_volume) fake_volume_dict = meta.obj_to_munch(fake_volume)
fake_volume2 = fakes.FakeVolume('volume2', 'available', fake_volume2 = fakes.FakeVolume(
'Volume 2 Display Name') 'volume2', 'available', 'Volume 2 Display Name'
)
fake_volume2_dict = meta.obj_to_munch(fake_volume2) fake_volume2_dict = meta.obj_to_munch(fake_volume2)
self.register_uris([ self.register_uris(
self.get_cinder_discovery_mock_dict(), [
dict(method='GET', self.get_cinder_discovery_mock_dict(),
uri=self.get_mock_url( dict(
'volumev3', 'public', append=['volumes', 'detail']), method='GET',
json={'volumes': [fake_volume_dict]}), uri=self.get_mock_url(
dict(method='GET', 'volumev3', 'public', append=['volumes', 'detail']
uri=self.get_mock_url( ),
'volumev3', 'public', append=['volumes', 'detail']), json={'volumes': [fake_volume_dict]},
json={'volumes': [fake_volume_dict, fake_volume2_dict]})]) ),
for a, b in zip([fake_volume_dict], dict(
self.cloud.list_volumes()): method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', 'detail']
),
json={'volumes': [fake_volume_dict, fake_volume2_dict]},
),
]
)
for a, b in zip([fake_volume_dict], self.cloud.list_volumes()):
self._compare_volumes(a, b) self._compare_volumes(a, b)
for a, b in zip([fake_volume_dict, fake_volume2_dict], for a, b in zip(
self.cloud.list_volumes()): [fake_volume_dict, fake_volume2_dict], self.cloud.list_volumes()
):
self._compare_volumes(a, b) self._compare_volumes(a, b)
self.assert_calls() self.assert_calls()
def test_create_volume_invalidates(self): def test_create_volume_invalidates(self):
fake_volb4 = meta.obj_to_munch( fake_volb4 = meta.obj_to_munch(
fakes.FakeVolume('volume1', 'available', '')) fakes.FakeVolume('volume1', 'available', '')
)
_id = '12345' _id = '12345'
fake_vol_creating = meta.obj_to_munch( fake_vol_creating = meta.obj_to_munch(
fakes.FakeVolume(_id, 'creating', '')) fakes.FakeVolume(_id, 'creating', '')
)
fake_vol_avail = meta.obj_to_munch( fake_vol_avail = meta.obj_to_munch(
fakes.FakeVolume(_id, 'available', '')) fakes.FakeVolume(_id, 'available', '')
)
def now_deleting(request, context): def now_deleting(request, context):
fake_vol_avail['status'] = 'deleting' fake_vol_avail['status'] = 'deleting'
self.register_uris([ self.register_uris(
self.get_cinder_discovery_mock_dict(), [
dict(method='GET', self.get_cinder_discovery_mock_dict(),
uri=self.get_mock_url( dict(
'volumev3', 'public', append=['volumes', 'detail']), method='GET',
json={'volumes': [fake_volb4]}), uri=self.get_mock_url(
dict(method='POST', 'volumev3', 'public', append=['volumes', 'detail']
uri=self.get_mock_url( ),
'volumev3', 'public', append=['volumes']), json={'volumes': [fake_volb4]},
json={'volume': fake_vol_creating}), ),
dict(method='GET', dict(
uri=self.get_mock_url( method='POST',
'volumev3', 'public', append=['volumes', _id]), uri=self.get_mock_url(
json={'volume': fake_vol_creating}), 'volumev3', 'public', append=['volumes']
dict(method='GET', ),
uri=self.get_mock_url( json={'volume': fake_vol_creating},
'volumev3', 'public', append=['volumes', _id]), ),
json={'volume': fake_vol_avail}), dict(
dict(method='GET', method='GET',
uri=self.get_mock_url( uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', 'detail']), 'volumev3', 'public', append=['volumes', _id]
json={'volumes': [fake_volb4, fake_vol_avail]}), ),
dict(method='GET', json={'volume': fake_vol_creating},
uri=self.get_mock_url( ),
'volumev3', 'public', dict(
append=['volumes', _id]), method='GET',
json={'volume': fake_vol_avail}), uri=self.get_mock_url(
dict(method='DELETE', 'volumev3', 'public', append=['volumes', _id]
uri=self.get_mock_url( ),
'volumev3', 'public', append=['volumes', _id]), json={'volume': fake_vol_avail},
json=now_deleting), ),
dict(method='GET', dict(
uri=self.get_mock_url( method='GET',
'volumev3', 'public', append=['volumes', _id]), uri=self.get_mock_url(
status_code=404), 'volumev3', 'public', append=['volumes', 'detail']
dict(method='GET', ),
uri=self.get_mock_url( json={'volumes': [fake_volb4, fake_vol_avail]},
'volumev3', 'public', append=['volumes', 'detail']), ),
json={'volumes': [fake_volb4, fake_vol_avail]}), dict(
]) method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', _id]
),
json={'volume': fake_vol_avail},
),
dict(
method='DELETE',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', _id]
),
json=now_deleting,
),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', _id]
),
status_code=404,
),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', 'detail']
),
json={'volumes': [fake_volb4, fake_vol_avail]},
),
]
)
for a, b in zip([fake_volb4], self.cloud.list_volumes()): for a, b in zip([fake_volb4], self.cloud.list_volumes()):
self._compare_volumes(a, b) self._compare_volumes(a, b)
volume = dict(display_name='junk_vol', volume = dict(
size=1, display_name='junk_vol',
display_description='test junk volume') size=1,
display_description='test junk volume',
)
self.cloud.create_volume(wait=True, timeout=2, **volume) self.cloud.create_volume(wait=True, timeout=2, **volume)
# If cache was not invalidated, we would not see our own volume here # If cache was not invalidated, we would not see our own volume here
# because the first volume was available and thus would already be # because the first volume was available and thus would already be
# cached. # cached.
for a, b in zip([fake_volb4, fake_vol_avail], for a, b in zip(
self.cloud.list_volumes()): [fake_volb4, fake_vol_avail], self.cloud.list_volumes()
):
self._compare_volumes(a, b) self._compare_volumes(a, b)
self.cloud.delete_volume(_id) self.cloud.delete_volume(_id)
# And now delete and check same thing since list is cached as all # And now delete and check same thing since list is cached as all
@ -312,14 +388,20 @@ class TestMemoryCache(base.TestCase):
def test_list_users(self): def test_list_users(self):
user_data = self._get_user_data(email='test@example.com') user_data = self._get_user_data(email='test@example.com')
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
service_type='identity', method='GET',
resource='users', uri=self.get_mock_url(
base_url_append='v3'), service_type='identity',
status_code=200, resource='users',
json={'users': [user_data.json_response['user']]})]) base_url_append='v3',
),
status_code=200,
json={'users': [user_data.json_response['user']]},
)
]
)
users = self.cloud.list_users() users = self.cloud.list_users()
self.assertEqual(1, len(users)) self.assertEqual(1, len(users))
self.assertEqual(user_data.user_id, users[0]['id']) self.assertEqual(user_data.user_id, users[0]['id'])
@ -336,14 +418,14 @@ class TestMemoryCache(base.TestCase):
new_req = {'user': {'email': new_resp['user']['email']}} new_req = {'user': {'email': new_resp['user']['email']}}
mock_users_url = self.get_mock_url( mock_users_url = self.get_mock_url(
service_type='identity', service_type='identity', interface='admin', resource='users'
interface='admin', )
resource='users')
mock_user_resource_url = self.get_mock_url( mock_user_resource_url = self.get_mock_url(
service_type='identity', service_type='identity',
interface='admin', interface='admin',
resource='users', resource='users',
append=[user_data.user_id]) append=[user_data.user_id],
)
empty_user_list_resp = {'users': []} empty_user_list_resp = {'users': []}
users_list_resp = {'users': [user_data.json_response['user']]} users_list_resp = {'users': [user_data.json_response['user']]}
@ -354,35 +436,68 @@ class TestMemoryCache(base.TestCase):
uris_to_mock = [ uris_to_mock = [
# Inital User List is Empty # Inital User List is Empty
dict(method='GET', uri=mock_users_url, status_code=200, dict(
json=empty_user_list_resp), method='GET',
uri=mock_users_url,
status_code=200,
json=empty_user_list_resp,
),
# POST to create the user # POST to create the user
# GET to get the user data after POST # GET to get the user data after POST
dict(method='POST', uri=mock_users_url, status_code=200, dict(
json=user_data.json_response, method='POST',
validate=dict(json=user_data.json_request)), uri=mock_users_url,
status_code=200,
json=user_data.json_response,
validate=dict(json=user_data.json_request),
),
# List Users Call # List Users Call
dict(method='GET', uri=mock_users_url, status_code=200, dict(
json=users_list_resp), method='GET',
uri=mock_users_url,
status_code=200,
json=users_list_resp,
),
# List users to get ID for update # List users to get ID for update
# Get user using user_id from list # Get user using user_id from list
# Update user # Update user
# Get updated user # Get updated user
dict(method='GET', uri=mock_users_url, status_code=200, dict(
json=users_list_resp), method='GET',
dict(method='PUT', uri=mock_user_resource_url, status_code=200, uri=mock_users_url,
json=new_resp, validate=dict(json=new_req)), status_code=200,
json=users_list_resp,
),
dict(
method='PUT',
uri=mock_user_resource_url,
status_code=200,
json=new_resp,
validate=dict(json=new_req),
),
# List Users Call # List Users Call
dict(method='GET', uri=mock_users_url, status_code=200, dict(
json=updated_users_list_resp), method='GET',
uri=mock_users_url,
status_code=200,
json=updated_users_list_resp,
),
# List User to get ID for delete # List User to get ID for delete
# delete user # delete user
dict(method='GET', uri=mock_users_url, status_code=200, dict(
json=updated_users_list_resp), method='GET',
uri=mock_users_url,
status_code=200,
json=updated_users_list_resp,
),
dict(method='DELETE', uri=mock_user_resource_url, status_code=204), dict(method='DELETE', uri=mock_user_resource_url, status_code=204),
# List Users Call (empty post delete) # List Users Call (empty post delete)
dict(method='GET', uri=mock_users_url, status_code=200, dict(
json=empty_user_list_resp) method='GET',
uri=mock_users_url,
status_code=200,
json=empty_user_list_resp,
),
] ]
self.register_uris(uris_to_mock) self.register_uris(uris_to_mock)
@ -391,8 +506,9 @@ class TestMemoryCache(base.TestCase):
self.assertEqual([], self.cloud.list_users()) self.assertEqual([], self.cloud.list_users())
# now add one # now add one
created = self.cloud.create_user(name=user_data.name, created = self.cloud.create_user(
email=user_data.email) name=user_data.name, email=user_data.email
)
self.assertEqual(user_data.user_id, created['id']) self.assertEqual(user_data.user_id, created['id'])
self.assertEqual(user_data.name, created['name']) self.assertEqual(user_data.name, created['name'])
self.assertEqual(user_data.email, created['email']) self.assertEqual(user_data.email, created['email'])
@ -403,8 +519,9 @@ class TestMemoryCache(base.TestCase):
self.assertEqual(user_data.email, users[0]['email']) self.assertEqual(user_data.email, users[0]['email'])
# Update and check to see if it is updated # Update and check to see if it is updated
updated = self.cloud.update_user(user_data.user_id, updated = self.cloud.update_user(
email=new_resp['user']['email']) user_data.user_id, email=new_resp['user']['email']
)
self.assertEqual(user_data.user_id, updated.id) self.assertEqual(user_data.user_id, updated.id)
self.assertEqual(user_data.name, updated.name) self.assertEqual(user_data.name, updated.name)
self.assertEqual(new_resp['user']['email'], updated.email) self.assertEqual(new_resp['user']['email'], updated.email)
@ -420,17 +537,26 @@ class TestMemoryCache(base.TestCase):
def test_list_flavors(self): def test_list_flavors(self):
mock_uri = '{endpoint}/flavors/detail?is_public=None'.format( mock_uri = '{endpoint}/flavors/detail?is_public=None'.format(
endpoint=fakes.COMPUTE_ENDPOINT) endpoint=fakes.COMPUTE_ENDPOINT
)
uris_to_mock = [ uris_to_mock = [
dict(method='GET', uri=mock_uri, dict(
validate=dict( method='GET',
headers={'OpenStack-API-Version': 'compute 2.53'}), uri=mock_uri,
json={'flavors': []}), validate=dict(
dict(method='GET', uri=mock_uri, headers={'OpenStack-API-Version': 'compute 2.53'}
validate=dict( ),
headers={'OpenStack-API-Version': 'compute 2.53'}), json={'flavors': []},
json={'flavors': fakes.FAKE_FLAVOR_LIST}) ),
dict(
method='GET',
uri=mock_uri,
validate=dict(
headers={'OpenStack-API-Version': 'compute 2.53'}
),
json={'flavors': fakes.FAKE_FLAVOR_LIST},
),
] ]
self.use_compute_discovery() self.use_compute_discovery()
@ -442,9 +568,7 @@ class TestMemoryCache(base.TestCase):
self.cloud.list_flavors.invalidate(self.cloud) self.cloud.list_flavors.invalidate(self.cloud)
self.assertResourceListEqual( self.assertResourceListEqual(
self.cloud.list_flavors(), self.cloud.list_flavors(), fakes.FAKE_FLAVOR_LIST, _flavor.Flavor
fakes.FAKE_FLAVOR_LIST,
_flavor.Flavor
) )
self.assert_calls() self.assert_calls()
@ -454,23 +578,32 @@ class TestMemoryCache(base.TestCase):
self.use_glance() self.use_glance()
fake_image = fakes.make_fake_image(image_id='42') fake_image = fakes.make_fake_image(image_id='42')
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url('image', 'public', dict(
append=['v2', 'images']), method='GET',
json={'images': []}), uri=self.get_mock_url(
dict(method='GET', 'image', 'public', append=['v2', 'images']
uri=self.get_mock_url('image', 'public', ),
append=['v2', 'images']), json={'images': []},
json={'images': [fake_image]}), ),
]) dict(
method='GET',
uri=self.get_mock_url(
'image', 'public', append=['v2', 'images']
),
json={'images': [fake_image]},
),
]
)
self.assertEqual([], self.cloud.list_images()) self.assertEqual([], self.cloud.list_images())
self.assertEqual([], self.cloud.list_images()) self.assertEqual([], self.cloud.list_images())
self.cloud.list_images.invalidate(self.cloud) self.cloud.list_images.invalidate(self.cloud)
[self._compare_images(a, b) for a, b in zip( [
[fake_image], self._compare_images(a, b)
self.cloud.list_images())] for a, b in zip([fake_image], self.cloud.list_images())
]
self.assert_calls() self.assert_calls()
@ -479,23 +612,30 @@ class TestMemoryCache(base.TestCase):
deleted_image_id = self.getUniqueString() deleted_image_id = self.getUniqueString()
deleted_image = fakes.make_fake_image( deleted_image = fakes.make_fake_image(
image_id=deleted_image_id, status='deleted') image_id=deleted_image_id, status='deleted'
)
active_image_id = self.getUniqueString() active_image_id = self.getUniqueString()
active_image = fakes.make_fake_image(image_id=active_image_id) active_image = fakes.make_fake_image(image_id=active_image_id)
list_return = {'images': [active_image, deleted_image]} list_return = {'images': [active_image, deleted_image]}
self.register_uris([ self.register_uris(
dict(method='GET', [
uri='https://image.example.com/v2/images', dict(
json=list_return), method='GET',
]) uri='https://image.example.com/v2/images',
json=list_return,
),
]
)
[self._compare_images(a, b) for a, b in zip( [
[active_image], self._compare_images(a, b)
self.cloud.list_images())] for a, b in zip([active_image], self.cloud.list_images())
]
[self._compare_images(a, b) for a, b in zip( [
[active_image], self._compare_images(a, b)
self.cloud.list_images())] for a, b in zip([active_image], self.cloud.list_images())
]
# We should only have one call # We should only have one call
self.assert_calls() self.assert_calls()
@ -507,29 +647,38 @@ class TestMemoryCache(base.TestCase):
fi = fakes.make_fake_image(image_id=self.getUniqueString()) fi = fakes.make_fake_image(image_id=self.getUniqueString())
fi2 = fakes.make_fake_image(image_id=self.getUniqueString()) fi2 = fakes.make_fake_image(image_id=self.getUniqueString())
self.register_uris([ self.register_uris(
dict(method='GET', [
uri='https://image.example.com/v2/images', dict(
json={'images': [fi]}), method='GET',
dict(method='GET', uri='https://image.example.com/v2/images',
uri='https://image.example.com/v2/images', json={'images': [fi]},
json={'images': [fi, fi2]}), ),
]) dict(
method='GET',
uri='https://image.example.com/v2/images',
json={'images': [fi, fi2]},
),
]
)
[self._compare_images(a, b) for a, b in zip( [
[fi], self._compare_images(a, b)
self.cloud.list_images())] for a, b in zip([fi], self.cloud.list_images())
]
# Now test that the list was cached # Now test that the list was cached
[self._compare_images(a, b) for a, b in zip( [
[fi], self._compare_images(a, b)
self.cloud.list_images())] for a, b in zip([fi], self.cloud.list_images())
]
# Invalidation too # Invalidation too
self.cloud.list_images.invalidate(self.cloud) self.cloud.list_images.invalidate(self.cloud)
[self._compare_images(a, b) for a, b in zip( [
[fi, fi2], self._compare_images(a, b)
self.cloud.list_images())] for a, b in zip([fi, fi2], self.cloud.list_images())
]
def test_list_ports_filtered(self): def test_list_ports_filtered(self):
down_port = test_port.TestPort.mock_neutron_port_create_rep['port'] down_port = test_port.TestPort.mock_neutron_port_create_rep['port']
@ -537,21 +686,31 @@ class TestMemoryCache(base.TestCase):
active_port['status'] = 'ACTIVE' active_port['status'] = 'ACTIVE'
# We're testing to make sure a query string is passed when we're # We're testing to make sure a query string is passed when we're
# caching (cache by url), and that the results are still filtered. # caching (cache by url), and that the results are still filtered.
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'network', 'public', append=['v2.0', 'ports'], method='GET',
qs_elements=['status=DOWN']), uri=self.get_mock_url(
json={'ports': [ 'network',
down_port, 'public',
active_port, append=['v2.0', 'ports'],
]}), qs_elements=['status=DOWN'],
]) ),
json={
'ports': [
down_port,
active_port,
]
},
),
]
)
ports = self.cloud.list_ports(filters={'status': 'DOWN'}) ports = self.cloud.list_ports(filters={'status': 'DOWN'})
for a, b in zip([down_port], ports): for a, b in zip([down_port], ports):
self.assertDictEqual( self.assertDictEqual(
_port.Port(**a).to_dict(computed=False), _port.Port(**a).to_dict(computed=False),
b.to_dict(computed=False)) b.to_dict(computed=False),
)
self.assert_calls() self.assert_calls()
@ -565,41 +724,56 @@ class TestCacheIgnoresQueuedStatus(base.TestCase):
def setUp(self): def setUp(self):
super(TestCacheIgnoresQueuedStatus, self).setUp( super(TestCacheIgnoresQueuedStatus, self).setUp(
cloud_config_fixture='clouds_cache.yaml') cloud_config_fixture='clouds_cache.yaml'
)
self.use_glance() self.use_glance()
active_image_id = self.getUniqueString() active_image_id = self.getUniqueString()
self.active_image = fakes.make_fake_image( self.active_image = fakes.make_fake_image(
image_id=active_image_id, status=self.status) image_id=active_image_id, status=self.status
)
self.active_list_return = {'images': [self.active_image]} self.active_list_return = {'images': [self.active_image]}
steady_image_id = self.getUniqueString() steady_image_id = self.getUniqueString()
self.steady_image = fakes.make_fake_image(image_id=steady_image_id) self.steady_image = fakes.make_fake_image(image_id=steady_image_id)
self.steady_list_return = { self.steady_list_return = {
'images': [self.active_image, self.steady_image]} 'images': [self.active_image, self.steady_image]
}
def _compare_images(self, exp, real): def _compare_images(self, exp, real):
self.assertDictEqual( self.assertDictEqual(
_image.Image(**exp).to_dict(computed=False), _image.Image(**exp).to_dict(computed=False),
real.to_dict(computed=False)) real.to_dict(computed=False),
)
def test_list_images_ignores_pending_status(self): def test_list_images_ignores_pending_status(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri='https://image.example.com/v2/images', dict(
json=self.active_list_return), method='GET',
dict(method='GET', uri='https://image.example.com/v2/images',
uri='https://image.example.com/v2/images', json=self.active_list_return,
json=self.steady_list_return), ),
]) dict(
method='GET',
uri='https://image.example.com/v2/images',
json=self.steady_list_return,
),
]
)
[self._compare_images(a, b) for a, b in zip( [
[self.active_image], self._compare_images(a, b)
self.cloud.list_images())] for a, b in zip([self.active_image], self.cloud.list_images())
]
# Should expect steady_image to appear if active wasn't cached # Should expect steady_image to appear if active wasn't cached
[self._compare_images(a, b) for a, b in zip( [
[self.active_image, self.steady_image], self._compare_images(a, b)
self.cloud.list_images())] for a, b in zip(
[self.active_image, self.steady_image],
self.cloud.list_images(),
)
]
class TestCacheSteadyStatus(base.TestCase): class TestCacheSteadyStatus(base.TestCase):
@ -611,45 +785,53 @@ class TestCacheSteadyStatus(base.TestCase):
def setUp(self): def setUp(self):
super(TestCacheSteadyStatus, self).setUp( super(TestCacheSteadyStatus, self).setUp(
cloud_config_fixture='clouds_cache.yaml') cloud_config_fixture='clouds_cache.yaml'
)
self.use_glance() self.use_glance()
active_image_id = self.getUniqueString() active_image_id = self.getUniqueString()
self.active_image = fakes.make_fake_image( self.active_image = fakes.make_fake_image(
image_id=active_image_id, status=self.status) image_id=active_image_id, status=self.status
)
self.active_list_return = {'images': [self.active_image]} self.active_list_return = {'images': [self.active_image]}
def _compare_images(self, exp, real): def _compare_images(self, exp, real):
self.assertDictEqual( self.assertDictEqual(
_image.Image(**exp).to_dict(computed=False), _image.Image(**exp).to_dict(computed=False),
real.to_dict(computed=False)) real.to_dict(computed=False),
)
def test_list_images_caches_steady_status(self): def test_list_images_caches_steady_status(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri='https://image.example.com/v2/images', dict(
json=self.active_list_return), method='GET',
]) uri='https://image.example.com/v2/images',
json=self.active_list_return,
),
]
)
[self._compare_images(a, b) for a, b in zip( [
[self.active_image], self._compare_images(a, b)
self.cloud.list_images())] for a, b in zip([self.active_image], self.cloud.list_images())
]
[self._compare_images(a, b) for a, b in zip( [
[self.active_image], self._compare_images(a, b)
self.cloud.list_images())] for a, b in zip([self.active_image], self.cloud.list_images())
]
# We should only have one call # We should only have one call
self.assert_calls() self.assert_calls()
class TestBogusAuth(base.TestCase): class TestBogusAuth(base.TestCase):
def setUp(self): def setUp(self):
super(TestBogusAuth, self).setUp( super(TestBogusAuth, self).setUp(
cloud_config_fixture='clouds_cache.yaml') cloud_config_fixture='clouds_cache.yaml'
)
def test_get_auth_bogus(self): def test_get_auth_bogus(self):
with testtools.ExpectedException(exceptions.ConfigException): with testtools.ExpectedException(exceptions.ConfigException):
openstack.connect( openstack.connect(cloud='_bogus_test_', config=self.config)
cloud='_bogus_test_', config=self.config)

View File

@ -49,7 +49,6 @@ cluster_template_obj = dict(
class TestClusterTemplates(base.TestCase): class TestClusterTemplates(base.TestCase):
def _compare_clustertemplates(self, exp, real): def _compare_clustertemplates(self, exp, real):
self.assertDictEqual( self.assertDictEqual(
cluster_template.ClusterTemplate(**exp).to_dict(computed=False), cluster_template.ClusterTemplate(**exp).to_dict(computed=False),
@ -57,20 +56,30 @@ class TestClusterTemplates(base.TestCase):
) )
def get_mock_url( def get_mock_url(
self, self,
service_type='container-infrastructure-management', service_type='container-infrastructure-management',
base_url_append=None, append=None, resource=None): base_url_append=None,
append=None,
resource=None,
):
return super(TestClusterTemplates, self).get_mock_url( return super(TestClusterTemplates, self).get_mock_url(
service_type=service_type, resource=resource, service_type=service_type,
append=append, base_url_append=base_url_append) resource=resource,
append=append,
base_url_append=base_url_append,
)
def test_list_cluster_templates_without_detail(self): def test_list_cluster_templates_without_detail(self):
self.register_uris([ self.register_uris(
dict( [
method='GET', dict(
uri=self.get_mock_url(resource='clustertemplates'), method='GET',
json=dict(clustertemplates=[cluster_template_obj]))]) uri=self.get_mock_url(resource='clustertemplates'),
json=dict(clustertemplates=[cluster_template_obj]),
)
]
)
cluster_templates_list = self.cloud.list_cluster_templates() cluster_templates_list = self.cloud.list_cluster_templates()
self._compare_clustertemplates( self._compare_clustertemplates(
cluster_template_obj, cluster_template_obj,
@ -79,11 +88,15 @@ class TestClusterTemplates(base.TestCase):
self.assert_calls() self.assert_calls()
def test_list_cluster_templates_with_detail(self): def test_list_cluster_templates_with_detail(self):
self.register_uris([ self.register_uris(
dict( [
method='GET', dict(
uri=self.get_mock_url(resource='clustertemplates'), method='GET',
json=dict(clustertemplates=[cluster_template_obj]))]) uri=self.get_mock_url(resource='clustertemplates'),
json=dict(clustertemplates=[cluster_template_obj]),
)
]
)
cluster_templates_list = self.cloud.list_cluster_templates(detail=True) cluster_templates_list = self.cloud.list_cluster_templates(detail=True)
self._compare_clustertemplates( self._compare_clustertemplates(
cluster_template_obj, cluster_template_obj,
@ -92,14 +105,19 @@ class TestClusterTemplates(base.TestCase):
self.assert_calls() self.assert_calls()
def test_search_cluster_templates_by_name(self): def test_search_cluster_templates_by_name(self):
self.register_uris([ self.register_uris(
dict( [
method='GET', dict(
uri=self.get_mock_url(resource='clustertemplates'), method='GET',
json=dict(clustertemplates=[cluster_template_obj]))]) uri=self.get_mock_url(resource='clustertemplates'),
json=dict(clustertemplates=[cluster_template_obj]),
)
]
)
cluster_templates = self.cloud.search_cluster_templates( cluster_templates = self.cloud.search_cluster_templates(
name_or_id='fake-cluster-template') name_or_id='fake-cluster-template'
)
self.assertEqual(1, len(cluster_templates)) self.assertEqual(1, len(cluster_templates))
self.assertEqual('fake-uuid', cluster_templates[0]['uuid']) self.assertEqual('fake-uuid', cluster_templates[0]['uuid'])
@ -107,24 +125,33 @@ class TestClusterTemplates(base.TestCase):
def test_search_cluster_templates_not_found(self): def test_search_cluster_templates_not_found(self):
self.register_uris([ self.register_uris(
dict( [
method='GET', dict(
uri=self.get_mock_url(resource='clustertemplates'), method='GET',
json=dict(clustertemplates=[cluster_template_obj]))]) uri=self.get_mock_url(resource='clustertemplates'),
json=dict(clustertemplates=[cluster_template_obj]),
)
]
)
cluster_templates = self.cloud.search_cluster_templates( cluster_templates = self.cloud.search_cluster_templates(
name_or_id='non-existent') name_or_id='non-existent'
)
self.assertEqual(0, len(cluster_templates)) self.assertEqual(0, len(cluster_templates))
self.assert_calls() self.assert_calls()
def test_get_cluster_template(self): def test_get_cluster_template(self):
self.register_uris([ self.register_uris(
dict( [
method='GET', dict(
uri=self.get_mock_url(resource='clustertemplates'), method='GET',
json=dict(clustertemplates=[cluster_template_obj]))]) uri=self.get_mock_url(resource='clustertemplates'),
json=dict(clustertemplates=[cluster_template_obj]),
)
]
)
r = self.cloud.get_cluster_template('fake-cluster-template') r = self.cloud.get_cluster_template('fake-cluster-template')
self.assertIsNotNone(r) self.assertIsNotNone(r)
@ -135,41 +162,52 @@ class TestClusterTemplates(base.TestCase):
self.assert_calls() self.assert_calls()
def test_get_cluster_template_not_found(self): def test_get_cluster_template_not_found(self):
self.register_uris([ self.register_uris(
dict( [
method='GET', dict(
uri=self.get_mock_url(resource='clustertemplates'), method='GET',
json=dict(clustertemplates=[]))]) uri=self.get_mock_url(resource='clustertemplates'),
json=dict(clustertemplates=[]),
)
]
)
r = self.cloud.get_cluster_template('doesNotExist') r = self.cloud.get_cluster_template('doesNotExist')
self.assertIsNone(r) self.assertIsNone(r)
self.assert_calls() self.assert_calls()
def test_create_cluster_template(self): def test_create_cluster_template(self):
json_response = cluster_template_obj.copy() json_response = cluster_template_obj.copy()
kwargs = dict(name=cluster_template_obj['name'], kwargs = dict(
image_id=cluster_template_obj['image_id'], name=cluster_template_obj['name'],
keypair_id=cluster_template_obj['keypair_id'], image_id=cluster_template_obj['image_id'],
coe=cluster_template_obj['coe']) keypair_id=cluster_template_obj['keypair_id'],
self.register_uris([ coe=cluster_template_obj['coe'],
dict(
method='POST',
uri=self.get_mock_url(resource='clustertemplates'),
json=json_response,
validate=dict(json=kwargs))])
response = self.cloud.create_cluster_template(**kwargs)
self._compare_clustertemplates(
json_response,
response
) )
self.register_uris(
[
dict(
method='POST',
uri=self.get_mock_url(resource='clustertemplates'),
json=json_response,
validate=dict(json=kwargs),
)
]
)
response = self.cloud.create_cluster_template(**kwargs)
self._compare_clustertemplates(json_response, response)
self.assert_calls() self.assert_calls()
def test_create_cluster_template_exception(self): def test_create_cluster_template_exception(self):
self.register_uris([ self.register_uris(
dict( [
method='POST', dict(
uri=self.get_mock_url(resource='clustertemplates'), method='POST',
status_code=403)]) uri=self.get_mock_url(resource='clustertemplates'),
status_code=403,
)
]
)
# TODO(mordred) requests here doens't give us a great story # TODO(mordred) requests here doens't give us a great story
# for matching the old error message text. Investigate plumbing # for matching the old error message text. Investigate plumbing
# an error message in to the adapter call so that we can give a # an error message in to the adapter call so that we can give a
@ -177,54 +215,72 @@ class TestClusterTemplates(base.TestCase):
# OpenStackCloudException - but for some reason testtools will not # OpenStackCloudException - but for some reason testtools will not
# match the more specific HTTPError, even though it's a subclass # match the more specific HTTPError, even though it's a subclass
# of OpenStackCloudException. # of OpenStackCloudException.
with testtools.ExpectedException( with testtools.ExpectedException(exceptions.ForbiddenException):
exceptions.ForbiddenException):
self.cloud.create_cluster_template('fake-cluster-template') self.cloud.create_cluster_template('fake-cluster-template')
self.assert_calls() self.assert_calls()
def test_delete_cluster_template(self): def test_delete_cluster_template(self):
self.register_uris([ self.register_uris(
dict( [
method='GET', dict(
uri=self.get_mock_url(resource='clustertemplates'), method='GET',
json=dict(clustertemplates=[cluster_template_obj])), uri=self.get_mock_url(resource='clustertemplates'),
dict( json=dict(clustertemplates=[cluster_template_obj]),
method='DELETE', ),
uri=self.get_mock_url(resource='clustertemplates/fake-uuid')), dict(
]) method='DELETE',
uri=self.get_mock_url(
resource='clustertemplates/fake-uuid'
),
),
]
)
self.cloud.delete_cluster_template('fake-uuid') self.cloud.delete_cluster_template('fake-uuid')
self.assert_calls() self.assert_calls()
def test_update_cluster_template(self): def test_update_cluster_template(self):
self.register_uris([ self.register_uris(
dict( [
method='GET', dict(
uri=self.get_mock_url(resource='clustertemplates'), method='GET',
json=dict(clustertemplates=[cluster_template_obj])), uri=self.get_mock_url(resource='clustertemplates'),
dict( json=dict(clustertemplates=[cluster_template_obj]),
method='PATCH', ),
uri=self.get_mock_url(resource='clustertemplates/fake-uuid'), dict(
status_code=200, method='PATCH',
validate=dict( uri=self.get_mock_url(
json=[{ resource='clustertemplates/fake-uuid'
'op': 'replace', ),
'path': '/name', status_code=200,
'value': 'new-cluster-template' validate=dict(
}] json=[
)), {
]) 'op': 'replace',
'path': '/name',
'value': 'new-cluster-template',
}
]
),
),
]
)
new_name = 'new-cluster-template' new_name = 'new-cluster-template'
updated = self.cloud.update_cluster_template( updated = self.cloud.update_cluster_template(
'fake-uuid', name=new_name) 'fake-uuid', name=new_name
)
self.assertEqual(new_name, updated.name) self.assertEqual(new_name, updated.name)
self.assert_calls() self.assert_calls()
def test_coe_get_cluster_template(self): def test_coe_get_cluster_template(self):
self.register_uris([ self.register_uris(
dict( [
method='GET', dict(
uri=self.get_mock_url(resource='clustertemplates'), method='GET',
json=dict(clustertemplates=[cluster_template_obj]))]) uri=self.get_mock_url(resource='clustertemplates'),
json=dict(clustertemplates=[cluster_template_obj]),
)
]
)
r = self.cloud.get_cluster_template('fake-cluster-template') r = self.cloud.get_cluster_template('fake-cluster-template')
self.assertIsNotNone(r) self.assertIsNotNone(r)

View File

@ -24,14 +24,10 @@ CLUSTERING_DICT = {
'max_size': 1, 'max_size': 1,
'min_size': 1, 'min_size': 1,
'timeout': 100, 'timeout': 100,
'metadata': {} 'metadata': {},
} }
PROFILE_DICT = { PROFILE_DICT = {'name': 'fake-profile-name', 'spec': {}, 'metadata': {}}
'name': 'fake-profile-name',
'spec': {},
'metadata': {}
}
POLICY_DICT = { POLICY_DICT = {
'name': 'fake-profile-name', 'name': 'fake-profile-name',
@ -43,7 +39,7 @@ RECEIVER_DICT = {
'cluster_id': 'fake-cluster-id', 'cluster_id': 'fake-cluster-id',
'name': 'fake-receiver-name', 'name': 'fake-receiver-name',
'params': {}, 'params': {},
'type': 'webhook' 'type': 'webhook',
} }
NEW_CLUSTERING_DICT = copy.copy(CLUSTERING_DICT) NEW_CLUSTERING_DICT = copy.copy(CLUSTERING_DICT)
@ -57,7 +53,6 @@ NEW_RECEIVER_DICT['id'] = '1'
class TestClustering(base.TestCase): class TestClustering(base.TestCase):
def assertAreInstances(self, elements, elem_type): def assertAreInstances(self, elements, elem_type):
for e in elements: for e in elements:
self.assertIsInstance(e, elem_type) self.assertIsInstance(e, elem_type)
@ -65,12 +60,14 @@ class TestClustering(base.TestCase):
def _compare_clusters(self, exp, real): def _compare_clusters(self, exp, real):
self.assertEqual( self.assertEqual(
cluster.Cluster(**exp).to_dict(computed=False), cluster.Cluster(**exp).to_dict(computed=False),
real.to_dict(computed=False)) real.to_dict(computed=False),
)
def setUp(self): def setUp(self):
super(TestClustering, self).setUp() super(TestClustering, self).setUp()
self.use_senlin() self.use_senlin()
# def test_create_cluster(self): # def test_create_cluster(self):
# self.register_uris([ # self.register_uris([
# dict(method='GET', # dict(method='GET',

View File

@ -212,7 +212,5 @@ class TestCOEClusters(base.TestCase):
), ),
] ]
) )
self.cloud.update_coe_cluster( self.cloud.update_coe_cluster(coe_cluster_obj["uuid"], node_count=3)
coe_cluster_obj["uuid"], node_count=3
)
self.assert_calls() self.assert_calls()

View File

@ -12,7 +12,7 @@
from openstack.container_infrastructure_management.v1 import ( from openstack.container_infrastructure_management.v1 import (
cluster_certificate cluster_certificate,
) )
from openstack.tests.unit import base from openstack.tests.unit import base
@ -20,7 +20,7 @@ coe_cluster_ca_obj = dict(
cluster_uuid="43e305ce-3a5f-412a-8a14-087834c34c8c", cluster_uuid="43e305ce-3a5f-412a-8a14-087834c34c8c",
pem="-----BEGIN CERTIFICATE-----\nMIIDAO\n-----END CERTIFICATE-----\n", pem="-----BEGIN CERTIFICATE-----\nMIIDAO\n-----END CERTIFICATE-----\n",
bay_uuid="43e305ce-3a5f-412a-8a14-087834c34c8c", bay_uuid="43e305ce-3a5f-412a-8a14-087834c34c8c",
links=[] links=[],
) )
coe_cluster_signed_cert_obj = dict( coe_cluster_signed_cert_obj = dict(
@ -28,50 +28,72 @@ coe_cluster_signed_cert_obj = dict(
pem='-----BEGIN CERTIFICATE-----\nMIIDAO\n-----END CERTIFICATE-----', pem='-----BEGIN CERTIFICATE-----\nMIIDAO\n-----END CERTIFICATE-----',
bay_uuid='43e305ce-3a5f-412a-8a14-087834c34c8c', bay_uuid='43e305ce-3a5f-412a-8a14-087834c34c8c',
links=[], links=[],
csr=('-----BEGIN CERTIFICATE REQUEST-----\nMIICfz==' csr=(
'\n-----END CERTIFICATE REQUEST-----\n') '-----BEGIN CERTIFICATE REQUEST-----\nMIICfz=='
'\n-----END CERTIFICATE REQUEST-----\n'
),
) )
class TestCOEClusters(base.TestCase): class TestCOEClusters(base.TestCase):
def _compare_cluster_certs(self, exp, real): def _compare_cluster_certs(self, exp, real):
self.assertDictEqual( self.assertDictEqual(
cluster_certificate.ClusterCertificate( cluster_certificate.ClusterCertificate(**exp).to_dict(
**exp).to_dict(computed=False), computed=False
),
real.to_dict(computed=False), real.to_dict(computed=False),
) )
def get_mock_url( def get_mock_url(
self, self,
service_type='container-infrastructure-management', service_type='container-infrastructure-management',
base_url_append=None, append=None, resource=None): base_url_append=None,
append=None,
resource=None,
):
return super(TestCOEClusters, self).get_mock_url( return super(TestCOEClusters, self).get_mock_url(
service_type=service_type, resource=resource, service_type=service_type,
append=append, base_url_append=base_url_append) resource=resource,
append=append,
base_url_append=base_url_append,
)
def test_get_coe_cluster_certificate(self): def test_get_coe_cluster_certificate(self):
self.register_uris([dict( self.register_uris(
method='GET', [
uri=self.get_mock_url( dict(
resource='certificates', method='GET',
append=[coe_cluster_ca_obj['cluster_uuid']]), uri=self.get_mock_url(
json=coe_cluster_ca_obj) resource='certificates',
]) append=[coe_cluster_ca_obj['cluster_uuid']],
),
json=coe_cluster_ca_obj,
)
]
)
ca_cert = self.cloud.get_coe_cluster_certificate( ca_cert = self.cloud.get_coe_cluster_certificate(
coe_cluster_ca_obj['cluster_uuid']) coe_cluster_ca_obj['cluster_uuid']
self._compare_cluster_certs( )
coe_cluster_ca_obj, self._compare_cluster_certs(coe_cluster_ca_obj, ca_cert)
ca_cert)
self.assert_calls() self.assert_calls()
def test_sign_coe_cluster_certificate(self): def test_sign_coe_cluster_certificate(self):
self.register_uris([dict( self.register_uris(
method='POST', [
uri=self.get_mock_url(resource='certificates'), dict(
json={"cluster_uuid": coe_cluster_signed_cert_obj['cluster_uuid'], method='POST',
"csr": coe_cluster_signed_cert_obj['csr']} uri=self.get_mock_url(resource='certificates'),
)]) json={
"cluster_uuid": coe_cluster_signed_cert_obj[
'cluster_uuid'
],
"csr": coe_cluster_signed_cert_obj['csr'],
},
)
]
)
self.cloud.sign_coe_cluster_certificate( self.cloud.sign_coe_cluster_certificate(
coe_cluster_signed_cert_obj['cluster_uuid'], coe_cluster_signed_cert_obj['cluster_uuid'],
coe_cluster_signed_cert_obj['csr']) coe_cluster_signed_cert_obj['csr'],
)
self.assert_calls() self.assert_calls()

File diff suppressed because it is too large Load Diff

View File

@ -25,7 +25,6 @@ from openstack.tests.unit import base
class TestCreateVolumeSnapshot(base.TestCase): class TestCreateVolumeSnapshot(base.TestCase):
def setUp(self): def setUp(self):
super(TestCreateVolumeSnapshot, self).setUp() super(TestCreateVolumeSnapshot, self).setUp()
self.use_cinder() self.use_cinder()
@ -33,7 +32,8 @@ class TestCreateVolumeSnapshot(base.TestCase):
def _compare_snapshots(self, exp, real): def _compare_snapshots(self, exp, real):
self.assertDictEqual( self.assertDictEqual(
snapshot.Snapshot(**exp).to_dict(computed=False), snapshot.Snapshot(**exp).to_dict(computed=False),
real.to_dict(computed=False)) real.to_dict(computed=False),
)
def test_create_volume_snapshot_wait(self): def test_create_volume_snapshot_wait(self):
""" """
@ -42,32 +42,46 @@ class TestCreateVolumeSnapshot(base.TestCase):
""" """
snapshot_id = '5678' snapshot_id = '5678'
volume_id = '1234' volume_id = '1234'
build_snapshot = fakes.FakeVolumeSnapshot(snapshot_id, 'creating', build_snapshot = fakes.FakeVolumeSnapshot(
'foo', 'derpysnapshot') snapshot_id, 'creating', 'foo', 'derpysnapshot'
)
build_snapshot_dict = meta.obj_to_munch(build_snapshot) build_snapshot_dict = meta.obj_to_munch(build_snapshot)
fake_snapshot = fakes.FakeVolumeSnapshot(snapshot_id, 'available', fake_snapshot = fakes.FakeVolumeSnapshot(
'foo', 'derpysnapshot') snapshot_id, 'available', 'foo', 'derpysnapshot'
)
fake_snapshot_dict = meta.obj_to_munch(fake_snapshot) fake_snapshot_dict = meta.obj_to_munch(fake_snapshot)
self.register_uris([ self.register_uris(
dict(method='POST', [
uri=self.get_mock_url( dict(
'volumev3', 'public', append=['snapshots']), method='POST',
json={'snapshot': build_snapshot_dict}, uri=self.get_mock_url(
validate=dict(json={ 'volumev3', 'public', append=['snapshots']
'snapshot': {'volume_id': '1234'}})), ),
dict(method='GET', json={'snapshot': build_snapshot_dict},
uri=self.get_mock_url('volumev3', 'public', validate=dict(json={'snapshot': {'volume_id': '1234'}}),
append=['snapshots', snapshot_id]), ),
json={'snapshot': build_snapshot_dict}), dict(
dict(method='GET', method='GET',
uri=self.get_mock_url('volumev3', 'public', uri=self.get_mock_url(
append=['snapshots', snapshot_id]), 'volumev3', 'public', append=['snapshots', snapshot_id]
json={'snapshot': fake_snapshot_dict})]) ),
json={'snapshot': build_snapshot_dict},
),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['snapshots', snapshot_id]
),
json={'snapshot': fake_snapshot_dict},
),
]
)
self._compare_snapshots( self._compare_snapshots(
fake_snapshot_dict, fake_snapshot_dict,
self.cloud.create_volume_snapshot(volume_id=volume_id, wait=True)) self.cloud.create_volume_snapshot(volume_id=volume_id, wait=True),
)
self.assert_calls() self.assert_calls()
def test_create_volume_snapshot_with_timeout(self): def test_create_volume_snapshot_with_timeout(self):
@ -77,26 +91,38 @@ class TestCreateVolumeSnapshot(base.TestCase):
""" """
snapshot_id = '5678' snapshot_id = '5678'
volume_id = '1234' volume_id = '1234'
build_snapshot = fakes.FakeVolumeSnapshot(snapshot_id, 'creating', build_snapshot = fakes.FakeVolumeSnapshot(
'foo', 'derpysnapshot') snapshot_id, 'creating', 'foo', 'derpysnapshot'
)
build_snapshot_dict = meta.obj_to_munch(build_snapshot) build_snapshot_dict = meta.obj_to_munch(build_snapshot)
self.register_uris([ self.register_uris(
dict(method='POST', [
uri=self.get_mock_url( dict(
'volumev3', 'public', append=['snapshots']), method='POST',
json={'snapshot': build_snapshot_dict}, uri=self.get_mock_url(
validate=dict(json={ 'volumev3', 'public', append=['snapshots']
'snapshot': {'volume_id': '1234'}})), ),
dict(method='GET', json={'snapshot': build_snapshot_dict},
uri=self.get_mock_url('volumev3', 'public', validate=dict(json={'snapshot': {'volume_id': '1234'}}),
append=['snapshots', snapshot_id]), ),
json={'snapshot': build_snapshot_dict})]) dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['snapshots', snapshot_id]
),
json={'snapshot': build_snapshot_dict},
),
]
)
self.assertRaises( self.assertRaises(
exc.OpenStackCloudTimeout, exc.OpenStackCloudTimeout,
self.cloud.create_volume_snapshot, volume_id=volume_id, self.cloud.create_volume_snapshot,
wait=True, timeout=0.01) volume_id=volume_id,
wait=True,
timeout=0.01,
)
self.assert_calls(do_count=False) self.assert_calls(do_count=False)
def test_create_volume_snapshot_with_error(self): def test_create_volume_snapshot_with_error(self):
@ -106,31 +132,47 @@ class TestCreateVolumeSnapshot(base.TestCase):
""" """
snapshot_id = '5678' snapshot_id = '5678'
volume_id = '1234' volume_id = '1234'
build_snapshot = fakes.FakeVolumeSnapshot(snapshot_id, 'creating', build_snapshot = fakes.FakeVolumeSnapshot(
'bar', 'derpysnapshot') snapshot_id, 'creating', 'bar', 'derpysnapshot'
)
build_snapshot_dict = meta.obj_to_munch(build_snapshot) build_snapshot_dict = meta.obj_to_munch(build_snapshot)
error_snapshot = fakes.FakeVolumeSnapshot(snapshot_id, 'error', error_snapshot = fakes.FakeVolumeSnapshot(
'blah', 'derpysnapshot') snapshot_id, 'error', 'blah', 'derpysnapshot'
)
error_snapshot_dict = meta.obj_to_munch(error_snapshot) error_snapshot_dict = meta.obj_to_munch(error_snapshot)
self.register_uris([ self.register_uris(
dict(method='POST', [
uri=self.get_mock_url( dict(
'volumev3', 'public', append=['snapshots']), method='POST',
json={'snapshot': build_snapshot_dict}, uri=self.get_mock_url(
validate=dict(json={ 'volumev3', 'public', append=['snapshots']
'snapshot': {'volume_id': '1234'}})), ),
dict(method='GET', json={'snapshot': build_snapshot_dict},
uri=self.get_mock_url('volumev3', 'public', validate=dict(json={'snapshot': {'volume_id': '1234'}}),
append=['snapshots', snapshot_id]), ),
json={'snapshot': build_snapshot_dict}), dict(
dict(method='GET', method='GET',
uri=self.get_mock_url('volumev3', 'public', uri=self.get_mock_url(
append=['snapshots', snapshot_id]), 'volumev3', 'public', append=['snapshots', snapshot_id]
json={'snapshot': error_snapshot_dict})]) ),
json={'snapshot': build_snapshot_dict},
),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['snapshots', snapshot_id]
),
json={'snapshot': error_snapshot_dict},
),
]
)
self.assertRaises( self.assertRaises(
exc.OpenStackCloudException, exc.OpenStackCloudException,
self.cloud.create_volume_snapshot, volume_id=volume_id, self.cloud.create_volume_snapshot,
wait=True, timeout=5) volume_id=volume_id,
wait=True,
timeout=5,
)
self.assert_calls() self.assert_calls()

View File

@ -24,27 +24,39 @@ from openstack.tests.unit import base
class TestDeleteServer(base.TestCase): class TestDeleteServer(base.TestCase):
def test_delete_server(self): def test_delete_server(self):
""" """
Test that server delete is called when wait=False Test that server delete is called when wait=False
""" """
server = fakes.make_fake_server('1234', 'daffy', 'ACTIVE') server = fakes.make_fake_server('1234', 'daffy', 'ACTIVE')
self.register_uris([ self.register_uris(
self.get_nova_discovery_mock_dict(), [
dict(method='GET', self.get_nova_discovery_mock_dict(),
uri=self.get_mock_url( dict(
'compute', 'public', append=['servers', 'daffy']), method='GET',
status_code=404), uri=self.get_mock_url(
dict(method='GET', 'compute', 'public', append=['servers', 'daffy']
uri=self.get_mock_url( ),
'compute', 'public', append=['servers', 'detail'], status_code=404,
qs_elements=['name=daffy']), ),
json={'servers': [server]}), dict(
dict(method='DELETE', method='GET',
uri=self.get_mock_url( uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234'])), 'compute',
]) 'public',
append=['servers', 'detail'],
qs_elements=['name=daffy'],
),
json={'servers': [server]},
),
dict(
method='DELETE',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']
),
),
]
)
self.assertTrue(self.cloud.delete_server('daffy', wait=False)) self.assertTrue(self.cloud.delete_server('daffy', wait=False))
self.assert_calls() self.assert_calls()
@ -53,35 +65,55 @@ class TestDeleteServer(base.TestCase):
""" """
Test that we return immediately when server is already gone Test that we return immediately when server is already gone
""" """
self.register_uris([ self.register_uris(
self.get_nova_discovery_mock_dict(), [
dict(method='GET', self.get_nova_discovery_mock_dict(),
uri=self.get_mock_url( dict(
'compute', 'public', append=['servers', 'tweety']), method='GET',
status_code=404), uri=self.get_mock_url(
dict(method='GET', 'compute', 'public', append=['servers', 'tweety']
uri=self.get_mock_url( ),
'compute', 'public', append=['servers', 'detail'], status_code=404,
qs_elements=['name=tweety']), ),
json={'servers': []}), dict(
]) method='GET',
uri=self.get_mock_url(
'compute',
'public',
append=['servers', 'detail'],
qs_elements=['name=tweety'],
),
json={'servers': []},
),
]
)
self.assertFalse(self.cloud.delete_server('tweety', wait=False)) self.assertFalse(self.cloud.delete_server('tweety', wait=False))
self.assert_calls() self.assert_calls()
def test_delete_server_already_gone_wait(self): def test_delete_server_already_gone_wait(self):
self.register_uris([ self.register_uris(
self.get_nova_discovery_mock_dict(), [
dict(method='GET', self.get_nova_discovery_mock_dict(),
uri=self.get_mock_url( dict(
'compute', 'public', append=['servers', 'speedy']), method='GET',
status_code=404), uri=self.get_mock_url(
dict(method='GET', 'compute', 'public', append=['servers', 'speedy']
uri=self.get_mock_url( ),
'compute', 'public', append=['servers', 'detail'], status_code=404,
qs_elements=['name=speedy']), ),
json={'servers': []}), dict(
]) method='GET',
uri=self.get_mock_url(
'compute',
'public',
append=['servers', 'detail'],
qs_elements=['name=speedy'],
),
json={'servers': []},
),
]
)
self.assertFalse(self.cloud.delete_server('speedy', wait=True)) self.assertFalse(self.cloud.delete_server('speedy', wait=True))
self.assert_calls() self.assert_calls()
@ -90,29 +122,48 @@ class TestDeleteServer(base.TestCase):
Test that delete_server waits for the server to be gone Test that delete_server waits for the server to be gone
""" """
server = fakes.make_fake_server('9999', 'wily', 'ACTIVE') server = fakes.make_fake_server('9999', 'wily', 'ACTIVE')
self.register_uris([ self.register_uris(
self.get_nova_discovery_mock_dict(), [
dict(method='GET', self.get_nova_discovery_mock_dict(),
uri=self.get_mock_url( dict(
'compute', 'public', append=['servers', 'wily']), method='GET',
status_code=404), uri=self.get_mock_url(
dict(method='GET', 'compute', 'public', append=['servers', 'wily']
uri=self.get_mock_url( ),
'compute', 'public', append=['servers', 'detail'], status_code=404,
qs_elements=['name=wily']), ),
json={'servers': [server]}), dict(
dict(method='DELETE', method='GET',
uri=self.get_mock_url( uri=self.get_mock_url(
'compute', 'public', append=['servers', '9999'])), 'compute',
dict(method='GET', 'public',
uri=self.get_mock_url( append=['servers', 'detail'],
'compute', 'public', append=['servers', '9999']), qs_elements=['name=wily'],
json={'server': server}), ),
dict(method='GET', json={'servers': [server]},
uri=self.get_mock_url( ),
'compute', 'public', append=['servers', '9999']), dict(
status_code=404), method='DELETE',
]) uri=self.get_mock_url(
'compute', 'public', append=['servers', '9999']
),
),
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '9999']
),
json={'server': server},
),
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '9999']
),
status_code=404,
),
]
)
self.assertTrue(self.cloud.delete_server('wily', wait=True)) self.assertTrue(self.cloud.delete_server('wily', wait=True))
self.assert_calls() self.assert_calls()
@ -122,27 +173,42 @@ class TestDeleteServer(base.TestCase):
Test that delete_server raises non-404 exceptions Test that delete_server raises non-404 exceptions
""" """
server = fakes.make_fake_server('1212', 'speedy', 'ACTIVE') server = fakes.make_fake_server('1212', 'speedy', 'ACTIVE')
self.register_uris([ self.register_uris(
self.get_nova_discovery_mock_dict(), [
dict(method='GET', self.get_nova_discovery_mock_dict(),
uri=self.get_mock_url( dict(
'compute', 'public', append=['servers', 'speedy']), method='GET',
status_code=404), uri=self.get_mock_url(
dict(method='GET', 'compute', 'public', append=['servers', 'speedy']
uri=self.get_mock_url( ),
'compute', 'public', append=['servers', 'detail'], status_code=404,
qs_elements=['name=speedy']), ),
json={'servers': [server]}), dict(
dict(method='DELETE', method='GET',
uri=self.get_mock_url( uri=self.get_mock_url(
'compute', 'public', append=['servers', '1212']), 'compute',
status_code=400), 'public',
]) append=['servers', 'detail'],
qs_elements=['name=speedy'],
),
json={'servers': [server]},
),
dict(
method='DELETE',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1212']
),
status_code=400,
),
]
)
self.assertRaises( self.assertRaises(
shade_exc.OpenStackCloudException, shade_exc.OpenStackCloudException,
self.cloud.delete_server, 'speedy', self.cloud.delete_server,
wait=False) 'speedy',
wait=False,
)
self.assert_calls() self.assert_calls()
@ -156,24 +222,38 @@ class TestDeleteServer(base.TestCase):
if service_type == 'volume': if service_type == 'volume':
return False return False
return orig_has_service(service_type) return orig_has_service(service_type)
self.cloud.has_service = fake_has_service self.cloud.has_service = fake_has_service
server = fakes.make_fake_server('1234', 'porky', 'ACTIVE') server = fakes.make_fake_server('1234', 'porky', 'ACTIVE')
self.register_uris([ self.register_uris(
self.get_nova_discovery_mock_dict(), [
dict(method='GET', self.get_nova_discovery_mock_dict(),
uri=self.get_mock_url( dict(
'compute', 'public', append=['servers', 'porky']), method='GET',
status_code=404), uri=self.get_mock_url(
dict(method='GET', 'compute', 'public', append=['servers', 'porky']
uri=self.get_mock_url( ),
'compute', 'public', append=['servers', 'detail'], status_code=404,
qs_elements=['name=porky']), ),
json={'servers': [server]}), dict(
dict(method='DELETE', method='GET',
uri=self.get_mock_url( uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234'])), 'compute',
]) 'public',
append=['servers', 'detail'],
qs_elements=['name=porky'],
),
json={'servers': [server]},
),
dict(
method='DELETE',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']
),
),
]
)
self.assertTrue(self.cloud.delete_server('porky', wait=False)) self.assertTrue(self.cloud.delete_server('porky', wait=False))
self.assert_calls() self.assert_calls()
@ -185,50 +265,84 @@ class TestDeleteServer(base.TestCase):
server = fakes.make_fake_server('1234', 'porky', 'ACTIVE') server = fakes.make_fake_server('1234', 'porky', 'ACTIVE')
fip_id = uuid.uuid4().hex fip_id = uuid.uuid4().hex
self.register_uris([ self.register_uris(
self.get_nova_discovery_mock_dict(), [
dict(method='GET', self.get_nova_discovery_mock_dict(),
uri=self.get_mock_url( dict(
'compute', 'public', append=['servers', 'porky']), method='GET',
status_code=404), uri=self.get_mock_url(
dict(method='GET', 'compute', 'public', append=['servers', 'porky']
uri=self.get_mock_url( ),
'compute', 'public', append=['servers', 'detail'], status_code=404,
qs_elements=['name=porky']), ),
json={'servers': [server]}), dict(
dict(method='GET', method='GET',
uri=self.get_mock_url( uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'floatingips'], 'compute',
qs_elements=['floating_ip_address=172.24.5.5']), 'public',
complete_qs=True, append=['servers', 'detail'],
json={'floatingips': [{ qs_elements=['name=porky'],
'router_id': 'd23abc8d-2991-4a55-ba98-2aaea84cc72f', ),
'tenant_id': '4969c491a3c74ee4af974e6d800c62de', json={'servers': [server]},
'floating_network_id': '376da547-b977-4cfe-9cba7', ),
'fixed_ip_address': '10.0.0.4', dict(
'floating_ip_address': '172.24.5.5', method='GET',
'port_id': 'ce705c24-c1ef-408a-bda3-7bbd946164ac', uri=self.get_mock_url(
'id': fip_id, 'network',
'status': 'ACTIVE'}]}), 'public',
dict(method='DELETE', append=['v2.0', 'floatingips'],
uri=self.get_mock_url( qs_elements=['floating_ip_address=172.24.5.5'],
'network', 'public', ),
append=['v2.0', 'floatingips', fip_id])), complete_qs=True,
dict(method='GET', json={
uri=self.get_mock_url( 'floatingips': [
'network', 'public', append=['v2.0', 'floatingips']), {
complete_qs=True, 'router_id': 'd23abc8d-2991-4a55-ba98-2aaea84cc72f', # noqa: E501
json={'floatingips': []}), 'tenant_id': '4969c491a3c74ee4af974e6d800c62de', # noqa: E501
dict(method='DELETE', 'floating_network_id': '376da547-b977-4cfe-9cba7', # noqa: E501
uri=self.get_mock_url( 'fixed_ip_address': '10.0.0.4',
'compute', 'public', append=['servers', '1234'])), 'floating_ip_address': '172.24.5.5',
dict(method='GET', 'port_id': 'ce705c24-c1ef-408a-bda3-7bbd946164ac', # noqa: E501
uri=self.get_mock_url( 'id': fip_id,
'compute', 'public', append=['servers', '1234']), 'status': 'ACTIVE',
status_code=404), }
]) ]
self.assertTrue(self.cloud.delete_server( },
'porky', wait=True, delete_ips=True)) ),
dict(
method='DELETE',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'floatingips', fip_id],
),
),
dict(
method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'floatingips']
),
complete_qs=True,
json={'floatingips': []},
),
dict(
method='DELETE',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']
),
),
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']
),
status_code=404,
),
]
)
self.assertTrue(
self.cloud.delete_server('porky', wait=True, delete_ips=True)
)
self.assert_calls() self.assert_calls()
@ -238,33 +352,55 @@ class TestDeleteServer(base.TestCase):
""" """
server = fakes.make_fake_server('1234', 'porky', 'ACTIVE') server = fakes.make_fake_server('1234', 'porky', 'ACTIVE')
self.register_uris([ self.register_uris(
self.get_nova_discovery_mock_dict(), [
dict(method='GET', self.get_nova_discovery_mock_dict(),
uri=self.get_mock_url( dict(
'compute', 'public', append=['servers', 'porky']), method='GET',
status_code=404), uri=self.get_mock_url(
dict(method='GET', 'compute', 'public', append=['servers', 'porky']
uri=self.get_mock_url( ),
'compute', 'public', append=['servers', 'detail'], status_code=404,
qs_elements=['name=porky']), ),
json={'servers': [server]}), dict(
dict(method='GET', method='GET',
uri=self.get_mock_url( uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'floatingips'], 'compute',
qs_elements=['floating_ip_address=172.24.5.5']), 'public',
complete_qs=True, append=['servers', 'detail'],
status_code=404), qs_elements=['name=porky'],
dict(method='DELETE', ),
uri=self.get_mock_url( json={'servers': [server]},
'compute', 'public', append=['servers', '1234'])), ),
dict(method='GET', dict(
uri=self.get_mock_url( method='GET',
'compute', 'public', append=['servers', '1234']), uri=self.get_mock_url(
status_code=404), 'network',
]) 'public',
self.assertTrue(self.cloud.delete_server( append=['v2.0', 'floatingips'],
'porky', wait=True, delete_ips=True)) qs_elements=['floating_ip_address=172.24.5.5'],
),
complete_qs=True,
status_code=404,
),
dict(
method='DELETE',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']
),
),
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']
),
status_code=404,
),
]
)
self.assertTrue(
self.cloud.delete_server('porky', wait=True, delete_ips=True)
)
self.assert_calls() self.assert_calls()
@ -275,44 +411,73 @@ class TestDeleteServer(base.TestCase):
self.cloud._floating_ip_source = 'nova' self.cloud._floating_ip_source = 'nova'
server = fakes.make_fake_server('1234', 'porky', 'ACTIVE') server = fakes.make_fake_server('1234', 'porky', 'ACTIVE')
self.register_uris([ self.register_uris(
self.get_nova_discovery_mock_dict(), [
dict(method='GET', self.get_nova_discovery_mock_dict(),
uri=self.get_mock_url( dict(
'compute', 'public', append=['servers', 'porky']), method='GET',
status_code=404), uri=self.get_mock_url(
dict(method='GET', 'compute', 'public', append=['servers', 'porky']
uri=self.get_mock_url( ),
'compute', 'public', append=['servers', 'detail'], status_code=404,
qs_elements=['name=porky']), ),
json={'servers': [server]}), dict(
dict(method='GET', method='GET',
uri=self.get_mock_url( uri=self.get_mock_url(
'compute', 'public', append=['os-floating-ips']), 'compute',
json={'floating_ips': [ 'public',
{ append=['servers', 'detail'],
'fixed_ip': None, qs_elements=['name=porky'],
'id': 1, ),
'instance_id': None, json={'servers': [server]},
'ip': '172.24.5.5', ),
'pool': 'nova' dict(
}]}), method='GET',
dict(method='DELETE', uri=self.get_mock_url(
uri=self.get_mock_url( 'compute', 'public', append=['os-floating-ips']
'compute', 'public', append=['os-floating-ips', '1'])), ),
dict(method='GET', json={
uri=self.get_mock_url( 'floating_ips': [
'compute', 'public', append=['os-floating-ips']), {
json={'floating_ips': []}), 'fixed_ip': None,
dict(method='DELETE', 'id': 1,
uri=self.get_mock_url( 'instance_id': None,
'compute', 'public', append=['servers', '1234'])), 'ip': '172.24.5.5',
dict(method='GET', 'pool': 'nova',
uri=self.get_mock_url( }
'compute', 'public', append=['servers', '1234']), ]
status_code=404), },
]) ),
self.assertTrue(self.cloud.delete_server( dict(
'porky', wait=True, delete_ips=True)) method='DELETE',
uri=self.get_mock_url(
'compute', 'public', append=['os-floating-ips', '1']
),
),
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['os-floating-ips']
),
json={'floating_ips': []},
),
dict(
method='DELETE',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']
),
),
dict(
method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']
),
status_code=404,
),
]
)
self.assertTrue(
self.cloud.delete_server('porky', wait=True, delete_ips=True)
)
self.assert_calls() self.assert_calls()

View File

@ -24,7 +24,6 @@ from openstack.tests.unit import base
class TestDeleteVolumeSnapshot(base.TestCase): class TestDeleteVolumeSnapshot(base.TestCase):
def setUp(self): def setUp(self):
super(TestDeleteVolumeSnapshot, self).setUp() super(TestDeleteVolumeSnapshot, self).setUp()
self.use_cinder() self.use_cinder()
@ -34,23 +33,34 @@ class TestDeleteVolumeSnapshot(base.TestCase):
Test that delete_volume_snapshot without a wait returns True instance Test that delete_volume_snapshot without a wait returns True instance
when the volume snapshot deletes. when the volume snapshot deletes.
""" """
fake_snapshot = fakes.FakeVolumeSnapshot('1234', 'available', fake_snapshot = fakes.FakeVolumeSnapshot(
'foo', 'derpysnapshot') '1234', 'available', 'foo', 'derpysnapshot'
)
fake_snapshot_dict = meta.obj_to_munch(fake_snapshot) fake_snapshot_dict = meta.obj_to_munch(fake_snapshot)
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'volumev3', 'public', method='GET',
append=['snapshots', 'detail']), uri=self.get_mock_url(
json={'snapshots': [fake_snapshot_dict]}), 'volumev3', 'public', append=['snapshots', 'detail']
dict(method='DELETE', ),
uri=self.get_mock_url( json={'snapshots': [fake_snapshot_dict]},
'volumev3', 'public', ),
append=['snapshots', fake_snapshot_dict['id']]))]) dict(
method='DELETE',
uri=self.get_mock_url(
'volumev3',
'public',
append=['snapshots', fake_snapshot_dict['id']],
),
),
]
)
self.assertTrue( self.assertTrue(
self.cloud.delete_volume_snapshot(name_or_id='1234', wait=False)) self.cloud.delete_volume_snapshot(name_or_id='1234', wait=False)
)
self.assert_calls() self.assert_calls()
def test_delete_volume_snapshot_with_error(self): def test_delete_volume_snapshot_with_error(self):
@ -58,24 +68,36 @@ class TestDeleteVolumeSnapshot(base.TestCase):
Test that a exception while deleting a volume snapshot will cause an Test that a exception while deleting a volume snapshot will cause an
OpenStackCloudException. OpenStackCloudException.
""" """
fake_snapshot = fakes.FakeVolumeSnapshot('1234', 'available', fake_snapshot = fakes.FakeVolumeSnapshot(
'foo', 'derpysnapshot') '1234', 'available', 'foo', 'derpysnapshot'
)
fake_snapshot_dict = meta.obj_to_munch(fake_snapshot) fake_snapshot_dict = meta.obj_to_munch(fake_snapshot)
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'volumev3', 'public', method='GET',
append=['snapshots', 'detail']), uri=self.get_mock_url(
json={'snapshots': [fake_snapshot_dict]}), 'volumev3', 'public', append=['snapshots', 'detail']
dict(method='DELETE', ),
uri=self.get_mock_url( json={'snapshots': [fake_snapshot_dict]},
'volumev3', 'public', ),
append=['snapshots', fake_snapshot_dict['id']]), dict(
status_code=404)]) method='DELETE',
uri=self.get_mock_url(
'volumev3',
'public',
append=['snapshots', fake_snapshot_dict['id']],
),
status_code=404,
),
]
)
self.assertRaises( self.assertRaises(
exc.OpenStackCloudException, exc.OpenStackCloudException,
self.cloud.delete_volume_snapshot, name_or_id='1234') self.cloud.delete_volume_snapshot,
name_or_id='1234',
)
self.assert_calls() self.assert_calls()
def test_delete_volume_snapshot_with_timeout(self): def test_delete_volume_snapshot_with_timeout(self):
@ -83,29 +105,43 @@ class TestDeleteVolumeSnapshot(base.TestCase):
Test that a timeout while waiting for the volume snapshot to delete Test that a timeout while waiting for the volume snapshot to delete
raises an exception in delete_volume_snapshot. raises an exception in delete_volume_snapshot.
""" """
fake_snapshot = fakes.FakeVolumeSnapshot('1234', 'available', fake_snapshot = fakes.FakeVolumeSnapshot(
'foo', 'derpysnapshot') '1234', 'available', 'foo', 'derpysnapshot'
)
fake_snapshot_dict = meta.obj_to_munch(fake_snapshot) fake_snapshot_dict = meta.obj_to_munch(fake_snapshot)
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'volumev3', 'public', method='GET',
append=['snapshots', 'detail']), uri=self.get_mock_url(
json={'snapshots': [fake_snapshot_dict]}), 'volumev3', 'public', append=['snapshots', 'detail']
dict(method='DELETE', ),
uri=self.get_mock_url( json={'snapshots': [fake_snapshot_dict]},
'volumev3', 'public', ),
append=['snapshots', fake_snapshot_dict['id']])), dict(
dict(method='GET', method='DELETE',
uri=self.get_mock_url( uri=self.get_mock_url(
'volumev3', 'public', 'volumev3',
append=['snapshots', '1234']), 'public',
json={'snapshot': fake_snapshot_dict}), append=['snapshots', fake_snapshot_dict['id']],
]) ),
),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['snapshots', '1234']
),
json={'snapshot': fake_snapshot_dict},
),
]
)
self.assertRaises( self.assertRaises(
exc.OpenStackCloudTimeout, exc.OpenStackCloudTimeout,
self.cloud.delete_volume_snapshot, name_or_id='1234', self.cloud.delete_volume_snapshot,
wait=True, timeout=0.01) name_or_id='1234',
wait=True,
timeout=0.01,
)
self.assert_calls(do_count=False) self.assert_calls(do_count=False)

View File

@ -15,17 +15,23 @@ from openstack.tests.unit import base
class TestDomainParams(base.TestCase): class TestDomainParams(base.TestCase):
def test_identity_params_v3(self): def test_identity_params_v3(self):
project_data = self._get_project_data(v3=True) project_data = self._get_project_data(v3=True)
self.register_uris([ self.register_uris(
dict(method='GET', [
uri='https://identity.example.com/v3/projects', dict(
json=dict(projects=[project_data.json_response['project']])) method='GET',
]) uri='https://identity.example.com/v3/projects',
json=dict(
projects=[project_data.json_response['project']]
),
)
]
)
ret = self.cloud._get_identity_params( ret = self.cloud._get_identity_params(
domain_id='5678', project=project_data.project_name) domain_id='5678', project=project_data.project_name
)
self.assertIn('default_project_id', ret) self.assertIn('default_project_id', ret)
self.assertEqual(ret['default_project_id'], project_data.project_id) self.assertEqual(ret['default_project_id'], project_data.project_id)
self.assertIn('domain_id', ret) self.assertIn('domain_id', ret)
@ -39,6 +45,8 @@ class TestDomainParams(base.TestCase):
self.assertRaises( self.assertRaises(
exc.OpenStackCloudException, exc.OpenStackCloudException,
self.cloud._get_identity_params, self.cloud._get_identity_params,
domain_id=None, project=project_data.project_name) domain_id=None,
project=project_data.project_name,
)
self.assert_calls() self.assert_calls()

View File

@ -23,36 +23,54 @@ from openstack.tests.unit import base
class TestDomains(base.TestCase): class TestDomains(base.TestCase):
def get_mock_url(
def get_mock_url(self, service_type='identity', self,
resource='domains', service_type='identity',
append=None, base_url_append='v3', resource='domains',
qs_elements=None): append=None,
base_url_append='v3',
qs_elements=None,
):
return super(TestDomains, self).get_mock_url( return super(TestDomains, self).get_mock_url(
service_type=service_type, resource=resource, service_type=service_type,
append=append, base_url_append=base_url_append, resource=resource,
qs_elements=qs_elements) append=append,
base_url_append=base_url_append,
qs_elements=qs_elements,
)
def test_list_domains(self): def test_list_domains(self):
domain_data = self._get_domain_data() domain_data = self._get_domain_data()
self.register_uris([ self.register_uris(
dict(method='GET', uri=self.get_mock_url(), status_code=200, [
json={'domains': [domain_data.json_response['domain']]})]) dict(
method='GET',
uri=self.get_mock_url(),
status_code=200,
json={'domains': [domain_data.json_response['domain']]},
)
]
)
domains = self.cloud.list_domains() domains = self.cloud.list_domains()
self.assertThat(len(domains), matchers.Equals(1)) self.assertThat(len(domains), matchers.Equals(1))
self.assertThat(domains[0].name, self.assertThat(
matchers.Equals(domain_data.domain_name)) domains[0].name, matchers.Equals(domain_data.domain_name)
self.assertThat(domains[0].id, )
matchers.Equals(domain_data.domain_id)) self.assertThat(domains[0].id, matchers.Equals(domain_data.domain_id))
self.assert_calls() self.assert_calls()
def test_get_domain(self): def test_get_domain(self):
domain_data = self._get_domain_data() domain_data = self._get_domain_data()
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url(append=[domain_data.domain_id]), dict(
status_code=200, method='GET',
json=domain_data.json_response)]) uri=self.get_mock_url(append=[domain_data.domain_id]),
status_code=200,
json=domain_data.json_response,
)
]
)
domain = self.cloud.get_domain(domain_id=domain_data.domain_id) domain = self.cloud.get_domain(domain_id=domain_data.domain_id)
self.assertThat(domain.id, matchers.Equals(domain_data.domain_id)) self.assertThat(domain.id, matchers.Equals(domain_data.domain_id))
self.assertThat(domain.name, matchers.Equals(domain_data.domain_name)) self.assertThat(domain.name, matchers.Equals(domain_data.domain_name))
@ -61,57 +79,86 @@ class TestDomains(base.TestCase):
def test_get_domain_with_name_or_id(self): def test_get_domain_with_name_or_id(self):
domain_data = self._get_domain_data() domain_data = self._get_domain_data()
response = {'domains': [domain_data.json_response['domain']]} response = {'domains': [domain_data.json_response['domain']]}
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url(append=[domain_data.domain_id]), dict(
status_code=200, method='GET',
json=domain_data.json_response), uri=self.get_mock_url(append=[domain_data.domain_id]),
dict(method='GET', status_code=200,
uri=self.get_mock_url(append=[domain_data.domain_name]), json=domain_data.json_response,
status_code=404), ),
dict(method='GET', dict(
uri=self.get_mock_url( method='GET',
qs_elements=['name=' + domain_data.domain_name] uri=self.get_mock_url(append=[domain_data.domain_name]),
), status_code=404,
status_code=200, ),
json=response), dict(
]) method='GET',
uri=self.get_mock_url(
qs_elements=['name=' + domain_data.domain_name]
),
status_code=200,
json=response,
),
]
)
domain = self.cloud.get_domain(name_or_id=domain_data.domain_id) domain = self.cloud.get_domain(name_or_id=domain_data.domain_id)
domain_by_name = self.cloud.get_domain( domain_by_name = self.cloud.get_domain(
name_or_id=domain_data.domain_name) name_or_id=domain_data.domain_name
self.assertThat(domain.id, matchers.Equals(domain_data.domain_id)) )
self.assertThat(domain.name, matchers.Equals(domain_data.domain_name))
self.assertThat(domain_by_name.id,
matchers.Equals(domain_data.domain_id))
self.assertThat(domain_by_name.name,
matchers.Equals(domain_data.domain_name))
self.assert_calls()
def test_create_domain(self):
domain_data = self._get_domain_data(description=uuid.uuid4().hex,
enabled=True)
self.register_uris([
dict(method='POST', uri=self.get_mock_url(), status_code=200,
json=domain_data.json_response,
validate=dict(json=domain_data.json_request))])
domain = self.cloud.create_domain(
domain_data.domain_name, domain_data.description)
self.assertThat(domain.id, matchers.Equals(domain_data.domain_id)) self.assertThat(domain.id, matchers.Equals(domain_data.domain_id))
self.assertThat(domain.name, matchers.Equals(domain_data.domain_name)) self.assertThat(domain.name, matchers.Equals(domain_data.domain_name))
self.assertThat( self.assertThat(
domain.description, matchers.Equals(domain_data.description)) domain_by_name.id, matchers.Equals(domain_data.domain_id)
)
self.assertThat(
domain_by_name.name, matchers.Equals(domain_data.domain_name)
)
self.assert_calls()
def test_create_domain(self):
domain_data = self._get_domain_data(
description=uuid.uuid4().hex, enabled=True
)
self.register_uris(
[
dict(
method='POST',
uri=self.get_mock_url(),
status_code=200,
json=domain_data.json_response,
validate=dict(json=domain_data.json_request),
)
]
)
domain = self.cloud.create_domain(
domain_data.domain_name, domain_data.description
)
self.assertThat(domain.id, matchers.Equals(domain_data.domain_id))
self.assertThat(domain.name, matchers.Equals(domain_data.domain_name))
self.assertThat(
domain.description, matchers.Equals(domain_data.description)
)
self.assert_calls() self.assert_calls()
def test_create_domain_exception(self): def test_create_domain_exception(self):
domain_data = self._get_domain_data(domain_name='domain_name', domain_data = self._get_domain_data(
enabled=True) domain_name='domain_name', enabled=True
)
with testtools.ExpectedException( with testtools.ExpectedException(
openstack.cloud.OpenStackCloudBadRequest openstack.cloud.OpenStackCloudBadRequest
): ):
self.register_uris([ self.register_uris(
dict(method='POST', uri=self.get_mock_url(), status_code=400, [
json=domain_data.json_response, dict(
validate=dict(json=domain_data.json_request))]) method='POST',
uri=self.get_mock_url(),
status_code=400,
json=domain_data.json_response,
validate=dict(json=domain_data.json_request),
)
]
)
self.cloud.create_domain('domain_name') self.cloud.create_domain('domain_name')
self.assert_calls() self.assert_calls()
@ -120,11 +167,20 @@ class TestDomains(base.TestCase):
new_resp = domain_data.json_response.copy() new_resp = domain_data.json_response.copy()
new_resp['domain']['enabled'] = False new_resp['domain']['enabled'] = False
domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id]) domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id])
self.register_uris([ self.register_uris(
dict(method='PATCH', uri=domain_resource_uri, status_code=200, [
json=new_resp, dict(
validate=dict(json={'domain': {'enabled': False}})), method='PATCH',
dict(method='DELETE', uri=domain_resource_uri, status_code=204)]) uri=domain_resource_uri,
status_code=200,
json=new_resp,
validate=dict(json={'domain': {'enabled': False}}),
),
dict(
method='DELETE', uri=domain_resource_uri, status_code=204
),
]
)
self.cloud.delete_domain(domain_data.domain_id) self.cloud.delete_domain(domain_data.domain_id)
self.assert_calls() self.assert_calls()
@ -134,15 +190,26 @@ class TestDomains(base.TestCase):
new_resp['domain']['enabled'] = False new_resp['domain']['enabled'] = False
domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id]) domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id])
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url(append=[domain_data.domain_id]), dict(
status_code=200, method='GET',
json={'domain': domain_data.json_response['domain']}), uri=self.get_mock_url(append=[domain_data.domain_id]),
dict(method='PATCH', uri=domain_resource_uri, status_code=200, status_code=200,
json=new_resp, json={'domain': domain_data.json_response['domain']},
validate=dict(json={'domain': {'enabled': False}})), ),
dict(method='DELETE', uri=domain_resource_uri, status_code=204)]) dict(
method='PATCH',
uri=domain_resource_uri,
status_code=200,
json=new_resp,
validate=dict(json={'domain': {'enabled': False}}),
),
dict(
method='DELETE', uri=domain_resource_uri, status_code=204
),
]
)
self.cloud.delete_domain(name_or_id=domain_data.domain_id) self.cloud.delete_domain(name_or_id=domain_data.domain_id)
self.assert_calls() self.assert_calls()
@ -156,11 +223,20 @@ class TestDomains(base.TestCase):
new_resp = domain_data.json_response.copy() new_resp = domain_data.json_response.copy()
new_resp['domain']['enabled'] = False new_resp['domain']['enabled'] = False
domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id]) domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id])
self.register_uris([ self.register_uris(
dict(method='PATCH', uri=domain_resource_uri, status_code=200, [
json=new_resp, dict(
validate=dict(json={'domain': {'enabled': False}})), method='PATCH',
dict(method='DELETE', uri=domain_resource_uri, status_code=404)]) uri=domain_resource_uri,
status_code=200,
json=new_resp,
validate=dict(json={'domain': {'enabled': False}}),
),
dict(
method='DELETE', uri=domain_resource_uri, status_code=404
),
]
)
with testtools.ExpectedException( with testtools.ExpectedException(
openstack.exceptions.ResourceNotFound openstack.exceptions.ResourceNotFound
): ):
@ -169,53 +245,81 @@ class TestDomains(base.TestCase):
def test_update_domain(self): def test_update_domain(self):
domain_data = self._get_domain_data( domain_data = self._get_domain_data(
description=self.getUniqueString('domainDesc')) description=self.getUniqueString('domainDesc')
)
domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id]) domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id])
self.register_uris([ self.register_uris(
dict(method='PATCH', uri=domain_resource_uri, status_code=200, [
json=domain_data.json_response, dict(
validate=dict(json=domain_data.json_request))]) method='PATCH',
uri=domain_resource_uri,
status_code=200,
json=domain_data.json_response,
validate=dict(json=domain_data.json_request),
)
]
)
domain = self.cloud.update_domain( domain = self.cloud.update_domain(
domain_data.domain_id, domain_data.domain_id,
name=domain_data.domain_name, name=domain_data.domain_name,
description=domain_data.description) description=domain_data.description,
)
self.assertThat(domain.id, matchers.Equals(domain_data.domain_id)) self.assertThat(domain.id, matchers.Equals(domain_data.domain_id))
self.assertThat(domain.name, matchers.Equals(domain_data.domain_name)) self.assertThat(domain.name, matchers.Equals(domain_data.domain_name))
self.assertThat( self.assertThat(
domain.description, matchers.Equals(domain_data.description)) domain.description, matchers.Equals(domain_data.description)
)
self.assert_calls() self.assert_calls()
def test_update_domain_name_or_id(self): def test_update_domain_name_or_id(self):
domain_data = self._get_domain_data( domain_data = self._get_domain_data(
description=self.getUniqueString('domainDesc')) description=self.getUniqueString('domainDesc')
)
domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id]) domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id])
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url(append=[domain_data.domain_id]), dict(
status_code=200, method='GET',
json={'domain': domain_data.json_response['domain']}), uri=self.get_mock_url(append=[domain_data.domain_id]),
dict(method='PATCH', uri=domain_resource_uri, status_code=200, status_code=200,
json=domain_data.json_response, json={'domain': domain_data.json_response['domain']},
validate=dict(json=domain_data.json_request))]) ),
dict(
method='PATCH',
uri=domain_resource_uri,
status_code=200,
json=domain_data.json_response,
validate=dict(json=domain_data.json_request),
),
]
)
domain = self.cloud.update_domain( domain = self.cloud.update_domain(
name_or_id=domain_data.domain_id, name_or_id=domain_data.domain_id,
name=domain_data.domain_name, name=domain_data.domain_name,
description=domain_data.description) description=domain_data.description,
)
self.assertThat(domain.id, matchers.Equals(domain_data.domain_id)) self.assertThat(domain.id, matchers.Equals(domain_data.domain_id))
self.assertThat(domain.name, matchers.Equals(domain_data.domain_name)) self.assertThat(domain.name, matchers.Equals(domain_data.domain_name))
self.assertThat( self.assertThat(
domain.description, matchers.Equals(domain_data.description)) domain.description, matchers.Equals(domain_data.description)
)
self.assert_calls() self.assert_calls()
def test_update_domain_exception(self): def test_update_domain_exception(self):
domain_data = self._get_domain_data( domain_data = self._get_domain_data(
description=self.getUniqueString('domainDesc')) description=self.getUniqueString('domainDesc')
self.register_uris([ )
dict(method='PATCH', self.register_uris(
uri=self.get_mock_url(append=[domain_data.domain_id]), [
status_code=409, dict(
json=domain_data.json_response, method='PATCH',
validate=dict(json={'domain': {'enabled': False}}))]) uri=self.get_mock_url(append=[domain_data.domain_id]),
status_code=409,
json=domain_data.json_response,
validate=dict(json={'domain': {'enabled': False}}),
)
]
)
with testtools.ExpectedException( with testtools.ExpectedException(
openstack.exceptions.ConflictException openstack.exceptions.ConflictException
): ):

View File

@ -27,11 +27,17 @@ from openstack.tests.unit import base
class TestCloudEndpoints(base.TestCase): class TestCloudEndpoints(base.TestCase):
def get_mock_url(
def get_mock_url(self, service_type='identity', interface='public', self,
resource='endpoints', append=None, base_url_append='v3'): service_type='identity',
interface='public',
resource='endpoints',
append=None,
base_url_append='v3',
):
return super(TestCloudEndpoints, self).get_mock_url( return super(TestCloudEndpoints, self).get_mock_url(
service_type, interface, resource, append, base_url_append) service_type, interface, resource, append, base_url_append
)
def _dummy_url(self): def _dummy_url(self):
return 'https://%s.example.com/' % uuid.uuid4().hex return 'https://%s.example.com/' % uuid.uuid4().hex
@ -39,148 +45,207 @@ class TestCloudEndpoints(base.TestCase):
def test_create_endpoint_v3(self): def test_create_endpoint_v3(self):
service_data = self._get_service_data() service_data = self._get_service_data()
public_endpoint_data = self._get_endpoint_v3_data( public_endpoint_data = self._get_endpoint_v3_data(
service_id=service_data.service_id, interface='public', service_id=service_data.service_id,
url=self._dummy_url()) interface='public',
url=self._dummy_url(),
)
public_endpoint_data_disabled = self._get_endpoint_v3_data( public_endpoint_data_disabled = self._get_endpoint_v3_data(
service_id=service_data.service_id, interface='public', service_id=service_data.service_id,
url=self._dummy_url(), enabled=False) interface='public',
url=self._dummy_url(),
enabled=False,
)
admin_endpoint_data = self._get_endpoint_v3_data( admin_endpoint_data = self._get_endpoint_v3_data(
service_id=service_data.service_id, interface='admin', service_id=service_data.service_id,
url=self._dummy_url(), region=public_endpoint_data.region_id) interface='admin',
url=self._dummy_url(),
region=public_endpoint_data.region_id,
)
internal_endpoint_data = self._get_endpoint_v3_data( internal_endpoint_data = self._get_endpoint_v3_data(
service_id=service_data.service_id, interface='internal', service_id=service_data.service_id,
url=self._dummy_url(), region=public_endpoint_data.region_id) interface='internal',
url=self._dummy_url(),
region=public_endpoint_data.region_id,
)
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url(resource='services'), dict(
status_code=200, method='GET',
json={'services': [ uri=self.get_mock_url(resource='services'),
service_data.json_response_v3['service']]}), status_code=200,
dict(method='POST', json={
uri=self.get_mock_url(), 'services': [service_data.json_response_v3['service']]
status_code=200, },
json=public_endpoint_data_disabled.json_response, ),
validate=dict( dict(
json=public_endpoint_data_disabled.json_request)), method='POST',
dict(method='GET', uri=self.get_mock_url(),
uri=self.get_mock_url(resource='services'), status_code=200,
status_code=200, json=public_endpoint_data_disabled.json_response,
json={'services': [ validate=dict(
service_data.json_response_v3['service']]}), json=public_endpoint_data_disabled.json_request
dict(method='POST', ),
uri=self.get_mock_url(), ),
status_code=200, dict(
json=public_endpoint_data.json_response, method='GET',
validate=dict(json=public_endpoint_data.json_request)), uri=self.get_mock_url(resource='services'),
dict(method='POST', status_code=200,
uri=self.get_mock_url(), json={
status_code=200, 'services': [service_data.json_response_v3['service']]
json=internal_endpoint_data.json_response, },
validate=dict(json=internal_endpoint_data.json_request)), ),
dict(method='POST', dict(
uri=self.get_mock_url(), method='POST',
status_code=200, uri=self.get_mock_url(),
json=admin_endpoint_data.json_response, status_code=200,
validate=dict(json=admin_endpoint_data.json_request)), json=public_endpoint_data.json_response,
]) validate=dict(json=public_endpoint_data.json_request),
),
dict(
method='POST',
uri=self.get_mock_url(),
status_code=200,
json=internal_endpoint_data.json_response,
validate=dict(json=internal_endpoint_data.json_request),
),
dict(
method='POST',
uri=self.get_mock_url(),
status_code=200,
json=admin_endpoint_data.json_response,
validate=dict(json=admin_endpoint_data.json_request),
),
]
)
endpoints = self.cloud.create_endpoint( endpoints = self.cloud.create_endpoint(
service_name_or_id=service_data.service_id, service_name_or_id=service_data.service_id,
region=public_endpoint_data_disabled.region_id, region=public_endpoint_data_disabled.region_id,
url=public_endpoint_data_disabled.url, url=public_endpoint_data_disabled.url,
interface=public_endpoint_data_disabled.interface, interface=public_endpoint_data_disabled.interface,
enabled=False) enabled=False,
)
# Test endpoint values # Test endpoint values
self.assertThat( self.assertThat(
endpoints[0].id, endpoints[0].id,
matchers.Equals(public_endpoint_data_disabled.endpoint_id)) matchers.Equals(public_endpoint_data_disabled.endpoint_id),
self.assertThat(endpoints[0].url, )
matchers.Equals(public_endpoint_data_disabled.url)) self.assertThat(
endpoints[0].url,
matchers.Equals(public_endpoint_data_disabled.url),
)
self.assertThat( self.assertThat(
endpoints[0].interface, endpoints[0].interface,
matchers.Equals(public_endpoint_data_disabled.interface)) matchers.Equals(public_endpoint_data_disabled.interface),
)
self.assertThat( self.assertThat(
endpoints[0].region_id, endpoints[0].region_id,
matchers.Equals(public_endpoint_data_disabled.region_id)) matchers.Equals(public_endpoint_data_disabled.region_id),
)
self.assertThat( self.assertThat(
endpoints[0].region_id, endpoints[0].region_id,
matchers.Equals(public_endpoint_data_disabled.region_id)) matchers.Equals(public_endpoint_data_disabled.region_id),
self.assertThat(endpoints[0].is_enabled, )
matchers.Equals(public_endpoint_data_disabled.enabled)) self.assertThat(
endpoints[0].is_enabled,
matchers.Equals(public_endpoint_data_disabled.enabled),
)
endpoints_2on3 = self.cloud.create_endpoint( endpoints_2on3 = self.cloud.create_endpoint(
service_name_or_id=service_data.service_id, service_name_or_id=service_data.service_id,
region=public_endpoint_data.region_id, region=public_endpoint_data.region_id,
public_url=public_endpoint_data.url, public_url=public_endpoint_data.url,
internal_url=internal_endpoint_data.url, internal_url=internal_endpoint_data.url,
admin_url=admin_endpoint_data.url) admin_url=admin_endpoint_data.url,
)
# Three endpoints should be returned, public, internal, and admin # Three endpoints should be returned, public, internal, and admin
self.assertThat(len(endpoints_2on3), matchers.Equals(3)) self.assertThat(len(endpoints_2on3), matchers.Equals(3))
# test keys and values are correct for each endpoint created # test keys and values are correct for each endpoint created
for result, reference in zip( for result, reference in zip(
endpoints_2on3, [public_endpoint_data, endpoints_2on3,
internal_endpoint_data, [
admin_endpoint_data] public_endpoint_data,
internal_endpoint_data,
admin_endpoint_data,
],
): ):
self.assertThat(result.id, matchers.Equals(reference.endpoint_id)) self.assertThat(result.id, matchers.Equals(reference.endpoint_id))
self.assertThat(result.url, matchers.Equals(reference.url)) self.assertThat(result.url, matchers.Equals(reference.url))
self.assertThat(result.interface, self.assertThat(
matchers.Equals(reference.interface)) result.interface, matchers.Equals(reference.interface)
self.assertThat(result.region_id, )
matchers.Equals(reference.region_id)) self.assertThat(
self.assertThat(result.is_enabled, result.region_id, matchers.Equals(reference.region_id)
matchers.Equals(reference.enabled)) )
self.assertThat(
result.is_enabled, matchers.Equals(reference.enabled)
)
self.assert_calls() self.assert_calls()
def test_update_endpoint_v3(self): def test_update_endpoint_v3(self):
service_data = self._get_service_data() service_data = self._get_service_data()
dummy_url = self._dummy_url() dummy_url = self._dummy_url()
endpoint_data = self._get_endpoint_v3_data( endpoint_data = self._get_endpoint_v3_data(
service_id=service_data.service_id, interface='admin', service_id=service_data.service_id,
enabled=False) interface='admin',
enabled=False,
)
reference_request = endpoint_data.json_request.copy() reference_request = endpoint_data.json_request.copy()
reference_request['endpoint']['url'] = dummy_url reference_request['endpoint']['url'] = dummy_url
self.register_uris([ self.register_uris(
dict(method='PATCH', [
uri=self.get_mock_url(append=[endpoint_data.endpoint_id]), dict(
status_code=200, method='PATCH',
json=endpoint_data.json_response, uri=self.get_mock_url(append=[endpoint_data.endpoint_id]),
validate=dict(json=reference_request)) status_code=200,
]) json=endpoint_data.json_response,
validate=dict(json=reference_request),
)
]
)
endpoint = self.cloud.update_endpoint( endpoint = self.cloud.update_endpoint(
endpoint_data.endpoint_id, endpoint_data.endpoint_id,
service_name_or_id=service_data.service_id, service_name_or_id=service_data.service_id,
region=endpoint_data.region_id, region=endpoint_data.region_id,
url=dummy_url, url=dummy_url,
interface=endpoint_data.interface, interface=endpoint_data.interface,
enabled=False enabled=False,
) )
# test keys and values are correct # test keys and values are correct
self.assertThat(endpoint.id, self.assertThat(
matchers.Equals(endpoint_data.endpoint_id)) endpoint.id, matchers.Equals(endpoint_data.endpoint_id)
self.assertThat(endpoint.service_id, )
matchers.Equals(service_data.service_id)) self.assertThat(
self.assertThat(endpoint.url, endpoint.service_id, matchers.Equals(service_data.service_id)
matchers.Equals(endpoint_data.url)) )
self.assertThat(endpoint.interface, self.assertThat(endpoint.url, matchers.Equals(endpoint_data.url))
matchers.Equals(endpoint_data.interface)) self.assertThat(
endpoint.interface, matchers.Equals(endpoint_data.interface)
)
self.assert_calls() self.assert_calls()
def test_list_endpoints(self): def test_list_endpoints(self):
endpoints_data = [self._get_endpoint_v3_data() for e in range(1, 10)] endpoints_data = [self._get_endpoint_v3_data() for e in range(1, 10)]
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url(), dict(
status_code=200, method='GET',
json={'endpoints': [e.json_response['endpoint'] uri=self.get_mock_url(),
for e in endpoints_data]}) status_code=200,
]) json={
'endpoints': [
e.json_response['endpoint'] for e in endpoints_data
]
},
)
]
)
endpoints = self.cloud.list_endpoints() endpoints = self.cloud.list_endpoints()
# test we are getting exactly len(self.mock_endpoints) elements # test we are getting exactly len(self.mock_endpoints) elements
@ -188,58 +253,89 @@ class TestCloudEndpoints(base.TestCase):
# test keys and values are correct # test keys and values are correct
for i, ep in enumerate(endpoints_data): for i, ep in enumerate(endpoints_data):
self.assertThat(endpoints[i].id, self.assertThat(endpoints[i].id, matchers.Equals(ep.endpoint_id))
matchers.Equals(ep.endpoint_id)) self.assertThat(
self.assertThat(endpoints[i].service_id, endpoints[i].service_id, matchers.Equals(ep.service_id)
matchers.Equals(ep.service_id)) )
self.assertThat(endpoints[i].url, self.assertThat(endpoints[i].url, matchers.Equals(ep.url))
matchers.Equals(ep.url)) self.assertThat(
self.assertThat(endpoints[i].interface, endpoints[i].interface, matchers.Equals(ep.interface)
matchers.Equals(ep.interface)) )
self.assert_calls() self.assert_calls()
def test_search_endpoints(self): def test_search_endpoints(self):
endpoints_data = [self._get_endpoint_v3_data(region='region1') endpoints_data = [
for e in range(0, 2)] self._get_endpoint_v3_data(region='region1') for e in range(0, 2)
endpoints_data.extend([self._get_endpoint_v3_data() ]
for e in range(1, 8)]) endpoints_data.extend(
self.register_uris([ [self._get_endpoint_v3_data() for e in range(1, 8)]
dict(method='GET', )
uri=self.get_mock_url(), self.register_uris(
status_code=200, [
json={'endpoints': [e.json_response['endpoint'] dict(
for e in endpoints_data]}), method='GET',
dict(method='GET', uri=self.get_mock_url(),
uri=self.get_mock_url(), status_code=200,
status_code=200, json={
json={'endpoints': [e.json_response['endpoint'] 'endpoints': [
for e in endpoints_data]}), e.json_response['endpoint'] for e in endpoints_data
dict(method='GET', ]
uri=self.get_mock_url(), },
status_code=200, ),
json={'endpoints': [e.json_response['endpoint'] dict(
for e in endpoints_data]}), method='GET',
dict(method='GET', uri=self.get_mock_url(),
uri=self.get_mock_url(), status_code=200,
status_code=200, json={
json={'endpoints': [e.json_response['endpoint'] 'endpoints': [
for e in endpoints_data]}) e.json_response['endpoint'] for e in endpoints_data
]) ]
},
),
dict(
method='GET',
uri=self.get_mock_url(),
status_code=200,
json={
'endpoints': [
e.json_response['endpoint'] for e in endpoints_data
]
},
),
dict(
method='GET',
uri=self.get_mock_url(),
status_code=200,
json={
'endpoints': [
e.json_response['endpoint'] for e in endpoints_data
]
},
),
]
)
# Search by id # Search by id
endpoints = self.cloud.search_endpoints( endpoints = self.cloud.search_endpoints(
id=endpoints_data[-1].endpoint_id) id=endpoints_data[-1].endpoint_id
)
# # test we are getting exactly 1 element # # test we are getting exactly 1 element
self.assertEqual(1, len(endpoints)) self.assertEqual(1, len(endpoints))
self.assertThat(endpoints[0].id, self.assertThat(
matchers.Equals(endpoints_data[-1].endpoint_id)) endpoints[0].id, matchers.Equals(endpoints_data[-1].endpoint_id)
self.assertThat(endpoints[0].service_id, )
matchers.Equals(endpoints_data[-1].service_id)) self.assertThat(
self.assertThat(endpoints[0].url, endpoints[0].service_id,
matchers.Equals(endpoints_data[-1].url)) matchers.Equals(endpoints_data[-1].service_id),
self.assertThat(endpoints[0].interface, )
matchers.Equals(endpoints_data[-1].interface)) self.assertThat(
endpoints[0].url, matchers.Equals(endpoints_data[-1].url)
)
self.assertThat(
endpoints[0].interface,
matchers.Equals(endpoints_data[-1].interface),
)
# Not found # Not found
endpoints = self.cloud.search_endpoints(id='!invalid!') endpoints = self.cloud.search_endpoints(id='!invalid!')
@ -247,13 +343,15 @@ class TestCloudEndpoints(base.TestCase):
# Multiple matches # Multiple matches
endpoints = self.cloud.search_endpoints( endpoints = self.cloud.search_endpoints(
filters={'region_id': 'region1'}) filters={'region_id': 'region1'}
)
# # test we are getting exactly 2 elements # # test we are getting exactly 2 elements
self.assertEqual(2, len(endpoints)) self.assertEqual(2, len(endpoints))
# test we are getting the correct response for region/region_id compat # test we are getting the correct response for region/region_id compat
endpoints = self.cloud.search_endpoints( endpoints = self.cloud.search_endpoints(
filters={'region_id': 'region1'}) filters={'region_id': 'region1'}
)
# # test we are getting exactly 2 elements, this is v3 # # test we are getting exactly 2 elements, this is v3
self.assertEqual(2, len(endpoints)) self.assertEqual(2, len(endpoints))
@ -261,16 +359,23 @@ class TestCloudEndpoints(base.TestCase):
def test_delete_endpoint(self): def test_delete_endpoint(self):
endpoint_data = self._get_endpoint_v3_data() endpoint_data = self._get_endpoint_v3_data()
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url(), dict(
status_code=200, method='GET',
json={'endpoints': [ uri=self.get_mock_url(),
endpoint_data.json_response['endpoint']]}), status_code=200,
dict(method='DELETE', json={
uri=self.get_mock_url(append=[endpoint_data.endpoint_id]), 'endpoints': [endpoint_data.json_response['endpoint']]
status_code=204) },
]) ),
dict(
method='DELETE',
uri=self.get_mock_url(append=[endpoint_data.endpoint_id]),
status_code=204,
),
]
)
# Delete by id # Delete by id
self.cloud.delete_endpoint(id=endpoint_data.endpoint_id) self.cloud.delete_endpoint(id=endpoint_data.endpoint_id)

View File

@ -17,7 +17,6 @@ from openstack.tests.unit import base
class TestFlavors(base.TestCase): class TestFlavors(base.TestCase):
def setUp(self): def setUp(self):
super(TestFlavors, self).setUp() super(TestFlavors, self).setUp()
# self.use_compute_discovery() # self.use_compute_discovery()
@ -25,55 +24,85 @@ class TestFlavors(base.TestCase):
def test_create_flavor(self): def test_create_flavor(self):
self.use_compute_discovery() self.use_compute_discovery()
self.register_uris([ self.register_uris(
dict(method='POST', [
uri='{endpoint}/flavors'.format( dict(
endpoint=fakes.COMPUTE_ENDPOINT), method='POST',
json={'flavor': fakes.FAKE_FLAVOR}, uri='{endpoint}/flavors'.format(
validate=dict( endpoint=fakes.COMPUTE_ENDPOINT
json={ ),
'flavor': { json={'flavor': fakes.FAKE_FLAVOR},
"name": "vanilla", validate=dict(
"description": None, json={
"ram": 65536, 'flavor': {
"vcpus": 24, "name": "vanilla",
"swap": 0, "description": None,
"os-flavor-access:is_public": True, "ram": 65536,
"rxtx_factor": 1.0, "vcpus": 24,
"OS-FLV-EXT-DATA:ephemeral": 0, "swap": 0,
"disk": 1600, "os-flavor-access:is_public": True,
"id": None}}))]) "rxtx_factor": 1.0,
"OS-FLV-EXT-DATA:ephemeral": 0,
"disk": 1600,
"id": None,
}
}
),
)
]
)
self.cloud.create_flavor( self.cloud.create_flavor(
'vanilla', ram=65536, disk=1600, vcpus=24, 'vanilla',
ram=65536,
disk=1600,
vcpus=24,
) )
self.assert_calls() self.assert_calls()
def test_delete_flavor(self): def test_delete_flavor(self):
self.use_compute_discovery() self.use_compute_discovery()
self.register_uris([ self.register_uris(
dict(method='GET', [
uri='{endpoint}/flavors/vanilla'.format( dict(
endpoint=fakes.COMPUTE_ENDPOINT), method='GET',
json=fakes.FAKE_FLAVOR), uri='{endpoint}/flavors/vanilla'.format(
dict(method='DELETE', endpoint=fakes.COMPUTE_ENDPOINT
uri='{endpoint}/flavors/{id}'.format( ),
endpoint=fakes.COMPUTE_ENDPOINT, id=fakes.FLAVOR_ID))]) json=fakes.FAKE_FLAVOR,
),
dict(
method='DELETE',
uri='{endpoint}/flavors/{id}'.format(
endpoint=fakes.COMPUTE_ENDPOINT, id=fakes.FLAVOR_ID
),
),
]
)
self.assertTrue(self.cloud.delete_flavor('vanilla')) self.assertTrue(self.cloud.delete_flavor('vanilla'))
self.assert_calls() self.assert_calls()
def test_delete_flavor_not_found(self): def test_delete_flavor_not_found(self):
self.use_compute_discovery() self.use_compute_discovery()
self.register_uris([ self.register_uris(
dict(method='GET', [
uri='{endpoint}/flavors/invalid'.format( dict(
endpoint=fakes.COMPUTE_ENDPOINT), method='GET',
status_code=404), uri='{endpoint}/flavors/invalid'.format(
dict(method='GET', endpoint=fakes.COMPUTE_ENDPOINT
uri='{endpoint}/flavors/detail?is_public=None'.format( ),
endpoint=fakes.COMPUTE_ENDPOINT), status_code=404,
json={'flavors': fakes.FAKE_FLAVOR_LIST})]) ),
dict(
method='GET',
uri='{endpoint}/flavors/detail?is_public=None'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
json={'flavors': fakes.FAKE_FLAVOR_LIST},
),
]
)
self.assertFalse(self.cloud.delete_flavor('invalid')) self.assertFalse(self.cloud.delete_flavor('invalid'))
@ -81,30 +110,48 @@ class TestFlavors(base.TestCase):
def test_delete_flavor_exception(self): def test_delete_flavor_exception(self):
self.use_compute_discovery() self.use_compute_discovery()
self.register_uris([ self.register_uris(
dict(method='GET', [
uri='{endpoint}/flavors/vanilla'.format( dict(
endpoint=fakes.COMPUTE_ENDPOINT), method='GET',
json=fakes.FAKE_FLAVOR), uri='{endpoint}/flavors/vanilla'.format(
dict(method='GET', endpoint=fakes.COMPUTE_ENDPOINT
uri='{endpoint}/flavors/detail?is_public=None'.format( ),
endpoint=fakes.COMPUTE_ENDPOINT), json=fakes.FAKE_FLAVOR,
json={'flavors': fakes.FAKE_FLAVOR_LIST}), ),
dict(method='DELETE', dict(
uri='{endpoint}/flavors/{id}'.format( method='GET',
endpoint=fakes.COMPUTE_ENDPOINT, id=fakes.FLAVOR_ID), uri='{endpoint}/flavors/detail?is_public=None'.format(
status_code=503)]) endpoint=fakes.COMPUTE_ENDPOINT
),
json={'flavors': fakes.FAKE_FLAVOR_LIST},
),
dict(
method='DELETE',
uri='{endpoint}/flavors/{id}'.format(
endpoint=fakes.COMPUTE_ENDPOINT, id=fakes.FLAVOR_ID
),
status_code=503,
),
]
)
self.assertRaises(openstack.cloud.OpenStackCloudException, self.assertRaises(
self.cloud.delete_flavor, 'vanilla') openstack.cloud.OpenStackCloudException,
self.cloud.delete_flavor,
'vanilla',
)
def test_list_flavors(self): def test_list_flavors(self):
self.use_compute_discovery() self.use_compute_discovery()
uris_to_mock = [ uris_to_mock = [
dict(method='GET', dict(
uri='{endpoint}/flavors/detail?is_public=None'.format( method='GET',
endpoint=fakes.COMPUTE_ENDPOINT), uri='{endpoint}/flavors/detail?is_public=None'.format(
json={'flavors': fakes.FAKE_FLAVOR_LIST}), endpoint=fakes.COMPUTE_ENDPOINT
),
json={'flavors': fakes.FAKE_FLAVOR_LIST},
),
] ]
self.register_uris(uris_to_mock) self.register_uris(uris_to_mock)
@ -126,17 +173,26 @@ class TestFlavors(base.TestCase):
def test_list_flavors_with_extra(self): def test_list_flavors_with_extra(self):
self.use_compute_discovery() self.use_compute_discovery()
uris_to_mock = [ uris_to_mock = [
dict(method='GET', dict(
uri='{endpoint}/flavors/detail?is_public=None'.format( method='GET',
endpoint=fakes.COMPUTE_ENDPOINT), uri='{endpoint}/flavors/detail?is_public=None'.format(
json={'flavors': fakes.FAKE_FLAVOR_LIST}), endpoint=fakes.COMPUTE_ENDPOINT
),
json={'flavors': fakes.FAKE_FLAVOR_LIST},
),
] ]
uris_to_mock.extend([ uris_to_mock.extend(
dict(method='GET', [
uri='{endpoint}/flavors/{id}/os-extra_specs'.format( dict(
endpoint=fakes.COMPUTE_ENDPOINT, id=flavor['id']), method='GET',
json={'extra_specs': {}}) uri='{endpoint}/flavors/{id}/os-extra_specs'.format(
for flavor in fakes.FAKE_FLAVOR_LIST]) endpoint=fakes.COMPUTE_ENDPOINT, id=flavor['id']
),
json={'extra_specs': {}},
)
for flavor in fakes.FAKE_FLAVOR_LIST
]
)
self.register_uris(uris_to_mock) self.register_uris(uris_to_mock)
flavors = self.cloud.list_flavors(get_extra=True) flavors = self.cloud.list_flavors(get_extra=True)
@ -157,17 +213,26 @@ class TestFlavors(base.TestCase):
def test_get_flavor_by_ram(self): def test_get_flavor_by_ram(self):
self.use_compute_discovery() self.use_compute_discovery()
uris_to_mock = [ uris_to_mock = [
dict(method='GET', dict(
uri='{endpoint}/flavors/detail?is_public=None'.format( method='GET',
endpoint=fakes.COMPUTE_ENDPOINT), uri='{endpoint}/flavors/detail?is_public=None'.format(
json={'flavors': fakes.FAKE_FLAVOR_LIST}), endpoint=fakes.COMPUTE_ENDPOINT
),
json={'flavors': fakes.FAKE_FLAVOR_LIST},
),
] ]
uris_to_mock.extend([ uris_to_mock.extend(
dict(method='GET', [
uri='{endpoint}/flavors/{id}/os-extra_specs'.format( dict(
endpoint=fakes.COMPUTE_ENDPOINT, id=flavor['id']), method='GET',
json={'extra_specs': {}}) uri='{endpoint}/flavors/{id}/os-extra_specs'.format(
for flavor in fakes.FAKE_FLAVOR_LIST]) endpoint=fakes.COMPUTE_ENDPOINT, id=flavor['id']
),
json={'extra_specs': {}},
)
for flavor in fakes.FAKE_FLAVOR_LIST
]
)
self.register_uris(uris_to_mock) self.register_uris(uris_to_mock)
flavor = self.cloud.get_flavor_by_ram(ram=250) flavor = self.cloud.get_flavor_by_ram(ram=250)
@ -176,47 +241,69 @@ class TestFlavors(base.TestCase):
def test_get_flavor_by_ram_and_include(self): def test_get_flavor_by_ram_and_include(self):
self.use_compute_discovery() self.use_compute_discovery()
uris_to_mock = [ uris_to_mock = [
dict(method='GET', dict(
uri='{endpoint}/flavors/detail?is_public=None'.format( method='GET',
endpoint=fakes.COMPUTE_ENDPOINT), uri='{endpoint}/flavors/detail?is_public=None'.format(
json={'flavors': fakes.FAKE_FLAVOR_LIST}), endpoint=fakes.COMPUTE_ENDPOINT
),
json={'flavors': fakes.FAKE_FLAVOR_LIST},
),
] ]
uris_to_mock.extend([ uris_to_mock.extend(
dict(method='GET', [
uri='{endpoint}/flavors/{id}/os-extra_specs'.format( dict(
endpoint=fakes.COMPUTE_ENDPOINT, id=flavor['id']), method='GET',
json={'extra_specs': {}}) uri='{endpoint}/flavors/{id}/os-extra_specs'.format(
for flavor in fakes.FAKE_FLAVOR_LIST]) endpoint=fakes.COMPUTE_ENDPOINT, id=flavor['id']
),
json={'extra_specs': {}},
)
for flavor in fakes.FAKE_FLAVOR_LIST
]
)
self.register_uris(uris_to_mock) self.register_uris(uris_to_mock)
flavor = self.cloud.get_flavor_by_ram(ram=150, include='strawberry') flavor = self.cloud.get_flavor_by_ram(ram=150, include='strawberry')
self.assertEqual(fakes.STRAWBERRY_FLAVOR_ID, flavor['id']) self.assertEqual(fakes.STRAWBERRY_FLAVOR_ID, flavor['id'])
def test_get_flavor_by_ram_not_found(self): def test_get_flavor_by_ram_not_found(self):
self.use_compute_discovery() self.use_compute_discovery()
self.register_uris([ self.register_uris(
dict(method='GET', [
uri='{endpoint}/flavors/detail?is_public=None'.format( dict(
endpoint=fakes.COMPUTE_ENDPOINT), method='GET',
json={'flavors': []})]) uri='{endpoint}/flavors/detail?is_public=None'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
json={'flavors': []},
)
]
)
self.assertRaises( self.assertRaises(
openstack.cloud.OpenStackCloudException, openstack.cloud.OpenStackCloudException,
self.cloud.get_flavor_by_ram, self.cloud.get_flavor_by_ram,
ram=100) ram=100,
)
def test_get_flavor_string_and_int(self): def test_get_flavor_string_and_int(self):
self.use_compute_discovery() self.use_compute_discovery()
flavor_resource_uri = '{endpoint}/flavors/1/os-extra_specs'.format( flavor_resource_uri = '{endpoint}/flavors/1/os-extra_specs'.format(
endpoint=fakes.COMPUTE_ENDPOINT) endpoint=fakes.COMPUTE_ENDPOINT
)
flavor = fakes.make_fake_flavor('1', 'vanilla') flavor = fakes.make_fake_flavor('1', 'vanilla')
flavor_json = {'extra_specs': {}} flavor_json = {'extra_specs': {}}
self.register_uris([ self.register_uris(
dict(method='GET', [
uri='{endpoint}/flavors/1'.format( dict(
endpoint=fakes.COMPUTE_ENDPOINT), method='GET',
json=flavor), uri='{endpoint}/flavors/1'.format(
dict(method='GET', uri=flavor_resource_uri, json=flavor_json), endpoint=fakes.COMPUTE_ENDPOINT
]) ),
json=flavor,
),
dict(method='GET', uri=flavor_resource_uri, json=flavor_json),
]
)
flavor1 = self.cloud.get_flavor('1') flavor1 = self.cloud.get_flavor('1')
self.assertEqual('1', flavor1['id']) self.assertEqual('1', flavor1['id'])
@ -226,11 +313,17 @@ class TestFlavors(base.TestCase):
def test_set_flavor_specs(self): def test_set_flavor_specs(self):
self.use_compute_discovery() self.use_compute_discovery()
extra_specs = dict(key1='value1') extra_specs = dict(key1='value1')
self.register_uris([ self.register_uris(
dict(method='POST', [
uri='{endpoint}/flavors/{id}/os-extra_specs'.format( dict(
endpoint=fakes.COMPUTE_ENDPOINT, id=1), method='POST',
json=dict(extra_specs=extra_specs))]) uri='{endpoint}/flavors/{id}/os-extra_specs'.format(
endpoint=fakes.COMPUTE_ENDPOINT, id=1
),
json=dict(extra_specs=extra_specs),
)
]
)
self.cloud.set_flavor_specs(1, extra_specs) self.cloud.set_flavor_specs(1, extra_specs)
self.assert_calls() self.assert_calls()
@ -238,62 +331,97 @@ class TestFlavors(base.TestCase):
def test_unset_flavor_specs(self): def test_unset_flavor_specs(self):
self.use_compute_discovery() self.use_compute_discovery()
keys = ['key1', 'key2'] keys = ['key1', 'key2']
self.register_uris([ self.register_uris(
dict(method='DELETE', [
uri='{endpoint}/flavors/{id}/os-extra_specs/{key}'.format( dict(
endpoint=fakes.COMPUTE_ENDPOINT, id=1, key=key)) method='DELETE',
for key in keys]) uri='{endpoint}/flavors/{id}/os-extra_specs/{key}'.format(
endpoint=fakes.COMPUTE_ENDPOINT, id=1, key=key
),
)
for key in keys
]
)
self.cloud.unset_flavor_specs(1, keys) self.cloud.unset_flavor_specs(1, keys)
self.assert_calls() self.assert_calls()
def test_add_flavor_access(self): def test_add_flavor_access(self):
self.register_uris([ self.register_uris(
dict(method='POST', [
uri='{endpoint}/flavors/{id}/action'.format( dict(
endpoint=fakes.COMPUTE_ENDPOINT, id='flavor_id'), method='POST',
json={ uri='{endpoint}/flavors/{id}/action'.format(
'flavor_access': [{ endpoint=fakes.COMPUTE_ENDPOINT, id='flavor_id'
'flavor_id': 'flavor_id', 'tenant_id': 'tenant_id'}]}, ),
validate=dict( json={
json={'addTenantAccess': {'tenant': 'tenant_id'}}))]) 'flavor_access': [
{
'flavor_id': 'flavor_id',
'tenant_id': 'tenant_id',
}
]
},
validate=dict(
json={'addTenantAccess': {'tenant': 'tenant_id'}}
),
)
]
)
self.cloud.add_flavor_access('flavor_id', 'tenant_id') self.cloud.add_flavor_access('flavor_id', 'tenant_id')
self.assert_calls() self.assert_calls()
def test_remove_flavor_access(self): def test_remove_flavor_access(self):
self.register_uris([ self.register_uris(
dict(method='POST', [
uri='{endpoint}/flavors/{id}/action'.format( dict(
endpoint=fakes.COMPUTE_ENDPOINT, id='flavor_id'), method='POST',
json={'flavor_access': []}, uri='{endpoint}/flavors/{id}/action'.format(
validate=dict( endpoint=fakes.COMPUTE_ENDPOINT, id='flavor_id'
json={'removeTenantAccess': {'tenant': 'tenant_id'}}))]) ),
json={'flavor_access': []},
validate=dict(
json={'removeTenantAccess': {'tenant': 'tenant_id'}}
),
)
]
)
self.cloud.remove_flavor_access('flavor_id', 'tenant_id') self.cloud.remove_flavor_access('flavor_id', 'tenant_id')
self.assert_calls() self.assert_calls()
def test_list_flavor_access(self): def test_list_flavor_access(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri='{endpoint}/flavors/vanilla/os-flavor-access'.format( dict(
endpoint=fakes.COMPUTE_ENDPOINT), method='GET',
json={ uri='{endpoint}/flavors/vanilla/os-flavor-access'.format(
'flavor_access': [ endpoint=fakes.COMPUTE_ENDPOINT
{'flavor_id': 'vanilla', 'tenant_id': 'tenant_id'}]}) ),
]) json={
'flavor_access': [
{'flavor_id': 'vanilla', 'tenant_id': 'tenant_id'}
]
},
)
]
)
self.cloud.list_flavor_access('vanilla') self.cloud.list_flavor_access('vanilla')
self.assert_calls() self.assert_calls()
def test_get_flavor_by_id(self): def test_get_flavor_by_id(self):
self.use_compute_discovery() self.use_compute_discovery()
flavor_uri = '{endpoint}/flavors/1'.format( flavor_uri = '{endpoint}/flavors/1'.format(
endpoint=fakes.COMPUTE_ENDPOINT) endpoint=fakes.COMPUTE_ENDPOINT
)
flavor_json = {'flavor': fakes.make_fake_flavor('1', 'vanilla')} flavor_json = {'flavor': fakes.make_fake_flavor('1', 'vanilla')}
self.register_uris([ self.register_uris(
dict(method='GET', uri=flavor_uri, json=flavor_json), [
]) dict(method='GET', uri=flavor_uri, json=flavor_json),
]
)
flavor1 = self.cloud.get_flavor_by_id('1') flavor1 = self.cloud.get_flavor_by_id('1')
self.assertEqual('1', flavor1['id']) self.assertEqual('1', flavor1['id'])
@ -305,16 +433,22 @@ class TestFlavors(base.TestCase):
def test_get_flavor_with_extra_specs(self): def test_get_flavor_with_extra_specs(self):
self.use_compute_discovery() self.use_compute_discovery()
flavor_uri = '{endpoint}/flavors/1'.format( flavor_uri = '{endpoint}/flavors/1'.format(
endpoint=fakes.COMPUTE_ENDPOINT) endpoint=fakes.COMPUTE_ENDPOINT
)
flavor_extra_uri = '{endpoint}/flavors/1/os-extra_specs'.format( flavor_extra_uri = '{endpoint}/flavors/1/os-extra_specs'.format(
endpoint=fakes.COMPUTE_ENDPOINT) endpoint=fakes.COMPUTE_ENDPOINT
)
flavor_json = {'flavor': fakes.make_fake_flavor('1', 'vanilla')} flavor_json = {'flavor': fakes.make_fake_flavor('1', 'vanilla')}
flavor_extra_json = {'extra_specs': {'name': 'test'}} flavor_extra_json = {'extra_specs': {'name': 'test'}}
self.register_uris([ self.register_uris(
dict(method='GET', uri=flavor_uri, json=flavor_json), [
dict(method='GET', uri=flavor_extra_uri, json=flavor_extra_json), dict(method='GET', uri=flavor_uri, json=flavor_json),
]) dict(
method='GET', uri=flavor_extra_uri, json=flavor_extra_json
),
]
)
flavor1 = self.cloud.get_flavor_by_id('1', get_extra=True) flavor1 = self.cloud.get_flavor_by_id('1', get_extra=True)
self.assertEqual('1', flavor1['id']) self.assertEqual('1', flavor1['id'])

View File

@ -29,16 +29,20 @@ from openstack.tests.unit import base
class TestFloatingIP(base.TestCase): class TestFloatingIP(base.TestCase):
@patch.object(connection.Connection, 'get_floating_ip') @patch.object(connection.Connection, 'get_floating_ip')
@patch.object(connection.Connection, '_attach_ip_to_server') @patch.object(connection.Connection, '_attach_ip_to_server')
@patch.object(connection.Connection, 'available_floating_ip') @patch.object(connection.Connection, 'available_floating_ip')
def test_add_auto_ip( def test_add_auto_ip(
self, mock_available_floating_ip, mock_attach_ip_to_server, self,
mock_get_floating_ip): mock_available_floating_ip,
mock_attach_ip_to_server,
mock_get_floating_ip,
):
server_dict = fakes.make_fake_server( server_dict = fakes.make_fake_server(
server_id='server-id', name='test-server', status="ACTIVE", server_id='server-id',
addresses={} name='test-server',
status="ACTIVE",
addresses={},
) )
floating_ip_dict = { floating_ip_dict = {
"id": "this-is-a-floating-ip-id", "id": "this-is-a-floating-ip-id",
@ -47,7 +51,7 @@ class TestFloatingIP(base.TestCase):
"floating_ip_address": "203.0.113.29", "floating_ip_address": "203.0.113.29",
"network": "this-is-a-net-or-pool-id", "network": "this-is-a-net-or-pool-id",
"attached": False, "attached": False,
"status": "ACTIVE" "status": "ACTIVE",
} }
mock_available_floating_ip.return_value = floating_ip_dict mock_available_floating_ip.return_value = floating_ip_dict
@ -55,51 +59,63 @@ class TestFloatingIP(base.TestCase):
self.cloud.add_auto_ip(server=server_dict) self.cloud.add_auto_ip(server=server_dict)
mock_attach_ip_to_server.assert_called_with( mock_attach_ip_to_server.assert_called_with(
timeout=60, wait=False, server=server_dict, timeout=60,
floating_ip=floating_ip_dict, skip_attach=False) wait=False,
server=server_dict,
floating_ip=floating_ip_dict,
skip_attach=False,
)
@patch.object(connection.Connection, '_add_ip_from_pool') @patch.object(connection.Connection, '_add_ip_from_pool')
def test_add_ips_to_server_pool(self, mock_add_ip_from_pool): def test_add_ips_to_server_pool(self, mock_add_ip_from_pool):
server_dict = fakes.make_fake_server( server_dict = fakes.make_fake_server(
server_id='romeo', name='test-server', status="ACTIVE", server_id='romeo',
addresses={}) name='test-server',
status="ACTIVE",
addresses={},
)
pool = 'nova' pool = 'nova'
self.cloud.add_ips_to_server(server_dict, ip_pool=pool) self.cloud.add_ips_to_server(server_dict, ip_pool=pool)
mock_add_ip_from_pool.assert_called_with( mock_add_ip_from_pool.assert_called_with(
server_dict, pool, reuse=True, wait=False, timeout=60, server_dict,
fixed_address=None, nat_destination=None) pool,
reuse=True,
wait=False,
timeout=60,
fixed_address=None,
nat_destination=None,
)
@patch.object(connection.Connection, 'has_service') @patch.object(connection.Connection, 'has_service')
@patch.object(connection.Connection, 'get_floating_ip') @patch.object(connection.Connection, 'get_floating_ip')
@patch.object(connection.Connection, '_add_auto_ip') @patch.object(connection.Connection, '_add_auto_ip')
def test_add_ips_to_server_ipv6_only( def test_add_ips_to_server_ipv6_only(
self, mock_add_auto_ip, self, mock_add_auto_ip, mock_get_floating_ip, mock_has_service
mock_get_floating_ip, ):
mock_has_service):
self.cloud._floating_ip_source = None self.cloud._floating_ip_source = None
self.cloud.force_ipv4 = False self.cloud.force_ipv4 = False
self.cloud._local_ipv6 = True self.cloud._local_ipv6 = True
mock_has_service.return_value = False mock_has_service.return_value = False
server = fakes.make_fake_server( server = fakes.make_fake_server(
server_id='server-id', name='test-server', status="ACTIVE", server_id='server-id',
name='test-server',
status="ACTIVE",
addresses={ addresses={
'private': [{ 'private': [{'addr': "10.223.160.141", 'version': 4}],
'addr': "10.223.160.141", 'public': [
'version': 4 {
}], u'OS-EXT-IPS-MAC:mac_addr': u'fa:16:3e:ae:7d:42',
'public': [{ u'OS-EXT-IPS:type': u'fixed',
u'OS-EXT-IPS-MAC:mac_addr': u'fa:16:3e:ae:7d:42', 'addr': "2001:4800:7819:103:be76:4eff:fe05:8525",
u'OS-EXT-IPS:type': u'fixed', 'version': 6,
'addr': "2001:4800:7819:103:be76:4eff:fe05:8525", }
'version': 6 ],
}] },
}
) )
server_dict = meta.add_server_interfaces( server_dict = meta.add_server_interfaces(
self.cloud, self.cloud, _server.Server(**server)
_server.Server(**server)
) )
new_server = self.cloud.add_ips_to_server(server=server_dict) new_server = self.cloud.add_ips_to_server(server=server_dict)
@ -107,80 +123,79 @@ class TestFloatingIP(base.TestCase):
mock_add_auto_ip.assert_not_called() mock_add_auto_ip.assert_not_called()
self.assertEqual( self.assertEqual(
new_server['interface_ip'], new_server['interface_ip'],
'2001:4800:7819:103:be76:4eff:fe05:8525') '2001:4800:7819:103:be76:4eff:fe05:8525',
)
self.assertEqual(new_server['private_v4'], '10.223.160.141') self.assertEqual(new_server['private_v4'], '10.223.160.141')
self.assertEqual(new_server['public_v4'], '') self.assertEqual(new_server['public_v4'], '')
self.assertEqual( self.assertEqual(
new_server['public_v6'], '2001:4800:7819:103:be76:4eff:fe05:8525') new_server['public_v6'], '2001:4800:7819:103:be76:4eff:fe05:8525'
)
@patch.object(connection.Connection, 'has_service') @patch.object(connection.Connection, 'has_service')
@patch.object(connection.Connection, 'get_floating_ip') @patch.object(connection.Connection, 'get_floating_ip')
@patch.object(connection.Connection, '_add_auto_ip') @patch.object(connection.Connection, '_add_auto_ip')
def test_add_ips_to_server_rackspace( def test_add_ips_to_server_rackspace(
self, mock_add_auto_ip, self, mock_add_auto_ip, mock_get_floating_ip, mock_has_service
mock_get_floating_ip, ):
mock_has_service):
self.cloud._floating_ip_source = None self.cloud._floating_ip_source = None
self.cloud.force_ipv4 = False self.cloud.force_ipv4 = False
self.cloud._local_ipv6 = True self.cloud._local_ipv6 = True
mock_has_service.return_value = False mock_has_service.return_value = False
server = fakes.make_fake_server( server = fakes.make_fake_server(
server_id='server-id', name='test-server', status="ACTIVE", server_id='server-id',
name='test-server',
status="ACTIVE",
addresses={ addresses={
'private': [{ 'private': [{'addr': "10.223.160.141", 'version': 4}],
'addr': "10.223.160.141", 'public': [
'version': 4 {'addr': "104.130.246.91", 'version': 4},
}], {
'public': [{ 'addr': "2001:4800:7819:103:be76:4eff:fe05:8525",
'addr': "104.130.246.91", 'version': 6,
'version': 4 },
}, { ],
'addr': "2001:4800:7819:103:be76:4eff:fe05:8525", },
'version': 6
}]
}
) )
server_dict = meta.add_server_interfaces( server_dict = meta.add_server_interfaces(
self.cloud, self.cloud, _server.Server(**server)
_server.Server(**server)) )
new_server = self.cloud.add_ips_to_server(server=server_dict) new_server = self.cloud.add_ips_to_server(server=server_dict)
mock_get_floating_ip.assert_not_called() mock_get_floating_ip.assert_not_called()
mock_add_auto_ip.assert_not_called() mock_add_auto_ip.assert_not_called()
self.assertEqual( self.assertEqual(
new_server['interface_ip'], new_server['interface_ip'],
'2001:4800:7819:103:be76:4eff:fe05:8525') '2001:4800:7819:103:be76:4eff:fe05:8525',
)
@patch.object(connection.Connection, 'has_service') @patch.object(connection.Connection, 'has_service')
@patch.object(connection.Connection, 'get_floating_ip') @patch.object(connection.Connection, 'get_floating_ip')
@patch.object(connection.Connection, '_add_auto_ip') @patch.object(connection.Connection, '_add_auto_ip')
def test_add_ips_to_server_rackspace_local_ipv4( def test_add_ips_to_server_rackspace_local_ipv4(
self, mock_add_auto_ip, self, mock_add_auto_ip, mock_get_floating_ip, mock_has_service
mock_get_floating_ip, ):
mock_has_service):
self.cloud._floating_ip_source = None self.cloud._floating_ip_source = None
self.cloud.force_ipv4 = False self.cloud.force_ipv4 = False
self.cloud._local_ipv6 = False self.cloud._local_ipv6 = False
mock_has_service.return_value = False mock_has_service.return_value = False
server = fakes.make_fake_server( server = fakes.make_fake_server(
server_id='server-id', name='test-server', status="ACTIVE", server_id='server-id',
name='test-server',
status="ACTIVE",
addresses={ addresses={
'private': [{ 'private': [{'addr': "10.223.160.141", 'version': 4}],
'addr': "10.223.160.141", 'public': [
'version': 4 {'addr': "104.130.246.91", 'version': 4},
}], {
'public': [{ 'addr': "2001:4800:7819:103:be76:4eff:fe05:8525",
'addr': "104.130.246.91", 'version': 6,
'version': 4 },
}, { ],
'addr': "2001:4800:7819:103:be76:4eff:fe05:8525", },
'version': 6
}]
}
) )
server_dict = meta.add_server_interfaces( server_dict = meta.add_server_interfaces(
self.cloud, self.cloud, _server.Server(**server)
_server.Server(**server)) )
new_server = self.cloud.add_ips_to_server(server=server_dict) new_server = self.cloud.add_ips_to_server(server=server_dict)
mock_get_floating_ip.assert_not_called() mock_get_floating_ip.assert_not_called()
@ -190,24 +205,35 @@ class TestFloatingIP(base.TestCase):
@patch.object(connection.Connection, 'add_ip_list') @patch.object(connection.Connection, 'add_ip_list')
def test_add_ips_to_server_ip_list(self, mock_add_ip_list): def test_add_ips_to_server_ip_list(self, mock_add_ip_list):
server_dict = fakes.make_fake_server( server_dict = fakes.make_fake_server(
server_id='server-id', name='test-server', status="ACTIVE", server_id='server-id',
addresses={}) name='test-server',
status="ACTIVE",
addresses={},
)
ips = ['203.0.113.29', '172.24.4.229'] ips = ['203.0.113.29', '172.24.4.229']
self.cloud.add_ips_to_server(server_dict, ips=ips) self.cloud.add_ips_to_server(server_dict, ips=ips)
mock_add_ip_list.assert_called_with( mock_add_ip_list.assert_called_with(
server_dict, ips, wait=False, timeout=60, server_dict,
ips,
wait=False,
timeout=60,
fixed_address=None, fixed_address=None,
nat_destination=None) nat_destination=None,
)
@patch.object(connection.Connection, '_needs_floating_ip') @patch.object(connection.Connection, '_needs_floating_ip')
@patch.object(connection.Connection, '_add_auto_ip') @patch.object(connection.Connection, '_add_auto_ip')
def test_add_ips_to_server_auto_ip( def test_add_ips_to_server_auto_ip(
self, mock_add_auto_ip, mock_needs_floating_ip): self, mock_add_auto_ip, mock_needs_floating_ip
):
server_dict = fakes.make_fake_server( server_dict = fakes.make_fake_server(
server_id='server-id', name='test-server', status="ACTIVE", server_id='server-id',
addresses={}) name='test-server',
status="ACTIVE",
addresses={},
)
# TODO(mordred) REMOVE THIS MOCK WHEN THE NEXT PATCH LANDS # TODO(mordred) REMOVE THIS MOCK WHEN THE NEXT PATCH LANDS
# SERIOUSLY THIS TIME. NEXT PATCH - WHICH SHOULD ADD MOCKS FOR # SERIOUSLY THIS TIME. NEXT PATCH - WHICH SHOULD ADD MOCKS FOR
@ -218,4 +244,5 @@ class TestFloatingIP(base.TestCase):
self.cloud.add_ips_to_server(server_dict) self.cloud.add_ips_to_server(server_dict)
mock_add_auto_ip.assert_called_with( mock_add_auto_ip.assert_called_with(
server_dict, wait=False, timeout=60, reuse=True) server_dict, wait=False, timeout=60, reuse=True
)

File diff suppressed because it is too large Load Diff

View File

@ -28,6 +28,7 @@ def get_fake_has_service(has_service):
if s == 'network': if s == 'network':
return False return False
return has_service(s) return has_service(s)
return fake_has_service return fake_has_service
@ -38,27 +39,28 @@ class TestFloatingIP(base.TestCase):
'id': 1, 'id': 1,
'instance_id': None, 'instance_id': None,
'ip': '203.0.113.1', 'ip': '203.0.113.1',
'pool': 'nova' 'pool': 'nova',
}, },
{ {
'fixed_ip': None, 'fixed_ip': None,
'id': 2, 'id': 2,
'instance_id': None, 'instance_id': None,
'ip': '203.0.113.2', 'ip': '203.0.113.2',
'pool': 'nova' 'pool': 'nova',
}, },
{ {
'fixed_ip': '192.0.2.3', 'fixed_ip': '192.0.2.3',
'id': 29, 'id': 29,
'instance_id': 'myself', 'instance_id': 'myself',
'ip': '198.51.100.29', 'ip': '198.51.100.29',
'pool': 'black_hole' 'pool': 'black_hole',
} },
] ]
mock_floating_ip_pools = [ mock_floating_ip_pools = [
{'id': 'pool1_id', 'name': 'nova'}, {'id': 'pool1_id', 'name': 'nova'},
{'id': 'pool2_id', 'name': 'pool2'}] {'id': 'pool2_id', 'name': 'pool2'},
]
def assertAreInstances(self, elements, elem_type): def assertAreInstances(self, elements, elem_type):
for e in elements: for e in elements:
@ -68,23 +70,36 @@ class TestFloatingIP(base.TestCase):
super(TestFloatingIP, self).setUp() super(TestFloatingIP, self).setUp()
self.fake_server = fakes.make_fake_server( self.fake_server = fakes.make_fake_server(
'server-id', '', 'ACTIVE', 'server-id',
addresses={u'test_pnztt_net': [{ '',
u'OS-EXT-IPS:type': u'fixed', 'ACTIVE',
u'addr': '192.0.2.129', addresses={
u'version': 4, u'test_pnztt_net': [
u'OS-EXT-IPS-MAC:mac_addr': {
u'fa:16:3e:ae:7d:42'}]}) u'OS-EXT-IPS:type': u'fixed',
u'addr': '192.0.2.129',
u'version': 4,
u'OS-EXT-IPS-MAC:mac_addr': u'fa:16:3e:ae:7d:42',
}
]
},
)
self.cloud.has_service = get_fake_has_service(self.cloud.has_service) self.cloud.has_service = get_fake_has_service(self.cloud.has_service)
def test_list_floating_ips(self): def test_list_floating_ips(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url('compute', append=['os-floating-ips']), dict(
json={'floating_ips': self.mock_floating_ip_list_rep}), method='GET',
]) uri=self.get_mock_url(
'compute', append=['os-floating-ips']
),
json={'floating_ips': self.mock_floating_ip_list_rep},
),
]
)
floating_ips = self.cloud.list_floating_ips() floating_ips = self.cloud.list_floating_ips()
self.assertIsInstance(floating_ips, list) self.assertIsInstance(floating_ips, list)
@ -95,19 +110,28 @@ class TestFloatingIP(base.TestCase):
def test_list_floating_ips_with_filters(self): def test_list_floating_ips_with_filters(self):
self.assertRaisesRegex( self.assertRaisesRegex(
ValueError, "Nova-network don't support server-side", ValueError,
self.cloud.list_floating_ips, filters={'Foo': 42} "Nova-network don't support server-side",
self.cloud.list_floating_ips,
filters={'Foo': 42},
) )
def test_search_floating_ips(self): def test_search_floating_ips(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url('compute', append=['os-floating-ips']), dict(
json={'floating_ips': self.mock_floating_ip_list_rep}), method='GET',
]) uri=self.get_mock_url(
'compute', append=['os-floating-ips']
),
json={'floating_ips': self.mock_floating_ip_list_rep},
),
]
)
floating_ips = self.cloud.search_floating_ips( floating_ips = self.cloud.search_floating_ips(
filters={'attached': False}) filters={'attached': False}
)
self.assertIsInstance(floating_ips, list) self.assertIsInstance(floating_ips, list)
self.assertEqual(2, len(floating_ips)) self.assertEqual(2, len(floating_ips))
@ -116,11 +140,17 @@ class TestFloatingIP(base.TestCase):
self.assert_calls() self.assert_calls()
def test_get_floating_ip(self): def test_get_floating_ip(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url('compute', append=['os-floating-ips']), dict(
json={'floating_ips': self.mock_floating_ip_list_rep}), method='GET',
]) uri=self.get_mock_url(
'compute', append=['os-floating-ips']
),
json={'floating_ips': self.mock_floating_ip_list_rep},
),
]
)
floating_ip = self.cloud.get_floating_ip(id='29') floating_ip = self.cloud.get_floating_ip(id='29')
@ -130,11 +160,17 @@ class TestFloatingIP(base.TestCase):
self.assert_calls() self.assert_calls()
def test_get_floating_ip_not_found(self): def test_get_floating_ip_not_found(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url('compute', append=['os-floating-ips']), dict(
json={'floating_ips': self.mock_floating_ip_list_rep}), method='GET',
]) uri=self.get_mock_url(
'compute', append=['os-floating-ips']
),
json={'floating_ips': self.mock_floating_ip_list_rep},
),
]
)
floating_ip = self.cloud.get_floating_ip(id='666') floating_ip = self.cloud.get_floating_ip(id='666')
@ -143,12 +179,17 @@ class TestFloatingIP(base.TestCase):
self.assert_calls() self.assert_calls()
def test_get_floating_ip_by_id(self): def test_get_floating_ip_by_id(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url('compute', append=['os-floating-ips', dict(
'1']), method='GET',
json={'floating_ip': self.mock_floating_ip_list_rep[0]}), uri=self.get_mock_url(
]) 'compute', append=['os-floating-ips', '1']
),
json={'floating_ip': self.mock_floating_ip_list_rep[0]},
),
]
)
floating_ip = self.cloud.get_floating_ip_by_id(id='1') floating_ip = self.cloud.get_floating_ip_by_id(id='1')
@ -157,161 +198,240 @@ class TestFloatingIP(base.TestCase):
self.assert_calls() self.assert_calls()
def test_create_floating_ip(self): def test_create_floating_ip(self):
self.register_uris([ self.register_uris(
dict(method='POST', [
uri=self.get_mock_url('compute', append=['os-floating-ips']), dict(
json={'floating_ip': self.mock_floating_ip_list_rep[1]}, method='POST',
validate=dict( uri=self.get_mock_url(
json={'pool': 'nova'})), 'compute', append=['os-floating-ips']
dict(method='GET', ),
uri=self.get_mock_url( json={'floating_ip': self.mock_floating_ip_list_rep[1]},
'compute', validate=dict(json={'pool': 'nova'}),
append=['os-floating-ips', '2']), ),
json={'floating_ip': self.mock_floating_ip_list_rep[1]}), dict(
]) method='GET',
uri=self.get_mock_url(
'compute', append=['os-floating-ips', '2']
),
json={'floating_ip': self.mock_floating_ip_list_rep[1]},
),
]
)
self.cloud.create_floating_ip(network='nova') self.cloud.create_floating_ip(network='nova')
self.assert_calls() self.assert_calls()
def test_available_floating_ip_existing(self): def test_available_floating_ip_existing(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url('compute', append=['os-floating-ips']), dict(
json={'floating_ips': self.mock_floating_ip_list_rep[:1]}), method='GET',
]) uri=self.get_mock_url(
'compute', append=['os-floating-ips']
),
json={'floating_ips': self.mock_floating_ip_list_rep[:1]},
),
]
)
ip = self.cloud.available_floating_ip(network='nova') ip = self.cloud.available_floating_ip(network='nova')
self.assertEqual(self.mock_floating_ip_list_rep[0]['ip'], self.assertEqual(
ip['floating_ip_address']) self.mock_floating_ip_list_rep[0]['ip'], ip['floating_ip_address']
)
self.assert_calls() self.assert_calls()
def test_available_floating_ip_new(self): def test_available_floating_ip_new(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url('compute', append=['os-floating-ips']), dict(
json={'floating_ips': []}), method='GET',
dict(method='POST', uri=self.get_mock_url(
uri=self.get_mock_url('compute', append=['os-floating-ips']), 'compute', append=['os-floating-ips']
json={'floating_ip': self.mock_floating_ip_list_rep[0]}, ),
validate=dict( json={'floating_ips': []},
json={'pool': 'nova'})), ),
dict(method='GET', dict(
uri=self.get_mock_url( method='POST',
'compute', uri=self.get_mock_url(
append=['os-floating-ips', '1']), 'compute', append=['os-floating-ips']
json={'floating_ip': self.mock_floating_ip_list_rep[0]}), ),
]) json={'floating_ip': self.mock_floating_ip_list_rep[0]},
validate=dict(json={'pool': 'nova'}),
),
dict(
method='GET',
uri=self.get_mock_url(
'compute', append=['os-floating-ips', '1']
),
json={'floating_ip': self.mock_floating_ip_list_rep[0]},
),
]
)
ip = self.cloud.available_floating_ip(network='nova') ip = self.cloud.available_floating_ip(network='nova')
self.assertEqual(self.mock_floating_ip_list_rep[0]['ip'], self.assertEqual(
ip['floating_ip_address']) self.mock_floating_ip_list_rep[0]['ip'], ip['floating_ip_address']
)
self.assert_calls() self.assert_calls()
def test_delete_floating_ip_existing(self): def test_delete_floating_ip_existing(self):
self.register_uris([ self.register_uris(
dict(method='DELETE', [
uri=self.get_mock_url( dict(
'compute', method='DELETE',
append=['os-floating-ips', 'a-wild-id-appears'])), uri=self.get_mock_url(
dict(method='GET', 'compute',
uri=self.get_mock_url('compute', append=['os-floating-ips']), append=['os-floating-ips', 'a-wild-id-appears'],
json={'floating_ips': []}), ),
]) ),
dict(
method='GET',
uri=self.get_mock_url(
'compute', append=['os-floating-ips']
),
json={'floating_ips': []},
),
]
)
ret = self.cloud.delete_floating_ip( ret = self.cloud.delete_floating_ip(floating_ip_id='a-wild-id-appears')
floating_ip_id='a-wild-id-appears')
self.assertTrue(ret) self.assertTrue(ret)
self.assert_calls() self.assert_calls()
def test_delete_floating_ip_not_found(self): def test_delete_floating_ip_not_found(self):
self.register_uris([ self.register_uris(
dict(method='DELETE', [
uri=self.get_mock_url( dict(
'compute', method='DELETE',
append=['os-floating-ips', 'a-wild-id-appears']), uri=self.get_mock_url(
status_code=404), 'compute',
]) append=['os-floating-ips', 'a-wild-id-appears'],
),
status_code=404,
),
]
)
ret = self.cloud.delete_floating_ip( ret = self.cloud.delete_floating_ip(floating_ip_id='a-wild-id-appears')
floating_ip_id='a-wild-id-appears')
self.assertFalse(ret) self.assertFalse(ret)
self.assert_calls() self.assert_calls()
def test_attach_ip_to_server(self): def test_attach_ip_to_server(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url('compute', append=['os-floating-ips']), dict(
json={'floating_ips': self.mock_floating_ip_list_rep}), method='GET',
dict(method='POST', uri=self.get_mock_url(
uri=self.get_mock_url( 'compute', append=['os-floating-ips']
'compute', ),
append=['servers', self.fake_server['id'], 'action']), json={'floating_ips': self.mock_floating_ip_list_rep},
validate=dict( ),
json={ dict(
"addFloatingIp": { method='POST',
"address": "203.0.113.1", uri=self.get_mock_url(
"fixed_address": "192.0.2.129", 'compute',
}})), append=['servers', self.fake_server['id'], 'action'],
]) ),
validate=dict(
json={
"addFloatingIp": {
"address": "203.0.113.1",
"fixed_address": "192.0.2.129",
}
}
),
),
]
)
self.cloud._attach_ip_to_server( self.cloud._attach_ip_to_server(
server=self.fake_server, server=self.fake_server,
floating_ip=self.cloud._normalize_floating_ip( floating_ip=self.cloud._normalize_floating_ip(
self.mock_floating_ip_list_rep[0]), self.mock_floating_ip_list_rep[0]
fixed_address='192.0.2.129') ),
fixed_address='192.0.2.129',
)
self.assert_calls() self.assert_calls()
def test_detach_ip_from_server(self): def test_detach_ip_from_server(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url('compute', append=['os-floating-ips']), dict(
json={'floating_ips': self.mock_floating_ip_list_rep}), method='GET',
dict(method='POST', uri=self.get_mock_url(
uri=self.get_mock_url( 'compute', append=['os-floating-ips']
'compute', ),
append=['servers', self.fake_server['id'], 'action']), json={'floating_ips': self.mock_floating_ip_list_rep},
validate=dict( ),
json={ dict(
"removeFloatingIp": { method='POST',
"address": "203.0.113.1", uri=self.get_mock_url(
}})), 'compute',
]) append=['servers', self.fake_server['id'], 'action'],
),
validate=dict(
json={
"removeFloatingIp": {
"address": "203.0.113.1",
}
}
),
),
]
)
self.cloud.detach_ip_from_server( self.cloud.detach_ip_from_server(
server_id='server-id', floating_ip_id=1) server_id='server-id', floating_ip_id=1
)
self.assert_calls() self.assert_calls()
def test_add_ip_from_pool(self): def test_add_ip_from_pool(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url('compute', append=['os-floating-ips']), dict(
json={'floating_ips': self.mock_floating_ip_list_rep}), method='GET',
dict(method='GET', uri=self.get_mock_url(
uri=self.get_mock_url('compute', append=['os-floating-ips']), 'compute', append=['os-floating-ips']
json={'floating_ips': self.mock_floating_ip_list_rep}), ),
dict(method='POST', json={'floating_ips': self.mock_floating_ip_list_rep},
uri=self.get_mock_url( ),
'compute', dict(
append=['servers', self.fake_server['id'], 'action']), method='GET',
validate=dict( uri=self.get_mock_url(
json={ 'compute', append=['os-floating-ips']
"addFloatingIp": { ),
"address": "203.0.113.1", json={'floating_ips': self.mock_floating_ip_list_rep},
"fixed_address": "192.0.2.129", ),
}})), dict(
]) method='POST',
uri=self.get_mock_url(
'compute',
append=['servers', self.fake_server['id'], 'action'],
),
validate=dict(
json={
"addFloatingIp": {
"address": "203.0.113.1",
"fixed_address": "192.0.2.129",
}
}
),
),
]
)
server = self.cloud._add_ip_from_pool( server = self.cloud._add_ip_from_pool(
server=self.fake_server, server=self.fake_server,
network='nova', network='nova',
fixed_address='192.0.2.129') fixed_address='192.0.2.129',
)
self.assertEqual(server, self.fake_server) self.assertEqual(server, self.fake_server)
self.assert_calls() self.assert_calls()

View File

@ -25,27 +25,39 @@ from openstack.tests.unit import base
class TestFloatingIPPool(base.TestCase): class TestFloatingIPPool(base.TestCase):
pools = [{'name': u'public'}] pools = [{'name': 'public'}]
def test_list_floating_ip_pools(self): def test_list_floating_ip_pools(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri='{endpoint}/extensions'.format( dict(
endpoint=fakes.COMPUTE_ENDPOINT), method='GET',
json={'extensions': [{ uri='{endpoint}/extensions'.format(
u'alias': u'os-floating-ip-pools', endpoint=fakes.COMPUTE_ENDPOINT
u'updated': u'2014-12-03T00:00:00Z', ),
u'name': u'FloatingIpPools', json={
u'links': [], 'extensions': [
u'namespace': {
u'http://docs.openstack.org/compute/ext/fake_xml', 'alias': 'os-floating-ip-pools',
u'description': u'Floating IPs support.'}]}), 'updated': '2014-12-03T00:00:00Z',
dict(method='GET', 'name': 'FloatingIpPools',
uri='{endpoint}/os-floating-ip-pools'.format( 'links': [],
endpoint=fakes.COMPUTE_ENDPOINT), 'namespace': 'http://docs.openstack.org/compute/ext/fake_xml', # noqa: E501
json={"floating_ip_pools": [{"name": "public"}]}) 'description': 'Floating IPs support.',
]) }
]
},
),
dict(
method='GET',
uri='{endpoint}/os-floating-ip-pools'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
json={"floating_ip_pools": [{"name": "public"}]},
),
]
)
floating_ip_pools = self.cloud.list_floating_ip_pools() floating_ip_pools = self.cloud.list_floating_ip_pools()
@ -55,24 +67,38 @@ class TestFloatingIPPool(base.TestCase):
def test_list_floating_ip_pools_exception(self): def test_list_floating_ip_pools_exception(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri='{endpoint}/extensions'.format( dict(
endpoint=fakes.COMPUTE_ENDPOINT), method='GET',
json={'extensions': [{ uri='{endpoint}/extensions'.format(
u'alias': u'os-floating-ip-pools', endpoint=fakes.COMPUTE_ENDPOINT
u'updated': u'2014-12-03T00:00:00Z', ),
u'name': u'FloatingIpPools', json={
u'links': [], 'extensions': [
u'namespace': {
u'http://docs.openstack.org/compute/ext/fake_xml', 'alias': 'os-floating-ip-pools',
u'description': u'Floating IPs support.'}]}), 'updated': '2014-12-03T00:00:00Z',
dict(method='GET', 'name': 'FloatingIpPools',
uri='{endpoint}/os-floating-ip-pools'.format( 'links': [],
endpoint=fakes.COMPUTE_ENDPOINT), 'namespace': 'http://docs.openstack.org/compute/ext/fake_xml', # noqa: E501
status_code=404)]) 'description': 'Floating IPs support.',
}
]
},
),
dict(
method='GET',
uri='{endpoint}/os-floating-ip-pools'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
status_code=404,
),
]
)
self.assertRaises( self.assertRaises(
OpenStackCloudException, self.cloud.list_floating_ip_pools) OpenStackCloudException, self.cloud.list_floating_ip_pools
)
self.assert_calls() self.assert_calls()

File diff suppressed because it is too large Load Diff

View File

@ -17,86 +17,129 @@ from openstack.tests.unit import base
class TestGroups(base.TestCase): class TestGroups(base.TestCase):
def setUp(self, cloud_config_fixture='clouds.yaml'): def setUp(self, cloud_config_fixture='clouds.yaml'):
super(TestGroups, self).setUp( super(TestGroups, self).setUp(
cloud_config_fixture=cloud_config_fixture) cloud_config_fixture=cloud_config_fixture
)
self.addCleanup(self.assert_calls) self.addCleanup(self.assert_calls)
def get_mock_url(self, service_type='identity', interface='public', def get_mock_url(
resource='groups', append=None, base_url_append='v3'): self,
service_type='identity',
interface='public',
resource='groups',
append=None,
base_url_append='v3',
):
return super(TestGroups, self).get_mock_url( return super(TestGroups, self).get_mock_url(
service_type='identity', interface=interface, resource=resource, service_type='identity',
append=append, base_url_append=base_url_append) interface=interface,
resource=resource,
append=append,
base_url_append=base_url_append,
)
def test_list_groups(self): def test_list_groups(self):
group_data = self._get_group_data() group_data = self._get_group_data()
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url(), dict(
status_code=200, method='GET',
json={'groups': [group_data.json_response['group']]}) uri=self.get_mock_url(),
]) status_code=200,
json={'groups': [group_data.json_response['group']]},
)
]
)
self.cloud.list_groups() self.cloud.list_groups()
def test_get_group(self): def test_get_group(self):
group_data = self._get_group_data() group_data = self._get_group_data()
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url(), dict(
status_code=200, method='GET',
json={'groups': [group_data.json_response['group']]}), uri=self.get_mock_url(),
]) status_code=200,
json={'groups': [group_data.json_response['group']]},
),
]
)
self.cloud.get_group(group_data.group_id) self.cloud.get_group(group_data.group_id)
def test_delete_group(self): def test_delete_group(self):
group_data = self._get_group_data() group_data = self._get_group_data()
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
append=[group_data.group_id]), method='GET',
status_code=200, uri=self.get_mock_url(append=[group_data.group_id]),
json={'group': group_data.json_response['group']}), status_code=200,
dict(method='DELETE', json={'group': group_data.json_response['group']},
uri=self.get_mock_url(append=[group_data.group_id]), ),
status_code=204), dict(
]) method='DELETE',
uri=self.get_mock_url(append=[group_data.group_id]),
status_code=204,
),
]
)
self.assertTrue(self.cloud.delete_group(group_data.group_id)) self.assertTrue(self.cloud.delete_group(group_data.group_id))
def test_create_group(self): def test_create_group(self):
domain_data = self._get_domain_data() domain_data = self._get_domain_data()
group_data = self._get_group_data(domain_id=domain_data.domain_id) group_data = self._get_group_data(domain_id=domain_data.domain_id)
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url(resource='domains', dict(
append=[domain_data.domain_id]), method='GET',
status_code=200, uri=self.get_mock_url(
json=domain_data.json_response), resource='domains', append=[domain_data.domain_id]
dict(method='POST', ),
uri=self.get_mock_url(), status_code=200,
status_code=200, json=domain_data.json_response,
json=group_data.json_response, ),
validate=dict(json=group_data.json_request)) dict(
]) method='POST',
uri=self.get_mock_url(),
status_code=200,
json=group_data.json_response,
validate=dict(json=group_data.json_request),
),
]
)
self.cloud.create_group( self.cloud.create_group(
name=group_data.group_name, description=group_data.description, name=group_data.group_name,
domain=group_data.domain_id) description=group_data.description,
domain=group_data.domain_id,
)
def test_update_group(self): def test_update_group(self):
group_data = self._get_group_data() group_data = self._get_group_data()
# Domain ID is not sent # Domain ID is not sent
group_data.json_request['group'].pop('domain_id') group_data.json_request['group'].pop('domain_id')
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
append=[group_data.group_id]), method='GET',
status_code=200, uri=self.get_mock_url(append=[group_data.group_id]),
json={'group': group_data.json_response['group']}), status_code=200,
dict(method='PATCH', json={'group': group_data.json_response['group']},
uri=self.get_mock_url( ),
append=[group_data.group_id]), dict(
status_code=200, method='PATCH',
json=group_data.json_response, uri=self.get_mock_url(append=[group_data.group_id]),
validate=dict(json={ status_code=200,
'group': {'name': 'new_name', 'description': json=group_data.json_response,
'new_description'}})) validate=dict(
]) json={
'group': {
'name': 'new_name',
'description': 'new_description',
}
}
),
),
]
)
self.cloud.update_group( self.cloud.update_group(
group_data.group_id, 'new_name', 'new_description') group_data.group_id, 'new_name', 'new_description'
)

View File

@ -23,46 +23,65 @@ RAW_ROLE_ASSIGNMENTS = [
"links": {"assignment": "http://example"}, "links": {"assignment": "http://example"},
"role": {"id": "123456"}, "role": {"id": "123456"},
"scope": {"domain": {"id": "161718"}}, "scope": {"domain": {"id": "161718"}},
"user": {"id": "313233"} "user": {"id": "313233"},
}, },
{ {
"links": {"assignment": "http://example"}, "links": {"assignment": "http://example"},
"group": {"id": "101112"}, "group": {"id": "101112"},
"role": {"id": "123456"}, "role": {"id": "123456"},
"scope": {"project": {"id": "456789"}} "scope": {"project": {"id": "456789"}},
} },
] ]
class TestIdentityRoles(base.TestCase): class TestIdentityRoles(base.TestCase):
def get_mock_url(
def get_mock_url(self, service_type='identity', interface='public', self,
resource='roles', append=None, base_url_append='v3', service_type='identity',
qs_elements=None): interface='public',
resource='roles',
append=None,
base_url_append='v3',
qs_elements=None,
):
return super(TestIdentityRoles, self).get_mock_url( return super(TestIdentityRoles, self).get_mock_url(
service_type, interface, resource, append, base_url_append, service_type,
qs_elements) interface,
resource,
append,
base_url_append,
qs_elements,
)
def test_list_roles(self): def test_list_roles(self):
role_data = self._get_role_data() role_data = self._get_role_data()
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url(), dict(
status_code=200, method='GET',
json={'roles': [role_data.json_response['role']]}) uri=self.get_mock_url(),
]) status_code=200,
json={'roles': [role_data.json_response['role']]},
)
]
)
self.cloud.list_roles() self.cloud.list_roles()
self.assert_calls() self.assert_calls()
def test_list_role_by_name(self): def test_list_role_by_name(self):
role_data = self._get_role_data() role_data = self._get_role_data()
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
qs_elements=['name={0}'.format(role_data.role_name)]), method='GET',
status_code=200, uri=self.get_mock_url(
json={'roles': [role_data.json_response['role']]}) qs_elements=['name={0}'.format(role_data.role_name)]
]) ),
status_code=200,
json={'roles': [role_data.json_response['role']]},
)
]
)
role = self.cloud.list_roles(name=role_data.role_name)[0] role = self.cloud.list_roles(name=role_data.role_name)[0]
self.assertIsNotNone(role) self.assertIsNotNone(role)
@ -72,12 +91,16 @@ class TestIdentityRoles(base.TestCase):
def test_get_role_by_name(self): def test_get_role_by_name(self):
role_data = self._get_role_data() role_data = self._get_role_data()
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url(), dict(
status_code=200, method='GET',
json={'roles': [role_data.json_response['role']]}) uri=self.get_mock_url(),
]) status_code=200,
json={'roles': [role_data.json_response['role']]},
)
]
)
role = self.cloud.get_role(role_data.role_name) role = self.cloud.get_role(role_data.role_name)
self.assertIsNotNone(role) self.assertIsNotNone(role)
@ -87,12 +110,16 @@ class TestIdentityRoles(base.TestCase):
def test_get_role_by_id(self): def test_get_role_by_id(self):
role_data = self._get_role_data() role_data = self._get_role_data()
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url(), dict(
status_code=200, method='GET',
json={'roles': [role_data.json_response['role']]}) uri=self.get_mock_url(),
]) status_code=200,
json={'roles': [role_data.json_response['role']]},
)
]
)
role = self.cloud.get_role(role_data.role_id) role = self.cloud.get_role(role_data.role_id)
self.assertIsNotNone(role) self.assertIsNotNone(role)
@ -102,13 +129,17 @@ class TestIdentityRoles(base.TestCase):
def test_create_role(self): def test_create_role(self):
role_data = self._get_role_data() role_data = self._get_role_data()
self.register_uris([ self.register_uris(
dict(method='POST', [
uri=self.get_mock_url(), dict(
status_code=200, method='POST',
json=role_data.json_response, uri=self.get_mock_url(),
validate=dict(json=role_data.json_request)) status_code=200,
]) json=role_data.json_response,
validate=dict(json=role_data.json_request),
)
]
)
role = self.cloud.create_role(role_data.role_name) role = self.cloud.create_role(role_data.role_name)
@ -120,20 +151,25 @@ class TestIdentityRoles(base.TestCase):
def test_update_role(self): def test_update_role(self):
role_data = self._get_role_data() role_data = self._get_role_data()
req = {'role': {'name': 'new_name'}} req = {'role': {'name': 'new_name'}}
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url(), dict(
status_code=200, method='GET',
json={'roles': [role_data.json_response['role']]}), uri=self.get_mock_url(),
dict(method='PATCH', status_code=200,
uri=self.get_mock_url(append=[role_data.role_id]), json={'roles': [role_data.json_response['role']]},
status_code=200, ),
json=role_data.json_response, dict(
validate=dict(json=req)) method='PATCH',
]) uri=self.get_mock_url(append=[role_data.role_id]),
status_code=200,
json=role_data.json_response,
validate=dict(json=req),
),
]
)
role = self.cloud.update_role( role = self.cloud.update_role(role_data.role_id, 'new_name')
role_data.role_id, 'new_name')
self.assertIsNotNone(role) self.assertIsNotNone(role)
self.assertThat(role.name, matchers.Equals(role_data.role_name)) self.assertThat(role.name, matchers.Equals(role_data.role_name))
@ -142,30 +178,42 @@ class TestIdentityRoles(base.TestCase):
def test_delete_role_by_id(self): def test_delete_role_by_id(self):
role_data = self._get_role_data() role_data = self._get_role_data()
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url(), dict(
status_code=200, method='GET',
json={'roles': [role_data.json_response['role']]}), uri=self.get_mock_url(),
dict(method='DELETE', status_code=200,
uri=self.get_mock_url(append=[role_data.role_id]), json={'roles': [role_data.json_response['role']]},
status_code=204) ),
]) dict(
method='DELETE',
uri=self.get_mock_url(append=[role_data.role_id]),
status_code=204,
),
]
)
role = self.cloud.delete_role(role_data.role_id) role = self.cloud.delete_role(role_data.role_id)
self.assertThat(role, matchers.Equals(True)) self.assertThat(role, matchers.Equals(True))
self.assert_calls() self.assert_calls()
def test_delete_role_by_name(self): def test_delete_role_by_name(self):
role_data = self._get_role_data() role_data = self._get_role_data()
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url(), dict(
status_code=200, method='GET',
json={'roles': [role_data.json_response['role']]}), uri=self.get_mock_url(),
dict(method='DELETE', status_code=200,
uri=self.get_mock_url(append=[role_data.role_id]), json={'roles': [role_data.json_response['role']]},
status_code=204) ),
]) dict(
method='DELETE',
uri=self.get_mock_url(append=[role_data.role_id]),
status_code=204,
),
]
)
role = self.cloud.delete_role(role_data.role_name) role = self.cloud.delete_role(role_data.role_name)
self.assertThat(role, matchers.Equals(True)) self.assertThat(role, matchers.Equals(True))
self.assert_calls() self.assert_calls()
@ -177,78 +225,102 @@ class TestIdentityRoles(base.TestCase):
project_data = self._get_project_data(domain_id=domain_data.domain_id) project_data = self._get_project_data(domain_id=domain_data.domain_id)
role_data = self._get_role_data() role_data = self._get_role_data()
response = [ response = [
{'links': 'https://example.com', {
'role': {'id': role_data.role_id}, 'links': 'https://example.com',
'scope': {'domain': {'id': domain_data.domain_id}}, 'role': {'id': role_data.role_id},
'user': {'id': user_data.user_id}}, 'scope': {'domain': {'id': domain_data.domain_id}},
{'links': 'https://example.com', 'user': {'id': user_data.user_id},
'role': {'id': role_data.role_id}, },
'scope': {'project': {'id': project_data.project_id}}, {
'group': {'id': group_data.group_id}}, 'links': 'https://example.com',
'role': {'id': role_data.role_id},
'scope': {'project': {'id': project_data.project_id}},
'group': {'id': group_data.group_id},
},
] ]
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
resource='role_assignments'), method='GET',
status_code=200, uri=self.get_mock_url(resource='role_assignments'),
json={'role_assignments': response}, status_code=200,
complete_qs=True) json={'role_assignments': response},
]) complete_qs=True,
)
]
)
ret = self.cloud.list_role_assignments() ret = self.cloud.list_role_assignments()
self.assertThat(len(ret), matchers.Equals(2)) self.assertThat(len(ret), matchers.Equals(2))
self.assertThat(ret[0].user['id'], matchers.Equals(user_data.user_id)) self.assertThat(ret[0].user['id'], matchers.Equals(user_data.user_id))
self.assertThat(ret[0].role['id'], matchers.Equals(role_data.role_id)) self.assertThat(ret[0].role['id'], matchers.Equals(role_data.role_id))
self.assertThat( self.assertThat(
ret[0].scope['domain']['id'], ret[0].scope['domain']['id'],
matchers.Equals(domain_data.domain_id)) matchers.Equals(domain_data.domain_id),
)
self.assertThat( self.assertThat(
ret[1].group['id'], ret[1].group['id'], matchers.Equals(group_data.group_id)
matchers.Equals(group_data.group_id)) )
self.assertThat(ret[1].role['id'], matchers.Equals(role_data.role_id)) self.assertThat(ret[1].role['id'], matchers.Equals(role_data.role_id))
self.assertThat( self.assertThat(
ret[1].scope['project']['id'], ret[1].scope['project']['id'],
matchers.Equals(project_data.project_id)) matchers.Equals(project_data.project_id),
)
def test_list_role_assignments_filters(self): def test_list_role_assignments_filters(self):
domain_data = self._get_domain_data() domain_data = self._get_domain_data()
user_data = self._get_user_data(domain_id=domain_data.domain_id) user_data = self._get_user_data(domain_id=domain_data.domain_id)
role_data = self._get_role_data() role_data = self._get_role_data()
response = [ response = [
{'links': 'https://example.com', {
'role': {'id': role_data.role_id}, 'links': 'https://example.com',
'scope': {'domain': {'id': domain_data.domain_id}}, 'role': {'id': role_data.role_id},
'user': {'id': user_data.user_id}} 'scope': {'domain': {'id': domain_data.domain_id}},
'user': {'id': user_data.user_id},
}
] ]
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
resource='role_assignments', method='GET',
qs_elements=['scope.domain.id=%s' % domain_data.domain_id, uri=self.get_mock_url(
'user.id=%s' % user_data.user_id, resource='role_assignments',
'effective=True']), qs_elements=[
status_code=200, 'scope.domain.id=%s' % domain_data.domain_id,
json={'role_assignments': response}, 'user.id=%s' % user_data.user_id,
complete_qs=True) 'effective=True',
]) ],
params = dict(user=user_data.user_id, domain=domain_data.domain_id, ),
effective=True) status_code=200,
json={'role_assignments': response},
complete_qs=True,
)
]
)
params = dict(
user=user_data.user_id,
domain=domain_data.domain_id,
effective=True,
)
ret = self.cloud.list_role_assignments(filters=params) ret = self.cloud.list_role_assignments(filters=params)
self.assertThat(len(ret), matchers.Equals(1)) self.assertThat(len(ret), matchers.Equals(1))
self.assertThat(ret[0].user['id'], matchers.Equals(user_data.user_id)) self.assertThat(ret[0].user['id'], matchers.Equals(user_data.user_id))
self.assertThat(ret[0].role['id'], matchers.Equals(role_data.role_id)) self.assertThat(ret[0].role['id'], matchers.Equals(role_data.role_id))
self.assertThat( self.assertThat(
ret[0].scope['domain']['id'], ret[0].scope['domain']['id'],
matchers.Equals(domain_data.domain_id)) matchers.Equals(domain_data.domain_id),
)
def test_list_role_assignments_exception(self): def test_list_role_assignments_exception(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url(resource='role_assignments'), dict(
status_code=403) method='GET',
]) uri=self.get_mock_url(resource='role_assignments'),
with testtools.ExpectedException( status_code=403,
exceptions.ForbiddenException )
): ]
)
with testtools.ExpectedException(exceptions.ForbiddenException):
self.cloud.list_role_assignments() self.cloud.list_role_assignments()
self.assert_calls() self.assert_calls()

View File

@ -16,29 +16,46 @@ from openstack.tests.unit import base
class TestIdentityUsers(base.TestCase): class TestIdentityUsers(base.TestCase):
def get_mock_url(
def get_mock_url(self, service_type='identity', interface='public', self,
resource='users', append=None, base_url_append='v3', service_type='identity',
qs_elements=None): interface='public',
resource='users',
append=None,
base_url_append='v3',
qs_elements=None,
):
return super(TestIdentityUsers, self).get_mock_url( return super(TestIdentityUsers, self).get_mock_url(
service_type, interface, resource, append, base_url_append, service_type,
qs_elements) interface,
resource,
append,
base_url_append,
qs_elements,
)
def test_create_user(self): def test_create_user(self):
domain_data = self._get_domain_data() domain_data = self._get_domain_data()
user_data = self._get_user_data("myusername", "mypassword", user_data = self._get_user_data(
domain_id=domain_data.domain_id) "myusername", "mypassword", domain_id=domain_data.domain_id
self.register_uris([ )
dict(method='POST', self.register_uris(
uri=self.get_mock_url(), [
status_code=200, dict(
json=user_data.json_response, method='POST',
validate=dict(json=user_data.json_request)) uri=self.get_mock_url(),
]) status_code=200,
json=user_data.json_response,
validate=dict(json=user_data.json_request),
)
]
)
user = self.cloud.create_user(user_data.name, user = self.cloud.create_user(
password=user_data.password, user_data.name,
domain_id=domain_data.domain_id) password=user_data.password,
domain_id=domain_data.domain_id,
)
self.assertIsNotNone(user) self.assertIsNotNone(user)
self.assertThat(user.name, matchers.Equals(user_data.name)) self.assertThat(user.name, matchers.Equals(user_data.name))
@ -46,22 +63,29 @@ class TestIdentityUsers(base.TestCase):
def test_create_user_without_password(self): def test_create_user_without_password(self):
domain_data = self._get_domain_data() domain_data = self._get_domain_data()
user_data = self._get_user_data("myusername", user_data = self._get_user_data(
domain_id=domain_data.domain_id) "myusername", domain_id=domain_data.domain_id
)
user_data._replace( user_data._replace(
password=None, password=None,
json_request=user_data.json_request["user"].pop("password")) json_request=user_data.json_request["user"].pop("password"),
)
self.register_uris([ self.register_uris(
dict(method='POST', [
uri=self.get_mock_url(), dict(
status_code=200, method='POST',
json=user_data.json_response, uri=self.get_mock_url(),
validate=dict(json=user_data.json_request)) status_code=200,
]) json=user_data.json_response,
validate=dict(json=user_data.json_request),
)
]
)
user = self.cloud.create_user(user_data.name, user = self.cloud.create_user(
domain_id=domain_data.domain_id) user_data.name, domain_id=domain_data.domain_id
)
self.assertIsNotNone(user) self.assertIsNotNone(user)
self.assertThat(user.name, matchers.Equals(user_data.name)) self.assertThat(user.name, matchers.Equals(user_data.name))

File diff suppressed because it is too large Load Diff

View File

@ -20,47 +20,59 @@ from openstack.tests.unit import base
class TestImageSnapshot(base.TestCase): class TestImageSnapshot(base.TestCase):
def setUp(self): def setUp(self):
super(TestImageSnapshot, self).setUp() super(TestImageSnapshot, self).setUp()
self.server_id = str(uuid.uuid4()) self.server_id = str(uuid.uuid4())
self.image_id = str(uuid.uuid4()) self.image_id = str(uuid.uuid4())
self.server_name = self.getUniqueString('name') self.server_name = self.getUniqueString('name')
self.fake_server = fakes.make_fake_server( self.fake_server = fakes.make_fake_server(
self.server_id, self.server_name) self.server_id, self.server_name
)
def test_create_image_snapshot_wait_until_active_never_active(self): def test_create_image_snapshot_wait_until_active_never_active(self):
snapshot_name = 'test-snapshot' snapshot_name = 'test-snapshot'
fake_image = fakes.make_fake_image(self.image_id, status='pending') fake_image = fakes.make_fake_image(self.image_id, status='pending')
self.register_uris([ self.register_uris(
self.get_nova_discovery_mock_dict(), [
dict( self.get_nova_discovery_mock_dict(),
method='POST', dict(
uri='{endpoint}/servers/{server_id}/action'.format( method='POST',
endpoint=fakes.COMPUTE_ENDPOINT, uri='{endpoint}/servers/{server_id}/action'.format(
server_id=self.server_id), endpoint=fakes.COMPUTE_ENDPOINT,
headers=dict( server_id=self.server_id,
Location='{endpoint}/images/{image_id}'.format( ),
endpoint='https://images.example.com', headers=dict(
image_id=self.image_id)), Location='{endpoint}/images/{image_id}'.format(
validate=dict( endpoint='https://images.example.com',
json={ image_id=self.image_id,
"createImage": { )
"name": snapshot_name, ),
"metadata": {}, validate=dict(
}})), json={
self.get_glance_discovery_mock_dict(), "createImage": {
dict( "name": snapshot_name,
method='GET', "metadata": {},
uri='https://image.example.com/v2/images', }
json=dict(images=[fake_image])), }
]) ),
),
self.get_glance_discovery_mock_dict(),
dict(
method='GET',
uri='https://image.example.com/v2/images',
json=dict(images=[fake_image]),
),
]
)
self.assertRaises( self.assertRaises(
exc.OpenStackCloudTimeout, exc.OpenStackCloudTimeout,
self.cloud.create_image_snapshot, self.cloud.create_image_snapshot,
snapshot_name, dict(id=self.server_id), snapshot_name,
wait=True, timeout=0.01) dict(id=self.server_id),
wait=True,
timeout=0.01,
)
# After the fifth call, we just keep polling get images for status. # After the fifth call, we just keep polling get images for status.
# Due to mocking sleep, we have no clue how many times we'll call it. # Due to mocking sleep, we have no clue how many times we'll call it.
@ -70,35 +82,46 @@ class TestImageSnapshot(base.TestCase):
snapshot_name = 'test-snapshot' snapshot_name = 'test-snapshot'
pending_image = fakes.make_fake_image(self.image_id, status='pending') pending_image = fakes.make_fake_image(self.image_id, status='pending')
fake_image = fakes.make_fake_image(self.image_id) fake_image = fakes.make_fake_image(self.image_id)
self.register_uris([ self.register_uris(
self.get_nova_discovery_mock_dict(), [
dict( self.get_nova_discovery_mock_dict(),
method='POST', dict(
uri='{endpoint}/servers/{server_id}/action'.format( method='POST',
endpoint=fakes.COMPUTE_ENDPOINT, uri='{endpoint}/servers/{server_id}/action'.format(
server_id=self.server_id), endpoint=fakes.COMPUTE_ENDPOINT,
headers=dict( server_id=self.server_id,
Location='{endpoint}/images/{image_id}'.format( ),
endpoint='https://images.example.com', headers=dict(
image_id=self.image_id)), Location='{endpoint}/images/{image_id}'.format(
validate=dict( endpoint='https://images.example.com',
json={ image_id=self.image_id,
"createImage": { )
"name": snapshot_name, ),
"metadata": {}, validate=dict(
}})), json={
self.get_glance_discovery_mock_dict(), "createImage": {
dict( "name": snapshot_name,
method='GET', "metadata": {},
uri='https://image.example.com/v2/images', }
json=dict(images=[pending_image])), }
dict( ),
method='GET', ),
uri='https://image.example.com/v2/images', self.get_glance_discovery_mock_dict(),
json=dict(images=[fake_image])), dict(
]) method='GET',
uri='https://image.example.com/v2/images',
json=dict(images=[pending_image]),
),
dict(
method='GET',
uri='https://image.example.com/v2/images',
json=dict(images=[fake_image]),
),
]
)
image = self.cloud.create_image_snapshot( image = self.cloud.create_image_snapshot(
'test-snapshot', dict(id=self.server_id), wait=True, timeout=2) 'test-snapshot', dict(id=self.server_id), wait=True, timeout=2
)
self.assertEqual(image['id'], self.image_id) self.assertEqual(image['id'], self.image_id)
self.assert_calls() self.assert_calls()

View File

@ -19,7 +19,6 @@ from openstack.tests.unit import base
class TestInventory(base.TestCase): class TestInventory(base.TestCase):
def setUp(self): def setUp(self):
super(TestInventory, self).setUp() super(TestInventory, self).setUp()
@ -50,8 +49,7 @@ class TestInventory(base.TestCase):
self.assertIsInstance(inv.clouds, list) self.assertIsInstance(inv.clouds, list)
self.assertEqual(1, len(inv.clouds)) self.assertEqual(1, len(inv.clouds))
self.assertFalse(mock_config.return_value.get_all.called) self.assertFalse(mock_config.return_value.get_all.called)
mock_config.return_value.get_one.assert_called_once_with( mock_config.return_value.get_one.assert_called_once_with('supercloud')
'supercloud')
@mock.patch("openstack.config.loader.OpenStackConfig") @mock.patch("openstack.config.loader.OpenStackConfig")
@mock.patch("openstack.connection.Connection") @mock.patch("openstack.connection.Connection")
@ -68,8 +66,9 @@ class TestInventory(base.TestCase):
ret = inv.list_hosts() ret = inv.list_hosts()
inv.clouds[0].list_servers.assert_called_once_with(detailed=True, inv.clouds[0].list_servers.assert_called_once_with(
all_projects=False) detailed=True, all_projects=False
)
self.assertFalse(inv.clouds[0].get_openstack_vars.called) self.assertFalse(inv.clouds[0].get_openstack_vars.called)
self.assertEqual([server], ret) self.assertEqual([server], ret)
@ -81,16 +80,17 @@ class TestInventory(base.TestCase):
inv = inventory.OpenStackInventory() inv = inventory.OpenStackInventory()
server = self.cloud._normalize_server( server = self.cloud._normalize_server(
fakes.make_fake_server( fakes.make_fake_server('1234', 'test', 'ACTIVE', addresses={})
'1234', 'test', 'ACTIVE', addresses={})) )
self.assertIsInstance(inv.clouds, list) self.assertIsInstance(inv.clouds, list)
self.assertEqual(1, len(inv.clouds)) self.assertEqual(1, len(inv.clouds))
inv.clouds[0].list_servers.return_value = [server] inv.clouds[0].list_servers.return_value = [server]
inv.list_hosts(expand=False) inv.list_hosts(expand=False)
inv.clouds[0].list_servers.assert_called_once_with(detailed=False, inv.clouds[0].list_servers.assert_called_once_with(
all_projects=False) detailed=False, all_projects=False
)
self.assertFalse(inv.clouds[0].get_openstack_vars.called) self.assertFalse(inv.clouds[0].get_openstack_vars.called)
@mock.patch("openstack.config.loader.OpenStackConfig") @mock.patch("openstack.config.loader.OpenStackConfig")
@ -108,8 +108,9 @@ class TestInventory(base.TestCase):
ret = inv.list_hosts(all_projects=True) ret = inv.list_hosts(all_projects=True)
inv.clouds[0].list_servers.assert_called_once_with(detailed=True, inv.clouds[0].list_servers.assert_called_once_with(
all_projects=True) detailed=True, all_projects=True
)
self.assertFalse(inv.clouds[0].get_openstack_vars.called) self.assertFalse(inv.clouds[0].get_openstack_vars.called)
self.assertEqual([server], ret) self.assertEqual([server], ret)

View File

@ -19,29 +19,41 @@ from openstack.tests.unit import base
class TestKeypair(base.TestCase): class TestKeypair(base.TestCase):
def setUp(self): def setUp(self):
super(TestKeypair, self).setUp() super(TestKeypair, self).setUp()
self.keyname = self.getUniqueString('key') self.keyname = self.getUniqueString('key')
self.key = fakes.make_fake_keypair(self.keyname) self.key = fakes.make_fake_keypair(self.keyname)
self.useFixture(fixtures.MonkeyPatch( self.useFixture(
'openstack.utils.maximum_supported_microversion', fixtures.MonkeyPatch(
lambda *args, **kwargs: '2.10')) 'openstack.utils.maximum_supported_microversion',
lambda *args, **kwargs: '2.10',
)
)
def test_create_keypair(self): def test_create_keypair(self):
self.register_uris([ self.register_uris(
dict(method='POST', [
uri=self.get_mock_url( dict(
'compute', 'public', append=['os-keypairs']), method='POST',
json={'keypair': self.key}, uri=self.get_mock_url(
validate=dict(json={ 'compute', 'public', append=['os-keypairs']
'keypair': { ),
'name': self.key['name'], json={'keypair': self.key},
'public_key': self.key['public_key']}})), validate=dict(
]) json={
'keypair': {
'name': self.key['name'],
'public_key': self.key['public_key'],
}
}
),
),
]
)
new_key = self.cloud.create_keypair( new_key = self.cloud.create_keypair(
self.keyname, self.key['public_key']) self.keyname, self.key['public_key']
)
new_key_cmp = new_key.to_dict(ignore_none=True) new_key_cmp = new_key.to_dict(ignore_none=True)
new_key_cmp.pop('location') new_key_cmp.pop('location')
new_key_cmp.pop('id') new_key_cmp.pop('id')
@ -50,97 +62,140 @@ class TestKeypair(base.TestCase):
self.assert_calls() self.assert_calls()
def test_create_keypair_exception(self): def test_create_keypair_exception(self):
self.register_uris([ self.register_uris(
dict(method='POST', [
uri=self.get_mock_url( dict(
'compute', 'public', append=['os-keypairs']), method='POST',
status_code=400, uri=self.get_mock_url(
validate=dict(json={ 'compute', 'public', append=['os-keypairs']
'keypair': { ),
'name': self.key['name'], status_code=400,
'public_key': self.key['public_key']}})), validate=dict(
]) json={
'keypair': {
'name': self.key['name'],
'public_key': self.key['public_key'],
}
}
),
),
]
)
self.assertRaises( self.assertRaises(
exc.OpenStackCloudException, exc.OpenStackCloudException,
self.cloud.create_keypair, self.cloud.create_keypair,
self.keyname, self.key['public_key']) self.keyname,
self.key['public_key'],
)
self.assert_calls() self.assert_calls()
def test_delete_keypair(self): def test_delete_keypair(self):
self.register_uris([ self.register_uris(
dict(method='DELETE', [
uri=self.get_mock_url( dict(
'compute', 'public', method='DELETE',
append=['os-keypairs', self.keyname]), uri=self.get_mock_url(
status_code=202), 'compute',
]) 'public',
append=['os-keypairs', self.keyname],
),
status_code=202,
),
]
)
self.assertTrue(self.cloud.delete_keypair(self.keyname)) self.assertTrue(self.cloud.delete_keypair(self.keyname))
self.assert_calls() self.assert_calls()
def test_delete_keypair_not_found(self): def test_delete_keypair_not_found(self):
self.register_uris([ self.register_uris(
dict(method='DELETE', [
uri=self.get_mock_url( dict(
'compute', 'public', method='DELETE',
append=['os-keypairs', self.keyname]), uri=self.get_mock_url(
status_code=404), 'compute',
]) 'public',
append=['os-keypairs', self.keyname],
),
status_code=404,
),
]
)
self.assertFalse(self.cloud.delete_keypair(self.keyname)) self.assertFalse(self.cloud.delete_keypair(self.keyname))
self.assert_calls() self.assert_calls()
def test_list_keypairs(self): def test_list_keypairs(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'compute', 'public', append=['os-keypairs']), method='GET',
json={'keypairs': [{'keypair': self.key}]}), uri=self.get_mock_url(
'compute', 'public', append=['os-keypairs']
]) ),
json={'keypairs': [{'keypair': self.key}]},
),
]
)
keypairs = self.cloud.list_keypairs() keypairs = self.cloud.list_keypairs()
self.assertEqual(len(keypairs), 1) self.assertEqual(len(keypairs), 1)
self.assertEqual(keypairs[0].name, self.key['name']) self.assertEqual(keypairs[0].name, self.key['name'])
self.assert_calls() self.assert_calls()
def test_list_keypairs_empty_filters(self): def test_list_keypairs_empty_filters(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'compute', 'public', append=['os-keypairs']), method='GET',
json={'keypairs': [{'keypair': self.key}]}), uri=self.get_mock_url(
'compute', 'public', append=['os-keypairs']
]) ),
json={'keypairs': [{'keypair': self.key}]},
),
]
)
keypairs = self.cloud.list_keypairs(filters=None) keypairs = self.cloud.list_keypairs(filters=None)
self.assertEqual(len(keypairs), 1) self.assertEqual(len(keypairs), 1)
self.assertEqual(keypairs[0].name, self.key['name']) self.assertEqual(keypairs[0].name, self.key['name'])
self.assert_calls() self.assert_calls()
def test_list_keypairs_notempty_filters(self): def test_list_keypairs_notempty_filters(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'compute', 'public', append=['os-keypairs'], method='GET',
qs_elements=['user_id=b']), uri=self.get_mock_url(
json={'keypairs': [{'keypair': self.key}]}), 'compute',
'public',
]) append=['os-keypairs'],
qs_elements=['user_id=b'],
),
json={'keypairs': [{'keypair': self.key}]},
),
]
)
keypairs = self.cloud.list_keypairs( keypairs = self.cloud.list_keypairs(
filters={'user_id': 'b', 'fake': 'dummy'}) filters={'user_id': 'b', 'fake': 'dummy'}
)
self.assertEqual(len(keypairs), 1) self.assertEqual(len(keypairs), 1)
self.assertEqual(keypairs[0].name, self.key['name']) self.assertEqual(keypairs[0].name, self.key['name'])
self.assert_calls() self.assert_calls()
def test_list_keypairs_exception(self): def test_list_keypairs_exception(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'compute', 'public', append=['os-keypairs']), method='GET',
status_code=400), uri=self.get_mock_url(
'compute', 'public', append=['os-keypairs']
]) ),
self.assertRaises(exc.OpenStackCloudException, status_code=400,
self.cloud.list_keypairs) ),
]
)
self.assertRaises(
exc.OpenStackCloudException, self.cloud.list_keypairs
)
self.assert_calls() self.assert_calls()

View File

@ -14,81 +14,93 @@ from openstack.tests.unit import base
class TestLimits(base.TestCase): class TestLimits(base.TestCase):
def test_get_compute_limits(self): def test_get_compute_limits(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'compute', 'public', append=['limits']), method='GET',
json={ uri=self.get_mock_url(
"limits": { 'compute', 'public', append=['limits']
"absolute": { ),
"maxImageMeta": 128, json={
"maxPersonality": 5, "limits": {
"maxPersonalitySize": 10240, "absolute": {
"maxSecurityGroupRules": 20, "maxImageMeta": 128,
"maxSecurityGroups": 10, "maxPersonality": 5,
"maxServerMeta": 128, "maxPersonalitySize": 10240,
"maxTotalCores": 20, "maxSecurityGroupRules": 20,
"maxTotalFloatingIps": 10, "maxSecurityGroups": 10,
"maxTotalInstances": 10, "maxServerMeta": 128,
"maxTotalKeypairs": 100, "maxTotalCores": 20,
"maxTotalRAMSize": 51200, "maxTotalFloatingIps": 10,
"maxServerGroups": 10, "maxTotalInstances": 10,
"maxServerGroupMembers": 10, "maxTotalKeypairs": 100,
"totalCoresUsed": 0, "maxTotalRAMSize": 51200,
"totalInstancesUsed": 0, "maxServerGroups": 10,
"totalRAMUsed": 0, "maxServerGroupMembers": 10,
"totalSecurityGroupsUsed": 0, "totalCoresUsed": 0,
"totalFloatingIpsUsed": 0, "totalInstancesUsed": 0,
"totalServerGroupsUsed": 0 "totalRAMUsed": 0,
}, "totalSecurityGroupsUsed": 0,
"rate": [] "totalFloatingIpsUsed": 0,
} "totalServerGroupsUsed": 0,
}), },
]) "rate": [],
}
},
),
]
)
self.cloud.get_compute_limits() self.cloud.get_compute_limits()
self.assert_calls() self.assert_calls()
def test_other_get_compute_limits(self): def test_other_get_compute_limits(self):
project = self.mock_for_keystone_projects(project_count=1, project = self.mock_for_keystone_projects(
list_get=True)[0] project_count=1, list_get=True
self.register_uris([ )[0]
dict(method='GET', self.register_uris(
uri=self.get_mock_url( [
'compute', 'public', append=['limits'], dict(
qs_elements=[ method='GET',
'tenant_id={id}'.format(id=project.project_id) uri=self.get_mock_url(
]), 'compute',
json={ 'public',
"limits": { append=['limits'],
"absolute": { qs_elements=[
"maxImageMeta": 128, 'tenant_id={id}'.format(id=project.project_id)
"maxPersonality": 5, ],
"maxPersonalitySize": 10240, ),
"maxSecurityGroupRules": 20, json={
"maxSecurityGroups": 10, "limits": {
"maxServerMeta": 128, "absolute": {
"maxTotalCores": 20, "maxImageMeta": 128,
"maxTotalFloatingIps": 10, "maxPersonality": 5,
"maxTotalInstances": 10, "maxPersonalitySize": 10240,
"maxTotalKeypairs": 100, "maxSecurityGroupRules": 20,
"maxTotalRAMSize": 51200, "maxSecurityGroups": 10,
"maxServerGroups": 10, "maxServerMeta": 128,
"maxServerGroupMembers": 10, "maxTotalCores": 20,
"totalCoresUsed": 0, "maxTotalFloatingIps": 10,
"totalInstancesUsed": 0, "maxTotalInstances": 10,
"totalRAMUsed": 0, "maxTotalKeypairs": 100,
"totalSecurityGroupsUsed": 0, "maxTotalRAMSize": 51200,
"totalFloatingIpsUsed": 0, "maxServerGroups": 10,
"totalServerGroupsUsed": 0 "maxServerGroupMembers": 10,
}, "totalCoresUsed": 0,
"rate": [] "totalInstancesUsed": 0,
} "totalRAMUsed": 0,
}), "totalSecurityGroupsUsed": 0,
]) "totalFloatingIpsUsed": 0,
"totalServerGroupsUsed": 0,
},
"rate": [],
}
},
),
]
)
self.cloud.get_compute_limits(project.project_id) self.cloud.get_compute_limits(project.project_id)

View File

@ -27,14 +27,19 @@ magnum_service_obj = dict(
class TestMagnumServices(base.TestCase): class TestMagnumServices(base.TestCase):
def test_list_magnum_services(self): def test_list_magnum_services(self):
self.register_uris([dict( self.register_uris(
method='GET', [
uri=self.get_mock_url( dict(
service_type='container-infrastructure-management', method='GET',
resource='mservices'), uri=self.get_mock_url(
json=dict(mservices=[magnum_service_obj]))]) service_type='container-infrastructure-management',
resource='mservices',
),
json=dict(mservices=[magnum_service_obj]),
)
]
)
mservices_list = self.cloud.list_magnum_services() mservices_list = self.cloud.list_magnum_services()
self.assertEqual( self.assertEqual(
mservices_list[0].to_dict(computed=False), mservices_list[0].to_dict(computed=False),

File diff suppressed because it is too large Load Diff

View File

@ -57,7 +57,7 @@ class TestNetwork(base.TestCase):
"updated": "2015-01-01T10:00:00-00:00", "updated": "2015-01-01T10:00:00-00:00",
"description": "Availability zone support for router.", "description": "Availability zone support for router.",
"links": [], "links": [],
"name": "Network Availability Zone" "name": "Network Availability Zone",
} }
enabled_neutron_extensions = [network_availability_zone_extension] enabled_neutron_extensions = [network_availability_zone_extension]
@ -65,66 +65,99 @@ class TestNetwork(base.TestCase):
def _compare_networks(self, exp, real): def _compare_networks(self, exp, real):
self.assertDictEqual( self.assertDictEqual(
_network.Network(**exp).to_dict(computed=False), _network.Network(**exp).to_dict(computed=False),
real.to_dict(computed=False)) real.to_dict(computed=False),
)
def test_list_networks(self): def test_list_networks(self):
net1 = {'id': '1', 'name': 'net1'} net1 = {'id': '1', 'name': 'net1'}
net2 = {'id': '2', 'name': 'net2'} net2 = {'id': '2', 'name': 'net2'}
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'network', 'public', append=['v2.0', 'networks']), method='GET',
json={'networks': [net1, net2]}) uri=self.get_mock_url(
]) 'network', 'public', append=['v2.0', 'networks']
),
json={'networks': [net1, net2]},
)
]
)
nets = self.cloud.list_networks() nets = self.cloud.list_networks()
self.assertEqual( self.assertEqual(
[_network.Network(**i).to_dict(computed=False) for i in [ [
net1, net2]], _network.Network(**i).to_dict(computed=False)
[i.to_dict(computed=False) for i in nets]) for i in [net1, net2]
],
[i.to_dict(computed=False) for i in nets],
)
self.assert_calls() self.assert_calls()
def test_list_networks_filtered(self): def test_list_networks_filtered(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'network', 'public', append=['v2.0', 'networks'], method='GET',
qs_elements=["name=test"]), uri=self.get_mock_url(
json={'networks': []}) 'network',
]) 'public',
append=['v2.0', 'networks'],
qs_elements=["name=test"],
),
json={'networks': []},
)
]
)
self.cloud.list_networks(filters={'name': 'test'}) self.cloud.list_networks(filters={'name': 'test'})
self.assert_calls() self.assert_calls()
def test_create_network(self): def test_create_network(self):
self.register_uris([ self.register_uris(
dict(method='POST', [
uri=self.get_mock_url( dict(
'network', 'public', append=['v2.0', 'networks']), method='POST',
json={'network': self.mock_new_network_rep}, uri=self.get_mock_url(
validate=dict( 'network', 'public', append=['v2.0', 'networks']
json={'network': { ),
'admin_state_up': True, json={'network': self.mock_new_network_rep},
'name': 'netname'}})) validate=dict(
]) json={
'network': {
'admin_state_up': True,
'name': 'netname',
}
}
),
)
]
)
network = self.cloud.create_network("netname") network = self.cloud.create_network("netname")
self._compare_networks( self._compare_networks(self.mock_new_network_rep, network)
self.mock_new_network_rep, network)
self.assert_calls() self.assert_calls()
def test_create_network_specific_tenant(self): def test_create_network_specific_tenant(self):
project_id = "project_id_value" project_id = "project_id_value"
mock_new_network_rep = copy.copy(self.mock_new_network_rep) mock_new_network_rep = copy.copy(self.mock_new_network_rep)
mock_new_network_rep['project_id'] = project_id mock_new_network_rep['project_id'] = project_id
self.register_uris([ self.register_uris(
dict(method='POST', [
uri=self.get_mock_url( dict(
'network', 'public', append=['v2.0', 'networks']), method='POST',
json={'network': mock_new_network_rep}, uri=self.get_mock_url(
validate=dict( 'network', 'public', append=['v2.0', 'networks']
json={'network': { ),
'admin_state_up': True, json={'network': mock_new_network_rep},
'name': 'netname', validate=dict(
'project_id': project_id}})) json={
]) 'network': {
'admin_state_up': True,
'name': 'netname',
'project_id': project_id,
}
}
),
)
]
)
network = self.cloud.create_network("netname", project_id=project_id) network = self.cloud.create_network("netname", project_id=project_id)
self._compare_networks(mock_new_network_rep, network) self._compare_networks(mock_new_network_rep, network)
self.assert_calls() self.assert_calls()
@ -132,45 +165,57 @@ class TestNetwork(base.TestCase):
def test_create_network_external(self): def test_create_network_external(self):
mock_new_network_rep = copy.copy(self.mock_new_network_rep) mock_new_network_rep = copy.copy(self.mock_new_network_rep)
mock_new_network_rep['router:external'] = True mock_new_network_rep['router:external'] = True
self.register_uris([ self.register_uris(
dict(method='POST', [
uri=self.get_mock_url( dict(
'network', 'public', append=['v2.0', 'networks']), method='POST',
json={'network': mock_new_network_rep}, uri=self.get_mock_url(
validate=dict( 'network', 'public', append=['v2.0', 'networks']
json={'network': { ),
'admin_state_up': True, json={'network': mock_new_network_rep},
'name': 'netname', validate=dict(
'router:external': True}})) json={
]) 'network': {
'admin_state_up': True,
'name': 'netname',
'router:external': True,
}
}
),
)
]
)
network = self.cloud.create_network("netname", external=True) network = self.cloud.create_network("netname", external=True)
self._compare_networks(mock_new_network_rep, network) self._compare_networks(mock_new_network_rep, network)
self.assert_calls() self.assert_calls()
def test_create_network_provider(self): def test_create_network_provider(self):
provider_opts = {'physical_network': 'mynet', provider_opts = {
'network_type': 'vlan', 'physical_network': 'mynet',
'segmentation_id': 'vlan1'} 'network_type': 'vlan',
'segmentation_id': 'vlan1',
}
new_network_provider_opts = { new_network_provider_opts = {
'provider:physical_network': 'mynet', 'provider:physical_network': 'mynet',
'provider:network_type': 'vlan', 'provider:network_type': 'vlan',
'provider:segmentation_id': 'vlan1' 'provider:segmentation_id': 'vlan1',
} }
mock_new_network_rep = copy.copy(self.mock_new_network_rep) mock_new_network_rep = copy.copy(self.mock_new_network_rep)
mock_new_network_rep.update(new_network_provider_opts) mock_new_network_rep.update(new_network_provider_opts)
expected_send_params = { expected_send_params = {'admin_state_up': True, 'name': 'netname'}
'admin_state_up': True,
'name': 'netname'
}
expected_send_params.update(new_network_provider_opts) expected_send_params.update(new_network_provider_opts)
self.register_uris([ self.register_uris(
dict(method='POST', [
uri=self.get_mock_url( dict(
'network', 'public', append=['v2.0', 'networks']), method='POST',
json={'network': mock_new_network_rep}, uri=self.get_mock_url(
validate=dict( 'network', 'public', append=['v2.0', 'networks']
json={'network': expected_send_params})) ),
]) json={'network': mock_new_network_rep},
validate=dict(json={'network': expected_send_params}),
)
]
)
network = self.cloud.create_network("netname", provider=provider_opts) network = self.cloud.create_network("netname", provider=provider_opts)
self._compare_networks(mock_new_network_rep, network) self._compare_networks(mock_new_network_rep, network)
self.assert_calls() self.assert_calls()
@ -179,89 +224,122 @@ class TestNetwork(base.TestCase):
network_id = "test-net-id" network_id = "test-net-id"
network_name = "network" network_name = "network"
network = {'id': network_id, 'name': network_name} network = {'id': network_id, 'name': network_name}
provider_opts = {'physical_network': 'mynet', provider_opts = {
'network_type': 'vlan', 'physical_network': 'mynet',
'segmentation_id': 'vlan1', 'network_type': 'vlan',
'should_not_be_passed': 1} 'segmentation_id': 'vlan1',
'should_not_be_passed': 1,
}
update_network_provider_opts = { update_network_provider_opts = {
'provider:physical_network': 'mynet', 'provider:physical_network': 'mynet',
'provider:network_type': 'vlan', 'provider:network_type': 'vlan',
'provider:segmentation_id': 'vlan1' 'provider:segmentation_id': 'vlan1',
} }
mock_update_rep = copy.copy(self.mock_new_network_rep) mock_update_rep = copy.copy(self.mock_new_network_rep)
mock_update_rep.update(update_network_provider_opts) mock_update_rep.update(update_network_provider_opts)
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'network', 'public', method='GET',
append=['v2.0', 'networks', network_name]), uri=self.get_mock_url(
status_code=404), 'network',
dict(method='GET', 'public',
uri=self.get_mock_url( append=['v2.0', 'networks', network_name],
'network', 'public', append=['v2.0', 'networks'], ),
qs_elements=['name=%s' % network_name]), status_code=404,
json={'networks': [network]}), ),
dict(method='PUT', dict(
uri=self.get_mock_url( method='GET',
'network', 'public', uri=self.get_mock_url(
append=['v2.0', 'networks', network_id]), 'network',
json={'network': mock_update_rep}, 'public',
validate=dict( append=['v2.0', 'networks'],
json={'network': update_network_provider_opts})) qs_elements=['name=%s' % network_name],
]) ),
json={'networks': [network]},
),
dict(
method='PUT',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'networks', network_id],
),
json={'network': mock_update_rep},
validate=dict(
json={'network': update_network_provider_opts}
),
),
]
)
network = self.cloud.update_network( network = self.cloud.update_network(
network_name, network_name, provider=provider_opts
provider=provider_opts
) )
self._compare_networks(mock_update_rep, network) self._compare_networks(mock_update_rep, network)
self.assert_calls() self.assert_calls()
def test_create_network_with_availability_zone_hints(self): def test_create_network_with_availability_zone_hints(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'network', 'public', append=['v2.0', 'extensions']), method='GET',
json={'extensions': self.enabled_neutron_extensions}), uri=self.get_mock_url(
dict(method='POST', 'network', 'public', append=['v2.0', 'extensions']
uri=self.get_mock_url( ),
'network', 'public', append=['v2.0', 'networks']), json={'extensions': self.enabled_neutron_extensions},
json={'network': self.mock_new_network_rep}, ),
validate=dict( dict(
json={'network': { method='POST',
'admin_state_up': True, uri=self.get_mock_url(
'name': 'netname', 'network', 'public', append=['v2.0', 'networks']
'availability_zone_hints': ['nova']}})) ),
]) json={'network': self.mock_new_network_rep},
network = self.cloud.create_network("netname", validate=dict(
availability_zone_hints=['nova']) json={
'network': {
'admin_state_up': True,
'name': 'netname',
'availability_zone_hints': ['nova'],
}
}
),
),
]
)
network = self.cloud.create_network(
"netname", availability_zone_hints=['nova']
)
self._compare_networks(self.mock_new_network_rep, network) self._compare_networks(self.mock_new_network_rep, network)
self.assert_calls() self.assert_calls()
def test_create_network_provider_ignored_value(self): def test_create_network_provider_ignored_value(self):
provider_opts = {'physical_network': 'mynet', provider_opts = {
'network_type': 'vlan', 'physical_network': 'mynet',
'segmentation_id': 'vlan1', 'network_type': 'vlan',
'should_not_be_passed': 1} 'segmentation_id': 'vlan1',
'should_not_be_passed': 1,
}
new_network_provider_opts = { new_network_provider_opts = {
'provider:physical_network': 'mynet', 'provider:physical_network': 'mynet',
'provider:network_type': 'vlan', 'provider:network_type': 'vlan',
'provider:segmentation_id': 'vlan1' 'provider:segmentation_id': 'vlan1',
} }
mock_new_network_rep = copy.copy(self.mock_new_network_rep) mock_new_network_rep = copy.copy(self.mock_new_network_rep)
mock_new_network_rep.update(new_network_provider_opts) mock_new_network_rep.update(new_network_provider_opts)
expected_send_params = { expected_send_params = {'admin_state_up': True, 'name': 'netname'}
'admin_state_up': True,
'name': 'netname'
}
expected_send_params.update(new_network_provider_opts) expected_send_params.update(new_network_provider_opts)
self.register_uris([ self.register_uris(
dict(method='POST', [
uri=self.get_mock_url( dict(
'network', 'public', append=['v2.0', 'networks']), method='POST',
json={'network': mock_new_network_rep}, uri=self.get_mock_url(
validate=dict( 'network', 'public', append=['v2.0', 'networks']
json={'network': expected_send_params})) ),
]) json={'network': mock_new_network_rep},
validate=dict(json={'network': expected_send_params}),
)
]
)
network = self.cloud.create_network("netname", provider=provider_opts) network = self.cloud.create_network("netname", provider=provider_opts)
self._compare_networks(mock_new_network_rep, network) self._compare_networks(mock_new_network_rep, network)
self.assert_calls() self.assert_calls()
@ -270,16 +348,17 @@ class TestNetwork(base.TestCase):
azh_opts = "invalid" azh_opts = "invalid"
with testtools.ExpectedException( with testtools.ExpectedException(
openstack.cloud.OpenStackCloudException, openstack.cloud.OpenStackCloudException,
"Parameter 'availability_zone_hints' must be a list" "Parameter 'availability_zone_hints' must be a list",
): ):
self.cloud.create_network("netname", self.cloud.create_network(
availability_zone_hints=azh_opts) "netname", availability_zone_hints=azh_opts
)
def test_create_network_provider_wrong_type(self): def test_create_network_provider_wrong_type(self):
provider_opts = "invalid" provider_opts = "invalid"
with testtools.ExpectedException( with testtools.ExpectedException(
openstack.cloud.OpenStackCloudException, openstack.cloud.OpenStackCloudException,
"Parameter 'provider' must be a dict" "Parameter 'provider' must be a dict",
): ):
self.cloud.create_network("netname", provider=provider_opts) self.cloud.create_network("netname", provider=provider_opts)
@ -287,20 +366,28 @@ class TestNetwork(base.TestCase):
port_security_state = False port_security_state = False
mock_new_network_rep = copy.copy(self.mock_new_network_rep) mock_new_network_rep = copy.copy(self.mock_new_network_rep)
mock_new_network_rep['port_security_enabled'] = port_security_state mock_new_network_rep['port_security_enabled'] = port_security_state
self.register_uris([ self.register_uris(
dict(method='POST', [
uri=self.get_mock_url( dict(
'network', 'public', append=['v2.0', 'networks']), method='POST',
json={'network': mock_new_network_rep}, uri=self.get_mock_url(
validate=dict( 'network', 'public', append=['v2.0', 'networks']
json={'network': { ),
'admin_state_up': True, json={'network': mock_new_network_rep},
'name': 'netname', validate=dict(
'port_security_enabled': port_security_state}})) json={
]) 'network': {
'admin_state_up': True,
'name': 'netname',
'port_security_enabled': port_security_state,
}
}
),
)
]
)
network = self.cloud.create_network( network = self.cloud.create_network(
"netname", "netname", port_security_enabled=port_security_state
port_security_enabled=port_security_state
) )
self._compare_networks(mock_new_network_rep, network) self._compare_networks(mock_new_network_rep, network)
self.assert_calls() self.assert_calls()
@ -309,34 +396,41 @@ class TestNetwork(base.TestCase):
mtu_size = 1500 mtu_size = 1500
mock_new_network_rep = copy.copy(self.mock_new_network_rep) mock_new_network_rep = copy.copy(self.mock_new_network_rep)
mock_new_network_rep['mtu'] = mtu_size mock_new_network_rep['mtu'] = mtu_size
self.register_uris([ self.register_uris(
dict(method='POST', [
uri=self.get_mock_url( dict(
'network', 'public', append=['v2.0', 'networks']), method='POST',
json={'network': mock_new_network_rep}, uri=self.get_mock_url(
validate=dict( 'network', 'public', append=['v2.0', 'networks']
json={'network': { ),
'admin_state_up': True, json={'network': mock_new_network_rep},
'name': 'netname', validate=dict(
'mtu': mtu_size}})) json={
]) 'network': {
network = self.cloud.create_network("netname", 'admin_state_up': True,
mtu_size=mtu_size 'name': 'netname',
) 'mtu': mtu_size,
}
}
),
)
]
)
network = self.cloud.create_network("netname", mtu_size=mtu_size)
self._compare_networks(mock_new_network_rep, network) self._compare_networks(mock_new_network_rep, network)
self.assert_calls() self.assert_calls()
def test_create_network_with_wrong_mtu_size(self): def test_create_network_with_wrong_mtu_size(self):
with testtools.ExpectedException( with testtools.ExpectedException(
openstack.cloud.OpenStackCloudException, openstack.cloud.OpenStackCloudException,
"Parameter 'mtu_size' must be greater than 67." "Parameter 'mtu_size' must be greater than 67.",
): ):
self.cloud.create_network("netname", mtu_size=42) self.cloud.create_network("netname", mtu_size=42)
def test_create_network_with_wrong_mtu_type(self): def test_create_network_with_wrong_mtu_type(self):
with testtools.ExpectedException( with testtools.ExpectedException(
openstack.cloud.OpenStackCloudException, openstack.cloud.OpenStackCloudException,
"Parameter 'mtu_size' must be an integer." "Parameter 'mtu_size' must be an integer.",
): ):
self.cloud.create_network("netname", mtu_size="fourty_two") self.cloud.create_network("netname", mtu_size="fourty_two")
@ -344,39 +438,65 @@ class TestNetwork(base.TestCase):
network_id = "test-net-id" network_id = "test-net-id"
network_name = "network" network_name = "network"
network = {'id': network_id, 'name': network_name} network = {'id': network_id, 'name': network_name}
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'network', 'public', method='GET',
append=['v2.0', 'networks', network_name]), uri=self.get_mock_url(
status_code=404), 'network',
dict(method='GET', 'public',
uri=self.get_mock_url( append=['v2.0', 'networks', network_name],
'network', 'public', append=['v2.0', 'networks'], ),
qs_elements=['name=%s' % network_name]), status_code=404,
json={'networks': [network]}), ),
dict(method='DELETE', dict(
uri=self.get_mock_url( method='GET',
'network', 'public', uri=self.get_mock_url(
append=['v2.0', 'networks', network_id]), 'network',
json={}) 'public',
]) append=['v2.0', 'networks'],
qs_elements=['name=%s' % network_name],
),
json={'networks': [network]},
),
dict(
method='DELETE',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'networks', network_id],
),
json={},
),
]
)
self.assertTrue(self.cloud.delete_network(network_name)) self.assertTrue(self.cloud.delete_network(network_name))
self.assert_calls() self.assert_calls()
def test_delete_network_not_found(self): def test_delete_network_not_found(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'network', 'public', method='GET',
append=['v2.0', 'networks', 'test-net']), uri=self.get_mock_url(
status_code=404), 'network',
dict(method='GET', 'public',
uri=self.get_mock_url( append=['v2.0', 'networks', 'test-net'],
'network', 'public', append=['v2.0', 'networks'], ),
qs_elements=['name=test-net']), status_code=404,
json={'networks': []}), ),
]) dict(
method='GET',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'networks'],
qs_elements=['name=test-net'],
),
json={'networks': []},
),
]
)
self.assertFalse(self.cloud.delete_network('test-net')) self.assertFalse(self.cloud.delete_network('test-net'))
self.assert_calls() self.assert_calls()
@ -384,37 +504,61 @@ class TestNetwork(base.TestCase):
network_id = "test-net-id" network_id = "test-net-id"
network_name = "network" network_name = "network"
network = {'id': network_id, 'name': network_name} network = {'id': network_id, 'name': network_name}
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'network', 'public', method='GET',
append=['v2.0', 'networks', network_name]), uri=self.get_mock_url(
status_code=404), 'network',
dict(method='GET', 'public',
uri=self.get_mock_url( append=['v2.0', 'networks', network_name],
'network', 'public', append=['v2.0', 'networks'], ),
qs_elements=['name=%s' % network_name]), status_code=404,
json={'networks': [network]}), ),
dict(method='DELETE', dict(
uri=self.get_mock_url( method='GET',
'network', 'public', uri=self.get_mock_url(
append=['v2.0', 'networks', network_id]), 'network',
status_code=503) 'public',
]) append=['v2.0', 'networks'],
self.assertRaises(openstack.cloud.OpenStackCloudException, qs_elements=['name=%s' % network_name],
self.cloud.delete_network, network_name) ),
json={'networks': [network]},
),
dict(
method='DELETE',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'networks', network_id],
),
status_code=503,
),
]
)
self.assertRaises(
openstack.cloud.OpenStackCloudException,
self.cloud.delete_network,
network_name,
)
self.assert_calls() self.assert_calls()
def test_get_network_by_id(self): def test_get_network_by_id(self):
network_id = "test-net-id" network_id = "test-net-id"
network_name = "network" network_name = "network"
network = {'id': network_id, 'name': network_name} network = {'id': network_id, 'name': network_name}
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'network', 'public', method='GET',
append=['v2.0', 'networks', network_id]), uri=self.get_mock_url(
json={'network': network}) 'network',
]) 'public',
append=['v2.0', 'networks', network_id],
),
json={'network': network},
)
]
)
self.assertTrue(self.cloud.get_network_by_id(network_id)) self.assertTrue(self.cloud.get_network_by_id(network_id))
self.assert_calls() self.assert_calls()

File diff suppressed because it is too large Load Diff

View File

@ -19,7 +19,6 @@ from openstack.tests.unit import base
class TestSearch(base.TestCase): class TestSearch(base.TestCase):
class FakeResource(resource.Resource): class FakeResource(resource.Resource):
allow_fetch = True allow_fetch = True
allow_list = True allow_list = True
@ -33,9 +32,7 @@ class TestSearch(base.TestCase):
self.session._sdk_connection = self.cloud self.session._sdk_connection = self.cloud
self.session._get = mock.Mock() self.session._get = mock.Mock()
self.session._list = mock.Mock() self.session._list = mock.Mock()
self.session._resource_registry = dict( self.session._resource_registry = dict(fake=self.FakeResource)
fake=self.FakeResource
)
# Set the mock into the cloud connection # Set the mock into the cloud connection
setattr(self.cloud, "mock_session", self.session) setattr(self.cloud, "mock_session", self.session)
@ -44,7 +41,7 @@ class TestSearch(base.TestCase):
exceptions.SDKException, exceptions.SDKException,
self.cloud.search_resources, self.cloud.search_resources,
"wrong_service.wrong_resource", "wrong_service.wrong_resource",
"name" "name",
) )
def test_raises_unknown_resource(self): def test_raises_unknown_resource(self):
@ -52,44 +49,33 @@ class TestSearch(base.TestCase):
exceptions.SDKException, exceptions.SDKException,
self.cloud.search_resources, self.cloud.search_resources,
"mock_session.wrong_resource", "mock_session.wrong_resource",
"name" "name",
) )
def test_search_resources_get_finds(self): def test_search_resources_get_finds(self):
self.session._get.return_value = self.FakeResource(foo="bar") self.session._get.return_value = self.FakeResource(foo="bar")
ret = self.cloud.search_resources( ret = self.cloud.search_resources("mock_session.fake", "fake_name")
"mock_session.fake", self.session._get.assert_called_with(self.FakeResource, "fake_name")
"fake_name"
)
self.session._get.assert_called_with(
self.FakeResource, "fake_name")
self.assertEqual(1, len(ret)) self.assertEqual(1, len(ret))
self.assertEqual( self.assertEqual(
self.FakeResource(foo="bar").to_dict(), self.FakeResource(foo="bar").to_dict(), ret[0].to_dict()
ret[0].to_dict()
) )
def test_search_resources_list(self): def test_search_resources_list(self):
self.session._get.side_effect = exceptions.ResourceNotFound self.session._get.side_effect = exceptions.ResourceNotFound
self.session._list.return_value = [ self.session._list.return_value = [self.FakeResource(foo="bar")]
self.FakeResource(foo="bar")
]
ret = self.cloud.search_resources( ret = self.cloud.search_resources("mock_session.fake", "fake_name")
"mock_session.fake", self.session._get.assert_called_with(self.FakeResource, "fake_name")
"fake_name"
)
self.session._get.assert_called_with(
self.FakeResource, "fake_name")
self.session._list.assert_called_with( self.session._list.assert_called_with(
self.FakeResource, name="fake_name") self.FakeResource, name="fake_name"
)
self.assertEqual(1, len(ret)) self.assertEqual(1, len(ret))
self.assertEqual( self.assertEqual(
self.FakeResource(foo="bar").to_dict(), self.FakeResource(foo="bar").to_dict(), ret[0].to_dict()
ret[0].to_dict()
) )
def test_search_resources_args(self): def test_search_resources_args(self):
@ -103,33 +89,27 @@ class TestSearch(base.TestCase):
get_kwargs={"getkwarg1": "1"}, get_kwargs={"getkwarg1": "1"},
list_args=["listarg1"], list_args=["listarg1"],
list_kwargs={"listkwarg1": "1"}, list_kwargs={"listkwarg1": "1"},
filter1="foo" filter1="foo",
) )
self.session._get.assert_called_with( self.session._get.assert_called_with(
self.FakeResource, "fake_name", self.FakeResource, "fake_name", "getarg1", getkwarg1="1"
"getarg1", getkwarg1="1") )
self.session._list.assert_called_with( self.session._list.assert_called_with(
self.FakeResource, self.FakeResource,
"listarg1", listkwarg1="1", "listarg1",
name="fake_name", filter1="foo" listkwarg1="1",
name="fake_name",
filter1="foo",
) )
def test_search_resources_name_empty(self): def test_search_resources_name_empty(self):
self.session._list.return_value = [ self.session._list.return_value = [self.FakeResource(foo="bar")]
self.FakeResource(foo="bar")
]
ret = self.cloud.search_resources( ret = self.cloud.search_resources("mock_session.fake", None, foo="bar")
"mock_session.fake",
None,
foo="bar"
)
self.session._get.assert_not_called() self.session._get.assert_not_called()
self.session._list.assert_called_with( self.session._list.assert_called_with(self.FakeResource, foo="bar")
self.FakeResource, foo="bar")
self.assertEqual(1, len(ret)) self.assertEqual(1, len(ret))
self.assertEqual( self.assertEqual(
self.FakeResource(foo="bar").to_dict(), self.FakeResource(foo="bar").to_dict(), ret[0].to_dict()
ret[0].to_dict()
) )

View File

@ -22,7 +22,6 @@ from openstack.tests.unit import base
class TestOperatorCloud(base.TestCase): class TestOperatorCloud(base.TestCase):
def test_get_image_name(self): def test_get_image_name(self):
self.use_glance() self.use_glance()
@ -30,14 +29,20 @@ class TestOperatorCloud(base.TestCase):
fake_image = fakes.make_fake_image(image_id=image_id) fake_image = fakes.make_fake_image(image_id=image_id)
list_return = {'images': [fake_image]} list_return = {'images': [fake_image]}
self.register_uris([ self.register_uris(
dict(method='GET', [
uri='https://image.example.com/v2/images', dict(
json=list_return), method='GET',
dict(method='GET', uri='https://image.example.com/v2/images',
uri='https://image.example.com/v2/images', json=list_return,
json=list_return), ),
]) dict(
method='GET',
uri='https://image.example.com/v2/images',
json=list_return,
),
]
)
self.assertEqual('fake_image', self.cloud.get_image_name(image_id)) self.assertEqual('fake_image', self.cloud.get_image_name(image_id))
self.assertEqual('fake_image', self.cloud.get_image_name('fake_image')) self.assertEqual('fake_image', self.cloud.get_image_name('fake_image'))
@ -51,14 +56,20 @@ class TestOperatorCloud(base.TestCase):
fake_image = fakes.make_fake_image(image_id=image_id) fake_image = fakes.make_fake_image(image_id=image_id)
list_return = {'images': [fake_image]} list_return = {'images': [fake_image]}
self.register_uris([ self.register_uris(
dict(method='GET', [
uri='https://image.example.com/v2/images', dict(
json=list_return), method='GET',
dict(method='GET', uri='https://image.example.com/v2/images',
uri='https://image.example.com/v2/images', json=list_return,
json=list_return), ),
]) dict(
method='GET',
uri='https://image.example.com/v2/images',
json=list_return,
),
]
)
self.assertEqual(image_id, self.cloud.get_image_id(image_id)) self.assertEqual(image_id, self.cloud.get_image_id(image_id))
self.assertEqual(image_id, self.cloud.get_image_id('fake_image')) self.assertEqual(image_id, self.cloud.get_image_id('fake_image'))
@ -72,15 +83,17 @@ class TestOperatorCloud(base.TestCase):
def side_effect(*args, **kwargs): def side_effect(*args, **kwargs):
raise FakeException("No service") raise FakeException("No service")
session_mock = mock.Mock() session_mock = mock.Mock()
session_mock.get_endpoint.side_effect = side_effect session_mock.get_endpoint.side_effect = side_effect
get_session_mock.return_value = session_mock get_session_mock.return_value = session_mock
self.cloud.name = 'testcloud' self.cloud.name = 'testcloud'
self.cloud.config.config['region_name'] = 'testregion' self.cloud.config.config['region_name'] = 'testregion'
with testtools.ExpectedException( with testtools.ExpectedException(
exc.OpenStackCloudException, exc.OpenStackCloudException,
"Error getting image endpoint on testcloud:testregion:" "Error getting image endpoint on testcloud:testregion:"
" No service"): " No service",
):
self.cloud.get_session_endpoint("image") self.cloud.get_session_endpoint("image")
@mock.patch.object(cloud_region.CloudRegion, 'get_session') @mock.patch.object(cloud_region.CloudRegion, 'get_session')
@ -97,8 +110,11 @@ class TestOperatorCloud(base.TestCase):
get_session_mock.return_value = session_mock get_session_mock.return_value = session_mock
self.cloud.get_session_endpoint('identity') self.cloud.get_session_endpoint('identity')
kwargs = dict( kwargs = dict(
interface='public', region_name='RegionOne', interface='public',
service_name=None, service_type='identity') region_name='RegionOne',
service_name=None,
service_type='identity',
)
session_mock.get_endpoint.assert_called_with(**kwargs) session_mock.get_endpoint.assert_called_with(**kwargs)
@ -122,23 +138,23 @@ class TestOperatorCloud(base.TestCase):
uuid1 = uuid.uuid4().hex uuid1 = uuid.uuid4().hex
uuid2 = uuid.uuid4().hex uuid2 = uuid.uuid4().hex
self.use_compute_discovery() self.use_compute_discovery()
self.register_uris([ self.register_uris(
dict( [
method='GET', dict(
uri='https://compute.example.com/v2.1/os-hypervisors/detail', method='GET',
json={ uri='https://compute.example.com/v2.1/os-hypervisors/detail', # noqa: E501
'hypervisors': [ json={
fakes.make_fake_hypervisor(uuid1, 'testserver1'), 'hypervisors': [
fakes.make_fake_hypervisor(uuid2, 'testserver2'), fakes.make_fake_hypervisor(uuid1, 'testserver1'),
] fakes.make_fake_hypervisor(uuid2, 'testserver2'),
}, ]
validate={ },
'headers': { validate={
'OpenStack-API-Version': 'compute 2.53' 'headers': {'OpenStack-API-Version': 'compute 2.53'}
} },
} ),
), ]
]) )
r = self.cloud.list_hypervisors() r = self.cloud.list_hypervisors()
@ -154,19 +170,22 @@ class TestOperatorCloud(base.TestCase):
'''This test verifies that calling list_hypervisors on a pre-2.53 cloud '''This test verifies that calling list_hypervisors on a pre-2.53 cloud
calls the old version.''' calls the old version.'''
self.use_compute_discovery( self.use_compute_discovery(
compute_version_json='old-compute-version.json') compute_version_json='old-compute-version.json'
self.register_uris([ )
dict( self.register_uris(
method='GET', [
uri='https://compute.example.com/v2.1/os-hypervisors/detail', dict(
json={ method='GET',
'hypervisors': [ uri='https://compute.example.com/v2.1/os-hypervisors/detail', # noqa: E501
fakes.make_fake_hypervisor('1', 'testserver1'), json={
fakes.make_fake_hypervisor('2', 'testserver2'), 'hypervisors': [
] fakes.make_fake_hypervisor('1', 'testserver1'),
} fakes.make_fake_hypervisor('2', 'testserver2'),
), ]
]) },
),
]
)
r = self.cloud.list_hypervisors() r = self.cloud.list_hypervisors()

View File

@ -32,19 +32,34 @@ class TestOpenStackCloudOperatorNoAuth(base.TestCase):
# By clearing the URI registry, we remove all calls to a keystone # By clearing the URI registry, we remove all calls to a keystone
# catalog or getting a token # catalog or getting a token
self._uri_registry.clear() self._uri_registry.clear()
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
service_type='baremetal', base_url_append='v1'), method='GET',
json={'id': 'v1', uri=self.get_mock_url(
'links': [{"href": "https://baremetal.example.com/v1", service_type='baremetal', base_url_append='v1'
"rel": "self"}]}), ),
dict(method='GET', json={
uri=self.get_mock_url( 'id': 'v1',
service_type='baremetal', base_url_append='v1', 'links': [
resource='nodes'), {
json={'nodes': []}), "href": "https://baremetal.example.com/v1",
]) "rel": "self",
}
],
},
),
dict(
method='GET',
uri=self.get_mock_url(
service_type='baremetal',
base_url_append='v1',
resource='nodes',
),
json={'nodes': []},
),
]
)
def test_ironic_noauth_none_auth_type(self): def test_ironic_noauth_none_auth_type(self):
"""Test noauth selection for Ironic in OpenStackCloud """Test noauth selection for Ironic in OpenStackCloud
@ -58,7 +73,8 @@ class TestOpenStackCloudOperatorNoAuth(base.TestCase):
# client library. # client library.
self.cloud_noauth = openstack.connect( self.cloud_noauth = openstack.connect(
auth_type='none', auth_type='none',
baremetal_endpoint_override="https://baremetal.example.com/v1") baremetal_endpoint_override="https://baremetal.example.com/v1",
)
self.cloud_noauth.list_machines() self.cloud_noauth.list_machines()
@ -92,8 +108,9 @@ class TestOpenStackCloudOperatorNoAuth(base.TestCase):
self.cloud_noauth = openstack.connect( self.cloud_noauth = openstack.connect(
auth_type='admin_token', auth_type='admin_token',
auth=dict( auth=dict(
endpoint='https://baremetal.example.com/v1', endpoint='https://baremetal.example.com/v1', token='ignored'
token='ignored')) ),
)
self.cloud_noauth.list_machines() self.cloud_noauth.list_machines()
@ -116,65 +133,94 @@ class TestOpenStackCloudOperatorNoAuthUnversioned(base.TestCase):
# By clearing the URI registry, we remove all calls to a keystone # By clearing the URI registry, we remove all calls to a keystone
# catalog or getting a token # catalog or getting a token
self._uri_registry.clear() self._uri_registry.clear()
self.register_uris([ self.register_uris(
dict(method='GET', [
uri='https://baremetal.example.com/', dict(
json={ method='GET',
"default_version": { uri='https://baremetal.example.com/',
"status": "CURRENT", json={
"min_version": "1.1", "default_version": {
"version": "1.46", "status": "CURRENT",
"id": "v1", "min_version": "1.1",
"links": [{ "version": "1.46",
"href": "https://baremetal.example.com/v1", "id": "v1",
"rel": "self" "links": [
}]}, {
"versions": [{ "href": "https://baremetal.example.com/v1",
"status": "CURRENT", "rel": "self",
"min_version": "1.1", }
"version": "1.46", ],
"id": "v1", },
"links": [{ "versions": [
"href": "https://baremetal.example.com/v1", {
"rel": "self" "status": "CURRENT",
}]}], "min_version": "1.1",
"name": "OpenStack Ironic API", "version": "1.46",
"description": "Ironic is an OpenStack project." "id": "v1",
}), "links": [
dict(method='GET', {
uri=self.get_mock_url( "href": "https://baremetal.example.com/v1", # noqa: E501
service_type='baremetal', base_url_append='v1'), "rel": "self",
json={ }
"media_types": [{ ],
"base": "application/json", }
"type": "application/vnd.openstack.ironic.v1+json" ],
}], "name": "OpenStack Ironic API",
"links": [{ "description": "Ironic is an OpenStack project.",
"href": "https://baremetal.example.com/v1", },
"rel": "self" ),
}], dict(
"ports": [{ method='GET',
"href": "https://baremetal.example.com/v1/ports/", uri=self.get_mock_url(
"rel": "self" service_type='baremetal', base_url_append='v1'
}, { ),
"href": "https://baremetal.example.com/ports/", json={
"rel": "bookmark" "media_types": [
}], {
"nodes": [{ "base": "application/json",
"href": "https://baremetal.example.com/v1/nodes/", "type": "application/vnd.openstack.ironic.v1+json", # noqa: E501
"rel": "self" }
}, { ],
"href": "https://baremetal.example.com/nodes/", "links": [
"rel": "bookmark" {
}], "href": "https://baremetal.example.com/v1",
"id": "v1" "rel": "self",
}), }
dict(method='GET', ],
uri=self.get_mock_url( "ports": [
service_type='baremetal', base_url_append='v1', {
resource='nodes'), "href": "https://baremetal.example.com/v1/ports/", # noqa: E501
json={'nodes': []}), "rel": "self",
]) },
{
"href": "https://baremetal.example.com/ports/",
"rel": "bookmark",
},
],
"nodes": [
{
"href": "https://baremetal.example.com/v1/nodes/", # noqa: E501
"rel": "self",
},
{
"href": "https://baremetal.example.com/nodes/",
"rel": "bookmark",
},
],
"id": "v1",
},
),
dict(
method='GET',
uri=self.get_mock_url(
service_type='baremetal',
base_url_append='v1',
resource='nodes',
),
json={'nodes': []},
),
]
)
def test_ironic_noauth_none_auth_type(self): def test_ironic_noauth_none_auth_type(self):
"""Test noauth selection for Ironic in OpenStackCloud """Test noauth selection for Ironic in OpenStackCloud
@ -188,7 +234,8 @@ class TestOpenStackCloudOperatorNoAuthUnversioned(base.TestCase):
# client library. # client library.
self.cloud_noauth = openstack.connect( self.cloud_noauth = openstack.connect(
auth_type='none', auth_type='none',
baremetal_endpoint_override="https://baremetal.example.com") baremetal_endpoint_override="https://baremetal.example.com",
)
self.cloud_noauth.list_machines() self.cloud_noauth.list_machines()

View File

@ -42,14 +42,11 @@ class TestPort(base.TestCase):
'mac_address': '50:1c:0d:e4:f0:0d', 'mac_address': '50:1c:0d:e4:f0:0d',
'binding:profile': {}, 'binding:profile': {},
'fixed_ips': [ 'fixed_ips': [
{ {'subnet_id': 'test-subnet-id', 'ip_address': '29.29.29.29'}
'subnet_id': 'test-subnet-id',
'ip_address': '29.29.29.29'
}
], ],
'id': 'test-port-id', 'id': 'test-port-id',
'security_groups': [], 'security_groups': [],
'device_id': '' 'device_id': '',
} }
} }
@ -70,14 +67,11 @@ class TestPort(base.TestCase):
'mac_address': '50:1c:0d:e4:f0:0d', 'mac_address': '50:1c:0d:e4:f0:0d',
'binding:profile': {}, 'binding:profile': {},
'fixed_ips': [ 'fixed_ips': [
{ {'subnet_id': 'test-subnet-id', 'ip_address': '29.29.29.29'}
'subnet_id': 'test-subnet-id',
'ip_address': '29.29.29.29'
}
], ],
'id': 'test-port-id', 'id': 'test-port-id',
'security_groups': [], 'security_groups': [],
'device_id': '' 'device_id': '',
} }
} }
@ -94,7 +88,7 @@ class TestPort(base.TestCase):
'extra_dhcp_opts': [], 'extra_dhcp_opts': [],
'binding:vif_details': { 'binding:vif_details': {
'port_filter': True, 'port_filter': True,
'ovs_hybrid_plug': True 'ovs_hybrid_plug': True,
}, },
'binding:vif_type': 'ovs', 'binding:vif_type': 'ovs',
'device_owner': 'network:router_gateway', 'device_owner': 'network:router_gateway',
@ -104,12 +98,12 @@ class TestPort(base.TestCase):
'fixed_ips': [ 'fixed_ips': [
{ {
'subnet_id': '008ba151-0b8c-4a67-98b5-0d2b87666062', 'subnet_id': '008ba151-0b8c-4a67-98b5-0d2b87666062',
'ip_address': '172.24.4.2' 'ip_address': '172.24.4.2',
} }
], ],
'id': 'd80b1a3b-4fc1-49f3-952e-1e2ab7081d8b', 'id': 'd80b1a3b-4fc1-49f3-952e-1e2ab7081d8b',
'security_groups': [], 'security_groups': [],
'device_id': '9ae135f4-b6e0-4dad-9e91-3c223e385824' 'device_id': '9ae135f4-b6e0-4dad-9e91-3c223e385824',
}, },
{ {
'status': 'ACTIVE', 'status': 'ACTIVE',
@ -122,7 +116,7 @@ class TestPort(base.TestCase):
'extra_dhcp_opts': [], 'extra_dhcp_opts': [],
'binding:vif_details': { 'binding:vif_details': {
'port_filter': True, 'port_filter': True,
'ovs_hybrid_plug': True 'ovs_hybrid_plug': True,
}, },
'binding:vif_type': 'ovs', 'binding:vif_type': 'ovs',
'device_owner': 'network:router_interface', 'device_owner': 'network:router_interface',
@ -132,104 +126,155 @@ class TestPort(base.TestCase):
'fixed_ips': [ 'fixed_ips': [
{ {
'subnet_id': '288bf4a1-51ba-43b6-9d0a-520e9005db17', 'subnet_id': '288bf4a1-51ba-43b6-9d0a-520e9005db17',
'ip_address': '10.0.0.1' 'ip_address': '10.0.0.1',
} }
], ],
'id': 'f71a6703-d6de-4be1-a91a-a570ede1d159', 'id': 'f71a6703-d6de-4be1-a91a-a570ede1d159',
'security_groups': [], 'security_groups': [],
'device_id': '9ae135f4-b6e0-4dad-9e91-3c223e385824' 'device_id': '9ae135f4-b6e0-4dad-9e91-3c223e385824',
} },
] ]
} }
def _compare_ports(self, exp, real): def _compare_ports(self, exp, real):
self.assertDictEqual( self.assertDictEqual(
_port.Port(**exp).to_dict(computed=False), _port.Port(**exp).to_dict(computed=False),
real.to_dict(computed=False)) real.to_dict(computed=False),
)
def test_create_port(self): def test_create_port(self):
self.register_uris([ self.register_uris(
dict(method="POST", [
uri=self.get_mock_url( dict(
'network', 'public', append=['v2.0', 'ports']), method="POST",
json=self.mock_neutron_port_create_rep, uri=self.get_mock_url(
validate=dict( 'network', 'public', append=['v2.0', 'ports']
json={'port': { ),
'network_id': 'test-net-id', json=self.mock_neutron_port_create_rep,
'name': 'test-port-name', validate=dict(
'admin_state_up': True}})) json={
]) 'port': {
'network_id': 'test-net-id',
'name': 'test-port-name',
'admin_state_up': True,
}
}
),
)
]
)
port = self.cloud.create_port( port = self.cloud.create_port(
network_id='test-net-id', name='test-port-name', network_id='test-net-id',
admin_state_up=True) name='test-port-name',
admin_state_up=True,
)
self._compare_ports(self.mock_neutron_port_create_rep['port'], port) self._compare_ports(self.mock_neutron_port_create_rep['port'], port)
self.assert_calls() self.assert_calls()
def test_create_port_parameters(self): def test_create_port_parameters(self):
"""Test that we detect invalid arguments passed to create_port""" """Test that we detect invalid arguments passed to create_port"""
self.assertRaises( self.assertRaises(
TypeError, self.cloud.create_port, TypeError,
network_id='test-net-id', nome='test-port-name', self.cloud.create_port,
stato_amministrativo_porta=True) network_id='test-net-id',
nome='test-port-name',
stato_amministrativo_porta=True,
)
def test_create_port_exception(self): def test_create_port_exception(self):
self.register_uris([ self.register_uris(
dict(method="POST", [
uri=self.get_mock_url( dict(
'network', 'public', append=['v2.0', 'ports']), method="POST",
status_code=500, uri=self.get_mock_url(
validate=dict( 'network', 'public', append=['v2.0', 'ports']
json={'port': { ),
'network_id': 'test-net-id', status_code=500,
'name': 'test-port-name', validate=dict(
'admin_state_up': True}})) json={
]) 'port': {
'network_id': 'test-net-id',
'name': 'test-port-name',
'admin_state_up': True,
}
}
),
)
]
)
self.assertRaises( self.assertRaises(
OpenStackCloudException, self.cloud.create_port, OpenStackCloudException,
network_id='test-net-id', name='test-port-name', self.cloud.create_port,
admin_state_up=True) network_id='test-net-id',
name='test-port-name',
admin_state_up=True,
)
self.assert_calls() self.assert_calls()
def test_create_port_with_project(self): def test_create_port_with_project(self):
self.mock_neutron_port_create_rep["port"].update( self.mock_neutron_port_create_rep["port"].update(
{ {
'project_id': 'test-project-id', 'project_id': 'test-project-id',
}) }
self.register_uris([ )
dict(method="POST", self.register_uris(
uri=self.get_mock_url( [
'network', 'public', append=['v2.0', 'ports']), dict(
json=self.mock_neutron_port_create_rep, method="POST",
validate=dict( uri=self.get_mock_url(
json={'port': { 'network', 'public', append=['v2.0', 'ports']
'network_id': 'test-net-id', ),
'project_id': 'test-project-id', json=self.mock_neutron_port_create_rep,
'name': 'test-port-name', validate=dict(
'admin_state_up': True}})) json={
]) 'port': {
'network_id': 'test-net-id',
'project_id': 'test-project-id',
'name': 'test-port-name',
'admin_state_up': True,
}
}
),
)
]
)
port = self.cloud.create_port( port = self.cloud.create_port(
network_id='test-net-id', name='test-port-name', network_id='test-net-id',
admin_state_up=True, project_id='test-project-id') name='test-port-name',
admin_state_up=True,
project_id='test-project-id',
)
self._compare_ports(self.mock_neutron_port_create_rep['port'], port) self._compare_ports(self.mock_neutron_port_create_rep['port'], port)
self.assert_calls() self.assert_calls()
def test_update_port(self): def test_update_port(self):
port_id = 'd80b1a3b-4fc1-49f3-952e-1e2ab7081d8b' port_id = 'd80b1a3b-4fc1-49f3-952e-1e2ab7081d8b'
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'network', 'public', append=['v2.0', 'ports', port_id]), method='GET',
json=dict(port=self.mock_neutron_port_list_rep['ports'][0])), uri=self.get_mock_url(
dict(method='PUT', 'network', 'public', append=['v2.0', 'ports', port_id]
uri=self.get_mock_url( ),
'network', 'public', json=dict(
append=['v2.0', 'ports', port_id]), port=self.mock_neutron_port_list_rep['ports'][0]
json=self.mock_neutron_port_update_rep, ),
validate=dict( ),
json={'port': {'name': 'test-port-name-updated'}})) dict(
]) method='PUT',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports', port_id]
),
json=self.mock_neutron_port_update_rep,
validate=dict(
json={'port': {'name': 'test-port-name-updated'}}
),
),
]
)
port = self.cloud.update_port( port = self.cloud.update_port(
name_or_id=port_id, name='test-port-name-updated') name_or_id=port_id, name='test-port-name-updated'
)
self._compare_ports(self.mock_neutron_port_update_rep['port'], port) self._compare_ports(self.mock_neutron_port_update_rep['port'], port)
self.assert_calls() self.assert_calls()
@ -237,72 +282,107 @@ class TestPort(base.TestCase):
def test_update_port_parameters(self): def test_update_port_parameters(self):
"""Test that we detect invalid arguments passed to update_port""" """Test that we detect invalid arguments passed to update_port"""
self.assertRaises( self.assertRaises(
TypeError, self.cloud.update_port, TypeError,
name_or_id='test-port-id', nome='test-port-name-updated') self.cloud.update_port,
name_or_id='test-port-id',
nome='test-port-name-updated',
)
def test_update_port_exception(self): def test_update_port_exception(self):
port_id = 'd80b1a3b-4fc1-49f3-952e-1e2ab7081d8b' port_id = 'd80b1a3b-4fc1-49f3-952e-1e2ab7081d8b'
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'network', 'public', append=['v2.0', 'ports', port_id]), method='GET',
json=self.mock_neutron_port_list_rep), uri=self.get_mock_url(
dict(method='PUT', 'network', 'public', append=['v2.0', 'ports', port_id]
uri=self.get_mock_url( ),
'network', 'public', json=self.mock_neutron_port_list_rep,
append=['v2.0', 'ports', port_id]), ),
status_code=500, dict(
validate=dict( method='PUT',
json={'port': {'name': 'test-port-name-updated'}})) uri=self.get_mock_url(
]) 'network', 'public', append=['v2.0', 'ports', port_id]
),
status_code=500,
validate=dict(
json={'port': {'name': 'test-port-name-updated'}}
),
),
]
)
self.assertRaises( self.assertRaises(
OpenStackCloudException, self.cloud.update_port, OpenStackCloudException,
self.cloud.update_port,
name_or_id='d80b1a3b-4fc1-49f3-952e-1e2ab7081d8b', name_or_id='d80b1a3b-4fc1-49f3-952e-1e2ab7081d8b',
name='test-port-name-updated') name='test-port-name-updated',
)
self.assert_calls() self.assert_calls()
def test_list_ports(self): def test_list_ports(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'network', 'public', append=['v2.0', 'ports']), method='GET',
json=self.mock_neutron_port_list_rep) uri=self.get_mock_url(
]) 'network', 'public', append=['v2.0', 'ports']
),
json=self.mock_neutron_port_list_rep,
)
]
)
ports = self.cloud.list_ports() ports = self.cloud.list_ports()
for a, b in zip(self.mock_neutron_port_list_rep['ports'], ports): for a, b in zip(self.mock_neutron_port_list_rep['ports'], ports):
self._compare_ports(a, b) self._compare_ports(a, b)
self.assert_calls() self.assert_calls()
def test_list_ports_filtered(self): def test_list_ports_filtered(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'network', 'public', append=['v2.0', 'ports'], method='GET',
qs_elements=['status=DOWN']), uri=self.get_mock_url(
json=self.mock_neutron_port_list_rep) 'network',
]) 'public',
append=['v2.0', 'ports'],
qs_elements=['status=DOWN'],
),
json=self.mock_neutron_port_list_rep,
)
]
)
ports = self.cloud.list_ports(filters={'status': 'DOWN'}) ports = self.cloud.list_ports(filters={'status': 'DOWN'})
for a, b in zip(self.mock_neutron_port_list_rep['ports'], ports): for a, b in zip(self.mock_neutron_port_list_rep['ports'], ports):
self._compare_ports(a, b) self._compare_ports(a, b)
self.assert_calls() self.assert_calls()
def test_list_ports_exception(self): def test_list_ports_exception(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'network', 'public', append=['v2.0', 'ports']), method='GET',
status_code=500) uri=self.get_mock_url(
]) 'network', 'public', append=['v2.0', 'ports']
),
status_code=500,
)
]
)
self.assertRaises(OpenStackCloudException, self.cloud.list_ports) self.assertRaises(OpenStackCloudException, self.cloud.list_ports)
def test_search_ports_by_id(self): def test_search_ports_by_id(self):
port_id = 'f71a6703-d6de-4be1-a91a-a570ede1d159' port_id = 'f71a6703-d6de-4be1-a91a-a570ede1d159'
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'network', 'public', append=['v2.0', 'ports']), method='GET',
json=self.mock_neutron_port_list_rep) uri=self.get_mock_url(
]) 'network', 'public', append=['v2.0', 'ports']
),
json=self.mock_neutron_port_list_rep,
)
]
)
ports = self.cloud.search_ports(name_or_id=port_id) ports = self.cloud.search_ports(name_or_id=port_id)
self.assertEqual(1, len(ports)) self.assertEqual(1, len(ports))
@ -311,12 +391,17 @@ class TestPort(base.TestCase):
def test_search_ports_by_name(self): def test_search_ports_by_name(self):
port_name = "first-port" port_name = "first-port"
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'network', 'public', append=['v2.0', 'ports']), method='GET',
json=self.mock_neutron_port_list_rep) uri=self.get_mock_url(
]) 'network', 'public', append=['v2.0', 'ports']
),
json=self.mock_neutron_port_list_rep,
)
]
)
ports = self.cloud.search_ports(name_or_id=port_name) ports = self.cloud.search_ports(name_or_id=port_name)
self.assertEqual(1, len(ports)) self.assertEqual(1, len(ports))
@ -324,51 +409,80 @@ class TestPort(base.TestCase):
self.assert_calls() self.assert_calls()
def test_search_ports_not_found(self): def test_search_ports_not_found(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'network', 'public', append=['v2.0', 'ports']), method='GET',
json=self.mock_neutron_port_list_rep) uri=self.get_mock_url(
]) 'network', 'public', append=['v2.0', 'ports']
),
json=self.mock_neutron_port_list_rep,
)
]
)
ports = self.cloud.search_ports(name_or_id='non-existent') ports = self.cloud.search_ports(name_or_id='non-existent')
self.assertEqual(0, len(ports)) self.assertEqual(0, len(ports))
self.assert_calls() self.assert_calls()
def test_delete_port(self): def test_delete_port(self):
port_id = 'd80b1a3b-4fc1-49f3-952e-1e2ab7081d8b' port_id = 'd80b1a3b-4fc1-49f3-952e-1e2ab7081d8b'
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'network', 'public', method='GET',
append=['v2.0', 'ports', 'first-port']), uri=self.get_mock_url(
status_code=404), 'network',
dict(method='GET', 'public',
uri=self.get_mock_url( append=['v2.0', 'ports', 'first-port'],
'network', 'public', append=['v2.0', 'ports'], ),
qs_elements=['name=first-port']), status_code=404,
json=self.mock_neutron_port_list_rep), ),
dict(method='DELETE', dict(
uri=self.get_mock_url( method='GET',
'network', 'public', uri=self.get_mock_url(
append=['v2.0', 'ports', port_id]), 'network',
json={}) 'public',
]) append=['v2.0', 'ports'],
qs_elements=['name=first-port'],
),
json=self.mock_neutron_port_list_rep,
),
dict(
method='DELETE',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports', port_id]
),
json={},
),
]
)
self.assertTrue(self.cloud.delete_port(name_or_id='first-port')) self.assertTrue(self.cloud.delete_port(name_or_id='first-port'))
def test_delete_port_not_found(self): def test_delete_port_not_found(self):
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'network', 'public', append=['v2.0', 'ports', method='GET',
'non-existent']), uri=self.get_mock_url(
status_code=404), 'network',
dict(method='GET', 'public',
uri=self.get_mock_url( append=['v2.0', 'ports', 'non-existent'],
'network', 'public', append=['v2.0', 'ports'], ),
qs_elements=['name=non-existent']), status_code=404,
json={'ports': []}) ),
]) dict(
method='GET',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'ports'],
qs_elements=['name=non-existent'],
),
json={'ports': []},
),
]
)
self.assertFalse(self.cloud.delete_port(name_or_id='non-existent')) self.assertFalse(self.cloud.delete_port(name_or_id='non-existent'))
self.assert_calls() self.assert_calls()
@ -376,50 +490,78 @@ class TestPort(base.TestCase):
port_name = "port-name" port_name = "port-name"
port1 = dict(id='123', name=port_name) port1 = dict(id='123', name=port_name)
port2 = dict(id='456', name=port_name) port2 = dict(id='456', name=port_name)
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'network', 'public', append=['v2.0', 'ports', port_name]), method='GET',
status_code=404), uri=self.get_mock_url(
dict(method='GET', 'network',
uri=self.get_mock_url( 'public',
'network', 'public', append=['v2.0', 'ports'], append=['v2.0', 'ports', port_name],
qs_elements=['name=%s' % port_name]), ),
json={'ports': [port1, port2]}) status_code=404,
]) ),
self.assertRaises(OpenStackCloudException, dict(
self.cloud.delete_port, port_name) method='GET',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'ports'],
qs_elements=['name=%s' % port_name],
),
json={'ports': [port1, port2]},
),
]
)
self.assertRaises(
OpenStackCloudException, self.cloud.delete_port, port_name
)
self.assert_calls() self.assert_calls()
def test_delete_subnet_multiple_using_id(self): def test_delete_subnet_multiple_using_id(self):
port_name = "port-name" port_name = "port-name"
port1 = dict(id='123', name=port_name) port1 = dict(id='123', name=port_name)
port2 = dict(id='456', name=port_name) port2 = dict(id='456', name=port_name)
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'network', 'public', method='GET',
append=['v2.0', 'ports', port1['id']]), uri=self.get_mock_url(
json={'ports': [port1, port2]}), 'network',
dict(method='DELETE', 'public',
uri=self.get_mock_url( append=['v2.0', 'ports', port1['id']],
'network', 'public', ),
append=['v2.0', 'ports', port1['id']]), json={'ports': [port1, port2]},
json={}) ),
]) dict(
method='DELETE',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'ports', port1['id']],
),
json={},
),
]
)
self.assertTrue(self.cloud.delete_port(name_or_id=port1['id'])) self.assertTrue(self.cloud.delete_port(name_or_id=port1['id']))
self.assert_calls() self.assert_calls()
def test_get_port_by_id(self): def test_get_port_by_id(self):
fake_port = dict(id='123', name='456') fake_port = dict(id='123', name='456')
self.register_uris([ self.register_uris(
dict(method='GET', [
uri=self.get_mock_url( dict(
'network', 'public', append=['v2.0', method='GET',
'ports', uri=self.get_mock_url(
fake_port['id']]), 'network',
json={'port': fake_port}) 'public',
]) append=['v2.0', 'ports', fake_port['id']],
),
json={'port': fake_port},
)
]
)
r = self.cloud.get_port_by_id(fake_port['id']) r = self.cloud.get_port_by_id(fake_port['id'])
self.assertIsNotNone(r) self.assertIsNotNone(r)
self._compare_ports(fake_port, r) self._compare_ports(fake_port, r)

Some files were not shown because too many files have changed in this diff Show More