Replace uuid4() with uuidsentinel

As of now, most of the test cases use uuidsentinel for generating
UUIDs, except in some places where uuid4() is used. In order to
maintain consistency, we propose to use the uuidsentinel module for
generating UUIDs throughout the test cases.

There are some cases where unique UUIDs are required. For such
cases, generate_uuid() from oslo_utils.uuidutils is used.

Change-Id: Ie0e0fc1878e3f18065a11539b15d8c4ee893a29a
This commit is contained in:
hussainchachuliya 2016-09-19 18:45:41 +05:30
parent 00b359ce14
commit 7bb82e2dfd
10 changed files with 57 additions and 61 deletions

View File

@ -14,10 +14,10 @@
# under the License.
import datetime
import uuid
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import routes
import six
from six.moves import range
@ -302,7 +302,7 @@ def create_info_cache(nw_cache):
def get_fake_uuid(token=0):
if token not in FAKE_UUIDS:
FAKE_UUIDS[token] = str(uuid.uuid4())
FAKE_UUIDS[token] = uuidutils.generate_uuid()
return FAKE_UUIDS[token]

View File

@ -6708,14 +6708,15 @@ class ComputeTestCase(BaseTestCase):
# these are the ones that are expired
old_instances = []
for x in range(4):
instance = {'uuid': str(uuid.uuid4()), 'created_at': created_at}
instance = {'uuid': uuidutils.generate_uuid(),
'created_at': created_at}
instance.update(filters)
old_instances.append(fake_instance.fake_db_instance(**instance))
# not expired
instances = list(old_instances) # copy the contents of old_instances
new_instance = {
'uuid': str(uuid.uuid4()),
'uuid': uuids.fake,
'created_at': timeutils.utcnow(),
}
sort_key = 'created_at'
@ -7030,7 +7031,7 @@ class ComputeTestCase(BaseTestCase):
def test_partial_deletion_raise_exception(self, mock_complete):
admin_context = context.get_admin_context()
instance = objects.Instance(admin_context)
instance.uuid = str(uuid.uuid4())
instance.uuid = uuids.fake
instance.vm_state = vm_states.DELETED
instance.deleted = False
instance.host = self.compute.host
@ -7909,7 +7910,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual([], instance.security_groups.objects)
def test_default_hostname_generator(self):
fake_uuids = [str(uuid.uuid4()) for x in range(4)]
fake_uuids = [uuidutils.generate_uuid() for x in range(4)]
orig_populate = self.compute_api._populate_instance_for_create
@ -7938,7 +7939,7 @@ class ComputeAPITestCase(BaseTestCase):
self.fake_show)
group = objects.InstanceGroup(self.context)
group.uuid = str(uuid.uuid4())
group.uuid = uuids.fake
group.project_id = self.context.project_id
group.user_id = self.context.user_id
group.create()
@ -9471,7 +9472,7 @@ class ComputeAPITestCase(BaseTestCase):
# Tests that when deallocate_port_for_instance fails we log the failure
# before exiting compute.detach_interface.
nwinfo, port_id = self.test_attach_interface()
instance = objects.Instance(id=42, uuid=uuidutils.generate_uuid())
instance = objects.Instance(id=42, uuid=uuids.fake)
instance.info_cache = objects.InstanceInfoCache.new(
self.context, uuids.info_cache_instance)
instance.info_cache.network_info = network_model.NetworkInfo.hydrate(
@ -11699,7 +11700,7 @@ class CheckConfigDriveTestCase(test.NoDBTestCase):
def test_config_drive_bogus_values_raise(self):
self._assertInvalid('asd')
self._assertInvalid(uuidutils.generate_uuid())
self._assertInvalid(uuids.fake)
class CheckRequestedImageTestCase(test.TestCase):
@ -11794,12 +11795,12 @@ class CheckRequestedImageTestCase(test.TestCase):
# disk.
# We should allow a root volume created from an image whose min_disk is
# larger than the flavor root disk.
image_uuid = str(uuid.uuid4())
image_uuid = uuids.fake
image = dict(id=image_uuid, status='active',
size=self.instance_type.root_gb * units.Gi,
min_disk=self.instance_type.root_gb + 1)
volume_uuid = str(uuid.uuid4())
volume_uuid = uuids.fake_2
root_bdm = block_device_obj.BlockDeviceMapping(
source_type='volume', destination_type='volume',
volume_id=volume_uuid, volume_size=self.instance_type.root_gb + 1)
@ -11810,12 +11811,12 @@ class CheckRequestedImageTestCase(test.TestCase):
def test_volume_blockdevicemapping_min_disk(self):
# A bdm object volume smaller than the image's min_disk should not be
# allowed
image_uuid = str(uuid.uuid4())
image_uuid = uuids.fake
image = dict(id=image_uuid, status='active',
size=self.instance_type.root_gb * units.Gi,
min_disk=self.instance_type.root_gb + 1)
volume_uuid = str(uuid.uuid4())
volume_uuid = uuids.fake_2
root_bdm = block_device_obj.BlockDeviceMapping(
source_type='image', destination_type='volume',
image_id=image_uuid, volume_id=volume_uuid,
@ -11828,12 +11829,12 @@ class CheckRequestedImageTestCase(test.TestCase):
def test_volume_blockdevicemapping_min_disk_no_size(self):
# We should allow a root volume whose size is not given
image_uuid = str(uuid.uuid4())
image_uuid = uuids.fake
image = dict(id=image_uuid, status='active',
size=self.instance_type.root_gb * units.Gi,
min_disk=self.instance_type.root_gb)
volume_uuid = str(uuid.uuid4())
volume_uuid = uuids.fake_2
root_bdm = block_device_obj.BlockDeviceMapping(
source_type='volume', destination_type='volume',
volume_id=volume_uuid, volume_size=None)
@ -11844,7 +11845,7 @@ class CheckRequestedImageTestCase(test.TestCase):
def test_image_blockdevicemapping(self):
# Test that we can succeed when passing bdms, and the root bdm isn't a
# volume
image_uuid = str(uuid.uuid4())
image_uuid = uuids.fake
image = dict(id=image_uuid, status='active',
size=self.instance_type.root_gb * units.Gi, min_disk=0)
@ -11857,7 +11858,7 @@ class CheckRequestedImageTestCase(test.TestCase):
def test_image_blockdevicemapping_too_big(self):
# We should do a size check against flavor if we were passed bdms but
# the root bdm isn't a volume
image_uuid = str(uuid.uuid4())
image_uuid = uuids.fake
image = dict(id=image_uuid, status='active',
size=(self.instance_type.root_gb + 1) * units.Gi,
min_disk=0)
@ -11873,7 +11874,7 @@ class CheckRequestedImageTestCase(test.TestCase):
def test_image_blockdevicemapping_min_disk(self):
# We should do a min_disk check against flavor if we were passed bdms
# but the root bdm isn't a volume
image_uuid = str(uuid.uuid4())
image_uuid = uuids.fake
image = dict(id=image_uuid, status='active',
size=0, min_disk=self.instance_type.root_gb + 1)

View File

@ -14,7 +14,6 @@
import datetime
import time
import uuid
from cinderclient import exceptions as cinder_exception
from eventlet import event as eventlet_event
@ -471,7 +470,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
with mock.patch.object(self.compute,
'_build_semaphore') as mock_sem:
instance = objects.Instance(uuid=str(uuid.uuid4()))
instance = objects.Instance(uuid=uuidutils.generate_uuid())
for i in (1, 2, 3):
self.compute.build_and_run_instance(self.context, instance,
mock.sentinel.image,
@ -1773,12 +1772,12 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
# This test ensures that volume_id arguments are passed to volume_api
# and that volume states are OK
volumes = {}
old_volume_id = uuidutils.generate_uuid()
old_volume_id = uuids.fake
volumes[old_volume_id] = {'id': old_volume_id,
'display_name': 'old_volume',
'status': 'detaching',
'size': 1}
new_volume_id = uuidutils.generate_uuid()
new_volume_id = uuids.fake_2
volumes[new_volume_id] = {'id': new_volume_id,
'display_name': 'new_volume',
'status': 'available',
@ -1940,12 +1939,12 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
# This test ensures that delete_on_termination flag arguments
# are reserved
volumes = {}
old_volume_id = uuidutils.generate_uuid()
old_volume_id = uuids.fake
volumes[old_volume_id] = {'id': old_volume_id,
'display_name': 'old_volume',
'status': 'detaching',
'size': 2}
new_volume_id = uuidutils.generate_uuid()
new_volume_id = uuids.fake_2
volumes[new_volume_id] = {'id': new_volume_id,
'display_name': 'new_volume',
'status': 'available',
@ -4881,7 +4880,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
@mock.patch('nova.objects.Migration.save')
def _do_it(mock_mig_save):
instance = objects.Instance(uuid=str(uuid.uuid4()))
instance = objects.Instance(uuid=uuids.fake)
migration = objects.Migration()
self.compute.live_migration(self.context,
mock.sentinel.dest,
@ -5003,7 +5002,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
def test_live_migration_force_complete_succeeded(self):
instance = objects.Instance(uuid=str(uuid.uuid4()))
instance = objects.Instance(uuid=uuids.fake)
migration = objects.Migration()
migration.status = 'running'
migration.id = 0

View File

@ -13,9 +13,9 @@
# under the License.
import datetime
import uuid
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from nova import objects
from nova.objects import fields
@ -58,7 +58,7 @@ def fake_db_instance(**updates):
db_instance = {
'id': 1,
'deleted': False,
'uuid': str(uuid.uuid4()),
'uuid': uuidutils.generate_uuid(),
'user_id': 'fake-user',
'project_id': 'fake-project',
'host': 'fake-host',

View File

@ -17,7 +17,6 @@ Tests for Crypto module.
"""
import os
import uuid
from cryptography.hazmat import backends
from cryptography.hazmat.primitives import serialization
@ -29,6 +28,7 @@ import six
from nova import crypto
from nova import exception
from nova import test
from nova.tests import uuidsentinel as uuids
from nova import utils
@ -158,7 +158,7 @@ class RevokeCertsTest(test.NoDBTestCase):
def test_revoke_cert_project_not_found_chdir_fails(self, *args, **kargs):
self.flags(use_project_ca=True, group='crypto')
self.assertRaises(exception.ProjectNotFound, crypto.revoke_cert,
str(uuid.uuid4()), 'test_file')
uuids.fake, 'test_file')
class CertExceptionTests(test.NoDBTestCase):

View File

@ -14,13 +14,13 @@
import sys
import time
import uuid
import fixtures
from lxml import etree
import six
from nova.objects import fields as obj_fields
from nova.tests import uuidsentinel as uuids
from nova.virt.libvirt import config as vconfig
# Allow passing None to the various connect methods
@ -572,7 +572,7 @@ class Domain(object):
if uuid_elem is not None:
definition['uuid'] = uuid_elem.text
else:
definition['uuid'] = str(uuid.uuid4())
definition['uuid'] = uuids.fake
vcpu = tree.find('./vcpu')
if vcpu is not None:

View File

@ -28,7 +28,6 @@ import shutil
import signal
import threading
import time
import uuid
import eventlet
from eventlet import greenthread
@ -375,7 +374,7 @@ class FakeVirtDomain(object):
def __init__(self, fake_xml=None, uuidstr=None, id=None, name=None):
if uuidstr is None:
uuidstr = str(uuid.uuid4())
uuidstr = uuids.fake
self.uuidstr = uuidstr
self.id = id
self.domname = name
@ -497,7 +496,7 @@ class CacheConcurrencyTestCase(test.NoDBTestCase):
def test_same_fname_concurrency(self):
# Ensures that the same fname cache runs at a sequentially.
uuid = uuidutils.generate_uuid()
uuid = uuids.fake
backend = imagebackend.Backend(False)
wait1 = eventlet.event.Event()
@ -535,7 +534,7 @@ class CacheConcurrencyTestCase(test.NoDBTestCase):
def test_different_fname_concurrency(self):
# Ensures that two different fname caches are concurrent.
uuid = uuidutils.generate_uuid()
uuid = uuids.fake
backend = imagebackend.Backend(False)
wait1 = eventlet.event.Event()
@ -10325,7 +10324,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
instance_ref['image_ref'] = ''
instance_ref['root_device_name'] = '/dev/sda'
instance_ref['ephemeral_gb'] = 0
instance_ref['uuid'] = uuidutils.generate_uuid()
instance_ref['uuid'] = uuids.fake
inst_obj = objects.Instance(**instance_ref)
image_meta = objects.ImageMeta.from_dict({})
@ -12127,7 +12126,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
class DiagFakeDomain(object):
def __init__(self, name):
self._name = name
self._uuid = str(uuid.uuid4())
self._uuid = uuids.fake
def ID(self):
return 1
@ -12196,7 +12195,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
class DiagFakeDomain(object):
def __init__(self, name):
self._name = name
self._uuid = str(uuid.uuid4())
self._uuid = uuidutils.generate_uuid()
def ID(self):
return 1
@ -14760,7 +14759,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
mock_close,
mock_mkstemp,
mock_exists):
instance_uuid = str(uuid.uuid4())
instance_uuid = uuids.fake
self.flags(images_type='raw', group='libvirt')
self.flags(instances_path='/tmp')
mock_mkstemp.return_value = (-1,
@ -17321,7 +17320,7 @@ class LibvirtVolumeSnapshotTestCase(test.NoDBTestCase):
# creating instance
self.inst = {}
self.inst['uuid'] = uuidutils.generate_uuid()
self.inst['uuid'] = uuids.fake
self.inst['id'] = '1'
# create domain info

View File

@ -14,11 +14,10 @@
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import eventlet
from eventlet import greenthread
import mock
from oslo_utils import uuidutils
import six
from nova import exception
@ -26,6 +25,7 @@ from nova import objects
from nova.objects import fields as obj_fields
from nova import test
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova.tests import uuidsentinel as uuids
from nova.virt import event
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import driver as libvirt_driver
@ -43,7 +43,7 @@ class FakeVirtDomain(object):
def __init__(self, id=-1, name=None):
self._id = id
self._name = name
self._uuid = str(uuid.uuid4())
self._uuid = uuidutils.generate_uuid()
def name(self):
return self._name
@ -892,7 +892,7 @@ Active: 8381604 kB
return "instance000001"
def UUIDString(self):
return str(uuid.uuid4())
return uuids.fake
m = mock.mock_open(read_data="""
MemTotal: 16194180 kB

View File

@ -14,7 +14,6 @@
# under the License.
import contextlib
import uuid
from eventlet import greenthread
import mock
@ -562,7 +561,7 @@ class CheckVDISizeTestCase(VMUtilsTestBase):
super(CheckVDISizeTestCase, self).setUp()
self.context = 'fakecontext'
self.session = 'fakesession'
self.instance = objects.Instance(uuid=str(uuid.uuid4()))
self.instance = objects.Instance(uuid=uuids.fake)
self.flavor = objects.Flavor()
self.vdi_uuid = 'fakeuuid'
@ -1358,7 +1357,7 @@ class CreateKernelRamdiskTestCase(VMUtilsTestBase):
self.instance = {"kernel_id": None, "ramdisk_id": None}
self.name_label = "name"
self.mox.StubOutWithMock(self.session, "call_plugin")
self.mox.StubOutWithMock(uuid, "uuid4")
self.mox.StubOutWithMock(uuidutils, "generate_uuid")
self.mox.StubOutWithMock(vm_utils, "_fetch_disk_image")
def test_create_kernel_and_ramdisk_no_create(self):
@ -1376,14 +1375,14 @@ class CreateKernelRamdiskTestCase(VMUtilsTestBase):
args_kernel = {}
args_kernel['cached-image'] = kernel_id
args_kernel['new-image-uuid'] = "fake_uuid1"
uuid.uuid4().AndReturn("fake_uuid1")
uuidutils.generate_uuid().AndReturn("fake_uuid1")
self.session.call_plugin('kernel.py', 'create_kernel_ramdisk',
args_kernel).AndReturn("k")
args_ramdisk = {}
args_ramdisk['cached-image'] = ramdisk_id
args_ramdisk['new-image-uuid'] = "fake_uuid2"
uuid.uuid4().AndReturn("fake_uuid2")
uuidutils.generate_uuid().AndReturn("fake_uuid2")
self.session.call_plugin('kernel.py', 'create_kernel_ramdisk',
args_ramdisk).AndReturn("r")
@ -1399,7 +1398,7 @@ class CreateKernelRamdiskTestCase(VMUtilsTestBase):
args_kernel = {}
args_kernel['cached-image'] = kernel_id
args_kernel['new-image-uuid'] = "fake_uuid1"
uuid.uuid4().AndReturn("fake_uuid1")
uuidutils.generate_uuid().AndReturn("fake_uuid1")
self.session.call_plugin('kernel.py', 'create_kernel_ramdisk',
args_kernel).AndReturn("")
@ -1422,7 +1421,7 @@ class CreateKernelRamdiskTestCase(VMUtilsTestBase):
self.flags(cache_images=cache_images, group='xenserver')
if cache_images == 'all':
uuid.uuid4().AndReturn("fake_uuid1")
uuidutils.generate_uuid().AndReturn("fake_uuid1")
self.session.call_plugin('kernel.py', 'create_kernel_ramdisk',
args_kernel).AndReturn("cached_image")
else:
@ -1577,8 +1576,7 @@ class CreateVmTestCase(VMUtilsTestBase):
def test_invalid_cpu_mask_raises(self, mock_extract):
self.flags(vcpu_pin_set="asdf")
session = mock.Mock()
instance = objects.Instance(uuid=str(uuid.uuid4()),
system_metadata={})
instance = objects.Instance(uuid=uuids.fake, system_metadata={})
with mock.patch.object(instance, 'get_flavor') as get:
get.return_value = objects.Flavor._from_db_object(
None, objects.Flavor(), test_flavor.fake_flavor)
@ -1589,7 +1587,7 @@ class CreateVmTestCase(VMUtilsTestBase):
def test_destroy_vm(self, mock_extract):
session = mock.Mock()
instance = objects.Instance(uuid=str(uuid.uuid4()))
instance = objects.Instance(uuid=uuids.fake)
vm_utils.destroy_vm(session, instance, "vm_ref")
@ -1600,7 +1598,7 @@ class CreateVmTestCase(VMUtilsTestBase):
exc = test.TestingException()
session.XenAPI.Failure = test.TestingException
session.VM.destroy.side_effect = exc
instance = objects.Instance(uuid=str(uuid.uuid4()))
instance = objects.Instance(uuid=uuids.fake)
vm_utils.destroy_vm(session, instance, "vm_ref")
@ -1992,7 +1990,7 @@ class ImportMigratedDisksTestCase(VMUtilsTestBase):
@mock.patch.object(vm_utils, '_import_migrated_vhds')
def test_import_migrate_ephemeral_disks(self, mock_migrate):
mock_migrate.return_value = "foo"
instance = objects.Instance(id=1, uuid=uuidutils.generate_uuid())
instance = objects.Instance(id=1, uuid=uuids.fake)
instance.old_flavor = objects.Flavor(ephemeral_gb=4000)
result = vm_utils._import_migrate_ephemeral_disks("s", instance)
@ -2014,8 +2012,7 @@ class ImportMigratedDisksTestCase(VMUtilsTestBase):
def test_import_migrate_ephemeral_disks_use_old_flavor(self,
mock_get_sizes):
mock_get_sizes.return_value = []
instance = objects.Instance(id=1, uuid=uuidutils.generate_uuid(),
ephemeral_gb=2000)
instance = objects.Instance(id=1, uuid=uuids.fake, ephemeral_gb=2000)
instance.old_flavor = objects.Flavor(ephemeral_gb=4000)
vm_utils._import_migrate_ephemeral_disks("s", instance)

View File

@ -21,7 +21,6 @@ import copy
import functools
import os
import re
import uuid
import mock
from mox3 import mox
@ -30,6 +29,7 @@ from oslo_config import fixture as config_fixture
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslo_utils import uuidutils
import six
import testtools
@ -1550,7 +1550,7 @@ iface eth0 inet6 static
def _create_instance(self, spawn=True, obj=False, **attrs):
"""Creates and spawns a test instance."""
instance_values = {
'uuid': str(uuid.uuid4()),
'uuid': uuidutils.generate_uuid(),
'display_name': 'host-',
'project_id': self.project_id,
'user_id': self.user_id,