Merge "Use six.moves.range for Python 3"

Authored by Jenkins on 2015-05-22 18:04:43 +00:00; committed by Gerrit Code Review
commit 2108a66572
51 changed files with 150 additions and 110 deletions
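
Every hunk below follows the same mechanical pattern: add "from six.moves import range" near the top of the module and swap xrange(...) for range(...). A minimal sketch of why this keeps behaviour identical on both interpreters (the helper and its output are illustrative only, not part of the change):

    from six.moves import range   # Python 2: aliases xrange; Python 3: the builtin range

    def chunked_ids(count):
        # A bare range() on Python 2 would build a full list in memory,
        # whereas xrange() was lazy; six.moves.range is lazy on both versions.
        for i in range(count):
            yield 'id-%d' % i

    print(list(chunked_ids(3)))   # ['id-0', 'id-1', 'id-2']

The import only shadows the builtin inside the importing module, and call sites that genuinely need a list have to wrap the call in list(), as one libvirt hunk further down does.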


@ -25,7 +25,7 @@ def process_todo_nodes(app, doctree, fromdocname):
# remove the item that was added in the constructor, since I'm tired of
# reading through docutils for the proper way to construct an empty list
lists = []
for i in xrange(5):
for i in range(5):
lists.append(nodes.bullet_list("", nodes.Text('', '')))
lists[i].remove(lists[i][0])
lists[i]['classes'].append('todo_list')


@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from six.moves import range
from webob import exc
from nova import context
@ -42,7 +43,7 @@ def convert_password(context, password):
"""
password = password or ''
meta = {}
for i in xrange(CHUNKS):
for i in range(CHUNKS):
meta['password_%d' % i] = password[:CHUNK_LENGTH]
password = password[CHUNK_LENGTH:]
return meta
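
For context on the hunk above: convert_password stores a password as fixed-size chunks under metadata keys password_0, password_1, and so on. A rough stand-alone sketch of the same round trip, using made-up values for CHUNKS and CHUNK_LENGTH and a hypothetical join_password helper for the reverse direction:

    from six.moves import range

    CHUNKS = 4          # illustrative values, not the module's real constants
    CHUNK_LENGTH = 255

    def split_password(password):
        password = password or ''
        meta = {}
        for i in range(CHUNKS):
            meta['password_%d' % i] = password[:CHUNK_LENGTH]
            password = password[CHUNK_LENGTH:]
        return meta

    def join_password(meta):
        # Reassemble the chunks in key order.
        return ''.join(meta['password_%d' % i] for i in range(CHUNKS))

    assert join_password(split_password('s3cret')) == 's3cret'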


@ -24,6 +24,7 @@ from oslo_log import log as logging
import oslo_messaging
from oslo_utils import importutils
from oslo_utils import timeutils
from six.moves import range
from nova.cells import messaging
from nova.cells import state as cells_state
@ -169,7 +170,7 @@ class CellsManager(manager.Manager):
rd_context = ctxt.elevated(read_deleted='yes')
for i in xrange(CONF.cells.instance_update_num_instances):
for i in range(CONF.cells.instance_update_num_instances):
while True:
# Yield to other greenthreads
time.sleep(0)


@ -40,6 +40,7 @@ from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from six.moves import range
from nova.cells import state as cells_state
from nova.cells import utils as cells_utils
@ -228,7 +229,7 @@ class _BaseMessage(object):
responses = []
wait_time = CONF.cells.call_timeout
try:
for x in xrange(num_responses):
for x in range(num_responses):
json_responses = self.resp_queue.get(timeout=wait_time)
responses.extend(json_responses)
except queue.Empty:


@ -21,6 +21,7 @@ import time
from oslo_config import cfg
from oslo_log import log as logging
from six.moves import range
from nova.cells import filters
from nova.cells import weights
@ -223,7 +224,7 @@ class CellsScheduler(base.Base):
filter_properties, method, method_kwargs):
"""Pick a cell where we should create a new instance(s)."""
try:
for i in xrange(max(0, CONF.cells.scheduler_retries) + 1):
for i in range(max(0, CONF.cells.scheduler_retries) + 1):
try:
target_cells = self._grab_target_cells(filter_properties)
if target_cells is None:


@ -35,6 +35,7 @@ from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
import six
from six.moves import range
from nova import availability_zones
from nova import block_device
@ -910,7 +911,7 @@ class API(base.Base):
LOG.debug("Going to run %s instances..." % num_instances)
instances = []
try:
for i in xrange(num_instances):
for i in range(num_instances):
instance = objects.Instance(context=context)
instance.update(base_options)
instance = self.create_db_entry_for_new_instance(


@ -48,6 +48,7 @@ from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import timeutils
import six
from six.moves import range
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
@ -3006,7 +3007,7 @@ class ComputeManager(manager.Manager):
excess = len(images) - rotation
LOG.debug("Rotating out %d backups", excess,
instance=instance)
for i in xrange(excess):
for i in range(excess):
image = images.pop()
image_id = image['id']
LOG.debug("Deleting image %s", image_id,


@ -36,6 +36,7 @@ from oslo_utils import excutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from six.moves import range
from sqlalchemy import and_
from sqlalchemy import Boolean
from sqlalchemy.exc import NoSuchTableError
@ -4826,7 +4827,7 @@ def flavor_extra_specs_delete(context, flavor_id, key):
@require_context
def flavor_extra_specs_update_or_create(context, flavor_id, specs,
max_retries=10):
for attempt in xrange(max_retries):
for attempt in range(max_retries):
try:
session = get_session()
with session.begin():
@ -5522,7 +5523,7 @@ def aggregate_metadata_delete(context, aggregate_id, key):
def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False,
max_retries=10):
all_keys = metadata.keys()
for attempt in xrange(max_retries):
for attempt in range(max_retries):
try:
session = get_session()
with session.begin():
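
Both hunks in this file number their attempts with range(max_retries). The usual shape of such a bounded-retry loop is sketched below with a placeholder exception and work callable rather than the real DB API:

    from six.moves import range

    class RetryableError(Exception):
        pass

    def run_with_retries(work, max_retries=10):
        for attempt in range(max_retries):
            try:
                return work()
            except RetryableError:
                # Re-raise only after the final attempt; earlier failures loop again.
                if attempt == max_retries - 1:
                    raise

    run_with_retries(lambda: 42)   # returns 42 on the first attempt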


@ -31,6 +31,7 @@ from oslo_serialization import jsonutils
from oslo_utils import netutils
from oslo_utils import timeutils
import six
from six.moves import range
import six.moves.urllib.parse as urlparse
from nova import exception
@ -212,7 +213,7 @@ class GlanceClientWrapper(object):
glanceclient.exc.CommunicationError)
num_attempts = 1 + CONF.glance.num_retries
for attempt in xrange(1, num_attempts + 1):
for attempt in range(1, num_attempts + 1):
client = self.client or self._create_onetime_client(context,
version)
try:


@ -23,6 +23,7 @@ import random
from oslo_config import cfg
from oslo_log import log as logging
from six.moves import range
from nova import exception
from nova.i18n import _
@ -140,7 +141,7 @@ class FilterScheduler(driver.Scheduler):
selected_hosts = []
num_instances = request_spec.get('num_instances', 1)
for num in xrange(num_instances):
for num in range(num_instances):
# Filter local hosts based on requirements ...
hosts = self.host_manager.get_filtered_hosts(hosts,
filter_properties, index=num)


@ -47,7 +47,7 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
'conductor', manager='nova.conductor.manager.ConductorManager')
def _wait_for_state_change(self, server, from_status):
for i in xrange(0, 50):
for i in range(0, 50):
server = self.api.get_server(server['id'])
if server['status'] != from_status:
break


@ -14,6 +14,7 @@
# under the License.
from oslo_config import cfg
from six.moves import range
from nova.cells import rpcapi as cells_rpcapi
from nova.cells import state
@ -62,7 +63,7 @@ class CellsSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
return cell
raise exception.CellNotFound(cell_name=cell_name)
for x in xrange(num_cells):
for x in range(num_cells):
cell = models.Cell()
our_id = self.cells_next_id
self.cells_next_id += 1


@ -19,6 +19,7 @@ import mock
from oslo_config import cfg
from oslo_utils import timeutils
import requests
from six.moves import range
import webob
import webob.dec
import webob.exc
@ -53,7 +54,7 @@ class LockoutTestCase(test.NoDBTestCase):
def _send_bad_attempts(self, access_key, num_attempts=1):
"""Fail x."""
for i in xrange(num_attempts):
for i in range(num_attempts):
req = webob.Request.blank('/?AWSAccessKeyId=%s&die=1' % access_key)
self.assertEqual(req.get_response(self.lockout).status_int, 403)


@ -17,6 +17,7 @@ import mock
from mox3 import mox
from oslo_config import cfg
from oslo_serialization import jsonutils
from six.moves import range
from webob import exc
from nova.api.openstack.compute import extensions
@ -322,7 +323,7 @@ class BlockDeviceMappingTestV21(test.TestCase):
self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
self.stubs.Set(objects.Instance, 'destroy', _instance_destroy)
for _unused in xrange(len(bdm_exceptions)):
for _unused in range(len(bdm_exceptions)):
params = {block_device_mapping.ATTRIBUTE_NAME:
[self.bdm[0].copy()]}
self.assertRaises(exc.HTTPBadRequest,


@ -17,6 +17,7 @@ import datetime
import mock
from oslo_utils import timeutils
from six.moves import range
import webob
from nova.api.openstack.compute.contrib import simple_tenant_usage as \
@ -99,7 +100,7 @@ def fake_instance_get_active_by_window_joined(context, begin, end,
x,
project_id if project_id else
"faketenant_%s" % (x / SERVERS))
for x in xrange(TENANTS * SERVERS)]
for x in range(TENANTS * SERVERS)]
@mock.patch.object(db, 'instance_get_active_by_window_joined',
@ -126,7 +127,7 @@ class SimpleTenantUsageTestV21(test.TestCase):
req.environ['nova.context'] = self.admin_context
res_dict = self.controller.index(req)
usages = res_dict['tenant_usages']
for i in xrange(TENANTS):
for i in range(TENANTS):
self.assertEqual(int(usages[i]['total_hours']),
SERVERS * HOURS)
self.assertEqual(int(usages[i]['total_local_gb_usage']),
@ -178,20 +179,20 @@ class SimpleTenantUsageTestV21(test.TestCase):
def test_verify_detailed_index(self):
usages = self._get_tenant_usages('1')
for i in xrange(TENANTS):
for i in range(TENANTS):
servers = usages[i]['server_usages']
for j in xrange(SERVERS):
for j in range(SERVERS):
self.assertEqual(int(servers[j]['hours']), HOURS)
def test_verify_simple_index(self):
usages = self._get_tenant_usages(detailed='0')
for i in xrange(TENANTS):
for i in range(TENANTS):
self.assertIsNone(usages[i].get('server_usages'))
def test_verify_simple_index_empty_param(self):
# NOTE(lzyeval): 'detailed=&start=..&end=..'
usages = self._get_tenant_usages()
for i in xrange(TENANTS):
for i in range(TENANTS):
self.assertIsNone(usages[i].get('server_usages'))
def _test_verify_show(self, start, stop):
@ -206,8 +207,8 @@ class SimpleTenantUsageTestV21(test.TestCase):
servers = usage['server_usages']
self.assertEqual(len(usage['server_usages']), TENANTS * SERVERS)
uuids = ['00000000-0000-0000-0000-00000000000000%02d' %
x for x in xrange(SERVERS)]
for j in xrange(SERVERS):
x for x in range(SERVERS)]
for j in range(SERVERS):
delta = STOP - START
uptime = delta.days * 24 * 3600 + delta.seconds
self.assertEqual(int(servers[j]['uptime']), uptime)


@ -27,6 +27,7 @@ from mox3 import mox
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from six.moves import range
import six.moves.urllib.parse as urlparse
import testtools
import webob
@ -580,7 +581,7 @@ class ServersControllerTest(ControllerTest):
servers = res_dict['servers']
self.assertEqual([s['id'] for s in servers],
[fakes.get_fake_uuid(i) for i in xrange(len(servers))])
[fakes.get_fake_uuid(i) for i in range(len(servers))])
servers_links = res_dict['servers_links']
self.assertEqual(servers_links[0]['rel'], 'next')
@ -612,7 +613,7 @@ class ServersControllerTest(ControllerTest):
servers = res['servers']
self.assertEqual([s['id'] for s in servers],
[fakes.get_fake_uuid(i) for i in xrange(len(servers))])
[fakes.get_fake_uuid(i) for i in range(len(servers))])
servers_links = res['servers_links']
self.assertEqual(servers_links[0]['rel'], 'next')
@ -636,7 +637,7 @@ class ServersControllerTest(ControllerTest):
servers = res['servers']
self.assertEqual([s['id'] for s in servers],
[fakes.get_fake_uuid(i) for i in xrange(len(servers))])
[fakes.get_fake_uuid(i) for i in range(len(servers))])
servers_links = res['servers_links']
self.assertEqual(servers_links[0]['rel'], 'next')
@ -1283,7 +1284,7 @@ class ServersControllerTest(ControllerTest):
def return_servers_with_host(context, *args, **kwargs):
return [fakes.stub_instance(i + 1, 'fake', 'fake', host=i % 2,
uuid=fakes.get_fake_uuid(i))
for i in xrange(5)]
for i in range(5)]
self.stubs.Set(db, 'instance_get_all_by_filters_sort',
return_servers_with_host)


@ -23,6 +23,7 @@ import StringIO
import mock
from oslo_serialization import jsonutils
import six
from six.moves import range
import webob
from nova.api.openstack.compute import limits
@ -502,7 +503,7 @@ class LimiterTest(BaseLimitTestSuite):
def _check(self, num, verb, url, username=None):
"""Check and yield results from checks."""
for x in xrange(num):
for x in range(num):
yield self.limiter.check_for_delay(verb, url, username)[0]
def _check_sum(self, num, verb, url, username=None):


@ -27,6 +27,7 @@ import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from six.moves import range
import six.moves.urllib.parse as urlparse
import testtools
import webob
@ -542,7 +543,7 @@ class ServersControllerTest(ControllerTest):
servers = res_dict['servers']
self.assertEqual([s['id'] for s in servers],
[fakes.get_fake_uuid(i) for i in xrange(len(servers))])
[fakes.get_fake_uuid(i) for i in range(len(servers))])
servers_links = res_dict['servers_links']
self.assertEqual(servers_links[0]['rel'], 'next')
@ -574,7 +575,7 @@ class ServersControllerTest(ControllerTest):
servers = res['servers']
self.assertEqual([s['id'] for s in servers],
[fakes.get_fake_uuid(i) for i in xrange(len(servers))])
[fakes.get_fake_uuid(i) for i in range(len(servers))])
servers_links = res['servers_links']
self.assertEqual(servers_links[0]['rel'], 'next')
@ -598,7 +599,7 @@ class ServersControllerTest(ControllerTest):
servers = res['servers']
self.assertEqual([s['id'] for s in servers],
[fakes.get_fake_uuid(i) for i in xrange(len(servers))])
[fakes.get_fake_uuid(i) for i in range(len(servers))])
servers_links = res['servers_links']
self.assertEqual(servers_links[0]['rel'], 'next')
@ -1332,7 +1333,7 @@ class ServersControllerTest(ControllerTest):
def return_servers_with_host(context, *args, **kwargs):
return [fakes.stub_instance(i + 1, 'fake', 'fake', host=i % 2,
uuid=fakes.get_fake_uuid(i))
for i in xrange(5)]
for i in range(5)]
self.stubs.Set(db, 'instance_get_all_by_filters',
return_servers_with_host)


@ -21,6 +21,7 @@ from oslo_utils import netutils
from oslo_utils import timeutils
import routes
import six
from six.moves import range
import webob
import webob.dec
import webob.request
@ -410,7 +411,7 @@ def fake_instance_get_all_by_filters(num_servers=5, **kwargs):
if 'sort_dirs' in kwargs:
kwargs.pop('sort_dirs')
for i in xrange(num_servers):
for i in range(num_servers):
uuid = get_fake_uuid(i)
server = stub_instance(id=i + 1, uuid=uuid,
**kwargs)


@ -56,7 +56,7 @@ class RequestTest(test.NoDBTestCase):
def test_cache_and_retrieve_instances(self):
request = wsgi.Request.blank('/foo')
instances = []
for x in xrange(3):
for x in range(3):
instances.append({'uuid': 'uuid%s' % x})
# Store 2
request.cache_db_instances(instances[:2])
@ -77,7 +77,7 @@ class RequestTest(test.NoDBTestCase):
def test_cache_and_retrieve_compute_nodes(self):
request = wsgi.Request.blank('/foo')
compute_nodes = []
for x in xrange(3):
for x in range(3):
compute_nodes.append({'id': 'id%s' % x})
# Store 2
request.cache_db_compute_nodes(compute_nodes[:2])


@ -21,6 +21,7 @@ import datetime
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from six.moves import range
from nova.cells import messaging
from nova.cells import utils as cells_utils
@ -279,7 +280,7 @@ class CellsManagerClassTestCase(test.NoDBTestCase):
expected_response = []
# 3 cells... so 3 responses. Each response is a list of services.
# Manager should turn these into a single list of responses.
for i in xrange(3):
for i in range(3):
cell_name = 'path!to!cell%i' % i
services = []
for service in FAKE_SERVICES:
@ -405,7 +406,7 @@ class CellsManagerClassTestCase(test.NoDBTestCase):
# 3 cells... so 3 responses. Each response is a list of task log
# entries. Manager should turn these into a single list of
# task log entries.
for i in xrange(num):
for i in range(num):
cell_name = 'path!to!cell%i' % i
task_logs = []
for task_log in FAKE_TASK_LOGS:
@ -468,7 +469,7 @@ class CellsManagerClassTestCase(test.NoDBTestCase):
expected_response = []
# 3 cells... so 3 responses. Each response is a list of computes.
# Manager should turn these into a single list of responses.
for i in xrange(3):
for i in range(3):
cell_name = 'path!to!cell%i' % i
compute_nodes = []
for compute_node in FAKE_COMPUTE_NODES:


@ -75,7 +75,7 @@ class CellsRPCDriverTestCase(test.NoDBTestCase):
def stop(self):
call_info['stopped'].append(self)
fake_servers = [FakeRPCServer() for x in xrange(5)]
fake_servers = [FakeRPCServer() for x in range(5)]
self.driver.rpc_servers = fake_servers
self.driver.stop_servers()
self.assertEqual(fake_servers, call_info['stopped'])


@ -79,7 +79,7 @@ class CellsSchedulerTestCase(test.TestCase):
self.my_cell_state = self.state_manager.get_my_state()
self.ctxt = context.RequestContext('fake', 'fake')
instance_uuids = []
for x in xrange(3):
for x in range(3):
instance_uuids.append(uuidutils.generate_uuid())
self.instance_uuids = instance_uuids
self.instances = [objects.Instance(uuid=uuid, id=id)


@ -6130,7 +6130,7 @@ class ComputeTestCase(BaseTestCase):
instance_map = {}
instances = []
for x in xrange(8):
for x in range(8):
inst_uuid = 'fake-uuid-%s' % x
instance_map[inst_uuid] = fake_instance.fake_db_instance(
uuid=inst_uuid, host=CONF.host, created_at=None)
@ -6408,7 +6408,7 @@ class ComputeTestCase(BaseTestCase):
filters = {'vm_state': vm_states.BUILDING, 'host': CONF.host}
# these are the ones that are expired
old_instances = []
for x in xrange(4):
for x in range(4):
instance = {'uuid': str(uuid.uuid4()), 'created_at': created_at}
instance.update(filters)
old_instances.append(fake_instance.fake_db_instance(**instance))
@ -7506,7 +7506,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual([], instance.security_groups.objects)
def test_default_hostname_generator(self):
fake_uuids = [str(uuid.uuid4()) for x in xrange(4)]
fake_uuids = [str(uuid.uuid4()) for x in range(4)]
orig_populate = self.compute_api._populate_instance_for_create


@ -1083,7 +1083,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
def test_get_instances_on_driver(self):
driver_instances = []
for x in xrange(10):
for x in range(10):
driver_instances.append(fake_instance.fake_db_instance())
self.mox.StubOutWithMock(self.compute.driver,
@ -1128,7 +1128,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
all_instances = []
driver_instances = []
for x in xrange(10):
for x in range(10):
instance = fake_instance.fake_db_instance(name='inst-%i' % x,
id=x)
if x % 2:


@ -1013,7 +1013,7 @@ class _BaseTaskTestCase(object):
instances = [objects.Instance(context=self.context,
id=i,
uuid=uuid.uuid4(),
flavor=instance_type) for i in xrange(2)]
flavor=instance_type) for i in range(2)]
instance_type_p = obj_base.obj_to_primitive(instance_type)
instance_properties = instance_obj.compat_instance(instances[0])
@ -1095,7 +1095,7 @@ class _BaseTaskTestCase(object):
def test_build_instances_scheduler_failure(self):
instances = [fake_instance.fake_instance_obj(self.context)
for i in xrange(2)]
for i in range(2)]
image = {'fake-data': 'should_pass_silently'}
spec = {'fake': 'specs',
'instance_properties': instances[0]}
@ -1928,7 +1928,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
def test_build_instances_instance_not_found(self):
instances = [fake_instance.fake_instance_obj(self.context)
for i in xrange(2)]
for i in range(2)]
self.mox.StubOutWithMock(instances[0], 'refresh')
self.mox.StubOutWithMock(instances[1], 'refresh')
image = {'fake-data': 'should_pass_silently'}
@ -1986,7 +1986,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
def test_build_instances_info_cache_not_found(self, build_request_spec,
setup_instance_group):
instances = [fake_instance.fake_instance_obj(self.context)
for i in xrange(2)]
for i in range(2)]
image = {'fake-data': 'should_pass_silently'}
destinations = [{'host': 'host1', 'nodename': 'node1', 'limits': []},
{'host': 'host2', 'nodename': 'node2', 'limits': []}]


@ -60,7 +60,7 @@ class ConsoleauthTestCase(test.TestCase):
fake_validate_console_port)
def test_multiple_tokens_for_instance(self):
tokens = [u"token" + str(i) for i in xrange(10)]
tokens = [u"token" + str(i) for i in range(10)]
self._stub_validate_console_port(True)
@ -73,7 +73,7 @@ class ConsoleauthTestCase(test.TestCase):
self.assertTrue(self.manager_api.check_token(self.context, token))
def test_delete_tokens_for_instance(self):
tokens = [u"token" + str(i) for i in xrange(10)]
tokens = [u"token" + str(i) for i in range(10)]
for token in tokens:
self.manager_api.authorize_console(self.context, token, 'novnc',
'127.0.0.1', '8080', 'host',


@ -35,6 +35,7 @@ from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from six.moves import range
from sqlalchemy import Column
from sqlalchemy.dialects import sqlite
from sqlalchemy.exc import OperationalError
@ -6496,7 +6497,7 @@ class S3ImageTestCase(test.TestCase):
def setUp(self):
super(S3ImageTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.values = [uuidutils.generate_uuid() for i in xrange(3)]
self.values = [uuidutils.generate_uuid() for i in range(3)]
self.images = [db.s3_image_create(self.ctxt, uuid)
for uuid in self.values]
@ -6755,7 +6756,7 @@ class ComputeNodeTestCase(test.TestCase, ModelsObjectComparatorMixin):
def test_compute_node_search_by_hypervisor(self):
nodes_created = []
new_service = copy.copy(self.service_dict)
for i in xrange(3):
for i in range(3):
new_service['binary'] += str(i)
new_service['topic'] += str(i)
service = db.service_create(self.ctxt, new_service)
@ -6882,7 +6883,7 @@ class ProviderFwRuleTestCase(test.TestCase, ModelsObjectComparatorMixin):
'2001:4f8:3:ba::/64',
'2001:4f8:3:ba:2e0:81ff:fe22:d1f1/128']
values = []
for i in xrange(len(cidr_samples)):
for i in range(len(cidr_samples)):
rule = {}
rule['protocol'] = 'foo' + str(i)
rule['from_port'] = 9999 + i
@ -6922,7 +6923,7 @@ class CertificateTestCase(test.TestCase, ModelsObjectComparatorMixin):
'file_name': 'filename'
}
return [{k: v + str(x) for k, v in base_values.iteritems()}
for x in xrange(1, 4)]
for x in range(1, 4)]
def _certificates_create(self):
return [db.certificate_create(self.ctxt, cert)
@ -6983,7 +6984,7 @@ class ConsoleTestCase(test.TestCase, ModelsObjectComparatorMixin):
'password': 'pass' + str(x),
'port': 7878 + x,
'pool_id': self.console_pools[x]['id']}
for x in xrange(len(pools_data))]
for x in range(len(pools_data))]
self.consoles = [db.console_create(self.ctxt, val)
for val in self.console_data]
@ -7086,7 +7087,7 @@ class CellTestCase(test.TestCase, ModelsObjectComparatorMixin):
def _create_cells(self):
test_values = []
for x in xrange(1, 4):
for x in range(1, 4):
modified_val = {k: self._cell_value_modify(v, x)
for k, v in self._get_cell_base_values().iteritems()}
db.cell_create(self.ctxt, modified_val)
@ -8592,7 +8593,7 @@ class TestDBInstanceTags(test.TestCase):
uuid = self._create_instance()
tag = 'tag'
for x in xrange(5):
for x in range(5):
db.instance_tag_add(self.context, uuid, tag)
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid)


@ -24,6 +24,7 @@ library to work with nova.
import fnmatch
from oslo_serialization import jsonutils
from six.moves import range
class Store(object):
@ -140,7 +141,7 @@ def _paren_groups(source):
count = 0
start = 0
result = []
for pos in xrange(len(source)):
for pos in range(len(source)):
if source[pos] == '(':
if count == 0:
start = pos


@ -15,6 +15,7 @@
from oslo_config import cfg
from oslo_serialization import jsonutils
from six.moves import range
from nova.compute import api as compute_api
from nova.compute import manager as compute_manager
@ -221,12 +222,12 @@ def fake_vif(x):
def floating_ip_ids():
for i in xrange(1, 100):
for i in range(1, 100):
yield i
def fixed_ip_ids():
for i in xrange(1, 100):
for i in range(1, 100):
yield i
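
Both generators above depend on the half-open interval that xrange and range share: range(1, 100) yields 1 through 99 and never 100, so each fake ID generator produces at most 99 values. For example:

    from six.moves import range

    ids = list(range(1, 100))
    print(len(ids))    # 99
    print(ids[-1])     # 99, the stop value 100 is excluded
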
@ -237,7 +238,7 @@ fixed_ip_id = fixed_ip_ids()
def next_fixed_ip(network_id, num_floating_ips=0):
next_id = next(fixed_ip_id)
f_ips = [FakeModel(**next_floating_ip(next_id))
for i in xrange(num_floating_ips)]
for i in range(num_floating_ips)]
return {'id': next_id,
'network_id': network_id,
'address': '192.168.%d.%03d' % (network_id, (next_id + 99)),
@ -295,8 +296,8 @@ def fake_get_instance_nw_info(stubs, num_networks=1, ips_per_vif=2,
def fixed_ips_fake(*args, **kwargs):
global fixed_ips
ips = [next_fixed_ip(i, floating_ips_per_fixed_ip)
for i in xrange(1, num_networks + 1)
for j in xrange(ips_per_vif)]
for i in range(1, num_networks + 1)
for j in range(ips_per_vif)]
fixed_ips = ips
return ips


@ -17,6 +17,8 @@ from mox3 import mox
from neutronclient.common import exceptions as n_exc
from neutronclient.v2_0 import client
from six.moves import range
from nova import context
from nova import exception
from nova.network.neutronv2 import api as neutronapi
@ -222,7 +224,7 @@ class TestNeutronDriver(test.NoDBTestCase):
device_ids = []
ports = []
sg_bindings = {}
for i in xrange(0, num_servers):
for i in range(0, num_servers):
server_id = "server-%d" % i
port_id = "port-%d" % i
servers.append({'id': server_id})
@ -232,7 +234,7 @@ class TestNeutronDriver(test.NoDBTestCase):
'security_groups': [sg1_id, sg2_id]})
sg_bindings[server_id] = [{'name': 'wol'}, {'name': 'eor'}]
for x in xrange(0, num_servers, max_query):
for x in range(0, num_servers, max_query):
self.moxed_client.list_ports(
device_id=device_ids[x:x + max_query]).\
AndReturn({'ports': ports[x:x + max_query]})
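
The second hunk above walks the fake port list in fixed-size batches by giving range a step of max_query; the three-argument form behaves the same under six.moves.range. A small illustration of the slicing pattern with made-up sizes:

    from six.moves import range

    device_ids = ['server-%d' % i for i in range(7)]   # pretend num_servers is 7
    max_query = 3

    for x in range(0, len(device_ids), max_query):
        print(device_ids[x:x + max_query])
    # ['server-0', 'server-1', 'server-2']
    # ['server-3', 'server-4', 'server-5']
    # ['server-6']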


@ -134,7 +134,7 @@ class FixedIPTests(test.NoDBTestCase):
def test_add_floating_ip_repeatedly_only_one_instance(self):
fixed_ip = model.FixedIP(address='192.168.1.100')
for i in xrange(10):
for i in range(10):
fixed_ip.add_floating_ip('192.168.1.101')
self.assertEqual(fixed_ip['floating_ips'], ['192.168.1.101'])
@ -199,7 +199,7 @@ class SubnetTests(test.NoDBTestCase):
subnet = fake_network_cache_model.new_subnet()
route1 = fake_network_cache_model.new_route()
route2 = fake_network_cache_model.new_route({'cidr': '1.1.1.1/24'})
for i in xrange(10):
for i in range(10):
subnet.add_route(route2)
self.assertEqual(subnet['routes'], [route1, route2])
@ -214,7 +214,7 @@ class SubnetTests(test.NoDBTestCase):
def test_add_dns_a_lot(self):
subnet = fake_network_cache_model.new_subnet()
for i in xrange(10):
for i in range(10):
subnet.add_dns(fake_network_cache_model.new_ip(
dict(address='9.9.9.9')))
self.assertEqual(subnet['dns'],
@ -236,7 +236,7 @@ class SubnetTests(test.NoDBTestCase):
def test_add_ip_a_lot(self):
subnet = fake_network_cache_model.new_subnet()
for i in xrange(10):
for i in range(10):
subnet.add_ip(fake_network_cache_model.new_fixed_ip(
dict(address='192.168.1.102')))
self.assertEqual(subnet['ips'],
@ -293,7 +293,7 @@ class NetworkTests(test.NoDBTestCase):
def test_add_subnet_a_lot(self):
network = fake_network_cache_model.new_network()
for i in xrange(10):
for i in range(10):
network.add_subnet(fake_network_cache_model.new_subnet(
dict(cidr='0.0.0.0')))
self.assertEqual(network['subnets'],


@ -27,6 +27,7 @@ from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import six
from six.moves import range
from nova.compute import flavors
from nova import context
@ -596,7 +597,7 @@ class TestNeutronv2Base(test.TestCase):
nets = number == 1 and self.nets1 or self.nets2
self.moxed_client.list_networks(
id=net_ids).AndReturn({'networks': nets})
for i in xrange(1, number + 1):
for i in range(1, number + 1):
float_data = number == 1 and self.float_data1 or self.float_data2
for ip in port_data[i - 1]['fixed_ips']:
float_data = [x for x in float_data
@ -619,7 +620,7 @@ class TestNeutronv2Base(test.TestCase):
net_info_cache)
instance = self._fake_instance_object_with_info_cache(self.instance)
nw_inf = api.get_instance_nw_info(self.context, instance)
for i in xrange(0, number):
for i in range(0, number):
self._verify_nw_info(nw_inf, i)
def _allocate_for_instance(self, net_idx=1, **kwargs):


@ -15,6 +15,7 @@
import mock
from oslo_utils import timeutils
from six.moves import range
from nova import exception
from nova.scheduler import caching_scheduler
@ -144,7 +145,7 @@ class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
request_spec = self._get_fake_request_spec()
host_states = []
for x in xrange(hosts):
for x in range(hosts):
host_state = self._get_fake_host_state(x)
host_states.append(host_state)
self.driver.all_host_states = host_states
@ -152,7 +153,7 @@ class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
def run_test():
a = timeutils.utcnow()
for x in xrange(requests):
for x in range(requests):
self.driver.select_destinations(
self.context, request_spec, {})


@ -18,6 +18,8 @@ Tests For Scheduler Host Filters.
import inspect
import sys
from six.moves import range
from nova import filters
from nova import loadables
from nova import test
@ -72,15 +74,15 @@ class FiltersTestCase(test.NoDBTestCase):
# call gets to processing 'obj2'. We then return 'False' for it.
# After that, 'obj3' gets yielded 'total_iterations' number of
# times.
for x in xrange(total_iterations):
for x in range(total_iterations):
base_filter._filter_one('obj1', filter_properties).AndReturn(True)
base_filter._filter_one('obj2', filter_properties).AndReturn(False)
for x in xrange(total_iterations):
for x in range(total_iterations):
base_filter._filter_one('obj3', filter_properties).AndReturn(True)
self.mox.ReplayAll()
objs = iter(filter_obj_list)
for x in xrange(total_iterations):
for x in range(total_iterations):
# Pass in generators returned from previous calls.
objs = base_filter.filter_all(objs, filter_properties)
self.assertTrue(inspect.isgenerator(objs))


@ -67,9 +67,9 @@ class HostManagerTestCase(test.NoDBTestCase):
self.flags(scheduler_default_filters=['FakeFilterClass1'])
self.host_manager = host_manager.HostManager()
self.fake_hosts = [host_manager.HostState('fake_host%s' % x,
'fake-node') for x in xrange(1, 5)]
'fake-node') for x in range(1, 5)]
self.fake_hosts += [host_manager.HostState('fake_multihost',
'fake-node%s' % x) for x in xrange(1, 5)]
'fake-node%s' % x) for x in range(1, 5)]
def test_load_filters(self):
filters = self.host_manager._load_filters()
@ -388,7 +388,7 @@ class HostManagerTestCase(test.NoDBTestCase):
self.assertEqual(len(host_states_map), 4)
# Check that .service is set properly
for i in xrange(4):
for i in range(4):
compute_node = fakes.COMPUTE_NODES[i]
host = compute_node['host']
node = compute_node['hypervisor_hostname']


@ -18,6 +18,7 @@ import datetime
from oslo_config import cfg
from oslo_utils import timeutils
from six.moves import range
from nova import compute
from nova.compute import flavors
@ -174,39 +175,39 @@ class QuotaIntegrationTestCase(test.TestCase):
def test_max_injected_files(self):
files = []
for i in xrange(CONF.quota_injected_files):
for i in range(CONF.quota_injected_files):
files.append(('/my/path%d' % i, 'config = test\n'))
self._create_with_injected_files(files) # no QuotaError
def test_too_many_injected_files(self):
files = []
for i in xrange(CONF.quota_injected_files + 1):
for i in range(CONF.quota_injected_files + 1):
files.append(('/my/path%d' % i, 'my\ncontent%d\n' % i))
self.assertRaises(exception.QuotaError,
self._create_with_injected_files, files)
def test_max_injected_file_content_bytes(self):
max = CONF.quota_injected_file_content_bytes
content = ''.join(['a' for i in xrange(max)])
content = ''.join(['a' for i in range(max)])
files = [('/test/path', content)]
self._create_with_injected_files(files) # no QuotaError
def test_too_many_injected_file_content_bytes(self):
max = CONF.quota_injected_file_content_bytes
content = ''.join(['a' for i in xrange(max + 1)])
content = ''.join(['a' for i in range(max + 1)])
files = [('/test/path', content)]
self.assertRaises(exception.QuotaError,
self._create_with_injected_files, files)
def test_max_injected_file_path_bytes(self):
max = CONF.quota_injected_file_path_length
path = ''.join(['a' for i in xrange(max)])
path = ''.join(['a' for i in range(max)])
files = [(path, 'config = quotatest')]
self._create_with_injected_files(files) # no QuotaError
def test_too_many_injected_file_path_bytes(self):
max = CONF.quota_injected_file_path_length
path = ''.join(['a' for i in xrange(max + 1)])
path = ''.join(['a' for i in range(max + 1)])
files = [(path, 'config = quotatest')]
self.assertRaises(exception.QuotaError,
self._create_with_injected_files, files)


@ -19,6 +19,7 @@ import sys
import mock
from oslo_config import cfg
from six.moves import range
from nova.compute import flavors
import nova.context
@ -145,7 +146,7 @@ def get_test_network_info(count=1):
return vif
return network_model.NetworkInfo([current() for x in xrange(0, count)])
return network_model.NetworkInfo([current() for x in range(0, count)])
def is_osx():


@ -15,6 +15,8 @@
import mock
from six.moves import range
from nova import exception
from nova import test
from nova.virt.hyperv import constants
@ -270,7 +272,7 @@ class VMUtilsTestCase(test.NoDBTestCase):
def test_get_free_controller_slot_exception(self):
fake_drive = mock.MagicMock()
type(fake_drive).AddressOnParent = mock.PropertyMock(
side_effect=xrange(constants.SCSI_CONTROLLER_SLOTS_NUMBER))
side_effect=range(constants.SCSI_CONTROLLER_SLOTS_NUMBER))
with mock.patch.object(self._vmutils,
'get_attached_disks') as fake_get_attached_disks:
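
A note on the side_effect change above: when mock's side_effect is an iterable, each access to the mocked property returns the next element, so a lazy range object works directly and needs no list() wrapper. A tiny self-contained illustration, with an arbitrary count of 3:

    import mock                    # on Python 3 this also ships as unittest.mock
    from six.moves import range

    fake_drive = mock.MagicMock()
    type(fake_drive).AddressOnParent = mock.PropertyMock(side_effect=range(3))

    print(fake_drive.AddressOnParent)   # 0
    print(fake_drive.AddressOnParent)   # 1
    print(fake_drive.AddressOnParent)   # 2 (a fourth access raises StopIteration)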


@ -43,6 +43,7 @@ from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
import six
from six.moves import range
from nova.api.metadata import base as instance_metadata
from nova.compute import arch
@ -8258,7 +8259,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
return_infos = [(libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple]
return_shutdowns = [shutdown_count.append("shutdown")]
retry_countdown = retry_interval
for x in xrange(min(seconds_to_shutdown, timeout)):
for x in range(min(seconds_to_shutdown, timeout)):
return_infos.append(
(libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
if retry_countdown == 0:
@ -8932,7 +8933,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
# Generate mempages list per cell
cell_mempages = list()
for cellid in xrange(cells_per_host):
for cellid in range(cells_per_host):
mempages_0 = vconfig.LibvirtConfigCapsNUMAPages()
mempages_0.size = 4
mempages_0.total = 1024 * cellid


@ -824,7 +824,7 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
def fake_get_all_by_filters(context, *args, **kwargs):
was['called'] = True
instances = []
for x in xrange(2):
for x in range(2):
instances.append(fake_instance.fake_db_instance(
image_ref='1',
uuid=x,


@ -685,9 +685,9 @@ class TestDriverBlockDevice(test.NoDBTestCase):
# Test that the ephemerals work as expected
test_ephemerals = [self.driver_classes['ephemeral'](
self.ephemeral_bdm) for _ in xrange(2)]
self.ephemeral_bdm) for _ in range(2)]
expected = [self.ephemeral_legacy_driver_bdm.copy()
for _ in xrange(2)]
for _ in range(2)]
expected[0]['virtual_name'] = 'ephemeral0'
expected[0]['num'] = 0
expected[1]['virtual_name'] = 'ephemeral1'


@ -377,7 +377,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
def test_list_instance_uuids(self):
uuids = []
for x in xrange(1, 4):
for x in range(1, 4):
instance = self._create_instance()
uuids.append(instance['uuid'])
instance_uuids = self.conn.list_instance_uuids()
@ -1625,7 +1625,7 @@ class XenAPIDiffieHellmanTestCase(test.NoDBTestCase):
self._test_encryption('\n\nMessage with leading newlines.')
def test_encrypt_really_long_message(self):
self._test_encryption(''.join(['abcd' for i in xrange(1024)]))
self._test_encryption(''.join(['abcd' for i in range(1024)]))
# FIXME(sirp): convert this to use XenAPITestBaseNoDB


@ -47,6 +47,7 @@ from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
import six
from six.moves import range
from nova import exception
from nova.i18n import _, _LE, _LW
@ -249,7 +250,7 @@ def novadir():
def generate_uid(topic, size=8):
characters = '01234567890abcdefghijklmnopqrstuvwxyz'
choices = [random.choice(characters) for _x in xrange(size)]
choices = [random.choice(characters) for _x in range(size)]
return '%s-%s' % (topic, ''.join(choices))
@ -384,7 +385,7 @@ def generate_password(length=None, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
# then fill with random characters from all symbol groups
symbols = ''.join(symbolgroups)
password.extend([r.choice(symbols) for _i in xrange(length)])
password.extend([r.choice(symbols) for _i in range(length)])
# finally shuffle to ensure first x characters aren't from a
# predictable group


@ -27,6 +27,7 @@ if sys.platform == 'win32':
from oslo_config import cfg
from oslo_log import log as logging
from six.moves import range
from nova import exception
from nova.i18n import _, _LW
@ -729,7 +730,7 @@ class VMUtils(object):
attached_disks = self.get_attached_disks(scsi_controller_path)
used_slots = [int(disk.AddressOnParent) for disk in attached_disks]
for slot in xrange(constants.SCSI_CONTROLLER_SLOTS_NUMBER):
for slot in range(constants.SCSI_CONTROLLER_SLOTS_NUMBER):
if slot not in used_slots:
return slot
raise HyperVException(_("Exceeded the maximum number of slots"))
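
The loop above is a linear search for the first SCSI slot index that is not already in use; range keeps the scan lazy and it stops as soon as a free slot is found. A compact stand-alone version of the same pattern, with an example slot total and a generic exception in place of HyperVException:

    from six.moves import range

    def first_free_slot(used_slots, total_slots=64):
        for slot in range(total_slots):
            if slot not in used_slots:
                return slot
        raise RuntimeError("Exceeded the maximum number of slots")

    print(first_free_slot({0, 1, 3}))   # 2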


@ -25,6 +25,7 @@ import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from six.moves import range
from nova import exception
from nova.i18n import _, _LE, _LW
@ -284,7 +285,7 @@ class ISCSIVolumeDriver(object):
# The WMI query in get_device_number_for_target can incorrectly
# return no data when the system is under load. This issue can
# be avoided by adding a retry.
for i in xrange(CONF.hyperv.mounted_disk_query_retry_count):
for i in range(CONF.hyperv.mounted_disk_query_retry_count):
device_number = self._volutils.get_device_number_for_target(
target_iqn, target_lun)
if device_number in (None, -1):


@ -28,6 +28,7 @@ import time
from oslo_config import cfg
from oslo_log import log as logging
from six.moves import range
from nova.i18n import _
from nova import utils
@ -86,7 +87,7 @@ class VolumeUtils(basevolumeutils.BaseVolumeUtils):
if retry_count < 2:
retry_count = 2
for attempt in xrange(retry_count):
for attempt in range(retry_count):
try:
session_info = self.execute('iscsicli.exe', 'SessionList')
if session_info.find(target_iqn) == -1:


@ -26,6 +26,7 @@ if sys.platform == 'win32':
from oslo_config import cfg
from oslo_log import log as logging
from six.moves import range
from nova.i18n import _
from nova import utils
@ -78,7 +79,7 @@ class VolumeUtilsV2(basevolumeutils.BaseVolumeUtils):
if retry_count < 2:
retry_count = 2
for attempt in xrange(retry_count):
for attempt in range(retry_count):
target = self._conn_storage.query("SELECT * FROM MSFT_iSCSITarget "
"WHERE NodeAddress='%s' " %
target_iqn)


@ -54,6 +54,7 @@ from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import units
import six
from six.moves import range
from nova.api.metadata import base as instance_metadata
from nova import block_device
@ -2042,7 +2043,7 @@ class LibvirtDriver(driver.ComputeDriver):
# call takes to return.
self._prepare_pci_devices_for_use(
pci_manager.get_instance_pci_devs(instance, 'all'))
for x in xrange(CONF.libvirt.wait_soft_reboot_seconds):
for x in range(CONF.libvirt.wait_soft_reboot_seconds):
dom = self._host.get_domain(instance)
state = self._get_power_state(dom)
new_domid = dom.ID()
@ -5243,7 +5244,7 @@ class LibvirtDriver(driver.ComputeDriver):
# nwfilters may be defined in a separate thread in the case
# of libvirt non-blocking mode, so we wait for completion
timeout_count = range(CONF.live_migration_retry_count)
timeout_count = list(range(CONF.live_migration_retry_count))
while timeout_count:
if self.firewall_driver.instance_filter_exists(instance,
network_info):
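
The list() wrapper in the last hunk is the one spot in this change where a bare range object is not a drop-in replacement: the loop that follows treats timeout_count as a mutable countdown (the usual reason for the wrapper is a pop() per retry), and a Python 3 range object has no pop(). A minimal sketch of that countdown shape with a stand-in condition and zero delay:

    import time

    from six.moves import range

    def wait_for(condition, retries=5, delay=0):
        # list() matters here: a Python 3 range object cannot be pop()'d.
        timeout_count = list(range(retries))
        while timeout_count:
            if condition():
                return True
            timeout_count.pop()
            time.sleep(delay)
        return False

    print(wait_for(lambda: True))    # True
    print(wait_for(lambda: False))   # False after five polls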


@ -23,6 +23,7 @@ from eventlet import queue
from eventlet import timeout
from oslo_config import cfg
from oslo_log import log as logging
from six.moves import range
from nova import context
from nova import exception
@ -128,7 +129,7 @@ class XenAPISession(object):
return url
def _populate_session_pool(self, url, user, pw, exception):
for i in xrange(CONF.xenserver.connection_concurrent - 1):
for i in range(CONF.xenserver.connection_concurrent - 1):
session = self._create_session(url)
with timeout.Timeout(CONF.xenserver.login_timeout, exception):
session.login_with_password(user, pw,
@ -217,7 +218,7 @@ class XenAPISession(object):
"""Allows a plugin to raise RetryableError so we can try again."""
attempts = num_retries + 1
sleep_time = 0.5
for attempt in xrange(1, attempts + 1):
for attempt in range(1, attempts + 1):
try:
if attempt > 1:
time.sleep(sleep_time)


@ -37,6 +37,7 @@ from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import units
import six
from six.moves import range
import six.moves.urllib.parse as urlparse
from nova.api.metadata import base as instance_metadata
@ -382,7 +383,7 @@ def _should_retry_unplug_vbd(err):
def unplug_vbd(session, vbd_ref, this_vm_ref):
# make sure that perform at least once
max_attempts = max(0, CONF.xenserver.num_vbd_unplug_retries) + 1
for num_attempt in xrange(1, max_attempts + 1):
for num_attempt in range(1, max_attempts + 1):
try:
if num_attempt > 1:
greenthread.sleep(1)
@ -1366,7 +1367,7 @@ def _make_uuid_stack():
# which does not have the `uuid` module. To work around this,
# we generate the uuids here (under Python 2.6+) and
# pass them as arguments
return [str(uuid.uuid4()) for i in xrange(MAX_VDI_CHAIN_SIZE)]
return [str(uuid.uuid4()) for i in range(MAX_VDI_CHAIN_SIZE)]
def _image_uses_bittorrent(context, instance):
@ -2099,7 +2100,7 @@ def _wait_for_vhd_coalesce(session, instance, sr_ref, vdi_ref,
# Its possible that other coalesce operation happen, so we need
# to consider the full chain, rather than just the most recent parent.
good_parent_uuids = vdi_uuid_list[1:]
for i in xrange(max_attempts):
for i in range(max_attempts):
# NOTE(sirp): This rescan is necessary to ensure the VM's `sm_config`
# matches the underlying VHDs.
# This can also kick XenServer into performing a pending coalesce.
@ -2146,7 +2147,7 @@ def _remap_vbd_dev(dev):
def _wait_for_device(dev):
"""Wait for device node to appear."""
for i in xrange(0, CONF.xenserver.block_device_creation_timeout):
for i in range(0, CONF.xenserver.block_device_creation_timeout):
dev_path = utils.make_dev_path(dev)
if os.path.exists(dev_path):
return