Replace xrange() with six.moves.range()
Add "from six.moves import range" to replace the builtin range() function with six.moves.range(), which always creates an iterator instead of creating a temporary list. Replace "xrange" with "range". Don't add the import for ranges of 1024 items or fewer. Blueprint cinder-python3 Change-Id: If618b4e810e444f7eb6592bb2398805e9d14d548
This commit is contained in:
parent
106261582f
commit
8e63cccb65
@ -54,6 +54,7 @@ from oslo_log import log as logging
|
||||
from oslo_utils import encodeutils
|
||||
from oslo_utils import excutils
|
||||
from oslo_utils import units
|
||||
from six.moves import range
|
||||
|
||||
from cinder.backup import driver
|
||||
from cinder import exception
|
||||
@ -284,7 +285,7 @@ class CephBackupDriver(driver.BackupDriver):
|
||||
else:
|
||||
zeroes = '\0' * length
|
||||
chunks = int(length / self.chunk_size)
|
||||
for chunk in xrange(0, chunks):
|
||||
for chunk in range(0, chunks):
|
||||
LOG.debug("Writing zeroes chunk %d", chunk)
|
||||
volume.write(zeroes)
|
||||
volume.flush()
|
||||
@ -306,7 +307,7 @@ class CephBackupDriver(driver.BackupDriver):
|
||||
LOG.debug("%(chunks)s chunks of %(bytes)s bytes to be transferred",
|
||||
{'chunks': chunks, 'bytes': self.chunk_size})
|
||||
|
||||
for chunk in xrange(0, chunks):
|
||||
for chunk in range(0, chunks):
|
||||
before = time.time()
|
||||
data = src.read(self.chunk_size)
|
||||
# If we have reach end of source, discard any extraneous bytes from
|
||||
|
@ -19,6 +19,7 @@
|
||||
"""Implementation of paginate query."""
|
||||
|
||||
from oslo_log import log as logging
|
||||
from six.moves import range
|
||||
import sqlalchemy
|
||||
|
||||
from cinder import exception
|
||||
@ -100,9 +101,9 @@ def paginate_query(query, model, limit, sort_keys, marker=None,
|
||||
|
||||
# Build up an array of sort criteria as in the docstring
|
||||
criteria_list = []
|
||||
for i in xrange(0, len(sort_keys)):
|
||||
for i in range(0, len(sort_keys)):
|
||||
crit_attrs = []
|
||||
for j in xrange(0, i):
|
||||
for j in range(0, i):
|
||||
model_attr = getattr(model, sort_keys[j])
|
||||
crit_attrs.append((model_attr == marker_values[j]))
|
||||
|
||||
|
@ -32,6 +32,7 @@ from oslo_log import log as logging
|
||||
from oslo_serialization import jsonutils
|
||||
from oslo_utils import timeutils
|
||||
import six
|
||||
from six.moves import range
|
||||
from six.moves import urllib
|
||||
|
||||
from cinder import exception
|
||||
@ -169,7 +170,7 @@ class GlanceClientWrapper(object):
|
||||
glanceclient.exc.CommunicationError)
|
||||
num_attempts = 1 + CONF.glance_num_retries
|
||||
|
||||
for attempt in xrange(1, num_attempts + 1):
|
||||
for attempt in range(1, num_attempts + 1):
|
||||
client = self.client or self._create_onetime_client(context,
|
||||
version)
|
||||
try:
|
||||
|
@ -114,7 +114,7 @@ class RequestTest(test.TestCase):
|
||||
self.assertIsNone(request.cached_resource_by_id('r-0'))
|
||||
|
||||
resources = []
|
||||
for x in xrange(3):
|
||||
for x in range(3):
|
||||
resources.append({'id': 'r-%s' % x})
|
||||
|
||||
# Cache an empty list of resources using the default name
|
||||
@ -161,7 +161,7 @@ class RequestTest(test.TestCase):
|
||||
|
||||
r = wsgi.Request.blank('/foo')
|
||||
resources = []
|
||||
for x in xrange(3):
|
||||
for x in range(3):
|
||||
resources.append({'id': 'id%s' % x})
|
||||
|
||||
# Store 2
|
||||
|
@ -23,6 +23,7 @@ from lxml import etree
|
||||
from oslo_serialization import jsonutils
|
||||
import six
|
||||
from six.moves import http_client
|
||||
from six.moves import range
|
||||
import webob
|
||||
|
||||
from cinder.api.v1 import limits
|
||||
@ -401,7 +402,7 @@ class LimiterTest(BaseLimitTestSuite):
|
||||
|
||||
def _check(self, num, verb, url, username=None):
|
||||
"""Check and yield results from checks."""
|
||||
for x in xrange(num):
|
||||
for x in range(num):
|
||||
yield self.limiter.check_for_delay(verb, url, username)[0]
|
||||
|
||||
def _check_sum(self, num, verb, url, username=None):
|
||||
|
@ -23,6 +23,7 @@ from lxml import etree
|
||||
from oslo_serialization import jsonutils
|
||||
import six
|
||||
from six.moves import http_client
|
||||
from six.moves import range
|
||||
import webob
|
||||
|
||||
from cinder.api.v2 import limits
|
||||
@ -406,7 +407,7 @@ class LimiterTest(BaseLimitTestSuite):
|
||||
|
||||
def _check(self, num, verb, url, username=None):
|
||||
"""Check and yield results from checks."""
|
||||
for x in xrange(num):
|
||||
for x in range(num):
|
||||
yield self.limiter.check_for_delay(verb, url, username)[0]
|
||||
|
||||
def _check_sum(self, num, verb, url, username=None):
|
||||
|
@ -21,6 +21,7 @@ import mock
|
||||
from oslo_config import cfg
|
||||
from oslo_utils import timeutils
|
||||
import six
|
||||
from six.moves import range
|
||||
from six.moves import urllib
|
||||
import webob
|
||||
|
||||
@ -1118,7 +1119,7 @@ class VolumeApiTest(test.TestCase):
|
||||
filters=None,
|
||||
viewable_admin_meta=False):
|
||||
vols = [stubs.stub_volume(i)
|
||||
for i in xrange(CONF.osapi_max_limit)]
|
||||
for i in range(CONF.osapi_max_limit)]
|
||||
if limit is None or limit >= len(vols):
|
||||
return vols
|
||||
return vols[:limit]
|
||||
@ -1136,7 +1137,7 @@ class VolumeApiTest(test.TestCase):
|
||||
filters=None,
|
||||
viewable_admin_meta=False):
|
||||
vols = [stubs.stub_volume(i)
|
||||
for i in xrange(100)]
|
||||
for i in range(100)]
|
||||
if limit is None or limit >= len(vols):
|
||||
return vols
|
||||
return vols[:limit]
|
||||
@ -1154,7 +1155,7 @@ class VolumeApiTest(test.TestCase):
|
||||
filters=None,
|
||||
viewable_admin_meta=False):
|
||||
vols = [stubs.stub_volume(i)
|
||||
for i in xrange(CONF.osapi_max_limit + 100)]
|
||||
for i in range(CONF.osapi_max_limit + 100)]
|
||||
if limit is None or limit >= len(vols):
|
||||
return vols
|
||||
return vols[:limit]
|
||||
|
@ -289,7 +289,7 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
|
||||
mock.Mock(return_value=mock_remotefsclient))
|
||||
# Remove tempdir.
|
||||
self.addCleanup(shutil.rmtree, self.temp_dir)
|
||||
for _i in xrange(0, 128):
|
||||
for _i in range(0, 128):
|
||||
self.volume_file.write(os.urandom(1024))
|
||||
|
||||
def test_backup_uncompressed(self):
|
||||
|
@ -62,6 +62,6 @@ class ChanceWeigherTestCase(test.TestCase):
|
||||
# the ChanceWeigher
|
||||
hm = host_manager.HostManager()
|
||||
fake_hosts = [host_manager.HostState('fake_host%s' % x)
|
||||
for x in xrange(1, 5)]
|
||||
for x in range(1, 5)]
|
||||
weighed_hosts = hm.get_weighed_hosts(fake_hosts, {}, 'ChanceWeigher')
|
||||
self.assertEqual(4, len(weighed_hosts))
|
||||
|
@ -46,7 +46,7 @@ class HostManagerTestCase(test.TestCase):
|
||||
super(HostManagerTestCase, self).setUp()
|
||||
self.host_manager = host_manager.HostManager()
|
||||
self.fake_hosts = [host_manager.HostState('fake_host%s' % x)
|
||||
for x in xrange(1, 5)]
|
||||
for x in range(1, 5)]
|
||||
|
||||
def test_choose_host_filters_not_found(self):
|
||||
self.flags(scheduler_default_filters='FakeFilterClass3')
|
||||
@ -254,7 +254,7 @@ class HostManagerTestCase(test.TestCase):
|
||||
# Get host_state_map and make sure we have the first 4 hosts
|
||||
host_state_map = self.host_manager.host_state_map
|
||||
self.assertEqual(len(host_state_map), 3)
|
||||
for i in xrange(3):
|
||||
for i in range(3):
|
||||
volume_node = services[i]
|
||||
host = volume_node['host']
|
||||
self.assertEqual(host_state_map[host].service, volume_node)
|
||||
@ -280,7 +280,7 @@ class HostManagerTestCase(test.TestCase):
|
||||
# down, host4 is missing capabilities)
|
||||
host_state_map = self.host_manager.host_state_map
|
||||
self.assertEqual(len(host_state_map), 2)
|
||||
for i in xrange(2):
|
||||
for i in range(2):
|
||||
volume_node = services[i]
|
||||
host = volume_node['host']
|
||||
self.assertEqual(host_state_map[host].service,
|
||||
|
@ -24,6 +24,7 @@ from oslo_concurrency import processutils
|
||||
from oslo_log import log as logging
|
||||
from oslo_serialization import jsonutils
|
||||
import six
|
||||
from six.moves import range
|
||||
|
||||
from cinder.backup import driver
|
||||
from cinder.backup.drivers import ceph
|
||||
@ -171,7 +172,7 @@ class BackupCephTestCase(test.TestCase):
|
||||
# Create a file with some data in it.
|
||||
self.volume_file = tempfile.NamedTemporaryFile()
|
||||
self.addCleanup(self.volume_file.close)
|
||||
for _i in xrange(0, self.num_chunks):
|
||||
for _i in range(0, self.num_chunks):
|
||||
data = os.urandom(self.chunk_size)
|
||||
self.checksum.update(data)
|
||||
self.volume_file.write(data)
|
||||
@ -285,7 +286,7 @@ class BackupCephTestCase(test.TestCase):
|
||||
|
||||
checksum = hashlib.sha256()
|
||||
test_file.seek(0)
|
||||
for _c in xrange(0, self.num_chunks):
|
||||
for _c in range(0, self.num_chunks):
|
||||
checksum.update(test_file.read(self.chunk_size))
|
||||
|
||||
# Ensure the files are equal
|
||||
@ -350,7 +351,7 @@ class BackupCephTestCase(test.TestCase):
|
||||
|
||||
checksum = hashlib.sha256()
|
||||
test_file.seek(0)
|
||||
for _c in xrange(0, self.num_chunks):
|
||||
for _c in range(0, self.num_chunks):
|
||||
checksum.update(test_file.read(self.chunk_size))
|
||||
|
||||
# Ensure the files are equal
|
||||
@ -627,7 +628,7 @@ class BackupCephTestCase(test.TestCase):
|
||||
|
||||
checksum = hashlib.sha256()
|
||||
test_file.seek(0)
|
||||
for _c in xrange(0, self.num_chunks):
|
||||
for _c in range(0, self.num_chunks):
|
||||
checksum.update(test_file.read(self.chunk_size))
|
||||
|
||||
# Ensure the files are equal
|
||||
|
@ -90,7 +90,7 @@ class BackupSwiftTestCase(test.TestCase):
|
||||
self.addCleanup(self.volume_file.close)
|
||||
# Remove tempdir.
|
||||
self.addCleanup(shutil.rmtree, self.temp_dir)
|
||||
for _i in xrange(0, 128):
|
||||
for _i in range(0, 128):
|
||||
self.volume_file.write(os.urandom(1024))
|
||||
|
||||
def test_backup_swift_url(self):
|
||||
|
@ -268,34 +268,34 @@ class DBAPIVolumeTestCase(BaseTest):
|
||||
self.assertEqual(attachment['attached_host'], host_name)
|
||||
|
||||
def test_volume_data_get_for_host(self):
|
||||
for i in xrange(THREE):
|
||||
for j in xrange(THREE):
|
||||
for i in range(THREE):
|
||||
for j in range(THREE):
|
||||
db.volume_create(self.ctxt, {'host': 'h%d' % i,
|
||||
'size': ONE_HUNDREDS})
|
||||
for i in xrange(THREE):
|
||||
for i in range(THREE):
|
||||
self.assertEqual((THREE, THREE_HUNDREDS),
|
||||
db.volume_data_get_for_host(
|
||||
self.ctxt, 'h%d' % i))
|
||||
|
||||
def test_volume_data_get_for_host_for_multi_backend(self):
|
||||
for i in xrange(THREE):
|
||||
for j in xrange(THREE):
|
||||
for i in range(THREE):
|
||||
for j in range(THREE):
|
||||
db.volume_create(self.ctxt, {'host':
|
||||
'h%d@lvmdriver-1#lvmdriver-1' % i,
|
||||
'size': ONE_HUNDREDS})
|
||||
for i in xrange(THREE):
|
||||
for i in range(THREE):
|
||||
self.assertEqual((THREE, THREE_HUNDREDS),
|
||||
db.volume_data_get_for_host(
|
||||
self.ctxt, 'h%d@lvmdriver-1' % i))
|
||||
|
||||
def test_volume_data_get_for_project(self):
|
||||
for i in xrange(THREE):
|
||||
for j in xrange(THREE):
|
||||
for i in range(THREE):
|
||||
for j in range(THREE):
|
||||
db.volume_create(self.ctxt, {'project_id': 'p%d' % i,
|
||||
'size': ONE_HUNDREDS,
|
||||
'host': 'h-%d-%d' % (i, j),
|
||||
})
|
||||
for i in xrange(THREE):
|
||||
for i in range(THREE):
|
||||
self.assertEqual((THREE, THREE_HUNDREDS),
|
||||
db.volume_data_get_for_project(
|
||||
self.ctxt, 'p%d' % i))
|
||||
@ -349,7 +349,7 @@ class DBAPIVolumeTestCase(BaseTest):
|
||||
def test_volume_get_all(self):
|
||||
volumes = [db.volume_create(self.ctxt,
|
||||
{'host': 'h%d' % i, 'size': i})
|
||||
for i in xrange(3)]
|
||||
for i in range(3)]
|
||||
self._assertEqualListsOfObjects(volumes, db.volume_get_all(
|
||||
self.ctxt, None, None, ['host'], None))
|
||||
|
||||
@ -366,10 +366,10 @@ class DBAPIVolumeTestCase(BaseTest):
|
||||
|
||||
def test_volume_get_all_by_host(self):
|
||||
volumes = []
|
||||
for i in xrange(3):
|
||||
for i in range(3):
|
||||
volumes.append([db.volume_create(self.ctxt, {'host': 'h%d' % i})
|
||||
for j in xrange(3)])
|
||||
for i in xrange(3):
|
||||
for j in range(3)])
|
||||
for i in range(3):
|
||||
self._assertEqualListsOfObjects(volumes[i],
|
||||
db.volume_get_all_by_host(
|
||||
self.ctxt, 'h%d' % i))
|
||||
@ -377,7 +377,7 @@ class DBAPIVolumeTestCase(BaseTest):
|
||||
def test_volume_get_all_by_host_with_pools(self):
|
||||
volumes = []
|
||||
vol_on_host_wo_pool = [db.volume_create(self.ctxt, {'host': 'foo'})
|
||||
for j in xrange(3)]
|
||||
for j in range(3)]
|
||||
vol_on_host_w_pool = [db.volume_create(
|
||||
self.ctxt, {'host': 'foo#pool0'})]
|
||||
volumes.append((vol_on_host_wo_pool +
|
||||
@ -424,10 +424,10 @@ class DBAPIVolumeTestCase(BaseTest):
|
||||
|
||||
def test_volume_get_all_by_group(self):
|
||||
volumes = []
|
||||
for i in xrange(3):
|
||||
for i in range(3):
|
||||
volumes.append([db.volume_create(self.ctxt, {
|
||||
'consistencygroup_id': 'g%d' % i}) for j in xrange(3)])
|
||||
for i in xrange(3):
|
||||
'consistencygroup_id': 'g%d' % i}) for j in range(3)])
|
||||
for i in range(3):
|
||||
self._assertEqualListsOfObjects(volumes[i],
|
||||
db.volume_get_all_by_group(
|
||||
self.ctxt, 'g%d' % i))
|
||||
@ -462,10 +462,10 @@ class DBAPIVolumeTestCase(BaseTest):
|
||||
|
||||
def test_volume_get_all_by_project(self):
|
||||
volumes = []
|
||||
for i in xrange(3):
|
||||
for i in range(3):
|
||||
volumes.append([db.volume_create(self.ctxt, {
|
||||
'project_id': 'p%d' % i}) for j in xrange(3)])
|
||||
for i in xrange(3):
|
||||
'project_id': 'p%d' % i}) for j in range(3)])
|
||||
for i in range(3):
|
||||
self._assertEqualListsOfObjects(volumes[i],
|
||||
db.volume_get_all_by_project(
|
||||
self.ctxt, 'p%d' % i, None,
|
||||
@ -567,21 +567,21 @@ class DBAPIVolumeTestCase(BaseTest):
|
||||
{'project_id': 'g1',
|
||||
'display_name': 'name_%d' % i,
|
||||
'size': 1})
|
||||
for i in xrange(2)])
|
||||
for i in range(2)])
|
||||
vols.extend([db.volume_create(self.ctxt,
|
||||
{'project_id': 'g1',
|
||||
'display_name': 'name_%d' % i,
|
||||
'size': 2})
|
||||
for i in xrange(2)])
|
||||
for i in range(2)])
|
||||
vols.extend([db.volume_create(self.ctxt,
|
||||
{'project_id': 'g1',
|
||||
'display_name': 'name_%d' % i})
|
||||
for i in xrange(2)])
|
||||
for i in range(2)])
|
||||
vols.extend([db.volume_create(self.ctxt,
|
||||
{'project_id': 'g2',
|
||||
'display_name': 'name_%d' % i,
|
||||
'size': 1})
|
||||
for i in xrange(2)])
|
||||
for i in range(2)])
|
||||
|
||||
# By project, filter on size and name
|
||||
filters = {'size': '1'}
|
||||
|
@ -25,6 +25,7 @@ from oslo_config import cfg
|
||||
from oslo_utils import timeutils
|
||||
import paramiko
|
||||
import six
|
||||
from six.moves import range
|
||||
|
||||
import cinder
|
||||
from cinder import exception
|
||||
@ -1479,7 +1480,7 @@ class TestRetryDecorator(test.TestCase):
|
||||
|
||||
expected_sleep_arg = []
|
||||
|
||||
for i in xrange(retries):
|
||||
for i in range(retries):
|
||||
if i > 0:
|
||||
interval *= backoff_rate
|
||||
expected_sleep_arg.append(float(interval))
|
||||
|
@ -2790,7 +2790,7 @@ class VolumeTestCase(BaseVolumeTestCase):
|
||||
|
||||
# FIXME(jdg): What is this actually testing?
|
||||
# We never call the internal _check method?
|
||||
for _index in xrange(100):
|
||||
for _index in range(100):
|
||||
tests_utils.create_volume(self.context, **self.volume_params)
|
||||
for volume_id in volume_ids:
|
||||
self.volume.delete_volume(self.context, volume_id)
|
||||
@ -5832,7 +5832,7 @@ class ISCSITestCase(DriverTestCase):
|
||||
def _attach_volume(self):
|
||||
"""Attach volumes to an instance."""
|
||||
volume_id_list = []
|
||||
for index in xrange(3):
|
||||
for index in range(3):
|
||||
vol = {}
|
||||
vol['size'] = 0
|
||||
vol_ref = db.volume_create(self.context, vol)
|
||||
|
@ -31,6 +31,7 @@ from oslo_serialization import jsonutils as json
|
||||
from oslo_utils import excutils
|
||||
from oslo_utils import timeutils
|
||||
import six
|
||||
from six.moves import range
|
||||
import taskflow.engines
|
||||
from taskflow.patterns import linear_flow
|
||||
from taskflow import task
|
||||
@ -1694,7 +1695,7 @@ class EMCVnxCliBase(object):
|
||||
LOG.info(_LI("initiator_auto_registration: False. "
|
||||
"Initiator auto registration is not enabled. "
|
||||
"Please register initiator manually."))
|
||||
self.hlu_set = set(xrange(1, self.max_luns_per_sg + 1))
|
||||
self.hlu_set = set(range(1, self.max_luns_per_sg + 1))
|
||||
self._client = CommandLineHelper(self.configuration)
|
||||
conf_pools = self.configuration.safe_get("storage_vnx_pool_names")
|
||||
self.storage_pools = self._get_managed_storage_pools(conf_pools)
|
||||
|
@ -25,6 +25,7 @@ from oslo_concurrency import processutils
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import excutils
|
||||
from six.moves import range
|
||||
|
||||
from cinder import exception
|
||||
from cinder.i18n import _, _LE, _LW, _LI
|
||||
@ -344,7 +345,7 @@ class DellEQLSanISCSIDriver(san.SanISCSIDriver):
|
||||
"""
|
||||
lines = [line for line in out if line != '']
|
||||
# Every record has 2 lines
|
||||
for i in xrange(0, len(lines), 2):
|
||||
for i in range(0, len(lines), 2):
|
||||
try:
|
||||
int(lines[i][0])
|
||||
# sanity check
|
||||
|
@ -21,6 +21,7 @@ import collections
|
||||
import random
|
||||
|
||||
from oslo_log import log as logging
|
||||
from six.moves import range
|
||||
|
||||
from cinder import exception
|
||||
from cinder.i18n import _
|
||||
|
@ -32,6 +32,7 @@ from oslo_log import log as logging
|
||||
from oslo_utils import excutils
|
||||
from oslo_utils import units
|
||||
import six
|
||||
from six.moves import range
|
||||
|
||||
from cinder.brick.local_dev import lvm
|
||||
from cinder import exception
|
||||
@ -84,7 +85,7 @@ class retry(object):
|
||||
sleep_time = self._sleep_factor
|
||||
exc_info = None
|
||||
|
||||
for attempt in xrange(self._count):
|
||||
for attempt in range(self._count):
|
||||
if attempt != 0:
|
||||
LOG.warning(_LW('Retrying failed call to %(func)s, '
|
||||
'attempt %(attempt)i.'),
|
||||
|
@ -536,7 +536,7 @@ class V6000Common(object):
|
||||
LOG.debug("Entering _wait_for_export_config loop: state=%s.",
|
||||
state)
|
||||
|
||||
for node_id in xrange(2):
|
||||
for node_id in range(2):
|
||||
resp = mg_conns[node_id].basic.get_node_values(bn)
|
||||
if state and len(resp.keys()):
|
||||
status[node_id] = True
|
||||
|
@ -37,6 +37,7 @@ driver documentation for more information.
|
||||
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import units
|
||||
from six.moves import range
|
||||
|
||||
from cinder import context
|
||||
from cinder.db.sqlalchemy import models
|
||||
@ -505,7 +506,7 @@ class V6000FCDriver(driver.FibreChannelDriver):
|
||||
output = []
|
||||
for w in wwns:
|
||||
output.append('wwn.{0}'.format(
|
||||
':'.join(w[x:x + 2] for x in xrange(0, len(w), 2))))
|
||||
':'.join(w[x:x + 2] for x in range(0, len(w), 2))))
|
||||
return output
|
||||
|
||||
def _convert_wwns_vmem_to_openstack(self, wwns):
|
||||
|
@ -588,7 +588,7 @@ class V6000ISCSIDriver(driver.ISCSIDriver):
|
||||
LOG.debug("Entering _wait_for_targetstate loop: target=%s.",
|
||||
target_name)
|
||||
|
||||
for node_id in xrange(2):
|
||||
for node_id in range(2):
|
||||
resp = mg_conns[node_id].basic.get_node_values(bn)
|
||||
if len(resp.keys()):
|
||||
status[node_id] = True
|
||||
|
@ -24,6 +24,7 @@ from oslo_log import log as logging
|
||||
from oslo_utils import strutils
|
||||
from oslo_utils import timeutils
|
||||
from oslo_utils import units
|
||||
from six.moves import range
|
||||
|
||||
from cinder.brick.local_dev import lvm as brick_lvm
|
||||
from cinder import db
|
||||
@ -443,7 +444,7 @@ def generate_password(length=16, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
|
||||
|
||||
# then fill with random characters from all symbol groups
|
||||
symbols = ''.join(symbolgroups)
|
||||
password.extend([random.choice(symbols) for _i in xrange(length)])
|
||||
password.extend([random.choice(symbols) for _i in range(length)])
|
||||
|
||||
# finally shuffle to ensure first x characters aren't from a
|
||||
# predictable group
|
||||
|
@ -24,7 +24,7 @@ def process_todo_nodes(app, doctree, fromdocname):
|
||||
# remove the item that was added in the constructor, since I'm tired of
|
||||
# reading through docutils for the proper way to construct an empty list
|
||||
lists = []
|
||||
for i in xrange(5):
|
||||
for i in range(5):
|
||||
lists.append(nodes.bullet_list("", nodes.Text('', '')))
|
||||
lists[i].remove(lists[i][0])
|
||||
lists[i]['classes'].append('todo_list')
|
||||
|
Loading…
Reference in New Issue
Block a user