Replace dict.iteritems() with six.iteritems(dict)
This patch was generated by the sixer tool version 0.2 using the "iteritems" operation: https://pypi.python.org/pypi/sixer Manual changes: - Don't change get_instance_metadata() in nova/compute/api.py: fixed by the change Ifd455e70002eb9636b87f83788384127ba6edeeb. - Don't change sqlalchemy code and nova/tests/unit/db/test_db_api.py. sqlalchemy objects cannot be converted to a dictionary using dict(obj) directly yet. It will be possible with the change I702be362a58155a28482e733e60539d36c039509. - Revert change in a comment in nova/objects/instance.py; the sixer tool is limited and doesn't understand comments - Reformat nova/virt/vmwareapi/driver.py to respect the 80 columns constraint Blueprint nova-python3 Change-Id: I81465661cb8a74778d70ba9b6641073f1effa49b
This commit is contained in:
parent
792dc63a10
commit
68f6f080b2
|
@ -575,7 +575,7 @@ class CloudController(object):
|
|||
def _cidr_args_split(self, kwargs):
|
||||
cidr_args_split = []
|
||||
cidrs = kwargs['ip_ranges']
|
||||
for key, cidr in cidrs.iteritems():
|
||||
for key, cidr in six.iteritems(cidrs):
|
||||
mykwargs = kwargs.copy()
|
||||
del mykwargs['ip_ranges']
|
||||
mykwargs['cidr_ip'] = cidr['cidr_ip']
|
||||
|
@ -585,7 +585,7 @@ class CloudController(object):
|
|||
def _groups_args_split(self, kwargs):
|
||||
groups_args_split = []
|
||||
groups = kwargs['groups']
|
||||
for key, group in groups.iteritems():
|
||||
for key, group in six.iteritems(groups):
|
||||
mykwargs = kwargs.copy()
|
||||
del mykwargs['groups']
|
||||
if 'group_name' in group:
|
||||
|
@ -1232,7 +1232,7 @@ class CloudController(object):
|
|||
i['keyName'] = instance.key_name
|
||||
i['tagSet'] = []
|
||||
|
||||
for k, v in utils.instance_meta(instance).iteritems():
|
||||
for k, v in six.iteritems(utils.instance_meta(instance)):
|
||||
i['tagSet'].append({'key': k, 'value': v})
|
||||
|
||||
client_token = self._get_client_token(context, instance_uuid)
|
||||
|
|
|
@ -25,6 +25,7 @@ from oslo_log import log as logging
|
|||
from oslo_serialization import jsonutils
|
||||
from oslo_utils import importutils
|
||||
from oslo_utils import timeutils
|
||||
import six
|
||||
|
||||
from nova.api.ec2 import ec2utils
|
||||
from nova.api.metadata import password
|
||||
|
@ -447,7 +448,7 @@ class InstanceMetadata(object):
|
|||
path = 'openstack/%s/%s' % (version, VD_JSON_NAME)
|
||||
yield (path, self.lookup(path))
|
||||
|
||||
for (cid, content) in self.content.iteritems():
|
||||
for (cid, content) in six.iteritems(self.content):
|
||||
yield ('%s/%s/%s' % ("openstack", CONTENT_DIR, cid), content)
|
||||
|
||||
|
||||
|
|
|
@ -21,6 +21,7 @@ import re
|
|||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
import six
|
||||
import six.moves.urllib.parse as urlparse
|
||||
import webob
|
||||
from webob import exc
|
||||
|
@ -150,8 +151,8 @@ def task_and_vm_state_from_status(statuses):
|
|||
vm_states = set()
|
||||
task_states = set()
|
||||
lower_statuses = [status.lower() for status in statuses]
|
||||
for state, task_map in _STATE_MAP.iteritems():
|
||||
for task_state, mapped_state in task_map.iteritems():
|
||||
for state, task_map in six.iteritems(_STATE_MAP):
|
||||
for task_state, mapped_state in six.iteritems(task_map):
|
||||
status_string = mapped_state
|
||||
if status_string.lower() in lower_statuses:
|
||||
vm_states.add(state)
|
||||
|
@ -333,7 +334,7 @@ def check_img_metadata_properties_quota(context, metadata):
|
|||
|
||||
# check the key length.
|
||||
if isinstance(metadata, dict):
|
||||
for key, value in metadata.iteritems():
|
||||
for key, value in six.iteritems(metadata):
|
||||
if len(key) == 0:
|
||||
expl = _("Image metadata key cannot be blank")
|
||||
raise webob.exc.HTTPBadRequest(explanation=expl)
|
||||
|
@ -349,7 +350,7 @@ def dict_to_query_str(params):
|
|||
# TODO(throughnothing): we should just use urllib.urlencode instead of this
|
||||
# But currently we don't work with urlencoded url's
|
||||
param_str = ""
|
||||
for key, val in params.iteritems():
|
||||
for key, val in six.iteritems(params):
|
||||
param_str = param_str + '='.join([str(key), str(val)]) + '&'
|
||||
|
||||
return param_str.rstrip('&')
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
|
||||
import datetime
|
||||
|
||||
import six
|
||||
from webob import exc
|
||||
|
||||
from nova.api.openstack import extensions
|
||||
|
@ -171,7 +172,7 @@ class AggregateController(object):
|
|||
'remove_host': self._remove_host,
|
||||
'set_metadata': self._set_metadata,
|
||||
}
|
||||
for action, data in body.iteritems():
|
||||
for action, data in six.iteritems(body):
|
||||
if action not in _actions.keys():
|
||||
msg = _('Aggregates does not have %s action') % action
|
||||
raise exc.HTTPBadRequest(explanation=msg)
|
||||
|
|
|
@ -43,7 +43,7 @@ def _filter_keys(item, keys):
|
|||
item is a dict
|
||||
|
||||
"""
|
||||
return {k: v for k, v in item.iteritems() if k in keys}
|
||||
return {k: v for k, v in six.iteritems(item) if k in keys}
|
||||
|
||||
|
||||
def _fixup_cell_info(cell_info, keys):
|
||||
|
|
|
@ -54,7 +54,7 @@ class FlavorExtraSpecsController(object):
|
|||
except exception.InvalidInput as error:
|
||||
raise exc.HTTPBadRequest(explanation=error.format_message())
|
||||
|
||||
for key, value in specs.iteritems():
|
||||
for key, value in six.iteritems(specs):
|
||||
try:
|
||||
utils.check_string_length(key, 'extra_specs key',
|
||||
min_length=1, max_length=255)
|
||||
|
|
|
@ -18,6 +18,7 @@ import itertools
|
|||
import os
|
||||
|
||||
from oslo_config import cfg
|
||||
import six
|
||||
from webob import exc
|
||||
|
||||
from nova.api.openstack import common
|
||||
|
@ -113,7 +114,7 @@ class FpingController(object):
|
|||
ip_list += ips
|
||||
alive_ips = self.fping(ip_list)
|
||||
res = []
|
||||
for instance_uuid, ips in instance_ips.iteritems():
|
||||
for instance_uuid, ips in six.iteritems(instance_ips):
|
||||
res.append({
|
||||
"id": instance_uuid,
|
||||
"project_id": instance_projects[instance_uuid],
|
||||
|
|
|
@ -121,7 +121,7 @@ class HostController(object):
|
|||
context = req.environ['nova.context']
|
||||
authorize(context)
|
||||
# See what the user wants to 'update'
|
||||
params = {k.strip().lower(): v for k, v in body.iteritems()}
|
||||
params = {k.strip().lower(): v for k, v in six.iteritems(body)}
|
||||
orig_status = status = params.pop('status', None)
|
||||
orig_maint_mode = maint_mode = params.pop('maintenance_mode', None)
|
||||
# Validate the request
|
||||
|
|
|
@ -84,7 +84,7 @@ class NetworkController(object):
|
|||
networks = {}
|
||||
for n in self.network_api.get_all(ctx):
|
||||
networks[n['id']] = n['label']
|
||||
return [{'id': k, 'label': v} for k, v in networks.iteritems()]
|
||||
return [{'id': k, 'label': v} for k, v in six.iteritems(networks)]
|
||||
|
||||
def index(self, req):
|
||||
context = req.environ['nova.context']
|
||||
|
|
|
@ -12,6 +12,8 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import six
|
||||
|
||||
from nova.api.openstack import extensions
|
||||
from nova.api.openstack import wsgi
|
||||
from nova import quota
|
||||
|
@ -55,7 +57,7 @@ class UsedLimitsController(wsgi.Controller):
|
|||
quota_map['totalServerGroupsUsed'] = 'server_groups'
|
||||
|
||||
used_limits = {}
|
||||
for display_name, key in quota_map.iteritems():
|
||||
for display_name, key in six.iteritems(quota_map):
|
||||
if key in quotas:
|
||||
reserved = (quotas[key]['reserved']
|
||||
if self._reserved(req) else 0)
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import six
|
||||
from webob import exc
|
||||
|
||||
from nova.api.openstack import common
|
||||
|
@ -55,7 +56,7 @@ class Controller(object):
|
|||
context = req.environ['nova.context']
|
||||
image = self._get_image(context, image_id)
|
||||
if 'metadata' in body:
|
||||
for key, value in body['metadata'].iteritems():
|
||||
for key, value in six.iteritems(body['metadata']):
|
||||
image['properties'][key] = value
|
||||
common.check_img_metadata_properties_quota(context,
|
||||
image['properties'])
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import six
|
||||
import webob.exc
|
||||
|
||||
from nova.api.openstack import common
|
||||
|
@ -117,7 +118,7 @@ class Controller(wsgi.Controller):
|
|||
filters = self._get_filters(req)
|
||||
params = req.GET.copy()
|
||||
page_params = common.get_pagination_params(req)
|
||||
for key, val in page_params.iteritems():
|
||||
for key, val in six.iteritems(page_params):
|
||||
params[key] = val
|
||||
|
||||
try:
|
||||
|
@ -137,7 +138,7 @@ class Controller(wsgi.Controller):
|
|||
filters = self._get_filters(req)
|
||||
params = req.GET.copy()
|
||||
page_params = common.get_pagination_params(req)
|
||||
for key, val in page_params.iteritems():
|
||||
for key, val in six.iteritems(page_params):
|
||||
params[key] = val
|
||||
try:
|
||||
images = self._image_api.get_all(context, filters=filters,
|
||||
|
|
|
@ -45,7 +45,7 @@ def _filter_keys(item, keys):
|
|||
"""Filters all model attributes except for keys
|
||||
item is a dict
|
||||
"""
|
||||
return {k: v for k, v in item.iteritems() if k in keys}
|
||||
return {k: v for k, v in six.iteritems(item) if k in keys}
|
||||
|
||||
|
||||
def _fixup_cell_info(cell_info, keys):
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
import copy
|
||||
|
||||
from oslo_log import log as logging
|
||||
import six
|
||||
import webob.exc
|
||||
|
||||
from nova.api.openstack import extensions
|
||||
|
@ -133,7 +134,7 @@ class ExtensionInfoController(wsgi.Controller):
|
|||
"""Filter extensions list based on policy."""
|
||||
|
||||
discoverable_extensions = dict()
|
||||
for alias, ext in self.extension_info.get_extensions().iteritems():
|
||||
for alias, ext in six.iteritems(self.extension_info.get_extensions()):
|
||||
authorize = extensions.os_compute_soft_authorizer(alias)
|
||||
if authorize(context, action='discoverable'):
|
||||
discoverable_extensions[alias] = ext
|
||||
|
@ -173,7 +174,7 @@ class ExtensionInfoController(wsgi.Controller):
|
|||
context = req.environ['nova.context']
|
||||
|
||||
sorted_ext_list = sorted(
|
||||
self._get_extensions(context).iteritems())
|
||||
six.iteritems(self._get_extensions(context)))
|
||||
|
||||
extensions = []
|
||||
for _alias, ext in sorted_ext_list:
|
||||
|
|
|
@ -42,7 +42,7 @@ class FlavorExtraSpecsController(wsgi.Controller):
|
|||
# NOTE(gmann): Max length for numeric value is being checked
|
||||
# explicitly as json schema cannot have max length check for numeric value
|
||||
def _check_extra_specs_value(self, specs):
|
||||
for key, value in specs.iteritems():
|
||||
for key, value in six.iteritems(specs):
|
||||
try:
|
||||
if isinstance(value, (six.integer_types, float)):
|
||||
value = six.text_type(value)
|
||||
|
|
|
@ -18,6 +18,7 @@ import itertools
|
|||
import os
|
||||
|
||||
from oslo_config import cfg
|
||||
import six
|
||||
from webob import exc
|
||||
|
||||
from nova.api.openstack import common
|
||||
|
@ -110,7 +111,7 @@ class FpingController(wsgi.Controller):
|
|||
ip_list += ips
|
||||
alive_ips = self.fping(ip_list)
|
||||
res = []
|
||||
for instance_uuid, ips in instance_ips.iteritems():
|
||||
for instance_uuid, ips in six.iteritems(instance_ips):
|
||||
res.append({
|
||||
"id": instance_uuid,
|
||||
"project_id": instance_projects[instance_uuid],
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import six
|
||||
from webob import exc
|
||||
|
||||
from nova.api.openstack import common
|
||||
|
@ -63,7 +64,7 @@ class ImageMetadataController(wsgi.Controller):
|
|||
def create(self, req, image_id, body):
|
||||
context = req.environ['nova.context']
|
||||
image = self._get_image(context, image_id)
|
||||
for key, value in body['metadata'].iteritems():
|
||||
for key, value in six.iteritems(body['metadata']):
|
||||
image['properties'][key] = value
|
||||
common.check_img_metadata_properties_quota(context,
|
||||
image['properties'])
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import six
|
||||
import webob.exc
|
||||
|
||||
from nova.api.openstack import common
|
||||
|
@ -123,7 +124,7 @@ class ImagesController(wsgi.Controller):
|
|||
filters = self._get_filters(req)
|
||||
params = req.GET.copy()
|
||||
page_params = common.get_pagination_params(req)
|
||||
for key, val in page_params.iteritems():
|
||||
for key, val in six.iteritems(page_params):
|
||||
params[key] = val
|
||||
|
||||
try:
|
||||
|
@ -144,7 +145,7 @@ class ImagesController(wsgi.Controller):
|
|||
filters = self._get_filters(req)
|
||||
params = req.GET.copy()
|
||||
page_params = common.get_pagination_params(req)
|
||||
for key, val in page_params.iteritems():
|
||||
for key, val in six.iteritems(page_params):
|
||||
params[key] = val
|
||||
try:
|
||||
images = self._image_api.get_all(context, filters=filters,
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import six
|
||||
import webob
|
||||
|
||||
from nova.api.openstack.compute.schemas.v3 import quota_classes
|
||||
|
@ -79,7 +80,7 @@ class QuotaClassSetsController(wsgi.Controller):
|
|||
authorize(context)
|
||||
quota_class = id
|
||||
|
||||
for key, value in body['quota_class_set'].iteritems():
|
||||
for key, value in six.iteritems(body['quota_class_set']):
|
||||
try:
|
||||
db.quota_class_update(context, quota_class, key, value)
|
||||
except exception.QuotaClassNotFound:
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
# under the License.
|
||||
|
||||
from oslo_utils import strutils
|
||||
import six
|
||||
import six.moves.urllib.parse as urlparse
|
||||
import webob
|
||||
|
||||
|
@ -130,7 +131,7 @@ class QuotaSetsController(wsgi.Controller):
|
|||
# NOTE(dims): Pass #1 - In this loop for quota_set.items(), we validate
|
||||
# min/max values and bail out if any of the items in the set is bad.
|
||||
valid_quotas = {}
|
||||
for key, value in body['quota_set'].iteritems():
|
||||
for key, value in six.iteritems(body['quota_set']):
|
||||
if key == 'force' or (not value and value != 0):
|
||||
continue
|
||||
# validate whether already used and reserved exceeds the new
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import six
|
||||
from webob import exc
|
||||
|
||||
from nova.api.openstack import common
|
||||
|
@ -46,7 +47,7 @@ class ServerMetadataController(wsgi.Controller):
|
|||
msg = _('Server does not exist')
|
||||
raise exc.HTTPNotFound(explanation=msg)
|
||||
meta_dict = {}
|
||||
for key, value in meta.iteritems():
|
||||
for key, value in six.iteritems(meta):
|
||||
meta_dict[key] = value
|
||||
return meta_dict
|
||||
|
||||
|
|
|
@ -80,7 +80,7 @@ class TenantNetworkController(wsgi.Controller):
|
|||
networks = {}
|
||||
for n in self.network_api.get_all(ctx):
|
||||
networks[n['id']] = n['label']
|
||||
return [{'id': k, 'label': v} for k, v in networks.iteritems()]
|
||||
return [{'id': k, 'label': v} for k, v in six.iteritems(networks)]
|
||||
|
||||
@extensions.expected_errors(())
|
||||
def index(self, req):
|
||||
|
|
|
@ -12,6 +12,8 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import six
|
||||
|
||||
from nova.api.openstack import extensions
|
||||
from nova.api.openstack import wsgi
|
||||
from nova import quota
|
||||
|
@ -48,7 +50,7 @@ class UsedLimitsController(wsgi.Controller):
|
|||
'totalServerGroupsUsed': 'server_groups',
|
||||
}
|
||||
used_limits = {}
|
||||
for display_name, key in quota_map.iteritems():
|
||||
for display_name, key in six.iteritems(quota_map):
|
||||
if key in quotas:
|
||||
reserved = (quotas[key]['reserved']
|
||||
if self._reserved(req) else 0)
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import six
|
||||
from webob import exc
|
||||
|
||||
from nova.api.openstack import common
|
||||
|
@ -38,7 +39,7 @@ class Controller(object):
|
|||
raise exc.HTTPNotFound(explanation=msg)
|
||||
|
||||
meta_dict = {}
|
||||
for key, value in meta.iteritems():
|
||||
for key, value in six.iteritems(meta):
|
||||
meta_dict[key] = value
|
||||
return meta_dict
|
||||
|
||||
|
|
|
@ -921,7 +921,7 @@ class Controller(wsgi.Controller):
|
|||
def _validate_metadata(self, metadata):
|
||||
"""Ensure that we can work with the metadata given."""
|
||||
try:
|
||||
metadata.iteritems()
|
||||
six.iteritems(metadata)
|
||||
except AttributeError:
|
||||
msg = _("Unable to parse metadata key/value pairs.")
|
||||
LOG.debug(msg)
|
||||
|
|
|
@ -16,6 +16,7 @@
|
|||
import datetime
|
||||
|
||||
from oslo_utils import timeutils
|
||||
import six
|
||||
|
||||
|
||||
class ViewBuilder(object):
|
||||
|
@ -58,7 +59,7 @@ class ViewBuilder(object):
|
|||
|
||||
"""
|
||||
limits = {}
|
||||
for name, value in absolute_limits.iteritems():
|
||||
for name, value in six.iteritems(absolute_limits):
|
||||
if name in self.limit_names and value is not None:
|
||||
for limit_name in self.limit_names[name]:
|
||||
limits[limit_name] = value
|
||||
|
|
|
@ -140,7 +140,7 @@ class ExtensionManager(object):
|
|||
"""
|
||||
def sorted_extensions(self):
|
||||
if self.sorted_ext_list is None:
|
||||
self.sorted_ext_list = sorted(self.extensions.iteritems())
|
||||
self.sorted_ext_list = sorted(six.iteritems(self.extensions))
|
||||
|
||||
for _alias, ext in self.sorted_ext_list:
|
||||
yield ext
|
||||
|
|
|
@ -18,6 +18,7 @@ import re
|
|||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import strutils
|
||||
import six
|
||||
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
|
@ -87,11 +88,11 @@ class BlockDeviceDict(dict):
|
|||
bdm_dict['device_name'] = prepend_dev(bdm_dict['device_name'])
|
||||
# NOTE (ndipanov): Never default db fields
|
||||
self.update({field: None for field in self._fields - do_not_default})
|
||||
self.update(list(bdm_dict.iteritems()))
|
||||
self.update(list(six.iteritems(bdm_dict)))
|
||||
|
||||
def _validate(self, bdm_dict):
|
||||
"""Basic data format validations."""
|
||||
dict_fields = set(key for key, _ in bdm_dict.iteritems())
|
||||
dict_fields = set(key for key, _ in six.iteritems(bdm_dict))
|
||||
|
||||
# Check that there are no bogus fields
|
||||
if not (dict_fields <=
|
||||
|
@ -137,7 +138,7 @@ class BlockDeviceDict(dict):
|
|||
non_computable_fields = set(['boot_index', 'disk_bus',
|
||||
'guest_format', 'device_type'])
|
||||
|
||||
new_bdm = {fld: val for fld, val in legacy_bdm.iteritems()
|
||||
new_bdm = {fld: val for fld, val in six.iteritems(legacy_bdm)
|
||||
if fld in copy_over_fields}
|
||||
|
||||
virt_name = legacy_bdm.get('virtual_name')
|
||||
|
|
|
@ -24,6 +24,7 @@ from oslo_log import log as logging
|
|||
import oslo_messaging
|
||||
from oslo_utils import importutils
|
||||
from oslo_utils import timeutils
|
||||
import six
|
||||
from six.moves import range
|
||||
|
||||
from nova.cells import messaging
|
||||
|
@ -399,7 +400,7 @@ class CellsManager(manager.Manager):
|
|||
totals = {}
|
||||
for response in responses:
|
||||
data = response.value_or_raise()
|
||||
for key, val in data.iteritems():
|
||||
for key, val in six.iteritems(data):
|
||||
totals.setdefault(key, 0)
|
||||
totals[key] += val
|
||||
return totals
|
||||
|
|
|
@ -1135,7 +1135,7 @@ class _BroadcastMessageMethods(_BaseMessageMethods):
|
|||
services = objects.ServiceList.get_all(message.ctxt, disabled=disabled)
|
||||
ret_services = []
|
||||
for service in services:
|
||||
for key, val in filters.iteritems():
|
||||
for key, val in six.iteritems(filters):
|
||||
if getattr(service, key) != val:
|
||||
break
|
||||
else:
|
||||
|
@ -1261,7 +1261,7 @@ class MessageRunner(object):
|
|||
self.response_queues = {}
|
||||
self.methods_by_type = {}
|
||||
self.our_name = CONF.cells.name
|
||||
for msg_type, cls in _CELL_MESSAGE_TYPE_TO_METHODS_CLS.iteritems():
|
||||
for msg_type, cls in six.iteritems(_CELL_MESSAGE_TYPE_TO_METHODS_CLS):
|
||||
self.methods_by_type[msg_type] = cls(self)
|
||||
self.serializer = objects_base.NovaObjectSerializer()
|
||||
|
||||
|
|
|
@ -76,7 +76,7 @@ class CellState(object):
|
|||
|
||||
def update_db_info(self, cell_db_info):
|
||||
"""Update cell credentials from db."""
|
||||
self.db_info = {k: v for k, v in cell_db_info.iteritems()
|
||||
self.db_info = {k: v for k, v in six.iteritems(cell_db_info)
|
||||
if k != 'name'}
|
||||
|
||||
def update_capabilities(self, cell_metadata):
|
||||
|
|
|
@ -289,7 +289,7 @@ class ProjectCommands(object):
|
|||
quota = QUOTAS.get_user_quotas(ctxt, project_id, user_id)
|
||||
else:
|
||||
quota = QUOTAS.get_project_quotas(ctxt, project_id)
|
||||
for key, value in quota.iteritems():
|
||||
for key, value in six.iteritems(quota):
|
||||
if value['limit'] < 0 or value['limit'] is None:
|
||||
value['limit'] = 'unlimited'
|
||||
print(print_format % (key, value['limit'], value['in_use'],
|
||||
|
@ -543,7 +543,7 @@ class NetworkCommands(object):
|
|||
dns1=None, dns2=None, project_id=None, priority=None,
|
||||
uuid=None, fixed_cidr=None):
|
||||
"""Creates fixed ips for host by range."""
|
||||
kwargs = {k: v for k, v in locals().iteritems()
|
||||
kwargs = {k: v for k, v in six.iteritems(locals())
|
||||
if v and k != "self"}
|
||||
if multi_host is not None:
|
||||
kwargs['multi_host'] = multi_host == 'T'
|
||||
|
@ -1041,7 +1041,7 @@ class AgentBuildCommands(object):
|
|||
|
||||
buildlist.append(agent_build)
|
||||
|
||||
for key, buildlist in by_hypervisor.iteritems():
|
||||
for key, buildlist in six.iteritems(by_hypervisor):
|
||||
if hypervisor and key != hypervisor:
|
||||
continue
|
||||
|
||||
|
|
|
@ -476,7 +476,7 @@ class API(base.Base):
|
|||
# Because metadata is stored in the DB, we hard-code the size limits
|
||||
# In future, we may support more variable length strings, so we act
|
||||
# as if this is quota-controlled for forwards compatibility
|
||||
for k, v in metadata.iteritems():
|
||||
for k, v in six.iteritems(metadata):
|
||||
try:
|
||||
utils.check_string_length(v)
|
||||
utils.check_string_length(k, min_length=1)
|
||||
|
@ -1987,7 +1987,7 @@ class API(base.Base):
|
|||
'system_metadata': _remap_system_metadata_filter}
|
||||
|
||||
# copy from search_opts, doing various remappings as necessary
|
||||
for opt, value in search_opts.iteritems():
|
||||
for opt, value in six.iteritems(search_opts):
|
||||
# Do remappings.
|
||||
# Values not in the filter_mapping table are copied as-is.
|
||||
# If remapping is None, option is not copied
|
||||
|
@ -3406,7 +3406,7 @@ class HostAPI(base.Base):
|
|||
set_zones=set_zones)
|
||||
ret_services = []
|
||||
for service in services:
|
||||
for key, val in filters.iteritems():
|
||||
for key, val in six.iteritems(filters):
|
||||
if service[key] != val:
|
||||
break
|
||||
else:
|
||||
|
|
|
@ -57,7 +57,7 @@ class ResourceMonitorMeta(type):
|
|||
prefix = '_get_'
|
||||
prefix_len = len(prefix)
|
||||
cls.metric_map = {}
|
||||
for name, value in cls.__dict__.iteritems():
|
||||
for name, value in six.iteritems(cls.__dict__):
|
||||
if (len(name) > prefix_len
|
||||
and name[:prefix_len] == prefix
|
||||
and isinstance(value, types.FunctionType)):
|
||||
|
@ -123,7 +123,7 @@ class ResourceMonitorBase(object):
|
|||
"""
|
||||
data = []
|
||||
self._update_data()
|
||||
for name, func in self.metric_map.iteritems():
|
||||
for name, func in six.iteritems(self.metric_map):
|
||||
ret = func(self, **kwargs)
|
||||
data.append(self._populate(name, ret[0], ret[1]))
|
||||
return data
|
||||
|
|
|
@ -112,7 +112,7 @@ class ConductorManager(manager.Manager):
|
|||
exception.UnexpectedTaskStateError)
|
||||
def instance_update(self, context, instance_uuid,
|
||||
updates, service):
|
||||
for key, value in updates.iteritems():
|
||||
for key, value in six.iteritems(updates):
|
||||
if key not in allowed_updates:
|
||||
LOG.error(_LE("Instance update attempted for "
|
||||
"'%(key)s' on %(instance_uuid)s"),
|
||||
|
|
|
@ -56,7 +56,7 @@ class ConvertedException(webob.exc.WSGIHTTPException):
|
|||
|
||||
def _cleanse_dict(original):
|
||||
"""Strip all admin_password, new_pass, rescue_pass keys from a dict."""
|
||||
return {k: v for k, v in original.iteritems() if "_pass" not in k}
|
||||
return {k: v for k, v in six.iteritems(original) if "_pass" not in k}
|
||||
|
||||
|
||||
def wrap_exception(notifier=None, get_notifier=None):
|
||||
|
@ -123,7 +123,7 @@ class NovaException(Exception):
|
|||
# kwargs doesn't match a variable in the message
|
||||
# log the issue and the kwargs
|
||||
LOG.exception(_LE('Exception in string format operation'))
|
||||
for name, value in kwargs.iteritems():
|
||||
for name, value in six.iteritems(kwargs):
|
||||
LOG.error("%s: %s" % (name, value)) # noqa
|
||||
|
||||
if CONF.fatal_exception_format_errors:
|
||||
|
|
|
@ -252,7 +252,7 @@ class GlanceImageService(object):
|
|||
self._download_handlers = {}
|
||||
download_modules = image_xfers.load_transfer_modules()
|
||||
|
||||
for scheme, mod in download_modules.iteritems():
|
||||
for scheme, mod in six.iteritems(download_modules):
|
||||
if scheme not in CONF.glance.allowed_direct_url_schemes:
|
||||
continue
|
||||
|
||||
|
|
|
@ -394,7 +394,7 @@ class IptablesManager(object):
|
|||
elif ip_version == 6:
|
||||
tables = self.ipv6
|
||||
|
||||
for table, chains in builtin_chains[ip_version].iteritems():
|
||||
for table, chains in six.iteritems(builtin_chains[ip_version]):
|
||||
for chain in chains:
|
||||
tables[table].add_chain(chain)
|
||||
tables[table].add_rule(chain, '-j $%s' % (chain,),
|
||||
|
@ -461,7 +461,7 @@ class IptablesManager(object):
|
|||
run_as_root=True,
|
||||
attempts=5)
|
||||
all_lines = all_tables.split('\n')
|
||||
for table_name, table in tables.iteritems():
|
||||
for table_name, table in six.iteritems(tables):
|
||||
start, end = self._find_table(all_lines, table_name)
|
||||
all_lines[start:end] = self._modify_rules(
|
||||
all_lines[start:end], table, table_name)
|
||||
|
|
|
@ -26,7 +26,7 @@ from nova.i18n import _
|
|||
|
||||
def ensure_string_keys(d):
|
||||
# http://bugs.python.org/issue4978
|
||||
return {str(k): v for k, v in d.iteritems()}
|
||||
return {str(k): v for k, v in six.iteritems(d)}
|
||||
|
||||
# Constants for the 'vif_type' field in VIF class
|
||||
VIF_TYPE_OVS = 'ovs'
|
||||
|
|
|
@ -333,7 +333,7 @@ def image_meta(system_metadata):
|
|||
system metadata.
|
||||
"""
|
||||
image_meta = {}
|
||||
for md_key, md_value in system_metadata.iteritems():
|
||||
for md_key, md_value in six.iteritems(system_metadata):
|
||||
if md_key.startswith('image_'):
|
||||
image_meta[md_key[6:]] = md_value
|
||||
|
||||
|
|
|
@ -58,7 +58,7 @@ def make_class_properties(cls):
|
|||
for name, field in supercls.fields.items():
|
||||
if name not in cls.fields:
|
||||
cls.fields[name] = field
|
||||
for name, field in cls.fields.iteritems():
|
||||
for name, field in six.iteritems(cls.fields):
|
||||
if not isinstance(field, obj_fields.Field):
|
||||
raise exception.ObjectFieldInvalid(
|
||||
field=name, objname=cls.obj_name())
|
||||
|
@ -187,7 +187,7 @@ def remotable(fn):
|
|||
if NovaObject.indirection_api:
|
||||
updates, result = NovaObject.indirection_api.object_action(
|
||||
self._context, self, fn.__name__, args, kwargs)
|
||||
for key, value in updates.iteritems():
|
||||
for key, value in six.iteritems(updates):
|
||||
if key in self.fields:
|
||||
field = self.fields[key]
|
||||
# NOTE(ndipanov): Since NovaObjectSerializer will have
|
||||
|
@ -820,7 +820,7 @@ def serialize_args(fn):
|
|||
def wrapper(obj, *args, **kwargs):
|
||||
args = [timeutils.strtime(at=arg) if isinstance(arg, datetime.datetime)
|
||||
else arg for arg in args]
|
||||
for k, v in kwargs.iteritems():
|
||||
for k, v in six.iteritems(kwargs):
|
||||
if k == 'exc_val' and v:
|
||||
kwargs[k] = str(v)
|
||||
elif k == 'exc_tb' and v and not isinstance(v, six.string_types):
|
||||
|
|
|
@ -202,7 +202,7 @@ class BaseRequestHandler(object):
|
|||
elif isinstance(value, datetime.datetime):
|
||||
parts.append(value.strftime("%Y-%m-%dT%H:%M:%S.000Z"))
|
||||
elif isinstance(value, dict):
|
||||
for name, subvalue in value.iteritems():
|
||||
for name, subvalue in six.iteritems(value):
|
||||
if not isinstance(subvalue, list):
|
||||
subvalue = [subvalue]
|
||||
for subsubvalue in subvalue:
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
import copy
|
||||
|
||||
from oslo_log import log as logging
|
||||
import six
|
||||
|
||||
from nova import exception
|
||||
from nova.i18n import _LE
|
||||
|
@ -241,7 +242,7 @@ class PciDeviceStats(object):
|
|||
# 'devices' shouldn't be part of stats
|
||||
pools = []
|
||||
for pool in self.pools:
|
||||
tmp = {k: v for k, v in pool.iteritems() if k != 'devices'}
|
||||
tmp = {k: v for k, v in six.iteritems(pool) if k != 'devices'}
|
||||
pools.append(tmp)
|
||||
return iter(pools)
|
||||
|
||||
|
|
|
@ -20,6 +20,7 @@ import os
|
|||
import re
|
||||
|
||||
from oslo_log import log as logging
|
||||
import six
|
||||
|
||||
from nova import exception
|
||||
from nova.i18n import _LE
|
||||
|
@ -48,7 +49,7 @@ def pci_device_prop_match(pci_dev, specs):
|
|||
|
||||
"""
|
||||
def _matching_devices(spec):
|
||||
return all(pci_dev.get(k) == v for k, v in spec.iteritems())
|
||||
return all(pci_dev.get(k) == v for k, v in six.iteritems(spec))
|
||||
|
||||
return any(_matching_devices(spec) for spec in specs)
|
||||
|
||||
|
|
|
@ -247,7 +247,7 @@ class DbQuotaDriver(object):
|
|||
# Use the project quota for default user quota.
|
||||
proj_quotas = project_quotas or db.quota_get_all_by_project(
|
||||
context, project_id)
|
||||
for key, value in proj_quotas.iteritems():
|
||||
for key, value in six.iteritems(proj_quotas):
|
||||
if key not in user_quotas.keys():
|
||||
user_quotas[key] = value
|
||||
user_usages = None
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
import six
|
||||
|
||||
from nova.scheduler import filters
|
||||
from nova.scheduler.filters import utils
|
||||
|
@ -50,7 +51,7 @@ class AggregateImagePropertiesIsolation(filters.BaseHostFilter):
|
|||
image_props = spec.get('image', {}).get('properties', {})
|
||||
metadata = utils.aggregate_metadata_get_by_host(host_state)
|
||||
|
||||
for key, options in metadata.iteritems():
|
||||
for key, options in six.iteritems(metadata):
|
||||
if (cfg_namespace and
|
||||
not key.startswith(cfg_namespace + cfg_separator)):
|
||||
continue
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
# under the License.
|
||||
|
||||
from oslo_log import log as logging
|
||||
import six
|
||||
|
||||
from nova.scheduler import filters
|
||||
from nova.scheduler.filters import extra_specs_ops
|
||||
|
@ -44,7 +45,7 @@ class AggregateInstanceExtraSpecsFilter(filters.BaseHostFilter):
|
|||
|
||||
metadata = utils.aggregate_metadata_get_by_host(host_state)
|
||||
|
||||
for key, req in instance_type['extra_specs'].iteritems():
|
||||
for key, req in six.iteritems(instance_type['extra_specs']):
|
||||
# Either not scope format, or aggregate_instance_extra_specs scope
|
||||
scope = key.split(':', 1)
|
||||
if len(scope) > 1:
|
||||
|
|
|
@ -71,7 +71,7 @@ class ComputeCapabilitiesFilter(filters.BaseHostFilter):
|
|||
if 'extra_specs' not in instance_type:
|
||||
return True
|
||||
|
||||
for key, req in instance_type['extra_specs'].iteritems():
|
||||
for key, req in six.iteritems(instance_type['extra_specs']):
|
||||
# Either not scope format, or in capabilities scope
|
||||
scope = key.split(':')
|
||||
if len(scope) > 1:
|
||||
|
|
|
@ -41,7 +41,7 @@ def aggregate_metadata_get_by_host(host_state, key=None):
|
|||
for aggr in aggrlist:
|
||||
if key is not None and key not in aggr.metadata:
|
||||
continue
|
||||
for k, v in aggr.metadata.iteritems():
|
||||
for k, v in six.iteritems(aggr.metadata):
|
||||
values = v.split(',')
|
||||
for value in values:
|
||||
metadata[k].add(value.strip())
|
||||
|
|
|
@ -269,7 +269,7 @@ class TestCase(testtools.TestCase):
|
|||
def flags(self, **kw):
|
||||
"""Override flag variables for a test."""
|
||||
group = kw.pop('group', None)
|
||||
for k, v in kw.iteritems():
|
||||
for k, v in six.iteritems(kw):
|
||||
CONF.set_override(k, v, group)
|
||||
|
||||
def start_service(self, name, host=None, **kwargs):
|
||||
|
|
|
@ -25,6 +25,7 @@ import warnings
|
|||
import fixtures
|
||||
from oslo_config import cfg
|
||||
from oslo_messaging import conffixture as messaging_conffixture
|
||||
import six
|
||||
|
||||
from nova.db import migration
|
||||
from nova.db.sqlalchemy import api as session
|
||||
|
@ -282,7 +283,7 @@ class ConfPatcher(fixtures.Fixture):
|
|||
|
||||
def setUp(self):
|
||||
super(ConfPatcher, self).setUp()
|
||||
for k, v in self.args.iteritems():
|
||||
for k, v in six.iteritems(self.args):
|
||||
self.addCleanup(CONF.clear_override, k, self.group)
|
||||
CONF.set_override(k, v, self.group)
|
||||
|
||||
|
|
|
@ -17,6 +17,7 @@ import urllib
|
|||
from oslo_log import log as logging
|
||||
from oslo_serialization import jsonutils
|
||||
import requests
|
||||
import six
|
||||
|
||||
from nova.tests.unit.image import fake
|
||||
|
||||
|
@ -246,7 +247,7 @@ class TestOpenStackClient(object):
|
|||
|
||||
if search_opts is not None:
|
||||
qparams = {}
|
||||
for opt, val in search_opts.iteritems():
|
||||
for opt, val in six.iteritems(search_opts):
|
||||
qparams[opt] = val
|
||||
if qparams:
|
||||
query_string = "?%s" % urllib.urlencode(qparams)
|
||||
|
|
|
@ -14,6 +14,8 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import six
|
||||
|
||||
from nova import context
|
||||
from nova import db
|
||||
from nova import exception as ex
|
||||
|
@ -89,7 +91,7 @@ class FlavorManageFullstack(test.TestCase):
|
|||
'id': 'flavorid',
|
||||
'swap': 'swap'
|
||||
}
|
||||
for k, v in mapping.iteritems():
|
||||
for k, v in six.iteritems(mapping):
|
||||
if k in flav:
|
||||
self.assertEqual(flav[k], flavdb[v],
|
||||
"%s != %s" % (flav, flavdb))
|
||||
|
@ -97,7 +99,7 @@ class FlavorManageFullstack(test.TestCase):
|
|||
def assertFlavorAPIEqual(self, flav, flavapi):
|
||||
# for all keys in the flavor, ensure they are correctly set in
|
||||
# flavapi response.
|
||||
for k, v in flav.iteritems():
|
||||
for k, v in six.iteritems(flav):
|
||||
if k in flavapi:
|
||||
self.assertEqual(flav[k], flavapi[k],
|
||||
"%s != %s" % (flav, flavapi))
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
|
||||
import datetime
|
||||
|
||||
import six
|
||||
from webob import exc
|
||||
|
||||
from nova.api.openstack import api_version_request as api_version
|
||||
|
@ -91,7 +92,7 @@ def fake_get_all_flavors_sorted_list(context, inactive=False,
|
|||
return sorted(INSTANCE_TYPES.values(), key=lambda item: item[sort_key])
|
||||
|
||||
res = {}
|
||||
for k, v in INSTANCE_TYPES.iteritems():
|
||||
for k, v in six.iteritems(INSTANCE_TYPES):
|
||||
if filters['is_public'] and _has_flavor_access(k, context.project_id):
|
||||
res.update({k: v})
|
||||
continue
|
||||
|
|
|
@ -13,6 +13,8 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import six
|
||||
|
||||
from nova.api.openstack.compute.contrib import used_limits as used_limits_v2
|
||||
from nova.api.openstack.compute.plugins.v3 import used_limits as \
|
||||
used_limits_v21
|
||||
|
@ -66,7 +68,7 @@ class UsedLimitsTestCaseV21(test.NoDBTestCase):
|
|||
}
|
||||
limits = {}
|
||||
expected_abs_limits = []
|
||||
for display_name, q in quota_map.iteritems():
|
||||
for display_name, q in six.iteritems(quota_map):
|
||||
limits[q] = {'limit': len(display_name),
|
||||
'in_use': len(display_name) / 2,
|
||||
'reserved': len(display_name) / 3}
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
# under the License.
|
||||
|
||||
from oslo_serialization import jsonutils
|
||||
import six
|
||||
import webob
|
||||
import webob.dec
|
||||
import webob.exc
|
||||
|
@ -128,7 +129,7 @@ class APITest(test.NoDBTestCase):
|
|||
self.assertEqual(resp.status_int, exception_type.code, resp.body)
|
||||
|
||||
if hasattr(exception_type, 'headers'):
|
||||
for (key, value) in exception_type.headers.iteritems():
|
||||
for (key, value) in six.iteritems(exception_type.headers):
|
||||
self.assertIn(key, resp.headers)
|
||||
self.assertEqual(resp.headers[key], str(value))
|
||||
|
||||
|
|
|
@ -252,7 +252,7 @@ class FakeToken(object):
|
|||
def __init__(self, **kwargs):
|
||||
FakeToken.id_count += 1
|
||||
self.id = FakeToken.id_count
|
||||
for k, v in kwargs.iteritems():
|
||||
for k, v in six.iteritems(kwargs):
|
||||
setattr(self, k, v)
|
||||
|
||||
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
import inspect
|
||||
|
||||
import mock
|
||||
import six
|
||||
import webob
|
||||
|
||||
from nova.api.openstack import api_version_request as api_version
|
||||
|
@ -1006,7 +1007,7 @@ class ResourceTest(test.NoDBTestCase):
|
|||
app = fakes.TestRouter(Controller())
|
||||
response = req.get_response(app)
|
||||
|
||||
for hdr, val in response.headers.iteritems():
|
||||
for hdr, val in six.iteritems(response.headers):
|
||||
# All headers must be utf8
|
||||
self.assertIsInstance(hdr, str)
|
||||
self.assertIsInstance(val, str)
|
||||
|
@ -1131,7 +1132,7 @@ class ResponseObjectTest(test.NoDBTestCase):
|
|||
response = robj.serialize(request, content_type)
|
||||
|
||||
self.assertEqual(response.headers['Content-Type'], content_type)
|
||||
for hdr, val in response.headers.iteritems():
|
||||
for hdr, val in six.iteritems(response.headers):
|
||||
# All headers must be utf8
|
||||
self.assertIsInstance(hdr, str)
|
||||
self.assertIsInstance(val, str)
|
||||
|
|
|
@ -124,7 +124,7 @@ def unify_instance(instance):
|
|||
model-initiated sources that can reasonably be compared.
|
||||
"""
|
||||
newdict = dict()
|
||||
for k, v in instance.iteritems():
|
||||
for k, v in six.iteritems(instance):
|
||||
if isinstance(v, datetime.datetime):
|
||||
# NOTE(danms): DB models and Instance objects have different
|
||||
# timezone expectations
|
||||
|
@ -6394,7 +6394,7 @@ class ComputeTestCase(BaseTestCase):
|
|||
|
||||
self.compute._poll_unconfirmed_resizes(ctxt)
|
||||
|
||||
for instance_uuid, status in expected_migration_status.iteritems():
|
||||
for instance_uuid, status in six.iteritems(expected_migration_status):
|
||||
self.assertEqual(status,
|
||||
fetch_instance_migration_status(instance_uuid))
|
||||
|
||||
|
@ -7400,7 +7400,7 @@ class ComputeAPITestCase(BaseTestCase):
|
|||
image_props = {'image_kernel_id': 'fake_kernel_id',
|
||||
'image_ramdisk_id': 'fake_ramdisk_id',
|
||||
'image_something_else': 'meow', }
|
||||
for key, value in image_props.iteritems():
|
||||
for key, value in six.iteritems(image_props):
|
||||
self.assertIn(key, sys_metadata)
|
||||
self.assertEqual(value, sys_metadata[key])
|
||||
|
||||
|
|
|
@ -25,6 +25,7 @@ import oslo_messaging as messaging
|
|||
from oslo_utils import importutils
|
||||
from oslo_utils import timeutils
|
||||
from oslo_utils import uuidutils
|
||||
import six
|
||||
|
||||
import nova
|
||||
from nova.compute import build_results
|
||||
|
@ -80,7 +81,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
|
|||
}
|
||||
event = mock.Mock()
|
||||
event.get_instance_uuid.return_value = mock.sentinel.uuid
|
||||
for transition, pwr_state in event_map.iteritems():
|
||||
for transition, pwr_state in six.iteritems(event_map):
|
||||
event.get_transition.return_value = transition
|
||||
self.compute.handle_lifecycle_event(event)
|
||||
mock_get.assert_called_with(mock.ANY, mock.sentinel.uuid,
|
||||
|
|
|
@ -19,6 +19,8 @@
|
|||
import copy
|
||||
import datetime
|
||||
|
||||
import six
|
||||
|
||||
from nova import db
|
||||
from nova import exception
|
||||
|
||||
|
@ -450,7 +452,7 @@ def stub_out_db_instance_api(stubs, injected=True):
|
|||
return INSTANCE_TYPES[name]
|
||||
|
||||
def fake_flavor_get(context, id):
|
||||
for name, inst_type in INSTANCE_TYPES.iteritems():
|
||||
for name, inst_type in six.iteritems(INSTANCE_TYPES):
|
||||
if str(inst_type['id']) == str(id):
|
||||
return inst_type
|
||||
return None
|
||||
|
|
|
@ -24,6 +24,7 @@ library to work with nova.
|
|||
import fnmatch
|
||||
|
||||
from oslo_serialization import jsonutils
|
||||
import six
|
||||
from six.moves import range
|
||||
|
||||
|
||||
|
@ -312,11 +313,11 @@ class FakeLDAP(object):
|
|||
# get the attributes from the store
|
||||
attrs = store.hgetall(key)
|
||||
# turn the values from the store into lists
|
||||
attrs = {k: _from_json(v) for k, v in attrs.iteritems()}
|
||||
attrs = {k: _from_json(v) for k, v in six.iteritems(attrs)}
|
||||
# filter the objects by query
|
||||
if not query or _match_query(query, attrs):
|
||||
# filter the attributes by fields
|
||||
attrs = {k: v for k, v in attrs.iteritems()
|
||||
attrs = {k: v for k, v in six.iteritems(attrs)
|
||||
if not fields or k in fields}
|
||||
objects.append((key[len(self.__prefix):], attrs))
|
||||
return objects
|
||||
|
|
|
@ -1939,14 +1939,14 @@ class VlanNetworkTestCase(test.TestCase):
|
|||
|
||||
class _TestDomainObject(object):
|
||||
def __init__(self, **kwargs):
|
||||
for k, v in kwargs.iteritems():
|
||||
for k, v in six.iteritems(kwargs):
|
||||
self.__setattr__(k, v)
|
||||
|
||||
|
||||
class FakeNetwork(object):
|
||||
def __init__(self, **kwargs):
|
||||
self.vlan = None
|
||||
for k, v in kwargs.iteritems():
|
||||
for k, v in six.iteritems(kwargs):
|
||||
self.__setattr__(k, v)
|
||||
|
||||
def __getitem__(self, item):
|
||||
|
|
|
@ -68,7 +68,7 @@ class MyComparator(mox.Comparator):
|
|||
def _com_dict(self, lhs, rhs):
|
||||
if len(lhs) != len(rhs):
|
||||
return False
|
||||
for key, value in lhs.iteritems():
|
||||
for key, value in six.iteritems(lhs):
|
||||
if key not in rhs:
|
||||
return False
|
||||
rhs_value = rhs[key]
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
import contextlib
|
||||
|
||||
import mock
|
||||
import six
|
||||
|
||||
from nova.cells import rpcapi as cells_rpcapi
|
||||
from nova import context
|
||||
|
@ -206,7 +207,7 @@ class _TestBlockDeviceMappingObject(object):
|
|||
'bdm_update_or_create_at_top'):
|
||||
bdm.create()
|
||||
|
||||
for k, v in values.iteritems():
|
||||
for k, v in six.iteritems(values):
|
||||
self.assertEqual(v, getattr(bdm, k))
|
||||
|
||||
def test_create_fails(self):
|
||||
|
|
|
@ -1095,7 +1095,7 @@ class TestArgsSerializer(test.NoDBTestCase):
|
|||
|
||||
expected_kwargs = {'a': 'untouched', 'b': self.str_now,
|
||||
'c': self.str_now}
|
||||
for key, val in kwargs.iteritems():
|
||||
for key, val in six.iteritems(kwargs):
|
||||
self.assertEqual(expected_kwargs[key], val)
|
||||
|
||||
def test_serialize_args(self):
|
||||
|
|
|
@ -15,6 +15,8 @@
|
|||
|
||||
import mock
|
||||
|
||||
import six
|
||||
|
||||
from nova import exception
|
||||
from nova import objects
|
||||
from nova.pci import stats
|
||||
|
@ -248,7 +250,7 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
|
|||
self.assertEqual(product_id, pool['product_id'])
|
||||
self.assertEqual(count, pool['count'])
|
||||
if tags:
|
||||
for k, v in tags.iteritems():
|
||||
for k, v in six.iteritems(tags):
|
||||
self.assertEqual(v, pool[k])
|
||||
|
||||
def _assertPools(self):
|
||||
|
|
|
@ -17,6 +17,7 @@ import os
|
|||
import fixtures
|
||||
from oslo_config import cfg
|
||||
from oslo_serialization import jsonutils
|
||||
import six
|
||||
|
||||
from nova.openstack.common import policy as common_policy
|
||||
from nova import paths
|
||||
|
@ -103,7 +104,7 @@ class RoleBasedPolicyFixture(RealPolicyFixture):
|
|||
policy = jsonutils.load(open(CONF.policy_file))
|
||||
|
||||
# Convert all actions to require specified role
|
||||
for action, rule in policy.iteritems():
|
||||
for action, rule in six.iteritems(policy):
|
||||
policy[action] = 'role:%s' % self.role
|
||||
|
||||
self.policy_dir = self.useFixture(fixtures.TempDir())
|
||||
|
|
|
@ -16,6 +16,8 @@
|
|||
Fakes For Scheduler tests.
|
||||
"""
|
||||
|
||||
import six
|
||||
|
||||
from nova import objects
|
||||
from nova.scheduler import host_manager
|
||||
|
||||
|
@ -88,5 +90,5 @@ class FakeHostState(host_manager.HostState):
|
|||
self.instances = {inst.uuid: inst for inst in instances}
|
||||
else:
|
||||
self.instances = {}
|
||||
for (key, val) in attribute_dict.iteritems():
|
||||
for (key, val) in six.iteritems(attribute_dict):
|
||||
setattr(self, key, val)
|
||||
|
|
|
@ -15,6 +15,8 @@ Unit Tests for flavors code
|
|||
"""
|
||||
import time
|
||||
|
||||
import six
|
||||
|
||||
from nova.compute import flavors
|
||||
from nova import context
|
||||
from nova import db
|
||||
|
@ -70,7 +72,7 @@ class InstanceTypeTestCase(test.TestCase):
|
|||
"""return a flavorid not in the DB."""
|
||||
nonexistent_flavor = 2700
|
||||
flavor_ids = [value.id for key, value in
|
||||
flavors.get_all_flavors().iteritems()]
|
||||
six.iteritems(flavors.get_all_flavors())]
|
||||
while nonexistent_flavor in flavor_ids:
|
||||
nonexistent_flavor += 1
|
||||
else:
|
||||
|
|
|
@ -29,6 +29,7 @@ except ImportError:
|
|||
import mock
|
||||
from oslo_config import cfg
|
||||
from oslo_serialization import jsonutils
|
||||
import six
|
||||
import webob
|
||||
|
||||
from nova.api.metadata import base
|
||||
|
@ -463,7 +464,7 @@ class OpenStackMetadataTestCase(test.TestCase):
|
|||
mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
|
||||
mddict = jsonutils.loads(mdjson)
|
||||
|
||||
for key, val in extra.iteritems():
|
||||
for key, val in six.iteritems(extra):
|
||||
self.assertEqual(mddict[key], val)
|
||||
|
||||
def test_password(self):
|
||||
|
|
|
@ -33,6 +33,7 @@ from oslo_context import context as common_context
|
|||
from oslo_context import fixture as context_fixture
|
||||
from oslo_utils import encodeutils
|
||||
from oslo_utils import timeutils
|
||||
import six
|
||||
|
||||
|
||||
import nova
|
||||
|
@ -825,7 +826,7 @@ class GetSystemMetadataFromImageTestCase(test.NoDBTestCase):
|
|||
sys_meta = utils.get_system_metadata_from_image(image)
|
||||
|
||||
# Verify that we inherit all the image properties
|
||||
for key, expected in image["properties"].iteritems():
|
||||
for key, expected in six.iteritems(image["properties"]):
|
||||
sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
|
||||
self.assertEqual(sys_meta[sys_key], expected)
|
||||
|
||||
|
@ -882,7 +883,7 @@ class GetImageFromSystemMetadataTestCase(test.NoDBTestCase):
|
|||
# Verify that we inherit the rest of metadata as properties
|
||||
self.assertIn("properties", image)
|
||||
|
||||
for key, value in image["properties"].iteritems():
|
||||
for key, value in six.iteritems(image["properties"]):
|
||||
sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
|
||||
self.assertEqual(image["properties"][key], sys_meta[sys_key])
|
||||
|
||||
|
|
|
@ -17,6 +17,7 @@ import uuid
|
|||
|
||||
import fixtures
|
||||
from lxml import etree
|
||||
import six
|
||||
|
||||
from nova.compute import arch
|
||||
from nova.virt.libvirt import config as vconfig
|
||||
|
@ -804,7 +805,7 @@ class Connection(object):
|
|||
|
||||
dom._id = -1
|
||||
|
||||
for (k, v) in self._running_vms.iteritems():
|
||||
for (k, v) in six.iteritems(self._running_vms):
|
||||
if v == dom:
|
||||
del self._running_vms[k]
|
||||
self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STOPPED, 0)
|
||||
|
|
|
@ -4858,7 +4858,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
|
|||
_get_prefix(prefix, 'ubda'))]
|
||||
}
|
||||
|
||||
for (virt_type, checks) in type_disk_map.iteritems():
|
||||
for (virt_type, checks) in six.iteritems(type_disk_map):
|
||||
self.flags(virt_type=virt_type, group='libvirt')
|
||||
if prefix:
|
||||
self.flags(disk_prefix=prefix, group='libvirt')
|
||||
|
@ -5174,7 +5174,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
|
|||
# implementation doesn't fiddle around with the CONF.
|
||||
testuri = 'something completely different'
|
||||
self.flags(connection_uri=testuri, group='libvirt')
|
||||
for (virt_type, (expected_uri, checks)) in type_uri_map.iteritems():
|
||||
for (virt_type, (expected_uri, checks)) in six.iteritems(type_uri_map):
|
||||
self.flags(virt_type=virt_type, group='libvirt')
|
||||
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
||||
self.assertEqual(drvr._uri(), testuri)
|
||||
|
@ -13691,7 +13691,7 @@ class _BaseSnapshotTests(test.NoDBTestCase):
|
|||
|
||||
if expected_properties:
|
||||
for expected_key, expected_value in \
|
||||
expected_properties.iteritems():
|
||||
six.iteritems(expected_properties):
|
||||
self.assertEqual(expected_value, props[expected_key])
|
||||
|
||||
def _create_image(self, extra_properties=None):
|
||||
|
|
|
@ -20,6 +20,7 @@ import tempfile
|
|||
import mock
|
||||
from oslo_concurrency import processutils
|
||||
from oslo_config import cfg
|
||||
import six
|
||||
|
||||
from nova.compute import arch
|
||||
from nova import exception
|
||||
|
@ -369,7 +370,7 @@ ID TAG VM SIZE DATE VM CLOCK
|
|||
# NOTE(aloga): Xen is tested in test_pick_disk_driver_name_xen
|
||||
|
||||
version = 1005001
|
||||
for (virt_type, checks) in type_map.iteritems():
|
||||
for (virt_type, checks) in six.iteritems(type_map):
|
||||
self.flags(virt_type=virt_type, group='libvirt')
|
||||
for (is_block_dev, expected_result) in checks:
|
||||
result = libvirt_utils.pick_disk_driver_name(version,
|
||||
|
|
|
@ -16,6 +16,7 @@ import contextlib
|
|||
|
||||
import mock
|
||||
from oslo_serialization import jsonutils
|
||||
import six
|
||||
|
||||
from nova import block_device
|
||||
from nova import context
|
||||
|
@ -213,7 +214,7 @@ class TestDriverBlockDevice(test.NoDBTestCase):
|
|||
self.assertThat(test_bdm, matchers.DictMatches(
|
||||
getattr(self, "%s_driver_bdm" % name)))
|
||||
|
||||
for k, v in db_bdm.iteritems():
|
||||
for k, v in six.iteritems(db_bdm):
|
||||
field_val = getattr(test_bdm._bdm_obj, k)
|
||||
if isinstance(field_val, bool):
|
||||
v = bool(v)
|
||||
|
@ -229,7 +230,7 @@ class TestDriverBlockDevice(test.NoDBTestCase):
|
|||
getattr(test_bdm._bdm_obj, passthru))
|
||||
|
||||
# Make sure that all others raise _invalidType
|
||||
for other_name, cls in self.driver_classes.iteritems():
|
||||
for other_name, cls in six.iteritems(self.driver_classes):
|
||||
if other_name == name:
|
||||
continue
|
||||
self.assertRaises(driver_block_device._InvalidType,
|
||||
|
@ -238,10 +239,10 @@ class TestDriverBlockDevice(test.NoDBTestCase):
|
|||
|
||||
# Test the save method
|
||||
with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
|
||||
for fld, alias in test_bdm._update_on_save.iteritems():
|
||||
for fld, alias in six.iteritems(test_bdm._update_on_save):
|
||||
test_bdm[alias or fld] = 'fake_changed_value'
|
||||
test_bdm.save()
|
||||
for fld, alias in test_bdm._update_on_save.iteritems():
|
||||
for fld, alias in six.iteritems(test_bdm._update_on_save):
|
||||
self.assertEqual(test_bdm[alias or fld],
|
||||
getattr(test_bdm._bdm_obj, fld))
|
||||
|
||||
|
|
|
@ -189,7 +189,7 @@ class VirtDriverLoaderTestCase(_FakeDriverBackendTestCase, test.TestCase):
|
|||
}
|
||||
|
||||
def test_load_new_drivers(self):
|
||||
for cls, driver in self.new_drivers.iteritems():
|
||||
for cls, driver in six.iteritems(self.new_drivers):
|
||||
self.flags(compute_driver=cls)
|
||||
# NOTE(sdague) the try block is to make it easier to debug a
|
||||
# failure by knowing which driver broke
|
||||
|
|
|
@ -27,6 +27,7 @@ from oslo_utils import units
|
|||
from oslo_utils import uuidutils
|
||||
from oslo_vmware import exceptions as vexc
|
||||
from oslo_vmware.objects import datastore as ds_obj
|
||||
import six
|
||||
|
||||
from nova import exception
|
||||
from nova.virt.vmwareapi import constants
|
||||
|
@ -986,7 +987,7 @@ def create_vm(uuid=None, name=None,
|
|||
if vm_path.rel_path == '':
|
||||
vm_path = vm_path.join(name, name + '.vmx')
|
||||
|
||||
for key, value in _db_content["Datastore"].iteritems():
|
||||
for key, value in six.iteritems(_db_content["Datastore"]):
|
||||
if value.get('summary.name') == vm_path.datastore:
|
||||
ds = key
|
||||
break
|
||||
|
|
|
@ -19,6 +19,7 @@ import sys
|
|||
|
||||
import fixtures
|
||||
from oslo_serialization import jsonutils
|
||||
import six
|
||||
|
||||
from nova import test
|
||||
import nova.tests.unit.image.fake
|
||||
|
@ -65,7 +66,7 @@ def stubout_session(stubs, cls, product_version=(5, 6, 2),
|
|||
def stubout_get_this_vm_uuid(stubs):
|
||||
def f(session):
|
||||
vms = [rec['uuid'] for ref, rec
|
||||
in fake.get_all_records('VM').iteritems()
|
||||
in six.iteritems(fake.get_all_records('VM'))
|
||||
if rec['is_control_domain']]
|
||||
return vms[0]
|
||||
stubs.Set(vm_utils, 'get_this_vm_uuid', f)
|
||||
|
|
|
@ -16,6 +16,8 @@
|
|||
from eventlet import greenthread
|
||||
import mock
|
||||
|
||||
import six
|
||||
|
||||
from nova import exception
|
||||
from nova import test
|
||||
from nova.tests.unit.virt.xenapi import stubs
|
||||
|
@ -153,7 +155,7 @@ class ParseVolumeInfoTestCase(stubs.XenAPITestBaseNoDB):
|
|||
'xvdq': -1,
|
||||
}
|
||||
|
||||
for (input, expected) in cases.iteritems():
|
||||
for (input, expected) in six.iteritems(cases):
|
||||
actual = volume_utils._mountpoint_to_number(input)
|
||||
self.assertEqual(actual, expected,
|
||||
'%s yielded %s, not %s' % (input, actual, expected))
|
||||
|
|
|
@ -31,6 +31,7 @@ from oslo_config import fixture as config_fixture
|
|||
from oslo_log import log as logging
|
||||
from oslo_serialization import jsonutils
|
||||
from oslo_utils import importutils
|
||||
import six
|
||||
import testtools
|
||||
|
||||
from nova.compute import api as compute_api
|
||||
|
@ -600,7 +601,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
|
|||
vm_info = conn.get_info({'name': name})
|
||||
# Get XenAPI record for VM
|
||||
vms = [rec for ref, rec
|
||||
in xenapi_fake.get_all_records('VM').iteritems()
|
||||
in six.iteritems(xenapi_fake.get_all_records('VM'))
|
||||
if not rec['is_control_domain']]
|
||||
vm = vms[0]
|
||||
self.vm_info = vm_info
|
||||
|
@ -2523,7 +2524,7 @@ class XenAPIBWCountersTestCase(stubs.XenAPITestBaseNoDB):
|
|||
|
||||
@classmethod
|
||||
def _fake_list_vms(cls, session):
|
||||
return cls.FAKE_VMS.iteritems()
|
||||
return six.iteritems(cls.FAKE_VMS)
|
||||
|
||||
@staticmethod
|
||||
def _fake_fetch_bandwidth_mt(session):
|
||||
|
@ -2965,7 +2966,7 @@ class XenAPISRSelectionTestCase(stubs.XenAPITestBaseNoDB):
|
|||
def _create_service_entries(context, values={'avail_zone1': ['fake_host1',
|
||||
'fake_host2'],
|
||||
'avail_zone2': ['fake_host3'], }):
|
||||
for avail_zone, hosts in values.iteritems():
|
||||
for avail_zone, hosts in six.iteritems(values):
|
||||
for service_host in hosts:
|
||||
db.service_create(context,
|
||||
{'host': service_host,
|
||||
|
|
|
@ -837,7 +837,7 @@ def metadata_to_dict(metadata):
|
|||
|
||||
def dict_to_metadata(metadata):
|
||||
result = []
|
||||
for key, value in metadata.iteritems():
|
||||
for key, value in six.iteritems(metadata):
|
||||
result.append(dict(key=key, value=value))
|
||||
return result
|
||||
|
||||
|
@ -1076,7 +1076,7 @@ def get_system_metadata_from_image(image_meta, flavor=None):
|
|||
system_meta = {}
|
||||
prefix_format = SM_IMAGE_PROP_PREFIX + '%s'
|
||||
|
||||
for key, value in image_meta.get('properties', {}).iteritems():
|
||||
for key, value in six.iteritems(image_meta.get('properties', {})):
|
||||
new_value = safe_truncate(six.text_type(value), 255)
|
||||
system_meta[prefix_format % key] = new_value
|
||||
|
||||
|
@ -1104,7 +1104,7 @@ def get_image_from_system_metadata(system_meta):
|
|||
if not isinstance(system_meta, dict):
|
||||
system_meta = metadata_to_dict(system_meta)
|
||||
|
||||
for key, value in system_meta.iteritems():
|
||||
for key, value in six.iteritems(system_meta):
|
||||
if value is None:
|
||||
continue
|
||||
|
||||
|
|
|
@ -19,6 +19,7 @@ import operator
|
|||
from oslo_log import log as logging
|
||||
from oslo_serialization import jsonutils
|
||||
from oslo_utils import excutils
|
||||
import six
|
||||
|
||||
from nova import block_device
|
||||
from nova.i18n import _LE
|
||||
|
@ -134,7 +135,7 @@ class DriverBlockDevice(dict):
|
|||
raise NotImplementedError()
|
||||
|
||||
def save(self):
|
||||
for attr_name, key_name in self._update_on_save.iteritems():
|
||||
for attr_name, key_name in six.iteritems(self._update_on_save):
|
||||
lookup_name = key_name or attr_name
|
||||
if self[lookup_name] != getattr(self._bdm_obj, attr_name):
|
||||
setattr(self._bdm_obj, attr_name, self[lookup_name])
|
||||
|
@ -204,7 +205,7 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
|
|||
raise _InvalidType
|
||||
|
||||
self.update(
|
||||
{k: v for k, v in self._bdm_obj.iteritems()
|
||||
{k: v for k, v in six.iteritems(self._bdm_obj)
|
||||
if k in self._new_fields | set(['delete_on_termination'])}
|
||||
)
|
||||
self['mount_device'] = self._bdm_obj.device_name
|
||||
|
|
|
@ -27,6 +27,7 @@ if sys.platform == 'win32':
|
|||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
import six
|
||||
from six.moves import range
|
||||
|
||||
from nova import exception
|
||||
|
@ -96,7 +97,7 @@ class VMUtils(object):
|
|||
|
||||
def __init__(self, host='.'):
|
||||
self._enabled_states_map = {v: k for k, v in
|
||||
self._vm_power_states_map.iteritems()}
|
||||
six.iteritems(self._vm_power_states_map)}
|
||||
if sys.platform == 'win32':
|
||||
self._init_hyperv_wmi_conn(host)
|
||||
self._conn_cimv2 = wmi.WMI(moniker='//%s/root/cimv2' % host)
|
||||
|
|
|
@ -73,6 +73,7 @@ import itertools
|
|||
import operator
|
||||
|
||||
from oslo_config import cfg
|
||||
import six
|
||||
|
||||
from nova import block_device
|
||||
from nova.compute import arch
|
||||
|
@ -637,7 +638,7 @@ def get_disk_info(virt_type, instance, image_meta,
|
|||
|
||||
|
||||
def get_boot_order(disk_info):
|
||||
boot_mapping = (info for name, info in disk_info['mapping'].iteritems()
|
||||
boot_mapping = (info for name, info in six.iteritems(disk_info['mapping'])
|
||||
if name != 'root' and info.get('boot_index') is not None)
|
||||
boot_devs_dup = (BOOT_DEV_FOR_TYPE[dev['type']] for dev in
|
||||
sorted(boot_mapping,
|
||||
|
|
|
@ -19,6 +19,8 @@ This module provides helper APIs for populating the config.py
|
|||
classes based on common operational needs / policies
|
||||
"""
|
||||
|
||||
import six
|
||||
|
||||
from nova.pci import utils as pci_utils
|
||||
|
||||
|
||||
|
@ -161,7 +163,7 @@ def set_vif_bandwidth_config(conf, inst_type):
|
|||
bandwidth_items = ['vif_inbound_average', 'vif_inbound_peak',
|
||||
'vif_inbound_burst', 'vif_outbound_average', 'vif_outbound_peak',
|
||||
'vif_outbound_burst']
|
||||
for key, value in inst_type.get('extra_specs', {}).iteritems():
|
||||
for key, value in six.iteritems(inst_type.get('extra_specs', {})):
|
||||
scope = key.split(':')
|
||||
if len(scope) > 1 and scope[0] == 'quota':
|
||||
if scope[1] in bandwidth_items:
|
||||
|
|
|
@ -4555,7 +4555,7 @@ class LibvirtDriver(driver.ComputeDriver):
|
|||
else:
|
||||
info = libvirt_utils.get_fs_info(CONF.instances_path)
|
||||
|
||||
for (k, v) in info.iteritems():
|
||||
for (k, v) in six.iteritems(info):
|
||||
info[k] = v / units.Gi
|
||||
|
||||
return info
|
||||
|
|
|
@ -164,7 +164,7 @@ class Image(object):
|
|||
tune_items = ['disk_read_bytes_sec', 'disk_read_iops_sec',
|
||||
'disk_write_bytes_sec', 'disk_write_iops_sec',
|
||||
'disk_total_bytes_sec', 'disk_total_iops_sec']
|
||||
for key, value in extra_specs.iteritems():
|
||||
for key, value in six.iteritems(extra_specs):
|
||||
scope = key.split(':')
|
||||
if len(scope) > 1 and scope[0] == 'quota':
|
||||
if scope[1] in tune_items:
|
||||
|
@ -334,7 +334,7 @@ class Image(object):
|
|||
with open(self.disk_info_path) as disk_info_file:
|
||||
line = disk_info_file.read().rstrip()
|
||||
dct = _dict_from_line(line)
|
||||
for path, driver_format in dct.iteritems():
|
||||
for path, driver_format in six.iteritems(dct):
|
||||
if path == self.path:
|
||||
return driver_format
|
||||
driver_format = self._get_driver_format()
|
||||
|
|
|
@ -156,7 +156,7 @@ class LibvirtBaseVolumeDriver(object):
|
|||
'read_iops_sec', 'write_iops_sec']
|
||||
specs = data['qos_specs']
|
||||
if isinstance(specs, dict):
|
||||
for k, v in specs.iteritems():
|
||||
for k, v in six.iteritems(specs):
|
||||
if k in tune_opts:
|
||||
new_key = 'disk_' + k
|
||||
setattr(conf, new_key, v)
|
||||
|
|
|
@ -29,6 +29,7 @@ from oslo_vmware import exceptions as vexc
|
|||
from oslo_vmware import pbm
|
||||
from oslo_vmware import vim
|
||||
from oslo_vmware import vim_util
|
||||
import six
|
||||
|
||||
from nova import exception
|
||||
from nova.i18n import _, _LI, _LW
|
||||
|
@ -173,7 +174,8 @@ class VMwareVCDriver(driver.ComputeDriver):
|
|||
|
||||
# Check if there are any clusters that were specified in the nova.conf
|
||||
# but are not in the vCenter, for missing clusters log a warning.
|
||||
clusters_found = [v.get('name') for k, v in self.dict_mors.iteritems()]
|
||||
clusters_found = [v.get('name')
|
||||
for k, v in six.iteritems(self.dict_mors)]
|
||||
missing_clusters = set(self._cluster_names) - set(clusters_found)
|
||||
if missing_clusters:
|
||||
LOG.warning(_LW("The following clusters could not be found in the "
|
||||
|
|
|
@ -27,6 +27,7 @@ from oslo_log import log as logging
|
|||
from oslo_utils import strutils
|
||||
from oslo_utils import units
|
||||
from oslo_vmware import rw_handles
|
||||
import six
|
||||
|
||||
from nova import exception
|
||||
from nova.i18n import _, _LE, _LI
|
||||
|
@ -146,7 +147,7 @@ class VMwareImage(object):
|
|||
'hw_vif_model': 'vif_model'
|
||||
}
|
||||
|
||||
for k, v in props_map.iteritems():
|
||||
for k, v in six.iteritems(props_map):
|
||||
if k in properties:
|
||||
props[v] = properties[k]
|
||||
|
||||
|
|
|
@ -20,6 +20,7 @@ The VMware API utility module.
|
|||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
from oslo_vmware import vim_util as vutil
|
||||
import six
|
||||
import suds
|
||||
|
||||
from nova.i18n import _LW
|
||||
|
@ -46,7 +47,7 @@ def object_to_dict(obj, list_depth=1):
|
|||
are converted.
|
||||
"""
|
||||
d = {}
|
||||
for k, v in suds.sudsobject.asdict(obj).iteritems():
|
||||
for k, v in six.iteritems(suds.sudsobject.asdict(obj)):
|
||||
if hasattr(v, '__keylist__'):
|
||||
d[k] = object_to_dict(v, list_depth=list_depth)
|
||||
elif isinstance(v, list):
|
||||
|
|
|
@ -30,6 +30,7 @@ from oslo_vmware import exceptions as vexc
|
|||
from oslo_vmware.objects import datastore as ds_obj
|
||||
from oslo_vmware import pbm
|
||||
from oslo_vmware import vim_util as vutil
|
||||
import six
|
||||
|
||||
from nova import exception
|
||||
from nova.i18n import _, _LE, _LI, _LW
|
||||
|
@ -502,7 +503,7 @@ def get_vm_extra_config_spec(client_factory, extra_opts):
|
|||
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
|
||||
# add the key value pairs
|
||||
extra_config = []
|
||||
for key, value in extra_opts.iteritems():
|
||||
for key, value in six.iteritems(extra_opts):
|
||||
opt = client_factory.create('ns0:OptionValue')
|
||||
opt.key = key
|
||||
opt.value = value
|
||||
|
|
|
@ -29,6 +29,7 @@ from oslo_config import cfg
|
|||
from oslo_log import log as logging
|
||||
from oslo_serialization import jsonutils
|
||||
from oslo_utils import units
|
||||
import six
|
||||
import six.moves.urllib.parse as urlparse
|
||||
|
||||
from nova.i18n import _, _LE, _LW
|
||||
|
@ -357,7 +358,7 @@ class XenAPIDriver(driver.ComputeDriver):
|
|||
# of mac addresses with values that are the bw counters:
|
||||
# e.g. {'instance-001' : { 12:34:56:78:90:12 : {'bw_in': 0, ....}}
|
||||
all_counters = self._vmops.get_all_bw_counters()
|
||||
for instance_name, counters in all_counters.iteritems():
|
||||
for instance_name, counters in six.iteritems(all_counters):
|
||||
if instance_name in imap:
|
||||
# yes these are stats for a nova-managed vm
|
||||
# correlate the stats with the nova instance uuid:
|
||||
|
|
|
@ -59,6 +59,7 @@ from oslo_log import log as logging
|
|||
from oslo_serialization import jsonutils
|
||||
from oslo_utils import timeutils
|
||||
from oslo_utils import units
|
||||
import six
|
||||
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
|
@ -541,7 +542,7 @@ class SessionBase(object):
|
|||
|
||||
def SR_introduce(self, _1, sr_uuid, label, desc, type, content_type,
|
||||
shared, sm_config):
|
||||
for ref, rec in _db_content['SR'].iteritems():
|
||||
for ref, rec in six.iteritems(_db_content['SR']):
|
||||
if rec.get('uuid') == sr_uuid:
|
||||
# make forgotten = 0 and return ref
|
||||
_db_content['SR'][ref]['forgotten'] = 0
|
||||
|
@ -1050,7 +1051,7 @@ class SessionBase(object):
|
|||
|
||||
def _get_by_field(self, recs, k, v, return_singleton):
|
||||
result = []
|
||||
for ref, rec in recs.iteritems():
|
||||
for ref, rec in six.iteritems(recs):
|
||||
if rec.get(k) == v:
|
||||
result.append(ref)
|
||||
|
||||
|
|
|
@ -22,6 +22,7 @@ import re
|
|||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
from oslo_serialization import jsonutils
|
||||
import six
|
||||
|
||||
from nova.compute import arch
|
||||
from nova.compute import hv_type
|
||||
|
@ -389,7 +390,7 @@ def _host_find(context, session, src_aggregate, host_ref):
|
|||
# CONF.host in the XenServer host's other-config map.
|
||||
# TODO(armando-migliaccio): improve according the note above
|
||||
uuid = session.host.get_uuid(host_ref)
|
||||
for compute_host, host_uuid in src_aggregate.metadetails.iteritems():
|
||||
for compute_host, host_uuid in six.iteritems(src_aggregate.metadetails):
|
||||
if host_uuid == uuid:
|
||||
return compute_host
|
||||
raise exception.NoValidHost(reason='Host %(host_uuid)s could not be found '
|
||||
|
|
|
@ -597,7 +597,7 @@ def _set_vdi_info(session, vdi_ref, vdi_type, name_label, description,
|
|||
session.call_xenapi('VDI.set_name_description', vdi_ref, description)
|
||||
|
||||
other_config = _get_vdi_other_config(vdi_type, instance=instance)
|
||||
for key, value in other_config.iteritems():
|
||||
for key, value in six.iteritems(other_config):
|
||||
if key not in existing_other_config:
|
||||
session.call_xenapi(
|
||||
"VDI.add_to_other_config", vdi_ref, key, value)
|
||||
|
@ -1332,7 +1332,7 @@ def create_image(context, session, instance, name_label, image_id,
|
|||
{'image_id': image_id, 'cache': cache, 'downloaded': downloaded,
|
||||
'duration': duration})
|
||||
|
||||
for vdi_type, vdi in vdis.iteritems():
|
||||
for vdi_type, vdi in six.iteritems(vdis):
|
||||
vdi_ref = session.call_xenapi('VDI.get_by_uuid', vdi['uuid'])
|
||||
_set_vdi_info(session, vdi_ref, vdi_type, name_label, vdi_type,
|
||||
instance)
|
||||
|
@ -1352,7 +1352,7 @@ def _fetch_image(context, session, instance, name_label, image_id, image_type):
|
|||
vdis = _fetch_disk_image(context, session, instance, name_label,
|
||||
image_id, image_type)
|
||||
|
||||
for vdi_type, vdi in vdis.iteritems():
|
||||
for vdi_type, vdi in six.iteritems(vdis):
|
||||
vdi_uuid = vdi['uuid']
|
||||
LOG.debug("Fetched VDIs of type '%(vdi_type)s' with UUID"
|
||||
" '%(vdi_uuid)s'",
|
||||
|
|
|
@ -621,7 +621,7 @@ class VMOps(object):
|
|||
vbd_refs.append(vbd_ref)
|
||||
|
||||
# Attach original ephemeral disks
|
||||
for userdevice, vdi_ref in orig_vdi_refs.iteritems():
|
||||
for userdevice, vdi_ref in six.iteritems(orig_vdi_refs):
|
||||
if userdevice >= DEVICE_EPHEMERAL:
|
||||
vbd_ref = vm_utils.create_vbd(self._session, vm_ref, vdi_ref,
|
||||
userdevice, bootable=False)
|
||||
|
@ -747,7 +747,7 @@ class VMOps(object):
|
|||
ephemeral_vdis = vdis.get('ephemerals')
|
||||
if ephemeral_vdis:
|
||||
# attach existing (migrated) ephemeral disks
|
||||
for userdevice, ephemeral_vdi in ephemeral_vdis.iteritems():
|
||||
for userdevice, ephemeral_vdi in six.iteritems(ephemeral_vdis):
|
||||
vm_utils.create_vbd(self._session, vm_ref,
|
||||
ephemeral_vdi['ref'],
|
||||
userdevice, bootable=False)
|
||||
|
@ -1745,7 +1745,7 @@ class VMOps(object):
|
|||
if dom is None or dom not in counters:
|
||||
continue
|
||||
vifs_bw = bw.setdefault(name, {})
|
||||
for vif_num, vif_data in counters[dom].iteritems():
|
||||
for vif_num, vif_data in six.iteritems(counters[dom]):
|
||||
mac = vif_map[vif_num]
|
||||
vif_data['mac_address'] = mac
|
||||
vifs_bw[mac] = vif_data
|
||||
|
|
|
@ -33,6 +33,7 @@ from oslo_log import loggers
|
|||
from oslo_utils import excutils
|
||||
from paste import deploy
|
||||
import routes.middleware
|
||||
import six
|
||||
import webob.dec
|
||||
import webob.exc
|
||||
|
||||
|
@ -422,7 +423,7 @@ class Debug(Middleware):
|
|||
resp = req.get_response(self.application)
|
||||
|
||||
print(('*' * 40) + ' RESPONSE HEADERS')
|
||||
for (key, value) in resp.headers.iteritems():
|
||||
for (key, value) in six.iteritems(resp.headers):
|
||||
print(key, '=', value)
|
||||
print()
|
||||
|
||||
|
|
Loading…
Reference in New Issue