Merge "Remove "six" library"

Zuul 2020-07-30 00:35:59 +00:00 committed by Gerrit Code Review
commit 08863de754
59 changed files with 218 additions and 191 deletions

View File

@@ -127,10 +127,11 @@ ignore-iface-methods=
 deprecated-modules=
 # should use oslo_serialization.jsonutils
     json
+    six
 [TYPECHECK]
 # List of module names for which member attributes should not be checked
-ignored-modules=six.moves,_MovedItems
+ignored-modules=_MovedItems
 [REPORTS]
 # Tells whether to display a full report or only the messages

View File

@ -28,6 +28,7 @@ Below you can find a list of checks specific to this repository.
with [obj for obj in data if test(obj)]. with [obj for obj in data if test(obj)].
- [N346] Use neutron_lib.db.api.sqla_listen rather than sqlalchemy - [N346] Use neutron_lib.db.api.sqla_listen rather than sqlalchemy
- [N347] Test code must not import mock library - [N347] Test code must not import mock library
- [N348] Test code must not import six library
.. note:: .. note::
When adding a new hacking check to this repository or ``neutron-lib``, make When adding a new hacking check to this repository or ``neutron-lib``, make
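
For context, a minimal illustration of what the new N348 check targets; the imports below are hypothetical examples, not taken from this change:

    # In a module under neutron/tests/, these imports would now be flagged:
    #   import six
    #   from six import moves
    # Tests are expected to use the Python 3 standard library directly, e.g.:
    import io        # instead of six.StringIO
    import queue     # instead of six.moves.queue
    import urllib    # instead of six.moves.urllib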

View File

@@ -116,7 +116,6 @@ requestsexceptions==1.2.0
 rfc3986==0.3.1
 Routes==2.3.1
 simplejson==3.5.1
-six==1.10.0
 smmap==0.9.0
 snowballstemmer==1.2.1
 sqlalchemy-migrate==0.11.0

View File

@@ -14,6 +14,7 @@
 # under the License.
 import collections
+import functools
 import itertools
 import operator
 import random
@@ -30,7 +31,6 @@ from oslo_utils import uuidutils
 from ovsdbapp.backend.ovs_idl import idlutils
 import debtcollector
-import six
 import tenacity
 from neutron._i18n import _
@@ -87,7 +87,7 @@ def _ovsdb_retry(fn):
     The instance's ovsdb_timeout is used as the max waiting time. This relies
     on the fact that instance methods receive self as the first argument.
     """
-    @six.wraps(fn)
+    @functools.wraps(fn)
     def wrapped(*args, **kwargs):
         self = args[0]
         new_fn = tenacity.retry(
@@ -1022,11 +1022,11 @@ class OVSBridge(BaseOVS):
                              max_burst_kbps=None, min_kbps=None):
         other_config = {}
         if max_kbps:
-            other_config[six.u('max-rate')] = six.u(str(max_kbps * 1000))
+            other_config['max-rate'] = str(max_kbps * 1000)
         if max_burst_kbps:
-            other_config[six.u('burst')] = six.u(str(max_burst_kbps * 1000))
+            other_config['burst'] = str(max_burst_kbps * 1000)
         if min_kbps:
-            other_config[six.u('min-rate')] = six.u(str(min_kbps * 1000))
+            other_config['min-rate'] = str(min_kbps * 1000)
         queue = self._find_queue(port_id)
         if queue and queue['_uuid']:
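
The six.wraps to functools.wraps swap above is mechanical; a minimal sketch of the retry-decorator pattern with the tenacity details elided and names illustrative:

    import functools

    def _retry(fn):
        # functools.wraps copies __name__, __doc__ and other metadata from
        # fn onto the wrapper, which is exactly what six.wraps provided.
        @functools.wraps(fn)
        def wrapped(*args, **kwargs):
            return fn(*args, **kwargs)
        return wrapped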

View File

@@ -14,11 +14,11 @@
 #
 import datetime
+import queue
 import time
 from oslo_utils import timeutils
 from oslo_utils import uuidutils
-from six.moves import queue as Queue
 class ResourceUpdate(object):
@@ -110,7 +110,7 @@ class ExclusiveResourceProcessor(object):
         if id not in self._masters:
             self._masters[id] = self
-            self._queue = Queue.PriorityQueue(-1)
+            self._queue = queue.PriorityQueue(-1)
         self._master = self._masters[id]
@@ -162,7 +162,7 @@ class ExclusiveResourceProcessor(object):
 class ResourceProcessingQueue(object):
     """Manager of the queue of resources to process."""
     def __init__(self):
-        self._queue = Queue.PriorityQueue()
+        self._queue = queue.PriorityQueue()
     def add(self, update):
         update.tries -= 1
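
six.moves.queue was only an alias for the Python 3 queue module, so behaviour is unchanged; a small standalone sketch of the priority queue used here, with made-up items:

    import queue

    q = queue.PriorityQueue(-1)   # maxsize <= 0 means unbounded
    q.put((1, 'urgent update'))
    q.put((5, 'routine update'))
    assert q.get() == (1, 'urgent update')   # lowest priority value first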

View File

@@ -14,6 +14,7 @@
 # under the License.
 import collections
+import functools
 import os
 import threading
@@ -32,7 +33,6 @@ from oslo_service import loopingcall
 from oslo_utils import fileutils
 from oslo_utils import importutils
 from oslo_utils import timeutils
-import six
 from neutron._i18n import _
 from neutron.agent.common import resource_processing_queue as queue
@@ -57,7 +57,7 @@ DHCP_READY_PORTS_SYNC_MAX = 64
 def _sync_lock(f):
     """Decorator to block all operations for a global sync call."""
-    @six.wraps(f)
+    @functools.wraps(f)
     def wrapped(*args, **kwargs):
         with _SYNC_STATE_LOCK.write_lock():
             return f(*args, **kwargs)
@@ -66,7 +66,7 @@ def _sync_lock(f):
 def _wait_if_syncing(f):
     """Decorator to wait if any sync operations are in progress."""
-    @six.wraps(f)
+    @functools.wraps(f)
     def wrapped(*args, **kwargs):
         with _SYNC_STATE_LOCK.read_lock():
             return f(*args, **kwargs)

View File

@@ -13,6 +13,7 @@
 # under the License.
 import os
+import queue
 import sys
 import threading
@@ -20,7 +21,6 @@ import httplib2
 import netaddr
 from oslo_config import cfg
 from oslo_log import log as logging
-from six.moves import queue
 from neutron._i18n import _
 from neutron.agent.l3 import ha

View File

@@ -25,7 +25,6 @@ import sys
 from neutron_lib import exceptions
 from oslo_log import log as logging
 import setproctitle
-import six
 from neutron._i18n import _
@@ -138,7 +137,7 @@ class Pidfile(object):
     def write(self, pid):
         os.ftruncate(self.fd, 0)
-        os.write(self.fd, six.b("%s" % pid))
+        os.write(self.fd, bytes("%s" % pid, 'utf-8'))
         os.fsync(self.fd)
     def read(self):
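
six.b() encoded a str to bytes (latin-1 on Python 3); for an ASCII pid string the explicit utf-8 form above is equivalent. A standalone check with an illustrative value:

    pid = 4242
    assert bytes("%s" % pid, 'utf-8') == ("%s" % pid).encode('utf-8') == b'4242'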

View File

@@ -16,6 +16,7 @@
 import abc
 import collections
 import copy
+import io
 import itertools
 import os
 import re
@@ -33,7 +34,6 @@ from oslo_utils import excutils
 from oslo_utils import fileutils
 from oslo_utils import netutils
 from oslo_utils import uuidutils
-import six
 from neutron._i18n import _
 from neutron.agent.common import utils as agent_common_utils
@@ -765,7 +765,7 @@ class Dnsmasq(DhcpLocalProcess):
         epoch-timestamp mac_addr ip_addr hostname client-ID
         """
         filename = self.get_conf_file_name('leases')
-        buf = six.StringIO()
+        buf = io.StringIO()
         LOG.debug('Building initial lease file: %s', filename)
         # we make up a lease time for the database entry
@@ -822,7 +822,7 @@ class Dnsmasq(DhcpLocalProcess):
         should receive a dhcp lease, the hosts resolution in itself is
         defined by the `_output_addn_hosts_file` method.
         """
-        buf = six.StringIO()
+        buf = io.StringIO()
         filename = self.get_conf_file_name('host')
         LOG.debug('Building host file: %s', filename)
@@ -1061,7 +1061,7 @@ class Dnsmasq(DhcpLocalProcess):
         Each line in this file is in the same form as a standard /etc/hosts
         file.
         """
-        buf = six.StringIO()
+        buf = io.StringIO()
         for host_tuple in self._iter_hosts():
             port, alloc, hostname, fqdn, no_dhcp, no_opts, tag = host_tuple
             # It is compulsory to write the `fqdn` before the `hostname` in
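
io.StringIO is the direct Python 3 replacement for six.StringIO: an in-memory text stream that the lease and host writers above fill line by line. A tiny sketch with made-up contents:

    import io

    buf = io.StringIO()
    buf.write('00:16:3e:aa:bb:cc,192.0.2.10,host-1\n')
    contents = buf.getvalue()   # the text later written out to the config file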

View File

@@ -13,6 +13,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
+import io
 import os
 import shutil
@@ -21,7 +22,6 @@ from neutron_lib import constants as lib_const
 from neutron_lib.utils import file as file_utils
 from oslo_config import cfg
 from oslo_log import log as logging
-import six
 from neutron.agent.linux import external_process
 from neutron.agent.linux import pd
@@ -86,7 +86,7 @@ class PDDibbler(pd_driver.PDDriverBase):
     def _generate_dibbler_conf(self, ex_gw_ifname, lla, hint_prefix):
         dcwa = self.dibbler_client_working_area
         script_path = utils.get_conf_file_name(dcwa, 'notify', 'sh', True)
-        buf = six.StringIO()
+        buf = io.StringIO()
         buf.write('%s' % SCRIPT_TEMPLATE.render(
             prefix_path=self.prefix_path,
             l3_agent_pid=os.getpid()))
@@ -94,7 +94,7 @@ class PDDibbler(pd_driver.PDDriverBase):
         os.chmod(script_path, 0o744)
         dibbler_conf = utils.get_conf_file_name(dcwa, 'client', 'conf', False)
-        buf = six.StringIO()
+        buf = io.StringIO()
         buf.write('%s' % CONFIG_TEMPLATE.render(
             enterprise_number=cfg.CONF.vendor_pen,
             va_id='0x%s' % self.converted_subnet_id,

View File

@@ -24,7 +24,6 @@ from neutron_lib import constants as n_const
 from neutron_lib.utils import runtime
 from oslo_log import log as logging
 from oslo_utils import netutils
-import six
 from stevedore import driver
 from neutron.common import utils
@@ -121,7 +120,7 @@ class PrefixDelegation(object):
         if not self._is_pd_master_router(router):
             return
         prefix_update = {}
-        for pd_info in six.itervalues(router['subnets']):
+        for pd_info in router['subnets'].values():
             # gateway is added after internal router ports.
             # If a PD is being synced, and if the prefix is available,
             # send update if prefix out of sync; If not available,
@@ -169,7 +168,7 @@ class PrefixDelegation(object):
         preserve_ips = []
         router = self.routers.get(router_id)
         if router is not None:
-            for pd_info in six.itervalues(router['subnets']):
+            for pd_info in router['subnets'].values():
                 preserve_ips.append(pd_info.get_bind_lla_with_mask())
         return preserve_ips
@@ -184,7 +183,7 @@ class PrefixDelegation(object):
         router = self.routers.get(router_id)
         if router is not None:
             subnet_to_delete = None
-            for subnet_id, pd_info in six.iteritems(router['subnets']):
+            for subnet_id, pd_info in router['subnets'].items():
                 if pd_info.ri_ifname == stale_ifname:
                     self._delete_pd(router, pd_info)
                     subnet_to_delete = subnet_id
@@ -268,11 +267,11 @@ class PrefixDelegation(object):
         router['master'] = master
         if master:
-            for pd_info in six.itervalues(router['subnets']):
+            for pd_info in router['subnets'].values():
                 bind_lla_with_mask = pd_info.get_bind_lla_with_mask()
                 self._add_lla(router, bind_lla_with_mask)
         else:
-            for pd_info in six.itervalues(router['subnets']):
+            for pd_info in router['subnets'].values():
                 self._delete_lla(router, pd_info.get_bind_lla_with_mask())
                 if pd_info.client_started:
                     pd_info.driver.disable(self.pmon,
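
The six dict helpers map one-for-one onto Python 3 dict views; a minimal sketch with illustrative data:

    subnets = {'subnet-1': 'pd_info_1', 'subnet-2': 'pd_info_2'}

    # six.itervalues(d) -> d.values()
    # six.iteritems(d)  -> d.items()
    # six.viewkeys(d)   -> d.keys()
    for subnet_id, pd_info in subnets.items():
        pass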

View File

@@ -13,6 +13,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
+import io
 import os
 import pwd
@@ -23,7 +24,6 @@ import netaddr
 from neutron_lib import constants
 from neutron_lib.utils import file as file_utils
 from oslo_log import log as logging
-import six
 from neutron.agent.linux import external_process
 from neutron.agent.linux import utils
@@ -93,7 +93,7 @@ class DaemonMonitor(object):
             self._router_id,
             'radvd.conf',
             True)
-        buf = six.StringIO()
+        buf = io.StringIO()
         for p in router_ports:
             subnets = p.get('subnets', [])
             v6_subnets = [subnet for subnet in subnets if

View File

@@ -15,6 +15,7 @@
 import glob
 import grp
+from http import client as httplib
 import os
 import pwd
 import shlex
@@ -32,7 +33,6 @@ from oslo_rootwrap import client
 from oslo_utils import encodeutils
 from oslo_utils import excutils
 from oslo_utils import fileutils
-from six.moves import http_client as httplib
 from neutron._i18n import _
 from neutron.agent.linux import xenapi_root_helper

View File

@@ -14,6 +14,7 @@
 import hashlib
 import hmac
+import urllib
 from neutron_lib.agent import topics
 from neutron_lib import constants
@@ -25,7 +26,6 @@ import oslo_messaging
 from oslo_service import loopingcall
 from oslo_utils import encodeutils
 import requests
-from six.moves import urllib
 import webob
 from neutron._i18n import _

View File

@@ -13,6 +13,7 @@
 # limitations under the License.
 import collections
+import functools
 import re
 from neutron.agent.linux import external_process
@@ -29,7 +30,6 @@ from oslo_log import log
 from oslo_utils import uuidutils
 from ovsdbapp.backend.ovs_idl import event as row_event
 from ovsdbapp.backend.ovs_idl import vlog
-import six
 LOG = log.getLogger(__name__)
@@ -46,7 +46,7 @@ MetadataPortInfo = collections.namedtuple('MetadataPortInfo', ['mac',
 def _sync_lock(f):
     """Decorator to block all operations for a global sync call."""
-    @six.wraps(f)
+    @functools.wraps(f)
     def wrapped(*args, **kwargs):
         with _SYNC_STATE_LOCK.write_lock():
             return f(*args, **kwargs)

View File

@@ -14,6 +14,7 @@
 import hashlib
 import hmac
+import urllib
 from neutron._i18n import _
 from neutron.agent.linux import utils as agent_utils
@@ -28,7 +29,6 @@ from oslo_config import cfg
 from oslo_log import log as logging
 from oslo_utils import encodeutils
 import requests
-from six.moves import urllib
 import webob

View File

@@ -13,6 +13,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
+import builtins
 import ctypes
 import io
 import os
@@ -23,7 +24,6 @@ from neutron_lib import exceptions
 from neutron_lib.utils import helpers
 from oslo_log import log as logging
 from oslo_utils import encodeutils
-import six
 from neutron._i18n import _
@@ -64,7 +64,7 @@ def create_process(cmd, run_as_root=False, addl_env=None,
         # in a tpool proxy object, avoding blocking other greenthreads.
         #
        # The 'file' type is not available on Python 3.x.
-        file_type = getattr(six.moves.builtins, 'file', io.IOBase)
+        file_type = getattr(builtins, 'file', io.IOBase)
         obj = tpool.Proxy(obj, autowrap=(file_type, ))
     return obj, cmd

View File

@@ -14,6 +14,7 @@
 # under the License.
 import functools
+import urllib
 from neutron_lib.api import attributes
 from neutron_lib import constants
@@ -23,7 +24,6 @@ from oslo_config import cfg
 import oslo_i18n
 from oslo_log import log as logging
 from oslo_serialization import jsonutils
-from six.moves import urllib
 from webob import exc
 from neutron._i18n import _

View File

@@ -999,3 +999,41 @@ class SingletonDecorator:
         if self._instance is None:
             self._instance = self._klass(*args, **kwargs)
         return self._instance
+
+
+def with_metaclass(meta, *bases):
+    """Function from jinja2/_compat.py. License: BSD.
+
+    Method imported from "futures".
+
+    Use it like this::
+
+        class BaseForm(object):
+            pass
+
+        class FormType(type):
+            pass
+
+        class Form(with_metaclass(FormType, BaseForm)):
+            pass
+
+    This requires a bit of explanation: the basic idea is to make a
+    dummy metaclass for one level of class instantiation that replaces
+    itself with the actual metaclass. Because of internal type checks
+    we also need to make sure that we downgrade the custom metaclass
+    for one level to something closer to type (that's why __call__ and
+    __init__ comes back from type etc.).
+
+    This has the advantage over six.with_metaclass of not introducing
+    dummy classes into the final MRO.
+    """
+    class metaclass(meta):
+        __call__ = type.__call__
+        __init__ = type.__init__
+
+        def __new__(cls, name, this_bases, d):
+            if this_bases is None:
+                return type.__new__(cls, name, (), d)
+            return meta(name, bases, d)
+    return metaclass('temporary_class', None, {})
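
A minimal usage sketch of the helper added above, mirroring its own docstring (class names are illustrative); it assumes the function lives in neutron.common.utils as shown in this file:

    from neutron.common import utils

    class FormType(type):
        pass

    class BaseForm(object):
        pass

    class Form(utils.with_metaclass(FormType, BaseForm)):
        pass

    assert type(Form) is FormType and issubclass(Form, BaseForm)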

View File

@@ -10,11 +10,10 @@
 # License for the specific language governing permissions and limitations
 # under the License.
+import importlib
 import inspect
 import itertools
-import six
 from neutron.conf.policies import address_scope
 from neutron.conf.policies import agent
 from neutron.conf.policies import auto_allocated_topology
@@ -78,4 +77,4 @@ def reload_default_policies():
                 module.__name__.startswith(__package__)):
             # NOTE: pylint checks function args wrongly.
             # pylint: disable=too-many-function-args
-            six.moves.reload_module(module)
+            importlib.reload(module)
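
importlib.reload is the stdlib replacement for six.moves.reload_module: it re-executes the module body in place so its default policies are registered again. A short sketch using one of the modules imported above, assuming neutron is importable:

    import importlib
    from neutron.conf.policies import agent

    importlib.reload(agent)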

View File

@@ -38,7 +38,6 @@ import oslo_messaging
 from oslo_serialization import jsonutils
 from oslo_utils import importutils
 from oslo_utils import timeutils
-import six
 from neutron.agent.common import utils
 from neutron.api.rpc.callbacks import version_manager
@@ -125,7 +124,7 @@ class AgentAvailabilityZoneMixin(az_ext.AvailabilityZonePluginBase):
     def _adjust_az_filters(self, filters):
         # The intersect of sets gets us applicable filter keys (others ignored)
-        common_keys = six.viewkeys(filters) & six.viewkeys(AZ_ATTRIBUTE_MAP)
+        common_keys = filters.keys() & AZ_ATTRIBUTE_MAP.keys()
         for key in common_keys:
             filter_key = AZ_ATTRIBUTE_MAP[key]['agent_key']
             filter_vals = filters.pop(key)

View File

@@ -16,7 +16,6 @@
 import functools
 import netaddr
-import six
 from neutron_lib.api.definitions import network as net_def
 from neutron_lib.api.definitions import port as port_def
@@ -105,7 +104,7 @@ class DbBasePluginCommon(object):
     @staticmethod
     def _generate_macs(mac_count=1):
         mac_maker = net.random_mac_generator(cfg.CONF.base_mac.split(':'))
-        return [six.next(mac_maker) for x in range(mac_count)]
+        return [next(mac_maker) for x in range(mac_count)]
     @db_api.CONTEXT_READER
     def _is_mac_in_use(self, context, network_id, mac_address):

View File

@@ -15,7 +15,6 @@
 import netaddr
 from neutron_lib.db import api as db_api
-import six
 from sqlalchemy import func
 import neutron.db.models_v2 as mod
@@ -32,7 +31,7 @@ SUPPORTED_FILTERS = {
     'project_id': mod.Network.project_id,
     'ip_version': mod.Subnet.ip_version,
 }
-SUPPORTED_FILTER_KEYS = six.viewkeys(SUPPORTED_FILTERS)
+SUPPORTED_FILTER_KEYS = set(SUPPORTED_FILTERS.keys())
 class IpAvailabilityMixin(object):
@@ -82,7 +81,7 @@ class IpAvailabilityMixin(object):
                 subnet_total_ips_dict.get(row.subnet_id, 0))
         # Convert result back into the list it expects
-        net_ip_availabilities = list(six.viewvalues(result_dict))
+        net_ip_availabilities = list(result_dict.values())
         return net_ip_availabilities
     @classmethod
@@ -143,7 +142,7 @@ class IpAvailabilityMixin(object):
     @classmethod
     def _adjust_query_for_filters(cls, query, filters):
         # The intersect of sets gets us applicable filter keys (others ignored)
-        common_keys = six.viewkeys(filters) & SUPPORTED_FILTER_KEYS
+        common_keys = filters.keys() & SUPPORTED_FILTER_KEYS
         for key in common_keys:
             filter_vals = filters[key]
             if filter_vals:

View File

@@ -39,6 +39,8 @@ tests_imports_from2 = re.compile(r"\bfrom[\s]+neutron[\s]+import[\s]+tests\b")
 import_mock = re.compile(r"\bimport[\s]+mock\b")
 import_from_mock = re.compile(r"\bfrom[\s]+mock[\s]+import\b")
+import_six = re.compile(r"\bimport[\s]+six\b")
+import_from_six = re.compile(r"\bfrom[\s]+six[\s]+import\b")
 @core.flake8ext
@@ -238,3 +240,17 @@ def check_no_import_mock(logical_line, filename, noqa):
     for regex in import_mock, import_from_mock:
         if re.match(regex, logical_line):
             yield(0, msg)
+
+
+@core.flake8ext
+def check_no_import_six(logical_line, filename, noqa):
+    """N348 - Test code must not import six library
+    """
+    msg = "N348: Test code must not import six library"
+
+    if noqa:
+        return
+
+    for regex in import_six, import_from_six:
+        if re.match(regex, logical_line):
+            yield(0, msg)
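
A rough sketch of how the new check behaves when called directly, assuming it is importable from neutron.hacking.checks (file name and lines below are hypothetical): a logical line matching either regex yields an N348 offense, anything else yields nothing.

    from neutron.hacking.checks import check_no_import_six

    hits = list(check_no_import_six('import six', 'neutron/tests/unit/foo.py', False))
    assert hits and hits[0][1].startswith('N348')
    assert list(check_no_import_six('from six import moves', 'neutron/tests/unit/foo.py', False))
    assert not list(check_no_import_six('import sixteen', 'neutron/tests/unit/foo.py', False))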

View File

@@ -29,7 +29,6 @@ from oslo_utils import versionutils
 from oslo_versionedobjects import base as obj_base
 from oslo_versionedobjects import exception as obj_exception
 from oslo_versionedobjects import fields as obj_fields
-import six
 from sqlalchemy import orm
 from neutron._i18n import _
@@ -55,7 +54,7 @@ def register_filter_hook_on_model(model, filter_name):
     obj_class.add_extra_filter_name(filter_name)
-class LazyQueryIterator(six.Iterator):
+class LazyQueryIterator(object):
     def __init__(self, obj_class, lazy_query):
         self.obj_class = obj_class
         self.context = None

View File

@@ -19,10 +19,10 @@ from neutron_lib.callbacks import events
 from neutron_lib.callbacks import registry
 from neutron_lib.callbacks import resources
 from neutron_lib import exceptions
-from six import with_metaclass
 from sqlalchemy import and_
 from neutron._i18n import _
+from neutron.common import utils
 from neutron.db import _utils as db_utils
 from neutron.db import rbac_db_mixin
 from neutron.db import rbac_db_models as models
@@ -386,4 +386,5 @@ class RbacNeutronMetaclass(type):
         return klass
-NeutronRbacObject = with_metaclass(RbacNeutronMetaclass, base.NeutronDbObject)
+NeutronRbacObject = utils.with_metaclass(RbacNeutronMetaclass,
+                                         base.NeutronDbObject)

View File

@@ -21,7 +21,6 @@ from neutron_lib.objects import common_types
 from neutron_lib.utils import net as net_utils
 from oslo_utils import versionutils
 from oslo_versionedobjects import fields as obj_fields
-import six
 from sqlalchemy import func
 from neutron.db.models import dvr as dvr_models
@@ -353,7 +352,8 @@ class FloatingIP(base.NeutronDbObject):
         # Just hit the first row of each group
         for key, value in group_iterator:
-            row = [r for r in six.next(value)]
+            # pylint: disable=stop-iteration-return
+            row = list(next(value))
             yield (cls._load_object(context, row[0]), row[1])

View File

@@ -14,6 +14,8 @@
 # License for the specific language governing permissions and limitations
 # under the License.
+import urllib
+
 from neutron_lib.api.definitions import network as net_def
 from neutron_lib.api.definitions import port as port_def
 from neutron_lib.api.definitions import subnet as subnet_def
@@ -22,7 +24,6 @@ from oslo_config import cfg
 from oslo_log import log
 import pecan
 from pecan import request
-from six.moves import urllib
 from neutron._i18n import _
 from neutron.api.views import versions as versions_view

View File

@@ -24,7 +24,6 @@ from neutron_lib.plugins import directory
 from oslo_db import exception as db_exc
 from oslo_log import log
 from oslo_utils import uuidutils
-import six
 from sqlalchemy import or_
 from sqlalchemy.orm import exc
@@ -168,7 +167,7 @@ def get_ports_and_sgs(context, port_ids):
         return []
     ports_to_sg_ids = get_sg_ids_grouped_by_port(context, port_ids)
     return [make_port_dict_with_security_groups(port, sec_groups)
-            for port, sec_groups in six.iteritems(ports_to_sg_ids)]
+            for port, sec_groups in ports_to_sg_ids.items()]
 def get_sg_ids_grouped_by_port(context, port_ids):

View File

@@ -32,7 +32,6 @@ from oslo_log import log as logging
 import oslo_messaging
 from oslo_service import service
 from oslo_utils import excutils
-from six import moves
 from neutron.agent.linux import bridge_lib
 from neutron.agent.linux import ip_lib
@@ -700,7 +699,7 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase):
             return False
         test_iface = None
-        for seg_id in moves.range(1, constants.MAX_VXLAN_VNI + 1):
+        for seg_id in range(1, constants.MAX_VXLAN_VNI + 1):
            if (ip_lib.device_exists(self.get_vxlan_device_name(seg_id)) or
                    ip_lib.vxlan_in_use(seg_id)):
               continue

View File

@@ -32,7 +32,6 @@ import oslo_messaging
 from oslo_service import loopingcall
 from osprofiler import profiler
 import pyroute2
-import six
 from neutron._i18n import _
 from neutron.agent.common import utils
@@ -534,7 +533,7 @@ class SriovNicAgentConfigParser(object):
         exists in device mappings.
         """
         dev_net_set = set(itertools.chain.from_iterable(
-            six.itervalues(self.device_mappings)))
+            self.device_mappings.values()))
         for dev_name in self.exclude_devices.keys():
             if dev_name not in dev_net_set:
                 raise ValueError(_(

View File

@@ -47,7 +47,6 @@ from oslo_service import systemd
 from oslo_utils import netutils
 from osprofiler import profiler
 from ovsdbapp import exceptions as ovs_exceptions
-import six
 from neutron._i18n import _
 from neutron.agent.common import ip_lib
@@ -177,8 +176,8 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin,
                     "be removed in W release. Please use patch ports "
                     "instead.")
         self.veth_mtu = agent_conf.veth_mtu
-        self.available_local_vlans = set(six.moves.range(
-            n_const.MIN_VLAN_TAG, n_const.MAX_VLAN_TAG + 1))
+        self.available_local_vlans = set(range(n_const.MIN_VLAN_TAG,
+                                               n_const.MAX_VLAN_TAG + 1))
         self.tunnel_types = agent_conf.tunnel_types or []
         self.enable_tunneling = bool(self.tunnel_types)
         self.l2_pop = agent_conf.l2_population
@@ -226,7 +225,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin,
         self.rp_bandwidths = place_utils.parse_rp_bandwidths(
             ovs_conf.resource_provider_bandwidths)
-        br_set = set(six.itervalues(self.bridge_mappings))
+        br_set = set(self.bridge_mappings.values())
         n_utils.validate_rp_bandwidth(self.rp_bandwidths,
                                       br_set)
         self.rp_inventory_defaults = place_utils.parse_rp_inventory_defaults(

View File

@@ -30,7 +30,6 @@ from oslo_config import cfg
 from oslo_db import exception as db_exc
 from oslo_log import log
 from oslo_utils import uuidutils
-from six import moves
 from sqlalchemy import or_
 from neutron._i18n import _
@@ -194,7 +193,7 @@ class _TunnelTypeDriverBase(helpers.SegmentTypeDriver, metaclass=abc.ABCMeta):
         tunnel_ids = set()
         ranges = self.get_network_segment_ranges()
         for tun_min, tun_max in ranges:
-            tunnel_ids |= set(moves.range(tun_min, tun_max + 1))
+            tunnel_ids |= set(range(tun_min, tun_max + 1))
         tunnel_id_getter = operator.attrgetter(self.segmentation_key)
         tunnel_col = getattr(self.model, self.segmentation_key)

View File

@@ -13,12 +13,12 @@
 import cProfile
 from datetime import datetime
+import functools
 import io
 import pstats
 from oslo_config import cfg
 from oslo_log import log
-import six
 from neutron.common import utils
 from neutron.conf import profiling as profiling_conf_opts
@@ -40,7 +40,7 @@ def profile(f):
     the result.
     """
-    @six.wraps(f)
+    @functools.wraps(f)
     def profile_wrapper(*args, **kwargs):
         try:

View File

@@ -10,10 +10,11 @@
 # License for the specific language governing permissions and limitations
 # under the License.
+import functools
+
 from neutron_lib.db import api as db_api
 from oslo_config import cfg
 from oslo_log import log
-import six
 from neutron._i18n import _
 from neutron.quota import resource
@@ -89,7 +90,7 @@ def mark_resources_dirty(f):
     of the decorated function.
     """
-    @six.wraps(f)
+    @functools.wraps(f)
     def wrapper(_self, context, *args, **kwargs):
         ret_val = f(_self, context, *args, **kwargs)
         set_resources_dirty(context)
@@ -112,7 +113,7 @@ class tracked_resources(object):
     def __call__(self, f):
-        @six.wraps(f)
+        @functools.wraps(f)
         def wrapper(*args, **kwargs):
             registry = ResourceRegistry.get_instance()
             for resource_name in self._tracked_resources:

View File

@@ -23,7 +23,6 @@ from neutron_lib.plugins import utils as plugin_utils
 from oslo_config import cfg
 from oslo_log import helpers as log_helpers
 from oslo_log import log
-import six
 from neutron._i18n import _
 from neutron.db import segments_db
@@ -106,7 +105,7 @@ class NetworkSegmentRangePlugin(ext_range.NetworkSegmentRangePluginBase):
     def _add_unchanged_range_attributes(self, updates, existing):
         """Adds data for unspecified fields on incoming update requests."""
-        for key, value in six.iteritems(existing):
+        for key, value in existing.items():
             updates.setdefault(key, value)
         return updates

View File

@@ -23,6 +23,7 @@ import inspect
 import logging
 import os
 import os.path
+import queue
 import threading
 from unittest import mock
@@ -42,7 +43,6 @@ from oslo_utils import fileutils
 from oslo_utils import strutils
 from oslotest import base
 from osprofiler import profiler
-import six
 from sqlalchemy import exc as sqlalchemy_exc
 import testtools
 from testtools import content
@@ -495,7 +495,7 @@ class BaseTestCase(DietTestCase):
                 item = self.q.get(False)
                 func, func_args = item[0], item[1]
                 func(*func_args)
-            except six.moves.queue.Empty:
+            except queue.Empty:
                 pass
             finally:
                 if item:
@@ -506,7 +506,7 @@ class BaseTestCase(DietTestCase):
         def get_exception(self):
            return self.exception
-        q = six.moves.queue.Queue()
+        q = queue.Queue()
         for func, func_args in zip(calls, args):
            q.put_nowait((func, func_args))

View File

@@ -12,10 +12,10 @@
 # License for the specific language governing permissions and limitations
 # under the License.
+import configparser
 import os.path
 import fixtures
-import six
 from neutron.tests import base
@@ -63,7 +63,7 @@ class ConfigFileFixture(fixtures.Fixture):
             f.flush()
     def dict_to_config_parser(self, config_dict):
-        config_parser = six.moves.configparser.ConfigParser()
+        config_parser = configparser.ConfigParser()
         for section, section_dict in config_dict.items():
             if section != 'DEFAULT':
                 config_parser.add_section(section)
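
configparser.ConfigParser is what six.moves.configparser resolved to on Python 3, so the fixture keeps its behaviour; a small standalone sketch with an illustrative section:

    import configparser

    config_parser = configparser.ConfigParser()
    config_parser.add_section('agent')
    config_parser.set('agent', 'debug', 'True')   # option values must be strings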

View File

@@ -18,7 +18,6 @@ import netaddr
 from neutron_lib import constants as lib_constants
 from neutron_lib.services.qos import constants as qos_consts
 from oslo_utils import uuidutils
-from six import moves
 from neutron.common import ipv6_utils
@@ -251,8 +250,7 @@ def router_append_subnet(router, count=1,
         ipv6_subnet_modes = [subnet_mode_none] * count
     elif len(ipv6_subnet_modes) != count:
         ipv6_subnet_modes.extend([subnet_mode_none for i in
-                                  moves.range(len(ipv6_subnet_modes),
-                                              count)])
+                                  range(len(ipv6_subnet_modes), count)])
     if ip_version == lib_constants.IP_VERSION_4:
         ip_pool = '35.4.%i.4'
@@ -281,7 +279,7 @@ def router_append_subnet(router, count=1,
         fixed_ips, subnets = [], []
     num_existing_subnets = len(subnets)
-    for i in moves.range(count):
+    for i in range(count):
         subnet_id = _uuid()
         fixed_ips.append(
             {'ip_address': ip_pool % (i + num_existing_subnets),

View File

@@ -15,6 +15,7 @@
 import copy
 import sys
+import urllib
 import uuid
 from wsgiref import simple_server as wsgi_simple_server
@@ -22,7 +23,6 @@ from oslo_config import cfg
 from oslo_config import types
 from oslo_log import log as logging
 from oslo_serialization import jsonutils
-from six.moves import urllib
 from neutron.common import config as common_config

View File

@@ -18,7 +18,6 @@ from unittest import mock
 from neutron_lib.services.qos import constants as qos_constants
 from oslo_utils import uuidutils
-import six
 from neutron.agent.common import ovs_lib
 from neutron.agent.linux import ip_lib
@@ -32,9 +31,9 @@ MIN_RATE_DEFAULT = 1000000
 MAX_RATE_DEFAULT = 3000000
 BURST_DEFAULT = 2000000
 QUEUE_NUM_DEFAULT = 'queue_num'
-OTHER_CONFIG_DEFAULT = {six.u('max-rate'): six.u(str(MAX_RATE_DEFAULT)),
-                        six.u('burst'): six.u(str(BURST_DEFAULT)),
-                        six.u('min-rate'): six.u(str(MIN_RATE_DEFAULT))}
+OTHER_CONFIG_DEFAULT = {'max-rate': str(MAX_RATE_DEFAULT),
+                        'burst': str(BURST_DEFAULT),
+                        'min-rate': str(MIN_RATE_DEFAULT)}
 class BaseOVSTestCase(base.BaseSudoTestCase):
@@ -147,10 +146,9 @@ class BaseOVSTestCase(base.BaseSudoTestCase):
     def test__update_queue_new(self):
         queue_id, neutron_port_id = self._create_queue()
         self.assertIsNotNone(queue_id)
-        external_ids = {six.u('port'): six.u(neutron_port_id),
-                        six.u('queue-num'): six.u('queue_num'),
-                        six.u('type'):
-                            six.u(qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH)}
+        external_ids = {'port': str(neutron_port_id),
+                        'queue-num': 'queue_num',
+                        'type': qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH}
         expected = {'_uuid': queue_id,
                     'other_config': OTHER_CONFIG_DEFAULT,
@@ -160,13 +158,12 @@ class BaseOVSTestCase(base.BaseSudoTestCase):
     def test__update_queue_update(self):
         queue_id, neutron_port_id = self._create_queue()
         self.assertIsNotNone(queue_id)
-        other_config = {six.u('max-rate'): six.u('6000000'),
-                        six.u('burst'): six.u('5000000'),
-                        six.u('min-rate'): six.u('4000000')}
-        external_ids = {six.u('port'): six.u(neutron_port_id),
-                        six.u('queue-num'): six.u('queue_num'),
-                        six.u('type'):
-                            six.u(qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH)}
+        other_config = {'max-rate': '6000000',
+                        'burst': '5000000',
+                        'min-rate': '4000000'}
+        external_ids = {'port': str(neutron_port_id),
+                        'queue-num': 'queue_num',
+                        'type': qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH}
         queue = self._list_queues(queue_id)
         self.assertIsNotNone(queue)
@@ -181,10 +178,9 @@ class BaseOVSTestCase(base.BaseSudoTestCase):
     def test__find_queue(self):
         queue_id, neutron_port_id = self._create_queue()
-        external_ids = {six.u('port'): six.u(neutron_port_id),
-                        six.u('type'): six.u(
-                            qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH),
-                        six.u('queue-num'): six.u('queue_num')}
+        external_ids = {'port': str(neutron_port_id),
+                        'type': qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH,
+                        'queue-num': 'queue_num'}
         expected = {'_uuid': queue_id,
                     'external_ids': external_ids,
                     'other_config': OTHER_CONFIG_DEFAULT}
@@ -199,10 +195,9 @@ class BaseOVSTestCase(base.BaseSudoTestCase):
             ports.append(neutron_port_id)
         for idx, port in enumerate(ports):
-            external_ids = {six.u('port'): six.u(ports[idx]),
-                            six.u('type'): six.u(
-                                qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH),
-                            six.u('queue-num'): six.u('queue_num')}
+            external_ids = {'port': str(ports[idx]),
+                            'type': qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH,
+                            'queue-num': 'queue_num'}
             expected = {'_uuid': queue_ids[idx],
                         'external_ids': external_ids,
                         'other_config': OTHER_CONFIG_DEFAULT}
@@ -213,10 +208,9 @@ class BaseOVSTestCase(base.BaseSudoTestCase):
     def test__delete_queue(self):
         queue_id, port_id = self._create_queue()
-        external_ids = {six.u('port'): six.u(port_id),
-                        six.u('type'): six.u(
-                            qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH),
-                        six.u('queue-num'): six.u('queue_num')}
+        external_ids = {'port': str(port_id),
+                        'type': qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH,
+                        'queue-num': 'queue_num'}
         expected = {'_uuid': queue_id,
                     'external_ids': external_ids,
                     'other_config': OTHER_CONFIG_DEFAULT}
@@ -246,9 +240,8 @@ class BaseOVSTestCase(base.BaseSudoTestCase):
         queues = {1: queue_id}
         qos_id = self._create_qos(queues=queues)
-        external_ids = {six.u('id'): six.u(self.ovs._min_bw_qos_id),
-                        six.u('_type'): six.u(
-                            qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH)}
+        external_ids = {'id': str(self.ovs._min_bw_qos_id),
+                        '_type': qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH}
         expected = {'_uuid': qos_id,
                     'type': 'linux-htb',
                     'external_ids': external_ids}
@@ -262,9 +255,8 @@ class BaseOVSTestCase(base.BaseSudoTestCase):
         queues = {1: queue_id_1}
         qos_id = self._create_qos(queues=queues)
-        external_ids = {six.u('id'): six.u(self.ovs._min_bw_qos_id),
-                        six.u('_type'): six.u(
-                            qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH)}
+        external_ids = {'id': str(self.ovs._min_bw_qos_id),
+                        '_type': qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH}
         expected = {'_uuid': qos_id,
                     'type': 'linux-htb',
                     'external_ids': external_ids}
@@ -344,13 +336,12 @@ class BaseOVSTestCase(base.BaseSudoTestCase):
         self.ovs.update_minimum_bandwidth_queue(self.port_id, [port_name],
                                                 queue_num, 1800)
         self._check_value(qos_id, self._find_port_qos, port_name)
-        external_ids = {six.u('port'): six.u(port_id),
-                        six.u('type'): six.u(
-                            qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH),
-                        six.u('queue-num'): six.u('queue_num')}
-        other_config = {six.u('max-rate'): six.u(str(MAX_RATE_DEFAULT)),
-                        six.u('burst'): six.u(str(BURST_DEFAULT)),
-                        six.u('min-rate'): six.u('1800000')}
+        external_ids = {'port': str(port_id),
+                        'type': qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH,
+                        'queue-num': 'queue_num'}
+        other_config = {'max-rate': str(MAX_RATE_DEFAULT),
+                        'burst': str(BURST_DEFAULT),
+                        'min-rate': '1800000'}
         expected = {'_uuid': queue_id,
                     'external_ids': external_ids,
                     'other_config': other_config}
@@ -367,11 +358,10 @@ class BaseOVSTestCase(base.BaseSudoTestCase):
         qos_id = self._find_port_qos(port_name)
         qos = self._list_qos(qos_id)
         queue_id = qos['queues'][1].uuid
-        external_ids = {six.u('port'): six.u(self.port_id),
-                        six.u('type'): six.u(
-                            qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH),
-                        six.u('queue-num'): six.u(str(queue_num))}
-        other_config = {six.u('min-rate'): six.u('1700000')}
+        external_ids = {'port': str(self.port_id),
+                        'type': qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH,
+                        'queue-num': str(queue_num)}
+        other_config = {'min-rate': '1700000'}
         expected = {'_uuid': queue_id,
                     'external_ids': external_ids,
                     'other_config': other_config}
@@ -432,10 +422,9 @@ class BaseOVSTestCase(base.BaseSudoTestCase):
         self._check_value(None, self._list_qos, qos_id=qos_id)
         self._check_value(None, self._list_queues, queue_id=queue_id_1)
         self._check_value(None, self._list_queues, queue_id=queue_id_2)
-        external_ids = {six.u('port'): six.u(port_id_3),
-                        six.u('type'): six.u(
-                            qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH),
-                        six.u('queue-num'): six.u('queue_num')}
+        external_ids = {'port': str(port_id_3),
+                        'type': qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH,
+                        'queue-num': 'queue_num'}
         expected = {'_uuid': queue_id_3,
                     'external_ids': external_ids,
                     'other_config': OTHER_CONFIG_DEFAULT}

View File

@@ -21,7 +21,6 @@ import netaddr
 from neutron_lib.api.definitions import portbindings
 from neutron_lib import constants as lib_constants
 from neutron_lib.exceptions import l3 as l3_exc
-import six
 import testtools
 from neutron.agent.l3 import agent as neutron_l3_agent
@@ -1703,7 +1702,7 @@ class TestDvrRouter(DvrRouterTestFramework, framework.L3AgentTestFramework):
             ip_version=lib_constants.IP_VERSION_4,
             table=router_fip_table_idx,
             via=str(next_hop))
-        expected_extra_route = [{'cidr': six.u(destination),
+        expected_extra_route = [{'cidr': str(destination),
                                  'device': fg_port_name,
                                  'table': router_fip_table_idx,
                                  'via': next_hop}]
@@ -1766,11 +1765,11 @@ class TestDvrRouter(DvrRouterTestFramework, framework.L3AgentTestFramework):
                        str(net_addr_2) + '/' +
                        str(fixed_ips_2[0]['prefixlen']))
         expected_routes = [{'device': fpr_device_name,
-                            'cidr': six.u(route_cidr_1),
+                            'cidr': str(route_cidr_1),
                             'via': str(rtr_2_fip.ip),
                             'table': 'main'},
                            {'device': fpr_device_name,
-                            'cidr': six.u(route_cidr_2),
+                            'cidr': str(route_cidr_2),
                             'via': str(rtr_2_fip.ip),
                             'table': 'main'}]
         # Comparing the static routes for both internal interfaces on the

View File

@@ -15,12 +15,12 @@
 # License for the specific language governing permissions and limitations
 # under the License.
+import queue
 import signal
 import sys
 import threading
 from oslo_serialization import jsonutils
-from six.moves import queue
 from neutron.agent.linux import ip_lib

View File

@@ -16,7 +16,6 @@ import os
 import sys
 from oslo_config import cfg
-from six import moves
 from neutron.agent.linux import external_process
 from neutron.common import utils
@@ -57,7 +56,7 @@ class BaseTestProcessMonitor(base.BaseLoggingTestCase):
     def spawn_n_children(self, n, service=None):
         self._child_processes = []
-        for child_number in moves.range(n):
+        for child_number in range(n):
             uuid = self._child_uuid(child_number)
             _callback = self._make_cmdline_callback(uuid)
             pm = external_process.ProcessManager(

View File

@@ -13,11 +13,12 @@
 # License for the specific language governing permissions and limitations
 # under the License.
+from http import client as httplib
+
 from neutron_lib.api.definitions import portbindings
 from neutron_lib import context
 from neutron_lib.plugins import directory
 from oslo_config import cfg
-from six.moves import http_client as httplib
 from webob import exc
 from neutron.tests.unit.db import test_db_base_plugin_v2

View File

@@ -19,7 +19,6 @@ from unittest import mock
 from neutron_lib import constants
 from neutron_lib.utils import helpers
 from oslo_config import cfg
-import six
 from neutron.agent.l2.extensions.fdb_population import (
     FdbPopulationAgentExtension)
@@ -50,7 +49,7 @@ class FdbPopulationExtensionTestCase(base.BaseTestCase):
     def _get_existing_device(self):
         device_mappings = helpers.parse_mappings(
             cfg.CONF.FDB.shared_physical_device_mappings, unique_keys=False)
-        DEVICES = six.next(six.itervalues(device_mappings))
+        DEVICES = next(iter(device_mappings.values()))
         return DEVICES[0]
     def _get_fdb_extension(self, mock_execute, fdb_table):

View File

@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import builtins
import io import io
from unittest import mock from unittest import mock
@ -20,7 +21,6 @@ import ddt
import eventlet import eventlet
from eventlet import tpool from eventlet import tpool
from neutron_lib import exceptions from neutron_lib import exceptions
import six
from neutron.agent.windows import utils from neutron.agent.windows import utils
from neutron.tests import base from neutron.tests import base
@@ -62,7 +62,7 @@ class WindowsUtilsTestCase(base.BaseTestCase):
preexec_fn=None, preexec_fn=None,
close_fds=False) close_fds=False)
file_type = getattr(six.moves.builtins, 'file', io.IOBase) file_type = getattr(builtins, 'file', io.IOBase)
mock_tpool_proxy.assert_called_once_with( mock_tpool_proxy.assert_called_once_with(
mock_popen.return_value, autowrap=(file_type, )) mock_popen.return_value, autowrap=(file_type, ))
@@ -196,8 +196,9 @@ class WindowsUtilsTestCase(base.BaseTestCase):
mock.sentinel.cmd, addl_env=mock.sentinel.env, mock.sentinel.cmd, addl_env=mock.sentinel.env,
tpool_proxy=False) tpool_proxy=False)
mock_avoid_blocking_call.assert_called_once_with( mock_avoid_blocking_call.assert_called_once_with(
mock_popen.communicate, six.b(fake_stdin)) mock_popen.communicate, bytes(fake_stdin, 'utf-8'))
mock_popen.communicate.assert_called_once_with(six.b(fake_stdin)) mock_popen.communicate.assert_called_once_with(
bytes(fake_stdin, 'utf-8'))
mock_popen.stdin.close.assert_called_once_with() mock_popen.stdin.close.assert_called_once_with()
def test_get_root_helper_child_pid(self): def test_get_root_helper_child_pid(self):
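
six.b() is a thin wrapper around str-to-bytes encoding on Python 3, so the test can call bytes() directly; a small sketch of the equivalence for the ASCII test data used above:

    fake_stdin = 'fake stdin'

    # six.b() encoded with latin-1; for ASCII-only test input the explicit
    # utf-8 encode below produces the same bytes object.
    assert bytes(fake_stdin, 'utf-8') == fake_stdin.encode('utf-8') == b'fake stdin'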

View File

@@ -15,6 +15,7 @@
import os import os
from unittest import mock from unittest import mock
import urllib
from neutron_lib.api import attributes from neutron_lib.api import attributes
from neutron_lib.api import converters from neutron_lib.api import converters
@@ -31,7 +32,6 @@ from oslo_config import cfg
from oslo_db import exception as db_exc from oslo_db import exception as db_exc
from oslo_policy import policy as oslo_policy from oslo_policy import policy as oslo_policy
from oslo_utils import uuidutils from oslo_utils import uuidutils
from six.moves import urllib
import webob import webob
from webob import exc from webob import exc
import webtest import webtest

View File

@@ -26,7 +26,6 @@ import netaddr
from neutron_lib import constants from neutron_lib import constants
from oslo_log import log as logging from oslo_log import log as logging
from osprofiler import profiler from osprofiler import profiler
import six
import testscenarios import testscenarios
import testtools import testtools
@@ -336,7 +335,7 @@ class TestPortRuleMasking(base.BaseTestCase):
def test_port_rule_masking_random_ranges(self): def test_port_rule_masking_random_ranges(self):
# calling randint a bunch of times is really slow # calling randint a bunch of times is really slow
randports = sorted(random.sample(six.moves.range(1, 65536), 2000)) randports = sorted(random.sample(range(1, 65536), 2000))
port_max = 0 port_max = 0
for i in randports: for i in randports:
port_min = port_max port_min = port_max
@@ -436,8 +435,7 @@ class TestThrottler(base.BaseTestCase):
self.assertEqual(2, orig_function.call_count) self.assertEqual(2, orig_function.call_count)
lock_with_timer = six.get_function_closure( lock_with_timer = throttled_func.__closure__[1].cell_contents
throttled_func)[1].cell_contents
timestamp = lock_with_timer.timestamp - threshold timestamp = lock_with_timer.timestamp - threshold
lock_with_timer.timestamp = timestamp lock_with_timer.timestamp = timestamp
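
six.get_function_closure(f) maps directly to f.__closure__ on Python 3; a self-contained sketch of reading a captured object back out of a closure, with illustrative names (the real test indexes the cell positionally):

    class _LockWithTimer(object):
        timestamp = 0

    def _make_throttled():
        lock_with_timer = _LockWithTimer()
        threshold = 5

        def throttled_func():
            return lock_with_timer, threshold
        return throttled_func

    throttled_func = _make_throttled()
    # __closure__ cells line up with __code__.co_freevars, so the captured
    # objects can be looked up by name instead of by position.
    cells = dict(zip(throttled_func.__code__.co_freevars,
                     throttled_func.__closure__))
    assert isinstance(cells['lock_with_timer'].cell_contents, _LockWithTimer)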

View File

@@ -1133,8 +1133,7 @@ class TestPortsV2(NeutronDbPluginV2TestCase):
return False return False
return real_has_attr(item, attr) return real_has_attr(item, attr)
with mock.patch('six.moves.builtins.hasattr', with mock.patch('builtins.hasattr', new=fakehasattr):
new=fakehasattr):
with self.network() as net: with self.network() as net:
res = self._create_port_bulk(self.fmt, 2, net['network']['id'], res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
'test', True) 'test', True)
@@ -1173,8 +1172,7 @@ class TestPortsV2(NeutronDbPluginV2TestCase):
return False return False
return real_has_attr(item, attr) return real_has_attr(item, attr)
with mock.patch('six.moves.builtins.hasattr', with mock.patch('builtins.hasattr', new=fakehasattr):
new=fakehasattr):
plugin = directory.get_plugin() plugin = directory.get_plugin()
method_to_patch = '_process_port_binding' method_to_patch = '_process_port_binding'
if real_has_attr(plugin, method_to_patch): if real_has_attr(plugin, method_to_patch):
@@ -2923,8 +2921,7 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
return False return False
return real_has_attr(item, attr) return real_has_attr(item, attr)
with mock.patch('six.moves.builtins.hasattr', with mock.patch('builtins.hasattr', new=fakehasattr):
new=fakehasattr):
res = self._create_network_bulk(self.fmt, 2, 'test', True) res = self._create_network_bulk(self.fmt, 2, 'test', True)
self._validate_behavior_on_bulk_success(res, 'networks') self._validate_behavior_on_bulk_success(res, 'networks')
@@ -2949,8 +2946,7 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
orig = directory.get_plugin().create_network orig = directory.get_plugin().create_network
# ensures the API choose the emulation code path # ensures the API choose the emulation code path
with mock.patch('six.moves.builtins.hasattr', with mock.patch('builtins.hasattr', new=fakehasattr):
new=fakehasattr):
method_to_patch = _get_create_db_method('network') method_to_patch = _get_create_db_method('network')
with mock.patch.object(directory.get_plugin(), with mock.patch.object(directory.get_plugin(),
method_to_patch) as patched_plugin: method_to_patch) as patched_plugin:
@@ -3460,8 +3456,7 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
return False return False
return real_has_attr(item, attr) return real_has_attr(item, attr)
with mock.patch('six.moves.builtins.hasattr', with mock.patch('builtins.hasattr', new=fakehasattr):
new=fakehasattr):
with self.network() as net: with self.network() as net:
res = self._create_subnet_bulk(self.fmt, 2, res = self._create_subnet_bulk(self.fmt, 2,
net['network']['id'], net['network']['id'],
@@ -3477,8 +3472,7 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
return False return False
return real_has_attr(item, attr) return real_has_attr(item, attr)
with mock.patch('six.moves.builtins.hasattr', with mock.patch('builtins.hasattr', new=fakehasattr):
new=fakehasattr):
orig = directory.get_plugin().create_subnet orig = directory.get_plugin().create_subnet
method_to_patch = _get_create_db_method('subnet') method_to_patch = _get_create_db_method('subnet')
with mock.patch.object(directory.get_plugin(), with mock.patch.object(directory.get_plugin(),
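
With six gone, the fake hasattr in the bulk-emulation tests above is patched in at 'builtins.hasattr' instead of 'six.moves.builtins.hasattr'; a minimal standalone sketch of the technique, with illustrative class and attribute names:

    from unittest import mock

    class FakePlugin(object):
        def create_network_bulk(self, *args):
            pass

    real_has_attr = hasattr

    def fakehasattr(item, attr):
        # Hide native *_bulk support so the emulated code path is exercised.
        if attr.endswith('_bulk'):
            return False
        return real_has_attr(item, attr)

    with mock.patch('builtins.hasattr', new=fakehasattr):
        assert not hasattr(FakePlugin(), 'create_network_bulk')
    assert hasattr(FakePlugin(), 'create_network_bulk')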

View File

@@ -1704,8 +1704,7 @@ class TestSecurityGroups(SecurityGroupDBTestCase):
return False return False
return real_has_attr(item, attr) return real_has_attr(item, attr)
with mock.patch('six.moves.builtins.hasattr', with mock.patch('builtins.hasattr', new=fakehasattr):
new=fakehasattr):
with self.security_group() as sg: with self.security_group() as sg:
rule1 = self._build_security_group_rule( rule1 = self._build_security_group_rule(
sg['security_group']['id'], 'ingress', sg['security_group']['id'], 'ingress',
@@ -1775,9 +1774,7 @@ class TestSecurityGroups(SecurityGroupDBTestCase):
return False return False
return real_has_attr(item, attr) return real_has_attr(item, attr)
with mock.patch('six.moves.builtins.hasattr', with mock.patch('builtins.hasattr', new=fakehasattr):
new=fakehasattr):
with self.security_group() as sg: with self.security_group() as sg:
rule = self._build_security_group_rule( rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress', sg['security_group']['id'], 'ingress',
@@ -1812,8 +1809,7 @@ class TestSecurityGroups(SecurityGroupDBTestCase):
return False return False
return real_has_attr(item, attr) return real_has_attr(item, attr)
with mock.patch('six.moves.builtins.hasattr', with mock.patch('builtins.hasattr', new=fakehasattr):
new=fakehasattr):
with self.security_group() as sg: with self.security_group() as sg:
rule = self._build_security_group_rule( rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress', sg['security_group']['id'], 'ingress',

View File

@@ -19,7 +19,6 @@ from neutron_lib import context
from neutron_lib.plugins import directory from neutron_lib.plugins import directory
from oslo_utils import timeutils from oslo_utils import timeutils
from oslo_utils import uuidutils from oslo_utils import uuidutils
import six
from neutron.common import utils from neutron.common import utils
from neutron.db import db_base_plugin_v2 from neutron.db import db_base_plugin_v2
@@ -74,7 +73,7 @@ class TimeStampChangedsinceTestCase(test_db_base_plugin_v2.
return resources return resources
def _return_by_timedelay(self, resource, timedelay): def _return_by_timedelay(self, resource, timedelay):
resource_type = six.next(six.iterkeys(resource)) resource_type = next(iter(resource))
time_create = timeutils.parse_isotime( time_create = timeutils.parse_isotime(
resource[resource_type]['updated_at']) resource[resource_type]['updated_at'])
time_before = datetime.timedelta(seconds=timedelay) time_before = datetime.timedelta(seconds=timedelay)
@@ -85,7 +84,7 @@ class TimeStampChangedsinceTestCase(test_db_base_plugin_v2.
addedtime_string) addedtime_string)
def _update_test_resource_by_name(self, resource): def _update_test_resource_by_name(self, resource):
resource_type = six.next(six.iterkeys(resource)) resource_type = next(iter(resource))
name = resource[resource_type]['name'] name = resource[resource_type]['name']
data = {resource_type: {'name': '%s_new' % name}} data = {resource_type: {'name': '%s_new' % name}}
req = self.new_update_request('%ss' % resource_type, req = self.new_update_request('%ss' % resource_type,
@@ -104,7 +103,7 @@ class TimeStampChangedsinceTestCase(test_db_base_plugin_v2.
def _list_resources_with_changed_since(self, resource): def _list_resources_with_changed_since(self, resource):
# assert list results contain the net info when # assert list results contain the net info when
# changed_since equal with the net updated time. # changed_since equal with the net updated time.
resource_type = six.next(six.iterkeys(resource)) resource_type = next(iter(resource))
if resource_type in ['network', 'port']: if resource_type in ['network', 'port']:
self._set_timestamp_by_show(resource, resource_type) self._set_timestamp_by_show(resource, resource_type)
resources = self._get_resp_with_changed_since(resource_type, resources = self._get_resp_with_changed_since(resource_type,
@@ -125,7 +124,7 @@ class TimeStampChangedsinceTestCase(test_db_base_plugin_v2.
self.assertEqual([], resources[resource_type + 's']) self.assertEqual([], resources[resource_type + 's'])
def _test_list_mutiple_resources_with_changed_since(self, first, second): def _test_list_mutiple_resources_with_changed_since(self, first, second):
resource_type = six.next(six.iterkeys(first)) resource_type = next(iter(first))
if resource_type in ['network', 'port']: if resource_type in ['network', 'port']:
self._set_timestamp_by_show(first, resource_type) self._set_timestamp_by_show(first, resource_type)
self._set_timestamp_by_show(second, resource_type) self._set_timestamp_by_show(second, resource_type)

View File

@@ -13,6 +13,7 @@
import io import io
import re import re
import tokenize import tokenize
from unittest import mock
import testtools import testtools
@@ -224,6 +225,19 @@ class HackingTestCase(base.BaseTestCase):
checks.check_no_import_mock( checks.check_no_import_mock(
fail_line, "neutron/tests/test_fake.py", None)))) fail_line, "neutron/tests/test_fake.py", None))))
def test_check_no_import_six(self):
pass_line = 'from other_library import six'
fail_lines = ('import six',
'import six as six_lib',
'from six import moves')
self.assertEqual(
0,
len(list(checks.check_no_import_six(pass_line, mock.ANY, None))))
for fail_line in fail_lines:
self.assertEqual(
1, len(list(checks.check_no_import_six(
fail_line, mock.ANY, None))))
def test_check_oslo_i18n_wrapper(self): def test_check_oslo_i18n_wrapper(self):
def _pass(line, filename, noqa=False): def _pass(line, filename, noqa=False):
self.assertLinePasses( self.assertLinePasses(
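
The implementation of the new N348 check lives in neutron/hacking/checks.py and is not part of this hunk; a hypothetical sketch that satisfies the expectations encoded in test_check_no_import_six above could look like this (regex and message text are assumptions):

    import re

    # Assumed pattern: flag direct six imports, but not importing a name
    # that merely happens to be called "six" from another library.
    six_import_re = re.compile(r"^\s*(import\s+six\b|from\s+six\b)")

    def check_no_import_six(logical_line, filename, noqa):
        """N348 - Test code must not import six library."""
        if noqa:
            return
        if six_import_re.match(logical_line):
            yield (0, "N348: Test code must not import six library")

    # Mirrors the pass/fail lines asserted in the unit test above.
    assert not list(check_no_import_six(
        'from other_library import six', 'neutron/tests/test_fake.py', None))
    assert len(list(check_no_import_six(
        'import six as six_lib', 'neutron/tests/test_fake.py', None))) == 1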

View File

@@ -20,7 +20,6 @@ from neutron_lib import context
from neutron_lib import exceptions as exc from neutron_lib import exceptions as exc
from neutron_lib.plugins.ml2 import api from neutron_lib.plugins.ml2 import api
from oslo_config import cfg from oslo_config import cfg
from six import moves
import testtools import testtools
from testtools import matchers from testtools import matchers
@@ -176,7 +175,7 @@ class TunnelTypeTestMixin(object):
api.PHYSICAL_NETWORK: 'None', api.PHYSICAL_NETWORK: 'None',
api.SEGMENTATION_ID: None} api.SEGMENTATION_ID: None}
for x in moves.range(TUN_MIN, TUN_MAX + 1): for x in range(TUN_MIN, TUN_MAX + 1):
segment = self.driver.reserve_provider_segment(self.context, segment = self.driver.reserve_provider_segment(self.context,
specs) specs)
self.assertEqual(self.TYPE, segment[api.NETWORK_TYPE]) self.assertEqual(self.TYPE, segment[api.NETWORK_TYPE])
@@ -207,7 +206,7 @@ class TunnelTypeTestMixin(object):
def test_allocate_tenant_segment(self): def test_allocate_tenant_segment(self):
tunnel_ids = set() tunnel_ids = set()
for x in moves.range(TUN_MIN, TUN_MAX + 1): for x in range(TUN_MIN, TUN_MAX + 1):
segment = self.driver.allocate_tenant_segment(self.context) segment = self.driver.allocate_tenant_segment(self.context)
self.assertThat(segment[api.SEGMENTATION_ID], self.assertThat(segment[api.SEGMENTATION_ID],
matchers.GreaterThan(TUN_MIN - 1)) matchers.GreaterThan(TUN_MIN - 1))

View File

@@ -739,15 +739,14 @@ class TestPciOsWrapper(base.BaseTestCase):
self.assertTrue(esm.PciOsWrapper.pf_device_exists('p6p1')) self.assertTrue(esm.PciOsWrapper.pf_device_exists('p6p1'))
def test_get_numvfs(self): def test_get_numvfs(self):
with mock.patch("six.moves.builtins.open", with mock.patch("builtins.open",
mock.mock_open(read_data="63")) as mock_open: mock.mock_open(read_data="63")) as mock_open:
self.assertEqual(63, esm.PciOsWrapper.get_numvfs('dev1')) self.assertEqual(63, esm.PciOsWrapper.get_numvfs('dev1'))
mock_open.assert_called_once_with( mock_open.assert_called_once_with(
esm.PciOsWrapper.NUMVFS_PATH % 'dev1') esm.PciOsWrapper.NUMVFS_PATH % 'dev1')
def test_get_numvfs_no_file(self): def test_get_numvfs_no_file(self):
with mock.patch("six.moves.builtins.open", with mock.patch("builtins.open", side_effect=IOError()) as mock_open:
side_effect=IOError()) as mock_open:
self.assertEqual(-1, esm.PciOsWrapper.get_numvfs('dev1')) self.assertEqual(-1, esm.PciOsWrapper.get_numvfs('dev1'))
mock_open.assert_called_once_with( mock_open.assert_called_once_with(
esm.PciOsWrapper.NUMVFS_PATH % 'dev1') esm.PciOsWrapper.NUMVFS_PATH % 'dev1')
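
mock.mock_open() pairs with the plain 'builtins.open' target; a small sketch of the read path those two tests exercise, with an illustrative sysfs path and helper:

    from unittest import mock

    NUMVFS_PATH = '/sys/class/net/%s/device/sriov_numvfs'  # illustrative path

    def get_numvfs(dev):
        # Return -1 when the sysfs entry cannot be opened, as the test expects.
        try:
            with open(NUMVFS_PATH % dev) as f:
                return int(f.read())
        except IOError:
            return -1

    with mock.patch('builtins.open', mock.mock_open(read_data='63')):
        assert get_numvfs('dev1') == 63
    with mock.patch('builtins.open', side_effect=IOError()):
        assert get_numvfs('dev1') == -1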

View File

@@ -17,12 +17,12 @@ import os
import socket import socket
import ssl import ssl
from unittest import mock from unittest import mock
import urllib
from neutron_lib.db import api as db_api from neutron_lib.db import api as db_api
from neutron_lib import exceptions as exception from neutron_lib import exceptions as exception
from oslo_config import cfg from oslo_config import cfg
from oslo_utils import netutils from oslo_utils import netutils
from six.moves import urllib
import testtools import testtools
import webob import webob
import webob.exc import webob.exc

View File

@@ -16,8 +16,6 @@
import sys import sys
from unittest import mock from unittest import mock
from six import moves
from neutron.tests import base from neutron.tests import base
from neutron.tests import post_mortem_debug from neutron.tests import post_mortem_debug
@@ -48,8 +46,7 @@ class TestTesttoolsExceptionHandler(base.BaseTestCase):
mod_mock.post_mortem = mock.Mock() mod_mock.post_mortem = mock.Mock()
return mod_mock return mod_mock
with mock.patch('six.moves.builtins.__import__', with mock.patch('builtins.__import__', side_effect=import_mock):
side_effect=import_mock):
pdb_debugger = post_mortem_debug._get_debugger('pdb') pdb_debugger = post_mortem_debug._get_debugger('pdb')
pudb_debugger = post_mortem_debug._get_debugger('pudb') pudb_debugger = post_mortem_debug._get_debugger('pudb')
self.assertEqual('pdb', pdb_debugger.__name__) self.assertEqual('pdb', pdb_debugger.__name__)
@@ -78,7 +75,7 @@ class TestGetIgnoredTraceback(base.BaseTestCase):
tb = root_tb tb = root_tb
tracebacks = [tb] tracebacks = [tb]
for x in moves.range(len(ignored_bit_array) - 1): for x in range(len(ignored_bit_array) - 1):
tb.tb_next = mock.Mock() tb.tb_next = mock.Mock()
tb = tb.tb_next tb = tb.tb_next
tracebacks.append(tb) tracebacks.append(tb)
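
Patching the import machinery now targets 'builtins.__import__' directly; a compact sketch of the trick used by _get_debugger above (the returned module is a Mock, so any debugger name resolves without the real package):

    from unittest import mock

    def import_mock(name, *args, **kwargs):
        mod_mock = mock.Mock()
        mod_mock.__name__ = name
        mod_mock.post_mortem = mock.Mock()
        return mod_mock

    with mock.patch('builtins.__import__', side_effect=import_mock):
        import pudb  # served by import_mock, no real pudb needed
        assert pudb.__name__ == 'pudb'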

View File

@@ -23,7 +23,6 @@ SQLAlchemy>=1.2.0 # MIT
WebOb>=1.8.2 # MIT WebOb>=1.8.2 # MIT
keystoneauth1>=3.14.0 # Apache-2.0 keystoneauth1>=3.14.0 # Apache-2.0
alembic>=0.8.10 # MIT alembic>=0.8.10 # MIT
six>=1.10.0 # MIT
stevedore>=1.20.0 # Apache-2.0 stevedore>=1.20.0 # Apache-2.0
oslo.cache>=1.26.0 # Apache-2.0 oslo.cache>=1.26.0 # Apache-2.0
oslo.concurrency>=3.26.0 # Apache-2.0 oslo.concurrency>=3.26.0 # Apache-2.0

View File

@@ -183,6 +183,7 @@ extension =
N344 = neutron.hacking.checks:check_python3_no_filter N344 = neutron.hacking.checks:check_python3_no_filter
N346 = neutron.hacking.checks:check_no_sqlalchemy_event_import N346 = neutron.hacking.checks:check_no_sqlalchemy_event_import
N347 = neutron.hacking.checks:check_no_import_mock N347 = neutron.hacking.checks:check_no_import_mock
N348 = neutron.hacking.checks:check_no_import_six
# Checks from neutron-lib # Checks from neutron-lib
N521 = neutron_lib.hacking.checks:use_jsonutils N521 = neutron_lib.hacking.checks:use_jsonutils
N524 = neutron_lib.hacking.checks:check_no_contextlib_nested N524 = neutron_lib.hacking.checks:check_no_contextlib_nested