Python2/3 compatibility fixes

Changes resulting from running the 2to3 tooling.

Change-Id: I59f52f43ae64c2dbf4c04b45f6acd8f5d5f8281d
This commit is contained in:
Thomas Bachman 2022-01-13 17:06:52 +00:00
parent de548128ce
commit 268aaf1da9
75 changed files with 728 additions and 727 deletions

View File

@ -38,7 +38,7 @@ def get_obj_from_stack(cls):
i = 1
try:
while True:
for val in sys._getframe(i).f_locals.values():
for val in list(sys._getframe(i).f_locals.values()):
if isinstance(val, cls):
return val
i = i + 1
@ -86,7 +86,7 @@ def admin_context(context):
def deep_sort(obj):
if isinstance(obj, dict):
obj = OrderedDict(sorted(obj.items()))
for k, v in obj.items():
for k, v in list(obj.items()):
if isinstance(v, dict) or isinstance(v, list):
obj[k] = deep_sort(v)

View File

@ -282,7 +282,7 @@ class FWaasEventHandler(nfp_api.NfpEventHandler):
result = []
for d1 in unique_rules:
for d2 in rules:
if d1.viewitems() <= d2.viewitems():
if d1.items() <= d2.items():
result.append(d2)
break
result.sort(key=operator.itemgetter('position'))

View File

@ -628,7 +628,7 @@ class LBaaSV2EventHandler(agent_base.AgentBaseEventHandler,
if operation == lb_const.CREATE:
driver_name = data['driver_name']
driver_id = driver_name + service_vendor
if (driver_id) not in self.drivers.keys():
if (driver_id) not in list(self.drivers.keys()):
msg = ('No device driver on agent: %s.' % (driver_name))
LOG.error(msg)
self.plugin_rpc.update_status(
@ -860,7 +860,7 @@ class LBaaSV2EventHandler(agent_base.AgentBaseEventHandler,
spacing=60)
def collect_stats_v2(self, ev):
for pool_id, driver_name in \
LBaaSV2EventHandler.instance_mapping.items():
list(LBaaSV2EventHandler.instance_mapping.items()):
driver_id = lb_const.SERVICE_TYPE + driver_name
driver = self.drivers[driver_id]
try:

View File

@ -23,7 +23,7 @@ LOG = nfp_logging.getLogger(__name__)
def set_class_attr(**kwargs):
def f(class_obj):
for key, value in kwargs.items():
for key, value in list(kwargs.items()):
setattr(class_obj, key.lower(), value.lower())
return class_obj
return f

View File

@ -63,7 +63,7 @@ class BaseDataModel(object):
@classmethod
def from_dict(cls, model_dict):
fields = {k: v for k, v in model_dict.items()
fields = {k: v for k, v in list(model_dict.items())
if k in cls.fields}
return cls(**fields)

View File

@ -111,7 +111,7 @@ class OctaviaDataModelBuilder(object):
# Update Octavia model from dict
def _update(self, octavia_data_model, update_dict):
for key, value in update_dict.items():
for key, value in list(update_dict.items()):
setattr(octavia_data_model, key, value)
return octavia_data_model

View File

@ -942,9 +942,9 @@ class VpnaasIpsecDriver(VpnGenericConfigDriver):
'aes-256': "aes256",
'aes-192': "aes256"}
if ike_enc_algo in algos.keys():
if ike_enc_algo in list(algos.keys()):
ike_enc_algo = algos[ike_enc_algo]
if ipsec_enc_algo in algos.keys():
if ipsec_enc_algo in list(algos.keys()):
ipsec_enc_algo = algos[ipsec_enc_algo]
conn['ikepolicy']['encryption_algorithm'] = ike_enc_algo
@ -1418,10 +1418,10 @@ class VpnaasIpsecDriver(VpnGenericConfigDriver):
reason = resource_data.get('reason')
rsrc = resource_data.get('rsrc_type')
if rsrc not in self.handlers.keys():
if rsrc not in list(self.handlers.keys()):
raise UnknownResourceException(rsrc=rsrc)
if reason not in self.handlers[rsrc].keys():
if reason not in list(self.handlers[rsrc].keys()):
raise UnknownReasonException(reason=reason)
self.handlers[rsrc][reason](context, resource_data)

View File

@ -34,7 +34,7 @@ class Filter(object):
"""
filters = {}
try:
for fk, fv in msg['args'].items():
for fk, fv in list(msg['args'].items()):
if dict == type(fv):
filters = fv
break
@ -69,7 +69,7 @@ class Filter(object):
"""
for fk, fv in filters.items():
for fk, fv in list(filters.items()):
for d in data[:]:
if d.get(fk) is None:
data.remove(d)
@ -221,7 +221,7 @@ class Filter(object):
siteconn['ipsecpolicy'] = ipsecpolicy
vpnserviceid = vpnservice['id']
if vpnserviceid not in vpnservices.keys():
if vpnserviceid not in list(vpnservices.keys()):
vpnservices[vpnserviceid] = \
{'service': vpnservice, 'siteconns': []}
@ -236,4 +236,4 @@ class Filter(object):
As of now, passing everything.
"""
return vpnservices.values()
return list(vpnservices.values())

View File

@ -229,7 +229,7 @@ def _find_routers_via_routes_for_floatingip(self, context, internal_port,
cidr_nexthops[cidr].append(route['nexthop'])
smallest_cidr = netaddr.smallest_matching_cidr(
internal_ip_address,
cidr_nexthops.keys())
list(cidr_nexthops.keys()))
if not smallest_cidr:
continue
# validate that there exists a path to "internal_port"

View File

@ -134,8 +134,8 @@ def dib():
# wily support is removed from ubuntu 'current' release,
# download/copy to loation as expected by diskimage-builder
if conf['ubuntu_release']['release'] == "wily":
import commands
commands.getoutput("mkdir -p %s" % dib['cache_dir'])
import subprocess
subprocess.getoutput("mkdir -p %s" % dib['cache_dir'])
wily_SHA256SUMS = "%s/SHA256SUMS.ubuntu.wily.amd64" % dib['cache_dir']
if not os.path.isfile(wily_SHA256SUMS):
ret = subprocess.call(["wget", "http://cloud-images-archive.ubuntu.com/releases/wily/release-20160715/SHA1SUMS", "-r", "-O", wily_SHA256SUMS])
@ -155,14 +155,14 @@ def dib():
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
os.chdir(out_dir)
print("DIB-ARGS: ", dib_args)
print(("DIB-ARGS: ", dib_args))
ret = subprocess.call(dib_args)
if not ret:
output_path = os.path.realpath('./')
print("Output path: ", output_path)
print(("Output path: ", output_path))
output_image = output_path + '/' + image_name + '.qcow2'
print("Image location: %s" % output_image)
print(("Image location: %s" % output_image))
with open("%s/last_built_image_path" % output_path, "w") as f:
f.write(output_image)

View File

@ -5,15 +5,15 @@ import sys
import os
import shutil
import subprocess
import ConfigParser
import commands
import configparser
import subprocess
import time
import platform
from image_builder import disk_image_create as DIB
from .image_builder import disk_image_create as DIB
# Defines
TEMP_WORK_DIR = "tmp"
CONFIG = ConfigParser.ConfigParser()
CONFIG = configparser.ConfigParser()
NEUTRON_CONF = "/etc/neutron/neutron.conf"
NEUTRON_ML2_CONF = "/etc/neutron/plugins/ml2/ml2_conf.ini"
FILE_PATH = os.path.dirname(os.path.realpath(__file__))
@ -71,21 +71,21 @@ args = parser.parse_args()
def check_if_apic_sys():
global APIC_ENV
mech_drivers = commands.getoutput("crudini --get " + NEUTRON_ML2_CONF + " ml2 mechanism_drivers")
mech_drivers = subprocess.getoutput("crudini --get " + NEUTRON_ML2_CONF + " ml2 mechanism_drivers")
if mech_drivers == 'apic_gbp':
APIC_ENV = True
def set_keystone_authtoken_section():
global NEUTRON_CONF
nfp_conf = '/etc/nfp.ini'
admin_user = commands.getoutput("crudini --get " + NEUTRON_CONF + " keystone_authtoken username")
admin_password = commands.getoutput("crudini --get " + NEUTRON_CONF + " keystone_authtoken password")
admin_tenant_name = commands.getoutput("crudini --get " + NEUTRON_CONF + " keystone_authtoken project_name")
auth_uri = commands.getoutput("crudini --get " + NEUTRON_CONF + " keystone_authtoken auth_uri")
auth_protocol = commands.getoutput("echo " + auth_uri + " | cut -d':' -f1")
auth_host = commands.getoutput("echo " + auth_uri + " | cut -d'/' -f3 | cut -d':' -f1")
auth_port = commands.getoutput("echo " + auth_uri + " | cut -d'/' -f3 | cut -d':' -f2")
auth_version = commands.getoutput("echo " + auth_uri + " | cut -d'/' -f4")
admin_user = subprocess.getoutput("crudini --get " + NEUTRON_CONF + " keystone_authtoken username")
admin_password = subprocess.getoutput("crudini --get " + NEUTRON_CONF + " keystone_authtoken password")
admin_tenant_name = subprocess.getoutput("crudini --get " + NEUTRON_CONF + " keystone_authtoken project_name")
auth_uri = subprocess.getoutput("crudini --get " + NEUTRON_CONF + " keystone_authtoken auth_uri")
auth_protocol = subprocess.getoutput("echo " + auth_uri + " | cut -d':' -f1")
auth_host = subprocess.getoutput("echo " + auth_uri + " | cut -d'/' -f3 | cut -d':' -f1")
auth_port = subprocess.getoutput("echo " + auth_uri + " | cut -d'/' -f3 | cut -d':' -f2")
auth_version = subprocess.getoutput("echo " + auth_uri + " | cut -d'/' -f4")
if auth_version == '':
auth_version = 'v2.0'
subprocess.call(("crudini --set " + nfp_conf + " nfp_keystone_authtoken admin_user " + admin_user).split(' '))
@ -97,18 +97,18 @@ def set_keystone_authtoken_section():
subprocess.call(("crudini --set " + nfp_conf + " nfp_keystone_authtoken auth_version " + auth_version).split(' '))
def configure_nfp():
commands.getoutput("cat /usr/lib/python2.7/site-packages/gbpservice/contrib/nfp/bin/nfp.ini >> /etc/nfp.ini")
commands.getoutput("mkdir -p /etc/nfp/vyos/")
commands.getoutput("cp -r /usr/lib/python2.7/site-packages/gbpservice/contrib/nfp/bin/vyos.day0 /etc/nfp/vyos/")
commands.getoutput("sed -i 's/\"password\": \"\"/\"password\": \"vyos\"/' /etc/nfp/vyos/vyos.day0")
subprocess.getoutput("cat /usr/lib/python2.7/site-packages/gbpservice/contrib/nfp/bin/nfp.ini >> /etc/nfp.ini")
subprocess.getoutput("mkdir -p /etc/nfp/vyos/")
subprocess.getoutput("cp -r /usr/lib/python2.7/site-packages/gbpservice/contrib/nfp/bin/vyos.day0 /etc/nfp/vyos/")
subprocess.getoutput("sed -i 's/\"password\": \"\"/\"password\": \"vyos\"/' /etc/nfp/vyos/vyos.day0")
set_keystone_authtoken_section()
check_if_apic_sys()
curr_service_plugins = commands.getoutput("crudini --get /etc/neutron/neutron.conf DEFAULT service_plugins")
curr_service_plugins = subprocess.getoutput("crudini --get /etc/neutron/neutron.conf DEFAULT service_plugins")
curr_service_plugins_list = curr_service_plugins.split(",")
lbaas_enabled = filter(lambda x: 'lbaas' in x, curr_service_plugins_list)
vpnaas_enabled = filter(lambda x: 'vpnaas' in x, curr_service_plugins_list)
fwaas_enabled = filter(lambda x: 'fwaas' in x, curr_service_plugins_list)
firewall_enabled = filter(lambda x: 'firewall' in x, curr_service_plugins_list)
lbaas_enabled = [x for x in curr_service_plugins_list if 'lbaas' in x]
vpnaas_enabled = [x for x in curr_service_plugins_list if 'vpnaas' in x]
fwaas_enabled = [x for x in curr_service_plugins_list if 'fwaas' in x]
firewall_enabled = [x for x in curr_service_plugins_list if 'firewall' in x]
for word in firewall_enabled:
if word not in fwaas_enabled:
fwaas_enabled.append(word)
@ -147,7 +147,7 @@ def configure_nfp():
subprocess.call(("crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins " + str(new_service_plugins)).split(' '))
#check id gbp-heat is configured, if not configure
curr_heat_plugin_dirs = commands.getoutput("crudini --get /etc/heat/heat.conf DEFAULT plugin_dirs")
curr_heat_plugin_dirs = subprocess.getoutput("crudini --get /etc/heat/heat.conf DEFAULT plugin_dirs")
curr_heat_plugin_dirs_list = curr_heat_plugin_dirs.split(",")
heat_dirs_to_enable = ["/usr/lib64/heat", "/usr/lib/heat", "/usr/lib/python2.7/site-packages/gbpautomation/heat"]
for dir in heat_dirs_to_enable:
@ -171,7 +171,7 @@ def configure_nfp():
# Configure service owner
subprocess.call("crudini --set /etc/neutron/neutron.conf admin_owned_resources_apic_tscp plumbing_resource_owner_user neutron".split(' '))
admin_password = commands.getoutput("crudini --get /etc/neutron/neutron.conf keystone_authtoken password")
admin_password = subprocess.getoutput("crudini --get /etc/neutron/neutron.conf keystone_authtoken password")
subprocess.call("crudini --set /etc/neutron/neutron.conf admin_owned_resources_apic_tscp plumbing_resource_owner_password".split(' ') + [admin_password])
subprocess.call("crudini --set /etc/neutron/neutron.conf admin_owned_resources_apic_tscp plumbing_resource_owner_tenant_name services".split(' '))
@ -212,12 +212,12 @@ def get_src_dirs():
elif os_type in ['centos', 'redhat']:
src_path = "/usr/lib/python2.7/site-packages/"
else:
print("ERROR: Unsupported Operating System(%s)" % os_type)
print(("ERROR: Unsupported Operating System(%s)" % os_type))
return 1
for src_dir in src_dirs:
to_copy = src_path + src_dir
if not os.path.isdir(to_copy):
print("ERROR: directory not found: ", to_copy)
print(("ERROR: directory not found: ", to_copy))
return 1
# create a tmp directory for creating configurator docker
subprocess.call(["rm", "-rf", dst_dir])
@ -228,7 +228,7 @@ def get_src_dirs():
for src_dir in src_dirs:
to_copy = src_path + src_dir
if(subprocess.call(["cp", "-r", to_copy, dst_dir])):
print("ERROR: failed to copy %s to ./ directory" % to_copy)
print(("ERROR: failed to copy %s to ./ directory" % to_copy))
return 1
subprocess.call(["cp", dockerfile, dst_dir])
subprocess.call(["cp", run_sh, dst_dir])
@ -271,7 +271,7 @@ def build_configuration_vm():
if not ret:
print("ERROR: Failed to create Configurator VM")
else:
print("SUCCESS, created Configurator VM: ", image)
print(("SUCCESS, created Configurator VM: ", image))
# clean the scr_dirs copied in PWD
clean_src_dirs()
@ -337,7 +337,7 @@ def create_orchestrator_ctl():
try:
file = open(orch_ctl_file, 'w+')
except:
print("Error creating " + orch_ctl_file + " file")
print(("Error creating " + orch_ctl_file + " file"))
sys.exit(1)
file.write("[Unit]\nDescription=One Convergence NFP Orchestrator\n")
@ -362,7 +362,7 @@ def create_orchestrator_ctl():
try:
file = open(orch_config_file, 'w+')
except:
print("Error creating " + orch_ctl_file + " file")
print(("Error creating " + orch_ctl_file + " file"))
sys.exit(1)
file.write("[Unit]\nDescription=One Convergence NFP Config Orchestrator")
@ -402,7 +402,7 @@ def create_nfp_namespace_file():
try:
filepx = open(proxy_tool_file, 'w+')
except:
print("Error creating " + proxy_tool_file + " file")
print(("Error creating " + proxy_tool_file + " file"))
sys.exit(1)
filepx.write("#!/usr/bin/bash\n")
filepx.write("\nNOVA_CONF=/etc/nova/nova.conf\nNOVA_SESSION=neutron")
@ -552,7 +552,7 @@ def create_proxy_ctl():
try:
filepx = open(proxy_sup_file, 'w+')
except:
print("Error creating " + proxy_sup_file + " file")
print(("Error creating " + proxy_sup_file + " file"))
sys.exit(1)
filepx.write("#!/usr/bin/sh\nNFP_PROXY_AGENT_INI=/etc/nfp.ini")
@ -567,7 +567,7 @@ def create_proxy_ctl():
try:
file = open(proxy_ctl_file, 'w+')
except:
print("Error creating " + proxy_ctl_file + " file")
print(("Error creating " + proxy_ctl_file + " file"))
sys.exit(1)
file.write("[Unit]\nDescription=One Convergence NFP Proxy\n")
@ -610,7 +610,7 @@ def create_proxy_agent_ctl():
try:
file = open(proxy_ctl_file, 'w+')
except:
print("Error creating " + proxy_ctl_file + " file")
print(("Error creating " + proxy_ctl_file + " file"))
sys.exit(1)
file.write("[Unit]\nDescription=One Convergence NFP Proxy Agent")
@ -657,11 +657,11 @@ def create_nfp_resources():
get_openstack_creds()
os.system("gbp l3policy-create default-nfp --ip-pool 172.16.0.0/16"
" --subnet-prefix-length 20 --proxy-ip-pool=172.17.0.0/16")
l3policy_Id = commands.getstatusoutput(
l3policy_Id = subprocess.getstatusoutput(
"gbp l3policy-list | grep '\sdefault-nfp\s' | awk '{print $2}'")[1]
os.system("gbp l2policy-create --l3-policy " +
l3policy_Id + " svc_management_ptg")
l2policy_Id = commands.getstatusoutput(
l2policy_Id = subprocess.getstatusoutput(
"gbp l2policy-list | grep '\ssvc_management_ptg\s'"
" | awk '{print $2}'")[1]
os.system("gbp group-create svc_management_ptg --service_management True"
@ -683,7 +683,7 @@ def add_nova_key_pair():
configurator_key_name = "configurator_key"
print("Creating nova keypair for configurator VM.")
pem_file_content = commands.getoutput("nova keypair-add" + " " + configurator_key_name)
pem_file_content = subprocess.getoutput("nova keypair-add" + " " + configurator_key_name)
with open("keys/configurator_key.pem", "w") as f:
f.write(pem_file_content)
os.chmod("keys/configurator_key.pem", 0o600)
@ -697,16 +697,16 @@ def launch_configurator():
" --disk-format qcow2 --container-format bare"
" --visibility public --file " + args.controller_path)
else:
print("Error " + args.controller_path + " does not exist")
print(("Error " + args.controller_path + " does not exist"))
sys.exit(1)
# add nova keypair for nfp_controller VM.
configurator_key_name = add_nova_key_pair()
Port_id = commands.getstatusoutput(
Port_id = subprocess.getstatusoutput(
"gbp policy-target-create --policy-target-group svc_management_ptg"
" nfp_controllerVM_instance | grep port_id | awk '{print $4}'")[1]
Image_id = commands.getstatusoutput(
Image_id = subprocess.getstatusoutput(
"glance image-list | grep nfp_controller |awk '{print $2}'")[1]
if Image_id and Port_id:
os.system("nova boot --flavor m1.medium --image " +
@ -733,41 +733,41 @@ def clean_up():
clean up nfp resources
"""
get_openstack_creds()
InstanceId = commands.getstatusoutput(
InstanceId = subprocess.getstatusoutput(
"nova list | grep nfp_controllerVM_instance | awk '{print $2}'")[1]
if InstanceId:
os.system("nova delete " + InstanceId)
time.sleep(10)
PolicyTargetId = commands.getstatusoutput(
PolicyTargetId = subprocess.getstatusoutput(
"gbp policy-target-list | grep nfp_controllerVM_instance"
" | awk '{print $2}'")[1]
if PolicyTargetId:
os.system("gbp policy-target-delete " + PolicyTargetId)
ImageId = commands.getstatusoutput(
ImageId = subprocess.getstatusoutput(
"glance image-list | grep nfp_controller | awk '{print $2}'")[1]
if ImageId:
os.system("glance image-delete " + ImageId)
ServiceMGMTId = commands.getstatusoutput(
ServiceMGMTId = subprocess.getstatusoutput(
"gbp group-list | grep '\ssvc_management_ptg\s'"
" | awk '{print $2}'")[1]
if ServiceMGMTId:
SvcGroupId = commands.getstatusoutput(
SvcGroupId = subprocess.getstatusoutput(
"gbp group-list | grep '\ssvc_management_ptg\s'"
" | awk '{print $2}'")[1]
l2policyId = commands.getstatusoutput(
l2policyId = subprocess.getstatusoutput(
"gbp l2policy-list | grep '\ssvc_management_ptg\s'"
" | awk '{print $2}'")[1]
l3policyId = commands.getstatusoutput(
l3policyId = subprocess.getstatusoutput(
"gbp l3policy-list | grep '\sdefault-nfp\s'"
" | awk '{print $2}'")[1]
os.system("gbp group-delete " + SvcGroupId)
os.system("gbp l2policy-delete " + l2policyId)
os.system("gbp l3policy-delete " + l3policyId)
HeatId = commands.getstatusoutput(
HeatId = subprocess.getstatusoutput(
"heat stack-list | grep '\sgbp_services_stack\s'"
" | awk '{print $2}'")[1]
if HeatId:

View File

@ -12,6 +12,7 @@
import filter_base
from gbpservice.contrib.nfp.configurator.lib import data_filter

View File

@ -81,11 +81,9 @@ class ConfiguratorRpcManagerTestCase(base.BaseTestCase):
else self.fo.fake_request_data_generic_single()))}
}
if batch:
request_data_actual, _ = (
request_data['batch'].values())
request_data_actual, _ = (list(request_data['batch'].values()))
else:
request_data_actual, _ = (
request_data['single'].values())
request_data_actual, _ = (list(request_data['single'].values()))
with mock.patch.object(rpc_mgr,
'_get_service_agent_instance',

View File

@ -290,29 +290,29 @@ class VPNTestData(object):
}]
self.ipsec_site_connection_delete = [{
u'status': u'INIT',
u'psk': u'secret',
u'initiator': u'bi-directional',
u'name': u'site_to_site_connection1',
u'admin_state_up': True,
u'tenant_id': u'564aeb9ebd694468bfb79a69da887419',
u'auth_mode': u'psk',
u'peer_cidrs': [u'11.0.0.0/24'],
u'mtu': 1500,
u'ikepolicy_id': (
u'7a88b9f4-70bf-4184-834d-6814f264d331'),
u'vpnservice_id': (
u'3d453be6-7ddc-4812-a4a7-3299f9d3d29e'),
u'dpd': {u'action': u'hold',
u'interval': 30,
u'timeout': 120},
u'route_mode': u'static',
u'ipsecpolicy_id': (
u'03839460-1519-46ab-a073-b74314c06ec3'),
u'peer_address': u'1.103.2.2',
u'peer_id': u'1.103.2.2',
u'id': u'4dae3c91-0d0a-4ba5-9269-d0deab653316',
u'description': ";".join(self.ipsec_delete),
'status': 'INIT',
'psk': 'secret',
'initiator': 'bi-directional',
'name': 'site_to_site_connection1',
'admin_state_up': True,
'tenant_id': '564aeb9ebd694468bfb79a69da887419',
'auth_mode': 'psk',
'peer_cidrs': ['11.0.0.0/24'],
'mtu': 1500,
'ikepolicy_id': (
'7a88b9f4-70bf-4184-834d-6814f264d331'),
'vpnservice_id': (
'3d453be6-7ddc-4812-a4a7-3299f9d3d29e'),
'dpd': {'action': 'hold',
'interval': 30,
'timeout': 120},
'route_mode': 'static',
'ipsecpolicy_id': (
'03839460-1519-46ab-a073-b74314c06ec3'),
'peer_address': '1.103.2.2',
'peer_id': '1.103.2.2',
'id': '4dae3c91-0d0a-4ba5-9269-d0deab653316',
'description': ";".join(self.ipsec_delete),
}]
self.ikepolicies = [{
@ -344,24 +344,24 @@ class VPNTestData(object):
self.context = {
'domain': None,
'project_name': None,
'tenant_name': u'services',
'tenant_name': 'services',
'project_domain': None,
'timestamp': '2016-03-03 09:19:05.381231',
'auth_token': u'0711af29a389492cb799e096a003a760',
'auth_token': '0711af29a389492cb799e096a003a760',
'resource_uuid': None,
'is_admin': True,
'user': u'19e278f3c3fa43e3964b057bc73cf7d7',
'user': '19e278f3c3fa43e3964b057bc73cf7d7',
'tenant_id': '9f1663d116f74a01991ad66aaa8756c5',
'read_only': False,
'project_id': 'b',
'user_id': 'a',
'show_deleted': False,
'roles': [u'admin', u'heat_stack_owner'],
'roles': ['admin', 'heat_stack_owner'],
'user_identity': 'a b - - -',
'tenant_id': u'9f1663d116f74a01991ad66aaa8756c5',
'request_id': u'req-da8765fb-4eb4-4f4f-9ebb-843ad1d752bd',
'tenant_id': '9f1663d116f74a01991ad66aaa8756c5',
'request_id': 'req-da8765fb-4eb4-4f4f-9ebb-843ad1d752bd',
'user_domain': None,
'user_name': u'neutron',
'user_name': 'neutron',
'agent_info': {'context': {},
'resource': {}},
"resource_data": {
@ -483,36 +483,36 @@ class VPNTestData(object):
'''
return {
u'rsrc_type': u'ipsec_site_connection',
u'rsrc_id': u'4dae3c91-0d0a-4ba5-9269-d0deab653316',
u'resource': {
u'status': u'INIT',
u'psk': u'secret',
u'initiator': u'bi-directional',
u'name': u'site_to_site_connection1',
u'admin_state_up': True,
u'tenant_id': u'564aeb9ebd694468bfb79a69da887419',
u'auth_mode': u'psk',
u'peer_cidrs': [u'11.0.0.0/24'],
u'mtu': 1500,
u'ikepolicy_id': (
u'7a88b9f4-70bf-4184-834d-6814f264d331'),
u'vpnservice_id': (
u'3d453be6-7ddc-4812-a4a7-3299f9d3d29e'),
u'dpd': {u'action': u'hold',
u'interval': 30,
u'timeout': 120},
u'route_mode': u'static',
u'ipsecpolicy_id': (
u'03839460-1519-46ab-a073-b74314c06ec3'),
u'peer_address': u'1.103.2.2',
u'peer_id': u'1.103.2.2',
u'id': u'4dae3c91-0d0a-4ba5-9269-d0deab653315',
u'description': ";".join(self.ipsec_delete),
'rsrc_type': 'ipsec_site_connection',
'rsrc_id': '4dae3c91-0d0a-4ba5-9269-d0deab653316',
'resource': {
'status': 'INIT',
'psk': 'secret',
'initiator': 'bi-directional',
'name': 'site_to_site_connection1',
'admin_state_up': True,
'tenant_id': '564aeb9ebd694468bfb79a69da887419',
'auth_mode': 'psk',
'peer_cidrs': ['11.0.0.0/24'],
'mtu': 1500,
'ikepolicy_id': (
'7a88b9f4-70bf-4184-834d-6814f264d331'),
'vpnservice_id': (
'3d453be6-7ddc-4812-a4a7-3299f9d3d29e'),
'dpd': {'action': 'hold',
'interval': 30,
'timeout': 120},
'route_mode': 'static',
'ipsecpolicy_id': (
'03839460-1519-46ab-a073-b74314c06ec3'),
'peer_address': '1.103.2.2',
'peer_id': '1.103.2.2',
'id': '4dae3c91-0d0a-4ba5-9269-d0deab653315',
'description': ";".join(self.ipsec_delete),
},
u'svc_type': u'ipsec',
u'service_vendor': u'vyos',
u'reason': u'delete',
'svc_type': 'ipsec',
'service_vendor': 'vyos',
'reason': 'delete',
}
def _update_ipsec_site_conn_obj(self):
@ -521,36 +521,36 @@ class VPNTestData(object):
'''
return {
u'rsrc_type': u'ipsec_site_connection',
u'rsrc_id': u'4dae3c91-0d0a-4ba5-9269-d0deab653316',
u'resource': {
u'status': u'INIT',
u'psk': u'secret',
u'initiator': u'bi-directional',
u'name': u'site_to_site_connection1',
u'admin_state_up': True,
u'tenant_id': u'564aeb9ebd694468bfb79a69da887419',
u'auth_mode': u'psk',
u'peer_cidrs': [u'11.0.0.0/24'],
u'mtu': 1500,
u'ikepolicy_id': (
u'7a88b9f4-70bf-4184-834d-6814f264d331'),
u'vpnservice_id': (
u'3d453be6-7ddc-4812-a4a7-3299f9d3d29e'),
u'dpd': {u'action': u'hold',
u'interval': 30,
u'timeout': 120},
u'route_mode': u'static',
u'ipsecpolicy_id': (
u'03839460-1519-46ab-a073-b74314c06ec3'),
u'peer_address': u'1.103.2.2',
u'peer_id': u'1.103.2.2',
u'id': u'4dae3c91-0d0a-4ba5-9269-d0deab653315',
u'description': ";".join(self.ipsec_vpn_create),
'rsrc_type': 'ipsec_site_connection',
'rsrc_id': '4dae3c91-0d0a-4ba5-9269-d0deab653316',
'resource': {
'status': 'INIT',
'psk': 'secret',
'initiator': 'bi-directional',
'name': 'site_to_site_connection1',
'admin_state_up': True,
'tenant_id': '564aeb9ebd694468bfb79a69da887419',
'auth_mode': 'psk',
'peer_cidrs': ['11.0.0.0/24'],
'mtu': 1500,
'ikepolicy_id': (
'7a88b9f4-70bf-4184-834d-6814f264d331'),
'vpnservice_id': (
'3d453be6-7ddc-4812-a4a7-3299f9d3d29e'),
'dpd': {'action': 'hold',
'interval': 30,
'timeout': 120},
'route_mode': 'static',
'ipsecpolicy_id': (
'03839460-1519-46ab-a073-b74314c06ec3'),
'peer_address': '1.103.2.2',
'peer_id': '1.103.2.2',
'id': '4dae3c91-0d0a-4ba5-9269-d0deab653315',
'description': ";".join(self.ipsec_vpn_create),
},
u'svc_type': u'ipsec',
u'service_vendor': u'vyos',
u'reason': u'update',
'svc_type': 'ipsec',
'service_vendor': 'vyos',
'reason': 'update',
}
def make_resource_data(self, operation=None, service_type=None):

View File

@ -599,7 +599,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase):
db_res.consumed_policy_rule_sets = []
return
with context.session.begin(subtransactions=True):
policy_rule_sets_id_list = policy_rule_sets_dict.keys()
policy_rule_sets_id_list = list(policy_rule_sets_dict.keys())
# We will first check if the new list of policy_rule_sets is valid
self._validate_policy_rule_set_list(
context, policy_rule_sets_id_list)

View File

@ -365,11 +365,12 @@ class ServiceChainDbPlugin(schain.ServiceChainPluginBase):
if config_params:
if not spec_db.config_param_names:
spec_db.config_param_names = str(
config_params.keys())
list(config_params.keys()))
else:
config_param_names = ast.literal_eval(
spec_db.config_param_names)
config_param_names.extend(config_params.keys())
config_param_names.extend(
list(config_params.keys()))
spec_db.config_param_names = str(
config_param_names)

View File

@ -14,7 +14,7 @@ GBP_PLURALS = {}
def register_plurals(plural_mappings):
for plural, single in plural_mappings.items():
for plural, single in list(plural_mappings.items()):
GBP_PLURALS[single] = plural

View File

@ -199,8 +199,8 @@ def convert_nested_domain_allowed_vlans(value):
for vlan_range in value[VLAN_RANGES]:
for vrng in [VLAN_RANGE_START, VLAN_RANGE_END]:
vlan_range[vrng] = convert_apic_vlan(vlan_range[vrng])
vlans_list.extend(range(vlan_range[VLAN_RANGE_START],
vlan_range[VLAN_RANGE_END] + 1))
vlans_list.extend(list(range(vlan_range[VLAN_RANGE_START],
vlan_range[VLAN_RANGE_END] + 1)))
# eliminate duplicates
vlans_list = list(set(vlans_list))
# sort

View File

@ -156,7 +156,7 @@ def extend_resources(self, version, attr_map):
if check_optionals and optional_exts_set - set(processed_exts):
continue
extended_attrs = ext.get_extended_resources(version)
for res, resource_attrs in extended_attrs.items():
for res, resource_attrs in list(extended_attrs.items()):
res_to_update = attr_map.setdefault(res, {})
if self._is_sub_resource(res_to_update):
# kentwu: service_profiles defined in servicechain
@ -198,7 +198,7 @@ def extend_resources(self, version, attr_map):
', '.join(unloadable_extensions))
self._check_faulty_extensions(unloadable_extensions)
# Extending extensions' attributes map.
for ext in processed_exts.values():
for ext in list(processed_exts.values()):
ext.update_attributes_map(attr_map)
@ -230,7 +230,7 @@ def fill_post_defaults(
:raises: exc_cls If check_allow_post is True and this instance of
ResourceAttributes doesn't support POST.
"""
for attr, attr_vals in self.attributes.items():
for attr, attr_vals in list(self.attributes.items()):
# kentwu: Patch needed for our GBP service_profiles attribute. Since
# parent and parameters are both sub-resource's attributes picked up
# from flavor plugin so we can just ignore those. These 2 attributes

View File

@ -142,7 +142,7 @@ def do_apic_aim_persist_migration(session):
# class. We work around this for now by using the dict members.
# This should be removed once the model class is fixed upstream.
scope_dict = {}
for k, v in scope_db.__dict__.items():
for k, v in list(scope_db.__dict__.items()):
if k == '_sa_instance_state':
continue
if k == 'shared_':
@ -457,8 +457,8 @@ def do_ha_ip_duplicate_entries_removal(session):
port_db.network_id, {})
ha_ip_dict.setdefault(
ha_ip, []).append(tuple((ha_ip, port_id)))
for haip_dict in net_to_ha_ip_dict.values():
for ha_ip in haip_dict.keys():
for haip_dict in list(net_to_ha_ip_dict.values()):
for ha_ip in list(haip_dict.keys()):
if len(haip_dict[ha_ip]) > 1:
for (haip, portid) in haip_dict[ha_ip]:
delete_q = HAIPAddressToPortAssociation.delete().where(

View File

@ -2213,7 +2213,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
existing_scope_db.aim_mapping)
if vrf.identity != existing_vrf.identity:
raise (exceptions.
NonIsomorphicNetworkRoutingUnsupported())
NonIsomorphicNetworkRoutingUnsupported)
else:
raise exceptions.NonIsomorphicNetworkRoutingUnsupported()
@ -2241,7 +2241,8 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
router_vrf = (
self._map_default_vrf(
session,
router_shared_net or next(iter(router_topology.values())))
router_shared_net or next(
iter(list(router_topology.values()))))
if router_topology else None)
# Choose VRF and move one topology if necessary.
@ -2487,7 +2488,8 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
router_shared_net = self._topology_shared(router_topology)
router_vrf = self._map_default_vrf(
session,
router_shared_net or next(iter(router_topology.values())))
router_shared_net or next(
iter(list(router_topology.values()))))
if old_vrf.identity != router_vrf.identity:
router_vrf = self._ensure_default_vrf(aim_ctx, router_vrf)
self._move_topology(
@ -2755,7 +2757,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
# active_active_aap mode.
subnet_ids = [x['subnet_id'] for x in port['fixed_ips']]
active_aap_mode = self._query_active_active_aap(session, subnet_ids)
for port_id, other_subnet_ids in affected_ports.items():
for port_id, other_subnet_ids in list(affected_ports.items()):
other_active_aap_mode = self._query_active_active_aap(
session, other_subnet_ids)
if active_aap_mode != other_active_aap_mode:
@ -4192,7 +4194,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
# TODO(rkukura): Validate that nothing in new_vrf overlaps
# with topology.
for network_db in topology.values():
for network_db in list(topology.values()):
if old_vrf.tenant_name != new_vrf.tenant_name:
# New VRF is in different Tenant, so move BD, EPG, and
# all Subnets to new VRF's Tenant and set BD's VRF.
@ -4361,7 +4363,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
[result[0] for result in results])
def _topology_shared(self, topology):
for network_db in topology.values():
for network_db in list(topology.values()):
if self._network_shared(network_db):
return network_db
@ -5373,7 +5375,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
network_db = self.plugin._get_network(plugin_context,
network['id'])
for ip_vers, subnet_dict in subnets_dict.items():
for ip_vers, subnet_dict in list(subnets_dict.items()):
secondary_ip = subnet_dict['subnet']['gateway_ip'] + '/' + (
subnet_dict['mask'])
aim_l3out_if = aim_resource.L3OutInterface(
@ -6612,7 +6614,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
mgr, net_dbs, routed_nets)
self._validate_routed_vrfs(mgr, routed_nets, network_vrfs)
for net_db in net_dbs.values():
for net_db in list(net_dbs.values()):
if not net_db.aim_extension_mapping:
self._missing_network_extension_mapping(mgr, net_db)
self._expect_project(mgr, net_db.project_id)
@ -6750,7 +6752,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
unscoped_router_net_ids = defaultdict(set)
unscoped_net_dbs = {}
shared_unscoped_net_ids = []
for intfs in routed_nets.values():
for intfs in list(routed_nets.values()):
net_id = None
v4_scope_mapping = None
v6_scope_mapping = None
@ -6824,7 +6826,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
expand_shared_topology(net_id, vrf)
# Process remaining (unshared) unscoped networks.
for net_db in unscoped_net_dbs.values():
for net_db in list(unscoped_net_dbs.values()):
if net_db.id not in network_vrfs:
vrf = use_default_vrf(net_db)
for router_id in unscoped_net_router_ids[net_db.id]:
@ -6834,12 +6836,12 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
def _validate_routed_vrfs(self, mgr, routed_nets, network_vrfs):
vrf_subnets = defaultdict(list)
for net_id, intfs in routed_nets.items():
for net_id, intfs in list(routed_nets.items()):
vrf = network_vrfs[net_id]
vrf_subnets[tuple(vrf.identity)] += [
(intf.subnet.id, netaddr.IPNetwork(intf.subnet.cidr))
for intf in intfs]
for vrf_id, subnets in vrf_subnets.items():
for vrf_id, subnets in list(vrf_subnets.items()):
subnets.sort(key=lambda s: s[1])
for (id1, cidr1), (id2, cidr2) in zip(subnets[:-1], subnets[1:]):
if id2 != id1 and cidr2 in cidr1:
@ -7101,12 +7103,12 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
vrf_routers = defaultdict(set)
int_vrfs = {}
for router_id in router_ids:
for int_vrf in router_vrfs[router_id].values():
for int_vrf in list(router_vrfs[router_id].values()):
key = tuple(int_vrf.identity)
vrf_routers[key].add(router_id)
int_vrfs[key] = int_vrf
for key, routers in vrf_routers.items():
for key, routers in list(vrf_routers.items()):
prov = set()
cons = set()
for router_id in routers:

View File

@ -1061,7 +1061,7 @@ class ApicRpcHandlerMixin(object):
total_ips = sorted(ips + ips_aap)
host_snat_ips = []
for ext_net in info['ext_net_info'].values():
for ext_net in list(info['ext_net_info'].values()):
need_snat = False
for ip in total_ips:
if ip not in fip_fixed_ips.get(ext_net.network_id, []):
@ -1110,7 +1110,7 @@ class ApicRpcHandlerMixin(object):
'nat_epg_app_profile': ext_net.epg_app_profile_name,
'nat_epg_name': ext_net.epg_name,
'nat_epg_tenant': ext_net.epg_tenant_name}
for ext_net in info['ext_net_info'].values()
for ext_net in list(info['ext_net_info'].values())
if ext_net.external_network_dn and
ext_net.nat_type == 'distributed' and
ext_net.network_id in host_snat_ext_net_ids]
@ -1164,7 +1164,7 @@ class ApicRpcHandlerMixin(object):
(ip.route_destination, ip.route_nexthop))
# Add remaining details to each subnet.
for subnet_id, subnet in subnets.items():
for subnet_id, subnet in list(subnets.items()):
dhcp_ips = set()
dhcp_ports = defaultdict(list)
for ip in dhcp_ip_info:

View File

@ -341,7 +341,7 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
# considered for deriving the status
mapped_status = []
for ascp in self.L3P_ADDRESS_SCOPE_KEYS.values():
for ascp in list(self.L3P_ADDRESS_SCOPE_KEYS.values()):
if l3p_db[ascp]:
ascp_id = l3p_db[ascp]
ascope = self._get_address_scope(
@ -469,7 +469,7 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
aim_resources = self._get_implicit_contracts_for_default_epg(
context, l3p_db, default_epg_dn)
aim_resources_list = []
for k in aim_resources.keys():
for k in list(aim_resources.keys()):
if not aim_resources[k] or not all(
x for x in aim_resources[k]):
# We expected a AIM mapped resource but did not find
@ -955,7 +955,7 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
aim_filter = self._aim_filter(session, context.current)
aim_reverse_filter = self._aim_filter(
session, context.current, reverse_prefix=True)
for afilter in filter(None, [aim_filter, aim_reverse_filter]):
for afilter in [_f for _f in [aim_filter, aim_reverse_filter] if _f]:
self.aim.delete(aim_ctx, afilter)
@log.log_method_call
@ -1509,7 +1509,7 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
aim_filter = self._aim_filter(session, pr)
aim_reverse_filter = self._aim_filter(
session, pr, reverse_prefix=True)
for afilter in filter(None, [aim_filter, aim_reverse_filter]):
for afilter in [_f for _f in [aim_filter, aim_reverse_filter] if _f]:
self._delete_aim_filter_entries(aim_context, afilter)
def _create_aim_filter_entries(self, session, aim_ctx, aim_filter,
@ -1543,7 +1543,7 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
def _get_aim_filter_names(self, session, policy_rule):
# Forward and Reverse AIM Filter names for a Policy Rule
aim_filters = self._get_aim_filters(session, policy_rule)
aim_filter_names = [f.name for f in aim_filters.values() if f]
aim_filter_names = [f.name for f in list(aim_filters.values()) if f]
return aim_filter_names
def _get_aim_filter_entries(self, session, policy_rule):
@ -1970,7 +1970,7 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
def _check_l3policy_ext_segment(self, context, l3policy):
if l3policy['external_segments']:
for allocations in l3policy['external_segments'].values():
for allocations in list(l3policy['external_segments'].values()):
if len(allocations) > 1:
raise alib.OnlyOneAddressIsAllowedPerExternalSegment()
# if NAT is disabled, allow only one L3P per ES
@ -2067,7 +2067,7 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
filters={'id': l3policy['routers']})
es_2_router = self._map_ext_segment_to_routers(context, es_list,
routers)
for r in es_2_router.values():
for r in list(es_2_router.values()):
router_subs = self._get_router_interface_subnets(plugin_context,
r['id'])
self._detach_router_from_subnets(plugin_context, r['id'],

View File

@ -213,7 +213,7 @@ class ValidationManager(object):
def validate_scope_arguments(self):
if self.neutron_resources:
for resource in self.neutron_resources:
if resource not in self.neutron_to_aim_mapping.keys():
if resource not in list(self.neutron_to_aim_mapping.keys()):
err_msg = ("Incorrect resource in the argument: " +
str(self.neutron_resources))
raise IncorrectResourceError(err_msg)
@ -222,7 +222,7 @@ class ValidationManager(object):
aim_tenant_list = set()
project_dict = self.md.project_details_cache.project_details
for project_id in project_dict.keys():
for project_id in list(project_dict.keys()):
tenant_name = project_dict[project_id][0]
if tenant_name in self.tenants:
self.tenant_ids.add(project_id)
@ -261,7 +261,7 @@ class ValidationManager(object):
elif not replace and key in expected_resources:
self.output("resource %s already expected" % resource)
raise InternalValidationError()
for attr_name, attr_type in resource.other_attributes.items():
for attr_name, attr_type in list(resource.other_attributes.items()):
attr_type_type = attr_type['type']
if attr_type_type == 'string':
value = getattr(resource, attr_name)
@ -316,9 +316,9 @@ class ValidationManager(object):
instance = expected_instances.get(key)
return [instance] if instance else []
else:
return [i for i in expected_instances.values()
return [i for i in list(expected_instances.values())
if all([getattr(i, k) == v for k, v in
filters.items()])]
list(filters.items())])]
else:
return list(expected_instances.values())
@ -342,7 +342,7 @@ class ValidationManager(object):
self.result = api.VALIDATION_FAILED_BINDING_PORTS
def _validate_aim_resources(self):
for resource_class in self._expected_aim_resources.keys():
for resource_class in list(self._expected_aim_resources.keys()):
self._validate_aim_resource_class(resource_class)
def _should_validate_neutron_resource(self, resource):
@ -375,7 +375,7 @@ class ValidationManager(object):
self._validate_actual_aim_resource(
actual_resource, expected_resource)
for expected_resource in expected_resources.values():
for expected_resource in list(expected_resources.values()):
if self._should_handle_missing_resource(expected_resource):
self._handle_missing_aim_resource(expected_resource)
@ -492,7 +492,7 @@ class ValidationManager(object):
self.aim_mgr.create(self.actual_aim_ctx, expected_resource)
def _validate_db_instances(self):
for db_class in self._expected_db_instances.keys():
for db_class in list(self._expected_db_instances.keys()):
self._validate_db_instance_class(db_class)
def _validate_db_instance_class(self, db_class):
@ -503,7 +503,7 @@ class ValidationManager(object):
self._validate_actual_db_instance(
actual_instance, expected_instances)
for expected_instance in expected_instances.values():
for expected_instance in list(expected_instances.values()):
self._handle_missing_db_instance(expected_instance)
def _validate_actual_db_instance(self, actual_instance,
@ -526,7 +526,8 @@ class ValidationManager(object):
def _is_db_instance_correct(self, expected_instance, actual_instance):
expected_values = expected_instance.__dict__
actual_values = actual_instance.__dict__
return all([v == actual_values[k] for k, v in expected_values.items()
return all([v == actual_values[k]
for k, v in list(expected_values.items())
if not k.startswith('_')])
def _handle_unexpected_db_instance(self, actual_instance):
@ -582,7 +583,7 @@ class ValidationAimStore(aim_store.AimStore):
return [r for r in
self._mgr.expected_aim_resources(resource_class)
if all([getattr(r, k) == v for k, v in
filters.items()])]
list(filters.items())])]
else:
return self._mgr.expected_aim_resources(resource_class)
@ -595,7 +596,7 @@ class ValidationAimStore(aim_store.AimStore):
assert(False)
def from_attr(self, db_obj, resource_class, attribute_dict):
for k, v in attribute_dict.items():
for k, v in list(attribute_dict.items()):
setattr(db_obj, k, v)
def to_attr(self, resource_class, db_obj):

View File

@ -1075,7 +1075,7 @@ class ImplicitResourceOperations(local_api.LocalAPI,
context._plugin_context, l2_policy_id)
l3p = context._plugin.get_l3_policy(context._plugin_context,
l2p['l3_policy_id'])
external_segments = l3p.get('external_segments').keys()
external_segments = list(l3p.get('external_segments').keys())
if not external_segments:
return es_list_with_nat_pools
external_segments = context._plugin.get_external_segments(
@ -1232,7 +1232,8 @@ class ImplicitResourceOperations(local_api.LocalAPI,
context.current['l2_policy_id'])
l3p = context._plugin.get_l3_policy(
context._plugin_context, l2p['l3_policy_id'])
external_segments = l3p.get('external_segments').keys()
external_segments = list(
l3p.get('external_segments').keys())
if external_segments:
external_segments = (
context._plugin.get_external_segments(
@ -1245,8 +1246,8 @@ class ImplicitResourceOperations(local_api.LocalAPI,
l3ps = context._plugin.get_l3_policies(
context._plugin_context, filter)
if l3ps:
external_segments = l3ps[0].get(
'external_segments').keys()
external_segments = list(l3ps[0].get(
'external_segments').keys())
if external_segments:
external_segments = (
context._plugin.get_external_segments(
@ -1464,7 +1465,7 @@ class ImplicitResourceOperations(local_api.LocalAPI,
if ip_version == 6 or ip_version == 46:
ip_dict[6] = {'default_prefixlen': 64}
for family in ip_dict.keys():
for family in list(ip_dict.keys()):
explicit_scope = l3p_req[self.L3P_ADDRESS_SCOPE_KEYS[family]]
explicit_pools = l3p_req[self.L3P_SUBNETPOOLS_KEYS[family]]
default_pool = self._core_plugin.get_default_subnetpool(
@ -1554,12 +1555,12 @@ class ImplicitResourceOperations(local_api.LocalAPI,
def _delete_l3p_subnetpools_postcommit(self, context):
subpools = []
for sp_key in self.L3P_SUBNETPOOLS_KEYS.values():
for sp_key in list(self.L3P_SUBNETPOOLS_KEYS.values()):
subpools += context.current[sp_key]
for sp_id in subpools:
self._cleanup_subnetpool(context._plugin_context, sp_id)
for ascp_key in self.L3P_ADDRESS_SCOPE_KEYS.values():
for ascp_key in list(self.L3P_ADDRESS_SCOPE_KEYS.values()):
if context.current[ascp_key]:
self._cleanup_address_scope(context._plugin_context,
context.current[ascp_key])
@ -1594,8 +1595,7 @@ class ResourceMappingDriver(api.PolicyDriver, ImplicitResourceOperations,
l2p = context._plugin.get_l2_policy(
context._plugin_context, context.current['l2_policy_id'])
if l2p['tenant_id'] != context.current['tenant_id']:
raise (
exc.
raise (exc.
CrossTenantPolicyTargetGroupL2PolicyNotSupported())
def _reject_cross_tenant_l2p_l3p(self, context):
@ -3133,7 +3133,8 @@ class ResourceMappingDriver(api.PolicyDriver, ImplicitResourceOperations,
return routes
add = _routes_from_es_ids(
context, added or context.current['external_segments'].keys())
context, added or list(
context.current['external_segments'].keys()))
remove = _routes_from_es_ids(context, removed)
self._update_l3p_routes(

View File

@ -247,7 +247,7 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
updaters[instance['id']]['plumbing_info'] = (
driver.get_plumbing_info(node_context))
# Update the nodes
for update in updaters.values():
for update in list(updaters.values()):
try:
update['driver'].update(update['context'])
except exc.NodeDriverError as ex:
@ -383,7 +383,7 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
context,
self.get_servicechain_instance(context, instance_id),
'update')
for update in updaters.values():
for update in list(updaters.values()):
try:
update['driver'].policy_target_group_updated(
update['context'],
@ -398,7 +398,7 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
updaters = self._get_scheduled_drivers(
context, self.get_servicechain_instance(context, instance_id),
'update')
for update in updaters.values():
for update in list(updaters.values()):
try:
getattr(update['driver'],
'update_policy_target_' + action)(
@ -412,7 +412,7 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
updaters = self._get_scheduled_drivers(
context, self.get_servicechain_instance(context, instance_id),
'update')
for update in updaters.values():
for update in list(updaters.values()):
try:
getattr(update['driver'],
'update_node_consumer_ptg_' + action)(
@ -434,7 +434,7 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
"""
sci = self.get_servicechain_instance(context, servicechain_instance_id)
updaters = self._get_scheduled_drivers(context, sci, 'update')
for update in updaters.values():
for update in list(updaters.values()):
try:
getattr(update['driver'],
'notify_chain_parameters_updated')(update['context'])
@ -526,7 +526,7 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
'status_details': 'node deployment in progress'}
if deployers:
try:
for deploy in deployers.values():
for deploy in list(deployers.values()):
driver = deploy['driver']
nodes_status.append(driver.get_status(
deploy['context']))
@ -535,7 +535,7 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
result['status'] = 'ERROR'
result['status_details'] = 'node deployment failed'
elif node_status.count('ACTIVE') == len(
deployers.values()):
list(deployers.values())):
result['status'] = 'ACTIVE'
result['status_details'] = 'node deployment completed'
except Exception as exc:
@ -548,19 +548,19 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
def _deploy_servicechain_nodes(self, context, deployers):
self.plumber.plug_services(context, list(deployers.values()))
for deploy in deployers.values():
for deploy in list(deployers.values()):
driver = deploy['driver']
driver.create(deploy['context'])
def _update_servicechain_nodes(self, context, updaters):
for update in updaters.values():
for update in list(updaters.values()):
driver = update['driver']
driver.update(update['context'])
def _destroy_servicechain_nodes(self, context, destroyers):
# Actual node disruption
try:
for destroy in destroyers.values():
for destroy in list(destroyers.values()):
driver = destroy['driver']
try:
driver.delete(destroy['context'])

View File

@ -82,10 +82,10 @@ class FlowclassifierAIMDriver(FlowclassifierAIMDriverBase):
fc = context.current
# Verify L7 params are set
l7_p = fc['l7_parameters']
if any(x for x in sfc_cts.AIM_FLC_L7_PARAMS.keys()
if any(x for x in list(sfc_cts.AIM_FLC_L7_PARAMS.keys())
if not validators.is_attr_set(l7_p.get(x))):
raise sfc_exc.BadFlowClassifier(
params=sfc_cts.AIM_FLC_L7_PARAMS.keys())
params=list(sfc_cts.AIM_FLC_L7_PARAMS.keys()))
# Verify standard params are set
# TODO(ivar): src and dst prefix are needed only for SVI networks
if any(x for x in sfc_cts.AIM_FLC_PARAMS

View File

@ -732,7 +732,7 @@ class SfcAIMDriver(SfcAIMDriverBase):
return (
any(context.current[a] != context.original[a] for a in attrs) or
any(param_curr.get(x) != param_orig.get(x) for x in
sfc_cts.AIM_PPG_PARAMS.keys()))
list(sfc_cts.AIM_PPG_PARAMS.keys())))
def _should_regenerate_pc(self, context):
attrs = ['flow_classifiers', 'port_pair_groups', 'name']
@ -747,7 +747,7 @@ class SfcAIMDriver(SfcAIMDriverBase):
any(current[x] != original[x] for x in
sfc_cts.AIM_FLC_PARAMS + ['name']) or
any(l7_curr[x] != l7_orig[x] for x in
sfc_cts.AIM_FLC_L7_PARAMS.keys()))
list(sfc_cts.AIM_FLC_L7_PARAMS.keys())))
def _get_ppg_device_cluster(self, session, ppg, tenant):
tenant_aid = tenant
@ -928,7 +928,7 @@ class SfcAIMDriver(SfcAIMDriverBase):
for ppg_id in ppg_ids:
for chain in self._get_chains_by_ppg_ids(context, [ppg_id]):
chains[chain['id']] = chain
for chain in chains.values():
for chain in list(chains.values()):
flowcs, ppgs = self._get_pc_flowcs_and_ppgs(context, chain)
self._validate_port_chain(context, chain, flowcs, ppgs)

View File

@ -39,7 +39,7 @@ orig_warning = resource.LOG.warning
def warning(*args):
try:
for val in sys._getframe(1).f_locals.values():
for val in list(sys._getframe(1).f_locals.values()):
if isinstance(val, resource.TrackedResource) and (
sys._getframe(1).f_code.co_name == (
'unregister_events')):

View File

@ -211,10 +211,10 @@ class ApiManagerMixin(object):
class GroupPolicyDBTestBase(ApiManagerMixin):
resource_prefix_map = dict(
(k, gp_constants.GBP_PREFIXES[constants.SERVICECHAIN])
for k in service_chain.RESOURCE_ATTRIBUTE_MAP.keys())
for k in list(service_chain.RESOURCE_ATTRIBUTE_MAP.keys()))
resource_prefix_map.update(dict(
(k, gp_constants.GBP_PREFIXES[constants.GROUP_POLICY])
for k in gpolicy.RESOURCE_ATTRIBUTE_MAP.keys()
for k in list(gpolicy.RESOURCE_ATTRIBUTE_MAP.keys())
))
fmt = JSON_FORMAT

View File

@ -202,7 +202,7 @@ class Test_Process_Model(unittest2.TestCase):
controller.launch(2)
# Check if 2 workers are created
workers = controller.get_childrens()
pids = workers.keys()
pids = list(workers.keys())
self.assertEqual(len(pids), 2)
self.assertTrue(pid in range(8888, 9999) for pid in pids)
@ -217,7 +217,7 @@ class Test_Process_Model(unittest2.TestCase):
controller.launch(4)
# Check if 4 workers are created
workers = controller.get_childrens()
pids = workers.keys()
pids = list(workers.keys())
self.assertEqual(len(pids), 4)
self.assertTrue(pid in range(8888, 9999) for pid in pids)
@ -232,7 +232,7 @@ class Test_Process_Model(unittest2.TestCase):
controller.launch(2)
controller._update_manager()
# Check if 2 workers are added to manager
pids = controller._manager._resource_map.keys()
pids = list(controller._manager._resource_map.keys())
self.assertEqual(len(pids), 2)
self.assertTrue(pid in range(8888, 9999) for pid in pids)
@ -259,7 +259,7 @@ class Test_Process_Model(unittest2.TestCase):
# Run one more time and check if it detects the difference
controller._manager.manager_run()
pids = controller._manager._resource_map.keys()
pids = list(controller._manager._resource_map.keys())
self.assertEqual(len(pids), 2)
if pid not in old_childs:
self.assertFalse(old_childs[0] in pids)
@ -533,7 +533,7 @@ class Test_Process_Model(unittest2.TestCase):
# Update descriptor
desc = nfp_event.EventDesc(**{})
setattr(event, 'desc', desc)
event.desc.worker = controller.get_childrens().keys()[0]
event.desc.worker = list(controller.get_childrens().keys())[0]
ctx = nfp_context.get()
ctx['log_context']['namespace'] = 'nfp_module'
@ -712,7 +712,7 @@ class Test_Process_Model(unittest2.TestCase):
self.controller = controller
# Check if 1 worker is added to manager
pids = controller._manager._resource_map.keys()
pids = list(controller._manager._resource_map.keys())
self.assertEqual(len(pids), 1)
self.assertTrue(pid in range(8888, 9999) for pid in pids)
@ -751,7 +751,7 @@ class Test_Process_Model(unittest2.TestCase):
self.controller = controller
# Check if 1 worker is added to manager
pids = controller._manager._resource_map.keys()
pids = list(controller._manager._resource_map.keys())
self.assertEqual(len(pids), 1)
self.assertTrue(pid in range(8888, 9999) for pid in pids)
@ -785,7 +785,7 @@ class Test_Process_Model(unittest2.TestCase):
self.controller = controller
# Check if 1 worker is added to manager
pids = controller._manager._resource_map.keys()
pids = list(controller._manager._resource_map.keys())
self.assertEqual(len(pids), 1)
self.assertTrue(pid in range(8888, 9999) for pid in pids)
@ -829,7 +829,7 @@ class Test_Process_Model(unittest2.TestCase):
self.controller = controller
# Check if 1 worker is added to manager
pids = controller._manager._resource_map.keys()
pids = list(controller._manager._resource_map.keys())
self.assertEqual(len(pids), 1)
self.assertTrue(pid in range(8888, 9999) for pid in pids)

View File

@ -101,153 +101,153 @@ class DummyDictionaries(object):
}
DEFAULT_FW_CONFIG = {
u'heat_template_version': u'2013-05-23',
u'description': u'Template to deploy firewall',
u'resources': {
u'sc_firewall_rule3': {
u'type': u'OS::Neutron::FirewallRule',
u'properties': {
u'action': u'allow',
u'destination_port': u'82',
u'protocol': u'tcp', u'name': u'Rule_3'
'heat_template_version': '2013-05-23',
'description': 'Template to deploy firewall',
'resources': {
'sc_firewall_rule3': {
'type': 'OS::Neutron::FirewallRule',
'properties': {
'action': 'allow',
'destination_port': '82',
'protocol': 'tcp', 'name': 'Rule_3'
}
},
u'sc_firewall_rule2': {
u'type': u'OS::Neutron::FirewallRule',
u'properties': {
u'action': u'allow',
u'destination_port': u'81',
u'protocol': u'tcp', u'name': u'Rule_2'
'sc_firewall_rule2': {
'type': 'OS::Neutron::FirewallRule',
'properties': {
'action': 'allow',
'destination_port': '81',
'protocol': 'tcp', 'name': 'Rule_2'
}
},
u'sc_firewall_rule1': {
u'type': u'OS::Neutron::FirewallRule',
u'properties': {
u'action': u'allow',
u'destination_port': u'80',
u'protocol': u'tcp',
u'name': u'Rule_1'
'sc_firewall_rule1': {
'type': 'OS::Neutron::FirewallRule',
'properties': {
'action': 'allow',
'destination_port': '80',
'protocol': 'tcp',
'name': 'Rule_1'
}
},
u'sc_firewall_rule0': {
u'type': u'OS::Neutron::FirewallRule',
u'properties': {
u'action': u'allow',
u'destination_port': u'22',
u'protocol': u'tcp', u'name': u'Rule_0'
'sc_firewall_rule0': {
'type': 'OS::Neutron::FirewallRule',
'properties': {
'action': 'allow',
'destination_port': '22',
'protocol': 'tcp', 'name': 'Rule_0'
}
},
u'sc_firewall_rule4': {
u'type': u'OS::Neutron::FirewallRule',
u'properties': {
u'action': u'allow',
u'protocol': u'icmp',
u'name': u'Rule_4'
'sc_firewall_rule4': {
'type': 'OS::Neutron::FirewallRule',
'properties': {
'action': 'allow',
'protocol': 'icmp',
'name': 'Rule_4'
}
},
u'sc_firewall_policy': {
u'type': u'OS::Neutron::FirewallPolicy',
u'properties': {
u'name': u'',
u'firewall_rules': [
{u'get_resource': u'sc_firewall_rule0'},
{u'get_resource': u'sc_firewall_rule1'},
{u'get_resource': u'sc_firewall_rule2'},
{u'get_resource': u'sc_firewall_rule3'},
{u'get_resource': u'sc_firewall_rule4'}]
'sc_firewall_policy': {
'type': 'OS::Neutron::FirewallPolicy',
'properties': {
'name': '',
'firewall_rules': [
{'get_resource': 'sc_firewall_rule0'},
{'get_resource': 'sc_firewall_rule1'},
{'get_resource': 'sc_firewall_rule2'},
{'get_resource': 'sc_firewall_rule3'},
{'get_resource': 'sc_firewall_rule4'}]
}
},
u'sc_firewall': {
u'type': u'OS::Neutron::Firewall',
u'properties': {
u'firewall_policy_id': {
u'get_resource': u'sc_firewall_policy'
'sc_firewall': {
'type': 'OS::Neutron::Firewall',
'properties': {
'firewall_policy_id': {
'get_resource': 'sc_firewall_policy'
},
u'name': u'serviceVM_infra_FW',
u'description': {u'insert_type': u'east_west'}
'name': 'serviceVM_infra_FW',
'description': {'insert_type': 'east_west'}
}
}
}
}
DEFAULT_VPN_CONFIG = {
u'resources': {
u'IKEPolicy': {
u'type': u'OS::Neutron::IKEPolicy',
u'properties': {
u'name': u'IKEPolicy',
u'auth_algorithm': u'sha1',
u'encryption_algorithm': u'3des',
u'pfs': u'group5',
u'lifetime': {
u'units': u'seconds',
u'value': 3600
'resources': {
'IKEPolicy': {
'type': 'OS::Neutron::IKEPolicy',
'properties': {
'name': 'IKEPolicy',
'auth_algorithm': 'sha1',
'encryption_algorithm': '3des',
'pfs': 'group5',
'lifetime': {
'units': 'seconds',
'value': 3600
},
u'ike_version': u'v1',
u'phase1_negotiation_mode': u'main'
'ike_version': 'v1',
'phase1_negotiation_mode': 'main'
}
},
u'VPNService': {
u'type': u'OS::Neutron::VPNService',
u'properties': {
u'router_id': {
u'get_param': u'RouterId'
'VPNService': {
'type': 'OS::Neutron::VPNService',
'properties': {
'router_id': {
'get_param': 'RouterId'
},
u'subnet_id': {
u'get_param': u'Subnet'
'subnet_id': {
'get_param': 'Subnet'
},
u'admin_state_up': u'true',
u'description': {
u'get_param': u'ServiceDescription'
'admin_state_up': 'true',
'description': {
'get_param': 'ServiceDescription'
},
u'name': u'VPNService'
'name': 'VPNService'
}
},
u'site_to_site_connection1': {
u'type': u'OS::Neutron::IPsecSiteConnection',
u'properties': {
u'psk': u'secret',
u'initiator': u'bi-directional',
u'name': u'site_to_site_connection1',
u'admin_state_up': u'true',
'site_to_site_connection1': {
'type': 'OS::Neutron::IPsecSiteConnection',
'properties': {
'psk': 'secret',
'initiator': 'bi-directional',
'name': 'site_to_site_connection1',
'admin_state_up': 'true',
'description':
u'fip=1.103.1.20;tunnel_local_cidr=11.0.1.0/24;\
'fip=1.103.1.20;tunnel_local_cidr=11.0.1.0/24;\
user_access_ip=1.103.2.20;fixed_ip=192.168.0.3;\
standby_fip=1.103.1.21;service_vendor=vyos;\
stitching_cidr=192.168.0.0/28;\
stitching_gateway=192.168.0.1;mgmt_gw_ip=120.0.0.1',
u'peer_cidrs': [u'11.0.0.0/24'],
u'mtu': 1500,
u'ikepolicy_id': {
u'get_resource': u'IKEPolicy'
'peer_cidrs': ['11.0.0.0/24'],
'mtu': 1500,
'ikepolicy_id': {
'get_resource': 'IKEPolicy'
},
u'dpd': {
u'interval': 30,
u'actions': u'hold',
u'timeout': 120
'dpd': {
'interval': 30,
'actions': 'hold',
'timeout': 120
},
u'vpnservice_id': {
u'get_resource': u'VPNService'
'vpnservice_id': {
'get_resource': 'VPNService'
},
u'peer_address': u'1.103.2.88',
u'peer_id': u'1.103.2.88',
u'ipsecpolicy_id': {
u'get_resource': u'IPsecPolicy'
'peer_address': '1.103.2.88',
'peer_id': '1.103.2.88',
'ipsecpolicy_id': {
'get_resource': 'IPsecPolicy'
}
}
},
u'IPsecPolicy': {
u'type': u'OS::Neutron::IPsecPolicy',
u'properties': {
u'name': u'IPsecPolicy',
u'transform_protocol': u'esp',
u'auth_algorithm': u'sha1',
u'encapsulation_mode': u'tunnel',
u'encryption_algorithm': u'3des',
u'pfs': u'group5',
u'lifetime': {
u'units': u'seconds',
u'value': 3600
'IPsecPolicy': {
'type': 'OS::Neutron::IPsecPolicy',
'properties': {
'name': 'IPsecPolicy',
'transform_protocol': 'esp',
'auth_algorithm': 'sha1',
'encapsulation_mode': 'tunnel',
'encryption_algorithm': '3des',
'pfs': 'group5',
'lifetime': {
'units': 'seconds',
'value': 3600
}
}
}
@ -255,17 +255,17 @@ class DummyDictionaries(object):
}
appended_sc_firewall_policy = {
u'type': u'OS::Neutron::FirewallPolicy',
u'properties': {
u'name': u'',
u'firewall_rules': [
'type': 'OS::Neutron::FirewallPolicy',
'properties': {
'name': '',
'firewall_rules': [
{
u'get_resource': u'sc_firewall_rule0'
'get_resource': 'sc_firewall_rule0'
},
{u'get_resource': u'sc_firewall_rule1'},
{u'get_resource': u'sc_firewall_rule2'},
{u'get_resource': u'sc_firewall_rule3'},
{u'get_resource': u'sc_firewall_rule4'},
{'get_resource': 'sc_firewall_rule1'},
{'get_resource': 'sc_firewall_rule2'},
{'get_resource': 'sc_firewall_rule3'},
{'get_resource': 'sc_firewall_rule4'},
{'get_resource': 'node_driver_rule_2b86019a-45f7-44_1'},
{'get_resource': 'node_driver_rule_2b86019a-45f7-44_2'},
{'get_resource': 'node_driver_rule_2b86019a-45f7-44_3'},
@ -276,29 +276,29 @@ class DummyDictionaries(object):
}
updated_sc_firewall_policy = {
u'type': u'OS::Neutron::FirewallPolicy',
u'properties': {
u'name': u'-fw_redirect',
u'firewall_rules': [
{'get_resource': u'node_driver_rule_af6a8a58-1e25-49_1'},
{'get_resource': u'node_driver_rule_af6a8a58-1e25-49_2'},
{'get_resource': u'node_driver_rule_af6a8a58-1e25-49_3'},
{'get_resource': u'node_driver_rule_af6a8a58-1e25-49_4'},
{'get_resource': u'node_driver_rule_af6a8a58-1e25-49_5'},
'type': 'OS::Neutron::FirewallPolicy',
'properties': {
'name': '-fw_redirect',
'firewall_rules': [
{'get_resource': 'node_driver_rule_af6a8a58-1e25-49_1'},
{'get_resource': 'node_driver_rule_af6a8a58-1e25-49_2'},
{'get_resource': 'node_driver_rule_af6a8a58-1e25-49_3'},
{'get_resource': 'node_driver_rule_af6a8a58-1e25-49_4'},
{'get_resource': 'node_driver_rule_af6a8a58-1e25-49_5'},
]
}
}
updated_template_sc_firewall_policy = {
u'type': u'OS::Neutron::FirewallPolicy',
u'properties': {
u'name': u'',
u'firewall_rules': [
{'get_resource': u'node_driver_rule_af6a8a58-1e25-49_1'},
{'get_resource': u'node_driver_rule_af6a8a58-1e25-49_2'},
{'get_resource': u'node_driver_rule_af6a8a58-1e25-49_3'},
{'get_resource': u'node_driver_rule_af6a8a58-1e25-49_4'},
{'get_resource': u'node_driver_rule_af6a8a58-1e25-49_5'},
'type': 'OS::Neutron::FirewallPolicy',
'properties': {
'name': '',
'firewall_rules': [
{'get_resource': 'node_driver_rule_af6a8a58-1e25-49_1'},
{'get_resource': 'node_driver_rule_af6a8a58-1e25-49_2'},
{'get_resource': 'node_driver_rule_af6a8a58-1e25-49_3'},
{'get_resource': 'node_driver_rule_af6a8a58-1e25-49_4'},
{'get_resource': 'node_driver_rule_af6a8a58-1e25-49_5'},
]
}
}
@ -319,188 +319,188 @@ class DummyDictionaries(object):
port_info = {
'port': {
u'status': u'ACTIVE',
u'binding:host_id': u'LibertyCompute',
u'name': u'',
u'allowed_address_pairs': [],
u'admin_state_up': True,
u'network_id': u'2286b432-a443-4cd3-be49-e354f531abe3',
u'dns_name': u'',
u'extra_dhcp_opts': [],
u'mac_address': u'fa:16:3e:43:34:33',
u'dns_assignment': [
{u'hostname': u'host-42-0-0-13',
u'ip_address': u'42.0.0.13',
u'fqdn': u'host-42-0-0-13.openstacklocal.'
'status': 'ACTIVE',
'binding:host_id': 'LibertyCompute',
'name': '',
'allowed_address_pairs': [],
'admin_state_up': True,
'network_id': '2286b432-a443-4cd3-be49-e354f531abe3',
'dns_name': '',
'extra_dhcp_opts': [],
'mac_address': 'fa:16:3e:43:34:33',
'dns_assignment': [
{'hostname': 'host-42-0-0-13',
'ip_address': '42.0.0.13',
'fqdn': 'host-42-0-0-13.openstacklocal.'
}],
u'binding:vif_details': {
u'port_filter': True,
u'ovs_hybrid_plug': True
'binding:vif_details': {
'port_filter': True,
'ovs_hybrid_plug': True
},
u'binding:vif_type': u'ovs',
u'device_owner': u'compute:nova',
u'tenant_id': u'f6b09b7a590642d8ac6de73df0ab0686',
u'binding:profile': {},
u'binding:vnic_type': u'normal',
u'fixed_ips': [
{u'subnet_id': u'b31cdafe-bdf3-4c19-b768-34d623d77d6c',
u'ip_address': u'42.0.0.13'}],
u'id': u'dde7d849-4c7c-4b48-8c21-f3f52c646fbe',
u'security_groups': [u'ad3b95a4-b5ce-4a95-9add-6ef2ee797e72'],
u'device_id': u'36e9a6d9-ea04-4627-93c5-6f708368c070'
'binding:vif_type': 'ovs',
'device_owner': 'compute:nova',
'tenant_id': 'f6b09b7a590642d8ac6de73df0ab0686',
'binding:profile': {},
'binding:vnic_type': 'normal',
'fixed_ips': [
{'subnet_id': 'b31cdafe-bdf3-4c19-b768-34d623d77d6c',
'ip_address': '42.0.0.13'}],
'id': 'dde7d849-4c7c-4b48-8c21-f3f52c646fbe',
'security_groups': ['ad3b95a4-b5ce-4a95-9add-6ef2ee797e72'],
'device_id': '36e9a6d9-ea04-4627-93c5-6f708368c070'
}
}
provider_ptg = {
u'shared': False,
u'subnets': [u'a2702d68-6deb-425c-a266-e27b349e00ce'],
u'proxy_group_id': None,
u'description': u'',
u'consumed_policy_rule_sets': [],
u'network_service_policy_id': u'0cdf2cba-90f8-44da-84a5-876e582f6e35',
u'tenant_id': u'8ae6701128994ab281dde6b92207bb19',
u'service_management': False,
u'provided_policy_rule_sets': ['7d4b1ef2-eb80-415d-ad13-abf0ea0c52f3'],
u'policy_targets': [
'shared': False,
'subnets': ['a2702d68-6deb-425c-a266-e27b349e00ce'],
'proxy_group_id': None,
'description': '',
'consumed_policy_rule_sets': [],
'network_service_policy_id': '0cdf2cba-90f8-44da-84a5-876e582f6e35',
'tenant_id': '8ae6701128994ab281dde6b92207bb19',
'service_management': False,
'provided_policy_rule_sets': ['7d4b1ef2-eb80-415d-ad13-abf0ea0c52f3'],
'policy_targets': [
{'name': 'provider_0132c_00b93',
'port_id': 'dde7d849-4c7c-4b48-8c21-f3f52c646fbe'}],
u'proxy_type': None,
u'proxied_group_id': None,
u'l2_policy_id': u'120aa972-1b58-418d-aa5b-1d2f96612c49',
u'id': u'af6a8a58-1e25-49c4-97a3-d5f50b3aa04b',
u'name': u'fw_redirect'
'proxy_type': None,
'proxied_group_id': None,
'l2_policy_id': '120aa972-1b58-418d-aa5b-1d2f96612c49',
'id': 'af6a8a58-1e25-49c4-97a3-d5f50b3aa04b',
'name': 'fw_redirect'
}
consumer_ptg = {
u'shared': False,
u'subnets': [u'a2702d68-6deb-425c-a266-e27b349e00ce'],
u'proxy_group_id': None,
u'description': u'',
u'consumed_policy_rule_sets': ['7d4b1ef2-eb80-415d-ad13-abf0ea0c52f3'],
u'network_service_policy_id': u'0cdf2cba-90f8-44da-84a5-876e582f6e35',
u'tenant_id': u'8ae6701128994ab281dde6b92207bb19',
u'service_management': False,
u'provided_policy_rule_sets': [],
u'policy_targets': [
'shared': False,
'subnets': ['a2702d68-6deb-425c-a266-e27b349e00ce'],
'proxy_group_id': None,
'description': '',
'consumed_policy_rule_sets': ['7d4b1ef2-eb80-415d-ad13-abf0ea0c52f3'],
'network_service_policy_id': '0cdf2cba-90f8-44da-84a5-876e582f6e35',
'tenant_id': '8ae6701128994ab281dde6b92207bb19',
'service_management': False,
'provided_policy_rule_sets': [],
'policy_targets': [
{'name': 'provider_0132c_00b93',
'port_id': 'dde7d849-4c7c-4b48-8c21-f3f52c646fbe'}],
u'proxy_type': None,
u'proxied_group_id': None,
u'l2_policy_id': u'120aa972-1b58-418d-aa5b-1d2f96612c49',
u'id': u'af6a8a58-1e25-49c4-97a3-d5f50b3aa04b',
u'name': u'fw_redirect'
'proxy_type': None,
'proxied_group_id': None,
'l2_policy_id': '120aa972-1b58-418d-aa5b-1d2f96612c49',
'id': 'af6a8a58-1e25-49c4-97a3-d5f50b3aa04b',
'name': 'fw_redirect'
}
l3_policies = {
u'l3_policies': [
{u'tenant_id': '8ae6701128994ab281dde6b92207bb19',
u'name': u'remote-vpn-client-pool-cidr-l3policy'}]
'l3_policies': [
{'tenant_id': '8ae6701128994ab281dde6b92207bb19',
'name': 'remote-vpn-client-pool-cidr-l3policy'}]
}
policy_rule_sets = {
u'policy_rule_sets': [
{u'id': u'7d4b1ef2-eb80-415d-ad13-abf0ea0c52f3',
u'name': u'fw_redirect',
u'policy_rules': [u'493788ad-2b9a-47b1-b04d-9096d4057fb5'],
u'tenant_id': u'8ae6701128994ab281dde6b92207bb19',
u'shared': False,
u'consuming_policy_target_groups':
[u'af6a8a58-1e25-49c4-97a3-d5f50b3aa04b'],
u'consuming_external_policies': None}]
'policy_rule_sets': [
{'id': '7d4b1ef2-eb80-415d-ad13-abf0ea0c52f3',
'name': 'fw_redirect',
'policy_rules': ['493788ad-2b9a-47b1-b04d-9096d4057fb5'],
'tenant_id': '8ae6701128994ab281dde6b92207bb19',
'shared': False,
'consuming_policy_target_groups':
['af6a8a58-1e25-49c4-97a3-d5f50b3aa04b'],
'consuming_external_policies': None}]
}
policy_rules = {
u'policy_rules': [
{u'id': u'493788ad-2b9a-47b1-b04d-9096d4057fb5',
u'name': u'fw_redirect',
u'policy_actions': [u'0bab5fa6-4f89-4e15-8363-dacc7d825466'],
u'policy_classifier_id': u'8e5fc80f-7544-484c-82d0-2a5794c10664',
u'tenant_id': u'8ae6701128994ab281dde6b92207bb19',
u'shared': False}]
'policy_rules': [
{'id': '493788ad-2b9a-47b1-b04d-9096d4057fb5',
'name': 'fw_redirect',
'policy_actions': ['0bab5fa6-4f89-4e15-8363-dacc7d825466'],
'policy_classifier_id': '8e5fc80f-7544-484c-82d0-2a5794c10664',
'tenant_id': '8ae6701128994ab281dde6b92207bb19',
'shared': False}]
}
policy_actions = {
u'policy_actions': [
{u'id': u'0bab5fa6-4f89-4e15-8363-dacc7d825466',
u'name': u'fw_redirect',
u'action_value': u'1e83b288-4b56-4851-83e2-69c4365aa8e5',
u'action_type': u'redirect',
u'tenant_id': u'8ae6701128994ab281dde6b92207bb19',
u'shared': False}]
'policy_actions': [
{'id': '0bab5fa6-4f89-4e15-8363-dacc7d825466',
'name': 'fw_redirect',
'action_value': '1e83b288-4b56-4851-83e2-69c4365aa8e5',
'action_type': 'redirect',
'tenant_id': '8ae6701128994ab281dde6b92207bb19',
'shared': False}]
}
policy_target_groups = {
u'policy_target_groups': [
{u'shared': False,
u'subnets': [u'a2702d68-6deb-425c-a266-e27b349e00ce'],
u'proxy_group_id': None,
u'description': u'',
u'consumed_policy_rule_sets': [],
u'network_service_policy_id':
u'0cdf2cba-90f8-44da-84a5-876e582f6e35',
u'tenant_id': u'8ae6701128994ab281dde6b92207bb19',
u'service_management': False,
u'provided_policy_rule_sets':
'policy_target_groups': [
{'shared': False,
'subnets': ['a2702d68-6deb-425c-a266-e27b349e00ce'],
'proxy_group_id': None,
'description': '',
'consumed_policy_rule_sets': [],
'network_service_policy_id':
'0cdf2cba-90f8-44da-84a5-876e582f6e35',
'tenant_id': '8ae6701128994ab281dde6b92207bb19',
'service_management': False,
'provided_policy_rule_sets':
['7d4b1ef2-eb80-415d-ad13-abf0ea0c52f3'],
u'policy_targets': [
'policy_targets': [
{'name': 'provider_0132c_00b93',
'port_id': 'dde7d849-4c7c-4b48-8c21-f3f52c646fbe'}],
u'proxy_type': None,
u'proxied_group_id': None,
u'l2_policy_id': u'120aa972-1b58-418d-aa5b-1d2f96612c49',
u'id': u'af6a8a58-1e25-49c4-97a3-d5f50b3aa04b',
u'name': u'fw_redirect'}]
'proxy_type': None,
'proxied_group_id': None,
'l2_policy_id': '120aa972-1b58-418d-aa5b-1d2f96612c49',
'id': 'af6a8a58-1e25-49c4-97a3-d5f50b3aa04b',
'name': 'fw_redirect'}]
}
subnet_info = {
u'subnet': {
u'name': u'lb-subnet',
u'enable_dhcp': True,
u'network_id': u'2286b432-a443-4cd3-be49-e354f531abe3',
u'tenant_id': u'f6b09b7a590642d8ac6de73df0ab0686',
u'dns_nameservers': [],
u'ipv6_ra_mode': None,
u'allocation_pools': [{
u'start': u'42.0.0.2', u'end': u'42.0.0.254'}],
u'gateway_ip': u'42.0.0.1',
u'ipv6_address_mode': None,
u'ip_version': 4,
u'host_routes': [],
u'cidr': u'42.0.0.0/24',
u'id': u'b31cdafe-bdf3-4c19-b768-34d623d77d6c',
u'subnetpool_id': None
'subnet': {
'name': 'lb-subnet',
'enable_dhcp': True,
'network_id': '2286b432-a443-4cd3-be49-e354f531abe3',
'tenant_id': 'f6b09b7a590642d8ac6de73df0ab0686',
'dns_nameservers': [],
'ipv6_ra_mode': None,
'allocation_pools': [{
'start': '42.0.0.2', 'end': '42.0.0.254'}],
'gateway_ip': '42.0.0.1',
'ipv6_address_mode': None,
'ip_version': 4,
'host_routes': [],
'cidr': '42.0.0.0/24',
'id': 'b31cdafe-bdf3-4c19-b768-34d623d77d6c',
'subnetpool_id': None
}
}
subnets_info = {
u'subnets': [
{u'name': u'lb-subnet',
u'enable_dhcp': True,
u'network_id': u'2286b432-a443-4cd3-be49-e354f531abe3',
u'tenant_id': u'f6b09b7a590642d8ac6de73df0ab0686',
u'dns_nameservers': [],
u'ipv6_ra_mode': None,
u'allocation_pools': [{
u'start': u'42.0.0.2', u'end': u'42.0.0.254'}],
u'gateway_ip': u'42.0.0.1',
u'ipv6_address_mode': None,
u'ip_version': 4,
u'host_routes': [],
u'cidr': u'42.0.0.0/24',
u'id': u'b31cdafe-bdf3-4c19-b768-34d623d77d6c',
u'subnetpool_id': None}]
'subnets': [
{'name': 'lb-subnet',
'enable_dhcp': True,
'network_id': '2286b432-a443-4cd3-be49-e354f531abe3',
'tenant_id': 'f6b09b7a590642d8ac6de73df0ab0686',
'dns_nameservers': [],
'ipv6_ra_mode': None,
'allocation_pools': [{
'start': '42.0.0.2', 'end': '42.0.0.254'}],
'gateway_ip': '42.0.0.1',
'ipv6_address_mode': None,
'ip_version': 4,
'host_routes': [],
'cidr': '42.0.0.0/24',
'id': 'b31cdafe-bdf3-4c19-b768-34d623d77d6c',
'subnetpool_id': None}]
}
external_policies = {u'external_policies': {}}
external_policies = {'external_policies': {}}
fw_template_properties = {
'fw_rule_keys': [u'sc_firewall_rule3', u'sc_firewall_rule2',
u'sc_firewall_rule1', u'sc_firewall_rule0',
u'sc_firewall_rule4'],
'name': u'2b8',
'fw_rule_keys': ['sc_firewall_rule3', 'sc_firewall_rule2',
'sc_firewall_rule1', 'sc_firewall_rule0',
'sc_firewall_rule4'],
'name': '2b8',
'properties_key': 'properties',
'resources_key': 'resources',
'is_template_aws_version': False,
'fw_policy_key': u'sc_firewall_policy'
'fw_policy_key': 'sc_firewall_policy'
}
fw_scn_config = "{\"heat_template_version\": \"2013-05-23\",\
@ -586,49 +586,49 @@ class DummyDictionaries(object):
\":\"VPNService\"}}, \"type\":\"OS::Neutron::IPsecSiteConnection\"}}}"
service_profile = {
u'service_flavor': u'vyos',
u'service_type': u'FIREWALL'
'service_flavor': 'vyos',
'service_type': 'FIREWALL'
}
vpn_service_profile = {
u'service_flavor': u'vyos',
u'service_type': u'VPN'
'service_flavor': 'vyos',
'service_type': 'VPN'
}
lbv2_service_profile = {
u'service_flavor': u'haproxy',
u'service_type': u'LOADBALANCERV2'
'service_flavor': 'haproxy',
'service_type': 'LOADBALANCERV2'
}
fw_service_chain_node = {
u'id': u'012345678919',
u'name': u'scn_fw',
u'config': fw_scn_config
'id': '012345678919',
'name': 'scn_fw',
'config': fw_scn_config
}
vpn_service_chain_node = {
u'id': u'012345678919',
u'name': u'scn_vpn',
u'config': vpn_scn_config
'id': '012345678919',
'name': 'scn_vpn',
'config': vpn_scn_config
}
lbv2_service_chain_node = {
u'id': u'012345678919',
u'name': u'scn_lb',
u'config': lbv2_scn_config
'id': '012345678919',
'name': 'scn_lb',
'config': lbv2_scn_config
}
service_chain_instance = {
u'id': u'7834569034456677',
u'name': u'sci_fw'
'id': '7834569034456677',
'name': 'sci_fw'
}
consumer_port = {
u'fixed_ips': [{
u'ip_address': u'11.0.3.4',
u'subnet_id': u'9876256378888333'
'fixed_ips': [{
'ip_address': '11.0.3.4',
'subnet_id': '9876256378888333'
}],
u'id': u'af6a8a58-1e25-49c4-97a3-d5f50b3aa04b'
'id': 'af6a8a58-1e25-49c4-97a3-d5f50b3aa04b'
}
network_function_details = {

View File

@ -178,7 +178,7 @@ class FakeProjectManager(object):
def __init__(self):
self._projects = {k: FakeProject(k, v)
for k, v in TEST_TENANT_NAMES.items()}
for k, v in list(TEST_TENANT_NAMES.items())}
def list(self):
return list(self._projects.values())
@ -5437,7 +5437,7 @@ class TestPortBinding(ApicAimTestCase):
net1 = self._make_network(self.fmt, 'net1', True,
arg_list=self.extension_attributes,
**{'apic:svi': 'True',
'provider:network_type': u'vlan',
'provider:network_type': 'vlan',
'apic:bgp_enable': 'True',
'apic:bgp_asn': '2'})['network']
@ -5562,7 +5562,7 @@ class TestPortBinding(ApicAimTestCase):
net1 = self._make_network(self.fmt, 'net1', True,
arg_list=self.extension_attributes,
**{'apic:svi': 'True',
'provider:network_type': u'vlan',
'provider:network_type': 'vlan',
'apic:bgp_enable': 'True',
'apic:bgp_asn': '2'})['network']
@ -5764,7 +5764,7 @@ class TestPortBinding(ApicAimTestCase):
net = self._make_network(self.fmt, 'net1', True,
arg_list=self.extension_attributes,
**{'apic:svi': 'True', 'provider:network_type': u'vlan'})
**{'apic:svi': 'True', 'provider:network_type': 'vlan'})
self._make_subnet(self.fmt, net, '10.0.1.1', '10.0.1.0/24')
port = self._make_port(self.fmt, net['network']['id'])['port']
@ -5873,14 +5873,14 @@ class TestPortBinding(ApicAimTestCase):
self._test_bind_baremetal()
def test_bind_baremetal_vlan(self):
self._test_bind_baremetal(network_type=u'vlan', physnet=u'physnet2')
self._test_bind_baremetal(network_type='vlan', physnet='physnet2')
def test_bind_baremetal_vlan_svi(self):
self._test_bind_baremetal(network_type=u'vlan',
is_svi=True, physnet=u'physnet2')
self._test_bind_baremetal(network_type='vlan',
is_svi=True, physnet='physnet2')
def _test_bind_baremetal(self, network_type=u'opflex', is_svi=False,
physnet=u'physnet1'):
def _test_bind_baremetal(self, network_type='opflex', is_svi=False,
physnet='physnet1'):
# Do positive and negative port binding testing, using the
# different information in the binding profile.
def validate_binding(port):
@ -6123,11 +6123,11 @@ class TestPortBinding(ApicAimTestCase):
**kwargs)['port']
if parent_net_type == 'opflex':
access_vlan = self._check_binding(parent_port['id'],
expected_binding_info=[(u'apic_aim', u'opflex'),
(u'apic_aim', u'vlan')])
expected_binding_info=[('apic_aim', 'opflex'),
('apic_aim', 'vlan')])
else:
access_vlan = self._check_binding(parent_port['id'],
expected_binding_info=[(u'apic_aim', u'vlan')])
expected_binding_info=[('apic_aim', 'vlan')])
self.assertEqual(access_vlan,
net1['network']['provider:segmentation_id'])
epg = self._net_2_epg(net1['network'])
@ -6158,14 +6158,14 @@ class TestPortBinding(ApicAimTestCase):
bottom_bound_physnet = baremetal_physnet
if subport_net_type == 'vlan':
if inherit:
expected_binding_info = [(u'apic_aim', u'vlan')]
expected_binding_info = [('apic_aim', 'vlan')]
bottom_bound_physnet = subport_physnet
else:
expected_binding_info = [(u'apic_aim', u'vlan'),
(u'apic_aim', u'vlan')]
expected_binding_info = [('apic_aim', 'vlan'),
('apic_aim', 'vlan')]
else:
expected_binding_info = [(u'apic_aim', u'opflex'),
(u'apic_aim', u'vlan')]
expected_binding_info = [('apic_aim', 'opflex'),
('apic_aim', 'vlan')]
self._check_binding(subport_net1_port['id'],
top_bound_physnet=subport_physnet,
bottom_bound_physnet=bottom_bound_physnet,
@ -6325,11 +6325,11 @@ class TestPortBinding(ApicAimTestCase):
access_vlan = self._check_binding(parent_port['id'],
top_bound_physnet=baremetal_physnet,
bottom_bound_physnet=baremetal_physnet,
expected_binding_info=[(u'apic_aim', u'opflex'),
(u'apic_aim', u'vlan')])
expected_binding_info=[('apic_aim', 'opflex'),
('apic_aim', 'vlan')])
else:
access_vlan = self._check_binding(parent_port['id'],
expected_binding_info=[(u'apic_aim', u'vlan')])
expected_binding_info=[('apic_aim', 'vlan')])
self.assertEqual(access_vlan,
net1['network']['provider:segmentation_id'])
self.assertEqual(kwargs['binding:profile'],
@ -7710,7 +7710,7 @@ class TestExtensionAttributes(ApicAimTestCase):
new_resources = []
for res in resources:
res_dict = {}
for k, v in res.members.items():
for k, v in list(res.members.items()):
if k in res.user_attributes():
if isinstance(v, list):
v = v.sort() or []
@ -12080,7 +12080,7 @@ class TestOpflexRpc(ApicAimTestCase):
path='topology/pod-1/paths-102/pathep-[eth1/8]')
self.aim_mgr.create(aim_ctx, hlink_1)
kwargs = {'provider:network_type': u'vlan'}
kwargs = {'provider:network_type': 'vlan'}
if apic_svi:
kwargs.update({'apic:svi': 'True'})
@ -12126,7 +12126,7 @@ class TestOpflexRpc(ApicAimTestCase):
path='topology/pod-1/paths-102/pathep-[eth1/8]')
self.aim_mgr.create(aim_ctx, hlink_1)
kwargs = {'provider:network_type': u'vlan'}
kwargs = {'provider:network_type': 'vlan'}
if apic_svi:
kwargs.update({'apic:svi': 'True'})

View File

@ -70,18 +70,18 @@ if six.PY3:
unicode = str
ML2PLUS_PLUGIN = 'gbpservice.neutron.plugins.ml2plus.plugin.Ml2PlusPlugin'
DEFAULT_FILTER_ENTRY = {'arp_opcode': u'unspecified',
'dest_from_port': u'unspecified',
'dest_to_port': u'unspecified',
'ether_type': u'unspecified',
DEFAULT_FILTER_ENTRY = {'arp_opcode': 'unspecified',
'dest_from_port': 'unspecified',
'dest_to_port': 'unspecified',
'ether_type': 'unspecified',
'fragment_only': False,
'icmpv4_type': u'unspecified',
'icmpv6_type': u'unspecified',
'ip_protocol': u'unspecified',
'source_from_port': u'unspecified',
'source_to_port': u'unspecified',
'icmpv4_type': 'unspecified',
'icmpv6_type': 'unspecified',
'ip_protocol': 'unspecified',
'source_from_port': 'unspecified',
'source_to_port': 'unspecified',
'stateful': False,
'tcp_flags': u'unspecified'}
'tcp_flags': 'unspecified'}
AGENT_TYPE = ocst.AGENT_TYPE_OPFLEX_OVS
AGENT_CONF = {'alive': True, 'binary': 'somebinary',
'topic': 'sometopic', 'agent_type': AGENT_TYPE,
@ -110,7 +110,7 @@ CONS = 'apic:external_consumed_contracts'
def aim_object_to_dict(obj):
result = {}
for key, value in obj.__dict__.items():
for key, value in list(obj.__dict__.items()):
if key in obj.user_attributes():
result[key] = value
return result
@ -2306,7 +2306,7 @@ class TestPolicyTargetGroupIpv4(AIMBaseTestCase):
# Verify that implicit subnetpools exist for each address family,
# and that the PTG was allocated a subnet with a prefix from
# each address family
for ip_version in self.ip_dict.keys():
for ip_version in list(self.ip_dict.keys()):
family_subnets = []
for subnet_id in ptg['subnets']:
req = self.new_show_request('subnets', subnet_id, fmt=self.fmt)
@ -2343,7 +2343,7 @@ class TestPolicyTargetGroupIpv4(AIMBaseTestCase):
self._verify_implicit_subnets_in_ptg(ptg, l3p)
self._test_policy_target_group_aim_mappings(ptg, prs_lists, l2p,
num_address_families=len(self.ip_dict.keys()))
num_address_families=len(list(self.ip_dict.keys())))
new_name = 'new name'
new_prs_lists = self._get_provided_consumed_prs_lists()
@ -2355,7 +2355,7 @@ class TestPolicyTargetGroupIpv4(AIMBaseTestCase):
'scope'})['policy_target_group']
self._test_policy_target_group_aim_mappings(ptg, new_prs_lists, l2p,
num_address_families=len(self.ip_dict.keys()))
num_address_families=len(list(self.ip_dict.keys())))
self.delete_policy_target_group(ptg_id, expected_res_status=204)
self.show_policy_target_group(ptg_id, expected_res_status=404)
@ -2386,7 +2386,7 @@ class TestPolicyTargetGroupIpv4(AIMBaseTestCase):
self.show_l2_policy(ptg['l2_policy_id'], expected_res_status=200)
self._verify_implicit_subnets_in_ptg(ptg)
self._validate_router_interface_created(
num_address_families=len(self.ip_dict.keys()))
num_address_families=len(list(self.ip_dict.keys())))
ptg_name = ptg['name']
aim_epg_name = self.driver.apic_epg_name_for_policy_target_group(
@ -2451,7 +2451,7 @@ class TestPolicyTargetGroupIpv4(AIMBaseTestCase):
def _create_explicit_subnetpools(self):
vrf_dn = None
for ip_version in self.ip_dict.keys():
for ip_version in list(self.ip_dict.keys()):
ascp = self._make_address_scope_for_vrf(vrf_dn,
ip_version, name='as1v' + str(ip_version))
ascp = ascp['address_scope']
@ -2472,24 +2472,24 @@ class TestPolicyTargetGroupIpv4(AIMBaseTestCase):
def test_create_ptg_explicit_subnetpools(self):
self._create_explicit_subnetpools()
kwargs = {'name': "l3p1", 'ip_pool': None}
for ip_version in self.ip_dict.keys():
for ip_version in list(self.ip_dict.keys()):
kwargs[self.ip_dict[ip_version]['subnetpools_id_key']] = [sp['id']
for sp in self.ip_dict[ip_version]['subnetpools']]
if len(self.ip_dict.keys()) == 1:
if len(list(self.ip_dict.keys())) == 1:
kwargs['ip_version'] = list(self.ip_dict.keys())[0]
else:
kwargs['ip_version'] = 46
l3p = self.create_l3_policy(**kwargs)['l3_policy']
for ip_version in self.ip_dict.keys():
for ip_version in list(self.ip_dict.keys()):
self.assertEqual(self.ip_dict[ip_version]['address_scope']['id'],
l3p[self.ip_dict[ip_version]['address_scope_id_key']])
subnetpool_prefixes = []
for ip_version in self.ip_dict.keys():
for ip_version in list(self.ip_dict.keys()):
cidrlist = self.ip_dict[ip_version]['cidrs']
subnetpool_prefixes.extend([cidr for cidr, _ in cidrlist])
self._validate_create_l3_policy(
l3p, subnetpool_prefixes=subnetpool_prefixes)
for ip_version in self.ip_dict.keys():
for ip_version in list(self.ip_dict.keys()):
sp_key = self.ip_dict[ip_version]['subnetpools_id_key']
self.assertEqual(len(self.ip_dict[ip_version]['subnetpools']),
len(l3p[sp_key]))
@ -2540,14 +2540,14 @@ class TestPolicyTargetGroupIpv4(AIMBaseTestCase):
# Implicitly created subnet should not be deleted
self._verify_implicit_subnets_in_ptg(ptg)
self._validate_router_interface_created(
num_address_families=len(self.ip_dict.keys()))
num_address_families=len(list(self.ip_dict.keys())))
def test_delete_ptg_after_router_interface_delete(self):
ptg = self.create_policy_target_group(
name="ptg1")['policy_target_group']
ptg_id = ptg['id']
self._validate_router_interface_created(
num_address_families=len(self.ip_dict.keys()))
num_address_families=len(list(self.ip_dict.keys())))
router_id = self._l3_plugin.get_routers(self._context)[0]['id']
subnet_id = self._plugin.get_subnets(self._context)[0]['id']
@ -4050,8 +4050,8 @@ class TestPolicyRuleBase(AIMBaseTestCase):
self.assertItemsEqual(
aim_object_to_dict(expected_filter_entry),
# special processing to convert unicode to str
dict((str(k), str(v)) for k, v in aim_object_to_dict(
filter_entry).items()))
dict((str(k), str(v)) for k, v in list(aim_object_to_dict(
filter_entry).items())))
def _validate_1_to_many_reverse_filter_entries(
self, policy_rule, afilter, filter_entries):
@ -4059,7 +4059,7 @@ class TestPolicyRuleBase(AIMBaseTestCase):
policy_rule['policy_classifier_id'])['policy_classifier']
expected_entries = alib.get_filter_entries_for_policy_classifier(pc)
for e_name, value in expected_entries['reverse_rules'].items():
for e_name, value in list(expected_entries['reverse_rules'].items()):
expected_filter_entry = self.driver._aim_filter_entry(
self._neutron_context.session, afilter, e_name,
alib.map_to_aim_filter_entry(value))
@ -4069,8 +4069,8 @@ class TestPolicyRuleBase(AIMBaseTestCase):
self.assertItemsEqual(
aim_object_to_dict(expected_filter_entry),
# special processing to convert unicode to str
dict((str(k), str(v)) for k, v in aim_object_to_dict(
filter_entry).items()))
dict((str(k), str(v)) for k, v in list(aim_object_to_dict(
filter_entry).items())))
def _test_policy_rule_aim_mapping(self, policy_rule):
aim_filter_name = str(self.name_mapper.policy_rule(

View File

@ -55,7 +55,7 @@ NEW_STATUS_DETAILS = 'new_status_details'
def get_status_for_test(self, context):
resource_name = [item for item in context.__dict__.keys()
resource_name = [item for item in list(context.__dict__.keys())
if item.startswith('_original')][0][len('_original'):]
getattr(context, resource_name)['status'] = NEW_STATUS
getattr(context, resource_name)['status_details'] = NEW_STATUS_DETAILS
@ -70,7 +70,7 @@ class GroupPolicyPluginTestBase(tgpmdb.GroupPolicyMappingDbTestCase):
gp_plugin = GP_PLUGIN_KLASS
ml2_opts = ml2_options or {'mechanism_drivers': ['openvswitch'],
'extension_drivers': ['port_security']}
for opt, val in ml2_opts.items():
for opt, val in list(ml2_opts.items()):
cfg.CONF.set_override(opt, val, 'ml2')
core_plugin = core_plugin or test_plugin.PLUGIN_NAME
super(GroupPolicyPluginTestBase, self).setUp(core_plugin=core_plugin,

View File

@ -12,7 +12,6 @@
# limitations under the License.
import copy
import itertools
from unittest import mock
@ -194,11 +193,11 @@ class ResourceMappingTestCase(test_plugin.GroupPolicyPluginTestCase):
# attributes containing a colon should be passed with
# a double underscore
try:
new_args = dict(itertools.izip(map(lambda x: x.replace('__', ':'),
kwargs), kwargs.values()))
new_args = dict(zip([x.replace('__', ':')
for x in kwargs], list(kwargs.values())))
except AttributeError:
new_args = dict(zip(map(lambda x: x.replace('__', ':'),
kwargs), kwargs.values()))
new_args = dict(list(zip([x.replace('__', ':')
for x in kwargs], list(kwargs.values()))))
arg_list = new_args.pop('arg_list', ()) + (external_net.EXTERNAL,)
return super(ResourceMappingTestCase, self)._create_network(
fmt, name, admin_state_up, arg_list=arg_list, **new_args)

View File

@ -12,7 +12,6 @@
# limitations under the License.
import copy
import itertools
from unittest import mock
@ -91,7 +90,7 @@ class HeatNodeDriverTestCase(
"description": "Haproxy pool from template",
"lb_algorithm": "ROUND_ROBIN",
"protocol": "HTTP",
'listener': {u'get_resource': u'listener'},
'listener': {'get_resource': 'listener'},
}
},
"test_listener": {
@ -156,9 +155,8 @@ class HeatNodeDriverTestCase(
"""Override the routine for allowing the router:external attribute."""
# attributes containing a colon should be passed with
# a double underscore
new_args = dict(itertools.izip(map(lambda x: x.replace('__', ':'),
kwargs),
kwargs.values()))
new_args = dict(zip([x.replace('__', ':') for x in kwargs],
list(kwargs.values())))
arg_list = new_args.pop('arg_list', ()) + (external_net.EXTERNAL,)
return super(HeatNodeDriverTestCase, self)._create_network(
fmt, name, admin_state_up, arg_list=arg_list, **new_args)
@ -242,7 +240,7 @@ class TestServiceChainInstance(HeatNodeDriverTestCase):
'admin_state_up': True,
'address': member_ip,
'protocol_port': {'get_param': 'app_port'},
'pool': {'Ref': u'test_pool'}
'pool': {'Ref': 'test_pool'}
}
}
}
@ -349,7 +347,7 @@ class TestServiceChainInstance(HeatNodeDriverTestCase):
self.delete_policy_target(pt['id'])
template_on_delete_pt = copy.deepcopy(expected_stack_template)
template_on_delete_pt['Resources'].pop(pool_member.keys()[0])
template_on_delete_pt['Resources'].pop(list(pool_member.keys())[0])
expected_stack_id = stack_id
expected_stack_params = {}
stack_update.assert_called_once_with(

View File

@ -53,7 +53,7 @@ class NFPException(Exception):
except AttributeError:
pass
for k, v in self.kwargs.items():
for k, v in list(self.kwargs.items()):
if isinstance(v, Exception):
self.kwargs[k] = six.text_type(v)
@ -66,7 +66,7 @@ class NFPException(Exception):
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception('Exception in string format operation')
for name, value in kwargs.items():
for name, value in list(kwargs.items()):
LOG.error("%(name)s: %(value)s",
{'name': name, 'value': value})
if CONF.fatal_exception_format_errors:

View File

@ -39,8 +39,8 @@ def _name(obj):
# If it is callable, then it is a method
if callable(obj):
return "{0}.{1}.{2}".format(
type(obj.im_self).__module__,
type(obj.im_self).__name__,
type(obj.__self__).__module__,
type(obj.__self__).__name__,
obj.__name__)
# If obj is of type class
elif _is_class(obj):

View File

@ -69,9 +69,9 @@ def init_log_context():
def init(data=None):
if not data:
data = {}
if 'log_context' not in data.keys():
if 'log_context' not in list(data.keys()):
data['log_context'] = init_log_context()
if 'event_desc' not in data.keys():
if 'event_desc' not in list(data.keys()):
data['event_desc'] = {}
Context.context = NfpContext(data)
context = getattr(Context, 'context')

View File

@ -139,7 +139,7 @@ class NfpService(object):
return event
# REVISIT (mak): spacing=0, caller must explicitly specify
def poll_event(self, event, spacing=2, max_times=sys.maxint):
def poll_event(self, event, spacing=2, max_times=sys.maxsize):
"""To poll for an event.
As a base class, it only does the polling
@ -441,7 +441,7 @@ class NfpController(nfp_launcher.NfpLauncher, NfpService):
def report_state(self):
"""Invoked by report_task to report states of all agents. """
for value in self._rpc_agents.values():
for value in list(self._rpc_agents.values()):
for agent in value['agents']:
agent.report_state()
@ -457,7 +457,7 @@ class NfpController(nfp_launcher.NfpLauncher, NfpService):
graph_nodes = []
for parent, childs in six.iteritems(graph):
puuid = parent.desc.uuid
assert puuid not in graph_sig.keys(), (
assert puuid not in list(graph_sig.keys()), (
"Event - %s is already root of subgraph - %s" % (
puuid, str(graph_sig[puuid])))
graph_sig[puuid] = []
@ -516,7 +516,7 @@ class NfpController(nfp_launcher.NfpLauncher, NfpService):
LOG.debug(message)
self._manager.process_events([event])
def poll_event(self, event, spacing=2, max_times=sys.maxint):
def poll_event(self, event, spacing=2, max_times=sys.maxsize):
"""Post a poll event into the system.
Core will poll for this event to timeout, after

View File

@ -244,7 +244,7 @@ class NfpEventHandlers(object):
self._event_desc_table[event_id]['modules'][module][0][3])
else:
priorities = (
self._event_desc_table[event_id]['priority'].keys())
list(self._event_desc_table[event_id]['priority'].keys()))
priority = max(priorities)
eh = (
self._event_desc_table[
@ -268,7 +268,7 @@ class NfpEventHandlers(object):
event_id]['modules'][module][0][2]
else:
priorities = (
self._event_desc_table[event_id]['priority'].keys())
list(self._event_desc_table[event_id]['priority'].keys()))
priority = max(priorities)
ph = (
self._event_desc_table[

View File

@ -110,7 +110,7 @@ class TaskExecutor(object):
job.pop('thread')
job['result'] = result
if 'result_store' in job.keys():
if 'result_store' in list(job.keys()):
job['result_store']['result'] = result
done_jobs = self.pipe_line[:]
@ -147,7 +147,7 @@ class EventGraphExecutor(object):
self.running = {}
def add(self, graph):
assert graph['id'] not in self.running.keys(), "Graph - %s \
assert graph['id'] not in list(self.running.keys()), "Graph - %s \
is already running" % (graph['id'])
graph['results'] = dict.fromkeys(graph['data'])
self.running[graph['id']] = graph
@ -197,7 +197,7 @@ class EventGraphExecutor(object):
self.manager._scheduled_new_event(event)
def _graph(self, node):
for graph in self.running.values():
for graph in list(self.running.values()):
root = self._root(graph, node)
if root:
return graph

View File

@ -292,7 +292,7 @@ class NfpResourceManager(NfpProcessManager, NfpEventManager):
# event, then worker would not be pre-assigned.
# In such case, assign a random worker
if not event.desc.worker:
event.desc.worker = self._resource_map.keys()[0]
event.desc.worker = list(self._resource_map.keys())[0]
event.lifetime = event.desc.poll_desc.spacing
self._watchdog(event, handler=self._poll_timedout)
else:

View File

@ -44,7 +44,7 @@ class _Meta(type):
except AttributeError:
cls._poll_desc_table = {}
for value in cls.__dict__.values():
for value in list(cls.__dict__.values()):
if getattr(value, '_desc', False):
desc = value
cls._poll_desc_table[desc._event] = desc

View File

@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import httplib
import http.client
import socket
import zlib
@ -30,13 +30,13 @@ class RestClientException(exceptions.Exception):
""" RestClient Exception """
class UnixHTTPConnection(httplib.HTTPConnection):
class UnixHTTPConnection(http.client.HTTPConnection):
"""Connection class for HTTP over UNIX domain socket."""
def __init__(self, host, port=None, strict=None, timeout=None,
proxy_info=None):
httplib.HTTPConnection.__init__(self, host, port, strict)
http.client.HTTPConnection.__init__(self, host, port, strict)
self.timeout = timeout
self.socket_path = '/var/run/uds_socket'

View File

@ -202,7 +202,7 @@ def send_request_to_configurator(conf, context, body,
method_name = 'network_function_event'
else:
if (body['config'][0]['resource'] in
nfp_constants.CONFIG_TAG_RESOURCE_MAP.values()):
list(nfp_constants.CONFIG_TAG_RESOURCE_MAP.values())):
body['config'][0]['resource_data'].update(
{'neutron_context': context.to_dict()})
body['info']['context'].update(

View File

@ -94,7 +94,7 @@ class CommonDbMixin(object):
def _fields(self, resource, fields):
if fields:
return dict(((key, item) for key, item in resource.items()
return dict(((key, item) for key, item in list(resource.items())
if key in fields))
return resource

View File

@ -58,7 +58,7 @@ class NFPDbBase(object):
network_function_db = self._get_network_function(
session, network_function_id)
network_function_db.update(updated_network_function)
if 'status' in updated_network_function.keys():
if 'status' in list(updated_network_function.keys()):
updated_network_function_map = {
'status': updated_network_function['status']
}

View File

@ -894,7 +894,7 @@ class OrchestrationDriver(object):
gcm.retry(gbp_cli.delete_l2_policy, token,
ptg['l2_policy_id'])
if ('consumer' not in device_data.keys() or not device_data[
if ('consumer' not in list(device_data.keys()) or not device_data[
'consumer'].get('ptg')):
return

View File

@ -610,7 +610,7 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
t_ports = []
for ptg in [consumer, provider]:
if (port_type in ptg.keys()) and ptg[port_type]:
if (port_type in list(ptg.keys())) and ptg[port_type]:
t_ports.append({
'id': ptg[port_type].get('id'),
'port_classification': ptg.get(

View File

@ -10,11 +10,11 @@
# License for the specific language governing permissions and limitations
# under the License.
import commands
import logging
import os
import re
import string
import subprocess
import sys
import six
@ -75,11 +75,11 @@ class Gbp_Config(object):
if cmd_val == 2:
cmd = 'gbp policy-action-update ' + str(name_uuid)
# Build the cmd string for optional/non-default args/values
for arg, value in kwargs.items():
for arg, value in list(kwargs.items()):
cmd = cmd + " --" + ("%s %s" % (arg, value))
_log.info(cmd)
# Execute the policy-action-config-cmd
cmd_out = commands.getoutput(cmd)
cmd_out = subprocess.getoutput(cmd)
_log.info(cmd_out)
# Catch for non-exception error strings, even though try clause
# succeeded
@ -111,11 +111,11 @@ class Gbp_Config(object):
if cmd_val == 2:
cmd = 'gbp policy-classifier-update ' + str(classifier_name)
# Build the cmd string for optional/non-default args/values
for arg, value in kwargs.items():
for arg, value in list(kwargs.items()):
cmd = cmd + " --" + "%s %s" % (arg, value)
# Execute the policy-classifier-config-cmd
cmd_out = commands.getoutput(cmd)
cmd_out = subprocess.getoutput(cmd)
# Catch for non-exception error strings, even though try clause
# succeeded
if self.cmd_error_check(cmd_out) == 0:
@ -167,7 +167,7 @@ class Gbp_Config(object):
if cmd_val == 2:
cmd = 'gbp %s-update ' % cfgobj_dict[cfgobj] + str(name_uuid)
# Build the cmd string for optional/non-default args/values
for arg, value in kwargs.items():
for arg, value in list(kwargs.items()):
if arg.startswith('_'):
# Parameter not supported by CLI, leave it as is
arg = arg[1:]
@ -177,7 +177,7 @@ class Gbp_Config(object):
cmd = cmd + " --" + "%s=%s" % (arg, value)
_log.info(cmd)
# Execute the cmd
cmd_out = commands.getoutput(cmd)
cmd_out = subprocess.getoutput(cmd)
# Catch for non-exception error strings, even though try clause
# succeeded
if self.cmd_error_check(cmd_out) == 0:
@ -257,7 +257,7 @@ class Gbp_Config(object):
cmd = cmd + " --" + ("%s %s" % (arg, value))
_log.info(cmd)
# Execute the update cmd
cmd_out = commands.getoutput(cmd)
cmd_out = subprocess.getoutput(cmd)
# Catch for non-exception error strings, even though try clause
# succeeded
if self.cmd_error_check(cmd_out) == 0:
@ -287,7 +287,7 @@ class Gbp_Config(object):
raise KeyError
# Build the command with mandatory params
cmd = 'gbp %s-list -c id ' % cfgobj_dict[cfgobj]
cmd_out = commands.getoutput(cmd)
cmd_out = subprocess.getoutput(cmd)
_out = cmd_out.split('\n')
final_out = _out[3:len(_out) - 1]
_log.info("\nThe Policy Object %s to be deleted = \n%s" % (
@ -295,7 +295,7 @@ class Gbp_Config(object):
for item in final_out:
item = item.strip(' |')
cmd = 'gbp %s-delete ' % cfgobj_dict[cfgobj] + str(item)
cmd_out = commands.getoutput(cmd)
cmd_out = subprocess.getoutput(cmd)
_log.info(cmd_out)
return 1
@ -337,7 +337,7 @@ class Gbp_Config(object):
' --servicetype ' + service)
_log.info(cmd)
# Execute the policy-rule-config-cmd
cmd_out = commands.getoutput(cmd)
cmd_out = subprocess.getoutput(cmd)
# Catch for non-exception error strings, even though try clause
# succeeded
@ -374,13 +374,13 @@ class Gbp_Config(object):
if cmd_val == 2:
cmd = 'neutron %s-update ' % cfgobj_dict[cfg_obj] + str(name_uuid)
# Build the cmd string for optional/non-default args/values
for arg, value in kwargs.items():
for arg, value in list(kwargs.items()):
if '_' in arg:
arg = arg.replace('_', '-')
cmd = cmd + " --" + "".join('%s=%s' % (arg, value))
_log.info(cmd)
# Execute the cmd
cmd_out = commands.getoutput(cmd)
cmd_out = subprocess.getoutput(cmd)
# Catch for non-exception error strings, even though try clause
# succeeded

View File

@ -10,9 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
import commands
import logging
import re
import subprocess
import yaml
@ -24,7 +24,7 @@ _log = logging.getLogger()
_log.setLevel(logging.INFO)
orig_getoutput = commands.getoutput
orig_getoutput = subprocess.getoutput
def getoutput(cmd):
@ -34,7 +34,7 @@ def getoutput(cmd):
return cmd_out
commands.getoutput = getoutput
subprocess.getoutput = getoutput
class Gbp_Verify(object):
@ -71,7 +71,7 @@ class Gbp_Verify(object):
cmd = "gbp policy-action-show " + str(action_name)
# Execute the policy-action-verify-cmd
cmd_out = commands.getoutput(cmd)
cmd_out = subprocess.getoutput(cmd)
# Catch for non-exception error strings, even though try clause
# succeeded
@ -93,7 +93,7 @@ class Gbp_Verify(object):
# If try clause succeeds for "verify" cmd then parse the cmd_out to
# match the user-fed expected attributes & their values
if cmd_val == 1:
for arg, val in kwargs.items():
for arg, val in list(kwargs.items()):
if re.search("\\b%s\\b\s+\| \\b%s\\b.*" %
(arg, val), cmd_out, re.I) is None:
_log.info(cmd_out)
@ -125,7 +125,7 @@ class Gbp_Verify(object):
if cmd_val == 1:
cmd = "gbp policy-classifier-show " + str(classifier_name)
# Execute the policy-classifier-verify-cmd
cmd_out = commands.getoutput(cmd)
cmd_out = subprocess.getoutput(cmd)
# Catch for non-exception error strings, even though try clause
# succeeded
@ -149,7 +149,7 @@ class Gbp_Verify(object):
# If try clause succeeds for "verify" cmd then parse the cmd_out to
# match the user-fed expected attributes & their values
if cmd_val == 1:
for arg, val in kwargs.items():
for arg, val in list(kwargs.items()):
if re.search("\\b%s\\b\s+\| \\b%s\\b.*" %
(arg, val), cmd_out, re.I) is None:
_log.info(cmd_out)
@ -194,7 +194,7 @@ class Gbp_Verify(object):
if cmd_val == 1:
cmd = 'gbp %s-show ' % verifyobj_dict[verifyobj] + str(name_uuid)
# Execute the policy-object-verify-cmd
cmd_out = commands.getoutput(cmd)
cmd_out = subprocess.getoutput(cmd)
# Catch for non-exception error strings
for err in self.err_strings:
if re.search('\\b%s\\b' % (err), cmd_out, re.I):
@ -219,7 +219,7 @@ class Gbp_Verify(object):
# If "verify" cmd succeeds then parse the cmd_out to match the user-fed
# expected attributes & their values
if cmd_val == 1:
for arg, val in kwargs.items():
for arg, val in list(kwargs.items()):
if re.search("\\b%s\\b\s+\| \\b%s\\b.*" %
(arg, val), cmd_out, re.I) is None:
_log.info(cmd_out)
@ -262,7 +262,7 @@ class Gbp_Verify(object):
if cmd_val == 1:
cmd = 'gbp %s-show ' % verifyobj_dict[verifyobj] + str(name_uuid)
# Execute the policy-object-verify-cmd
cmd_out = commands.getoutput(cmd)
cmd_out = subprocess.getoutput(cmd)
# _log.info(cmd_out)
# Catch for non-exception error strings
for err in self.err_strings:
@ -288,7 +288,7 @@ class Gbp_Verify(object):
# If "verify" succeeds cmd then parse the cmd_out to match the user-fed
# expected attributes & their values
if cmd_val == 1 and ret == 'default':
for arg, val in kwargs.items():
for arg, val in list(kwargs.items()):
if re.search("\\b%s\\b\s+\| \\b%s\\b.*" %
(arg, val), cmd_out, re.I) is None:
# incase of attribute has more than one value then
@ -317,7 +317,7 @@ class Gbp_Verify(object):
rtrid = match.group(1)
return rtrid.rstrip()
elif cmd_val == 1:
for arg, val in kwargs.items():
for arg, val in list(kwargs.items()):
if arg == 'network_service_params':
if re.findall('(%s)' % (val), cmd_out) == []:
_log.info(cmd_out)
@ -353,7 +353,7 @@ class Gbp_Verify(object):
cmd = 'neutron %s-show ' % verifyobj + str(name_uuid)
_log.info('Neutron Cmd == %s\n' % (cmd))
# Execute the policy-object-verify-cmd
cmd_out = commands.getoutput(cmd)
cmd_out = subprocess.getoutput(cmd)
_log.info(cmd_out)
# Catch for non-exception error strings
for err in self.err_strings:
@ -369,7 +369,7 @@ class Gbp_Verify(object):
return match.group(1).rstrip()
else:
return 0
for arg, val in kwargs.items():
for arg, val in list(kwargs.items()):
if isinstance(val, list): # More than 1 value is to be verified
for i in val:
if cmd_out.find(i) == -1:
@ -415,7 +415,7 @@ class Gbp_Verify(object):
cmd = ('gbp %s-show ' % verifyobj_dict[verifyobj] +
str(name_uuid) + ' -F %s' % (attr))
# Execute the policy-object-verify-cmd
cmd_out = commands.getoutput(cmd)
cmd_out = subprocess.getoutput(cmd)
# Catch for non-exception error strings
for err in self.err_strings:
if re.search('\\b%s\\b' % (err), cmd_out, re.I):
@ -446,10 +446,10 @@ class Gbp_Verify(object):
# heat template
outputs_dict = heat_conf["outputs"]
print(outputs_dict)
for key in outputs_dict.keys():
for key in list(outputs_dict.keys()):
cmd = 'heat stack-show %s | grep -B 2 %s' % (heat_stack_name, key)
print(cmd)
cmd_out = commands.getoutput(cmd)
cmd_out = subprocess.getoutput(cmd)
print(cmd_out)
match = re.search('\"\\boutput_value\\b\": \"(.*)\"',
cmd_out, re.I)

View File

@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import commands
import subprocess
import sys
@ -24,7 +24,7 @@ def main():
cmd_list = ["sudo sh -c 'cat /dev/null > test_results_admin.txt'",
"sudo chmod 777 test_results_admin.txt "]
for cmd in cmd_list:
commands.getoutput(cmd)
subprocess.getoutput(cmd)
test_list = ['tc_gbp_pr_pc_pa_shared_func.py',
'tc_gbp_prs_pr_shared_func.py']
for test in test_list:
@ -35,10 +35,10 @@ def main():
contents = results_file.read()
results_file.close()
print(contents)
print("\n\nTotal Number of Shared Resource TestCases Executed= %s" % (
contents.count("_SHARED_")))
print("\n\nNumber of TestCases Passed= %s" % (contents.count("PASSED")))
print("\n\nNumber of TestCases Failed= %s" % (contents.count("FAILED")))
print(("\n\nTotal Number of Shared Resource TestCases Executed= %s" % (
contents.count("_SHARED_"))))
print(("\n\nNumber of TestCases Passed= %s" % (contents.count("PASSED"))))
print(("\n\nNumber of TestCases Failed= %s" % (contents.count("FAILED"))))
if contents.count("FAILED") > 0:
sys.exit(1)
else:

View File

@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
import commands
import subprocess
import sys
@ -30,7 +29,7 @@ def run_func_neg():
"sudo sh -c 'ls *_neg.py >> func_neg.txt'",
"sudo chmod 777 *"]
for cmd in cmd_list:
commands.getoutput(cmd)
subprocess.getoutput(cmd)
return "func_neg.txt"
@ -43,14 +42,15 @@ def main():
try:
flag = sys.argv[1]
except Exception:
print('%s' % (usage))
print(('%s' % (usage)))
sys.exit(1)
fname = run_func_neg()
num_lines = sum(1 for line in open(fname))
print("\nNumber of Functional Test Scripts to execute = %s" % (num_lines))
print(("\nNumber of Functional Test Scripts to execute = %s" %
(num_lines)))
with open(fname) as f:
for i, l in enumerate(f, 1):
print("Functional Test Script to execute now == %s" % l)
print(("Functional Test Script to execute now == %s" % l))
# Assumption: test-scripts are executable from any location
# Reading the line from text file, also reads trailing \n, hence we
# need to strip
@ -62,10 +62,10 @@ def main():
contents = f.read()
f.close()
print(contents)
print("\n\nTotal Number of TestCases Executed= %s" % (
contents.count("TESTCASE_GBP_")))
print("\n\nNumber of TestCases Passed= %s" % (contents.count("PASSED")))
print("\n\nNumber of TestCases Failed= %s" % (contents.count("FAILED")))
print(("\n\nTotal Number of TestCases Executed= %s" % (
contents.count("TESTCASE_GBP_"))))
print(("\n\nNumber of TestCases Passed= %s" % (contents.count("PASSED"))))
print(("\n\nNumber of TestCases Failed= %s" % (contents.count("FAILED"))))
if contents.count("FAILED") > 0:
sys.exit(1)
else:

View File

@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import commands
import subprocess
import sys
@ -27,7 +27,7 @@ def run_func_neg():
"sudo sh -c 'ls *_neg.py >> func_neg.txt'",
"sudo chmod 777 *"]
for cmd in cmd_list:
commands.getoutput(cmd)
subprocess.getoutput(cmd)
return "func_neg.txt"
@ -36,14 +36,14 @@ def main():
try:
flag = sys.argv[1]
except Exception:
print('%s' % (usage))
print(('%s' % (usage)))
sys.exit(1)
fname = run_func_neg()
num_lines = sum(1 for line in open(fname))
print("\nNumber of Functional Test Scripts to execute = %s" % num_lines)
print(("\nNumber of Functional Test Scripts to execute = %s" % num_lines))
with open(fname) as f:
for i, l in enumerate(f, 1):
print("Functional Test Script to execute now == %s" % l)
print(("Functional Test Script to execute now == %s" % l))
# Assumption: test-scripts are executable from any location
# Reading the line from text file, also reads trailing \n, hence we
# need to strip
@ -55,10 +55,10 @@ def main():
contents = f.read()
f.close()
print(contents)
print("\n\nTotal Number of TestCases Executed= %s" % (
contents.count("TESTCASE_GBP_")))
print("\n\nNumber of TestCases Passed= %s" % (contents.count("PASSED")))
print("\n\nNumber of TestCases Failed= %s" % (contents.count("FAILED")))
print(("\n\nTotal Number of TestCases Executed= %s" % (
contents.count("TESTCASE_GBP_"))))
print(("\n\nNumber of TestCases Passed= %s" % (contents.count("PASSED"))))
print(("\n\nNumber of TestCases Failed= %s" % (contents.count("FAILED"))))
if contents.count("FAILED") > 0:
sys.exit(1)
else:

View File

@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import commands
import logging
import subprocess
import sys
from libs import config_libs
@ -40,7 +40,7 @@ class test_gbp_l2p_func(object):
level=logging.WARNING)
_log = logging.getLogger(__name__)
cmd = 'rm /tmp/test_gbp_l2p_func.log'
commands.getoutput(cmd)
subprocess.getoutput(cmd)
hdlr = logging.FileHandler('/tmp/test_gbp_l2p_func.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)

View File

@ -10,9 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
import commands
import logging
import platform
import subprocess
import sys
from libs import config_libs
@ -45,7 +45,7 @@ class test_gbp_l3p_func(object):
level=logging.WARNING)
_log = logging.getLogger(__name__)
cmd = 'rm /tmp/test_gbp_l3p_func.log'
commands.getoutput(cmd)
subprocess.getoutput(cmd)
hdlr = logging.FileHandler('/tmp/test_gbp_l3p_func.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)

View File

@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import commands
import logging
import subprocess
import sys
from libs import config_libs
@ -47,7 +47,7 @@ class test_gbp_l3p_neg(object):
level=logging.WARNING)
_log = logging.getLogger(__name__)
cmd = 'rm /tmp/test_gbp_l3p_neg.log'
commands.getoutput(cmd)
subprocess.getoutput(cmd)
hdlr = logging.FileHandler('/tmp/test_gbp_l3p_neg.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)

View File

@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import commands
import logging
import subprocess
import sys
from libs import config_libs
@ -42,7 +42,7 @@ class test_gbp_nsp_func(object):
level=logging.WARNING)
_log = logging.getLogger(__name__)
cmd = 'rm /tmp/test_gbp_nsp_func.log'
commands.getoutput(cmd)
subprocess.getoutput(cmd)
hdlr = logging.FileHandler('/tmp/test_gbp_nsp_func.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)

View File

@ -10,10 +10,10 @@
# License for the specific language governing permissions and limitations
# under the License.
import commands
import logging
import os
import re
import subprocess
import sys
from libs import config_libs
@ -39,7 +39,7 @@ class test_gbp_pa_func(object):
level=logging.WARNING)
_log = logging.getLogger(__name__)
cmd = 'rm /tmp/test_gbp_pa_func.log'
commands.getoutput(cmd)
subprocess.getoutput(cmd)
hdlr = logging.FileHandler('/tmp/test_gbp_pa_func.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
@ -197,7 +197,7 @@ class test_gbp_pa_func(object):
"UPdating Polic Action")
spec_cr_cmd = ('gbp servicechain-spec-create demo_spec | grep id | '
'head -1')
cmd_out = commands.getoutput(spec_cr_cmd)
cmd_out = subprocess.getoutput(spec_cr_cmd)
spec_id = re.search("\\bid\\b\s+\| (.*) \|", cmd_out, re.I).group(1)
self._log.info(
'\n##Step 2: Update Policy Action Attributes name and '
@ -234,7 +234,7 @@ class test_gbp_pa_func(object):
return 0
self._log.info("\n## Step 3A: Now delete the service chain spec")
spec_del_cmd = 'gbp servicechain-spec-delete %s' % (spec_id)
cmd_out = commands.getoutput(spec_del_cmd)
cmd_out = subprocess.getoutput(spec_del_cmd)
if self.gbpverify.gbp_action_verify(
1,
'grppol_act',

View File

@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import commands
import logging
import subprocess
import sys
from libs import config_libs
@ -48,7 +48,7 @@ class test_gbp_pa_neg(object):
level=logging.WARNING)
_log = logging.getLogger(__name__)
cmd = 'rm /tmp/test_gbp_pa_neg.log'
commands.getoutput(cmd)
subprocess.getoutput(cmd)
hdlr = logging.FileHandler('/tmp/test_gbp_pa_neg.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)

View File

@ -10,9 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
import commands
import logging
import os
import subprocess
import sys
from libs import config_libs
@ -45,7 +45,7 @@ class test_gbp_pc_func(object):
level=logging.WARNING)
_log = logging.getLogger(__name__)
cmd = 'rm /tmp/test_gbp_pc_func.log'
commands.getoutput(cmd)
subprocess.getoutput(cmd)
hdlr = logging.FileHandler('/tmp/test_gbp_pc_func.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)

View File

@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import commands
import logging
import subprocess
import sys
from libs import config_libs
@ -48,7 +48,7 @@ class test_gbp_pc_neg(object):
level=logging.WARNING)
_log = logging.getLogger(__name__)
cmd = 'rm /tmp/test_gbp_pc_neg.log'
commands.getoutput(cmd)
subprocess.getoutput(cmd)
hdlr = logging.FileHandler('/tmp/test_gbp_pc_neg.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)

View File

@ -10,9 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
import commands
import logging
import os
import subprocess
import sys
from libs import config_libs
@ -49,7 +49,7 @@ class test_gbp_pr_func(object):
level=logging.WARNING)
_log = logging.getLogger(__name__)
cmd = 'rm /tmp/test_gbp_pr_func.log'
commands.getoutput(cmd)
subprocess.getoutput(cmd)
hdlr = logging.FileHandler('/tmp/test_gbp_pr_func.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)

View File

@ -10,9 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
import commands
import logging
import os
import subprocess
import sys
from libs import config_libs
@ -45,7 +45,7 @@ class test_gbp_pr_neg(object):
level=logging.WARNING)
_log = logging.getLogger(__name__)
cmd = 'rm /tmp/test_gbp_pr_neg.log'
commands.getoutput(cmd)
subprocess.getoutput(cmd)
hdlr = logging.FileHandler('/tmp/test_gbp_pr_neg.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)

View File

@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import commands
import logging
import subprocess
import sys
from libs import config_libs
@ -45,7 +45,7 @@ class test_gbp_pr_pc_pa_shared_func(object):
level=logging.WARNING)
_log = logging.getLogger(__name__)
cmd = 'rm /tmp/test_gbp_pr_pc_pa_shared_func.log'
commands.getoutput(cmd)
subprocess.getoutput(cmd)
hdlr = logging.FileHandler('/tmp/test_gbp_pr_pc_pa_shared_func.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)

View File

@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import commands
import logging
import subprocess
import sys
from libs import config_libs
@ -57,7 +57,7 @@ class test_gbp_prs_func(object):
level=logging.WARNING)
_log = logging.getLogger(__name__)
cmd = 'rm /tmp/test_gbp_prs_func.log'
commands.getoutput(cmd)
subprocess.getoutput(cmd)
hdlr = logging.FileHandler('/tmp/test_gbp_prs_func.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)

View File

@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import commands
import logging
import subprocess
import sys
from libs import config_libs
@ -42,7 +42,7 @@ class test_gbp_prs_neg(object):
level=logging.WARNING)
_log = logging.getLogger(__name__)
cmd = 'rm /tmp/test_gbp_prs_neg.log'
commands.getoutput(cmd)
subprocess.getoutput(cmd)
hdlr = logging.FileHandler('/tmp/test_gbp_prs_neg.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)

View File

@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import commands
import logging
import subprocess
import sys
from libs import config_libs
@ -40,7 +40,7 @@ class test_gbp_prs_pr_shared_func(object):
level=logging.WARNING)
_log = logging.getLogger(__name__)
cmd = 'rm /tmp/test_gbp_prs_pr_shared_func.log'
commands.getoutput(cmd)
subprocess.getoutput(cmd)
hdlr = logging.FileHandler('/tmp/test_gbp_prs_pr_shared_func.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)

View File

@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import commands
import logging
import subprocess
import sys
from libs import config_libs
@ -418,7 +418,7 @@ class test_gbp_ptg_func(object):
'\n## Step 3: Delete the neutron port corresponding to the '
'Policy-Target\n')
cmd = 'neutron port-delete %s' % (neutron_port_id)
if self.gbpcfg.cmd_error_check(commands.getoutput(cmd)) == 0:
if self.gbpcfg.cmd_error_check(subprocess.getoutput(cmd)) == 0:
self._log.info(
"\n## Step 3: Deletion of the neutron port corresponding "
"to the Policy-Target = Failed")

View File

@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import commands
import logging
import subprocess
import sys
from libs import config_libs
@ -34,7 +34,7 @@ class test_gbp_ri_func_1(object):
level=logging.WARNING)
_log = logging.getLogger(__name__)
cmd = 'rm /tmp/test_gbp_ri_func_1.log'
commands.getoutput(cmd)
subprocess.getoutput(cmd)
hdlr = logging.FileHandler('/tmp/test_gbp_ri_func_1.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)

View File

@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import commands
import logging
import subprocess
import sys
from libs import config_libs
@ -34,7 +34,7 @@ class test_gbp_ri_func_2(object):
level=logging.WARNING)
_log = logging.getLogger(__name__)
cmd = 'rm /tmp/test_gbp_ri_func_2.log'
commands.getoutput(cmd)
subprocess.getoutput(cmd)
hdlr = logging.FileHandler('/tmp/test_gbp_ri_func_2.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
@ -68,7 +68,7 @@ class test_gbp_ri_func_2(object):
(obj))
if fail != 0:
self._log.info("\n## TESTCASE_GBP_RI_FUNC_2: FAILED")
commands.report_results('test_gbp_ri_func_2', 'test_results.txt')
subprocess.report_results('test_gbp_ri_func_2', 'test_results.txt')  # FIXME(review): neither commands nor subprocess defines report_results; this raises AttributeError — presumably a project helper was intended, verify
sys.exit(1)
def run(self):

View File

@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import commands
import logging
import subprocess
import sys
from libs import config_libs
@ -34,7 +34,7 @@ class test_gbp_ri_func_3(object):
level=logging.WARNING)
_log = logging.getLogger(__name__)
cmd = 'rm /tmp/test_gbp_ri_func_3.log'
commands.getoutput(cmd)
subprocess.getoutput(cmd)
hdlr = logging.FileHandler('/tmp/test_gbp_ri_func_3.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)

View File

@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import commands
import logging
import subprocess
import sys
from libs import config_libs
@ -34,7 +34,7 @@ class test_gbp_ri_func_4(object):
level=logging.WARNING)
_log = logging.getLogger(__name__)
cmd = 'rm /tmp/test_gbp_ri_func_4.log'
commands.getoutput(cmd)
subprocess.getoutput(cmd)
hdlr = logging.FileHandler('/tmp/test_gbp_ri_func_4.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)