Clean up whitespace errors suppressed for flake8

These errors are no longer suppressed:
 E225 missing whitespace around operator
 E226 missing whitespace around arithmetic operator
 E231 missing whitespace after ','
 E241 multiple spaces after ','
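A minimal hypothetical illustration (not taken from this change) of what each code flags, with the corrected form on the following line:

    # E225: missing whitespace around operator
    total=1
    total = 1
    # E226: missing whitespace around arithmetic operator
    area = 4*3
    area = 4 * 3
    # E231: missing whitespace after ','
    point = (1,2)
    point = (1, 2)
    # E241: multiple spaces after ','
    pair = (1,  2)
    pair = (1, 2)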

Story: 2003499
Task: 26386
Change-Id: Ic97e46b0a3b57456740a2a5935a8a3e6d3b81427
Signed-off-by: Al Bailey <Al.Bailey@windriver.com>
Al Bailey 2018-09-11 14:41:16 -05:00
parent edda9455bd
commit 19a881ee2d
22 changed files with 320 additions and 328 deletions


@@ -210,8 +210,8 @@ def analysis_instance_stop_success(instance_uuid, instance_name, records,
always = True
possible_records \
= [(action, NFV_VIM.INSTANCE_NFVI_ACTION_START),
(always, NFV_VIM.INSTANCE_STOP_STATE),
= [(action, NFV_VIM.INSTANCE_NFVI_ACTION_START),
(always, NFV_VIM.INSTANCE_STOP_STATE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_VOTE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_VOTE_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_VOTE_RESULT),
@@ -219,9 +219,9 @@ def analysis_instance_stop_success(instance_uuid, instance_name, records,
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_PRE_NOTIFY_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_DISABLE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_DISABLE_CALLBACK),
(always, NFV_VIM.INSTANCE_STOP_CALLBACK),
(always, NFV_VIM.INSTANCE_STOP_STATE_COMPLETED),
(always, NFV_VIM.INSTANCE_INITIAL_STATE)]
(always, NFV_VIM.INSTANCE_STOP_CALLBACK),
(always, NFV_VIM.INSTANCE_STOP_STATE_COMPLETED),
(always, NFV_VIM.INSTANCE_INITIAL_STATE)]
expected_records = list()
for allowed, data_type in possible_records:
@@ -240,8 +240,8 @@ def analysis_instance_pause_success(instance_uuid, instance_name, records,
always = True
possible_records \
= [(action, NFV_VIM.INSTANCE_NFVI_ACTION_START),
(always, NFV_VIM.INSTANCE_PAUSE_STATE),
= [(action, NFV_VIM.INSTANCE_NFVI_ACTION_START),
(always, NFV_VIM.INSTANCE_PAUSE_STATE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_VOTE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_VOTE_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_VOTE_RESULT),
@@ -249,9 +249,9 @@ def analysis_instance_pause_success(instance_uuid, instance_name, records,
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_PRE_NOTIFY_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_DISABLE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_DISABLE_CALLBACK),
(always, NFV_VIM.INSTANCE_PAUSE_CALLBACK),
(always, NFV_VIM.INSTANCE_PAUSE_STATE_COMPLETED),
(always, NFV_VIM.INSTANCE_INITIAL_STATE)]
(always, NFV_VIM.INSTANCE_PAUSE_CALLBACK),
(always, NFV_VIM.INSTANCE_PAUSE_STATE_COMPLETED),
(always, NFV_VIM.INSTANCE_INITIAL_STATE)]
expected_records = list()
for allowed, data_type in possible_records:
@@ -270,16 +270,16 @@ def analysis_instance_unpause_success(instance_uuid, instance_name, records,
always = True
possible_records \
= [(action, NFV_VIM.INSTANCE_NFVI_ACTION_START),
(always, NFV_VIM.INSTANCE_UNPAUSE_STATE),
(always, NFV_VIM.INSTANCE_UNPAUSE_CALLBACK),
= [(action, NFV_VIM.INSTANCE_NFVI_ACTION_START),
(always, NFV_VIM.INSTANCE_UNPAUSE_STATE),
(always, NFV_VIM.INSTANCE_UNPAUSE_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_ENABLE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_ENABLE_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_POST_NOTIFY_HEARTBEAT),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_POST_NOTIFY),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_POST_NOTIFY_CALLBACK),
(always, NFV_VIM.INSTANCE_UNPAUSE_STATE_COMPLETED),
(always, NFV_VIM.INSTANCE_INITIAL_STATE)]
(always, NFV_VIM.INSTANCE_UNPAUSE_STATE_COMPLETED),
(always, NFV_VIM.INSTANCE_INITIAL_STATE)]
expected_records = list()
for allowed, data_type in possible_records:
@@ -298,8 +298,8 @@ def analysis_instance_suspend_success(instance_uuid, instance_name, records,
always = True
possible_records \
= [(action, NFV_VIM.INSTANCE_NFVI_ACTION_START),
(always, NFV_VIM.INSTANCE_SUSPEND_STATE),
= [(action, NFV_VIM.INSTANCE_NFVI_ACTION_START),
(always, NFV_VIM.INSTANCE_SUSPEND_STATE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_VOTE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_VOTE_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_VOTE_RESULT),
@@ -307,9 +307,9 @@ def analysis_instance_suspend_success(instance_uuid, instance_name, records,
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_PRE_NOTIFY_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_DISABLE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_DISABLE_CALLBACK),
(always, NFV_VIM.INSTANCE_SUSPEND_CALLBACK),
(always, NFV_VIM.INSTANCE_SUSPEND_STATE_COMPLETED),
(always, NFV_VIM.INSTANCE_INITIAL_STATE)]
(always, NFV_VIM.INSTANCE_SUSPEND_CALLBACK),
(always, NFV_VIM.INSTANCE_SUSPEND_STATE_COMPLETED),
(always, NFV_VIM.INSTANCE_INITIAL_STATE)]
expected_records = list()
for allowed, data_type in possible_records:
@@ -328,16 +328,16 @@ def analysis_instance_resume_success(instance_uuid, instance_name, records,
always = True
possible_records \
= [(action, NFV_VIM.INSTANCE_NFVI_ACTION_START),
(always, NFV_VIM.INSTANCE_RESUME_STATE),
(always, NFV_VIM.INSTANCE_RESUME_CALLBACK),
= [(action, NFV_VIM.INSTANCE_NFVI_ACTION_START),
(always, NFV_VIM.INSTANCE_RESUME_STATE),
(always, NFV_VIM.INSTANCE_RESUME_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_ENABLE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_ENABLE_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_POST_NOTIFY_HEARTBEAT),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_POST_NOTIFY),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_POST_NOTIFY_CALLBACK),
(always, NFV_VIM.INSTANCE_RESUME_STATE_COMPLETED),
(always, NFV_VIM.INSTANCE_INITIAL_STATE)]
(always, NFV_VIM.INSTANCE_RESUME_STATE_COMPLETED),
(always, NFV_VIM.INSTANCE_INITIAL_STATE)]
expected_records = list()
for allowed, data_type in possible_records:
@@ -363,8 +363,8 @@ def analysis_instance_reboot_success(instance_uuid, instance_name, records,
always = True
possible_records \
= [(action, NFV_VIM.INSTANCE_NFVI_ACTION_START),
(always, NFV_VIM.INSTANCE_REBOOT_STATE),
= [(action, NFV_VIM.INSTANCE_NFVI_ACTION_START),
(always, NFV_VIM.INSTANCE_REBOOT_STATE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_VOTE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_VOTE_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_VOTE_RESULT),
@@ -372,9 +372,9 @@ def analysis_instance_reboot_success(instance_uuid, instance_name, records,
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_PRE_NOTIFY_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_DISABLE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_DISABLE_CALLBACK),
(always, NFV_VIM.INSTANCE_REBOOT_CALLBACK),
(always, NFV_VIM.INSTANCE_REBOOT_STATE_COMPLETED),
(always, NFV_VIM.INSTANCE_INITIAL_STATE),
(always, NFV_VIM.INSTANCE_REBOOT_CALLBACK),
(always, NFV_VIM.INSTANCE_REBOOT_STATE_COMPLETED),
(always, NFV_VIM.INSTANCE_INITIAL_STATE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_NOTIFY)]
expected_records = list()
@@ -395,11 +395,11 @@ def analysis_instance_rebuild_success(instance_uuid, instance_name, records,
always = True
possible_records \
= [(action, NFV_VIM.INSTANCE_NFVI_ACTION_START),
(always, NFV_VIM.INSTANCE_REBUILD_STATE),
(always, NFV_VIM.INSTANCE_REBUILD_CALLBACK),
(always, NFV_VIM.INSTANCE_REBUILD_STATE_COMPLETED),
(always, NFV_VIM.INSTANCE_INITIAL_STATE)]
= [(action, NFV_VIM.INSTANCE_NFVI_ACTION_START),
(always, NFV_VIM.INSTANCE_REBUILD_STATE),
(always, NFV_VIM.INSTANCE_REBUILD_CALLBACK),
(always, NFV_VIM.INSTANCE_REBUILD_STATE_COMPLETED),
(always, NFV_VIM.INSTANCE_INITIAL_STATE)]
expected_records = list()
for allowed, data_type in possible_records:
@@ -419,8 +419,8 @@ def analysis_instance_live_migrate_success(instance_uuid, instance_name,
always = True
possible_records \
= [(action, NFV_VIM.INSTANCE_NFVI_ACTION_START),
(always, NFV_VIM.INSTANCE_LIVE_MIGRATE_STATE),
= [(action, NFV_VIM.INSTANCE_NFVI_ACTION_START),
(always, NFV_VIM.INSTANCE_LIVE_MIGRATE_STATE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_VOTE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_VOTE_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_VOTE_RESULT),
@@ -428,14 +428,14 @@ def analysis_instance_live_migrate_success(instance_uuid, instance_name,
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_PRE_NOTIFY_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_DISABLE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_DISABLE_CALLBACK),
(always, NFV_VIM.INSTANCE_LIVE_MIGRATE_CALLBACK),
(always, NFV_VIM.INSTANCE_LIVE_MIGRATE_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_LIVE_MIGRATE_FINISH_STATE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_ENABLE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_ENABLE_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_POST_NOTIFY_HEARTBEAT),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_POST_NOTIFY),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_POST_NOTIFY_CALLBACK),
(always, NFV_VIM.INSTANCE_INITIAL_STATE)]
(always, NFV_VIM.INSTANCE_INITIAL_STATE)]
expected_records = list()
for allowed, data_type in possible_records:
@@ -460,24 +460,24 @@ def analysis_instance_cold_migrate_success(instance_uuid, instance_name,
guest_hb_only = not action and guest_hb
possible_records \
= [(action, NFV_VIM.INSTANCE_NFVI_ACTION_START),
(always, NFV_VIM.INSTANCE_COLD_MIGRATE_STATE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_VOTE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_VOTE_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_VOTE_RESULT),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_PRE_NOTIFY),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_PRE_NOTIFY_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_DISABLE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_DISABLE_CALLBACK),
(always, NFV_VIM.INSTANCE_COLD_MIGRATE_CALLBACK),
(not action, NFV_VIM.INSTANCE_COLD_MIGRATE_CONFIRM_STATE),
(not action, NFV_VIM.INSTANCE_COLD_MIGRATE_CONFIRM_CALLBACK),
= [(action, NFV_VIM.INSTANCE_NFVI_ACTION_START),
(always, NFV_VIM.INSTANCE_COLD_MIGRATE_STATE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_VOTE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_VOTE_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_VOTE_RESULT),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_PRE_NOTIFY),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_PRE_NOTIFY_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_DISABLE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_DISABLE_CALLBACK),
(always, NFV_VIM.INSTANCE_COLD_MIGRATE_CALLBACK),
(not action, NFV_VIM.INSTANCE_COLD_MIGRATE_CONFIRM_STATE),
(not action, NFV_VIM.INSTANCE_COLD_MIGRATE_CONFIRM_CALLBACK),
(guest_hb_only, NFV_VIM.INSTANCE_GUEST_SERVICES_ENABLE),
(guest_hb_only, NFV_VIM.INSTANCE_GUEST_SERVICES_ENABLE_CALLBACK),
(guest_hb_only, NFV_VIM.INSTANCE_GUEST_SERVICES_POST_NOTIFY_HEARTBEAT),
(guest_hb_only, NFV_VIM.INSTANCE_GUEST_SERVICES_POST_NOTIFY),
(guest_hb_only, NFV_VIM.INSTANCE_GUEST_SERVICES_POST_NOTIFY_CALLBACK),
(always, NFV_VIM.INSTANCE_INITIAL_STATE)]
(always, NFV_VIM.INSTANCE_INITIAL_STATE)]
expected_records = list()
for allowed, data_type in possible_records:
@@ -499,15 +499,15 @@ def analysis_instance_cold_migrate_confirm_success(instance_uuid, instance_name,
Analyze records and determine if instance cold-migrate confirmed
"""
possible_records \
= [(action, NFV_VIM.INSTANCE_NFVI_ACTION_START),
(action, NFV_VIM.INSTANCE_COLD_MIGRATE_CONFIRM_STATE),
(action, NFV_VIM.INSTANCE_COLD_MIGRATE_CONFIRM_CALLBACK),
= [(action, NFV_VIM.INSTANCE_NFVI_ACTION_START),
(action, NFV_VIM.INSTANCE_COLD_MIGRATE_CONFIRM_STATE),
(action, NFV_VIM.INSTANCE_COLD_MIGRATE_CONFIRM_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_ENABLE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_ENABLE_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_POST_NOTIFY_HEARTBEAT),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_POST_NOTIFY),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_POST_NOTIFY_CALLBACK),
(action, NFV_VIM.INSTANCE_INITIAL_STATE)]
(action, NFV_VIM.INSTANCE_INITIAL_STATE)]
expected_records = list()
for allowed, data_type in possible_records:
@@ -529,15 +529,15 @@ def analysis_instance_cold_migrate_revert_success(instance_uuid, instance_name,
Analyze records and determine if instance cold-migrate reverted
"""
possible_records \
= [(action, NFV_VIM.INSTANCE_NFVI_ACTION_START),
(action, NFV_VIM.INSTANCE_COLD_MIGRATE_REVERT_STATE),
(action, NFV_VIM.INSTANCE_COLD_MIGRATE_REVERT_CALLBACK),
= [(action, NFV_VIM.INSTANCE_NFVI_ACTION_START),
(action, NFV_VIM.INSTANCE_COLD_MIGRATE_REVERT_STATE),
(action, NFV_VIM.INSTANCE_COLD_MIGRATE_REVERT_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_ENABLE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_ENABLE_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_POST_NOTIFY_HEARTBEAT),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_POST_NOTIFY),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_POST_NOTIFY_CALLBACK),
(action, NFV_VIM.INSTANCE_INITIAL_STATE)]
(action, NFV_VIM.INSTANCE_INITIAL_STATE)]
expected_records = list()
for allowed, data_type in possible_records:
@@ -558,8 +558,8 @@ def analysis_instance_resize_success(instance_uuid, instance_name, records,
Analyze records and determine if instance resized
"""
possible_records \
= [(action, NFV_VIM.INSTANCE_NFVI_ACTION_START),
(action, NFV_VIM.INSTANCE_RESIZE_STATE),
= [(action, NFV_VIM.INSTANCE_NFVI_ACTION_START),
(action, NFV_VIM.INSTANCE_RESIZE_STATE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_VOTE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_VOTE_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_VOTE_RESULT),
@@ -567,8 +567,8 @@ def analysis_instance_resize_success(instance_uuid, instance_name, records,
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_PRE_NOTIFY_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_DISABLE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_DISABLE_CALLBACK),
(action, NFV_VIM.INSTANCE_RESIZE_CALLBACK),
(action, NFV_VIM.INSTANCE_INITIAL_STATE)]
(action, NFV_VIM.INSTANCE_RESIZE_CALLBACK),
(action, NFV_VIM.INSTANCE_INITIAL_STATE)]
expected_records = list()
for allowed, data_type in possible_records:
@@ -588,15 +588,15 @@ def analysis_instance_resize_confirm_success(instance_uuid, instance_name,
Analyze records and determine if instance resize confirmed
"""
possible_records \
= [(action, NFV_VIM.INSTANCE_NFVI_ACTION_START),
(action, NFV_VIM.INSTANCE_RESIZE_CONFIRM_STATE),
(action, NFV_VIM.INSTANCE_RESIZE_CONFIRM_CALLBACK),
= [(action, NFV_VIM.INSTANCE_NFVI_ACTION_START),
(action, NFV_VIM.INSTANCE_RESIZE_CONFIRM_STATE),
(action, NFV_VIM.INSTANCE_RESIZE_CONFIRM_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_ENABLE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_ENABLE_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_POST_NOTIFY_HEARTBEAT),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_POST_NOTIFY),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_POST_NOTIFY_CALLBACK),
(action, NFV_VIM.INSTANCE_INITIAL_STATE)]
(action, NFV_VIM.INSTANCE_INITIAL_STATE)]
expected_records = list()
for allowed, data_type in possible_records:
@@ -616,15 +616,15 @@ def analysis_instance_resize_revert_success(instance_uuid, instance_name,
Analyze records and determine if instance resize reverted
"""
possible_records \
= [(action, NFV_VIM.INSTANCE_NFVI_ACTION_START),
(action, NFV_VIM.INSTANCE_RESIZE_REVERT_STATE),
(action, NFV_VIM.INSTANCE_RESIZE_REVERT_CALLBACK),
= [(action, NFV_VIM.INSTANCE_NFVI_ACTION_START),
(action, NFV_VIM.INSTANCE_RESIZE_REVERT_STATE),
(action, NFV_VIM.INSTANCE_RESIZE_REVERT_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_ENABLE),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_ENABLE_CALLBACK),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_POST_NOTIFY_HEARTBEAT),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_POST_NOTIFY),
(guest_hb, NFV_VIM.INSTANCE_GUEST_SERVICES_POST_NOTIFY_CALLBACK),
(action, NFV_VIM.INSTANCE_INITIAL_STATE)]
(action, NFV_VIM.INSTANCE_INITIAL_STATE)]
expected_records = list()
for allowed, data_type in possible_records:


@@ -76,7 +76,7 @@ class NfvVimParser(object):
timestamp = datetime.datetime(int(parsed[0]), int(parsed[1]),
int(parsed[2]), int(parsed[3]),
int(parsed[4]), int(parsed[5]),
int(parsed[6])*1000)
int(parsed[6]) * 1000)
if start_date <= timestamp <= end_date:
message_data = self.parse_message(parsed[9], parsed[10],


@@ -44,10 +44,10 @@ class Histogram(object):
if 0 == sample_as_int:
bucket_idx = sample_as_int.bit_length()
else:
bucket_idx = (sample_as_int-1).bit_length()
bucket_idx = (sample_as_int - 1).bit_length()
if bucket_idx > self._num_buckets:
bucket_idx = self._num_buckets-1
bucket_idx = self._num_buckets - 1
if sample_as_int > self._max_sample:
self._max_sample = sample_as_int
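The expressions reformatted above drive power-of-two bucketing: a sample maps to the bucket whose upper bound is the next power of two via bit_length(). A minimal standalone sketch of that mapping (the function name and the num_buckets parameter are assumed for illustration, not taken from the class):

    def bucket_index(sample_as_int, num_buckets):
        # Samples in the range (2**(k-1), 2**k] land in bucket k; 0 maps to bucket 0.
        if 0 == sample_as_int:
            bucket_idx = sample_as_int.bit_length()
        else:
            bucket_idx = (sample_as_int - 1).bit_length()
        # Clamp oversized samples to the last bucket, mirroring the hunk above.
        if bucket_idx > num_buckets:
            bucket_idx = num_buckets - 1
        return bucket_idx

    # e.g. bucket_index(5, 16) == 3, since 5 falls in the (4, 8] bucket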


@@ -210,7 +210,7 @@ class Strategy(object):
elif 0 == stage_id and stage_id < self.apply_phase.total_stages:
self._state = STRATEGY_STATE.APPLYING
self._current_phase = STRATEGY_PHASE.APPLY
self.apply_phase.apply(stage_id+1)
self.apply_phase.apply(stage_id + 1)
else:
success = False
@@ -259,7 +259,7 @@ class Strategy(object):
% (stage_id, self.apply_phase.current_stage))
else:
self.apply_phase.apply(stage_id+1)
self.apply_phase.apply(stage_id + 1)
else:
if stage_id is None:


@@ -7,7 +7,7 @@ import six
import collections
from nfv_common import debug
from nfv_common.helpers import Constants, Constant, Singleton
from nfv_common.helpers import Constants, Constant, Singleton
from _task_result import TaskResult


@@ -44,8 +44,8 @@ from subprocess import call
from collections import defaultdict
dir = os.path.dirname(__file__)
csvDir = os.path.join(dir,'csv/')
logDir = os.path.join(dir,'logs/')
csvDir = os.path.join(dir, 'csv/')
logDir = os.path.join(dir, 'logs/')
if not os.path.exists(csvDir):
os.makedirs(csvDir)
@@ -59,45 +59,45 @@ call("gunzip logs/nfv-vim.log.[0-9].gz logs/nfv-vim.log.[0-9][0-9].gz", shell=True)
class Parser(object):
def __init__(self):
self.proc="" # Name of process being read
self.timestamp="" # Timestamp found on line stating process name
self.write=False # Flag indicating data has yet to be written
self.stored=False # Flag indicating that there is new data stored
self.length=0 # Time duration of process
self.instanceCount=0 # Number of hits for the particular duration
self.rollingCount=0 # Sum of the hits for each duration parsed within the sample
self.total=0 # Specific duration multiplied by number of hits for that duration
self.avg=0 # Average execution time of process
self.unit="" # Unit execution time was recorded in
self.csvs=defaultdict(list) # Stores unique processes in a dict of lists
self.proc = "" # Name of process being read
self.timestamp = "" # Timestamp found on line stating process name
self.write = False # Flag indicating data has yet to be written
self.stored = False # Flag indicating that there is new data stored
self.length = 0 # Time duration of process
self.instanceCount = 0 # Number of hits for the particular duration
self.rollingCount = 0 # Sum of the hits for each duration parsed within the sample
self.total = 0 # Specific duration multiplied by number of hits for that duration
self.avg = 0 # Average execution time of process
self.unit = "" # Unit execution time was recorded in
self.csvs = defaultdict(list) # Stores unique processes in a dict of lists
# Resets variables when a new process begins to be read in logs
def reset(self):
self.length=0
self.avg=0
self.instanceCount=0
self.rollingCount=0
self.total=0
self.proc=""
self.unit=""
self.write=False
self.stored=False
self.length = 0
self.avg = 0
self.instanceCount = 0
self.rollingCount = 0
self.total = 0
self.proc = ""
self.unit = ""
self.write = False
self.stored = False
# Adds log data for a process to the csvs dictionary
def add(self,proc,total,timestamp,rollingCount):
def add(self, proc, total, timestamp, rollingCount):
if rollingCount != 0:
avg=total/float(rollingCount)
avg = total / float(rollingCount)
else:
avg=0
self.csvs[proc].append(timestamp+","+str(avg)+","+str(rollingCount)+",")
avg = 0
self.csvs[proc].append(timestamp + "," + str(avg) + "," + str(rollingCount) + ",")
self.reset()
def main(self):
# Sorts the log files to read them in descending order
sorted_files = glob.glob(logDir+"nfv-vim.log*")
sorted_files = glob.glob(logDir + "nfv-vim.log*")
sorted_files.sort(reverse=True)
for logFile in sorted_files:
with open(logFile,"r+") as f:
with open(logFile, "r+") as f:
cfgLines = f.read().splitlines()
for line in cfgLines:
if "Histogram" in line:
@@ -106,33 +106,33 @@ class Parser(object):
self.total,
self.timestamp,
self.rollingCount)
self.write=True
self.proc=line.partition("Histogram: ")[2]
self.proc=("".join(self.proc.split())).rstrip(':')
self.timestamp=line.split()[0]
self.write = True
self.proc = line.partition("Histogram: ")[2]
self.proc = ("".join(self.proc.split())).rstrip(':')
self.timestamp = line.split()[0]
elif "histogram.py" in line:
line=line.split()
self.length=int(line[8])
self.unit=line[9]
self.instanceCount=int(line[10])
line = line.split()
self.length = int(line[8])
self.unit = line[9]
self.instanceCount = int(line[10])
if "decisecond" in self.unit:
self.length*=100
self.length *= 100
elif "secs" in self.unit:
self.length*=1000
self.total=self.total+self.instanceCount*self.length
self.rollingCount+=self.instanceCount
self.stored=True
self.length *= 1000
self.total = self.total + self.instanceCount * self.length
self.rollingCount += self.instanceCount
self.stored = True
f.close()
if self.write or self.stored:
self.add(self.proc,self.total,self.timestamp,self.rollingCount)
self.add(self.proc, self.total, self.timestamp, self.rollingCount)
for process in self.csvs:
with open(os.path.join(csvDir,process+".csv"),'w+') as csvOut:
with open(os.path.join(csvDir, process + ".csv"), 'w+') as csvOut:
for line in self.csvs[process]:
csvOut.write(line+"\n")
csvOut.write(line + "\n")
csvOut.close()
process=Parser()
process = Parser()
process.main()
print("\nComplete\n")


@@ -53,26 +53,26 @@ from builtins import input
dir = os.path.dirname(__file__)
fig = plotly.graph_objs.graph_objs.Figure
pth = os.path.join(dir,'csv/')
pth = os.path.join(dir, 'csv/')
execTime=False # Indicates if average execution time is to be graphed or not
default=False # Indicates no commands were entered and to run with default settings (run config with -t option)
oneAxis=False # Causes the generated graph to have two y-axes sharing an x-axis with both avg execution time and hits being graphed
config=False # Indicates whether to pull process names from logplot.cfg or not
hits=False # Indicates if the delta of hits between samples is to be graphed
markers=False
lines=False
timestamp=[]
dateRange=[]
warnings=[]
procs=[]
group=[]
graphName=""
plotType=""
execTime = False # Indicates if average execution time is to be graphed or not
default = False # Indicates no commands were entered and to run with default settings (run config with -t option)
oneAxis = False # Causes the generated graph to have two y-axes sharing an x-axis with both avg execution time and hits being graphed
config = False # Indicates whether to pull process names from logplot.cfg or not
hits = False # Indicates if the delta of hits between samples is to be graphed
markers = False
lines = False
timestamp = []
dateRange = []
warnings = []
procs = []
group = []
graphName = ""
plotType = ""
def helpMessage():
print("\n"+"-"*120)
print("\n" + "-" * 120)
print("NFV-VIM Histogram Graphing Script\n")
print("This script is meant to graph average execution times and the delta of hits between sample periods for processes in nfv-vim logs.\n")
print("Usage:\n")
@@ -130,51 +130,51 @@ def helpMessage():
" the config file, to use log information for all dates\n"
" available, to show average execution time on the y-axis,\n"
" and to name the file with the current day's datestamp.")
print("-"*120)
print("-" * 120)
# Appends new processes found via CSV filenames to the master process list in logplot.cfg if they are not already present.
# If logplot.cfg has not been generated yet, this will create it and add process names found in filenames in ./csv
def updater(configExists=True):
procs=[]
existingProcs=[]
newProcs=[]
position=0 # Tracks position of the end of the master process list so new processes can be added above it.
procs = []
existingProcs = []
newProcs = []
position = 0 # Tracks position of the end of the master process list so new processes can be added above it.
os.chdir(pth)
for name in iglob("*.csv"):
procs.append(str(name)[:-4])
os.chdir("..")
if not configExists:
f=open(os.path.join(dir,'logplot.cfg'),"w")
f = open(os.path.join(dir, 'logplot.cfg'), "w")
for p in procs:
f.write(p+" "*(59-len(p))+"N\n")
f.write("#"*20+"END OF PROCESS LIST"+"#"*21+"\n\n")
f.write("#"*27+"GROUPS"+"#"*27+"\n")
f.write(p + " " * (59 - len(p)) + "N\n")
f.write("#" * 20 + "END OF PROCESS LIST" + "#" * 21 + "\n\n")
f.write("#" * 27 + "GROUPS" + "#" * 27 + "\n")
f.write("#GroupSTART\n")
f.write("GroupName=ExampleGroupName1\n")
f.write("ExampleProcessName1"+" "*40+"N\n")
f.write("ExampleProcessName2"+" "*40+"N\n")
f.write("ExampleProcessName1" + " " * 40 + "N\n")
f.write("ExampleProcessName2" + " " * 40 + "N\n")
f.write("#GroupEND\n")
f.write("-"*60+"\n")
f.write("-" * 60 + "\n")
f.write("GroupName=ExampleGroupName2\n")
f.write("ExampleProcessName3"+" "*40+"N\n")
f.write("ExampleProcessName4"+" "*40+"N\n")
f.write("ExampleProcessName3" + " " * 40 + "N\n")
f.write("ExampleProcessName4" + " " * 40 + "N\n")
f.write("#GroupEND\n")
f.write("#"*20+"END OF GROUPS"+"#"*27)
f.write("#" * 20 + "END OF GROUPS" + "#" * 27)
f.close()
else:
with open(os.path.join(dir,'logplot.cfg'),"r+") as f:
with open(os.path.join(dir, 'logplot.cfg'), "r+") as f:
cfgLines = f.read().splitlines()
for cfgProc in cfgLines:
if "#END" in cfgProc:
break
existingProcs.append(cfgProc.split()[0])
position+=1
position += 1
for p in procs:
if p not in existingProcs:
newProcs.append(p+" "*(59-len(p))+"N")
procs=cfgLines[:position]+newProcs+cfgLines[position:]
newProcs.append(p + " " * (59 - len(p)) + "N")
procs = cfgLines[:position] + newProcs + cfgLines[position:]
f.seek(0)
f.write("\n".join(procs))
f.truncate()
@@ -183,25 +183,25 @@ def updater(configExists=True):
# Appends process names found in the specified group to the list of processes to be graphed.
def gCommand(groups):
procs=[]
f=open(os.path.join(dir,'logplot.cfg'),"r")
cfgLines=f.read().splitlines()
procs = []
f = open(os.path.join(dir, 'logplot.cfg'), "r")
cfgLines = f.read().splitlines()
for g in groups:
groupFound=False
finishedGroup=False
groupFound = False
finishedGroup = False
for i in range(len(cfgLines)):
liNum=i
if str("GroupName="+g) == cfgLines[i].strip():
groupFound=True
liNum = i
if str("GroupName=" + g) == cfgLines[i].strip():
groupFound = True
while not finishedGroup:
liNum+=1
liNum += 1
if "GroupEND" in cfgLines[liNum]:
finishedGroup=True
finishedGroup = True
else:
cfgLine=cfgLines[liNum].split()
if cfgLine[1]=="Y":
cfgLine = cfgLines[liNum].split()
if cfgLine[1] == "Y":
procs.append(cfgLine[0])
else:
break
@@ -218,34 +218,34 @@ def gCommand(groups):
# a list of known processes containing the name they entered. If they enter one of the provided names, it will be added to the list. If the
user enters "s", the process in question will be skipped and the script will continue. If the user enters "q" the script will exit.
def pCommand(pList):
procList=[]
procList = []
for i in range(len(pList)):
csvFile=str(pList[i])+".csv"
procName=str(pList[i])
isFile=False
csvFile = str(pList[i]) + ".csv"
procName = str(pList[i])
isFile = False
if os.path.isfile(os.path.join(pth,csvFile)):
if os.path.isfile(os.path.join(pth, csvFile)):
isFile = True
procList.append(pList[i])
else:
while(not isFile):
print("\nFiles containing keyword: %s" % (str(procName)))
csvFile=str(procName)+".csv"
csvFile = str(procName) + ".csv"
for root, directories, filenames in os.walk(pth):
for filename in filenames:
if procName.lower() in filename.lower():
if (str(procName)+".csv") == str(filename):
isFile=True
if (str(procName) + ".csv") == str(filename):
isFile = True
procList.append(str(procName).strip())
break
else:
print(" "+filename[:-4])
print(" " + filename[:-4])
else:
procName = str(input("\nEnter the corrected process name, q to quit, or s to skip: ")).strip()
if procName=="s":
isFile=True
if procName == "s":
isFile = True
break
elif procName=="q":
elif procName == "q":
sys.exit()
return procList
@@ -253,59 +253,55 @@ def pCommand(pList):
# Stores the average execution time, or delta hit count data into a plotly graph obj, and restricts samples to be within a certain
# date range if specified. If plots is 1, one graph will be generated. If plots is 2, two graphs will be generated with one above the other.
def storeGraphData(procs, dateRange=[], execTime=False, hits=False, plots=1):
graphData={}
prevHitTotal=0
timeList=[[] for p in range(len(procs))]
dateList=[[] for p in range(len(procs))]
hitList=[[] for p in range(len(procs))]
graphData = {}
prevHitTotal = 0
timeList = [[] for p in range(len(procs))]
dateList = [[] for p in range(len(procs))]
hitList = [[] for p in range(len(procs))]
if dateRange:
for i in range(len(procs)):
csvFile = str(procs[i])+".csv"
with open(os.path.join(pth,csvFile), 'rb') as f:
csvFile = str(procs[i]) + ".csv"
with open(os.path.join(pth, csvFile), 'rb') as f:
reader = csv.reader(f, delimiter=',', quoting=csv.QUOTE_NONE)
for ts, at, h, n in reader:
t = ts.split("T")
date=''.join(x for x in t[0].split('-'))
date = ''.join(x for x in t[0].split('-'))
if (int(date) >= int(dateRange[0])) and (int(date) <= int(dateRange[1])):
timeList[i].append(at)
dateList[i].append(str(ts[0:10:1]+" "+ts[11:]))
hitList[i].append(int(h)-prevHitTotal)
prevHitTotal=int(h)
dateList[i].append(str(ts[0:10:1] + " " + ts[11:]))
hitList[i].append(int(h) - prevHitTotal)
prevHitTotal = int(h)
f.close()
hitList[i][0]=None
graphData['trace'+str(i)] = go.Scatter(
x=dateList[i],
y=timeList[i] if execTime else hitList[i],
mode=plotType,
name=(procs[i] if not oneAxis else (procs[i]+"_"+("time" if execTime else "hits")))
)
if plots==1:
fig.append_trace(graphData['trace'+str(i)], 1, 1)
elif plots==2:
fig.append_trace(graphData['trace'+str(i)], 2, 1)
hitList[i][0] = None
graphData['trace' + str(i)] = go.Scatter(x=dateList[i],
y=timeList[i] if execTime else hitList[i],
mode=plotType,
name=(procs[i] if not oneAxis else (procs[i] + "_" + ("time" if execTime else "hits"))))
if plots == 1:
fig.append_trace(graphData['trace' + str(i)], 1, 1)
elif plots == 2:
fig.append_trace(graphData['trace' + str(i)], 2, 1)
else:
for i in range(len(procs)):
csvFile = str(procs[i])+".csv"
with open(os.path.join(pth,csvFile), 'rb') as f:
csvFile = str(procs[i]) + ".csv"
with open(os.path.join(pth, csvFile), 'rb') as f:
reader = csv.reader(f, delimiter=',', quoting=csv.QUOTE_NONE)
for ts, at, h, n in reader:
timeList[i].append(at)
dateList[i].append(str(ts[0:10:1]+" "+ts[11:]))
hitList[i].append(int(h)-prevHitTotal)
prevHitTotal=int(h)
dateList[i].append(str(ts[0:10:1] + " " + ts[11:]))
hitList[i].append(int(h) - prevHitTotal)
prevHitTotal = int(h)
f.close()
hitList[i][0]=None
graphData['trace'+str(i)] = go.Scatter(
x=dateList[i],
y=timeList[i] if execTime else hitList[i],
mode=plotType,
name=(procs[i] if not oneAxis else (procs[i]+"_"+("time" if execTime else "hits")))
)
if plots==1:
fig.append_trace(graphData['trace'+str(i)], 1, 1)
elif plots==2:
fig.append_trace(graphData['trace'+str(i)], 2, 1)
hitList[i][0] = None
graphData['trace' + str(i)] = go.Scatter(x=dateList[i],
y=timeList[i] if execTime else hitList[i],
mode=plotType,
name=(procs[i] if not oneAxis else (procs[i] + "_" + ("time" if execTime else "hits"))))
if plots == 1:
fig.append_trace(graphData['trace' + str(i)], 1, 1)
elif plots == 2:
fig.append_trace(graphData['trace' + str(i)], 2, 1)
# Formats the graph by adding axis titles, changing font sizes, setting there to be two separate graphs or two graphs sharing an x-axis etc.
@@ -313,14 +309,14 @@ def formatGraph(two, oneAxis):
fig['layout'].update(showlegend=True)
if two:
if oneAxis:
fig['layout']['xaxis1'].update(title='Timestamp',titlefont=dict(size=20, color='#4d4d4d'))
fig['layout']['yaxis1'].update(title='Hits Per Sample',titlefont=dict(size=20, color='#4d4d4d'))
fig['layout']['yaxis2'].update(title='Average Execution Time (milliseconds)',anchor='x',overlaying='y',side='right',position=1,titlefont=dict(size=20, color='#4d4d4d'))
fig['layout']['xaxis1'].update(title='Timestamp', titlefont=dict(size=20, color='#4d4d4d'))
fig['layout']['yaxis1'].update(title='Hits Per Sample', titlefont=dict(size=20, color='#4d4d4d'))
fig['layout']['yaxis2'].update(title='Average Execution Time (milliseconds)', anchor='x', overlaying='y', side='right', position=1, titlefont=dict(size=20, color='#4d4d4d'))
else:
fig['layout']['xaxis1'].update(title='Timestamp',titlefont=dict(size=20, color='#4d4d4d'))
fig['layout']['yaxis1'].update(title='Average Execution Time (milliseconds)',titlefont=dict(size=20, color='#4d4d4d'))
fig['layout']['xaxis2'].update(title='Timestamp',titlefont=dict(size=20, color='#4d4d4d'))
fig['layout']['yaxis2'].update(title='Hits Per Sample',titlefont=dict(size=20, color='#4d4d4d'))
fig['layout']['xaxis1'].update(title='Timestamp', titlefont=dict(size=20, color='#4d4d4d'))
fig['layout']['yaxis1'].update(title='Average Execution Time (milliseconds)', titlefont=dict(size=20, color='#4d4d4d'))
fig['layout']['xaxis2'].update(title='Timestamp', titlefont=dict(size=20, color='#4d4d4d'))
fig['layout']['yaxis2'].update(title='Hits Per Sample', titlefont=dict(size=20, color='#4d4d4d'))
fig['layout'].update(title=graphName, titlefont=dict(size=26))
else:
fig['layout'].update(
@@ -346,32 +342,32 @@ def formatGraph(two, oneAxis):
# Sets the name of the saved html file.
def setFilename(graphName):
validName=False
validName = False
if not os.path.exists("Graphs/"):
os.makedirs("Graphs/")
os.chdir(os.path.join(dir,'Graphs/'))
os.chdir(os.path.join(dir, 'Graphs/'))
if not graphName:
graphName=time.strftime("%m-%d-%Y")
if os.path.exists(str(graphName+".html")):
n=1
graphName = time.strftime("%m-%d-%Y")
if os.path.exists(str(graphName + ".html")):
n = 1
while(not validName):
if os.path.exists(str(graphName+"("+str(n)+").html")):
n+=1
if os.path.exists(str(graphName + "(" + str(n) + ").html")):
n += 1
else:
graphName=graphName+"("+str(n)+")"
validName=True
graphName = graphName + "(" + str(n) + ")"
validName = True
return graphName
print("Welcome to plotter, type --help for information")
# Checks that plotly is installed, otherwise graphs cannot be generated.
plotCheck=subprocess.getstatusoutput("pip list | grep plotly")
if plotCheck[0]==0:
plotCheck = subprocess.getstatusoutput("pip list | grep plotly")
if plotCheck[0] == 0:
if "plotly" not in plotCheck[1]:
print("\n\tWARNING: Plotly is not installed on your system.\n\tPlease install it with: sudo pip install plotly\n")
sys.exit()
# Checks to see if logplot.cfg already exists, creates it if not.
if not os.path.isfile(os.path.join(dir,'logplot.cfg')):
if not os.path.isfile(os.path.join(dir, 'logplot.cfg')):
print("Generating logplot.cfg")
updater(False)
print("logplot.cfg created.")
@@ -381,54 +377,54 @@ if not os.path.isdir('./csv'):
command = sys.argv # Takes arguments from the command line
if len(command)==1:
if len(command) == 1:
print("Running with default settings.")
default = True
else:
for i in range(1,len(command)):
for i in range(1, len(command)):
if command[i] == "-c": # Use config file
config=True
config = True
elif command[i] == "--g": # Groups
for j in range(i+1,len(command)):
for j in range(i + 1, len(command)):
group.append(command[j])
procs=gCommand(group)
procs = gCommand(group)
break
elif command[i] == "-t": # Average execution time
execTime=True
execTime = True
elif command[i] == "-h": # Delta hits between samples
hits=True
hits = True
elif command[i] == "-l": # Graph with lines
lines=True
lines = True
elif command[i] == "-m": # Graph with markers (scatter)
markers=True
markers = True
elif command[i] == "-lm": # Graph with lines and markers
lines=True
markers=True
lines = True
markers = True
elif command[i] == "-d": # Date range
dateRange=command[i+1].split('-')
dateRange = command[i + 1].split('-')
if dateRange[0]:
lower=dateRange[0].split("/")
dateRange[0]=lower[0]+lower[1].zfill(2)+lower[2].zfill(2)
lower = dateRange[0].split("/")
dateRange[0] = lower[0] + lower[1].zfill(2) + lower[2].zfill(2)
else:
dateRange[0]="0"*8
dateRange[0] = "0" * 8
if dateRange[1]:
upper=dateRange[1].split("/")
dateRange[1]=upper[0]+upper[1].zfill(2)+upper[2].zfill(2)
upper = dateRange[1].split("/")
dateRange[1] = upper[0] + upper[1].zfill(2) + upper[2].zfill(2)
else:
dateRange[1]="9"*8
i+=1
dateRange[1] = "9" * 8
i += 1
elif command[i] == "-n": # Name of file to be generated
graphName=command[i+1]
i+=1
graphName = command[i + 1]
i += 1
elif command[i] == "-oneaxis": # Have hit and time data displayed on same graph
oneAxis=True
oneAxis = True
elif (command[i] == "--help") or (command[i] == "--h"): # Print help message and exit script
helpMessage()
sys.exit()
elif command[i] == "--p": # User-specified processes
for j in range(i+1,len(command)):
for j in range(i + 1, len(command)):
procs.append(command[j])
procs=pCommand(procs)
procs = pCommand(procs)
break
elif command[i] == "--update":
print("Updating...")
@@ -442,26 +438,26 @@ if (not execTime) and (not hits):
# Default settings can be changed as desired.
if default:
config=True
execTime=True
config = True
execTime = True
if (lines and markers):
plotType="lines+markers"
plotType = "lines+markers"
elif lines:
plotType="lines"
plotType = "lines"
else:
plotType="markers"
plotType = "markers"
if config:
f=open(os.path.join(dir,'logplot.cfg'),"r")
procList=f.read().splitlines()
f = open(os.path.join(dir, 'logplot.cfg'), "r")
procList = f.read().splitlines()
for p in procList:
if "#END" in p:
break
cfgLine=p.split()
if cfgLine[1]=="Y":
csvFile=cfgLine[0]+".csv"
if os.path.exists(os.path.join(pth,csvFile)):
cfgLine = p.split()
if cfgLine[1] == "Y":
csvFile = cfgLine[0] + ".csv"
if os.path.exists(os.path.join(pth, csvFile)):
procs.append(cfgLine[0])
else:
warnings.append("WARNING: %s does not exist." % (csvFile,))
@@ -486,10 +482,10 @@ if procs:
formatGraph((execTime and hits), oneAxis)
# Generates the plot
plotly.offline.plot(fig, filename=setFilename(graphName)+".html")
plotly.offline.plot(fig, filename=setFilename(graphName) + ".html")
else:
warnings.append("NO GRAPH GENERATED BECAUSE NO VALID GROUP OR PROCESS NAME SPECIFIED.")
# If any warnings occurred, print them
if warnings:
print("\n\t"+("\n\t").join(warnings)+"\n")
print("\n\t" + ("\n\t").join(warnings) + "\n")


@@ -2988,7 +2988,7 @@ class NFVIComputeAPI(nfvi.api.v1.NFVIComputeAPI):
self._rest_api_server.add_handler(
'POST', '/v2.1/*', self.instance_action_rest_api_post_handler)
interval_secs = max(self._max_action_request_wait_in_secs/2, 1)
interval_secs = max(self._max_action_request_wait_in_secs / 2, 1)
timers.timers_create_timer('compute-api-action-requests-audit',
interval_secs, interval_secs,
self._audit_action_requests)
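The reformatted expression sizes the action-request audit timer at half the maximum request wait, but never below one second. A worked example with an assumed value standing in for self._max_action_request_wait_in_secs:

    max_wait_in_secs = 30
    interval_secs = max(max_wait_in_secs / 2, 1)   # 15 seconds
    interval_secs = max(1 / 2, 1)                  # clamps to 1 with a 1-second max wait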


@@ -143,7 +143,7 @@ def test_instance_director_recovery_list(
# -- with one instance in the failed state, but elapsed time is less
# than the recovery cooldown, verify that the list is empty, but
# the audit interval is set to the recovery cooldown period
instance_1._elapsed_time_in_state = _recovery_cooldown-1
instance_1._elapsed_time_in_state = _recovery_cooldown - 1
(next_audit_interval, instance_recovery_list, instance_failed_list,
instance_rebuilding_list, instance_migrating_list,
instance_rebooting_list) = _director._get_instance_recovery_list()


@@ -3279,7 +3279,7 @@ class TestSwPatchStrategy(object):
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout':15},
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
@@ -3297,7 +3297,7 @@ class TestSwPatchStrategy(object):
{'name': 'sw-patch-hosts',
'entity_names': ['controller-1']},
{'name': 'system-stabilize',
'timeout':15},
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-1']},
{'name': 'system-stabilize',
@@ -3445,7 +3445,7 @@ class TestSwPatchStrategy(object):
{'name': 'unlock-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout':60}
'timeout': 60}
]
},
]


@@ -672,7 +672,7 @@ class TestSwUpgradeStrategy(object):
stage_hosts = list()
stage_instances = list()
for x in range(0, len(host_sets)-1):
for x in range(0, len(host_sets) - 1):
stage_hosts.append(["compute-%02d" % host_num for host_num in host_sets[x]])
stage_instances.append(
["test_instance_%02d" % host_num for host_num in instance_sets[x]])


@@ -52,7 +52,7 @@ class HeatAPI(object):
token = openstack.get_token(directory)
url_target_index = pecan.request.url.find('/api/openstack/heat')
url_target = pecan.request.url[url_target_index+len('/api/openstack/heat'):]
url_target = pecan.request.url[url_target_index + len('/api/openstack/heat'):]
if '' == url_target or '/' == url_target:
url = token.get_service_url(openstack.OPENSTACK_SERVICE.HEAT,


@@ -92,7 +92,7 @@ class Database(object):
self._session.commit()
elapsed_ms = timers.get_monotonic_timestamp_in_ms() - start_ms
histogram.add_histogram_data("database-commits (periodic)",
elapsed_ms/100, "decisecond")
elapsed_ms / 100, "decisecond")
self._commit_timer_id = None
def commit(self):
@@ -101,7 +101,7 @@ class Database(object):
self._session.commit()
elapsed_ms = timers.get_monotonic_timestamp_in_ms() - start_ms
histogram.add_histogram_data("database-commits (inline)",
elapsed_ms/100, "decisecond")
elapsed_ms / 100, "decisecond")
else:
if self._commit_timer_id is None:
self._commit_timer_id \
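The division reformatted in both hunks above is a unit conversion: one decisecond is 100 milliseconds, so elapsed_ms / 100 records the commit latency in deciseconds. A trivial worked example with an assumed value:

    elapsed_ms = 250.0             # assumed sample: a 250 ms commit
    elapsed_ds = elapsed_ms / 100  # 2.5 deciseconds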


@@ -12,5 +12,5 @@ class Tenant(ObjectData):
"""
def __init__(self, uuid, name, description, enabled):
super(Tenant, self).__init__('1.0.0')
self.update(dict(uuid=uuid, name=name, description=description,
self.update(dict(uuid=uuid, name=name, description=description,
enabled=enabled))


@@ -234,7 +234,7 @@ class InstanceActionData(object):
InstanceActionData._seqnum += 1
elif action_seqnum >= InstanceActionData._seqnum:
InstanceActionData._seqnum = action_seqnum+1
InstanceActionData._seqnum = action_seqnum + 1
self._seqnum = action_seqnum
self._action_state = action_state


@@ -16,5 +16,5 @@ class Tenant(ObjectData):
"""
def __init__(self, uuid, name, description, enabled):
super(Tenant, self).__init__('1.0.0')
self.update(dict(uuid=uuid, name=name, description=description,
self.update(dict(uuid=uuid, name=name, description=description,
enabled=enabled))


@@ -1259,7 +1259,7 @@ class SwUpgradeStrategy(SwUpdateStrategy):
stage.add_step(strategy.UnlockHostsStep(host_list))
# Allow up to four hours for controller disks to synchronize
stage.add_step(strategy.WaitDataSyncStep(
timeout_in_secs=4*60*60,
timeout_in_secs=4 * 60 * 60,
ignore_alarms=self._ignore_alarms))
self.apply_phase.add_stage(stage)
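For clarity, the reformatted constant above works out to the four hours mentioned in the code comment (worked arithmetic, not part of the diff):

    assert 4 * 60 * 60 == 14400    # four hours expressed in seconds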


@@ -177,33 +177,33 @@ class HTTPRequestHandler(BaseHTTPRequestHandler):
self.wfile.write(
query_obj.group(1) + "(" +
json.dumps(
{'locked_hosts': locked_hosts,
'unlocked_hosts': unlocked_hosts,
'locking_hosts': locking_hosts,
'unlocking_hosts': unlocking_hosts,
'enabled_hosts': enabled_hosts,
'disabled_hosts': disabled_hosts,
'offline_hosts': offline_hosts,
'failed_hosts': failed_hosts,
{'locked_hosts': locked_hosts,
'unlocked_hosts': unlocked_hosts,
'locking_hosts': locking_hosts,
'unlocking_hosts': unlocking_hosts,
'enabled_hosts': enabled_hosts,
'disabled_hosts': disabled_hosts,
'offline_hosts': offline_hosts,
'failed_hosts': failed_hosts,
'nfvi_enabled_hosts': nfvi_enabled_hosts,
'total_hosts': total_hosts,
'locked_instances': locked_instances,
'unlocked_instances': unlocked_instances,
'enabled_instances': enabled_instances,
'disabled_instances': disabled_instances,
'failed_instances': failed_instances,
'total_hosts': total_hosts,
'locked_instances': locked_instances,
'unlocked_instances': unlocked_instances,
'enabled_instances': enabled_instances,
'disabled_instances': disabled_instances,
'failed_instances': failed_instances,
'powering_off_instances': powering_off_instances,
'pausing_instances': pausing_instances,
'paused_instances': paused_instances,
'suspended_instances': suspended_instances,
'suspending_instances': suspending_instances,
'resizing_instances': resizing_instances,
'rebooting_instances': rebooting_instances,
'rebuilding_instances': rebuilding_instances,
'migrating_instances': migrating_instances,
'deleting_instances': deleting_instances,
'deleted_instances': deleted_instances,
'total_instances': total_instances,
'pausing_instances': pausing_instances,
'paused_instances': paused_instances,
'suspended_instances': suspended_instances,
'suspending_instances': suspending_instances,
'resizing_instances': resizing_instances,
'rebooting_instances': rebooting_instances,
'rebuilding_instances': rebuilding_instances,
'migrating_instances': migrating_instances,
'deleting_instances': deleting_instances,
'deleted_instances': deleted_instances,
'total_instances': total_instances,
'datetime': str(datetime.datetime.now())[:-3]
}) + ")")
else:
@@ -246,10 +246,10 @@ class HTTPRequestHandler(BaseHTTPRequestHandler):
self.wfile.write(
query_obj.group(1) + "(" +
json.dumps(
{'critical_alarms': critical_alarms,
'major_alarms': major_alarms,
'minor_alarms': minor_alarms,
'warning_alarms': warning_alarms,
{'critical_alarms': critical_alarms,
'major_alarms': major_alarms,
'minor_alarms': minor_alarms,
'warning_alarms': warning_alarms,
'indeterminate_alarms': indeterminate_alarms,
'datetime': str(datetime.datetime.now())[:-3]
}) + ")")


@@ -199,8 +199,8 @@ class DebugHeaders(Middleware):
if body:
for line in body.splitlines():
# This way we won't print out control characters:
LOG.info(line.encode('string_escape')+'\n')
LOG.info('-'*70+'\n')
LOG.info(line.encode('string_escape') + '\n')
LOG.info('-' * 70 + '\n')
def get_jason_request_body(request):


@@ -38,7 +38,7 @@ class Router(Middleware):
"""
self.map = mapper
self.forwarder = forwarder
self._router = RoutesMiddleware(self._dispatch,self.map)
self._router = RoutesMiddleware(self._dispatch, self.map)
super(Router, self).__init__(app)
@webob.dec.wsgify


@@ -43,10 +43,10 @@ class Histogram(object):
if 0 == sample_as_int:
bucket_idx = sample_as_int.bit_length()
else:
bucket_idx = (sample_as_int-1).bit_length()
bucket_idx = (sample_as_int - 1).bit_length()
if bucket_idx > self._num_buckets:
bucket_idx = self._num_buckets-1
bucket_idx = self._num_buckets - 1
if sample_as_int > self._max_sample:
self._max_sample = sample_as_int


@@ -35,10 +35,6 @@ commands =
# E127 continuation line over-indented for visual indent
# E128 continuation line under-indented for visual indent
# E129 visually indented line with same indent as next logical line
# E225 missing whitespace around operator
# E226 missing whitespace around arithmetic operator
# E231 missing whitespace after ':'
# E241 multiple spaces after
# E261 at least two spaces before inline comment
# E265 block comment should start with '# '
# E501 line too long
@@ -55,7 +51,7 @@ commands =
# - flake8 codes -
# F401 '<module>' imported but unused
# F821 undefined name 'unicode' (python3 specific)
ignore = E116,E121,E122,E123,E124,E126,E127,E128,E129,E225,E226,E231,E241,E261,E265,E501,E712,
ignore = E116,E121,E122,E123,E124,E126,E127,E128,E129,E261,E265,E501,E712,
H102,H104,H301,H306,H401,H404,H405,H501,
F401,F821,
# H106 Dont put vim configuration in source files (off by default).