Enable Flake8 Docstring Errors

Flake8 currently ignores the following errors:
H401: docstring should not start with a space
H404: multi line docstring should start without a leading new line
H405: multi line docstring summary not separated with an empty line
Enable them for more consistent formatting of docstrings

Change-Id: I385e28e9c6eca3c02a3def51ff64b00b7a63a853
Story: 2004515
Task: 30076
Signed-off-by: Eric Barrett <eric.barrett@windriver.com>
Eric Barrett 2019-04-18 09:11:20 -04:00
parent 2bc3391ae1
commit cb0b2ffe1e
18 changed files with 142 additions and 210 deletions
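
For context, the sketch below is not part of this change and the function names are made up for illustration; it shows the docstring shapes that each newly enabled hacking check flags, followed by a form that passes all three. The checks are enforced by dropping H401, H404 and H405 from the flake8 ignore list in tox.ini, as shown in the last file of this diff.

def flagged_by_h401():
    """ A docstring that starts with a space is flagged by H401."""


def flagged_by_h404():
    """
    A multi-line docstring that opens with a newline instead of the
    summary line is flagged by H404.
    """


def flagged_by_h405():
    """Summary line
    Body text that follows the summary with no blank line in between
    is flagged by H405.
    """


def passes_all_three():
    """Summary starts right after the quotes, with no leading space.

    The body is separated from the summary by one blank line, so the
    docstring satisfies H401, H404 and H405.
    """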

@ -15,6 +15,7 @@ LOG = logging.getLogger(__name__)
def osd_pool_set_quota(ceph_api, pool_name, max_bytes=0, max_objects=0):
"""Set the quota for an OSD pool_name
Setting max_bytes or max_objects to 0 will disable that quota param
:param pool_name: OSD pool_name
:param max_bytes: maximum bytes for OSD pool_name
@ -112,6 +113,7 @@ def osd_pool_create(ceph_api, pool_name, pg_num, pgp_num):
def osd_pool_delete(ceph_api, pool_name):
"""Delete an osd pool
:param pool_name: pool name
"""
response, body = ceph_api.osd_pool_delete(

@ -665,7 +665,7 @@ class Monitor(HandleUpgradesMixin):
@staticmethod
def _parse_reason(health):
""" Parse reason strings received from Ceph """
"""Parse reason strings received from Ceph"""
if health['health'] in constants.CEPH_STATUS_CUSTOM:
# Don't parse reason messages that we added
return "Storage Alarm Condition: %(health)s. %(detail)s" % health
@ -864,7 +864,7 @@ class Monitor(HandleUpgradesMixin):
alarm_list[alarm].entity_instance_id)
def _get_current_alarms(self):
""" Retrieve currently raised alarm """
"""Retrieve currently raised alarm"""
self.current_health_alarm = self.service.fm_api.get_fault(
fm_constants.FM_ALARM_ID_STORAGE_CEPH,
self.service.entity_instance_id)

@ -20,7 +20,7 @@ DEVICE_NAME_NVME = "nvme"
def command(arguments, **kwargs):
""" Execute e command and capture stdout, stderr & return code """
"""Execute e command and capture stdout, stderr & return code"""
process = subprocess.Popen(
arguments,
stdout=subprocess.PIPE,
@ -71,7 +71,7 @@ JOURNAL_UUID = '45b0969e-9b03-4f30-b4c6-b4b80ceff106' # Type of a journal parti
def is_partitioning_correct(disk_path, partition_sizes):
""" Validate the existence and size of journal partitions"""
"""Validate the existence and size of journal partitions"""
# Obtain the device node from the device path.
disk_node = device_path_to_device_node(disk_path)
@ -110,7 +110,7 @@ def is_partitioning_correct(disk_path, partition_sizes):
def create_partitions(disk_path, partition_sizes):
""" Recreate partitions """
"""Recreate partitions"""
# Obtain the device node from the device path.
disk_node = device_path_to_device_node(disk_path)
@ -183,7 +183,7 @@ OSD_PATH = "/var/lib/ceph/osd/"
def mount_data_partition(data_path, osdid):
""" Mount an OSD data partition and return the mounted path """
"""Mount an OSD data partition and return the mounted path"""
# Obtain the device node from the device path.
data_node = device_path_to_device_node(data_path)
@ -204,7 +204,7 @@ def mount_data_partition(data_path, osdid):
def is_location_correct(path, journal_path, osdid):
""" Check if location points to the correct device """
"""Check if location points to the correct device"""
# Obtain the device node from the device path.
journal_node = device_path_to_device_node(journal_path)
@ -217,7 +217,7 @@ def is_location_correct(path, journal_path, osdid):
def fix_location(mount_point, journal_path, osdid):
""" Move the journal to the new partition """
"""Move the journal to the new partition"""
# Obtain the device node from the device path.
journal_node = device_path_to_device_node(journal_path)

@ -48,9 +48,7 @@ def start_polling():
def handle_exception(exc_type, exc_value, exc_traceback):
"""
Exception handler to log any uncaught exceptions
"""
"""Exception handler to log any uncaught exceptions"""
logging.error("Uncaught exception",
exc_info=(exc_type, exc_value, exc_traceback))
sys.__excepthook__(exc_type, exc_value, exc_traceback)
@ -60,9 +58,7 @@ def handle_exception(exc_type, exc_value, exc_traceback):
# CLASSES
###################
class LogMgmtDaemon():
""" Daemon process representation of
the /var/log monitoring program
"""
"""Daemon process representation of the /var/log monitoring program"""
def __init__(self):
# Daemon-specific init
self.stdin_path = '/dev/null'

@ -41,7 +41,7 @@ class CPU:
time_last = float(0.0) # float of the time the last sample was taken
def log_error(self, err_str):
""" Print an error log with plugin name prefixing the log """
"""Print an error log with plugin name prefixing the log"""
collectd.error("%s %s" % (PLUGIN, err_str))

@ -24,9 +24,7 @@ obj = ExampleObject()
# The config function - called once on collectd process startup
def config_func(config):
"""
Configure the plugin
"""
"""Configure the plugin"""
for node in config.children:
key = node.key.lower()

@ -217,9 +217,7 @@ class PluginObject:
database_setup_in_progress = False # connection mutex
def __init__(self, id, plugin):
"""
PluginObject Class constructor
"""
"""PluginObject Class constructor"""
# plugin specific static class members.
self.id = id # alarm id ; 100.1??
@ -272,27 +270,20 @@ class PluginObject:
# For plugins that have multiple instances like df (filesystem plugin)
# we need to create an instance of this object for each one.
# This dictionary is used to associate a instance with its object.
# This dictionary is used to associate an instance with its object.
self.instance_objects = {}
def _ilog(self, string):
"""
Create a collectd notifier info log with the specified string.
"""
"""Create a collectd notifier info log with the string param"""
collectd.info('%s %s : %s' % (PLUGIN, self.plugin, string))
def _llog(self, string):
"""
Create a collectd notifier info log with the specified string
if debug_lists is True.
"""
"""Create a collectd notifier info log with the string param if debug_lists"""
if debug_lists:
collectd.info('%s %s : %s' % (PLUGIN, self.plugin, string))
def _elog(self, string):
"""
Create a collectd notifier error log with the specified string.
"""
"""Create a collectd notifier error log with the string param"""
collectd.error('%s %s : %s' % (PLUGIN, self.plugin, string))
##########################################################################
@ -310,7 +301,7 @@ class PluginObject:
##########################################################################
def _state_audit(self, location):
""" Log the state of the specified object. """
"""Log the state of the specified object"""
if self.id == ALARM_ID__CPU:
_print_state()
@ -346,7 +337,7 @@ class PluginObject:
##########################################################################
def _manage_change(self, nObject):
""" Log resource instance value on step state change. """
"""Log resource instance value on step state change"""
# filter out messages to ignore ; notifications that have no value
if "has not been updated for" in nObject.message:
@ -495,9 +486,7 @@ class PluginObject:
##########################################################################
def _severity_change(self, entity_id, severity):
"""
Check for a severity change
"""
"""Check for a severity change"""
if entity_id in self.warnings:
self._llog(entity_id + " is already in warnings list")
@ -561,9 +550,7 @@ class PluginObject:
#########################################################################
def _manage_alarm(self, entity_id, severity):
"""
Manage the alarm severity lists and report state change.
"""
"""Manage the alarm severity lists and report state change"""
collectd.debug("%s manage alarm %s %s %s" %
(PLUGIN,
@ -675,10 +662,9 @@ class PluginObject:
#
##########################################################################
def _get_instance_object(self, eid):
"""
Safely get an object from the self instance object list indexed
by eid while locked.
:param eid:
"""Safely get an object from the self instance object dict while locked
:param eid: the index for the instance object dictionary
:return: object or None
"""
@ -707,10 +693,10 @@ class PluginObject:
#
##########################################################################
def _add_instance_object(self, obj, eid):
"""
Update self instance_objects list while locked
"""Update self instance_objects list while locked
:param obj: the object to add
:param eid: indexed by this eid
:param eid: index for instance_objects
:return: nothing
"""
try:
@ -734,9 +720,7 @@ class PluginObject:
#
##########################################################################
def _copy_instance_object(self, object):
"""
Copy select members of self object to target object
"""
"""Copy select members of self object to target object"""
object.resource_name = self.resource_name
object.instance_name = self.instance_name
@ -802,9 +786,7 @@ class PluginObject:
#
##########################################################################
def _create_instance_objects(self):
"""
Create, initialize and add an instance object to this/self plugin
"""
"""Create, initialize and add an instance object to this/self plugin"""
# Create the File System subordinate instance objects.
if self.id == ALARM_ID__DF:
@ -880,9 +862,7 @@ PLUGINS = {
def _get_base_object(alarm_id):
"""
Get the alarm object for the specified alarm id.
"""
"""Get the alarm object for the specified alarm id"""
for plugin in PLUGIN_NAME_LIST:
if PLUGINS[plugin].id == alarm_id:
return PLUGINS[plugin]
@ -890,9 +870,7 @@ def _get_base_object(alarm_id):
def _get_object(alarm_id, eid):
"""
Get the plugin object for the specified alarm id and eid
"""
"""Get the plugin object for the specified alarm id and eid"""
base_obj = _get_base_object(alarm_id)
if len(base_obj.instance_objects):
@ -905,9 +883,7 @@ def _get_object(alarm_id, eid):
def _build_entity_id(plugin, plugin_instance):
"""
Builds an entity id string based on the collectd notification object.
"""
"""Builds an entity id string based on the collectd notification object"""
inst_error = False
@ -968,8 +944,6 @@ def _build_entity_id(plugin, plugin_instance):
def _get_df_mountpoints():
"""
"""
conf_file = PLUGIN_PATH + 'df.conf'
if not os.path.exists(conf_file):
@ -996,9 +970,7 @@ def _get_df_mountpoints():
def _print_obj(obj):
"""
Print a single object
"""
"""Print a single object"""
base_object = False
for plugin in PLUGIN_NAME_LIST:
if PLUGINS[plugin] == obj:
@ -1045,9 +1017,7 @@ def _print_obj(obj):
def _print_state(obj=None):
"""
Print the current object state
"""
"""Print the current object state"""
try:
objs = []
if obj is None:
@ -1069,9 +1039,7 @@ def _print_state(obj=None):
def _database_setup(database):
"""
Setup the influx database for collectd resource samples
"""
"""Setup the influx database for collectd resource samples"""
collectd.info("%s setting up influxdb:%s database" %
(PLUGIN, database))
@ -1144,9 +1112,7 @@ def _database_setup(database):
def _clear_alarm_for_missing_filesystems():
"""
Clear alarmed file systems that are no longer mounted or present
"""
"""Clear alarmed file systems that are no longer mounted or present"""
# get the DF (filesystem plugin) base object.
df_base_obj = PLUGINS[PLUGIN__DF]
@ -1187,7 +1153,7 @@ def _clear_alarm_for_missing_filesystems():
# Initialize each plugin object with plugin specific data.
# Query FM for existing alarms and run with that starting state.
def init_func():
""" Collectd FM Notifier Initialization Function """
"""Collectd FM Notifier Initialization Function"""
PluginObject.lock = Lock()

@ -167,7 +167,7 @@ class LinkObject:
#
##################################################################
def raise_port_alarm(self, network):
""" Raise a port alarm """
"""Raise a port alarm"""
if self.severity != fm_constants.FM_ALARM_SEVERITY_MAJOR:
@ -201,7 +201,7 @@ class LinkObject:
#
##################################################################
def clear_port_alarm(self, network):
""" Clear a port alarm """
"""Clear a port alarm"""
if self.severity != fm_constants.FM_ALARM_SEVERITY_CLEAR:
if manage_alarm(self.name,
@ -269,7 +269,7 @@ class NetworkObject:
#
##################################################################
def raise_iface_alarm(self, severity):
""" Raise an interface alarm """
"""Raise an interface alarm"""
if severity == fm_constants.FM_ALARM_SEVERITY_CLEAR:
collectd.error("%s %s raise alarm called with clear severity" %
@ -310,7 +310,7 @@ class NetworkObject:
#
##################################################################
def clear_iface_alarm(self):
""" Clear an interface alarm """
"""Clear an interface alarm"""
if self.severity != fm_constants.FM_ALARM_SEVERITY_CLEAR:
if manage_alarm(self.name,
@ -343,7 +343,6 @@ class NetworkObject:
#
######################################################################
def manage_iface_alarm(self):
""" """
# Single Link Config
if self.link_two.name is None:
if self.link_one.state == LINK_DOWN:
@ -412,7 +411,7 @@ NETWORKS = [NetworkObject(NETWORK_MGMT),
#
##########################################################################
def get_timestamp(lmon_time):
""" Convert lmon time to fm timestamp time """
"""Convert lmon time to fm timestamp time"""
if lmon_time:
try:
@ -428,7 +427,7 @@ def get_timestamp(lmon_time):
def dump_network_info(network):
""" Log the specified network info """
"""Log the specified network info"""
link_one_event_time = datetime.datetime.fromtimestamp(
float(network.link_one.timestamp)).strftime('%Y-%m-%d %H:%M:%S')
@ -479,7 +478,7 @@ def dump_network_info(network):
#
##########################################################################
def this_hosts_alarm(hostname, eid):
""" Check if the specified eid is for this host """
"""Check if the specified eid is for this host"""
if hostname:
if eid:
@ -527,7 +526,7 @@ def this_hosts_alarm(hostname, eid):
#
##########################################################################
def clear_alarms(alarm_id_list):
""" Clear alarm state of all plugin alarms. """
"""Clear alarm state of all plugin alarms"""
found = False
for alarm_id in alarm_id_list:
alarms = api.get_faults_by_id(alarm_id)
@ -575,7 +574,7 @@ def clear_alarms(alarm_id_list):
#
##########################################################################
def manage_alarm(name, network, level, action, severity, alarm_id, timestamp):
""" Manage raise and clear of port and interface alarms """
"""Manage raise and clear of port and interface alarms"""
ts = datetime.datetime.fromtimestamp(
float(timestamp)).strftime('%Y-%m-%d %H:%M:%S')
@ -638,7 +637,7 @@ def manage_alarm(name, network, level, action, severity, alarm_id, timestamp):
# The config function - called once on collectd process startup
def config_func(config):
""" Configure the plugin """
"""Configure the plugin"""
# Need to update the Link Status Query URL with the port number.
url_updated = False
@ -696,7 +695,7 @@ def config_func(config):
# The init function - called once on collectd process startup
def init_func():
""" Init the plugin """
"""Init the plugin"""
if obj.config_done is False:
collectd.info("%s configuration failed" % PLUGIN)
@ -716,7 +715,7 @@ def init_func():
# The sample read function - called on every audit interval
def read_func():
""" collectd interface monitor plugin read function """
"""collectd interface monitor plugin read function"""
if obj.init_done is False:
init_func()

@ -55,7 +55,7 @@ obj = MEM()
def log_meminfo(plugin, name, meminfo):
""" Log the supplied meminfo """
"""Log the supplied meminfo"""
if debug is False:
return
@ -76,9 +76,7 @@ def log_meminfo(plugin, name, meminfo):
def config_func(config):
"""
Configure the memory usage plugin
"""
"""Configure the memory usage plugin"""
for node in config.children:
key = node.key.lower()

@ -79,9 +79,7 @@ PLUGIN__EXAMPLE = 'example'
class collectdMtceNotifierObject:
def __init__(self, port):
"""
collectdMtceNotifierObject Class constructor
"""
"""collectdMtceNotifierObject Class constructor"""
# default maintenance port
self.port = port
self.addr = None
@ -119,9 +117,7 @@ obj = collectdMtceNotifierObject(MTCE_CMD_RX_PORT)
def _get_active_controller_ip():
"""
Get the active controller host IP
"""
"""Get the active controller host IP"""
try:
obj.addr = socket.getaddrinfo('controller', None)[0][4][0]
@ -134,9 +130,7 @@ def _get_active_controller_ip():
def _df_instance_to_path(df_inst):
"""
Convert a df instance name to a mountpoint
"""
"""Convert a df instance name to a mountpoint"""
# df_root is not a dynamic file system. Ignore that one.
if df_inst == 'df_root':
@ -148,9 +142,7 @@ def _df_instance_to_path(df_inst):
# This function removes degraded file systems that are no longer present.
def _clear_degrade_for_missing_filesystems():
"""
Remove degraded file systems that are no longer mounted or present.
"""
"""Remove degraded file systems that are no longer mounted or present"""
for df_inst in obj.degrade_list:
@ -178,9 +170,7 @@ def _clear_degrade_for_missing_filesystems():
# val = port number
#
def config_func(config):
"""
Configure the maintenance degrade notifier plugin.
"""
"""Configure the maintenance degrade notifier plugin"""
collectd.debug('%s config function' % PLUGIN)
for node in config.children:
@ -200,9 +190,7 @@ def config_func(config):
# Collectd calls this function on startup.
def init_func():
"""
Collectd Mtce Notifier Initialization Function
"""
"""Collectd Mtce Notifier Initialization Function"""
obj.host = os.uname()[1]
collectd.info("%s %s:%s sending to mtce port %d" %
@ -221,9 +209,7 @@ def init_func():
# 4. send mtcAgent the degrade state message.
#
def notifier_func(nObject):
"""
Collectd Mtce Notifier Handler Function
"""
"""Collectd Mtce Notifier Handler Function"""
# Create the resource name from the notifier object.
# format: <plugin name>_<plugin_instance_name>

@ -114,7 +114,7 @@ obj = NtpqObject()
###############################################################################
def _add_unreachable_server(ip=None):
""" Add ip to unreachable_servers list """
"""Add ip to unreachable_servers list"""
if ip:
if ip not in obj.unreachable_servers:
@ -153,7 +153,7 @@ def _add_unreachable_server(ip=None):
###############################################################################
def _raise_alarm(ip=None):
""" Assert an NTP alarm """
"""Assert an NTP alarm"""
if not ip:
# Don't re-raise the alarm if its already raised
@ -221,7 +221,7 @@ def _raise_alarm(ip=None):
###############################################################################
def _clear_base_alarm():
""" Clear the NTP base alarm """
"""Clear the NTP base alarm"""
if api.clear_fault(PLUGIN_ALARMID, obj.base_eid) is False:
collectd.error("%s failed to clear alarm %s:%s" %
@ -254,10 +254,7 @@ def _clear_base_alarm():
###############################################################################
def _remove_ip_from_unreachable_list(ip):
"""
Remove an IP address from the unreachable list and
clear any NTP alarm that might be asserted for it.
"""
"""Remove an IP address from the unreachable list and clear its NTP alarms"""
# remove from unreachable list if its there
if ip and ip in obj.unreachable_servers:
@ -299,7 +296,7 @@ def _remove_ip_from_unreachable_list(ip):
###############################################################################
def _add_ip_to_ntpq_server_list(ip):
""" Add this IP to the list of servers that ntpq reports against. """
"""Add this IP to the list of servers that ntpq reports against"""
if ip not in obj.server_list_ntpq:
obj.server_list_ntpq.append(ip)
@ -321,7 +318,7 @@ def _add_ip_to_ntpq_server_list(ip):
###############################################################################
def _cleanup_stale_servers():
""" Cleanup the server IP tracking lists """
"""Cleanup the server IP tracking lists"""
collectd.debug("%s CLEANUP REACHABLE: %s %s" %
(PLUGIN, obj.server_list_ntpq, obj.reachable_servers))
@ -356,7 +353,7 @@ def _cleanup_stale_servers():
###############################################################################
def _get_ntp_servers():
""" Read the provisioned servers from the ntp conf file """
"""Read the provisioned servers from the ntp conf file"""
with open(PLUGIN_CONF, 'r') as infile:
for line in infile:
@ -401,7 +398,7 @@ def _get_ntp_servers():
###############################################################################
def _is_controller(ip):
""" Returns True if this IP corresponds to one of the controllers """
"""Returns True if this IP corresponds to one of the controllers"""
collectd.debug("%s check if '%s' is a controller ip" % (PLUGIN, ip))
with open('/etc/hosts', 'r') as infile:
@ -433,7 +430,7 @@ def _is_controller(ip):
###############################################################################
def _is_uuid_like(val):
"""Returns validation of a value as a UUID."""
"""Returns validation of a value as a UUID"""
try:
return str(uuid.UUID(val)) == val
except (TypeError, ValueError, AttributeError):
@ -459,7 +456,7 @@ def _is_uuid_like(val):
###############################################################################
def config_func(config):
""" Configure the plugin """
"""Configure the plugin"""
collectd.debug('%s config function' % PLUGIN)
return 0

@ -89,7 +89,7 @@ class PluginObject(object):
###########################################################################
def init_ready(self):
""" Test for system init ready state """
"""Test for system init ready state"""
if os.path.exists(tsc.INITIAL_CONFIG_COMPLETE_FLAG) is False:
self.log_throttle_count += 1
@ -117,7 +117,7 @@ class PluginObject(object):
#
###########################################################################
def gethostname(self):
""" Fetch the hostname """
"""Fetch the hostname"""
# get current hostname
try:
@ -142,7 +142,7 @@ class PluginObject(object):
#
###########################################################################
def is_virtual(self):
""" Check for virtual host """
"""Check for virtual host"""
try:
cmd = '/usr/bin/facter is_virtual'
@ -185,7 +185,7 @@ class PluginObject(object):
#
###########################################################################
def check_for_fit(self, name, unit):
""" Load FIT data into usage if it exists """
"""Load FIT data into usage if it exists"""
fit_file = '/var/run/fit/' + name + '_data'
@ -241,7 +241,7 @@ class PluginObject(object):
#
###########################################################################
def make_http_request(self, url=None, to=None, hdrs=None):
""" Make a blocking HTTP Request and return result """
"""Make a blocking HTTP Request and return result"""
try:
@ -280,7 +280,7 @@ class PluginObject(object):
def is_uuid_like(val):
"""Returns validation of a value as a UUID.
"""Returns validation of a value as a UUID
For our purposes, a UUID is a canonical form string:
aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
@ -292,7 +292,7 @@ def is_uuid_like(val):
def get_severity_str(severity):
""" get string that represents the specified severity """
"""get string that represents the specified severity"""
if severity == fm_constants.FM_ALARM_SEVERITY_CLEAR:
return "clear"

@ -236,7 +236,7 @@ TIMESTAMP_MODE__LEGACY = 'legacy'
#
#####################################################################
def _get_supported_modes(interface):
""" Get the supported modes for the specified interface """
"""Get the supported modes for the specified interface"""
hw_tx = hw_rx = sw_tx = sw_rx = False
modes = []
@ -299,7 +299,7 @@ def _get_supported_modes(interface):
#
#####################################################################
def get_alarm_object(alarm, interface=None):
""" Alarm object lookup """
"""Alarm object lookup"""
for o in ALARM_OBJ_LIST:
# print_alarm_object(o)
@ -329,7 +329,7 @@ def get_alarm_object(alarm, interface=None):
#
#####################################################################
def clear_alarm(eid):
""" Clear the ptp alarm with the specified entity ID """
"""Clear the ptp alarm with the specified entity ID"""
try:
if api.clear_fault(PLUGIN_ALARMID, eid) is True:
@ -364,7 +364,7 @@ def clear_alarm(eid):
#
#####################################################################
def raise_alarm(alarm_cause, interface=None, data=0):
""" Assert a cause based PTP alarm """
"""Assert a cause based PTP alarm"""
collectd.debug("%s Raising Alarm %d" % (PLUGIN, alarm_cause))
@ -461,7 +461,7 @@ def raise_alarm(alarm_cause, interface=None, data=0):
#
#####################################################################
def create_interface_alarm_objects(interface=None):
""" Create alarm objects """
"""Create alarm objects"""
collectd.debug("%s Alarm Object Create: Interface:%s " %
(PLUGIN, interface))
@ -548,7 +548,7 @@ def create_interface_alarm_objects(interface=None):
#
#####################################################################
def read_timestamp_mode():
""" Load timestamping mode """
"""Load timestamping mode"""
if os.path.exists(PLUGIN_CONF_FILE):
current_mode = obj.mode

@ -70,7 +70,7 @@ obj = pc.PluginObject(PLUGIN, "")
# Raise Remote Logging Server Alarm
def raise_alarm():
""" Raise Remote Logging Server Alarm. """
"""Raise Remote Logging Server Alarm"""
repair = 'Ensure Remote Log Server IP is reachable from '
repair += 'Controller through OAM interface; otherwise '
@ -110,7 +110,7 @@ def raise_alarm():
# Clear remote logging server alarm
def clear_alarm():
""" Clear remote logging server alarm """
"""Clear remote logging server alarm"""
try:
if api.clear_fault(PLUGIN_ALARMID, obj.base_eid) is True:
@ -126,7 +126,7 @@ def clear_alarm():
# The config function - called once on collectd process startup
def config_func(config):
""" Configure the plugin """
"""Configure the plugin"""
# all configuration is learned during normal monitoring
obj.config_done = True
@ -135,7 +135,7 @@ def config_func(config):
# The init function - called once on collectd process startup
def init_func():
""" Init the plugin """
"""Init the plugin"""
# remote logging server monitoring is for controllers only
if tsc.nodetype != 'controller':
@ -155,7 +155,7 @@ def init_func():
# The sample read function - called on every audit interval
def read_func():
""" Remote logging server connectivity plugin read function """
"""Remote logging server connectivity plugin read function"""
# remote logging server monitoring is for controllers only
if tsc.nodetype != 'controller':

@ -57,9 +57,7 @@ def configure_debuggubg(debug):
def parse_arguments(show):
"""
Parse command line arguments.
"""
"""Parse command line arguments"""
parser = argparse.ArgumentParser(
prog=os.path.basename(sys.argv[0]),
@ -142,8 +140,7 @@ def parse_arguments(show):
def get_system_creds():
""" Return keystone credentials by sourcing /etc/platform/openrc. """
"""Return keystone credentials by sourcing /etc/platform/openrc"""
d = {}
proc = subprocess.Popen(['bash', '-c',
@ -168,7 +165,7 @@ def get_system_creds():
def convert_to_readable_size(size, orig_unit='B'):
""" Converts size to human readable unit """
"""Converts size to human readable unit"""
units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
# convert original size to bytes
@ -188,8 +185,7 @@ def convert_to_readable_size(size, orig_unit='B'):
def print_disk_view(rows=None, extended=False):
""" Print all summary Disk views using PrettyTable. """
"""Print all summary Disk views using PrettyTable"""
disk_lables_extended = \
['Host', 'Device Node', 'Device Type', 'UUID', 'Size',
@ -216,8 +212,7 @@ def print_disk_view(rows=None, extended=False):
def print_vg_view(rows=None, extended=False):
""" Print all summary VG views using PrettyTable. """
"""Print all summary VG views using PrettyTable"""
vg_labels_extended = \
['Host', 'VG Name', 'UUID', 'VG State', 'VG Size', 'Current LVs',
'Current PVs', 'PV List (name:state:uuid)', 'VG Parameters']
@ -246,11 +241,11 @@ def print_vg_view(rows=None, extended=False):
def get_info_and_display(cc, show=None):
""" Get storage information from server nodes.
"""Get storage information from server nodes
Display the following information in table format.
- disk data of all server nodes
- VG data of all server nodes
Display the following information in table format:
- disk data of all server nodes
- VG data of all server nodes
"""
# get list of server hosts and for each host retrieve
# the disk, lvg. pv list objects.

@ -107,14 +107,15 @@ LIBVIRT_REAP_SEC = LIBVIRT_TIMEOUT_SEC + 2.0
# Define a context manager to suppress stdout and stderr.
class suppress_stdout_stderr(object):
'''
A context manager for doing a "deep suppression" of stdout and stderr in
Python, i.e. will suppress all print, even if the print originates in a
"""Context manager for doing a "deep suppression" of stdout and stderr
i.e. will suppress all print, even if the print originates in a
compiled C/Fortran sub-function.
This will not suppress raised exceptions, since exceptions are printed
This will not suppress raised exceptions, since exceptions are printed
to stderr just before a script exits, and after the context manager has
exited (at least, I think that is why it lets exceptions through).
'''
"""
def __init__(self):
# Open a pair of null files
self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]
@ -140,9 +141,7 @@ def atoi(text):
def natural_keys(text):
'''
alist.sort(key=natural_keys) sorts in human order
'''
"""alist.sort(key=natural_keys) sorts in human order"""
return [atoi(c) for c in re.split('(\d+)', text)]
@ -253,6 +252,7 @@ def help_text_epilog():
class ChoiceOpt(cfg.Opt):
r"""Option with List(String) type
Option with ``type`` :class:`oslo_config.types.List`
:param name: the option's name
:param choices: Optional sequence of either valid values or tuples of valid
@ -269,9 +269,7 @@ class ChoiceOpt(cfg.Opt):
def parse_arguments(debug, show):
"""
Parse command line arguments.
"""
"""Parse command line arguments"""
# Initialize all debug flags to False
define_debug_flags(debug)
@ -357,7 +355,7 @@ def parse_arguments(debug, show):
def configure_logging(logger, level=logging.DEBUG):
""" Configure logger streams and format. """
"""Configure logger streams and format"""
logger.setLevel(level)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(level)
@ -368,8 +366,10 @@ def configure_logging(logger, level=logging.DEBUG):
def _translate_keys(collection, convert):
""" For a collection of elements, translate _info field names
into human-readable names based on a list of conversion tuples.
"""translate elements _info field names into human-readable names
:param collection: dictionary containing the elements to be translated
:param convert: list of conversion tuples
"""
for k, item in collection.items():
keys = list(item.__dict__.keys())
@ -384,7 +384,7 @@ def _translate_keys(collection, convert):
def _translate_extended_states(collection):
""" Return human readable power-state string. """
"""Return human readable power-state string"""
power_states = [
'NOSTATE', # 0x00
'Running', # 0x01
@ -408,7 +408,7 @@ def _translate_extended_states(collection):
def _translate_virDomainState(state):
""" Return human readable virtual domain state string. """
"""Return human readable virtual domain state string"""
states = {}
states[0] = 'NOSTATE'
states[1] = 'Running'
@ -423,7 +423,7 @@ def _translate_virDomainState(state):
def _translate_virVcpuState(state):
""" Return human readable virtual vpu state string. """
"""Return human readable virtual vpu state string"""
states = {}
states[0] = 'Offline'
states[1] = 'Running'
@ -433,7 +433,7 @@ def _translate_virVcpuState(state):
def _mask_to_cpulist(mask=0):
""" Create cpulist from mask, list in socket-core-thread enumerated order.
"""Create cpulist from mask, list in socket-core-thread enumerated order
:param extended: extended info
:param mask: cpuset mask
@ -452,11 +452,11 @@ def _mask_to_cpulist(mask=0):
def string_to_cpulist(cpus_str=''):
''' Convert a string representation to cpulist
"""Convert a string representation to cpulist
:param cpus_str: string containing list cpus, eg., 1,2,6-7
:returns cpulist
'''
"""
# Create list of excluded cpus by parsing excluded_cpulist_str,
# example: 1,2,6-7
@ -480,8 +480,9 @@ def string_to_cpulist(cpus_str=''):
def list_to_range(L=None):
""" Convert a list into a string of comma separate ranges.
E.g., [1,2,3,8,9,15] is converted to '1-3,8-9,15'
"""Convert a list into a string of comma separate ranges
E.g., [1,2,3,8,9,15] is converted to '1-3,8-9,15'
"""
if L is None:
L = []
@ -491,8 +492,9 @@ def list_to_range(L=None):
def range_to_list(csv_range=None):
""" Convert a string of comma separate ranges into an expanded list of
integers. E.g., '1-3,8-9,15' is converted to [1,2,3,8,9,15]
"""Convert a string of comma separate ranges into an expanded list of ints
E.g., '1-3,8-9,15' is converted to [1,2,3,8,9,15]
"""
if not csv_range:
return []
@ -525,9 +527,9 @@ def libvirt_domain_info_worker(tuple_hosts):
def do_libvirt_domain_info(tuple_hosts):
"""
Connect to libvirt for specified host, and retrieve per-domain information
including cpu affinity per vcpu.
"""Connect to libvirt for specified host, and retrieve per-domain information
The information includes cpu affinity per vcpu.
"""
(host) = tuple_hosts
domains = {}
@ -683,9 +685,7 @@ def print_debug_info(tenants=None, regions=None,
topologies=None, topologies_idx=None, topologies_sib=None,
computes_cell=None,
debug=None, show=None):
"""
Print debug information - pretty formatting of various data structures
"""
"""Print debug information - pretty formatting of various data structures"""
pp = pprint.PrettyPrinter(indent=2)
if True in debug.values():
@ -779,7 +779,7 @@ def print_debug_info(tenants=None, regions=None,
def define_debug_flags(debug):
""" Define dictionary of debug flags. """
"""Define dictionary of debug flags"""
opts = ['all',
'creds',
'hypervisors',
@ -800,7 +800,7 @@ def define_debug_flags(debug):
def define_options():
""" Define several groupings with lists of show options. """
"""Define several groupings with lists of show options"""
L_opts = ['brief',
'all',
]
@ -826,7 +826,7 @@ def define_options():
def define_option_flags(show, options=None,
L_opts=None, L_brief=None, L_details=None, L_other=None):
""" Define dictionary of option flags. """
"""Define dictionary of option flags"""
if options is None:
options = []
if L_opts is None:
@ -864,8 +864,7 @@ def print_all_tables(tenants=None,
images_in_use=None,
server_groups_in_use=None,
debug=None, show=None):
""" Print all summary tables using PrettyTable.
"""
"""Print all summary tables using PrettyTable"""
# Print list of aggregates
if show['aggregates']:
print()
@ -1413,8 +1412,7 @@ def print_all_tables(tenants=None,
def _get_host_id(tenant_id=None, host_name=None):
""" Routine defined in nova/api/openstack/compute/views/servers.py .
"""
"""Routine defined in nova/api/openstack/compute/views/servers.py"""
sha_hash = hashlib.sha224(tenant_id + host_name)
return sha_hash.hexdigest()
@ -1425,15 +1423,15 @@ def start_process():
def get_info_and_display(show=None):
""" Get information from various sources (keystone, nova, libvirt).
"""Get information from various sources (keystone, nova, libvirt)
Display the following information in table format.
- nova view of hypervisors and servers
- libvirt view of servers
- nova view of in-progress migrations
- nova view of flavors in-use
- nova view of volumes and images in-use
- nova view of server-groups in-use
Display the following information in table format.
- nova view of hypervisors and servers
- libvirt view of servers
- nova view of in-progress migrations
- nova view of flavors in-use
- nova view of volumes and images in-use
- nova view of server-groups in-use
"""
# Keep track of mismatches found when validating data sources

@ -50,9 +50,6 @@ commands =
# H237: module exception is removed in Python 3
# H238: old style class declaration, use new style
# H306: imports not in alphabetical order
# H401: docstring should not start with a space
# H404: multi line docstring should start without a leading new line
# H405: multi line docstring summary not separated with an empty line
# W series
# W191 indentation contains tabs
# W291 trailing whitespace
@ -67,7 +64,7 @@ commands =
# F series
# F401 'module' imported but unused
ignore = E265,E266,E402,E501,E722
H101,H102,H104,H201,H238,H237,H306,H401,H404,H405,
H101,H102,H104,H201,H238,H237,H306,
W191,W291,W391,W503,
B001,B007,B009,B010,B301,
F401

@ -6,7 +6,7 @@
class ValidateError(Exception):
"""Base class for license validation exceptions."""
"""Base class for license validation exceptions"""
def __init__(self, message=None):
self.message = message