diff --git a/sysinv/sysinv/sysinv/sysinv/agent/node.py b/sysinv/sysinv/sysinv/sysinv/agent/node.py
index 5516939c41..c4e459bc87 100644
--- a/sysinv/sysinv/sysinv/sysinv/agent/node.py
+++ b/sysinv/sysinv/sysinv/sysinv/agent/node.py
@@ -295,7 +295,7 @@ class NodeOperator(object):
             else:
                 vswitch_hugepages_nr = VSWITCH_REAL_MEMORY_MB / hugepage_size
 
-            ## Create a new set of dict attributes
+            # Create a new set of dict attributes
             hp_attr = {'vswitch_hugepages_size_mib': hugepage_size,
                        'vswitch_hugepages_nr': vswitch_hugepages_nr,
                        'vswitch_hugepages_avail': 0}
diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/interface.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/interface.py
index 823c283da3..a6b6401411 100644
--- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/interface.py
+++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/interface.py
@@ -243,15 +243,15 @@ class Interface(base.APIBase):
 
         networktype = cutils.get_primary_network_type(rpc_interface.as_dict())
         if networktype and networktype not in address.ALLOWED_NETWORK_TYPES:
-            ## Hide this functionality when the network type does not support
-            ## setting or updating the network type
+            # Hide this functionality when the network type does not support
+            # setting or updating the network type
             interface.ipv4_mode = wtypes.Unset
             interface.ipv6_mode = wtypes.Unset
             interface.ipv4_pool = wtypes.Unset
             interface.ipv6_pool = wtypes.Unset
 
-        ## It is not necessary to show these fields if the interface is not
-        ## configured to allocate addresses from a pool
+        # It is not necessary to show these fields if the interface is not
+        # configured to allocate addresses from a pool
         if interface.ipv4_mode != constants.IPV4_POOL:
             interface.ipv4_pool = wtypes.Unset
         if interface.ipv6_mode != constants.IPV6_POOL:
@@ -475,7 +475,7 @@ class InterfaceController(rest.RestController):
             ports = p.name
             break
 
-        ## Process updates
+        # Process updates
         vlan_id = None
         delete_addressing = False
 
@@ -1041,14 +1041,14 @@ def _check_network_type_and_port(interface, ihost,
 
 
 def _check_address_mode(op, interface, ihost, existing_interface):
-    ## Check for valid values:
+    # Check for valid values:
     interface_id = interface['id']
     ipv4_mode = interface.get('ipv4_mode')
     ipv6_mode = interface.get('ipv6_mode')
     object_utils.ipv4_mode_or_none(ipv4_mode)
     object_utils.ipv6_mode_or_none(ipv6_mode)
 
-    ## Check for supported interface network types
+    # Check for supported interface network types
     network_type = cutils.get_primary_network_type(interface)
     if network_type not in address.ALLOWED_NETWORK_TYPES:
         if (ipv4_mode and ipv4_mode != constants.IPV4_DISABLED):
@@ -1058,13 +1058,13 @@ def _check_address_mode(op, interface, ihost, existing_interface):
             raise exception.AddressModeOnlyOnSupportedTypes(
                 types=", ".join(address.ALLOWED_NETWORK_TYPES))
 
-    ## Check for infrastructure specific requirements
+    # Check for infrastructure specific requirements
     if network_type == constants.NETWORK_TYPE_INFRA:
         if ipv4_mode != constants.IPV4_STATIC:
             if ipv6_mode != constants.IPV6_STATIC:
                 raise exception.AddressModeMustBeStaticOnInfra()
 
-    ## Check for valid combinations of mode+pool
+    # Check for valid combinations of mode+pool
     ipv4_pool = interface.get('ipv4_pool')
     ipv6_pool = interface.get('ipv6_pool')
     if ipv4_mode != constants.IPV4_POOL and ipv4_pool:
@@ -1078,7 +1078,7 @@ def _check_address_mode(op, interface, ihost, existing_interface):
         pool = pecan.request.dbapi.address_pool_get(ipv4_pool)
         if pool['family'] != constants.IPV4_FAMILY:
             raise exception.AddressPoolFamilyMismatch()
-        ## Convert to UUID
+        # Convert to UUID
         ipv4_pool = pool['uuid']
         interface['ipv4_pool'] = ipv4_pool
 
@@ -1093,12 +1093,12 @@ def _check_address_mode(op, interface, ihost, existing_interface):
         pool = pecan.request.dbapi.address_pool_get(ipv6_pool)
         if pool['family'] != constants.IPV6_FAMILY:
             raise exception.AddressPoolFamilyMismatch()
-        ## Convert to UUID
+        # Convert to UUID
         ipv6_pool = pool['uuid']
         interface['ipv6_pool'] = ipv6_pool
 
     if existing_interface:
-        ## Check for valid transitions
+        # Check for valid transitions
        existing_ipv4_mode = existing_interface.get('ipv4_mode')
         if ipv4_mode != existing_ipv4_mode:
             if (existing_ipv4_mode == constants.IPV4_STATIC and
@@ -1349,8 +1349,8 @@ def _check_interface_data(op, interface, ihost, existing_interface):
                            {'ifname': i.ifname, 'network': pn})
                     raise wsme.exc.ClientSideError(msg)
 
-        ## Send the interface and provider network details to neutron for
-        ## additional validation.
+        # Send the interface and provider network details to neutron for
+        # additional validation.
         _neutron_bind_interface(ihost, interface, test=True)
         # Send the shared data interface(s) and provider networks details to
         # neutron for additional validation, if required
@@ -1358,11 +1358,11 @@ def _check_interface_data(op, interface, ihost, existing_interface):
 
     elif (not _neutron_providernet_extension_supported() and
           any(nt in PCI_NETWORK_TYPES for nt in networktypelist)):
-        ## When the neutron implementation is not our own and it does not
-        ## support our provider network extension we still want to do minimal
-        ## validation of the provider network list but we cannot do more
-        ## complex validation because we do not have any additional information
-        ## about the provider networks.
+        # When the neutron implementation is not our own and it does not
+        # support our provider network extension we still want to do minimal
+        # validation of the provider network list but we cannot do more
+        # complex validation because we do not have any additional information
+        # about the provider networks.
         if not providernetworks:
             msg = _("At least one provider network must be selected.")
             raise wsme.exc.ClientSideError(msg)
@@ -1525,7 +1525,7 @@ def _update_address_mode(interface, family, mode, pool):
     interface_id = interface['id']
     pool_id = pecan.request.dbapi.address_pool_get(pool)['id'] if pool else None
     try:
-        ## retrieve the existing value and compare
+        # retrieve the existing value and compare
         existing = pecan.request.dbapi.address_mode_query(
             interface_id, family)
         if existing.mode == mode:
@@ -1537,7 +1537,7 @@ def _update_address_mode(interface, family, mode, pool):
             pecan.request.dbapi.addresses_destroy_by_interface(
                 interface_id, family)
     except exception.AddressModeNotFoundByFamily:
-        ## continue and update DB with new record
+        # continue and update DB with new record
         pass
     updates = {'family': family, 'mode': mode, 'address_pool_id': pool_id}
     pecan.request.dbapi.address_mode_update(interface_id, updates)
@@ -1609,8 +1609,8 @@ def _add_extended_attributes(ihost, interface, attributes):
     interface_data = interface.as_dict()
     networktype = cutils.get_primary_network_type(interface_data)
     if networktype not in address.ALLOWED_NETWORK_TYPES:
-        ## No need to create new address mode records if the interface type
-        ## does not support it
+        # No need to create new address mode records if the interface type
+        # does not support it
         return
     if attributes.get('ipv4_mode'):
         _update_ipv4_address_mode(interface_data,
@@ -1864,7 +1864,7 @@ def _neutron_providernet_list():
 
 def _update_shared_interface_neutron_bindings(ihost, interface, test=False):
     if not _neutron_host_extension_supported():
-        ## No action required if neutron does not support the host extension
+        # No action required if neutron does not support the host extension
         return
     shared_data_interfaces = _get_shared_data_interfaces(ihost, interface)
     for shared_interface in shared_data_interfaces:
@@ -1881,10 +1881,10 @@ def _neutron_bind_interface(ihost, interface, test=False):
     ihost_uuid = ihost['uuid']
     recordtype = ihost['recordtype']
     if recordtype in ['profile']:
-        ## No action required if we are operating on a profile record
+        # No action required if we are operating on a profile record
         return
     if not _neutron_host_extension_supported():
-        ## No action required if neutron does not support the host extension
+        # No action required if neutron does not support the host extension
         return
     networktypelist = []
     if interface['networktype']:
@@ -1903,7 +1903,7 @@ def _neutron_bind_interface(ihost, interface, test=False):
     providernetworks = interface.get('providernetworks', '')
     vlans = _get_interface_vlans(ihost_uuid, interface)
    try:
-        ## Send the request to neutron
+        # Send the request to neutron
         pecan.request.rpcapi.neutron_bind_interface(
             pecan.request.context,
             ihost_uuid, interface_uuid, networktype, providernetworks,
@@ -1920,13 +1920,13 @@ def _neutron_unbind_interface(ihost, interface):
     ihost_uuid = ihost['uuid']
     recordtype = ihost['recordtype']
     if recordtype in ['profile']:
-        ## No action required if we are operating on a profile record
+        # No action required if we are operating on a profile record
         return
     if not _neutron_host_extension_supported():
-        ## No action required if neutron does not support the host extension
+        # No action required if neutron does not support the host extension
         return
     try:
-        ## Send the request to neutron
+        # Send the request to neutron
         pecan.request.rpcapi.neutron_unbind_interface(
             pecan.request.context, ihost_uuid, interface['uuid'])
     except rpc_common.RemoteError as e:
@@ -2030,7 +2030,7 @@ def _create(interface,
             from_profile=False):
     interface.update({'forihostid': ihost['id'],
                       'ihost_uuid': ihost['uuid']})
 
-    ## Assign an UUID if not already done.
+    # Assign an UUID if not already done.
     if not interface.get('uuid'):
         interface['uuid'] = str(uuid.uuid4())
diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/profile.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/profile.py
index 64badf5a52..b31d5a8dc4 100644
--- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/profile.py
+++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/profile.py
@@ -1598,7 +1598,7 @@ def _create_storage_profile(profile_name, profile_node):
     for disk in disks:
         dev_path = disk.get('path')
         dev_func = disk.get('volumeFunc')
-        ## Convert from GiB to MiB
+        # Convert from GiB to MiB
         dev_size = int(disk.get('size')) * 1024
         journal_size = int(disk.get('journalSize', '0')) * 1024
         tier = disk.get('tier', constants.SB_TIER_DEFAULT_NAMES[
diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/route.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/route.py
index bc108eb47f..15ad240cbd 100644
--- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/route.py
+++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/route.py
@@ -41,7 +41,7 @@ from sysinv.openstack.common.gettextutils import _
 
 LOG = log.getLogger(__name__)
 
-## Maximum number of equal cost paths for a destination subnet
+# Maximum number of equal cost paths for a destination subnet
 SYSINV_ROUTE_MAX_PATHS = 4
 
 # Defines the list of interface network types that support routes
@@ -90,7 +90,7 @@ class Route(base.APIBase):
         self.fields = objects.route.fields.keys()
         for k in self.fields:
             if not hasattr(self, k):
-                ## Skip fields that we choose to hide
+                # Skip fields that we choose to hide
                 continue
             setattr(self, k, kwargs.get(k, wtypes.Unset))
 
@@ -346,15 +346,15 @@ class RouteController(rest.RestController):
         route = route.as_dict()
         route['uuid'] = str(uuid.uuid4())
         interface_uuid = route.pop('interface_uuid')
-        ## Query parent object references
+        # Query parent object references
         host_id, interface_id = self._get_parent_id(interface_uuid)
-        ## Check for semantic conflicts
+        # Check for semantic conflicts
         self._check_interface_type(interface_id)
         self._check_allowed_routes(interface_id, route)
         self._check_route_conflicts(host_id, route)
         self._check_local_gateway(host_id, route)
         self._check_reachable_gateway(interface_id, route)
-        ## Attempt to create the new route record
+        # Attempt to create the new route record
         result = pecan.request.dbapi.route_create(interface_id, route)
         pecan.request.rpcapi.update_route_config(pecan.request.context)
 
diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/sdn_controller.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/sdn_controller.py
index 41bc7d8afa..1a89d27ed4 100644
--- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/sdn_controller.py
+++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/sdn_controller.py
@@ -45,7 +45,7 @@ from fm_api import fm_api
 
 LOG = log.getLogger(__name__)
 
-### UTILS ###
+# UTILS
 def _getIPAddressFromHostname(hostname):
     """ Dual stacked version of gethostbyname
 
diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage_ceph.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage_ceph.py
index 37b3349fc6..f177af17a2 100644
--- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage_ceph.py
+++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage_ceph.py
@@ -540,7 +540,7 @@ def _check_backend_ceph(req, storage_ceph, confirmed=False):
 
 def check_and_update_services(storage_ceph):
     req_services = api_helper.getListFromServices(storage_ceph)
-    ## If glance/nova is already a service on an external ceph backend, remove it from there
+    # If glance/nova is already a service on an external ceph backend, remove it from there
     check_svcs = [constants.SB_SVC_GLANCE, constants.SB_SVC_NOVA]
     check_data = {constants.SB_SVC_GLANCE: ['glance_pool'],
                   constants.SB_SVC_NOVA: ['ephemeral_pool']}
diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage_ceph_external.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage_ceph_external.py
index 87aca0026f..e943d0c38c 100644
--- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage_ceph_external.py
+++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage_ceph_external.py
@@ -303,7 +303,7 @@ def _discover_and_validate_backend_hiera_data(caps_dict):
 
 def _check_and_update_services(storage_ceph_ext):
     svcs = api_helper.getListFromServices(storage_ceph_ext)
-    ## If glance/nova is already a service on other rbd backend, remove it from there
+    # If glance/nova is already a service on other rbd backend, remove it from there
     check_svcs = [constants.SB_SVC_GLANCE, constants.SB_SVC_NOVA]
     for s in check_svcs:
         if s in svcs:
diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage_external.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage_external.py
index e8bde69243..0e3fac0904 100755
--- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage_external.py
+++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/storage_external.py
@@ -369,7 +369,7 @@ def _apply_backend_changes(op, sb_obj):
                 sb_obj.uuid,
                 {'state': constants.SB_STATE_CONFIGURED})
 
-            ## update shared_services
+            # update shared_services
             s_s = utils.get_shared_services()
             shared_services = [] if s_s is None else ast.literal_eval(s_s)
 
diff --git a/sysinv/sysinv/sysinv/sysinv/common/constants.py b/sysinv/sysinv/sysinv/sysinv/common/constants.py
index 9490331e6d..6c438cf6e8 100644
--- a/sysinv/sysinv/sysinv/sysinv/common/constants.py
+++ b/sysinv/sysinv/sysinv/sysinv/common/constants.py
@@ -862,7 +862,7 @@ SERVICE_PARAM_HORIZON_AUTH_LOCKOUT_RETRIES = \
 SERVICE_PARAM_HORIZON_AUTH_LOCKOUT_PERIOD_SEC_DEFAULT = 300
 SERVICE_PARAM_HORIZON_AUTH_LOCKOUT_RETRIES_DEFAULT = 3
 
 
-#### NEUTRON Service Parameters ####
+# NEUTRON Service Parameters
 SERVICE_PARAM_NAME_ML2_EXTENSION_DRIVERS = 'extension_drivers'
 SERVICE_PARAM_NAME_ML2_MECHANISM_DRIVERS = 'mechanism_drivers'
diff --git a/sysinv/sysinv/sysinv/sysinv/common/retrying.py b/sysinv/sysinv/sysinv/sysinv/common/retrying.py
index 3ed312da22..9ec0443961 100644
--- a/sysinv/sysinv/sysinv/sysinv/common/retrying.py
+++ b/sysinv/sysinv/sysinv/sysinv/common/retrying.py
@@ -1,16 +1,16 @@
-## Copyright 2013-2014 Ray Holder
-##
-## Licensed under the Apache License, Version 2.0 (the "License");
-## you may not use this file except in compliance with the License.
-## You may obtain a copy of the License at
-##
-## http://www.apache.org/licenses/LICENSE-2.0
-##
-## Unless required by applicable law or agreed to in writing, software
-## distributed under the License is distributed on an "AS IS" BASIS,
-## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-## See the License for the specific language governing permissions and
-## limitations under the License.
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
 import random
 import six
diff --git a/sysinv/sysinv/sysinv/sysinv/conductor/manager.py b/sysinv/sysinv/sysinv/sysinv/conductor/manager.py
index e734890643..b2585f6200 100644
--- a/sysinv/sysinv/sysinv/sysinv/conductor/manager.py
+++ b/sysinv/sysinv/sysinv/sysinv/conductor/manager.py
@@ -2463,22 +2463,22 @@ class ConductorManager(service.PeriodicService):
         host on the specified numa node."""
         functions = []
         cpu_count = self._get_node_cpu_count(cpu_list, node)
-        ## Determine how many platform cpus need to be reserved
+        # Determine how many platform cpus need to be reserved
         count = self._get_default_platform_cpu_count(
             host, node, cpu_count, hyperthreading)
         for i in range(0, count):
             functions.append(constants.PLATFORM_FUNCTION)
-        ## Determine how many vswitch cpus need to be reserved
+        # Determine how many vswitch cpus need to be reserved
         count = self._get_default_vswitch_cpu_count(
             host, node, cpu_count, hyperthreading)
         for i in range(0, count):
             functions.append(constants.VSWITCH_FUNCTION)
-        ## Determine how many shared cpus need to be reserved
+        # Determine how many shared cpus need to be reserved
         count = self._get_default_shared_cpu_count(
             host, node, cpu_count, hyperthreading)
         for i in range(0, count):
             functions.append(constants.SHARED_FUNCTION)
-        ## Assign the default function to the remaining cpus
+        # Assign the default function to the remaining cpus
         for i in range(0, (cpu_count - len(functions))):
             functions.append(cpu_utils.get_default_function(host))
         return functions
@@ -2723,13 +2723,13 @@ class ConductorManager(service.PeriodicService):
 
             mem_dict.update(i)
 
-            ## Do not allow updates to the amounts of reserved memory.
+            # Do not allow updates to the amounts of reserved memory.
             mem_dict.pop('platform_reserved_mib', None)
 
-            ## numa_node is not stored against imemory table
+            # numa_node is not stored against imemory table
             mem_dict.pop('numa_node', None)
 
-            ## clear the pending hugepage number for unlocked nodes
+            # clear the pending hugepage number for unlocked nodes
             if ihost.administrative == constants.ADMIN_UNLOCKED:
                 mem_dict['vm_hugepages_nr_2M_pending'] = None
                 mem_dict['vm_hugepages_nr_1G_pending'] = None
@@ -2738,7 +2738,7 @@ class ConductorManager(service.PeriodicService):
                 imems = self.dbapi.imemory_get_by_ihost_inode(ihost_uuid,
                                                               inode_uuid)
                 if not imems:
-                    ## Set the amount of memory reserved for platform use.
+                    # Set the amount of memory reserved for platform use.
                     mem_dict.update(self._get_platform_reserved_memory(
                         ihost, i['numa_node']))
                     self.dbapi.imemory_create(forihostid, mem_dict)
@@ -2754,7 +2754,7 @@ class ConductorManager(service.PeriodicService):
                         self.dbapi.imemory_update(imem['uuid'],
                                                   mem_dict)
             except:
-                ## Set the amount of memory reserved for platform use.
+                # Set the amount of memory reserved for platform use.
                 mem_dict.update(self._get_platform_reserved_memory(
                     ihost, i['numa_node']))
                 self.dbapi.imemory_create(forihostid, mem_dict)
@@ -4427,7 +4427,7 @@ class ConductorManager(service.PeriodicService):
                     self._update_alarm_status(context, standby_host)
 
         else:
-            ## Ignore the reboot required bit for active controller when doing the comparison
+            # Ignore the reboot required bit for active controller when doing the comparison
             active_config_target_flipped = None
             if active_host and active_host.config_target:
                 active_config_target_flipped = self._config_flip_reboot_required(active_host.config_target)
@@ -6086,8 +6086,8 @@ class ConductorManager(service.PeriodicService):
         """
         LOG.info("Ceph manifests success on host: %s" % host_uuid)
 
-        ## As we can have multiple external_ceph backends, need to find the one
-        ## that is in configuring state.
+        # As we can have multiple external_ceph backends, need to find the one
+        # that is in configuring state.
         ceph_conf = StorageBackendConfig.get_configuring_target_backend(
             self.dbapi,
             target=constants.SB_TYPE_CEPH_EXTERNAL)
@@ -6142,8 +6142,8 @@ class ConductorManager(service.PeriodicService):
         args = {'host': host_uuid, 'error': error}
         LOG.error("Ceph external manifests failed on host: %(host)s. Error: %(error)s" % args)
 
-        ## As we can have multiple external_ceph backends, need to find the one
-        ## that is in configuring state.
+        # As we can have multiple external_ceph backends, need to find the one
+        # that is in configuring state.
         ceph_conf = StorageBackendConfig.get_configuring_target_backend(
             self.dbapi,
             target=constants.SB_TYPE_CEPH_EXTERNAL)
diff --git a/sysinv/sysinv/sysinv/tox.ini b/sysinv/sysinv/sysinv/tox.ini
index 2e0c238632..9ef83dcae9 100644
--- a/sysinv/sysinv/sysinv/tox.ini
+++ b/sysinv/sysinv/sysinv/tox.ini
@@ -81,7 +81,7 @@ commands =
 # H231..H238 are python3 compatability
 # H401,H403,H404,H405 are docstring and not important
 [flake8]
-ignore = E501,E127,E128,E231,E266,E402,E203,E126,E722,H101,H102,H104,H105,H231,H232,H233,H234,H235,H236,H237,H238,H401,H403,H404,H405
+ignore = E501,E127,E128,E231,E402,E203,E126,E722,H101,H102,H104,H105,H231,H232,H233,H234,H235,H236,H237,H238,H401,H403,H404,H405
 builtins = _
 
 [testenv:flake8]