Fixing flake8 (pyflakes) errors of type F841: local variable is assigned to but never used

Change-Id: Ie03201b9d73fc201d1e2de8a2b0585897381273a
Story: 2002888
Task: 23097
Signed-off-by: Jack Ding <jack.ding@windriver.com>
This commit is contained in:
Mathieu Robinson 2018-06-18 16:01:05 -04:00 committed by Jack Ding
parent 3926523fab
commit 73fcbfeff7
43 changed files with 264 additions and 269 deletions

View File

@ -214,8 +214,8 @@ class AgentManager(service.PeriodicService):
cmd = '/usr/bin/affine-interrupts.sh %s %s' % \
(info['name'], cpulist)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
output = proc.communicate()[0]
LOG.debug("%s return %d" % (cmd, proc.returncode))
proc.communicate()
LOG.info("%s return %d" % (cmd, proc.returncode))
if proc.returncode == 1:
LOG.error("Failed to affine %s %s interrupts with %s" %
(info['networktype'], info['name'], cpulist))
@ -1677,7 +1677,7 @@ class AgentManager(service.PeriodicService):
stdout = stdout.strip()
iscsi_initiator_name = stdout.split('=')[-1]
LOG.info("iscsi initiator name = %s" % iscsi_initiator_name)
except Exception as e:
except Exception:
LOG.error("Failed retrieving iscsi initiator name")
return iscsi_initiator_name

View File

@ -293,7 +293,7 @@ class AddressPoolController(rest.RestController):
def _check_name_conflict(self, addrpool):
    """Raise AddressPoolAlreadyExists if a pool with this name exists.

    EAFP existence check: the lookup succeeding means the name is
    taken; AddressPoolNotFound means the name is free and is swallowed
    deliberately.

    :param addrpool: dict-like with a 'name' key naming the pool.
    :raises exception.AddressPoolAlreadyExists: if the name is in use.
    """
    try:
        # The returned pool object is intentionally discarded (F841);
        # only the success/failure of the lookup matters here.
        pecan.request.dbapi.address_pool_get(addrpool['name'])
    except exception.AddressPoolNotFound:
        return
    # Raised outside the try so it cannot interact with the handler.
    raise exception.AddressPoolAlreadyExists(name=addrpool['name'])

View File

@ -244,7 +244,7 @@ class ClusterController(rest.RestController):
def _check_name_conflict(self, cluster):
    """Raise ClusterAlreadyExists if a cluster with this name exists.

    :param cluster: dict-like with a 'name' key naming the cluster.
    :raises exception.ClusterAlreadyExists: if the name is in use.
    """
    try:
        # Result discarded (F841): this is an existence probe only.
        pecan.request.dbapi.cluster_get(cluster['name'])
    except exception.ClusterNotFound:
        return
    # Report the conflicting *name*, not the fetched cluster object.
    raise exception.ClusterAlreadyExists(name=cluster['name'])

View File

@ -91,10 +91,7 @@ class HostCpuProfile(CpuProfile):
if self.number_of_cpu == profile.number_of_cpu and \
self.cores_per_cpu == profile.cores_per_cpu:
return self.check_profile_core_functions(profile)
else:
errorstring = "Profile is not applicable to host"
return False
return False # Profile is not applicable to host
def check_profile_core_functions(self, profile):
platform_cores = 0

View File

@ -1412,8 +1412,7 @@ class HostController(rest.RestController):
ihost_obj = objects.host.get_by_uuid(pecan.request.context,
ihost_obj.uuid)
mgmt_network = pecan.request.dbapi.network_get_by_type(
constants.NETWORK_TYPE_MGMT)
pecan.request.dbapi.network_get_by_type(constants.NETWORK_TYPE_MGMT)
# Configure the new ihost
ihost_ret = pecan.request.rpcapi.configure_ihost(pecan.request.context,
@ -1474,7 +1473,7 @@ class HostController(rest.RestController):
LOG.info("VIM notify add host add %s subfunctions=%s" % (
ihost_obj['hostname'], subfunctions))
try:
vim_resp = vim_api.vim_host_add(
vim_api.vim_host_add(
self._api_token,
ihost_obj['uuid'],
ihost_obj['hostname'],
@ -1603,10 +1602,10 @@ class HostController(rest.RestController):
if len(ihost_obj) != 1:
raise Exception("Unexpected: no/more_than_one host(s) contain(s) a management mac address from local network adapters")
result = self._patch(ihost_obj[0]['uuid'],
self._patch(ihost_obj[0]['uuid'],
changed_paths, None)
else:
result = self._do_post(new_host)
self._do_post(new_host)
if new_host['power_on'] is not None and new_host['bm_type'] is None:
success_str = "%s\n %s Warning: Ignoring <power_on> due to insufficient board management (bm) data." % (success_str, new_host['hostname'])
@ -1816,7 +1815,6 @@ class HostController(rest.RestController):
self._optimize_delta_handling(delta_handle)
host_new_state = []
if 'administrative' in delta or \
'operational' in delta:
self.stage_administrative_update(hostupdate)
@ -1874,7 +1872,7 @@ class HostController(rest.RestController):
LOG.info("Notify VIM host action %s action=%s" % (
ihost_obj['hostname'], action))
try:
vim_resp = vim_api.vim_host_action(
vim_api.vim_host_action(
self._api_token,
ihost_obj['uuid'],
ihost_obj['hostname'],
@ -2030,7 +2028,7 @@ class HostController(rest.RestController):
LOG.info("sysinv notify add host add %s subfunctions=%s" %
(ihost_obj['hostname'], ihost_obj['subfunctions']))
try:
vim_resp = vim_api.vim_host_add(
vim_api.vim_host_add(
self._api_token,
ihost_obj['uuid'],
ihost_obj['hostname'],
@ -2073,7 +2071,7 @@ class HostController(rest.RestController):
# self._api_token.is_expired():
# self._api_token = rest_api.get_token()
vim_resp = vim_api.vim_host_add(
vim_api.vim_host_add(
self._api_token,
ihost['uuid'],
ihost['hostname'],
@ -2173,7 +2171,7 @@ class HostController(rest.RestController):
# self._api_token.is_expired():
# self._api_token = rest_api.get_token()
vim_resp = vim_api.vim_host_delete(
vim_api.vim_host_delete(
self._api_token,
ihost.uuid,
ihost.hostname,
@ -2293,7 +2291,7 @@ class HostController(rest.RestController):
# self._api_token.is_expired():
# self._api_token = rest_api.get_token()
system = pecan.request.dbapi.isystem_get_one()
response = patch_api.patch_drop_host(
patch_api.patch_drop_host(
token=self._api_token,
timeout=constants.PATCH_DEFAULT_TIMEOUT_IN_SECS,
hostname=ihost.hostname,
@ -2324,7 +2322,7 @@ class HostController(rest.RestController):
return
try:
upgrade = pecan.request.dbapi.software_upgrade_get_one()
pecan.request.dbapi.software_upgrade_get_one()
except exception.NotFound:
return
@ -4943,7 +4941,7 @@ class HostController(rest.RestController):
try:
ihost_stors = pecan.request.dbapi.ihost_get_by_personality(
personality=constants.STORAGE)
except Exception as e:
except Exception:
raise wsme.exc.ClientSideError(
_("Can not unlock a compute node until at "
"least one storage node is unlocked and enabled."))
@ -4999,7 +4997,7 @@ class HostController(rest.RestController):
try:
if not pecan.request.rpcapi.restore_ceph_config(pecan.request.context):
raise Exception()
except Exception as e:
except Exception:
raise wsme.exc.ClientSideError(
_("Restore Ceph config failed. Retry unlocking storage node."))
@ -5461,9 +5459,7 @@ class HostController(rest.RestController):
ila_networktype = [network.strip() for network in ila.networktype.split(",")]
if any(network in ila_networktype for network in iif_networktype):
idata['imtu'] = ila.imtu
u_interface = \
pecan.request.dbapi.iinterface_update(
iif.uuid, idata)
pecan.request.dbapi.iinterface_update(iif.uuid, idata)
break
def stage_action(self, action, hostupdate):

View File

@ -737,7 +737,6 @@ def _check_interface_vlan_id(op, interface, ihost, from_profile=False):
def _check_interface_name(op, interface, ihost, from_profile=False):
ihost_id = interface['forihostid']
ihost_uuid = interface['ihost_uuid']
ifname = interface['ifname']
iftype = interface['iftype']
@ -1138,8 +1137,6 @@ def _check_interface_data(op, interface, ihost, existing_interface):
# Get providernet dict
all_providernetworks = _neutron_providernet_list()
providernetworksdict = _get_providernetworksdict(
all_providernetworks, providernetworks)
# Check interface name for validity
_check_interface_name(op, interface, ihost, existing_interface)
@ -1420,7 +1417,6 @@ def _check_interface_data(op, interface, ihost, existing_interface):
constants.NETWORK_TYPE_INFRA in networktypelist):
host_list = pecan.request.dbapi.ihost_get_by_personality(
personality=constants.CONTROLLER)
marker_obj = None
infra_on_controller = False
for h in host_list:
# find any interface in controller host that is of type infra
@ -1529,7 +1525,6 @@ def _check_ports(op, interface, ihost, ports):
def _update_address_mode(interface, family, mode, pool):
interface_id = interface['id']
existing_pool = None
pool_id = pecan.request.dbapi.address_pool_get(pool)['id'] if pool else None
try:
## retrieve the existing value and compare
@ -1911,7 +1906,7 @@ def _neutron_bind_interface(ihost, interface, test=False):
vlans = _get_interface_vlans(ihost_uuid, interface)
try:
## Send the request to neutron
valid = pecan.request.rpcapi.neutron_bind_interface(
pecan.request.rpcapi.neutron_bind_interface(
pecan.request.context,
ihost_uuid, interface_uuid, networktype, providernetworks,
interface['imtu'], vlans=vlans, test=test)
@ -1934,7 +1929,7 @@ def _neutron_unbind_interface(ihost, interface):
return
try:
## Send the request to neutron
valid = pecan.request.rpcapi.neutron_unbind_interface(
pecan.request.rpcapi.neutron_unbind_interface(
pecan.request.context, ihost_uuid, interface['uuid'])
except rpc_common.RemoteError as e:
raise wsme.exc.ClientSideError(str(e.value))

View File

@ -531,7 +531,7 @@ def _check_memory(rpc_port, ihost, platform_reserved_mib=None,
except ValueError:
raise wsme.exc.ClientSideError((
"Platform memory must be a number"))
if int(platform_reserved_mib) < 0:
if val < 0:
raise wsme.exc.ClientSideError((
"Platform memory must be greater than zero"))
@ -630,7 +630,7 @@ def _check_huge_values(rpc_port, patch, vm_hugepages_nr_2M=None,
except ValueError:
raise wsme.exc.ClientSideError(_(
"VM huge pages 1G must be a number"))
if int(vm_hugepages_nr_1G) < 0:
if val < 0:
raise wsme.exc.ClientSideError(_(
"VM huge pages 1G must be greater than or equal to zero"))
@ -645,15 +645,6 @@ def _check_huge_values(rpc_port, patch, vm_hugepages_nr_2M=None,
# Update the number of available huge pages
num_2M_for_1G = 512
if rpc_port['vm_hugepages_nr_2M']:
old_nr_2M = int(rpc_port['vm_hugepages_nr_2M'])
else:
old_nr_2M = 0
if rpc_port['vm_hugepages_nr_1G']:
old_nr_1G = int(rpc_port['vm_hugepages_nr_1G'])
else:
old_nr_1G = 0
# None == unchanged
if vm_hugepages_nr_1G is not None:

View File

@ -212,7 +212,7 @@ def _check_extoam_data(extoam_orig, extoam, region_config=False):
try:
utils.is_valid_subnet(subnet)
except Exception as e:
except Exception:
raise wsme.exc.ClientSideError(_(
"Invalid subnet %s %s."
"Please check and configure a valid OAM Subnet."
@ -415,13 +415,9 @@ class OAMNetworkController(rest.RestController):
extoam_uuid)
# this is required for cases where action is appended
action = None
for p in patch:
if '/action' in p['path']:
value = p['value']
patch.remove(p)
if value in (constants.APPLY_ACTION, constants.INSTALL_ACTION):
action = value
break
# replace isystem_uuid and iextoam_uuid with corresponding
@ -439,8 +435,7 @@ class OAMNetworkController(rest.RestController):
extoam_orig = copy.deepcopy(rpc_extoam)
for p in patch_obj:
if p['path'] == '/isystem_uuid':
isystem = objects.system.get_by_uuid(pecan.request.context,
p['value'])
isystem = objects.system.get_by_uuid(pecan.request.context, p['value'])
p['path'] = '/forisystemid'
p['value'] = isystem.id

View File

@ -154,11 +154,6 @@ def _check_ntp_data(op, ntp):
MAX_S = 3
if op == "add":
this_ntp_id = 0
else:
this_ntp_id = ntp['id']
dns_list = pecan.request.dbapi.idns_get_list(ntp['forisystemid'])
if dns_list:

View File

@ -1060,7 +1060,7 @@ class ProfileController(rest.RestController):
return [{'result': 'Invalid',
'type': '', 'name': '',
'msg': 'Profile is invalid',
'detail': e.message}]
'detail': error}]
profile_types = ["cpuProfile", "memoryProfile", "interfaceProfile",
"storageProfile", "localstorageProfile"]
@ -1216,7 +1216,7 @@ def _create_cpu_profile(profile_name, profile_node):
"thread": thread_id,
"allocated_function": core.core_function,
'forinodeid': new_node['id']}
new_cpu = pecan.request.dbapi.icpu_create(iprofile_id, cdict)
pecan.request.dbapi.icpu_create(iprofile_id, cdict)
cpu_idx = cpu_idx + 1
node_idx = node_idx + 1
@ -1372,7 +1372,7 @@ def _create_if_profile(profile_name, profile_node):
'mtu': ethIf.mtu
}
newPort = pecan.request.dbapi.ethernet_port_create(iprofile_id, pdict)
pecan.request.dbapi.ethernet_port_create(iprofile_id, pdict)
routes = ethIf.routes
_create_route(newIf.uuid, newIf.id, routes)
@ -1545,7 +1545,6 @@ def _create_mem_profile(profile_name, profile_node):
iprofile_id = ihost['id']
cpu_idx = 0
node_idx = 0
try:
@ -1559,7 +1558,7 @@ def _create_mem_profile(profile_name, profile_node):
mdict['platform_reserved_mib'] = get_mem_size(platform_reserved, node_idx)
mdict['vm_hugepages_nr_2M_pending'] = get_mem_size(vm_hp_2m, node_idx)
mdict['vm_hugepages_nr_1G_pending'] = get_mem_size(vm_hp_1g, node_idx)
newmemory = pecan.request.dbapi.imemory_create(iprofile_id, mdict)
pecan.request.dbapi.imemory_create(iprofile_id, mdict)
node_idx += 1
except Exception as exc:
@ -1881,7 +1880,7 @@ def _create_localstorage_profile(profile_name, profile_node):
'forihostid': profile_id,
'forilvgid': ilvg_pf.id}
pv_pf = pv_api._create(pvdict, iprofile=True)
pv_api._create(pvdict, iprofile=True)
except wsme.exc.ClientSideError as cse:
pecan.request.dbapi.ihost_destroy(ihost.uuid)
@ -1990,7 +1989,7 @@ def cpuprofile_copy_data(host, profile):
'cpu_model', 'cpu_family', 'capabilities',
'forihostid', 'forinodeid']
cdict = {k: v for (k, v) in c.as_dict().items() if k in cpufields}
new_cpu = pecan.request.dbapi.icpu_create(iprofile_id, cdict)
pecan.request.dbapi.icpu_create(iprofile_id, cdict)
ROUTE_FIELDS = ['family', 'network', 'prefix', 'gateway', 'metric']
@ -2045,7 +2044,7 @@ def ifprofile_copy_data(host, profile):
'link_mode', 'bootp', 'pciaddr', 'dev_id',
'host_id', 'interface_id', 'node_id']
pdict = {k: v for (k, v) in p.as_dict().items() if k in ethernet_port_fields}
newPort = pecan.request.dbapi.ethernet_port_create(iprofile_id, pdict)
pecan.request.dbapi.ethernet_port_create(iprofile_id, pdict)
# Generate the uses/used_by relationships
for i in newIfList:
@ -2274,7 +2273,7 @@ def localstorageprofile_copy_data(host, profile):
if ipv.get('pv_type') == constants.PV_TYPE_DISK:
try:
pv_disk = pecan.request.dbapi.idisk_get_by_ipv(ipv.get('uuid'))
except Exception as e:
except Exception:
err_msg = '{} {}'.format("Could not obtain the disk used by "
"physical volume", ipv.get('uuid'))
raise wsme.exc.ClientSideError(_(err_msg))
@ -2285,7 +2284,7 @@ def localstorageprofile_copy_data(host, profile):
try:
pv_part = pecan.request.dbapi.partition_get_by_ipv(
ipv.get('uuid'))
except Exception as e:
except Exception:
err_msg = '{} {}'.format("Could not obtain the partition "
"used by physical volume",
ipv.get('uuid'))
@ -3153,7 +3152,7 @@ def localstorageprofile_apply_to_host(host, profile):
pdata = {'foripvid': ipvPairs[pdisk.foripvid]}
try:
device = device_update_function(disk_or_part_uuid, pdata)
device_update_function(disk_or_part_uuid, pdata)
except:
raise wsme.exc.ClientSideError(_(
"Failed to link storage to device %s" % disk_or_part_uuid))

View File

@ -934,7 +934,7 @@ def delete_pv(pv_uuid, force=False):
_update_disk_or_partition('idisk', pv)
elif pv['pv_type'] == constants.PV_TYPE_PARTITION:
partition = _update_disk_or_partition('partition', pv)
_update_disk_or_partition('partition', pv)
# If the partition already exists, don't modify its status. Wait
# for when the PV is actually deleted to do so.
# If the host hasn't been provisioned yet, then the partition will

View File

@ -158,11 +158,6 @@ def _check_remotelogging_data(op, remotelogging):
# Get data
ip_address = remotelogging['ip_address']
if op == "add":
this_remotelogging_id = 0
else:
this_remotelogging_id = remotelogging['id']
# Validate ip_address
if ip_address:
try:

View File

@ -387,6 +387,6 @@ class RouteController(rest.RestController):
@wsme_pecan.wsexpose(None, types.uuid, status_code=204)
def delete(self, route_uuid):
    """Delete an IP route.

    Validates existence first (the lookup raises if the route is not
    found), then removes the DB record and notifies the conductor to
    push the updated route configuration.

    :param route_uuid: UUID of the route to delete.
    """
    # Existence check only; the returned route object is unused (F841).
    self._get_one(route_uuid)
    pecan.request.dbapi.route_destroy(route_uuid)
    pecan.request.rpcapi.update_route_config(pecan.request.context)

View File

@ -641,7 +641,7 @@ def _create(storage_ceph):
# Mark the storage tier as in-use
try:
tier = pecan.request.dbapi.storage_tier_update(
pecan.request.dbapi.storage_tier_update(
storage_ceph_obj.tier_id,
{'forbackendid': storage_ceph_obj.id,
'status': constants.SB_TIER_STATUS_IN_USE})

View File

@ -139,9 +139,6 @@ class StorageLVM(base.APIBase):
'services',
'capabilities'])
chosts = pecan.request.dbapi.ihost_get_by_personality(
constants.CONTROLLER)
stor_lvm.links =\
[link.Link.make_link('self', pecan.request.host_url,
'storage_lvm',

View File

@ -197,7 +197,7 @@ class UpgradeController(rest.RestController):
# There must not already be an upgrade in progress
try:
upgrade = pecan.request.dbapi.software_upgrade_get_one()
pecan.request.dbapi.software_upgrade_get_one()
except exception.NotFound:
pass
else:
@ -284,8 +284,6 @@ class UpgradeController(rest.RestController):
raise wsme.exc.ClientSideError(_(
"operation rejected: An upgrade is not in progress."))
from_load = pecan.request.dbapi.load_get(upgrade.from_load)
from_version = from_load.software_version
to_load = pecan.request.dbapi.load_get(upgrade.to_load)
to_version = to_load.software_version

View File

@ -147,22 +147,6 @@ class UserCollection(collection.Collection):
##############
# UTILS
##############
def _check_user_data(op, user):
# Get data
root_sig = user['root_sig']
# iuser_root_sig_list = []
# user_root_sig = ""
MAX_S = 2
if op == "add":
this_user_id = 0
else:
this_user_id = user['id']
return user
LOCK_NAME = 'UserController'
@ -290,7 +274,7 @@ class UserController(rest.RestController):
except utils.JSONPATCH_EXCEPTIONS as e:
raise exception.PatchError(patch=patch, reason=e)
user = _check_user_data("modify", user.as_dict())
user = user.as_dict()
try:
# Update only the fields that have changed

View File

@ -95,7 +95,7 @@ class ContextHook(hooks.PecanHook):
is_admin = policy.check('admin', state.request.headers, creds)
path = utils.safe_rstrip(state.request.path, '/')
utils.safe_rstrip(state.request.path, '/')
is_public_api = state.request.environ.get('is_public_api', False)
state.request.context = context.RequestContext(

View File

@ -651,7 +651,7 @@ class CephApiOperator(object):
def get_monitors_status(self, db_api):
# first check that the monitors are available in sysinv
num_active__monitors = 0
num_active_monitors = 0
num_inv_monitors = 0
required_monitors = constants.MIN_STOR_MONITORS
quorum_names = []

View File

@ -119,7 +119,7 @@ def _validate_ldap_url(name, value):
def _validate_ldap_dn(name, value):
try:
ldap.dn.str2dn(value)
except ldap.DECODING_ERROR as e:
except ldap.DECODING_ERROR:
raise wsme.exc.ClientSideError(_(
"Parameter '%s' must be a valid LDAP DN value" % name))

View File

@ -871,9 +871,9 @@ def symlink_force(source, link_name):
def mounted(remote_dir, local_dir):
local_dir = os.path.abspath(local_dir)
try:
_ = subprocess.check_output(
["/bin/nfs-mount", remote_dir, local_dir],
stderr=subprocess.STDOUT)
subprocess.check_output(
["/bin/nfs-mount", remote_dir, local_dir],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
raise OSError(("mount operation failed: "
"command={}, retcode={}, output='{}'").format(
@ -882,9 +882,9 @@ def mounted(remote_dir, local_dir):
yield
finally:
try:
_ = subprocess.check_output(
["/bin/umount", local_dir],
stderr=subprocess.STDOUT)
subprocess.check_output(
["/bin/umount", local_dir],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
raise OSError(("umount operation failed: "
"command={}, retcode={}, output='{}'").format(
@ -1323,7 +1323,6 @@ def _get_cinder_device_info(dbapi, forihostid):
# TODO (rchurch): get a DB query based on volume group name
lvgs = dbapi.ilvg_get_by_ihost(forihostid)
cinder_vg = None
for vg in lvgs:
if vg.lvm_vg_name == constants.LVG_CINDER_VOLUMES:
pvs = dbapi.ipv_get_by_ihost(forihostid)

View File

@ -35,7 +35,6 @@ import glob
import grp
import hashlib
import httplib
import math
import os
import pwd
import re
@ -618,7 +617,7 @@ class ConductorManager(service.PeriodicService):
ihost_mtc = host.as_dict()
ihost_mtc['operation'] = 'modify'
ihost_mtc = cutils.removekeys_nonmtce(ihost_mtc)
mtc_response_dict = mtce_api.host_modify(
mtce_api.host_modify(
self._api_token, self._mtc_address,
self._mtc_port, ihost_mtc,
constants.MTC_DEFAULT_TIMEOUT_IN_SECS)
@ -671,7 +670,7 @@ class ConductorManager(service.PeriodicService):
ihost_mtc = cutils.removekeys_nonmtce(ihost_mtc)
LOG.info("%s create_ihost update mtce %s " %
(ihost.hostname, ihost_mtc))
mtc_response_dict = mtce_api.host_modify(
mtce_api.host_modify(
self._api_token, self._mtc_address, self._mtc_port,
ihost_mtc,
constants.MTC_DEFAULT_TIMEOUT_IN_SECS)
@ -1961,7 +1960,6 @@ class ConductorManager(service.PeriodicService):
{'ifname': updated_name})
used_by = interface['used_by']
vlans = []
for ifname in used_by:
vlan = self.dbapi.iinterface_get(ifname, port['forihostid'])
if vlan.get('iftype') != constants.INTERFACE_TYPE_VLAN:
@ -2626,7 +2624,7 @@ class ConductorManager(service.PeriodicService):
# there has been an update. Delete db entries and replace.
for icpu in icpus:
cpu = self.dbapi.icpu_destroy(icpu.uuid)
self.dbapi.icpu_destroy(icpu.uuid)
# sort the list of cpus by socket and coreid
cpu_list = sorted(icpu_dict_array, key=self._sort_by_socket_and_coreid)
@ -2656,7 +2654,7 @@ class ConductorManager(service.PeriodicService):
cpu_dict.update(data)
cpu = self.dbapi.icpu_create(forihostid, cpu_dict)
self.dbapi.icpu_create(forihostid, cpu_dict)
except exception.NodeNotFound:
raise exception.SysinvException(_(
@ -2743,7 +2741,7 @@ class ConductorManager(service.PeriodicService):
## Set the amount of memory reserved for platform use.
mem_dict.update(self._get_platform_reserved_memory(
ihost, i['numa_node']))
mem = self.dbapi.imemory_create(forihostid, mem_dict)
self.dbapi.imemory_create(forihostid, mem_dict)
else:
for imem in imems:
# Include 4K pages in the displayed VM memtotal
@ -2753,13 +2751,13 @@ class ConductorManager(service.PeriodicService):
constants.NUM_4K_PER_MiB)
mem_dict['memtotal_mib'] += vm_4K_mib
mem_dict['memavail_mib'] += vm_4K_mib
pmem = self.dbapi.imemory_update(imem['uuid'],
self.dbapi.imemory_update(imem['uuid'],
mem_dict)
except:
## Set the amount of memory reserved for platform use.
mem_dict.update(self._get_platform_reserved_memory(
ihost, i['numa_node']))
mem = self.dbapi.imemory_create(forihostid, mem_dict)
self.dbapi.imemory_create(forihostid, mem_dict)
pass
return
@ -2843,7 +2841,7 @@ class ConductorManager(service.PeriodicService):
:return:
"""
try:
upgrade = self.dbapi.software_upgrade_get_one()
self.dbapi.software_upgrade_get_one()
except exception.NotFound:
# Not upgrading. We assume the host versions match
# If they somehow don't match we've got bigger problems
@ -3209,7 +3207,6 @@ class ConductorManager(service.PeriodicService):
partitions = self.dbapi.partition_get_by_ihost(host.id)
partition4 = next((p for p in partitions if p.device_node == pv4_name), None)
part_size_mib = float(pv_cgts_vg.lvm_pv_size) / (1024**2) - int(partition4.size_mib)
part_size = math.ceil(part_size_mib)
if part_size_mib > 0:
LOG.info("%s is not enough for R4 cgts-vg" % pv4_name)
else:
@ -3221,8 +3218,6 @@ class ConductorManager(service.PeriodicService):
"device path: %s" %
(part_size_mib, part_device_node, part_device_path))
part_uuid = uuidutils.generate_uuid()
partition_dict = {
'idisk_id': disk.id,
'idisk_uuid': disk.uuid,
@ -4494,7 +4489,7 @@ class ConductorManager(service.PeriodicService):
ihost_mtc['task'] = constants.FORCE_LOCKING
LOG.warn("ihost_action override %s" %
ihost_mtc)
mtc_response_dict = mtce_api.host_modify(
mtce_api.host_modify(
self._api_token, self._mtc_address, self._mtc_port,
ihost_mtc, timeout_in_secs)
@ -4522,13 +4517,13 @@ class ConductorManager(service.PeriodicService):
else:
val = {'ihost_action': ihost_action_str}
ihost_u = self.dbapi.ihost_update(ihost.uuid, val)
self.dbapi.ihost_update(ihost.uuid, val)
else: # Administrative locked already
task_str = ihost.task or ""
if (task_str.startswith(constants.FORCE_LOCKING) or
task_str.startswith(constants.LOCKING)):
val = {'task': ""}
ihost_u = self.dbapi.ihost_update(ihost.uuid, val)
self.dbapi.ihost_update(ihost.uuid, val)
vim_progress_status_str = ihost.get('vim_progress_status') or ""
if (vim_progress_status_str and
@ -4542,7 +4537,7 @@ class ConductorManager(service.PeriodicService):
vim_progress_status_str += ".."
val = {'vim_progress_status': vim_progress_status_str}
ihost_u = self.dbapi.ihost_update(ihost.uuid, val)
self.dbapi.ihost_update(ihost.uuid, val)
def _audit_upgrade_status(self):
"""Audit upgrade related status"""
@ -5415,9 +5410,7 @@ class ConductorManager(service.PeriodicService):
reboot = True
# Set config out-of-date for controllers
config_uuid = self._config_update_hosts(context,
personalities,
reboot=reboot)
self._config_update_hosts(context, personalities, reboot=reboot)
# TODO(oponcea): Set config_uuid to a random value to keep Config out-of-date.
# Once sm supports in-service config reload set config_uuid=config_uuid.
@ -7790,9 +7783,6 @@ class ConductorManager(service.PeriodicService):
if (mtc_response_dict['status'] != 'pass'):
LOG.error("Failed mtc_host_add=%s" % ihost_mtc_dict)
else:
# TODO: remove this else
LOG.info("Passed mtc_host_add=%s" % ihost_mtc_dict)
return
@ -9177,8 +9167,8 @@ class ConductorManager(service.PeriodicService):
else:
cmd = "ip6tables-restore"
with open(os.devnull, "w") as fnull:
output = subprocess.check_output(
with open(os.devnull, "w"):
subprocess.check_output(
[cmd, "--test", "--noflush", rules_file],
stderr=subprocess.STDOUT)
return True
@ -9272,14 +9262,12 @@ class ConductorManager(service.PeriodicService):
"ERROR: Failed to install license to redundant storage."))
hostname = subprocess.check_output(["hostname"]).rstrip()
if hostname == constants.CONTROLLER_0_HOSTNAME:
mate = constants.CONTROLLER_1_HOSTNAME
elif hostname == constants.CONTROLLER_1_HOSTNAME:
mate = constants.CONTROLLER_0_HOSTNAME
elif hostname == 'localhost':
validHostnames = [constants.CONTROLLER_0_HOSTNAME,
constants.CONTROLLER_1_HOSTNAME]
if hostname == 'localhost':
raise exception.SysinvException(_(
"ERROR: Host undefined. Unable to install license"))
else:
elif hostname not in validHostnames:
raise exception.SysinvException(_(
"ERROR: Invalid hostname for controller node: %s") % hostname)
@ -9385,7 +9373,6 @@ class ConductorManager(service.PeriodicService):
password=passphrase,
backend=default_backend())
except Exception as e:
msg = "Exception occured e={}".format(e)
raise exception.SysinvException(_("Error decrypting PEM "
"file: %s" % e))
key_file.seek(0)
@ -9441,7 +9428,7 @@ class ConductorManager(service.PeriodicService):
tpmconfig_dict = {'tpm_path': constants.SSL_CERT_DIR + 'object.tpm'}
if not tpm:
new_tpmconfig = self.dbapi.tpmconfig_create(tpmconfig_dict)
self.dbapi.tpmconfig_create(tpmconfig_dict)
tpmconfig_dict.update(
{'cert_path': constants.SSL_CERT_DIR + 'key.pem',

View File

@ -704,7 +704,7 @@ class OpenStackOperator(object):
if s.name.find(constants.SERVICE_TYPE_CINDER) != -1:
endpoint_list += self._get_keystoneclient().endpoints.list(
service=s, region=region1_name)
except Exception as e:
except Exception:
LOG.error("Failed to get keystone endpoints for cinder.")
return endpoint_list
@ -897,7 +897,7 @@ class OpenStackOperator(object):
try:
clusters = self._get_magnumclient().clusters.list()
return len(clusters)
except Exception as e:
except Exception:
LOG.error("Unable to get backend list of magnum clusters")
return 0

View File

@ -1162,7 +1162,7 @@ class Connection(api.Connection):
try:
session.add(isystem)
session.flush()
except db_exc.DBDuplicateEntry as exc:
except db_exc.DBDuplicateEntry:
raise exception.SystemAlreadyExists(uuid=values['uuid'])
return isystem
@ -1223,7 +1223,7 @@ class Connection(api.Connection):
query = add_identity_filter(query, server)
try:
isystem_ref = query.one()
query.one()
except NoResultFound:
raise exception.ServerNotFound(server=server)
@ -2385,7 +2385,7 @@ class Connection(api.Connection):
try:
session.add(obj)
session.flush()
except db_exc.DBDuplicateEntry as exc:
except db_exc.DBDuplicateEntry:
LOG.error("Failed to update interface")
return query.one()
@ -2793,7 +2793,7 @@ class Connection(api.Connection):
@objects.objectify(objects.partition)
def partition_update(self, partition_id, values, forihostid=None):
with _session_for_write() as session:
with _session_for_write():
query = model_query(models.partition, read_deleted="no")
if forihostid:
query = query.filter_by(forihostid=forihostid)
@ -2806,7 +2806,7 @@ class Connection(api.Connection):
return query.one()
def partition_destroy(self, partition_id):
with _session_for_write() as session:
with _session_for_write():
# Delete physically since it has unique columns
if uuidutils.is_uuid_like(partition_id):
model_query(models.partition, read_deleted="no"). \
@ -2831,7 +2831,7 @@ class Connection(api.Connection):
try:
session.add(journal)
session.flush()
except Exception as e:
except Exception:
raise
return journal
@ -3262,7 +3262,7 @@ class Connection(api.Connection):
try:
session.add(itrapdest)
session.flush()
except db_exc.DBDuplicateEntry as exc:
except db_exc.DBDuplicateEntry:
raise exception.TrapDestAlreadyExists(uuid=values['uuid'])
return itrapdest
@ -3332,7 +3332,7 @@ class Connection(api.Connection):
try:
session.add(icommunity)
session.flush()
except db_exc.DBDuplicateEntry as exc:
except db_exc.DBDuplicateEntry:
raise exception.CommunityAlreadyExists(uuid=values['uuid'])
return icommunity
@ -3465,7 +3465,7 @@ class Connection(api.Connection):
query = add_identity_filter(query, server)
try:
node_ref = query.one()
query.one()
except NoResultFound:
raise exception.ServerNotFound(server=server)
# if node_ref['reservation'] is not None:
@ -3474,7 +3474,7 @@ class Connection(api.Connection):
# Get node ID, if an UUID was supplied. The ID is
# required for deleting all ports, attached to the node.
# if uuidutils.is_uuid_like(server):
server_id = node_ref['id']
# server_id = node_ref['id']
# else:
# server_id = server
@ -3552,7 +3552,7 @@ class Connection(api.Connection):
query = add_identity_filter(query, server)
try:
node_ref = query.one()
query.one()
except NoResultFound:
raise exception.ServerNotFound(server=server)
# if node_ref['reservation'] is not None:
@ -3561,7 +3561,7 @@ class Connection(api.Connection):
# Get node ID, if an UUID was supplied. The ID is
# required for deleting all ports, attached to the node.
# if uuidutils.is_uuid_like(server):
server_id = node_ref['id']
# server_id = node_ref['id']
# else:
# server_id = server
@ -3586,7 +3586,7 @@ class Connection(api.Connection):
try:
session.add(ntp)
session.flush()
except db_exc.DBDuplicateEntry as exc:
except db_exc.DBDuplicateEntry:
raise exception.NTPAlreadyExists(uuid=values['uuid'])
return self._ntp_get(values['uuid'])
@ -3639,7 +3639,7 @@ class Connection(api.Connection):
query = add_identity_filter(query, server)
try:
node_ref = query.one()
query.one()
except NoResultFound:
raise exception.ServerNotFound(server=server)
# if node_ref['reservation'] is not None:
@ -3648,7 +3648,7 @@ class Connection(api.Connection):
# Get node ID, if an UUID was supplied. The ID is
# required for deleting all ports, attached to the node.
# if uuidutils.is_uuid_like(server):
server_id = node_ref['id']
# server_id = node_ref['id']
# else:
# server_id = server
@ -4077,7 +4077,7 @@ class Connection(api.Connection):
@objects.objectify(objects.storage_backend)
def storage_backend_update(self, storage_backend_id, values):
with _session_for_write() as session:
with _session_for_write():
query = model_query(models.StorageBackend, read_deleted="no")
query = add_storage_backend_filter(query, storage_backend_id)
try:
@ -4124,7 +4124,7 @@ class Connection(api.Connection):
try:
session.add(obj)
session.flush()
except db_exc.DBDuplicateEntry as exc:
except db_exc.DBDuplicateEntry:
LOG.error("Failed to update storage backend")
return query.one()
@ -4133,7 +4133,7 @@ class Connection(api.Connection):
return self._storage_backend_destroy(models.StorageBackend, storage_backend_id)
def _storage_backend_destroy(self, cls, storage_backend_id):
with _session_for_write() as session:
with _session_for_write():
# Delete storage_backend which should cascade to delete derived backends
if uuidutils.is_uuid_like(storage_backend_id):
model_query(cls, read_deleted="no").\
@ -4312,7 +4312,7 @@ class Connection(api.Connection):
query = add_identity_filter(query, server)
try:
node_ref = query.one()
query.one()
except NoResultFound:
raise exception.ServerNotFound(server=server)
# if node_ref['reservation'] is not None:
@ -4321,7 +4321,7 @@ class Connection(api.Connection):
# Get node ID, if an UUID was supplied. The ID is
# required for deleting all ports, attached to the node.
# if uuidutils.is_uuid_like(server):
server_id = node_ref['id']
# server_id = node_ref['id']
# else:
# server_id = server
@ -4399,7 +4399,7 @@ class Connection(api.Connection):
query = add_identity_filter(query, server)
try:
node_ref = query.one()
query.one()
except NoResultFound:
raise exception.ServerNotFound(server=server)
@ -4477,7 +4477,7 @@ class Connection(api.Connection):
query = model_query(models.Services, session=session)
query = query.filter_by(name=service)
try:
node_ref = query.one()
query.one()
except NoResultFound:
raise exception.ServiceNotFound(service=service)
query.delete()
@ -5230,7 +5230,7 @@ class Connection(api.Connection):
try:
session.add(address_pool)
session.flush()
except db_exc.DBDuplicateEntry as exc:
except db_exc.DBDuplicateEntry:
raise exception.AddressPoolAlreadyExists(uuid=values['uuid'])
return self._address_pool_get(values['uuid'])
@ -5395,7 +5395,7 @@ class Connection(api.Connection):
sort_key, sort_dir, query)
def _sensor_analog_update(self, sensorid, values, hostid=None):
with _session_for_write() as session:
with _session_for_write():
# May need to reserve in multi controller system; ref sysinv
query = model_query(models.SensorsAnalog, read_deleted="no")
@ -5417,7 +5417,7 @@ class Connection(api.Connection):
return query.one()
def _sensor_analog_destroy(self, sensorid):
with _session_for_write() as session:
with _session_for_write():
# Delete port which should cascade to delete SensorsAnalog
if uuidutils.is_uuid_like(sensorid):
model_query(models.Sensors, read_deleted="no").\
@ -5555,7 +5555,7 @@ class Connection(api.Connection):
sort_key, sort_dir, query)
def _sensor_discrete_update(self, sensorid, values, hostid=None):
with _session_for_write() as session:
with _session_for_write():
# May need to reserve in multi controller system; ref sysinv
query = model_query(models.SensorsDiscrete, read_deleted="no")
@ -5577,7 +5577,7 @@ class Connection(api.Connection):
return query.one()
def _sensor_discrete_destroy(self, sensorid):
with _session_for_write() as session:
with _session_for_write():
# Delete port which should cascade to delete SensorsDiscrete
if uuidutils.is_uuid_like(sensorid):
model_query(models.Sensors, read_deleted="no").\
@ -5659,9 +5659,7 @@ class Connection(api.Connection):
values['uuid'] = uuidutils.generate_uuid()
values['host_id'] = int(host_id)
is_profile = False
if 'sensor_profile' in values:
is_profile = True
values.pop('sensor_profile')
# The id is null for ae sensors with more than one member
@ -5705,7 +5703,7 @@ class Connection(api.Connection):
@objects.objectify(objects.sensor)
def isensor_get_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.Sensors)
model_query(models.Sensors)
return _paginate_query(models.Sensors, limit, marker,
sort_key, sort_dir)
@ -5755,7 +5753,7 @@ class Connection(api.Connection):
sort_key, sort_dir, query)
def _isensor_update(self, cls, sensor_id, values):
with _session_for_write() as session:
with _session_for_write():
query = model_query(models.Sensors)
query = add_sensor_filter(query, sensor_id)
try:
@ -5776,7 +5774,7 @@ class Connection(api.Connection):
@objects.objectify(objects.sensor)
def isensor_update(self, isensor_id, values):
with _session_for_write() as session:
with _session_for_write():
query = model_query(models.Sensors, read_deleted="no")
query = add_sensor_filter(query, isensor_id)
try:
@ -5799,7 +5797,7 @@ class Connection(api.Connection):
isensor_id, values)
def _isensor_destroy(self, cls, sensor_id):
with _session_for_write() as session:
with _session_for_write():
# Delete sensor which should cascade to delete derived sensors
if uuidutils.is_uuid_like(sensor_id):
model_query(cls, read_deleted="no").\
@ -5888,7 +5886,7 @@ class Connection(api.Connection):
@objects.objectify(objects.sensorgroup)
def isensorgroup_update(self, isensorgroup_id, values):
with _session_for_write() as session:
with _session_for_write():
query = model_query(models.SensorGroups, read_deleted="no")
query = add_sensorgroup_filter(query, isensorgroup_id)
try:
@ -5939,9 +5937,7 @@ class Connection(api.Connection):
values['uuid'] = uuidutils.generate_uuid()
values['host_id'] = int(host_id)
is_profile = False
if 'sensorgroup_profile' in values:
is_profile = True
values.pop('sensorgroup_profile')
temp_id = obj.id
@ -6025,7 +6021,7 @@ class Connection(api.Connection):
return query.one()
def _isensorgroup_destroy(self, cls, sensorgroup_id):
with _session_for_write() as session:
with _session_for_write():
# Delete sensorgroup which should cascade to
# delete derived sensorgroups
if uuidutils.is_uuid_like(sensorgroup_id):
@ -6179,14 +6175,14 @@ class Connection(api.Connection):
query = add_identity_filter(query, load)
try:
node_ref = query.one()
query.one()
except NoResultFound:
raise exception.LoadNotFound(load=load)
query.delete()
def set_upgrade_loads_state(self, upgrade, to_state, from_state):
with _session_for_write() as session:
with _session_for_write():
self.load_update(upgrade.from_load, {'state': from_state})
self.load_update(upgrade.to_load, {'state': to_state})
@ -6215,8 +6211,9 @@ class Connection(api.Connection):
try:
session.add(upgrade)
session.flush()
except db_exc.DBDuplicateEntry as exc:
except db_exc.DBDuplicateEntry:
raise exception.UpgradeAlreadyExists(uuid=values['uuid'])
return self._software_upgrade_get(values['uuid'])
@objects.objectify(objects.software_upgrade)
@ -6507,7 +6504,7 @@ class Connection(api.Connection):
with _session_for_write() as session:
cluster = self._cluster_get(cluster_uuid,
session=session)
peers = values.pop('peers', [])
values.pop('peers', [])
cluster.update(values)
# if peers:
# self._peer_update(session, cluster, peers)
@ -6575,7 +6572,7 @@ class Connection(api.Connection):
try:
session.add(peer)
session.flush()
except db_exc.DBDuplicateEntry as exc:
except db_exc.DBDuplicateEntry:
raise exception.PeerAlreadyExists(uuid=values['uuid'])
return self._peer_get(values['uuid'])
@ -6639,7 +6636,7 @@ class Connection(api.Connection):
try:
session.add(lldp_agent)
session.flush()
except db_exc.DBDuplicateEntry as exc:
except db_exc.DBDuplicateEntry:
LOG.error("Failed to add lldp agent %s, on host %s:"
"already exists" %
(values['uuid'],
@ -6691,7 +6688,7 @@ class Connection(api.Connection):
@objects.objectify(objects.lldp_agent)
def lldp_agent_update(self, uuid, values):
with _session_for_write() as session:
with _session_for_write():
query = model_query(models.LldpAgents, read_deleted="no")
try:
@ -6709,7 +6706,7 @@ class Connection(api.Connection):
def lldp_agent_destroy(self, agentid):
with _session_for_write() as session:
with _session_for_write():
query = model_query(models.LldpAgents, read_deleted="no")
query = add_lldp_filter_by_agent(query, agentid)
@ -6831,7 +6828,7 @@ class Connection(api.Connection):
@objects.objectify(objects.lldp_neighbour)
def lldp_neighbour_update(self, uuid, values):
with _session_for_write() as session:
with _session_for_write():
query = model_query(models.LldpNeighbours, read_deleted="no")
try:
@ -6848,7 +6845,7 @@ class Connection(api.Connection):
err="Multiple entries found for uuid %s" % uuid)
def lldp_neighbour_destroy(self, neighbourid):
with _session_for_write() as session:
with _session_for_write():
query = model_query(models.LldpNeighbours, read_deleted="no")
query = add_lldp_filter_by_neighbour(query, neighbourid)
try:
@ -7038,7 +7035,7 @@ class Connection(api.Connection):
raise exception.InvalidParameterValue(
err="agent id and neighbour id not specified")
with _session_for_write() as session:
with _session_for_write():
query = model_query(models.LldpTlvs, read_deleted="no")
if agentid:
@ -7097,7 +7094,7 @@ class Connection(api.Connection):
return results
def lldp_tlv_destroy(self, id):
with _session_for_write() as session:
with _session_for_write():
model_query(models.LldpTlvs, read_deleted="no").\
filter_by(id=id).\
delete()

View File

@ -19,11 +19,11 @@ def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
i_system = Table('i_system',
meta,
Column('id', Integer,
primary_key=True, nullable=False),
mysql_engine=ENGINE, mysql_charset=CHARSET)
Table('i_system',
meta,
Column('id', Integer,
primary_key=True, nullable=False),
mysql_engine=ENGINE, mysql_charset=CHARSET)
i_host = Table('i_host',
meta,
@ -57,11 +57,11 @@ def upgrade(migrate_engine):
migrate_engine.execute('ALTER TABLE i_host ALTER COLUMN invprovision TYPE "invprovisionStateEnum" '
'USING invprovision::text::"invprovisionStateEnum"')
i_node = Table('i_node',
meta,
Column('id', Integer,
primary_key=True, nullable=False),
mysql_engine=ENGINE, mysql_charset=CHARSET)
Table('i_node',
meta,
Column('id', Integer,
primary_key=True, nullable=False),
mysql_engine=ENGINE, mysql_charset=CHARSET)
i_alarm_history = Table(
'i_alarm_history',

View File

@ -17,11 +17,11 @@ def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
i_system = Table('i_system',
meta,
Column('id', Integer,
primary_key=True, nullable=False),
mysql_engine=ENGINE, mysql_charset=CHARSET)
Table('i_system',
meta,
Column('id', Integer,
primary_key=True, nullable=False),
mysql_engine=ENGINE, mysql_charset=CHARSET)
clusters = Table(
'clusters',

View File

@ -17,8 +17,8 @@ def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
ports = Table('ports', meta, autoload=True, autoload_with=migrate_engine)
ihost = Table('i_host', meta, autoload=True, autoload_with=migrate_engine)
Table('ports', meta, autoload=True, autoload_with=migrate_engine)
Table('i_host', meta, autoload=True, autoload_with=migrate_engine)
lldp_agents = Table(
'lldp_agents',

View File

@ -28,7 +28,7 @@ def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
conn = migrate_engine.connect()
migrate_engine.connect()
i_host = Table('i_host', meta, autoload=True)
i_host.create_column(Column('action_state', String(255)))

View File

@ -84,7 +84,7 @@ def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
conn = migrate_engine.connect()
migrate_engine.connect()
# 046_drop_iport.py
i_port = Table('i_port', meta, autoload=True)
@ -144,7 +144,7 @@ def upgrade(migrate_engine):
# String per 048
# 053_add_virtual_interface.py
interfaces = Table('interfaces', meta, autoload=True)
Table('interfaces', meta, autoload=True)
virtual_interfaces = Table(
'virtual_interfaces',
@ -229,15 +229,6 @@ def upgrade(migrate_engine):
mysql_engine=ENGINE, mysql_charset=CHARSET,
autoload=True)
pvStateEnum = Enum('unprovisioned',
'adding',
'provisioned',
'removing',
'failed',
'reserve2',
native_enum=False,
name='pvStateEnum')
migrate_engine.execute('ALTER TABLE i_pv DROP CONSTRAINT "pvStateEnum"')
# In 16.10, as DB changes by PATCH are not supported, we use 'reserve1' instead of
# 'failed'. Therefore, even though upgrades with PVs in 'failed' state should not
@ -246,12 +237,8 @@ def upgrade(migrate_engine):
LOG.info("Migrate pv_state")
migrate_engine.execute('UPDATE i_pv SET pv_state=\'failed\' WHERE pv_state=\'reserve1\'')
# pvStateEnum.create(bind=migrate_engine, checkfirst=False)
# migrate_engine.execute('ALTER TABLE i_pv ALTER COLUMN pv_state TYPE "pvStateEnum" '
# 'USING pv_state::text::"pvStateEnum"')
pv_state_col = i_pv.c.pv_state
pv_state_col.alter(Column('pv_state', String(32)))
# pvStateEnum.drop(bind=migrate_engine, checkfirst=False)
# 057_idisk_id_path_wwn.py
i_idisk = Table('i_idisk', meta, autoload=True)

View File

@ -24,7 +24,7 @@ def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
storage_backend = Table('storage_backend', meta, autoload=True)
Table('storage_backend', meta, autoload=True)
# Define and create the storage_external table.
storage_external = Table(

View File

@ -101,7 +101,8 @@ class PathFilter(CommandFilter):
"""
def match(self, userargs):
command, arguments = userargs[0], userargs[1:]
# command = userargs[0]
arguments = userargs[1:]
equal_args_num = len(self.args) == len(arguments)
exec_is_valid = super(PathFilter, self).match(userargs)

View File

@ -17,8 +17,6 @@ class AodhPuppet(openstack.OpenstackBasePuppet):
def get_static_config(self):
dbuser = self._get_database_username(self.SERVICE_NAME)
dbpass = self._get_database_password(self.SERVICE_NAME)
kspass = self._get_service_password(self.SERVICE_NAME)
return {
'aodh::db::postgresql::user': dbuser,

View File

@ -50,7 +50,6 @@ class DCManagerPuppet(openstack.OpenstackBasePuppet):
def get_system_config(self):
ksuser = self._get_service_user_name(self.SERVICE_NAME)
neutron_region_name = self._operator.neutron.get_region_name()
return {
# The region in which the identity server can be found

View File

@ -0,0 +1,89 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sysinv.common import constants
from . import openstack
class GnocchiPuppet(openstack.OpenstackBasePuppet):
    """Class to encapsulate puppet operations for gnocchi configuration"""

    # Keystone service name used for all helper lookups (db user/password,
    # service user, region, endpoints).
    SERVICE_NAME = 'gnocchi'
    # TCP port used to build the public/internal/admin endpoint URLs below.
    SERVICE_PORT = 8041

    def get_static_config(self):
        """Return non-secret static hieradata: the postgresql db username."""
        dbuser = self._get_database_username(self.SERVICE_NAME)

        return {
            'gnocchi::db::postgresql::user': dbuser,
        }

    def get_secure_static_config(self):
        """Return secret static hieradata: db and keystone passwords."""
        dbpass = self._get_database_password(self.SERVICE_NAME)
        kspass = self._get_service_password(self.SERVICE_NAME)

        return {
            'gnocchi::db::postgresql::password': dbpass,

            'gnocchi::keystone::auth::password': kspass,

            'gnocchi::keystone::authtoken::password': kspass,
        }

    def get_system_config(self):
        """Return non-secret system hieradata.

        Builds the keystone auth/authtoken parameters and endpoint URLs for
        the gnocchi service.  When this host has the distributed-cloud
        system-controller role, the service is disabled and no keystone
        endpoint is configured.
        """
        ksuser = self._get_service_user_name(self.SERVICE_NAME)

        config = {
            'gnocchi::keystone::auth::region':
                self._get_service_region_name(self.SERVICE_NAME),
            'gnocchi::keystone::auth::public_url': self.get_public_url(),
            'gnocchi::keystone::auth::internal_url': self.get_internal_url(),
            'gnocchi::keystone::auth::admin_url': self.get_admin_url(),
            'gnocchi::keystone::auth::auth_name': ksuser,
            'gnocchi::keystone::auth::tenant': self._get_service_tenant_name(),

            'gnocchi::keystone::authtoken::auth_url':
                self._keystone_identity_uri(),
            'gnocchi::keystone::authtoken::auth_uri':
                self._keystone_auth_uri(),

            'gnocchi::keystone::authtoken::user_domain_name':
                self._get_service_user_domain_name(),
            'gnocchi::keystone::authtoken::project_domain_name':
                self._get_service_project_domain_name(),
            'gnocchi::keystone::authtoken::project_name':
                self._get_service_tenant_name(),
            'gnocchi::keystone::authtoken::region_name':
                self._keystone_region_name(),
            'gnocchi::keystone::authtoken::username': ksuser,

            'openstack::gnocchi::params::region_name':
                self._get_service_region_name(self.SERVICE_NAME),
            'openstack::gnocchi::params::service_create':
                self._to_create_services(),
        }

        # On a distributed-cloud system controller the gnocchi service is
        # not run locally: disable it and skip endpoint creation.
        if (self._distributed_cloud_role() ==
                constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER):
            config.update({'openstack::gnocchi::params::service_enabled': False,
                           'gnocchi::keystone::auth::configure_endpoint': False})

        return config

    def get_secure_system_config(self):
        """Return secret system hieradata: the database connection string."""
        config = {
            'gnocchi::database_connection':
                self._format_database_connection(self.SERVICE_NAME),
        }

        return config

    def get_public_url(self):
        """Return the public endpoint URL for the gnocchi API port."""
        return self._format_public_endpoint(self.SERVICE_PORT)

    def get_internal_url(self):
        """Return the internal endpoint URL (private endpoint format)."""
        return self._format_private_endpoint(self.SERVICE_PORT)

    def get_admin_url(self):
        """Return the admin endpoint URL (private endpoint format)."""
        return self._format_private_endpoint(self.SERVICE_PORT)

View File

@ -150,12 +150,10 @@ class NovaPuppet(openstack.OpenstackBasePuppet):
}
}
dbuser = self._get_database_username(self.SERVICE_NAME)
dbpass = self._get_database_password(self.SERVICE_NAME)
kspass = self._get_service_password(self.SERVICE_NAME)
kspass_placement = self._get_service_password(self.PLACEMENT_NAME)
api_dbuser = self._get_database_username(self.SERVICE_API_NAME)
api_dbpass = self._get_database_password(self.SERVICE_API_NAME)
return {

View File

@ -17,8 +17,6 @@ class PankoPuppet(openstack.OpenstackBasePuppet):
def get_static_config(self):
dbuser = self._get_database_username(self.SERVICE_NAME)
dbpass = self._get_database_password(self.SERVICE_NAME)
kspass = self._get_service_password(self.SERVICE_NAME)
return {
'panko::db::postgresql::user': dbuser,

View File

@ -644,7 +644,7 @@ class InterfaceComputeVlanOverBond(InterfaceTestCase):
self._create_compute_vlan('data', constants.NETWORK_TYPE_DATA, 5, bond2,
providernetworks='group0-ext0')
bond3 = self._create_compute_bond('bond3', constants.NETWORK_TYPE_NONE)
self._create_compute_bond('bond3', constants.NETWORK_TYPE_NONE)
self._create_ethernet('sriov', constants.NETWORK_TYPE_PCI_SRIOV,
'group0-data0', host=self.compute)
@ -917,7 +917,7 @@ class TestPatch(InterfaceTestCase):
def test_mtu_smaller_than_users(self):
port, lower_interface = self._create_ethernet(
'pxeboot', constants.NETWORK_TYPE_PXEBOOT, host=self.compute)
upper = dbutils.create_test_interface(
dbutils.create_test_interface(
forihostid='2',
ihost_uuid=self.compute.uuid,
ifname='data0',
@ -1344,7 +1344,7 @@ class TestPost(InterfaceTestCase):
vlan_iface = self._create_compute_vlan(
'vlan1', constants.NETWORK_TYPE_DATA, 1,
providernetworks='group0-ext0')
vlan_iface2 = self._create_compute_vlan('vlan2',
self._create_compute_vlan('vlan2',
constants.NETWORK_TYPE_DATA, 2,
lower_iface=vlan_iface, providernetworks='group0-ext1',
expect_errors=True)
@ -1354,7 +1354,7 @@ class TestPost(InterfaceTestCase):
def test_create_data_vlan_over_pxeboot_lag(self):
bond_iface = self._create_compute_bond(
'pxeboot', constants.NETWORK_TYPE_PXEBOOT)
vlan_iface = self._create_compute_vlan('vlan2',
self._create_compute_vlan('vlan2',
constants.NETWORK_TYPE_DATA, 2,
lower_iface=bond_iface, providernetworks='group0-ext1',
expect_errors=True)
@ -1364,7 +1364,7 @@ class TestPost(InterfaceTestCase):
def test_create_data_vlan_over_mgmt_lag(self):
bond_iface = self._create_compute_bond(
'mgmt', constants.NETWORK_TYPE_MGMT)
vlan_iface = self._create_compute_vlan(
self._create_compute_vlan(
'vlan2', constants.NETWORK_TYPE_DATA, 2,
lower_iface=bond_iface, providernetworks='group0-ext1',
expect_errors=True)
@ -1374,7 +1374,7 @@ class TestPost(InterfaceTestCase):
def test_create_mgmt_vlan_over_data_lag(self):
bond_iface = self._create_compute_bond(
'data', constants.NETWORK_TYPE_DATA, providernetworks='group0-ext1')
vlan_iface = self._create_compute_vlan(
self._create_compute_vlan(
'mgmt', constants.NETWORK_TYPE_MGMT, 2,
lower_iface=bond_iface, providernetworks='group0-ext1',
expect_errors=True)
@ -1382,7 +1382,7 @@ class TestPost(InterfaceTestCase):
# Expected message: The management VLAN configured on this system is 2,
# so the VLAN configured for the mgmt interface must match.
def test_mgmt_vlan_not_matching_in_network(self):
vlan_iface = self._create_compute_vlan(
self._create_compute_vlan(
'vlan2', constants.NETWORK_TYPE_MGMT, 12,
providernetworks='group0-ext1', expect_errors=True)
@ -1393,7 +1393,7 @@ class TestPost(InterfaceTestCase):
mgmt_network = dbapi.network_get_by_type(constants.NETWORK_TYPE_MGMT)
values = {'vlan_id': None}
dbapi.network_update(mgmt_network.uuid, values)
vlan_iface = self._create_compute_vlan(
self._create_compute_vlan(
'vlan2', constants.NETWORK_TYPE_MGMT, 12,
providernetworks='group0-ext1',
expect_errors=True)
@ -1401,7 +1401,7 @@ class TestPost(InterfaceTestCase):
# Expected message:
# Provider network(s) not supported for non-data interfaces.
def test_create_nondata_provider_network(self):
bond_iface = self._create_compute_bond(
self._create_compute_bond(
'pxeboot', constants.NETWORK_TYPE_PXEBOOT,
providernetworks='group0-data0', expect_errors=True)
@ -1479,7 +1479,7 @@ class TestCpePost(InterfaceTestCase):
def test_create_oam_vlan_over_data_lag(self):
bond_iface = self._create_bond(
'data', constants.NETWORK_TYPE_DATA, providernetworks='group0-ext1')
vlan_iface = self._create_vlan(
self._create_vlan(
'oam', constants.NETWORK_TYPE_OAM, 2,
lower_iface=bond_iface, providernetworks='group0-ext1',
expect_errors=True)
@ -1489,7 +1489,7 @@ class TestCpePost(InterfaceTestCase):
def test_create_infra_vlan_over_data_lag(self):
bond_iface = self._create_bond(
'data', constants.NETWORK_TYPE_DATA, providernetworks='group0-ext1')
vlan_iface = self._create_vlan(
self._create_vlan(
'infra', constants.NETWORK_TYPE_INFRA, 2,
lower_iface=bond_iface, providernetworks='group0-ext1',
expect_errors=True)
@ -1663,7 +1663,7 @@ class TestCpePatch(InterfaceTestCase):
# Expected error: SR-IOV can't be configured on this interface
def test_invalid_sriov_totalvfs_zero(self):
interface = dbutils.create_test_interface(forihostid='1')
port = dbutils.create_test_ethernet_port(
dbutils.create_test_ethernet_port(
id=1, name='eth1', host_id=1, interface_id=interface.id,
pciaddr='0000:00:00.11', dev_id=0, sriov_totalvfs=0, sriov_numvfs=1)
response = self.patch_dict_json(
@ -1676,7 +1676,7 @@ class TestCpePatch(InterfaceTestCase):
# Expected error: The interface support a maximum of ___ VFs
def test_invalid_sriov_exceeded_totalvfs(self):
interface = dbutils.create_test_interface(forihostid='1')
port = dbutils.create_test_ethernet_port(
dbutils.create_test_ethernet_port(
id=1, name='eth1', host_id=1, interface_id=interface.id,
pciaddr='0000:00:00.11', dev_id=0, sriov_totalvfs=1, sriov_numvfs=1,
driver=None)
@ -1690,7 +1690,7 @@ class TestCpePatch(InterfaceTestCase):
# Expected error: Corresponding port has invalid driver
def test_invalid_driver_for_sriov(self):
interface = dbutils.create_test_interface(forihostid='1')
port = dbutils.create_test_ethernet_port(
dbutils.create_test_ethernet_port(
id=1, name='eth1', host_id=1, interface_id=interface.id,
pciaddr='0000:00:00.11', dev_id=0, sriov_totalvfs=1, sriov_numvfs=1,
driver=None)

View File

@ -151,7 +151,7 @@ class ProfileDeleteTestCase(ProfileTestCase):
cpuprofile_data = self.get_json(
'%s' % self._get_path(profile_data['iprofiles'][0]['uuid']))
self.assertEqual(post_response.json['uuid'], cpuprofile_data['uuid'])
response = self.delete(
self.delete(
'%s/%s' % (self._get_path(), post_response.json['uuid']))
def test_delete_interface_success(self):
@ -161,7 +161,7 @@ class ProfileDeleteTestCase(ProfileTestCase):
ifprofile_data = self.get_json(
'%s' % self._get_path(profile_data['iprofiles'][0]['uuid']))
self.assertEqual(post_response.json['uuid'], ifprofile_data['uuid'])
response = self.delete(
self.delete(
'%s/%s' % (self._get_path(), post_response.json['uuid']))
def test_delete_memory_success(self):
@ -171,7 +171,7 @@ class ProfileDeleteTestCase(ProfileTestCase):
memprofile_data = self.get_json(
'%s' % self._get_path(profile_data['iprofiles'][0]['uuid']))
self.assertEqual(post_response.json['uuid'], memprofile_data['uuid'])
response = self.delete(
self.delete(
'%s/%s' % (self._get_path(), post_response.json['uuid']))
def test_delete_storage_success(self):
@ -182,7 +182,7 @@ class ProfileDeleteTestCase(ProfileTestCase):
storprofile_data = self.get_json(
'%s' % self._get_path(profile_data['iprofiles'][0]['uuid']))
self.assertEqual(post_response.json['uuid'], storprofile_data['uuid'])
response = self.delete(
self.delete(
'%s/%s' % (self._get_path(), post_response.json['uuid']))
@ -192,7 +192,7 @@ class ProfileShowTestCase(ProfileTestCase):
def test_show_cpu_success(self):
self.profile["profiletype"] = constants.PROFILE_TYPE_CPU
post_response = self.post_json('%s' % self._get_path(), self.profile)
self.post_json('%s' % self._get_path(), self.profile)
list_data = self.get_json('%s' % self._get_path())
show_data = self.get_json(
'%s/icpus' % self._get_path(list_data['iprofiles'][0]['uuid']))
@ -201,7 +201,7 @@ class ProfileShowTestCase(ProfileTestCase):
def test_show_interface_success(self):
self.profile["profiletype"] = constants.PROFILE_TYPE_INTERFACE
post_response = self.post_json('%s' % self._get_path(), self.profile)
self.post_json('%s' % self._get_path(), self.profile)
list_data = self.get_json('%s' % self._get_path())
show_data = self.get_json('%s/iinterfaces' % self._get_path(
list_data['iprofiles'][0]['uuid']))
@ -214,7 +214,7 @@ class ProfileShowTestCase(ProfileTestCase):
def test_show_memory_success(self, mock_is_virtual):
mock_is_virtual.return_value = True
self.profile["profiletype"] = constants.PROFILE_TYPE_MEMORY
post_response = self.post_json('%s' % self._get_path(), self.profile)
self.post_json('%s' % self._get_path(), self.profile)
list_data = self.get_json('%s' % self._get_path())
show_data = self.get_json(
'%s/imemorys' % self._get_path(list_data['iprofiles'][0]['uuid']))
@ -228,7 +228,7 @@ class ProfileShowTestCase(ProfileTestCase):
def test_show_storage_success(self):
self.profile["profiletype"] = constants.PROFILE_TYPE_STORAGE
self.profile["ihost_uuid"] = self.compute.uuid
post_response = self.post_json('%s' % self._get_path(), self.profile)
self.post_json('%s' % self._get_path(), self.profile)
list_data = self.get_json('%s' % self._get_path())
profile_uuid = list_data['iprofiles'][0]['uuid']
show_data = self.get_json(

View File

@ -207,5 +207,5 @@ class sensorgroupTestCase(base.FunctionalTest):
request_relearn = {
'host_uuid': self.host['uuid'],
}
response = self.post_json('/isensorgroups/relearn', request_relearn)
self.post_json('/isensorgroups/relearn', request_relearn)
mock_hwmon_relearn.assert_called_once()

View File

@ -103,7 +103,7 @@ class StorageTierIndependentTCs(base.FunctionalTest):
values = {'cluster_uuid': self.cluster.uuid,
'name': 'gold'}
with mock.patch.object(ceph_utils.CephApiOperator, 'crushmap_tiers_add') as mock_tiers_add:
with mock.patch.object(ceph_utils.CephApiOperator, 'crushmap_tiers_add'):
response = self.post_json('/storage_tiers', values, expect_errors=True)
self.assertEqual(http_client.OK, response.status_int)
@ -136,7 +136,7 @@ class StorageTierIndependentTCs(base.FunctionalTest):
values = {'cluster_uuid': self.cluster.uuid,
'name': 'platinum'}
with mock.patch.object(ceph_utils.CephApiOperator, 'crushmap_tiers_add') as mock_tiers_add:
with mock.patch.object(ceph_utils.CephApiOperator, 'crushmap_tiers_add'):
response = self.post_json('/storage_tiers', values, expect_errors=True)
self.assertEqual(http_client.OK, response.status_int)
@ -284,7 +284,7 @@ class StorageTierIndependentTCs(base.FunctionalTest):
values = {'cluster_uuid': self.cluster.uuid,
'name': 'gold'}
with mock.patch.object(ceph_utils.CephApiOperator, 'crushmap_tiers_add') as mock_tiers_add:
with mock.patch.object(ceph_utils.CephApiOperator, 'crushmap_tiers_add'):
response = self.post_json('/storage_tiers', values, expect_errors=True)
self.assertEqual(http_client.OK, response.status_int)
@ -375,7 +375,7 @@ class StorageTierIndependentTCs(base.FunctionalTest):
'name': 'platinum',
'status': constants.SB_TIER_STATUS_IN_USE}
with mock.patch.object(ceph_utils.CephApiOperator, 'crushmap_tiers_add') as mock_tiers_add:
with mock.patch.object(ceph_utils.CephApiOperator, 'crushmap_tiers_add'):
response = self.post_json('/storage_tiers', values, expect_errors=True)
self.assertEqual(http_client.OK, response.status_int)
@ -471,7 +471,7 @@ class StorageTierIndependentTCs(base.FunctionalTest):
'name': 'platinum',
'status': constants.SB_TIER_STATUS_IN_USE}
with mock.patch.object(ceph_utils.CephApiOperator, 'crushmap_tiers_add') as mock_tiers_add:
with mock.patch.object(ceph_utils.CephApiOperator, 'crushmap_tiers_add'):
response = self.post_json('/storage_tiers', values, expect_errors=True)
self.assertEqual(http_client.OK, response.status_int)
@ -497,7 +497,7 @@ class StorageTierIndependentTCs(base.FunctionalTest):
self.assertIn('Storage Tier platinum cannot be deleted. It is in-use',
response.json['error_message'])
with mock.patch.object(ceph_utils.CephApiOperator, 'crushmap_tier_delete') as mock_tier_delete:
with mock.patch.object(ceph_utils.CephApiOperator, 'crushmap_tier_delete'):
response = self.delete('/storage_tiers/%s' % uuid_map['gold'], expect_errors=False)
self.assertEqual(http_client.NO_CONTENT, response.status_int)
@ -670,7 +670,7 @@ class StorageTierDependentTCs(base.FunctionalTest):
# Create a second storage tier
values = {'cluster_uuid': saved_cluster_db_uuid,
'name': 'gold'}
with mock.patch.object(ceph_utils.CephApiOperator, 'crushmap_tiers_add') as mock_tiers_add:
with mock.patch.object(ceph_utils.CephApiOperator, 'crushmap_tiers_add'):
response = self.post_json('/storage_tiers', values, expect_errors=True)
self.assertEqual(http_client.OK, response.status_int)

View File

@ -233,7 +233,7 @@ class ManagerTestCase(base.DbTestCase):
ihost['install_output'] = 'text'
ihost['console'] = 'ttyS0,115200'
res = self.service.configure_ihost(self.context, ihost)
self.service.configure_ihost(self.context, ihost)
with open(self.dnsmasq_hosts_file, 'r') as f:
self.assertEqual(

View File

@ -212,9 +212,9 @@ class DbNodeTestCase(base.DbTestCase):
self.assertEqual(ll['backend'], res[2]['backend'])
def test_storage_backend_get_by_isystem_none(self):
c = self._create_test_storage_backend_with_ceph()
f = self._create_test_storage_backend_with_file()
ll = self._create_test_storage_backend_with_lvm()
self._create_test_storage_backend_with_ceph()
self._create_test_storage_backend_with_file()
self._create_test_storage_backend_with_lvm()
self.assertRaises(exception.ServerNotFound,
self.dbapi.storage_backend_get_by_isystem,
self.system['id'] + 1)

View File

@ -82,7 +82,7 @@ commands =
# H231..H238 are python3 compatability
# H401,H403,H404,H405 are docstring and not important
[flake8]
ignore = F841,E501,E127,E128,E231,E266,E402,E711,E116,E203,E731,E712,E713,E702,E714,E126,E121,E722,H101,H102,H104,H105,H231,H232,H233,H234,H235,H236,H237,H238,H401,H403,H404,H405
ignore = E501,E127,E128,E231,E266,E402,E711,E116,E203,E731,E712,E713,E702,E714,E126,E121,E722,H101,H102,H104,H105,H231,H232,H233,H234,H235,H236,H237,H238,H401,H403,H404,H405
builtins = _
[testenv:flake8]