Enable python3.5 sysinv unit test
- Replace mox with mox3 to support python3
- Change 'lambda (x, y): y - x' to 'lambda xy: xy[1] - xy[0]'
- Change iterators to lists where list operations are applied
- Make urlparse imports compatible with both python 2 and 3
- Adapt gettextutils
- Fix 'TypeError: unorderable types: NoneType() < int()' on python3
- Change dict keys() views to lists
- Update requirements and test requirements:
  remove MySQL-python, remove Cheetah, and drop the <3.0.0
  version cap on python-ldap
- Fix encode/decode issues
- Avoid an exception in obj_load_attr for optional fields
- Remove the test_hash_file test case since it was never used
- Use mock instead of CreateMockAnything to mock a context manager
- Fix an UnboundLocalError on python3
- Replace self.stubs.Set with stub_out
- Change e.message to str(e) in exception blocks
- Add self.mox.UnsetStubs() before self.mox.VerifyAll()
- Fix set() ordering mismatches between python 2 and 3
- Reimplement nested() for context managers in python 3
- Change the sqlalchemy migration __import__ level to 0 on python 3
- Avoid exposing an exception when the db is not connected
- Change hasattr(obj, attr) to getattr(obj, attr, None) since hasattr
  swallows all exceptions on python 2.7
- Avoid sqlalchemy.orm.exc.DetachedInstanceError in get_networks

Story: 2003433
Task: 28354
Depends-On: I4c601a72232402e45fe70e0d29de031ff294a4d7
Change-Id: I8382eba1bc3c91ca63d93e759021149914b12865
Signed-off-by: Sun Austin <austin.sun@intel.com>
parent 3f6e511ec3
commit 527faa0113

.zuul.yaml:
@@ -8,6 +8,7 @@
         - build-openstack-releasenotes
         - openstack-tox-linters
         - sysinv-tox-py27
+        - sysinv-tox-py35
         - sysinv-tox-flake8
         - sysinv-tox-pylint
         - controllerconfig-tox-flake8
@@ -27,6 +28,7 @@
         - build-openstack-releasenotes
         - openstack-tox-linters
         - sysinv-tox-py27
+        - sysinv-tox-py35
         - sysinv-tox-flake8
         - sysinv-tox-pylint
         - controllerconfig-tox-flake8
@@ -59,6 +61,21 @@
       tox_envlist: py27
       tox_extra_args: -c sysinv/sysinv/sysinv/tox.ini
+
+- job:
+    name: sysinv-tox-py35
+    parent: tox
+    description: |
+      Run py35 test for sysinv
+    required-projects:
+      - openstack/stx-update
+      - openstack/stx-fault
+      - openstack/stx-integ
+    files:
+      - sysinv/sysinv/*
+    vars:
+      tox_envlist: py35
+      tox_extra_args: -c sysinv/sysinv/sysinv/tox.ini

 - job:
     name: sysinv-tox-flake8
     parent: tox
@@ -30,7 +30,6 @@ pecan>=1.0.0
 six>=1.4.1
 jsonpatch>=1.1
 WSME>=0.5b2
-Cheetah>=2.4.4
 pyghmi
 PyYAML>=3.10
 python-magnumclient>=2.0.0 # Apache-2.0
@@ -107,7 +107,7 @@ class CephMon(base.APIBase):
         defaults = {'state': constants.SB_STATE_CONFIGURED,
                     'task': constants.SB_TASK_NONE}

-        self.fields = objects.ceph_mon.fields.keys()
+        self.fields = list(objects.ceph_mon.fields.keys())

         for k in self.fields:
             setattr(self, k, kwargs.get(k, defaults.get(k)))
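Why the list() wrapper recurs through all of these API classes: on python 3, dict.keys() returns a view object rather than a list, so code that concatenates, indexes, or stores the result as a plain sequence breaks. A standalone sketch (illustration only, not part of the change):

    fields = {'state': 'configured', 'task': None}

    keys = fields.keys()          # py2: list; py3: dict_keys view
    # keys + ['extra']            # py3: TypeError for a view
    keys = list(fields.keys())    # identical behaviour on both
    merged = keys + ['extra']     # plain list concatenation again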
@@ -89,7 +89,7 @@ class Certificate(base.APIBase):
     updated_at = wtypes.datetime.datetime

     def __init__(self, **kwargs):
-        self.fields = objects.certificate.fields.keys()
+        self.fields = list(objects.certificate.fields.keys())
         for k in self.fields:
             if not hasattr(self, k):
                 continue
@@ -107,7 +107,7 @@ class ControllerFs(base.APIBase):
     updated_at = wtypes.datetime.datetime

     def __init__(self, **kwargs):
-        self.fields = objects.controller_fs.fields.keys()
+        self.fields = list(objects.controller_fs.fields.keys())
         for k in self.fields:
             setattr(self, k, kwargs.get(k))

@@ -121,7 +121,7 @@ class CPU(base.APIBase):
     "A list containing a self link and associated cpu links"

     def __init__(self, **kwargs):
-        self.fields = objects.cpu.fields.keys()
+        self.fields = list(objects.cpu.fields.keys())
         for k in self.fields:
             setattr(self, k, kwargs.get(k))

@@ -85,7 +85,7 @@ class DNS(base.APIBase):
     updated_at = wtypes.datetime.datetime

     def __init__(self, **kwargs):
-        self.fields = objects.dns.fields.keys()
+        self.fields = list(objects.dns.fields.keys())
         for k in self.fields:
             setattr(self, k, kwargs.get(k))

@@ -85,7 +85,7 @@ class DRBDConfig(base.APIBase):
     updated_at = wtypes.datetime.datetime

     def __init__(self, **kwargs):
-        self.fields = objects.drbdconfig.fields.keys()
+        self.fields = list(objects.drbdconfig.fields.keys())
         for k in self.fields:
             setattr(self, k, kwargs.get(k))

@@ -49,7 +49,7 @@ class FirewallRules(base.APIBase):
     updated_at = wtypes.datetime.datetime

     def __init__(self, **kwargs):
-        self.fields = objects.firewallrules.fields.keys()
+        self.fields = list(objects.firewallrules.fields.keys())
         for k in self.fields:
             if not hasattr(self, k):
                 continue
@@ -540,7 +540,7 @@ class Host(base.APIBase):
     "The iscsi initiator name (only used for worker hosts)"

     def __init__(self, **kwargs):
-        self.fields = objects.host.fields.keys()
+        self.fields = list(objects.host.fields.keys())
         for k in self.fields:
             setattr(self, k, kwargs.get(k))

@@ -205,7 +205,7 @@ class Interface(base.APIBase):
     "Represent the networks of the interface"

     def __init__(self, **kwargs):
-        self.fields = objects.interface.fields.keys()
+        self.fields = list(objects.interface.fields.keys())
         for k in self.fields:
             setattr(self, k, kwargs.get(k))

@@ -120,7 +120,7 @@ class LVG(base.APIBase):
     "Links to the collection of ipvs on this lvg"

     def __init__(self, **kwargs):
-        self.fields = objects.lvg.fields.keys()
+        self.fields = list(objects.lvg.fields.keys())
         for k in self.fields:
             setattr(self, k, kwargs.get(k))

@@ -148,7 +148,7 @@ class LVG(base.APIBase):
     # lvm_vg_size is Volume Group's total size in byte
     # lvm_vg_free_pe is Volume Group's free Physical Extents
     # lvm_vg_total_pe is Volume Group's total Physical Extents
-    if lvg.lvm_vg_total_pe > 0:
+    if lvg.lvm_vg_total_pe and lvg.lvm_vg_total_pe > 0:
         lvg.lvm_vg_avail_size = \
             lvg.lvm_vg_size * lvg.lvm_vg_free_pe / lvg.lvm_vg_total_pe
     else:
@@ -665,7 +665,7 @@ def _check(op, lvg):
                 _("cinder-volumes LVG cannot be removed once it is "
                   "provisioned and LVM backend is added."))
         elif lvg['lvm_vg_name'] == constants.LVG_NOVA_LOCAL:
-            if (lvg['lvm_cur_lv'] > 1):
+            if (lvg['lvm_cur_lv'] and lvg['lvm_cur_lv'] > 1):
                 raise wsme.exc.ClientSideError(
                     _("Can't delete volume group: %s. There are currently %d "
                       "instance volumes present in the volume group. Terminate"
@@ -166,7 +166,7 @@ class Memory(base.APIBase):
     "A list containing a self link and associated memory links"

     def __init__(self, **kwargs):
-        self.fields = objects.memory.fields.keys()
+        self.fields = list(objects.memory.fields.keys())
         for k in self.fields:
             setattr(self, k, kwargs.get(k))

@@ -100,7 +100,7 @@ class InfraNetwork(base.APIBase):
     updated_at = wtypes.datetime.datetime

     def __init__(self, **kwargs):
-        self.fields = objects.infra_network.fields.keys()
+        self.fields = list(objects.infra_network.fields.keys())
         for k in self.fields:
             setattr(self, k, kwargs.get(k))

@@ -126,7 +126,7 @@ class OAMNetwork(base.APIBase):
     updated_at = wtypes.datetime.datetime

     def __init__(self, **kwargs):
-        self.fields = objects.oam_network.fields.keys()
+        self.fields = list(objects.oam_network.fields.keys())
         for k in self.fields:
             setattr(self, k, kwargs.get(k))

@@ -88,7 +88,7 @@ class NTP(base.APIBase):
     updated_at = wtypes.datetime.datetime

     def __init__(self, **kwargs):
-        self.fields = objects.ntp.fields.keys()
+        self.fields = list(objects.ntp.fields.keys())
         for k in self.fields:
             setattr(self, k, kwargs.get(k))

@@ -235,7 +235,7 @@ class Profile(base.APIBase):
     tboot = wtypes.text

     def __init__(self, **kwargs):
-        self.fields = objects.host.fields.keys()
+        self.fields = list(objects.host.fields.keys())
         for k in self.fields:
             setattr(self, k, kwargs.get(k))

@@ -3003,7 +3003,9 @@ def localstorageprofile_apply_to_host(host, profile):
         for hdisk in host.disks:
             if ((hdisk.device_path == pdisk.device_path or
                  hdisk.device_node == pdisk.device_node) and
-                    hdisk.size_mib >= pdisk.size_mib):
+                    ((hdisk.size_mib is None and pdisk.size_mib is None) or
+                     (hdisk.size_mib and pdisk.size_mib and
+                      hdisk.size_mib >= pdisk.size_mib))):
                 match = True
                 diskPairs.append((hdisk, pdisk))
                 disksUsed.append(hdisk.id)
@@ -365,10 +365,10 @@ class VlanInterface(Interface):

     def getNetworkMap(self):
         return {
-            'dataclassNetwork': lambda (node): DataclassNetwork(node),
-            'infraNetwork': lambda (node): ExternalNetwork(node, constants.NETWORK_TYPE_INFRA),
-            'oamNetwork': lambda (node): ExternalNetwork(node, constants.NETWORK_TYPE_OAM),
-            'mgmtNetwork': lambda (node): ExternalNetwork(node, constants.NETWORK_TYPE_MGMT)
+            'dataclassNetwork': lambda node: DataclassNetwork(node),
+            'infraNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_INFRA),
+            'oamNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_OAM),
+            'mgmtNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_MGMT)
         }

     @staticmethod
@@ -98,7 +98,7 @@ class RemoteLogging(base.APIBase):
     updated_at = wtypes.datetime.datetime

     def __init__(self, **kwargs):
-        self.fields = objects.remotelogging.fields.keys()
+        self.fields = list(objects.remotelogging.fields.keys())
         for k in self.fields:
             setattr(self, k, kwargs.get(k))

@@ -157,7 +157,7 @@ class SensorGroup(base.APIBase):
     "Links to the collection of isensors on this isensorgroup"

     def __init__(self, **kwargs):
-        self.fields = objects.sensorgroup.fields.keys()
+        self.fields = list(objects.sensorgroup.fields.keys())
         for k in self.fields:
             setattr(self, k, kwargs.get(k))

@@ -136,7 +136,7 @@ class Storage(base.APIBase):
     "The name of the tier that uses this stor."

     def __init__(self, **kwargs):
-        self.fields = objects.storage.fields.keys()
+        self.fields = list(objects.storage.fields.keys())
         for k in self.fields:
             setattr(self, k, kwargs.get(k))

@@ -115,7 +115,7 @@ class StorageBackend(base.APIBase):
                     'services': None,
                     'confirmed': False}

-        self.fields = objects.storage_backend.fields.keys()
+        self.fields = list(objects.storage_backend.fields.keys())

         # 'confirmed' is not part of objects.storage_backend.fields
         # (it's an API-only attribute)
@@ -181,7 +181,7 @@ class StorageCeph(base.APIBase):
                     'confirmed': False,
                     'object_gateway': False}

-        self.fields = objects.storage_ceph.fields.keys()
+        self.fields = list(objects.storage_ceph.fields.keys())

         # 'confirmed' is not part of objects.storage_backend.fields
         # (it's an API-only attribute)
@@ -119,7 +119,7 @@ class StorageCephExternal(base.APIBase):
                     'confirmed': False,
                     'ceph_conf': None}

-        self.fields = objects.storage_ceph_external.fields.keys()
+        self.fields = list(objects.storage_ceph_external.fields.keys())

         # 'confirmed' is not part of objects.storage_backend.fields
         # (it's an API-only attribute)
@@ -111,7 +111,7 @@ class StorageExternal(base.APIBase):
                     'services': None,
                     'confirmed': False}

-        self.fields = objects.storage_external.fields.keys()
+        self.fields = list(objects.storage_external.fields.keys())

         # 'confirmed' is not part of objects.storage_backend.fields
         # (it's an API-only attribute)
@@ -109,7 +109,7 @@ class StorageFile(base.APIBase):
                     'services': None,
                     'confirmed': False}

-        self.fields = objects.storage_file.fields.keys()
+        self.fields = list(objects.storage_file.fields.keys())

         # 'confirmed' is not part of objects.storage_backend.fields
         # (it's an API-only attribute)
@@ -113,7 +113,7 @@ class StorageLVM(base.APIBase):
                     'services': None,
                     'confirmed': False}

-        self.fields = objects.storage_lvm.fields.keys()
+        self.fields = list(objects.storage_lvm.fields.keys())

         # 'confirmed' is not part of objects.storage_backend.fields
         # (it's an API-only attribute)
@@ -84,7 +84,7 @@ class User(base.APIBase):
     updated_at = wtypes.datetime.datetime

     def __init__(self, **kwargs):
-        self.fields = objects.user.fields.keys()
+        self.fields = list(objects.user.fields.keys())
         for k in self.fields:
             setattr(self, k, kwargs.get(k))

@@ -55,7 +55,10 @@ def validate_limit(limit):
     if limit and limit < 0:
         raise wsme.exc.ClientSideError(_("Limit must be positive"))

-    return min(CONF.api_limit_max, limit) or CONF.api_limit_max
+    if limit:
+        return min(CONF.api_limit_max, limit) or CONF.api_limit_max
+    else:
+        return CONF.api_limit_max


 def validate_sort_dir(sort_dir):
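The extra branch exists because python 3 refuses ordering comparisons that mix None and int: min(CONF.api_limit_max, None) worked on python 2 but now raises. A minimal standalone illustration, with API_LIMIT_MAX standing in for the real oslo.config option:

    API_LIMIT_MAX = 1000

    def validate_limit(limit):
        # py2: min(1000, None) quietly returned None
        # py3: TypeError: unorderable types: NoneType() < int()
        if limit:
            return min(API_LIMIT_MAX, limit)
        return API_LIMIT_MAX

    assert validate_limit(None) == 1000
    assert validate_limit(50) == 50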
@@ -23,6 +23,7 @@ Based on pecan.middleware.errordocument
 """

 import json
+import six
 import webob
 from xml import etree as et

@@ -83,7 +84,11 @@ class ParsableErrorMiddleware(object):
                           '</error_message>']
                 state['headers'].append(('Content-Type', 'application/xml'))
             else:
+                if six.PY3:
+                    app_iter = [i.decode('utf-8') for i in app_iter]
                 body = [json.dumps({'error_message': '\n'.join(app_iter)})]
+                if six.PY3:
+                    body = [item.encode('utf-8') for item in body]
                 state['headers'].append(('Content-Type', 'application/json'))
             state['headers'].append(('Content-Length', str(len(body[0]))))
         else:
@@ -549,7 +549,8 @@ def sanitize_hostname(hostname):
     """Return a hostname which conforms to RFC-952 and RFC-1123 specs."""
     if isinstance(hostname, six.string_types):
         hostname = hostname.encode('latin-1', 'ignore')
+        if six.PY3:
+            hostname = hostname.decode()
     hostname = re.sub('[ _]', '-', hostname)
     hostname = re.sub('[^\w.-]+', '', hostname)
     hostname = hostname.lower()
@@ -595,7 +596,9 @@ def hash_file(file_like_object):
     """Generate a hash for the contents of a file."""
     checksum = hashlib.sha1()
     for chunk in iter(lambda: file_like_object.read(32768), b''):
-        checksum.update(chunk)
+        encoded_chunk = (chunk.encode(encoding='utf-8')
+                         if isinstance(chunk, six.string_types) else chunk)
+        checksum.update(encoded_chunk)
     return checksum.hexdigest()

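hashlib on python 3 only accepts bytes, so str chunks must be encoded before update(); bytes pass through untouched. A self-contained sketch of the same guard (sha1_of_chunks is a hypothetical helper, not from the change):

    import hashlib
    import six

    def sha1_of_chunks(chunks):
        checksum = hashlib.sha1()
        for chunk in chunks:
            if isinstance(chunk, six.string_types):
                chunk = chunk.encode('utf-8')   # py3: update() needs bytes
            checksum.update(chunk)
        return checksum.hexdigest()

    assert sha1_of_chunks(['abc']) == sha1_of_chunks([b'abc'])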
@@ -159,11 +159,7 @@ class AppOperator(object):
         from six.moves.urllib.error import HTTPError
         from six.moves.urllib.error import URLError
         from socket import timeout as socket_timeout
-        try:
-            import urlparse
-        except ImportError:
-            from urllib2 import urlparse
+        from six.moves.urllib.parse import urlparse

         def _handle_download_failure(reason):
             raise exception.KubeAppUploadFailure(
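six.moves resolves to urlparse on python 2 and urllib.parse on python 3, so a single import line replaces the try/except dance. A quick usage check:

    from six.moves.urllib.parse import urlparse

    parts = urlparse('http://controller:8080/charts/app.tgz')
    assert parts.scheme == 'http'
    assert parts.netloc == 'controller:8080'
    assert parts.path == '/charts/app.tgz'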
@@ -136,7 +136,7 @@ CONFIG_CONTROLLER_FINI_FLAG = os.path.join(tsc.VOLATILE_PATH,
 CONFIG_FAIL_FLAG = os.path.join(tsc.VOLATILE_PATH, ".config_fail")

 # configuration UUID reboot required flag (bit)
-CONFIG_REBOOT_REQUIRED = (1 << 127L)
+CONFIG_REBOOT_REQUIRED = (1 << 127)

 LOCK_NAME_UPDATE_CONFIG = 'update_config_'

@@ -2311,7 +2311,7 @@ class Connection(api.Connection):

         obj = self._interface_get(models.Interfaces, interface_id)

-        for k, v in values.items():
+        for k, v in list(values.items()):
             if k == 'networktype' and v == constants.NETWORK_TYPE_NONE:
                 v = None
             if k == 'datanetworks' and v == 'none':
@@ -47,7 +47,14 @@ def make_class_properties(cls):
     def getter(self, name=name):
         attrname = get_attrname(name)
         if not hasattr(self, attrname):
-            self.obj_load_attr(name)
+            # if name is in _optional_fields, just return None, since
+            # the class does not implement obj_load_attr for them
+            if hasattr(self, '_optional_fields') and name in self._optional_fields:
+                LOG.exception(_('This is Optional field in %(field)s') %
+                              {'field': name})
+                return None
+            else:
+                self.obj_load_attr(name)
         return getattr(self, attrname)

     def setter(self, value, name=name, typefn=typefn):
@@ -397,7 +404,7 @@ class SysinvObject(object):

         NOTE(danms): May be removed in the future.
         """
-        for name in self.fields.keys() + self.obj_extra_fields:
+        for name in list(self.fields.keys()) + self.obj_extra_fields:
             if (hasattr(self, get_attrname(name)) or
                     name in self.obj_extra_fields):
                 yield name, getattr(self, name)
@@ -7,7 +7,6 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 # coding=utf-8
 #
-
 from sysinv.common import constants
 from sysinv.db import api as db_api
 from sysinv.objects import base
@@ -81,19 +80,31 @@ def get_host_uuid(field, db_server):

 def get_networks(field, db_object):
     result = []
-    if hasattr(db_object, 'interface_networks'):
-        for entry in getattr(db_object, 'interface_networks', []):
-            id_str = str(entry.network_id)
-            result.append(id_str)
+    try:
+        if getattr(db_object, 'interface_networks', None):
+            for entry in getattr(db_object, 'interface_networks', []):
+                id_str = str(entry.network_id)
+                result.append(id_str)
+    except exc.DetachedInstanceError:
+        # instrument and return empty network
+        LOG.exception("DetachedInstanceError unable to get networks for %s" %
+                      db_object)
+        pass
     return result


 def get_datanetworks(field, db_object):
     result = []
-    if hasattr(db_object, 'interface_datanetworks'):
-        for entry in getattr(db_object, 'interface_datanetworks', []):
-            id_str = str(entry.datanetwork_id)
-            result.append(id_str)
+    try:
+        if hasattr(db_object, 'interface_datanetworks'):
+            for entry in getattr(db_object, 'interface_datanetworks', []):
+                id_str = str(entry.datanetwork_id)
+                result.append(id_str)
+    except exc.DetachedInstanceError:
+        # instrument and return empty datanetwork
+        LOG.exception("DetachedInstanceError unable to get datanetworks \
+            for %s" % db_object)
+        pass
     return result

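This hunk also reflects the commit-message note on hasattr: on python 2.7 it swallows every exception raised during the attribute lookup and just returns False, which can hide a real failure such as a lazy load going wrong; python 3 narrows it to AttributeError. A standalone demonstration (illustration only):

    import sys

    class Row(object):
        @property
        def interface_networks(self):
            raise RuntimeError("lazy load failed")

    row = Row()
    if sys.version_info[0] == 2:
        # py2: the RuntimeError is silently swallowed
        assert hasattr(row, 'interface_networks') is False
    else:
        # py3: only AttributeError is swallowed; others propagate
        try:
            hasattr(row, 'interface_networks')
            assert False, "expected RuntimeError"
        except RuntimeError:
            pass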
@@ -18,8 +18,8 @@
 # under the License.

 import fixtures
-import mox
 import stubout
+from mox3 import mox


 class MoxStubout(fixtures.Fixture):
@@ -25,13 +25,16 @@ Usual usage in an openstack.common module:

 import gettext
 import os
+import six

 _localedir = os.environ.get('sysinv'.upper() + '_LOCALEDIR')
 _t = gettext.translation('sysinv', localedir=_localedir, fallback=True)


 def _(msg):
-    return _t.ugettext(msg)
+    if six.PY2:
+        return _t.ugettext(msg)
+    if six.PY3:
+        return _t.gettext(msg)


 def install(domain):
@@ -45,6 +48,10 @@ def install(domain):
     a translation-domain-specific environment variable (e.g.
     NOVA_LOCALEDIR).
     """
-    gettext.install(domain,
-                    localedir=os.environ.get(domain.upper() + '_LOCALEDIR'),
-                    unicode=True)
+    if six.PY2:
+        gettext.install(domain,
+                        localedir=os.environ.get(domain.upper() + '_LOCALEDIR'),
+                        unicode=True)
+    if six.PY3:
+        gettext.install(domain,
+                        localedir=os.environ.get(domain.upper() + '_LOCALEDIR'))
@@ -49,9 +49,9 @@ def parse_isotime(timestr):
     try:
         return iso8601.parse_date(timestr)
     except iso8601.ParseError as e:
-        raise ValueError(e.message)
+        raise ValueError(str(e))
     except TypeError as e:
-        raise ValueError(e.message)
+        raise ValueError(str(e))


 def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
@@ -7,7 +7,7 @@
 import os

 from oslo_utils import strutils
-from urlparse import urlparse
+from six.moves.urllib.parse import urlparse
 from sysinv.common import constants
 from sysinv.common import exception
 from sysinv.openstack.common import log as logging
@@ -10,7 +10,7 @@ import os
 from sysinv.common import constants

 from tsconfig import tsconfig
-from urlparse import urlparse
+from six.moves.urllib.parse import urlparse

 from sysinv.puppet import openstack

@@ -606,7 +606,7 @@ class PlatformPuppet(base.BasePuppet):
             # change the CPU list to ranges
             rcu_nocbs_ranges = ""
             for key, group in itertools.groupby(enumerate(rcu_nocbs),
-                                                lambda (x, y): y - x):
+                                                lambda xy: xy[1] - xy[0]):
                 group = list(group)
                 rcu_nocbs_ranges += "%s-%s," % (group[0][1], group[-1][1])
             rcu_nocbs_ranges = rcu_nocbs_ranges.rstrip(',')
@@ -619,7 +619,7 @@ class PlatformPuppet(base.BasePuppet):
             # change the CPU list to ranges
             non_vswitch_cpus_ranges = ""
             for key, group in itertools.groupby(enumerate(non_vswitch_cpus),
-                                                lambda (x, y): y - x):
+                                                lambda xy: xy[1] - xy[0]):
                 group = list(group)
                 non_vswitch_cpus_ranges += "\"%s-%s\"," % (group[0][1], group[-1][1])

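Python 3 removed tuple parameter unpacking in lambdas (PEP 3113), hence lambda xy: xy[1] - xy[0]. The idiom itself: pairing each CPU number with its index makes (value - index) constant across a consecutive run, so groupby buckets each run into one range. A standalone version of the helper logic:

    import itertools

    def to_ranges(cpus):
        """Collapse a sorted CPU list into 'a-b' range strings."""
        ranges = ""
        for _key, group in itertools.groupby(enumerate(cpus),
                                             lambda xy: xy[1] - xy[0]):
            group = list(group)
            ranges += "%s-%s," % (group[0][1], group[-1][1])
        return ranges.rstrip(',')

    assert to_ranges([0, 1, 2, 5, 6, 9]) == '0-2,5-6,9'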
@@ -752,6 +752,8 @@ class StorageBackendTestCases(base.FunctionalTest):
             'capabilities': {'test_bparam3': 'foo'},
             'confirmed': True
         }
+        services_string = '%s,%s' % (constants.SB_SVC_CINDER, constants.SB_SVC_GLANCE)
+        services_string2 = '%s,%s' % (constants.SB_SVC_GLANCE, constants.SB_SVC_CINDER)
         response = self.post_json('/storage_backend', vals, expect_errors=False)
         self.assertEqual(http_client.OK, response.status_int)
         self.assertEqual('ceph',  # Expected
@@ -759,15 +761,13 @@ class StorageBackendTestCases(base.FunctionalTest):

         patch_response = self.patch_dict_json('/storage_backend/%s' % response.json['uuid'],
                                               headers={'User-Agent': 'sysinv'},
-                                              services=(',').join([constants.SB_SVC_CINDER,
-                                                                   constants.SB_SVC_GLANCE]),
+                                              services=services_string,
                                               capabilities=jsonutils.dumps({'test_cparam3': 'bar',
                                                                             'test_gparam3': 'too'}),
                                               expect_errors=False)
         self.assertEqual(http_client.OK, patch_response.status_int)
-        self.assertEqual((',').join([constants.SB_SVC_CINDER,
-                                     constants.SB_SVC_GLANCE]),  # Expected
-                         self.get_json('/storage_backend/%s/' % response.json['uuid'])['services'])  # Result
+        json_result = self.get_json('/storage_backend/%s/' % response.json['uuid'])['services']
+        self.assertTrue(services_string == json_result or services_string2 == json_result)
         self.assertEqual({'test_bparam3': 'foo',
                           'test_cparam3': 'bar',
                           'test_gparam3': 'too'},  # Expected
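The test now accepts both service orders because the services value is assembled from set-like data, and set iteration order is not stable across python 2 and 3 (python 3 randomizes string hashing by default). In miniature:

    joined = ','.join({'cinder', 'glance'})   # order depends on the set
    assert joined in ('cinder,glance', 'glance,cinder')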
@@ -15,7 +15,19 @@ import mock
 from six.moves import http_client

 from cephclient import wrapper as ceph
-from contextlib import nested
+try:
+    from contextlib import nested  # Python 2
+except ImportError:
+    from contextlib import ExitStack
+    from contextlib import contextmanager

+    @contextmanager
+    def nested(*contexts):
+        """
+        Reimplementation of nested in python 3.
+        """
+        with ExitStack() as stack:
+            yield tuple(stack.enter_context(cm) for cm in contexts)
 from oslo_serialization import jsonutils
 from sysinv.conductor import manager
 from sysinv.conductor import rpcapi
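The ExitStack shim keeps existing call sites working unchanged: entering nested(...) enters each manager in order and unwinds them in reverse, like python 2's contextlib.nested. A usage sketch, assuming the mock library is available:

    import mock

    with nested(mock.patch('os.path.exists'),
                mock.patch('os.unlink')) as (m_exists, m_unlink):
        m_exists.return_value = True
        # both patches are active here; both are undone on exit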
@@ -659,7 +671,6 @@ class StorageTierDependentTCs(base.FunctionalTest):
         def fake_configure_osd_istor(context, istor_obj):
             istor_obj['osdid'] = 0
             return istor_obj
-
         mock_mon_status.return_value = [3, 2, ['controller-0', 'controller-1', 'storage-0']]
         mock_osd.side_effect = fake_configure_osd_istor

@@ -205,6 +205,18 @@ class TestCase(testtools.TestCase):
         else:
             return root

+    def stub_out(self, old, new):
+        """Replace a function for the duration of the test.
+
+        Use the monkey patch fixture to replace a function for the
+        duration of a test. Useful when you want to provide fake
+        methods instead of mocks during testing.
+
+        This should be used instead of self.stubs.Set (which is based
+        on mox) going forward.
+        """
+        self.useFixture(fixtures.MonkeyPatch(old, new))
+

 class TimeOverride(fixtures.Fixture):
     """Fixture to start and remove time override."""
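A hypothetical test using the new fixture-based helper: the patch target is a dotted import path, and the replacement is undone automatically when the test's fixtures are cleaned up, so no manual teardown is needed.

    def fake_execute(*cmd, **kwargs):
        return '', ''    # pretend every command succeeds silently

    class ExampleTest(TestCase):
        def test_uses_fake_execute(self):
            self.stub_out('sysinv.common.utils.execute', fake_execute)
            # code under test now calls fake_execute instead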
@@ -521,14 +521,20 @@ class TestMigrations(BaseMigrationTestCase, WalkVersionsMixin):

     def setUp(self):
         super(TestMigrations, self).setUp()
+        if six.PY2:
+            version = -1
+        else:
+            version = 0
         self.migration = __import__('sysinv.db.migration',
-                                    globals(), locals(), ['INIT_VERSION'], -1)
+                                    globals(), locals(), ['INIT_VERSION'], version)
         self.INIT_VERSION = self.migration.INIT_VERSION
         if self.migration_api is None:
-            temp = __import__('sysinv.db.sqlalchemy.migration',
-                              globals(), locals(), ['versioning_api'], -1)
-            self.migration_api = temp.versioning_api
+            try:
+                temp = __import__('sysinv.db.sqlalchemy.migration',
+                                  globals(), locals(), ['versioning_api'], version)
+                self.migration_api = temp.versioning_api
+            except Exception as e:
+                print('import warning :%s' % e)

     def column_exists(self, engine, table_name, column):
         metadata = MetaData()
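The level argument is the reason for the py2/py3 split: python 2's __import__ defaulted to level=-1 (try a relative import, then an absolute one), a value python 3 rejects; there only 0 (absolute) or positive values (explicit relative) are allowed.

    # py3 raises ValueError for level=-1; 0 means an absolute import
    mod = __import__('sysinv.db.migration', globals(), locals(),
                     ['INIT_VERSION'], 0)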
@@ -296,12 +296,14 @@ class _TestObject(object):
         obj = Foo()
         # NOTE(danms): Can't use assertRaisesRegexp() because of py26
         raised = False
+        ex_out = ""
         try:
             obj.foobar
         except NotImplementedError as ex:
+            ex_out = str(ex)
             raised = True
         self.assertTrue(raised)
-        self.assertTrue('foobar' in str(ex))
+        self.assertTrue('foobar' in ex_out)

     def test_loaded_in_primitive(self):
         obj = MyObj()
@@ -420,7 +422,18 @@ class _TestObject(object):
                         'updated_at': timeutils.isotime(dt),
                         }
                     }
-        self.assertEqual(obj.obj_to_primitive(), expected)
+        expected2 = {'sysinv_object.name': 'MyObj',
+                     'sysinv_object.namespace': 'sysinv',
+                     'sysinv_object.version': '1.5',
+                     'sysinv_object.changes':
+                         ['updated_at', 'created_at'],
+                     'sysinv_object.data':
+                         {'created_at': timeutils.isotime(dt),
+                          'updated_at': timeutils.isotime(dt),
+                          }
+                     }
+        prim = obj.obj_to_primitive()
+        self.assertTrue(expected == prim or expected2 == prim)

     def test_contains(self):
         obj = MyObj()
@@ -16,12 +16,9 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-import os

 from sysinv.common import exception
 from sysinv.common import images
-from sysinv.common import utils
-from sysinv.openstack.common import fileutils
 from sysinv.tests import base


@@ -60,12 +57,12 @@ class SysinvImagesTestCase(base.TestCase):

             return FakeImgInfo()

-        self.stubs.Set(utils, 'execute', fake_execute)
-        self.stubs.Set(os, 'rename', fake_rename)
-        self.stubs.Set(os, 'unlink', fake_unlink)
-        self.stubs.Set(images, 'fetch', lambda *_: None)
-        self.stubs.Set(images, 'qemu_img_info', fake_qemu_img_info)
-        self.stubs.Set(fileutils, 'delete_if_exists', fake_rm_on_errror)
+        self.stub_out('sysinv.common.utils.execute', fake_execute)
+        self.stub_out('os.rename', fake_rename)
+        self.stub_out('os.unlink', fake_unlink)
+        self.stub_out('sysinv.common.images.fetch', lambda *_: None)
+        self.stub_out('sysinv.common.images.qemu_img_info', fake_qemu_img_info)
+        self.stub_out('sysinv.openstack.common.fileutils.delete_if_exists', fake_rm_on_errror)

         context = 'opaque context'
         image_id = '4'
@@ -23,9 +23,9 @@ import os
 import tempfile
 import testtools
 import time
+import six

-import mox
+from mox3 import mox

 from sysinv.cmd import sysinv_deploy_helper as bmdh
 from sysinv import db
 from sysinv.openstack.common import log as logging
@@ -233,7 +233,10 @@ class SwitchPxeConfigTestCase(tests_base.TestCase):
     def setUp(self):
         super(SwitchPxeConfigTestCase, self).setUp()
         (fd, self.fname) = tempfile.mkstemp()
-        os.write(fd, _PXECONF_DEPLOY)
+        if six.PY2:
+            os.write(fd, _PXECONF_DEPLOY)
+        else:
+            os.write(fd, bytes(_PXECONF_DEPLOY, 'UTF-8'))
         os.close(fd)

     def tearDown(self):
@@ -1,4 +1,4 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
 # Copyright 2011 Justin Santa Barbara
 # Copyright 2012 Hewlett-Packard Development Company, L.P.
@@ -16,17 +16,16 @@
 # under the License.

 import errno
-import hashlib
+import mock
 import os
 import os.path
 import tempfile
 import wsme

-import mox
 import netaddr
 from oslo_config import cfg

-from six import StringIO
+from mox3 import mox
 from six.moves import builtins
 from sysinv.common import exception
 from sysinv.common import service_parameter
@@ -50,6 +49,7 @@ class BareMetalUtilsTestCase(base.TestCase):

         self.mox.ReplayAll()
         utils.unlink_without_raise("/fake/path")
+        self.mox.UnsetStubs()
         self.mox.VerifyAll()

     def test_unlink_ENOENT(self):
@@ -58,6 +58,7 @@ class BareMetalUtilsTestCase(base.TestCase):

         self.mox.ReplayAll()
         utils.unlink_without_raise("/fake/path")
+        self.mox.UnsetStubs()
         self.mox.VerifyAll()

     def test_create_link(self):
@@ -110,7 +111,7 @@ exit 1
         self.assertRaises(exception.ProcessExecutionError,
                           utils.execute,
                           tmpfilename, tmpfilename2, attempts=10,
-                          process_input='foo',
+                          process_input='foo'.encode('utf-8'),
                           delay_on_retry=False)
         fp = open(tmpfilename2, 'r')
         runs = fp.read()
@@ -154,7 +155,7 @@ grep foo
             os.chmod(tmpfilename, 0o755)
             utils.execute(tmpfilename,
                           tmpfilename2,
-                          process_input='foo',
+                          process_input='foo'.encode('utf-8'),
                           attempts=2)
         finally:
             os.unlink(tmpfilename)
@@ -203,12 +204,9 @@ class GenericUtilsTestCase(base.TestCase):
         fake_contents = "lorem ipsum"
         fake_file = self.mox.CreateMockAnything()
         fake_file.read().AndReturn(fake_contents)
-        fake_context_manager = self.mox.CreateMockAnything()
-        fake_context_manager.__enter__().AndReturn(fake_file)
-        fake_context_manager.__exit__(mox.IgnoreArg(),
-                                      mox.IgnoreArg(),
-                                      mox.IgnoreArg())
+        fake_context_manager = mock.Mock()
+        fake_context_manager.__enter__ = mock.Mock(return_value=fake_file)
+        fake_context_manager.__exit__ = mock.Mock(return_value=False)

         builtins.open(mox.IgnoreArg()).AndReturn(fake_context_manager)

         self.mox.ReplayAll()
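For the context-manager half, plain mock objects do the job: mock.Mock supports attaching magic methods such as __enter__ and __exit__ as attributes, and returning False from __exit__ keeps exceptions propagating instead of being swallowed. A standalone sketch:

    import mock

    fake_file = mock.Mock()
    fake_file.read.return_value = "lorem ipsum"

    cm = mock.Mock()
    cm.__enter__ = mock.Mock(return_value=fake_file)
    cm.__exit__ = mock.Mock(return_value=False)  # don't swallow exceptions

    with cm as f:
        assert f.read() == "lorem ipsum"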
@@ -224,13 +222,6 @@ class GenericUtilsTestCase(base.TestCase):
         self.assertEqual(data, fake_contents)
         self.assertTrue(self.reload_called)

-    def test_hash_file(self):
-        data = 'Mary had a little lamb, its fleece as white as snow'
-        flo = StringIO(data)
-        h1 = utils.hash_file(flo)
-        h2 = hashlib.sha1(data).hexdigest()
-        self.assertEqual(h1, h2)
-
     def test_is_valid_boolstr(self):
         self.assertTrue(utils.is_valid_boolstr('true'))
         self.assertTrue(utils.is_valid_boolstr('false'))
@@ -7,7 +7,6 @@ discover
 fixtures>=0.3.14
 mock<1.1.0,>=1.0
 mox
-MySQL-python
 passlib>=1.7.0
 psycopg2
 python-barbicanclient<3.1.0,>=3.0.1
@@ -1,5 +1,5 @@
 [tox]
-envlist = flake8,py27, pylint
+envlist = flake8,py27,py35,pylint
 minversion = 1.6
 # skipsdist = True
 #,pip-missing-reqs