Merge "Enable python 3 testing support"
@@ -59,7 +59,7 @@ def generate_series():
     plt.figure()
     plt.barh(range(len(perc_data)), perc_data.values(), align='center')
-    locs, labels = plt.yticks(range(len(perc_data)), perc_data.keys())
+    locs, labels = plt.yticks(range(len(perc_data)), list(perc_data.keys()))
     plt.xlabel('Failure Percentage')
     plt.tight_layout()
     plt.savefig(CONF.output, dpi=900)

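Note on the pattern above (not part of the diff): on Python 3, dict.keys() and dict.values() return view objects rather than lists, so call sites that index the result or need a real sequence have to wrap it in list(). A minimal sketch with made-up data:

    # Illustrative only: perc_data is a stand-in for the real failure-rate dict.
    perc_data = {'test_a': 12.5, 'test_b': 3.0}

    labels = list(perc_data.keys())    # keys() is a view object on Python 3
    values = list(perc_data.values())  # materialize before handing to plotting APIs
    assert labels[0] in perc_data
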
@@ -18,6 +18,7 @@ import datetime

 from oslo_config import cfg
 from oslo_db.sqlalchemy import session as db_session
 from oslo_db.sqlalchemy import utils as db_utils
+import six
 import sqlalchemy
 from sqlalchemy.engine.url import make_url

@@ -45,7 +46,7 @@ def _create_facade_lazily():
     if facade is None:
         facade = db_session.EngineFacade(
             CONF.database.connection,
-            **dict(CONF.database.iteritems()))
+            **dict(six.iteritems(CONF.database)))
         _facades[db_backend] = facade
     return facade

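As an aside, dict.iteritems() no longer exists on Python 3; six.iteritems() dispatches to iteritems() on Python 2 and items() on Python 3, so the kwargs expansion works on both. A minimal sketch (the option dict is illustrative, the real code passes CONF.database):

    import six

    options = {'connection': 'sqlite://', 'pool_timeout': 30}
    for key, value in six.iteritems(options):  # iteritems() on py2, items() on py3
        print(key, value)
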
@@ -852,7 +853,7 @@ def get_recent_successful_runs(num_runs=10, session=None):
     session = session or get_session()
     results = db_utils.model_query(models.Run, session).order_by(
         models.Run.run_at.desc()).filter_by(fails=0).limit(num_runs).all()
-    return map(lambda x: x.id, results)
+    return list(map(lambda x: x.id, results))


 def get_recent_failed_runs(num_runs=10, session=None):
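For reference: Python 3's map() returns a lazy iterator, so callers that expect a list, index into the result, or iterate twice need list(map(...)), or a list comprehension. A rough sketch with a fake model class (names are illustrative, not from the commit):

    class FakeRun(object):
        """Stand-in for a Run model row; only the id attribute matters here."""
        def __init__(self, id_):
            self.id = id_

    results = [FakeRun('a'), FakeRun('b')]
    ids = list(map(lambda x: x.id, results))  # concrete list on py2 and py3
    ids_alt = [run.id for run in results]     # equivalent list comprehension
    assert ids == ids_alt == ['a', 'b']
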
@@ -868,7 +869,7 @@ def get_recent_failed_runs(num_runs=10, session=None):
     results = db_utils.model_query(models.Run, session).order_by(
         models.Run.run_at.desc()).filter(
             models.Run.fails > 0).limit(num_runs).all()
-    return map(lambda x: x.id, results)
+    return list(map(lambda x: x.id, results))


 def delete_old_runs(expire_age=186, session=None):
@@ -1104,7 +1105,7 @@ def get_all_runs_time_series_by_key(key, start_date=None,
                 'skip': run[3],
             }]}
         else:
-            if run[4] not in runs[run[0]].keys():
+            if run[4] not in list(runs[run[0]].keys()):
                 runs[run[0]][run[4]] = [{
                     'pass': run[1],
                     'fail': run[2],

@@ -32,7 +32,7 @@ class SubunitBase(models.ModelBase):
         super(SubunitBase, self).save(session or db_api.get_session())

     def keys(self):
-        return self.__dict__.keys()
+        return list(self.__dict__.keys())

     def values(self):
         return self.__dict__.values()

@@ -14,6 +14,8 @@

 import datetime

+import six
+from six import moves
 import testscenarios

 from subunit2sql.db import api
@@ -214,7 +216,7 @@ class TestDatabaseAPI(base.TestCase):
         # 10 with 10 failures
         # 7 in 2010/2011 each, 6 in 2012
         # 10 in projecta/projectb each
-        for i in range(20):
+        for i in moves.range(20):
             if i % 2 == 1:
                 fails = 10
             else:
@@ -251,7 +253,7 @@ class TestDatabaseAPI(base.TestCase):
     def test_get_time_series_runs_by_key_value(self):
         runs = []
         run_at = datetime.datetime.utcnow()
-        for run_num in xrange(15):
+        for run_num in moves.range(15):
             run = api.create_run(run_num, run_num + 1, run_num + 2, 3,
                                  run_at=run_at)
             runs.append(run)
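A quick note on this pattern: xrange() is gone on Python 3, and six.moves.range resolves to xrange on Python 2 and the built-in range on Python 3, so loop code stays identical on both. Minimal sketch:

    from six import moves

    total = 0
    for run_num in moves.range(15):  # xrange on py2, built-in range on py3
        total += run_num
    assert total == 105
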
@@ -263,14 +265,14 @@ class TestDatabaseAPI(base.TestCase):
         runs_time_series = api.get_time_series_runs_by_key_value('test_key',
                                                                   'fun')
         self.assertEqual(1, len(runs_time_series))
-        timestamp = runs_time_series.keys()[0]
+        timestamp = list(runs_time_series.keys())[0]
         self.assertEqual(3, len(runs_time_series[timestamp]))
-        for run_num in xrange(3):
+        for run_num in moves.range(3):
             run_dict = {
-                'skip': long(run_num),
-                'fail': long(run_num + 1),
-                'pass': long(run_num + 2),
-                'id': unicode(runs[run_num].id),
+                'skip': run_num,
+                'fail': run_num + 1,
+                'pass': run_num + 2,
+                'id': six.text_type(runs[run_num].id),
                 'run_time': 3.0,
                 'metadata': {
                     u'test_key': u'fun',
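Background on the replacements above: Python 3 has neither long() nor unicode(); ints are arbitrary precision and str is already text. six.text_type is unicode on Python 2 and str on Python 3, which keeps the expected-value dicts portable. A sketch, with a made-up run id:

    import six

    run_id = six.text_type('a3f9c2')  # unicode on py2, str on py3; value is illustrative
    expected = {
        'skip': 0,   # plain ints; long() does not exist on py3
        'fail': 1,
        'pass': 2,
        'id': run_id,
    }
    assert isinstance(expected['id'], six.text_type)
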
@@ -278,12 +280,12 @@ class TestDatabaseAPI(base.TestCase):
                 }
             }
             self.assertIn(run_dict, runs_time_series[timestamp])
-        for run_num in range(3, 14):
+        for run_num in moves.range(3, 14):
             missing_run_dict = {
-                'skip': long(run_num),
-                'fail': long(run_num + 1),
-                'pass': long(run_num + 2),
-                'id': unicode(runs[run_num].id),
+                'skip': run_num,
+                'fail': run_num + 1,
+                'pass': run_num + 2,
+                'id': six.text_type(runs[run_num].id),
                 'run_time': 3.0,
                 'metadata': {
                     u'test_key': u'fun',

@@ -39,7 +39,7 @@ def get_connect_string(backend,
     these then we'll run the tests, otherwise they are skipped
     """
     if backend == "mysql":
-        backend = "mysql+mysqldb"
+        backend = "mysql+pymysql"
     elif backend == "postgres":
         backend = "postgresql+psycopg2"

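For context: MySQL-python (MySQLdb) does not support Python 3, while PyMySQL is a pure-Python driver that does, so the test connection URL switches dialects. A hedged sketch of what such a URL looks like (credentials, host, and database name are placeholders, and the pymysql package must be installed):

    import sqlalchemy

    # Placeholder credentials and database name, not values from the commit.
    db_url = 'mysql+pymysql://user:secret@localhost/subunit2sql_test'
    engine = sqlalchemy.create_engine(db_url)  # resolves to the PyMySQL driver
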
@@ -14,13 +14,13 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-import ConfigParser
 import datetime
 import os


 from alembic import config
 from alembic import script
+from six.moves import configparser as ConfigParser
 import sqlalchemy
 from sqlalchemy.engine import reflection

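An aside on the import shuffle: the stdlib module was renamed from ConfigParser to configparser in Python 3, and six.moves papers over the rename; aliasing it back to ConfigParser means the rest of the module does not need to change. Minimal sketch (the .ini path is just an example):

    from six.moves import configparser as ConfigParser

    parser = ConfigParser.ConfigParser()  # same class name on py2 and py3
    parser.read('alembic.ini')            # example path, missing files are ignored
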
@@ -274,7 +274,7 @@ class TestWalkMigrations(base.TestCase):
         runs.insert().values(time_data).execute()
         runs = get_table(engine, 'runs')
         result = runs.select().execute()
-        run_at = map(lambda x: (x['id'], x['run_at']), result)
+        run_at = list(map(lambda x: (x['id'], x['run_at']), result))
         for run in data:
             self.assertIn((run['id'], None), run_at)
         self.assertIn((time_data['id'], now), run_at)
@@ -321,7 +321,7 @@ class TestWalkMigrations(base.TestCase):
         # Query the DB for the tests from the sample dataset above
         where = ' OR '.join(["tests.id='%s'" % x for x in test_ids])
         result = tests.select(where).execute()
-        run_time_pairs = map(lambda x: (x['id'], x['run_time']), result)
+        run_time_pairs = list(map(lambda x: (x['id'], x['run_time']), result))
         # Ensure the test with one failure is None
         self.assertIn(('fake_null_test_id_fails', None), run_time_pairs)
         # Ensure the test with 2 success each taking 4 sec lists the proper

@@ -15,13 +15,13 @@
 import os
 import shutil
 import subprocess
-import urlparse

 import fixtures as fix
 from oslo_concurrency.fixture import lockutils as lock_fixture
 from oslo_concurrency import lockutils
 from oslo_config import fixture as config_fixture
 from oslo_db import options
+from six.moves.urllib import parse as urlparse

 from subunit2sql.db import api as session
 from subunit2sql.migrations import cli

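Similar story for urlparse: Python 3 folded it into urllib.parse, and six.moves.urllib exposes the shared surface; aliasing it as urlparse keeps existing call sites untouched. Minimal sketch (the URL is a placeholder):

    from six.moves.urllib import parse as urlparse

    parsed = urlparse.urlparse('mysql+pymysql://localhost/subunit2sql')
    print(parsed.scheme, parsed.netloc)  # same API on py2 and py3
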
@@ -110,7 +110,7 @@ class TestReadSubunit(base.TestCase):
         # assert that the dict root key is the test name - the fake_id stripped
         # of the tags
         fake_test_name = fake_id[:fake_id.find('[')]
-        self.assertEqual(parsed_results.keys(), [fake_test_name])
+        self.assertEqual(list(parsed_results.keys()), [fake_test_name])

         self.assertEqual(parsed_results[fake_test_name]['status'],
                          fake_status)

@@ -8,6 +8,6 @@ testscenarios>=0.4
 testrepository>=0.0.18
 testtools>=0.9.34
 oslosphinx
-MySQL-python
+PyMySql
 psycopg2
 os-testr