Add tests for postgresql
Some deployments use postgres as the database backend. In order to have
official support for that we need to add tests for it.

Change-Id: I36408c27ccb36fd0826d97f34d98b211ee075f4e
Depends-On: https://review.openstack.org/535718

parent 851ab84c7e
commit 267d134b7e

@@ -3,6 +3,7 @@
 mysql-client [test]
 mysql-server [test]
+postgresql [test]
 libjpeg-dev [test]
 openssl [test]
 zookeeperd [platform:dpkg]

@@ -11,6 +11,7 @@ sphinxcontrib-programoutput
 sphinx-autodoc-typehints
 reno>=2.5.0 # Apache-2.0
 PyMySQL
+psycopg2-binary
 mypy
 zuul-sphinx
 beautifulsoup4

@@ -50,6 +50,8 @@ import fixtures
 import kazoo.client
 import kazoo.exceptions
 import pymysql
+import psycopg2
+import psycopg2.extensions
 import testtools
 import testtools.content
 import testtools.content_type

@@ -1925,6 +1927,45 @@ class MySQLSchemaFixture(fixtures.Fixture):
         cur.execute("flush privileges")


+class PostgresqlSchemaFixture(fixtures.Fixture):
+    def setUp(self):
+        super(PostgresqlSchemaFixture, self).setUp()
+
+        # Postgres lowercases user and table names during creation but not
+        # during authentication. Thus only use lowercase chars.
+        random_bits = ''.join(random.choice(string.ascii_lowercase)
+                              for x in range(8))
+        self.name = '%s_%s' % (random_bits, os.getpid())
+        self.passwd = uuid.uuid4().hex
+        db = psycopg2.connect(host="localhost",
+                              user="openstack_citest",
+                              password="openstack_citest",
+                              database="openstack_citest")
+        db.autocommit = True
+        cur = db.cursor()
+        cur.execute("create role %s with login unencrypted password '%s';" % (
+            self.name, self.passwd))
+        cur.execute("create database %s OWNER %s TEMPLATE template0 "
+                    "ENCODING 'UTF8';" % (self.name, self.name))
+
+        self.dburi = 'postgresql://%s:%s@localhost/%s' % (self.name,
+                                                          self.passwd,
+                                                          self.name)
+
+        self.addDetail('dburi', testtools.content.text_content(self.dburi))
+        self.addCleanup(self.cleanup)
+
+    def cleanup(self):
+        db = psycopg2.connect(host="localhost",
+                              user="openstack_citest",
+                              password="openstack_citest",
+                              database="openstack_citest")
+        db.autocommit = True
+        cur = db.cursor()
+        cur.execute("drop database %s" % self.name)
+        cur.execute("drop user %s" % self.name)
+
+
 class BaseTestCase(testtools.TestCase):
     log = logging.getLogger("zuul.test")
     wait_timeout = 30

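The fixture above provisions a throwaway role and database per test and exposes the connection string as `dburi`. As a rough standalone sketch of that usage pattern (assuming the `PostgresqlSchemaFixture` class from the hunk above is importable and the local `openstack_citest` superuser from the test setup script exists):

```python
# Illustrative sketch only: exercising a PostgresqlSchemaFixture-style fixture
# directly. Assumes the fixture class shown above is importable and that a
# local "openstack_citest" superuser exists, as the test setup script creates.
import psycopg2
import testtools


class ExamplePostgresFixtureUsage(testtools.TestCase):
    def test_fixture_provides_isolated_db(self):
        # useFixture() runs setUp() now and schedules the cleanup automatically.
        f = self.useFixture(PostgresqlSchemaFixture())  # assumed importable
        # The generated credentials should be enough to reach the new database.
        db = psycopg2.connect(host="localhost", user=f.name,
                              password=f.passwd, database=f.name)
        cur = db.cursor()
        cur.execute("SELECT 1")
        self.assertEqual((1,), cur.fetchone())
        db.close()
```
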
@@ -3159,8 +3200,13 @@ class ZuulDBTestCase(ZuulTestCase):
                 continue

             if self.config.get(section_name, 'driver') == 'sql':
-                f = MySQLSchemaFixture()
-                self.useFixture(f)
                 if (self.config.get(section_name, 'dburi') ==
                     '$MYSQL_FIXTURE_DBURI$'):
+                    f = MySQLSchemaFixture()
+                    self.useFixture(f)
                     self.config.set(section_name, 'dburi', f.dburi)
+                elif (self.config.get(section_name, 'dburi') ==
+                      '$POSTGRESQL_FIXTURE_DBURI$'):
+                    f = PostgresqlSchemaFixture()
+                    self.useFixture(f)
+                    self.config.set(section_name, 'dburi', f.dburi)

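The `$MYSQL_FIXTURE_DBURI$` and `$POSTGRESQL_FIXTURE_DBURI$` placeholders in the test configuration files get swapped for the per-test fixture URIs during setup, as shown above. A minimal standalone sketch of that substitution idea, with a made-up section name and URI:

```python
# Standalone sketch of the dburi placeholder substitution; the section name
# and fixture URI here are illustrative, not Zuul's real values.
import configparser

config = configparser.ConfigParser()
config.read_string("""
[connection resultsdb_postgresql]
driver = sql
dburi = $POSTGRESQL_FIXTURE_DBURI$
""")

fixture_dburi = 'postgresql://someuser:secret@localhost/somedb'  # hypothetical
for section in config.sections():
    if config.get(section, 'driver') != 'sql':
        continue
    if config.get(section, 'dburi') == '$POSTGRESQL_FIXTURE_DBURI$':
        # Point the connection at the database the fixture just created.
        config.set(section, 'dburi', fixture_dburi)

print(config.get('connection resultsdb_postgresql', 'dburi'))
```
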
@@ -7,12 +7,15 @@
     success:
       gerrit:
         Verified: 1
-      resultsdb: null
+      resultsdb_mysql: null
+      resultsdb_postgresql: null
     failure:
       gerrit:
        Verified: -1
-      resultsdb: null
-      resultsdb_failures: null
+      resultsdb_mysql: null
+      resultsdb_mysql_failures: null
+      resultsdb_postgresql: null
+      resultsdb_postgresql_failures: null

 - job:
     name: base

@@ -18,10 +18,18 @@ server=review.example.com
 user=jenkins
 sshkey=fake_id_rsa1

-[connection resultsdb]
+[connection resultsdb_mysql]
 driver=sql
 dburi=mysql+pymysql://bad:creds@host/db

-[connection resultsdb_failures]
+[connection resultsdb_mysql_failures]
 driver=sql
 dburi=mysql+pymysql://bad:creds@host/db
+
+[connection resultsdb_postgresql]
+driver=sql
+dburi=postgresql://bad:creds@host/db
+
+[connection resultsdb_postgresql_failures]
+driver=sql
+dburi=postgresql://bad:creds@host/db

@@ -18,11 +18,20 @@ server=review.example.com
 user=jenkins
 sshkey=fake_id_rsa1

-[connection resultsdb]
+[connection resultsdb_mysql]
 driver=sql
 dburi=$MYSQL_FIXTURE_DBURI$
 table_prefix=prefix_

-[connection resultsdb_failures]
+[connection resultsdb_mysql_failures]
 driver=sql
 dburi=$MYSQL_FIXTURE_DBURI$
+
+[connection resultsdb_postgresql]
+driver=sql
+dburi=$POSTGRESQL_FIXTURE_DBURI$
+table_prefix=prefix_
+
+[connection resultsdb_postgresql_failures]
+driver=sql
+dburi=$POSTGRESQL_FIXTURE_DBURI$

@@ -18,10 +18,18 @@ server=review.example.com
 user=jenkins
 sshkey=fake_id_rsa1

-[connection resultsdb]
+[connection resultsdb_mysql]
 driver=sql
 dburi=$MYSQL_FIXTURE_DBURI$

-[connection resultsdb_failures]
+[connection resultsdb_mysql_failures]
 driver=sql
 dburi=$MYSQL_FIXTURE_DBURI$
+
+[connection resultsdb_postgresql]
+driver=sql
+dburi=$POSTGRESQL_FIXTURE_DBURI$
+
+[connection resultsdb_postgresql_failures]
+driver=sql
+dburi=$POSTGRESQL_FIXTURE_DBURI$

@@ -62,10 +62,8 @@ class TestSQLConnection(ZuulDBTestCase):
     tenant_config_file = 'config/sql-driver/main.yaml'
     expected_table_prefix = ''

-    def test_sql_tables_created(self):
-        "Test the tables for storing results are created properly"
-
-        connection = self.connections.connections['resultsdb']
+    def _sql_tables_created(self, connection_name):
+        connection = self.connections.connections[connection_name]
         insp = sa.engine.reflection.Inspector(connection.engine)

         table_prefix = connection.table_prefix

@@ -77,10 +75,13 @@ class TestSQLConnection(ZuulDBTestCase):
         self.assertEqual(14, len(insp.get_columns(buildset_table)))
         self.assertEqual(10, len(insp.get_columns(build_table)))

-    def test_sql_indexes_created(self):
-        "Test the indexes are created properly"
+    def test_sql_tables_created(self):
+        "Test the tables for storing results are created properly"
+        self._sql_tables_created('resultsdb_mysql')
+        self._sql_tables_created('resultsdb_postgresql')

-        connection = self.connections.connections['resultsdb']
+    def _sql_indexes_created(self, connection_name):
+        connection = self.connections.connections[connection_name]
         insp = sa.engine.reflection.Inspector(connection.engine)

         table_prefix = connection.table_prefix

@@ -109,15 +110,84 @@ class TestSQLConnection(ZuulDBTestCase):
         for index in indexes:
             self.assertTrue(index['name'].startswith(table_prefix))

+    def test_sql_indexes_created(self):
+        "Test the indexes are created properly"
+        self._sql_indexes_created('resultsdb_mysql')
+        self._sql_indexes_created('resultsdb_postgresql')
+
     def test_sql_results(self):
         "Test results are entered into an sql table"
+
+        def check_results(connection_name):
+            # Grab the sa tables
+            tenant = self.sched.abide.tenants.get('tenant-one')
+            reporter = _get_reporter_from_connection_name(
+                tenant.layout.pipelines['check'].success_actions,
+                connection_name
+            )
+
+            conn = self.connections.connections[
+                connection_name].engine.connect()
+            result = conn.execute(
+                sa.sql.select([reporter.connection.zuul_buildset_table]))
+
+            buildsets = result.fetchall()
+            self.assertEqual(2, len(buildsets))
+            buildset0 = buildsets[0]
+            buildset1 = buildsets[1]
+
+            self.assertEqual('check', buildset0['pipeline'])
+            self.assertEqual('org/project', buildset0['project'])
+            self.assertEqual(1, buildset0['change'])
+            self.assertEqual('1', buildset0['patchset'])
+            self.assertEqual('SUCCESS', buildset0['result'])
+            self.assertEqual('Build succeeded.', buildset0['message'])
+            self.assertEqual('tenant-one', buildset0['tenant'])
+            self.assertEqual(
+                'https://review.example.com/%d' % buildset0['change'],
+                buildset0['ref_url'])
+
+            buildset0_builds = conn.execute(
+                sa.sql.select([reporter.connection.zuul_build_table]).where(
+                    reporter.connection.zuul_build_table.c.buildset_id ==
+                    buildset0['id']
+                )
+            ).fetchall()
+
+            # Check the first result, which should be the project-merge job
+            self.assertEqual('project-merge', buildset0_builds[0]['job_name'])
+            self.assertEqual("SUCCESS", buildset0_builds[0]['result'])
+            self.assertEqual(
+                'finger://{hostname}/{uuid}'.format(
+                    hostname=self.executor_server.hostname,
+                    uuid=buildset0_builds[0]['uuid']),
+                buildset0_builds[0]['log_url'])
+            self.assertEqual('check', buildset1['pipeline'])
+            self.assertEqual('master', buildset1['branch'])
+            self.assertEqual('org/project', buildset1['project'])
+            self.assertEqual(2, buildset1['change'])
+            self.assertEqual('1', buildset1['patchset'])
+            self.assertEqual('FAILURE', buildset1['result'])
+            self.assertEqual('Build failed.', buildset1['message'])
+
+            buildset1_builds = conn.execute(
+                sa.sql.select([reporter.connection.zuul_build_table]).where(
+                    reporter.connection.zuul_build_table.c.buildset_id ==
+                    buildset1['id']
+                )
+            ).fetchall()
+
+            # Check the second result, which should be the project-test1 job
+            # which failed
+            self.assertEqual('project-test1', buildset1_builds[1]['job_name'])
+            self.assertEqual("FAILURE", buildset1_builds[1]['result'])
+            self.assertEqual(
+                'finger://{hostname}/{uuid}'.format(
+                    hostname=self.executor_server.hostname,
+                    uuid=buildset1_builds[1]['uuid']),
+                buildset1_builds[1]['log_url'])
+
         self.executor_server.hold_jobs_in_build = True
-        # Grab the sa tables
-        tenant = self.sched.abide.tenants.get('tenant-one')
-        reporter = _get_reporter_from_connection_name(
-            tenant.layout.pipelines['check'].success_actions,
-            'resultsdb'
-        )

         # Add a success result
         A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')

@@ -135,66 +205,8 @@ class TestSQLConnection(ZuulDBTestCase):
         self.orderedRelease()
         self.waitUntilSettled()

-        conn = self.connections.connections['resultsdb'].engine.connect()
-        result = conn.execute(
-            sa.sql.select([reporter.connection.zuul_buildset_table]))
-
-        buildsets = result.fetchall()
-        self.assertEqual(2, len(buildsets))
-        buildset0 = buildsets[0]
-        buildset1 = buildsets[1]
-
-        self.assertEqual('check', buildset0['pipeline'])
-        self.assertEqual('org/project', buildset0['project'])
-        self.assertEqual(1, buildset0['change'])
-        self.assertEqual('1', buildset0['patchset'])
-        self.assertEqual('SUCCESS', buildset0['result'])
-        self.assertEqual('Build succeeded.', buildset0['message'])
-        self.assertEqual('tenant-one', buildset0['tenant'])
-        self.assertEqual('https://review.example.com/%d' % buildset0['change'],
-                         buildset0['ref_url'])
-
-        buildset0_builds = conn.execute(
-            sa.sql.select([reporter.connection.zuul_build_table]).
-            where(
-                reporter.connection.zuul_build_table.c.buildset_id ==
-                buildset0['id']
-            )
-        ).fetchall()
-
-        # Check the first result, which should be the project-merge job
-        self.assertEqual('project-merge', buildset0_builds[0]['job_name'])
-        self.assertEqual("SUCCESS", buildset0_builds[0]['result'])
-        self.assertEqual(
-            'finger://{hostname}/{uuid}'.format(
-                hostname=self.executor_server.hostname,
-                uuid=buildset0_builds[0]['uuid']),
-            buildset0_builds[0]['log_url'])
-        self.assertEqual('check', buildset1['pipeline'])
-        self.assertEqual('master', buildset1['branch'])
-        self.assertEqual('org/project', buildset1['project'])
-        self.assertEqual(2, buildset1['change'])
-        self.assertEqual('1', buildset1['patchset'])
-        self.assertEqual('FAILURE', buildset1['result'])
-        self.assertEqual('Build failed.', buildset1['message'])
-
-        buildset1_builds = conn.execute(
-            sa.sql.select([reporter.connection.zuul_build_table]).
-            where(
-                reporter.connection.zuul_build_table.c.buildset_id ==
-                buildset1['id']
-            )
-        ).fetchall()
-
-        # Check the second result, which should be the project-test1 job
-        # which failed
-        self.assertEqual('project-test1', buildset1_builds[1]['job_name'])
-        self.assertEqual("FAILURE", buildset1_builds[1]['result'])
-        self.assertEqual(
-            'finger://{hostname}/{uuid}'.format(
-                hostname=self.executor_server.hostname,
-                uuid=buildset1_builds[1]['uuid']),
-            buildset1_builds[1]['log_url'])
+        check_results('resultsdb_mysql')
+        check_results('resultsdb_postgresql')

     def test_multiple_sql_connections(self):
         "Test putting results in different databases"

@@ -209,49 +221,60 @@ class TestSQLConnection(ZuulDBTestCase):
         self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
         self.waitUntilSettled()

-        # Grab the sa tables for resultsdb
-        tenant = self.sched.abide.tenants.get('tenant-one')
-        reporter1 = _get_reporter_from_connection_name(
-            tenant.layout.pipelines['check'].success_actions,
-            'resultsdb'
-        )
+        def check_results(connection_name_1, connection_name_2):
+            # Grab the sa tables for resultsdb
+            tenant = self.sched.abide.tenants.get('tenant-one')
+            reporter1 = _get_reporter_from_connection_name(
+                tenant.layout.pipelines['check'].success_actions,
+                connection_name_1
+            )

-        conn = self.connections.connections['resultsdb'].engine.connect()
-        buildsets_resultsdb = conn.execute(sa.sql.select(
-            [reporter1.connection.zuul_buildset_table])).fetchall()
-        # Should have been 2 buildset reported to the resultsdb (both success
-        # and failure report)
-        self.assertEqual(2, len(buildsets_resultsdb))
+            conn = self.connections.connections[connection_name_1].\
+                engine.connect()
+            buildsets_resultsdb = conn.execute(sa.sql.select(
+                [reporter1.connection.zuul_buildset_table])).fetchall()
+            # Should have been 2 buildset reported to the resultsdb (both
+            # success and failure report)
+            self.assertEqual(2, len(buildsets_resultsdb))

-        # The first one should have passed
-        self.assertEqual('check', buildsets_resultsdb[0]['pipeline'])
-        self.assertEqual('org/project', buildsets_resultsdb[0]['project'])
-        self.assertEqual(1, buildsets_resultsdb[0]['change'])
-        self.assertEqual('1', buildsets_resultsdb[0]['patchset'])
-        self.assertEqual('SUCCESS', buildsets_resultsdb[0]['result'])
-        self.assertEqual('Build succeeded.', buildsets_resultsdb[0]['message'])
+            # The first one should have passed
+            self.assertEqual('check', buildsets_resultsdb[0]['pipeline'])
+            self.assertEqual(
+                'org/project', buildsets_resultsdb[0]['project'])
+            self.assertEqual(1, buildsets_resultsdb[0]['change'])
+            self.assertEqual('1', buildsets_resultsdb[0]['patchset'])
+            self.assertEqual('SUCCESS', buildsets_resultsdb[0]['result'])
+            self.assertEqual(
+                'Build succeeded.', buildsets_resultsdb[0]['message'])

-        # Grab the sa tables for resultsdb_failures
-        reporter2 = _get_reporter_from_connection_name(
-            tenant.layout.pipelines['check'].failure_actions,
-            'resultsdb_failures'
-        )
+            # Grab the sa tables for resultsdb_mysql_failures
+            reporter2 = _get_reporter_from_connection_name(
+                tenant.layout.pipelines['check'].failure_actions,
+                connection_name_2
+            )

-        conn = self.connections.connections['resultsdb_failures'].\
-            engine.connect()
-        buildsets_resultsdb_failures = conn.execute(sa.sql.select(
-            [reporter2.connection.zuul_buildset_table])).fetchall()
-        # The failure db should only have 1 buildset failed
-        self.assertEqual(1, len(buildsets_resultsdb_failures))
+            conn = self.connections.connections[connection_name_2].\
+                engine.connect()
+            buildsets_resultsdb_failures = conn.execute(sa.sql.select(
+                [reporter2.connection.zuul_buildset_table])).fetchall()
+            # The failure db should only have 1 buildset failed
+            self.assertEqual(1, len(buildsets_resultsdb_failures))

-        self.assertEqual('check', buildsets_resultsdb_failures[0]['pipeline'])
-        self.assertEqual(
-            'org/project', buildsets_resultsdb_failures[0]['project'])
-        self.assertEqual(2, buildsets_resultsdb_failures[0]['change'])
-        self.assertEqual('1', buildsets_resultsdb_failures[0]['patchset'])
-        self.assertEqual('FAILURE', buildsets_resultsdb_failures[0]['result'])
-        self.assertEqual(
-            'Build failed.', buildsets_resultsdb_failures[0]['message'])
+            self.assertEqual(
+                'check', buildsets_resultsdb_failures[0]['pipeline'])
+            self.assertEqual('org/project',
+                             buildsets_resultsdb_failures[0]['project'])
+            self.assertEqual(2,
+                             buildsets_resultsdb_failures[0]['change'])
+            self.assertEqual(
+                '1', buildsets_resultsdb_failures[0]['patchset'])
+            self.assertEqual(
+                'FAILURE', buildsets_resultsdb_failures[0]['result'])
+            self.assertEqual('Build failed.',
+                             buildsets_resultsdb_failures[0]['message'])

+        check_results('resultsdb_mysql', 'resultsdb_mysql_failures')
+        check_results('resultsdb_postgresql', 'resultsdb_postgresql_failures')
+

 class TestSQLConnectionPrefix(TestSQLConnection):

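The refactored tests run one shared set of assertions against every configured backend. A hedged sketch of that pattern outside the Zuul test harness, using SQLAlchemy directly; the URIs and the `zuul_buildset` table name below are assumptions for illustration:

```python
# Sketch of the "same assertions, every backend" pattern used above, outside
# the Zuul test harness. The URIs are placeholders and the table name is an
# assumption, not taken from this change.
import sqlalchemy as sa

BACKEND_DBURIS = [
    'mysql+pymysql://user:secret@localhost/zuul_test',   # hypothetical
    'postgresql://user:secret@localhost/zuul_test',      # hypothetical
]


def check_backend(dburi):
    engine = sa.create_engine(dburi)
    insp = sa.inspect(engine)
    # Whatever schema the reporter creates must look the same on each backend.
    assert 'zuul_buildset' in insp.get_table_names()
    engine.dispose()


for dburi in BACKEND_DBURIS:
    check_backend(dburi)
```
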
@@ -9,6 +9,7 @@ TOOLSDIR=$(dirname $0)

 # Be sure mysql and zookeeper are started.
 sudo service mysql start
+sudo service postgresql start
 sudo service zookeeper start

 # The root password for the MySQL database; pass it in via

@@ -37,6 +38,10 @@ mysql -u $DB_USER -p$DB_PW -h 127.0.0.1 -e "
     DROP DATABASE IF EXISTS openstack_citest;
     CREATE DATABASE openstack_citest CHARACTER SET utf8;"

+# setup postgres user and database
+sudo -u postgres psql -c "CREATE ROLE $DB_USER WITH LOGIN SUPERUSER UNENCRYPTED PASSWORD '$DB_PW';"
+sudo -u postgres psql -c "CREATE DATABASE openstack_citest OWNER $DB_USER TEMPLATE template0 ENCODING 'UTF8';"
+
 # TODO(pabelanger): Move this into bindep after we figure out how to enable our
 # PPA.
 # NOTE(pabelanger): Avoid hitting http://keyserver.ubuntu.com

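As a quick sanity check, the role and database created above can be reached from Python with the same `openstack_citest` credentials the fixtures use (sketch only; assumes those default credentials):

```python
# Connectivity smoke test for the role/database created above; uses the
# openstack_citest credentials that the Postgres fixture in this change
# also connects with.
import psycopg2

conn = psycopg2.connect(host="localhost",
                        user="openstack_citest",
                        password="openstack_citest",
                        database="openstack_citest")
cur = conn.cursor()
cur.execute("SELECT version()")
print(cur.fetchone()[0])
conn.close()
```
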
@@ -153,6 +153,10 @@ class SQLConnection(BaseConnection):
                             "in zuul.conf [web] section")
         return True

+    def onStop(self):
+        self.log.debug("Stopping SQL connection %s" % self.connection_name)
+        self.engine.dispose()
+

 class SqlWebHandler(BaseTenantWebHandler):
     log = logging.getLogger("zuul.web.SqlHandler")
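`onStop()` makes each SQL connection release its pooled database connections when it is shut down. A tiny self-contained illustration of that engine lifecycle (SQLite in-memory URI used only so the snippet runs without a server):

```python
# Minimal illustration of the create/use/dispose engine lifecycle that
# onStop() covers for the SQL connection above; sqlite is used only so the
# snippet runs without a database server.
import sqlalchemy as sa

engine = sa.create_engine('sqlite://')
with engine.connect() as conn:
    conn.execute(sa.text('SELECT 1'))
# dispose() closes the connection pool, which keeps per-test databases from
# holding connections open after the test that created them finishes.
engine.dispose()
```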