db: Remove use of 'bind' arguments
Resolve the following RemovedIn20Warning warnings: "The MetaData.bind argument is deprecated and will be removed in SQLAlchemy 2.0" and "The ``bind`` argument for schema methods that invoke SQL against an engine or connection will be required in SQLAlchemy 2.0". We must retain a couple of workarounds to keep sqlalchemy-migrate happy. This shouldn't be an issue, since sqlalchemy-migrate is not compatible with SQLAlchemy 2.x and will have to be removed (along with the legacy migration files themselves) when we eventually support 2.x. Change-Id: I946b4c7926ba2583b84ab95a1fe7aedc6923d9f8 Signed-off-by: Stephen Finucane <stephenfin@redhat.com>
This commit is contained in:
parent
be4e01c101
commit
eac61c5929
@ -27,6 +27,9 @@ def InetSmall():
|
||||
|
||||
def upgrade(migrate_engine):
|
||||
meta = sa.MetaData()
|
||||
# NOTE(stephenfin): This is not compatible with SQLAlchemy 2.0 but neither
|
||||
# is sqlalchemy-migrate which requires this. We'll remove these migrations
|
||||
# when dropping SQLAlchemy < 2.x support
|
||||
meta.bind = migrate_engine
|
||||
|
||||
cell_mappings = sa.Table('cell_mappings', meta,
|
||||
|
@ -4233,8 +4233,9 @@ def _get_fk_stmts(metadata, conn, table, column, records):
|
||||
return inserts, deletes
|
||||
|
||||
|
||||
def _archive_deleted_rows_for_table(metadata, tablename, max_rows, before,
|
||||
task_log):
|
||||
def _archive_deleted_rows_for_table(
|
||||
metadata, engine, tablename, max_rows, before, task_log,
|
||||
):
|
||||
"""Move up to max_rows rows from one tables to the corresponding
|
||||
shadow table.
|
||||
|
||||
@ -4249,7 +4250,7 @@ def _archive_deleted_rows_for_table(metadata, tablename, max_rows, before,
|
||||
- number of extra rows archived (due to FK constraints)
|
||||
dict of {tablename: rows_archived}
|
||||
"""
|
||||
conn = metadata.bind.connect()
|
||||
conn = engine.connect()
|
||||
# NOTE(tdurakov): table metadata should be received
|
||||
# from models, not db tables. Default value specified by SoftDeleteMixin
|
||||
# is known only by models, not DB layer.
|
||||
@ -4382,8 +4383,9 @@ def archive_deleted_rows(context=None, max_rows=None, before=None,
|
||||
table_to_rows_archived = collections.defaultdict(int)
|
||||
deleted_instance_uuids = []
|
||||
total_rows_archived = 0
|
||||
meta = sa.MetaData(get_engine(use_slave=True, context=context))
|
||||
meta.reflect()
|
||||
meta = sa.MetaData()
|
||||
engine = get_engine(use_slave=True, context=context)
|
||||
meta.reflect(bind=engine)
|
||||
# Get the sorted list of tables in order of foreign key dependency.
|
||||
# Process the parent tables and find their dependent records in order to
|
||||
# archive the related records in a single database transactions. The goal
|
||||
@ -4409,7 +4411,7 @@ def archive_deleted_rows(context=None, max_rows=None, before=None,
|
||||
|
||||
rows_archived, _deleted_instance_uuids, extras = (
|
||||
_archive_deleted_rows_for_table(
|
||||
meta, tablename,
|
||||
meta, engine, tablename,
|
||||
max_rows=max_rows - total_rows_archived,
|
||||
before=before,
|
||||
task_log=task_log))
|
||||
@ -4437,8 +4439,7 @@ def purge_shadow_tables(context, before_date, status_fn=None):
|
||||
engine = get_engine(context=context)
|
||||
conn = engine.connect()
|
||||
metadata = sa.MetaData()
|
||||
metadata.bind = engine
|
||||
metadata.reflect()
|
||||
metadata.reflect(bind=engine)
|
||||
total_deleted = 0
|
||||
|
||||
if status_fn is None:
|
||||
|
@ -45,10 +45,13 @@ def process(element, compiler, **kw):
|
||||
|
||||
|
||||
def _create_shadow_tables(migrate_engine):
|
||||
meta = sa.MetaData(migrate_engine)
|
||||
meta = sa.MetaData()
|
||||
meta.reflect(migrate_engine)
|
||||
table_names = list(meta.tables.keys())
|
||||
|
||||
# NOTE(stephenfin): This is not compatible with SQLAlchemy 2.0 but neither
|
||||
# is sqlalchemy-migrate which requires this. We'll remove these migrations
|
||||
# when dropping SQLAlchemy < 2.x support
|
||||
meta.bind = migrate_engine
|
||||
|
||||
for table_name in table_names:
|
||||
@ -184,6 +187,9 @@ def _create_shadow_tables(migrate_engine):
|
||||
|
||||
def upgrade(migrate_engine):
|
||||
meta = sa.MetaData()
|
||||
# NOTE(stephenfin): This is not compatible with SQLAlchemy 2.0 but neither
|
||||
# is sqlalchemy-migrate which requires this. We'll remove these migrations
|
||||
# when dropping SQLAlchemy < 2.x support
|
||||
meta.bind = migrate_engine
|
||||
|
||||
agent_builds = sa.Table('agent_builds', meta,
|
||||
|
@ -53,8 +53,8 @@ def process(element, compiler, **kw):
|
||||
|
||||
|
||||
def _create_shadow_tables(connection):
|
||||
meta = sa.MetaData(connection)
|
||||
meta.reflect(connection)
|
||||
meta = sa.MetaData()
|
||||
meta.reflect(bind=connection)
|
||||
table_names = list(meta.tables.keys())
|
||||
|
||||
for table_name in table_names:
|
||||
|
12
nova/tests/fixtures/nova.py
vendored
@ -847,18 +847,6 @@ class WarningsFixture(fixtures.Fixture):
|
||||
message=r'The current statement is being autocommitted .*',
|
||||
category=sqla_exc.SADeprecationWarning)
|
||||
|
||||
warnings.filterwarnings(
|
||||
'ignore',
|
||||
module='nova',
|
||||
message=r'The MetaData.bind argument is deprecated .*',
|
||||
category=sqla_exc.SADeprecationWarning)
|
||||
|
||||
warnings.filterwarnings(
|
||||
'ignore',
|
||||
module='nova',
|
||||
message=r'The ``bind`` argument for schema methods .*',
|
||||
category=sqla_exc.SADeprecationWarning)
|
||||
|
||||
warnings.filterwarnings(
|
||||
'ignore',
|
||||
module='nova',
|
||||
|
@ -177,8 +177,8 @@ class TestDatabaseArchive(integrated_helpers._IntegratedTestBase):
|
||||
def _get_table_counts(self):
|
||||
engine = db.get_engine()
|
||||
conn = engine.connect()
|
||||
meta = sa.MetaData(engine)
|
||||
meta.reflect()
|
||||
meta = sa.MetaData()
|
||||
meta.reflect(bind=engine)
|
||||
shadow_tables = db._purgeable_tables(meta)
|
||||
results = {}
|
||||
for table in shadow_tables:
|
||||
|
@ -5652,7 +5652,7 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
|
||||
def setUp(self):
|
||||
super(ArchiveTestCase, self).setUp()
|
||||
self.engine = db.get_engine()
|
||||
self.metadata = sa.MetaData(self.engine)
|
||||
self.metadata = sa.MetaData()
|
||||
self.conn = self.engine.connect()
|
||||
self.instance_id_mappings = models.InstanceIdMapping.__table__
|
||||
self.shadow_instance_id_mappings = sqlalchemyutils.get_table(
|
||||
@ -5684,8 +5684,8 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
|
||||
except for specificially named exceptions, are empty. This
|
||||
makes sure that archiving isn't moving unexpected content.
|
||||
"""
|
||||
metadata = sa.MetaData(bind=self.engine)
|
||||
metadata.reflect()
|
||||
metadata = sa.MetaData()
|
||||
metadata.reflect(bind=self.engine)
|
||||
for table in metadata.tables:
|
||||
if table.startswith("shadow_") and table not in exceptions:
|
||||
rows = self.conn.exec_driver_sql(
|
||||
@ -5698,8 +5698,8 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
|
||||
|
||||
Shadow tables should have an identical schema to the main table.
|
||||
"""
|
||||
metadata = sa.MetaData(bind=self.engine)
|
||||
metadata.reflect()
|
||||
metadata = sa.MetaData()
|
||||
metadata.reflect(bind=self.engine)
|
||||
for table_name in metadata.tables:
|
||||
# some tables don't have shadow tables so skip these
|
||||
if table_name in [
|
||||
@ -5942,7 +5942,9 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
|
||||
self.assertEqual(len(rows), 0)
|
||||
# Archive 2 rows
|
||||
db._archive_deleted_rows_for_table(
|
||||
self.metadata, tablename, max_rows=2, before=None, task_log=False)
|
||||
self.metadata, self.engine, tablename, max_rows=2, before=None,
|
||||
task_log=False,
|
||||
)
|
||||
# Verify we have 4 left in main
|
||||
rows = self.conn.execute(qmt).fetchall()
|
||||
self.assertEqual(len(rows), 4)
|
||||
@ -5951,7 +5953,9 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
|
||||
self.assertEqual(len(rows), 2)
|
||||
# Archive 2 more rows
|
||||
db._archive_deleted_rows_for_table(
|
||||
self.metadata, tablename, max_rows=2, before=None, task_log=False)
|
||||
self.metadata, self.engine, tablename, max_rows=2, before=None,
|
||||
task_log=False,
|
||||
)
|
||||
# Verify we have 2 left in main
|
||||
rows = self.conn.execute(qmt).fetchall()
|
||||
self.assertEqual(len(rows), 2)
|
||||
@ -5960,7 +5964,9 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
|
||||
self.assertEqual(len(rows), 4)
|
||||
# Try to archive more, but there are no deleted rows left.
|
||||
db._archive_deleted_rows_for_table(
|
||||
self.metadata, tablename, max_rows=2, before=None, task_log=False)
|
||||
self.metadata, self.engine, tablename, max_rows=2, before=None,
|
||||
task_log=False,
|
||||
)
|
||||
# Verify we still have 2 left in main
|
||||
rows = self.conn.execute(qmt).fetchall()
|
||||
self.assertEqual(len(rows), 2)
|
||||
@ -6019,8 +6025,8 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
|
||||
# Archiving instances should result in migrations related to the
|
||||
# instances also being archived.
|
||||
num = db._archive_deleted_rows_for_table(
|
||||
self.metadata, "instances", max_rows=None, before=None,
|
||||
task_log=False)
|
||||
self.metadata, self.engine, "instances", max_rows=None,
|
||||
before=None, task_log=False)
|
||||
self.assertEqual(1, num[0])
|
||||
self._assert_shadow_tables_empty_except(
|
||||
'shadow_instances',
|
||||
@ -6386,7 +6392,8 @@ class RetryOnDeadlockTestCase(test.TestCase):
|
||||
|
||||
|
||||
class TestSqlalchemyTypesRepr(
|
||||
test_fixtures.OpportunisticDBTestMixin, test.NoDBTestCase):
|
||||
test_fixtures.OpportunisticDBTestMixin, test.NoDBTestCase,
|
||||
):
|
||||
|
||||
def setUp(self):
|
||||
# NOTE(sdague): the oslo_db base test case completely
|
||||
@ -6397,15 +6404,15 @@ class TestSqlalchemyTypesRepr(
|
||||
|
||||
super(TestSqlalchemyTypesRepr, self).setUp()
|
||||
self.engine = enginefacade.writer.get_engine()
|
||||
meta = sa.MetaData(bind=self.engine)
|
||||
meta = sa.MetaData()
|
||||
self.table = sa.Table(
|
||||
'cidr_tbl',
|
||||
meta,
|
||||
sa.Column('id', sa.Integer, primary_key=True),
|
||||
sa.Column('addr', col_types.CIDR())
|
||||
)
|
||||
self.table.create()
|
||||
self.addCleanup(meta.drop_all)
|
||||
meta.create_all(self.engine)
|
||||
self.addCleanup(meta.drop_all, self.engine)
|
||||
|
||||
def test_cidr_repr(self):
|
||||
addrs = [('192.168.3.0/24', '192.168.3.0/24'),
|
||||
|
Loading…
x
Reference in New Issue
Block a user