From 0703fd03e6562733dd6531c18d65482afe147762 Mon Sep 17 00:00:00 2001
From: Matt Riedemann
Date: Wed, 7 Aug 2019 17:48:29 -0400
Subject: [PATCH] Re-use DB MetaData during archive_deleted_rows

This simply passes the database engine metadata from archive_deleted_rows
to _archive_deleted_rows_for_table so the latter does not need to get the
engine and create a new MetaData object for each table processed.

Change-Id: If42acd953dc9bd09972e974f7bb04d5bab701a40
---
 nova/db/sqlalchemy/api.py         |  9 +++------
 nova/tests/unit/db/test_db_api.py | 31 ++++++++++++++++++++++---------
 2 files changed, 25 insertions(+), 15 deletions(-)

diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 0925c83a74e6..d283a5d630b3 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -5482,7 +5482,7 @@ def _archive_if_instance_deleted(table, shadow_table, instances, conn,
     return 0
 
 
-def _archive_deleted_rows_for_table(tablename, max_rows, before):
+def _archive_deleted_rows_for_table(metadata, tablename, max_rows, before):
     """Move up to max_rows rows from one tables to the corresponding
     shadow table.
 
@@ -5491,10 +5491,7 @@ def _archive_deleted_rows_for_table(tablename, max_rows, before):
     - number of rows archived
     - list of UUIDs of instances that were archived
     """
-    engine = get_engine()
-    conn = engine.connect()
-    metadata = MetaData()
-    metadata.bind = engine
+    conn = metadata.bind.connect()
     # NOTE(tdurakov): table metadata should be received
     # from models, not db tables. Default value specified by SoftDeleteMixin
     # is known only by models, not DB layer.
@@ -5607,7 +5604,7 @@ def archive_deleted_rows(max_rows=None, before=None):
             continue
         rows_archived, _deleted_instance_uuids = (
             _archive_deleted_rows_for_table(
-                tablename,
+                meta, tablename,
                 max_rows=max_rows - total_rows_archived,
                 before=before))
         total_rows_archived += rows_archived
diff --git a/nova/tests/unit/db/test_db_api.py b/nova/tests/unit/db/test_db_api.py
index 1915e46e6cd0..18b15fd61690 100644
--- a/nova/tests/unit/db/test_db_api.py
+++ b/nova/tests/unit/db/test_db_api.py
@@ -8407,6 +8407,7 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
     def setUp(self):
         super(ArchiveTestCase, self).setUp()
         self.engine = get_engine()
+        self.metadata = MetaData(self.engine)
         self.conn = self.engine.connect()
         self.instance_id_mappings = models.InstanceIdMapping.__table__
         self.shadow_instance_id_mappings = sqlalchemyutils.get_table(
@@ -8667,7 +8668,9 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
         # Verify we have 0 in shadow
         self.assertEqual(len(rows), 0)
         # Archive 2 rows
-        sqlalchemy_api._archive_deleted_rows_for_table(tablename, max_rows=2,
+        sqlalchemy_api._archive_deleted_rows_for_table(self.metadata,
+                                                       tablename,
+                                                       max_rows=2,
                                                        before=None)
         # Verify we have 4 left in main
         rows = self.conn.execute(qmt).fetchall()
@@ -8676,7 +8679,9 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
         rows = self.conn.execute(qst).fetchall()
         self.assertEqual(len(rows), 2)
         # Archive 2 more rows
-        sqlalchemy_api._archive_deleted_rows_for_table(tablename, max_rows=2,
+        sqlalchemy_api._archive_deleted_rows_for_table(self.metadata,
+                                                       tablename,
+                                                       max_rows=2,
                                                        before=None)
         # Verify we have 2 left in main
         rows = self.conn.execute(qmt).fetchall()
@@ -8685,7 +8690,9 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
         rows = self.conn.execute(qst).fetchall()
         self.assertEqual(len(rows), 4)
         # Try to archive more, but there are no deleted rows left.
-        sqlalchemy_api._archive_deleted_rows_for_table(tablename, max_rows=2,
+        sqlalchemy_api._archive_deleted_rows_for_table(self.metadata,
+                                                       tablename,
+                                                       max_rows=2,
                                                        before=None)
         # Verify we still have 2 left in main
         rows = self.conn.execute(qmt).fetchall()
@@ -8779,17 +8786,20 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
         result = self.conn.execute(ins_stmt)
         result.inserted_primary_key[0]
         # The first try to archive console_pools should fail, due to FK.
-        num = sqlalchemy_api._archive_deleted_rows_for_table("console_pools",
+        num = sqlalchemy_api._archive_deleted_rows_for_table(self.metadata,
+                                                             "console_pools",
                                                              max_rows=None,
                                                              before=None)
         self.assertEqual(num[0], 0)
         # Then archiving consoles should work.
-        num = sqlalchemy_api._archive_deleted_rows_for_table("consoles",
+        num = sqlalchemy_api._archive_deleted_rows_for_table(self.metadata,
+                                                             "consoles",
                                                              max_rows=None,
                                                              before=None)
         self.assertEqual(num[0], 1)
         # Then archiving console_pools should work.
-        num = sqlalchemy_api._archive_deleted_rows_for_table("console_pools",
+        num = sqlalchemy_api._archive_deleted_rows_for_table(self.metadata,
+                                                             "console_pools",
                                                              max_rows=None,
                                                              before=None)
         self.assertEqual(num[0], 1)
@@ -8811,17 +8821,20 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
                        deleted=0)
         self.conn.execute(ins_stmt)
         # The first try to archive instances should fail, due to FK.
-        num = sqlalchemy_api._archive_deleted_rows_for_table("instances",
+        num = sqlalchemy_api._archive_deleted_rows_for_table(self.metadata,
+                                                             "instances",
                                                              max_rows=None,
                                                              before=None)
         self.assertEqual(0, num[0])
         # Then archiving migrations should work.
-        num = sqlalchemy_api._archive_deleted_rows_for_table("migrations",
+        num = sqlalchemy_api._archive_deleted_rows_for_table(self.metadata,
+                                                             "migrations",
                                                              max_rows=None,
                                                              before=None)
         self.assertEqual(1, num[0])
         # Then archiving instances should work.
-        num = sqlalchemy_api._archive_deleted_rows_for_table("instances",
+        num = sqlalchemy_api._archive_deleted_rows_for_table(self.metadata,
+                                                             "instances",
                                                              max_rows=None,
                                                              before=None)
         self.assertEqual(1, num[0])
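
For context, a minimal standalone sketch of the pattern the patch applies: build the engine and a bound MetaData once, then hand that shared MetaData to the per-table helper so it can open connections and reflect tables without re-creating either object per call. This is not Nova code; it assumes the SQLAlchemy 1.3-style API used here (MetaData(bind=...), metadata.bind, autoload=True), and the table names and helper are illustrative only.

```python
from sqlalchemy import (Column, Integer, MetaData, Table, create_engine,
                        func, select)

# Create the engine once (in-memory SQLite just for illustration).
engine = create_engine('sqlite:///:memory:')

# Throwaway schema so the reflection below has something to find.
setup_meta = MetaData()
for name in ('widgets', 'shadow_widgets'):
    Table(name, setup_meta, Column('id', Integer, primary_key=True))
setup_meta.create_all(engine)

# The shared, bound MetaData that is passed around instead of being rebuilt
# inside every per-table call.
metadata = MetaData(bind=engine)


def process_table(metadata, tablename):
    """Reflect ``tablename`` via the shared MetaData and count its rows."""
    conn = metadata.bind.connect()   # replaces per-call get_engine()/MetaData()
    try:
        table = Table(tablename, metadata, autoload=True)
        return conn.execute(
            select([func.count()]).select_from(table)).scalar()
    finally:
        conn.close()


for name in ('widgets', 'shadow_widgets'):
    print(name, process_table(metadata, name))
```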