Make archive_deleted_rows_for_table private
archive_deleted_rows_for_table is really used only by nova.db.sqlalchemy.api.archive_deleted_rows, so it should be a private method. The entry point to this code for nova-manage db archive_deleted_rows is through the archive_deleted_rows method only. Change-Id: I8ff5311c160d1190a15867721081f5193e3f4b35
This commit is contained in:
parent
7a07429362
commit
5135c54d01
|
@ -1904,16 +1904,6 @@ def archive_deleted_rows(max_rows=None):
|
|||
return IMPL.archive_deleted_rows(max_rows=max_rows)
|
||||
|
||||
|
||||
def archive_deleted_rows_for_table(tablename, max_rows=None):
|
||||
"""Move up to max_rows rows from tablename to corresponding shadow
|
||||
table.
|
||||
|
||||
:returns: number of rows archived.
|
||||
"""
|
||||
return IMPL.archive_deleted_rows_for_table(tablename,
|
||||
max_rows=max_rows)
|
||||
|
||||
|
||||
####################
|
||||
|
||||
|
||||
|
|
|
@ -5964,7 +5964,7 @@ def task_log_end_task(context, task_name, period_beginning, period_ending,
|
|||
raise exception.TaskNotRunning(task_name=task_name, host=host)
|
||||
|
||||
|
||||
def archive_deleted_rows_for_table(tablename, max_rows):
|
||||
def _archive_deleted_rows_for_table(tablename, max_rows):
|
||||
"""Move up to max_rows rows from one tables to the corresponding
|
||||
shadow table.
|
||||
|
||||
|
@ -6043,7 +6043,7 @@ def archive_deleted_rows(max_rows=None):
|
|||
tablenames.append(model_class.__tablename__)
|
||||
rows_archived = 0
|
||||
for tablename in tablenames:
|
||||
rows_archived += archive_deleted_rows_for_table(tablename,
|
||||
rows_archived += _archive_deleted_rows_for_table(tablename,
|
||||
max_rows=max_rows - rows_archived)
|
||||
if rows_archived >= max_rows:
|
||||
break
|
||||
|
|
|
@ -8177,7 +8177,7 @@ class ArchiveTestCase(test.TestCase):
|
|||
# Verify we have 0 in shadow
|
||||
self.assertEqual(len(rows), 0)
|
||||
# Archive 2 rows
|
||||
db.archive_deleted_rows_for_table(tablename, max_rows=2)
|
||||
sqlalchemy_api._archive_deleted_rows_for_table(tablename, max_rows=2)
|
||||
# Verify we have 4 left in main
|
||||
rows = self.conn.execute(qmt).fetchall()
|
||||
self.assertEqual(len(rows), 4)
|
||||
|
@ -8185,7 +8185,7 @@ class ArchiveTestCase(test.TestCase):
|
|||
rows = self.conn.execute(qst).fetchall()
|
||||
self.assertEqual(len(rows), 2)
|
||||
# Archive 2 more rows
|
||||
db.archive_deleted_rows_for_table(tablename, max_rows=2)
|
||||
sqlalchemy_api._archive_deleted_rows_for_table(tablename, max_rows=2)
|
||||
# Verify we have 2 left in main
|
||||
rows = self.conn.execute(qmt).fetchall()
|
||||
self.assertEqual(len(rows), 2)
|
||||
|
@ -8193,7 +8193,7 @@ class ArchiveTestCase(test.TestCase):
|
|||
rows = self.conn.execute(qst).fetchall()
|
||||
self.assertEqual(len(rows), 4)
|
||||
# Try to archive more, but there are no deleted rows left.
|
||||
db.archive_deleted_rows_for_table(tablename, max_rows=2)
|
||||
sqlalchemy_api._archive_deleted_rows_for_table(tablename, max_rows=2)
|
||||
# Verify we still have 2 left in main
|
||||
rows = self.conn.execute(qmt).fetchall()
|
||||
self.assertEqual(len(rows), 2)
|
||||
|
@ -8250,13 +8250,16 @@ class ArchiveTestCase(test.TestCase):
|
|||
result = self.conn.execute(ins_stmt)
|
||||
result.inserted_primary_key[0]
|
||||
# The first try to archive console_pools should fail, due to FK.
|
||||
num = db.archive_deleted_rows_for_table("console_pools")
|
||||
num = sqlalchemy_api._archive_deleted_rows_for_table("console_pools",
|
||||
max_rows=None)
|
||||
self.assertEqual(num, 0)
|
||||
# Then archiving consoles should work.
|
||||
num = db.archive_deleted_rows_for_table("consoles")
|
||||
num = sqlalchemy_api._archive_deleted_rows_for_table("consoles",
|
||||
max_rows=None)
|
||||
self.assertEqual(num, 1)
|
||||
# Then archiving console_pools should work.
|
||||
num = db.archive_deleted_rows_for_table("console_pools")
|
||||
num = sqlalchemy_api._archive_deleted_rows_for_table("console_pools",
|
||||
max_rows=None)
|
||||
self.assertEqual(num, 1)
|
||||
self._assert_shadow_tables_empty_except(
|
||||
'shadow_console_pools',
|
||||
|
|
Loading…
Reference in New Issue