Merge "Add --task-log option to nova-manage db archive_deleted_rows"
@@ -216,7 +216,7 @@ db archive_deleted_rows

.. code-block:: shell

    nova-manage db archive_deleted_rows [--max_rows <rows>] [--verbose]
-        [--until-complete] [--before <date>] [--purge] [--all-cells]
+        [--until-complete] [--before <date>] [--purge] [--all-cells] [--task-log]

Move deleted rows from production tables to shadow tables. Note that the
corresponding rows in the ``instance_mappings``, ``request_specs`` and
@@ -228,6 +228,10 @@ file.
If automating, this should be run continuously while the result is 1,
stopping at 0, or use the :option:`--until-complete` option.

+.. versionchanged:: 24.0.0 (Xena)
+
+    Added :option:`--task-log` option.
+
.. rubric:: Options

.. option:: --max_rows <rows>
@@ -272,6 +276,17 @@ stopping at 0, or use the :option:`--until-complete` option.

    Run command across all cells.

+.. option:: --task-log
+
+    Also archive ``task_log`` table records. Note that ``task_log`` records are
+    never deleted, so archiving them will move all of the ``task_log`` records
+    up to now into the shadow tables. It is recommended to also specify the
+    :option:`--before` option to avoid races for those consuming ``task_log``
+    record data via the `/os-instance_usage_audit_log`__ API (example:
+    Telemetry).
+
+    .. __: https://docs.openstack.org/api-ref/compute/#server-usage-audit-log-os-instance-usage-audit-log
+
.. rubric:: Return codes

.. list-table::
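For reference, the new flag can also be exercised programmatically, the way the functional tests further down drive it. A minimal sketch, assuming a configured nova database (`DbCommands` is the class backing ``nova-manage db``; the date is illustrative):

    # Hedged sketch: drive `nova-manage db archive_deleted_rows` via its
    # implementing class, as the functional tests below do.
    from nova.cmd import manage

    commands = manage.DbCommands()
    # Archive soft-deleted rows plus task_log rows last updated before the
    # given date; returns 1 while rows remain to archive, 0 when done.
    ret = commands.archive_deleted_rows(
        max_rows=1000, before='2021-08-01', task_log=True, verbose=True)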
@@ -223,9 +223,17 @@ Error: %s""") % str(e))
          help='Purge all data from shadow tables after archive completes')
    @args('--all-cells', action='store_true', dest='all_cells',
          default=False, help='Run command across all cells.')
+   @args('--task-log', action='store_true', dest='task_log', default=False,
+         help=('Also archive ``task_log`` table records. Note that '
+               '``task_log`` records are never deleted, so archiving them '
+               'will move all of the ``task_log`` records up to now into the '
+               'shadow tables. It is recommended to also specify the '
+               '``--before`` option to avoid races for those consuming '
+               '``task_log`` record data via the '
+               '``/os-instance_usage_audit_log`` API (example: Telemetry).'))
    def archive_deleted_rows(self, max_rows=1000, verbose=False,
                             until_complete=False, purge=False,
-                            before=None, all_cells=False):
+                            before=None, all_cells=False, task_log=False):
        """Move deleted rows from production tables to shadow tables.

        Returns 0 if nothing was archived, 1 if some number of rows were
@@ -317,7 +325,8 @@ Error: %s""") % str(e))
                        until_complete,
                        verbose,
                        before_date,
-                       cell_name)
+                       cell_name,
+                       task_log)
                except KeyboardInterrupt:
                    interrupt = True
                    break
@@ -348,7 +357,7 @@ Error: %s""") % str(e))
        return int(bool(table_to_rows_archived))

    def _do_archive(self, table_to_rows_archived, cctxt, max_rows,
-                   until_complete, verbose, before_date, cell_name):
+                   until_complete, verbose, before_date, cell_name, task_log):
        """Helper function for archiving deleted rows for a cell.

        This will archive deleted rows for a cell database and remove the
@@ -367,11 +376,13 @@ Error: %s""") % str(e))
        :param before_date: Archive rows that were deleted before this date
        :param cell_name: Name of the cell or None if not archiving across all
            cells
+       :param task_log: Whether to archive task_log table rows
        """
        ctxt = context.get_admin_context()
        while True:
            run, deleted_instance_uuids, total_rows_archived = \
-               db.archive_deleted_rows(cctxt, max_rows, before=before_date)
+               db.archive_deleted_rows(
+                   cctxt, max_rows, before=before_date, task_log=task_log)
            for table_name, rows_archived in run.items():
                if cell_name:
                    table_name = cell_name + '.' + table_name
@@ -397,6 +408,7 @@ Error: %s""") % str(e))
                    ctxt, deleted_instance_uuids))
                table_to_rows_archived[
                    'API_DB.instance_group_member'] += deleted_group_members

        # If we're not archiving until there is nothing more to archive, we
        # have reached max_rows in this cell DB or there was nothing to
        # archive.
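The `_do_archive` helper loops `db.archive_deleted_rows` in batches. A simplified sketch of that loop shape (`archive_all` is a hypothetical name; verbose output, per-cell handling, and the API-DB cleanup are omitted):

    from nova import context
    from nova.db import api as db

    def archive_all(max_rows=1000, before=None, task_log=False):
        # Hypothetical helper: keep archiving batches until a pass archives
        # nothing, mimicking the --until-complete behavior.
        ctxt = context.get_admin_context()
        total = 0
        while True:
            run, _uuids, archived = db.archive_deleted_rows(
                ctxt, max_rows, before=before, task_log=task_log)
            total += archived
            if not archived:
                break
        return total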
@@ -1288,7 +1288,8 @@ def task_log_get(context, task_name, period_beginning,
####################


-def archive_deleted_rows(context=None, max_rows=None, before=None):
+def archive_deleted_rows(context=None, max_rows=None, before=None,
+                         task_log=None):
    """Move up to max_rows rows from production tables to the corresponding
    shadow tables.

@@ -1296,6 +1297,7 @@ def archive_deleted_rows(context=None, max_rows=None, before=None):
    :param max_rows: Maximum number of rows to archive (required)
    :param before: optional datetime which when specified filters the records
        to only archive those records deleted before the given date
+   :param task_log: Optional; whether to also archive task_log table records
    :returns: 3-item tuple:

        - dict that maps table name to number of rows archived from that table,
@@ -1310,7 +1312,7 @@ def archive_deleted_rows(context=None, max_rows=None, before=None):
        - total number of rows that were archived
    """
    return IMPL.archive_deleted_rows(context=context, max_rows=max_rows,
-                                    before=before)
+                                    before=before, task_log=task_log)


def pcidevice_online_data_migration(context, max_count):
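A short, hedged sketch of consuming the 3-item return value documented above (assumes an initialized database; the printing is illustrative):

    from nova.db import api as db

    # Returns (table -> rows archived, deleted instance UUIDs, total archived).
    results, deleted_instance_uuids, total = db.archive_deleted_rows(
        max_rows=500, task_log=True)
    for table, count in sorted(results.items()):
        print('%s: %d rows archived' % (table, count))
    print('total: %d' % total)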
@@ -4145,7 +4145,8 @@ def _get_fk_stmts(metadata, conn, table, column, records):
    return inserts, deletes


-def _archive_deleted_rows_for_table(metadata, tablename, max_rows, before):
+def _archive_deleted_rows_for_table(metadata, tablename, max_rows, before,
+                                    task_log):
    """Move up to max_rows rows from one table to the corresponding
    shadow table.

@@ -4189,8 +4190,19 @@ def _archive_deleted_rows_for_table(metadata, tablename, max_rows, before):

    select = sql.select([column],
                        deleted_column != deleted_column.default.arg)
+
+   if tablename == "task_log" and task_log:
+       # task_log table records are never deleted by anything, so we won't
+       # base our select statement on the 'deleted' column status.
+       select = sql.select([column])

    if before:
-       select = select.where(table.c.deleted_at < before)
+       if tablename != "task_log":
+           select = select.where(table.c.deleted_at < before)
+       elif task_log:
+           # task_log table records are never deleted by anything, so we won't
+           # base our select statement on the 'deleted_at' column status.
+           select = select.where(table.c.updated_at < before)

    select = select.order_by(column).limit(max_rows)
    rows = conn.execute(select).fetchall()
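A standalone sketch of the conditional select-building above, against a toy table. Assumptions: SQLAlchemy 1.3-style `select([...])` as used in the diff, `build_archive_select` is a hypothetical name, and the soft-delete test is simplified to `deleted != 0`:

    import sqlalchemy as sa
    from sqlalchemy import sql

    metadata = sa.MetaData()
    task_log = sa.Table(
        'task_log', metadata,
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('deleted', sa.Integer, default=0),
        sa.Column('deleted_at', sa.DateTime),
        sa.Column('updated_at', sa.DateTime))

    def build_archive_select(table, tablename, before, task_log_flag):
        column = table.c.id
        # Default: archive only soft-deleted rows.
        select = sql.select([column]).where(table.c.deleted != 0)
        if tablename == 'task_log' and task_log_flag:
            # task_log rows are never soft-deleted; select them all.
            select = sql.select([column])
        if before:
            if tablename != 'task_log':
                select = select.where(table.c.deleted_at < before)
            elif task_log_flag:
                # Age task_log rows by updated_at instead of deleted_at.
                select = select.where(table.c.updated_at < before)
        return select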
@@ -4251,7 +4263,8 @@ def _archive_deleted_rows_for_table(metadata, tablename, max_rows, before):
    return rows_archived, deleted_instance_uuids, extras


-def archive_deleted_rows(context=None, max_rows=None, before=None):
+def archive_deleted_rows(context=None, max_rows=None, before=None,
+                         task_log=False):
    """Move up to max_rows rows from production tables to the corresponding
    shadow tables.

@@ -4259,6 +4272,7 @@ def archive_deleted_rows(context=None, max_rows=None, before=None):
    :param max_rows: Maximum number of rows to archive (required)
    :param before: optional datetime which when specified filters the records
        to only archive those records deleted before the given date
+   :param task_log: Optional; whether to also archive task_log table records
    :returns: 3-item tuple:

        - dict that maps table name to number of rows archived from that table,
@@ -4295,7 +4309,8 @@ def archive_deleted_rows(context=None, max_rows=None, before=None):
                _archive_deleted_rows_for_table(
                    meta, tablename,
                    max_rows=max_rows - total_rows_archived,
-                   before=before))
+                   before=before,
+                   task_log=task_log))
            total_rows_archived += rows_archived
            if tablename == 'instances':
                deleted_instance_uuids = _deleted_instance_uuids
@@ -4333,6 +4348,7 @@ def purge_shadow_tables(context, before_date, status_fn=None):
    overrides = {
        'shadow_instance_actions': 'created_at',
        'shadow_instance_actions_events': 'created_at',
+       'shadow_task_log': 'updated_at',
    }

    for table in _purgeable_tables(metadata):
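The new `shadow_task_log` entry makes the purge step age those rows by `updated_at`, since they were never soft-deleted. A tiny illustration of the lookup pattern (the `purge_column` helper is hypothetical; the mapping itself is from the diff):

    overrides = {
        'shadow_instance_actions': 'created_at',
        'shadow_instance_actions_events': 'created_at',
        'shadow_task_log': 'updated_at',
    }

    def purge_column(table_name):
        # Most shadow tables are purged by deleted_at; tables whose rows were
        # never soft-deleted need a different timestamp column.
        return overrides.get(table_name, 'deleted_at')

    assert purge_column('shadow_task_log') == 'updated_at'
    assert purge_column('shadow_instances') == 'deleted_at'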
@@ -16,6 +16,7 @@ import datetime
import re

from dateutil import parser as dateutil_parser
+from oslo_utils import fixture as osloutils_fixture
from oslo_utils import timeutils
from sqlalchemy import func
from sqlalchemy import MetaData
@@ -24,13 +25,19 @@ from sqlalchemy import select
from nova import context
from nova.db import api as db
from nova.db.sqlalchemy import api as sqlalchemy_api
+from nova import objects
from nova.tests.functional import integrated_helpers
+from nova import utils as nova_utils


class TestDatabaseArchive(integrated_helpers._IntegratedTestBase):
    """Tests DB API for archiving (soft) deleted records"""

    def setUp(self):
+       # Disable filters (namely the ComputeFilter) because we'll manipulate
+       # time.
+       self.flags(
+           enabled_filters=['AllHostsFilter'], group='filter_scheduler')
        super(TestDatabaseArchive, self).setUp()
        self.enforce_fk_constraints()

@@ -183,10 +190,32 @@ class TestDatabaseArchive(integrated_helpers._IntegratedTestBase):
        return results

    def test_archive_then_purge_all(self):
+       # Enable the generation of task_log records by the instance usage audit
+       # nova-compute periodic task.
+       self.flags(instance_usage_audit=True)
+       compute = self.computes['compute']
+
        server = self._create_server()
        server_id = server['id']
+
+       admin_context = context.get_admin_context()
+       future = timeutils.utcnow() + datetime.timedelta(days=30)
+
+       with osloutils_fixture.TimeFixture(future):
+           # task_log records are generated by the _instance_usage_audit
+           # periodic task.
+           compute.manager._instance_usage_audit(admin_context)
+       # Audit period defaults to 1 month, the last audit period will
+       # be the previous calendar month.
+       begin, end = nova_utils.last_completed_audit_period()
+       # Verify that we have 1 task_log record per audit period.
+       task_logs = objects.TaskLogList.get_all(
+           admin_context, 'instance_usage_audit', begin, end)
+       self.assertEqual(1, len(task_logs))
+
        self._delete_server(server)
-       results, deleted_ids, archived = db.archive_deleted_rows(max_rows=1000)
+       results, deleted_ids, archived = db.archive_deleted_rows(
+           max_rows=1000, task_log=True)
        self.assertEqual([server_id], deleted_ids)

        lines = []
@@ -194,7 +223,6 @@ class TestDatabaseArchive(integrated_helpers._IntegratedTestBase):
        def status(msg):
            lines.append(msg)

-       admin_context = context.get_admin_context()
        deleted = sqlalchemy_api.purge_shadow_tables(admin_context,
                                                     None, status_fn=status)
        self.assertNotEqual(0, deleted)
@@ -203,41 +231,69 @@ class TestDatabaseArchive(integrated_helpers._IntegratedTestBase):
        for line in lines:
            self.assertIsNotNone(re.match(r'Deleted [1-9][0-9]* rows from .*',
                                          line))
+       # Ensure we purged task_log records.
+       self.assertIn('shadow_task_log', str(lines))

        results = self._get_table_counts()
        # No table should have any rows
        self.assertFalse(any(results.values()))

    def test_archive_then_purge_by_date(self):
-       server = self._create_server()
+       # Enable the generation of task_log records by the instance usage audit
+       # nova-compute periodic task.
+       self.flags(instance_usage_audit=True)
+       compute = self.computes['compute']
+
+       # Simulate a server that was created 30 days ago, needed to test the
+       # task_log coverage. The task_log audit period defaults to 1 month, so
+       # for a server to appear in the task_log, it must have been active
+       # during the previous calendar month.
+       month_ago = timeutils.utcnow() - datetime.timedelta(days=30)
+       with osloutils_fixture.TimeFixture(month_ago):
+           server = self._create_server()
+
        server_id = server['id']
+       admin_context = context.get_admin_context()
+
+       # task_log records are generated by the _instance_usage_audit
+       # periodic task.
+       compute.manager._instance_usage_audit(admin_context)
+       # Audit period defaults to 1 month, the last audit period will
+       # be the previous calendar month.
+       begin, end = nova_utils.last_completed_audit_period()
+       # Verify that we have 1 task_log record per audit period.
+       task_logs = objects.TaskLogList.get_all(
+           admin_context, 'instance_usage_audit', begin, end)
+       self.assertEqual(1, len(task_logs))
+
+       # Delete the server and archive deleted rows.
        self._delete_server(server)
-       results, deleted_ids, archived = db.archive_deleted_rows(max_rows=1000)
+       results, deleted_ids, archived = db.archive_deleted_rows(
+           max_rows=1000, task_log=True)
        self.assertEqual([server_id], deleted_ids)
        self.assertEqual(sum(results.values()), archived)

        pre_purge_results = self._get_table_counts()

-       past = timeutils.utcnow() - datetime.timedelta(hours=1)
-       admin_context = context.get_admin_context()
-       deleted = sqlalchemy_api.purge_shadow_tables(admin_context,
-                                                    past)
        # Make sure we didn't delete anything if the marker is before
        # we started
+       past = timeutils.utcnow() - datetime.timedelta(days=31)
+       deleted = sqlalchemy_api.purge_shadow_tables(admin_context,
+                                                    past)
        self.assertEqual(0, deleted)

-       results = self._get_table_counts()
        # Nothing should be changed if we didn't purge anything
+       results = self._get_table_counts()
        self.assertEqual(pre_purge_results, results)

-       future = timeutils.utcnow() + datetime.timedelta(hours=1)
-       deleted = sqlalchemy_api.purge_shadow_tables(admin_context, future)
        # Make sure we deleted things when the marker is after
        # we started
+       future = timeutils.utcnow() + datetime.timedelta(hours=1)
+       deleted = sqlalchemy_api.purge_shadow_tables(admin_context, future)
        self.assertNotEqual(0, deleted)

-       results = self._get_table_counts()
        # There should be no rows in any table if we purged everything
+       results = self._get_table_counts()
        self.assertFalse(any(results.values()))

    def test_purge_with_real_date(self):
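A hedged sketch of the time-travel technique these tests rely on: `TimeFixture` from oslo.utils pins `timeutils.utcnow()` to a chosen moment, so the audit-period logic sees a future clock:

    import datetime

    from oslo_utils import fixture as osloutils_fixture
    from oslo_utils import timeutils

    future = timeutils.utcnow() + datetime.timedelta(days=30)
    with osloutils_fixture.TimeFixture(future):
        # Inside the fixture, "now" is the pinned future time.
        assert timeutils.utcnow() == future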
@@ -10,13 +10,16 @@
# License for the specific language governing permissions and limitations
# under the License.
import collections
+import datetime
from io import StringIO
import mock

import fixtures
from neutronclient.common import exceptions as neutron_client_exc
import os_resource_classes as orc
+from oslo_utils import fixture as osloutils_fixture
from oslo_utils.fixture import uuidsentinel
+from oslo_utils import timeutils

from nova.cmd import manage
from nova import config
@@ -29,6 +32,7 @@ from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.functional import test_servers
+from nova import utils as nova_utils

CONF = config.CONF
INCOMPLETE_CONSUMER_ID = '00000000-0000-0000-0000-000000000000'
@@ -1683,6 +1687,53 @@ class TestDBArchiveDeletedRows(integrated_helpers._IntegratedTestBase):
        self.assertEqual(
            1, len(self.api.get_server_group(group['id'])['members']))

+   def test_archive_task_logs(self):
+       # Enable the generation of task_log records by the instance usage audit
+       # nova-compute periodic task.
+       self.flags(instance_usage_audit=True)
+       compute = self.computes['compute']
+
+       # Create a few servers for the periodic task to process.
+       for i in range(0, 3):
+           self._create_server()
+
+       ctxt = context.get_admin_context()
+
+       # The instance usage audit periodic task only processes servers that
+       # were active during the last audit period. The audit period defaults
+       # to 1 month, so the last audit period would be the previous calendar
+       # month. Advance time 30 days into the future in order to generate a
+       # task_log record for the servers we created.
+       for days in (30, 60, 90):
+           future = timeutils.utcnow() + datetime.timedelta(days=days)
+           with osloutils_fixture.TimeFixture(future):
+               # task_log records are generated by the _instance_usage_audit
+               # periodic task.
+               compute.manager._instance_usage_audit(ctxt)
+               # Audit period defaults to 1 month, the last audit period will
+               # be the previous calendar month.
+               begin, end = nova_utils.last_completed_audit_period()
+               # Verify that we have 1 task_log record per audit period.
+               task_logs = objects.TaskLogList.get_all(
+                   ctxt, 'instance_usage_audit', begin, end)
+               self.assertEqual(1, len(task_logs))
+
+       # First try archiving without --task-log. Expect no task_log entries in
+       # the results.
+       self.cli.archive_deleted_rows(verbose=True)
+       self.assertNotIn('task_log', self.output.getvalue())
+       # Next try archiving with --task-log and --before.
+       # We'll archive records that were last updated before the second audit
+       # period.
+       before = timeutils.utcnow() + datetime.timedelta(days=30)
+       self.cli.archive_deleted_rows(
+           task_log=True, before=before.isoformat(), verbose=True)
+       # Verify that only 1 task_log record was archived.
+       self.assertRegex(self.output.getvalue(), r'\| task_log\s+\| 1')
+       # Now archive all of the rest, there should be 2 left.
+       self.cli.archive_deleted_rows(task_log=True, verbose=True)
+       self.assertRegex(self.output.getvalue(), r'\| task_log\s+\| 2')


class TestDBArchiveDeletedRowsMultiCell(integrated_helpers.InstanceHelperMixin,
                                        test.TestCase):
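A small illustration of the audit-period helper the tests lean on; with the default monthly audit period, the last completed period is the previous calendar month:

    # Hedged illustration: last_completed_audit_period() returns the
    # (begin, end) datetimes of the most recently finished audit period.
    from nova import utils as nova_utils

    begin, end = nova_utils.last_completed_audit_period()
    print('last completed audit period: %s -> %s' % (begin, end))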
@@ -1768,3 +1819,69 @@ class TestDBArchiveDeletedRowsMultiCell(integrated_helpers.InstanceHelperMixin,
        self.assertRaises(exception.InstanceNotFound,
                          objects.Instance.get_by_uuid,
                          cctxt, server_id)
+
+   def test_archive_task_logs(self):
+       # Enable the generation of task_log records by the instance usage audit
+       # nova-compute periodic task.
+       self.flags(instance_usage_audit=True)
+
+       # Create servers for the periodic task to process.
+       # Boot a server to cell1
+       server = self._build_server(az='nova:host1')
+       created_server = self.api.post_server({'server': server})
+       self._wait_for_state_change(created_server, 'ACTIVE')
+       # Boot a server to cell2
+       server = self._build_server(az='nova:host2')
+       created_server = self.api.post_server({'server': server})
+       self._wait_for_state_change(created_server, 'ACTIVE')
+
+       ctxt = context.get_admin_context()
+
+       # The instance usage audit periodic task only processes servers that
+       # were active during the last audit period. The audit period defaults
+       # to 1 month, so the last audit period would be the previous calendar
+       # month. Advance time 30 days into the future in order to generate a
+       # task_log record for the servers we created.
+       for days in (30, 60, 90):
+           future = timeutils.utcnow() + datetime.timedelta(days=days)
+           with osloutils_fixture.TimeFixture(future):
+               # task_log records are generated by the _instance_usage_audit
+               # periodic task.
+               with context.target_cell(
+                       ctxt, self.cell_mappings['cell1']) as cctxt:
+                   self.compute1.manager._instance_usage_audit(cctxt)
+               with context.target_cell(
+                       ctxt, self.cell_mappings['cell2']) as cctxt:
+                   self.compute2.manager._instance_usage_audit(cctxt)
+               # Audit period defaults to 1 month, the last audit period will
+               # be the previous calendar month.
+               begin, end = nova_utils.last_completed_audit_period()
+
+               for cell_name in ('cell1', 'cell2'):
+                   with context.target_cell(
+                           ctxt, self.cell_mappings[cell_name]) as cctxt:
+                       task_logs = objects.TaskLogList.get_all(
+                           cctxt, 'instance_usage_audit', begin, end)
+                       self.assertEqual(1, len(task_logs))
+
+       # First try archiving without --task-log. Expect no task_log entries in
+       # the results.
+       self.cli.archive_deleted_rows(all_cells=True, verbose=True)
+       self.assertNotIn('task_log', self.output.getvalue())
+       # Next try archiving with --task-log and --before.
+       # We'll archive records that were last updated before the second audit
+       # period.
+       before = timeutils.utcnow() + datetime.timedelta(days=30)
+       self.cli.archive_deleted_rows(
+           all_cells=True, task_log=True, before=before.isoformat(),
+           verbose=True)
+       # Verify that only 2 task_log records were archived, 1 in each cell.
+       for cell_name in ('cell1', 'cell2'):
+           self.assertRegex(
+               self.output.getvalue(), r'\| %s.task_log\s+\| 1' % cell_name)
+       # Now archive all of the rest, there should be 4 left, 2 in each cell.
+       self.cli.archive_deleted_rows(
+           all_cells=True, task_log=True, verbose=True)
+       for cell_name in ('cell1', 'cell2'):
+           self.assertRegex(
+               self.output.getvalue(), r'\| %s.task_log\s+\| 2' % cell_name)
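A tiny self-check of the verbose-output regex used in the assertions above (the archive summary prints table rows like `| cell1.task_log | 1 |`):

    import re

    sample = '| cell1.task_log              | 1         |'
    assert re.search(r'\| cell1.task_log\s+\| 1', sample)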
@@ -143,10 +143,14 @@ class DbCommandsTestCase(test.NoDBTestCase):
            purge=purge)
        mock_db_archive.assert_has_calls([
            # Called with max_rows=30 but only 15 were archived.
-           mock.call(test.MatchType(context.RequestContext), 30, before=None),
+           mock.call(
+               test.MatchType(context.RequestContext), 30, before=None,
+               task_log=False),
            # So the total from the last call was 15 and the new max_rows=15
            # for the next call in the second cell.
-           mock.call(test.MatchType(context.RequestContext), 15, before=None)
+           mock.call(
+               test.MatchType(context.RequestContext), 15, before=None,
+               task_log=False)
        ])
        output = self.output.getvalue()
        expected = '''\
@@ -218,18 +222,28 @@
            until_complete=True)
        mock_db_archive.assert_has_calls([
            # Called with max_rows=30 but only 15 were archived.
-           mock.call(test.MatchType(context.RequestContext), 30, before=None),
+           mock.call(
+               test.MatchType(context.RequestContext), 30, before=None,
+               task_log=False),
            # Called with max_rows=30 but 0 were archived (nothing left to
            # archive in this cell)
-           mock.call(test.MatchType(context.RequestContext), 30, before=None),
+           mock.call(
+               test.MatchType(context.RequestContext), 30, before=None,
+               task_log=False),
            # So the total from the last call was 0 and the new max_rows=30
            # because until_complete=True.
-           mock.call(test.MatchType(context.RequestContext), 30, before=None),
+           mock.call(
+               test.MatchType(context.RequestContext), 30, before=None,
+               task_log=False),
            # Called with max_rows=30 but 0 were archived (nothing left to
            # archive in this cell)
-           mock.call(test.MatchType(context.RequestContext), 30, before=None),
+           mock.call(
+               test.MatchType(context.RequestContext), 30, before=None,
+               task_log=False),
            # Called one final time with max_rows=30
-           mock.call(test.MatchType(context.RequestContext), 30, before=None)
+           mock.call(
+               test.MatchType(context.RequestContext), 30, before=None,
+               task_log=False)
        ])
        output = self.output.getvalue()
        expected = '''\
@@ -252,7 +266,8 @@ Archiving.....complete
    def _test_archive_deleted_rows(self, mock_db_archive, verbose=False):
        result = self.commands.archive_deleted_rows(20, verbose=verbose)
        mock_db_archive.assert_called_once_with(
-           test.MatchType(context.RequestContext), 20, before=None)
+           test.MatchType(context.RequestContext), 20, before=None,
+           task_log=False)
        output = self.output.getvalue()
        if verbose:
            expected = '''\
@@ -304,9 +319,15 @@ Archiving.....complete

        self.assertEqual(expected, self.output.getvalue())
        mock_db_archive.assert_has_calls([
-           mock.call(test.MatchType(context.RequestContext), 20, before=None),
-           mock.call(test.MatchType(context.RequestContext), 20, before=None),
-           mock.call(test.MatchType(context.RequestContext), 20, before=None),
+           mock.call(
+               test.MatchType(context.RequestContext), 20, before=None,
+               task_log=False),
+           mock.call(
+               test.MatchType(context.RequestContext), 20, before=None,
+               task_log=False),
+           mock.call(
+               test.MatchType(context.RequestContext), 20, before=None,
+               task_log=False),
        ])

    def test_archive_deleted_rows_until_complete_quiet(self):
@@ -344,9 +365,15 @@ Rows were archived, running purge...

        self.assertEqual(expected, self.output.getvalue())
        mock_db_archive.assert_has_calls([
-           mock.call(test.MatchType(context.RequestContext), 20, before=None),
-           mock.call(test.MatchType(context.RequestContext), 20, before=None),
-           mock.call(test.MatchType(context.RequestContext), 20, before=None),
+           mock.call(
+               test.MatchType(context.RequestContext), 20, before=None,
+               task_log=False),
+           mock.call(
+               test.MatchType(context.RequestContext), 20, before=None,
+               task_log=False),
+           mock.call(
+               test.MatchType(context.RequestContext), 20, before=None,
+               task_log=False),
        ])
        mock_db_purge.assert_called_once_with(mock.ANY, None,
                                              status_fn=mock.ANY)
@@ -405,8 +432,12 @@ Archiving....stopped

        self.assertEqual(expected, self.output.getvalue())
        mock_db_archive.assert_has_calls([
-           mock.call(test.MatchType(context.RequestContext), 20, before=None),
-           mock.call(test.MatchType(context.RequestContext), 20, before=None)
+           mock.call(
+               test.MatchType(context.RequestContext), 20, before=None,
+               task_log=False),
+           mock.call(
+               test.MatchType(context.RequestContext), 20, before=None,
+               task_log=False)
        ])

    def test_archive_deleted_rows_until_stopped_quiet(self):
@@ -422,7 +453,8 @@ Archiving....stopped
        result = self.commands.archive_deleted_rows(20, before='2017-01-13')
        mock_db_archive.assert_called_once_with(
            test.MatchType(context.RequestContext), 20,
-           before=datetime.datetime(2017, 1, 13))
+           before=datetime.datetime(2017, 1, 13),
+           task_log=False)
        self.assertEqual(1, result)

    @mock.patch.object(db, 'archive_deleted_rows', return_value=({}, [], 0))
@@ -432,7 +464,8 @@ Archiving....stopped
        result = self.commands.archive_deleted_rows(20, verbose=True,
                                                    purge=True)
        mock_db_archive.assert_called_once_with(
-           test.MatchType(context.RequestContext), 20, before=None)
+           test.MatchType(context.RequestContext), 20, before=None,
+           task_log=False)
        output = self.output.getvalue()
        # If nothing was archived, there should be no purge messages
        self.assertIn('Nothing was archived.', output)
@@ -474,7 +507,9 @@ Archiving....stopped

        self.assertEqual(1, result)
        mock_db_archive.assert_has_calls([
-           mock.call(test.MatchType(context.RequestContext), 20, before=None)
+           mock.call(
+               test.MatchType(context.RequestContext), 20, before=None,
+               task_log=False)
        ])
        self.assertEqual(1, mock_reqspec_destroy.call_count)
        mock_members_destroy.assert_called_once()
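These assertions use nova's `test.MatchType` helper, which compares equal to any instance of the wrapped type, so the freshly constructed `RequestContext` inside the command still matches. A minimal sketch of the idea (simplified from nova's helper):

    class MatchType(object):
        # Equality matcher: equal to any instance of wanted_type.
        def __init__(self, wanted_type):
            self.wanted_type = wanted_type

        def __eq__(self, other):
            return isinstance(other, self.wanted_type)

    assert MatchType(int) == 5
    assert not (MatchType(str) == 5)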
@@ -5917,6 +5917,9 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
        self.migrations = models.Migration.__table__
        self.shadow_migrations = sqlalchemyutils.get_table(
            self.engine, "shadow_migrations")
+       self.task_log = models.TaskLog.__table__
+       self.shadow_task_log = sqlalchemyutils.get_table(
+           self.engine, "shadow_task_log")

        self.uuidstrs = []
        for _ in range(6):
@@ -6162,7 +6165,8 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
        sqlalchemy_api._archive_deleted_rows_for_table(self.metadata,
                                                       tablename,
                                                       max_rows=2,
-                                                      before=None)
+                                                      before=None,
+                                                      task_log=False)
        # Verify we have 4 left in main
        rows = self.conn.execute(qmt).fetchall()
        self.assertEqual(len(rows), 4)
@@ -6173,7 +6177,8 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
        sqlalchemy_api._archive_deleted_rows_for_table(self.metadata,
                                                       tablename,
                                                       max_rows=2,
-                                                      before=None)
+                                                      before=None,
+                                                      task_log=False)
        # Verify we have 2 left in main
        rows = self.conn.execute(qmt).fetchall()
        self.assertEqual(len(rows), 2)
@@ -6184,7 +6189,8 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
        sqlalchemy_api._archive_deleted_rows_for_table(self.metadata,
                                                       tablename,
                                                       max_rows=2,
-                                                      before=None)
+                                                      before=None,
+                                                      task_log=False)
        # Verify we still have 2 left in main
        rows = self.conn.execute(qmt).fetchall()
        self.assertEqual(len(rows), 2)
@@ -6244,7 +6250,8 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
        num = sqlalchemy_api._archive_deleted_rows_for_table(self.metadata,
                                                             "instances",
                                                             max_rows=None,
-                                                            before=None)
+                                                            before=None,
+                                                            task_log=False)
        self.assertEqual(1, num[0])
        self._assert_shadow_tables_empty_except(
            'shadow_instances',
@@ -6321,6 +6328,76 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
            'shadow_instance_id_mappings'
        )

+   def test_archive_deleted_rows_task_log(self):
+       # Add 6 rows to the task_log table
+       for i in range(1, 7):
+           ins_stmt = self.task_log.insert().values(
+               id=i, task_name='instance_usage_audit', state='DONE',
+               host='host', message='message')
+           self.conn.execute(ins_stmt)
+       # Set 1 row's updated_at to 2017-01-01
+       updated_at = timeutils.parse_strtime('2017-01-01T00:00:00.0')
+       update_statement = self.task_log.update().where(
+           self.task_log.c.id == 1).values(updated_at=updated_at)
+       self.conn.execute(update_statement)
+       # Set 1 row's updated_at to 2017-01-02
+       updated_at = timeutils.parse_strtime('2017-01-02T00:00:00.0')
+       update_statement = self.task_log.update().where(
+           self.task_log.c.id == 2).values(updated_at=updated_at)
+       self.conn.execute(update_statement)
+       # Set 2 rows' updated_at to now
+       update_statement = self.task_log.update().where(
+           self.task_log.c.id.in_(range(3, 5))).values(
+               updated_at=timeutils.utcnow())
+       self.conn.execute(update_statement)
+       # Verify we have 6 in main
+       qtl = sql.select([self.task_log]).where(
+           self.task_log.c.id.in_(range(1, 7)))
+       rows = self.conn.execute(qtl).fetchall()
+       self.assertEqual(len(rows), 6)
+       # Verify we have 0 in shadow
+       qstl = sql.select([self.shadow_task_log]).where(
+           self.shadow_task_log.c.id.in_(range(1, 7)))
+       rows = self.conn.execute(qstl).fetchall()
+       self.assertEqual(len(rows), 0)
+       # Make sure 'before' comparison is for < not <=
+       before_date = dateutil_parser.parse('2017-01-01', fuzzy=True)
+       _, _, rows = db.archive_deleted_rows(
+           max_rows=1, task_log=True, before=before_date)
+       self.assertEqual(0, rows)
+       # Archive rows updated before 2017-01-02
+       before_date = dateutil_parser.parse('2017-01-02', fuzzy=True)
+       results = db.archive_deleted_rows(
+           max_rows=100, task_log=True, before=before_date)
+       expected = dict(task_log=1)
+       self._assertEqualObjects(expected, results[0])
+       # Archive 1 row updated before 2017-01-03
+       before_date = dateutil_parser.parse('2017-01-03', fuzzy=True)
+       results = db.archive_deleted_rows(
+           max_rows=1, task_log=True, before=before_date)
+       expected = dict(task_log=1)
+       self._assertEqualObjects(expected, results[0])
+       # Archive 2 rows
+       results = db.archive_deleted_rows(max_rows=2, task_log=True)
+       expected = dict(task_log=2)
+       self._assertEqualObjects(expected, results[0])
+       # Verify we have 2 left in main
+       rows = self.conn.execute(qtl).fetchall()
+       self.assertEqual(len(rows), 2)
+       # Verify we have 4 in shadow
+       rows = self.conn.execute(qstl).fetchall()
+       self.assertEqual(len(rows), 4)
+       # Archive the rest
+       results = db.archive_deleted_rows(max_rows=100, task_log=True)
+       expected = dict(task_log=2)
+       self._assertEqualObjects(expected, results[0])
+       # Verify we have 0 left in main
+       rows = self.conn.execute(qtl).fetchall()
+       self.assertEqual(len(rows), 0)
+       # Verify we have 6 in shadow
+       rows = self.conn.execute(qstl).fetchall()
+       self.assertEqual(len(rows), 6)


class PciDeviceDBApiTestCase(test.TestCase, ModelsObjectComparatorMixin):
    def setUp(self):
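The first archive call in the test above pins down that the `--before` comparison is strict (`<`, not `<=`): a row updated exactly at the cutoff is kept. In miniature:

    import datetime

    updated_at = datetime.datetime(2017, 1, 1)
    # A row updated exactly at the cutoff is NOT archived...
    assert not (updated_at < datetime.datetime(2017, 1, 1))
    # ...but a cutoff one day later catches it.
    assert updated_at < datetime.datetime(2017, 1, 2)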
releasenotes/notes/archive-task-logs-fa9dd7c5859b5e30.yaml (new file, 22 lines)
@@ -0,0 +1,22 @@
features:
  - |
    A ``--task-log`` option has been added to the ``nova-manage db
    archive_deleted_rows`` CLI. When ``--task-log`` is specified, ``task_log``
    table records will be archived while archiving the database. The
    ``--task-log`` option works in conjunction with ``--before`` if operators
    desire archiving only records that are older than ``<date>``. The
    ``updated_at`` field is used by ``--task-log --before <date>`` to determine
    the age of a ``task_log`` record for archival.

    The ``task_log`` database table contains instance usage audit records if
    ``nova-compute`` has been configured with ``[DEFAULT]instance_usage_audit =
    True``. This will be the case if OpenStack Telemetry is being used in the
    deployment, as the option causes Nova to generate audit data that Telemetry
    then retrieves from the server usage audit log API [1].

    Historically, there has been no way to delete ``task_log`` table records
    other than manual database modification. Because of this, ``task_log``
    records could pile up over time and operators were forced to perform manual
    steps to periodically truncate the ``task_log`` table.

    [1] https://docs.openstack.org/api-ref/compute/#server-usage-audit-log-os-instance-usage-audit-log