db: Don't rely on implicit autocommit

Resolve the following RemovedIn20Warning warning:

  The current statement is being autocommitted using implicit
  autocommit, which will be removed in SQLAlchemy 2.0. Use the .begin()
  method of Engine or Connection in order to use an explicit transaction
  for DML and DDL statements.

This is the beefiest one due to our extensive reliance on autocommit.
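
For illustration, a minimal sketch of the pattern behind the warning quoted
above and its 2.0-style replacement (generic SQLAlchemy usage with a
placeholder "demo" table and in-memory engine, not code from this change):

  from sqlalchemy import create_engine, text

  engine = create_engine("sqlite://")

  with engine.begin() as conn:
      conn.execute(text("CREATE TABLE demo (id INTEGER, deleted INTEGER)"))

  # Legacy pattern: DML executed on a bare Connection is implicitly
  # autocommitted, which is what triggers the warning above.
  with engine.connect() as conn:
      conn.execute(text("DELETE FROM demo WHERE deleted = 1"))

  # 2.0-style pattern: run the statement in an explicit transaction; it
  # commits when the block exits cleanly and rolls back if it raises.
  with engine.begin() as conn:
      conn.execute(text("DELETE FROM demo WHERE deleted = 1"))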

Change-Id: Iebf9d022c312b8f5457ff34eb497cdb851aa4901
Signed-off-by: Stephen Finucane <stephenfin@redhat.com>
Stephen Finucane 2023-07-14 13:29:50 +01:00
parent bbc4f7edd0
commit 5647c6b132
3 changed files with 21 additions and 22 deletions

View File

@@ -771,7 +771,6 @@ def purge_deleted_rows(context, age_in_days, max_rows):
     based on last updated_at and status column.
     """
     engine = get_engine()
-    conn = engine.connect()
     metadata = MetaData()
     metadata.reflect(engine)
     deleted_age = timeutils.utcnow() - datetime.timedelta(days=age_in_days)
@@ -812,7 +811,8 @@ def purge_deleted_rows(context, age_in_days, max_rows):
         delete_statement = DeleteFromSelect(table, query_delete, column)
 
-        result = conn.execute(delete_statement)
+        with engine.connect() as conn, conn.begin():
+            result = conn.execute(delete_statement)
 
         rows = result.rowcount
         LOG.info('Deleted %(rows)d row(s) from table %(tbl)s',
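
As context for the change above (generic SQLAlchemy behaviour, not part of
this patch): the Transaction opened by conn.begin() commits when the block
exits without an exception and rolls back otherwise, so each table's DELETE
is committed or discarded independently instead of relying on autocommit.
A standalone sketch with a placeholder "demo" table:

  from sqlalchemy import create_engine, text

  engine = create_engine("sqlite://")

  with engine.begin() as conn:
      conn.execute(text("CREATE TABLE demo (id INTEGER)"))
      conn.execute(text("INSERT INTO demo (id) VALUES (1)"))

  # If anything inside the block raises, conn.begin() rolls the transaction
  # back before the connection is returned to the pool.
  try:
      with engine.connect() as conn, conn.begin():
          conn.execute(text("DELETE FROM demo"))
          raise RuntimeError("simulated failure")
  except RuntimeError:
      pass

  with engine.connect() as conn:
      # The DELETE was rolled back, so the row is still there.
      print(conn.execute(text("SELECT COUNT(*) FROM demo")).scalar())  # 1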

View File

@@ -174,13 +174,6 @@ class WarningsFixture(fixtures.Fixture):
             category=sqla_exc.SADeprecationWarning,
         )
 
-        warnings.filterwarnings(
-            'ignore',
-            message='The current statement is being autocommitted',
-            module='masakari',
-            category=sqla_exc.SADeprecationWarning,
-        )
-
         # Enable general SQLAlchemy warnings also to ensure we're not doing
         # silly stuff. It's possible that we'll need to filter things out here
         # with future SQLAlchemy versions, but that's a good thing

View File

@@ -34,7 +34,6 @@ class PurgeDeletedTest(test.TestCase):
         super(PurgeDeletedTest, self).setUp()
         self.context = context.get_admin_context()
         self.engine = db_api.get_engine()
-        self.conn = self.engine.connect()
         self.notifications = sqlalchemyutils.get_table(
             self.engine, "notifications")
         self.failover_segments = sqlalchemyutils.get_table(
@@ -58,7 +57,8 @@ class PurgeDeletedTest(test.TestCase):
                 type='demo',
                 status='failed')
             self.uuidstrs.append(notification_uuid)
-            self.conn.execute(ins_stmt)
+            with self.engine.connect() as conn, conn.begin():
+                conn.execute(ins_stmt)
 
             ins_stmt = self.failover_segments.insert().values(
                 uuid=fs_segment_uuid,
@@ -66,7 +66,8 @@ class PurgeDeletedTest(test.TestCase):
                 service_type='demo',
                 recovery_method='auto')
             self.uuid_fs_segments.append(fs_segment_uuid)
-            self.conn.execute(ins_stmt)
+            with self.engine.connect() as conn, conn.begin():
+                conn.execute(ins_stmt)
 
             ins_stmt = self.hosts.insert().values(
                 uuid=host_uuid,
@@ -75,7 +76,8 @@ class PurgeDeletedTest(test.TestCase):
                 type='demo',
                 control_attributes='test')
             self.uuid_hosts.append(host_uuid)
-            self.conn.execute(ins_stmt)
+            with self.engine.connect() as conn, conn.begin():
+                conn.execute(ins_stmt)
 
         # Set 4 of them deleted, 2 are 60 days ago, 2 are 20 days ago
         self.age_in_days_20 = timeutils.utcnow() - datetime.timedelta(days=20)
@@ -102,20 +104,24 @@ class PurgeDeletedTest(test.TestCase):
                 self.hosts.c.uuid.in_(self.uuid_hosts[4:6])).values(
                     deleted_at=self.age_in_days_60)
 
-        self.conn.execute(make_notifications_old)
-        self.conn.execute(make_notifications_older)
-        self.conn.execute(make_failover_segments_old)
-        self.conn.execute(make_failover_segments_older)
-        self.conn.execute(make_hosts_old)
-        self.conn.execute(make_hosts_older)
+        with self.engine.connect() as conn, conn.begin():
+            conn.execute(make_notifications_old)
+            conn.execute(make_notifications_older)
+            conn.execute(make_failover_segments_old)
+            conn.execute(make_failover_segments_older)
+            conn.execute(make_hosts_old)
+            conn.execute(make_hosts_older)
 
         dialect = self.engine.url.get_dialect()
         if dialect == sqlite.dialect:
-            self.conn.exec_driver_sql("PRAGMA foreign_keys = ON")
+            with self.engine.connect() as conn, conn.begin():
+                conn.exec_driver_sql("PRAGMA foreign_keys = ON")
 
     def _count(self, table):
-        return self.conn.execute(
-            select(func.count()).select_from(table)).scalar()
+        with self.engine.connect() as conn:
+            return conn.execute(
+                select(func.count()).select_from(table),
+            ).scalar()
 
     def test_purge_deleted_rows_old(self):
         # Purge at 30 days old, should only delete 2 rows
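
A note on the _count() change above (again generic SQLAlchemy usage, not
part of this patch): a plain SELECT does not modify data, so no explicit
begin() is needed; a short-lived connection is opened, the scalar is read,
and the connection is returned to the pool when the block exits. A
self-contained sketch of the same pattern with a placeholder "demo" table:

  from sqlalchemy import (
      Column, Integer, MetaData, Table, create_engine, func, select,
  )

  engine = create_engine("sqlite://")
  metadata = MetaData()
  demo = Table("demo", metadata, Column("id", Integer, primary_key=True))
  metadata.create_all(engine)

  def count_rows(table):
      # Read-only query: a bare connection is enough, no transaction needed.
      with engine.connect() as conn:
          return conn.execute(
              select(func.count()).select_from(table),
          ).scalar()

  print(count_rows(demo))  # 0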