DB: Fix result set locking with periodics
An issue previously existed where periodic tasks would leave an open transaction against the database, which then caused failures when later attempts were made to write to the database. This is fixed by copying the data retrieved from the database while assembling the result returned to the calling method, detaching it from the query so that no open transaction remains behind.

Closes-Bug: #2027405
Change-Id: I6401193b04fd3be78c37433bfdd0ccbd92aac8da
parent 416ea8711e
commit fb978dab1c
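Background for the change below: the fix relies on the general pattern of materializing query results into plain Python tuples while the database session is still open, so the value handed back to the caller keeps no reference to the result set or its transaction. A minimal, hypothetical sketch of that pattern (the nodes table and the get_node_rows helper are illustrative stand-ins, not Ironic's actual schema or API):

import sqlalchemy as sa
from sqlalchemy.orm import sessionmaker

engine = sa.create_engine("sqlite:///:memory:")
metadata = sa.MetaData()
nodes = sa.Table(
    "nodes", metadata,
    sa.Column("id", sa.Integer, primary_key=True),
    sa.Column("uuid", sa.String(36)),
)
metadata.create_all(engine)
Session = sessionmaker(bind=engine)


def get_node_rows():
    # Hypothetical helper: run a query and hand back detached data.
    with Session() as session:
        result = session.execute(sa.select(nodes.c.id, nodes.c.uuid))
        # Copy each Row into a plain tuple while the session is still
        # open; the list we return holds no reference to the result,
        # the session, or its transaction.
        rows = [tuple(row) for row in result]
    return rows

Because the returned list contains only plain tuples, nothing outlives the with block, and the transaction is closed by the time the caller sees the data.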
@@ -532,9 +532,20 @@ class Connection(api.Connection):
         query = sa.select(*columns)
         query = self._add_nodes_filters(query, filters)
-        return _paginate_query(models.Node, limit, marker,
-                               sort_key, sort_dir, query,
-                               return_base_tuple=True)
+        # TODO(TheJulia): Why are we paginating this?!?!?!
+        # If we are not using sorting, or any other query magic,
+        # we could likely just do a query execution and
+        # prepare the tuple responses.
+        results = _paginate_query(models.Node, limit, marker,
+                                  sort_key, sort_dir, query,
+                                  return_base_tuple=True)
+        # Need to copy the data to close out the _paginate_query
+        # object.
+        new_result = [tuple([ent for ent in r]) for r in results]
+        # Explicitly free results so we don't hang on to it.
+        del results
+
+        return new_result

     def get_node_list(self, filters=None, limit=None, marker=None,
                       sort_key=None, sort_dir=None, fields=None):
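A hedged before/after sketch of the shape of the change above, using a hypothetical paginate helper rather than Ironic's _paginate_query: the fixed variant copies every row into a tuple and then drops its reference to the live result, so nothing returned to the caller keeps the underlying cursor or transaction alive.

import sqlalchemy as sa

engine = sa.create_engine("sqlite:///:memory:")
metadata = sa.MetaData()
nodes = sa.Table(
    "nodes", metadata,
    sa.Column("id", sa.Integer, primary_key=True),
    sa.Column("uuid", sa.String(36)),
)
metadata.create_all(engine)

with engine.begin() as conn:
    conn.execute(nodes.insert().values(id=1, uuid="abc"))


def paginate(conn, query, limit=None):
    # Hypothetical stand-in for a pagination helper; it simply executes
    # the (optionally limited) query and returns the live result.
    return conn.execute(query.limit(limit) if limit else query)


def list_nodes_buggy(conn):
    # Before: the live result object is returned directly, so the
    # caller ends up holding whatever cursor and transaction state
    # backs it.
    return paginate(conn, sa.select(nodes.c.id, nodes.c.uuid))


def list_nodes_fixed(conn):
    # After: copy each row into a plain tuple first...
    results = paginate(conn, sa.select(nodes.c.id, nodes.c.uuid))
    new_result = [tuple(row) for row in results]
    # ...and explicitly drop the result object so this function returns
    # only detached data.
    del results
    return new_result


with engine.connect() as conn:
    print(list_nodes_fixed(conn))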
@@ -174,24 +174,24 @@ class DbNodeTestCase(base.DbTestCase):
         self.assertEqual([node2.id], [r[0] for r in res])

         res = self.dbapi.get_nodeinfo_list(filters={'maintenance': True})
-        self.assertEqual([node2.id], [r.id for r in res])
+        self.assertEqual([node2.id], [r[0] for r in res])

         res = self.dbapi.get_nodeinfo_list(filters={'maintenance': False})
         self.assertEqual(sorted([node1.id, node3.id]),
-                         sorted([r.id for r in res]))
+                         sorted([r[0] for r in res]))

         res = self.dbapi.get_nodeinfo_list(filters={'fault': 'boom'})
-        self.assertEqual([node2.id], [r.id for r in res])
+        self.assertEqual([node2.id], [r[0] for r in res])

         res = self.dbapi.get_nodeinfo_list(filters={'fault': 'moob'})
         self.assertEqual([], [r.id for r in res])

         res = self.dbapi.get_nodeinfo_list(filters={'resource_class': 'foo'})
-        self.assertEqual([node2.id], [r.id for r in res])
+        self.assertEqual([node2.id], [r[0] for r in res])

         res = self.dbapi.get_nodeinfo_list(
             filters={'conductor_group': 'group1'})
-        self.assertEqual([node2.id], [r.id for r in res])
+        self.assertEqual([node2.id], [r[0] for r in res])

         res = self.dbapi.get_nodeinfo_list(
             filters={'conductor_group': 'group2'})
@@ -201,13 +201,13 @@ class DbNodeTestCase(base.DbTestCase):
             filters={'reserved_by_any_of': ['fake-host',
                                             'another-fake-host']})
         self.assertEqual(sorted([node1.id, node3.id]),
-                         sorted([r.id for r in res]))
+                         sorted([r[0] for r in res]))

         res = self.dbapi.get_nodeinfo_list(filters={'id': node1.id})
-        self.assertEqual([node1.id], [r.id for r in res])
+        self.assertEqual([node1.id], [r[0] for r in res])

         res = self.dbapi.get_nodeinfo_list(filters={'uuid': node1.uuid})
-        self.assertEqual([node1.id], [r.id for r in res])
+        self.assertEqual([node1.id], [r[0] for r in res])

         # ensure unknown filters explode
         filters = {'bad_filter': 'foo'}
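The test updates above follow from the data copy in the database API: the rows handed back are now plain tuples, which are addressed by position (r[0]) rather than by attribute (r.id). A small illustrative sketch of that difference, assuming SQLAlchemy and a hypothetical nodes table (not Ironic's actual schema):

import sqlalchemy as sa

engine = sa.create_engine("sqlite:///:memory:")
metadata = sa.MetaData()
nodes = sa.Table(
    "nodes", metadata,
    sa.Column("id", sa.Integer, primary_key=True),
    sa.Column("uuid", sa.String(36)),
)
metadata.create_all(engine)

with engine.begin() as conn:
    conn.execute(nodes.insert().values(id=1, uuid="abc"))

with engine.connect() as conn:
    row = conn.execute(sa.select(nodes.c.id, nodes.c.uuid)).first()
    # A SQLAlchemy Row supports both attribute and positional access.
    assert row.id == 1
    assert row[0] == 1
    # A plain tuple copy only supports positional access, which is why
    # the assertions switch from r.id to r[0].
    copied = tuple(row)
    assert copied[0] == 1
    assert not hasattr(copied, "id")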
@@ -0,0 +1,6 @@
+---
+fixes:
+  - |
+    Fixes issues with locks related to the execution of periodic tasks where
+    the task has a lingering transaction. For more information please see
+    `bug 2027405 <https://bugs.launchpad.net/ironic/+bug/2027405>`_.