
Restore soft-deleted compute node with same uuid

There is a unique index on the compute_nodes.uuid column, which means
we cannot have more than one compute_nodes record with the same UUID in
the same DB, even if one of them is soft deleted, because the deleted
column is not part of that unique index constraint.
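
To illustrate the constraint, here is a minimal, standalone SQLAlchemy
sketch (assuming SQLAlchemy 1.4+; this is not the actual nova model
definition): because the unique constraint covers uuid alone, inserting
a second row with the same uuid fails even though the first row is
soft deleted.

    import sqlalchemy as sa
    from sqlalchemy.exc import IntegrityError
    from sqlalchemy.orm import declarative_base, sessionmaker

    Base = declarative_base()

    class ComputeNode(Base):
        __tablename__ = 'compute_nodes'
        id = sa.Column(sa.Integer, primary_key=True)
        uuid = sa.Column(sa.String(36))
        deleted = sa.Column(sa.Integer, default=0)
        # 'deleted' is not part of the constraint, so soft deleting a row
        # does not free its uuid for reuse.
        __table_args__ = (sa.UniqueConstraint('uuid'),)

    engine = sa.create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    session.add(ComputeNode(uuid='fake-node-uuid', deleted=1))  # soft deleted
    session.commit()
    session.add(ComputeNode(uuid='fake-node-uuid', deleted=0))  # same uuid
    try:
        session.commit()
    except IntegrityError:
        print('duplicate uuid rejected even though the old row is soft deleted')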

This is a problem with ironic nodes, where the node is 1:1 with the
compute node record. When a node is undergoing maintenance, the driver
does not return it from get_available_nodes(), so the
ComputeManager.update_available_resource periodic task (soft) deletes
the compute node record. But when the node is no longer under
maintenance in ironic and the driver reports it again, the
ResourceTracker._init_compute_node code fails to create the ComputeNode
record because of the duplicate uuid.

This change handles the DBDuplicateEntry error in compute_node_create
by finding the soft-deleted compute node with the same uuid and
simply updating it to no longer be (soft) deleted.
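
The handling builds on oslo.utils' save_and_reraise_exception context
manager, which re-raises the caught exception on exit unless reraise is
set to False. A minimal sketch of that pattern (the callables below are
placeholders for illustration, not nova code):

    from oslo_utils import excutils

    def create_with_recovery(save, undelete_existing):
        # ``save`` and ``undelete_existing`` are placeholder callables
        # standing in for the real DB operations.
        try:
            save()
        except Exception:
            with excutils.save_and_reraise_exception() as err_ctx:
                # Attempt recovery; if it succeeds, swallow the original
                # error, otherwise it is re-raised when the block exits.
                if undelete_existing():
                    err_ctx.reraise = False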

Closes-Bug: #1839560

Change-Id: Iafba419fe86446ffe636721f523fb619f8f787b3
Tag: 20.0.0.0rc1
Author: Matt Riedemann
Commit: 8b007266f4

nova/db/sqlalchemy/api.py (+45, -1)

@@ -30,6 +30,7 @@ from oslo_db.sqlalchemy import enginefacade
 from oslo_db.sqlalchemy import update_match
 from oslo_db.sqlalchemy import utils as sqlalchemyutils
 from oslo_log import log as logging
+from oslo_utils import excutils
 from oslo_utils import importutils
 from oslo_utils import timeutils
 from oslo_utils import uuidutils
@@ -696,11 +697,54 @@ def compute_node_create(context, values):
 
     compute_node_ref = models.ComputeNode()
     compute_node_ref.update(values)
-    compute_node_ref.save(context.session)
+    try:
+        compute_node_ref.save(context.session)
+    except db_exc.DBDuplicateEntry:
+        with excutils.save_and_reraise_exception(logger=LOG) as err_ctx:
+            # Check to see if we have a (soft) deleted ComputeNode with the
+            # same UUID and if so just update it and mark as no longer (soft)
+            # deleted. See bug 1839560 for details.
+            if 'uuid' in values:
+                # Get a fresh context for a new DB session and allow it to
+                # get a deleted record.
+                ctxt = nova.context.get_admin_context(read_deleted='yes')
+                compute_node_ref = _compute_node_get_and_update_deleted(
+                    ctxt, values)
+                # If we didn't get anything back we failed to find the node
+                # by uuid and update it so re-raise the DBDuplicateEntry.
+                if compute_node_ref:
+                    err_ctx.reraise = False
 
     return compute_node_ref
 
 
+@pick_context_manager_writer
+def _compute_node_get_and_update_deleted(context, values):
+    """Find a ComputeNode by uuid, update and un-delete it.
+
+    This is a special case from the ``compute_node_create`` method which
+    needs to be separate to get a new Session.
+
+    This method will update the ComputeNode, if found, to have deleted=0 and
+    deleted_at=None values.
+
+    :param context: request auth context which should be able to read deleted
+        records
+    :param values: values used to update the ComputeNode record - must include
+        uuid
+    :return: updated ComputeNode sqlalchemy model object if successfully found
+        and updated, None otherwise
+    """
+    cn = model_query(
+        context, models.ComputeNode).filter_by(uuid=values['uuid']).first()
+    if cn:
+        # Update with the provided values but un-soft-delete.
+        update_values = copy.deepcopy(values)
+        update_values['deleted'] = 0
+        update_values['deleted_at'] = None
+        return compute_node_update(context, cn.id, update_values)
+
+
 @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
 @pick_context_manager_writer
 def compute_node_update(context, compute_id, values):

nova/tests/functional/regressions/test_bug_1839560.py (+17, -23)

@@ -14,7 +14,6 @@ from oslo_log import log as logging
 
 from nova import context
 from nova.db import api as db_api
-from nova import exception
 from nova import objects
 from nova import test
 from nova.tests import fixtures as nova_fixtures
@@ -93,30 +92,25 @@ class PeriodicNodeRecreateTestCase(test.TestCase,
         # Now stub the driver again to report node2 as being back and run
         # the periodic task.
         compute.manager.driver._set_nodes(['node1', 'node2'])
+        LOG.info('Running update_available_resource which should bring back '
+                 'node2.')
         compute.manager.update_available_resource(ctxt)
-        # FIXME(mriedem): This is bug 1839560 where the ResourceTracker fails
-        # to create a ComputeNode for node2 because of conflicting UUIDs.
+        # The DBDuplicateEntry error should have been handled and resulted in
+        # updating the (soft) deleted record to no longer be deleted.
         log = self.stdlog.logger.output
-        self.assertIn('Error updating resources for node node2', log)
-        self.assertIn('DBDuplicateEntry', log)
-        # Should still only have one reported hypervisor (node1).
+        self.assertNotIn('DBDuplicateEntry', log)
+        # Should have two reported hypervisors again.
         hypervisors = self.api.api_get('/os-hypervisors').body['hypervisors']
-        self.assertEqual(1, len(hypervisors), hypervisors)
-        # Test the workaround for bug 1839560 by archiving the deleted node2
-        # compute_nodes table record which will allow the periodic to create a
-        # new entry for node2. We can remove this when the bug is fixed.
+        self.assertEqual(2, len(hypervisors), hypervisors)
+        # Now that the node2 record was un-soft-deleted, archiving should not
+        # remove any compute_nodes.
         LOG.info('Archiving the database.')
         archived = db_api.archive_deleted_rows(1000)[0]
-        self.assertIn('compute_nodes', archived)
-        self.assertEqual(1, archived['compute_nodes'])
-        with utils.temporary_mutation(ctxt, read_deleted='yes'):
-            self.assertRaises(exception.ComputeHostNotFound,
-                              objects.ComputeNode.get_by_host_and_nodename,
-                              ctxt, 'node1', 'node2')
-        # Now run the periodic again and we should have a new ComputeNode for
-        # node2.
-        LOG.info('Running update_available_resource which should create a new '
-                 'ComputeNode record for node2.')
-        compute.manager.update_available_resource(ctxt)
-        hypervisors = self.api.api_get('/os-hypervisors').body['hypervisors']
-        self.assertEqual(2, len(hypervisors), hypervisors)
+        self.assertNotIn('compute_nodes', archived)
+        cn2 = objects.ComputeNode.get_by_host_and_nodename(
+            ctxt, 'node1', 'node2')
+        self.assertFalse(cn2.deleted)
+        self.assertIsNone(cn2.deleted_at)
+        # The node2 id and uuid should not have changed in the DB.
+        self.assertEqual(cn.id, cn2.id)
+        self.assertEqual(cn.uuid, cn2.uuid)

nova/tests/unit/db/test_db_api.py (+12, -0)

@@ -7215,6 +7215,18 @@ class ComputeNodeTestCase(test.TestCase, ModelsObjectComparatorMixin):
         new_stats = jsonutils.loads(self.item['stats'])
         self.assertEqual(self.stats, new_stats)
 
+    def test_compute_node_create_duplicate_host_hypervisor_hostname(self):
+        """Tests to make sure that DBDuplicateEntry is raised when trying to
+        create a duplicate ComputeNode with the same host and
+        hypervisor_hostname values but different uuid values. This makes
+        sure that when _compute_node_get_and_update_deleted returns None
+        the DBDuplicateEntry is re-raised.
+        """
+        other_node = dict(self.compute_node_dict)
+        other_node['uuid'] = uuidutils.generate_uuid()
+        self.assertRaises(db_exc.DBDuplicateEntry,
+                          db.compute_node_create, self.ctxt, other_node)
+
     def test_compute_node_get_all(self):
         nodes = db.compute_node_get_all(self.ctxt)
         self.assertEqual(1, len(nodes))
