Fix ProviderTree copying with threading Lock
The threading Lock object cannot be copied so the ProviderTree cannot be deep copied in threading mode. This patch adds custom pickling / copying handlers to the ProviderTree class to ignore the lock from the source and re-add it in the destination object during pickling / copying. Note that ProviderTree uses a named lock which means all the instances of the ProviderTree object in the same process use a shared lock object. The copy handlers ensure the same behavior during copying so the copy will use the same shared lock as well. Signed-off-by: Balazs Gibizer <gibi@redhat.com> Change-Id: I084e72ce81dd456d67c9046f37d1ccb01b9fa4ba
This commit is contained in:
committed by
Ghanshyam Mann
parent
4b71dab239
commit
e3203ce4d1
@@ -727,3 +727,23 @@ class ProviderTree(object):
|
||||
with self.lock:
|
||||
provider = self._find_with_lock(name_or_uuid)
|
||||
return provider.update_resources(resources)
|
||||
|
||||
def __getstate__(self):
    """Return the picklable state of this tree (used by pickle/deepcopy).

    The threading lock cannot be pickled, so it is stripped from the
    state here; __setstate__ restores the shared named lock on the
    destination object.
    """
    # Shallow-copy the instance dict and drop the unpicklable lock.
    state = dict(self.__dict__)
    state.pop("lock")
    return state
|
||||
|
||||
def __setstate__(self, state):
    """Restore state produced by __getstate__ (used by pickle/deepcopy).

    __getstate__ drops the unpicklable threading lock from the state,
    so the shared named lock is re-created here. Every ProviderTree in
    the process shares the same named lock, and the copy does too.

    :param state: the state dict produced by __getstate__
    """
    self.__dict__.update(state)
    # Write the lock into our own __dict__ rather than into the caller's
    # state dict, so unpickling does not mutate the argument as a side
    # effect.
    self.__dict__["lock"] = lockutils.internal_lock(_LOCK_NAME)
|
||||
|
||||
@@ -9,6 +9,8 @@
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
import copy
|
||||
|
||||
from oslo_utils.fixture import uuidsentinel as uuids
|
||||
|
||||
from nova.compute import provider_tree
|
||||
@@ -726,3 +728,17 @@ class TestProviderTree(test.NoDBTestCase):
|
||||
self.assertTrue(pt.update_resources(cn.uuid, cn_resources))
|
||||
# resources not changed
|
||||
self.assertFalse(pt.update_resources(cn.uuid, cn_resources))
|
||||
|
||||
def test_deep_copy(self):
    """Deep-copying a ProviderTree must preserve the shared named lock.

    ProviderTree uses a process-wide named lock; a deepcopy has to end
    up holding the very same lock object instead of failing or creating
    a private one.
    """
    tree_a = provider_tree.ProviderTree()
    tree_b = provider_tree.ProviderTree()
    # Independent instances already share the one named lock.
    self.assertIs(tree_a.lock, tree_b.lock)

    tree_copy = copy.deepcopy(tree_a)
    # The copy must reference that same shared lock object.
    self.assertIs(tree_a.lock, tree_copy.lock)
|
||||
|
||||
@@ -7,16 +7,6 @@ nova.tests.unit.test_context.ContextTestCase.test_scatter_gather_cells_queued_ta
|
||||
nova.tests.unit.virt.libvirt.test_driver.CacheConcurrencyTestCase.test_different_fname_concurrency
|
||||
nova.tests.unit.virt.libvirt.test_driver.CacheConcurrencyTestCase.test_same_fname_concurrency
|
||||
|
||||
nova.tests.unit.virt.libvirt.test_driver.TestUpdateProviderTree.test_image_cache_disk_reservation
|
||||
nova.tests.unit.virt.libvirt.test_driver.TestUpdateProviderTree.test_update_provider_tree
|
||||
nova.tests.unit.virt.libvirt.test_driver.TestUpdateProviderTree.test_update_provider_tree_for_pcpu_reshape
|
||||
nova.tests.unit.virt.libvirt.test_driver.TestUpdateProviderTree.test_update_provider_tree_for_vgpu_reshape
|
||||
nova.tests.unit.virt.libvirt.test_driver.TestUpdateProviderTree.test_update_provider_tree_for_vpmem
|
||||
nova.tests.unit.virt.libvirt.test_driver.TestUpdateProviderTree.test_update_provider_tree_with_cpu_traits
|
||||
nova.tests.unit.virt.libvirt.test_driver.TestUpdateProviderTree.test_update_provider_tree_with_file_backed_memory
|
||||
nova.tests.unit.virt.libvirt.test_driver.TestUpdateProviderTree.test_update_provider_tree_with_tpm_traits
|
||||
nova.tests.unit.virt.libvirt.test_driver.TestUpdateProviderTree.test_update_provider_tree_with_vgpus
|
||||
nova.tests.unit.virt.libvirt.test_driver.TestUpdateProviderTree.test_update_provider_tree_zero_total
|
||||
nova.tests.unit.virt.libvirt.volume.test_mount.HostMountStateTestCase.test_mount_concurrent
|
||||
nova.tests.unit.virt.libvirt.volume.test_mount.HostMountStateTestCase.test_mount_concurrent_no_interfere
|
||||
nova.tests.unit.virt.libvirt.volume.test_mount.MountManagerTestCase.test_host_up_waits_for_completion
|
||||
|
||||
Reference in New Issue
Block a user