Fix bug caused by batch objects on querysets being deep-copied instead of shared across clones

This commit is contained in:
Blake Eggleston
2013-03-12 10:11:55 -07:00
parent 7f1d0c9e80
commit f04c831ec0
2 changed files with 20 additions and 2 deletions

View File

@@ -227,6 +227,12 @@ class QuerySet(object):
for k,v in self.__dict__.items():
if k in ['_con', '_cur', '_result_cache', '_result_idx']:
clone.__dict__[k] = None
elif k == '_batch':
# we need to keep the same batch instance across
# all queryset clones, otherwise the batched queries
# fly off into other batch instances which are never
# executed, thx @dokai
clone.__dict__[k] = self._batch
else:
clone.__dict__[k] = copy.deepcopy(v, memo)

View File

@@ -31,8 +31,6 @@ class BatchQueryTests(BaseCassEngTestCase):
for obj in TestMultiKeyModel.filter(partition=self.pkey):
obj.delete()
def test_insert_success_case(self):
b = BatchQuery()
@@ -90,4 +88,18 @@ class BatchQueryTests(BaseCassEngTestCase):
for i in range(5):
TestMultiKeyModel.get(partition=self.pkey, cluster=i)
def test_bulk_delete_success_case(self):
    """Deleting via a batched queryset should take effect only when the batch executes."""
    # Seed one partition (partition 0) with five clustered rows.
    for part in range(1):
        for clust in range(5):
            TestMultiKeyModel.create(
                partition=part,
                cluster=clust,
                count=part * clust,
                text='{}:{}'.format(part, clust),
            )

    with BatchQuery() as b:
        TestMultiKeyModel.objects.batch(b).filter(partition=0).delete()
        # Inside the context manager the batch has not run yet,
        # so all five rows must still be visible.
        assert TestMultiKeyModel.filter(partition=0).count() == 5
    # Leaving the `with` block executes the batch; the rows are gone now.
    assert TestMultiKeyModel.filter(partition=0).count() == 0

    # cleanup
    for row in TestMultiKeyModel.all():
        row.delete()