The XenAPI driver was not written to handle multiple simultaneous XenAPI requests: a single XenAPIConnection (and therefore a single XenAPISession) is used for all queries. XenAPISession's wait_for_task method stored its polling loop on self.loop (used to drive repeated calls to _poll_task until the task completed), so a subsequent (parallel) call to wait_for_task for another query would overwrite it. XenAPISession._poll_task was pulled into the XenAPISession.wait_for_task method so the loop no longer needs to be stored on self.
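To illustrate the idea, here is a minimal, hypothetical sketch of the pattern the fix relies on (the actual change to xenapi_conn is not part of the test/stub hunks shown below): every call to wait_for_task keeps its polling state in local variables, so two greenthreads waiting on different tasks cannot clobber a shared self.loop attribute. The SessionSketch class and its create_fake_task/_get_task_status helpers are illustrative stand-ins, not nova or XenAPI code.

# Hypothetical sketch only; SessionSketch, create_fake_task and
# _get_task_status are illustrative stand-ins, not nova/XenAPI code.
import itertools

import eventlet
from eventlet import event


class SessionSketch(object):
    """One shared session object, safe for parallel wait_for_task calls."""

    def __init__(self, poll_interval=0.1):
        self.poll_interval = poll_interval
        self._tasks = {}

    def create_fake_task(self, name):
        # Fake task: reports 'pending' twice, then 'success' forever.
        self._tasks[name] = itertools.chain(['pending', 'pending'],
                                            itertools.repeat('success'))
        return name

    def _get_task_status(self, task):
        # Stands in for the real XenAPI task-status call.
        return next(self._tasks[task])

    def wait_for_task(self, task):
        """Poll `task` to completion using only call-local state."""
        done = event.Event()  # per-call event, never stored on self

        def _poll():
            while not done.ready():
                status = self._get_task_status(task)
                if status == 'success':
                    done.send(status)
                else:
                    eventlet.sleep(self.poll_interval)

        eventlet.spawn(_poll)
        return done.wait()


if __name__ == '__main__':
    session = SessionSketch()
    t1 = session.create_fake_task('task-1')
    t2 = session.create_fake_task('task-2')
    # Two parallel waits on the same session no longer interfere.
    gt1 = eventlet.spawn(session.wait_for_task, t1)
    gt2 = eventlet.spawn(session.wait_for_task, t2)
    print(gt1.wait(), gt2.wait())

The new test_parallel_builds test in the first diff hunk exercises exactly this scenario by spawning two instance builds in separate greenthreads.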
@@ -16,6 +16,7 @@
 
 """Test suite for XenAPI."""
 
+import eventlet
 import functools
 import json
 import os
@@ -198,6 +199,28 @@ class XenAPIVMTestCase(test.TestCase):
         self.context = context.RequestContext('fake', 'fake', False)
         self.conn = xenapi_conn.get_connection(False)
 
+    def test_parallel_builds(self):
+        stubs.stubout_loopingcall_delay(self.stubs)
+
+        def _do_build(id, proj, user, *args):
+            values = {
+                'id': id,
+                'project_id': proj,
+                'user_id': user,
+                'image_id': 1,
+                'kernel_id': 2,
+                'ramdisk_id': 3,
+                'instance_type_id': '3',  # m1.large
+                'mac_address': 'aa:bb:cc:dd:ee:ff',
+                'os_type': 'linux'}
+            instance = db.instance_create(self.context, values)
+            self.conn.spawn(instance)
+
+        gt1 = eventlet.spawn(_do_build, 1, self.project.id, self.user.id)
+        gt2 = eventlet.spawn(_do_build, 2, self.project.id, self.user.id)
+        gt1.wait()
+        gt2.wait()
+
     def test_list_instances_0(self):
         instances = self.conn.list_instances()
         self.assertEquals(instances, [])
@@ -16,6 +16,7 @@
 
 """Stubouts, mocks and fixtures for the test suite"""
 
+import eventlet
 from nova.virt import xenapi_conn
 from nova.virt.xenapi import fake
 from nova.virt.xenapi import volume_utils
@@ -28,29 +29,6 @@ def stubout_instance_snapshot(stubs):
     @classmethod
     def fake_fetch_image(cls, session, instance_id, image, user, project,
                          type):
-        # Stubout wait_for_task
-        def fake_wait_for_task(self, task, id):
-            class FakeEvent:
-
-                def send(self, value):
-                    self.rv = value
-
-                def wait(self):
-                    return self.rv
-
-            done = FakeEvent()
-            self._poll_task(id, task, done)
-            rv = done.wait()
-            return rv
-
-        def fake_loop(self):
-            pass
-
-        stubs.Set(xenapi_conn.XenAPISession, 'wait_for_task',
-                  fake_wait_for_task)
-
-        stubs.Set(xenapi_conn.XenAPISession, '_stop_loop', fake_loop)
-
         from nova.virt.xenapi.fake import create_vdi
         name_label = "instance-%s" % instance_id
         #TODO: create fake SR record
@@ -63,11 +41,6 @@ def stubout_instance_snapshot(stubs):
 
     stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image)
 
-    def fake_parse_xmlrpc_value(val):
-        return val
-
-    stubs.Set(xenapi_conn, '_parse_xmlrpc_value', fake_parse_xmlrpc_value)
-
     def fake_wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
                                    original_parent_uuid):
         from nova.virt.xenapi.fake import create_vdi
@@ -144,6 +117,16 @@ def stubout_loopingcall_start(stubs):
     stubs.Set(utils.LoopingCall, 'start', fake_start)
 
 
+def stubout_loopingcall_delay(stubs):
+    def fake_start(self, interval, now=True):
+        self._running = True
+        eventlet.sleep(1)
+        self.f(*self.args, **self.kw)
+        # This would fail before parallel xenapi calls were fixed
+        assert self._running == False
+    stubs.Set(utils.LoopingCall, 'start', fake_start)
+
+
 class FakeSessionForVMTests(fake.SessionBase):
     """ Stubs out a XenAPISession for VM tests """
     def __init__(self, uri):