Add send timeout functionality, several bug fixes and new unit tests.
parent 0b36539095
commit 3c0f144ece
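Summary of the new API: SenderLink.send() now accepts an optional absolute
deadline. If the peer has not settled the message by that time, the delivery
callback is invoked with SenderLink.TIMED_OUT and the request is dropped from
the pending queues. The following is a rough usage sketch, not part of the
commit; it assumes an already-open dingus.SenderLink whose Connection is
processed elsewhere, and the helper names (on_delivery, send_with_timeout) are
illustrative only. The callback signature (link, handle, status, info) and the
TIMED_OUT constant are taken from the diff below:

    import time

    from proton import Message

    import dingus


    def on_delivery(link, handle, status, info):
        # Final disposition of a send; TIMED_OUT means the deadline passed
        # before the remote peer settled the message.
        if status == dingus.SenderLink.TIMED_OUT:
            print("send %r timed out" % (handle,))
        else:
            print("send %r finished with status %s" % (handle, status))


    def send_with_timeout(sender, body, seconds=10):
        # 'sender' is assumed to be an open dingus.SenderLink (setup elided).
        msg = Message()
        msg.body = body
        sender.send(msg, delivery_callback=on_delivery, handle=body,
                    deadline=time.time() + seconds)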
@@ -20,8 +20,10 @@ __all__ = [
     "Connection"
 ]

+import heapq
 import logging
 import time
+from warnings import warn

 from dingus.link import _SessionProxy

@@ -150,10 +152,13 @@ class Connection(object):
         self._sender_links = {}    # SenderLink
         self._receiver_links = {}  # ReceiverLink

+        self._timers = {}  # indexed by expiration date
+        self._timers_heap = []  # sorted by expiration date
+
         self._read_done = False
         self._write_done = False
         self._error = None
-        self._next_tick = 0
+        self._next_deadline = 0
         self._user_context = None
         self._active = False
         self._in_process = False
@@ -199,15 +204,15 @@ class Connection(object):
     # TODO(kgiusti) - think about server side use of sasl!
     @property
     def pn_sasl(self):
-        return self.sasl
-
-    # TODO(kgiusti) - deprecate in favor of pn_sasl
-    @property
-    def sasl(self):
         if not self._pn_sasl:
             self._pn_sasl = self._pn_transport.sasl()
         return self._pn_sasl

+    @property
+    def sasl(self):
+        warn(DeprecationWarning("sasl deprecated, use pn_sasl instead"))
+        return self.pn_sasl
+
     def pn_ssl(self):
         """Return the Proton SSL context for this Connection."""
         return self._pn_ssl
@@ -278,11 +283,9 @@ class Connection(object):
                 if self._handler:
                     self._handler.connection_failed(self, self._error)
                 # nag application until connection is destroyed
-                self._next_tick = now
+                self._next_deadline = now
                 return now

-            self._next_tick = self._pn_transport.tick(now)
-
             # wait until SASL has authenticated
             # TODO(kgiusti) Server-side SASL
             if self._pn_sasl:
@@ -292,7 +295,7 @@ class Connection(object):
                           str(self._pn_sasl.state))
                 if self._handler:
                     self._handler.sasl_step(self, self._pn_sasl)
-                return
+                return self._next_deadline

             if self._handler:
                 self._handler.sasl_done(self, self._pn_sasl,
@@ -320,21 +323,28 @@ class Connection(object):
                 session = pn_link.session.context
                 if (pn_link.is_sender and
                         pn_link.name not in self._sender_links):
-                    LOG.debug("Remotely initiated Sender needs init")
+                    LOG.debug("Remotely initiated Sender %s needs init",
+                              pn_link.name)
                     link = session.request_sender(pn_link)
                     self._sender_links[pn_link.name] = link
                     link._process_endpoints()
                 elif (pn_link.is_receiver and
                       pn_link.name not in self._receiver_links):
-                    LOG.debug("Remotely initiated Receiver needs init")
+                    LOG.debug("Remotely initiated Receiver %s needs init",
+                              pn_link.name)
                     link = session.request_receiver(pn_link)
                     self._receiver_links[pn_link.name] = link
                     link._process_endpoints()
-                else:
-                    LOG.debug("Ignoring request link - name in use %s",
-                              pn_link.name)
                 pn_link = next_pn_link

+            # process timer events:
+            timer_deadline = self._expire_timers(now)
+            transport_deadline = self._pn_transport.tick(now)
+            if timer_deadline and transport_deadline:
+                self._next_deadline = min(timer_deadline, transport_deadline)
+            else:
+                self._next_deadline = timer_deadline or transport_deadline
+
             # TODO(kgiusti) won't scale - use new Engine event API
             pn_link = self._pn_connection.link_head(self._ACTIVE)
             while pn_link:
@@ -379,7 +389,7 @@ class Connection(object):
                     self._handler.connection_remote_closed(self, cond)
             elif self._pn_connection.state == self._CLOSED:
                 LOG.debug("Connection close complete")
-                self._next_tick = 0
+                self._next_deadline = 0
                 if self._handler:
                     self._handler.connection_closed(self)

@@ -391,16 +401,20 @@ class Connection(object):
             # link = link.next(0)
             # print "Link Count %d" % count

-            return self._next_tick
+            return self._next_deadline

         finally:
             self._in_process = False

     @property
     def next_tick(self):
-        """Timestamp for next call to tick()
-        """
-        return self._next_tick
+        warn(DeprecationWarning("next_tick deprecated, use deadline instead"))
+        return self.deadline
+
+    @property
+    def deadline(self):
+        """Must invoke process() on or before this timestamp."""
+        return self._next_deadline

     @property
     def needs_input(self):
@@ -564,7 +578,7 @@ class Connection(object):
         self._write_done = True
         self._error = error
         # report error during the next call to process()
-        self._next_tick = time.time()
+        self._next_deadline = time.time()
         return self.EOS

     def _configure_ssl(self, properties):
@@ -611,3 +625,32 @@ class Connection(object):
             pn_ssl.peer_hostname = hostname
         LOG.debug("SSL configured for connection %s", self._name)
         return pn_ssl
+
+    def _add_timer(self, deadline, callback):
+        callbacks = self._timers.get(deadline)
+        if callbacks:
+            callbacks.add(callback)
+        else:
+            callbacks = set()
+            callbacks.add(callback)
+            self._timers[deadline] = callbacks
+            heapq.heappush(self._timers_heap, deadline)
+        if deadline < self._next_deadline:
+            self._next_deadline = deadline
+
+    def _cancel_timer(self, deadline, callback):
+        callbacks = self._timers.get(deadline)
+        if callbacks:
+            callbacks.discard(callback)
+        # next expire will discard empty deadlines
+
+    def _expire_timers(self, now):
+        while (self._timers_heap and
+               self._timers_heap[0] <= now):
+            deadline = heapq.heappop(self._timers_heap)
+            callbacks = self._timers.get(deadline)
+            if callbacks:
+                del self._timers[deadline]
+                for cb in callbacks:
+                    cb()
+        return self._timers_heap[0] if self._timers_heap else 0
@@ -310,10 +310,36 @@ class SenderLink(_Link):
     RELEASED = 3
     MODIFIED = 4

+    class _SendRequest(object):
+        """Tracks sending a single message."""
+        def __init__(self, link, tag, message, callback, handle, deadline):
+            self.link = link
+            self.tag = tag
+            self.message = message
+            self.callback = callback
+            self.handle = handle
+            self.deadline = deadline
+            self.link._send_requests[self.tag] = self
+            if deadline:
+                self.link._connection._add_timer(deadline, self)
+
+        def __call__(self):
+            """Invoked by Connection on timeout (now <= deadline)."""
+            self.link._send_expired(self)
+
+        def destroy(self, state, info):
+            """Invoked on final completion of send."""
+            if self.deadline and state != SenderLink.TIMED_OUT:
+                self.link._connection._cancel_timer(self.deadline, self)
+            if self.tag in self.link._send_requests:
+                del self.link._send_requests[self.tag]
+            if self.callback:
+                self.callback(self.link, self.handle, state, info)

     def __init__(self, connection, pn_link):
         super(SenderLink, self).__init__(connection, pn_link)
-        self._pending_sends = collections.deque()
-        self._pending_acks = {}
+        self._send_requests = {}  # indexed by tag
+        self._pending_sends = collections.deque()  # tags in order sent
+        self._next_deadline = 0
         self._next_tag = 0
         self._last_credit = 0
@@ -322,52 +348,47 @@ class SenderLink(_Link):

     def send(self, message, delivery_callback=None,
              handle=None, deadline=None):

-        self._pn_link.delivery("tag-%x" % self._next_tag)
+        tag = "dingus-tag-%s" % self._next_tag
         self._next_tag += 1
-        send_req = (message, delivery_callback, handle, deadline)
+        send_req = SenderLink._SendRequest(self, tag, message,
+                                           delivery_callback, handle,
+                                           deadline)
+        self._pn_link.delivery(tag)
+        LOG.debug("Sending a message, tag=%s", tag)

-        # TODO(kgiusti) deadline not supported yet
-        if deadline:
-            raise NotImplementedError("send timeout not supported yet!")
+        if deadline and (self._next_deadline == 0 or
+                         self._next_deadline > deadline):
+            self._next_deadline = deadline
+            self._connection._add_timer(deadline, send_req)

         pn_delivery = self._pn_link.current
         if pn_delivery and pn_delivery.writable:
             # send oldest pending:
             if self._pending_sends:
-                self._pending_sends.append(send_req)
-                send_req = self._pending_sends.popleft()
+                self._pending_sends.append(tag)
+                tag = self._pending_sends.popleft()
+                send_req = self._send_requests[tag]
+                LOG.debug("Sending previous pending message, tag=%s", tag)
             self._write_msg(pn_delivery, send_req)
         else:
-            self._pending_sends.append(send_req)
+            LOG.debug("Send is pending for credit, tag=%s", tag)
+            self._pending_sends.append(tag)

         return 0

     @property
     def pending(self):
-        return len(self._pending_sends) + len(self._pending_acks)
+        return len(self._send_requests)

     @property
     def credit(self):
         return self._pn_link.credit

     def close(self, pn_condition=None):
-        while self._pending_sends:
-            i = self._pending_sends.popleft()
-            cb = i[1]
-            if cb:
-                handle = i[2]
-                # TODO(kgiusti) fix - must be async!
-                cb(self, handle, self.ABORTED, {"condition": pn_condition})
-        for i in self._pending_acks.itervalues():
-            cb = i[1]
-            handle = i[2]
+        self._pending_sends.clear()
+        while self._send_requests:
+            key, send_req = self._send_requests.popitem()
             # TODO(kgiusti) fix - must be async!
-            cb(self, handle, self.ABORTED, {"condition": pn_condition})
-        self._pending_acks.clear()
+            send_req.destroy(SenderLink.ABORTED,
+                             {"condition": pn_condition})
         super(SenderLink, self).close(pn_condition)

     def reject(self, pn_condition=None):
@@ -389,10 +410,10 @@ class SenderLink(_Link):
             proton.Disposition.MODIFIED: SenderLink.MODIFIED,
         }

-        if pn_delivery.tag in self._pending_acks:
+        LOG.debug("Processing delivery, tag=%s", str(pn_delivery.tag))
+        if pn_delivery.tag in self._send_requests:
             if pn_delivery.settled:  # remote has finished
                 LOG.debug("Remote has settled a sent msg")
-                send_req = self._pending_acks.pop(pn_delivery.tag)
                 state = _disposition_state_map.get(pn_delivery.remote_state,
                                                    self.UNKNOWN)
                 pn_disposition = pn_delivery.remote
@@ -406,43 +427,49 @@ class SenderLink(_Link):
                     annotations = pn_disposition.annotations
                     if annotations:
                         info["message-annotations"] = annotations
-                cb = send_req[1]
-                handle = send_req[2]
-                cb(self, handle, state, info)
+                send_req = self._send_requests.pop(pn_delivery.tag)
+                send_req.destroy(state, info)
                 pn_delivery.settle()
+            elif pn_delivery.writable:
+                # we can now send on this delivery
+                LOG.debug("Delivery has become writable")
+                if self._pending_sends:
+                    tag = self._pending_sends.popleft()
+                    send_req = self._send_requests[tag]
+                    self._write_msg(pn_delivery, send_req)
         else:
-            # not for a sent msg, use it to send the next
-            if pn_delivery.writable and self._pending_sends:
-                send_req = self._pending_sends.popleft()
-                self._write_msg(pn_delivery, send_req)
-            else:
-                # what else is there???
-                pn_delivery.settle()
+            # tag no longer valid, expired or canceled send?
+            LOG.debug("Delivery ignored, tag=%s", str(pn_delivery.tag))
+            pn_delivery.settle()

     def _process_credit(self):
         # Alert if credit has become available
         if self._handler and self._state == _Link._STATE_ACTIVE:
             new_credit = self._pn_link.credit
             if self._last_credit <= 0 and new_credit > 0:
+                LOG.debug("Credit is available, link=%s", self.name)
                 self._handler.credit_granted(self)
             self._last_credit = new_credit

     def _write_msg(self, pn_delivery, send_req):
         # given a writable delivery, send a message
-        # send_req = (msg, cb, handle, deadline)
-        LOG.debug("Sending a pending message")
-        msg = send_req[0]
-        cb = send_req[1]
-        self._pn_link.send(msg.encode())
+        LOG.debug("Sending message to engine, tag=%s", send_req.tag)
+        self._pn_link.send(send_req.message.encode())
         self._pn_link.advance()
-        if cb:  # delivery callback given
-            if pn_delivery.tag in self._pending_acks:
-                raise Exception("Duplicate delivery tag?")
-            self._pending_acks[pn_delivery.tag] = send_req
-        else:
-            # no status required, so settle it now.
+        if not send_req.callback:
+            # no disposition callback, so we can discard the send request and
+            # settle the delivery immediately
+            send_req.destroy(SenderLink.UNKNOWN, {})
             pn_delivery.settle()

+    def _send_expired(self, send_req):
+        LOG.debug("Send request timed-out, tag=%s", send_req.tag)
+        try:
+            self._pending_sends.remove(send_req.tag)
+        except ValueError:
+            pass
+        send_req.destroy(SenderLink.TIMED_OUT, None)
+
     # state machine actions:

     def _do_active(self):
@@ -77,14 +77,16 @@ def do_connection_io(c1, c2):
     return o1 > 0 or o2 > 0


-def process_connections(c1, c2):
+def process_connections(c1, c2, timestamp=None):
     """Do I/O and protocol processing on two connected Connections until no
     further data is transferred."""
-    c1.process(time.time())
-    c2.process(time.time())
+    if timestamp is None:
+        timestamp = time.time()
+    c1.process(timestamp)
+    c2.process(timestamp)
     while do_connection_io(c1, c2):
-        c1.process(time.time())
-        c2.process(time.time())
+        c1.process(timestamp)
+        c2.process(timestamp)


 def _validate_callback(connection):
@@ -195,15 +197,17 @@ class DeliveryCallback(object):
     def __init__(self):
         self.link = None
         self.handle = None
-        self.state = None
+        self.status = None
         self.info = None
+        self.count = 0

-    def __call__(self, link, handle, state, info):
+    def __call__(self, link, handle, status, info):
         _validate_callback(link.connection)
         self.link = link
         self.handle = handle
-        self.state = state
+        self.status = status
         self.info = info
+        self.count += 1


 class ReceiverCallback(dingus.ReceiverEventHandler):
@@ -17,6 +17,7 @@
 # under the License.
 #
 import common
+# import logging

 from proton import Condition
 from proton import Message
@@ -26,11 +27,16 @@ import dingus

 class APITest(common.Test):

-    def setup(self):
+    def setup(self, props=None):
+        # logging.getLogger("dingus").setLevel(logging.DEBUG)
         self.container1 = dingus.Container("test-container-1")
         self.conn1_handler = common.ConnCallback()
+        if props is None:
+            # props = {"x-trace-protocol": True}
+            props = {"x-trace-protocol": False}
         self.conn1 = self.container1.create_connection("conn1",
-                                                       self.conn1_handler)
+                                                       self.conn1_handler,
+                                                       props)
         self.conn1.open()

         self.container2 = dingus.Container("test-container-2")
@@ -39,8 +45,8 @@ class APITest(common.Test):
                                                        self.conn2_handler)
         self.conn2.open()

-    def process_connections(self):
-        common.process_connections(self.conn1, self.conn2)
+    def process_connections(self, timestamp=None):
+        common.process_connections(self.conn1, self.conn2, timestamp)

     def teardown(self):
         if self.conn1:
@@ -262,7 +268,7 @@ class APITest(common.Test):
         self.process_connections()
         assert cb.link == sender
         assert cb.handle == "my-handle"
-        assert cb.state == dingus.SenderLink.ACCEPTED
+        assert cb.status == dingus.SenderLink.ACCEPTED

     def test_send_released(self):
         cb = common.DeliveryCallback()
@@ -279,7 +285,7 @@ class APITest(common.Test):
         self.process_connections()
         assert cb.link == sender
         assert cb.handle == "my-handle"
-        assert cb.state == dingus.SenderLink.RELEASED
+        assert cb.status == dingus.SenderLink.RELEASED

     def test_send_rejected(self):
         cb = common.DeliveryCallback()
@@ -298,7 +304,7 @@ class APITest(common.Test):
         self.process_connections()
         assert cb.link == sender
         assert cb.handle == "my-handle"
-        assert cb.state == dingus.SenderLink.REJECTED
+        assert cb.status == dingus.SenderLink.REJECTED
         r_cond = cb.info.get("condition")
         assert r_cond and r_cond.name == "itchy"

@@ -318,14 +324,134 @@ class APITest(common.Test):
         self.process_connections()
         assert cb.link == sender
         assert cb.handle == "my-handle"
-        assert cb.state == dingus.SenderLink.MODIFIED
+        assert cb.status == dingus.SenderLink.MODIFIED
         assert cb.info.get("delivery-failed") is False
         assert cb.info.get("undeliverable-here") is True
         info = cb.info.get("message-annotations")
         assert info and info["dog"] == 1

-    def XXXtest_send_timed_out(self):
-        print("TBD")
+    def test_send_expired_no_credit(self):
+        cb = common.DeliveryCallback()
+        sender, receiver = self._setup_receiver_sync()
+        rl_handler = receiver.user_context
+        msg = Message()
+        msg.body = "Hi"
+        sender.send(msg, cb, "my-handle", deadline=10)
+        # receiver.add_capacity(1)
+        self.process_connections(timestamp=9)
+        assert rl_handler.message_received_ct == 0
+        assert sender.pending == 1
+        assert cb.status is None
+        self.process_connections(timestamp=10)
+        assert sender.pending == 0
+        assert cb.status == dingus.SenderLink.TIMED_OUT
+
+    def test_send_expired_late_reply(self):
+        cb = common.DeliveryCallback()
+        sender, receiver = self._setup_sender_sync()
+        rl_handler = receiver.user_context
+        receiver.add_capacity(1)
+        self.process_connections(timestamp=1)
+        msg = Message()
+        msg.body = "Hi"
+        sender.send(msg, cb, "my-handle", deadline=10)
+        self.process_connections(timestamp=9)
+        assert rl_handler.message_received_ct == 1
+        assert sender.pending == 1
+        assert sender.credit == 0
+        assert cb.status is None
+        self.process_connections(timestamp=10)
+        assert rl_handler.message_received_ct == 1
+        assert sender.pending == 0
+        assert cb.status == dingus.SenderLink.TIMED_OUT
+        # late reply:
+        assert cb.count == 1
+        msg2, handle = rl_handler.received_messages[0]
+        receiver.message_accepted(handle)
+        self.process_connections(timestamp=15)
+        assert cb.count == 1
+
+    def test_send_expired_no_reply(self):
+        cb = common.DeliveryCallback()
+        sender, receiver = self._setup_sender_sync()
+        rl_handler = receiver.user_context
+        msg = Message()
+        msg.body = "Hi"
+        sender.send(msg, cb, "my-handle", deadline=10)
+        self.process_connections(timestamp=1)
+        assert rl_handler.message_received_ct == 0
+        assert sender.pending == 1
+        assert sender.credit == 0
+        assert cb.count == 0
+        receiver.add_capacity(1)
+        self.process_connections(timestamp=2)
+        assert rl_handler.message_received_ct == 1
+        assert sender.pending == 1
+        assert sender.credit == 0
+        assert cb.count == 0
+        self.process_connections(timestamp=12)
+        assert sender.pending == 0
+        assert cb.count == 1
+        assert cb.status == dingus.SenderLink.TIMED_OUT
+
+    def test_send_expired_no_callback(self):
+        sender, receiver = self._setup_receiver_sync()
+        rl_handler = receiver.user_context
+        msg = Message()
+        msg.body = "Hi"
+        sender.send(msg, deadline=10)
+        assert sender.pending == 1
+        self.process_connections(timestamp=12)
+        assert rl_handler.message_received_ct == 0
+        assert sender.pending == 0
+
+    def test_send_deadline_idle(self):
+        """Validate the connection's deadline processing."""
+
+        self.setup(props={"idle-time-out": 99})
+
+        sender1 = self.conn1.create_sender("src1", "tgt1")
+        sender1.open()
+        self.process_connections(timestamp=1)
+        assert self.conn2_handler.receiver_requested_ct == 1
+        args = self.conn2_handler.receiver_requested_args[0]
+        receiver1 = self.conn2.accept_receiver(args.link_handle)
+        receiver1.open()
+
+        sender2 = self.conn1.create_sender("src2", "tgt2")
+        sender2.open()
+        self.process_connections(timestamp=1)
+        assert self.conn2_handler.receiver_requested_ct == 2
+        args = self.conn2_handler.receiver_requested_args[1]
+        receiver2 = self.conn2.accept_receiver(args.link_handle)
+        receiver2.open()
+
+        self.process_connections(timestamp=1)
+        assert self.conn1.deadline == 100.0
+
+        msg = Message()
+        msg.body = "Hi"
+        sender1.send(msg, deadline=11)
+        assert self.conn1.deadline == 11
+        self.process_connections(timestamp=2)
+        assert self.conn1.deadline == 11
+
+        sender2.send(msg, deadline=7)
+        assert self.conn1.deadline == 7
+        self.process_connections(timestamp=7)
+        assert self.conn1.deadline == 11
+        self.process_connections(timestamp=11)
+        assert self.conn1.deadline == 100
+
+        # next send timeout after the idle keepalive:
+        sender1.send(msg, deadline=101)
+        self.process_connections(timestamp=11)
+        assert self.conn1.deadline == 100
+
+        # have remote send idle, next deadline should
+        # be the pending send:
+        self.process_connections(timestamp=self.conn2.deadline)
+        assert self.conn1.deadline == 101

     def XXXtest_send_aborted(self):
         print("TBD")