Merge tag '2.0' into debian/unstable
Conflicts:
    .hgignore
    README
    TODO
.gitattributes | 2 (new file)
@@ -0,0 +1,2 @@
* text=auto
*.py text diff=python
.gitignore | 18 (new file)
@@ -0,0 +1,18 @@
*\.py[co]
*~
*\.orig
*\#.*
*@.*
.coverage
htmlcov
.DS_Store
venv
pyvenv
distribute_setup.py
distribute-*.tar.gz
build
dist
*.egg-info
.tox
.idea/
*.iml
.hgignore | 17 (deleted)
@@ -1,17 +0,0 @@
.*\.py[co]$
.*~$
.*\.orig$
.*\#.*$
.*@.*$
\.coverage$
htmlcov$
\.DS_Store$
venv$
distribute_setup.py$
distribute-\d+.\d+.\d+.tar.gz$
build$
dist$
.*\.egg-info$

# Directory created by the "tox" command (ex: tox -e py27)
.tox
.travis.yml | 17 (new file)
@@ -0,0 +1,17 @@
language: python

os:
    - linux
    - osx

python:
    - 3.3
    - 3.4

install:
    - pip install asyncio
    - python setup.py install

script:
    - python runtests.py
    - PYTHONASYNCIODEBUG=1 python runtests.py
AUTHORS | 4
@@ -9,6 +9,6 @@ The photo of Trollis flower was taken by Imartin6 and distributed under the CC
BY-SA 3.0 license. It comes from:
http://commons.wikimedia.org/wiki/File:Trollius_altaicus.jpg

Trollius is a port of the Tulip project on Python 2, see also authors of the
Tulip project (AUTHORS file of the Tulip project).
Trollius is a port of the asyncio project on Python 2, see also authors of the
asyncio project (AUTHORS file).
@@ -1,8 +1,10 @@
include AUTHORS COPYING TODO tox.ini
include AUTHORS COPYING TODO.rst tox.ini
include Makefile
include overlapped.c pypi.bat
include check.py runtests.py
include update-tulip*.sh
include check.py runtests.py run_aiotest.py release.py
include update-asyncio-*.sh
include .travis.yml
include releaser.conf

include doc/conf.py doc/make.bat doc/Makefile
include doc/*.rst doc/*.jpg
@@ -25,16 +25,18 @@ Here is a more detailed list of the package contents:
* an interface for passing work off to a threadpool, for times when you
  absolutely, positively have to use a library that makes blocking I/O calls.

Trollius is a port of the `Tulip project <http://code.google.com/p/tulip/>`_
(``asyncio`` module, `PEP 3156 <http://legacy.python.org/dev/peps/pep-3156/>`_)
on Python 2. Trollius works on Python 2.6-3.5. It has been tested on Windows,
Linux, Mac OS X, FreeBSD and OpenIndiana.
Trollius is a port of the `asyncio project
<https://github.com/python/asyncio>`_ (`PEP 3156
<http://legacy.python.org/dev/peps/pep-3156/>`_) on Python 2. Trollius works on
Python 2.6-3.5. It has been tested on Windows, Linux, Mac OS X, FreeBSD and
OpenIndiana.

* `Asyncio documentation <http://docs.python.org/dev/library/asyncio.html>`_
* `Trollius documentation <http://trollius.readthedocs.org/>`_
* `Trollius project in the Python Cheeseshop (PyPI)
  <https://pypi.python.org/pypi/trollius>`_
* `Trollius project at Bitbucket <https://bitbucket.org/enovance/trollius>`_
* `Trollius project at Github <https://github.com/haypo/trollius>`_
  (bug tracker, source code)
* Copyright/license: Open source, Apache 2.0. Enjoy!

See also the `Tulip project <http://code.google.com/p/tulip/>`_.
See also the `asyncio project at Github <https://github.com/python/asyncio>`_.
@@ -1,5 +1,13 @@
Unsorted "TODO" tasks:

* Drop Python 2.6 and 3.2 support
* Drop platforms without the ssl module?
* test_utils.py: remove assertRaisesRegex, assertRegex
* streams.py: FIXME: should we support __aiter__ and __anext__ in Trollius?
* reuse selectors backport from PyPI
* check ssl.SSLxxx in update_xxx.sh
* document how to port asyncio to trollius
* use six instead of compat
* Replace logger with warning in monotonic clock and synchronous executor
* Windows: use _overlapped in py33_winapi?
* Fix tests failing with PyPy:
@@ -1,17 +1,17 @@
++++++++++++++++++
Trollius and Tulip
++++++++++++++++++
++++++++++++++++++++
Trollius and asyncio
++++++++++++++++++++

Differences between Trollius and Tulip
======================================
Differences between Trollius and asyncio
========================================

Syntax of coroutines
--------------------

The major difference between Trollius and Tulip is the syntax of coroutines:
The major difference between Trollius and asyncio is the syntax of coroutines:

================== ======================
Tulip              Trollius
asyncio            Trollius
================== ======================
``yield from ...`` ``yield From(...)``
``yield from []``  ``yield From(None)``
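To make the mapping in the table above concrete, here is a minimal coroutine
written in the Trollius syntax (a sketch; the function name and the delay
value are invented for the example)::

    import trollius
    from trollius import From, Return

    @trollius.coroutine
    def double_after(delay):
        # "yield From(...)" replaces "yield from ...",
        # "raise Return(value)" replaces "return value".
        yield From(trollius.sleep(delay))
        raise Return(delay * 2)

    loop = trollius.get_event_loop()
    print(loop.run_until_complete(double_after(0.1)))
    loop.close()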
@@ -78,7 +78,8 @@ On Python 3.3 and newer, these symbols are just aliases to exceptions of the

``trollius.BACKPORT_SSL_ERRORS`` constant:

* ``True`` if ``ssl.SSLError`` are wrapped to Trollius exceptions,
* ``True`` if ``ssl.SSLError`` are wrapped to Trollius exceptions (Python 2
  older than 2.7.9, or Python 3 older than 3.3),
* ``False`` if trollius SSL exceptions are just aliases.
@@ -111,7 +112,8 @@ On Python 3.2 and older, the trollius SSL transport does not have the

``trollius.BACKPORT_SSL_CONTEXT`` constant:

* ``True`` if ``trollius.SSLContext`` is the backported class,
* ``True`` if ``trollius.SSLContext`` is the backported class (Python 2 older
  than 2.7.9, or Python 3 older than 3.3),
* ``False`` if ``trollius.SSLContext`` is just an alias to ``ssl.SSLContext``.
@@ -127,41 +129,41 @@ Other differences
``BaseEventLoop.run_in_executor()`` uses a synchronous executor instead of a
pool of threads. It blocks until the function returns. For example, DNS
resolutions are blocking in this case.
* Trollius has more symbols than Tulip for compatibility with Python older than
  3.3:
* Trollius has more symbols than asyncio for compatibility with Python older
  than 3.3:

  - ``From``: part of ``yield From(...)`` syntax
  - ``Return``: part of ``raise Return(...)`` syntax


Write code working on Trollius and Tulip
========================================
Write code working on Trollius and asyncio
==========================================

Trollius and Tulip are different, especially for coroutines (``yield
Trollius and asyncio are different, especially for coroutines (``yield
From(...)`` vs ``yield from ...``).

To use asyncio or Trollius on Python 2 and Python 3, add the following code at
the top of your file::

    try:
        # Use builtin asyncio on Python 3.4+, or Tulip on Python 3.3
        # Use builtin asyncio on Python 3.4+, or asyncio on Python 3.3
        import asyncio
    except ImportError:
        # Use Trollius on Python <= 3.2
        import trollius as asyncio

It is possible to write code working on both projects using only callbacks.
This option is used by the following projects which work on Trollius and Tulip:
This option is used by the following projects which work on Trollius and asyncio:

* `AutobahnPython <https://github.com/tavendo/AutobahnPython>`_: WebSocket &
  WAMP for Python, it works on Trollius (Python 2.6 and 2.7), Tulip (Python
  WAMP for Python, it works on Trollius (Python 2.6 and 2.7), asyncio (Python
  3.3) and Python 3.4 (asyncio), and also on Twisted.
* `Pulsar <http://pythonhosted.org/pulsar/>`_: Event driven concurrent
  framework for Python. With pulsar you can write asynchronous servers
  performing one or several activities in different threads and/or processes.
  Trollius 0.3 requires Pulsar 0.8.2 or later. Pulsar uses the ``asyncio``
  module if available, or imports ``trollius``.
* `Tornado <http://www.tornadoweb.org/>`_ supports Tulip and Trollius since
* `Tornado <http://www.tornadoweb.org/>`_ supports asyncio and Trollius since
  Tornado 3.2: `tornado.platform.asyncio — Bridge between asyncio and Tornado
  <http://tornado.readthedocs.org/en/latest/asyncio.html>`_. It tries to import
  asyncio or falls back to importing trollius.
@@ -169,10 +171,10 @@ This option is used by the following projects which work on Trollius and Tulip:
Another option is to provide functions returning ``Future`` objects, so the
caller can decide to use a callback using ``fut.add_done_callback(callback)`` or
to use coroutines (``yield From(fut)`` for Trollius, or ``yield from fut`` for
Tulip). This option is used by the `aiodns <https://github.com/saghul/aiodns>`_
asyncio). This option is used by the `aiodns <https://github.com/saghul/aiodns>`_
project for example.

Since Trollius 0.4, it's possible to use Tulip and Trollius coroutines in the
Since Trollius 0.4, it's possible to use asyncio and Trollius coroutines in the
same process. The only limit is that the event loop must be a Trollius event
loop.
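To illustrate the callback-only approach mentioned above, here is a minimal
sketch (the callback name and the values are invented; the import fallback is
the one shown earlier)::

    try:
        import asyncio               # Python 3.3 and newer
    except ImportError:
        import trollius as asyncio   # Python 2

    def on_done(fut):
        # A plain callback: no yield / yield from, so the same code runs
        # unmodified on Trollius and on asyncio.
        print("result: %r" % (fut.result(),))

    loop = asyncio.get_event_loop()
    fut = asyncio.Future()
    fut.add_done_callback(on_done)
    loop.call_soon(fut.set_result, 42)
    loop.run_until_complete(fut)
    loop.close()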
@@ -2,6 +2,313 @@
Change log
++++++++++

Version 2.0 (2015-07-13)
========================

Summary:

* SSL support on Windows for the proactor event loop with Python 3.5 and newer
* Many race conditions were fixed in the proactor event loop
* Trollius moved to Github and the fork was recreated on top of the asyncio git
  repository
* Many resource leaks (ex: unclosed sockets) were fixed
* Optimization of socket connections: don't call the slow getaddrinfo()
  function just to ensure that the address is already resolved. The check is
  now only done in debug mode.

The Trollius project moved from Bitbucket to Github. The project is now a fork
of the Git repository of the asyncio project (previously called the "tulip"
project); the trollius source code lives in the trollius branch.

The new Trollius home page is now: https://github.com/haypo/trollius

The asyncio project moved to: https://github.com/python/asyncio

Note: PEP 492 is not supported in trollius yet.

API changes:

* Issue #234: Drop JoinableQueue on Python 3.5+
* Add the asyncio.ensure_future() function, previously called async().
  The async() function is now deprecated.
* New event loop methods: set_task_factory() and get_task_factory().
* Python issue #23347: Make BaseSubprocessTransport.wait() private.
* Python issue #23347: send_signal(), kill() and terminate() methods of
  BaseSubprocessTransport now check if the transport was closed and if the
  process exited.
* Python issue #23209, #23225: selectors.BaseSelector.get_key() now raises a
  RuntimeError if the selector is closed. And selectors.BaseSelector.close()
  now clears its internal reference to the selector mapping to break a
  reference cycle. Initial patch written by Martin Richard.
* PipeHandle.fileno() of asyncio.windows_utils now raises an exception if the
  pipe is closed.
* Remove Overlapped.WaitNamedPipeAndConnect() from the _overlapped module:
  it is no longer used and it had issues.
* Python issue #23537: Remove 2 unused private methods of
  BaseSubprocessTransport: _make_write_subprocess_pipe_proto and
  _make_read_subprocess_pipe_proto. The methods only raised
  NotImplementedError and were never used.
* Remove the unused SSLProtocol._closing attribute
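A short sketch of the ensure_future() rename described in the list above (the
coroutine is invented for the example; the old async() spelling still works
but emits a deprecation warning)::

    import trollius
    from trollius import From

    @trollius.coroutine
    def work():
        yield From(trollius.sleep(0.1))

    loop = trollius.get_event_loop()
    task = trollius.ensure_future(work())  # new, preferred name
    # task = trollius.async(work())        # deprecated alias
    loop.run_until_complete(task)
    loop.close()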
New SSL implementation:

* Python issue #22560: On Python 3.5 and newer, use a new SSL implementation
  based on ssl.MemoryBIO instead of the legacy SSL implementation. Patch
  written by Antoine Pitrou, based on the work of Geert Jansen.
* If available, the new SSL implementation can be used by ProactorEventLoop to
  support SSL.
Enhance, fix and clean up the IocpProactor:

* Python issue #23293: Rewrite IocpProactor.connect_pipe(). Add
  _overlapped.ConnectPipe(), which tries to connect to the pipe for
  asynchronous I/O (overlapped): call CreateFile() in a loop until it no
  longer fails with ERROR_PIPE_BUSY. Use an increasing delay between 1 ms and
  100 ms.
* Tulip issue #204: Fix IocpProactor.accept_pipe().
  Overlapped.ConnectNamedPipe() now returns a boolean: True if the pipe is
  connected (if ConnectNamedPipe() failed with ERROR_PIPE_CONNECTED), False if
  the connection is in progress.
* Tulip issue #204: Fix IocpProactor.recv(). If ReadFile() fails with
  ERROR_BROKEN_PIPE, the operation is not pending: don't register the
  overlapped.
* Python issue #23095: Rewrite _WaitHandleFuture.cancel().
  _WaitHandleFuture.cancel() now waits until the wait is cancelled to clear
  its reference to the overlapped object. To wait until the cancellation is
  done, UnregisterWaitEx() is used with an event instead of UnregisterWait().
* Python issue #23293: Rewrite IocpProactor.connect_pipe() as a coroutine. Use
  a coroutine with asyncio.sleep() instead of call_later() to ensure that the
  scheduled call is cancelled.
* Fix ProactorEventLoop.start_serving_pipe(). If a client was connected before
  the server was closed: drop the client (close the pipe) and exit.
* Python issue #23293: Clean up IocpProactor.close(). The special case for
  connect_pipe() is no longer needed: connect_pipe() doesn't use overlapped
  operations anymore.
* IocpProactor.close(): don't cancel futures which are already cancelled.
* Enhance (fix) BaseProactorEventLoop._loop_self_reading(). Handle
  CancelledError correctly: just exit. On error, log the exception and exit;
  don't try to close the event loop (it doesn't work).
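A simplified sketch of the connect_pipe() retry loop described above (the
constant names are illustrative; _overlapped is the Windows-only C extension
shipped with trollius, and on Python 2 the caught exception is actually
WindowsError)::

    import trollius
    from trollius import From, Return
    import _overlapped  # Windows only

    CONNECT_PIPE_INIT_DELAY = 0.001  # 1 ms
    CONNECT_PIPE_MAX_DELAY = 0.100   # 100 ms

    @trollius.coroutine
    def connect_pipe(address):
        delay = CONNECT_PIPE_INIT_DELAY
        while True:
            try:
                # ConnectPipe() calls CreateFile(); it fails with
                # ERROR_PIPE_BUSY while no server instance is free.
                handle = _overlapped.ConnectPipe(address)
                break
            except OSError as exc:
                if exc.winerror != _overlapped.ERROR_PIPE_BUSY:
                    raise
            # sleep() instead of call_later(), so that cancelling the
            # coroutine also cancels the scheduled retry.
            delay = min(delay * 2, CONNECT_PIPE_MAX_DELAY)
            yield From(trollius.sleep(delay))
        raise Return(handle)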
Bug fixes:

* Fix LifoQueue's and PriorityQueue's put() and task_done().
* Issue #222: Fix the @coroutine decorator for functions without a __name__
  attribute, like functools.partial(). Also enhance the representation of a
  CoroWrapper if the coroutine function is a functools.partial().
* Python issue #23879: SelectorEventLoop.sock_connect() must not call connect()
  again if the first call to connect() raises an InterruptedError. When the C
  function connect() fails with EINTR, the connection runs in the background.
  We have to wait until the socket becomes writable to be notified when the
  connection succeeds or fails.
* Fix _SelectorTransport.__repr__() if the event loop is closed.
* Fix repr(BaseSubprocessTransport) if it didn't start yet.
* Workaround CPython bug #23353: don't use yield/yield-from in an except block
  of a generator. Store the exception and handle it outside the except block.
* Fix BaseSelectorEventLoop._accept_connection(). Close the transport on error.
  In debug mode, log errors using call_exception_handler().
* Fix _UnixReadPipeTransport and _UnixWritePipeTransport. Only start reading
  when connection_made() has been called.
* Fix _SelectorSslTransport.close(). Don't call protocol.connection_lost() if
  protocol.connection_made() was not called yet: if the SSL handshake failed or
  is still in progress. The close() method can be called if the creation of the
  connection is cancelled, by a timeout for example.
* Fix the _SelectorDatagramTransport constructor. Only start reading after
  connection_made() has been called.
* Fix the _SelectorSocketTransport constructor. Only start reading when
  connection_made() has been called: protocol.data_received() must not be
  called before protocol.connection_made().
* Fix SSLProtocol.eof_received(). Wake up the waiter if it is not done yet.
* Close transports on error. Fix create_datagram_endpoint(),
  connect_read_pipe() and connect_write_pipe(): close the transport if the task
  is cancelled or on error.
* Close the transport on subprocess creation failure.
* Fix _ProactorBasePipeTransport.close(). Set the _read_fut attribute to None
  after cancelling it.
* Python issue #23243: Fix _UnixWritePipeTransport.close(). Do nothing if the
  transport is already closed. Before, it was not possible to close the
  transport twice.
* Python issue #23242: SubprocessStreamProtocol now closes the subprocess
  transport at subprocess exit. Also clear its reference to the transport.
* Fix BaseEventLoop._create_connection_transport(). Close the transport if the
  creation of the transport (if the waiter) gets an exception.
* Python issue #23197: On SSL handshake failure, check if the waiter is
  cancelled before setting its exception.
* Python issue #23173: Fix SubprocessStreamProtocol.connection_made() to handle
  a cancelled waiter.
* Python issue #23173: If an exception is raised during the creation of a
  subprocess, kill the subprocess (close pipes, kill and read the return
  status). Log an error in that case.
* Python issue #23209: Break some reference cycles in asyncio. Patch written by
  Martin Richard.
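A sketch of the EINTR handling described in the sock_connect() entry above
(``wait_writable()`` is a hypothetical helper standing in for the real
selector machinery)::

    import errno
    import socket

    def sock_connect(sock, address):
        try:
            sock.connect(address)
        except socket.error as exc:
            if exc.errno != errno.EINTR:
                raise
            # The C connect() was interrupted, but the connection now
            # runs in the background: don't call connect() again; wait
            # until the socket becomes writable instead.
            wait_writable(sock)  # hypothetical helper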
Optimization:

* Only call _check_resolved_address() in debug mode. _check_resolved_address()
  is implemented with getaddrinfo(), which is slow. If available, use
  socket.inet_pton() instead of socket.getaddrinfo(), because it is much
  faster.
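The optimization can be sketched as follows (the helper name is illustrative)::

    import socket

    def _is_numeric_address(family, host):
        # inet_pton() only parses the string and never queries DNS,
        # which makes it much faster than getaddrinfo().
        try:
            socket.inet_pton(family, host)
        except socket.error:
            return False
        return True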
Other changes:

* Python issue #23456: Add missing @coroutine decorators.
* Python issue #23475: Fix test_close_kill_running(). Really kill the child
  process, don't completely mock the Popen.kill() method. This change fixes
  memory leaks and reference leaks.
* BaseSubprocessTransport: repr() mentions when the child process is running.
* BaseSubprocessTransport.close() doesn't try to kill the process if it already
  finished.
* Tulip issue #221: Fix the docstring of QueueEmpty and QueueFull.
* Fix the subprocess_attach_write_pipe example. Close the transport, not
  directly the pipe.
* Python issue #23347: send_signal(), terminate() and kill() don't check if the
  transport was closed. The check broke a Tulip example and this limitation is
  arbitrary: checking if _proc is None should be enough. Also enhance close():
  do nothing when called the second time.
* Python issue #23347: Refactor creation of subprocess transports.
* Python issue #23243: On Python 3.4 and newer, emit a ResourceWarning when an
  event loop or a transport is not explicitly closed.
* tox.ini: enable ResourceWarning warnings.
* Python issue #23243: test_sslproto: close transports explicitly.
* SSL transports now clear their reference to the waiter.
* Python issue #23208: Add BaseEventLoop._current_handle. In debug mode,
  BaseEventLoop._run_once() now sets the BaseEventLoop._current_handle
  attribute to the handle currently being executed.
* Replace test_selectors.py with the file of Python 3.5 adapted for asyncio and
  Python 3.3.
* Tulip issue #184: The FlowControlMixin constructor now gets the event loop if
  the loop parameter is not set.
* _ProactorBasePipeTransport now sets the _sock attribute to None when the
  transport is closed.
* Python issue #23219: cancelling wait_for() now cancels the task.
* Python issue #23243: Explicitly close event loops and transports in tests.
* Python issue #23140: Fix cancellation of Process.wait(). Check the state of
  the waiter future before setting its result.
* Python issue #23046: Expose the BaseEventLoop class in the asyncio namespace.
* Python issue #22926: In debug mode, the call_soon(), call_at() and
  call_later() methods of BaseEventLoop now use the identifier of the current
  thread to ensure that they are called from the thread running the event
  loop. Before, the get_event_loop() method was used to check the thread, and
  no exception was raised when the thread had no event loop. Now the methods
  always raise an exception in debug mode when called from the wrong thread.
  It should help to notice misuse of the API.
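The debug-mode thread check from Python issue #22926 above can be demonstrated
with a sketch like this::

    import threading
    import trollius

    loop = trollius.get_event_loop()
    loop.set_debug(True)

    def cb():
        pass

    def call_from_wrong_thread():
        # This thread runs no event loop, so in debug mode call_soon()
        # raises RuntimeError instead of failing silently.
        try:
            loop.call_soon(cb)
        except RuntimeError as exc:
            print("rejected: %s" % exc)

    thread = threading.Thread(target=call_from_wrong_thread)
    thread.start()
    thread.join()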
2014-12-19: Version 1.0.4
=========================

Changes:

* Python issue #22922: create_task(), call_at(), call_soon(),
  call_soon_threadsafe() and run_in_executor() now raise an error if the event
  loop is closed. Initial patch written by Torsten Landschoff.
* Python issue #22921: Don't require OpenSSL SNI to pass a hostname to ssl
  functions. Patch by Donald Stufft.
* Add run_aiotest.py: run the aiotest test suite.
* tox now also runs the aiotest test suite.
* Python issue #23074: get_event_loop() now raises an exception if the thread
  has no event loop even if assertions are disabled.

Bugfixes:

* Fix a race condition in BaseSubprocessTransport._try_finish(): ensure that
  connection_made() is called before connection_lost().
* Python issue #23009: selectors: make sure EpollSelector.select() works when
  no file descriptor is registered.
* Python issue #22922: Fix ProactorEventLoop.close(). Call
  _stop_accept_futures() before setting the _closed attribute, otherwise
  call_soon() raises an error.
* Python issue #22429: Fix EventLoop.run_until_complete(): don't stop the event
  loop if a BaseException is raised, because the event loop is already stopped.
* Initialize more Future and Task attributes in the class definition to avoid
  attribute errors in destructors.
* Python issue #22685: Set the transport of stdout and stderr StreamReader
  objects in the SubprocessStreamProtocol. This makes it possible to pause the
  transport to not buffer too much stdout or stderr data.
* BaseSelectorEventLoop.close() now closes the self-pipe before calling the
  parent close() method. If the event loop is already closed, the self-pipe is
  not unregistered from the selector.
2014-10-20: Version 1.0.3
=========================

Changes:

* On Python 2 in debug mode, Future.set_exception() now stores the traceback
  object of the exception in addition to the exception object. When a task
  is waiting for another task and the other task raises an exception, the
  traceback object is now copied with the exception. Be careful: storing the
  traceback object may create reference leaks.
* Use ssl.create_default_context() if available to create the default SSL
  context: Python 2.7.9 and newer, or Python 3.4 and newer.
* On Python 3.5 and newer, reuse socket.socketpair() in the windows_utils
  submodule.
* On Python 3.4 and newer, use os.set_inheritable().
* Enhance the protocol representation: add "closed" or "closing" info.
* run_forever() now consumes the BaseException of the temporary task. If the
  coroutine raised a BaseException, consume the exception to not log a warning.
  The caller doesn't have access to the local task.
* Python issue 22448: clean up _run_once(): only iterate once to remove delayed
  calls that were cancelled.
* The destructor of the Return class now shows where the Return object was
  created.
* run_tests.py doesn't catch any exceptions anymore when loading tests, it only
  catches SkipTest.
* Fix (SSL) tests for the future Python 2.7.9 which includes a "new" ssl
  module: the module was backported from Python 3.5.
* BaseEventLoop.add_signal_handler() now raises an exception if the parameter
  is a coroutine function.
* Coroutine functions and objects are now rejected with a TypeError by the
  following functions: add_signal_handler(), call_at(), call_later(),
  call_soon(), call_soon_threadsafe(), run_in_executor().
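The SSL-context selection mentioned above can be sketched as follows
(simplified; the real code also configures certificates, and on very old
Pythons trollius falls back to its own backported SSLContext)::

    import ssl

    def _make_default_context():
        if hasattr(ssl, 'create_default_context'):
            # Python 2.7.9+ and Python 3.4+
            return ssl.create_default_context()
        # Older versions: build a bare context by hand.
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.options |= ssl.OP_NO_SSLv2
        return context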
2014-10-02: Version 1.0.2
=========================

This release fixes bugs. It also provides more information in debug mode on
error.

Major changes:

* Tulip issue #203: Add the _FlowControlMixin.get_write_buffer_limits() method.
* Python issue #22063: socket operations (sock_recv, sock_sendall,
  sock_connect, sock_accept) of SelectorEventLoop now raise an exception in
  debug mode if sockets are in blocking mode.

Major bugfixes:

* Tulip issue #205: Fix a race condition in BaseSelectorEventLoop.sock_connect().
* Tulip issue #201: Fix a race condition in wait_for(). Don't raise a
  TimeoutError if we reached the timeout and the future completed in the same
  iteration of the event loop. A side effect of the bug is that Queue.get()
  loses items.
* PipeServer.close() now cancels the "accept pipe" future, which cancels the
  overlapped operation.

Other changes:

* Python issue #22448: Improve the cleanup of cancelled timer callback handles.
  Patch by Joshua Moore-Oliva.
* Python issue #22369: Change "context manager protocol" to "context management
  protocol". Patch written by Serhiy Storchaka.
* Tulip issue #206: In debug mode, keep the callback in the representation of
  Handle and TimerHandle after cancel().
* Tulip issue #207: Fix test_tasks.test_env_var_debug() to use the correct
  asyncio module.
* runtests.py: display a message to mention if tests are run in debug or
  release mode.
* Tulip issue #200: Log errors in debug mode instead of simply ignoring them.
* Tulip issue #200: _WaitHandleFuture._unregister_wait() now catches and logs
  exceptions.
* The _fatal_error() method of _UnixReadPipeTransport and
  _UnixWritePipeTransport now logs all exceptions in debug mode.
* Fix the debug log in BaseEventLoop.create_connection(): get the socket object
  from the transport, because the SSL transport closes the old socket and
  creates a new SSL socket object.
* Remove the _SelectorSslTransport._rawsock attribute: it contained the closed
  socket (not very useful) and it was not used.
* Fix _SelectorTransport.__repr__() if the transport was closed.
* Use the new os.set_blocking() function of Python 3.5 if available.


2014-07-30: Version 1.0.1
=========================
@@ -48,7 +48,7 @@ copyright = u'2014, Victor Stinner'
# built documents.
#
# The short X.Y version.
version = release = '1.0.1'
version = release = '2.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
doc/dev.rst | 30
@@ -24,9 +24,12 @@ Test Dependencies
-----------------

On Python older than 3.3, unit tests require the `mock
<https://pypi.python.org/pypi/mock>`_ module. Python 2.6 requires also
<https://pypi.python.org/pypi/mock>`_ module. Python 2.6 and 2.7 require also
`unittest2 <https://pypi.python.org/pypi/unittest2>`_.

To run ``run_aiotest.py``, you need the `aiotest
<https://pypi.python.org/pypi/aiotest>`_ test suite: ``pip install aiotest``.


Run tests on UNIX
-----------------
@@ -55,3 +58,28 @@ And coverage as follows::

   C:\Python27\python.exe runtests.py --coverage


CPython bugs
============

The development of asyncio and trollius helped to identify several bugs in
CPython:

* 2.5.0 <= python <= 3.4.2: `sys.exc_info() bug when yield/yield-from is used
  in an except block in a generator (#23353)
  <http://bugs.python.org/issue23353>`_. The fix will be part of Python 3.4.3.
  _UnixSelectorEventLoop._make_subprocess_transport() and
  ProactorEventLoop._make_subprocess_transport() work around the bug.
* python == 3.4.0: `Segfault in gc with cyclic trash (#21435)
  <http://bugs.python.org/issue21435>`_.
  Regression introduced in Python 3.4.0, fixed in Python 3.4.1.
  Status in Ubuntu on February 3rd, 2015: only Ubuntu Trusty (14.04 LTS) is
  impacted (`bug #1367907: Segfault in gc with cyclic trash
  <https://bugs.launchpad.net/ubuntu/+source/python3.4/+bug/1367907>`_, see
  also `update Python3 for trusty #1348954
  <https://bugs.launchpad.net/ubuntu/+source/python3.4/+bug/1348954>`_)
* 3.3.0 <= python <= 3.4.0: `gen.send(tuple) unpacks the tuple instead of
  passing 1 argument (the tuple) when gen is an object with a send() method,
  not a classic generator (#21209) <http://bugs.python.org/21209>`_.
  Regression introduced in Python 3.4.0, fixed in Python 3.4.1.
  trollius.CoroWrapper.send() works around the issue; the bug is checked at
  runtime once, when the module is imported.
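The #21209 behaviour can be reproduced with a sketch like the following (on an
affected interpreter the tuple did not arrive as a single argument)::

    class Receiver(object):
        # An object with a send() method, not a classic generator.
        def __iter__(self):
            return self
        def __next__(self):
            return None
        def send(self, value):
            print("send() received %r" % (value,))
            raise StopIteration

    def delegate():
        yield from Receiver()

    gen = delegate()
    next(gen)
    gen.send((1, 2))   # fixed versions print "send() received (1, 2)"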
@@ -33,7 +33,7 @@ Here is a more detailed list of the package contents:
* an interface for passing work off to a threadpool, for times when you
  absolutely, positively have to use a library that makes blocking I/O calls.

Trollius is a port of the `Tulip project <http://code.google.com/p/tulip/>`_
Trollius is a port of the `asyncio project <https://github.com/python/asyncio>`_
(``asyncio`` module, `PEP 3156 <http://legacy.python.org/dev/peps/pep-3156/>`_)
on Python 2. Trollius works on Python 2.6-3.5. It has been tested on Windows,
Linux, Mac OS X, FreeBSD and OpenIndiana.
@@ -41,12 +41,16 @@ Linux, Mac OS X, FreeBSD and OpenIndiana.
* `Asyncio documentation <http://docs.python.org/dev/library/asyncio.html>`_
* `Trollius documentation <http://trollius.readthedocs.org/>`_ (this document)
* `Trollius project in the Python Cheeseshop (PyPI)
  <https://pypi.python.org/pypi/trollius>`_
* `Trollius project at Bitbucket <https://bitbucket.org/enovance/trollius>`_
  <https://pypi.python.org/pypi/trollius>`_ (download wheel packages and
  tarballs)
* `Trollius project at Github <https://github.com/haypo/trollius>`_
  (bug tracker, source code)
* Mailing list: `python-tulip Google Group
  <https://groups.google.com/forum/?fromgroups#!forum/python-tulip>`_
* IRC: ``#asyncio`` channel on the `Freenode network <https://freenode.net/>`_
* Copyright/license: Open source, Apache 2.0. Enjoy!

See also the `Tulip project <http://code.google.com/p/tulip/>`_ (asyncio module
for Python 3.3).
See also the `asyncio project at Github <https://github.com/python/asyncio>`_.


Table Of Contents
@@ -39,8 +39,8 @@ Trollius on Windows:

.. note::

   Only wheel packages for Python 2.7 are currently distributed on the
   Cheeseshop (PyPI). If you need wheel packages for other Python versions,
   Only wheel packages for Python 2.7, 3.3 and 3.4 are currently distributed on
   the Cheeseshop (PyPI). If you need wheel packages for other Python versions,
   please ask.

Download source code
@@ -49,13 +49,13 @@ Download source code
Command to download the development version of the source code (``trollius``
branch)::

    hg clone 'https://bitbucket.org/enovance/trollius#trollius'
    git clone https://github.com/haypo/trollius.git -b trollius

The actual code lives in the ``trollius`` subdirectory. Tests are in the
``tests`` subdirectory.

See the `trollius project at Bitbucket
<https://bitbucket.org/enovance/trollius>`_.
See the `trollius project at Github
<https://github.com/haypo/trollius>`_.

The source code of the Trollius project is in the ``trollius`` branch of the
Mercurial repository, not in the default branch. The default branch is the
@@ -51,6 +51,9 @@ coroutines.

Effect of the debug mode:

* On Python 2, :meth:`Future.set_exception` stores the traceback, so
  ``loop.run_until_complete()`` raises the exception with the original
  traceback.
* Log coroutines defined but never "yielded"
* BaseEventLoop.call_soon() and BaseEventLoop.call_at() methods raise an
  exception if they are called from the wrong thread.
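Debug mode itself is enabled either programmatically or through the
``TROLLIUSDEBUG`` environment variable (the variable name is taken from the
releaser.conf file included in this change); a minimal sketch::

    import trollius

    loop = trollius.get_event_loop()
    loop.set_debug(True)   # or run with: TROLLIUSDEBUG=1 python script.py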
@@ -45,6 +45,8 @@ def test_call(*args, **kw):
        print("%s: exit code %s" % (' '.join(args), exitcode))
    except asyncio.TimeoutError:
        print("timeout! (%.1f sec)" % timeout)
        proc.kill()
        yield from proc.wait()

loop = asyncio.get_event_loop()
loop.run_until_complete(cat(loop))
@@ -4,7 +4,7 @@ style and uses asyncio.streams.start_server() and
asyncio.streams.open_connection().

Note that running this example starts both the TCP server and client
in the same process. It listens on port 1234 on 127.0.0.1, so it will
in the same process. It listens on port 12345 on 127.0.0.1, so it will
fail if this port is currently in use.
"""
@@ -88,7 +88,7 @@ class MyServer:

    def start(self, loop):
        """
        Starts the TCP server, so that it listens on port 1234.
        Starts the TCP server, so that it listens on port 12345.

        For each client that connects, the accept_client method gets
        called. This method runs the loop until the server sockets
@@ -1,7 +1,7 @@
"""
A variant of simple_tcp_server.py that measures the time it takes to
send N messages for a range of N. (This was O(N**2) in a previous
version of Tulip.)
version of asyncio.)

Note that running this example starts both the TCP server and client
in the same process. It listens on port 1234 on 127.0.0.1, so it will
overlapped.c | 158
@@ -64,12 +64,6 @@ typedef struct {
    };
} OverlappedObject;

typedef struct {
    OVERLAPPED *Overlapped;
    HANDLE IocpHandle;
    char Address[1];
} WaitNamedPipeAndConnectContext;

/*
 * Map Windows error codes to subclasses of OSError
 */
@@ -325,6 +319,29 @@ overlapped_UnregisterWait(PyObject *self, PyObject *args)
    Py_RETURN_NONE;
}

PyDoc_STRVAR(
    UnregisterWaitEx_doc,
    "UnregisterWaitEx(WaitHandle, Event) -> None\n\n"
    "Unregister wait handle.\n");

static PyObject *
overlapped_UnregisterWaitEx(PyObject *self, PyObject *args)
{
    HANDLE WaitHandle, Event;
    BOOL ret;

    if (!PyArg_ParseTuple(args, F_HANDLE F_HANDLE, &WaitHandle, &Event))
        return NULL;

    Py_BEGIN_ALLOW_THREADS
    ret = UnregisterWaitEx(WaitHandle, Event);
    Py_END_ALLOW_THREADS

    if (!ret)
        return SetFromWindowsErr(0);
    Py_RETURN_NONE;
}

/*
 * Event functions -- currently only used by tests
 */
@@ -733,7 +750,7 @@ Overlapped_ReadFile(OverlappedObject *self, PyObject *args)
    switch (err) {
    case ERROR_BROKEN_PIPE:
        mark_as_completed(&self->overlapped);
        Py_RETURN_NONE;
        return SetFromWindowsErr(err);
    case ERROR_SUCCESS:
    case ERROR_MORE_DATA:
    case ERROR_IO_PENDING:
@@ -792,7 +809,7 @@ Overlapped_WSARecv(OverlappedObject *self, PyObject *args)
    switch (err) {
    case ERROR_BROKEN_PIPE:
        mark_as_completed(&self->overlapped);
        Py_RETURN_NONE;
        return SetFromWindowsErr(err);
    case ERROR_SUCCESS:
    case ERROR_MORE_DATA:
    case ERROR_IO_PENDING:
@@ -1128,109 +1145,59 @@ Overlapped_ConnectNamedPipe(OverlappedObject *self, PyObject *args)
    switch (err) {
    case ERROR_PIPE_CONNECTED:
        mark_as_completed(&self->overlapped);
        Py_RETURN_NONE;
        Py_RETURN_TRUE;
    case ERROR_SUCCESS:
    case ERROR_IO_PENDING:
        Py_RETURN_NONE;
        Py_RETURN_FALSE;
    default:
        self->type = TYPE_NOT_STARTED;
        return SetFromWindowsErr(err);
    }
}

/* Unfortunately there is no way to do an overlapped connect to a
   pipe. We instead use WaitNamedPipe() and CreateFile() in a thread
   pool thread. If a connection succeeds within a time limit (10
   seconds) then PostQueuedCompletionStatus() is used to return the
   pipe handle to the completion port. */

static DWORD WINAPI
WaitNamedPipeAndConnectInThread(WaitNamedPipeAndConnectContext *ctx)
{
    HANDLE PipeHandle = INVALID_HANDLE_VALUE;
    DWORD Start = GetTickCount();
    DWORD Deadline = Start + 10*1000;
    DWORD Error = 0;
    DWORD Timeout;
    BOOL Success;

    for ( ; ; ) {
        Timeout = Deadline - GetTickCount();
        if ((int)Timeout < 0)
            break;
        Success = WaitNamedPipe(ctx->Address, Timeout);
        Error = Success ? ERROR_SUCCESS : GetLastError();
        switch (Error) {
        case ERROR_SUCCESS:
            PipeHandle = CreateFile(ctx->Address,
                                    GENERIC_READ | GENERIC_WRITE,
                                    0, NULL, OPEN_EXISTING,
                                    FILE_FLAG_OVERLAPPED, NULL);
            if (PipeHandle == INVALID_HANDLE_VALUE)
                continue;
            break;
        case ERROR_SEM_TIMEOUT:
            continue;
        }
        break;
    }
    if (!PostQueuedCompletionStatus(ctx->IocpHandle, Error,
                                    (ULONG_PTR)PipeHandle, ctx->Overlapped))
        CloseHandle(PipeHandle);
    free(ctx);
    return 0;
}

PyDoc_STRVAR(
    Overlapped_WaitNamedPipeAndConnect_doc,
    "WaitNamedPipeAndConnect(addr, iocp_handle) -> Overlapped[pipe_handle]\n\n"
    "Start overlapped connection to address, notifying iocp_handle when\n"
    "finished");
    ConnectPipe_doc,
    "ConnectPipe(addr) -> pipe_handle\n\n"
    "Connect to the pipe for asynchronous I/O (overlapped).");

static PyObject *
Overlapped_WaitNamedPipeAndConnect(OverlappedObject *self, PyObject *args)
ConnectPipe(OverlappedObject *self, PyObject *args)
{
    PyObject *AddressObj;
    HANDLE PipeHandle;
#ifdef PYTHON3
    wchar_t *Address;

    if (!PyArg_ParseTuple(args, "U", &AddressObj))
        return NULL;

    Address = PyUnicode_AsWideCharString(AddressObj, NULL);
    if (Address == NULL)
        return NULL;

#  define CREATE_FILE CreateFileW
#else
    char *Address;
    Py_ssize_t AddressLength;
    HANDLE IocpHandle;
    OVERLAPPED Overlapped;
    BOOL ret;
    DWORD err;
    WaitNamedPipeAndConnectContext *ctx;
    Py_ssize_t ContextLength;

    if (!PyArg_ParseTuple(args, "s#" F_HANDLE F_POINTER,
                          &Address, &AddressLength, &IocpHandle, &Overlapped))
    if (!PyArg_ParseTuple(args, "s", &Address))
        return NULL;

    if (self->type != TYPE_NONE) {
        PyErr_SetString(PyExc_ValueError, "operation already attempted");
        return NULL;
    }

    ContextLength = (AddressLength +
                     offsetof(WaitNamedPipeAndConnectContext, Address));
    ctx = calloc(1, ContextLength + 1);
    if (ctx == NULL)
        return PyErr_NoMemory();
    memcpy(ctx->Address, Address, AddressLength + 1);
    ctx->Overlapped = &self->overlapped;
    ctx->IocpHandle = IocpHandle;

    self->type = TYPE_WAIT_NAMED_PIPE_AND_CONNECT;
    self->handle = NULL;
#  define CREATE_FILE CreateFileA
#endif

    Py_BEGIN_ALLOW_THREADS
    ret = QueueUserWorkItem(WaitNamedPipeAndConnectInThread, ctx,
                            WT_EXECUTELONGFUNCTION);
    PipeHandle = CREATE_FILE(Address,
                             GENERIC_READ | GENERIC_WRITE,
                             0, NULL, OPEN_EXISTING,
                             FILE_FLAG_OVERLAPPED, NULL);
    Py_END_ALLOW_THREADS

    mark_as_completed(&self->overlapped);

    self->error = err = ret ? ERROR_SUCCESS : GetLastError();
    if (!ret)
        return SetFromWindowsErr(err);
    Py_RETURN_NONE;
#ifdef PYTHON3
    PyMem_Free(Address);
#endif
    if (PipeHandle == INVALID_HANDLE_VALUE)
        return SetFromWindowsErr(0);
    return Py_BuildValue(F_HANDLE, PipeHandle);
}

static PyObject*
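On the Python side, the new boolean result of ConnectNamedPipe() is consumed
roughly as follows (a sketch, simplified from the changelog description;
``_result()``, ``_register()`` and ``finish_accept_pipe`` stand in for the
real proactor completion plumbing)::

    def accept_pipe(self, pipe):
        ov = _overlapped.Overlapped(NULL)
        connected = ov.ConnectNamedPipe(pipe.fileno())

        if connected:
            # ConnectNamedPipe() failed with ERROR_PIPE_CONNECTED:
            # a client connected before the overlapped call, so there
            # is nothing to wait for.
            return self._result(pipe)

        # False: the connection is in progress; register the
        # overlapped operation and wait for its completion.
        return self._register(ov, pipe, finish_accept_pipe)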
@@ -1267,9 +1234,6 @@ static PyMethodDef Overlapped_methods[] = {
     METH_VARARGS, Overlapped_DisconnectEx_doc},
    {"ConnectNamedPipe", (PyCFunction) Overlapped_ConnectNamedPipe,
     METH_VARARGS, Overlapped_ConnectNamedPipe_doc},
    {"WaitNamedPipeAndConnect",
     (PyCFunction) Overlapped_WaitNamedPipeAndConnect,
     METH_VARARGS, Overlapped_WaitNamedPipeAndConnect_doc},
    {NULL}
};
@@ -1347,12 +1311,17 @@ static PyMethodDef overlapped_functions[] = {
     METH_VARARGS, RegisterWaitWithQueue_doc},
    {"UnregisterWait", overlapped_UnregisterWait,
     METH_VARARGS, UnregisterWait_doc},
    {"UnregisterWaitEx", overlapped_UnregisterWaitEx,
     METH_VARARGS, UnregisterWaitEx_doc},
    {"CreateEvent", overlapped_CreateEvent,
     METH_VARARGS, CreateEvent_doc},
    {"SetEvent", overlapped_SetEvent,
     METH_VARARGS, SetEvent_doc},
    {"ResetEvent", overlapped_ResetEvent,
     METH_VARARGS, ResetEvent_doc},
    {"ConnectPipe",
     (PyCFunction) ConnectPipe,
     METH_VARARGS, ConnectPipe_doc},
    {NULL}
};
@@ -1403,6 +1372,7 @@ _init_overlapped(void)
    WINAPI_CONSTANT(F_DWORD, ERROR_IO_PENDING);
    WINAPI_CONSTANT(F_DWORD, ERROR_NETNAME_DELETED);
    WINAPI_CONSTANT(F_DWORD, ERROR_SEM_TIMEOUT);
    WINAPI_CONSTANT(F_DWORD, ERROR_PIPE_BUSY);
    WINAPI_CONSTANT(F_DWORD, INFINITE);
    WINAPI_CONSTANT(F_HANDLE, INVALID_HANDLE_VALUE);
    WINAPI_CONSTANT(F_HANDLE, NULL);
releaser.conf | 7 (new file)
@@ -0,0 +1,7 @@
# Configuration file for the tool "releaser"
# https://bitbucket.org/haypo/misc/src/tip/bin/releaser.py

[project]
name = trollius
debug_env_var = TROLLIUSDEBUG
python_versions = 2.7, 3.3, 3.4
run_aiotest.py | 14 (new file)
@@ -0,0 +1,14 @@
import aiotest.run
import sys
import trollius
if sys.platform == 'win32':
    from trollius.windows_utils import socketpair
else:
    from socket import socketpair

config = aiotest.TestConfig()
config.asyncio = trollius
config.socketpair = socketpair
config.new_event_pool_policy = trollius.DefaultEventLoopPolicy
config.call_soon_check_closed = True
aiotest.run.main(config)
runtests.py | 70
@@ -1,5 +1,5 @@
#!/usr/bin/env python
"""Run Tulip unittests.
"""Run trollius unittests.

Usage:
  python3 runtests.py [flags] [pattern] ...
@@ -29,7 +29,8 @@ import random
import re
import sys
import textwrap
from trollius.compat import PY33
PY2 = (sys.version_info < (3,))
PY33 = (sys.version_info >= (3, 3))
if PY33:
    import importlib.machinery
else:
@@ -38,25 +39,31 @@ try:
    import coverage
except ImportError:
    coverage = None
if sys.version_info < (3,):
if PY2:
    sys.exc_clear()

try:
    import unittest
    from unittest.signals import installHandler
except ImportError:
    import unittest2 as unittest
    from unittest2.signals import installHandler
except ImportError:
    import unittest
    from unittest.signals import installHandler

ARGS = optparse.OptionParser(description="Run all unittests.", usage="%prog [options] [pattern] [pattern2 ...]")
ARGS.add_option(
    '-v', '--verbose', action="store_true", dest='verbose',
    '-v', '--verbose', type=int, dest='verbose',
    default=0, help='verbose')
ARGS.add_option(
    '-x', action="store_true", dest='exclude', help='exclude tests')
ARGS.add_option(
    '-f', '--failfast', action="store_true", default=False,
    dest='failfast', help='Stop on first fail or error')
ARGS.add_option(
    '--no-ssl', action="store_true", default=False,
    help='Disable the SSL module')
ARGS.add_option(
    '--no-concurrent', action="store_true", default=False,
    help='Disable the concurrent module')
ARGS.add_option(
    '-c', '--catch', action="store_true", default=False,
    dest='catchbreak', help='Catch control-C and display results')
@@ -92,6 +99,8 @@ else:


def load_modules(basedir, suffix='.py'):
    import trollius.test_utils

    def list_dir(prefix, dir):
        files = []
@@ -119,7 +128,7 @@ def load_modules(basedir, suffix='.py'):
    for modname, sourcefile in list_dir('', basedir):
        if modname == 'runtests':
            continue
        if modname == 'test_asyncio' and sys.version_info <= (3, 3):
        if modname == 'test_asyncio' and not PY33:
            print("Skipping '{0}': need at least Python 3.3".format(modname),
                  file=sys.stderr)
            continue
@@ -128,7 +137,7 @@ def load_modules(basedir, suffix='.py'):
            mods.append((mod, sourcefile))
        except SyntaxError:
            raise
        except Exception as err:
        except trollius.test_utils.SkipTest as err:
            print("Skipping '{0}': {1}".format(modname, err), file=sys.stderr)

    return mods
@@ -138,7 +147,7 @@ def randomize_tests(tests, seed):
    if seed is None:
        seed = random.randrange(10000000)
    random.seed(seed)
    print("Using random seed", seed)
    print("Randomize test execution order (seed: %s)" % seed)
    random.shuffle(tests._tests)
@@ -222,9 +231,26 @@ class TestRunner(unittest.TextTestRunner):
        return result


def _runtests(args, tests):
    v = 0 if args.quiet else args.verbose + 1
    runner_factory = TestRunner if args.findleaks else unittest.TextTestRunner
    if args.randomize:
        randomize_tests(tests, args.seed)
    runner = runner_factory(verbosity=v, failfast=args.failfast)
    sys.stdout.flush()
    sys.stderr.flush()
    return runner.run(tests)


def runtests():
    args, pattern = ARGS.parse_args()

    if args.no_ssl:
        sys.modules['ssl'] = None

    if args.no_concurrent:
        sys.modules['concurrent'] = None

    if args.coverage and coverage is None:
        URL = "bitbucket.org/pypa/setuptools/raw/bootstrap/ez_setup.py"
        print(textwrap.dedent("""
@@ -255,9 +281,6 @@ def runtests():

    v = 0 if args.quiet else args.verbose + 1
    failfast = args.failfast
    catchbreak = args.catchbreak
    findleaks = args.findleaks
    runner_factory = TestRunner if findleaks else unittest.TextTestRunner

    if args.coverage:
        cov = coverage.coverage(branch=True,
@@ -265,7 +288,6 @@ def runtests():
                                )
        cov.start()

    logger = logging.getLogger()
    if v == 0:
        level = logging.CRITICAL
    elif v == 1:
@@ -279,24 +301,22 @@ def runtests():
    logging.basicConfig(level=level)

    finder = TestsFinder(args.testsdir, includes, excludes)
    if catchbreak:
    if args.catchbreak:
        installHandler()
    import trollius.coroutines
    if trollius.coroutines._DEBUG:
        print("Run tests in debug mode")
    else:
        print("Run tests in release mode")
    try:
        tests = finder.load_tests()
        if args.forever:
            while True:
                tests = finder.load_tests()
                if args.randomize:
                    randomize_tests(tests, args.seed)
                result = runner_factory(verbosity=v,
                                        failfast=failfast).run(tests)
                result = _runtests(args, tests)
                if not result.wasSuccessful():
                    sys.exit(1)
        else:
            tests = finder.load_tests()
            if args.randomize:
                randomize_tests(tests, args.seed)
            result = runner_factory(verbosity=v,
                                    failfast=failfast).run(tests)
            result = _runtests(args, tests)
            sys.exit(not result.wasSuccessful())
    finally:
        if args.coverage:
setup.py | 29
@@ -1,18 +1,21 @@
# Release procedure:
# - fill Tulip changelog
# - run maybe update_tulip.sh
# - run unit tests with concurrent.futures
# - run unit tests without concurrent.futures
# - run unit tests without ssl: set sys.modules['ssl']=None at startup
# - fill trollius changelog
# - run maybe ./update-asyncio-step1.sh
# - run all tests: tox
# - test examples
# - check that "python setup.py sdist" contains all files tracked by
#   the SCM (Mercurial): update MANIFEST.in if needed
# - run test on Windows: releaser.py test
# - update version in setup.py (version) and doc/conf.py (version, release)
# - set release date in doc/changelog.rst
# - hg ci
# - hg tag trollius-VERSION
# - hg push
# - python setup.py register sdist bdist_wheel upload
# - git commit
# - git tag trollius-VERSION
# - git push --tags
# - git push
# - On Linux: python setup.py register sdist bdist_wheel upload
# - On Windows: python releaser.py release
# - increment version in setup.py (version) and doc/conf.py (version, release)
# - hg ci && hg push
# - git commit && git push

import os
import sys
@@ -25,7 +28,7 @@ except ImportError:
    # We won't be able to build the Wheel file on Windows.
    from distutils.core import setup, Extension

with open("README") as fp:
with open("README.rst") as fp:
    long_description = fp.read()

extensions = []
@@ -43,14 +46,14 @@ if sys.version_info < (3,):

install_options = {
    "name": "trollius",
    "version": "1.0.1",
    "version": "2.0",
    "license": "Apache License 2.0",
    "author": 'Victor Stinner',
    "author_email": 'victor.stinner@gmail.com',

    "description": "Port of the Tulip project (asyncio module, PEP 3156) on Python 2",
    "long_description": long_description,
    "url": "https://bitbucket.org/enovance/trollius/",
    "url": "https://github.com/haypo/trollius",

    "classifiers": [
        "Programming Language :: Python",
@@ -2,7 +2,7 @@ from trollius import test_utils
from trollius import From, Return
import trollius
import trollius.coroutines
import unittest
from trollius.test_utils import unittest

try:
    import asyncio
@@ -87,7 +87,7 @@ class AsyncioTests(test_utils.TestCase):
        fut = asyncio.Future()
        self.assertIs(fut._loop, self.loop)

        fut2 = trollius.async(fut)
        fut2 = trollius.ensure_future(fut)
        self.assertIs(fut2, fut)
        self.assertIs(fut._loop, self.loop)
@@ -2,10 +2,11 @@

import errno
import logging
import math
import socket
import sys
import threading
import time
import unittest

import trollius as asyncio
from trollius import Return, From
@@ -14,9 +15,9 @@ from trollius import constants
from trollius import test_utils
from trollius.py33_exceptions import BlockingIOError
from trollius.test_utils import mock
from trollius import test_support as support  # IPV6_ENABLED, gc_collect
from trollius.time_monotonic import time_monotonic
from trollius.test_support import assert_python_ok
from trollius.test_utils import unittest
from trollius import test_support as support


MOCK_ANY = mock.ANY
@@ -53,7 +54,9 @@ class BaseEventLoopTests(test_utils.TestCase):
            NotImplementedError,
            self.loop._make_write_pipe_transport, m, m)
        gen = self.loop._make_subprocess_transport(m, m, m, m, m, m, m)
        self.assertRaises(NotImplementedError, next, iter(gen))
        # self.assertRaises(NotImplementedError, next, iter(gen))
        with self.assertRaises(NotImplementedError):
            gen.send(None)

    def test_close(self):
        self.assertFalse(self.loop.is_closed())
@@ -76,13 +79,6 @@ class BaseEventLoopTests(test_utils.TestCase):
        self.assertFalse(self.loop._scheduled)
        self.assertIn(h, self.loop._ready)

    def test__add_callback_timer(self):
        h = asyncio.TimerHandle(time_monotonic()+10, lambda: False, (),
                                self.loop)

        self.loop._add_callback(h)
        self.assertIn(h, self.loop._scheduled)

    def test__add_callback_cancelled_handle(self):
        h = asyncio.Handle(lambda: False, (), self.loop)
        h.cancel()
@@ -153,28 +149,71 @@ class BaseEventLoopTests(test_utils.TestCase):
|
||||
# are really slow
|
||||
self.assertLessEqual(dt, 0.9, dt)
|
||||
|
||||
def test_assert_is_current_event_loop(self):
|
||||
def check_thread(self, loop, debug):
|
||||
def cb():
|
||||
pass
|
||||
|
||||
other_loop = base_events.BaseEventLoop()
|
||||
other_loop._selector = mock.Mock()
|
||||
asyncio.set_event_loop(other_loop)
|
||||
loop.set_debug(debug)
|
||||
if debug:
|
||||
msg = ("Non-thread-safe operation invoked on an event loop other "
|
||||
"than the current one")
|
||||
with self.assertRaisesRegex(RuntimeError, msg):
|
||||
loop.call_soon(cb)
|
||||
with self.assertRaisesRegex(RuntimeError, msg):
|
||||
loop.call_later(60, cb)
|
||||
with self.assertRaisesRegex(RuntimeError, msg):
|
||||
loop.call_at(loop.time() + 60, cb)
|
||||
else:
|
||||
loop.call_soon(cb)
|
||||
loop.call_later(60, cb)
|
||||
loop.call_at(loop.time() + 60, cb)
|
||||
|
||||
# raise RuntimeError if the event loop is different in debug mode
|
||||
self.loop.set_debug(True)
|
||||
with self.assertRaises(RuntimeError):
|
||||
self.loop.call_soon(cb)
|
||||
with self.assertRaises(RuntimeError):
|
||||
self.loop.call_later(60, cb)
|
||||
with self.assertRaises(RuntimeError):
|
||||
self.loop.call_at(self.loop.time() + 60, cb)
|
||||
def test_check_thread(self):
|
||||
def check_in_thread(loop, event, debug, create_loop, fut):
|
||||
# wait until the event loop is running
|
||||
event.wait()
|
||||
|
||||
try:
|
||||
if create_loop:
|
||||
loop2 = base_events.BaseEventLoop()
|
||||
try:
|
||||
asyncio.set_event_loop(loop2)
|
||||
self.check_thread(loop, debug)
|
||||
finally:
|
||||
asyncio.set_event_loop(None)
|
||||
loop2.close()
|
||||
else:
|
||||
self.check_thread(loop, debug)
|
||||
except Exception as exc:
|
||||
loop.call_soon_threadsafe(fut.set_exception, exc)
|
||||
else:
|
||||
loop.call_soon_threadsafe(fut.set_result, None)
|
||||
|
||||
def test_thread(loop, debug, create_loop=False):
|
||||
event = threading.Event()
|
||||
fut = asyncio.Future(loop=loop)
|
||||
loop.call_soon(event.set)
|
||||
args = (loop, event, debug, create_loop, fut)
|
||||
thread = threading.Thread(target=check_in_thread, args=args)
|
||||
thread.start()
|
||||
loop.run_until_complete(fut)
|
||||
thread.join()
|
||||
|
||||
self.loop._process_events = mock.Mock()
|
||||
self.loop._write_to_self = mock.Mock()
|
||||
|
||||
# raise RuntimeError if the thread has no event loop
|
||||
test_thread(self.loop, True)
|
||||
|
||||
# check disabled if debug mode is disabled
|
||||
self.loop.set_debug(False)
|
||||
self.loop.call_soon(cb)
|
||||
self.loop.call_later(60, cb)
|
||||
self.loop.call_at(self.loop.time() + 60, cb)
|
||||
test_thread(self.loop, False)
|
||||
|
||||
# raise RuntimeError if the event loop of the thread is not the called
|
||||
# event loop
|
||||
test_thread(self.loop, True, create_loop=True)
|
||||
|
||||
# check disabled if debug mode is disabled
|
||||
test_thread(self.loop, False, create_loop=True)
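
The helpers above pin down the loop's thread-affinity check: in debug mode, calling call_soon(), call_later() or call_at() from a thread whose current event loop is not the called loop raises RuntimeError. The supported cross-thread entry point is call_soon_threadsafe(). A minimal sketch of the correct pattern (illustrative, not part of this patch):

import threading
import trollius as asyncio

def notify(msg):
    print(msg)

def worker(loop):
    # call_soon() would raise RuntimeError here in debug mode;
    # call_soon_threadsafe() also wakes the loop via its self-pipe.
    loop.call_soon_threadsafe(notify, 'hello from worker thread')
    loop.call_soon_threadsafe(loop.stop)

loop = asyncio.get_event_loop()
threading.Thread(target=worker, args=(loop,)).start()
loop.run_forever()
loop.close()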
|
||||
|
||||
def test_run_once_in_executor_handle(self):
|
||||
def cb():
|
||||
@@ -246,7 +285,8 @@ class BaseEventLoopTests(test_utils.TestCase):
|
||||
@mock.patch('trollius.base_events.logger')
|
||||
def test__run_once_logging(self, m_logger):
|
||||
def slow_select(timeout):
|
||||
# Sleep a bit longer than a second to avoid timer resolution issues.
|
||||
# Sleep a bit longer than a second to avoid timer resolution
|
||||
# issues.
|
||||
time.sleep(1.1)
|
||||
return []
|
||||
|
||||
@@ -284,6 +324,82 @@ class BaseEventLoopTests(test_utils.TestCase):
|
||||
self.assertTrue(non_local['processed'])
|
||||
self.assertEqual([non_local['handle']], list(self.loop._ready))
|
||||
|
||||
def test__run_once_cancelled_event_cleanup(self):
|
||||
self.loop._process_events = mock.Mock()
|
||||
|
||||
self.assertTrue(
|
||||
0 < base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION < 1.0)
|
||||
|
||||
def cb():
|
||||
pass
|
||||
|
||||
# Set up one "blocking" event that will not be cancelled to
|
||||
# ensure later cancelled events do not make it to the head
|
||||
# of the queue and get cleaned.
|
||||
not_cancelled_count = 1
|
||||
self.loop.call_later(3000, cb)
|
||||
|
||||
# Add fewer than the threshold (base_events._MIN_SCHEDULED_TIMER_HANDLES)
|
||||
# cancelled handles; ensure they aren't removed
|
||||
|
||||
cancelled_count = 2
|
||||
for x in range(2):
|
||||
h = self.loop.call_later(3600, cb)
|
||||
h.cancel()
|
||||
|
||||
# Add some cancelled events that will be at head and removed
|
||||
cancelled_count += 2
|
||||
for x in range(2):
|
||||
h = self.loop.call_later(100, cb)
|
||||
h.cancel()
|
||||
|
||||
# This test is invalid if _MIN_SCHEDULED_TIMER_HANDLES is too low
|
||||
self.assertLessEqual(cancelled_count + not_cancelled_count,
|
||||
base_events._MIN_SCHEDULED_TIMER_HANDLES)
|
||||
|
||||
self.assertEqual(self.loop._timer_cancelled_count, cancelled_count)
|
||||
|
||||
self.loop._run_once()
|
||||
|
||||
cancelled_count -= 2
|
||||
|
||||
self.assertEqual(self.loop._timer_cancelled_count, cancelled_count)
|
||||
|
||||
self.assertEqual(len(self.loop._scheduled),
|
||||
cancelled_count + not_cancelled_count)
|
||||
|
||||
# Need enough events to pass _MIN_CANCELLED_TIMER_HANDLES_FRACTION
|
||||
# so that deletion of cancelled events will occur on next _run_once
|
||||
add_cancel_count = int(math.ceil(
|
||||
base_events._MIN_SCHEDULED_TIMER_HANDLES *
|
||||
base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION)) + 1
|
||||
|
||||
add_not_cancel_count = max(base_events._MIN_SCHEDULED_TIMER_HANDLES -
|
||||
add_cancel_count, 0)
|
||||
|
||||
# Add some events that will not be cancelled
|
||||
not_cancelled_count += add_not_cancel_count
|
||||
for x in range(add_not_cancel_count):
|
||||
self.loop.call_later(3600, cb)
|
||||
|
||||
# Add enough cancelled events
|
||||
cancelled_count += add_cancel_count
|
||||
for x in range(add_cancel_count):
|
||||
h = self.loop.call_later(3600, cb)
|
||||
h.cancel()
|
||||
|
||||
# Ensure all handles are still scheduled
|
||||
self.assertEqual(len(self.loop._scheduled),
|
||||
cancelled_count + not_cancelled_count)
|
||||
|
||||
self.loop._run_once()
|
||||
|
||||
# Ensure cancelled events were removed
|
||||
self.assertEqual(len(self.loop._scheduled), not_cancelled_count)
|
||||
|
||||
# Ensure only uncancelled events remain scheduled
|
||||
self.assertTrue(all([not x._cancelled for x in self.loop._scheduled]))
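
The test above exercises the heuristic _run_once() uses to purge cancelled timers: nothing is removed until the scheduled heap exceeds _MIN_SCHEDULED_TIMER_HANDLES entries and cancelled handles exceed _MIN_CANCELLED_TIMER_HANDLES_FRACTION of it. A sketch of that condition, modelled on the upstream asyncio implementation that Trollius tracks (the details in this patch may differ slightly):

import heapq

_MIN_SCHEDULED_TIMER_HANDLES = 100
_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5

def maybe_purge(scheduled, timer_cancelled_count):
    """Rebuild the timer heap once cancelled handles dominate it."""
    if (len(scheduled) > _MIN_SCHEDULED_TIMER_HANDLES and
            float(timer_cancelled_count) / len(scheduled) >
                _MIN_CANCELLED_TIMER_HANDLES_FRACTION):
        scheduled = [h for h in scheduled if not h._cancelled]
        heapq.heapify(scheduled)  # restore the heap invariant
        timer_cancelled_count = 0
    return scheduled, timer_cancelled_count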
|
||||
|
||||
def test_run_until_complete_type_error(self):
|
||||
self.assertRaises(TypeError,
|
||||
self.loop.run_until_complete, 'blah')
|
||||
@@ -291,6 +407,7 @@ class BaseEventLoopTests(test_utils.TestCase):
|
||||
def test_run_until_complete_loop(self):
|
||||
task = asyncio.Future(loop=self.loop)
|
||||
other_loop = self.new_test_loop()
|
||||
self.addCleanup(other_loop.close)
|
||||
self.assertRaises(ValueError,
|
||||
other_loop.run_until_complete, task)
|
||||
|
||||
@@ -302,7 +419,7 @@ class BaseEventLoopTests(test_utils.TestCase):
|
||||
self.loop.run_until_complete, self.loop.subprocess_exec,
|
||||
asyncio.SubprocessProtocol)
|
||||
|
||||
# exepected multiple arguments, not a list
|
||||
# expected multiple arguments, not a list
|
||||
self.assertRaises(TypeError,
|
||||
self.loop.run_until_complete, self.loop.subprocess_exec,
|
||||
asyncio.SubprocessProtocol, args)
|
||||
@@ -382,7 +499,7 @@ class BaseEventLoopTests(test_utils.TestCase):
|
||||
|
||||
# Test Future.__del__
|
||||
with mock.patch('trollius.base_events.logger') as log:
|
||||
fut = asyncio.async(zero_error_coro(), loop=self.loop)
|
||||
fut = asyncio.ensure_future(zero_error_coro(), loop=self.loop)
|
||||
fut.add_done_callback(lambda *args: self.loop.stop())
|
||||
self.loop.run_forever()
|
||||
fut = None # Trigger Future.__del__ or futures._TracebackLogger
|
||||
@@ -473,6 +590,7 @@ class BaseEventLoopTests(test_utils.TestCase):
|
||||
raise ValueError('spam')
|
||||
|
||||
loop = Loop()
|
||||
self.addCleanup(loop.close)
|
||||
asyncio.set_event_loop(loop)
|
||||
|
||||
def run_loop():
|
||||
@@ -506,18 +624,54 @@ class BaseEventLoopTests(test_utils.TestCase):
|
||||
self.assertIs(type(context['context']['exception']),
|
||||
ZeroDivisionError)
|
||||
|
||||
def test_set_task_factory_invalid(self):
|
||||
with self.assertRaisesRegex(
|
||||
TypeError, 'task factory must be a callable or None'):
|
||||
|
||||
self.loop.set_task_factory(1)
|
||||
|
||||
self.assertIsNone(self.loop.get_task_factory())
|
||||
|
||||
def test_set_task_factory(self):
|
||||
self.loop._process_events = mock.Mock()
|
||||
|
||||
class MyTask(asyncio.Task):
|
||||
pass
|
||||
|
||||
@asyncio.coroutine
|
||||
def coro():
|
||||
pass
|
||||
|
||||
factory = lambda loop, coro: MyTask(coro, loop=loop)
|
||||
|
||||
self.assertIsNone(self.loop.get_task_factory())
|
||||
self.loop.set_task_factory(factory)
|
||||
self.assertIs(self.loop.get_task_factory(), factory)
|
||||
|
||||
task = self.loop.create_task(coro())
|
||||
self.assertTrue(isinstance(task, MyTask))
|
||||
self.loop.run_until_complete(task)
|
||||
|
||||
self.loop.set_task_factory(None)
|
||||
self.assertIsNone(self.loop.get_task_factory())
|
||||
|
||||
task = self.loop.create_task(coro())
|
||||
self.assertTrue(isinstance(task, asyncio.Task))
|
||||
self.assertFalse(isinstance(task, MyTask))
|
||||
self.loop.run_until_complete(task)
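
set_task_factory() is the hook for substituting a Task subclass, as the test demonstrates. A sketch of a practical factory (hypothetical names; it only assumes the documented (loop, coro) factory signature):

import trollius as asyncio

class CountingTask(asyncio.Task):
    """Task subclass that counts how many tasks the loop created."""
    created = 0

    def __init__(self, coro, loop=None):
        super(CountingTask, self).__init__(coro, loop=loop)
        CountingTask.created += 1

loop = asyncio.get_event_loop()
loop.set_task_factory(lambda lp, coro: CountingTask(coro, loop=lp))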
|
||||
|
||||
def test_env_var_debug(self):
|
||||
code = '\n'.join((
|
||||
'import trollius',
|
||||
'loop = trollius.get_event_loop()',
|
||||
'print(loop.get_debug())'))
|
||||
|
||||
sts, stdout, stderr = assert_python_ok('-c', code,
|
||||
TROLLIUSDEBUG='')
|
||||
sts, stdout, stderr = support.assert_python_ok('-c', code,
|
||||
TROLLIUSDEBUG='')
|
||||
self.assertEqual(stdout.rstrip(), b'False')
|
||||
|
||||
sts, stdout, stderr = assert_python_ok('-c', code,
|
||||
TROLLIUSDEBUG='1')
|
||||
sts, stdout, stderr = support.assert_python_ok('-c', code,
|
||||
TROLLIUSDEBUG='1')
|
||||
self.assertEqual(stdout.rstrip(), b'True')
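
TROLLIUSDEBUG mirrors asyncio's PYTHONASYNCIODEBUG variable: an empty value leaves debug mode off, a non-empty value (the test uses '1') turns it on for new event loops. A self-contained check along the same lines as the test (a sketch):

import os
import subprocess
import sys

code = ('import trollius; '
        'print(trollius.get_event_loop().get_debug())')
env = dict(os.environ, TROLLIUSDEBUG='1')
out = subprocess.check_output([sys.executable, '-c', code], env=env)
assert out.strip() == b'True'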
|
||||
|
||||
def test_create_task(self):
|
||||
@@ -536,13 +690,58 @@ class BaseEventLoopTests(test_utils.TestCase):
|
||||
self.set_event_loop(loop)
|
||||
|
||||
coro = test()
|
||||
task = asyncio.async(coro, loop=loop)
|
||||
task = asyncio.ensure_future(coro, loop=loop)
|
||||
self.assertIsInstance(task, MyTask)
|
||||
|
||||
# make warnings quiet
|
||||
task._log_destroy_pending = False
|
||||
coro.close()
|
||||
|
||||
def test_run_forever_keyboard_interrupt(self):
|
||||
# Python issue #22601: ensure that the temporary task created by
|
||||
# run_forever() consumes the KeyboardInterrupt and does not log
|
||||
# a warning
|
||||
@asyncio.coroutine
|
||||
def raise_keyboard_interrupt():
|
||||
raise KeyboardInterrupt
|
||||
|
||||
self.loop._process_events = mock.Mock()
|
||||
self.loop.call_exception_handler = mock.Mock()
|
||||
|
||||
try:
|
||||
self.loop.run_until_complete(raise_keyboard_interrupt())
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
self.loop.close()
|
||||
support.gc_collect()
|
||||
|
||||
self.assertFalse(self.loop.call_exception_handler.called)
|
||||
|
||||
def test_run_until_complete_baseexception(self):
|
||||
# Python issue #22429: run_until_complete() must not schedule a pending
|
||||
# call to stop() if the future raised a BaseException
|
||||
@asyncio.coroutine
|
||||
def raise_keyboard_interrupt():
|
||||
raise KeyboardInterrupt
|
||||
|
||||
self.loop._process_events = mock.Mock()
|
||||
|
||||
try:
|
||||
self.loop.run_until_complete(raise_keyboard_interrupt())
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
|
||||
def func():
|
||||
self.loop.stop()
|
||||
func.called = True
|
||||
func.called = False
|
||||
try:
|
||||
self.loop.call_soon(func)
|
||||
self.loop.run_forever()
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
self.assertTrue(func.called)
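
The fix verified here keeps the run_until_complete() done-callback from leaking a stop() into a later run_forever() call. The upstream asyncio fix for issue #22429 has roughly this shape (a sketch, not the literal Trollius code):

def _run_until_complete_cb(fut):
    exc = fut._exception
    if isinstance(exc, BaseException) and not isinstance(exc, Exception):
        # KeyboardInterrupt/SystemExit already unwound run_forever();
        # scheduling stop() now would abort the *next* run_forever().
        return
    fut._loop.stop()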
|
||||
|
||||
|
||||
class MyProto(asyncio.Protocol):
|
||||
done = None
|
||||
@@ -793,6 +992,9 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
|
||||
class _SelectorTransportMock:
|
||||
_sock = None
|
||||
|
||||
def get_extra_info(self, key):
|
||||
return mock.Mock()
|
||||
|
||||
def close(self):
|
||||
self._sock.close()
|
||||
|
||||
@@ -957,8 +1159,8 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
|
||||
self.assertRaises(
|
||||
socket.error, self.loop.run_until_complete, coro)
|
||||
|
||||
@test_utils.skipUnless(support.IPV6_ENABLED,
|
||||
'IPv6 not supported or enabled')
|
||||
@unittest.skipUnless(support.IPV6_ENABLED,
|
||||
'IPv6 not supported or enabled')
|
||||
def test_create_datagram_endpoint_no_matching_family(self):
|
||||
coro = self.loop.create_datagram_endpoint(
|
||||
asyncio.DatagramProtocol,
|
||||
@@ -1026,19 +1228,23 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
|
||||
|
||||
def test_call_coroutine(self):
|
||||
@asyncio.coroutine
|
||||
def coroutine_function():
|
||||
def simple_coroutine():
|
||||
pass
|
||||
|
||||
with self.assertRaises(TypeError):
|
||||
self.loop.call_soon(coroutine_function)
|
||||
with self.assertRaises(TypeError):
|
||||
self.loop.call_soon_threadsafe(coroutine_function)
|
||||
with self.assertRaises(TypeError):
|
||||
self.loop.call_later(60, coroutine_function)
|
||||
with self.assertRaises(TypeError):
|
||||
self.loop.call_at(self.loop.time() + 60, coroutine_function)
|
||||
with self.assertRaises(TypeError):
|
||||
self.loop.run_in_executor(None, coroutine_function)
|
||||
coro_func = simple_coroutine
|
||||
coro_obj = coro_func()
|
||||
self.addCleanup(coro_obj.close)
|
||||
for func in (coro_func, coro_obj):
|
||||
with self.assertRaises(TypeError):
|
||||
self.loop.call_soon(func)
|
||||
with self.assertRaises(TypeError):
|
||||
self.loop.call_soon_threadsafe(func)
|
||||
with self.assertRaises(TypeError):
|
||||
self.loop.call_later(60, func)
|
||||
with self.assertRaises(TypeError):
|
||||
self.loop.call_at(self.loop.time() + 60, func)
|
||||
with self.assertRaises(TypeError):
|
||||
self.loop.run_in_executor(None, func)
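
What these TypeErrors enforce: the call_* scheduling APIs take plain callables only. Coroutine functions and coroutine objects are scheduled by wrapping them in a task instead (a minimal sketch):

import trollius as asyncio
from trollius import coroutine

@coroutine
def job():
    pass

loop = asyncio.get_event_loop()
task = asyncio.ensure_future(job(), loop=loop)  # not loop.call_soon(job)
loop.run_until_complete(task)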
|
||||
|
||||
@mock.patch('trollius.base_events.logger')
|
||||
def test_log_slow_callbacks(self, m_logger):
|
||||
@@ -1060,15 +1266,17 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
|
||||
fmt = m_logger.warning.call_args[0][0]
|
||||
args = m_logger.warning.call_args[0][1:]
|
||||
self.assertRegex(fmt % tuple(args),
|
||||
"^Executing <Handle.*stop_loop_cb.*> took .* seconds$")
|
||||
"^Executing <Handle.*stop_loop_cb.*> "
|
||||
"took .* seconds$")
|
||||
|
||||
# slow task
|
||||
asyncio.async(stop_loop_coro(self.loop), loop=self.loop)
|
||||
asyncio.ensure_future(stop_loop_coro(self.loop), loop=self.loop)
|
||||
self.loop.run_forever()
|
||||
fmt = m_logger.warning.call_args[0][0]
|
||||
args = m_logger.warning.call_args[0][1:]
|
||||
self.assertRegex(fmt % tuple(args),
|
||||
"^Executing <Task.*stop_loop_coro.*> took .* seconds$")
|
||||
"^Executing <Task.*stop_loop_coro.*> "
|
||||
"took .* seconds$")
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
@@ -13,16 +13,12 @@ import subprocess
|
||||
import sys
|
||||
import threading
|
||||
import errno
|
||||
import unittest
|
||||
import weakref
|
||||
|
||||
try:
|
||||
import ssl
|
||||
except ImportError:
|
||||
ssl = None
|
||||
HAS_SNI = False
|
||||
else:
|
||||
HAS_SNI = getattr(ssl, 'HAS_SNI', False)
|
||||
|
||||
try:
|
||||
import concurrent
|
||||
@@ -33,14 +29,19 @@ from trollius import Return, From
|
||||
from trollius import futures
|
||||
|
||||
import trollius as asyncio
|
||||
from trollius import compat
|
||||
from trollius import events
|
||||
from trollius import proactor_events
|
||||
from trollius import selector_events
|
||||
from trollius import sslproto
|
||||
from trollius import test_support as support
|
||||
from trollius import test_utils
|
||||
from trollius.test_utils import unittest
|
||||
from trollius.py33_exceptions import (wrap_error,
|
||||
BlockingIOError, ConnectionRefusedError,
|
||||
FileNotFoundError)
|
||||
from trollius.test_utils import mock
|
||||
from trollius.time_monotonic import time_monotonic
|
||||
from trollius import test_support as support # find_unused_port, IPV6_ENABLED, TEST_HOME_DIR
|
||||
|
||||
|
||||
def data_file(filename):
|
||||
@@ -63,6 +64,11 @@ def osx_tiger():
|
||||
return version < (10, 5)
|
||||
|
||||
|
||||
def skip_if_backported_sslcontext():
|
||||
backported = getattr(asyncio, 'BACKPORT_SSL_CONTEXT', False)
|
||||
return unittest.skipIf(backported, 'need ssl.SSLContext')
|
||||
|
||||
|
||||
ONLYCERT = data_file('ssl_cert.pem')
|
||||
ONLYKEY = data_file('ssl_key.pem')
|
||||
SIGNED_CERTFILE = data_file('keycert3.pem')
|
||||
@@ -240,7 +246,8 @@ class EventLoopTestsMixin(object):
|
||||
|
||||
def tearDown(self):
|
||||
# just in case we have transport close callbacks
|
||||
test_utils.run_briefly(self.loop)
|
||||
if not self.loop.is_closed():
|
||||
test_utils.run_briefly(self.loop)
|
||||
|
||||
self.loop.close()
|
||||
gc.collect()
|
||||
@@ -341,7 +348,7 @@ class EventLoopTestsMixin(object):
|
||||
self.loop.run_forever()
|
||||
self.assertEqual(results, ['hello', 'world'])
|
||||
|
||||
@test_utils.skipIf(concurrent is None, 'need concurrent.futures')
|
||||
@unittest.skipIf(concurrent is None, 'need concurrent.futures')
|
||||
def test_run_in_executor(self):
|
||||
def run(arg):
|
||||
return (arg, threading.current_thread().ident)
|
||||
@@ -399,6 +406,25 @@ class EventLoopTestsMixin(object):
|
||||
self.assertEqual(read, data)
|
||||
|
||||
def _basetest_sock_client_ops(self, httpd, sock):
|
||||
if not isinstance(self.loop, proactor_events.BaseProactorEventLoop):
|
||||
# in debug mode, socket operations must fail
|
||||
# if the socket is in blocking mode
|
||||
self.loop.set_debug(True)
|
||||
sock.setblocking(True)
|
||||
with self.assertRaises(ValueError):
|
||||
self.loop.run_until_complete(
|
||||
self.loop.sock_connect(sock, httpd.address))
|
||||
with self.assertRaises(ValueError):
|
||||
self.loop.run_until_complete(
|
||||
self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
|
||||
with self.assertRaises(ValueError):
|
||||
self.loop.run_until_complete(
|
||||
self.loop.sock_recv(sock, 1024))
|
||||
with self.assertRaises(ValueError):
|
||||
self.loop.run_until_complete(
|
||||
self.loop.sock_accept(sock))
|
||||
|
||||
# test in non-blocking mode
|
||||
sock.setblocking(False)
|
||||
self.loop.run_until_complete(
|
||||
self.loop.sock_connect(sock, httpd.address))
|
||||
@@ -417,7 +443,7 @@ class EventLoopTestsMixin(object):
|
||||
sock = socket.socket()
|
||||
self._basetest_sock_client_ops(httpd, sock)
|
||||
|
||||
@test_utils.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
|
||||
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
|
||||
def test_unix_sock_client_ops(self):
|
||||
with test_utils.run_test_unix_server() as httpd:
|
||||
sock = socket.socket(socket.AF_UNIX)
|
||||
@@ -457,7 +483,7 @@ class EventLoopTestsMixin(object):
|
||||
conn.close()
|
||||
listener.close()
|
||||
|
||||
@test_utils.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL')
|
||||
@unittest.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL')
|
||||
def test_add_signal_handler(self):
|
||||
non_local = {'caught': 0}
|
||||
|
||||
@@ -500,7 +526,7 @@ class EventLoopTestsMixin(object):
|
||||
# Removing again returns False.
|
||||
self.assertFalse(self.loop.remove_signal_handler(signal.SIGINT))
|
||||
|
||||
@test_utils.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
|
||||
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
|
||||
def test_signal_handling_while_selecting(self):
|
||||
# Test with a signal actually arriving during a select() call.
|
||||
non_local = {'caught': 0}
|
||||
@@ -515,7 +541,7 @@ class EventLoopTestsMixin(object):
|
||||
self.loop.run_forever()
|
||||
self.assertEqual(non_local['caught'], 1)
|
||||
|
||||
@test_utils.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
|
||||
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
|
||||
def test_signal_handling_args(self):
|
||||
some_args = (42,)
|
||||
non_local = {'caught': 0}
|
||||
@@ -548,7 +574,7 @@ class EventLoopTestsMixin(object):
|
||||
lambda: MyProto(loop=self.loop), *httpd.address)
|
||||
self._basetest_create_connection(conn_fut)
|
||||
|
||||
@test_utils.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
|
||||
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
|
||||
def test_create_unix_connection(self):
|
||||
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
|
||||
# zero-length address for UNIX socket.
|
||||
@@ -599,31 +625,75 @@ class EventLoopTestsMixin(object):
|
||||
self.assertGreater(pr.nbytes, 0)
|
||||
tr.close()
|
||||
|
||||
@test_utils.skipIf(ssl is None, 'No ssl module')
|
||||
def _test_create_ssl_connection(self, httpd, create_connection,
|
||||
check_sockname=True):
|
||||
conn_fut = create_connection(ssl=test_utils.dummy_ssl_context())
|
||||
self._basetest_create_ssl_connection(conn_fut, check_sockname)
|
||||
|
||||
# ssl.Purpose was introduced in Python 3.4
|
||||
if hasattr(ssl, 'Purpose'):
|
||||
def _dummy_ssl_create_context(purpose=ssl.Purpose.SERVER_AUTH,
|
||||
cafile=None, capath=None,
|
||||
cadata=None):
|
||||
"""
|
||||
An ssl.create_default_context() replacement that doesn't enable
|
||||
cert validation.
|
||||
"""
|
||||
self.assertEqual(purpose, ssl.Purpose.SERVER_AUTH)
|
||||
return test_utils.dummy_ssl_context()
|
||||
|
||||
# With ssl=True, ssl.create_default_context() should be called
|
||||
with mock.patch('ssl.create_default_context',
|
||||
side_effect=_dummy_ssl_create_context) as m:
|
||||
conn_fut = create_connection(ssl=True)
|
||||
self._basetest_create_ssl_connection(conn_fut, check_sockname)
|
||||
self.assertEqual(m.call_count, 1)
|
||||
|
||||
if not asyncio.BACKPORT_SSL_CONTEXT:
|
||||
# With the real ssl.create_default_context(), certificate
|
||||
# validation will fail
|
||||
with self.assertRaises(ssl.SSLError) as cm:
|
||||
conn_fut = create_connection(ssl=True)
|
||||
# Ignore the "SSL handshake failed" log in debug mode
|
||||
with test_utils.disable_logger():
|
||||
self._basetest_create_ssl_connection(conn_fut, check_sockname)
|
||||
|
||||
# Test for Python 3.2
|
||||
if hasattr(ssl.SSLError, 'reason'):
|
||||
self.assertEqual(cm.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
|
||||
|
||||
@unittest.skipIf(ssl is None, 'No ssl module')
|
||||
def test_create_ssl_connection(self):
|
||||
with test_utils.run_test_server(use_ssl=True) as httpd:
|
||||
conn_fut = self.loop.create_connection(
|
||||
create_connection = functools.partial(
|
||||
self.loop.create_connection,
|
||||
lambda: MyProto(loop=self.loop),
|
||||
*httpd.address,
|
||||
ssl=test_utils.dummy_ssl_context())
|
||||
*httpd.address)
|
||||
self._test_create_ssl_connection(httpd, create_connection)
|
||||
|
||||
self._basetest_create_ssl_connection(conn_fut)
|
||||
def test_legacy_create_ssl_connection(self):
|
||||
with test_utils.force_legacy_ssl_support():
|
||||
self.test_create_ssl_connection()
|
||||
|
||||
@test_utils.skipIf(ssl is None, 'No ssl module')
|
||||
@test_utils.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
|
||||
@unittest.skipIf(ssl is None, 'No ssl module')
|
||||
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
|
||||
def test_create_ssl_unix_connection(self):
|
||||
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
|
||||
# zero-length address for UNIX socket.
|
||||
check_sockname = not osx_tiger()
|
||||
|
||||
with test_utils.run_test_unix_server(use_ssl=True) as httpd:
|
||||
conn_fut = self.loop.create_unix_connection(
|
||||
lambda: MyProto(loop=self.loop),
|
||||
httpd.address,
|
||||
ssl=test_utils.dummy_ssl_context(),
|
||||
create_connection = functools.partial(
|
||||
self.loop.create_unix_connection,
|
||||
lambda: MyProto(loop=self.loop), httpd.address,
|
||||
server_hostname='127.0.0.1')
|
||||
|
||||
self._basetest_create_ssl_connection(conn_fut, check_sockname)
|
||||
self._test_create_ssl_connection(httpd, create_connection,
|
||||
check_sockname)
|
||||
|
||||
def test_legacy_create_ssl_unix_connection(self):
|
||||
with test_utils.force_legacy_ssl_support():
|
||||
self.test_create_ssl_unix_connection()
|
||||
|
||||
def test_create_connection_local_addr(self):
|
||||
with test_utils.run_test_server() as httpd:
|
||||
@@ -692,7 +762,7 @@ class EventLoopTestsMixin(object):
|
||||
|
||||
return server, path
|
||||
|
||||
@test_utils.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
|
||||
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
|
||||
def test_create_unix_server(self):
|
||||
proto = MyProto(loop=self.loop)
|
||||
server, path = self._make_unix_server(lambda: proto)
|
||||
@@ -720,7 +790,7 @@ class EventLoopTestsMixin(object):
|
||||
# close server
|
||||
server.close()
|
||||
|
||||
@test_utils.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
|
||||
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
|
||||
def test_create_unix_server_path_socket_error(self):
|
||||
proto = MyProto(loop=self.loop)
|
||||
sock = socket.socket()
|
||||
@@ -755,7 +825,7 @@ class EventLoopTestsMixin(object):
|
||||
sslcontext = self._create_ssl_context(certfile, keyfile)
|
||||
return self._make_unix_server(factory, ssl=sslcontext)
|
||||
|
||||
@test_utils.skipIf(ssl is None, 'No ssl module')
|
||||
@unittest.skipIf(ssl is None, 'No ssl module')
|
||||
def test_create_server_ssl(self):
|
||||
proto = MyProto(loop=self.loop)
|
||||
server, host, port = self._make_ssl_server(
|
||||
@@ -789,8 +859,12 @@ class EventLoopTestsMixin(object):
|
||||
# stop serving
|
||||
server.close()
|
||||
|
||||
@test_utils.skipIf(ssl is None, 'No ssl module')
|
||||
@test_utils.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
|
||||
def test_legacy_create_server_ssl(self):
|
||||
with test_utils.force_legacy_ssl_support():
|
||||
self.test_create_server_ssl()
|
||||
|
||||
@unittest.skipIf(ssl is None, 'No ssl module')
|
||||
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
|
||||
def test_create_unix_server_ssl(self):
|
||||
proto = MyProto(loop=self.loop)
|
||||
server, path = self._make_ssl_unix_server(
|
||||
@@ -820,16 +894,19 @@ class EventLoopTestsMixin(object):
|
||||
# stop serving
|
||||
server.close()
|
||||
|
||||
@test_utils.skipIf(ssl is None, 'No ssl module')
|
||||
@test_utils.skipUnless(HAS_SNI, 'No SNI support in ssl module')
|
||||
def test_legacy_create_unix_server_ssl(self):
|
||||
with test_utils.force_legacy_ssl_support():
|
||||
self.test_create_unix_server_ssl()
|
||||
|
||||
@unittest.skipIf(ssl is None, 'No ssl module')
|
||||
@skip_if_backported_sslcontext()
|
||||
def test_create_server_ssl_verify_failed(self):
|
||||
proto = MyProto(loop=self.loop)
|
||||
server, host, port = self._make_ssl_server(
|
||||
lambda: proto, SIGNED_CERTFILE)
|
||||
|
||||
sslcontext_client = asyncio.SSLContext(ssl.PROTOCOL_SSLv23)
|
||||
if not asyncio.BACKPORT_SSL_CONTEXT:
|
||||
sslcontext_client.options |= ssl.OP_NO_SSLv2
|
||||
sslcontext_client.options |= ssl.OP_NO_SSLv2
|
||||
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
|
||||
if hasattr(sslcontext_client, 'check_hostname'):
|
||||
sslcontext_client.check_hostname = True
|
||||
@@ -837,26 +914,33 @@ class EventLoopTestsMixin(object):
|
||||
# no CA loaded
|
||||
f_c = self.loop.create_connection(MyProto, host, port,
|
||||
ssl=sslcontext_client)
|
||||
with test_utils.disable_logger():
|
||||
with self.assertRaisesRegex(ssl.SSLError,
|
||||
'certificate verify failed'):
|
||||
self.loop.run_until_complete(f_c)
|
||||
with mock.patch.object(self.loop, 'call_exception_handler'):
|
||||
with test_utils.disable_logger():
|
||||
with self.assertRaisesRegex(ssl.SSLError,
|
||||
'certificate verify failed'):
|
||||
self.loop.run_until_complete(f_c)
|
||||
|
||||
# execute the loop to log the connection error
|
||||
test_utils.run_briefly(self.loop)
|
||||
|
||||
# close connection
|
||||
self.assertIsNone(proto.transport)
|
||||
server.close()
|
||||
|
||||
@test_utils.skipIf(ssl is None, 'No ssl module')
|
||||
@test_utils.skipUnless(HAS_SNI, 'No SNI support in ssl module')
|
||||
@test_utils.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
|
||||
def test_legacy_create_server_ssl_verify_failed(self):
|
||||
with test_utils.force_legacy_ssl_support():
|
||||
self.test_create_server_ssl_verify_failed()
|
||||
|
||||
@unittest.skipIf(ssl is None, 'No ssl module')
|
||||
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
|
||||
@skip_if_backported_sslcontext()
|
||||
def test_create_unix_server_ssl_verify_failed(self):
|
||||
proto = MyProto(loop=self.loop)
|
||||
server, path = self._make_ssl_unix_server(
|
||||
lambda: proto, SIGNED_CERTFILE)
|
||||
|
||||
sslcontext_client = asyncio.SSLContext(ssl.PROTOCOL_SSLv23)
|
||||
if not asyncio.BACKPORT_SSL_CONTEXT:
|
||||
sslcontext_client.options |= ssl.OP_NO_SSLv2
|
||||
sslcontext_client.options |= ssl.OP_NO_SSLv2
|
||||
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
|
||||
if hasattr(sslcontext_client, 'check_hostname'):
|
||||
sslcontext_client.check_hostname = True
|
||||
@@ -865,17 +949,24 @@ class EventLoopTestsMixin(object):
|
||||
f_c = self.loop.create_unix_connection(MyProto, path,
|
||||
ssl=sslcontext_client,
|
||||
server_hostname='invalid')
|
||||
with test_utils.disable_logger():
|
||||
with self.assertRaisesRegex(ssl.SSLError,
|
||||
'certificate verify failed'):
|
||||
self.loop.run_until_complete(f_c)
|
||||
with mock.patch.object(self.loop, 'call_exception_handler'):
|
||||
with test_utils.disable_logger():
|
||||
with self.assertRaisesRegex(ssl.SSLError,
|
||||
'certificate verify failed'):
|
||||
self.loop.run_until_complete(f_c)
|
||||
|
||||
# execute the loop to log the connection error
|
||||
test_utils.run_briefly(self.loop)
|
||||
|
||||
# close connection
|
||||
self.assertIsNone(proto.transport)
|
||||
server.close()
|
||||
|
||||
@test_utils.skipIf(ssl is None, 'No ssl module')
|
||||
@test_utils.skipUnless(HAS_SNI, 'No SNI support in ssl module')
|
||||
def test_legacy_create_unix_server_ssl_verify_failed(self):
|
||||
with test_utils.force_legacy_ssl_support():
|
||||
self.test_create_unix_server_ssl_verify_failed()
|
||||
|
||||
@unittest.skipIf(ssl is None, 'No ssl module')
|
||||
def test_create_server_ssl_match_failed(self):
|
||||
proto = MyProto(loop=self.loop)
|
||||
server, host, port = self._make_ssl_server(
|
||||
@@ -884,28 +975,40 @@ class EventLoopTestsMixin(object):
|
||||
sslcontext_client = asyncio.SSLContext(ssl.PROTOCOL_SSLv23)
|
||||
if not asyncio.BACKPORT_SSL_CONTEXT:
|
||||
sslcontext_client.options |= ssl.OP_NO_SSLv2
|
||||
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
|
||||
sslcontext_client.load_verify_locations(
|
||||
cafile=SIGNING_CA)
|
||||
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
|
||||
sslcontext_client.load_verify_locations(
|
||||
cafile=SIGNING_CA)
|
||||
if hasattr(sslcontext_client, 'check_hostname'):
|
||||
sslcontext_client.check_hostname = True
|
||||
|
||||
# incorrect server_hostname
|
||||
f_c = self.loop.create_connection(MyProto, host, port,
|
||||
ssl=sslcontext_client)
|
||||
with test_utils.disable_logger():
|
||||
with self.assertRaisesRegex(
|
||||
ssl.CertificateError,
|
||||
"hostname '127.0.0.1' doesn't match 'localhost'"):
|
||||
self.loop.run_until_complete(f_c)
|
||||
if compat.PY3:
|
||||
err_msg = "hostname '127.0.0.1' doesn't match 'localhost'"
|
||||
else:
|
||||
# http://bugs.python.org/issue22861
|
||||
err_msg = "hostname '127.0.0.1' doesn't match u'localhost'"
|
||||
|
||||
# incorrect server_hostname
|
||||
if not asyncio.BACKPORT_SSL_CONTEXT:
|
||||
f_c = self.loop.create_connection(MyProto, host, port,
|
||||
ssl=sslcontext_client)
|
||||
with mock.patch.object(self.loop, 'call_exception_handler'):
|
||||
with test_utils.disable_logger():
|
||||
with self.assertRaisesRegex(
|
||||
ssl.CertificateError,
|
||||
err_msg):
|
||||
self.loop.run_until_complete(f_c)
|
||||
|
||||
# close connection
|
||||
proto.transport.close()
|
||||
|
||||
# close connection
|
||||
proto.transport.close()
|
||||
server.close()
|
||||
|
||||
@test_utils.skipIf(ssl is None, 'No ssl module')
|
||||
@test_utils.skipUnless(HAS_SNI, 'No SNI support in ssl module')
|
||||
@test_utils.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
|
||||
def test_legacy_create_server_ssl_match_failed(self):
|
||||
with test_utils.force_legacy_ssl_support():
|
||||
self.test_create_server_ssl_match_failed()
|
||||
|
||||
@unittest.skipIf(ssl is None, 'No ssl module')
|
||||
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
|
||||
def test_create_unix_server_ssl_verified(self):
|
||||
proto = MyProto(loop=self.loop)
|
||||
server, path = self._make_ssl_unix_server(
|
||||
@@ -914,8 +1017,8 @@ class EventLoopTestsMixin(object):
|
||||
sslcontext_client = asyncio.SSLContext(ssl.PROTOCOL_SSLv23)
|
||||
if not asyncio.BACKPORT_SSL_CONTEXT:
|
||||
sslcontext_client.options |= ssl.OP_NO_SSLv2
|
||||
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
|
||||
sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
|
||||
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
|
||||
sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
|
||||
if hasattr(sslcontext_client, 'check_hostname'):
|
||||
sslcontext_client.check_hostname = True
|
||||
|
||||
@@ -929,9 +1032,13 @@ class EventLoopTestsMixin(object):
|
||||
proto.transport.close()
|
||||
client.close()
|
||||
server.close()
|
||||
self.loop.run_until_complete(proto.done)
|
||||
|
||||
@test_utils.skipIf(ssl is None, 'No ssl module')
|
||||
@test_utils.skipUnless(HAS_SNI, 'No SNI support in ssl module')
|
||||
def test_legacy_create_unix_server_ssl_verified(self):
|
||||
with test_utils.force_legacy_ssl_support():
|
||||
self.test_create_unix_server_ssl_verified()
|
||||
|
||||
@unittest.skipIf(ssl is None, 'No ssl module')
|
||||
def test_create_server_ssl_verified(self):
|
||||
proto = MyProto(loop=self.loop)
|
||||
server, host, port = self._make_ssl_server(
|
||||
@@ -940,8 +1047,8 @@ class EventLoopTestsMixin(object):
|
||||
sslcontext_client = asyncio.SSLContext(ssl.PROTOCOL_SSLv23)
|
||||
if not asyncio.BACKPORT_SSL_CONTEXT:
|
||||
sslcontext_client.options |= ssl.OP_NO_SSLv2
|
||||
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
|
||||
sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
|
||||
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
|
||||
sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
|
||||
if hasattr(sslcontext_client, 'check_hostname'):
|
||||
sslcontext_client.check_hostname = True
|
||||
|
||||
@@ -954,7 +1061,13 @@ class EventLoopTestsMixin(object):
|
||||
# close connection
|
||||
proto.transport.close()
|
||||
client.close()
|
||||
|
||||
server.close()
|
||||
self.loop.run_until_complete(proto.done)
|
||||
|
||||
def test_legacy_create_server_ssl_verified(self):
|
||||
with test_utils.force_legacy_ssl_support():
|
||||
self.test_create_server_ssl_verified()
|
||||
|
||||
def test_create_server_sock(self):
|
||||
non_local = {'proto': asyncio.Future(loop=self.loop)}
|
||||
@@ -998,7 +1111,7 @@ class EventLoopTestsMixin(object):
|
||||
|
||||
server.close()
|
||||
|
||||
@test_utils.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
|
||||
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
|
||||
def test_create_server_dual_stack(self):
|
||||
f_proto = asyncio.Future(loop=self.loop)
|
||||
|
||||
@@ -1049,6 +1162,7 @@ class EventLoopTestsMixin(object):
|
||||
client.connect(('127.0.0.1', port))
|
||||
client.send(b'xxx')
|
||||
client.close()
|
||||
|
||||
server.close()
|
||||
|
||||
client = socket.socket()
|
||||
@@ -1115,7 +1229,7 @@ class EventLoopTestsMixin(object):
|
||||
self.assertIsNone(loop._csock)
|
||||
self.assertIsNone(loop._ssock)
|
||||
|
||||
@test_utils.skipUnless(sys.platform != 'win32',
|
||||
@unittest.skipUnless(sys.platform != 'win32',
|
||||
"Don't support pipes for Windows")
|
||||
def test_read_pipe(self):
|
||||
proto = MyReadPipeProto(loop=self.loop)
|
||||
@@ -1150,7 +1264,7 @@ class EventLoopTestsMixin(object):
|
||||
# extra info is available
|
||||
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
|
||||
|
||||
@test_utils.skipUnless(sys.platform != 'win32',
|
||||
@unittest.skipUnless(sys.platform != 'win32',
|
||||
"Don't support pipes for Windows")
|
||||
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
|
||||
# older than 10.6 (Snow Leopard)
|
||||
@@ -1190,8 +1304,8 @@ class EventLoopTestsMixin(object):
|
||||
# extra info is available
|
||||
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
|
||||
|
||||
@test_utils.skipUnless(sys.platform != 'win32',
|
||||
"Don't support pipes for Windows")
|
||||
@unittest.skipUnless(sys.platform != 'win32',
|
||||
"Don't support pipes for Windows")
|
||||
def test_write_pipe(self):
|
||||
rpipe, wpipe = os.pipe()
|
||||
pipeobj = io.open(wpipe, 'wb', 1024)
|
||||
@@ -1229,10 +1343,11 @@ class EventLoopTestsMixin(object):
|
||||
self.loop.run_until_complete(proto.done)
|
||||
self.assertEqual('CLOSED', proto.state)
|
||||
|
||||
@test_utils.skipUnless(sys.platform != 'win32',
|
||||
@unittest.skipUnless(sys.platform != 'win32',
|
||||
"Don't support pipes for Windows")
|
||||
def test_write_pipe_disconnect_on_close(self):
|
||||
rsock, wsock = test_utils.socketpair()
|
||||
rsock.setblocking(False)
|
||||
if hasattr(wsock, 'detach'):
|
||||
wsock_fd = wsock.detach()
|
||||
else:
|
||||
@@ -1256,8 +1371,8 @@ class EventLoopTestsMixin(object):
|
||||
self.loop.run_until_complete(proto.done)
|
||||
self.assertEqual('CLOSED', proto.state)
|
||||
|
||||
@test_utils.skipUnless(sys.platform != 'win32',
|
||||
"Don't support pipes for Windows")
|
||||
@unittest.skipUnless(sys.platform != 'win32',
|
||||
"Don't support pipes for Windows")
|
||||
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
|
||||
# older than 10.6 (Snow Leopard)
|
||||
@support.requires_mac_ver(10, 6)
|
||||
@@ -1365,6 +1480,10 @@ class EventLoopTestsMixin(object):
|
||||
'selector': self.loop._selector.__class__.__name__})
|
||||
|
||||
def test_sock_connect_address(self):
|
||||
# In debug mode, sock_connect() must ensure that the address is already
|
||||
# resolved (call _check_resolved_address())
|
||||
self.loop.set_debug(True)
|
||||
|
||||
addresses = [(socket.AF_INET, ('www.python.org', 80))]
|
||||
if support.IPV6_ENABLED:
|
||||
addresses.extend((
|
||||
@@ -1376,6 +1495,7 @@ class EventLoopTestsMixin(object):
|
||||
for sock_type in (socket.SOCK_STREAM, socket.SOCK_DGRAM):
|
||||
sock = socket.socket(family, sock_type)
|
||||
with contextlib.closing(sock):
|
||||
sock.setblocking(False)
|
||||
connect = self.loop.sock_connect(sock, address)
|
||||
with self.assertRaises(ValueError) as cm:
|
||||
self.loop.run_until_complete(connect)
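
In debug mode sock_connect() rejects unresolved hostnames (the _check_resolved_address() call mentioned above). The remedy is to resolve first through the loop; a sketch using Trollius syntax:

import trollius as asyncio
from trollius import From, coroutine

@coroutine
def connect_resolved(loop, sock, host, port):
    # Resolve before sock_connect(): debug mode rejects raw hostnames.
    infos = yield From(loop.getaddrinfo(host, port,
                                        family=sock.family,
                                        type=sock.type))
    yield From(loop.sock_connect(sock, infos[0][4]))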
|
||||
@@ -1415,6 +1535,38 @@ class EventLoopTestsMixin(object):
|
||||
with self.assertRaises(RuntimeError):
|
||||
self.loop.run_until_complete(coro)
|
||||
|
||||
def test_close(self):
|
||||
self.loop.close()
|
||||
|
||||
@asyncio.coroutine
|
||||
def test():
|
||||
pass
|
||||
|
||||
func = lambda: False
|
||||
coro = test()
|
||||
self.addCleanup(coro.close)
|
||||
|
||||
# operations are blocked when the loop is closed
|
||||
with self.assertRaises(RuntimeError):
|
||||
self.loop.run_forever()
|
||||
with self.assertRaises(RuntimeError):
|
||||
fut = asyncio.Future(loop=self.loop)
|
||||
self.loop.run_until_complete(fut)
|
||||
with self.assertRaises(RuntimeError):
|
||||
self.loop.call_soon(func)
|
||||
with self.assertRaises(RuntimeError):
|
||||
self.loop.call_soon_threadsafe(func)
|
||||
with self.assertRaises(RuntimeError):
|
||||
self.loop.call_later(1.0, func)
|
||||
with self.assertRaises(RuntimeError):
|
||||
self.loop.call_at(self.loop.time() + .0, func)
|
||||
with self.assertRaises(RuntimeError):
|
||||
self.loop.run_in_executor(None, func)
|
||||
with self.assertRaises(RuntimeError):
|
||||
self.loop.create_task(coro)
|
||||
with self.assertRaises(RuntimeError):
|
||||
self.loop.add_signal_handler(signal.SIGTERM, func)
|
||||
|
||||
|
||||
class SubprocessTestsMixin(object):
|
||||
|
||||
@@ -1446,9 +1598,10 @@ class SubprocessTestsMixin(object):
|
||||
stdin = transp.get_pipe_transport(0)
|
||||
stdin.write(b'Python The Winner')
|
||||
self.loop.run_until_complete(proto.got_data[1].wait())
|
||||
transp.close()
|
||||
with test_utils.disable_logger():
|
||||
transp.close()
|
||||
self.loop.run_until_complete(proto.completed)
|
||||
self.check_terminated(proto.returncode)
|
||||
self.check_killed(proto.returncode)
|
||||
self.assertEqual(b'Python The Winner', proto.data[1])
|
||||
|
||||
def test_subprocess_interactive(self):
|
||||
@@ -1462,21 +1615,20 @@ class SubprocessTestsMixin(object):
|
||||
self.loop.run_until_complete(proto.connected)
|
||||
self.assertEqual('CONNECTED', proto.state)
|
||||
|
||||
try:
|
||||
stdin = transp.get_pipe_transport(0)
|
||||
stdin.write(b'Python ')
|
||||
self.loop.run_until_complete(proto.got_data[1].wait())
|
||||
proto.got_data[1].clear()
|
||||
self.assertEqual(b'Python ', proto.data[1])
|
||||
stdin = transp.get_pipe_transport(0)
|
||||
stdin.write(b'Python ')
|
||||
self.loop.run_until_complete(proto.got_data[1].wait())
|
||||
proto.got_data[1].clear()
|
||||
self.assertEqual(b'Python ', proto.data[1])
|
||||
|
||||
stdin.write(b'The Winner')
|
||||
self.loop.run_until_complete(proto.got_data[1].wait())
|
||||
self.assertEqual(b'Python The Winner', proto.data[1])
|
||||
finally:
|
||||
stdin.write(b'The Winner')
|
||||
self.loop.run_until_complete(proto.got_data[1].wait())
|
||||
self.assertEqual(b'Python The Winner', proto.data[1])
|
||||
|
||||
with test_utils.disable_logger():
|
||||
transp.close()
|
||||
|
||||
self.loop.run_until_complete(proto.completed)
|
||||
self.check_terminated(proto.returncode)
|
||||
self.check_killed(proto.returncode)
|
||||
|
||||
def test_subprocess_shell(self):
|
||||
connect = self.loop.subprocess_shell(
|
||||
@@ -1492,6 +1644,7 @@ class SubprocessTestsMixin(object):
|
||||
self.assertTrue(all(f.done() for f in proto.disconnects.values()))
|
||||
self.assertEqual(proto.data[1].rstrip(b'\r\n'), b'Python')
|
||||
self.assertEqual(proto.data[2], b'')
|
||||
transp.close()
|
||||
|
||||
def test_subprocess_exitcode(self):
|
||||
connect = self.loop.subprocess_shell(
|
||||
@@ -1501,6 +1654,7 @@ class SubprocessTestsMixin(object):
|
||||
self.assertIsInstance(proto, MySubprocessProtocol)
|
||||
self.loop.run_until_complete(proto.completed)
|
||||
self.assertEqual(7, proto.returncode)
|
||||
transp.close()
|
||||
|
||||
def test_subprocess_close_after_finish(self):
|
||||
connect = self.loop.subprocess_shell(
|
||||
@@ -1528,6 +1682,7 @@ class SubprocessTestsMixin(object):
|
||||
transp.kill()
|
||||
self.loop.run_until_complete(proto.completed)
|
||||
self.check_killed(proto.returncode)
|
||||
transp.close()
|
||||
|
||||
def test_subprocess_terminate(self):
|
||||
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
|
||||
@@ -1542,8 +1697,9 @@ class SubprocessTestsMixin(object):
|
||||
transp.terminate()
|
||||
self.loop.run_until_complete(proto.completed)
|
||||
self.check_terminated(proto.returncode)
|
||||
transp.close()
|
||||
|
||||
@test_utils.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
|
||||
@unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
|
||||
def test_subprocess_send_signal(self):
|
||||
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
|
||||
|
||||
@@ -1557,6 +1713,7 @@ class SubprocessTestsMixin(object):
|
||||
transp.send_signal(signal.SIGHUP)
|
||||
self.loop.run_until_complete(proto.completed)
|
||||
self.assertEqual(-signal.SIGHUP, proto.returncode)
|
||||
transp.close()
|
||||
|
||||
def test_subprocess_stderr(self):
|
||||
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
|
||||
@@ -1629,11 +1786,12 @@ class SubprocessTestsMixin(object):
|
||||
# GetLastError()==ERROR_INVALID_NAME on Windows!?! (Using
|
||||
# WriteFile() we get ERROR_BROKEN_PIPE as expected.)
|
||||
self.assertEqual(b'ERR:OSError', proto.data[2])
|
||||
transp.close()
|
||||
with test_utils.disable_logger():
|
||||
transp.close()
|
||||
self.loop.run_until_complete(proto.completed)
|
||||
self.check_terminated(proto.returncode)
|
||||
self.check_killed(proto.returncode)
|
||||
|
||||
@test_utils.skipUnless(hasattr(os, 'setsid'), "need os.setsid()")
|
||||
@unittest.skipUnless(hasattr(os, 'setsid'), "need os.setsid()")
|
||||
def test_subprocess_wait_no_same_group(self):
|
||||
# start the new process in a new session
|
||||
connect = self.loop.subprocess_shell(
|
||||
@@ -1692,20 +1850,36 @@ if sys.platform == 'win32':
|
||||
def create_event_loop(self):
|
||||
return asyncio.ProactorEventLoop()
|
||||
|
||||
def test_create_ssl_connection(self):
|
||||
raise unittest.SkipTest("IocpEventLoop incompatible with SSL")
|
||||
if not sslproto._is_sslproto_available():
|
||||
def test_create_ssl_connection(self):
|
||||
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
|
||||
|
||||
def test_create_server_ssl(self):
|
||||
raise unittest.SkipTest("IocpEventLoop incompatible with SSL")
|
||||
def test_create_server_ssl(self):
|
||||
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
|
||||
|
||||
def test_create_server_ssl_verify_failed(self):
|
||||
raise unittest.SkipTest("IocpEventLoop incompatible with SSL")
|
||||
def test_create_server_ssl_verify_failed(self):
|
||||
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
|
||||
|
||||
def test_create_server_ssl_match_failed(self):
|
||||
raise unittest.SkipTest("IocpEventLoop incompatible with SSL")
|
||||
def test_create_server_ssl_match_failed(self):
|
||||
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
|
||||
|
||||
def test_create_server_ssl_verified(self):
|
||||
raise unittest.SkipTest("IocpEventLoop incompatible with SSL")
|
||||
def test_create_server_ssl_verified(self):
|
||||
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
|
||||
|
||||
def test_legacy_create_ssl_connection(self):
|
||||
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
|
||||
|
||||
def test_legacy_create_server_ssl(self):
|
||||
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
|
||||
|
||||
def test_legacy_create_server_ssl_verify_failed(self):
|
||||
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
|
||||
|
||||
def test_legacy_create_server_ssl_match_failed(self):
|
||||
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
|
||||
|
||||
def test_legacy_create_server_ssl_verified(self):
|
||||
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
|
||||
|
||||
def test_reader_callback(self):
|
||||
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
|
||||
@@ -1753,8 +1927,8 @@ else:
|
||||
@support.requires_mac_ver(10, 9)
|
||||
# Issue #20667: KqueueEventLoopTests.test_read_pty_output()
|
||||
# hangs on OpenBSD 5.5
|
||||
@test_utils.skipIf(sys.platform.startswith('openbsd'),
|
||||
'test hangs on OpenBSD')
|
||||
@unittest.skipIf(sys.platform.startswith('openbsd'),
|
||||
'test hangs on OpenBSD')
|
||||
def test_read_pty_output(self):
|
||||
super(KqueueEventLoopTests, self).test_read_pty_output()
|
||||
|
||||
@@ -1897,30 +2071,47 @@ class HandleTests(test_utils.TestCase):
|
||||
|
||||
# cancelled handle
|
||||
h.cancel()
|
||||
self.assertEqual(repr(h),
|
||||
'<Handle cancelled created at %s:%s>'
|
||||
% (create_filename, create_lineno))
|
||||
self.assertEqual(
|
||||
repr(h),
|
||||
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
|
||||
% (filename, lineno, create_filename, create_lineno))
|
||||
|
||||
# double cancellation won't overwrite _repr
|
||||
h.cancel()
|
||||
self.assertEqual(
|
||||
repr(h),
|
||||
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
|
||||
% (filename, lineno, create_filename, create_lineno))
|
||||
|
||||
def test_handle_source_traceback(self):
|
||||
loop = asyncio.get_event_loop_policy().new_event_loop()
|
||||
loop.set_debug(True)
|
||||
self.set_event_loop(loop)
|
||||
|
||||
def check_source_traceback(h):
|
||||
lineno = sys._getframe(1).f_lineno - 1
|
||||
self.assertIsInstance(h._source_traceback, list)
|
||||
filename = sys._getframe().f_code.co_filename
|
||||
self.assertEqual(h._source_traceback[-1][:3],
|
||||
(filename,
|
||||
lineno,
|
||||
'test_handle_source_traceback'))
|
||||
|
||||
# call_soon
|
||||
h = loop.call_soon(noop)
|
||||
self.check_soure_traceback(h._source_traceback, -1)
|
||||
check_source_traceback(h)
|
||||
|
||||
# call_soon_threadsafe
|
||||
h = loop.call_soon_threadsafe(noop)
|
||||
self.check_soure_traceback(h._source_traceback, -1)
|
||||
check_source_traceback(h)
|
||||
|
||||
# call_later
|
||||
h = loop.call_later(0, noop)
|
||||
self.check_soure_traceback(h._source_traceback, -1)
|
||||
check_source_traceback(h)
|
||||
|
||||
# call_at
|
||||
h = loop.call_later(0, noop)
|
||||
self.check_soure_traceback(h._source_traceback, -1)
|
||||
check_source_traceback(h)
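
With debug mode enabled, handles (and futures) record the stack of their creation site, which is what feeds the "created at file:line" fragment of the reprs tested earlier. A short illustration (note that _source_traceback is an underscored, internal attribute):

import trollius as asyncio

loop = asyncio.new_event_loop()
loop.set_debug(True)
h = loop.call_soon(lambda: None)
filename, lineno = h._source_traceback[-1][:2]  # where call_soon() ran
loop.close()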
|
||||
|
||||
|
||||
class TimerTests(test_utils.TestCase):
|
||||
@@ -1986,8 +2177,9 @@ class TimerTests(test_utils.TestCase):
|
||||
# cancelled handle
|
||||
h.cancel()
|
||||
self.assertEqual(repr(h),
|
||||
'<TimerHandle cancelled when=123 created at %s:%s>'
|
||||
% (create_filename, create_lineno))
|
||||
'<TimerHandle cancelled when=123 noop() '
|
||||
'at %s:%s created at %s:%s>'
|
||||
% (filename, lineno, create_filename, create_lineno))
|
||||
|
||||
|
||||
def test_timer_comparison(self):
|
||||
@@ -2044,12 +2236,15 @@ class AbstractEventLoopTests(test_utils.TestCase):
|
||||
NotImplementedError, loop.stop)
|
||||
self.assertRaises(
|
||||
NotImplementedError, loop.is_running)
|
||||
self.assertRaises(
|
||||
NotImplementedError, loop.is_closed)
|
||||
# skip some tests if the AbstractEventLoop class comes from asyncio
|
||||
# and the asyncio version (in fact, the Python version) is older than 3.4.2
|
||||
if events.asyncio is None or sys.version_info >= (3, 4, 2):
|
||||
self.assertRaises(
|
||||
NotImplementedError, loop.is_closed)
|
||||
self.assertRaises(
|
||||
NotImplementedError, loop.create_task, None)
|
||||
self.assertRaises(
|
||||
NotImplementedError, loop.close)
|
||||
self.assertRaises(
|
||||
NotImplementedError, loop.create_task, None)
|
||||
self.assertRaises(
|
||||
NotImplementedError, loop.call_later, None, None)
|
||||
self.assertRaises(
|
||||
@@ -2184,14 +2379,14 @@ class PolicyTests(test_utils.TestCase):
|
||||
def test_get_event_loop_after_set_none(self):
|
||||
policy = asyncio.DefaultEventLoopPolicy()
|
||||
policy.set_event_loop(None)
|
||||
self.assertRaises(AssertionError, policy.get_event_loop)
|
||||
self.assertRaises(RuntimeError, policy.get_event_loop)
|
||||
|
||||
@mock.patch('trollius.events.threading.current_thread')
|
||||
def test_get_event_loop_thread(self, m_current_thread):
|
||||
|
||||
def f():
|
||||
policy = asyncio.DefaultEventLoopPolicy()
|
||||
self.assertRaises(AssertionError, policy.get_event_loop)
|
||||
self.assertRaises(RuntimeError, policy.get_event_loop)
|
||||
|
||||
th = threading.Thread(target=f)
|
||||
th.start()
|
||||
|
||||
@@ -7,12 +7,14 @@ except ImportError:
|
||||
import re
|
||||
import sys
|
||||
import threading
|
||||
import unittest
|
||||
|
||||
import trollius as asyncio
|
||||
from trollius import From
|
||||
from trollius import compat
|
||||
from trollius import test_support as support
|
||||
from trollius import test_utils
|
||||
from trollius import test_support as support # gc_collect
|
||||
from trollius.test_utils import mock
|
||||
from trollius.test_utils import unittest
|
||||
|
||||
|
||||
def get_thread_ident():
|
||||
@@ -32,6 +34,7 @@ class FutureTests(test_utils.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.loop = self.new_test_loop()
|
||||
self.addCleanup(self.loop.close)
|
||||
|
||||
def test_initial_state(self):
|
||||
f = asyncio.Future(loop=self.loop)
|
||||
@@ -114,7 +117,8 @@ class FutureTests(test_utils.TestCase):
|
||||
exc = RuntimeError()
|
||||
f_exception = asyncio.Future(loop=self.loop)
|
||||
f_exception.set_exception(exc)
|
||||
self.assertEqual(repr(f_exception), '<Future finished exception=RuntimeError()>')
|
||||
self.assertEqual(repr(f_exception),
|
||||
'<Future finished exception=RuntimeError()>')
|
||||
self.assertIs(f_exception.exception(), exc)
|
||||
|
||||
def func_repr(func):
|
||||
@@ -227,7 +231,7 @@ class FutureTests(test_utils.TestCase):
|
||||
del fut
|
||||
self.assertFalse(m_log.error.called)
|
||||
|
||||
@test_utils.skipIf(concurrent is None, 'need concurrent.futures')
|
||||
@unittest.skipIf(concurrent is None, 'need concurrent.futures')
|
||||
def test_wrap_future(self):
|
||||
|
||||
def run(arg):
|
||||
@@ -245,7 +249,7 @@ class FutureTests(test_utils.TestCase):
|
||||
f2 = asyncio.wrap_future(f1)
|
||||
self.assertIs(f1, f2)
|
||||
|
||||
@test_utils.skipIf(concurrent is None, 'need concurrent.futures')
|
||||
@unittest.skipIf(concurrent is None, 'need concurrent.futures')
|
||||
@mock.patch('trollius.futures.events')
|
||||
def test_wrap_future_use_global_loop(self, m_events):
|
||||
def run(arg):
|
||||
@@ -255,7 +259,7 @@ class FutureTests(test_utils.TestCase):
|
||||
f2 = asyncio.wrap_future(f1)
|
||||
self.assertIs(m_events.get_event_loop.return_value, f2._loop)
|
||||
|
||||
@test_utils.skipIf(concurrent is None, 'need concurrent.futures')
|
||||
@unittest.skipIf(concurrent is None, 'need concurrent.futures')
|
||||
def test_wrap_future_cancel(self):
|
||||
f1 = concurrent.futures.Future()
|
||||
f2 = asyncio.wrap_future(f1, loop=self.loop)
|
||||
@@ -264,7 +268,7 @@ class FutureTests(test_utils.TestCase):
|
||||
self.assertTrue(f1.cancelled())
|
||||
self.assertTrue(f2.cancelled())
|
||||
|
||||
@test_utils.skipIf(concurrent is None, 'need concurrent.futures')
|
||||
@unittest.skipIf(concurrent is None, 'need concurrent.futures')
|
||||
def test_wrap_future_cancel2(self):
|
||||
f1 = concurrent.futures.Future()
|
||||
f2 = asyncio.wrap_future(f1, loop=self.loop)
|
||||
@@ -279,11 +283,17 @@ class FutureTests(test_utils.TestCase):
|
||||
self.loop.set_debug(True)
|
||||
|
||||
future = asyncio.Future(loop=self.loop)
|
||||
self.check_soure_traceback(future._source_traceback, -1)
|
||||
lineno = sys._getframe().f_lineno - 1
|
||||
self.assertIsInstance(future._source_traceback, list)
|
||||
filename = sys._getframe().f_code.co_filename
|
||||
self.assertEqual(future._source_traceback[-1][:3],
|
||||
(filename,
|
||||
lineno,
|
||||
'test_future_source_traceback'))
|
||||
|
||||
@mock.patch('trollius.base_events.logger')
|
||||
def test_future_exception_never_retrieved(self, m_log):
|
||||
self.loop.set_debug(True)
|
||||
def check_future_exception_never_retrieved(self, debug, m_log):
|
||||
self.loop.set_debug(debug)
|
||||
|
||||
def memory_error():
|
||||
try:
|
||||
@@ -293,40 +303,70 @@ class FutureTests(test_utils.TestCase):
|
||||
exc = memory_error()
|
||||
|
||||
future = asyncio.Future(loop=self.loop)
|
||||
source_traceback = future._source_traceback
|
||||
if debug:
|
||||
source_traceback = future._source_traceback
|
||||
future.set_exception(exc)
|
||||
future = None
|
||||
test_utils.run_briefly(self.loop)
|
||||
support.gc_collect()
|
||||
|
||||
if sys.version_info >= (3, 4):
|
||||
frame = source_traceback[-1]
|
||||
regex = (r'^Future exception was never retrieved\n'
|
||||
r'future: <Future finished exception=MemoryError\(\) created at {filename}:{lineno}>\n'
|
||||
r'source_traceback: Object created at \(most recent call last\):\n'
|
||||
r' File'
|
||||
r'.*\n'
|
||||
r' File "{filename}", line {lineno}, in test_future_exception_never_retrieved\n'
|
||||
r' future = asyncio\.Future\(loop=self\.loop\)$'
|
||||
).format(filename=re.escape(frame[0]), lineno=frame[1])
|
||||
if debug:
|
||||
frame = source_traceback[-1]
|
||||
regex = (r'^Future exception was never retrieved\n'
|
||||
r'future: <Future finished exception=MemoryError\(\) '
|
||||
r'created at {filename}:{lineno}>\n'
|
||||
r'source_traceback: Object '
|
||||
r'created at \(most recent call last\):\n'
|
||||
r' File'
|
||||
r'.*\n'
|
||||
r' File "{filename}", line {lineno}, '
|
||||
r'in check_future_exception_never_retrieved\n'
|
||||
r' future = asyncio\.Future\(loop=self\.loop\)$'
|
||||
).format(filename=re.escape(frame[0]),
|
||||
lineno=frame[1])
|
||||
else:
|
||||
regex = (r'^Future exception was never retrieved\n'
|
||||
r'future: '
|
||||
r'<Future finished exception=MemoryError\(\)>$'
|
||||
)
|
||||
exc_info = (type(exc), exc, exc.__traceback__)
|
||||
m_log.error.assert_called_once_with(mock.ANY, exc_info=exc_info)
|
||||
else:
|
||||
frame = source_traceback[-1]
|
||||
regex = (r'^Future/Task exception was never retrieved\n'
|
||||
r'Future/Task created at \(most recent call last\):\n'
|
||||
r' File'
|
||||
r'.*\n'
|
||||
r' File "{filename}", line {lineno}, in test_future_exception_never_retrieved\n'
|
||||
r' future = asyncio\.Future\(loop=self\.loop\)\n'
|
||||
r'Traceback \(most recent call last\):\n'
|
||||
r'.*\n'
|
||||
r'MemoryError$'
|
||||
).format(filename=re.escape(frame[0]), lineno=frame[1])
|
||||
if debug:
|
||||
frame = source_traceback[-1]
|
||||
regex = (r'^Future/Task exception was never retrieved\n'
|
||||
r'Future/Task created at \(most recent call last\):\n'
|
||||
r' File'
|
||||
r'.*\n'
|
||||
r' File "{filename}", line {lineno}, '
|
||||
r'in check_future_exception_never_retrieved\n'
|
||||
r' future = asyncio\.Future\(loop=self\.loop\)\n'
|
||||
r'Traceback \(most recent call last\):\n'
|
||||
r'.*\n'
|
||||
r'MemoryError$'
|
||||
).format(filename=re.escape(frame[0]),
|
||||
lineno=frame[1])
|
||||
elif compat.PY3:
|
||||
regex = (r'^Future/Task exception was never retrieved\n'
|
||||
r'Traceback \(most recent call last\):\n'
|
||||
r'.*\n'
|
||||
r'MemoryError$'
|
||||
)
|
||||
else:
|
||||
regex = (r'^Future/Task exception was never retrieved\n'
|
||||
r'MemoryError$'
|
||||
)
|
||||
m_log.error.assert_called_once_with(mock.ANY, exc_info=False)
|
||||
message = m_log.error.call_args[0][0]
|
||||
self.assertRegex(message, re.compile(regex, re.DOTALL))
|
||||
|
||||
def test_future_exception_never_retrieved(self):
|
||||
self.check_future_exception_never_retrieved(False)
|
||||
|
||||
def test_future_exception_never_retrieved_debug(self):
|
||||
self.check_future_exception_never_retrieved(True)
|
||||
|
||||
def test_set_result_unless_cancelled(self):
|
||||
fut = asyncio.Future(loop=self.loop)
|
||||
fut.cancel()
|
||||
|
||||
@@ -1,12 +1,12 @@
"""Tests for lock.py"""

import unittest
import re

import trollius as asyncio
from trollius import From, Return
from trollius import test_utils
from trollius.test_utils import mock
from trollius.test_utils import unittest


STR_RGX_REPR = (
@@ -230,7 +230,7 @@ class LockTests(test_utils.TestCase):
        except RuntimeError as err:
            self.assertEqual(
                str(err),
                '"yield" should be used as context manager expression')
                '"yield From" should be used as context manager expression')

        self.assertFalse(lock.locked())

@@ -856,7 +856,7 @@ class SemaphoreTests(test_utils.TestCase):
        except RuntimeError as err:
            self.assertEqual(
                str(err),
                '"yield" should be used as context manager expression')
                '"yield From" should be used as context manager expression')

        self.assertEqual(2, sem._value)

@@ -1,7 +1,6 @@
"""Tests for proactor_events.py"""

import socket
import unittest

from trollius import test_utils
from trollius.proactor_events import BaseProactorEventLoop
@@ -10,29 +9,45 @@ from trollius.proactor_events import _ProactorSocketTransport
from trollius.proactor_events import _ProactorWritePipeTransport
from trollius.py33_exceptions import ConnectionAbortedError, ConnectionResetError
from trollius.test_utils import mock
from trollius.test_utils import unittest
import trollius as asyncio


def close_transport(transport):
    # Don't call transport.close() because the event loop and the IOCP proactor
    # are mocked
    if transport._sock is None:
        return
    transport._sock.close()
    transport._sock = None
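
A note on the py33_exceptions import above: ConnectionAbortedError and friends only became builtins in Python 3.3 (PEP 3151), so Trollius ships backported classes for Python 2. A hedged sketch of code that runs on both; the try/except shape is illustrative, only the trollius.py33_exceptions module name comes from the patch:

try:
    # Python 3.3+: PEP 3151 made these builtin OSError subclasses
    _ = (ConnectionAbortedError, ConnectionResetError)
except NameError:
    # Python 2: fall back to the Trollius backports
    from trollius.py33_exceptions import (ConnectionAbortedError,
                                          ConnectionResetError)
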
|
||||
|
||||
|
||||
class ProactorSocketTransportTests(test_utils.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.loop = self.new_test_loop()
|
||||
self.addCleanup(self.loop.close)
|
||||
self.proactor = mock.Mock()
|
||||
self.loop._proactor = self.proactor
|
||||
self.protocol = test_utils.make_test_protocol(asyncio.Protocol)
|
||||
self.sock = mock.Mock(socket.socket)
|
||||
|
||||
def socket_transport(self, waiter=None):
|
||||
transport = _ProactorSocketTransport(self.loop, self.sock,
|
||||
self.protocol, waiter=waiter)
|
||||
self.addCleanup(close_transport, transport)
|
||||
return transport
|
||||
|
||||
def test_ctor(self):
|
||||
fut = asyncio.Future(loop=self.loop)
|
||||
tr = _ProactorSocketTransport(
|
||||
self.loop, self.sock, self.protocol, fut)
|
||||
tr = self.socket_transport(waiter=fut)
|
||||
test_utils.run_briefly(self.loop)
|
||||
self.assertIsNone(fut.result())
|
||||
self.protocol.connection_made(tr)
|
||||
self.proactor.recv.assert_called_with(self.sock, 4096)
|
||||
|
||||
def test_loop_reading(self):
|
||||
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
|
||||
tr = self.socket_transport()
|
||||
tr._loop_reading()
|
||||
self.loop._proactor.recv.assert_called_with(self.sock, 4096)
|
||||
self.assertFalse(self.protocol.data_received.called)
|
||||
@@ -42,8 +57,7 @@ class ProactorSocketTransportTests(test_utils.TestCase):
|
||||
res = asyncio.Future(loop=self.loop)
|
||||
res.set_result(b'data')
|
||||
|
||||
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
|
||||
|
||||
tr = self.socket_transport()
|
||||
tr._read_fut = res
|
||||
tr._loop_reading(res)
|
||||
self.loop._proactor.recv.assert_called_with(self.sock, 4096)
|
||||
@@ -53,8 +67,7 @@ class ProactorSocketTransportTests(test_utils.TestCase):
|
||||
res = asyncio.Future(loop=self.loop)
|
||||
res.set_result(b'')
|
||||
|
||||
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
|
||||
|
||||
tr = self.socket_transport()
|
||||
self.assertRaises(AssertionError, tr._loop_reading, res)
|
||||
|
||||
tr.close = mock.Mock()
|
||||
@@ -67,7 +80,7 @@ class ProactorSocketTransportTests(test_utils.TestCase):
|
||||
def test_loop_reading_aborted(self):
|
||||
err = self.loop._proactor.recv.side_effect = ConnectionAbortedError()
|
||||
|
||||
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
|
||||
tr = self.socket_transport()
|
||||
tr._fatal_error = mock.Mock()
|
||||
tr._loop_reading()
|
||||
tr._fatal_error.assert_called_with(
|
||||
@@ -77,7 +90,7 @@ class ProactorSocketTransportTests(test_utils.TestCase):
|
||||
def test_loop_reading_aborted_closing(self):
|
||||
self.loop._proactor.recv.side_effect = ConnectionAbortedError()
|
||||
|
||||
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
|
||||
tr = self.socket_transport()
|
||||
tr._closing = True
|
||||
tr._fatal_error = mock.Mock()
|
||||
tr._loop_reading()
|
||||
@@ -85,7 +98,7 @@ class ProactorSocketTransportTests(test_utils.TestCase):
|
||||
|
||||
def test_loop_reading_aborted_is_fatal(self):
|
||||
self.loop._proactor.recv.side_effect = ConnectionAbortedError()
|
||||
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
|
||||
tr = self.socket_transport()
|
||||
tr._closing = False
|
||||
tr._fatal_error = mock.Mock()
|
||||
tr._loop_reading()
|
||||
@@ -94,7 +107,7 @@ class ProactorSocketTransportTests(test_utils.TestCase):
|
||||
def test_loop_reading_conn_reset_lost(self):
|
||||
err = self.loop._proactor.recv.side_effect = ConnectionResetError()
|
||||
|
||||
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
|
||||
tr = self.socket_transport()
|
||||
tr._closing = False
|
||||
tr._fatal_error = mock.Mock()
|
||||
tr._force_close = mock.Mock()
|
||||
@@ -105,7 +118,7 @@ class ProactorSocketTransportTests(test_utils.TestCase):
|
||||
def test_loop_reading_exception(self):
|
||||
err = self.loop._proactor.recv.side_effect = (OSError())
|
||||
|
||||
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
|
||||
tr = self.socket_transport()
|
||||
tr._fatal_error = mock.Mock()
|
||||
tr._loop_reading()
|
||||
tr._fatal_error.assert_called_with(
|
||||
@@ -113,19 +126,19 @@ class ProactorSocketTransportTests(test_utils.TestCase):
|
||||
'Fatal read error on pipe transport')
|
||||
|
||||
def test_write(self):
|
||||
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
|
||||
tr = self.socket_transport()
|
||||
tr._loop_writing = mock.Mock()
|
||||
tr.write(b'data')
|
||||
self.assertEqual(tr._buffer, None)
|
||||
tr._loop_writing.assert_called_with(data=b'data')
|
||||
|
||||
def test_write_no_data(self):
|
||||
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
|
||||
tr = self.socket_transport()
|
||||
tr.write(b'')
|
||||
self.assertFalse(tr._buffer)
|
||||
|
||||
def test_write_more(self):
|
||||
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
|
||||
tr = self.socket_transport()
|
||||
tr._write_fut = mock.Mock()
|
||||
tr._loop_writing = mock.Mock()
|
||||
tr.write(b'data')
|
||||
@@ -133,7 +146,7 @@ class ProactorSocketTransportTests(test_utils.TestCase):
|
||||
self.assertFalse(tr._loop_writing.called)
|
||||
|
||||
def test_loop_writing(self):
|
||||
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
|
||||
tr = self.socket_transport()
|
||||
tr._buffer = bytearray(b'data')
|
||||
tr._loop_writing()
|
||||
self.loop._proactor.send.assert_called_with(self.sock, b'data')
|
||||
@@ -143,7 +156,7 @@ class ProactorSocketTransportTests(test_utils.TestCase):
|
||||
@mock.patch('trollius.proactor_events.logger')
|
||||
def test_loop_writing_err(self, m_log):
|
||||
err = self.loop._proactor.send.side_effect = OSError()
|
||||
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
|
||||
tr = self.socket_transport()
|
||||
tr._fatal_error = mock.Mock()
|
||||
tr._buffer = [b'da', b'ta']
|
||||
tr._loop_writing()
|
||||
@@ -164,7 +177,7 @@ class ProactorSocketTransportTests(test_utils.TestCase):
|
||||
fut = asyncio.Future(loop=self.loop)
|
||||
fut.set_result(b'data')
|
||||
|
||||
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
|
||||
tr = self.socket_transport()
|
||||
tr._write_fut = fut
|
||||
tr._loop_writing(fut)
|
||||
self.assertIsNone(tr._write_fut)
|
||||
@@ -173,7 +186,7 @@ class ProactorSocketTransportTests(test_utils.TestCase):
|
||||
fut = asyncio.Future(loop=self.loop)
|
||||
fut.set_result(1)
|
||||
|
||||
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
|
||||
tr = self.socket_transport()
|
||||
tr._write_fut = fut
|
||||
tr.close()
|
||||
tr._loop_writing(fut)
|
||||
@@ -182,13 +195,13 @@ class ProactorSocketTransportTests(test_utils.TestCase):
|
||||
self.protocol.connection_lost.assert_called_with(None)
|
||||
|
||||
def test_abort(self):
|
||||
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
|
||||
tr = self.socket_transport()
|
||||
tr._force_close = mock.Mock()
|
||||
tr.abort()
|
||||
tr._force_close.assert_called_with(None)
|
||||
|
||||
def test_close(self):
|
||||
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
|
||||
tr = self.socket_transport()
|
||||
tr.close()
|
||||
test_utils.run_briefly(self.loop)
|
||||
self.protocol.connection_lost.assert_called_with(None)
|
||||
@@ -201,14 +214,14 @@ class ProactorSocketTransportTests(test_utils.TestCase):
|
||||
self.assertFalse(self.protocol.connection_lost.called)
|
||||
|
||||
def test_close_write_fut(self):
|
||||
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
|
||||
tr = self.socket_transport()
|
||||
tr._write_fut = mock.Mock()
|
||||
tr.close()
|
||||
test_utils.run_briefly(self.loop)
|
||||
self.assertFalse(self.protocol.connection_lost.called)
|
||||
|
||||
def test_close_buffer(self):
|
||||
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
|
||||
tr = self.socket_transport()
|
||||
tr._buffer = [b'data']
|
||||
tr.close()
|
||||
test_utils.run_briefly(self.loop)
|
||||
@@ -216,14 +229,14 @@ class ProactorSocketTransportTests(test_utils.TestCase):
|
||||
|
||||
@mock.patch('trollius.base_events.logger')
|
||||
def test_fatal_error(self, m_logging):
|
||||
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
|
||||
tr = self.socket_transport()
|
||||
tr._force_close = mock.Mock()
|
||||
tr._fatal_error(None)
|
||||
self.assertTrue(tr._force_close.called)
|
||||
self.assertTrue(m_logging.error.called)
|
||||
|
||||
def test_force_close(self):
|
||||
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
|
||||
tr = self.socket_transport()
|
||||
tr._buffer = [b'data']
|
||||
read_fut = tr._read_fut = mock.Mock()
|
||||
write_fut = tr._write_fut = mock.Mock()
|
||||
@@ -237,14 +250,14 @@ class ProactorSocketTransportTests(test_utils.TestCase):
|
||||
self.assertEqual(tr._conn_lost, 1)
|
||||
|
||||
def test_force_close_idempotent(self):
|
||||
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
|
||||
tr = self.socket_transport()
|
||||
tr._closing = True
|
||||
tr._force_close(None)
|
||||
test_utils.run_briefly(self.loop)
|
||||
self.assertFalse(self.protocol.connection_lost.called)
|
||||
|
||||
def test_fatal_error_2(self):
|
||||
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
|
||||
tr = self.socket_transport()
|
||||
tr._buffer = [b'data']
|
||||
tr._force_close(None)
|
||||
|
||||
@@ -253,14 +266,13 @@ class ProactorSocketTransportTests(test_utils.TestCase):
|
||||
self.assertEqual(None, tr._buffer)
|
||||
|
||||
def test_call_connection_lost(self):
|
||||
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
|
||||
tr = self.socket_transport()
|
||||
tr._call_connection_lost(None)
|
||||
self.assertTrue(self.protocol.connection_lost.called)
|
||||
self.assertTrue(self.sock.close.called)
|
||||
|
||||
def test_write_eof(self):
|
||||
tr = _ProactorSocketTransport(
|
||||
self.loop, self.sock, self.protocol)
|
||||
tr = self.socket_transport()
|
||||
self.assertTrue(tr.can_write_eof())
|
||||
tr.write_eof()
|
||||
self.sock.shutdown.assert_called_with(socket.SHUT_WR)
|
||||
@@ -269,7 +281,7 @@ class ProactorSocketTransportTests(test_utils.TestCase):
|
||||
tr.close()
|
||||
|
||||
def test_write_eof_buffer(self):
|
||||
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
|
||||
tr = self.socket_transport()
|
||||
f = asyncio.Future(loop=self.loop)
|
||||
tr._loop._proactor.send.return_value = f
|
||||
tr.write(b'data')
|
||||
@@ -313,11 +325,10 @@ class ProactorSocketTransportTests(test_utils.TestCase):
|
||||
self.assertFalse(tr.can_write_eof())
|
||||
with self.assertRaises(NotImplementedError):
|
||||
tr.write_eof()
|
||||
tr.close()
|
||||
close_transport(tr)
|
||||
|
||||
def test_pause_resume_reading(self):
|
||||
tr = _ProactorSocketTransport(
|
||||
self.loop, self.sock, self.protocol)
|
||||
tr = self.socket_transport()
|
||||
futures = []
|
||||
for msg in [b'data1', b'data2', b'data3', b'data4', b'']:
|
||||
f = asyncio.Future(loop=self.loop)
|
||||
@@ -344,6 +355,85 @@ class ProactorSocketTransportTests(test_utils.TestCase):
|
||||
tr.close()
|
||||
|
||||
|
||||
def pause_writing_transport(self, high):
|
||||
tr = self.socket_transport()
|
||||
tr.set_write_buffer_limits(high=high)
|
||||
|
||||
self.assertEqual(tr.get_write_buffer_size(), 0)
|
||||
self.assertFalse(self.protocol.pause_writing.called)
|
||||
self.assertFalse(self.protocol.resume_writing.called)
|
||||
return tr
|
||||
|
||||
def test_pause_resume_writing(self):
|
||||
tr = self.pause_writing_transport(high=4)
|
||||
|
||||
# write a large chunk, must pause writing
|
||||
fut = asyncio.Future(loop=self.loop)
|
||||
self.loop._proactor.send.return_value = fut
|
||||
tr.write(b'large data')
|
||||
self.loop._run_once()
|
||||
self.assertTrue(self.protocol.pause_writing.called)
|
||||
|
||||
# flush the buffer
|
||||
fut.set_result(None)
|
||||
self.loop._run_once()
|
||||
self.assertEqual(tr.get_write_buffer_size(), 0)
|
||||
self.assertTrue(self.protocol.resume_writing.called)
|
||||
|
||||
def test_pause_writing_2write(self):
|
||||
tr = self.pause_writing_transport(high=4)
|
||||
|
||||
# first short write, the buffer is not full (3 <= 4)
|
||||
fut1 = asyncio.Future(loop=self.loop)
|
||||
self.loop._proactor.send.return_value = fut1
|
||||
tr.write(b'123')
|
||||
self.loop._run_once()
|
||||
self.assertEqual(tr.get_write_buffer_size(), 3)
|
||||
self.assertFalse(self.protocol.pause_writing.called)
|
||||
|
||||
# fill the buffer, must pause writing (6 > 4)
|
||||
tr.write(b'abc')
|
||||
self.loop._run_once()
|
||||
self.assertEqual(tr.get_write_buffer_size(), 6)
|
||||
self.assertTrue(self.protocol.pause_writing.called)
|
||||
|
||||
def test_pause_writing_3write(self):
|
||||
tr = self.pause_writing_transport(high=4)
|
||||
|
||||
# first short write, the buffer is not full (1 <= 4)
|
||||
fut = asyncio.Future(loop=self.loop)
|
||||
self.loop._proactor.send.return_value = fut
|
||||
tr.write(b'1')
|
||||
self.loop._run_once()
|
||||
self.assertEqual(tr.get_write_buffer_size(), 1)
|
||||
self.assertFalse(self.protocol.pause_writing.called)
|
||||
|
||||
# second short write, the buffer is not full (3 <= 4)
|
||||
tr.write(b'23')
|
||||
self.loop._run_once()
|
||||
self.assertEqual(tr.get_write_buffer_size(), 3)
|
||||
self.assertFalse(self.protocol.pause_writing.called)
|
||||
|
||||
# fill the buffer, must pause writing (6 > 4)
|
||||
tr.write(b'abc')
|
||||
self.loop._run_once()
|
||||
self.assertEqual(tr.get_write_buffer_size(), 6)
|
||||
self.assertTrue(self.protocol.pause_writing.called)
|
||||
|
||||
def test_dont_pause_writing(self):
|
||||
tr = self.pause_writing_transport(high=4)
|
||||
|
||||
# write a large chunk which completes immediately,
# it should not pause writing
|
||||
fut = asyncio.Future(loop=self.loop)
|
||||
fut.set_result(None)
|
||||
self.loop._proactor.send.return_value = fut
|
||||
tr.write(b'very large data')
|
||||
self.loop._run_once()
|
||||
self.assertEqual(tr.get_write_buffer_size(), 0)
|
||||
self.assertFalse(self.protocol.pause_writing.called)
|
||||
|
||||
|
||||
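The pause/resume tests above pin down PEP 3156 write flow control: once buffered, unsent data exceeds the high-water mark set by set_write_buffer_limits(), the transport calls the protocol's pause_writing(), and it calls resume_writing() once the buffer drains back below the low-water mark. A minimal protocol honoring that contract (a sketch, not taken from the patch):

import trollius as asyncio

class ThrottledProducer(asyncio.Protocol):
    def connection_made(self, transport):
        self._paused = False
        self._transport = transport
        # pause once more than 4 bytes are queued (tiny, as in the tests)
        transport.set_write_buffer_limits(high=4)

    def pause_writing(self):
        # the transport's buffer went over the high-water mark
        self._paused = True

    def resume_writing(self):
        # buffer drained below the low-water mark; safe to write again
        self._paused = False

    def send(self, data):
        if not self._paused:
            self._transport.write(data)
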
class BaseProactorEventLoopTests(test_utils.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
@@ -357,19 +447,19 @@ class BaseProactorEventLoopTests(test_utils.TestCase):
|
||||
return (self.ssock, self.csock)
|
||||
|
||||
self.loop = EventLoop(self.proactor)
|
||||
self.set_event_loop(self.loop, cleanup=False)
|
||||
self.set_event_loop(self.loop)
|
||||
|
||||
@mock.patch.object(BaseProactorEventLoop, '_call_soon')
|
||||
@mock.patch.object(BaseProactorEventLoop, 'call_soon')
|
||||
@mock.patch.object(BaseProactorEventLoop, '_socketpair')
|
||||
def test_ctor(self, socketpair, _call_soon):
|
||||
def test_ctor(self, socketpair, call_soon):
|
||||
ssock, csock = socketpair.return_value = (
|
||||
mock.Mock(), mock.Mock())
|
||||
loop = BaseProactorEventLoop(self.proactor)
|
||||
self.assertIs(loop._ssock, ssock)
|
||||
self.assertIs(loop._csock, csock)
|
||||
self.assertEqual(loop._internal_fds, 1)
|
||||
_call_soon.assert_called_with(loop._loop_self_reading, (),
|
||||
check_loop=False)
|
||||
call_soon.assert_called_with(loop._loop_self_reading)
|
||||
loop.close()
|
||||
|
||||
def test_close_self_pipe(self):
|
||||
self.loop._close_self_pipe()
|
||||
@@ -379,6 +469,9 @@ class BaseProactorEventLoopTests(test_utils.TestCase):
|
||||
self.assertIsNone(self.loop._ssock)
|
||||
self.assertIsNone(self.loop._csock)
|
||||
|
||||
# Don't call close(): _close_self_pipe() cannot be called twice
|
||||
self.loop._closed = True
|
||||
|
||||
def test_close(self):
|
||||
self.loop._close_self_pipe = mock.Mock()
|
||||
self.loop.close()
|
||||
@@ -407,12 +500,17 @@ class BaseProactorEventLoopTests(test_utils.TestCase):
|
||||
self.proactor.accept.assert_called_with(self.sock)
|
||||
|
||||
def test_socketpair(self):
|
||||
class EventLoop(BaseProactorEventLoop):
|
||||
# override the destructor to not log a ResourceWarning
|
||||
def __del__(self):
|
||||
pass
|
||||
self.assertRaises(
|
||||
NotImplementedError, BaseProactorEventLoop, self.proactor)
|
||||
NotImplementedError, EventLoop, self.proactor)
|
||||
|
||||
def test_make_socket_transport(self):
|
||||
tr = self.loop._make_socket_transport(self.sock, asyncio.Protocol())
|
||||
self.assertIsInstance(tr, _ProactorSocketTransport)
|
||||
close_transport(tr)
|
||||
|
||||
def test_loop_self_reading(self):
|
||||
self.loop._loop_self_reading()
|
||||
@@ -430,9 +528,10 @@ class BaseProactorEventLoopTests(test_utils.TestCase):
|
||||
|
||||
def test_loop_self_reading_exception(self):
|
||||
self.loop.close = mock.Mock()
|
||||
self.loop.call_exception_handler = mock.Mock()
|
||||
self.proactor.recv.side_effect = OSError()
|
||||
self.assertRaises(OSError, self.loop._loop_self_reading)
|
||||
self.assertTrue(self.loop.close.called)
|
||||
self.loop._loop_self_reading()
|
||||
self.assertTrue(self.loop.call_exception_handler.called)
|
||||
|
||||
def test_write_to_self(self):
|
||||
self.loop._write_to_self()
|
||||
|
||||
@@ -1,11 +1,10 @@
"""Tests for queues.py"""

import unittest

import trollius as asyncio
from trollius import Return, From
from trollius import test_utils
from trollius.test_utils import mock
from trollius.test_utils import unittest

class _QueueTestBase(test_utils.TestCase):
|
||||
@@ -414,14 +413,16 @@ class PriorityQueueTests(_QueueTestBase):
|
||||
self.assertEqual([1, 2, 3], items)
|
||||
|
||||
|
||||
class JoinableQueueTests(_QueueTestBase):
|
||||
class _QueueJoinTestMixin(object):
|
||||
|
||||
q_class = None
|
||||
|
||||
def test_task_done_underflow(self):
|
||||
q = asyncio.JoinableQueue(loop=self.loop)
|
||||
q = self.q_class(loop=self.loop)
|
||||
self.assertRaises(ValueError, q.task_done)
|
||||
|
||||
def test_task_done(self):
|
||||
q = asyncio.JoinableQueue(loop=self.loop)
|
||||
q = self.q_class(loop=self.loop)
|
||||
for i in range(100):
|
||||
q.put_nowait(i)
|
||||
|
||||
@@ -456,7 +457,7 @@ class JoinableQueueTests(_QueueTestBase):
|
||||
self.loop.run_until_complete(asyncio.wait(tasks, loop=self.loop))
|
||||
|
||||
def test_join_empty_queue(self):
|
||||
q = asyncio.JoinableQueue(loop=self.loop)
|
||||
q = self.q_class(loop=self.loop)
|
||||
|
||||
# Test that a queue join()s successfully, and before anything else
|
||||
# (done twice for insurance).
|
||||
@@ -469,12 +470,24 @@ class JoinableQueueTests(_QueueTestBase):
|
||||
self.loop.run_until_complete(join())
|
||||
|
||||
def test_format(self):
|
||||
q = asyncio.JoinableQueue(loop=self.loop)
|
||||
q = self.q_class(loop=self.loop)
|
||||
self.assertEqual(q._format(), 'maxsize=0')
|
||||
|
||||
q._unfinished_tasks = 2
|
||||
self.assertEqual(q._format(), 'maxsize=0 tasks=2')
|
||||
|
||||
|
||||
class QueueJoinTests(_QueueJoinTestMixin, _QueueTestBase):
|
||||
q_class = asyncio.Queue
|
||||
|
||||
|
||||
class LifoQueueJoinTests(_QueueJoinTestMixin, _QueueTestBase):
|
||||
q_class = asyncio.LifoQueue
|
||||
|
||||
|
||||
class PriorityQueueJoinTests(_QueueJoinTestMixin, _QueueTestBase):
|
||||
q_class = asyncio.PriorityQueue
|
||||
|
||||
|
||||
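The mixin above replaces the old JoinableQueueTests: upstream asyncio merged JoinableQueue's join()/task_done() into Queue itself, so the same join tests now run against Queue, LifoQueue and PriorityQueue via the q_class attribute. The consumer contract they verify, sketched in Trollius's coroutine style:

import trollius as asyncio
from trollius import From

@asyncio.coroutine
def consumer(q):
    while True:
        item = yield From(q.get())
        try:
            pass  # handle the item (application-specific)
        finally:
            q.task_done()  # one task_done() per get(); q.join() unblocks
                           # once every queued item has been marked done
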
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,214 +1,445 @@
|
||||
"""Tests for selectors.py."""
|
||||
|
||||
import unittest
|
||||
import errno
|
||||
import os
|
||||
import random
|
||||
import signal
|
||||
import sys
|
||||
from time import sleep
|
||||
try:
|
||||
import resource
|
||||
except ImportError:
|
||||
resource = None
|
||||
|
||||
from trollius import selectors
|
||||
from trollius import test_support as support
|
||||
from trollius import test_utils
|
||||
from trollius.test_utils import mock
|
||||
from trollius.test_utils import socketpair
|
||||
from trollius.test_utils import unittest
|
||||
from trollius.time_monotonic import time_monotonic as time
|
||||
|
||||
|
||||
class FakeSelector(selectors._BaseSelectorImpl):
|
||||
"""Trivial non-abstract subclass of BaseSelector."""
|
||||
|
||||
def select(self, timeout=None):
|
||||
raise NotImplementedError
|
||||
def find_ready_matching(ready, flag):
|
||||
match = []
|
||||
for key, events in ready:
|
||||
if events & flag:
|
||||
match.append(key.fileobj)
|
||||
return match
|
||||
|
||||
|
||||
class _SelectorMappingTests(test_utils.TestCase):
|
||||
class BaseSelectorTestCase(object):
|
||||
|
||||
def test_len(self):
|
||||
s = FakeSelector()
|
||||
map = selectors._SelectorMapping(s)
|
||||
self.assertTrue(map.__len__() == 0)
|
||||
|
||||
f = mock.Mock()
|
||||
f.fileno.return_value = 10
|
||||
s.register(f, selectors.EVENT_READ, None)
|
||||
self.assertTrue(len(map) == 1)
|
||||
|
||||
def test_getitem(self):
|
||||
s = FakeSelector()
|
||||
map = selectors._SelectorMapping(s)
|
||||
f = mock.Mock()
|
||||
f.fileno.return_value = 10
|
||||
s.register(f, selectors.EVENT_READ, None)
|
||||
attended = selectors.SelectorKey(f, 10, selectors.EVENT_READ, None)
|
||||
self.assertEqual(attended, map.__getitem__(f))
|
||||
|
||||
def test_getitem_key_error(self):
|
||||
s = FakeSelector()
|
||||
map = selectors._SelectorMapping(s)
|
||||
self.assertTrue(len(map) == 0)
|
||||
f = mock.Mock()
|
||||
f.fileno.return_value = 10
|
||||
s.register(f, selectors.EVENT_READ, None)
|
||||
self.assertRaises(KeyError, map.__getitem__, 5)
|
||||
|
||||
def test_iter(self):
|
||||
s = FakeSelector()
|
||||
map = selectors._SelectorMapping(s)
|
||||
self.assertTrue(len(map) == 0)
|
||||
f = mock.Mock()
|
||||
f.fileno.return_value = 5
|
||||
s.register(f, selectors.EVENT_READ, None)
|
||||
counter = 0
|
||||
for fileno in map.__iter__():
|
||||
self.assertEqual(5, fileno)
|
||||
counter += 1
|
||||
|
||||
for idx in map:
|
||||
self.assertEqual(f, map[idx].fileobj)
|
||||
self.assertEqual(1, counter)
|
||||
|
||||
|
||||
class BaseSelectorTests(test_utils.TestCase):
|
||||
def test_fileobj_to_fd(self):
|
||||
self.assertEqual(10, selectors._fileobj_to_fd(10))
|
||||
|
||||
f = mock.Mock()
|
||||
f.fileno.return_value = 10
|
||||
self.assertEqual(10, selectors._fileobj_to_fd(f))
|
||||
|
||||
f.fileno.side_effect = AttributeError
|
||||
self.assertRaises(ValueError, selectors._fileobj_to_fd, f)
|
||||
|
||||
f.fileno.return_value = -1
|
||||
self.assertRaises(ValueError, selectors._fileobj_to_fd, f)
|
||||
|
||||
def test_selector_key_repr(self):
|
||||
key = selectors.SelectorKey(10, 10, selectors.EVENT_READ, None)
|
||||
self.assertEqual(
|
||||
"SelectorKey(fileobj=10, fd=10, events=1, data=None)", repr(key))
|
||||
def make_socketpair(self):
|
||||
rd, wr = socketpair()
|
||||
self.addCleanup(rd.close)
|
||||
self.addCleanup(wr.close)
|
||||
return rd, wr
|
||||
|
||||
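Before the individual cases, the shape of the API these rewritten tests now exercise end-to-end, register, select, unregister, against a real socket pair instead of the mocks the old tests used (a sketch built from the same helpers this file imports):

from trollius import selectors
from trollius.test_utils import socketpair

sel = selectors.DefaultSelector()
rd, wr = socketpair()
sel.register(rd, selectors.EVENT_READ, data="reader")
wr.send(b"x")                        # make the read end ready
for key, events in sel.select(timeout=1.0):
    if events & selectors.EVENT_READ:
        key.fileobj.recv(1)          # key.data == "reader"
sel.unregister(rd)
sel.close()
rd.close()
wr.close()
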
def test_register(self):
|
||||
fobj = mock.Mock()
|
||||
fobj.fileno.return_value = 10
|
||||
s = self.SELECTOR()
|
||||
self.addCleanup(s.close)
|
||||
|
||||
s = FakeSelector()
|
||||
key = s.register(fobj, selectors.EVENT_READ)
|
||||
rd, wr = self.make_socketpair()
|
||||
|
||||
key = s.register(rd, selectors.EVENT_READ, "data")
|
||||
self.assertIsInstance(key, selectors.SelectorKey)
|
||||
self.assertEqual(key.fd, 10)
|
||||
self.assertIs(key, s._fd_to_key[10])
|
||||
self.assertEqual(key.fileobj, rd)
|
||||
self.assertEqual(key.fd, rd.fileno())
|
||||
self.assertEqual(key.events, selectors.EVENT_READ)
|
||||
self.assertEqual(key.data, "data")
|
||||
|
||||
def test_register_unknown_event(self):
|
||||
s = FakeSelector()
|
||||
self.assertRaises(ValueError, s.register, mock.Mock(), 999999)
|
||||
# register an unknown event
|
||||
self.assertRaises(ValueError, s.register, 0, 999999)
|
||||
|
||||
def test_register_already_registered(self):
|
||||
fobj = mock.Mock()
|
||||
fobj.fileno.return_value = 10
|
||||
# register an invalid FD
|
||||
self.assertRaises(ValueError, s.register, -10, selectors.EVENT_READ)
|
||||
|
||||
s = FakeSelector()
|
||||
s.register(fobj, selectors.EVENT_READ)
|
||||
self.assertRaises(KeyError, s.register, fobj, selectors.EVENT_READ)
|
||||
# register twice
|
||||
self.assertRaises(KeyError, s.register, rd, selectors.EVENT_READ)
|
||||
|
||||
# register the same FD, but with a different object
|
||||
self.assertRaises(KeyError, s.register, rd.fileno(),
|
||||
selectors.EVENT_READ)
|
||||
|
||||
def test_unregister(self):
|
||||
fobj = mock.Mock()
|
||||
fobj.fileno.return_value = 10
|
||||
s = self.SELECTOR()
|
||||
self.addCleanup(s.close)
|
||||
|
||||
s = FakeSelector()
|
||||
s.register(fobj, selectors.EVENT_READ)
|
||||
s.unregister(fobj)
|
||||
self.assertFalse(s._fd_to_key)
|
||||
rd, wr = self.make_socketpair()
|
||||
|
||||
def test_unregister_unknown(self):
|
||||
fobj = mock.Mock()
|
||||
fobj.fileno.return_value = 10
|
||||
s.register(rd, selectors.EVENT_READ)
|
||||
s.unregister(rd)
|
||||
|
||||
s = FakeSelector()
|
||||
self.assertRaises(KeyError, s.unregister, fobj)
|
||||
# unregister an unknown file obj
|
||||
self.assertRaises(KeyError, s.unregister, 999999)
|
||||
|
||||
def test_modify_unknown(self):
|
||||
fobj = mock.Mock()
|
||||
fobj.fileno.return_value = 10
|
||||
# unregister twice
|
||||
self.assertRaises(KeyError, s.unregister, rd)
|
||||
|
||||
s = FakeSelector()
|
||||
self.assertRaises(KeyError, s.modify, fobj, 1)
|
||||
def test_unregister_after_fd_close(self):
|
||||
s = self.SELECTOR()
|
||||
self.addCleanup(s.close)
|
||||
rd, wr = self.make_socketpair()
|
||||
r, w = rd.fileno(), wr.fileno()
|
||||
s.register(r, selectors.EVENT_READ)
|
||||
s.register(w, selectors.EVENT_WRITE)
|
||||
rd.close()
|
||||
wr.close()
|
||||
s.unregister(r)
|
||||
s.unregister(w)
|
||||
|
||||
@unittest.skipUnless(os.name == 'posix', "requires posix")
|
||||
def test_unregister_after_fd_close_and_reuse(self):
|
||||
s = self.SELECTOR()
|
||||
self.addCleanup(s.close)
|
||||
rd, wr = self.make_socketpair()
|
||||
r, w = rd.fileno(), wr.fileno()
|
||||
s.register(r, selectors.EVENT_READ)
|
||||
s.register(w, selectors.EVENT_WRITE)
|
||||
rd2, wr2 = self.make_socketpair()
|
||||
rd.close()
|
||||
wr.close()
|
||||
os.dup2(rd2.fileno(), r)
|
||||
os.dup2(wr2.fileno(), w)
|
||||
self.addCleanup(os.close, r)
|
||||
self.addCleanup(os.close, w)
|
||||
s.unregister(r)
|
||||
s.unregister(w)
|
||||
|
||||
def test_unregister_after_socket_close(self):
|
||||
s = self.SELECTOR()
|
||||
self.addCleanup(s.close)
|
||||
rd, wr = self.make_socketpair()
|
||||
s.register(rd, selectors.EVENT_READ)
|
||||
s.register(wr, selectors.EVENT_WRITE)
|
||||
rd.close()
|
||||
wr.close()
|
||||
s.unregister(rd)
|
||||
s.unregister(wr)
|
||||
|
||||
def test_modify(self):
|
||||
fobj = mock.Mock()
|
||||
fobj.fileno.return_value = 10
|
||||
s = self.SELECTOR()
|
||||
self.addCleanup(s.close)
|
||||
|
||||
s = FakeSelector()
|
||||
key = s.register(fobj, selectors.EVENT_READ)
|
||||
key2 = s.modify(fobj, selectors.EVENT_WRITE)
|
||||
rd, wr = self.make_socketpair()
|
||||
|
||||
key = s.register(rd, selectors.EVENT_READ)
|
||||
|
||||
# modify events
|
||||
key2 = s.modify(rd, selectors.EVENT_WRITE)
|
||||
self.assertNotEqual(key.events, key2.events)
|
||||
self.assertEqual(
|
||||
selectors.SelectorKey(fobj, 10, selectors.EVENT_WRITE, None),
|
||||
s.get_key(fobj))
|
||||
self.assertEqual(key2, s.get_key(rd))
|
||||
|
||||
def test_modify_data(self):
|
||||
fobj = mock.Mock()
|
||||
fobj.fileno.return_value = 10
|
||||
s.unregister(rd)
|
||||
|
||||
# modify data
|
||||
d1 = object()
|
||||
d2 = object()
|
||||
|
||||
s = FakeSelector()
|
||||
key = s.register(fobj, selectors.EVENT_READ, d1)
|
||||
key2 = s.modify(fobj, selectors.EVENT_READ, d2)
|
||||
key = s.register(rd, selectors.EVENT_READ, d1)
|
||||
key2 = s.modify(rd, selectors.EVENT_READ, d2)
|
||||
self.assertEqual(key.events, key2.events)
|
||||
self.assertNotEqual(key.data, key2.data)
|
||||
self.assertEqual(
|
||||
selectors.SelectorKey(fobj, 10, selectors.EVENT_READ, d2),
|
||||
s.get_key(fobj))
|
||||
self.assertEqual(key2, s.get_key(rd))
|
||||
self.assertEqual(key2.data, d2)
|
||||
|
||||
def test_modify_data_use_a_shortcut(self):
|
||||
fobj = mock.Mock()
|
||||
fobj.fileno.return_value = 10
|
||||
# modify unknown file obj
|
||||
self.assertRaises(KeyError, s.modify, 999999, selectors.EVENT_READ)
|
||||
|
||||
d1 = object()
|
||||
d2 = object()
|
||||
|
||||
s = FakeSelector()
|
||||
key = s.register(fobj, selectors.EVENT_READ, d1)
|
||||
|
||||
s.unregister = mock.Mock()
|
||||
# modify use a shortcut
|
||||
d3 = object()
|
||||
s.register = mock.Mock()
|
||||
key2 = s.modify(fobj, selectors.EVENT_READ, d2)
|
||||
self.assertFalse(s.unregister.called)
|
||||
s.unregister = mock.Mock()
|
||||
|
||||
s.modify(rd, selectors.EVENT_READ, d3)
|
||||
self.assertFalse(s.register.called)
|
||||
|
||||
def test_modify_same(self):
|
||||
fobj = mock.Mock()
|
||||
fobj.fileno.return_value = 10
|
||||
|
||||
data = object()
|
||||
|
||||
s = FakeSelector()
|
||||
key = s.register(fobj, selectors.EVENT_READ, data)
|
||||
key2 = s.modify(fobj, selectors.EVENT_READ, data)
|
||||
self.assertIs(key, key2)
|
||||
|
||||
def test_select(self):
|
||||
s = FakeSelector()
|
||||
self.assertRaises(NotImplementedError, s.select)
|
||||
self.assertFalse(s.unregister.called)
|
||||
|
||||
def test_close(self):
|
||||
s = FakeSelector()
|
||||
s.register(1, selectors.EVENT_READ)
|
||||
s = self.SELECTOR()
|
||||
self.addCleanup(s.close)
|
||||
|
||||
mapping = s.get_map()
|
||||
rd, wr = self.make_socketpair()
|
||||
|
||||
s.register(rd, selectors.EVENT_READ)
|
||||
s.register(wr, selectors.EVENT_WRITE)
|
||||
|
||||
s.close()
|
||||
self.assertFalse(s._fd_to_key)
|
||||
self.assertRaises(RuntimeError, s.get_key, rd)
|
||||
self.assertRaises(RuntimeError, s.get_key, wr)
|
||||
self.assertRaises(KeyError, mapping.__getitem__, rd)
|
||||
self.assertRaises(KeyError, mapping.__getitem__, wr)
|
||||
|
||||
def test_get_key(self):
|
||||
s = self.SELECTOR()
|
||||
self.addCleanup(s.close)
|
||||
|
||||
rd, wr = self.make_socketpair()
|
||||
|
||||
key = s.register(rd, selectors.EVENT_READ, "data")
|
||||
self.assertEqual(key, s.get_key(rd))
|
||||
|
||||
# unknown file obj
|
||||
self.assertRaises(KeyError, s.get_key, 999999)
|
||||
|
||||
def test_get_map(self):
|
||||
s = self.SELECTOR()
|
||||
self.addCleanup(s.close)
|
||||
|
||||
rd, wr = self.make_socketpair()
|
||||
|
||||
keys = s.get_map()
|
||||
self.assertFalse(keys)
|
||||
self.assertEqual(len(keys), 0)
|
||||
self.assertEqual(list(keys), [])
|
||||
key = s.register(rd, selectors.EVENT_READ, "data")
|
||||
self.assertIn(rd, keys)
|
||||
self.assertEqual(key, keys[rd])
|
||||
self.assertEqual(len(keys), 1)
|
||||
self.assertEqual(list(keys), [rd.fileno()])
|
||||
self.assertEqual(list(keys.values()), [key])
|
||||
|
||||
# unknown file obj
|
||||
with self.assertRaises(KeyError):
|
||||
keys[999999]
|
||||
|
||||
# Read-only mapping
|
||||
with self.assertRaises(TypeError):
|
||||
del keys[rd]
|
||||
|
||||
def test_select(self):
|
||||
s = self.SELECTOR()
|
||||
self.addCleanup(s.close)
|
||||
|
||||
rd, wr = self.make_socketpair()
|
||||
|
||||
s.register(rd, selectors.EVENT_READ)
|
||||
wr_key = s.register(wr, selectors.EVENT_WRITE)
|
||||
|
||||
result = s.select()
|
||||
for key, events in result:
|
||||
self.assertTrue(isinstance(key, selectors.SelectorKey))
|
||||
self.assertTrue(events)
|
||||
self.assertFalse(events & ~(selectors.EVENT_READ |
|
||||
selectors.EVENT_WRITE))
|
||||
|
||||
self.assertEqual([(wr_key, selectors.EVENT_WRITE)], result)
|
||||
|
||||
def test_context_manager(self):
|
||||
s = FakeSelector()
|
||||
s = self.SELECTOR()
|
||||
self.addCleanup(s.close)
|
||||
|
||||
rd, wr = self.make_socketpair()
|
||||
|
||||
with s as sel:
|
||||
sel.register(1, selectors.EVENT_READ)
|
||||
sel.register(rd, selectors.EVENT_READ)
|
||||
sel.register(wr, selectors.EVENT_WRITE)
|
||||
|
||||
self.assertFalse(s._fd_to_key)
|
||||
self.assertRaises(RuntimeError, s.get_key, rd)
|
||||
self.assertRaises(RuntimeError, s.get_key, wr)
|
||||
|
||||
def test_key_from_fd(self):
|
||||
s = FakeSelector()
|
||||
key = s.register(1, selectors.EVENT_READ)
|
||||
def test_fileno(self):
|
||||
s = self.SELECTOR()
|
||||
self.addCleanup(s.close)
|
||||
|
||||
self.assertIs(key, s._key_from_fd(1))
|
||||
self.assertIsNone(s._key_from_fd(10))
|
||||
if hasattr(s, 'fileno'):
|
||||
fd = s.fileno()
|
||||
self.assertTrue(isinstance(fd, int))
|
||||
self.assertGreaterEqual(fd, 0)
|
||||
|
||||
def test_selector(self):
|
||||
s = self.SELECTOR()
|
||||
self.addCleanup(s.close)
|
||||
|
||||
NUM_SOCKETS = 12
|
||||
MSG = b" This is a test."
|
||||
MSG_LEN = len(MSG)
|
||||
readers = []
|
||||
writers = []
|
||||
r2w = {}
|
||||
w2r = {}
|
||||
|
||||
for i in range(NUM_SOCKETS):
|
||||
rd, wr = self.make_socketpair()
|
||||
s.register(rd, selectors.EVENT_READ)
|
||||
s.register(wr, selectors.EVENT_WRITE)
|
||||
readers.append(rd)
|
||||
writers.append(wr)
|
||||
r2w[rd] = wr
|
||||
w2r[wr] = rd
|
||||
|
||||
bufs = []
|
||||
|
||||
while writers:
|
||||
ready = s.select()
|
||||
ready_writers = find_ready_matching(ready, selectors.EVENT_WRITE)
|
||||
if not ready_writers:
|
||||
self.fail("no sockets ready for writing")
|
||||
wr = random.choice(ready_writers)
|
||||
wr.send(MSG)
|
||||
|
||||
for i in range(10):
|
||||
ready = s.select()
|
||||
ready_readers = find_ready_matching(ready,
|
||||
selectors.EVENT_READ)
|
||||
if ready_readers:
|
||||
break
|
||||
# there might be a delay between the write to the write end and
# the moment the read end is reported ready
|
||||
sleep(0.1)
|
||||
else:
|
||||
self.fail("no sockets ready for reading")
|
||||
self.assertEqual([w2r[wr]], ready_readers)
|
||||
rd = ready_readers[0]
|
||||
buf = rd.recv(MSG_LEN)
|
||||
self.assertEqual(len(buf), MSG_LEN)
|
||||
bufs.append(buf)
|
||||
s.unregister(r2w[rd])
|
||||
s.unregister(rd)
|
||||
writers.remove(r2w[rd])
|
||||
|
||||
self.assertEqual(bufs, [MSG] * NUM_SOCKETS)
|
||||
|
||||
@unittest.skipIf(sys.platform == 'win32',
|
||||
'select.select() cannot be used with empty fd sets')
|
||||
def test_empty_select(self):
|
||||
s = self.SELECTOR()
|
||||
self.addCleanup(s.close)
|
||||
self.assertEqual(s.select(timeout=0), [])
|
||||
|
||||
def test_timeout(self):
|
||||
s = self.SELECTOR()
|
||||
self.addCleanup(s.close)
|
||||
|
||||
rd, wr = self.make_socketpair()
|
||||
|
||||
s.register(wr, selectors.EVENT_WRITE)
|
||||
t = time()
|
||||
self.assertEqual(1, len(s.select(0)))
|
||||
self.assertEqual(1, len(s.select(-1)))
|
||||
self.assertLess(time() - t, 0.5)
|
||||
|
||||
s.unregister(wr)
|
||||
s.register(rd, selectors.EVENT_READ)
|
||||
t = time()
|
||||
self.assertFalse(s.select(0))
|
||||
self.assertFalse(s.select(-1))
|
||||
self.assertLess(time() - t, 0.5)
|
||||
|
||||
t0 = time()
|
||||
self.assertFalse(s.select(1))
|
||||
t1 = time()
|
||||
dt = t1 - t0
|
||||
# Tolerate 2.0 seconds for very slow buildbots
|
||||
self.assertTrue(0.8 <= dt <= 2.0, dt)
|
||||
|
||||
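test_timeout above encodes the selector timeout contract: a zero or negative timeout polls and returns immediately, a positive timeout waits at most roughly that long, and None blocks until a descriptor is ready. In sketch form:

from trollius import selectors
from trollius.test_utils import socketpair

sel = selectors.DefaultSelector()
rd, wr = socketpair()
sel.register(rd, selectors.EVENT_READ)

sel.select(timeout=0)      # non-blocking poll
sel.select(timeout=-1)     # negative behaves like 0: returns at once
sel.select(timeout=1.0)    # blocks up to ~1 second if nothing is ready
# sel.select(timeout=None) # would block until rd becomes readable

sel.close()
rd.close()
wr.close()
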
@unittest.skipUnless(hasattr(signal, "alarm"),
|
||||
"signal.alarm() required for this test")
|
||||
def test_select_interrupt(self):
|
||||
s = self.SELECTOR()
|
||||
self.addCleanup(s.close)
|
||||
|
||||
rd, wr = self.make_socketpair()
|
||||
|
||||
orig_alrm_handler = signal.signal(signal.SIGALRM, lambda *args: None)
|
||||
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
|
||||
self.addCleanup(signal.alarm, 0)
|
||||
|
||||
signal.alarm(1)
|
||||
|
||||
s.register(rd, selectors.EVENT_READ)
|
||||
t = time()
|
||||
self.assertFalse(s.select(2))
|
||||
self.assertLess(time() - t, 2.5)
|
||||
|
||||
|
||||
class ScalableSelectorMixIn(object):
|
||||
|
||||
# see issue #18963 for why it's skipped on older OS X versions
|
||||
@support.requires_mac_ver(10, 5)
|
||||
@unittest.skipUnless(resource, "Test needs resource module")
|
||||
def test_above_fd_setsize(self):
|
||||
# A scalable implementation should have no problem with more than
|
||||
# FD_SETSIZE file descriptors. Since we don't know the value, we just
|
||||
# try to set the soft RLIMIT_NOFILE to the hard RLIMIT_NOFILE ceiling.
|
||||
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
|
||||
try:
|
||||
resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
|
||||
self.addCleanup(resource.setrlimit, resource.RLIMIT_NOFILE,
|
||||
(soft, hard))
|
||||
NUM_FDS = min(hard, 2**16)
|
||||
except (OSError, ValueError):
|
||||
NUM_FDS = soft
|
||||
|
||||
# guard for already allocated FDs (stdin, stdout...)
|
||||
NUM_FDS -= 32
|
||||
|
||||
s = self.SELECTOR()
|
||||
self.addCleanup(s.close)
|
||||
|
||||
for i in range(NUM_FDS // 2):
|
||||
try:
|
||||
rd, wr = self.make_socketpair()
|
||||
except OSError:
|
||||
# too many FDs, skip - note that we should only catch EMFILE
|
||||
# here, but apparently *BSD and Solaris can fail upon connect()
|
||||
# or bind() with EADDRNOTAVAIL, so let's be safe
|
||||
self.skipTest("FD limit reached")
|
||||
|
||||
try:
|
||||
s.register(rd, selectors.EVENT_READ)
|
||||
s.register(wr, selectors.EVENT_WRITE)
|
||||
except OSError as e:
|
||||
if e.errno == errno.ENOSPC:
|
||||
# this can be raised by epoll if we go over
|
||||
# fs.epoll.max_user_watches sysctl
|
||||
self.skipTest("FD limit reached")
|
||||
raise
|
||||
|
||||
self.assertEqual(NUM_FDS // 2, len(s.select()))
|
||||
|
||||
|
||||
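The FD_SETSIZE test above needs more file descriptors than the usual soft limit (often 1024) allows, hence the rlimit dance. The core of it, isolated (POSIX only; requires the resource module):

import resource

soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
# Raise the soft FD limit to the hard ceiling for this process only;
# restore (soft, hard) afterwards, as the test does via addCleanup().
resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
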
class DefaultSelectorTestCase(BaseSelectorTestCase, test_utils.TestCase):
|
||||
|
||||
SELECTOR = selectors.DefaultSelector
|
||||
|
||||
|
||||
class SelectSelectorTestCase(BaseSelectorTestCase, test_utils.TestCase):
|
||||
|
||||
SELECTOR = selectors.SelectSelector
|
||||
|
||||
|
||||
@unittest.skipUnless(hasattr(selectors, 'PollSelector'),
|
||||
"Test needs selectors.PollSelector")
|
||||
class PollSelectorTestCase(BaseSelectorTestCase, ScalableSelectorMixIn,
|
||||
test_utils.TestCase):
|
||||
|
||||
SELECTOR = getattr(selectors, 'PollSelector', None)
|
||||
|
||||
|
||||
@unittest.skipUnless(hasattr(selectors, 'EpollSelector'),
|
||||
"Test needs selectors.EpollSelector")
|
||||
class EpollSelectorTestCase(BaseSelectorTestCase, ScalableSelectorMixIn,
|
||||
test_utils.TestCase):
|
||||
|
||||
SELECTOR = getattr(selectors, 'EpollSelector', None)
|
||||
|
||||
|
||||
@unittest.skipUnless(hasattr(selectors, 'KqueueSelector'),
|
||||
"Test needs selectors.KqueueSelector)")
|
||||
class KqueueSelectorTestCase(BaseSelectorTestCase, ScalableSelectorMixIn,
|
||||
test_utils.TestCase):
|
||||
|
||||
SELECTOR = getattr(selectors, 'KqueueSelector', None)
|
||||
|
||||
|
||||
@unittest.skipUnless(hasattr(selectors, 'DevpollSelector'),
|
||||
"Test needs selectors.DevpollSelector")
|
||||
class DevpollSelectorTestCase(BaseSelectorTestCase, ScalableSelectorMixIn,
|
||||
test_utils.TestCase):
|
||||
|
||||
SELECTOR = getattr(selectors, 'DevpollSelector', None)
|
||||
|
||||
if hasattr(selectors.DefaultSelector, 'fileno'):
|
||||
def test_fileno(self):
|
||||
self.assertIsInstance(selectors.DefaultSelector().fileno(), int)
|
||||
|
||||
|
||||
if __name__ == '__main__':
    unittest.main()
|
||||
|
||||
72
tests/test_sslproto.py
Normal file
@@ -0,0 +1,72 @@
|
||||
"""Tests for asyncio/sslproto.py."""
|
||||
|
||||
try:
|
||||
import ssl
|
||||
except ImportError:
|
||||
ssl = None
|
||||
|
||||
import trollius as asyncio
|
||||
from trollius import ConnectionResetError
|
||||
from trollius import sslproto
|
||||
from trollius import test_utils
|
||||
from trollius.test_utils import mock
|
||||
from trollius.test_utils import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(ssl is None, 'No ssl module')
|
||||
class SslProtoHandshakeTests(test_utils.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.loop = asyncio.new_event_loop()
|
||||
self.set_event_loop(self.loop)
|
||||
|
||||
def ssl_protocol(self, waiter=None):
|
||||
sslcontext = test_utils.dummy_ssl_context()
|
||||
app_proto = asyncio.Protocol()
|
||||
proto = sslproto.SSLProtocol(self.loop, app_proto, sslcontext, waiter)
|
||||
self.addCleanup(proto._app_transport.close)
|
||||
return proto
|
||||
|
||||
def connection_made(self, ssl_proto, do_handshake=None):
|
||||
transport = mock.Mock()
|
||||
sslpipe = mock.Mock()
|
||||
sslpipe.shutdown.return_value = b''
|
||||
if do_handshake:
|
||||
sslpipe.do_handshake.side_effect = do_handshake
|
||||
else:
|
||||
def mock_handshake(callback):
|
||||
return []
|
||||
sslpipe.do_handshake.side_effect = mock_handshake
|
||||
with mock.patch('trollius.sslproto._SSLPipe', return_value=sslpipe):
|
||||
ssl_proto.connection_made(transport)
|
||||
|
||||
def test_cancel_handshake(self):
|
||||
# Python issue #23197: cancelling a handshake must not raise an
|
||||
# exception or log an error, even if the handshake failed
|
||||
waiter = asyncio.Future(loop=self.loop)
|
||||
ssl_proto = self.ssl_protocol(waiter)
|
||||
handshake_fut = asyncio.Future(loop=self.loop)
|
||||
|
||||
def do_handshake(callback):
|
||||
exc = Exception()
|
||||
callback(exc)
|
||||
handshake_fut.set_result(None)
|
||||
return []
|
||||
|
||||
waiter.cancel()
|
||||
self.connection_made(ssl_proto, do_handshake)
|
||||
|
||||
with test_utils.disable_logger():
|
||||
self.loop.run_until_complete(handshake_fut)
|
||||
|
||||
def test_eof_received_waiter(self):
|
||||
waiter = asyncio.Future(loop=self.loop)
|
||||
ssl_proto = self.ssl_protocol(waiter)
|
||||
self.connection_made(ssl_proto)
|
||||
ssl_proto.eof_received()
|
||||
test_utils.run_briefly(self.loop)
|
||||
self.assertIsInstance(waiter.exception(), ConnectionResetError)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
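The waiter argument threaded through the SSL tests above is a general Trollius idiom: the protocol completes a future when its handshake (or connection) is ready, and the creating coroutine simply waits on that future, which is why cancelling it (test_cancel_handshake) must stay quiet. A sketch of the consuming side (hedged: make_protocol is a hypothetical factory, not a Trollius API):

import trollius as asyncio
from trollius import From, Return

@asyncio.coroutine
def start(loop, make_protocol):
    waiter = asyncio.Future(loop=loop)
    proto = make_protocol(waiter)   # protocol resolves `waiter` when ready
    yield From(waiter)              # raises if the handshake failed
    raise Return(proto)
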
@@ -5,7 +5,6 @@ import io
|
||||
import os
|
||||
import socket
|
||||
import sys
|
||||
import unittest
|
||||
try:
|
||||
import ssl
|
||||
except ImportError:
|
||||
@@ -16,6 +15,7 @@ from trollius import Return, From
|
||||
from trollius import compat
|
||||
from trollius import test_utils
|
||||
from trollius.test_utils import mock
|
||||
from trollius.test_utils import unittest
|
||||
|
||||
|
||||
class StreamReaderTests(test_utils.TestCase):
|
||||
@@ -56,7 +56,7 @@ class StreamReaderTests(test_utils.TestCase):
|
||||
loop=self.loop)
|
||||
self._basetest_open_connection(conn_fut)
|
||||
|
||||
@test_utils.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
|
||||
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
|
||||
def test_open_unix_connection(self):
|
||||
with test_utils.run_test_unix_server() as httpd:
|
||||
conn_fut = asyncio.open_unix_connection(httpd.address,
|
||||
@@ -75,7 +75,7 @@ class StreamReaderTests(test_utils.TestCase):
|
||||
|
||||
writer.close()
|
||||
|
||||
@test_utils.skipIf(ssl is None, 'No ssl module')
|
||||
@unittest.skipIf(ssl is None, 'No ssl module')
|
||||
def test_open_connection_no_loop_ssl(self):
|
||||
with test_utils.run_test_server(use_ssl=True) as httpd:
|
||||
conn_fut = asyncio.open_connection(
|
||||
@@ -85,8 +85,8 @@ class StreamReaderTests(test_utils.TestCase):
|
||||
|
||||
self._basetest_open_connection_no_loop_ssl(conn_fut)
|
||||
|
||||
@test_utils.skipIf(ssl is None, 'No ssl module')
|
||||
@test_utils.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
|
||||
@unittest.skipIf(ssl is None, 'No ssl module')
|
||||
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
|
||||
def test_open_unix_connection_no_loop_ssl(self):
|
||||
with test_utils.run_test_unix_server(use_ssl=True) as httpd:
|
||||
conn_fut = asyncio.open_unix_connection(
|
||||
@@ -112,7 +112,7 @@ class StreamReaderTests(test_utils.TestCase):
|
||||
loop=self.loop)
|
||||
self._basetest_open_connection_error(conn_fut)
|
||||
|
||||
@test_utils.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
|
||||
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
|
||||
def test_open_unix_connection_error(self):
|
||||
with test_utils.run_test_unix_server() as httpd:
|
||||
conn_fut = asyncio.open_unix_connection(httpd.address,
|
||||
@@ -416,11 +416,7 @@ class StreamReaderTests(test_utils.TestCase):
|
||||
|
||||
@asyncio.coroutine
|
||||
def set_err():
|
||||
stream.set_exception(ValueError())
|
||||
|
||||
@asyncio.coroutine
|
||||
def readline():
|
||||
yield From(stream.readline())
|
||||
self.loop.call_soon(stream.set_exception, ValueError())
|
||||
|
||||
t1 = asyncio.Task(stream.readline(), loop=self.loop)
|
||||
t2 = asyncio.Task(set_err(), loop=self.loop)
|
||||
@@ -432,11 +428,7 @@ class StreamReaderTests(test_utils.TestCase):
|
||||
def test_exception_cancel(self):
|
||||
stream = asyncio.StreamReader(loop=self.loop)
|
||||
|
||||
@asyncio.coroutine
|
||||
def read_a_line():
|
||||
yield From(stream.readline())
|
||||
|
||||
t = asyncio.Task(read_a_line(), loop=self.loop)
|
||||
t = asyncio.Task(stream.readline(), loop=self.loop)
|
||||
test_utils.run_briefly(self.loop)
|
||||
t.cancel()
|
||||
test_utils.run_briefly(self.loop)
|
||||
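The simplifications above read more naturally with Trollius's coroutine dialect in mind: where Python 3 asyncio writes "yield from coro()" and a plain "return value", Trollius, which must also run on Python 2, writes "yield From(coro())" and "raise Return(value)". Side by side:

import trollius as asyncio
from trollius import From, Return

@asyncio.coroutine
def read_first_line(reader):
    # asyncio on Python 3:  line = yield from reader.readline()
    line = yield From(reader.readline())
    # asyncio on Python 3:  return line
    raise Return(line)
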
@@ -519,7 +511,7 @@ class StreamReaderTests(test_utils.TestCase):
|
||||
server.stop()
|
||||
self.assertEqual(msg, b"hello world!\n")
|
||||
|
||||
@test_utils.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
|
||||
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
|
||||
def test_start_unix_server(self):
|
||||
|
||||
class MyServer:
|
||||
@@ -589,9 +581,9 @@ class StreamReaderTests(test_utils.TestCase):
|
||||
server.stop()
|
||||
self.assertEqual(msg, b"hello world!\n")
|
||||
|
||||
@test_utils.skipIf(sys.platform == 'win32', "Don't have pipes")
|
||||
@unittest.skipIf(sys.platform == 'win32', "Don't have pipes")
|
||||
def test_read_all_from_pipe_reader(self):
|
||||
# See Tulip issue 168. This test is derived from the example
|
||||
# See asyncio issue 168. This test is derived from the example
|
||||
# subprocess_attach_read_pipe.py, but we configure the
|
||||
# StreamReader's limit so that twice it is less than the size
|
||||
# of the data written. Also we must explicitly attach a child
|
||||
@@ -619,8 +611,8 @@ os.close(fd)
|
||||
kw = {'loop': self.loop}
|
||||
if compat.PY3:
|
||||
kw['pass_fds'] = set((wfd,))
|
||||
proc = self.loop.run_until_complete(
|
||||
asyncio.create_subprocess_exec(*args, **kw))
|
||||
create = asyncio.create_subprocess_exec(*args, **kw)
|
||||
proc = self.loop.run_until_complete(create)
|
||||
self.loop.run_until_complete(proc.wait())
|
||||
finally:
|
||||
asyncio.set_child_watcher(None)
|
||||
@@ -629,6 +621,25 @@ os.close(fd)
|
||||
data = self.loop.run_until_complete(reader.read(-1))
|
||||
self.assertEqual(data, b'data')
|
||||
|
||||
def test_streamreader_constructor(self):
|
||||
self.addCleanup(asyncio.set_event_loop, None)
|
||||
asyncio.set_event_loop(self.loop)
|
||||
|
||||
# asyncio issue #184: Ensure that StreamReaderProtocol constructor
|
||||
# retrieves the current loop if the loop parameter is not set
|
||||
reader = asyncio.StreamReader()
|
||||
self.assertIs(reader._loop, self.loop)
|
||||
|
||||
def test_streamreaderprotocol_constructor(self):
|
||||
self.addCleanup(asyncio.set_event_loop, None)
|
||||
asyncio.set_event_loop(self.loop)
|
||||
|
||||
# asyncio issue #184: Ensure that StreamReaderProtocol constructor
|
||||
# retrieves the current loop if the loop parameter is not set
|
||||
reader = mock.Mock()
|
||||
protocol = asyncio.StreamReaderProtocol(reader)
|
||||
self.assertIs(protocol._loop, self.loop)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
||||
@@ -4,12 +4,16 @@ import trollius as asyncio
|
||||
import os
|
||||
import signal
|
||||
import sys
|
||||
import unittest
|
||||
from trollius import BrokenPipeError, ConnectionResetError, ProcessLookupError
|
||||
from trollius import From, Return
|
||||
from trollius import base_subprocess
|
||||
from trollius import test_support as support
|
||||
from trollius.test_utils import mock
|
||||
from trollius.test_utils import unittest
|
||||
|
||||
if sys.platform != 'win32':
|
||||
from trollius import unix_events
|
||||
from trollius.py33_exceptions import BrokenPipeError, ConnectionResetError
|
||||
|
||||
|
||||
# Program blocking
|
||||
PROGRAM_BLOCKED = [sys.executable, '-c', 'import time; time.sleep(3600)']
|
||||
@@ -25,7 +29,57 @@ else:
|
||||
'sys.stdout.write(data)'))
|
||||
PROGRAM_CAT = [sys.executable, '-c', PROGRAM_CAT]
|
||||
|
||||
class SubprocessMixin(object):
|
||||
class TestSubprocessTransport(base_subprocess.BaseSubprocessTransport):
|
||||
def _start(self, *args, **kwargs):
|
||||
self._proc = mock.Mock()
|
||||
self._proc.stdin = None
|
||||
self._proc.stdout = None
|
||||
self._proc.stderr = None
|
||||
|
||||
|
||||
class SubprocessTransportTests(test_utils.TestCase):
|
||||
def setUp(self):
|
||||
self.loop = self.new_test_loop()
|
||||
self.set_event_loop(self.loop)
|
||||
|
||||
|
||||
def create_transport(self, waiter=None):
|
||||
protocol = mock.Mock()
|
||||
protocol.connection_made._is_coroutine = False
|
||||
protocol.process_exited._is_coroutine = False
|
||||
transport = TestSubprocessTransport(
|
||||
self.loop, protocol, ['test'], False,
|
||||
None, None, None, 0, waiter=waiter)
|
||||
return (transport, protocol)
|
||||
|
||||
def test_proc_exited(self):
|
||||
waiter = asyncio.Future(loop=self.loop)
|
||||
transport, protocol = self.create_transport(waiter)
|
||||
transport._process_exited(6)
|
||||
self.loop.run_until_complete(waiter)
|
||||
|
||||
self.assertEqual(transport.get_returncode(), 6)
|
||||
|
||||
self.assertTrue(protocol.connection_made.called)
|
||||
self.assertTrue(protocol.process_exited.called)
|
||||
self.assertTrue(protocol.connection_lost.called)
|
||||
self.assertEqual(protocol.connection_lost.call_args[0], (None,))
|
||||
|
||||
self.assertFalse(transport._closed)
|
||||
self.assertIsNone(transport._loop)
|
||||
self.assertIsNone(transport._proc)
|
||||
self.assertIsNone(transport._protocol)
|
||||
|
||||
# methods must raise ProcessLookupError if the process exited
|
||||
self.assertRaises(ProcessLookupError,
|
||||
transport.send_signal, signal.SIGTERM)
|
||||
self.assertRaises(ProcessLookupError, transport.terminate)
|
||||
self.assertRaises(ProcessLookupError, transport.kill)
|
||||
|
||||
transport.close()
|
||||
|
||||
|
||||
class SubprocessMixin:
|
||||
|
||||
def test_stdin_stdout(self):
|
||||
args = PROGRAM_CAT
|
||||
@@ -80,7 +134,7 @@ class SubprocessMixin(object):
|
||||
exitcode = self.loop.run_until_complete(proc.wait())
|
||||
self.assertEqual(exitcode, 7)
|
||||
|
||||
@test_utils.skipUnless(hasattr(os, 'setsid'), "need os.setsid()")
|
||||
@unittest.skipUnless(hasattr(os, 'setsid'), "need os.setsid()")
|
||||
def test_start_new_session(self):
|
||||
def start_new_session():
|
||||
os.setsid()
|
||||
@@ -117,7 +171,7 @@ class SubprocessMixin(object):
|
||||
else:
|
||||
self.assertEqual(-signal.SIGTERM, returncode)
|
||||
|
||||
@test_utils.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
|
||||
@unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
|
||||
def test_send_signal(self):
|
||||
code = '; '.join((
|
||||
'import sys, time',
|
||||
@@ -125,7 +179,9 @@ class SubprocessMixin(object):
|
||||
'sys.stdout.flush()',
|
||||
'time.sleep(3600)'))
|
||||
args = [sys.executable, '-c', code]
|
||||
create = asyncio.create_subprocess_exec(*args, loop=self.loop, stdout=subprocess.PIPE)
|
||||
create = asyncio.create_subprocess_exec(*args,
|
||||
stdout=subprocess.PIPE,
|
||||
loop=self.loop)
|
||||
proc = self.loop.run_until_complete(create)
|
||||
|
||||
@asyncio.coroutine
|
||||
@@ -163,17 +219,214 @@ class SubprocessMixin(object):
|
||||
|
||||
coro = write_stdin(proc, large_data)
|
||||
# drain() must raise BrokenPipeError or ConnectionResetError
|
||||
self.assertRaises((BrokenPipeError, ConnectionResetError),
|
||||
self.loop.run_until_complete, coro)
|
||||
with test_utils.disable_logger():
|
||||
self.assertRaises((BrokenPipeError, ConnectionResetError),
|
||||
self.loop.run_until_complete, coro)
|
||||
self.loop.run_until_complete(proc.wait())
|
||||
|
||||
def test_communicate_ignore_broken_pipe(self):
|
||||
proc, large_data = self.prepare_broken_pipe_test()
|
||||
|
||||
# communicate() must ignore BrokenPipeError when feeding stdin
|
||||
self.loop.run_until_complete(proc.communicate(large_data))
|
||||
with test_utils.disable_logger():
|
||||
self.loop.run_until_complete(proc.communicate(large_data))
|
||||
self.loop.run_until_complete(proc.wait())
|
||||
|
||||
def test_pause_reading(self):
|
||||
limit = 10
|
||||
size = (limit * 2 + 1)
|
||||
|
||||
@asyncio.coroutine
|
||||
def test_pause_reading():
|
||||
code = '\n'.join((
|
||||
'import sys',
|
||||
'sys.stdout.write("x" * %s)' % size,
|
||||
'sys.stdout.flush()',
|
||||
))
|
||||
|
||||
connect_read_pipe = self.loop.connect_read_pipe
|
||||
|
||||
@asyncio.coroutine
|
||||
def connect_read_pipe_mock(*args, **kw):
|
||||
connect = connect_read_pipe(*args, **kw)
|
||||
transport, protocol = yield From(connect)
|
||||
transport.pause_reading = mock.Mock()
|
||||
transport.resume_reading = mock.Mock()
|
||||
raise Return(transport, protocol)
|
||||
|
||||
self.loop.connect_read_pipe = connect_read_pipe_mock
|
||||
|
||||
proc = yield From(asyncio.create_subprocess_exec(
|
||||
sys.executable, '-c', code,
|
||||
stdin=asyncio.subprocess.PIPE,
|
||||
stdout=asyncio.subprocess.PIPE,
|
||||
limit=limit,
|
||||
loop=self.loop))
|
||||
stdout_transport = proc._transport.get_pipe_transport(1)
|
||||
|
||||
stdout, stderr = yield From(proc.communicate())
|
||||
|
||||
# The child process produced more than limit bytes of output,
|
||||
# the stream reader transport should pause the protocol to not
|
||||
# allocate too much memory.
|
||||
raise Return(stdout, stdout_transport)
|
||||
|
||||
# Issue #22685: Ensure that the stream reader pauses the protocol
|
||||
# when the child process produces too much data
|
||||
stdout, transport = self.loop.run_until_complete(test_pause_reading())
|
||||
|
||||
self.assertEqual(stdout, b'x' * size)
|
||||
self.assertTrue(transport.pause_reading.called)
|
||||
self.assertTrue(transport.resume_reading.called)
|
||||
|
||||
def test_stdin_not_inheritable(self):
|
||||
# asyncio issue #209: stdin must not be inheritable, otherwise
|
||||
# the Process.communicate() hangs
|
||||
@asyncio.coroutine
|
||||
def len_message(message):
|
||||
code = 'import sys; data = sys.stdin.read(); print(len(data))'
|
||||
proc = yield From(asyncio.create_subprocess_exec(
|
||||
sys.executable, '-c', code,
|
||||
stdin=asyncio.subprocess.PIPE,
|
||||
stdout=asyncio.subprocess.PIPE,
|
||||
stderr=asyncio.subprocess.PIPE,
|
||||
close_fds=False,
|
||||
loop=self.loop))
|
||||
stdout, stderr = yield From(proc.communicate(message))
|
||||
exitcode = yield From(proc.wait())
|
||||
raise Return(stdout, exitcode)
|
||||
|
||||
output, exitcode = self.loop.run_until_complete(len_message(b'abc'))
|
||||
self.assertEqual(output.rstrip(), b'3')
|
||||
self.assertEqual(exitcode, 0)
|
||||
|
||||
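Pulling the moving parts of these cases together, a minimal end-to-end use of the subprocess API they exercise (a sketch mirroring PROGRAM_CAT above; byte-for-byte round-tripping assumes ASCII data):

import sys
import trollius as asyncio
from trollius import From, Return

@asyncio.coroutine
def echo(loop, data):
    # Round-trip data through a cat-like child process.
    proc = yield From(asyncio.create_subprocess_exec(
        sys.executable, '-c',
        'import sys; sys.stdout.write(sys.stdin.read())',
        stdin=asyncio.subprocess.PIPE,
        stdout=asyncio.subprocess.PIPE,
        loop=loop))
    stdout, _ = yield From(proc.communicate(data))
    raise Return(stdout)

loop = asyncio.get_event_loop()
assert loop.run_until_complete(echo(loop, b'hello')) == b'hello'
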
def test_cancel_process_wait(self):
|
||||
# Issue #23140: cancel Process.wait()
|
||||
|
||||
@asyncio.coroutine
|
||||
def cancel_wait():
|
||||
proc = yield From(asyncio.create_subprocess_exec(
|
||||
*PROGRAM_BLOCKED,
|
||||
loop=self.loop))
|
||||
|
||||
# Create an internal future waiting on the process exit
|
||||
task = self.loop.create_task(proc.wait())
|
||||
self.loop.call_soon(task.cancel)
|
||||
try:
|
||||
yield From(task)
|
||||
except asyncio.CancelledError:
|
||||
pass
|
||||
|
||||
# Cancel the future
|
||||
task.cancel()
|
||||
|
||||
# Kill the process and wait until it is done
|
||||
proc.kill()
|
||||
yield From(proc.wait())
|
||||
|
||||
self.loop.run_until_complete(cancel_wait())
|
||||
|
||||
    def test_cancel_make_subprocess_transport_exec(self):
        @asyncio.coroutine
        def cancel_make_transport():
            coro = asyncio.create_subprocess_exec(*PROGRAM_BLOCKED,
                                                  loop=self.loop)
            task = self.loop.create_task(coro)

            self.loop.call_soon(task.cancel)
            try:
                yield From(task)
            except asyncio.CancelledError:
                pass

        # ignore the log:
        # "Exception during subprocess creation, kill the subprocess"
        with test_utils.disable_logger():
            self.loop.run_until_complete(cancel_make_transport())

    def test_cancel_post_init(self):
        @asyncio.coroutine
        def cancel_make_transport():
            coro = self.loop.subprocess_exec(asyncio.SubprocessProtocol,
                                             *PROGRAM_BLOCKED)
            task = self.loop.create_task(coro)

            self.loop.call_soon(task.cancel)
            try:
                yield From(task)
            except asyncio.CancelledError:
                pass

        # ignore the log:
        # "Exception during subprocess creation, kill the subprocess"
        with test_utils.disable_logger():
            self.loop.run_until_complete(cancel_make_transport())
            test_utils.run_briefly(self.loop)

    def test_close_kill_running(self):
        @asyncio.coroutine
        def kill_running():
            create = self.loop.subprocess_exec(asyncio.SubprocessProtocol,
                                               *PROGRAM_BLOCKED)
            transport, protocol = yield From(create)

            non_local = {'kill_called': False}
            def kill():
                non_local['kill_called'] = True
                orig_kill()

            proc = transport.get_extra_info('subprocess')
            orig_kill = proc.kill
            proc.kill = kill
            returncode = transport.get_returncode()
            transport.close()
            yield From(transport._wait())
            raise Return(returncode, non_local['kill_called'])

        # Ignore "Close running child process: kill ..." log
        with test_utils.disable_logger():
            returncode, killed = self.loop.run_until_complete(kill_running())
        self.assertIsNone(returncode)

        # transport.close() must kill the process if it is still running
        self.assertTrue(killed)
        test_utils.run_briefly(self.loop)

    def test_close_dont_kill_finished(self):
        @asyncio.coroutine
        def kill_running():
            create = self.loop.subprocess_exec(asyncio.SubprocessProtocol,
                                               *PROGRAM_BLOCKED)
            transport, protocol = yield From(create)
            proc = transport.get_extra_info('subprocess')

            # kill the process (but asyncio is not notified immediately)
            proc.kill()
            proc.wait()

            proc.kill = mock.Mock()
            proc_returncode = proc.poll()
            transport_returncode = transport.get_returncode()
            transport.close()
            raise Return(proc_returncode, transport_returncode,
                         proc.kill.called)

        # Ignore "Unknown child process pid ..." log of SafeChildWatcher,
        # emitted because the test already consumes the exit status:
        # proc.wait()
        with test_utils.disable_logger():
            result = self.loop.run_until_complete(kill_running())
        test_utils.run_briefly(self.loop)

        proc_returncode, transport_return_code, killed = result

        self.assertIsNotNone(proc_returncode)
        self.assertIsNone(transport_return_code)

        # transport.close() must not kill the process if it finished, even if
        # the transport was not notified yet
        self.assertFalse(killed)


if sys.platform != 'win32':
    # Unix
@@ -184,19 +437,12 @@ if sys.platform != 'win32':
        def setUp(self):
            policy = asyncio.get_event_loop_policy()
            self.loop = policy.new_event_loop()

            # ensure that the event loop is passed explicitly in asyncio
            policy.set_event_loop(None)
            self.set_event_loop(self.loop)

            watcher = self.Watcher()
            watcher.attach_loop(self.loop)
            policy.set_child_watcher(watcher)

        def tearDown(self):
            policy = asyncio.get_event_loop_policy()
            policy.set_child_watcher(None)
            self.loop.close()
            super(SubprocessWatcherMixin, self).tearDown()
            self.addCleanup(policy.set_child_watcher, None)

    class SubprocessSafeWatcherTests(SubprocessWatcherMixin,
                                     test_utils.TestCase):
@@ -213,17 +459,8 @@ else:
    class SubprocessProactorTests(SubprocessMixin, test_utils.TestCase):

        def setUp(self):
            policy = asyncio.get_event_loop_policy()
            self.loop = asyncio.ProactorEventLoop()

            # ensure that the event loop is passed explicitly in asyncio
            policy.set_event_loop(None)

        def tearDown(self):
            policy = asyncio.get_event_loop_policy()
            self.loop.close()
            policy.set_event_loop(None)
            super(SubprocessProactorTests, self).tearDown()
            self.set_event_loop(self.loop)


if __name__ == '__main__':

@@ -1,9 +1,11 @@
"""Tests for tasks.py."""

import contextlib
import functools
import os
import re
import sys
import types
import unittest
import weakref

import trollius as asyncio
@@ -11,8 +13,8 @@ from trollius import From, Return
from trollius import coroutines
from trollius import test_support as support
from trollius import test_utils
from trollius.test_support import assert_python_ok
from trollius.test_utils import mock
from trollius.test_utils import unittest


PY33 = (sys.version_info >= (3, 3))
@@ -24,6 +26,22 @@ PY35 = (sys.version_info >= (3, 5))
def coroutine_function():
    pass

@asyncio.coroutine
def coroutine_function2(x, y):
    yield From(asyncio.sleep(0))

@contextlib.contextmanager
def set_coroutine_debug(enabled):
    coroutines = asyncio.coroutines

    old_debug = coroutines._DEBUG
    try:
        coroutines._DEBUG = enabled
        yield
    finally:
        coroutines._DEBUG = old_debug



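The ``set_coroutine_debug()`` helper added above is used throughout this file. A short usage sketch (illustrative only; note that the ``@asyncio.coroutine`` decorator consults ``_DEBUG`` when it is applied, which is why the tests apply it inside the block)::

    # Enable trollius coroutine debugging for one block; the previous
    # value of coroutines._DEBUG is restored even if the body raises.
    with set_coroutine_debug(True):
        coro_func = asyncio.coroutine(coroutine_function)  # decorated in debug mode
        gen = coro_func()  # debug mode returns a CoroWrapper object
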
def format_coroutine(qualname, state, src, source_traceback, generator=False):
    if generator:
@@ -69,11 +87,11 @@ class TaskTests(test_utils.TestCase):
        loop.run_until_complete(t)
        loop.close()

    def test_async_coroutine(self):
    def test_ensure_future_coroutine(self):
        @asyncio.coroutine
        def notmuch():
            return 'ok'
        t = asyncio.async(notmuch(), loop=self.loop)
        t = asyncio.ensure_future(notmuch(), loop=self.loop)
        self.loop.run_until_complete(t)
        self.assertTrue(t.done())
        self.assertEqual(t.result(), 'ok')
@@ -81,16 +99,16 @@ class TaskTests(test_utils.TestCase):

        loop = asyncio.new_event_loop()
        self.set_event_loop(loop)
        t = asyncio.async(notmuch(), loop=loop)
        t = asyncio.ensure_future(notmuch(), loop=loop)
        self.assertIs(t._loop, loop)
        loop.run_until_complete(t)
        loop.close()

    def test_async_future(self):
    def test_ensure_future_future(self):
        f_orig = asyncio.Future(loop=self.loop)
        f_orig.set_result('ko')

        f = asyncio.async(f_orig)
        f = asyncio.ensure_future(f_orig)
        self.loop.run_until_complete(f)
        self.assertTrue(f.done())
        self.assertEqual(f.result(), 'ko')
@@ -100,19 +118,19 @@ class TaskTests(test_utils.TestCase):
        self.set_event_loop(loop)

        with self.assertRaises(ValueError):
            f = asyncio.async(f_orig, loop=loop)
            f = asyncio.ensure_future(f_orig, loop=loop)

        loop.close()

        f = asyncio.async(f_orig, loop=self.loop)
        f = asyncio.ensure_future(f_orig, loop=self.loop)
        self.assertIs(f, f_orig)

    def test_async_task(self):
    def test_ensure_future_task(self):
        @asyncio.coroutine
        def notmuch():
            return 'ok'
        t_orig = asyncio.Task(notmuch(), loop=self.loop)
        t = asyncio.async(t_orig)
        t = asyncio.ensure_future(t_orig)
        self.loop.run_until_complete(t)
        self.assertTrue(t.done())
        self.assertEqual(t.result(), 'ok')
@@ -122,28 +140,29 @@ class TaskTests(test_utils.TestCase):
        self.set_event_loop(loop)

        with self.assertRaises(ValueError):
            t = asyncio.async(t_orig, loop=loop)
            t = asyncio.ensure_future(t_orig, loop=loop)

        loop.close()

        t = asyncio.async(t_orig, loop=self.loop)
        t = asyncio.ensure_future(t_orig, loop=self.loop)
        self.assertIs(t, t_orig)

    def test_async_neither(self):
    def test_ensure_future_neither(self):
        with self.assertRaises(TypeError):
            asyncio.async('ok')
            asyncio.ensure_future('ok')

    def test_async_warning(self):
        f = asyncio.Future(loop=self.loop)
        with self.assertWarnsRegex(DeprecationWarning,
                                   'function is deprecated, use ensure_'):
            self.assertIs(f, asyncio.async(f))

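A condensed sketch of the dispatch rules these renamed tests pin down (illustrative, using the tests' own ``notmuch`` coroutine and ``f_orig`` future; ``other_loop`` stands for any second event loop)::

    t = asyncio.ensure_future(notmuch(), loop=loop)  # coroutine -> new Task
    f = asyncio.ensure_future(f_orig)                # Future/Task -> returned as-is
    asyncio.ensure_future(f_orig, loop=other_loop)   # ValueError: loop mismatch
    asyncio.ensure_future('ok')                      # TypeError: not a coroutine/Future
    asyncio.async(f_orig)                            # still works, DeprecationWarning
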
self.loop.set_debug(False)
|
||||
|
||||
@asyncio.coroutine
|
||||
def noop():
|
||||
yield From(None)
|
||||
raise Return('abc')
|
||||
|
||||
@asyncio.coroutine
|
||||
def notmuch():
|
||||
yield From(noop())
|
||||
yield From(None)
|
||||
raise Return('abc')
|
||||
|
||||
# test coroutine function
|
||||
@@ -210,7 +229,8 @@ class TaskTests(test_utils.TestCase):
|
||||
self.assertEqual(notmuch.__name__, 'notmuch')
|
||||
if PY35:
|
||||
self.assertEqual(notmuch.__qualname__,
|
||||
'TaskTests.test_task_repr_coro_decorator.<locals>.notmuch')
|
||||
'TaskTests.test_task_repr_coro_decorator'
|
||||
'.<locals>.notmuch')
|
||||
self.assertEqual(notmuch.__module__, __name__)
|
||||
|
||||
# test coroutine object
|
||||
@@ -221,7 +241,8 @@ class TaskTests(test_utils.TestCase):
|
||||
# attribute).
|
||||
coro_name = 'notmuch'
|
||||
if PY35 or (coroutines._DEBUG and PY33):
|
||||
coro_qualname = 'TaskTests.test_task_repr_coro_decorator.<locals>.notmuch'
|
||||
coro_qualname = ('TaskTests.test_task_repr_coro_decorator'
|
||||
'.<locals>.notmuch')
|
||||
else:
|
||||
coro_qualname = 'notmuch'
|
||||
else:
|
||||
@@ -244,7 +265,8 @@ class TaskTests(test_utils.TestCase):
|
||||
else:
|
||||
code = gen.gi_code
|
||||
coro = ('%s() running at %s:%s'
|
||||
% (coro_qualname, code.co_filename, code.co_firstlineno))
|
||||
% (coro_qualname, code.co_filename,
|
||||
code.co_firstlineno))
|
||||
|
||||
self.assertEqual(repr(gen), '<CoroWrapper %s>' % coro)
|
||||
|
||||
@@ -282,6 +304,25 @@ class TaskTests(test_utils.TestCase):
|
||||
fut.set_result(None)
|
||||
self.loop.run_until_complete(task)
|
||||
|
||||
def test_task_repr_partial_corowrapper(self):
|
||||
# Issue #222: repr(CoroWrapper) must not fail in debug mode if the
|
||||
# coroutine is a partial function
|
||||
with set_coroutine_debug(True):
|
||||
self.loop.set_debug(True)
|
||||
|
||||
cb = functools.partial(coroutine_function2, 1)
|
||||
partial_func = asyncio.coroutine(cb)
|
||||
task = self.loop.create_task(partial_func(2))
|
||||
|
||||
# make warnings quiet
|
||||
task._log_destroy_pending = False
|
||||
self.addCleanup(task._coro.close)
|
||||
|
||||
coro_repr = repr(task._coro)
|
||||
expected = ('<CoroWrapper coroutine_function2(1)() running, ')
|
||||
self.assertTrue(coro_repr.startswith(expected),
|
||||
coro_repr)
|
||||
|
||||
def test_task_basics(self):
|
||||
@asyncio.coroutine
|
||||
def outer():
|
||||
@@ -578,6 +619,21 @@ class TaskTests(test_utils.TestCase):
|
||||
self.assertTrue(fut.done())
|
||||
self.assertTrue(fut.cancelled())
|
||||
|
||||
def test_wait_for_race_condition(self):
|
||||
|
||||
def gen():
|
||||
yield 0.1
|
||||
yield 0.1
|
||||
yield 0.1
|
||||
|
||||
loop = self.new_test_loop(gen)
|
||||
|
||||
fut = asyncio.Future(loop=loop)
|
||||
task = asyncio.wait_for(fut, timeout=0.2, loop=loop)
|
||||
loop.call_later(0.1, fut.set_result, "ok")
|
||||
res = loop.run_until_complete(task)
|
||||
self.assertEqual(res, "ok")
|
||||
|
||||
def test_wait(self):
|
||||
|
||||
def gen():
|
||||
@@ -1344,7 +1400,7 @@ class TaskTests(test_utils.TestCase):
|
||||
else:
|
||||
non_local['proof'] += 10
|
||||
|
||||
f = asyncio.async(outer(), loop=self.loop)
|
||||
f = asyncio.ensure_future(outer(), loop=self.loop)
|
||||
test_utils.run_briefly(self.loop)
|
||||
f.cancel()
|
||||
self.loop.run_until_complete(f)
|
||||
@@ -1367,7 +1423,7 @@ class TaskTests(test_utils.TestCase):
|
||||
d, p = yield From(asyncio.wait([inner()], loop=self.loop))
|
||||
non_local['proof'] += 100
|
||||
|
||||
f = asyncio.async(outer(), loop=self.loop)
|
||||
f = asyncio.ensure_future(outer(), loop=self.loop)
|
||||
test_utils.run_briefly(self.loop)
|
||||
f.cancel()
|
||||
self.assertRaises(
|
||||
@@ -1421,7 +1477,7 @@ class TaskTests(test_utils.TestCase):
|
||||
yield From(asyncio.shield(inner(), loop=self.loop))
|
||||
non_local['proof'] += 100
|
||||
|
||||
f = asyncio.async(outer(), loop=self.loop)
|
||||
f = asyncio.ensure_future(outer(), loop=self.loop)
|
||||
test_utils.run_briefly(self.loop)
|
||||
f.cancel()
|
||||
with self.assertRaises(asyncio.CancelledError):
|
||||
@@ -1518,25 +1574,16 @@ class TaskTests(test_utils.TestCase):
|
||||
# The frame should have changed.
|
||||
self.assertIsNone(gen.gi_frame)
|
||||
|
||||
# Save debug flag.
|
||||
old_debug = asyncio.coroutines._DEBUG
|
||||
try:
|
||||
# Test with debug flag cleared.
|
||||
asyncio.coroutines._DEBUG = False
|
||||
# Test with debug flag cleared.
|
||||
with set_coroutine_debug(False):
|
||||
check()
|
||||
|
||||
# Test with debug flag set.
|
||||
asyncio.coroutines._DEBUG = True
|
||||
# Test with debug flag set.
|
||||
with set_coroutine_debug(True):
|
||||
check()
|
||||
|
||||
finally:
|
||||
# Restore original debug flag.
|
||||
asyncio.coroutines._DEBUG = old_debug
|
||||
|
||||
def test_yield_from_corowrapper(self):
|
||||
old_debug = asyncio.coroutines._DEBUG
|
||||
asyncio.coroutines._DEBUG = True
|
||||
try:
|
||||
with set_coroutine_debug(True):
|
||||
@asyncio.coroutine
|
||||
def t1():
|
||||
res = yield From(t2())
|
||||
@@ -1556,8 +1603,6 @@ class TaskTests(test_utils.TestCase):
|
||||
task = asyncio.Task(t1(), loop=self.loop)
|
||||
val = self.loop.run_until_complete(task)
|
||||
self.assertEqual(val, (1, 2, 3))
|
||||
finally:
|
||||
asyncio.coroutines._DEBUG = old_debug
|
||||
|
||||
def test_yield_from_corowrapper_send(self):
|
||||
def foo():
|
||||
@@ -1586,8 +1631,8 @@ class TaskTests(test_utils.TestCase):
|
||||
wd['cw'] = cw # Would fail without __weakref__ slot.
|
||||
cw.gen = None # Suppress warning from __del__.
|
||||
|
||||
@test_utils.skipUnless(PY34,
|
||||
'need python 3.4 or later')
|
||||
@unittest.skipUnless(PY34,
|
||||
'need python 3.4 or later')
|
||||
def test_log_destroyed_pending_task(self):
|
||||
@asyncio.coroutine
|
||||
def kill_me(loop):
|
||||
@@ -1603,7 +1648,7 @@ class TaskTests(test_utils.TestCase):
|
||||
|
||||
# schedule the task
|
||||
coro = kill_me(self.loop)
|
||||
task = asyncio.async(coro, loop=self.loop)
|
||||
task = asyncio.ensure_future(coro, loop=self.loop)
|
||||
self.assertEqual(asyncio.Task.all_tasks(loop=self.loop), set((task,)))
|
||||
|
||||
# execute the task so it waits for future
|
||||
@@ -1630,14 +1675,10 @@ class TaskTests(test_utils.TestCase):
|
||||
|
||||
@mock.patch('trollius.coroutines.logger')
|
||||
def test_coroutine_never_yielded(self, m_log):
|
||||
debug = asyncio.coroutines._DEBUG
|
||||
try:
|
||||
asyncio.coroutines._DEBUG = True
|
||||
with set_coroutine_debug(True):
|
||||
@asyncio.coroutine
|
||||
def coro_noop():
|
||||
pass
|
||||
finally:
|
||||
asyncio.coroutines._DEBUG = debug
|
||||
|
||||
tb_filename = sys._getframe().f_code.co_filename
|
||||
tb_lineno = sys._getframe().f_lineno + 2
|
||||
@@ -1649,7 +1690,8 @@ class TaskTests(test_utils.TestCase):
|
||||
message = m_log.error.call_args[0][0]
|
||||
func_filename, func_lineno = test_utils.get_function_source(coro_noop)
|
||||
coro_name = getattr(coro_noop, '__qualname__', coro_noop.__name__)
|
||||
regex = (r'^<CoroWrapper %s\(\) .* at %s:%s, .*> was never yielded from\n'
|
||||
regex = (r'^<CoroWrapper %s\(\) .* at %s:%s, .*> '
|
||||
r'was never yielded from\n'
|
||||
r'Coroutine object created at \(most recent call last\):\n'
|
||||
r'.*\n'
|
||||
r' File "%s", line %s, in test_coroutine_never_yielded\n'
|
||||
@@ -1664,22 +1706,41 @@ class TaskTests(test_utils.TestCase):
|
||||
self.loop.set_debug(True)
|
||||
|
||||
task = asyncio.Task(coroutine_function(), loop=self.loop)
|
||||
self.check_soure_traceback(task._source_traceback, -1)
|
||||
lineno = sys._getframe().f_lineno - 1
|
||||
self.assertIsInstance(task._source_traceback, list)
|
||||
filename = sys._getframe().f_code.co_filename
|
||||
self.assertEqual(task._source_traceback[-1][:3],
|
||||
(filename,
|
||||
lineno,
|
||||
'test_task_source_traceback'))
|
||||
self.loop.run_until_complete(task)
|
||||
|
||||
def test_coroutine_class(self):
|
||||
# Trollius issue #9
|
||||
self.loop.set_debug(True)
|
||||
def _test_cancel_wait_for(self, timeout):
|
||||
loop = asyncio.new_event_loop()
|
||||
self.addCleanup(loop.close)
|
||||
|
||||
class MyClass(object):
|
||||
def __call__(self):
|
||||
return 7
|
||||
@asyncio.coroutine
|
||||
def blocking_coroutine():
|
||||
fut = asyncio.Future(loop=loop)
|
||||
# Block: fut result is never set
|
||||
yield From(fut)
|
||||
|
||||
obj = MyClass()
|
||||
coro_func = asyncio.coroutine(obj)
|
||||
coro_obj = coro_func()
|
||||
res = self.loop.run_until_complete(coro_obj)
|
||||
self.assertEqual(res, 7)
|
||||
task = loop.create_task(blocking_coroutine())
|
||||
|
||||
wait = loop.create_task(asyncio.wait_for(task, timeout, loop=loop))
|
||||
loop.call_soon(wait.cancel)
|
||||
|
||||
self.assertRaises(asyncio.CancelledError,
|
||||
loop.run_until_complete, wait)
|
||||
|
||||
# Python issue #23219: cancelling the wait must also cancel the task
|
||||
self.assertTrue(task.cancelled())
|
||||
|
||||
def test_cancel_blocking_wait_for(self):
|
||||
self._test_cancel_wait_for(None)
|
||||
|
||||
def test_cancel_wait_for(self):
|
||||
self._test_cancel_wait_for(60.0)
|
||||
|
||||
|
||||
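The behaviour pinned down by ``_test_cancel_wait_for()`` above, condensed into an illustrative sketch (not part of the diff)::

    wait = loop.create_task(asyncio.wait_for(task, 60.0, loop=loop))
    wait.cancel()
    try:
        loop.run_until_complete(wait)
    except asyncio.CancelledError:
        pass
    # Python issue #23219: cancelling the wrapper reaches the inner task
    assert task.cancelled()
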
class GatherTestsBase:
@@ -1753,16 +1814,20 @@ class GatherTestsBase:
        self.assertEqual(fut.result(), [3, 1, exc, exc2])

    def test_env_var_debug(self):
        aio_path = os.path.dirname(os.path.dirname(asyncio.__file__))

        code = '\n'.join((
            'import trollius.coroutines',
            'print(trollius.coroutines._DEBUG)'))

        sts, stdout, stderr = assert_python_ok('-c', code,
                                               TROLLIUSDEBUG='')
        sts, stdout, stderr = support.assert_python_ok('-c', code,
                                                       TROLLIUSDEBUG='',
                                                       PYTHONPATH=aio_path)
        self.assertEqual(stdout.rstrip(), b'False')

        sts, stdout, stderr = assert_python_ok('-c', code,
                                               TROLLIUSDEBUG='1')
        sts, stdout, stderr = support.assert_python_ok('-c', code,
                                                       TROLLIUSDEBUG='1',
                                                       PYTHONPATH=aio_path)
        self.assertEqual(stdout.rstrip(), b'True')


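A note on the knob exercised here: ``trollius.coroutines._DEBUG`` is computed when the module is imported, so the variable must be set beforehand (illustrative sketch)::

    import os
    os.environ['TROLLIUSDEBUG'] = '1'   # any non-empty value enables debug mode
    import trollius.coroutines
    print(trollius.coroutines._DEBUG)   # True; with TROLLIUSDEBUG='' it is False
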
@@ -1902,8 +1967,8 @@ class CoroutineGatherTests(GatherTestsBase, test_utils.TestCase):
            yield From(waiter)
            non_local['proof'] += 1

        child1 = asyncio.async(inner(), loop=self.one_loop)
        child2 = asyncio.async(inner(), loop=self.one_loop)
        child1 = asyncio.ensure_future(inner(), loop=self.one_loop)
        child2 = asyncio.ensure_future(inner(), loop=self.one_loop)
        non_local['gatherer'] = None

        @asyncio.coroutine
@@ -1912,7 +1977,7 @@ class CoroutineGatherTests(GatherTestsBase, test_utils.TestCase):
            yield From(non_local['gatherer'])
            non_local['proof'] += 100

        f = asyncio.async(outer(), loop=self.one_loop)
        f = asyncio.ensure_future(outer(), loop=self.one_loop)
        test_utils.run_briefly(self.one_loop)
        self.assertTrue(f.cancel())
        with self.assertRaises(asyncio.CancelledError):
@@ -1939,7 +2004,7 @@ class CoroutineGatherTests(GatherTestsBase, test_utils.TestCase):
        def outer():
            yield From(asyncio.gather(inner(a), inner(b), loop=self.one_loop))

        f = asyncio.async(outer(), loop=self.one_loop)
        f = asyncio.ensure_future(outer(), loop=self.one_loop)
        test_utils.run_briefly(self.one_loop)
        a.set_result(None)
        test_utils.run_briefly(self.one_loop)

@@ -1,11 +1,10 @@
"""Tests for transports.py."""

import unittest

import trollius as asyncio
from trollius import test_utils
from trollius import transports
from trollius.test_utils import mock
from trollius.test_utils import unittest

try:
    memoryview
@@ -76,7 +75,8 @@ class TransportTests(test_utils.TestCase):
        def get_write_buffer_size(self):
            return 512

        transport = MyTransport()
        loop = mock.Mock()
        transport = MyTransport(loop=loop)
        transport._protocol = mock.Mock()

        self.assertFalse(transport._protocol_paused)
@@ -86,9 +86,11 @@ class TransportTests(test_utils.TestCase):

        transport.set_write_buffer_limits(high=1024, low=128)
        self.assertFalse(transport._protocol_paused)
        self.assertEqual(transport.get_write_buffer_limits(), (128, 1024))

        transport.set_write_buffer_limits(high=256, low=128)
        self.assertTrue(transport._protocol_paused)
        self.assertEqual(transport.get_write_buffer_limits(), (128, 256))


if __name__ == '__main__':

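The pause/resume rule behind those assertions, restated as a sketch (the buffered size is pinned to 512 by ``get_write_buffer_size()`` above)::

    transport.set_write_buffer_limits(high=1024, low=128)
    # buffered 512 <= high 1024: the protocol keeps writing
    transport.set_write_buffer_limits(high=256, low=128)
    # buffered 512 > high 256: protocol.pause_writing() is called;
    # protocol.resume_writing() follows once the buffer drains below low=128
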
@@ -2,23 +2,20 @@

import collections
import contextlib
#import gc
import errno
import io
import os
#import pprint
import signal
import socket
import stat
import sys
import tempfile
import threading
import unittest
from trollius.test_utils import unittest

if sys.platform == 'win32':
    raise unittest.SkipTest('UNIX only')


import trollius as asyncio
from trollius import log
from trollius import test_utils
@@ -30,7 +27,16 @@ from trollius.test_utils import mock
MOCK_ANY = mock.ANY


@test_utils.skipUnless(signal, 'Signals are not supported')
def close_pipe_transport(transport):
    # Don't call transport.close() because the event loop and the selector
    # are mocked
    if transport._pipe is None:
        return
    transport._pipe.close()
    transport._pipe = None


@unittest.skipUnless(signal, 'Signals are not supported')
class SelectorEventLoopSignalTests(test_utils.TestCase):

    def setUp(self):
@@ -65,6 +71,24 @@ class SelectorEventLoopSignalTests(test_utils.TestCase):
                          self.loop.add_signal_handler,
                          signal.SIGINT, lambda: True)

    @mock.patch('trollius.unix_events.signal')
    def test_add_signal_handler_coroutine_error(self, m_signal):
        m_signal.NSIG = signal.NSIG

        @asyncio.coroutine
        def simple_coroutine():
            yield None

        # callback must not be a coroutine function
        coro_func = simple_coroutine
        coro_obj = coro_func()
        self.addCleanup(coro_obj.close)
        for func in (coro_func, coro_obj):
            self.assertRaisesRegex(
                TypeError, 'coroutines cannot be used with add_signal_handler',
                self.loop.add_signal_handler,
                signal.SIGINT, func)

    @mock.patch('trollius.unix_events.signal')
    def test_add_signal_handler(self, m_signal):
        m_signal.NSIG = signal.NSIG
@@ -205,8 +229,8 @@ class SelectorEventLoopSignalTests(test_utils.TestCase):
        m_signal.set_wakeup_fd.assert_called_once_with(-1)


@test_utils.skipUnless(hasattr(socket, 'AF_UNIX'),
                       'UNIX Sockets are not supported')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'),
                     'UNIX Sockets are not supported')
class SelectorEventLoopUnixSocketTests(test_utils.TestCase):

    def setUp(self):
@@ -273,7 +297,7 @@ class SelectorEventLoopUnixSocketTests(test_utils.TestCase):

    def test_create_unix_connection_path_sock(self):
        coro = self.loop.create_unix_connection(
            lambda: None, '/dev/null', sock=object())
            lambda: None, os.devnull, sock=object())
        with self.assertRaisesRegex(ValueError, 'path and sock can not be'):
            self.loop.run_until_complete(coro)

@@ -286,14 +310,14 @@ class SelectorEventLoopUnixSocketTests(test_utils.TestCase):

    def test_create_unix_connection_nossl_serverhost(self):
        coro = self.loop.create_unix_connection(
            lambda: None, '/dev/null', server_hostname='spam')
            lambda: None, os.devnull, server_hostname='spam')
        with self.assertRaisesRegex(ValueError,
                                    'server_hostname is only meaningful'):
            self.loop.run_until_complete(coro)

    def test_create_unix_connection_ssl_noserverhost(self):
        coro = self.loop.create_unix_connection(
            lambda: None, '/dev/null', ssl=True)
            lambda: None, os.devnull, ssl=True)

        with self.assertRaisesRegex(
                ValueError, 'you have to pass server_hostname when using ssl'):
@@ -309,9 +333,9 @@ class UnixReadPipeTransportTests(test_utils.TestCase):
        self.pipe = mock.Mock(spec_set=io.RawIOBase)
        self.pipe.fileno.return_value = 5

        fcntl_patcher = mock.patch('fcntl.fcntl')
        fcntl_patcher.start()
        self.addCleanup(fcntl_patcher.stop)
        blocking_patcher = mock.patch('trollius.unix_events._set_nonblocking')
        blocking_patcher.start()
        self.addCleanup(blocking_patcher.stop)

        fstat_patcher = mock.patch('os.fstat')
        m_fstat = fstat_patcher.start()
@@ -320,24 +344,25 @@ class UnixReadPipeTransportTests(test_utils.TestCase):
        m_fstat.return_value = st
        self.addCleanup(fstat_patcher.stop)

    def test_ctor(self):
        tr = unix_events._UnixReadPipeTransport(
            self.loop, self.pipe, self.protocol)
        self.loop.assert_reader(5, tr._read_ready)
        test_utils.run_briefly(self.loop)
        self.protocol.connection_made.assert_called_with(tr)
    def read_pipe_transport(self, waiter=None):
        transport = unix_events._UnixReadPipeTransport(self.loop, self.pipe,
                                                       self.protocol,
                                                       waiter=waiter)
        self.addCleanup(close_pipe_transport, transport)
        return transport

    def test_ctor_with_waiter(self):
        fut = asyncio.Future(loop=self.loop)
        unix_events._UnixReadPipeTransport(
            self.loop, self.pipe, self.protocol, fut)
        test_utils.run_briefly(self.loop)
        self.assertIsNone(fut.result())
    def test_ctor(self):
        waiter = asyncio.Future(loop=self.loop)
        tr = self.read_pipe_transport(waiter=waiter)
        self.loop.run_until_complete(waiter)

        self.protocol.connection_made.assert_called_with(tr)
        self.loop.assert_reader(5, tr._read_ready)
        self.assertIsNone(waiter.result())

    @mock.patch('os.read')
    def test__read_ready(self, m_read):
        tr = unix_events._UnixReadPipeTransport(
            self.loop, self.pipe, self.protocol)
        tr = self.read_pipe_transport()
        m_read.return_value = b'data'
        tr._read_ready()

@@ -346,8 +371,7 @@ class UnixReadPipeTransportTests(test_utils.TestCase):

    @mock.patch('os.read')
    def test__read_ready_eof(self, m_read):
        tr = unix_events._UnixReadPipeTransport(
            self.loop, self.pipe, self.protocol)
        tr = self.read_pipe_transport()
        m_read.return_value = b''
        tr._read_ready()

@@ -359,8 +383,7 @@ class UnixReadPipeTransportTests(test_utils.TestCase):

    @mock.patch('os.read')
    def test__read_ready_blocked(self, m_read):
        tr = unix_events._UnixReadPipeTransport(
            self.loop, self.pipe, self.protocol)
        tr = self.read_pipe_transport()
        m_read.side_effect = BlockingIOError
        tr._read_ready()

@@ -371,8 +394,7 @@ class UnixReadPipeTransportTests(test_utils.TestCase):
    @mock.patch('trollius.log.logger.error')
    @mock.patch('os.read')
    def test__read_ready_error(self, m_read, m_logexc):
        tr = unix_events._UnixReadPipeTransport(
            self.loop, self.pipe, self.protocol)
        tr = self.read_pipe_transport()
        err = OSError()
        m_read.side_effect = err
        tr._close = mock.Mock()
@@ -388,9 +410,7 @@ class UnixReadPipeTransportTests(test_utils.TestCase):

    @mock.patch('os.read')
    def test_pause_reading(self, m_read):
        tr = unix_events._UnixReadPipeTransport(
            self.loop, self.pipe, self.protocol)

        tr = self.read_pipe_transport()
        m = mock.Mock()
        self.loop.add_reader(5, m)
        tr.pause_reading()
@@ -398,26 +418,20 @@ class UnixReadPipeTransportTests(test_utils.TestCase):

    @mock.patch('os.read')
    def test_resume_reading(self, m_read):
        tr = unix_events._UnixReadPipeTransport(
            self.loop, self.pipe, self.protocol)

        tr = self.read_pipe_transport()
        tr.resume_reading()
        self.loop.assert_reader(5, tr._read_ready)

    @mock.patch('os.read')
    def test_close(self, m_read):
        tr = unix_events._UnixReadPipeTransport(
            self.loop, self.pipe, self.protocol)

        tr = self.read_pipe_transport()
        tr._close = mock.Mock()
        tr.close()
        tr._close.assert_called_with(None)

    @mock.patch('os.read')
    def test_close_already_closing(self, m_read):
        tr = unix_events._UnixReadPipeTransport(
            self.loop, self.pipe, self.protocol)

        tr = self.read_pipe_transport()
        tr._closing = True
        tr._close = mock.Mock()
        tr.close()
@@ -425,9 +439,7 @@ class UnixReadPipeTransportTests(test_utils.TestCase):

    @mock.patch('os.read')
    def test__close(self, m_read):
        tr = unix_events._UnixReadPipeTransport(
            self.loop, self.pipe, self.protocol)

        tr = self.read_pipe_transport()
        err = object()
        tr._close(err)
        self.assertTrue(tr._closing)
@@ -436,8 +448,7 @@ class UnixReadPipeTransportTests(test_utils.TestCase):
        self.protocol.connection_lost.assert_called_with(err)

    def test__call_connection_lost(self):
        tr = unix_events._UnixReadPipeTransport(
            self.loop, self.pipe, self.protocol)
        tr = self.read_pipe_transport()
        self.assertIsNotNone(tr._protocol)
        self.assertIsNotNone(tr._loop)

@@ -450,8 +461,7 @@ class UnixReadPipeTransportTests(test_utils.TestCase):
        self.assertIsNone(tr._loop)

    def test__call_connection_lost_with_err(self):
        tr = unix_events._UnixReadPipeTransport(
            self.loop, self.pipe, self.protocol)
        tr = self.read_pipe_transport()
        self.assertIsNotNone(tr._protocol)
        self.assertIsNotNone(tr._loop)

@@ -472,9 +482,9 @@ class UnixWritePipeTransportTests(test_utils.TestCase):
        self.pipe = mock.Mock(spec_set=io.RawIOBase)
        self.pipe.fileno.return_value = 5

        fcntl_patcher = mock.patch('fcntl.fcntl')
        fcntl_patcher.start()
        self.addCleanup(fcntl_patcher.stop)
        blocking_patcher = mock.patch('trollius.unix_events._set_nonblocking')
        blocking_patcher.start()
        self.addCleanup(blocking_patcher.stop)

        fstat_patcher = mock.patch('os.fstat')
        m_fstat = fstat_patcher.start()
@@ -483,31 +493,29 @@ class UnixWritePipeTransportTests(test_utils.TestCase):
        m_fstat.return_value = st
        self.addCleanup(fstat_patcher.stop)

    def test_ctor(self):
        tr = unix_events._UnixWritePipeTransport(
            self.loop, self.pipe, self.protocol)
        self.loop.assert_reader(5, tr._read_ready)
        test_utils.run_briefly(self.loop)
        self.protocol.connection_made.assert_called_with(tr)
    def write_pipe_transport(self, waiter=None):
        transport = unix_events._UnixWritePipeTransport(self.loop, self.pipe,
                                                        self.protocol,
                                                        waiter=waiter)
        self.addCleanup(close_pipe_transport, transport)
        return transport

    def test_ctor_with_waiter(self):
        fut = asyncio.Future(loop=self.loop)
        tr = unix_events._UnixWritePipeTransport(
            self.loop, self.pipe, self.protocol, fut)
    def test_ctor(self):
        waiter = asyncio.Future(loop=self.loop)
        tr = self.write_pipe_transport(waiter=waiter)
        self.loop.run_until_complete(waiter)

        self.protocol.connection_made.assert_called_with(tr)
        self.loop.assert_reader(5, tr._read_ready)
        test_utils.run_briefly(self.loop)
        self.assertEqual(None, fut.result())
        self.assertEqual(None, waiter.result())

    def test_can_write_eof(self):
        tr = unix_events._UnixWritePipeTransport(
            self.loop, self.pipe, self.protocol)
        tr = self.write_pipe_transport()
        self.assertTrue(tr.can_write_eof())

    @mock.patch('os.write')
    def test_write(self, m_write):
        tr = unix_events._UnixWritePipeTransport(
            self.loop, self.pipe, self.protocol)

        tr = self.write_pipe_transport()
        m_write.return_value = 4
        tr.write(b'data')
        m_write.assert_called_with(5, b'data')
@@ -516,9 +524,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase):

    @mock.patch('os.write')
    def test_write_no_data(self, m_write):
        tr = unix_events._UnixWritePipeTransport(
            self.loop, self.pipe, self.protocol)

        tr = self.write_pipe_transport()
        tr.write(b'')
        self.assertFalse(m_write.called)
        self.assertFalse(self.loop.writers)
@@ -526,9 +532,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase):

    @mock.patch('os.write')
    def test_write_partial(self, m_write):
        tr = unix_events._UnixWritePipeTransport(
            self.loop, self.pipe, self.protocol)

        tr = self.write_pipe_transport()
        m_write.return_value = 2
        tr.write(b'data')
        m_write.assert_called_with(5, b'data')
@@ -537,9 +541,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase):

    @mock.patch('os.write')
    def test_write_buffer(self, m_write):
        tr = unix_events._UnixWritePipeTransport(
            self.loop, self.pipe, self.protocol)

        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = [b'previous']
        tr.write(b'data')
@@ -549,9 +551,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase):

    @mock.patch('os.write')
    def test_write_again(self, m_write):
        tr = unix_events._UnixWritePipeTransport(
            self.loop, self.pipe, self.protocol)

        tr = self.write_pipe_transport()
        m_write.side_effect = BlockingIOError()
        tr.write(b'data')
        m_write.assert_called_with(5, b'data')
@@ -561,9 +561,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase):
    @mock.patch('trollius.unix_events.logger')
    @mock.patch('os.write')
    def test_write_err(self, m_write, m_log):
        tr = unix_events._UnixWritePipeTransport(
            self.loop, self.pipe, self.protocol)

        tr = self.write_pipe_transport()
        err = OSError()
        m_write.side_effect = err
        tr._fatal_error = mock.Mock()
@@ -585,11 +583,11 @@ class UnixWritePipeTransportTests(test_utils.TestCase):
        # This is a bit overspecified. :-(
        m_log.warning.assert_called_with(
            'pipe closed by peer or os.write(pipe, data) raised exception.')
        tr.close()

    @mock.patch('os.write')
    def test_write_close(self, m_write):
        tr = unix_events._UnixWritePipeTransport(
            self.loop, self.pipe, self.protocol)
        tr = self.write_pipe_transport()
        tr._read_ready()  # pipe was closed by peer

        tr.write(b'data')
@@ -598,8 +596,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase):
        self.assertEqual(tr._conn_lost, 2)

    def test__read_ready(self):
        tr = unix_events._UnixWritePipeTransport(self.loop, self.pipe,
                                                 self.protocol)
        tr = self.write_pipe_transport()
        tr._read_ready()
        self.assertFalse(self.loop.readers)
        self.assertFalse(self.loop.writers)
@@ -609,8 +606,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase):

    @mock.patch('os.write')
    def test__write_ready(self, m_write):
        tr = unix_events._UnixWritePipeTransport(
            self.loop, self.pipe, self.protocol)
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = [b'da', b'ta']
        m_write.return_value = 4
@@ -621,9 +617,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase):

    @mock.patch('os.write')
    def test__write_ready_partial(self, m_write):
        tr = unix_events._UnixWritePipeTransport(
            self.loop, self.pipe, self.protocol)

        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = [b'da', b'ta']
        m_write.return_value = 3
@@ -634,9 +628,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase):

    @mock.patch('os.write')
    def test__write_ready_again(self, m_write):
        tr = unix_events._UnixWritePipeTransport(
            self.loop, self.pipe, self.protocol)

        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = [b'da', b'ta']
        m_write.side_effect = BlockingIOError()
@@ -647,9 +639,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase):

    @mock.patch('os.write')
    def test__write_ready_empty(self, m_write):
        tr = unix_events._UnixWritePipeTransport(
            self.loop, self.pipe, self.protocol)

        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = [b'da', b'ta']
        m_write.return_value = 0
@@ -661,9 +651,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase):
    @mock.patch('trollius.log.logger.error')
    @mock.patch('os.write')
    def test__write_ready_err(self, m_write, m_logexc):
        tr = unix_events._UnixWritePipeTransport(
            self.loop, self.pipe, self.protocol)

        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = [b'da', b'ta']
        m_write.side_effect = err = OSError()
@@ -684,9 +672,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase):

    @mock.patch('os.write')
    def test__write_ready_closing(self, m_write):
        tr = unix_events._UnixWritePipeTransport(
            self.loop, self.pipe, self.protocol)

        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._closing = True
        tr._buffer = [b'da', b'ta']
@@ -701,9 +687,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase):

    @mock.patch('os.write')
    def test_abort(self, m_write):
        tr = unix_events._UnixWritePipeTransport(
            self.loop, self.pipe, self.protocol)

        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        self.loop.add_reader(5, tr._read_ready)
        tr._buffer = [b'da', b'ta']
@@ -717,8 +701,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase):
        self.protocol.connection_lost.assert_called_with(None)

    def test__call_connection_lost(self):
        tr = unix_events._UnixWritePipeTransport(
            self.loop, self.pipe, self.protocol)
        tr = self.write_pipe_transport()
        self.assertIsNotNone(tr._protocol)
        self.assertIsNotNone(tr._loop)

@@ -731,8 +714,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase):
        self.assertIsNone(tr._loop)

    def test__call_connection_lost_with_err(self):
        tr = unix_events._UnixWritePipeTransport(
            self.loop, self.pipe, self.protocol)
        tr = self.write_pipe_transport()
        self.assertIsNotNone(tr._protocol)
        self.assertIsNotNone(tr._loop)

@@ -745,26 +727,23 @@ class UnixWritePipeTransportTests(test_utils.TestCase):
        self.assertIsNone(tr._loop)

    def test_close(self):
        tr = unix_events._UnixWritePipeTransport(
            self.loop, self.pipe, self.protocol)

        tr = self.write_pipe_transport()
        tr.write_eof = mock.Mock()
        tr.close()
        tr.write_eof.assert_called_with()

    def test_close_closing(self):
        tr = unix_events._UnixWritePipeTransport(
            self.loop, self.pipe, self.protocol)
        # closing the transport twice must not fail
        tr.close()

    def test_close_closing(self):
        tr = self.write_pipe_transport()
        tr.write_eof = mock.Mock()
        tr._closing = True
        tr.close()
        self.assertFalse(tr.write_eof.called)

    def test_write_eof(self):
        tr = unix_events._UnixWritePipeTransport(
            self.loop, self.pipe, self.protocol)

        tr = self.write_pipe_transport()
        tr.write_eof()
        self.assertTrue(tr._closing)
        self.assertFalse(self.loop.readers)
@@ -772,8 +751,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase):
        self.protocol.connection_lost.assert_called_with(None)

    def test_write_eof_pending(self):
        tr = unix_events._UnixWritePipeTransport(
            self.loop, self.pipe, self.protocol)
        tr = self.write_pipe_transport()
        tr._buffer = [b'data']
        tr.write_eof()
        self.assertTrue(tr._closing)

@@ -1,10 +1,9 @@
from trollius import test_utils
import os
import sys
import unittest
from trollius.test_utils import unittest

if sys.platform != 'win32':
    raise test_utils.SkipTest('Windows only')
    raise unittest.SkipTest('Windows only')

import trollius as asyncio
from trollius import Return, From
@@ -12,6 +11,8 @@ from trollius import _overlapped
from trollius import py33_winapi as _winapi
from trollius import windows_events
from trollius.py33_exceptions import PermissionError, FileNotFoundError
from trollius import test_utils
from trollius.test_utils import mock


class UpperProto(asyncio.Protocol):
@@ -37,7 +38,7 @@ class ProactorTests(test_utils.TestCase):
    def test_close(self):
        a, b = self.loop._socketpair()
        trans = self.loop._make_socket_transport(a, asyncio.Protocol())
        f = asyncio.async(self.loop.sock_recv(b, 100))
        f = asyncio.ensure_future(self.loop.sock_recv(b, 100))
        trans.close()
        self.loop.run_until_complete(f)
        self.assertEqual(f.result(), b'')
@@ -68,7 +69,8 @@ class ProactorTests(test_utils.TestCase):
        clients = []
        for i in range(5):
            stream_reader = asyncio.StreamReader(loop=self.loop)
            protocol = asyncio.StreamReaderProtocol(stream_reader)
            protocol = asyncio.StreamReaderProtocol(stream_reader,
                                                    loop=self.loop)
            trans, proto = yield From(self.loop.create_pipe_connection(
                lambda: protocol, ADDRESS))
            self.assertIsInstance(trans, asyncio.Transport)
@@ -91,6 +93,18 @@ class ProactorTests(test_utils.TestCase):

        raise Return('done')

    def test_connect_pipe_cancel(self):
        exc = OSError()
        exc.winerror = _overlapped.ERROR_PIPE_BUSY
        with mock.patch.object(_overlapped, 'ConnectPipe', side_effect=exc) as connect:
            coro = self.loop._proactor.connect_pipe('pipe_address')
            task = self.loop.create_task(coro)

            # check that it's possible to cancel connect_pipe()
            task.cancel()
            with self.assertRaises(asyncio.CancelledError):
                self.loop.run_until_complete(task)

    def test_wait_for_handle(self):
        event = _overlapped.CreateEvent(None, True, False, None)
        self.addCleanup(_winapi.CloseHandle, event)
@@ -99,23 +113,28 @@ class ProactorTests(test_utils.TestCase):
        # result should be False at timeout
        fut = self.loop._proactor.wait_for_handle(event, 0.5)
        start = self.loop.time()
        self.loop.run_until_complete(fut)
        done = self.loop.run_until_complete(fut)
        elapsed = self.loop.time() - start

        self.assertEqual(done, False)
        self.assertFalse(fut.result())
        self.assertTrue(0.48 < elapsed < 0.9, elapsed)

        _overlapped.SetEvent(event)

        # Wait for for set event;
        # Wait for set event;
        # result should be True immediately
        fut = self.loop._proactor.wait_for_handle(event, 10)
        start = self.loop.time()
        self.loop.run_until_complete(fut)
        done = self.loop.run_until_complete(fut)
        elapsed = self.loop.time() - start

        self.assertEqual(done, True)
        self.assertTrue(fut.result())
        self.assertTrue(0 <= elapsed < 0.3, elapsed)

        # Tulip issue #195: cancelling a done _WaitHandleFuture must not crash
        # asyncio issue #195: cancelling a done _WaitHandleFuture
        # must not crash
        fut.cancel()

    def test_wait_for_handle_cancel(self):
@@ -132,7 +151,8 @@ class ProactorTests(test_utils.TestCase):
        elapsed = self.loop.time() - start
        self.assertTrue(0 <= elapsed < 0.1, elapsed)

        # Tulip issue #195: cancelling a _WaitHandleFuture twice must not crash
        # asyncio issue #195: cancelling a _WaitHandleFuture twice
        # must not crash
        fut = self.loop._proactor.wait_for_handle(event)
        fut.cancel()
        fut.cancel()

@@ -2,19 +2,18 @@

import socket
import sys
import unittest
import warnings
from trollius.test_utils import unittest

if sys.platform != 'win32':
    from trollius.test_utils import SkipTest
    raise SkipTest('Windows only')
    raise unittest.SkipTest('Windows only')

from trollius import _overlapped
from trollius import py33_winapi as _winapi
from trollius import test_support as support
from trollius import test_utils
from trollius import windows_utils
from trollius.test_support import IPV6_ENABLED
from trollius.test_utils import mock
import trollius.test_support as support


class WinsocketpairTests(unittest.TestCase):
@@ -29,11 +28,14 @@ class WinsocketpairTests(unittest.TestCase):
        ssock, csock = windows_utils.socketpair()
        self.check_winsocketpair(ssock, csock)

    @test_utils.skipUnless(IPV6_ENABLED, 'IPv6 not supported or enabled')
    @unittest.skipUnless(support.IPV6_ENABLED,
                         'IPv6 not supported or enabled')
    def test_winsocketpair_ipv6(self):
        ssock, csock = windows_utils.socketpair(family=socket.AF_INET6)
        self.check_winsocketpair(ssock, csock)

    @unittest.skipIf(hasattr(socket, 'socketpair'),
                     'socket.socketpair is available')
    @mock.patch('trollius.windows_utils.socket')
    def test_winsocketpair_exc(self, m_socket):
        m_socket.AF_INET = socket.AF_INET
@@ -52,6 +54,8 @@ class WinsocketpairTests(unittest.TestCase):
        self.assertRaises(ValueError,
                          windows_utils.socketpair, proto=1)

    @unittest.skipIf(hasattr(socket, 'socketpair'),
                     'socket.socketpair is available')
    @mock.patch('trollius.windows_utils.socket')
    def test_winsocketpair_close(self, m_socket):
        m_socket.AF_INET = socket.AF_INET
@@ -110,8 +114,11 @@ class PipeTests(unittest.TestCase):
        self.assertEqual(p.handle, h)

        # check garbage collection of p closes handle
        del p
        support.gc_collect()
        with warnings.catch_warnings():
            if sys.version_info >= (3, 4):
                warnings.filterwarnings("ignore", "", ResourceWarning)
            del p
            support.gc_collect()
        try:
            _winapi.CloseHandle(h)
        except OSError as e:
@@ -165,6 +172,9 @@ class PopenTests(unittest.TestCase):
        self.assertTrue(msg.upper().rstrip().startswith(out))
        self.assertTrue(b"stderr".startswith(err))

        p.stdin.close()
        p.stdout.close()
        p.stderr.close()
        p.wait()


99
tox.ini
@@ -1,32 +1,91 @@
[tox]
envlist = py26,py27,py32,py33,py34
envlist = py26,py27,py2_release,py2_no_ssl,py2_no_concurrent,py32,py33,py34,py3_release,py3_no_ssl
# and: pyflakes2,pyflakes3

[testenv]
deps=
    aiotest
setenv =
    TROLLIUSDEBUG = 1
commands=
    python -Wd runtests.py -r {posargs}
    python -Wd run_aiotest.py -r {posargs}

[testenv:pyflakes2]
basepython = python2
deps=
    pyflakes
commands=
    pyflakes trollius tests runtests.py check.py run_aiotest.py setup.py

[testenv:pyflakes3]
basepython = python3
deps=
    pyflakes
commands=
    pyflakes trollius tests runtests.py check.py run_aiotest.py setup.py

[testenv:py26]
setenv =
    TROLLIUSDEBUG = 1
deps=
    futures
    mock
    ordereddict
    unittest2
commands=python runtests.py -r
    aiotest
    futures
    mock==1.0.1
    ordereddict
    unittest2

[testenv:py27]
setenv =
    TROLLIUSDEBUG = 1
deps=
    futures
    mock
commands=python runtests.py -r
    aiotest
    futures
    mock
    unittest2

[testenv:py2_release]
# Run tests in release mode
basepython = python2
deps=
    aiotest
    futures
    mock
    unittest2
setenv =
    TROLLIUSDEBUG =

[testenv:py2_no_ssl]
basepython = python2
deps=
    aiotest
    futures
    mock
    unittest2
commands=
    python -Wd runtests.py --no-ssl -r {posargs}

[testenv:py2_no_concurrent]
basepython = python2
deps=
    aiotest
    futures
    mock
    unittest2
commands=
    python -Wd runtests.py --no-concurrent -r {posargs}

[testenv:py32]
setenv =
    TROLLIUSDEBUG = 1
deps=
    mock
commands=python runtests.py -r
    aiotest
    mock

[testenv:py33]
[testenv:py35]
basepython = python3.5

[testenv:py3_release]
# Run tests in release mode
basepython = python3
setenv =
    TROLLIUSDEBUG = 1
commands=python runtests.py -r
    TROLLIUSDEBUG =

[testenv:py3_no_ssl]
basepython = python3
commands=
    python -Wd runtests.py --no-ssl -r {posargs}

@@ -4,7 +4,7 @@ import sys

# The selectors module is in the stdlib in Python 3.4 but not in 3.3.
# Do this first, so the other submodules can use "from . import selectors".
# Prefer trollius/selectors.py over the stdlib one, as ours may be newer.
# Prefer asyncio/selectors.py over the stdlib one, as ours may be newer.
try:
    from . import selectors
except ImportError:
@@ -18,6 +18,7 @@ if sys.platform == 'win32':
    import _overlapped  # Will also be exported.

# This relies on each of the submodules having an __all__ variable.
from .base_events import *
from .coroutines import *
from .events import *
from .futures import *
@@ -30,7 +31,8 @@ from .subprocess import *
from .tasks import *
from .transports import *

__all__ = (coroutines.__all__ +
__all__ = (base_events.__all__ +
           coroutines.__all__ +
           events.__all__ +
           py33_exceptions.__all__ +
           futures.__all__ +

@@ -23,11 +23,17 @@ import socket
import subprocess
import sys
import traceback
import warnings
try:
    from collections import OrderedDict
except ImportError:
    # Python 2.6: use ordereddict backport
    from ordereddict import OrderedDict
try:
    from threading import get_ident as _get_thread_ident
except ImportError:
    # Python 2
    from threading import _get_ident as _get_thread_ident

from . import compat
from . import coroutines
@@ -40,12 +46,19 @@ from .log import logger
from .time_monotonic import time_monotonic, time_monotonic_resolution


__all__ = ['BaseEventLoop', 'Server']
__all__ = ['BaseEventLoop']


# Argument for default thread pool executor creation.
_MAX_WORKERS = 5

# Minimum number of _scheduled timer handles before cleanup of
# cancelled handles is performed.
_MIN_SCHEDULED_TIMER_HANDLES = 100

# Minimum fraction of _scheduled timer handles that are cancelled
# before cleanup of cancelled handles is performed.
_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5

def _format_handle(handle):
    cb = handle._callback
@@ -72,7 +85,11 @@ class _StopError(BaseException):
def _check_resolved_address(sock, address):
    # Ensure that the address is already resolved to avoid the trap of hanging
    # the entire event loop when the address requires doing a DNS lookup.
    #
    # getaddrinfo() is slow (around 10 us per call): this function should only
    # be called in debug mode
    family = sock.family

    if family == socket.AF_INET:
        host, port = address
    elif family == socket.AF_INET6:
@@ -80,27 +97,49 @@ def _check_resolved_address(sock, address):
    else:
        return

    type_mask = 0
    if hasattr(socket, 'SOCK_NONBLOCK'):
        type_mask |= socket.SOCK_NONBLOCK
    if hasattr(socket, 'SOCK_CLOEXEC'):
        type_mask |= socket.SOCK_CLOEXEC
    # Use getaddrinfo(flags=AI_NUMERICHOST) to ensure that the address is
    # already resolved.
    try:
        socket.getaddrinfo(host, port,
                           family,
                           (sock.type & ~type_mask),
                           sock.proto,
                           socket.AI_NUMERICHOST)
    except socket.gaierror as err:
        raise ValueError("address must be resolved (IP address), got %r: %s"
                         % (address, err))
    # On Windows, socket.inet_pton() is only available since Python 3.4
    if hasattr(socket, 'inet_pton'):
        # getaddrinfo() is slow and has known issues: prefer inet_pton()
        # if available
        try:
            socket.inet_pton(family, host)
        except socket.error as exc:
            raise ValueError("address must be resolved (IP address), "
                             "got host %r: %s"
                             % (host, exc))
    else:
        # Use getaddrinfo(flags=AI_NUMERICHOST) to ensure that the address is
        # already resolved.
        type_mask = 0
        if hasattr(socket, 'SOCK_NONBLOCK'):
            type_mask |= socket.SOCK_NONBLOCK
        if hasattr(socket, 'SOCK_CLOEXEC'):
            type_mask |= socket.SOCK_CLOEXEC
        try:
            socket.getaddrinfo(host, port,
                               family,
                               (sock.type & ~type_mask),
                               sock.proto,
                               socket.AI_NUMERICHOST)
        except socket.gaierror as err:
            raise ValueError("address must be resolved (IP address), "
                             "got host %r: %s"
                             % (host, err))

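A quick illustration of why ``inet_pton()`` is a safe fast path here: it only parses numeric addresses and never performs DNS resolution (illustrative sketch, standard library only)::

    import socket
    socket.inet_pton(socket.AF_INET, '127.0.0.1')       # already resolved: ok
    try:
        socket.inet_pton(socket.AF_INET, 'python.org')  # would need a DNS lookup
    except socket.error:
        pass  # rejected, so the event loop can never block on resolution
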
def _raise_stop_error(*args):
|
||||
raise _StopError
|
||||
|
||||
|
||||
def _run_until_complete_cb(fut):
|
||||
exc = fut._exception
|
||||
if (isinstance(exc, BaseException)
|
||||
and not isinstance(exc, Exception)):
|
||||
# Issue #22429: run_forever() already finished, no need to
|
||||
# stop it.
|
||||
return
|
||||
_raise_stop_error()
|
||||
|
||||
|
||||
class Server(events.AbstractServer):
|
||||
|
||||
def __init__(self, loop, sockets):
|
||||
@@ -151,18 +190,24 @@ class Server(events.AbstractServer):
|
||||
class BaseEventLoop(events.AbstractEventLoop):

    def __init__(self):
        self._timer_cancelled_count = 0
        self._closed = False
        self._ready = collections.deque()
        self._scheduled = []
        self._default_executor = None
        self._internal_fds = 0
        self._running = False
        # Identifier of the thread running the event loop, or None if the
        # event loop is not running
        self._thread_id = None
        self._clock_resolution = time_monotonic_resolution
        self._exception_handler = None
        self._debug = bool(os.environ.get('TROLLIUSDEBUG'))
        self.set_debug(bool(os.environ.get('TROLLIUSDEBUG')))
        # In debug mode, if the execution of a callback or a step of a task
        # exceeds this duration in seconds, the slow callback/task is logged.
        self.slow_callback_duration = 0.1
        self._current_handle = None
        self._task_factory = None
        self._coroutine_wrapper_set = False

    def __repr__(self):
        return ('<%s running=%s closed=%s debug=%s>'
@@ -174,17 +219,39 @@ class BaseEventLoop(events.AbstractEventLoop):

        Return a task object.
        """
        task = tasks.Task(coro, loop=self)
        if task._source_traceback:
            del task._source_traceback[-1]
        self._check_closed()
        if self._task_factory is None:
            task = tasks.Task(coro, loop=self)
            if task._source_traceback:
                del task._source_traceback[-1]
        else:
            task = self._task_factory(self, coro)
        return task

    def set_task_factory(self, factory):
        """Set a task factory that will be used by loop.create_task().

        If factory is None the default task factory will be set.

        If factory is a callable, it should have a signature matching
        '(loop, coro)', where 'loop' will be a reference to the active
        event loop, 'coro' will be a coroutine object. The callable
        must return a Future.
        """
        if factory is not None and not callable(factory):
            raise TypeError('task factory must be a callable or None')
        self._task_factory = factory

    def get_task_factory(self):
        """Return a task factory, or None if the default one is in use."""
        return self._task_factory
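
    # Usage sketch (not part of the patch; 'loop' is an assumed name):
    # a factory that delegates to the default Task but adds tracing.
    #
    #     def tracing_factory(loop, coro):
    #         task = tasks.Task(coro, loop=loop)   # default behaviour
    #         logger.debug('scheduled %r', task)   # extra: log each task
    #         return task
    #
    #     loop.set_task_factory(tracing_factory)
    #     assert loop.get_task_factory() is tracing_factory
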
    def _make_socket_transport(self, sock, protocol, waiter=None,
                               extra=None, server=None):
        """Create socket transport."""
        raise NotImplementedError

    def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter,
    def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter=None,
                            server_side=False, server_hostname=None,
                            extra=None, server=None):
        """Create SSL transport."""
@@ -232,9 +299,10 @@ class BaseEventLoop(events.AbstractEventLoop):
    def run_forever(self):
        """Run until stop() is called."""
        self._check_closed()
        if self._running:
        if self.is_running():
            raise RuntimeError('Event loop is running.')
        self._running = True
        self._set_coroutine_wrapper(self._debug)
        self._thread_id = _get_thread_ident()
        try:
            while True:
                try:
@@ -242,7 +310,8 @@ class BaseEventLoop(events.AbstractEventLoop):
                except _StopError:
                    break
        finally:
            self._running = False
            self._thread_id = None
            self._set_coroutine_wrapper(False)

    def run_until_complete(self, future):
        """Run until the Future is done.
@@ -258,15 +327,23 @@ class BaseEventLoop(events.AbstractEventLoop):
        self._check_closed()

        new_task = not isinstance(future, futures._FUTURE_CLASSES)
        future = tasks.async(future, loop=self)
        future = tasks.ensure_future(future, loop=self)
        if new_task:
            # An exception is raised if the future didn't complete, so there
            # is no need to log the "destroy pending task" message
            future._log_destroy_pending = False

        future.add_done_callback(_raise_stop_error)
        self.run_forever()
        future.remove_done_callback(_raise_stop_error)
        future.add_done_callback(_run_until_complete_cb)
        try:
            self.run_forever()
        except:
            if new_task and future.done() and not future.cancelled():
                # The coroutine raised a BaseException. Consume the exception
                # to not log a warning, the caller doesn't have access to the
                # local task.
                future.exception()
            raise
        future.remove_done_callback(_run_until_complete_cb)
        if not future.done():
            raise RuntimeError('Event loop stopped before Future completed.')

@@ -289,7 +366,7 @@ class BaseEventLoop(events.AbstractEventLoop):

        The event loop must not be running.
        """
        if self._running:
        if self.is_running():
            raise RuntimeError("Cannot close a running event loop")
        if self._closed:
            return
@@ -307,9 +384,19 @@ class BaseEventLoop(events.AbstractEventLoop):
        """Returns True if the event loop was closed."""
        return self._closed

    # On Python 3.3 and older, objects with a destructor that are part of a
    # reference cycle are never destroyed. This is no longer the case on
    # Python 3.4 thanks to PEP 442.
    if sys.version_info >= (3, 4):
        def __del__(self):
            if not self.is_closed():
                warnings.warn("unclosed event loop %r" % self, ResourceWarning)
                if not self.is_running():
                    self.close()

    def is_running(self):
        """Returns True if the event loop is running."""
        return self._running
        return (self._thread_id is not None)

    def time(self):
        """Return the time according to the event loop's clock.
@@ -346,14 +433,17 @@ class BaseEventLoop(events.AbstractEventLoop):

        Absolute time corresponds to the event loop's time() method.
        """
        if coroutines.iscoroutinefunction(callback):
        if (coroutines.iscoroutine(callback)
                or coroutines.iscoroutinefunction(callback)):
            raise TypeError("coroutines cannot be used with call_at()")
        self._check_closed()
        if self._debug:
            self._assert_is_current_event_loop()
            self._check_thread()
        timer = events.TimerHandle(when, callback, args, self)
        if timer._source_traceback:
            del timer._source_traceback[-1]
        heapq.heappush(self._scheduled, timer)
        timer._scheduled = True
        return timer

    def call_soon(self, callback, *args):
@@ -366,24 +456,26 @@ class BaseEventLoop(events.AbstractEventLoop):
        Any positional arguments after the callback will be passed to
        the callback when it is called.
        """
        handle = self._call_soon(callback, args, check_loop=True)
        if self._debug:
            self._check_thread()
        handle = self._call_soon(callback, args)
        if handle._source_traceback:
            del handle._source_traceback[-1]
        return handle

    def _call_soon(self, callback, args, check_loop):
        if coroutines.iscoroutinefunction(callback):
    def _call_soon(self, callback, args):
        if (coroutines.iscoroutine(callback)
                or coroutines.iscoroutinefunction(callback)):
            raise TypeError("coroutines cannot be used with call_soon()")
        if self._debug and check_loop:
            self._assert_is_current_event_loop()
        self._check_closed()
        handle = events.Handle(callback, args, self)
        if handle._source_traceback:
            del handle._source_traceback[-1]
        self._ready.append(handle)
        return handle

    def _assert_is_current_event_loop(self):
        """Asserts that this event loop is the current event loop.
    def _check_thread(self):
        """Check that the current thread is the thread running the event loop.

        Non-thread-safe methods of this class make this assumption and will
        likely behave incorrectly when the assumption is violated.
@@ -391,40 +483,41 @@ class BaseEventLoop(events.AbstractEventLoop):
        Should only be called when (self._debug == True). The caller is
        responsible for checking this condition for performance reasons.
        """
        try:
            current = events.get_event_loop()
        except AssertionError:
        if self._thread_id is None:
            return
        if current is not self:
        thread_id = _get_thread_ident()
        if thread_id != self._thread_id:
            raise RuntimeError(
                "Non-thread-safe operation invoked on an event loop other "
                "than the current one")

    def call_soon_threadsafe(self, callback, *args):
        """Like call_soon(), but thread-safe."""
        handle = self._call_soon(callback, args, check_loop=False)
        handle = self._call_soon(callback, args)
        if handle._source_traceback:
            del handle._source_traceback[-1]
        self._write_to_self()
        return handle

    def run_in_executor(self, executor, callback, *args):
        if coroutines.iscoroutinefunction(callback):
            raise TypeError("Coroutines cannot be used with run_in_executor()")
        if isinstance(callback, events.Handle):
    def run_in_executor(self, executor, func, *args):
        if (coroutines.iscoroutine(func)
                or coroutines.iscoroutinefunction(func)):
            raise TypeError("coroutines cannot be used with run_in_executor()")
        self._check_closed()
        if isinstance(func, events.Handle):
            assert not args
            assert not isinstance(callback, events.TimerHandle)
            if callback._cancelled:
            assert not isinstance(func, events.TimerHandle)
            if func._cancelled:
                f = futures.Future(loop=self)
                f.set_result(None)
                return f
            callback, args = callback._callback, callback._args
            func, args = func._callback, func._args
        if executor is None:
            executor = self._default_executor
            if executor is None:
                executor = get_default_executor()
                self._default_executor = executor
        return futures.wrap_future(executor.submit(callback, *args), loop=self)
        return futures.wrap_future(executor.submit(func, *args), loop=self)
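
    # Usage sketch (not part of the patch; 'loop' and 'blocking_read' are
    # assumed names): offloading blocking I/O to the default executor.
    #
    #     def blocking_read(path):
    #         with open(path, 'rb') as f:
    #             return f.read()
    #
    #     fut = loop.run_in_executor(None, blocking_read, 'data.bin')
    #     data = yield From(fut)   # Trollius spelling of 'await fut'
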
    def set_default_executor(self, executor):
        self._default_executor = executor
@@ -583,6 +676,9 @@ class BaseEventLoop(events.AbstractEventLoop):
        transport, protocol = yield From(self._create_connection_transport(
            sock, protocol_factory, ssl, server_hostname))
        if self._debug:
            # Get the socket from the transport because SSL transport closes
            # the old socket and creates a new SSL socket
            sock = transport.get_extra_info('socket')
            logger.debug("%r connected to %s:%r: (%r, %r)",
                         sock, host, port, transport, protocol)
        raise Return(transport, protocol)
@@ -600,7 +696,12 @@ class BaseEventLoop(events.AbstractEventLoop):
        else:
            transport = self._make_socket_transport(sock, protocol, waiter)

        yield From(waiter)
        try:
            yield From(waiter)
        except:
            transport.close()
            raise

        raise Return(transport, protocol)

    @coroutine
@@ -684,7 +785,13 @@ class BaseEventLoop(events.AbstractEventLoop):
            logger.debug("Datagram endpoint remote_addr=%r created: "
                         "(%r, %r)",
                         remote_addr, transport, protocol)
        yield From(waiter)

        try:
            yield From(waiter)
        except:
            transport.close()
            raise

        raise Return(transport, protocol)

    @coroutine
@@ -729,6 +836,10 @@ class BaseEventLoop(events.AbstractEventLoop):
                sock = socket.socket(af, socktype, proto)
            except socket.error:
                # Assume it's a bad family/type/protocol combination.
                if self._debug:
                    logger.warning('create_server() failed to create '
                                   'socket.socket(%r, %r, %r)',
                                   af, socktype, proto, exc_info=True)
                continue
            sockets.append(sock)
            if reuse_address:
@@ -772,7 +883,13 @@ class BaseEventLoop(events.AbstractEventLoop):
        protocol = protocol_factory()
        waiter = futures.Future(loop=self)
        transport = self._make_read_pipe_transport(pipe, protocol, waiter)
        yield From(waiter)

        try:
            yield From(waiter)
        except:
            transport.close()
            raise

        if self._debug:
            logger.debug('Read pipe %r connected: (%r, %r)',
                         pipe.fileno(), transport, protocol)
@@ -783,7 +900,13 @@ class BaseEventLoop(events.AbstractEventLoop):
        protocol = protocol_factory()
        waiter = futures.Future(loop=self)
        transport = self._make_write_pipe_transport(pipe, protocol, waiter)
        yield From(waiter)

        try:
            yield From(waiter)
        except:
            transport.close()
            raise

        if self._debug:
            logger.debug('Write pipe %r connected: (%r, %r)',
                         pipe.fileno(), transport, protocol)
@@ -905,6 +1028,11 @@ class BaseEventLoop(events.AbstractEventLoop):
        else:
            exc_info = False

        if ('source_traceback' not in context
                and self._current_handle is not None
                and self._current_handle._source_traceback):
            context['handle_traceback'] = self._current_handle._source_traceback

        log_lines = [message]
        for key in sorted(context):
            if key in ('message', 'exception'):
@@ -914,6 +1042,10 @@ class BaseEventLoop(events.AbstractEventLoop):
                tb = ''.join(traceback.format_list(value))
                value = 'Object created at (most recent call last):\n'
                value += tb.rstrip()
            elif key == 'handle_traceback':
                tb = ''.join(traceback.format_list(value))
                value = 'Handle created at (most recent call last):\n'
                value += tb.rstrip()
            else:
                value = repr(value)
            log_lines.append('{0}: {1}'.format(key, value))
@@ -973,16 +1105,19 @@ class BaseEventLoop(events.AbstractEventLoop):
        assert isinstance(handle, events.Handle), 'A Handle is required here'
        if handle._cancelled:
            return
        if isinstance(handle, events.TimerHandle):
            heapq.heappush(self._scheduled, handle)
        else:
            self._ready.append(handle)
        assert not isinstance(handle, events.TimerHandle)
        self._ready.append(handle)

    def _add_callback_signalsafe(self, handle):
        """Like _add_callback() but called from a signal handler."""
        self._add_callback(handle)
        self._write_to_self()

    def _timer_handle_cancelled(self, handle):
        """Notification that a TimerHandle has been cancelled."""
        if handle._scheduled:
            self._timer_cancelled_count += 1

    def _run_once(self):
        """Run one full iteration of the event loop.

@@ -990,9 +1125,29 @@ class BaseEventLoop(events.AbstractEventLoop):
        schedules the resulting callbacks, and finally schedules
        'call_later' callbacks.
        """
        # Remove delayed calls that were cancelled from head of queue.
        while self._scheduled and self._scheduled[0]._cancelled:
            heapq.heappop(self._scheduled)

        sched_count = len(self._scheduled)
        if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and
            float(self._timer_cancelled_count) / sched_count >
                _MIN_CANCELLED_TIMER_HANDLES_FRACTION):
            # Remove delayed calls that were cancelled if their number
            # is too high
            new_scheduled = []
            for handle in self._scheduled:
                if handle._cancelled:
                    handle._scheduled = False
                else:
                    new_scheduled.append(handle)

            heapq.heapify(new_scheduled)
            self._scheduled = new_scheduled
            self._timer_cancelled_count = 0
        else:
            # Remove delayed calls that were cancelled from head of queue.
            while self._scheduled and self._scheduled[0]._cancelled:
                self._timer_cancelled_count -= 1
                handle = heapq.heappop(self._scheduled)
                handle._scheduled = False
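
        # Worked example (illustrative, not from the patch): with 1000
        # scheduled timers of which 600 were cancelled, 600/1000 = 0.6 is
        # above _MIN_CANCELLED_TIMER_HANDLES_FRACTION (0.5) and 1000 is
        # above _MIN_SCHEDULED_TIMER_HANDLES (100), so the whole heap is
        # rebuilt in one O(n) pass instead of popping cancelled handles
        # lazily from the head of the queue:
        #
        #     sched_count, cancelled = 1000, 600
        #     assert (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and
        #             float(cancelled) / sched_count >
        #             _MIN_CANCELLED_TIMER_HANDLES_FRACTION)
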
        timeout = None
        if self._ready:
@@ -1033,6 +1188,7 @@ class BaseEventLoop(events.AbstractEventLoop):
            if handle._when >= end_time:
                break
            handle = heapq.heappop(self._scheduled)
            handle._scheduled = False
            self._ready.append(handle)

        # This is the only place where callbacks are actually *called*.
@@ -1047,18 +1203,58 @@ class BaseEventLoop(events.AbstractEventLoop):
            if handle._cancelled:
                continue
            if self._debug:
                t0 = self.time()
                handle._run()
                dt = self.time() - t0
                if dt >= self.slow_callback_duration:
                    logger.warning('Executing %s took %.3f seconds',
                                   _format_handle(handle), dt)
                try:
                    self._current_handle = handle
                    t0 = self.time()
                    handle._run()
                    dt = self.time() - t0
                    if dt >= self.slow_callback_duration:
                        logger.warning('Executing %s took %.3f seconds',
                                       _format_handle(handle), dt)
                finally:
                    self._current_handle = None
            else:
                handle._run()
        handle = None  # Needed to break cycles when an exception occurs.

    def _set_coroutine_wrapper(self, enabled):
        try:
            set_wrapper = sys.set_coroutine_wrapper
            get_wrapper = sys.get_coroutine_wrapper
        except AttributeError:
            return

        enabled = bool(enabled)
        if self._coroutine_wrapper_set is enabled:
            return

        wrapper = coroutines.debug_wrapper
        current_wrapper = get_wrapper()

        if enabled:
            if current_wrapper not in (None, wrapper):
                warnings.warn(
                    "loop.set_debug(True): cannot set debug coroutine "
                    "wrapper; another wrapper is already set %r" %
                    current_wrapper, RuntimeWarning)
            else:
                set_wrapper(wrapper)
                self._coroutine_wrapper_set = True
        else:
            if current_wrapper not in (None, wrapper):
                warnings.warn(
                    "loop.set_debug(False): cannot unset debug coroutine "
                    "wrapper; another wrapper was set %r" %
                    current_wrapper, RuntimeWarning)
            else:
                set_wrapper(None)
                self._coroutine_wrapper_set = False

    def get_debug(self):
        return self._debug

    def set_debug(self, enabled):
        self._debug = enabled

        if self.is_running():
            self._set_coroutine_wrapper(enabled)

@@ -1,36 +1,46 @@
import collections
import subprocess
import sys
import warnings

from . import futures
from . import protocols
from . import transports
from .coroutines import coroutine, From
from .coroutines import coroutine, From, Return
from .log import logger
from .py33_exceptions import ProcessLookupError


class BaseSubprocessTransport(transports.SubprocessTransport):

    def __init__(self, loop, protocol, args, shell,
                 stdin, stdout, stderr, bufsize,
                 extra=None, **kwargs):
                 waiter=None, extra=None, **kwargs):
        super(BaseSubprocessTransport, self).__init__(extra)
        self._closed = False
        self._protocol = protocol
        self._loop = loop
        self._proc = None
        self._pid = None

        self._returncode = None
        self._exit_waiters = []
        self._pending_calls = collections.deque()
        self._pipes = {}
        self._finished = False

        if stdin == subprocess.PIPE:
            self._pipes[0] = None
        if stdout == subprocess.PIPE:
            self._pipes[1] = None
        if stderr == subprocess.PIPE:
            self._pipes[2] = None
        self._pending_calls = collections.deque()
        self._finished = False
        self._returncode = None

        # Create the child process: set the _proc attribute
        self._start(args=args, shell=shell, stdin=stdin, stdout=stdout,
                    stderr=stderr, bufsize=bufsize, **kwargs)
        self._pid = self._proc.pid
        self._extra['subprocess'] = self._proc

        if self._loop.get_debug():
            if isinstance(args, (bytes, str)):
                program = args
@@ -39,10 +49,20 @@ class BaseSubprocessTransport(transports.SubprocessTransport):
            logger.debug('process %r created: pid %s',
                         program, self._pid)

        self._loop.create_task(self._connect_pipes(waiter))

    def __repr__(self):
        info = [self.__class__.__name__, 'pid=%s' % self._pid]
        info = [self.__class__.__name__]
        if self._closed:
            info.append('closed')
        if self._pid is not None:
            info.append('pid=%s' % self._pid)
        if self._returncode is not None:
            info.append('returncode=%s' % self._returncode)
        elif self._pid is not None:
            info.append('running')
        else:
            info.append('not started')

        stdin = self._pipes.get(0)
        if stdin is not None:
@@ -63,17 +83,40 @@ class BaseSubprocessTransport(transports.SubprocessTransport):
    def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
        raise NotImplementedError

    def _make_write_subprocess_pipe_proto(self, fd):
        raise NotImplementedError

    def _make_read_subprocess_pipe_proto(self, fd):
        raise NotImplementedError

    def close(self):
        if self._closed:
            return
        self._closed = True

        for proto in self._pipes.values():
            if proto is None:
                continue
            proto.pipe.close()
        if self._returncode is None:
            self.terminate()

        if (self._proc is not None
                # the child process finished?
                and self._returncode is None
                # the child process finished but the transport was not notified yet?
                and self._proc.poll() is None
                ):
            if self._loop.get_debug():
                logger.warning('Close running child process: kill %r', self)

            try:
                self._proc.kill()
            except ProcessLookupError:
                pass

            # Don't clear the _proc reference yet: _post_init() may still run

    # On Python 3.3 and older, objects with a destructor that are part of a
    # reference cycle are never destroyed. This is no longer the case on
    # Python 3.4 thanks to PEP 442.
    if sys.version_info >= (3, 4):
        def __del__(self):
            if not self._closed:
                warnings.warn("unclosed transport %r" % self, ResourceWarning)
                self.close()

    def get_pid(self):
        return self._pid
@@ -87,41 +130,58 @@ class BaseSubprocessTransport(transports.SubprocessTransport):
        else:
            return None

    def _check_proc(self):
        if self._proc is None:
            raise ProcessLookupError()

    def send_signal(self, signal):
        self._check_proc()
        self._proc.send_signal(signal)

    def terminate(self):
        self._check_proc()
        self._proc.terminate()

    def kill(self):
        self._check_proc()
        self._proc.kill()

    @coroutine
    def _post_init(self):
        proc = self._proc
        loop = self._loop
        if proc.stdin is not None:
            _, pipe = yield From(loop.connect_write_pipe(
                lambda: WriteSubprocessPipeProto(self, 0),
                proc.stdin))
            self._pipes[0] = pipe
        if proc.stdout is not None:
            _, pipe = yield From(loop.connect_read_pipe(
                lambda: ReadSubprocessPipeProto(self, 1),
                proc.stdout))
            self._pipes[1] = pipe
        if proc.stderr is not None:
            _, pipe = yield From(loop.connect_read_pipe(
                lambda: ReadSubprocessPipeProto(self, 2),
                proc.stderr))
            self._pipes[2] = pipe
    def _connect_pipes(self, waiter):
        try:
            proc = self._proc
            loop = self._loop

        assert self._pending_calls is not None
            if proc.stdin is not None:
                _, pipe = yield From(loop.connect_write_pipe(
                    lambda: WriteSubprocessPipeProto(self, 0),
                    proc.stdin))
                self._pipes[0] = pipe

        self._loop.call_soon(self._protocol.connection_made, self)
        for callback, data in self._pending_calls:
            self._loop.call_soon(callback, *data)
        self._pending_calls = None
            if proc.stdout is not None:
                _, pipe = yield From(loop.connect_read_pipe(
                    lambda: ReadSubprocessPipeProto(self, 1),
                    proc.stdout))
                self._pipes[1] = pipe

            if proc.stderr is not None:
                _, pipe = yield From(loop.connect_read_pipe(
                    lambda: ReadSubprocessPipeProto(self, 2),
                    proc.stderr))
                self._pipes[2] = pipe

            assert self._pending_calls is not None

            loop.call_soon(self._protocol.connection_made, self)
            for callback, data in self._pending_calls:
                loop.call_soon(callback, *data)
            self._pending_calls = None
        except Exception as exc:
            if waiter is not None and not waiter.cancelled():
                waiter.set_exception(exc)
        else:
            if waiter is not None and not waiter.cancelled():
                waiter.set_result(None)

    def _call(self, cb, *data):
        if self._pending_calls is not None:
@@ -146,6 +206,25 @@ class BaseSubprocessTransport(transports.SubprocessTransport):
        self._call(self._protocol.process_exited)
        self._try_finish()

        # wake up futures waiting for wait()
        for waiter in self._exit_waiters:
            if not waiter.cancelled():
                waiter.set_result(returncode)
        self._exit_waiters = None

    @coroutine
    def _wait(self):
        """Wait until the process exits and return the process return code.

        This method is a coroutine."""
        if self._returncode is not None:
            raise Return(self._returncode)

        waiter = futures.Future(loop=self._loop)
        self._exit_waiters.append(waiter)
        returncode = yield From(waiter)
        raise Return(returncode)
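
    # Sketch (names from the patch above): a caller parks on a Future in
    # _exit_waiters until _process_exited() publishes the return code.
    #
    #     returncode = yield From(transport._wait())
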
    def _try_finish(self):
        assert not self._finished
        if self._returncode is None:
@@ -153,15 +232,15 @@ class BaseSubprocessTransport(transports.SubprocessTransport):
        if all(p is not None and p.disconnected
               for p in self._pipes.values()):
            self._finished = True
            self._loop.call_soon(self._call_connection_lost, None)
            self._call(self._call_connection_lost, None)

    def _call_connection_lost(self, exc):
        try:
            self._protocol.connection_lost(exc)
        finally:
            self._loop = None
            self._proc = None
            self._protocol = None
            self._loop = None


class WriteSubprocessPipeProto(protocols.BaseProtocol):
@@ -182,6 +261,7 @@ class WriteSubprocessPipeProto(protocols.BaseProtocol):
    def connection_lost(self, exc):
        self.disconnected = True
        self.proc._pipe_connection_lost(self.fd, exc)
        self.proc = None

    def pause_writing(self):
        self.proc._protocol.pause_writing()

@@ -1,8 +1,10 @@
"""
Compatibility constants and functions for the different Python versions.
"""
"""Compatibility helpers for the different Python versions."""

import sys

# Python 2 or older?
PY2 = (sys.version_info <= (2,))

# Python 2.6 or older?
PY26 = (sys.version_info < (2, 7))

@@ -15,6 +17,9 @@ PY33 = (sys.version_info >= (3, 3))
# Python 3.4 or newer?
PY34 = sys.version_info >= (3, 4)

# Python 3.5 or newer?
PY35 = sys.version_info >= (3, 5)

if PY3:
    integer_types = (int,)
    bytes_type = bytes
@@ -31,6 +36,16 @@ else:
else:  # Python 2.7
    BYTES_TYPES = (str, bytearray, memoryview, buffer)


if PY3:
    def reraise(tp, value, tb=None):
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
else:
    exec("""def reraise(tp, value, tb=None): raise tp, value, tb""")
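
# Usage sketch (not part of the patch): re-raising a captured exception with
# its original traceback, spelled the same way on Python 2 and Python 3
# ('risky' is an assumed name):
#
#     import sys
#
#     try:
#         risky()
#     except Exception:
#         tp, value, tb = sys.exc_info()
#         reraise(tp, value, tb)
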
def flatten_bytes(data):
    """
    Convert bytes-like objects (bytes, bytearray, memoryview, buffer) to
@@ -51,3 +66,9 @@ def flatten_bytes(data):
        return data.tobytes()
    else:
        return data


def flatten_list_bytes(data):
    """Concatenate a sequence of bytes-like objects."""
    data = map(flatten_bytes, data)
    return b''.join(data)
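
# Usage sketch (not part of the patch): normalizing heterogeneous
# bytes-like chunks before a single write.
#
#     chunks = [b'HTTP/1.1 200 OK\r\n', bytearray(b'\r\n'), memoryview(b'body')]
#     payload = flatten_list_bytes(chunks)
#     assert payload == b'HTTP/1.1 200 OK\r\n\r\nbody'
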
@@ -1,5 +1,6 @@
__all__ = ['coroutine',
           'iscoroutinefunction', 'iscoroutine']
           'iscoroutinefunction', 'iscoroutine',
           'From', 'Return']

import functools
import inspect
@@ -21,7 +22,7 @@ _YIELD_FROM = opcode.opmap.get('YIELD_FROM', None)
# If you set _DEBUG to true, @coroutine will wrap the resulting
# generator objects in a CoroWrapper instance (defined below). That
# instance will log a message when the generator is never iterated
# over, which may happen when you forget to use "yield" with a
# over, which may happen when you forget to use "yield from" with a
# coroutine call. Note that the value of the _DEBUG flag is taken
# when the decorator is used, so to be of any use it must be set
# before you define your coroutines. A downside of using this feature
@@ -30,6 +31,23 @@ _YIELD_FROM = opcode.opmap.get('YIELD_FROM', None)
_DEBUG = bool(os.environ.get('TROLLIUSDEBUG'))


try:
    _types_coroutine = types.coroutine
except AttributeError:
    _types_coroutine = None

try:
    _inspect_iscoroutinefunction = inspect.iscoroutinefunction
except AttributeError:
    _inspect_iscoroutinefunction = lambda func: False

try:
    from collections.abc import Coroutine as _CoroutineABC, \
                                Awaitable as _AwaitableABC
except ImportError:
    _CoroutineABC = _AwaitableABC = None


if _YIELD_FROM is not None:
    # Check for CPython issue #21209
    exec('''if 1:
@@ -86,10 +104,32 @@ else:
        else:
            self.value = args
        self.raised = False
        if _DEBUG:
            frame = sys._getframe(1)
            self._source_traceback = traceback.extract_stack(frame)
            # explicitly clear the reference to avoid reference cycles
            frame = None
        else:
            self._source_traceback = None

    def __del__(self):
        if not self.raised:
            logger.error('Return(%r) used without raise', self.value)
        if self.raised:
            return

        fmt = 'Return(%r) used without raise'
        if self._source_traceback:
            fmt += '\nReturn created at (most recent call last):\n'
            tb = ''.join(traceback.format_list(self._source_traceback))
            fmt += tb.rstrip()
        logger.error(fmt, self.value)


def debug_wrapper(gen):
    # This function is called from 'sys.set_coroutine_wrapper'.
    # We only wrap here coroutines defined via 'async def' syntax.
    # Generator-based coroutines are wrapped in @coroutine
    # decorator.
    return CoroWrapper(gen, None)


def _coroutine_at_yield_from(coro):
@@ -107,16 +147,16 @@ def _coroutine_at_yield_from(coro):
    return (instr == _YIELD_FROM)


class CoroWrapper(object):
class CoroWrapper:
    # Wrapper for coroutine object in _DEBUG mode.

    def __init__(self, gen, func):
        assert inspect.isgenerator(gen), gen
    def __init__(self, gen, func=None):
        assert inspect.isgenerator(gen) or inspect.iscoroutine(gen), gen
        self.gen = gen
        self.func = func
        self.func = func  # Used to unwrap @coroutine decorator
        self._source_traceback = traceback.extract_stack(sys._getframe(1))
        # __name__, __qualname__, __doc__ attributes are set by the coroutine()
        # decorator
        self.__name__ = getattr(gen, '__name__', None)
        self.__qualname__ = getattr(gen, '__qualname__', None)

    def __repr__(self):
        coro_repr = _format_coroutine(self)
@@ -166,10 +206,36 @@ class CoroWrapper(object):
    def gi_code(self):
        return self.gen.gi_code

    if compat.PY35:

        __await__ = __iter__  # make compatible with 'await' expression

        @property
        def gi_yieldfrom(self):
            return self.gen.gi_yieldfrom

        @property
        def cr_await(self):
            return self.gen.cr_await

        @property
        def cr_running(self):
            return self.gen.cr_running

        @property
        def cr_code(self):
            return self.gen.cr_code

        @property
        def cr_frame(self):
            return self.gen.cr_frame

    def __del__(self):
        # Be careful accessing self.gen.frame -- self.gen might not exist.
        gen = getattr(self, 'gen', None)
        frame = getattr(gen, 'gi_frame', None)
        if frame is None:
            frame = getattr(gen, 'cr_frame', None)
        if frame is not None and frame.f_lasti == -1:
            msg = '%r was never yielded from' % self
            tb = getattr(self, '_source_traceback', ())
@@ -237,6 +303,13 @@ def coroutine(func):
    If the coroutine is not yielded from before it is destroyed,
    an error message is logged.
    """
    if _inspect_iscoroutinefunction(func):
        # In Python 3.5 that's all we need to do for coroutines
        # defined with "async def".
        # Wrapping in CoroWrapper will happen via
        # 'sys.set_coroutine_wrapper' function.
        return func

    if inspect.isgeneratorfunction(func):
        coro = func
    else:
@@ -244,28 +317,38 @@ def coroutine(func):
        def coro(*args, **kw):
            res = func(*args, **kw)
            if (isinstance(res, futures._FUTURE_CLASSES)
                or inspect.isgenerator(res)):
                    or inspect.isgenerator(res)):
                res = yield From(res)
            raise Return(res)

    if not _DEBUG:
        wrapper = coro
    else:
        @_wraps(func)
        def wrapper(*args, **kwds):
            coro_wrapper = CoroWrapper(coro(*args, **kwds), func)
            if coro_wrapper._source_traceback:
                del coro_wrapper._source_traceback[-1]
            for attr in ('__name__', '__qualname__', '__doc__'):
            elif _AwaitableABC is not None:
                # If 'func' returns an Awaitable (new in 3.5) we
                # want to run it.
                try:
                    value = getattr(func, attr)
                    await_meth = res.__await__
                except AttributeError:
                    pass
                else:
                    setattr(coro_wrapper, attr, value)
            return coro_wrapper
        if not compat.PY3:
            wrapper.__wrapped__ = func
                    if isinstance(res, _AwaitableABC):
                        res = yield From(await_meth())
            raise Return(res)

    if not _DEBUG:
        if _types_coroutine is None:
            wrapper = coro
        else:
            wrapper = _types_coroutine(coro)
    else:
        @_wraps(func)
        def wrapper(*args, **kwds):
            w = CoroWrapper(coro(*args, **kwds), func=func)
            if w._source_traceback:
                del w._source_traceback[-1]
            # Python < 3.5 does not implement __qualname__
            # on generator objects, so we set it manually.
            # We use getattr as some callables (such as
            # functools.partial) may lack __qualname__.
            w.__name__ = getattr(func, '__name__', None)
            w.__qualname__ = getattr(func, '__qualname__', None)
            return w

    wrapper._is_coroutine = True  # For iscoroutinefunction().
    return wrapper
@@ -273,16 +356,19 @@ def coroutine(func):

def iscoroutinefunction(func):
    """Return True if func is a decorated coroutine function."""
    return getattr(func, '_is_coroutine', False)
    return (getattr(func, '_is_coroutine', False) or
            _inspect_iscoroutinefunction(func))
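
# Sketch (not part of the patch; assumes Python 3.5 for the native case):
# both spellings are now detected by iscoroutinefunction().
#
#     @coroutine
#     def legacy():
#         yield From(None)          # generator-based, marked _is_coroutine
#
#     # async def native(): pass   # caught via inspect.iscoroutinefunction
#
#     assert iscoroutinefunction(legacy)
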
_COROUTINE_TYPES = (types.GeneratorType, CoroWrapper)
if _CoroutineABC is not None:
    _COROUTINE_TYPES += (_CoroutineABC,)
if events.asyncio is not None:
    # Accept also asyncio CoroWrapper for interoperability
    if hasattr(events.asyncio, 'coroutines'):
        _COROUTINE_TYPES += (events.asyncio.coroutines.CoroWrapper,)
    else:
        # old Tulip/Python versions
        # old asyncio/Python versions
        _COROUTINE_TYPES += (events.asyncio.tasks.CoroWrapper,)

def iscoroutine(obj):
@@ -292,22 +378,48 @@ def iscoroutine(obj):

def _format_coroutine(coro):
    assert iscoroutine(coro)
    coro_name = getattr(coro, '__qualname__', coro.__name__)

    filename = coro.gi_code.co_filename
    if (isinstance(coro, CoroWrapper)
            and not inspect.isgeneratorfunction(coro.func)):
        filename, lineno = events._get_function_source(coro.func)
        if coro.gi_frame is None:
            coro_repr = '%s() done, defined at %s:%s' % (coro_name, filename, lineno)
        else:
            coro_repr = '%s() running, defined at %s:%s' % (coro_name, filename, lineno)
    elif coro.gi_frame is not None:
        lineno = coro.gi_frame.f_lineno
        coro_repr = '%s() running at %s:%s' % (coro_name, filename, lineno)
    coro_name = None
    if isinstance(coro, CoroWrapper):
        func = coro.func
        coro_name = coro.__qualname__
        if coro_name is not None:
            coro_name = '{0}()'.format(coro_name)
    else:
        lineno = coro.gi_code.co_firstlineno
        coro_repr = '%s() done, defined at %s:%s' % (coro_name, filename, lineno)
        func = coro

    if coro_name is None:
        coro_name = events._format_callback(func, ())

    try:
        coro_code = coro.gi_code
    except AttributeError:
        coro_code = coro.cr_code

    try:
        coro_frame = coro.gi_frame
    except AttributeError:
        coro_frame = coro.cr_frame

    filename = coro_code.co_filename
    if (isinstance(coro, CoroWrapper)
            and not inspect.isgeneratorfunction(coro.func)
            and coro.func is not None):
        filename, lineno = events._get_function_source(coro.func)
        if coro_frame is None:
            coro_repr = ('%s done, defined at %s:%s'
                         % (coro_name, filename, lineno))
        else:
            coro_repr = ('%s running, defined at %s:%s'
                         % (coro_name, filename, lineno))
    elif coro_frame is not None:
        lineno = coro_frame.f_lineno
        coro_repr = ('%s running at %s:%s'
                     % (coro_name, filename, lineno))
    else:
        lineno = coro_code.co_firstlineno
        coro_repr = ('%s done, defined at %s:%s'
                     % (coro_name, filename, lineno))

    return coro_repr

@@ -21,7 +21,6 @@ try:
except ImportError:
    import repr as reprlib  # Python 2

from trollius import compat
try:
    import asyncio
except (ImportError, SyntaxError):
@@ -29,37 +28,20 @@ except (ImportError, SyntaxError):
    # from" if asyncio module is in the Python path
    asyncio = None


_PY34 = sys.version_info >= (3, 4)

if not compat.PY34:
    # Backported functools.unwrap() from Python 3.4, without the stop parameter
    # (not needed here)
    #
    # @trollius.coroutine decorator chains wrapper using @functools.wrap
    # backported from Python 3.4.
    def _unwrap(func):
        f = func  # remember the original func for error reporting
        memo = set((id(f),))  # Memoise by id to tolerate non-hashable objects
        while hasattr(func, '__wrapped__'):
            func = func.__wrapped__
            id_func = id(func)
            if id_func in memo:
                raise ValueError('wrapper loop when unwrapping {0!r}'.format(f))
            memo.add(id_func)
        return func
else:
    _unwrap = inspect.unwrap
from trollius import compat


def _get_function_source(func):
    func = _unwrap(func)
    if compat.PY34:
        func = inspect.unwrap(func)
    elif hasattr(func, '__wrapped__'):
        func = func.__wrapped__
    if inspect.isfunction(func):
        code = func.__code__
        return (code.co_filename, code.co_firstlineno)
    if isinstance(func, functools.partial):
        return _get_function_source(func.func)
    if _PY34 and isinstance(func, functools.partialmethod):
    if compat.PY34 and isinstance(func, functools.partialmethod):
        return _get_function_source(func.func)
    return None

@@ -82,18 +64,21 @@ def _format_callback(func, args, suffix=''):
        suffix = _format_args(args) + suffix
        return _format_callback(func.func, func.args, suffix)

    if compat.PY33:
        func_repr = getattr(func, '__qualname__', None)
    if hasattr(func, '__qualname__'):
        func_repr = getattr(func, '__qualname__')
    elif hasattr(func, '__name__'):
        func_repr = getattr(func, '__name__')
    else:
        func_repr = getattr(func, '__name__', None)
    if not func_repr:
        func_repr = repr(func)

    if args is not None:
        func_repr += _format_args(args)
    if suffix:
        func_repr += suffix
    return func_repr

def _format_callback_source(func, args):
    func_repr = _format_callback(func, args)
    source = _get_function_source(func)
    if source:
        func_repr += ' at %s:%s' % source
@@ -104,7 +89,7 @@ class Handle(object):
    """Object returned by callback registration methods."""

    __slots__ = ('_callback', '_args', '_cancelled', '_loop',
                 '_source_traceback', '__weakref__')
                 '_source_traceback', '_repr', '__weakref__')

    def __init__(self, callback, args, loop):
        assert not isinstance(callback, Handle), 'A Handle is not a callback'
@@ -112,32 +97,45 @@ class Handle(object):
        self._callback = callback
        self._args = args
        self._cancelled = False
        self._repr = None
        if self._loop.get_debug():
            self._source_traceback = traceback.extract_stack(sys._getframe(1))
        else:
            self._source_traceback = None

    def __repr__(self):
    def _repr_info(self):
        info = [self.__class__.__name__]
        if self._cancelled:
            info.append('cancelled')
        if self._callback is not None:
            info.append(_format_callback(self._callback, self._args))
            info.append(_format_callback_source(self._callback, self._args))
        if self._source_traceback:
            frame = self._source_traceback[-1]
            info.append('created at %s:%s' % (frame[0], frame[1]))
        return info

    def __repr__(self):
        if self._repr is not None:
            return self._repr
        info = self._repr_info()
        return '<%s>' % ' '.join(info)

    def cancel(self):
        self._cancelled = True
        self._callback = None
        self._args = None
        if not self._cancelled:
            self._cancelled = True
            if self._loop.get_debug():
                # Keep a representation in debug mode to keep callback and
                # parameters. For example, to log the warning
                # "Executing <Handle...> took 2.5 seconds"
                self._repr = repr(self)
            self._callback = None
            self._args = None

    def _run(self):
        try:
            self._callback(*self._args)
        except Exception as exc:
            cb = _format_callback(self._callback, self._args)
            cb = _format_callback_source(self._callback, self._args)
            msg = 'Exception in callback {0}'.format(cb)
            context = {
                'message': msg,
@@ -153,7 +151,7 @@ class Handle(object):
class TimerHandle(Handle):
    """Object returned by timed callback registration methods."""

    __slots__ = ['_when']
    __slots__ = ['_scheduled', '_when']

    def __init__(self, when, callback, args, loop):
        assert when is not None
@@ -161,18 +159,13 @@ class TimerHandle(Handle):
        if self._source_traceback:
            del self._source_traceback[-1]
        self._when = when
        self._scheduled = False

    def __repr__(self):
        info = []
        if self._cancelled:
            info.append('cancelled')
        info.append('when=%s' % self._when)
        if self._callback is not None:
            info.append(_format_callback(self._callback, self._args))
        if self._source_traceback:
            frame = self._source_traceback[-1]
            info.append('created at %s:%s' % (frame[0], frame[1]))
        return '<%s %s>' % (self.__class__.__name__, ' '.join(info))
    def _repr_info(self):
        info = super(TimerHandle, self)._repr_info()
        pos = 2 if self._cancelled else 1
        info.insert(pos, 'when=%s' % self._when)
        return info

    def __hash__(self):
        return hash(self._when)
@@ -205,6 +198,11 @@ class TimerHandle(Handle):
        equal = self.__eq__(other)
        return NotImplemented if equal is NotImplemented else not equal

    def cancel(self):
        if not self._cancelled:
            self._loop._timer_handle_cancelled(self)
        super(TimerHandle, self).cancel()
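
    # Sketch (not part of the patch): cancelling a scheduled timer now feeds
    # the loop's cleanup heuristic instead of leaving a dead heap entry
    # ('loop' and 'cb' are assumed names).
    #
    #     timer = loop.call_later(60.0, cb)
    #     timer.cancel()   # notifies loop._timer_handle_cancelled(); the
    #                      # heap entry is purged lazily in _run_once()
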
class AbstractServer(object):
    """Abstract server returned by create_server()."""
@@ -270,6 +268,10 @@ else:

    # Methods scheduling callbacks. All these return Handles.

    def _timer_handle_cancelled(self, handle):
        """Notification that a TimerHandle has been cancelled."""
        raise NotImplementedError

    def call_soon(self, callback, *args):
        return self.call_later(0, callback, *args)

@@ -292,7 +294,7 @@ else:
    def call_soon_threadsafe(self, callback, *args):
        raise NotImplementedError

    def run_in_executor(self, executor, callback, *args):
    def run_in_executor(self, executor, func, *args):
        raise NotImplementedError

    def set_default_executor(self, executor):
@@ -451,6 +453,14 @@ else:
    def remove_signal_handler(self, sig):
        raise NotImplementedError

    # Task factory.

    def set_task_factory(self, factory):
        raise NotImplementedError

    def get_task_factory(self):
        raise NotImplementedError

    # Error handlers.

    def set_exception_handler(self, handler):
@@ -536,9 +546,9 @@ class BaseDefaultEventLoopPolicy(AbstractEventLoopPolicy):
                not self._local._set_called and
                isinstance(threading.current_thread(), threading._MainThread)):
            self.set_event_loop(self.new_event_loop())
        assert self._local._loop is not None, \
            ('There is no current event loop in thread %r.' %
             threading.current_thread().name)
        if self._local._loop is None:
            raise RuntimeError('There is no current event loop in thread %r.'
                               % threading.current_thread().name)
        return self._local._loop

    def set_event_loop(self, loop):

@@ -13,6 +13,7 @@ try:
except ImportError:
    import repr as reprlib  # Python 2

from . import compat
from . import events
from . import executor

@@ -21,8 +22,6 @@ _PENDING = 'PENDING'
_CANCELLED = 'CANCELLED'
_FINISHED = 'FINISHED'

_PY34 = sys.version_info >= (3, 4)

Error = executor.Error
CancelledError = executor.CancelledError
TimeoutError = executor.TimeoutError
@@ -32,7 +31,6 @@ STACK_DEBUG = logging.DEBUG - 1  # heavy-duty debugging

class InvalidStateError(Error):
    """The operation is not allowed in this state."""
    # TODO: Show the future, its state, the method, and the required state.


class _TracebackLogger(object):
@@ -63,7 +61,7 @@ class _TracebackLogger(object):
    the Future is collected, and the helper is present, the helper
    object is also collected, and its __del__() method will log the
    traceback. When the Future's result() or exception() method is
    called (and a helper object is present), it removes the the helper
    called (and a helper object is present), it removes the helper
    object, after calling its clear() method to prevent it from
    logging.

@@ -77,7 +75,7 @@ class _TracebackLogger(object):
    immediately format the exception; we only do the work when
    activate() is called, which call is delayed until after all the
    Future's callbacks have run. Since usually a Future has at least
    one callback (typically set by 'yield From') and usually that
    one callback (typically set by 'yield from') and usually that
    callback extracts the callback, thereby removing the need to
    format the exception.

@@ -106,10 +104,11 @@ class _TracebackLogger(object):

    def __del__(self):
        if self.tb:
            msg = 'Future/Task exception was never retrieved'
            msg = 'Future/Task exception was never retrieved\n'
            if self.source_traceback:
                msg += '\nFuture/Task created at (most recent call last):\n'
                msg += ''.join(traceback.format_list(self.source_traceback))
                src = ''.join(traceback.format_list(self.source_traceback))
                msg += 'Future/Task created at (most recent call last):\n'
                msg += '%s\n' % src.rstrip()
            msg += ''.join(self.tb).rstrip()
            self.loop.call_exception_handler({'message': msg})

@@ -136,9 +135,16 @@ class Future(object):
    _result = None
    _exception = None
    _loop = None
    _source_traceback = None

    _log_traceback = False  # Used for Python >= 3.4
    _tb_logger = None  # Used for Python <= 3.3
    _blocking = False  # proper use of future (yield vs yield from)

    # Used by Python 2 to raise the exception with the original traceback
    # in the exception() method in debug mode
    _exception_tb = None

    _log_traceback = False  # Used for Python 3.4 and later
    _tb_logger = None  # Used for Python 3.3 only

    def __init__(self, loop=None):
        """Initialize the future.
@@ -154,8 +160,6 @@ class Future(object):
        self._callbacks = []
        if self._loop.get_debug():
            self._source_traceback = traceback.extract_stack(sys._getframe(1))
        else:
            self._source_traceback = None

    def _format_callbacks(self):
        cb = self._callbacks
@@ -164,7 +168,7 @@ class Future(object):
            cb = ''

        def format_cb(callback):
            return events._format_callback(callback, ())
            return events._format_callback_source(callback, ())

        if size == 1:
            cb = format_cb(cb[0])
@@ -197,10 +201,10 @@ class Future(object):
        info = self._repr_info()
        return '<%s %s>' % (self.__class__.__name__, ' '.join(info))

    # On Python 3.3 and older, objects with a destructor that are part of a
    # reference cycle are never destroyed. This is no longer the case on
    # Python 3.4 thanks to PEP 442.
    if _PY34:
    if compat.PY34:
        def __del__(self):
            if not self._log_traceback:
                # set_exception() was not called, or result() or exception()
@@ -273,8 +277,13 @@ class Future(object):
        if self._tb_logger is not None:
            self._tb_logger.clear()
            self._tb_logger = None
        exc_tb = self._exception_tb
        self._exception_tb = None
        if self._exception is not None:
            raise self._exception
            if exc_tb is not None:
                compat.reraise(type(self._exception), self._exception, exc_tb)
            else:
                raise self._exception
        return self._result

    def exception(self):
@@ -293,6 +302,7 @@ class Future(object):
        if self._tb_logger is not None:
            self._tb_logger.clear()
            self._tb_logger = None
        self._exception_tb = None
        return self._exception

    def add_done_callback(self, fn):
@@ -340,7 +350,13 @@ class Future(object):
        self._state = _FINISHED
        self._schedule_callbacks()

    def _get_exception_tb(self):
        return self._exception_tb

    def set_exception(self, exception):
        self._set_exception_with_tb(exception, None)

    def _set_exception_with_tb(self, exception, exc_tb):
        """Mark the future done and set an exception.

        If the future is already done when this method is called, raises
@@ -351,9 +367,14 @@ class Future(object):
        if isinstance(exception, type):
            exception = exception()
        self._exception = exception
        if exc_tb is not None:
            self._exception_tb = exc_tb
            exc_tb = None
        elif self._loop.get_debug() and not compat.PY3:
            self._exception_tb = sys.exc_info()[2]
        self._state = _FINISHED
        self._schedule_callbacks()
        if _PY34:
        if compat.PY34:
            self._log_traceback = True
        else:
            self._tb_logger = _TracebackLogger(self, exception)
@@ -367,7 +388,10 @@ class Future(object):
        if self._loop.get_debug():
            frame = sys._getframe(1)
            tb = ['Traceback (most recent call last):\n']
            tb += traceback.format_stack(frame)
            if self._exception_tb is not None:
                tb += traceback.format_tb(self._exception_tb)
            else:
                tb += traceback.format_stack(frame)
            tb += traceback.format_exception_only(type(exception), exception)
            self._tb_logger.tb = tb
        else:
@@ -398,6 +422,9 @@ class Future(object):
            result = other.result()
            self.set_result(result)

    if compat.PY35:
        __await__ = __iter__  # make compatible with 'await' expression

if events.asyncio is not None:
    # Accept also asyncio Future objects for interoperability
    _FUTURE_CLASSES = (Future, events.asyncio.Future)
@@ -421,5 +448,5 @@ def wrap_future(fut, loop=None):
    new_future.add_done_callback(_check_cancel_other)
    fut.add_done_callback(
        lambda future: loop.call_soon_threadsafe(
            new_future._copy_state, fut))
            new_future._copy_state, future))
    return new_future

@@ -3,7 +3,9 @@
__all__ = ['Lock', 'Event', 'Condition', 'Semaphore', 'BoundedSemaphore']

import collections
import sys

from . import compat
from . import events
from . import futures
from .coroutines import coroutine, From, Return
@@ -39,7 +41,37 @@ class _ContextManager:
        self._lock = None  # Crudely prevent reuse.


class Lock(object):
class _ContextManagerMixin(object):
    def __enter__(self):
        raise RuntimeError(
            '"yield From" should be used as context manager expression')

    def __exit__(self, *args):
        # This must exist because __enter__ exists, even though that
        # always raises; that's how the with-statement works.
        pass

    # FIXME: support PEP 492?
    # if compat.PY35:

    #     def __await__(self):
    #         # To make "with await lock" work.
    #         yield from self.acquire()
    #         return _ContextManager(self)

    #     @coroutine
    #     def __aenter__(self):
    #         yield from self.acquire()
    #         # We have no use for the "as ..." clause in the with
    #         # statement for locks.
    #         return None

    #     @coroutine
    #     def __aexit__(self, exc_type, exc, tb):
    #         self.release()
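
# Usage sketch (not part of the patch; Trollius spelling of the asyncio
# idiom provided by _ContextManagerMixin):
#
#     lock = Lock()
#
#     @coroutine
#     def worker():
#         with (yield From(lock)):
#             pass   # critical section; the lock is released on exit
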
class Lock(_ContextManagerMixin):
|
||||
"""Primitive lock objects.
|
||||
|
||||
A primitive lock is a synchronization primitive that is not owned
|
||||
@@ -63,7 +95,7 @@ class Lock(object):
|
||||
|
||||
acquire() is a coroutine and should be called with 'yield From'.
|
||||
|
||||
Locks also support the context manager protocol. '(yield From(lock))'
|
||||
Locks also support the context management protocol. '(yield From(lock))'
|
||||
should be used as context manager expression.
|
||||
|
||||
Usage:
|
||||
@@ -153,15 +185,6 @@ class Lock(object):
|
||||
else:
|
||||
raise RuntimeError('Lock is not acquired.')
|
||||
|
||||
def __enter__(self):
|
||||
raise RuntimeError(
|
||||
'"yield" should be used as context manager expression')
|
||||
|
||||
def __exit__(self, *args):
|
||||
# This must exist because __enter__ exists, even though that
|
||||
# always raises; that's how the with-statement works.
|
||||
pass
|
||||
|
||||
|
||||
class Event(object):
|
||||
"""Asynchronous equivalent to threading.Event.
|
||||
@@ -229,7 +252,7 @@ class Event(object):
|
||||
self._waiters.remove(fut)
|
||||
|
||||
|
||||
class Condition(object):
|
||||
class Condition(_ContextManagerMixin):
|
||||
"""Asynchronous equivalent to threading.Condition.
|
||||
|
||||
This class implements condition variable objects. A condition variable
|
||||
@@ -290,8 +313,19 @@ class Condition(object):
|
||||
finally:
|
||||
self._waiters.remove(fut)
|
||||
|
||||
finally:
|
||||
except Exception as exc:
|
||||
# Workaround CPython bug #23353: using yield/yield-from in an
|
||||
# except block of a generator doesn't clear properly
|
||||
# sys.exc_info()
|
||||
err = exc
|
||||
else:
|
||||
err = None
|
||||
|
||||
if err is not None:
|
||||
yield From(self.acquire())
|
||||
raise err
|
||||
|
||||
yield From(self.acquire())
|
||||
|
||||
@coroutine
|
||||
def wait_for(self, predicate):
|
||||
@@ -339,15 +373,8 @@ class Condition(object):
|
||||
"""
|
||||
self.notify(len(self._waiters))
|
||||
|
||||
def __enter__(self):
|
||||
raise RuntimeError(
|
||||
'"yield From" should be used as context manager expression')
|
||||
|
||||
def __exit__(self, *args):
|
||||
pass
|
||||
|
||||
|
||||
class Semaphore(object):
|
||||
class Semaphore(_ContextManagerMixin):
|
||||
"""A Semaphore implementation.
|
||||
|
||||
A semaphore manages an internal counter which is decremented by each
|
||||
@@ -355,7 +382,7 @@ class Semaphore(object):
|
||||
can never go below zero; when acquire() finds that it is zero, it blocks,
|
||||
waiting until some other thread calls release().
|
||||
|
||||
Semaphores also support the context manager protocol.
|
||||
Semaphores also support the context management protocol.
|
||||
|
||||
The optional argument gives the initial value for the internal
|
||||
counter; it defaults to 1. If the value given is less than 0,
|
||||
@@ -418,13 +445,6 @@ class Semaphore(object):
|
||||
waiter.set_result(True)
|
||||
break
|
||||
|
||||
def __enter__(self):
|
||||
raise RuntimeError(
|
||||
'"yield" should be used as context manager expression')
|
||||
|
||||
def __exit__(self, *args):
|
||||
pass
|
||||
|
||||
|
||||
class BoundedSemaphore(Semaphore):
|
||||
"""A bounded semaphore implementation.
|
||||
|
||||
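To make the shared mixin concrete, here is a small usage sketch of the '(yield From(lock))' context manager expression that the docstrings above describe; the same pattern works for Condition and Semaphore::

    import trollius
    from trollius import From

    lock = trollius.Lock()

    @trollius.coroutine
    def guarded():
        # '(yield From(lock))' acquires the lock and returns a context
        # manager that releases it on exit.
        with (yield From(lock)):
            yield From(trollius.sleep(0.01))  # critical section

    loop = trollius.get_event_loop()
    loop.run_until_complete(guarded())
    loop.close()
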
@@ -7,10 +7,13 @@ proactor is only implemented on Windows with IOCP.
__all__ = ['BaseProactorEventLoop']

import socket
import sys
import warnings

from . import base_events
from . import constants
from . import futures
from . import sslproto
from . import transports
from .log import logger
from .compat import flatten_bytes
@@ -24,9 +27,8 @@ class _ProactorBasePipeTransport(transports._FlowControlMixin,

def __init__(self, loop, sock, protocol, waiter=None,
extra=None, server=None):
super(_ProactorBasePipeTransport, self).__init__(extra)
super(_ProactorBasePipeTransport, self).__init__(extra, loop)
self._set_extra(sock)
self._loop = loop
self._sock = sock
self._protocol = protocol
self._server = server
@@ -41,11 +43,17 @@ class _ProactorBasePipeTransport(transports._FlowControlMixin,
self._server._attach()
self._loop.call_soon(self._protocol.connection_made, self)
if waiter is not None:
# wait until protocol.connection_made() has been called
# only wake up the waiter when connection_made() has been called
self._loop.call_soon(waiter._set_result_unless_cancelled, None)

def __repr__(self):
info = [self.__class__.__name__, 'fd=%s' % self._sock.fileno()]
info = [self.__class__.__name__]
if self._sock is None:
info.append('closed')
elif self._closing:
info.append('closing')
if self._sock is not None:
info.append('fd=%s' % self._sock.fileno())
if self._read_fut is not None:
info.append('read=%s' % self._read_fut)
if self._write_fut is not None:
@@ -69,6 +77,16 @@ class _ProactorBasePipeTransport(transports._FlowControlMixin,
self._loop.call_soon(self._call_connection_lost, None)
if self._read_fut is not None:
self._read_fut.cancel()
self._read_fut = None

# On Python 3.3 and older, objects with a destructor that are part of a
# reference cycle are never destroyed. This is no longer the case on
# Python 3.4, thanks to PEP 442.
if sys.version_info >= (3, 4):
def __del__(self):
if self._sock is not None:
warnings.warn("unclosed transport %r" % self, ResourceWarning)
self.close()

def _fatal_error(self, exc, message='Fatal error on pipe transport'):
if isinstance(exc, (BrokenPipeError, ConnectionResetError)):
@@ -90,9 +108,10 @@ class _ProactorBasePipeTransport(transports._FlowControlMixin,
self._conn_lost += 1
if self._write_fut:
self._write_fut.cancel()
self._write_fut = None
if self._read_fut:
self._read_fut.cancel()
self._write_fut = self._read_fut = None
self._read_fut = None
self._pending_write = 0
self._buffer = None
self._loop.call_soon(self._call_connection_lost, exc)
@@ -108,6 +127,7 @@ class _ProactorBasePipeTransport(transports._FlowControlMixin,
if hasattr(self._sock, 'shutdown'):
self._sock.shutdown(socket.SHUT_RDWR)
self._sock.close()
self._sock = None
server = self._server
if server is not None:
server._detach()
@@ -176,6 +196,9 @@ class _ProactorReadPipeTransport(_ProactorBasePipeTransport,
except ConnectionAbortedError as exc:
if not self._closing:
self._fatal_error(exc, 'Fatal read error on pipe transport')
elif self._loop.get_debug():
logger.debug("Read error on pipe transport while closing",
exc_info=True)
except ConnectionResetError as exc:
self._force_close(exc)
except OSError as exc:
@@ -224,10 +247,6 @@ class _ProactorBaseWritePipeTransport(_ProactorBasePipeTransport,
assert self._buffer is None
# Pass a copy, except if it's already immutable.
self._loop_writing(data=bytes(data))
# XXX Should we pause the protocol at this point
# if len(data) > self._high_water? (That would
# require keeping track of the number of bytes passed
# to a send() that hasn't finished yet.)
elif not self._buffer: # WRITING -> BACKED UP
# Make a mutable copy which we can extend.
self._buffer = bytearray(data)
@@ -326,12 +345,16 @@ class _ProactorSocketTransport(_ProactorReadPipeTransport,
try:
self._extra['sockname'] = sock.getsockname()
except (socket.error, AttributeError):
pass
if self._loop.get_debug():
logger.warning("getsockname() failed on %r",
sock, exc_info=True)
if 'peername' not in self._extra:
try:
self._extra['peername'] = sock.getpeername()
except (socket.error, AttributeError):
pass
if self._loop.get_debug():
logger.warning("getpeername() failed on %r",
sock, exc_info=True)

def can_write_eof(self):
return True
@@ -361,6 +384,20 @@ class BaseProactorEventLoop(base_events.BaseEventLoop):
return _ProactorSocketTransport(self, sock, protocol, waiter,
extra, server)

def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter=None,
server_side=False, server_hostname=None,
extra=None, server=None):
if not sslproto._is_sslproto_available():
raise NotImplementedError("Proactor event loop requires Python 3.5"
" or newer (ssl.MemoryBIO) to support "
"SSL")

ssl_protocol = sslproto.SSLProtocol(self, protocol, sslcontext, waiter,
server_side, server_hostname)
_ProactorSocketTransport(self, rawsock, ssl_protocol,
extra=extra, server=server)
return ssl_protocol._app_transport

def _make_duplex_pipe_transport(self, sock, protocol, waiter=None,
extra=None):
return _ProactorDuplexPipeTransport(self,
@@ -377,15 +414,23 @@ class BaseProactorEventLoop(base_events.BaseEventLoop):
sock, protocol, waiter, extra)

def close(self):
if self.is_running():
raise RuntimeError("Cannot close a running event loop")
if self.is_closed():
return
super(BaseProactorEventLoop, self).close()

# Call these methods before closing the event loop (before calling
# BaseEventLoop.close), because they can schedule callbacks with
# call_soon(), which is forbidden when the event loop is closed.
self._stop_accept_futures()
self._close_self_pipe()
self._proactor.close()
self._proactor = None
self._selector = None

# Close the event loop
super(BaseProactorEventLoop, self).close()

def sock_recv(self, sock, n):
return self._proactor.recv(sock, n)

@@ -394,7 +439,8 @@ class BaseProactorEventLoop(base_events.BaseEventLoop):

def sock_connect(self, sock, address):
try:
base_events._check_resolved_address(sock, address)
if self._debug:
base_events._check_resolved_address(sock, address)
except ValueError as err:
fut = futures.Future(loop=self)
fut.set_exception(err)
@@ -424,18 +470,22 @@ class BaseProactorEventLoop(base_events.BaseEventLoop):
self._ssock.setblocking(False)
self._csock.setblocking(False)
self._internal_fds += 1
# don't check the current loop because _make_self_pipe() is called
# from the event loop constructor
self._call_soon(self._loop_self_reading, (), check_loop=False)
self.call_soon(self._loop_self_reading)

def _loop_self_reading(self, f=None):
try:
if f is not None:
f.result() # may raise
f = self._proactor.recv(self._ssock, 4096)
except:
self.close()
raise
except futures.CancelledError:
# _close_self_pipe() has been called, stop waiting for data
return
except Exception as exc:
self.call_exception_handler({
'message': 'Error on reading from the event loop self pipe',
'exception': exc,
'loop': self,
})
else:
self._self_reading_future = f
f.add_done_callback(self._loop_self_reading)
@@ -443,9 +493,8 @@ class BaseProactorEventLoop(base_events.BaseEventLoop):
def _write_to_self(self):
self._csock.send(b'\0')

def _start_serving(self, protocol_factory, sock, ssl=None, server=None):
if ssl:
raise ValueError('IocpEventLoop is incompatible with SSL.')
def _start_serving(self, protocol_factory, sock,
sslcontext=None, server=None):

def loop(f=None):
try:
@@ -455,20 +504,28 @@ class BaseProactorEventLoop(base_events.BaseEventLoop):
logger.debug("%r got a new connection from %r: %r",
server, addr, conn)
protocol = protocol_factory()
self._make_socket_transport(
conn, protocol,
extra={'peername': addr}, server=server)
if sslcontext is not None:
self._make_ssl_transport(
conn, protocol, sslcontext, server_side=True,
extra={'peername': addr}, server=server)
else:
self._make_socket_transport(
conn, protocol,
extra={'peername': addr}, server=server)
if self.is_closed():
return
f = self._proactor.accept(sock)
except OSError as exc:
if sock.fileno() != -1:
self.call_exception_handler({
'message': 'Accept failed',
'message': 'Accept failed on a socket',
'exception': exc,
'socket': sock,
})
sock.close()
elif self._debug:
logger.debug("Accept failed on socket %r",
sock, exc_info=True)
except futures.CancelledError:
sock.close()
else:
@@ -478,7 +535,8 @@ class BaseProactorEventLoop(base_events.BaseEventLoop):
self.call_soon(loop)

def _process_events(self, event_list):
pass # XXX hard work currently done in poll
# Events are processed in the IocpProactor._poll() method
pass

def _stop_accept_futures(self):
for future in self._accept_futures.values():

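As a hedged sketch of how the proactor changes surface to users: on Windows the proactor loop is created explicitly, and the shutdown ordering added above is what makes close() safe to call afterwards::

    import sys
    import trollius
    from trollius import From

    if sys.platform == 'win32':
        loop = trollius.ProactorEventLoop()
        trollius.set_event_loop(loop)
    else:  # fall back to the default selector loop elsewhere
        loop = trollius.get_event_loop()

    @trollius.coroutine
    def hello():
        yield From(trollius.sleep(0.1))
        print('running on', type(loop).__name__)

    loop.run_until_complete(hello())
    # close() now stops accept futures and the self-pipe before
    # closing the loop itself.
    loop.close()
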
@@ -78,6 +78,11 @@ class Protocol(BaseProtocol):
State machine of calls:

start -> CM [-> DR*] [-> ER?] -> CL -> end

* CM: connection_made()
* DR: data_received()
* ER: eof_received()
* CL: connection_lost()
"""

def data_received(self, data):

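The state machine documented above maps one-to-one onto the protocol callbacks; a minimal echo protocol as an illustration::

    import trollius

    class EchoProtocol(trollius.Protocol):
        def connection_made(self, transport):  # CM
            self.transport = transport

        def data_received(self, data):         # DR, zero or more times
            self.transport.write(data)

        def eof_received(self):                # ER, at most once
            return False  # a false value closes the transport

        def connection_lost(self, exc):        # CL, always last
            pass
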
@@ -1,13 +1,18 @@
__all__ = ['BlockingIOError', 'BrokenPipeError', 'ChildProcessError',
'ConnectionRefusedError', 'ConnectionResetError',
'InterruptedError', 'ConnectionAbortedError', 'PermissionError',
'FileNotFoundError',
'FileNotFoundError', 'ProcessLookupError',
]

import errno
import select
import socket
import sys
try:
import ssl
except ImportError:
ssl = None

from .compat import PY33

if PY33:
@@ -121,6 +126,8 @@ if not PY33:
try:
return func(*args, **kw)
except (socket.error, IOError, OSError) as exc:
if ssl is not None and isinstance(exc, ssl.SSLError):
raise
if hasattr(exc, 'winerror'):
_wrap_error(exc, _MAP_ERRNO, exc.winerror)
# _MAP_ERRNO does not contain all Windows errors.

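For readers unfamiliar with this compatibility layer: wrap_error() calls a function and re-raises socket/IO errors as the backported Python 3.3 exception classes. A sketch, assuming port 1 on localhost is closed (pick any closed port)::

    import socket
    from trollius.py33_exceptions import wrap_error, ConnectionRefusedError

    sock = socket.socket()
    try:
        # On Python 2, socket.error is translated to the matching
        # backported class, here ConnectionRefusedError.
        wrap_error(sock.connect, ('127.0.0.1', 1))
    except ConnectionRefusedError:
        print('connection refused, as expected')
    finally:
        sock.close()
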
@@ -95,18 +95,17 @@ except AttributeError:
self.suppress_ragged_eofs = suppress_ragged_eofs
self._makefile_refs = 0

def wrap_socket(sock, keyfile=None, certfile=None,
server_side=False, cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_SSLv23, ca_certs=None,
do_handshake_on_connect=True,
suppress_ragged_eofs=True):
return BackportSSLSocket(sock, keyfile=keyfile, certfile=certfile,
server_side=server_side, cert_reqs=cert_reqs,
ssl_version=ssl_version, ca_certs=ca_certs,
do_handshake_on_connect=do_handshake_on_connect,
suppress_ragged_eofs=suppress_ragged_eofs)
def wrap_socket(sock, server_hostname=None, **kwargs):
# ignore server_hostname parameter, not supported
kwargs.pop('server_hostname', None)
return BackportSSLSocket(sock, **kwargs)
else:
wrap_socket = ssl.wrap_socket
_wrap_socket = ssl.wrap_socket

def wrap_socket(sock, **kwargs):
# ignore server_hostname parameter, not supported
kwargs.pop('server_hostname', None)
return _wrap_socket(sock, **kwargs)


class SSLContext(object):
@@ -119,12 +118,12 @@ except AttributeError:
self.certfile = certfile
self.keyfile = keyfile

def wrap_socket(self, sock, **kw):
def wrap_socket(self, sock, **kwargs):
return wrap_socket(sock,
ssl_version=self.protocol,
certfile=self.certfile,
keyfile=self.keyfile,
**kw)
ssl_version=self.protocol,
certfile=self.certfile,
keyfile=self.keyfile,
**kwargs)

@property
def verify_mode(self):

@@ -1,11 +1,11 @@
"""Queues"""

__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'JoinableQueue',
'QueueFull', 'QueueEmpty']
__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty']

import collections
import heapq

from . import compat
from . import events
from . import futures
from . import locks
@@ -13,12 +13,16 @@ from .coroutines import coroutine, From, Return


class QueueEmpty(Exception):
'Exception raised by Queue.get(block=0)/get_nowait().'
"""Exception raised when Queue.get_nowait() is called on a Queue object
which is empty.
"""
pass


class QueueFull(Exception):
'Exception raised by Queue.put(block=0)/put_nowait().'
"""Exception raised when the Queue.put_nowait() method is called on a Queue
object which is full.
"""
pass


@@ -26,11 +30,11 @@ class Queue(object):
"""A queue, useful for coordinating producer and consumer coroutines.

If maxsize is less than or equal to zero, the queue size is infinite. If it
is an integer greater than 0, then "yield From(put())" will block when the
is an integer greater than 0, then "yield from put()" will block when the
queue reaches maxsize, until an item is removed by get().

Unlike the standard library Queue, you can reliably know this Queue's size
with qsize(), since your single-threaded trollius application won't be
with qsize(), since your single-threaded asyncio application won't be
interrupted between calling qsize() and doing an operation on the Queue.
"""

@@ -45,8 +49,13 @@ class Queue(object):
self._getters = collections.deque()
# Pairs of (item, Future).
self._putters = collections.deque()
self._unfinished_tasks = 0
self._finished = locks.Event(loop=self._loop)
self._finished.set()
self._init(maxsize)

# These three are overridable in subclasses.

def _init(self, maxsize):
self._queue = collections.deque()

@@ -56,6 +65,13 @@ class Queue(object):
def _put(self, item):
self._queue.append(item)

# End of the overridable methods.

def __put_internal(self, item):
self._put(item)
self._unfinished_tasks += 1
self._finished.clear()

def __repr__(self):
return '<{0} at {1:#x} {2}>'.format(
type(self).__name__, id(self), self._format())
@@ -71,6 +87,8 @@ class Queue(object):
result += ' _getters[{0}]'.format(len(self._getters))
if self._putters:
result += ' _putters[{0}]'.format(len(self._putters))
if self._unfinished_tasks:
result += ' tasks={0}'.format(self._unfinished_tasks)
return result

def _consume_done_getters(self):
@@ -111,8 +129,10 @@ class Queue(object):
def put(self, item):
"""Put an item into the queue.

If you yield From(put()), wait until a free slot is available
before adding item.
Put an item into the queue. If the queue is full, wait until a free
slot is available before adding item.

This method is a coroutine.
"""
self._consume_done_getters()
if self._getters:
@@ -120,10 +140,9 @@ class Queue(object):
'queue non-empty, why are getters waiting?')

getter = self._getters.popleft()
self.__put_internal(item)

# Use _put and _get instead of passing item straight to getter, in
# case a subclass has logic that must run (e.g. JoinableQueue).
self._put(item)
# getter cannot be cancelled, we just removed done getters
getter.set_result(self._get())

elif self._maxsize > 0 and self._maxsize <= self.qsize():
@@ -133,7 +152,7 @@ class Queue(object):
yield From(waiter)

else:
self._put(item)
self.__put_internal(item)

def put_nowait(self, item):
"""Put an item into the queue without blocking.
@@ -146,28 +165,29 @@ class Queue(object):
'queue non-empty, why are getters waiting?')

getter = self._getters.popleft()
self.__put_internal(item)

# Use _put and _get instead of passing item straight to getter, in
# case a subclass has logic that must run (e.g. JoinableQueue).
self._put(item)
# getter cannot be cancelled, we just removed done getters
getter.set_result(self._get())

elif self._maxsize > 0 and self._maxsize <= self.qsize():
raise QueueFull
else:
self._put(item)
self.__put_internal(item)

@coroutine
def get(self):
"""Remove and return an item from the queue.

If you yield From(get()), wait until a item is available.
If queue is empty, wait until an item is available.

This method is a coroutine.
"""
self._consume_done_putters()
if self._putters:
assert self.full(), 'queue not full, why are putters waiting?'
item, putter = self._putters.popleft()
self._put(item)
self.__put_internal(item)

# When a getter runs and frees up a slot so this putter can
# run, we need to defer the put for a tick to ensure that
@@ -195,8 +215,10 @@ class Queue(object):
if self._putters:
assert self.full(), 'queue not full, why are putters waiting?'
item, putter = self._putters.popleft()
self._put(item)
self.__put_internal(item)
# Wake putter on next tick.

# getter cannot be cancelled, we just removed done putters
putter.set_result(None)

return self._get()
@@ -206,6 +228,38 @@ class Queue(object):
else:
raise QueueEmpty

def task_done(self):
"""Indicate that a formerly enqueued task is complete.

Used by queue consumers. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.

If a join() is currently blocking, it will resume when all items have
been processed (meaning that a task_done() call was received for every
item that had been put() into the queue).

Raises ValueError if called more times than there were items placed in
the queue.
"""
if self._unfinished_tasks <= 0:
raise ValueError('task_done() called too many times')
self._unfinished_tasks -= 1
if self._unfinished_tasks == 0:
self._finished.set()

@coroutine
def join(self):
"""Block until all items in the queue have been gotten and processed.

The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer calls task_done() to
indicate that the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
"""
if self._unfinished_tasks > 0:
yield From(self._finished.wait())


class PriorityQueue(Queue):
"""A subclass of Queue; retrieves entries in priority order (lowest first).
@@ -236,54 +290,7 @@ class LifoQueue(Queue):
return self._queue.pop()


class JoinableQueue(Queue):
"""A subclass of Queue with task_done() and join() methods."""

def __init__(self, maxsize=0, loop=None):
super(JoinableQueue, self).__init__(maxsize=maxsize, loop=loop)
self._unfinished_tasks = 0
self._finished = locks.Event(loop=self._loop)
self._finished.set()

def _format(self):
result = Queue._format(self)
if self._unfinished_tasks:
result += ' tasks={0}'.format(self._unfinished_tasks)
return result

def _put(self, item):
super(JoinableQueue, self)._put(item)
self._unfinished_tasks += 1
self._finished.clear()

def task_done(self):
"""Indicate that a formerly enqueued task is complete.

Used by queue consumers. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.

If a join() is currently blocking, it will resume when all items have
been processed (meaning that a task_done() call was received for every
item that had been put() into the queue).

Raises ValueError if called more times than there were items placed in
the queue.
"""
if self._unfinished_tasks <= 0:
raise ValueError('task_done() called too many times')
self._unfinished_tasks -= 1
if self._unfinished_tasks == 0:
self._finished.set()

@coroutine
def join(self):
"""Block until all items in the queue have been gotten and processed.

The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer thread calls task_done()
to indicate that the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
"""
if self._unfinished_tasks > 0:
yield From(self._finished.wait())
if not compat.PY35:
JoinableQueue = Queue
"""Deprecated alias for Queue."""
__all__.append('JoinableQueue')

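With task_done() and join() folded into Queue itself, the usual producer/consumer pattern looks like this (a sketch using standard trollius calls)::

    import trollius
    from trollius import From

    @trollius.coroutine
    def worker(queue):
        while True:
            item = yield From(queue.get())
            print('processed', item)
            queue.task_done()  # one task_done() per get()

    @trollius.coroutine
    def main(loop):
        queue = trollius.Queue()
        for i in range(5):
            queue.put_nowait(i)
        task = loop.create_task(worker(queue))
        yield From(queue.join())  # returns once every item is task_done()
        task.cancel()
        try:
            yield From(task)
        except trollius.CancelledError:
            pass

    loop = trollius.get_event_loop()
    loop.run_until_complete(main(loop))
    loop.close()
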
@@ -8,13 +8,13 @@ __all__ = ['BaseSelectorEventLoop']

import collections
import errno
import functools
import socket
import sys
import warnings
try:
import ssl
from .py3_ssl import (
wrap_ssl_error, SSLContext, BACKPORT_SSL_CONTEXT, SSLWantReadError,
SSLWantWriteError)
from .py3_ssl import wrap_ssl_error, SSLWantReadError, SSLWantWriteError
except ImportError: # pragma: no cover
ssl = None

@@ -23,8 +23,10 @@ from . import constants
from . import events
from . import futures
from . import selectors
from . import sslproto
from . import transports
from .compat import flatten_bytes
from .coroutines import coroutine, From
from .log import logger
from .py33_exceptions import (wrap_error,
BlockingIOError, InterruptedError, ConnectionAbortedError, BrokenPipeError,
@@ -76,9 +78,27 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
return _SelectorSocketTransport(self, sock, protocol, waiter,
extra, server)

def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter,
def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter=None,
server_side=False, server_hostname=None,
extra=None, server=None):
if not sslproto._is_sslproto_available():
return self._make_legacy_ssl_transport(
rawsock, protocol, sslcontext, waiter,
server_side=server_side, server_hostname=server_hostname,
extra=extra, server=server)

ssl_protocol = sslproto.SSLProtocol(self, protocol, sslcontext, waiter,
server_side, server_hostname)
_SelectorSocketTransport(self, rawsock, ssl_protocol,
extra=extra, server=server)
return ssl_protocol._app_transport

def _make_legacy_ssl_transport(self, rawsock, protocol, sslcontext,
waiter,
server_side=False, server_hostname=None,
extra=None, server=None):
# Use the legacy API: SSL_write, SSL_read, etc. The legacy API is used
# on Python 3.4 and older, when ssl.MemoryBIO is not available.
return _SelectorSslTransport(
self, rawsock, protocol, sslcontext, waiter,
server_side, server_hostname, extra, server)
@@ -89,10 +109,12 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
address, waiter, extra)

def close(self):
if self.is_running():
raise RuntimeError("Cannot close a running event loop")
if self.is_closed():
return
super(BaseSelectorEventLoop, self).close()
self._close_self_pipe()
super(BaseSelectorEventLoop, self).close()
if self._selector is not None:
self._selector.close()
self._selector = None
@@ -164,7 +186,6 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
pass # False alarm.
except socket.error as exc:
# There's nowhere to send the error, so just log it.
# TODO: Someone will want an error handler for this.
if exc.errno in (errno.EMFILE, errno.ENFILE,
errno.ENOBUFS, errno.ENOMEM):
# Some platforms (e.g. Linux) keep reporting the FD as
@@ -182,15 +203,47 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
else:
raise # The event loop will catch, log and ignore it.
else:
extra = {'peername': addr}
accept = self._accept_connection2(protocol_factory, conn, extra,
sslcontext, server)
self.create_task(accept)

@coroutine
def _accept_connection2(self, protocol_factory, conn, extra,
sslcontext=None, server=None):
protocol = None
transport = None
try:
protocol = protocol_factory()
waiter = futures.Future(loop=self)
if sslcontext:
self._make_ssl_transport(
conn, protocol_factory(), sslcontext, None,
server_side=True, extra={'peername': addr}, server=server)
transport = self._make_ssl_transport(
conn, protocol, sslcontext, waiter=waiter,
server_side=True, extra=extra, server=server)
else:
self._make_socket_transport(
conn, protocol_factory(), extra={'peername': addr},
transport = self._make_socket_transport(
conn, protocol, waiter=waiter, extra=extra,
server=server)
# It's now up to the protocol to handle the connection.

try:
yield From(waiter)
except:
transport.close()
raise

# It's now up to the protocol to handle the connection.
except Exception as exc:
if self._debug:
context = {
'message': ('Error on transport creation '
'for incoming connection'),
'exception': exc,
}
if protocol is not None:
context['protocol'] = protocol
if transport is not None:
context['transport'] = transport
self.call_exception_handler(context)

def add_reader(self, fd, callback, *args):
"""Add a reader callback."""
@@ -278,6 +331,8 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):

This method is a coroutine.
"""
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
fut = futures.Future(loop=self)
self._sock_recv(fut, False, sock, n)
return fut
@@ -314,6 +369,8 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):

This method is a coroutine.
"""
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
fut = futures.Future(loop=self)
if data:
self._sock_sendall(fut, False, sock, data)
@@ -355,29 +412,47 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):

This method is a coroutine.
"""
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
fut = futures.Future(loop=self)
try:
base_events._check_resolved_address(sock, address)
if self._debug:
base_events._check_resolved_address(sock, address)
except ValueError as err:
fut.set_exception(err)
else:
self._sock_connect(fut, False, sock, address)
self._sock_connect(fut, sock, address)
return fut

def _sock_connect(self, fut, registered, sock, address):
def _sock_connect(self, fut, sock, address):
fd = sock.fileno()
if registered:
self.remove_writer(fd)
try:
wrap_error(sock.connect, address)
except (BlockingIOError, InterruptedError):
# Issue #23618: When the C function connect() fails with EINTR, the
# connection runs in background. We have to wait until the socket
# becomes writable to be notified when the connection succeeds or
# fails.
fut.add_done_callback(functools.partial(self._sock_connect_done,
fd))
self.add_writer(fd, self._sock_connect_cb, fut, sock, address)
except Exception as exc:
fut.set_exception(exc)
else:
fut.set_result(None)

def _sock_connect_done(self, fd, fut):
self.remove_writer(fd)

def _sock_connect_cb(self, fut, sock, address):
if fut.cancelled():
return

try:
if not registered:
# First time around.
wrap_error(sock.connect, address)
else:
wrap_error(_get_socket_error, sock, address)
wrap_error(_get_socket_error, sock, address)
except (BlockingIOError, InterruptedError):
self.add_writer(fd, self._sock_connect, fut, True, sock, address)
# socket is still registered, the callback will be retried later
pass
except Exception as exc:
fut.set_exception(exc)
else:
@@ -393,6 +468,8 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):

This method is a coroutine.
"""
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
fut = futures.Future(loop=self)
self._sock_accept(fut, False, sock)
return fut
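The sock_*() methods above require an already non-blocking socket, and with this change the resolved-address check only runs in debug mode. A usage sketch (host and port are placeholders)::

    import socket
    import trollius
    from trollius import From, Return

    @trollius.coroutine
    def head_request(loop, host, port):
        infos = yield From(loop.getaddrinfo(host, port,
                                            type=socket.SOCK_STREAM))
        sock = socket.socket(infos[0][0])
        sock.setblocking(False)  # required by the sock_*() methods
        yield From(loop.sock_connect(sock, infos[0][4]))
        yield From(loop.sock_sendall(sock, b'HEAD / HTTP/1.0\r\n\r\n'))
        data = yield From(loop.sock_recv(sock, 1024))
        sock.close()
        raise Return(data)

    loop = trollius.get_event_loop()
    print(loop.run_until_complete(head_request(loop, 'example.com', 80)))
    loop.close()
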
@@ -439,8 +516,13 @@ class _SelectorTransport(transports._FlowControlMixin,

_buffer_factory = bytearray # Constructs initial value for self._buffer.

def __init__(self, loop, sock, protocol, extra, server=None):
super(_SelectorTransport, self).__init__(extra)
# Attribute used in the destructor: it must be set even if the constructor
# is not called (see _SelectorSslTransport which may start by raising an
# exception)
_sock = None

def __init__(self, loop, sock, protocol, extra=None, server=None):
super(_SelectorTransport, self).__init__(extra, loop)
self._extra['socket'] = sock
self._extra['sockname'] = sock.getsockname()
if 'peername' not in self._extra:
@@ -448,10 +530,10 @@ class _SelectorTransport(transports._FlowControlMixin,
self._extra['peername'] = sock.getpeername()
except socket.error:
self._extra['peername'] = None
self._loop = loop
self._sock = sock
self._sock_fd = sock.fileno()
self._protocol = protocol
self._protocol_connected = True
self._server = server
self._buffer = self._buffer_factory()
self._conn_lost = 0 # Set when call to connection_lost scheduled.
@@ -460,23 +542,31 @@ class _SelectorTransport(transports._FlowControlMixin,
self._server._attach()

def __repr__(self):
info = [self.__class__.__name__, 'fd=%s' % self._sock_fd]
polling = _test_selector_event(self._loop._selector,
self._sock_fd, selectors.EVENT_READ)
if polling:
info.append('read=polling')
else:
info.append('read=idle')
info = [self.__class__.__name__]
if self._sock is None:
info.append('closed')
elif self._closing:
info.append('closing')
info.append('fd=%s' % self._sock_fd)
# test if the transport was closed
if self._loop is not None and not self._loop.is_closed():
polling = _test_selector_event(self._loop._selector,
self._sock_fd, selectors.EVENT_READ)
if polling:
info.append('read=polling')
else:
info.append('read=idle')

polling = _test_selector_event(self._loop._selector,
self._sock_fd, selectors.EVENT_WRITE)
if polling:
state = 'polling'
else:
state = 'idle'
polling = _test_selector_event(self._loop._selector,
self._sock_fd,
selectors.EVENT_WRITE)
if polling:
state = 'polling'
else:
state = 'idle'

bufsize = self.get_write_buffer_size()
info.append('write=<%s, bufsize=%s>' % (state, bufsize))
bufsize = self.get_write_buffer_size()
info.append('write=<%s, bufsize=%s>' % (state, bufsize))
return '<%s>' % ' '.join(info)

def abort(self):
@@ -491,9 +581,19 @@ class _SelectorTransport(transports._FlowControlMixin,
self._conn_lost += 1
self._loop.call_soon(self._call_connection_lost, None)

# On Python 3.3 and older, objects with a destructor that are part of a
# reference cycle are never destroyed. This is no longer the case on
# Python 3.4, thanks to PEP 442.
if sys.version_info >= (3, 4):
def __del__(self):
if self._sock is not None:
warnings.warn("unclosed transport %r" % self, ResourceWarning)
self._sock.close()

def _fatal_error(self, exc, message='Fatal error on transport'):
# Should be called from exception handler only.
if isinstance(exc, (BrokenPipeError, ConnectionResetError)):
if isinstance(exc, (BrokenPipeError,
ConnectionResetError, ConnectionAbortedError)):
if self._loop.get_debug():
logger.debug("%r: %s", self, message, exc_info=True)
else:
@@ -519,7 +619,8 @@ class _SelectorTransport(transports._FlowControlMixin,

def _call_connection_lost(self, exc):
try:
self._protocol.connection_lost(exc)
if self._protocol_connected:
self._protocol.connection_lost(exc)
finally:
self._sock.close()
self._sock = None
@@ -542,10 +643,12 @@ class _SelectorSocketTransport(_SelectorTransport):
self._eof = False
self._paused = False

self._loop.add_reader(self._sock_fd, self._read_ready)
self._loop.call_soon(self._protocol.connection_made, self)
# only start reading when connection_made() has been called
self._loop.call_soon(self._loop.add_reader,
self._sock_fd, self._read_ready)
if waiter is not None:
# wait until protocol.connection_made() has been called
# only wake up the waiter when connection_made() has been called
self._loop.call_soon(waiter._set_result_unless_cancelled, None)

def pause_reading(self):
@@ -667,40 +770,23 @@ class _SelectorSslTransport(_SelectorTransport):
if ssl is None:
raise RuntimeError('stdlib ssl module not available')

if server_side:
if not sslcontext:
raise ValueError('Server side ssl needs a valid SSLContext')
else:
if not sslcontext:
# Client side may pass ssl=True to use a default
# context; in that case the sslcontext passed is None.
# The default is the same as used by urllib with
# cadefault=True.
if hasattr(ssl, '_create_stdlib_context'):
sslcontext = ssl._create_stdlib_context(
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=bool(server_hostname))
else:
# Python older than 3.4
sslcontext = SSLContext(ssl.PROTOCOL_SSLv23)
if not BACKPORT_SSL_CONTEXT:
sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.set_default_verify_paths()
sslcontext.verify_mode = ssl.CERT_REQUIRED
if not sslcontext:
sslcontext = sslproto._create_transport_context(server_side, server_hostname)

wrap_kwargs = {
'server_side': server_side,
'do_handshake_on_connect': False,
}
if server_hostname and not server_side and getattr(ssl, 'HAS_SNI', False):
if server_hostname and not server_side:
wrap_kwargs['server_hostname'] = server_hostname
sslsock = sslcontext.wrap_socket(rawsock, **wrap_kwargs)

super(_SelectorSslTransport, self).__init__(loop, sslsock, protocol, extra, server)
# the protocol connection is only made after the SSL handshake
self._protocol_connected = False

self._server_hostname = server_hostname
self._waiter = waiter
self._rawsock = rawsock
self._sslcontext = sslcontext
self._paused = False

@@ -714,6 +800,16 @@ class _SelectorSslTransport(_SelectorTransport):
start_time = None
self._on_handshake(start_time)

def _wakeup_waiter(self, exc=None):
if self._waiter is None:
return
if not self._waiter.cancelled():
if exc is not None:
self._waiter.set_exception(exc)
else:
self._waiter.set_result(None)
self._waiter = None

def _on_handshake(self, start_time):
try:
wrap_ssl_error(self._sock.do_handshake)
@@ -732,8 +828,7 @@ class _SelectorSslTransport(_SelectorTransport):
self._loop.remove_reader(self._sock_fd)
self._loop.remove_writer(self._sock_fd)
self._sock.close()
if self._waiter is not None:
self._waiter.set_exception(exc)
self._wakeup_waiter(exc)
if isinstance(exc, Exception):
return
else:
@@ -756,8 +851,7 @@ class _SelectorSslTransport(_SelectorTransport):
"on matching the hostname",
self, exc_info=True)
self._sock.close()
if self._waiter is not None:
self._waiter.set_exception(exc)
self._wakeup_waiter(exc)
return

# Add extra info that becomes available after handshake.
@@ -770,11 +864,10 @@ class _SelectorSslTransport(_SelectorTransport):
self._read_wants_write = False
self._write_wants_read = False
self._loop.add_reader(self._sock_fd, self._read_ready)
self._protocol_connected = True
self._loop.call_soon(self._protocol.connection_made, self)
if self._waiter is not None:
# wait until protocol.connection_made() has been called
self._loop.call_soon(self._waiter._set_result_unless_cancelled,
None)
# only wake up the waiter when connection_made() has been called
self._loop.call_soon(self._wakeup_waiter)

if self._loop.get_debug():
dt = self._loop.time() - start_time
@@ -912,10 +1005,12 @@ class _SelectorDatagramTransport(_SelectorTransport):
super(_SelectorDatagramTransport, self).__init__(loop, sock,
protocol, extra)
self._address = address
self._loop.add_reader(self._sock_fd, self._read_ready)
self._loop.call_soon(self._protocol.connection_made, self)
# only start reading when connection_made() has been called
self._loop.call_soon(self._loop.add_reader,
self._sock_fd, self._read_ready)
if waiter is not None:
# wait until protocol.connection_made() has been called
# only wake up the waiter when connection_made() has been called
self._loop.call_soon(waiter._set_result_unless_cancelled, None)

def get_write_buffer_size(self):

@@ -178,6 +178,8 @@ class BaseSelector(object):
SelectorKey for this file object
"""
mapping = self.get_map()
if mapping is None:
raise RuntimeError('Selector is closed')
try:
return mapping[fileobj]
except KeyError:
@@ -260,6 +262,7 @@ class _BaseSelectorImpl(BaseSelector):

def close(self):
self._fd_to_key.clear()
self._map = None

def get_map(self):
return self._map
@@ -408,7 +411,7 @@ if hasattr(select, 'epoll'):
key = super(EpollSelector, self).unregister(fileobj)
try:
self._epoll.unregister(key.fd)
except OSError:
except IOError:
# This can happen if the FD was closed since it
# was registered.
pass
@@ -423,7 +426,12 @@ if hasattr(select, 'epoll'):
# epoll_wait() has a resolution of 1 millisecond, round away
# from zero to wait *at least* timeout seconds.
timeout = math.ceil(timeout * 1e3) * 1e-3
max_ev = len(self._fd_to_key)

# epoll_wait() expects `maxevents` to be greater than zero;
# we want to make sure that `select()` can be called when no
# FD is registered.
max_ev = max(len(self._fd_to_key), 1)

ready = []
try:
fd_event_list = wrap_error(self._epoll.poll, timeout, max_ev)
@@ -577,7 +585,8 @@ if hasattr(select, 'kqueue'):
super(KqueueSelector, self).close()


# Choose the best implementation: roughly, epoll|kqueue|devpoll > poll > select.
# Choose the best implementation, roughly:
# epoll|kqueue|devpoll > poll > select.
# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
if 'KqueueSelector' in globals():
DefaultSelector = KqueueSelector

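The ranking above is what DefaultSelector encodes; a standalone sketch of the selectors API (Unix-only, because it uses socketpair())::

    import socket
    from trollius import selectors

    selector = selectors.DefaultSelector()  # epoll/kqueue > poll > select

    rsock, wsock = socket.socketpair()
    selector.register(rsock, selectors.EVENT_READ)

    wsock.send(b'ping')
    for key, events in selector.select(timeout=1.0):
        print(key.fileobj.recv(4))  # b'ping'

    selector.unregister(rsock)
    selector.close()
    rsock.close()
    wsock.close()
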
678
trollius/sslproto.py
Normal file
@@ -0,0 +1,678 @@
import collections
import sys
import warnings
try:
import ssl
from .py3_ssl import BACKPORT_SSL_CONTEXT
except ImportError: # pragma: no cover
ssl = None

from . import protocols
from . import transports
from .log import logger
from .py33_exceptions import BrokenPipeError, ConnectionResetError


def _create_transport_context(server_side, server_hostname):
if server_side:
raise ValueError('Server side SSL needs a valid SSLContext')

# Client side may pass ssl=True to use a default
# context; in that case the sslcontext passed is None.
# The default is secure for client connections.
if hasattr(ssl, 'create_default_context'):
# Python 3.4+: use up-to-date strong settings.
sslcontext = ssl.create_default_context()
if not server_hostname:
sslcontext.check_hostname = False
else:
# Fallback for Python 3.3.
sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
if not BACKPORT_SSL_CONTEXT:
sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.options |= ssl.OP_NO_SSLv3
sslcontext.set_default_verify_paths()
sslcontext.verify_mode = ssl.CERT_REQUIRED
return sslcontext

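In practice _create_transport_context() is what runs when user code passes ssl=True instead of an SSLContext; a hedged sketch of that entry point (host is a placeholder)::

    import trollius

    loop = trollius.get_event_loop()
    # ssl=True asks the loop to build a default, secure client context
    # via _create_transport_context().
    coro = loop.create_connection(trollius.Protocol, 'example.com', 443,
                                  ssl=True)
    transport, protocol = loop.run_until_complete(coro)
    transport.close()
    loop.close()
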
def _is_sslproto_available():
return hasattr(ssl, "MemoryBIO")


# States of an _SSLPipe.
_UNWRAPPED = "UNWRAPPED"
_DO_HANDSHAKE = "DO_HANDSHAKE"
_WRAPPED = "WRAPPED"
_SHUTDOWN = "SHUTDOWN"

if ssl is not None:
if hasattr(ssl, 'CertificateError'):
_SSL_ERRORS = (ssl.SSLError, ssl.CertificateError)
else:
_SSL_ERRORS = ssl.SSLError

class _SSLPipe(object):
"""An SSL "Pipe".

An SSL pipe allows you to communicate with an SSL/TLS protocol instance
through memory buffers. It can be used to implement a security layer for an
existing connection where you don't have access to the connection's file
descriptor, or for some reason you don't want to use it.

An SSL pipe can be in "wrapped" and "unwrapped" mode. In unwrapped mode,
data is passed through untransformed. In wrapped mode, application level
data is encrypted to SSL record level data and vice versa. The SSL record
level is the lowest level in the SSL protocol suite and is what travels
as-is over the wire.

An _SSLPipe initially is in "unwrapped" mode. To start SSL, call
do_handshake(). To shutdown SSL again, call unwrap().
"""

max_size = 256 * 1024 # Buffer size passed to read()

def __init__(self, context, server_side, server_hostname=None):
"""
The *context* argument specifies the ssl.SSLContext to use.

The *server_side* argument indicates whether this is a server side or
client side transport.

The optional *server_hostname* argument can be used to specify the
hostname you are connecting to. You may only specify this parameter if
the _ssl module supports Server Name Indication (SNI).
"""
self._context = context
self._server_side = server_side
self._server_hostname = server_hostname
self._state = _UNWRAPPED
self._incoming = ssl.MemoryBIO()
self._outgoing = ssl.MemoryBIO()
self._sslobj = None
self._need_ssldata = False
self._handshake_cb = None
self._shutdown_cb = None

@property
def context(self):
"""The SSL context passed to the constructor."""
return self._context

@property
def ssl_object(self):
"""The internal ssl.SSLObject instance.

Return None if the pipe is not wrapped.
"""
return self._sslobj

@property
def need_ssldata(self):
"""Whether more record level data is needed to complete a handshake
that is currently in progress."""
return self._need_ssldata

@property
def wrapped(self):
"""
Whether a security layer is currently in effect.

Return False during handshake.
"""
return self._state == _WRAPPED

def do_handshake(self, callback=None):
"""Start the SSL handshake.

Return a list of ssldata. A ssldata element is a list of buffers.

The optional *callback* argument can be used to install a callback that
will be called when the handshake is complete. The callback will be
called with None if successful, else an exception instance.
"""
if self._state != _UNWRAPPED:
raise RuntimeError('handshake in progress or completed')
self._sslobj = self._context.wrap_bio(
self._incoming, self._outgoing,
server_side=self._server_side,
server_hostname=self._server_hostname)
self._state = _DO_HANDSHAKE
self._handshake_cb = callback
ssldata, appdata = self.feed_ssldata(b'', only_handshake=True)
assert len(appdata) == 0
return ssldata

def shutdown(self, callback=None):
"""Start the SSL shutdown sequence.

Return a list of ssldata. A ssldata element is a list of buffers.

The optional *callback* argument can be used to install a callback that
will be called when the shutdown is complete. The callback will be
called without arguments.
"""
if self._state == _UNWRAPPED:
raise RuntimeError('no security layer present')
if self._state == _SHUTDOWN:
raise RuntimeError('shutdown in progress')
assert self._state in (_WRAPPED, _DO_HANDSHAKE)
self._state = _SHUTDOWN
self._shutdown_cb = callback
ssldata, appdata = self.feed_ssldata(b'')
assert appdata == [] or appdata == [b'']
return ssldata

def feed_eof(self):
"""Send a potentially "ragged" EOF.

This method will raise an SSL_ERROR_EOF exception if the EOF is
unexpected.
"""
self._incoming.write_eof()
ssldata, appdata = self.feed_ssldata(b'')
assert appdata == [] or appdata == [b'']

def feed_ssldata(self, data, only_handshake=False):
"""Feed SSL record level data into the pipe.

The data must be a bytes instance. It is OK to send an empty bytes
instance. This can be used to get ssldata for a handshake initiated by
this endpoint.

Return a (ssldata, appdata) tuple. The ssldata element is a list of
buffers containing SSL data that needs to be sent to the remote SSL.

The appdata element is a list of buffers containing plaintext data that
needs to be forwarded to the application. The appdata list may contain
an empty buffer indicating an SSL "close_notify" alert. This alert must
be acknowledged by calling shutdown().
"""
if self._state == _UNWRAPPED:
# If unwrapped, pass plaintext data straight through.
if data:
appdata = [data]
else:
appdata = []
return ([], appdata)

self._need_ssldata = False
if data:
self._incoming.write(data)

ssldata = []
appdata = []
try:
if self._state == _DO_HANDSHAKE:
# Call do_handshake() until it doesn't raise anymore.
self._sslobj.do_handshake()
self._state = _WRAPPED
if self._handshake_cb:
self._handshake_cb(None)
if only_handshake:
return (ssldata, appdata)
# Handshake done: execute the wrapped block

if self._state == _WRAPPED:
# Main state: read data from SSL until close_notify
while True:
chunk = self._sslobj.read(self.max_size)
appdata.append(chunk)
if not chunk: # close_notify
break

elif self._state == _SHUTDOWN:
# Call shutdown() until it doesn't raise anymore.
self._sslobj.unwrap()
self._sslobj = None
self._state = _UNWRAPPED
if self._shutdown_cb:
self._shutdown_cb()

elif self._state == _UNWRAPPED:
# Drain possible plaintext data after close_notify.
appdata.append(self._incoming.read())
except _SSL_ERRORS as exc:
if getattr(exc, 'errno', None) not in (
ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE,
ssl.SSL_ERROR_SYSCALL):
if self._state == _DO_HANDSHAKE and self._handshake_cb:
self._handshake_cb(exc)
raise
self._need_ssldata = (exc.errno == ssl.SSL_ERROR_WANT_READ)

# Check for record level data that needs to be sent back.
# Happens for the initial handshake and renegotiations.
if self._outgoing.pending:
ssldata.append(self._outgoing.read())
return (ssldata, appdata)

def feed_appdata(self, data, offset=0):
"""Feed plaintext data into the pipe.

Return an (ssldata, offset) tuple. The ssldata element is a list of
buffers containing record level data that needs to be sent to the
remote SSL instance. The offset is the number of plaintext bytes that
were processed, which may be less than the length of data.

NOTE: In case of short writes, this call MUST be retried with the SAME
buffer passed into the *data* argument (i.e. the id() must be the
same). This is an OpenSSL requirement. A further particularity is that
a short write will always have offset == 0, because the _ssl module
does not enable partial writes. And even though the offset is zero,
there will still be encrypted data in ssldata.
"""
assert 0 <= offset <= len(data)
if self._state == _UNWRAPPED:
# pass through data in unwrapped mode
if offset < len(data):
ssldata = [data[offset:]]
else:
ssldata = []
return (ssldata, len(data))

ssldata = []
view = memoryview(data)
while True:
self._need_ssldata = False
try:
if offset < len(view):
offset += self._sslobj.write(view[offset:])
except ssl.SSLError as exc:
# It is not allowed to call write() after unwrap() until the
# close_notify is acknowledged. We return the condition to the
# caller as a short write.
if exc.reason == 'PROTOCOL_IS_SHUTDOWN':
exc.errno = ssl.SSL_ERROR_WANT_READ
if exc.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE,
ssl.SSL_ERROR_SYSCALL):
raise
self._need_ssldata = (exc.errno == ssl.SSL_ERROR_WANT_READ)

# See if there's any record level data back for us.
if self._outgoing.pending:
ssldata.append(self._outgoing.read())
if offset == len(view) or self._need_ssldata:
break
return (ssldata, offset)

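To illustrate the pipe mechanics just described, here is a hedged, in-memory loopback sketch; it requires ssl.MemoryBIO (Python 3.5+), and the certificate file names are hypothetical::

    import ssl
    from trollius.sslproto import _SSLPipe

    server_ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    server_ctx.load_cert_chain('server.crt', 'server.key')  # hypothetical files
    client_ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    client_ctx.verify_mode = ssl.CERT_NONE  # sketch only: no verification

    client = _SSLPipe(client_ctx, server_side=False)
    server = _SSLPipe(server_ctx, server_side=True)

    # Shuttle handshake records between the two pipes until both wrap.
    pending = client.do_handshake()
    server.do_handshake()
    while not (client.wrapped and server.wrapped):
        out = []
        for chunk in pending:
            ssldata, _ = server.feed_ssldata(chunk)
            out.extend(ssldata)
        pending = []
        for chunk in out:
            ssldata, _ = client.feed_ssldata(chunk)
            pending.extend(ssldata)
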
class _SSLProtocolTransport(transports._FlowControlMixin,
|
||||
transports.Transport):
|
||||
|
||||
def __init__(self, loop, ssl_protocol, app_protocol):
|
||||
self._loop = loop
|
||||
self._ssl_protocol = ssl_protocol
|
||||
self._app_protocol = app_protocol
|
||||
self._closed = False
|
||||
|
||||
def get_extra_info(self, name, default=None):
|
||||
"""Get optional transport information."""
|
||||
return self._ssl_protocol._get_extra_info(name, default)
|
||||
|
||||
def close(self):
|
||||
"""Close the transport.
|
||||
|
||||
Buffered data will be flushed asynchronously. No more data
|
||||
will be received. After all buffered data is flushed, the
|
||||
protocol's connection_lost() method will (eventually) called
|
||||
with None as its argument.
|
||||
"""
|
||||
self._closed = True
|
||||
self._ssl_protocol._start_shutdown()
|
||||
|
||||
# On Python 3.3 and older, objects with a destructor part of a reference
|
||||
# cycle are never destroyed. It's not more the case on Python 3.4 thanks
|
||||
# to the PEP 442.
|
||||
if sys.version_info >= (3, 4):
|
||||
def __del__(self):
|
||||
if not self._closed:
|
||||
warnings.warn("unclosed transport %r" % self, ResourceWarning)
|
||||
self.close()
|
||||
|
||||
def pause_reading(self):
|
||||
"""Pause the receiving end.
|
||||
|
||||
No data will be passed to the protocol's data_received()
|
||||
method until resume_reading() is called.
|
||||
"""
|
||||
self._ssl_protocol._transport.pause_reading()
|
||||
|
||||
def resume_reading(self):
|
||||
"""Resume the receiving end.
|
||||
|
||||
Data received will once again be passed to the protocol's
|
||||
data_received() method.
|
||||
"""
|
||||
self._ssl_protocol._transport.resume_reading()
|
||||
|
||||
def set_write_buffer_limits(self, high=None, low=None):
|
||||
"""Set the high- and low-water limits for write flow control.
|
||||
|
||||
These two values control when to call the protocol's
|
||||
pause_writing() and resume_writing() methods. If specified,
|
||||
the low-water limit must be less than or equal to the
|
||||
high-water limit. Neither value can be negative.
|
||||
|
||||
The defaults are implementation-specific. If only the
|
||||
high-water limit is given, the low-water limit defaults to a
|
||||
implementation-specific value less than or equal to the
|
||||
high-water limit. Setting high to zero forces low to zero as
|
||||
well, and causes pause_writing() to be called whenever the
|
||||
buffer becomes non-empty. Setting low to zero causes
|
||||
resume_writing() to be called only once the buffer is empty.
|
||||
Use of zero for either limit is generally sub-optimal as it
|
||||
reduces opportunities for doing I/O and computation
|
||||
concurrently.
|
||||
"""
|
||||
self._ssl_protocol._transport.set_write_buffer_limits(high, low)
|
||||
|
||||
def get_write_buffer_size(self):
|
||||
"""Return the current size of the write buffer."""
|
||||
return self._ssl_protocol._transport.get_write_buffer_size()
|
||||
|
||||
def write(self, data):
|
||||
"""Write some data bytes to the transport.
|
||||
|
||||
This does not block; it buffers the data and arranges for it
|
||||
to be sent out asynchronously.
|
||||
"""
|
||||
if not isinstance(data, (bytes, bytearray, memoryview)):
|
||||
raise TypeError("data: expecting a bytes-like instance, got {!r}"
|
||||
.format(type(data).__name__))
|
||||
if not data:
|
||||
return
|
||||
self._ssl_protocol._write_appdata(data)
|
||||
|
||||
def can_write_eof(self):
|
||||
"""Return True if this transport supports write_eof(), False if not."""
|
||||
return False
|
||||
|
||||
def abort(self):
|
||||
"""Close the transport immediately.
|
||||
|
||||
Buffered data will be lost. No more data will be received.
|
||||
The protocol's connection_lost() method will (eventually) be
|
||||
called with None as its argument.
|
||||
"""
|
||||
self._ssl_protocol._abort()


class SSLProtocol(protocols.Protocol):
    """SSL protocol.

    Implementation of SSL on top of a socket using incoming and outgoing
    buffers which are ssl.MemoryBIO objects.
    """

    def __init__(self, loop, app_protocol, sslcontext, waiter,
                 server_side=False, server_hostname=None):
        if ssl is None:
            raise RuntimeError('stdlib ssl module not available')

        if not sslcontext:
            sslcontext = _create_transport_context(server_side, server_hostname)

        self._server_side = server_side
        if server_hostname and not server_side:
            self._server_hostname = server_hostname
        else:
            self._server_hostname = None
        self._sslcontext = sslcontext
        # SSL-specific extra info. More info is set when the handshake
        # completes.
        self._extra = dict(sslcontext=sslcontext)

        # App data write buffering
        self._write_backlog = collections.deque()
        self._write_buffer_size = 0

        self._waiter = waiter
        self._loop = loop
        self._app_protocol = app_protocol
        self._app_transport = _SSLProtocolTransport(self._loop,
                                                    self, self._app_protocol)
        self._sslpipe = None
        self._session_established = False
        self._in_handshake = False
        self._in_shutdown = False
        self._transport = None

    def _wakeup_waiter(self, exc=None):
        if self._waiter is None:
            return
        if not self._waiter.cancelled():
            if exc is not None:
                self._waiter.set_exception(exc)
            else:
                self._waiter.set_result(None)
        self._waiter = None

    def connection_made(self, transport):
        """Called when the low-level connection is made.

        Start the SSL handshake.
        """
        self._transport = transport
        self._sslpipe = _SSLPipe(self._sslcontext,
                                 self._server_side,
                                 self._server_hostname)
        self._start_handshake()

    def connection_lost(self, exc):
        """Called when the low-level connection is lost or closed.

        The argument is an exception object or None (the latter
        meaning a regular EOF is received or the connection was
        aborted or closed).
        """
        if self._session_established:
            self._session_established = False
            self._loop.call_soon(self._app_protocol.connection_lost, exc)
        self._transport = None
        self._app_transport = None

    def pause_writing(self):
        """Called when the low-level transport's buffer goes over
        the high-water mark.
        """
        self._app_protocol.pause_writing()

    def resume_writing(self):
        """Called when the low-level transport's buffer drains below
        the low-water mark.
        """
        self._app_protocol.resume_writing()

    def data_received(self, data):
        """Called when some SSL data is received.

        The argument is a bytes object.
        """
        try:
            ssldata, appdata = self._sslpipe.feed_ssldata(data)
        except ssl.SSLError as e:
            if self._loop.get_debug():
                logger.warning('%r: SSL error %s (reason %s)',
                               self, e.errno, e.reason)
            self._abort()
            return

        for chunk in ssldata:
            self._transport.write(chunk)

        for chunk in appdata:
            if chunk:
                self._app_protocol.data_received(chunk)
            else:
                self._start_shutdown()
                break

    def eof_received(self):
        """Called when the other end of the low-level stream
        is half-closed.

        If this returns a false value (including None), the transport
        will close itself. If it returns a true value, closing the
        transport is up to the protocol.
        """
        try:
            if self._loop.get_debug():
                logger.debug("%r received EOF", self)

            self._wakeup_waiter(ConnectionResetError)

            if not self._in_handshake:
                keep_open = self._app_protocol.eof_received()
                if keep_open:
                    logger.warning('returning true from eof_received() '
                                   'has no effect when using ssl')
        finally:
            self._transport.close()

    def _get_extra_info(self, name, default=None):
        if name in self._extra:
            return self._extra[name]
        else:
            return self._transport.get_extra_info(name, default)

    def _start_shutdown(self):
        if self._in_shutdown:
            return
        self._in_shutdown = True
        self._write_appdata(b'')

    def _write_appdata(self, data):
        self._write_backlog.append((data, 0))
        self._write_buffer_size += len(data)
        self._process_write_backlog()

    def _start_handshake(self):
        if self._loop.get_debug():
            logger.debug("%r starts SSL handshake", self)
            self._handshake_start_time = self._loop.time()
        else:
            self._handshake_start_time = None
        self._in_handshake = True
        # (b'', 1) is a special value in _process_write_backlog() to do
        # the SSL handshake
        self._write_backlog.append((b'', 1))
        self._loop.call_soon(self._process_write_backlog)

    def _on_handshake_complete(self, handshake_exc):
        self._in_handshake = False

        sslobj = self._sslpipe.ssl_object
        try:
            if handshake_exc is not None:
                raise handshake_exc

            peercert = sslobj.getpeercert()
            if not hasattr(self._sslcontext, 'check_hostname'):
                # Verify hostname if requested. Python 3.4+ uses check_hostname
                # and checks the hostname in do_handshake().
                if (self._server_hostname
                        and self._sslcontext.verify_mode != ssl.CERT_NONE):
                    ssl.match_hostname(peercert, self._server_hostname)
        except BaseException as exc:
            if self._loop.get_debug():
                if (hasattr(ssl, 'CertificateError')
                        and isinstance(exc, ssl.CertificateError)):
                    logger.warning("%r: SSL handshake failed "
                                   "on verifying the certificate",
                                   self, exc_info=True)
                else:
                    logger.warning("%r: SSL handshake failed",
                                   self, exc_info=True)
            self._transport.close()
            if isinstance(exc, Exception):
                self._wakeup_waiter(exc)
                return
            else:
                raise

        if self._loop.get_debug():
            dt = self._loop.time() - self._handshake_start_time
            logger.debug("%r: SSL handshake took %.1f ms", self, dt * 1e3)

        # Add extra info that becomes available after handshake.
        self._extra.update(peercert=peercert,
                           cipher=sslobj.cipher(),
                           compression=sslobj.compression(),
                           )
        self._app_protocol.connection_made(self._app_transport)
        self._wakeup_waiter()
        self._session_established = True
        # In case transport.write() was already called. Don't call
        # _process_write_backlog() immediately, but schedule it:
        # _on_handshake_complete() can be called indirectly from
        # _process_write_backlog(), and _process_write_backlog() is not
        # reentrant.
        self._loop.call_soon(self._process_write_backlog)

    def _process_write_backlog(self):
        # Try to make progress on the write backlog.
        if self._transport is None:
            return

        try:
            for i in range(len(self._write_backlog)):
                data, offset = self._write_backlog[0]
                if data:
                    ssldata, offset = self._sslpipe.feed_appdata(data, offset)
                elif offset:
                    ssldata = self._sslpipe.do_handshake(self._on_handshake_complete)
                    offset = 1
                else:
                    ssldata = self._sslpipe.shutdown(self._finalize)
                    offset = 1

                for chunk in ssldata:
                    self._transport.write(chunk)

                if offset < len(data):
                    self._write_backlog[0] = (data, offset)
                    # A short write means that a write is blocked on a read:
                    # we need to enable reading if it is paused!
                    assert self._sslpipe.need_ssldata
                    if self._transport._paused:
                        self._transport.resume_reading()
                    break

                # An entire chunk from the backlog was processed. We can
                # delete it and reduce the outstanding buffer size.
                del self._write_backlog[0]
                self._write_buffer_size -= len(data)
        except BaseException as exc:
            if self._in_handshake:
                self._on_handshake_complete(exc)
            else:
                self._fatal_error(exc, 'Fatal error on SSL transport')

    def _fatal_error(self, exc, message='Fatal error on transport'):
        # Should be called from exception handler only.
        if isinstance(exc, (BrokenPipeError, ConnectionResetError)):
            if self._loop.get_debug():
                logger.debug("%r: %s", self, message, exc_info=True)
        else:
            self._loop.call_exception_handler({
                'message': message,
                'exception': exc,
                'transport': self._transport,
                'protocol': self,
            })
        if self._transport:
            self._transport._force_close(exc)

    def _finalize(self):
        if self._transport is not None:
            self._transport.close()

    def _abort(self):
        if self._transport is not None:
            try:
                self._transport.abort()
            finally:
                self._finalize()
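
# Example (not part of the diff): SSLProtocol is normally instantiated by the
# event loop itself when create_connection() is given an SSL context; a rough
# sketch of that internal wiring, using hypothetical loop internals:
#
#   waiter = futures.Future(loop=loop)
#   ssl_protocol = SSLProtocol(loop, app_protocol, sslcontext, waiter,
#                              server_side=False,
#                              server_hostname='example.com')
#   transport = loop._make_socket_transport(sock, ssl_protocol)
#   yield From(waiter)              # resolved once the handshake completes
#   app_transport = ssl_protocol._app_transport  # what the application sees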

@@ -6,11 +6,13 @@ __all__ = ['StreamReader', 'StreamWriter', 'StreamReaderProtocol',
           ]

import socket
import sys

if hasattr(socket, 'AF_UNIX'):
    __all__.extend(['open_unix_connection', 'start_unix_server'])

from . import coroutines
from . import compat
from . import events
from . import futures
from . import protocols
@@ -99,8 +101,8 @@ def start_server(client_connected_cb, host=None, port=None,
                                        loop=loop)
        return protocol

    result = yield From(loop.create_server(factory, host, port, **kwds))
    raise Return(result)
    server = yield From(loop.create_server(factory, host, port, **kwds))
    raise Return(server)


if hasattr(socket, 'AF_UNIX'):
@@ -133,8 +135,8 @@ if hasattr(socket, 'AF_UNIX'):
                                            loop=loop)
            return protocol

        res = (yield From(loop.create_unix_server(factory, path, **kwds)))
        raise Return(res)
        server = (yield From(loop.create_unix_server(factory, path, **kwds)))
        raise Return(server)


class FlowControlMixin(protocols.Protocol):
@@ -148,7 +150,10 @@ class FlowControlMixin(protocols.Protocol):
    """

    def __init__(self, loop=None):
        self._loop = loop  # May be None; we may never need it.
        if loop is None:
            self._loop = events.get_event_loop()
        else:
            self._loop = loop
        self._paused = False
        self._drain_waiter = None
        self._connection_lost = False
@@ -309,11 +314,12 @@ class StreamReader(object):
        # it also doubles as half the buffer limit.
        self._limit = limit
        if loop is None:
            loop = events.get_event_loop()
        self._loop = loop
            self._loop = events.get_event_loop()
        else:
            self._loop = loop
        self._buffer = bytearray()
        self._eof = False    # Whether we're done.
        self._waiter = None  # A future.
        self._eof = False    # Whether we're done.
        self._waiter = None  # A future used by _wait_for_data()
        self._exception = None
        self._transport = None
        self._paused = False
@@ -330,6 +336,14 @@ class StreamReader(object):
            if not waiter.cancelled():
                waiter.set_exception(exc)

    def _wakeup_waiter(self):
        """Wake up read() or readline() when it is waiting for data or EOF."""
        waiter = self._waiter
        if waiter is not None:
            self._waiter = None
            if not waiter.cancelled():
                waiter.set_result(None)

    def set_transport(self, transport):
        assert self._transport is None, 'Transport already set'
        self._transport = transport
@@ -341,11 +355,7 @@ class StreamReader(object):

    def feed_eof(self):
        self._eof = True
        waiter = self._waiter
        if waiter is not None:
            self._waiter = None
            if not waiter.cancelled():
                waiter.set_result(True)
        self._wakeup_waiter()

    def at_eof(self):
        """Return True if the buffer is empty and 'feed_eof' was called."""
@@ -358,12 +368,7 @@ class StreamReader(object):
            return

        self._buffer.extend(data)

        waiter = self._waiter
        if waiter is not None:
            self._waiter = None
            if not waiter.cancelled():
                waiter.set_result(False)
        self._wakeup_waiter()

        if (self._transport is not None and
                not self._paused and
@@ -378,7 +383,9 @@ class StreamReader(object):
            else:
                self._paused = True

    def _create_waiter(self, func_name):
    @coroutine
    def _wait_for_data(self, func_name):
        """Wait until feed_data() or feed_eof() is called."""
        # StreamReader uses a future to link the protocol feed_data() method
        # to a read coroutine. Running two read coroutines at the same time
        # would have an unexpected behaviour. It would not be possible to know
@@ -386,7 +393,19 @@ class StreamReader(object):
        if self._waiter is not None:
            raise RuntimeError('%s() called while another coroutine is '
                               'already waiting for incoming data' % func_name)
        return futures.Future(loop=self._loop)

        # In asyncio, there is no need to recheck if we got data or EOF thanks
        # to "yield from". In trollius, a StreamReader method can be called
        # after the _wait_for_data() coroutine is scheduled and before it is
        # really executed.
        if self._buffer or self._eof:
            return

        self._waiter = futures.Future(loop=self._loop)
        try:
            yield From(self._waiter)
        finally:
            self._waiter = None

    @coroutine
    def readline(self):
@@ -416,11 +435,7 @@ class StreamReader(object):
                    break

            if not_enough:
                self._waiter = self._create_waiter('readline')
                try:
                    yield From(self._waiter)
                finally:
                    self._waiter = None
                yield From(self._wait_for_data('readline'))

        self._maybe_resume_transport()
        raise Return(bytes(line))
@@ -447,11 +462,7 @@ class StreamReader(object):
            raise Return(b''.join(blocks))
        else:
            if not self._buffer and not self._eof:
                self._waiter = self._create_waiter('read')
                try:
                    yield From(self._waiter)
                finally:
                    self._waiter = None
                yield From(self._wait_for_data('read'))

        if n < 0 or len(self._buffer) <= n:
            data = bytes(self._buffer)
@@ -486,3 +497,16 @@ class StreamReader(object):
            n -= len(block)

        raise Return(b''.join(blocks))

    # FIXME: should we support __aiter__ and __anext__ in Trollius?
    #if compat.PY35:
    #    @coroutine
    #    def __aiter__(self):
    #        return self
    #
    #    @coroutine
    #    def __anext__(self):
    #        val = yield from self.readline()
    #        if val == b'':
    #            raise StopAsyncIteration
    #        return val
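
# Example (not part of the diff): reading a line with the trollius stream
# API; the same code under asyncio would use "yield from" and "return".
import trollius
from trollius import From, Return

@trollius.coroutine
def read_greeting(host, port):
    reader, writer = yield From(trollius.open_connection(host, port))
    line = yield From(reader.readline())
    writer.close()
    raise Return(line)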
@@ -2,22 +2,21 @@ from __future__ import absolute_import

__all__ = ['create_subprocess_exec', 'create_subprocess_shell']

import collections
import subprocess

from . import events
from . import futures
from . import protocols
from . import streams
from . import tasks
from .coroutines import coroutine, From, Return
from .py33_exceptions import (BrokenPipeError, ConnectionResetError,
                              ProcessLookupError)
from .py33_exceptions import BrokenPipeError, ConnectionResetError
from .log import logger


PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
if hasattr(subprocess, 'DEVNULL'):
    DEVNULL = subprocess.DEVNULL


class SubprocessStreamProtocol(streams.FlowControlMixin,
@@ -28,8 +27,6 @@ class SubprocessStreamProtocol(streams.FlowControlMixin,
        super(SubprocessStreamProtocol, self).__init__(loop=loop)
        self._limit = limit
        self.stdin = self.stdout = self.stderr = None
        self.waiter = futures.Future(loop=loop)
        self._waiters = collections.deque()
        self._transport = None

    def __repr__(self):
@@ -44,19 +41,25 @@ class SubprocessStreamProtocol(streams.FlowControlMixin,

    def connection_made(self, transport):
        self._transport = transport
        if transport.get_pipe_transport(1):

        stdout_transport = transport.get_pipe_transport(1)
        if stdout_transport is not None:
            self.stdout = streams.StreamReader(limit=self._limit,
                                               loop=self._loop)
        if transport.get_pipe_transport(2):
            self.stdout.set_transport(stdout_transport)

        stderr_transport = transport.get_pipe_transport(2)
        if stderr_transport is not None:
            self.stderr = streams.StreamReader(limit=self._limit,
                                               loop=self._loop)
        stdin = transport.get_pipe_transport(0)
        if stdin is not None:
            self.stdin = streams.StreamWriter(stdin,
            self.stderr.set_transport(stderr_transport)

        stdin_transport = transport.get_pipe_transport(0)
        if stdin_transport is not None:
            self.stdin = streams.StreamWriter(stdin_transport,
                                              protocol=self,
                                              reader=None,
                                              loop=self._loop)
        self.waiter.set_result(None)

    def pipe_data_received(self, fd, data):
        if fd == 1:
@@ -88,11 +91,8 @@ class SubprocessStreamProtocol(streams.FlowControlMixin,
            reader.set_exception(exc)

    def process_exited(self):
        # wake up futures waiting for wait()
        returncode = self._transport.get_returncode()
        while self._waiters:
            waiter = self._waiters.popleft()
            waiter.set_result(returncode)
        self._transport.close()
        self._transport = None


class Process:
@@ -103,9 +103,7 @@ class Process:
        self.stdin = protocol.stdin
        self.stdout = protocol.stdout
        self.stderr = protocol.stderr
        # transport.get_pid() cannot be used because it fails
        # if the process already exited
        self.pid = self._transport.get_extra_info('subprocess').pid
        self.pid = transport.get_pid()

    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.pid)
@@ -116,30 +114,19 @@ class Process:

    @coroutine
    def wait(self):
        """Wait until the process exits and return the process return code."""
        returncode = self._transport.get_returncode()
        if returncode is not None:
            raise Return(returncode)
        """Wait until the process exits and return the process return code.

        waiter = futures.Future(loop=self._loop)
        self._protocol._waiters.append(waiter)
        yield From(waiter)
        raise Return(waiter.result())

    def _check_alive(self):
        if self._transport.get_returncode() is not None:
            raise ProcessLookupError()
        This method is a coroutine."""
        return_code = yield From(self._transport._wait())
        raise Return(return_code)

    def send_signal(self, signal):
        self._check_alive()
        self._transport.send_signal(signal)

    def terminate(self):
        self._check_alive()
        self._transport.terminate()

    def kill(self):
        self._check_alive()
        self._transport.kill()

    @coroutine
@@ -214,10 +201,9 @@ def create_subprocess_shell(cmd, **kwds):
    protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
                                                        loop=loop)
    transport, protocol = yield From(loop.subprocess_shell(
        protocol_factory,
        cmd, stdin=stdin, stdout=stdout,
        stderr=stderr, **kwds))
    yield From(protocol.waiter)
        protocol_factory,
        cmd, stdin=stdin, stdout=stdout,
        stderr=stderr, **kwds))
    raise Return(Process(transport, protocol, loop))

@coroutine
@@ -232,9 +218,8 @@ def create_subprocess_exec(program, *args, **kwds):
    protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
                                                        loop=loop)
    transport, protocol = yield From(loop.subprocess_exec(
        protocol_factory,
        program, *args,
        stdin=stdin, stdout=stdout,
        stderr=stderr, **kwds))
    yield From(protocol.waiter)
        protocol_factory,
        program, *args,
        stdin=stdin, stdout=stdout,
        stderr=stderr, **kwds))
    raise Return(Process(transport, protocol, loop))
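
# Example (not part of the diff): running a child process with the new
# transport-based wait(); a minimal sketch in trollius style.
import trollius
from trollius import From, Return

@trollius.coroutine
def run_date():
    proc = yield From(trollius.create_subprocess_exec(
        'date', stdout=trollius.subprocess.PIPE))
    out = yield From(proc.stdout.read())
    exitcode = yield From(proc.wait())
    raise Return((exitcode, out))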
@@ -4,13 +4,14 @@ from __future__ import print_function
__all__ = ['Task',
           'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED',
           'wait', 'wait_for', 'as_completed', 'sleep', 'async',
           'gather', 'shield', 'Return', 'From',
           'gather', 'shield', 'ensure_future',
           ]

import functools
import linecache
import sys
import traceback
import warnings
try:
    from weakref import WeakSet
except ImportError:
@@ -23,11 +24,9 @@ from . import events
from . import executor
from . import futures
from .locks import Lock, Condition, Semaphore, _ContextManager
from .coroutines import coroutine, From, Return, iscoroutinefunction, iscoroutine
from .coroutines import coroutine, From, Return


_PY34 = (sys.version_info >= (3, 4))


@coroutine
def _lock_coroutine(lock):
@@ -54,6 +53,10 @@ class Task(futures.Future):
    # all running event loops. {EventLoop: Task}
    _current_tasks = {}

    # If False, don't log a message if the task is destroyed while its
    # status is still pending
    _log_destroy_pending = True

    @classmethod
    def current_task(cls, loop=None):
        """Return the currently running task in an event loop or None.
@@ -77,23 +80,20 @@ class Task(futures.Future):
        return set(t for t in cls._all_tasks if t._loop is loop)

    def __init__(self, coro, loop=None):
        assert coroutines.iscoroutine(coro), repr(coro)  # Not a coroutine function!
        assert coroutines.iscoroutine(coro), repr(coro)
        super(Task, self).__init__(loop=loop)
        if self._source_traceback:
            del self._source_traceback[-1]
        self._coro = iter(coro)  # Use the iterator just in case.
        self._coro = coro
        self._fut_waiter = None
        self._must_cancel = False
        self._loop.call_soon(self._step)
        self.__class__._all_tasks.add(self)
        # If False, don't log a message if the task is destroyed while its
        # status is still pending
        self._log_destroy_pending = True

    # On Python 3.3 or older, objects with a destructor that are part of a
    # reference cycle are never destroyed. That's no longer the case on
    # Python 3.4, thanks to PEP 442.
    if _PY34:
    # On Python 3.3 or older, objects with a destructor that are part of a
    # reference cycle are never destroyed. That's not the case any more on
    # Python 3.4 thanks to PEP 442.
    if compat.PY34:
        def __del__(self):
            if self._state == futures._PENDING and self._log_destroy_pending:
                context = {
@@ -122,7 +122,7 @@ class Task(futures.Future):
    def get_stack(self, limit=None):
        """Return the list of stack frames for this task's coroutine.

        If the coroutine is active, this returns the stack where it is
        If the coroutine is not done, this returns the stack where it is
        suspended. If the coroutine has completed successfully or was
        cancelled, this returns an empty list. If the coroutine was
        terminated by an exception, this returns the list of traceback
@@ -168,7 +168,8 @@ class Task(futures.Future):
        This produces output similar to that of the traceback module,
        for the frames retrieved by get_stack(). The limit argument
        is passed to get_stack(). The file argument is an I/O stream
        to which the output goes; by default it goes to sys.stderr.
        to which the output is written; by default output is written
        to sys.stderr.
        """
        extracted_list = []
        checked = set()
@@ -197,18 +198,18 @@ class Task(futures.Future):
            print(line, file=file, end='')

    def cancel(self):
        """Request this task to cancel itself.
        """Request that this task cancel itself.

        This arranges for a CancelledError to be thrown into the
        wrapped coroutine on the next cycle through the event loop.
        The coroutine then has a chance to clean up or even deny
        the request using try/except/finally.

        Contrary to Future.cancel(), this does not guarantee that the
        Unlike Future.cancel, this does not guarantee that the
        task will be cancelled: the exception might be caught and
        acted upon, delaying cancellation of the task or preventing it
        completely. The task may also return a value or raise a
        different exception.
        acted upon, delaying cancellation of the task or preventing
        cancellation completely. The task may also return a value or
        raise a different exception.

        Immediately after this method is called, Task.cancelled() will
        not return True (unless the task was already cancelled). A
@@ -228,7 +229,7 @@ class Task(futures.Future):
            self._must_cancel = True
        return True

|
||||
def _step(self, value=None, exc=None, exc_tb=None):
|
||||
assert not self.done(), \
|
||||
'_step(): already done: {0!r}, {1!r}, {2!r}'.format(self, value, exc)
|
||||
if self._must_cancel:
|
||||
@@ -238,15 +239,17 @@ class Task(futures.Future):
|
||||
coro = self._coro
|
||||
self._fut_waiter = None
|
||||
|
||||
if exc_tb is not None:
|
||||
init_exc = exc
|
||||
else:
|
||||
init_exc = None
|
||||
self.__class__._current_tasks[self._loop] = self
|
||||
# Call either coro.throw(exc) or coro.send(value).
|
||||
try:
|
||||
if exc is not None:
|
||||
result = coro.throw(exc)
|
||||
elif value is not None:
|
||||
result = coro.send(value)
|
||||
else:
|
||||
result = next(coro)
|
||||
result = coro.send(value)
|
||||
except StopIteration as exc:
|
||||
if compat.PY33:
|
||||
# asyncio Task object? get the result of the coroutine
|
||||
@@ -260,11 +263,16 @@ class Task(futures.Future):
|
||||
self.set_result(result)
|
||||
except futures.CancelledError as exc:
|
||||
super(Task, self).cancel() # I.e., Future.cancel(self).
|
||||
except Exception as exc:
|
||||
self.set_exception(exc)
|
||||
except BaseException as exc:
|
||||
self.set_exception(exc)
|
||||
raise
|
||||
if exc is init_exc:
|
||||
self._set_exception_with_tb(exc, exc_tb)
|
||||
exc_tb = None
|
||||
else:
|
||||
self.set_exception(exc)
|
||||
|
||||
if not isinstance(exc, Exception):
|
||||
# reraise BaseException
|
||||
raise
|
||||
else:
|
||||
if coroutines._DEBUG:
|
||||
if not coroutines._coroutine_at_yield_from(self._coro):
|
||||
@@ -282,8 +290,10 @@ class Task(futures.Future):
|
||||
elif isinstance(result, coroutines.FromWrapper):
|
||||
result = result.obj
|
||||
|
||||
if iscoroutine(result):
|
||||
result = async(result, loop=self._loop)
|
||||
if coroutines.iscoroutine(result):
|
||||
# "yield coroutine" creates a task, the current task
|
||||
# will wait until the new task is done
|
||||
result = self._loop.create_task(result)
|
||||
# FIXME: faster check. common base class? hasattr?
|
||||
elif isinstance(result, (Lock, Condition, Semaphore)):
|
||||
coro = _lock_coroutine(result)
|
||||
@@ -310,19 +320,28 @@ class Task(futures.Future):
|
||||
self = None # Needed to break cycles when an exception occurs.
|
||||
|
||||
def _wakeup(self, future):
|
||||
try:
|
||||
value = future.result()
|
||||
except Exception as exc:
|
||||
# This may also be a cancellation.
|
||||
self._step(None, exc)
|
||||
if (future._state == futures._FINISHED
|
||||
and future._exception is not None):
|
||||
# Get the traceback before calling exception(), because calling
|
||||
# the exception() method clears the traceback
|
||||
exc_tb = future._get_exception_tb()
|
||||
exc = future.exception()
|
||||
self._step(None, exc, exc_tb)
|
||||
exc_tb = None
|
||||
else:
|
||||
self._step(value, None)
|
||||
try:
|
||||
value = future.result()
|
||||
except Exception as exc:
|
||||
# This may also be a cancellation.
|
||||
self._step(None, exc)
|
||||
else:
|
||||
self._step(value, None)
|
||||
self = None # Needed to break cycles when an exception occurs.
|
||||
|
||||
|
||||
# wait() and as_completed() similar to those in PEP 3148.
|
||||
|
||||
# Export symbols in trollius.tasks for compatibility with Tulip
|
||||
# Export symbols in trollius.tasks for compatibility with asyncio
|
||||
FIRST_COMPLETED = executor.FIRST_COMPLETED
|
||||
FIRST_EXCEPTION = executor.FIRST_EXCEPTION
|
||||
ALL_COMPLETED = executor.ALL_COMPLETED
|
||||
@@ -340,7 +359,7 @@ def wait(fs, loop=None, timeout=None, return_when=ALL_COMPLETED):
|
||||
|
||||
Usage:
|
||||
|
||||
done, pending = yield From(trollius.wait(fs))
|
||||
done, pending = yield From(asyncio.wait(fs))
|
||||
|
||||
Note: This does not raise TimeoutError! Futures that aren't done
|
||||
when the timeout occurs are returned in the second set.
|
||||
@@ -355,15 +374,15 @@ def wait(fs, loop=None, timeout=None, return_when=ALL_COMPLETED):
|
||||
if loop is None:
|
||||
loop = events.get_event_loop()
|
||||
|
||||
fs = set(async(f, loop=loop) for f in set(fs))
|
||||
fs = set(ensure_future(f, loop=loop) for f in set(fs))
|
||||
|
||||
result = yield From(_wait(fs, timeout, return_when, loop))
|
||||
raise Return(result)
|
||||
|
||||
|
||||
def _release_waiter(waiter, value=True, *args):
|
||||
def _release_waiter(waiter, *args):
|
||||
if not waiter.done():
|
||||
waiter.set_result(value)
|
||||
waiter.set_result(None)
|
||||
|
||||
|
||||
@coroutine
|
||||
@@ -376,26 +395,34 @@ def wait_for(fut, timeout, loop=None):
|
||||
it cancels the task and raises TimeoutError. To avoid the task
|
||||
cancellation, wrap it in shield().
|
||||
|
||||
Usage:
|
||||
|
||||
result = yield From(trollius.wait_for(fut, 10.0))
|
||||
If the wait is cancelled, the task is also cancelled.
|
||||
|
||||
This function is a coroutine.
|
||||
"""
|
||||
if loop is None:
|
||||
loop = events.get_event_loop()
|
||||
|
||||
if timeout is None:
|
||||
raise Return((yield From(fut)))
|
||||
result = yield From(fut)
|
||||
raise Return(result)
|
||||
|
||||
waiter = futures.Future(loop=loop)
|
||||
timeout_handle = loop.call_later(timeout, _release_waiter, waiter, False)
|
||||
cb = functools.partial(_release_waiter, waiter, True)
|
||||
timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
|
||||
cb = functools.partial(_release_waiter, waiter)
|
||||
|
||||
fut = async(fut, loop=loop)
|
||||
fut = ensure_future(fut, loop=loop)
|
||||
fut.add_done_callback(cb)
|
||||
|
||||
try:
|
||||
if (yield From(waiter)):
|
||||
# wait until the future completes or the timeout
|
||||
try:
|
||||
yield From(waiter)
|
||||
except futures.CancelledError:
|
||||
fut.remove_done_callback(cb)
|
||||
fut.cancel()
|
||||
raise
|
||||
|
||||
if fut.done():
|
||||
raise Return(fut.result())
|
||||
else:
|
||||
fut.remove_done_callback(cb)
|
||||
@@ -427,7 +454,7 @@ def _wait(fs, timeout, return_when, loop):
|
||||
if timeout_handle is not None:
|
||||
timeout_handle.cancel()
|
||||
if not waiter.done():
|
||||
waiter.set_result(False)
|
||||
waiter.set_result(None)
|
||||
|
||||
for f in fs:
|
||||
f.add_done_callback(_on_completion)
|
||||
@@ -470,7 +497,7 @@ def as_completed(fs, loop=None, timeout=None):
|
||||
if isinstance(fs, futures._FUTURE_CLASSES) or coroutines.iscoroutine(fs):
|
||||
raise TypeError("expect a list of futures, not %s" % type(fs).__name__)
|
||||
loop = loop if loop is not None else events.get_event_loop()
|
||||
todo = set(async(f, loop=loop) for f in set(fs))
|
||||
todo = set(ensure_future(f, loop=loop) for f in set(fs))
|
||||
from .queues import Queue # Import here to avoid circular import problem.
|
||||
done = Queue(loop=loop)
|
||||
timeout_handle = None
|
||||
@@ -521,6 +548,20 @@ def sleep(delay, result=None, loop=None):
|
||||
def async(coro_or_future, loop=None):
|
||||
"""Wrap a coroutine in a future.
|
||||
|
||||
If the argument is a Future, it is returned directly.
|
||||
|
||||
This function is deprecated in 3.5. Use asyncio.ensure_future() instead.
|
||||
"""
|
||||
|
||||
warnings.warn("asyncio.async() function is deprecated, use ensure_future()",
|
||||
DeprecationWarning)
|
||||
|
||||
return ensure_future(coro_or_future, loop=loop)
|
||||
|
||||
|
||||
def ensure_future(coro_or_future, loop=None):
|
||||
"""Wrap a coroutine in a future.
|
||||
|
||||
If the argument is a Future, it is returned directly.
|
||||
"""
|
||||
# FIXME: only check if coroutines._DEBUG is True?
|
||||
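
# Example (not part of the diff): ensure_future() accepts a coroutine
# object or a Future and always hands back a Future.
@coroutine
def _example_ensure_future(loop):
    fut = ensure_future(sleep(1.0), loop=loop)   # schedules a Task
    assert ensure_future(fut, loop=loop) is fut  # a Future passes through
    yield From(fut)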
@@ -594,7 +635,7 @@ def gather(*coros_or_futures, **kw):
    arg_to_fut = {}
    for arg in set(coros_or_futures):
        if not isinstance(arg, futures._FUTURE_CLASSES):
            fut = async(arg, loop=loop)
            fut = ensure_future(arg, loop=loop)
            if loop is None:
                loop = fut._loop
            # The caller cannot control this future, the "destroy pending task"
@@ -615,12 +656,13 @@ def gather(*coros_or_futures, **kw):
    results = [None] * nchildren

    def _done_callback(i, fut):
        if outer._state != futures._PENDING:
            if fut._exception is not None:
        if outer.done():
            if not fut.cancelled():
                # Mark exception retrieved.
                fut.exception()
            return
        if fut._state == futures._CANCELLED:

        if fut.cancelled():
            res = futures.CancelledError()
            if not return_exceptions:
                outer.set_exception(res)
@@ -668,7 +710,7 @@ def shield(arg, loop=None):
    except CancelledError:
        res = None
    """
    inner = async(arg, loop=loop)
    inner = ensure_future(arg, loop=loop)
    if inner.done():
        # Shortcut.
        return inner
@@ -677,9 +719,11 @@ def shield(arg, loop=None):

    def _done_callback(inner):
        if outer.cancelled():
            # Mark inner's result as retrieved.
            inner.cancelled() or inner.exception()
            if not inner.cancelled():
                # Mark inner's result as retrieved.
                inner.exception()
            return

        if inner.cancelled():
            outer.cancel()
        else:
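
# Example (not part of the diff): gather() wraps each argument with
# ensure_future() and returns the results in the order of the arguments.
@coroutine
def _example_gather(loop):
    results = yield From(gather(sleep(0.1, result='a'),
                                sleep(0.2, result='b'),
                                loop=loop))
    raise Return(results)  # ['a', 'b']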
@@ -1,19 +1,118 @@
# Subset of test.support from CPython 3.5, just what we need to run asyncio
# test suite. The code is copied from CPython 3.5 to not depend on the test
# module because it is rarely installed.

# Ignore symbol TEST_HOME_DIR: test_events works without it

from __future__ import absolute_import
import functools
import gc
import os.path
import os
import platform
import re
import socket
import subprocess
import sys
import time

from trollius import test_utils

# TEST_HOME_DIR refers to the top level directory of the "test" package
# that contains Python's regression test suite
TEST_SUPPORT_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_HOME_DIR = os.path.dirname(TEST_SUPPORT_DIR)
# A constant likely larger than the underlying OS pipe buffer size, to
# make writes blocking.
# Windows limit seems to be around 512 B, and many Unix kernels have a
# 64 KiB pipe buffer size or 16 * PAGE_SIZE: take a few megs to be sure.
# (see issue #17835 for a discussion of this number).
PIPE_MAX_SIZE = 4 * 1024 * 1024 + 1

def strip_python_stderr(stderr):
    """Strip the stderr of a Python process from potential debug output
    emitted by the interpreter.

    This will typically be run on the result of the communicate() method
    of a subprocess.Popen object.
    """
    stderr = re.sub(br"\[\d+ refs, \d+ blocks\]\r?\n?", b"", stderr).strip()
    return stderr


# Executing the interpreter in a subprocess
def _assert_python(expected_success, *args, **env_vars):
    if '__isolated' in env_vars:
        isolated = env_vars.pop('__isolated')
    else:
        isolated = not env_vars
    cmd_line = [sys.executable]
    if sys.version_info >= (3, 3):
        cmd_line.extend(('-X', 'faulthandler'))
    if isolated and sys.version_info >= (3, 4):
        # isolated mode: ignore Python environment variables, ignore user
        # site-packages, and don't add the current directory to sys.path
        cmd_line.append('-I')
    elif not env_vars:
        # ignore Python environment variables
        cmd_line.append('-E')
    # Need to preserve the original environment, for in-place testing of
    # shared library builds.
    env = os.environ.copy()
    # But a special flag that can be set to override -- in this case, the
    # caller is responsible to pass the full environment.
    if env_vars.pop('__cleanenv', None):
        env = {}
    env.update(env_vars)
    cmd_line.extend(args)
    p = subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         env=env)
    try:
        out, err = p.communicate()
    finally:
        subprocess._cleanup()
        p.stdout.close()
        p.stderr.close()
    rc = p.returncode
    err = strip_python_stderr(err)
    if (rc and expected_success) or (not rc and not expected_success):
        raise AssertionError(
            "Process return code is %d, "
            "stderr follows:\n%s" % (rc, err.decode('ascii', 'ignore')))
    return rc, out, err


def assert_python_ok(*args, **env_vars):
    """
    Assert that running the interpreter with `args` and optional environment
    variables `env_vars` succeeds (rc == 0) and return a (return code, stdout,
    stderr) tuple.

    If the __cleanenv keyword is set, env_vars is used as a fresh environment.

    Python is started in isolated mode (command line option -I),
    except if the __isolated keyword is set to False.
    """
    return _assert_python(True, *args, **env_vars)
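
# Example (not part of the diff): run a snippet in a child interpreter and
# check that it exits successfully.
rc, out, err = assert_python_ok('-c', 'print("hello")')
assert rc == 0 and out.strip() == b'hello'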

is_jython = sys.platform.startswith('java')

def gc_collect():
    """Force as many objects as possible to be collected.

    In non-CPython implementations of Python, this is needed because timely
    deallocation is not guaranteed by the garbage collector. (Even in CPython
    this can be the case in case of reference cycles.) This means that __del__
    methods may be called later than expected and weakrefs may remain alive for
    longer than expected. This function tries its best to force all garbage
    objects to disappear.
    """
    gc.collect()
    if is_jython:
        time.sleep(0.1)
        gc.collect()
    gc.collect()


HOST = "127.0.0.1"
HOSTv6 = "::1"


def _is_ipv6_enabled():
@@ -22,7 +121,7 @@ def _is_ipv6_enabled():
    sock = None
    try:
        sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        sock.bind(("::1", 0))
        sock.bind((HOSTv6, 0))
        return True
    except OSError:
        pass
@@ -34,56 +133,6 @@ def _is_ipv6_enabled():
IPV6_ENABLED = _is_ipv6_enabled()


# A constant likely larger than the underlying OS pipe buffer size, to
# make writes blocking.
# Windows limit seems to be around 512 B, and many Unix kernels have a
# 64 KiB pipe buffer size or 16 * PAGE_SIZE: take a few megs to be sure.
# (see issue #17835 for a discussion of this number).
PIPE_MAX_SIZE = 4 * 1024 * 1024 + 1


class TestFailed(Exception):
    """Test failed."""


def bind_port(sock, host="127.0.0.1"):
    """Bind the socket to a free port and return the port number. Relies on
    ephemeral ports in order to ensure we are using an unbound port. This is
    important as many tests may be running simultaneously, especially in a
    buildbot environment. This method raises an exception if the sock.family
    is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
    or SO_REUSEPORT set on it. Tests should *never* set these socket options
    for TCP/IP sockets. The only case for setting these options is testing
    multicasting via multiple UDP sockets.

    Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
    on Windows), it will be set on the socket. This will prevent anyone else
    from bind()'ing to our host/port for the duration of the test.
    """

    if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
        if hasattr(socket, 'SO_REUSEADDR'):
            if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
                raise TestFailed("tests should never set the SO_REUSEADDR " \
                                 "socket option on TCP/IP sockets!")
        if hasattr(socket, 'SO_REUSEPORT'):
            try:
                if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
                    raise TestFailed("tests should never set the SO_REUSEPORT " \
                                     "socket option on TCP/IP sockets!")
            except EnvironmentError:
                # Python's socket module was compiled using modern headers
                # thus defining SO_REUSEPORT but this process is running
                # under an older kernel that does not support SO_REUSEPORT.
                pass
    if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)

    sock.bind((host, 0))
    port = sock.getsockname()[1]
    return port


def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
    """Returns an unused port that should be suitable for binding. This is
    achieved by creating a temporary socket with the same family and type as
@@ -146,61 +195,43 @@ def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
    del tempsock
    return port

def bind_port(sock, host=HOST):
    """Bind the socket to a free port and return the port number. Relies on
    ephemeral ports in order to ensure we are using an unbound port. This is
    important as many tests may be running simultaneously, especially in a
    buildbot environment. This method raises an exception if the sock.family
    is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
    or SO_REUSEPORT set on it. Tests should *never* set these socket options
    for TCP/IP sockets. The only case for setting these options is testing
    multicasting via multiple UDP sockets.

is_jython = sys.platform.startswith('java')


def gc_collect():
    """Force as many objects as possible to be collected.

    In non-CPython implementations of Python, this is needed because timely
    deallocation is not guaranteed by the garbage collector. (Even in CPython
    this can be the case in case of reference cycles.) This means that __del__
    methods may be called later than expected and weakrefs may remain alive for
    longer than expected. This function tries its best to force all garbage
    objects to disappear.
    Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
    on Windows), it will be set on the socket. This will prevent anyone else
    from bind()'ing to our host/port for the duration of the test.
    """
    gc.collect()
    if is_jython:
        time.sleep(0.1)
        gc.collect()
    gc.collect()

def _requires_unix_version(sysname, min_version):
    """Decorator raising SkipTest if the OS is `sysname` and the version is less
    than `min_version`.
    if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
        if hasattr(socket, 'SO_REUSEADDR'):
            if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
                raise TestFailed("tests should never set the SO_REUSEADDR "
                                 "socket option on TCP/IP sockets!")
        if hasattr(socket, 'SO_REUSEPORT'):
            try:
                reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
                if reuse == 1:
                    raise TestFailed("tests should never set the SO_REUSEPORT "
                                     "socket option on TCP/IP sockets!")
            except OSError:
                # Python's socket module was compiled using modern headers
                # thus defining SO_REUSEPORT but this process is running
                # under an older kernel that does not support SO_REUSEPORT.
                pass
    if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)

    For example, @_requires_unix_version('FreeBSD', (7, 2)) raises SkipTest if
    the FreeBSD version is less than 7.2.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kw):
            if platform.system() == sysname:
                version_txt = platform.release().split('-', 1)[0]
                try:
                    version = tuple(map(int, version_txt.split('.')))
                except ValueError:
                    pass
                else:
                    if version < min_version:
                        min_version_txt = '.'.join(map(str, min_version))
                        raise test_utils.SkipTest(
                            "%s version %s or higher required, not %s"
                            % (sysname, min_version_txt, version_txt))
            return func(*args, **kw)
        wrapper.min_version = min_version
        return wrapper
    return decorator

def requires_freebsd_version(*min_version):
    """Decorator raising SkipTest if the OS is FreeBSD and the FreeBSD version is
    less than `min_version`.

    For example, @requires_freebsd_version(7, 2) raises SkipTest if the FreeBSD
    version is less than 7.2.
    """
    return _requires_unix_version('FreeBSD', min_version)
    sock.bind((host, 0))
    port = sock.getsockname()[1]
    return port
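
# Example (not part of the diff): the usual pattern in the test suite is to
# let bind_port() pick a free ephemeral port for a fresh socket.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = bind_port(sock)  # bound to (HOST, <free port>)
sock.listen(1)
# ... exercise the server at (HOST, port), then:
sock.close()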

def requires_mac_ver(*min_version):
    """Decorator raising SkipTest if the OS is Mac OS X and the OS X
@@ -229,58 +260,50 @@ def requires_mac_ver(*min_version):
        return wrapper
    return decorator

def _requires_unix_version(sysname, min_version):
    """Decorator raising SkipTest if the OS is `sysname` and the version is
    less than `min_version`.

def strip_python_stderr(stderr):
    """Strip the stderr of a Python process from potential debug output
    emitted by the interpreter.

    This will typically be run on the result of the communicate() method
    of a subprocess.Popen object.
    For example, @_requires_unix_version('FreeBSD', (7, 2)) raises SkipTest if
    the FreeBSD version is less than 7.2.
    """
    stderr = re.sub(br"\[\d+ refs, \d+ blocks\]\r?\n?", b"", stderr).strip()
    return stderr
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kw):
            if platform.system() == sysname:
                version_txt = platform.release().split('-', 1)[0]
                try:
                    version = tuple(map(int, version_txt.split('.')))
                except ValueError:
                    pass
                else:
                    if version < min_version:
                        min_version_txt = '.'.join(map(str, min_version))
                        raise test_utils.SkipTest(
                            "%s version %s or higher required, not %s"
                            % (sysname, min_version_txt, version_txt))
            return func(*args, **kw)
        wrapper.min_version = min_version
        return wrapper
    return decorator

# Executing the interpreter in a subprocess
def _assert_python(expected_success, *args, **env_vars):
    cmd_line = [sys.executable]
    if not env_vars:
        # ignore Python environment variables
        cmd_line.append('-E')
    # Need to preserve the original environment, for in-place testing of
    # shared library builds.
    env = os.environ.copy()
    # But a special flag that can be set to override -- in this case, the
    # caller is responsible to pass the full environment.
    if env_vars.pop('__cleanenv', None):
        env = {}
    env.update(env_vars)
    cmd_line.extend(args)
    p = subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         env=env)
    try:
        out, err = p.communicate()
    finally:
        subprocess._cleanup()
        p.stdout.close()
        p.stderr.close()
    rc = p.returncode
    err = strip_python_stderr(err)
    if (rc and expected_success) or (not rc and not expected_success):
        raise AssertionError(
            "Process return code is %d, "
            "stderr follows:\n%s" % (rc, err.decode('ascii', 'ignore')))
    return rc, out, err
def requires_freebsd_version(*min_version):
    """Decorator raising SkipTest if the OS is FreeBSD and the FreeBSD version
    is less than `min_version`.

def assert_python_ok(*args, **env_vars):
    For example, @requires_freebsd_version(7, 2) raises SkipTest if the FreeBSD
    version is less than 7.2.
    """
    Assert that running the interpreter with `args` and optional environment
    variables `env_vars` succeeds (rc == 0) and return a (return code, stdout,
    stderr) tuple.
    return _requires_unix_version('FreeBSD', min_version)

    If the __cleanenv keyword is set, env_vars is used as a fresh environment.
# Use test.support if available
try:
    from test.support import *
except ImportError:
    pass

    Python is started in isolated mode (command line option -I),
    except if the __isolated keyword is set to False.
    """
    return _assert_python(True, *args, **env_vars)
# Use test.script_helper if available
try:
    from test.script_helper import assert_python_ok
except ImportError:
    pass
@@ -36,6 +36,7 @@ except ImportError:  # pragma: no cover
    ssl = None

from . import base_events
from . import compat
from . import events
from . import futures
from . import selectors
@@ -50,21 +51,17 @@ else:
    from socket import socketpair  # pragma: no cover

try:
    # Prefer unittest2 if available (on Python 2)
    import unittest2 as unittest
except ImportError:
    import unittest
    skipIf = unittest.skipIf
    skipUnless = unittest.skipUnless
    SkipTest = unittest.SkipTest
    _TestCase = unittest.TestCase
except AttributeError:
    # Python 2.6: use the backported unittest module called "unittest2"
    import unittest2
    skipIf = unittest2.skipIf
    skipUnless = unittest2.skipUnless
    SkipTest = unittest2.SkipTest
    _TestCase = unittest2.TestCase

skipIf = unittest.skipIf
skipUnless = unittest.skipUnless
SkipTest = unittest.SkipTest


if not hasattr(_TestCase, 'assertRaisesRegex'):
if not hasattr(unittest.TestCase, 'assertRaisesRegex'):
    class _BaseTestCaseContext:

        def __init__(self, test_case):
@@ -190,7 +187,14 @@ class SilentWSGIRequestHandler(WSGIRequestHandler):
        pass


class SilentWSGIServer(WSGIServer):
class SilentWSGIServer(WSGIServer, object):

    request_timeout = 2

    def get_request(self):
        request, client_addr = super(SilentWSGIServer, self).get_request()
        request.settimeout(self.request_timeout)
        return request, client_addr

    def handle_error(self, request, client_address):
        pass
@@ -239,7 +243,8 @@ def _run_test_server(address, use_ssl, server_cls, server_ssl_cls):
    httpd = server_class(address, SilentWSGIRequestHandler)
    httpd.set_app(app)
    httpd.address = httpd.server_address
    server_thread = threading.Thread(target=httpd.serve_forever)
    server_thread = threading.Thread(
        target=lambda: httpd.serve_forever(poll_interval=0.05))
    server_thread.start()
    try:
        yield httpd
@@ -259,7 +264,9 @@ if hasattr(socket, 'AF_UNIX'):
            self.server_port = 80


    class UnixWSGIServer(UnixHTTPServer, WSGIServer):
    class UnixWSGIServer(UnixHTTPServer, WSGIServer, object):

        request_timeout = 2

        def server_bind(self):
            UnixHTTPServer.server_bind(self)
@@ -267,6 +274,7 @@ if hasattr(socket, 'AF_UNIX'):

        def get_request(self):
            request, client_addr = super(UnixWSGIServer, self).get_request()
            request.settimeout(self.request_timeout)
            # Code in the stdlib expects that get_request
            # will return a socket and a tuple (host, port).
            # However, this isn't true for UNIX sockets,
@@ -399,6 +407,7 @@ class TestLoop(base_events.BaseEventLoop):
        self._time += advance

    def close(self):
        super(TestLoop, self).close()
        if self._check_on_close:
            try:
                self._gen.send(0)
@@ -491,7 +500,7 @@ def get_function_source(func):
    return source


class TestCase(_TestCase):
class TestCase(unittest.TestCase):
    def set_event_loop(self, loop, cleanup=True):
        assert loop is not None
        # ensure that the event loop is passed explicitly in asyncio
@@ -507,7 +516,15 @@ class TestCase(_TestCase):
    def tearDown(self):
        events.set_event_loop(None)

    if not hasattr(_TestCase, 'assertRaisesRegex'):
        # Detect CPython bug #23353: ensure that yield/yield-from is not used
        # in an except block of a generator
        if sys.exc_info()[0] == SkipTest:
            if compat.PY2:
                sys.exc_clear()
        else:
            self.assertEqual(sys.exc_info(), (None, None, None))

    if not hasattr(unittest.TestCase, 'assertRaisesRegex'):
        def assertRaisesRegex(self, expected_exception, expected_regex,
                              callable_obj=None, *args, **kwargs):
            """Asserts that the message in a raised exception matches a regex.
@@ -527,7 +544,7 @@ class TestCase(_TestCase):

        return context.handle('assertRaisesRegex', callable_obj, args, kwargs)

    if not hasattr(_TestCase, 'assertRegex'):
    if not hasattr(unittest.TestCase, 'assertRegex'):
        def assertRegex(self, text, expected_regex, msg=None):
            """Fail the test unless the text matches the regular expression."""
            if isinstance(expected_regex, (str, bytes)):
@@ -562,3 +579,14 @@ def disable_logger():
        yield
    finally:
        logger.setLevel(old_level)

def mock_nonblocking_socket():
    """Create a mock of a non-blocking socket."""
    sock = mock.Mock(socket.socket)
    sock.gettimeout.return_value = 0.0
    return sock


def force_legacy_ssl_support():
    return mock.patch('trollius.sslproto._is_sslproto_available',
                      return_value=False)
|
||||
|
||||
trollius/transports.py
@@ -1,9 +1,8 @@
"""Abstract Transport class."""

import sys
from .compat import flatten_bytes

_PY34 = sys.version_info >= (3, 4)
from trollius import compat

__all__ = ['BaseTransport', 'ReadTransport', 'WriteTransport',
           'Transport', 'DatagramTransport', 'SubprocessTransport',

@@ -95,8 +94,8 @@ class WriteTransport(BaseTransport):
        The default implementation concatenates the arguments and
        calls write() on the result.
        """
        data = map(flatten_bytes, list_of_data)
        self.write(b''.join(data))
        data = compat.flatten_list_bytes(list_of_data)
        self.write(data)

    def write_eof(self):
        """Close the write end after flushing buffered data.

@@ -235,8 +234,10 @@ class _FlowControlMixin(Transport):
    resume_writing() may be called.
    """

    def __init__(self, extra=None):
    def __init__(self, extra=None, loop=None):
        super(_FlowControlMixin, self).__init__(extra)
        assert loop is not None
        self._loop = loop
        self._protocol_paused = False
        self._set_write_buffer_limits()

@@ -270,6 +271,9 @@ class _FlowControlMixin(Transport):
            'protocol': self._protocol,
        })

    def get_write_buffer_limits(self):
        return (self._low_water, self._high_water)

    def _set_write_buffer_limits(self, high=None, low=None):
        if high is None:
            if low is None:
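The writelines() change above swaps a local map(flatten_bytes, ...) for a single compat.flatten_list_bytes() call. A rough sketch of what such a helper has to do (a simplified stand-in; the real trollius.compat version also validates the input types):

def flatten_list_bytes(list_of_data):
    """Concatenate bytes-like objects (bytes, bytearray, memoryview)
    into a single bytes object."""
    return b''.join(memoryview(data).tobytes() for data in list_of_data)


assert flatten_list_bytes([b'ab', bytearray(b'cd')]) == b'abcd'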
trollius/unix_events.py
@@ -2,7 +2,6 @@
from __future__ import absolute_import

import errno
import fcntl
import os
import signal
import socket
@@ -10,13 +9,16 @@ import stat
import subprocess
import sys
import threading
import warnings


from . import base_events
from . import base_subprocess
from . import compat
from . import constants
from . import coroutines
from . import events
from . import futures
from . import selector_events
from . import selectors
from . import transports

@@ -79,7 +81,12 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
        Raise ValueError if the signal number is invalid or uncatchable.
        Raise RuntimeError if there is a problem setting up the handler.
        """
        if (coroutines.iscoroutine(callback)
                or coroutines.iscoroutinefunction(callback)):
            raise TypeError("coroutines cannot be used "
                            "with add_signal_handler()")
        self._check_signal(sig)
        self._check_closed()
        try:
            # set_wakeup_fd() raises ValueError if this is not the
            # main thread.  By calling it early we ensure that an

@@ -197,12 +204,28 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
                                   stdin, stdout, stderr, bufsize,
                                   extra=None, **kwargs):
        with events.get_child_watcher() as watcher:
            waiter = futures.Future(loop=self)
            transp = _UnixSubprocessTransport(self, protocol, args, shell,
                                              stdin, stdout, stderr, bufsize,
                                              extra=extra, **kwargs)
            yield From(transp._post_init())
                                              waiter=waiter, extra=extra,
                                              **kwargs)

            watcher.add_child_handler(transp.get_pid(),
                                      self._child_watcher_callback, transp)
            try:
                yield From(waiter)
            except Exception as exc:
                # Workaround CPython bug #23353: using yield/yield-from in an
                # except block of a generator doesn't properly clear
                # sys.exc_info()
                err = exc
            else:
                err = None

            if err is not None:
                transp.close()
                yield From(transp._wait())
                raise err

        raise Return(transp)

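The `err = exc` dance above is deliberate. A stripped-down sketch of the pattern (names invented for illustration), usable anywhere a trollius coroutine must clean up after a failed wait:

from trollius import coroutine, From, Return


@coroutine
def wait_and_cleanup(waiter, transport):
    """Illustration of the CPython issue #23353 workaround above: capture
    the exception, leave the except block, and only then yield."""
    try:
        yield From(waiter)
    except Exception as exc:
        # Yielding here would leave a stale sys.exc_info() on affected
        # interpreters, so only record the exception.
        err = exc
    else:
        err = None

    if err is not None:
        transport.close()
        raise err
    raise Return(transport)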
@@ -287,10 +310,17 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
        return server


def _set_nonblocking(fd):
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    flags = flags | os.O_NONBLOCK
    fcntl.fcntl(fd, fcntl.F_SETFL, flags)
if hasattr(os, 'set_blocking'):
    # Python 3.5 and newer
    def _set_nonblocking(fd):
        os.set_blocking(fd, False)
else:
    import fcntl

    def _set_nonblocking(fd):
        flags = fcntl.fcntl(fd, fcntl.F_GETFL)
        flags = flags | os.O_NONBLOCK
        fcntl.fcntl(fd, fcntl.F_SETFL, flags)
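The feature check above selects an implementation once, at import time, instead of branching on every call. A short usage sketch (the fallback branch is POSIX-only, as in the diff):

import os

if hasattr(os, 'set_blocking'):
    # Python 3.5 and newer
    def set_nonblocking(fd):
        os.set_blocking(fd, False)
else:
    import fcntl

    def set_nonblocking(fd):
        flags = fcntl.fcntl(fd, fcntl.F_GETFL)
        fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)

r, w = os.pipe()
set_nonblocking(r)
# Reading from the empty pipe now fails fast (EAGAIN) instead of
# blocking the whole event loop.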


class _UnixReadPipeTransport(transports.ReadTransport):

@@ -311,14 +341,21 @@ class _UnixReadPipeTransport(transports.ReadTransport):
        _set_nonblocking(self._fileno)
        self._protocol = protocol
        self._closing = False
        self._loop.add_reader(self._fileno, self._read_ready)
        self._loop.call_soon(self._protocol.connection_made, self)
        # only start reading when connection_made() has been called
        self._loop.call_soon(self._loop.add_reader,
                             self._fileno, self._read_ready)
        if waiter is not None:
            # wait until protocol.connection_made() has been called
            # only wake up the waiter when connection_made() has been called
            self._loop.call_soon(waiter._set_result_unless_cancelled, None)

    def __repr__(self):
        info = [self.__class__.__name__, 'fd=%s' % self._fileno]
        info = [self.__class__.__name__]
        if self._pipe is None:
            info.append('closed')
        elif self._closing:
            info.append('closing')
        info.append('fd=%s' % self._fileno)
        if self._pipe is not None:
            polling = selector_events._test_selector_event(
                self._loop._selector,

@@ -359,9 +396,21 @@ class _UnixReadPipeTransport(transports.ReadTransport):
        if not self._closing:
            self._close(None)

    # On Python 3.3 and older, objects with a destructor that are part of a
    # reference cycle are never destroyed. This is no longer the case on
    # Python 3.4 thanks to PEP 442.
    if sys.version_info >= (3, 4):
        def __del__(self):
            if self._pipe is not None:
                warnings.warn("unclosed transport %r" % self, ResourceWarning)
                self._pipe.close()

    def _fatal_error(self, exc, message='Fatal error on pipe transport'):
        # should be called by exception handler only
        if not (isinstance(exc, OSError) and exc.errno == errno.EIO):
        if (isinstance(exc, OSError) and exc.errno == errno.EIO):
            if self._loop.get_debug():
                logger.debug("%r: %s", self, message, exc_info=True)
        else:
            self._loop.call_exception_handler({
                'message': message,
                'exception': exc,

@@ -389,9 +438,8 @@ class _UnixWritePipeTransport(transports._FlowControlMixin,
                              transports.WriteTransport):

    def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
        super(_UnixWritePipeTransport, self).__init__(extra)
        super(_UnixWritePipeTransport, self).__init__(extra, loop)
        self._extra['pipe'] = pipe
        self._loop = loop
        self._pipe = pipe
        self._fileno = pipe.fileno()
        mode = os.fstat(self._fileno).st_mode

@@ -407,19 +455,27 @@ class _UnixWritePipeTransport(transports._FlowControlMixin,
        self._conn_lost = 0
        self._closing = False  # Set when close() or write_eof() called.

        # On AIX, the reader trick only works for sockets.
        # On other platforms it works for pipes and sockets.
        # (Exception: OS X 10.4?  Issue #19294.)
        if is_socket or not sys.platform.startswith("aix"):
            self._loop.add_reader(self._fileno, self._read_ready)

        self._loop.call_soon(self._protocol.connection_made, self)

        # On AIX, the reader trick (to be notified when the read end of the
        # socket is closed) only works for sockets. On other platforms it
        # works for pipes and sockets. (Exception: OS X 10.4?  Issue #19294.)
        if is_socket or not sys.platform.startswith("aix"):
            # only start reading when connection_made() has been called
            self._loop.call_soon(self._loop.add_reader,
                                 self._fileno, self._read_ready)

        if waiter is not None:
            # wait until protocol.connection_made() has been called
            # only wake up the waiter when connection_made() has been called
            self._loop.call_soon(waiter._set_result_unless_cancelled, None)

    def __repr__(self):
        info = [self.__class__.__name__, 'fd=%s' % self._fileno]
        info = [self.__class__.__name__]
        if self._pipe is None:
            info.append('closed')
        elif self._closing:
            info.append('closing')
        info.append('fd=%s' % self._fileno)
        if self._pipe is not None:
            polling = selector_events._test_selector_event(
                self._loop._selector,

@@ -509,9 +565,6 @@ class _UnixWritePipeTransport(transports._FlowControlMixin,
    def can_write_eof(self):
        return True

    # TODO: Make the relationships between write_eof(), close(),
    # abort(), _fatal_error() and _close() more straightforward.

    def write_eof(self):
        if self._closing:
            return

@@ -522,16 +575,28 @@ class _UnixWritePipeTransport(transports._FlowControlMixin,
            self._loop.call_soon(self._call_connection_lost, None)

    def close(self):
        if not self._closing:
        if self._pipe is not None and not self._closing:
            # write_eof is all we need to close the write pipe
            self.write_eof()

    # On Python 3.3 and older, objects with a destructor that are part of a
    # reference cycle are never destroyed. This is no longer the case on
    # Python 3.4 thanks to PEP 442.
    if sys.version_info >= (3, 4):
        def __del__(self):
            if self._pipe is not None:
                warnings.warn("unclosed transport %r" % self, ResourceWarning)
                self._pipe.close()

    def abort(self):
        self._close(None)

    def _fatal_error(self, exc, message='Fatal error on pipe transport'):
        # should be called by exception handler only
        if not isinstance(exc, (BrokenPipeError, ConnectionResetError)):
        if isinstance(exc, (BrokenPipeError, ConnectionResetError)):
            if self._loop.get_debug():
                logger.debug("%r: %s", self, message, exc_info=True)
        else:
            self._loop.call_exception_handler({
                'message': message,
                'exception': exc,

@@ -558,14 +623,20 @@ class _UnixWritePipeTransport(transports._FlowControlMixin,
        self._loop = None


def _set_cloexec_flag(fd, cloexec):
    cloexec_flag = getattr(fcntl, 'FD_CLOEXEC', 1)
if hasattr(os, 'set_inheritable'):
    # Python 3.4 and newer
    _set_inheritable = os.set_inheritable
else:
    import fcntl

    old = fcntl.fcntl(fd, fcntl.F_GETFD)
    if cloexec:
        fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
    else:
        fcntl.fcntl(fd, fcntl.F_SETFD, old & ~cloexec_flag)
    def _set_inheritable(fd, inheritable):
        cloexec_flag = getattr(fcntl, 'FD_CLOEXEC', 1)

        old = fcntl.fcntl(fd, fcntl.F_GETFD)
        if not inheritable:
            fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
        else:
            fcntl.fcntl(fd, fcntl.F_SETFD, old & ~cloexec_flag)
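os.set_inheritable() (PEP 446) and the FD_CLOEXEC fallback above are two views of the same bit: a non-inheritable descriptor is one with close-on-exec set. A short demonstration, assuming a POSIX Python 3.4+ where both APIs exist:

import os

r, w = os.pipe()
if hasattr(os, 'set_inheritable'):        # Python 3.4 and newer
    os.set_inheritable(w, False)          # same effect as setting FD_CLOEXEC
    assert os.get_inheritable(w) is False
os.close(r)
os.close(w)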
class _UnixSubprocessTransport(base_subprocess.BaseSubprocessTransport):

@@ -579,7 +650,12 @@ class _UnixSubprocessTransport(base_subprocess.BaseSubprocessTransport):
            # other end).  Notably this is needed on AIX, and works
            # just fine on other platforms.
            stdin, stdin_w = self._loop._socketpair()
            _set_cloexec_flag(stdin_w.fileno(), True)

            # Mark the write end of the stdin pipe as non-inheritable,
            # needed by close_fds=False on Python 3.3 and older
            # (Python 3.4 implements PEP 446, socketpair returns
            # non-inheritable sockets)
            _set_inheritable(stdin_w.fileno(), False)
        self._proc = subprocess.Popen(
            args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr,
            universal_newlines=False, bufsize=bufsize, **kwargs)

@@ -747,7 +823,7 @@ class SafeChildWatcher(BaseChildWatcher):
        pass

    def add_child_handler(self, pid, callback, *args):
        self._callbacks[pid] = callback, args
        self._callbacks[pid] = (callback, args)

        # Prevent a race condition in case the child is already terminated.
        self._do_waitpid(pid)

@@ -792,7 +868,9 @@ class SafeChildWatcher(BaseChildWatcher):
            except KeyError:  # pragma: no cover
                # May happen if .remove_child_handler() is called
                # after os.waitpid() returns.
                pass
                if self._loop.get_debug():
                    logger.warning("Child watcher got an unexpected pid: %r",
                                   pid, exc_info=True)
        else:
            callback(pid, returncode, *args)

@@ -903,7 +981,7 @@ class FastChildWatcher(BaseChildWatcher):


class _UnixDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
    """XXX"""
    """UNIX event loop policy with a watcher for child processes."""
    _loop_factory = _UnixSelectorEventLoop

    def __init__(self):
trollius/windows_events.py
@@ -17,7 +17,7 @@ from . import windows_utils
from . import _overlapped
from .coroutines import coroutine, From, Return
from .log import logger
from .py33_exceptions import wrap_error, get_error_class, ConnectionRefusedError
from .py33_exceptions import wrap_error, BrokenPipeError, ConnectionResetError


__all__ = ['SelectorEventLoop', 'ProactorEventLoop', 'IocpProactor',

@@ -30,6 +30,13 @@ INFINITE = 0xffffffff
ERROR_CONNECTION_REFUSED = 1225
ERROR_CONNECTION_ABORTED = 1236

# Initial delay in seconds for connect_pipe() before retrying to connect
CONNECT_PIPE_INIT_DELAY = 0.001

# Maximum delay in seconds for connect_pipe() before retrying to connect
CONNECT_PIPE_MAX_DELAY = 0.100
class _OverlappedFuture(futures.Future):
    """Subclass of Future which represents an overlapped operation.

@@ -78,63 +85,151 @@ class _OverlappedFuture(futures.Future):
        self._ov = None


class _WaitHandleFuture(futures.Future):
class _BaseWaitHandleFuture(futures.Future):
    """Subclass of Future which represents a wait handle."""

    def __init__(self, iocp, ov, handle, wait_handle, loop=None):
        super(_WaitHandleFuture, self).__init__(loop=loop)
    def __init__(self, ov, handle, wait_handle, loop=None):
        super(_BaseWaitHandleFuture, self).__init__(loop=loop)
        if self._source_traceback:
            del self._source_traceback[-1]
        # iocp and ov are only used by cancel() to notify IocpProactor
        # that the wait was cancelled
        self._iocp = iocp
        # Keep a reference to the Overlapped object to keep it alive until the
        # wait is unregistered
        self._ov = ov
        self._handle = handle
        self._wait_handle = wait_handle

        # Should we call UnregisterWaitEx() if the wait completes
        # or is cancelled?
        self._registered = True

    def _poll(self):
        # non-blocking wait: use a timeout of 0 millisecond
        return (_winapi.WaitForSingleObject(self._handle, 0) ==
                _winapi.WAIT_OBJECT_0)

    def _repr_info(self):
        info = super(_WaitHandleFuture, self)._repr_info()
        info.insert(1, 'handle=%#x' % self._handle)
        if self._wait_handle:
        info = super(_BaseWaitHandleFuture, self)._repr_info()
        info.append('handle=%#x' % self._handle)
        if self._handle is not None:
            state = 'signaled' if self._poll() else 'waiting'
            info.insert(1, 'wait_handle=<%s, %#x>'
                           % (state, self._wait_handle))
            info.append(state)
        if self._wait_handle is not None:
            info.append('wait_handle=%#x' % self._wait_handle)
        return info

    def _unregister_wait(self):
        if self._wait_handle is None:
            return
        try:
            _overlapped.UnregisterWait(self._wait_handle)
        except WindowsError as e:
            if e.winerror != _overlapped.ERROR_IO_PENDING:
                raise
            # ERROR_IO_PENDING is not an error, the wait was unregistered
        self._wait_handle = None
        self._iocp = None
    def _unregister_wait_cb(self, fut):
        # The wait was unregistered: it's not safe to destroy the Overlapped
        # object
        self._ov = None

    def _unregister_wait(self):
        if not self._registered:
            return
        self._registered = False

        wait_handle = self._wait_handle
        self._wait_handle = None
        try:
            _overlapped.UnregisterWait(wait_handle)
        except OSError as exc:
            if exc.winerror != _overlapped.ERROR_IO_PENDING:
                context = {
                    'message': 'Failed to unregister the wait handle',
                    'exception': exc,
                    'future': self,
                }
                if self._source_traceback:
                    context['source_traceback'] = self._source_traceback
                self._loop.call_exception_handler(context)
                return
            # ERROR_IO_PENDING means that the unregister is pending

        self._unregister_wait_cb(None)

    def cancel(self):
        result = super(_WaitHandleFuture, self).cancel()
        if self._ov is not None:
            # signal the cancellation to the overlapped object
            _overlapped.PostQueuedCompletionStatus(self._iocp, True,
                                                   0, self._ov.address)
        self._unregister_wait()
        return result
        return super(_BaseWaitHandleFuture, self).cancel()

    def set_exception(self, exception):
        super(_WaitHandleFuture, self).set_exception(exception)
        self._unregister_wait()
        super(_BaseWaitHandleFuture, self).set_exception(exception)

    def set_result(self, result):
        super(_WaitHandleFuture, self).set_result(result)
        self._unregister_wait()
        super(_BaseWaitHandleFuture, self).set_result(result)


class _WaitCancelFuture(_BaseWaitHandleFuture):
    """Subclass of Future which represents a wait for the cancellation of a
    _WaitHandleFuture using an event.
    """

    def __init__(self, ov, event, wait_handle, loop=None):
        super(_WaitCancelFuture, self).__init__(ov, event, wait_handle,
                                                loop=loop)

        self._done_callback = None

    def cancel(self):
        raise RuntimeError("_WaitCancelFuture must not be cancelled")

    def _schedule_callbacks(self):
        super(_WaitCancelFuture, self)._schedule_callbacks()
        if self._done_callback is not None:
            self._done_callback(self)


class _WaitHandleFuture(_BaseWaitHandleFuture):
    def __init__(self, ov, handle, wait_handle, proactor, loop=None):
        super(_WaitHandleFuture, self).__init__(ov, handle, wait_handle,
                                                loop=loop)
        self._proactor = proactor
        self._unregister_proactor = True
        self._event = _overlapped.CreateEvent(None, True, False, None)
        self._event_fut = None

    def _unregister_wait_cb(self, fut):
        if self._event is not None:
            _winapi.CloseHandle(self._event)
            self._event = None
            self._event_fut = None

        # If the wait was cancelled, the wait may never be signalled, so
        # it's required to unregister it. Otherwise, IocpProactor.close() will
        # wait forever for an event which will never come.
        #
        # If the IocpProactor already received the event, it's safe to call
        # _unregister() because we kept a reference to the Overlapped object
        # which is used as a unique key.
        self._proactor._unregister(self._ov)
        self._proactor = None

        super(_WaitHandleFuture, self)._unregister_wait_cb(fut)

    def _unregister_wait(self):
        if not self._registered:
            return
        self._registered = False

        wait_handle = self._wait_handle
        self._wait_handle = None
        try:
            _overlapped.UnregisterWaitEx(wait_handle, self._event)
        except OSError as exc:
            if exc.winerror != _overlapped.ERROR_IO_PENDING:
                context = {
                    'message': 'Failed to unregister the wait handle',
                    'exception': exc,
                    'future': self,
                }
                if self._source_traceback:
                    context['source_traceback'] = self._source_traceback
                self._loop.call_exception_handler(context)
                return
            # ERROR_IO_PENDING is not an error, the wait was unregistered

        self._event_fut = self._proactor._wait_cancel(self._event,
                                                      self._unregister_wait_cb)
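Outside of the Windows-specific details, _BaseWaitHandleFuture is an instance of a reusable pattern: override the three completion paths so a resource is released exactly once, whichever way the future ends. A toy, platform-neutral sketch (CleanupFuture is invented for illustration and is not part of trollius):

import trollius


class CleanupFuture(trollius.Future):
    """Release a resource exactly once, whichever way the future ends."""

    def __init__(self, resource, loop=None):
        super(CleanupFuture, self).__init__(loop=loop)
        self._resource = resource

    def _release(self):
        resource, self._resource = self._resource, None
        if resource is not None:
            resource.close()

    def cancel(self):
        self._release()
        return super(CleanupFuture, self).cancel()

    def set_result(self, result):
        self._release()
        super(CleanupFuture, self).set_result(result)

    def set_exception(self, exception):
        self._release()
        super(CleanupFuture, self).set_exception(exception)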
class PipeServer(object):

@@ -145,6 +240,11 @@ class PipeServer(object):
    def __init__(self, address):
        self._address = address
        self._free_instances = weakref.WeakSet()
        # initialize the pipe attribute before calling _server_pipe_handle()
        # because this function can raise an exception and the destructor calls
        # the close() method
        self._pipe = None
        self._accept_pipe_future = None
        self._pipe = self._server_pipe_handle(True)

    def _get_unconnected_pipe(self):

@@ -157,7 +257,7 @@ class PipeServer(object):

    def _server_pipe_handle(self, first):
        # Return a wrapper for a new pipe handle.
        if self._address is None:
        if self.closed():
            return None
        flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED
        if first:

@@ -173,7 +273,13 @@ class PipeServer(object):
        self._free_instances.add(pipe)
        return pipe

    def closed(self):
        return (self._address is None)

    def close(self):
        if self._accept_pipe_future is not None:
            self._accept_pipe_future.cancel()
            self._accept_pipe_future = None
        # Close all instances which have not been connected to by a client.
        if self._address is not None:
            for pipe in self._free_instances:

@@ -216,18 +322,27 @@ class ProactorEventLoop(proactor_events.BaseProactorEventLoop):
    def start_serving_pipe(self, protocol_factory, address):
        server = PipeServer(address)

        def loop(f=None):
        def loop_accept_pipe(f=None):
            pipe = None
            try:
                if f:
                    pipe = f.result()
                    server._free_instances.discard(pipe)

                    if server.closed():
                        # A client connected before the server was closed:
                        # drop the client (close the pipe) and exit
                        pipe.close()
                        return

                    protocol = protocol_factory()
                    self._make_duplex_pipe_transport(
                        pipe, protocol, extra={'addr': address})

                pipe = server._get_unconnected_pipe()
                if pipe is None:
                    return

                f = self._proactor.accept_pipe(pipe)
            except OSError as exc:
                if pipe and pipe.fileno() != -1:

@@ -237,23 +352,42 @@ class ProactorEventLoop(proactor_events.BaseProactorEventLoop):
                        'pipe': pipe,
                    })
                    pipe.close()
                elif self._debug:
                    logger.warning("Accept pipe failed on pipe %r",
                                   pipe, exc_info=True)
            except futures.CancelledError:
                if pipe:
                    pipe.close()
            else:
                f.add_done_callback(loop)
                server._accept_pipe_future = f
                f.add_done_callback(loop_accept_pipe)

        self.call_soon(loop)
        self.call_soon(loop_accept_pipe)
        return [server]

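loop_accept_pipe() re-arms itself through add_done_callback(), which keeps exactly one accept outstanding without a dedicated task. The shape of that pattern, reduced to its skeleton (all names here are illustrative, not trollius API):

def start_accept_loop(loop, accept_once, handle_client):
    """accept_once() must return a future; each completion schedules the
    next accept, mirroring loop_accept_pipe() above."""
    def accept_cb(fut=None):
        if fut is not None:
            handle_client(fut.result())   # previous accept completed
        next_fut = accept_once()          # start the next accept
        next_fut.add_done_callback(accept_cb)

    loop.call_soon(accept_cb)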
    @coroutine
    def _make_subprocess_transport(self, protocol, args, shell,
                                   stdin, stdout, stderr, bufsize,
                                   extra=None, **kwargs):
        waiter = futures.Future(loop=self)
        transp = _WindowsSubprocessTransport(self, protocol, args, shell,
                                             stdin, stdout, stderr, bufsize,
                                             extra=extra, **kwargs)
        yield From(transp._post_init())
                                             waiter=waiter, extra=extra,
                                             **kwargs)
        try:
            yield From(waiter)
        except Exception as exc:
            # Workaround CPython bug #23353: using yield/yield-from in an
            # except block of a generator doesn't properly clear sys.exc_info()
            err = exc
        else:
            err = None

        if err is not None:
            transp.close()
            yield From(transp._wait())
            raise err

        raise Return(transp)

@@ -267,6 +401,7 @@ class IocpProactor(object):
            _overlapped.INVALID_HANDLE_VALUE, NULL, 0, concurrency)
        self._cache = {}
        self._registered = weakref.WeakSet()
        self._unregistered = []
        self._stopped_serving = weakref.WeakSet()

    def __repr__(self):

@@ -284,16 +419,30 @@ class IocpProactor(object):
        self._results = []
        return tmp

    def _result(self, value):
        fut = futures.Future(loop=self._loop)
        fut.set_result(value)
        return fut

    def recv(self, conn, nbytes, flags=0):
        self._register_with_iocp(conn)
        ov = _overlapped.Overlapped(NULL)
        if isinstance(conn, socket.socket):
            wrap_error(ov.WSARecv, conn.fileno(), nbytes, flags)
        else:
            wrap_error(ov.ReadFile, conn.fileno(), nbytes)
        try:
            if isinstance(conn, socket.socket):
                wrap_error(ov.WSARecv, conn.fileno(), nbytes, flags)
            else:
                wrap_error(ov.ReadFile, conn.fileno(), nbytes)
        except BrokenPipeError:
            return self._result(b'')

        def finish_recv(trans, key, ov):
            return wrap_error(ov.getresult)
            try:
                return wrap_error(ov.getresult)
            except WindowsError as exc:
                if exc.winerror == _overlapped.ERROR_NETNAME_DELETED:
                    raise ConnectionResetError(*exc.args)
                else:
                    raise

        return self._register(ov, conn, finish_recv)

@@ -306,7 +455,13 @@ class IocpProactor(object):
            ov.WriteFile(conn.fileno(), buf)

        def finish_send(trans, key, ov):
            return wrap_error(ov.getresult)
            try:
                return wrap_error(ov.getresult)
            except WindowsError as exc:
                if exc.winerror == _overlapped.ERROR_NETNAME_DELETED:
                    raise ConnectionResetError(*exc.args)
                else:
                    raise

        return self._register(ov, conn, finish_send)

@@ -336,7 +491,7 @@ class IocpProactor(object):

        future = self._register(ov, listener, finish_accept)
        coro = accept_coro(future, conn)
        tasks.async(coro, loop=self._loop)
        tasks.ensure_future(coro, loop=self._loop)
        return future

    def connect(self, conn, address):

@@ -365,41 +520,56 @@ class IocpProactor(object):
    def accept_pipe(self, pipe):
        self._register_with_iocp(pipe)
        ov = _overlapped.Overlapped(NULL)
        ov.ConnectNamedPipe(pipe.fileno())
        connected = ov.ConnectNamedPipe(pipe.fileno())

        if connected:
            # ConnectNamedPipe() failed with ERROR_PIPE_CONNECTED which means
            # that the pipe is connected. There is no need to wait for the
            # completion of the connection.
            return self._result(pipe)

        def finish_accept_pipe(trans, key, ov):
            wrap_error(ov.getresult)
            return pipe

        # FIXME: Tulip issue 196: why do we need register=False?
        # See also the comment in the _register() method
        return self._register(ov, pipe, finish_accept_pipe,
                              register=False)
        return self._register(ov, pipe, finish_accept_pipe)

    @coroutine
    def connect_pipe(self, address):
        ov = _overlapped.Overlapped(NULL)
        ov.WaitNamedPipeAndConnect(address, self._iocp, ov.address)
        delay = CONNECT_PIPE_INIT_DELAY
        while True:
            # Unfortunately there is no way to do an overlapped connect to a pipe.
            # Call CreateFile() in a loop until it doesn't fail with
            # ERROR_PIPE_BUSY
            try:
                handle = wrap_error(_overlapped.ConnectPipe, address)
                break
            except WindowsError as exc:
                if exc.winerror != _overlapped.ERROR_PIPE_BUSY:
                    raise

        def finish_connect_pipe(err, handle, ov):
            # err, handle were arguments passed to PostQueuedCompletionStatus()
            # in a function run in a thread pool.
            if err == _overlapped.ERROR_SEM_TIMEOUT:
                # Connection did not succeed within time limit.
                msg = _overlapped.FormatMessage(err)
                raise ConnectionRefusedError(0, msg, None, err)
            elif err != 0:
                msg = _overlapped.FormatMessage(err)
                err_cls = get_error_class(err, None)
                if err_cls is not None:
                    raise err_cls(0, msg, None, err)
                else:
                    raise WindowsError(err, msg)
            else:
                return windows_utils.PipeHandle(handle)
            # ConnectPipe() failed with ERROR_PIPE_BUSY: retry later
            delay = min(delay * 2, CONNECT_PIPE_MAX_DELAY)
            yield From(tasks.sleep(delay, loop=self._loop))

        return self._register(ov, None, finish_connect_pipe, wait_for_post=True)
        raise Return(windows_utils.PipeHandle(handle))

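The new connect_pipe() polls with an exponential backoff instead of parking a thread-pool slot on WaitNamedPipeAndConnect(). The core of that loop, extracted into a generic helper (hypothetical name, constants as defined earlier in this diff):

import trollius
from trollius import From, Return

CONNECT_PIPE_INIT_DELAY = 0.001
CONNECT_PIPE_MAX_DELAY = 0.100


@trollius.coroutine
def retry_busy(attempt, is_busy_error, loop=None):
    """Retry attempt() while it raises a 'busy' error, doubling the delay
    between tries up to CONNECT_PIPE_MAX_DELAY."""
    delay = CONNECT_PIPE_INIT_DELAY
    while True:
        try:
            result = attempt()
            break
        except Exception as exc:
            if not is_busy_error(exc):
                raise
        delay = min(delay * 2, CONNECT_PIPE_MAX_DELAY)
        yield From(trollius.sleep(delay, loop=loop))
    raise Return(result)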
    def wait_for_handle(self, handle, timeout=None):
        """Wait for a handle.

        Return a Future object. The result of the future is True if the wait
        completed, or False if the wait did not complete (on timeout).
        """
        return self._wait_for_handle(handle, timeout, False)

    def _wait_cancel(self, event, done_callback):
        fut = self._wait_for_handle(event, None, True)
        # add_done_callback() cannot be used because the wait may only complete
        # in IocpProactor.close(), while the event loop is not running.
        fut._done_callback = done_callback
        return fut

    def _wait_for_handle(self, handle, timeout, _is_cancel):
        if timeout is None:
            ms = _winapi.INFINITE
        else:

@@ -409,9 +579,13 @@ class IocpProactor(object):

        # We only create ov so we can use ov.address as a key for the cache.
        ov = _overlapped.Overlapped(NULL)
        wh = _overlapped.RegisterWaitWithQueue(
        wait_handle = _overlapped.RegisterWaitWithQueue(
            handle, self._iocp, ov.address, ms)
        f = _WaitHandleFuture(self._iocp, ov, handle, wh, loop=self._loop)
        if _is_cancel:
            f = _WaitCancelFuture(ov, handle, wait_handle, loop=self._loop)
        else:
            f = _WaitHandleFuture(ov, handle, wait_handle, self,
                                  loop=self._loop)
        if f._source_traceback:
            del f._source_traceback[-1]

@@ -424,14 +598,6 @@ class IocpProactor(object):
            # False even though we have not timed out.
            return f._poll()

        if f._poll():
            try:
                result = f._poll()
            except OSError as exc:
                f.set_exception(exc)
            else:
                f.set_result(result)

        self._cache[ov.address] = (f, ov, 0, finish_wait_for_handle)
        return f

@@ -445,15 +611,14 @@ class IocpProactor(object):
        # to avoid sending notifications to completion port of ops
        # that succeed immediately.

    def _register(self, ov, obj, callback,
                  wait_for_post=False, register=True):
    def _register(self, ov, obj, callback):
        # Return a future which will be set with the result of the
        # operation when it completes.  The future's value is actually
        # the value returned by callback().
        f = _OverlappedFuture(ov, loop=self._loop)
        if f._source_traceback:
            del f._source_traceback[-1]
        if not ov.pending and not wait_for_post:
        if not ov.pending:
            # The operation has completed, so no need to postpone the
            # work.  We cannot take this short cut if we need the
            # NumberOfBytes, CompletionKey values returned by

@@ -469,20 +634,22 @@ class IocpProactor(object):
            # Register the overlapped operation to keep a reference to the
            # OVERLAPPED object, otherwise the memory is freed and Windows may
            # read uninitialized memory.
            #
            # For an unknown reason, ConnectNamedPipe() behaves differently:
            # the completion is not notified by GetOverlappedResult() if we
            # already called GetOverlappedResult(). For this specific case, we
            # don't expect notification (register is set to False).
        else:
            register = True
        if register:
            # Register the overlapped operation for later.  Note that
            # we only store obj to prevent it from being garbage
            # collected too early.
            self._cache[ov.address] = (f, ov, obj, callback)

        # Register the overlapped operation for later.  Note that
        # we only store obj to prevent it from being garbage
        # collected too early.
        self._cache[ov.address] = (f, ov, obj, callback)
        return f

    def _unregister(self, ov):
        """Unregister an overlapped object.

        Call this method when its future has been cancelled. The event can
        already be signalled (pending in the proactor event queue). It is also
        safe if the event is never signalled (because it was cancelled).
        """
        self._unregistered.append(ov)

    def _get_accept_socket(self, family):
        s = socket.socket(family)
        s.settimeout(0)

@@ -503,7 +670,7 @@ class IocpProactor(object):
        while True:
            status = _overlapped.GetQueuedCompletionStatus(self._iocp, ms)
            if status is None:
                return
                break
            ms = 0

            err, transferred, key, address = status

@@ -538,6 +705,11 @@ class IocpProactor(object):
                f.set_result(value)
                self._results.append(f)

        # Remove unregistered futures
        for ov in self._unregistered:
            self._cache.pop(ov.address, None)
        del self._unregistered[:]

    def _stop_serving(self, obj):
        # obj is a socket or pipe handle.  It will be closed in
        # BaseProactorEventLoop._stop_serving() which will make any

@@ -547,18 +719,16 @@ class IocpProactor(object):
    def close(self):
        # Cancel remaining registered operations.
        for address, (fut, ov, obj, callback) in list(self._cache.items()):
            if obj is None:
                # The operation was started with connect_pipe() which
                # queues a task to Windows' thread pool.  This cannot
                # be cancelled, so just forget it.
                del self._cache[address]
            # FIXME: Tulip issue 196: remove this case, it should not happen
            elif fut.done() and not fut.cancelled():
                del self._cache[address]
            if fut.cancelled():
                # Nothing to do with cancelled futures
                pass
            elif isinstance(fut, _WaitCancelFuture):
                # _WaitCancelFuture must not be cancelled
                pass
            else:
                try:
                    fut.cancel()
                except WindowsError as exc:
                except OSError as exc:
                    if self._loop is not None:
                        context = {
                            'message': 'Cancelling a future failed',

trollius/windows_utils.py
@@ -8,14 +8,16 @@ import sys
if sys.platform != 'win32':  # pragma: no cover
    raise ImportError('win32 only')

import socket
import itertools
import msvcrt
import os
import socket
import subprocess
import tempfile
import warnings

from . import py33_winapi as _winapi
from . import compat
from .py33_exceptions import wrap_error, BlockingIOError, InterruptedError


@@ -31,49 +33,52 @@ STDOUT = subprocess.STDOUT
_mmap_counter = itertools.count()


# Replacement for socket.socketpair()
if hasattr(socket, 'socketpair'):
    # Since Python 3.5, socket.socketpair() is also available on Windows
    socketpair = socket.socketpair
else:
    # Replacement for socket.socketpair()
    def socketpair(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0):
        """A socket pair usable as a self-pipe, for Windows.

        Origin: https://gist.github.com/4325783, by Geert Jansen.
        Public domain.
        """
        if family == socket.AF_INET:
            host = '127.0.0.1'
        elif family == socket.AF_INET6:
            host = '::1'
        else:
            raise ValueError("Only AF_INET and AF_INET6 socket address "
                             "families are supported")
        if type != socket.SOCK_STREAM:
            raise ValueError("Only SOCK_STREAM socket type is supported")
        if proto != 0:
            raise ValueError("Only protocol zero is supported")

def socketpair(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0):
    """A socket pair usable as a self-pipe, for Windows.

    Origin: https://gist.github.com/4325783, by Geert Jansen. Public domain.
    """
    if family == socket.AF_INET:
        host = '127.0.0.1'
    elif family == socket.AF_INET6:
        host = '::1'
    else:
        raise ValueError("Only AF_INET and AF_INET6 socket address families "
                         "are supported")
    if type != socket.SOCK_STREAM:
        raise ValueError("Only SOCK_STREAM socket type is supported")
    if proto != 0:
        raise ValueError("Only protocol zero is supported")

        # We create a connected TCP socket. Note the trick with setblocking(0)
        # that prevents us from having to create a thread.
        lsock = socket.socket(family, type, proto)
        try:
            lsock.bind((host, 0))
            lsock.listen(1)
            # On IPv6, ignore flow_info and scope_id
            addr, port = lsock.getsockname()[:2]
            csock = socket.socket(family, type, proto)
    # We create a connected TCP socket. Note the trick with setblocking(0)
    # that prevents us from having to create a thread.
    lsock = socket.socket(family, type, proto)
    try:
            csock.setblocking(False)
        lsock.bind((host, 0))
        lsock.listen(1)
        # On IPv6, ignore flow_info and scope_id
        addr, port = lsock.getsockname()[:2]
        csock = socket.socket(family, type, proto)
        try:
            wrap_error(csock.connect, (addr, port))
        except (BlockingIOError, InterruptedError):
            pass
        ssock, _ = lsock.accept()
        csock.setblocking(True)
    except:
        csock.close()
        raise
    finally:
        lsock.close()
    return (ssock, csock)
            csock.setblocking(False)
            try:
                wrap_error(csock.connect, (addr, port))
            except (BlockingIOError, InterruptedError):
                pass
            csock.setblocking(True)
            ssock, _ = lsock.accept()
        except:
            csock.close()
            raise
        finally:
            lsock.close()
        return (ssock, csock)

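Whichever branch wins, callers get the same API. A usage sketch of the self-pipe trick, the classic way to wake a selector from another thread (the import fallback mirrors the hasattr() check above):

import socket

try:
    socketpair = socket.socketpair            # POSIX; Windows on Python 3.5+
except AttributeError:
    from trollius.windows_utils import socketpair  # the fallback defined above

ssock, csock = socketpair()
csock.send(b'\0')             # e.g. from another thread: wake up the loop
assert ssock.recv(1) == b'\0'
ssock.close()
csock.close()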
# Replacement for os.pipe() using handles instead of fds

@@ -113,7 +118,7 @@ def pipe(duplex=False, overlapped=(True, True), bufsize=BUFSIZE):
        address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING,
        flags_and_attribs, _winapi.NULL)

    ov = _winapi.ConnectNamedPipe(h1, True)
    ov = _winapi.ConnectNamedPipe(h1, overlapped=True)
    if hasattr(ov, 'GetOverlappedResult'):
        # _winapi module of Python 3.3
        ov.GetOverlappedResult(True)

@@ -140,19 +145,32 @@ class PipeHandle(object):
    def __init__(self, handle):
        self._handle = handle

    def __repr__(self):
        if self._handle is not None:
            handle = 'handle=%r' % self._handle
        else:
            handle = 'closed'
        return '<%s %s>' % (self.__class__.__name__, handle)

    @property
    def handle(self):
        return self._handle

    def fileno(self):
        if self._handle is None:
            raise ValueError("I/O operation on closed pipe")
        return self._handle

    def close(self, CloseHandle=_winapi.CloseHandle):
        if self._handle != -1:
        if self._handle is not None:
            CloseHandle(self._handle)
            self._handle = -1
            self._handle = None

    __del__ = close
    def __del__(self):
        if self._handle is not None:
            if compat.PY3:
                warnings.warn("unclosed %r" % self, ResourceWarning)
            self.close()

    def __enter__(self):
        return self

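The new __del__ replaces the old `__del__ = close` alias so that forgetting to close a pipe is loud rather than silent. The same finalizer pattern in miniature (the version guard matters because ResourceWarning does not exist on Python 2, which is why the diff checks compat.PY3):

import sys
import warnings


class HandleLike(object):
    """Toy version of the PipeHandle finalizer above (illustrative only)."""

    def __init__(self):
        self._closed = False

    def close(self):
        self._closed = True

    def __del__(self):
        if not self._closed:
            if sys.version_info >= (3,):
                warnings.warn("unclosed %r" % self, ResourceWarning)
            self.close()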
12
update-asyncio-step1.sh
Executable file
@@ -0,0 +1,12 @@
set -e -x
git checkout trollius
git pull
git checkout master
git pull https://github.com/python/asyncio.git

git checkout trollius
# rename-threshold=25: a similarity of 25% is enough to consider two files
# rename candidates
git merge -X rename-threshold=25 master

echo "Now run ./update-asyncio-step2.sh"
36
update-asyncio-step2.sh
Executable file
@@ -0,0 +1,36 @@
set -e

# Check for merge conflicts
if $(git status --porcelain|grep -q '^.U '); then
    echo "Fix the following conflicts:"
    git status
    exit 1
fi

# Ensure that yield from is not used
if $(git diff|grep -q 'yield from'); then
    echo "yield from present in changed code!"
    git diff | grep 'yield from' -B5 -A3
    exit 1
fi

# Ensure that mock patches the trollius module, not asyncio
if $(grep -q 'patch.*asyncio' tests/*.py); then
    echo "Fix the following patch lines in tests/"
    grep 'patch.*asyncio' tests/*.py
    exit 1
fi

# Python 2.6 compatibility
if $(grep -q -E '\{[^0-9].*format' */*.py); then
    echo "Issues with Python 2.6 compatibility:"
    grep -E '\{[^0-9].*format' */*.py
    exit 1
fi
if $(grep -q -F 'super()' */*.py); then
    echo "Issues with Python 2.6 compatibility:"
    grep -F 'super()' */*.py
    exit 1
fi

echo "Now run ./update-asyncio-step3.sh"
10
update-asyncio-step3.sh
Executable file
@@ -0,0 +1,10 @@
set -e -x
./update-asyncio-step2.sh
tox -e py27,py34

git status
echo
echo "Now type:"
echo "git commit -m 'Merge asyncio into trollius'"
echo
echo "You may have to add unstaged files"