Add run_aiotest.py
4  .hgeol  Normal file
@@ -0,0 +1,4 @@
[patterns]
** = native
.hgignore = native
.hgeol = native
14  .hgignore  Normal file
@@ -0,0 +1,14 @@
.*\.py[co]$
.*~$
.*\.orig$
.*\#.*$
.*@.*$
\.coverage$
htmlcov$
\.DS_Store$
venv$
distribute_setup.py$
distribute-\d+.\d+.\d+.tar.gz$
build$
dist$
.*\.egg-info$
26  AUTHORS  Normal file
@@ -0,0 +1,26 @@
A. Jesse Jiryu Davis <jesse AT mongodb.com>
Aaron Griffith
Andrew Svetlov <andrew.svetlov AT gmail.com>
Anthony Baire
Antoine Pitrou <solipsis AT pitrou.net>
Arnaud Faure
Aymeric Augustin
Brett Cannon
Charles-François Natali <cf.natali AT gmail.com>
Christian Heimes
Donald Stufft
Eli Bendersky <eliben AT gmail.com>
Geert Jansen <geertj AT gmail.com>
Giampaolo Rodola' <g.rodola AT gmail.com>
Guido van Rossum <guido AT python.org>: creator of the Tulip project and author of PEP 3156
Gustavo Carneiro <gjcarneiro AT gmail.com>
Jeff Quast
Jonathan Slenders <jonathan.slenders AT gmail.com>
Nikolay Kim <fafhrd91 AT gmail.com>
Richard Oudkerk <shibturn AT gmail.com>
Saúl Ibarra Corretgé <saghul AT gmail.com>
Serhiy Storchaka
Vajrasky Kok
Victor Stinner <victor.stinner AT gmail.com>
Vladimir Kryachko
Yury Selivanov <yselivanov AT gmail.com>
201  COPYING  Normal file
@@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!)  The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
11  MANIFEST.in  Normal file
@@ -0,0 +1,11 @@
include AUTHORS COPYING
include Makefile
include overlapped.c pypi.bat
include check.py runtests.py
include update_stdlib.sh

recursive-include examples *.py
recursive-include tests *.crt
recursive-include tests *.key
recursive-include tests *.pem
recursive-include tests *.py
60  Makefile  Normal file
@@ -0,0 +1,60 @@
# Some simple testing tasks (sorry, UNIX only).

PYTHON=python3
VERBOSE=$(V)
V=	0
FLAGS=

test:
	$(PYTHON) runtests.py -v $(VERBOSE) $(FLAGS)

vtest:
	$(PYTHON) runtests.py -v 1 $(FLAGS)

testloop:
	while sleep 1; do $(PYTHON) runtests.py -v $(VERBOSE) $(FLAGS); done

# See runtests.py for coverage installation instructions.
cov coverage:
	$(PYTHON) runtests.py --coverage -v $(VERBOSE) $(FLAGS)

check:
	$(PYTHON) check.py

# Requires "pip install pep8".
pep8: check
	pep8 --ignore E125,E127,E226 tests asyncio

clean:
	rm -rf `find . -name __pycache__`
	rm -f `find . -type f -name '*.py[co]' `
	rm -f `find . -type f -name '*~' `
	rm -f `find . -type f -name '.*~' `
	rm -f `find . -type f -name '@*' `
	rm -f `find . -type f -name '#*#' `
	rm -f `find . -type f -name '*.orig' `
	rm -f `find . -type f -name '*.rej' `
	rm -rf dist
	rm -f .coverage
	rm -rf htmlcov
	rm -rf build
	rm -rf asyncio.egg-info
	rm -f MANIFEST


# For distribution builders only!
# Push a source distribution for Python 3.3 to PyPI.
# You must update the version in setup.py first.
# A PyPI user configuration in ~/.pypirc is required;
# you can create a suitable configuration using
#   python setup.py register
pypi: clean
	python3.3 setup.py sdist upload

# The corresponding action on Windows is pypi.bat. For that to work,
# you need to install wheel and setuptools. The easiest way is to get
# pip using the get-pip.py script found here:
# https://pip.pypa.io/en/latest/installing.html#install-pip
# That will install setuptools and pip; then you can just do
#   \Python33\python.exe -m pip install wheel
# after which the pypi.bat script should work.
44  README  Normal file
@@ -0,0 +1,44 @@
Tulip is the codename for my reference implementation of PEP 3156.

PEP 3156: http://www.python.org/dev/peps/pep-3156/

*** This requires Python 3.3 or later! ***

Copyright/license: Open source, Apache 2.0.  Enjoy.

Master Mercurial repo: http://code.google.com/p/tulip/

The actual code lives in the 'asyncio' subdirectory.
Tests are in the 'tests' subdirectory.

To run tests:
  - make test

To run coverage (coverage package is required):
  - make coverage

On Windows, things are a little more complicated.  Assume 'P' is your
Python binary (for example C:\Python33\python.exe).

You must first build the _overlapped.pyd extension and have it placed
in the asyncio directory, as follows:

    C> P setup.py build_ext --inplace

If this complains about vcvars.bat, you probably don't have the
required version of Visual Studio installed.  Compiling extensions for
Python 3.3 requires Microsoft Visual C++ 2010 (MSVC 10.0) of any
edition; you can download Visual Studio Express 2010 for free from
http://www.visualstudio.com/downloads (scroll down to Visual C++ 2010
Express).

Once you have built the _overlapped.pyd extension successfully you can
run the tests as follows:

    C> P runtests.py

And coverage as follows:

    C> P runtests.py --coverage

--Guido van Rossum <guido@python.org>
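A minimal usage sketch of the API the README describes (an editorial addition, not part of the committed tree); it assumes the 'asyncio' package from this commit is importable and uses the pre-3.5 "yield from" coroutine style of PEP 3156:

    import asyncio

    @asyncio.coroutine
    def greet(name):
        # Suspend without blocking the event loop.
        yield from asyncio.sleep(0.1)
        return 'Hello, %s!' % name

    loop = asyncio.get_event_loop()
    print(loop.run_until_complete(greet('world')))   # -> Hello, world!
    loop.close()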
48  asyncio/__init__.py  Normal file
@@ -0,0 +1,48 @@
"""The asyncio package, tracking PEP 3156."""

import sys

# The selectors module is in the stdlib in Python 3.4 but not in 3.3.
# Do this first, so the other submodules can use "from . import selectors".
# Prefer asyncio/selectors.py over the stdlib one, as ours may be newer.
try:
    from . import selectors
except ImportError:
    import selectors  # Will also be exported.

if sys.platform == 'win32':
    # Similar thing for _overlapped.
    try:
        from . import _overlapped
    except ImportError:
        import _overlapped  # Will also be exported.

# This relies on each of the submodules having an __all__ variable.
from .coroutines import *
from .events import *
from .futures import *
from .locks import *
from .protocols import *
from .queues import *
from .streams import *
from .subprocess import *
from .tasks import *
from .transports import *

__all__ = (coroutines.__all__ +
           events.__all__ +
           futures.__all__ +
           locks.__all__ +
           protocols.__all__ +
           queues.__all__ +
           streams.__all__ +
           subprocess.__all__ +
           tasks.__all__ +
           transports.__all__)

if sys.platform == 'win32':  # pragma: no cover
    from .windows_events import *
    __all__ += windows_events.__all__
else:
    from .unix_events import *  # pragma: no cover
    __all__ += unix_events.__all__
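For illustration (an editorial sketch, not diff content): the star-imports above mean that every name listed in a submodule's __all__ is reachable from the package root, so user code never needs to import the submodules directly:

    import asyncio

    # 'Future' comes from asyncio.futures, 'coroutine' from
    # asyncio.coroutines; both were merged into the package-level
    # __all__ built above.
    assert 'Future' in asyncio.__all__
    assert asyncio.Future is asyncio.futures.Future
    assert callable(asyncio.coroutine)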
1113  asyncio/base_events.py  Normal file
(file diff suppressed because it is too large)
197  asyncio/base_subprocess.py  Normal file
@@ -0,0 +1,197 @@
import collections
import subprocess

from . import protocols
from . import transports
from .coroutines import coroutine
from .log import logger


class BaseSubprocessTransport(transports.SubprocessTransport):

    def __init__(self, loop, protocol, args, shell,
                 stdin, stdout, stderr, bufsize,
                 extra=None, **kwargs):
        super().__init__(extra)
        self._protocol = protocol
        self._loop = loop
        self._pid = None

        self._pipes = {}
        if stdin == subprocess.PIPE:
            self._pipes[0] = None
        if stdout == subprocess.PIPE:
            self._pipes[1] = None
        if stderr == subprocess.PIPE:
            self._pipes[2] = None
        self._pending_calls = collections.deque()
        self._finished = False
        self._returncode = None
        self._start(args=args, shell=shell, stdin=stdin, stdout=stdout,
                    stderr=stderr, bufsize=bufsize, **kwargs)
        self._pid = self._proc.pid
        self._extra['subprocess'] = self._proc
        if self._loop.get_debug():
            if isinstance(args, (bytes, str)):
                program = args
            else:
                program = args[0]
            logger.debug('process %r created: pid %s',
                         program, self._pid)

    def __repr__(self):
        info = [self.__class__.__name__, 'pid=%s' % self._pid]
        if self._returncode is not None:
            info.append('returncode=%s' % self._returncode)

        stdin = self._pipes.get(0)
        if stdin is not None:
            info.append('stdin=%s' % stdin.pipe)

        stdout = self._pipes.get(1)
        stderr = self._pipes.get(2)
        if stdout is not None and stderr is stdout:
            info.append('stdout=stderr=%s' % stdout.pipe)
        else:
            if stdout is not None:
                info.append('stdout=%s' % stdout.pipe)
            if stderr is not None:
                info.append('stderr=%s' % stderr.pipe)

        return '<%s>' % ' '.join(info)

    def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
        raise NotImplementedError

    def _make_write_subprocess_pipe_proto(self, fd):
        raise NotImplementedError

    def _make_read_subprocess_pipe_proto(self, fd):
        raise NotImplementedError

    def close(self):
        for proto in self._pipes.values():
            proto.pipe.close()
        if self._returncode is None:
            self.terminate()

    def get_pid(self):
        return self._pid

    def get_returncode(self):
        return self._returncode

    def get_pipe_transport(self, fd):
        if fd in self._pipes:
            return self._pipes[fd].pipe
        else:
            return None

    def send_signal(self, signal):
        self._proc.send_signal(signal)

    def terminate(self):
        self._proc.terminate()

    def kill(self):
        self._proc.kill()

    @coroutine
    def _post_init(self):
        proc = self._proc
        loop = self._loop
        if proc.stdin is not None:
            _, pipe = yield from loop.connect_write_pipe(
                lambda: WriteSubprocessPipeProto(self, 0),
                proc.stdin)
            self._pipes[0] = pipe
        if proc.stdout is not None:
            _, pipe = yield from loop.connect_read_pipe(
                lambda: ReadSubprocessPipeProto(self, 1),
                proc.stdout)
            self._pipes[1] = pipe
        if proc.stderr is not None:
            _, pipe = yield from loop.connect_read_pipe(
                lambda: ReadSubprocessPipeProto(self, 2),
                proc.stderr)
            self._pipes[2] = pipe

        assert self._pending_calls is not None

        self._loop.call_soon(self._protocol.connection_made, self)
        for callback, data in self._pending_calls:
            self._loop.call_soon(callback, *data)
        self._pending_calls = None

    def _call(self, cb, *data):
        if self._pending_calls is not None:
            self._pending_calls.append((cb, data))
        else:
            self._loop.call_soon(cb, *data)

    def _pipe_connection_lost(self, fd, exc):
        self._call(self._protocol.pipe_connection_lost, fd, exc)
        self._try_finish()

    def _pipe_data_received(self, fd, data):
        self._call(self._protocol.pipe_data_received, fd, data)

    def _process_exited(self, returncode):
        assert returncode is not None, returncode
        assert self._returncode is None, self._returncode
        if self._loop.get_debug():
            logger.info('%r exited with return code %r',
                        self, returncode)
        self._returncode = returncode
        self._call(self._protocol.process_exited)
        self._try_finish()

    def _try_finish(self):
        assert not self._finished
        if self._returncode is None:
            return
        if all(p is not None and p.disconnected
               for p in self._pipes.values()):
            self._finished = True
            self._loop.call_soon(self._call_connection_lost, None)

    def _call_connection_lost(self, exc):
        try:
            self._protocol.connection_lost(exc)
        finally:
            self._proc = None
            self._protocol = None
            self._loop = None


class WriteSubprocessPipeProto(protocols.BaseProtocol):

    def __init__(self, proc, fd):
        self.proc = proc
        self.fd = fd
        self.pipe = None
        self.disconnected = False

    def connection_made(self, transport):
        self.pipe = transport

    def __repr__(self):
        return ('<%s fd=%s pipe=%r>'
                % (self.__class__.__name__, self.fd, self.pipe))

    def connection_lost(self, exc):
        self.disconnected = True
        self.proc._pipe_connection_lost(self.fd, exc)

    def pause_writing(self):
        self.proc._protocol.pause_writing()

    def resume_writing(self):
        self.proc._protocol.resume_writing()


class ReadSubprocessPipeProto(WriteSubprocessPipeProto,
                              protocols.Protocol):

    def data_received(self, data):
        self.proc._pipe_data_received(self.fd, data)
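To make the callback flow above concrete, here is a hedged sketch (an editorial addition, not part of the commit) of a SubprocessProtocol driven by BaseSubprocessTransport: pipe_data_received() and process_exited() below are what _pipe_data_received() and _process_exited() eventually dispatch to via _call(). It assumes a Unix event loop and the subprocess_exec() API declared in events.py:

    import asyncio
    import sys

    class CollectOutput(asyncio.SubprocessProtocol):
        def __init__(self, exit_future):
            self.exit_future = exit_future
            self.output = bytearray()

        def pipe_data_received(self, fd, data):
            # fd 1 is the child's stdout pipe.
            self.output.extend(data)

        def process_exited(self):
            self.exit_future.set_result(True)

    loop = asyncio.get_event_loop()
    done = asyncio.Future(loop=loop)
    transport, protocol = loop.run_until_complete(
        loop.subprocess_exec(lambda: CollectOutput(done),
                             sys.executable, '-c', 'print("hi")'))
    loop.run_until_complete(done)
    transport.close()      # closes the pipes; see close() above
    print(bytes(protocol.output))
    loop.close()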
7  asyncio/constants.py  Normal file
@@ -0,0 +1,7 @@
"""Constants."""

# After the connection is lost, log warnings after this many write()s.
LOG_THRESHOLD_FOR_CONNLOST_WRITES = 5

# Seconds to wait before retrying accept().
ACCEPT_RETRY_DELAY = 1
195  asyncio/coroutines.py  Normal file
@@ -0,0 +1,195 @@
__all__ = ['coroutine',
           'iscoroutinefunction', 'iscoroutine']

import functools
import inspect
import opcode
import os
import sys
import traceback
import types

from . import events
from . import futures
from .log import logger


# Opcode of the "yield from" instruction
_YIELD_FROM = opcode.opmap['YIELD_FROM']

# If you set _DEBUG to true, @coroutine will wrap the resulting
# generator objects in a CoroWrapper instance (defined below).  That
# instance will log a message when the generator is never iterated
# over, which may happen when you forget to use "yield from" with a
# coroutine call.  Note that the value of the _DEBUG flag is taken
# when the decorator is used, so to be of any use it must be set
# before you define your coroutines.  A downside of using this feature
# is that tracebacks show entries for the CoroWrapper.__next__ method
# when _DEBUG is true.
_DEBUG = (not sys.flags.ignore_environment
          and bool(os.environ.get('PYTHONASYNCIODEBUG')))


# Check for CPython issue #21209
def has_yield_from_bug():
    class MyGen:
        def __init__(self):
            self.send_args = None
        def __iter__(self):
            return self
        def __next__(self):
            return 42
        def send(self, *what):
            self.send_args = what
            return None
    def yield_from_gen(gen):
        yield from gen
    value = (1, 2, 3)
    gen = MyGen()
    coro = yield_from_gen(gen)
    next(coro)
    coro.send(value)
    return gen.send_args != (value,)
_YIELD_FROM_BUG = has_yield_from_bug()
del has_yield_from_bug


class CoroWrapper:
    # Wrapper for coroutine object in _DEBUG mode.

    def __init__(self, gen, func):
        assert inspect.isgenerator(gen), gen
        self.gen = gen
        self.func = func
        self._source_traceback = traceback.extract_stack(sys._getframe(1))
        # __name__, __qualname__, __doc__ attributes are set by the
        # coroutine() decorator

    def __repr__(self):
        coro_repr = _format_coroutine(self)
        if self._source_traceback:
            frame = self._source_traceback[-1]
            coro_repr += ', created at %s:%s' % (frame[0], frame[1])
        return '<%s %s>' % (self.__class__.__name__, coro_repr)

    def __iter__(self):
        return self

    def __next__(self):
        return next(self.gen)

    if _YIELD_FROM_BUG:
        # For CPython issue #21209: using "yield from" and a custom
        # generator, generator.send(tuple) unpacks the tuple instead of
        # passing the tuple unchanged.  Check if the caller is a generator
        # using "yield from" to decide if the parameter should be unpacked
        # or not.
        def send(self, *value):
            frame = sys._getframe()
            caller = frame.f_back
            assert caller.f_lasti >= 0
            if caller.f_code.co_code[caller.f_lasti] != _YIELD_FROM:
                value = value[0]
            return self.gen.send(value)
    else:
        def send(self, value):
            return self.gen.send(value)

    def throw(self, exc):
        return self.gen.throw(exc)

    def close(self):
        return self.gen.close()

    @property
    def gi_frame(self):
        return self.gen.gi_frame

    @property
    def gi_running(self):
        return self.gen.gi_running

    @property
    def gi_code(self):
        return self.gen.gi_code

    def __del__(self):
        # Be careful accessing self.gen.gi_frame -- self.gen might not exist.
        gen = getattr(self, 'gen', None)
        frame = getattr(gen, 'gi_frame', None)
        if frame is not None and frame.f_lasti == -1:
            msg = '%r was never yielded from' % self
            tb = getattr(self, '_source_traceback', ())
            if tb:
                tb = ''.join(traceback.format_list(tb))
                msg += ('\nCoroutine object created at '
                        '(most recent call last):\n')
                msg += tb.rstrip()
            logger.error(msg)


def coroutine(func):
    """Decorator to mark coroutines.

    If the coroutine is not yielded from before it is destroyed,
    an error message is logged.
    """
    if inspect.isgeneratorfunction(func):
        coro = func
    else:
        @functools.wraps(func)
        def coro(*args, **kw):
            res = func(*args, **kw)
            if isinstance(res, futures.Future) or inspect.isgenerator(res):
                res = yield from res
            return res

    if not _DEBUG:
        wrapper = coro
    else:
        @functools.wraps(func)
        def wrapper(*args, **kwds):
            w = CoroWrapper(coro(*args, **kwds), func)
            if w._source_traceback:
                del w._source_traceback[-1]
            w.__name__ = func.__name__
            if hasattr(func, '__qualname__'):
                w.__qualname__ = func.__qualname__
            w.__doc__ = func.__doc__
            return w

    wrapper._is_coroutine = True  # For iscoroutinefunction().
    return wrapper


def iscoroutinefunction(func):
    """Return True if func is a decorated coroutine function."""
    return getattr(func, '_is_coroutine', False)


_COROUTINE_TYPES = (types.GeneratorType, CoroWrapper)


def iscoroutine(obj):
    """Return True if obj is a coroutine object."""
    return isinstance(obj, _COROUTINE_TYPES)


def _format_coroutine(coro):
    assert iscoroutine(coro)
    coro_name = getattr(coro, '__qualname__', coro.__name__)

    filename = coro.gi_code.co_filename
    if (isinstance(coro, CoroWrapper)
            and not inspect.isgeneratorfunction(coro.func)):
        filename, lineno = events._get_function_source(coro.func)
        if coro.gi_frame is None:
            coro_repr = '%s() done, defined at %s:%s' % (coro_name, filename, lineno)
        else:
            coro_repr = '%s() running, defined at %s:%s' % (coro_name, filename, lineno)
    elif coro.gi_frame is not None:
        lineno = coro.gi_frame.f_lineno
        coro_repr = '%s() running at %s:%s' % (coro_name, filename, lineno)
    else:
        lineno = coro.gi_code.co_firstlineno
        coro_repr = '%s() done, defined at %s:%s' % (coro_name, filename, lineno)

    return coro_repr
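A short usage sketch for the decorator and the two predicates defined above (an editorial addition, not diff content):

    import asyncio

    @asyncio.coroutine
    def add(a, b):
        # Not a generator function, so coroutine() wraps it in coro(),
        # making the result usable with "yield from".
        return a + b

    assert asyncio.iscoroutinefunction(add)   # _is_coroutine was set
    coro = add(1, 2)
    assert asyncio.iscoroutine(coro)          # a generator, or a CoroWrapper
                                              # in _DEBUG mode

    loop = asyncio.get_event_loop()
    assert loop.run_until_complete(coro) == 3
    loop.close()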
597  asyncio/events.py  Normal file
@@ -0,0 +1,597 @@
"""Event loop and event loop policy."""

__all__ = ['AbstractEventLoopPolicy',
           'AbstractEventLoop', 'AbstractServer',
           'Handle', 'TimerHandle',
           'get_event_loop_policy', 'set_event_loop_policy',
           'get_event_loop', 'set_event_loop', 'new_event_loop',
           'get_child_watcher', 'set_child_watcher',
           ]

import functools
import inspect
import reprlib
import socket
import subprocess
import sys
import threading
import traceback


_PY34 = sys.version_info >= (3, 4)


def _get_function_source(func):
    if _PY34:
        func = inspect.unwrap(func)
    elif hasattr(func, '__wrapped__'):
        func = func.__wrapped__
    if inspect.isfunction(func):
        code = func.__code__
        return (code.co_filename, code.co_firstlineno)
    if isinstance(func, functools.partial):
        return _get_function_source(func.func)
    if _PY34 and isinstance(func, functools.partialmethod):
        return _get_function_source(func.func)
    return None


def _format_args(args):
    """Format function arguments.

    Special case for a single parameter: ('hello',) is formatted as ('hello').
    """
    # use reprlib to limit the length of the output
    args_repr = reprlib.repr(args)
    if len(args) == 1 and args_repr.endswith(',)'):
        args_repr = args_repr[:-2] + ')'
    return args_repr


def _format_callback(func, args, suffix=''):
    if isinstance(func, functools.partial):
        if args is not None:
            suffix = _format_args(args) + suffix
        return _format_callback(func.func, func.args, suffix)

    func_repr = getattr(func, '__qualname__', None)
    if not func_repr:
        func_repr = repr(func)

    if args is not None:
        func_repr += _format_args(args)
    if suffix:
        func_repr += suffix

    source = _get_function_source(func)
    if source:
        func_repr += ' at %s:%s' % source
    return func_repr


class Handle:
    """Object returned by callback registration methods."""

    __slots__ = ('_callback', '_args', '_cancelled', '_loop',
                 '_source_traceback', '_repr', '__weakref__')

    def __init__(self, callback, args, loop):
        assert not isinstance(callback, Handle), 'A Handle is not a callback'
        self._loop = loop
        self._callback = callback
        self._args = args
        self._cancelled = False
        self._repr = None
        if self._loop.get_debug():
            self._source_traceback = traceback.extract_stack(sys._getframe(1))
        else:
            self._source_traceback = None

    def _repr_info(self):
        info = [self.__class__.__name__]
        if self._cancelled:
            info.append('cancelled')
        if self._callback is not None:
            info.append(_format_callback(self._callback, self._args))
        if self._source_traceback:
            frame = self._source_traceback[-1]
            info.append('created at %s:%s' % (frame[0], frame[1]))
        return info

    def __repr__(self):
        if self._repr is not None:
            return self._repr
        info = self._repr_info()
        return '<%s>' % ' '.join(info)

    def cancel(self):
        if not self._cancelled:
            self._cancelled = True
            if self._loop.get_debug():
                # Keep a representation in debug mode to keep the callback
                # and parameters.  For example, to log the warning
                # "Executing <Handle...> took 2.5 seconds"
                self._repr = repr(self)
            self._callback = None
            self._args = None

    def _run(self):
        try:
            self._callback(*self._args)
        except Exception as exc:
            cb = _format_callback(self._callback, self._args)
            msg = 'Exception in callback {}'.format(cb)
            context = {
                'message': msg,
                'exception': exc,
                'handle': self,
            }
            if self._source_traceback:
                context['source_traceback'] = self._source_traceback
            self._loop.call_exception_handler(context)
        self = None  # Needed to break cycles when an exception occurs.


class TimerHandle(Handle):
    """Object returned by timed callback registration methods."""

    __slots__ = ['_scheduled', '_when']

    def __init__(self, when, callback, args, loop):
        assert when is not None
        super().__init__(callback, args, loop)
        if self._source_traceback:
            del self._source_traceback[-1]
        self._when = when
        self._scheduled = False

    def _repr_info(self):
        info = super()._repr_info()
        pos = 2 if self._cancelled else 1
        info.insert(pos, 'when=%s' % self._when)
        return info

    def __hash__(self):
        return hash(self._when)

    def __lt__(self, other):
        return self._when < other._when

    def __le__(self, other):
        if self._when < other._when:
            return True
        return self.__eq__(other)

    def __gt__(self, other):
        return self._when > other._when

    def __ge__(self, other):
        if self._when > other._when:
            return True
        return self.__eq__(other)

    def __eq__(self, other):
        if isinstance(other, TimerHandle):
            return (self._when == other._when and
                    self._callback == other._callback and
                    self._args == other._args and
                    self._cancelled == other._cancelled)
        return NotImplemented

    def __ne__(self, other):
        equal = self.__eq__(other)
        return NotImplemented if equal is NotImplemented else not equal

    def cancel(self):
        if not self._cancelled:
            self._loop._timer_handle_cancelled(self)
        super().cancel()


class AbstractServer:
    """Abstract server returned by create_server()."""

    def close(self):
        """Stop serving.  This leaves existing connections open."""
        return NotImplemented

    def wait_closed(self):
        """Coroutine to wait until service is closed."""
        return NotImplemented


class AbstractEventLoop:
    """Abstract event loop."""

    # Running and stopping the event loop.

    def run_forever(self):
        """Run the event loop until stop() is called."""
        raise NotImplementedError

    def run_until_complete(self, future):
        """Run the event loop until a Future is done.

        Return the Future's result, or raise its exception.
        """
        raise NotImplementedError

    def stop(self):
        """Stop the event loop as soon as reasonable.

        Exactly how soon that is may depend on the implementation, but
        no more I/O callbacks should be scheduled.
        """
        raise NotImplementedError

    def is_running(self):
        """Return whether the event loop is currently running."""
        raise NotImplementedError

    def is_closed(self):
        """Return True if the event loop was closed."""
        raise NotImplementedError

    def close(self):
        """Close the loop.

        The loop should not be running.

        This is idempotent and irreversible.

        No other methods should be called after this one.
        """
        raise NotImplementedError

    # Methods scheduling callbacks.  All these return Handles.

    def _timer_handle_cancelled(self, handle):
        """Notification that a TimerHandle has been cancelled."""
        raise NotImplementedError

    def call_soon(self, callback, *args):
        return self.call_later(0, callback, *args)

    def call_later(self, delay, callback, *args):
        raise NotImplementedError

    def call_at(self, when, callback, *args):
        raise NotImplementedError

    def time(self):
        raise NotImplementedError

    # Method scheduling a coroutine object: create a task.

    def create_task(self, coro):
        raise NotImplementedError

    # Methods for interacting with threads.

    def call_soon_threadsafe(self, callback, *args):
        raise NotImplementedError

    def run_in_executor(self, executor, callback, *args):
        raise NotImplementedError

    def set_default_executor(self, executor):
        raise NotImplementedError

    # Network I/O methods returning Futures.

    def getaddrinfo(self, host, port, *, family=0, type=0, proto=0, flags=0):
        raise NotImplementedError

    def getnameinfo(self, sockaddr, flags=0):
        raise NotImplementedError

    def create_connection(self, protocol_factory, host=None, port=None, *,
                          ssl=None, family=0, proto=0, flags=0, sock=None,
                          local_addr=None, server_hostname=None):
        raise NotImplementedError

    def create_server(self, protocol_factory, host=None, port=None, *,
                      family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE,
                      sock=None, backlog=100, ssl=None, reuse_address=None):
        """A coroutine which creates a TCP server bound to host and port.

        The return value is a Server object which can be used to stop
        the service.

        If host is an empty string or None all interfaces are assumed
        and a list of multiple sockets will be returned (most likely
        one for IPv4 and another one for IPv6).

        family can be set to either AF_INET or AF_INET6 to force the
        socket to use IPv4 or IPv6. If not set it will be determined
        from host (defaults to AF_UNSPEC).

        flags is a bitmask for getaddrinfo().

        sock can optionally be specified in order to use a preexisting
        socket object.

        backlog is the maximum number of queued connections passed to
        listen() (defaults to 100).

        ssl can be set to an SSLContext to enable SSL over the
        accepted connections.

        reuse_address tells the kernel to reuse a local socket in
        TIME_WAIT state, without waiting for its natural timeout to
        expire. If not specified will automatically be set to True on
        UNIX.
        """
        raise NotImplementedError

    def create_unix_connection(self, protocol_factory, path, *,
                               ssl=None, sock=None,
                               server_hostname=None):
        raise NotImplementedError

    def create_unix_server(self, protocol_factory, path, *,
                           sock=None, backlog=100, ssl=None):
        """A coroutine which creates a UNIX Domain Socket server.

        The return value is a Server object, which can be used to stop
        the service.

        path is a str, representing a file system path to bind the
        server socket to.

        sock can optionally be specified in order to use a preexisting
        socket object.

        backlog is the maximum number of queued connections passed to
        listen() (defaults to 100).

        ssl can be set to an SSLContext to enable SSL over the
        accepted connections.
        """
        raise NotImplementedError

    def create_datagram_endpoint(self, protocol_factory,
                                 local_addr=None, remote_addr=None, *,
                                 family=0, proto=0, flags=0):
        raise NotImplementedError

    # Pipes and subprocesses.

    def connect_read_pipe(self, protocol_factory, pipe):
        """Register read pipe in event loop. Set the pipe to non-blocking mode.

        protocol_factory should instantiate an object with the Protocol
        interface.  pipe is a file-like object.
        Return pair (transport, protocol), where transport supports the
        ReadTransport interface."""
        # The reason to accept a file-like object instead of just a file
        # descriptor is that we need to own the pipe and close it when the
        # transport is finished.  Passing f.fileno() can lead to complicated
        # errors if the fd is closed in the pipe transport and f is then
        # closed as well (or vice versa).
        raise NotImplementedError

    def connect_write_pipe(self, protocol_factory, pipe):
        """Register write pipe in event loop.

        protocol_factory should instantiate an object with the BaseProtocol
        interface.  pipe is a file-like object already switched to
        non-blocking mode.
        Return pair (transport, protocol), where transport supports the
        WriteTransport interface."""
        # The reason to accept a file-like object instead of just a file
        # descriptor is that we need to own the pipe and close it when the
        # transport is finished.  Passing f.fileno() can lead to complicated
        # errors if the fd is closed in the pipe transport and f is then
        # closed as well (or vice versa).
        raise NotImplementedError

    def subprocess_shell(self, protocol_factory, cmd, *, stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         **kwargs):
        raise NotImplementedError

    def subprocess_exec(self, protocol_factory, *args, stdin=subprocess.PIPE,
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                        **kwargs):
        raise NotImplementedError

    # Ready-based callback registration methods.
    # The add_*() methods return None.
    # The remove_*() methods return True if something was removed,
    # False if there was nothing to delete.

    def add_reader(self, fd, callback, *args):
        raise NotImplementedError

    def remove_reader(self, fd):
        raise NotImplementedError

    def add_writer(self, fd, callback, *args):
        raise NotImplementedError

    def remove_writer(self, fd):
        raise NotImplementedError

    # Completion based I/O methods returning Futures.

    def sock_recv(self, sock, nbytes):
        raise NotImplementedError

    def sock_sendall(self, sock, data):
        raise NotImplementedError

    def sock_connect(self, sock, address):
        raise NotImplementedError

    def sock_accept(self, sock):
        raise NotImplementedError

    # Signal handling.

    def add_signal_handler(self, sig, callback, *args):
        raise NotImplementedError

    def remove_signal_handler(self, sig):
        raise NotImplementedError

    # Error handlers.

    def set_exception_handler(self, handler):
        raise NotImplementedError

    def default_exception_handler(self, context):
        raise NotImplementedError

    def call_exception_handler(self, context):
        raise NotImplementedError

    # Debug flag management.

    def get_debug(self):
        raise NotImplementedError

    def set_debug(self, enabled):
        raise NotImplementedError


class AbstractEventLoopPolicy:
    """Abstract policy for accessing the event loop."""

    def get_event_loop(self):
        """Get the event loop for the current context.

        Returns an event loop object implementing the BaseEventLoop interface,
        or raises an exception in case no event loop has been set for the
        current context and the current policy does not specify to create one.

        It should never return None."""
        raise NotImplementedError

    def set_event_loop(self, loop):
        """Set the event loop for the current context to loop."""
        raise NotImplementedError

    def new_event_loop(self):
        """Create and return a new event loop object according to this
        policy's rules. If there's need to set this loop as the event loop for
        the current context, set_event_loop must be called explicitly."""
        raise NotImplementedError

    # Child processes handling (Unix only).

    def get_child_watcher(self):
        """Get the watcher for child processes."""
        raise NotImplementedError

    def set_child_watcher(self, watcher):
        """Set the watcher for child processes."""
        raise NotImplementedError


class BaseDefaultEventLoopPolicy(AbstractEventLoopPolicy):
    """Default policy implementation for accessing the event loop.

    In this policy, each thread has its own event loop.  However, we
    only automatically create an event loop by default for the main
    thread; other threads by default have no event loop.

    Other policies may have different rules (e.g. a single global
    event loop, or automatically creating an event loop per thread, or
    using some other notion of context to which an event loop is
    associated).
    """

    _loop_factory = None

    class _Local(threading.local):
        _loop = None
        _set_called = False

    def __init__(self):
        self._local = self._Local()

    def get_event_loop(self):
        """Get the event loop.

        This may be None or an instance of EventLoop.
        """
        if (self._local._loop is None and
                not self._local._set_called and
                isinstance(threading.current_thread(), threading._MainThread)):
            self.set_event_loop(self.new_event_loop())
        assert self._local._loop is not None, \
            ('There is no current event loop in thread %r.' %
             threading.current_thread().name)
        return self._local._loop

    def set_event_loop(self, loop):
        """Set the event loop."""
        self._local._set_called = True
        assert loop is None or isinstance(loop, AbstractEventLoop)
        self._local._loop = loop

    def new_event_loop(self):
        """Create a new event loop.

        You must call set_event_loop() to make this the current event
        loop.
        """
        return self._loop_factory()


# Event loop policy.  The policy itself is always global, even if the
# policy's rules say that there is an event loop per thread (or other
# notion of context).  The default policy is installed by the first
# call to get_event_loop_policy().
_event_loop_policy = None

# Lock for protecting the on-the-fly creation of the event loop policy.
_lock = threading.Lock()


def _init_event_loop_policy():
    global _event_loop_policy
    with _lock:
        if _event_loop_policy is None:  # pragma: no branch
            from . import DefaultEventLoopPolicy
            _event_loop_policy = DefaultEventLoopPolicy()


def get_event_loop_policy():
    """Get the current event loop policy."""
    if _event_loop_policy is None:
        _init_event_loop_policy()
    return _event_loop_policy


def set_event_loop_policy(policy):
    """Set the current event loop policy.

    If policy is None, the default policy is restored."""
    global _event_loop_policy
    assert policy is None or isinstance(policy, AbstractEventLoopPolicy)
    _event_loop_policy = policy


def get_event_loop():
    """Equivalent to calling get_event_loop_policy().get_event_loop()."""
    return get_event_loop_policy().get_event_loop()


def set_event_loop(loop):
    """Equivalent to calling get_event_loop_policy().set_event_loop(loop)."""
    get_event_loop_policy().set_event_loop(loop)


def new_event_loop():
    """Equivalent to calling get_event_loop_policy().new_event_loop()."""
    return get_event_loop_policy().new_event_loop()


def get_child_watcher():
    """Equivalent to calling get_event_loop_policy().get_child_watcher()."""
    return get_event_loop_policy().get_child_watcher()


def set_child_watcher(watcher):
    """Equivalent to calling
    get_event_loop_policy().set_child_watcher(watcher)."""
    return get_event_loop_policy().set_child_watcher(watcher)
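To illustrate Handle, TimerHandle, and the policy functions above, a small sketch (an editorial addition, not part of the commit):

    import asyncio

    loop = asyncio.new_event_loop()   # goes through the global policy
    asyncio.set_event_loop(loop)

    def on_timer():
        print('timer fired at', loop.time())
        loop.stop()

    loop.call_soon(print, 'runs on the next iteration')   # returns a Handle
    loop.call_later(0.05, on_timer)                       # returns a TimerHandle
    cancelled = loop.call_later(10, print, 'never printed')
    cancelled.cancel()   # TimerHandle.cancel() also notifies the loop
                         # via _timer_handle_cancelled()
    loop.run_forever()
    loop.close()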
411  asyncio/futures.py  Normal file
@@ -0,0 +1,411 @@
|
||||
"""A Future class similar to the one in PEP 3148."""
|
||||
|
||||
__all__ = ['CancelledError', 'TimeoutError',
|
||||
'InvalidStateError',
|
||||
'Future', 'wrap_future',
|
||||
]
|
||||
|
||||
import concurrent.futures._base
|
||||
import logging
|
||||
import reprlib
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
from . import events
|
||||
|
||||
# States for Future.
|
||||
_PENDING = 'PENDING'
|
||||
_CANCELLED = 'CANCELLED'
|
||||
_FINISHED = 'FINISHED'
|
||||
|
||||
_PY34 = sys.version_info >= (3, 4)
|
||||
|
||||
# TODO: Do we really want to depend on concurrent.futures internals?
|
||||
Error = concurrent.futures._base.Error
|
||||
CancelledError = concurrent.futures.CancelledError
|
||||
TimeoutError = concurrent.futures.TimeoutError
|
||||
|
||||
STACK_DEBUG = logging.DEBUG - 1 # heavy-duty debugging
|
||||
|
||||
|
||||
class InvalidStateError(Error):
|
||||
"""The operation is not allowed in this state."""
|
||||
# TODO: Show the future, its state, the method, and the required state.
|
||||
|
||||
|
||||
class _TracebackLogger:
|
||||
"""Helper to log a traceback upon destruction if not cleared.
|
||||
|
||||
This solves a nasty problem with Futures and Tasks that have an
|
||||
exception set: if nobody asks for the exception, the exception is
|
||||
never logged. This violates the Zen of Python: 'Errors should
|
||||
never pass silently. Unless explicitly silenced.'
|
||||
|
||||
However, we don't want to log the exception as soon as
|
||||
set_exception() is called: if the calling code is written
|
||||
properly, it will get the exception and handle it properly. But
|
||||
we *do* want to log it if result() or exception() was never called
|
||||
-- otherwise developers waste a lot of time wondering why their
|
||||
buggy code fails silently.
|
||||
|
||||
An earlier attempt added a __del__() method to the Future class
|
||||
itself, but this backfired because the presence of __del__()
|
||||
prevents garbage collection from breaking cycles. A way out of
|
||||
this catch-22 is to avoid having a __del__() method on the Future
|
||||
class itself, but instead to have a reference to a helper object
|
||||
with a __del__() method that logs the traceback, where we ensure
|
||||
that the helper object doesn't participate in cycles, and only the
|
||||
Future has a reference to it.
|
||||
|
||||
The helper object is added when set_exception() is called. When
|
||||
the Future is collected, and the helper is present, the helper
|
||||
object is also collected, and its __del__() method will log the
|
||||
traceback. When the Future's result() or exception() method is
|
||||
called (and a helper object is present), it removes the helper
|
||||
object, after calling its clear() method to prevent it from
|
||||
logging.
|
||||
|
||||
One downside is that we do a fair amount of work to extract the
|
||||
traceback from the exception, even when it is never logged. It
|
||||
would seem cheaper to just store the exception object, but that
|
||||
references the traceback, which references stack frames, which may
|
||||
reference the Future, which references the _TracebackLogger, and
|
||||
then the _TracebackLogger would be included in a cycle, which is
|
||||
what we're trying to avoid! As an optimization, we don't
|
||||
immediately format the exception; we only do the work when
|
||||
activate() is called, which call is delayed until after all the
|
||||
Future's callbacks have run. Since usually a Future has at least
|
||||
one callback (typically set by 'yield from') and usually that
|
||||
callback extracts the callback, thereby removing the need to
|
||||
format the exception.
|
||||
|
||||
PS. I don't claim credit for this solution. I first heard of it
|
||||
in a discussion about closing files when they are collected.
|
||||
"""
|
||||
|
||||
__slots__ = ('loop', 'source_traceback', 'exc', 'tb')
|
||||
|
||||
def __init__(self, future, exc):
|
||||
self.loop = future._loop
|
||||
self.source_traceback = future._source_traceback
|
||||
self.exc = exc
|
||||
self.tb = None
|
||||
|
||||
def activate(self):
|
||||
exc = self.exc
|
||||
if exc is not None:
|
||||
self.exc = None
|
||||
self.tb = traceback.format_exception(exc.__class__, exc,
|
||||
exc.__traceback__)
|
||||
|
||||
def clear(self):
|
||||
self.exc = None
|
||||
self.tb = None
|
||||
|
||||
def __del__(self):
|
||||
if self.tb:
|
||||
msg = 'Future/Task exception was never retrieved\n'
|
||||
if self.source_traceback:
|
||||
src = ''.join(traceback.format_list(self.source_traceback))
|
||||
msg += 'Future/Task created at (most recent call last):\n'
|
||||
msg += '%s\n' % src.rstrip()
|
||||
msg += ''.join(self.tb).rstrip()
|
||||
self.loop.call_exception_handler({'message': msg})
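# Illustrative sketch (not part of the module): the "helper object with
# __del__" pattern from the docstring above, reduced to a standalone
# example.  _Reporter and _Owner are hypothetical names.
class _Reporter:

    def __init__(self, message):
        self.message = message

    def __del__(self):
        if self.message is not None:
            print(self.message)


class _Owner:
    """Has no __del__ of its own, so cycles through it stay collectable."""

    def __init__(self):
        # Only the owner references the helper, and the helper references
        # nothing that could lead back to the owner.
        self._reporter = _Reporter('result was never consumed')

    def consume(self):
        self._reporter.message = None   # clear: nothing will be printed
        self._reporter = None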
|
||||
|
||||
|
||||
class Future:
|
||||
"""This class is *almost* compatible with concurrent.futures.Future.
|
||||
|
||||
Differences:
|
||||
|
||||
- result() and exception() do not take a timeout argument and
|
||||
raise an exception when the future isn't done yet.
|
||||
|
||||
- Callbacks registered with add_done_callback() are always called
|
||||
via the event loop's call_soon_threadsafe().
|
||||
|
||||
- This class is not compatible with the wait() and as_completed()
|
||||
methods in the concurrent.futures package.
|
||||
|
||||
(In Python 3.4 or later we may be able to unify the implementations.)
|
||||
"""
|
||||
|
||||
# Class variables serving as defaults for instance variables.
|
||||
_state = _PENDING
|
||||
_result = None
|
||||
_exception = None
|
||||
_loop = None
|
||||
_source_traceback = None
|
||||
|
||||
_blocking = False # proper use of future (yield vs yield from)
|
||||
|
||||
_log_traceback = False # Used for Python 3.4 and later
|
||||
_tb_logger = None # Used for Python 3.3 only
|
||||
|
||||
def __init__(self, *, loop=None):
|
||||
"""Initialize the future.
|
||||
|
||||
        The optional loop argument allows explicitly setting the event
        loop object used by the future.  If it's not provided, the future
        uses the default event loop.
|
||||
"""
|
||||
if loop is None:
|
||||
self._loop = events.get_event_loop()
|
||||
else:
|
||||
self._loop = loop
|
||||
self._callbacks = []
|
||||
if self._loop.get_debug():
|
||||
self._source_traceback = traceback.extract_stack(sys._getframe(1))
|
||||
|
||||
def _format_callbacks(self):
|
||||
cb = self._callbacks
|
||||
size = len(cb)
|
||||
if not size:
|
||||
cb = ''
|
||||
|
||||
def format_cb(callback):
|
||||
return events._format_callback(callback, ())
|
||||
|
||||
if size == 1:
|
||||
cb = format_cb(cb[0])
|
||||
elif size == 2:
|
||||
cb = '{}, {}'.format(format_cb(cb[0]), format_cb(cb[1]))
|
||||
elif size > 2:
|
||||
cb = '{}, <{} more>, {}'.format(format_cb(cb[0]),
|
||||
size-2,
|
||||
format_cb(cb[-1]))
|
||||
return 'cb=[%s]' % cb
|
||||
|
||||
def _repr_info(self):
|
||||
info = [self._state.lower()]
|
||||
if self._state == _FINISHED:
|
||||
if self._exception is not None:
|
||||
info.append('exception={!r}'.format(self._exception))
|
||||
else:
|
||||
# use reprlib to limit the length of the output, especially
|
||||
# for very long strings
|
||||
result = reprlib.repr(self._result)
|
||||
info.append('result={}'.format(result))
|
||||
if self._callbacks:
|
||||
info.append(self._format_callbacks())
|
||||
if self._source_traceback:
|
||||
frame = self._source_traceback[-1]
|
||||
info.append('created at %s:%s' % (frame[0], frame[1]))
|
||||
return info
|
||||
|
||||
def __repr__(self):
|
||||
info = self._repr_info()
|
||||
return '<%s %s>' % (self.__class__.__name__, ' '.join(info))
|
||||
|
||||
    # On Python 3.3 and older, objects with a destructor that are part of a
    # reference cycle are never destroyed.  This is no longer the case on
    # Python 3.4, thanks to PEP 442.
|
||||
if _PY34:
|
||||
def __del__(self):
|
||||
if not self._log_traceback:
|
||||
# set_exception() was not called, or result() or exception()
|
||||
# has consumed the exception
|
||||
return
|
||||
exc = self._exception
|
||||
context = {
|
||||
'message': ('%s exception was never retrieved'
|
||||
% self.__class__.__name__),
|
||||
'exception': exc,
|
||||
'future': self,
|
||||
}
|
||||
if self._source_traceback:
|
||||
context['source_traceback'] = self._source_traceback
|
||||
self._loop.call_exception_handler(context)
|
||||
|
||||
def cancel(self):
|
||||
"""Cancel the future and schedule callbacks.
|
||||
|
||||
If the future is already done or cancelled, return False. Otherwise,
|
||||
change the future's state to cancelled, schedule the callbacks and
|
||||
return True.
|
||||
"""
|
||||
if self._state != _PENDING:
|
||||
return False
|
||||
self._state = _CANCELLED
|
||||
self._schedule_callbacks()
|
||||
return True
|
||||
|
||||
def _schedule_callbacks(self):
|
||||
"""Internal: Ask the event loop to call all callbacks.
|
||||
|
||||
The callbacks are scheduled to be called as soon as possible. Also
|
||||
clears the callback list.
|
||||
"""
|
||||
callbacks = self._callbacks[:]
|
||||
if not callbacks:
|
||||
return
|
||||
|
||||
self._callbacks[:] = []
|
||||
for callback in callbacks:
|
||||
self._loop.call_soon(callback, self)
|
||||
|
||||
def cancelled(self):
|
||||
"""Return True if the future was cancelled."""
|
||||
return self._state == _CANCELLED
|
||||
|
||||
# Don't implement running(); see http://bugs.python.org/issue18699
|
||||
|
||||
def done(self):
|
||||
"""Return True if the future is done.
|
||||
|
||||
Done means either that a result / exception are available, or that the
|
||||
future was cancelled.
|
||||
"""
|
||||
return self._state != _PENDING
|
||||
|
||||
def result(self):
|
||||
"""Return the result this future represents.
|
||||
|
||||
If the future has been cancelled, raises CancelledError. If the
|
||||
future's result isn't yet available, raises InvalidStateError. If
|
||||
the future is done and has an exception set, this exception is raised.
|
||||
"""
|
||||
if self._state == _CANCELLED:
|
||||
raise CancelledError
|
||||
if self._state != _FINISHED:
|
||||
raise InvalidStateError('Result is not ready.')
|
||||
self._log_traceback = False
|
||||
if self._tb_logger is not None:
|
||||
self._tb_logger.clear()
|
||||
self._tb_logger = None
|
||||
if self._exception is not None:
|
||||
raise self._exception
|
||||
return self._result
|
||||
|
||||
def exception(self):
|
||||
"""Return the exception that was set on this future.
|
||||
|
||||
The exception (or None if no exception was set) is returned only if
|
||||
the future is done. If the future has been cancelled, raises
|
||||
CancelledError. If the future isn't done yet, raises
|
||||
InvalidStateError.
|
||||
"""
|
||||
if self._state == _CANCELLED:
|
||||
raise CancelledError
|
||||
if self._state != _FINISHED:
|
||||
raise InvalidStateError('Exception is not set.')
|
||||
self._log_traceback = False
|
||||
if self._tb_logger is not None:
|
||||
self._tb_logger.clear()
|
||||
self._tb_logger = None
|
||||
return self._exception
|
||||
|
||||
def add_done_callback(self, fn):
|
||||
"""Add a callback to be run when the future becomes done.
|
||||
|
||||
The callback is called with a single argument - the future object. If
|
||||
the future is already done when this is called, the callback is
|
||||
scheduled with call_soon.
|
||||
"""
|
||||
if self._state != _PENDING:
|
||||
self._loop.call_soon(fn, self)
|
||||
else:
|
||||
self._callbacks.append(fn)
|
||||
|
||||
# New method not in PEP 3148.
|
||||
|
||||
def remove_done_callback(self, fn):
|
||||
"""Remove all instances of a callback from the "call when done" list.
|
||||
|
||||
Returns the number of callbacks removed.
|
||||
"""
|
||||
filtered_callbacks = [f for f in self._callbacks if f != fn]
|
||||
removed_count = len(self._callbacks) - len(filtered_callbacks)
|
||||
if removed_count:
|
||||
self._callbacks[:] = filtered_callbacks
|
||||
return removed_count
|
||||
|
||||
# So-called internal methods (note: no set_running_or_notify_cancel()).
|
||||
|
||||
def _set_result_unless_cancelled(self, result):
|
||||
"""Helper setting the result only if the future was not cancelled."""
|
||||
if self.cancelled():
|
||||
return
|
||||
self.set_result(result)
|
||||
|
||||
def set_result(self, result):
|
||||
"""Mark the future done and set its result.
|
||||
|
||||
If the future is already done when this method is called, raises
|
||||
InvalidStateError.
|
||||
"""
|
||||
if self._state != _PENDING:
|
||||
raise InvalidStateError('{}: {!r}'.format(self._state, self))
|
||||
self._result = result
|
||||
self._state = _FINISHED
|
||||
self._schedule_callbacks()
|
||||
|
||||
def set_exception(self, exception):
|
||||
"""Mark the future done and set an exception.
|
||||
|
||||
If the future is already done when this method is called, raises
|
||||
InvalidStateError.
|
||||
"""
|
||||
if self._state != _PENDING:
|
||||
raise InvalidStateError('{}: {!r}'.format(self._state, self))
|
||||
if isinstance(exception, type):
|
||||
exception = exception()
|
||||
self._exception = exception
|
||||
self._state = _FINISHED
|
||||
self._schedule_callbacks()
|
||||
if _PY34:
|
||||
self._log_traceback = True
|
||||
else:
|
||||
self._tb_logger = _TracebackLogger(self, exception)
|
||||
# Arrange for the logger to be activated after all callbacks
|
||||
# have had a chance to call result() or exception().
|
||||
self._loop.call_soon(self._tb_logger.activate)
|
||||
|
||||
# Truly internal methods.
|
||||
|
||||
def _copy_state(self, other):
|
||||
"""Internal helper to copy state from another Future.
|
||||
|
||||
The other Future may be a concurrent.futures.Future.
|
||||
"""
|
||||
assert other.done()
|
||||
if self.cancelled():
|
||||
return
|
||||
assert not self.done()
|
||||
if other.cancelled():
|
||||
self.cancel()
|
||||
else:
|
||||
exception = other.exception()
|
||||
if exception is not None:
|
||||
self.set_exception(exception)
|
||||
else:
|
||||
result = other.result()
|
||||
self.set_result(result)
|
||||
|
||||
def __iter__(self):
|
||||
if not self.done():
|
||||
self._blocking = True
|
||||
yield self # This tells Task to wait for completion.
|
||||
assert self.done(), "yield from wasn't used with future"
|
||||
return self.result() # May raise too.
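# Illustrative usage sketch (not part of the module): because __iter__()
# yields the future itself and then returns its result, a coroutine can
# suspend on a Future with 'yield from' until set_result() is called
# elsewhere.  Assumes the package is importable as 'asyncio'.
import asyncio

@asyncio.coroutine
def consumer(fut):
    value = yield from fut      # suspends until the result is set
    return value

loop = asyncio.get_event_loop()
fut = asyncio.Future(loop=loop)
loop.call_soon(fut.set_result, 42)
assert loop.run_until_complete(consumer(fut)) == 42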
|
||||
|
||||
|
||||
def wrap_future(fut, *, loop=None):
|
||||
"""Wrap concurrent.futures.Future object."""
|
||||
if isinstance(fut, Future):
|
||||
return fut
|
||||
assert isinstance(fut, concurrent.futures.Future), \
|
||||
'concurrent.futures.Future is expected, got {!r}'.format(fut)
|
||||
if loop is None:
|
||||
loop = events.get_event_loop()
|
||||
new_future = Future(loop=loop)
|
||||
|
||||
def _check_cancel_other(f):
|
||||
if f.cancelled():
|
||||
fut.cancel()
|
||||
|
||||
new_future.add_done_callback(_check_cancel_other)
|
||||
fut.add_done_callback(
|
||||
lambda future: loop.call_soon_threadsafe(
|
||||
new_future._copy_state, fut))
|
||||
return new_future
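# Illustrative usage sketch (not part of the module): wrap_future()
# bridges a concurrent.futures.Future produced by a thread pool into the
# event loop so it can be awaited with 'yield from' or run_until_complete().
import asyncio
import concurrent.futures

def blocking_add(a, b):
    # Stands in for any blocking call executed in a worker thread.
    return a + b

loop = asyncio.get_event_loop()
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
    cf = pool.submit(blocking_add, 1, 2)
    result = loop.run_until_complete(asyncio.wrap_future(cf, loop=loop))
assert result == 3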
|
||||
469
asyncio/locks.py
Normal file
@@ -0,0 +1,469 @@
|
||||
"""Synchronization primitives."""
|
||||
|
||||
__all__ = ['Lock', 'Event', 'Condition', 'Semaphore', 'BoundedSemaphore']
|
||||
|
||||
import collections
|
||||
|
||||
from . import events
|
||||
from . import futures
|
||||
from .coroutines import coroutine
|
||||
|
||||
|
||||
class _ContextManager:
|
||||
"""Context manager.
|
||||
|
||||
This enables the following idiom for acquiring and releasing a
|
||||
lock around a block:
|
||||
|
||||
with (yield from lock):
|
||||
<block>
|
||||
|
||||
while failing loudly when accidentally using:
|
||||
|
||||
with lock:
|
||||
<block>
|
||||
"""
|
||||
|
||||
def __init__(self, lock):
|
||||
self._lock = lock
|
||||
|
||||
def __enter__(self):
|
||||
# We have no use for the "as ..." clause in the with
|
||||
# statement for locks.
|
||||
return None
|
||||
|
||||
def __exit__(self, *args):
|
||||
try:
|
||||
self._lock.release()
|
||||
finally:
|
||||
self._lock = None # Crudely prevent reuse.
|
||||
|
||||
|
||||
class Lock:
|
||||
"""Primitive lock objects.
|
||||
|
||||
A primitive lock is a synchronization primitive that is not owned
|
||||
by a particular coroutine when locked. A primitive lock is in one
|
||||
of two states, 'locked' or 'unlocked'.
|
||||
|
||||
It is created in the unlocked state. It has two basic methods,
|
||||
acquire() and release(). When the state is unlocked, acquire()
|
||||
changes the state to locked and returns immediately. When the
|
||||
state is locked, acquire() blocks until a call to release() in
|
||||
another coroutine changes it to unlocked, then the acquire() call
|
||||
resets it to locked and returns. The release() method should only
|
||||
be called in the locked state; it changes the state to unlocked
|
||||
and returns immediately. If an attempt is made to release an
|
||||
unlocked lock, a RuntimeError will be raised.
|
||||
|
||||
    When more than one coroutine is blocked in acquire() waiting for
    the state to turn to unlocked, only one coroutine proceeds when a
    release() call resets the state to unlocked; the first coroutine
    that blocked in acquire() is the one that proceeds.
|
||||
|
||||
acquire() is a coroutine and should be called with 'yield from'.
|
||||
|
||||
Locks also support the context management protocol. '(yield from lock)'
|
||||
should be used as context manager expression.
|
||||
|
||||
Usage:
|
||||
|
||||
lock = Lock()
|
||||
...
|
||||
yield from lock
|
||||
try:
|
||||
...
|
||||
finally:
|
||||
lock.release()
|
||||
|
||||
Context manager usage:
|
||||
|
||||
lock = Lock()
|
||||
...
|
||||
with (yield from lock):
|
||||
...
|
||||
|
||||
Lock objects can be tested for locking state:
|
||||
|
||||
if not lock.locked():
|
||||
yield from lock
|
||||
else:
|
||||
# lock is acquired
|
||||
...
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, *, loop=None):
|
||||
self._waiters = collections.deque()
|
||||
self._locked = False
|
||||
if loop is not None:
|
||||
self._loop = loop
|
||||
else:
|
||||
self._loop = events.get_event_loop()
|
||||
|
||||
def __repr__(self):
|
||||
res = super().__repr__()
|
||||
extra = 'locked' if self._locked else 'unlocked'
|
||||
if self._waiters:
|
||||
extra = '{},waiters:{}'.format(extra, len(self._waiters))
|
||||
return '<{} [{}]>'.format(res[1:-1], extra)
|
||||
|
||||
def locked(self):
|
||||
"""Return True if lock is acquired."""
|
||||
return self._locked
|
||||
|
||||
@coroutine
|
||||
def acquire(self):
|
||||
"""Acquire a lock.
|
||||
|
||||
This method blocks until the lock is unlocked, then sets it to
|
||||
locked and returns True.
|
||||
"""
|
||||
if not self._waiters and not self._locked:
|
||||
self._locked = True
|
||||
return True
|
||||
|
||||
fut = futures.Future(loop=self._loop)
|
||||
self._waiters.append(fut)
|
||||
try:
|
||||
yield from fut
|
||||
self._locked = True
|
||||
return True
|
||||
finally:
|
||||
self._waiters.remove(fut)
|
||||
|
||||
def release(self):
|
||||
"""Release a lock.
|
||||
|
||||
When the lock is locked, reset it to unlocked, and return.
|
||||
If any other coroutines are blocked waiting for the lock to become
|
||||
unlocked, allow exactly one of them to proceed.
|
||||
|
||||
When invoked on an unlocked lock, a RuntimeError is raised.
|
||||
|
||||
There is no return value.
|
||||
"""
|
||||
if self._locked:
|
||||
self._locked = False
|
||||
# Wake up the first waiter who isn't cancelled.
|
||||
for fut in self._waiters:
|
||||
if not fut.done():
|
||||
fut.set_result(True)
|
||||
break
|
||||
else:
|
||||
raise RuntimeError('Lock is not acquired.')
|
||||
|
||||
def __enter__(self):
|
||||
raise RuntimeError(
|
||||
'"yield from" should be used as context manager expression')
|
||||
|
||||
def __exit__(self, *args):
|
||||
# This must exist because __enter__ exists, even though that
|
||||
# always raises; that's how the with-statement works.
|
||||
pass
|
||||
|
||||
def __iter__(self):
|
||||
# This is not a coroutine. It is meant to enable the idiom:
|
||||
#
|
||||
# with (yield from lock):
|
||||
# <block>
|
||||
#
|
||||
# as an alternative to:
|
||||
#
|
||||
# yield from lock.acquire()
|
||||
# try:
|
||||
# <block>
|
||||
# finally:
|
||||
# lock.release()
|
||||
yield from self.acquire()
|
||||
return _ContextManager(self)
|
||||
|
||||
|
||||
class Event:
|
||||
"""Asynchronous equivalent to threading.Event.
|
||||
|
||||
Class implementing event objects. An event manages a flag that can be set
|
||||
to true with the set() method and reset to false with the clear() method.
|
||||
The wait() method blocks until the flag is true. The flag is initially
|
||||
false.
|
||||
"""
|
||||
|
||||
def __init__(self, *, loop=None):
|
||||
self._waiters = collections.deque()
|
||||
self._value = False
|
||||
if loop is not None:
|
||||
self._loop = loop
|
||||
else:
|
||||
self._loop = events.get_event_loop()
|
||||
|
||||
def __repr__(self):
|
||||
res = super().__repr__()
|
||||
extra = 'set' if self._value else 'unset'
|
||||
if self._waiters:
|
||||
extra = '{},waiters:{}'.format(extra, len(self._waiters))
|
||||
return '<{} [{}]>'.format(res[1:-1], extra)
|
||||
|
||||
def is_set(self):
|
||||
"""Return True if and only if the internal flag is true."""
|
||||
return self._value
|
||||
|
||||
def set(self):
|
||||
"""Set the internal flag to true. All coroutines waiting for it to
|
||||
become true are awakened. Coroutine that call wait() once the flag is
|
||||
true will not block at all.
|
||||
"""
|
||||
if not self._value:
|
||||
self._value = True
|
||||
|
||||
for fut in self._waiters:
|
||||
if not fut.done():
|
||||
fut.set_result(True)
|
||||
|
||||
def clear(self):
|
||||
"""Reset the internal flag to false. Subsequently, coroutines calling
|
||||
wait() will block until set() is called to set the internal flag
|
||||
to true again."""
|
||||
self._value = False
|
||||
|
||||
@coroutine
|
||||
def wait(self):
|
||||
"""Block until the internal flag is true.
|
||||
|
||||
If the internal flag is true on entry, return True
|
||||
immediately. Otherwise, block until another coroutine calls
|
||||
set() to set the flag to true, then return True.
|
||||
"""
|
||||
if self._value:
|
||||
return True
|
||||
|
||||
fut = futures.Future(loop=self._loop)
|
||||
self._waiters.append(fut)
|
||||
try:
|
||||
yield from fut
|
||||
return True
|
||||
finally:
|
||||
self._waiters.remove(fut)
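# Illustrative usage sketch (not part of the module): one coroutine
# blocks in wait() until a later callback sets the event.
import asyncio

@asyncio.coroutine
def waiter(event):
    yield from event.wait()     # returns once event.set() has run
    return 'woken'

loop = asyncio.get_event_loop()
event = asyncio.Event(loop=loop)
loop.call_later(0.01, event.set)
assert loop.run_until_complete(waiter(event)) == 'woken'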
|
||||
|
||||
|
||||
class Condition:
|
||||
"""Asynchronous equivalent to threading.Condition.
|
||||
|
||||
This class implements condition variable objects. A condition variable
|
||||
allows one or more coroutines to wait until they are notified by another
|
||||
coroutine.
|
||||
|
||||
A new Lock object is created and used as the underlying lock.
|
||||
"""
|
||||
|
||||
def __init__(self, lock=None, *, loop=None):
|
||||
if loop is not None:
|
||||
self._loop = loop
|
||||
else:
|
||||
self._loop = events.get_event_loop()
|
||||
|
||||
if lock is None:
|
||||
lock = Lock(loop=self._loop)
|
||||
elif lock._loop is not self._loop:
|
||||
raise ValueError("loop argument must agree with lock")
|
||||
|
||||
self._lock = lock
|
||||
# Export the lock's locked(), acquire() and release() methods.
|
||||
self.locked = lock.locked
|
||||
self.acquire = lock.acquire
|
||||
self.release = lock.release
|
||||
|
||||
self._waiters = collections.deque()
|
||||
|
||||
def __repr__(self):
|
||||
res = super().__repr__()
|
||||
extra = 'locked' if self.locked() else 'unlocked'
|
||||
if self._waiters:
|
||||
extra = '{},waiters:{}'.format(extra, len(self._waiters))
|
||||
return '<{} [{}]>'.format(res[1:-1], extra)
|
||||
|
||||
@coroutine
|
||||
def wait(self):
|
||||
"""Wait until notified.
|
||||
|
||||
If the calling coroutine has not acquired the lock when this
|
||||
method is called, a RuntimeError is raised.
|
||||
|
||||
This method releases the underlying lock, and then blocks
|
||||
until it is awakened by a notify() or notify_all() call for
|
||||
the same condition variable in another coroutine. Once
|
||||
awakened, it re-acquires the lock and returns True.
|
||||
"""
|
||||
if not self.locked():
|
||||
raise RuntimeError('cannot wait on un-acquired lock')
|
||||
|
||||
self.release()
|
||||
try:
|
||||
fut = futures.Future(loop=self._loop)
|
||||
self._waiters.append(fut)
|
||||
try:
|
||||
yield from fut
|
||||
return True
|
||||
finally:
|
||||
self._waiters.remove(fut)
|
||||
|
||||
finally:
|
||||
yield from self.acquire()
|
||||
|
||||
@coroutine
|
||||
def wait_for(self, predicate):
|
||||
"""Wait until a predicate becomes true.
|
||||
|
||||
        The predicate should be a callable whose result will be
        interpreted as a boolean value.  The final predicate value is
        the return value.
|
||||
"""
|
||||
result = predicate()
|
||||
while not result:
|
||||
yield from self.wait()
|
||||
result = predicate()
|
||||
return result
|
||||
|
||||
def notify(self, n=1):
|
||||
"""By default, wake up one coroutine waiting on this condition, if any.
|
||||
If the calling coroutine has not acquired the lock when this method
|
||||
is called, a RuntimeError is raised.
|
||||
|
||||
This method wakes up at most n of the coroutines waiting for the
|
||||
condition variable; it is a no-op if no coroutines are waiting.
|
||||
|
||||
Note: an awakened coroutine does not actually return from its
|
||||
wait() call until it can reacquire the lock. Since notify() does
|
||||
not release the lock, its caller should.
|
||||
"""
|
||||
if not self.locked():
|
||||
raise RuntimeError('cannot notify on un-acquired lock')
|
||||
|
||||
idx = 0
|
||||
for fut in self._waiters:
|
||||
if idx >= n:
|
||||
break
|
||||
|
||||
if not fut.done():
|
||||
idx += 1
|
||||
fut.set_result(False)
|
||||
|
||||
def notify_all(self):
|
||||
"""Wake up all threads waiting on this condition. This method acts
|
||||
like notify(), but wakes up all waiting threads instead of one. If the
|
||||
calling thread has not acquired the lock when this method is called,
|
||||
a RuntimeError is raised.
|
||||
"""
|
||||
self.notify(len(self._waiters))
|
||||
|
||||
def __enter__(self):
|
||||
raise RuntimeError(
|
||||
'"yield from" should be used as context manager expression')
|
||||
|
||||
def __exit__(self, *args):
|
||||
pass
|
||||
|
||||
def __iter__(self):
|
||||
# See comment in Lock.__iter__().
|
||||
yield from self.acquire()
|
||||
return _ContextManager(self)
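# Illustrative usage sketch (not part of the module): a consumer waits
# under the condition's lock until a producer flips shared state and
# calls notify_all().  The 'state' dict is a stand-in for real shared data.
import asyncio

state = {'ready': False}

@asyncio.coroutine
def consumer(cond):
    with (yield from cond):
        yield from cond.wait_for(lambda: state['ready'])
    return 'done'

@asyncio.coroutine
def producer(cond):
    with (yield from cond):
        state['ready'] = True
        cond.notify_all()

loop = asyncio.get_event_loop()
cond = asyncio.Condition(loop=loop)
result, _ = loop.run_until_complete(
    asyncio.gather(consumer(cond), producer(cond), loop=loop))
assert result == 'done'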
|
||||
|
||||
|
||||
class Semaphore:
|
||||
"""A Semaphore implementation.
|
||||
|
||||
A semaphore manages an internal counter which is decremented by each
|
||||
acquire() call and incremented by each release() call. The counter
|
||||
    can never go below zero; when acquire() finds that it is zero, it blocks,
    waiting until some other coroutine calls release().
|
||||
|
||||
Semaphores also support the context management protocol.
|
||||
|
||||
The optional argument gives the initial value for the internal
|
||||
counter; it defaults to 1. If the value given is less than 0,
|
||||
ValueError is raised.
|
||||
"""
|
||||
|
||||
def __init__(self, value=1, *, loop=None):
|
||||
if value < 0:
|
||||
raise ValueError("Semaphore initial value must be >= 0")
|
||||
self._value = value
|
||||
self._waiters = collections.deque()
|
||||
if loop is not None:
|
||||
self._loop = loop
|
||||
else:
|
||||
self._loop = events.get_event_loop()
|
||||
|
||||
def __repr__(self):
|
||||
res = super().__repr__()
|
||||
extra = 'locked' if self.locked() else 'unlocked,value:{}'.format(
|
||||
self._value)
|
||||
if self._waiters:
|
||||
extra = '{},waiters:{}'.format(extra, len(self._waiters))
|
||||
return '<{} [{}]>'.format(res[1:-1], extra)
|
||||
|
||||
def locked(self):
|
||||
"""Returns True if semaphore can not be acquired immediately."""
|
||||
return self._value == 0
|
||||
|
||||
@coroutine
|
||||
def acquire(self):
|
||||
"""Acquire a semaphore.
|
||||
|
||||
If the internal counter is larger than zero on entry,
|
||||
decrement it by one and return True immediately. If it is
|
||||
zero on entry, block, waiting until some other coroutine has
|
||||
called release() to make it larger than 0, and then return
|
||||
True.
|
||||
"""
|
||||
if not self._waiters and self._value > 0:
|
||||
self._value -= 1
|
||||
return True
|
||||
|
||||
fut = futures.Future(loop=self._loop)
|
||||
self._waiters.append(fut)
|
||||
try:
|
||||
yield from fut
|
||||
self._value -= 1
|
||||
return True
|
||||
finally:
|
||||
self._waiters.remove(fut)
|
||||
|
||||
def release(self):
|
||||
"""Release a semaphore, incrementing the internal counter by one.
|
||||
When it was zero on entry and another coroutine is waiting for it to
|
||||
become larger than zero again, wake up that coroutine.
|
||||
"""
|
||||
self._value += 1
|
||||
for waiter in self._waiters:
|
||||
if not waiter.done():
|
||||
waiter.set_result(True)
|
||||
break
|
||||
|
||||
def __enter__(self):
|
||||
raise RuntimeError(
|
||||
'"yield from" should be used as context manager expression')
|
||||
|
||||
def __exit__(self, *args):
|
||||
pass
|
||||
|
||||
def __iter__(self):
|
||||
# See comment in Lock.__iter__().
|
||||
yield from self.acquire()
|
||||
return _ContextManager(self)
|
||||
|
||||
|
||||
class BoundedSemaphore(Semaphore):
|
||||
"""A bounded semaphore implementation.
|
||||
|
||||
This raises ValueError in release() if it would increase the value
|
||||
above the initial value.
|
||||
"""
|
||||
|
||||
def __init__(self, value=1, *, loop=None):
|
||||
self._bound_value = value
|
||||
super().__init__(value, loop=loop)
|
||||
|
||||
def release(self):
|
||||
if self._value >= self._bound_value:
|
||||
raise ValueError('BoundedSemaphore released too many times')
|
||||
super().release()
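# Illustrative usage sketch (not part of the module): a Semaphore bounds
# how many coroutines enter a section at once; BoundedSemaphore would
# additionally raise if release() outran acquire().
import asyncio

loop = asyncio.get_event_loop()
sem = asyncio.Semaphore(2, loop=loop)

@asyncio.coroutine
def worker(n):
    with (yield from sem):      # at most two workers inside at a time
        yield from asyncio.sleep(0.01)
        return n

results = loop.run_until_complete(
    asyncio.gather(*[worker(n) for n in range(5)], loop=loop))
assert results == list(range(5))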
|
||||
7
asyncio/log.py
Normal file
@@ -0,0 +1,7 @@
|
||||
"""Logging configuration."""
|
||||
|
||||
import logging
|
||||
|
||||
|
||||
# Name the logger after the package.
|
||||
logger = logging.getLogger(__package__)
|
||||
502
asyncio/proactor_events.py
Normal file
@@ -0,0 +1,502 @@
|
||||
"""Event loop using a proactor and related classes.
|
||||
|
||||
A proactor is a "notify-on-completion" multiplexer. Currently a
|
||||
proactor is only implemented on Windows with IOCP.
|
||||
"""
|
||||
|
||||
__all__ = ['BaseProactorEventLoop']
|
||||
|
||||
import socket
|
||||
|
||||
from . import base_events
|
||||
from . import constants
|
||||
from . import futures
|
||||
from . import transports
|
||||
from .log import logger
|
||||
|
||||
|
||||
class _ProactorBasePipeTransport(transports._FlowControlMixin,
|
||||
transports.BaseTransport):
|
||||
"""Base class for pipe and socket transports."""
|
||||
|
||||
def __init__(self, loop, sock, protocol, waiter=None,
|
||||
extra=None, server=None):
|
||||
super().__init__(extra, loop)
|
||||
self._set_extra(sock)
|
||||
self._sock = sock
|
||||
self._protocol = protocol
|
||||
self._server = server
|
||||
self._buffer = None # None or bytearray.
|
||||
self._read_fut = None
|
||||
self._write_fut = None
|
||||
self._pending_write = 0
|
||||
self._conn_lost = 0
|
||||
self._closing = False # Set when close() called.
|
||||
self._eof_written = False
|
||||
if self._server is not None:
|
||||
self._server._attach()
|
||||
self._loop.call_soon(self._protocol.connection_made, self)
|
||||
if waiter is not None:
|
||||
# wait until protocol.connection_made() has been called
|
||||
self._loop.call_soon(waiter._set_result_unless_cancelled, None)
|
||||
|
||||
def __repr__(self):
|
||||
info = [self.__class__.__name__]
|
||||
fd = self._sock.fileno()
|
||||
if fd < 0:
|
||||
info.append('closed')
|
||||
elif self._closing:
|
||||
info.append('closing')
|
||||
info.append('fd=%s' % fd)
|
||||
if self._read_fut is not None:
|
||||
info.append('read=%s' % self._read_fut)
|
||||
if self._write_fut is not None:
|
||||
info.append("write=%r" % self._write_fut)
|
||||
if self._buffer:
|
||||
bufsize = len(self._buffer)
|
||||
info.append('write_bufsize=%s' % bufsize)
|
||||
if self._eof_written:
|
||||
info.append('EOF written')
|
||||
return '<%s>' % ' '.join(info)
|
||||
|
||||
def _set_extra(self, sock):
|
||||
self._extra['pipe'] = sock
|
||||
|
||||
def close(self):
|
||||
if self._closing:
|
||||
return
|
||||
self._closing = True
|
||||
self._conn_lost += 1
|
||||
if not self._buffer and self._write_fut is None:
|
||||
self._loop.call_soon(self._call_connection_lost, None)
|
||||
if self._read_fut is not None:
|
||||
self._read_fut.cancel()
|
||||
|
||||
def _fatal_error(self, exc, message='Fatal error on pipe transport'):
|
||||
if isinstance(exc, (BrokenPipeError, ConnectionResetError)):
|
||||
if self._loop.get_debug():
|
||||
logger.debug("%r: %s", self, message, exc_info=True)
|
||||
else:
|
||||
self._loop.call_exception_handler({
|
||||
'message': message,
|
||||
'exception': exc,
|
||||
'transport': self,
|
||||
'protocol': self._protocol,
|
||||
})
|
||||
self._force_close(exc)
|
||||
|
||||
def _force_close(self, exc):
|
||||
if self._closing:
|
||||
return
|
||||
self._closing = True
|
||||
self._conn_lost += 1
|
||||
if self._write_fut:
|
||||
self._write_fut.cancel()
|
||||
if self._read_fut:
|
||||
self._read_fut.cancel()
|
||||
self._write_fut = self._read_fut = None
|
||||
self._pending_write = 0
|
||||
self._buffer = None
|
||||
self._loop.call_soon(self._call_connection_lost, exc)
|
||||
|
||||
def _call_connection_lost(self, exc):
|
||||
try:
|
||||
self._protocol.connection_lost(exc)
|
||||
finally:
|
||||
# XXX If there is a pending overlapped read on the other
|
||||
# end then it may fail with ERROR_NETNAME_DELETED if we
|
||||
# just close our end. First calling shutdown() seems to
|
||||
# cure it, but maybe using DisconnectEx() would be better.
|
||||
if hasattr(self._sock, 'shutdown'):
|
||||
self._sock.shutdown(socket.SHUT_RDWR)
|
||||
self._sock.close()
|
||||
server = self._server
|
||||
if server is not None:
|
||||
server._detach()
|
||||
self._server = None
|
||||
|
||||
def get_write_buffer_size(self):
|
||||
size = self._pending_write
|
||||
if self._buffer is not None:
|
||||
size += len(self._buffer)
|
||||
return size
|
||||
|
||||
|
||||
class _ProactorReadPipeTransport(_ProactorBasePipeTransport,
|
||||
transports.ReadTransport):
|
||||
"""Transport for read pipes."""
|
||||
|
||||
def __init__(self, loop, sock, protocol, waiter=None,
|
||||
extra=None, server=None):
|
||||
super().__init__(loop, sock, protocol, waiter, extra, server)
|
||||
self._paused = False
|
||||
self._loop.call_soon(self._loop_reading)
|
||||
|
||||
def pause_reading(self):
|
||||
if self._closing:
|
||||
raise RuntimeError('Cannot pause_reading() when closing')
|
||||
if self._paused:
|
||||
raise RuntimeError('Already paused')
|
||||
self._paused = True
|
||||
if self._loop.get_debug():
|
||||
logger.debug("%r pauses reading", self)
|
||||
|
||||
def resume_reading(self):
|
||||
if not self._paused:
|
||||
raise RuntimeError('Not paused')
|
||||
self._paused = False
|
||||
if self._closing:
|
||||
return
|
||||
self._loop.call_soon(self._loop_reading, self._read_fut)
|
||||
if self._loop.get_debug():
|
||||
logger.debug("%r resumes reading", self)
|
||||
|
||||
def _loop_reading(self, fut=None):
|
||||
if self._paused:
|
||||
return
|
||||
data = None
|
||||
|
||||
try:
|
||||
if fut is not None:
|
||||
assert self._read_fut is fut or (self._read_fut is None and
|
||||
self._closing)
|
||||
self._read_fut = None
|
||||
data = fut.result() # deliver data later in "finally" clause
|
||||
|
||||
if self._closing:
|
||||
# since close() has been called we ignore any read data
|
||||
data = None
|
||||
return
|
||||
|
||||
if data == b'':
|
||||
# we got end-of-file so no need to reschedule a new read
|
||||
return
|
||||
|
||||
# reschedule a new read
|
||||
self._read_fut = self._loop._proactor.recv(self._sock, 4096)
|
||||
except ConnectionAbortedError as exc:
|
||||
if not self._closing:
|
||||
self._fatal_error(exc, 'Fatal read error on pipe transport')
|
||||
elif self._loop.get_debug():
|
||||
logger.debug("Read error on pipe transport while closing",
|
||||
exc_info=True)
|
||||
except ConnectionResetError as exc:
|
||||
self._force_close(exc)
|
||||
except OSError as exc:
|
||||
self._fatal_error(exc, 'Fatal read error on pipe transport')
|
||||
except futures.CancelledError:
|
||||
if not self._closing:
|
||||
raise
|
||||
else:
|
||||
self._read_fut.add_done_callback(self._loop_reading)
|
||||
finally:
|
||||
if data:
|
||||
self._protocol.data_received(data)
|
||||
elif data is not None:
|
||||
if self._loop.get_debug():
|
||||
logger.debug("%r received EOF", self)
|
||||
keep_open = self._protocol.eof_received()
|
||||
if not keep_open:
|
||||
self.close()
|
||||
|
||||
|
||||
class _ProactorBaseWritePipeTransport(_ProactorBasePipeTransport,
|
||||
transports.WriteTransport):
|
||||
"""Transport for write pipes."""
|
||||
|
||||
def write(self, data):
|
||||
if not isinstance(data, (bytes, bytearray, memoryview)):
|
||||
            raise TypeError('data argument must be byte-ish (%r)' %
                            type(data))
|
||||
if self._eof_written:
|
||||
raise RuntimeError('write_eof() already called')
|
||||
|
||||
if not data:
|
||||
return
|
||||
|
||||
if self._conn_lost:
|
||||
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
|
||||
logger.warning('socket.send() raised exception.')
|
||||
self._conn_lost += 1
|
||||
return
|
||||
|
||||
# Observable states:
|
||||
# 1. IDLE: _write_fut and _buffer both None
|
||||
# 2. WRITING: _write_fut set; _buffer None
|
||||
# 3. BACKED UP: _write_fut set; _buffer a bytearray
|
||||
# We always copy the data, so the caller can't modify it
|
||||
# while we're still waiting for the I/O to happen.
|
||||
if self._write_fut is None: # IDLE -> WRITING
|
||||
assert self._buffer is None
|
||||
# Pass a copy, except if it's already immutable.
|
||||
self._loop_writing(data=bytes(data))
|
||||
elif not self._buffer: # WRITING -> BACKED UP
|
||||
# Make a mutable copy which we can extend.
|
||||
self._buffer = bytearray(data)
|
||||
self._maybe_pause_protocol()
|
||||
else: # BACKED UP
|
||||
# Append to buffer (also copies).
|
||||
self._buffer.extend(data)
|
||||
self._maybe_pause_protocol()
|
||||
|
||||
def _loop_writing(self, f=None, data=None):
|
||||
try:
|
||||
assert f is self._write_fut
|
||||
self._write_fut = None
|
||||
self._pending_write = 0
|
||||
if f:
|
||||
f.result()
|
||||
if data is None:
|
||||
data = self._buffer
|
||||
self._buffer = None
|
||||
if not data:
|
||||
if self._closing:
|
||||
self._loop.call_soon(self._call_connection_lost, None)
|
||||
if self._eof_written:
|
||||
self._sock.shutdown(socket.SHUT_WR)
|
||||
# Now that we've reduced the buffer size, tell the
|
||||
# protocol to resume writing if it was paused. Note that
|
||||
# we do this last since the callback is called immediately
|
||||
# and it may add more data to the buffer (even causing the
|
||||
# protocol to be paused again).
|
||||
self._maybe_resume_protocol()
|
||||
else:
|
||||
self._write_fut = self._loop._proactor.send(self._sock, data)
|
||||
if not self._write_fut.done():
|
||||
assert self._pending_write == 0
|
||||
self._pending_write = len(data)
|
||||
self._write_fut.add_done_callback(self._loop_writing)
|
||||
self._maybe_pause_protocol()
|
||||
else:
|
||||
self._write_fut.add_done_callback(self._loop_writing)
|
||||
except ConnectionResetError as exc:
|
||||
self._force_close(exc)
|
||||
except OSError as exc:
|
||||
self._fatal_error(exc, 'Fatal write error on pipe transport')
|
||||
|
||||
def can_write_eof(self):
|
||||
return True
|
||||
|
||||
def write_eof(self):
|
||||
self.close()
|
||||
|
||||
def abort(self):
|
||||
self._force_close(None)
|
||||
|
||||
|
||||
class _ProactorWritePipeTransport(_ProactorBaseWritePipeTransport):
|
||||
def __init__(self, *args, **kw):
|
||||
super().__init__(*args, **kw)
|
||||
self._read_fut = self._loop._proactor.recv(self._sock, 16)
|
||||
self._read_fut.add_done_callback(self._pipe_closed)
|
||||
|
||||
def _pipe_closed(self, fut):
|
||||
if fut.cancelled():
|
||||
# the transport has been closed
|
||||
return
|
||||
assert fut.result() == b''
|
||||
if self._closing:
|
||||
assert self._read_fut is None
|
||||
return
|
||||
assert fut is self._read_fut, (fut, self._read_fut)
|
||||
self._read_fut = None
|
||||
if self._write_fut is not None:
|
||||
self._force_close(BrokenPipeError())
|
||||
else:
|
||||
self.close()
|
||||
|
||||
|
||||
class _ProactorDuplexPipeTransport(_ProactorReadPipeTransport,
|
||||
_ProactorBaseWritePipeTransport,
|
||||
transports.Transport):
|
||||
"""Transport for duplex pipes."""
|
||||
|
||||
def can_write_eof(self):
|
||||
return False
|
||||
|
||||
def write_eof(self):
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class _ProactorSocketTransport(_ProactorReadPipeTransport,
|
||||
_ProactorBaseWritePipeTransport,
|
||||
transports.Transport):
|
||||
"""Transport for connected sockets."""
|
||||
|
||||
def _set_extra(self, sock):
|
||||
self._extra['socket'] = sock
|
||||
try:
|
||||
self._extra['sockname'] = sock.getsockname()
|
||||
except (socket.error, AttributeError):
|
||||
if self._loop.get_debug():
|
||||
logger.warning("getsockname() failed on %r",
|
||||
sock, exc_info=True)
|
||||
if 'peername' not in self._extra:
|
||||
try:
|
||||
self._extra['peername'] = sock.getpeername()
|
||||
except (socket.error, AttributeError):
|
||||
if self._loop.get_debug():
|
||||
logger.warning("getpeername() failed on %r",
|
||||
sock, exc_info=True)
|
||||
|
||||
def can_write_eof(self):
|
||||
return True
|
||||
|
||||
def write_eof(self):
|
||||
if self._closing or self._eof_written:
|
||||
return
|
||||
self._eof_written = True
|
||||
if self._write_fut is None:
|
||||
self._sock.shutdown(socket.SHUT_WR)
|
||||
|
||||
|
||||
class BaseProactorEventLoop(base_events.BaseEventLoop):
|
||||
|
||||
def __init__(self, proactor):
|
||||
super().__init__()
|
||||
logger.debug('Using proactor: %s', proactor.__class__.__name__)
|
||||
self._proactor = proactor
|
||||
self._selector = proactor # convenient alias
|
||||
self._self_reading_future = None
|
||||
self._accept_futures = {} # socket file descriptor => Future
|
||||
proactor.set_loop(self)
|
||||
self._make_self_pipe()
|
||||
|
||||
def _make_socket_transport(self, sock, protocol, waiter=None,
|
||||
extra=None, server=None):
|
||||
return _ProactorSocketTransport(self, sock, protocol, waiter,
|
||||
extra, server)
|
||||
|
||||
def _make_duplex_pipe_transport(self, sock, protocol, waiter=None,
|
||||
extra=None):
|
||||
return _ProactorDuplexPipeTransport(self,
|
||||
sock, protocol, waiter, extra)
|
||||
|
||||
def _make_read_pipe_transport(self, sock, protocol, waiter=None,
|
||||
extra=None):
|
||||
return _ProactorReadPipeTransport(self, sock, protocol, waiter, extra)
|
||||
|
||||
def _make_write_pipe_transport(self, sock, protocol, waiter=None,
|
||||
extra=None):
|
||||
# We want connection_lost() to be called when other end closes
|
||||
return _ProactorWritePipeTransport(self,
|
||||
sock, protocol, waiter, extra)
|
||||
|
||||
def close(self):
|
||||
if self._running:
|
||||
raise RuntimeError("Cannot close a running event loop")
|
||||
if self.is_closed():
|
||||
return
|
||||
self._stop_accept_futures()
|
||||
self._close_self_pipe()
|
||||
super().close()
|
||||
self._proactor.close()
|
||||
self._proactor = None
|
||||
self._selector = None
|
||||
|
||||
def sock_recv(self, sock, n):
|
||||
return self._proactor.recv(sock, n)
|
||||
|
||||
def sock_sendall(self, sock, data):
|
||||
return self._proactor.send(sock, data)
|
||||
|
||||
def sock_connect(self, sock, address):
|
||||
try:
|
||||
base_events._check_resolved_address(sock, address)
|
||||
except ValueError as err:
|
||||
fut = futures.Future(loop=self)
|
||||
fut.set_exception(err)
|
||||
return fut
|
||||
else:
|
||||
return self._proactor.connect(sock, address)
|
||||
|
||||
def sock_accept(self, sock):
|
||||
return self._proactor.accept(sock)
|
||||
|
||||
def _socketpair(self):
|
||||
raise NotImplementedError
|
||||
|
||||
def _close_self_pipe(self):
|
||||
if self._self_reading_future is not None:
|
||||
self._self_reading_future.cancel()
|
||||
self._self_reading_future = None
|
||||
self._ssock.close()
|
||||
self._ssock = None
|
||||
self._csock.close()
|
||||
self._csock = None
|
||||
self._internal_fds -= 1
|
||||
|
||||
def _make_self_pipe(self):
|
||||
# A self-socket, really. :-)
|
||||
self._ssock, self._csock = self._socketpair()
|
||||
self._ssock.setblocking(False)
|
||||
self._csock.setblocking(False)
|
||||
self._internal_fds += 1
|
||||
# don't check the current loop because _make_self_pipe() is called
|
||||
# from the event loop constructor
|
||||
self._call_soon(self._loop_self_reading, (), check_loop=False)
|
||||
|
||||
def _loop_self_reading(self, f=None):
|
||||
try:
|
||||
if f is not None:
|
||||
f.result() # may raise
|
||||
f = self._proactor.recv(self._ssock, 4096)
|
||||
except:
|
||||
self.close()
|
||||
raise
|
||||
else:
|
||||
self._self_reading_future = f
|
||||
f.add_done_callback(self._loop_self_reading)
|
||||
|
||||
def _write_to_self(self):
|
||||
self._csock.send(b'\0')
|
||||
|
||||
def _start_serving(self, protocol_factory, sock, ssl=None, server=None):
|
||||
if ssl:
|
||||
raise ValueError('IocpEventLoop is incompatible with SSL.')
|
||||
|
||||
def loop(f=None):
|
||||
try:
|
||||
if f is not None:
|
||||
conn, addr = f.result()
|
||||
if self._debug:
|
||||
logger.debug("%r got a new connection from %r: %r",
|
||||
server, addr, conn)
|
||||
protocol = protocol_factory()
|
||||
self._make_socket_transport(
|
||||
conn, protocol,
|
||||
extra={'peername': addr}, server=server)
|
||||
if self.is_closed():
|
||||
return
|
||||
f = self._proactor.accept(sock)
|
||||
except OSError as exc:
|
||||
if sock.fileno() != -1:
|
||||
self.call_exception_handler({
|
||||
'message': 'Accept failed on a socket',
|
||||
'exception': exc,
|
||||
'socket': sock,
|
||||
})
|
||||
sock.close()
|
||||
elif self._debug:
|
||||
logger.debug("Accept failed on socket %r",
|
||||
sock, exc_info=True)
|
||||
except futures.CancelledError:
|
||||
sock.close()
|
||||
else:
|
||||
self._accept_futures[sock.fileno()] = f
|
||||
f.add_done_callback(loop)
|
||||
|
||||
self.call_soon(loop)
|
||||
|
||||
def _process_events(self, event_list):
|
||||
pass # XXX hard work currently done in poll
|
||||
|
||||
def _stop_accept_futures(self):
|
||||
for future in self._accept_futures.values():
|
||||
future.cancel()
|
||||
self._accept_futures.clear()
|
||||
|
||||
def _stop_serving(self, sock):
|
||||
self._stop_accept_futures()
|
||||
self._proactor._stop_serving(sock)
|
||||
sock.close()
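# Note: this base class is parameterized by the proactor object; the
# concrete Windows loop (ProactorEventLoop in windows_events.py, not
# part of this file) pairs it with an IOCP proactor.  A minimal,
# platform-guarded sketch of opting into it:
import sys
import asyncio

if sys.platform == 'win32':
    loop = asyncio.ProactorEventLoop()
    asyncio.set_event_loop(loop)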
|
||||
129
asyncio/protocols.py
Normal file
@@ -0,0 +1,129 @@
|
||||
"""Abstract Protocol class."""
|
||||
|
||||
__all__ = ['BaseProtocol', 'Protocol', 'DatagramProtocol',
|
||||
'SubprocessProtocol']
|
||||
|
||||
|
||||
class BaseProtocol:
|
||||
"""Common base class for protocol interfaces.
|
||||
|
||||
    Usually users implement protocols derived from BaseProtocol, such as
    Protocol or ProcessProtocol.

    The only case where BaseProtocol should be implemented directly is a
    write-only transport, such as a write pipe.
    """
|
||||
|
||||
def connection_made(self, transport):
|
||||
"""Called when a connection is made.
|
||||
|
||||
The argument is the transport representing the pipe connection.
|
||||
To receive data, wait for data_received() calls.
|
||||
When the connection is closed, connection_lost() is called.
|
||||
"""
|
||||
|
||||
def connection_lost(self, exc):
|
||||
"""Called when the connection is lost or closed.
|
||||
|
||||
The argument is an exception object or None (the latter
|
||||
meaning a regular EOF is received or the connection was
|
||||
aborted or closed).
|
||||
"""
|
||||
|
||||
def pause_writing(self):
|
||||
"""Called when the transport's buffer goes over the high-water mark.
|
||||
|
||||
Pause and resume calls are paired -- pause_writing() is called
|
||||
once when the buffer goes strictly over the high-water mark
|
||||
        (even if subsequent writes increase the buffer size even
|
||||
more), and eventually resume_writing() is called once when the
|
||||
buffer size reaches the low-water mark.
|
||||
|
||||
Note that if the buffer size equals the high-water mark,
|
||||
pause_writing() is not called -- it must go strictly over.
|
||||
Conversely, resume_writing() is called when the buffer size is
|
||||
equal or lower than the low-water mark. These end conditions
|
||||
are important to ensure that things go as expected when either
|
||||
mark is zero.
|
||||
|
||||
NOTE: This is the only Protocol callback that is not called
|
||||
through EventLoop.call_soon() -- if it were, it would have no
|
||||
effect when it's most needed (when the app keeps writing
|
||||
without yielding until pause_writing() is called).
|
||||
"""
|
||||
|
||||
def resume_writing(self):
|
||||
"""Called when the transport's buffer drains below the low-water mark.
|
||||
|
||||
See pause_writing() for details.
|
||||
"""
|
||||
|
||||
|
||||
class Protocol(BaseProtocol):
|
||||
"""Interface for stream protocol.
|
||||
|
||||
The user should implement this interface. They can inherit from
|
||||
this class but don't need to. The implementations here do
|
||||
nothing (they don't raise exceptions).
|
||||
|
||||
    When the user wants to request a transport, they pass a protocol
    factory to a utility function (e.g., EventLoop.create_connection()).
|
||||
|
||||
When the connection is made successfully, connection_made() is
|
||||
called with a suitable transport object. Then data_received()
|
||||
will be called 0 or more times with data (bytes) received from the
|
||||
transport; finally, connection_lost() will be called exactly once
|
||||
with either an exception object or None as an argument.
|
||||
|
||||
State machine of calls:
|
||||
|
||||
start -> CM [-> DR*] [-> ER?] -> CL -> end
|
||||
"""
|
||||
|
||||
def data_received(self, data):
|
||||
"""Called when some data is received.
|
||||
|
||||
The argument is a bytes object.
|
||||
"""
|
||||
|
||||
def eof_received(self):
|
||||
"""Called when the other end calls write_eof() or equivalent.
|
||||
|
||||
If this returns a false value (including None), the transport
|
||||
will close itself. If it returns a true value, closing the
|
||||
transport is up to the protocol.
|
||||
"""
|
||||
|
||||
|
||||
class DatagramProtocol(BaseProtocol):
|
||||
"""Interface for datagram protocol."""
|
||||
|
||||
def datagram_received(self, data, addr):
|
||||
"""Called when some datagram is received."""
|
||||
|
||||
def error_received(self, exc):
|
||||
"""Called when a send or receive operation raises an OSError.
|
||||
|
||||
(Other than BlockingIOError or InterruptedError.)
|
||||
"""
|
||||
|
||||
|
||||
class SubprocessProtocol(BaseProtocol):
|
||||
"""Interface for protocol for subprocess calls."""
|
||||
|
||||
def pipe_data_received(self, fd, data):
|
||||
"""Called when the subprocess writes data into stdout/stderr pipe.
|
||||
|
||||
fd is int file descriptor.
|
||||
data is bytes object.
|
||||
"""
|
||||
|
||||
def pipe_connection_lost(self, fd, exc):
|
||||
"""Called when a file descriptor associated with the child process is
|
||||
closed.
|
||||
|
||||
fd is the int file descriptor that was closed.
|
||||
"""
|
||||
|
||||
def process_exited(self):
|
||||
"""Called when subprocess has exited."""
|
||||
288
asyncio/queues.py
Normal file
@@ -0,0 +1,288 @@
|
||||
"""Queues"""
|
||||
|
||||
__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'JoinableQueue',
|
||||
'QueueFull', 'QueueEmpty']
|
||||
|
||||
import collections
|
||||
import heapq
|
||||
|
||||
from . import events
|
||||
from . import futures
|
||||
from . import locks
|
||||
from .tasks import coroutine
|
||||
|
||||
|
||||
class QueueEmpty(Exception):
|
||||
'Exception raised by Queue.get(block=0)/get_nowait().'
|
||||
pass
|
||||
|
||||
|
||||
class QueueFull(Exception):
|
||||
'Exception raised by Queue.put(block=0)/put_nowait().'
|
||||
pass
|
||||
|
||||
|
||||
class Queue:
|
||||
"""A queue, useful for coordinating producer and consumer coroutines.
|
||||
|
||||
If maxsize is less than or equal to zero, the queue size is infinite. If it
|
||||
is an integer greater than 0, then "yield from put()" will block when the
|
||||
queue reaches maxsize, until an item is removed by get().
|
||||
|
||||
Unlike the standard library Queue, you can reliably know this Queue's size
|
||||
with qsize(), since your single-threaded asyncio application won't be
|
||||
interrupted between calling qsize() and doing an operation on the Queue.
|
||||
"""
|
||||
|
||||
def __init__(self, maxsize=0, *, loop=None):
|
||||
if loop is None:
|
||||
self._loop = events.get_event_loop()
|
||||
else:
|
||||
self._loop = loop
|
||||
self._maxsize = maxsize
|
||||
|
||||
# Futures.
|
||||
self._getters = collections.deque()
|
||||
# Pairs of (item, Future).
|
||||
self._putters = collections.deque()
|
||||
self._init(maxsize)
|
||||
|
||||
def _init(self, maxsize):
|
||||
self._queue = collections.deque()
|
||||
|
||||
def _get(self):
|
||||
return self._queue.popleft()
|
||||
|
||||
def _put(self, item):
|
||||
self._queue.append(item)
|
||||
|
||||
def __repr__(self):
|
||||
return '<{} at {:#x} {}>'.format(
|
||||
type(self).__name__, id(self), self._format())
|
||||
|
||||
def __str__(self):
|
||||
return '<{} {}>'.format(type(self).__name__, self._format())
|
||||
|
||||
def _format(self):
|
||||
result = 'maxsize={!r}'.format(self._maxsize)
|
||||
if getattr(self, '_queue', None):
|
||||
result += ' _queue={!r}'.format(list(self._queue))
|
||||
if self._getters:
|
||||
result += ' _getters[{}]'.format(len(self._getters))
|
||||
if self._putters:
|
||||
result += ' _putters[{}]'.format(len(self._putters))
|
||||
return result
|
||||
|
||||
def _consume_done_getters(self):
|
||||
# Delete waiters at the head of the get() queue who've timed out.
|
||||
while self._getters and self._getters[0].done():
|
||||
self._getters.popleft()
|
||||
|
||||
def _consume_done_putters(self):
|
||||
# Delete waiters at the head of the put() queue who've timed out.
|
||||
while self._putters and self._putters[0][1].done():
|
||||
self._putters.popleft()
|
||||
|
||||
def qsize(self):
|
||||
"""Number of items in the queue."""
|
||||
return len(self._queue)
|
||||
|
||||
@property
|
||||
def maxsize(self):
|
||||
"""Number of items allowed in the queue."""
|
||||
return self._maxsize
|
||||
|
||||
def empty(self):
|
||||
"""Return True if the queue is empty, False otherwise."""
|
||||
return not self._queue
|
||||
|
||||
def full(self):
|
||||
"""Return True if there are maxsize items in the queue.
|
||||
|
||||
Note: if the Queue was initialized with maxsize=0 (the default),
|
||||
then full() is never True.
|
||||
"""
|
||||
if self._maxsize <= 0:
|
||||
return False
|
||||
else:
|
||||
return self.qsize() >= self._maxsize
|
||||
|
||||
@coroutine
|
||||
def put(self, item):
|
||||
"""Put an item into the queue.
|
||||
|
||||
        If you yield from put(), wait until a free slot is available
        before adding the item.
|
||||
"""
|
||||
self._consume_done_getters()
|
||||
if self._getters:
|
||||
assert not self._queue, (
|
||||
'queue non-empty, why are getters waiting?')
|
||||
|
||||
getter = self._getters.popleft()
|
||||
|
||||
# Use _put and _get instead of passing item straight to getter, in
|
||||
# case a subclass has logic that must run (e.g. JoinableQueue).
|
||||
self._put(item)
|
||||
getter.set_result(self._get())
|
||||
|
||||
elif self._maxsize > 0 and self._maxsize <= self.qsize():
|
||||
waiter = futures.Future(loop=self._loop)
|
||||
|
||||
self._putters.append((item, waiter))
|
||||
yield from waiter
|
||||
|
||||
else:
|
||||
self._put(item)
|
||||
|
||||
def put_nowait(self, item):
|
||||
"""Put an item into the queue without blocking.
|
||||
|
||||
If no free slot is immediately available, raise QueueFull.
|
||||
"""
|
||||
self._consume_done_getters()
|
||||
if self._getters:
|
||||
assert not self._queue, (
|
||||
'queue non-empty, why are getters waiting?')
|
||||
|
||||
getter = self._getters.popleft()
|
||||
|
||||
# Use _put and _get instead of passing item straight to getter, in
|
||||
# case a subclass has logic that must run (e.g. JoinableQueue).
|
||||
self._put(item)
|
||||
getter.set_result(self._get())
|
||||
|
||||
elif self._maxsize > 0 and self._maxsize <= self.qsize():
|
||||
raise QueueFull
|
||||
else:
|
||||
self._put(item)
|
||||
|
||||
@coroutine
|
||||
def get(self):
|
||||
"""Remove and return an item from the queue.
|
||||
|
||||
        If you yield from get(), wait until an item is available.
|
||||
"""
|
||||
self._consume_done_putters()
|
||||
if self._putters:
|
||||
assert self.full(), 'queue not full, why are putters waiting?'
|
||||
item, putter = self._putters.popleft()
|
||||
self._put(item)
|
||||
|
||||
# When a getter runs and frees up a slot so this putter can
|
||||
# run, we need to defer the put for a tick to ensure that
|
||||
# getters and putters alternate perfectly. See
|
||||
# ChannelTest.test_wait.
|
||||
self._loop.call_soon(putter._set_result_unless_cancelled, None)
|
||||
|
||||
return self._get()
|
||||
|
||||
elif self.qsize():
|
||||
return self._get()
|
||||
else:
|
||||
waiter = futures.Future(loop=self._loop)
|
||||
|
||||
self._getters.append(waiter)
|
||||
return (yield from waiter)
|
||||
|
||||
def get_nowait(self):
|
||||
"""Remove and return an item from the queue.
|
||||
|
||||
Return an item if one is immediately available, else raise QueueEmpty.
|
||||
"""
|
||||
self._consume_done_putters()
|
||||
if self._putters:
|
||||
assert self.full(), 'queue not full, why are putters waiting?'
|
||||
item, putter = self._putters.popleft()
|
||||
self._put(item)
|
||||
# Wake putter on next tick.
|
||||
putter.set_result(None)
|
||||
|
||||
return self._get()
|
||||
|
||||
elif self.qsize():
|
||||
return self._get()
|
||||
else:
|
||||
raise QueueEmpty
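# Illustrative usage sketch (not part of the module): a producer/consumer
# pair coordinated by a bounded Queue; with maxsize=1 the producer blocks
# in put() until the consumer drains a slot.
import asyncio

@asyncio.coroutine
def produce(queue):
    for i in range(3):
        yield from queue.put(i)

@asyncio.coroutine
def consume(queue):
    items = []
    for _ in range(3):
        items.append((yield from queue.get()))
    return items

loop = asyncio.get_event_loop()
queue = asyncio.Queue(maxsize=1, loop=loop)
_, items = loop.run_until_complete(
    asyncio.gather(produce(queue), consume(queue), loop=loop))
assert items == [0, 1, 2]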
|
||||
|
||||
|
||||
class PriorityQueue(Queue):
|
||||
"""A subclass of Queue; retrieves entries in priority order (lowest first).
|
||||
|
||||
Entries are typically tuples of the form: (priority number, data).
|
||||
"""
|
||||
|
||||
def _init(self, maxsize):
|
||||
self._queue = []
|
||||
|
||||
def _put(self, item, heappush=heapq.heappush):
|
||||
heappush(self._queue, item)
|
||||
|
||||
def _get(self, heappop=heapq.heappop):
|
||||
return heappop(self._queue)
|
||||
|
||||
|
||||
class LifoQueue(Queue):
|
||||
"""A subclass of Queue that retrieves most recently added entries first."""
|
||||
|
||||
def _init(self, maxsize):
|
||||
self._queue = []
|
||||
|
||||
def _put(self, item):
|
||||
self._queue.append(item)
|
||||
|
||||
def _get(self):
|
||||
return self._queue.pop()
|
||||
|
||||
|
||||
class JoinableQueue(Queue):
|
||||
"""A subclass of Queue with task_done() and join() methods."""
|
||||
|
||||
def __init__(self, maxsize=0, *, loop=None):
|
||||
super().__init__(maxsize=maxsize, loop=loop)
|
||||
self._unfinished_tasks = 0
|
||||
self._finished = locks.Event(loop=self._loop)
|
||||
self._finished.set()
|
||||
|
||||
def _format(self):
|
||||
result = Queue._format(self)
|
||||
if self._unfinished_tasks:
|
||||
result += ' tasks={}'.format(self._unfinished_tasks)
|
||||
return result
|
||||
|
||||
def _put(self, item):
|
||||
super()._put(item)
|
||||
self._unfinished_tasks += 1
|
||||
self._finished.clear()
|
||||
|
||||
def task_done(self):
|
||||
"""Indicate that a formerly enqueued task is complete.
|
||||
|
||||
Used by queue consumers. For each get() used to fetch a task,
|
||||
a subsequent call to task_done() tells the queue that the processing
|
||||
on the task is complete.
|
||||
|
||||
If a join() is currently blocking, it will resume when all items have
|
||||
been processed (meaning that a task_done() call was received for every
|
||||
item that had been put() into the queue).
|
||||
|
||||
Raises ValueError if called more times than there were items placed in
|
||||
the queue.
|
||||
"""
|
||||
if self._unfinished_tasks <= 0:
|
||||
raise ValueError('task_done() called too many times')
|
||||
self._unfinished_tasks -= 1
|
||||
if self._unfinished_tasks == 0:
|
||||
self._finished.set()
|
||||
|
||||
@coroutine
|
||||
def join(self):
|
||||
"""Block until all items in the queue have been gotten and processed.
|
||||
|
||||
The count of unfinished tasks goes up whenever an item is added to the
|
||||
queue. The count goes down whenever a consumer thread calls task_done()
|
||||
to indicate that the item was retrieved and all work on it is complete.
|
||||
When the count of unfinished tasks drops to zero, join() unblocks.
|
||||
"""
|
||||
if self._unfinished_tasks > 0:
|
||||
yield from self._finished.wait()
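# Illustrative usage sketch (not part of the module): join() unblocks
# only after every item fetched with get() has been matched by task_done().
import asyncio

@asyncio.coroutine
def drain(queue):
    while not queue.empty():
        yield from queue.get()
        queue.task_done()

loop = asyncio.get_event_loop()
queue = asyncio.JoinableQueue(loop=loop)
for i in range(3):
    queue.put_nowait(i)
loop.run_until_complete(drain(queue))
loop.run_until_complete(queue.join())   # returns at once: all tasks done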
|
||||
1003
asyncio/selector_events.py
Normal file
File diff suppressed because it is too large
590
asyncio/selectors.py
Normal file
@@ -0,0 +1,590 @@
"""Selectors module.

This module allows high-level and efficient I/O multiplexing, built upon the
`select` module primitives.
"""


from abc import ABCMeta, abstractmethod
from collections import namedtuple, Mapping
import math
import select
import sys


# generic events, that must be mapped to implementation-specific ones
EVENT_READ = (1 << 0)
EVENT_WRITE = (1 << 1)


def _fileobj_to_fd(fileobj):
    """Return a file descriptor from a file object.

    Parameters:
    fileobj -- file object or file descriptor

    Returns:
    corresponding file descriptor

    Raises:
    ValueError if the object is invalid
    """
    if isinstance(fileobj, int):
        fd = fileobj
    else:
        try:
            fd = int(fileobj.fileno())
        except (AttributeError, TypeError, ValueError):
            raise ValueError("Invalid file object: "
                             "{!r}".format(fileobj)) from None
    if fd < 0:
        raise ValueError("Invalid file descriptor: {}".format(fd))
    return fd


SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
"""Object used to associate a file object to its backing file descriptor,
selected event mask and attached data."""


class _SelectorMapping(Mapping):
    """Mapping of file objects to selector keys."""

    def __init__(self, selector):
        self._selector = selector

    def __len__(self):
        return len(self._selector._fd_to_key)

    def __getitem__(self, fileobj):
        try:
            fd = self._selector._fileobj_lookup(fileobj)
            return self._selector._fd_to_key[fd]
        except KeyError:
            raise KeyError("{!r} is not registered".format(fileobj)) from None

    def __iter__(self):
        return iter(self._selector._fd_to_key)


class BaseSelector(metaclass=ABCMeta):
    """Selector abstract base class.

    A selector supports registering file objects to be monitored for specific
    I/O events.

    A file object is a file descriptor or any object with a `fileno()` method.
    An arbitrary object can be attached to the file object, which can be used
    for example to store context information, a callback, etc.

    A selector can use various implementations (select(), poll(), epoll()...)
    depending on the platform. The default `Selector` class uses the most
    efficient implementation on the current platform.
    """

    @abstractmethod
    def register(self, fileobj, events, data=None):
        """Register a file object.

        Parameters:
        fileobj -- file object or file descriptor
        events  -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE)
        data    -- attached data

        Returns:
        SelectorKey instance

        Raises:
        ValueError if events is invalid
        KeyError if fileobj is already registered
        OSError if fileobj is closed or otherwise is unacceptable to
                the underlying system call (if a system call is made)

        Note:
        OSError may or may not be raised
        """
        raise NotImplementedError

    @abstractmethod
    def unregister(self, fileobj):
        """Unregister a file object.

        Parameters:
        fileobj -- file object or file descriptor

        Returns:
        SelectorKey instance

        Raises:
        KeyError if fileobj is not registered

        Note:
        If fileobj is registered but has since been closed this does
        *not* raise OSError (even if the wrapped syscall does)
        """
        raise NotImplementedError

    def modify(self, fileobj, events, data=None):
        """Change a registered file object monitored events or attached data.

        Parameters:
        fileobj -- file object or file descriptor
        events  -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE)
        data    -- attached data

        Returns:
        SelectorKey instance

        Raises:
        Anything that unregister() or register() raises
        """
        self.unregister(fileobj)
        return self.register(fileobj, events, data)

    @abstractmethod
    def select(self, timeout=None):
        """Perform the actual selection, until some monitored file objects are
        ready or a timeout expires.

        Parameters:
        timeout -- if timeout > 0, this specifies the maximum wait time, in
                   seconds
                   if timeout <= 0, the select() call won't block, and will
                   report the currently ready file objects
                   if timeout is None, select() will block until a monitored
                   file object becomes ready

        Returns:
        list of (key, events) for ready file objects
        `events` is a bitwise mask of EVENT_READ|EVENT_WRITE
        """
        raise NotImplementedError

    def close(self):
        """Close the selector.

        This must be called to make sure that any underlying resource is freed.
        """
        pass

    def get_key(self, fileobj):
        """Return the key associated to a registered file object.

        Returns:
        SelectorKey for this file object
        """
        mapping = self.get_map()
        try:
            return mapping[fileobj]
        except KeyError:
            raise KeyError("{!r} is not registered".format(fileobj)) from None

    @abstractmethod
    def get_map(self):
        """Return a mapping of file objects to selector keys."""
        raise NotImplementedError

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()


class _BaseSelectorImpl(BaseSelector):
    """Base selector implementation."""

    def __init__(self):
        # this maps file descriptors to keys
        self._fd_to_key = {}
        # read-only mapping returned by get_map()
        self._map = _SelectorMapping(self)

    def _fileobj_lookup(self, fileobj):
        """Return a file descriptor from a file object.

        This wraps _fileobj_to_fd() to do an exhaustive search in case
        the object is invalid but we still have it in our map.  This
        is used by unregister() so we can unregister an object that
        was previously registered even if it is closed.  It is also
        used by _SelectorMapping.
        """
        try:
            return _fileobj_to_fd(fileobj)
        except ValueError:
            # Do an exhaustive search.
            for key in self._fd_to_key.values():
                if key.fileobj is fileobj:
                    return key.fd
            # Raise ValueError after all.
            raise

    def register(self, fileobj, events, data=None):
        if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
            raise ValueError("Invalid events: {!r}".format(events))

        key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)

        if key.fd in self._fd_to_key:
            raise KeyError("{!r} (FD {}) is already registered"
                           .format(fileobj, key.fd))

        self._fd_to_key[key.fd] = key
        return key

    def unregister(self, fileobj):
        try:
            key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
        except KeyError:
            raise KeyError("{!r} is not registered".format(fileobj)) from None
        return key

    def modify(self, fileobj, events, data=None):
        # TODO: Subclasses can probably optimize this even further.
        try:
            key = self._fd_to_key[self._fileobj_lookup(fileobj)]
        except KeyError:
            raise KeyError("{!r} is not registered".format(fileobj)) from None
        if events != key.events:
            self.unregister(fileobj)
            key = self.register(fileobj, events, data)
        elif data != key.data:
            # Use a shortcut to update the data.
            key = key._replace(data=data)
            self._fd_to_key[key.fd] = key
        return key

    def close(self):
        self._fd_to_key.clear()

    def get_map(self):
        return self._map

    def _key_from_fd(self, fd):
        """Return the key associated to a given file descriptor.

        Parameters:
        fd -- file descriptor

        Returns:
        corresponding key, or None if not found
        """
        try:
            return self._fd_to_key[fd]
        except KeyError:
            return None


class SelectSelector(_BaseSelectorImpl):
    """Select-based selector."""

    def __init__(self):
        super().__init__()
        self._readers = set()
        self._writers = set()

    def register(self, fileobj, events, data=None):
        key = super().register(fileobj, events, data)
        if events & EVENT_READ:
            self._readers.add(key.fd)
        if events & EVENT_WRITE:
            self._writers.add(key.fd)
        return key

    def unregister(self, fileobj):
        key = super().unregister(fileobj)
        self._readers.discard(key.fd)
        self._writers.discard(key.fd)
        return key

    if sys.platform == 'win32':
        def _select(self, r, w, _, timeout=None):
            r, w, x = select.select(r, w, w, timeout)
            return r, w + x, []
    else:
        _select = select.select

    def select(self, timeout=None):
        timeout = None if timeout is None else max(timeout, 0)
        ready = []
        try:
            r, w, _ = self._select(self._readers, self._writers, [], timeout)
        except InterruptedError:
            return ready
        r = set(r)
        w = set(w)
        for fd in r | w:
            events = 0
            if fd in r:
                events |= EVENT_READ
            if fd in w:
                events |= EVENT_WRITE

            key = self._key_from_fd(fd)
            if key:
                ready.append((key, events & key.events))
        return ready


if hasattr(select, 'poll'):

    class PollSelector(_BaseSelectorImpl):
        """Poll-based selector."""

        def __init__(self):
            super().__init__()
            self._poll = select.poll()

        def register(self, fileobj, events, data=None):
            key = super().register(fileobj, events, data)
            poll_events = 0
            if events & EVENT_READ:
                poll_events |= select.POLLIN
            if events & EVENT_WRITE:
                poll_events |= select.POLLOUT
            self._poll.register(key.fd, poll_events)
            return key

        def unregister(self, fileobj):
            key = super().unregister(fileobj)
            self._poll.unregister(key.fd)
            return key

        def select(self, timeout=None):
            if timeout is None:
                timeout = None
            elif timeout <= 0:
                timeout = 0
            else:
                # poll() has a resolution of 1 millisecond, round away from
                # zero to wait *at least* timeout seconds.
                timeout = math.ceil(timeout * 1e3)
            ready = []
            try:
                fd_event_list = self._poll.poll(timeout)
            except InterruptedError:
                return ready
            for fd, event in fd_event_list:
                events = 0
                if event & ~select.POLLIN:
                    events |= EVENT_WRITE
                if event & ~select.POLLOUT:
                    events |= EVENT_READ

                key = self._key_from_fd(fd)
                if key:
                    ready.append((key, events & key.events))
            return ready


if hasattr(select, 'epoll'):

    class EpollSelector(_BaseSelectorImpl):
        """Epoll-based selector."""

        def __init__(self):
            super().__init__()
            self._epoll = select.epoll()

        def fileno(self):
            return self._epoll.fileno()

        def register(self, fileobj, events, data=None):
            key = super().register(fileobj, events, data)
            epoll_events = 0
            if events & EVENT_READ:
                epoll_events |= select.EPOLLIN
            if events & EVENT_WRITE:
                epoll_events |= select.EPOLLOUT
            self._epoll.register(key.fd, epoll_events)
            return key

        def unregister(self, fileobj):
            key = super().unregister(fileobj)
            try:
                self._epoll.unregister(key.fd)
            except OSError:
                # This can happen if the FD was closed since it
                # was registered.
                pass
            return key

        def select(self, timeout=None):
            if timeout is None:
                timeout = -1
            elif timeout <= 0:
                timeout = 0
            else:
                # epoll_wait() has a resolution of 1 millisecond, round away
                # from zero to wait *at least* timeout seconds.
                timeout = math.ceil(timeout * 1e3) * 1e-3

            # epoll_wait() expects `maxevents` to be greater than zero;
            # we want to make sure that `select()` can be called when no
            # FD is registered.
            max_ev = max(len(self._fd_to_key), 1)

            ready = []
            try:
                fd_event_list = self._epoll.poll(timeout, max_ev)
            except InterruptedError:
                return ready
            for fd, event in fd_event_list:
                events = 0
                if event & ~select.EPOLLIN:
                    events |= EVENT_WRITE
                if event & ~select.EPOLLOUT:
                    events |= EVENT_READ

                key = self._key_from_fd(fd)
                if key:
                    ready.append((key, events & key.events))
            return ready

        def close(self):
            self._epoll.close()
            super().close()


if hasattr(select, 'devpoll'):

    class DevpollSelector(_BaseSelectorImpl):
        """Solaris /dev/poll selector."""

        def __init__(self):
            super().__init__()
            self._devpoll = select.devpoll()

        def fileno(self):
            return self._devpoll.fileno()

        def register(self, fileobj, events, data=None):
            key = super().register(fileobj, events, data)
            poll_events = 0
            if events & EVENT_READ:
                poll_events |= select.POLLIN
            if events & EVENT_WRITE:
                poll_events |= select.POLLOUT
            self._devpoll.register(key.fd, poll_events)
            return key

        def unregister(self, fileobj):
            key = super().unregister(fileobj)
            self._devpoll.unregister(key.fd)
            return key

        def select(self, timeout=None):
            if timeout is None:
                timeout = None
            elif timeout <= 0:
                timeout = 0
            else:
                # devpoll() has a resolution of 1 millisecond, round away from
                # zero to wait *at least* timeout seconds.
                timeout = math.ceil(timeout * 1e3)
            ready = []
            try:
                fd_event_list = self._devpoll.poll(timeout)
            except InterruptedError:
                return ready
            for fd, event in fd_event_list:
                events = 0
                if event & ~select.POLLIN:
                    events |= EVENT_WRITE
                if event & ~select.POLLOUT:
                    events |= EVENT_READ

                key = self._key_from_fd(fd)
                if key:
                    ready.append((key, events & key.events))
            return ready

        def close(self):
            self._devpoll.close()
            super().close()


if hasattr(select, 'kqueue'):

    class KqueueSelector(_BaseSelectorImpl):
        """Kqueue-based selector."""

        def __init__(self):
            super().__init__()
            self._kqueue = select.kqueue()

        def fileno(self):
            return self._kqueue.fileno()

        def register(self, fileobj, events, data=None):
            key = super().register(fileobj, events, data)
            if events & EVENT_READ:
                kev = select.kevent(key.fd, select.KQ_FILTER_READ,
                                    select.KQ_EV_ADD)
                self._kqueue.control([kev], 0, 0)
            if events & EVENT_WRITE:
                kev = select.kevent(key.fd, select.KQ_FILTER_WRITE,
                                    select.KQ_EV_ADD)
                self._kqueue.control([kev], 0, 0)
            return key

        def unregister(self, fileobj):
            key = super().unregister(fileobj)
            if key.events & EVENT_READ:
                kev = select.kevent(key.fd, select.KQ_FILTER_READ,
                                    select.KQ_EV_DELETE)
                try:
                    self._kqueue.control([kev], 0, 0)
                except OSError:
                    # This can happen if the FD was closed since it
                    # was registered.
                    pass
            if key.events & EVENT_WRITE:
                kev = select.kevent(key.fd, select.KQ_FILTER_WRITE,
                                    select.KQ_EV_DELETE)
                try:
                    self._kqueue.control([kev], 0, 0)
                except OSError:
                    # See comment above.
                    pass
            return key

        def select(self, timeout=None):
            timeout = None if timeout is None else max(timeout, 0)
            max_ev = len(self._fd_to_key)
            ready = []
            try:
                kev_list = self._kqueue.control(None, max_ev, timeout)
            except InterruptedError:
                return ready
            for kev in kev_list:
                fd = kev.ident
                flag = kev.filter
                events = 0
                if flag == select.KQ_FILTER_READ:
                    events |= EVENT_READ
                if flag == select.KQ_FILTER_WRITE:
                    events |= EVENT_WRITE

                key = self._key_from_fd(fd)
                if key:
                    ready.append((key, events & key.events))
            return ready

        def close(self):
            self._kqueue.close()
            super().close()


# Choose the best implementation: roughly, epoll|kqueue|devpoll > poll > select.
# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
if 'KqueueSelector' in globals():
    DefaultSelector = KqueueSelector
elif 'EpollSelector' in globals():
    DefaultSelector = EpollSelector
elif 'DevpollSelector' in globals():
    DefaultSelector = DevpollSelector
elif 'PollSelector' in globals():
    DefaultSelector = PollSelector
else:
    DefaultSelector = SelectSelector
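A short sketch of the selector API defined above (illustrative only, not part of the commit): a single-threaded accept/echo loop driven by DefaultSelector, with the per-connection callback attached through the `data` slot of the SelectorKey.

import socket
import selectors  # the module above, assuming its stdlib name

sel = selectors.DefaultSelector()

def accept(server):
    conn, _ = server.accept()
    conn.setblocking(False)
    sel.register(conn, selectors.EVENT_READ, data=echo)

def echo(conn):
    data = conn.recv(1024)
    if data:
        conn.send(data)  # naive: assumes the kernel buffer takes it all
    else:
        sel.unregister(conn)
        conn.close()

server = socket.socket()
server.bind(('127.0.0.1', 0))
server.listen(5)
server.setblocking(False)
sel.register(server, selectors.EVENT_READ, data=accept)

while True:  # runs forever; interrupt to stop
    for key, events in sel.select(timeout=1.0):
        callback = key.data
        callback(key.fileobj)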
485
asyncio/streams.py
Normal file
@@ -0,0 +1,485 @@
"""Stream-related things."""

__all__ = ['StreamReader', 'StreamWriter', 'StreamReaderProtocol',
           'open_connection', 'start_server',
           'IncompleteReadError',
           ]

import socket

if hasattr(socket, 'AF_UNIX'):
    __all__.extend(['open_unix_connection', 'start_unix_server'])

from . import coroutines
from . import events
from . import futures
from . import protocols
from .coroutines import coroutine
from .log import logger


_DEFAULT_LIMIT = 2**16


class IncompleteReadError(EOFError):
    """
    Incomplete read error. Attributes:

    - partial: read bytes string before the end of stream was reached
    - expected: total number of expected bytes
    """
    def __init__(self, partial, expected):
        EOFError.__init__(self, "%s bytes read on a total of %s expected bytes"
                                % (len(partial), expected))
        self.partial = partial
        self.expected = expected


@coroutine
def open_connection(host=None, port=None, *,
                    loop=None, limit=_DEFAULT_LIMIT, **kwds):
    """A wrapper for create_connection() returning a (reader, writer) pair.

    The reader returned is a StreamReader instance; the writer is a
    StreamWriter instance.

    The arguments are all the usual arguments to create_connection()
    except protocol_factory; most common are positional host and port,
    with various optional keyword arguments following.

    Additional optional keyword arguments are loop (to set the event loop
    instance to use) and limit (to set the buffer limit passed to the
    StreamReader).

    (If you want to customize the StreamReader and/or
    StreamReaderProtocol classes, just copy the code -- there's
    really nothing special here except some convenience.)
    """
    if loop is None:
        loop = events.get_event_loop()
    reader = StreamReader(limit=limit, loop=loop)
    protocol = StreamReaderProtocol(reader, loop=loop)
    transport, _ = yield from loop.create_connection(
        lambda: protocol, host, port, **kwds)
    writer = StreamWriter(transport, protocol, reader, loop)
    return reader, writer


@coroutine
def start_server(client_connected_cb, host=None, port=None, *,
                 loop=None, limit=_DEFAULT_LIMIT, **kwds):
    """Start a socket server, call back for each client connected.

    The first parameter, `client_connected_cb`, takes two parameters:
    client_reader, client_writer.  client_reader is a StreamReader
    object, while client_writer is a StreamWriter object.  This
    parameter can either be a plain callback function or a coroutine;
    if it is a coroutine, it will be automatically converted into a
    Task.

    The rest of the arguments are all the usual arguments to
    loop.create_server() except protocol_factory; most common are
    positional host and port, with various optional keyword arguments
    following.  The return value is the same as loop.create_server().

    Additional optional keyword arguments are loop (to set the event loop
    instance to use) and limit (to set the buffer limit passed to the
    StreamReader).

    The return value is the same as loop.create_server(), i.e. a
    Server object which can be used to stop the service.
    """
    if loop is None:
        loop = events.get_event_loop()

    def factory():
        reader = StreamReader(limit=limit, loop=loop)
        protocol = StreamReaderProtocol(reader, client_connected_cb,
                                        loop=loop)
        return protocol

    return (yield from loop.create_server(factory, host, port, **kwds))


if hasattr(socket, 'AF_UNIX'):
    # UNIX Domain Sockets are supported on this platform

    @coroutine
    def open_unix_connection(path=None, *,
                             loop=None, limit=_DEFAULT_LIMIT, **kwds):
        """Similar to `open_connection` but works with UNIX Domain Sockets."""
        if loop is None:
            loop = events.get_event_loop()
        reader = StreamReader(limit=limit, loop=loop)
        protocol = StreamReaderProtocol(reader, loop=loop)
        transport, _ = yield from loop.create_unix_connection(
            lambda: protocol, path, **kwds)
        writer = StreamWriter(transport, protocol, reader, loop)
        return reader, writer

    @coroutine
    def start_unix_server(client_connected_cb, path=None, *,
                          loop=None, limit=_DEFAULT_LIMIT, **kwds):
        """Similar to `start_server` but works with UNIX Domain Sockets."""
        if loop is None:
            loop = events.get_event_loop()

        def factory():
            reader = StreamReader(limit=limit, loop=loop)
            protocol = StreamReaderProtocol(reader, client_connected_cb,
                                            loop=loop)
            return protocol

        return (yield from loop.create_unix_server(factory, path, **kwds))


class FlowControlMixin(protocols.Protocol):
    """Reusable flow control logic for StreamWriter.drain().

    This implements the protocol methods pause_writing(),
    resume_writing() and connection_lost().  If the subclass overrides
    these it must call the super methods.

    StreamWriter.drain() must wait for _drain_helper() coroutine.
    """

    def __init__(self, loop=None):
        self._loop = loop  # May be None; we may never need it.
        self._paused = False
        self._drain_waiter = None
        self._connection_lost = False

    def pause_writing(self):
        assert not self._paused
        self._paused = True
        if self._loop.get_debug():
            logger.debug("%r pauses writing", self)

    def resume_writing(self):
        assert self._paused
        self._paused = False
        if self._loop.get_debug():
            logger.debug("%r resumes writing", self)

        waiter = self._drain_waiter
        if waiter is not None:
            self._drain_waiter = None
            if not waiter.done():
                waiter.set_result(None)

    def connection_lost(self, exc):
        self._connection_lost = True
        # Wake up the writer if currently paused.
        if not self._paused:
            return
        waiter = self._drain_waiter
        if waiter is None:
            return
        self._drain_waiter = None
        if waiter.done():
            return
        if exc is None:
            waiter.set_result(None)
        else:
            waiter.set_exception(exc)

    @coroutine
    def _drain_helper(self):
        if self._connection_lost:
            raise ConnectionResetError('Connection lost')
        if not self._paused:
            return
        waiter = self._drain_waiter
        assert waiter is None or waiter.cancelled()
        waiter = futures.Future(loop=self._loop)
        self._drain_waiter = waiter
        yield from waiter


class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
    """Helper class to adapt between Protocol and StreamReader.

    (This is a helper class instead of making StreamReader itself a
    Protocol subclass, because the StreamReader has other potential
    uses, and to prevent the user of the StreamReader to accidentally
    call inappropriate methods of the protocol.)
    """

    def __init__(self, stream_reader, client_connected_cb=None, loop=None):
        super().__init__(loop=loop)
        self._stream_reader = stream_reader
        self._stream_writer = None
        self._client_connected_cb = client_connected_cb

    def connection_made(self, transport):
        self._stream_reader.set_transport(transport)
        if self._client_connected_cb is not None:
            self._stream_writer = StreamWriter(transport, self,
                                               self._stream_reader,
                                               self._loop)
            res = self._client_connected_cb(self._stream_reader,
                                            self._stream_writer)
            if coroutines.iscoroutine(res):
                self._loop.create_task(res)

    def connection_lost(self, exc):
        if exc is None:
            self._stream_reader.feed_eof()
        else:
            self._stream_reader.set_exception(exc)
        super().connection_lost(exc)

    def data_received(self, data):
        self._stream_reader.feed_data(data)

    def eof_received(self):
        self._stream_reader.feed_eof()


class StreamWriter:
    """Wraps a Transport.

    This exposes write(), writelines(), [can_]write_eof(),
    get_extra_info() and close().  It adds drain() which returns an
    optional Future on which you can wait for flow control.  It also
    adds a transport property which references the Transport
    directly.
    """

    def __init__(self, transport, protocol, reader, loop):
        self._transport = transport
        self._protocol = protocol
        # drain() expects that the reader has an exception() method
        assert reader is None or isinstance(reader, StreamReader)
        self._reader = reader
        self._loop = loop

    def __repr__(self):
        info = [self.__class__.__name__, 'transport=%r' % self._transport]
        if self._reader is not None:
            info.append('reader=%r' % self._reader)
        return '<%s>' % ' '.join(info)

    @property
    def transport(self):
        return self._transport

    def write(self, data):
        self._transport.write(data)

    def writelines(self, data):
        self._transport.writelines(data)

    def write_eof(self):
        return self._transport.write_eof()

    def can_write_eof(self):
        return self._transport.can_write_eof()

    def close(self):
        return self._transport.close()

    def get_extra_info(self, name, default=None):
        return self._transport.get_extra_info(name, default)

    @coroutine
    def drain(self):
        """Flush the write buffer.

        The intended use is to write

          w.write(data)
          yield from w.drain()
        """
        if self._reader is not None:
            exc = self._reader.exception()
            if exc is not None:
                raise exc
        yield from self._protocol._drain_helper()


class StreamReader:

    def __init__(self, limit=_DEFAULT_LIMIT, loop=None):
        # The line length limit is a security feature;
        # it also doubles as half the buffer limit.
        self._limit = limit
        if loop is None:
            loop = events.get_event_loop()
        self._loop = loop
        self._buffer = bytearray()
        self._eof = False    # Whether we're done.
        self._waiter = None  # A future.
        self._exception = None
        self._transport = None
        self._paused = False

    def exception(self):
        return self._exception

    def set_exception(self, exc):
        self._exception = exc

        waiter = self._waiter
        if waiter is not None:
            self._waiter = None
            if not waiter.cancelled():
                waiter.set_exception(exc)

    def set_transport(self, transport):
        assert self._transport is None, 'Transport already set'
        self._transport = transport

    def _maybe_resume_transport(self):
        if self._paused and len(self._buffer) <= self._limit:
            self._paused = False
            self._transport.resume_reading()

    def feed_eof(self):
        self._eof = True
        waiter = self._waiter
        if waiter is not None:
            self._waiter = None
            if not waiter.cancelled():
                waiter.set_result(True)

    def at_eof(self):
        """Return True if the buffer is empty and 'feed_eof' was called."""
        return self._eof and not self._buffer

    def feed_data(self, data):
        assert not self._eof, 'feed_data after feed_eof'

        if not data:
            return

        self._buffer.extend(data)

        waiter = self._waiter
        if waiter is not None:
            self._waiter = None
            if not waiter.cancelled():
                waiter.set_result(False)

        if (self._transport is not None and
                not self._paused and
                len(self._buffer) > 2*self._limit):
            try:
                self._transport.pause_reading()
            except NotImplementedError:
                # The transport can't be paused.
                # We'll just have to buffer all data.
                # Forget the transport so we don't keep trying.
                self._transport = None
            else:
                self._paused = True

    def _create_waiter(self, func_name):
        # StreamReader uses a future to link the protocol feed_data() method
        # to a read coroutine. Running two read coroutines at the same time
        # would have an unexpected behaviour. It would not be possible to know
        # which coroutine would get the next data.
        if self._waiter is not None:
            raise RuntimeError('%s() called while another coroutine is '
                               'already waiting for incoming data' % func_name)
        return futures.Future(loop=self._loop)

    @coroutine
    def readline(self):
        if self._exception is not None:
            raise self._exception

        line = bytearray()
        not_enough = True

        while not_enough:
            while self._buffer and not_enough:
                ichar = self._buffer.find(b'\n')
                if ichar < 0:
                    line.extend(self._buffer)
                    self._buffer.clear()
                else:
                    ichar += 1
                    line.extend(self._buffer[:ichar])
                    del self._buffer[:ichar]
                    not_enough = False

            if len(line) > self._limit:
                self._maybe_resume_transport()
                raise ValueError('Line is too long')

            if self._eof:
                break

            if not_enough:
                self._waiter = self._create_waiter('readline')
                try:
                    yield from self._waiter
                finally:
                    self._waiter = None

        self._maybe_resume_transport()
        return bytes(line)

    @coroutine
    def read(self, n=-1):
        if self._exception is not None:
            raise self._exception

        if not n:
            return b''

        if n < 0:
            # This used to just loop creating a new waiter hoping to
            # collect everything in self._buffer, but that would
            # deadlock if the subprocess sends more than self.limit
            # bytes.  So just call self.read(self._limit) until EOF.
            blocks = []
            while True:
                block = yield from self.read(self._limit)
                if not block:
                    break
                blocks.append(block)
            return b''.join(blocks)
        else:
            if not self._buffer and not self._eof:
                self._waiter = self._create_waiter('read')
                try:
                    yield from self._waiter
                finally:
                    self._waiter = None

        if n < 0 or len(self._buffer) <= n:
            data = bytes(self._buffer)
            self._buffer.clear()
        else:
            # n > 0 and len(self._buffer) > n
            data = bytes(self._buffer[:n])
            del self._buffer[:n]

        self._maybe_resume_transport()
        return data

    @coroutine
    def readexactly(self, n):
        if self._exception is not None:
            raise self._exception

        # There used to be "optimized" code here.  It created its own
        # Future and waited until self._buffer had at least the n
        # bytes, then called read(n).  Unfortunately, this could pause
        # the transport if the argument was larger than the pause
        # limit (which is twice self._limit).  So now we just read()
        # into a local buffer.

        blocks = []
        while n > 0:
            block = yield from self.read(n)
            if not block:
                partial = b''.join(blocks)
                raise IncompleteReadError(partial, len(partial) + n)
            blocks.append(block)
            n -= len(block)

        return b''.join(blocks)
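A sketch of the stream API above in use (illustrative, not part of the commit; 'example.com', 7 is a placeholder endpoint, and the `yield from` style matches this codebase):

import asyncio

@asyncio.coroutine
def fetch_line(host, port):
    reader, writer = yield from asyncio.open_connection(host, port)
    writer.write(b'PING\r\n')
    yield from writer.drain()  # cooperates with the flow-control mixin above
    line = yield from reader.readline()
    writer.close()
    return line

loop = asyncio.get_event_loop()
print(loop.run_until_complete(fetch_line('example.com', 7)))
loop.close()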
235
asyncio/subprocess.py
Normal file
@@ -0,0 +1,235 @@
__all__ = ['create_subprocess_exec', 'create_subprocess_shell']

import collections
import subprocess

from . import events
from . import futures
from . import protocols
from . import streams
from . import tasks
from .coroutines import coroutine
from .log import logger


PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
DEVNULL = subprocess.DEVNULL


class SubprocessStreamProtocol(streams.FlowControlMixin,
                               protocols.SubprocessProtocol):
    """Like StreamReaderProtocol, but for a subprocess."""

    def __init__(self, limit, loop):
        super().__init__(loop=loop)
        self._limit = limit
        self.stdin = self.stdout = self.stderr = None
        self.waiter = futures.Future(loop=loop)
        self._waiters = collections.deque()
        self._transport = None

    def __repr__(self):
        info = [self.__class__.__name__]
        if self.stdin is not None:
            info.append('stdin=%r' % self.stdin)
        if self.stdout is not None:
            info.append('stdout=%r' % self.stdout)
        if self.stderr is not None:
            info.append('stderr=%r' % self.stderr)
        return '<%s>' % ' '.join(info)

    def connection_made(self, transport):
        self._transport = transport

        stdout_transport = transport.get_pipe_transport(1)
        if stdout_transport is not None:
            self.stdout = streams.StreamReader(limit=self._limit,
                                               loop=self._loop)
            self.stdout.set_transport(stdout_transport)

        stderr_transport = transport.get_pipe_transport(2)
        if stderr_transport is not None:
            self.stderr = streams.StreamReader(limit=self._limit,
                                               loop=self._loop)
            self.stderr.set_transport(stderr_transport)

        stdin_transport = transport.get_pipe_transport(0)
        if stdin_transport is not None:
            self.stdin = streams.StreamWriter(stdin_transport,
                                              protocol=self,
                                              reader=None,
                                              loop=self._loop)
        self.waiter.set_result(None)

    def pipe_data_received(self, fd, data):
        if fd == 1:
            reader = self.stdout
        elif fd == 2:
            reader = self.stderr
        else:
            reader = None
        if reader is not None:
            reader.feed_data(data)

    def pipe_connection_lost(self, fd, exc):
        if fd == 0:
            pipe = self.stdin
            if pipe is not None:
                pipe.close()
            self.connection_lost(exc)
            return
        if fd == 1:
            reader = self.stdout
        elif fd == 2:
            reader = self.stderr
        else:
            reader = None
        if reader is not None:
            if exc is None:
                reader.feed_eof()
            else:
                reader.set_exception(exc)

    def process_exited(self):
        # wake up futures waiting for wait()
        returncode = self._transport.get_returncode()
        while self._waiters:
            waiter = self._waiters.popleft()
            waiter.set_result(returncode)


class Process:
    def __init__(self, transport, protocol, loop):
        self._transport = transport
        self._protocol = protocol
        self._loop = loop
        self.stdin = protocol.stdin
        self.stdout = protocol.stdout
        self.stderr = protocol.stderr
        self.pid = transport.get_pid()

    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.pid)

    @property
    def returncode(self):
        return self._transport.get_returncode()

    @coroutine
    def wait(self):
        """Wait until the process exits and return its return code."""
        returncode = self._transport.get_returncode()
        if returncode is not None:
            return returncode

        waiter = futures.Future(loop=self._loop)
        self._protocol._waiters.append(waiter)
        yield from waiter
        return waiter.result()

    def _check_alive(self):
        if self._transport.get_returncode() is not None:
            raise ProcessLookupError()

    def send_signal(self, signal):
        self._check_alive()
        self._transport.send_signal(signal)

    def terminate(self):
        self._check_alive()
        self._transport.terminate()

    def kill(self):
        self._check_alive()
        self._transport.kill()

    @coroutine
    def _feed_stdin(self, input):
        debug = self._loop.get_debug()
        self.stdin.write(input)
        if debug:
            logger.debug('%r communicate: feed stdin (%s bytes)',
                         self, len(input))
        try:
            yield from self.stdin.drain()
        except (BrokenPipeError, ConnectionResetError) as exc:
            # communicate() ignores BrokenPipeError and ConnectionResetError
            if debug:
                logger.debug('%r communicate: stdin got %r', self, exc)

        if debug:
            logger.debug('%r communicate: close stdin', self)
        self.stdin.close()

    @coroutine
    def _noop(self):
        return None

    @coroutine
    def _read_stream(self, fd):
        transport = self._transport.get_pipe_transport(fd)
        if fd == 2:
            stream = self.stderr
        else:
            assert fd == 1
            stream = self.stdout
        if self._loop.get_debug():
            name = 'stdout' if fd == 1 else 'stderr'
            logger.debug('%r communicate: read %s', self, name)
        output = yield from stream.read()
        if self._loop.get_debug():
            name = 'stdout' if fd == 1 else 'stderr'
            logger.debug('%r communicate: close %s', self, name)
        transport.close()
        return output

    @coroutine
    def communicate(self, input=None):
        if input:
            stdin = self._feed_stdin(input)
        else:
            stdin = self._noop()
        if self.stdout is not None:
            stdout = self._read_stream(1)
        else:
            stdout = self._noop()
        if self.stderr is not None:
            stderr = self._read_stream(2)
        else:
            stderr = self._noop()
        stdin, stdout, stderr = yield from tasks.gather(stdin, stdout, stderr,
                                                        loop=self._loop)
        yield from self.wait()
        return (stdout, stderr)


@coroutine
def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None,
                            loop=None, limit=streams._DEFAULT_LIMIT, **kwds):
    if loop is None:
        loop = events.get_event_loop()
    protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
                                                        loop=loop)
    transport, protocol = yield from loop.subprocess_shell(
        protocol_factory,
        cmd, stdin=stdin, stdout=stdout,
        stderr=stderr, **kwds)
    yield from protocol.waiter
    return Process(transport, protocol, loop)


@coroutine
def create_subprocess_exec(program, *args, stdin=None, stdout=None,
                           stderr=None, loop=None,
                           limit=streams._DEFAULT_LIMIT, **kwds):
    if loop is None:
        loop = events.get_event_loop()
    protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
                                                        loop=loop)
    transport, protocol = yield from loop.subprocess_exec(
        protocol_factory,
        program, *args,
        stdin=stdin, stdout=stdout,
        stderr=stderr, **kwds)
    yield from protocol.waiter
    return Process(transport, protocol, loop)
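A hedged sketch of the subprocess helpers above (illustrative, not part of the commit; 'date' is an arbitrary POSIX command, and on Windows a proactor event loop would be required):

import asyncio
from asyncio import subprocess

@asyncio.coroutine
def run_date():
    proc = yield from asyncio.create_subprocess_exec(
        'date', stdout=subprocess.PIPE)
    stdout, _ = yield from proc.communicate()  # reads stdout, then wait()s
    return proc.returncode, stdout

loop = asyncio.get_event_loop()
code, out = loop.run_until_complete(run_date())
print(code, out)
loop.close()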
660
asyncio/tasks.py
Normal file
@@ -0,0 +1,660 @@
"""Support for tasks, coroutines and the scheduler."""

__all__ = ['Task',
           'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED',
           'wait', 'wait_for', 'as_completed', 'sleep', 'async',
           'gather', 'shield',
           ]

import concurrent.futures
import functools
import inspect
import linecache
import sys
import traceback
import weakref

from . import coroutines
from . import events
from . import futures
from .coroutines import coroutine

_PY34 = (sys.version_info >= (3, 4))


class Task(futures.Future):
    """A coroutine wrapped in a Future."""

    # An important invariant maintained while a Task is not done:
    #
    # - Either _fut_waiter is None, and _step() is scheduled;
    # - or _fut_waiter is some Future, and _step() is *not* scheduled.
    #
    # The only transition from the latter to the former is through
    # _wakeup().  When _fut_waiter is not None, one of its callbacks
    # must be _wakeup().

    # Weak set containing all tasks alive.
    _all_tasks = weakref.WeakSet()

    # Dictionary containing tasks that are currently active in
    # all running event loops.  {EventLoop: Task}
    _current_tasks = {}

    # If False, don't log a message if the task is destroyed whereas its
    # status is still pending
    _log_destroy_pending = True

    @classmethod
    def current_task(cls, loop=None):
        """Return the currently running task in an event loop or None.

        By default the current task for the current event loop is returned.

        None is returned when called not in the context of a Task.
        """
        if loop is None:
            loop = events.get_event_loop()
        return cls._current_tasks.get(loop)

    @classmethod
    def all_tasks(cls, loop=None):
        """Return a set of all tasks for an event loop.

        By default all tasks for the current event loop are returned.
        """
        if loop is None:
            loop = events.get_event_loop()
        return {t for t in cls._all_tasks if t._loop is loop}

    def __init__(self, coro, *, loop=None):
        assert coroutines.iscoroutine(coro), repr(coro)  # Not a coroutine function!
        super().__init__(loop=loop)
        if self._source_traceback:
            del self._source_traceback[-1]
        self._coro = iter(coro)  # Use the iterator just in case.
        self._fut_waiter = None
        self._must_cancel = False
        self._loop.call_soon(self._step)
        self.__class__._all_tasks.add(self)

    # On Python 3.3 or older, objects with a destructor that are part of a
    # reference cycle are never destroyed. That's not the case any more on
    # Python 3.4 thanks to the PEP 442.
    if _PY34:
        def __del__(self):
            if self._state == futures._PENDING and self._log_destroy_pending:
                context = {
                    'task': self,
                    'message': 'Task was destroyed but it is pending!',
                }
                if self._source_traceback:
                    context['source_traceback'] = self._source_traceback
                self._loop.call_exception_handler(context)
            futures.Future.__del__(self)

    def _repr_info(self):
        info = super()._repr_info()

        if self._must_cancel:
            # replace status
            info[0] = 'cancelling'

        coro = coroutines._format_coroutine(self._coro)
        info.insert(1, 'coro=<%s>' % coro)

        if self._fut_waiter is not None:
            info.insert(2, 'wait_for=%r' % self._fut_waiter)
        return info

    def get_stack(self, *, limit=None):
        """Return the list of stack frames for this task's coroutine.

        If the coroutine is not done, this returns the stack where it is
        suspended.  If the coroutine has completed successfully or was
        cancelled, this returns an empty list.  If the coroutine was
        terminated by an exception, this returns the list of traceback
        frames.

        The frames are always ordered from oldest to newest.

        The optional limit gives the maximum number of frames to
        return; by default all available frames are returned.  Its
        meaning differs depending on whether a stack or a traceback is
        returned: the newest frames of a stack are returned, but the
        oldest frames of a traceback are returned.  (This matches the
        behavior of the traceback module.)

        For reasons beyond our control, only one stack frame is
        returned for a suspended coroutine.
        """
        frames = []
        f = self._coro.gi_frame
        if f is not None:
            while f is not None:
                if limit is not None:
                    if limit <= 0:
                        break
                    limit -= 1
                frames.append(f)
                f = f.f_back
            frames.reverse()
        elif self._exception is not None:
            tb = self._exception.__traceback__
            while tb is not None:
                if limit is not None:
                    if limit <= 0:
                        break
                    limit -= 1
                frames.append(tb.tb_frame)
                tb = tb.tb_next
        return frames

    def print_stack(self, *, limit=None, file=None):
        """Print the stack or traceback for this task's coroutine.

        This produces output similar to that of the traceback module,
        for the frames retrieved by get_stack().  The limit argument
        is passed to get_stack().  The file argument is an I/O stream
        to which the output is written; by default output is written
        to sys.stderr.
        """
        extracted_list = []
        checked = set()
        for f in self.get_stack(limit=limit):
            lineno = f.f_lineno
            co = f.f_code
            filename = co.co_filename
            name = co.co_name
            if filename not in checked:
                checked.add(filename)
                linecache.checkcache(filename)
            line = linecache.getline(filename, lineno, f.f_globals)
            extracted_list.append((filename, lineno, name, line))
        exc = self._exception
        if not extracted_list:
            print('No stack for %r' % self, file=file)
        elif exc is not None:
            print('Traceback for %r (most recent call last):' % self,
                  file=file)
        else:
            print('Stack for %r (most recent call last):' % self,
                  file=file)
        traceback.print_list(extracted_list, file=file)
        if exc is not None:
            for line in traceback.format_exception_only(exc.__class__, exc):
                print(line, file=file, end='')

    def cancel(self):
        """Request that this task cancel itself.

        This arranges for a CancelledError to be thrown into the
        wrapped coroutine on the next cycle through the event loop.
        The coroutine then has a chance to clean up or even deny
        the request using try/except/finally.

        Unlike Future.cancel, this does not guarantee that the
        task will be cancelled: the exception might be caught and
        acted upon, delaying cancellation of the task or preventing
        cancellation completely.  The task may also return a value or
        raise a different exception.

        Immediately after this method is called, Task.cancelled() will
        not return True (unless the task was already cancelled).  A
        task will be marked as cancelled when the wrapped coroutine
        terminates with a CancelledError exception (even if cancel()
        was not called).
        """
        if self.done():
            return False
        if self._fut_waiter is not None:
            if self._fut_waiter.cancel():
                # Leave self._fut_waiter; it may be a Task that
                # catches and ignores the cancellation so we may have
                # to cancel it again later.
                return True
        # It must be the case that self._step is already scheduled.
        self._must_cancel = True
        return True

    def _step(self, value=None, exc=None):
        assert not self.done(), \
            '_step(): already done: {!r}, {!r}, {!r}'.format(self, value, exc)
        if self._must_cancel:
            if not isinstance(exc, futures.CancelledError):
                exc = futures.CancelledError()
            self._must_cancel = False
        coro = self._coro
        self._fut_waiter = None

        self.__class__._current_tasks[self._loop] = self
        # Call either coro.throw(exc) or coro.send(value).
        try:
            if exc is not None:
                result = coro.throw(exc)
            elif value is not None:
                result = coro.send(value)
            else:
                result = next(coro)
        except StopIteration as exc:
            self.set_result(exc.value)
        except futures.CancelledError as exc:
            super().cancel()  # I.e., Future.cancel(self).
        except Exception as exc:
            self.set_exception(exc)
        except BaseException as exc:
            self.set_exception(exc)
            raise
        else:
            if isinstance(result, futures.Future):
                # Yielded Future must come from Future.__iter__().
                if result._blocking:
                    result._blocking = False
                    result.add_done_callback(self._wakeup)
                    self._fut_waiter = result
                    if self._must_cancel:
                        if self._fut_waiter.cancel():
                            self._must_cancel = False
                else:
                    self._loop.call_soon(
                        self._step, None,
                        RuntimeError(
                            'yield was used instead of yield from '
                            'in task {!r} with {!r}'.format(self, result)))
            elif result is None:
                # Bare yield relinquishes control for one event loop iteration.
                self._loop.call_soon(self._step)
            elif inspect.isgenerator(result):
                # Yielding a generator is just wrong.
                self._loop.call_soon(
                    self._step, None,
                    RuntimeError(
                        'yield was used instead of yield from for '
                        'generator in task {!r} with {}'.format(
                            self, result)))
            else:
                # Yielding something else is an error.
                self._loop.call_soon(
                    self._step, None,
                    RuntimeError(
                        'Task got bad yield: {!r}'.format(result)))
        finally:
            self.__class__._current_tasks.pop(self._loop)
            self = None  # Needed to break cycles when an exception occurs.

    def _wakeup(self, future):
        try:
            value = future.result()
        except Exception as exc:
            # This may also be a cancellation.
            self._step(None, exc)
        else:
            self._step(value, None)
        self = None  # Needed to break cycles when an exception occurs.
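# Illustrative sketch (not part of this module): scheduling and cancelling a
# Task through the API above, in the generator-based coroutine style:
#
#     import asyncio
#
#     @asyncio.coroutine
#     def slow():
#         yield from asyncio.sleep(10)
#
#     loop = asyncio.get_event_loop()
#     task = asyncio.Task(slow(), loop=loop)
#     loop.call_later(0.1, task.cancel)  # CancelledError is thrown into slow()
#     try:
#         loop.run_until_complete(task)
#     except asyncio.CancelledError:
#         print(task.cancelled())  # True once the coroutine has unwound
#     loop.close()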
|
||||
|
||||
# wait() and as_completed() similar to those in PEP 3148.
|
||||
|
||||
FIRST_COMPLETED = concurrent.futures.FIRST_COMPLETED
|
||||
FIRST_EXCEPTION = concurrent.futures.FIRST_EXCEPTION
|
||||
ALL_COMPLETED = concurrent.futures.ALL_COMPLETED
|
||||
|
||||
|
||||
@coroutine
|
||||
def wait(fs, *, loop=None, timeout=None, return_when=ALL_COMPLETED):
|
||||
"""Wait for the Futures and coroutines given by fs to complete.
|
||||
|
||||
The sequence futures must not be empty.
|
||||
|
||||
Coroutines will be wrapped in Tasks.
|
||||
|
||||
Returns two sets of Future: (done, pending).
|
||||
|
||||
Usage:
|
||||
|
||||
done, pending = yield from asyncio.wait(fs)
|
||||
|
||||
Note: This does not raise TimeoutError! Futures that aren't done
|
||||
when the timeout occurs are returned in the second set.
|
||||
"""
|
||||
if isinstance(fs, futures.Future) or coroutines.iscoroutine(fs):
|
||||
raise TypeError("expect a list of futures, not %s" % type(fs).__name__)
|
||||
if not fs:
|
||||
raise ValueError('Set of coroutines/Futures is empty.')
|
||||
if return_when not in (FIRST_COMPLETED, FIRST_EXCEPTION, ALL_COMPLETED):
|
||||
raise ValueError('Invalid return_when value: {}'.format(return_when))
|
||||
|
||||
if loop is None:
|
||||
loop = events.get_event_loop()
|
||||
|
||||
fs = {async(f, loop=loop) for f in set(fs)}
|
||||
|
||||
return (yield from _wait(fs, timeout, return_when, loop))
|
||||
|
||||
|
||||
def _release_waiter(waiter, *args):
|
||||
if not waiter.done():
|
||||
waiter.set_result(None)
|
||||
|
||||
|
||||
@coroutine
|
||||
def wait_for(fut, timeout, *, loop=None):
|
||||
"""Wait for the single Future or coroutine to complete, with timeout.
|
||||
|
||||
Coroutine will be wrapped in Task.
|
||||
|
||||
Returns result of the Future or coroutine. When a timeout occurs,
|
||||
it cancels the task and raises TimeoutError. To avoid the task
|
||||
cancellation, wrap it in shield().
|
||||
|
||||
Usage:
|
||||
|
||||
result = yield from asyncio.wait_for(fut, 10.0)
|
||||
|
||||
"""
|
||||
if loop is None:
|
||||
loop = events.get_event_loop()
|
||||
|
||||
if timeout is None:
|
||||
return (yield from fut)
|
||||
|
||||
waiter = futures.Future(loop=loop)
|
||||
timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
|
||||
cb = functools.partial(_release_waiter, waiter)
|
||||
|
||||
fut = async(fut, loop=loop)
|
||||
fut.add_done_callback(cb)
|
||||
|
||||
try:
|
||||
# wait until the future completes or the timeout
|
||||
yield from waiter
|
||||
|
||||
if fut.done():
|
||||
return fut.result()
|
||||
else:
|
||||
fut.remove_done_callback(cb)
|
||||
fut.cancel()
|
||||
raise futures.TimeoutError()
|
||||
finally:
|
||||
timeout_handle.cancel()
|
||||
|
||||
|
||||
@coroutine
def _wait(fs, timeout, return_when, loop):
    """Internal helper for wait() and wait_for().

    The fs argument must be a collection of Futures.
    """
    assert fs, 'Set of Futures is empty.'
    waiter = futures.Future(loop=loop)
    timeout_handle = None
    if timeout is not None:
        timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
    counter = len(fs)

    def _on_completion(f):
        nonlocal counter
        counter -= 1
        if (counter <= 0 or
            return_when == FIRST_COMPLETED or
            return_when == FIRST_EXCEPTION and (not f.cancelled() and
                                                f.exception() is not None)):
            if timeout_handle is not None:
                timeout_handle.cancel()
            if not waiter.done():
                waiter.set_result(None)

    for f in fs:
        f.add_done_callback(_on_completion)

    try:
        yield from waiter
    finally:
        if timeout_handle is not None:
            timeout_handle.cancel()

    done, pending = set(), set()
    for f in fs:
        f.remove_done_callback(_on_completion)
        if f.done():
            done.add(f)
        else:
            pending.add(f)
    return done, pending


# This is *not* a @coroutine! It is just an iterator (yielding Futures).
def as_completed(fs, *, loop=None, timeout=None):
    """Return an iterator whose values are coroutines.

    When waiting for the yielded coroutines you'll get the results (or
    exceptions!) of the original Futures (or coroutines), in the order
    in which and as soon as they complete.

    This differs from PEP 3148; the proper way to use this is:

        for f in as_completed(fs):
            result = yield from f  # The 'yield from' may raise.
            # Use result.

    If a timeout is specified, the 'yield from' will raise
    TimeoutError when the timeout occurs before all Futures are done.

    Note: The futures 'f' are not necessarily members of fs.
    """
    if isinstance(fs, futures.Future) or coroutines.iscoroutine(fs):
        raise TypeError("expect a list of futures, not %s" % type(fs).__name__)
    loop = loop if loop is not None else events.get_event_loop()
    todo = {async(f, loop=loop) for f in set(fs)}
    from .queues import Queue  # Import here to avoid circular import problem.
    done = Queue(loop=loop)
    timeout_handle = None

    def _on_timeout():
        for f in todo:
            f.remove_done_callback(_on_completion)
            done.put_nowait(None)  # Queue a dummy value for _wait_for_one().
        todo.clear()  # Can't do todo.remove(f) in the loop.

    def _on_completion(f):
        if not todo:
            return  # _on_timeout() was here first.
        todo.remove(f)
        done.put_nowait(f)
        if not todo and timeout_handle is not None:
            timeout_handle.cancel()

    @coroutine
    def _wait_for_one():
        f = yield from done.get()
        if f is None:
            # Dummy value from _on_timeout().
            raise futures.TimeoutError
        return f.result()  # May raise f.exception().

    for f in todo:
        f.add_done_callback(_on_completion)
    if todo and timeout is not None:
        timeout_handle = loop.call_later(timeout, _on_timeout)
    for _ in range(len(todo)):
        yield _wait_for_one()

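# Usage sketch (illustrative, not part of the module API): consuming a
# caller-supplied collection of futures in completion order; each
# 'yield from' may raise the future's exception, or TimeoutError when the
# deadline expires before all futures are done.
@coroutine
def _example_as_completed(fs, loop):
    results = []
    for f in as_completed(fs, loop=loop, timeout=60):
        results.append((yield from f))
    return results
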
@coroutine
def sleep(delay, result=None, *, loop=None):
    """Coroutine that completes after a given time (in seconds)."""
    future = futures.Future(loop=loop)
    h = future._loop.call_later(delay,
                                future._set_result_unless_cancelled, result)
    try:
        return (yield from future)
    finally:
        h.cancel()


def async(coro_or_future, *, loop=None):
    """Wrap a coroutine in a future.

    If the argument is a Future, it is returned directly.
    """
    if isinstance(coro_or_future, futures.Future):
        if loop is not None and loop is not coro_or_future._loop:
            raise ValueError('loop argument must agree with Future')
        return coro_or_future
    elif coroutines.iscoroutine(coro_or_future):
        if loop is None:
            loop = events.get_event_loop()
        task = loop.create_task(coro_or_future)
        if task._source_traceback:
            del task._source_traceback[-1]
        return task
    else:
        raise TypeError('A Future or coroutine is required')

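# Usage sketch (illustrative, not part of the module API): async() wraps a
# coroutine object in a Task, while a Future (or Task) passes through
# unchanged, so wrapping twice is harmless.
def _example_async(coro, loop):
    task = async(coro, loop=loop)   # coroutine -> scheduled Task
    same = async(task, loop=loop)   # Future/Task -> returned as-is
    return task is same             # True
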
class _GatheringFuture(futures.Future):
    """Helper for gather().

    This overrides cancel() to cancel all the children and act more
    like Task.cancel(), which doesn't immediately mark itself as
    cancelled.
    """

    def __init__(self, children, *, loop=None):
        super().__init__(loop=loop)
        self._children = children

    def cancel(self):
        if self.done():
            return False
        for child in self._children:
            child.cancel()
        return True


def gather(*coros_or_futures, loop=None, return_exceptions=False):
    """Return a future aggregating results from the given coroutines
    or futures.

    All futures must share the same event loop. If all the tasks are
    done successfully, the returned future's result is the list of
    results (in the order of the original sequence, not necessarily
    the order of results arrival). If *return_exceptions* is True,
    exceptions in the tasks are treated the same as successful
    results, and gathered in the result list; otherwise, the first
    raised exception will be immediately propagated to the returned
    future.

    Cancellation: if the outer Future is cancelled, all children (that
    have not completed yet) are also cancelled. If any child is
    cancelled, this is treated as if it raised CancelledError --
    the outer Future is *not* cancelled in this case. (This is to
    prevent the cancellation of one child to cause other children to
    be cancelled.)
    """
    if not coros_or_futures:
        outer = futures.Future(loop=loop)
        outer.set_result([])
        return outer

    arg_to_fut = {}
    for arg in set(coros_or_futures):
        if not isinstance(arg, futures.Future):
            fut = async(arg, loop=loop)
            if loop is None:
                loop = fut._loop
            # The caller cannot control this future, the "destroy pending task"
            # warning should not be emitted.
            fut._log_destroy_pending = False
        else:
            fut = arg
            if loop is None:
                loop = fut._loop
            elif fut._loop is not loop:
                raise ValueError("futures are tied to different event loops")
        arg_to_fut[arg] = fut

    children = [arg_to_fut[arg] for arg in coros_or_futures]
    nchildren = len(children)
    outer = _GatheringFuture(children, loop=loop)
    nfinished = 0
    results = [None] * nchildren

    def _done_callback(i, fut):
        nonlocal nfinished
        if outer._state != futures._PENDING:
            if fut._exception is not None:
                # Mark exception retrieved.
                fut.exception()
            return
        if fut._state == futures._CANCELLED:
            res = futures.CancelledError()
            if not return_exceptions:
                outer.set_exception(res)
                return
        elif fut._exception is not None:
            res = fut.exception()  # Mark exception retrieved.
            if not return_exceptions:
                outer.set_exception(res)
                return
        else:
            res = fut._result
        results[i] = res
        nfinished += 1
        if nfinished == nchildren:
            outer.set_result(results)

    for i, fut in enumerate(children):
        fut.add_done_callback(functools.partial(_done_callback, i))
    return outer

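# Usage sketch (illustrative, not part of the module API): with
# return_exceptions=True a failing child contributes its exception instance
# to the result list instead of aborting the whole gather. c1 and c2 are
# caller-supplied coroutines or futures.
@coroutine
def _example_gather(c1, c2, loop):
    results = yield from gather(c1, c2, loop=loop, return_exceptions=True)
    return [r for r in results if not isinstance(r, Exception)]
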
def shield(arg, *, loop=None):
    """Wait for a future, shielding it from cancellation.

    The statement

        res = yield from shield(something())

    is exactly equivalent to the statement

        res = yield from something()

    *except* that if the coroutine containing it is cancelled, the
    task running in something() is not cancelled. From the POV of
    something(), the cancellation did not happen. But its caller is
    still cancelled, so the yield-from expression still raises
    CancelledError. Note: If something() is cancelled by other means
    this will still cancel shield().

    If you want to completely ignore cancellation (not recommended)
    you can combine shield() with a try/except clause, as follows:

        try:
            res = yield from shield(something())
        except CancelledError:
            res = None
    """
    inner = async(arg, loop=loop)
    if inner.done():
        # Shortcut.
        return inner
    loop = inner._loop
    outer = futures.Future(loop=loop)

    def _done_callback(inner):
        if outer.cancelled():
            # Mark inner's result as retrieved.
            inner.cancelled() or inner.exception()
            return
        if inner.cancelled():
            outer.cancel()
        else:
            exc = inner.exception()
            if exc is not None:
                outer.set_exception(exc)
            else:
                outer.set_result(inner.result())

    inner.add_done_callback(_done_callback)
    return outer
436
asyncio/test_utils.py
Normal file
@@ -0,0 +1,436 @@
"""Utilities shared by tests."""

import collections
import contextlib
import io
import logging
import os
import re
import socket
import socketserver
import sys
import tempfile
import threading
import time
import unittest
from unittest import mock

from http.server import HTTPServer
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer

try:
    import ssl
except ImportError:  # pragma: no cover
    ssl = None

from . import base_events
from . import events
from . import futures
from . import selectors
from . import tasks
from .coroutines import coroutine
from .log import logger


if sys.platform == 'win32':  # pragma: no cover
    from .windows_utils import socketpair
else:
    from socket import socketpair  # pragma: no cover


def dummy_ssl_context():
    if ssl is None:
        return None
    else:
        return ssl.SSLContext(ssl.PROTOCOL_SSLv23)


def run_briefly(loop):
    @coroutine
    def once():
        pass
    gen = once()
    t = loop.create_task(gen)
    # Don't log a warning if the task is not done after run_until_complete().
    # It occurs if the loop is stopped or if a task raises a BaseException.
    t._log_destroy_pending = False
    try:
        loop.run_until_complete(t)
    finally:
        gen.close()


def run_until(loop, pred, timeout=30):
    deadline = time.time() + timeout
    while not pred():
        if timeout is not None:
            timeout = deadline - time.time()
            if timeout <= 0:
                raise futures.TimeoutError()
        loop.run_until_complete(tasks.sleep(0.001, loop=loop))


def run_once(loop):
    """loop.stop() schedules _raise_stop_error(),
    and run_forever() runs until the _raise_stop_error() callback fires.
    This won't work if the test waits for some I/O events, because
    _raise_stop_error() runs before any of the I/O event callbacks.
    """
    loop.stop()
    loop.run_forever()

class SilentWSGIRequestHandler(WSGIRequestHandler):

    def get_stderr(self):
        return io.StringIO()

    def log_message(self, format, *args):
        pass


class SilentWSGIServer(WSGIServer):

    request_timeout = 2

    def get_request(self):
        request, client_addr = super().get_request()
        request.settimeout(self.request_timeout)
        return request, client_addr

    def handle_error(self, request, client_address):
        pass


class SSLWSGIServerMixin:

    def finish_request(self, request, client_address):
        # The relative location of our test directory (which
        # contains the ssl key and certificate files) differs
        # between the stdlib and stand-alone asyncio.
        # Prefer our own if we can find it.
        here = os.path.join(os.path.dirname(__file__), '..', 'tests')
        if not os.path.isdir(here):
            here = os.path.join(os.path.dirname(os.__file__),
                                'test', 'test_asyncio')
        keyfile = os.path.join(here, 'ssl_key.pem')
        certfile = os.path.join(here, 'ssl_cert.pem')
        ssock = ssl.wrap_socket(request,
                                keyfile=keyfile,
                                certfile=certfile,
                                server_side=True)
        try:
            self.RequestHandlerClass(ssock, client_address, self)
            ssock.close()
        except OSError:
            # maybe socket has been closed by peer
            pass


class SSLWSGIServer(SSLWSGIServerMixin, SilentWSGIServer):
    pass


def _run_test_server(*, address, use_ssl=False, server_cls, server_ssl_cls):

    def app(environ, start_response):
        status = '200 OK'
        headers = [('Content-type', 'text/plain')]
        start_response(status, headers)
        return [b'Test message']

    # Run the test WSGI server in a separate thread in order not to
    # interfere with event handling in the main thread
    server_class = server_ssl_cls if use_ssl else server_cls
    httpd = server_class(address, SilentWSGIRequestHandler)
    httpd.set_app(app)
    httpd.address = httpd.server_address
    server_thread = threading.Thread(
        target=lambda: httpd.serve_forever(poll_interval=0.05))
    server_thread.start()
    try:
        yield httpd
    finally:
        httpd.shutdown()
        httpd.server_close()
        server_thread.join()

if hasattr(socket, 'AF_UNIX'):

    class UnixHTTPServer(socketserver.UnixStreamServer, HTTPServer):

        def server_bind(self):
            socketserver.UnixStreamServer.server_bind(self)
            self.server_name = '127.0.0.1'
            self.server_port = 80


    class UnixWSGIServer(UnixHTTPServer, WSGIServer):

        request_timeout = 2

        def server_bind(self):
            UnixHTTPServer.server_bind(self)
            self.setup_environ()

        def get_request(self):
            request, client_addr = super().get_request()
            request.settimeout(self.request_timeout)
            # Code in the stdlib expects that get_request
            # will return a socket and a tuple (host, port).
            # However, this isn't true for UNIX sockets,
            # as the second return value will be a path;
            # hence we return some fake data sufficient
            # to get the tests going
            return request, ('127.0.0.1', '')


    class SilentUnixWSGIServer(UnixWSGIServer):

        def handle_error(self, request, client_address):
            pass


    class UnixSSLWSGIServer(SSLWSGIServerMixin, SilentUnixWSGIServer):
        pass


    def gen_unix_socket_path():
        with tempfile.NamedTemporaryFile() as file:
            return file.name


    @contextlib.contextmanager
    def unix_socket_path():
        path = gen_unix_socket_path()
        try:
            yield path
        finally:
            try:
                os.unlink(path)
            except OSError:
                pass


    @contextlib.contextmanager
    def run_test_unix_server(*, use_ssl=False):
        with unix_socket_path() as path:
            yield from _run_test_server(address=path, use_ssl=use_ssl,
                                        server_cls=SilentUnixWSGIServer,
                                        server_ssl_cls=UnixSSLWSGIServer)


@contextlib.contextmanager
def run_test_server(*, host='127.0.0.1', port=0, use_ssl=False):
    yield from _run_test_server(address=(host, port), use_ssl=use_ssl,
                                server_cls=SilentWSGIServer,
                                server_ssl_cls=SSLWSGIServer)

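# Usage sketch (illustrative, not part of the module API): the context
# manager yields the running server; httpd.address holds the (host, port)
# pair actually bound (port=0 picks an ephemeral port), which a client
# under test can connect to.
def _example_run_test_server():
    with run_test_server() as httpd:
        return httpd.address
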
def make_test_protocol(base):
    dct = {}
    for name in dir(base):
        if name.startswith('__') and name.endswith('__'):
            # skip magic names
            continue
        dct[name] = MockCallback(return_value=None)
    return type('TestProtocol', (base,) + base.__bases__, dct)()


class TestSelector(selectors.BaseSelector):

    def __init__(self):
        self.keys = {}

    def register(self, fileobj, events, data=None):
        key = selectors.SelectorKey(fileobj, 0, events, data)
        self.keys[fileobj] = key
        return key

    def unregister(self, fileobj):
        return self.keys.pop(fileobj)

    def select(self, timeout):
        return []

    def get_map(self):
        return self.keys


class TestLoop(base_events.BaseEventLoop):
    """Loop for unittests.

    It manages its own time directly. If something is scheduled to be
    executed later, then on the next loop iteration, after all ready
    handlers are done, the generator passed to __init__ is resumed.

    The generator should look like this:

        def gen():
            ...
            when = yield ...
            ... = yield time_advance

    The value received from yield is the absolute time of the next
    scheduled handler.
    The value passed to yield is the time advance to move the loop's
    time forward.
    """

    def __init__(self, gen=None):
        super().__init__()

        if gen is None:
            def gen():
                yield
            self._check_on_close = False
        else:
            self._check_on_close = True

        self._gen = gen()
        next(self._gen)
        self._time = 0
        self._clock_resolution = 1e-9
        self._timers = []
        self._selector = TestSelector()

        self.readers = {}
        self.writers = {}
        self.reset_counters()

    def time(self):
        return self._time

    def advance_time(self, advance):
        """Move test time forward."""
        if advance:
            self._time += advance

    def close(self):
        if self._check_on_close:
            try:
                self._gen.send(0)
            except StopIteration:
                pass
            else:  # pragma: no cover
                raise AssertionError("Time generator is not finished")

    def add_reader(self, fd, callback, *args):
        self.readers[fd] = events.Handle(callback, args, self)

    def remove_reader(self, fd):
        self.remove_reader_count[fd] += 1
        if fd in self.readers:
            del self.readers[fd]
            return True
        else:
            return False

    def assert_reader(self, fd, callback, *args):
        assert fd in self.readers, 'fd {} is not registered'.format(fd)
        handle = self.readers[fd]
        assert handle._callback == callback, '{!r} != {!r}'.format(
            handle._callback, callback)
        assert handle._args == args, '{!r} != {!r}'.format(
            handle._args, args)

    def add_writer(self, fd, callback, *args):
        self.writers[fd] = events.Handle(callback, args, self)

    def remove_writer(self, fd):
        self.remove_writer_count[fd] += 1
        if fd in self.writers:
            del self.writers[fd]
            return True
        else:
            return False

    def assert_writer(self, fd, callback, *args):
        assert fd in self.writers, 'fd {} is not registered'.format(fd)
        handle = self.writers[fd]
        assert handle._callback == callback, '{!r} != {!r}'.format(
            handle._callback, callback)
        assert handle._args == args, '{!r} != {!r}'.format(
            handle._args, args)

    def reset_counters(self):
        self.remove_reader_count = collections.defaultdict(int)
        self.remove_writer_count = collections.defaultdict(int)

    def _run_once(self):
        super()._run_once()
        for when in self._timers:
            advance = self._gen.send(when)
            self.advance_time(advance)
        self._timers = []

    def call_at(self, when, callback, *args):
        self._timers.append(when)
        return super().call_at(when, callback, *args)

    def _process_events(self, event_list):
        return

    def _write_to_self(self):
        pass

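# Illustrative sketch of the generator protocol that TestLoop expects (the
# names and timings here are examples, not part of the API): the first
# yield receives the absolute time of the next scheduled handler; the value
# yielded back is how far to advance the loop's clock.
# Used as: loop = TestLoop(_example_time_gen)
def _example_time_gen():
    when = yield                  # primed by next(); receives the deadline
    assert round(when, 1) == 0.1  # e.g. a handler scheduled 0.1s from now
    yield 0.1                     # advance the clock by 0.1 seconds
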
def MockCallback(**kwargs):
    return mock.Mock(spec=['__call__'], **kwargs)


class MockPattern(str):
    """A regex based str with a fuzzy __eq__.

    Use this helper with 'mock.assert_called_with', or anywhere
    where a regex comparison between strings is needed.

    For instance:
        mock_call.assert_called_with(MockPattern('spam.*ham'))
    """
    def __eq__(self, other):
        return bool(re.search(str(self), other, re.S))


def get_function_source(func):
    source = events._get_function_source(func)
    if source is None:
        raise ValueError("unable to get the source of %r" % (func,))
    return source


class TestCase(unittest.TestCase):
    def set_event_loop(self, loop, *, cleanup=True):
        assert loop is not None
        # ensure that the event loop is passed explicitly in asyncio
        events.set_event_loop(None)
        if cleanup:
            self.addCleanup(loop.close)

    def new_test_loop(self, gen=None):
        loop = TestLoop(gen)
        self.set_event_loop(loop)
        return loop

    def tearDown(self):
        events.set_event_loop(None)


@contextlib.contextmanager
def disable_logger():
    """Context manager to disable asyncio logger.

    For example, it can be used to ignore warnings in debug mode.
    """
    old_level = logger.level
    try:
        logger.setLevel(logging.CRITICAL+1)
        yield
    finally:
        logger.setLevel(old_level)


def mock_nonblocking_socket():
    """Create a mock of a non-blocking socket."""
    sock = mock.Mock(socket.socket)
    sock.gettimeout.return_value = 0.0
    return sock
300
asyncio/transports.py
Normal file
@@ -0,0 +1,300 @@
"""Abstract Transport class."""

import sys

_PY34 = sys.version_info >= (3, 4)

__all__ = ['BaseTransport', 'ReadTransport', 'WriteTransport',
           'Transport', 'DatagramTransport', 'SubprocessTransport',
           ]


class BaseTransport:
    """Base class for transports."""

    def __init__(self, extra=None):
        if extra is None:
            extra = {}
        self._extra = extra

    def get_extra_info(self, name, default=None):
        """Get optional transport information."""
        return self._extra.get(name, default)

    def close(self):
        """Close the transport.

        Buffered data will be flushed asynchronously. No more data
        will be received. After all buffered data is flushed, the
        protocol's connection_lost() method will (eventually) be called
        with None as its argument.
        """
        raise NotImplementedError


class ReadTransport(BaseTransport):
    """Interface for read-only transports."""

    def pause_reading(self):
        """Pause the receiving end.

        No data will be passed to the protocol's data_received()
        method until resume_reading() is called.
        """
        raise NotImplementedError

    def resume_reading(self):
        """Resume the receiving end.

        Data received will once again be passed to the protocol's
        data_received() method.
        """
        raise NotImplementedError


class WriteTransport(BaseTransport):
    """Interface for write-only transports."""

    def set_write_buffer_limits(self, high=None, low=None):
        """Set the high- and low-water limits for write flow control.

        These two values control when to call the protocol's
        pause_writing() and resume_writing() methods. If specified,
        the low-water limit must be less than or equal to the
        high-water limit. Neither value can be negative.

        The defaults are implementation-specific. If only the
        high-water limit is given, the low-water limit defaults to an
        implementation-specific value less than or equal to the
        high-water limit. Setting high to zero forces low to zero as
        well, and causes pause_writing() to be called whenever the
        buffer becomes non-empty. Setting low to zero causes
        resume_writing() to be called only once the buffer is empty.
        Use of zero for either limit is generally sub-optimal as it
        reduces opportunities for doing I/O and computation
        concurrently.
        """
        raise NotImplementedError

    def get_write_buffer_size(self):
        """Return the current size of the write buffer."""
        raise NotImplementedError

    def write(self, data):
        """Write some data bytes to the transport.

        This does not block; it buffers the data and arranges for it
        to be sent out asynchronously.
        """
        raise NotImplementedError

    def writelines(self, list_of_data):
        """Write a list (or any iterable) of data bytes to the transport.

        The default implementation concatenates the arguments and
        calls write() on the result.
        """
        if not _PY34:
            # In Python 3.3, bytes.join() doesn't handle memoryview.
            list_of_data = (
                bytes(data) if isinstance(data, memoryview) else data
                for data in list_of_data)
        self.write(b''.join(list_of_data))

    def write_eof(self):
        """Close the write end after flushing buffered data.

        (This is like typing ^D into a UNIX program reading from stdin.)

        Data may still be received.
        """
        raise NotImplementedError

    def can_write_eof(self):
        """Return True if this transport supports write_eof(), False if not."""
        raise NotImplementedError

    def abort(self):
        """Close the transport immediately.

        Buffered data will be lost. No more data will be received.
        The protocol's connection_lost() method will (eventually) be
        called with None as its argument.
        """
        raise NotImplementedError


class Transport(ReadTransport, WriteTransport):
    """Interface representing a bidirectional transport.

    There may be several implementations, but typically, the user does
    not implement new transports; rather, the platform provides some
    useful transports that are implemented using the platform's best
    practices.

    The user never instantiates a transport directly; they call a
    utility function, passing it a protocol factory and other
    information necessary to create the transport and protocol. (E.g.
    EventLoop.create_connection() or EventLoop.create_server().)

    The utility function will asynchronously create a transport and a
    protocol and hook them up by calling the protocol's
    connection_made() method, passing it the transport.

    The implementation here raises NotImplementedError for every method
    except writelines(), which calls write() in a loop.
    """

class DatagramTransport(BaseTransport):
    """Interface for datagram (UDP) transports."""

    def sendto(self, data, addr=None):
        """Send data to the transport.

        This does not block; it buffers the data and arranges for it
        to be sent out asynchronously.
        addr is the target socket address.
        If addr is None, use the target address given on transport creation.
        """
        raise NotImplementedError

    def abort(self):
        """Close the transport immediately.

        Buffered data will be lost. No more data will be received.
        The protocol's connection_lost() method will (eventually) be
        called with None as its argument.
        """
        raise NotImplementedError


class SubprocessTransport(BaseTransport):

    def get_pid(self):
        """Get subprocess id."""
        raise NotImplementedError

    def get_returncode(self):
        """Get subprocess returncode.

        See also
        http://docs.python.org/3/library/subprocess#subprocess.Popen.returncode
        """
        raise NotImplementedError

    def get_pipe_transport(self, fd):
        """Get transport for pipe with number fd."""
        raise NotImplementedError

    def send_signal(self, signal):
        """Send signal to subprocess.

        See also:
        docs.python.org/3/library/subprocess#subprocess.Popen.send_signal
        """
        raise NotImplementedError

    def terminate(self):
        """Stop the subprocess.

        Alias for close() method.

        On Posix OSs the method sends SIGTERM to the subprocess.
        On Windows the Win32 API function TerminateProcess()
        is called to stop the subprocess.

        See also:
        http://docs.python.org/3/library/subprocess#subprocess.Popen.terminate
        """
        raise NotImplementedError

    def kill(self):
        """Kill the subprocess.

        On Posix OSs the function sends SIGKILL to the subprocess.
        On Windows kill() is an alias for terminate().

        See also:
        http://docs.python.org/3/library/subprocess#subprocess.Popen.kill
        """
        raise NotImplementedError


class _FlowControlMixin(Transport):
    """All the logic for (write) flow control in a mix-in base class.

    The subclass must implement get_write_buffer_size(). It must call
    _maybe_pause_protocol() whenever the write buffer size increases,
    and _maybe_resume_protocol() whenever it decreases. It may also
    override set_write_buffer_limits() (e.g. to specify different
    defaults).

    The subclass constructor must call super().__init__(extra). This
    will call set_write_buffer_limits().

    The user may call set_write_buffer_limits() and
    get_write_buffer_size(), and their protocol's pause_writing() and
    resume_writing() may be called.
    """

    def __init__(self, extra=None, loop=None):
        super().__init__(extra)
        assert loop is not None
        self._loop = loop
        self._protocol_paused = False
        self._set_write_buffer_limits()

    def _maybe_pause_protocol(self):
        size = self.get_write_buffer_size()
        if size <= self._high_water:
            return
        if not self._protocol_paused:
            self._protocol_paused = True
            try:
                self._protocol.pause_writing()
            except Exception as exc:
                self._loop.call_exception_handler({
                    'message': 'protocol.pause_writing() failed',
                    'exception': exc,
                    'transport': self,
                    'protocol': self._protocol,
                })

    def _maybe_resume_protocol(self):
        if (self._protocol_paused and
                self.get_write_buffer_size() <= self._low_water):
            self._protocol_paused = False
            try:
                self._protocol.resume_writing()
            except Exception as exc:
                self._loop.call_exception_handler({
                    'message': 'protocol.resume_writing() failed',
                    'exception': exc,
                    'transport': self,
                    'protocol': self._protocol,
                })

    def get_write_buffer_limits(self):
        return (self._low_water, self._high_water)

    def _set_write_buffer_limits(self, high=None, low=None):
        if high is None:
            if low is None:
                high = 64*1024
            else:
                high = 4*low
        if low is None:
            low = high // 4
        if not high >= low >= 0:
            raise ValueError('high (%r) must be >= low (%r) must be >= 0' %
                             (high, low))
        self._high_water = high
        self._low_water = low

    def set_write_buffer_limits(self, high=None, low=None):
        self._set_write_buffer_limits(high=high, low=low)
        self._maybe_pause_protocol()

    def get_write_buffer_size(self):
        raise NotImplementedError

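# Worked example (illustrative, not part of the module API) of the defaults
# computed by _set_write_buffer_limits() above.
def _example_water_marks():
    assert 64 * 1024 == 65536 and 65536 // 4 == 16384  # no args: high, low
    assert 4 * 8192 == 32768                           # only low=8192 given
    assert 1024 // 4 == 256                            # only high=1024 given
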
949
asyncio/unix_events.py
Normal file
@@ -0,0 +1,949 @@
"""Selector event loop for Unix with signal handling."""

import errno
import os
import signal
import socket
import stat
import subprocess
import sys
import threading


from . import base_events
from . import base_subprocess
from . import constants
from . import coroutines
from . import events
from . import selector_events
from . import selectors
from . import transports
from .coroutines import coroutine
from .log import logger


__all__ = ['SelectorEventLoop',
           'AbstractChildWatcher', 'SafeChildWatcher',
           'FastChildWatcher', 'DefaultEventLoopPolicy',
           ]

if sys.platform == 'win32':  # pragma: no cover
    raise ImportError('Signals are not really supported on Windows')


def _sighandler_noop(signum, frame):
    """Dummy signal handler."""
    pass


class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
    """Unix event loop.

    Adds signal handling and UNIX Domain Socket support to SelectorEventLoop.
    """

    def __init__(self, selector=None):
        super().__init__(selector)
        self._signal_handlers = {}

    def _socketpair(self):
        return socket.socketpair()

    def close(self):
        super().close()
        for sig in list(self._signal_handlers):
            self.remove_signal_handler(sig)

    def _process_self_data(self, data):
        for signum in data:
            if not signum:
                # ignore null bytes written by _write_to_self()
                continue
            self._handle_signal(signum)

    def add_signal_handler(self, sig, callback, *args):
        """Add a handler for a signal. UNIX only.

        Raise ValueError if the signal number is invalid or uncatchable.
        Raise RuntimeError if there is a problem setting up the handler.
        """
        if (coroutines.iscoroutine(callback)
                or coroutines.iscoroutinefunction(callback)):
            raise TypeError("coroutines cannot be used with add_signal_handler()")
        self._check_signal(sig)
        self._check_closed()
        try:
            # set_wakeup_fd() raises ValueError if this is not the
            # main thread. By calling it early we ensure that an
            # event loop running in another thread cannot add a signal
            # handler.
            signal.set_wakeup_fd(self._csock.fileno())
        except (ValueError, OSError) as exc:
            raise RuntimeError(str(exc))

        handle = events.Handle(callback, args, self)
        self._signal_handlers[sig] = handle

        try:
            # Register a dummy signal handler to ask Python to write the signal
            # number in the wakeup file descriptor. _process_self_data() will
            # read signal numbers from this file descriptor to handle signals.
            signal.signal(sig, _sighandler_noop)

            # Set SA_RESTART to limit EINTR occurrences.
            signal.siginterrupt(sig, False)
        except OSError as exc:
            del self._signal_handlers[sig]
            if not self._signal_handlers:
                try:
                    signal.set_wakeup_fd(-1)
                except (ValueError, OSError) as nexc:
                    logger.info('set_wakeup_fd(-1) failed: %s', nexc)

            if exc.errno == errno.EINVAL:
                raise RuntimeError('sig {} cannot be caught'.format(sig))
            else:
                raise

    def _handle_signal(self, sig):
        """Internal helper that is the actual signal handler."""
        handle = self._signal_handlers.get(sig)
        if handle is None:
            return  # Assume it's some race condition.
        if handle._cancelled:
            self.remove_signal_handler(sig)  # Remove it properly.
        else:
            self._add_callback_signalsafe(handle)

    def remove_signal_handler(self, sig):
        """Remove a handler for a signal. UNIX only.

        Return True if a signal handler was removed, False if not.
        """
        self._check_signal(sig)
        try:
            del self._signal_handlers[sig]
        except KeyError:
            return False

        if sig == signal.SIGINT:
            handler = signal.default_int_handler
        else:
            handler = signal.SIG_DFL

        try:
            signal.signal(sig, handler)
        except OSError as exc:
            if exc.errno == errno.EINVAL:
                raise RuntimeError('sig {} cannot be caught'.format(sig))
            else:
                raise

        if not self._signal_handlers:
            try:
                signal.set_wakeup_fd(-1)
            except (ValueError, OSError) as exc:
                logger.info('set_wakeup_fd(-1) failed: %s', exc)

        return True

    def _check_signal(self, sig):
        """Internal helper to validate a signal.

        Raise ValueError if the signal number is invalid or uncatchable.
        Raise RuntimeError if there is a problem setting up the handler.
        """
        if not isinstance(sig, int):
            raise TypeError('sig must be an int, not {!r}'.format(sig))

        if not (1 <= sig < signal.NSIG):
            raise ValueError(
                'sig {} out of range(1, {})'.format(sig, signal.NSIG))

    def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
                                  extra=None):
        return _UnixReadPipeTransport(self, pipe, protocol, waiter, extra)

    def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
                                   extra=None):
        return _UnixWritePipeTransport(self, pipe, protocol, waiter, extra)

    @coroutine
    def _make_subprocess_transport(self, protocol, args, shell,
                                   stdin, stdout, stderr, bufsize,
                                   extra=None, **kwargs):
        with events.get_child_watcher() as watcher:
            transp = _UnixSubprocessTransport(self, protocol, args, shell,
                                              stdin, stdout, stderr, bufsize,
                                              extra=extra, **kwargs)
            yield from transp._post_init()
            watcher.add_child_handler(transp.get_pid(),
                                      self._child_watcher_callback, transp)

        return transp

    def _child_watcher_callback(self, pid, returncode, transp):
        self.call_soon_threadsafe(transp._process_exited, returncode)

    @coroutine
    def create_unix_connection(self, protocol_factory, path, *,
                               ssl=None, sock=None,
                               server_hostname=None):
        assert server_hostname is None or isinstance(server_hostname, str)
        if ssl:
            if server_hostname is None:
                raise ValueError(
                    'you have to pass server_hostname when using ssl')
        else:
            if server_hostname is not None:
                raise ValueError('server_hostname is only meaningful with ssl')

        if path is not None:
            if sock is not None:
                raise ValueError(
                    'path and sock can not be specified at the same time')

            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
            try:
                sock.setblocking(False)
                yield from self.sock_connect(sock, path)
            except:
                sock.close()
                raise

        else:
            if sock is None:
                raise ValueError('no path and sock were specified')
            sock.setblocking(False)

        transport, protocol = yield from self._create_connection_transport(
            sock, protocol_factory, ssl, server_hostname)
        return transport, protocol

    @coroutine
    def create_unix_server(self, protocol_factory, path=None, *,
                           sock=None, backlog=100, ssl=None):
        if isinstance(ssl, bool):
            raise TypeError('ssl argument must be an SSLContext or None')

        if path is not None:
            if sock is not None:
                raise ValueError(
                    'path and sock can not be specified at the same time')

            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)

            try:
                sock.bind(path)
            except OSError as exc:
                sock.close()
                if exc.errno == errno.EADDRINUSE:
                    # Let's improve the error message by adding
                    # the exact address on which it occurs.
                    msg = 'Address {!r} is already in use'.format(path)
                    raise OSError(errno.EADDRINUSE, msg) from None
                else:
                    raise
            except:
                sock.close()
                raise
        else:
            if sock is None:
                raise ValueError(
                    'path was not specified, and no sock specified')

            if sock.family != socket.AF_UNIX:
                raise ValueError(
                    'A UNIX Domain Socket was expected, got {!r}'.format(sock))

        server = base_events.Server(self, [sock])
        sock.listen(backlog)
        sock.setblocking(False)
        self._start_serving(protocol_factory, sock, ssl, server)
        return server

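# Usage sketch (illustrative, not part of the module API): registering a
# plain callback for SIGTERM on a running loop; add_signal_handler()
# rejects coroutines, so the callback must be an ordinary function.
def _example_add_sigterm_handler(loop):
    def on_sigterm():
        loop.stop()
    loop.add_signal_handler(signal.SIGTERM, on_sigterm)
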
if hasattr(os, 'set_blocking'):
    def _set_nonblocking(fd):
        os.set_blocking(fd, False)
else:
    import fcntl

    def _set_nonblocking(fd):
        flags = fcntl.fcntl(fd, fcntl.F_GETFL)
        flags = flags | os.O_NONBLOCK
        fcntl.fcntl(fd, fcntl.F_SETFL, flags)


class _UnixReadPipeTransport(transports.ReadTransport):

    max_size = 256 * 1024  # max bytes we read in one event loop iteration

    def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
        super().__init__(extra)
        self._extra['pipe'] = pipe
        self._loop = loop
        self._pipe = pipe
        self._fileno = pipe.fileno()
        mode = os.fstat(self._fileno).st_mode
        if not (stat.S_ISFIFO(mode) or
                stat.S_ISSOCK(mode) or
                stat.S_ISCHR(mode)):
            raise ValueError("Pipe transport is for pipes/sockets only.")
        _set_nonblocking(self._fileno)
        self._protocol = protocol
        self._closing = False
        self._loop.add_reader(self._fileno, self._read_ready)
        self._loop.call_soon(self._protocol.connection_made, self)
        if waiter is not None:
            # wait until protocol.connection_made() has been called
            self._loop.call_soon(waiter._set_result_unless_cancelled, None)

    def __repr__(self):
        info = [self.__class__.__name__, 'fd=%s' % self._fileno]
        if self._pipe is not None:
            polling = selector_events._test_selector_event(
                self._loop._selector,
                self._fileno, selectors.EVENT_READ)
            if polling:
                info.append('polling')
            else:
                info.append('idle')
        else:
            info.append('closed')
        return '<%s>' % ' '.join(info)

    def _read_ready(self):
        try:
            data = os.read(self._fileno, self.max_size)
        except (BlockingIOError, InterruptedError):
            pass
        except OSError as exc:
            self._fatal_error(exc, 'Fatal read error on pipe transport')
        else:
            if data:
                self._protocol.data_received(data)
            else:
                if self._loop.get_debug():
                    logger.info("%r was closed by peer", self)
                self._closing = True
                self._loop.remove_reader(self._fileno)
                self._loop.call_soon(self._protocol.eof_received)
                self._loop.call_soon(self._call_connection_lost, None)

    def pause_reading(self):
        self._loop.remove_reader(self._fileno)

    def resume_reading(self):
        self._loop.add_reader(self._fileno, self._read_ready)

    def close(self):
        if not self._closing:
            self._close(None)

    def _fatal_error(self, exc, message='Fatal error on pipe transport'):
        # should be called by exception handler only
        if (isinstance(exc, OSError) and exc.errno == errno.EIO):
            if self._loop.get_debug():
                logger.debug("%r: %s", self, message, exc_info=True)
        else:
            self._loop.call_exception_handler({
                'message': message,
                'exception': exc,
                'transport': self,
                'protocol': self._protocol,
            })
        self._close(exc)

    def _close(self, exc):
        self._closing = True
        self._loop.remove_reader(self._fileno)
        self._loop.call_soon(self._call_connection_lost, exc)

    def _call_connection_lost(self, exc):
        try:
            self._protocol.connection_lost(exc)
        finally:
            self._pipe.close()
            self._pipe = None
            self._protocol = None
            self._loop = None

class _UnixWritePipeTransport(transports._FlowControlMixin,
                              transports.WriteTransport):

    def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
        super().__init__(extra, loop)
        self._extra['pipe'] = pipe
        self._pipe = pipe
        self._fileno = pipe.fileno()
        mode = os.fstat(self._fileno).st_mode
        is_socket = stat.S_ISSOCK(mode)
        if not (is_socket or
                stat.S_ISFIFO(mode) or
                stat.S_ISCHR(mode)):
            raise ValueError("Pipe transport is only for "
                             "pipes, sockets and character devices")
        _set_nonblocking(self._fileno)
        self._protocol = protocol
        self._buffer = []
        self._conn_lost = 0
        self._closing = False  # Set when close() or write_eof() called.

        # On AIX, the reader trick only works for sockets.
        # On other platforms it works for pipes and sockets.
        # (Exception: OS X 10.4?  Issue #19294.)
        if is_socket or not sys.platform.startswith("aix"):
            self._loop.add_reader(self._fileno, self._read_ready)

        self._loop.call_soon(self._protocol.connection_made, self)
        if waiter is not None:
            # wait until protocol.connection_made() has been called
            self._loop.call_soon(waiter._set_result_unless_cancelled, None)

    def __repr__(self):
        info = [self.__class__.__name__, 'fd=%s' % self._fileno]
        if self._pipe is not None:
            polling = selector_events._test_selector_event(
                self._loop._selector,
                self._fileno, selectors.EVENT_WRITE)
            if polling:
                info.append('polling')
            else:
                info.append('idle')

            bufsize = self.get_write_buffer_size()
            info.append('bufsize=%s' % bufsize)
        else:
            info.append('closed')
        return '<%s>' % ' '.join(info)

    def get_write_buffer_size(self):
        return sum(len(data) for data in self._buffer)

    def _read_ready(self):
        # Pipe was closed by peer.
        if self._loop.get_debug():
            logger.info("%r was closed by peer", self)
        if self._buffer:
            self._close(BrokenPipeError())
        else:
            self._close()

    def write(self, data):
        assert isinstance(data, (bytes, bytearray, memoryview)), repr(data)
        if isinstance(data, bytearray):
            data = memoryview(data)
        if not data:
            return

        if self._conn_lost or self._closing:
            if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
                logger.warning('pipe closed by peer or '
                               'os.write(pipe, data) raised exception.')
            self._conn_lost += 1
            return

        if not self._buffer:
            # Attempt to send it right away first.
            try:
                n = os.write(self._fileno, data)
            except (BlockingIOError, InterruptedError):
                n = 0
            except Exception as exc:
                self._conn_lost += 1
                self._fatal_error(exc, 'Fatal write error on pipe transport')
                return
            if n == len(data):
                return
            elif n > 0:
                data = data[n:]
            self._loop.add_writer(self._fileno, self._write_ready)

        self._buffer.append(data)
        self._maybe_pause_protocol()

    def _write_ready(self):
        data = b''.join(self._buffer)
        assert data, 'Data should not be empty'

        self._buffer.clear()
        try:
            n = os.write(self._fileno, data)
        except (BlockingIOError, InterruptedError):
            self._buffer.append(data)
        except Exception as exc:
            self._conn_lost += 1
            # Remove writer here; _fatal_error() doesn't do it
            # because _buffer is empty.
            self._loop.remove_writer(self._fileno)
            self._fatal_error(exc, 'Fatal write error on pipe transport')
        else:
            if n == len(data):
                self._loop.remove_writer(self._fileno)
                self._maybe_resume_protocol()  # May append to buffer.
                if not self._buffer and self._closing:
                    self._loop.remove_reader(self._fileno)
                    self._call_connection_lost(None)
                return
            elif n > 0:
                data = data[n:]

            self._buffer.append(data)  # Try again later.

    def can_write_eof(self):
        return True

    # TODO: Make the relationships between write_eof(), close(),
    # abort(), _fatal_error() and _close() more straightforward.

    def write_eof(self):
        if self._closing:
            return
        assert self._pipe
        self._closing = True
        if not self._buffer:
            self._loop.remove_reader(self._fileno)
            self._loop.call_soon(self._call_connection_lost, None)

    def close(self):
        if not self._closing:
            # write_eof() is all we need to close the write pipe
            self.write_eof()

    def abort(self):
        self._close(None)

    def _fatal_error(self, exc, message='Fatal error on pipe transport'):
        # should be called by exception handler only
        if isinstance(exc, (BrokenPipeError, ConnectionResetError)):
            if self._loop.get_debug():
                logger.debug("%r: %s", self, message, exc_info=True)
        else:
            self._loop.call_exception_handler({
                'message': message,
                'exception': exc,
                'transport': self,
                'protocol': self._protocol,
            })
        self._close(exc)

    def _close(self, exc=None):
        self._closing = True
        if self._buffer:
            self._loop.remove_writer(self._fileno)
        self._buffer.clear()
        self._loop.remove_reader(self._fileno)
        self._loop.call_soon(self._call_connection_lost, exc)

    def _call_connection_lost(self, exc):
        try:
            self._protocol.connection_lost(exc)
        finally:
            self._pipe.close()
            self._pipe = None
            self._protocol = None
            self._loop = None


if hasattr(os, 'set_inheritable'):
    # Python 3.4 and newer
    _set_inheritable = os.set_inheritable
else:
    import fcntl

    def _set_inheritable(fd, inheritable):
        cloexec_flag = getattr(fcntl, 'FD_CLOEXEC', 1)

        old = fcntl.fcntl(fd, fcntl.F_GETFD)
        if not inheritable:
            fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
        else:
            fcntl.fcntl(fd, fcntl.F_SETFD, old & ~cloexec_flag)

class _UnixSubprocessTransport(base_subprocess.BaseSubprocessTransport):

    def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
        stdin_w = None
        if stdin == subprocess.PIPE:
            # Use a socket pair for stdin, since not all platforms
            # support selecting read events on the write end of a
            # socket (which we use in order to detect closing of the
            # other end). Notably this is needed on AIX, and works
            # just fine on other platforms.
            stdin, stdin_w = self._loop._socketpair()

            # Mark the write end of the stdin pipe as non-inheritable,
            # needed by close_fds=False on Python 3.3 and older
            # (Python 3.4 implements the PEP 446, socketpair returns
            # non-inheritable sockets)
            _set_inheritable(stdin_w.fileno(), False)
        self._proc = subprocess.Popen(
            args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr,
            universal_newlines=False, bufsize=bufsize, **kwargs)
        if stdin_w is not None:
            stdin.close()
            self._proc.stdin = open(stdin_w.detach(), 'wb', buffering=bufsize)


class AbstractChildWatcher:
    """Abstract base class for monitoring child processes.

    Objects derived from this class monitor a collection of subprocesses and
    report their termination or interruption by a signal.

    New callbacks are registered with .add_child_handler(). Starting a new
    process must be done within a 'with' block to allow the watcher to suspend
    its activity until the new process is fully registered (this is needed to
    prevent a race condition in some implementations).

    Example:
        with watcher:
            proc = subprocess.Popen("sleep 1")
            watcher.add_child_handler(proc.pid, callback)

    Notes:
        Implementations of this class must be thread-safe.

        Since child watcher objects may catch the SIGCHLD signal and call
        waitpid(-1), there should be only one active object per process.
    """

    def add_child_handler(self, pid, callback, *args):
        """Register a new child handler.

        Arrange for callback(pid, returncode, *args) to be called when
        process 'pid' terminates. Specifying another callback for the same
        process replaces the previous handler.

        Note: callback() must be thread-safe.
        """
        raise NotImplementedError()

    def remove_child_handler(self, pid):
        """Removes the handler for process 'pid'.

        The function returns True if the handler was successfully removed,
        False if there was nothing to remove."""

        raise NotImplementedError()

    def attach_loop(self, loop):
        """Attach the watcher to an event loop.

        If the watcher was previously attached to an event loop, then it is
        first detached before attaching to the new loop.

        Note: loop may be None.
        """
        raise NotImplementedError()

    def close(self):
        """Close the watcher.

        This must be called to make sure that any underlying resource is freed.
        """
        raise NotImplementedError()

    def __enter__(self):
        """Enter the watcher's context and allow starting new processes.

        This function must return self."""
        raise NotImplementedError()

    def __exit__(self, a, b, c):
        """Exit the watcher's context."""
        raise NotImplementedError()

class BaseChildWatcher(AbstractChildWatcher):

    def __init__(self):
        self._loop = None

    def close(self):
        self.attach_loop(None)

    def _do_waitpid(self, expected_pid):
        raise NotImplementedError()

    def _do_waitpid_all(self):
        raise NotImplementedError()

    def attach_loop(self, loop):
        assert loop is None or isinstance(loop, events.AbstractEventLoop)

        if self._loop is not None:
            self._loop.remove_signal_handler(signal.SIGCHLD)

        self._loop = loop
        if loop is not None:
            loop.add_signal_handler(signal.SIGCHLD, self._sig_chld)

            # Prevent a race condition in case a child terminated
            # during the switch.
            self._do_waitpid_all()

    def _sig_chld(self):
        try:
            self._do_waitpid_all()
        except Exception as exc:
            # self._loop should always be available here
            # as '_sig_chld' is added as a signal handler
            # in 'attach_loop'
            self._loop.call_exception_handler({
                'message': 'Unknown exception in SIGCHLD handler',
                'exception': exc,
            })

    def _compute_returncode(self, status):
        if os.WIFSIGNALED(status):
            # The child process died because of a signal.
            return -os.WTERMSIG(status)
        elif os.WIFEXITED(status):
            # The child process exited (e.g sys.exit()).
            return os.WEXITSTATUS(status)
        else:
            # The child exited, but we don't understand its status.
            # This shouldn't happen, but if it does, let's just
            # return that status; perhaps that helps debug it.
            return status

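# Worked example (illustrative, not part of the module API) of
# _compute_returncode() above, assuming the traditional Unix wait status
# encoding (termination signal in the low byte, exit status in the high
# byte): a child killed by SIGKILL maps to -9, a clean sys.exit(3) maps to 3.
def _example_compute_returncode(watcher):
    return (watcher._compute_returncode(signal.SIGKILL),  # -> -9
            watcher._compute_returncode(3 << 8))          # -> 3
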
class SafeChildWatcher(BaseChildWatcher):
    """'Safe' child watcher implementation.

    This implementation avoids disrupting other code spawning processes by
    polling explicitly each process in the SIGCHLD handler instead of calling
    os.waitpid(-1).

    This is a safe solution but it has a significant overhead when handling a
    big number of children (O(n) each time SIGCHLD is raised)
    """

    def __init__(self):
        super().__init__()
        self._callbacks = {}

    def close(self):
        self._callbacks.clear()
        super().close()

    def __enter__(self):
        return self

    def __exit__(self, a, b, c):
        pass

    def add_child_handler(self, pid, callback, *args):
        self._callbacks[pid] = callback, args

        # Prevent a race condition in case the child is already terminated.
        self._do_waitpid(pid)

    def remove_child_handler(self, pid):
        try:
            del self._callbacks[pid]
            return True
        except KeyError:
            return False

    def _do_waitpid_all(self):

        for pid in list(self._callbacks):
            self._do_waitpid(pid)

    def _do_waitpid(self, expected_pid):
        assert expected_pid > 0

        try:
            pid, status = os.waitpid(expected_pid, os.WNOHANG)
        except ChildProcessError:
            # The child process is already reaped
            # (may happen if waitpid() is called elsewhere).
            pid = expected_pid
            returncode = 255
            logger.warning(
                "Unknown child process pid %d, will report returncode 255",
                pid)
        else:
            if pid == 0:
                # The child process is still alive.
                return

            returncode = self._compute_returncode(status)
            if self._loop.get_debug():
                logger.debug('process %s exited with returncode %s',
                             expected_pid, returncode)

        try:
            callback, args = self._callbacks.pop(pid)
        except KeyError:  # pragma: no cover
            # May happen if .remove_child_handler() is called
            # after os.waitpid() returns.
            if self._loop.get_debug():
                logger.warning("Child watcher got an unexpected pid: %r",
                               pid, exc_info=True)
        else:
            callback(pid, returncode, *args)

class FastChildWatcher(BaseChildWatcher):
    """'Fast' child watcher implementation.

    This implementation reaps every terminated process by calling
    os.waitpid(-1) directly, possibly breaking other code spawning processes
    and waiting for their termination.

    There is no noticeable overhead when handling a large number of children
    (O(1) each time a child terminates).
    """
    def __init__(self):
        super().__init__()
        self._callbacks = {}
        self._lock = threading.Lock()
        self._zombies = {}
        self._forks = 0

    def close(self):
        self._callbacks.clear()
        self._zombies.clear()
        super().close()

    def __enter__(self):
        with self._lock:
            self._forks += 1

            return self

    def __exit__(self, a, b, c):
        with self._lock:
            self._forks -= 1

            if self._forks or not self._zombies:
                return

            collateral_victims = str(self._zombies)
            self._zombies.clear()

        logger.warning(
            "Caught subprocesses termination from unknown pids: %s",
            collateral_victims)

    def add_child_handler(self, pid, callback, *args):
        assert self._forks, "Must use the context manager"
        with self._lock:
            try:
                returncode = self._zombies.pop(pid)
            except KeyError:
                # The child is running.
                self._callbacks[pid] = callback, args
                return

        # The child is dead already. We can fire the callback.
        callback(pid, returncode, *args)

    def remove_child_handler(self, pid):
        try:
            del self._callbacks[pid]
            return True
        except KeyError:
            return False

    def _do_waitpid_all(self):
        # Because of signal coalescing, we must keep calling waitpid() as
        # long as we're able to reap a child.
        while True:
            try:
                pid, status = os.waitpid(-1, os.WNOHANG)
            except ChildProcessError:
                # No more child processes exist.
                return
            else:
                if pid == 0:
                    # A child process is still alive.
                    return

                returncode = self._compute_returncode(status)

            with self._lock:
                try:
                    callback, args = self._callbacks.pop(pid)
                except KeyError:
                    # unknown child
                    if self._forks:
                        # It may not be registered yet.
                        self._zombies[pid] = returncode
                        if self._loop.get_debug():
                            logger.debug('unknown process %s exited '
                                         'with returncode %s',
                                         pid, returncode)
                        continue
                    callback = None
                else:
                    if self._loop.get_debug():
                        logger.debug('process %s exited with returncode %s',
                                     pid, returncode)

            if callback is None:
                logger.warning(
                    "Caught subprocess termination from unknown pid: "
                    "%d -> %d", pid, returncode)
            else:
                callback(pid, returncode, *args)

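# A minimal usage sketch (names are illustrative). FastChildWatcher requires
# the context manager around spawn + registration: children reaped before
# add_child_handler() is called are kept in the _zombies dict until the
# matching handler is registered.
def _example_fast_watcher(my_loop, spawn_child, on_exit):
    watcher = FastChildWatcher()
    watcher.attach_loop(my_loop)
    with watcher:
        pid = spawn_child()                      # e.g. a fork()/exec helper
        watcher.add_child_handler(pid, on_exit)  # fires now if already dead

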
class _UnixDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
    """XXX"""
    _loop_factory = _UnixSelectorEventLoop

    def __init__(self):
        super().__init__()
        self._watcher = None

    def _init_watcher(self):
        with events._lock:
            if self._watcher is None:  # pragma: no branch
                self._watcher = SafeChildWatcher()
                if isinstance(threading.current_thread(),
                              threading._MainThread):
                    self._watcher.attach_loop(self._local._loop)

    def set_event_loop(self, loop):
        """Set the event loop.

        As a side effect, if a child watcher was set before, then calling
        .set_event_loop() from the main thread will call .attach_loop(loop) on
        the child watcher.
        """

        super().set_event_loop(loop)

        if self._watcher is not None and \
                isinstance(threading.current_thread(), threading._MainThread):
            self._watcher.attach_loop(loop)

    def get_child_watcher(self):
        """Get the watcher for child processes.

        If not yet set, a SafeChildWatcher object is automatically created.
        """
        if self._watcher is None:
            self._init_watcher()

        return self._watcher

    def set_child_watcher(self, watcher):
        """Set the watcher for child processes."""

        assert watcher is None or isinstance(watcher, AbstractChildWatcher)

        if self._watcher is not None:
            self._watcher.close()

        self._watcher = watcher

SelectorEventLoop = _UnixSelectorEventLoop
DefaultEventLoopPolicy = _UnixDefaultEventLoopPolicy

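
# A minimal sketch of swapping in the FastChildWatcher through the policy,
# e.g. for a server spawning many subprocesses (assumes the package is
# importable as 'asyncio').
def _example_use_fast_watcher():
    import asyncio
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    watcher = FastChildWatcher()
    watcher.attach_loop(loop)
    asyncio.get_event_loop_policy().set_child_watcher(watcher)
    return loop, watcher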
634
asyncio/windows_events.py
Normal file
@@ -0,0 +1,634 @@
"""Selector and proactor event loops for Windows."""

import _winapi
import errno
import math
import socket
import struct
import weakref

from . import events
from . import base_subprocess
from . import futures
from . import proactor_events
from . import selector_events
from . import tasks
from . import windows_utils
from . import _overlapped
from .coroutines import coroutine
from .log import logger


__all__ = ['SelectorEventLoop', 'ProactorEventLoop', 'IocpProactor',
           'DefaultEventLoopPolicy',
           ]


NULL = 0
INFINITE = 0xffffffff
ERROR_CONNECTION_REFUSED = 1225
ERROR_CONNECTION_ABORTED = 1236

class _OverlappedFuture(futures.Future):
    """Subclass of Future which represents an overlapped operation.

    Cancelling it will immediately cancel the overlapped operation.
    """

    def __init__(self, ov, *, loop=None):
        super().__init__(loop=loop)
        if self._source_traceback:
            del self._source_traceback[-1]
        self._ov = ov

    def _repr_info(self):
        info = super()._repr_info()
        if self._ov is not None:
            state = 'pending' if self._ov.pending else 'completed'
            info.insert(1, 'overlapped=<%s, %#x>' % (state, self._ov.address))
        return info

    def _cancel_overlapped(self):
        if self._ov is None:
            return
        try:
            self._ov.cancel()
        except OSError as exc:
            context = {
                'message': 'Cancelling an overlapped future failed',
                'exception': exc,
                'future': self,
            }
            if self._source_traceback:
                context['source_traceback'] = self._source_traceback
            self._loop.call_exception_handler(context)
        self._ov = None

    def cancel(self):
        self._cancel_overlapped()
        return super().cancel()

    def set_exception(self, exception):
        super().set_exception(exception)
        self._cancel_overlapped()

    def set_result(self, result):
        super().set_result(result)
        self._ov = None

class _WaitHandleFuture(futures.Future):
    """Subclass of Future which represents a wait handle."""

    def __init__(self, iocp, ov, handle, wait_handle, *, loop=None):
        super().__init__(loop=loop)
        if self._source_traceback:
            del self._source_traceback[-1]
        # iocp and ov are only used by cancel() to notify IocpProactor
        # that the wait was cancelled
        self._iocp = iocp
        self._ov = ov
        self._handle = handle
        self._wait_handle = wait_handle

    def _poll(self):
        # non-blocking wait: use a timeout of 0 milliseconds
        return (_winapi.WaitForSingleObject(self._handle, 0) ==
                _winapi.WAIT_OBJECT_0)

    def _repr_info(self):
        info = super()._repr_info()
        info.insert(1, 'handle=%#x' % self._handle)
        if self._wait_handle:
            state = 'signaled' if self._poll() else 'waiting'
            info.insert(1, 'wait_handle=<%s, %#x>'
                           % (state, self._wait_handle))
        return info

    def _unregister_wait(self):
        if self._wait_handle is None:
            return
        try:
            _overlapped.UnregisterWait(self._wait_handle)
        except OSError as exc:
            # ERROR_IO_PENDING is not an error, the wait was unregistered
            if exc.winerror != _overlapped.ERROR_IO_PENDING:
                context = {
                    'message': 'Failed to unregister the wait handle',
                    'exception': exc,
                    'future': self,
                }
                if self._source_traceback:
                    context['source_traceback'] = self._source_traceback
                self._loop.call_exception_handler(context)
        self._wait_handle = None
        self._iocp = None
        self._ov = None

    def cancel(self):
        result = super().cancel()
        if self._ov is not None:
            # signal the cancellation to the overlapped object
            _overlapped.PostQueuedCompletionStatus(self._iocp, True,
                                                   0, self._ov.address)
        self._unregister_wait()
        return result

    def set_exception(self, exception):
        super().set_exception(exception)
        self._unregister_wait()

    def set_result(self, result):
        super().set_result(result)
        self._unregister_wait()

class PipeServer(object):
    """Class representing a pipe server.

    This is much like a bound, listening socket.
    """
    def __init__(self, address):
        self._address = address
        self._free_instances = weakref.WeakSet()
        # initialize the pipe attribute before calling _server_pipe_handle()
        # because this function can raise an exception and the destructor
        # calls the close() method
        self._pipe = None
        self._accept_pipe_future = None
        self._pipe = self._server_pipe_handle(True)

    def _get_unconnected_pipe(self):
        # Create a new instance and return the previous one. This ensures
        # that (until the server is closed) there is always at least
        # one pipe handle for the address. Therefore, if a client attempts
        # to connect, it will not fail with FileNotFoundError.
        tmp, self._pipe = self._pipe, self._server_pipe_handle(False)
        return tmp

    def _server_pipe_handle(self, first):
        # Return a wrapper for a new pipe handle.
        if self._address is None:
            return None
        flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED
        if first:
            flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
        h = _winapi.CreateNamedPipe(
            self._address, flags,
            _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
            _winapi.PIPE_WAIT,
            _winapi.PIPE_UNLIMITED_INSTANCES,
            windows_utils.BUFSIZE, windows_utils.BUFSIZE,
            _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL)
        pipe = windows_utils.PipeHandle(h)
        self._free_instances.add(pipe)
        return pipe

    def close(self):
        if self._accept_pipe_future is not None:
            self._accept_pipe_future.cancel()
            self._accept_pipe_future = None
        # Close all instances which have not been connected to by a client.
        if self._address is not None:
            for pipe in self._free_instances:
                pipe.close()
            self._pipe = None
            self._address = None
            self._free_instances.clear()

    __del__ = close


class _WindowsSelectorEventLoop(selector_events.BaseSelectorEventLoop):
    """Windows version of selector event loop."""

    def _socketpair(self):
        return windows_utils.socketpair()

class ProactorEventLoop(proactor_events.BaseProactorEventLoop):
    """Windows version of proactor event loop using IOCP."""

    def __init__(self, proactor=None):
        if proactor is None:
            proactor = IocpProactor()
        super().__init__(proactor)

    def _socketpair(self):
        return windows_utils.socketpair()

    @coroutine
    def create_pipe_connection(self, protocol_factory, address):
        f = self._proactor.connect_pipe(address)
        pipe = yield from f
        protocol = protocol_factory()
        trans = self._make_duplex_pipe_transport(pipe, protocol,
                                                 extra={'addr': address})
        return trans, protocol

    @coroutine
    def start_serving_pipe(self, protocol_factory, address):
        server = PipeServer(address)

        def loop_accept_pipe(f=None):
            pipe = None
            try:
                if f:
                    pipe = f.result()
                    server._free_instances.discard(pipe)
                    protocol = protocol_factory()
                    self._make_duplex_pipe_transport(
                        pipe, protocol, extra={'addr': address})
                pipe = server._get_unconnected_pipe()
                if pipe is None:
                    return
                f = self._proactor.accept_pipe(pipe)
            except OSError as exc:
                if pipe and pipe.fileno() != -1:
                    self.call_exception_handler({
                        'message': 'Pipe accept failed',
                        'exception': exc,
                        'pipe': pipe,
                    })
                    pipe.close()
                elif self._debug:
                    logger.warning("Accept pipe failed on pipe %r",
                                   pipe, exc_info=True)
            except futures.CancelledError:
                if pipe:
                    pipe.close()
            else:
                server._accept_pipe_future = f
                f.add_done_callback(loop_accept_pipe)

        self.call_soon(loop_accept_pipe)
        return [server]

    @coroutine
    def _make_subprocess_transport(self, protocol, args, shell,
                                   stdin, stdout, stderr, bufsize,
                                   extra=None, **kwargs):
        transp = _WindowsSubprocessTransport(self, protocol, args, shell,
                                             stdin, stdout, stderr, bufsize,
                                             extra=extra, **kwargs)
        yield from transp._post_init()
        return transp

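# A minimal client-side sketch (the pipe name is illustrative): connecting
# a protocol to a named pipe through the proactor loop.
def _example_pipe_client(protocol_factory):
    loop = ProactorEventLoop()
    address = r'\\.\pipe\example-pipe'  # hypothetical pipe name
    coro = loop.create_pipe_connection(protocol_factory, address)
    transport, protocol = loop.run_until_complete(coro)
    return transport, protocol

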
class IocpProactor:
    """Proactor implementation using IOCP."""

    def __init__(self, concurrency=0xffffffff):
        self._loop = None
        self._results = []
        self._iocp = _overlapped.CreateIoCompletionPort(
            _overlapped.INVALID_HANDLE_VALUE, NULL, 0, concurrency)
        self._cache = {}
        self._registered = weakref.WeakSet()
        self._stopped_serving = weakref.WeakSet()

    def __repr__(self):
        return ('<%s overlapped#=%s result#=%s>'
                % (self.__class__.__name__, len(self._cache),
                   len(self._results)))

    def set_loop(self, loop):
        self._loop = loop

    def select(self, timeout=None):
        if not self._results:
            self._poll(timeout)
        tmp = self._results
        self._results = []
        return tmp

    def recv(self, conn, nbytes, flags=0):
        self._register_with_iocp(conn)
        ov = _overlapped.Overlapped(NULL)
        if isinstance(conn, socket.socket):
            ov.WSARecv(conn.fileno(), nbytes, flags)
        else:
            ov.ReadFile(conn.fileno(), nbytes)

        def finish_recv(trans, key, ov):
            try:
                return ov.getresult()
            except OSError as exc:
                if exc.winerror == _overlapped.ERROR_NETNAME_DELETED:
                    raise ConnectionResetError(*exc.args)
                else:
                    raise

        return self._register(ov, conn, finish_recv)

    def send(self, conn, buf, flags=0):
        self._register_with_iocp(conn)
        ov = _overlapped.Overlapped(NULL)
        if isinstance(conn, socket.socket):
            ov.WSASend(conn.fileno(), buf, flags)
        else:
            ov.WriteFile(conn.fileno(), buf)

        def finish_send(trans, key, ov):
            try:
                return ov.getresult()
            except OSError as exc:
                if exc.winerror == _overlapped.ERROR_NETNAME_DELETED:
                    raise ConnectionResetError(*exc.args)
                else:
                    raise

        return self._register(ov, conn, finish_send)

    def accept(self, listener):
        self._register_with_iocp(listener)
        conn = self._get_accept_socket(listener.family)
        ov = _overlapped.Overlapped(NULL)
        ov.AcceptEx(listener.fileno(), conn.fileno())

        def finish_accept(trans, key, ov):
            ov.getresult()
            # Use SO_UPDATE_ACCEPT_CONTEXT so getsockname() etc work.
            buf = struct.pack('@P', listener.fileno())
            conn.setsockopt(socket.SOL_SOCKET,
                            _overlapped.SO_UPDATE_ACCEPT_CONTEXT, buf)
            conn.settimeout(listener.gettimeout())
            return conn, conn.getpeername()

        @coroutine
        def accept_coro(future, conn):
            # Coroutine closing the accept socket if the future is cancelled
            try:
                yield from future
            except futures.CancelledError:
                conn.close()
                raise

        future = self._register(ov, listener, finish_accept)
        coro = accept_coro(future, conn)
        tasks.async(coro, loop=self._loop)
        return future

    def connect(self, conn, address):
        self._register_with_iocp(conn)
        # The socket needs to be locally bound before we call ConnectEx().
        try:
            _overlapped.BindLocal(conn.fileno(), conn.family)
        except OSError as e:
            if e.winerror != errno.WSAEINVAL:
                raise
            # Probably already locally bound; check using getsockname().
            if conn.getsockname()[1] == 0:
                raise
        ov = _overlapped.Overlapped(NULL)
        ov.ConnectEx(conn.fileno(), address)

        def finish_connect(trans, key, ov):
            ov.getresult()
            # Use SO_UPDATE_CONNECT_CONTEXT so getsockname() etc work.
            conn.setsockopt(socket.SOL_SOCKET,
                            _overlapped.SO_UPDATE_CONNECT_CONTEXT, 0)
            return conn

        return self._register(ov, conn, finish_connect)

    def accept_pipe(self, pipe):
        self._register_with_iocp(pipe)
        ov = _overlapped.Overlapped(NULL)
        ov.ConnectNamedPipe(pipe.fileno())

        def finish_accept_pipe(trans, key, ov):
            ov.getresult()
            return pipe

        # FIXME: Tulip issue 196: why do we need register=False?
        # See also the comment in the _register() method
        return self._register(ov, pipe, finish_accept_pipe,
                              register=False)

    def connect_pipe(self, address):
        ov = _overlapped.Overlapped(NULL)
        ov.WaitNamedPipeAndConnect(address, self._iocp, ov.address)

        def finish_connect_pipe(err, handle, ov):
            # err and handle were the arguments passed to
            # PostQueuedCompletionStatus() in a function run in a thread pool.
            if err == _overlapped.ERROR_SEM_TIMEOUT:
                # Connection did not succeed within time limit.
                msg = _overlapped.FormatMessage(err)
                raise ConnectionRefusedError(0, msg, None, err)
            elif err != 0:
                msg = _overlapped.FormatMessage(err)
                raise OSError(0, msg, None, err)
            else:
                return windows_utils.PipeHandle(handle)

        return self._register(ov, None, finish_connect_pipe,
                              wait_for_post=True)

    def wait_for_handle(self, handle, timeout=None):
        if timeout is None:
            ms = _winapi.INFINITE
        else:
            # RegisterWaitForSingleObject() has a resolution of 1 millisecond,
            # round away from zero to wait *at least* timeout seconds.
            ms = math.ceil(timeout * 1e3)

        # We only create ov so we can use ov.address as a key for the cache.
        ov = _overlapped.Overlapped(NULL)
        wh = _overlapped.RegisterWaitWithQueue(
            handle, self._iocp, ov.address, ms)
        f = _WaitHandleFuture(self._iocp, ov, handle, wh, loop=self._loop)
        if f._source_traceback:
            del f._source_traceback[-1]

        def finish_wait_for_handle(trans, key, ov):
            # Note that this second wait means that we should only use
            # this with handle types where a successful wait has no
            # effect. So events or processes are all right, but locks
            # or semaphores are not. Also note that if the handle is
            # signalled and then quickly reset, we may return
            # False even though we have not timed out.
            return f._poll()

        if f._poll():
            try:
                result = f._poll()
            except OSError as exc:
                f.set_exception(exc)
            else:
                f.set_result(result)

        self._cache[ov.address] = (f, ov, 0, finish_wait_for_handle)
        return f

    def _register_with_iocp(self, obj):
        # To get notifications of finished ops on this object sent to the
        # completion port, we must register the handle.
        if obj not in self._registered:
            self._registered.add(obj)
            _overlapped.CreateIoCompletionPort(obj.fileno(), self._iocp, 0, 0)
            # XXX We could also use SetFileCompletionNotificationModes()
            # to avoid sending notifications to the completion port of ops
            # that succeed immediately.

    def _register(self, ov, obj, callback,
                  wait_for_post=False, register=True):
        # Return a future which will be set with the result of the
        # operation when it completes. The future's value is actually
        # the value returned by callback().
        f = _OverlappedFuture(ov, loop=self._loop)
        if f._source_traceback:
            del f._source_traceback[-1]
        if not ov.pending and not wait_for_post:
            # The operation has completed, so no need to postpone the
            # work. We cannot take this short cut if we need the
            # NumberOfBytes, CompletionKey values returned by
            # PostQueuedCompletionStatus().
            try:
                value = callback(None, None, ov)
            except OSError as e:
                f.set_exception(e)
            else:
                f.set_result(value)
            # Even if GetOverlappedResult() was called, we have to wait for
            # the notification of the completion in
            # GetQueuedCompletionStatus().
            # Register the overlapped operation to keep a reference to the
            # OVERLAPPED object, otherwise the memory is freed and Windows
            # may read uninitialized memory.
            #
            # For an unknown reason, ConnectNamedPipe() behaves differently:
            # the completion is not notified by GetOverlappedResult() if we
            # already called GetOverlappedResult(). For this specific case,
            # we don't expect a notification (register is set to False).
        else:
            register = True
        if register:
            # Register the overlapped operation for later. Note that
            # we only store obj to prevent it from being garbage
            # collected too early.
            self._cache[ov.address] = (f, ov, obj, callback)
        return f

    def _get_accept_socket(self, family):
        s = socket.socket(family)
        s.settimeout(0)
        return s

    def _poll(self, timeout=None):
        if timeout is None:
            ms = INFINITE
        elif timeout < 0:
            raise ValueError("negative timeout")
        else:
            # GetQueuedCompletionStatus() has a resolution of 1 millisecond,
            # round away from zero to wait *at least* timeout seconds.
            ms = math.ceil(timeout * 1e3)
            if ms >= INFINITE:
                raise ValueError("timeout too big")

        while True:
            status = _overlapped.GetQueuedCompletionStatus(self._iocp, ms)
            if status is None:
                return
            ms = 0

            err, transferred, key, address = status
            try:
                f, ov, obj, callback = self._cache.pop(address)
            except KeyError:
                if self._loop.get_debug():
                    self._loop.call_exception_handler({
                        'message': ('GetQueuedCompletionStatus() returned an '
                                    'unexpected event'),
                        'status': ('err=%s transferred=%s key=%#x address=%#x'
                                   % (err, transferred, key, address)),
                    })

                # key is either zero, or it is used to return a pipe
                # handle which should be closed to avoid a leak.
                if key not in (0, _overlapped.INVALID_HANDLE_VALUE):
                    _winapi.CloseHandle(key)
                continue

            if obj in self._stopped_serving:
                f.cancel()
            # Don't call the callback if _register() already read the result
            # or if the overlapped operation has been cancelled.
            elif not f.done():
                try:
                    value = callback(transferred, key, ov)
                except OSError as e:
                    f.set_exception(e)
                    self._results.append(f)
                else:
                    f.set_result(value)
                    self._results.append(f)

    def _stop_serving(self, obj):
        # obj is a socket or pipe handle. It will be closed in
        # BaseProactorEventLoop._stop_serving() which will make any
        # pending operations fail quickly.
        self._stopped_serving.add(obj)

    def close(self):
        # Cancel remaining registered operations.
        for address, (fut, ov, obj, callback) in list(self._cache.items()):
            if obj is None:
                # The operation was started with connect_pipe() which
                # queues a task to Windows' thread pool. This cannot
                # be cancelled, so just forget it.
                del self._cache[address]
            # FIXME: Tulip issue 196: remove this case, it should not happen
            elif fut.done() and not fut.cancelled():
                del self._cache[address]
            else:
                try:
                    fut.cancel()
                except OSError as exc:
                    if self._loop is not None:
                        context = {
                            'message': 'Cancelling a future failed',
                            'exception': exc,
                            'future': fut,
                        }
                        if fut._source_traceback:
                            context['source_traceback'] = fut._source_traceback
                        self._loop.call_exception_handler(context)

        while self._cache:
            if not self._poll(1):
                logger.debug('taking long time to close proactor')

        self._results = []
        if self._iocp is not None:
            _winapi.CloseHandle(self._iocp)
            self._iocp = None

    def __del__(self):
        self.close()


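# A minimal sketch of wait_for_handle(): it turns a signalable Windows
# handle (e.g. a process handle) into a future. Only handle types where a
# successful wait has no side effect are appropriate (see the comment in
# wait_for_handle() above). Accessing loop._proactor mirrors the private
# usage in _WindowsSubprocessTransport below.
def _example_wait_for_process(loop, proc):
    # 'proc' is assumed to be a windows_utils.Popen instance.
    f = loop._proactor.wait_for_handle(int(proc._handle))
    f.add_done_callback(lambda fut: print('process signaled:', fut.result()))
    return f

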
class _WindowsSubprocessTransport(base_subprocess.BaseSubprocessTransport):

    def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
        self._proc = windows_utils.Popen(
            args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr,
            bufsize=bufsize, **kwargs)

        def callback(f):
            returncode = self._proc.poll()
            self._process_exited(returncode)

        f = self._loop._proactor.wait_for_handle(int(self._proc._handle))
        f.add_done_callback(callback)


SelectorEventLoop = _WindowsSelectorEventLoop


class _WindowsDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
    _loop_factory = SelectorEventLoop


DefaultEventLoopPolicy = _WindowsDefaultEventLoopPolicy
209
asyncio/windows_utils.py
Normal file
@@ -0,0 +1,209 @@
"""
Various Windows specific bits and pieces
"""

import sys

if sys.platform != 'win32':  # pragma: no cover
    raise ImportError('win32 only')

import socket
import itertools
import msvcrt
import os
import subprocess
import tempfile
import _winapi


__all__ = ['socketpair', 'pipe', 'Popen', 'PIPE', 'PipeHandle']


# Constants/globals


BUFSIZE = 8192
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
_mmap_counter = itertools.count()


if hasattr(socket, 'socketpair'):
    # Since Python 3.5, socket.socketpair() is also available on Windows
    socketpair = socket.socketpair
else:
    # Replacement for socket.socketpair()
    def socketpair(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0):
        """A socket pair usable as a self-pipe, for Windows.

        Origin: https://gist.github.com/4325783, by Geert Jansen.
        Public domain.
        """
        if family == socket.AF_INET:
            host = '127.0.0.1'
        elif family == socket.AF_INET6:
            host = '::1'
        else:
            raise ValueError("Only AF_INET and AF_INET6 socket address "
                             "families are supported")
        if type != socket.SOCK_STREAM:
            raise ValueError("Only SOCK_STREAM socket type is supported")
        if proto != 0:
            raise ValueError("Only protocol zero is supported")

        # We create a connected TCP socket. Note the trick with
        # setblocking(False) that prevents us from having to create a thread.
        lsock = socket.socket(family, type, proto)
        try:
            lsock.bind((host, 0))
            lsock.listen(1)
            # On IPv6, ignore flow_info and scope_id
            addr, port = lsock.getsockname()[:2]
            csock = socket.socket(family, type, proto)
            try:
                csock.setblocking(False)
                try:
                    csock.connect((addr, port))
                except (BlockingIOError, InterruptedError):
                    pass
                csock.setblocking(True)
                ssock, _ = lsock.accept()
            except:
                csock.close()
                raise
        finally:
            lsock.close()
        return (ssock, csock)


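# A minimal sketch of the typical use: the pair serves as a self-pipe,
# where one end is written to from another thread or a callback to wake up
# a select() loop watching the other end.
def _example_self_pipe():
    ssock, csock = socketpair()
    try:
        csock.send(b'\0')     # wake up the side watching ssock
        return ssock.recv(1)  # drain the wakeup byte
    finally:
        ssock.close()
        csock.close()

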
# Replacement for os.pipe() using handles instead of fds


def pipe(*, duplex=False, overlapped=(True, True), bufsize=BUFSIZE):
    """Like os.pipe() but with overlapped support and using handles not fds."""
    address = tempfile.mktemp(prefix=r'\\.\pipe\python-pipe-%d-%d-' %
                              (os.getpid(), next(_mmap_counter)))

    if duplex:
        openmode = _winapi.PIPE_ACCESS_DUPLEX
        access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE
        obsize, ibsize = bufsize, bufsize
    else:
        openmode = _winapi.PIPE_ACCESS_INBOUND
        access = _winapi.GENERIC_WRITE
        obsize, ibsize = 0, bufsize

    openmode |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE

    if overlapped[0]:
        openmode |= _winapi.FILE_FLAG_OVERLAPPED

    if overlapped[1]:
        flags_and_attribs = _winapi.FILE_FLAG_OVERLAPPED
    else:
        flags_and_attribs = 0

    h1 = h2 = None
    try:
        h1 = _winapi.CreateNamedPipe(
            address, openmode, _winapi.PIPE_WAIT,
            1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL)

        h2 = _winapi.CreateFile(
            address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING,
            flags_and_attribs, _winapi.NULL)

        ov = _winapi.ConnectNamedPipe(h1, overlapped=True)
        ov.GetOverlappedResult(True)
        return h1, h2
    except:
        if h1 is not None:
            _winapi.CloseHandle(h1)
        if h2 is not None:
            _winapi.CloseHandle(h2)
        raise


# Wrapper for a pipe handle


class PipeHandle:
    """Wrapper for an overlapped pipe handle which is vaguely file-object like.

    The IOCP event loop can use these instead of socket objects.
    """
    def __init__(self, handle):
        self._handle = handle

    @property
    def handle(self):
        return self._handle

    def fileno(self):
        return self._handle

    def close(self, *, CloseHandle=_winapi.CloseHandle):
        if self._handle != -1:
            CloseHandle(self._handle)
            self._handle = -1

    __del__ = close

    def __enter__(self):
        return self

    def __exit__(self, t, v, tb):
        self.close()


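# A minimal sketch: PipeHandle supports the context-manager protocol, so
# both raw handles from pipe() are reliably closed. With the default
# duplex=False, h1 is the read (server) end and h2 the write end.
def _example_pipe_handles():
    h1, h2 = pipe(overlapped=(True, True))
    with PipeHandle(h1) as read_end, PipeHandle(h2) as write_end:
        return read_end.fileno(), write_end.fileno()

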
# Replacement for subprocess.Popen using overlapped pipe handles


class Popen(subprocess.Popen):
    """Replacement for subprocess.Popen using overlapped pipe handles.

    The stdin, stdout, stderr are None or instances of PipeHandle.
    """
    def __init__(self, args, stdin=None, stdout=None, stderr=None, **kwds):
        assert not kwds.get('universal_newlines')
        assert kwds.get('bufsize', 0) == 0
        stdin_rfd = stdout_wfd = stderr_wfd = None
        stdin_wh = stdout_rh = stderr_rh = None
        if stdin == PIPE:
            stdin_rh, stdin_wh = pipe(overlapped=(False, True), duplex=True)
            stdin_rfd = msvcrt.open_osfhandle(stdin_rh, os.O_RDONLY)
        else:
            stdin_rfd = stdin
        if stdout == PIPE:
            stdout_rh, stdout_wh = pipe(overlapped=(True, False))
            stdout_wfd = msvcrt.open_osfhandle(stdout_wh, 0)
        else:
            stdout_wfd = stdout
        if stderr == PIPE:
            stderr_rh, stderr_wh = pipe(overlapped=(True, False))
            stderr_wfd = msvcrt.open_osfhandle(stderr_wh, 0)
        elif stderr == STDOUT:
            stderr_wfd = stdout_wfd
        else:
            stderr_wfd = stderr
        try:
            super().__init__(args, stdin=stdin_rfd, stdout=stdout_wfd,
                             stderr=stderr_wfd, **kwds)
        except:
            for h in (stdin_wh, stdout_rh, stderr_rh):
                if h is not None:
                    _winapi.CloseHandle(h)
            raise
        else:
            if stdin_wh is not None:
                self.stdin = PipeHandle(stdin_wh)
            if stdout_rh is not None:
                self.stdout = PipeHandle(stdout_rh)
            if stderr_rh is not None:
                self.stderr = PipeHandle(stderr_rh)
        finally:
            if stdin == PIPE:
                os.close(stdin_rfd)
            if stdout == PIPE:
                os.close(stdout_wfd)
            if stderr == PIPE:
                os.close(stderr_wfd)
45
check.py
Normal file
@@ -0,0 +1,45 @@
"""Search for lines >= 80 chars or with trailing whitespace."""

import os
import sys


def main():
    args = sys.argv[1:] or os.curdir
    for arg in args:
        if os.path.isdir(arg):
            for dn, dirs, files in os.walk(arg):
                for fn in sorted(files):
                    if fn.endswith('.py'):
                        process(os.path.join(dn, fn))
                dirs[:] = [d for d in dirs if d[0] != '.']
                dirs.sort()
        else:
            process(arg)


def isascii(x):
    try:
        x.encode('ascii')
        return True
    except UnicodeError:
        return False


def process(fn):
    try:
        f = open(fn)
    except IOError as err:
        print(err)
        return
    try:
        for i, line in enumerate(f):
            line = line.rstrip('\n')
            sline = line.rstrip()
            if len(line) >= 80 or line != sline or not isascii(line):
                print('{}:{:d}:{}{}'.format(
                    fn, i+1, sline, '_' * (len(line) - len(sline))))
    finally:
        f.close()


main()
213
examples/cacheclt.py
Normal file
@@ -0,0 +1,213 @@
"""Client for cache server.

See cachesvr.py for protocol description.
"""

import argparse
import asyncio
from asyncio import test_utils
import json
import logging

ARGS = argparse.ArgumentParser(description='Cache client example.')
ARGS.add_argument(
    '--tls', action='store_true', dest='tls',
    default=False, help='Use TLS')
ARGS.add_argument(
    '--iocp', action='store_true', dest='iocp',
    default=False, help='Use IOCP event loop (Windows only)')
ARGS.add_argument(
    '--host', action='store', dest='host',
    default='localhost', help='Host name')
ARGS.add_argument(
    '--port', action='store', dest='port',
    default=54321, type=int, help='Port number')
ARGS.add_argument(
    '--timeout', action='store', dest='timeout',
    default=5, type=float, help='Timeout')
ARGS.add_argument(
    '--max_backoff', action='store', dest='max_backoff',
    default=5, type=float, help='Max backoff on reconnect')
ARGS.add_argument(
    '--ntasks', action='store', dest='ntasks',
    default=10, type=int, help='Number of tester tasks')
ARGS.add_argument(
    '--ntries', action='store', dest='ntries',
    default=5, type=int, help='Number of request tries before giving up')


args = ARGS.parse_args()


class CacheClient:
    """Multiplexing cache client.

    This wraps a single connection to the cache server. The
    connection is automatically re-opened when an error occurs.

    Multiple tasks may share this object; the requests will be
    serialized.

    The public API is get(), set(), delete() (all are coroutines).
    """

    def __init__(self, host, port, sslctx=None, loop=None):
        self.host = host
        self.port = port
        self.sslctx = sslctx
        self.loop = loop
        self.todo = set()
        self.initialized = False
        self.task = asyncio.Task(self.activity(), loop=self.loop)

    @asyncio.coroutine
    def get(self, key):
        resp = yield from self.request('get', key)
        if resp is None:
            return None
        return resp.get('value')

    @asyncio.coroutine
    def set(self, key, value):
        resp = yield from self.request('set', key, value)
        if resp is None:
            return False
        return resp.get('status') == 'ok'

    @asyncio.coroutine
    def delete(self, key):
        resp = yield from self.request('delete', key)
        if resp is None:
            return False
        return resp.get('status') == 'ok'

    @asyncio.coroutine
    def request(self, type, key, value=None):
        assert not self.task.done()
        data = {'type': type, 'key': key}
        if value is not None:
            data['value'] = value
        payload = json.dumps(data).encode('utf8')
        waiter = asyncio.Future(loop=self.loop)
        if self.initialized:
            try:
                yield from self.send(payload, waiter)
            except IOError:
                self.todo.add((payload, waiter))
        else:
            self.todo.add((payload, waiter))
        return (yield from waiter)

    @asyncio.coroutine
    def activity(self):
        backoff = 0
        while True:
            try:
                self.reader, self.writer = yield from asyncio.open_connection(
                    self.host, self.port, ssl=self.sslctx, loop=self.loop)
            except Exception as exc:
                backoff = min(args.max_backoff, backoff + (backoff//2) + 1)
                logging.info('Error connecting: %r; sleep %s', exc, backoff)
                yield from asyncio.sleep(backoff, loop=self.loop)
                continue
            backoff = 0
            self.next_id = 0
            self.pending = {}
            self.initialized = True
            try:
                while self.todo:
                    payload, waiter = self.todo.pop()
                    if not waiter.done():
                        yield from self.send(payload, waiter)
                while True:
                    resp_id, resp = yield from self.process()
                    if resp_id in self.pending:
                        payload, waiter = self.pending.pop(resp_id)
                        if not waiter.done():
                            waiter.set_result(resp)
            except Exception as exc:
                self.initialized = False
                self.writer.close()
                while self.pending:
                    req_id, pair = self.pending.popitem()
                    payload, waiter = pair
                    if not waiter.done():
                        self.todo.add(pair)
                logging.info('Error processing: %r', exc)

    @asyncio.coroutine
    def send(self, payload, waiter):
        self.next_id += 1
        req_id = self.next_id
        frame = 'request %d %d\n' % (req_id, len(payload))
        self.writer.write(frame.encode('ascii'))
        self.writer.write(payload)
        self.pending[req_id] = payload, waiter
        yield from self.writer.drain()

    @asyncio.coroutine
    def process(self):
        frame = yield from self.reader.readline()
        if not frame:
            raise EOFError()
        head, tail = frame.split(None, 1)
        if head == b'error':
            raise IOError('OOB error: %r' % tail)
        if head != b'response':
            raise IOError('Bad frame: %r' % frame)
        resp_id, resp_size = map(int, tail.split())
        data = yield from self.reader.readexactly(resp_size)
        if len(data) != resp_size:
            raise EOFError()
        resp = json.loads(data.decode('utf8'))
        return resp_id, resp


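# A minimal usage sketch (key and value are illustrative): the public
# coroutines multiplex over the single connection owned by the client's
# activity() task, so several tasks can share one CacheClient.
@asyncio.coroutine
def _example_cache_roundtrip(loop):
    cache = CacheClient(args.host, args.port, loop=loop)
    yield from cache.set('greeting', 'hello')
    value = yield from cache.get('greeting')    # -> 'hello'
    yield from cache.delete('greeting')
    return value

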
def main():
    asyncio.set_event_loop(None)
    if args.iocp:
        from asyncio.windows_events import ProactorEventLoop
        loop = ProactorEventLoop()
    else:
        loop = asyncio.new_event_loop()
    sslctx = None
    if args.tls:
        sslctx = test_utils.dummy_ssl_context()
    cache = CacheClient(args.host, args.port, sslctx=sslctx, loop=loop)
    try:
        loop.run_until_complete(
            asyncio.gather(
                *[testing(i, cache, loop) for i in range(args.ntasks)],
                loop=loop))
    finally:
        loop.close()


@asyncio.coroutine
def testing(label, cache, loop):

    def w(g):
        return asyncio.wait_for(g, args.timeout, loop=loop)

    key = 'foo-%s' % label
    while True:
        logging.info('%s %s', label, '-'*20)
        try:
            ret = yield from w(cache.set(key, 'hello-%s-world' % label))
            logging.info('%s set %s', label, ret)
            ret = yield from w(cache.get(key))
            logging.info('%s get %s', label, ret)
            ret = yield from w(cache.delete(key))
            logging.info('%s del %s', label, ret)
            ret = yield from w(cache.get(key))
            logging.info('%s get2 %s', label, ret)
        except asyncio.TimeoutError:
            logging.warning('%s Timeout', label)
        except Exception as exc:
            logging.exception('%s Client exception: %r', label, exc)
            break


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    main()
249
examples/cachesvr.py
Normal file
@@ -0,0 +1,249 @@
"""A simple memcache-like server.

The basic data structure maintained is a single in-memory dictionary
mapping string keys to string values, with operations get, set and
delete. (Both keys and values may contain Unicode.)

This is a TCP server listening on port 54321. There is no
authentication.

Requests provide an operation and return a response. A connection may
be used for multiple requests. The connection is closed when a client
sends a bad request.

If a client is idle for over 5 seconds (i.e., it does not send another
request, or fails to read the whole response, within this time), it is
disconnected.

Framing of requests and responses within a connection uses a
line-based protocol. The first line of a request is the frame header
and contains three whitespace-delimited tokens followed by LF or CRLF:

- the keyword 'request'
- a decimal request ID; the first request is '1', the second '2', etc.
- a decimal byte count giving the size of the rest of the request

Note that the request IDs *must* be consecutive and start at '1' for
each connection.

Response frames look the same except the keyword is 'response'. The
response ID matches the request ID. There should be exactly one
response to each request and responses should be seen in the same
order as the requests.

After the frame, individual requests and responses are JSON encoded.

If the frame header or the JSON request body cannot be parsed, an
unframed error message (always starting with 'error') is written back
and the connection is closed.

JSON-encoded requests can be:

- {"type": "get", "key": <string>}
- {"type": "set", "key": <string>, "value": <string>}
- {"type": "delete", "key": <string>}

Responses are also JSON-encoded:

- {"status": "ok", "value": <string>}  # Successful get request
- {"status": "ok"}  # Successful set or delete request
- {"status": "notfound"}  # Key not found for get or delete request

If the request is valid JSON but cannot be handled (e.g., the type or
key field is absent or invalid), an error response of the following
form is returned, but the connection is not closed:

- {"error": <string>}
"""

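
# A minimal sketch of the framing described above (key and value are
# illustrative): the first request a client would send on a fresh
# connection.
def _example_request_frame():
    import json
    payload = json.dumps({'type': 'set', 'key': 'k', 'value': 'v'})
    payload = payload.encode('utf8')
    header = 'request 1 %d\n' % len(payload)  # ID 1: first request
    return header.encode('ascii') + payload
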
import argparse
import asyncio
import json
import logging
import os
import random

ARGS = argparse.ArgumentParser(description='Cache server example.')
ARGS.add_argument(
    '--tls', action='store_true', dest='tls',
    default=False, help='Use TLS')
ARGS.add_argument(
    '--iocp', action='store_true', dest='iocp',
    default=False, help='Use IOCP event loop (Windows only)')
ARGS.add_argument(
    '--host', action='store', dest='host',
    default='localhost', help='Host name')
ARGS.add_argument(
    '--port', action='store', dest='port',
    default=54321, type=int, help='Port number')
ARGS.add_argument(
    '--timeout', action='store', dest='timeout',
    default=5, type=float, help='Timeout')
ARGS.add_argument(
    '--random_failure_percent', action='store', dest='fail_percent',
    default=0, type=float, help='Fail randomly N percent of the time')
ARGS.add_argument(
    '--random_failure_sleep', action='store', dest='fail_sleep',
    default=0, type=float, help='Sleep time when randomly failing')
ARGS.add_argument(
    '--random_response_sleep', action='store', dest='resp_sleep',
    default=0, type=float, help='Sleep time before responding')

args = ARGS.parse_args()


class Cache:

    def __init__(self, loop):
        self.loop = loop
        self.table = {}

    @asyncio.coroutine
    def handle_client(self, reader, writer):
        # Wrapper to log stuff and close writer (i.e., transport).
        peer = writer.get_extra_info('socket').getpeername()
        logging.info('got a connection from %s', peer)
        try:
            yield from self.frame_parser(reader, writer)
        except Exception as exc:
            logging.error('error %r from %s', exc, peer)
        else:
            logging.info('end connection from %s', peer)
        finally:
            writer.close()

    @asyncio.coroutine
    def frame_parser(self, reader, writer):
        # This takes care of the framing.
        last_request_id = 0
        while True:
            # Read the frame header, parse it, read the data.
            # NOTE: The readline() and readexactly() calls will hang
            # if the client doesn't send enough data but doesn't
            # disconnect either. We add a timeout to each. (But the
            # timeout should really be implemented by StreamReader.)
            framing_b = yield from asyncio.wait_for(
                reader.readline(),
                timeout=args.timeout, loop=self.loop)
            if random.random()*100 < args.fail_percent:
                logging.warning('Inserting random failure')
                yield from asyncio.sleep(args.fail_sleep*random.random(),
                                         loop=self.loop)
                writer.write(b'error random failure\r\n')
                break
            logging.debug('framing_b = %r', framing_b)
            if not framing_b:
                break  # Clean close.
            try:
                frame_keyword, request_id_b, byte_count_b = framing_b.split()
            except ValueError:
                writer.write(b'error unparseable frame\r\n')
                break
            if frame_keyword != b'request':
                writer.write(b'error frame does not start with request\r\n')
                break
            try:
                request_id, byte_count = int(request_id_b), int(byte_count_b)
            except ValueError:
                writer.write(b'error unparseable frame parameters\r\n')
                break
            if request_id != last_request_id + 1 or byte_count < 2:
                writer.write(b'error invalid frame parameters\r\n')
                break
            last_request_id = request_id
            request_b = yield from asyncio.wait_for(
                reader.readexactly(byte_count),
                timeout=args.timeout, loop=self.loop)
            try:
                request = json.loads(request_b.decode('utf8'))
            except ValueError:
                writer.write(b'error unparseable json\r\n')
                break
            response = self.handle_request(request)  # Not a coroutine.
            if response is None:
                writer.write(b'error unhandlable request\r\n')
                break
            response_b = json.dumps(response).encode('utf8') + b'\r\n'
            byte_count = len(response_b)
            framing_s = 'response {} {}\r\n'.format(request_id, byte_count)
            writer.write(framing_s.encode('ascii'))
            yield from asyncio.sleep(args.resp_sleep*random.random(),
                                     loop=self.loop)
            writer.write(response_b)

    def handle_request(self, request):
        # This parses one request and farms it out to a specific handler.
        # Return None for all errors.
        if not isinstance(request, dict):
            return {'error': 'request is not a dict'}
        request_type = request.get('type')
        if request_type is None:
            return {'error': 'no type in request'}
        if request_type not in {'get', 'set', 'delete'}:
            return {'error': 'unknown request type'}
        key = request.get('key')
        if not isinstance(key, str):
            return {'error': 'key is not a string'}
        if request_type == 'get':
            return self.handle_get(key)
        if request_type == 'set':
            value = request.get('value')
            if not isinstance(value, str):
                return {'error': 'value is not a string'}
            return self.handle_set(key, value)
        if request_type == 'delete':
            return self.handle_delete(key)
        assert False, 'bad request type'  # Should have been caught above.

    def handle_get(self, key):
        value = self.table.get(key)
        if value is None:
            return {'status': 'notfound'}
        else:
            return {'status': 'ok', 'value': value}

    def handle_set(self, key, value):
        self.table[key] = value
        return {'status': 'ok'}

    def handle_delete(self, key):
        if key not in self.table:
            return {'status': 'notfound'}
        else:
            del self.table[key]
            return {'status': 'ok'}


def main():
    asyncio.set_event_loop(None)
    if args.iocp:
        from asyncio.windows_events import ProactorEventLoop
        loop = ProactorEventLoop()
    else:
        loop = asyncio.new_event_loop()
    sslctx = None
    if args.tls:
        import ssl
        # TODO: take cert/key from args as well.
        here = os.path.join(os.path.dirname(__file__), '..', 'tests')
        sslctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        sslctx.options |= ssl.OP_NO_SSLv2
        sslctx.load_cert_chain(
            certfile=os.path.join(here, 'ssl_cert.pem'),
            keyfile=os.path.join(here, 'ssl_key.pem'))
    cache = Cache(loop)
    task = asyncio.streams.start_server(cache.handle_client,
                                        args.host, args.port,
                                        ssl=sslctx, loop=loop)
    svr = loop.run_until_complete(task)
    for sock in svr.sockets:
        logging.info('socket %s', sock.getsockname())
    try:
        loop.run_forever()
    finally:
        loop.close()


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    main()
128
examples/child_process.py
Normal file
@@ -0,0 +1,128 @@
"""
Example of asynchronous interaction with a child python process.

This example shows how to attach an existing Popen object and use the low
level transport-protocol API. See shell.py and subprocess_shell.py for
higher level examples.
"""

import os
import sys

try:
    import asyncio
except ImportError:
    # asyncio is not installed
    sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
    import asyncio

if sys.platform == 'win32':
    from asyncio.windows_utils import Popen, PIPE
    from asyncio.windows_events import ProactorEventLoop
else:
    from subprocess import Popen, PIPE

#
# Return a write-only transport wrapping a writable pipe
#

@asyncio.coroutine
def connect_write_pipe(file):
    loop = asyncio.get_event_loop()
    transport, _ = yield from loop.connect_write_pipe(asyncio.Protocol, file)
    return transport

#
# Wrap a readable pipe in a stream
#

@asyncio.coroutine
def connect_read_pipe(file):
    loop = asyncio.get_event_loop()
    stream_reader = asyncio.StreamReader(loop=loop)
    def factory():
        return asyncio.StreamReaderProtocol(stream_reader)
    transport, _ = yield from loop.connect_read_pipe(factory, file)
    return stream_reader, transport


#
# Example
#

@asyncio.coroutine
def main(loop):
    # program which prints evaluation of each expression from stdin
    code = r'''if 1:
                   import os
                   def writeall(fd, buf):
                       while buf:
                           n = os.write(fd, buf)
                           buf = buf[n:]
                   while True:
                       s = os.read(0, 1024)
                       if not s:
                           break
                       s = s.decode('ascii')
                       s = repr(eval(s)) + '\n'
                       s = s.encode('ascii')
                       writeall(1, s)
                   '''

    # commands to send to input
    commands = iter([b"1+1\n",
                     b"2**16\n",
                     b"1/3\n",
                     b"'x'*50",
                     b"1/0\n"])

    # start subprocess and wrap stdin, stdout, stderr
    p = Popen([sys.executable, '-c', code],
              stdin=PIPE, stdout=PIPE, stderr=PIPE)

    stdin = yield from connect_write_pipe(p.stdin)
    stdout, stdout_transport = yield from connect_read_pipe(p.stdout)
    stderr, stderr_transport = yield from connect_read_pipe(p.stderr)

    # interact with subprocess
    name = {stdout: 'OUT', stderr: 'ERR'}
    registered = {asyncio.Task(stderr.readline()): stderr,
                  asyncio.Task(stdout.readline()): stdout}
    while registered:
        # write command
        cmd = next(commands, None)
        if cmd is None:
            stdin.close()
        else:
            print('>>>', cmd.decode('ascii').rstrip())
            stdin.write(cmd)

        # get and print lines from stdout, stderr
        timeout = None
        while registered:
            done, pending = yield from asyncio.wait(
                registered, timeout=timeout,
                return_when=asyncio.FIRST_COMPLETED)
            if not done:
                break
            for f in done:
                stream = registered.pop(f)
                res = f.result()
                print(name[stream], res.decode('ascii').rstrip())
                if res != b'':
                    registered[asyncio.Task(stream.readline())] = stream
            timeout = 0.0

    stdout_transport.close()
    stderr_transport.close()

if __name__ == '__main__':
    if sys.platform == 'win32':
        loop = ProactorEventLoop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(main(loop))
    finally:
        loop.close()
863
examples/crawl.py
Normal file
@@ -0,0 +1,863 @@
#!/usr/bin/env python3.4

"""A simple web crawler."""

# TODO:
# - More organized logging (with task ID or URL?).
# - Use logging module for Logger.
# - KeyboardInterrupt in HTML parsing may hang or report unretrieved error.
# - Support gzip encoding.
# - Close connection if HTTP/1.0 response.
# - Add timeouts. (E.g. when switching networks, all seems to hang.)
# - Add arguments to specify TLS settings (e.g. cert/key files).
# - Skip reading large non-text/html files?
# - Use ETag and If-Modified-Since?
# - Handle out of file descriptors directly? (How?)

import argparse
import asyncio
import asyncio.locks
import cgi
from http.client import BadStatusLine
import logging
import re
import sys
import time
import urllib.parse


ARGS = argparse.ArgumentParser(description="Web crawler")
ARGS.add_argument(
    '--iocp', action='store_true', dest='iocp',
    default=False, help='Use IOCP event loop (Windows only)')
ARGS.add_argument(
    '--select', action='store_true', dest='select',
    default=False, help='Use Select event loop instead of default')
ARGS.add_argument(
    'roots', nargs='*',
    default=[], help='Root URL (may be repeated)')
ARGS.add_argument(
    '--max_redirect', action='store', type=int, metavar='N',
    default=10, help='Limit redirection chains (for 301, 302 etc.)')
ARGS.add_argument(
    '--max_tries', action='store', type=int, metavar='N',
    default=4, help='Limit retries on network errors')
ARGS.add_argument(
    '--max_tasks', action='store', type=int, metavar='N',
    default=100, help='Limit concurrent connections')
ARGS.add_argument(
    '--max_pool', action='store', type=int, metavar='N',
    default=100, help='Limit connection pool size')
ARGS.add_argument(
    '--exclude', action='store', metavar='REGEX',
    help='Exclude matching URLs')
ARGS.add_argument(
    '--strict', action='store_true',
    default=True, help='Strict host matching (default)')
ARGS.add_argument(
    '--lenient', action='store_false', dest='strict',
    default=False, help='Lenient host matching')
ARGS.add_argument(
    '-v', '--verbose', action='count', dest='level',
    default=1, help='Verbose logging (repeat for more verbose)')
ARGS.add_argument(
    '-q', '--quiet', action='store_const', const=0, dest='level',
    default=1, help='Quiet logging (opposite of --verbose)')


ESCAPES = [('quot', '"'),
           ('gt', '>'),
           ('lt', '<'),
           ('amp', '&')  # Must be last.
           ]


def unescape(url):
    """Turn '&amp;' into '&', and so on.

    This is the inverse of cgi.escape().
    """
    for name, char in ESCAPES:
        url = url.replace('&' + name + ';', char)
    return url


def fix_url(url):
    """Prefix a scheme-less URL with http://."""
    if '://' not in url:
        url = 'http://' + url
    return url


class Logger:
|
||||
|
||||
def __init__(self, level):
|
||||
self.level = level
|
||||
|
||||
def _log(self, n, args):
|
||||
if self.level >= n:
|
||||
print(*args, file=sys.stderr, flush=True)
|
||||
|
||||
def log(self, n, *args):
|
||||
self._log(n, args)
|
||||
|
||||
def __call__(self, n, *args):
|
||||
self._log(n, args)
|
||||
|
||||
|
||||
class ConnectionPool:
|
||||
"""A connection pool.
|
||||

    To open a connection, use get_connection().  To recycle it, use
    recycle_connection().

    The pool is mostly just a mapping from (host, port, ssl) tuples to
    lists of Connections.  The currently active connections are *not*
    in the data structure; get_connection() takes the connection out,
    and recycle_connection() puts it back in.  To recycle a
    connection, call conn.close(recycle=True).

    There are limits to both the overall pool and the per-key pool.
    """

    def __init__(self, log, max_pool=10, max_tasks=5):
        self.log = log
        self.max_pool = max_pool  # Overall limit.
        self.max_tasks = max_tasks  # Per-key limit.
        self.loop = asyncio.get_event_loop()
        self.connections = {}  # {(host, port, ssl): [Connection, ...], ...}
        self.queue = []  # [Connection, ...]

    def close(self):
        """Close all connections available for reuse."""
        for conns in self.connections.values():
            for conn in conns:
                conn.close()
        self.connections.clear()
        self.queue.clear()

    @asyncio.coroutine
    def get_connection(self, host, port, ssl):
        """Create or reuse a connection."""
        port = port or (443 if ssl else 80)
        try:
            ipaddrs = yield from self.loop.getaddrinfo(host, port)
        except Exception as exc:
            self.log(0, 'Exception %r for (%r, %r)' % (exc, host, port))
            raise
        self.log(1, '* %s resolves to %s' %
                    (host, ', '.join(ip[4][0] for ip in ipaddrs)))

        # Look for a reusable connection.
        for _, _, _, _, (h, p, *_) in ipaddrs:
            key = h, p, ssl
            conn = None
            conns = self.connections.get(key)
            while conns:
                conn = conns.pop(0)
                self.queue.remove(conn)
                if not conns:
                    del self.connections[key]
                if conn.stale():
                    self.log(1, 'closing stale connection for', key)
                    conn.close()  # Just in case.
                else:
                    self.log(1, '* Reusing pooled connection', key,
                                'FD =', conn.fileno())
                    return conn

        # Create a new connection.
        conn = Connection(self.log, self, host, port, ssl)
        yield from conn.connect()
        self.log(1, '* New connection', conn.key, 'FD =', conn.fileno())
        return conn

    def recycle_connection(self, conn):
        """Make a connection available for reuse.

        This also prunes the pool if it exceeds the size limits.
        """
        if conn.stale():
            conn.close()
            return

        key = conn.key
        conns = self.connections.setdefault(key, [])
        conns.append(conn)
        self.queue.append(conn)

        if len(conns) <= self.max_tasks and len(self.queue) <= self.max_pool:
            return

        # Prune the queue.

        # Close stale connections for this key first.
        stale = [conn for conn in conns if conn.stale()]
        if stale:
            for conn in stale:
                conns.remove(conn)
                self.queue.remove(conn)
                self.log(1, 'closing stale connection for', key)
                conn.close()
            if not conns:
                del self.connections[key]

        # Close oldest connection(s) for this key if limit reached.
        while len(conns) > self.max_tasks:
            conn = conns.pop(0)
            self.queue.remove(conn)
            self.log(1, 'closing oldest connection for', key)
            conn.close()

        if len(self.queue) <= self.max_pool:
            return

        # Close overall stale connections.
        stale = [conn for conn in self.queue if conn.stale()]
        if stale:
            for conn in stale:
                conns = self.connections.get(conn.key)
                conns.remove(conn)
                self.queue.remove(conn)
                self.log(1, 'closing stale connection for', conn.key)
                conn.close()

        # Close oldest overall connection(s) if limit reached.
        while len(self.queue) > self.max_pool:
            conn = self.queue.pop(0)
            conns = self.connections.get(conn.key)
            c = conns.pop(0)
            assert conn == c, (conn.key, conn, c, conns)
            self.log(1, 'closing overall oldest connection for', conn.key)
            conn.close()


class Connection:

    def __init__(self, log, pool, host, port, ssl):
        self.log = log
        self.pool = pool
        self.host = host
        self.port = port
        self.ssl = ssl
        self.reader = None
        self.writer = None
        self.key = None

    def stale(self):
        return self.reader is None or self.reader.at_eof()

    def fileno(self):
        writer = self.writer
        if writer is not None:
            transport = writer.transport
            if transport is not None:
                sock = transport.get_extra_info('socket')
                if sock is not None:
                    return sock.fileno()
        return None

    @asyncio.coroutine
    def connect(self):
        self.reader, self.writer = yield from asyncio.open_connection(
            self.host, self.port, ssl=self.ssl)
        peername = self.writer.get_extra_info('peername')
        if peername:
            self.host, self.port = peername[:2]
        else:
            self.log(1, 'NO PEERNAME???', self.host, self.port, self.ssl)
        self.key = self.host, self.port, self.ssl

    def close(self, recycle=False):
        if recycle and not self.stale():
            self.pool.recycle_connection(self)
        else:
            self.writer.close()
        self.pool = self.reader = self.writer = None


class Request:
    """HTTP request.

    Use connect() to open a connection; send_request() to send the
    request; get_response() to receive the response headers.
    """

    def __init__(self, log, url, pool):
        self.log = log
        self.url = url
        self.pool = pool
        self.parts = urllib.parse.urlparse(self.url)
        self.scheme = self.parts.scheme
        assert self.scheme in ('http', 'https'), repr(url)
        self.ssl = self.parts.scheme == 'https'
        self.netloc = self.parts.netloc
        self.hostname = self.parts.hostname
        self.port = self.parts.port or (443 if self.ssl else 80)
        self.path = (self.parts.path or '/')
        self.query = self.parts.query
        if self.query:
            self.full_path = '%s?%s' % (self.path, self.query)
        else:
            self.full_path = self.path
        self.http_version = 'HTTP/1.1'
        self.method = 'GET'
        self.headers = []
        self.conn = None

    @asyncio.coroutine
    def connect(self):
        """Open a connection to the server."""
        self.log(1, '* Connecting to %s:%s using %s for %s' %
                    (self.hostname, self.port,
                     'ssl' if self.ssl else 'tcp',
                     self.url))
        self.conn = yield from self.pool.get_connection(self.hostname,
                                                        self.port, self.ssl)

    def close(self, recycle=False):
        """Close the connection, recycle if requested."""
        if self.conn is not None:
            if not recycle:
                self.log(1, 'closing connection for', self.conn.key)
            self.conn.close(recycle)
            self.conn = None

    @asyncio.coroutine
    def putline(self, line):
        """Write a line to the connection.

        Used for the request line and headers.
        """
        self.log(2, '>', line)
        self.conn.writer.write(line.encode('latin-1') + b'\r\n')

    @asyncio.coroutine
    def send_request(self):
        """Send the request."""
        request_line = '%s %s %s' % (self.method, self.full_path,
                                     self.http_version)
        yield from self.putline(request_line)
        # TODO: What if a header is already set?
        self.headers.append(('User-Agent', 'asyncio-example-crawl/0.0'))
        self.headers.append(('Host', self.netloc))
        self.headers.append(('Accept', '*/*'))
        ##self.headers.append(('Accept-Encoding', 'gzip'))
        for key, value in self.headers:
            line = '%s: %s' % (key, value)
            yield from self.putline(line)
        yield from self.putline('')

    @asyncio.coroutine
    def get_response(self):
        """Receive the response."""
        response = Response(self.log, self.conn.reader)
        yield from response.read_headers()
        return response


class Response:
    """HTTP response.

    Call read_headers() to receive the response headers.  Then check
    the status attribute and call get_header() to inspect the headers.
    Finally call read() to receive the body.
    """

    def __init__(self, log, reader):
        self.log = log
        self.reader = reader
        self.http_version = None  # 'HTTP/1.1'
        self.status = None  # 200
        self.reason = None  # 'Ok'
        self.headers = []  # [('Content-Type', 'text/html')]

    @asyncio.coroutine
    def getline(self):
        """Read one line from the connection."""
        line = (yield from self.reader.readline()).decode('latin-1').rstrip()
        self.log(2, '<', line)
        return line

    @asyncio.coroutine
    def read_headers(self):
        """Read the response status and the response headers."""
        status_line = yield from self.getline()
        status_parts = status_line.split(None, 2)
        if len(status_parts) != 3:
            self.log(0, 'bad status_line', repr(status_line))
            raise BadStatusLine(status_line)
        self.http_version, status, self.reason = status_parts
        self.status = int(status)
        while True:
            header_line = yield from self.getline()
            if not header_line:
                break
            # TODO: Continuation lines.
            key, value = header_line.split(':', 1)
            self.headers.append((key, value.strip()))

    def get_redirect_url(self, default=''):
        """Inspect the status and return the redirect url if appropriate."""
        if self.status not in (300, 301, 302, 303, 307):
            return default
        return self.get_header('Location', default)

    def get_header(self, key, default=''):
        """Get one header value, using a case insensitive header name."""
        key = key.lower()
        for k, v in self.headers:
            if k.lower() == key:
                return v
        return default

    @asyncio.coroutine
    def read(self):
        """Read the response body.

        This honors Content-Length and Transfer-Encoding: chunked.
        """
        nbytes = None
        for key, value in self.headers:
            if key.lower() == 'content-length':
                nbytes = int(value)
                break
        if nbytes is None:
            if self.get_header('transfer-encoding').lower() == 'chunked':
                self.log(2, 'parsing chunked response')
                blocks = []
                while True:
                    size_header = yield from self.reader.readline()
                    if not size_header:
                        self.log(0, 'premature end of chunked response')
                        break
                    self.log(3, 'size_header =', repr(size_header))
                    parts = size_header.split(b';')
                    size = int(parts[0], 16)
                    if size:
                        self.log(3, 'reading chunk of', size, 'bytes')
                        block = yield from self.reader.readexactly(size)
                        assert len(block) == size, (len(block), size)
                        blocks.append(block)
                    crlf = yield from self.reader.readline()
                    assert crlf == b'\r\n', repr(crlf)
                    if not size:
                        break
                body = b''.join(blocks)
                self.log(1, 'chunked response had', len(body),
                            'bytes in', len(blocks), 'blocks')
            else:
                self.log(3, 'reading until EOF')
                body = yield from self.reader.read()
                # TODO: Should make sure not to recycle the connection
                # in this case.
        else:
            body = yield from self.reader.readexactly(nbytes)
        return body


class Fetcher:
    """Logic and state for one URL.

    When found in crawler.busy, this represents a URL to be fetched or
    in the process of being fetched; when found in crawler.done, this
    holds the results from fetching it.

    This is usually associated with a task.  This references the
    crawler for the connection pool and to add more URLs to its todo
    list.

    Call fetch() to do the fetching, then report() to print the results.
    """

    def __init__(self, log, url, crawler, max_redirect=10, max_tries=4):
        self.log = log
        self.url = url
        self.crawler = crawler
        # We don't loop resolving redirects here -- we just use this
        # to decide whether to add the redirect URL to crawler.todo.
        self.max_redirect = max_redirect
        # But we do loop to retry on errors a few times.
        self.max_tries = max_tries
        # Everything we collect from the response goes here.
        self.task = None
        self.exceptions = []
        self.tries = 0
        self.request = None
        self.response = None
        self.body = None
        self.next_url = None
        self.ctype = None
        self.pdict = None
        self.encoding = None
        self.urls = None
        self.new_urls = None

    @asyncio.coroutine
    def fetch(self):
        """Attempt to fetch the contents of the URL.

        If successful, and the data is HTML, extract further links and
        add them to the crawler.  Redirects are also added back there.
        """
        while self.tries < self.max_tries:
            self.tries += 1
            self.request = None
            try:
                self.request = Request(self.log, self.url, self.crawler.pool)
                yield from self.request.connect()
                yield from self.request.send_request()
                self.response = yield from self.request.get_response()
                self.body = yield from self.response.read()
                h_conn = self.response.get_header('connection').lower()
                if h_conn != 'close':
                    self.request.close(recycle=True)
                    self.request = None
                if self.tries > 1:
                    self.log(1, 'try', self.tries, 'for', self.url, 'success')
                break
            except (BadStatusLine, OSError) as exc:
                self.exceptions.append(exc)
                self.log(1, 'try', self.tries, 'for', self.url,
                            'raised', repr(exc))
                ##import pdb; pdb.set_trace()
                # Don't reuse the connection in this case.
            finally:
                if self.request is not None:
                    self.request.close()
        else:
            # We never broke out of the while loop, i.e. all tries failed.
            self.log(0, 'no success for', self.url,
                        'in', self.max_tries, 'tries')
            return
        next_url = self.response.get_redirect_url()
        if next_url:
            self.next_url = urllib.parse.urljoin(self.url, next_url)
            if self.max_redirect > 0:
                self.log(1, 'redirect to', self.next_url, 'from', self.url)
                self.crawler.add_url(self.next_url, self.max_redirect-1)
            else:
                self.log(0, 'redirect limit reached for', self.next_url,
                            'from', self.url)
        else:
            if self.response.status == 200:
                self.ctype = self.response.get_header('content-type')
                self.pdict = {}
                if self.ctype:
                    self.ctype, self.pdict = cgi.parse_header(self.ctype)
                self.encoding = self.pdict.get('charset', 'utf-8')
                if self.ctype == 'text/html':
                    body = self.body.decode(self.encoding, 'replace')
                    # Replace href with (?:href|src) to follow image links.
                    self.urls = set(re.findall(r'(?i)href=["\']?([^\s"\'<>]+)',
                                               body))
                    if self.urls:
                        self.log(1, 'got', len(self.urls),
                                    'distinct urls from', self.url)
                    self.new_urls = set()
                    for url in self.urls:
                        url = unescape(url)
                        url = urllib.parse.urljoin(self.url, url)
                        url, frag = urllib.parse.urldefrag(url)
                        if self.crawler.add_url(url):
                            self.new_urls.add(url)

    def report(self, stats, file=None):
        """Print a report on the state for this URL.

        Also update the Stats instance.
        """
        if self.task is not None:
            if not self.task.done():
                stats.add('pending')
                print(self.url, 'pending', file=file)
                return
            elif self.task.cancelled():
                stats.add('cancelled')
                print(self.url, 'cancelled', file=file)
                return
            elif self.task.exception():
                stats.add('exception')
                exc = self.task.exception()
                stats.add('exception_' + exc.__class__.__name__)
                print(self.url, exc, file=file)
                return
        if len(self.exceptions) == self.tries:
            stats.add('fail')
            exc = self.exceptions[-1]
            stats.add('fail_' + str(exc.__class__.__name__))
            print(self.url, 'error', exc, file=file)
        elif self.next_url:
            stats.add('redirect')
            print(self.url, self.response.status, 'redirect', self.next_url,
                  file=file)
        elif self.ctype == 'text/html':
            stats.add('html')
            size = len(self.body or b'')
            stats.add('html_bytes', size)
            print(self.url, self.response.status,
                  self.ctype, self.encoding,
                  size,
                  '%d/%d' % (len(self.new_urls or ()), len(self.urls or ())),
                  file=file)
        elif self.response is None:
            print(self.url, 'no response object', file=file)
        else:
            size = len(self.body or b'')
            if self.response.status == 200:
                stats.add('other')
                stats.add('other_bytes', size)
            else:
                stats.add('error')
                stats.add('error_bytes', size)
                stats.add('status_%s' % self.response.status)
            print(self.url, self.response.status,
                  self.ctype, self.encoding,
                  size,
                  file=file)


class Stats:
    """Record stats of various sorts."""

    def __init__(self):
        self.stats = {}

    def add(self, key, count=1):
        self.stats[key] = self.stats.get(key, 0) + count

    def report(self, file=None):
        for key, count in sorted(self.stats.items()):
            print('%10d' % count, key, file=file)


class Crawler:
    """Crawl a set of URLs.

    This manages three disjoint sets of URLs (todo, busy, done).  The
    data structures actually store dicts -- the values in todo give
    the redirect limit, while the values in busy and done are Fetcher
    instances.
    """
    def __init__(self, log,
                 roots, exclude=None, strict=True,  # What to crawl.
                 max_redirect=10, max_tries=4,  # Per-url limits.
                 max_tasks=10, max_pool=10,  # Global limits.
                 ):
        self.log = log
        self.roots = roots
        self.exclude = exclude
        self.strict = strict
        self.max_redirect = max_redirect
        self.max_tries = max_tries
        self.max_tasks = max_tasks
        self.max_pool = max_pool
        self.todo = {}
        self.busy = {}
        self.done = {}
        self.pool = ConnectionPool(self.log, max_pool, max_tasks)
        self.root_domains = set()
        for root in roots:
            parts = urllib.parse.urlparse(root)
            host, port = urllib.parse.splitport(parts.netloc)
            if not host:
                continue
            if re.match(r'\A[\d\.]*\Z', host):
                self.root_domains.add(host)
            else:
                host = host.lower()
                if self.strict:
                    self.root_domains.add(host)
                    if host.startswith('www.'):
                        self.root_domains.add(host[4:])
                    else:
                        self.root_domains.add('www.' + host)
                else:
                    parts = host.split('.')
                    if len(parts) > 2:
                        host = '.'.join(parts[-2:])
                    self.root_domains.add(host)
        for root in roots:
            self.add_url(root)
        self.governor = asyncio.locks.Semaphore(max_tasks)
        self.termination = asyncio.locks.Condition()
        self.t0 = time.time()
        self.t1 = None

    def close(self):
        """Close resources (currently only the pool)."""
        self.pool.close()

    def host_okay(self, host):
        """Check if a host should be crawled.

        A literal match (after lowercasing) is always good.  For hosts
        that don't look like IP addresses, some approximate matches
        are okay depending on the strict flag.
        """
        host = host.lower()
        if host in self.root_domains:
            return True
        if re.match(r'\A[\d\.]*\Z', host):
            return False
        if self.strict:
            return self._host_okay_strictish(host)
        else:
            return self._host_okay_lenient(host)

    def _host_okay_strictish(self, host):
        """Check if a host should be crawled, strict-ish version.

        This checks for equality modulo an initial 'www.' component.
        """
        if host.startswith('www.'):
            if host[4:] in self.root_domains:
                return True
        else:
            if 'www.' + host in self.root_domains:
                return True
        return False

    def _host_okay_lenient(self, host):
        """Check if a host should be crawled, lenient version.

        This compares the last two components of the host.
        """
        parts = host.split('.')
        if len(parts) > 2:
            host = '.'.join(parts[-2:])
        return host in self.root_domains

    def add_url(self, url, max_redirect=None):
        """Add a URL to the todo list if not seen before."""
        if self.exclude and re.search(self.exclude, url):
            return False
        parts = urllib.parse.urlparse(url)
        if parts.scheme not in ('http', 'https'):
            self.log(2, 'skipping non-http scheme in', url)
            return False
        host, port = urllib.parse.splitport(parts.netloc)
        if not self.host_okay(host):
            self.log(2, 'skipping non-root host in', url)
            return False
        if max_redirect is None:
            max_redirect = self.max_redirect
        if url in self.todo or url in self.busy or url in self.done:
            return False
        self.log(1, 'adding', url, max_redirect)
        self.todo[url] = max_redirect
        return True

    @asyncio.coroutine
    def crawl(self):
        """Run the crawler until all finished."""
        with (yield from self.termination):
            while self.todo or self.busy:
                if self.todo:
                    url, max_redirect = self.todo.popitem()
                    fetcher = Fetcher(self.log, url,
                                      crawler=self,
                                      max_redirect=max_redirect,
                                      max_tries=self.max_tries,
                                      )
                    self.busy[url] = fetcher
                    fetcher.task = asyncio.Task(self.fetch(fetcher))
                else:
                    yield from self.termination.wait()
            self.t1 = time.time()

    @asyncio.coroutine
    def fetch(self, fetcher):
        """Call the Fetcher's fetch(), with a limit on concurrency.

        Once this returns, move the fetcher from busy to done.
        """
        url = fetcher.url
        with (yield from self.governor):
            try:
                yield from fetcher.fetch()  # Fetcher gonna fetch.
            finally:
                # Force GC of the task, so the error is logged.
                fetcher.task = None
        with (yield from self.termination):
            self.done[url] = fetcher
            del self.busy[url]
            self.termination.notify()

    def report(self, file=None):
        """Print a report on all completed URLs."""
        if self.t1 is None:
            self.t1 = time.time()
        dt = self.t1 - self.t0
        if dt and self.max_tasks:
            speed = len(self.done) / dt / self.max_tasks
        else:
            speed = 0
        stats = Stats()
        print('*** Report ***', file=file)
        try:
            show = []
            show.extend(self.done.items())
            show.extend(self.busy.items())
            show.sort()
            for url, fetcher in show:
                fetcher.report(stats, file=file)
        except KeyboardInterrupt:
            print('\nInterrupted', file=file)
        print('Finished', len(self.done),
              'urls in %.3f secs' % dt,
              '(max_tasks=%d)' % self.max_tasks,
              '(%.3f urls/sec/task)' % speed,
              file=file)
        stats.report(file=file)
        print('Todo:', len(self.todo), file=file)
        print('Busy:', len(self.busy), file=file)
        print('Done:', len(self.done), file=file)
        print('Date:', time.ctime(), 'local time', file=file)


def main():
    """Main program.

    Parse arguments, set up event loop, run crawler, print report.
    """
    args = ARGS.parse_args()
    if not args.roots:
        print('Use --help for command line help')
        return

    log = Logger(args.level)

    if args.iocp:
        from asyncio.windows_events import ProactorEventLoop
        loop = ProactorEventLoop()
        asyncio.set_event_loop(loop)
    elif args.select:
        loop = asyncio.SelectorEventLoop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()

    roots = {fix_url(root) for root in args.roots}

    crawler = Crawler(log,
                      roots, exclude=args.exclude,
                      strict=args.strict,
                      max_redirect=args.max_redirect,
                      max_tries=args.max_tries,
                      max_tasks=args.max_tasks,
                      max_pool=args.max_pool,
                      )
    try:
        loop.run_until_complete(crawler.crawl())  # Crawler gonna crawl.
    except KeyboardInterrupt:
        sys.stderr.flush()
        print('\nInterrupted\n')
    finally:
        crawler.report()
        crawler.close()
        loop.close()


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    main()
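A minimal programmatic driver for the Crawler class above (a hedged sketch: the root URL is illustrative, and it assumes the classes are importable from this module):

    log = Logger(1)
    loop = asyncio.get_event_loop()
    crawler = Crawler(log, roots={'http://example.com/'}, max_tasks=10)
    try:
        loop.run_until_complete(crawler.crawl())
        crawler.report()
    finally:
        crawler.close()
        loop.close()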
20
examples/echo_client_tulip.py
Normal file
@@ -0,0 +1,20 @@
import asyncio

END = b'Bye-bye!\n'

@asyncio.coroutine
def echo_client():
    reader, writer = yield from asyncio.open_connection('localhost', 8000)
    writer.write(b'Hello, world\n')
    writer.write(b'What a fine day it is.\n')
    writer.write(END)
    while True:
        line = yield from reader.readline()
        print('received:', line)
        if line == END or not line:
            break
    writer.close()

loop = asyncio.get_event_loop()
loop.run_until_complete(echo_client())
loop.close()
20
examples/echo_server_tulip.py
Normal file
@@ -0,0 +1,20 @@
import asyncio

@asyncio.coroutine
def echo_server():
    yield from asyncio.start_server(handle_connection, 'localhost', 8000)

@asyncio.coroutine
def handle_connection(reader, writer):
    while True:
        data = yield from reader.read(8192)
        if not data:
            break
        writer.write(data)

loop = asyncio.get_event_loop()
loop.run_until_complete(echo_server())
try:
    loop.run_forever()
finally:
    loop.close()
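The two echo examples above pair up; a hedged sketch running server and client in one process on the shared default loop (port 8000 as above):

    import asyncio
    loop = asyncio.get_event_loop()
    loop.run_until_complete(echo_server())  # from echo_server_tulip.py
    loop.run_until_complete(echo_client())  # from echo_client_tulip.py
    loop.close()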
35
examples/fetch0.py
Normal file
@@ -0,0 +1,35 @@
"""Simplest possible HTTP client."""

import sys

from asyncio import *


@coroutine
def fetch():
    r, w = yield from open_connection('python.org', 80)
    request = 'GET / HTTP/1.0\r\n\r\n'
    print('>', request, file=sys.stderr)
    w.write(request.encode('latin-1'))
    while True:
        line = yield from r.readline()
        line = line.decode('latin-1').rstrip()
        if not line:
            break
        print('<', line, file=sys.stderr)
    print(file=sys.stderr)
    body = yield from r.read()
    return body


def main():
    loop = get_event_loop()
    try:
        body = loop.run_until_complete(fetch())
    finally:
        loop.close()
    print(body.decode('latin-1'), end='')


if __name__ == '__main__':
    main()
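The same open_connection() pattern works for any host; a hedged variant of fetch() with the peer as a parameter (the name fetch_host is illustrative, not part of the diff):

    @coroutine
    def fetch_host(host):
        # Same flow as fetch() above; returns the raw response
        # (status line, headers and body, undifferentiated).
        r, w = yield from open_connection(host, 80)
        w.write(b'GET / HTTP/1.0\r\n\r\n')
        return (yield from r.read())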
78
examples/fetch1.py
Normal file
@@ -0,0 +1,78 @@
"""Fetch one URL and write its content to stdout.

This version adds URL parsing (including SSL) and a Response object.
"""

import sys
import urllib.parse

from asyncio import *


class Response:

    def __init__(self, verbose=True):
        self.verbose = verbose
        self.http_version = None  # 'HTTP/1.1'
        self.status = None  # 200
        self.reason = None  # 'Ok'
        self.headers = []  # [('Content-Type', 'text/html')]

    @coroutine
    def read(self, reader):
        @coroutine
        def getline():
            return (yield from reader.readline()).decode('latin-1').rstrip()
        status_line = yield from getline()
        if self.verbose: print('<', status_line, file=sys.stderr)
        self.http_version, status, self.reason = status_line.split(None, 2)
        self.status = int(status)
        while True:
            header_line = yield from getline()
            if not header_line:
                break
            if self.verbose: print('<', header_line, file=sys.stderr)
            # TODO: Continuation lines.
            key, value = header_line.split(':', 1)
            self.headers.append((key, value.strip()))
        if self.verbose: print(file=sys.stderr)


@coroutine
def fetch(url, verbose=True):
    parts = urllib.parse.urlparse(url)
    if parts.scheme == 'http':
        ssl = False
    elif parts.scheme == 'https':
        ssl = True
    else:
        print('URL must use http or https.')
        sys.exit(1)
    port = parts.port
    if port is None:
        port = 443 if ssl else 80
    path = parts.path or '/'
    if parts.query:
        path += '?' + parts.query
    request = 'GET %s HTTP/1.0\r\n\r\n' % path
    if verbose:
        print('>', request, file=sys.stderr, end='')
    r, w = yield from open_connection(parts.hostname, port, ssl=ssl)
    w.write(request.encode('latin-1'))
    response = Response(verbose)
    yield from response.read(r)
    body = yield from r.read()
    return body


def main():
    loop = get_event_loop()
    try:
        body = loop.run_until_complete(fetch(sys.argv[1], '-v' in sys.argv))
    finally:
        loop.close()
    print(body.decode('latin-1'), end='')


if __name__ == '__main__':
    main()
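Because the ssl flag above is derived from the URL scheme, the same fetch() coroutine handles https transparently; e.g. (the URL is illustrative):

    loop = get_event_loop()
    try:
        body = loop.run_until_complete(
            fetch('https://www.python.org/', verbose=False))
    finally:
        loop.close()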
141
examples/fetch2.py
Normal file
@@ -0,0 +1,141 @@
"""Fetch one URL and write its content to stdout.

This version adds a Request object.
"""

import sys
import urllib.parse
from http.client import BadStatusLine

from asyncio import *


class Request:

    def __init__(self, url, verbose=True):
        self.url = url
        self.verbose = verbose
        self.parts = urllib.parse.urlparse(self.url)
        self.scheme = self.parts.scheme
        assert self.scheme in ('http', 'https'), repr(url)
        self.ssl = self.parts.scheme == 'https'
        self.netloc = self.parts.netloc
        self.hostname = self.parts.hostname
        self.port = self.parts.port or (443 if self.ssl else 80)
        self.path = (self.parts.path or '/')
        self.query = self.parts.query
        if self.query:
            self.full_path = '%s?%s' % (self.path, self.query)
        else:
            self.full_path = self.path
        self.http_version = 'HTTP/1.1'
        self.method = 'GET'
        self.headers = []
        self.reader = None
        self.writer = None

    @coroutine
    def connect(self):
        if self.verbose:
            print('* Connecting to %s:%s using %s' %
                  (self.hostname, self.port, 'ssl' if self.ssl else 'tcp'),
                  file=sys.stderr)
        self.reader, self.writer = yield from open_connection(self.hostname,
                                                              self.port,
                                                              ssl=self.ssl)
        if self.verbose:
            print('* Connected to %s' %
                  (self.writer.get_extra_info('peername'),),
                  file=sys.stderr)

    def putline(self, line):
        self.writer.write(line.encode('latin-1') + b'\r\n')

    @coroutine
    def send_request(self):
        request = '%s %s %s' % (self.method, self.full_path, self.http_version)
        if self.verbose: print('>', request, file=sys.stderr)
        self.putline(request)
        if 'host' not in {key.lower() for key, _ in self.headers}:
            self.headers.insert(0, ('Host', self.netloc))
        for key, value in self.headers:
            line = '%s: %s' % (key, value)
            if self.verbose: print('>', line, file=sys.stderr)
            self.putline(line)
        self.putline('')

    @coroutine
    def get_response(self):
        response = Response(self.reader, self.verbose)
        yield from response.read_headers()
        return response


class Response:

    def __init__(self, reader, verbose=True):
        self.reader = reader
        self.verbose = verbose
        self.http_version = None  # 'HTTP/1.1'
        self.status = None  # 200
        self.reason = None  # 'Ok'
        self.headers = []  # [('Content-Type', 'text/html')]

    @coroutine
    def getline(self):
        return (yield from self.reader.readline()).decode('latin-1').rstrip()

    @coroutine
    def read_headers(self):
        status_line = yield from self.getline()
        if self.verbose: print('<', status_line, file=sys.stderr)
        status_parts = status_line.split(None, 2)
        if len(status_parts) != 3:
            raise BadStatusLine(status_line)
        self.http_version, status, self.reason = status_parts
        self.status = int(status)
        while True:
            header_line = yield from self.getline()
            if not header_line:
                break
            if self.verbose: print('<', header_line, file=sys.stderr)
            # TODO: Continuation lines.
            key, value = header_line.split(':', 1)
            self.headers.append((key, value.strip()))
        if self.verbose: print(file=sys.stderr)

    @coroutine
    def read(self):
        nbytes = None
        for key, value in self.headers:
            if key.lower() == 'content-length':
                nbytes = int(value)
                break
        if nbytes is None:
            body = yield from self.reader.read()
        else:
            body = yield from self.reader.readexactly(nbytes)
        return body


@coroutine
def fetch(url, verbose=True):
    request = Request(url, verbose)
    yield from request.connect()
    yield from request.send_request()
    response = yield from request.get_response()
    body = yield from response.read()
    return body


def main():
    loop = get_event_loop()
    try:
        body = loop.run_until_complete(fetch(sys.argv[1], '-v' in sys.argv))
    finally:
        loop.close()
    sys.stdout.buffer.write(body)


if __name__ == '__main__':
    main()
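Since send_request() above only inserts a Host header when one is missing, extra headers can be queued on Request.headers beforehand; a hedged sketch (the header value is illustrative):

    @coroutine
    def fetch_with_headers(url):
        request = Request(url, verbose=False)
        request.headers.append(('Accept', 'text/html'))
        yield from request.connect()
        yield from request.send_request()
        response = yield from request.get_response()
        return (yield from response.read())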
230
examples/fetch3.py
Normal file
@@ -0,0 +1,230 @@
"""Fetch one URL and write its content to stdout.

This version adds a primitive connection pool, redirect following and
chunked transfer-encoding.  It also supports a --iocp flag.
"""

import sys
import urllib.parse
from http.client import BadStatusLine

from asyncio import *


class ConnectionPool:
    # TODO: Locking?  Close idle connections?

    def __init__(self, verbose=False):
        self.verbose = verbose
        self.connections = {}  # {(host, port, ssl): (reader, writer)}

    def close(self):
        for _, writer in self.connections.values():
            writer.close()

    @coroutine
    def open_connection(self, host, port, ssl):
        port = port or (443 if ssl else 80)
        ipaddrs = yield from get_event_loop().getaddrinfo(host, port)
        if self.verbose:
            print('* %s resolves to %s' %
                  (host, ', '.join(ip[4][0] for ip in ipaddrs)),
                  file=sys.stderr)
        for _, _, _, _, (h, p, *_) in ipaddrs:
            key = h, p, ssl
            conn = self.connections.get(key)
            if conn:
                reader, writer = conn
                if reader._eof:
                    self.connections.pop(key)
                    continue
                if self.verbose:
                    print('* Reusing pooled connection', key, file=sys.stderr)
                return conn
        reader, writer = yield from open_connection(host, port, ssl=ssl)
        host, port, *_ = writer.get_extra_info('peername')
        key = host, port, ssl
        self.connections[key] = reader, writer
        if self.verbose:
            print('* New connection', key, file=sys.stderr)
        return reader, writer


class Request:

    def __init__(self, url, verbose=True):
        self.url = url
        self.verbose = verbose
        self.parts = urllib.parse.urlparse(self.url)
        self.scheme = self.parts.scheme
        assert self.scheme in ('http', 'https'), repr(url)
        self.ssl = self.parts.scheme == 'https'
        self.netloc = self.parts.netloc
        self.hostname = self.parts.hostname
        self.port = self.parts.port or (443 if self.ssl else 80)
        self.path = (self.parts.path or '/')
        self.query = self.parts.query
        if self.query:
            self.full_path = '%s?%s' % (self.path, self.query)
        else:
            self.full_path = self.path
        self.http_version = 'HTTP/1.1'
        self.method = 'GET'
        self.headers = []
        self.reader = None
        self.writer = None

    def vprint(self, *args):
        if self.verbose:
            print(*args, file=sys.stderr)

    @coroutine
    def connect(self, pool):
        self.vprint('* Connecting to %s:%s using %s' %
                    (self.hostname, self.port, 'ssl' if self.ssl else 'tcp'))
        self.reader, self.writer = \
            yield from pool.open_connection(self.hostname,
                                            self.port,
                                            ssl=self.ssl)
        self.vprint('* Connected to %s' %
                    (self.writer.get_extra_info('peername'),))

    @coroutine
    def putline(self, line):
        self.vprint('>', line)
        self.writer.write(line.encode('latin-1') + b'\r\n')
        ##yield from self.writer.drain()

    @coroutine
    def send_request(self):
        request = '%s %s %s' % (self.method, self.full_path, self.http_version)
        yield from self.putline(request)
        if 'host' not in {key.lower() for key, _ in self.headers}:
            self.headers.insert(0, ('Host', self.netloc))
        for key, value in self.headers:
            line = '%s: %s' % (key, value)
            yield from self.putline(line)
        yield from self.putline('')

    @coroutine
    def get_response(self):
        response = Response(self.reader, self.verbose)
        yield from response.read_headers()
        return response


class Response:

    def __init__(self, reader, verbose=True):
        self.reader = reader
        self.verbose = verbose
        self.http_version = None  # 'HTTP/1.1'
        self.status = None  # 200
        self.reason = None  # 'Ok'
        self.headers = []  # [('Content-Type', 'text/html')]

    def vprint(self, *args):
        if self.verbose:
            print(*args, file=sys.stderr)

    @coroutine
    def getline(self):
        line = (yield from self.reader.readline()).decode('latin-1').rstrip()
        self.vprint('<', line)
        return line

    @coroutine
    def read_headers(self):
        status_line = yield from self.getline()
        status_parts = status_line.split(None, 2)
        if len(status_parts) != 3:
            raise BadStatusLine(status_line)
        self.http_version, status, self.reason = status_parts
        self.status = int(status)
        while True:
            header_line = yield from self.getline()
            if not header_line:
                break
            # TODO: Continuation lines.
            key, value = header_line.split(':', 1)
            self.headers.append((key, value.strip()))

    def get_redirect_url(self, default=None):
        if self.status not in (300, 301, 302, 303, 307):
            return default
        return self.get_header('Location', default)

    def get_header(self, key, default=None):
        key = key.lower()
        for k, v in self.headers:
            if k.lower() == key:
                return v
        return default

    @coroutine
    def read(self):
        nbytes = None
        for key, value in self.headers:
            if key.lower() == 'content-length':
                nbytes = int(value)
                break
        if nbytes is None:
            if self.get_header('transfer-encoding', '').lower() == 'chunked':
                blocks = []
                size = -1
                while size:
                    size_header = yield from self.reader.readline()
                    if not size_header:
                        break
                    parts = size_header.split(b';')
                    size = int(parts[0], 16)
                    if size:
                        block = yield from self.reader.readexactly(size)
                        assert len(block) == size, (len(block), size)
                        blocks.append(block)
                    crlf = yield from self.reader.readline()
                    assert crlf == b'\r\n', repr(crlf)
                body = b''.join(blocks)
            else:
                body = yield from self.reader.read()
        else:
            body = yield from self.reader.readexactly(nbytes)
        return body


@coroutine
def fetch(url, verbose=True, max_redirect=10):
    pool = ConnectionPool(verbose)
    try:
        for _ in range(max_redirect):
            request = Request(url, verbose)
            yield from request.connect(pool)
            yield from request.send_request()
            response = yield from request.get_response()
            body = yield from response.read()
            next_url = response.get_redirect_url()
            if not next_url:
                break
            url = urllib.parse.urljoin(url, next_url)
            print('redirect to', url, file=sys.stderr)
        return body
    finally:
        pool.close()


def main():
    if '--iocp' in sys.argv:
        from asyncio.windows_events import ProactorEventLoop
        loop = ProactorEventLoop()
        set_event_loop(loop)
    else:
        loop = get_event_loop()
    try:
        body = loop.run_until_complete(fetch(sys.argv[1], '-v' in sys.argv))
    finally:
        loop.close()
    sys.stdout.buffer.write(body)


if __name__ == '__main__':
    main()
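For reference, the chunked branch of Response.read() above consumes bodies of the following shape (an illustrative payload, not taken from the diff):

    # Hex size line, chunk data, CRLF, repeated; a zero size ends the body.
    wire = b'4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n'
    # Response.read() joins the chunks, yielding b'Wikipedia'.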
69
examples/fuzz_as_completed.py
Normal file
@@ -0,0 +1,69 @@
#!/usr/bin/env python3

"""Fuzz tester for as_completed(), by Glenn Langford."""

import asyncio
import itertools
import random
import sys

@asyncio.coroutine
def sleeper(time):
    yield from asyncio.sleep(time)
    return time

@asyncio.coroutine
def watcher(tasks, delay=False):
    res = []
    for t in asyncio.as_completed(tasks):
        r = yield from t
        res.append(r)
        if delay:
            # simulate processing delay
            process_time = random.random() / 10
            yield from asyncio.sleep(process_time)
    #print(res)
    #assert(sorted(res) == res)
    if sorted(res) != res:
        print('FAIL', res)
        print('------------')
    else:
        print('.', end='')
        sys.stdout.flush()

loop = asyncio.get_event_loop()

print('Pass 1')
# All permutations of discrete task running times must be returned
# by as_completed in the correct order.
task_times = [0, 0.1, 0.2, 0.3, 0.4]  # 120 permutations
for times in itertools.permutations(task_times):
    tasks = [asyncio.Task(sleeper(t)) for t in times]
    loop.run_until_complete(asyncio.Task(watcher(tasks)))

print()
print('Pass 2')
# Longer task times, with randomized duplicates.  100 tasks each time.
longer_task_times = [x/10 for x in range(30)]
for i in range(20):
    task_times = longer_task_times * 10
    random.shuffle(task_times)
    #print('Times', task_times[:500])
    tasks = [asyncio.Task(sleeper(t)) for t in task_times[:100]]
    loop.run_until_complete(asyncio.Task(watcher(tasks)))

print()
print('Pass 3')
# Same as pass 2, but with a random processing delay (0 - 0.1s) after
# retrieving each future from as_completed and 200 tasks.  This tests whether
# the order that callbacks are triggered is preserved through to the
# as_completed caller.
for i in range(20):
    task_times = longer_task_times * 10
    random.shuffle(task_times)
    #print('Times', task_times[:200])
    tasks = [asyncio.Task(sleeper(t)) for t in task_times[:200]]
    loop.run_until_complete(asyncio.Task(watcher(tasks, delay=True)))

print()
loop.close()
17
examples/hello_callback.py
Normal file
@@ -0,0 +1,17 @@
"""Print 'Hello World' every two seconds, using a callback."""

import asyncio


def print_and_repeat(loop):
    print('Hello World')
    loop.call_later(2, print_and_repeat, loop)


if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    print_and_repeat(loop)
    try:
        loop.run_forever()
    finally:
        loop.close()
18
examples/hello_coroutine.py
Normal file
@@ -0,0 +1,18 @@
"""Print 'Hello World' every two seconds, using a coroutine."""

import asyncio


@asyncio.coroutine
def greet_every_two_seconds():
    while True:
        print('Hello World')
        yield from asyncio.sleep(2)


if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(greet_every_two_seconds())
    finally:
        loop.close()
50
examples/shell.py
Normal file
@@ -0,0 +1,50 @@
"""Examples using create_subprocess_exec() and create_subprocess_shell()."""

import asyncio
import signal
from asyncio.subprocess import PIPE

@asyncio.coroutine
def cat(loop):
    proc = yield from asyncio.create_subprocess_shell("cat",
                                                      stdin=PIPE,
                                                      stdout=PIPE)
    print("pid: %s" % proc.pid)

    message = "Hello World!"
    print("cat write: %r" % message)

    stdout, stderr = yield from proc.communicate(message.encode('ascii'))
    print("cat read: %r" % stdout.decode('ascii'))

    exitcode = yield from proc.wait()
    print("(exit code %s)" % exitcode)

@asyncio.coroutine
def ls(loop):
    proc = yield from asyncio.create_subprocess_exec("ls",
                                                     stdout=PIPE)
    while True:
        line = yield from proc.stdout.readline()
        if not line:
            break
        print("ls>>", line.decode('ascii').rstrip())
    try:
        proc.send_signal(signal.SIGINT)
    except ProcessLookupError:
        pass

@asyncio.coroutine
def test_call(*args, timeout=None):
    try:
        proc = yield from asyncio.create_subprocess_exec(*args)
        exitcode = yield from asyncio.wait_for(proc.wait(), timeout)
        print("%s: exit code %s" % (' '.join(args), exitcode))
    except asyncio.TimeoutError:
        print("timeout! (%.1f sec)" % timeout)

loop = asyncio.get_event_loop()
loop.run_until_complete(cat(loop))
loop.run_until_complete(ls(loop))
loop.run_until_complete(test_call("bash", "-c", "sleep 3", timeout=1.0))
loop.close()
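test_call() above also accepts timeout=None, in which case wait_for() simply waits for the process to exit; e.g. (run before the loop.close() above; the command is illustrative):

    loop.run_until_complete(test_call("echo", "Hello"))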
154
examples/simple_tcp_server.py
Normal file
@@ -0,0 +1,154 @@
"""
Example of a simple TCP server that is written in (mostly) coroutine
style and uses asyncio.streams.start_server() and
asyncio.streams.open_connection().

Note that running this example starts both the TCP server and client
in the same process.  It listens on port 12345 on 127.0.0.1, so it will
fail if this port is currently in use.
"""

import sys
import asyncio
import asyncio.streams


class MyServer:
    """
    This is just an example of how a TCP server might be potentially
    structured.  This class has basically 3 methods: start the server,
    handle a client, and stop the server.

    Note that you don't have to follow this structure, it is really
    just an example or possible starting point.
    """

    def __init__(self):
        self.server = None  # encapsulates the server sockets

        # this keeps track of all the clients that connected to our
        # server.  It can be useful in some cases, for instance to
        # kill client connections or to broadcast some data to all
        # clients...
        self.clients = {}  # task -> (reader, writer)

    def _accept_client(self, client_reader, client_writer):
        """
        This method accepts a new client connection and creates a Task
        to handle this client.  self.clients is updated to keep track
        of the new client.
        """

        # start a new Task to handle this specific client connection
        task = asyncio.Task(self._handle_client(client_reader, client_writer))
        self.clients[task] = (client_reader, client_writer)

        def client_done(task):
            print("client task done:", task, file=sys.stderr)
            del self.clients[task]

        task.add_done_callback(client_done)

    @asyncio.coroutine
    def _handle_client(self, client_reader, client_writer):
        """
        This method actually does the work to handle the requests for
        a specific client.  The protocol is line oriented, so there is
        a main loop that reads a line with a request and then sends
        out one or more lines back to the client with the result.
        """
        while True:
            data = (yield from client_reader.readline()).decode("utf-8")
            if not data:  # an empty string means the client disconnected
                break
            cmd, *args = data.rstrip().split(' ')
            if cmd == 'add':
                arg1 = float(args[0])
                arg2 = float(args[1])
                retval = arg1 + arg2
                client_writer.write("{!r}\n".format(retval).encode("utf-8"))
            elif cmd == 'repeat':
                times = int(args[0])
                msg = args[1]
                client_writer.write("begin\n".encode("utf-8"))
                for idx in range(times):
                    client_writer.write("{}. {}\n".format(idx+1, msg)
                                        .encode("utf-8"))
                client_writer.write("end\n".encode("utf-8"))
            else:
                print("Bad command {!r}".format(data), file=sys.stderr)

            # This enables us to have flow control in our connection.
            yield from client_writer.drain()

    def start(self, loop):
        """
        Starts the TCP server, so that it listens on port 12345.

        For each client that connects, the accept_client method gets
        called.  This method runs the loop until the server sockets
        are ready to accept connections.
        """
        self.server = loop.run_until_complete(
            asyncio.streams.start_server(self._accept_client,
                                         '127.0.0.1', 12345,
                                         loop=loop))

    def stop(self, loop):
        """
        Stops the TCP server, i.e. closes the listening socket(s).

        This method runs the loop until the server sockets are closed.
        """
        if self.server is not None:
            self.server.close()
            loop.run_until_complete(self.server.wait_closed())
            self.server = None


def main():
    loop = asyncio.get_event_loop()

    # creates a server and starts listening to TCP connections
    server = MyServer()
    server.start(loop)

    @asyncio.coroutine
    def client():
        reader, writer = yield from asyncio.streams.open_connection(
            '127.0.0.1', 12345, loop=loop)

        def send(msg):
            print("> " + msg)
            writer.write((msg + '\n').encode("utf-8"))

        def recv():
            msgback = (yield from reader.readline()).decode("utf-8").rstrip()
            print("< " + msgback)
            return msgback

        # send a line
        send("add 1 2")
        msg = yield from recv()

        send("repeat 5 hello")
        msg = yield from recv()
        assert msg == 'begin'
        while True:
            msg = yield from recv()
            if msg == 'end':
                break

        writer.close()
        yield from asyncio.sleep(0.5)

    # creates a client and connects to our server
    try:
        loop.run_until_complete(client())
        server.stop(loop)
    finally:
        loop.close()


if __name__ == '__main__':
    main()
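For reference, the line protocol handled by _handle_client() above looks like this on the wire ('>' marks client-to-server lines; the transcript is illustrative):

    # > add 1 2       ->  "3.0"
    # > repeat 2 hi   ->  "begin", "1. hi", "2. hi", "end"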
94
examples/sink.py
Normal file
@@ -0,0 +1,94 @@
"""Test service that accepts connections and reads all data off them."""

import argparse
import os
import sys

from asyncio import *

ARGS = argparse.ArgumentParser(description="TCP data sink example.")
ARGS.add_argument(
    '--tls', action='store_true', dest='tls',
    default=False, help='Use TLS with a self-signed cert')
ARGS.add_argument(
    '--iocp', action='store_true', dest='iocp',
    default=False, help='Use IOCP event loop (Windows only)')
ARGS.add_argument(
    '--host', action='store', dest='host',
    default='127.0.0.1', help='Host name')
ARGS.add_argument(
    '--port', action='store', dest='port',
    default=1111, type=int, help='Port number')
ARGS.add_argument(
    '--maxsize', action='store', dest='maxsize',
    default=16*1024*1024, type=int, help='Max total data size')

server = None
args = None


def dprint(*args):
    print('sink:', *args, file=sys.stderr)


class Service(Protocol):

    def connection_made(self, tr):
        dprint('connection from', tr.get_extra_info('peername'))
        dprint('my socket is', tr.get_extra_info('sockname'))
        self.tr = tr
        self.total = 0

    def data_received(self, data):
        if data == b'stop':
            dprint('stopping server')
            server.close()
            self.tr.close()
            return
        self.total += len(data)
        dprint('received', len(data), 'bytes; total', self.total)
        if self.total > args.maxsize:
            dprint('closing due to too much data')
            self.tr.close()

    def connection_lost(self, how):
        dprint('closed', repr(how))


@coroutine
def start(loop, host, port):
    global server
    sslctx = None
    if args.tls:
        import ssl
        # TODO: take cert/key from args as well.
        here = os.path.join(os.path.dirname(__file__), '..', 'tests')
        sslctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        sslctx.options |= ssl.OP_NO_SSLv2
        sslctx.load_cert_chain(
            certfile=os.path.join(here, 'ssl_cert.pem'),
            keyfile=os.path.join(here, 'ssl_key.pem'))

    server = yield from loop.create_server(Service, host, port, ssl=sslctx)
    dprint('serving TLS' if sslctx else 'serving',
           [s.getsockname() for s in server.sockets])
    yield from server.wait_closed()


def main():
    global args
    args = ARGS.parse_args()
    if args.iocp:
        from asyncio.windows_events import ProactorEventLoop
        loop = ProactorEventLoop()
        set_event_loop(loop)
    else:
        loop = get_event_loop()
    try:
        loop.run_until_complete(start(loop, args.host, args.port))
    finally:
        loop.close()


if __name__ == '__main__':
    main()
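Any client can stop the sink, since data_received() above compares the whole payload against b'stop'; a hedged minimal sketch in the same wildcard-import style (the helper name is illustrative, and source.py below exposes the same behaviour via its --stop flag):

    @coroutine
    def stop_sink(host, port):
        reader, writer = yield from open_connection(host, port)
        writer.write(b'stop')
        writer.close()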
100
examples/source.py
Normal file
@@ -0,0 +1,100 @@
"""Test client that connects and sends infinite data."""

import argparse
import sys

from asyncio import *
from asyncio import test_utils


ARGS = argparse.ArgumentParser(description="TCP data source example.")
ARGS.add_argument(
    '--tls', action='store_true', dest='tls',
    default=False, help='Use TLS')
ARGS.add_argument(
    '--iocp', action='store_true', dest='iocp',
    default=False, help='Use IOCP event loop (Windows only)')
ARGS.add_argument(
    '--stop', action='store_true', dest='stop',
    default=False, help='Stop the server by sending it b"stop" as data')
ARGS.add_argument(
    '--host', action='store', dest='host',
    default='127.0.0.1', help='Host name')
ARGS.add_argument(
    '--port', action='store', dest='port',
    default=1111, type=int, help='Port number')
ARGS.add_argument(
    '--size', action='store', dest='size',
    default=16*1024, type=int, help='Data size')

args = None


def dprint(*args):
    print('source:', *args, file=sys.stderr)


class Client(Protocol):

    total = 0

    def connection_made(self, tr):
        dprint('connecting to', tr.get_extra_info('peername'))
        dprint('my socket is', tr.get_extra_info('sockname'))
        self.tr = tr
        self.lost = False
        self.loop = get_event_loop()
        self.waiter = Future()
        if args.stop:
            self.tr.write(b'stop')
            self.tr.close()
        else:
            self.data = b'x'*args.size
            self.write_some_data()

    def write_some_data(self):
        if self.lost:
            dprint('lost already')
            return
        data = self.data
        size = len(data)
        self.total += size
        dprint('writing', size, 'bytes; total', self.total)
        self.tr.write(data)
        self.loop.call_soon(self.write_some_data)

    def connection_lost(self, exc):
        dprint('lost connection', repr(exc))
        self.lost = True
        self.waiter.set_result(None)


@coroutine
def start(loop, host, port):
    sslctx = None
    if args.tls:
        sslctx = test_utils.dummy_ssl_context()
    tr, pr = yield from loop.create_connection(Client, host, port,
                                               ssl=sslctx)
    dprint('tr =', tr)
    dprint('pr =', pr)
    yield from pr.waiter


def main():
    global args
    args = ARGS.parse_args()
    if args.iocp:
        from asyncio.windows_events import ProactorEventLoop
        loop = ProactorEventLoop()
        set_event_loop(loop)
    else:
        loop = get_event_loop()
    try:
        loop.run_until_complete(start(loop, args.host, args.port))
    finally:
        loop.close()


if __name__ == '__main__':
    main()
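source.py deliberately keeps rescheduling write_some_data() with call_soon() and lets the transport buffer everything. A protocol that wants to respect the transport's buffer limits can implement the pause_writing()/resume_writing() flow-control callbacks instead; a sketch (hypothetical class, not part of the commit) follows. Passing it to loop.create_connection(ThrottledSource, host, port) would drive it.

import asyncio

class ThrottledSource(asyncio.Protocol):
    """Hypothetical variant of Client above that honors flow control."""

    def connection_made(self, transport):
        self.transport = transport
        self.loop = asyncio.get_event_loop()
        self.paused = False
        self.data = b'x' * 16384
        self._write_chunk()

    def _write_chunk(self):
        if self.paused:
            return  # resume_writing() will restart the write loop
        self.transport.write(self.data)
        self.loop.call_soon(self._write_chunk)

    # Called by the transport when its buffer passes the high-water mark.
    def pause_writing(self):
        self.paused = True

    # Called by the transport when the buffer drains below the low-water mark.
    def resume_writing(self):
        self.paused = False
        self._write_chunk()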
98
examples/source1.py
Normal file
@@ -0,0 +1,98 @@
"""Like source.py, but uses streams."""

import argparse
import sys

from asyncio import *
from asyncio import test_utils

ARGS = argparse.ArgumentParser(description="TCP data source example.")
ARGS.add_argument(
    '--tls', action='store_true', dest='tls',
    default=False, help='Use TLS')
ARGS.add_argument(
    '--iocp', action='store_true', dest='iocp',
    default=False, help='Use IOCP event loop (Windows only)')
ARGS.add_argument(
    '--stop', action='store_true', dest='stop',
    default=False, help='Stop the server by sending it b"stop" as data')
ARGS.add_argument(
    '--host', action='store', dest='host',
    default='127.0.0.1', help='Host name')
ARGS.add_argument(
    '--port', action='store', dest='port',
    default=1111, type=int, help='Port number')
ARGS.add_argument(
    '--size', action='store', dest='size',
    default=16*1024, type=int, help='Data size')


class Debug:
    """A clever little class that suppresses repetitive messages."""

    overwriting = False
    label = 'stream1:'

    def print(self, *args):
        if self.overwriting:
            print(file=sys.stderr)
            self.overwriting = 0
        print(self.label, *args, file=sys.stderr)

    def oprint(self, *args):
        self.overwriting += 1
        end = '\n'
        if self.overwriting >= 3:
            if self.overwriting == 3:
                print(self.label, '[...]', file=sys.stderr)
            end = '\r'
        print(self.label, *args, file=sys.stderr, end=end, flush=True)


@coroutine
def start(loop, args):
    d = Debug()
    total = 0
    sslctx = None
    if args.tls:
        d.print('using dummy SSLContext')
        sslctx = test_utils.dummy_ssl_context()
    r, w = yield from open_connection(args.host, args.port, ssl=sslctx)
    d.print('r =', r)
    d.print('w =', w)
    if args.stop:
        w.write(b'stop')
        w.close()
    else:
        size = args.size
        data = b'x'*size
        try:
            while True:
                total += size
                d.oprint('writing', size, 'bytes; total', total)
                w.write(data)
                f = w.drain()
                if f:
                    d.print('pausing')
                    yield from f
        except (ConnectionResetError, BrokenPipeError) as exc:
            d.print('caught', repr(exc))


def main():
    global args
    args = ARGS.parse_args()
    if args.iocp:
        from asyncio.windows_events import ProactorEventLoop
        loop = ProactorEventLoop()
        set_event_loop(loop)
    else:
        loop = get_event_loop()
    try:
        loop.run_until_complete(start(loop, args))
    finally:
        loop.close()


if __name__ == '__main__':
    main()
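The `f = w.drain()` / `if f:` dance reflects the Tulip-era API, where drain() returned a future only when the transport was actually paused. In later asyncio releases drain() is a coroutine and is simply awaited on every iteration; a sketch of the equivalent loop under that API:

import asyncio

@asyncio.coroutine
def write_forever(w, data):
    # Later asyncio: drain() is always awaitable; awaiting it is a cheap
    # no-op unless the transport is paused for flow control.
    while True:
        w.write(data)
        yield from w.drain()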
44
examples/stacks.py
Normal file
@@ -0,0 +1,44 @@
"""Crude demo for print_stack()."""


from asyncio import *


@coroutine
def helper(r):
    print('--- helper ---')
    for t in Task.all_tasks():
        t.print_stack()
    print('--- end helper ---')
    line = yield from r.readline()
    1/0  # deliberately fail so the final print_stack() shows a traceback
    return line


def doit():
    l = get_event_loop()
    lr = l.run_until_complete
    r, w = lr(open_connection('python.org', 80))
    t1 = async(helper(r))
    for t in Task.all_tasks():
        t.print_stack()
    print('---')
    l._run_once()
    for t in Task.all_tasks():
        t.print_stack()
    print('---')
    w.write(b'GET /\r\n')
    w.write_eof()
    try:
        lr(t1)
    except Exception as e:
        print('catching', e)
    finally:
        for t in Task.all_tasks():
            t.print_stack()
        l.close()


def main():
    doit()


if __name__ == '__main__':
    main()
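A self-contained sketch (no network needed, not part of the commit) of the same Task.print_stack() inspection, using the Python 3.3/3.4-era API this commit targets:

import asyncio

@asyncio.coroutine
def waiter(fut):
    yield from fut  # the task is suspended here; print_stack() shows this frame

loop = asyncio.get_event_loop()
fut = asyncio.Future(loop=loop)
task = asyncio.Task(waiter(fut), loop=loop)
# Dump the stack of every task once the loop is running.
loop.call_soon(lambda: [t.print_stack() for t in asyncio.Task.all_tasks(loop)])
loop.call_later(0.1, fut.set_result, None)
loop.run_until_complete(task)
loop.close()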
33
examples/subprocess_attach_read_pipe.py
Normal file
@@ -0,0 +1,33 @@
#!/usr/bin/env python3
"""Example showing how to attach a read pipe to a subprocess."""
import asyncio
import os, sys

code = """
import os, sys
fd = int(sys.argv[1])
os.write(fd, b'data')
os.close(fd)
"""

loop = asyncio.get_event_loop()

@asyncio.coroutine
def task():
    rfd, wfd = os.pipe()
    args = [sys.executable, '-c', code, str(wfd)]

    pipe = open(rfd, 'rb', 0)
    reader = asyncio.StreamReader(loop=loop)
    protocol = asyncio.StreamReaderProtocol(reader, loop=loop)
    transport, _ = yield from loop.connect_read_pipe(lambda: protocol, pipe)

    proc = yield from asyncio.create_subprocess_exec(*args, pass_fds={wfd})
    yield from proc.wait()

    os.close(wfd)
    data = yield from reader.read()
    print("read = %r" % data.decode())

loop.run_until_complete(task())
loop.close()
35
examples/subprocess_attach_write_pipe.py
Normal file
@@ -0,0 +1,35 @@
#!/usr/bin/env python3
"""Example showing how to attach a write pipe to a subprocess."""
import asyncio
import os, sys
from asyncio import subprocess

code = """
import os, sys
fd = int(sys.argv[1])
data = os.read(fd, 1024)
sys.stdout.buffer.write(data)
"""

loop = asyncio.get_event_loop()

@asyncio.coroutine
def task():
    rfd, wfd = os.pipe()
    args = [sys.executable, '-c', code, str(rfd)]
    proc = yield from asyncio.create_subprocess_exec(
        *args,
        pass_fds={rfd},
        stdout=subprocess.PIPE)

    pipe = open(wfd, 'wb', 0)
    transport, _ = yield from loop.connect_write_pipe(asyncio.Protocol,
                                                      pipe)
    transport.write(b'data')

    stdout, stderr = yield from proc.communicate()
    print("stdout = %r" % stdout.decode())
    pipe.close()

loop.run_until_complete(task())
loop.close()
87
examples/subprocess_shell.py
Normal file
@@ -0,0 +1,87 @@
"""Example writing to and reading from a subprocess at the same time using
tasks."""

import asyncio
import os
from asyncio.subprocess import PIPE


@asyncio.coroutine
def send_input(writer, input):
    try:
        for line in input:
            print('sending', len(line), 'bytes')
            writer.write(line)
            d = writer.drain()
            if d:
                print('pause writing')
                yield from d
                print('resume writing')
        writer.close()
    except BrokenPipeError:
        print('stdin: broken pipe error')
    except ConnectionResetError:
        print('stdin: connection reset error')

@asyncio.coroutine
def log_errors(reader):
    while True:
        line = yield from reader.readline()
        if not line:
            break
        print('ERROR', repr(line))

@asyncio.coroutine
def read_stdout(stdout):
    while True:
        line = yield from stdout.readline()
        print('received', repr(line))
        if not line:
            break

@asyncio.coroutine
def start(cmd, input=None, **kwds):
    kwds['stdout'] = PIPE
    kwds['stderr'] = PIPE
    if input is None and 'stdin' not in kwds:
        kwds['stdin'] = None
    else:
        kwds['stdin'] = PIPE
    proc = yield from asyncio.create_subprocess_shell(cmd, **kwds)

    tasks = []
    if input is not None:
        tasks.append(send_input(proc.stdin, input))
    else:
        print('No stdin')
    if proc.stderr is not None:
        tasks.append(log_errors(proc.stderr))
    else:
        print('No stderr')
    if proc.stdout is not None:
        tasks.append(read_stdout(proc.stdout))
    else:
        print('No stdout')

    if tasks:
        # feed stdin while consuming stdout to avoid hang
        # when stdin pipe is full
        yield from asyncio.wait(tasks)

    exitcode = yield from proc.wait()
    print("exit code: %s" % exitcode)


def main():
    if os.name == 'nt':
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()
    loop.run_until_complete(start(
        'sleep 2; wc', input=[b'foo bar baz\n'*300 for i in range(100)]))
    loop.close()


if __name__ == '__main__':
    main()
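For the common case where all input is available up front, the manual send_input/read_stdout tasks above can be replaced with communicate(), which feeds stdin and drains both output pipes concurrently. A minimal sketch (same Unix 'wc' assumption as main() above):

import asyncio
from asyncio.subprocess import PIPE

@asyncio.coroutine
def run_wc(data):
    proc = yield from asyncio.create_subprocess_shell(
        'wc', stdin=PIPE, stdout=PIPE, stderr=PIPE)
    # communicate() writes stdin, reads both pipes, and waits for exit,
    # avoiding the deadlock risk of doing those steps one at a time.
    stdout, stderr = yield from proc.communicate(data)
    return proc.returncode, stdout

loop = asyncio.get_event_loop()
code, out = loop.run_until_complete(run_wc(b'foo bar baz\n' * 300))
print(code, out)
loop.close()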
128
examples/tcp_echo.py
Executable file
@@ -0,0 +1,128 @@
#!/usr/bin/env python3
"""TCP echo server example."""
import argparse
import asyncio
import sys
try:
    import signal
except ImportError:
    signal = None


class EchoServer(asyncio.Protocol):

    TIMEOUT = 5.0

    def timeout(self):
        print('connection timeout, closing.')
        self.transport.close()

    def connection_made(self, transport):
        print('connection made')
        self.transport = transport

        # start 5 seconds timeout timer
        self.h_timeout = asyncio.get_event_loop().call_later(
            self.TIMEOUT, self.timeout)

    def data_received(self, data):
        print('data received: ', data.decode())
        self.transport.write(b'Re: ' + data)

        # restart timeout timer
        self.h_timeout.cancel()
        self.h_timeout = asyncio.get_event_loop().call_later(
            self.TIMEOUT, self.timeout)

    def eof_received(self):
        pass

    def connection_lost(self, exc):
        print('connection lost:', exc)
        self.h_timeout.cancel()


class EchoClient(asyncio.Protocol):

    message = 'This is the message. It will be echoed.'

    def connection_made(self, transport):
        self.transport = transport
        self.transport.write(self.message.encode())
        print('data sent:', self.message)

    def data_received(self, data):
        print('data received:', data)

        # disconnect after 10 seconds
        asyncio.get_event_loop().call_later(10.0, self.transport.close)

    def eof_received(self):
        pass

    def connection_lost(self, exc):
        print('connection lost:', exc)
        asyncio.get_event_loop().stop()


def start_client(loop, host, port):
    t = asyncio.Task(loop.create_connection(EchoClient, host, port))
    loop.run_until_complete(t)


def start_server(loop, host, port):
    f = loop.create_server(EchoServer, host, port)
    return loop.run_until_complete(f)


ARGS = argparse.ArgumentParser(description="TCP Echo example.")
ARGS.add_argument(
    '--server', action="store_true", dest='server',
    default=False, help='Run tcp server')
ARGS.add_argument(
    '--client', action="store_true", dest='client',
    default=False, help='Run tcp client')
ARGS.add_argument(
    '--host', action="store", dest='host',
    default='127.0.0.1', help='Host name')
ARGS.add_argument(
    '--port', action="store", dest='port',
    default=9999, type=int, help='Port number')
ARGS.add_argument(
    '--iocp', action="store_true", dest='iocp',
    default=False, help='Use IOCP event loop')


if __name__ == '__main__':
    args = ARGS.parse_args()

    if ':' in args.host:
        args.host, port = args.host.split(':', 1)
        args.port = int(port)

    if (not (args.server or args.client)) or (args.server and args.client):
        print('Please specify --server or --client\n')
        ARGS.print_help()
    else:
        if args.iocp:
            from asyncio import windows_events
            loop = windows_events.ProactorEventLoop()
            asyncio.set_event_loop(loop)
        else:
            loop = asyncio.get_event_loop()
        print('Using backend: {0}'.format(loop.__class__.__name__))

        if signal is not None and sys.platform != 'win32':
            loop.add_signal_handler(signal.SIGINT, loop.stop)

        if args.server:
            server = start_server(loop, args.host, args.port)
        else:
            start_client(loop, args.host, args.port)

        try:
            loop.run_forever()
        finally:
            if args.server:
                server.close()
            loop.close()
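The same client round-trip can also be written with the stream API instead of a Protocol subclass. A sketch (assumes an echo server from above is listening on 127.0.0.1:9999):

import asyncio

@asyncio.coroutine
def echo_once(host='127.0.0.1', port=9999):
    reader, writer = yield from asyncio.open_connection(host, port)
    writer.write(b'ping')
    data = yield from reader.read(100)  # the server replies b'Re: ping'
    writer.close()
    return data

loop = asyncio.get_event_loop()
print(loop.run_until_complete(echo_once()))
loop.close()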
168
examples/timing_tcp_server.py
Normal file
@@ -0,0 +1,168 @@
"""
A variant of simple_tcp_server.py that measures the time it takes to
send N messages for a range of N. (This was O(N**2) in a previous
version of Tulip.)

Note that running this example starts both the TCP server and client
in the same process. It listens on port 12345 on 127.0.0.1, so it will
fail if this port is currently in use.
"""

import sys
import time
import random

import asyncio
import asyncio.streams


class MyServer:
    """
    This is just an example of how a TCP server might be potentially
    structured. This class has basically 3 methods: start the server,
    handle a client, and stop the server.

    Note that you don't have to follow this structure, it is really
    just an example or possible starting point.
    """

    def __init__(self):
        self.server = None  # encapsulates the server sockets

        # this keeps track of all the clients that connected to our
        # server. It can be useful in some cases, for instance to
        # kill client connections or to broadcast some data to all
        # clients...
        self.clients = {}  # task -> (reader, writer)

    def _accept_client(self, client_reader, client_writer):
        """
        This method accepts a new client connection and creates a Task
        to handle this client. self.clients is updated to keep track
        of the new client.
        """

        # start a new Task to handle this specific client connection
        task = asyncio.Task(self._handle_client(client_reader, client_writer))
        self.clients[task] = (client_reader, client_writer)

        def client_done(task):
            print("client task done:", task, file=sys.stderr)
            del self.clients[task]

        task.add_done_callback(client_done)

    @asyncio.coroutine
    def _handle_client(self, client_reader, client_writer):
        """
        This method actually does the work to handle the requests for
        a specific client. The protocol is line oriented, so there is
        a main loop that reads a line with a request and then sends
        out one or more lines back to the client with the result.
        """
        while True:
            data = (yield from client_reader.readline()).decode("utf-8")
            if not data:  # an empty string means the client disconnected
                break
            cmd, *args = data.rstrip().split(' ')
            if cmd == 'add':
                arg1 = float(args[0])
                arg2 = float(args[1])
                retval = arg1 + arg2
                client_writer.write("{!r}\n".format(retval).encode("utf-8"))
            elif cmd == 'repeat':
                times = int(args[0])
                msg = args[1]
                client_writer.write("begin\n".encode("utf-8"))
                for idx in range(times):
                    client_writer.write("{}. {}\n".format(
                        idx+1, msg + 'x'*random.randint(10, 50))
                        .encode("utf-8"))
                client_writer.write("end\n".encode("utf-8"))
            else:
                print("Bad command {!r}".format(data), file=sys.stderr)

            # This enables us to have flow control in our connection.
            yield from client_writer.drain()

    def start(self, loop):
        """
        Starts the TCP server, so that it listens on port 12345.

        For each client that connects, the accept_client method gets
        called. This method runs the loop until the server sockets
        are ready to accept connections.
        """
        self.server = loop.run_until_complete(
            asyncio.streams.start_server(self._accept_client,
                                         '127.0.0.1', 12345,
                                         loop=loop))

    def stop(self, loop):
        """
        Stops the TCP server, i.e. closes the listening socket(s).

        This method runs the loop until the server sockets are closed.
        """
        if self.server is not None:
            self.server.close()
            loop.run_until_complete(self.server.wait_closed())
            self.server = None


def main():
    loop = asyncio.get_event_loop()

    # creates a server and starts listening to TCP connections
    server = MyServer()
    server.start(loop)

    @asyncio.coroutine
    def client():
        reader, writer = yield from asyncio.streams.open_connection(
            '127.0.0.1', 12345, loop=loop)

        def send(msg):
            print("> " + msg)
            writer.write((msg + '\n').encode("utf-8"))

        def recv():
            msgback = (yield from reader.readline()).decode("utf-8").rstrip()
            print("< " + msgback)
            return msgback

        # send a line
        send("add 1 2")
        msg = yield from recv()

        Ns = list(range(100, 100000, 10000))
        times = []

        for N in Ns:
            t0 = time.time()
            send("repeat {} hello world ".format(N))
            msg = yield from recv()
            assert msg == 'begin'
            while True:
                msg = (yield from reader.readline()).decode("utf-8").rstrip()
                if msg == 'end':
                    break
            t1 = time.time()
            dt = t1 - t0
            print("Time taken: {:.3f} seconds ({:.6f} per repetition)"
                  .format(dt, dt/N))
            times.append(dt)

        writer.close()
        yield from asyncio.sleep(0.5)

    # creates a client and connects to our server
    try:
        loop.run_until_complete(client())
        server.stop(loop)
    finally:
        loop.close()


if __name__ == '__main__':
    main()
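An aside on the measurement (not part of the commit): time.time() follows the wall clock, so NTP adjustments can skew dt. For pure interval timing, time.monotonic() is the safer choice; a tiny helper sketch:

import time

def timed(fn, *args):
    # time.monotonic() never jumps backwards, unlike time.time()
    t0 = time.monotonic()
    result = fn(*args)
    return result, time.monotonic() - t0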
104
examples/udp_echo.py
Executable file
@@ -0,0 +1,104 @@
#!/usr/bin/env python3
"""UDP echo example."""
import argparse
import asyncio
try:
    import signal
except ImportError:
    signal = None


class MyServerUdpEchoProtocol:

    def connection_made(self, transport):
        print('start', transport)
        self.transport = transport

    def datagram_received(self, data, addr):
        print('Data received:', data, addr)
        self.transport.sendto(data, addr)

    def error_received(self, exc):
        print('Error received:', exc)

    def connection_lost(self, exc):
        print('stop', exc)


class MyClientUdpEchoProtocol:

    message = 'This is the message. It will be echoed.'

    def connection_made(self, transport):
        self.transport = transport
        print('sending "{}"'.format(self.message))
        self.transport.sendto(self.message.encode())
        print('waiting to receive')

    def datagram_received(self, data, addr):
        print('received "{}"'.format(data.decode()))
        self.transport.close()

    def error_received(self, exc):
        print('Error received:', exc)

    def connection_lost(self, exc):
        print('closing transport', exc)
        loop = asyncio.get_event_loop()
        loop.stop()


def start_server(loop, addr):
    t = asyncio.Task(loop.create_datagram_endpoint(
        MyServerUdpEchoProtocol, local_addr=addr))
    transport, server = loop.run_until_complete(t)
    return transport


def start_client(loop, addr):
    t = asyncio.Task(loop.create_datagram_endpoint(
        MyClientUdpEchoProtocol, remote_addr=addr))
    loop.run_until_complete(t)


ARGS = argparse.ArgumentParser(description="UDP Echo example.")
ARGS.add_argument(
    '--server', action="store_true", dest='server',
    default=False, help='Run udp server')
ARGS.add_argument(
    '--client', action="store_true", dest='client',
    default=False, help='Run udp client')
ARGS.add_argument(
    '--host', action="store", dest='host',
    default='127.0.0.1', help='Host name')
ARGS.add_argument(
    '--port', action="store", dest='port',
    default=9999, type=int, help='Port number')


if __name__ == '__main__':
    args = ARGS.parse_args()
    if ':' in args.host:
        args.host, port = args.host.split(':', 1)
        args.port = int(port)

    if (not (args.server or args.client)) or (args.server and args.client):
        print('Please specify --server or --client\n')
        ARGS.print_help()
    else:
        loop = asyncio.get_event_loop()
        if signal is not None:
            loop.add_signal_handler(signal.SIGINT, loop.stop)

        if args.server:
            server = start_server(loop, (args.host, args.port))
        else:
            start_client(loop, (args.host, args.port))

        try:
            loop.run_forever()
        finally:
            if args.server:
                server.close()
            loop.close()
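For a one-shot datagram there is no need for the run_forever() scaffolding above; a fire-and-forget sender can close its own transport and stop the loop. A sketch (hypothetical OneShot class, not part of the commit; assumes an echo server on 127.0.0.1:9999):

import asyncio

class OneShot(asyncio.DatagramProtocol):
    """Hypothetical sender: transmit one datagram, then shut down."""

    def __init__(self, message, loop):
        self.message = message
        self.loop = loop

    def connection_made(self, transport):
        transport.sendto(self.message)
        transport.close()

    def connection_lost(self, exc):
        self.loop.stop()

loop = asyncio.get_event_loop()
loop.run_until_complete(loop.create_datagram_endpoint(
    lambda: OneShot(b'hello', loop), remote_addr=('127.0.0.1', 9999)))
loop.run_forever()  # stopped by connection_lost()
loop.close()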
1380
overlapped.c
Normal file
File diff suppressed because it is too large
14
run_aiotest.py
Normal file
@@ -0,0 +1,14 @@
import aiotest.run
import asyncio
import sys
if sys.platform == 'win32':
    from asyncio.windows_utils import socketpair
else:
    from socket import socketpair

config = aiotest.TestConfig()
config.asyncio = asyncio
config.socketpair = socketpair
config.new_event_pool_policy = asyncio.DefaultEventLoopPolicy
config.call_soon_check_closed = True
aiotest.run.main(config)
302
runtests.py
Normal file
@@ -0,0 +1,302 @@
"""Run Tulip unittests.

Usage:
  python3 runtests.py [flags] [pattern] ...

Patterns are matched against the fully qualified name of the test,
including package, module, class and method,
e.g. 'tests.test_events.PolicyTests.testPolicy'.

For full help, try --help.

runtests.py --coverage is equivalent of:

  $(COVERAGE) run --branch runtests.py -v
  $(COVERAGE) html $(list of files)
  $(COVERAGE) report -m $(list of files)

"""

# Originally written by Beech Horn (for NDB).

import argparse
import gc
import logging
import os
import random
import re
import sys
import unittest
import textwrap
import importlib.machinery
try:
    import coverage
except ImportError:
    coverage = None

from unittest.signals import installHandler

assert sys.version_info >= (3, 3), 'Please use Python 3.3 or higher.'

ARGS = argparse.ArgumentParser(description="Run all unittests.")
ARGS.add_argument(
    '-v', action="store", dest='verbose',
    nargs='?', const=1, type=int, default=0, help='verbose')
ARGS.add_argument(
    '-x', action="store_true", dest='exclude', help='exclude tests')
ARGS.add_argument(
    '-f', '--failfast', action="store_true", default=False,
    dest='failfast', help='Stop on first fail or error')
ARGS.add_argument(
    '-c', '--catch', action="store_true", default=False,
    dest='catchbreak', help='Catch control-C and display results')
ARGS.add_argument(
    '--forever', action="store_true", dest='forever', default=False,
    help='run tests forever to catch sporadic errors')
ARGS.add_argument(
    '--findleaks', action='store_true', dest='findleaks',
    help='detect tests that leak memory')
ARGS.add_argument('-r', '--randomize', action='store_true',
                  help='randomize test execution order.')
ARGS.add_argument('--seed', type=int,
                  help='random seed to reproduce a previous random run')
ARGS.add_argument(
    '-q', action="store_true", dest='quiet', help='quiet')
ARGS.add_argument(
    '--tests', action="store", dest='testsdir', default='tests',
    help='tests directory')
ARGS.add_argument(
    '--coverage', action="store_true", dest='coverage',
    help='enable html coverage report')
ARGS.add_argument(
    'pattern', action="store", nargs="*",
    help='optional regex patterns to match test ids (default all tests)')

COV_ARGS = argparse.ArgumentParser(description="Run all unittests.")
COV_ARGS.add_argument(
    '--coverage', action="store", dest='coverage', nargs='?', const='',
    help='enable coverage report and provide python files directory')


def load_modules(basedir, suffix='.py'):
    def list_dir(prefix, dir):
        files = []

        modpath = os.path.join(dir, '__init__.py')
        if os.path.isfile(modpath):
            mod = os.path.split(dir)[-1]
            files.append(('{}{}'.format(prefix, mod), modpath))

            prefix = '{}{}.'.format(prefix, mod)

        for name in os.listdir(dir):
            path = os.path.join(dir, name)

            if os.path.isdir(path):
                files.extend(list_dir('{}{}.'.format(prefix, name), path))
            else:
                if (name != '__init__.py' and
                        name.endswith(suffix) and
                        not name.startswith(('.', '_'))):
                    files.append(('{}{}'.format(prefix, name[:-3]), path))

        return files

    mods = []
    for modname, sourcefile in list_dir('', basedir):
        if modname == 'runtests':
            continue
        try:
            loader = importlib.machinery.SourceFileLoader(modname, sourcefile)
            mods.append((loader.load_module(), sourcefile))
        except SyntaxError:
            raise
        except unittest.SkipTest as err:
            print("Skipping '{}': {}".format(modname, err), file=sys.stderr)

    return mods


def randomize_tests(tests, seed):
    if seed is None:
        seed = random.randrange(10000000)
    random.seed(seed)
    print("Using random seed", seed)
    random.shuffle(tests._tests)


class TestsFinder:

    def __init__(self, testsdir, includes=(), excludes=()):
        self._testsdir = testsdir
        self._includes = includes
        self._excludes = excludes
        self.find_available_tests()

    def find_available_tests(self):
        """
        Find available test classes without instantiating them.
        """
        self._test_factories = []
        mods = [mod for mod, _ in load_modules(self._testsdir)]
        for mod in mods:
            for name in set(dir(mod)):
                if name.endswith('Tests'):
                    self._test_factories.append(getattr(mod, name))

    def load_tests(self):
        """
        Load test cases from the available test classes and apply
        optional include / exclude filters.
        """
        loader = unittest.TestLoader()
        suite = unittest.TestSuite()
        for test_factory in self._test_factories:
            tests = loader.loadTestsFromTestCase(test_factory)
            if self._includes:
                tests = [test
                         for test in tests
                         if any(re.search(pat, test.id())
                                for pat in self._includes)]
            if self._excludes:
                tests = [test
                         for test in tests
                         if not any(re.search(pat, test.id())
                                    for pat in self._excludes)]
            suite.addTests(tests)
        return suite


class TestResult(unittest.TextTestResult):

    def __init__(self, stream, descriptions, verbosity):
        super().__init__(stream, descriptions, verbosity)
        self.leaks = []

    def startTest(self, test):
        super().startTest(test)
        gc.collect()

    def addSuccess(self, test):
        super().addSuccess(test)
        gc.collect()
        if gc.garbage:
            if self.showAll:
                self.stream.writeln(
                    "    Warning: test created {} uncollectable "
                    "object(s).".format(len(gc.garbage)))
            # move the uncollectable objects somewhere so we don't see
            # them again
            self.leaks.append((self.getDescription(test), gc.garbage[:]))
            del gc.garbage[:]


class TestRunner(unittest.TextTestRunner):
    resultclass = TestResult

    def run(self, test):
        result = super().run(test)
        if result.leaks:
            self.stream.writeln("{} tests leaks:".format(len(result.leaks)))
            for name, leaks in result.leaks:
                self.stream.writeln(' '*4 + name + ':')
                for leak in leaks:
                    self.stream.writeln(' '*8 + repr(leak))
        return result


def runtests():
    args = ARGS.parse_args()

    if args.coverage and coverage is None:
        URL = "bitbucket.org/pypa/setuptools/raw/bootstrap/ez_setup.py"
        print(textwrap.dedent("""
            coverage package is not installed.

            To install coverage3 for Python 3, you need:
              - Setuptools (https://pypi.python.org/pypi/setuptools)

            What worked for me:
              - download {0}
                 * curl -O https://{0}
              - python3 ez_setup.py
              - python3 -m easy_install coverage
        """.format(URL)).strip())
        sys.exit(1)

    testsdir = os.path.abspath(args.testsdir)
    if not os.path.isdir(testsdir):
        print("Tests directory is not found: {}\n".format(testsdir))
        ARGS.print_help()
        return

    excludes = includes = []
    if args.exclude:
        excludes = args.pattern
    else:
        includes = args.pattern

    v = 0 if args.quiet else args.verbose + 1
    failfast = args.failfast
    catchbreak = args.catchbreak
    findleaks = args.findleaks
    runner_factory = TestRunner if findleaks else unittest.TextTestRunner

    if args.coverage:
        cov = coverage.coverage(branch=True,
                                source=['asyncio'],
                                )
        cov.start()

    logger = logging.getLogger()
    if v == 0:
        level = logging.CRITICAL
    elif v == 1:
        level = logging.ERROR
    elif v == 2:
        level = logging.WARNING
    elif v == 3:
        level = logging.INFO
    else:  # v >= 4
        level = logging.DEBUG
    logging.basicConfig(level=level)

    finder = TestsFinder(args.testsdir, includes, excludes)
    if catchbreak:
        installHandler()
    import asyncio.coroutines
    if asyncio.coroutines._DEBUG:
        print("Run tests in debug mode")
    else:
        print("Run tests in release mode")
    try:
        if args.forever:
            while True:
                tests = finder.load_tests()
                if args.randomize:
                    randomize_tests(tests, args.seed)
                result = runner_factory(verbosity=v,
                                        failfast=failfast).run(tests)
                if not result.wasSuccessful():
                    sys.exit(1)
        else:
            tests = finder.load_tests()
            if args.randomize:
                randomize_tests(tests, args.seed)
            result = runner_factory(verbosity=v,
                                    failfast=failfast).run(tests)
            sys.exit(not result.wasSuccessful())
    finally:
        if args.coverage:
            cov.stop()
            cov.save()
            cov.html_report(directory='htmlcov')
            print("\nCoverage report:")
            cov.report(show_missing=False)
            here = os.path.dirname(os.path.abspath(__file__))
            print("\nFor html report:")
            print("open file://{}/htmlcov/index.html".format(here))


if __name__ == '__main__':
    runtests()
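A sketch (assumption: executed from the repository root, so runtests.py is importable) of driving the finder programmatically rather than through argv:

import unittest
from runtests import TestsFinder

# Collect only tests whose ids match the include pattern, then run them.
finder = TestsFinder('tests', includes=[r'test_futures'])
unittest.TextTestRunner(verbosity=2).run(finder.load_tests())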
34
setup.py
Normal file
@@ -0,0 +1,34 @@
import os
try:
    from setuptools import setup, Extension
except ImportError:
    # Use distutils.core as a fallback.
    # We won't be able to build the Wheel file on Windows.
    from distutils.core import setup, Extension

extensions = []
if os.name == 'nt':
    ext = Extension(
        'asyncio._overlapped', ['overlapped.c'], libraries=['ws2_32'],
    )
    extensions.append(ext)

setup(
    name="asyncio",
    version="3.4.1",

    description="reference implementation of PEP 3156",
    long_description=open("README").read(),
    url="http://www.python.org/dev/peps/pep-3156/",

    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.3",
    ],

    packages=["asyncio"],
    test_suite="runtests.runtests",

    ext_modules=extensions,
)
8
tests/echo.py
Normal file
@@ -0,0 +1,8 @@
import os

if __name__ == '__main__':
    while True:
        buf = os.read(0, 1024)
        if not buf:
            break
        os.write(1, buf)
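A sketch of how a test might drive tests/echo.py above as a subprocess (assumption: the current directory is the repository root):

import asyncio, sys
from asyncio.subprocess import PIPE

@asyncio.coroutine
def roundtrip():
    proc = yield from asyncio.create_subprocess_exec(
        sys.executable, 'tests/echo.py', stdin=PIPE, stdout=PIPE)
    # communicate() closes stdin after writing, so echo.py sees EOF and exits.
    stdout, _ = yield from proc.communicate(b'ping')
    return stdout

loop = asyncio.get_event_loop()
print(loop.run_until_complete(roundtrip()))  # b'ping'
loop.close()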
6
tests/echo2.py
Normal file
@@ -0,0 +1,6 @@
import os

if __name__ == '__main__':
    buf = os.read(0, 1024)
    os.write(1, b'OUT:'+buf)
    os.write(2, b'ERR:'+buf)
11
tests/echo3.py
Normal file
@@ -0,0 +1,11 @@
import os

if __name__ == '__main__':
    while True:
        buf = os.read(0, 1024)
        if not buf:
            break
        try:
            os.write(1, b'OUT:'+buf)
        except OSError as ex:
            os.write(2, b'ERR:' + ex.__class__.__name__.encode('ascii'))
73
tests/keycert3.pem
Normal file
@@ -0,0 +1,73 @@
-----BEGIN PRIVATE KEY-----
MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAMLgD0kAKDb5cFyP
jbwNfR5CtewdXC+kMXAWD8DLxiTTvhMW7qVnlwOm36mZlszHKvsRf05lT4pegiFM
9z2j1OlaN+ci/X7NU22TNN6crYSiN77FjYJP464j876ndSxyD+rzys386T+1r1aZ
aggEdkj1TsSsv1zWIYKlPIjlvhuxAgMBAAECgYA0aH+T2Vf3WOPv8KdkcJg6gCRe
yJKXOWgWRcicx/CUzOEsTxmFIDPLxqAWA3k7v0B+3vjGw5Y9lycV/5XqXNoQI14j
y09iNsumds13u5AKkGdTJnZhQ7UKdoVHfuP44ZdOv/rJ5/VD6F4zWywpe90pcbK+
AWDVtusgGQBSieEl1QJBAOyVrUG5l2yoUBtd2zr/kiGm/DYyXlIthQO/A3/LngDW
5/ydGxVsT7lAVOgCsoT+0L4efTh90PjzW8LPQrPBWVMCQQDS3h/FtYYd5lfz+FNL
9CEe1F1w9l8P749uNUD0g317zv1tatIqVCsQWHfVHNdVvfQ+vSFw38OORO00Xqs9
1GJrAkBkoXXEkxCZoy4PteheO/8IWWLGGr6L7di6MzFl1lIqwT6D8L9oaV2vynFT
DnKop0pa09Unhjyw57KMNmSE2SUJAkEArloTEzpgRmCq4IK2/NpCeGdHS5uqRlbh
1VIa/xGps7EWQl5Mn8swQDel/YP3WGHTjfx7pgSegQfkyaRtGpZ9OQJAa9Vumj8m
JAAtI0Bnga8hgQx7BhTQY4CadDxyiRGOGYhwUzYVCqkb2sbVRH9HnwUaJT7cWBY3
RnJdHOMXWem7/w==
-----END PRIVATE KEY-----
Certificate:
    Data:
        Version: 1 (0x0)
        Serial Number: 12723342612721443281 (0xb09264b1f2da21d1)
    Signature Algorithm: sha1WithRSAEncryption
        Issuer: C=XY, O=Python Software Foundation CA, CN=our-ca-server
        Validity
            Not Before: Jan  4 19:47:07 2013 GMT
            Not After : Nov 13 19:47:07 2022 GMT
        Subject: C=XY, L=Castle Anthrax, O=Python Software Foundation, CN=localhost
        Subject Public Key Info:
            Public Key Algorithm: rsaEncryption
                Public-Key: (1024 bit)
                Modulus:
                    00:c2:e0:0f:49:00:28:36:f9:70:5c:8f:8d:bc:0d:
                    7d:1e:42:b5:ec:1d:5c:2f:a4:31:70:16:0f:c0:cb:
                    c6:24:d3:be:13:16:ee:a5:67:97:03:a6:df:a9:99:
                    96:cc:c7:2a:fb:11:7f:4e:65:4f:8a:5e:82:21:4c:
                    f7:3d:a3:d4:e9:5a:37:e7:22:fd:7e:cd:53:6d:93:
                    34:de:9c:ad:84:a2:37:be:c5:8d:82:4f:e3:ae:23:
                    f3:be:a7:75:2c:72:0f:ea:f3:ca:cd:fc:e9:3f:b5:
                    af:56:99:6a:08:04:76:48:f5:4e:c4:ac:bf:5c:d6:
                    21:82:a5:3c:88:e5:be:1b:b1
                Exponent: 65537 (0x10001)
    Signature Algorithm: sha1WithRSAEncryption
         2f:42:5f:a3:09:2c:fa:51:88:c7:37:7f:ea:0e:63:f0:a2:9a:
         e5:5a:e2:c8:20:f0:3f:60:bc:c8:0f:b6:c6:76:ce:db:83:93:
         f5:a3:33:67:01:8e:04:cd:00:9a:73:fd:f3:35:86:fa:d7:13:
         e2:46:c6:9d:c0:29:53:d4:a9:90:b8:77:4b:e6:83:76:e4:92:
         d6:9c:50:cf:43:d0:c6:01:77:61:9a:de:9b:70:f7:72:cd:59:
         00:31:69:d9:b4:ca:06:9c:6d:c3:c7:80:8c:68:e6:b5:a2:f8:
         ef:1d:bb:16:9f:77:77:ef:87:62:22:9b:4d:69:a4:3a:1a:f1:
         21:5e:8c:32:ac:92:fd:15:6b:18:c2:7f:15:0d:98:30:ca:75:
         8f:1a:71:df:da:1d:b2:ef:9a:e8:2d:2e:02:fd:4a:3c:aa:96:
         0b:06:5d:35:b3:3d:24:87:4b:e0:b0:58:60:2f:45:ac:2e:48:
         8a:b0:99:10:65:27:ff:cc:b1:d8:fd:bd:26:6b:b9:0c:05:2a:
         f4:45:63:35:51:07:ed:83:85:fe:6f:69:cb:bb:40:a8:ae:b6:
         3b:56:4a:2d:a4:ed:6d:11:2c:4d:ed:17:24:fd:47:bc:d3:41:
         a2:d3:06:fe:0c:90:d8:d8:94:26:c4:ff:cc:a1:d8:42:77:eb:
         fc:a9:94:71
-----BEGIN CERTIFICATE-----
MIICpDCCAYwCCQCwkmSx8toh0TANBgkqhkiG9w0BAQUFADBNMQswCQYDVQQGEwJY
WTEmMCQGA1UECgwdUHl0aG9uIFNvZnR3YXJlIEZvdW5kYXRpb24gQ0ExFjAUBgNV
BAMMDW91ci1jYS1zZXJ2ZXIwHhcNMTMwMTA0MTk0NzA3WhcNMjIxMTEzMTk0NzA3
WjBfMQswCQYDVQQGEwJYWTEXMBUGA1UEBxMOQ2FzdGxlIEFudGhyYXgxIzAhBgNV
BAoTGlB5dGhvbiBTb2Z0d2FyZSBGb3VuZGF0aW9uMRIwEAYDVQQDEwlsb2NhbGhv
c3QwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMLgD0kAKDb5cFyPjbwNfR5C
tewdXC+kMXAWD8DLxiTTvhMW7qVnlwOm36mZlszHKvsRf05lT4pegiFM9z2j1Ola
N+ci/X7NU22TNN6crYSiN77FjYJP464j876ndSxyD+rzys386T+1r1aZaggEdkj1
TsSsv1zWIYKlPIjlvhuxAgMBAAEwDQYJKoZIhvcNAQEFBQADggEBAC9CX6MJLPpR
iMc3f+oOY/CimuVa4sgg8D9gvMgPtsZ2ztuDk/WjM2cBjgTNAJpz/fM1hvrXE+JG
xp3AKVPUqZC4d0vmg3bkktacUM9D0MYBd2Ga3ptw93LNWQAxadm0ygacbcPHgIxo
5rWi+O8duxafd3fvh2Iim01ppDoa8SFejDKskv0VaxjCfxUNmDDKdY8acd/aHbLv
mugtLgL9SjyqlgsGXTWzPSSHS+CwWGAvRawuSIqwmRBlJ//Msdj9vSZruQwFKvRF
YzVRB+2Dhf5vacu7QKiutjtWSi2k7W0RLE3tFyT9R7zTQaLTBv4MkNjYlCbE/8yh
2EJ36/yplHE=
-----END CERTIFICATE-----
78
tests/pycacert.pem
Normal file
@@ -0,0 +1,78 @@
Certificate:
    Data:
        Version: 3 (0x2)
        Serial Number: 12723342612721443280 (0xb09264b1f2da21d0)
    Signature Algorithm: sha1WithRSAEncryption
        Issuer: C=XY, O=Python Software Foundation CA, CN=our-ca-server
        Validity
            Not Before: Jan  4 19:47:07 2013 GMT
            Not After : Jan  2 19:47:07 2023 GMT
        Subject: C=XY, O=Python Software Foundation CA, CN=our-ca-server
        Subject Public Key Info:
            Public Key Algorithm: rsaEncryption
                Public-Key: (2048 bit)
                Modulus:
                    00:e7:de:e9:e3:0c:9f:00:b6:a1:fd:2b:5b:96:d2:
                    6f:cc:e0:be:86:b9:20:5e:ec:03:7a:55:ab:ea:a4:
                    e9:f9:49:85:d2:66:d5:ed:c7:7a:ea:56:8e:2d:8f:
                    e7:42:e2:62:28:a9:9f:d6:1b:8e:eb:b5:b4:9c:9f:
                    14:ab:df:e6:94:8b:76:1d:3e:6d:24:61:ed:0c:bf:
                    00:8a:61:0c:df:5c:c8:36:73:16:00:cd:47:ba:6d:
                    a4:a4:74:88:83:23:0a:19:fc:09:a7:3c:4a:4b:d3:
                    e7:1d:2d:e4:ea:4c:54:21:f3:26:db:89:37:18:d4:
                    02:bb:40:32:5f:a4:ff:2d:1c:f7:d4:bb:ec:8e:cf:
                    5c:82:ac:e6:7c:08:6c:48:85:61:07:7f:25:e0:5c:
                    e0:bc:34:5f:e0:b9:04:47:75:c8:47:0b:8d:bc:d6:
                    c8:68:5f:33:83:62:d2:20:44:35:b1:ad:81:1a:8a:
                    cd:bc:35:b0:5c:8b:47:d6:18:e9:9c:18:97:cc:01:
                    3c:29:cc:e8:1e:e4:e4:c1:b8:de:e7:c2:11:18:87:
                    5a:93:34:d8:a6:25:f7:14:71:eb:e4:21:a2:d2:0f:
                    2e:2e:d4:62:00:35:d3:d6:ef:5c:60:4b:4c:a9:14:
                    e2:dd:15:58:46:37:33:26:b7:e7:2e:5d:ed:42:e4:
                    c5:4d
                Exponent: 65537 (0x10001)
        X509v3 extensions:
            X509v3 Subject Key Identifier:
                BC:DD:62:D9:76:DA:1B:D2:54:6B:CF:E0:66:9B:1E:1E:7B:56:0C:0B
            X509v3 Authority Key Identifier:
                keyid:BC:DD:62:D9:76:DA:1B:D2:54:6B:CF:E0:66:9B:1E:1E:7B:56:0C:0B

            X509v3 Basic Constraints:
                CA:TRUE
    Signature Algorithm: sha1WithRSAEncryption
         7d:0a:f5:cb:8d:d3:5d:bd:99:8e:f8:2b:0f:ba:eb:c2:d9:a6:
         27:4f:2e:7b:2f:0e:64:d8:1c:35:50:4e:ee:fc:90:b9:8d:6d:
         a8:c5:c6:06:b0:af:f3:2d:bf:3b:b8:42:07:dd:18:7d:6d:95:
         54:57:85:18:60:47:2f:eb:78:1b:f9:e8:17:fd:5a:0d:87:17:
         28:ac:4c:6a:e6:bc:29:f4:f4:55:70:29:42:de:85:ea:ab:6c:
         23:06:64:30:75:02:8e:53:bc:5e:01:33:37:cc:1e:cd:b8:a4:
         fd:ca:e4:5f:65:3b:83:1c:86:f1:55:02:a0:3a:8f:db:91:b7:
         40:14:b4:e7:8d:d2:ee:73:ba:e3:e5:34:2d:bc:94:6f:4e:24:
         06:f7:5f:8b:0e:a7:8e:6b:de:5e:75:f4:32:9a:50:b1:44:33:
         9a:d0:05:e2:78:82:ff:db:da:8a:63:eb:a9:dd:d1:bf:a0:61:
         ad:e3:9e:8a:24:5d:62:0e:e7:4c:91:7f:ef:df:34:36:3b:2f:
         5d:f5:84:b2:2f:c4:6d:93:96:1a:6f:30:28:f1:da:12:9a:64:
         b4:40:33:1d:bd:de:2b:53:a8:ea:be:d6:bc:4e:96:f5:44:fb:
         32:18:ae:d5:1f:f6:69:af:b6:4e:7b:1d:58:ec:3b:a9:53:a3:
         5e:58:c8:9e
-----BEGIN CERTIFICATE-----
MIIDbTCCAlWgAwIBAgIJALCSZLHy2iHQMA0GCSqGSIb3DQEBBQUAME0xCzAJBgNV
BAYTAlhZMSYwJAYDVQQKDB1QeXRob24gU29mdHdhcmUgRm91bmRhdGlvbiBDQTEW
MBQGA1UEAwwNb3VyLWNhLXNlcnZlcjAeFw0xMzAxMDQxOTQ3MDdaFw0yMzAxMDIx
OTQ3MDdaME0xCzAJBgNVBAYTAlhZMSYwJAYDVQQKDB1QeXRob24gU29mdHdhcmUg
Rm91bmRhdGlvbiBDQTEWMBQGA1UEAwwNb3VyLWNhLXNlcnZlcjCCASIwDQYJKoZI
hvcNAQEBBQADggEPADCCAQoCggEBAOfe6eMMnwC2of0rW5bSb8zgvoa5IF7sA3pV
q+qk6flJhdJm1e3HeupWji2P50LiYiipn9Ybjuu1tJyfFKvf5pSLdh0+bSRh7Qy/
AIphDN9cyDZzFgDNR7ptpKR0iIMjChn8Cac8SkvT5x0t5OpMVCHzJtuJNxjUArtA
Ml+k/y0c99S77I7PXIKs5nwIbEiFYQd/JeBc4Lw0X+C5BEd1yEcLjbzWyGhfM4Ni
0iBENbGtgRqKzbw1sFyLR9YY6ZwYl8wBPCnM6B7k5MG43ufCERiHWpM02KYl9xRx
6+QhotIPLi7UYgA109bvXGBLTKkU4t0VWEY3Mya35y5d7ULkxU0CAwEAAaNQME4w
HQYDVR0OBBYEFLzdYtl22hvSVGvP4GabHh57VgwLMB8GA1UdIwQYMBaAFLzdYtl2
2hvSVGvP4GabHh57VgwLMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB
AH0K9cuN0129mY74Kw+668LZpidPLnsvDmTYHDVQTu78kLmNbajFxgawr/Mtvzu4
QgfdGH1tlVRXhRhgRy/reBv56Bf9Wg2HFyisTGrmvCn09FVwKULeheqrbCMGZDB1
Ao5TvF4BMzfMHs24pP3K5F9lO4MchvFVAqA6j9uRt0AUtOeN0u5zuuPlNC28lG9O
JAb3X4sOp45r3l519DKaULFEM5rQBeJ4gv/b2opj66nd0b+gYa3jnookXWIO50yR
f+/fNDY7L131hLIvxG2TlhpvMCjx2hKaZLRAMx293itTqOq+1rxOlvVE+zIYrtUf
9mmvtk57HVjsO6lTo15YyJ4=
-----END CERTIFICATE-----
14
tests/sample.crt
Normal file
@@ -0,0 +1,14 @@
-----BEGIN CERTIFICATE-----
MIICMzCCAZwCCQDFl4ys0fU7iTANBgkqhkiG9w0BAQUFADBeMQswCQYDVQQGEwJV
UzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuLUZyYW5jaXNjbzEi
MCAGA1UECgwZUHl0aG9uIFNvZnR3YXJlIEZvbmRhdGlvbjAeFw0xMzAzMTgyMDA3
MjhaFw0yMzAzMTYyMDA3MjhaMF4xCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxp
Zm9ybmlhMRYwFAYDVQQHDA1TYW4tRnJhbmNpc2NvMSIwIAYDVQQKDBlQeXRob24g
U29mdHdhcmUgRm9uZGF0aW9uMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCn
t3s+J7L0xP/YdAQOacpPi9phlrzKZhcXL3XMu2LCUg2fNJpx/47Vc5TZSaO11uO7
gdwVz3Z7Q2epAgwo59JLffLt5fia8+a/SlPweI/j4+wcIIIiqusnLfpqR8cIAavg
Z06cLYCDvb9wMlheIvSJY12skc1nnphWS2YJ0Xm6uQIDAQABMA0GCSqGSIb3DQEB
BQUAA4GBAE9PknG6pv72+5z/gsDGYy8sK5UNkbWSNr4i4e5lxVsF03+/M71H+3AB
MxVX4+A+Vlk2fmU+BrdHIIUE0r1dDcO3josQ9hc9OJpp5VLSQFP8VeuJCmzYPp9I
I8WbW93cnXnChTrYQVdgVoFdv7GE9YgU7NYkrGIM0nZl1/f/bHPB
-----END CERTIFICATE-----
15
tests/sample.key
Normal file
@@ -0,0 +1,15 @@
-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQCnt3s+J7L0xP/YdAQOacpPi9phlrzKZhcXL3XMu2LCUg2fNJpx
/47Vc5TZSaO11uO7gdwVz3Z7Q2epAgwo59JLffLt5fia8+a/SlPweI/j4+wcIIIi
qusnLfpqR8cIAavgZ06cLYCDvb9wMlheIvSJY12skc1nnphWS2YJ0Xm6uQIDAQAB
AoGABfm8k19Yue3W68BecKEGS0VBV57GRTPT+MiBGvVGNIQ15gk6w3sGfMZsdD1y
bsUkQgcDb2d/4i5poBTpl/+Cd41V+c20IC/sSl5X1IEreHMKSLhy/uyjyiyfXlP1
iXhToFCgLWwENWc8LzfUV8vuAV5WG6oL9bnudWzZxeqx8V0CQQDR7xwVj6LN70Eb
DUhSKLkusmFw5Gk9NJ/7wZ4eHg4B8c9KNVvSlLCLhcsVTQXuqYeFpOqytI45SneP
lr0vrvsDAkEAzITYiXu6ox5huDCG7imX2W9CAYuX638urLxBqBXMS7GqBzojD6RL
21Q8oPwJWJquERa3HDScq1deiQbM9uKIkwJBAIa1PLslGN216Xv3UPHPScyKD/aF
ynXIv+OnANPoiyp6RH4ksQ/18zcEGiVH8EeNpvV9tlAHhb+DZibQHgNr74sCQQC0
zhToplu/bVKSlUQUNO0rqrI9z30FErDewKeCw5KSsIRSU1E/uM3fHr9iyq4wiL6u
GNjUtKZ0y46lsT9uW6LFAkB5eqeEQnshAdr3X5GykWHJ8DDGBXPPn6Rce1NX4RSq
V9khG2z1bFyfo+hMqpYnF2k32hVq3E54RS8YYnwBsVof
-----END RSA PRIVATE KEY-----
15
tests/ssl_cert.pem
Normal file
@@ -0,0 +1,15 @@
-----BEGIN CERTIFICATE-----
MIICVDCCAb2gAwIBAgIJANfHOBkZr8JOMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNV
BAYTAlhZMRcwFQYDVQQHEw5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9u
IFNvZnR3YXJlIEZvdW5kYXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xMDEw
MDgyMzAxNTZaFw0yMDEwMDUyMzAxNTZaMF8xCzAJBgNVBAYTAlhZMRcwFQYDVQQH
Ew5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9uIFNvZnR3YXJlIEZvdW5k
YXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAw
gYkCgYEA21vT5isq7F68amYuuNpSFlKDPrMUCa4YWYqZRt2OZ+/3NKaZ2xAiSwr7
6MrQF70t5nLbSPpqE5+5VrS58SY+g/sXLiFd6AplH1wJZwh78DofbFYXUggktFMt
pTyiX8jtP66bkcPkDADA089RI1TQR6Ca+n7HFa7c1fabVV6i3zkCAwEAAaMYMBYw
FAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBBQUAA4GBAHPctQBEQ4wd
BJ6+JcpIraopLn8BGhbjNWj40mmRqWB/NAWF6M5ne7KpGAu7tLeG4hb1zLaldK8G
lxy2GPSRF6LFS48dpEj2HbMv2nvv6xxalDMJ9+DicWgAKTQ6bcX2j3GUkCR0g/T1
CRlNBAAlvhKzO7Clpf9l0YKBEfraJByX
-----END CERTIFICATE-----
16
tests/ssl_key.pem
Normal file
@@ -0,0 +1,16 @@
-----BEGIN PRIVATE KEY-----
MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBANtb0+YrKuxevGpm
LrjaUhZSgz6zFAmuGFmKmUbdjmfv9zSmmdsQIksK++jK0Be9LeZy20j6ahOfuVa0
ufEmPoP7Fy4hXegKZR9cCWcIe/A6H2xWF1IIJLRTLaU8ol/I7T+um5HD5AwAwNPP
USNU0Eegmvp+xxWu3NX2m1Veot85AgMBAAECgYA3ZdZ673X0oexFlq7AAmrutkHt
CL7LvwrpOiaBjhyTxTeSNWzvtQBkIU8DOI0bIazA4UreAFffwtvEuPmonDb3F+Iq
SMAu42XcGyVZEl+gHlTPU9XRX7nTOXVt+MlRRRxL6t9GkGfUAXI3XxJDXW3c0vBK
UL9xqD8cORXOfE06rQJBAP8mEX1ERkR64Ptsoe4281vjTlNfIbs7NMPkUnrn9N/Y
BLhjNIfQ3HFZG8BTMLfX7kCS9D593DW5tV4Z9BP/c6cCQQDcFzCcVArNh2JSywOQ
ZfTfRbJg/Z5Lt9Fkngv1meeGNPgIMLN8Sg679pAOOWmzdMO3V706rNPzSVMME7E5
oPIfAkEA8pDddarP5tCvTTgUpmTFbakm0KoTZm2+FzHcnA4jRh+XNTjTOv98Y6Ik
eO5d1ZnKXseWvkZncQgxfdnMqqpj5wJAcNq/RVne1DbYlwWchT2Si65MYmmJ8t+F
0mcsULqjOnEMwf5e+ptq5LzwbyrHZYq5FNk7ocufPv/ZQrcSSC+cFwJBAKvOJByS
x56qyGeZLOQlWS2JS3KJo59XuLFGqcbgN9Om9xFa41Yb4N9NvplFivsvZdw3m1Q/
SPIXQuT8RMPDVNQ=
-----END PRIVATE KEY-----
1183
tests/test_base_events.py
Normal file
File diff suppressed because it is too large
2306
tests/test_events.py
Normal file
File diff suppressed because it is too large
461
tests/test_futures.py
Normal file
@@ -0,0 +1,461 @@
|
||||
"""Tests for futures.py."""
|
||||
|
||||
import concurrent.futures
|
||||
import re
|
||||
import sys
|
||||
import threading
|
||||
import unittest
|
||||
from test import support
|
||||
from unittest import mock
|
||||
|
||||
import asyncio
|
||||
from asyncio import test_utils
|
||||
|
||||
|
||||
def _fakefunc(f):
|
||||
return f
|
||||
|
||||
def first_cb():
|
||||
pass
|
||||
|
||||
def last_cb():
|
||||
pass
|
||||
|
||||
|
||||
class FutureTests(test_utils.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.loop = self.new_test_loop()
|
||||
|
||||
def test_initial_state(self):
|
||||
f = asyncio.Future(loop=self.loop)
|
||||
self.assertFalse(f.cancelled())
|
||||
self.assertFalse(f.done())
|
||||
f.cancel()
|
||||
self.assertTrue(f.cancelled())
|
||||
|
||||
def test_init_constructor_default_loop(self):
|
||||
asyncio.set_event_loop(self.loop)
|
||||
f = asyncio.Future()
|
||||
self.assertIs(f._loop, self.loop)
|
||||
|
||||
def test_constructor_positional(self):
|
||||
# Make sure Future doesn't accept a positional argument
|
||||
self.assertRaises(TypeError, asyncio.Future, 42)
|
||||
|
||||
def test_cancel(self):
|
||||
f = asyncio.Future(loop=self.loop)
|
||||
self.assertTrue(f.cancel())
|
||||
self.assertTrue(f.cancelled())
|
||||
self.assertTrue(f.done())
|
||||
self.assertRaises(asyncio.CancelledError, f.result)
|
||||
self.assertRaises(asyncio.CancelledError, f.exception)
|
||||
self.assertRaises(asyncio.InvalidStateError, f.set_result, None)
|
||||
self.assertRaises(asyncio.InvalidStateError, f.set_exception, None)
|
||||
self.assertFalse(f.cancel())
|
||||
|
||||
def test_result(self):
|
||||
f = asyncio.Future(loop=self.loop)
|
||||
self.assertRaises(asyncio.InvalidStateError, f.result)
|
||||
|
||||
f.set_result(42)
|
||||
self.assertFalse(f.cancelled())
|
||||
self.assertTrue(f.done())
|
||||
self.assertEqual(f.result(), 42)
|
||||
self.assertEqual(f.exception(), None)
|
||||
self.assertRaises(asyncio.InvalidStateError, f.set_result, None)
|
||||
self.assertRaises(asyncio.InvalidStateError, f.set_exception, None)
|
||||
self.assertFalse(f.cancel())
|
||||
|
||||
def test_exception(self):
|
||||
exc = RuntimeError()
|
||||
f = asyncio.Future(loop=self.loop)
|
||||
self.assertRaises(asyncio.InvalidStateError, f.exception)
|
||||
|
||||
f.set_exception(exc)
|
||||
self.assertFalse(f.cancelled())
|
||||
self.assertTrue(f.done())
|
||||
self.assertRaises(RuntimeError, f.result)
|
||||
self.assertEqual(f.exception(), exc)
|
||||
self.assertRaises(asyncio.InvalidStateError, f.set_result, None)
|
||||
self.assertRaises(asyncio.InvalidStateError, f.set_exception, None)
|
||||
self.assertFalse(f.cancel())
|
||||
|
||||
def test_exception_class(self):
|
||||
f = asyncio.Future(loop=self.loop)
|
||||
f.set_exception(RuntimeError)
|
||||
self.assertIsInstance(f.exception(), RuntimeError)
|
||||
|
||||
def test_yield_from_twice(self):
|
||||
f = asyncio.Future(loop=self.loop)
|
||||
|
||||
def fixture():
|
||||
yield 'A'
|
||||
x = yield from f
|
||||
yield 'B', x
|
||||
y = yield from f
|
||||
yield 'C', y
|
||||
|
||||
g = fixture()
|
||||
self.assertEqual(next(g), 'A') # yield 'A'.
|
||||
self.assertEqual(next(g), f) # First yield from f.
|
||||
f.set_result(42)
|
||||
self.assertEqual(next(g), ('B', 42)) # yield 'B', x.
|
||||
# The second "yield from f" does not yield f.
|
||||
        self.assertEqual(next(g), ('C', 42))  # yield 'C', y.

    def test_future_repr(self):
        self.loop.set_debug(True)
        f_pending_debug = asyncio.Future(loop=self.loop)
        frame = f_pending_debug._source_traceback[-1]
        self.assertEqual(repr(f_pending_debug),
                         '<Future pending created at %s:%s>'
                         % (frame[0], frame[1]))
        f_pending_debug.cancel()

        self.loop.set_debug(False)
        f_pending = asyncio.Future(loop=self.loop)
        self.assertEqual(repr(f_pending), '<Future pending>')
        f_pending.cancel()

        f_cancelled = asyncio.Future(loop=self.loop)
        f_cancelled.cancel()
        self.assertEqual(repr(f_cancelled), '<Future cancelled>')

        f_result = asyncio.Future(loop=self.loop)
        f_result.set_result(4)
        self.assertEqual(repr(f_result), '<Future finished result=4>')
        self.assertEqual(f_result.result(), 4)

        exc = RuntimeError()
        f_exception = asyncio.Future(loop=self.loop)
        f_exception.set_exception(exc)
        self.assertEqual(repr(f_exception), '<Future finished exception=RuntimeError()>')
        self.assertIs(f_exception.exception(), exc)

        def func_repr(func):
            filename, lineno = test_utils.get_function_source(func)
            text = '%s() at %s:%s' % (func.__qualname__, filename, lineno)
            return re.escape(text)

        f_one_callbacks = asyncio.Future(loop=self.loop)
        f_one_callbacks.add_done_callback(_fakefunc)
        fake_repr = func_repr(_fakefunc)
        self.assertRegex(repr(f_one_callbacks),
                         r'<Future pending cb=\[%s\]>' % fake_repr)
        f_one_callbacks.cancel()
        self.assertEqual(repr(f_one_callbacks),
                         '<Future cancelled>')

        f_two_callbacks = asyncio.Future(loop=self.loop)
        f_two_callbacks.add_done_callback(first_cb)
        f_two_callbacks.add_done_callback(last_cb)
        first_repr = func_repr(first_cb)
        last_repr = func_repr(last_cb)
        self.assertRegex(repr(f_two_callbacks),
                         r'<Future pending cb=\[%s, %s\]>'
                         % (first_repr, last_repr))

        f_many_callbacks = asyncio.Future(loop=self.loop)
        f_many_callbacks.add_done_callback(first_cb)
        for i in range(8):
            f_many_callbacks.add_done_callback(_fakefunc)
        f_many_callbacks.add_done_callback(last_cb)
        cb_regex = r'%s, <8 more>, %s' % (first_repr, last_repr)
        self.assertRegex(repr(f_many_callbacks),
                         r'<Future pending cb=\[%s\]>' % cb_regex)
        f_many_callbacks.cancel()
        self.assertEqual(repr(f_many_callbacks),
                         '<Future cancelled>')

    def test_copy_state(self):
        # Test the internal _copy_state method since it's being directly
        # invoked in other modules.
        f = asyncio.Future(loop=self.loop)
        f.set_result(10)

        newf = asyncio.Future(loop=self.loop)
        newf._copy_state(f)
        self.assertTrue(newf.done())
        self.assertEqual(newf.result(), 10)

        f_exception = asyncio.Future(loop=self.loop)
        f_exception.set_exception(RuntimeError())

        newf_exception = asyncio.Future(loop=self.loop)
        newf_exception._copy_state(f_exception)
        self.assertTrue(newf_exception.done())
        self.assertRaises(RuntimeError, newf_exception.result)

        f_cancelled = asyncio.Future(loop=self.loop)
        f_cancelled.cancel()

        newf_cancelled = asyncio.Future(loop=self.loop)
        newf_cancelled._copy_state(f_cancelled)
        self.assertTrue(newf_cancelled.cancelled())

    def test_iter(self):
        fut = asyncio.Future(loop=self.loop)

        def coro():
            yield from fut

        def test():
            arg1, arg2 = coro()

        self.assertRaises(AssertionError, test)
        fut.cancel()

    @mock.patch('asyncio.base_events.logger')
    def test_tb_logger_abandoned(self, m_log):
        fut = asyncio.Future(loop=self.loop)
        del fut
        self.assertFalse(m_log.error.called)

    @mock.patch('asyncio.base_events.logger')
    def test_tb_logger_result_unretrieved(self, m_log):
        fut = asyncio.Future(loop=self.loop)
        fut.set_result(42)
        del fut
        self.assertFalse(m_log.error.called)

    @mock.patch('asyncio.base_events.logger')
    def test_tb_logger_result_retrieved(self, m_log):
        fut = asyncio.Future(loop=self.loop)
        fut.set_result(42)
        fut.result()
        del fut
        self.assertFalse(m_log.error.called)

    @mock.patch('asyncio.base_events.logger')
    def test_tb_logger_exception_unretrieved(self, m_log):
        fut = asyncio.Future(loop=self.loop)
        fut.set_exception(RuntimeError('boom'))
        del fut
        test_utils.run_briefly(self.loop)
        self.assertTrue(m_log.error.called)

    @mock.patch('asyncio.base_events.logger')
    def test_tb_logger_exception_retrieved(self, m_log):
        fut = asyncio.Future(loop=self.loop)
        fut.set_exception(RuntimeError('boom'))
        fut.exception()
        del fut
        self.assertFalse(m_log.error.called)

    @mock.patch('asyncio.base_events.logger')
    def test_tb_logger_exception_result_retrieved(self, m_log):
        fut = asyncio.Future(loop=self.loop)
        fut.set_exception(RuntimeError('boom'))
        self.assertRaises(RuntimeError, fut.result)
        del fut
        self.assertFalse(m_log.error.called)

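Aside, not part of the diff: the tb_logger tests above pin down that a Future whose exception is never retrieved logs an error when it is garbage-collected, while retrieving the result or exception silences the log. A minimal sketch of that behaviour, assuming a default event loop is available:

import asyncio

loop = asyncio.get_event_loop()

fut = asyncio.Future(loop=loop)
fut.set_exception(RuntimeError('boom'))
# Dropping the last reference without calling fut.exception() or
# fut.result() makes asyncio log "exception was never retrieved"
# when the future is garbage-collected.
del fut

fut2 = asyncio.Future(loop=loop)
fut2.set_exception(RuntimeError('boom'))
fut2.exception()  # retrieving the exception silences the log message
del fut2
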
    def test_wrap_future(self):

        def run(arg):
            return (arg, threading.get_ident())
        ex = concurrent.futures.ThreadPoolExecutor(1)
        f1 = ex.submit(run, 'oi')
        f2 = asyncio.wrap_future(f1, loop=self.loop)
        res, ident = self.loop.run_until_complete(f2)
        self.assertIsInstance(f2, asyncio.Future)
        self.assertEqual(res, 'oi')
        self.assertNotEqual(ident, threading.get_ident())

    def test_wrap_future_future(self):
        f1 = asyncio.Future(loop=self.loop)
        f2 = asyncio.wrap_future(f1)
        self.assertIs(f1, f2)

    @mock.patch('asyncio.futures.events')
    def test_wrap_future_use_global_loop(self, m_events):
        def run(arg):
            return (arg, threading.get_ident())
        ex = concurrent.futures.ThreadPoolExecutor(1)
        f1 = ex.submit(run, 'oi')
        f2 = asyncio.wrap_future(f1)
        self.assertIs(m_events.get_event_loop.return_value, f2._loop)

    def test_wrap_future_cancel(self):
        f1 = concurrent.futures.Future()
        f2 = asyncio.wrap_future(f1, loop=self.loop)
        f2.cancel()
        test_utils.run_briefly(self.loop)
        self.assertTrue(f1.cancelled())
        self.assertTrue(f2.cancelled())

    def test_wrap_future_cancel2(self):
        f1 = concurrent.futures.Future()
        f2 = asyncio.wrap_future(f1, loop=self.loop)
        f1.set_result(42)
        f2.cancel()
        test_utils.run_briefly(self.loop)
        self.assertFalse(f1.cancelled())
        self.assertEqual(f1.result(), 42)
        self.assertTrue(f2.cancelled())

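Aside, not part of the diff: as these tests exercise, asyncio.wrap_future() bridges a concurrent.futures.Future into an asyncio.Future that the event loop can await, and cancellation propagates back to the executor future when it has not completed yet. A minimal usage sketch:

import asyncio
import concurrent.futures

def blocking_work(x):
    return x * 2  # runs in a worker thread

loop = asyncio.get_event_loop()
executor = concurrent.futures.ThreadPoolExecutor(1)

cf = executor.submit(blocking_work, 21)   # concurrent.futures.Future
af = asyncio.wrap_future(cf, loop=loop)   # asyncio.Future wrapping it
print(loop.run_until_complete(af))        # -> 42
executor.shutdown()
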
    def test_future_source_traceback(self):
        self.loop.set_debug(True)

        future = asyncio.Future(loop=self.loop)
        lineno = sys._getframe().f_lineno - 1
        self.assertIsInstance(future._source_traceback, list)
        self.assertEqual(future._source_traceback[-1][:3],
                         (__file__,
                          lineno,
                          'test_future_source_traceback'))

    @mock.patch('asyncio.base_events.logger')
    def check_future_exception_never_retrieved(self, debug, m_log):
        self.loop.set_debug(debug)

        def memory_error():
            try:
                raise MemoryError()
            except BaseException as exc:
                return exc
        exc = memory_error()

        future = asyncio.Future(loop=self.loop)
        if debug:
            source_traceback = future._source_traceback
        future.set_exception(exc)
        future = None
        test_utils.run_briefly(self.loop)
        support.gc_collect()

        if sys.version_info >= (3, 4):
            if debug:
                frame = source_traceback[-1]
                regex = (r'^Future exception was never retrieved\n'
                         r'future: <Future finished exception=MemoryError\(\) created at {filename}:{lineno}>\n'
                         r'source_traceback: Object created at \(most recent call last\):\n'
                         r'  File'
                         r'.*\n'
                         r'  File "{filename}", line {lineno}, in check_future_exception_never_retrieved\n'
                         r'    future = asyncio\.Future\(loop=self\.loop\)$'
                         ).format(filename=re.escape(frame[0]), lineno=frame[1])
            else:
                regex = (r'^Future exception was never retrieved\n'
                         r'future: <Future finished exception=MemoryError\(\)>$'
                         )
            exc_info = (type(exc), exc, exc.__traceback__)
            m_log.error.assert_called_once_with(mock.ANY, exc_info=exc_info)
        else:
            if debug:
                frame = source_traceback[-1]
                regex = (r'^Future/Task exception was never retrieved\n'
                         r'Future/Task created at \(most recent call last\):\n'
                         r'  File'
                         r'.*\n'
                         r'  File "{filename}", line {lineno}, in check_future_exception_never_retrieved\n'
                         r'    future = asyncio\.Future\(loop=self\.loop\)\n'
                         r'Traceback \(most recent call last\):\n'
                         r'.*\n'
                         r'MemoryError$'
                         ).format(filename=re.escape(frame[0]), lineno=frame[1])
            else:
                regex = (r'^Future/Task exception was never retrieved\n'
                         r'Traceback \(most recent call last\):\n'
                         r'.*\n'
                         r'MemoryError$'
                         )
            m_log.error.assert_called_once_with(mock.ANY, exc_info=False)
        message = m_log.error.call_args[0][0]
        self.assertRegex(message, re.compile(regex, re.DOTALL))

    def test_future_exception_never_retrieved(self):
        self.check_future_exception_never_retrieved(False)

    def test_future_exception_never_retrieved_debug(self):
        self.check_future_exception_never_retrieved(True)

    def test_set_result_unless_cancelled(self):
        fut = asyncio.Future(loop=self.loop)
        fut.cancel()
        fut._set_result_unless_cancelled(2)
        self.assertTrue(fut.cancelled())


class FutureDoneCallbackTests(test_utils.TestCase):

    def setUp(self):
        self.loop = self.new_test_loop()

    def run_briefly(self):
        test_utils.run_briefly(self.loop)

    def _make_callback(self, bag, thing):
        # Create a callback function that appends thing to bag.
        def bag_appender(future):
            bag.append(thing)
        return bag_appender

    def _new_future(self):
        return asyncio.Future(loop=self.loop)

    def test_callbacks_invoked_on_set_result(self):
        bag = []
        f = self._new_future()
        f.add_done_callback(self._make_callback(bag, 42))
        f.add_done_callback(self._make_callback(bag, 17))

        self.assertEqual(bag, [])
        f.set_result('foo')

        self.run_briefly()

        self.assertEqual(bag, [42, 17])
        self.assertEqual(f.result(), 'foo')

    def test_callbacks_invoked_on_set_exception(self):
        bag = []
        f = self._new_future()
        f.add_done_callback(self._make_callback(bag, 100))

        self.assertEqual(bag, [])
        exc = RuntimeError()
        f.set_exception(exc)

        self.run_briefly()

        self.assertEqual(bag, [100])
        self.assertEqual(f.exception(), exc)

    def test_remove_done_callback(self):
        bag = []
        f = self._new_future()
        cb1 = self._make_callback(bag, 1)
        cb2 = self._make_callback(bag, 2)
        cb3 = self._make_callback(bag, 3)

        # Add one cb1 and one cb2.
        f.add_done_callback(cb1)
        f.add_done_callback(cb2)

        # One instance of cb2 removed. Now there's only one cb1.
        self.assertEqual(f.remove_done_callback(cb2), 1)

        # Never had any cb3 in there.
        self.assertEqual(f.remove_done_callback(cb3), 0)

        # After this there will be 6 instances of cb1 and one of cb2.
        f.add_done_callback(cb2)
        for i in range(5):
            f.add_done_callback(cb1)

        # Remove all instances of cb1. One cb2 remains.
        self.assertEqual(f.remove_done_callback(cb1), 6)

        self.assertEqual(bag, [])
        f.set_result('foo')

        self.run_briefly()

        self.assertEqual(bag, [2])
        self.assertEqual(f.result(), 'foo')


if __name__ == '__main__':
    unittest.main()

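Aside, not part of the diff: the done-callback tests above pin down two details worth remembering, as sketched below. Callbacks run through the event loop (they are scheduled with call_soon() inside set_result(), not invoked synchronously), and remove_done_callback() returns how many registered instances it removed.

import asyncio

loop = asyncio.get_event_loop()
fut = asyncio.Future(loop=loop)

def cb(future):
    print('done with', future.result())

fut.add_done_callback(cb)
fut.add_done_callback(cb)
print(fut.remove_done_callback(cb))  # -> 2: both instances removed

fut.add_done_callback(cb)
fut.set_result('foo')          # cb is only scheduled here, via call_soon()
loop.run_until_complete(fut)   # running the loop actually invokes cb
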
858
tests/test_locks.py
Normal file
@@ -0,0 +1,858 @@
"""Tests for lock.py"""

import unittest
from unittest import mock
import re

import asyncio
from asyncio import test_utils


STR_RGX_REPR = (
    r'^<(?P<class>.*?) object at (?P<address>.*?)'
    r'\[(?P<extras>'
    r'(set|unset|locked|unlocked)(,value:\d)?(,waiters:\d+)?'
    r')\]>\Z'
)
RGX_REPR = re.compile(STR_RGX_REPR)


class LockTests(test_utils.TestCase):

    def setUp(self):
        self.loop = self.new_test_loop()

    def test_ctor_loop(self):
        loop = mock.Mock()
        lock = asyncio.Lock(loop=loop)
        self.assertIs(lock._loop, loop)

        lock = asyncio.Lock(loop=self.loop)
        self.assertIs(lock._loop, self.loop)

    def test_ctor_noloop(self):
        asyncio.set_event_loop(self.loop)
        lock = asyncio.Lock()
        self.assertIs(lock._loop, self.loop)

    def test_repr(self):
        lock = asyncio.Lock(loop=self.loop)
        self.assertTrue(repr(lock).endswith('[unlocked]>'))
        self.assertTrue(RGX_REPR.match(repr(lock)))

        @asyncio.coroutine
        def acquire_lock():
            yield from lock

        self.loop.run_until_complete(acquire_lock())
        self.assertTrue(repr(lock).endswith('[locked]>'))
        self.assertTrue(RGX_REPR.match(repr(lock)))

    def test_lock(self):
        lock = asyncio.Lock(loop=self.loop)

        @asyncio.coroutine
        def acquire_lock():
            return (yield from lock)

        res = self.loop.run_until_complete(acquire_lock())

        self.assertTrue(res)
        self.assertTrue(lock.locked())

        lock.release()
        self.assertFalse(lock.locked())

    def test_acquire(self):
        lock = asyncio.Lock(loop=self.loop)
        result = []

        self.assertTrue(self.loop.run_until_complete(lock.acquire()))

        @asyncio.coroutine
        def c1(result):
            if (yield from lock.acquire()):
                result.append(1)
            return True

        @asyncio.coroutine
        def c2(result):
            if (yield from lock.acquire()):
                result.append(2)
            return True

        @asyncio.coroutine
        def c3(result):
            if (yield from lock.acquire()):
                result.append(3)
            return True

        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        lock.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)

        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)

        t3 = asyncio.Task(c3(result), loop=self.loop)

        lock.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2], result)

        lock.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2, 3], result)

        self.assertTrue(t1.done())
        self.assertTrue(t1.result())
        self.assertTrue(t2.done())
        self.assertTrue(t2.result())
        self.assertTrue(t3.done())
        self.assertTrue(t3.result())

    def test_acquire_cancel(self):
        lock = asyncio.Lock(loop=self.loop)
        self.assertTrue(self.loop.run_until_complete(lock.acquire()))

        task = asyncio.Task(lock.acquire(), loop=self.loop)
        self.loop.call_soon(task.cancel)
        self.assertRaises(
            asyncio.CancelledError,
            self.loop.run_until_complete, task)
        self.assertFalse(lock._waiters)

    def test_cancel_race(self):
        # Several tasks:
        # - A acquires the lock
        # - B is blocked in acquire()
        # - C is blocked in acquire()
        #
        # Now, concurrently:
        # - B is cancelled
        # - A releases the lock
        #
        # If B's waiter is marked cancelled but not yet removed from
        # _waiters, A's release() call will crash when trying to set
        # B's waiter; instead, it should move on to C's waiter.

        # Setup: A has the lock, B and C are waiting.
        lock = asyncio.Lock(loop=self.loop)

        @asyncio.coroutine
        def lockit(name, blocker):
            yield from lock.acquire()
            try:
                if blocker is not None:
                    yield from blocker
            finally:
                lock.release()

        fa = asyncio.Future(loop=self.loop)
        ta = asyncio.Task(lockit('A', fa), loop=self.loop)
        test_utils.run_briefly(self.loop)
        self.assertTrue(lock.locked())
        tb = asyncio.Task(lockit('B', None), loop=self.loop)
        test_utils.run_briefly(self.loop)
        self.assertEqual(len(lock._waiters), 1)
        tc = asyncio.Task(lockit('C', None), loop=self.loop)
        test_utils.run_briefly(self.loop)
        self.assertEqual(len(lock._waiters), 2)

        # Create the race and check.
        # Without the fix this failed at the last assert.
        fa.set_result(None)
        tb.cancel()
        self.assertTrue(lock._waiters[0].cancelled())
        test_utils.run_briefly(self.loop)
        self.assertFalse(lock.locked())
        self.assertTrue(ta.done())
        self.assertTrue(tb.cancelled())
        self.assertTrue(tc.done())

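Aside, not part of the diff: at user level, the property this test protects is that a waiter cancelled while queued on the lock must not prevent later waiters from being woken. A condensed sketch of the same scenario (B is cancelled while queued; C must still get the lock):

import asyncio

loop = asyncio.get_event_loop()
lock = asyncio.Lock(loop=loop)

@asyncio.coroutine
def holder():
    yield from lock.acquire()
    yield from asyncio.sleep(0.01, loop=loop)
    lock.release()  # must skip B's cancelled waiter and wake C

@asyncio.coroutine
def waiter(name):
    yield from lock.acquire()
    lock.release()
    return name

a = asyncio.Task(holder(), loop=loop)
b = asyncio.Task(waiter('B'), loop=loop)
c = asyncio.Task(waiter('C'), loop=loop)

loop.call_soon(b.cancel)  # cancel B while it is queued on the lock
loop.run_until_complete(c)
loop.run_until_complete(a)
print(c.result())  # -> 'C'
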
    def test_release_not_acquired(self):
        lock = asyncio.Lock(loop=self.loop)

        self.assertRaises(RuntimeError, lock.release)

    def test_release_no_waiters(self):
        lock = asyncio.Lock(loop=self.loop)
        self.loop.run_until_complete(lock.acquire())
        self.assertTrue(lock.locked())

        lock.release()
        self.assertFalse(lock.locked())

    def test_context_manager(self):
        lock = asyncio.Lock(loop=self.loop)

        @asyncio.coroutine
        def acquire_lock():
            return (yield from lock)

        with self.loop.run_until_complete(acquire_lock()):
            self.assertTrue(lock.locked())

        self.assertFalse(lock.locked())

    def test_context_manager_cant_reuse(self):
        lock = asyncio.Lock(loop=self.loop)

        @asyncio.coroutine
        def acquire_lock():
            return (yield from lock)

        # This spells "yield from lock" outside a generator.
        cm = self.loop.run_until_complete(acquire_lock())
        with cm:
            self.assertTrue(lock.locked())

        self.assertFalse(lock.locked())

        with self.assertRaises(AttributeError):
            with cm:
                pass

    def test_context_manager_no_yield(self):
        lock = asyncio.Lock(loop=self.loop)

        try:
            with lock:
                self.fail('RuntimeError is not raised in with expression')
        except RuntimeError as err:
            self.assertEqual(
                str(err),
                '"yield from" should be used as context manager expression')

        self.assertFalse(lock.locked())


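Aside, not part of the diff: the context-manager tests encode the pre-async/await idiom. The lock itself is not a plain context manager; acquisition happens in the yield from expression, which returns a one-shot context-manager object whose __exit__ releases the lock. In use:

import asyncio

loop = asyncio.get_event_loop()
lock = asyncio.Lock(loop=loop)

@asyncio.coroutine
def critical_section():
    # 'yield from lock' acquires and returns a context manager;
    # leaving the with-block releases the lock.
    with (yield from lock):
        assert lock.locked()
    assert not lock.locked()

loop.run_until_complete(critical_section())
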
class EventTests(test_utils.TestCase):

    def setUp(self):
        self.loop = self.new_test_loop()

    def test_ctor_loop(self):
        loop = mock.Mock()
        ev = asyncio.Event(loop=loop)
        self.assertIs(ev._loop, loop)

        ev = asyncio.Event(loop=self.loop)
        self.assertIs(ev._loop, self.loop)

    def test_ctor_noloop(self):
        asyncio.set_event_loop(self.loop)
        ev = asyncio.Event()
        self.assertIs(ev._loop, self.loop)

    def test_repr(self):
        ev = asyncio.Event(loop=self.loop)
        self.assertTrue(repr(ev).endswith('[unset]>'))
        match = RGX_REPR.match(repr(ev))
        self.assertEqual(match.group('extras'), 'unset')

        ev.set()
        self.assertTrue(repr(ev).endswith('[set]>'))
        self.assertTrue(RGX_REPR.match(repr(ev)))

        ev._waiters.append(mock.Mock())
        self.assertTrue('waiters:1' in repr(ev))
        self.assertTrue(RGX_REPR.match(repr(ev)))

    def test_wait(self):
        ev = asyncio.Event(loop=self.loop)
        self.assertFalse(ev.is_set())

        result = []

        @asyncio.coroutine
        def c1(result):
            if (yield from ev.wait()):
                result.append(1)

        @asyncio.coroutine
        def c2(result):
            if (yield from ev.wait()):
                result.append(2)

        @asyncio.coroutine
        def c3(result):
            if (yield from ev.wait()):
                result.append(3)

        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        t3 = asyncio.Task(c3(result), loop=self.loop)

        ev.set()
        test_utils.run_briefly(self.loop)
        self.assertEqual([3, 1, 2], result)

        self.assertTrue(t1.done())
        self.assertIsNone(t1.result())
        self.assertTrue(t2.done())
        self.assertIsNone(t2.result())
        self.assertTrue(t3.done())
        self.assertIsNone(t3.result())

    def test_wait_on_set(self):
        ev = asyncio.Event(loop=self.loop)
        ev.set()

        res = self.loop.run_until_complete(ev.wait())
        self.assertTrue(res)

    def test_wait_cancel(self):
        ev = asyncio.Event(loop=self.loop)

        wait = asyncio.Task(ev.wait(), loop=self.loop)
        self.loop.call_soon(wait.cancel)
        self.assertRaises(
            asyncio.CancelledError,
            self.loop.run_until_complete, wait)
        self.assertFalse(ev._waiters)

    def test_clear(self):
        ev = asyncio.Event(loop=self.loop)
        self.assertFalse(ev.is_set())

        ev.set()
        self.assertTrue(ev.is_set())

        ev.clear()
        self.assertFalse(ev.is_set())

    def test_clear_with_waiters(self):
        ev = asyncio.Event(loop=self.loop)
        result = []

        @asyncio.coroutine
        def c1(result):
            if (yield from ev.wait()):
                result.append(1)
            return True

        t = asyncio.Task(c1(result), loop=self.loop)
        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        ev.set()
        ev.clear()
        self.assertFalse(ev.is_set())

        ev.set()
        ev.set()
        self.assertEqual(1, len(ev._waiters))

        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)
        self.assertEqual(0, len(ev._waiters))

        self.assertTrue(t.done())
        self.assertTrue(t.result())


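Aside, not part of the diff: a minimal Event round-trip matching what these tests exercise. Note from test_clear_with_waiters that waiters woken by set() still complete even if clear() runs before they resume, because each waiter's future was already marked done.

import asyncio

loop = asyncio.get_event_loop()
event = asyncio.Event(loop=loop)

@asyncio.coroutine
def waiter():
    yield from event.wait()  # returns True once the event is set
    return 'woken'

t = asyncio.Task(waiter(), loop=loop)
loop.call_soon(event.set)
print(loop.run_until_complete(t))  # -> 'woken'
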
class ConditionTests(test_utils.TestCase):

    def setUp(self):
        self.loop = self.new_test_loop()

    def test_ctor_loop(self):
        loop = mock.Mock()
        cond = asyncio.Condition(loop=loop)
        self.assertIs(cond._loop, loop)

        cond = asyncio.Condition(loop=self.loop)
        self.assertIs(cond._loop, self.loop)

    def test_ctor_noloop(self):
        asyncio.set_event_loop(self.loop)
        cond = asyncio.Condition()
        self.assertIs(cond._loop, self.loop)

    def test_wait(self):
        cond = asyncio.Condition(loop=self.loop)
        result = []

        @asyncio.coroutine
        def c1(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(1)
            return True

        @asyncio.coroutine
        def c2(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(2)
            return True

        @asyncio.coroutine
        def c3(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(3)
            return True

        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)
        t3 = asyncio.Task(c3(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)
        self.assertFalse(cond.locked())

        self.assertTrue(self.loop.run_until_complete(cond.acquire()))
        cond.notify()
        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)
        self.assertTrue(cond.locked())

        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)
        self.assertTrue(cond.locked())

        cond.notify(2)
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)
        self.assertTrue(cond.locked())

        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2], result)
        self.assertTrue(cond.locked())

        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2, 3], result)
        self.assertTrue(cond.locked())

        self.assertTrue(t1.done())
        self.assertTrue(t1.result())
        self.assertTrue(t2.done())
        self.assertTrue(t2.result())
        self.assertTrue(t3.done())
        self.assertTrue(t3.result())

    def test_wait_cancel(self):
        cond = asyncio.Condition(loop=self.loop)
        self.loop.run_until_complete(cond.acquire())

        wait = asyncio.Task(cond.wait(), loop=self.loop)
        self.loop.call_soon(wait.cancel)
        self.assertRaises(
            asyncio.CancelledError,
            self.loop.run_until_complete, wait)
        self.assertFalse(cond._waiters)
        self.assertTrue(cond.locked())

    def test_wait_unacquired(self):
        cond = asyncio.Condition(loop=self.loop)
        self.assertRaises(
            RuntimeError,
            self.loop.run_until_complete, cond.wait())

    def test_wait_for(self):
        cond = asyncio.Condition(loop=self.loop)
        presult = False

        def predicate():
            return presult

        result = []

        @asyncio.coroutine
        def c1(result):
            yield from cond.acquire()
            if (yield from cond.wait_for(predicate)):
                result.append(1)
                cond.release()
            return True

        t = asyncio.Task(c1(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        self.loop.run_until_complete(cond.acquire())
        cond.notify()
        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        presult = True
        self.loop.run_until_complete(cond.acquire())
        cond.notify()
        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)

        self.assertTrue(t.done())
        self.assertTrue(t.result())

    def test_wait_for_unacquired(self):
        cond = asyncio.Condition(loop=self.loop)

        # predicate can return true immediately
        res = self.loop.run_until_complete(cond.wait_for(lambda: [1, 2, 3]))
        self.assertEqual([1, 2, 3], res)

        self.assertRaises(
            RuntimeError,
            self.loop.run_until_complete,
            cond.wait_for(lambda: False))

    def test_notify(self):
        cond = asyncio.Condition(loop=self.loop)
        result = []

        @asyncio.coroutine
        def c1(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(1)
                cond.release()
            return True

        @asyncio.coroutine
        def c2(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(2)
                cond.release()
            return True

        @asyncio.coroutine
        def c3(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(3)
                cond.release()
            return True

        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)
        t3 = asyncio.Task(c3(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        self.loop.run_until_complete(cond.acquire())
        cond.notify(1)
        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)

        self.loop.run_until_complete(cond.acquire())
        cond.notify(1)
        cond.notify(2048)
        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2, 3], result)

        self.assertTrue(t1.done())
        self.assertTrue(t1.result())
        self.assertTrue(t2.done())
        self.assertTrue(t2.result())
        self.assertTrue(t3.done())
        self.assertTrue(t3.result())

    def test_notify_all(self):
        cond = asyncio.Condition(loop=self.loop)

        result = []

        @asyncio.coroutine
        def c1(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(1)
                cond.release()
            return True

        @asyncio.coroutine
        def c2(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(2)
                cond.release()
            return True

        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        self.loop.run_until_complete(cond.acquire())
        cond.notify_all()
        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2], result)

        self.assertTrue(t1.done())
        self.assertTrue(t1.result())
        self.assertTrue(t2.done())
        self.assertTrue(t2.result())

    def test_notify_unacquired(self):
        cond = asyncio.Condition(loop=self.loop)
        self.assertRaises(RuntimeError, cond.notify)

    def test_notify_all_unacquired(self):
        cond = asyncio.Condition(loop=self.loop)
        self.assertRaises(RuntimeError, cond.notify_all)

    def test_repr(self):
        cond = asyncio.Condition(loop=self.loop)
        self.assertTrue('unlocked' in repr(cond))
        self.assertTrue(RGX_REPR.match(repr(cond)))

        self.loop.run_until_complete(cond.acquire())
        self.assertTrue('locked' in repr(cond))

        cond._waiters.append(mock.Mock())
        self.assertTrue('waiters:1' in repr(cond))
        self.assertTrue(RGX_REPR.match(repr(cond)))

        cond._waiters.append(mock.Mock())
        self.assertTrue('waiters:2' in repr(cond))
        self.assertTrue(RGX_REPR.match(repr(cond)))

    def test_context_manager(self):
        cond = asyncio.Condition(loop=self.loop)

        @asyncio.coroutine
        def acquire_cond():
            return (yield from cond)

        with self.loop.run_until_complete(acquire_cond()):
            self.assertTrue(cond.locked())

        self.assertFalse(cond.locked())

    def test_context_manager_no_yield(self):
        cond = asyncio.Condition(loop=self.loop)

        try:
            with cond:
                self.fail('RuntimeError is not raised in with expression')
        except RuntimeError as err:
            self.assertEqual(
                str(err),
                '"yield from" should be used as context manager expression')

        self.assertFalse(cond.locked())

    def test_explicit_lock(self):
        lock = asyncio.Lock(loop=self.loop)
        cond = asyncio.Condition(lock, loop=self.loop)

        self.assertIs(cond._lock, lock)
        self.assertIs(cond._loop, lock._loop)

    def test_ambiguous_loops(self):
        loop = self.new_test_loop()
        self.addCleanup(loop.close)

        lock = asyncio.Lock(loop=self.loop)
        with self.assertRaises(ValueError):
            asyncio.Condition(lock, loop=loop)


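Aside, not part of the diff: the Condition tests follow the threading.Condition protocol. wait() releases the underlying lock while blocked and re-acquires it before returning, and notify(n) wakes at most n waiters. A small producer/consumer sketch:

import asyncio

loop = asyncio.get_event_loop()
cond = asyncio.Condition(loop=loop)
items = []

@asyncio.coroutine
def consumer():
    yield from cond.acquire()
    try:
        while not items:
            yield from cond.wait()  # lock is released while waiting
        return items.pop(0)
    finally:
        cond.release()

@asyncio.coroutine
def producer():
    yield from cond.acquire()
    try:
        items.append('payload')
        cond.notify(1)              # wake at most one waiter
    finally:
        cond.release()

t = asyncio.Task(consumer(), loop=loop)
asyncio.Task(producer(), loop=loop)
print(loop.run_until_complete(t))  # -> 'payload'
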
class SemaphoreTests(test_utils.TestCase):

    def setUp(self):
        self.loop = self.new_test_loop()

    def test_ctor_loop(self):
        loop = mock.Mock()
        sem = asyncio.Semaphore(loop=loop)
        self.assertIs(sem._loop, loop)

        sem = asyncio.Semaphore(loop=self.loop)
        self.assertIs(sem._loop, self.loop)

    def test_ctor_noloop(self):
        asyncio.set_event_loop(self.loop)
        sem = asyncio.Semaphore()
        self.assertIs(sem._loop, self.loop)

    def test_initial_value_zero(self):
        sem = asyncio.Semaphore(0, loop=self.loop)
        self.assertTrue(sem.locked())

    def test_repr(self):
        sem = asyncio.Semaphore(loop=self.loop)
        self.assertTrue(repr(sem).endswith('[unlocked,value:1]>'))
        self.assertTrue(RGX_REPR.match(repr(sem)))

        self.loop.run_until_complete(sem.acquire())
        self.assertTrue(repr(sem).endswith('[locked]>'))
        self.assertTrue('waiters' not in repr(sem))
        self.assertTrue(RGX_REPR.match(repr(sem)))

        sem._waiters.append(mock.Mock())
        self.assertTrue('waiters:1' in repr(sem))
        self.assertTrue(RGX_REPR.match(repr(sem)))

        sem._waiters.append(mock.Mock())
        self.assertTrue('waiters:2' in repr(sem))
        self.assertTrue(RGX_REPR.match(repr(sem)))

    def test_semaphore(self):
        sem = asyncio.Semaphore(loop=self.loop)
        self.assertEqual(1, sem._value)

        @asyncio.coroutine
        def acquire_lock():
            return (yield from sem)

        res = self.loop.run_until_complete(acquire_lock())

        self.assertTrue(res)
        self.assertTrue(sem.locked())
        self.assertEqual(0, sem._value)

        sem.release()
        self.assertFalse(sem.locked())
        self.assertEqual(1, sem._value)

    def test_semaphore_value(self):
        self.assertRaises(ValueError, asyncio.Semaphore, -1)

    def test_acquire(self):
        sem = asyncio.Semaphore(3, loop=self.loop)
        result = []

        self.assertTrue(self.loop.run_until_complete(sem.acquire()))
        self.assertTrue(self.loop.run_until_complete(sem.acquire()))
        self.assertFalse(sem.locked())

        @asyncio.coroutine
        def c1(result):
            yield from sem.acquire()
            result.append(1)
            return True

        @asyncio.coroutine
        def c2(result):
            yield from sem.acquire()
            result.append(2)
            return True

        @asyncio.coroutine
        def c3(result):
            yield from sem.acquire()
            result.append(3)
            return True

        @asyncio.coroutine
        def c4(result):
            yield from sem.acquire()
            result.append(4)
            return True

        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)
        t3 = asyncio.Task(c3(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)
        self.assertTrue(sem.locked())
        self.assertEqual(2, len(sem._waiters))
        self.assertEqual(0, sem._value)

        t4 = asyncio.Task(c4(result), loop=self.loop)

        sem.release()
        sem.release()
        self.assertEqual(2, sem._value)

        test_utils.run_briefly(self.loop)
        self.assertEqual(0, sem._value)
        self.assertEqual([1, 2, 3], result)
        self.assertTrue(sem.locked())
        self.assertEqual(1, len(sem._waiters))
        self.assertEqual(0, sem._value)

        self.assertTrue(t1.done())
        self.assertTrue(t1.result())
        self.assertTrue(t2.done())
        self.assertTrue(t2.result())
        self.assertTrue(t3.done())
        self.assertTrue(t3.result())
        self.assertFalse(t4.done())

        # cleanup locked semaphore
        sem.release()
        self.loop.run_until_complete(t4)

    def test_acquire_cancel(self):
        sem = asyncio.Semaphore(loop=self.loop)
        self.loop.run_until_complete(sem.acquire())

        acquire = asyncio.Task(sem.acquire(), loop=self.loop)
        self.loop.call_soon(acquire.cancel)
        self.assertRaises(
            asyncio.CancelledError,
            self.loop.run_until_complete, acquire)
        self.assertFalse(sem._waiters)

    def test_release_not_acquired(self):
        sem = asyncio.BoundedSemaphore(loop=self.loop)

        self.assertRaises(ValueError, sem.release)

    def test_release_no_waiters(self):
        sem = asyncio.Semaphore(loop=self.loop)
        self.loop.run_until_complete(sem.acquire())
        self.assertTrue(sem.locked())

        sem.release()
        self.assertFalse(sem.locked())

    def test_context_manager(self):
        sem = asyncio.Semaphore(2, loop=self.loop)

        @asyncio.coroutine
        def acquire_lock():
            return (yield from sem)

        with self.loop.run_until_complete(acquire_lock()):
            self.assertFalse(sem.locked())
            self.assertEqual(1, sem._value)

            with self.loop.run_until_complete(acquire_lock()):
                self.assertTrue(sem.locked())

        self.assertEqual(2, sem._value)

    def test_context_manager_no_yield(self):
        sem = asyncio.Semaphore(2, loop=self.loop)

        try:
            with sem:
                self.fail('RuntimeError is not raised in with expression')
        except RuntimeError as err:
            self.assertEqual(
                str(err),
                '"yield from" should be used as context manager expression')

        self.assertEqual(2, sem._value)


if __name__ == '__main__':
    unittest.main()

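Aside, not part of the diff: as test_acquire shows, Semaphore(n) lets n acquirers through before blocking, and locked() only reports True once the internal counter reaches zero. A minimal sketch limiting concurrency to two slots:

import asyncio

loop = asyncio.get_event_loop()
sem = asyncio.Semaphore(2, loop=loop)

@asyncio.coroutine
def use_slot(name):
    with (yield from sem):  # acquire; released on exiting the block
        yield from asyncio.sleep(0.01, loop=loop)
        return name

tasks = [asyncio.Task(use_slot(n), loop=loop) for n in 'abc']
loop.run_until_complete(asyncio.wait(tasks, loop=loop))
print(sorted(t.result() for t in tasks))  # -> ['a', 'b', 'c']
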
574
tests/test_proactor_events.py
Normal file
@@ -0,0 +1,574 @@
"""Tests for proactor_events.py"""

import socket
import unittest
from unittest import mock

import asyncio
from asyncio.proactor_events import BaseProactorEventLoop
from asyncio.proactor_events import _ProactorSocketTransport
from asyncio.proactor_events import _ProactorWritePipeTransport
from asyncio.proactor_events import _ProactorDuplexPipeTransport
from asyncio import test_utils


class ProactorSocketTransportTests(test_utils.TestCase):

    def setUp(self):
        self.loop = self.new_test_loop()
        self.proactor = mock.Mock()
        self.loop._proactor = self.proactor
        self.protocol = test_utils.make_test_protocol(asyncio.Protocol)
        self.sock = mock.Mock(socket.socket)

    def test_ctor(self):
        fut = asyncio.Future(loop=self.loop)
        tr = _ProactorSocketTransport(
            self.loop, self.sock, self.protocol, fut)
        test_utils.run_briefly(self.loop)
        self.assertIsNone(fut.result())
        self.protocol.connection_made(tr)
        self.proactor.recv.assert_called_with(self.sock, 4096)

    def test_loop_reading(self):
        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr._loop_reading()
        self.loop._proactor.recv.assert_called_with(self.sock, 4096)
        self.assertFalse(self.protocol.data_received.called)
        self.assertFalse(self.protocol.eof_received.called)

    def test_loop_reading_data(self):
        res = asyncio.Future(loop=self.loop)
        res.set_result(b'data')

        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)

        tr._read_fut = res
        tr._loop_reading(res)
        self.loop._proactor.recv.assert_called_with(self.sock, 4096)
        self.protocol.data_received.assert_called_with(b'data')

    def test_loop_reading_no_data(self):
        res = asyncio.Future(loop=self.loop)
        res.set_result(b'')

        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)

        self.assertRaises(AssertionError, tr._loop_reading, res)

        tr.close = mock.Mock()
        tr._read_fut = res
        tr._loop_reading(res)
        self.assertFalse(self.loop._proactor.recv.called)
        self.assertTrue(self.protocol.eof_received.called)
        self.assertTrue(tr.close.called)

    def test_loop_reading_aborted(self):
        err = self.loop._proactor.recv.side_effect = ConnectionAbortedError()

        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr._fatal_error = mock.Mock()
        tr._loop_reading()
        tr._fatal_error.assert_called_with(
            err,
            'Fatal read error on pipe transport')

    def test_loop_reading_aborted_closing(self):
        self.loop._proactor.recv.side_effect = ConnectionAbortedError()

        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr._closing = True
        tr._fatal_error = mock.Mock()
        tr._loop_reading()
        self.assertFalse(tr._fatal_error.called)

    def test_loop_reading_aborted_is_fatal(self):
        self.loop._proactor.recv.side_effect = ConnectionAbortedError()
        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr._closing = False
        tr._fatal_error = mock.Mock()
        tr._loop_reading()
        self.assertTrue(tr._fatal_error.called)

    def test_loop_reading_conn_reset_lost(self):
        err = self.loop._proactor.recv.side_effect = ConnectionResetError()

        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr._closing = False
        tr._fatal_error = mock.Mock()
        tr._force_close = mock.Mock()
        tr._loop_reading()
        self.assertFalse(tr._fatal_error.called)
        tr._force_close.assert_called_with(err)

    def test_loop_reading_exception(self):
        err = self.loop._proactor.recv.side_effect = (OSError())

        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr._fatal_error = mock.Mock()
        tr._loop_reading()
        tr._fatal_error.assert_called_with(
            err,
            'Fatal read error on pipe transport')

    def test_write(self):
        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr._loop_writing = mock.Mock()
        tr.write(b'data')
        self.assertEqual(tr._buffer, None)
        tr._loop_writing.assert_called_with(data=b'data')

    def test_write_no_data(self):
        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr.write(b'')
        self.assertFalse(tr._buffer)

    def test_write_more(self):
        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr._write_fut = mock.Mock()
        tr._loop_writing = mock.Mock()
        tr.write(b'data')
        self.assertEqual(tr._buffer, b'data')
        self.assertFalse(tr._loop_writing.called)

    def test_loop_writing(self):
        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr._buffer = bytearray(b'data')
        tr._loop_writing()
        self.loop._proactor.send.assert_called_with(self.sock, b'data')
        self.loop._proactor.send.return_value.add_done_callback.\
            assert_called_with(tr._loop_writing)

    @mock.patch('asyncio.proactor_events.logger')
    def test_loop_writing_err(self, m_log):
        err = self.loop._proactor.send.side_effect = OSError()
        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr._fatal_error = mock.Mock()
        tr._buffer = [b'da', b'ta']
        tr._loop_writing()
        tr._fatal_error.assert_called_with(
            err,
            'Fatal write error on pipe transport')
        tr._conn_lost = 1

        tr.write(b'data')
        tr.write(b'data')
        tr.write(b'data')
        tr.write(b'data')
        tr.write(b'data')
        self.assertEqual(tr._buffer, None)
        m_log.warning.assert_called_with('socket.send() raised exception.')

    def test_loop_writing_stop(self):
        fut = asyncio.Future(loop=self.loop)
        fut.set_result(b'data')

        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr._write_fut = fut
        tr._loop_writing(fut)
        self.assertIsNone(tr._write_fut)

    def test_loop_writing_closing(self):
        fut = asyncio.Future(loop=self.loop)
        fut.set_result(1)

        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr._write_fut = fut
        tr.close()
        tr._loop_writing(fut)
        self.assertIsNone(tr._write_fut)
        test_utils.run_briefly(self.loop)
        self.protocol.connection_lost.assert_called_with(None)

    def test_abort(self):
        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr._force_close = mock.Mock()
        tr.abort()
        tr._force_close.assert_called_with(None)

    def test_close(self):
        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr.close()
        test_utils.run_briefly(self.loop)
        self.protocol.connection_lost.assert_called_with(None)
        self.assertTrue(tr._closing)
        self.assertEqual(tr._conn_lost, 1)

        self.protocol.connection_lost.reset_mock()
        tr.close()
        test_utils.run_briefly(self.loop)
        self.assertFalse(self.protocol.connection_lost.called)

    def test_close_write_fut(self):
        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr._write_fut = mock.Mock()
        tr.close()
        test_utils.run_briefly(self.loop)
        self.assertFalse(self.protocol.connection_lost.called)

    def test_close_buffer(self):
        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr._buffer = [b'data']
        tr.close()
        test_utils.run_briefly(self.loop)
        self.assertFalse(self.protocol.connection_lost.called)

    @mock.patch('asyncio.base_events.logger')
    def test_fatal_error(self, m_logging):
        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr._force_close = mock.Mock()
        tr._fatal_error(None)
        self.assertTrue(tr._force_close.called)
        self.assertTrue(m_logging.error.called)

    def test_force_close(self):
        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr._buffer = [b'data']
        read_fut = tr._read_fut = mock.Mock()
        write_fut = tr._write_fut = mock.Mock()
        tr._force_close(None)

        read_fut.cancel.assert_called_with()
        write_fut.cancel.assert_called_with()
        test_utils.run_briefly(self.loop)
        self.protocol.connection_lost.assert_called_with(None)
        self.assertEqual(None, tr._buffer)
        self.assertEqual(tr._conn_lost, 1)

    def test_force_close_idempotent(self):
        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr._closing = True
        tr._force_close(None)
        test_utils.run_briefly(self.loop)
        self.assertFalse(self.protocol.connection_lost.called)

    def test_fatal_error_2(self):
        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr._buffer = [b'data']
        tr._force_close(None)

        test_utils.run_briefly(self.loop)
        self.protocol.connection_lost.assert_called_with(None)
        self.assertEqual(None, tr._buffer)

    def test_call_connection_lost(self):
        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr._call_connection_lost(None)
        self.assertTrue(self.protocol.connection_lost.called)
        self.assertTrue(self.sock.close.called)

    def test_write_eof(self):
        tr = _ProactorSocketTransport(
            self.loop, self.sock, self.protocol)
        self.assertTrue(tr.can_write_eof())
        tr.write_eof()
        self.sock.shutdown.assert_called_with(socket.SHUT_WR)
        tr.write_eof()
        self.assertEqual(self.sock.shutdown.call_count, 1)
        tr.close()

    def test_write_eof_buffer(self):
        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        f = asyncio.Future(loop=self.loop)
        tr._loop._proactor.send.return_value = f
        tr.write(b'data')
        tr.write_eof()
        self.assertTrue(tr._eof_written)
        self.assertFalse(self.sock.shutdown.called)
        tr._loop._proactor.send.assert_called_with(self.sock, b'data')
        f.set_result(4)
        self.loop._run_once()
        self.sock.shutdown.assert_called_with(socket.SHUT_WR)
        tr.close()

    def test_write_eof_write_pipe(self):
        tr = _ProactorWritePipeTransport(
            self.loop, self.sock, self.protocol)
        self.assertTrue(tr.can_write_eof())
        tr.write_eof()
        self.assertTrue(tr._closing)
        self.loop._run_once()
        self.assertTrue(self.sock.close.called)
        tr.close()

    def test_write_eof_buffer_write_pipe(self):
        tr = _ProactorWritePipeTransport(self.loop, self.sock, self.protocol)
        f = asyncio.Future(loop=self.loop)
        tr._loop._proactor.send.return_value = f
        tr.write(b'data')
        tr.write_eof()
        self.assertTrue(tr._closing)
        self.assertFalse(self.sock.shutdown.called)
        tr._loop._proactor.send.assert_called_with(self.sock, b'data')
        f.set_result(4)
        self.loop._run_once()
        self.loop._run_once()
        self.assertTrue(self.sock.close.called)
        tr.close()

    def test_write_eof_duplex_pipe(self):
        tr = _ProactorDuplexPipeTransport(
            self.loop, self.sock, self.protocol)
        self.assertFalse(tr.can_write_eof())
        with self.assertRaises(NotImplementedError):
            tr.write_eof()
        tr.close()

    def test_pause_resume_reading(self):
        tr = _ProactorSocketTransport(
            self.loop, self.sock, self.protocol)
        futures = []
        for msg in [b'data1', b'data2', b'data3', b'data4', b'']:
            f = asyncio.Future(loop=self.loop)
            f.set_result(msg)
            futures.append(f)
        self.loop._proactor.recv.side_effect = futures
        self.loop._run_once()
        self.assertFalse(tr._paused)
        self.loop._run_once()
        self.protocol.data_received.assert_called_with(b'data1')
        self.loop._run_once()
        self.protocol.data_received.assert_called_with(b'data2')
        tr.pause_reading()
        self.assertTrue(tr._paused)
        for i in range(10):
            self.loop._run_once()
        self.protocol.data_received.assert_called_with(b'data2')
        tr.resume_reading()
        self.assertFalse(tr._paused)
        self.loop._run_once()
        self.protocol.data_received.assert_called_with(b'data3')
        self.loop._run_once()
        self.protocol.data_received.assert_called_with(b'data4')
        tr.close()


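Aside, not part of the diff: test_pause_resume_reading pins down the proactor read loop. While the transport is paused it stops issuing recv() calls, so no further data_received() callbacks arrive until resume_reading(). At protocol level the same mechanism can be used for inbound throttling; a minimal sketch (the class name is hypothetical):

import asyncio

class ThrottledProtocol(asyncio.Protocol):
    """Pauses the transport after each chunk, resumes a bit later."""

    def connection_made(self, transport):
        self.transport = transport

    def data_received(self, data):
        self.transport.pause_reading()  # no more data_received() calls
        # ... process `data` ... then let reading continue shortly after
        loop = asyncio.get_event_loop()
        loop.call_later(0.1, self.transport.resume_reading)
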
    def pause_writing_transport(self, high):
        tr = _ProactorSocketTransport(
            self.loop, self.sock, self.protocol)
        self.addCleanup(tr.close)

        tr.set_write_buffer_limits(high=high)

        self.assertEqual(tr.get_write_buffer_size(), 0)
        self.assertFalse(self.protocol.pause_writing.called)
        self.assertFalse(self.protocol.resume_writing.called)
        return tr

    def test_pause_resume_writing(self):
        tr = self.pause_writing_transport(high=4)

        # write a large chunk, must pause writing
        fut = asyncio.Future(loop=self.loop)
        self.loop._proactor.send.return_value = fut
        tr.write(b'large data')
        self.loop._run_once()
        self.assertTrue(self.protocol.pause_writing.called)

        # flush the buffer
        fut.set_result(None)
        self.loop._run_once()
        self.assertEqual(tr.get_write_buffer_size(), 0)
        self.assertTrue(self.protocol.resume_writing.called)

    def test_pause_writing_2write(self):
        tr = self.pause_writing_transport(high=4)

        # first short write, the buffer is not full (3 <= 4)
        fut1 = asyncio.Future(loop=self.loop)
        self.loop._proactor.send.return_value = fut1
        tr.write(b'123')
        self.loop._run_once()
        self.assertEqual(tr.get_write_buffer_size(), 3)
        self.assertFalse(self.protocol.pause_writing.called)

        # fill the buffer, must pause writing (6 > 4)
        tr.write(b'abc')
        self.loop._run_once()
        self.assertEqual(tr.get_write_buffer_size(), 6)
        self.assertTrue(self.protocol.pause_writing.called)

    def test_pause_writing_3write(self):
        tr = self.pause_writing_transport(high=4)

        # first short write, the buffer is not full (1 <= 4)
        fut = asyncio.Future(loop=self.loop)
        self.loop._proactor.send.return_value = fut
        tr.write(b'1')
        self.loop._run_once()
        self.assertEqual(tr.get_write_buffer_size(), 1)
        self.assertFalse(self.protocol.pause_writing.called)

        # second short write, the buffer is not full (3 <= 4)
        tr.write(b'23')
        self.loop._run_once()
        self.assertEqual(tr.get_write_buffer_size(), 3)
        self.assertFalse(self.protocol.pause_writing.called)

        # fill the buffer, must pause writing (6 > 4)
        tr.write(b'abc')
        self.loop._run_once()
        self.assertEqual(tr.get_write_buffer_size(), 6)
        self.assertTrue(self.protocol.pause_writing.called)

    def test_dont_pause_writing(self):
        tr = self.pause_writing_transport(high=4)

        # write a large chunk which completes immediately,
        # it should not pause writing
        fut = asyncio.Future(loop=self.loop)
        fut.set_result(None)
        self.loop._proactor.send.return_value = fut
        tr.write(b'very large data')
        self.loop._run_once()
        self.assertEqual(tr.get_write_buffer_size(), 0)
        self.assertFalse(self.protocol.pause_writing.called)


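Aside, not part of the diff: the pause_writing tests describe outbound flow control. Once get_write_buffer_size() exceeds the high-water mark set via set_write_buffer_limits(), the transport calls protocol.pause_writing(), and resume_writing() once the buffer drains. A protocol cooperating with that looks roughly like this sketch (a real implementation would buffer rather than drop data while paused):

import asyncio

class FlowControlledProtocol(asyncio.Protocol):
    """Stops producing while the transport's write buffer is full."""

    def connection_made(self, transport):
        transport.set_write_buffer_limits(high=64 * 1024)
        self.transport = transport
        self.paused = False

    def pause_writing(self):
        self.paused = True    # buffer above high-water mark: stop writing

    def resume_writing(self):
        self.paused = False   # buffer drained: safe to write again

    def send(self, data):
        if not self.paused:
            self.transport.write(data)
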
class BaseProactorEventLoopTests(test_utils.TestCase):

    def setUp(self):
        self.sock = mock.Mock(socket.socket)
        self.proactor = mock.Mock()

        self.ssock, self.csock = mock.Mock(), mock.Mock()

        class EventLoop(BaseProactorEventLoop):
            def _socketpair(s):
                return (self.ssock, self.csock)

        self.loop = EventLoop(self.proactor)
        self.set_event_loop(self.loop, cleanup=False)

    @mock.patch.object(BaseProactorEventLoop, '_call_soon')
    @mock.patch.object(BaseProactorEventLoop, '_socketpair')
    def test_ctor(self, socketpair, _call_soon):
        ssock, csock = socketpair.return_value = (
            mock.Mock(), mock.Mock())
        loop = BaseProactorEventLoop(self.proactor)
        self.assertIs(loop._ssock, ssock)
        self.assertIs(loop._csock, csock)
        self.assertEqual(loop._internal_fds, 1)
        _call_soon.assert_called_with(loop._loop_self_reading, (),
                                      check_loop=False)

    def test_close_self_pipe(self):
        self.loop._close_self_pipe()
        self.assertEqual(self.loop._internal_fds, 0)
        self.assertTrue(self.ssock.close.called)
        self.assertTrue(self.csock.close.called)
        self.assertIsNone(self.loop._ssock)
        self.assertIsNone(self.loop._csock)

    def test_close(self):
        self.loop._close_self_pipe = mock.Mock()
        self.loop.close()
        self.assertTrue(self.loop._close_self_pipe.called)
        self.assertTrue(self.proactor.close.called)
        self.assertIsNone(self.loop._proactor)

        self.loop._close_self_pipe.reset_mock()
        self.loop.close()
        self.assertFalse(self.loop._close_self_pipe.called)

    def test_sock_recv(self):
        self.loop.sock_recv(self.sock, 1024)
        self.proactor.recv.assert_called_with(self.sock, 1024)

    def test_sock_sendall(self):
        self.loop.sock_sendall(self.sock, b'data')
        self.proactor.send.assert_called_with(self.sock, b'data')

    def test_sock_connect(self):
        self.loop.sock_connect(self.sock, 123)
        self.proactor.connect.assert_called_with(self.sock, 123)

    def test_sock_accept(self):
        self.loop.sock_accept(self.sock)
        self.proactor.accept.assert_called_with(self.sock)

    def test_socketpair(self):
        self.assertRaises(
            NotImplementedError, BaseProactorEventLoop, self.proactor)

    def test_make_socket_transport(self):
        tr = self.loop._make_socket_transport(self.sock, asyncio.Protocol())
        self.assertIsInstance(tr, _ProactorSocketTransport)

    def test_loop_self_reading(self):
        self.loop._loop_self_reading()
        self.proactor.recv.assert_called_with(self.ssock, 4096)
        self.proactor.recv.return_value.add_done_callback.assert_called_with(
            self.loop._loop_self_reading)

    def test_loop_self_reading_fut(self):
        fut = mock.Mock()
        self.loop._loop_self_reading(fut)
        self.assertTrue(fut.result.called)
        self.proactor.recv.assert_called_with(self.ssock, 4096)
        self.proactor.recv.return_value.add_done_callback.assert_called_with(
            self.loop._loop_self_reading)

    def test_loop_self_reading_exception(self):
        self.loop.close = mock.Mock()
        self.proactor.recv.side_effect = OSError()
        self.assertRaises(OSError, self.loop._loop_self_reading)
        self.assertTrue(self.loop.close.called)

    def test_write_to_self(self):
        self.loop._write_to_self()
        self.csock.send.assert_called_with(b'\0')

    def test_process_events(self):
        self.loop._process_events([])

    @mock.patch('asyncio.base_events.logger')
    def test_create_server(self, m_log):
        pf = mock.Mock()
        call_soon = self.loop.call_soon = mock.Mock()

        self.loop._start_serving(pf, self.sock)
        self.assertTrue(call_soon.called)

        # callback
        loop = call_soon.call_args[0][0]
        loop()
        self.proactor.accept.assert_called_with(self.sock)

        # conn
        fut = mock.Mock()
        fut.result.return_value = (mock.Mock(), mock.Mock())

        make_tr = self.loop._make_socket_transport = mock.Mock()
        loop(fut)
        self.assertTrue(fut.result.called)
        self.assertTrue(make_tr.called)

        # exception
        fut.result.side_effect = OSError()
        loop(fut)
        self.assertTrue(self.sock.close.called)
        self.assertTrue(m_log.error.called)

    def test_create_server_cancel(self):
        pf = mock.Mock()
        call_soon = self.loop.call_soon = mock.Mock()

        self.loop._start_serving(pf, self.sock)
        loop = call_soon.call_args[0][0]

        # cancelled
        fut = asyncio.Future(loop=self.loop)
        fut.cancel()
        loop(fut)
        self.assertTrue(self.sock.close.called)

    def test_stop_serving(self):
        sock = mock.Mock()
        self.loop._stop_serving(sock)
        self.assertTrue(sock.close.called)
        self.proactor._stop_serving.assert_called_with(sock)


if __name__ == '__main__':
    unittest.main()

476
tests/test_queues.py
Normal file
@@ -0,0 +1,476 @@
"""Tests for queues.py"""

import unittest
from unittest import mock

import asyncio
from asyncio import test_utils


class _QueueTestBase(test_utils.TestCase):

    def setUp(self):
        self.loop = self.new_test_loop()


class QueueBasicTests(_QueueTestBase):

    def _test_repr_or_str(self, fn, expect_id):
        """Test Queue's repr or str.

        fn is repr or str. expect_id is True if we expect the Queue's id to
        appear in fn(Queue()).
        """
        def gen():
            when = yield
            self.assertAlmostEqual(0.1, when)
            when = yield 0.1
            self.assertAlmostEqual(0.2, when)
            yield 0.1

        loop = self.new_test_loop(gen)

        q = asyncio.Queue(loop=loop)
        self.assertTrue(fn(q).startswith('<Queue'), fn(q))
        id_is_present = hex(id(q)) in fn(q)
        self.assertEqual(expect_id, id_is_present)

        @asyncio.coroutine
        def add_getter():
            q = asyncio.Queue(loop=loop)
            # Start a task that waits to get.
            asyncio.Task(q.get(), loop=loop)
            # Let it start waiting.
            yield from asyncio.sleep(0.1, loop=loop)
            self.assertTrue('_getters[1]' in fn(q))
            # resume q.get coroutine to finish generator
            q.put_nowait(0)

        loop.run_until_complete(add_getter())

        @asyncio.coroutine
        def add_putter():
            q = asyncio.Queue(maxsize=1, loop=loop)
            q.put_nowait(1)
            # Start a task that waits to put.
            asyncio.Task(q.put(2), loop=loop)
            # Let it start waiting.
            yield from asyncio.sleep(0.1, loop=loop)
            self.assertTrue('_putters[1]' in fn(q))
            # resume q.put coroutine to finish generator
            q.get_nowait()

        loop.run_until_complete(add_putter())

        q = asyncio.Queue(loop=loop)
        q.put_nowait(1)
        self.assertTrue('_queue=[1]' in fn(q))

    def test_ctor_loop(self):
        loop = mock.Mock()
        q = asyncio.Queue(loop=loop)
        self.assertIs(q._loop, loop)

        q = asyncio.Queue(loop=self.loop)
        self.assertIs(q._loop, self.loop)

    def test_ctor_noloop(self):
        asyncio.set_event_loop(self.loop)
        q = asyncio.Queue()
        self.assertIs(q._loop, self.loop)

    def test_repr(self):
        self._test_repr_or_str(repr, True)

    def test_str(self):
        self._test_repr_or_str(str, False)

    def test_empty(self):
        q = asyncio.Queue(loop=self.loop)
        self.assertTrue(q.empty())
        q.put_nowait(1)
        self.assertFalse(q.empty())
        self.assertEqual(1, q.get_nowait())
        self.assertTrue(q.empty())

    def test_full(self):
        q = asyncio.Queue(loop=self.loop)
        self.assertFalse(q.full())

        q = asyncio.Queue(maxsize=1, loop=self.loop)
        q.put_nowait(1)
        self.assertTrue(q.full())

    def test_order(self):
        q = asyncio.Queue(loop=self.loop)
        for i in [1, 3, 2]:
            q.put_nowait(i)

        items = [q.get_nowait() for _ in range(3)]
        self.assertEqual([1, 3, 2], items)

    def test_maxsize(self):

        def gen():
            when = yield
            self.assertAlmostEqual(0.01, when)
            when = yield 0.01
            self.assertAlmostEqual(0.02, when)
            yield 0.01

        loop = self.new_test_loop(gen)

        q = asyncio.Queue(maxsize=2, loop=loop)
        self.assertEqual(2, q.maxsize)
        have_been_put = []

        @asyncio.coroutine
        def putter():
            for i in range(3):
                yield from q.put(i)
                have_been_put.append(i)
            return True

        @asyncio.coroutine
        def test():
            t = asyncio.Task(putter(), loop=loop)
            yield from asyncio.sleep(0.01, loop=loop)

            # The putter is blocked after putting two items.
            self.assertEqual([0, 1], have_been_put)
            self.assertEqual(0, q.get_nowait())

            # Let the putter resume and put last item.
            yield from asyncio.sleep(0.01, loop=loop)
            self.assertEqual([0, 1, 2], have_been_put)
            self.assertEqual(1, q.get_nowait())
            self.assertEqual(2, q.get_nowait())

            self.assertTrue(t.done())
            self.assertTrue(t.result())

        loop.run_until_complete(test())
        self.assertAlmostEqual(0.02, loop.time())


class QueueGetTests(_QueueTestBase):
|
||||
|
||||
def test_blocking_get(self):
|
||||
q = asyncio.Queue(loop=self.loop)
|
||||
q.put_nowait(1)
|
||||
|
||||
@asyncio.coroutine
|
||||
def queue_get():
|
||||
return (yield from q.get())
|
||||
|
||||
res = self.loop.run_until_complete(queue_get())
|
||||
self.assertEqual(1, res)
|
||||
|
||||
def test_get_with_putters(self):
|
||||
q = asyncio.Queue(1, loop=self.loop)
|
||||
q.put_nowait(1)
|
||||
|
||||
waiter = asyncio.Future(loop=self.loop)
|
||||
q._putters.append((2, waiter))
|
||||
|
||||
res = self.loop.run_until_complete(q.get())
|
||||
self.assertEqual(1, res)
|
||||
self.assertTrue(waiter.done())
|
||||
self.assertIsNone(waiter.result())
|
||||
|
||||
def test_blocking_get_wait(self):
|
||||
|
||||
def gen():
|
||||
when = yield
|
||||
self.assertAlmostEqual(0.01, when)
|
||||
yield 0.01
|
||||
|
||||
loop = self.new_test_loop(gen)
|
||||
|
||||
q = asyncio.Queue(loop=loop)
|
||||
started = asyncio.Event(loop=loop)
|
||||
finished = False
|
||||
|
||||
@asyncio.coroutine
|
||||
def queue_get():
|
||||
nonlocal finished
|
||||
started.set()
|
||||
res = yield from q.get()
|
||||
finished = True
|
||||
return res
|
||||
|
||||
@asyncio.coroutine
|
||||
def queue_put():
|
||||
loop.call_later(0.01, q.put_nowait, 1)
|
||||
queue_get_task = asyncio.Task(queue_get(), loop=loop)
|
||||
yield from started.wait()
|
||||
self.assertFalse(finished)
|
||||
res = yield from queue_get_task
|
||||
self.assertTrue(finished)
|
||||
return res
|
||||
|
||||
res = loop.run_until_complete(queue_put())
|
||||
self.assertEqual(1, res)
|
||||
self.assertAlmostEqual(0.01, loop.time())
|
||||
|
||||
def test_nonblocking_get(self):
|
||||
q = asyncio.Queue(loop=self.loop)
|
||||
q.put_nowait(1)
|
||||
self.assertEqual(1, q.get_nowait())
|
||||
|
||||
def test_nonblocking_get_exception(self):
|
||||
q = asyncio.Queue(loop=self.loop)
|
||||
self.assertRaises(asyncio.QueueEmpty, q.get_nowait)
|
||||
|
||||
def test_get_cancelled(self):
|
||||
|
||||
def gen():
|
||||
when = yield
|
||||
self.assertAlmostEqual(0.01, when)
|
||||
when = yield 0.01
|
||||
self.assertAlmostEqual(0.061, when)
|
||||
yield 0.05
|
||||
|
||||
loop = self.new_test_loop(gen)
|
||||
|
||||
q = asyncio.Queue(loop=loop)
|
||||
|
||||
@asyncio.coroutine
|
||||
def queue_get():
|
||||
return (yield from asyncio.wait_for(q.get(), 0.051, loop=loop))
|
||||
|
||||
@asyncio.coroutine
|
||||
def test():
|
||||
get_task = asyncio.Task(queue_get(), loop=loop)
|
||||
yield from asyncio.sleep(0.01, loop=loop) # let the task start
|
||||
q.put_nowait(1)
|
||||
return (yield from get_task)
|
||||
|
||||
self.assertEqual(1, loop.run_until_complete(test()))
|
||||
self.assertAlmostEqual(0.06, loop.time())
|
||||
|
||||
def test_get_cancelled_race(self):
|
||||
q = asyncio.Queue(loop=self.loop)
|
||||
|
||||
t1 = asyncio.Task(q.get(), loop=self.loop)
|
||||
t2 = asyncio.Task(q.get(), loop=self.loop)
|
||||
|
||||
test_utils.run_briefly(self.loop)
|
||||
t1.cancel()
|
||||
test_utils.run_briefly(self.loop)
|
||||
self.assertTrue(t1.done())
|
||||
q.put_nowait('a')
|
||||
test_utils.run_briefly(self.loop)
|
||||
self.assertEqual(t2.result(), 'a')
|
||||
|
||||
def test_get_with_waiting_putters(self):
|
||||
q = asyncio.Queue(loop=self.loop, maxsize=1)
|
||||
asyncio.Task(q.put('a'), loop=self.loop)
|
||||
asyncio.Task(q.put('b'), loop=self.loop)
|
||||
test_utils.run_briefly(self.loop)
|
||||
self.assertEqual(self.loop.run_until_complete(q.get()), 'a')
|
||||
self.assertEqual(self.loop.run_until_complete(q.get()), 'b')
|
||||
|
||||
|
||||
class QueuePutTests(_QueueTestBase):
|
||||
|
||||
def test_blocking_put(self):
|
||||
q = asyncio.Queue(loop=self.loop)
|
||||
|
||||
@asyncio.coroutine
|
||||
def queue_put():
|
||||
# No maxsize, won't block.
|
||||
yield from q.put(1)
|
||||
|
||||
self.loop.run_until_complete(queue_put())
|
||||
|
||||
def test_blocking_put_wait(self):
|
||||
|
||||
def gen():
|
||||
when = yield
|
||||
self.assertAlmostEqual(0.01, when)
|
||||
yield 0.01
|
||||
|
||||
loop = self.new_test_loop(gen)
|
||||
|
||||
q = asyncio.Queue(maxsize=1, loop=loop)
|
||||
started = asyncio.Event(loop=loop)
|
||||
finished = False
|
||||
|
||||
@asyncio.coroutine
|
||||
def queue_put():
|
||||
nonlocal finished
|
||||
started.set()
|
||||
yield from q.put(1)
|
||||
yield from q.put(2)
|
||||
finished = True
|
||||
|
||||
@asyncio.coroutine
|
||||
def queue_get():
|
||||
loop.call_later(0.01, q.get_nowait)
|
||||
queue_put_task = asyncio.Task(queue_put(), loop=loop)
|
||||
yield from started.wait()
|
||||
self.assertFalse(finished)
|
||||
yield from queue_put_task
|
||||
self.assertTrue(finished)
|
||||
|
||||
loop.run_until_complete(queue_get())
|
||||
self.assertAlmostEqual(0.01, loop.time())
|
||||
|
||||
def test_nonblocking_put(self):
|
||||
q = asyncio.Queue(loop=self.loop)
|
||||
q.put_nowait(1)
|
||||
self.assertEqual(1, q.get_nowait())
|
||||
|
||||
def test_nonblocking_put_exception(self):
|
||||
q = asyncio.Queue(maxsize=1, loop=self.loop)
|
||||
q.put_nowait(1)
|
||||
self.assertRaises(asyncio.QueueFull, q.put_nowait, 2)
|
||||
|
||||
def test_float_maxsize(self):
|
||||
q = asyncio.Queue(maxsize=1.3, loop=self.loop)
|
||||
q.put_nowait(1)
|
||||
q.put_nowait(2)
|
||||
self.assertTrue(q.full())
|
||||
self.assertRaises(asyncio.QueueFull, q.put_nowait, 3)
|
||||
|
||||
q = asyncio.Queue(maxsize=1.3, loop=self.loop)
|
||||
@asyncio.coroutine
|
||||
def queue_put():
|
||||
yield from q.put(1)
|
||||
yield from q.put(2)
|
||||
self.assertTrue(q.full())
|
||||
self.loop.run_until_complete(queue_put())
|
||||
|
||||
def test_put_cancelled(self):
|
||||
q = asyncio.Queue(loop=self.loop)
|
||||
|
||||
@asyncio.coroutine
|
||||
def queue_put():
|
||||
yield from q.put(1)
|
||||
return True
|
||||
|
||||
@asyncio.coroutine
|
||||
def test():
|
||||
return (yield from q.get())
|
||||
|
||||
t = asyncio.Task(queue_put(), loop=self.loop)
|
||||
self.assertEqual(1, self.loop.run_until_complete(test()))
|
||||
self.assertTrue(t.done())
|
||||
self.assertTrue(t.result())
|
||||
|
||||
def test_put_cancelled_race(self):
|
||||
q = asyncio.Queue(loop=self.loop, maxsize=1)
|
||||
|
||||
put_a = asyncio.Task(q.put('a'), loop=self.loop)
|
||||
put_b = asyncio.Task(q.put('b'), loop=self.loop)
|
||||
put_c = asyncio.Task(q.put('X'), loop=self.loop)
|
||||
|
||||
test_utils.run_briefly(self.loop)
|
||||
self.assertTrue(put_a.done())
|
||||
self.assertFalse(put_b.done())
|
||||
|
||||
put_c.cancel()
|
||||
test_utils.run_briefly(self.loop)
|
||||
self.assertTrue(put_c.done())
|
||||
self.assertEqual(q.get_nowait(), 'a')
|
||||
self.assertEqual(q.get_nowait(), 'b')
|
||||
|
||||
self.loop.run_until_complete(put_b)
|
||||
|
||||
def test_put_with_waiting_getters(self):
|
||||
q = asyncio.Queue(loop=self.loop)
|
||||
t = asyncio.Task(q.get(), loop=self.loop)
|
||||
test_utils.run_briefly(self.loop)
|
||||
self.loop.run_until_complete(q.put('a'))
|
||||
self.assertEqual(self.loop.run_until_complete(t), 'a')
|
||||
|
||||
|
||||
class LifoQueueTests(_QueueTestBase):
|
||||
|
||||
def test_order(self):
|
||||
q = asyncio.LifoQueue(loop=self.loop)
|
||||
for i in [1, 3, 2]:
|
||||
q.put_nowait(i)
|
||||
|
||||
items = [q.get_nowait() for _ in range(3)]
|
||||
self.assertEqual([2, 3, 1], items)
|
||||
|
||||
|
||||
class PriorityQueueTests(_QueueTestBase):
|
||||
|
||||
def test_order(self):
|
||||
q = asyncio.PriorityQueue(loop=self.loop)
|
||||
for i in [1, 3, 2]:
|
||||
q.put_nowait(i)
|
||||
|
||||
items = [q.get_nowait() for _ in range(3)]
|
||||
self.assertEqual([1, 2, 3], items)
|
||||
|
||||
|
||||
class JoinableQueueTests(_QueueTestBase):
|
||||
|
||||
def test_task_done_underflow(self):
|
||||
q = asyncio.JoinableQueue(loop=self.loop)
|
||||
self.assertRaises(ValueError, q.task_done)
|
||||
|
||||
def test_task_done(self):
|
||||
q = asyncio.JoinableQueue(loop=self.loop)
|
||||
for i in range(100):
|
||||
q.put_nowait(i)
|
||||
|
||||
accumulator = 0
|
||||
|
||||
# Two workers get items from the queue and call task_done after each.
|
||||
# Join the queue and assert all items have been processed.
|
||||
running = True
|
||||
|
||||
@asyncio.coroutine
|
||||
def worker():
|
||||
nonlocal accumulator
|
||||
|
||||
while running:
|
||||
item = yield from q.get()
|
||||
accumulator += item
|
||||
q.task_done()
|
||||
|
||||
@asyncio.coroutine
|
||||
def test():
|
||||
tasks = [asyncio.Task(worker(), loop=self.loop)
|
||||
for index in range(2)]
|
||||
|
||||
yield from q.join()
|
||||
return tasks
|
||||
|
||||
tasks = self.loop.run_until_complete(test())
|
||||
self.assertEqual(sum(range(100)), accumulator)
|
||||
|
||||
# close running generators
|
||||
running = False
|
||||
for i in range(len(tasks)):
|
||||
q.put_nowait(0)
|
||||
self.loop.run_until_complete(asyncio.wait(tasks, loop=self.loop))
|
||||
|
||||
def test_join_empty_queue(self):
|
||||
q = asyncio.JoinableQueue(loop=self.loop)
|
||||
|
||||
# Test that a queue join()s successfully, and before anything else
|
||||
# (done twice for insurance).
|
||||
|
||||
@asyncio.coroutine
|
||||
def join():
|
||||
yield from q.join()
|
||||
yield from q.join()
|
||||
|
||||
self.loop.run_until_complete(join())
|
||||
|
||||
def test_format(self):
|
||||
q = asyncio.JoinableQueue(loop=self.loop)
|
||||
self.assertEqual(q._format(), 'maxsize=0')
|
||||
|
||||
q._unfinished_tasks = 2
|
||||
self.assertEqual(q._format(), 'maxsize=0 tasks=2')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
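Editor's note: for context, the put/get rendezvous these queue tests pin down looks like this in application code. A minimal sketch in the same Tulip-era yield from style the tests use; the producer/consumer helper names are mine, not asyncio's:

    import asyncio

    @asyncio.coroutine
    def producer(q):
        for i in range(3):
            yield from q.put(i)   # blocks once maxsize items are waiting

    @asyncio.coroutine
    def consumer(q):
        items = []
        for _ in range(3):
            item = yield from q.get()   # each get unblocks a pending put
            items.append(item)
        return items

    loop = asyncio.new_event_loop()
    q = asyncio.Queue(maxsize=2, loop=loop)
    asyncio.Task(producer(q), loop=loop)
    print(loop.run_until_complete(consumer(q)))   # [0, 1, 2]
    loop.close()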
1737
tests/test_selector_events.py
Normal file
File diff suppressed because it is too large
214
tests/test_selectors.py
Normal file
@@ -0,0 +1,214 @@
"""Tests for selectors.py."""

import unittest
from unittest import mock

from asyncio import selectors


class FakeSelector(selectors._BaseSelectorImpl):
    """Trivial non-abstract subclass of BaseSelector."""

    def select(self, timeout=None):
        raise NotImplementedError


class _SelectorMappingTests(unittest.TestCase):

    def test_len(self):
        s = FakeSelector()
        map = selectors._SelectorMapping(s)
        self.assertTrue(map.__len__() == 0)

        f = mock.Mock()
        f.fileno.return_value = 10
        s.register(f, selectors.EVENT_READ, None)
        self.assertTrue(len(map) == 1)

    def test_getitem(self):
        s = FakeSelector()
        map = selectors._SelectorMapping(s)
        f = mock.Mock()
        f.fileno.return_value = 10
        s.register(f, selectors.EVENT_READ, None)
        attended = selectors.SelectorKey(f, 10, selectors.EVENT_READ, None)
        self.assertEqual(attended, map.__getitem__(f))

    def test_getitem_key_error(self):
        s = FakeSelector()
        map = selectors._SelectorMapping(s)
        self.assertTrue(len(map) == 0)
        f = mock.Mock()
        f.fileno.return_value = 10
        s.register(f, selectors.EVENT_READ, None)
        self.assertRaises(KeyError, map.__getitem__, 5)

    def test_iter(self):
        s = FakeSelector()
        map = selectors._SelectorMapping(s)
        self.assertTrue(len(map) == 0)
        f = mock.Mock()
        f.fileno.return_value = 5
        s.register(f, selectors.EVENT_READ, None)
        counter = 0
        for fileno in map.__iter__():
            self.assertEqual(5, fileno)
            counter += 1

        for idx in map:
            self.assertEqual(f, map[idx].fileobj)
        self.assertEqual(1, counter)


class BaseSelectorTests(unittest.TestCase):
    def test_fileobj_to_fd(self):
        self.assertEqual(10, selectors._fileobj_to_fd(10))

        f = mock.Mock()
        f.fileno.return_value = 10
        self.assertEqual(10, selectors._fileobj_to_fd(f))

        f.fileno.side_effect = AttributeError
        self.assertRaises(ValueError, selectors._fileobj_to_fd, f)

        f.fileno.return_value = -1
        self.assertRaises(ValueError, selectors._fileobj_to_fd, f)

    def test_selector_key_repr(self):
        key = selectors.SelectorKey(10, 10, selectors.EVENT_READ, None)
        self.assertEqual(
            "SelectorKey(fileobj=10, fd=10, events=1, data=None)", repr(key))

    def test_register(self):
        fobj = mock.Mock()
        fobj.fileno.return_value = 10

        s = FakeSelector()
        key = s.register(fobj, selectors.EVENT_READ)
        self.assertIsInstance(key, selectors.SelectorKey)
        self.assertEqual(key.fd, 10)
        self.assertIs(key, s._fd_to_key[10])

    def test_register_unknown_event(self):
        s = FakeSelector()
        self.assertRaises(ValueError, s.register, mock.Mock(), 999999)

    def test_register_already_registered(self):
        fobj = mock.Mock()
        fobj.fileno.return_value = 10

        s = FakeSelector()
        s.register(fobj, selectors.EVENT_READ)
        self.assertRaises(KeyError, s.register, fobj, selectors.EVENT_READ)

    def test_unregister(self):
        fobj = mock.Mock()
        fobj.fileno.return_value = 10

        s = FakeSelector()
        s.register(fobj, selectors.EVENT_READ)
        s.unregister(fobj)
        self.assertFalse(s._fd_to_key)

    def test_unregister_unknown(self):
        fobj = mock.Mock()
        fobj.fileno.return_value = 10

        s = FakeSelector()
        self.assertRaises(KeyError, s.unregister, fobj)

    def test_modify_unknown(self):
        fobj = mock.Mock()
        fobj.fileno.return_value = 10

        s = FakeSelector()
        self.assertRaises(KeyError, s.modify, fobj, 1)

    def test_modify(self):
        fobj = mock.Mock()
        fobj.fileno.return_value = 10

        s = FakeSelector()
        key = s.register(fobj, selectors.EVENT_READ)
        key2 = s.modify(fobj, selectors.EVENT_WRITE)
        self.assertNotEqual(key.events, key2.events)
        self.assertEqual(
            selectors.SelectorKey(fobj, 10, selectors.EVENT_WRITE, None),
            s.get_key(fobj))

    def test_modify_data(self):
        fobj = mock.Mock()
        fobj.fileno.return_value = 10

        d1 = object()
        d2 = object()

        s = FakeSelector()
        key = s.register(fobj, selectors.EVENT_READ, d1)
        key2 = s.modify(fobj, selectors.EVENT_READ, d2)
        self.assertEqual(key.events, key2.events)
        self.assertNotEqual(key.data, key2.data)
        self.assertEqual(
            selectors.SelectorKey(fobj, 10, selectors.EVENT_READ, d2),
            s.get_key(fobj))

    def test_modify_data_use_a_shortcut(self):
        fobj = mock.Mock()
        fobj.fileno.return_value = 10

        d1 = object()
        d2 = object()

        s = FakeSelector()
        key = s.register(fobj, selectors.EVENT_READ, d1)

        s.unregister = mock.Mock()
        s.register = mock.Mock()
        key2 = s.modify(fobj, selectors.EVENT_READ, d2)
        self.assertFalse(s.unregister.called)
        self.assertFalse(s.register.called)

    def test_modify_same(self):
        fobj = mock.Mock()
        fobj.fileno.return_value = 10

        data = object()

        s = FakeSelector()
        key = s.register(fobj, selectors.EVENT_READ, data)
        key2 = s.modify(fobj, selectors.EVENT_READ, data)
        self.assertIs(key, key2)

    def test_select(self):
        s = FakeSelector()
        self.assertRaises(NotImplementedError, s.select)

    def test_close(self):
        s = FakeSelector()
        s.register(1, selectors.EVENT_READ)

        s.close()
        self.assertFalse(s._fd_to_key)

    def test_context_manager(self):
        s = FakeSelector()

        with s as sel:
            sel.register(1, selectors.EVENT_READ)

        self.assertFalse(s._fd_to_key)

    def test_key_from_fd(self):
        s = FakeSelector()
        key = s.register(1, selectors.EVENT_READ)

        self.assertIs(key, s._key_from_fd(1))
        self.assertIsNone(s._key_from_fd(10))

    if hasattr(selectors.DefaultSelector, 'fileno'):
        def test_fileno(self):
            self.assertIsInstance(selectors.DefaultSelector().fileno(), int)


if __name__ == '__main__':
    unittest.main()
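Editor's note: these tests drive the selector's registration bookkeeping with mocks; the same API is what readiness polling looks like from the outside. A minimal sketch against the bundled selectors module the tests import (the 'payload' data value is illustrative):

    import socket
    from asyncio import selectors

    sel = selectors.DefaultSelector()
    a, b = socket.socketpair()
    sel.register(a, selectors.EVENT_READ, data='payload')
    b.send(b'x')                                  # make `a` readable
    for key, events in sel.select(timeout=1):
        assert events & selectors.EVENT_READ
        assert key.data == 'payload'
    sel.unregister(a)
    a.close()
    b.close()
    sel.close()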
628
tests/test_streams.py
Normal file
@@ -0,0 +1,628 @@
"""Tests for streams.py."""

import gc
import os
import socket
import sys
import unittest
from unittest import mock
try:
    import ssl
except ImportError:
    ssl = None

import asyncio
from asyncio import test_utils


class StreamReaderTests(test_utils.TestCase):

    DATA = b'line1\nline2\nline3\n'

    def setUp(self):
        self.loop = asyncio.new_event_loop()
        self.set_event_loop(self.loop)

    def tearDown(self):
        # just in case we have transport close callbacks
        test_utils.run_briefly(self.loop)

        self.loop.close()
        gc.collect()
        super().tearDown()

    @mock.patch('asyncio.streams.events')
    def test_ctor_global_loop(self, m_events):
        stream = asyncio.StreamReader()
        self.assertIs(stream._loop, m_events.get_event_loop.return_value)

    def _basetest_open_connection(self, open_connection_fut):
        reader, writer = self.loop.run_until_complete(open_connection_fut)
        writer.write(b'GET / HTTP/1.0\r\n\r\n')
        f = reader.readline()
        data = self.loop.run_until_complete(f)
        self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
        f = reader.read()
        data = self.loop.run_until_complete(f)
        self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
        writer.close()

    def test_open_connection(self):
        with test_utils.run_test_server() as httpd:
            conn_fut = asyncio.open_connection(*httpd.address,
                                               loop=self.loop)
            self._basetest_open_connection(conn_fut)

    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    def test_open_unix_connection(self):
        with test_utils.run_test_unix_server() as httpd:
            conn_fut = asyncio.open_unix_connection(httpd.address,
                                                    loop=self.loop)
            self._basetest_open_connection(conn_fut)

    def _basetest_open_connection_no_loop_ssl(self, open_connection_fut):
        try:
            reader, writer = self.loop.run_until_complete(open_connection_fut)
        finally:
            asyncio.set_event_loop(None)
        writer.write(b'GET / HTTP/1.0\r\n\r\n')
        f = reader.read()
        data = self.loop.run_until_complete(f)
        self.assertTrue(data.endswith(b'\r\n\r\nTest message'))

        writer.close()

    @unittest.skipIf(ssl is None, 'No ssl module')
    def test_open_connection_no_loop_ssl(self):
        with test_utils.run_test_server(use_ssl=True) as httpd:
            conn_fut = asyncio.open_connection(
                *httpd.address,
                ssl=test_utils.dummy_ssl_context(),
                loop=self.loop)

            self._basetest_open_connection_no_loop_ssl(conn_fut)

    @unittest.skipIf(ssl is None, 'No ssl module')
    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    def test_open_unix_connection_no_loop_ssl(self):
        with test_utils.run_test_unix_server(use_ssl=True) as httpd:
            conn_fut = asyncio.open_unix_connection(
                httpd.address,
                ssl=test_utils.dummy_ssl_context(),
                server_hostname='',
                loop=self.loop)

            self._basetest_open_connection_no_loop_ssl(conn_fut)

    def _basetest_open_connection_error(self, open_connection_fut):
        reader, writer = self.loop.run_until_complete(open_connection_fut)
        writer._protocol.connection_lost(ZeroDivisionError())
        f = reader.read()
        with self.assertRaises(ZeroDivisionError):
            self.loop.run_until_complete(f)
        writer.close()
        test_utils.run_briefly(self.loop)

    def test_open_connection_error(self):
        with test_utils.run_test_server() as httpd:
            conn_fut = asyncio.open_connection(*httpd.address,
                                               loop=self.loop)
            self._basetest_open_connection_error(conn_fut)

    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    def test_open_unix_connection_error(self):
        with test_utils.run_test_unix_server() as httpd:
            conn_fut = asyncio.open_unix_connection(httpd.address,
                                                    loop=self.loop)
            self._basetest_open_connection_error(conn_fut)

    def test_feed_empty_data(self):
        stream = asyncio.StreamReader(loop=self.loop)

        stream.feed_data(b'')
        self.assertEqual(b'', stream._buffer)

    def test_feed_nonempty_data(self):
        stream = asyncio.StreamReader(loop=self.loop)

        stream.feed_data(self.DATA)
        self.assertEqual(self.DATA, stream._buffer)

    def test_read_zero(self):
        # Read zero bytes.
        stream = asyncio.StreamReader(loop=self.loop)
        stream.feed_data(self.DATA)

        data = self.loop.run_until_complete(stream.read(0))
        self.assertEqual(b'', data)
        self.assertEqual(self.DATA, stream._buffer)

    def test_read(self):
        # Read bytes.
        stream = asyncio.StreamReader(loop=self.loop)
        read_task = asyncio.Task(stream.read(30), loop=self.loop)

        def cb():
            stream.feed_data(self.DATA)
        self.loop.call_soon(cb)

        data = self.loop.run_until_complete(read_task)
        self.assertEqual(self.DATA, data)
        self.assertEqual(b'', stream._buffer)

    def test_read_line_breaks(self):
        # Read bytes without line breaks.
        stream = asyncio.StreamReader(loop=self.loop)
        stream.feed_data(b'line1')
        stream.feed_data(b'line2')

        data = self.loop.run_until_complete(stream.read(5))

        self.assertEqual(b'line1', data)
        self.assertEqual(b'line2', stream._buffer)

    def test_read_eof(self):
        # Read bytes, stop at eof.
        stream = asyncio.StreamReader(loop=self.loop)
        read_task = asyncio.Task(stream.read(1024), loop=self.loop)

        def cb():
            stream.feed_eof()
        self.loop.call_soon(cb)

        data = self.loop.run_until_complete(read_task)
        self.assertEqual(b'', data)
        self.assertEqual(b'', stream._buffer)

    def test_read_until_eof(self):
        # Read all bytes until eof.
        stream = asyncio.StreamReader(loop=self.loop)
        read_task = asyncio.Task(stream.read(-1), loop=self.loop)

        def cb():
            stream.feed_data(b'chunk1\n')
            stream.feed_data(b'chunk2')
            stream.feed_eof()
        self.loop.call_soon(cb)

        data = self.loop.run_until_complete(read_task)

        self.assertEqual(b'chunk1\nchunk2', data)
        self.assertEqual(b'', stream._buffer)

    def test_read_exception(self):
        stream = asyncio.StreamReader(loop=self.loop)
        stream.feed_data(b'line\n')

        data = self.loop.run_until_complete(stream.read(2))
        self.assertEqual(b'li', data)

        stream.set_exception(ValueError())
        self.assertRaises(
            ValueError, self.loop.run_until_complete, stream.read(2))

    def test_readline(self):
        # Read one line. 'readline' will need to wait for the data
        # to come from 'cb'
        stream = asyncio.StreamReader(loop=self.loop)
        stream.feed_data(b'chunk1 ')
        read_task = asyncio.Task(stream.readline(), loop=self.loop)

        def cb():
            stream.feed_data(b'chunk2 ')
            stream.feed_data(b'chunk3 ')
            stream.feed_data(b'\n chunk4')
        self.loop.call_soon(cb)

        line = self.loop.run_until_complete(read_task)
        self.assertEqual(b'chunk1 chunk2 chunk3 \n', line)
        self.assertEqual(b' chunk4', stream._buffer)

    def test_readline_limit_with_existing_data(self):
        # Read one line. The data is in StreamReader's buffer
        # before the event loop is run.

        stream = asyncio.StreamReader(limit=3, loop=self.loop)
        stream.feed_data(b'li')
        stream.feed_data(b'ne1\nline2\n')

        self.assertRaises(
            ValueError, self.loop.run_until_complete, stream.readline())
        # The buffer should contain the remaining data after exception
        self.assertEqual(b'line2\n', stream._buffer)

        stream = asyncio.StreamReader(limit=3, loop=self.loop)
        stream.feed_data(b'li')
        stream.feed_data(b'ne1')
        stream.feed_data(b'li')

        self.assertRaises(
            ValueError, self.loop.run_until_complete, stream.readline())
        # No b'\n' at the end. The 'limit' is set to 3. So before
        # waiting for the new data in buffer, 'readline' will consume
        # the entire buffer, and since the length of the consumed data
        # is more than 3, it will raise a ValueError. The buffer is
        # expected to be empty now.
        self.assertEqual(b'', stream._buffer)

    def test_at_eof(self):
        stream = asyncio.StreamReader(loop=self.loop)
        self.assertFalse(stream.at_eof())

        stream.feed_data(b'some data\n')
        self.assertFalse(stream.at_eof())

        self.loop.run_until_complete(stream.readline())
        self.assertFalse(stream.at_eof())

        stream.feed_data(b'some data\n')
        stream.feed_eof()
        self.loop.run_until_complete(stream.readline())
        self.assertTrue(stream.at_eof())

    def test_readline_limit(self):
        # Read one line. StreamReaders are fed with data after
        # their 'readline' methods are called.

        stream = asyncio.StreamReader(limit=7, loop=self.loop)
        def cb():
            stream.feed_data(b'chunk1')
            stream.feed_data(b'chunk2')
            stream.feed_data(b'chunk3\n')
            stream.feed_eof()
        self.loop.call_soon(cb)

        self.assertRaises(
            ValueError, self.loop.run_until_complete, stream.readline())
        # The buffer had just one line of data, and after raising
        # a ValueError it should be empty.
        self.assertEqual(b'', stream._buffer)

        stream = asyncio.StreamReader(limit=7, loop=self.loop)
        def cb():
            stream.feed_data(b'chunk1')
            stream.feed_data(b'chunk2\n')
            stream.feed_data(b'chunk3\n')
            stream.feed_eof()
        self.loop.call_soon(cb)

        self.assertRaises(
            ValueError, self.loop.run_until_complete, stream.readline())
        self.assertEqual(b'chunk3\n', stream._buffer)

    def test_readline_nolimit_nowait(self):
        # All needed data for the first 'readline' call will be
        # in the buffer.
        stream = asyncio.StreamReader(loop=self.loop)
        stream.feed_data(self.DATA[:6])
        stream.feed_data(self.DATA[6:])

        line = self.loop.run_until_complete(stream.readline())

        self.assertEqual(b'line1\n', line)
        self.assertEqual(b'line2\nline3\n', stream._buffer)

    def test_readline_eof(self):
        stream = asyncio.StreamReader(loop=self.loop)
        stream.feed_data(b'some data')
        stream.feed_eof()

        line = self.loop.run_until_complete(stream.readline())
        self.assertEqual(b'some data', line)

    def test_readline_empty_eof(self):
        stream = asyncio.StreamReader(loop=self.loop)
        stream.feed_eof()

        line = self.loop.run_until_complete(stream.readline())
        self.assertEqual(b'', line)

    def test_readline_read_byte_count(self):
        stream = asyncio.StreamReader(loop=self.loop)
        stream.feed_data(self.DATA)

        self.loop.run_until_complete(stream.readline())

        data = self.loop.run_until_complete(stream.read(7))

        self.assertEqual(b'line2\nl', data)
        self.assertEqual(b'ine3\n', stream._buffer)

    def test_readline_exception(self):
        stream = asyncio.StreamReader(loop=self.loop)
        stream.feed_data(b'line\n')

        data = self.loop.run_until_complete(stream.readline())
        self.assertEqual(b'line\n', data)

        stream.set_exception(ValueError())
        self.assertRaises(
            ValueError, self.loop.run_until_complete, stream.readline())
        self.assertEqual(b'', stream._buffer)

    def test_readexactly_zero_or_less(self):
        # Read exact number of bytes (zero or less).
        stream = asyncio.StreamReader(loop=self.loop)
        stream.feed_data(self.DATA)

        data = self.loop.run_until_complete(stream.readexactly(0))
        self.assertEqual(b'', data)
        self.assertEqual(self.DATA, stream._buffer)

        data = self.loop.run_until_complete(stream.readexactly(-1))
        self.assertEqual(b'', data)
        self.assertEqual(self.DATA, stream._buffer)

    def test_readexactly(self):
        # Read exact number of bytes.
        stream = asyncio.StreamReader(loop=self.loop)

        n = 2 * len(self.DATA)
        read_task = asyncio.Task(stream.readexactly(n), loop=self.loop)

        def cb():
            stream.feed_data(self.DATA)
            stream.feed_data(self.DATA)
            stream.feed_data(self.DATA)
        self.loop.call_soon(cb)

        data = self.loop.run_until_complete(read_task)
        self.assertEqual(self.DATA + self.DATA, data)
        self.assertEqual(self.DATA, stream._buffer)

    def test_readexactly_eof(self):
        # Read exact number of bytes (eof).
        stream = asyncio.StreamReader(loop=self.loop)
        n = 2 * len(self.DATA)
        read_task = asyncio.Task(stream.readexactly(n), loop=self.loop)

        def cb():
            stream.feed_data(self.DATA)
            stream.feed_eof()
        self.loop.call_soon(cb)

        with self.assertRaises(asyncio.IncompleteReadError) as cm:
            self.loop.run_until_complete(read_task)
        self.assertEqual(cm.exception.partial, self.DATA)
        self.assertEqual(cm.exception.expected, n)
        self.assertEqual(str(cm.exception),
                         '18 bytes read on a total of 36 expected bytes')
        self.assertEqual(b'', stream._buffer)

    def test_readexactly_exception(self):
        stream = asyncio.StreamReader(loop=self.loop)
        stream.feed_data(b'line\n')

        data = self.loop.run_until_complete(stream.readexactly(2))
        self.assertEqual(b'li', data)

        stream.set_exception(ValueError())
        self.assertRaises(
            ValueError, self.loop.run_until_complete, stream.readexactly(2))

    def test_exception(self):
        stream = asyncio.StreamReader(loop=self.loop)
        self.assertIsNone(stream.exception())

        exc = ValueError()
        stream.set_exception(exc)
        self.assertIs(stream.exception(), exc)

    def test_exception_waiter(self):
        stream = asyncio.StreamReader(loop=self.loop)

        @asyncio.coroutine
        def set_err():
            stream.set_exception(ValueError())

        @asyncio.coroutine
        def readline():
            yield from stream.readline()

        t1 = asyncio.Task(stream.readline(), loop=self.loop)
        t2 = asyncio.Task(set_err(), loop=self.loop)

        self.loop.run_until_complete(asyncio.wait([t1, t2], loop=self.loop))

        self.assertRaises(ValueError, t1.result)

    def test_exception_cancel(self):
        stream = asyncio.StreamReader(loop=self.loop)

        @asyncio.coroutine
        def read_a_line():
            yield from stream.readline()

        t = asyncio.Task(read_a_line(), loop=self.loop)
        test_utils.run_briefly(self.loop)
        t.cancel()
        test_utils.run_briefly(self.loop)
        # The following line fails if set_exception() isn't careful.
        stream.set_exception(RuntimeError('message'))
        test_utils.run_briefly(self.loop)
        self.assertIs(stream._waiter, None)

    def test_start_server(self):

        class MyServer:

            def __init__(self, loop):
                self.server = None
                self.loop = loop

            @asyncio.coroutine
            def handle_client(self, client_reader, client_writer):
                data = yield from client_reader.readline()
                client_writer.write(data)

            def start(self):
                sock = socket.socket()
                sock.bind(('127.0.0.1', 0))
                self.server = self.loop.run_until_complete(
                    asyncio.start_server(self.handle_client,
                                         sock=sock,
                                         loop=self.loop))
                return sock.getsockname()

            def handle_client_callback(self, client_reader, client_writer):
                task = asyncio.Task(client_reader.readline(), loop=self.loop)

                def done(task):
                    client_writer.write(task.result())

                task.add_done_callback(done)

            def start_callback(self):
                sock = socket.socket()
                sock.bind(('127.0.0.1', 0))
                addr = sock.getsockname()
                sock.close()
                self.server = self.loop.run_until_complete(
                    asyncio.start_server(self.handle_client_callback,
                                         host=addr[0], port=addr[1],
                                         loop=self.loop))
                return addr

            def stop(self):
                if self.server is not None:
                    self.server.close()
                    self.loop.run_until_complete(self.server.wait_closed())
                    self.server = None

        @asyncio.coroutine
        def client(addr):
            reader, writer = yield from asyncio.open_connection(
                *addr, loop=self.loop)
            # send a line
            writer.write(b"hello world!\n")
            # read it back
            msgback = yield from reader.readline()
            writer.close()
            return msgback

        # test the server variant with a coroutine as client handler
        server = MyServer(self.loop)
        addr = server.start()
        msg = self.loop.run_until_complete(asyncio.Task(client(addr),
                                                        loop=self.loop))
        server.stop()
        self.assertEqual(msg, b"hello world!\n")

        # test the server variant with a callback as client handler
        server = MyServer(self.loop)
        addr = server.start_callback()
        msg = self.loop.run_until_complete(asyncio.Task(client(addr),
                                                        loop=self.loop))
        server.stop()
        self.assertEqual(msg, b"hello world!\n")

    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    def test_start_unix_server(self):

        class MyServer:

            def __init__(self, loop, path):
                self.server = None
                self.loop = loop
                self.path = path

            @asyncio.coroutine
            def handle_client(self, client_reader, client_writer):
                data = yield from client_reader.readline()
                client_writer.write(data)

            def start(self):
                self.server = self.loop.run_until_complete(
                    asyncio.start_unix_server(self.handle_client,
                                              path=self.path,
                                              loop=self.loop))

            def handle_client_callback(self, client_reader, client_writer):
                task = asyncio.Task(client_reader.readline(), loop=self.loop)

                def done(task):
                    client_writer.write(task.result())

                task.add_done_callback(done)

            def start_callback(self):
                self.server = self.loop.run_until_complete(
                    asyncio.start_unix_server(self.handle_client_callback,
                                              path=self.path,
                                              loop=self.loop))

            def stop(self):
                if self.server is not None:
                    self.server.close()
                    self.loop.run_until_complete(self.server.wait_closed())
                    self.server = None

        @asyncio.coroutine
        def client(path):
            reader, writer = yield from asyncio.open_unix_connection(
                path, loop=self.loop)
            # send a line
            writer.write(b"hello world!\n")
            # read it back
            msgback = yield from reader.readline()
            writer.close()
            return msgback

        # test the server variant with a coroutine as client handler
        with test_utils.unix_socket_path() as path:
            server = MyServer(self.loop, path)
            server.start()
            msg = self.loop.run_until_complete(asyncio.Task(client(path),
                                                            loop=self.loop))
            server.stop()
            self.assertEqual(msg, b"hello world!\n")

        # test the server variant with a callback as client handler
        with test_utils.unix_socket_path() as path:
            server = MyServer(self.loop, path)
            server.start_callback()
            msg = self.loop.run_until_complete(asyncio.Task(client(path),
                                                            loop=self.loop))
            server.stop()
            self.assertEqual(msg, b"hello world!\n")

    @unittest.skipIf(sys.platform == 'win32', "Don't have pipes")
    def test_read_all_from_pipe_reader(self):
        # See Tulip issue 168. This test is derived from the example
        # subprocess_attach_read_pipe.py, but we configure the
        # StreamReader's limit so that twice the limit is less than the
        # size of the data written. Also we must explicitly attach a
        # child watcher to the event loop.

        code = """\
import os, sys
fd = int(sys.argv[1])
os.write(fd, b'data')
os.close(fd)
"""
        rfd, wfd = os.pipe()
        args = [sys.executable, '-c', code, str(wfd)]

        pipe = open(rfd, 'rb', 0)
        reader = asyncio.StreamReader(loop=self.loop, limit=1)
        protocol = asyncio.StreamReaderProtocol(reader, loop=self.loop)
        transport, _ = self.loop.run_until_complete(
            self.loop.connect_read_pipe(lambda: protocol, pipe))

        watcher = asyncio.SafeChildWatcher()
        watcher.attach_loop(self.loop)
        try:
            asyncio.set_child_watcher(watcher)
            proc = self.loop.run_until_complete(
                asyncio.create_subprocess_exec(*args, pass_fds={wfd}, loop=self.loop))
            self.loop.run_until_complete(proc.wait())
        finally:
            asyncio.set_child_watcher(None)

        os.close(wfd)
        data = self.loop.run_until_complete(reader.read(-1))
        self.assertEqual(data, b'data')


if __name__ == '__main__':
    unittest.main()
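Editor's note: nearly every StreamReader test above follows one pattern: feed bytes (and possibly EOF) into the reader, then run a read coroutine to completion. The same pattern outside the test harness, as a minimal sketch:

    import asyncio

    loop = asyncio.new_event_loop()
    reader = asyncio.StreamReader(loop=loop)
    reader.feed_data(b'line1\nline2\n')   # what a transport's protocol would do
    reader.feed_eof()
    print(loop.run_until_complete(reader.readline()))  # b'line1\n'
    print(loop.run_until_complete(reader.read(-1)))    # b'line2\n'
    loop.close()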
275
tests/test_subprocess.py
Normal file
@@ -0,0 +1,275 @@
from asyncio import subprocess
from asyncio import test_utils
import asyncio
import signal
import sys
import unittest
from unittest import mock
from test import support
if sys.platform != 'win32':
    from asyncio import unix_events

# Program blocking
PROGRAM_BLOCKED = [sys.executable, '-c', 'import time; time.sleep(3600)']

# Program copying input to output
PROGRAM_CAT = [
    sys.executable, '-c',
    ';'.join(('import sys',
              'data = sys.stdin.buffer.read()',
              'sys.stdout.buffer.write(data)'))]

class SubprocessMixin:

    def test_stdin_stdout(self):
        args = PROGRAM_CAT

        @asyncio.coroutine
        def run(data):
            proc = yield from asyncio.create_subprocess_exec(
                *args,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                loop=self.loop)

            # feed data
            proc.stdin.write(data)
            yield from proc.stdin.drain()
            proc.stdin.close()

            # get output and exitcode
            data = yield from proc.stdout.read()
            exitcode = yield from proc.wait()
            return (exitcode, data)

        task = run(b'some data')
        task = asyncio.wait_for(task, 60.0, loop=self.loop)
        exitcode, stdout = self.loop.run_until_complete(task)
        self.assertEqual(exitcode, 0)
        self.assertEqual(stdout, b'some data')

    def test_communicate(self):
        args = PROGRAM_CAT

        @asyncio.coroutine
        def run(data):
            proc = yield from asyncio.create_subprocess_exec(
                *args,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                loop=self.loop)
            stdout, stderr = yield from proc.communicate(data)
            return proc.returncode, stdout

        task = run(b'some data')
        task = asyncio.wait_for(task, 60.0, loop=self.loop)
        exitcode, stdout = self.loop.run_until_complete(task)
        self.assertEqual(exitcode, 0)
        self.assertEqual(stdout, b'some data')

    def test_shell(self):
        create = asyncio.create_subprocess_shell('exit 7',
                                                 loop=self.loop)
        proc = self.loop.run_until_complete(create)
        exitcode = self.loop.run_until_complete(proc.wait())
        self.assertEqual(exitcode, 7)

    def test_start_new_session(self):
        # start the new process in a new session
        create = asyncio.create_subprocess_shell('exit 8',
                                                 start_new_session=True,
                                                 loop=self.loop)
        proc = self.loop.run_until_complete(create)
        exitcode = self.loop.run_until_complete(proc.wait())
        self.assertEqual(exitcode, 8)

    def test_kill(self):
        args = PROGRAM_BLOCKED
        create = asyncio.create_subprocess_exec(*args, loop=self.loop)
        proc = self.loop.run_until_complete(create)
        proc.kill()
        returncode = self.loop.run_until_complete(proc.wait())
        if sys.platform == 'win32':
            self.assertIsInstance(returncode, int)
            # expect 1 but sometimes get 0
        else:
            self.assertEqual(-signal.SIGKILL, returncode)

    def test_terminate(self):
        args = PROGRAM_BLOCKED
        create = asyncio.create_subprocess_exec(*args, loop=self.loop)
        proc = self.loop.run_until_complete(create)
        proc.terminate()
        returncode = self.loop.run_until_complete(proc.wait())
        if sys.platform == 'win32':
            self.assertIsInstance(returncode, int)
            # expect 1 but sometimes get 0
        else:
            self.assertEqual(-signal.SIGTERM, returncode)

    @unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
    def test_send_signal(self):
        code = 'import time; print("sleeping", flush=True); time.sleep(3600)'
        args = [sys.executable, '-c', code]
        create = asyncio.create_subprocess_exec(*args, loop=self.loop, stdout=subprocess.PIPE)
        proc = self.loop.run_until_complete(create)

        @asyncio.coroutine
        def send_signal(proc):
            # basic synchronization to wait until the program is sleeping
            line = yield from proc.stdout.readline()
            self.assertEqual(line, b'sleeping\n')

            proc.send_signal(signal.SIGHUP)
            returncode = (yield from proc.wait())
            return returncode

        returncode = self.loop.run_until_complete(send_signal(proc))
        self.assertEqual(-signal.SIGHUP, returncode)

    def prepare_broken_pipe_test(self):
        # buffer large enough to feed the whole pipe buffer
        large_data = b'x' * support.PIPE_MAX_SIZE

        # the program ends before stdin can be fed
        create = asyncio.create_subprocess_exec(
            sys.executable, '-c', 'pass',
            stdin=subprocess.PIPE,
            loop=self.loop)
        proc = self.loop.run_until_complete(create)
        return (proc, large_data)

    def test_stdin_broken_pipe(self):
        proc, large_data = self.prepare_broken_pipe_test()

        @asyncio.coroutine
        def write_stdin(proc, data):
            proc.stdin.write(data)
            yield from proc.stdin.drain()

        coro = write_stdin(proc, large_data)
        # drain() must raise BrokenPipeError or ConnectionResetError
        with test_utils.disable_logger():
            self.assertRaises((BrokenPipeError, ConnectionResetError),
                              self.loop.run_until_complete, coro)
        self.loop.run_until_complete(proc.wait())

    def test_communicate_ignore_broken_pipe(self):
        proc, large_data = self.prepare_broken_pipe_test()

        # communicate() must ignore BrokenPipeError when feeding stdin
        with test_utils.disable_logger():
            self.loop.run_until_complete(proc.communicate(large_data))
        self.loop.run_until_complete(proc.wait())

    def test_pause_reading(self):
        limit = 10
        size = (limit * 2 + 1)

        @asyncio.coroutine
        def test_pause_reading():
            code = '\n'.join((
                'import sys',
                'sys.stdout.write("x" * %s)' % size,
                'sys.stdout.flush()',
            ))
            proc = yield from asyncio.create_subprocess_exec(
                sys.executable, '-c', code,
                stdin=asyncio.subprocess.PIPE,
                stdout=asyncio.subprocess.PIPE,
                limit=limit,
                loop=self.loop)
            stdout_transport = proc._transport.get_pipe_transport(1)
            stdout_transport.pause_reading = mock.Mock()
            stdout_transport.resume_reading = mock.Mock()

            stdout, stderr = yield from proc.communicate()

            # The child process produced more than limit bytes of output,
            # the stream reader transport should pause the protocol to not
            # allocate too much memory.
            return (stdout, stdout_transport)

        # Issue #22685: Ensure that the stream reader pauses the protocol
        # when the child process produces too much data
        stdout, transport = self.loop.run_until_complete(test_pause_reading())

        self.assertEqual(stdout, b'x' * size)
        self.assertTrue(transport.pause_reading.called)
        self.assertTrue(transport.resume_reading.called)

    def test_stdin_not_inheritable(self):
        # Tulip issue #209: stdin must not be inheritable, otherwise
        # the Process.communicate() hangs
        @asyncio.coroutine
        def len_message(message):
            code = 'import sys; data = sys.stdin.read(); print(len(data))'
            proc = yield from asyncio.create_subprocess_exec(
                sys.executable, '-c', code,
                stdin=asyncio.subprocess.PIPE,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
                close_fds=False,
                loop=self.loop)
            stdout, stderr = yield from proc.communicate(message)
            exitcode = yield from proc.wait()
            return (stdout, exitcode)

        output, exitcode = self.loop.run_until_complete(len_message(b'abc'))
        self.assertEqual(output.rstrip(), b'3')
        self.assertEqual(exitcode, 0)


if sys.platform != 'win32':
    # Unix
    class SubprocessWatcherMixin(SubprocessMixin):

        Watcher = None

        def setUp(self):
            policy = asyncio.get_event_loop_policy()
            self.loop = policy.new_event_loop()

            # ensure that the event loop is passed explicitly in asyncio
            policy.set_event_loop(None)

            watcher = self.Watcher()
            watcher.attach_loop(self.loop)
            policy.set_child_watcher(watcher)

        def tearDown(self):
            policy = asyncio.get_event_loop_policy()
            policy.set_child_watcher(None)
            self.loop.close()
            super().tearDown()

    class SubprocessSafeWatcherTests(SubprocessWatcherMixin,
                                     test_utils.TestCase):

        Watcher = unix_events.SafeChildWatcher

    class SubprocessFastWatcherTests(SubprocessWatcherMixin,
                                     test_utils.TestCase):

        Watcher = unix_events.FastChildWatcher

else:
    # Windows
    class SubprocessProactorTests(SubprocessMixin, test_utils.TestCase):

        def setUp(self):
            policy = asyncio.get_event_loop_policy()
            self.loop = asyncio.ProactorEventLoop()

            # ensure that the event loop is passed explicitly in asyncio
            policy.set_event_loop(None)

        def tearDown(self):
            policy = asyncio.get_event_loop_policy()
            self.loop.close()
            policy.set_event_loop(None)
            super().tearDown()


if __name__ == '__main__':
    unittest.main()
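Editor's note: the subprocess tests all build on create_subprocess_exec() plus communicate() or explicit pipe reads. A minimal end-to-end sketch in the same style; the upper() helper is mine, and on Unix this assumes a child watcher is attached to the loop, as test_read_all_from_pipe_reader above demonstrates:

    import sys
    import asyncio
    from asyncio import subprocess

    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)

    @asyncio.coroutine
    def upper(text):
        proc = yield from asyncio.create_subprocess_exec(
            sys.executable, '-c',
            'import sys; sys.stdout.write(sys.stdin.read().upper())',
            stdin=subprocess.PIPE, stdout=subprocess.PIPE, loop=loop)
        # communicate() feeds stdin, closes it, and collects stdout
        stdout, _ = yield from proc.communicate(text.encode())
        return stdout.decode()

    print(loop.run_until_complete(upper('hello')))  # HELLO
    loop.close()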
1984
tests/test_tasks.py
Normal file
File diff suppressed because it is too large
91
tests/test_transports.py
Normal file
@@ -0,0 +1,91 @@
"""Tests for transports.py."""

import unittest
from unittest import mock

import asyncio
from asyncio import transports


class TransportTests(unittest.TestCase):

    def test_ctor_extra_is_none(self):
        transport = asyncio.Transport()
        self.assertEqual(transport._extra, {})

    def test_get_extra_info(self):
        transport = asyncio.Transport({'extra': 'info'})
        self.assertEqual('info', transport.get_extra_info('extra'))
        self.assertIsNone(transport.get_extra_info('unknown'))

        default = object()
        self.assertIs(default, transport.get_extra_info('unknown', default))

    def test_writelines(self):
        transport = asyncio.Transport()
        transport.write = mock.Mock()

        transport.writelines([b'line1',
                              bytearray(b'line2'),
                              memoryview(b'line3')])
        self.assertEqual(1, transport.write.call_count)
        transport.write.assert_called_with(b'line1line2line3')

    def test_not_implemented(self):
        transport = asyncio.Transport()

        self.assertRaises(NotImplementedError,
                          transport.set_write_buffer_limits)
        self.assertRaises(NotImplementedError, transport.get_write_buffer_size)
        self.assertRaises(NotImplementedError, transport.write, 'data')
        self.assertRaises(NotImplementedError, transport.write_eof)
        self.assertRaises(NotImplementedError, transport.can_write_eof)
        self.assertRaises(NotImplementedError, transport.pause_reading)
        self.assertRaises(NotImplementedError, transport.resume_reading)
        self.assertRaises(NotImplementedError, transport.close)
        self.assertRaises(NotImplementedError, transport.abort)

    def test_dgram_not_implemented(self):
        transport = asyncio.DatagramTransport()

        self.assertRaises(NotImplementedError, transport.sendto, 'data')
        self.assertRaises(NotImplementedError, transport.abort)

    def test_subprocess_transport_not_implemented(self):
        transport = asyncio.SubprocessTransport()

        self.assertRaises(NotImplementedError, transport.get_pid)
        self.assertRaises(NotImplementedError, transport.get_returncode)
        self.assertRaises(NotImplementedError, transport.get_pipe_transport, 1)
        self.assertRaises(NotImplementedError, transport.send_signal, 1)
        self.assertRaises(NotImplementedError, transport.terminate)
        self.assertRaises(NotImplementedError, transport.kill)

    def test_flowcontrol_mixin_set_write_limits(self):

        class MyTransport(transports._FlowControlMixin,
                          transports.Transport):

            def get_write_buffer_size(self):
                return 512

        loop = mock.Mock()
        transport = MyTransport(loop=loop)
        transport._protocol = mock.Mock()

        self.assertFalse(transport._protocol_paused)

        with self.assertRaisesRegex(ValueError, 'high.*must be >= low'):
            transport.set_write_buffer_limits(high=0, low=1)

        transport.set_write_buffer_limits(high=1024, low=128)
        self.assertFalse(transport._protocol_paused)
        self.assertEqual(transport.get_write_buffer_limits(), (128, 1024))

        transport.set_write_buffer_limits(high=256, low=128)
        self.assertTrue(transport._protocol_paused)
        self.assertEqual(transport.get_write_buffer_limits(), (128, 256))


if __name__ == '__main__':
    unittest.main()
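Editor's note: the flow-control assertions at the end encode the high-water-mark rule: with a write buffer of 512 bytes, a high mark of 1024 leaves the protocol running, while a high mark of 256 pauses it. A worked restatement of the rule the test checks (the should_pause helper is mine, not the transport code):

    def should_pause(buffer_size, high):
        # _FlowControlMixin pauses the protocol once the buffered
        # bytes exceed the high-water mark.
        return buffer_size > high

    assert not should_pause(512, high=1024)  # matches _protocol_paused == False
    assert should_pause(512, high=256)       # matches _protocol_paused == True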
1600
tests/test_unix_events.py
Normal file
File diff suppressed because it is too large
141
tests/test_windows_events.py
Normal file
@@ -0,0 +1,141 @@
|
||||
import os
|
||||
import sys
|
||||
import unittest
|
||||
|
||||
if sys.platform != 'win32':
|
||||
raise unittest.SkipTest('Windows only')
|
||||
|
||||
import _winapi
|
||||
|
||||
import asyncio
|
||||
from asyncio import _overlapped
|
||||
from asyncio import test_utils
|
||||
from asyncio import windows_events
|
||||
|
||||
|
||||
class UpperProto(asyncio.Protocol):
|
||||
def __init__(self):
|
||||
self.buf = []
|
||||
|
||||
def connection_made(self, trans):
|
||||
self.trans = trans
|
||||
|
||||
def data_received(self, data):
|
||||
self.buf.append(data)
|
||||
if b'\n' in data:
|
||||
self.trans.write(b''.join(self.buf).upper())
|
||||
self.trans.close()
|
||||
|
||||
|
||||
class ProactorTests(test_utils.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.loop = asyncio.ProactorEventLoop()
|
||||
self.set_event_loop(self.loop)
|
||||
|
||||
def test_close(self):
|
||||
a, b = self.loop._socketpair()
|
||||
trans = self.loop._make_socket_transport(a, asyncio.Protocol())
|
||||
f = asyncio.async(self.loop.sock_recv(b, 100))
|
||||
trans.close()
|
||||
self.loop.run_until_complete(f)
|
||||
self.assertEqual(f.result(), b'')
|
||||
b.close()
|
||||
|
||||
    def test_double_bind(self):
        ADDRESS = r'\\.\pipe\test_double_bind-%s' % os.getpid()
        server1 = windows_events.PipeServer(ADDRESS)
        with self.assertRaises(PermissionError):
            windows_events.PipeServer(ADDRESS)
        server1.close()

    def test_pipe(self):
        res = self.loop.run_until_complete(self._test_pipe())
        self.assertEqual(res, 'done')

    def _test_pipe(self):
        ADDRESS = r'\\.\pipe\_test_pipe-%s' % os.getpid()

        with self.assertRaises(FileNotFoundError):
            yield from self.loop.create_pipe_connection(
                asyncio.Protocol, ADDRESS)

        [server] = yield from self.loop.start_serving_pipe(
            UpperProto, ADDRESS)
        self.assertIsInstance(server, windows_events.PipeServer)

        clients = []
        for i in range(5):
            stream_reader = asyncio.StreamReader(loop=self.loop)
            protocol = asyncio.StreamReaderProtocol(stream_reader)
            trans, proto = yield from self.loop.create_pipe_connection(
                lambda: protocol, ADDRESS)
            self.assertIsInstance(trans, asyncio.Transport)
            self.assertEqual(protocol, proto)
            clients.append((stream_reader, trans))

        for i, (r, w) in enumerate(clients):
            w.write('lower-{}\n'.format(i).encode())

        for i, (r, w) in enumerate(clients):
            response = yield from r.readline()
            self.assertEqual(response, 'LOWER-{}\n'.format(i).encode())
            w.close()

        server.close()

        with self.assertRaises(FileNotFoundError):
            yield from self.loop.create_pipe_connection(
                asyncio.Protocol, ADDRESS)

        return 'done'

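    # wait_for_handle() registers a native wait on the handle; the
    # future's result is True once the handle is signalled, and False if
    # the timeout elapses first.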
    def test_wait_for_handle(self):
        event = _overlapped.CreateEvent(None, True, False, None)
        self.addCleanup(_winapi.CloseHandle, event)

        # Wait for unset event with 0.5s timeout;
        # result should be False at timeout
        fut = self.loop._proactor.wait_for_handle(event, 0.5)
        start = self.loop.time()
        self.loop.run_until_complete(fut)
        elapsed = self.loop.time() - start
        self.assertFalse(fut.result())
        self.assertTrue(0.48 < elapsed < 0.9, elapsed)

        _overlapped.SetEvent(event)

        # Wait for set event;
        # result should be True immediately
        fut = self.loop._proactor.wait_for_handle(event, 10)
        start = self.loop.time()
        self.loop.run_until_complete(fut)
        elapsed = self.loop.time() - start
        self.assertTrue(fut.result())
        self.assertTrue(0 <= elapsed < 0.3, elapsed)

        # Tulip issue #195: cancelling a done _WaitHandleFuture must not crash
        fut.cancel()

    def test_wait_for_handle_cancel(self):
        event = _overlapped.CreateEvent(None, True, False, None)
        self.addCleanup(_winapi.CloseHandle, event)

        # Wait for unset event with a cancelled future;
        # CancelledError should be raised immediately
        fut = self.loop._proactor.wait_for_handle(event, 10)
        fut.cancel()
        start = self.loop.time()
        with self.assertRaises(asyncio.CancelledError):
            self.loop.run_until_complete(fut)
        elapsed = self.loop.time() - start
        self.assertTrue(0 <= elapsed < 0.1, elapsed)

        # Tulip issue #195: cancelling a _WaitHandleFuture twice must not crash
        fut = self.loop._proactor.wait_for_handle(event)
        fut.cancel()
        fut.cancel()


if __name__ == '__main__':
    unittest.main()
175
tests/test_windows_utils.py
Normal file
@@ -0,0 +1,175 @@
"""Tests for windows_utils"""

import socket
import sys
import test.support
import unittest
from test.support import IPV6_ENABLED
from unittest import mock

if sys.platform != 'win32':
    raise unittest.SkipTest('Windows only')

import _winapi

from asyncio import windows_utils
from asyncio import _overlapped

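# windows_utils.socketpair() emulates socket.socketpair() on platforms
# where the stdlib does not provide one, by connecting two TCP sockets
# through a short-lived listener on localhost.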
class WinsocketpairTests(unittest.TestCase):

    def check_winsocketpair(self, ssock, csock):
        csock.send(b'xxx')
        self.assertEqual(b'xxx', ssock.recv(1024))
        csock.close()
        ssock.close()

    def test_winsocketpair(self):
        ssock, csock = windows_utils.socketpair()
        self.check_winsocketpair(ssock, csock)

    @unittest.skipUnless(IPV6_ENABLED, 'IPv6 not supported or enabled')
    def test_winsocketpair_ipv6(self):
        ssock, csock = windows_utils.socketpair(family=socket.AF_INET6)
        self.check_winsocketpair(ssock, csock)

    @unittest.skipIf(hasattr(socket, 'socketpair'),
                     'socket.socketpair is available')
    @mock.patch('asyncio.windows_utils.socket')
    def test_winsocketpair_exc(self, m_socket):
        m_socket.AF_INET = socket.AF_INET
        m_socket.SOCK_STREAM = socket.SOCK_STREAM
        m_socket.socket.return_value.getsockname.return_value = ('', 12345)
        m_socket.socket.return_value.accept.return_value = object(), object()
        m_socket.socket.return_value.connect.side_effect = OSError()

        self.assertRaises(OSError, windows_utils.socketpair)

    def test_winsocketpair_invalid_args(self):
        self.assertRaises(ValueError,
                          windows_utils.socketpair, family=socket.AF_UNSPEC)
        self.assertRaises(ValueError,
                          windows_utils.socketpair, type=socket.SOCK_DGRAM)
        self.assertRaises(ValueError,
                          windows_utils.socketpair, proto=1)

    @unittest.skipIf(hasattr(socket, 'socketpair'),
                     'socket.socketpair is available')
    @mock.patch('asyncio.windows_utils.socket')
    def test_winsocketpair_close(self, m_socket):
        m_socket.AF_INET = socket.AF_INET
        m_socket.SOCK_STREAM = socket.SOCK_STREAM
        sock = mock.Mock()
        m_socket.socket.return_value = sock
        sock.bind.side_effect = OSError
        self.assertRaises(OSError, windows_utils.socketpair)
        self.assertTrue(sock.close.called)

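# windows_utils.pipe(overlapped=(True, True)) returns a pair of handles to
# a uniquely named pipe, opened with FILE_FLAG_OVERLAPPED on both ends so
# reads and writes can be driven asynchronously via Overlapped objects.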
class PipeTests(unittest.TestCase):

    def test_pipe_overlapped(self):
        h1, h2 = windows_utils.pipe(overlapped=(True, True))
        try:
            ov1 = _overlapped.Overlapped()
            self.assertFalse(ov1.pending)
            self.assertEqual(ov1.error, 0)

            ov1.ReadFile(h1, 100)
            self.assertTrue(ov1.pending)
            self.assertEqual(ov1.error, _winapi.ERROR_IO_PENDING)
            ERROR_IO_INCOMPLETE = 996
            try:
                ov1.getresult()
            except OSError as e:
                self.assertEqual(e.winerror, ERROR_IO_INCOMPLETE)
            else:
                raise RuntimeError('expected ERROR_IO_INCOMPLETE')

            ov2 = _overlapped.Overlapped()
            self.assertFalse(ov2.pending)
            self.assertEqual(ov2.error, 0)

            ov2.WriteFile(h2, b"hello")
            self.assertIn(ov2.error, {0, _winapi.ERROR_IO_PENDING})

            res = _winapi.WaitForMultipleObjects([ov2.event], False, 100)
            self.assertEqual(res, _winapi.WAIT_OBJECT_0)

            self.assertFalse(ov1.pending)
            self.assertEqual(ov1.error, ERROR_IO_INCOMPLETE)
            self.assertFalse(ov2.pending)
            self.assertIn(ov2.error, {0, _winapi.ERROR_IO_PENDING})
            self.assertEqual(ov1.getresult(), b"hello")
        finally:
            _winapi.CloseHandle(h1)
            _winapi.CloseHandle(h2)

    def test_pipe_handle(self):
        h, _ = windows_utils.pipe(overlapped=(True, True))
        _winapi.CloseHandle(_)
        p = windows_utils.PipeHandle(h)
        self.assertEqual(p.fileno(), h)
        self.assertEqual(p.handle, h)

        # check garbage collection of p closes handle
        del p
        test.support.gc_collect()
        try:
            _winapi.CloseHandle(h)
        except OSError as e:
            self.assertEqual(e.winerror, 6)  # ERROR_INVALID_HANDLE
        else:
            raise RuntimeError('expected ERROR_INVALID_HANDLE')

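# windows_utils.Popen mirrors subprocess.Popen but hands back overlapped
# PipeHandle objects for stdin/stdout/stderr, which is what the proactor
# needs for asynchronous subprocess I/O.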
class PopenTests(unittest.TestCase):

    def test_popen(self):
        command = r"""if 1:
            import sys
            s = sys.stdin.readline()
            sys.stdout.write(s.upper())
            sys.stderr.write('stderr')
            """
        msg = b"blah\n"

        p = windows_utils.Popen([sys.executable, '-c', command],
                                stdin=windows_utils.PIPE,
                                stdout=windows_utils.PIPE,
                                stderr=windows_utils.PIPE)

        for f in [p.stdin, p.stdout, p.stderr]:
            self.assertIsInstance(f, windows_utils.PipeHandle)

        ovin = _overlapped.Overlapped()
        ovout = _overlapped.Overlapped()
        overr = _overlapped.Overlapped()

        ovin.WriteFile(p.stdin.handle, msg)
        ovout.ReadFile(p.stdout.handle, 100)
        overr.ReadFile(p.stderr.handle, 100)

        events = [ovin.event, ovout.event, overr.event]
        # Super-long timeout for slow buildbots.
        res = _winapi.WaitForMultipleObjects(events, True, 10000)
        self.assertEqual(res, _winapi.WAIT_OBJECT_0)
        self.assertFalse(ovout.pending)
        self.assertFalse(overr.pending)
        self.assertFalse(ovin.pending)

        self.assertEqual(ovin.getresult(), len(msg))
        out = ovout.getresult().rstrip()
        err = overr.getresult().rstrip()

        self.assertGreater(len(out), 0)
        self.assertGreater(len(err), 0)
        # allow for partial reads...
        self.assertTrue(msg.upper().rstrip().startswith(out))
        self.assertTrue(b"stderr".startswith(err))

        p.wait()


if __name__ == '__main__':
    unittest.main()
65
update_stdlib.sh
Executable file
@@ -0,0 +1,65 @@
#!/bin/bash

# Script to copy asyncio files to the standard library tree.
# Optional argument is the root of the Python 3.4 tree.
# Assumes you have already created Lib/asyncio and
# Lib/test/test_asyncio in the destination tree.

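# Typical invocation, assuming a CPython checkout at ~/cpython
# (which is also the default when no argument is given):
#
#     ./update_stdlib.sh ~/cpython
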
CPYTHON=${1-$HOME/cpython}

if [ ! -d $CPYTHON ]
then
    echo Bad destination $CPYTHON
    exit 1
fi

if [ ! -f asyncio/__init__.py ]
then
    echo Bad current directory
    exit 1
fi

maybe_copy()
{
    SRC=$1
    DST=$CPYTHON/$2
    if cmp $DST $SRC
    then
        return
    fi
    echo ======== $SRC === $DST ========
    diff -u $DST $SRC
    echo -n "Copy $SRC? [y/N/back] "
    read X
    case $X in
        [yY]*) echo Copying $SRC; cp $SRC $DST;;
        back) echo Copying TO $SRC; cp $DST $SRC;;
        *) echo Not copying $SRC;;
    esac
}

for i in `(cd asyncio && ls *.py)`
do
    if [ $i == selectors.py ]
    then
        if [ "`(cd $CPYTHON; hg branch)`" == "3.4" ]
        then
            echo "Destination is 3.4 branch -- ignoring selectors.py"
        else
            maybe_copy asyncio/$i Lib/$i
        fi
    else
        maybe_copy asyncio/$i Lib/asyncio/$i
    fi
done

for i in `(cd tests && ls *.py *.pem)`
do
    if [ $i == test_selectors.py ]
    then
        continue
    fi
    maybe_copy tests/$i Lib/test/test_asyncio/$i
done

maybe_copy overlapped.c Modules/overlapped.c