Commit fbc21c87 authored by Jason Madden

Run tests in 3.7 dev mode; cleanup resource warnings

Especially as reported on PyPy.

The testrunner will now show the output of a successful test if the
output contains ResourceWarning.

Fix a bunch of such warnings.

Import _testcapi in testrunner to try to avoid the concurrency issues on PyPy.
parent b832bfcb
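For context, the runner-side change is small: the run() helper (see the util.py hunk further down) prints a passing test's captured output whenever that output mentions ResourceWarning. A minimal sketch of the rule, not gevent's exact code:

    # Show a child's output if it failed, if verbose is on, or if the
    # (passing) output mentions ResourceWarning.
    def should_show_output(out, failed, verbose):
        return bool(out) and (failed or verbose or b'ResourceWarning' in out)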
......@@ -140,6 +140,8 @@
addition, all *timeout* values less than zero are interpreted like
*None* (as they always were under libev). See :issue:`1127`.
- Monkey-patching now defaults to patching ``threading.Event``.
1.3a1 (2018-01-27)
==================
......
......@@ -83,9 +83,9 @@ Running Tests
There are a few different ways to run the tests. To simply run the
tests on one version of Python during development, try this::
python setup.py develop
cd src/greentest
PYTHONPATH=.. python testrunner.py --config known_failures.py
(env) $ pip install -e .
(env) $ cd src/greentest
(env) $ python ./testrunner.py
Before submitting a pull request, it's a good idea to run the tests
across all supported versions of Python, and to check the code quality
......@@ -100,7 +100,7 @@ coverage metrics through the `coverage.py`_ package. That would go
something like this::
cd src/greentest
PYTHONPATH=.. python testrunner.py --config known_failures.py --coverage
python testrunner.py --coverage
coverage combine
coverage html -i
<open htmlcov/index.html>
......
......@@ -132,7 +132,7 @@ install:
# target Python version and architecture
# Note that psutil won't build under PyPy on Windows.
- "%CMD_IN_ENV% pip install -e git+https://github.com/cython/cython.git@63cd3bbb5eac22b92808eeb90b512359e3def20a#egg=cython"
- "%CMD_IN_ENV% pip install -U setuptools wheel greenlet cffi dnspython idna"
- "%CMD_IN_ENV% pip install -U setuptools wheel greenlet cffi dnspython idna requests"
- ps:
if ("${env:PYTHON_ID}" -ne "pypy") {
......
......@@ -26,6 +26,7 @@ psutil
perf
# Used in a test
zope.interface
requests
# For viewing README.rst (restview --long-description),
# CONTRIBUTING.rst, etc.
# https://github.com/mgedmin/restview
......
......@@ -9,26 +9,21 @@ from gevent import monkey
# patches stdlib (including socket and ssl modules) to cooperate with other greenlets
monkey.patch_all()
import sys
import requests
# Note that all of these redirect to HTTPS, so
# Note that we're using HTTPS, so
# this demonstrates that SSL works.
urls = [
'http://www.google.com',
'http://www.apple.com',
'http://www.python.org'
'https://www.google.com/',
'https://www.apple.com/',
'https://www.python.org/'
]
if sys.version_info[0] == 3:
from urllib.request import urlopen # pylint:disable=import-error,no-name-in-module
else:
from urllib2 import urlopen # pylint: disable=import-error
def print_head(url):
print('Starting %s' % url)
data = urlopen(url).read()
data = requests.get(url).text
print('%s: %s bytes: %r' % (url, len(data), data[:50]))
jobs = [gevent.spawn(print_head, _url) for _url in urls]
......
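Read as a whole, the updated concurrent_download example looks roughly like this (the final joinall call sits outside the hunk above and is assumed from the original example):

    import gevent
    from gevent import monkey
    # patches stdlib (including socket and ssl modules) to cooperate with other greenlets
    monkey.patch_all()

    import requests  # added to the test/benchmark requirements in this commit

    # Note that we're using HTTPS, so this demonstrates that SSL works.
    urls = [
        'https://www.google.com/',
        'https://www.apple.com/',
        'https://www.python.org/',
    ]

    def print_head(url):
        print('Starting %s' % url)
        data = requests.get(url).text
        print('%s: %s bytes: %r' % (url, len(data), data[:50]))

    jobs = [gevent.spawn(print_head, _url) for _url in urls]
    gevent.joinall(jobs)  # assumed: not shown in the hunk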
......@@ -38,10 +38,13 @@ if PY3:
if value.__traceback__ is not tb and tb is not None:
raise value.with_traceback(tb)
raise value
def exc_clear():
pass
else:
from gevent._util_py2 import reraise # pylint:disable=import-error,no-name-in-module
reraise = reraise # export
exc_clear = sys.exc_clear
## Functions
if PY3:
......
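The shim lets callers clear the current exception unconditionally: on Python 2, sys.exc_clear() drops the exception (and its traceback) that can otherwise keep objects such as a failed socket alive; on Python 3 the call is a no-op. A hedged usage sketch; the connect helper below is illustrative, not gevent's code:

    import socket
    from gevent._compat import exc_clear

    def try_connect(sock, address):
        try:
            sock.connect(address)
        except socket.error:
            # On Python 2 the pending exception would pin the socket (and its
            # traceback) in memory; clearing it avoids that. No-op on Python 3.
            exc_clear()
            return False
        return True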
......@@ -283,19 +283,22 @@ class socket(object):
if self._closed:
self.close()
def _drop_events(self):
if self._read_event is not None:
self.hub.cancel_wait(self._read_event, cancel_wait_ex, True)
self._read_event = None
if self._write_event is not None:
self.hub.cancel_wait(self._write_event, cancel_wait_ex, True)
self._write_event = None
def _real_close(self, _ss=_socket.socket, cancel_wait_ex=cancel_wait_ex):
# This function should not reference any globals. See Python issue #808164.
# Break any reference to the loop.io objects. Our fileno,
# which they were tied to, is now free to be reused, so these
# objects are no longer functional.
self._drop_events()
if self._read_event is not None:
self.hub.cancel_wait(self._read_event, cancel_wait_ex, True)
self._read_event = None
if self._write_event is not None:
self.hub.cancel_wait(self._write_event, cancel_wait_ex, True)
self._write_event = None
_ss.close(self._sock)
# Break any references to the underlying socket object. Tested
......
......@@ -646,6 +646,7 @@ class SSLSocket(socket):
SSL channel, and the address of the remote client."""
newsock, addr = socket.accept(self)
newsock._drop_events()
newsock = self._context.wrap_socket(newsock,
do_handshake_on_connect=self.do_handshake_on_connect,
suppress_ragged_eofs=self.suppress_ragged_eofs,
......
......@@ -625,6 +625,7 @@ class SSLSocket(socket):
SSL channel, and the address of the remote client."""
newsock, addr = socket.accept(self)
newsock._drop_events()
newsock = self._context.wrap_socket(newsock,
do_handshake_on_connect=self.do_handshake_on_connect,
suppress_ragged_eofs=self.suppress_ragged_eofs,
......
......@@ -413,6 +413,11 @@ def set_hub(hub):
_threadlocal.hub = hub
class _dummy_greenlet(object):
def throw(self):
pass
_dummy_greenlet = _dummy_greenlet()
def _config(default, envvar):
......@@ -637,11 +642,12 @@ class Hub(RawGreenlet):
# See https://github.com/gevent/gevent/issues/1089
return
if watcher.callback is not None:
print('Scheduling close for', watcher, close_watcher)
self.loop.run_callback(self._cancel_wait, watcher, error, close_watcher)
elif close_watcher:
watcher.close()
def _cancel_wait(self, watcher, error, close_watcher):
def _cancel_wait(self, watcher, error, close_watcher, _dummy_greenlet=_dummy_greenlet):
# We have to check again to see if it was still active by the time
# our callback actually runs.
active = watcher.active
......@@ -649,11 +655,9 @@ class Hub(RawGreenlet):
if close_watcher:
watcher.close()
if active:
switch = cb
if switch is not None:
greenlet = getattr(switch, '__self__', None)
if greenlet is not None:
greenlet.throw(error)
# The callback should be greenlet.switch(). It may or may not be None.
greenlet = getattr(cb, '__self__', _dummy_greenlet)
greenlet.throw(error)
def run(self):
"""
......
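The rewritten _cancel_wait drops the explicit None check on the callback: getattr(cb, '__self__', _dummy_greenlet) yields the waiting greenlet when cb is a bound switch method, and the do-nothing dummy otherwise, so throw() can be called unconditionally. A minimal sketch of the pattern (not a copy of the hub code; the dummy here accepts the error argument):

    class _DummyGreenlet(object):
        def throw(self, *args):
            # Nothing is waiting, so there is nothing to wake or fail.
            pass

    _dummy = _DummyGreenlet()

    def deliver_error(cb, error):
        # cb is expected to be greenlet.switch (a bound method) or None.
        greenlet = getattr(cb, '__self__', _dummy)
        greenlet.throw(error)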
......@@ -326,11 +326,11 @@ def _patch_existing_locks(threading):
o.owner = tid
def patch_thread(threading=True, _threading_local=True, Event=False, logging=True,
def patch_thread(threading=True, _threading_local=True, Event=True, logging=True,
existing_locks=True,
_warnings=None):
"""
patch_thread(threading=True, _threading_local=True, Event=False, logging=True, existing_locks=True) -> None
patch_thread(threading=True, _threading_local=True, Event=True, logging=True, existing_locks=True) -> None
Replace the standard :mod:`thread` module to make it greenlet-based.
......@@ -354,6 +354,8 @@ def patch_thread(threading=True, _threading_local=True, Event=False, logging=Tru
.. versionchanged:: 1.1b1
Add *logging* and *existing_locks* params.
.. versionchanged:: 1.3a2
``Event`` defaults to True.
"""
# XXX: Simplify
# pylint:disable=too-many-branches,too-many-locals
......@@ -666,7 +668,7 @@ def _check_repatching(**module_settings):
def patch_all(socket=True, dns=True, time=True, select=True, thread=True, os=True, ssl=True, httplib=False,
subprocess=True, sys=False, aggressive=True, Event=False,
subprocess=True, sys=False, aggressive=True, Event=True,
builtins=True, signal=True):
"""
Do all of the default monkey patching (calls every other applicable
......@@ -680,6 +682,8 @@ def patch_all(socket=True, dns=True, time=True, select=True, thread=True, os=Tru
Issue a :mod:`warning <warnings>` if this function is called with ``os=False``
and ``signal=True``. This will cause SIGCHLD handlers to not be called. This may
be an error in the future.
.. versionchanged:: 1.3a2
``Event`` defaults to True.
"""
# pylint:disable=too-many-locals,too-many-branches
......
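For user code, the new defaults mean a plain patch_all() (or patch_thread()) now replaces threading.Event with gevent's Event; a small sketch of the effect, with the check shown purely for illustration:

    from gevent import monkey
    monkey.patch_all()               # Event=True is now the default (1.3a2)

    import threading
    from gevent.event import Event
    print(threading.Event is Event)  # expected: True once patched

    # To keep the stdlib class, opt out explicitly:
    # monkey.patch_all(Event=False)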
......@@ -13,8 +13,8 @@ as well as the constants from the :mod:`socket` module are imported into this mo
# Our import magic sadly makes this warning useless
# pylint: disable=undefined-variable
import sys
from gevent._compat import PY3
from gevent._compat import exc_clear
from gevent._util import copy_globals
......@@ -60,16 +60,21 @@ except AttributeError:
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None):
"""Connect to *address* and return the socket object.
"""
create_connection(address, timeout=None, source_address=None) -> socket
Connect to *address* and return the :class:`gevent.socket.socket`
object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
A host of '' or port 0 tells the OS to use the default.
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by
:func:`getdefaulttimeout` is used. If *source_address* is set it
must be a tuple of (host, port) for the socket to bind as a source
address before making the connection. A host of '' or port 0 tells
the OS to use the default.
"""
host, port = address
......@@ -102,12 +107,7 @@ def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=N
# because _socket.socket.connect() is a built-in. this is
# similar to "getnameinfo loses a reference" failure in
# test_socket.py
try:
c = sys.exc_clear
except AttributeError:
pass # Python 3 doesn't have this
else:
c()
exc_clear()
except BaseException:
# Things like GreenletExit, Timeout and KeyboardInterrupt.
# These get raised immediately, being sure to
......@@ -117,7 +117,10 @@ def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=N
sock = None
raise
else:
return sock
try:
return sock
finally:
sock = None
# This is promised to be in the __all__ of the _source, but, for circularity reasons,
......
......@@ -43,6 +43,7 @@ from greentest.sysinfo import PY37
from greentest.sysinfo import PYPY
from greentest.sysinfo import PYPY3
from greentest.sysinfo import CPYTHON
from greentest.sysinfo import PLATFORM_SPECIFIC_SUFFIXES
from greentest.sysinfo import NON_APPLICABLE_SUFFIXES
......
......@@ -804,6 +804,13 @@ if PY34:
'test_socket.InterruptedSendTimeoutTest.testInterruptedSendmsgTimeout',
]
if TRAVIS:
disabled_tests += [
# This has been seen to produce "Inconsistency detected by
# ld.so: dl-open.c: 231: dl_open_worker: Assertion
# `_dl_debug_initialize (0, args->nsid)->r_state ==
# RT_CONSISTENT' failed!" and fail.
'test_threading.ThreadTests.test_is_alive_after_fork',
]
if TRAVIS:
disabled_tests += [
'test_subprocess.ProcessTestCase.test_double_close_on_error',
......
......@@ -25,6 +25,7 @@ import gevent.core
from gevent import _compat as gsysinfo
PYPY = gsysinfo.PYPY
CPYTHON = not PYPY
VERBOSE = sys.argv.count('-v') > 1
WIN = gsysinfo.WIN
LINUX = sys.platform.startswith('linux')
......
......@@ -14,17 +14,35 @@ from greentest import util
from greentest.util import log
from greentest.sysinfo import RUNNING_ON_CI
from greentest.sysinfo import PYPY
from greentest.sysinfo import PY3
from greentest.sysinfo import RESOLVER_ARES
from greentest.sysinfo import LIBUV
from greentest import six
# Import this while we're probably single-threaded/single-processed
# to try to avoid issues with PyPy 5.10.
# See https://bitbucket.org/pypy/pypy/issues/2769/systemerror-unexpected-internal-exception
try:
__import__('_testcapi')
except (ImportError, OSError, IOError):
# This can raise a wide variety of errors
pass
TIMEOUT = 180
TIMEOUT = 100
NWORKERS = int(os.environ.get('NWORKERS') or max(cpu_count() - 1, 4))
if NWORKERS > 10:
NWORKERS = 10
DEFAULT_RUN_OPTIONS = {
'timeout': TIMEOUT
}
# A mapping from test file basename to a dictionary of
# options that will be applied on top of the DEFAULT_RUN_OPTIONS.
TEST_FILE_OPTIONS = {
}
if RUNNING_ON_CI:
# Too many and we get spurious timeouts
NWORKERS = 4
......@@ -36,11 +54,26 @@ RUN_ALONE = [
'test__examples.py',
]
if RUNNING_ON_CI and PYPY and LIBUV:
if RUNNING_ON_CI:
RUN_ALONE += [
# https://bitbucket.org/pypy/pypy/issues/2769/systemerror-unexpected-internal-exception
'test__pywsgi.py',
# Partial workaround for the _testcapi issue on PyPy,
# but also because signal delivery can sometimes be slow, and this
# spawns processes of its own
'test_signal.py',
]
if PYPY:
# This often takes much longer on PyPy on CI.
TEST_FILE_OPTIONS['test__threadpool.py'] = {'timeout': 180}
if PY3:
RUN_ALONE += [
# Sometimes shows unexpected timeouts
'test_socket.py',
]
if LIBUV:
RUN_ALONE += [
# https://bitbucket.org/pypy/pypy/issues/2769/systemerror-unexpected-internal-exception
'test__pywsgi.py',
]
# tests that can't be run when coverage is enabled
IGNORE_COVERAGE = [
......@@ -72,7 +105,7 @@ def run_many(tests, configured_failing_tests=(), failfast=False, quiet=False):
passed = {}
NWORKERS = min(len(tests), NWORKERS) or 1
print('thread pool size:', NWORKERS, '\n')
pool = ThreadPool(NWORKERS)
util.BUFFER_OUTPUT = NWORKERS > 1
......@@ -116,6 +149,7 @@ def run_many(tests, configured_failing_tests=(), failfast=False, quiet=False):
try:
try:
log("Running tests in parallel with concurrency %s" % (NWORKERS,))
for cmd, options in tests:
total += 1
options = options or {}
......@@ -126,6 +160,7 @@ def run_many(tests, configured_failing_tests=(), failfast=False, quiet=False):
pool.close()
pool.join()
log("Running tests marked standalone")
for cmd, options in run_alone:
run_one(cmd, **options)
......@@ -174,7 +209,7 @@ def discover(tests=None, ignore_files=None,
tests = sorted(tests)
to_process = []
default_options = {'timeout': TIMEOUT}
for filename in tests:
with open(filename, 'rb') as f:
......@@ -191,7 +226,9 @@ def discover(tests=None, ignore_files=None,
to_process.append((cmd, options))
else:
cmd = [sys.executable, '-u', filename]
to_process.append((cmd, default_options.copy()))
options = DEFAULT_RUN_OPTIONS.copy()
options.update(TEST_FILE_OPTIONS.get(filename, {}))
to_process.append((cmd, options))
return to_process
......@@ -297,10 +334,11 @@ def main():
parser.add_argument('--ignore')
parser.add_argument('--discover', action='store_true')
parser.add_argument('--full', action='store_true')
parser.add_argument('--config')
parser.add_argument('--config', default='known_failures.py')
parser.add_argument('--failfast', action='store_true')
parser.add_argument("--coverage", action="store_true")
parser.add_argument("--quiet", action="store_true")
parser.add_argument("--quiet", action="store_true", default=True)
parser.add_argument("--verbose", action="store_false", dest='quiet')
parser.add_argument('tests', nargs='*')
options = parser.parse_args()
FAILING_TESTS = []
......@@ -318,13 +356,7 @@ def main():
# in this directory; makes them easier to combine and use with coverage report)
os.environ['COVERAGE_FILE'] = os.path.abspath(".") + os.sep + ".coverage"
print("Enabling coverage to", os.environ['COVERAGE_FILE'])
if options.config:
config = {}
with open(options.config) as f:
config_data = f.read()
six.exec_(config_data, config)
FAILING_TESTS = config['FAILING_TESTS']
IGNORED_TESTS = config['IGNORED_TESTS']
if 'PYTHONWARNINGS' not in os.environ and not sys.warnoptions:
# Enable default warnings such as ResourceWarning.
......@@ -337,12 +369,31 @@ def main():
# back on __name__ and __path__". I have no idea what that means, but it seems harmless
# and is annoying.
os.environ['PYTHONWARNINGS'] = 'default,ignore:::site:,ignore:::importlib._bootstrap:,ignore:::importlib._bootstrap_external:'
if 'PYTHONFAULTHANDLER' not in os.environ:
os.environ['PYTHONFAULTHANDLER'] = 'true'
if 'GEVENT_DEBUG' not in os.environ:
os.environ['GEVENT_DEBUG'] = 'debug'
if 'PYTHONTRACEMALLOC' not in os.environ:
os.environ['PYTHONTRACEMALLOC'] = '10'
if 'PYTHONDEVMODE' not in os.environ:
# Python 3.7
os.environ['PYTHONDEVMODE'] = '1'
if options.config:
config = {}
with open(options.config) as f:
config_data = f.read()
six.exec_(config_data, config)
FAILING_TESTS = config['FAILING_TESTS']
IGNORED_TESTS = config['IGNORED_TESTS']
tests = discover(options.tests,
ignore_files=options.ignore,
ignored=IGNORED_TESTS,
......
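The runner now fills in several debugging-related environment variables when they are absent; to reproduce that environment for a single test by hand, something along these lines would work (the test file name is only an example, and the PYTHONWARNINGS value is simplified):

    import os
    import subprocess
    import sys

    env = dict(os.environ)
    env.setdefault('PYTHONDEVMODE', '1')          # Python 3.7 development mode
    env.setdefault('PYTHONFAULTHANDLER', 'true')
    env.setdefault('PYTHONTRACEMALLOC', '10')
    env.setdefault('GEVENT_DEBUG', 'debug')
    env.setdefault('PYTHONWARNINGS', 'default')   # enables ResourceWarning, among others
    subprocess.check_call([sys.executable, '-u', 'test__socket.py'], env=env)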
......@@ -26,17 +26,23 @@ from greentest import sysinfo
from greentest import leakcheck
from greentest.testcase import TestCase
SMALLEST_RELIABLE_DELAY = 0.001 # 1ms, because of libuv
SMALL_TICK = 0.01
SMALL_TICK_MIN_ADJ = SMALLEST_RELIABLE_DELAY
SMALL_TICK_MAX_ADJ = 0.11
if sysinfo.RUNNING_ON_APPVEYOR:
# Timing resolution is extremely poor on Appveyor
# and subject to jitter.
SMALL_TICK_MAX_ADJ = 1.5
class _DelayWaitMixin(object):
_default_wait_timeout = 0.01
_default_delay_min_adj = 0.001
if not sysinfo.RUNNING_ON_APPVEYOR:
_default_delay_max_adj = 0.11
else:
# Timing resolution is extremely poor on Appveyor
# and subject to jitter.
_default_delay_max_adj = 1.5
_default_wait_timeout = SMALL_TICK
_default_delay_min_adj = SMALL_TICK_MIN_ADJ
_default_delay_max_adj = SMALL_TICK_MAX_ADJ
def wait(self, timeout):
raise NotImplementedError('override me in subclass')
......@@ -69,7 +75,7 @@ class _DelayWaitMixin(object):
return result
def test_outer_timeout_is_not_lost(self):
timeout = gevent.Timeout.start_new(0.001, ref=False)
timeout = gevent.Timeout.start_new(SMALLEST_RELIABLE_DELAY, ref=False)
try:
with self.assertRaises(gevent.Timeout) as exc:
self.wait(timeout=1)
......@@ -77,18 +83,16 @@ class _DelayWaitMixin(object):
finally:
timeout.close()
LARGE_TICK = 0.2
LARGE_TICK_MIN_ADJ = LARGE_TICK / 2.0
LARGE_TICK_MAX_ADJ = SMALL_TICK_MAX_ADJ
class AbstractGenericWaitTestCase(_DelayWaitMixin, TestCase):
# pylint:disable=abstract-method
_default_wait_timeout = 0.2
_default_delay_min_adj = 0.1
if not sysinfo.RUNNING_ON_APPVEYOR:
_default_delay_max_adj = 0.11
else:
# Timing resolution is very poor on Appveyor
# and subject to jitter
_default_delay_max_adj = 1.5
_default_wait_timeout = LARGE_TICK
_default_delay_min_adj = LARGE_TICK_MIN_ADJ
_default_delay_max_adj = LARGE_TICK_MAX_ADJ
@leakcheck.ignores_leakcheck # waiting checks can be very sensitive to timing
def test_returns_none_after_timeout(self):
......@@ -107,7 +111,7 @@ class AbstractGenericGetTestCase(_DelayWaitMixin, TestCase):
def test_raises_timeout_number(self):
with self.assertRaises(self.Timeout):
self._wait_and_check(timeout=0.01)
self._wait_and_check(timeout=SMALL_TICK)
# get raises Timeout after timeout expired
self.cleanup()
......
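The renamed constants keep the old tolerances: a measured delay is expected to fall within the min/max adjustments around the nominal tick. Roughly, and only as an illustration of how the numbers relate (this is not the project's testcase helper, and the assertion assumes a reasonably idle machine):

    import time
    import gevent

    SMALLEST_RELIABLE_DELAY = 0.001  # 1ms, because of libuv
    SMALL_TICK = 0.01
    SMALL_TICK_MIN_ADJ = SMALLEST_RELIABLE_DELAY
    SMALL_TICK_MAX_ADJ = 0.11

    start = time.time()
    gevent.sleep(SMALL_TICK)
    elapsed = time.time() - start
    assert SMALL_TICK - SMALL_TICK_MIN_ADJ <= elapsed <= SMALL_TICK + SMALL_TICK_MAX_ADJ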
......@@ -121,7 +121,7 @@ def getname(command, env=None, setenv=None):
return ' '.join(result)
def start(command, **kwargs):
def start(command, quiet=False, **kwargs):
timeout = kwargs.pop('timeout', None)
preexec_fn = None
if not os.environ.get('DO_NOT_SETPGRP'):
......@@ -138,7 +138,8 @@ def start(command, **kwargs):
env = os.environ.copy()
env.update(setenv)
log('+ %s', name)
if not quiet:
log('+ %s', name)
popen = Popen(command, preexec_fn=preexec_fn, env=env, **kwargs)
popen.name = name
popen.setpgrp_enabled = preexec_fn is not None
......@@ -176,11 +177,12 @@ def run(command, **kwargs):
buffer_output = kwargs.pop('buffer_output', BUFFER_OUTPUT)
quiet = kwargs.pop('quiet', QUIET)
verbose = not quiet
nested = kwargs.pop('nested', False)
if buffer_output:
assert 'stdout' not in kwargs and 'stderr' not in kwargs, kwargs
kwargs['stderr'] = subprocess.STDOUT
kwargs['stdout'] = subprocess.PIPE
popen = start(command, **kwargs)
popen = start(command, quiet=nested, **kwargs)
name = popen.name
try:
time_start = time.time()
......@@ -195,7 +197,7 @@ def run(command, **kwargs):
assert not err
with lock: # pylint:disable=not-context-manager
failed = bool(result)
if out and (failed or verbose):
if out and (failed or verbose or b'ResourceWarning' in out):
out = out.strip().decode('utf-8', 'ignore')
if out:
out = ' ' + out.replace('\n', '\n ')
......@@ -204,7 +206,7 @@ def run(command, **kwargs):
log('| %s\n%s', name, out)
if result:
log('! %s [code %s] [took %.1fs]', name, result, took)
else:
elif not nested:
log('- %s [took %.1fs]', name, took)
if took >= MIN_RUNTIME:
runtimelog.append((-took, name))
......
......@@ -171,6 +171,7 @@ if PYPY:
if LIBUV:
IGNORED_TESTS += [
# XXX: Re-enable this when we can investigate more.
# This has started crashing with a SystemError.
# I cannot reproduce with the same version on macOS
# and I cannot reproduce with the same version in a Linux vm.
......@@ -179,6 +180,18 @@ if PYPY:
'test__pywsgi.py',
]
IGNORED_TESTS += [
# XXX Re-enable these when we have more time to investigate.
# This test, which normally takes ~60s, sometimes
# hangs forever after running several tests. I cannot reproduce,
# it seems highly load dependent. Observed with both libev and libuv.
'test__threadpool.py',
# This test, which normally takes 4-5s, sometimes
# hangs forever after running two tests. I cannot reproduce,
# it seems highly load dependent. Observed with both libev and libuv.
'test_threading_2.py',
]
if PY3 and TRAVIS:
FAILING_TESTS += [
## ---
......@@ -228,6 +241,15 @@ if sys.version_info[:2] >= (3, 4) and APPVEYOR:
'FLAKY test_selectors.py'
]
if sys.version_info == (3, 7, 0, 'beta', 2) and os.environ.get("PYTHONDEVMODE"):
# These crash when in devmode.
# See https://twitter.com/ossmkitty/status/970693025130311680
# https://bugs.python.org/issue33005
FAILING_TESTS += [
'test__monkey_sigchld_2.py',
'test__monkey_sigchld_3.py'
]
if COVERAGE:
# The gevent concurrency plugin tends to slow things
# down and get us past our default timeout value. These
......
......@@ -49,6 +49,12 @@ def TESTRUNNER(tests=None):
'timeout': TIMEOUT,
'setenv': {
'PYTHONPATH': PYTHONPATH,
# debug produces resource tracking warnings for the
# CFFI backends. On Python 2, many of the stdlib tests
# rely on refcounting to close sockets so they produce
# lots of noise. Python 3 is not completely immune;
# test_ftplib.py tends to produce warnings---and the Python 3
# test framework turns those into test failures!
'GEVENT_DEBUG': 'error',
}
}
......@@ -62,12 +68,10 @@ def TESTRUNNER(tests=None):
util.log("Overriding %s from %s with file from %s", filename, directory, full_directory)
continue
yield basic_args + [filename], options.copy()
yield basic_args + ['--Event', filename], options.copy()
options['cwd'] = full_directory
for filename in version_tests:
yield basic_args + [filename], options.copy()
yield basic_args + ['--Event', filename], options.copy()
def main():
......
from __future__ import absolute_import, print_function
import greentest
from gevent import core
from gevent import config
from greentest.sysinfo import CFFI_BACKEND
from gevent.core import READ # pylint:disable=no-name-in-module
from gevent.core import WRITE # pylint:disable=no-name-in-module
IS_CFFI = hasattr(core, 'libuv') or hasattr(core, 'libev')
class Test(greentest.TestCase):
__timeout__ = None
def test_types(self):
loop = core.loop(default=False)
lst = []
def setUp(self):
super(Test, self).setUp()
self.loop = config.loop(default=False)
self.timer = self.loop.timer(0.01)
io = loop.timer(0.01)
def tearDown(self):
if self.timer is not None:
self.timer.close()
if self.loop is not None:
self.loop.destroy()
self.loop = self.timer = None
super(Test, self).tearDown()
def test_non_callable_to_start(self):
# test that cannot pass non-callable thing to start()
self.assertRaises(TypeError, io.start, None)
self.assertRaises(TypeError, io.start, 5)
self.assertRaises(TypeError, self.timer.start, None)
self.assertRaises(TypeError, self.timer.start, 5)
def test_non_callable_after_start(self):
# test that cannot set 'callback' to non-callable thing later either
io.start(lambda *args: lst.append(args))
self.assertEqual(io.args, ())
try:
io.callback = False
raise AssertionError('"io.callback = False" must raise TypeError')
except TypeError:
pass
try:
io.callback = 5
raise AssertionError('"io.callback = 5" must raise TypeError')
except TypeError:
pass
# test that args can be changed later
io.args = (1, 2, 3)
# test that only tuple and None are accepted by 'args' attribute
self.assertRaises(TypeError, setattr, io, 'args', 5)
self.assertEqual(io.args, (1, 2, 3))
self.assertRaises(TypeError, setattr, io, 'args', [4, 5])
self.assertEqual(io.args, (1, 2, 3))
lst = []
timer = self.timer
timer.start(lst.append)
with self.assertRaises(TypeError):
timer.callback = False
with self.assertRaises(TypeError):
timer.callback = 5
def test_args_can_be_changed_after_start(self):
lst = []
timer = self.timer
self.timer.start(lst.append)
self.assertEqual(timer.args, ())
timer.args = (1, 2, 3)
self.assertEqual(timer.args, (1, 2, 3))
# Only tuple can be args
with self.assertRaises(TypeError):
timer.args = 5
with self.assertRaises(TypeError):
timer.args = [4, 5]
self.assertEqual(timer.args, (1, 2, 3))
# None also works, means empty tuple
# XXX why?
io.args = None
self.assertEqual(io.args, None)
time_f = getattr(core, 'time', loop.now)
start = time_f()
timer.args = None
self.assertEqual(timer.args, None)
def test_run(self):
loop = self.loop
lst = []
self.timer.start(lambda *args: lst.append(args))
loop.run()
took = time_f() - start
loop.update_now()
self.assertEqual(lst, [()])
if hasattr(core, 'time'):
# only useful on libev
assert took < 1, took
io.start(reset, io, lst)
del io
# Even if we lose all references to it, the ref in the callback
# keeps it alive
self.timer.start(reset, self.timer, lst)
self.timer = None
loop.run()
self.assertEqual(lst, [(), 25])
loop.destroy()
def test_invalid_fd(self):
loop = core.loop(default=False)
loop = self.loop
# Negative case caught everywhere. ValueError
# on POSIX, OSError on Windows Py3, IOError on Windows Py2
with self.assertRaises((ValueError, OSError, IOError)):
loop.io(-1, core.READ)
loop.io(-1, READ)
loop.destroy()
@greentest.skipOnWindows("Stdout can't be watched on Win32")
def test_reuse_io(self):
loop = core.loop(default=False)
loop = self.loop
# Watchers aren't reused once all outstanding
# refs go away BUT THEY MUST BE CLOSED
tty_watcher = loop.io(1, core.WRITE)
watcher_handle = tty_watcher._watcher if IS_CFFI else tty_watcher
tty_watcher = loop.io(1, WRITE)
watcher_handle = tty_watcher._watcher if CFFI_BACKEND else tty_watcher
tty_watcher.close()
del tty_watcher
# XXX: Note there is a cycle in the CFFI code
......@@ -84,15 +108,16 @@ class Test(greentest.TestCase):
import gc
gc.collect()
tty_watcher = loop.io(1, core.WRITE)
self.assertIsNot(tty_watcher._watcher if IS_CFFI else tty_watcher, watcher_handle)
tty_watcher = loop.io(1, WRITE)
self.assertIsNot(tty_watcher._watcher if CFFI_BACKEND else tty_watcher, watcher_handle)
tty_watcher.close()
loop.destroy()
def reset(watcher, lst):
watcher.args = None
watcher.callback = lambda: None
lst.append(25)
watcher.close()
if __name__ == '__main__':
......
......@@ -9,11 +9,13 @@ class Test(util.TestServer):
def _run_all_tests(self):
sock = socket.socket(type=socket.SOCK_DGRAM)
sock.connect(('127.0.0.1', 9000))
sock.send(b'Test udp_server')
data, _address = sock.recvfrom(8192)
self.assertEqual(data, b'Received 15 bytes')
sock.close()
try:
sock.connect(('127.0.0.1', 9000))
sock.send(b'Test udp_server')
data, _address = sock.recvfrom(8192)
self.assertEqual(data, b'Received 15 bytes')
finally:
sock.close()
if __name__ == '__main__':
......
......@@ -3,54 +3,59 @@ import os
import glob
import time
import greentest
from greentest import util
cwd = '../../examples/'
ignore = ['wsgiserver.py',
'wsgiserver_ssl.py',
'webproxy.py',
'webpy.py',
'unixsocket_server.py',
'unixsocket_client.py',
'psycopg2_pool.py',
'geventsendfile.py']
ignore = [
'wsgiserver.py',
'wsgiserver_ssl.py',
'webproxy.py',
'webpy.py',
'unixsocket_server.py',
'unixsocket_client.py',
'psycopg2_pool.py',
'geventsendfile.py',
]
ignore += [x[14:] for x in glob.glob('test__example_*.py')]
default_time_range = (2, 4)
time_ranges = {
'concurrent_download.py': (0, 30),
'processes.py': (0, 4)}
'processes.py': (0, 4)
}
class _AbstractTestMixin(object):
time_range = (2, 4)
filename = None
def main(tests=None):
if not tests:
tests = set(os.path.basename(x) for x in glob.glob(cwd + '/*.py'))
tests = sorted(tests)
failed = []
for filename in tests:
if filename in ignore:
continue
min_time, max_time = time_ranges.get(filename, default_time_range)
def test_runs(self):
start = time.time()
if util.run([sys.executable, '-u', filename], timeout=max_time, cwd=cwd):
failed.append(filename)
min_time, max_time = self.time_range
if util.run([sys.executable, '-u', self.filename],
timeout=max_time,
cwd=cwd,
quiet=True,
buffer_output=True,
nested=True,
setenv={'GEVENT_DEBUG': 'error'}):
self.fail("Failed example: " + self.filename)
else:
took = time.time() - start
if took < min_time:
util.log('! Failed example %s: exited too quickly, after %.1fs (expected %.1fs)', filename, took, min_time)
failed.append(filename)
if failed:
util.log('! Failed examples:\n! - %s', '\n! - '.join(failed))
sys.exit(1)
if not tests:
sys.exit('No tests.')
self.assertGreaterEqual(took, min_time)
for filename in glob.glob(cwd + '/*.py'):
bn = os.path.basename(filename)
if bn in ignore:
continue
tc = type('Test_' + bn,
(_AbstractTestMixin, greentest.TestCase),
{
'filename': bn,
'time_range': time_ranges.get(bn, _AbstractTestMixin.time_range)
})
locals()[tc.__name__] = tc
if __name__ == '__main__':
main()
greentest.main()
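The rewritten example test builds one TestCase per example file at import time instead of looping inside main(); a stripped-down sketch of that pattern (using plain unittest here, whereas the diff uses greentest.TestCase and a richer test body):

    import glob
    import os
    import unittest

    class _AbstractTestMixin(object):
        filename = None
        def test_runs(self):
            # The real test shells out to the example and times it;
            # this placeholder only shows the generation pattern.
            self.assertTrue(self.filename)

    for path in glob.glob('../../examples/*.py'):
        bn = os.path.basename(path)
        cls = type('Test_' + bn, (_AbstractTestMixin, unittest.TestCase), {'filename': bn})
        globals()[cls.__name__] = cls

    if __name__ == '__main__':
        unittest.main()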
from __future__ import print_function, division, absolute_import
import time
import greentest
from greentest.flaky import reraiseFlakyTestRaceConditionLibuv
from greentest import timing
import gevent
from gevent import pool
from gevent.timeout import Timeout
DELAY = 0.1
DELAY = timing.LARGE_TICK
class SpecialError(Exception):
......@@ -23,25 +25,23 @@ class Undead(object):
gevent.sleep(1)
except SpecialError:
break
except:
except: # pylint:disable=bare-except
self.shot_count += 1
class Test(greentest.TestCase):
__timeout__ = greentest.LARGE_TIMEOUT
def test_basic(self):
DELAY = 0.05 if not greentest.RUNNING_ON_APPVEYOR else 0.1
s = pool.Group()
s.spawn(gevent.sleep, DELAY)
s.spawn(gevent.sleep, timing.LARGE_TICK)
self.assertEqual(len(s), 1, s)
s.spawn(gevent.sleep, DELAY * 2.)
s.spawn(gevent.sleep, timing.LARGE_TICK * 5)
self.assertEqual(len(s), 2, s)
gevent.sleep(DELAY * 3. / 2.)
try:
self.assertEqual(len(s), 1, s)
except AssertionError:
reraiseFlakyTestRaceConditionLibuv()
gevent.sleep(DELAY)
gevent.sleep(timing.LARGE_TICK * 2 + timing.LARGE_TICK_MIN_ADJ)
self.assertEqual(len(s), 1, s)
gevent.sleep(timing.LARGE_TICK * 5 + timing.LARGE_TICK_MIN_ADJ)
self.assertFalse(s)
def test_waitall(self):
......@@ -86,16 +86,16 @@ class Test(greentest.TestCase):
p2 = gevent.spawn(u2)
def check(count1, count2):
assert p1, p1
assert p2, p2
assert not p1.dead, p1
assert not p2.dead, p2
self.assertTrue(p1)
self.assertTrue(p2)
self.assertFalse(p1.dead, p1)
self.assertFalse(p2.dead, p2)
self.assertEqual(u1.shot_count, count1)
self.assertEqual(u2.shot_count, count2)
gevent.sleep(0.01)
s = pool.Group([p1, p2])
assert len(s) == 2, s
self.assertEqual(len(s), 2, s)
check(0, 0)
s.killone(p1, block=False)
check(0, 0)
......
......@@ -85,6 +85,7 @@ else:
# on Python 2). We still
# count this as success.
self.assertEqual(p.returncode if not WIN else 0, 0)
p.stdout.close()
if __name__ == '__main__':
greentest.main()
......@@ -8,6 +8,7 @@ import unittest
import errno
import weakref
import greentest
......@@ -17,6 +18,8 @@ pid = os.getpid()
PY3 = greentest.PY3
PYPY = greentest.PYPY
CPYTHON = not PYPY
PY2 = not PY3
fd_types = int
if PY3:
long = int
......@@ -115,16 +118,21 @@ class Test(greentest.TestCase):
self.assert_open(s, s.fileno())
return s
def _close_on_teardown(self, resource):
if CPYTHON and PY2:
# Keeping raw sockets alive keeps SSL sockets
# from being closed too, at least on CPython, so we
# need to use weakrefs
self.close_on_teardown.append(weakref.ref(resource))
return resource
# from being closed too, at least on CPython2, so we
# need to use weakrefs.
# In contrast, on PyPy, *only* having a weakref lets the
# original socket die and leak
def _tearDownCloseOnTearDown(self):
self.close_on_teardown = [r() for r in self.close_on_teardown if r() is not None]
super(Test, self)._tearDownCloseOnTearDown()
def _close_on_teardown(self, resource):
self.close_on_teardown.append(weakref.ref(resource))
return resource
def _tearDownCloseOnTearDown(self):
self.close_on_teardown = [r() for r in self.close_on_teardown if r() is not None]
super(Test, self)._tearDownCloseOnTearDown()
class TestSocket(Test):
......@@ -272,7 +280,8 @@ class TestSSL(Test):
# our socket first, so this fails.
pass
else:
self._close_on_teardown(x)
#self._close_on_teardown(x)
x.close()
def _make_ssl_connect_task(self, connector, port):
t = threading.Thread(target=self._ssl_connect_task, args=(connector, port))
......@@ -300,6 +309,9 @@ class TestSSL(Test):
for s in sockets:
s.close()
del sockets
del task
def test_simple_close(self):
s = self.make_open_socket()
fileno = s.fileno()
......@@ -356,7 +368,7 @@ class TestSSL(Test):
try:
client_socket, _addr = listener.accept()
self._close_on_teardown(client_socket)
self._close_on_teardown(client_socket.close)
client_socket = ssl.wrap_socket(client_socket, keyfile=certfile, certfile=certfile, server_side=True)
self._close_on_teardown(client_socket)
fileno = client_socket.fileno()
......@@ -382,7 +394,7 @@ class TestSSL(Test):
try:
client_socket, _addr = listener.accept()
self._close_on_teardown(client_socket)
self._close_on_teardown(client_socket.close) # hard ref
client_socket = ssl.wrap_socket(client_socket, keyfile=certfile, certfile=certfile, server_side=True)
self._close_on_teardown(client_socket)
fileno = client_socket.fileno()
......@@ -432,6 +444,7 @@ class TestSSL(Test):
listener.bind(('127.0.0.1', 0))
port = listener.getsockname()[1]
listener.listen(1)
self._close_on_teardown(listener)
listener = ssl.wrap_socket(listener, keyfile=certfile, certfile=certfile)
connector = socket.socket()
......@@ -459,6 +472,7 @@ class TestSSL(Test):
"https://travis-ci.org/gevent/gevent/jobs/327357684")
def test_serverssl_makefile2(self):
listener = socket.socket()
self._close_on_teardown(listener)
listener.bind(('127.0.0.1', 0))
port = listener.getsockname()[1]
listener.listen(1)
......
......@@ -30,10 +30,7 @@ class TestMonkey(unittest.TestCase):
from gevent import threading as gthreading
self.assertIs(threading._sleep, gthreading._sleep)
self.assertFalse(monkey.is_object_patched('threading', 'Event'))
monkey.patch_thread(Event=True)
# Event patched by default
self.assertTrue(monkey.is_object_patched('threading', 'Event'))
def test_socket(self):
......
......@@ -8,7 +8,7 @@ import sys
import signal
def handle(*args):
def handle(*_args):
if not pid:
# We only do this in the child so our
# parent's waitpid can get the status.
......
......@@ -91,13 +91,19 @@ class TestSelectTypes(greentest.TestCase):
def test_int(self):
sock = socket.socket()
select.select([int(sock.fileno())], [], [], 0.001)
try:
select.select([int(sock.fileno())], [], [], 0.001)
finally:
sock.close()
if hasattr(six.builtins, 'long'):
def test_long(self):
sock = socket.socket()
select.select(
[six.builtins.long(sock.fileno())], [], [], 0.001)
try:
select.select(
[six.builtins.long(sock.fileno())], [], [], 0.001)
finally:
sock.close()
def test_string(self):
self.switch_expected = False
......
......@@ -22,7 +22,7 @@ def wrap_error(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except:
except: # pylint:disable=bare-except
traceback.print_exc()
os._exit(2)
......@@ -278,8 +278,11 @@ class TestTCP(greentest.TestCase):
s.setblocking(0)
std_socket = monkey.get_original('socket', 'socket')(socket.AF_INET, socket.SOCK_DGRAM, 0)
std_socket.setblocking(0)
self.assertEqual(std_socket.type, s.type)
try:
std_socket.setblocking(0)
self.assertEqual(std_socket.type, s.type)
finally:
std_socket.close()
s.close()
......@@ -408,7 +411,7 @@ class TestFunctions(greentest.TestCase):
orig_get_hub = gevent.socket.get_hub
class get_hub(object):
def wait(self, io):
def wait(self, _io):
gevent.sleep(10)
class io(object):
......