Commit 0f3061bc authored by Jason Madden

Refactor known_failures.py to use a compact DSL instead of deeply nested conditions.

Part of #1367
parent 48f6b449
Refactor ``known_failures.py`` to use a compact DSL instead of deeply
nested condition statements.
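The heart of the change is easiest to see in miniature. The sketch below is illustrative only: it is not code from this commit, the condition values are made up, and the names simply mirror the ones the new known_failures.py introduces (Flaky, Ignored, condition objects such as APPVEYOR or PYPY, and a populate() pass that expands the declarations back into the flat lists testrunner.py consumes).

# Simplified, self-contained sketch of the declarative pattern (not gevent's code):
class Condition(object):
    def __init__(self, value, name):
        self.value, self.name = bool(value), name
    def __and__(self, other):
        return Condition(bool(self) and bool(other), '(%s & %s)' % (self.name, other.name))
    def __or__(self, other):
        return Condition(bool(self) or bool(other), '(%s | %s)' % (self.name, other.name))
    def __bool__(self):
        return self.value
    __nonzero__ = __bool__  # Python 2 spelling

# In gevent these flags come from gevent.testing.sysinfo; hard-coded for the sketch.
APPVEYOR = Condition(False, 'APPVEYOR')
TRAVIS = Condition(True, 'TRAVIS')
PYPY = Condition(False, 'PYPY')

class Flaky(object):
    # A declaration: "this test may fail whenever `when` holds."
    def __init__(self, reason='', when=Condition(True, 'ALWAYS')):
        self.reason, self.when = reason, when

class Definitions(object):
    # One attribute per test file, instead of nested if-statements.
    test__example_udp_client = Flaky(
        "Shares port 9000 with the UDP server example on busy CI machines.",
        when=APPVEYOR | (PYPY & TRAVIS),
    )

FAILING_TESTS = []
for name, definition in vars(Definitions).items():
    if isinstance(definition, Flaky) and definition.when:
        FAILING_TESTS.append('FLAKY ' + name + '.py')

print(FAILING_TESTS)  # [] with the values above; the entry appears once its condition holds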
@@ -13,15 +13,17 @@ monkey.patch_all()
from .sysinfo import RUNNING_ON_APPVEYOR
from .sysinfo import PY37
from .sysinfo import PYPY3
from .patched_tests_setup import disable_tests_in_source
from . import support
from . import resources
from . import SkipTest
from . import util

if RUNNING_ON_APPVEYOR and PY37:
if (RUNNING_ON_APPVEYOR and PY37) or PYPY3:
# 3.7 added a stricter mode for thread cleanup.
# It appears to be unstable on Windows (at least appveyor)
# and PyPy3
# and test_socket.py constantly fails with an extra thread
# on some random test. We disable it entirely.
# XXX: Figure out how to make a *definition* in ./support.py actually
@@ -32,10 +32,11 @@ except (ImportError, OSError, IOError):
pass

TIMEOUT = 100 # seconds

DEFAULT_NWORKERS = int(os.environ.get('NWORKERS') or max(cpu_count() - 1, 4))
if DEFAULT_NWORKERS > 10:
DEFAULT_NWORKERS = 10
SUGGESTED_NWORKERS = DEFAULT_NWORKERS

AVAIL_NWORKERS = cpu_count() - 1
DEFAULT_NWORKERS = int(os.environ.get('NWORKERS') or max(AVAIL_NWORKERS, 4))
if DEFAULT_NWORKERS > 15:
DEFAULT_NWORKERS = 10

if RUN_LEAKCHECKS:
# Capturing the stats takes time, and we run each

@@ -149,7 +150,7 @@ class Runner(object):
def __call__(self):
util.log("Running tests in parallel with concurrency %s %s." % (
self._worker_count,
util._colorize('number', '(concurrency available: %d)' % SUGGESTED_NWORKERS)
util._colorize('number', '(concurrency available: %d)' % AVAIL_NWORKERS)
),)
# Setting global state, in theory we can be used multiple times.
# This is fine as long as we are single threaded and call these
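For reference, a sketch of what the new worker-count logic computes when the NWORKERS environment variable is unset (cpu_count here is the standard multiprocessing one; the numbers in the comments are examples only):

from multiprocessing import cpu_count

AVAIL_NWORKERS = cpu_count() - 1           # e.g. 7 on an 8-core box
DEFAULT_NWORKERS = max(AVAIL_NWORKERS, 4)  # small boxes still get 4 workers
if DEFAULT_NWORKERS > 15:                  # only very large machines hit the cap
    DEFAULT_NWORKERS = 10
# The log line above now reports the uncapped AVAIL_NWORKERS as the
# "concurrency available" figure, while DEFAULT_NWORKERS is what actually runs.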
@@ -2,343 +2,468 @@
# The tests listed there must fail (or testrunner.py will report error) unless they are prefixed with FLAKY
# in which cases the result of them is simply ignored
from __future__ import print_function
import sys
import struct

import os
from gevent.testing.sysinfo import RUNNING_ON_APPVEYOR as APPVEYOR
from gevent.testing.sysinfo import RUNNING_ON_TRAVIS as TRAVIS
from gevent.testing.sysinfo import RUN_LEAKCHECKS as LEAKTEST
from gevent.testing.sysinfo import RUN_COVERAGE as COVERAGE
from gevent.testing.sysinfo import RESOLVER_NOT_SYSTEM
from gevent.testing.sysinfo import PYPY
from gevent.testing.sysinfo import PY3
from gevent.testing.sysinfo import PY35
from gevent.testing.sysinfo import OSX
from gevent.testing.sysinfo import LIBUV

IGNORED_TESTS = []

FAILING_TESTS = [
# test__issue6 (see comments in test file) is really flaky on both Travis and Appveyor;
# on Travis we could just run the test again (but that gets old fast), but on appveyor
# we don't have that option without a new commit---and sometimes we really need a build
# to succeed in order to get a release wheel
'FLAKY test__issue6.py',
]

from gevent.testing import sysinfo

class Condition(object):
__slots__ = ()

def __and__(self, other):
return AndCondition(self, other)

def __or__(self, other):
return OrCondition(self, other)

def __nonzero__(self):
return self.__bool__()

def __bool__(self):
raise NotImplementedError

class AbstractBinaryCondition(Condition): # pylint:disable=abstract-method
__slots__ = (
'lhs',
'rhs',
)
OP = None
def __init__(self, lhs, rhs):
self.lhs = lhs
self.rhs = rhs
def __repr__(self):
return "(%r %s %r)" % (
self.lhs,
self.OP,
self.rhs
)
class OrCondition(AbstractBinaryCondition):
__slots__ = ()
OP = '|'
def __bool__(self):
return bool(self.lhs) or bool(self.rhs)
class AndCondition(AbstractBinaryCondition):
__slots__ = ()
OP = '&'
def __bool__(self):
return bool(self.lhs) and bool(self.rhs)
class ConstantCondition(Condition):
__slots__ = (
'value',
'__name__',
)
def __init__(self, value, name=None):
self.value = bool(value)
self.__name__ = name or str(value)
def __bool__(self):
return self.value
def __repr__(self):
return self.__name__
ALWAYS = ConstantCondition(True)
NEVER = ConstantCondition(False)
class _AttrCondition(ConstantCondition):
__slots__ = (
)
def __init__(self, name):
ConstantCondition.__init__(self, getattr(sysinfo, name), name)
PYPY = _AttrCondition('PYPY')
PY3 = _AttrCondition('PY3')
PY2 = _AttrCondition('PY2')
OSX = _AttrCondition('OSX')
LIBUV = _AttrCondition('LIBUV')
WIN = _AttrCondition('WIN')
APPVEYOR = _AttrCondition('RUNNING_ON_APPVEYOR')
TRAVIS = _AttrCondition('RUNNING_ON_TRAVIS')
CI = _AttrCondition('RUNNING_ON_CI')
LEAKTEST = _AttrCondition('RUN_LEAKCHECKS')
COVERAGE = _AttrCondition('RUN_COVERAGE')
RESOLVER_NOT_SYSTEM = _AttrCondition('RESOLVER_NOT_SYSTEM')
BIT_64 = ConstantCondition(struct.calcsize('P') * 8 == 64, 'BIT_64')
PY380_EXACTLY = ConstantCondition(sys.version_info[:3] == (3, 8, 0), 'PY380_EXACTLY')
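# --- Editor's note (illustrative; not part of this commit) ------------------
# Conditions compose with & and | but are only collapsed to True/False when
# bool() is applied, so a definition records *why* it applies as well as
# whether it applies on the current platform. For example:
#
#     cond = APPVEYOR | (PYPY & TRAVIS)
#     repr(cond)   # '(RUNNING_ON_APPVEYOR | (PYPY & RUNNING_ON_TRAVIS))'
#     bool(cond)   # same as bool(APPVEYOR) or (bool(PYPY) and bool(TRAVIS))
# -----------------------------------------------------------------------------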
class _Definition(object):
__slots__ = (
'__name__',
# When does the class of this condition apply?
'when',
# When should this test be run alone, if it's run?
'run_alone',
# Should this test be ignored during coverage measurement?
'ignore_coverage',
# {name: (Condition, value)}
'options',
)
def __init__(self, when, run_alone, ignore_coverage, options):
assert isinstance(when, Condition)
assert isinstance(run_alone, Condition)
assert isinstance(ignore_coverage, Condition)
self.when = when
self.__name__ = None
self.run_alone = run_alone
self.ignore_coverage = ignore_coverage
if options:
for v in options.values():
assert isinstance(v, tuple) and len(v) == 2
assert isinstance(v[0], Condition)
self.options = options
def __set_name__(self, owner, name):
self.__name__ = name
def __repr__(self):
return '<%s for %s when=%r=%s run_alone=%r=%s>' % (
type(self).__name__,
self.__name__,
self.when, bool(self.when),
self.run_alone, bool(self.run_alone)
)
class _Action(_Definition):
__slots__ = (
'reason',
)
def __init__(self, reason='', when=ALWAYS, run_alone=NEVER, ignore_coverage=NEVER,
options=None):
_Definition.__init__(self, when, run_alone, ignore_coverage, options)
self.reason = reason
class RunAlone(_Action):
__slots__ = ()
def __init__(self, reason='', when=ALWAYS, ignore_coverage=NEVER):
_Action.__init__(self, reason, run_alone=when, ignore_coverage=ignore_coverage)
class Failing(_Action):
__slots__ = ()
class Flaky(Failing):
__slots__ = ()
class Ignored(_Action):
__slots__ = ()
class Multi(object):
def __init__(self):
self._conds = []
def flaky(self, reason='', when=True):
self._conds.append(Flaky(reason, when))
return self
def ignored(self, reason='', when=True):
self._conds.append(Ignored(reason, when))
return self
def __set_name__(self, owner, name):
for c in self._conds:
c.__set_name__(owner, name)
class DefinitionsMeta(type):
# a metaclass on Python 3 that makes sure we only set attributes once. pylint doesn't
# warn about that.
@classmethod
def __prepare__(cls, name, bases): # pylint:disable=unused-argument
return SetOnceMapping()
class SetOnceMapping(dict):
def __setitem__(self, name, value):
if name in self:
raise AttributeError(name)
dict.__setitem__(self, name, value)
som = SetOnceMapping()
som[1] = 1
try:
som[1] = 2
except AttributeError:
del som
else:
raise AssertionError("SetOnceMapping is broken")
DefinitionsBase = DefinitionsMeta('DefinitionsBase', (object,), {})
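# --- Editor's note (illustrative; not part of this commit) ------------------
# On Python 3, DefinitionsMeta.__prepare__ hands the class body a
# SetOnceMapping, so accidentally defining the same test twice fails loudly
# at import time instead of silently keeping only the last entry:
#
#     class Broken(DefinitionsBase):          # hypothetical
#         test__example = Flaky(when=APPVEYOR)
#         test__example = Ignored(when=PYPY)  # raises AttributeError('test__example')
# -----------------------------------------------------------------------------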
class Definitions(DefinitionsBase):
test__issue6 = Flaky(
"""test__issue6 (see comments in test file) is really flaky on both Travis and Appveyor;
on Travis we could just run the test again (but that gets old fast), but on appveyor
we don't have that option without a new commit---and sometimes we really need a build
to succeed in order to get a release wheel"""
)
test__core_fork = Ignored(
"""fork watchers don't get called on windows
because fork is not a concept windows has.
See this file for a detailed explanation.""",
when=WIN
)
test__greenletset = Flaky(
when=WIN,
ignore_coverage=PYPY
)
test__example_udp_client = test__example_udp_server = Flaky(
"""
These both run on port 9000 and can step on each other...seems
like the appveyor containers aren't fully port safe? Or it
takes longer for the processes to shut down? Or we run them in
a different order in the process pool than we do other places?
On PyPy on Travis, this fails to get the correct results,
sometimes. I can't reproduce locally
""",
when=APPVEYOR | (PYPY & TRAVIS)
)
if sys.platform == 'win32':
IGNORED_TESTS = [
# fork watchers don't get called on windows
# because fork is not a concept windows has.
# See this file for a detailed explanation.
'test__core_fork.py',
]
# other Windows-related issues (need investigating)
FAILING_TESTS += [
'FLAKY test__greenletset.py',
]
if APPVEYOR:
FAILING_TESTS += [
# These both run on port 9000 and can step on each other...seems like the
# appveyor containers aren't fully port safe? Or it takes longer
# for the processes to shut down? Or we run them in a different order
# in the process pool than we do other places?
'FLAKY test__example_udp_client.py',
'FLAKY test__example_udp_server.py',
# This one sometimes times out, often after output "The process with PID XXX could not be
# terminated. Reason: There is no running instance of the task."
'FLAKY test__example_portforwarder.py',
# This one sometimes randomly closes connections, but no indication
# of a server crash, only a client side close.
'FLAKY test__server_pywsgi.py',
]

if PYPY and LIBUV:
IGNORED_TESTS += [
# This one seems to just stop right after
# patching is done. It passes on a local win 10 vm, and the main
# test_threading_2.py does as well.
# Based on the printouts we added, it appears to not even
# finish importing:
# https://ci.appveyor.com/project/denik/gevent/build/1.0.1277/job/tpvhesij5gldjxqw#L1190
# Ignored because it takes two minutes to time out.
'test_threading.py',
]

if PY3:
FAILING_TESTS += [
# test_set_and_clear in Py3 relies on 5 threads all starting and
# coming to an Event wait point while a sixth thread sleeps for a half
# second. The sixth thread then does something and checks that
# the 5 threads were all at the wait point. But the timing is sometimes
# too tight for appveyor. This happens even if Event isn't
# monkey-patched
'FLAKY test_threading.py',

# Starting in November 2018, on Python 3.7.0, we observe this test crashing.
# I can't reproduce locally.
# | C:\Python37-x64\python.exe -u -mgevent.tests.test__greenness
# 127.0.0.1 - - [09/Nov/2018 16:34:12] code 501, message Unsupported method ('GET')
# 127.0.0.1 - - [09/Nov/2018 16:34:12] "GET / HTTP/1.1" 501 -
# .
# ----------------------------------------------------------------------
# Ran 1 test in 0.031s
# OK
# Windows fatal exception: access violation
# Current thread 0x000003c8 (most recent call first):
# File "c:\projects\gevent\src\gevent\threadpool.py", line 261 in _worker
# Thread 0x00000600 (most recent call first):
# File "c:\projects\gevent\src\gevent\libuv\watcher.py", line 577 in send
# File "c:\projects\gevent\src\gevent\threadpool.py", line 408 in set
# File "c:\projects\gevent\src\gevent\threadpool.py", line 290 in _worker
# Thread 0x000007d4 (most recent call first):
# File "C:\Python37-x64\lib\weakref.py", line 356 in remove
# ! C:\Python37-x64\python.exe -u -mgevent.tests.test__greenness [code 3221225477] [took 1.3s]
# We have also seen this for Python 3.6.6 Nov 13 2018:
# | C:\Python36-x64\python.exe -u -mgevent.tests.test__backdoor
# ss.s.s
# ----------------------------------------------------------------------
# Ran 6 tests in 0.953s
# OK (skipped=4)
# Windows fatal exception: access violation
# Thread 0x00000aec (most recent call first):
# File "C:\Python36-x64\lib\site-packages\gevent\_threading.py", line 84 in wait
# File "C:\Python36-x64\lib\site-packages\gevent\_threading.py", line 166 in get
# File "C:\Python36-x64\lib\site-packages\gevent\threadpool.py", line 270 in _worker
# Thread 0x00000548 (most recent call first):
# Thread 0x000003d0 (most recent call first):
# File "C:\Python36-x64\lib\site-packages\gevent\_threading.py", line 84 in wait
# File "C:\Python36-x64\lib\site-packages\gevent\_threading.py", line 166 in get
# File "C:\Python36-x64\lib\site-packages\gevent\threadpool.py", line 270 in _worker
# Thread 0x00000ad0 (most recent call first):
# Thread 0x00000588 (most recent call first):
# File "C:\Python36-x64\lib\site-packages\gevent\_threading.py", line 84 in wait
# File "C:\Python36-x64\lib\site-packages\gevent\_threading.py", line 166 in get
# File "C:\Python36-x64\lib\site-packages\gevent\threadpool.py", line 270 in _worker
# Thread 0x00000a54 (most recent call first):
# Thread 0x00000768 (most recent call first):
# File "C:\Python36-x64\lib\site-packages\gevent\_threading.py", line 84 in wait
# File "C:\Python36-x64\lib\site-packages\gevent\_threading.py", line 166 in get
# File "C:\Python36-x64\lib\site-packages\gevent\threadpool.py", line 270 in _worker
# Current thread 0x00000894 (most recent call first):
# File "C:\Python36-x64\lib\site-packages\gevent\threadpool.py", line 261 in _worker
# Thread 0x00000634 (most recent call first):
# File "C:\Python36-x64\lib\site-packages\gevent\_threading.py", line 84 in wait
# File "C:\Python36-x64\lib\site-packages\gevent\_threading.py", line 166 in get
# File "C:\Python36-x64\lib\site-packages\gevent\threadpool.py", line 270 in _worker
# Thread 0x00000538 (most recent call first):
# Thread 0x0000049c (most recent call first):
# File "C:\Python36-x64\lib\weakref.py", line 356 in remove
# ! C:\Python36-x64\python.exe -u -mgevent.tests.test__backdoor [code 3221225477] [Ran 6 tests in 2.1s]
# Note the common factors:
# - The test is finished (successfully) and we're apparently exiting the VM,
# doing GC
# - A weakref is being cleaned up
# weakref.py line 356 remove() is in WeakKeyDictionary. We only use WeakKeyDictionary
# in gevent._ident.IdentRegistry, which is only used in two places:
# gevent.hub.hub_ident_registry, which has weak references to Hub objects,
# and gevent.greenlet.Greenlet.minimal_ident, which uses its parent Hub's
# IdentRegistry to get its own identifier. So basically they have weak references
# to Hub and arbitrary Greenlets.
# Our attempted solution: stop using a module-level IdentRegistry to get
# Hub idents, and reduce how often we auto-generate one for greenlets.
# Commenting out the tests, lets see if it works.
#'FLAKY test__greenness.py',
#'FLAKY test__backdoor.py',
]

if not PY35:
# Py35 added socket.socketpair, all other releases
# are missing it. No reason to even test it.
IGNORED_TESTS += [
'test__socketpair.py',
]

if struct.calcsize('P') * 8 == 64:
# could be a problem of appveyor - not sure
# ======================================================================
# ERROR: test_af (__main__.TestIPv6Environment)
# ----------------------------------------------------------------------
# File "C:\Python27-x64\lib\ftplib.py", line 135, in connect
# self.sock = socket.create_connection((self.host, self.port), self.timeout)
# File "c:\projects\gevent\gevent\socket.py", line 73, in create_connection
# raise err
# error: [Errno 10049] [Error 10049] The requested address is not valid in its context.
# XXX: On Jan 3 2016 this suddenly started passing on Py27/64; no idea why, the python version
# was 2.7.11 before and after.
FAILING_TESTS.append('FLAKY test_ftplib.py')

if PY3:
pass

if LEAKTEST:
FAILING_TESTS += [
'FLAKY test__backdoor.py',
'FLAKY test__socket_errors.py',
]

if os.environ.get("TRAVIS") == "true":
FAILING_TESTS += [
# On Travis, this very frequently fails due to timing
'FLAKY test_signal.py',
]

if PYPY:
FAILING_TESTS += [
## Different in PyPy:
## Not implemented:
## ---
## BUGS:
## UNKNOWN:
# AssertionError: '>>> ' != ''
# test__backdoor.py:52
'FLAKY test__backdoor.py',
]

if RESOLVER_NOT_SYSTEM:
FAILING_TESTS += [
# A few errors and differences:
# AssertionError: ('255.255.255.255', 'http') != gaierror(-2,) # DNS Python
# AssertionError: ('255.255.255.255', 'http') != gaierror(4, 'ARES_ENOTFOUND: Domain name not found')
# AssertionError: OverflowError('port must be 0-65535.',) != ('readthedocs.org', '65535')
# AssertionError: Lists differ:
# (10, 1, 6, '', ('2607:f8b0:4004:810::200e', 80, 0L, 0L))
# (10, 1, 6, '', ('2607:f8b0:4004:805::200e', 80, 0, 0))
#
# Somehow it seems most of these are fixed with PyPy3.6-7 under dnspython,
# (once we commented out TestHostname)?
'FLAKY test__socket_dns.py',
]

if LIBUV:
IGNORED_TESTS += [
# This hangs for no apparent reason when run by the testrunner,
# even wher maked standalone
# when run standalone from the command line, it's fine.
# Issue in pypy2 6.0?
'test__monkey_sigchld_2.py',
]

if TRAVIS:
FAILING_TESTS += [
# This fails to get the correct results, sometimes. I can't reproduce locally
'FLAKY test__example_udp_server.py',
'FLAKY test__example_udp_client.py',
]
IGNORED_TESTS += [
# PyPy 7.0 and 7.1 on Travis with Ubunto Xenial 16.04
# can't allocate SSL Context objects, either in Python 2.7
# or 3.6. There must be some library incompatibility.
# No point even running them.
# XXX: Remember to turn this back on.
'test_ssl.py',
]
if LIBUV:
IGNORED_TESTS += [
# XXX: Re-enable this when we can investigate more.
# This has started crashing with a SystemError.
# I cannot reproduce with the same version on macOS
# and I cannot reproduce with the same version in a Linux vm.
# Commenting out individual tests just moves the crash around.
# https://bitbucket.org/pypy/pypy/issues/2769/systemerror-unexpected-internal-exception
'test__pywsgi.py',
]
if PY3 and TRAVIS:
FAILING_TESTS += [
## ---
## Unknown; can't reproduce locally on OS X
'FLAKY test_subprocess.py', # timeouts on one test.
]

if TRAVIS and (PYPY or OSX):
IGNORED_TESTS += [
# XXX Re-enable these when we have more time to investigate.
# This test, which normally takes ~60s, sometimes
# hangs forever after running several tests. I cannot reproduce,
# it seems highly load dependent. Observed with both libev and libuv.
'test__threadpool.py',
# This test, which normally takes 4-5s, sometimes
# hangs forever after running two tests. I cannot reproduce,
# it seems highly load dependent. Observed with both libev and libuv.
'test__threading_2.py',
]

if TRAVIS and OSX:
IGNORED_TESTS += [
# This rarely hangs for unknown reasons. I cannot reproduce
# locally.
'test__issue230.py',
]

if LIBUV:
if OSX:
FAILING_TESTS += [
]

if PY3:
# No idea / TODO
FAILING_TESTS += [
'FLAKY test__socket_dns.py',
]

if PY3 and APPVEYOR:
FAILING_TESTS += [
# Timing issues on appveyor
'FLAKY test_selectors.py'
]

if COVERAGE:
# The gevent concurrency plugin tends to slow things
# down and get us past our default timeout value. These
# tests in particular are sensitive to it
FAILING_TESTS += [
'FLAKY test__issue302monkey.py',
'FLAKY test__example_portforwarder.py',
'FLAKY test__threading_vs_settrace.py',
]

FAILING_TESTS = [x.strip() for x in set(FAILING_TESTS) if x.strip()]

# This one sometimes randomly closes connections, but no indication
# of a server crash, only a client side close.
test__server_pywsgi = Flaky(when=APPVEYOR)

test_threading = Multi().ignored(
"""
This one seems to just stop right after patching is done. It
passes on a local win 10 vm, and the main test_threading_2.py
does as well. Based on the printouts we added, it appears to
not even finish importing:
https://ci.appveyor.com/project/denik/gevent/build/1.0.1277/job/tpvhesij5gldjxqw#L1190
Ignored because it takes two minutes to time out.
""",
when=APPVEYOR & LIBUV & PYPY
).flaky(
"""
test_set_and_clear in Py3 relies on 5 threads all starting and
coming to an Event wait point while a sixth thread sleeps for a half
second. The sixth thread then does something and checks that
the 5 threads were all at the wait point. But the timing is sometimes
too tight for appveyor. This happens even if Event isn't
monkey-patched
""",
when=APPVEYOR & PY3
)

test__socketpair = Ignored(
"""
Py35 added socket.socketpair, all other releases
are missing it. No reason to even test it.
""",
when=WIN & PY2
)

test_ftplib = Flaky(
r"""
could be a problem of appveyor - not sure
======================================================================
ERROR: test_af (__main__.TestIPv6Environment)
----------------------------------------------------------------------
File "C:\Python27-x64\lib\ftplib.py", line 135, in connect
self.sock = socket.create_connection((self.host, self.port), self.timeout)
File "c:\projects\gevent\gevent\socket.py", line 73, in create_connection
raise err
error: [Errno 10049] [Error 10049] The requested address is not valid in its context.
XXX: On Jan 3 2016 this suddenly started passing on Py27/64; no idea why, the python version
was 2.7.11 before and after.
""",
when=APPVEYOR & BIT_64
)

test__backdoor = Flaky(when=LEAKTEST | PYPY)
test__socket_errors = Flaky(when=LEAKTEST)

test_signal = Flaky(
"On Travis, this very frequently fails due to timing",
when=TRAVIS & LEAKTEST,
# Partial workaround for the _testcapi issue on PyPy,
# but also because signal delivery can sometimes be slow, and this
# spawns processes of its own
run_alone=APPVEYOR,
)

test__socket_dns = Flaky(
"""
A few errors and differences:
AssertionError: ('255.255.255.255', 'http') != gaierror(-2,) # DNS Python
AssertionError: ('255.255.255.255', 'http') != gaierror(4, 'ARES_ENOTFOUND: Domain name not found')
AssertionError: OverflowError('port must be 0-65535.',) != ('readthedocs.org', '65535')
AssertionError: Lists differ:
(10, 1, 6, '', ('2607:f8b0:4004:810::200e', 80, 0L, 0L))
(10, 1, 6, '', ('2607:f8b0:4004:805::200e', 80, 0, 0))

Somehow it seems most of these are fixed with PyPy3.6-7 under dnspython,
(once we commented out TestHostname)?
""",
when=RESOLVER_NOT_SYSTEM | PY3
)

test__monkey_sigchld_2 = Ignored(
"""
This hangs for no apparent reason when run by the testrunner,
even when run standalone from the
command line, it's fine. Issue in pypy2 6.0?
""",
when=PYPY & LIBUV
)

test_ssl = Ignored(
"""
PyPy 7.0 and 7.1 on Travis with Ubuntu Xenial 16.04 can't
allocate SSL Context objects, either in Python 2.7 or 3.6.
There must be some library incompatibility. No point even
running them. XXX: Remember to turn this back on.
""",
when=PYPY & TRAVIS
)

test__pywsgi = Ignored(
"""
XXX: Re-enable this when we can investigate more. This has
started crashing with a SystemError. I cannot reproduce with
the same version on macOS and I cannot reproduce with the same
version in a Linux vm. Commenting out individual tests just
moves the crash around.
https://bitbucket.org/pypy/pypy/issues/2769/systemerror-unexpected-internal-exception

On Appveyor 3.8.0, for some reason this takes *way* too long, about 100s, which
often goes just over the default timeout of 100s. This makes no sense.
But it also takes nearly that long in 3.7. 3.6 and earlier are much faster.
""",
when=(PYPY & TRAVIS & LIBUV) | PY380_EXACTLY,
# https://bitbucket.org/pypy/pypy/issues/2769/systemerror-unexpected-internal-exception
run_alone=(CI & LEAKTEST & PY3) | (PYPY & LIBUV),
)

test_subprocess = Flaky(
"Unknown, can't reproduce locally; times out one test",
when=PYPY & PY3 & TRAVIS,
ignore_coverage=ALWAYS,
)

test__threadpool = Ignored(
"""
XXX: Re-enable these when we have more time to investigate.

This test, which normally takes ~60s, sometimes
hangs forever after running several tests. I cannot reproduce,
it seems highly load dependent. Observed with both libev and libuv.
""",
when=TRAVIS & (PYPY | OSX),
run_alone=ALWAYS,
# This often takes much longer on PyPy on CI.
options={'timeout': (CI & PYPY, 180)},
)

test__threading_2 = Ignored(
"""
This test, which normally takes 4-5s, sometimes
hangs forever after running two tests. I cannot reproduce,
it seems highly load dependent. Observed with both libev and libuv.
""",
when=TRAVIS & (PYPY | OSX),
# This often takes much longer on PyPy on CI.
options={'timeout': (CI & PYPY, 180)},
)

test__issue230 = Ignored(
"""
This rarely hangs for unknown reasons. I cannot reproduce
locally.
""",
when=TRAVIS & OSX
)

test_selectors = Flaky(
"""
Timing issues on appveyor.
""",
when=PY3 & APPVEYOR,
ignore_coverage=ALWAYS,
)

test__example_portforwarder = Flaky(
"""
This one sometimes times out, often after output "The process
with PID XXX could not be terminated. Reason: There is no
running instance of the task."
""",
when=APPVEYOR | COVERAGE
)

test__issue302monkey = test__threading_vs_settrace = Flaky(
"""
The gevent concurrency plugin tends to slow things
down and get us past our default timeout value. These
tests in particular are sensitive to it. So in fact we just turn them
off.
""",
when=COVERAGE,
ignore_coverage=ALWAYS
)
test__hub_join_timeout = Ignored(
r"""
This sometimes times out. It appears to happen when the
tests take too long and a test raises a FlakyTestTimeout error,
aka a unittest.SkipTest error. This probably indicates that we're
not cleaning something up correctly:
.....ss
GEVENTTEST_USE_RESOURCES=-network C:\Python38-x64\python.exe -u \
-mgevent.tests.test__hub_join_timeout [code TIMEOUT] [took 100.4s]
""",
when=APPVEYOR
)
test__example_wsgiserver = test__example_webproxy = RunAlone(
"""
These share the same port, which means they can conflict
between concurrent test runs too
XXX: Fix this by dynamically picking a port.
""",
)
test__pool = test__queue = RunAlone(
"""
On a heavily loaded box, these can all take upwards of 200s.
""",
when=CI & LEAKTEST | PY3
)
test_socket = RunAlone(
"Sometimes has unexpected timeouts",
when=CI & PYPY & PY3,
ignore_coverage=ALWAYS, # times out
)
test__refcount = RunAlone(
when=NEVER,
ignore_coverage=PYPY
)
# tests that can't be run when coverage is enabled
# TODO: Now that we have this declarative, we could eliminate this list,
# just add them to the main IGNORED_TESTS list.
IGNORE_COVERAGE = [
]
# A mapping from test file basename to a dictionary of
# options that will be applied on top of the DEFAULT_RUN_OPTIONS.
@@ -346,89 +471,55 @@ TEST_FILE_OPTIONS = {
}

FAILING_TESTS = []
IGNORED_TESTS = []

# tests that don't do well when run on busy box
# or that are mutually exclusive
RUN_ALONE = [
'test__threadpool.py',
# These share the same port, which means they can conflict
# between concurrent test runs too
# XXX: Fix this by dynamically picking a port.
'test__example_wsgiserver.py',
'test__example_webproxy.py',
]
if APPVEYOR or TRAVIS:
RUN_ALONE += [
# Partial workaround for the _testcapi issue on PyPy,
# but also because signal delivery can sometimes be slow, and this
# spawn processes of its own
'test_signal.py',
]
if LEAKTEST and PY3:
# On a heavily loaded box, these can all take upwards of 200s
RUN_ALONE += [
'test__pool.py',
'test__pywsgi.py',
'test__queue.py',
]
if PYPY:
# This often takes much longer on PyPy on CI.
TEST_FILE_OPTIONS['test__threadpool.py'] = {'timeout': 180}
TEST_FILE_OPTIONS['test__threading_2.py'] = {'timeout': 180}
if PY3:
RUN_ALONE += [
# Sometimes shows unexpected timeouts
'test_socket.py',
]
if LIBUV:
RUN_ALONE += [
# https://bitbucket.org/pypy/pypy/issues/2769/systemerror-unexpected-internal-exception
'test__pywsgi.py',
]
if APPVEYOR:
IGNORED_TESTS += [
# This sometimes times out. It appears to happen when the
# times take too long and a test raises a FlakyTestTimeout error,
# aka a unittest.SkipTest error. This probably indicates that we're
# not cleaning something up correctly:
#
# .....ss
# GEVENTTEST_USE_RESOURCES=-network C:\Python38-x64\python.exe -u \
# -mgevent.tests.test__hub_join_timeout [code TIMEOUT] [took 100.4s]
'test__hub_join_timeout.py',
]
if sys.version_info[:3] == (3, 8, 0):
# For some reason this takes *way* too long, about 100s, which
# often goes just over the default timeout of 100s. This makes no sense.
# But it also takes nearly that long in 3.7. 3.6 and earlier are much faster.
IGNORED_TESTS += [
'test__pywsgi.py',
]
# tests that can't be run when coverage is enabled
IGNORE_COVERAGE = [
# Hangs forever
'test__threading_vs_settrace.py',
# times out
'test_socket.py',
# Doesn't get the exceptions it expects
'test_selectors.py',
# XXX ?
'test__issue302monkey.py',
"test_subprocess.py",
]

if PYPY:
IGNORE_COVERAGE += [
# Tends to timeout
'test__refcount.py',
'test__greenletset.py'
]

]

def populate(): # pylint:disable=too-many-branches
# TODO: Maybe move to the metaclass.
# TODO: This could be better.
for k, v in Definitions.__dict__.items():
if isinstance(v, Multi):
actions = v._conds
else:
actions = (v,)
test_name = k + '.py'
del k, v
for action in actions:
if not isinstance(action, _Action):
continue
if action.run_alone:
RUN_ALONE.append(test_name)
if action.ignore_coverage:
IGNORE_COVERAGE.append(test_name)
if action.options:
for opt_name, (condition, value) in action.options.items():
# TODO: Verify that this doesn't match more than once.
if condition:
TEST_FILE_OPTIONS.setdefault(test_name, {})[opt_name] = value
if action.when:
if isinstance(action, Ignored):
IGNORED_TESTS.append(test_name)
elif isinstance(action, Flaky):
FAILING_TESTS.append('FLAKY ' + test_name)
elif isinstance(action, Failing):
FAILING_TESTS.append(test_name)
FAILING_TESTS.sort()
IGNORED_TESTS.sort()
RUN_ALONE.sort()
populate()
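# --- Editor's note (illustrative; not part of this commit) ------------------
# Example of the expansion performed by populate(): on a CI run where both
# PY3 and RUNNING_ON_APPVEYOR are true, the single declaration
#     test_selectors = Flaky("Timing issues on appveyor.",
#                            when=PY3 & APPVEYOR, ignore_coverage=ALWAYS)
# contributes 'FLAKY test_selectors.py' to FAILING_TESTS and, on any platform
# (because ignore_coverage=ALWAYS), 'test_selectors.py' to IGNORE_COVERAGE,
# the same entries the old hand-maintained lists carried.
# -----------------------------------------------------------------------------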
if __name__ == '__main__':
print('known_failures:\n', FAILING_TESTS)
print('ignored tests:\n', IGNORED_TESTS)
print('run alone:\n', RUN_ALONE)
print('options:\n', TEST_FILE_OPTIONS)
print("ignore during coverage:\n", IGNORE_COVERAGE)