Commit e199273c authored by Jason Madden

Run leakchecks on Python 3.7.

Fixes #1197

Switch to objgraph to handle the measurement for us. That cleared up a
few of the obscure issues with references to
functions/getset_descriptors and the like (possibly because objgraph
keys its statistics by type *name*, whereas our hand-rolled histogram
keyed by the type objects themselves and so kept them alive).
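
For reference, here is a rough sketch of the kind of measurement objgraph gives us, assuming a recent objgraph; the helper names are made up for illustration, and the real logic is the _RefCountChecker class in the diff below, which also filters out frames, tracebacks, and stopped callbacks:

```python
# Minimal sketch of an objgraph-based growth check. `measure`, `run_test`,
# and `deltas` are illustrative names, not part of this change.
import gc
import objgraph

def measure(run_test, peak_stats):
    for _ in range(3):
        gc.collect()
    run_test()
    # growth() reports (type_name, count, delta) tuples keyed by the type's
    # *name*, so the measurement itself never keeps type objects alive.
    return objgraph.growth(limit=None, peak_stats=peak_stats)

peak_stats = {}
deltas = []
for _ in range(4):
    growth = measure(lambda: None, peak_stats)
    deltas.append(sum(delta for _, _, delta in growth))

# The first delta is the baseline; a test counts as leak-free once the
# trailing deltas settle at (or sum to) zero, as _check_deltas does below.
print(deltas)
```

The real checker repeats the test until the growth deltas stabilize and only fails if they stay positive.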

Many of the real failures were due to re-using exception instances
across test runs, which is bad because the saved traceback and implicit
exception chaining keep old frames (and everything they reference) alive.
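
As an illustration (hypothetical code, not from the diff), a shared exception instance that gets re-raised pins every frame recorded in its __traceback__, and anything raised while it is being handled chains to it through __context__:

```python
# Hypothetical example of the leak pattern: each raise re-binds __traceback__
# on the *same* module-level object, keeping that run's frames and their
# locals reachable for as long as the instance lives.
shared = ValueError('reused instance')

def run_once():
    big = list(range(100000))   # stands in for per-test state we expect to be freed
    try:
        raise shared
    except ValueError:
        pass
    return len(big)

run_once()
# The frame that held `big` is still alive through the shared instance.
print(list(shared.__traceback__.tb_frame.f_locals))
```

That is why the tests now build a fresh exception instance per run (see the test__exc_info.py and test__pywsgi.py changes below).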

Most of the @ignores_leakcheck markers are for performance; only one is
for a real issue, and that test was already skipped on CI anyway for
being too flaky.
parent 229937b5
@@ -7,7 +7,7 @@
 1.3.0rc1 (unreleased)
 =====================

-- Nothing changed yet.
+- Python 3.7 passes the automated memory leak checks. See :issue:`1197`.

 1.3b2 (2018-05-03)
...
@@ -197,7 +197,7 @@ test-py36: $(PY36)
 	PYTHON=python3.6.4 PATH=$(BUILD_RUNTIMES)/versions/python3.6.4/bin:$(PATH) make develop allbackendtest

 test-py37: $(PY37)
-	LD_LIBRARY_PATH=$(BUILD_RUNTIMES)/versions/python3.7.0b4/openssl/lib PYTHON=python3.7.0b4 PATH=$(BUILD_RUNTIMES)/versions/python3.7.0b4/bin:$(PATH) make develop allbackendtest
+	LD_LIBRARY_PATH=$(BUILD_RUNTIMES)/versions/python3.7.0b4/openssl/lib PYTHON=python3.7.0b4 PATH=$(BUILD_RUNTIMES)/versions/python3.7.0b4/bin:$(PATH) make develop leaktest cffibackendtest coverage_combine

 test-pypy: $(PYPY)
 	PYTHON=$(PYPY) PATH=$(BUILD_RUNTIMES)/versions/pypy5100/bin:$(PATH) make develop cffibackendtest
...
@@ -357,6 +357,9 @@ def run_setup(ext_modules, run_make):
             'futures ; python_version == "2.7"',
             'mock ; python_version == "2.7"',
+            # leak checks. previously we had a hand-rolled version.
+            'objgraph',
         ]
     },
     # It's always safe to pass the CFFI keyword, even if
...
@@ -17,54 +17,176 @@
 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 # THE SOFTWARE.
+from __future__ import print_function
+
 import sys
 import gc
-import collections
 import types
 from functools import wraps
+import unittest
+
+import objgraph

 import gevent
 import gevent.core


 def ignores_leakcheck(func):
+    """
+    Ignore the given object during leakchecks.
+
+    Can be applied to a method, in which case the method will run, but
+    will not be subject to leak checks.
+
+    If applied to a class, the entire class will be skipped during leakchecks. This
+    is intended to be used for classes that are very slow and cause problems such as
+    test timeouts; typically it will be used for classes that are subclasses of a base
+    class and specify variants of behaviour (such as pool sizes).
+    """
     func.ignore_leakcheck = True
     return func

-# Some builtin things that we ignore
-IGNORED_TYPES = (tuple, dict, types.FrameType, types.TracebackType)
-try:
-    callback_kind = gevent.core.callback
-except AttributeError:
-    # Must be using FFI.
-    from gevent._ffi.callback import callback as callback_kind
-
-def _type_hist():
-    d = collections.defaultdict(int)
-    for x in gc.get_objects():
-        k = type(x)
-        if k in IGNORED_TYPES:
-            continue
-        if k == callback_kind and x.callback is None and x.args is None:
+
+class _RefCountChecker(object):
+
+    # Some builtin things that we ignore
+    IGNORED_TYPES = (tuple, dict, types.FrameType, types.TracebackType)
+    try:
+        CALLBACK_KIND = gevent.core.callback
+    except AttributeError:
+        # Must be using FFI.
+        from gevent._ffi.callback import callback as CALLBACK_KIND
+
+    def __init__(self, testcase, function):
+        self.testcase = testcase
+        self.function = function
+        self.deltas = []
+        self.peak_stats = {}
+
+        # The very first time we are called, we have already been
+        # self.setUp() by the test runner, so we don't need to do it again.
+        self.needs_setUp = False
+
+    def _ignore_object_p(self, obj):
+        if obj is self or obj in self.__dict__.values() or obj == self._ignore_object_p:
+            return False
+        kind = type(obj)
+        if kind in self.IGNORED_TYPES:
+            return False
+        if kind is self.CALLBACK_KIND and obj.callback is None and obj.args is None:
             # these represent callbacks that have been stopped, but
             # the event loop hasn't cycled around to run them. The only
             # known cause of this is killing greenlets before they get a chance
             # to run for the first time.
-            continue
-        d[k] += 1
-    return d
-
-def _report_diff(a, b):
-    diff_lines = []
-    for k, v in sorted(a.items(), key=lambda i: i[0].__name__):
-        if b[k] != v:
-            diff_lines.append("%s: %s != %s" % (k, v, b[k]))
-    if not diff_lines:
-        return None
-    diff = '\n'.join(diff_lines)
-    return diff
+            return False
+        return True
+
+    def _growth(self):
+        return objgraph.growth(limit=None, peak_stats=self.peak_stats, filter=self._ignore_object_p)
+
+    def _report_diff(self, growth):
+        if not growth:
+            return "<Unable to calculate growth>"
+
+        lines = []
+        width = max(len(name) for name, _, _ in growth)
+        for name, count, delta in growth:
+            lines.append('%-*s%9d %+9d' % (width, name, count, delta))
+
+        diff = '\n'.join(lines)
+        return diff
+
+    def _run_test(self, args, kwargs):
+        gc_enabled = gc.isenabled()
+        gc.disable()
+
+        if self.needs_setUp:
+            self.testcase.setUp()
+            self.testcase.skipTearDown = False
+        try:
+            self.function(self.testcase, *args, **kwargs)
+        finally:
+            self.testcase.tearDown()
+            self.testcase.skipTearDown = True
+            self.needs_setUp = True
+            if gc_enabled:
+                gc.enable()
+
+    def _growth_after(self):
+        # Grab post snapshot
+        if 'urlparse' in sys.modules:
+            sys.modules['urlparse'].clear_cache()
+        if 'urllib.parse' in sys.modules:
+            sys.modules['urllib.parse'].clear_cache()
+
+        return self._growth()
+
+    def _check_deltas(self, growth):
+        # Return false when we have decided there is no leak,
+        # true if we should keep looping, raises an assertion
+        # if we have decided there is a leak.
+
+        deltas = self.deltas
+        if not deltas:
+            # We haven't run yet, no data, keep looping
+            return True
+
+        if gc.garbage:
+            raise AssertionError("Generated uncollectable garbage %r" % (gc.garbage,))
+
+        # the following configurations are classified as "no leak"
+        # [0, 0]
+        # [x, 0, 0]
+        # [... a, b, c, d] where a+b+c+d = 0
+        #
+        # the following configurations are classified as "leak"
+        # [... z, z, z] where z > 0
+        if deltas[-2:] == [0, 0] and len(deltas) in (2, 3):
+            return False
+        if deltas[-3:] == [0, 0, 0]:
+            return False
+        if len(deltas) >= 4 and sum(deltas[-4:]) == 0:
+            return False
+        if len(deltas) >= 3 and deltas[-1] > 0 and deltas[-1] == deltas[-2] and deltas[-2] == deltas[-3]:
+            diff = self._report_diff(growth)
+            raise AssertionError('refcount increased by %r\n%s' % (deltas, diff))
+
+        # OK, we don't know for sure yet. Let's search for more
+        if sum(deltas[-3:]) <= 0 or sum(deltas[-4:]) <= 0 or deltas[-4:].count(0) >= 2:
+            # this is suspicious, so give a few more runs
+            limit = 11
+        else:
+            limit = 7
+        if len(deltas) >= limit:
+            raise AssertionError('refcount increased by %r\n%s'
+                                 % (deltas,
+                                    self._report_diff(growth)))
+
+        # We couldn't decide yet, keep going
+        return True
+
+    def __call__(self, args, kwargs):
+        for _ in range(3):
+            gc.collect()
+
+        # Capture state before; the incremental will be
+        # updated by each call to _growth_after
+        growth = self._growth()
+
+        while self._check_deltas(growth):
+            self._run_test(args, kwargs)
+
+            growth = self._growth_after()
+
+            self.deltas.append(sum((stat[2] for stat in growth)))
+

 def wrap_refcount(method):
     if getattr(method, 'ignore_leakcheck', False):
@@ -73,74 +195,8 @@ def wrap_refcount(method):

     @wraps(method)
     def wrapper(self, *args, **kwargs): # pylint:disable=too-many-branches
-        gc.collect()
-        gc.collect()
-        gc.collect()
-        deltas = []
-        d = None
-        gc.disable()
-        # The very first time we are called, we have already been
-        # self.setUp() by the test runner, so we don't need to do it again.
-        needs_setUp = False
-        try:
-            while True:
-                # Grab current snapshot
-                hist_before = _type_hist()
-                d = sum(hist_before.values())
-                if needs_setUp:
-                    self.setUp()
-                    self.skipTearDown = False
-                try:
-                    method(self, *args, **kwargs)
-                finally:
-                    self.tearDown()
-                    self.skipTearDown = True
-                    needs_setUp = True
-                # Grab post snapshot
-                if 'urlparse' in sys.modules:
-                    sys.modules['urlparse'].clear_cache()
-                if 'urllib.parse' in sys.modules:
-                    sys.modules['urllib.parse'].clear_cache()
-                hist_after = _type_hist()
-                d = sum(hist_after.values()) - d
-                deltas.append(d)
-                # Reset and check for cycles
-                gc.collect()
-                if gc.garbage:
-                    raise AssertionError("Generated uncollectable garbage %r" % (gc.garbage,))
-                # the following configurations are classified as "no leak"
-                # [0, 0]
-                # [x, 0, 0]
-                # [... a, b, c, d] where a+b+c+d = 0
-                #
-                # the following configurations are classified as "leak"
-                # [... z, z, z] where z > 0
-                if deltas[-2:] == [0, 0] and len(deltas) in (2, 3):
-                    break
-                elif deltas[-3:] == [0, 0, 0]:
-                    break
-                elif len(deltas) >= 4 and sum(deltas[-4:]) == 0:
-                    break
-                elif len(deltas) >= 3 and deltas[-1] > 0 and deltas[-1] == deltas[-2] and deltas[-2] == deltas[-3]:
-                    diff = _report_diff(hist_before, hist_after)
-                    raise AssertionError('refcount increased by %r\n%s' % (deltas, diff))
-                # OK, we don't know for sure yet. Let's search for more
-                if sum(deltas[-3:]) <= 0 or sum(deltas[-4:]) <= 0 or deltas[-4:].count(0) >= 2:
-                    # this is suspicious, so give a few more runs
-                    limit = 11
-                else:
-                    limit = 7
-                if len(deltas) >= limit:
-                    raise AssertionError('refcount increased by %r\n%s'
-                                         % (deltas,
-                                            _report_diff(hist_before, hist_after)))
-        finally:
-            gc.enable()
+        if getattr(self, 'ignore_leakcheck', False):
+            raise unittest.SkipTest("This class ignored during leakchecks")
+        return _RefCountChecker(self, method)(args, kwargs)

     return wrapper
...
@@ -18,6 +18,7 @@ from greentest.sysinfo import PY3
 from greentest.sysinfo import PY2
 from greentest.sysinfo import RESOLVER_ARES
 from greentest.sysinfo import LIBUV
+from greentest.sysinfo import RUN_LEAKCHECKS
 from greentest import six

 # Import this while we're probably single-threaded/single-processed
@@ -34,6 +35,11 @@ NWORKERS = int(os.environ.get('NWORKERS') or max(cpu_count() - 1, 4))
 if NWORKERS > 10:
     NWORKERS = 10

+if RUN_LEAKCHECKS:
+    # Capturing the stats takes time, and we run each
+    # test at least twice
+    TIMEOUT = 200
+
 DEFAULT_RUN_OPTIONS = {
     'timeout': TIMEOUT
 }
...
@@ -37,6 +37,10 @@ if sysinfo.RUNNING_ON_APPVEYOR:
     SMALL_TICK_MAX_ADJ = 1.5

+LARGE_TICK = 0.2
+LARGE_TICK_MIN_ADJ = LARGE_TICK / 2.0
+LARGE_TICK_MAX_ADJ = SMALL_TICK_MAX_ADJ
+

 class _DelayWaitMixin(object):
@@ -83,9 +87,6 @@ class _DelayWaitMixin(object):
         finally:
             timeout.close()

-LARGE_TICK = 0.2
-LARGE_TICK_MIN_ADJ = LARGE_TICK / 2.0
-LARGE_TICK_MAX_ADJ = SMALL_TICK_MAX_ADJ

 class AbstractGenericWaitTestCase(_DelayWaitMixin, TestCase):
     # pylint:disable=abstract-method
...
@@ -223,25 +223,6 @@ if PY3:
         'FLAKY test__socket_dns.py',
     ]

-if LEAKTEST:
-    FAILING_TESTS += ['FLAKY test__threadpool.py']
-    # refcount problems:
-    FAILING_TESTS += [
-        'test__timeout.py',
-        'FLAKY test__greenletset.py',
-        'test__core.py',
-        'test__systemerror.py',
-        'test__exc_info.py',
-        'test__api_timeout.py',
-        'test__event.py',
-        'test__api.py',
-        'test__hub.py',
-        'test__queue.py',
-        'test__socket_close.py',
-        'test__select.py',
-        'test__greenlet.py',
-        'FLAKY test__socket.py',
-    ]
-
 if sys.version_info[:2] >= (3, 4) and APPVEYOR:
...
@@ -105,7 +105,12 @@ class TestTimers(greentest.TestCase):
             gevent.sleep(0.02)

         gevent.spawn(func)
+        # Func has not run yet
         self.assertEqual(lst, [1])
+        # Run callbacks but don't yield.
+        gevent.sleep()
+        # Let timers fire. Func should be done.
         gevent.sleep(0.1)
         self.assertEqual(lst, [])
...
@@ -2,50 +2,46 @@ import gevent
 import sys
 import greentest
 from greentest import six
+from greentest import ExpectedException as ExpectedError

 if not six.PY3:
     sys.exc_clear()

-class ExpectedError(Exception):
+class RawException(Exception):
     pass

-expected_error = ExpectedError('expected exception in hello')
-
-def hello():
+def hello(err):
     assert sys.exc_info() == (None, None, None), sys.exc_info()
-    raise expected_error
+    raise err

 def hello2():
     try:
-        hello()
+        hello(ExpectedError('expected exception in hello'))
     except ExpectedError:
         pass

-error = Exception('hello')

 class Test(greentest.TestCase):

     def test1(self):
+        error = RawException('hello')
+        expected_error = ExpectedError('expected exception in hello')
         try:
             raise error
-        except:
+        except RawException:
             self.expect_one_error()

-        g = gevent.spawn(hello)
+        g = gevent.spawn(hello, expected_error)
         g.join()
         self.assert_error(ExpectedError, expected_error)
         if not isinstance(g.exception, ExpectedError):
             raise g.exception

         try:
             raise
-        except Exception:
+        except: # pylint:disable=bare-except
             ex = sys.exc_info()[1]
-            assert ex is error, (ex, error)
+            self.assertIs(ex, error)

     def test2(self):
         timer = gevent.get_hub().loop.timer(0)
...
@@ -30,8 +30,9 @@ from gevent.queue import Queue, Channel
 from greentest.timing import AbstractGenericWaitTestCase
 from greentest.timing import AbstractGenericGetTestCase
+from greentest import timing

-DELAY = 0.01
+DELAY = timing.SMALL_TICK

 greentest.TestCase.error_fatal = False
@@ -567,11 +568,12 @@ class TestBasic(greentest.TestCase):
             setattr(error, 'myattr', return_value)
             raise error

-        g = gevent.Greenlet(func, 0.001, return_value=5)
+        g = gevent.Greenlet(func, timing.SMALLEST_RELIABLE_DELAY, return_value=5)
         # use rawlink to avoid timing issues on Appveyor (not always successful)
         g.rawlink(link_test.append)
         g.start()
-        gevent.sleep(0.1)
+        gevent.sleep()
+        gevent.sleep(timing.LARGE_TICK)
         self.assertFalse(g)
         self.assertTrue(g.dead)
         self.assertFalse(g.started)
...
@@ -39,6 +39,7 @@ class Test(greentest.TestCase):
         self.assertEqual(len(s), 1, s)
         s.spawn(gevent.sleep, timing.LARGE_TICK * 5)
         self.assertEqual(len(s), 2, s)
+        gevent.sleep()
         gevent.sleep(timing.LARGE_TICK * 2 + timing.LARGE_TICK_MIN_ADJ)
         self.assertEqual(len(s), 1, s)
         gevent.sleep(timing.LARGE_TICK * 5 + timing.LARGE_TICK_MIN_ADJ)
...
@@ -295,7 +295,7 @@ TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.082, 0.035, 0.14
 SMALL_RANGE = 10
 LARGE_RANGE = 1000

-if greentest.PYPY and greentest.WIN:
+if (greentest.PYPY and greentest.WIN) or greentest.RUN_LEAKCHECKS or greentest.RUN_COVERAGE:
     # See comments in test__threadpool.py.
     LARGE_RANGE = 50
 elif greentest.RUNNING_ON_CI or greentest.EXPECT_POOR_TIMER_RESOLUTION:
@@ -469,11 +469,11 @@ class TestPool(greentest.TestCase): # pylint:disable=too-many-public-methods
 class TestPool2(TestPool):
     size = 2

+@greentest.ignores_leakcheck
 class TestPool3(TestPool):
     size = 3

+@greentest.ignores_leakcheck
 class TestPool10(TestPool):
     size = 10
...
@@ -899,10 +899,11 @@ class TestInputN(TestCase):
 class TestError(TestCase):

-    error = greentest.ExpectedException('TestError.application')
+    error = object()
     error_fatal = False

     def application(self, env, start_response):
+        self.error = greentest.ExpectedException('TestError.application')
         raise self.error

     def test(self):
@@ -913,9 +914,8 @@ class TestError(TestCase):
 class TestError_after_start_response(TestError):

-    error = greentest.ExpectedException('TestError_after_start_response.application')
-
     def application(self, env, start_response):
+        self.error = greentest.ExpectedException('TestError_after_start_response.application')
         start_response('200 OK', [('Content-Type', 'text/plain')])
         raise self.error
...
@@ -139,6 +139,9 @@ def run_and_check(run_client):

 @greentest.skipOnCI("Often fail with timeouts or force closed connections; not sure why.")
+@greentest.skipIf(greentest.RUN_LEAKCHECKS and greentest.PY3,
+                  "Often fail with force closed connections; not sure why. "
+                  )
 class Test(greentest.TestCase):
     __timeout__ = greentest.LARGE_TIMEOUT
...
@@ -163,10 +163,13 @@ class _AbstractPoolTest(TestCase):
 SMALL_RANGE = 10
 LARGE_RANGE = 1000

-if greentest.PYPY and (greentest.WIN or greentest.RUN_COVERAGE):
+if (greentest.PYPY and (greentest.WIN or greentest.RUN_COVERAGE)) or greentest.RUN_LEAKCHECKS:
     # PyPy 5.10 is *really* slow at spawning or switching between
     # threads (especially on Windows or when coverage is enabled) Tests that happen
-    # instantaneously on other platforms time out due to the overhead
+    # instantaneously on other platforms time out due to the overhead.
+    # Leakchecks also take much longer due to all the calls into the GC,
+    # most especially on Python 3
     LARGE_RANGE = 50

 class TestPool(_AbstractPoolTest):
@@ -307,10 +310,11 @@ class TestPool2(TestPool):
         self.assertEqual(result, "B")

+@greentest.ignores_leakcheck
 class TestPool3(TestPool):
     size = 3

+@greentest.ignores_leakcheck
 class TestPool10(TestPool):
     size = 10
@@ -348,6 +352,7 @@ class TestJoinEmpty(TestCase):
 class TestSpawn(TestCase):
     switch_expected = True

+    @greentest.ignores_leakcheck
     def test(self):
         pool = self._makeOne(1)
         self.assertEqual(len(pool), 0)
@@ -408,6 +413,7 @@ class TestMaxsize(TestCase):
         self.assertEqualFlakyRaceCondition(done, [1, 2])

+    @greentest.ignores_leakcheck
     def test_setzero(self):
         pool = self.pool = self._makeOne(3)
         pool.spawn(sleep, 0.1)
...