Commit f173686c authored by Jason Madden

Fix the test failures and enable debugging under Python 2

parent 51461c84
......@@ -222,6 +222,22 @@ libuv
zero duration timers and turns them into a check watcher. check
watchers do not support the ``again`` method.
- All watchers (e.g., ``loop.io``) and the ``Timeout`` class have a
``close`` method that should be called when code is done using the
object (they also function as context managers and a ``with``
statement will automatically close them). gevent does this
internally for sockets, file objects and internal timeouts.
Neglecting to close an object may result in leaking native
resources. To debug this, set the environment variables
``GEVENT_DEBUG=debug`` and ``PYTHONTRACEMALLOC=n`` (where ``n`` is the
number of stack frames to record) before starting the process.
The traditional cython-based libev backend will not leak if
``close`` is not called and will not produce warnings. The
CFFI-based libev backend will not currently leak but will produce
warnings. The CFFI-based libuv backend may leak and will produce
warnings.
Again, this is extremely experimental and all of it is subject to
change.
......
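A minimal usage sketch of the ``close``/context-manager protocol the changelog entry above describes. The ``timer`` watcher and ``with`` form mirror the tests in this commit; the durations and the script name in the shell comment are arbitrary examples, and the environment variables are normally set in the shell before the process starts.

```python
# GEVENT_DEBUG=debug PYTHONTRACEMALLOC=5 python this_script.py
import gevent
from gevent.hub import get_hub

loop = get_hub().loop

# Close a watcher explicitly when done with it...
timer = loop.timer(0.2)
timer.start(lambda: None)
gevent.sleep(0.3)   # let the hub run so the timer fires
timer.close()       # without this, the libuv backend may leak and warn

# ...or rely on the context-manager protocol: __exit__ closes the watcher.
with loop.timer(0.2) as t:
    t.start(lambda: None)
    gevent.sleep(0.3)
```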
......@@ -19,3 +19,4 @@ print('Sending %s bytes to %s:%s' % ((len(message), ) + address))
sock.send(message.encode())
data, address = sock.recvfrom(8192)
print('%s:%s: got %r' % (address + (data, )))
sock.close()
......@@ -122,8 +122,6 @@ class AbstractCallbacks(object):
the_watcher.callback(*args)
except: # pylint:disable=bare-except
_dbg("Got exception servicing watcher with handle", handle, sys.exc_info())
import traceback
traceback.print_exc()
# It's possible for ``the_watcher`` to be undefined (UnboundLocalError)
# if we threw an exception (signal) on the line that created that variable.
# This is typically the case with a signal under libuv
......
......@@ -12,16 +12,50 @@ import warnings
try:
from tracemalloc import get_object_traceback
except ImportError:
def get_object_traceback(_obj):
return None
def tracemalloc(init):
# PYTHONTRACEMALLOC env var controls this.
return init
except ImportError: # Python < 3.4
if os.getenv('PYTHONTRACEMALLOC'):
# Use the same env var to turn this on for Python 2
import traceback
class _TB(object):
__slots__ = ('lines',)
def __init__(self, lines):
# These end in newlines, which we don't want for consistency
self.lines = [x.rstrip() for x in lines]
def format(self):
return self.lines
def tracemalloc(init):
@functools.wraps(init)
def traces(self, *args, **kwargs):
init(self, *args, **kwargs)
self._captured_malloc = _TB(traceback.format_stack())
return traces
def get_object_traceback(obj):
return obj._captured_malloc
else:
def get_object_traceback(_obj):
return None
def tracemalloc(init):
return init
from gevent._ffi import _dbg
from gevent._ffi import GEVENT_DEBUG_LEVEL
from gevent._ffi import CRITICAL
from gevent._ffi import DEBUG
from gevent._ffi.loop import GEVENT_CORE_EVENTS
from gevent._ffi.loop import _NOARGS
ALLOW_WATCHER_DEL = GEVENT_DEBUG_LEVEL >= DEBUG
__all__ = [
]
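For illustration, a standalone sketch (outside gevent) of what the Python 2 fallback above does: when ``PYTHONTRACEMALLOC`` is set, decorating ``__init__`` captures a formatted stack at construction time so a later leak report can show where the object was created. ``capture_creation_site`` and ``Widget`` are hypothetical stand-ins, not gevent APIs.

```python
import functools
import traceback

def capture_creation_site(init):
    # Mirrors the ``tracemalloc`` decorator above: wrap __init__ and record
    # the call stack that created the object.
    @functools.wraps(init)
    def wrapper(self, *args, **kwargs):
        init(self, *args, **kwargs)
        self._creation_site = [line.rstrip() for line in traceback.format_stack()]
    return wrapper

class Widget(object):
    @capture_creation_site
    def __init__(self):
        pass

w = Widget()
# Prints the allocation site, much like get_object_traceback() does above.
print('\n'.join(w._creation_site))
```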
......@@ -113,7 +147,7 @@ class AbstractWatcherType(type):
def __new__(cls, name, bases, cls_dict):
if name != 'watcher' and not cls_dict.get('_watcher_skip_ffi'):
cls._fill_watcher(name, bases, cls_dict)
if '__del__' in cls_dict and GEVENT_DEBUG_LEVEL < CRITICAL:
if '__del__' in cls_dict and not ALLOW_WATCHER_DEL:
raise TypeError("CFFI watchers are not allowed to have __del__")
return type.__new__(cls, name, bases, cls_dict)
......@@ -202,6 +236,7 @@ class watcher(object):
# as possible.
_handle = None
@tracemalloc
def __init__(self, _loop, ref=True, priority=None, args=_NOARGS):
self.loop = _loop
self.__init_priority = priority
......@@ -300,17 +335,23 @@ class watcher(object):
self.stop()
_watcher = self._watcher
self._watcher = None
_watcher.data = self._FFI.NULL # pylint: disable=no-member
self._watcher_set_data(_watcher, self._FFI.NULL) # pylint: disable=no-member
self._watcher_ffi_close(_watcher)
self.loop = None
def _watcher_set_data(self, the_watcher, data):
# This abstraction exists for the sole benefit of
# libuv.watcher.stat, which "subclasses" uv_handle_t.
the_watcher.data = data
return data
def __enter__(self):
return self
def __exit__(self, t, v, tb):
self.close()
if GEVENT_DEBUG_LEVEL >= CRITICAL:
if ALLOW_WATCHER_DEL:
def __del__(self):
if self._watcher:
tb = get_object_traceback(self)
......@@ -320,6 +361,8 @@ class watcher(object):
tb_msg = '\nTraceback:\n' + tb_msg
warnings.warn("Failed to close watcher %r%s" % (self, tb_msg),
ResourceWarning)
# may fail if __init__ did; will be harmlessly printed
self.close()
......@@ -388,7 +431,7 @@ class watcher(object):
self.callback = callback
self.args = args or _NOARGS
self.loop._keepaliveset.add(self)
self._handle = self._watcher.data = type(self).new_handle(self) # pylint:disable=no-member
self._handle = self._watcher_set_data(self._watcher, type(self).new_handle(self)) # pylint:disable=no-member
self._watcher_ffi_start()
self._watcher_ffi_start_unref()
......@@ -401,7 +444,7 @@ class watcher(object):
self._watcher_ffi_stop()
self.loop._keepaliveset.discard(self)
self._handle = None
self._watcher.data = self._FFI.NULL
self._watcher_set_data(self._watcher, self._FFI.NULL) # pylint:disable=no-member
_dbg("Finished main stop for", self, "keepalive?", self in self.loop._keepaliveset)
self.callback = None
self.args = None
......
......@@ -36,22 +36,35 @@ else:
# Python 2 doesn't natively support with statements on _fileobject;
# but it eases our test cases if we can use the same with-statement pattern
# on both Py3 and Py2. Implementation copied from Python 3
if not hasattr(_fileobject, '__enter__'):
# we could either patch in place:
#_fileobject.__enter__ = lambda self: self
#_fileobject.__exit__ = lambda self, *args: self.close() if not self.closed else None
# or we could subclass. subclassing has the benefit of not
# changing the behaviour of the stdlib if we're just imported; OTOH,
# under Python 2.6/2.7, test_urllib2net.py asserts that the class IS
# socket._fileobject (sigh), so we have to work around that.
class _fileobject(_fileobject): # pylint:disable=function-redefined
def __enter__(self):
return self
def __exit__(self, *args):
if not self.closed:
self.close()
assert not hasattr(_fileobject, '__enter__')
# we could either patch in place:
#_fileobject.__enter__ = lambda self: self
#_fileobject.__exit__ = lambda self, *args: self.close() if not self.closed else None
# or we could subclass. subclassing has the benefit of not
# changing the behaviour of the stdlib if we're just imported; OTOH,
# under Python 2.6/2.7, test_urllib2net.py asserts that the class IS
# socket._fileobject (sigh), so we have to work around that.
# We also make it call our custom socket closing method that disposes
# of IO watchers but not the actual socket itself.
# Python 2 relies on reference counting to close sockets, so this is all
# very ugly and fragile.
class _fileobject(_fileobject): # pylint:disable=function-redefined
def __enter__(self):
return self
def __exit__(self, *args):
if not self.closed:
self.close()
def close(self):
if self._sock is not None:
self._sock._drop_events()
super(_fileobject, self).close()
def _get_memory(data):
try:
......@@ -108,11 +121,13 @@ class socket(object):
self.timeout = _socket.getdefaulttimeout()
else:
if hasattr(_sock, '_sock'):
# passed a gevent socket
self._sock = _sock._sock
self.timeout = getattr(_sock, 'timeout', False)
if self.timeout is False:
self.timeout = _socket.getdefaulttimeout()
else:
# passed a native socket
self._sock = _sock
self.timeout = _socket.getdefaulttimeout()
if PYPY:
......@@ -197,24 +212,26 @@ class socket(object):
client_socket._drop()
return sockobj, address
def close(self, _closedsocket=_closedsocket, cancel_wait_ex=cancel_wait_ex):
# This function should not reference any globals. See Python issue #808164.
# Also break any reference to the loop.io objects. Our fileno, which they were
# tied to, is now free to be reused, so these objects are no longer functional.
def _drop_events(self, cancel_wait_ex=cancel_wait_ex):
if self._read_event is not None:
self.hub.cancel_wait(self._read_event, cancel_wait_ex, True)
self._read_event = None
if self._write_event is not None:
self.hub.cancel_wait(self._write_event, cancel_wait_ex, True)
self._write_event = None
def close(self, _closedsocket=_closedsocket):
# This function should not reference any globals. See Python issue #808164.
# Also break any reference to the loop.io objects. Our fileno, which they were
# tied to, is now free to be reused, so these objects are no longer functional.
self._drop_events()
s = self._sock
self._sock = _closedsocket()
if PYPY:
s._drop()
@property
def closed(self):
return isinstance(self._sock, _closedsocket)
......@@ -264,10 +281,14 @@ class socket(object):
def makefile(self, mode='r', bufsize=-1):
# Two things to look out for:
# 1) Closing the original socket object should not close the
# socket (hence creating a new socket instance);
# fileobject (hence creating a new socket instance);
# An alternate approach is what _socket3.py does, which is to
# keep count of the times makefile objects have been opened (Py3's
# SocketIO helps with that).
# SocketIO helps with that). But the newly created socket, which
# has its own read/write watchers, does need those to be closed
# when the fileobject is; our custom subclass does that. Note that
# we can't pass the 'close=True' argument, as that causes reference counts
# to get screwed up, and Python2 sockets rely on those.
# 2) The resulting fileobject must keep the timeout in order
# to be compatible with the stdlib's socket.makefile.
# Pass self as _sock to preserve timeout.
......
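A hedged usage sketch of the behaviour the comments above describe on Python 2: the object returned by ``makefile()`` now works as a context manager, and closing it drops the wrapper socket's IO watchers without closing the underlying socket. The address is a hypothetical echo server; only ``create_connection`` and ``makefile`` are real gevent/stdlib APIs.

```python
from gevent import socket

conn = socket.create_connection(('127.0.0.1', 6000))  # hypothetical server
with conn.makefile('wb') as f:   # __exit__ closes the file object...
    f.write(b'ping\n')
    f.flush()
# ...which drops the wrapper socket's read/write watchers, while the
# original socket stays open and usable until we close it ourselves.
conn.close()
```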
......@@ -820,6 +820,7 @@ class Waiter(object):
>>> timer.start(result.switch, 'hello from Waiter')
>>> result.get() # blocks for 0.1 seconds
'hello from Waiter'
>>> timer.close()
If switch is called before the greenlet gets a chance to call :meth:`get` then
:class:`Waiter` stores the value.
......@@ -830,6 +831,7 @@ class Waiter(object):
>>> sleep(0.2)
>>> result.get() # returns immediately without blocking
'hi from Waiter'
>>> timer.close()
.. warning::
......
......@@ -112,17 +112,21 @@ class watcher(_base.watcher):
# Instead, this is arranged as a callback to GC when the
# watcher class dies. Obviously it's important to keep the ffi
# watcher alive.
_dbg("Request to close handle", ffi_watcher, ffi_watcher.type)
if ffi_watcher.type and not libuv.uv_is_closing(ffi_watcher):
_dbg("Request to close handle", ffi_watcher)
# We can pass in "subclasses" of uv_handle_t that line up at the C level,
# but that don't line up in CFFI without a cast. But be careful what we use
# the cast for; don't pass it back to C.
ffi_handle_watcher = cls._FFI.cast('uv_handle_t*', ffi_watcher)
if ffi_handle_watcher.type and not libuv.uv_is_closing(ffi_watcher):
# If the type isn't set, we were never properly initialized,
# and trying to close it results in libuv terminating the process.
# Sigh. Same thing if it's already in the process of being
# closed.
_dbg("Closing handle", ffi_watcher, ffi_watcher.type)
_dbg("Closing handle", ffi_watcher)
_closing_handles.add(ffi_watcher)
libuv.uv_close(ffi_watcher, libuv._uv_close_callback)
ffi_watcher.data = ffi.NULL
ffi_handle_watcher.data = ffi.NULL
def _watcher_ffi_set_init_ref(self, ref):
......@@ -665,11 +669,9 @@ class stat(_base.StatMixin, watcher):
_watcher_struct_name = 'gevent_fs_poll_t'
_watcher_callback_name = '_gevent_fs_poll_callback3'
def _watcher_create(self, ref):
self._handle = type(self).new_handle(self)
self._watcher = type(self).new(self._watcher_struct_pointer_type)
self.loop._register_watcher(watcher, self._handle)
self._watcher.handle.data = self._handle
def _watcher_set_data(self, the_watcher, data):
the_watcher.handle.data = data
return data
def _watcher_ffi_init(self, args):
return self._watcher_init(self.loop._ptr, self._watcher)
......
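An illustrative, plain-Python sketch (no CFFI involved) of why the ``_watcher_set_data`` indirection introduced in this commit exists: most watchers attach the handle to ``watcher.data`` directly, while the libuv ``stat`` watcher embeds its ``uv_handle_t`` inside a larger struct and stores it one level down at ``watcher.handle.data``. All class names here are stand-ins, not gevent types.

```python
class FlatStruct(object):            # like a plain uv_handle_t
    data = None

class NestedStruct(object):          # like gevent_fs_poll_t: embeds a handle
    def __init__(self):
        self.handle = FlatStruct()

class BaseWatcher(object):
    def _watcher_set_data(self, the_watcher, data):
        the_watcher.data = data      # default: attach directly to the struct
        return data

class StatWatcher(BaseWatcher):
    def _watcher_set_data(self, the_watcher, data):
        the_watcher.handle.data = data   # indirection for the embedded handle
        return data

BaseWatcher()._watcher_set_data(FlatStruct(), object())
StatWatcher()._watcher_set_data(NestedStruct(), object())
```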
# Copyright (c) 2009-2012 Denis Bilenko. See LICENSE for details.
"""TCP/SSL server"""
import sys
import _socket
from _socket import error as SocketError
from _socket import SOL_SOCKET
from _socket import SO_REUSEADDR
from _socket import AF_INET
from _socket import SOCK_DGRAM
from gevent.baseserver import BaseServer
from gevent.socket import EWOULDBLOCK, socket
from gevent.socket import EWOULDBLOCK
from gevent.socket import socket as GeventSocket
from gevent._compat import PYPY, PY3
__all__ = ['StreamServer', 'DatagramServer']
......@@ -147,7 +154,8 @@ class StreamServer(BaseServer):
if not sock.timeout:
return
raise
sock = socket(sock.family, sock.type, sock.proto, fileno=fd)
sock = GeventSocket(sock.family, sock.type, sock.proto, fileno=fd)
# XXX Python issue #7995?
return sock, address
......@@ -156,13 +164,20 @@ class StreamServer(BaseServer):
def do_read(self):
try:
client_socket, address = self.socket.accept()
except _socket.error as err:
except SocketError as err:
if err.args[0] == EWOULDBLOCK:
return
raise
sockobj = socket(_sock=client_socket)
if PYPY:
client_socket._drop()
# XXX: When would this not be the case? In Python 3 it makes sense
# because we're using the low-level _accept method,
# but not in Python 2.
if not isinstance(client_socket, GeventSocket):
# This leads to a leak of the watchers in client_socket
sockobj = GeventSocket(_sock=client_socket)
if PYPY:
client_socket._drop()
else:
sockobj = client_socket
return sockobj, address
def do_close(self, sock, *args):
......@@ -209,7 +224,7 @@ class DatagramServer(BaseServer):
def do_read(self):
try:
data, address = self._socket.recvfrom(8192)
except _socket.error as err:
except SocketError as err:
if err.args[0] == EWOULDBLOCK:
return
raise
......@@ -223,14 +238,14 @@ class DatagramServer(BaseServer):
self._writelock.release()
def _tcp_listener(address, backlog=50, reuse_addr=None, family=_socket.AF_INET):
def _tcp_listener(address, backlog=50, reuse_addr=None, family=AF_INET):
"""A shortcut to create a TCP socket, bind it and put it into listening state."""
sock = socket(family=family)
sock = GeventSocket(family=family)
if reuse_addr is not None:
sock.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, reuse_addr)
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, reuse_addr)
try:
sock.bind(address)
except _socket.error as ex:
except SocketError as ex:
strerror = getattr(ex, 'strerror', None)
if strerror is not None:
ex.strerror = strerror + ': ' + repr(address)
......@@ -240,17 +255,17 @@ def _tcp_listener(address, backlog=50, reuse_addr=None, family=_socket.AF_INET):
return sock
def _udp_socket(address, backlog=50, reuse_addr=None, family=_socket.AF_INET):
def _udp_socket(address, backlog=50, reuse_addr=None, family=AF_INET):
# backlog argument for compat with tcp_listener
# pylint:disable=unused-argument
# we want gevent.socket.socket here
sock = socket(family=family, type=_socket.SOCK_DGRAM)
sock = GeventSocket(family=family, type=SOCK_DGRAM)
if reuse_addr is not None:
sock.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, reuse_addr)
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, reuse_addr)
try:
sock.bind(address)
except _socket.error as ex:
except SocketError as ex:
strerror = getattr(ex, 'strerror', None)
if strerror is not None:
ex.strerror = strerror + ': ' + repr(address)
......
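For context, a minimal ``StreamServer`` sketch using the public gevent API touched above; the handler receives the ``GeventSocket`` produced by ``do_read``. The handler name and port number are arbitrary examples.

```python
from gevent.server import StreamServer

def echo(sock, address):
    # ``sock`` is a gevent socket (GeventSocket in the diff above).
    data = sock.recv(1024)
    if data:
        sock.sendall(data)

server = StreamServer(('127.0.0.1', 6000), echo)  # arbitrary example port
server.serve_forever()
```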
......@@ -120,7 +120,6 @@ def wrap_refcount(method):
break
elif len(deltas) >= 3 and deltas[-1] > 0 and deltas[-1] == deltas[-2] and deltas[-2] == deltas[-3]:
diff = report_diff(hist_before, hist_after)
print(gevent.get_hub().loop._active_watchers)
raise AssertionError('refcount increased by %r\n%s' % (deltas, diff))
# OK, we don't know for sure yet. Let's search for more
if sum(deltas[-3:]) <= 0 or sum(deltas[-4:]) <= 0 or deltas[-4:].count(0) >= 2:
......
......@@ -301,12 +301,17 @@ def main():
# On Python 3[.6], the system site.py module has
# "open(fullname, 'rU')" which produces the warning that
# 'U' is deprecated, so ignore warnings from site.py
os.environ['PYTHONWARNINGS'] = 'default,ignore:::site:'
# importlib/_bootstrap.py likes to spit out "ImportWarning:
# can't resolve package from __spec__ or __package__, falling
# back on __name__ and __path__". I have no idea what that means, but it seems harmless
# and is annoying.
os.environ['PYTHONWARNINGS'] = 'default,ignore:::site:,ignore:::importlib._bootstrap:,ignore:::importlib._bootstrap_external:'
if 'PYTHONFAULTHANDLER' not in os.environ:
os.environ['PYTHONFAULTHANDLER'] = 'true'
if 'GEVENT_DEBUG' not in os.environ:
os.environ['GEVENT_DEBUG'] = 'error'
os.environ['GEVENT_DEBUG'] = 'debug'
tests = discover(options.tests, options.ignore, coverage)
if options.discover:
......
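For reference, the ``PYTHONWARNINGS`` value set above uses the standard ``action:message:category:module:lineno`` filter syntax; a rough programmatic equivalent (a sketch, not part of this commit) would be:

```python
import re
import warnings

# Roughly what PYTHONWARNINGS='default,ignore:::site:,...' expands to at
# interpreter startup: show warnings by default, but ignore anything raised
# from site.py or the importlib bootstrap modules.
warnings.simplefilter('default')
for mod in ('site', 'importlib._bootstrap', 'importlib._bootstrap_external'):
    warnings.filterwarnings('ignore', module=re.escape(mod))
```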
......@@ -43,9 +43,14 @@ def TESTRUNNER(tests=None):
tests = [os.path.basename(x) for x in tests]
version_tests = [os.path.basename(x) for x in version_tests]
options = {'cwd': directory,
'timeout': TIMEOUT,
'setenv': {'PYTHONPATH': PYTHONPATH}}
options = {
'cwd': directory,
'timeout': TIMEOUT,
'setenv': {
'PYTHONPATH': PYTHONPATH,
'GEVENT_DEBUG': 'error',
}
}
if tests and not sys.platform.startswith("win"):
atexit.register(os.system, 'rm -f */@test*')
......
......@@ -20,6 +20,6 @@ print('must exit after 0.5 seconds.')
timer = loop.timer(0.5)
timer.start(lambda: None)
loop.run()
timer.close()
loop.destroy()
del loop
......@@ -49,25 +49,26 @@ class TestExceptionInMainloop(greentest.TestCase):
gevent.sleep(DELAY)
delay = time.time() - start
assert delay >= DELAY * 0.9, 'sleep returned after %s seconds (was scheduled for %s)' % (delay, DELAY)
delay_range = DELAY * 0.9
self.assertTimeWithinRange(delay, DELAY - delay_range, DELAY + delay_range)
error = greentest.ExpectedException('TestExceptionInMainloop.test_sleep/fail')
def fail():
raise error
t = get_hub().loop.timer(0.001)
t.start(fail)
with get_hub().loop.timer(0.001) as t:
t.start(fail)
self.expect_one_error()
self.expect_one_error()
start = time.time()
gevent.sleep(DELAY)
delay = time.time() - start
start = time.time()
gevent.sleep(DELAY)
delay = time.time() - start
self.assert_error(value=error)
self.assert_error(value=error)
self.assertTimeWithinRange(delay, DELAY - delay_range, DELAY + delay_range)
assert delay >= DELAY * 0.9, 'sleep returned after %s seconds (was scheduled for %s)' % (delay, DELAY)
class TestSleep(greentest.GenericWaitTestCase):
......@@ -86,12 +87,9 @@ class TestWaiterGet(greentest.GenericWaitTestCase):
self.waiter = Waiter()
def wait(self, timeout):
evt = get_hub().loop.timer(timeout)
evt.start(self.waiter.switch)
try:
with get_hub().loop.timer(timeout) as evt:
evt.start(self.waiter.switch)
return self.waiter.get()
finally:
evt.stop()
class TestWaiter(greentest.TestCase):
......
......@@ -188,6 +188,7 @@ class TestSocket(Test):
finally:
t.join()
listener.close()
connector.close()
def test_server_makefile1(self):
listener = socket.socket()
......@@ -222,6 +223,7 @@ class TestSocket(Test):
finally:
t.join()
listener.close()
connector.close()
def test_server_makefile2(self):
listener = socket.socket()
......@@ -251,6 +253,7 @@ class TestSocket(Test):
finally:
t.join()
listener.close()
connector.close()
......@@ -308,8 +311,8 @@ class TestSSL(Test):
self.assert_closed(s, fileno)
def test_makefile1(self):
s = self.make_open_socket()
s = ssl.wrap_socket(s)
raw_s = self.make_open_socket()
s = ssl.wrap_socket(raw_s)
self._close_on_teardown(s)
fileno = s.fileno()
......@@ -319,8 +322,10 @@ class TestSSL(Test):
s.close()
self.assert_open(s, fileno)
f.close()
raw_s.close()
self.assert_closed(s, fileno)
def test_makefile2(self):
s = self.make_open_socket()
fileno = s.fileno()
......
......@@ -63,7 +63,7 @@ if hasattr(signal, 'SIGCHLD'):
raise
raise AssertionError("Failed to wait using", func)
finally:
timeout.cancel()
timeout.close()
sys.exit(0)
else:
print("No SIGCHLD, not testing")