Commit a30d587e authored by Victor Stinner's avatar Victor Stinner Committed by GitHub

[2.7] bpo-33718: Update regrtest from master (GH-7402)

Backport manually regrtest enhancements from master:

* No longer clear filters, like --match, to re-run failed tests in
  verbose mode (-w option).
* Tests result: always indicate if tests have been interrupted.
* Enhance tests summary
* After failing tests are re-run, display again the summary.
* Add environment_altered to test.support, but it's currently unused
* regrtest: also count ENV_CHANGED as failures
* regrtest: Enhance format_duration()
* Replace test_support with support
parent 134bb46d
......@@ -248,7 +248,7 @@ PROGRESS_MIN_TIME = 30.0 # seconds
# Display the running tests if nothing happened last N seconds
PROGRESS_UPDATE = 30.0 # seconds
from test import test_support
from test import support
ALL_RESOURCES = ('audio', 'curses', 'largefile', 'network', 'bsddb',
'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui',
......@@ -276,7 +276,11 @@ def format_duration(seconds):
return '%.0f sec' % seconds
minutes, seconds = divmod(seconds, 60.0)
return '%.0f min %.0f sec' % (minutes, seconds)
hours, minutes = divmod(minutes, 60.0)
if hours:
return '%.0f hour %.0f min' % (hours, minutes)
else:
return '%.0f min %.0f sec' % (minutes, seconds)
_FORMAT_TEST_RESULT = {
......@@ -317,7 +321,7 @@ def unload_test_modules(save_modules):
# Unload the newly imported modules (best effort finalization)
for module in sys.modules.keys():
if module not in save_modules and module.startswith("test."):
test_support.unload(module)
support.unload(module)
def main(tests=None, testdir=None, verbose=0, quiet=False,
......@@ -350,7 +354,7 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
"""
regrtest_start_time = time.time()
test_support.record_original_stdout(sys.stdout)
support.record_original_stdout(sys.stdout)
try:
opts, args = getopt.getopt(sys.argv[1:], 'hvqxsSrf:lu:t:TD:NLR:FwWM:j:PGm:',
['help', 'verbose', 'verbose2', 'verbose3', 'quiet',
......@@ -406,7 +410,7 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
elif o == '--matchfile':
if match_tests is None:
match_tests = []
filename = os.path.join(test_support.SAVEDCWD, a)
filename = os.path.join(support.SAVEDCWD, a)
with open(filename) as fp:
for line in fp:
match_tests.append(line.strip())
......@@ -439,7 +443,7 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
if len(huntrleaks) == 2 or not huntrleaks[2]:
huntrleaks[2:] = ["reflog.txt"]
elif o in ('-M', '--memlimit'):
test_support.set_memlimit(a)
support.set_memlimit(a)
elif o in ('-u', '--use'):
u = [x.lower() for x in a.split(',')]
for r in u:
......@@ -525,6 +529,7 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
skipped = []
resource_denieds = []
environment_changed = []
rerun = []
interrupted = False
if findleaks:
......@@ -552,7 +557,7 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
if fromfile:
tests = []
fp = open(os.path.join(test_support.SAVEDCWD, fromfile))
fp = open(os.path.join(support.SAVEDCWD, fromfile))
for line in fp:
guts = line.split() # assuming no test has whitespace in its name
if guts and not guts[0].startswith('#'):
......@@ -596,7 +601,7 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
tracer = trace.Trace(trace=False, count=True)
test_times = []
test_support.use_resources = use_resources
support.use_resources = use_resources
save_modules = set(sys.modules)
def accumulate_result(test, result):
......@@ -637,8 +642,9 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
def display_progress(test_index, test):
# "[ 51/405/1] test_tcl"
line = "{1:{0}}{2}".format(test_count_width, test_index, test_count)
if bad and not pgo:
line = '{}/{}'.format(line, len(bad))
fails = len(bad) + len(environment_changed)
if fails and not pgo:
line = '{}/{}'.format(line, fails)
line = '[{}]'.format(line)
# add the system load prefix: "load avg: 1.80 "
......@@ -695,7 +701,7 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
)
yield (test, args_tuple)
pending = tests_and_args()
opt_args = test_support.args_from_interpreter_flags()
opt_args = support.args_from_interpreter_flags()
base_cmd = [sys.executable] + opt_args + ['-m', 'test.regrtest']
# required to spawn a new process with PGO flag on/off
if pgo:
......@@ -768,7 +774,7 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
continue
dt = time.time() - worker.start_time
if dt >= PROGRESS_MIN_TIME:
running.append('%s (%.0f sec)' % (current_test, dt))
running.append('%s (%s)' % (current_test, format_duration(dt)))
return running
finished = 0
......@@ -881,56 +887,97 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
unload_test_modules(save_modules)
if interrupted and not pgo:
def get_tests_result():
    # Build a short, comma-separated summary of the whole run, e.g.
    # "FAILURE", "ENV CHANGED", "FAILURE, INTERRUPTED" or "SUCCESS".
    # Reads the bad/environment_changed/interrupted state from the
    # enclosing main() scope.
    labels = []
    if bad:
        labels.append("FAILURE")
    elif fail_env_changed and environment_changed:
        labels.append("ENV CHANGED")
    if interrupted:
        labels.append("INTERRUPTED")
    return ', '.join(labels or ["SUCCESS"])
def display_result():
# Print the end-of-run report: the "== Tests result ==" banner, then the
# omitted / OK / slowest / failed / env-changed / skipped / re-run test
# lists, each guarded by the quiet/pgo flags from the enclosing scope.
#
# NOTE(review): this span is a commit-diff rendering with the +/- markers
# stripped.  The lines below, up to the "== Tests result ==" banner, are
# the OLD report code removed by this commit; the new function body starts
# at the banner line and repeats the same reporting with extra blank
# lines plus the new result banner and re-run section.
#
# --- pre-change lines (removed by this commit) ---
# print a newline after ^C
print
print "Test suite interrupted by signal SIGINT."
omitted = set(selected) - set(good) - set(bad) - set(skipped)
print count(len(omitted), "test"), "omitted:"
printlist(omitted)
if good and not quiet and not pgo:
if not bad and not skipped and not interrupted and len(good) > 1:
print "All",
print count(len(good), "test"), "OK."
if print_slow:
test_times.sort(reverse=True)
print "10 slowest tests:"
for test_time, test in test_times[:10]:
print("- %s: %.1fs" % (test, test_time))
if bad and not pgo:
print count(len(bad), "test"), "failed:"
printlist(bad)
if environment_changed and not pgo:
print "{} altered the execution environment:".format(
count(len(environment_changed), "test"))
printlist(environment_changed)
if skipped and not quiet and not pgo:
print count(len(skipped), "test"), "skipped:"
printlist(skipped)
e = _ExpectedSkips()
plat = sys.platform
if e.isvalid():
surprise = set(skipped) - e.getexpected() - set(resource_denieds)
if surprise:
print count(len(surprise), "skip"), \
"unexpected on", plat + ":"
printlist(surprise)
# --- post-change body (added by this commit) ---
print("== Tests result: %s ==" % get_tests_result())
if interrupted and not pgo:
print
print "Test suite interrupted by signal SIGINT."
# Tests that were selected but never got to run before the interrupt.
omitted = set(selected) - set(good) - set(bad) - set(skipped)
print count(len(omitted), "test"), "omitted:"
printlist(omitted)
if good and not quiet and not pgo:
print
if not bad and not skipped and not interrupted and len(good) > 1:
print "All",
print count(len(good), "test"), "OK."
if print_slow:
test_times.sort(reverse=True)
print
print "10 slowest tests:"
for test_time, test in test_times[:10]:
print("- %s: %.1fs" % (test, test_time))
if bad and not pgo:
print
print count(len(bad), "test"), "failed:"
printlist(bad)
if environment_changed and not pgo:
print
print "{} altered the execution environment:".format(
count(len(environment_changed), "test"))
printlist(environment_changed)
if skipped and not quiet and not pgo:
print
print count(len(skipped), "test"), "skipped:"
printlist(skipped)
# Compare actual skips against the platform's expected-skip list.
e = _ExpectedSkips()
plat = sys.platform
if e.isvalid():
surprise = set(skipped) - e.getexpected() - set(resource_denieds)
if surprise:
print count(len(surprise), "skip"), \
"unexpected on", plat + ":"
printlist(surprise)
else:
print "Those skips are all expected on", plat + "."
else:
print "Those skips are all expected on", plat + "."
else:
print "Ask someone to teach regrtest.py about which tests are"
print "expected to get skipped on", plat + "."
print "Ask someone to teach regrtest.py about which tests are"
print "expected to get skipped on", plat + "."
# New in this commit: list the tests that were re-run in verbose mode.
if rerun:
print
print("%s:" % count(len(rerun), "re-run test"))
printlist(rerun)
display_result()
if verbose2 and bad:
print
print "Re-running failed tests in verbose mode"
for test in bad[:]:
rerun = bad[:]
for test in rerun:
print "Re-running test %r in verbose mode" % test
sys.stdout.flush()
try:
test_support.verbose = True
support.verbose = True
ok = runtest(test, True, quiet, huntrleaks, None, pgo,
testdir=testdir)
match_tests=match_tests, testdir=testdir)
except KeyboardInterrupt:
# print a newline separate from the ^C
print
......@@ -943,6 +990,8 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
print count(len(bad), "test"), "failed again:"
printlist(bad)
display_result()
if single:
if next_single_test:
with open(filename, 'w') as fp:
......@@ -961,15 +1010,7 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
duration = time.time() - regrtest_start_time
print("Total duration: %s" % format_duration(duration))
if bad:
result = "FAILURE"
elif interrupted:
result = "INTERRUPTED"
elif fail_env_changed and environment_changed:
result = "ENV CHANGED"
else:
result = "SUCCESS"
print("Tests result: %s" % result)
print("Tests result: %s" % get_tests_result())
if bad:
sys.exit(2)
......@@ -1034,13 +1075,17 @@ def runtest(test, verbose, quiet,
PASSED test passed
"""
test_support.verbose = verbose # Tell tests to be moderately quiet
support.verbose = verbose # Tell tests to be moderately quiet
if use_resources is not None:
test_support.use_resources = use_resources
support.use_resources = use_resources
try:
test_support.set_match_tests(match_tests)
support.set_match_tests(match_tests)
# reset the environment_altered flag to detect if a test altered
# the environment
support.environment_altered = False
if failfast:
test_support.failfast = True
support.failfast = True
return runtest_inner(test, verbose, quiet, huntrleaks, pgo, testdir)
finally:
cleanup_test_droppings(test, verbose)
......@@ -1140,31 +1185,31 @@ class saved_test_environment:
asyncore.close_all(ignore_all=True)
asyncore.socket_map.update(saved_map)
# NOTE(review): the two functions below are a commit-diff rendering with
# the +/- markers stripped: each test_support.* line (and the old
# *_test_support_* def line) is the removed pre-change twin of the
# support.* line next to it.  Only the support.* variants exist in the
# resulting file.
def get_test_support_TESTFN(self):
if os.path.isfile(test_support.TESTFN):
# Classify what currently sits at support.TESTFN:
# 'f' for a regular file, 'd' for a directory, None if absent.
def get_support_TESTFN(self):
if os.path.isfile(support.TESTFN):
result = 'f'
elif os.path.isdir(test_support.TESTFN):
elif os.path.isdir(support.TESTFN):
result = 'd'
else:
result = None
return result
# If TESTFN did not exist before the test (saved_value is None), remove
# whatever the test left behind at that path — best-effort cleanup.
def restore_support_TESTFN(self, saved_value):
if saved_value is None:
if os.path.isfile(test_support.TESTFN):
os.unlink(test_support.TESTFN)
elif os.path.isdir(test_support.TESTFN):
shutil.rmtree(test_support.TESTFN)
if os.path.isfile(support.TESTFN):
os.unlink(support.TESTFN)
elif os.path.isdir(support.TESTFN):
shutil.rmtree(support.TESTFN)
def get_files(self):
    # Snapshot the current working directory as a sorted listing; a
    # trailing '/' is appended to directory entries so that a file and a
    # directory with the same name compare as different entries.
    entries = []
    for name in os.listdir(os.curdir):
        suffix = '/' if os.path.isdir(name) else ''
        entries.append(name + suffix)
    return sorted(entries)
def restore_files(self, saved_value):
fn = test_support.TESTFN
fn = support.TESTFN
if fn not in saved_value and (fn + '/') not in saved_value:
if os.path.isfile(fn):
test_support.unlink(fn)
support.unlink(fn)
elif os.path.isdir(fn):
test_support.rmtree(fn)
support.rmtree(fn)
def resource_info(self):
for name in self.resources:
......@@ -1181,6 +1226,10 @@ class saved_test_environment:
def __exit__(self, exc_type, exc_val, exc_tb):
saved_values = self.saved_values
del self.saved_values
# Read support.environment_altered, set by support helper functions
self.changed |= support.environment_altered
for name, get, restore in self.resource_info():
current = get()
original = saved_values.pop(name)
......@@ -1203,10 +1252,10 @@ class saved_test_environment:
# Best-effort cleanup run after each test: reap child processes the test
# may have leaked.
# NOTE(review): diff rendering — the test_support.* line is the removed
# pre-change version of the support.* line below it.
def post_test_cleanup():
test_support.reap_children()
support.reap_children()
def runtest_inner(test, verbose, quiet, huntrleaks=False, pgo=False, testdir=None):
test_support.unload(test)
support.unload(test)
if verbose:
capture_stdout = None
else:
......@@ -1241,7 +1290,7 @@ def runtest_inner(test, verbose, quiet, huntrleaks=False, pgo=False, testdir=Non
post_test_cleanup()
finally:
sys.stdout = save_stdout
except test_support.ResourceDenied, msg:
except support.ResourceDenied, msg:
if not quiet and not pgo:
print test, "skipped --", msg
sys.stdout.flush()
......@@ -1253,7 +1302,7 @@ def runtest_inner(test, verbose, quiet, huntrleaks=False, pgo=False, testdir=Non
return SKIPPED, test_time
except KeyboardInterrupt:
raise
except test_support.TestFailed, msg:
except support.TestFailed, msg:
if not pgo:
print >>sys.stderr, "test", test, "failed --", msg
sys.stderr.flush()
......@@ -1298,7 +1347,7 @@ def cleanup_test_droppings(testname, verbose):
# since if a test leaves a file open, it cannot be deleted by name (while
# there's nothing we can do about that here either, we can display the
# name of the offending test, which is a real help).
for name in (test_support.TESTFN,
for name in (support.TESTFN,
"db_home",
):
if not os.path.exists(name):
......@@ -1365,7 +1414,7 @@ def dash_R(the_module, test, indirect_test, huntrleaks):
deltas = []
nwarmup, ntracked, fname = huntrleaks
fname = os.path.join(test_support.SAVEDCWD, fname)
fname = os.path.join(support.SAVEDCWD, fname)
repcount = nwarmup + ntracked
print >> sys.stderr, "beginning", repcount, "repetitions"
print >> sys.stderr, ("1234567890"*(repcount//10 + 1))[:repcount]
......@@ -1581,12 +1630,12 @@ def _list_cases(suite):
if isinstance(test, unittest.TestSuite):
_list_cases(test)
elif isinstance(test, unittest.TestCase):
if test_support.match_test(test):
if support.match_test(test):
print(test.id())
def list_cases(testdir, selected, match_tests):
test_support.verbose = False
test_support.set_match_tests(match_tests)
support.verbose = False
support.set_match_tests(match_tests)
save_modules = set(sys.modules)
skipped = []
......@@ -2020,8 +2069,8 @@ def main_in_temp_cwd():
# Run the tests in a context manager that temporary changes the CWD to a
# temporary and writable directory. If it's not possible to create or
# change the CWD, the original CWD will be used. The original CWD is
# available from test_support.SAVEDCWD.
with test_support.temp_cwd(TESTCWD, quiet=True):
# available from support.SAVEDCWD.
with support.temp_cwd(TESTCWD, quiet=True):
main()
if __name__ == '__main__':
......
......@@ -1671,6 +1671,14 @@ def run_doctest(module, verbosity=None):
#=======================================================================
# Threading support to prevent reporting refleaks when running regrtest.py -R
# Flag used by saved_test_environment of test.libregrtest.save_env,
# to check if a test modified the environment. The flag should be set to False
# before running a new test.
#
# For example, threading_cleanup() sets the flag if the function fails
# to clean up threads.
environment_altered = False
# NOTE: we use thread._count() rather than threading.enumerate() (or the
# moral equivalent thereof) because a threading.Thread object is still alive
# until its __bootstrap() method has returned, even after it has been
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment