Commit d0f49d2f authored by Steve Dower, committed by GitHub

bpo-34582: Adds JUnit XML output for regression tests (GH-9210)

parent cb5778f0
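Before relying on the CI changes below, the new flag can be exercised locally; a minimal sketch (the test module and output path are illustrative):

    import subprocess
    import sys

    # run one test module under regrtest and write JUnit-style XML results
    subprocess.run([sys.executable, '-m', 'test', 'test_os',
                    '--junit-xml=test-results.xml'], check=False)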
@@ -70,6 +70,15 @@ steps:
   displayName: 'Run patchcheck.py'
   condition: and(succeeded(), ne(variables['DocOnly'], 'true'))
 
-- script: xvfb-run make buildbottest TESTOPTS="-j4 -uall,-cpu"
+- script: xvfb-run make buildbottest TESTOPTS="-j4 -uall,-cpu --junit-xml=$(build.binariesDirectory)/test-results.xml"
   displayName: 'Tests'
   condition: and(succeeded(), ne(variables['DocOnly'], 'true'))
+
+- task: PublishTestResults@2
+  displayName: 'Publish Test Results'
+  inputs:
+    testResultsFiles: '$(build.binariesDirectory)/test-results.xml'
+    mergeTestResults: true
+    testRunTitle: '$(system.pullRequest.targetBranch)-linux'
+    platform: linux
+  condition: and(succeededOrFailed(), ne(variables['DocOnly'], 'true'))
@@ -50,6 +50,15 @@ steps:
   displayName: 'Display build info'
   condition: and(succeeded(), ne(variables['DocOnly'], 'true'))
 
-- script: make buildbottest TESTOPTS="-j4 -uall,-cpu"
+- script: make buildbottest TESTOPTS="-j4 -uall,-cpu --junit-xml=$(build.binariesDirectory)/test-results.xml"
   displayName: 'Tests'
   condition: and(succeeded(), ne(variables['DocOnly'], 'true'))
+
+- task: PublishTestResults@2
+  displayName: 'Publish Test Results'
+  inputs:
+    testResultsFiles: '$(build.binariesDirectory)/test-results.xml'
+    mergeTestResults: true
+    testRunTitle: '$(system.pullRequest.targetBranch)-macOS'
+    platform: macOS
+  condition: and(succeededOrFailed(), ne(variables['DocOnly'], 'true'))
@@ -54,8 +54,17 @@ steps:
   displayName: 'Display build info'
   condition: and(succeeded(), ne(variables['DocOnly'], 'true'))
 
-- script: PCbuild\rt.bat -q -uall -u-cpu -rwW --slowest --timeout=1200 -j0
+- script: PCbuild\rt.bat -q -uall -u-cpu -rwW --slowest --timeout=1200 -j0 --junit-xml="$(Build.BinariesDirectory)\test-results.xml"
   displayName: 'Tests'
   env:
     PREFIX: $(Py_OutDir)\$(outDirSuffix)
   condition: and(succeeded(), ne(variables['DocOnly'], 'true'))
+
+- task: PublishTestResults@2
+  displayName: 'Publish Test Results'
+  inputs:
+    testResultsFiles: '$(Build.BinariesDirectory)\test-results.xml'
+    mergeTestResults: true
+    testRunTitle: '$(System.PullRequest.TargetBranch)-$(outDirSuffix)'
+    platform: $(outDirSuffix)
+  condition: and(succeededOrFailed(), ne(variables['DocOnly'], 'true'))
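Note that the publish steps run under succeededOrFailed(), so results are uploaded even when the test step fails. A quick way to sanity-check the file the PublishTestResults@2 task consumes (a sketch, assuming the output path used above):

    import xml.etree.ElementTree as ET

    # the aggregated report has a <testsuites> root with summary attributes
    root = ET.parse('test-results.xml').getroot()
    assert root.tag == 'testsuites'
    print(root.get('tests'), root.get('errors'), root.get('failures'))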
@@ -52,7 +52,8 @@ class EINTRBaseTest(unittest.TestCase):
         # Issue #25277: Use faulthandler to try to debug a hang on FreeBSD
         if hasattr(faulthandler, 'dump_traceback_later'):
-            faulthandler.dump_traceback_later(10 * 60, exit=True)
+            faulthandler.dump_traceback_later(10 * 60, exit=True,
+                                              file=sys.__stderr__)
 
     @classmethod
     def stop_alarm(cls):
...
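The extra file=sys.__stderr__ argument matters because the new runner buffers sys.stderr during each test; sys.__stderr__ is the original stream, so the watchdog's traceback is not swallowed by the capture buffer. A standalone sketch of the call:

    import faulthandler
    import sys

    # dump all thread tracebacks to the real stderr after 10 minutes, then exit
    faulthandler.dump_traceback_later(10 * 60, exit=True, file=sys.__stderr__)
    faulthandler.cancel_dump_traceback_later()  # disarm again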
@@ -268,6 +268,10 @@ def _create_parser():
                        help='if a test file alters the environment, mark '
                             'the test as failed')
 
+    group.add_argument('--junit-xml', dest='xmlpath', metavar='FILENAME',
+                       help='writes JUnit-style XML results to the specified '
+                            'file')
+
     return parser
...
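In isolation, the new option simply maps --junit-xml to ns.xmlpath; a minimal sketch of the same argparse definition:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--junit-xml', dest='xmlpath', metavar='FILENAME',
                        help='writes JUnit-style XML results to the specified '
                             'file')
    ns = parser.parse_args(['--junit-xml', 'test-results.xml'])
    assert ns.xmlpath == 'test-results.xml'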
@@ -100,8 +100,11 @@ class Regrtest:
         self.next_single_test = None
         self.next_single_filename = None
 
+        # used by --junit-xml
+        self.testsuite_xml = None
+
     def accumulate_result(self, test, result):
-        ok, test_time = result
+        ok, test_time, xml_data = result
         if ok not in (CHILD_ERROR, INTERRUPTED):
             self.test_times.append((test_time, test))
         if ok == PASSED:
@@ -118,6 +121,15 @@ class Regrtest:
         elif ok != INTERRUPTED:
             raise ValueError("invalid test result: %r" % ok)
 
+        if xml_data:
+            import xml.etree.ElementTree as ET
+            for e in xml_data:
+                try:
+                    self.testsuite_xml.append(ET.fromstring(e))
+                except ET.ParseError:
+                    print(xml_data, file=sys.__stderr__)
+                    raise
+
     def display_progress(self, test_index, test):
         if self.ns.quiet:
             return
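Each test run serializes its testsuite elements to text and the main process re-parses them here; a round-trip sketch of that handoff (the element attributes are illustrative):

    import xml.etree.ElementTree as ET

    # what runtest() returns as xml_data ...
    suite = ET.Element('testsuite', {'tests': '2', 'errors': '0', 'failures': '1'})
    xml_data = [ET.tostring(suite).decode('us-ascii')]
    # ... and what accumulate_result() stores in self.testsuite_xml
    parsed = [ET.fromstring(e) for e in xml_data]
    assert parsed[0].get('failures') == '1'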
@@ -164,6 +176,9 @@ class Regrtest:
                   file=sys.stderr)
             ns.findleaks = False
 
+        if ns.xmlpath:
+            support.junit_xml_list = self.testsuite_xml = []
+
         # Strip .py extensions.
         removepy(ns.args)
@@ -384,7 +399,7 @@ class Regrtest:
                 result = runtest(self.ns, test)
             except KeyboardInterrupt:
                 self.interrupted = True
-                self.accumulate_result(test, (INTERRUPTED, None))
+                self.accumulate_result(test, (INTERRUPTED, None, None))
                 break
             else:
                 self.accumulate_result(test, result)
@@ -508,6 +523,31 @@ class Regrtest:
         if self.ns.runleaks:
             os.system("leaks %d" % os.getpid())
 
+    def save_xml_result(self):
+        if not self.ns.xmlpath and not self.testsuite_xml:
+            return
+
+        import xml.etree.ElementTree as ET
+        root = ET.Element("testsuites")
+
+        # Manually count the totals for the overall summary
+        totals = {'tests': 0, 'errors': 0, 'failures': 0}
+        for suite in self.testsuite_xml:
+            root.append(suite)
+            for k in totals:
+                try:
+                    totals[k] += int(suite.get(k, 0))
+                except ValueError:
+                    pass
+
+        for k, v in totals.items():
+            root.set(k, str(v))
+
+        xmlpath = os.path.join(support.SAVEDCWD, self.ns.xmlpath)
+        with open(xmlpath, 'wb') as f:
+            for s in ET.tostringlist(root):
+                f.write(s)
+
     def main(self, tests=None, **kwargs):
         global TEMPDIR
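save_xml_result() wraps the collected <testsuite> elements in a single <testsuites> root and sums their counters; a self-contained sketch of that aggregation with two fake suites:

    import xml.etree.ElementTree as ET

    root = ET.Element('testsuites')
    for tests, errors, failures in [(3, 0, 1), (2, 1, 0)]:
        root.append(ET.Element('testsuite', {'tests': str(tests),
                                             'errors': str(errors),
                                             'failures': str(failures)}))

    totals = {'tests': 0, 'errors': 0, 'failures': 0}
    for suite in root:
        for k in totals:
            totals[k] += int(suite.get(k, 0))
    for k, v in totals.items():
        root.set(k, str(v))

    print(ET.tostring(root).decode())  # root carries tests="5" errors="1" failures="1"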
@@ -570,6 +610,9 @@ class Regrtest:
             self.rerun_failed_tests()
 
         self.finalize()
+
+        self.save_xml_result()
+
         if self.bad:
             sys.exit(2)
         if self.interrupted:
...
@@ -85,8 +85,8 @@ def runtest(ns, test):
     ns -- regrtest namespace of options
     test -- the name of the test
 
-    Returns the tuple (result, test_time), where result is one of the
-    constants:
+    Returns the tuple (result, test_time, xml_data), where result is one
+    of the constants:
 
         INTERRUPTED      KeyboardInterrupt when run under -j
         RESOURCE_DENIED  test skipped because resource denied
@@ -94,6 +94,9 @@ def runtest(ns, test):
         ENV_CHANGED      test failed because it changed the execution environment
         FAILED           test failed
         PASSED           test passed
+
+    If ns.xmlpath is not None, xml_data is a list containing each
+    generated testsuite element.
     """
 
     output_on_failure = ns.verbose3
@@ -106,22 +109,13 @@ def runtest(ns, test):
         # reset the environment_altered flag to detect if a test altered
         # the environment
         support.environment_altered = False
+        support.junit_xml_list = xml_list = [] if ns.xmlpath else None
         if ns.failfast:
             support.failfast = True
         if output_on_failure:
             support.verbose = True
 
-            # Reuse the same instance to all calls to runtest(). Some
-            # tests keep a reference to sys.stdout or sys.stderr
-            # (eg. test_argparse).
-            if runtest.stringio is None:
-                stream = io.StringIO()
-                runtest.stringio = stream
-            else:
-                stream = runtest.stringio
-                stream.seek(0)
-                stream.truncate()
+            stream = io.StringIO()
 
             orig_stdout = sys.stdout
             orig_stderr = sys.stderr
             try:
@@ -138,12 +132,18 @@ def runtest(ns, test):
         else:
             support.verbose = ns.verbose  # Tell tests to be moderately quiet
             result = runtest_inner(ns, test, display_failure=not ns.verbose)
-            return result
+
+        if xml_list:
+            import xml.etree.ElementTree as ET
+            xml_data = [ET.tostring(x).decode('us-ascii') for x in xml_list]
+        else:
+            xml_data = None
+
+        return result + (xml_data,)
     finally:
         if use_timeout:
             faulthandler.cancel_dump_traceback_later()
         cleanup_test_droppings(test, ns.verbose)
-runtest.stringio = None
+        support.junit_xml_list = None
 
 def post_test_cleanup():
...
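The capture list is module-global in test.support, armed per call and always disarmed afterwards; the lifecycle, sketched:

    from test import support

    support.junit_xml_list = xml_list = []   # armed only when ns.xmlpath is set
    try:
        pass  # run the test; _run_suite() appends one testsuite element per suite
    finally:
        support.junit_xml_list = None        # disarmed no matter how the test exits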
@@ -67,7 +67,7 @@ def run_tests_worker(worker_args):
     try:
         result = runtest(ns, testname)
     except KeyboardInterrupt:
-        result = INTERRUPTED, ''
+        result = INTERRUPTED, '', None
     except BaseException as e:
         traceback.print_exc()
         result = CHILD_ERROR, str(e)
@@ -122,7 +122,7 @@ class MultiprocessThread(threading.Thread):
             self.current_test = None
 
         if retcode != 0:
-            result = (CHILD_ERROR, "Exit code %s" % retcode)
+            result = (CHILD_ERROR, "Exit code %s" % retcode, None)
             self.output.put((test, stdout.rstrip(), stderr.rstrip(),
                              result))
             return False
@@ -133,6 +133,7 @@ class MultiprocessThread(threading.Thread):
             return True
 
         result = json.loads(result)
+        assert len(result) == 3, f"Invalid result tuple: {result!r}"
         self.output.put((test, stdout.rstrip(), stderr.rstrip(),
                          result))
         return False
@@ -195,7 +196,7 @@ def run_tests_multiprocess(regrtest):
             regrtest.accumulate_result(test, result)
 
             # Display progress
-            ok, test_time = result
+            ok, test_time, xml_data = result
             text = format_test_result(test, ok)
             if (ok not in (CHILD_ERROR, INTERRUPTED)
                 and test_time >= PROGRESS_MIN_TIME
...
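Worker results travel back as a JSON line on stdout, which turns the 3-tuple into a list; the new assert guards that shape. A sketch of the round trip (the PASSED value is an illustrative stand-in for the regrtest constant):

    import json

    PASSED = 1                                   # illustrative stand-in
    result = (PASSED, 1.234, None)               # (result, test_time, xml_data)
    decoded = json.loads(json.dumps(result))
    assert len(decoded) == 3, f"Invalid result tuple: {decoded!r}"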
@@ -6,6 +6,7 @@ if __name__ != 'test.support':
 import asyncio.events
 import collections.abc
 import contextlib
+import datetime
 import errno
 import faulthandler
 import fnmatch
@@ -13,6 +14,7 @@ import functools
 import gc
 import importlib
 import importlib.util
+import io
 import logging.handlers
 import nntplib
 import os
@@ -34,6 +36,8 @@ import unittest
 import urllib.error
 import warnings
 
+from .testresult import get_test_runner
+
 try:
     import multiprocessing.process
 except ImportError:
@@ -295,6 +299,7 @@ use_resources = None      # Flag set to [] by regrtest.py
 max_memuse = 0           # Disable bigmem tests (they will still be run with
                          # small sizes, to make sure they work.)
 real_max_memuse = 0
+junit_xml_list = None    # list of testsuite XML elements
 failfast = False
 
 # _original_stdout is meant to hold stdout at the time regrtest began.
@@ -1891,13 +1896,16 @@ def _filter_suite(suite, pred):
 def _run_suite(suite):
     """Run tests from a unittest.TestSuite-derived class."""
-    if verbose:
-        runner = unittest.TextTestRunner(sys.stdout, verbosity=2,
-                                         failfast=failfast)
-    else:
-        runner = BasicTestRunner()
+    runner = get_test_runner(sys.stdout, verbosity=verbose)
+
+    # TODO: Remove this before merging (here for easy comparison with old impl)
+    #runner = unittest.TextTestRunner(sys.stdout, verbosity=2, failfast=failfast)
+
     result = runner.run(suite)
+
+    if junit_xml_list is not None:
+        junit_xml_list.append(result.get_xml_element())
+
     if not result.wasSuccessful():
         if len(result.errors) == 1 and not result.failures:
             err = result.errors[0][1]
...
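Every call to _run_suite() now contributes one <testsuite> element when the list is armed; a sketch of that hook in isolation, using the runner factory from the new file below (module path as implied by the relative import above):

    import os
    import unittest
    from test.support.testresult import get_test_runner

    junit_xml_list = []
    with open(os.devnull, 'w') as stream:
        result = get_test_runner(stream, verbosity=0).run(unittest.TestSuite())
    junit_xml_list.append(result.get_xml_element())
    assert junit_xml_list[0].tag == 'testsuite'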
new file: testresult.py (imported by test.support as .testresult)

'''Test runner and result class for the regression test suite.
'''

import functools
import io
import sys
import time
import traceback
import unittest

import xml.etree.ElementTree as ET

from datetime import datetime


class RegressionTestResult(unittest.TextTestResult):
    separator1 = '=' * 70 + '\n'
    separator2 = '-' * 70 + '\n'

    def __init__(self, stream, descriptions, verbosity):
        super().__init__(stream=stream, descriptions=descriptions, verbosity=0)
        self.buffer = True
        self.__suite = ET.Element('testsuite')
        self.__suite.set('start', datetime.utcnow().isoformat(' '))
        self.__e = None
        self.__start_time = None
        self.__results = []
        self.__verbose = bool(verbosity)

    @classmethod
    def __getId(cls, test):
        try:
            test_id = test.id
        except AttributeError:
            return str(test)
        try:
            return test_id()
        except TypeError:
            return str(test_id)
        return repr(test)

    def startTest(self, test):
        super().startTest(test)
        self.__e = e = ET.SubElement(self.__suite, 'testcase')
        self.__start_time = time.perf_counter()
        if self.__verbose:
            self.stream.write(f'{self.getDescription(test)} ... ')
            self.stream.flush()

    def _add_result(self, test, capture=False, **args):
        e = self.__e
        self.__e = None
        if e is None:
            return
        e.set('name', args.pop('name', self.__getId(test)))
        e.set('status', args.pop('status', 'run'))
        e.set('result', args.pop('result', 'completed'))
        if self.__start_time:
            e.set('time', f'{time.perf_counter() - self.__start_time:0.6f}')

        if capture:
            stdout = self._stdout_buffer.getvalue().rstrip()
            ET.SubElement(e, 'system-out').text = stdout
            stderr = self._stderr_buffer.getvalue().rstrip()
            ET.SubElement(e, 'system-err').text = stderr

        for k, v in args.items():
            if not k or not v:
                continue
            e2 = ET.SubElement(e, k)
            if hasattr(v, 'items'):
                for k2, v2 in v.items():
                    if k2:
                        e2.set(k2, str(v2))
                    else:
                        e2.text = str(v2)
            else:
                e2.text = str(v)

    def __write(self, c, word):
        if self.__verbose:
            self.stream.write(f'{word}\n')

    @classmethod
    def __makeErrorDict(cls, err_type, err_value, err_tb):
        if isinstance(err_type, type):
            if err_type.__module__ == 'builtins':
                typename = err_type.__name__
            else:
                typename = f'{err_type.__module__}.{err_type.__name__}'
        else:
            typename = repr(err_type)

        msg = traceback.format_exception(err_type, err_value, None)
        tb = traceback.format_exception(err_type, err_value, err_tb)

        return {
            'type': typename,
            'message': ''.join(msg),
            '': ''.join(tb),
        }

    def addError(self, test, err):
        self._add_result(test, True, error=self.__makeErrorDict(*err))
        super().addError(test, err)
        self.__write('E', 'ERROR')

    def addExpectedFailure(self, test, err):
        self._add_result(test, True, output=self.__makeErrorDict(*err))
        super().addExpectedFailure(test, err)
        self.__write('x', 'expected failure')

    def addFailure(self, test, err):
        self._add_result(test, True, failure=self.__makeErrorDict(*err))
        super().addFailure(test, err)
        self.__write('F', 'FAIL')

    def addSkip(self, test, reason):
        self._add_result(test, skipped=reason)
        super().addSkip(test, reason)
        self.__write('S', f'skipped {reason!r}')

    def addSuccess(self, test):
        self._add_result(test)
        super().addSuccess(test)
        self.__write('.', 'ok')

    def addUnexpectedSuccess(self, test):
        self._add_result(test, outcome='UNEXPECTED_SUCCESS')
        super().addUnexpectedSuccess(test)
        self.__write('u', 'unexpected success')

    def printErrors(self):
        if self.__verbose:
            self.stream.write('\n')
        self.printErrorList('ERROR', self.errors)
        self.printErrorList('FAIL', self.failures)

    def printErrorList(self, flavor, errors):
        for test, err in errors:
            self.stream.write(self.separator1)
            self.stream.write(f'{flavor}: {self.getDescription(test)}\n')
            self.stream.write(self.separator2)
            self.stream.write('%s\n' % err)

    def get_xml_element(self):
        e = self.__suite
        e.set('tests', str(self.testsRun))
        e.set('errors', str(len(self.errors)))
        e.set('failures', str(len(self.failures)))
        return e


class QuietRegressionTestRunner:
    def __init__(self, stream):
        self.result = RegressionTestResult(stream, None, 0)

    def run(self, test):
        test(self.result)
        return self.result


def get_test_runner_class(verbosity):
    if verbosity:
        return functools.partial(unittest.TextTestRunner,
                                 resultclass=RegressionTestResult,
                                 buffer=True,
                                 verbosity=verbosity)
    return QuietRegressionTestRunner


def get_test_runner(stream, verbosity):
    return get_test_runner_class(verbosity)(stream)


if __name__ == '__main__':
    class TestTests(unittest.TestCase):
        def test_pass(self):
            pass

        def test_pass_slow(self):
            time.sleep(1.0)

        def test_fail(self):
            print('stdout', file=sys.stdout)
            print('stderr', file=sys.stderr)
            self.fail('failure message')

        def test_error(self):
            print('stdout', file=sys.stdout)
            print('stderr', file=sys.stderr)
            raise RuntimeError('error message')

    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(TestTests))
    stream = io.StringIO()
    runner_cls = get_test_runner_class(sum(a == '-v' for a in sys.argv))
    runner = runner_cls(sys.stdout)
    result = runner.run(suite)
    print('Output:', stream.getvalue())
    print('XML: ', end='')
    for s in ET.tostringlist(result.get_xml_element()):
        print(s.decode(), end='')
    print()
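A failing test exercises the capture and error-dict paths end to end; a small demo of the XML produced (module path implied by the relative import in support/__init__.py):

    import sys
    import unittest
    import xml.etree.ElementTree as ET
    from test.support.testresult import get_test_runner

    class Demo(unittest.TestCase):
        def test_fail(self):
            print('captured stdout')
            self.fail('boom')

    suite = unittest.defaultTestLoader.loadTestsFromTestCase(Demo)
    result = get_test_runner(sys.stdout, verbosity=0).run(suite)
    print(ET.tostring(result.get_xml_element()).decode())
    # expect failures="1" with nested <system-out> and <failure type=...> elements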
@@ -1459,6 +1459,16 @@ class TestFileTypeRepr(TestCase):
         type = argparse.FileType('r', 1, errors='replace')
         self.assertEqual("FileType('r', 1, errors='replace')", repr(type))
 
+class StdStreamComparer:
+    def __init__(self, attr):
+        self.attr = attr
+
+    def __eq__(self, other):
+        return other == getattr(sys, self.attr)
+
+eq_stdin = StdStreamComparer('stdin')
+eq_stdout = StdStreamComparer('stdout')
+eq_stderr = StdStreamComparer('stderr')
+
 class RFile(object):
     seen = {}
@@ -1497,7 +1507,7 @@ class TestFileTypeR(TempDirMixin, ParserTestCase):
         ('foo', NS(x=None, spam=RFile('foo'))),
         ('-x foo bar', NS(x=RFile('foo'), spam=RFile('bar'))),
         ('bar -x foo', NS(x=RFile('foo'), spam=RFile('bar'))),
-        ('-x - -', NS(x=sys.stdin, spam=sys.stdin)),
+        ('-x - -', NS(x=eq_stdin, spam=eq_stdin)),
         ('readonly', NS(x=None, spam=RFile('readonly'))),
     ]
@@ -1537,7 +1547,7 @@ class TestFileTypeRB(TempDirMixin, ParserTestCase):
         ('foo', NS(x=None, spam=RFile('foo'))),
         ('-x foo bar', NS(x=RFile('foo'), spam=RFile('bar'))),
         ('bar -x foo', NS(x=RFile('foo'), spam=RFile('bar'))),
-        ('-x - -', NS(x=sys.stdin, spam=sys.stdin)),
+        ('-x - -', NS(x=eq_stdin, spam=eq_stdin)),
     ]
@@ -1576,7 +1586,7 @@ class TestFileTypeW(TempDirMixin, ParserTestCase):
         ('foo', NS(x=None, spam=WFile('foo'))),
         ('-x foo bar', NS(x=WFile('foo'), spam=WFile('bar'))),
         ('bar -x foo', NS(x=WFile('foo'), spam=WFile('bar'))),
-        ('-x - -', NS(x=sys.stdout, spam=sys.stdout)),
+        ('-x - -', NS(x=eq_stdout, spam=eq_stdout)),
     ]
@@ -1591,7 +1601,7 @@ class TestFileTypeWB(TempDirMixin, ParserTestCase):
         ('foo', NS(x=None, spam=WFile('foo'))),
         ('-x foo bar', NS(x=WFile('foo'), spam=WFile('bar'))),
         ('bar -x foo', NS(x=WFile('foo'), spam=WFile('bar'))),
-        ('-x - -', NS(x=sys.stdout, spam=sys.stdout)),
+        ('-x - -', NS(x=eq_stdout, spam=eq_stdout)),
     ]
...
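The eq_* comparers are needed because the new runner sets buffer=True: during a test, sys.stdout and sys.stderr are temporary StringIO buffers, so a stream captured in the expectation table at import time no longer matches what FileType('-') returns at parse time (eq_stdin is included for consistency). The comparer re-reads the current stream at comparison time; a sketch:

    import io
    import sys

    class StdStreamComparer:
        def __init__(self, attr):
            self.attr = attr
        def __eq__(self, other):
            return other == getattr(sys, self.attr)

    eq_stdout = StdStreamComparer('stdout')

    real = sys.stdout
    sys.stdout = io.StringIO()        # what buffer=True does for the duration of a test
    assert eq_stdout == sys.stdout    # compares against the stream in effect right now
    sys.stdout = real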
Add JUnit XML output for regression tests and update Azure DevOps builds.