Commit 403bb39d authored by Benjamin Peterson

merge heads

parents dcb89f4f d3af6344
...@@ -735,7 +735,7 @@ The p1.stdout.close() call after starting the p2 is important in order for p1
to receive a SIGPIPE if p2 exits before p1.
Alternatively, for trusted input, the shell's own pipeline support may still
-be used directly:
+be used directly::

   output=`dmesg | grep hda`
   # becomes
......
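For reference, the pipeline replacement that the subprocess documentation above describes uses two Popen objects chained by a pipe, closing p1.stdout so p1 can receive SIGPIPE if p2 exits first. A minimal sketch of that pattern (illustrative only, not part of this hunk):

from subprocess import Popen, PIPE

p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
p1.stdout.close()   # allow p1 to receive SIGPIPE if p2 exits before p1
output = p2.communicate()[0]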
...@@ -430,7 +430,7 @@ All methods are executed atomically.
are blocked waiting for the lock to become unlocked, allow exactly one of them
to proceed.
-Do not call this method when the lock is unlocked.
+When invoked on an unlocked lock, a :exc:`ThreadError` is raised.
There is no return value.
......
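For reference, the behaviour documented in the threading hunk above can be shown in a few lines. This is an illustrative sketch, not part of the change; on Python 3 the exception actually raised when releasing an unlocked lock is RuntimeError, which the quoted documentation calls ThreadError:

import threading

lock = threading.Lock()
lock.acquire()
lock.release()        # releasing a held lock succeeds

try:
    lock.release()    # the lock is already unlocked here
except RuntimeError:  # referred to as ThreadError in the docs above
    print("releasing an unlocked lock raises an error")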
...@@ -78,7 +78,7 @@ class TabSet(Frame):
    def remove_tab(self, tab_name):
        """Remove the tab named <tab_name>"""
        if not tab_name in self._tab_names:
-            raise KeyError("No such Tab: '%s" % page_name)
+            raise KeyError("No such Tab: '%s" % tab_name)
        self._tab_names.remove(tab_name)
        self._arrange_tabs()
...@@ -88,7 +88,7 @@ class TabSet(Frame):
        if tab_name == self._selected_tab:
            return
        if tab_name is not None and tab_name not in self._tabs:
-            raise KeyError("No such Tab: '%s" % page_name)
+            raise KeyError("No such Tab: '%s" % tab_name)
        # deselect the current selected tab
        if self._selected_tab is not None:
......
...@@ -101,6 +101,10 @@ def _validate_family(family):
    if sys.platform != 'win32' and family == 'AF_PIPE':
        raise ValueError('Family %s is not recognized.' % family)

    if sys.platform == 'win32' and family == 'AF_UNIX':
        # double check
        if not hasattr(socket, family):
            raise ValueError('Family %s is not recognized.' % family)


def address_type(address):
    '''
......
...@@ -2331,6 +2331,12 @@ class TestInvalidFamily(unittest.TestCase):
        with self.assertRaises(ValueError):
            multiprocessing.connection.Listener(r'\\.\test')
    @unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
    def test_invalid_family_win32(self):
        with self.assertRaises(ValueError):
            multiprocessing.connection.Listener('/var/test.pipe')

testcases_other = [OtherTest, TestInvalidHandle, TestInitializers,
                   TestStdinBadfiledescriptor, TestInvalidFamily]
......
...@@ -5,8 +5,11 @@ Tools directory of a Python checkout or tarball, such as reindent.py.
"""
import os
import sys
import imp
import unittest
import sysconfig
import tempfile
from test import support
from test.script_helper import assert_python_ok
...@@ -17,10 +20,11 @@ if not sysconfig.is_python_build():
srcdir = sysconfig.get_config_var('projectbase')
basepath = os.path.join(os.getcwd(), srcdir, 'Tools')
scriptsdir = os.path.join(basepath, 'scripts')

class ReindentTests(unittest.TestCase):
-    script = os.path.join(basepath, 'scripts', 'reindent.py')
+    script = os.path.join(scriptsdir, 'reindent.py')

    def test_noargs(self):
        assert_python_ok(self.script)
...@@ -31,8 +35,73 @@ class ReindentTests(unittest.TestCase):
        self.assertGreater(err, b'')
class TestSundryScripts(unittest.TestCase):
    # At least make sure the rest don't have syntax errors. When tests are
    # added for a script it should be added to the whitelist below.

    # scripts that have independent tests.
    whitelist = ['reindent.py']
    # scripts that can't be imported without running
    blacklist = ['make_ctype.py']
    # scripts that use windows-only modules
    windows_only = ['win_add2path.py']
    # blacklisted for other reasons
    other = ['analyze_dxp.py']

    skiplist = blacklist + whitelist + windows_only + other

    def setUp(self):
        cm = support.DirsOnSysPath(scriptsdir)
        cm.__enter__()
        self.addCleanup(cm.__exit__)

    def test_sundry(self):
        for fn in os.listdir(scriptsdir):
            if fn.endswith('.py') and fn not in self.skiplist:
                __import__(fn[:-3])

    @unittest.skipIf(sys.platform != "win32", "Windows-only test")
    def test_sundry_windows(self):
        for fn in self.windows_only:
            __import__(fn[:-3])

    @unittest.skipIf(not support.threading, "test requires _thread module")
    def test_analyze_dxp_import(self):
        if hasattr(sys, 'getdxp'):
            import analyze_dxp
        else:
            with self.assertRaises(RuntimeError):
                import analyze_dxp


class PdepsTests(unittest.TestCase):

    @classmethod
    def setUpClass(self):
        path = os.path.join(scriptsdir, 'pdeps.py')
        self.pdeps = imp.load_source('pdeps', path)

    @classmethod
    def tearDownClass(self):
        if 'pdeps' in sys.modules:
            del sys.modules['pdeps']

    def test_process_errors(self):
        # Issue #14492: m_import.match(line) can be None.
        with tempfile.TemporaryDirectory() as tmpdir:
            fn = os.path.join(tmpdir, 'foo')
            with open(fn, 'w') as stream:
                stream.write("#!/this/will/fail")
            self.pdeps.process(fn, {})

    def test_inverse_attribute_error(self):
        # Issue #14492: this used to fail with an AttributeError.
        self.pdeps.inverse({'a': []})
def test_main():
-    support.run_unittest(ReindentTests)
+    support.run_unittest(*[obj for obj in globals().values()
+                           if isinstance(obj, type)])

if __name__ == '__main__':
......
...@@ -1253,7 +1253,7 @@ class Treeview(Widget, tkinter.XView, tkinter.YView):
    def exists(self, item):
-        """Returns True if the specified item is present in the three,
+        """Returns True if the specified item is present in the tree,
        False otherwise."""
        return bool(self.tk.call(self._w, "exists", item))
......
...@@ -39,6 +39,13 @@ Core and Builtins
Library
-------

- Issue #14496: Fix wrong name in idlelib/tabbedpages.py.
  Patch by Popa Claudiu.

- Issue #14482: Raise a ValueError, not a NameError, when trying to create
  a multiprocessing Client or Listener with an AF_UNIX type address under
  Windows. Patch by Popa Claudiu.

- Issue #14151: Raise a ValueError, not a NameError, when trying to create
  a multiprocessing Client or Listener with an AF_PIPE type address under
  non-Windows platforms. Patch by Popa Claudiu.
......
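The two multiprocessing entries describe validation that now fails fast with a ValueError. A rough sketch of that documented behaviour, mirroring the new test above (only meaningful on Windows, where the socket module lacks AF_UNIX):

from multiprocessing.connection import Listener

try:
    Listener('/var/test.pipe')   # filesystem-style address implies AF_UNIX
except ValueError as exc:
    print(exc)                   # Family AF_UNIX is not recognized.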
...@@ -3,34 +3,6 @@
# Usage: abitype.py < old_code > new_code
import re, sys

-############ Simplistic C scanner ##################################
-tokenizer = re.compile(
-    r"(?P<preproc>#.*\n)"
-    r"|(?P<comment>/\*.*?\*/)"
-    r"|(?P<ident>[a-zA-Z_][a-zA-Z0-9_]*)"
-    r"|(?P<ws>[ \t\n]+)"
-    r"|(?P<other>.)",
-    re.MULTILINE)
-
-tokens = []
-source = sys.stdin.read()
-pos = 0
-while pos != len(source):
-    m = tokenizer.match(source, pos)
-    tokens.append([m.lastgroup, m.group()])
-    pos += len(tokens[-1][1])
-    if tokens[-1][0] == 'preproc':
-        # continuation lines are considered
-        # only in preprocess statements
-        while tokens[-1][1].endswith('\\\n'):
-            nl = source.find('\n', pos)
-            if nl == -1:
-                line = source[pos:]
-            else:
-                line = source[pos:nl+1]
-            tokens[-1][1] += line
-            pos += len(line)

###### Replacement of PyTypeObject static instances ##############

# classify each token, giving it a one-letter code:
...@@ -79,7 +51,7 @@ def get_fields(start, real_end):
    while tokens[pos][0] in ('ws', 'comment'):
        pos += 1
    if tokens[pos][1] != 'PyVarObject_HEAD_INIT':
-        raise Exception, '%s has no PyVarObject_HEAD_INIT' % name
+        raise Exception('%s has no PyVarObject_HEAD_INIT' % name)
    while tokens[pos][1] != ')':
        pos += 1
    pos += 1
...@@ -183,18 +155,48 @@ def make_slots(name, fields):
    return '\n'.join(res)

-# Main loop: replace all static PyTypeObjects until
-# there are none left.
-while 1:
-    c = classify()
-    m = re.search('(SW)?TWIW?=W?{.*?};', c)
-    if not m:
-        break
-    start = m.start()
-    end = m.end()
-    name, fields = get_fields(start, m)
-    tokens[start:end] = [('',make_slots(name, fields))]
+if __name__ == '__main__':
+
+    ############ Simplistic C scanner ##################################
+    tokenizer = re.compile(
+        r"(?P<preproc>#.*\n)"
+        r"|(?P<comment>/\*.*?\*/)"
+        r"|(?P<ident>[a-zA-Z_][a-zA-Z0-9_]*)"
+        r"|(?P<ws>[ \t\n]+)"
+        r"|(?P<other>.)",
+        re.MULTILINE)
+
+    tokens = []
+    source = sys.stdin.read()
+    pos = 0
+    while pos != len(source):
+        m = tokenizer.match(source, pos)
+        tokens.append([m.lastgroup, m.group()])
+        pos += len(tokens[-1][1])
+        if tokens[-1][0] == 'preproc':
+            # continuation lines are considered
+            # only in preprocess statements
+            while tokens[-1][1].endswith('\\\n'):
+                nl = source.find('\n', pos)
+                if nl == -1:
+                    line = source[pos:]
+                else:
+                    line = source[pos:nl+1]
+                tokens[-1][1] += line
+                pos += len(line)
+
+    # Main loop: replace all static PyTypeObjects until
+    # there are none left.
+    while 1:
+        c = classify()
+        m = re.search('(SW)?TWIW?=W?{.*?};', c)
+        if not m:
+            break
+        start = m.start()
+        end = m.end()
+        name, fields = get_fields(start, m)
+        tokens[start:end] = [('',make_slots(name, fields))]

    # Output result to stdout
    for t, v in tokens:
        sys.stdout.write(v)
...@@ -106,14 +106,16 @@ def check_limit(n, test_func_name):
    else:
        print("Yikes!")

-limit = 1000
-while 1:
-    check_limit(limit, "test_recurse")
-    check_limit(limit, "test_add")
-    check_limit(limit, "test_repr")
-    check_limit(limit, "test_init")
-    check_limit(limit, "test_getattr")
-    check_limit(limit, "test_getitem")
-    check_limit(limit, "test_cpickle")
-    print("Limit of %d is fine" % limit)
-    limit = limit + 100
+if __name__ == '__main__':
+
+    limit = 1000
+    while 1:
+        check_limit(limit, "test_recurse")
+        check_limit(limit, "test_add")
+        check_limit(limit, "test_repr")
+        check_limit(limit, "test_init")
+        check_limit(limit, "test_getattr")
+        check_limit(limit, "test_getitem")
+        check_limit(limit, "test_cpickle")
+        print("Limit of %d is fine" % limit)
+        limit = limit + 100
...@@ -76,29 +76,31 @@ usage = """Usage: %s [-cd] paths...
-c: recognize Python source files trying to compile them
-d: debug output""" % sys.argv[0]

-try:
-    opts, args = getopt.getopt(sys.argv[1:], 'cd')
-except getopt.error as msg:
-    print(msg, file=sys.stderr)
-    print(usage, file=sys.stderr)
-    sys.exit(1)
-
-is_python = pysource.looks_like_python
-debug = False
-
-for o, a in opts:
-    if o == '-c':
-        is_python = pysource.can_be_compiled
-    elif o == '-d':
-        debug = True
-
-if not args:
-    print(usage, file=sys.stderr)
-    sys.exit(1)
-
-for fullpath in pysource.walk_python_files(args, is_python):
-    if debug:
-        print("Testing for coding: %s" % fullpath)
-    result = needs_declaration(fullpath)
-    if result:
-        print(fullpath)
+if __name__ == '__main__':
+
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], 'cd')
+    except getopt.error as msg:
+        print(msg, file=sys.stderr)
+        print(usage, file=sys.stderr)
+        sys.exit(1)
+
+    is_python = pysource.looks_like_python
+    debug = False
+
+    for o, a in opts:
+        if o == '-c':
+            is_python = pysource.can_be_compiled
+        elif o == '-d':
+            debug = True
+
+    if not args:
+        print(usage, file=sys.stderr)
+        sys.exit(1)
+
+    for fullpath in pysource.walk_python_files(args, is_python):
+        if debug:
+            print("Testing for coding: %s" % fullpath)
+        result = needs_declaration(fullpath)
+        if result:
+            print(fullpath)
...@@ -292,7 +292,7 @@ def addsubst(substfile):
        if not words: continue
        if len(words) == 3 and words[0] == 'struct':
            words[:2] = [words[0] + ' ' + words[1]]
-        elif len(words) <> 2:
+        elif len(words) != 2:
            err(substfile + '%s:%r: warning: bad line: %r' % (substfile, lineno, line))
            continue
        if Reverse:
......
...@@ -20,7 +20,7 @@ file ... : files to sum; '-' or no files means stdin
import sys
import os
import getopt
-import md5
+from hashlib import md5

def sum(*files):
    sts = 0
......
...@@ -13,7 +13,6 @@
"""
import re,sys
-import TextTools

entityRE = re.compile('<!ENTITY +(\w+) +CDATA +"([^"]+)" +-- +((?:.|\n)+?) *-->')
...@@ -45,7 +44,7 @@ def writefile(f,defs):
            charcode = repr(charcode)
        else:
            charcode = repr(charcode)
-        comment = TextTools.collapse(comment)
+        comment = ' '.join(comment.split())
        f.write(" '%s':\t%s, \t# %s\n" % (name,charcode,comment))
    f.write('\n}\n')
......
...@@ -76,10 +76,9 @@ def process(filename, table):
            nextline = fp.readline()
            if not nextline: break
            line = line[:-1] + nextline
-        if m_import.match(line) >= 0:
-            (a, b), (a1, b1) = m_import.regs[:2]
-        elif m_from.match(line) >= 0:
-            (a, b), (a1, b1) = m_from.regs[:2]
+        m_found = m_import.match(line) or m_from.match(line)
+        if m_found:
+            (a, b), (a1, b1) = m_found.regs[:2]
        else: continue
        words = line[a1:b1].split(',')
        # print '#', line, words
...@@ -87,6 +86,7 @@ def process(filename, table):
            word = word.strip()
            if word not in list:
                list.append(word)
+    fp.close()
# Compute closure (this is in fact totally general) # Compute closure (this is in fact totally general)
...@@ -123,7 +123,7 @@ def closure(table):
def inverse(table):
    inv = {}
    for key in table.keys():
-        if not inv.has_key(key):
+        if key not in inv:
            inv[key] = []
        for item in table[key]:
            store(inv, item, key)
......