Commit 68099bd9 authored by Benjamin Peterson's avatar Benjamin Peterson

Merged revisions 80934 via svnmerge from

svn+ssh://pythondev@svn.python.org/python/trunk

................
  r80934 | benjamin.peterson | 2010-05-07 13:58:23 -0500 (Fri, 07 May 2010) | 69 lines

  Merged revisions 79911,79916-79917,80018,80418,80572-80573,80635-80639,80668,80922 via svnmerge from
  svn+ssh://pythondev@svn.python.org/sandbox/trunk/2to3/lib2to3

  ........
    r79911 | benjamin.peterson | 2010-04-09 15:38:53 -0500 (Fri, 09 Apr 2010) | 1 line

    use absolute import
  ........
    r79916 | benjamin.peterson | 2010-04-09 16:05:21 -0500 (Fri, 09 Apr 2010) | 1 line

    generalize detection of __future__ imports and attach them to the tree
  ........
    r79917 | benjamin.peterson | 2010-04-09 16:11:44 -0500 (Fri, 09 Apr 2010) | 1 line

    don't try to 'fix' relative imports when absolute_import is enabled #8858
  ........
    r80018 | benjamin.peterson | 2010-04-12 16:12:12 -0500 (Mon, 12 Apr 2010) | 4 lines

    prevent diffs from being mangled in multiprocess mode #6409

    Patch by George Boutsioukis.
  ........
    r80418 | benjamin.peterson | 2010-04-23 16:00:03 -0500 (Fri, 23 Apr 2010) | 1 line

    remove unhelpful description
  ........
    r80572 | benjamin.peterson | 2010-04-27 20:33:54 -0500 (Tue, 27 Apr 2010) | 1 line

    use unicode literals
  ........
    r80573 | jeffrey.yasskin | 2010-04-27 23:08:27 -0500 (Tue, 27 Apr 2010) | 6 lines

    Don't transform imports that are already relative.  2to3 turned
      from . import refactor
    into
      from .. import refactor
    which broke the transformation of 2to3 itself.
  ........
    r80635 | benjamin.peterson | 2010-04-29 16:02:23 -0500 (Thu, 29 Apr 2010) | 1 line

    remove imports
  ........
    r80636 | benjamin.peterson | 2010-04-29 16:02:41 -0500 (Thu, 29 Apr 2010) | 1 line

    unicode literal
  ........
    r80637 | benjamin.peterson | 2010-04-29 16:03:42 -0500 (Thu, 29 Apr 2010) | 1 line

    must pass a string to Number
  ........
    r80638 | benjamin.peterson | 2010-04-29 16:05:34 -0500 (Thu, 29 Apr 2010) | 1 line

    unicode literals
  ........
    r80639 | benjamin.peterson | 2010-04-29 16:06:09 -0500 (Thu, 29 Apr 2010) | 1 line

    pass string to Number
  ........
    r80668 | jeffrey.yasskin | 2010-04-30 18:02:47 -0500 (Fri, 30 Apr 2010) | 4 lines

    Make 2to3 run under Python 2.5 so that the benchmark suite at
    http://hg.python.org/benchmarks/ can use it and still run on implementations
    that haven't gotten to 2.6 yet.  Fixes issue 8566.
  ........
    r80922 | benjamin.peterson | 2010-05-07 11:06:25 -0500 (Fri, 07 May 2010) | 1 line

    prevent xrange transformation from wrapping range calls it produces in list
  ........
................
parent a7da5e8b
...@@ -43,7 +43,13 @@ class FixImport(fixer_base.BaseFix): ...@@ -43,7 +43,13 @@ class FixImport(fixer_base.BaseFix):
import_name< 'import' imp=any > import_name< 'import' imp=any >
""" """
def start_tree(self, tree, name):
super(FixImport, self).start_tree(tree, name)
self.skip = "absolute_import" in tree.future_features
def transform(self, node, results): def transform(self, node, results):
if self.skip:
return
imp = results['imp'] imp = results['imp']
if node.type == syms.import_from: if node.type == syms.import_from:
...@@ -71,19 +77,22 @@ class FixImport(fixer_base.BaseFix): ...@@ -71,19 +77,22 @@ class FixImport(fixer_base.BaseFix):
self.warning(node, "absolute and local imports together") self.warning(node, "absolute and local imports together")
return return
new = FromImport('.', [imp]) new = FromImport(".", [imp])
new.prefix = node.prefix new.prefix = node.prefix
return new return new
def probably_a_local_import(self, imp_name): def probably_a_local_import(self, imp_name):
imp_name = imp_name.split('.', 1)[0] if imp_name.startswith("."):
# Relative imports are certainly not local imports.
return False
imp_name = imp_name.split(".", 1)[0]
base_path = dirname(self.filename) base_path = dirname(self.filename)
base_path = join(base_path, imp_name) base_path = join(base_path, imp_name)
# If there is no __init__.py next to the file its not in a package # If there is no __init__.py next to the file its not in a package
# so can't be a relative import. # so can't be a relative import.
if not exists(join(dirname(base_path), '__init__.py')): if not exists(join(dirname(base_path), "__init__.py")):
return False return False
for ext in ['.py', sep, '.pyc', '.so', '.sl', '.pyd']: for ext in [".py", sep, ".pyc", ".so", ".sl", ".pyd"]:
if exists(base_path + ext): if exists(base_path + ext):
return True return True
return False return False
...@@ -14,10 +14,10 @@ class FixOperator(fixer_base.BaseFix): ...@@ -14,10 +14,10 @@ class FixOperator(fixer_base.BaseFix):
func = "'(' func=any ')'" func = "'(' func=any ')'"
PATTERN = """ PATTERN = """
power< module='operator' power< module='operator'
trailer< '.' {methods} > trailer< {func} > > trailer< '.' %(methods)s > trailer< %(func)s > >
| |
power< {methods} trailer< {func} > > power< %(methods)s trailer< %(func)s > >
""".format(methods=methods, func=func) """ % dict(methods=methods, func=func)
def transform(self, node, results): def transform(self, node, results):
method = results["method"][0] method = results["method"][0]
......
...@@ -7,9 +7,8 @@ Makes sure reduce() is imported from the functools module if reduce is ...@@ -7,9 +7,8 @@ Makes sure reduce() is imported from the functools module if reduce is
used in that module. used in that module.
""" """
from .. import pytree from lib2to3 import fixer_base
from .. import fixer_base from lib2to3.fixer_util import touch_import
from ..fixer_util import Name, Attr, touch_import
......
...@@ -154,7 +154,7 @@ def map_to_index(param_list, prefix=[], d=None): ...@@ -154,7 +154,7 @@ def map_to_index(param_list, prefix=[], d=None):
if d is None: if d is None:
d = {} d = {}
for i, obj in enumerate(param_list): for i, obj in enumerate(param_list):
trailer = [Subscript(Number(i))] trailer = [Subscript(Number(str(i)))]
if isinstance(obj, list): if isinstance(obj, list):
map_to_index(obj, trailer, d=d) map_to_index(obj, trailer, d=d)
else: else:
......
...@@ -17,6 +17,13 @@ class FixXrange(fixer_base.BaseFix): ...@@ -17,6 +17,13 @@ class FixXrange(fixer_base.BaseFix):
rest=any* > rest=any* >
""" """
def start_tree(self, tree, filename):
super(FixXrange, self).start_tree(tree, filename)
self.transformed_xranges = set()
def finish_tree(self, tree, filename):
self.transformed_xranges = None
def transform(self, node, results): def transform(self, node, results):
name = results["name"] name = results["name"]
if name.value == "xrange": if name.value == "xrange":
...@@ -29,9 +36,12 @@ class FixXrange(fixer_base.BaseFix): ...@@ -29,9 +36,12 @@ class FixXrange(fixer_base.BaseFix):
def transform_xrange(self, node, results): def transform_xrange(self, node, results):
name = results["name"] name = results["name"]
name.replace(Name("range", prefix=name.prefix)) name.replace(Name("range", prefix=name.prefix))
# This prevents the new range call from being wrapped in a list later.
self.transformed_xranges.add(id(node))
def transform_range(self, node, results): def transform_range(self, node, results):
if not self.in_special_context(node): if (id(node) not in self.transformed_xranges and
not self.in_special_context(node)):
range_call = Call(Name("range"), [results["args"].clone()]) range_call = Call(Name("range"), [results["args"].clone()])
# Encase the range call in list(). # Encase the range call in list().
list_call = Call(Name("list"), [range_call], list_call = Call(Name("list"), [range_call],
......
...@@ -2,6 +2,8 @@ ...@@ -2,6 +2,8 @@
Main program for 2to3. Main program for 2to3.
""" """
from __future__ import with_statement
import sys import sys
import os import os
import difflib import difflib
...@@ -62,8 +64,14 @@ class StdoutRefactoringTool(refactor.MultiprocessRefactoringTool): ...@@ -62,8 +64,14 @@ class StdoutRefactoringTool(refactor.MultiprocessRefactoringTool):
if self.show_diffs: if self.show_diffs:
diff_lines = diff_texts(old, new, filename) diff_lines = diff_texts(old, new, filename)
try: try:
for line in diff_lines: if self.output_lock is not None:
print(line) with self.output_lock:
for line in diff_lines:
print(line)
sys.stdout.flush()
else:
for line in diff_lines:
print(line)
except UnicodeEncodeError: except UnicodeEncodeError:
warn("couldn't encode %s's diff for your terminal" % warn("couldn't encode %s's diff for your terminal" %
(filename,)) (filename,))
...@@ -94,7 +102,7 @@ def main(fixer_pkg, args=None): ...@@ -94,7 +102,7 @@ def main(fixer_pkg, args=None):
parser.add_option("-x", "--nofix", action="append", default=[], parser.add_option("-x", "--nofix", action="append", default=[],
help="Prevent a fixer from being run.") help="Prevent a fixer from being run.")
parser.add_option("-l", "--list-fixes", action="store_true", parser.add_option("-l", "--list-fixes", action="store_true",
help="List available transformations (fixes/fix_*.py)") help="List available transformations")
parser.add_option("-p", "--print-function", action="store_true", parser.add_option("-p", "--print-function", action="store_true",
help="Modify the grammar so that print() is a function") help="Modify the grammar so that print() is a function")
parser.add_option("-v", "--verbose", action="store_true", parser.add_option("-v", "--verbose", action="store_true",
......
...@@ -38,6 +38,13 @@ __all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize", ...@@ -38,6 +38,13 @@ __all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
"generate_tokens", "untokenize"] "generate_tokens", "untokenize"]
del token del token
try:
bytes
except NameError:
# Support bytes type in Python <= 2.5, so 2to3 turns itself into
# valid Python 3 code.
bytes = str
def group(*choices): return '(' + '|'.join(choices) + ')' def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*' def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?' def maybe(*choices): return group(*choices) + '?'
...@@ -267,7 +274,7 @@ def detect_encoding(readline): ...@@ -267,7 +274,7 @@ def detect_encoding(readline):
try: try:
return readline() return readline()
except StopIteration: except StopIteration:
return b'' return bytes()
def find_cookie(line): def find_cookie(line):
try: try:
......
...@@ -289,8 +289,7 @@ class Node(Base): ...@@ -289,8 +289,7 @@ class Node(Base):
for node in child.post_order(): for node in child.post_order():
yield node yield node
@property def _prefix_getter(self):
def prefix(self):
""" """
The whitespace and comments preceding this node in the input. The whitespace and comments preceding this node in the input.
""" """
...@@ -298,11 +297,12 @@ class Node(Base): ...@@ -298,11 +297,12 @@ class Node(Base):
return "" return ""
return self.children[0].prefix return self.children[0].prefix
@prefix.setter def _prefix_setter(self, prefix):
def prefix(self, prefix):
if self.children: if self.children:
self.children[0].prefix = prefix self.children[0].prefix = prefix
prefix = property(_prefix_getter, _prefix_setter)
def set_child(self, i, child): def set_child(self, i, child):
""" """
Equivalent to 'node.children[i] = child'. This method also sets the Equivalent to 'node.children[i] = child'. This method also sets the
...@@ -390,18 +390,17 @@ class Leaf(Base): ...@@ -390,18 +390,17 @@ class Leaf(Base):
"""Return a pre-order iterator for the tree.""" """Return a pre-order iterator for the tree."""
yield self yield self
@property def _prefix_getter(self):
def prefix(self):
""" """
The whitespace and comments preceding this token in the input. The whitespace and comments preceding this token in the input.
""" """
return self._prefix return self._prefix
@prefix.setter def _prefix_setter(self, prefix):
def prefix(self, prefix):
self.changed() self.changed()
self._prefix = prefix self._prefix = prefix
prefix = property(_prefix_getter, _prefix_setter)
def convert(gr, raw_node): def convert(gr, raw_node):
""" """
......
...@@ -8,6 +8,8 @@ recursively descend down directories. Imported as a module, this ...@@ -8,6 +8,8 @@ recursively descend down directories. Imported as a module, this
provides infrastructure to write your own refactoring tool. provides infrastructure to write your own refactoring tool.
""" """
from __future__ import with_statement
__author__ = "Guido van Rossum <guido@python.org>" __author__ = "Guido van Rossum <guido@python.org>"
...@@ -122,13 +124,14 @@ else: ...@@ -122,13 +124,14 @@ else:
_to_system_newlines = _identity _to_system_newlines = _identity
def _detect_future_print(source): def _detect_future_features(source):
have_docstring = False have_docstring = False
gen = tokenize.generate_tokens(io.StringIO(source).readline) gen = tokenize.generate_tokens(io.StringIO(source).readline)
def advance(): def advance():
tok = next(gen) tok = next(gen)
return tok[0], tok[1] return tok[0], tok[1]
ignore = frozenset((token.NEWLINE, tokenize.NL, token.COMMENT)) ignore = frozenset((token.NEWLINE, tokenize.NL, token.COMMENT))
features = set()
try: try:
while True: while True:
tp, value = advance() tp, value = advance()
...@@ -140,26 +143,25 @@ def _detect_future_print(source): ...@@ -140,26 +143,25 @@ def _detect_future_print(source):
have_docstring = True have_docstring = True
elif tp == token.NAME and value == "from": elif tp == token.NAME and value == "from":
tp, value = advance() tp, value = advance()
if tp != token.NAME and value != "__future__": if tp != token.NAME or value != "__future__":
break break
tp, value = advance() tp, value = advance()
if tp != token.NAME and value != "import": if tp != token.NAME or value != "import":
break break
tp, value = advance() tp, value = advance()
if tp == token.OP and value == "(": if tp == token.OP and value == "(":
tp, value = advance() tp, value = advance()
while tp == token.NAME: while tp == token.NAME:
if value == "print_function": features.add(value)
return True
tp, value = advance() tp, value = advance()
if tp != token.OP and value != ",": if tp != token.OP or value != ",":
break break
tp, value = advance() tp, value = advance()
else: else:
break break
except StopIteration: except StopIteration:
pass pass
return False return frozenset(features)
class FixerError(Exception): class FixerError(Exception):
...@@ -341,7 +343,8 @@ class RefactoringTool(object): ...@@ -341,7 +343,8 @@ class RefactoringTool(object):
An AST corresponding to the refactored input stream; None if An AST corresponding to the refactored input stream; None if
there were errors during the parse. there were errors during the parse.
""" """
if _detect_future_print(data): features = _detect_future_features(data)
if "print_function" in features:
self.driver.grammar = pygram.python_grammar_no_print_statement self.driver.grammar = pygram.python_grammar_no_print_statement
try: try:
tree = self.driver.parse_string(data) tree = self.driver.parse_string(data)
...@@ -351,6 +354,7 @@ class RefactoringTool(object): ...@@ -351,6 +354,7 @@ class RefactoringTool(object):
return return
finally: finally:
self.driver.grammar = self.grammar self.driver.grammar = self.grammar
tree.future_features = features
self.log_debug("Refactoring %s", name) self.log_debug("Refactoring %s", name)
self.refactor_tree(tree, name) self.refactor_tree(tree, name)
return tree return tree
...@@ -605,6 +609,7 @@ class MultiprocessRefactoringTool(RefactoringTool): ...@@ -605,6 +609,7 @@ class MultiprocessRefactoringTool(RefactoringTool):
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
super(MultiprocessRefactoringTool, self).__init__(*args, **kwargs) super(MultiprocessRefactoringTool, self).__init__(*args, **kwargs)
self.queue = None self.queue = None
self.output_lock = None
def refactor(self, items, write=False, doctests_only=False, def refactor(self, items, write=False, doctests_only=False,
num_processes=1): num_processes=1):
...@@ -618,6 +623,7 @@ class MultiprocessRefactoringTool(RefactoringTool): ...@@ -618,6 +623,7 @@ class MultiprocessRefactoringTool(RefactoringTool):
if self.queue is not None: if self.queue is not None:
raise RuntimeError("already doing multiple processes") raise RuntimeError("already doing multiple processes")
self.queue = multiprocessing.JoinableQueue() self.queue = multiprocessing.JoinableQueue()
self.output_lock = multiprocessing.Lock()
processes = [multiprocessing.Process(target=self._child) processes = [multiprocessing.Process(target=self._child)
for i in range(num_processes)] for i in range(num_processes)]
try: try:
......
...@@ -1497,6 +1497,17 @@ class Test_xrange(FixerTestCase): ...@@ -1497,6 +1497,17 @@ class Test_xrange(FixerTestCase):
for call in fixer_util.consuming_calls: for call in fixer_util.consuming_calls:
self.unchanged("a = %s(range(10))" % call) self.unchanged("a = %s(range(10))" % call)
class Test_xrange_with_reduce(FixerTestCase):
def setUp(self):
super(Test_xrange_with_reduce, self).setUp(["xrange", "reduce"])
def test_double_transform(self):
b = """reduce(x, xrange(5))"""
a = """from functools import reduce
reduce(x, range(5))"""
self.check(b, a)
class Test_raw_input(FixerTestCase): class Test_raw_input(FixerTestCase):
fixer = "raw_input" fixer = "raw_input"
...@@ -3679,7 +3690,7 @@ class Test_import(FixerTestCase): ...@@ -3679,7 +3690,7 @@ class Test_import(FixerTestCase):
self.files_checked.append(name) self.files_checked.append(name)
return self.always_exists or (name in self.present_files) return self.always_exists or (name in self.present_files)
from ..fixes import fix_import from lib2to3.fixes import fix_import
fix_import.exists = fake_exists fix_import.exists = fake_exists
def tearDown(self): def tearDown(self):
...@@ -3722,6 +3733,12 @@ class Test_import(FixerTestCase): ...@@ -3722,6 +3733,12 @@ class Test_import(FixerTestCase):
self.present_files = set(["bar.py"]) self.present_files = set(["bar.py"])
self.unchanged(s) self.unchanged(s)
def test_with_absolute_import_enabled(self):
s = "from __future__ import absolute_import\nimport bar"
self.always_exists = False
self.present_files = set(["__init__.py", "bar.py"])
self.unchanged(s)
def test_in_package(self): def test_in_package(self):
b = "import bar" b = "import bar"
a = "from . import bar" a = "from . import bar"
...@@ -3736,6 +3753,10 @@ class Test_import(FixerTestCase): ...@@ -3736,6 +3753,10 @@ class Test_import(FixerTestCase):
self.present_files = set(["__init__.py", "bar" + os.path.sep]) self.present_files = set(["__init__.py", "bar" + os.path.sep])
self.check(b, a) self.check(b, a)
def test_already_relative_import(self):
s = "from . import bar"
self.unchanged(s)
def test_comments_and_indent(self): def test_comments_and_indent(self):
b = "import bar # Foo" b = "import bar # Foo"
a = "from . import bar # Foo" a = "from . import bar # Foo"
......
...@@ -6,6 +6,8 @@ parts of the grammar we've changed, we also make sure we can parse the ...@@ -6,6 +6,8 @@ parts of the grammar we've changed, we also make sure we can parse the
test_grammar.py files from both Python 2 and Python 3. test_grammar.py files from both Python 2 and Python 3.
""" """
from __future__ import with_statement
# Testing imports # Testing imports
from . import support from . import support
from .support import driver, test_dir from .support import driver, test_dir
...@@ -149,10 +151,11 @@ class TestParserIdempotency(support.TestCase): ...@@ -149,10 +151,11 @@ class TestParserIdempotency(support.TestCase):
for filepath in support.all_project_files(): for filepath in support.all_project_files():
with open(filepath, "rb") as fp: with open(filepath, "rb") as fp:
encoding = tokenize.detect_encoding(fp.readline)[0] encoding = tokenize.detect_encoding(fp.readline)[0]
fp.seek(0) self.assertTrue(encoding is not None,
"can't detect encoding for %s" % filepath)
with open(filepath, "r") as fp:
source = fp.read() source = fp.read()
if encoding: source = source.decode(encoding)
source = source.decode(encoding)
tree = driver.parse_string(source) tree = driver.parse_string(source)
new = str(tree) new = str(tree)
if encoding: if encoding:
...@@ -199,10 +202,10 @@ class TestLiterals(GrammarTest): ...@@ -199,10 +202,10 @@ class TestLiterals(GrammarTest):
self.validate(s) self.validate(s)
def diff(fn, result): def diff(fn, result, encoding):
f = open("@", "wb") f = open("@", "w")
try: try:
f.write(result) f.write(result.encode(encoding))
finally: finally:
f.close() f.close()
try: try:
......
...@@ -9,6 +9,9 @@ more helpful than printing of (the first line of) the docstring, ...@@ -9,6 +9,9 @@ more helpful than printing of (the first line of) the docstring,
especially when debugging a test. especially when debugging a test.
""" """
from __future__ import with_statement
import sys
import warnings import warnings
# Testing imports # Testing imports
...@@ -28,20 +31,22 @@ class TestNodes(support.TestCase): ...@@ -28,20 +31,22 @@ class TestNodes(support.TestCase):
"""Unit tests for nodes (Base, Leaf, Node).""" """Unit tests for nodes (Base, Leaf, Node)."""
def test_deprecated_prefix_methods(self): if sys.version_info >= (2,6):
l = pytree.Leaf(100, "foo") # warnings.catch_warnings is new in 2.6.
with warnings.catch_warnings(record=True) as w: def test_deprecated_prefix_methods(self):
warnings.simplefilter("always", DeprecationWarning) l = pytree.Leaf(100, "foo")
self.assertEqual(l.get_prefix(), "") with warnings.catch_warnings(record=True) as w:
l.set_prefix("hi") warnings.simplefilter("always", DeprecationWarning)
self.assertEqual(l.prefix, "hi") self.assertEqual(l.get_prefix(), "")
self.assertEqual(len(w), 2) l.set_prefix("hi")
for warning in w: self.assertEqual(l.prefix, "hi")
self.assertTrue(warning.category is DeprecationWarning) self.assertEqual(len(w), 2)
self.assertEqual(str(w[0].message), "get_prefix() is deprecated; " \ for warning in w:
"use the prefix property") self.assertTrue(warning.category is DeprecationWarning)
self.assertEqual(str(w[1].message), "set_prefix() is deprecated; " \ self.assertEqual(str(w[0].message), "get_prefix() is deprecated; " \
"use the prefix property") "use the prefix property")
self.assertEqual(str(w[1].message), "set_prefix() is deprecated; " \
"use the prefix property")
def test_instantiate_base(self): def test_instantiate_base(self):
if __debug__: if __debug__:
......
...@@ -2,6 +2,8 @@ ...@@ -2,6 +2,8 @@
Unit tests for refactor.py. Unit tests for refactor.py.
""" """
from __future__ import with_statement
import sys import sys
import os import os
import codecs import codecs
...@@ -61,42 +63,50 @@ class TestRefactoringTool(unittest.TestCase): ...@@ -61,42 +63,50 @@ class TestRefactoringTool(unittest.TestCase):
self.assertEqual(full_names, self.assertEqual(full_names,
["myfixes.fix_" + name for name in contents]) ["myfixes.fix_" + name for name in contents])
def test_detect_future_print(self): def test_detect_future_features(self):
run = refactor._detect_future_print run = refactor._detect_future_features
self.assertFalse(run("")) fs = frozenset
self.assertTrue(run("from __future__ import print_function")) empty = fs()
self.assertFalse(run("from __future__ import generators")) self.assertEqual(run(""), empty)
self.assertFalse(run("from __future__ import generators, feature")) self.assertEqual(run("from __future__ import print_function"),
input = "from __future__ import generators, print_function" fs(("print_function",)))
self.assertTrue(run(input)) self.assertEqual(run("from __future__ import generators"),
input ="from __future__ import print_function, generators" fs(("generators",)))
self.assertTrue(run(input)) self.assertEqual(run("from __future__ import generators, feature"),
input = "from __future__ import (print_function,)" fs(("generators", "feature")))
self.assertTrue(run(input)) inp = "from __future__ import generators, print_function"
input = "from __future__ import (generators, print_function)" self.assertEqual(run(inp), fs(("generators", "print_function")))
self.assertTrue(run(input)) inp ="from __future__ import print_function, generators"
input = "from __future__ import (generators, nested_scopes)" self.assertEqual(run(inp), fs(("print_function", "generators")))
self.assertFalse(run(input)) inp = "from __future__ import (print_function,)"
input = """from __future__ import generators self.assertEqual(run(inp), fs(("print_function",)))
inp = "from __future__ import (generators, print_function)"
self.assertEqual(run(inp), fs(("generators", "print_function")))
inp = "from __future__ import (generators, nested_scopes)"
self.assertEqual(run(inp), fs(("generators", "nested_scopes")))
inp = """from __future__ import generators
from __future__ import print_function""" from __future__ import print_function"""
self.assertTrue(run(input)) self.assertEqual(run(inp), fs(("generators", "print_function")))
self.assertFalse(run("from")) invalid = ("from",
self.assertFalse(run("from 4")) "from 4",
self.assertFalse(run("from x")) "from x",
self.assertFalse(run("from x 5")) "from x 5",
self.assertFalse(run("from x im")) "from x im",
self.assertFalse(run("from x import")) "from x import",
self.assertFalse(run("from x import 4")) "from x import 4",
input = "'docstring'\nfrom __future__ import print_function" )
self.assertTrue(run(input)) for inp in invalid:
input = "'docstring'\n'somng'\nfrom __future__ import print_function" self.assertEqual(run(inp), empty)
self.assertFalse(run(input)) inp = "'docstring'\nfrom __future__ import print_function"
input = "# comment\nfrom __future__ import print_function" self.assertEqual(run(inp), fs(("print_function",)))
self.assertTrue(run(input)) inp = "'docstring'\n'somng'\nfrom __future__ import print_function"
input = "# comment\n'doc'\nfrom __future__ import print_function" self.assertEqual(run(inp), empty)
self.assertTrue(run(input)) inp = "# comment\nfrom __future__ import print_function"
input = "class x: pass\nfrom __future__ import print_function" self.assertEqual(run(inp), fs(("print_function",)))
self.assertFalse(run(input)) inp = "# comment\n'doc'\nfrom __future__ import print_function"
self.assertEqual(run(inp), fs(("print_function",)))
inp = "class x: pass\nfrom __future__ import print_function"
self.assertEqual(run(inp), empty)
def test_get_headnode_dict(self): def test_get_headnode_dict(self):
class NoneFix(fixer_base.BaseFix): class NoneFix(fixer_base.BaseFix):
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment