Commit 8fe37061 authored by Martin v. Löwis

Merged revisions 61596-61597 via svnmerge from

svn+ssh://pythondev@svn.python.org/python/trunk

........
  r61596 | martin.v.loewis | 2008-03-18 23:43:46 -0500 (Tue, 18 Mar 2008) | 2 lines

  Import lib2to3.
........
  r61597 | martin.v.loewis | 2008-03-18 23:58:04 -0500 (Tue, 18 Mar 2008) | 3 lines

  Initialized merge tracking via "svnmerge" with revisions "1-61595" from
  svn+ssh://pythondev@svn.python.org/sandbox/trunk/2to3/lib2to3
........
parent 6b727f38
# Grammar for Python
# Note: Changing the grammar specified in this file will most likely
# require corresponding changes in the parser module
# (../Modules/parsermodule.c). If you can't make the changes to
# that module yourself, please co-ordinate the required changes
# with someone who can; ask around on python-dev for help. Fred
# Drake <fdrake@acm.org> will probably be listening there.
# NOTE WELL: You should also follow all the steps listed in PEP 306,
# "How to Change Python's Grammar"
# Commands for Kees Blom's railroad program
#diagram:token NAME
#diagram:token NUMBER
#diagram:token STRING
#diagram:token NEWLINE
#diagram:token ENDMARKER
#diagram:token INDENT
#diagram:output\input python.bla
#diagram:token DEDENT
#diagram:output\textwidth 20.04cm\oddsidemargin 0.0cm\evensidemargin 0.0cm
#diagram:rules
# Start symbols for the grammar:
# file_input is a module or sequence of commands read from an input file;
# single_input is a single interactive statement;
# eval_input is the input for the eval() and input() functions.
# NB: compound_stmt in single_input is followed by extra NEWLINE!
file_input: (NEWLINE | stmt)* ENDMARKER
single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
eval_input: testlist NEWLINE* ENDMARKER
decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
decorators: decorator+
decorated: decorators (classdef | funcdef)
funcdef: 'def' NAME parameters ['->' test] ':' suite
parameters: '(' [typedargslist] ')'
typedargslist: ((tfpdef ['=' test] ',')*
('*' [tname] (',' tname ['=' test])* [',' '**' tname] | '**' tname)
| tfpdef ['=' test] (',' tfpdef ['=' test])* [','])
tname: NAME [':' test]
tfpdef: tname | '(' tfplist ')'
tfplist: tfpdef (',' tfpdef)* [',']
varargslist: ((vfpdef ['=' test] ',')*
('*' [vname] (',' vname ['=' test])* [',' '**' vname] | '**' vname)
| vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
vname: NAME
vfpdef: vname | '(' vfplist ')'
vfplist: vfpdef (',' vfpdef)* [',']
stmt: simple_stmt | compound_stmt
simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
small_stmt: (expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt |
import_stmt | global_stmt | exec_stmt | assert_stmt)
expr_stmt: testlist (augassign (yield_expr|testlist) |
('=' (yield_expr|testlist))*)
augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' |
'<<=' | '>>=' | '**=' | '//=')
# For normal assignments, additional restrictions enforced by the interpreter
print_stmt: 'print' ( [ test (',' test)* [','] ] |
'>>' test [ (',' test)+ [','] ] )
del_stmt: 'del' exprlist
pass_stmt: 'pass'
flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
break_stmt: 'break'
continue_stmt: 'continue'
return_stmt: 'return' [testlist]
yield_stmt: yield_expr
raise_stmt: 'raise' [test ['from' test | ',' test [',' test]]]
import_stmt: import_name | import_from
import_name: 'import' dotted_as_names
import_from: ('from' ('.'* dotted_name | '.'+)
'import' ('*' | '(' import_as_names ')' | import_as_names))
import_as_name: NAME ['as' NAME]
dotted_as_name: dotted_name ['as' NAME]
import_as_names: import_as_name (',' import_as_name)* [',']
dotted_as_names: dotted_as_name (',' dotted_as_name)*
dotted_name: NAME ('.' NAME)*
global_stmt: ('global' | 'nonlocal') NAME (',' NAME)*
exec_stmt: 'exec' expr ['in' test [',' test]]
assert_stmt: 'assert' test [',' test]
compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated
if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
while_stmt: 'while' test ':' suite ['else' ':' suite]
for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
try_stmt: ('try' ':' suite
((except_clause ':' suite)+
['else' ':' suite]
['finally' ':' suite] |
'finally' ':' suite))
with_stmt: 'with' test [ with_var ] ':' suite
with_var: 'as' expr
# NB compile.c makes sure that the default except clause is last
except_clause: 'except' [test [(',' | 'as') test]]
suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
# Backward compatibility cruft to support:
# [ x for x in lambda: True, lambda: False if x() ]
# even while also allowing:
# lambda x: 5 if x else 2
# (But not a mix of the two)
testlist_safe: old_test [(',' old_test)+ [',']]
old_test: or_test | old_lambdef
old_lambdef: 'lambda' [varargslist] ':' old_test
test: or_test ['if' or_test 'else' test] | lambdef
or_test: and_test ('or' and_test)*
and_test: not_test ('and' not_test)*
not_test: 'not' not_test | comparison
comparison: expr (comp_op expr)*
comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
expr: xor_expr ('|' xor_expr)*
xor_expr: and_expr ('^' and_expr)*
and_expr: shift_expr ('&' shift_expr)*
shift_expr: arith_expr (('<<'|'>>') arith_expr)*
arith_expr: term (('+'|'-') term)*
term: factor (('*'|'/'|'%'|'//') factor)*
factor: ('+'|'-'|'~') factor | power
power: atom trailer* ['**' factor]
atom: ('(' [yield_expr|testlist_gexp] ')' |
'[' [listmaker] ']' |
'{' [dictsetmaker] '}' |
'`' testlist1 '`' |
NAME | NUMBER | STRING+ | '.' '.' '.')
listmaker: test ( comp_for | (',' test)* [','] )
testlist_gexp: test ( comp_for | (',' test)* [','] )
lambdef: 'lambda' [varargslist] ':' test
trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
subscriptlist: subscript (',' subscript)* [',']
subscript: test | [test] ':' [test] [sliceop]
sliceop: ':' [test]
exprlist: expr (',' expr)* [',']
testlist: test (',' test)* [',']
dictsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) |
(test (comp_for | (',' test)* [','])) )
classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
arglist: (argument ',')* (argument [',']| '*' test [',' '**' test] | '**' test)
argument: test [comp_for] | test '=' test # Really [keyword '='] test
comp_iter: comp_for | comp_if
comp_for: 'for' exprlist 'in' testlist_safe [comp_iter]
comp_if: 'if' old_test [comp_iter]
testlist1: test (',' test)*
# not used in grammar, but may appear in "node" passed from Parser to Compiler
encoding_decl: NAME
yield_expr: 'yield' [testlist]
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# A grammar to describe tree matching patterns.
# Not shown here:
# - 'TOKEN' stands for any token (leaf node)
# - 'any' stands for any node (leaf or interior)
# With 'any' we can still specify the sub-structure.
# The start symbol is 'Matcher'.
Matcher: Alternatives ENDMARKER
Alternatives: Alternative ('|' Alternative)*
Alternative: (Unit | NegatedUnit)+
Unit: [NAME '='] ( STRING [Repeater]
| NAME [Details] [Repeater]
| '(' Alternatives ')' [Repeater]
| '[' Alternatives ']'
)
NegatedUnit: 'not' (STRING | NAME [Details] | '(' Alternatives ')')
Repeater: '*' | '+' | '{' NUMBER [',' NUMBER] '}'
Details: '<' Alternatives '>'
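# Example (illustrative, taken from a fixer later in this commit): a
# pattern over this grammar that matches a call to buffer() with at
# most one argument, binding the name leaf for later replacement:
#
#     power< name='buffer' trailer< '(' [any] ')' > >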
# Dummy file to make this directory a package.
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Base class for fixers (optional, but recommended)."""
# Python imports
import logging
import itertools
# Get a usable 'set' constructor
try:
set
except NameError:
from sets import Set as set
# Local imports
from ..patcomp import PatternCompiler
from .. import pygram
class BaseFix(object):
"""Optional base class for fixers.
The subclass name must be FixFooBar where FooBar is the result of
removing underscores and capitalizing the words of the fix name.
For example, the class name for a fixer named 'has_key' should be
FixHasKey.
"""
PATTERN = None # Most subclasses should override with a string literal
pattern = None # Compiled pattern, set by compile_pattern()
options = None # Options object passed to initializer
filename = None # The filename (set by set_filename)
logger = None # A logger (set by set_filename)
numbers = itertools.count(1) # For new_name()
used_names = set() # A set of all used NAMEs
order = "post" # Does the fixer prefer pre- or post-order traversal
explicit = False # Is this ignored by refactor.py -f all?
# Shortcut for access to Python grammar symbols
syms = pygram.python_symbols
def __init__(self, options, log):
"""Initializer. Subclass may override.
Args:
options: an optparse.Values instance which can be used
to inspect the command line options.
log: a list to append warnings and other messages to.
"""
self.options = options
self.log = log
self.compile_pattern()
def compile_pattern(self):
"""Compiles self.PATTERN into self.pattern.
Subclass may override if it doesn't want to use
self.{pattern,PATTERN} in .match().
"""
if self.PATTERN is not None:
self.pattern = PatternCompiler().compile_pattern(self.PATTERN)
def set_filename(self, filename):
"""Set the filename, and a logger derived from it.
The main refactoring tool should call this.
"""
self.filename = filename
self.logger = logging.getLogger(filename)
def match(self, node):
"""Returns match for a given parse tree node.
Should return a true or false object (not necessarily a bool).
It may return a non-empty dict of matching sub-nodes as
returned by a matching pattern.
Subclass may override.
"""
results = {"node": node}
return self.pattern.match(node, results) and results
def transform(self, node, results):
"""Returns the transformation for a given parse tree node.
Args:
node: the root of the parse tree that matched the fixer.
results: a dict mapping symbolic names to part of the match.
Returns:
None, or a node that is a modified copy of the
argument node. The node argument may also be modified in-place to
effect the same change.
Subclass *must* override.
"""
raise NotImplementedError()
def parenthesize(self, node):
"""Wrapper around pygram.parenthesize()."""
return pygram.parenthesize(node)
def new_name(self, template="xxx_todo_changeme"):
"""Return a string suitable for use as an identifier
The new name is guaranteed not to conflict with other identifiers.
"""
name = template
while name in self.used_names:
name = template + str(self.numbers.next())
self.used_names.add(name)
return name
def log_message(self, message):
if self.first_log:
self.first_log = False
self.log.append("### In file %s ###" % self.filename)
self.log.append(message)
def cannot_convert(self, node, reason=None):
"""Warn the user that a given chunk of code is not valid Python 3,
and that it cannot be converted automatically.
First argument is the top-level node for the code in question.
Optional second argument is why it can't be converted.
"""
lineno = node.get_lineno()
for_output = node.clone()
for_output.set_prefix("")
msg = "Line %d: could not convert: %s"
self.log_message(msg % (lineno, for_output))
if reason:
self.log_message(reason)
def warning(self, node, reason):
"""Used for warning the user about possible uncertainty in the
translation.
First argument is the top-level node for the code in question.
Second argument is why the translation is uncertain.
"""
lineno = node.get_lineno()
self.log_message("Line %d: %s" % (lineno, reason))
def start_tree(self, tree, filename):
"""Some fixers need to maintain tree-wide state.
This method is called once, at the start of tree fix-up.
tree - the root node of the tree to be processed.
filename - the name of the file the tree came from.
"""
self.used_names = tree.used_names
self.set_filename(filename)
self.numbers = itertools.count(1)
self.first_log = True
def finish_tree(self, tree, filename):
"""Some fixers need to maintain tree-wide state.
This method is called once, at the conclusion of tree fix-up.
tree - the root node of the tree to be processed.
filename - the name of the file the tree came from.
"""
pass
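# A minimal sketch of a concrete fixer built on BaseFix (illustrative
# only: FixSpam, its pattern, and the spam -> eggs rename are
# hypothetical, not fixers shipped with lib2to3). Per the naming rule
# above, a fixer named 'spam' must be called FixSpam.
#
#     from . import basefix
#     from .util import Name
#
#     class FixSpam(basefix.BaseFix):
#         PATTERN = "'spam'"  # match any occurrence of the name 'spam'
#
#         def transform(self, node, results):
#             # Return a replacement leaf, keeping leading whitespace.
#             return Name("eggs", prefix=node.get_prefix())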
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for apply().
This converts apply(func, v, k) into (func)(*v, **k)."""
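# Illustration (hypothetical names f, v, k): the fixer rewrites
#     apply(f, v, k)
# into the argument-unpacking call
#     f(*v, **k)
# which behaves identically wherever both spellings are legal:
def _apply_demo(a, b, c=0):
    return (a, b, c)
assert _apply_demo(*(1, 2), **{"c": 3}) == (1, 2, 3)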
# Local imports
from .. import pytree
from ..pgen2 import token
from . import basefix
from .util import Call, Comma
class FixApply(basefix.BaseFix):
PATTERN = """
power< 'apply'
trailer<
'('
arglist<
(not argument<NAME '=' any>) func=any ','
(not argument<NAME '=' any>) args=any [','
(not argument<NAME '=' any>) kwds=any] [',']
>
')'
>
>
"""
def transform(self, node, results):
syms = self.syms
assert results
func = results["func"]
args = results["args"]
kwds = results.get("kwds")
prefix = node.get_prefix()
func = func.clone()
if (func.type not in (token.NAME, syms.atom) and
(func.type != syms.power or
func.children[-2].type == token.DOUBLESTAR)):
# Need to parenthesize
func = self.parenthesize(func)
func.set_prefix("")
args = args.clone()
args.set_prefix("")
if kwds is not None:
kwds = kwds.clone()
kwds.set_prefix("")
l_newargs = [pytree.Leaf(token.STAR, "*"), args]
if kwds is not None:
l_newargs.extend([Comma(),
pytree.Leaf(token.DOUBLESTAR, "**"),
kwds])
l_newargs[-2].set_prefix(" ") # that's the ** token
# XXX Sometimes we could be cleverer, e.g. apply(f, (x, y) + t)
# can be translated into f(x, y, *t) instead of f(*(x, y) + t)
#new = pytree.Node(syms.power, (func, ArgList(l_newargs)))
return Call(func, l_newargs, prefix=prefix)
"""Fixer for basestring -> str."""
# Author: Christian Heimes
# Local imports
from . import basefix
from .util import Name
class FixBasestring(basefix.BaseFix):
PATTERN = "'basestring'"
def transform(self, node, results):
return Name("str", prefix=node.get_prefix())
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that changes buffer(...) into memoryview(...)."""
# Local imports
from . import basefix
from .util import Name
class FixBuffer(basefix.BaseFix):
explicit = True # The user must ask for this fixer
PATTERN = """
power< name='buffer' trailer< '(' [any] ')' > >
"""
def transform(self, node, results):
name = results["name"]
name.replace(Name("memoryview", prefix=name.get_prefix()))
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for callable().
This converts callable(obj) into hasattr(obj, '__call__')."""
# Local imports
from .. import pytree
from . import basefix
from .util import Call, Name, String
class FixCallable(basefix.BaseFix):
# Ignore callable(*args) or use of keywords.
# Either could be a hint that the builtin callable() is not being used.
PATTERN = """
power< 'callable'
trailer< lpar='('
( not(arglist | argument<any '=' any>) func=any
| func=arglist<(not argument<any '=' any>) any ','> )
rpar=')' >
after=any*
>
"""
def transform(self, node, results):
func = results["func"]
args = [func.clone(), String(', '), String("'__call__'")]
return Call(Name("hasattr"), args, prefix=node.get_prefix())
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for dict methods.
d.keys() -> list(d.keys())
d.items() -> list(d.items())
d.values() -> list(d.values())
d.iterkeys() -> iter(d.keys())
d.iteritems() -> iter(d.items())
d.itervalues() -> iter(d.values())
Except in certain very specific contexts: the iter() can be dropped
when the context is list(), sorted(), iter() or for...in; the list()
can be dropped when the context is list() or sorted() (but not iter()
or for...in!). Special contexts that apply to both: list(), sorted(), tuple(),
set(), any(), all(), sum().
Note: iter(d.keys()) could be written as iter(d) but since the
original d.iterkeys() was also redundant we don't fix this. And there
are (rare) contexts where it makes a difference (e.g. when passing it
as an argument to a function that introspects the argument).
"""
# Local imports
from .. import pytree
from .. import patcomp
from ..pgen2 import token
from . import basefix
from .util import Name, Call, LParen, RParen, ArgList, Dot, set
exempt = set(["sorted", "list", "set", "any", "all", "tuple", "sum"])
iter_exempt = exempt | set(["iter"])
class FixDict(basefix.BaseFix):
PATTERN = """
power< head=any+
trailer< '.' method=('keys'|'items'|'values'|
'iterkeys'|'iteritems'|'itervalues') >
parens=trailer< '(' ')' >
tail=any*
>
"""
def transform(self, node, results):
head = results["head"]
method = results["method"][0] # Extract node for method name
tail = results["tail"]
syms = self.syms
method_name = method.value
isiter = method_name.startswith("iter")
if isiter:
method_name = method_name[4:]
assert method_name in ("keys", "items", "values"), repr(method)
head = [n.clone() for n in head]
tail = [n.clone() for n in tail]
special = not tail and self.in_special_context(node, isiter)
args = head + [pytree.Node(syms.trailer,
[Dot(),
Name(method_name,
prefix=method.get_prefix())]),
results["parens"].clone()]
new = pytree.Node(syms.power, args)
if not special:
new.set_prefix("")
new = Call(Name(isiter and "iter" or "list"), [new])
if tail:
new = pytree.Node(syms.power, [new] + tail)
new.set_prefix(node.get_prefix())
return new
P1 = "power< func=NAME trailer< '(' node=any ')' > any* >"
p1 = patcomp.compile_pattern(P1)
P2 = """for_stmt< 'for' any 'in' node=any ':' any* >
| comp_for< 'for' any 'in' node=any any* >
"""
p2 = patcomp.compile_pattern(P2)
def in_special_context(self, node, isiter):
if node.parent is None:
return False
results = {}
if (node.parent.parent is not None and
self.p1.match(node.parent.parent, results) and
results["node"] is node):
if isiter:
# iter(d.iterkeys()) -> iter(d.keys()), etc.
return results["func"].value in iter_exempt
else:
# list(d.keys()) -> list(d.keys()), etc.
return results["func"].value in exempt
if not isiter:
return False
# for ... in d.iterkeys() -> for ... in d.keys(), etc.
return self.p2.match(node.parent, results) and results["node"] is node
"""Fixer for except statements with named exceptions.
The following cases will be converted:
- "except E, T:" where T is a name:
except E as T:
- "except E, T:" where T is not a name, tuple or list:
except E as t:
T = t
This is done because the target of an "except" clause must be a
name.
- "except E, T:" where T is a tuple or list literal:
except E as t:
T = t.args
"""
# Author: Collin Winter
# Local imports
from .. import pytree
from ..pgen2 import token
from . import basefix
from .util import Assign, Attr, Name, is_tuple, is_list, reversed
def find_excepts(nodes):
for i, n in enumerate(nodes):
if isinstance(n, pytree.Node):
if n.children[0].value == 'except':
yield (n, nodes[i+2])
class FixExcept(basefix.BaseFix):
PATTERN = """
try_stmt< 'try' ':' suite
cleanup=((except_clause ':' suite)+ ['else' ':' suite]
['finally' ':' suite]
| 'finally' ':' suite) >
"""
def transform(self, node, results):
syms = self.syms
try_cleanup = [ch.clone() for ch in results['cleanup']]
for except_clause, e_suite in find_excepts(try_cleanup):
if len(except_clause.children) == 4:
(E, comma, N) = except_clause.children[1:4]
comma.replace(Name("as", prefix=" "))
if N.type != token.NAME:
# Generate a new N for the except clause
new_N = Name(self.new_name(), prefix=" ")
target = N.clone()
target.set_prefix("")
N.replace(new_N)
new_N = new_N.clone()
# Insert "old_N = new_N" as the first statement in
# the except body. This loop skips leading whitespace
# and indents
#TODO(cwinter) suite-cleanup
suite_stmts = e_suite.children
for i, stmt in enumerate(suite_stmts):
if isinstance(stmt, pytree.Node):
break
# The assignment is different if old_N is a tuple or list
# In that case, the assignment is old_N = new_N.args
if is_tuple(N) or is_list(N):
assign = Assign(target, Attr(new_N, Name('args')))
else:
assign = Assign(target, new_N)
#TODO(cwinter) stopgap until children becomes a smart list
for child in reversed(suite_stmts[:i]):
e_suite.insert_child(0, child)
e_suite.insert_child(i, assign)
elif N.get_prefix() == "":
# No space after a comma is legal; no space after "as",
# not so much.
N.set_prefix(" ")
#TODO(cwinter) fix this when children becomes a smart list
children = [c.clone() for c in node.children[:3]] + try_cleanup
return pytree.Node(node.type, children)
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for exec.
This converts usages of the exec statement into calls to a built-in
exec() function.
exec code in ns1, ns2 -> exec(code, ns1, ns2)
"""
# Local imports
from .. import pytree
from . import basefix
from .util import Comma, Name, Call
class FixExec(basefix.BaseFix):
PATTERN = """
exec_stmt< 'exec' a=any 'in' b=any [',' c=any] >
|
exec_stmt< 'exec' (not atom<'(' [any] ')'>) a=any >
"""
def transform(self, node, results):
assert results
syms = self.syms
a = results["a"]
b = results.get("b")
c = results.get("c")
args = [a.clone()]
args[0].set_prefix("")
if b is not None:
args.extend([Comma(), b.clone()])
if c is not None:
args.extend([Comma(), c.clone()])
return Call(Name("exec"), args, prefix=node.get_prefix())
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for execfile.
This converts usages of the execfile function into calls to the built-in
exec() function.
"""
from .. import pytree
from . import basefix
from .util import Comma, Name, Call, LParen, RParen, Dot
class FixExecfile(basefix.BaseFix):
PATTERN = """
power< 'execfile' trailer< '(' arglist< filename=any [',' globals=any [',' locals=any ] ] > ')' > >
|
power< 'execfile' trailer< '(' filename=any ')' > >
"""
def transform(self, node, results):
assert results
syms = self.syms
filename = results["filename"]
globals = results.get("globals")
locals = results.get("locals")
args = [Name('open'), LParen(), filename.clone(), RParen(), Dot(),
Name('read'), LParen(), RParen()]
args[0].set_prefix("")
if globals is not None:
args.extend([Comma(), globals.clone()])
if locals is not None:
args.extend([Comma(), locals.clone()])
return Call(Name("exec"), args, prefix=node.get_prefix())
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that changes filter(F, X) into list(filter(F, X)).
We avoid the transformation if the filter() call is directly contained
in iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or
for V in <>:.
NOTE: This is still not correct if the original code was depending on
filter(F, X) to return a string if X is a string and a tuple if X is a
tuple. That would require type inference, which we don't do. Let
Python 2.6 figure it out.
"""
# Local imports
from .. import pytree
from .. import patcomp
from ..pgen2 import token
from . import basefix
from .util import Name, Call, ListComp, attr_chain, does_tree_import
class FixFilter(basefix.BaseFix):
PATTERN = """
filter_lambda=power<
'filter'
trailer<
'('
arglist<
lambdef< 'lambda'
(fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
>
','
it=any
>
')'
>
>
|
power<
'filter'
trailer< '(' arglist< none='None' ',' seq=any > ')' >
>
|
power<
'filter'
args=trailer< '(' [any] ')' >
>
"""
def start_tree(self, *args):
super(FixFilter, self).start_tree(*args)
self._new_filter = None
def has_new_filter(self, node):
if self._new_filter is not None:
return self._new_filter
self._new_filter = does_tree_import('future_builtins', 'filter', node)
return self._new_filter
def transform(self, node, results):
if self.has_new_filter(node):
# If filter is imported from future_builtins, we don't want to
# do anything here.
return
if "filter_lambda" in results:
new = ListComp(results.get("fp").clone(),
results.get("fp").clone(),
results.get("it").clone(),
results.get("xp").clone())
elif "none" in results:
new = ListComp(Name("_f"),
Name("_f"),
results["seq"].clone(),
Name("_f"))
else:
if in_special_context(node):
return None
new = node.clone()
new.set_prefix("")
new = Call(Name("list"), [new])
new.set_prefix(node.get_prefix())
return new
P0 = """for_stmt< 'for' any 'in' node=any ':' any* >
| comp_for< 'for' any 'in' node=any any* >
"""
p0 = patcomp.compile_pattern(P0)
P1 = """
power<
( 'iter' | 'list' | 'tuple' | 'sorted' | 'set' | 'sum' |
'any' | 'all' | (any* trailer< '.' 'join' >) )
trailer< '(' node=any ')' >
any*
>
"""
p1 = patcomp.compile_pattern(P1)
P2 = """
power<
'sorted'
trailer< '(' arglist<node=any any*> ')' >
any*
>
"""
p2 = patcomp.compile_pattern(P2)
def in_special_context(node):
patterns = [p0, p1, p2]
for pattern, parent in zip(patterns, attr_chain(node, "parent")):
results = {}
if pattern.match(parent, results) and results["node"] is node:
return True
return False
"""Fix function attribute names (f.func_x -> f.__x__)."""
# Author: Collin Winter
# Local imports
from . import basefix
from .util import Name
class FixFuncattrs(basefix.BaseFix):
PATTERN = """
power< any+ trailer< '.' attr=('func_closure' | 'func_doc' | 'func_globals'
| 'func_name' | 'func_defaults' | 'func_code'
| 'func_dict') > any* >
"""
def transform(self, node, results):
attr = results["attr"][0]
attr.replace(Name(("__%s__" % attr.value[5:]),
prefix=attr.get_prefix()))
"""Remove __future__ imports
from __future__ import foo is replaced with an empty line.
"""
# Author: Christian Heimes
# Local imports
from . import basefix
from .util import BlankLine
class FixFuture(basefix.BaseFix):
PATTERN = """import_from< 'from' module_name="__future__" 'import' any >"""
def transform(self, node, results):
return BlankLine()
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for has_key().
Calls to .has_key() methods are expressed in terms of the 'in'
operator:
d.has_key(k) -> k in d
CAVEATS:
1) While the primary target of this fixer is dict.has_key(), the
fixer will change any has_key() method call, regardless of its
class.
2) Cases like this will not be converted:
m = d.has_key
if m(k):
...
Only *calls* to has_key() are converted. While it is possible to
convert the above to something like
m = d.__contains__
if m(k):
...
this is currently not done.
"""
# Local imports
from .. import pytree
from ..pgen2 import token
from . import basefix
from .util import Name
class FixHasKey(basefix.BaseFix):
PATTERN = """
anchor=power<
before=any+
trailer< '.' 'has_key' >
trailer<
'('
( not(arglist | argument<any '=' any>) arg=any
| arglist<(not argument<any '=' any>) arg=any ','>
)
')'
>
after=any*
>
|
negation=not_test<
'not'
anchor=power<
before=any+
trailer< '.' 'has_key' >
trailer<
'('
( not(arglist | argument<any '=' any>) arg=any
| arglist<(not argument<any '=' any>) arg=any ','>
)
')'
>
>
>
"""
def transform(self, node, results):
assert results
syms = self.syms
if (node.parent.type == syms.not_test and
self.pattern.match(node.parent)):
# Don't transform a node matching the first alternative of the
# pattern when its parent matches the second alternative
return None
negation = results.get("negation")
anchor = results["anchor"]
prefix = node.get_prefix()
before = [n.clone() for n in results["before"]]
arg = results["arg"].clone()
after = results.get("after")
if after:
after = [n.clone() for n in after]
if arg.type in (syms.comparison, syms.not_test, syms.and_test,
syms.or_test, syms.test, syms.lambdef, syms.argument):
arg = self.parenthesize(arg)
if len(before) == 1:
before = before[0]
else:
before = pytree.Node(syms.power, before)
before.set_prefix(" ")
n_op = Name("in", prefix=" ")
if negation:
n_not = Name("not", prefix=" ")
n_op = pytree.Node(syms.comp_op, (n_not, n_op))
new = pytree.Node(syms.comparison, (arg, n_op, before))
if after:
new = self.parenthesize(new)
new = pytree.Node(syms.power, (new,) + tuple(after))
if node.parent.type in (syms.comparison, syms.expr, syms.xor_expr,
syms.and_expr, syms.shift_expr,
syms.arith_expr, syms.term,
syms.factor, syms.power):
new = self.parenthesize(new)
new.set_prefix(prefix)
return new
"""Adjust some old Python 2 idioms to their modern counterparts.
* Change some type comparisons to isinstance() calls:
type(x) == T -> isinstance(x, T)
type(x) is T -> isinstance(x, T)
type(x) != T -> not isinstance(x, T)
type(x) is not T -> not isinstance(x, T)
* Change "while 1:" into "while True:".
* Change both
v = list(EXPR)
v.sort()
foo(v)
and the more general
v = EXPR
v.sort()
foo(v)
into
v = sorted(EXPR)
foo(v)
"""
# Author: Jacques Frechet, Collin Winter
# Local imports
from . import basefix
from .util import Call, Comma, Name, Node, syms
CMP = "(n='!=' | '==' | 'is' | n=comp_op< 'is' 'not' >)"
TYPE = "power< 'type' trailer< '(' x=any ')' > >"
class FixIdioms(basefix.BaseFix):
explicit = True # The user must ask for this fixer
PATTERN = r"""
isinstance=comparison< %s %s T=any >
|
isinstance=comparison< T=any %s %s >
|
while_stmt< 'while' while='1' ':' any+ >
|
sorted=any<
any*
simple_stmt<
expr_stmt< id1=any '='
power< list='list' trailer< '(' (not arglist<any+>) any ')' > >
>
'\n'
>
sort=
simple_stmt<
power< id2=any
trailer< '.' 'sort' > trailer< '(' ')' >
>
'\n'
>
next=any*
>
|
sorted=any<
any*
simple_stmt< expr_stmt< id1=any '=' expr=any > '\n' >
sort=
simple_stmt<
power< id2=any
trailer< '.' 'sort' > trailer< '(' ')' >
>
'\n'
>
next=any*
>
""" % (TYPE, CMP, CMP, TYPE)
def match(self, node):
r = super(FixIdioms, self).match(node)
# If we've matched one of the sort/sorted subpatterns above, we
# want to reject matches where the initial assignment and the
# subsequent .sort() call involve different identifiers.
if r and "sorted" in r:
if r["id1"] == r["id2"]:
return r
return None
return r
def transform(self, node, results):
if "isinstance" in results:
return self.transform_isinstance(node, results)
elif "while" in results:
return self.transform_while(node, results)
elif "sorted" in results:
return self.transform_sort(node, results)
else:
raise RuntimeError("Invalid match")
def transform_isinstance(self, node, results):
x = results["x"].clone() # The thing inside of type()
T = results["T"].clone() # The type being compared against
x.set_prefix("")
T.set_prefix(" ")
test = Call(Name("isinstance"), [x, Comma(), T])
if "n" in results:
test.set_prefix(" ")
test = Node(syms.not_test, [Name("not"), test])
test.set_prefix(node.get_prefix())
return test
def transform_while(self, node, results):
one = results["while"]
one.replace(Name("True", prefix=one.get_prefix()))
def transform_sort(self, node, results):
sort_stmt = results["sort"]
next_stmt = results["next"]
list_call = results.get("list")
simple_expr = results.get("expr")
if list_call:
list_call.replace(Name("sorted", prefix=list_call.get_prefix()))
elif simple_expr:
new = simple_expr.clone()
new.set_prefix("")
simple_expr.replace(Call(Name("sorted"), [new],
prefix=simple_expr.get_prefix()))
else:
raise RuntimeError("should not have reached here")
sort_stmt.remove()
if next_stmt:
next_stmt[0].set_prefix(sort_stmt.get_prefix())
"""Fix incompatible imports and module references.
Fixes:
* StringIO -> io
* cStringIO -> io
* md5 -> hashlib
"""
# Author: Collin Winter
# Local imports
from . import basefix
from .util import Name, attr_chain, any, set
import __builtin__
builtin_names = [name for name in dir(__builtin__)
if name not in ("__name__", "__doc__")]
MAPPING = {"StringIO": ("io", ["StringIO"]),
"cStringIO": ("io", ["StringIO"]),
"__builtin__" : ("builtins", builtin_names),
}
def alternates(members):
return "(" + "|".join(map(repr, members)) + ")"
def build_pattern():
bare = set()
for old_module, (new_module, members) in MAPPING.items():
bare.add(old_module)
bare.update(members)
members = alternates(members)
yield """import_name< 'import' (module=%r
| dotted_as_names< any* module=%r any* >) >
""" % (old_module, old_module)
yield """import_from< 'from' module_name=%r 'import'
( %s | import_as_name< %s 'as' any >) >
""" % (old_module, members, members)
yield """import_from< 'from' module_name=%r 'import' star='*' >
""" % old_module
yield """import_name< 'import'
dotted_as_name< module_name=%r 'as' any > >
""" % old_module
yield """power< module_name=%r trailer< '.' %s > any* >
""" % (old_module, members)
yield """bare_name=%s""" % alternates(bare)
class FixImports(basefix.BaseFix):
PATTERN = "|".join(build_pattern())
order = "pre" # Pre-order tree traversal
# Don't match the node if it's within another match
def match(self, node):
match = super(FixImports, self).match
results = match(node)
if results:
if any([match(obj) for obj in attr_chain(node, "parent")]):
return False
return results
return False
def start_tree(self, tree, filename):
super(FixImports, self).start_tree(tree, filename)
self.replace = {}
def transform(self, node, results):
import_mod = results.get("module")
mod_name = results.get("module_name")
bare_name = results.get("bare_name")
star = results.get("star")
if import_mod or mod_name:
new_name, members = MAPPING[(import_mod or mod_name).value]
if import_mod:
self.replace[import_mod.value] = new_name
import_mod.replace(Name(new_name, prefix=import_mod.get_prefix()))
elif mod_name:
if star:
self.cannot_convert(node, "Cannot handle star imports.")
else:
mod_name.replace(Name(new_name, prefix=mod_name.get_prefix()))
elif bare_name:
bare_name = bare_name[0]
new_name = self.replace.get(bare_name.value)
if new_name:
bare_name.replace(Name(new_name, prefix=bare_name.get_prefix()))
"""Fixer that changes input(...) into eval(input(...))."""
# Author: Andre Roberge
# Local imports
from . import basefix
from .util import Call, Name
from .. import patcomp
context = patcomp.compile_pattern("power< 'eval' trailer< '(' any ')' > >")
class FixInput(basefix.BaseFix):
PATTERN = """
power< 'input' args=trailer< '(' [any] ')' > >
"""
def transform(self, node, results):
# If we're already wrapped in an eval() call, we're done.
if context.match(node.parent.parent):
return
new = node.clone()
new.set_prefix("")
return Call(Name("eval"), [new], prefix=node.get_prefix())
# Copyright 2006 Georg Brandl.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for intern().
intern(s) -> sys.intern(s)"""
# Local imports
from .. import pytree
from . import basefix
from .util import Name, Attr
class FixIntern(basefix.BaseFix):
PATTERN = """
power< 'intern'
trailer< lpar='('
( not(arglist | argument<any '=' any>) obj=any
| obj=arglist<(not argument<any '=' any>) any ','> )
rpar=')' >
after=any*
>
"""
def transform(self, node, results):
syms = self.syms
obj = results["obj"].clone()
if obj.type == syms.arglist:
newarglist = obj.clone()
else:
newarglist = pytree.Node(syms.arglist, [obj.clone()])
after = results["after"]
if after:
after = [n.clone() for n in after]
new = pytree.Node(syms.power,
Attr(Name("sys"), Name("intern")) +
[pytree.Node(syms.trailer,
[results["lpar"].clone(),
newarglist,
results["rpar"].clone()])] + after)
new.set_prefix(node.get_prefix())
return new
""" Fixer for itertools.(imap|ifilter|izip) --> (map|filter|zip) and
itertools.ifilterfalse --> itertools.filterfalse (bugs 2360-2363)
If itertools is imported as something else (i.e. "import itertools as it;
it.izip(spam, eggs)"), the method calls will not be fixed.
"""
# Local imports
from . import basefix
from .util import Name
class FixItertools(basefix.BaseFix):
it_funcs = "('imap'|'ifilter'|'izip'|'ifilterfalse')"
PATTERN = """
power< it='itertools'
trailer<
dot='.' func=%(it_funcs)s > trailer< '(' [any] ')' > >
|
power< func=%(it_funcs)s trailer< '(' [any] ')' > >
""" %(locals())
def transform(self, node, results):
prefix = None
func = results['func'][0]
if 'it' in results and func.value != 'ifilterfalse':
dot, it = (results['dot'], results['it'])
# Remove the 'itertools'
prefix = it.get_prefix()
it.remove()
# Replace the node which contains ('.', 'function') with the
# function (to be consistent with the second part of the pattern)
dot.remove()
func.parent.replace(func)
prefix = prefix or func.get_prefix()
func.replace(Name(func.value[1:], prefix=prefix))
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that turns 'long' into 'int' everywhere.
This also strips the trailing 'L' or 'l' from long literals.
"""
# Local imports
from .. import pytree
from . import basefix
from .util import Name, Number
class FixLong(basefix.BaseFix):
PATTERN = """
(long_type = 'long' | number = NUMBER)
"""
static_long = Name("long")
static_int = Name("int")
def transform(self, node, results):
long_type = results.get("long_type")
number = results.get("number")
new = None
if long_type:
assert node == self.static_long, node
new = self.static_int.clone()
if number and node.value[-1] in ("l", "L"):
new = Number(node.value[:-1])
if new is not None:
new.set_prefix(node.get_prefix())
return new
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that changes map(F, ...) into list(map(F, ...)) unless there
exists a 'from future_builtins import map' statement in the top-level
namespace.
As a special case, map(None, X) is changed into list(X). (This is
necessary because the semantics are changed in this case -- the new
map(None, X) is equivalent to [(x,) for x in X].)
We avoid the transformation (except for the special case mentioned
above) if the map() call is directly contained in iter(<>), list(<>),
tuple(<>), sorted(<>), ...join(<>), or for V in <>:.
NOTE: This is still not correct if the original code was depending on
map(F, X, Y, ...) to go on until the longest argument is exhausted,
substituting None for missing values -- like zip(), it now stops as
soon as the shortest argument is exhausted.
"""
# Local imports
from .. import pytree
from .. import patcomp
from ..pgen2 import token
from . import basefix
from .util import Name, Call, ListComp, attr_chain, does_tree_import
from ..pygram import python_symbols as syms
class FixMap(basefix.BaseFix):
PATTERN = """
map_none=power<
'map'
trailer< '(' arglist< 'None' ',' arg=any [','] > ')' >
>
|
map_lambda=power<
'map'
trailer<
'('
arglist<
lambdef< 'lambda'
(fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
>
','
it=any
>
')'
>
>
|
power<
'map'
args=trailer< '(' [any] ')' >
>
"""
def start_tree(self, *args):
super(FixMap, self).start_tree(*args)
self._future_map_found = None
def has_future_map(self, node):
if self._future_map_found is not None:
return self._future_map_found
self._future_map_found = does_tree_import('future_builtins', 'map', node)
return self._future_map_found
def transform(self, node, results):
if self.has_future_map(node):
# If a future map has been imported for this file, we won't
# be making any modifications
return
if node.parent.type == syms.simple_stmt:
self.warning(node, "You should use a for loop here")
new = node.clone()
new.set_prefix("")
new = Call(Name("list"), [new])
elif "map_lambda" in results:
new = ListComp(results.get("xp").clone(),
results.get("fp").clone(),
results.get("it").clone())
else:
if "map_none" in results:
new = results["arg"].clone()
else:
if in_special_context(node):
return None
new = node.clone()
new.set_prefix("")
new = Call(Name("list"), [new])
new.set_prefix(node.get_prefix())
return new
P0 = """for_stmt< 'for' any 'in' node=any ':' any* >
| comp_for< 'for' any 'in' node=any any* >
"""
p0 = patcomp.compile_pattern(P0)
P1 = """
power<
( 'iter' | 'list' | 'tuple' | 'sorted' | 'set' | 'sum' |
'any' | 'all' | (any* trailer< '.' 'join' >) )
trailer< '(' node=any ')' >
any*
>
"""
p1 = patcomp.compile_pattern(P1)
P2 = """
power<
'sorted'
trailer< '(' arglist<node=any any*> ')' >
any*
>
"""
p2 = patcomp.compile_pattern(P2)
def in_special_context(node):
patterns = [p0, p1, p2]
for pattern, parent in zip(patterns, attr_chain(node, "parent")):
results = {}
if pattern.match(parent, results) and results["node"] is node:
return True
return False
"""Fix bound method attributes (method.im_? -> method.__?__).
"""
# Author: Christian Heimes
# Local imports
from . import basefix
from .util import Name
MAP = {
"im_func" : "__func__",
"im_self" : "__self__",
"im_class" : "__self__.__class__"
}
class FixMethodattrs(basefix.BaseFix):
PATTERN = """
power< any+ trailer< '.' attr=('im_func' | 'im_self' | 'im_class') > any* >
"""
def transform(self, node, results):
attr = results["attr"][0]
new = MAP[attr.value]
attr.replace(Name(new, prefix=attr.get_prefix()))
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that turns <> into !=."""
# Local imports
from .. import pytree
from ..pgen2 import token
from . import basefix
class FixNe(basefix.BaseFix):
# This is so simple that we don't need the pattern compiler.
def match(self, node):
# Override
return node.type == token.NOTEQUAL and node.value == "<>"
def transform(self, node, results):
new = pytree.Leaf(token.NOTEQUAL, "!=")
new.set_prefix(node.get_prefix())
return new
"""Fixer for it.next() -> next(it), per PEP 3114."""
# Author: Collin Winter
# Things that currently aren't covered:
# - listcomp "next" names aren't warned
# - "with" statement targets aren't checked
# Local imports
from ..pgen2 import token
from ..pygram import python_symbols as syms
from . import basefix
from .util import Name, Call, find_binding, any
bind_warning = "Calls to builtin next() possibly shadowed by global binding"
class FixNext(basefix.BaseFix):
PATTERN = """
power< base=any+ trailer< '.' attr='next' > trailer< '(' ')' > >
|
power< head=any+ trailer< '.' attr='next' > not trailer< '(' ')' > >
|
classdef< 'class' any+ ':'
suite< any*
funcdef< 'def'
name='next'
parameters< '(' NAME ')' > any+ >
any* > >
|
global=global_stmt< 'global' any* 'next' any* >
|
mod=file_input< any+ >
"""
order = "pre" # Pre-order tree traversal
def start_tree(self, tree, filename):
super(FixNext, self).start_tree(tree, filename)
self.shadowed_next = False
def transform(self, node, results):
assert results
base = results.get("base")
attr = results.get("attr")
name = results.get("name")
mod = results.get("mod")
if base:
if self.shadowed_next:
attr.replace(Name("__next__", prefix=attr.get_prefix()))
else:
base = [n.clone() for n in base]
base[0].set_prefix("")
node.replace(Call(Name("next", prefix=node.get_prefix()), base))
elif name:
n = Name("__next__", prefix=name.get_prefix())
name.replace(n)
elif attr:
# We don't do this transformation if we're assigning to "x.next".
# Unfortunately, it doesn't seem possible to do this in PATTERN,
# so it's being done here.
if is_assign_target(node):
head = results["head"]
if "".join([str(n) for n in head]).strip() == '__builtin__':
self.warning(node, bind_warning)
return
attr.replace(Name("__next__"))
elif "global" in results:
self.warning(node, bind_warning)
self.shadowed_next = True
elif mod:
n = find_binding('next', mod)
if n:
self.warning(n, bind_warning)
self.shadowed_next = True
### The following functions help test if node is part of an assignment
### target.
def is_assign_target(node):
assign = find_assign(node)
if assign is None:
return False
for child in assign.children:
if child.type == token.EQUAL:
return False
elif is_subtree(child, node):
return True
return False
def find_assign(node):
if node.type == syms.expr_stmt:
return node
if node.type == syms.simple_stmt or node.parent is None:
return None
return find_assign(node.parent)
def is_subtree(root, node):
if root == node:
return True
return any([is_subtree(c, node) for c in root.children])
"""Fixer for __nonzero__ -> __bool__ methods."""
# Author: Collin Winter
# Local imports
from . import basefix
from .util import Name, syms
class FixNonzero(basefix.BaseFix):
PATTERN = """
classdef< 'class' any+ ':'
suite< any*
funcdef< 'def' name='__nonzero__'
parameters< '(' NAME ')' > any+ >
any* > >
"""
def transform(self, node, results):
name = results["name"]
new = Name("__bool__", prefix=name.get_prefix())
name.replace(new)
"""Fixer that turns 1L into 1, 0755 into 0o755.
"""
# Copyright 2007 Georg Brandl.
# Licensed to PSF under a Contributor Agreement.
# Local imports
from ..pgen2 import token
from . import basefix
from .util import Number, set
class FixNumliterals(basefix.BaseFix):
# This is so simple that we don't need the pattern compiler.
def match(self, node):
# Override
return (node.type == token.NUMBER and
(node.value.startswith("0") or node.value[-1] in "Ll"))
def transform(self, node, results):
val = node.value
if val[-1] in 'Ll':
val = val[:-1]
elif val.startswith('0') and val.isdigit() and len(set(val)) > 1:
val = "0o" + val[1:]
return Number(val, prefix=node.get_prefix())
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for print.
Change:
'print' into 'print()'
'print ...' into 'print(...)'
'print ... ,' into 'print(..., end=" ")'
'print >>x, ...' into 'print(..., file=x)'
"""
# Local imports
from .. import patcomp
from .. import pytree
from ..pgen2 import token
from . import basefix
from .util import Name, Call, Comma, String, is_tuple
parend_expr = patcomp.compile_pattern(
"""atom< '(' [atom|STRING|NAME] ')' >"""
)
class FixPrint(basefix.BaseFix):
PATTERN = """
simple_stmt< bare='print' any > | print_stmt
"""
def transform(self, node, results):
assert results
bare_print = results.get("bare")
if bare_print:
# Special-case print all by itself
bare_print.replace(Call(Name("print"), [],
prefix=bare_print.get_prefix()))
return
assert node.children[0] == Name("print")
args = node.children[1:]
if len(args) == 1 and parend_expr.match(args[0]):
# We don't want to keep sticking parens around an
# already-parenthesised expression.
return
sep = end = file = None
if args and args[-1] == Comma():
args = args[:-1]
end = " "
if args and args[0] == pytree.Leaf(token.RIGHTSHIFT, ">>"):
assert len(args) >= 2
file = args[1].clone()
args = args[3:] # Strip a possible comma after the file expression
# Now synthesize a print(args, sep=..., end=..., file=...) node.
l_args = [arg.clone() for arg in args]
if l_args:
l_args[0].set_prefix("")
if sep is not None or end is not None or file is not None:
if sep is not None:
self.add_kwarg(l_args, "sep", String(repr(sep)))
if end is not None:
self.add_kwarg(l_args, "end", String(repr(end)))
if file is not None:
self.add_kwarg(l_args, "file", file)
n_stmt = Call(Name("print"), l_args)
n_stmt.set_prefix(node.get_prefix())
return n_stmt
def add_kwarg(self, l_nodes, s_kwd, n_expr):
# XXX All this prefix-setting may lose comments (though rarely)
n_expr.set_prefix("")
n_argument = pytree.Node(self.syms.argument,
(Name(s_kwd),
pytree.Leaf(token.EQUAL, "="),
n_expr))
if l_nodes:
l_nodes.append(Comma())
n_argument.set_prefix(" ")
l_nodes.append(n_argument)
"""Fixer for 'raise E, V, T'
raise -> raise
raise E -> raise E
raise E, V -> raise E(V)
raise E, V, T -> raise E(V).with_traceback(T)
raise (((E, E'), E''), E'''), V -> raise E(V)
raise "foo", V, T -> warns about string exceptions
CAVEATS:
1) "raise E, V" will be incorrectly translated if V is an exception
instance. The correct Python 3 idiom is
raise E from V
but since we can't detect instance-hood by syntax alone and since
any client code would have to be changed as well, we don't automate
this.
"""
# Author: Collin Winter
# Local imports
from .. import pytree
from ..pgen2 import token
from . import basefix
from .util import Name, Call, Attr, ArgList, is_tuple
class FixRaise(basefix.BaseFix):
PATTERN = """
raise_stmt< 'raise' exc=any [',' val=any [',' tb=any]] >
"""
def transform(self, node, results):
syms = self.syms
exc = results["exc"].clone()
if exc.type is token.STRING:
self.cannot_convert(node, "Python 3 does not support string exceptions")
return
# Python 2 supports
# raise ((((E1, E2), E3), E4), E5), V
# as a synonym for
# raise E1, V
# Since Python 3 will not support this, we recurse down any tuple
# literals, always taking the first element.
if is_tuple(exc):
while is_tuple(exc):
# exc.children[1:-1] is the unparenthesized tuple
# exc.children[1].children[0] is the first element of the tuple
exc = exc.children[1].children[0].clone()
exc.set_prefix(" ")
if "val" not in results:
# One-argument raise
new = pytree.Node(syms.raise_stmt, [Name("raise"), exc])
new.set_prefix(node.get_prefix())
return new
val = results["val"].clone()
if is_tuple(val):
args = [c.clone() for c in val.children[1:-1]]
else:
val.set_prefix("")
args = [val]
if "tb" in results:
tb = results["tb"].clone()
tb.set_prefix("")
e = Call(exc, args)
with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])]
new = pytree.Node(syms.simple_stmt, [Name("raise")] + with_tb)
new.set_prefix(node.get_prefix())
return new
else:
return pytree.Node(syms.raise_stmt,
[Name("raise"), Call(exc, args)],
prefix=node.get_prefix())
"""Fixer that changes raw_input(...) into input(...)."""
# Author: Andre Roberge
# Local imports
from . import basefix
from .util import Name
class FixRawInput(basefix.BaseFix):
PATTERN = """
power< name='raw_input' trailer< '(' [any] ')' > >
"""
def transform(self, node, results):
name = results["name"]
name.replace(Name("input", prefix=name.get_prefix()))
"""Fix incompatible renames
Fixes:
* sys.maxint -> sys.maxsize
"""
# Author: Christian Heimes
# based on Collin Winter's fix_import
# Local imports
from . import basefix
from .util import Name, attr_chain, any, set
MAPPING = {"sys": {"maxint" : "maxsize"},
}
LOOKUP = {}
def alternates(members):
return "(" + "|".join(map(repr, members)) + ")"
def build_pattern():
#bare = set()
for module, replace in MAPPING.items():
for old_attr, new_attr in replace.items():
LOOKUP[(module, old_attr)] = new_attr
#bare.add(module)
#bare.add(old_attr)
#yield """
# import_name< 'import' (module=%r
# | dotted_as_names< any* module=%r any* >) >
# """ % (module, module)
yield """
import_from< 'from' module_name=%r 'import'
( attr_name=%r | import_as_name< attr_name=%r 'as' any >) >
""" % (module, old_attr, old_attr)
yield """
power< module_name=%r trailer< '.' attr_name=%r > any* >
""" % (module, old_attr)
#yield """bare_name=%s""" % alternates(bare)
class FixRenames(basefix.BaseFix):
PATTERN = "|".join(build_pattern())
order = "pre" # Pre-order tree traversal
# Don't match the node if it's within another match
def match(self, node):
match = super(FixRenames, self).match
results = match(node)
if results:
if any([match(obj) for obj in attr_chain(node, "parent")]):
return False
return results
return False
#def start_tree(self, tree, filename):
# super(FixRenames, self).start_tree(tree, filename)
# self.replace = {}
def transform(self, node, results):
mod_name = results.get("module_name")
attr_name = results.get("attr_name")
#bare_name = results.get("bare_name")
#import_mod = results.get("module")
if mod_name and attr_name:
new_attr = LOOKUP[(mod_name.value, attr_name.value)]
attr_name.replace(Name(new_attr, prefix=attr_name.get_prefix()))
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that transforms `xyzzy` into repr(xyzzy)."""
# Local imports
from . import basefix
from .util import Call, Name
class FixRepr(basefix.BaseFix):
PATTERN = """
atom < '`' expr=any '`' >
"""
def transform(self, node, results):
expr = results["expr"].clone()
if expr.type == self.syms.testlist1:
expr = self.parenthesize(expr)
return Call(Name("repr"), [expr], prefix=node.get_prefix())
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for StandardError -> Exception."""
# Local imports
from . import basefix
from .util import Name
class FixStandarderror(basefix.BaseFix):
PATTERN = """
'StandardError'
"""
def transform(self, node, results):
return Name("Exception", prefix=node.get_prefix())
"""Fixer for generator.throw(E, V, T).
g.throw(E) -> g.throw(E)
g.throw(E, V) -> g.throw(E(V))
g.throw(E, V, T) -> g.throw(E(V).with_traceback(T))
g.throw("foo"[, V[, T]]) will warn about string exceptions."""
# Author: Collin Winter
# Local imports
from .. import pytree
from ..pgen2 import token
from . import basefix
from .util import Name, Call, ArgList, Attr, is_tuple
class FixThrow(basefix.BaseFix):
PATTERN = """
power< any trailer< '.' 'throw' >
trailer< '(' args=arglist< exc=any ',' val=any [',' tb=any] > ')' >
>
|
power< any trailer< '.' 'throw' > trailer< '(' exc=any ')' > >
"""
def transform(self, node, results):
syms = self.syms
exc = results["exc"].clone()
if exc.type is token.STRING:
self.cannot_convert(node, "Python 3 does not support string exceptions")
return
# Leave "g.throw(E)" alone
val = results.get("val")
if val is None:
return
val = val.clone()
if is_tuple(val):
args = [c.clone() for c in val.children[1:-1]]
else:
val.set_prefix("")
args = [val]
throw_args = results["args"]
if "tb" in results:
tb = results["tb"].clone()
tb.set_prefix("")
e = Call(exc, args)
with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])]
throw_args.replace(pytree.Node(syms.power, with_tb))
else:
throw_args.replace(Call(exc, args))
"""Fixer for function definitions with tuple parameters.
def func(((a, b), c), d):
...
->
def func(x, d):
((a, b), c) = x
...
It will also support lambdas:
lambda (x, y): x + y -> lambda t: t[0] + t[1]
# The parens are a syntax error in Python 3
lambda (x): x + y -> lambda x: x + y
"""
# Author: Collin Winter
# Local imports
from .. import pytree
from ..pgen2 import token
from . import basefix
from .util import Assign, Name, Newline, Number, Subscript, syms
def is_docstring(stmt):
return isinstance(stmt, pytree.Node) and \
stmt.children[0].type == token.STRING
class FixTupleParams(basefix.BaseFix):
PATTERN = """
funcdef< 'def' any parameters< '(' args=any ')' >
['->' any] ':' suite=any+ >
|
lambda=
lambdef< 'lambda' args=vfpdef< '(' inner=any ')' >
':' body=any
>
"""
def transform(self, node, results):
if "lambda" in results:
return self.transform_lambda(node, results)
new_lines = []
suite = results["suite"]
args = results["args"]
# This crap is so "def foo(...): x = 5; y = 7" is handled correctly.
# TODO(cwinter): suite-cleanup
if suite[0].children[1].type == token.INDENT:
start = 2
indent = suite[0].children[1].value
end = Newline()
else:
start = 0
indent = "; "
end = pytree.Leaf(token.INDENT, "")
# We need access to self for new_name(), and making this a method
# doesn't feel right. Closing over self and new_lines makes the
# code below cleaner.
def handle_tuple(tuple_arg, add_prefix=False):
n = Name(self.new_name())
arg = tuple_arg.clone()
arg.set_prefix("")
stmt = Assign(arg, n.clone())
if add_prefix:
n.set_prefix(" ")
tuple_arg.replace(n)
new_lines.append(pytree.Node(syms.simple_stmt,
[stmt, end.clone()]))
if args.type == syms.tfpdef:
handle_tuple(args)
elif args.type == syms.typedargslist:
for i, arg in enumerate(args.children):
if arg.type == syms.tfpdef:
# Without add_prefix, the emitted code is correct,
# just ugly.
handle_tuple(arg, add_prefix=(i > 0))
if not new_lines:
return node
# This isn't strictly necessary, but it plays nicely with other fixers.
# TODO(cwinter) get rid of this when children becomes a smart list
for line in new_lines:
line.parent = suite[0]
# TODO(cwinter) suite-cleanup
after = start
if start == 0:
new_lines[0].set_prefix(" ")
elif is_docstring(suite[0].children[start]):
new_lines[0].set_prefix(indent)
after = start + 1
suite[0].children[after:after] = new_lines
for i in range(after+1, after+len(new_lines)+1):
suite[0].children[i].set_prefix(indent)
suite[0].changed()
def transform_lambda(self, node, results):
args = results["args"]
body = results["body"]
inner = simplify_args(results["inner"])
# Replace lambda ((((x)))): x with lambda x: x
if inner.type == token.NAME:
inner = inner.clone()
inner.set_prefix(" ")
args.replace(inner)
return
params = find_params(args)
to_index = map_to_index(params)
tup_name = self.new_name(tuple_name(params))
new_param = Name(tup_name, prefix=" ")
args.replace(new_param.clone())
for n in body.post_order():
if n.type == token.NAME and n.value in to_index:
subscripts = [c.clone() for c in to_index[n.value]]
new = pytree.Node(syms.power,
[new_param.clone()] + subscripts)
new.set_prefix(n.get_prefix())
n.replace(new)
### Helper functions for transform_lambda()
def simplify_args(node):
if node.type in (syms.vfplist, token.NAME):
return node
elif node.type == syms.vfpdef:
# These look like vfpdef< '(' x ')' > where x is NAME
# or another vfpdef instance (leading to recursion).
while node.type == syms.vfpdef:
node = node.children[1]
return node
raise RuntimeError("Received unexpected node %s" % node)
def find_params(node):
if node.type == syms.vfpdef:
return find_params(node.children[1])
elif node.type == token.NAME:
return node.value
return [find_params(c) for c in node.children if c.type != token.COMMA]
def map_to_index(param_list, prefix=[], d=None):
if d is None:
d = {}
for i, obj in enumerate(param_list):
trailer = [Subscript(Number(i))]
if isinstance(obj, list):
map_to_index(obj, trailer, d=d)
else:
d[obj] = prefix + trailer
return d
def tuple_name(param_list):
l = []
for obj in param_list:
if isinstance(obj, list):
l.append(tuple_name(obj))
else:
l.append(obj)
return "_".join(l)
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for removing uses of the types module.
These work only for the known names in the types module. The matched forms
can include the 'types.' prefix or not; it is assumed the module is imported either as:
import types
from types import ... # either * or specific types
The import statements are not modified.
There should be another fixer that handles at least the following constants:
type([]) -> list
type(()) -> tuple
type('') -> str
"""
# Local imports
from ..pgen2 import token
from .import basefix
from .util import Name
_TYPE_MAPPING = {
'BooleanType' : 'bool',
'BufferType' : 'memoryview',
'ClassType' : 'type',
'ComplexType' : 'complex',
'DictType': 'dict',
'DictionaryType' : 'dict',
'EllipsisType' : 'type(Ellipsis)',
#'FileType' : 'io.IOBase',
'FloatType': 'float',
'IntType': 'int',
'ListType': 'list',
'LongType': 'int',
'ObjectType' : 'object',
'NoneType': 'type(None)',
'NotImplementedType' : 'type(NotImplemented)',
'SliceType' : 'slice',
'StringType': 'bytes', # XXX ?
'StringTypes' : 'str', # XXX ?
'TupleType': 'tuple',
'TypeType' : 'type',
'UnicodeType': 'str',
'XRangeType' : 'range',
}
_pats = ["power< 'types' trailer< '.' name='%s' > >" % t for t in _TYPE_MAPPING]
class FixTypes(basefix.BaseFix):
PATTERN = '|'.join(_pats)
def transform(self, node, results):
new_value = _TYPE_MAPPING.get(results["name"].value)
if new_value:
return Name(new_value, prefix=node.get_prefix())
return None
"""Fixer that changes unicode to str, unichr to chr, and u"..." into "...".
"""
import re
from ..pgen2 import token
from .import basefix
class FixUnicode(basefix.BaseFix):
PATTERN = "STRING | NAME<'unicode' | 'unichr'>"
def transform(self, node, results):
if node.type == token.NAME:
if node.value == "unicode":
new = node.clone()
new.value = "str"
return new
if node.value == "unichr":
new = node.clone()
new.value = "chr"
return new
# XXX Warn when __unicode__ found?
elif node.type == token.STRING:
if re.match(r"[uU][rR]?[\'\"]", node.value):
new = node.clone()
new.value = new.value[1:]
return new
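# Illustrative rewrites (hypothetical inputs): unicode(x) -> str(x),
# unichr(65) -> chr(65), and u"text" -> "text" (the regex above matches
# the u/U string prefix, which new.value[1:] then strips).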
"""Fixer that changes 'a ,b' into 'a, b'.
This also changes '{a :b}' into '{a: b}', but does not touch other
uses of colons. It does not touch other uses of whitespace.
"""
from .. import pytree
from ..pgen2 import token
from .import basefix
class FixWsComma(basefix.BaseFix):
explicit = True # The user must ask for this fixer
PATTERN = """
any<(not(',') any)+ ',' ((not(',') any)+ ',')* [not(',') any]>
"""
COMMA = pytree.Leaf(token.COMMA, ",")
COLON = pytree.Leaf(token.COLON, ":")
SEPS = (COMMA, COLON)
def transform(self, node, results):
new = node.clone()
comma = False
for child in new.children:
if child in self.SEPS:
prefix = child.get_prefix()
if prefix.isspace() and "\n" not in prefix:
child.set_prefix("")
comma = True
else:
if comma:
prefix = child.get_prefix()
if not prefix:
child.set_prefix(" ")
comma = False
return new
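# Sketch of the mechanics: in pytree, a leaf's "prefix" is the
# whitespace and comment text preceding it, so "a ,b" becomes "a, b"
# by clearing the comma's prefix and giving the following leaf a
# single-space prefix when it has none.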
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that changes xrange(...) into range(...)."""
# Local imports
from .import basefix
from .util import Name
class FixXrange(basefix.BaseFix):
PATTERN = """
power< name='xrange' trailer< '(' [any] ')' > >
"""
def transform(self, node, results):
name = results["name"]
name.replace(Name("range", prefix=name.get_prefix()))
"""Fix "for x in f.xreadlines()" -> "for x in f".
This fixer will also convert g(f.xreadlines) into g(f.__iter__)."""
# Author: Collin Winter
# Local imports
from .import basefix
from .util import Name
class FixXreadlines(basefix.BaseFix):
PATTERN = """
power< call=any+ trailer< '.' 'xreadlines' > trailer< '(' ')' > >
|
power< any+ trailer< '.' no_call='xreadlines' > >
"""
def transform(self, node, results):
no_call = results.get("no_call")
if no_call:
no_call.replace(Name("__iter__", prefix=no_call.get_prefix()))
else:
node.replace([x.clone() for x in results["call"]])
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Pattern compiler.
The grammar is taken from PatternGrammar.txt.
The compiler compiles a pattern to a pytree.*Pattern instance.
"""
__author__ = "Guido van Rossum <guido@python.org>"
# Python imports
import os
# Fairly local imports
from .pgen2 import driver
from .pgen2 import literals
from .pgen2 import token
from .pgen2 import tokenize
# Really local imports
from . import pytree
from . import pygram
# The pattern grammar file
_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
"PatternGrammar.txt")
def tokenize_wrapper(input):
"""Tokenizes a string suppressing significant whitespace."""
skip = (token.NEWLINE, token.INDENT, token.DEDENT)
tokens = tokenize.generate_tokens(driver.generate_lines(input).next)
for quintuple in tokens:
type, value, start, end, line_text = quintuple
if type not in skip:
yield quintuple
class PatternCompiler(object):
def __init__(self, grammar_file=_PATTERN_GRAMMAR_FILE):
"""Initializer.
Takes an optional alternative filename for the pattern grammar.
"""
self.grammar = driver.load_grammar(grammar_file)
self.syms = pygram.Symbols(self.grammar)
self.pygrammar = pygram.python_grammar
self.pysyms = pygram.python_symbols
self.driver = driver.Driver(self.grammar, convert=pattern_convert)
def compile_pattern(self, input, debug=False):
"""Compiles a pattern string to a nested pytree.*Pattern object."""
tokens = tokenize_wrapper(input)
root = self.driver.parse_tokens(tokens, debug=debug)
return self.compile_node(root)
def compile_node(self, node):
"""Compiles a node, recursively.
This is one big switch on the node type.
"""
# XXX Optimize certain Wildcard-containing-Wildcard patterns
# that can be merged
if node.type == self.syms.Matcher:
node = node.children[0] # Avoid unneeded recursion
if node.type == self.syms.Alternatives:
# Skip the odd children since they are just '|' tokens
alts = [self.compile_node(ch) for ch in node.children[::2]]
if len(alts) == 1:
return alts[0]
p = pytree.WildcardPattern([[a] for a in alts], min=1, max=1)
return p.optimize()
if node.type == self.syms.Alternative:
units = [self.compile_node(ch) for ch in node.children]
if len(units) == 1:
return units[0]
p = pytree.WildcardPattern([units], min=1, max=1)
return p.optimize()
if node.type == self.syms.NegatedUnit:
pattern = self.compile_basic(node.children[1:])
p = pytree.NegatedPattern(pattern)
return p.optimize()
assert node.type == self.syms.Unit
name = None
nodes = node.children
if len(nodes) >= 3 and nodes[1].type == token.EQUAL:
name = nodes[0].value
nodes = nodes[2:]
repeat = None
if len(nodes) >= 2 and nodes[-1].type == self.syms.Repeater:
repeat = nodes[-1]
nodes = nodes[:-1]
# Now we've reduced it to: STRING | NAME [Details] | (...) | [...]
pattern = self.compile_basic(nodes, repeat)
if repeat is not None:
assert repeat.type == self.syms.Repeater
children = repeat.children
child = children[0]
if child.type == token.STAR:
min = 0
max = pytree.HUGE
elif child.type == token.PLUS:
min = 1
max = pytree.HUGE
elif child.type == token.LBRACE:
assert children[-1].type == token.RBRACE
assert len(children) in (3, 5)
min = max = self.get_int(children[1])
if len(children) == 5:
max = self.get_int(children[3])
else:
assert False
if min != 1 or max != 1:
pattern = pattern.optimize()
pattern = pytree.WildcardPattern([[pattern]], min=min, max=max)
if name is not None:
pattern.name = name
return pattern.optimize()
def compile_basic(self, nodes, repeat=None):
# Compile STRING | NAME [Details] | (...) | [...]
assert len(nodes) >= 1
node = nodes[0]
if node.type == token.STRING:
value = literals.evalString(node.value)
return pytree.LeafPattern(content=value)
elif node.type == token.NAME:
value = node.value
if value.isupper():
if value not in TOKEN_MAP:
raise SyntaxError("Invalid token: %r" % value)
return pytree.LeafPattern(TOKEN_MAP[value])
else:
if value == "any":
type = None
elif not value.startswith("_"):
type = getattr(self.pysyms, value, None)
if type is None:
raise SyntaxError("Invalid symbol: %r" % value)
if nodes[1:]: # Details present
content = [self.compile_node(nodes[1].children[1])]
else:
content = None
return pytree.NodePattern(type, content)
elif node.value == "(":
return self.compile_node(nodes[1])
elif node.value == "[":
assert repeat is None
subpattern = self.compile_node(nodes[1])
return pytree.WildcardPattern([[subpattern]], min=0, max=1)
assert False, node
def get_int(self, node):
assert node.type == token.NUMBER
return int(node.value)
# Map named tokens to the type value for a LeafPattern
TOKEN_MAP = {"NAME": token.NAME,
"STRING": token.STRING,
"NUMBER": token.NUMBER,
"TOKEN": None}
def pattern_convert(grammar, raw_node_info):
"""Converts raw node information to a Node or Leaf instance."""
type, value, context, children = raw_node_info
if children or type in grammar.number2symbol:
return pytree.Node(type, children, context=context)
else:
return pytree.Leaf(type, value, context=context)
def compile_pattern(pattern):
return PatternCompiler().compile_pattern(pattern)
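# Illustrative usage (a sketch; the pattern string is adapted from the
# xrange fixer earlier in this change, and `node` stands for any
# pytree.Node to be tested):
#
#   pat = compile_pattern("power< 'xrange' trailer< '(' [any] ')' > >")
#   results = {}
#   if pat.match(node, results):
#       ...  # named subpatterns, if any, are recorded in `results`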
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""The pgen2 package."""
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Convert graminit.[ch] spit out by pgen to Python code.
Pgen is the Python parser generator. It is useful to quickly create a
parser from a grammar file in Python's grammar notation. But I don't
want my parsers to be written in C (yet), so I'm translating the
parsing tables to Python data structures and writing a Python parse
engine.
Note that the token numbers are constants determined by the standard
Python tokenizer. The standard token module defines these numbers and
their names (the names are not used much). The token numbers are
hardcoded into the Python tokenizer and into pgen. A Python
implementation of the Python tokenizer is also available, in the
standard tokenize module.
On the other hand, symbol numbers (representing the grammar's
non-terminals) are assigned by pgen based on the actual grammar
input.
Note: this module is pretty much obsolete; the pgen module generates
equivalent grammar tables directly from the Grammar.txt input file
without having to invoke the Python pgen C program.
"""
# Python imports
import re
# Local imports
from . import grammar, token
class Converter(grammar.Grammar):
"""Grammar subclass that reads classic pgen output files.
The run() method reads the tables as produced by the pgen parser
generator, typically contained in two C files, graminit.h and
graminit.c. The other methods are for internal use only.
See the base class for more documentation.
"""
def run(self, graminit_h, graminit_c):
"""Load the grammar tables from the text files written by pgen."""
self.parse_graminit_h(graminit_h)
self.parse_graminit_c(graminit_c)
self.finish_off()
def parse_graminit_h(self, filename):
"""Parse the .h file writen by pgen. (Internal)
This file is a sequence of #define statements defining the
nonterminals of the grammar as numbers. We build two tables
mapping the numbers to names and back.
"""
try:
f = open(filename)
except IOError, err:
print "Can't open %s: %s" % (filename, err)
return False
self.symbol2number = {}
self.number2symbol = {}
lineno = 0
for line in f:
lineno += 1
mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line)
if not mo and line.strip():
print "%s(%s): can't parse %s" % (filename, lineno,
line.strip())
elif mo:  # blank lines match neither branch and must not reach mo.groups()
symbol, number = mo.groups()
number = int(number)
assert symbol not in self.symbol2number
assert number not in self.number2symbol
self.symbol2number[symbol] = number
self.number2symbol[number] = symbol
return True
def parse_graminit_c(self, filename):
"""Parse the .c file writen by pgen. (Internal)
The file looks as follows. The first two lines are always this:
#include "pgenheaders.h"
#include "grammar.h"
After that come four blocks:
1) one or more state definitions
2) a table defining dfas
3) a table defining labels
4) a struct defining the grammar
A state definition has the following form:
- one or more arc arrays, each of the form:
static arc arcs_<n>_<m>[<k>] = {
{<i>, <j>},
...
};
- followed by a state array, of the form:
static state states_<s>[<t>] = {
{<k>, arcs_<n>_<m>},
...
};
"""
try:
f = open(filename)
except IOError, err:
print "Can't open %s: %s" % (filename, err)
return False
# The code below essentially uses f's iterator-ness!
lineno = 0
# Expect the two #include lines
lineno, line = lineno+1, f.next()
assert line == '#include "pgenheaders.h"\n', (lineno, line)
lineno, line = lineno+1, f.next()
assert line == '#include "grammar.h"\n', (lineno, line)
# Parse the state definitions
lineno, line = lineno+1, f.next()
allarcs = {}
states = []
while line.startswith("static arc "):
while line.startswith("static arc "):
mo = re.match(r"static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$",
line)
assert mo, (lineno, line)
n, m, k = map(int, mo.groups())
arcs = []
for _ in range(k):
lineno, line = lineno+1, f.next()
mo = re.match(r"\s+{(\d+), (\d+)},$", line)
assert mo, (lineno, line)
i, j = map(int, mo.groups())
arcs.append((i, j))
lineno, line = lineno+1, f.next()
assert line == "};\n", (lineno, line)
allarcs[(n, m)] = arcs
lineno, line = lineno+1, f.next()
mo = re.match(r"static state states_(\d+)\[(\d+)\] = {$", line)
assert mo, (lineno, line)
s, t = map(int, mo.groups())
assert s == len(states), (lineno, line)
state = []
for _ in range(t):
lineno, line = lineno+1, f.next()
mo = re.match(r"\s+{(\d+), arcs_(\d+)_(\d+)},$", line)
assert mo, (lineno, line)
k, n, m = map(int, mo.groups())
arcs = allarcs[n, m]
assert k == len(arcs), (lineno, line)
state.append(arcs)
states.append(state)
lineno, line = lineno+1, f.next()
assert line == "};\n", (lineno, line)
lineno, line = lineno+1, f.next()
self.states = states
# Parse the dfas
dfas = {}
mo = re.match(r"static dfa dfas\[(\d+)\] = {$", line)
assert mo, (lineno, line)
ndfas = int(mo.group(1))
for i in range(ndfas):
lineno, line = lineno+1, f.next()
mo = re.match(r'\s+{(\d+), "(\w+)", (\d+), (\d+), states_(\d+),$',
line)
assert mo, (lineno, line)
symbol = mo.group(2)
number, x, y, z = map(int, mo.group(1, 3, 4, 5))
assert self.symbol2number[symbol] == number, (lineno, line)
assert self.number2symbol[number] == symbol, (lineno, line)
assert x == 0, (lineno, line)
state = states[z]
assert y == len(state), (lineno, line)
lineno, line = lineno+1, f.next()
mo = re.match(r'\s+("(?:\\\d\d\d)*")},$', line)
assert mo, (lineno, line)
first = {}
rawbitset = eval(mo.group(1))
for i, c in enumerate(rawbitset):
byte = ord(c)
for j in range(8):
if byte & (1<<j):
first[i*8 + j] = 1
dfas[number] = (state, first)
lineno, line = lineno+1, f.next()
assert line == "};\n", (lineno, line)
self.dfas = dfas
# Parse the labels
labels = []
lineno, line = lineno+1, f.next()
mo = re.match(r"static label labels\[(\d+)\] = {$", line)
assert mo, (lineno, line)
nlabels = int(mo.group(1))
for i in range(nlabels):
lineno, line = lineno+1, f.next()
mo = re.match(r'\s+{(\d+), (0|"\w+")},$', line)
assert mo, (lineno, line)
x, y = mo.groups()
x = int(x)
if y == "0":
y = None
else:
y = eval(y)
labels.append((x, y))
lineno, line = lineno+1, f.next()
assert line == "};\n", (lineno, line)
self.labels = labels
# Parse the grammar struct
lineno, line = lineno+1, f.next()
assert line == "grammar _PyParser_Grammar = {\n", (lineno, line)
lineno, line = lineno+1, f.next()
mo = re.match(r"\s+(\d+),$", line)
assert mo, (lineno, line)
ndfas = int(mo.group(1))
assert ndfas == len(self.dfas)
lineno, line = lineno+1, f.next()
assert line == "\tdfas,\n", (lineno, line)
lineno, line = lineno+1, f.next()
mo = re.match(r"\s+{(\d+), labels},$", line)
assert mo, (lineno, line)
nlabels = int(mo.group(1))
assert nlabels == len(self.labels), (lineno, line)
lineno, line = lineno+1, f.next()
mo = re.match(r"\s+(\d+)$", line)
assert mo, (lineno, line)
start = int(mo.group(1))
assert start in self.number2symbol, (lineno, line)
self.start = start
lineno, line = lineno+1, f.next()
assert line == "};\n", (lineno, line)
try:
lineno, line = lineno+1, f.next()
except StopIteration:
pass
else:
assert 0, (lineno, line)
def finish_off(self):
"""Create additional useful structures. (Internal)."""
self.keywords = {} # map from keyword strings to arc labels
self.tokens = {} # map from numeric token values to arc labels
for ilabel, (type, value) in enumerate(self.labels):
if type == token.NAME and value is not None:
self.keywords[value] = ilabel
elif value is None:
self.tokens[type] = ilabel
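# Illustrative outcome (a sketch): after finish_off(), self.keywords
# maps keyword strings such as "if" to the arc labels that accept them,
# and self.tokens maps numeric token types such as token.NUMBER to
# theirs; parse.py's classify() consults exactly these two tables.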
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Modifications:
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Parser driver.
This provides a high-level interface to parse a file into a syntax tree.
"""
__author__ = "Guido van Rossum <guido@python.org>"
__all__ = ["Driver", "load_grammar"]
# Python imports
import os
import logging
import sys
# Pgen imports
from . import grammar, parse, token, tokenize
class Driver(object):
def __init__(self, grammar, convert=None, logger=None):
self.grammar = grammar
if logger is None:
logger = logging.getLogger()
self.logger = logger
self.convert = convert
def parse_tokens(self, tokens, debug=False):
"""Parse a series of tokens and return the syntax tree."""
# XXX Move the prefix computation into a wrapper around tokenize.
p = parse.Parser(self.grammar, self.convert)
p.setup()
lineno = 1
column = 0
type = value = start = end = line_text = None
prefix = ""
for quintuple in tokens:
type, value, start, end, line_text = quintuple
if start != (lineno, column):
assert (lineno, column) <= start, ((lineno, column), start)
s_lineno, s_column = start
if lineno < s_lineno:
prefix += "\n" * (s_lineno - lineno)
lineno = s_lineno
column = 0
if column < s_column:
prefix += line_text[column:s_column]
column = s_column
if type in (tokenize.COMMENT, tokenize.NL):
prefix += value
lineno, column = end
if value.endswith("\n"):
lineno += 1
column = 0
continue
if type == token.OP:
type = grammar.opmap[value]
if debug:
self.logger.debug("%s %r (prefix=%r)",
token.tok_name[type], value, prefix)
if p.addtoken(type, value, (prefix, start)):
if debug:
self.logger.debug("Stop.")
break
prefix = ""
lineno, column = end
if value.endswith("\n"):
lineno += 1
column = 0
else:
# We never broke out -- EOF is too soon (how can this happen???)
raise parse.ParseError("incomplete input",
type, value, (prefix, start))
return p.rootnode
def parse_stream_raw(self, stream, debug=False):
"""Parse a stream and return the syntax tree."""
tokens = tokenize.generate_tokens(stream.readline)
return self.parse_tokens(tokens, debug)
def parse_stream(self, stream, debug=False):
"""Parse a stream and return the syntax tree."""
return self.parse_stream_raw(stream, debug)
def parse_file(self, filename, debug=False):
"""Parse a file and return the syntax tree."""
stream = open(filename)
try:
return self.parse_stream(stream, debug)
finally:
stream.close()
def parse_string(self, text, debug=False):
"""Parse a string and return the syntax tree."""
tokens = tokenize.generate_tokens(generate_lines(text).next)
return self.parse_tokens(tokens, debug)
def generate_lines(text):
"""Generator that behaves like readline without using StringIO."""
for line in text.splitlines(True):
yield line
while True:
yield ""
def load_grammar(gt="Grammar.txt", gp=None,
save=True, force=False, logger=None):
"""Load the grammar (maybe from a pickle)."""
if logger is None:
logger = logging.getLogger()
if gp is None:
head, tail = os.path.splitext(gt)
if tail == ".txt":
tail = ""
gp = head + tail + ".".join(map(str, sys.version_info)) + ".pickle"
if force or not _newer(gp, gt):
logger.info("Generating grammar tables from %s", gt)
from . import pgen
g = pgen.generate_grammar(gt)
if save:
logger.info("Writing grammar tables to %s", gp)
g.dump(gp)
else:
g = grammar.Grammar()
g.load(gp)
return g
def _newer(a, b):
"""Inquire whether file a was written since file b."""
if not os.path.exists(a):
return False
if not os.path.exists(b):
return True
return os.path.getmtime(a) >= os.path.getmtime(b)
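# Illustrative usage of this module (a sketch, not part of the original
# source; assumes a Grammar.txt next to the caller and the lib2to3
# package context for pytree):
#
#   from lib2to3 import pytree
#   g = load_grammar("Grammar.txt")        # pickled on first use
#   d = Driver(g, convert=pytree.convert)  # build pytree Nodes/Leaves
#   tree = d.parse_string("x = 1\n")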
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""This module defines the data structures used to represent a grammar.
These are a bit arcane because they are derived from the data
structures used by Python's 'pgen' parser generator.
There's also a table here mapping operators to their names in the
token module; the Python tokenize module reports all operators as the
fallback token code OP, but the parser needs the actual token code.
"""
# Python imports
import pickle
# Local imports
from . import token, tokenize
class Grammar(object):
"""Pgen parsing tables tables conversion class.
Once initialized, this class supplies the grammar tables for the
parsing engine implemented by parse.py. The parsing engine
accesses the instance variables directly. The class here does not
provide initialization of the tables; several subclasses exist to
do this (see the conv and pgen modules).
The load() method reads the tables from a pickle file, which is
much faster than the other ways offered by subclasses. The pickle
file is written by calling dump() (after loading the grammar
tables using a subclass). The report() method prints a readable
representation of the tables to stdout, for debugging.
The instance variables are as follows:
symbol2number -- a dict mapping symbol names to numbers. Symbol
numbers are always 256 or higher, to distinguish
them from token numbers, which are between 0 and
255 (inclusive).
number2symbol -- a dict mapping numbers to symbol names;
these two are each other's inverse.
states -- a list of DFAs, where each DFA is a list of
states, each state is a list of arcs, and each
arc is an (i, j) pair where i is a label and j is
a state number. The DFA number is the index into
this list. (This name is slightly confusing.)
Final states are represented by a special arc of
the form (0, j) where j is its own state number.
dfas -- a dict mapping symbol numbers to (DFA, first)
pairs, where DFA is an item from the states list
above, and first is a set of tokens that can
begin this grammar rule (represented by a dict
whose values are always 1).
labels -- a list of (x, y) pairs where x is either a token
number or a symbol number, and y is either None
or a string; the strings are keywords. The label
number is the index in this list; label numbers
are used to mark state transitions (arcs) in the
DFAs.
start -- the number of the grammar's start symbol.
keywords -- a dict mapping keyword strings to arc labels.
tokens -- a dict mapping token numbers to arc labels.
"""
def __init__(self):
self.symbol2number = {}
self.number2symbol = {}
self.states = []
self.dfas = {}
self.labels = [(0, "EMPTY")]
self.keywords = {}
self.tokens = {}
self.symbol2label = {}
self.start = 256
def dump(self, filename):
"""Dump the grammar tables to a pickle file."""
f = open(filename, "wb")
pickle.dump(self.__dict__, f, 2)
f.close()
def load(self, filename):
"""Load the grammar tables from a pickle file."""
f = open(filename, "rb")
d = pickle.load(f)
f.close()
self.__dict__.update(d)
def report(self):
"""Dump the grammar tables to standard output, for debugging."""
from pprint import pprint
print "s2n"
pprint(self.symbol2number)
print "n2s"
pprint(self.number2symbol)
print "states"
pprint(self.states)
print "dfas"
pprint(self.dfas)
print "labels"
pprint(self.labels)
print "start", self.start
# Map from operator to number (since tokenize doesn't do this)
opmap_raw = """
( LPAR
) RPAR
[ LSQB
] RSQB
: COLON
, COMMA
; SEMI
+ PLUS
- MINUS
* STAR
/ SLASH
| VBAR
& AMPER
< LESS
> GREATER
= EQUAL
. DOT
% PERCENT
` BACKQUOTE
{ LBRACE
} RBRACE
@ AT
== EQEQUAL
!= NOTEQUAL
<> NOTEQUAL
<= LESSEQUAL
>= GREATEREQUAL
~ TILDE
^ CIRCUMFLEX
<< LEFTSHIFT
>> RIGHTSHIFT
** DOUBLESTAR
+= PLUSEQUAL
-= MINEQUAL
*= STAREQUAL
/= SLASHEQUAL
%= PERCENTEQUAL
&= AMPEREQUAL
|= VBAREQUAL
^= CIRCUMFLEXEQUAL
<<= LEFTSHIFTEQUAL
>>= RIGHTSHIFTEQUAL
**= DOUBLESTAREQUAL
// DOUBLESLASH
//= DOUBLESLASHEQUAL
-> RARROW
"""
opmap = {}
for line in opmap_raw.splitlines():
if line:
op, name = line.split()
opmap[op] = getattr(token, name)
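# For example (illustrative): opmap["+="] is token.PLUSEQUAL; this is
# how driver.parse_tokens() narrows the tokenizer's generic OP tokens
# to the specific codes the parsing tables expect.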
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Safely evaluate Python string literals without using eval()."""
import re
simple_escapes = {"a": "\a",
"b": "\b",
"f": "\f",
"n": "\n",
"r": "\r",
"t": "\t",
"v": "\v",
"'": "'",
'"': '"',
"\\": "\\"}
def escape(m):
all, tail = m.group(0, 1)
assert all.startswith("\\")
esc = simple_escapes.get(tail)
if esc is not None:
return esc
if tail.startswith("x"):
hexes = tail[1:]
if len(hexes) < 2:
raise ValueError("invalid hex string escape ('\\%s')" % tail)
try:
i = int(hexes, 16)
except ValueError:
raise ValueError("invalid hex string escape ('\\%s')" % tail)
else:
try:
i = int(tail, 8)
except ValueError:
raise ValueError("invalid octal string escape ('\\%s')" % tail)
return chr(i)
def evalString(s):
assert s.startswith("'") or s.startswith('"'), repr(s[:1])
q = s[0]
if s[:3] == q*3:
q = q*3
assert s.endswith(q), repr(s[-len(q):])
assert len(s) >= 2*len(q)
s = s[len(q):-len(q)]
return re.sub(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})", escape, s)
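# Illustrative behavior (hypothetical inputs):
#   evalString("'a\\nb'")   -> "a" + newline + "b"
#   evalString('"""x"""')   -> "x"
#   evalString("'\\x41'")   -> "A"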
def test():
for i in range(256):
c = chr(i)
s = repr(c)
e = evalString(s)
if e != c:
print i, c, s, e
if __name__ == "__main__":
test()
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Parser engine for the grammar tables generated by pgen.
The grammar table must be loaded first.
See Parser/parser.c in the Python distribution for additional info on
how this parsing engine works.
"""
# Get a usable 'set' constructor
try:
set
except NameError:
from sets import Set as set
# Local imports
from . import token
class ParseError(Exception):
"""Exception to signal the parser is stuck."""
def __init__(self, msg, type, value, context):
Exception.__init__(self, "%s: type=%r, value=%r, context=%r" %
(msg, type, value, context))
self.msg = msg
self.type = type
self.value = value
self.context = context
class Parser(object):
"""Parser engine.
The proper usage sequence is:
p = Parser(grammar, [converter]) # create instance
p.setup([start]) # prepare for parsing
<for each input token>:
if p.addtoken(...): # parse a token; may raise ParseError
break
root = p.rootnode # root of abstract syntax tree
A Parser instance may be reused by calling setup() repeatedly.
A Parser instance contains state pertaining to the current token
sequence, and should not be used concurrently by different threads
to parse separate token sequences.
See driver.py for how to get input tokens by tokenizing a file or
string.
Parsing is complete when addtoken() returns True; the root of the
abstract syntax tree can then be retrieved from the rootnode
instance variable. When a syntax error occurs, addtoken() raises
the ParseError exception. There is no error recovery; the parser
cannot be used after a syntax error was reported (but it can be
reinitialized by calling setup()).
"""
def __init__(self, grammar, convert=None):
"""Constructor.
The grammar argument is a grammar.Grammar instance; see the
grammar module for more information.
The parser is not ready yet for parsing; you must call the
setup() method to get it started.
The optional convert argument is a function mapping concrete
syntax tree nodes to abstract syntax tree nodes. If not
given, no conversion is done and the syntax tree produced is
the concrete syntax tree. If given, it must be a function of
two arguments, the first being the grammar (a grammar.Grammar
instance), and the second being the concrete syntax tree node
to be converted. The syntax tree is converted from the bottom
up.
A concrete syntax tree node is a (type, value, context, nodes)
tuple, where type is the node type (a token or symbol number),
value is None for symbols and a string for tokens, context is
None or an opaque value used for error reporting (typically a
(lineno, offset) pair), and nodes is a list of children for
symbols, and None for tokens.
An abstract syntax tree node may be anything; this is entirely
up to the converter function.
"""
self.grammar = grammar
self.convert = convert or (lambda grammar, node: node)
def setup(self, start=None):
"""Prepare for parsing.
This *must* be called before starting to parse.
The optional argument is an alternative start symbol; it
defaults to the grammar's start symbol.
You can use a Parser instance to parse any number of programs;
each time you call setup() the parser is reset to an initial
state determined by the (implicit or explicit) start symbol.
"""
if start is None:
start = self.grammar.start
# Each stack entry is a tuple: (dfa, state, node).
# A node is a tuple: (type, value, context, children),
# where children is a list of nodes or None, and context may be None.
newnode = (start, None, None, [])
stackentry = (self.grammar.dfas[start], 0, newnode)
self.stack = [stackentry]
self.rootnode = None
self.used_names = set() # Aliased to self.rootnode.used_names in pop()
def addtoken(self, type, value, context):
"""Add a token; return True iff this is the end of the program."""
# Map from token to label
ilabel = self.classify(type, value, context)
# Loop until the token is shifted; may raise exceptions
while True:
dfa, state, node = self.stack[-1]
states, first = dfa
arcs = states[state]
# Look for a state with this label
for i, newstate in arcs:
t, v = self.grammar.labels[i]
if ilabel == i:
# Look it up in the list of labels
assert t < 256
# Shift a token; we're done with it
self.shift(type, value, newstate, context)
# Pop while we are in an accept-only state
state = newstate
while states[state] == [(0, state)]:
self.pop()
if not self.stack:
# Done parsing!
return True
dfa, state, node = self.stack[-1]
states, first = dfa
# Done with this token
return False
elif t >= 256:
# See if it's a symbol and if we're in its first set
itsdfa = self.grammar.dfas[t]
itsstates, itsfirst = itsdfa
if ilabel in itsfirst:
# Push a symbol
self.push(t, self.grammar.dfas[t], newstate, context)
break # To continue the outer while loop
else:
if (0, state) in arcs:
# An accepting state, pop it and try something else
self.pop()
if not self.stack:
# Done parsing, but another token is input
raise ParseError("too much input",
type, value, context)
else:
# No success finding a transition
raise ParseError("bad input", type, value, context)
def classify(self, type, value, context):
"""Turn a token into a label. (Internal)"""
if type == token.NAME:
# Keep a listing of all used names
self.used_names.add(value)
# Check for reserved words
ilabel = self.grammar.keywords.get(value)
if ilabel is not None:
return ilabel
ilabel = self.grammar.tokens.get(type)
if ilabel is None:
raise ParseError("bad token", type, value, context)
return ilabel
def shift(self, type, value, newstate, context):
"""Shift a token. (Internal)"""
dfa, state, node = self.stack[-1]
newnode = (type, value, context, None)
newnode = self.convert(self.grammar, newnode)
if newnode is not None:
node[-1].append(newnode)
self.stack[-1] = (dfa, newstate, node)
def push(self, type, newdfa, newstate, context):
"""Push a nonterminal. (Internal)"""
dfa, state, node = self.stack[-1]
newnode = (type, None, context, [])
self.stack[-1] = (dfa, newstate, node)
self.stack.append((newdfa, 0, newnode))
def pop(self):
"""Pop a nonterminal. (Internal)"""
popdfa, popstate, popnode = self.stack.pop()
newnode = self.convert(self.grammar, popnode)
if newnode is not None:
if self.stack:
dfa, state, node = self.stack[-1]
node[-1].append(newnode)
else:
self.rootnode = newnode
self.rootnode.used_names = self.used_names
#! /usr/bin/env python
"""Token constants (from "token.h")."""
# Taken from Python (r53757) and modified to include some tokens
# originally monkeypatched in by pgen2.tokenize
#--start constants--
ENDMARKER = 0
NAME = 1
NUMBER = 2
STRING = 3
NEWLINE = 4
INDENT = 5
DEDENT = 6
LPAR = 7
RPAR = 8
LSQB = 9
RSQB = 10
COLON = 11
COMMA = 12
SEMI = 13
PLUS = 14
MINUS = 15
STAR = 16
SLASH = 17
VBAR = 18
AMPER = 19
LESS = 20
GREATER = 21
EQUAL = 22
DOT = 23
PERCENT = 24
BACKQUOTE = 25
LBRACE = 26
RBRACE = 27
EQEQUAL = 28
NOTEQUAL = 29
LESSEQUAL = 30
GREATEREQUAL = 31
TILDE = 32
CIRCUMFLEX = 33
LEFTSHIFT = 34
RIGHTSHIFT = 35
DOUBLESTAR = 36
PLUSEQUAL = 37
MINEQUAL = 38
STAREQUAL = 39
SLASHEQUAL = 40
PERCENTEQUAL = 41
AMPEREQUAL = 42
VBAREQUAL = 43
CIRCUMFLEXEQUAL = 44
LEFTSHIFTEQUAL = 45
RIGHTSHIFTEQUAL = 46
DOUBLESTAREQUAL = 47
DOUBLESLASH = 48
DOUBLESLASHEQUAL = 49
AT = 50
OP = 51
COMMENT = 52
NL = 53
RARROW = 54
ERRORTOKEN = 55
N_TOKENS = 56
NT_OFFSET = 256
#--end constants--
tok_name = {}
for _name, _value in globals().items():
if type(_value) is type(0):
tok_name[_value] = _name
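# For example (illustrative): tok_name[NAME] == "NAME" and
# tok_name[OP] == "OP"; driver.py uses this table when debug-logging
# shifted tokens.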
def ISTERMINAL(x):
return x < NT_OFFSET
def ISNONTERMINAL(x):
return x >= NT_OFFSET
def ISEOF(x):
return x == ENDMARKER
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Export the Python grammar and symbols."""
# Python imports
import os
# Local imports
from .pgen2 import token
from .pgen2 import driver
from . import pytree
# The grammar file
_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt")
class Symbols(object):
def __init__(self, grammar):
"""Initializer.
Creates an attribute for each grammar symbol (nonterminal),
whose value is the symbol's type (an int >= 256).
"""
for name, symbol in grammar.symbol2number.iteritems():
setattr(self, name, symbol)
python_grammar = driver.load_grammar(_GRAMMAR_FILE)
python_symbols = Symbols(python_grammar)
def parenthesize(node):
return pytree.Node(python_symbols.atom,
(pytree.Leaf(token.LPAR, "("),
node,
pytree.Leaf(token.RPAR, ")")))
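# Illustrative usage (a sketch): python_symbols.funcdef is the numeric
# type assigned to the 'funcdef' nonterminal, so a fixer can test
# node.type == python_symbols.funcdef, and parenthesize(node) wraps any
# node in an atom carrying literal parentheses.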
"""Make tests/ into a package. This allows us to "import tests" and
have tests.all_tests be a TestSuite representing all test cases
from all test_*.py files in tests/."""
# Author: Collin Winter
import os
import os.path
import unittest
import types
from . import support
all_tests = unittest.TestSuite()
tests_dir = os.path.join(os.path.dirname(__file__), '..', 'tests')
tests = [t[0:-3] for t in os.listdir(tests_dir)
if t.startswith('test_') and t.endswith('.py')]
loader = unittest.TestLoader()
for t in tests:
__import__("",globals(),locals(),[t],level=1)
mod = globals()[t]
all_tests.addTests(loader.loadTestsFromModule(mod))
#!/usr/bin/env python2.5
"""
This is a benchmarking script to test the speed of 2to3's pattern matching
system. It's equivalent to "refactor.py -f all" for every Python module
in sys.modules, but without engaging the actual transformations.
"""
__author__ = "Collin Winter <collinw at gmail.com>"
# Python imports
import os.path
import sys
from time import time
# Test imports
from support import adjust_path
adjust_path()
# Local imports
from .. import refactor
### Mock code for refactor.py and the fixers
###############################################################################
class Options:
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
self.verbose = False
def dummy_transform(*args, **kwargs):
pass
### Collect list of modules to match against
###############################################################################
files = []
for mod in sys.modules.values():
if mod is None or not hasattr(mod, '__file__'):
continue
f = mod.__file__
if f.endswith('.pyc'):
f = f[:-1]
if f.endswith('.py'):
files.append(f)
### Set up refactor and run the benchmark
###############################################################################
options = Options(fix=["all"], print_function=False, doctests_only=False)
refactor = refactor.RefactoringTool(options)
for fixer in refactor.fixers:
# We don't want them to actually fix the tree, just match against it.
fixer.transform = dummy_transform
t = time()
for f in files:
print "Matching", f
refactor.refactor_file(f)
print "%d seconds to match %d files" % (time() - t, len(sys.modules))
#!/usr/bin/env python2.5
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Main program for testing the infrastructure."""
__author__ = "Guido van Rossum <guido@python.org>"
# Support imports (need to be imported first)
import support
# Python imports
import os
import sys
import logging
# Local imports
from .. import pytree
import pgen2
from pgen2 import driver
logging.basicConfig()
def main():
gr = driver.load_grammar("Grammar.txt")
dr = driver.Driver(gr, convert=pytree.convert)
fn = "example.py"
tree = dr.parse_file(fn, debug=True)
if not diff(fn, tree):
print "No diffs."
if not sys.argv[1:]:
return # Pass a dummy argument to run the complete test suite below
problems = []
# Process every imported module
for name in sys.modules:
mod = sys.modules[name]
if mod is None or not hasattr(mod, "__file__"):
continue
fn = mod.__file__
if fn.endswith(".pyc"):
fn = fn[:-1]
if not fn.endswith(".py"):
continue
print >>sys.stderr, "Parsing", fn
tree = dr.parse_file(fn, debug=True)
if diff(fn, tree):
problems.append(fn)
# Process every single module on sys.path (but not in packages)
for dir in sys.path:
try:
names = os.listdir(dir)
except os.error:
continue
print >>sys.stderr, "Scanning", dir, "..."
for name in names:
if not name.endswith(".py"):
continue
print >>sys.stderr, "Parsing", name
fn = os.path.join(dir, name)
try:
tree = dr.parse_file(fn, debug=True)
except pgen2.parse.ParseError, err:
print "ParseError:", err
else:
if diff(fn, tree):
problems.append(fn)
# Show summary of problem files
if not problems:
print "No problems. Congratulations!"
else:
print "Problems in following files:"
for fn in problems:
print "***", fn
def diff(fn, tree):
f = open("@", "w")
try:
f.write(str(tree))
finally:
f.close()
try:
return os.system("diff -u %s @" % fn)
finally:
os.remove("@")
if __name__ == "__main__":
main()
"""Support code for test_*.py files"""
# Author: Collin Winter
# Python imports
import unittest
import sys
import os
import os.path
import re
from textwrap import dedent
#sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
# Local imports
from .. import pytree
from ..pgen2 import driver
test_dir = os.path.dirname(__file__)
proj_dir = os.path.normpath(os.path.join(test_dir, ".."))
grammar_path = os.path.join(test_dir, "..", "Grammar.txt")
grammar = driver.load_grammar(grammar_path)
driver = driver.Driver(grammar, convert=pytree.convert)
def parse_string(string):
return driver.parse_string(reformat(string), debug=True)
# Python 2.3's TestSuite is not iter()-able
if sys.version_info < (2, 4):
def TestSuite_iter(self):
return iter(self._tests)
unittest.TestSuite.__iter__ = TestSuite_iter
def run_all_tests(test_mod=None, tests=None):
if tests is None:
tests = unittest.TestLoader().loadTestsFromModule(test_mod)
unittest.TextTestRunner(verbosity=2).run(tests)
def reformat(string):
return dedent(string) + "\n\n"
def all_project_files():
for dirpath, dirnames, filenames in os.walk(proj_dir):
for filename in filenames:
if filename.endswith(".py"):
yield os.path.join(dirpath, filename)
TestCase = unittest.TestCase
#!/usr/bin/env python2.5
"""Tests that run all fixer modules over an input stream.
This has been broken out into its own test module because of its
running time.
"""
# Author: Collin Winter
# Testing imports
try:
from . import support
except ImportError:
import support
# Python imports
import unittest
# Local imports
from .. import pytree
from .. import refactor
class Options:
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
self.verbose = False
class Test_all(support.TestCase):
def setUp(self):
options = Options(fix=["all", "idioms", "ws_comma"],
print_function=False)
self.refactor = refactor.RefactoringTool(options)
def test_all_project_files(self):
for filepath in support.all_project_files():
print "Fixing %s..." % filepath
self.refactor.refactor_string(open(filepath).read(), filepath)
if __name__ == "__main__":
import __main__
support.run_all_tests(__main__)