"""Tokenization help for Python programs.

tokenize(readline) is a generator that breaks a stream of bytes into
Python tokens.  It decodes the bytes according to PEP-0263 for
determining source file encoding.

It accepts a readline-like method which is called repeatedly to get the
next line of input (or b"" for EOF).  It generates 5-tuples with these
members:

    the token type (see token.py)
    the token (a string)
    the starting (row, column) indices of the token (a 2-tuple of ints)
    the ending (row, column) indices of the token (a 2-tuple of ints)
    the original line (string)

It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators.  Additionally, all token lists start with an ENCODING token
which tells you which encoding was used to decode the bytes stream.
"""

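# A minimal usage sketch (illustrative only): tokenize() is handed a
# readline callable that returns bytes, for example one bound to an
# in-memory buffer, and it yields the 5-tuples described above.
#
#     from io import BytesIO
#     readline = BytesIO(b"spam = 42\n").readline
#     for toktype, tokstring, start, end, line in tokenize(readline):
#         print(toktype, tokstring, start, end)
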
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
               'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
               'Michael Foord')
import builtins
import re
import sys
from token import *
from codecs import lookup, BOM_UTF8
import collections
from io import TextIOWrapper
cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)

import token
__all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
                           "NL", "untokenize", "ENCODING", "TokenInfo"]
del token

COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
ENCODING = N_TOKENS + 2
tok_name[ENCODING] = 'ENCODING'
N_TOKENS += 3
EXACT_TOKEN_TYPES = {
    '(':   LPAR,
    ')':   RPAR,
    '[':   LSQB,
    ']':   RSQB,
    ':':   COLON,
    ',':   COMMA,
    ';':   SEMI,
    '+':   PLUS,
    '-':   MINUS,
    '*':   STAR,
    '/':   SLASH,
    '|':   VBAR,
    '&':   AMPER,
    '<':   LESS,
    '>':   GREATER,
    '=':   EQUAL,
    '.':   DOT,
    '%':   PERCENT,
    '{':   LBRACE,
    '}':   RBRACE,
    '==':  EQEQUAL,
    '!=':  NOTEQUAL,
    '<=':  LESSEQUAL,
    '>=':  GREATEREQUAL,
    '~':   TILDE,
    '^':   CIRCUMFLEX,
    '<<':  LEFTSHIFT,
    '>>':  RIGHTSHIFT,
    '**':  DOUBLESTAR,
    '+=':  PLUSEQUAL,
    '-=':  MINEQUAL,
    '*=':  STAREQUAL,
    '/=':  SLASHEQUAL,
    '%=':  PERCENTEQUAL,
    '&=':  AMPEREQUAL,
    '|=':  VBAREQUAL,
    '^=':  CIRCUMFLEXEQUAL,
    '<<=': LEFTSHIFTEQUAL,
    '>>=': RIGHTSHIFTEQUAL,
    '**=': DOUBLESTAREQUAL,
    '//':  DOUBLESLASH,
    '//=': DOUBLESLASHEQUAL,
    '@':   AT
}

class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
    def __repr__(self):
        annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
        return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
                self._replace(type=annotated_type))

    @property
    def exact_type(self):
        if self.type == OP and self.string in EXACT_TOKEN_TYPES:
            return EXACT_TOKEN_TYPES[self.string]
        else:
            return self.type
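
# Example (illustrative): for an operator token the generic type is OP,
# while exact_type recovers the specific operator code.
#
#     from io import BytesIO
#     toks = list(tokenize(BytesIO(b"x += 1\n").readline))
#     op = next(tok for tok in toks if tok.type == OP)
#     # op.string == '+=' and op.exact_type == PLUSEQUAL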

def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
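
# For example (illustrative):
#     group('a', 'b') == '(a|b)'
#     any('x')        == '(x)*'
#     maybe('y')      == '(y)?'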

# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'

Hexnumber = r'0[xX][0-9a-fA-F]+'
Binnumber = r'0[bB][01]+'
Octnumber = r'0[oO][0-7]+'
Decnumber = r'(?:0+|[1-9][0-9]*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9]+'
Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent)
Expfloat = r'[0-9]+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)

StringPrefix = r'(?:[bB][rR]?|[rR][bB]?|[uU])?'

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
                 r"//=?", r"->",
                 r"[+\-*/%&|^=<>]=?",
                 r"~")

Bracket = '[][(){}]'
Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
Funny = group(Operator, Bracket, Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)

def _compile(expr):
    return re.compile(expr, re.UNICODE)

endpats = {"'": Single, '"': Double,
           "'''": Single3, '"""': Double3,
           "r'''": Single3, 'r"""': Double3,
           "b'''": Single3, 'b"""': Double3,
           "R'''": Single3, 'R"""': Double3,
           "B'''": Single3, 'B"""': Double3,
           "br'''": Single3, 'br"""': Double3,
           "bR'''": Single3, 'bR"""': Double3,
           "Br'''": Single3, 'Br"""': Double3,
           "BR'''": Single3, 'BR"""': Double3,
           "rb'''": Single3, 'rb"""': Double3,
           "Rb'''": Single3, 'Rb"""': Double3,
           "rB'''": Single3, 'rB"""': Double3,
           "RB'''": Single3, 'RB"""': Double3,
           "u'''": Single3, 'u"""': Double3,
           "U'''": Single3, 'U"""': Double3,
           'r': None, 'R': None, 'b': None, 'B': None,
           'u': None, 'U': None}

triple_quoted = {}
for t in ("'''", '"""',
          "r'''", 'r"""', "R'''", 'R"""',
          "b'''", 'b"""', "B'''", 'B"""',
          "br'''", 'br"""', "Br'''", 'Br"""',
          "bR'''", 'bR"""', "BR'''", 'BR"""',
          "rb'''", 'rb"""', "rB'''", 'rB"""',
          "Rb'''", 'Rb"""', "RB'''", 'RB"""',
          "u'''", 'u"""', "U'''", 'U"""',
          ):
    triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
          "r'", 'r"', "R'", 'R"',
          "b'", 'b"', "B'", 'B"',
          "br'", 'br"', "Br'", 'Br"',
          "bR'", 'bR"', "BR'", 'BR"',
          "rb'", 'rb"', "rB'", 'rB"',
          "Rb'", 'Rb"', "RB'", 'RB"',
          "u'", 'u"', "U'", 'U"',
          ):
    single_quoted[t] = t

tabsize = 8

class TokenError(Exception): pass

class StopTokenizing(Exception): pass


class Untokenizer:

    def __init__(self):
        self.tokens = []
        self.prev_row = 1
        self.prev_col = 0
        self.encoding = None

    def add_whitespace(self, start):
        row, col = start
        # Tokens must be emitted in source order: a token cannot start
        # before the end of the previous one.
        assert row >= self.prev_row
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        for t in iterable:
            if len(t) == 2:
                self.compat(t, iterable)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENCODING:
                self.encoding = token
                continue
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        startline = False
        indents = []
        toks_append = self.tokens.append
        toknum, tokval = token

        if toknum in (NAME, NUMBER):
            tokval += ' '
        if toknum in (NEWLINE, NL):
            startline = True
        prevstring = False
        for tok in iterable:
            toknum, tokval = tok[:2]
            if toknum == ENCODING:
                self.encoding = tokval
                continue

            if toknum in (NAME, NUMBER):
                tokval += ' '

            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False

            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)


def untokenize(iterable):
    """Transform tokens back into Python source code.
    It returns a bytes object, encoded using the ENCODING
    token, which is the first token sequence output by tokenize.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only those two elements are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output bytes will tokenize back to the input
        t1 = [tok[:2] for tok in tokenize(f.readline)]
        newcode = untokenize(t1)
        readline = BytesIO(newcode).readline
        t2 = [tok[:2] for tok in tokenize(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    out = ut.untokenize(iterable)
    if ut.encoding is not None:
        out = out.encode(ut.encoding)
    return out
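
# A round-trip sketch (illustrative only), mirroring the limited-input
# invariant documented above:
#
#     from io import BytesIO
#     source = b"if x:\n    y = 1\n"
#     t1 = [tok[:2] for tok in tokenize(BytesIO(source).readline)]
#     newcode = untokenize(t1)                       # bytes
#     t2 = [tok[:2] for tok in tokenize(BytesIO(newcode).readline)]
#     assert t1 == t2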


def _get_normal_name(orig_enc):
    """Imitates get_normal_name in tokenizer.c."""
    # Only care about the first 12 characters.
    enc = orig_enc[:12].lower().replace("_", "-")
    if enc == "utf-8" or enc.startswith("utf-8-"):
        return "utf-8"
    if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
       enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
        return "iso-8859-1"
    return orig_enc
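
# For example (illustrative): _get_normal_name("UTF-8") and
# _get_normal_name("utf-8-unix") both normalize to "utf-8", while
# _get_normal_name("Latin_1") normalizes to "iso-8859-1".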

def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file.  It requires one argument,
    readline, in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.

    It detects the encoding from the presence of a UTF-8 BOM or an encoding
    cookie as specified in PEP 263.  If both a BOM and a cookie are present,
    but disagree, a SyntaxError will be raised.  If the encoding cookie is an
    invalid charset, a SyntaxError will also be raised.  Note that if a UTF-8
    BOM is found, 'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    try:
        filename = readline.__self__.name
    except AttributeError:
        filename = None
    bom_found = False
    encoding = None
    default = 'utf-8'
    def read_or_stop():
        try:
            return readline()
        except StopIteration:
            return b''

    def find_cookie(line):
        try:
            # Decode as UTF-8. Either the line is an encoding declaration,
            # in which case it should be pure ASCII, or it must be UTF-8
            # per default encoding.
            line_string = line.decode('utf-8')
        except UnicodeDecodeError:
            msg = "invalid or missing encoding declaration"
            if filename is not None:
                msg = '{} for {!r}'.format(msg, filename)
            raise SyntaxError(msg)

        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            if filename is None:
                msg = "unknown encoding: " + encoding
            else:
                msg = "unknown encoding for {!r}: {}".format(filename,
                        encoding)
            raise SyntaxError(msg)

        if bom_found:
            if encoding != 'utf-8':
                # This behaviour mimics the Python interpreter
                if filename is None:
                    msg = 'encoding problem: utf-8'
                else:
                    msg = 'encoding problem for {!r}: utf-8'.format(filename)
                raise SyntaxError(msg)
            encoding += '-sig'
        return encoding

    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []

    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]

    second = read_or_stop()
    if not second:
        return default, [first]

    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]

    return default, [first, second]
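
# A minimal sketch (illustrative only) of detecting an encoding cookie from
# an in-memory buffer:
#
#     from io import BytesIO
#     buf = BytesIO(b"# -*- coding: latin-1 -*-\nprint('hi')\n")
#     encoding, consumed = detect_encoding(buf.readline)
#     # encoding == 'iso-8859-1'; consumed holds the raw line(s) already read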


def open(filename):
    """Open a file in read only mode using the encoding detected by
    detect_encoding().
    """
    buffer = builtins.open(filename, 'rb')
    encoding, lines = detect_encoding(buffer.readline)
    buffer.seek(0)
    text = TextIOWrapper(buffer, encoding, line_buffering=True)
    text.mode = 'r'
    return text
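
# Usage sketch (illustrative only, assuming a file 'example.py' exists on
# disk): the returned stream is already decoded with the detected encoding.
#
#     with open('example.py') as f:
#         source = f.read()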


def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects.  Each call to the function
    should return one line of input as bytes.  Alternatively, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile, 'rb').__next__  # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found.  The line passed is the
    logical line; continuation lines are included.

    The first token sequence will always be an ENCODING token
    which tells you which encoding was used to decode the bytes stream.
    """
    # This import is here to avoid problems when the itertools module is not
    # built yet and tokenize is imported.
    from itertools import chain, repeat
    encoding, consumed = detect_encoding(readline)
    rl_gen = iter(readline, b"")
    empty = repeat(b"")
    return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)
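
# A minimal sketch (illustrative only): the first token yielded is always
# ENCODING, and each TokenInfo exposes its fields as attributes.
#
#     from io import BytesIO
#     toks = list(tokenize(BytesIO(b"x = 1\n").readline))
#     assert toks[0].type == ENCODING
#     for tok in toks:
#         print(tok.type, tok.string, tok.start, tok.end)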


def _tokenize(readline, encoding):
    lnum = parenlev = continued = 0
    numchars = '0123456789'
    contstr, needcont = '', 0
    contline = None
    indents = [0]

    if encoding is not None:
        if encoding == "utf-8-sig":
            # BOM will already have been stripped.
            encoding = "utf-8"
        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
    while True:             # loop over lines in stream
        try:
            line = readline()
        except StopIteration:
            line = b''

        if encoding is not None:
            line = line.decode(encoding)
        lnum += 1
        pos, max = 0, len(line)

        if contstr:                            # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield TokenInfo(STRING, contstr + line[:end],
                       strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                yield TokenInfo(ERRORTOKEN, contstr + line,
                           strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ':
                    column += 1
                elif line[pos] == '\t':
                    column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f':
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break

            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    nl_pos = pos + len(comment_token)
                    yield TokenInfo(COMMENT, comment_token,
                           (lnum, pos), (lnum, pos + len(comment_token)), line)
                    yield TokenInfo(NL, line[nl_pos:],
                           (lnum, nl_pos), (lnum, len(line)), line)
                else:
                    yield TokenInfo((NL, COMMENT)[line[pos] == '#'], line[pos:],
                           (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]
                yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)

        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            pseudomatch = _compile(PseudoToken).match(line, pos)
            if pseudomatch:                                # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                if start == end:
                    continue
                token, initial = line[start:end], line[start]

                if (initial in numchars or                  # ordinary number
                    (initial == '.' and token != '.' and token != '...')):
                    yield TokenInfo(NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    yield TokenInfo(NL if parenlev > 0 else NEWLINE,
                           token, spos, epos, line)
                elif initial == '#':
                    assert not token.endswith("\n")
                    yield TokenInfo(COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = _compile(endpats[token])
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield TokenInfo(STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                    token[:2] in single_quoted or \
                    token[:3] in single_quoted:
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        endprog = _compile(endpats[initial] or
                                           endpats[token[1]] or
                                           endpats[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        yield TokenInfo(STRING, token, spos, epos, line)
                elif initial.isidentifier():               # ordinary name
                    yield TokenInfo(NAME, token, spos, epos, line)
                elif initial == '\\':                      # continued stmt
                    continued = 1
                else:
                    if initial in '([{':
                        parenlev += 1
                    elif initial in ')]}':
                        parenlev -= 1
                    yield TokenInfo(OP, token, spos, epos, line)
            else:
                yield TokenInfo(ERRORTOKEN, line[pos],
                           (lnum, pos), (lnum, pos+1), line)
                pos += 1

    for indent in indents[1:]:                 # pop remaining indent levels
        yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')


# An undocumented, backwards compatible, API for all the places in the standard
# library that expect to be able to use tokenize with strings
def generate_tokens(readline):
    return _tokenize(readline, None)
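
# For example (illustrative), tokenizing from a str-based readline; no
# ENCODING token is produced in this mode:
#
#     import io
#     for tok in generate_tokens(io.StringIO("x = 1\n").readline):
#         print(tok)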

def main():
    import argparse

    # Helper error handling routines
    def perror(message):
        print(message, file=sys.stderr)

    def error(message, filename=None, location=None):
        if location:
            args = (filename,) + location + (message,)
            perror("%s:%d:%d: error: %s" % args)
        elif filename:
            perror("%s: error: %s" % (filename, message))
        else:
            perror("error: %s" % message)
        sys.exit(1)

    # Parse the arguments and options
    parser = argparse.ArgumentParser(prog='python -m tokenize')
    parser.add_argument(dest='filename', nargs='?',
                        metavar='filename.py',
                        help='the file to tokenize; defaults to stdin')
    parser.add_argument('-e', '--exact', dest='exact', action='store_true',
                        help='display token names using the exact type')
    args = parser.parse_args()

    try:
        # Tokenize the input
        if args.filename:
            filename = args.filename
            with builtins.open(filename, 'rb') as f:
                tokens = list(tokenize(f.readline))
        else:
            filename = "<stdin>"
            tokens = _tokenize(sys.stdin.readline, None)

        # Output the tokenization
        for token in tokens:
            token_type = token.type
            if args.exact:
                token_type = token.exact_type
            token_range = "%d,%d-%d,%d:" % (token.start + token.end)
            print("%-20s%-15s%-15r" %
                  (token_range, tok_name[token_type], token.string))
    except IndentationError as err:
        line, column = err.args[1][1:3]
        error(err.args[0], filename, (line, column))
    except TokenError as err:
        line, column = err.args[1]
        error(err.args[0], filename, (line, column))
    except SyntaxError as err:
        error(err, filename)
    except IOError as err:
        error(err)
    except KeyboardInterrupt:
        print("interrupted\n")
    except Exception as err:
        perror("unexpected error: %s" % err)
        raise

if __name__ == "__main__":
    main()