  1. """Tokenization help for Python programs.
  2. generate_tokens(readline) is a generator that breaks a stream of
  3. text into Python tokens. It accepts a readline-like method which is called
  4. repeatedly to get the next line of input (or "" for EOF). It generates
  5. 5-tuples with these members:
  6. the token type (see token.py)
  7. the token (a string)
  8. the starting (row, column) indices of the token (a 2-tuple of ints)
  9. the ending (row, column) indices of the token (a 2-tuple of ints)
  10. the original line (string)
  11. It is designed to match the working of the Python tokenizer exactly, except
  12. that it produces COMMENT tokens for comments and gives type OP for all
  13. operators
  14. Older entry points
  15. tokenize_loop(readline, tokeneater)
  16. tokenize(readline, tokeneater=printtoken)
  17. are the same, except instead of generating tokens, tokeneater is a callback
  18. function to which the 5 fields described above are passed as 5 arguments,
  19. each time a new token is found."""
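
# Illustrative example (added for exposition; the token types are shown by
# name here, but the tuples actually carry the integer constants from
# token.py): tokenizing the source "x = 1\n" yields
#
#     (NAME,      'x',  (1, 0), (1, 1), 'x = 1\n')
#     (OP,        '=',  (1, 2), (1, 3), 'x = 1\n')
#     (NUMBER,    '1',  (1, 4), (1, 5), 'x = 1\n')
#     (NEWLINE,   '\n', (1, 5), (1, 6), 'x = 1\n')
#     (ENDMARKER, '',   (2, 0), (2, 0), '')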

__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = \
    'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'

import string, re
from token import *

import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["COMMENT", "tokenize",
           "generate_tokens", "NL"]
del x
del token

COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
N_TOKENS += 2

def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
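# For example: group('a', 'b') -> '(a|b)', any('a', 'b') -> '(a|b)*',
# maybe('a', 'b') -> '(a|b)?'.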

Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'

Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
Octnumber = r'0[0-7]*[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Hexnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
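# Examples (added for exposition): Number matches Python 2 literals such as
# '0xffL' (Hexnumber), '0777' (Octnumber), '42' (Decnumber), '3.14e-2'
# (Pointfloat with exponent) and '1j' (Imagnumber).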

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[uU]?[rR]?'''", '[uU]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
                 r"//=?",
                 r"[+\-*/%&|^=<>]=?",
                 r"~")

Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)

tokenprog, pseudoprog, single3prog, double3prog = map(
    re.compile, (Token, PseudoToken, Single3, Double3))
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
            "'''": single3prog, '"""': double3prog,
            "r'''": single3prog, 'r"""': double3prog,
            "u'''": single3prog, 'u"""': double3prog,
            "ur'''": single3prog, 'ur"""': double3prog,
            "R'''": single3prog, 'R"""': double3prog,
            "U'''": single3prog, 'U"""': double3prog,
            "uR'''": single3prog, 'uR"""': double3prog,
            "Ur'''": single3prog, 'Ur"""': double3prog,
            "UR'''": single3prog, 'UR"""': double3prog,
            'r': None, 'R': None, 'u': None, 'U': None}
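# Note (added for exposition): once an opening quote has been consumed, the
# matching endprogs entry scans for the closing quote, e.g.
#     endprogs["'''"].match("body''' tail")
# matches up to and including the closing '''.  The bare prefixes 'r', 'R',
# 'u', 'U' map to None; generate_tokens falls through to the entry for the
# next character of the token in that case.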

triple_quoted = {}
for t in ("'''", '"""',
          "r'''", 'r"""', "R'''", 'R"""',
          "u'''", 'u"""', "U'''", 'U"""',
          "ur'''", 'ur"""', "Ur'''", 'Ur"""',
          "uR'''", 'uR"""', "UR'''", 'UR"""'):
    triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
          "r'", 'r"', "R'", 'R"',
          "u'", 'u"', "U'", 'U"',
          "ur'", 'ur"', "Ur'", 'Ur"',
          "uR'", 'uR"', "UR'", 'UR"'):
    single_quoted[t] = t

tabsize = 8

class TokenError(Exception): pass

class StopTokenizing(Exception): pass

def printtoken(type, token, (srow, scol), (erow, ecol), line): # for testing
    print "%d,%d-%d,%d:\t%s\t%s" % \
        (srow, scol, erow, ecol, tok_name[type], repr(token))

def tokenize(readline, tokeneater=printtoken):
    """
    The tokenize() function accepts two parameters: one representing the
    input stream, and one providing an output mechanism for tokenize().

    The first parameter, readline, must be a callable object which provides
    the same interface as the readline() method of built-in file objects.
    Each call to the function should return one line of input as a string.

    The second parameter, tokeneater, must also be a callable object. It is
    called once for each token, with five arguments, corresponding to the
    tuples generated by generate_tokens().
    """
    try:
        tokenize_loop(readline, tokeneater)
    except StopTokenizing:
        pass
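
# Example usage (added for exposition; 'example.py' is a hypothetical file):
#
#     f = open('example.py')
#     tokenize(f.readline)    # the default tokeneater, printtoken, prints
#                             # one "row,col-row,col: TYPE token" line
#                             # per token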

# backwards compatible interface
def tokenize_loop(readline, tokeneater):
    for token_info in generate_tokens(readline):
        tokeneater(*token_info)

def generate_tokens(readline):
    """
    The generate_tokens() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects. Each call to the function
    should return one line of input as a string.

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found. The line passed is the
    logical line; continuation lines are included.
    """
    lnum = parenlev = continued = 0
    namechars, numchars = string.ascii_letters + '_', '0123456789'
    contstr, needcont = '', 0
    contline = None
    indents = [0]

    while 1:                                   # loop over lines in stream
        line = readline()
        lnum = lnum + 1
        pos, max = 0, len(line)

        if contstr:                            # continued string
            if not line:
                raise TokenError, ("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield (STRING, contstr + line[:end],
                       strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                yield (ERRORTOKEN, contstr + line,
                       strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ': column = column + 1
                elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize
                elif line[pos] == '\f': column = 0
                else: break
                pos = pos + 1
            if pos == max: break

            if line[pos] in '#\r\n':           # skip comments or blank lines
                yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
                       (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level")
                indents = indents[:-1]
                yield (DEDENT, '', (lnum, pos), (lnum, pos), line)

        else:                                  # continued statement
            if not line:
                raise TokenError, ("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            pseudomatch = pseudoprog.match(line, pos)
            if pseudomatch:                                # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                token, initial = line[start:end], line[start]

                if initial in numchars or \
                   (initial == '.' and token != '.'):      # ordinary number
                    yield (NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    yield (parenlev > 0 and NL or NEWLINE,
                           token, spos, epos, line)
                elif initial == '#':
                    yield (COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = endprogs[token]
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield (STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                    token[:2] in single_quoted or \
                    token[:3] in single_quoted:
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        endprog = (endprogs[initial] or endprogs[token[1]] or
                                   endprogs[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        yield (STRING, token, spos, epos, line)
                elif initial in namechars:                 # ordinary name
                    yield (NAME, token, spos, epos, line)
                elif initial == '\\':                      # continued stmt
                    continued = 1
                else:
                    if initial in '([{': parenlev = parenlev + 1
                    elif initial in ')]}': parenlev = parenlev - 1
                    yield (OP, token, spos, epos, line)
            else:
                yield (ERRORTOKEN, line[pos],
                       (lnum, pos), (lnum, pos+1), line)
                pos = pos + 1

    for indent in indents[1:]:                 # pop remaining indent levels
        yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
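
# Example use of the generator (added for exposition, not part of the
# original module):
#
#     from StringIO import StringIO
#     for tok in generate_tokens(StringIO("x = 1\n").readline):
#         print tok_name[tok[0]], repr(tok[1]), tok[2], tok[3]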

if __name__ == '__main__':                     # testing
    import sys
    if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
    else: tokenize(sys.stdin.readline)