# tabnanny.py
  1. #! /usr/bin/env python
  2. """The Tab Nanny despises ambiguous indentation. She knows no mercy.
  3. tabnanny -- Detection of ambiguous indentation
  4. For the time being this module is intended to be called as a script.
  5. However it is possible to import it into an IDE and use the function
  6. check() described below.
  7. Warning: The API provided by this module is likely to change in future
  8. releases; such changes may not be backward compatible.
  9. """
  10. # Released to the public domain, by Tim Peters, 15 April 1998.
  11. # XXX Note: this is now a standard library module.
  12. # XXX The API needs to undergo changes however; the current code is too
  13. # XXX script-like. This will be addressed later.
  14. __version__ = "6"
  15. import os
  16. import sys
  17. import getopt
  18. import tokenize
  19. if not hasattr(tokenize, 'NL'):
  20. raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")
  21. __all__ = ["check", "NannyNag", "process_tokens"]
  22. verbose = 0
  23. filename_only = 0
  24. def errprint(*args):
  25. sep = ""
  26. for arg in args:
  27. sys.stderr.write(sep + str(arg))
  28. sep = " "
  29. sys.stderr.write("\n")
  30. def main():
  31. global verbose, filename_only
  32. try:
  33. opts, args = getopt.getopt(sys.argv[1:], "qv")
  34. except getopt.error, msg:
  35. errprint(msg)
  36. return
  37. for o, a in opts:
  38. if o == '-q':
  39. filename_only = filename_only + 1
  40. if o == '-v':
  41. verbose = verbose + 1
  42. if not args:
  43. errprint("Usage:", sys.argv[0], "[-v] file_or_directory ...")
  44. return
  45. for arg in args:
  46. check(arg)
  47. class NannyNag(Exception):
  48. """
  49. Raised by tokeneater() if detecting an ambiguous indent.
  50. Captured and handled in check().
  51. """
  52. def __init__(self, lineno, msg, line):
  53. self.lineno, self.msg, self.line = lineno, msg, line
  54. def get_lineno(self):
  55. return self.lineno
  56. def get_msg(self):
  57. return self.msg
  58. def get_line(self):
  59. return self.line
  60. def check(file):
  61. """check(file_or_dir)
  62. If file_or_dir is a directory and not a symbolic link, then recursively
  63. descend the directory tree named by file_or_dir, checking all .py files
  64. along the way. If file_or_dir is an ordinary Python source file, it is
  65. checked for whitespace related problems. The diagnostic messages are
  66. written to standard output using the print statement.
  67. """
  68. if os.path.isdir(file) and not os.path.islink(file):
  69. if verbose:
  70. print "%r: listing directory" % (file,)
  71. names = os.listdir(file)
  72. for name in names:
  73. fullname = os.path.join(file, name)
  74. if (os.path.isdir(fullname) and
  75. not os.path.islink(fullname) or
  76. os.path.normcase(name[-3:]) == ".py"):
  77. check(fullname)
  78. return
  79. try:
  80. f = open(file)
  81. except IOError, msg:
  82. errprint("%r: I/O Error: %s" % (file, msg))
  83. return
  84. if verbose > 1:
  85. print "checking %r ..." % file
  86. try:
  87. process_tokens(tokenize.generate_tokens(f.readline))
  88. except tokenize.TokenError, msg:
  89. errprint("%r: Token Error: %s" % (file, msg))
  90. return
  91. except NannyNag, nag:
  92. badline = nag.get_lineno()
  93. line = nag.get_line()
  94. if verbose:
  95. print "%r: *** Line %d: trouble in tab city! ***" % (file, badline)
  96. print "offending line: %r" % (line,)
  97. print nag.get_msg()
  98. else:
  99. if ' ' in file: file = '"' + file + '"'
  100. if filename_only: print file
  101. else: print file, badline, repr(line)
  102. return
  103. if verbose:
  104. print "%r: Clean bill of health." % (file,)
  105. class Whitespace:
  106. # the characters used for space and tab
  107. S, T = ' \t'
  108. # members:
  109. # raw
  110. # the original string
  111. # n
  112. # the number of leading whitespace characters in raw
  113. # nt
  114. # the number of tabs in raw[:n]
  115. # norm
  116. # the normal form as a pair (count, trailing), where:
  117. # count
  118. # a tuple such that raw[:n] contains count[i]
  119. # instances of S * i + T
  120. # trailing
  121. # the number of trailing spaces in raw[:n]
  122. # It's A Theorem that m.indent_level(t) ==
  123. # n.indent_level(t) for all t >= 1 iff m.norm == n.norm.
  124. # is_simple
  125. # true iff raw[:n] is of the form (T*)(S*)
  126. def __init__(self, ws):
  127. self.raw = ws
  128. S, T = Whitespace.S, Whitespace.T
  129. count = []
  130. b = n = nt = 0
  131. for ch in self.raw:
  132. if ch == S:
  133. n = n + 1
  134. b = b + 1
  135. elif ch == T:
  136. n = n + 1
  137. nt = nt + 1
  138. if b >= len(count):
  139. count = count + [0] * (b - len(count) + 1)
  140. count[b] = count[b] + 1
  141. b = 0
  142. else:
  143. break
  144. self.n = n
  145. self.nt = nt
  146. self.norm = tuple(count), b
  147. self.is_simple = len(count) <= 1
  148. # return length of longest contiguous run of spaces (whether or not
  149. # preceding a tab)
  150. def longest_run_of_spaces(self):
  151. count, trailing = self.norm
  152. return max(len(count)-1, trailing)
  153. def indent_level(self, tabsize):
  154. # count, il = self.norm
  155. # for i in range(len(count)):
  156. # if count[i]:
  157. # il = il + (i/tabsize + 1)*tabsize * count[i]
  158. # return il
  159. # quicker:
  160. # il = trailing + sum (i/ts + 1)*ts*count[i] =
  161. # trailing + ts * sum (i/ts + 1)*count[i] =
  162. # trailing + ts * sum i/ts*count[i] + count[i] =
  163. # trailing + ts * [(sum i/ts*count[i]) + (sum count[i])] =
  164. # trailing + ts * [(sum i/ts*count[i]) + num_tabs]
  165. # and note that i/ts*count[i] is 0 when i < ts
  166. count, trailing = self.norm
  167. il = 0
  168. for i in range(tabsize, len(count)):
  169. il = il + i/tabsize * count[i]
  170. return trailing + tabsize * (il + self.nt)
  171. # return true iff self.indent_level(t) == other.indent_level(t)
  172. # for all t >= 1
  173. def equal(self, other):
  174. return self.norm == other.norm
  175. # return a list of tuples (ts, i1, i2) such that
  176. # i1 == self.indent_level(ts) != other.indent_level(ts) == i2.
  177. # Intended to be used after not self.equal(other) is known, in which
  178. # case it will return at least one witnessing tab size.
  179. def not_equal_witness(self, other):
  180. n = max(self.longest_run_of_spaces(),
  181. other.longest_run_of_spaces()) + 1
  182. a = []
  183. for ts in range(1, n+1):
  184. if self.indent_level(ts) != other.indent_level(ts):
  185. a.append( (ts,
  186. self.indent_level(ts),
  187. other.indent_level(ts)) )
  188. return a
  189. # Return True iff self.indent_level(t) < other.indent_level(t)
  190. # for all t >= 1.
  191. # The algorithm is due to Vincent Broman.
  192. # Easy to prove it's correct.
  193. # XXXpost that.
  194. # Trivial to prove n is sharp (consider T vs ST).
  195. # Unknown whether there's a faster general way. I suspected so at
  196. # first, but no longer.
  197. # For the special (but common!) case where M and N are both of the
  198. # form (T*)(S*), M.less(N) iff M.len() < N.len() and
  199. # M.num_tabs() <= N.num_tabs(). Proof is easy but kinda long-winded.
  200. # XXXwrite that up.
  201. # Note that M is of the form (T*)(S*) iff len(M.norm[0]) <= 1.
  202. def less(self, other):
  203. if self.n >= other.n:
  204. return False
  205. if self.is_simple and other.is_simple:
  206. return self.nt <= other.nt
  207. n = max(self.longest_run_of_spaces(),
  208. other.longest_run_of_spaces()) + 1
  209. # the self.n >= other.n test already did it for ts=1
  210. for ts in range(2, n+1):
  211. if self.indent_level(ts) >= other.indent_level(ts):
  212. return False
  213. return True
  214. # return a list of tuples (ts, i1, i2) such that
  215. # i1 == self.indent_level(ts) >= other.indent_level(ts) == i2.
  216. # Intended to be used after not self.less(other) is known, in which
  217. # case it will return at least one witnessing tab size.
  218. def not_less_witness(self, other):
  219. n = max(self.longest_run_of_spaces(),
  220. other.longest_run_of_spaces()) + 1
  221. a = []
  222. for ts in range(1, n+1):
  223. if self.indent_level(ts) >= other.indent_level(ts):
  224. a.append( (ts,
  225. self.indent_level(ts),
  226. other.indent_level(ts)) )
  227. return a
  228. def format_witnesses(w):
  229. firsts = map(lambda tup: str(tup[0]), w)
  230. prefix = "at tab size"
  231. if len(w) > 1:
  232. prefix = prefix + "s"
  233. return prefix + " " + ', '.join(firsts)
  234. def process_tokens(tokens):
  235. INDENT = tokenize.INDENT
  236. DEDENT = tokenize.DEDENT
  237. NEWLINE = tokenize.NEWLINE
  238. JUNK = tokenize.COMMENT, tokenize.NL
  239. indents = [Whitespace("")]
  240. check_equal = 0
  241. for (type, token, start, end, line) in tokens:
  242. if type == NEWLINE:
  243. # a program statement, or ENDMARKER, will eventually follow,
  244. # after some (possibly empty) run of tokens of the form
  245. # (NL | COMMENT)* (INDENT | DEDENT+)?
  246. # If an INDENT appears, setting check_equal is wrong, and will
  247. # be undone when we see the INDENT.
  248. check_equal = 1
  249. elif type == INDENT:
  250. check_equal = 0
  251. thisguy = Whitespace(token)
  252. if not indents[-1].less(thisguy):
  253. witness = indents[-1].not_less_witness(thisguy)
  254. msg = "indent not greater e.g. " + format_witnesses(witness)
  255. raise NannyNag(start[0], msg, line)
  256. indents.append(thisguy)
  257. elif type == DEDENT:
  258. # there's nothing we need to check here! what's important is
  259. # that when the run of DEDENTs ends, the indentation of the
  260. # program statement (or ENDMARKER) that triggered the run is
  261. # equal to what's left at the top of the indents stack
  262. # Ouch! This assert triggers if the last line of the source
  263. # is indented *and* lacks a newline -- then DEDENTs pop out
  264. # of thin air.
  265. # assert check_equal # else no earlier NEWLINE, or an earlier INDENT
  266. check_equal = 1
  267. del indents[-1]
  268. elif check_equal and type not in JUNK:
  269. # this is the first "real token" following a NEWLINE, so it
  270. # must be the first token of the next program statement, or an
  271. # ENDMARKER; the "line" argument exposes the leading whitespace
  272. # for this statement; in the case of ENDMARKER, line is an empty
  273. # string, so will properly match the empty string with which the
  274. # "indents" stack was seeded
  275. check_equal = 0
  276. thisguy = Whitespace(line)
  277. if not indents[-1].equal(thisguy):
  278. witness = indents[-1].not_equal_witness(thisguy)
  279. msg = "indent not equal e.g. " + format_witnesses(witness)
  280. raise NannyNag(start[0], msg, line)
# Script entry point: run the nanny when invoked directly.
if __name__ == '__main__':
    main()