
/python/Tools/Scripts/reindent.py

https://gitlab.com/pmuontains/Odoo
#! /usr/bin/env python

# Released to the public domain, by Tim Peters, 03 October 2000.

"""reindent [-d][-r][-v] [ path ... ]

-d (--dryrun)   Dry run.   Analyze, but don't make any changes to, files.
-r (--recurse)  Recurse.   Search for all .py files in subdirectories too.
-n (--nobackup) No backup. Does not make a ".bak" file before reindenting.
-v (--verbose)  Verbose.   Print informative msgs; else no output.
-h (--help)     Help.      Print this usage information and exit.

Change Python (.py) files to use 4-space indents and no hard tab characters.
Also trim excess spaces and tabs from ends of lines, and remove empty lines
at the end of files.  Also ensure the last line ends with a newline.

If no paths are given on the command line, reindent operates as a filter,
reading a single source file from standard input and writing the transformed
source to standard output.  In this case, the -d, -r and -v flags are
ignored.

You can pass one or more file and/or directory paths.  When a directory
path, all .py files within the directory will be examined, and, if the -r
option is given, likewise recursively for subdirectories.

If output is not to standard output, reindent overwrites files in place,
renaming the originals with a .bak extension.  If it finds nothing to
change, the file is left alone.  If reindent does change a file, the changed
file is a fixed-point for future runs (i.e., running reindent on the
resulting .py file won't change it again).

The hard part of reindenting is figuring out what to do with comment
lines.  So long as the input files get a clean bill of health from
tabnanny.py, reindent should do a good job.

The backup file is a copy of the one that is being reindented.  The ".bak"
file is generated with shutil.copy(), but some corner cases regarding
user/group and permissions could leave the backup file more readable than
you'd prefer.  You can always use the --nobackup option to prevent this.
"""
__version__ = "1"

import tokenize
import os, shutil
import sys
import io

verbose = 0
recurse = 0
dryrun = 0
makebackup = True

def usage(msg=None):
    if msg is not None:
        print >> sys.stderr, msg
    print >> sys.stderr, __doc__

def errprint(*args):
    sep = ""
    for arg in args:
        sys.stderr.write(sep + str(arg))
        sep = " "
    sys.stderr.write("\n")
def main():
    import getopt
    global verbose, recurse, dryrun, makebackup
    try:
        opts, args = getopt.getopt(sys.argv[1:], "drnvh",
                                   ["dryrun", "recurse", "nobackup",
                                    "verbose", "help"])
    except getopt.error, msg:
        usage(msg)
        return
    for o, a in opts:
        if o in ('-d', '--dryrun'):
            dryrun += 1
        elif o in ('-r', '--recurse'):
            recurse += 1
        elif o in ('-n', '--nobackup'):
            makebackup = False
        elif o in ('-v', '--verbose'):
            verbose += 1
        elif o in ('-h', '--help'):
            usage()
            return
    if not args:
        r = Reindenter(sys.stdin)
        r.run()
        r.write(sys.stdout)
        return
    for arg in args:
        check(arg)
def check(file):
    if os.path.isdir(file) and not os.path.islink(file):
        if verbose:
            print "listing directory", file
        names = os.listdir(file)
        for name in names:
            fullname = os.path.join(file, name)
            if ((recurse and os.path.isdir(fullname) and
                 not os.path.islink(fullname) and
                 not os.path.split(fullname)[1].startswith("."))
                or name.lower().endswith(".py")):
                check(fullname)
        return

    if verbose:
        print "checking", file, "...",
    try:
        f = io.open(file)
    except IOError, msg:
        errprint("%s: I/O Error: %s" % (file, str(msg)))
        return

    r = Reindenter(f)
    f.close()

    newline = r.newlines
    if isinstance(newline, tuple):
        errprint("%s: mixed newlines detected; cannot process file" % file)
        return

    if r.run():
        if verbose:
            print "changed."
            if dryrun:
                print "But this is a dry run, so leaving it alone."
        if not dryrun:
            bak = file + ".bak"
            if makebackup:
                shutil.copyfile(file, bak)
                if verbose:
                    print "backed up", file, "to", bak
            f = io.open(file, "w", newline=newline)
            r.write(f)
            f.close()
            if verbose:
                print "wrote new", file
        return True
    else:
        if verbose:
            print "unchanged."
        return False
def _rstrip(line, JUNK='\n \t'):
    """Return line stripped of trailing spaces, tabs, newlines.

    Note that line.rstrip() instead also strips sundry control characters,
    but at least one known Emacs user expects to keep junk like that, not
    mentioning Barry by name or anything <wink>.
    """
    i = len(line)
    while i > 0 and line[i-1] in JUNK:
        i -= 1
    return line[:i]
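# A small illustration (not in the original file): unlike str.rstrip(),
# _rstrip() removes only spaces, tabs and newlines, so other trailing
# control characters survive:
#
#     _rstrip("keep\x0c\n")   -> "keep\x0c"
#     "keep\x0c\n".rstrip()   -> "keep"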
class Reindenter:

    def __init__(self, f):
        self.find_stmt = 1  # next token begins a fresh stmt?
        self.level = 0      # current indent level

        # Raw file lines.
        self.raw = f.readlines()

        # File lines, rstripped & tab-expanded.  Dummy at start is so
        # that we can use tokenize's 1-based line numbering easily.
        # Note that a line is all-blank iff it's "\n".
        self.lines = [_rstrip(line).expandtabs() + "\n"
                      for line in self.raw]
        self.lines.insert(0, None)
        self.index = 1  # index into self.lines of next line

        # List of (lineno, indentlevel) pairs, one for each stmt and
        # comment line.  indentlevel is -1 for comment lines, as a
        # signal that tokenize doesn't know what to do about them;
        # indeed, they're our headache!
        self.stats = []

        # Save the newlines found in the file so they can be used to
        # create output without mutating the newlines.
        self.newlines = f.newlines
    def run(self):
        tokenize.tokenize(self.getline, self.tokeneater)
        # Remove trailing empty lines.
        lines = self.lines
        while lines and lines[-1] == "\n":
            lines.pop()
        # Sentinel.
        stats = self.stats
        stats.append((len(lines), 0))
        # Map count of leading spaces to # we want.
        have2want = {}
        # Program after transformation.
        after = self.after = []
        # Copy over initial empty lines -- there's nothing to do until
        # we see a line with *something* on it.
        i = stats[0][0]
        after.extend(lines[1:i])
        for i in range(len(stats) - 1):
            thisstmt, thislevel = stats[i]
            nextstmt = stats[i + 1][0]
            have = getlspace(lines[thisstmt])
            want = thislevel * 4
            if want < 0:
                # A comment line.
                if have:
                    # An indented comment line.  If we saw the same
                    # indentation before, reuse what it most recently
                    # mapped to.
                    want = have2want.get(have, -1)
                    if want < 0:
                        # Then it probably belongs to the next real stmt.
                        for j in xrange(i + 1, len(stats) - 1):
                            jline, jlevel = stats[j]
                            if jlevel >= 0:
                                if have == getlspace(lines[jline]):
                                    want = jlevel * 4
                                break
                    if want < 0:
                        # Maybe it's a hanging comment like this one,
                        # in which case we should shift it like its base
                        # line got shifted.
                        for j in xrange(i - 1, -1, -1):
                            jline, jlevel = stats[j]
                            if jlevel >= 0:
                                want = have + getlspace(after[jline - 1]) - \
                                       getlspace(lines[jline])
                                break
                    if want < 0:
                        # Still no luck -- leave it alone.
                        want = have
                else:
                    want = 0
            assert want >= 0
            have2want[have] = want
            diff = want - have
            if diff == 0 or have == 0:
                after.extend(lines[thisstmt:nextstmt])
            else:
                for line in lines[thisstmt:nextstmt]:
                    if diff > 0:
                        if line == "\n":
                            after.append(line)
                        else:
                            after.append(" " * diff + line)
                    else:
                        remove = min(getlspace(line), -diff)
                        after.append(line[remove:])
        return self.raw != self.after
    def write(self, f):
        f.writelines(self.after)

    # Line-getter for tokenize.
    def getline(self):
        if self.index >= len(self.lines):
            line = ""
        else:
            line = self.lines[self.index]
            self.index += 1
        return line

    # Line-eater for tokenize.
    def tokeneater(self, type, token, (sline, scol), end, line,
                   INDENT=tokenize.INDENT,
                   DEDENT=tokenize.DEDENT,
                   NEWLINE=tokenize.NEWLINE,
                   COMMENT=tokenize.COMMENT,
                   NL=tokenize.NL):

        if type == NEWLINE:
            # A program statement, or ENDMARKER, will eventually follow,
            # after some (possibly empty) run of tokens of the form
            #     (NL | COMMENT)* (INDENT | DEDENT+)?
            self.find_stmt = 1

        elif type == INDENT:
            self.find_stmt = 1
            self.level += 1

        elif type == DEDENT:
            self.find_stmt = 1
            self.level -= 1

        elif type == COMMENT:
            if self.find_stmt:
                self.stats.append((sline, -1))
                # but we're still looking for a new stmt, so leave
                # find_stmt alone

        elif type == NL:
            pass

        elif self.find_stmt:
            # This is the first "real token" following a NEWLINE, so it
            # must be the first token of the next program statement, or an
            # ENDMARKER.
            self.find_stmt = 0
            if line:   # not endmarker
                self.stats.append((sline, self.level))
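# A minimal programmatic sketch (not part of the original script; the file
# name is a placeholder): Reindenter can also be driven directly, mirroring
# what check() does above:
#
#     f = io.open("example.py")
#     r = Reindenter(f)
#     f.close()
#     if r.run():                       # True when reindenting changed something
#         out = io.open("example.py", "w", newline=r.newlines)
#         r.write(out)
#         out.close()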
# Count number of leading blanks.
def getlspace(line):
    i, n = 0, len(line)
    while i < n and line[i] == " ":
        i += 1
    return i
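# For example (illustration only), getlspace("    pass\n") returns 4, while
# getlspace("\tpass\n") returns 0 -- tabs don't count, which is fine because
# Reindenter expands tabs before this is ever called.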
if __name__ == '__main__':
    main()