
/external/chromium-trace/trace-viewer/third_party/closure_linter/closure_linter/error_fixer.py

https://gitlab.com/brian0218/rk3188_r-box_android4.2.2_sdk
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Main class responsible for automatically fixing simple style violations."""

__author__ = 'robbyw@google.com (Robert Walker)'

import re

import gflags as flags

from closure_linter import errors
from closure_linter import javascriptstatetracker
from closure_linter import javascripttokens
from closure_linter import requireprovidesorter
from closure_linter import tokenutil
from closure_linter.common import errorhandler

# Shorthand
Token = javascripttokens.JavaScriptToken
Type = javascripttokens.JavaScriptTokenType

END_OF_FLAG_TYPE = re.compile(r'(}?\s*)$')

# Regex to represent the common mistake of inverting author name and email as
# @author User Name (user@company)
INVERTED_AUTHOR_SPEC = re.compile(r'(?P<leading_whitespace>\s*)'
                                  '(?P<name>[^(]+)'
                                  '(?P<whitespace_after_name>\s+)'
                                  '\('
                                  '(?P<email>[^\s]+@[^)\s]+)'
                                  '\)'
                                  '(?P<trailing_characters>.*)')
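# The INVALID_AUTHOR_TAG_DESCRIPTION fix below uses the named groups to swap
# the two fields, e.g. 'User Name (user@company)' becomes
# 'user@company (User Name)'.
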
FLAGS = flags.FLAGS
flags.DEFINE_boolean('disable_indentation_fixing', False,
                     'Whether to disable automatic fixing of indentation.')


class ErrorFixer(errorhandler.ErrorHandler):
  """Object that fixes simple style errors."""

  def __init__(self, external_file=None):
    """Initialize the error fixer.

    Args:
      external_file: If included, all output will be directed to this file
          instead of overwriting the files the errors are found in.
    """
    errorhandler.ErrorHandler.__init__(self)

    self._file_name = None
    self._file_token = None
    self._external_file = external_file

  def HandleFile(self, filename, first_token):
    """Notifies this ErrorFixer that subsequent errors are in filename.

    Args:
      filename: The name of the file about to be checked.
      first_token: The first token in the file.
    """
    self._file_name = filename
    self._file_token = first_token
    self._file_fix_count = 0
    self._file_changed_lines = set()

  def _AddFix(self, tokens):
    """Adds the fix to the internal count.

    Args:
      tokens: The token or sequence of tokens changed to fix an error.
    """
    self._file_fix_count += 1

    if hasattr(tokens, 'line_number'):
      self._file_changed_lines.add(tokens.line_number)
    else:
      for token in tokens:
        self._file_changed_lines.add(token.line_number)

  def HandleError(self, error):
    """Attempts to fix the error.

    Args:
      error: The error object
    """
    code = error.code
    token = error.token

    if code == errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL:
      iterator = token.attached_object.type_start_token
      if iterator.type == Type.DOC_START_BRACE or iterator.string.isspace():
        iterator = iterator.next

      leading_space = len(iterator.string) - len(iterator.string.lstrip())
      iterator.string = '%s?%s' % (' ' * leading_space,
                                   iterator.string.lstrip())

      # Cover the no outer brace case where the end token is part of the type.
      while iterator and iterator != token.attached_object.type_end_token.next:
        iterator.string = iterator.string.replace(
            'null|', '').replace('|null', '')
        iterator = iterator.next

      # Create a new flag object with updated type info.
      token.attached_object = javascriptstatetracker.JsDocFlag(token)
      self._AddFix(token)
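      # For example, a type annotated as {Object|null} becomes {?Object}.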

    elif code == errors.JSDOC_MISSING_OPTIONAL_TYPE:
      iterator = token.attached_object.type_end_token
      if iterator.type == Type.DOC_END_BRACE or iterator.string.isspace():
        iterator = iterator.previous

      ending_space = len(iterator.string) - len(iterator.string.rstrip())
      iterator.string = '%s=%s' % (iterator.string.rstrip(),
                                   ' ' * ending_space)

      # Create a new flag object with updated type info.
      token.attached_object = javascriptstatetracker.JsDocFlag(token)
      self._AddFix(token)
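      # For example, the type of an opt_ parameter {string} becomes {string=}.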

    elif code in (errors.MISSING_SEMICOLON_AFTER_FUNCTION,
                  errors.MISSING_SEMICOLON):
      semicolon_token = Token(';', Type.SEMICOLON, token.line,
                              token.line_number)
      tokenutil.InsertTokenAfter(semicolon_token, token)
      token.metadata.is_implied_semicolon = False
      semicolon_token.metadata.is_implied_semicolon = False
      self._AddFix(token)

    elif code in (errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
                  errors.REDUNDANT_SEMICOLON,
                  errors.COMMA_AT_END_OF_LITERAL):
      tokenutil.DeleteToken(token)
      self._AddFix(token)

    elif code == errors.INVALID_JSDOC_TAG:
      if token.string == '@returns':
        token.string = '@return'
        self._AddFix(token)

    elif code == errors.FILE_MISSING_NEWLINE:
      # This error is fixed implicitly by the way we restore the file.
      self._AddFix(token)

    elif code == errors.MISSING_SPACE:
      if error.position:
        if error.position.IsAtBeginning():
          tokenutil.InsertSpaceTokenAfter(token.previous)
        elif error.position.IsAtEnd(token.string):
          tokenutil.InsertSpaceTokenAfter(token)
        else:
          token.string = error.position.Set(token.string, ' ')
        self._AddFix(token)

    elif code == errors.EXTRA_SPACE:
      if error.position:
        token.string = error.position.Set(token.string, '')
        self._AddFix(token)

    elif code == errors.JSDOC_TAG_DESCRIPTION_ENDS_WITH_INVALID_CHARACTER:
      token.string = error.position.Set(token.string, '.')
      self._AddFix(token)

    elif code == errors.MISSING_LINE:
      if error.position.IsAtBeginning():
        tokenutil.InsertBlankLineAfter(token.previous)
      else:
        tokenutil.InsertBlankLineAfter(token)
      self._AddFix(token)

    elif code == errors.EXTRA_LINE:
      tokenutil.DeleteToken(token)
      self._AddFix(token)

    elif code == errors.WRONG_BLANK_LINE_COUNT:
      if not token.previous:
        # TODO(user): Add an insertBefore method to tokenutil.
        return

      num_lines = error.fix_data
      should_delete = False

      if num_lines < 0:
        num_lines *= -1
        should_delete = True

      for i in xrange(1, num_lines + 1):
        if should_delete:
          # TODO(user): DeleteToken should update line numbers.
          tokenutil.DeleteToken(token.previous)
        else:
          tokenutil.InsertBlankLineAfter(token.previous)
        self._AddFix(token)

    elif code == errors.UNNECESSARY_DOUBLE_QUOTED_STRING:
      end_quote = tokenutil.Search(token, Type.DOUBLE_QUOTE_STRING_END)
      if end_quote:
        single_quote_start = Token(
            "'", Type.SINGLE_QUOTE_STRING_START, token.line, token.line_number)
        single_quote_end = Token(
            "'", Type.SINGLE_QUOTE_STRING_END, end_quote.line,
            token.line_number)

        tokenutil.InsertTokenAfter(single_quote_start, token)
        tokenutil.InsertTokenAfter(single_quote_end, end_quote)
        tokenutil.DeleteToken(token)
        tokenutil.DeleteToken(end_quote)
        self._AddFix([token, end_quote])

    elif code == errors.MISSING_BRACES_AROUND_TYPE:
      fixed_tokens = []
      start_token = token.attached_object.type_start_token

      if start_token.type != Type.DOC_START_BRACE:
        leading_space = (
            len(start_token.string) - len(start_token.string.lstrip()))
        if leading_space:
          start_token = tokenutil.SplitToken(start_token, leading_space)
          # Fix case where start and end token were the same.
          if token.attached_object.type_end_token == start_token.previous:
            token.attached_object.type_end_token = start_token

        new_token = Token('{', Type.DOC_START_BRACE, start_token.line,
                          start_token.line_number)
        tokenutil.InsertTokenAfter(new_token, start_token.previous)
        token.attached_object.type_start_token = new_token
        fixed_tokens.append(new_token)

      end_token = token.attached_object.type_end_token
      if end_token.type != Type.DOC_END_BRACE:
        # If the start token was a brace, the end token will be a
        # FLAG_ENDING_TYPE token, if there wasn't a starting brace then
        # the end token is the last token of the actual type.
        last_type = end_token
        if not fixed_tokens:
          last_type = end_token.previous

        while last_type.string.isspace():
          last_type = last_type.previous

        # If there was no starting brace then a lone end brace wouldn't have
        # been the type end token. Now that we've added any missing start
        # brace, see if the last effective type token was an end brace.
        if last_type.type != Type.DOC_END_BRACE:
          trailing_space = (len(last_type.string) -
                            len(last_type.string.rstrip()))
          if trailing_space:
            tokenutil.SplitToken(last_type,
                                 len(last_type.string) - trailing_space)

          new_token = Token('}', Type.DOC_END_BRACE, last_type.line,
                            last_type.line_number)
          tokenutil.InsertTokenAfter(new_token, last_type)
          token.attached_object.type_end_token = new_token
          fixed_tokens.append(new_token)

      self._AddFix(fixed_tokens)
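      # For example, '@param number x' becomes '@param {number} x'.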

    elif code == errors.GOOG_REQUIRES_NOT_ALPHABETIZED:
      require_start_token = error.fix_data
      sorter = requireprovidesorter.RequireProvideSorter()
      sorter.FixRequires(require_start_token)

      self._AddFix(require_start_token)

    elif code == errors.GOOG_PROVIDES_NOT_ALPHABETIZED:
      provide_start_token = error.fix_data
      sorter = requireprovidesorter.RequireProvideSorter()
      sorter.FixProvides(provide_start_token)

      self._AddFix(provide_start_token)

    elif code == errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC:
      if token.previous.string == '{' and token.next.string == '}':
        tokenutil.DeleteToken(token.previous)
        tokenutil.DeleteToken(token.next)

        self._AddFix([token])

    elif code == errors.INVALID_AUTHOR_TAG_DESCRIPTION:
      match = INVERTED_AUTHOR_SPEC.match(token.string)
      if match:
        token.string = '%s%s%s(%s)%s' % (match.group('leading_whitespace'),
                                         match.group('email'),
                                         match.group('whitespace_after_name'),
                                         match.group('name'),
                                         match.group('trailing_characters'))
        self._AddFix(token)

    elif (code == errors.WRONG_INDENTATION and
          not FLAGS.disable_indentation_fixing):
      token = tokenutil.GetFirstTokenInSameLine(token)
      actual = error.position.start
      expected = error.position.length

      if token.type in (Type.WHITESPACE, Type.PARAMETERS) and actual != 0:
        token.string = token.string.lstrip() + (' ' * expected)
        self._AddFix([token])
      else:
        # We need to add indentation.
        new_token = Token(' ' * expected, Type.WHITESPACE,
                          token.line, token.line_number)

        # Note that we'll never need to add indentation at the first line,
        # since it will always not be indented. Therefore it's safe to assume
        # token.previous exists.
        tokenutil.InsertTokenAfter(new_token, token.previous)
        self._AddFix([token])

    elif code in [errors.MALFORMED_END_OF_SCOPE_COMMENT,
                  errors.MISSING_END_OF_SCOPE_COMMENT]:
      # Only fix cases where }); is found with no trailing content on the line
      # other than a comment. Value of 'token' is set to } for this error.
      if (token.type == Type.END_BLOCK and
          token.next.type == Type.END_PAREN and
          token.next.next.type == Type.SEMICOLON):
        current_token = token.next.next.next
        removed_tokens = []
        while current_token and current_token.line_number == token.line_number:
          if current_token.IsAnyType(Type.WHITESPACE,
                                     Type.START_SINGLE_LINE_COMMENT,
                                     Type.COMMENT):
            removed_tokens.append(current_token)
            current_token = current_token.next
          else:
            return

        if removed_tokens:
          tokenutil.DeleteTokens(removed_tokens[0], len(removed_tokens))

        whitespace_token = Token('  ', Type.WHITESPACE, token.line,
                                 token.line_number)
        start_comment_token = Token('//', Type.START_SINGLE_LINE_COMMENT,
                                    token.line, token.line_number)
        comment_token = Token(' goog.scope', Type.COMMENT, token.line,
                              token.line_number)
        insertion_tokens = [whitespace_token, start_comment_token,
                            comment_token]

        tokenutil.InsertTokensAfter(insertion_tokens, token.next.next)
        self._AddFix(removed_tokens + insertion_tokens)
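        # The fixed line then reads, for example: });  // goog.scope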

    elif code in [errors.EXTRA_GOOG_PROVIDE, errors.EXTRA_GOOG_REQUIRE]:
      tokens_in_line = tokenutil.GetAllTokensInSameLine(token)
      tokenutil.DeleteTokens(tokens_in_line[0], len(tokens_in_line))
      self._AddFix(tokens_in_line)

    elif code in [errors.MISSING_GOOG_PROVIDE, errors.MISSING_GOOG_REQUIRE]:
      is_provide = code == errors.MISSING_GOOG_PROVIDE
      is_require = code == errors.MISSING_GOOG_REQUIRE

      missing_namespaces = error.fix_data[0]
      need_blank_line = error.fix_data[1]

      if need_blank_line is None:
        # TODO(user): This happens when there are no existing
        # goog.provide or goog.require statements to position new statements
        # relative to. Consider handling this case with a heuristic.
        return

      insert_location = token.previous

      # If inserting a missing require with no existing requires, insert a
      # blank line first.
      if need_blank_line and is_require:
        tokenutil.InsertBlankLineAfter(insert_location)
        insert_location = insert_location.next

      for missing_namespace in missing_namespaces:
        new_tokens = self._GetNewRequireOrProvideTokens(
            is_provide, missing_namespace, insert_location.line_number + 1)
        tokenutil.InsertLineAfter(insert_location, new_tokens)
        insert_location = new_tokens[-1]
        self._AddFix(new_tokens)

      # If inserting a missing provide with no existing provides, insert a
      # blank line after.
      if need_blank_line and is_provide:
        tokenutil.InsertBlankLineAfter(insert_location)

  def _GetNewRequireOrProvideTokens(self, is_provide, namespace, line_number):
    """Returns a list of tokens to create a goog.require/provide statement.

    Args:
      is_provide: True if getting tokens for a provide, False for require.
      namespace: The required or provided namespace to get tokens for.
      line_number: The line number the new require or provide statement will be
          on.

    Returns:
      Tokens to create a new goog.require or goog.provide statement.
    """
    string = 'goog.require'
    if is_provide:
      string = 'goog.provide'
    line_text = string + '(\'' + namespace + '\');\n'
    return [
        Token(string, Type.IDENTIFIER, line_text, line_number),
        Token('(', Type.START_PAREN, line_text, line_number),
        Token('\'', Type.SINGLE_QUOTE_STRING_START, line_text, line_number),
        Token(namespace, Type.STRING_TEXT, line_text, line_number),
        Token('\'', Type.SINGLE_QUOTE_STRING_END, line_text, line_number),
        Token(')', Type.END_PAREN, line_text, line_number),
        Token(';', Type.SEMICOLON, line_text, line_number)
    ]
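    # For namespace 'goog.events', for example, these tokens write back out
    # as the line: goog.require('goog.events');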

  def FinishFile(self):
    """Called when the current file has finished style checking.

    Used to go back and fix any errors in the file.
    """
    if self._file_fix_count:
      f = self._external_file
      if not f:
        print 'Fixed %d errors in %s' % (self._file_fix_count, self._file_name)
        f = open(self._file_name, 'w')

      token = self._file_token
      char_count = 0

      while token:
        f.write(token.string)
        char_count += len(token.string)

        if token.IsLastInLine():
          f.write('\n')
          if char_count > 80 and token.line_number in self._file_changed_lines:
            print 'WARNING: Line %d of %s is now longer than 80 characters.' % (
                token.line_number, self._file_name)

          char_count = 0

        token = token.next

      if not self._external_file:
        # Close the file if we created it.
        f.close()
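
Usage note: the sketch below (not part of the file above) shows how this fixer
is typically driven, modeled on the package's fixjsstyle tool. It assumes the
sibling closure_linter.checker module, whose JavaScriptStyleChecker takes an
error handler and exposes a Check(filename) method; the file path is a
placeholder.

    from closure_linter import checker
    from closure_linter import error_fixer

    # With no external_file argument, ErrorFixer rewrites each checked file
    # in place; FinishFile() performs the write once checking completes.
    fixer = error_fixer.ErrorFixer()
    style_checker = checker.JavaScriptStyleChecker(fixer)
    style_checker.Check('path/to/script.js')  # placeholder path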