/nose/plugins/testid.py

https://bitbucket.org/jpellerin/nose/ · Python

  1. """
  2. This plugin adds a test id (like #1) to each test name output. After
  3. you've run once to generate test ids, you can re-run individual
  4. tests by activating the plugin and passing the ids (with or
  5. without the # prefix) instead of test names.
  6. For example, if your normal test run looks like::
  7. % nosetests -v
  8. tests.test_a ... ok
  9. tests.test_b ... ok
  10. tests.test_c ... ok

When adding ``--with-id`` you'll see::

  % nosetests -v --with-id
  #1 tests.test_a ... ok
  #2 tests.test_b ... ok
  #3 tests.test_c ... ok

Then you can re-run individual tests by supplying just an id number::

  % nosetests -v --with-id 2
  #2 tests.test_b ... ok

You can also pass multiple id numbers::

  % nosetests -v --with-id 2 3
  #2 tests.test_b ... ok
  #3 tests.test_c ... ok

Since most shells consider '#' a special character, you can leave it out when
specifying a test id.

Note that when run without the -v switch, no special output is displayed, but
the ids file is still written.

Looping over failed tests
-------------------------

This plugin also adds a mode that will direct the test runner to record
failed tests. Subsequent test runs will then run only the tests that failed
last time. Activate this mode with the ``--failed`` switch::

  % nosetests -v --failed
  #1 test.test_a ... ok
  #2 test.test_b ... ERROR
  #3 test.test_c ... FAILED
  #4 test.test_d ... ok

On the second run, only tests #2 and #3 will run::

  % nosetests -v --failed
  #2 test.test_b ... ERROR
  #3 test.test_c ... FAILED

As you correct errors and tests pass, they'll drop out of subsequent runs.

First::

  % nosetests -v --failed
  #2 test.test_b ... ok
  #3 test.test_c ... FAILED

Second::

  % nosetests -v --failed
  #3 test.test_c ... FAILED

When all tests pass, the full set will run on the next invocation.

First::

  % nosetests -v --failed
  #3 test.test_c ... ok

Second::

  % nosetests -v --failed
  #1 test.test_a ... ok
  #2 test.test_b ... ok
  #3 test.test_c ... ok
  #4 test.test_d ... ok

.. note ::

  If you expect to use ``--failed`` regularly, it's a good idea to always run
  using the ``--with-id`` option. This will ensure that an id file is
  always created, allowing you to add ``--failed`` to the command line as soon
  as you have failing tests. Otherwise, your first run using ``--failed`` will
  (perhaps surprisingly) run *all* tests, because there won't be an id file
  containing the record of failed tests from your previous run.
"""
__test__ = False

import logging
import os

from nose.plugins import Plugin
from nose.util import src, set

try:
    from cPickle import dump, load
except ImportError:
    from pickle import dump, load

log = logging.getLogger(__name__)
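
# The methods on TestId below (options, configure, finalize,
# loadTestsFromNames, setOutputStream, startTest, afterTest) are standard
# nose plugin hooks, called by the plugin manager at the matching points
# in a test run.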

class TestId(Plugin):
    """
    Activate to add a test id (like #1) to each test name output. Activate
    with --failed to rerun failing tests only.
    """
    name = 'id'
    idfile = None
    collecting = True
    loopOnFailed = False

    def options(self, parser, env):
        """Register commandline options.
        """
        Plugin.options(self, parser, env)
        parser.add_option('--id-file', action='store', dest='testIdFile',
                          default='.noseids', metavar="FILE",
                          help="Store test ids found in test runs in this "
                               "file. Default is the file .noseids in the "
                               "working directory.")
        parser.add_option('--failed', action='store_true',
                          dest='failed', default=False,
                          help="Run the tests that failed in the last "
                               "test run.")

    def configure(self, options, conf):
        """Configure plugin.
        """
        Plugin.configure(self, options, conf)
        if options.failed:
            self.enabled = True
            self.loopOnFailed = True
            log.debug("Looping on failed tests")
        self.idfile = os.path.expanduser(options.testIdFile)
        if not os.path.isabs(self.idfile):
            self.idfile = os.path.join(conf.workingDir, self.idfile)
        self.id = 1
        # Ids and tests are mirror images: ids are {id: test address} and
        # tests are {test address: id}
        self.ids = {}
        self.tests = {}
        self.failed = []
        self.source_names = []
        # used to track ids seen when tests is filled from
        # loaded ids file
        self._seen = {}
        self._write_hashes = conf.verbosity >= 2
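
    # For reference, the id file written by finalize() below is a pickle of a
    # dict shaped roughly like this (addresses are nose test address tuples;
    # the paths and names shown are only illustrative)::
    #
    #   {'ids': {1: ('/path/to/tests.py', 'tests', 'test_a'),
    #            2: ('/path/to/tests.py', 'tests', 'test_b')},
    #    'failed': ['2'],
    #    'source_names': ['tests']}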

    def finalize(self, result):
        """Save new ids file, if needed.
        """
        if result.wasSuccessful():
            self.failed = []
        if self.collecting:
            ids = dict(list(zip(list(self.tests.values()),
                                list(self.tests.keys()))))
        else:
            ids = self.ids
        fh = open(self.idfile, 'wb')
        dump({'ids': ids,
              'failed': self.failed,
              'source_names': self.source_names}, fh)
        fh.close()
        log.debug('Saved test ids: %s, failed %s to %s',
                  ids, self.failed, self.idfile)
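
    # For example, ``nosetests --with-id 2 tests.test_new`` reaches
    # loadTestsFromNames() with names ['2', 'tests.test_new']: '2' is
    # translated to the address stored under id 2, while 'tests.test_new'
    # is passed through (and remembered) as a new source name.  The module
    # name and id used here are hypothetical.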

    def loadTestsFromNames(self, names, module=None):
        """Translate ids in the list of requested names into their
        test addresses, if they are found in my dict of tests.
        """
        log.debug('ltfn %s %s', names, module)
        try:
            fh = open(self.idfile, 'rb')
            data = load(fh)
            if 'ids' in data:
                self.ids = data['ids']
                self.failed = data['failed']
                self.source_names = data['source_names']
            else:
                # old ids file
                self.ids = data
                self.failed = []
                self.source_names = names
            if self.ids:
                self.id = max(self.ids) + 1
                self.tests = dict(list(zip(list(self.ids.values()),
                                           list(self.ids.keys()))))
            else:
                self.id = 1
            log.debug(
                'Loaded test ids %s tests %s failed %s sources %s from %s',
                self.ids, self.tests, self.failed, self.source_names,
                self.idfile)
            fh.close()
        except IOError:
            log.debug('IO error reading %s', self.idfile)

        if self.loopOnFailed and self.failed:
            self.collecting = False
            names = self.failed
            self.failed = []
        # I don't load any tests myself, only translate names like '#2'
        # into the associated test addresses
        translated = []
        new_source = []
        really_new = []
        for name in names:
            trans = self.tr(name)
            if trans != name:
                translated.append(trans)
            else:
                new_source.append(name)
        # names that are not ids and that are not in the current
        # list of source names go into the list for next time
        if new_source:
            new_set = set(new_source)
            old_set = set(self.source_names)
            log.debug("old: %s new: %s", old_set, new_set)
            really_new = [s for s in new_source
                          if not s in old_set]
            if really_new:
                # remember new sources
                self.source_names.extend(really_new)
            if not translated:
                # new set of source names, no translations
                # means "run the requested tests"
                names = new_source
        else:
            # no new names to translate and add to id set
            self.collecting = False
        log.debug("translated: %s new sources %s names %s",
                  translated, really_new, names)
        return (None, translated + really_new or names)
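
    # makeName() is the inverse of test.address(): it turns an address tuple
    # (filename, module, call) back into a name nose's loader can resolve,
    # e.g. "tests/test_a.py:TestA.test_one" (the example name is illustrative).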

    def makeName(self, addr):
        log.debug("Make name %s", addr)
        filename, module, call = addr
        if filename is not None:
            head = src(filename)
        else:
            head = module
        if call is not None:
            return "%s:%s" % (head, call)
        return head

    def setOutputStream(self, stream):
        """Get handle on output stream so the plugin can print id #s
        """
        self.stream = stream

    def startTest(self, test):
        """Maybe output an id # before the test name.

        Example output::

          #1 test.test ... ok
          #2 test.test_two ... ok
        """
        adr = test.address()
        log.debug('start test %s (%s)', adr, adr in self.tests)
        if adr in self.tests:
            if adr in self._seen:
                self.write(' ')
            else:
                self.write('#%s ' % self.tests[adr])
                self._seen[adr] = 1
            return
        self.tests[adr] = self.id
        self.write('#%s ' % self.id)
        self.id += 1
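
    # afterTest() records the ids of tests that failed or errored, so that a
    # later run with --failed can select just those tests again.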

    def afterTest(self, test):
        # None means test never ran, False means failed/err
        if test.passed is False:
            try:
                key = str(self.tests[test.address()])
            except KeyError:
                # never saw this test -- startTest didn't run
                pass
            else:
                if key not in self.failed:
                    self.failed.append(key)
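
    # tr() translates a requested name like '#2' or '2' into the stored test
    # name for that id; anything that isn't a known id is returned unchanged.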

    def tr(self, name):
        log.debug("tr '%s'", name)
        try:
            key = int(name.replace('#', ''))
        except ValueError:
            return name
        log.debug("Got key %s", key)
        # I'm running tests mapped from the ids file,
        # not collecting new ones
        if key in self.ids:
            return self.makeName(self.ids[key])
        return name
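
    # write() only emits the "#N " prefixes when verbosity is at least 2
    # (the -v switch), matching the note in the module docstring.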

    def write(self, output):
        if self._write_hashes:
            self.stream.write(output)
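

if __name__ == '__main__':
    # Minimal usage sketch: TestId is one of nose's builtin plugins, so it
    # only needs to be enabled on the command line.  Running this module
    # directly is roughly equivalent to ``nosetests -v --with-id`` and
    # assumes there are tests to collect in the current directory.
    import nose
    nose.main(argv=['nosetests', '-v', '--with-id'])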