/js/src/jit-test/jit_test.py

http://github.com/zpao/v8monkey · Python · 528 lines · 423 code · 75 blank · 30 comment · 125 complexity · c85e2815b793b1668aa718832192d06f MD5 · raw file

  1. #!/usr/bin/env python
  2. # jit_test.py -- Python harness for JavaScript trace tests.
  3. import datetime, os, re, sys, tempfile, traceback, time, shlex
  4. import subprocess
  5. from subprocess import *
  6. from threading import Thread
# Per-debugger launch configuration: "interactive" is whether the debugger
# takes over the terminal; "args" are extra command-line arguments to pass it.
# NOTE(review): not referenced anywhere in this file -- presumably consumed
# by another harness module; confirm before removing.
DEBUGGER_INFO = {
    "gdb": {
        "interactive": True,
        "args": "-q --args"
    },
    "valgrind": {
        "interactive": False,
        "args": "--leak-check=full"
    }
}
  17. # Backported from Python 3.1 posixpath.py
  18. def _relpath(path, start=None):
  19. """Return a relative version of a path"""
  20. if not path:
  21. raise ValueError("no path specified")
  22. if start is None:
  23. start = os.curdir
  24. start_list = os.path.abspath(start).split(os.sep)
  25. path_list = os.path.abspath(path).split(os.sep)
  26. # Work out how much of the filepath is shared by start and path.
  27. i = len(os.path.commonprefix([start_list, path_list]))
  28. rel_list = [os.pardir] * (len(start_list)-i) + path_list[i:]
  29. if not rel_list:
  30. return os.curdir
  31. return os.path.join(*rel_list)
  32. os.path.relpath = _relpath
  33. class Test:
  34. def __init__(self, path):
  35. self.path = path # path to test file
  36. self.jitflags = [] # jit flags to enable
  37. self.slow = False # True means the test is slow-running
  38. self.allow_oom = False # True means that OOM is not considered a failure
  39. self.valgrind = False # True means run under valgrind
  40. self.expect_error = '' # Errors to expect and consider passing
  41. self.expect_status = 0 # Exit status to expect from shell
  42. def copy(self):
  43. t = Test(self.path)
  44. t.jitflags = self.jitflags[:]
  45. t.slow = self.slow
  46. t.allow_oom = self.allow_oom
  47. t.valgrind = self.valgrind
  48. t.expect_error = self.expect_error
  49. t.expect_status = self.expect_status
  50. return t
  51. COOKIE = '|jit-test|'
  52. @classmethod
  53. def from_file(cls, path, options):
  54. test = cls(path)
  55. line = open(path).readline()
  56. i = line.find(cls.COOKIE)
  57. if i != -1:
  58. meta = line[i + len(cls.COOKIE):].strip('\n')
  59. parts = meta.split(';')
  60. for part in parts:
  61. part = part.strip()
  62. if not part:
  63. continue
  64. name, _, value = part.partition(':')
  65. if value:
  66. value = value.strip()
  67. if name == 'error':
  68. test.expect_error = value
  69. elif name == 'exitstatus':
  70. try:
  71. test.expect_status = int(value, 0);
  72. except ValueError:
  73. print("warning: couldn't parse exit status %s"%value)
  74. else:
  75. print('warning: unrecognized |jit-test| attribute %s'%part)
  76. else:
  77. if name == 'slow':
  78. test.slow = True
  79. elif name == 'allow-oom':
  80. test.allow_oom = True
  81. elif name == 'valgrind':
  82. test.valgrind = options.valgrind
  83. elif name == 'mjitalways':
  84. test.jitflags.append('-a')
  85. elif name == 'debug':
  86. test.jitflags.append('-d')
  87. elif name == 'mjit':
  88. test.jitflags.append('-m')
  89. else:
  90. print('warning: unrecognized |jit-test| attribute %s'%part)
  91. if options.valgrind_all:
  92. test.valgrind = True
  93. return test
  94. def find_tests(dir, substring = None):
  95. ans = []
  96. for dirpath, dirnames, filenames in os.walk(dir):
  97. dirnames.sort()
  98. filenames.sort()
  99. if dirpath == '.':
  100. continue
  101. for filename in filenames:
  102. if not filename.endswith('.js'):
  103. continue
  104. if filename in ('shell.js', 'browser.js', 'jsref.js'):
  105. continue
  106. test = os.path.join(dirpath, filename)
  107. if substring is None or substring in os.path.relpath(test, dir):
  108. ans.append(test)
  109. return ans
  110. def get_test_cmd(path, jitflags, lib_dir, shell_args):
  111. libdir_var = lib_dir
  112. if not libdir_var.endswith('/'):
  113. libdir_var += '/'
  114. expr = "const platform=%r; const libdir=%r;"%(sys.platform, libdir_var)
  115. # We may have specified '-a' or '-d' twice: once via --jitflags, once
  116. # via the "|jit-test|" line. Remove dups because they are toggles.
  117. return ([ JS ] + list(set(jitflags)) + shell_args +
  118. [ '-e', expr, '-f', os.path.join(lib_dir, 'prolog.js'), '-f', path ])
  119. def set_limits():
  120. # resource module not supported on all platforms
  121. try:
  122. import resource
  123. GB = 2**30
  124. resource.setrlimit(resource.RLIMIT_AS, (1*GB, 1*GB))
  125. except:
  126. return
  127. def tmppath(token):
  128. fd, path = tempfile.mkstemp(prefix=token)
  129. os.close(fd)
  130. return path
  131. def read_and_unlink(path):
  132. f = open(path)
  133. d = f.read()
  134. f.close()
  135. os.unlink(path)
  136. return d
  137. def th_run_cmd(cmdline, options, l):
  138. # close_fds and preexec_fn are not supported on Windows and will
  139. # cause a ValueError.
  140. if sys.platform != 'win32':
  141. options["close_fds"] = True
  142. options["preexec_fn"] = set_limits
  143. p = Popen(cmdline, stdin=PIPE, stdout=PIPE, stderr=PIPE, **options)
  144. l[0] = p
  145. out, err = p.communicate()
  146. l[1] = (out, err, p.returncode)
  147. def run_timeout_cmd(cmdline, options, timeout=60.0):
  148. l = [ None, None ]
  149. timed_out = False
  150. th = Thread(target=th_run_cmd, args=(cmdline, options, l))
  151. th.start()
  152. th.join(timeout)
  153. while th.isAlive():
  154. if l[0] is not None:
  155. try:
  156. # In Python 3, we could just do l[0].kill().
  157. import signal
  158. if sys.platform != 'win32':
  159. os.kill(l[0].pid, signal.SIGKILL)
  160. time.sleep(.1)
  161. timed_out = True
  162. except OSError:
  163. # Expecting a "No such process" error
  164. pass
  165. th.join()
  166. (out, err, code) = l[1]
  167. return (out, err, code, timed_out)
  168. def run_cmd(cmdline, env, timeout):
  169. return run_timeout_cmd(cmdline, { 'env': env }, timeout)
  170. def run_cmd_avoid_stdio(cmdline, env, timeout):
  171. stdoutPath, stderrPath = tmppath('jsstdout'), tmppath('jsstderr')
  172. env['JS_STDOUT'] = stdoutPath
  173. env['JS_STDERR'] = stderrPath
  174. _, __, code = run_timeout_cmd(cmdline, { 'env': env }, timeout)
  175. return read_and_unlink(stdoutPath), read_and_unlink(stderrPath), code
  176. def run_test(test, lib_dir, shell_args):
  177. cmd = get_test_cmd(test.path, test.jitflags, lib_dir, shell_args)
  178. if (test.valgrind and
  179. any([os.path.exists(os.path.join(d, 'valgrind'))
  180. for d in os.environ['PATH'].split(os.pathsep)])):
  181. valgrind_prefix = [ 'valgrind',
  182. '-q',
  183. '--smc-check=all',
  184. '--error-exitcode=1',
  185. '--leak-check=full']
  186. if os.uname()[0] == 'Darwin':
  187. valgrind_prefix += ['--dsymutil=yes']
  188. cmd = valgrind_prefix + cmd
  189. if OPTIONS.show_cmd:
  190. print(subprocess.list2cmdline(cmd))
  191. if OPTIONS.avoid_stdio:
  192. run = run_cmd_avoid_stdio
  193. else:
  194. run = run_cmd
  195. out, err, code, timed_out = run(cmd, os.environ, OPTIONS.timeout)
  196. if OPTIONS.show_output:
  197. sys.stdout.write(out)
  198. sys.stdout.write(err)
  199. sys.stdout.write('Exit code: %s\n' % code)
  200. if test.valgrind:
  201. sys.stdout.write(err)
  202. return (check_output(out, err, code, test),
  203. out, err, code, timed_out)
  204. def check_output(out, err, rc, test):
  205. if test.expect_error:
  206. return test.expect_error in err
  207. for line in out.split('\n'):
  208. if line.startswith('Trace stats check failed'):
  209. return False
  210. for line in err.split('\n'):
  211. if 'Assertion failed:' in line:
  212. return False
  213. if rc != test.expect_status:
  214. # Allow a non-zero exit code if we want to allow OOM, but only if we
  215. # actually got OOM.
  216. return test.allow_oom and ': out of memory' in err
  217. return True
  218. def print_tinderbox(label, test, message=None):
  219. jitflags = " ".join(test.jitflags)
  220. result = "%s | jit_test.py %-15s| %s" % (label, jitflags, test.path)
  221. if message:
  222. result += ": " + message
  223. print result
def run_tests(tests, test_dir, lib_dir, shell_args):
    """Run every test in *tests* and return True iff all of them passed.

    Side effects: prints Tinderbox lines when OPTIONS.tinderbox is set,
    drives an optional progress bar, prints a FAILURES/TIMEOUTS summary,
    and may write the failing-test list to OPTIONS.write_failures.
    """
    # Progress bar is best-effort: only when output is not already noisy and
    # the third-party progressbar module is importable.
    pb = None
    if not OPTIONS.hide_progress and not OPTIONS.show_cmd:
        try:
            from progressbar import ProgressBar
            pb = ProgressBar('', len(tests), 24)
        except ImportError:
            pass
    failures = []     # entries: [test, stdout, stderr, exitcode, timed_out]
    timeouts = 0
    complete = False  # True only if the loop ran to the end uninterrupted
    doing = 'before starting'
    try:
        for i, test in enumerate(tests):
            # Track the current phase so an interrupt message can say where.
            doing = 'on %s'%test.path
            ok, out, err, code, timed_out = run_test(test, lib_dir, shell_args)
            doing = 'after %s'%test.path
            if not ok:
                failures.append([ test, out, err, code, timed_out ])
            if timed_out:
                timeouts += 1
            if OPTIONS.tinderbox:
                if ok:
                    print_tinderbox("TEST-PASS", test);
                else:
                    # Use the last non-empty output line as the fail message.
                    lines = [ _ for _ in out.split('\n') + err.split('\n')
                              if _ != '' ]
                    if len(lines) >= 1:
                        msg = lines[-1]
                    else:
                        msg = ''
                    print_tinderbox("TEST-UNEXPECTED-FAIL", test, msg);
            n = i + 1
            if pb:
                # Label fields: [passed | failed | timeouts | run so far]
                pb.label = '[%4d|%4d|%4d|%4d]'%(n - len(failures), len(failures), timeouts, n)
                pb.update(n)
        complete = True
    except KeyboardInterrupt:
        # NOTE(review): 'test' is unbound if the interrupt lands before the
        # first loop iteration -- this would raise NameError; confirm.
        print_tinderbox("TEST-UNEXPECTED-FAIL", test);
    if pb:
        pb.finish()
    if failures:
        if OPTIONS.write_failures:
            try:
                out = open(OPTIONS.write_failures, 'w')
                # Don't write duplicate entries when we are doing multiple failures per job.
                written = set()
                for test, fout, ferr, fcode, _ in failures:
                    if test.path not in written:
                        out.write(os.path.relpath(test.path, test_dir) + '\n')
                        if OPTIONS.write_failure_output:
                            out.write(fout)
                            out.write(ferr)
                            out.write('Exit code: ' + str(fcode) + "\n")
                        written.add(test.path)
                out.close()
            except IOError:
                sys.stderr.write("Exception thrown trying to write failure file '%s'\n"%
                                 OPTIONS.write_failures)
                traceback.print_exc()
                sys.stderr.write('---\n')
        def show_test(test):
            # One line per failing test: either the full shell command, or
            # just the flags and path.
            if OPTIONS.show_failed:
                print(' ' + subprocess.list2cmdline(get_test_cmd(test.path, test.jitflags, lib_dir, shell_args)))
            else:
                print(' ' + ' '.join(test.jitflags + [ test.path ]))
        print('FAILURES:')
        for test, _, __, ___, timed_out in failures:
            if not timed_out:
                show_test(test)
        print('TIMEOUTS:')
        for test, _, __, ___, timed_out in failures:
            if timed_out:
                show_test(test)
        return False
    else:
        print('PASSED ALL' + ('' if complete else ' (partial run -- interrupted by user %s)'%doing))
        return True
  302. def parse_jitflags():
  303. jitflags = [ [ '-' + flag for flag in flags ]
  304. for flags in OPTIONS.jitflags.split(',') ]
  305. for flags in jitflags:
  306. for flag in flags:
  307. if flag not in ('-m', '-a', '-p', '-d', '-n'):
  308. print('Invalid jit flag: "%s"'%flag)
  309. sys.exit(1)
  310. return jitflags
  311. def platform_might_be_android():
  312. try:
  313. # The python package for SL4A provides an |android| module.
  314. # If that module is present, we're likely in SL4A-python on
  315. # device. False positives and negatives are possible,
  316. # however.
  317. import android
  318. return True
  319. except ImportError:
  320. return False
  321. def stdio_might_be_broken():
  322. return platform_might_be_android()
JS = None       # absolute path to the JS shell binary (assigned in main())
OPTIONS = None  # parsed command-line options (assigned in main())
  325. def main(argv):
  326. global JS, OPTIONS
  327. script_path = os.path.abspath(__file__)
  328. script_dir = os.path.dirname(script_path)
  329. test_dir = os.path.join(script_dir, 'tests')
  330. lib_dir = os.path.join(script_dir, 'lib')
  331. # The [TESTS] optional arguments are paths of test files relative
  332. # to the jit-test/tests directory.
  333. from optparse import OptionParser
  334. op = OptionParser(usage='%prog [options] JS_SHELL [TESTS]')
  335. op.add_option('-s', '--show-cmd', dest='show_cmd', action='store_true',
  336. help='show js shell command run')
  337. op.add_option('-f', '--show-failed-cmd', dest='show_failed',
  338. action='store_true', help='show command lines of failed tests')
  339. op.add_option('-o', '--show-output', dest='show_output', action='store_true',
  340. help='show output from js shell')
  341. op.add_option('-x', '--exclude', dest='exclude', action='append',
  342. help='exclude given test dir or path')
  343. op.add_option('--no-slow', dest='run_slow', action='store_false',
  344. help='do not run tests marked as slow')
  345. op.add_option('-t', '--timeout', dest='timeout', type=float, default=150.0,
  346. help='set test timeout in seconds')
  347. op.add_option('--no-progress', dest='hide_progress', action='store_true',
  348. help='hide progress bar')
  349. op.add_option('--tinderbox', dest='tinderbox', action='store_true',
  350. help='Tinderbox-parseable output format')
  351. op.add_option('--args', dest='shell_args', default='',
  352. help='extra args to pass to the JS shell')
  353. op.add_option('-w', '--write-failures', dest='write_failures', metavar='FILE',
  354. help='Write a list of failed tests to [FILE]')
  355. op.add_option('-r', '--read-tests', dest='read_tests', metavar='FILE',
  356. help='Run test files listed in [FILE]')
  357. op.add_option('-R', '--retest', dest='retest', metavar='FILE',
  358. help='Retest using test list file [FILE]')
  359. op.add_option('-g', '--debug', dest='debug', action='store_true',
  360. help='Run test in gdb')
  361. op.add_option('--valgrind', dest='valgrind', action='store_true',
  362. help='Enable the |valgrind| flag, if valgrind is in $PATH.')
  363. op.add_option('--valgrind-all', dest='valgrind_all', action='store_true',
  364. help='Run all tests with valgrind, if valgrind is in $PATH.')
  365. op.add_option('--jitflags', dest='jitflags', default='m,mn',
  366. help='Example: --jitflags=m,mn to run each test with -m, -m -n [default=%default]')
  367. op.add_option('--avoid-stdio', dest='avoid_stdio', action='store_true',
  368. help='Use js-shell file indirection instead of piping stdio.')
  369. op.add_option('--write-failure-output', dest='write_failure_output', action='store_true',
  370. help='With --write-failures=FILE, additionally write the output of failed tests to [FILE]')
  371. (OPTIONS, args) = op.parse_args(argv)
  372. if len(args) < 1:
  373. op.error('missing JS_SHELL argument')
  374. # We need to make sure we are using backslashes on Windows.
  375. JS, test_args = os.path.normpath(args[0]), args[1:]
  376. JS = os.path.realpath(JS) # Burst through the symlinks!
  377. if stdio_might_be_broken():
  378. # Prefer erring on the side of caution and not using stdio if
  379. # it might be broken on this platform. The file-redirect
  380. # fallback should work on any platform, so at worst by
  381. # guessing wrong we might have slowed down the tests a bit.
  382. #
  383. # XXX technically we could check for broken stdio, but it
  384. # really seems like overkill.
  385. OPTIONS.avoid_stdio = True
  386. if OPTIONS.retest:
  387. OPTIONS.read_tests = OPTIONS.retest
  388. OPTIONS.write_failures = OPTIONS.retest
  389. test_list = []
  390. read_all = True
  391. if test_args:
  392. read_all = False
  393. for arg in test_args:
  394. test_list += find_tests(test_dir, arg)
  395. if OPTIONS.read_tests:
  396. read_all = False
  397. try:
  398. f = open(OPTIONS.read_tests)
  399. for line in f:
  400. test_list.append(os.path.join(test_dir, line.strip('\n')))
  401. f.close()
  402. except IOError:
  403. if OPTIONS.retest:
  404. read_all = True
  405. else:
  406. sys.stderr.write("Exception thrown trying to read test file '%s'\n"%
  407. OPTIONS.read_tests)
  408. traceback.print_exc()
  409. sys.stderr.write('---\n')
  410. if read_all:
  411. test_list = find_tests(test_dir)
  412. if OPTIONS.exclude:
  413. exclude_list = []
  414. for exclude in OPTIONS.exclude:
  415. exclude_list += find_tests(test_dir, exclude)
  416. test_list = [ test for test in test_list if test not in set(exclude_list) ]
  417. if not test_list:
  418. print >> sys.stderr, "No tests found matching command line arguments."
  419. sys.exit(0)
  420. test_list = [ Test.from_file(_, OPTIONS) for _ in test_list ]
  421. if not OPTIONS.run_slow:
  422. test_list = [ _ for _ in test_list if not _.slow ]
  423. # The full test list is ready. Now create copies for each JIT configuration.
  424. job_list = []
  425. jitflags_list = parse_jitflags()
  426. for test in test_list:
  427. for jitflags in jitflags_list:
  428. new_test = test.copy()
  429. new_test.jitflags.extend(jitflags)
  430. job_list.append(new_test)
  431. shell_args = shlex.split(OPTIONS.shell_args)
  432. if OPTIONS.debug:
  433. if len(job_list) > 1:
  434. print('Multiple tests match command line arguments, debugger can only run one')
  435. for tc in job_list:
  436. print(' %s'%tc.path)
  437. sys.exit(1)
  438. tc = job_list[0]
  439. cmd = [ 'gdb', '--args' ] + get_test_cmd(tc.path, tc.jitflags, lib_dir, shell_args)
  440. call(cmd)
  441. sys.exit()
  442. try:
  443. ok = run_tests(job_list, test_dir, lib_dir, shell_args)
  444. if not ok:
  445. sys.exit(2)
  446. except OSError:
  447. if not os.path.exists(JS):
  448. print >> sys.stderr, "JS shell argument: file does not exist: '%s'"%JS
  449. sys.exit(1)
  450. else:
  451. raise
if __name__ == '__main__':
    # argv[0] is the script name; pass only the real arguments on.
    main(sys.argv[1:])