PageRenderTime 46ms CodeModel.GetById 18ms RepoModel.GetById 0ms app.codeStats 0ms

/media/webrtc/trunk/build/android/pylib/test_package.py

https://bitbucket.org/hsoft/mozilla-central
Python | 199 lines | 155 code | 8 blank | 36 comment | 8 complexity | f7a6024ccdd2dad60026dc6b056cc47c MD5 | raw file
Possible License(s): JSON, LGPL-2.1, LGPL-3.0, AGPL-1.0, MIT, MPL-2.0-no-copyleft-exception, Apache-2.0, GPL-2.0, BSD-2-Clause, MPL-2.0, BSD-3-Clause, 0BSD
  1. # Copyright (c) 2012 The Chromium Authors. All rights reserved.
  2. # Use of this source code is governed by a BSD-style license that can be
  3. # found in the LICENSE file.
  4. import logging
  5. import re
  6. import os
  7. import pexpect
  8. from perf_tests_helper import PrintPerfResult
  9. from test_result import BaseTestResult, TestResults
  10. # TODO(bulach): TestPackage, TestPackageExecutable and
  11. # TestPackageApk are a work in progress related to making the native tests
  12. # run as a NDK-app from an APK rather than a stand-alone executable.
  13. class TestPackage(object):
  14. """A helper base class for both APK and stand-alone executables.
  15. Args:
  16. adb: ADB interface the tests are using.
  17. device: Device to run the tests.
  18. test_suite: A specific test suite to run, empty to run all.
  19. timeout: Timeout for each test.
  20. rebaseline: Whether or not to run tests in isolation and update the filter.
  21. performance_test: Whether or not performance test(s).
  22. cleanup_test_files: Whether or not to cleanup test files on device.
  23. tool: Name of the Valgrind tool.
  24. dump_debug_info: A debug_info object.
  25. """
  26. def __init__(self, adb, device, test_suite, timeout, rebaseline,
  27. performance_test, cleanup_test_files, tool, dump_debug_info):
  28. self.adb = adb
  29. self.device = device
  30. self.test_suite_full = test_suite
  31. self.test_suite = os.path.splitext(test_suite)[0]
  32. self.test_suite_basename = self._GetTestSuiteBaseName()
  33. self.test_suite_dirname = os.path.dirname(
  34. self.test_suite.split(self.test_suite_basename)[0]);
  35. self.rebaseline = rebaseline
  36. self.performance_test = performance_test
  37. self.cleanup_test_files = cleanup_test_files
  38. self.tool = tool
  39. if timeout == 0:
  40. timeout = 60
  41. # On a VM (e.g. chromium buildbots), this timeout is way too small.
  42. if os.environ.get('BUILDBOT_SLAVENAME'):
  43. timeout = timeout * 2
  44. self.timeout = timeout * self.tool.GetTimeoutScale()
  45. self.dump_debug_info = dump_debug_info
  46. def _BeginGetIOStats(self):
  47. """Gets I/O statistics before running test.
  48. Return:
  49. I/O stats object.The I/O stats object may be None if the test is not
  50. performance test.
  51. """
  52. initial_io_stats = None
  53. # Try to get the disk I/O statistics for all performance tests.
  54. if self.performance_test and not self.rebaseline:
  55. initial_io_stats = self.adb.GetIoStats()
  56. return initial_io_stats
  57. def _EndGetIOStats(self, initial_io_stats):
  58. """Gets I/O statistics after running test and calcuate the I/O delta.
  59. Args:
  60. initial_io_stats: I/O stats object got from _BeginGetIOStats.
  61. Return:
  62. String for formated diso I/O statistics.
  63. """
  64. disk_io = ''
  65. if self.performance_test and initial_io_stats:
  66. final_io_stats = self.adb.GetIoStats()
  67. for stat in final_io_stats:
  68. disk_io += '\n' + PrintPerfResult(stat, stat,
  69. [final_io_stats[stat] -
  70. initial_io_stats[stat]],
  71. stat.split('_')[1],
  72. print_to_stdout=False)
  73. logging.info(disk_io)
  74. return disk_io
  75. def GetDisabledPrefixes(self):
  76. return ['DISABLED_', 'FLAKY_', 'FAILS_']
  77. def _ParseGTestListTests(self, all_tests):
  78. ret = []
  79. current = ''
  80. disabled_prefixes = self.GetDisabledPrefixes()
  81. for test in all_tests:
  82. if not test:
  83. continue
  84. if test[0] != ' ' and test.endswith('.'):
  85. current = test
  86. continue
  87. if 'YOU HAVE' in test:
  88. break
  89. test_name = test[2:]
  90. if not any([test_name.startswith(x) for x in disabled_prefixes]):
  91. ret += [current + test_name]
  92. return ret
  def PushDataAndPakFiles(self):
    """Pushes the .pak resources and test data some suites need on-device.

    Which files are pushed (to /data/local/tmp) depends on the test suite
    being run; suites not listed here need nothing.
    """
    suite = self.test_suite_basename
    pushes = []
    if suite in ('ui_unittests', 'unit_tests'):
      pushes.append(('/chrome.pak', '/data/local/tmp/paks/chrome.pak'))
      pushes.append(('/locales/en-US.pak', '/data/local/tmp/paks/en-US.pak'))
    if suite == 'unit_tests':
      pushes.append(('/resources.pak', '/data/local/tmp/paks/resources.pak'))
      pushes.append(('/chrome_100_percent.pak',
                     '/data/local/tmp/paks/chrome_100_percent.pak'))
      pushes.append(('/test_data', '/data/local/tmp/test_data'))
    if suite == 'content_unittests':
      pushes.append(('/content_resources.pak',
                     '/data/local/tmp/paks/content_resources.pak'))
    for relative_source, device_path in pushes:
      self.adb.PushIfNeeded(self.test_suite_dirname + relative_source,
                            device_path)
  def _WatchTestOutput(self, p):
    """Watches the test output, collecting per-test results.

    Drives a pexpect loop over the gtest output markers ([ RUN ], [ OK ],
    [ FAILED ], [ CRASHED ], [ END ], [ RUNNER_FAILED ]) and buckets each
    test into ok/failed/crashed lists.

    Args:
      p: the process generating output as created by pexpect.spawn.

    Returns:
      A TestResults object built from the collected per-test results.
    """
    ok_tests = []
    failed_tests = []
    crashed_tests = []
    timed_out = False
    overall_fail = False
    # NOTE(review): these patterns use non-raw strings with '\[' escapes;
    # they work, but raw strings (r'\[ RUN \]...') would be conventional.
    re_run = re.compile('\[ RUN \] ?(.*)\r\n')
    # APK tests rely on the END tag.
    re_end = re.compile('\[ END \] ?(.*)\r\n')
    # Signal handlers are installed before starting tests
    # to output the CRASHED marker when a crash happens.
    re_crash = re.compile('\[ CRASHED \](.*)\r\n')
    re_fail = re.compile('\[ FAILED \] ?(.*)\r\n')
    re_runner_fail = re.compile('\[ RUNNER_FAILED \] ?(.*)\r\n')
    re_ok = re.compile('\[ OK \] ?(.*)\r\n')
    io_stats_before = self._BeginGetIOStats()
    while True:
      # Wait for the next test to start (or for the run to finish/fail).
      found = p.expect([re_run, pexpect.EOF, re_end, re_runner_fail],
                       timeout=self.timeout)
      if found == 1:  # matched pexpect.EOF
        break
      if found == 2:  # matched END.
        break
      if found == 3:  # RUNNER_FAILED
        logging.error('RUNNER_FAILED')
        overall_fail = True
        break
      if self.dump_debug_info:
        self.dump_debug_info.TakeScreenshot('_Test_Start_Run_')
      full_test_name = p.match.group(1)
      # Wait for this test's outcome marker.
      found = p.expect([re_ok, re_fail, re_crash, pexpect.EOF, pexpect.TIMEOUT],
                       timeout=self.timeout)
      if found == 0:  # re_ok
        ok_tests += [BaseTestResult(full_test_name.replace('\r', ''),
                                    p.before)]
        continue
      if found == 2:  # re_crash
        crashed_tests += [BaseTestResult(full_test_name.replace('\r', ''),
                                         p.before)]
        overall_fail = True
        break
      # The test failed.
      # Reached for FAILED (index 1) as well as EOF/TIMEOUT (3/4): a test
      # that bailed out without printing a marker is recorded as a failure.
      failed_tests += [BaseTestResult(full_test_name.replace('\r', ''),
                                      p.before)]
      if found >= 3:
        # The test bailed out (i.e., didn't print OK or FAIL).
        if found == 4:  # pexpect.TIMEOUT
          logging.error('Test terminated after %d second timeout.',
                        self.timeout)
          timed_out = True
        break
    p.close()
    if not self.rebaseline:
      # NOTE(review): _EndGetIOStats returns a str; list += str extends
      # ok_tests with individual *characters* when the string is non-empty
      # (a no-op for ''). Presumably one entry was intended — confirm
      # against what TestResults.FromRun expects in 'ok'.
      ok_tests += self._EndGetIOStats(io_stats_before)
      ret_code = self._GetGTestReturnCode()
      if ret_code:
        failed_tests += [BaseTestResult('gtest exit code: %d' % ret_code,
                                        'pexpect.before: %s'
                                        '\npexpect.after: %s'
                                        % (p.before,
                                           p.after))]
    # Create TestResults and return
    return TestResults.FromRun(ok=ok_tests, failed=failed_tests,
                               crashed=crashed_tests, timed_out=timed_out,
                               overall_fail=overall_fail)