/Tools/pybench/pybench.py

http://unladen-swallow.googlecode.com/

#!/usr/local/bin/python -O

""" A Python Benchmark Suite

"""
#
# Note: Please keep this module compatible with Python 1.5.2.
#
# Tests may include features in later Python versions, but these
# should then be embedded in try-except clauses in the configuration
# module Setup.py.
#

# pybench Copyright
__copyright__ = """\
Copyright (c), 1997-2006, Marc-Andre Lemburg (mal@lemburg.com)
Copyright (c), 2000-2006, eGenix.com Software GmbH (info@egenix.com)

                   All Rights Reserved.

Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee or royalty is hereby
granted, provided that the above copyright notice appear in all copies
and that both that copyright notice and this permission notice appear
in supporting documentation or portions thereof, including
modifications, that you make.

THE AUTHOR MARC-ANDRE LEMBURG DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
"""

import sys, time, operator, string, platform
import re  # used by the -t test name filter in main()
from CommandLine import *

try:
    import cPickle
    pickle = cPickle
except ImportError:
    import pickle

# Version number; version history: see README file !
__version__ = '2.0'

### Constants

# Second fractions
MILLI_SECONDS = 1e3
MICRO_SECONDS = 1e6
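
# Example: timings are kept in seconds, so a 0.0125s run-time prints as
# 0.0125 * MILLI_SECONDS == 12.5 (ms), and an 8.3e-6s operation average
# as 8.3e-6 * MICRO_SECONDS == 8.3 (us).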

# Percent unit
PERCENT = 100

# Horizontal line length
LINE = 79

# Minimum test run-time
MIN_TEST_RUNTIME = 1e-3

# Number of calibration runs to use for calibrating the tests
CALIBRATION_RUNS = 20

# Number of calibration loops to run for each calibration run
CALIBRATION_LOOPS = 20

# Allow skipping calibration ?
ALLOW_SKIPPING_CALIBRATION = 1

# Timer types
TIMER_TIME_TIME = 'time.time'
TIMER_TIME_CLOCK = 'time.clock'
TIMER_SYSTIMES_PROCESSTIME = 'systimes.processtime'

# Choose platform default timer
if sys.platform[:3] == 'win':
    # On WinXP this has 2.5ms resolution
    TIMER_PLATFORM_DEFAULT = TIMER_TIME_CLOCK
else:
    # On Linux this has 1ms resolution
    TIMER_PLATFORM_DEFAULT = TIMER_TIME_TIME

# Print debug information ?
_debug = 0

### Helpers

def get_timer(timertype):

    if timertype == TIMER_TIME_TIME:
        return time.time
    elif timertype == TIMER_TIME_CLOCK:
        return time.clock
    elif timertype == TIMER_SYSTIMES_PROCESSTIME:
        import systimes
        return systimes.processtime
    else:
        raise TypeError('unknown timer type: %s' % timertype)
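
# Usage sketch (illustrative): resolve the timer function once, then
# call it around the timed work, as the methods below do:
#
#   timer = get_timer(TIMER_TIME_TIME)   # returns time.time itself
#   t = timer()
#   ...                                  # timed work
#   t = timer() - t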

def get_machine_details():

    if _debug:
        print 'Getting machine details...'
    buildno, builddate = platform.python_build()
    python = platform.python_version()
    try:
        unichr(100000)
    except ValueError:
        # UCS2 build (standard)
        unicode = 'UCS2'
    except NameError:
        unicode = None
    else:
        # UCS4 build (most recent Linux distros)
        unicode = 'UCS4'
    bits, linkage = platform.architecture()
    return {
        'platform': platform.platform(),
        'processor': platform.processor(),
        'executable': sys.executable,
        'implementation': getattr(platform, 'python_implementation',
                                  lambda: 'n/a')(),
        'python': platform.python_version(),
        'compiler': platform.python_compiler(),
        'buildno': buildno,
        'builddate': builddate,
        'unicode': unicode,
        'bits': bits,
        }

def print_machine_details(d, indent=''):

    l = ['Machine Details:',
         '   Platform ID:    %s' % d.get('platform', 'n/a'),
         '   Processor:      %s' % d.get('processor', 'n/a'),
         '',
         'Python:',
         '   Implementation: %s' % d.get('implementation', 'n/a'),
         '   Executable:     %s' % d.get('executable', 'n/a'),
         '   Version:        %s' % d.get('python', 'n/a'),
         '   Compiler:       %s' % d.get('compiler', 'n/a'),
         '   Bits:           %s' % d.get('bits', 'n/a'),
         '   Build:          %s (#%s)' % (d.get('builddate', 'n/a'),
                                          d.get('buildno', 'n/a')),
         '   Unicode:        %s' % d.get('unicode', 'n/a'),
         ]
    print indent + string.join(l, '\n' + indent) + '\n'

### Test baseclass

class Test:

    """ All tests must have this class as baseclass. It provides
        the necessary interface to the benchmark machinery.

        The tests must set .rounds to a value high enough to let the
        test run between 20-50 seconds. This is needed because
        clock()-timing only gives rather inaccurate values (on Linux,
        for example, it is accurate to a few hundredths of a
        second). If you don't want to wait that long, use a warp
        factor larger than 1.

        It is also important to set the .operations variable to a
        value representing the number of "virtual operations" done per
        call of .run().

        If you change a test in some way, don't forget to increase
        its version number.
    """

    ### Instance variables that each test should override

    # Version number of the test as float (x.yy); this is important
    # for comparisons of benchmark runs - tests with unequal version
    # number will not get compared.
    version = 2.0

    # The number of abstract operations done in each round of the
    # test. An operation is the basic unit of what you want to
    # measure. The benchmark will output the amount of run-time per
    # operation. Note that in order to raise the measured timings
    # significantly above noise level, it is often required to repeat
    # sets of operations more than once per test round. The measured
    # overhead per test round should be less than 1 second.
    operations = 1

    # Number of rounds to execute per test run. This should be
    # adjusted to a figure that results in a test run-time of between
    # 1-2 seconds.
    rounds = 100000

    ### Internal variables

    # Mark this class as implementing a test
    is_a_test = 1

    # Last timing: (real, run, overhead)
    last_timing = (0.0, 0.0, 0.0)

    # Warp factor to use for this test
    warp = 1

    # Number of calibration runs to use
    calibration_runs = CALIBRATION_RUNS

    # List of calibration timings
    overhead_times = None

    # List of test run timings
    times = []

    # Timer used for the benchmark
    timer = TIMER_PLATFORM_DEFAULT

    def __init__(self, warp=None, calibration_runs=None, timer=None):

        # Set parameters
        if warp is not None:
            self.rounds = int(self.rounds / warp)
            if self.rounds == 0:
                raise ValueError('warp factor set too high')
            self.warp = warp
        if calibration_runs is not None:
            if (not ALLOW_SKIPPING_CALIBRATION and
                calibration_runs < 1):
                raise ValueError('at least one calibration run is required')
            self.calibration_runs = calibration_runs
        if timer is not None:
            self.timer = timer

        # Init variables
        self.times = []
        self.overhead_times = []

        # We want these to be in the instance dict, so that pickle
        # saves them
        self.version = self.version
        self.operations = self.operations
        self.rounds = self.rounds
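
    # Illustrative arithmetic for the warp handling above: a test with
    # the default rounds = 100000 constructed with warp=4 only runs
    # 25000 rounds; Benchmark.print_comparison() below re-multiplies
    # timings by .warp, so runs at different warp factors remain
    # comparable.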

    def get_timer(self):

        """ Return the timer function to use for the test.
        """
        return get_timer(self.timer)

    def compatible(self, other):

        """ Return 1/0 depending on whether the test is compatible
            with the other Test instance or not.
        """
        if self.version != other.version:
            return 0
        if self.rounds != other.rounds:
            return 0
        return 1

    def calibrate_test(self):

        if self.calibration_runs == 0:
            self.overhead_times = [0.0]
            return

        calibrate = self.calibrate
        timer = self.get_timer()
        calibration_loops = range(CALIBRATION_LOOPS)

        # Time the calibration loop overhead
        prep_times = []
        for i in range(self.calibration_runs):
            t = timer()
            for i in calibration_loops:
                pass
            t = timer() - t
            prep_times.append(t)
        min_prep_time = min(prep_times)
        if _debug:
            print
            print 'Calib. prep time     = %.6fms' % (
                min_prep_time * MILLI_SECONDS)

        # Time the calibration runs (doing CALIBRATION_LOOPS loops of
        # .calibrate() method calls each)
        for i in range(self.calibration_runs):
            t = timer()
            for i in calibration_loops:
                calibrate()
            t = timer() - t
            self.overhead_times.append(t / CALIBRATION_LOOPS
                                       - min_prep_time)

        # Check the measured times
        min_overhead = min(self.overhead_times)
        max_overhead = max(self.overhead_times)
        if _debug:
            print 'Calib. overhead time = %.6fms' % (
                min_overhead * MILLI_SECONDS)
        if min_overhead < 0.0:
            raise ValueError('calibration setup did not work')
        if max_overhead - min_overhead > 0.1:
            raise ValueError(
                'overhead calibration timing range too inaccurate: '
                '%r - %r' % (min_overhead, max_overhead))

    def run(self):

        """ Run the test in two phases: first calibrate, then
            do the actual test. Be careful to keep the calibration
            timing low w/r to the test timing.
        """
        test = self.test
        timer = self.get_timer()

        # Get calibration
        min_overhead = min(self.overhead_times)

        # Test run
        t = timer()
        test()
        t = timer() - t
        if t < MIN_TEST_RUNTIME:
            raise ValueError('warp factor too high: '
                             'test times are < 1ms')
        eff_time = t - min_overhead
        if eff_time < 0:
            raise ValueError('wrong calibration')
        self.last_timing = (eff_time, t, min_overhead)
        self.times.append(eff_time)

    def calibrate(self):

        """ Calibrate the test.

            This method should execute everything that is needed to
            setup and run the test - except for the actual operations
            that you intend to measure. pybench uses this method to
            measure the test implementation overhead.
        """
        return

    def test(self):

        """ Run the test.

            The test needs to run self.rounds executing
            self.operations number of operations each.
        """
        return

    def stat(self):

        """ Return test run statistics as tuple:

            (minimum run time,
             average run time,
             total run time,
             average time per operation,
             minimum overhead time)
        """
        runs = len(self.times)
        if runs == 0:
            # No timings recorded: return zeros for all five fields
            return 0.0, 0.0, 0.0, 0.0, 0.0
        min_time = min(self.times)
        total_time = reduce(operator.add, self.times, 0.0)
        avg_time = total_time / float(runs)
        operation_avg = total_time / float(runs
                                           * self.rounds
                                           * self.operations)
        if self.overhead_times:
            min_overhead = min(self.overhead_times)
        else:
            min_overhead = self.last_timing[2]
        return min_time, avg_time, total_time, operation_avg, min_overhead
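
# Sketch of driving a single test by hand (hypothetical usage; the
# normal entry point is the PyBenchCmdline application further below):
#
#   test = ExampleConcatStrings(warp=1)   # hypothetical subclass, see above
#   test.calibrate_test()
#   for i in range(10):
#       test.run()
#   min_time, avg_time, total_time, op_avg, min_overhead = test.stat()
#   print 'per operation: %.2fus' % (op_avg * MICRO_SECONDS)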

### Load Setup

# This has to be done after the definition of the Test class, since
# the Setup module will import subclasses using this class.
import Setup

### Benchmark base class

class Benchmark:

    # Name of the benchmark
    name = ''

    # Number of benchmark rounds to run
    rounds = 1

    # Warp factor used to run the tests
    warp = 1

    # Number of calibration runs to use; a class-level default is
    # needed because .compatible() and .load_tests() read this
    # attribute even when the constructor was not given a value
    calibration_runs = CALIBRATION_RUNS

    # Average benchmark round time
    roundtime = 0

    # Benchmark version number as float x.yy
    version = 2.0

    # Produce verbose output ?
    verbose = 0

    # Dictionary with the machine details
    machine_details = None

    # Timer used for the benchmark
    timer = TIMER_PLATFORM_DEFAULT

    def __init__(self, name, verbose=None, timer=None, warp=None,
                 calibration_runs=None):

        if name:
            self.name = name
        else:
            self.name = '%04i-%02i-%02i %02i:%02i:%02i' % \
                        (time.localtime(time.time())[:6])
        if verbose is not None:
            self.verbose = verbose
        if timer is not None:
            self.timer = timer
        if warp is not None:
            self.warp = warp
        if calibration_runs is not None:
            self.calibration_runs = calibration_runs

        # Init vars
        self.tests = {}
        if _debug:
            print 'Getting machine details...'
        self.machine_details = get_machine_details()

        # Make .version an instance attribute to have it saved in the
        # Benchmark pickle
        self.version = self.version

    def get_timer(self):

        """ Return the timer function to use for the test.
        """
        return get_timer(self.timer)

    def compatible(self, other):

        """ Return 1/0 depending on whether the benchmark is
            compatible with the other Benchmark instance or not.
        """
        if self.version != other.version:
            return 0
        if (self.machine_details == other.machine_details and
            self.timer != other.timer):
            return 0
        if (self.calibration_runs == 0 and
            other.calibration_runs != 0):
            return 0
        if (self.calibration_runs != 0 and
            other.calibration_runs == 0):
            return 0
        return 1

    def load_tests(self, setupmod, limitnames=None):

        # Add tests
        if self.verbose:
            print 'Searching for tests ...'
            print '--------------------------------------'
        for testclass in setupmod.__dict__.values():
            if not hasattr(testclass, 'is_a_test'):
                continue
            name = testclass.__name__
            if name == 'Test':
                continue
            if (limitnames is not None and
                limitnames.search(name) is None):
                continue
            self.tests[name] = testclass(
                warp=self.warp,
                calibration_runs=self.calibration_runs,
                timer=self.timer)
        l = self.tests.keys()
        l.sort()
        if self.verbose:
            for name in l:
                print '  %s' % name
            print '--------------------------------------'
            print '  %i tests found' % len(l)
            print

    def calibrate(self):

        print 'Calibrating tests. Please wait...',
        sys.stdout.flush()
        if self.verbose:
            print
            print
            print 'Test                              min      max'
            print '-' * LINE
        tests = self.tests.items()
        tests.sort()
        for i in range(len(tests)):
            name, test = tests[i]
            test.calibrate_test()
            if self.verbose:
                print '%30s:  %6.3fms  %6.3fms' % \
                      (name,
                       min(test.overhead_times) * MILLI_SECONDS,
                       max(test.overhead_times) * MILLI_SECONDS)
        if self.verbose:
            print
            print 'Done with the calibration.'
        else:
            print 'done.'
        print

    def run(self):

        tests = self.tests.items()
        tests.sort()
        timer = self.get_timer()
        print 'Running %i round(s) of the suite at warp factor %i:' % \
              (self.rounds, self.warp)
        print
        self.roundtimes = []
        for i in range(self.rounds):
            if self.verbose:
                print ' Round %-25i  effective   absolute  overhead' % (i+1)
            total_eff_time = 0.0
            for j in range(len(tests)):
                name, test = tests[j]
                if self.verbose:
                    print '%30s:' % name,
                test.run()
                (eff_time, abs_time, min_overhead) = test.last_timing
                total_eff_time = total_eff_time + eff_time
                if self.verbose:
                    print '    %5.0fms    %5.0fms  %7.3fms' % \
                          (eff_time * MILLI_SECONDS,
                           abs_time * MILLI_SECONDS,
                           min_overhead * MILLI_SECONDS)
            self.roundtimes.append(total_eff_time)
            if self.verbose:
                print ('                               '
                       '    ------------------------------')
                print ('                               '
                       '    Totals:     %6.0fms' %
                       (total_eff_time * MILLI_SECONDS))
                print
            else:
                print '* Round %i done in %.3f seconds.' % (i+1,
                                                            total_eff_time)
        print

    def stat(self):

        """ Return benchmark run statistics as tuple:

            (minimum round time,
             average round time,
             maximum round time)

            XXX Currently not used, since the benchmark does test
                statistics across all rounds.
        """
        runs = len(self.roundtimes)
        if runs == 0:
            # No rounds recorded: return zeros for all three fields
            return 0.0, 0.0, 0.0
        min_time = min(self.roundtimes)
        total_time = reduce(operator.add, self.roundtimes, 0.0)
        avg_time = total_time / float(runs)
        max_time = max(self.roundtimes)
        return (min_time, avg_time, max_time)

    def print_header(self, title='Benchmark'):

        print '-' * LINE
        print '%s: %s' % (title, self.name)
        print '-' * LINE
        print
        print '    Rounds: %s' % self.rounds
        print '    Warp:   %s' % self.warp
        print '    Timer:  %s' % self.timer
        print
        if self.machine_details:
            print_machine_details(self.machine_details, indent='    ')
            print

    def print_benchmark(self, hidenoise=0, limitnames=None):

        print ('Test                          '
               '   minimum  average  operation  overhead')
        print '-' * LINE
        tests = self.tests.items()
        tests.sort()
        total_min_time = 0.0
        total_avg_time = 0.0
        for name, test in tests:
            if (limitnames is not None and
                limitnames.search(name) is None):
                continue
            (min_time,
             avg_time,
             total_time,
             op_avg,
             min_overhead) = test.stat()
            total_min_time = total_min_time + min_time
            total_avg_time = total_avg_time + avg_time
            print '%30s:  %5.0fms  %5.0fms  %6.2fus  %7.3fms' % \
                  (name,
                   min_time * MILLI_SECONDS,
                   avg_time * MILLI_SECONDS,
                   op_avg * MICRO_SECONDS,
                   min_overhead * MILLI_SECONDS)
        print '-' * LINE
        print ('Totals:                        '
               ' %6.0fms %6.0fms' %
               (total_min_time * MILLI_SECONDS,
                total_avg_time * MILLI_SECONDS,
                ))
        print

    def print_comparison(self, compare_to, hidenoise=0, limitnames=None):

        # Check benchmark versions
        if compare_to.version != self.version:
            print ('* Benchmark versions differ: '
                   'cannot compare this benchmark to "%s" !' %
                   compare_to.name)
            print
            self.print_benchmark(hidenoise=hidenoise,
                                 limitnames=limitnames)
            return

        # Print header
        compare_to.print_header('Comparing with')
        print ('Test                          '
               '   minimum run-time        average  run-time')
        print ('                              '
               '   this    other   diff    this    other   diff')
        print '-' * LINE

        # Print test comparisons
        tests = self.tests.items()
        tests.sort()
        total_min_time = other_total_min_time = 0.0
        total_avg_time = other_total_avg_time = 0.0
        benchmarks_compatible = self.compatible(compare_to)
        tests_compatible = 1
        for name, test in tests:
            if (limitnames is not None and
                limitnames.search(name) is None):
                continue
            (min_time,
             avg_time,
             total_time,
             op_avg,
             min_overhead) = test.stat()
            total_min_time = total_min_time + min_time
            total_avg_time = total_avg_time + avg_time
            try:
                other = compare_to.tests[name]
            except KeyError:
                other = None
            if other is None:
                # Other benchmark doesn't include the given test
                min_diff, avg_diff = 'n/a', 'n/a'
                other_min_time = 0.0
                other_avg_time = 0.0
                tests_compatible = 0
            else:
                (other_min_time,
                 other_avg_time,
                 other_total_time,
                 other_op_avg,
                 other_min_overhead) = other.stat()
                other_total_min_time = other_total_min_time + other_min_time
                other_total_avg_time = other_total_avg_time + other_avg_time
                if (benchmarks_compatible and
                    test.compatible(other)):
                    # Both benchmark and tests are comparable
                    min_diff = ((min_time * self.warp) /
                                (other_min_time * other.warp) - 1.0)
                    avg_diff = ((avg_time * self.warp) /
                                (other_avg_time * other.warp) - 1.0)
                    # The diffs are fractions at this point, so -d
                    # treats anything below 10% as noise
                    if hidenoise and abs(min_diff) < 0.10:
                        min_diff = ''
                    else:
                        min_diff = '%+5.1f%%' % (min_diff * PERCENT)
                    if hidenoise and abs(avg_diff) < 0.10:
                        avg_diff = ''
                    else:
                        avg_diff = '%+5.1f%%' % (avg_diff * PERCENT)
                else:
                    # Benchmark or tests are not comparable
                    min_diff, avg_diff = 'n/a', 'n/a'
                    tests_compatible = 0
            print '%30s: %5.0fms %5.0fms %7s %5.0fms %5.0fms %7s' % \
                  (name,
                   min_time * MILLI_SECONDS,
                   other_min_time * MILLI_SECONDS * compare_to.warp / self.warp,
                   min_diff,
                   avg_time * MILLI_SECONDS,
                   other_avg_time * MILLI_SECONDS * compare_to.warp / self.warp,
                   avg_diff)
        print '-' * LINE

        # Summarise test results
        if not benchmarks_compatible or not tests_compatible:
            min_diff, avg_diff = 'n/a', 'n/a'
        else:
            if other_total_min_time != 0.0:
                min_diff = '%+5.1f%%' % (
                    ((total_min_time * self.warp) /
                     (other_total_min_time * compare_to.warp) - 1.0) * PERCENT)
            else:
                min_diff = 'n/a'
            if other_total_avg_time != 0.0:
                avg_diff = '%+5.1f%%' % (
                    ((total_avg_time * self.warp) /
                     (other_total_avg_time * compare_to.warp) - 1.0) * PERCENT)
            else:
                avg_diff = 'n/a'
        print ('Totals:                       '
               '  %5.0fms %5.0fms %7s %5.0fms %5.0fms %7s' %
               (total_min_time * MILLI_SECONDS,
                (other_total_min_time * compare_to.warp / self.warp
                 * MILLI_SECONDS),
                min_diff,
                total_avg_time * MILLI_SECONDS,
                (other_total_avg_time * compare_to.warp / self.warp
                 * MILLI_SECONDS),
                avg_diff
                ))
        print
        print '(this=%s, other=%s)' % (self.name,
                                       compare_to.name)
        print

class PyBenchCmdline(Application):

    header = ("PYBENCH - a benchmark test suite for Python "
              "interpreters/compilers.")

    version = __version__

    debug = _debug

    options = [ArgumentOption('-n',
                              'number of rounds',
                              Setup.Number_of_rounds),
               ArgumentOption('-f',
                              'save benchmark to file arg',
                              ''),
               ArgumentOption('-c',
                              'compare benchmark with the one in file arg',
                              ''),
               ArgumentOption('-s',
                              'show benchmark in file arg, then exit',
                              ''),
               ArgumentOption('-w',
                              'set warp factor to arg',
                              Setup.Warp_factor),
               ArgumentOption('-t',
                              'run only tests with names matching arg',
                              ''),
               ArgumentOption('-C',
                              'set the number of calibration runs to arg',
                              CALIBRATION_RUNS),
               SwitchOption('-d',
                            'hide noise in comparisons',
                            0),
               SwitchOption('-v',
                            'verbose output (not recommended)',
                            0),
               SwitchOption('--with-gc',
                            'enable garbage collection',
                            0),
               SwitchOption('--with-syscheck',
                            'use default sys check interval',
                            0),
               ArgumentOption('--timer',
                              'use given timer',
                              TIMER_PLATFORM_DEFAULT),
               ]

    about = """\
The normal operation is to run the suite and display the
results. Use -f to save them for later reuse or comparisons.

Available timers:

   time.time
   time.clock
   systimes.processtime

Examples:

python2.1 pybench.py -f p21.pybench
python2.5 pybench.py -f p25.pybench
python pybench.py -s p25.pybench -c p21.pybench
"""

    copyright = __copyright__
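
    # Further invocation sketches (option combinations are illustrative;
    # the pattern and file names are hypothetical):
    #
    #   pybench.py -t String -C 10       run only tests whose names match
    #                                    'String', using 10 calibration runs
    #   pybench.py -w 4 -f fast.pybench  run at warp factor 4 and save the
    #                                    results to fast.pybench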

    def main(self):

        rounds = self.values['-n']
        reportfile = self.values['-f']
        show_bench = self.values['-s']
        compare_to = self.values['-c']
        hidenoise = self.values['-d']
        warp = int(self.values['-w'])
        withgc = self.values['--with-gc']
        limitnames = self.values['-t']
        if limitnames:
            if _debug:
                print '* limiting test names to ones with substring "%s"' % \
                      limitnames
            limitnames = re.compile(limitnames, re.I)
        else:
            limitnames = None
        verbose = self.verbose
        withsyscheck = self.values['--with-syscheck']
        calibration_runs = self.values['-C']
        timer = self.values['--timer']

        print '-' * LINE
        print 'PYBENCH %s' % __version__
        print '-' * LINE
        print '* using %s %s' % (
            getattr(platform, 'python_implementation', lambda: 'Python')(),
            string.join(string.split(sys.version), ' '))

        # Switch off garbage collection
        if not withgc:
            try:
                import gc
            except ImportError:
                print '* Python version doesn\'t support garbage collection'
            else:
                try:
                    gc.disable()
                except NotImplementedError:
                    print '* Python version doesn\'t support gc.disable'
                else:
                    print '* disabled garbage collection'

        # "Disable" sys check interval
        if not withsyscheck:
            # Too bad the check interval uses an int instead of a long...
            value = 2147483647
            try:
                sys.setcheckinterval(value)
            except (AttributeError, NotImplementedError):
                print '* Python version doesn\'t support sys.setcheckinterval'
            else:
                print '* system check interval set to maximum: %s' % value

        if timer == TIMER_SYSTIMES_PROCESSTIME:
            import systimes
            print '* using timer: systimes.processtime (%s)' % \
                  systimes.SYSTIMES_IMPLEMENTATION
        else:
            print '* using timer: %s' % timer
        print

        if compare_to:
            try:
                f = open(compare_to, 'rb')
                bench = pickle.load(f)
                bench.name = compare_to
                f.close()
                compare_to = bench
            except IOError, reason:
                print '* Error opening/reading file %s: %s' % (
                    repr(compare_to),
                    reason)
                compare_to = None

        if show_bench:
            try:
                f = open(show_bench, 'rb')
                bench = pickle.load(f)
                bench.name = show_bench
                f.close()
                bench.print_header()
                if compare_to:
                    bench.print_comparison(compare_to,
                                           hidenoise=hidenoise,
                                           limitnames=limitnames)
                else:
                    bench.print_benchmark(hidenoise=hidenoise,
                                          limitnames=limitnames)
            except IOError, reason:
                print '* Error opening/reading file %s: %s' % (
                    repr(show_bench),
                    reason)
                print
            return

        if reportfile:
            print 'Creating benchmark: %s (rounds=%i, warp=%i)' % \
                  (reportfile, rounds, warp)
            print

        # Create benchmark object
        bench = Benchmark(reportfile,
                          verbose=verbose,
                          timer=timer,
                          warp=warp,
                          calibration_runs=calibration_runs)
        bench.rounds = rounds
        bench.load_tests(Setup, limitnames=limitnames)
        try:
            bench.calibrate()
            bench.run()
        except KeyboardInterrupt:
            print
            print '*** KeyboardInterrupt -- Aborting'
            print
            return
        bench.print_header()
        if compare_to:
            bench.print_comparison(compare_to,
                                   hidenoise=hidenoise,
                                   limitnames=limitnames)
        else:
            bench.print_benchmark(hidenoise=hidenoise,
                                  limitnames=limitnames)

        # Ring bell
        sys.stderr.write('\007')

        if reportfile:
            try:
                f = open(reportfile, 'wb')
                bench.name = reportfile
                pickle.dump(bench, f)
                f.close()
            except IOError, reason:
                print '* Error opening/writing reportfile %s: %s' % (
                    reportfile,
                    reason)
                print


if __name__ == '__main__':
    PyBenchCmdline()
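
# Sketch of inspecting a saved report programmatically (assumes a report
# was created earlier with -f, e.g. p25.pybench from the examples above):
#
#   import pickle
#   f = open('p25.pybench', 'rb')
#   bench = pickle.load(f)
#   f.close()
#   bench.print_header()
#   bench.print_benchmark()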