
/benchmark/utils/DriverUtils.swift

https://gitlab.com/dwiktor/swift
//===--- DriverUtils.swift ------------------------------------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See http://swift.org/LICENSE.txt for license information
// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//

import Darwin
struct BenchResults {
  var delim: String = ","
  var sampleCount: UInt64 = 0
  var min: UInt64 = 0
  var max: UInt64 = 0
  var mean: UInt64 = 0
  var sd: UInt64 = 0
  var median: UInt64 = 0

  init() {}

  init(delim: String, sampleCount: UInt64, min: UInt64, max: UInt64,
       mean: UInt64, sd: UInt64, median: UInt64) {
    self.delim = delim
    self.sampleCount = sampleCount
    self.min = min
    self.max = max
    self.mean = mean
    self.sd = sd
    self.median = median

    // Sanity-check the bounds of our results.
    precondition(self.min <= self.max, "min should always be <= max")
    precondition(self.min <= self.mean, "min should always be <= mean")
    precondition(self.min <= self.median, "min should always be <= median")
    precondition(self.max >= self.mean, "max should always be >= mean")
    precondition(self.max >= self.median, "max should always be >= median")
  }
}

extension BenchResults : CustomStringConvertible {
  var description: String {
    return "\(sampleCount)\(delim)\(min)\(delim)\(max)\(delim)\(mean)\(delim)\(sd)\(delim)\(median)"
  }
}
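// Illustrative example (values invented): with the default "," delimiter a
// BenchResults value renders as one CSV fragment:
//
//   let r = BenchResults(delim: ",", sampleCount: 3, min: 90, max: 110,
//                        mean: 100, sd: 10, median: 100)
//   print(r)   // prints "3,90,110,100,10,100"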
struct Test {
  let name: String
  let index: Int
  let f: (Int) -> ()
  var run: Bool

  init(name: String, n: Int, f: @escaping (Int) -> ()) {
    self.name = name
    self.index = n
    self.f = f
    run = true
  }
}

public var precommitTests: [String : (Int) -> ()] = [:]
public var otherTests: [String : (Int) -> ()] = [:]

enum TestAction {
  case Run
  case ListTests
  case Fail(String)
}
struct TestConfig {
  /// The delimiter to use when printing output.
  var delim: String = ","

  /// The filters applied to our test names.
  var filters = [String]()

  /// The scalar multiple of the number of times a test should be run. This
  /// enables one to cause tests to run for N times longer than they normally
  /// would, which is useful when one wishes a test to run long enough to
  /// perform performance analysis on it in Instruments.
  var iterationScale: Int = 1

  /// If we are asked to run with a fixed number of iterations, the number of
  /// iterations to use.
  var fixedNumIters: UInt = 0

  /// The number of samples we should take of each test.
  var numSamples: Int = 1

  /// Is verbose output enabled?
  var verbose: Bool = false

  /// Should we only run the "pre-commit" tests?
  var onlyPrecommit: Bool = true

  /// After we run the tests, should the harness sleep so that utilities
  /// which require a PID (like leaks) can attach to the test harness?
  var afterRunSleep: Int? = nil

  /// The list of tests to run.
  var tests = [Test]()

  mutating func processArguments() -> TestAction {
    let validOptions = [
      "--iter-scale", "--num-samples", "--num-iters",
      "--verbose", "--delim", "--run-all", "--list", "--sleep"
    ]
    let maybeBenchArgs: Arguments? = parseArgs(validOptions)
    if maybeBenchArgs == nil {
      return .Fail("Failed to parse arguments")
    }
    let benchArgs = maybeBenchArgs!

    if let _ = benchArgs.optionalArgsMap["--list"] {
      return .ListTests
    }
    if let x = benchArgs.optionalArgsMap["--iter-scale"] {
      if x.isEmpty { return .Fail("--iter-scale requires a value") }
      iterationScale = Int(x)!
    }
    if let x = benchArgs.optionalArgsMap["--num-iters"] {
      if x.isEmpty { return .Fail("--num-iters requires a value") }
      fixedNumIters = numericCast(Int(x)!)
    }
    if let x = benchArgs.optionalArgsMap["--num-samples"] {
      if x.isEmpty { return .Fail("--num-samples requires a value") }
      numSamples = Int(x)!
    }
    if let _ = benchArgs.optionalArgsMap["--verbose"] {
      verbose = true
      print("Verbose")
    }
    if let x = benchArgs.optionalArgsMap["--delim"] {
      if x.isEmpty { return .Fail("--delim requires a value") }
      delim = x
    }
    if let _ = benchArgs.optionalArgsMap["--run-all"] {
      onlyPrecommit = false
    }
    if let x = benchArgs.optionalArgsMap["--sleep"] {
      if x.isEmpty {
        return .Fail("--sleep requires a non-empty integer value")
      }
      let v: Int? = Int(x)
      if v == nil {
        return .Fail("--sleep requires a non-empty integer value")
      }
      afterRunSleep = v!
    }
    filters = benchArgs.positionalArgs
    return .Run
  }

  mutating func findTestsToRun() {
    var i = 1
    for benchName in precommitTests.keys.sorted() {
      tests.append(Test(name: benchName, n: i, f: precommitTests[benchName]!))
      i += 1
    }
    for benchName in otherTests.keys.sorted() {
      tests.append(Test(name: benchName, n: i, f: otherTests[benchName]!))
      i += 1
    }
    for i in 0..<tests.count {
      if onlyPrecommit && precommitTests[tests[i].name] == nil {
        tests[i].run = false
      }
      if !filters.isEmpty &&
         !filters.contains(String(tests[i].index)) &&
         !filters.contains(tests[i].name) {
        tests[i].run = false
      }
    }
  }
}
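// Illustrative invocation (binary name assumed): the options parsed above
// correspond to command lines such as
//
//   ./Benchmark_O --num-samples 3 --iter-scale 10 Ackermann
//
// which takes 3 samples per test with a 10x per-sample time budget, and
// restricts the run to tests whose name or index matches a positional
// argument ("Ackermann" here).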
func internalMeanSD(_ inputs: [UInt64]) -> (UInt64, UInt64) {
  // If we are empty, return (0, 0).
  if inputs.isEmpty {
    return (0, 0)
  }

  // If we have one element, return (element, 0).
  if inputs.count == 1 {
    return (inputs[0], 0)
  }

  // Ok, we have at least two elements.
  var sum1: UInt64 = 0
  var sum2: UInt64 = 0

  for i in inputs {
    sum1 += i
  }

  let mean: UInt64 = sum1 / UInt64(inputs.count)

  for i in inputs {
    sum2 = sum2 &+ UInt64((Int64(i) &- Int64(mean)) &* (Int64(i) &- Int64(mean)))
  }

  // Sample standard deviation: divide by (n - 1), Bessel's correction.
  return (mean, UInt64(sqrt(Double(sum2) / (Double(inputs.count) - 1))))
}
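// Worked example (illustrative): for inputs [10, 12, 14],
//   sum1 = 36, mean = 36 / 3 = 12
//   sum2 = (10-12)^2 + (12-12)^2 + (14-12)^2 = 8
//   sd   = sqrt(8 / (3 - 1)) = 2
// so internalMeanSD([10, 12, 14]) == (12, 2).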
func internalMedian(_ inputs: [UInt64]) -> UInt64 {
  return inputs.sorted()[inputs.count / 2]
}
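// Note: for an even number of samples this returns the upper of the two
// middle elements rather than their average, e.g.
// internalMedian([1, 2, 3, 4]) == 3.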
#if SWIFT_RUNTIME_ENABLE_LEAK_CHECKER

@_silgen_name("swift_leaks_startTrackingObjects")
func startTrackingObjects(_: UnsafeMutableRawPointer) -> ()
@_silgen_name("swift_leaks_stopTrackingObjects")
func stopTrackingObjects(_: UnsafeMutableRawPointer) -> Int

#endif
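// Note: @_silgen_name binds these declarations straight to the named runtime
// symbols, bypassing Swift name mangling. The symbols are only present when
// the Swift runtime itself was built with SWIFT_RUNTIME_ENABLE_LEAK_CHECKER,
// which is why the declarations are conditionally compiled as well.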
class SampleRunner {
  var info = mach_timebase_info_data_t(numer: 0, denom: 0)

  init() {
    mach_timebase_info(&info)
  }

  func run(_ name: String, fn: (Int) -> Void, num_iters: UInt) -> UInt64 {
    // Start the timer.
#if SWIFT_RUNTIME_ENABLE_LEAK_CHECKER
    var str = name
    startTrackingObjects(UnsafeMutableRawPointer(str._core.startASCII))
#endif
    let start_ticks = mach_absolute_time()
    fn(Int(num_iters))
    // Stop the timer.
    let end_ticks = mach_absolute_time()
#if SWIFT_RUNTIME_ENABLE_LEAK_CHECKER
    stopTrackingObjects(UnsafeMutableRawPointer(str._core.startASCII))
#endif
    // Compute the elapsed time and convert Mach ticks to nanoseconds using
    // the timebase scaling factor.
    let elapsed_ticks = end_ticks - start_ticks
    return elapsed_ticks * UInt64(info.numer) / UInt64(info.denom)
  }
}
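// Illustrative note: mach_absolute_time() counts ticks, and multiplying by
// numer/denom from mach_timebase_info converts ticks to nanoseconds. On most
// Intel Macs numer == denom == 1 (ticks are already nanoseconds); some ARM
// hardware reports a different ratio such as 125/3, so the conversion above
// is required for portable timing.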
/// Invoke the benchmark entry point and return the run time in microseconds.
func runBench(_ name: String, _ fn: (Int) -> Void, _ c: TestConfig) -> BenchResults {
  var samples = [UInt64](repeating: 0, count: c.numSamples)

  if c.verbose {
    print("Running \(name) for \(c.numSamples) samples.")
  }

  let sampler = SampleRunner()
  for s in 0..<c.numSamples {
    let time_per_sample: UInt64 = 1_000_000_000 * UInt64(c.iterationScale)

    var scale : UInt
    var elapsed_time : UInt64 = 0
    if c.fixedNumIters == 0 {
      // No fixed iteration count was specified: time a single iteration and
      // compute how many iterations fit into the per-sample time budget.
      elapsed_time = sampler.run(name, fn: fn, num_iters: 1)
      scale = UInt(time_per_sample / elapsed_time)
    } else {
      // A fixed iteration count was requested; use it as the scale.
      scale = c.fixedNumIters
    }

    // Rerun the test with the computed scale factor.
    if scale > 1 {
      if c.verbose {
        print("    Measuring with scale \(scale).")
      }
      elapsed_time = sampler.run(name, fn: fn, num_iters: scale)
    } else {
      scale = 1
    }

    // Save the result in microseconds or k-ticks.
    samples[s] = elapsed_time / UInt64(scale) / 1000
    if c.verbose {
      print("    Sample \(s),\(samples[s])")
    }
  }

  let (mean, sd) = internalMeanSD(samples)

  // Return our benchmark results.
  return BenchResults(delim: c.delim, sampleCount: UInt64(samples.count),
                      min: samples.min()!, max: samples.max()!,
                      mean: mean, sd: sd, median: internalMedian(samples))
}
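// Worked example (illustrative numbers): with the default iterationScale of
// 1 the per-sample budget is 1_000_000_000 ns. If the single calibration
// iteration takes 2_000_000 ns, then scale = 1e9 / 2e6 = 500, the test is
// rerun for 500 iterations, and the recorded sample is
// elapsed_time / 500 / 1000, i.e. microseconds per iteration.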
func printRunInfo(_ c: TestConfig) {
  if c.verbose {
    print("--- CONFIG ---")
    print("NumSamples: \(c.numSamples)")
    print("Verbose: \(c.verbose)")
    print("IterScale: \(c.iterationScale)")
    if c.fixedNumIters != 0 {
      print("FixedIters: \(c.fixedNumIters)")
    }
    print("Tests Filter: \(c.filters)")
    print("Tests to run: ", terminator: "")
    for t in c.tests {
      if t.run {
        print("\(t.name), ", terminator: "")
      }
    }
    print("")
    print("")
    print("--- DATA ---")
  }
}
func runBenchmarks(_ c: TestConfig) {
  let units = "us"
  print("#\(c.delim)TEST\(c.delim)SAMPLES\(c.delim)MIN(\(units))\(c.delim)MAX(\(units))\(c.delim)MEAN(\(units))\(c.delim)SD(\(units))\(c.delim)MEDIAN(\(units))")
  var SumBenchResults = BenchResults()
  SumBenchResults.sampleCount = 0

  for t in c.tests {
    if !t.run {
      continue
    }
    let BenchIndex = t.index
    let BenchName = t.name
    let BenchFunc = t.f
    let results = runBench(BenchName, BenchFunc, c)
    print("\(BenchIndex)\(c.delim)\(BenchName)\(c.delim)\(results.description)")
    fflush(stdout)

    SumBenchResults.min += results.min
    SumBenchResults.max += results.max
    SumBenchResults.mean += results.mean
    SumBenchResults.sampleCount += 1
    // Don't accumulate SD and median: a simple sum isn't valid for them.
    // TODO: Compute SD and median for the total results as well.
    // SumBenchResults.sd += results.sd
    // SumBenchResults.median += results.median
  }

  print("")
  print("Totals\(c.delim)\(SumBenchResults.description)")
}
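// Illustrative output shape (the test name and numbers below are invented;
// only the format is fixed by the code above), using the default ","
// delimiter:
//
//   #,TEST,SAMPLES,MIN(us),MAX(us),MEAN(us),SD(us),MEDIAN(us)
//   1,SomeTest,3,90,110,100,10,100
//
//   Totals,1,90,110,100,0,0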
public func main() {
  var config = TestConfig()
  switch config.processArguments() {
    case let .Fail(msg):
      // We do this since we need an autoclosure...
      fatalError("\(msg)")
    case .ListTests:
      config.findTestsToRun()
      print("Enabled Tests:")
      for t in config.tests {
        print("    \(t.name)")
      }
    case .Run:
      config.findTestsToRun()
      printRunInfo(config)
      runBenchmarks(config)
      if let x = config.afterRunSleep {
        sleep(UInt32(x))
      }
  }
}
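// Minimal driver sketch (hypothetical; this would live in a separate
// main.swift, not in DriverUtils.swift): register each benchmark's entry
// point in one of the test dictionaries, then hand control to main().
//
//   func run_Ackermann(_ N: Int) { /* hypothetical benchmark body */ }
//   precommitTests["Ackermann"] = run_Ackermann
//   main()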