PageRenderTime 57ms CodeModel.GetById 20ms RepoModel.GetById 1ms app.codeStats 0ms

/tensorflow/python/platform/benchmark.py

https://gitlab.com/hrishikeshvganu/tensorflow
Python | 301 lines | 242 code | 17 blank | 42 comment | 14 complexity | 4d9e0081c76fef1cce308d8227c11371 MD5 | raw file
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to run benchmarks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import inspect
import numbers
import os
import re
import sys
import time

import six

from tensorflow.core.protobuf import config_pb2
from tensorflow.core.util import test_log_pb2
# timeline is outside of the platform target, but is brought in by the target
# ":platform_test", which also brings in ":platform" (and thus this library).
from tensorflow.python.client import timeline
from tensorflow.python.platform import app
from tensorflow.python.platform import gfile

# When a subclass of the Benchmark class is created, it is added to
# the registry automatically (see _BenchmarkRegistrar.__new__).
GLOBAL_BENCHMARK_REGISTRY = set()

# Environment variable that determines whether benchmarks are written.
# Its value is used as the output-path prefix for serialized entries.
# See also tensorflow/core/util/reporter.h TestReporter::kTestReporterEnv.
TEST_REPORTER_TEST_ENV = "TEST_REPORT_FILE_PREFIX"
  39. def _global_report_benchmark(
  40. name, iters=None, cpu_time=None, wall_time=None,
  41. throughput=None, extras=None):
  42. """Method for recording a benchmark directly.
  43. Args:
  44. name: The BenchmarkEntry name.
  45. iters: (optional) How many iterations were run
  46. cpu_time: (optional) Total cpu time in seconds
  47. wall_time: (optional) Total wall time in seconds
  48. throughput: (optional) Throughput (in MB/s)
  49. extras: (optional) Dict mapping string keys to additional benchmark info.
  50. Raises:
  51. TypeError: if extras is not a dict.
  52. IOError: if the benchmark output file already exists.
  53. """
  54. if extras is not None:
  55. if not isinstance(extras, dict):
  56. raise TypeError("extras must be a dict")
  57. test_env = os.environ.get(TEST_REPORTER_TEST_ENV, None)
  58. if test_env is None:
  59. # Reporting was not requested
  60. return
  61. entries = test_log_pb2.BenchmarkEntries()
  62. entry = entries.entry.add()
  63. entry.name = name
  64. if iters is not None:
  65. entry.iters = iters
  66. if cpu_time is not None:
  67. entry.cpu_time = cpu_time
  68. if wall_time is not None:
  69. entry.wall_time = wall_time
  70. if throughput is not None:
  71. entry.throughput = throughput
  72. if extras is not None:
  73. for (k, v) in extras.items():
  74. if isinstance(v, numbers.Number):
  75. entry.extras[k].double_value = v
  76. else:
  77. entry.extras[k].string_value = str(v)
  78. serialized_entry = entries.SerializeToString()
  79. mangled_name = name.replace("/", "__")
  80. output_path = "%s%s" % (test_env, mangled_name)
  81. if gfile.Exists(output_path):
  82. raise IOError("File already exists: %s" % output_path)
  83. with gfile.GFile(output_path, "wb") as out:
  84. out.write(serialized_entry)
  85. class _BenchmarkRegistrar(type):
  86. """The Benchmark class registrar. Used by abstract Benchmark class."""
  87. def __new__(mcs, clsname, base, attrs):
  88. newclass = super(mcs, _BenchmarkRegistrar).__new__(
  89. mcs, clsname, base, attrs)
  90. if not newclass.is_abstract():
  91. GLOBAL_BENCHMARK_REGISTRY.add(newclass)
  92. return newclass
  93. class Benchmark(six.with_metaclass(_BenchmarkRegistrar, object)):
  94. """Abstract class that provides helper functions for running benchmarks.
  95. Any class subclassing this one is immediately registered in the global
  96. benchmark registry.
  97. Only methods whose names start with the word "benchmark" will be run during
  98. benchmarking.
  99. """
  100. @classmethod
  101. def is_abstract(cls):
  102. # mro: (_BenchmarkRegistrar, Benchmark) means this is Benchmark
  103. return len(cls.mro()) <= 2
  104. def _get_name(self, overwrite_name):
  105. """Returns full name of class and method calling report_benchmark."""
  106. # Find the caller method (outermost Benchmark class)
  107. stack = inspect.stack()
  108. calling_class = None
  109. name = None
  110. for frame in stack[::-1]:
  111. f_locals = frame[0].f_locals
  112. f_self = f_locals.get("self", None)
  113. if isinstance(f_self, Benchmark):
  114. calling_class = f_self # Get the outermost stack Benchmark call
  115. name = frame[3] # Get the method name
  116. break
  117. if calling_class is None:
  118. raise ValueError("Unable to determine calling Benchmark class.")
  119. # Use the method name, or overwrite_name is provided.
  120. name = overwrite_name or name
  121. # Prefix the name with the class name.
  122. class_name = type(calling_class).__name__
  123. name = "%s.%s" % (class_name, name)
  124. return name
  125. def report_benchmark(
  126. self,
  127. iters=None,
  128. cpu_time=None,
  129. wall_time=None,
  130. throughput=None,
  131. extras=None,
  132. name=None):
  133. """Report a benchmark.
  134. Args:
  135. iters: (optional) How many iterations were run
  136. cpu_time: (optional) Total cpu time in seconds
  137. wall_time: (optional) Total wall time in seconds
  138. throughput: (optional) Throughput (in MB/s)
  139. extras: (optional) Dict mapping string keys to additional benchmark info.
  140. name: (optional) Override the BenchmarkEntry name with `name`.
  141. Otherwise it is inferred from the top-level method name.
  142. """
  143. name = self._get_name(overwrite_name=name)
  144. _global_report_benchmark(
  145. name=name, iters=iters, cpu_time=cpu_time, wall_time=wall_time,
  146. throughput=throughput, extras=extras)
  147. class TensorFlowBenchmark(Benchmark):
  148. """Abstract class that provides helpers for TensorFlow benchmarks."""
  149. @classmethod
  150. def is_abstract(cls):
  151. # mro: (_BenchmarkRegistrar, Benchmark, TensorFlowBenchmark) means
  152. # this is TensorFlowBenchmark.
  153. return len(cls.mro()) <= 3
  154. def run_op_benchmark(self,
  155. sess,
  156. op_or_tensor,
  157. feed_dict=None,
  158. burn_iters=2,
  159. min_iters=10,
  160. store_trace=False,
  161. name=None):
  162. """Run an op or tensor in the given session. Report the results.
  163. Args:
  164. sess: `Session` object to use for timing.
  165. op_or_tensor: `Operation` or `Tensor` to benchmark.
  166. feed_dict: A `dict` of values to feed for each op iteration (see the
  167. `feed_dict` parameter of `Session.run`).
  168. burn_iters: Number of burn-in iterations to run.
  169. min_iters: Minimum number of iterations to use for timing.
  170. store_trace: Boolean, whether to run an extra untimed iteration and
  171. store the trace of iteration in the benchmark report.
  172. The trace will be stored as a string in Google Chrome trace format
  173. in the extras field "full_trace_chrome_format".
  174. name: (optional) Override the BenchmarkEntry name with `name`.
  175. Otherwise it is inferred from the top-level method name.
  176. """
  177. for _ in range(burn_iters):
  178. sess.run(op_or_tensor, feed_dict=feed_dict)
  179. deltas = [None] * min_iters
  180. for i in range(min_iters):
  181. start_time = time.time()
  182. sess.run(op_or_tensor, feed_dict=feed_dict)
  183. end_time = time.time()
  184. delta = end_time - start_time
  185. deltas[i] = delta
  186. extras = {}
  187. if store_trace:
  188. run_options = config_pb2.RunOptions(
  189. trace_level=config_pb2.RunOptions.FULL_TRACE)
  190. run_metadata = config_pb2.RunMetadata()
  191. sess.run(op_or_tensor, feed_dict=feed_dict,
  192. options=run_options, run_metadata=run_metadata)
  193. tl = timeline.Timeline(run_metadata.step_stats)
  194. extras["full_trace_chrome_format"] = tl.generate_chrome_trace_format()
  195. def _median(x):
  196. if not x:
  197. return -1
  198. s = sorted(x)
  199. l = len(x)
  200. lm1 = l - 1
  201. return (s[l//2] + s[lm1//2]) / 2.0
  202. median_delta = _median(deltas)
  203. self.report_benchmark(
  204. iters=min_iters,
  205. wall_time=median_delta,
  206. extras=extras,
  207. name=name)
  208. def _run_benchmarks(regex):
  209. """Run benchmarks that match regex `regex`.
  210. This function goes through the global benchmark registry, and matches
  211. benchmark class and method names of the form
  212. `module.name.BenchmarkClass.benchmarkMethod` to the given regex.
  213. If a method matches, it is run.
  214. Args:
  215. regex: The string regular expression to match Benchmark classes against.
  216. """
  217. registry = list(GLOBAL_BENCHMARK_REGISTRY)
  218. # Match benchmarks in registry against regex
  219. for benchmark in registry:
  220. benchmark_name = "%s.%s" % (benchmark.__module__, benchmark.__name__)
  221. attrs = dir(benchmark)
  222. # Don't instantiate the benchmark class unless necessary
  223. benchmark_instance = None
  224. for attr in attrs:
  225. if not attr.startswith("benchmark"):
  226. continue
  227. candidate_benchmark_fn = getattr(benchmark, attr)
  228. if not callable(candidate_benchmark_fn):
  229. continue
  230. full_benchmark_name = "%s.%s" % (benchmark_name, attr)
  231. if regex == "all" or re.search(regex, full_benchmark_name):
  232. # Instantiate the class if it hasn't been instantiated
  233. benchmark_instance = benchmark_instance or benchmark()
  234. # Get the method tied to the class
  235. instance_benchmark_fn = getattr(benchmark_instance, attr)
  236. # Call the instance method
  237. instance_benchmark_fn()
  238. def benchmarks_main(true_main):
  239. """Run benchmarks as declared in args.
  240. Args:
  241. true_main: True main function to run if benchmarks are not requested.
  242. """
  243. argv = sys.argv
  244. found_arg = [arg for arg in argv
  245. if arg.startswith("--benchmarks=")
  246. or arg.startswith("-benchmarks=")]
  247. if found_arg:
  248. # Remove --benchmarks arg from sys.argv
  249. argv.remove(found_arg[0])
  250. regex = found_arg[0].split("=")[1]
  251. app.run(lambda _: _run_benchmarks(regex))
  252. else:
  253. true_main()