PageRenderTime 25ms CodeModel.GetById 11ms RepoModel.GetById 1ms app.codeStats 0ms

/tools/singletest.py

https://gitlab.com/YaoQ/mbed-for-linknode
Python | 263 lines | 256 code | 2 blank | 5 comment | 0 complexity | 4e766c78715232ff1d40a23c8c155383 MD5 | raw file
  1. #!/usr/bin/env python2
  2. """
  3. mbed SDK
  4. Copyright (c) 2011-2014 ARM Limited
  5. Licensed under the Apache License, Version 2.0 (the "License");
  6. you may not use this file except in compliance with the License.
  7. You may obtain a copy of the License at
  8. http://www.apache.org/licenses/LICENSE-2.0
  9. Unless required by applicable law or agreed to in writing, software
  10. distributed under the License is distributed on an "AS IS" BASIS,
  11. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. See the License for the specific language governing permissions and
  13. limitations under the License.
  14. Author: Przemyslaw Wirkus <Przemyslaw.Wirkus@arm.com>
  15. """
  16. """
  17. File format example: test_spec.json:
  18. {
  19. "targets": {
  20. "KL46Z": ["ARM", "GCC_ARM"],
  21. "LPC1768": ["ARM", "GCC_ARM", "GCC_CR", "IAR"],
  22. "LPC11U24": ["uARM"],
  23. "NRF51822": ["ARM"]
  24. }
  25. }
  26. File format example: muts_all.json:
  27. {
  28. "1" : {"mcu": "LPC1768",
  29. "port":"COM4",
  30. "disk":"J:\\",
  31. "peripherals": ["TMP102", "digital_loop", "port_loop", "analog_loop", "SD"]
  32. },
  33. "2" : {"mcu": "KL25Z",
  34. "port":"COM7",
  35. "disk":"G:\\",
  36. "peripherals": ["digital_loop", "port_loop", "analog_loop"]
  37. }
  38. }
  39. """
# Be sure that the tools directory is in the search path
import sys
from os.path import join, abspath, dirname

# Make the repository root importable so the `tools` package resolves no
# matter which directory this script is launched from (script lives in tools/).
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)

# Check: Extra modules which are required by core test suite
from tools.utils import check_required_modules
check_required_modules(['prettytable', 'serial'])

# Imports related to mbed build api
from tools.build_api import mcu_toolchain_matrix

# Imports from TEST API
from tools.test_api import SingleTestRunner
from tools.test_api import singletest_in_cli_mode
from tools.test_api import detect_database_verbose
from tools.test_api import get_json_data_from_file
from tools.test_api import get_avail_tests_summary_table
from tools.test_api import get_default_test_options_parser
from tools.test_api import print_muts_configuration_from_json
from tools.test_api import print_test_configuration_from_json
from tools.test_api import get_autodetected_MUTS_list
from tools.test_api import get_autodetected_TEST_SPEC
from tools.test_api import get_module_avail
from tools.test_exporters import ReportExporter, ResultExporterType
  64. try:
  65. import mbed_lstools
  66. from tools.compliance.ioper_runner import IOperTestRunner
  67. from tools.compliance.ioper_runner import get_available_oper_test_scopes
  68. except:
  69. pass
  70. def get_version():
  71. """ Returns test script version
  72. """
  73. single_test_version_major = 1
  74. single_test_version_minor = 5
  75. return (single_test_version_major, single_test_version_minor)
  76. if __name__ == '__main__':
  77. # Command line options
  78. parser = get_default_test_options_parser()
  79. parser.description = """This script allows you to run mbed defined test cases for particular MCU(s) and corresponding toolchain(s)."""
  80. parser.epilog = """Example: singletest.py -i test_spec.json -M muts_all.json"""
  81. opts = parser.parse_args()
  82. # Print scrip version
  83. if opts.version:
  84. print parser.description
  85. print parser.epilog
  86. print "Version %d.%d"% get_version()
  87. exit(0)
  88. if opts.db_url and opts.verbose_test_configuration_only:
  89. detect_database_verbose(opts.db_url)
  90. exit(0)
  91. # Print summary / information about automation test status
  92. if opts.test_automation_report:
  93. print get_avail_tests_summary_table(platform_filter=opts.general_filter_regex)
  94. exit(0)
  95. # Print summary / information about automation test status
  96. if opts.test_case_report:
  97. test_case_report_cols = ['id',
  98. 'automated',
  99. 'description',
  100. 'peripherals',
  101. 'host_test',
  102. 'duration',
  103. 'source_dir']
  104. print get_avail_tests_summary_table(cols=test_case_report_cols,
  105. result_summary=False,
  106. join_delim='\n',
  107. platform_filter=opts.general_filter_regex)
  108. exit(0)
  109. # Only prints matrix of supported toolchains
  110. if opts.supported_toolchains:
  111. print mcu_toolchain_matrix(platform_filter=opts.general_filter_regex)
  112. exit(0)
  113. test_spec = None
  114. MUTs = None
  115. if hasattr(opts, 'auto_detect') and opts.auto_detect:
  116. # If auto_detect attribute is present, we assume other auto-detection
  117. # parameters like 'toolchains_filter' are also set.
  118. print "MBEDLS: Detecting connected mbed-enabled devices... "
  119. MUTs = get_autodetected_MUTS_list()
  120. for mut in MUTs.values():
  121. print "MBEDLS: Detected %s, port: %s, mounted: %s"% (mut['mcu_unique'] if 'mcu_unique' in mut else mut['mcu'],
  122. mut['port'],
  123. mut['disk'])
  124. # Set up parameters for test specification filter function (we need to set toolchains per target here)
  125. use_default_toolchain = 'default' in opts.toolchains_filter if opts.toolchains_filter is not None else True
  126. use_supported_toolchains = 'all' in opts.toolchains_filter if opts.toolchains_filter is not None else False
  127. toolchain_filter = opts.toolchains_filter
  128. platform_name_filter = opts.general_filter_regex if opts.general_filter_regex is not None else opts.general_filter_regex
  129. # Test specification with information about each target and associated toolchain
  130. test_spec = get_autodetected_TEST_SPEC(MUTs.values(),
  131. use_default_toolchain=use_default_toolchain,
  132. use_supported_toolchains=use_supported_toolchains,
  133. toolchain_filter=toolchain_filter,
  134. platform_name_filter=platform_name_filter)
  135. else:
  136. # Open file with test specification
  137. # test_spec_filename tells script which targets and their toolchain(s)
  138. # should be covered by the test scenario
  139. opts.auto_detect = False
  140. test_spec = get_json_data_from_file(opts.test_spec_filename) if opts.test_spec_filename else None
  141. if test_spec is None:
  142. if not opts.test_spec_filename:
  143. parser.print_help()
  144. exit(-1)
  145. # Get extra MUTs if applicable
  146. MUTs = get_json_data_from_file(opts.muts_spec_filename) if opts.muts_spec_filename else None
  147. if MUTs is None:
  148. if not opts.muts_spec_filename:
  149. parser.print_help()
  150. exit(-1)
  151. if opts.verbose_test_configuration_only:
  152. print "MUTs configuration in %s:" % ('auto-detected' if opts.auto_detect else opts.muts_spec_filename)
  153. if MUTs:
  154. print print_muts_configuration_from_json(MUTs, platform_filter=opts.general_filter_regex)
  155. print
  156. print "Test specification in %s:" % ('auto-detected' if opts.auto_detect else opts.test_spec_filename)
  157. if test_spec:
  158. print print_test_configuration_from_json(test_spec)
  159. exit(0)
  160. if get_module_avail('mbed_lstools'):
  161. if opts.operability_checks:
  162. # Check if test scope is valid and run tests
  163. test_scope = get_available_oper_test_scopes()
  164. if opts.operability_checks in test_scope:
  165. tests = IOperTestRunner(scope=opts.operability_checks)
  166. test_results = tests.run()
  167. # Export results in form of JUnit XML report to separate file
  168. if opts.report_junit_file_name:
  169. report_exporter = ReportExporter(ResultExporterType.JUNIT_OPER)
  170. report_exporter.report_to_file(test_results, opts.report_junit_file_name)
  171. else:
  172. print "Unknown interoperability test scope name: '%s'" % (opts.operability_checks)
  173. print "Available test scopes: %s" % (','.join(["'%s'" % n for n in test_scope]))
  174. exit(0)
  175. # Verbose test specification and MUTs configuration
  176. if MUTs and opts.verbose:
  177. print print_muts_configuration_from_json(MUTs)
  178. if test_spec and opts.verbose:
  179. print print_test_configuration_from_json(test_spec)
  180. if opts.only_build_tests:
  181. # We are skipping testing phase, and suppress summary
  182. opts.suppress_summary = True
  183. single_test = SingleTestRunner(_global_loops_count=opts.test_global_loops_value,
  184. _test_loops_list=opts.test_loops_list,
  185. _muts=MUTs,
  186. _clean=opts.clean,
  187. _opts_db_url=opts.db_url,
  188. _opts_log_file_name=opts.log_file_name,
  189. _opts_report_html_file_name=opts.report_html_file_name,
  190. _opts_report_junit_file_name=opts.report_junit_file_name,
  191. _opts_report_build_file_name=opts.report_build_file_name,
  192. _opts_report_text_file_name=opts.report_text_file_name,
  193. _test_spec=test_spec,
  194. _opts_goanna_for_mbed_sdk=opts.goanna_for_mbed_sdk,
  195. _opts_goanna_for_tests=opts.goanna_for_tests,
  196. _opts_shuffle_test_order=opts.shuffle_test_order,
  197. _opts_shuffle_test_seed=opts.shuffle_test_seed,
  198. _opts_test_by_names=opts.test_by_names,
  199. _opts_peripheral_by_names=opts.peripheral_by_names,
  200. _opts_test_only_peripheral=opts.test_only_peripheral,
  201. _opts_test_only_common=opts.test_only_common,
  202. _opts_verbose_skipped_tests=opts.verbose_skipped_tests,
  203. _opts_verbose_test_result_only=opts.verbose_test_result_only,
  204. _opts_verbose=opts.verbose,
  205. _opts_firmware_global_name=opts.firmware_global_name,
  206. _opts_only_build_tests=opts.only_build_tests,
  207. _opts_parallel_test_exec=opts.parallel_test_exec,
  208. _opts_suppress_summary=opts.suppress_summary,
  209. _opts_test_x_toolchain_summary=opts.test_x_toolchain_summary,
  210. _opts_copy_method=opts.copy_method,
  211. _opts_mut_reset_type=opts.mut_reset_type,
  212. _opts_jobs=opts.jobs,
  213. _opts_waterfall_test=opts.waterfall_test,
  214. _opts_consolidate_waterfall_test=opts.consolidate_waterfall_test,
  215. _opts_extend_test_timeout=opts.extend_test_timeout,
  216. _opts_auto_detect=opts.auto_detect)
  217. # Runs test suite in CLI mode
  218. if (singletest_in_cli_mode(single_test)):
  219. exit(0)
  220. else:
  221. exit(-1)