/3rd_party/llvm/utils/lit/lit/Test.py
Python | 190 lines | 153 code | 18 blank | 19 comment | 1 complexity | 1e15bfbc343744bf7cc2ffa45d3377a2 MD5 | raw file
Possible License(s): LGPL-2.1, BSD-3-Clause, JSON, MPL-2.0-no-copyleft-exception, GPL-2.0, GPL-3.0, LGPL-3.0, BSD-2-Clause
import os

# Test result codes.

class ResultCode(object):
    """Test result codes."""

    # We override __new__ and __getnewargs__ to ensure that pickling still
    # provides unique ResultCode objects in any particular instance.
    _instances = {}

    def __new__(cls, name, isFailure):
        # Intern instances by name so identity comparisons (code is PASS)
        # remain valid even across pickling.
        # NOTE(review): a second construction with the same name but a
        # different isFailure silently reuses the cached object (and
        # __init__ then overwrites isFailure) -- assumed intentional.
        res = cls._instances.get(name)
        if res is None:
            cls._instances[name] = res = super(ResultCode, cls).__new__(cls)
        return res

    def __getnewargs__(self):
        # Route unpickling back through __new__ so the interned instance
        # is reused instead of creating a duplicate.
        return (self.name, self.isFailure)

    def __init__(self, name, isFailure):
        # The short, human-readable name of this code (e.g. 'PASS').
        self.name = name
        # Whether this code counts as a failure for reporting purposes.
        self.isFailure = isFailure

    def __repr__(self):
        return '%s%r' % (self.__class__.__name__,
                         (self.name, self.isFailure))

PASS = ResultCode('PASS', False)
XFAIL = ResultCode('XFAIL', False)
FAIL = ResultCode('FAIL', True)
XPASS = ResultCode('XPASS', True)
UNRESOLVED = ResultCode('UNRESOLVED', True)
UNSUPPORTED = ResultCode('UNSUPPORTED', False)

# Test metric values.

class MetricValue(object):
    """Abstract base class for values attached to a test result as metrics."""

    def format(self):
        """
        format() -> str

        Convert this metric to a string suitable for displaying as part of the
        console output.
        """
        raise RuntimeError("abstract method")

    def todata(self):
        """
        todata() -> json-serializable data

        Convert this metric to content suitable for serializing in the JSON test
        output.
        """
        raise RuntimeError("abstract method")

class IntMetricValue(MetricValue):
    """An integer-valued test metric."""

    def __init__(self, value):
        self.value = value

    def format(self):
        return str(self.value)

    def todata(self):
        return self.value

class RealMetricValue(MetricValue):
    """A floating-point test metric, displayed with four decimal places."""

    def __init__(self, value):
        self.value = value

    def format(self):
        return '%.4f' % self.value

    def todata(self):
        return self.value

# Test results.

class Result(object):
    """Wrapper for the results of executing an individual test."""

    def __init__(self, code, output='', elapsed=None):
        # The result code (a ResultCode instance).
        self.code = code
        # The test output.
        self.output = output
        # The wall timing to execute the test, if timing.
        self.elapsed = elapsed
        # The metrics reported by this test, mapping name -> MetricValue.
        self.metrics = {}

    def addMetric(self, name, value):
        """
        addMetric(name, value)

        Attach a test metric to the test result, with the given name and list of
        values. It is an error to attempt to attach the metrics with the same
        name multiple times.

        Each value must be an instance of a MetricValue subclass.
        """
        if name in self.metrics:
            raise ValueError("result already includes metrics for %r" % (
                    name,))
        if not isinstance(value, MetricValue):
            raise TypeError("unexpected metric value: %r" % (value,))
        self.metrics[name] = value

# Test classes.

class TestSuite:
    """TestSuite - Information on a group of tests.

    A test suite groups together a set of logically related tests.
    """

    def __init__(self, name, source_root, exec_root, config):
        self.name = name
        # Root directory containing the test sources.
        self.source_root = source_root
        # Root directory in which tests are executed.
        self.exec_root = exec_root
        # The test suite configuration.
        self.config = config

    def getSourcePath(self, components):
        """Return the source path formed by joining components to source_root."""
        return os.path.join(self.source_root, *components)

    def getExecPath(self, components):
        """Return the execution path formed by joining components to exec_root."""
        return os.path.join(self.exec_root, *components)

class Test:
    """Test - Information on a single test instance."""

    def __init__(self, suite, path_in_suite, config):
        # The TestSuite this test belongs to.
        self.suite = suite
        # The tuple of path components identifying this test within the suite.
        self.path_in_suite = path_in_suite
        # The test configuration.
        self.config = config
        # A list of conditions under which this test is expected to fail. These
        # can optionally be provided by test format handlers, and will be
        # honored when the test result is supplied.
        self.xfails = []
        # The test result, once complete.
        self.result = None

    def setResult(self, result):
        """
        setResult(result)

        Record the result of executing this test, applying XFAIL handling to
        resolve the final result code.

        Raises ValueError if a result was already set, and TypeError if the
        result is not a Result instance.
        """
        # BUG FIX: the original raised ArgumentError here, which is not a
        # Python builtin -- triggering either guard crashed with NameError.
        if self.result is not None:
            raise ValueError("test result already set")
        if not isinstance(result, Result):
            raise TypeError("unexpected result type")

        self.result = result

        # Apply the XFAIL handling to resolve the result exit code.
        if self.isExpectedToFail():
            if self.result.code == PASS:
                self.result.code = XPASS
            elif self.result.code == FAIL:
                self.result.code = XFAIL

    def getFullName(self):
        """Return the fully qualified name: '<suite config name> :: <path>'."""
        return self.suite.config.name + ' :: ' + '/'.join(self.path_in_suite)

    def getSourcePath(self):
        """Return the path to this test's source, within the suite source root."""
        return self.suite.getSourcePath(self.path_in_suite)

    def getExecPath(self):
        """Return the path at which this test executes, within the exec root."""
        return self.suite.getExecPath(self.path_in_suite)

    def isExpectedToFail(self):
        """
        isExpectedToFail() -> bool

        Check whether this test is expected to fail in the current
        configuration. This check relies on the test xfails property which by
        some test formats may not be computed until the test has first been
        executed.
        """

        # Check if any of the xfails match an available feature or the target.
        for item in self.xfails:
            # If this is the wildcard, it always fails.
            if item == '*':
                return True

            # If this is an exact match for one of the features, it fails.
            if item in self.config.available_features:
                return True

            # If this is a part of the target triple, it fails.
            if item in self.suite.config.target_triple:
                return True

        return False