
/js/lib/Socket.IO-node/support/expresso/deps/jscoverage/js/jstracer.h

http://github.com/onedayitwillmake/RealtimeMultiplayerNodeJs
/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sw=4 et tw=99 ft=cpp:
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is Mozilla SpiderMonkey JavaScript 1.9 code, released
 * May 28, 2008.
 *
 * The Initial Developer of the Original Code is
 *   Brendan Eich <brendan@mozilla.org>
 *
 * Contributor(s):
 *   Andreas Gal <gal@mozilla.com>
 *   Mike Shaver <shaver@mozilla.org>
 *   David Anderson <danderson@mozilla.com>
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either of the GNU General Public License Version 2 or later (the "GPL"),
 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */

#ifndef jstracer_h___
#define jstracer_h___

#ifdef JS_TRACER

#include "jscntxt.h"
#include "jsstddef.h"
#include "jstypes.h"
#include "jslock.h"
#include "jsnum.h"
#include "jsinterp.h"
#include "jsbuiltins.h"

#if defined(DEBUG) && !defined(JS_JIT_SPEW)
#define JS_JIT_SPEW
#endif
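
/*
 * A simple growable array (not a FIFO, despite the name). The tracer uses it
 * below for slot lists, side-exit lists and similar bookkeeping. Storage grows
 * by doubling in ensure() and lives in malloc'd memory.
 */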
template <typename T>
class Queue : public avmplus::GCObject {
    T* _data;
    unsigned _len;
    unsigned _max;

    void ensure(unsigned size) {
        while (_max < size)
            _max <<= 1;
        _data = (T*)realloc(_data, _max * sizeof(T));
    }
public:
    Queue(unsigned max = 16) {
        this->_max = max;
        this->_len = 0;
        this->_data = (T*)malloc(max * sizeof(T));
    }

    ~Queue() {
        free(_data);
    }

    bool contains(T a) {
        for (unsigned n = 0; n < _len; ++n)
            if (_data[n] == a)
                return true;
        return false;
    }

    void add(T a) {
        ensure(_len + 1);
        JS_ASSERT(_len <= _max);
        _data[_len++] = a;
    }

    void add(T* chunk, unsigned size) {
        ensure(_len + size);
        JS_ASSERT(_len <= _max);
        memcpy(&_data[_len], chunk, size * sizeof(T));
        _len += size;
    }

    void addUnique(T a) {
        if (!contains(a))
            add(a);
    }

    void setLength(unsigned len) {
        ensure(len + 1);
        _len = len;
    }

    void clear() {
        _len = 0;
    }

    unsigned length() const {
        return _len;
    }

    T* data() const {
        return _data;
    }
};
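
/*
 * Illustrative usage sketch only: Queue behaves like a growable list, e.g.
 *
 *   Queue<uint16> slots;          // same shape as SlotList below
 *   slots.addUnique(3);           // append if not already present
 *   if (slots.contains(3))
 *       slots.clear();            // drop all elements, keep the storage
 */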
/*
 * Tracker is used to keep track of values being manipulated by the interpreter
 * during trace recording.
 */
class Tracker {
    struct Page {
        struct Page*    next;
        jsuword         base;
        nanojit::LIns*  map[1];
    };
    struct Page* pagelist;

    jsuword         getPageBase(const void* v) const;
    struct Page*    findPage(const void* v) const;
    struct Page*    addPage(const void* v);
public:
    Tracker();
    ~Tracker();

    bool            has(const void* v) const;
    nanojit::LIns*  get(const void* v) const;
    void            set(const void* v, nanojit::LIns* ins);
    void            clear();
};

/*
 * The oracle keeps track of slots that should not be demoted to int because we know them
 * to overflow or they result in type-unstable traces. We are using a simple hash table.
 * Collisions lead to loss of optimization (demotable slots are not demoted) but have no
 * correctness implications.
 */
#define ORACLE_SIZE 4096

class Oracle {
    avmplus::BitSet _dontDemote;
public:
    void markGlobalSlotUndemotable(JSScript* script, unsigned slot);
    bool isGlobalSlotUndemotable(JSScript* script, unsigned slot) const;
    void markStackSlotUndemotable(JSScript* script, jsbytecode* ip, unsigned slot);
    bool isStackSlotUndemotable(JSScript* script, jsbytecode* ip, unsigned slot) const;
    void clear();
};

typedef Queue<uint16> SlotList;

class TypeMap : public Queue<uint8> {
public:
    void captureGlobalTypes(JSContext* cx, SlotList& slots);
    void captureStackTypes(JSContext* cx, unsigned callDepth);
    bool matches(TypeMap& other) const;
};
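
/* Reason a guard in compiled trace code hands control back to the interpreter. */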
enum ExitType {
    BRANCH_EXIT,
    LOOP_EXIT,
    NESTED_EXIT,
    MISMATCH_EXIT,
    OOM_EXIT,
    OVERFLOW_EXIT,
    UNSTABLE_LOOP_EXIT,
    TIMEOUT_EXIT
};
struct VMSideExit : public nanojit::SideExit
{
    intptr_t ip_adj;
    intptr_t sp_adj;
    intptr_t rp_adj;
    int32_t calldepth;
    uint32 numGlobalSlots;
    uint32 numStackSlots;
    uint32 numStackSlotsBelowCurrentFrame;
    ExitType exitType;
};

static inline uint8* getTypeMap(nanojit::SideExit* exit)
{
    return (uint8*)(((VMSideExit*)exit) + 1);
}
struct InterpState
{
    void* sp;                       /* native stack pointer, stack[0] is spbase[0] */
    void* rp;                       /* call stack pointer */
    void* gp;                       /* global frame pointer */
    JSContext *cx;                  /* current VM context handle */
    void* eos;                      /* first unusable word after the native stack */
    void* eor;                      /* first unusable word after the call stack */
    VMSideExit* lastTreeExitGuard;  /* guard we exited on during a tree call */
    VMSideExit* lastTreeCallGuard;  /* guard we want to grow from if the tree
                                       call exit guard mismatched */
    void* rpAtLastTreeCall;         /* value of rp at innermost tree call guard */
};
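
/* Linked-list node recording a side exit taken from a type-unstable loop in a given fragment. */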
struct UnstableExit
{
    nanojit::Fragment* fragment;
    VMSideExit* exit;
    UnstableExit* next;
};
class TreeInfo MMGC_SUBCLASS_DECL {
    nanojit::Fragment* fragment;
public:
    JSScript* script;
    unsigned maxNativeStackSlots;
    ptrdiff_t nativeStackBase;
    unsigned maxCallDepth;
    TypeMap stackTypeMap;
    Queue<nanojit::Fragment*> dependentTrees;
    unsigned branchCount;
    Queue<VMSideExit*> sideExits;
    UnstableExit* unstableExits;

    TreeInfo(nanojit::Fragment* _fragment) : unstableExits(NULL) {
        fragment = _fragment;
    }
    ~TreeInfo();
};
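
/* Bookkeeping for one interpreted call frame entered while recording. */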
struct FrameInfo {
    JSObject*       callee;     // callee function object
    intptr_t        ip_adj;     // callee script-based pc index and imacro pc
    uint8*          typemap;    // typemap for the stack frame
    union {
        struct {
            uint16  spdist;     // distance from fp->slots to fp->regs->sp at JSOP_CALL
            uint16  argc;       // actual argument count, may be < fun->nargs
        } s;
        uint32      word;       // for spdist/argc LIR store in record_JSOP_CALL
    };
};
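
/*
 * TraceRecorder emits LIR for each bytecode the interpreter executes while a
 * trace is being recorded; the record_JSOP_* methods generated from
 * jsopcode.tbl at the bottom of the class handle the individual opcodes.
 */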
class TraceRecorder : public avmplus::GCObject {
    JSContext* cx;
    JSTraceMonitor* traceMonitor;
    JSObject* globalObj;
    Tracker tracker;
    Tracker nativeFrameTracker;
    char* entryTypeMap;
    unsigned callDepth;
    JSAtom** atoms;
    VMSideExit* anchor;
    nanojit::Fragment* fragment;
    TreeInfo* treeInfo;
    nanojit::LirBuffer* lirbuf;
    nanojit::LirWriter* lir;
    nanojit::LirBufWriter* lir_buf_writer;
    nanojit::LirWriter* verbose_filter;
    nanojit::LirWriter* cse_filter;
    nanojit::LirWriter* expr_filter;
    nanojit::LirWriter* func_filter;
#ifdef NJ_SOFTFLOAT
    nanojit::LirWriter* float_filter;
#endif
    nanojit::LIns* cx_ins;
    nanojit::LIns* gp_ins;
    nanojit::LIns* eos_ins;
    nanojit::LIns* eor_ins;
    nanojit::LIns* rval_ins;
    nanojit::LIns* inner_sp_ins;
    bool deepAborted;
    bool applyingArguments;
    bool trashTree;
    nanojit::Fragment* whichTreeToTrash;
    Queue<jsbytecode*> cfgMerges;
    jsval* global_dslots;
    JSTraceableNative* pendingTraceableNative;
    bool terminate;
    intptr_t terminate_ip_adj;
    nanojit::Fragment* outerToBlacklist;
    nanojit::Fragment* promotedPeer;
    TraceRecorder* nextRecorderToAbort;
    bool wasRootFragment;

    bool isGlobal(jsval* p) const;
    ptrdiff_t nativeGlobalOffset(jsval* p) const;
    ptrdiff_t nativeStackOffset(jsval* p) const;
    void import(nanojit::LIns* base, ptrdiff_t offset, jsval* p, uint8& t,
                const char *prefix, uintN index, JSStackFrame *fp);
    void import(TreeInfo* treeInfo, nanojit::LIns* sp, unsigned ngslots, unsigned callDepth,
                uint8* globalTypeMap, uint8* stackTypeMap);
    void trackNativeStackUse(unsigned slots);

    bool lazilyImportGlobalSlot(unsigned slot);

    nanojit::LIns* guard(bool expected, nanojit::LIns* cond, ExitType exitType);
    nanojit::LIns* guard(bool expected, nanojit::LIns* cond, nanojit::LIns* exit);

    nanojit::LIns* addName(nanojit::LIns* ins, const char* name);

    nanojit::LIns* get(jsval* p) const;
    nanojit::LIns* writeBack(nanojit::LIns* i, nanojit::LIns* base, ptrdiff_t offset);
    void set(jsval* p, nanojit::LIns* l, bool initializing = false);

    bool checkType(jsval& v, uint8 t, jsval*& stage_val, nanojit::LIns*& stage_ins,
                   unsigned& stage_count);
    bool deduceTypeStability(nanojit::Fragment* root_peer, nanojit::Fragment** stable_peer,
                             unsigned* demotes);

    jsval& argval(unsigned n) const;
    jsval& varval(unsigned n) const;
    jsval& stackval(int n) const;

    nanojit::LIns* scopeChain() const;
    bool activeCallOrGlobalSlot(JSObject* obj, jsval*& vp);

    nanojit::LIns* arg(unsigned n);
    void arg(unsigned n, nanojit::LIns* i);
    nanojit::LIns* var(unsigned n);
    void var(unsigned n, nanojit::LIns* i);
    nanojit::LIns* stack(int n);
    void stack(int n, nanojit::LIns* i);

    nanojit::LIns* alu(nanojit::LOpcode op, jsdouble v0, jsdouble v1,
                       nanojit::LIns* s0, nanojit::LIns* s1);
    nanojit::LIns* f2i(nanojit::LIns* f);
    nanojit::LIns* makeNumberInt32(nanojit::LIns* f);
    nanojit::LIns* stringify(jsval& v);

    bool call_imacro(jsbytecode* imacro);

    bool ifop();
    bool switchop();
    bool inc(jsval& v, jsint incr, bool pre = true);
    bool inc(jsval& v, nanojit::LIns*& v_ins, jsint incr, bool pre = true);
    bool incProp(jsint incr, bool pre = true);
    bool incElem(jsint incr, bool pre = true);
    bool incName(jsint incr, bool pre = true);

    enum { CMP_NEGATE = 1, CMP_TRY_BRANCH_AFTER_COND = 2, CMP_CASE = 4, CMP_STRICT = 8 };
    bool cmp(nanojit::LOpcode op, int flags = 0);

    bool unary(nanojit::LOpcode op);
    bool binary(nanojit::LOpcode op);

    bool ibinary(nanojit::LOpcode op);
    bool iunary(nanojit::LOpcode op);
    bool bbinary(nanojit::LOpcode op);
    void demote(jsval& v, jsdouble result);

    bool map_is_native(JSObjectMap* map, nanojit::LIns* map_ins, nanojit::LIns*& ops_ins,
                       size_t op_offset = 0);
    bool test_property_cache(JSObject* obj, nanojit::LIns* obj_ins, JSObject*& obj2,
                             jsuword& pcval);
    bool test_property_cache_direct_slot(JSObject* obj, nanojit::LIns* obj_ins, uint32& slot);
    void stobj_set_slot(nanojit::LIns* obj_ins, unsigned slot,
                        nanojit::LIns*& dslots_ins, nanojit::LIns* v_ins);
    nanojit::LIns* stobj_get_fslot(nanojit::LIns* obj_ins, unsigned slot);
    nanojit::LIns* stobj_get_slot(nanojit::LIns* obj_ins, unsigned slot,
                                  nanojit::LIns*& dslots_ins);
    bool native_set(nanojit::LIns* obj_ins, JSScopeProperty* sprop,
                    nanojit::LIns*& dslots_ins, nanojit::LIns* v_ins);
    bool native_get(nanojit::LIns* obj_ins, nanojit::LIns* pobj_ins, JSScopeProperty* sprop,
                    nanojit::LIns*& dslots_ins, nanojit::LIns*& v_ins);

    bool name(jsval*& vp);
    bool prop(JSObject* obj, nanojit::LIns* obj_ins, uint32& slot, nanojit::LIns*& v_ins);
    bool elem(jsval& oval, jsval& idx, jsval*& vp, nanojit::LIns*& v_ins, nanojit::LIns*& addr_ins);
    bool getProp(JSObject* obj, nanojit::LIns* obj_ins);
    bool getProp(jsval& v);
    bool getThis(nanojit::LIns*& this_ins);

    bool box_jsval(jsval v, nanojit::LIns*& v_ins);
    bool unbox_jsval(jsval v, nanojit::LIns*& v_ins);
    bool guardClass(JSObject* obj, nanojit::LIns* obj_ins, JSClass* clasp,
                    ExitType exitType = MISMATCH_EXIT);
    bool guardDenseArray(JSObject* obj, nanojit::LIns* obj_ins,
                         ExitType exitType = MISMATCH_EXIT);
    bool guardDenseArrayIndex(JSObject* obj, jsint idx, nanojit::LIns* obj_ins,
                              nanojit::LIns* dslots_ins, nanojit::LIns* idx_ins,
                              ExitType exitType);
    bool guardElemOp(JSObject* obj, nanojit::LIns* obj_ins, jsid id, size_t op_offset, jsval* vp);
    void clearFrameSlotsFromCache();
    bool guardShapelessCallee(jsval& callee);
    bool interpretedFunctionCall(jsval& fval, JSFunction* fun, uintN argc, bool constructing);
    bool functionCall(bool constructing);

    void trackCfgMerges(jsbytecode* pc);
    void flipIf(jsbytecode* pc, bool& cond);
    void fuseIf(jsbytecode* pc, bool cond, nanojit::LIns* x);

    bool hasMethod(JSObject* obj, jsid id);
    bool hasToStringMethod(JSObject* obj);
    bool hasToStringMethod(jsval v) {
        JS_ASSERT(JSVAL_IS_OBJECT(v));
        return hasToStringMethod(JSVAL_TO_OBJECT(v));
    }
    bool hasValueOfMethod(JSObject* obj);
    bool hasValueOfMethod(jsval v) {
        JS_ASSERT(JSVAL_IS_OBJECT(v));
        return hasValueOfMethod(JSVAL_TO_OBJECT(v));
    }
    bool hasIteratorMethod(JSObject* obj);
    bool hasIteratorMethod(jsval v) {
        JS_ASSERT(JSVAL_IS_OBJECT(v));
        return hasIteratorMethod(JSVAL_TO_OBJECT(v));
    }

public:
    friend bool js_MonitorRecording(TraceRecorder* tr);

    TraceRecorder(JSContext* cx, VMSideExit*, nanojit::Fragment*, TreeInfo*,
                  unsigned ngslots, uint8* globalTypeMap, uint8* stackTypeMap,
                  VMSideExit* expectedInnerExit, nanojit::Fragment* outerToBlacklist);
    ~TraceRecorder();

    uint8 determineSlotType(jsval* vp) const;
    nanojit::LIns* snapshot(ExitType exitType);
    nanojit::Fragment* getFragment() const { return fragment; }
    bool isLoopHeader(JSContext* cx) const;
    void compile(nanojit::Fragmento* fragmento);
    bool closeLoop(nanojit::Fragmento* fragmento, bool& demote, unsigned *demotes);
    void endLoop(nanojit::Fragmento* fragmento);
    void joinEdgesToEntry(nanojit::Fragmento* fragmento, nanojit::Fragment* peer_root);
    void blacklist() { fragment->blacklist(); }
    bool adjustCallerTypes(nanojit::Fragment* f, unsigned* demote_slots, bool& trash);
    nanojit::Fragment* findNestedCompatiblePeer(nanojit::Fragment* f, nanojit::Fragment** empty);
    void prepareTreeCall(nanojit::Fragment* inner);
    void emitTreeCall(nanojit::Fragment* inner, VMSideExit* exit);
    unsigned getCallDepth() const;
    void pushAbortStack();
    void popAbortStack();
    void removeFragmentoReferences();

    bool record_EnterFrame();
    bool record_LeaveFrame();
    bool record_SetPropHit(JSPropCacheEntry* entry, JSScopeProperty* sprop);
    bool record_SetPropMiss(JSPropCacheEntry* entry);
    bool record_DefLocalFunSetSlot(uint32 slot, JSObject* obj);
    bool record_FastNativeCallComplete();
    bool record_IteratorNextComplete();

    nanojit::Fragment* getOuterToBlacklist() { return outerToBlacklist; }
    void deepAbort() { deepAborted = true; }
    bool wasDeepAborted() { return deepAborted; }
    bool walkedOutOfLoop() { return terminate; }
    void setPromotedPeer(nanojit::Fragment* peer) { promotedPeer = peer; }
    TreeInfo* getTreeInfo() { return treeInfo; }

#define OPDEF(op,val,name,token,length,nuses,ndefs,prec,format)               \
    bool record_##op();
# include "jsopcode.tbl"
#undef OPDEF
};
#define TRACING_ENABLED(cx)       JS_HAS_OPTION(cx, JSOPTION_JIT)
#define TRACE_RECORDER(cx)        (JS_TRACE_MONITOR(cx).recorder)
#define SET_TRACE_RECORDER(cx,tr) (JS_TRACE_MONITOR(cx).recorder = (tr))

#define JSOP_IS_BINARY(op) ((uintN)((op) - JSOP_BITOR) <= (uintN)(JSOP_MOD - JSOP_BITOR))

/*
 * See jsinterp.cpp for the ENABLE_TRACER definition. Also note how comparing x
 * to JSOP_* constants specializes trace-recording code at compile time either
 * to include imacro support, or exclude it altogether for this particular x.
 *
 * We save macro-generated code size also via bool TraceRecorder::record_JSOP_*
 * return type, instead of a three-state: OK, ABORTED, IMACRO_STARTED. But the
 * price of this is the JSFRAME_IMACRO_START frame flag. We need one more bit
 * to detect that TraceRecorder::call_imacro was invoked by the record_JSOP_*
 * method invoked by TRACE_ARGS_.
 */
#define RECORD_ARGS(x,args)                                                   \
    JS_BEGIN_MACRO                                                            \
        if (!js_MonitorRecording(TRACE_RECORDER(cx))) {                       \
            ENABLE_TRACER(0);                                                 \
        } else {                                                              \
            TRACE_ARGS_(x, args,                                              \
                if ((fp->flags & JSFRAME_IMACRO_START) &&                     \
                    (x == JSOP_ITER || x == JSOP_NEXTITER ||                  \
                     JSOP_IS_BINARY(x))) {                                    \
                    fp->flags &= ~JSFRAME_IMACRO_START;                       \
                    atoms = COMMON_ATOMS_START(&rt->atomState);               \
                    op = JSOp(*regs.pc);                                      \
                    DO_OP();                                                  \
                }                                                             \
            );                                                                \
        }                                                                     \
    JS_END_MACRO

#define TRACE_ARGS_(x,args,onfalse)                                           \
    JS_BEGIN_MACRO                                                            \
        TraceRecorder* tr_ = TRACE_RECORDER(cx);                              \
        if (tr_ && !tr_->record_##x args) {                                   \
            onfalse                                                           \
            js_AbortRecording(cx, #x);                                        \
            ENABLE_TRACER(0);                                                 \
        }                                                                     \
    JS_END_MACRO

#define TRACE_ARGS(x,args)      TRACE_ARGS_(x, args, )

#define RECORD(x)               RECORD_ARGS(x, ())
#define TRACE_0(x)              TRACE_ARGS(x, ())
#define TRACE_1(x,a)            TRACE_ARGS(x, (a))
#define TRACE_2(x,a,b)          TRACE_ARGS(x, (a, b))
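
/* JIT entry points called from the interpreter (implemented in jstracer.cpp). */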
extern bool
js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount);

extern bool
js_MonitorRecording(TraceRecorder *tr);

extern void
js_AbortRecording(JSContext* cx, const char* reason);

extern void
js_InitJIT(JSTraceMonitor *tm);

extern void
js_FinishJIT(JSTraceMonitor *tm);

extern void
js_FlushJITCache(JSContext* cx);

extern void
js_FlushJITOracle(JSContext* cx);

#else  /* !JS_TRACER */

#define RECORD(x)               ((void)0)
#define TRACE_0(x)              ((void)0)
#define TRACE_1(x,a)            ((void)0)
#define TRACE_2(x,a,b)          ((void)0)

#endif /* !JS_TRACER */

#endif /* jstracer_h___ */