
/js/lib/Socket.IO-node/support/expresso/deps/jscoverage/js/jsemit.cpp

http://github.com/onedayitwillmake/RealtimeMultiplayerNodeJs
C++ | 1964 lines | 1374 code | 204 blank | 386 comment | 322 complexity
Possible License(s): GPL-2.0, LGPL-2.1, MPL-2.0-no-copyleft-exception, BSD-3-Clause
  1. /* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
  2. * vim: set ts=8 sw=4 et tw=99:
  3. *
  4. * ***** BEGIN LICENSE BLOCK *****
  5. * Version: MPL 1.1/GPL 2.0/LGPL 2.1
  6. *
  7. * The contents of this file are subject to the Mozilla Public License Version
  8. * 1.1 (the "License"); you may not use this file except in compliance with
  9. * the License. You may obtain a copy of the License at
  10. * http://www.mozilla.org/MPL/
  11. *
  12. * Software distributed under the License is distributed on an "AS IS" basis,
  13. * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
  14. * for the specific language governing rights and limitations under the
  15. * License.
  16. *
  17. * The Original Code is Mozilla Communicator client code, released
  18. * March 31, 1998.
  19. *
  20. * The Initial Developer of the Original Code is
  21. * Netscape Communications Corporation.
  22. * Portions created by the Initial Developer are Copyright (C) 1998
  23. * the Initial Developer. All Rights Reserved.
  24. *
  25. * Contributor(s):
  26. *
  27. * Alternatively, the contents of this file may be used under the terms of
  28. * either of the GNU General Public License Version 2 or later (the "GPL"),
  29. * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
  30. * in which case the provisions of the GPL or the LGPL are applicable instead
  31. * of those above. If you wish to allow use of your version of this file only
  32. * under the terms of either the GPL or the LGPL, and not to allow others to
  33. * use your version of this file under the terms of the MPL, indicate your
  34. * decision by deleting the provisions above and replace them with the notice
  35. * and other provisions required by the GPL or the LGPL. If you do not delete
  36. * the provisions above, a recipient may use your version of this file under
  37. * the terms of any one of the MPL, the GPL or the LGPL.
  38. *
  39. * ***** END LICENSE BLOCK ***** */
  40. /*
  41. * JS bytecode generation.
  42. */
  43. #include "jsstddef.h"
  44. #ifdef HAVE_MEMORY_H
  45. #include <memory.h>
  46. #endif
  47. #include <string.h>
  48. #include "jstypes.h"
  49. #include "jsarena.h" /* Added by JSIFY */
  50. #include "jsutil.h" /* Added by JSIFY */
  51. #include "jsbit.h"
  52. #include "jsprf.h"
  53. #include "jsapi.h"
  54. #include "jsatom.h"
  55. #include "jsbool.h"
  56. #include "jscntxt.h"
  57. #include "jsversion.h"
  58. #include "jsemit.h"
  59. #include "jsfun.h"
  60. #include "jsnum.h"
  61. #include "jsopcode.h"
  62. #include "jsparse.h"
  63. #include "jsregexp.h"
  64. #include "jsscan.h"
  65. #include "jsscope.h"
  66. #include "jsscript.h"
  67. #include "jsautooplen.h"
  68. #include "jsstaticcheck.h"
  69. /* Allocation chunk counts, must be powers of two in general. */
  70. #define BYTECODE_CHUNK 256 /* code allocation increment */
  71. #define SRCNOTE_CHUNK 64 /* initial srcnote allocation increment */
  72. #define TRYNOTE_CHUNK 64 /* trynote allocation increment */
  73. /* Macros to compute byte sizes from typed element counts. */
  74. #define BYTECODE_SIZE(n) ((n) * sizeof(jsbytecode))
  75. #define SRCNOTE_SIZE(n) ((n) * sizeof(jssrcnote))
  76. #define TRYNOTE_SIZE(n) ((n) * sizeof(JSTryNote))
  77. static JSBool
  78. NewTryNote(JSContext *cx, JSCodeGenerator *cg, JSTryNoteKind kind,
  79. uintN stackDepth, size_t start, size_t end);
  80. JS_FRIEND_API(void)
  81. js_InitCodeGenerator(JSContext *cx, JSCodeGenerator *cg, JSParseContext *pc,
  82. JSArenaPool *codePool, JSArenaPool *notePool,
  83. uintN lineno)
  84. {
  85. memset(cg, 0, sizeof *cg);
  86. TREE_CONTEXT_INIT(&cg->treeContext, pc);
  87. cg->codePool = codePool;
  88. cg->notePool = notePool;
  89. cg->codeMark = JS_ARENA_MARK(codePool);
  90. cg->noteMark = JS_ARENA_MARK(notePool);
  91. cg->current = &cg->main;
  92. cg->firstLine = cg->prolog.currentLine = cg->main.currentLine = lineno;
  93. ATOM_LIST_INIT(&cg->atomList);
  94. cg->prolog.noteMask = cg->main.noteMask = SRCNOTE_CHUNK - 1;
  95. ATOM_LIST_INIT(&cg->constList);
  96. ATOM_LIST_INIT(&cg->upvarList);
  97. }
  98. JS_FRIEND_API(void)
  99. js_FinishCodeGenerator(JSContext *cx, JSCodeGenerator *cg)
  100. {
  101. TREE_CONTEXT_FINISH(cx, &cg->treeContext);
  102. JS_ARENA_RELEASE(cg->codePool, cg->codeMark);
  103. JS_ARENA_RELEASE(cg->notePool, cg->noteMark);
  104. /* NB: non-null only after OOM. */
  105. if (cg->spanDeps)
  106. JS_free(cx, cg->spanDeps);
  107. if (cg->upvarMap.vector)
  108. JS_free(cx, cg->upvarMap.vector);
  109. }
  110. static ptrdiff_t
  111. EmitCheck(JSContext *cx, JSCodeGenerator *cg, JSOp op, ptrdiff_t delta)
  112. {
  113. jsbytecode *base, *limit, *next;
  114. ptrdiff_t offset, length;
  115. size_t incr, size;
  116. base = CG_BASE(cg);
  117. next = CG_NEXT(cg);
  118. limit = CG_LIMIT(cg);
  119. offset = PTRDIFF(next, base, jsbytecode);
  120. if (next + delta > limit) {
  121. length = offset + delta;
  122. length = (length <= BYTECODE_CHUNK)
  123. ? BYTECODE_CHUNK
  124. : JS_BIT(JS_CeilingLog2(length));
  125. incr = BYTECODE_SIZE(length);
  126. if (!base) {
  127. JS_ARENA_ALLOCATE_CAST(base, jsbytecode *, cg->codePool, incr);
  128. } else {
  129. size = BYTECODE_SIZE(PTRDIFF(limit, base, jsbytecode));
  130. incr -= size;
  131. JS_ARENA_GROW_CAST(base, jsbytecode *, cg->codePool, size, incr);
  132. }
  133. if (!base) {
  134. js_ReportOutOfScriptQuota(cx);
  135. return -1;
  136. }
  137. CG_BASE(cg) = base;
  138. CG_LIMIT(cg) = base + length;
  139. CG_NEXT(cg) = base + offset;
  140. }
  141. return offset;
  142. }
  143. static void
  144. UpdateDepth(JSContext *cx, JSCodeGenerator *cg, ptrdiff_t target)
  145. {
  146. jsbytecode *pc;
  147. JSOp op;
  148. const JSCodeSpec *cs;
  149. uintN depth;
  150. intN nuses, ndefs;
  151. pc = CG_CODE(cg, target);
  152. op = (JSOp) *pc;
  153. cs = &js_CodeSpec[op];
  154. if (cs->format & JOF_TMPSLOT_MASK) {
  155. depth = (uintN) cg->stackDepth +
  156. ((cs->format & JOF_TMPSLOT_MASK) >> JOF_TMPSLOT_SHIFT);
  157. if (depth > cg->maxStackDepth)
  158. cg->maxStackDepth = depth;
  159. }
  160. nuses = cs->nuses;
  161. if (nuses < 0)
  162. nuses = js_GetVariableStackUseLength(op, pc);
  163. cg->stackDepth -= nuses;
  164. JS_ASSERT(cg->stackDepth >= 0);
  165. if (cg->stackDepth < 0) {
  166. char numBuf[12];
  167. JSTokenStream *ts;
  168. JS_snprintf(numBuf, sizeof numBuf, "%d", target);
  169. ts = &cg->treeContext.parseContext->tokenStream;
  170. JS_ReportErrorFlagsAndNumber(cx, JSREPORT_WARNING,
  171. js_GetErrorMessage, NULL,
  172. JSMSG_STACK_UNDERFLOW,
  173. ts->filename ? ts->filename : "stdin",
  174. numBuf);
  175. }
  176. ndefs = cs->ndefs;
  177. if (ndefs < 0) {
  178. JSObject *blockObj;
  179. /* We just executed IndexParsedObject */
  180. JS_ASSERT(op == JSOP_ENTERBLOCK);
  181. JS_ASSERT(nuses == 0);
  182. blockObj = cg->objectList.lastPob->object;
  183. JS_ASSERT(STOBJ_GET_CLASS(blockObj) == &js_BlockClass);
  184. JS_ASSERT(JSVAL_IS_VOID(blockObj->fslots[JSSLOT_BLOCK_DEPTH]));
  185. OBJ_SET_BLOCK_DEPTH(cx, blockObj, cg->stackDepth);
  186. ndefs = OBJ_BLOCK_COUNT(cx, blockObj);
  187. }
  188. cg->stackDepth += ndefs;
  189. if ((uintN)cg->stackDepth > cg->maxStackDepth)
  190. cg->maxStackDepth = cg->stackDepth;
  191. }
  192. ptrdiff_t
  193. js_Emit1(JSContext *cx, JSCodeGenerator *cg, JSOp op)
  194. {
  195. ptrdiff_t offset = EmitCheck(cx, cg, op, 1);
  196. if (offset >= 0) {
  197. *CG_NEXT(cg)++ = (jsbytecode)op;
  198. UpdateDepth(cx, cg, offset);
  199. }
  200. return offset;
  201. }
  202. ptrdiff_t
  203. js_Emit2(JSContext *cx, JSCodeGenerator *cg, JSOp op, jsbytecode op1)
  204. {
  205. ptrdiff_t offset = EmitCheck(cx, cg, op, 2);
  206. if (offset >= 0) {
  207. jsbytecode *next = CG_NEXT(cg);
  208. next[0] = (jsbytecode)op;
  209. next[1] = op1;
  210. CG_NEXT(cg) = next + 2;
  211. UpdateDepth(cx, cg, offset);
  212. }
  213. return offset;
  214. }
  215. ptrdiff_t
  216. js_Emit3(JSContext *cx, JSCodeGenerator *cg, JSOp op, jsbytecode op1,
  217. jsbytecode op2)
  218. {
  219. ptrdiff_t offset = EmitCheck(cx, cg, op, 3);
  220. if (offset >= 0) {
  221. jsbytecode *next = CG_NEXT(cg);
  222. next[0] = (jsbytecode)op;
  223. next[1] = op1;
  224. next[2] = op2;
  225. CG_NEXT(cg) = next + 3;
  226. UpdateDepth(cx, cg, offset);
  227. }
  228. return offset;
  229. }
  230. ptrdiff_t
  231. js_EmitN(JSContext *cx, JSCodeGenerator *cg, JSOp op, size_t extra)
  232. {
  233. ptrdiff_t length = 1 + (ptrdiff_t)extra;
  234. ptrdiff_t offset = EmitCheck(cx, cg, op, length);
  235. if (offset >= 0) {
  236. jsbytecode *next = CG_NEXT(cg);
  237. *next = (jsbytecode)op;
  238. memset(next + 1, 0, BYTECODE_SIZE(extra));
  239. CG_NEXT(cg) = next + length;
  240. /*
  241. * Don't UpdateDepth if op's use-count comes from the immediate
  242. * operand yet to be stored in the extra bytes after op.
  243. */
  244. if (js_CodeSpec[op].nuses >= 0)
  245. UpdateDepth(cx, cg, offset);
  246. }
  247. return offset;
  248. }
  249. /* XXX too many "... statement" L10N gaffes below -- fix via js.msg! */
  250. const char js_with_statement_str[] = "with statement";
  251. const char js_finally_block_str[] = "finally block";
  252. const char js_script_str[] = "script";
  253. static const char *statementName[] = {
  254. "label statement", /* LABEL */
  255. "if statement", /* IF */
  256. "else statement", /* ELSE */
  257. "destructuring body", /* BODY */
  258. "switch statement", /* SWITCH */
  259. "block", /* BLOCK */
  260. js_with_statement_str, /* WITH */
  261. "catch block", /* CATCH */
  262. "try block", /* TRY */
  263. js_finally_block_str, /* FINALLY */
  264. js_finally_block_str, /* SUBROUTINE */
  265. "do loop", /* DO_LOOP */
  266. "for loop", /* FOR_LOOP */
  267. "for/in loop", /* FOR_IN_LOOP */
  268. "while loop", /* WHILE_LOOP */
  269. };
  270. JS_STATIC_ASSERT(JS_ARRAY_LENGTH(statementName) == STMT_LIMIT);
  271. static const char *
  272. StatementName(JSCodeGenerator *cg)
  273. {
  274. if (!cg->treeContext.topStmt)
  275. return js_script_str;
  276. return statementName[cg->treeContext.topStmt->type];
  277. }
  278. static void
  279. ReportStatementTooLarge(JSContext *cx, JSCodeGenerator *cg)
  280. {
  281. JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_NEED_DIET,
  282. StatementName(cg));
  283. }
  284. /**
  285. Span-dependent instructions in JS bytecode consist of the jump (JOF_JUMP)
  286. and switch (JOF_LOOKUPSWITCH, JOF_TABLESWITCH) format opcodes, subdivided
  287. into unconditional (gotos and gosubs), and conditional jumps or branches
  288. (which pop a value, test it, and jump depending on its value). Most jumps
  289. have just one immediate operand, a signed offset from the jump opcode's pc
  290. to the target bytecode. The lookup and table switch opcodes may contain
  291. many jump offsets.
  292. Mozilla bug #80981 (http://bugzilla.mozilla.org/show_bug.cgi?id=80981) was
  293. fixed by adding extended "X" counterparts to the opcodes/formats (NB: X is
  294. suffixed to prefer JSOP_ORX thereby avoiding a JSOP_XOR name collision for
  295. the extended form of the JSOP_OR branch opcode). The unextended or short
  296. formats have 16-bit signed immediate offset operands, the extended or long
  297. formats have 32-bit signed immediates. The span-dependency problem consists
  298. of selecting as few long instructions as possible, or about as few -- since
  299. jumps can span other jumps, extending one jump may cause another to need to
  300. be extended.
  301. Most JS scripts are short, so need no extended jumps. We optimize for this
  302. case by generating short jumps until we know a long jump is needed. After
  303. that point, we keep generating short jumps, but each jump's 16-bit immediate
  304. offset operand is actually an unsigned index into cg->spanDeps, an array of
  305. JSSpanDep structs. Each struct tells the top offset in the script of the
  306. opcode, the "before" offset of the jump (which will be the same as top for
  307. simplex jumps, but which will index further into the bytecode array for a
  308. non-initial jump offset in a lookup or table switch), the after "offset"
  309. adjusted during span-dependent instruction selection (initially the same
  310. value as the "before" offset), and the jump target (more below).
  311. Since we generate cg->spanDeps lazily, from within js_SetJumpOffset, we must
  312. ensure that all bytecode generated so far can be inspected to discover where
  313. the jump offset immediate operands lie within CG_CODE(cg). But the bonus is
  314. that we generate span-dependency records sorted by their offsets, so we can
  315. binary-search when trying to find a JSSpanDep for a given bytecode offset,
  316. or the nearest JSSpanDep at or above a given pc.
  317. To avoid limiting scripts to 64K jumps, if the cg->spanDeps index overflows
  318. 65534, we store SPANDEP_INDEX_HUGE in the jump's immediate operand. This
  319. tells us that we need to binary-search for the cg->spanDeps entry by the
  320. jump opcode's bytecode offset (sd->before).
  321. Jump targets need to be maintained in a data structure that lets us look
  322. up an already-known target by its address (jumps may have a common target),
  323. and that also lets us update the addresses (script-relative, a.k.a. absolute
  324. offsets) of targets that come after a jump target (for when a jump below
  325. that target needs to be extended). We use an AVL tree, implemented using
  326. recursion, but with some tricky optimizations to its height-balancing code
  327. (see http://www.cmcrossroads.com/bradapp/ftp/src/libs/C++/AvlTrees.html).
  328. A final wrinkle: backpatch chains are linked by jump-to-jump offsets with
  329. positive sign, even though they link "backward" (i.e., toward lower bytecode
  330. address). We don't want to waste space and search time in the AVL tree for
  331. such temporary backpatch deltas, so we use a single-bit wildcard scheme to
  332. tag true JSJumpTarget pointers and encode untagged, signed (positive) deltas
  333. in JSSpanDep.target pointers, depending on whether the JSSpanDep has a known
  334. target, or is still awaiting backpatching.
  335. Note that backpatch chains would present a problem for BuildSpanDepTable,
  336. which inspects bytecode to build cg->spanDeps on demand, when the first
  337. short jump offset overflows. To solve this temporary problem, we emit a
  338. proxy bytecode (JSOP_BACKPATCH; JSOP_BACKPATCH_POP for branch ops) whose
  339. nuses/ndefs counts help keep the stack balanced, but whose opcode format
  340. distinguishes its backpatch delta immediate operand from a normal jump
  341. offset.
  342. */
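/*
 * [Editor's illustrative sketch -- not part of the original jsemit.cpp.]
 * Once cg->spanDeps exists, the 16-bit immediate of a short jump no longer
 * holds the jump span itself but an index into cg->spanDeps (or
 * SPANDEP_INDEX_HUGE when the index does not fit), so reading a jump offset
 * conceptually becomes the two-way split that GetJumpOffset implements
 * further below:
 *
 *     ptrdiff_t span = cg->spanDeps
 *                      ? SD_SPAN(GetSpanDep(cg, pc), pivot)  // indirect
 *                      : GET_JUMP_OFFSET(pc);                // direct 16 bits
 *
 * where pivot stands for the (possibly adjusted) offset of the jump opcode's
 * first offset operand.
 */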
  343. static int
  344. BalanceJumpTargets(JSJumpTarget **jtp)
  345. {
  346. JSJumpTarget *jt, *jt2, *root;
  347. int dir, otherDir, heightChanged;
  348. JSBool doubleRotate;
  349. jt = *jtp;
  350. JS_ASSERT(jt->balance != 0);
  351. if (jt->balance < -1) {
  352. dir = JT_RIGHT;
  353. doubleRotate = (jt->kids[JT_LEFT]->balance > 0);
  354. } else if (jt->balance > 1) {
  355. dir = JT_LEFT;
  356. doubleRotate = (jt->kids[JT_RIGHT]->balance < 0);
  357. } else {
  358. return 0;
  359. }
  360. otherDir = JT_OTHER_DIR(dir);
  361. if (doubleRotate) {
  362. jt2 = jt->kids[otherDir];
  363. *jtp = root = jt2->kids[dir];
  364. jt->kids[otherDir] = root->kids[dir];
  365. root->kids[dir] = jt;
  366. jt2->kids[dir] = root->kids[otherDir];
  367. root->kids[otherDir] = jt2;
  368. heightChanged = 1;
  369. root->kids[JT_LEFT]->balance = -JS_MAX(root->balance, 0);
  370. root->kids[JT_RIGHT]->balance = -JS_MIN(root->balance, 0);
  371. root->balance = 0;
  372. } else {
  373. *jtp = root = jt->kids[otherDir];
  374. jt->kids[otherDir] = root->kids[dir];
  375. root->kids[dir] = jt;
  376. heightChanged = (root->balance != 0);
  377. jt->balance = -((dir == JT_LEFT) ? --root->balance : ++root->balance);
  378. }
  379. return heightChanged;
  380. }
  381. typedef struct AddJumpTargetArgs {
  382. JSContext *cx;
  383. JSCodeGenerator *cg;
  384. ptrdiff_t offset;
  385. JSJumpTarget *node;
  386. } AddJumpTargetArgs;
  387. static int
  388. AddJumpTarget(AddJumpTargetArgs *args, JSJumpTarget **jtp)
  389. {
  390. JSJumpTarget *jt;
  391. int balanceDelta;
  392. jt = *jtp;
  393. if (!jt) {
  394. JSCodeGenerator *cg = args->cg;
  395. jt = cg->jtFreeList;
  396. if (jt) {
  397. cg->jtFreeList = jt->kids[JT_LEFT];
  398. } else {
  399. JS_ARENA_ALLOCATE_CAST(jt, JSJumpTarget *, &args->cx->tempPool,
  400. sizeof *jt);
  401. if (!jt) {
  402. js_ReportOutOfScriptQuota(args->cx);
  403. return 0;
  404. }
  405. }
  406. jt->offset = args->offset;
  407. jt->balance = 0;
  408. jt->kids[JT_LEFT] = jt->kids[JT_RIGHT] = NULL;
  409. cg->numJumpTargets++;
  410. args->node = jt;
  411. *jtp = jt;
  412. return 1;
  413. }
  414. if (jt->offset == args->offset) {
  415. args->node = jt;
  416. return 0;
  417. }
  418. if (args->offset < jt->offset)
  419. balanceDelta = -AddJumpTarget(args, &jt->kids[JT_LEFT]);
  420. else
  421. balanceDelta = AddJumpTarget(args, &jt->kids[JT_RIGHT]);
  422. if (!args->node)
  423. return 0;
  424. jt->balance += balanceDelta;
  425. return (balanceDelta && jt->balance)
  426. ? 1 - BalanceJumpTargets(jtp)
  427. : 0;
  428. }
  429. #ifdef DEBUG_brendan
  430. static int AVLCheck(JSJumpTarget *jt)
  431. {
  432. int lh, rh;
  433. if (!jt) return 0;
  434. JS_ASSERT(-1 <= jt->balance && jt->balance <= 1);
  435. lh = AVLCheck(jt->kids[JT_LEFT]);
  436. rh = AVLCheck(jt->kids[JT_RIGHT]);
  437. JS_ASSERT(jt->balance == rh - lh);
  438. return 1 + JS_MAX(lh, rh);
  439. }
  440. #endif
  441. static JSBool
  442. SetSpanDepTarget(JSContext *cx, JSCodeGenerator *cg, JSSpanDep *sd,
  443. ptrdiff_t off)
  444. {
  445. AddJumpTargetArgs args;
  446. if (off < JUMPX_OFFSET_MIN || JUMPX_OFFSET_MAX < off) {
  447. ReportStatementTooLarge(cx, cg);
  448. return JS_FALSE;
  449. }
  450. args.cx = cx;
  451. args.cg = cg;
  452. args.offset = sd->top + off;
  453. args.node = NULL;
  454. AddJumpTarget(&args, &cg->jumpTargets);
  455. if (!args.node)
  456. return JS_FALSE;
  457. #ifdef DEBUG_brendan
  458. AVLCheck(cg->jumpTargets);
  459. #endif
  460. SD_SET_TARGET(sd, args.node);
  461. return JS_TRUE;
  462. }
  463. #define SPANDEPS_MIN 256
  464. #define SPANDEPS_SIZE(n) ((n) * sizeof(JSSpanDep))
  465. #define SPANDEPS_SIZE_MIN SPANDEPS_SIZE(SPANDEPS_MIN)
  466. static JSBool
  467. AddSpanDep(JSContext *cx, JSCodeGenerator *cg, jsbytecode *pc, jsbytecode *pc2,
  468. ptrdiff_t off)
  469. {
  470. uintN index;
  471. JSSpanDep *sdbase, *sd;
  472. size_t size;
  473. index = cg->numSpanDeps;
  474. if (index + 1 == 0) {
  475. ReportStatementTooLarge(cx, cg);
  476. return JS_FALSE;
  477. }
  478. if ((index & (index - 1)) == 0 &&
  479. (!(sdbase = cg->spanDeps) || index >= SPANDEPS_MIN)) {
  480. size = sdbase ? SPANDEPS_SIZE(index) : SPANDEPS_SIZE_MIN / 2;
  481. sdbase = (JSSpanDep *) JS_realloc(cx, sdbase, size + size);
  482. if (!sdbase)
  483. return JS_FALSE;
  484. cg->spanDeps = sdbase;
  485. }
  486. cg->numSpanDeps = index + 1;
  487. sd = cg->spanDeps + index;
  488. sd->top = PTRDIFF(pc, CG_BASE(cg), jsbytecode);
  489. sd->offset = sd->before = PTRDIFF(pc2, CG_BASE(cg), jsbytecode);
  490. if (js_CodeSpec[*pc].format & JOF_BACKPATCH) {
  491. /* Jump offset will be backpatched if off is a non-zero "bpdelta". */
  492. if (off != 0) {
  493. JS_ASSERT(off >= 1 + JUMP_OFFSET_LEN);
  494. if (off > BPDELTA_MAX) {
  495. ReportStatementTooLarge(cx, cg);
  496. return JS_FALSE;
  497. }
  498. }
  499. SD_SET_BPDELTA(sd, off);
  500. } else if (off == 0) {
  501. /* Jump offset will be patched directly, without backpatch chaining. */
  502. SD_SET_TARGET(sd, 0);
  503. } else {
  504. /* The jump offset in off is non-zero, therefore it's already known. */
  505. if (!SetSpanDepTarget(cx, cg, sd, off))
  506. return JS_FALSE;
  507. }
  508. if (index > SPANDEP_INDEX_MAX)
  509. index = SPANDEP_INDEX_HUGE;
  510. SET_SPANDEP_INDEX(pc2, index);
  511. return JS_TRUE;
  512. }
  513. static jsbytecode *
  514. AddSwitchSpanDeps(JSContext *cx, JSCodeGenerator *cg, jsbytecode *pc)
  515. {
  516. JSOp op;
  517. jsbytecode *pc2;
  518. ptrdiff_t off;
  519. jsint low, high;
  520. uintN njumps, indexlen;
  521. op = (JSOp) *pc;
  522. JS_ASSERT(op == JSOP_TABLESWITCH || op == JSOP_LOOKUPSWITCH);
  523. pc2 = pc;
  524. off = GET_JUMP_OFFSET(pc2);
  525. if (!AddSpanDep(cx, cg, pc, pc2, off))
  526. return NULL;
  527. pc2 += JUMP_OFFSET_LEN;
  528. if (op == JSOP_TABLESWITCH) {
  529. low = GET_JUMP_OFFSET(pc2);
  530. pc2 += JUMP_OFFSET_LEN;
  531. high = GET_JUMP_OFFSET(pc2);
  532. pc2 += JUMP_OFFSET_LEN;
  533. njumps = (uintN) (high - low + 1);
  534. indexlen = 0;
  535. } else {
  536. njumps = GET_UINT16(pc2);
  537. pc2 += UINT16_LEN;
  538. indexlen = INDEX_LEN;
  539. }
  540. while (njumps) {
  541. --njumps;
  542. pc2 += indexlen;
  543. off = GET_JUMP_OFFSET(pc2);
  544. if (!AddSpanDep(cx, cg, pc, pc2, off))
  545. return NULL;
  546. pc2 += JUMP_OFFSET_LEN;
  547. }
  548. return 1 + pc2;
  549. }
  550. static JSBool
  551. BuildSpanDepTable(JSContext *cx, JSCodeGenerator *cg)
  552. {
  553. jsbytecode *pc, *end;
  554. JSOp op;
  555. const JSCodeSpec *cs;
  556. ptrdiff_t off;
  557. pc = CG_BASE(cg) + cg->spanDepTodo;
  558. end = CG_NEXT(cg);
  559. while (pc != end) {
  560. JS_ASSERT(pc < end);
  561. op = (JSOp)*pc;
  562. cs = &js_CodeSpec[op];
  563. switch (JOF_TYPE(cs->format)) {
  564. case JOF_TABLESWITCH:
  565. case JOF_LOOKUPSWITCH:
  566. pc = AddSwitchSpanDeps(cx, cg, pc);
  567. if (!pc)
  568. return JS_FALSE;
  569. break;
  570. case JOF_JUMP:
  571. off = GET_JUMP_OFFSET(pc);
  572. if (!AddSpanDep(cx, cg, pc, pc, off))
  573. return JS_FALSE;
  574. /* FALL THROUGH */
  575. default:
  576. pc += cs->length;
  577. break;
  578. }
  579. }
  580. return JS_TRUE;
  581. }
  582. static JSSpanDep *
  583. GetSpanDep(JSCodeGenerator *cg, jsbytecode *pc)
  584. {
  585. uintN index;
  586. ptrdiff_t offset;
  587. int lo, hi, mid;
  588. JSSpanDep *sd;
  589. index = GET_SPANDEP_INDEX(pc);
  590. if (index != SPANDEP_INDEX_HUGE)
  591. return cg->spanDeps + index;
  592. offset = PTRDIFF(pc, CG_BASE(cg), jsbytecode);
  593. lo = 0;
  594. hi = cg->numSpanDeps - 1;
  595. while (lo <= hi) {
  596. mid = (lo + hi) / 2;
  597. sd = cg->spanDeps + mid;
  598. if (sd->before == offset)
  599. return sd;
  600. if (sd->before < offset)
  601. lo = mid + 1;
  602. else
  603. hi = mid - 1;
  604. }
  605. JS_ASSERT(0);
  606. return NULL;
  607. }
  608. static JSBool
  609. SetBackPatchDelta(JSContext *cx, JSCodeGenerator *cg, jsbytecode *pc,
  610. ptrdiff_t delta)
  611. {
  612. JSSpanDep *sd;
  613. JS_ASSERT(delta >= 1 + JUMP_OFFSET_LEN);
  614. if (!cg->spanDeps && delta < JUMP_OFFSET_MAX) {
  615. SET_JUMP_OFFSET(pc, delta);
  616. return JS_TRUE;
  617. }
  618. if (delta > BPDELTA_MAX) {
  619. ReportStatementTooLarge(cx, cg);
  620. return JS_FALSE;
  621. }
  622. if (!cg->spanDeps && !BuildSpanDepTable(cx, cg))
  623. return JS_FALSE;
  624. sd = GetSpanDep(cg, pc);
  625. JS_ASSERT(SD_GET_BPDELTA(sd) == 0);
  626. SD_SET_BPDELTA(sd, delta);
  627. return JS_TRUE;
  628. }
  629. static void
  630. UpdateJumpTargets(JSJumpTarget *jt, ptrdiff_t pivot, ptrdiff_t delta)
  631. {
  632. if (jt->offset > pivot) {
  633. jt->offset += delta;
  634. if (jt->kids[JT_LEFT])
  635. UpdateJumpTargets(jt->kids[JT_LEFT], pivot, delta);
  636. }
  637. if (jt->kids[JT_RIGHT])
  638. UpdateJumpTargets(jt->kids[JT_RIGHT], pivot, delta);
  639. }
  640. static JSSpanDep *
  641. FindNearestSpanDep(JSCodeGenerator *cg, ptrdiff_t offset, int lo,
  642. JSSpanDep *guard)
  643. {
  644. int num, hi, mid;
  645. JSSpanDep *sdbase, *sd;
  646. num = cg->numSpanDeps;
  647. JS_ASSERT(num > 0);
  648. hi = num - 1;
  649. sdbase = cg->spanDeps;
  650. while (lo <= hi) {
  651. mid = (lo + hi) / 2;
  652. sd = sdbase + mid;
  653. if (sd->before == offset)
  654. return sd;
  655. if (sd->before < offset)
  656. lo = mid + 1;
  657. else
  658. hi = mid - 1;
  659. }
  660. if (lo == num)
  661. return guard;
  662. sd = sdbase + lo;
  663. JS_ASSERT(sd->before >= offset && (lo == 0 || sd[-1].before < offset));
  664. return sd;
  665. }
  666. static void
  667. FreeJumpTargets(JSCodeGenerator *cg, JSJumpTarget *jt)
  668. {
  669. if (jt->kids[JT_LEFT])
  670. FreeJumpTargets(cg, jt->kids[JT_LEFT]);
  671. if (jt->kids[JT_RIGHT])
  672. FreeJumpTargets(cg, jt->kids[JT_RIGHT]);
  673. jt->kids[JT_LEFT] = cg->jtFreeList;
  674. cg->jtFreeList = jt;
  675. }
  676. static JSBool
  677. OptimizeSpanDeps(JSContext *cx, JSCodeGenerator *cg)
  678. {
  679. jsbytecode *pc, *oldpc, *base, *limit, *next;
  680. JSSpanDep *sd, *sd2, *sdbase, *sdlimit, *sdtop, guard;
  681. ptrdiff_t offset, growth, delta, top, pivot, span, length, target;
  682. JSBool done;
  683. JSOp op;
  684. uint32 type;
  685. size_t size, incr;
  686. jssrcnote *sn, *snlimit;
  687. JSSrcNoteSpec *spec;
  688. uintN i, n, noteIndex;
  689. JSTryNode *tryNode;
  690. #ifdef DEBUG_brendan
  691. int passes = 0;
  692. #endif
  693. base = CG_BASE(cg);
  694. sdbase = cg->spanDeps;
  695. sdlimit = sdbase + cg->numSpanDeps;
  696. offset = CG_OFFSET(cg);
  697. growth = 0;
  698. do {
  699. done = JS_TRUE;
  700. delta = 0;
  701. top = pivot = -1;
  702. sdtop = NULL;
  703. pc = NULL;
  704. op = JSOP_NOP;
  705. type = 0;
  706. #ifdef DEBUG_brendan
  707. passes++;
  708. #endif
  709. for (sd = sdbase; sd < sdlimit; sd++) {
  710. JS_ASSERT(JT_HAS_TAG(sd->target));
  711. sd->offset += delta;
  712. if (sd->top != top) {
  713. sdtop = sd;
  714. top = sd->top;
  715. JS_ASSERT(top == sd->before);
  716. pivot = sd->offset;
  717. pc = base + top;
  718. op = (JSOp) *pc;
  719. type = JOF_OPTYPE(op);
  720. if (JOF_TYPE_IS_EXTENDED_JUMP(type)) {
  721. /*
  722. * We already extended all the jump offset operands for
  723. * the opcode at sd->top. Jumps and branches have only
  724. * one jump offset operand, but switches have many, all
  725. * of which are adjacent in cg->spanDeps.
  726. */
  727. continue;
  728. }
  729. JS_ASSERT(type == JOF_JUMP ||
  730. type == JOF_TABLESWITCH ||
  731. type == JOF_LOOKUPSWITCH);
  732. }
  733. if (!JOF_TYPE_IS_EXTENDED_JUMP(type)) {
  734. span = SD_SPAN(sd, pivot);
  735. if (span < JUMP_OFFSET_MIN || JUMP_OFFSET_MAX < span) {
  736. ptrdiff_t deltaFromTop = 0;
  737. done = JS_FALSE;
  738. switch (op) {
  739. case JSOP_GOTO: op = JSOP_GOTOX; break;
  740. case JSOP_IFEQ: op = JSOP_IFEQX; break;
  741. case JSOP_IFNE: op = JSOP_IFNEX; break;
  742. case JSOP_OR: op = JSOP_ORX; break;
  743. case JSOP_AND: op = JSOP_ANDX; break;
  744. case JSOP_GOSUB: op = JSOP_GOSUBX; break;
  745. case JSOP_CASE: op = JSOP_CASEX; break;
  746. case JSOP_DEFAULT: op = JSOP_DEFAULTX; break;
  747. case JSOP_TABLESWITCH: op = JSOP_TABLESWITCHX; break;
  748. case JSOP_LOOKUPSWITCH: op = JSOP_LOOKUPSWITCHX; break;
  749. default:
  750. ReportStatementTooLarge(cx, cg);
  751. return JS_FALSE;
  752. }
  753. *pc = (jsbytecode) op;
  754. for (sd2 = sdtop; sd2 < sdlimit && sd2->top == top; sd2++) {
  755. if (sd2 <= sd) {
  756. /*
  757. * sd2->offset already includes delta as it stood
  758. * before we entered this loop, but it must also
  759. * include the delta relative to top due to all the
  760. * extended jump offset immediates for the opcode
  761. * starting at top, which we extend in this loop.
  762. *
  763. * If there is only one extended jump offset, then
  764. * sd2->offset won't change and this for loop will
  765. * iterate once only.
  766. */
  767. sd2->offset += deltaFromTop;
  768. deltaFromTop += JUMPX_OFFSET_LEN - JUMP_OFFSET_LEN;
  769. } else {
  770. /*
  771. * sd2 comes after sd, and won't be revisited by
  772. * the outer for loop, so we have to increase its
  773. * offset by delta, not merely by deltaFromTop.
  774. */
  775. sd2->offset += delta;
  776. }
  777. delta += JUMPX_OFFSET_LEN - JUMP_OFFSET_LEN;
  778. UpdateJumpTargets(cg->jumpTargets, sd2->offset,
  779. JUMPX_OFFSET_LEN - JUMP_OFFSET_LEN);
  780. }
  781. sd = sd2 - 1;
  782. }
  783. }
  784. }
  785. growth += delta;
  786. } while (!done);
  787. if (growth) {
  788. #ifdef DEBUG_brendan
  789. JSTokenStream *ts = &cg->treeContext.parseContext->tokenStream;
  790. printf("%s:%u: %u/%u jumps extended in %d passes (%d=%d+%d)\n",
  791. ts->filename ? ts->filename : "stdin", cg->firstLine,
  792. growth / (JUMPX_OFFSET_LEN - JUMP_OFFSET_LEN), cg->numSpanDeps,
  793. passes, offset + growth, offset, growth);
  794. #endif
  795. /*
  796. * Ensure that we have room for the extended jumps, but don't round up
  797. * to a power of two -- we're done generating code, so we cut to fit.
  798. */
  799. limit = CG_LIMIT(cg);
  800. length = offset + growth;
  801. next = base + length;
  802. if (next > limit) {
  803. JS_ASSERT(length > BYTECODE_CHUNK);
  804. size = BYTECODE_SIZE(PTRDIFF(limit, base, jsbytecode));
  805. incr = BYTECODE_SIZE(length) - size;
  806. JS_ARENA_GROW_CAST(base, jsbytecode *, cg->codePool, size, incr);
  807. if (!base) {
  808. js_ReportOutOfScriptQuota(cx);
  809. return JS_FALSE;
  810. }
  811. CG_BASE(cg) = base;
  812. CG_LIMIT(cg) = next = base + length;
  813. }
  814. CG_NEXT(cg) = next;
  815. /*
  816. * Set up a fake span dependency record to guard the end of the code
  817. * being generated. This guard record is returned as a fencepost by
  818. * FindNearestSpanDep if there is no real spandep at or above a given
  819. * unextended code offset.
  820. */
  821. guard.top = -1;
  822. guard.offset = offset + growth;
  823. guard.before = offset;
  824. guard.target = NULL;
  825. }
  826. /*
  827. * Now work backwards through the span dependencies, copying chunks of
  828. * bytecode between each extended jump toward the end of the grown code
  829. * space, and restoring immediate offset operands for all jump bytecodes.
  830. * The first chunk of bytecodes, starting at base and ending at the first
  831. * extended jump offset (NB: this chunk includes the operation bytecode
  832. * just before that immediate jump offset), doesn't need to be copied.
  833. */
  834. JS_ASSERT(sd == sdlimit);
  835. top = -1;
  836. while (--sd >= sdbase) {
  837. if (sd->top != top) {
  838. top = sd->top;
  839. op = (JSOp) base[top];
  840. type = JOF_OPTYPE(op);
  841. for (sd2 = sd - 1; sd2 >= sdbase && sd2->top == top; sd2--)
  842. continue;
  843. sd2++;
  844. pivot = sd2->offset;
  845. JS_ASSERT(top == sd2->before);
  846. }
  847. oldpc = base + sd->before;
  848. span = SD_SPAN(sd, pivot);
  849. /*
  850. * If this jump didn't need to be extended, restore its span immediate
  851. * offset operand now, overwriting the index of sd within cg->spanDeps
  852. * that was stored temporarily after *pc when BuildSpanDepTable ran.
  853. *
  854. * Note that span might fit in 16 bits even for an extended jump op,
  855. * if the op has multiple span operands, not all of which overflowed
  856. * (e.g. JSOP_LOOKUPSWITCH or JSOP_TABLESWITCH where some cases are in
  857. * range for a short jump, but others are not).
  858. */
  859. if (!JOF_TYPE_IS_EXTENDED_JUMP(type)) {
  860. JS_ASSERT(JUMP_OFFSET_MIN <= span && span <= JUMP_OFFSET_MAX);
  861. SET_JUMP_OFFSET(oldpc, span);
  862. continue;
  863. }
  864. /*
  865. * Set up parameters needed to copy the next run of bytecode starting
  866. * at offset (which is a cursor into the unextended, original bytecode
  867. * vector), down to sd->before (a cursor of the same scale as offset,
  868. * it's the index of the original jump pc). Reuse delta to count the
  869. * nominal number of bytes to copy.
  870. */
  871. pc = base + sd->offset;
  872. delta = offset - sd->before;
  873. JS_ASSERT(delta >= 1 + JUMP_OFFSET_LEN);
  874. /*
  875. * Don't bother copying the jump offset we're about to reset, but do
  876. * copy the bytecode at oldpc (which comes just before its immediate
  877. * jump offset operand), on the next iteration through the loop, by
  878. * including it in offset's new value.
  879. */
  880. offset = sd->before + 1;
  881. size = BYTECODE_SIZE(delta - (1 + JUMP_OFFSET_LEN));
  882. if (size) {
  883. memmove(pc + 1 + JUMPX_OFFSET_LEN,
  884. oldpc + 1 + JUMP_OFFSET_LEN,
  885. size);
  886. }
  887. SET_JUMPX_OFFSET(pc, span);
  888. }
  889. if (growth) {
  890. /*
  891. * Fix source note deltas. Don't hardwire the delta fixup adjustment,
  892. * even though currently it must be JUMPX_OFFSET_LEN - JUMP_OFFSET_LEN
  893. * at each sd that moved. The future may bring different offset sizes
  894. * for span-dependent instruction operands. However, we fix only main
  895. * notes here, not prolog notes -- we know that prolog opcodes are not
  896. * span-dependent, and aren't likely ever to be.
  897. */
  898. offset = growth = 0;
  899. sd = sdbase;
  900. for (sn = cg->main.notes, snlimit = sn + cg->main.noteCount;
  901. sn < snlimit;
  902. sn = SN_NEXT(sn)) {
  903. /*
  904. * Recall that the offset of a given note includes its delta, and
  905. * tells the offset of the annotated bytecode from the main entry
  906. * point of the script.
  907. */
  908. offset += SN_DELTA(sn);
  909. while (sd < sdlimit && sd->before < offset) {
  910. /*
  911. * To compute the delta to add to sn, we need to look at the
  912. * spandep after sd, whose offset - (before + growth) tells by
  913. * how many bytes sd's instruction grew.
  914. */
  915. sd2 = sd + 1;
  916. if (sd2 == sdlimit)
  917. sd2 = &guard;
  918. delta = sd2->offset - (sd2->before + growth);
  919. if (delta > 0) {
  920. JS_ASSERT(delta == JUMPX_OFFSET_LEN - JUMP_OFFSET_LEN);
  921. sn = js_AddToSrcNoteDelta(cx, cg, sn, delta);
  922. if (!sn)
  923. return JS_FALSE;
  924. snlimit = cg->main.notes + cg->main.noteCount;
  925. growth += delta;
  926. }
  927. sd++;
  928. }
  929. /*
  930. * If sn has span-dependent offset operands, check whether each
  931. * covers further span-dependencies, and increase those operands
  932. * accordingly. Some source notes measure offset not from the
  933. * annotated pc, but from that pc plus some small bias. NB: we
  934. * assume that spec->offsetBias can't itself span span-dependent
  935. * instructions!
  936. */
  937. spec = &js_SrcNoteSpec[SN_TYPE(sn)];
  938. if (spec->isSpanDep) {
  939. pivot = offset + spec->offsetBias;
  940. n = spec->arity;
  941. for (i = 0; i < n; i++) {
  942. span = js_GetSrcNoteOffset(sn, i);
  943. if (span == 0)
  944. continue;
  945. target = pivot + span * spec->isSpanDep;
  946. sd2 = FindNearestSpanDep(cg, target,
  947. (target >= pivot)
  948. ? sd - sdbase
  949. : 0,
  950. &guard);
  951. /*
  952. * Increase target by sd2's before-vs-after offset delta,
  953. * which is absolute (i.e., relative to start of script,
  954. * as is target). Recompute the span by subtracting its
  955. * adjusted pivot from target.
  956. */
  957. target += sd2->offset - sd2->before;
  958. span = target - (pivot + growth);
  959. span *= spec->isSpanDep;
  960. noteIndex = sn - cg->main.notes;
  961. if (!js_SetSrcNoteOffset(cx, cg, noteIndex, i, span))
  962. return JS_FALSE;
  963. sn = cg->main.notes + noteIndex;
  964. snlimit = cg->main.notes + cg->main.noteCount;
  965. }
  966. }
  967. }
  968. cg->main.lastNoteOffset += growth;
  969. /*
  970. * Fix try/catch notes (O(numTryNotes * log2(numSpanDeps)), but it's
  971. * not clear how we can beat that).
  972. */
  973. for (tryNode = cg->lastTryNode; tryNode; tryNode = tryNode->prev) {
  974. /*
  975. * First, look for the nearest span dependency at/above tn->start.
  976. * There may not be any such spandep, in which case the guard will
  977. * be returned.
  978. */
  979. offset = tryNode->note.start;
  980. sd = FindNearestSpanDep(cg, offset, 0, &guard);
  981. delta = sd->offset - sd->before;
  982. tryNode->note.start = offset + delta;
  983. /*
  984. * Next, find the nearest spandep at/above tn->start + tn->length.
  985. * Use its delta minus tn->start's delta to increase tn->length.
  986. */
  987. length = tryNode->note.length;
  988. sd2 = FindNearestSpanDep(cg, offset + length, sd - sdbase, &guard);
  989. if (sd2 != sd) {
  990. tryNode->note.length =
  991. length + sd2->offset - sd2->before - delta;
  992. }
  993. }
  994. }
  995. #ifdef DEBUG_brendan
  996. {
  997. uintN bigspans = 0;
  998. top = -1;
  999. for (sd = sdbase; sd < sdlimit; sd++) {
  1000. offset = sd->offset;
  1001. /* NB: sd->top cursors into the original, unextended bytecode vector. */
  1002. if (sd->top != top) {
  1003. JS_ASSERT(top == -1 ||
  1004. !JOF_TYPE_IS_EXTENDED_JUMP(type) ||
  1005. bigspans != 0);
  1006. bigspans = 0;
  1007. top = sd->top;
  1008. JS_ASSERT(top == sd->before);
  1009. op = (JSOp) base[offset];
  1010. type = JOF_OPTYPE(op);
  1011. JS_ASSERT(type == JOF_JUMP ||
  1012. type == JOF_JUMPX ||
  1013. type == JOF_TABLESWITCH ||
  1014. type == JOF_TABLESWITCHX ||
  1015. type == JOF_LOOKUPSWITCH ||
  1016. type == JOF_LOOKUPSWITCHX);
  1017. pivot = offset;
  1018. }
  1019. pc = base + offset;
  1020. if (JOF_TYPE_IS_EXTENDED_JUMP(type)) {
  1021. span = GET_JUMPX_OFFSET(pc);
  1022. if (span < JUMP_OFFSET_MIN || JUMP_OFFSET_MAX < span) {
  1023. bigspans++;
  1024. } else {
  1025. JS_ASSERT(type == JOF_TABLESWITCHX ||
  1026. type == JOF_LOOKUPSWITCHX);
  1027. }
  1028. } else {
  1029. span = GET_JUMP_OFFSET(pc);
  1030. }
  1031. JS_ASSERT(SD_SPAN(sd, pivot) == span);
  1032. }
  1033. JS_ASSERT(!JOF_TYPE_IS_EXTENDED_JUMP(type) || bigspans != 0);
  1034. }
  1035. #endif
  1036. /*
  1037. * Reset so we optimize at most once -- cg may be used for further code
  1038. * generation of successive, independent, top-level statements. No jump
  1039. * can span top-level statements, because JS lacks goto.
  1040. */
  1041. size = SPANDEPS_SIZE(JS_BIT(JS_CeilingLog2(cg->numSpanDeps)));
  1042. JS_free(cx, cg->spanDeps);
  1043. cg->spanDeps = NULL;
  1044. FreeJumpTargets(cg, cg->jumpTargets);
  1045. cg->jumpTargets = NULL;
  1046. cg->numSpanDeps = cg->numJumpTargets = 0;
  1047. cg->spanDepTodo = CG_OFFSET(cg);
  1048. return JS_TRUE;
  1049. }
  1050. static ptrdiff_t
  1051. EmitJump(JSContext *cx, JSCodeGenerator *cg, JSOp op, ptrdiff_t off)
  1052. {
  1053. JSBool extend;
  1054. ptrdiff_t jmp;
  1055. jsbytecode *pc;
  1056. extend = off < JUMP_OFFSET_MIN || JUMP_OFFSET_MAX < off;
  1057. if (extend && !cg->spanDeps && !BuildSpanDepTable(cx, cg))
  1058. return -1;
  1059. jmp = js_Emit3(cx, cg, op, JUMP_OFFSET_HI(off), JUMP_OFFSET_LO(off));
  1060. if (jmp >= 0 && (extend || cg->spanDeps)) {
  1061. pc = CG_CODE(cg, jmp);
  1062. if (!AddSpanDep(cx, cg, pc, pc, off))
  1063. return -1;
  1064. }
  1065. return jmp;
  1066. }
  1067. static ptrdiff_t
  1068. GetJumpOffset(JSCodeGenerator *cg, jsbytecode *pc)
  1069. {
  1070. JSSpanDep *sd;
  1071. JSJumpTarget *jt;
  1072. ptrdiff_t top;
  1073. if (!cg->spanDeps)
  1074. return GET_JUMP_OFFSET(pc);
  1075. sd = GetSpanDep(cg, pc);
  1076. jt = sd->target;
  1077. if (!JT_HAS_TAG(jt))
  1078. return JT_TO_BPDELTA(jt);
  1079. top = sd->top;
  1080. while (--sd >= cg->spanDeps && sd->top == top)
  1081. continue;
  1082. sd++;
  1083. return JT_CLR_TAG(jt)->offset - sd->offset;
  1084. }
  1085. JSBool
  1086. js_SetJumpOffset(JSContext *cx, JSCodeGenerator *cg, jsbytecode *pc,
  1087. ptrdiff_t off)
  1088. {
  1089. if (!cg->spanDeps) {
  1090. if (JUMP_OFFSET_MIN <= off && off <= JUMP_OFFSET_MAX) {
  1091. SET_JUMP_OFFSET(pc, off);
  1092. return JS_TRUE;
  1093. }
  1094. if (!BuildSpanDepTable(cx, cg))
  1095. return JS_FALSE;
  1096. }
  1097. return SetSpanDepTarget(cx, cg, GetSpanDep(cg, pc), off);
  1098. }
  1099. JSBool
  1100. js_InStatement(JSTreeContext *tc, JSStmtType type)
  1101. {
  1102. JSStmtInfo *stmt;
  1103. for (stmt = tc->topStmt; stmt; stmt = stmt->down) {
  1104. if (stmt->type == type)
  1105. return JS_TRUE;
  1106. }
  1107. return JS_FALSE;
  1108. }
  1109. void
  1110. js_PushStatement(JSTreeContext *tc, JSStmtInfo *stmt, JSStmtType type,
  1111. ptrdiff_t top)
  1112. {
  1113. stmt->type = type;
  1114. stmt->flags = 0;
  1115. SET_STATEMENT_TOP(stmt, top);
  1116. stmt->u.label = NULL;
  1117. JS_ASSERT(!stmt->u.blockObj);
  1118. stmt->down = tc->topStmt;
  1119. tc->topStmt = stmt;
  1120. if (STMT_LINKS_SCOPE(stmt)) {
  1121. stmt->downScope = tc->topScopeStmt;
  1122. tc->topScopeStmt = stmt;
  1123. } else {
  1124. stmt->downScope = NULL;
  1125. }
  1126. }
  1127. void
  1128. js_PushBlockScope(JSTreeContext *tc, JSStmtInfo *stmt, JSObject *blockObj,
  1129. ptrdiff_t top)
  1130. {
  1131. js_PushStatement(tc, stmt, STMT_BLOCK, top);
  1132. stmt->flags |= SIF_SCOPE;
  1133. STOBJ_SET_PARENT(blockObj, tc->blockChain);
  1134. stmt->downScope = tc->topScopeStmt;
  1135. tc->topScopeStmt = stmt;
  1136. tc->blockChain = blockObj;
  1137. stmt->u.blockObj = blockObj;
  1138. }
  1139. /*
  1140. * Emit a backpatch op with offset pointing to the previous jump of this type,
  1141. * so that we can walk back up the chain fixing up the op and jump offset.
  1142. */
  1143. static ptrdiff_t
  1144. EmitBackPatchOp(JSContext *cx, JSCodeGenerator *cg, JSOp op, ptrdiff_t *lastp)
  1145. {
  1146. ptrdiff_t offset, delta;
  1147. offset = CG_OFFSET(cg);
  1148. delta = offset - *lastp;
  1149. *lastp = offset;
  1150. JS_ASSERT(delta > 0);
  1151. return EmitJump(cx, cg, op, delta);
  1152. }
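/*
 * [Editor's illustrative note -- not part of the original jsemit.cpp.]
 * Example of the chain EmitBackPatchOp builds and BackPatch (below) unwinds,
 * assuming *lastp starts at -1 (matching BackPatch's CG_CODE(cg, -1) stop
 * sentinel): break jumps emitted at offsets 10 and 25 store backpatch deltas
 * 11 and 15, so BackPatch can walk 25 -> 10 -> -1, rewriting each
 * JSOP_BACKPATCH into the final jump op with its real span.
 */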
  1153. /*
  1154. * Macro to emit a bytecode followed by a uint16 immediate operand stored in
  1155. * big-endian order, used for arg and var numbers as well as for atomIndexes.
  1156. * NB: We use cx and cg from our caller's lexical environment, and return
  1157. * false on error.
  1158. */
  1159. #define EMIT_UINT16_IMM_OP(op, i) \
  1160. JS_BEGIN_MACRO \
  1161. if (js_Emit3(cx, cg, op, UINT16_HI(i), UINT16_LO(i)) < 0) \
  1162. return JS_FALSE; \
  1163. JS_END_MACRO
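/*
 * [Editor's illustrative note -- not part of the original jsemit.cpp.]
 * For example, EMIT_UINT16_IMM_OP(JSOP_POPN, 0x0102) emits the three bytes
 * { JSOP_POPN, 0x01, 0x02 }: UINT16_HI supplies the high byte and UINT16_LO
 * the low byte, giving the big-endian immediate layout described above.
 */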
  1164. static JSBool
  1165. FlushPops(JSContext *cx, JSCodeGenerator *cg, intN *npops)
  1166. {
  1167. JS_ASSERT(*npops != 0);
  1168. if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0)
  1169. return JS_FALSE;
  1170. EMIT_UINT16_IMM_OP(JSOP_POPN, *npops);
  1171. *npops = 0;
  1172. return JS_TRUE;
  1173. }
  1174. /*
  1175. * Emit additional bytecode(s) for non-local jumps.
  1176. */
  1177. static JSBool
  1178. EmitNonLocalJumpFixup(JSContext *cx, JSCodeGenerator *cg, JSStmtInfo *toStmt)
  1179. {
  1180. intN depth, npops;
  1181. JSStmtInfo *stmt;
  1182. /*
  1183. * The non-local jump fixup we emit will unbalance cg->stackDepth, because
  1184. * the fixup replicates balanced code such as JSOP_LEAVEWITH emitted at the
  1185. * end of a with statement, so we save cg->stackDepth here and restore it
  1186. * just before a successful return.
  1187. */
  1188. depth = cg->stackDepth;
  1189. npops = 0;
  1190. #define FLUSH_POPS() if (npops && !FlushPops(cx, cg, &npops)) return JS_FALSE
  1191. for (stmt = cg->treeContext.topStmt; stmt != toStmt; stmt = stmt->down) {
  1192. switch (stmt->type) {
  1193. case STMT_FINALLY:
  1194. FLUSH_POPS();
  1195. if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0)
  1196. return JS_FALSE;
  1197. if (EmitBackPatchOp(cx, cg, JSOP_BACKPATCH, &GOSUBS(*stmt)) < 0)
  1198. return JS_FALSE;
  1199. break;
  1200. case STMT_WITH:
  1201. /* There's a With object on the stack that we need to pop. */
  1202. FLUSH_POPS();
  1203. if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0)
  1204. return JS_FALSE;
  1205. if (js_Emit1(cx, cg, JSOP_LEAVEWITH) < 0)
  1206. return JS_FALSE;
  1207. break;
  1208. case STMT_FOR_IN_LOOP:
  1209. /*
  1210. * The iterator and the object being iterated need to be popped.
  1211. */
  1212. FLUSH_POPS();
  1213. if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0)
  1214. return JS_FALSE;
  1215. if (js_Emit1(cx, cg, JSOP_ENDITER) < 0)
  1216. return JS_FALSE;
  1217. break;
  1218. case STMT_SUBROUTINE:
  1219. /*
  1220. * There's a [exception or hole, retsub pc-index] pair on the
  1221. * stack that we need to pop.
  1222. */
  1223. npops += 2;
  1224. break;
  1225. default:;
  1226. }
  1227. if (stmt->flags & SIF_SCOPE) {
  1228. uintN i;
  1229. /* There is a Block object with locals on the stack to pop. */
  1230. FLUSH_POPS();
  1231. if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0)
  1232. return JS_FALSE;
  1233. i = OBJ_BLOCK_COUNT(cx, stmt->u.blockObj);
  1234. EMIT_UINT16_IMM_OP(JSOP_LEAVEBLOCK, i);
  1235. }
  1236. }
  1237. FLUSH_POPS();
  1238. cg->stackDepth = depth;
  1239. return JS_TRUE;
  1240. #undef FLUSH_POPS
  1241. }
  1242. static ptrdiff_t
  1243. EmitGoto(JSContext *cx, JSCodeGenerator *cg, JSStmtInfo *toStmt,
  1244. ptrdiff_t *lastp, JSAtomListElement *label, JSSrcNoteType noteType)
  1245. {
  1246. intN index;
  1247. if (!EmitNonLocalJumpFixup(cx, cg, toStmt))
  1248. return -1;
  1249. if (label)
  1250. index = js_NewSrcNote2(cx, cg, noteType, (ptrdiff_t) ALE_INDEX(label));
  1251. else if (noteType != SRC_NULL)
  1252. index = js_NewSrcNote(cx, cg, noteType);
  1253. else
  1254. index = 0;
  1255. if (index < 0)
  1256. return -1;
  1257. return EmitBackPatchOp(cx, cg, JSOP_BACKPATCH, lastp);
  1258. }
  1259. static JSBool
  1260. BackPatch(JSContext *cx, JSCodeGenerator *cg, ptrdiff_t last,
  1261. jsbytecode *target, jsbytecode op)
  1262. {
  1263. jsbytecode *pc, *stop;
  1264. ptrdiff_t delta, span;
  1265. pc = CG_CODE(cg, last);
  1266. stop = CG_CODE(cg, -1);
  1267. while (pc != stop) {
  1268. delta = GetJumpOffset(cg, pc);
  1269. span = PTRDIFF(target, pc, jsbytecode);
  1270. CHECK_AND_SET_JUMP_OFFSET(cx, cg, pc, span);
  1271. /*
  1272. * Set *pc after jump offset in case bpdelta didn't overflow, but span
  1273. * does (if so, CHECK_AND_SET_JUMP_OFFSET might call BuildSpanDepTable
  1274. * and need to see the JSOP_BACKPATCH* op at *pc).
  1275. */
  1276. *pc = op;
  1277. pc -= delta;
  1278. }
  1279. return JS_TRUE;
  1280. }
  1281. void
  1282. js_PopStatement(JSTreeContext *tc)
  1283. {
  1284. JSStmtInfo *stmt;
  1285. stmt = tc->topStmt;
  1286. tc->topStmt = stmt->down;
  1287. if (STMT_LINKS_SCOPE(stmt)) {
  1288. tc->topScopeStmt = stmt->downScope;
  1289. if (stmt->flags & SIF_SCOPE) {
  1290. tc->blockChain = STOBJ_GET_PARENT(stmt->u.blockObj);
  1291. JS_SCOPE_DEPTH_METERING(--tc->scopeDepth);
  1292. }
  1293. }
  1294. }
  1295. JSBool
  1296. js_PopStatementCG(JSContext *cx, JSCodeGenerator *cg)
  1297. {
  1298. JSStmtInfo *stmt;
  1299. stmt = cg->treeContext.topStmt;
  1300. if (!STMT_IS_TRYING(stmt) &&
  1301. (!BackPatch(cx, cg, stmt->breaks, CG_NEXT(cg), JSOP_GOTO) ||
  1302. !BackPatch(cx, cg, stmt->continues, CG_CODE(cg, stmt->update),
  1303. JSOP_GOTO))) {
  1304. return JS_FALSE;
  1305. }
  1306. js_PopStatement(&cg->treeContext);
  1307. return JS_TRUE;
  1308. }
  1309. JSBool
  1310. js_DefineCompileTimeConstant(JSContext *cx, JSCodeGenerator *cg, JSAtom *atom,
  1311. JSParseNode *pn)
  1312. {
  1313. jsdouble dval;
  1314. jsint ival;
  1315. JSAtom *valueAtom;
  1316. jsval v;
  1317. JSAtomListElement *ale;
  1318. /* XXX just do numbers for now */
  1319. if (pn->pn_type == TOK_NUMBER) {
  1320. dval = pn->pn_dval;
  1321. if (JSDOUBLE_IS_INT(dval, ival) && INT_FITS_IN_JSVAL(ival)) {
  1322. v = INT_TO_JSVAL(ival);
  1323. } else {
  1324. /*
  1325. * We atomize double to root a jsdouble instance that we wrap as
  1326. * jsval and store in cg->constList. This works because atoms are
  1327. * protected from GC during compilation.
  1328. */
  1329. valueAtom = js_AtomizeDouble(cx, dval);
  1330. if (!valueAtom)
  1331. return JS_FALSE;
  1332. v = ATOM_KEY(valueAtom);
  1333. }
  1334. ale = js_IndexAtom(cx, atom, &cg->constList);
  1335. if (!ale)
  1336. return JS_FALSE;
  1337. ALE_SET_VALUE(ale, v);
  1338. }
  1339. return JS_TRUE;
  1340. }
  1341. JSStmtInfo *
  1342. js_LexicalLookup(JSTreeContext *tc, JSAtom *atom, jsint *slotp)
  1343. {
  1344. JSStmtInfo *stmt;
  1345. JSObject *obj;
  1346. JSScope *scope;
  1347. JSScopeProperty *sprop;
  1348. for (stmt = tc->topScopeStmt; stmt; stmt = stmt->downScope) {
  1349. if (stmt->type == STMT_WITH)
  1350. break;
  1351. /* Skip "maybe scope" statements that don't contain let bindings. */
  1352. if (!(stmt->flags & SIF_SCOPE))
  1353. continue;
  1354. obj = stmt->u.blockObj;
  1355. JS_ASSERT(LOCKED_OBJ_GET_CLASS(obj) == &js_BlockClass);
  1356. scope = OBJ_SCOPE(obj);
  1357. sprop = SCOPE_GET_PROPERTY(scope, ATOM_TO_JSID(atom));
  1358. if (sprop) {
  1359. JS_ASSERT(sprop->flags & SPROP_HAS_SHORTID);
  1360. if (slotp) {
  1361. JS_ASSERT(JSVAL_IS_INT(obj->fslots[JSSLOT_BLOCK_DEPTH]));
  1362. *slotp = JSVAL_TO_INT(obj->fslots[JSSLOT_BLOCK_DEPTH]) +
  1363. sprop->shortid;
  1364. }
  1365. return stmt;
  1366. }
  1367. }
  1368. if (slotp)
  1369. *slotp = -1;
  1370. return stmt;
  1371. }
  1372. /*
  1373. * Check if the attributes describe a property holding a compile-time constant
  1374. * or a permanent, read-only property without a getter.
  1375. */
  1376. #define IS_CONSTANT_PROPERTY(attrs) \
  1377. (((attrs) & (JSPROP_READONLY | JSPROP_PERMANENT | JSPROP_GETTER)) == \
  1378. (JSPROP_READONLY | JSPROP_PERMANENT))
  1379. /*
  1380. * The function sets vp to JSVAL_HOLE when the atom does not correspond to a
  1381. * name defining a constant.
  1382. */
  1383. static JSBool
  1384. LookupCompileTimeConstant(JSContext *cx, JSCodeGenerator *cg, JSAtom *atom,
  1385. jsval *vp)
  1386. {
  1387. JSBool ok;
  1388. JSStmtInfo *stmt;
  1389. JSAtomListElement *ale;
  1390. JSObject *obj, *pobj;
  1391. JSProperty *prop;
  1392. uintN attrs;
  1393. /*
  1394. * Chase down the cg stack, but only until we reach the outermost cg.
  1395. * This enables propagating consts from top-level into switch cases in a
  1396. * function compiled along with the top-level script.
  1397. */
  1398. *vp = JSVAL_HOLE;
  1399. do {
  1400. if (cg->treeContext.flags & (TCF_IN_FUNCTION | TCF_COMPILE_N_GO)) {
  1401. /* XXX this will need revising when 'let const' is added. */
  1402. stmt = js_LexicalLookup(&cg->treeContext, atom, NULL);
  1403. if (stmt)
  1404. return JS_TRUE;
  1405. ATOM_LIST_SEARCH(ale, &cg->constList, atom);
  1406. if (ale) {
  1407. JS_ASSERT(ALE_VALUE(ale) != JSVAL_HOLE);
  1408. *vp = ALE_VALUE(ale);
  1409. return JS_TRUE;
  1410. }
  1411. /*
  1412. * Try looking in the variable object for a direct property that
  1413. * is readonly and permanent. We know such a property can't be
  1414. * shadowed by another property on obj's prototype chain, or a
  1415. * with object or catch variable; nor can prop's value be changed,
  1416. * nor can prop be deleted.
  1417. */
  1418. if (cg->treeContext.flags & TCF_IN_FUNCTION) {
  1419. if (js_LookupLocal(cx, cg->treeContext.u.fun, atom, NULL) !=
  1420. JSLOCAL_NONE) {
  1421. break;
  1422. }
  1423. } else {
  1424. JS_ASSERT(cg->treeContext.flags & TCF_COMPILE_N_GO);
  1425. obj = cg->treeContext.u.scopeChain;
  1426. ok = OBJ_LOOKUP_PROPERTY(cx, obj, ATOM_TO_JSID(atom), &pobj,
  1427. &prop);
  1428. if (!ok)
  1429. return JS_FALSE;
  1430. if (pobj == obj) {
  1431. /*
  1432. * We're compiling code that will be executed immediately,
  1433. * not re-executed against a different scope chain and/or
  1434. * variable object. Therefore we can get constant values
  1435. * from our variable object here.
  1436. */
  1437. ok = OBJ_GET_ATTRIBUTES(cx, obj, ATOM_TO_JSID(atom), prop,
  1438. &attrs);
  1439. if (ok && IS_CONSTANT_PROPERTY(attrs)) {
  1440. ok = OBJ_GET_PROPERTY(cx, obj, ATOM_TO_JSID(atom), vp);
  1441. JS_ASSERT_IF(ok, *vp != JSVAL_HOLE);
  1442. }
  1443. }
  1444. if (prop)
  1445. OBJ_DROP_PROPERTY(cx, pobj, prop);
  1446. if (!ok)
  1447. return JS_FALSE;
  1448. if (prop)
  1449. break;
  1450. }
  1451. }
  1452. } while ((cg = cg->parent) != NULL);
  1453. return JS_TRUE;
  1454. }
  1455. /*
  1456. * Return JSOP_NOP to indicate that index fits 2 bytes and no index segment
  1457. * reset instruction is necessary, JSOP_FALSE to indicate an error or either
  1458. * JSOP_RESETBASE0 or JSOP_RESETBASE to indicate the reset bytecode to issue
  1459. * after the main bytecode sequence.
  1460. */
  1461. static JSOp
  1462. EmitBigIndexPrefix(JSContext *cx, JSCodeGenerator *cg, uintN index)
  1463. {
  1464. uintN indexBase;
  1465. /*
  1466. * We have max 3 bytes for indexes and check for INDEX_LIMIT overflow only
  1467. * for big indexes.
  1468. */
  1469. JS_STATIC_ASSERT(INDEX_LIMIT <= JS_BIT(24));
  1470. JS_STATIC_ASSERT(INDEX_LIMIT >=
  1471. (JSOP_INDEXBASE3 - JSOP_INDEXBASE1 + 2) << 16);
  1472. if (index < JS_BIT(16))
  1473. return JSOP_NOP;
  1474. indexBase = index >> 16;
  1475. if (indexBase <= JSOP_INDEXBASE3 - JSOP_INDEXBASE1 + 1) {
  1476. if (js_Emit1(cx, cg, (JSOp)(JSOP_INDEXBASE1 + indexBase - 1)) < 0)
  1477. return JSOP_FALSE;
  1478. return JSOP_RESETBASE0;
  1479. }
  1480. if (index >= INDEX_LIMIT) {
  1481. JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
  1482. JSMSG_TOO_MANY_LITERALS);
  1483. return JSOP_FALSE;
  1484. }
  1485. if (js_Emit2(cx, cg, JSOP_INDEXBASE, (JSOp)indexBase) < 0)
  1486. return JSOP_FALSE;
  1487. return JSOP_RESETBASE;
  1488. }
  1489. /*
  1490. * Emit a bytecode and its 2-byte constant index immediate operand. If the
  1491. * index requires more than 2 bytes, emit a prefix op whose 8-bit immediate
  1492. * operand effectively extends the 16-bit immediate of the prefixed opcode,
  1493. * by changing index "segment" (see jsinterp.c). We optimize segments 1-3
  1494. * with single-byte JSOP_INDEXBASE[123] codes.
  1495. *
  1496. * Such prefixing currently requires a suffix to restore the "zero segment"
  1497. * register setting, but this could be optimized further.
  1498. */
  1499. static JSBool
  1500. EmitIndexOp(JSContext *cx, JSOp op, uintN index, JSCodeGenerator *cg)
  1501. {
  1502. JSOp bigSuffix;
  1503. bigSuffix = EmitBigIndexPrefix(cx, cg, index);
  1504. if (bigSuffix == JSOP_FALSE)
  1505. return JS_FALSE;
  1506. EMIT_UINT16_IMM_OP(op, index);
  1507. return bigSuffix == JSOP_NOP || js_Emit1(cx, cg, bigSuffix) >= 0;
  1508. }
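/*
 * [Editor's illustrative note -- not part of the original jsemit.cpp.]
 * Worked example of the index-segment scheme: for index = 0x12345,
 * EmitBigIndexPrefix sees indexBase = index >> 16 = 1 and emits the
 * single-byte JSOP_INDEXBASE1 prefix; EmitIndexOp then emits op with the low
 * 16 bits (0x2345) as its uint16 immediate, and finally emits the returned
 * JSOP_RESETBASE0 suffix to restore the zero-segment register setting.
 */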
  1509. /*
  1510. * Slight sugar for EmitIndexOp, again accessing cx and cg from the macro
  1511. * caller's lexical environment, and embedding a false return on error.
  1512. */
  1513. #define EMIT_INDEX_OP(op, index) \
  1514. JS_BEGIN_MACRO \
  1515. if (!EmitIndexOp(cx, op, index, cg)) \
  1516. return JS_FALSE; \
  1517. JS_END_MACRO
  1518. static JSBool
  1519. EmitAtomOp(JSContext *cx, JSParseNode *pn, JSOp op, JSCodeGenerator *cg)
  1520. {
  1521. JSAtomListElement *ale;
  1522. JS_ASSERT(JOF_OPTYPE(op) == JOF_ATOM);
  1523. if (op == JSOP_GETPROP &&
  1524. pn->pn_atom == cx->runtime->atomState.lengthAtom) {
  1525. return js_Emit1(cx, cg, JSOP_LENGTH) >= 0;
  1526. }
  1527. ale = js_IndexAtom(cx, pn->pn_atom, &cg->atomList);
  1528. if (!ale)
  1529. return JS_FALSE;
  1530. return EmitIndexOp(cx, op, ALE_INDEX(ale), cg);
  1531. }

static uintN
IndexParsedObject(JSParsedObjectBox *pob, JSEmittedObjectList *list);

static JSBool
EmitObjectOp(JSContext *cx, JSParsedObjectBox *pob, JSOp op,
             JSCodeGenerator *cg)
{
    JS_ASSERT(JOF_OPTYPE(op) == JOF_OBJECT);
    return EmitIndexOp(cx, op, IndexParsedObject(pob, &cg->objectList), cg);
}

/*
 * What good are ARGNO_LEN and SLOTNO_LEN, you ask?  The answer is that, apart
 * from EmitSlotIndexOp, they abstract out the detail that both are 2, and in
 * other parts of the code there's no necessary relationship between the two.
 * The abstraction cracks here in order to share EmitSlotIndexOp code among
 * the JSOP_DEFLOCALFUN and JSOP_GET{ARG,VAR,LOCAL}PROP cases.
 */
JS_STATIC_ASSERT(ARGNO_LEN == 2);
JS_STATIC_ASSERT(SLOTNO_LEN == 2);

static JSBool
EmitSlotIndexOp(JSContext *cx, JSOp op, uintN slot, uintN index,
                JSCodeGenerator *cg)
{
    JSOp bigSuffix;
    ptrdiff_t off;
    jsbytecode *pc;

    JS_ASSERT(JOF_OPTYPE(op) == JOF_SLOTATOM ||
              JOF_OPTYPE(op) == JOF_SLOTOBJECT);
    bigSuffix = EmitBigIndexPrefix(cx, cg, index);
    if (bigSuffix == JSOP_FALSE)
        return JS_FALSE;

    /* Emit [op, slot, index]. */
    off = js_EmitN(cx, cg, op, 2 + INDEX_LEN);
    if (off < 0)
        return JS_FALSE;
    pc = CG_CODE(cg, off);
    SET_UINT16(pc, slot);
    pc += 2;
    SET_INDEX(pc, index);
    return bigSuffix == JSOP_NOP || js_Emit1(cx, cg, bigSuffix) >= 0;
}
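
/*
 * The bytecode emitted by EmitSlotIndexOp is thus a 2-byte slot immediate
 * followed by an INDEX_LEN (2-byte) atom or object index, with the same
 * optional JSOP_INDEXBASE* prefix and JSOP_RESETBASE* suffix wrapping as
 * plain index ops when index >= 2^16.
 */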

/*
 * Adjust the slot for a block local to account for the number of variables
 * that share the same index space with locals. Due to the incremental code
 * generation for top-level script, we do the adjustment via code patching in
 * js_CompileScript; see comments there.
 *
 * The function returns -1 on failures.
 */
static jsint
AdjustBlockSlot(JSContext *cx, JSCodeGenerator *cg, jsint slot)
{
    JS_ASSERT((jsuint) slot < cg->maxStackDepth);
    if (cg->treeContext.flags & TCF_IN_FUNCTION) {
        slot += cg->treeContext.u.fun->u.i.nvars;
        if ((uintN) slot >= SLOTNO_LIMIT) {
            js_ReportCompileErrorNumber(cx, CG_TS(cg), NULL,
                                        JSREPORT_ERROR,
                                        JSMSG_TOO_MANY_LOCALS);
            slot = -1;
        }
    }
    return slot;
}
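
/*
 * Example: inside a function whose u.i.nvars is 3, a block-scoped local that
 * was assigned block slot 0 is rewritten by AdjustBlockSlot to frame slot 3,
 * since block locals share the JSOP_*LOCAL slot space with the function's
 * declared variables.  Only the SLOTNO_LIMIT overflow check can fail here.
 */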

/*
 * This routine tries to optimize name gets and sets to stack slot loads and
 * stores, given the variables object and scope chain in cx's top frame, the
 * compile-time context in tc, and a TOK_NAME node pn.  It returns false on
 * error, true on success.
 *
 * The caller can inspect pn->pn_slot for a non-negative slot number to tell
 * whether optimization occurred, in which case BindNameToSlot also updated
 * pn->pn_op.  If pn->pn_slot is still -1 on return, pn->pn_op nevertheless
 * may have been optimized, e.g., from JSOP_NAME to JSOP_ARGUMENTS.  Whether
 * or not pn->pn_op was modified, if this function finds an argument or local
 * variable name, pn->pn_const will be true for const properties after a
 * successful return.
 *
 * NB: if you add more opcodes specialized from JSOP_NAME, etc., don't forget
 * to update the TOK_FOR (for-in) and TOK_ASSIGN (op=, e.g. +=) special cases
 * in js_EmitTree.
 */
static JSBool
BindNameToSlot(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
{
    JSTreeContext *tc;
    JSAtom *atom;
    JSStmtInfo *stmt;
    jsint slot;
    JSOp op;
    JSLocalKind localKind;
    uintN index;
    JSAtomListElement *ale;
    JSBool constOp;

    JS_ASSERT(pn->pn_type == TOK_NAME);
    if (pn->pn_slot >= 0 || pn->pn_op == JSOP_ARGUMENTS)
        return JS_TRUE;

    /* QNAME references can never be optimized to use arg/var storage. */
    if (pn->pn_op == JSOP_QNAMEPART)
        return JS_TRUE;

    /*
     * We can't optimize if we are compiling a with statement and its body,
     * or we're in a catch block whose exception variable has the same name
     * as this node.  FIXME: we should be able to optimize catch vars to be
     * block-locals.
     */
    tc = &cg->treeContext;
    atom = pn->pn_atom;
    stmt = js_LexicalLookup(tc, atom, &slot);
    if (stmt) {
        if (stmt->type == STMT_WITH)
            return JS_TRUE;

        JS_ASSERT(stmt->flags & SIF_SCOPE);
        JS_ASSERT(slot >= 0);
        op = PN_OP(pn);
        switch (op) {
          case JSOP_NAME:     op = JSOP_GETLOCAL; break;
          case JSOP_SETNAME:  op = JSOP_SETLOCAL; break;
          case JSOP_INCNAME:  op = JSOP_INCLOCAL; break;
          case JSOP_NAMEINC:  op = JSOP_LOCALINC; break;
          case JSOP_DECNAME:  op = JSOP_DECLOCAL; break;
          case JSOP_NAMEDEC:  op = JSOP_LOCALDEC; break;
          case JSOP_FORNAME:  op = JSOP_FORLOCAL; break;
          case JSOP_DELNAME:  op = JSOP_FALSE; break;
          default: JS_ASSERT(0);
        }
        if (op != pn->pn_op) {
            slot = AdjustBlockSlot(cx, cg, slot);
            if (slot < 0)
                return JS_FALSE;
            pn->pn_op = op;
            pn->pn_slot = slot;
        }
        return JS_TRUE;
    }

    /*
     * We can't optimize if var and closure (a local function not in a larger
     * expression and not at top-level within another's body) collide.
     * XXX suboptimal: keep track of colliding names and deoptimize only those
     */
    if (tc->flags & TCF_FUN_CLOSURE_VS_VAR)
        return JS_TRUE;

    if (!(tc->flags & TCF_IN_FUNCTION)) {
        JSStackFrame *caller;

        caller = tc->parseContext->callerFrame;
        if (caller) {
            JS_ASSERT(tc->flags & TCF_COMPILE_N_GO);
            JS_ASSERT(caller->script);
            if (!caller->fun || caller->varobj != tc->u.scopeChain)
                return JS_TRUE;

            /*
             * We are compiling eval or debug script inside a function frame
             * and the scope chain matches the function's variable object.
             * Optimize access to the function's arguments and variables and
             * to the arguments object.
             */
            if (PN_OP(pn) != JSOP_NAME || cg->staticDepth > JS_DISPLAY_SIZE)
                goto arguments_check;
            localKind = js_LookupLocal(cx, caller->fun, atom, &index);
            if (localKind == JSLOCAL_NONE)
                goto arguments_check;

            ATOM_LIST_SEARCH(ale, &cg->upvarList, atom);
            if (!ale) {
                uint32 length, *vector;

                ale = js_IndexAtom(cx, atom, &cg->upvarList);
                if (!ale)
                    return JS_FALSE;
                JS_ASSERT(ALE_INDEX(ale) == cg->upvarList.count - 1);

                length = cg->upvarMap.length;
                JS_ASSERT(ALE_INDEX(ale) <= length);
                if (ALE_INDEX(ale) == length) {
                    length = 2 * JS_MAX(2, length);
                    vector = (uint32 *)
                             JS_realloc(cx, cg->upvarMap.vector,
                                        length * sizeof *vector);
                    if (!vector)
                        return JS_FALSE;
                    cg->upvarMap.vector = vector;
                    cg->upvarMap.length = length;
                }

                if (localKind != JSLOCAL_ARG)
                    index += caller->fun->nargs;
                if (index >= JS_BIT(16)) {
                    cg->treeContext.flags |= TCF_FUN_USES_NONLOCALS;
                    return JS_TRUE;
                }

                JS_ASSERT(cg->staticDepth > caller->fun->u.i.script->staticDepth);
                uintN skip = cg->staticDepth - caller->fun->u.i.script->staticDepth;
                cg->upvarMap.vector[ALE_INDEX(ale)] = MAKE_UPVAR_COOKIE(skip, index);
            }

            pn->pn_op = JSOP_GETUPVAR;
            pn->pn_slot = ALE_INDEX(ale);
            return JS_TRUE;
        }
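
        /*
         * In the eval-in-frame branch above, the upvar list assigns each
         * captured name a dense index (stored in pn->pn_slot for the
         * JSOP_GETUPVAR it becomes), while upvarMap records per index a
         * cookie pairing the number of static levels to skip up to the
         * caller's frame with the arg/var slot to read there (args first,
         * vars offset by caller->fun->nargs, as computed above).
         */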

        /*
         * We are optimizing global variables and there may be no pre-existing
         * global property named atom.  If atom was declared via const or var,
         * optimize pn to access fp->vars using the appropriate JSOP_*GVAR op.
         */
        ATOM_LIST_SEARCH(ale, &tc->decls, atom);
        if (!ale) {
            /* Use precedes declaration, or name is never declared. */
            return JS_TRUE;
        }
        constOp = (ALE_JSOP(ale) == JSOP_DEFCONST);

        /* Index atom so we can map fast global number to name. */
        ale = js_IndexAtom(cx, atom, &cg->atomList);
        if (!ale)
            return JS_FALSE;

        /* Defend against tc->ngvars 16-bit overflow. */
        slot = ALE_INDEX(ale);
        if ((slot + 1) >> 16)
            return JS_TRUE;

        if ((uint16)(slot + 1) > tc->ngvars)
            tc->ngvars = (uint16)(slot + 1);

        op = PN_OP(pn);
        switch (op) {
          case JSOP_NAME:     op = JSOP_GETGVAR; break;
          case JSOP_SETNAME:  op = JSOP_SETGVAR; break;
          case JSOP_SETCONST: /* NB: no change */ break;
          case JSOP_INCNAME:  op = JSOP_INCGVAR; break;
          case JSOP_NAMEINC:  op = JSOP_GVARINC; break;
          case JSOP_DECNAME:  o