/js/lib/Socket.IO-node/support/expresso/deps/jscoverage/js/jsemit.cpp
C++ | 1964 lines | 1374 code | 204 blank | 386 comment | 322 complexity | 16f85088fb3a3e12c285222bebf12ed7 MD5 | raw file
Possible License(s): GPL-2.0, LGPL-2.1, MPL-2.0-no-copyleft-exception, BSD-3-Clause
- /* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sw=4 et tw=99:
- *
- * ***** BEGIN LICENSE BLOCK *****
- * Version: MPL 1.1/GPL 2.0/LGPL 2.1
- *
- * The contents of this file are subject to the Mozilla Public License Version
- * 1.1 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- * http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS IS" basis,
- * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
- * for the specific language governing rights and limitations under the
- * License.
- *
- * The Original Code is Mozilla Communicator client code, released
- * March 31, 1998.
- *
- * The Initial Developer of the Original Code is
- * Netscape Communications Corporation.
- * Portions created by the Initial Developer are Copyright (C) 1998
- * the Initial Developer. All Rights Reserved.
- *
- * Contributor(s):
- *
- * Alternatively, the contents of this file may be used under the terms of
- * either of the GNU General Public License Version 2 or later (the "GPL"),
- * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
- * in which case the provisions of the GPL or the LGPL are applicable instead
- * of those above. If you wish to allow use of your version of this file only
- * under the terms of either the GPL or the LGPL, and not to allow others to
- * use your version of this file under the terms of the MPL, indicate your
- * decision by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL or the LGPL. If you do not delete
- * the provisions above, a recipient may use your version of this file under
- * the terms of any one of the MPL, the GPL or the LGPL.
- *
- * ***** END LICENSE BLOCK ***** */
- /*
- * JS bytecode generation.
- */
- #include "jsstddef.h"
- #ifdef HAVE_MEMORY_H
- #include <memory.h>
- #endif
- #include <string.h>
- #include "jstypes.h"
- #include "jsarena.h" /* Added by JSIFY */
- #include "jsutil.h" /* Added by JSIFY */
- #include "jsbit.h"
- #include "jsprf.h"
- #include "jsapi.h"
- #include "jsatom.h"
- #include "jsbool.h"
- #include "jscntxt.h"
- #include "jsversion.h"
- #include "jsemit.h"
- #include "jsfun.h"
- #include "jsnum.h"
- #include "jsopcode.h"
- #include "jsparse.h"
- #include "jsregexp.h"
- #include "jsscan.h"
- #include "jsscope.h"
- #include "jsscript.h"
- #include "jsautooplen.h"
- #include "jsstaticcheck.h"
- /* Allocation chunk counts, must be powers of two in general. */
- #define BYTECODE_CHUNK 256 /* code allocation increment */
- #define SRCNOTE_CHUNK 64 /* initial srcnote allocation increment */
- #define TRYNOTE_CHUNK 64 /* trynote allocation increment */
- /* Macros to compute byte sizes from typed element counts. */
- #define BYTECODE_SIZE(n) ((n) * sizeof(jsbytecode))
- #define SRCNOTE_SIZE(n) ((n) * sizeof(jssrcnote))
- #define TRYNOTE_SIZE(n) ((n) * sizeof(JSTryNote))
- static JSBool
- NewTryNote(JSContext *cx, JSCodeGenerator *cg, JSTryNoteKind kind,
- uintN stackDepth, size_t start, size_t end);
/*
 * Initialize a code generator for emitting bytecode starting at line number
 * lineno.  Bytecode and source notes are allocated out of codePool and
 * notePool respectively; marks are taken so js_FinishCodeGenerator can
 * release everything allocated here.
 */
JS_FRIEND_API(void)
js_InitCodeGenerator(JSContext *cx, JSCodeGenerator *cg, JSParseContext *pc,
                     JSArenaPool *codePool, JSArenaPool *notePool,
                     uintN lineno)
{
    /* Zero everything first; only non-zero fields are set up below. */
    memset(cg, 0, sizeof *cg);
    TREE_CONTEXT_INIT(&cg->treeContext, pc);
    cg->codePool = codePool;
    cg->notePool = notePool;
    /* Arena marks let js_FinishCodeGenerator release in one step. */
    cg->codeMark = JS_ARENA_MARK(codePool);
    cg->noteMark = JS_ARENA_MARK(notePool);
    /* Emit into the main section by default (vs. the prolog section). */
    cg->current = &cg->main;
    cg->firstLine = cg->prolog.currentLine = cg->main.currentLine = lineno;
    ATOM_LIST_INIT(&cg->atomList);
    /* noteMask is an allocation-size mask; SRCNOTE_CHUNK is a power of 2. */
    cg->prolog.noteMask = cg->main.noteMask = SRCNOTE_CHUNK - 1;
    ATOM_LIST_INIT(&cg->constList);
    ATOM_LIST_INIT(&cg->upvarList);
}
/*
 * Tear down a code generator: finish the tree context, release all arena
 * memory back to the marks taken in js_InitCodeGenerator, and free the two
 * malloc'ed vectors that can outlive normal operation.
 */
JS_FRIEND_API(void)
js_FinishCodeGenerator(JSContext *cx, JSCodeGenerator *cg)
{
    TREE_CONTEXT_FINISH(cx, &cg->treeContext);
    JS_ARENA_RELEASE(cg->codePool, cg->codeMark);
    JS_ARENA_RELEASE(cg->notePool, cg->noteMark);

    /* NB: non-null only after OOM. */
    if (cg->spanDeps)
        JS_free(cx, cg->spanDeps);

    if (cg->upvarMap.vector)
        JS_free(cx, cg->upvarMap.vector);
}
/*
 * Ensure the current bytecode section has room for delta more bytecodes,
 * allocating or growing the code arena as needed.  Returns the offset at
 * which the caller may write, or -1 after reporting out-of-memory.
 */
static ptrdiff_t
EmitCheck(JSContext *cx, JSCodeGenerator *cg, JSOp op, ptrdiff_t delta)
{
    jsbytecode *base, *limit, *next;
    ptrdiff_t offset, length;
    size_t incr, size;

    base = CG_BASE(cg);
    next = CG_NEXT(cg);
    limit = CG_LIMIT(cg);
    offset = PTRDIFF(next, base, jsbytecode);
    if (next + delta > limit) {
        /* Round the needed length up to a power of two (min one chunk). */
        length = offset + delta;
        length = (length <= BYTECODE_CHUNK)
                 ? BYTECODE_CHUNK
                 : JS_BIT(JS_CeilingLog2(length));
        incr = BYTECODE_SIZE(length);
        if (!base) {
            JS_ARENA_ALLOCATE_CAST(base, jsbytecode *, cg->codePool, incr);
        } else {
            /* Grow in place if possible; incr becomes the additional bytes. */
            size = BYTECODE_SIZE(PTRDIFF(limit, base, jsbytecode));
            incr -= size;
            JS_ARENA_GROW_CAST(base, jsbytecode *, cg->codePool, size, incr);
        }
        if (!base) {
            js_ReportOutOfScriptQuota(cx);
            return -1;
        }
        /* The vector may have moved; recompute all three section cursors. */
        CG_BASE(cg) = base;
        CG_LIMIT(cg) = base + length;
        CG_NEXT(cg) = base + offset;
    }
    return offset;
}
/*
 * Update the modeled operand-stack depth for the opcode just emitted at
 * offset target, tracking the maximum depth reached.  Opcodes with a
 * variable use count consult the emitted code itself; JSOP_ENTERBLOCK
 * derives its def count from the block object most recently added to
 * cg->objectList.
 */
static void
UpdateDepth(JSContext *cx, JSCodeGenerator *cg, ptrdiff_t target)
{
    jsbytecode *pc;
    JSOp op;
    const JSCodeSpec *cs;
    uintN depth;
    intN nuses, ndefs;

    pc = CG_CODE(cg, target);
    op = (JSOp) *pc;
    cs = &js_CodeSpec[op];
    if (cs->format & JOF_TMPSLOT_MASK) {
        /* Account for temporary slots the interpreter uses above the stack. */
        depth = (uintN) cg->stackDepth +
                ((cs->format & JOF_TMPSLOT_MASK) >> JOF_TMPSLOT_SHIFT);
        if (depth > cg->maxStackDepth)
            cg->maxStackDepth = depth;
    }

    nuses = cs->nuses;
    if (nuses < 0)
        nuses = js_GetVariableStackUseLength(op, pc);
    cg->stackDepth -= nuses;
    JS_ASSERT(cg->stackDepth >= 0);
    /*
     * Underflow indicates a compiler bug.  Debug builds abort on the assert
     * above; in release builds (where JS_ASSERT compiles away) emit a
     * warning instead of crashing.
     */
    if (cg->stackDepth < 0) {
        char numBuf[12];
        JSTokenStream *ts;

        JS_snprintf(numBuf, sizeof numBuf, "%d", target);
        ts = &cg->treeContext.parseContext->tokenStream;
        JS_ReportErrorFlagsAndNumber(cx, JSREPORT_WARNING,
                                     js_GetErrorMessage, NULL,
                                     JSMSG_STACK_UNDERFLOW,
                                     ts->filename ? ts->filename : "stdin",
                                     numBuf);
    }

    ndefs = cs->ndefs;
    if (ndefs < 0) {
        JSObject *blockObj;

        /* We just executed IndexParsedObject */
        JS_ASSERT(op == JSOP_ENTERBLOCK);
        JS_ASSERT(nuses == 0);
        blockObj = cg->objectList.lastPob->object;
        JS_ASSERT(STOBJ_GET_CLASS(blockObj) == &js_BlockClass);
        JS_ASSERT(JSVAL_IS_VOID(blockObj->fslots[JSSLOT_BLOCK_DEPTH]));

        /* Record where the block's locals start; defs = its local count. */
        OBJ_SET_BLOCK_DEPTH(cx, blockObj, cg->stackDepth);
        ndefs = OBJ_BLOCK_COUNT(cx, blockObj);
    }

    cg->stackDepth += ndefs;
    if ((uintN)cg->stackDepth > cg->maxStackDepth)
        cg->maxStackDepth = cg->stackDepth;
}
- ptrdiff_t
- js_Emit1(JSContext *cx, JSCodeGenerator *cg, JSOp op)
- {
- ptrdiff_t offset = EmitCheck(cx, cg, op, 1);
- if (offset >= 0) {
- *CG_NEXT(cg)++ = (jsbytecode)op;
- UpdateDepth(cx, cg, offset);
- }
- return offset;
- }
- ptrdiff_t
- js_Emit2(JSContext *cx, JSCodeGenerator *cg, JSOp op, jsbytecode op1)
- {
- ptrdiff_t offset = EmitCheck(cx, cg, op, 2);
- if (offset >= 0) {
- jsbytecode *next = CG_NEXT(cg);
- next[0] = (jsbytecode)op;
- next[1] = op1;
- CG_NEXT(cg) = next + 2;
- UpdateDepth(cx, cg, offset);
- }
- return offset;
- }
- ptrdiff_t
- js_Emit3(JSContext *cx, JSCodeGenerator *cg, JSOp op, jsbytecode op1,
- jsbytecode op2)
- {
- ptrdiff_t offset = EmitCheck(cx, cg, op, 3);
- if (offset >= 0) {
- jsbytecode *next = CG_NEXT(cg);
- next[0] = (jsbytecode)op;
- next[1] = op1;
- next[2] = op2;
- CG_NEXT(cg) = next + 3;
- UpdateDepth(cx, cg, offset);
- }
- return offset;
- }
/*
 * Emit op followed by extra zeroed immediate bytes, which the caller will
 * fill in afterwards.  Returns the offset of op, or -1 on failure.
 */
ptrdiff_t
js_EmitN(JSContext *cx, JSCodeGenerator *cg, JSOp op, size_t extra)
{
    ptrdiff_t length = 1 + (ptrdiff_t)extra;
    ptrdiff_t offset = EmitCheck(cx, cg, op, length);

    if (offset >= 0) {
        jsbytecode *next = CG_NEXT(cg);

        *next = (jsbytecode)op;
        memset(next + 1, 0, BYTECODE_SIZE(extra));
        CG_NEXT(cg) = next + length;

        /*
         * Don't UpdateDepth if op's use-count comes from the immediate
         * operand yet to be stored in the extra bytes after op.
         */
        if (js_CodeSpec[op].nuses >= 0)
            UpdateDepth(cx, cg, offset);
    }
    return offset;
}
/* XXX too many "... statement" L10N gaffes below -- fix via js.msg! */
const char js_with_statement_str[] = "with statement";
const char js_finally_block_str[] = "finally block";
const char js_script_str[] = "script";

/*
 * Human-readable statement names for diagnostics, indexed by statement
 * type; the static assert below keeps the table in sync with STMT_LIMIT.
 */
static const char *statementName[] = {
    "label statement",       /* LABEL */
    "if statement",          /* IF */
    "else statement",        /* ELSE */
    "destructuring body",    /* BODY */
    "switch statement",      /* SWITCH */
    "block",                 /* BLOCK */
    js_with_statement_str,   /* WITH */
    "catch block",           /* CATCH */
    "try block",             /* TRY */
    js_finally_block_str,    /* FINALLY */
    js_finally_block_str,    /* SUBROUTINE */
    "do loop",               /* DO_LOOP */
    "for loop",              /* FOR_LOOP */
    "for/in loop",           /* FOR_IN_LOOP */
    "while loop",            /* WHILE_LOOP */
};

JS_STATIC_ASSERT(JS_ARRAY_LENGTH(statementName) == STMT_LIMIT);
- static const char *
- StatementName(JSCodeGenerator *cg)
- {
- if (!cg->treeContext.topStmt)
- return js_script_str;
- return statementName[cg->treeContext.topStmt->type];
- }
/*
 * Report JSMSG_NEED_DIET: a jump or note offset in the innermost enclosing
 * statement (or the whole script) overflowed its encodable range.
 */
static void
ReportStatementTooLarge(JSContext *cx, JSCodeGenerator *cg)
{
    JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_NEED_DIET,
                         StatementName(cg));
}
- /**
- Span-dependent instructions in JS bytecode consist of the jump (JOF_JUMP)
- and switch (JOF_LOOKUPSWITCH, JOF_TABLESWITCH) format opcodes, subdivided
- into unconditional (gotos and gosubs), and conditional jumps or branches
- (which pop a value, test it, and jump depending on its value). Most jumps
- have just one immediate operand, a signed offset from the jump opcode's pc
- to the target bytecode. The lookup and table switch opcodes may contain
- many jump offsets.
- Mozilla bug #80981 (http://bugzilla.mozilla.org/show_bug.cgi?id=80981) was
- fixed by adding extended "X" counterparts to the opcodes/formats (NB: X is
- suffixed to prefer JSOP_ORX thereby avoiding a JSOP_XOR name collision for
- the extended form of the JSOP_OR branch opcode). The unextended or short
- formats have 16-bit signed immediate offset operands, the extended or long
- formats have 32-bit signed immediates. The span-dependency problem consists
- of selecting as few long instructions as possible, or about as few -- since
- jumps can span other jumps, extending one jump may cause another to need to
- be extended.
- Most JS scripts are short, so need no extended jumps. We optimize for this
- case by generating short jumps until we know a long jump is needed. After
- that point, we keep generating short jumps, but each jump's 16-bit immediate
- offset operand is actually an unsigned index into cg->spanDeps, an array of
- JSSpanDep structs. Each struct tells the top offset in the script of the
- opcode, the "before" offset of the jump (which will be the same as top for
- simplex jumps, but which will index further into the bytecode array for a
- non-initial jump offset in a lookup or table switch), the after "offset"
- adjusted during span-dependent instruction selection (initially the same
- value as the "before" offset), and the jump target (more below).
- Since we generate cg->spanDeps lazily, from within js_SetJumpOffset, we must
- ensure that all bytecode generated so far can be inspected to discover where
- the jump offset immediate operands lie within CG_CODE(cg). But the bonus is
- that we generate span-dependency records sorted by their offsets, so we can
- binary-search when trying to find a JSSpanDep for a given bytecode offset,
- or the nearest JSSpanDep at or above a given pc.
- To avoid limiting scripts to 64K jumps, if the cg->spanDeps index overflows
- 65534, we store SPANDEP_INDEX_HUGE in the jump's immediate operand. This
- tells us that we need to binary-search for the cg->spanDeps entry by the
- jump opcode's bytecode offset (sd->before).
- Jump targets need to be maintained in a data structure that lets us look
- up an already-known target by its address (jumps may have a common target),
- and that also lets us update the addresses (script-relative, a.k.a. absolute
- offsets) of targets that come after a jump target (for when a jump below
- that target needs to be extended). We use an AVL tree, implemented using
- recursion, but with some tricky optimizations to its height-balancing code
- (see http://www.cmcrossroads.com/bradapp/ftp/src/libs/C++/AvlTrees.html).
- A final wrinkle: backpatch chains are linked by jump-to-jump offsets with
- positive sign, even though they link "backward" (i.e., toward lower bytecode
- address). We don't want to waste space and search time in the AVL tree for
- such temporary backpatch deltas, so we use a single-bit wildcard scheme to
- tag true JSJumpTarget pointers and encode untagged, signed (positive) deltas
- in JSSpanDep.target pointers, depending on whether the JSSpanDep has a known
- target, or is still awaiting backpatching.
- Note that backpatch chains would present a problem for BuildSpanDepTable,
- which inspects bytecode to build cg->spanDeps on demand, when the first
- short jump offset overflows. To solve this temporary problem, we emit a
- proxy bytecode (JSOP_BACKPATCH; JSOP_BACKPATCH_POP for branch ops) whose
- nuses/ndefs counts help keep the stack balanced, but whose opcode format
- distinguishes its backpatch delta immediate operand from a normal jump
- offset.
- */
/*
 * Rebalance the AVL subtree rooted at *jtp after an insertion has pushed
 * its balance factor outside [-1, 1], performing a single or double
 * rotation as appropriate and fixing up the affected balance factors.
 * Returns 1 if the subtree's height changed, else 0 (fed back into the
 * caller's balance bookkeeping in AddJumpTarget).
 */
static int
BalanceJumpTargets(JSJumpTarget **jtp)
{
    JSJumpTarget *jt, *jt2, *root;
    int dir, otherDir, heightChanged;
    JSBool doubleRotate;

    jt = *jtp;
    JS_ASSERT(jt->balance != 0);

    if (jt->balance < -1) {
        /* Left-heavy: rotate right; double-rotate if the left kid leans right. */
        dir = JT_RIGHT;
        doubleRotate = (jt->kids[JT_LEFT]->balance > 0);
    } else if (jt->balance > 1) {
        /* Right-heavy: rotate left; double-rotate if the right kid leans left. */
        dir = JT_LEFT;
        doubleRotate = (jt->kids[JT_RIGHT]->balance < 0);
    } else {
        return 0;
    }

    otherDir = JT_OTHER_DIR(dir);
    if (doubleRotate) {
        jt2 = jt->kids[otherDir];
        *jtp = root = jt2->kids[dir];

        jt->kids[otherDir] = root->kids[dir];
        root->kids[dir] = jt;

        jt2->kids[dir] = root->kids[otherDir];
        root->kids[otherDir] = jt2;

        /* A double rotation always shortens the subtree after insertion. */
        heightChanged = 1;
        root->kids[JT_LEFT]->balance = -JS_MAX(root->balance, 0);
        root->kids[JT_RIGHT]->balance = -JS_MIN(root->balance, 0);
        root->balance = 0;
    } else {
        *jtp = root = jt->kids[otherDir];
        jt->kids[otherDir] = root->kids[dir];
        root->kids[dir] = jt;

        heightChanged = (root->balance != 0);
        jt->balance = -((dir == JT_LEFT) ? --root->balance : ++root->balance);
    }

    return heightChanged;
}
/*
 * Bundled arguments for the recursive AddJumpTarget, so the recursion
 * passes a single pointer.  node is an out-parameter: the found or newly
 * created tree node for offset, or null on out-of-memory.
 */
typedef struct AddJumpTargetArgs {
    JSContext           *cx;        /* context for allocation and errors */
    JSCodeGenerator     *cg;        /* generator owning the target tree */
    ptrdiff_t           offset;     /* absolute bytecode offset to insert */
    JSJumpTarget        *node;      /* out: node for offset, null on OOM */
} AddJumpTargetArgs;
/*
 * Recursively insert args->offset into the AVL tree of jump targets rooted
 * at *jtp, reusing nodes from cg->jtFreeList when available.  On return,
 * args->node is the found-or-created node (null only on OOM).  The return
 * value is the subtree's height change (0 or 1), negated by the caller for
 * left-side insertions to maintain balance factors.
 */
static int
AddJumpTarget(AddJumpTargetArgs *args, JSJumpTarget **jtp)
{
    JSJumpTarget *jt;
    int balanceDelta;

    jt = *jtp;
    if (!jt) {
        JSCodeGenerator *cg = args->cg;

        /* Pop a recycled node from the free list, or arena-allocate one. */
        jt = cg->jtFreeList;
        if (jt) {
            cg->jtFreeList = jt->kids[JT_LEFT];
        } else {
            JS_ARENA_ALLOCATE_CAST(jt, JSJumpTarget *, &args->cx->tempPool,
                                   sizeof *jt);
            if (!jt) {
                js_ReportOutOfScriptQuota(args->cx);
                return 0;
            }
        }
        jt->offset = args->offset;
        jt->balance = 0;
        jt->kids[JT_LEFT] = jt->kids[JT_RIGHT] = NULL;
        cg->numJumpTargets++;
        args->node = jt;
        *jtp = jt;
        /* New leaf: this subtree grew by one level. */
        return 1;
    }

    if (jt->offset == args->offset) {
        /* Target already known; share the existing node. */
        args->node = jt;
        return 0;
    }

    if (args->offset < jt->offset)
        balanceDelta = -AddJumpTarget(args, &jt->kids[JT_LEFT]);
    else
        balanceDelta = AddJumpTarget(args, &jt->kids[JT_RIGHT]);
    if (!args->node)
        return 0;

    jt->balance += balanceDelta;
    return (balanceDelta && jt->balance)
           ? 1 - BalanceJumpTargets(jtp)
           : 0;
}
#ifdef DEBUG_brendan
/*
 * DEBUG-only sanity check: assert the AVL invariants (balance factor in
 * [-1, 1] and equal to right height minus left height) for every node.
 * Returns the height of the subtree rooted at jt.
 */
static int AVLCheck(JSJumpTarget *jt)
{
    int lh, rh;

    if (!jt)
        return 0;
    JS_ASSERT(-1 <= jt->balance && jt->balance <= 1);
    lh = AVLCheck(jt->kids[JT_LEFT]);
    rh = AVLCheck(jt->kids[JT_RIGHT]);
    JS_ASSERT(jt->balance == rh - lh);
    return 1 + JS_MAX(lh, rh);
}
#endif
/*
 * Resolve span dependency sd's target: the absolute offset sd->top + off.
 * Inserts (or finds) the target in cg's jump-target AVL tree and stores
 * the tagged node pointer in sd.  Fails (with a report) if off exceeds
 * even the extended-jump range, or on OOM (args.node left null).
 */
static JSBool
SetSpanDepTarget(JSContext *cx, JSCodeGenerator *cg, JSSpanDep *sd,
                 ptrdiff_t off)
{
    AddJumpTargetArgs args;

    if (off < JUMPX_OFFSET_MIN || JUMPX_OFFSET_MAX < off) {
        ReportStatementTooLarge(cx, cg);
        return JS_FALSE;
    }

    args.cx = cx;
    args.cg = cg;
    args.offset = sd->top + off;
    args.node = NULL;
    AddJumpTarget(&args, &cg->jumpTargets);
    if (!args.node)
        return JS_FALSE;

#ifdef DEBUG_brendan
    AVLCheck(cg->jumpTargets);
#endif

    SD_SET_TARGET(sd, args.node);
    return JS_TRUE;
}
/* Initial spandep vector capacity and byte-size helpers. */
#define SPANDEPS_MIN            256
#define SPANDEPS_SIZE(n)        ((n) * sizeof(JSSpanDep))
#define SPANDEPS_SIZE_MIN       SPANDEPS_SIZE(SPANDEPS_MIN)

/*
 * Append a span-dependency record for the jump-offset operand at pc2 within
 * the instruction starting at pc (pc2 > pc only for switch-case operands).
 * The vector doubles whenever index is a power of two at or above
 * SPANDEPS_MIN (or is first allocated at SPANDEPS_SIZE_MIN).  The record's
 * index -- or SPANDEP_INDEX_HUGE once it no longer fits in the operand --
 * is stored temporarily in the 16-bit immediate at pc2, to be overwritten
 * with the real offset by OptimizeSpanDeps.
 */
static JSBool
AddSpanDep(JSContext *cx, JSCodeGenerator *cg, jsbytecode *pc, jsbytecode *pc2,
           ptrdiff_t off)
{
    uintN index;
    JSSpanDep *sdbase, *sd;
    size_t size;

    index = cg->numSpanDeps;
    if (index + 1 == 0) {
        /* uintN counter would wrap: give up with a "too large" report. */
        ReportStatementTooLarge(cx, cg);
        return JS_FALSE;
    }

    if ((index & (index - 1)) == 0 &&
        (!(sdbase = cg->spanDeps) || index >= SPANDEPS_MIN)) {
        /* On failure JS_realloc reports and leaves cg->spanDeps valid. */
        size = sdbase ? SPANDEPS_SIZE(index) : SPANDEPS_SIZE_MIN / 2;
        sdbase = (JSSpanDep *) JS_realloc(cx, sdbase, size + size);
        if (!sdbase)
            return JS_FALSE;
        cg->spanDeps = sdbase;
    }

    cg->numSpanDeps = index + 1;
    sd = cg->spanDeps + index;
    sd->top = PTRDIFF(pc, CG_BASE(cg), jsbytecode);
    sd->offset = sd->before = PTRDIFF(pc2, CG_BASE(cg), jsbytecode);

    if (js_CodeSpec[*pc].format & JOF_BACKPATCH) {
        /* Jump offset will be backpatched if off is a non-zero "bpdelta". */
        if (off != 0) {
            JS_ASSERT(off >= 1 + JUMP_OFFSET_LEN);
            if (off > BPDELTA_MAX) {
                ReportStatementTooLarge(cx, cg);
                return JS_FALSE;
            }
        }
        SD_SET_BPDELTA(sd, off);
    } else if (off == 0) {
        /* Jump offset will be patched directly, without backpatch chaining. */
        SD_SET_TARGET(sd, 0);
    } else {
        /* The jump offset in off is non-zero, therefore it's already known. */
        if (!SetSpanDepTarget(cx, cg, sd, off))
            return JS_FALSE;
    }

    if (index > SPANDEP_INDEX_MAX)
        index = SPANDEP_INDEX_HUGE;
    SET_SPANDEP_INDEX(pc2, index);
    return JS_TRUE;
}
/*
 * Add span-dependency records for every jump offset in the switch opcode
 * at pc: first the default offset, then one per case.  Returns the pointer
 * just past the (variable-length) switch instruction, or null on error.
 */
static jsbytecode *
AddSwitchSpanDeps(JSContext *cx, JSCodeGenerator *cg, jsbytecode *pc)
{
    JSOp op;
    jsbytecode *pc2;
    ptrdiff_t off;
    jsint low, high;
    uintN njumps, indexlen;

    op = (JSOp) *pc;
    JS_ASSERT(op == JSOP_TABLESWITCH || op == JSOP_LOOKUPSWITCH);
    pc2 = pc;
    off = GET_JUMP_OFFSET(pc2);
    if (!AddSpanDep(cx, cg, pc, pc2, off))
        return NULL;
    pc2 += JUMP_OFFSET_LEN;
    if (op == JSOP_TABLESWITCH) {
        /* Tableswitch: [low, high] range implies high - low + 1 jumps. */
        low = GET_JUMP_OFFSET(pc2);
        pc2 += JUMP_OFFSET_LEN;
        high = GET_JUMP_OFFSET(pc2);
        pc2 += JUMP_OFFSET_LEN;
        njumps = (uintN) (high - low + 1);
        indexlen = 0;
    } else {
        /* Lookupswitch: explicit count, each case preceded by an atom index. */
        njumps = GET_UINT16(pc2);
        pc2 += UINT16_LEN;
        indexlen = INDEX_LEN;
    }
    while (njumps) {
        --njumps;
        pc2 += indexlen;
        off = GET_JUMP_OFFSET(pc2);
        if (!AddSpanDep(cx, cg, pc, pc2, off))
            return NULL;
        pc2 += JUMP_OFFSET_LEN;
    }
    /*
     * GET_JUMP_OFFSET reads its operand at pc2[1..2], so pc2 trails the
     * bytes actually consumed by one throughout this function; the +1
     * yields the first byte past the switch instruction.
     */
    return 1 + pc2;
}
/*
 * Scan bytecode from cg->spanDepTodo to the current write point, adding a
 * span-dependency record for every jump-offset operand found.  Built
 * lazily, the first time a jump offset overflows the short form or a
 * backpatch delta needs a record.
 */
static JSBool
BuildSpanDepTable(JSContext *cx, JSCodeGenerator *cg)
{
    jsbytecode *pc, *end;
    JSOp op;
    const JSCodeSpec *cs;
    ptrdiff_t off;

    pc = CG_BASE(cg) + cg->spanDepTodo;
    end = CG_NEXT(cg);
    while (pc != end) {
        JS_ASSERT(pc < end);
        op = (JSOp)*pc;
        cs = &js_CodeSpec[op];

        switch (JOF_TYPE(cs->format)) {
          case JOF_TABLESWITCH:
          case JOF_LOOKUPSWITCH:
            /* Variable-length: the helper returns the pc past the switch. */
            pc = AddSwitchSpanDeps(cx, cg, pc);
            if (!pc)
                return JS_FALSE;
            break;

          case JOF_JUMP:
            off = GET_JUMP_OFFSET(pc);
            if (!AddSpanDep(cx, cg, pc, pc, off))
                return JS_FALSE;
            /* FALL THROUGH */
          default:
            pc += cs->length;
            break;
        }
    }

    return JS_TRUE;
}
- static JSSpanDep *
- GetSpanDep(JSCodeGenerator *cg, jsbytecode *pc)
- {
- uintN index;
- ptrdiff_t offset;
- int lo, hi, mid;
- JSSpanDep *sd;
- index = GET_SPANDEP_INDEX(pc);
- if (index != SPANDEP_INDEX_HUGE)
- return cg->spanDeps + index;
- offset = PTRDIFF(pc, CG_BASE(cg), jsbytecode);
- lo = 0;
- hi = cg->numSpanDeps - 1;
- while (lo <= hi) {
- mid = (lo + hi) / 2;
- sd = cg->spanDeps + mid;
- if (sd->before == offset)
- return sd;
- if (sd->before < offset)
- lo = mid + 1;
- else
- hi = mid - 1;
- }
- JS_ASSERT(0);
- return NULL;
- }
/*
 * Record the backpatch-chain delta for the jump at pc.  While all code is
 * still short-form the delta lives directly in the 16-bit operand; once it
 * would overflow, the span-dependency table is built (lazily) and the
 * delta moves into the jump's JSSpanDep record.
 */
static JSBool
SetBackPatchDelta(JSContext *cx, JSCodeGenerator *cg, jsbytecode *pc,
                  ptrdiff_t delta)
{
    JSSpanDep *sd;

    JS_ASSERT(delta >= 1 + JUMP_OFFSET_LEN);
    if (!cg->spanDeps && delta < JUMP_OFFSET_MAX) {
        SET_JUMP_OFFSET(pc, delta);
        return JS_TRUE;
    }

    if (delta > BPDELTA_MAX) {
        ReportStatementTooLarge(cx, cg);
        return JS_FALSE;
    }

    if (!cg->spanDeps && !BuildSpanDepTable(cx, cg))
        return JS_FALSE;

    sd = GetSpanDep(cg, pc);
    JS_ASSERT(SD_GET_BPDELTA(sd) == 0);
    SD_SET_BPDELTA(sd, delta);
    return JS_TRUE;
}
/*
 * Add delta to the offset of every jump target above pivot.  The AVL tree
 * is ordered by offset, so when a node's offset is at or below pivot its
 * whole left subtree is too and can be skipped; right subtrees must always
 * be visited.
 */
static void
UpdateJumpTargets(JSJumpTarget *jt, ptrdiff_t pivot, ptrdiff_t delta)
{
    if (jt->offset > pivot) {
        jt->offset += delta;
        if (jt->kids[JT_LEFT])
            UpdateJumpTargets(jt->kids[JT_LEFT], pivot, delta);
    }
    if (jt->kids[JT_RIGHT])
        UpdateJumpTargets(jt->kids[JT_RIGHT], pivot, delta);
}
/*
 * Binary-search cg->spanDeps (sorted by "before" offset) for the record
 * with the smallest before >= offset.  lo lets callers resume from a known
 * lower bound; guard is returned as a fencepost when every record lies
 * below offset.
 */
static JSSpanDep *
FindNearestSpanDep(JSCodeGenerator *cg, ptrdiff_t offset, int lo,
                   JSSpanDep *guard)
{
    int num, hi, mid;
    JSSpanDep *sdbase, *sd;

    num = cg->numSpanDeps;
    JS_ASSERT(num > 0);
    hi = num - 1;
    sdbase = cg->spanDeps;
    while (lo <= hi) {
        mid = (lo + hi) / 2;
        sd = sdbase + mid;
        if (sd->before == offset)
            return sd;
        if (sd->before < offset)
            lo = mid + 1;
        else
            hi = mid - 1;
    }
    if (lo == num)
        return guard;
    sd = sdbase + lo;
    JS_ASSERT(sd->before >= offset && (lo == 0 || sd[-1].before < offset));
    return sd;
}
- static void
- FreeJumpTargets(JSCodeGenerator *cg, JSJumpTarget *jt)
- {
- if (jt->kids[JT_LEFT])
- FreeJumpTargets(cg, jt->kids[JT_LEFT]);
- if (jt->kids[JT_RIGHT])
- FreeJumpTargets(cg, jt->kids[JT_RIGHT]);
- jt->kids[JT_LEFT] = cg->jtFreeList;
- cg->jtFreeList = jt;
- }
/*
 * Span-dependent instruction selection (see the long comment above): decide
 * which jumps must be extended to 32-bit "X" offsets, then rewrite the
 * bytecode in place and fix up source notes and try notes to account for
 * the growth.  Finally frees the spandep and jump-target structures so cg
 * can be reused for further top-level code.  Returns JS_FALSE on OOM or
 * statement-too-large (already reported).
 */
static JSBool
OptimizeSpanDeps(JSContext *cx, JSCodeGenerator *cg)
{
    jsbytecode *pc, *oldpc, *base, *limit, *next;
    JSSpanDep *sd, *sd2, *sdbase, *sdlimit, *sdtop, guard;
    ptrdiff_t offset, growth, delta, top, pivot, span, length, target;
    JSBool done;
    JSOp op;
    uint32 type;
    size_t size, incr;
    jssrcnote *sn, *snlimit;
    JSSrcNoteSpec *spec;
    uintN i, n, noteIndex;
    JSTryNode *tryNode;
#ifdef DEBUG_brendan
    int passes = 0;
#endif

    base = CG_BASE(cg);
    sdbase = cg->spanDeps;
    sdlimit = sdbase + cg->numSpanDeps;
    offset = CG_OFFSET(cg);
    growth = 0;

    /*
     * Phase 1: iterate to a fixed point, widening any jump whose span no
     * longer fits in 16 bits.  Widening one jump grows the code, which can
     * push other spans out of range, hence the outer do/while.
     */
    do {
        done = JS_TRUE;
        delta = 0;
        top = pivot = -1;
        sdtop = NULL;
        pc = NULL;
        op = JSOP_NOP;
        type = 0;
#ifdef DEBUG_brendan
        passes++;
#endif

        for (sd = sdbase; sd < sdlimit; sd++) {
            JS_ASSERT(JT_HAS_TAG(sd->target));
            sd->offset += delta;

            if (sd->top != top) {
                sdtop = sd;
                top = sd->top;
                JS_ASSERT(top == sd->before);
                pivot = sd->offset;
                pc = base + top;
                op = (JSOp) *pc;
                type = JOF_OPTYPE(op);
                if (JOF_TYPE_IS_EXTENDED_JUMP(type)) {
                    /*
                     * We already extended all the jump offset operands for
                     * the opcode at sd->top. Jumps and branches have only
                     * one jump offset operand, but switches have many, all
                     * of which are adjacent in cg->spanDeps.
                     */
                    continue;
                }

                JS_ASSERT(type == JOF_JUMP ||
                          type == JOF_TABLESWITCH ||
                          type == JOF_LOOKUPSWITCH);
            }

            if (!JOF_TYPE_IS_EXTENDED_JUMP(type)) {
                span = SD_SPAN(sd, pivot);
                if (span < JUMP_OFFSET_MIN || JUMP_OFFSET_MAX < span) {
                    ptrdiff_t deltaFromTop = 0;

                    done = JS_FALSE;

                    switch (op) {
                      case JSOP_GOTO:         op = JSOP_GOTOX; break;
                      case JSOP_IFEQ:         op = JSOP_IFEQX; break;
                      case JSOP_IFNE:         op = JSOP_IFNEX; break;
                      case JSOP_OR:           op = JSOP_ORX; break;
                      case JSOP_AND:          op = JSOP_ANDX; break;
                      case JSOP_GOSUB:        op = JSOP_GOSUBX; break;
                      case JSOP_CASE:         op = JSOP_CASEX; break;
                      case JSOP_DEFAULT:      op = JSOP_DEFAULTX; break;
                      case JSOP_TABLESWITCH:  op = JSOP_TABLESWITCHX; break;
                      case JSOP_LOOKUPSWITCH: op = JSOP_LOOKUPSWITCHX; break;
                      default:
                        ReportStatementTooLarge(cx, cg);
                        return JS_FALSE;
                    }
                    *pc = (jsbytecode) op;

                    for (sd2 = sdtop; sd2 < sdlimit && sd2->top == top; sd2++) {
                        if (sd2 <= sd) {
                            /*
                             * sd2->offset already includes delta as it stood
                             * before we entered this loop, but it must also
                             * include the delta relative to top due to all the
                             * extended jump offset immediates for the opcode
                             * starting at top, which we extend in this loop.
                             *
                             * If there is only one extended jump offset, then
                             * sd2->offset won't change and this for loop will
                             * iterate once only.
                             */
                            sd2->offset += deltaFromTop;
                            deltaFromTop += JUMPX_OFFSET_LEN - JUMP_OFFSET_LEN;
                        } else {
                            /*
                             * sd2 comes after sd, and won't be revisited by
                             * the outer for loop, so we have to increase its
                             * offset by delta, not merely by deltaFromTop.
                             */
                            sd2->offset += delta;
                        }

                        delta += JUMPX_OFFSET_LEN - JUMP_OFFSET_LEN;
                        UpdateJumpTargets(cg->jumpTargets, sd2->offset,
                                          JUMPX_OFFSET_LEN - JUMP_OFFSET_LEN);
                    }
                    sd = sd2 - 1;
                }
            }
        }

        growth += delta;
    } while (!done);

    if (growth) {
#ifdef DEBUG_brendan
        JSTokenStream *ts = &cg->treeContext.parseContext->tokenStream;

        printf("%s:%u: %u/%u jumps extended in %d passes (%d=%d+%d)\n",
               ts->filename ? ts->filename : "stdin", cg->firstLine,
               growth / (JUMPX_OFFSET_LEN - JUMP_OFFSET_LEN), cg->numSpanDeps,
               passes, offset + growth, offset, growth);
#endif

        /*
         * Ensure that we have room for the extended jumps, but don't round up
         * to a power of two -- we're done generating code, so we cut to fit.
         */
        limit = CG_LIMIT(cg);
        length = offset + growth;
        next = base + length;
        if (next > limit) {
            JS_ASSERT(length > BYTECODE_CHUNK);
            size = BYTECODE_SIZE(PTRDIFF(limit, base, jsbytecode));
            incr = BYTECODE_SIZE(length) - size;
            JS_ARENA_GROW_CAST(base, jsbytecode *, cg->codePool, size, incr);
            if (!base) {
                js_ReportOutOfScriptQuota(cx);
                return JS_FALSE;
            }
            CG_BASE(cg) = base;
            CG_LIMIT(cg) = next = base + length;
        }
        CG_NEXT(cg) = next;

        /*
         * Set up a fake span dependency record to guard the end of the code
         * being generated.  This guard record is returned as a fencepost by
         * FindNearestSpanDep if there is no real spandep at or above a given
         * unextended code offset.
         */
        guard.top = -1;
        guard.offset = offset + growth;
        guard.before = offset;
        guard.target = NULL;
    }

    /*
     * Now work backwards through the span dependencies, copying chunks of
     * bytecode between each extended jump toward the end of the grown code
     * space, and restoring immediate offset operands for all jump bytecodes.
     * The first chunk of bytecodes, starting at base and ending at the first
     * extended jump offset (NB: this chunk includes the operation bytecode
     * just before that immediate jump offset), doesn't need to be copied.
     */
    JS_ASSERT(sd == sdlimit);
    top = -1;
    while (--sd >= sdbase) {
        if (sd->top != top) {
            top = sd->top;
            op = (JSOp) base[top];
            type = JOF_OPTYPE(op);

            /* Rewind to the first spandep for this opcode to find pivot. */
            for (sd2 = sd - 1; sd2 >= sdbase && sd2->top == top; sd2--)
                continue;
            sd2++;
            pivot = sd2->offset;
            JS_ASSERT(top == sd2->before);
        }

        oldpc = base + sd->before;
        span = SD_SPAN(sd, pivot);

        /*
         * If this jump didn't need to be extended, restore its span immediate
         * offset operand now, overwriting the index of sd within cg->spanDeps
         * that was stored temporarily after *pc when BuildSpanDepTable ran.
         *
         * Note that span might fit in 16 bits even for an extended jump op,
         * if the op has multiple span operands, not all of which overflowed
         * (e.g. JSOP_LOOKUPSWITCH or JSOP_TABLESWITCH where some cases are in
         * range for a short jump, but others are not).
         */
        if (!JOF_TYPE_IS_EXTENDED_JUMP(type)) {
            JS_ASSERT(JUMP_OFFSET_MIN <= span && span <= JUMP_OFFSET_MAX);
            SET_JUMP_OFFSET(oldpc, span);
            continue;
        }

        /*
         * Set up parameters needed to copy the next run of bytecode starting
         * at offset (which is a cursor into the unextended, original bytecode
         * vector), down to sd->before (a cursor of the same scale as offset,
         * it's the index of the original jump pc).  Reuse delta to count the
         * nominal number of bytes to copy.
         */
        pc = base + sd->offset;
        delta = offset - sd->before;
        JS_ASSERT(delta >= 1 + JUMP_OFFSET_LEN);

        /*
         * Don't bother copying the jump offset we're about to reset, but do
         * copy the bytecode at oldpc (which comes just before its immediate
         * jump offset operand), on the next iteration through the loop, by
         * including it in offset's new value.
         */
        offset = sd->before + 1;
        size = BYTECODE_SIZE(delta - (1 + JUMP_OFFSET_LEN));
        if (size) {
            memmove(pc + 1 + JUMPX_OFFSET_LEN,
                    oldpc + 1 + JUMP_OFFSET_LEN,
                    size);
        }

        SET_JUMPX_OFFSET(pc, span);
    }

    if (growth) {
        /*
         * Fix source note deltas.  Don't hardwire the delta fixup adjustment,
         * even though currently it must be JUMPX_OFFSET_LEN - JUMP_OFFSET_LEN
         * at each sd that moved.  The future may bring different offset sizes
         * for span-dependent instruction operands.  However, we fix only main
         * notes here, not prolog notes -- we know that prolog opcodes are not
         * span-dependent, and aren't likely ever to be.
         */
        offset = growth = 0;
        sd = sdbase;
        for (sn = cg->main.notes, snlimit = sn + cg->main.noteCount;
             sn < snlimit;
             sn = SN_NEXT(sn)) {
            /*
             * Recall that the offset of a given note includes its delta, and
             * tells the offset of the annotated bytecode from the main entry
             * point of the script.
             */
            offset += SN_DELTA(sn);
            while (sd < sdlimit && sd->before < offset) {
                /*
                 * To compute the delta to add to sn, we need to look at the
                 * spandep after sd, whose offset - (before + growth) tells by
                 * how many bytes sd's instruction grew.
                 */
                sd2 = sd + 1;
                if (sd2 == sdlimit)
                    sd2 = &guard;
                delta = sd2->offset - (sd2->before + growth);
                if (delta > 0) {
                    JS_ASSERT(delta == JUMPX_OFFSET_LEN - JUMP_OFFSET_LEN);
                    sn = js_AddToSrcNoteDelta(cx, cg, sn, delta);
                    if (!sn)
                        return JS_FALSE;
                    snlimit = cg->main.notes + cg->main.noteCount;
                    growth += delta;
                }
                sd++;
            }

            /*
             * If sn has span-dependent offset operands, check whether each
             * covers further span-dependencies, and increase those operands
             * accordingly.  Some source notes measure offset not from the
             * annotated pc, but from that pc plus some small bias.  NB: we
             * assume that spec->offsetBias can't itself span span-dependent
             * instructions!
             */
            spec = &js_SrcNoteSpec[SN_TYPE(sn)];
            if (spec->isSpanDep) {
                pivot = offset + spec->offsetBias;
                n = spec->arity;
                for (i = 0; i < n; i++) {
                    span = js_GetSrcNoteOffset(sn, i);
                    if (span == 0)
                        continue;
                    target = pivot + span * spec->isSpanDep;
                    sd2 = FindNearestSpanDep(cg, target,
                                             (target >= pivot)
                                             ? sd - sdbase
                                             : 0,
                                             &guard);

                    /*
                     * Increase target by sd2's before-vs-after offset delta,
                     * which is absolute (i.e., relative to start of script,
                     * as is target).  Recompute the span by subtracting its
                     * adjusted pivot from target.
                     */
                    target += sd2->offset - sd2->before;
                    span = target - (pivot + growth);
                    span *= spec->isSpanDep;
                    noteIndex = sn - cg->main.notes;
                    if (!js_SetSrcNoteOffset(cx, cg, noteIndex, i, span))
                        return JS_FALSE;
                    sn = cg->main.notes + noteIndex;
                    snlimit = cg->main.notes + cg->main.noteCount;
                }
            }
        }
        cg->main.lastNoteOffset += growth;

        /*
         * Fix try/catch notes (O(numTryNotes * log2(numSpanDeps)), but it's
         * not clear how we can beat that).
         */
        for (tryNode = cg->lastTryNode; tryNode; tryNode = tryNode->prev) {
            /*
             * First, look for the nearest span dependency at/above tn->start.
             * There may not be any such spandep, in which case the guard will
             * be returned.
             */
            offset = tryNode->note.start;
            sd = FindNearestSpanDep(cg, offset, 0, &guard);
            delta = sd->offset - sd->before;
            tryNode->note.start = offset + delta;

            /*
             * Next, find the nearest spandep at/above tn->start + tn->length.
             * Use its delta minus tn->start's delta to increase tn->length.
             */
            length = tryNode->note.length;
            sd2 = FindNearestSpanDep(cg, offset + length, sd - sdbase, &guard);
            if (sd2 != sd) {
                tryNode->note.length =
                    length + sd2->offset - sd2->before - delta;
            }
        }
    }

#ifdef DEBUG_brendan
  {
    uintN bigspans = 0;
    top = -1;
    for (sd = sdbase; sd < sdlimit; sd++) {
        offset = sd->offset;

        /* NB: sd->top cursors into the original, unextended bytecode vector. */
        if (sd->top != top) {
            JS_ASSERT(top == -1 ||
                      !JOF_TYPE_IS_EXTENDED_JUMP(type) ||
                      bigspans != 0);
            bigspans = 0;
            top = sd->top;
            JS_ASSERT(top == sd->before);
            op = (JSOp) base[offset];
            type = JOF_OPTYPE(op);
            JS_ASSERT(type == JOF_JUMP ||
                      type == JOF_JUMPX ||
                      type == JOF_TABLESWITCH ||
                      type == JOF_TABLESWITCHX ||
                      type == JOF_LOOKUPSWITCH ||
                      type == JOF_LOOKUPSWITCHX);
            pivot = offset;
        }

        pc = base + offset;
        if (JOF_TYPE_IS_EXTENDED_JUMP(type)) {
            span = GET_JUMPX_OFFSET(pc);
            if (span < JUMP_OFFSET_MIN || JUMP_OFFSET_MAX < span) {
                bigspans++;
            } else {
                JS_ASSERT(type == JOF_TABLESWITCHX ||
                          type == JOF_LOOKUPSWITCHX);
            }
        } else {
            span = GET_JUMP_OFFSET(pc);
        }
        JS_ASSERT(SD_SPAN(sd, pivot) == span);
    }
    JS_ASSERT(!JOF_TYPE_IS_EXTENDED_JUMP(type) || bigspans != 0);
  }
#endif

    /*
     * Reset so we optimize at most once -- cg may be used for further code
     * generation of successive, independent, top-level statements.  No jump
     * can span top-level statements, because JS lacks goto.
     */
    /* NOTE(review): size is computed here but never used -- looks like a
       leftover from an earlier free path; confirm before removing. */
    size = SPANDEPS_SIZE(JS_BIT(JS_CeilingLog2(cg->numSpanDeps)));
    JS_free(cx, cg->spanDeps);
    cg->spanDeps = NULL;
    FreeJumpTargets(cg, cg->jumpTargets);
    cg->jumpTargets = NULL;
    cg->numSpanDeps = cg->numJumpTargets = 0;
    cg->spanDepTodo = CG_OFFSET(cg);
    return JS_TRUE;
}
- static ptrdiff_t
- EmitJump(JSContext *cx, JSCodeGenerator *cg, JSOp op, ptrdiff_t off)
- {
- JSBool extend;
- ptrdiff_t jmp;
- jsbytecode *pc;
- extend = off < JUMP_OFFSET_MIN || JUMP_OFFSET_MAX < off;
- if (extend && !cg->spanDeps && !BuildSpanDepTable(cx, cg))
- return -1;
- jmp = js_Emit3(cx, cg, op, JUMP_OFFSET_HI(off), JUMP_OFFSET_LO(off));
- if (jmp >= 0 && (extend || cg->spanDeps)) {
- pc = CG_CODE(cg, jmp);
- if (!AddSpanDep(cx, cg, pc, pc, off))
- return -1;
- }
- return jmp;
- }
/*
 * Decode the jump offset operand for the jump at pc.  Without a span-dep
 * table the offset is stored directly in the bytecode.  With one, the jump's
 * span dependency holds either an untagged backpatch delta, or a tagged jump
 * target whose absolute offset is rebased against the offset of the first
 * spandep sharing pc's top (i.e., the start of this extended instruction).
 */
static ptrdiff_t
GetJumpOffset(JSCodeGenerator *cg, jsbytecode *pc)
{
    JSSpanDep *sd;
    JSJumpTarget *jt;
    ptrdiff_t top;

    if (!cg->spanDeps)
        return GET_JUMP_OFFSET(pc);

    sd = GetSpanDep(cg, pc);
    jt = sd->target;
    if (!JT_HAS_TAG(jt))
        return JT_TO_BPDELTA(jt);   /* backpatch chain delta, not a target */

    /* Scan backwards to the first spandep belonging to this instruction. */
    top = sd->top;
    while (--sd >= cg->spanDeps && sd->top == top)
        continue;
    sd++;
    return JT_CLR_TAG(jt)->offset - sd->offset;
}
- JSBool
- js_SetJumpOffset(JSContext *cx, JSCodeGenerator *cg, jsbytecode *pc,
- ptrdiff_t off)
- {
- if (!cg->spanDeps) {
- if (JUMP_OFFSET_MIN <= off && off <= JUMP_OFFSET_MAX) {
- SET_JUMP_OFFSET(pc, off);
- return JS_TRUE;
- }
- if (!BuildSpanDepTable(cx, cg))
- return JS_FALSE;
- }
- return SetSpanDepTarget(cx, cg, GetSpanDep(cg, pc), off);
- }
- JSBool
- js_InStatement(JSTreeContext *tc, JSStmtType type)
- {
- JSStmtInfo *stmt;
- for (stmt = tc->topStmt; stmt; stmt = stmt->down) {
- if (stmt->type == type)
- return JS_TRUE;
- }
- return JS_FALSE;
- }
- void
- js_PushStatement(JSTreeContext *tc, JSStmtInfo *stmt, JSStmtType type,
- ptrdiff_t top)
- {
- stmt->type = type;
- stmt->flags = 0;
- SET_STATEMENT_TOP(stmt, top);
- stmt->u.label = NULL;
- JS_ASSERT(!stmt->u.blockObj);
- stmt->down = tc->topStmt;
- tc->topStmt = stmt;
- if (STMT_LINKS_SCOPE(stmt)) {
- stmt->downScope = tc->topScopeStmt;
- tc->topScopeStmt = stmt;
- } else {
- stmt->downScope = NULL;
- }
- }
/*
 * Push a block statement that owns blockObj's bindings, marking it SIF_SCOPE
 * and threading blockObj onto tc->blockChain via the object's parent slot.
 */
void
js_PushBlockScope(JSTreeContext *tc, JSStmtInfo *stmt, JSObject *blockObj,
                  ptrdiff_t top)
{
    js_PushStatement(tc, stmt, STMT_BLOCK, top);
    stmt->flags |= SIF_SCOPE;

    /* Save the previous chain head in blockObj's parent before replacing it. */
    STOBJ_SET_PARENT(blockObj, tc->blockChain);
    stmt->downScope = tc->topScopeStmt;
    tc->topScopeStmt = stmt;
    tc->blockChain = blockObj;
    stmt->u.blockObj = blockObj;
}
- /*
- * Emit a backpatch op with offset pointing to the previous jump of this type,
- * so that we can walk back up the chain fixing up the op and jump offset.
- */
- static ptrdiff_t
- EmitBackPatchOp(JSContext *cx, JSCodeGenerator *cg, JSOp op, ptrdiff_t *lastp)
- {
- ptrdiff_t offset, delta;
- offset = CG_OFFSET(cg);
- delta = offset - *lastp;
- *lastp = offset;
- JS_ASSERT(delta > 0);
- return EmitJump(cx, cg, op, delta);
- }
/*
 * Macro to emit a bytecode followed by a uint16 immediate operand stored in
 * big-endian order, used for arg and var numbers as well as for atomIndexes.
 * NB: We use cx and cg from our caller's lexical environment, and return
 * false on error.  Also NB: i is evaluated twice (UINT16_HI and UINT16_LO),
 * so avoid side effects in that argument.
 */
#define EMIT_UINT16_IMM_OP(op, i)                                             \
    JS_BEGIN_MACRO                                                            \
        if (js_Emit3(cx, cg, op, UINT16_HI(i), UINT16_LO(i)) < 0)             \
            return JS_FALSE;                                                  \
    JS_END_MACRO
- static JSBool
- FlushPops(JSContext *cx, JSCodeGenerator *cg, intN *npops)
- {
- JS_ASSERT(*npops != 0);
- if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0)
- return JS_FALSE;
- EMIT_UINT16_IMM_OP(JSOP_POPN, *npops);
- *npops = 0;
- return JS_TRUE;
- }
/*
 * Emit additional bytecode(s) for non-local jumps (break/continue/return that
 * exit intervening statements).  Walks the statement stack from the innermost
 * statement out to (but not including) toStmt, emitting the cleanup ops each
 * exited statement requires, all annotated SRC_HIDDEN so the decompiler skips
 * them.
 */
static JSBool
EmitNonLocalJumpFixup(JSContext *cx, JSCodeGenerator *cg, JSStmtInfo *toStmt)
{
    intN depth, npops;
    JSStmtInfo *stmt;

    /*
     * The non-local jump fixup we emit will unbalance cg->stackDepth, because
     * the fixup replicates balanced code such as JSOP_LEAVEWITH emitted at the
     * end of a with statement, so we save cg->stackDepth here and restore it
     * just before a successful return.
     */
    depth = cg->stackDepth;
    npops = 0;
#define FLUSH_POPS() if (npops && !FlushPops(cx, cg, &npops)) return JS_FALSE
    for (stmt = cg->treeContext.topStmt; stmt != toStmt; stmt = stmt->down) {
        switch (stmt->type) {
          case STMT_FINALLY:
            /* Jumping out of try-finally must run the finally via gosub. */
            FLUSH_POPS();
            if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0)
                return JS_FALSE;
            if (EmitBackPatchOp(cx, cg, JSOP_BACKPATCH, &GOSUBS(*stmt)) < 0)
                return JS_FALSE;
            break;
          case STMT_WITH:
            /* There's a With object on the stack that we need to pop. */
            FLUSH_POPS();
            if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0)
                return JS_FALSE;
            if (js_Emit1(cx, cg, JSOP_LEAVEWITH) < 0)
                return JS_FALSE;
            break;
          case STMT_FOR_IN_LOOP:
            /*
             * The iterator and the object being iterated need to be popped.
             */
            FLUSH_POPS();
            if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0)
                return JS_FALSE;
            if (js_Emit1(cx, cg, JSOP_ENDITER) < 0)
                return JS_FALSE;
            break;
          case STMT_SUBROUTINE:
            /*
             * There's a [exception or hole, retsub pc-index] pair on the
             * stack that we need to pop.
             */
            npops += 2;
            break;
          default:;
        }
        if (stmt->flags & SIF_SCOPE) {
            uintN i;

            /* There is a Block object with locals on the stack to pop. */
            FLUSH_POPS();
            if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0)
                return JS_FALSE;
            i = OBJ_BLOCK_COUNT(cx, stmt->u.blockObj);
            EMIT_UINT16_IMM_OP(JSOP_LEAVEBLOCK, i);
        }
    }

    FLUSH_POPS();
    cg->stackDepth = depth;
    return JS_TRUE;

#undef FLUSH_POPS
}
- static ptrdiff_t
- EmitGoto(JSContext *cx, JSCodeGenerator *cg, JSStmtInfo *toStmt,
- ptrdiff_t *lastp, JSAtomListElement *label, JSSrcNoteType noteType)
- {
- intN index;
- if (!EmitNonLocalJumpFixup(cx, cg, toStmt))
- return -1;
- if (label)
- index = js_NewSrcNote2(cx, cg, noteType, (ptrdiff_t) ALE_INDEX(label));
- else if (noteType != SRC_NULL)
- index = js_NewSrcNote(cx, cg, noteType);
- else
- index = 0;
- if (index < 0)
- return -1;
- return EmitBackPatchOp(cx, cg, JSOP_BACKPATCH, lastp);
- }
/*
 * Walk the backpatch chain ending at offset last, rewriting each backpatch
 * op to op and its chain-delta operand to the true jump span to target.
 * The chain is threaded through the jump operands: each delta points back to
 * the previous backpatch op, and CG_CODE(cg, -1) terminates the walk.
 */
static JSBool
BackPatch(JSContext *cx, JSCodeGenerator *cg, ptrdiff_t last,
          jsbytecode *target, jsbytecode op)
{
    jsbytecode *pc, *stop;
    ptrdiff_t delta, span;

    pc = CG_CODE(cg, last);
    stop = CG_CODE(cg, -1);
    while (pc != stop) {
        /* Read the chain delta before overwriting the operand with span. */
        delta = GetJumpOffset(cg, pc);
        span = PTRDIFF(target, pc, jsbytecode);
        CHECK_AND_SET_JUMP_OFFSET(cx, cg, pc, span);

        /*
         * Set *pc after jump offset in case bpdelta didn't overflow, but span
         * does (if so, CHECK_AND_SET_JUMP_OFFSET might call BuildSpanDepTable
         * and need to see the JSOP_BACKPATCH* op at *pc).
         */
        *pc = op;
        pc -= delta;
    }
    return JS_TRUE;
}
- void
- js_PopStatement(JSTreeContext *tc)
- {
- JSStmtInfo *stmt;
- stmt = tc->topStmt;
- tc->topStmt = stmt->down;
- if (STMT_LINKS_SCOPE(stmt)) {
- tc->topScopeStmt = stmt->downScope;
- if (stmt->flags & SIF_SCOPE) {
- tc->blockChain = STOBJ_GET_PARENT(stmt->u.blockObj);
- JS_SCOPE_DEPTH_METERING(--tc->scopeDepth);
- }
- }
- }
- JSBool
- js_PopStatementCG(JSContext *cx, JSCodeGenerator *cg)
- {
- JSStmtInfo *stmt;
- stmt = cg->treeContext.topStmt;
- if (!STMT_IS_TRYING(stmt) &&
- (!BackPatch(cx, cg, stmt->breaks, CG_NEXT(cg), JSOP_GOTO) ||
- !BackPatch(cx, cg, stmt->continues, CG_CODE(cg, stmt->update),
- JSOP_GOTO))) {
- return JS_FALSE;
- }
- js_PopStatement(&cg->treeContext);
- return JS_TRUE;
- }
- JSBool
- js_DefineCompileTimeConstant(JSContext *cx, JSCodeGenerator *cg, JSAtom *atom,
- JSParseNode *pn)
- {
- jsdouble dval;
- jsint ival;
- JSAtom *valueAtom;
- jsval v;
- JSAtomListElement *ale;
- /* XXX just do numbers for now */
- if (pn->pn_type == TOK_NUMBER) {
- dval = pn->pn_dval;
- if (JSDOUBLE_IS_INT(dval, ival) && INT_FITS_IN_JSVAL(ival)) {
- v = INT_TO_JSVAL(ival);
- } else {
- /*
- * We atomize double to root a jsdouble instance that we wrap as
- * jsval and store in cg->constList. This works because atoms are
- * protected from GC during compilation.
- */
- valueAtom = js_AtomizeDouble(cx, dval);
- if (!valueAtom)
- return JS_FALSE;
- v = ATOM_KEY(valueAtom);
- }
- ale = js_IndexAtom(cx, atom, &cg->constList);
- if (!ale)
- return JS_FALSE;
- ALE_SET_VALUE(ale, v);
- }
- return JS_TRUE;
- }
/*
 * Find the innermost scope statement binding atom, walking out through tc's
 * scope-statement stack.  Returns the block statement whose block object
 * binds atom (setting *slotp to the block's stack base plus the binding's
 * shortid), the innermost enclosing STMT_WITH (lookup stops there: 'with'
 * makes bindings statically unknowable), or NULL if neither is found.  In
 * the latter two cases *slotp is set to -1 when slotp is non-null.
 */
JSStmtInfo *
js_LexicalLookup(JSTreeContext *tc, JSAtom *atom, jsint *slotp)
{
    JSStmtInfo *stmt;
    JSObject *obj;
    JSScope *scope;
    JSScopeProperty *sprop;

    for (stmt = tc->topScopeStmt; stmt; stmt = stmt->downScope) {
        if (stmt->type == STMT_WITH)
            break;

        /* Skip "maybe scope" statements that don't contain let bindings. */
        if (!(stmt->flags & SIF_SCOPE))
            continue;

        obj = stmt->u.blockObj;
        JS_ASSERT(LOCKED_OBJ_GET_CLASS(obj) == &js_BlockClass);
        scope = OBJ_SCOPE(obj);
        sprop = SCOPE_GET_PROPERTY(scope, ATOM_TO_JSID(atom));
        if (sprop) {
            JS_ASSERT(sprop->flags & SPROP_HAS_SHORTID);
            if (slotp) {
                /* Slot = block's stack depth base + binding's shortid. */
                JS_ASSERT(JSVAL_IS_INT(obj->fslots[JSSLOT_BLOCK_DEPTH]));
                *slotp = JSVAL_TO_INT(obj->fslots[JSSLOT_BLOCK_DEPTH]) +
                         sprop->shortid;
            }
            return stmt;
        }
    }

    if (slotp)
        *slotp = -1;
    return stmt;
}
/*
 * Check if the attributes describe a property holding a compile-time constant
 * or a permanent, read-only property without a getter.
 */
#define IS_CONSTANT_PROPERTY(attrs)                                           \
    (((attrs) & (JSPROP_READONLY | JSPROP_PERMANENT | JSPROP_GETTER)) ==      \
     (JSPROP_READONLY | JSPROP_PERMANENT))
/*
 * The function sets vp to JSVAL_HOLE when the atom does not correspond to a
 * name defining a constant.
 */
static JSBool
LookupCompileTimeConstant(JSContext *cx, JSCodeGenerator *cg, JSAtom *atom,
                          jsval *vp)
{
    JSBool ok;
    JSStmtInfo *stmt;
    JSAtomListElement *ale;
    JSObject *obj, *pobj;
    JSProperty *prop;
    uintN attrs;

    /*
     * Chase down the cg stack, but only until we reach the outermost cg.
     * This enables propagating consts from top-level into switch cases in a
     * function compiled along with the top-level script.
     */
    *vp = JSVAL_HOLE;
    do {
        if (cg->treeContext.flags & (TCF_IN_FUNCTION | TCF_COMPILE_N_GO)) {
            /* XXX this will need revising when 'let const' is added. */
            stmt = js_LexicalLookup(&cg->treeContext, atom, NULL);
            if (stmt)
                return JS_TRUE;   /* let binding or 'with' shadows the name */

            /* A const declared in this unit: return its cached value. */
            ATOM_LIST_SEARCH(ale, &cg->constList, atom);
            if (ale) {
                JS_ASSERT(ALE_VALUE(ale) != JSVAL_HOLE);
                *vp = ALE_VALUE(ale);
                return JS_TRUE;
            }

            /*
             * Try looking in the variable object for a direct property that
             * is readonly and permanent. We know such a property can't be
             * shadowed by another property on obj's prototype chain, or a
             * with object or catch variable; nor can prop's value be changed,
             * nor can prop be deleted.
             */
            if (cg->treeContext.flags & TCF_IN_FUNCTION) {
                /* A function local shadows any outer const: stop searching. */
                if (js_LookupLocal(cx, cg->treeContext.u.fun, atom, NULL) !=
                    JSLOCAL_NONE) {
                    break;
                }
            } else {
                JS_ASSERT(cg->treeContext.flags & TCF_COMPILE_N_GO);
                obj = cg->treeContext.u.scopeChain;
                ok = OBJ_LOOKUP_PROPERTY(cx, obj, ATOM_TO_JSID(atom), &pobj,
                                         &prop);
                if (!ok)
                    return JS_FALSE;
                if (pobj == obj) {
                    /*
                     * We're compiling code that will be executed immediately,
                     * not re-executed against a different scope chain and/or
                     * variable object. Therefore we can get constant values
                     * from our variable object here.
                     */
                    ok = OBJ_GET_ATTRIBUTES(cx, obj, ATOM_TO_JSID(atom), prop,
                                            &attrs);
                    if (ok && IS_CONSTANT_PROPERTY(attrs)) {
                        ok = OBJ_GET_PROPERTY(cx, obj, ATOM_TO_JSID(atom), vp);
                        JS_ASSERT_IF(ok, *vp != JSVAL_HOLE);
                    }
                }
                if (prop)
                    OBJ_DROP_PROPERTY(cx, pobj, prop);
                if (!ok)
                    return JS_FALSE;
                if (prop)
                    break;   /* found a binding here: stop walking parents */
            }
        }
    } while ((cg = cg->parent) != NULL);
    return JS_TRUE;
}
/*
 * Return JSOP_NOP to indicate that index fits 2 bytes and no index segment
 * reset instruction is necessary, JSOP_FALSE to indicate an error, or either
 * JSOP_RESETBASE0 or JSOP_RESETBASE to indicate the reset bytecode to issue
 * after the main bytecode sequence.
 */
static JSOp
EmitBigIndexPrefix(JSContext *cx, JSCodeGenerator *cg, uintN index)
{
    uintN indexBase;

    /*
     * We have max 3 bytes for indexes and check for INDEX_LIMIT overflow only
     * for big indexes.
     */
    JS_STATIC_ASSERT(INDEX_LIMIT <= JS_BIT(24));
    JS_STATIC_ASSERT(INDEX_LIMIT >=
                     (JSOP_INDEXBASE3 - JSOP_INDEXBASE1 + 2) << 16);

    /* Indexes below 2**16 need no prefix at all. */
    if (index < JS_BIT(16))
        return JSOP_NOP;

    indexBase = index >> 16;
    /* Segments 1-3 have dedicated single-byte prefix ops. */
    if (indexBase <= JSOP_INDEXBASE3 - JSOP_INDEXBASE1 + 1) {
        if (js_Emit1(cx, cg, (JSOp)(JSOP_INDEXBASE1 + indexBase - 1)) < 0)
            return JSOP_FALSE;
        return JSOP_RESETBASE0;
    }

    if (index >= INDEX_LIMIT) {
        JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
                             JSMSG_TOO_MANY_LITERALS);
        return JSOP_FALSE;
    }

    /* General case: JSOP_INDEXBASE with an explicit 8-bit segment operand. */
    if (js_Emit2(cx, cg, JSOP_INDEXBASE, (JSOp)indexBase) < 0)
        return JSOP_FALSE;
    return JSOP_RESETBASE;
}
- /*
- * Emit a bytecode and its 2-byte constant index immediate operand. If the
- * index requires more than 2 bytes, emit a prefix op whose 8-bit immediate
- * operand effectively extends the 16-bit immediate of the prefixed opcode,
- * by changing index "segment" (see jsinterp.c). We optimize segments 1-3
- * with single-byte JSOP_INDEXBASE[123] codes.
- *
- * Such prefixing currently requires a suffix to restore the "zero segment"
- * register setting, but this could be optimized further.
- */
- static JSBool
- EmitIndexOp(JSContext *cx, JSOp op, uintN index, JSCodeGenerator *cg)
- {
- JSOp bigSuffix;
- bigSuffix = EmitBigIndexPrefix(cx, cg, index);
- if (bigSuffix == JSOP_FALSE)
- return JS_FALSE;
- EMIT_UINT16_IMM_OP(op, index);
- return bigSuffix == JSOP_NOP || js_Emit1(cx, cg, bigSuffix) >= 0;
- }
/*
 * Slight sugar for EmitIndexOp, again accessing cx and cg from the macro
 * caller's lexical environment, and embedding a false return on error.
 */
#define EMIT_INDEX_OP(op, index)                                              \
    JS_BEGIN_MACRO                                                            \
        if (!EmitIndexOp(cx, op, index, cg))                                  \
            return JS_FALSE;                                                  \
    JS_END_MACRO
- static JSBool
- EmitAtomOp(JSContext *cx, JSParseNode *pn, JSOp op, JSCodeGenerator *cg)
- {
- JSAtomListElement *ale;
- JS_ASSERT(JOF_OPTYPE(op) == JOF_ATOM);
- if (op == JSOP_GETPROP &&
- pn->pn_atom == cx->runtime->atomState.lengthAtom) {
- return js_Emit1(cx, cg, JSOP_LENGTH) >= 0;
- }
- ale = js_IndexAtom(cx, pn->pn_atom, &cg->atomList);
- if (!ale)
- return JS_FALSE;
- return EmitIndexOp(cx, op, ALE_INDEX(ale), cg);
- }
static uintN
IndexParsedObject(JSParsedObjectBox *pob, JSEmittedObjectList *list);

/* Emit op with an index operand naming pob in cg's emitted-object list. */
static JSBool
EmitObjectOp(JSContext *cx, JSParsedObjectBox *pob, JSOp op,
             JSCodeGenerator *cg)
{
    JS_ASSERT(JOF_OPTYPE(op) == JOF_OBJECT);
    return EmitIndexOp(cx, op, IndexParsedObject(pob, &cg->objectList), cg);
}
/*
 * What good are ARGNO_LEN and SLOTNO_LEN, you ask? The answer is that, apart
 * from EmitSlotIndexOp, they abstract out the detail that both are 2, and in
 * other parts of the code there's no necessary relationship between the two.
 * The abstraction cracks here in order to share EmitSlotIndexOp code among
 * the JSOP_DEFLOCALFUN and JSOP_GET{ARG,VAR,LOCAL}PROP cases.
 */
JS_STATIC_ASSERT(ARGNO_LEN == 2);
JS_STATIC_ASSERT(SLOTNO_LEN == 2);

/*
 * Emit op with a 2-byte slot immediate followed by an index immediate,
 * bracketed by big-index prefix/suffix ops when index exceeds 16 bits.
 */
static JSBool
EmitSlotIndexOp(JSContext *cx, JSOp op, uintN slot, uintN index,
                JSCodeGenerator *cg)
{
    JSOp bigSuffix;
    ptrdiff_t off;
    jsbytecode *pc;

    JS_ASSERT(JOF_OPTYPE(op) == JOF_SLOTATOM ||
              JOF_OPTYPE(op) == JOF_SLOTOBJECT);
    bigSuffix = EmitBigIndexPrefix(cx, cg, index);
    if (bigSuffix == JSOP_FALSE)
        return JS_FALSE;

    /* Emit [op, slot, index]. */
    off = js_EmitN(cx, cg, op, 2 + INDEX_LEN);
    if (off < 0)
        return JS_FALSE;
    pc = CG_CODE(cg, off);
    SET_UINT16(pc, slot);
    pc += 2;
    SET_INDEX(pc, index);
    return bigSuffix == JSOP_NOP || js_Emit1(cx, cg, bigSuffix) >= 0;
}
- /*
- * Adjust the slot for a block local to account for the number of variables
- * that share the same index space with locals. Due to the incremental code
- * generation for top-level script, we do the adjustment via code patching in
- * js_CompileScript; see comments there.
- *
- * The function returns -1 on failures.
- */
- static jsint
- AdjustBlockSlot(JSContext *cx, JSCodeGenerator *cg, jsint slot)
- {
- JS_ASSERT((jsuint) slot < cg->maxStackDepth);
- if (cg->treeContext.flags & TCF_IN_FUNCTION) {
- slot += cg->treeContext.u.fun->u.i.nvars;
- if ((uintN) slot >= SLOTNO_LIMIT) {
- js_ReportCompileErrorNumber(cx, CG_TS(cg), NULL,
- JSREPORT_ERROR,
- JSMSG_TOO_MANY_LOCALS);
- slot = -1;
- }
- }
- return slot;
- }
- /*
- * This routine tries to optimize name gets and sets to stack slot loads and
- * stores, given the variables object and scope chain in cx's top frame, the
- * compile-time context in tc, and a TOK_NAME node pn. It returns false on
- * error, true on success.
- *
- * The caller can inspect pn->pn_slot for a non-negative slot number to tell
- * whether optimization occurred, in which case BindNameToSlot also updated
- * pn->pn_op. If pn->pn_slot is still -1 on return, pn->pn_op nevertheless
- * may have been optimized, e.g., from JSOP_NAME to JSOP_ARGUMENTS. Whether
- * or not pn->pn_op was modified, if this function finds an argument or local
- * variable name, pn->pn_const will be true for const properties after a
- * successful return.
- *
- * NB: if you add more opcodes specialized from JSOP_NAME, etc., don't forget
- * to update the TOK_FOR (for-in) and TOK_ASSIGN (op=, e.g. +=) special cases
- * in js_EmitTree.
- */
- static JSBool
- BindNameToSlot(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
- {
- JSTreeContext *tc;
- JSAtom *atom;
- JSStmtInfo *stmt;
- jsint slot;
- JSOp op;
- JSLocalKind localKind;
- uintN index;
- JSAtomListElement *ale;
- JSBool constOp;
- JS_ASSERT(pn->pn_type == TOK_NAME);
- if (pn->pn_slot >= 0 || pn->pn_op == JSOP_ARGUMENTS)
- return JS_TRUE;
- /* QNAME references can never be optimized to use arg/var storage. */
- if (pn->pn_op == JSOP_QNAMEPART)
- return JS_TRUE;
- /*
- * We can't optimize if we are compiling a with statement and its body,
- * or we're in a catch block whose exception variable has the same name
- * as this node. FIXME: we should be able to optimize catch vars to be
- * block-locals.
- */
- tc = &cg->treeContext;
- atom = pn->pn_atom;
- stmt = js_LexicalLookup(tc, atom, &slot);
- if (stmt) {
- if (stmt->type == STMT_WITH)
- return JS_TRUE;
- JS_ASSERT(stmt->flags & SIF_SCOPE);
- JS_ASSERT(slot >= 0);
- op = PN_OP(pn);
- switch (op) {
- case JSOP_NAME: op = JSOP_GETLOCAL; break;
- case JSOP_SETNAME: op = JSOP_SETLOCAL; break;
- case JSOP_INCNAME: op = JSOP_INCLOCAL; break;
- case JSOP_NAMEINC: op = JSOP_LOCALINC; break;
- case JSOP_DECNAME: op = JSOP_DECLOCAL; break;
- case JSOP_NAMEDEC: op = JSOP_LOCALDEC; break;
- case JSOP_FORNAME: op = JSOP_FORLOCAL; break;
- case JSOP_DELNAME: op = JSOP_FALSE; break;
- default: JS_ASSERT(0);
- }
- if (op != pn->pn_op) {
- slot = AdjustBlockSlot(cx, cg, slot);
- if (slot < 0)
- return JS_FALSE;
- pn->pn_op = op;
- pn->pn_slot = slot;
- }
- return JS_TRUE;
- }
- /*
- * We can't optimize if var and closure (a local function not in a larger
- * expression and not at top-level within another's body) collide.
- * XXX suboptimal: keep track of colliding names and deoptimize only those
- */
- if (tc->flags & TCF_FUN_CLOSURE_VS_VAR)
- return JS_TRUE;
- if (!(tc->flags & TCF_IN_FUNCTION)) {
- JSStackFrame *caller;
- caller = tc->parseContext->callerFrame;
- if (caller) {
- JS_ASSERT(tc->flags & TCF_COMPILE_N_GO);
- JS_ASSERT(caller->script);
- if (!caller->fun || caller->varobj != tc->u.scopeChain)
- return JS_TRUE;
- /*
- * We are compiling eval or debug script inside a function frame
- * and the scope chain matches function's variable object.
- * Optimize access to function's arguments and variable and the
- * arguments object.
- */
- if (PN_OP(pn) != JSOP_NAME || cg->staticDepth > JS_DISPLAY_SIZE)
- goto arguments_check;
- localKind = js_LookupLocal(cx, caller->fun, atom, &index);
- if (localKind == JSLOCAL_NONE)
- goto arguments_check;
- ATOM_LIST_SEARCH(ale, &cg->upvarList, atom);
- if (!ale) {
- uint32 length, *vector;
- ale = js_IndexAtom(cx, atom, &cg->upvarList);
- if (!ale)
- return JS_FALSE;
- JS_ASSERT(ALE_INDEX(ale) == cg->upvarList.count - 1);
- length = cg->upvarMap.length;
- JS_ASSERT(ALE_INDEX(ale) <= length);
- if (ALE_INDEX(ale) == length) {
- length = 2 * JS_MAX(2, length);
- vector = (uint32 *)
- JS_realloc(cx, cg->upvarMap.vector,
- length * sizeof *vector);
- if (!vector)
- return JS_FALSE;
- cg->upvarMap.vector = vector;
- cg->upvarMap.length = length;
- }
- if (localKind != JSLOCAL_ARG)
- index += caller->fun->nargs;
- if (index >= JS_BIT(16)) {
- cg->treeContext.flags |= TCF_FUN_USES_NONLOCALS;
- return JS_TRUE;
- }
- JS_ASSERT(cg->staticDepth > caller->fun->u.i.script->staticDepth);
- uintN skip = cg->staticDepth - caller->fun->u.i.script->staticDepth;
- cg->upvarMap.vector[ALE_INDEX(ale)] = MAKE_UPVAR_COOKIE(skip, index);
- }
- pn->pn_op = JSOP_GETUPVAR;
- pn->pn_slot = ALE_INDEX(ale);
- return JS_TRUE;
- }
- /*
- * We are optimizing global variables and there may be no pre-existing
- * global property named atom. If atom was declared via const or var,
- * optimize pn to access fp->vars using the appropriate JSOP_*GVAR op.
- */
- ATOM_LIST_SEARCH(ale, &tc->decls, atom);
- if (!ale) {
- /* Use precedes declaration, or name is never declared. */
- return JS_TRUE;
- }
- constOp = (ALE_JSOP(ale) == JSOP_DEFCONST);
- /* Index atom so we can map fast global number to name. */
- ale = js_IndexAtom(cx, atom, &cg->atomList);
- if (!ale)
- return JS_FALSE;
- /* Defend against tc->ngvars 16-bit overflow. */
- slot = ALE_INDEX(ale);
- if ((slot + 1) >> 16)
- return JS_TRUE;
- if ((uint16)(slot + 1) > tc->ngvars)
- tc->ngvars = (uint16)(slot + 1);
- op = PN_OP(pn);
- switch (op) {
- case JSOP_NAME: op = JSOP_GETGVAR; break;
- case JSOP_SETNAME: op = JSOP_SETGVAR; break;
- case JSOP_SETCONST: /* NB: no change */ break;
- case JSOP_INCNAME: op = JSOP_INCGVAR; break;
- case JSOP_NAMEINC: op = JSOP_GVARINC; break;
- case JSOP_DECNAME: o