/js/lib/Socket.IO-node/support/expresso/deps/jscoverage/js/jstracer.cpp
C++ | 1877 lines | 1476 code | 187 blank | 214 comment | 360 complexity | c3178098a8b605185c2e4b7b07c99e8c MD5 | raw file
Possible License(s): GPL-2.0, LGPL-2.1, MPL-2.0-no-copyleft-exception, BSD-3-Clause
Large files are truncated, but you can click here to view the full file
- /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=4 sw=4 et tw=99:
- *
- * ***** BEGIN LICENSE BLOCK *****
- * Version: MPL 1.1/GPL 2.0/LGPL 2.1
- *
- * The contents of this file are subject to the Mozilla Public License Version
- * 1.1 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- * http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS IS" basis,
- * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
- * for the specific language governing rights and limitations under the
- * License.
- *
- * The Original Code is Mozilla SpiderMonkey JavaScript 1.9 code, released
- * May 28, 2008.
- *
- * The Initial Developer of the Original Code is
- * Brendan Eich <brendan@mozilla.org>
- *
- * Contributor(s):
- * Andreas Gal <gal@mozilla.com>
- * Mike Shaver <shaver@mozilla.org>
- * David Anderson <danderson@mozilla.com>
- *
- * Alternatively, the contents of this file may be used under the terms of
- * either of the GNU General Public License Version 2 or later (the "GPL"),
- * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
- * in which case the provisions of the GPL or the LGPL are applicable instead
- * of those above. If you wish to allow use of your version of this file only
- * under the terms of either the GPL or the LGPL, and not to allow others to
- * use your version of this file under the terms of the MPL, indicate your
- * decision by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL or the LGPL. If you do not delete
- * the provisions above, a recipient may use your version of this file under
- * the terms of any one of the MPL, the GPL or the LGPL.
- *
- * ***** END LICENSE BLOCK ***** */
- #include "jsstddef.h" // always first
- #include "jsbit.h" // low-level (NSPR-based) headers next
- #include "jsprf.h"
- #include <math.h> // standard headers next
- #ifdef _MSC_VER
- #include <malloc.h>
- #define alloca _alloca
- #endif
- #ifdef SOLARIS
- #include <alloca.h>
- #endif
- #include "nanojit/nanojit.h"
- #include "jsarray.h" // higher-level library and API headers
- #include "jsbool.h"
- #include "jscntxt.h"
- #include "jsdbgapi.h"
- #include "jsemit.h"
- #include "jsfun.h"
- #include "jsinterp.h"
- #include "jsiter.h"
- #include "jsobj.h"
- #include "jsopcode.h"
- #include "jsregexp.h"
- #include "jsscope.h"
- #include "jsscript.h"
- #include "jsdate.h"
- #include "jsstaticcheck.h"
- #include "jstracer.h"
- #include "jsautooplen.h" // generated headers last
/* Never use JSVAL_IS_BOOLEAN because it restricts the value (true, false) and
   the type. What you want to use is JSVAL_TAG(x) == JSVAL_BOOLEAN and then
   handle the undefined case properly (bug 457363). */
#undef JSVAL_IS_BOOLEAN
#define JSVAL_IS_BOOLEAN(x) JS_STATIC_ASSERT(0)

/* Use a fake tag to represent boxed values, borrowing from the integer tag
   range since we only use JSVAL_INT to indicate integers. */
#define JSVAL_BOXED 3

/* Map to translate a type tag into a printable representation. */
static const char typeChar[] = "OIDVS?B?";

/* Number of iterations of a loop where we start tracing. That is, we don't
   start tracing until the beginning of the HOTLOOP-th iteration. */
#define HOTLOOP 2

/* Number of times we wait to exit on a side exit before we try to extend the tree. */
#define HOTEXIT 1

/* Max call depths for inlining. */
#define MAX_CALLDEPTH 10

/* Max number of type mismatches before we trash the tree. */
#define MAX_MISMATCH 20

/* Max blacklist level of inner tree immediate recompiling */
#define MAX_INNER_RECORD_BLACKLIST -16

/* Max native stack size. */
#define MAX_NATIVE_STACK_SLOTS 1024

/* Max call stack size. */
#define MAX_CALL_STACK_ENTRIES 64

/* Max number of branches per tree. */
#define MAX_BRANCHES 16

/* Macros for demote slot lists: list[0] holds the element count, the slot
   numbers follow in list[1..]. */
#define ALLOCA_UNDEMOTE_SLOTLIST(num) (unsigned*)alloca(((num) + 1) * sizeof(unsigned))
#define ADD_UNDEMOTE_SLOT(list, slot) list[++list[0]] = slot
#define NUM_UNDEMOTE_SLOTS(list) list[0]
#define CLEAR_UNDEMOTE_SLOTLIST(list) list[0] = 0

/* Abort recording; logs the source line and reason when spew is compiled in. */
#ifdef JS_JIT_SPEW
#define ABORT_TRACE(msg) do { debug_only_v(fprintf(stdout, "abort: %d: %s\n", __LINE__, msg);) return false; } while (0)
#else
#define ABORT_TRACE(msg) return false
#endif

#ifdef JS_JIT_SPEW
/* One uint64 counter per line of jitstats.tbl; exposed to script via jitstats_class. */
struct __jitstats {
#define JITSTAT(x) uint64 x;
#include "jitstats.tbl"
#undef JITSTAT
} jitstats = { 0LL, };

JS_STATIC_ASSERT(sizeof(jitstats) % sizeof(uint64) == 0);

/* One tinyid per counter, used as the property id on the jitstats object. */
enum jitstat_ids {
#define JITSTAT(x) STAT ## x ## ID,
#include "jitstats.tbl"
#undef JITSTAT
    STAT_IDS_TOTAL
};

/* Read-only, enumerable, permanent property spec for each counter. */
static JSPropertySpec jitstats_props[] = {
#define JITSTAT(x) { #x, STAT ## x ## ID, JSPROP_ENUMERATE | JSPROP_READONLY | JSPROP_PERMANENT },
#include "jitstats.tbl"
#undef JITSTAT
    { 0 }
};
- static JSBool
- jitstats_getProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
- {
- int index = -1;
- if (JSVAL_IS_STRING(id)) {
- JSString* str = JSVAL_TO_STRING(id);
- if (strcmp(JS_GetStringBytes(str), "HOTLOOP") == 0) {
- *vp = INT_TO_JSVAL(HOTLOOP);
- return JS_TRUE;
- }
- }
- if (JSVAL_IS_INT(id))
- index = JSVAL_TO_INT(id);
- uint64 result = 0;
- switch (index) {
- #define JITSTAT(x) case STAT ## x ## ID: result = jitstats.x; break;
- #include "jitstats.tbl"
- #undef JITSTAT
- default:
- *vp = JSVAL_VOID;
- return JS_TRUE;
- }
- if (result < JSVAL_INT_MAX) {
- *vp = INT_TO_JSVAL(result);
- return JS_TRUE;
- }
- char retstr[64];
- JS_snprintf(retstr, sizeof retstr, "%llu", result);
- *vp = STRING_TO_JSVAL(JS_NewStringCopyZ(cx, retstr));
- return JS_TRUE;
- }
/* JSClass for the jitstats singleton; all property reads funnel through
   jitstats_getProperty above. */
JSClass jitstats_class = {
    "jitstats",
    JSCLASS_HAS_PRIVATE,
    JS_PropertyStub, JS_PropertyStub,
    jitstats_getProperty, JS_PropertyStub,
    JS_EnumerateStub, JS_ResolveStub,
    JS_ConvertStub, JS_FinalizeStub,
    JSCLASS_NO_OPTIONAL_MEMBERS
};

/* Install the jitstats class and its counter properties on the global object. */
void
js_InitJITStatsClass(JSContext *cx, JSObject *glob)
{
    JS_InitClass(cx, glob, NULL, &jitstats_class, NULL, 0, jitstats_props, NULL, NULL, NULL);
}
/* Bump a jitstats counter; compiles to nothing without JS_JIT_SPEW. */
#define AUDIT(x) (jitstats.x++)
#else
#define AUDIT(x) ((void)0)
#endif /* JS_JIT_SPEW */

/* Emit a named immediate / immediate pointer into the LIR stream. */
#define INS_CONST(c) addName(lir->insImm(c), #c)
#define INS_CONSTPTR(p) addName(lir->insImmPtr((void*) (p)), #p)

using namespace avmplus;
using namespace nanojit;

/* Single GC and AvmCore instance shared by all nanojit allocations here. */
static GC gc = GC();
static avmplus::AvmCore s_core = avmplus::AvmCore();
static avmplus::AvmCore* core = &s_core;

#ifdef JS_JIT_SPEW
void
js_DumpPeerStability(Fragmento* frago, const void* ip);
#endif

/* We really need a better way to configure the JIT. Shaver, where is my fancy JIT object? */
static bool nesting_enabled = true;
#if defined(NANOJIT_IA32)
static bool did_we_check_sse2 = false;
#endif

#ifdef JS_JIT_SPEW
/* Verbose spew is enabled by putting "verbose" in the TRACEMONKEY env var. */
static bool verbose_debug = getenv("TRACEMONKEY") && strstr(getenv("TRACEMONKEY"), "verbose");
#define debug_only_v(x) if (verbose_debug) { x; }
#else
#define debug_only_v(x)
#endif

/* The entire VM shares one oracle. Collisions and concurrent updates are tolerated and worst
   case cause performance regressions. */
static Oracle oracle;

/* Blacklists the root peer fragment at a fragment's PC. This is so blacklisting stays at the
   top of the peer list and not scattered around. */
void
js_BlacklistPC(Fragmento* frago, Fragment* frag);
- Tracker::Tracker()
- {
- pagelist = 0;
- }
- Tracker::~Tracker()
- {
- clear();
- }
- jsuword
- Tracker::getPageBase(const void* v) const
- {
- return jsuword(v) & ~jsuword(NJ_PAGE_SIZE-1);
- }
- struct Tracker::Page*
- Tracker::findPage(const void* v) const
- {
- jsuword base = getPageBase(v);
- struct Tracker::Page* p = pagelist;
- while (p) {
- if (p->base == base) {
- return p;
- }
- p = p->next;
- }
- return 0;
- }
/*
 * Allocate a page record for the page containing v and push it on the
 * front of the page list.  The allocation replaces the declared map[]
 * with one LIns* slot per 4-byte word in the page, hence the
 * (NJ_PAGE_SIZE >> 2) sizing.
 */
struct Tracker::Page*
Tracker::addPage(const void* v) {
    jsuword base = getPageBase(v);
    struct Tracker::Page* p = (struct Tracker::Page*)
        GC::Alloc(sizeof(*p) - sizeof(p->map) + (NJ_PAGE_SIZE >> 2) * sizeof(LIns*));
    p->base = base;
    p->next = pagelist;
    pagelist = p;
    return p;
}
- void
- Tracker::clear()
- {
- while (pagelist) {
- Page* p = pagelist;
- pagelist = pagelist->next;
- GC::Free(p);
- }
- }
- bool
- Tracker::has(const void *v) const
- {
- return get(v) != NULL;
- }
/* Offset-within-page mask.  NOTE(review): 0x7ff on 64-bit vs 0xfff on
   32-bit presumably reflects a smaller NJ_PAGE_SIZE there — confirm
   against nanojit's NJ_PAGE_SIZE definition. */
#if defined NANOJIT_64BIT
#define PAGEMASK 0x7ff
#else
#define PAGEMASK 0xfff
#endif

/* Return the LIR instruction tracked for address v, or NULL.  The map is
   indexed by v's 4-byte-word offset within its page (hence >> 2). */
LIns*
Tracker::get(const void* v) const
{
    struct Tracker::Page* p = findPage(v);
    if (!p)
        return NULL;
    return p->map[(jsuword(v) & PAGEMASK) >> 2];
}

/* Associate LIR instruction i with address v, creating the page on demand. */
void
Tracker::set(const void* v, LIns* i)
{
    struct Tracker::Page* p = findPage(v);
    if (!p)
        p = addPage(v);
    p->map[(jsuword(v) & PAGEMASK) >> 2] = i;
}
- static inline bool isNumber(jsval v)
- {
- return JSVAL_IS_INT(v) || JSVAL_IS_DOUBLE(v);
- }
- static inline jsdouble asNumber(jsval v)
- {
- JS_ASSERT(isNumber(v));
- if (JSVAL_IS_DOUBLE(v))
- return *JSVAL_TO_DOUBLE(v);
- return (jsdouble)JSVAL_TO_INT(v);
- }
- static inline bool isInt32(jsval v)
- {
- if (!isNumber(v))
- return false;
- jsdouble d = asNumber(v);
- jsint i;
- return JSDOUBLE_IS_INT(d, i);
- }
- /* Return JSVAL_DOUBLE for all numbers (int and double) and the tag otherwise. */
- static inline uint8 getPromotedType(jsval v)
- {
- return JSVAL_IS_INT(v) ? JSVAL_DOUBLE : uint8(JSVAL_TAG(v));
- }
- /* Return JSVAL_INT for all whole numbers that fit into signed 32-bit and the tag otherwise. */
- static inline uint8 getCoercedType(jsval v)
- {
- return isInt32(v) ? JSVAL_INT : (uint8) JSVAL_TAG(v);
- }
/* Tell the oracle that a certain global variable should not be demoted.
   The hash is just the slot number; cross-script collisions are tolerated. */
void
Oracle::markGlobalSlotUndemotable(JSScript* script, unsigned slot)
{
    _dontDemote.set(&gc, (slot % ORACLE_SIZE));
}

/* Consult with the oracle whether we shouldn't demote a certain global variable. */
bool
Oracle::isGlobalSlotUndemotable(JSScript* script, unsigned slot) const
{
    return _dontDemote.get(slot % ORACLE_SIZE);
}

/* Tell the oracle that a certain slot at a certain bytecode location should not be demoted.
   The hash mixes the PC with the slot number; it must stay in sync with
   isStackSlotUndemotable below. */
void
Oracle::markStackSlotUndemotable(JSScript* script, jsbytecode* ip, unsigned slot)
{
    uint32 hash = uint32(intptr_t(ip)) + (slot << 5);
    hash %= ORACLE_SIZE;
    _dontDemote.set(&gc, hash);
}

/* Consult with the oracle whether we shouldn't demote a certain slot. */
bool
Oracle::isStackSlotUndemotable(JSScript* script, jsbytecode* ip, unsigned slot) const
{
    uint32 hash = uint32(intptr_t(ip)) + (slot << 5);
    hash %= ORACLE_SIZE;
    return _dontDemote.get(hash);
}

/* Clear the oracle (forget all do-not-demote decisions). */
void
Oracle::clear()
{
    _dontDemote.reset();
}
#if defined(NJ_SOFTFLOAT)
JS_DEFINE_CALLINFO_1(static, DOUBLE, i2f, INT32, 1, 1)
JS_DEFINE_CALLINFO_1(static, DOUBLE, u2f, UINT32, 1, 1)
#endif

/*
 * Is instruction i an int->double conversion?  On soft-float builds the
 * conversion is emitted as qjoin(call(i2f), callh) rather than LIR_i2f,
 * so both shapes are recognized.
 */
static bool isi2f(LInsp i)
{
    if (i->isop(LIR_i2f))
        return true;
#if defined(NJ_SOFTFLOAT)
    if (i->isop(LIR_qjoin) &&
        i->oprnd1()->isop(LIR_call) &&
        i->oprnd2()->isop(LIR_callh))
    {
        if (i->oprnd1()->callInfo() == &i2f_ci)
            return true;
    }
#endif
    return false;
}

/* Like isi2f, but for the unsigned conversion (LIR_u2f / u2f helper call). */
static bool isu2f(LInsp i)
{
    if (i->isop(LIR_u2f))
        return true;
#if defined(NJ_SOFTFLOAT)
    if (i->isop(LIR_qjoin) &&
        i->oprnd1()->isop(LIR_call) &&
        i->oprnd2()->isop(LIR_callh))
    {
        if (i->oprnd1()->callInfo() == &u2f_ci)
            return true;
    }
#endif
    return false;
}

/* Fetch the integer operand of an i2f/u2f conversion in either form. */
static LInsp iu2fArg(LInsp i)
{
#if defined(NJ_SOFTFLOAT)
    if (i->isop(LIR_qjoin))
        return i->oprnd1()->arg(0);
#endif
    return i->oprnd1();
}
/*
 * Undo a promotion: given a double-typed instruction known to carry an
 * integer value, return the equivalent int-typed instruction (the call
 * argument, the conversion's operand, or a re-emitted immediate).
 */
static LIns* demote(LirWriter *out, LInsp i)
{
    if (i->isCall())
        return callArgN(i, 0);
    if (isi2f(i) || isu2f(i))
        return iu2fArg(i);
    if (i->isconst())
        return i;
    AvmAssert(i->isconstq());
    double cf = i->constvalf();
    /* Values above INT32_MAX are narrowed through uint32_t instead. */
    int32_t ci = cf > 0x7fffffff ? uint32_t(cf) : int32_t(cf);
    return out->insImm(ci);
}

/* Can i be demoted to a signed 32-bit int (integral value, not -0)? */
static bool isPromoteInt(LIns* i)
{
    jsdouble d;
    return isi2f(i) || i->isconst() ||
        (i->isconstq() && (d = i->constvalf()) == jsdouble(jsint(d)) && !JSDOUBLE_IS_NEGZERO(d));
}

/* Can i be demoted to an unsigned 32-bit int (integral value, not -0)? */
static bool isPromoteUint(LIns* i)
{
    jsdouble d;
    return isu2f(i) || i->isconst() ||
        (i->isconstq() && (d = i->constvalf()) == (jsdouble)(jsuint)d && !JSDOUBLE_IS_NEGZERO(d));
}

/* Can i be demoted either way? */
static bool isPromote(LIns* i)
{
    return isPromoteInt(i) || isPromoteUint(i);
}

/* Is i the integer constant c? */
static bool isconst(LIns* i, int32_t c)
{
    return i->isconst() && i->constval() == c;
}

/*
 * Is the int32 result of i guaranteed not to overflow in subsequent
 * arithmetic?  True for AND with a mask that clears the top two bits and
 * for right shifts by a positive constant.
 */
static bool overflowSafe(LIns* i)
{
    LIns* c;
    return (i->isop(LIR_and) && ((c = i->oprnd2())->isconst()) &&
            ((c->constval() & 0xc0000000) == 0)) ||
           (i->isop(LIR_rsh) && ((c = i->oprnd2())->isconst()) &&
            ((c->constval() > 0)));
}
#if defined(NJ_SOFTFLOAT)
/* soft float: out-of-line helpers that stand in for hardware FP ops.
   SoftFloatFilter (below) rewrites double-typed LIR into calls to these. */
JS_DEFINE_CALLINFO_1(static, DOUBLE, fneg, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(static, INT32, fcmpeq, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(static, INT32, fcmplt, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(static, INT32, fcmple, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(static, INT32, fcmpgt, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(static, INT32, fcmpge, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(static, DOUBLE, fmul, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(static, DOUBLE, fadd, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(static, DOUBLE, fdiv, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(static, DOUBLE, fsub, DOUBLE, DOUBLE, 1, 1)

jsdouble FASTCALL
fneg(jsdouble x)
{
    return -x;
}

jsdouble FASTCALL
i2f(int32 i)
{
    return i;
}

jsdouble FASTCALL
u2f(jsuint u)
{
    return u;
}

int32 FASTCALL
fcmpeq(jsdouble x, jsdouble y)
{
    return x==y;
}

int32 FASTCALL
fcmplt(jsdouble x, jsdouble y)
{
    return x < y;
}

int32 FASTCALL
fcmple(jsdouble x, jsdouble y)
{
    return x <= y;
}

int32 FASTCALL
fcmpgt(jsdouble x, jsdouble y)
{
    return x > y;
}

int32 FASTCALL
fcmpge(jsdouble x, jsdouble y)
{
    return x >= y;
}

jsdouble FASTCALL
fmul(jsdouble x, jsdouble y)
{
    return x * y;
}

jsdouble FASTCALL
fadd(jsdouble x, jsdouble y)
{
    return x + y;
}

jsdouble FASTCALL
fdiv(jsdouble x, jsdouble y)
{
    return x / y;
}

jsdouble FASTCALL
fsub(jsdouble x, jsdouble y)
{
    return x - y;
}
/*
 * LIR filter for soft-float targets: rewrites double-typed LIR operations
 * into calls to the helper functions above.  Each helper returning a
 * double is paired with LIR_callh and LIR_qjoin to carry the 64-bit
 * result through 32-bit registers.
 */
class SoftFloatFilter: public LirWriter
{
public:
    SoftFloatFilter(LirWriter* out):
        LirWriter(out)
    {
    }

    /* Emit call + callh + qjoin for a helper returning a 64-bit double. */
    LInsp quadCall(const CallInfo *ci, LInsp args[]) {
        LInsp qlo, qhi;
        qlo = out->insCall(ci, args);
        qhi = out->ins1(LIR_callh, qlo);
        return out->qjoin(qlo, qhi);
    }

    LInsp ins1(LOpcode v, LInsp s0)
    {
        if (v == LIR_fneg)
            return quadCall(&fneg_ci, &s0);
        if (v == LIR_i2f)
            return quadCall(&i2f_ci, &s0);
        if (v == LIR_u2f)
            return quadCall(&u2f_ci, &s0);
        return out->ins1(v, s0);
    }

    LInsp ins2(LOpcode v, LInsp s0, LInsp s1)
    {
        LInsp args[2];
        LInsp bv;
        /* The fmap tables below are indexed by opcode delta:
           change the numeric value and order of these LIR opcodes and die. */
        if (LIR_fadd <= v && v <= LIR_fdiv) {
            static const CallInfo *fmap[] = { &fadd_ci, &fsub_ci, &fmul_ci, &fdiv_ci };
            args[0] = s1;
            args[1] = s0;
            return quadCall(fmap[v - LIR_fadd], args);
        }
        if (LIR_feq <= v && v <= LIR_fge) {
            static const CallInfo *fmap[] = { &fcmpeq_ci, &fcmplt_ci, &fcmpgt_ci, &fcmple_ci, &fcmpge_ci };
            args[0] = s1;
            args[1] = s0;
            bv = out->insCall(fmap[v - LIR_feq], args);
            /* Normalize the helper's int result to a LIR boolean. */
            return out->ins2(LIR_eq, bv, out->insImm(1));
        }
        return out->ins2(v, s0, s1);
    }

    LInsp insCall(const CallInfo *ci, LInsp args[])
    {
        // if the return type is ARGSIZE_F, we have
        // to do a quadCall ( qjoin(call,callh) )
        if ((ci->_argtypes & 3) == ARGSIZE_F)
            return quadCall(ci, args);
        return out->insCall(ci, args);
    }
};
#endif // NJ_SOFTFLOAT
/*
 * Peephole LIR filter: folds away redundant double comparisons, demotes
 * double arithmetic/comparisons to int where provably safe, and rewrites
 * calls to known builtins (unbox, string->number, box) into cheaper
 * integer variants.
 */
class FuncFilter: public LirWriter
{
public:
    FuncFilter(LirWriter* out):
        LirWriter(out)
    {
    }

    LInsp ins2(LOpcode v, LInsp s0, LInsp s1)
    {
        if (s0 == s1 && v == LIR_feq) {
            /* x == x folds to true when x provably cannot be NaN. */
            if (isPromote(s0)) {
                // double(int) and double(uint) cannot be nan
                return insImm(1);
            }
            if (s0->isop(LIR_fmul) || s0->isop(LIR_fsub) || s0->isop(LIR_fadd)) {
                LInsp lhs = s0->oprnd1();
                LInsp rhs = s0->oprnd2();
                if (isPromote(lhs) && isPromote(rhs)) {
                    // add/sub/mul promoted ints can't be nan
                    return insImm(1);
                }
            }
        } else if (LIR_feq <= v && v <= LIR_fge) {
            if (isPromoteInt(s0) && isPromoteInt(s1)) {
                // demote fcmp to cmp
                v = LOpcode(v + (LIR_eq - LIR_feq));
                return out->ins2(v, demote(out, s0), demote(out, s1));
            } else if (isPromoteUint(s0) && isPromoteUint(s1)) {
                // uint compare
                v = LOpcode(v + (LIR_eq - LIR_feq));
                if (v != LIR_eq)
                    v = LOpcode(v + (LIR_ult - LIR_lt)); // cmp -> ucmp
                return out->ins2(v, demote(out, s0), demote(out, s1));
            }
        } else if (v == LIR_or &&
                   s0->isop(LIR_lsh) && isconst(s0->oprnd2(), 16) &&
                   s1->isop(LIR_and) && isconst(s1->oprnd2(), 0xffff)) {
            /*
             * Match the split 16-bit-halves addition pattern
             * ((msw << 16) | (lsw & 0xffff)) and collapse it back into a
             * single 32-bit add when both halves derive from the same
             * operands x and y.
             */
            LIns* msw = s0->oprnd1();
            LIns* lsw = s1->oprnd1();
            LIns* x;
            LIns* y;
            if (lsw->isop(LIR_add) &&
                lsw->oprnd1()->isop(LIR_and) &&
                lsw->oprnd2()->isop(LIR_and) &&
                isconst(lsw->oprnd1()->oprnd2(), 0xffff) &&
                isconst(lsw->oprnd2()->oprnd2(), 0xffff) &&
                msw->isop(LIR_add) &&
                msw->oprnd1()->isop(LIR_add) &&
                msw->oprnd2()->isop(LIR_rsh) &&
                msw->oprnd1()->oprnd1()->isop(LIR_rsh) &&
                msw->oprnd1()->oprnd2()->isop(LIR_rsh) &&
                isconst(msw->oprnd2()->oprnd2(), 16) &&
                isconst(msw->oprnd1()->oprnd1()->oprnd2(), 16) &&
                isconst(msw->oprnd1()->oprnd2()->oprnd2(), 16) &&
                (x = lsw->oprnd1()->oprnd1()) == msw->oprnd1()->oprnd1()->oprnd1() &&
                (y = lsw->oprnd2()->oprnd1()) == msw->oprnd1()->oprnd2()->oprnd1() &&
                lsw == msw->oprnd2()->oprnd1()) {
                return out->ins2(LIR_add, x, y);
            }
        }
#ifdef NANOJIT_ARM
        else if (v == LIR_lsh ||
                 v == LIR_rsh ||
                 v == LIR_ush)
        {
            // needed on ARM -- arm doesn't mask shifts to 31 like x86 does
            if (s1->isconst())
                s1->setimm16(s1->constval() & 31);
            else
                s1 = out->ins2(LIR_and, s1, out->insImm(31));
            return out->ins2(v, s0, s1);
        }
#endif
        return out->ins2(v, s0, s1);
    }

    LInsp insCall(const CallInfo *ci, LInsp args[])
    {
        LInsp s0 = args[0];
        if (ci == &js_DoubleToUint32_ci) {
            if (s0->isconstq())
                return out->insImm(js_DoubleToECMAUint32(s0->constvalf()));
            if (isi2f(s0) || isu2f(s0))
                return iu2fArg(s0);
        } else if (ci == &js_DoubleToInt32_ci) {
            if (s0->isconstq())
                return out->insImm(js_DoubleToECMAInt32(s0->constvalf()));
            if (s0->isop(LIR_fadd) || s0->isop(LIR_fsub)) {
                LInsp lhs = s0->oprnd1();
                LInsp rhs = s0->oprnd2();
                if (isPromote(lhs) && isPromote(rhs)) {
                    /* Strip the 64-bit flag to get the int form of the op. */
                    LOpcode op = LOpcode(s0->opcode() & ~LIR64);
                    return out->ins2(op, demote(out, lhs), demote(out, rhs));
                }
            }
            if (isi2f(s0) || isu2f(s0))
                return iu2fArg(s0);
            // XXX ARM -- check for qjoin(call(UnboxDouble),call(UnboxDouble))
            if (s0->isCall() && s0->callInfo() == &js_UnboxDouble_ci) {
                LIns* args2[] = { callArgN(s0, 0) };
                return out->insCall(&js_UnboxInt32_ci, args2);
            }
            if (s0->isCall() && s0->callInfo() == &js_StringToNumber_ci) {
                // callArgN's ordering is that as seen by the builtin, not as stored in args here.
                // True story!
                LIns* args2[] = { callArgN(s0, 1), callArgN(s0, 0) };
                return out->insCall(&js_StringToInt32_ci, args2);
            }
        } else if (ci == &js_BoxDouble_ci) {
            JS_ASSERT(s0->isQuad());
            if (s0->isop(LIR_i2f)) {
                /* Boxing a freshly converted int: box the int directly. */
                LIns* args2[] = { s0->oprnd1(), args[1] };
                return out->insCall(&js_BoxInt32_ci, args2);
            }
            if (s0->isCall() && s0->callInfo() == &js_UnboxDouble_ci)
                return callArgN(s0, 0);
        }
        return out->insCall(ci, args);
    }
};
/* In debug mode vpname contains a textual description of the type of the
   slot during the forall iteration over all slots. */
#ifdef JS_JIT_SPEW
#define DEF_VPNAME const char* vpname; unsigned vpnum
#define SET_VPNAME(name) do { vpname = name; vpnum = 0; } while(0)
#define INC_VPNUM() do { ++vpnum; } while(0)
#else
#define DEF_VPNAME do {} while (0)
#define vpname ""
#define vpnum 0
#define SET_VPNAME(name) ((void)0)
#define INC_VPNUM() ((void)0)
#endif

/* Iterate over all interned global variables.  Binds `vp` to each slot in
   turn and runs `code` on it. */
#define FORALL_GLOBAL_SLOTS(cx, ngslots, gslots, code)                        \
    JS_BEGIN_MACRO                                                            \
        DEF_VPNAME;                                                           \
        JSObject* globalObj = JS_GetGlobalForObject(cx, cx->fp->scopeChain);  \
        unsigned n;                                                           \
        jsval* vp;                                                            \
        SET_VPNAME("global");                                                 \
        for (n = 0; n < ngslots; ++n) {                                       \
            vp = &STOBJ_GET_SLOT(globalObj, gslots[n]);                       \
            { code; }                                                         \
            INC_VPNUM();                                                      \
        }                                                                     \
    JS_END_MACRO

/* Iterate over all slots in the frame, consisting of args, vars, and stack
   (except for the top-level frame which does not have args or vars).
   NOTE: relies on fsp/fspstop being in scope, i.e. it must be expanded
   inside FORALL_SLOTS_IN_PENDING_FRAMES below. */
#define FORALL_FRAME_SLOTS(fp, depth, code)                                   \
    JS_BEGIN_MACRO                                                            \
        jsval* vp;                                                            \
        jsval* vpstop;                                                        \
        if (fp->callee) {                                                     \
            if (depth == 0) {                                                 \
                SET_VPNAME("callee");                                         \
                vp = &fp->argv[-2];                                           \
                { code; }                                                     \
                SET_VPNAME("this");                                           \
                vp = &fp->argv[-1];                                           \
                { code; }                                                     \
                SET_VPNAME("argv");                                           \
                vp = &fp->argv[0]; vpstop = &fp->argv[fp->fun->nargs];        \
                while (vp < vpstop) { code; ++vp; INC_VPNUM(); }              \
            }                                                                 \
            SET_VPNAME("vars");                                               \
            vp = fp->slots; vpstop = &fp->slots[fp->script->nfixed];          \
            while (vp < vpstop) { code; ++vp; INC_VPNUM(); }                  \
        }                                                                     \
        SET_VPNAME("stack");                                                  \
        vp = StackBase(fp); vpstop = fp->regs->sp;                            \
        while (vp < vpstop) { code; ++vp; INC_VPNUM(); }                      \
        if (fsp < fspstop - 1) {                                              \
            JSStackFrame* fp2 = fsp[1];                                       \
            int missing = fp2->fun->nargs - fp2->argc;                        \
            if (missing > 0) {                                                \
                SET_VPNAME("missing");                                        \
                vp = fp->regs->sp;                                            \
                vpstop = vp + missing;                                        \
                while (vp < vpstop) { code; ++vp; INC_VPNUM(); }              \
            }                                                                 \
        }                                                                     \
    JS_END_MACRO

/* Iterate over all slots in each pending frame.  Builds an alloca'd array
   of the frames from the entry frame to the current frame (innermost
   last), then walks each frame's slots outermost-first. */
#define FORALL_SLOTS_IN_PENDING_FRAMES(cx, callDepth, code)                   \
    JS_BEGIN_MACRO                                                            \
        DEF_VPNAME;                                                           \
        unsigned n;                                                           \
        JSStackFrame* currentFrame = cx->fp;                                  \
        JSStackFrame* entryFrame;                                             \
        JSStackFrame* fp = currentFrame;                                      \
        for (n = 0; n < callDepth; ++n) { fp = fp->down; }                    \
        entryFrame = fp;                                                      \
        unsigned frames = callDepth+1;                                        \
        JSStackFrame** fstack =                                               \
            (JSStackFrame**) alloca(frames * sizeof (JSStackFrame*));         \
        JSStackFrame** fspstop = &fstack[frames];                             \
        JSStackFrame** fsp = fspstop-1;                                       \
        fp = currentFrame;                                                    \
        for (;; fp = fp->down) { *fsp-- = fp; if (fp == entryFrame) break; }  \
        unsigned depth;                                                       \
        for (depth = 0, fsp = fstack; fsp < fspstop; ++fsp, ++depth) {        \
            fp = *fsp;                                                        \
            FORALL_FRAME_SLOTS(fp, depth, code);                              \
        }                                                                     \
    JS_END_MACRO

/* Iterate over globals first, then all pending-frame slots. */
#define FORALL_SLOTS(cx, ngslots, gslots, callDepth, code)                    \
    JS_BEGIN_MACRO                                                            \
        FORALL_GLOBAL_SLOTS(cx, ngslots, gslots, code);                       \
        FORALL_SLOTS_IN_PENDING_FRAMES(cx, callDepth, code);                  \
    JS_END_MACRO
/* Calculate the total number of native frame slots we need from this frame
   all the way back to the entry frame, including the current stack usage.
   In debug builds the result is cross-checked against a slot-by-slot walk
   using FORALL_SLOTS_IN_PENDING_FRAMES. */
unsigned
js_NativeStackSlots(JSContext *cx, unsigned callDepth)
{
    JSStackFrame* fp = cx->fp;
    unsigned slots = 0;
#if defined _DEBUG
    unsigned int origCallDepth = callDepth;
#endif
    for (;;) {
        /* Operand-stack slots currently in use in this frame. */
        unsigned operands = fp->regs->sp - StackBase(fp);
        slots += operands;
        if (fp->callee)
            slots += fp->script->nfixed;
        if (callDepth-- == 0) {
            /* Entry frame: also count callee, this, and the formal args. */
            if (fp->callee)
                slots += 2/*callee,this*/ + fp->fun->nargs;
#if defined _DEBUG
            unsigned int m = 0;
            FORALL_SLOTS_IN_PENDING_FRAMES(cx, origCallDepth, m++);
            JS_ASSERT(m == slots);
#endif
            return slots;
        }
        JSStackFrame* fp2 = fp;
        fp = fp->down;
        /* Formal args the caller did not supply still occupy slots. */
        int missing = fp2->fun->nargs - fp2->argc;
        if (missing > 0)
            slots += missing;
    }
    JS_NOT_REACHED("js_NativeStackSlots");
}
/* Capture the type map for the selected slots of the global object.
   Slots the oracle has marked undemotable are recorded as doubles even
   when they currently hold ints. */
void
TypeMap::captureGlobalTypes(JSContext* cx, SlotList& slots)
{
    unsigned ngslots = slots.length();
    uint16* gslots = slots.data();
    setLength(ngslots);
    uint8* map = data();
    uint8* m = map;
    FORALL_GLOBAL_SLOTS(cx, ngslots, gslots,
        uint8 type = getCoercedType(*vp);
        if ((type == JSVAL_INT) && oracle.isGlobalSlotUndemotable(cx->fp->script, gslots[n]))
            type = JSVAL_DOUBLE;
        JS_ASSERT(type != JSVAL_BOXED);
        *m++ = type;
    );
}

/* Capture the type map for the currently pending stack frames.  As above,
   the oracle can force individual stack slots to stay doubles. */
void
TypeMap::captureStackTypes(JSContext* cx, unsigned callDepth)
{
    setLength(js_NativeStackSlots(cx, callDepth));
    uint8* map = data();
    uint8* m = map;
    FORALL_SLOTS_IN_PENDING_FRAMES(cx, callDepth,
        uint8 type = getCoercedType(*vp);
        if ((type == JSVAL_INT) &&
            oracle.isStackSlotUndemotable(cx->fp->script, cx->fp->regs->pc, unsigned(m - map))) {
            type = JSVAL_DOUBLE;
        }
        debug_only_v(printf("capture %s%d: %d\n", vpname, vpnum, type);)
        *m++ = type;
    );
}
- /* Compare this type map to another one and see whether they match. */
- bool
- TypeMap::matches(TypeMap& other) const
- {
- if (length() != other.length())
- return false;
- return !memcmp(data(), other.data(), length());
- }
- /* Use the provided storage area to create a new type map that contains the partial type map
- with the rest of it filled up from the complete type map. */
- static void
- mergeTypeMaps(uint8** partial, unsigned* plength, uint8* complete, unsigned clength, uint8* mem)
- {
- unsigned l = *plength;
- JS_ASSERT(l < clength);
- memcpy(mem, *partial, l * sizeof(uint8));
- memcpy(mem + l, complete + l, (clength - l) * sizeof(uint8));
- *partial = mem;
- *plength = clength;
- }
static void
js_TrashTree(JSContext* cx, Fragment* f);

/*
 * Start recording a trace into _fragment.  Sets up the LIR writer pipeline
 * (buffer writer -> [verbose] -> [soft-float] -> CSE -> expr -> func
 * filters), loads the interpreter state registers, and imports the known
 * stack/global slots.  The filter chain order is significant: each `lir`
 * assignment wraps the previous writer.
 */
TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* _anchor, Fragment* _fragment,
        TreeInfo* ti, unsigned ngslots, uint8* globalTypeMap, uint8* stackTypeMap,
        VMSideExit* innermostNestedGuard, Fragment* outerToBlacklist)
{
    JS_ASSERT(!_fragment->vmprivate && ti);

    this->cx = cx;
    this->traceMonitor = &JS_TRACE_MONITOR(cx);
    this->globalObj = JS_GetGlobalForObject(cx, cx->fp->scopeChain);
    this->anchor = _anchor;
    this->fragment = _fragment;
    this->lirbuf = _fragment->lirbuf;
    this->treeInfo = ti;
    /* Recording may begin at a side exit, in which case we inherit its call depth. */
    this->callDepth = _anchor ? _anchor->calldepth : 0;
    this->atoms = cx->fp->script->atomMap.vector;
    this->deepAborted = false;
    this->applyingArguments = false;
    this->trashTree = false;
    this->whichTreeToTrash = _fragment->root;
    this->global_dslots = this->globalObj->dslots;
    this->terminate = false;
    this->outerToBlacklist = outerToBlacklist;
    this->wasRootFragment = _fragment == _fragment->root;

    debug_only_v(printf("recording starting from %s:%u@%u\n",
                        cx->fp->script->filename,
                        js_FramePCToLineNumber(cx, cx->fp),
                        FramePCOffset(cx->fp));)
    debug_only_v(printf("globalObj=%p, shape=%d\n", this->globalObj, OBJ_SHAPE(this->globalObj));)

    lir = lir_buf_writer = new (&gc) LirBufWriter(lirbuf);
#ifdef DEBUG
    if (verbose_debug)
        lir = verbose_filter = new (&gc) VerboseWriter(&gc, lir, lirbuf->names);
#endif
#ifdef NJ_SOFTFLOAT
    lir = float_filter = new (&gc) SoftFloatFilter(lir);
#endif
    lir = cse_filter = new (&gc) CseFilter(lir, &gc);
    lir = expr_filter = new (&gc) ExprFilter(lir);
    lir = func_filter = new (&gc) FuncFilter(lir);
    lir->ins0(LIR_start);

    if (!nanojit::AvmCore::config.tree_opt || fragment->root == fragment)
        lirbuf->state = addName(lir->insParam(0, 0), "state");

    /* Cache the InterpState fields in named LIR loads. */
    lirbuf->sp = addName(lir->insLoad(LIR_ldp, lirbuf->state, (int)offsetof(InterpState, sp)), "sp");
    lirbuf->rp = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, rp)), "rp");
    cx_ins = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, cx)), "cx");
    gp_ins = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, gp)), "gp");
    eos_ins = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, eos)), "eos");
    eor_ins = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, eor)), "eor");

    /* read into registers all values on the stack and all globals we know so far */
    import(treeInfo, lirbuf->sp, ngslots, callDepth, globalTypeMap, stackTypeMap);

    /* If we are attached to a tree call guard, make sure the guard the inner tree exited from
       is what we expect it to be. */
    if (_anchor && _anchor->exitType == NESTED_EXIT) {
        LIns* nested_ins = addName(lir->insLoad(LIR_ldp, lirbuf->state,
                                                offsetof(InterpState, lastTreeExitGuard)),
                                                "lastTreeExitGuard");
        guard(true, lir->ins2(LIR_eq, nested_ins, INS_CONSTPTR(innermostNestedGuard)), NESTED_EXIT);
    }
}
- TreeInfo::~TreeInfo()
- {
- UnstableExit* temp;
- while (unstableExits) {
- temp = unstableExits->next;
- delete unstableExits;
- unstableExits = temp;
- }
- }
/*
 * Tear down the recorder.  If this recorder owned a root fragment that was
 * never compiled, its TreeInfo is deleted here; an optionally requested
 * tree trash is performed; finally the LIR writer pipeline built in the
 * constructor is deleted (in reverse order of construction).
 */
TraceRecorder::~TraceRecorder()
{
    JS_ASSERT(nextRecorderToAbort == NULL);
    JS_ASSERT(treeInfo && (fragment || wasDeepAborted()));
#ifdef DEBUG
    /* This recorder must not still be queued for abort. */
    TraceRecorder* tr = JS_TRACE_MONITOR(cx).abortStack;
    while (tr != NULL)
    {
        JS_ASSERT(this != tr);
        tr = tr->nextRecorderToAbort;
    }
#endif
    if (fragment) {
        if (wasRootFragment && !fragment->root->code()) {
            /* Root never compiled: we still own the TreeInfo. */
            JS_ASSERT(!fragment->root->vmprivate);
            delete treeInfo;
        }
        if (trashTree)
            js_TrashTree(cx, whichTreeToTrash);
    } else if (wasRootFragment) {
        delete treeInfo;
    }
#ifdef DEBUG
    delete verbose_filter;
#endif
    delete cse_filter;
    delete expr_filter;
    delete func_filter;
#ifdef NJ_SOFTFLOAT
    delete float_filter;
#endif
    delete lir_buf_writer;
}
/* Drop this recorder's pointer into Fragmento-owned memory.
   NOTE(review): presumably called when the Fragmento is being flushed —
   confirm at the call sites. */
void TraceRecorder::removeFragmentoReferences()
{
    fragment = NULL;
}

/* Add debug information to a LIR instruction as we emit it. */
inline LIns*
TraceRecorder::addName(LIns* ins, const char* name)
{
#ifdef DEBUG
    lirbuf->names->addName(ins, name);
#endif
    return ins;
}

/* Determine the current call depth (starting with the entry frame.) */
unsigned
TraceRecorder::getCallDepth() const
{
    return callDepth;
}
/* Determine the offset in the native global frame for a jsval we track.
   Fixed slots (fslots) come first, then the dynamically allocated dslots;
   every slot occupies sizeof(double) bytes in the native frame. */
ptrdiff_t
TraceRecorder::nativeGlobalOffset(jsval* p) const
{
    JS_ASSERT(isGlobal(p));
    if (size_t(p - globalObj->fslots) < JS_INITIAL_NSLOTS)
        return size_t(p - globalObj->fslots) * sizeof(double);
    return ((p - globalObj->dslots) + JS_INITIAL_NSLOTS) * sizeof(double);
}

/* Determine whether a value is a global stack slot (in either the fixed
   or dynamic slot array of the global object). */
bool
TraceRecorder::isGlobal(jsval* p) const
{
    return ((size_t(p - globalObj->fslots) < JS_INITIAL_NSLOTS) ||
            (size_t(p - globalObj->dslots) < (STOBJ_NSLOTS(globalObj) - JS_INITIAL_NSLOTS)));
}
/*
 * Determine the offset in the native stack for a jsval we track.  Walks
 * the pending frames outermost-first, accumulating the native layout
 * (callee/this/args, fixed vars, operand stack, missing args) until p is
 * found.  In debug builds a slot-by-slot walk computes the same offset
 * independently, and the RETURN macro asserts both agree.
 */
ptrdiff_t
TraceRecorder::nativeStackOffset(jsval* p) const
{
#ifdef DEBUG
    size_t slow_offset = 0;
    FORALL_SLOTS_IN_PENDING_FRAMES(cx, callDepth,
        if (vp == p) goto done;
        slow_offset += sizeof(double)
    );

    /*
     * If it's not in a pending frame, it must be on the stack of the current frame above
     * sp but below fp->slots + script->nslots.
     */
    JS_ASSERT(size_t(p - cx->fp->slots) < cx->fp->script->nslots);
    slow_offset += size_t(p - cx->fp->regs->sp) * sizeof(double);
done:
#define RETURN(offset) { JS_ASSERT((offset) == slow_offset); return offset; }
#else
#define RETURN(offset) { return offset; }
#endif
    size_t offset = 0;
    JSStackFrame* currentFrame = cx->fp;
    JSStackFrame* entryFrame;
    JSStackFrame* fp = currentFrame;
    for (unsigned n = 0; n < callDepth; ++n) { fp = fp->down; }
    entryFrame = fp;
    unsigned frames = callDepth+1;
    /* Collect the pending frames, entry frame first (mirrors
       FORALL_SLOTS_IN_PENDING_FRAMES). */
    JSStackFrame** fstack = (JSStackFrame **)alloca(frames * sizeof (JSStackFrame *));
    JSStackFrame** fspstop = &fstack[frames];
    JSStackFrame** fsp = fspstop-1;
    fp = currentFrame;
    for (;; fp = fp->down) { *fsp-- = fp; if (fp == entryFrame) break; }
    for (fsp = fstack; fsp < fspstop; ++fsp) {
        fp = *fsp;
        if (fp->callee) {
            if (fsp == fstack) {
                /* Entry frame: callee, this, and formal args come first. */
                if (size_t(p - &fp->argv[-2]) < size_t(2/*callee,this*/ + fp->fun->nargs))
                    RETURN(offset + size_t(p - &fp->argv[-2]) * sizeof(double));
                offset += (2/*callee,this*/ + fp->fun->nargs) * sizeof(double);
            }
            if (size_t(p - &fp->slots[0]) < fp->script->nfixed)
                RETURN(offset + size_t(p - &fp->slots[0]) * sizeof(double));
            offset += fp->script->nfixed * sizeof(double);
        }
        jsval* spbase = StackBase(fp);
        if (size_t(p - spbase) < size_t(fp->regs->sp - spbase))
            RETURN(offset + size_t(p - spbase) * sizeof(double));
        offset += size_t(fp->regs->sp - spbase) * sizeof(double);
        if (fsp < fspstop - 1) {
            /* Account for formal args the next frame's caller didn't push. */
            JSStackFrame* fp2 = fsp[1];
            int missing = fp2->fun->nargs - fp2->argc;
            if (missing > 0) {
                if (size_t(p - fp->regs->sp) < size_t(missing))
                    RETURN(offset + size_t(p - fp->regs->sp) * sizeof(double));
                offset += size_t(missing) * sizeof(double);
            }
        }
    }

    /*
     * If it's not in a pending frame, it must be on the stack of the current frame above
     * sp but below fp->slots + script->nslots.
     */
    JS_ASSERT(size_t(p - currentFrame->slots) < currentFrame->script->nslots);
    offset += size_t(p - currentFrame->regs->sp) * sizeof(double);
    RETURN(offset);
#undef RETURN
}
- /* Track the maximum number of native frame slots we need during
- execution. */
- void
- TraceRecorder::trackNativeStackUse(unsigned slots)
- {
- if (slots > treeInfo->maxNativeStackSlots)
- treeInfo->maxNativeStackSlots = slots;
- }
- /* Unbox a jsval into a slot. Slots are wide enough to hold double values directly (instead of
- storing a pointer to them). We now assert instead of type checking, the caller must ensure the
- types are compatible. */
- static void
- ValueToNative(JSContext* cx, jsval v, uint8 type, double* slot)
- {
- unsigned tag = JSVAL_TAG(v);
- switch (type) {
- case JSVAL_INT:
- jsint i;
- if (JSVAL_IS_INT(v))
- *(jsint*)slot = JSVAL_TO_INT(v);
- else if ((tag == JSVAL_DOUBLE) && JSDOUBLE_IS_INT(*JSVAL_TO_DOUBLE(v), i))
- *(jsint*)slot = i;
- else
- JS_ASSERT(JSVAL_IS_INT(v));
- debug_only_v(printf("int<%d> ", *(jsint*)slot);)
- return;
- case JSVAL_DOUBLE:
- jsdouble d;
- if (JSVAL_IS_INT(v))
- d = JSVAL_TO_INT(v);
- else
- d = *JSVAL_TO_DOUBLE(v);
- JS_ASSERT(JSVAL_IS_INT(v) || JSVAL_IS_DOUBLE(v));
- *(jsdouble*)slot = d;
- debug_only_v(printf("double<%g> ", d);)
- return;
- case JSVAL_BOOLEAN:
- JS_ASSERT(tag == JSVAL_BOOLEAN);
- *(JSBool*)slot = JSVAL_TO_BOOLEAN(v);
- debug_only_v(printf("boolean<%d> ", *(JSBool*)slot);)
- return;
- case JSVAL_STRING:
- JS_ASSERT(tag == JSVAL_STRING);
- *(JSString**)slot = JSVAL_TO_STRING(v);
- debug_only_v(printf("string<%p> ", *(JSString**)slot);)
- return;
- default:
- /* Note: we should never see JSVAL_BOXED in an entry type map. */
- JS_ASSERT(type == JSVAL_OBJECT);
- JS_ASSERT(tag == JSVAL_OBJECT);
- *(JSObject**)slot = JSVAL_TO_OBJECT(v);
- debug_only_v(printf("object<%p:%s> ", JSVAL_TO_OBJECT(v),
- JSVAL_IS_NULL(v)
- ? "null"
- : STOBJ_GET_CLASS(JSVAL_TO_OBJECT(v))->name);)
- return;
- }
- }
- /* We maintain an emergency recovery pool of doubles so we can recover safely if a trace runs
- out of memory (doubles or objects). */
- static jsval
- AllocateDoubleFromRecoveryPool(JSContext* cx)
- {
- JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
- JS_ASSERT(tm->recoveryDoublePoolPtr > tm->recoveryDoublePool);
- return *--tm->recoveryDoublePoolPtr;
- }
/*
 * Refill the emergency double recovery pool up to MAX_NATIVE_STACK_SLOTS
 * entries. Returns false on OOM, in which case the pool is left empty.
 */
static bool
js_ReplenishRecoveryPool(JSContext* cx, JSTraceMonitor* tm)
{
    /* We should not be called with a full pool. */
    JS_ASSERT((size_t) (tm->recoveryDoublePoolPtr - tm->recoveryDoublePool) <
              MAX_NATIVE_STACK_SLOTS);
    /*
     * When the GC runs in js_NewDoubleInRootedValue, it resets
     * tm->recoveryDoublePoolPtr back to tm->recoveryDoublePool.
     */
    JSRuntime* rt = cx->runtime;
    uintN gcNumber = rt->gcNumber;
    jsval* ptr = tm->recoveryDoublePoolPtr;
    while (ptr < tm->recoveryDoublePool + MAX_NATIVE_STACK_SLOTS) {
        if (!js_NewDoubleInRootedValue(cx, 0.0, ptr))
            goto oom;
        if (rt->gcNumber != gcNumber) {
            /* A GC ran and emptied the pool; restart filling from the beginning. */
            JS_ASSERT(tm->recoveryDoublePoolPtr == tm->recoveryDoublePool);
            ptr = tm->recoveryDoublePool;
            /* Give up if more than one GC has run since we started filling. */
            if (uintN(rt->gcNumber - gcNumber) > uintN(1))
                goto oom;
            continue;
        }
        ++ptr;
    }
    tm->recoveryDoublePoolPtr = ptr;
    return true;
oom:
    /*
     * Already massive GC pressure, no need to hold doubles back.
     * We won't run any native code anyway.
     */
    tm->recoveryDoublePoolPtr = tm->recoveryDoublePool;
    return false;
}
/*
 * Box a value from the native stack back into the jsval format. Integers that
 * are too large to fit into a jsval are automatically boxed into
 * heap-allocated doubles; if the heap is exhausted, a double is taken from the
 * emergency recovery pool instead. Returns true on success.
 */
static bool
NativeToValue(JSContext* cx, jsval& v, uint8 type, double* slot)
{
    jsint i;
    jsdouble d;
    switch (type) {
      case JSVAL_BOOLEAN:
        v = BOOLEAN_TO_JSVAL(*(JSBool*)slot);
        debug_only_v(printf("boolean<%d> ", *(JSBool*)slot);)
        break;
      case JSVAL_INT:
        i = *(jsint*)slot;
        debug_only_v(printf("int<%d> ", i);)
      store_int:
        /* Small ints are stored as tagged immediates; larger ones fall through to a heap double. */
        if (INT_FITS_IN_JSVAL(i)) {
            v = INT_TO_JSVAL(i);
            break;
        }
        d = (jsdouble)i;
        goto store_double;
      case JSVAL_DOUBLE:
        d = *slot;
        debug_only_v(printf("double<%g> ", d);)
        /* Prefer the compact int representation when the double is integral. */
        if (JSDOUBLE_IS_INT(d, i))
            goto store_int;
      store_double: {
        /* Its not safe to trigger the GC here, so use an emergency heap if we are out of
           double boxes. */
        if (cx->doubleFreeList) {
#ifdef DEBUG
            bool ok =
#endif
                js_NewDoubleInRootedValue(cx, d, &v);
            JS_ASSERT(ok);
            return true;
        }
        v = AllocateDoubleFromRecoveryPool(cx);
        JS_ASSERT(JSVAL_IS_DOUBLE(v) && *JSVAL_TO_DOUBLE(v) == 0.0);
        *JSVAL_TO_DOUBLE(v) = d;
        return true;
      }
      case JSVAL_STRING:
        v = STRING_TO_JSVAL(*(JSString**)slot);
        JS_ASSERT(JSVAL_TAG(v) == JSVAL_STRING); /* if this fails the pointer was not aligned */
        debug_only_v(printf("string<%p> ", *(JSString**)slot);)
        break;
      case JSVAL_BOXED:
        /* Already a fully boxed jsval; copy it through unchanged. */
        v = *(jsval*)slot;
        debug_only_v(printf("box<%lx> ", v));
        break;
      default:
        JS_ASSERT(type == JSVAL_OBJECT);
        v = OBJECT_TO_JSVAL(*(JSObject**)slot);
        JS_ASSERT(JSVAL_TAG(v) == JSVAL_OBJECT); /* if this fails the pointer was not aligned */
        debug_only_v(printf("object<%p:%s> ", JSVAL_TO_OBJECT(v),
                            JSVAL_IS_NULL(v)
                            ? "null"
                            : STOBJ_GET_CLASS(JSVAL_TO_OBJECT(v))->name);)
        break;
    }
    return true;
}
/*
 * Attempt to unbox the given list of interned globals onto the native global
 * frame. mp is the per-slot type map; np is the base of the native global
 * frame, indexed by the global slot numbers in gslots.
 */
static void
BuildNativeGlobalFrame(JSContext* cx, unsigned ngslots, uint16* gslots, uint8* mp, double* np)
{
    debug_only_v(printf("global: ");)
    FORALL_GLOBAL_SLOTS(cx, ngslots, gslots,
        ValueToNative(cx, *vp, *mp, np + gslots[n]);
        ++mp;
    );
    debug_only_v(printf("\n");)
}
/*
 * Attempt to unbox the given JS frame onto a native frame. Walks every slot in
 * the pending frames (up to callDepth), converting each jsval to its native
 * representation; mp (type map) and np (native stack) advance in lockstep.
 */
static void
BuildNativeStackFrame(JSContext* cx, unsigned callDepth, uint8* mp, double* np)
{
    debug_only_v(printf("stack: ");)
    FORALL_SLOTS_IN_PENDING_FRAMES(cx, callDepth,
        debug_only_v(printf("%s%u=", vpname, vpnum);)
        ValueToNative(cx, *vp, *mp, np);
        ++mp; ++np;
    );
    debug_only_v(printf("\n");)
}
/*
 * Box the given native global frame back into JS globals. This only fails due
 * to a hard error (out of memory for example), in which case -1 is returned;
 * otherwise returns the number of type-map entries consumed.
 */
static int
FlushNativeGlobalFrame(JSContext* cx, unsigned ngslots, uint16* gslots, uint8* mp, double* np)
{
    uint8* mp_base = mp;
    FORALL_GLOBAL_SLOTS(cx, ngslots, gslots,
        if (!NativeToValue(cx, *vp, *mp, np + gslots[n]))
            return -1;
        ++mp;
    );
    debug_only_v(printf("\n");)
    return mp - mp_base;
}
- /**
- * Box the given native stack frame into the virtual machine stack. This fails
- * only due to a hard error (out of memory for example).
- *
- * @param callDepth the distance between the entry frame into our trace and
- * cx->fp when we make this call. If this is not called as a
- * result of a nested exit, callDepth is 0.
- * @param mp pointer to an array of type tags (JSVAL_INT, etc.) that indicate
- * what the types of the things on the stack are.
- * @param np pointer to the native stack. We want to copy values from here to
- * the JS stack as needed.
- * @param stopFrame if non-null, this frame and everything above it should not
- * be restored.
- * @return the number of things we popped off of np.
- */
- static int
- FlushNativeStackFrame(JSContext* cx, unsigned callDepth, uint8* mp, double* np,
- JSStackFrame* stopFrame)
- {
- jsval* stopAt = stopFrame ? &stopFrame->argv[-2] : NULL;
- uint8* mp_base = mp;
- /* Root all string and object references first (we don't need to call the GC for this). */
- FORALL_SLOTS_IN_PENDING_FRAMES(cx, callDepth,
- if (vp == stopAt) goto skip;
- debug_only_v(printf("%s%u=", vpname, vpnum);)
- if (!NativeToValue(cx, *vp, *mp, np))
- return -1;
- ++mp; ++np
- );
- skip:
- // Restore thisp from the now-restored argv[-1] in each pending frame.
- // Keep in mind that we didn't restore frames at stopFrame and above!
- // Scope to keep |fp| from leaking into the macros we're using.
- {
- unsigned n = callDepth+1; // +1 to make sure we restore the entry frame
- JSStackFrame* fp = cx->fp;
- if (stopFrame) {
- for (; fp != stopFrame; fp = fp->down) {
- JS_ASSERT(n != 0);
- --n;
- }
- // Skip over stopFrame itself.
- JS_ASSERT(n != 0);
- --n;
- fp = fp->down;
- }
- for (; n != 0; fp = fp->down) {
- --n;
- if (fp->callee) { // might not have it if the entry frame is global
- JS_ASSERT(JSVAL_IS_OBJECT(fp->argv[-1]));
- fp->thisp = JSVAL_TO_OBJECT(fp->argv[-1]);
- }
- }
- }
- debug_only_v(printf("\n");)
- return mp - mp_base;
- }
- /* Emit load instructions onto the trace that read the initial stack state. */
- void
- TraceRecorder::import(LIns* base, ptrdiff_t offset, jsval* p, uint8& t,
- const char *prefix, uintN index, JSStackFrame *fp)
- {
- LIns* ins;
- if (t == JSVAL_INT) { /* demoted */
- JS_ASSERT(isInt32(*p));
- /* Ok, we have a valid demotion attempt pending, so insert an integer
- read and promote it to double since all arithmetic operations expect
- to see doubles on entry. The first op to use this slot will emit a
- f2i cast which will cancel out the i2f we insert here. */
- ins = lir->insLoadi(base, offset);
- ins = lir->ins1(LIR_i2f, ins);
- } else {
- JS_ASSERT(t == JSVAL_BOXED || isNumber(*p) == (t == JSVAL_DOUBLE));
- if (t == JSVAL_DOUBLE) {
- ins = lir->insLoad(LIR_ldq, base, offset);
- } else if (t == JSVAL_BOOLEAN) {
- ins = lir->insLoad(LIR_ld, base, offset);
- } else {
- ins = lir->insLoad(LIR_ldp, base, offset);
- }
- }
- tracker.set(p, ins);
- #ifdef DEBUG
- char name[64];
- JS_ASSERT(strlen(prefix) < 10);
- void* mark = NULL;
- jsuword* localNames = NULL;
- const char* funName = NULL;
- if (*prefix == 'a' || *prefix == 'v') {
- mark = JS_ARENA_MARK(&cx->tempPool);
- if (JS_GET_LOCAL_NAME_COUNT(fp->fun) != 0)
- localNames = js_GetLocalNameArray(cx, fp->fun, &cx->tempPool);
- funName = fp->fun->atom ? js_AtomToPrintableString(cx, fp->fun->atom) : "<anonymous>";
- }
- if (!strcmp(prefix, "argv")) {
- if (index < fp->fun->nargs) {
- JSAtom *atom = JS_LOCAL_NAME_TO_ATOM(localNames[index]);
- JS_snprintf(name, sizeof name, "$%s.%s", funName, js_AtomToPrintableString(cx, atom));
- } else {
- JS_snprintf(name, sizeof name, "$%s…
Large files are truncated, but you can click here to view the full file