/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITInlineMethods.h


/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef JITInlineMethods_h
#define JITInlineMethods_h

#include <wtf/Platform.h>

#if ENABLE(JIT)

namespace JSC {

/* Deprecated: Please use JITStubCall instead. */
// puts an arg onto the stack, as an arg to a context threaded function.
ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID src, unsigned argumentNumber)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    poke(src, argumentStackOffset);
}

/* Deprecated: Please use JITStubCall instead. */
ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(unsigned value, unsigned argumentNumber)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    poke(Imm32(value), argumentStackOffset);
}

/* Deprecated: Please use JITStubCall instead. */
ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(void* value, unsigned argumentNumber)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    poke(ImmPtr(value), argumentStackOffset);
}

/* Deprecated: Please use JITStubCall instead. */
ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    peek(dst, argumentStackOffset);
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
}

ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
{
    ASSERT(m_codeBlock->isConstantRegisterIndex(src));
    return m_codeBlock->getConstant(src);
}

ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(from, Address(callFrameRegister, entry * sizeof(Register)));
}

ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(ImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
}

ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    loadPtr(Address(from, entry * sizeof(Register)), to);
#if !USE(JSVALUE32_64)
    killLastResultRegister();
#endif
}

ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    load32(Address(from, entry * sizeof(Register)), to);
#if !USE(JSVALUE32_64)
    killLastResultRegister();
#endif
}
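
// Emits a bare (near) call with no stub frame setup and records it, together with the
// current bytecode index and the target's executable address, so it can be linked to
// its destination when code generation is finalized.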
ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    Call nakedCall = nearCall();
    m_calls.append(CallRecord(nakedCall, m_bytecodeIndex, function.executableAddress()));
    return nakedCall;
}
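
// On assemblers that use a constant pool (notably traditional ARM), an "uninterrupted
// sequence" reserves enough instruction and constant-pool space up front so that the
// pool is not flushed into the middle of code that must remain contiguous (for example,
// code that will later be repatched).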
#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL

ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace)
{
#if CPU(ARM_TRADITIONAL)
#ifndef NDEBUG
    // Ensure the label after the sequence can also fit
    insnSpace += sizeof(ARMWord);
    constSpace += sizeof(uint64_t);
#endif
    ensureSpace(insnSpace, constSpace);
#endif

#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
#ifndef NDEBUG
    m_uninterruptedInstructionSequenceBegin = label();
    m_uninterruptedConstantSequenceBegin = sizeOfConstantPool();
#endif
#endif
}

ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace)
{
#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
    ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) == insnSpace);
    ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin == constSpace);
#endif
}

#endif
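
// ARM keeps the return address in the link register, so preserving or restoring it is a
// register move (or a load); on x86/x86-64 the return address lives on the stack, so the
// equivalent operations are pop/push.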
#if CPU(ARM)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    move(linkRegister, reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    move(reg, linkRegister);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtr(address, linkRegister);
}

#else // CPU(X86) || CPU(X86_64)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    pop(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    push(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    push(address);
}

#endif
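
// Sets up the argument reference for a JIT stub call: stores the current CallFrame into
// the JITStackFrame's callFrame slot and, when stub arguments are passed via a pointer
// rather than a va_list, points the first argument register at the stub argument area on
// the machine stack. The trampoline variant on x86 accounts for the return address that
// is already on the stack at that point.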
#if USE(JIT_STUB_ARGUMENT_VA_LIST)
ALWAYS_INLINE void JIT::restoreArgumentReference()
{
    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
}

ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() {}
#else
ALWAYS_INLINE void JIT::restoreArgumentReference()
{
    move(stackPointerRegister, firstArgumentRegister);
    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
}

ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
{
#if CPU(X86)
    // Within a trampoline the return address will be on the stack at this point.
    addPtr(Imm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
#elif CPU(ARM)
    move(stackPointerRegister, firstArgumentRegister);
#endif
    // In the trampoline on x86-64, the first argument register is not overwritten.
}
#endif
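
// Returns a jump that is taken when the cell in 'reg' does not have the expected Structure.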
ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
{
    return branchPtr(NotEqual, Address(reg, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(structure));
}

ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        linkSlowCase(iter);
}
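
// Slow-case jumps emitted on the hot path are recorded together with the bytecode index
// they belong to; once all hot paths have been generated, the corresponding cold
// (slow-case) paths are emitted and these jumps are linked to them.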
ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    m_slowCases.append(SlowCaseEntry(jump, m_bytecodeIndex));
}

ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    const JumpList::JumpVector& jumpVector = jumpList.jumps();
    size_t size = jumpVector.size();
    for (size_t i = 0; i < size; ++i)
        m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeIndex));
}

ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    m_jmpTable.append(JumpTable(jump, m_bytecodeIndex + relativeOffset));
}

ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    jump.linkTo(m_labels[m_bytecodeIndex + relativeOffset], this);
}

#if ENABLE(SAMPLING_FLAGS)
ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    or32(Imm32(1u << (flag - 1)), AbsoluteAddress(&SamplingFlags::s_flags));
}

ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    and32(Imm32(~(1u << (flag - 1))), AbsoluteAddress(&SamplingFlags::s_flags));
}
#endif
#if ENABLE(SAMPLING_COUNTERS)
ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, uint32_t count)
{
#if CPU(X86_64) // Or any other 64-bit platform.
    addPtr(Imm32(count), AbsoluteAddress(&counter.m_counter));
#elif CPU(X86) // Or any other little-endian 32-bit platform.
    intptr_t hiWord = reinterpret_cast<intptr_t>(&counter.m_counter) + sizeof(int32_t);
    add32(Imm32(count), AbsoluteAddress(&counter.m_counter));
    addWithCarry32(Imm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
#else
#error "SAMPLING_COUNTERS not implemented on this platform."
#endif
}
#endif
#if ENABLE(OPCODE_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    move(ImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx);
    storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
}
#endif
#endif

#if ENABLE(CODEBLOCK_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    move(ImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx);
    storePtr(ImmPtr(codeBlock), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    storePtr(ImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
}
#endif
#endif
inline JIT::Address JIT::addressFor(unsigned index, RegisterID base)
{
    return Address(base, (index * sizeof(Register)));
}

#if USE(JSVALUE32_64)
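
// In the JSVALUE32_64 representation each virtual register holds a 64-bit value split
// into a 32-bit tag word and a 32-bit payload word, so loads and stores address the two
// halves separately via tagFor() and payloadFor().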
inline JIT::Address JIT::tagFor(unsigned index, RegisterID base)
{
    return Address(base, (index * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.tag));
}

inline JIT::Address JIT::payloadFor(unsigned index, RegisterID base)
{
    return Address(base, (index * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.payload));
}

inline void JIT::emitLoadTag(unsigned index, RegisterID tag)
{
    RegisterID mappedTag;
    if (getMappedTag(index, mappedTag)) {
        move(mappedTag, tag);
        unmap(tag);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).tag()), tag);
        unmap(tag);
        return;
    }

    load32(tagFor(index), tag);
    unmap(tag);
}

inline void JIT::emitLoadPayload(unsigned index, RegisterID payload)
{
    RegisterID mappedPayload;
    if (getMappedPayload(index, mappedPayload)) {
        move(mappedPayload, payload);
        unmap(payload);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).payload()), payload);
        unmap(payload);
        return;
    }

    load32(payloadFor(index), payload);
    unmap(payload);
}

inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
{
    move(Imm32(v.payload()), payload);
    move(Imm32(v.tag()), tag);
}

inline void JIT::emitLoad(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
{
    ASSERT(tag != payload);

    if (base == callFrameRegister) {
        ASSERT(payload != base);
        emitLoadPayload(index, payload);
        emitLoadTag(index, tag);
        return;
    }

    if (payload == base) { // avoid stomping base
        load32(tagFor(index, base), tag);
        load32(payloadFor(index, base), payload);
        return;
    }

    load32(payloadFor(index, base), payload);
    load32(tagFor(index, base), tag);
}
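
// Load whichever operand is currently mapped to machine registers first, so that the
// mapping is consumed before the other load has a chance to overwrite those registers.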
inline void JIT::emitLoad2(unsigned index1, RegisterID tag1, RegisterID payload1, unsigned index2, RegisterID tag2, RegisterID payload2)
{
    if (isMapped(index1)) {
        emitLoad(index1, tag1, payload1);
        emitLoad(index2, tag2, payload2);
        return;
    }
    emitLoad(index2, tag2, payload2);
    emitLoad(index1, tag1, payload1);
}

inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        Register& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(&inConstantPool, value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        Register& inConstantPool = m_codeBlock->constantRegister(index);
        char* bytePointer = reinterpret_cast<char*>(&inConstantPool);
        convertInt32ToDouble(AbsoluteAddress(bytePointer + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
    } else
        convertInt32ToDouble(payloadFor(index), value);
}

inline void JIT::emitStore(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
{
    store32(payload, payloadFor(index, base));
    store32(tag, tagFor(index, base));
}

inline void JIT::emitStoreInt32(unsigned index, RegisterID payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(Imm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreInt32(unsigned index, Imm32 payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(Imm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreCell(unsigned index, RegisterID payload, bool indexIsCell)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsCell)
        store32(Imm32(JSValue::CellTag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreBool(unsigned index, RegisterID tag, bool indexIsBool)
{
    if (!indexIsBool)
        store32(Imm32(0), payloadFor(index, callFrameRegister));
    store32(tag, tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreDouble(unsigned index, FPRegisterID value)
{
    storeDouble(value, addressFor(index));
}

inline void JIT::emitStore(unsigned index, const JSValue constant, RegisterID base)
{
    store32(Imm32(constant.payload()), payloadFor(index, base));
    store32(Imm32(constant.tag()), tagFor(index, base));
}

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    emitStore(dst, jsUndefined());
}

inline bool JIT::isLabeled(unsigned bytecodeIndex)
{
    for (size_t numberOfJumpTargets = m_codeBlock->numberOfJumpTargets(); m_jumpTargetIndex != numberOfJumpTargets; ++m_jumpTargetIndex) {
        unsigned jumpTarget = m_codeBlock->jumpTarget(m_jumpTargetIndex);
        if (jumpTarget == bytecodeIndex)
            return true;
        if (jumpTarget > bytecodeIndex)
            return false;
    }
    return false;
}
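
// A one-entry cache recording which virtual register's tag and payload currently live in
// machine registers. The mapping is only established when the bytecode index is not a
// jump target (a value cached across a label would be stale when jumped to), and it is
// invalidated whenever the cached registers are overwritten or the bytecode index changes.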
inline void JIT::map(unsigned bytecodeIndex, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload)
{
    if (isLabeled(bytecodeIndex))
        return;

    m_mappedBytecodeIndex = bytecodeIndex;
    m_mappedVirtualRegisterIndex = virtualRegisterIndex;
    m_mappedTag = tag;
    m_mappedPayload = payload;
}

inline void JIT::unmap(RegisterID registerID)
{
    if (m_mappedTag == registerID)
        m_mappedTag = (RegisterID)-1;
    else if (m_mappedPayload == registerID)
        m_mappedPayload = (RegisterID)-1;
}

inline void JIT::unmap()
{
    m_mappedBytecodeIndex = (unsigned)-1;
    m_mappedVirtualRegisterIndex = (unsigned)-1;
    m_mappedTag = (RegisterID)-1;
    m_mappedPayload = (RegisterID)-1;
}

inline bool JIT::isMapped(unsigned virtualRegisterIndex)
{
    if (m_mappedBytecodeIndex != m_bytecodeIndex)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    return true;
}

inline bool JIT::getMappedPayload(unsigned virtualRegisterIndex, RegisterID& payload)
{
    if (m_mappedBytecodeIndex != m_bytecodeIndex)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedPayload == (RegisterID)-1)
        return false;
    payload = m_mappedPayload;
    return true;
}

inline bool JIT::getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag)
{
    if (m_mappedBytecodeIndex != m_bytecodeIndex)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedTag == (RegisterID)-1)
        return false;
    tag = m_mappedTag;
    return true;
}

inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
        addSlowCase(branch32(NotEqual, tagFor(virtualRegisterIndex), Imm32(JSValue::CellTag)));
}

inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex, RegisterID tag)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
        addSlowCase(branch32(NotEqual, tag, Imm32(JSValue::CellTag)));
}

inline void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, unsigned virtualRegisterIndex)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
        linkSlowCase(iter);
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant)
{
    if (isOperandConstantImmediateInt(op1)) {
        constant = getConstantOperand(op1).asInt32();
        op = op2;
        return true;
    }

    if (isOperandConstantImmediateInt(op2)) {
        constant = getConstantOperand(op2).asInt32();
        op = op1;
        return true;
    }

    return false;
}

/* Deprecated: Please use JITStubCall instead. */
ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID tag, RegisterID payload, unsigned argumentNumber)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    poke(payload, argumentStackOffset);
    poke(tag, argumentStackOffset + 1);
}

/* Deprecated: Please use JITStubCall instead. */
ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch1, RegisterID scratch2)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue constant = m_codeBlock->getConstant(src);
        poke(Imm32(constant.payload()), argumentStackOffset);
        poke(Imm32(constant.tag()), argumentStackOffset + 1);
    } else {
        emitLoad(src, scratch1, scratch2);
        poke(scratch2, argumentStackOffset);
        poke(scratch1, argumentStackOffset + 1);
    }
}

#else // USE(JSVALUE32_64)
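
// In the immediate (non-JSVALUE32_64) encodings the JIT caches the result of the previous
// bytecode in cachedResultRegister; m_lastResultBytecodeRegister records which virtual
// register that cached value corresponds to, and killLastResultRegister() invalidates the
// cache when it can no longer be trusted.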
ALWAYS_INLINE void JIT::killLastResultRegister()
{
    m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
}

// get arg puts an arg from the SF register array into a h/w register
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue value = m_codeBlock->getConstant(src);
        move(ImmPtr(JSValue::encode(value)), dst);
        killLastResultRegister();
        return;
    }

    if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src)) {
        bool atJumpTarget = false;
        while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeIndex) {
            if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeIndex)
                atJumpTarget = true;
            ++m_jumpTargetsPosition;
        }

        if (!atJumpTarget) {
            // The argument we want is already stored in eax
            if (dst != cachedResultRegister)
                move(cachedResultRegister, dst);
            killLastResultRegister();
            return;
        }
    }

    loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
    killLastResultRegister();
}

ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
{
    if (src2 == m_lastResultBytecodeRegister) {
        emitGetVirtualRegister(src2, dst2);
        emitGetVirtualRegister(src1, dst1);
    } else {
        emitGetVirtualRegister(src1, dst1);
        emitGetVirtualRegister(src2, dst2);
    }
}

ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src)
{
    return getConstantOperand(src).asInt32();
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
{
    storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
    m_lastResultBytecodeRegister = (from == cachedResultRegister) ? dst : std::numeric_limits<int>::max();
}

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    storePtr(ImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
}
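
// A heap cell is identified by its tag bits all being clear: on JSVALUE64 the value is
// tested against the full tag mask held in tagMaskRegister, while with 32-bit immediates
// the low JSImmediate::TagMask bits are tested directly.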
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchTestPtr(Zero, reg, tagMaskRegister);
#else
    return branchTest32(Zero, reg, Imm32(JSImmediate::TagMask));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfBothJSCells(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    orPtr(reg2, scratch);
    return emitJumpIfJSCell(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfJSCell(reg));
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchTestPtr(NonZero, reg, tagMaskRegister);
#else
    return branchTest32(NonZero, reg, Imm32(JSImmediate::TagMask));
#endif
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfNotJSCell(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        emitJumpSlowCaseIfNotJSCell(reg);
}

#if USE(JSVALUE64)
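
// JSVALUE64 number checks: tagTypeNumberRegister holds the tag pattern used to encode
// numbers. A value is a number if any of those tag bits are set, and it is an immediate
// int32 only when all of them are set, hence the unsigned AboveOrEqual comparison against
// the tag pattern in the integer test below.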
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateNumber(RegisterID reg)
{
    return branchTestPtr(NonZero, reg, tagTypeNumberRegister);
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateNumber(RegisterID reg)
{
    return branchTestPtr(Zero, reg, tagTypeNumberRegister);
}

inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        Register& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(&inConstantPool, value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        Register& inConstantPool = m_codeBlock->constantRegister(index);
        convertInt32ToDouble(AbsoluteAddress(&inConstantPool), value);
    } else
        convertInt32ToDouble(addressFor(index), value);
}
#endif

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchPtr(AboveOrEqual, reg, tagTypeNumberRegister);
#else
    return branchTest32(NonZero, reg, Imm32(JSImmediate::TagTypeNumber));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchPtr(Below, reg, tagTypeNumberRegister);
#else
    return branchTest32(Zero, reg, Imm32(JSImmediate::TagTypeNumber));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    andPtr(reg2, scratch);
    return emitJumpIfNotImmediateInteger(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateInteger(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateNumber(reg));
}

#if !USE(JSVALUE64)
ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
{
    subPtr(Imm32(JSImmediate::TagTypeNumber), reg);
}

ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg)
{
    return branchSubPtr(Zero, Imm32(JSImmediate::TagTypeNumber), reg);
}
#endif

ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
{
#if USE(JSVALUE64)
    emitFastArithIntToImmNoCheck(src, dest);
#else
    if (src != dest)
        move(src, dest);
    addPtr(Imm32(JSImmediate::TagTypeNumber), dest);
#endif
}

ALWAYS_INLINE void JIT::emitFastArithImmToInt(RegisterID reg)
{
#if USE(JSVALUE64)
    UNUSED_PARAM(reg);
#else
    rshift32(Imm32(JSImmediate::IntegerPayloadShift), reg);
#endif
}

// operand is int32_t, must have been zero-extended if register is 64-bit.
ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
{
#if USE(JSVALUE64)
    if (src != dest)
        move(src, dest);
    orPtr(tagTypeNumberRegister, dest);
#else
    signExtend32ToPtr(src, dest);
    addPtr(dest, dest);
    emitFastArithReTagImmediate(dest, dest);
#endif
}
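
// Converts a 0/1 value into a boolean immediate by shifting the bit into the extended
// payload position and or-ing in the boolean tag bits.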
ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
{
    lshift32(Imm32(JSImmediate::ExtendedPayloadShift), reg);
    or32(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), reg);
}

/* Deprecated: Please use JITStubCall instead. */
// get arg puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;

    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue value = m_codeBlock->getConstant(src);
        poke(ImmPtr(JSValue::encode(value)), argumentStackOffset);
    } else {
        loadPtr(Address(callFrameRegister, src * sizeof(Register)), scratch);
        poke(scratch, argumentStackOffset);
    }

    killLastResultRegister();
}

#endif // USE(JSVALUE32_64)

} // namespace JSC

#endif // ENABLE(JIT)

#endif