
/js/src/nanojit/NativeSparc.cpp

http://github.com/zpao/v8monkey
  1. /* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
  2. /* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
  3. /* ***** BEGIN LICENSE BLOCK *****
  4. * Version: MPL 1.1/GPL 2.0/LGPL 2.1
  5. *
  6. * The contents of this file are subject to the Mozilla Public License Version
  7. * 1.1 (the "License"); you may not use this file except in compliance with
  8. * the License. You may obtain a copy of the License at
  9. * http://www.mozilla.org/MPL/
  10. *
  11. * Software distributed under the License is distributed on an "AS IS" basis,
  12. * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
  13. * for the specific language governing rights and limitations under the
  14. * License.
  15. *
  16. * The Original Code is [Open Source Virtual Machine].
  17. *
  18. * The Initial Developer of the Original Code is
  19. * Adobe System Incorporated.
  20. * Portions created by the Initial Developer are Copyright (C) 2004-2007
  21. * the Initial Developer. All Rights Reserved.
  22. *
  23. * Contributor(s):
  24. * Adobe AS3 Team
  25. * leon.sha@oracle.com
  26. * ginn.chen@oracle.com
  27. *
  28. * Alternatively, the contents of this file may be used under the terms of
  29. * either the GNU General Public License Version 2 or later (the "GPL"), or
  30. * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
  31. * in which case the provisions of the GPL or the LGPL are applicable instead
  32. * of those above. If you wish to allow use of your version of this file only
  33. * under the terms of either the GPL or the LGPL, and not to allow others to
  34. * use your version of this file under the terms of the MPL, indicate your
  35. * decision by deleting the provisions above and replace them with the notice
  36. * and other provisions required by the GPL or the LGPL. If you do not delete
  37. * the provisions above, a recipient may use your version of this file under
  38. * the terms of any one of the MPL, the GPL or the LGPL.
  39. *
  40. * ***** END LICENSE BLOCK ***** */
  41. #include <sys/types.h>
  42. #include <sys/mman.h>
  43. #include <errno.h>
  44. #include "nanojit.h"
  45. namespace nanojit
  46. {
  47. #ifdef FEATURE_NANOJIT
  48. #ifdef NJ_VERBOSE
  49. const char *regNames[] = {
  50. "%g0", "%g1", "%g2", "%g3", "%g4", "%g5", "%g6", "%g7",
  51. "%o0", "%o1", "%o2", "%o3", "%o4", "%o5", "%sp", "%o7",
  52. "%l0", "%l1", "%l2", "%l3", "%l4", "%l5", "%l6", "%l7",
  53. "%i0", "%i1", "%i2", "%i3", "%i4", "%i5", "%fp", "%i7",
  54. "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
  55. "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  56. "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  57. "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31"
  58. };
  59. #endif
  60. const Register Assembler::argRegs[] = { I0, I1, I2, I3, I4, I5 };
  61. const Register Assembler::retRegs[] = { O0 };
  62. const Register Assembler::savedRegs[] = { L1 }; // Dummy element not used, as NumSavedRegs == 0
  63. static const int kLinkageAreaSize = 68;
  64. static const int kcalleeAreaSize = 80; // The max size.
  65. #define BIT_ROUND_UP(v,q) ( (((uintptr_t)v)+(q)-1) & ~((q)-1) )
  66. #define TODO(x) do{ verbose_only(outputf(#x);) NanoAssertMsgf(false, "%s", #x); } while(0)
  67. inline void Assembler::CALL(const CallInfo* ci) {
  68. int32_t offset = (ci->_address) - ((int32_t)_nIns) + 4;
  69. int32_t i = 0x40000000 | ((offset >> 2) & 0x3FFFFFFF);
  70. IMM32(i);
  71. asm_output("call %s",(ci->_name));
  72. }
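// NOTE: this encodes the SPARC CALL format (op = 01, signed 30-bit word displacement).
// The "+ 4" appears to compensate for _nIns being pre-decremented by IMM32 when the
// word is written, since nanojit lays code out backwards (see underrunProtect below);
// the displacement is then relative to the call instruction itself.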
  73. inline void Assembler::IntegerOperation
  74. (Register rs1, Register rs2, Register rd, int32_t op3, const char *opcode) {
  75. Format_3_1(2, rd, op3, rs1, 0, rs2);
  76. asm_output("%s %s, %s, %s", opcode, gpn(rs1), gpn(rs2), gpn(rd));
  77. }
  78. inline void Assembler::IntegerOperationI
  79. (Register rs1, int32_t simm13, Register rd, int32_t op3, const char *opcode) {
  80. Format_3_1I(2, rd, op3, rs1, simm13);
  81. asm_output("%s %s, %d, %s", opcode, gpn(rs1), simm13, gpn(rd));
  82. }
  83. inline void Assembler::ADD(Register rs1, Register rs2, Register rd) {
  84. IntegerOperation(rs1, rs2, rd, 0, "add");
  85. }
  86. inline void Assembler::ADDCC(Register rs1, Register rs2, Register rd) {
  87. IntegerOperation(rs1, rs2, rd, 0x10, "addcc");
  88. }
  89. inline void Assembler::AND(Register rs1, Register rs2, Register rd) {
  90. IntegerOperation(rs1, rs2, rd, 0x1, "and");
  91. }
  92. inline void Assembler::ANDCC(Register rs1, Register rs2, Register rd) {
  93. IntegerOperation(rs1, rs2, rd, 0x11, "andcc");
  94. }
  95. inline void Assembler::OR(Register rs1, Register rs2, Register rd) {
  96. IntegerOperation(rs1, rs2, rd, 0x2, "or");
  97. }
  98. inline void Assembler::ORI(Register rs1, int32_t simm13, Register rd) {
  99. IntegerOperationI(rs1, simm13, rd, 0x2, "or");
  100. }
  101. inline void Assembler::ORN(Register rs1, Register rs2, Register rd) {
  102. IntegerOperation(rs1, rs2, rd, 0x6, "orn");
  103. }
  104. inline void Assembler::SMULCC(Register rs1, Register rs2, Register rd) {
  105. IntegerOperation(rs1, rs2, rd, 0x1b, "smulcc");
  106. }
  107. inline void Assembler::SUB(Register rs1, Register rs2, Register rd) {
  108. IntegerOperation(rs1, rs2, rd, 0x4, "sub");
  109. }
  110. inline void Assembler::SUBCC(Register rs1, Register rs2, Register rd) {
  111. IntegerOperation(rs1, rs2, rd, 0x14, "subcc");
  112. }
  113. inline void Assembler::SUBI(Register rs1, int32_t simm13, Register rd) {
  114. IntegerOperationI(rs1, simm13, rd, 0x4, "sub");
  115. }
  116. inline void Assembler::XOR(Register rs1, Register rs2, Register rd) {
  117. IntegerOperation(rs1, rs2, rd, 0x3, "xor");
  118. }
  119. inline void Assembler::Bicc(int32_t a, int32_t dsp22, int32_t cond, const char *opcode) {
  120. Format_2_2(a, cond, 0x2, dsp22);
  121. asm_output("%s 0x%x", opcode, _nIns + dsp22 - 1);
  122. }
  123. inline void Assembler::BA (int32_t a, int32_t dsp22) { Bicc(a, dsp22, 0x8, "ba"); }
  124. inline void Assembler::BE (int32_t a, int32_t dsp22) { Bicc(a, dsp22, 0x1, "be"); }
  125. inline void Assembler::BNE (int32_t a, int32_t dsp22) { Bicc(a, dsp22, 0x9, "bne"); }
  126. inline void Assembler::BG (int32_t a, int32_t dsp22) { Bicc(a, dsp22, 0xa, "bg"); }
  127. inline void Assembler::BGU (int32_t a, int32_t dsp22) { Bicc(a, dsp22, 0xc, "bgu"); }
  128. inline void Assembler::BGE (int32_t a, int32_t dsp22) { Bicc(a, dsp22, 0xb, "bge"); }
  129. inline void Assembler::BL (int32_t a, int32_t dsp22) { Bicc(a, dsp22, 0x3, "bl"); }
  130. inline void Assembler::BLE (int32_t a, int32_t dsp22) { Bicc(a, dsp22, 0x2, "ble"); }
  131. inline void Assembler::BLEU(int32_t a, int32_t dsp22) { Bicc(a, dsp22, 0x4, "bleu"); }
  132. inline void Assembler::BCC (int32_t a, int32_t dsp22) { Bicc(a, dsp22, 0xd, "bcc"); }
  133. inline void Assembler::BCS (int32_t a, int32_t dsp22) { Bicc(a, dsp22, 0x5, "bcs"); }
  134. inline void Assembler::BVC (int32_t a, int32_t dsp22) { Bicc(a, dsp22, 0xf, "bvc"); }
  135. inline void Assembler::BVS (int32_t a, int32_t dsp22) { Bicc(a, dsp22, 0x7, "bvs"); }
  136. inline void Assembler::FABSS(Register rs2, Register rd) {
  137. Format_3_8(2, rd, 0x34, G0, 0x9, rs2);
  138. asm_output("fabs %s, %s", gpn(rs2), gpn(rd));
  139. }
  140. inline void Assembler::FADDD(Register rs1, Register rs2, Register rd) {
  141. Format_3_8(2, rd, 0x34, rs1, 0x42, rs2);
  142. asm_output("faddd %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd));
  143. }
  144. inline void Assembler::FBfcc(int32_t a, int32_t dsp22, int32_t cond, const char *opcode) {
  145. Format_2_2(a, cond, 0x6, dsp22);
  146. asm_output("%s 0x%x", opcode, _nIns + dsp22 - 1);
  147. }
  148. inline void Assembler::FBE (int32_t a, int32_t dsp22) { FBfcc(a, dsp22, 0x9, "fbe"); }
  149. inline void Assembler::FBNE (int32_t a, int32_t dsp22) { FBfcc(a, dsp22, 0x1, "fbne"); }
  150. inline void Assembler::FBUE (int32_t a, int32_t dsp22) { FBfcc(a, dsp22, 0xa, "fbue"); }
  151. inline void Assembler::FBG (int32_t a, int32_t dsp22) { FBfcc(a, dsp22, 0x6, "fbg"); }
  152. inline void Assembler::FBUG (int32_t a, int32_t dsp22) { FBfcc(a, dsp22, 0x5, "fbug"); }
  153. inline void Assembler::FBGE (int32_t a, int32_t dsp22) { FBfcc(a, dsp22, 0xb, "fbge"); }
  154. inline void Assembler::FBUGE(int32_t a, int32_t dsp22) { FBfcc(a, dsp22, 0xc, "fbuge"); }
  155. inline void Assembler::FBL (int32_t a, int32_t dsp22) { FBfcc(a, dsp22, 0x4, "fbl"); }
  156. inline void Assembler::FBUL (int32_t a, int32_t dsp22) { FBfcc(a, dsp22, 0x3, "fbul"); }
  157. inline void Assembler::FBLE (int32_t a, int32_t dsp22) { FBfcc(a, dsp22, 0xd, "fble"); }
  158. inline void Assembler::FBULE(int32_t a, int32_t dsp22) { FBfcc(a, dsp22, 0xe, "fbule"); }
  159. inline void Assembler::FCMPD(Register rs1, Register rs2) {
  160. Format_3_9(2, 0, 0, 0x35, rs1, 0x52, rs2);
  161. asm_output("fcmpd %s, %s", gpn(rs1), gpn(rs2));
  162. }
  163. inline void Assembler::FloatOperation
  164. (Register rs1, Register rs2, Register rd, int32_t opf, const char *opcode) {
  165. Format_3_8(2, rd, 0x34, rs1, opf, rs2);
  166. if (rs1 != G0) {
  167. asm_output("%s %s, %s, %s", opcode, gpn(rs1), gpn(rs2), gpn(rd));
  168. } else {
  169. asm_output("%s %s, %s", opcode, gpn(rs2), gpn(rd));
  170. }
  171. }
  172. inline void Assembler::FSUBD(Register rs1, Register rs2, Register rd) {
  173. FloatOperation(rs1, rs2, rd, 0x46, "fsubd");
  174. }
  175. inline void Assembler::FMULD(Register rs1, Register rs2, Register rd) {
  176. FloatOperation(rs1, rs2, rd, 0x4a, "fsubd");
  177. }
  178. inline void Assembler::FDTOI(Register rs2, Register rd) {
  179. FloatOperation(G0, rs2, rd, 0xd2, "fdtoi");
  180. }
  181. inline void Assembler::FDIVD(Register rs1, Register rs2, Register rd) {
  182. FloatOperation(rs1, rs2, rd, 0x4e, "fdivd");
  183. }
  184. inline void Assembler::FMOVD(Register rs2, Register rd) {
  185. FloatOperation(G0, rs2, rd, 0x2, "fmovd");
  186. }
  187. inline void Assembler::FNEGD(Register rs2, Register rd) {
  188. FloatOperation(G0, rs2, rd, 0x6, "fnegd");
  189. }
  190. inline void Assembler::FITOD(Register rs2, Register rd) {
  191. FloatOperation(G0, rs2, rd, 0xc8, "fitod");
  192. }
  193. inline void Assembler::FDTOS(Register rs2, Register rd) {
  194. FloatOperation(G0, rs2, rd, 0xc6, "fdtos");
  195. }
  196. inline void Assembler::FSTOD(Register rs2, Register rd) {
  197. FloatOperation(G0, rs2, rd, 0xc9, "fstod");
  198. }
  199. inline void Assembler::JMPL(Register rs1, Register rs2, Register rd) {
  200. Format_3_1(2, rd, 0x38, rs1, 0, rs2);
  201. asm_output("jmpl [%s + %s]", gpn(rs1), gpn(rs2));
  202. }
  203. inline void Assembler::JMPLI(Register rs1, int32_t simm13, Register rd) {
  204. Format_3_1I(2, rd, 0x38, rs1, simm13);
  205. asm_output("jmpl [%s + 0x%x]", gpn(rs1), simm13);
  206. }
  207. inline void Assembler::LoadOperation
  208. (Register rs1, Register rs2, Register rd, int32_t op3, const char* opcode) {
  209. Format_3_1(3, rd, op3, rs1, 0, rs2);
  210. asm_output("%s [%s + %s], %s", opcode, gpn(rs1), gpn(rs2), gpn(rd));
  211. }
  212. inline void Assembler::LoadOperationI
  213. (Register rs1, int32_t simm13, Register rd, int32_t op3, const char* opcode) {
  214. Format_3_1I(3, rd, op3, rs1, simm13);
  215. asm_output("%s [%s + 0x%x], %s", opcode, gpn(rs1), simm13, gpn(rd));
  216. }
  217. inline void Assembler::LDF(Register rs1, Register rs2, Register rd) {
  218. LoadOperation(rs1, rs2, rd, 0x20, "ldf");
  219. }
  220. inline void Assembler::LDFI(Register rs1, int32_t simm13, Register rd) {
  221. LoadOperationI(rs1, simm13, rd, 0x20, "ldf");
  222. }
  223. inline void Assembler::LDF32(Register rs1, int32_t immI, Register rd) {
  224. if (isIMM13(immI)) {
  225. LDFI(rs1, immI, rd);
  226. } else {
  227. LDF(rs1, L0, rd);
  228. SET32(immI, L0);
  229. }
  230. }
  231. inline void Assembler::LDDF32(Register rs1, int32_t immI, Register rd) {
  232. if (isIMM13(immI+4)) {
  233. LDFI(rs1, immI+4, rd + 1);
  234. LDFI(rs1, immI, rd);
  235. } else {
  236. LDF(rs1, L0, rd + 1);
  237. SET32(immI+4, L0);
  238. LDF(rs1, L0, rd);
  239. SET32(immI, L0);
  240. }
  241. }
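// NOTE: a double is evidently handled as two 32-bit LDFs into the even/odd FP register
// pair (rd, rd+1). SPARC is big-endian, so the word at immI is the high half and lands
// in the even register; when the displacement does not fit a signed 13-bit immediate,
// the offset is first built in L0 with SET32.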
  242. inline void Assembler::LDUB(Register rs1, Register rs2, Register rd) {
  243. LoadOperation(rs1, rs2, rd, 0x1, "ldub");
  244. }
  245. inline void Assembler::LDUBI(Register rs1, int32_t simm13, Register rd) {
  246. LoadOperationI(rs1, simm13, rd, 0x1, "ldub");
  247. }
  248. inline void Assembler::LDUB32(Register rs1, int32_t immI, Register rd) {
  249. if (isIMM13(immI)) {
  250. LDUBI(rs1, immI, rd);
  251. } else {
  252. LDUB(rs1, L0, rd);
  253. SET32(immI, L0);
  254. }
  255. }
  256. inline void Assembler::LDSB(Register rs1, Register rs2, Register rd) {
  257. LoadOperation(rs1, rs2, rd, 0x9, "ldsb");
  258. }
  259. inline void Assembler::LDSBI(Register rs1, int32_t simm13, Register rd) {
  260. LoadOperationI(rs1, simm13, rd, 0x9, "ldsb");
  261. }
  262. inline void Assembler::LDSB32(Register rs1, int32_t immI, Register rd) {
  263. if (isIMM13(immI)) {
  264. LDSBI(rs1, immI, rd);
  265. } else {
  266. LDSB(rs1, L0, rd);
  267. SET32(immI, L0);
  268. }
  269. }
  270. inline void Assembler::LDUH(Register rs1, Register rs2, Register rd) {
  271. LoadOperation(rs1, rs2, rd, 0x2, "lduh");
  272. }
  273. inline void Assembler::LDUHI(Register rs1, int32_t simm13, Register rd) {
  274. LoadOperationI(rs1, simm13, rd, 0x2, "lduh");
  275. }
  276. inline void Assembler::LDUH32(Register rs1, int32_t immI, Register rd) {
  277. if (isIMM13(immI)) {
  278. LDUHI(rs1, immI, rd);
  279. } else {
  280. LDUH(rs1, L0, rd);
  281. SET32(immI, L0);
  282. }
  283. }
  284. inline void Assembler::LDSH(Register rs1, Register rs2, Register rd) {
  285. LoadOperation(rs1, rs2, rd, 0xa, "ldsh");
  286. }
  287. inline void Assembler::LDSHI(Register rs1, int32_t simm13, Register rd) {
  288. LoadOperationI(rs1, simm13, rd, 0xa, "ldsh");
  289. }
  290. inline void Assembler::LDSH32(Register rs1, int32_t immI, Register rd) {
  291. if (isIMM13(immI)) {
  292. LDSHI(rs1, immI, rd);
  293. } else {
  294. LDSH(rs1, L0, rd);
  295. SET32(immI, L0);
  296. }
  297. }
  298. inline void Assembler::LDSW(Register rs1, Register rs2, Register rd) {
  299. LoadOperation(rs1, rs2, rd, 0x8, "ldsw");
  300. }
  301. inline void Assembler::LDSWI(Register rs1, int32_t simm13, Register rd) {
  302. LoadOperationI(rs1, simm13, rd, 0x8, "ldsw");
  303. }
  304. inline void Assembler::LDSW32(Register rs1, int32_t immI, Register rd) {
  305. if (isIMM13(immI)) {
  306. LDSWI(rs1, immI, rd);
  307. } else {
  308. LDSW(rs1, L0, rd);
  309. SET32(immI, L0);
  310. }
  311. }
  312. inline void Assembler::MOVcc
  313. (Register rs, int32_t cc2, int32_t cc1, int32_t cc0, Register rd, int32_t cond, const char *opcode) {
  314. Format_4_2(rd, 0x2c, cc2, cond, cc1, cc0, rs);
  315. asm_output("%s %s, %s", opcode, gpn(rs), gpn(rd));
  316. }
  317. inline void Assembler::MOVccI
  318. (int32_t simm11, int32_t cc2, int32_t cc1, int32_t cc0, Register rd, int32_t cond, const char *opcode) {
  319. Format_4_2I(rd, 0x2c, cc2, cond, cc1, cc0, simm11);
  320. asm_output("%s 0x%x, %s", opcode, simm11, gpn(rd));
  321. }
  322. inline void Assembler::MOVE (Register rs, Register rd) { MOVcc(rs, 1, 0, 0, rd, 0x1, "move"); }
  323. inline void Assembler::MOVNE (Register rs, Register rd) { MOVcc(rs, 1, 0, 0, rd, 0x9, "movne"); }
  324. inline void Assembler::MOVL (Register rs, Register rd) { MOVcc(rs, 1, 0, 0, rd, 0x3, "movl"); }
  325. inline void Assembler::MOVLE (Register rs, Register rd) { MOVcc(rs, 1, 0, 0, rd, 0x2, "movle"); }
  326. inline void Assembler::MOVG (Register rs, Register rd) { MOVcc(rs, 1, 0, 0, rd, 0xa, "movg"); }
  327. inline void Assembler::MOVGE (Register rs, Register rd) { MOVcc(rs, 1, 0, 0, rd, 0xb, "movge"); }
  328. inline void Assembler::MOVLEU(Register rs, Register rd) { MOVcc(rs, 1, 0, 0, rd, 0x4, "movleu"); }
  329. inline void Assembler::MOVGU (Register rs, Register rd) { MOVcc(rs, 1, 0, 0, rd, 0xc, "movgu"); }
  330. inline void Assembler::MOVCC (Register rs, Register rd) { MOVcc(rs, 1, 0, 0, rd, 0xd, "movcc"); }
  331. inline void Assembler::MOVCS (Register rs, Register rd) { MOVcc(rs, 1, 0, 0, rd, 0x5, "movcs"); }
  332. inline void Assembler::MOVVC (Register rs, Register rd) { MOVcc(rs, 1, 0, 0, rd, 0xf, "movvc"); }
  333. inline void Assembler::MOVEI (int32_t simm11, Register rd) { MOVccI(simm11, 1, 0, 0, rd, 0x1, "move"); }
  334. inline void Assembler::MOVNEI (int32_t simm11, Register rd) { MOVccI(simm11, 1, 0, 0, rd, 0x9, "movne"); }
  335. inline void Assembler::MOVLI (int32_t simm11, Register rd) { MOVccI(simm11, 1, 0, 0, rd, 0x3, "movl"); }
  336. inline void Assembler::MOVLEI (int32_t simm11, Register rd) { MOVccI(simm11, 1, 0, 0, rd, 0x2, "movle"); }
  337. inline void Assembler::MOVGI (int32_t simm11, Register rd) { MOVccI(simm11, 1, 0, 0, rd, 0xa, "movg"); }
  338. inline void Assembler::MOVGEI (int32_t simm11, Register rd) { MOVccI(simm11, 1, 0, 0, rd, 0xb, "movge"); }
  339. inline void Assembler::MOVLEUI(int32_t simm11, Register rd) { MOVccI(simm11, 1, 0, 0, rd, 0x4, "movleu"); }
  340. inline void Assembler::MOVGUI (int32_t simm11, Register rd) { MOVccI(simm11, 1, 0, 0, rd, 0xc, "movgu"); }
  341. inline void Assembler::MOVCCI (int32_t simm11, Register rd) { MOVccI(simm11, 1, 0, 0, rd, 0xd, "movcc"); }
  342. inline void Assembler::MOVCSI (int32_t simm11, Register rd) { MOVccI(simm11, 1, 0, 0, rd, 0x5, "movcs"); }
  343. inline void Assembler::MOVVSI (int32_t simm11, Register rd) { MOVccI(simm11, 1, 0, 0, rd, 0x7, "movvs"); }
  344. inline void Assembler::MOVFEI (int32_t simm11, Register rd) { MOVccI(simm11, 0, 0, 0, rd, 0x9, "movfe"); }
  345. inline void Assembler::MOVFLI (int32_t simm11, Register rd) { MOVccI(simm11, 0, 0, 0, rd, 0x4, "movfl"); }
  346. inline void Assembler::MOVFLEI(int32_t simm11, Register rd) { MOVccI(simm11, 0, 0, 0, rd, 0xd, "movfle"); }
  347. inline void Assembler::MOVFGI (int32_t simm11, Register rd) { MOVccI(simm11, 0, 0, 0, rd, 0x6, "movfg"); }
  348. inline void Assembler::MOVFGEI(int32_t simm11, Register rd) { MOVccI(simm11, 0, 0, 0, rd, 0xb, "movfge"); }
  349. inline void Assembler::FMOVDcc(Register rs, int32_t opt_cc, Register rd, int32_t cond, const char *opcode) {
  350. Format_4_5(rd, 0x35, cond, opt_cc, 0x2, rs);
  351. asm_output("%s %s, %s", opcode, gpn(rs), gpn(rd));
  352. }
  353. inline void Assembler::FMOVDNE (Register rs, Register rd) { FMOVDcc(rs, 0x4, rd, 0x9, "fmovdne"); }
  354. inline void Assembler::FMOVDL (Register rs, Register rd) { FMOVDcc(rs, 0x4, rd, 0x3, "fmovdl"); }
  355. inline void Assembler::FMOVDLE (Register rs, Register rd) { FMOVDcc(rs, 0x4, rd, 0x2, "fmovdle"); }
  356. inline void Assembler::FMOVDLEU (Register rs, Register rd) { FMOVDcc(rs, 0x4, rd, 0x4, "fmovdleu");}
  357. inline void Assembler::FMOVDG (Register rs, Register rd) { FMOVDcc(rs, 0x4, rd, 0xa, "fmovdg"); }
  358. inline void Assembler::FMOVDGU (Register rs, Register rd) { FMOVDcc(rs, 0x4, rd, 0xc, "fmovdgu"); }
  359. inline void Assembler::FMOVDGE (Register rs, Register rd) { FMOVDcc(rs, 0x4, rd, 0xb, "fmovdge"); }
  360. inline void Assembler::FMOVDCC (Register rs, Register rd) { FMOVDcc(rs, 0x4, rd, 0xd, "fmovdcc"); }
  361. inline void Assembler::FMOVDCS (Register rs, Register rd) { FMOVDcc(rs, 0x4, rd, 0x5, "fmovdcs"); }
  362. inline void Assembler::FMOVDFNE (Register rs, Register rd) { FMOVDcc(rs, 0x0, rd, 0x1, "fmovdfne"); }
  363. inline void Assembler::FMOVDFUG (Register rs, Register rd) { FMOVDcc(rs, 0x0, rd, 0x5, "fmovdfug"); }
  364. inline void Assembler::FMOVDFUGE(Register rs, Register rd) { FMOVDcc(rs, 0x0, rd, 0xc, "fmovdfuge");}
  365. inline void Assembler::FMOVDFUL (Register rs, Register rd) { FMOVDcc(rs, 0x0, rd, 0x3, "fmovdful"); }
  366. inline void Assembler::FMOVDFULE(Register rs, Register rd) { FMOVDcc(rs, 0x0, rd, 0xe, "fmovdfule");}
  367. inline void Assembler::NOP() {
  368. Format_2(0, 0x4, 0);
  369. asm_output("nop");
  370. }
  371. inline void Assembler::RDY(Register rd) {
  372. Format_3_1(2, rd, 0x28, G0, 0, G0);
  373. asm_output("rdy %s", gpn(rd));
  374. }
  375. inline void Assembler::RESTORE(Register rs1, Register rs2, Register rd) {
  376. Format_3_1(2, rd, 0x3d, rs1, 0, rs2);
  377. asm_output("restore");
  378. }
  379. inline void Assembler::SAVE(Register rs1, Register rs2, Register rd) {
  380. IntegerOperation(rs1, rs2, rd, 0x3c, "save");
  381. }
  382. inline void Assembler::SAVEI(Register rs1, int32_t simm13, Register rd) {
  383. IntegerOperationI(rs1, simm13, rd, 0x3c, "save");
  384. }
  385. inline void Assembler::SETHI(int32_t immI, Register rd) {
  386. Format_2A(rd, 0x4, immI >> 10);
  387. asm_output("sethi 0x%x, %s ! 0x%x", immI >> 10, gpn(rd), immI);
  388. }
  389. inline void Assembler::SET32(int32_t immI, Register rd) {
  390. if (isIMM13(immI)) {
  391. ORI(G0, immI, rd);
  392. } else {
  393. ORI(rd, immI & 0x3FF, rd);
  394. SETHI(immI, rd);
  395. }
  396. }
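// NOTE: SET32 materializes an arbitrary 32-bit constant. Values that fit a signed
// 13-bit immediate use a single "or %g0, imm, rd"; larger values are split into a
// sethi/or pair. The calls appear reversed because the assembler emits backwards;
// in memory order, SET32(0x12345678, L0) becomes roughly:
//     sethi %hi(0x12345678), %l0    ! upper 22 bits (0x48d15)
//     or    %l0, 0x278, %l0         ! lower 10 bits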
  397. inline void Assembler::ShiftOperation
  398. (Register rs1, Register rs2, Register rd, int32_t op3, const char* opcode) {
  399. Format_3_5(2, rd, op3, rs1, 0, rs2);
  400. asm_output("%s %s, %s, %s", opcode, gpn(rs1), gpn(rs2), gpn(rd));
  401. }
  402. inline void Assembler::ShiftOperationI
  403. (Register rs1, int32_t shcnt32, Register rd, int32_t op3, const char* opcode) {
  404. Format_3_6(2, rd, op3, rs1, shcnt32);
  405. asm_output("%s %s, %d, %s", opcode, gpn(rs1), shcnt32, gpn(rd));
  406. }
  407. inline void Assembler::SLL(Register rs1, Register rs2, Register rd) {
  408. ShiftOperation(rs1, rs2, rd, 0x25, "sll");
  409. }
  410. inline void Assembler::SRA(Register rs1, Register rs2, Register rd) {
  411. ShiftOperation(rs1, rs2, rd, 0x27, "sra");
  412. }
  413. inline void Assembler::SRAI(Register rs1, int32_t shcnt32, Register rd) {
  414. ShiftOperationI(rs1, shcnt32, rd, 0x27, "sra");
  415. }
  416. inline void Assembler::SRL(Register rs1, Register rs2, Register rd) {
  417. ShiftOperation(rs1, rs2, rd, 0x26, "srl");
  418. }
  419. inline void Assembler::Store
  420. (Register rd, Register rs1, Register rs2, int32_t op3, const char* opcode) {
  421. Format_3_1(3, rd, op3, rs1, 0, rs2);
  422. asm_output("%s %s, [%s + %s]", opcode, gpn(rd), gpn(rs1), gpn(rs2));
  423. }
  424. inline void Assembler::StoreI
  425. (Register rd, int32_t simm13, Register rs1, int32_t op3, const char* opcode) {
  426. Format_3_1I(3, rd, op3, rs1, simm13);
  427. asm_output("%s %s, [%s + 0x%x]", opcode, gpn(rd), gpn(rs1), simm13);
  428. }
  429. inline void Assembler::STF(Register rd, Register rs1, Register rs2) {
  430. Store(rd, rs1, rs2, 0x24, "stf");
  431. }
  432. inline void Assembler::STFI(Register rd, int32_t simm13, Register rs1) {
  433. StoreI(rd, simm13, rs1, 0x24, "stf");
  434. }
  435. inline void Assembler::STF32(Register rd, int32_t immI, Register rs1) {
  436. if (isIMM13(immI)) {
  437. STFI(rd, immI, rs1);
  438. } else {
  439. STF(rd, L0, rs1);
  440. SET32(immI, L0);
  441. }
  442. }
  443. inline void Assembler::STDF32(Register rd, int32_t immI, Register rs1) {
  444. if (isIMM13(immI+4)) {
  445. STFI(rd + 1, immI+4, rs1);
  446. STFI(rd, immI, rs1);
  447. } else {
  448. STF(rd + 1, L0, rs1);
  449. SET32(immI+4, L0);
  450. STF(rd, L0, rs1);
  451. SET32(immI, L0);
  452. }
  453. }
  454. inline void Assembler::STW(Register rd, Register rs1, Register rs2) {
  455. Store(rd, rs1, rs2, 0x4, "st");
  456. }
  457. inline void Assembler::STWI(Register rd, int32_t simm13, Register rs1) {
  458. StoreI(rd, simm13, rs1, 0x4, "st");
  459. }
  460. inline void Assembler::STW32(Register rd, int32_t immI, Register rs1) {
  461. if (isIMM13(immI)) {
  462. STWI(rd, immI, rs1);
  463. } else {
  464. STW(rd, L0, rs1);
  465. SET32(immI, L0);
  466. }
  467. }
  468. inline void Assembler::STH(Register rd, Register rs1, Register rs2) {
  469. Store(rd, rs1, rs2, 0x6, "sth");
  470. }
  471. inline void Assembler::STHI(Register rd, int32_t simm13, Register rs1) {
  472. StoreI(rd, simm13, rs1, 0x6, "sth");
  473. }
  474. inline void Assembler::STH32(Register rd, int32_t immI, Register rs1) {
  475. if (isIMM13(immI)) {
  476. STHI(rd, immI, rs1);
  477. } else {
  478. STH(rd, L0, rs1);
  479. SET32(immI, L0);
  480. }
  481. }
  482. inline void Assembler::STB(Register rd, Register rs1, Register rs2) {
  483. Store(rd, rs1, rs2, 0x5, "stb");
  484. }
  485. inline void Assembler::STBI(Register rd, int32_t simm13, Register rs1) {
  486. StoreI(rd, simm13, rs1, 0x5, "stb");
  487. }
  488. inline void Assembler::STB32(Register rd, int32_t immI, Register rs1) {
  489. if (isIMM13(immI)) {
  490. STBI(rd, immI, rs1);
  491. } else {
  492. STB(rd, L0, rs1);
  493. SET32(immI, L0);
  494. }
  495. }
  496. // general assembly helpers
  497. inline void Assembler::JMP_long_nocheck(int32_t t) {
  498. NOP();
  499. JMPL(G0, G2, G0);
  500. ORI(G2, t & 0x3FF, G2);
  501. SETHI(t, G2);
  502. }
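// NOTE: given the backwards emission order, JMP_long_nocheck lands in memory as
//     sethi %hi(t), %g2 ; or %g2, t & 0x3ff, %g2 ; jmpl [%g0 + %g2], %g0 ; nop
// -- an absolute jump through %g2, with a nop filling the branch delay slot.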
  503. inline void Assembler::JMP_long(int32_t t) {
  504. underrunProtect(16);
  505. JMP_long_nocheck(t);
  506. }
  507. inline void Assembler::JMP_long_placeholder() {
  508. JMP_long(0);
  509. }
  510. inline int32_t Assembler::JCC(void *t) {
  511. underrunProtect(32);
  512. int32_t tt = ((intptr_t)t - (intptr_t)_nIns + 8) >> 2;
  513. if( !(isIMM22(tt)) ) {
  514. NOP();
  515. JMPL(G0, G2, G0);
  516. SET32((intptr_t)t, G2);
  517. NOP();
  518. BA(0, 5);
  519. tt = 4;
  520. }
  521. NOP();
  522. return tt;
  523. }
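// NOTE: JCC appears to compute the displacement for a conditional branch that the
// caller emits afterwards. If the target is reachable with a 22-bit word displacement
// it is used directly (plus the nop delay slot); otherwise the target is materialized
// in %g2 and reached through a jmpl, a short "ba" lets the fall-through path hop over
// that long-jump sequence, and tt is set to the small local displacement (4) so the
// caller's branch only has to reach the nearby long jump.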
  524. void Assembler::JMP(void *t) {
  525. if (!t) {
  526. JMP_long_placeholder();
  527. } else {
  528. int32_t tt = JCC(t);
  529. BA(0, tt);
  530. }
  531. }
  532. void Assembler::MR(Register rd, Register rs) {
  533. underrunProtect(4);
  534. ORI(rs, 0, rd);
  535. }
  536. void Assembler::nInit() {
  537. }
  538. void Assembler::nBeginAssembly() {
  539. }
  540. NIns* Assembler::genPrologue()
  541. {
  542. /**
  543. * Prologue
  544. */
  545. underrunProtect(16);
  546. uint32_t stackNeeded = STACK_GRANULARITY * _activation.stackSlotsNeeded();
  547. uint32_t frameSize = stackNeeded + kcalleeAreaSize + kLinkageAreaSize;
  548. frameSize = BIT_ROUND_UP(frameSize, 8);
  549. if (frameSize <= 4096)
  550. SUBI(FP, frameSize, SP);
  551. else {
  552. SUB(FP, G1, SP);
  553. ORI(G1, frameSize & 0x3FF, G1);
  554. SETHI(frameSize, G1);
  555. }
  556. verbose_only(
  557. if (_logc->lcbits & LC_Native) {
  558. outputf(" 0x%x:",_nIns);
  559. outputf(" patch entry:");
  560. })
  561. NIns *patchEntry = _nIns;
  562. // The frame size in SAVE is faked. We will still re-calculate SP later.
  563. // We can use 0 here but it is not good for debuggers.
  564. SAVEI(SP, -148, SP);
  565. // align the entry point
  566. asm_align_code();
  567. return patchEntry;
  568. }
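// NOTE: the frame appears to be sized as the activation's spill slots plus the fixed
// callee argument area and the SPARC linkage/register-save area, rounded up to 8 bytes.
// The -148 in SAVE is only a placeholder; the real SP adjustment is the subtract (or
// sethi/or/sub) sequence above, which runs after the SAVE at execution time.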
  569. void Assembler::asm_align_code() {
  570. while(uintptr_t(_nIns) & 15) {
  571. NOP();
  572. }
  573. }
  574. void Assembler::nFragExit(LIns* guard)
  575. {
  576. SideExit* exit = guard->record()->exit;
  577. Fragment *frag = exit->target;
  578. GuardRecord *lr;
  579. if (frag && frag->fragEntry)
  580. {
  581. JMP(frag->fragEntry);
  582. lr = 0;
  583. }
  584. else
  585. {
  586. // Target doesn't exist yet. Emit a jump to the epilogue, and set up to patch later.
  587. if (!_epilogue)
  588. _epilogue = genEpilogue();
  589. lr = guard->record();
  590. JMP_long((intptr_t)_epilogue);
  591. lr->jmp = _nIns;
  592. }
  593. // return value is GuardRecord*
  594. SET32(int(lr), O0);
  595. }
  596. NIns *Assembler::genEpilogue()
  597. {
  598. underrunProtect(12);
  599. RESTORE(G0, G0, G0); //restore
  600. JMPLI(I7, 8, G0); //ret
  601. ORI(O0, 0, I0);
  602. return _nIns;
  603. }
  604. void Assembler::asm_call(LIns* ins)
  605. {
  606. if (!ins->isop(LIR_callv)) {
  607. Register retReg = ( ins->isop(LIR_calld) ? F0 : retRegs[0] );
  608. deprecated_prepResultReg(ins, rmask(retReg));
  609. }
  610. // Do this after we've handled the call result, so we don't
  611. // force the call result to be spilled unnecessarily.
  612. evictScratchRegsExcept(0);
  613. const CallInfo* ci = ins->callInfo();
  614. underrunProtect(8);
  615. NOP();
  616. ArgType argTypes[MAXARGS];
  617. uint32_t argc = ci->getArgTypes(argTypes);
  618. NanoAssert(ins->isop(LIR_callv) || ins->isop(LIR_callp) ||
  619. ins->isop(LIR_calld));
  620. verbose_only(if (_logc->lcbits & LC_Native)
  621. outputf(" 0x%x:", _nIns);
  622. )
  623. bool indirect = ci->isIndirect();
  624. if (!indirect) {
  625. CALL(ci);
  626. }
  627. else {
  628. argc--;
  629. Register r = findSpecificRegFor(ins->arg(argc), I0);
  630. JMPL(G0, I0, O7);
  631. }
  632. Register GPRIndex = O0;
  633. uint32_t offset = kLinkageAreaSize; // start of the parameter area on the stack.
  634. for(int i=0; i<argc; i++)
  635. {
  636. uint32_t j = argc-i-1;
  637. ArgType ty = argTypes[j];
  638. if (ty == ARGTYPE_D) {
  639. Register r = findRegFor(ins->arg(j), FpRegs);
  640. underrunProtect(48);
  641. // We might be calling a varargs function.
  642. // So, make sure the GPRs are also loaded with
  643. // the value, or the stack contains it.
  644. if (GPRIndex <= O5) {
  645. LDSW32(SP, offset, GPRIndex);
  646. }
  647. GPRIndex = GPRIndex + 1;
  648. if (GPRIndex <= O5) {
  649. LDSW32(SP, offset+4, GPRIndex);
  650. }
  651. GPRIndex = GPRIndex + 1;
  652. STDF32(r, offset, SP);
  653. offset += 8;
  654. } else {
  655. if (GPRIndex > O5) {
  656. underrunProtect(12);
  657. Register r = findRegFor(ins->arg(j), GpRegs);
  658. STW32(r, offset, SP);
  659. } else {
  660. Register r = findSpecificRegFor(ins->arg(j), GPRIndex);
  661. }
  662. GPRIndex = GPRIndex + 1;
  663. offset += 4;
  664. }
  665. }
  666. }
  667. Register Assembler::nRegisterAllocFromSet(RegisterMask set)
  668. {
  669. // TODO: implement a faster way
  670. Register i = G0;
  671. while (!(set & rmask(i)))
  672. i = i + 1;
  673. _allocator.free &= ~rmask(i);
  674. return i;
  675. }
  676. void Assembler::nRegisterResetAll(RegAlloc& a)
  677. {
  678. a.clear();
  679. a.free = GpRegs | FpRegs;
  680. }
  681. void Assembler::nPatchBranch(NIns* branch, NIns* location)
  682. {
  683. *(uint32_t*)&branch[0] &= 0xFFC00000;
  684. *(uint32_t*)&branch[0] |= ((intptr_t)location >> 10) & 0x3FFFFF;
  685. *(uint32_t*)&branch[1] &= 0xFFFFFC00;
  686. *(uint32_t*)&branch[1] |= (intptr_t)location & 0x3FF;
  687. }
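// NOTE: patching assumes the branch site is the sethi/or pair laid down by
// JMP_long_nocheck: the high 22 bits of the new target are written into the sethi
// immediate (branch[0]) and the low 10 bits into the or immediate (branch[1]).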
  688. RegisterMask Assembler::nHint(LIns* ins)
  689. {
  690. // Never called, because no entry in nHints[] is PREFER_SPECIAL.
  691. NanoAssert(0);
  692. return 0;
  693. }
  694. bool Assembler::canRemat(LIns* ins)
  695. {
  696. return ins->isImmI() || ins->isop(LIR_allocp);
  697. }
  698. void Assembler::asm_restore(LIns* i, Register r)
  699. {
  700. underrunProtect(24);
  701. if (i->isop(LIR_allocp)) {
  702. ADD(FP, L2, r);
  703. int32_t d = deprecated_disp(i);
  704. SET32(d, L2);
  705. }
  706. else if (i->isImmI()) {
  707. int v = i->immI();
  708. SET32(v, r);
  709. } else {
  710. int d = findMemFor(i);
  711. if (rmask(r) & FpRegs) {
  712. LDDF32(FP, d, r);
  713. } else {
  714. LDSW32(FP, d, r);
  715. }
  716. }
  717. }
  718. void Assembler::asm_store32(LOpcode op, LIns *value, int dr, LIns *base)
  719. {
  720. switch (op) {
  721. case LIR_sti:
  722. case LIR_sti2c:
  723. case LIR_sti2s:
  724. // handled by mainline code below for now
  725. break;
  726. default:
  727. NanoAssertMsg(0, "asm_store32 should never receive this LIR opcode");
  728. return;
  729. }
  730. underrunProtect(20);
  731. if (value->isImmI())
  732. {
  733. Register rb = getBaseReg(base, dr, GpRegs);
  734. int c = value->immI();
  735. switch (op) {
  736. case LIR_sti:
  737. STW32(L2, dr, rb);
  738. break;
  739. case LIR_sti2c:
  740. STB32(L2, dr, rb);
  741. break;
  742. case LIR_sti2s:
  743. STH32(L2, dr, rb);
  744. break;
  745. }
  746. SET32(c, L2);
  747. }
  748. else
  749. {
  750. // make sure the value is in a register
  751. Register ra, rb;
  752. if (base->isImmI()) {
  753. // absolute address
  754. dr += base->immI();
  755. ra = findRegFor(value, GpRegs);
  756. rb = G0;
  757. } else {
  758. getBaseReg2(GpRegs, value, ra, GpRegs, base, rb, dr);
  759. }
  760. switch (op) {
  761. case LIR_sti:
  762. STW32(ra, dr, rb);
  763. break;
  764. case LIR_sti2c:
  765. STB32(ra, dr, rb);
  766. break;
  767. case LIR_sti2s:
  768. STH32(ra, dr, rb);
  769. break;
  770. }
  771. }
  772. }
  773. void Assembler::asm_spill(Register rr, int d, bool quad)
  774. {
  775. underrunProtect(24);
  776. (void)quad;
  777. NanoAssert(d);
  778. if (rmask(rr) & FpRegs) {
  779. STDF32(rr, d, FP);
  780. } else {
  781. STW32(rr, d, FP);
  782. }
  783. }
  784. void Assembler::asm_load64(LIns* ins)
  785. {
  786. switch (ins->opcode()) {
  787. case LIR_ldd:
  788. case LIR_ldf2d:
  789. // handled by mainline code below for now
  790. break;
  791. default:
  792. NanoAssertMsg(0, "asm_load64 should never receive this LIR opcode");
  793. return;
  794. }
  795. underrunProtect(48);
  796. LIns* base = ins->oprnd1();
  797. int db = ins->disp();
  798. Register rb = getBaseReg(base, db, GpRegs);
  799. if (ins->isInReg()) {
  800. Register rr = ins->getReg();
  801. asm_maybe_spill(ins, false);
  802. NanoAssert(rmask(rr) & FpRegs);
  803. if (ins->opcode() == LIR_ldd) {
  804. LDDF32(rb, db, rr);
  805. } else {
  806. FSTOD(F28, rr);
  807. LDF32(rb, db, F28);
  808. }
  809. } else {
  810. NanoAssert(ins->isInAr());
  811. int dr = arDisp(ins);
  812. if (ins->opcode() == LIR_ldd) {
  813. // don't use an fpu reg to simply load & store the value.
  814. asm_mmq(FP, dr, rb, db);
  815. } else {
  816. STDF32(F28, dr, FP);
  817. FSTOD(F28, F28);
  818. LDF32(rb, db, F28);
  819. }
  820. }
  821. freeResourcesOf(ins);
  822. }
  823. void Assembler::asm_store64(LOpcode op, LIns* value, int dr, LIns* base)
  824. {
  825. switch (op) {
  826. case LIR_std:
  827. case LIR_std2f:
  828. // handled by mainline code below for now
  829. break;
  830. default:
  831. NanoAssertMsg(0, "asm_store64 should never receive this LIR opcode");
  832. return;
  833. }
  834. underrunProtect(48);
  835. Register rb = getBaseReg(base, dr, GpRegs);
  836. if (op == LIR_std2f) {
  837. Register rv = ( !value->isInReg()
  838. ? findRegFor(value, FpRegs)
  839. : value->getReg() );
  840. NanoAssert(rmask(rv) & FpRegs);
  841. STF32(F28, dr, rb);
  842. FDTOS(rv, F28);
  843. return;
  844. }
  845. if (value->isImmD())
  846. {
  847. // if it's a constant 64-bit value, just store it now rather than
  848. // generating a pointless store/load/store sequence
  849. STW32(L2, dr+4, rb);
  850. SET32(value->immDlo(), L2);
  851. STW32(L2, dr, rb);
  852. SET32(value->immDhi(), L2);
  853. return;
  854. }
  855. if (value->isop(LIR_ldd))
  856. {
  857. // value is a 64-bit struct or int64_t, or maybe a double.
  858. // it may be live in an FPU reg. Either way, don't
  859. // put it in an FPU reg just to load & store it.
  860. // a) if we know it's not a double, this is right.
  861. // b) if we guarded that it's a double, this store could be on
  862. // the side exit, copying a non-double.
  863. // c) maybe it's a double just being stored. oh well.
  864. int da = findMemFor(value);
  865. asm_mmq(rb, dr, FP, da);
  866. return;
  867. }
  868. // if value already in a reg, use that, otherwise
  869. // get it into FPU regs.
  870. Register rv = ( !value->isInReg()
  871. ? findRegFor(value, FpRegs)
  872. : value->getReg() );
  873. STDF32(rv, dr, rb);
  874. }
  875. /**
  876. * copy 64 bits: (rd+dd) <- (rs+ds)
  877. */
  878. void Assembler::asm_mmq(Register rd, int dd, Register rs, int ds)
  879. {
  880. // value is either a 64-bit struct or maybe a float
  881. // that isn't live in an FPU reg. Either way, don't
  882. // put it in an FPU reg just to load & store it.
  883. Register t = registerAllocTmp(GpRegs & ~(rmask(rd)|rmask(rs)));
  884. STW32(t, dd+4, rd);
  885. LDSW32(rs, ds+4, t);
  886. STW32(t, dd, rd);
  887. LDSW32(rs, ds, t);
  888. }
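// NOTE: because emission is backwards, the run-time order is "load/store the word at
// offset 0, then the word at offset +4", going through a scratch GPR that is
// deliberately allocated away from both rd and rs.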
  889. Branches Assembler::asm_branch(bool branchOnFalse, LIns* cond, NIns* targ)
  890. {
  891. NIns* at = 0;
  892. LOpcode condop = cond->opcode();
  893. NanoAssert(cond->isCmp());
  894. if (isCmpDOpcode(condop))
  895. {
  896. return Branches(asm_branchd(branchOnFalse, cond, targ));
  897. }
  898. underrunProtect(32);
  899. intptr_t tt = ((intptr_t)targ - (intptr_t)_nIns + 8) >> 2;
  900. // !targ means that it needs patching.
  901. if( !(isIMM22((int32_t)tt)) || !targ ) {
  902. JMP_long_nocheck((intptr_t)targ);
  903. at = _nIns;
  904. NOP();
  905. BA(0, 5);
  906. tt = 4;
  907. }
  908. NOP();
  909. // produce the branch
  910. if (branchOnFalse)
  911. {
  912. if (condop == LIR_eqi)
  913. BNE(0, tt);
  914. else if (condop == LIR_lti)
  915. BGE(0, tt);
  916. else if (condop == LIR_lei)
  917. BG(0, tt);
  918. else if (condop == LIR_gti)
  919. BLE(0, tt);
  920. else if (condop == LIR_gei)
  921. BL(0, tt);
  922. else if (condop == LIR_ltui)
  923. BCC(0, tt);
  924. else if (condop == LIR_leui)
  925. BGU(0, tt);
  926. else if (condop == LIR_gtui)
  927. BLEU(0, tt);
  928. else //if (condop == LIR_geui)
  929. BCS(0, tt);
  930. }
  931. else // op == LIR_xt
  932. {
  933. if (condop == LIR_eqi)
  934. BE(0, tt);
  935. else if (condop == LIR_lti)
  936. BL(0, tt);
  937. else if (condop == LIR_lei)
  938. BLE(0, tt);
  939. else if (condop == LIR_gti)
  940. BG(0, tt);
  941. else if (condop == LIR_gei)
  942. BGE(0, tt);
  943. else if (condop == LIR_ltui)
  944. BCS(0, tt);
  945. else if (condop == LIR_leui)
  946. BLEU(0, tt);
  947. else if (condop == LIR_gtui)
  948. BGU(0, tt);
  949. else //if (condop == LIR_geui)
  950. BCC(0, tt);
  951. }
  952. asm_cmp(cond);
  953. return Branches(at);
  954. }
  955. NIns* Assembler::asm_branch_ov(LOpcode op, NIns* targ)
  956. {
  957. NIns* at = 0;
  958. underrunProtect(32);
  959. intptr_t tt = ((intptr_t)targ - (intptr_t)_nIns + 8) >> 2;
  960. // !targ means that it needs patching.
  961. if( !(isIMM22((int32_t)tt)) || !targ ) {
  962. JMP_long_nocheck((intptr_t)targ);
  963. at = _nIns;
  964. NOP();
  965. BA(0, 5);
  966. tt = 4;
  967. }
  968. NOP();
  969. if( op == LIR_mulxovi || op == LIR_muljovi )
  970. BNE(0, tt);
  971. else
  972. BVS(0, tt);
  973. return at;
  974. }
  975. void Assembler::asm_cmp(LIns *cond)
  976. {
  977. underrunProtect(12);
  978. LIns* lhs = cond->oprnd1();
  979. LIns* rhs = cond->oprnd2();
  980. NanoAssert(lhs->isI() && rhs->isI());
  981. // ready to issue the compare
  982. if (rhs->isImmI())
  983. {
  984. int c = rhs->immI();
  985. Register r = findRegFor(lhs, GpRegs);
  986. if (c == 0 && cond->isop(LIR_eqi)) {
  987. ANDCC(r, r, G0);
  988. }
  989. else {
  990. SUBCC(r, L2, G0);
  991. SET32(c, L2);
  992. }
  993. }
  994. else
  995. {
  996. Register ra, rb;
  997. findRegFor2(GpRegs, lhs, ra, GpRegs, rhs, rb);
  998. SUBCC(ra, rb, G0);
  999. }
  1000. }
  1001. void Assembler::asm_condd(LIns* ins)
  1002. {
  1003. // only want certain regs
  1004. Register r = deprecated_prepResultReg(ins, AllowableFlagRegs);
  1005. underrunProtect(8);
  1006. LOpcode condop = ins->opcode();
  1007. NanoAssert(isCmpDOpcode(condop));
  1008. if (condop == LIR_eqd)
  1009. MOVFEI(1, r);
  1010. else if (condop == LIR_led)
  1011. MOVFLEI(1, r);
  1012. else if (condop == LIR_ltd)
  1013. MOVFLI(1, r);
  1014. else if (condop == LIR_ged)
  1015. MOVFGEI(1, r);
  1016. else // if (condop == LIR_gtd)
  1017. MOVFGI(1, r);
  1018. ORI(G0, 0, r);
  1019. asm_cmpd(ins);
  1020. }
  1021. void Assembler::asm_cond(LIns* ins)
  1022. {
  1023. underrunProtect(8);
  1024. // only want certain regs
  1025. LOpcode op = ins->opcode();
  1026. Register r = deprecated_prepResultReg(ins, AllowableFlagRegs);
  1027. if (op == LIR_eqi)
  1028. MOVEI(1, r);
  1029. else if (op == LIR_lti)
  1030. MOVLI(1, r);
  1031. else if (op == LIR_lei)
  1032. MOVLEI(1, r);
  1033. else if (op == LIR_gti)
  1034. MOVGI(1, r);
  1035. else if (op == LIR_gei)
  1036. MOVGEI(1, r);
  1037. else if (op == LIR_ltui)
  1038. MOVCSI(1, r);
  1039. else if (op == LIR_leui)
  1040. MOVLEUI(1, r);
  1041. else if (op == LIR_gtui)
  1042. MOVGUI(1, r);
  1043. else // if (op == LIR_geui)
  1044. MOVCCI(1, r);
  1045. ORI(G0, 0, r);
  1046. asm_cmp(ins);
  1047. }
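// NOTE: asm_cond (and asm_condd above) appear to build a boolean result by zeroing the
// register first (the ORI(G0, 0, r) emitted last runs first) and then conditionally
// moving 1 into it; the compare itself comes from asm_cmp()/asm_cmpd() and therefore
// executes before both.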
  1048. void Assembler::asm_arith(LIns* ins)
  1049. {
  1050. underrunProtect(28);
  1051. LOpcode op = ins->opcode();
  1052. LIns* lhs = ins->oprnd1();
  1053. LIns* rhs = ins->oprnd2();
  1054. Register rb = deprecated_UnknownReg;
  1055. RegisterMask allow = GpRegs;
  1056. bool forceReg = (op == LIR_muli || op == LIR_mulxovi || op == LIR_muljovi || !rhs->isImmI());
  1057. if (lhs != rhs && forceReg)
  1058. {
  1059. if ((rb = asm_binop_rhs_reg(ins)) == deprecated_UnknownReg) {
  1060. rb = findRegFor(rhs, allow);
  1061. }
  1062. allow &= ~rmask(rb);
  1063. }
  1064. else if ((op == LIR_addi || op == LIR_addxovi || op == LIR_addjovi) && lhs->isop(LIR_allocp) && rhs->isImmI()) {
  1065. // add alloc+const, use lea
  1066. Register rr = deprecated_prepResultReg(ins, allow);
  1067. int d = findMemFor(lhs) + rhs->immI();
  1068. ADD(FP, L2, rr);
  1069. SET32(d, L2);
  1070. return;
  1071. }
  1072. Register rr = deprecated_prepResultReg(ins, allow);
  1073. // if this is last use of lhs in reg, we can re-use result reg
  1074. // else, lhs already has a register assigned.
  1075. Register ra = ( !lhs->isInReg()
  1076. ? findSpecificRegFor(lhs, rr)
  1077. : lhs->deprecated_getReg() );
  1078. if (forceReg)
  1079. {
  1080. if (lhs == rhs)
  1081. rb = ra;
  1082. if (op == LIR_addi || op == LIR_addxovi || op == LIR_addjovi)
  1083. ADDCC(rr, rb, rr);
  1084. else if (op == LIR_subi || op == LIR_subxovi || op == LIR_subjovi)
  1085. SUBCC(rr, rb, rr);
  1086. else if (op == LIR_muli)
  1087. SMULCC(rr, rb, rr);
  1088. else if (op == LIR_mulxovi || op == LIR_muljovi) {
  1089. SUBCC(L4, L6, L4);
  1090. SRAI(rr, 31, L6);
  1091. RDY(L4);
  1092. SMULCC(rr, rb, rr);
  1093. }
  1094. else if (op == LIR_andi)
  1095. AND(rr, rb, rr);
  1096. else if (op == LIR_ori)
  1097. OR(rr, rb, rr);
  1098. else if (op == LIR_xori)
  1099. XOR(rr, rb, rr);
  1100. else if (op == LIR_lshi)
  1101. SLL(rr, rb, rr);
  1102. else if (op == LIR_rshi)
  1103. SRA(rr, rb, rr);
  1104. else if (op == LIR_rshui)
  1105. SRL(rr, rb, rr);
  1106. else
  1107. NanoAssertMsg(0, "Unsupported");
  1108. }
  1109. else
  1110. {
  1111. int c = rhs->immI();
  1112. if (op == LIR_addi || op == LIR_addxovi || op == LIR_addjovi)
  1113. ADDCC(rr, L2, rr);
  1114. else if (op == LIR_subi || op == LIR_subxovi || op == LIR_subjovi)
  1115. SUBCC(rr, L2, rr);
  1116. else if (op == LIR_andi)
  1117. AND(rr, L2, rr);
  1118. else if (op == LIR_ori)
  1119. OR(rr, L2, rr);
  1120. else if (op == LIR_xori)
  1121. XOR(rr, L2, rr);
  1122. else if (op == LIR_lshi)
  1123. SLL(rr, L2, rr);
  1124. else if (op == LIR_rshi)
  1125. SRA(rr, L2, rr);
  1126. else if (op == LIR_rshui)
  1127. SRL(rr, L2, rr);
  1128. else
  1129. NanoAssertMsg(0, "Unsupported");
  1130. SET32(c, L2);
  1131. }
  1132. if ( rr != ra )
  1133. ORI(ra, 0, rr);
  1134. }
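// NOTE: for LIR_mulxovi/LIR_muljovi the overflow check inspects the full 64-bit
// product. In run-time order: SMULCC multiplies and leaves the high 32 bits in %y,
// RDY copies %y into L4, SRAI puts the sign-extension of the low 32 bits into L6, and
// SUBCC compares the two -- the product overflows a signed 32-bit result exactly when
// they differ, which asm_branch_ov() tests with BNE.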
  1135. void Assembler::asm_neg_not(LIns* ins)
  1136. {
  1137. underrunProtect(8);
  1138. LOpcode op = ins->opcode();
  1139. Register rr = deprecated_prepResultReg(ins, GpRegs);
  1140. LIns* lhs = ins->oprnd1();
  1141. // if this is last use of lhs in reg, we can re-use result reg
  1142. // else, lhs already has a register assigned.
  1143. Register ra = ( !lhs->isInReg()
  1144. ? findSpecificRegFor(lhs, rr)
  1145. : lhs->deprecated_getReg() );
  1146. if (op == LIR_noti)
  1147. ORN(G0, rr, rr);
  1148. else
  1149. SUB(G0, rr, rr);
  1150. if ( rr != ra )
  1151. ORI(ra, 0, rr);
  1152. }
  1153. void Assembler::asm_load32(LIns* ins)
  1154. {
  1155. underrunProtect(12);
  1156. LOpcode op = ins->opcode();
  1157. LIns* base = ins->oprnd1();
  1158. int d = ins->disp();
  1159. Register rr = deprecated_prepResultReg(ins, GpRegs);
  1160. Register ra = getBaseReg(base, d, GpRegs);
  1161. switch(op) {
  1162. case LIR_lduc2ui:
  1163. LDUB32(ra, d, rr);
  1164. break;
  1165. case LIR_ldus2ui:
  1166. LDUH32(ra, d, rr);
  1167. break;
  1168. case LIR_ldi:
  1169. LDSW32(ra, d, rr);
  1170. break;
  1171. case LIR_ldc2i:
  1172. LDSB32(ra, d, rr);
  1173. break;
  1174. case LIR_lds2i:
  1175. LDSH32(ra, d, rr);
  1176. break;
  1177. default:
  1178. NanoAssertMsg(0, "asm_load32 should never receive this LIR opcode");
  1179. return;
  1180. }
  1181. }
  1182. void Assembler::asm_cmov(LIns* ins)
  1183. {
  1184. underrunProtect(4);
  1185. LOpcode op = ins->opcode();
  1186. LIns* condval = ins->oprnd1();
  1187. LIns* iftrue = ins->oprnd2();
  1188. LIns* iffalse = ins->oprnd3();
  1189. NanoAssert(condval->isCmp());
  1190. NanoAssert((op == LIR_cmovi && iftrue->isI() && iffalse->isI()) ||
  1191. (op == LIR_cmovd && iftrue->isD() && iffalse->isD()));
  1192. RegisterMask rm = (op == LIR_cmovi) ? GpRegs : FpRegs;
  1193. const Register rr = deprecated_prepResultReg(ins, rm);
  1194. const Register iffalsereg = findRegFor(iffalse, rm & ~rmask(rr));
  1195. bool isIcc = true;
  1196. if (op == LIR_cmovi) {
  1197. switch (condval->opcode()) {
  1198. // note that these are all opposites...
  1199. case LIR_eqi: MOVNE (iffalsereg, rr); break;
  1200. case LIR_lti: MOVGE (iffalsereg, rr); break;
  1201. case LIR_lei: MOVG (iffalsereg, rr); break;
  1202. case LIR_gti: MOVLE (iffalsereg, rr); break;
  1203. case LIR_gei: MOVL (iffalsereg, rr); break;
  1204. case LIR_ltui: MOVCC (iffalsereg, rr); break;
  1205. case LIR_leui: MOVGU (iffalsereg, rr); break;
  1206. case LIR_gtui: MOVLEU(iffalsereg, rr); break;
  1207. case LIR_geui: MOVCS (iffalsereg, rr); break;
  1208. debug_only( default: NanoAssert(0); break; )
  1209. }
  1210. } else {
  1211. switch (condval->opcode()) {
  1212. // note that these are all opposites...
  1213. case LIR_eqi: FMOVDNE (iffalsereg, rr); break;
  1214. case LIR_lti: FMOVDGE (iffalsereg, rr); break;
  1215. case LIR_lei: FMOVDG (iffalsereg, rr); break;
  1216. case LIR_gti: FMOVDLE (iffalsereg, rr); break;
  1217. case LIR_gei: FMOVDL (iffalsereg, rr); break;
  1218. case LIR_ltui: FMOVDCC (iffalsereg, rr); break;
  1219. case LIR_leui: FMOVDGU (iffalsereg, rr); break;
  1220. case LIR_gtui: FMOVDLEU (iffalsereg, rr); break;
  1221. case LIR_geui: FMOVDCS (iffalsereg, rr); break;
  1222. case LIR_eqd: FMOVDFNE (iffalsereg, rr); isIcc = false; break;
  1223. case LIR_led: FMOVDFUG (iffalsereg, rr); isIcc = false; break;
  1224. case LIR_ltd: FMOVDFUGE(iffalsereg, rr); isIcc = false; break;
  1225. case LIR_ged: FMOVDFUL (iffalsereg, rr); isIcc = false; break;
  1226. case LIR_gtd: FMOVDFULE(iffalsereg, rr); isIcc = false; break;
  1227. debug_only( default: NanoAssert(0); break; )
  1228. }
  1229. }
  1230. /*const Register iftruereg =*/ findSpecificRegFor(iftrue, rr);
  1231. if (isIcc)
  1232. asm_cmp(condval);
  1233. else
  1234. asm_cmpd(condval);
  1235. }
  1236. void Assembler::asm_param(LIns* ins)
  1237. {
  1238. underrunProtect(12);
  1239. uint32_t a = ins->paramArg();
  1240. NanoAssertMsg(ins->paramKind() == 0, "savedRegs are not used on SPARC");
  1241. if (a < sizeof(argRegs)/sizeof(argRegs[0])) { // i0 - i5
  1242. prepareResultReg(ins, rmask(argRegs[a]));
  1243. } else {
  1244. // Incoming arg is on stack
  1245. Register r = prepareResultReg(ins, GpRegs);
  1246. int32_t d = a * sizeof (intptr_t) + kLinkageAreaSize;
  1247. LDSW32(FP, d, r);
  1248. }
  1249. freeResourcesOf(ins);
  1250. }
  1251. void Assembler::asm_immi(LIns* ins)
  1252. {
  1253. underrunProtect(8);
  1254. Register rr = deprecated_prepResultReg(ins, GpRegs);
  1255. int32_t val = ins->immI();
  1256. if (val == 0)
  1257. XOR(rr, rr, rr);
  1258. else
  1259. SET32(val, rr);
  1260. }
  1261. void Assembler::asm_immd(LIns* ins)
  1262. {
  1263. underrunProtect(64);
  1264. Register rr = ins->deprecated_getReg();
  1265. if (rr != deprecated_UnknownReg)
  1266. {
  1267. // @todo -- add special-cases for 0 and 1
  1268. _allocator.retire(rr);
  1269. ins->clearReg();
  1270. NanoAssert((rmask(rr) & FpRegs) != 0);
  1271. findMemFor(ins);
  1272. int d = deprecated_disp(ins);
  1273. LDDF32(FP, d, rr);
  1274. }
  1275. // @todo, if we used xor, ldsd, fldz, etc above, we don't need mem here
  1276. int d = deprecated_disp(ins);
  1277. deprecated_freeRsrcOf(ins);
  1278. if (d)
  1279. {
  1280. STW32(L2, d+4, FP);
  1281. SET32(ins->immDlo(), L2);
  1282. STW32(L2, d, FP);
  1283. SET32(ins->immDhi(), L2);
  1284. }
  1285. }
  1286. void Assembler::asm_fneg(LIns* ins)
  1287. {
  1288. underrunProtect(4);
  1289. Register rr = deprecated_prepResultReg(ins, FpRegs);
  1290. LIns* lhs = ins->oprnd1();
  1291. // lhs into reg, prefer same reg as result
  1292. // if this is last use of lhs in reg, we can re-use result reg
  1293. // else, lhs already has a different reg assigned
  1294. Register ra = ( !lhs->isInReg()
  1295. ? findSpecificRegFor(lhs, rr)
  1296. : findRegFor(lhs, FpRegs) );
  1297. FNEGD(ra, rr);
  1298. }
  1299. void Assembler::asm_fop(LIns* ins)
  1300. {
  1301. underrunProtect(4);
  1302. LOpcode op = ins->opcode();
  1303. LIns *lhs = ins->oprnd1();
  1304. LIns *rhs = ins->oprnd2();
  1305. RegisterMask allow = FpRegs;
  1306. Register ra, rb;
  1307. findRegFor2(allow, lhs, ra, allow, rhs, rb);
  1308. Register rr = deprecated_prepResultReg(ins, allow);
  1309. if (op == LIR_addd)
  1310. FADDD(ra, rb, rr);
  1311. else if (op == LIR_subd)
  1312. FSUBD(ra, rb, rr);
  1313. else if (op == LIR_muld)
  1314. FMULD(ra, rb, rr);
  1315. else //if (op == LIR_divd)
  1316. FDIVD(ra, rb, rr);
  1317. }
  1318. void Assembler::asm_i2d(LIns* ins)
  1319. {
  1320. underrunProtect(32);
  1321. // where our result goes
  1322. Register rr = deprecated_prepResultReg(ins, FpRegs);
  1323. int d = findMemFor(ins->oprnd1());
  1324. FITOD(rr, rr);
  1325. LDDF32(FP, d, rr);
  1326. }
  1327. void Assembler::asm_ui2d(LIns* ins)
  1328. {
  1329. underrunProtect(72);
  1330. // where our result goes
  1331. Register rr = deprecated_prepResultReg(ins, FpRegs);
  1332. Register rt = registerAllocTmp(FpRegs & ~(rmask(rr)));
  1333. Register gr = findRegFor(ins->oprnd1(), GpRegs);
  1334. int disp = -8;
  1335. FABSS(rr, rr);
  1336. FSUBD(rt, rr, rr);
  1337. LDDF32(SP, disp, rr);
  1338. STWI(G0, disp+4, SP);
  1339. LDDF32(SP, disp, rt);
  1340. STWI(gr, disp+4, SP);
  1341. STWI(G1, disp, SP);
  1342. SETHI(0x43300000, G1);
  1343. }
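// NOTE: asm_ui2d seems to rely on the 2^52 bias trick. In run-time order it stores the
// doubleword { hi = 0x43300000, lo = x } on the stack (the IEEE-754 double 2^52 + x
// for any uint32 x), loads it into rt, zeroes the low word and reloads to get 2^52 in
// rr, then FSUBD leaves (double)x in the result register.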
  1344. void Assembler::asm_d2i(LIns* ins) {
  1345. underrunProtect(28);
  1346. LIns *lhs = ins->oprnd1();
  1347. Register rr = prepareResultReg(ins, GpRegs);
  1348. Register ra = findRegFor(lhs, FpRegs);
  1349. int d = findMemFor(ins);
  1350. LDSW32(FP, d, rr);
  1351. STF32(ra, d, FP);
  1352. FDTOI(ra, ra);
  1353. freeResourcesOf(ins);
  1354. }
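// NOTE: there is no direct FP-to-integer register move used here, so the conversion
// round-trips through memory: FDTOI leaves the truncated integer in an FP register,
// STF32 spills it to the instruction's stack slot, and LDSW32 reloads it into the GP
// result register.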
  1355. void Assembler::asm_nongp_copy(Register r, Register s)
  1356. {
  1357. underrunProtect(4);
  1358. NanoAssert((rmask(r) & FpRegs) && (rmask(s) & FpRegs));
  1359. FMOVD(s, r);
  1360. }
  1361. NIns * Assembler::asm_branchd(bool branchOnFalse, LIns *cond, NIns *targ)
  1362. {
  1363. NIns *at = 0;
  1364. LOpcode condop = cond->opcode();
  1365. NanoAssert(isCmpDOpcode(condop));
  1366. underrunProtect(32);
  1367. intptr_t tt = ((intptr_t)targ - (intptr_t)_nIns + 8) >> 2;
  1368. // !targ means that it needs patching.
  1369. if( !(isIMM22((int32_t)tt)) || !targ ) {
  1370. JMP_long_nocheck((intptr_t)targ);
  1371. at = _nIns;
  1372. NOP();
  1373. BA(0, 5);
  1374. tt = 4;
  1375. }
  1376. NOP();
  1377. // produce the branch
  1378. if (branchOnFalse)
  1379. {
  1380. if (condop == LIR_eqd)
  1381. FBNE(0, tt);
  1382. else if (condop == LIR_led)
  1383. FBUG(0, tt);
  1384. else if (condop == LIR_ltd)
  1385. FBUGE(0, tt);
  1386. else if (condop == LIR_ged)
  1387. FBUL(0, tt);
  1388. else //if (condop == LIR_gtd)
  1389. FBULE(0, tt);
  1390. }
  1391. else // op == LIR_xt
  1392. {
  1393. if (condop == LIR_eqd)
  1394. FBE(0, tt);
  1395. else if (condop == LIR_led)
  1396. FBLE(0, tt);
  1397. else if (condop == LIR_ltd)
  1398. FBL(0, tt);
  1399. else if (condop == LIR_ged)
  1400. FBGE(0, tt);
  1401. else //if (condop == LIR_gtd)
  1402. FBG(0, tt);
  1403. }
  1404. asm_cmpd(cond);
  1405. return at;
  1406. }
  1407. void Assembler::asm_cmpd(LIns *cond)
  1408. {
  1409. underrunProtect(4);
  1410. LIns* lhs = cond->oprnd1();
  1411. LIns* rhs = cond->oprnd2();
  1412. Register rLhs = findRegFor(lhs, FpRegs);
  1413. Register rRhs = findRegFor(rhs, FpRegs);
  1414. FCMPD(rLhs, rRhs);
  1415. }
  1416. void Assembler::nativePageReset()
  1417. {
  1418. }
  1419. Register Assembler::asm_binop_rhs_reg(LIns* ins)
  1420. {
  1421. return deprecated_UnknownReg;
  1422. }
  1423. void Assembler::nativePageSetup()
  1424. {
  1425. NanoAssert(!_inExit);
  1426. if (!_nIns)
  1427. codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
  1428. }
  1429. // Increment the 32-bit profiling counter at pCtr, without
  1430. // changing any registers.
  1431. verbose_only(
  1432. void Assembler::asm_inc_m32(uint32_t*)
  1433. {
  1434. // todo: implement this
  1435. }
  1436. )
  1437. void
  1438. Assembler::underrunProtect(int n)
  1439. {
  1440. NIns *eip = _nIns;
  1441. // This may be in a normal code chunk or an exit code chunk.
  1442. if (eip - n < codeStart) {
  1443. codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
  1444. JMP_long_nocheck((intptr_t)eip);
  1445. }
  1446. }
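// NOTE: code is generated downwards, from the end of a chunk toward codeStart.
// underrunProtect(n) guarantees at least n bytes remain below _nIns; if not, a fresh
// chunk is allocated and linked to the already-emitted code with a long jump back to
// the old position.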
  1447. void Assembler::asm_ret(LIns* ins)
  1448. {
  1449. genEpilogue();
  1450. releaseRegisters();
  1451. assignSavedRegs();
  1452. LIns *val = ins->oprnd1();
  1453. if (ins->isop(LIR_reti)) {
  1454. findSpecificRegFor(val, retRegs[0]);
  1455. } else {
  1456. NanoAssert(ins->isop(LIR_retd));
  1457. findSpecificRegFor(val, F0);
  1458. }
  1459. }
  1460. void Assembler::swapCodeChunks() {
  1461. if (!_nExitIns)
  1462. codeAlloc(exitStart, exitEnd, _nExitIns verbose_only(, exitBytes));
  1463. SWAP(NIns*, _nIns, _nExitIns);
  1464. SWAP(NIns*, codeStart, exitStart);
  1465. SWAP(NIns*, codeEnd, exitEnd);
  1466. verbose_only( SWAP(size_t, codeBytes, exitBytes); )
  1467. }
  1468. void Assembler::asm_insert_random_nop() {
  1469. NanoAssert(0); // not supported
  1470. }
  1471. #endif /* FEATURE_NANOJIT */
  1472. }