PageRenderTime 177ms CodeModel.GetById 16ms app.highlight 150ms RepoModel.GetById 2ms app.codeStats 0ms

/js/src/nanojit/NativeSparc.cpp

http://github.com/zpao/v8monkey
C++ | 1645 lines | 1366 code | 173 blank | 106 comment | 345 complexity | 09d312df707a0a2eda395986d9e7f3c1 MD5 | raw file

Large files are truncated, but you can click here to view the full file

   1/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
   2/* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
   3/* ***** BEGIN LICENSE BLOCK *****
   4 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
   5 *
   6 * The contents of this file are subject to the Mozilla Public License Version
   7 * 1.1 (the "License"); you may not use this file except in compliance with
   8 * the License. You may obtain a copy of the License at
   9 * http://www.mozilla.org/MPL/
  10 *
  11 * Software distributed under the License is distributed on an "AS IS" basis,
  12 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
  13 * for the specific language governing rights and limitations under the
  14 * License.
  15 *
  16 * The Original Code is [Open Source Virtual Machine].
  17 *
  18 * The Initial Developer of the Original Code is
  19 * Adobe System Incorporated.
  20 * Portions created by the Initial Developer are Copyright (C) 2004-2007
  21 * the Initial Developer. All Rights Reserved.
  22 *
  23 * Contributor(s):
  24 *   Adobe AS3 Team
  25 *   leon.sha@oracle.com
  26 *   ginn.chen@oracle.com
  27 *
  28 * Alternatively, the contents of this file may be used under the terms of
  29 * either the GNU General Public License Version 2 or later (the "GPL"), or
  30 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
  31 * in which case the provisions of the GPL or the LGPL are applicable instead
  32 * of those above. If you wish to allow use of your version of this file only
  33 * under the terms of either the GPL or the LGPL, and not to allow others to
  34 * use your version of this file under the terms of the MPL, indicate your
  35 * decision by deleting the provisions above and replace them with the notice
  36 * and other provisions required by the GPL or the LGPL. If you do not delete
  37 * the provisions above, a recipient may use your version of this file under
  38 * the terms of any one of the MPL, the GPL or the LGPL.
  39 *
  40 * ***** END LICENSE BLOCK ***** */
  41
  42#include <sys/types.h>
  43#include <sys/mman.h>
  44#include <errno.h>
  45#include "nanojit.h"
  46
  47namespace nanojit
  48{
  49#ifdef FEATURE_NANOJIT
  50
#ifdef NJ_VERBOSE
    // Printable names for the 64 SPARC registers, indexed by register number:
    // 32 integer registers (%g, %o, %l, %i — with %o6 shown as %sp and %i6 as
    // %fp) followed by the 32 single-precision float registers. Verbose
    // listings only.
    const char *regNames[] = {
        "%g0", "%g1", "%g2", "%g3", "%g4", "%g5", "%g6", "%g7",
        "%o0", "%o1", "%o2", "%o3", "%o4", "%o5", "%sp", "%o7",
        "%l0", "%l1", "%l2", "%l3", "%l4", "%l5", "%l6", "%l7",
        "%i0", "%i1", "%i2", "%i3", "%i4", "%i5", "%fp", "%i7",
        "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
        "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
        "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
        "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31"
    };
#endif
  63
    // Incoming argument registers (the caller's %o0-%o5 appear here as
    // %i0-%i5 after our SAVE) and the integer return register.
    const Register Assembler::argRegs[] = { I0, I1, I2, I3, I4, I5 };
    const Register Assembler::retRegs[] = { O0 };
    const Register Assembler::savedRegs[] = { L1 }; // Dummy element not used, as NumSavedRegs == 0

    // Bytes reserved in each frame below the locals: register-window/linkage
    // area and the outgoing-parameter (callee) area.
    static const int kLinkageAreaSize = 68;
    static const int kcalleeAreaSize = 80; // The max size.

// Round v up to the next multiple of q (q must be a power of two).
#define BIT_ROUND_UP(v,q)      ( (((uintptr_t)v)+(q)-1) & ~((q)-1) )
// Report an unimplemented case in verbose output, then assert.
#define TODO(x) do{ verbose_only(outputf(#x);) NanoAssertMsgf(false, "%s", #x); } while(0)
  73
    // Emit a SPARC CALL (op = 01) to ci->_address: a 30-bit PC-relative word
    // displacement packed into the low bits of the instruction.
    inline void Assembler::CALL(const CallInfo* ci) {
        // NOTE(review): the "+ 4" appears to compensate for IMM32 adjusting
        // _nIns before the word is stored — confirm against IMM32's definition.
        int32_t offset = (ci->_address) - ((int32_t)_nIns) + 4;
        int32_t i = 0x40000000 | ((offset >> 2) & 0x3FFFFFFF);
        IMM32(i);
        asm_output("call %s",(ci->_name));
    }
  80
    // Format-3 integer ALU instruction, register-register form; op3 is the
    // SPARC opcode field selecting the operation, opcode is its mnemonic for
    // the verbose listing.
    inline void Assembler::IntegerOperation
        (Register rs1, Register rs2, Register rd, int32_t op3, const char *opcode) {
        Format_3_1(2, rd, op3, rs1, 0, rs2);
        asm_output("%s %s, %s, %s", opcode, gpn(rs1), gpn(rs2), gpn(rd));
    }

    // Same, but the second operand is a 13-bit signed immediate.
    inline void Assembler::IntegerOperationI
        (Register rs1, int32_t simm13, Register rd, int32_t op3, const char *opcode) {
        Format_3_1I(2, rd, op3, rs1, simm13);
        asm_output("%s %s, %d, %s", opcode, gpn(rs1), simm13, gpn(rd));
    }
  92
    // Integer ALU wrappers; the hex constant in each is the SPARC op3 field.
    inline void Assembler::ADD(Register rs1, Register rs2, Register rd) {
        IntegerOperation(rs1, rs2, rd, 0, "add");
    }
    inline void Assembler::ADDCC(Register rs1, Register rs2, Register rd) {
        IntegerOperation(rs1, rs2, rd, 0x10, "addcc");
    }
    inline void Assembler::AND(Register rs1, Register rs2, Register rd) {
        IntegerOperation(rs1, rs2, rd, 0x1, "and");
    }
    inline void Assembler::ANDCC(Register rs1, Register rs2, Register rd) {
        IntegerOperation(rs1, rs2, rd, 0x11, "andcc");
    }
    inline void Assembler::OR(Register rs1, Register rs2, Register rd) {
        IntegerOperation(rs1, rs2, rd, 0x2, "or");
    }
    inline void Assembler::ORI(Register rs1, int32_t simm13, Register rd) {
        IntegerOperationI(rs1, simm13, rd, 0x2, "or");
    }
    inline void Assembler::ORN(Register rs1, Register rs2, Register rd) {
        IntegerOperation(rs1, rs2, rd, 0x6, "orn");
    }
    inline void Assembler::SMULCC(Register rs1, Register rs2, Register rd) {
        IntegerOperation(rs1, rs2, rd, 0x1b, "smulcc");
    }
    inline void Assembler::SUB(Register rs1, Register rs2, Register rd) {
        IntegerOperation(rs1, rs2, rd, 0x4, "sub");
    }
    inline void Assembler::SUBCC(Register rs1, Register rs2, Register rd) {
        IntegerOperation(rs1, rs2, rd, 0x14, "subcc");
    }
    inline void Assembler::SUBI(Register rs1, int32_t simm13, Register rd) {
        IntegerOperationI(rs1, simm13, rd, 0x4, "sub");
    }
    inline void Assembler::XOR(Register rs1, Register rs2, Register rd) {
        IntegerOperation(rs1, rs2, rd, 0x3, "xor");
    }
 129
    // Integer conditional branch (Bicc): a = annul bit, dsp22 = signed word
    // displacement, cond = SPARC condition field.
    inline void Assembler::Bicc(int32_t a, int32_t dsp22, int32_t cond, const char *opcode) {
        Format_2_2(a, cond, 0x2, dsp22);
        asm_output("%s 0x%x", opcode, _nIns + dsp22 - 1);
    }

    // One wrapper per condition code; the hex constant is the cond field.
    inline void Assembler::BA  (int32_t a, int32_t dsp22) { Bicc(a, dsp22, 0x8, "ba");   }
    inline void Assembler::BE  (int32_t a, int32_t dsp22) { Bicc(a, dsp22, 0x1, "be");   }
    inline void Assembler::BNE (int32_t a, int32_t dsp22) { Bicc(a, dsp22, 0x9, "bne");  }
    inline void Assembler::BG  (int32_t a, int32_t dsp22) { Bicc(a, dsp22, 0xa, "bg");   }
    inline void Assembler::BGU (int32_t a, int32_t dsp22) { Bicc(a, dsp22, 0xc, "bgu");  }
    inline void Assembler::BGE (int32_t a, int32_t dsp22) { Bicc(a, dsp22, 0xb, "bge");  }
    inline void Assembler::BL  (int32_t a, int32_t dsp22) { Bicc(a, dsp22, 0x3, "bl");  }
    inline void Assembler::BLE (int32_t a, int32_t dsp22) { Bicc(a, dsp22, 0x2, "ble");  }
    inline void Assembler::BLEU(int32_t a, int32_t dsp22) { Bicc(a, dsp22, 0x4, "bleu"); }
    inline void Assembler::BCC (int32_t a, int32_t dsp22) { Bicc(a, dsp22, 0xd, "bcc");  }
    inline void Assembler::BCS (int32_t a, int32_t dsp22) { Bicc(a, dsp22, 0x5, "bcs");  }
    inline void Assembler::BVC (int32_t a, int32_t dsp22) { Bicc(a, dsp22, 0xf, "bvc");  }
    inline void Assembler::BVS (int32_t a, int32_t dsp22) { Bicc(a, dsp22, 0x7, "bvs");  }
 148
    // Single-precision absolute value (FPop1, opf 0x9).
    inline void Assembler::FABSS(Register rs2, Register rd) {
        Format_3_8(2, rd, 0x34, G0, 0x9, rs2);
        asm_output("fabs %s, %s", gpn(rs2), gpn(rd));
    }

    // Double-precision add (FPop1, opf 0x42).
    inline void Assembler::FADDD(Register rs1, Register rs2, Register rd) {
        Format_3_8(2, rd, 0x34, rs1, 0x42, rs2);
        asm_output("faddd %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd));
    }
 158
    // Floating-point conditional branch (FBfcc): same shape as Bicc but with
    // op2 = 0x6 and FP condition codes.
    inline void Assembler::FBfcc(int32_t a, int32_t dsp22, int32_t cond, const char *opcode) {
        Format_2_2(a, cond, 0x6, dsp22);
        asm_output("%s 0x%x", opcode, _nIns + dsp22 - 1);
    }

    // One wrapper per FP condition; the hex constant is the cond field.
    inline void Assembler::FBE  (int32_t a, int32_t dsp22) { FBfcc(a, dsp22, 0x9, "fbe");   }
    inline void Assembler::FBNE (int32_t a, int32_t dsp22) { FBfcc(a, dsp22, 0x1, "fbne");  }
    inline void Assembler::FBUE (int32_t a, int32_t dsp22) { FBfcc(a, dsp22, 0xa, "fbue");  }
    inline void Assembler::FBG  (int32_t a, int32_t dsp22) { FBfcc(a, dsp22, 0x6, "fbg");   }
    inline void Assembler::FBUG (int32_t a, int32_t dsp22) { FBfcc(a, dsp22, 0x5, "fbug");  }
    inline void Assembler::FBGE (int32_t a, int32_t dsp22) { FBfcc(a, dsp22, 0xb, "fbge");  }
    inline void Assembler::FBUGE(int32_t a, int32_t dsp22) { FBfcc(a, dsp22, 0xc, "fbuge"); }
    inline void Assembler::FBL  (int32_t a, int32_t dsp22) { FBfcc(a, dsp22, 0x4, "fbl");   }
    inline void Assembler::FBUL (int32_t a, int32_t dsp22) { FBfcc(a, dsp22, 0x3, "fbul");  }
    inline void Assembler::FBLE (int32_t a, int32_t dsp22) { FBfcc(a, dsp22, 0xd, "fble");  }
    inline void Assembler::FBULE(int32_t a, int32_t dsp22) { FBfcc(a, dsp22, 0xe, "fbule"); }
 175
    // Double-precision compare (FPop2, opf 0x52); sets the FP condition codes.
    inline void Assembler::FCMPD(Register rs1, Register rs2) {
        Format_3_9(2, 0, 0, 0x35, rs1, 0x52, rs2);
        asm_output("fcmpd %s, %s", gpn(rs1), gpn(rs2));
    }

    // Generic FPop1 instruction; opf selects the operation. Single-source
    // operations pass G0 as rs1, so print the two-operand form for those.
    inline void Assembler::FloatOperation
        (Register rs1, Register rs2, Register rd, int32_t opf, const char *opcode) {
        Format_3_8(2, rd, 0x34, rs1, opf, rs2);
        if (rs1 != G0) {
          asm_output("%s %s, %s, %s", opcode, gpn(rs1), gpn(rs2), gpn(rd));
        } else {
          asm_output("%s %s, %s", opcode, gpn(rs2), gpn(rd));
        }
    }
 190
    // Double-precision subtract (opf 0x46).
    inline void Assembler::FSUBD(Register rs1, Register rs2, Register rd) {
        FloatOperation(rs1, rs2, rd, 0x46, "fsubd");
    }
 194    inline void Assembler::FMULD(Register rs1, Register rs2, Register rd) {
 195        FloatOperation(rs1, rs2, rd, 0x4a, "fsubd");
 196    }
    // Single-source FPop1 wrappers (G0 as rs1); the hex constant is opf.
    inline void Assembler::FDTOI(Register rs2, Register rd) {
        FloatOperation(G0, rs2, rd, 0xd2, "fdtoi");
    }
    inline void Assembler::FDIVD(Register rs1, Register rs2, Register rd) {
        FloatOperation(rs1, rs2, rd, 0x4e, "fdivd");
    }
    inline void Assembler::FMOVD(Register rs2, Register rd) {
        FloatOperation(G0, rs2, rd, 0x2, "fmovd");
    }
    inline void Assembler::FNEGD(Register rs2, Register rd) {
        FloatOperation(G0, rs2, rd, 0x6, "fnegd");
    }
    inline void Assembler::FITOD(Register rs2, Register rd) {
        FloatOperation(G0, rs2, rd, 0xc8, "fitod");
    }
    inline void Assembler::FDTOS(Register rs2, Register rd) {
        FloatOperation(G0, rs2, rd, 0xc6, "fdtos");
    }
    inline void Assembler::FSTOD(Register rs2, Register rd) {
        FloatOperation(G0, rs2, rd, 0xc9, "fstod");
    }
 218
    // Jump-and-link through [rs1 + rs2], writing the return address to rd.
    inline void Assembler::JMPL(Register rs1, Register rs2, Register rd) {
        Format_3_1(2, rd, 0x38, rs1, 0, rs2);
        asm_output("jmpl [%s + %s]", gpn(rs1), gpn(rs2));
    }

    // Jump-and-link through [rs1 + simm13], writing the return address to rd.
    inline void Assembler::JMPLI(Register rs1, int32_t simm13, Register rd) {
        Format_3_1I(2, rd, 0x38, rs1, simm13);
        asm_output("jmpl [%s + 0x%x]", gpn(rs1), simm13);
    }
 228
    // Format-3 load (op = 3) from [rs1 + rs2] into rd; op3 selects the width
    // and signedness.
    inline void Assembler::LoadOperation
        (Register rs1, Register rs2, Register rd, int32_t op3, const char* opcode) {
        Format_3_1(3, rd, op3, rs1, 0, rs2);
        asm_output("%s [%s + %s], %s", opcode, gpn(rs1), gpn(rs2), gpn(rd));
    }

    // Same, but with a 13-bit signed immediate offset.
    inline void Assembler::LoadOperationI
        (Register rs1, int32_t simm13, Register rd, int32_t op3, const char* opcode) {
        Format_3_1I(3, rd, op3, rs1, simm13);
        asm_output("%s [%s + 0x%x], %s", opcode, gpn(rs1), simm13, gpn(rd));
    }
 240
    // Single-precision float loads (op3 0x20).
    inline void Assembler::LDF(Register rs1, Register rs2, Register rd) {
        LoadOperation(rs1, rs2, rd, 0x20, "ldf");
    }
    inline void Assembler::LDFI(Register rs1, int32_t simm13, Register rd) {
        LoadOperationI(rs1, simm13, rd, 0x20, "ldf");
    }

    // Float load with an arbitrary 32-bit offset. When the offset doesn't fit
    // in simm13, materialize it in L0 first. The assembler emits backwards,
    // so the SET32 appearing after the LDF here executes before it.
    inline void Assembler::LDF32(Register rs1, int32_t immI, Register rd) {
        if (isIMM13(immI)) {
            LDFI(rs1, immI, rd);
        } else {
            LDF(rs1, L0, rd);
            SET32(immI, L0);
        }
    }

    // Double load as two single-precision loads into the even/odd register
    // pair (rd, rd+1); low word at immI, high half of the pair at immI+4.
    // Emitted backwards, so at run time the rd load executes first.
    inline void Assembler::LDDF32(Register rs1, int32_t immI, Register rd) {
        if (isIMM13(immI+4)) {
            LDFI(rs1, immI+4, rd + 1);
            LDFI(rs1, immI, rd);
        } else {
            LDF(rs1, L0, rd + 1);
            SET32(immI+4, L0);
            LDF(rs1, L0, rd);
            SET32(immI, L0);
        }
    }
 268
    // Integer load families. Each width has three forms: register-offset,
    // simm13-offset, and a *32 variant that spills an over-wide offset into
    // L0 (the SET32 executes first at run time; emission is backwards).

    // Unsigned byte loads (op3 0x1).
    inline void Assembler::LDUB(Register rs1, Register rs2, Register rd) {
        LoadOperation(rs1, rs2, rd,  0x1, "ldub");
    }
    inline void Assembler::LDUBI(Register rs1, int32_t simm13, Register rd) {
        LoadOperationI(rs1, simm13, rd, 0x1, "ldub");
    }

    inline void Assembler::LDUB32(Register rs1, int32_t immI, Register rd) {
        if (isIMM13(immI)) {
            LDUBI(rs1, immI, rd);
        } else {
            LDUB(rs1, L0, rd);
            SET32(immI, L0);
        }
    }

    // Signed byte loads (op3 0x9).
    inline void Assembler::LDSB(Register rs1, Register rs2, Register rd) {
        LoadOperation(rs1, rs2, rd,  0x9, "ldsb");
    }
    inline void Assembler::LDSBI(Register rs1, int32_t simm13, Register rd) {
        LoadOperationI(rs1, simm13, rd, 0x9, "ldsb");
    }

    inline void Assembler::LDSB32(Register rs1, int32_t immI, Register rd) {
        if (isIMM13(immI)) {
            LDSBI(rs1, immI, rd);
        } else {
            LDSB(rs1, L0, rd);
            SET32(immI, L0);
        }
    }

    // Unsigned halfword loads (op3 0x2).
    inline void Assembler::LDUH(Register rs1, Register rs2, Register rd) {
        LoadOperation(rs1, rs2, rd,  0x2, "lduh");
    }
    inline void Assembler::LDUHI(Register rs1, int32_t simm13, Register rd) {
        LoadOperationI(rs1, simm13, rd, 0x2, "lduh");
    }

    inline void Assembler::LDUH32(Register rs1, int32_t immI, Register rd) {
        if (isIMM13(immI)) {
            LDUHI(rs1, immI, rd);
        } else {
            LDUH(rs1, L0, rd);
            SET32(immI, L0);
        }
    }

    // Signed halfword loads (op3 0xa).
    inline void Assembler::LDSH(Register rs1, Register rs2, Register rd) {
        LoadOperation(rs1, rs2, rd,  0xa, "ldsh");
    }
    inline void Assembler::LDSHI(Register rs1, int32_t simm13, Register rd) {
        LoadOperationI(rs1, simm13, rd, 0xa, "ldsh");
    }

    inline void Assembler::LDSH32(Register rs1, int32_t immI, Register rd) {
        if (isIMM13(immI)) {
            LDSHI(rs1, immI, rd);
        } else {
            LDSH(rs1, L0, rd);
            SET32(immI, L0);
        }
    }

    // Signed word loads (op3 0x8).
    inline void Assembler::LDSW(Register rs1, Register rs2, Register rd) {
        LoadOperation(rs1, rs2, rd,  0x8, "ldsw");
    }
    inline void Assembler::LDSWI(Register rs1, int32_t simm13, Register rd) {
        LoadOperationI(rs1, simm13, rd, 0x8, "ldsw");
    }

    inline void Assembler::LDSW32(Register rs1, int32_t immI, Register rd) {
        if (isIMM13(immI)) {
            LDSWI(rs1, immI, rd);
        } else {
            LDSW(rs1, L0, rd);
            SET32(immI, L0);
        }
    }
 348
    // Conditional move on condition codes (MOVcc): copies rs to rd when the
    // condition selected by cond/cc2:cc1:cc0 holds.
    inline void Assembler::MOVcc
        (Register rs, int32_t cc2, int32_t cc1, int32_t cc0, Register rd, int32_t cond, const char *opcode) {
        Format_4_2(rd, 0x2c, cc2, cond, cc1, cc0, rs);
        asm_output("%s %s, %s", opcode, gpn(rs), gpn(rd));
    }

    // Immediate form: moves an 11-bit signed immediate into rd when the
    // condition holds.
    inline void Assembler::MOVccI
        (int32_t simm11, int32_t cc2, int32_t cc1, int32_t cc0, Register rd, int32_t cond, const char *opcode) {
        Format_4_2I(rd, 0x2c, cc2, cond, cc1, cc0, simm11);
        asm_output("%s 0x%x, %s", opcode, simm11, gpn(rd));
    }
 360
    // MOVcc wrappers. cc2=1 selects the integer condition codes; the MOVF*
    // variants pass cc2=0 to test the floating-point condition codes.
    inline void Assembler::MOVE  (Register rs, Register rd) { MOVcc(rs, 1, 0, 0, rd, 0x1, "move");   }
    inline void Assembler::MOVNE (Register rs, Register rd) { MOVcc(rs, 1, 0, 0, rd, 0x9, "movne");  }
    inline void Assembler::MOVL  (Register rs, Register rd) { MOVcc(rs, 1, 0, 0, rd, 0x3, "movl");   }
    inline void Assembler::MOVLE (Register rs, Register rd) { MOVcc(rs, 1, 0, 0, rd, 0x2, "movle");  }
    inline void Assembler::MOVG  (Register rs, Register rd) { MOVcc(rs, 1, 0, 0, rd, 0xa, "movg");   }
    inline void Assembler::MOVGE (Register rs, Register rd) { MOVcc(rs, 1, 0, 0, rd, 0xb, "movge");  }
    inline void Assembler::MOVLEU(Register rs, Register rd) { MOVcc(rs, 1, 0, 0, rd, 0x4, "movleu"); }
    inline void Assembler::MOVGU (Register rs, Register rd) { MOVcc(rs, 1, 0, 0, rd, 0xc, "movgu");  }
    inline void Assembler::MOVCC (Register rs, Register rd) { MOVcc(rs, 1, 0, 0, rd, 0xd, "movcc");  }
    inline void Assembler::MOVCS (Register rs, Register rd) { MOVcc(rs, 1, 0, 0, rd, 0x5, "movcs");  }
    inline void Assembler::MOVVC (Register rs, Register rd) { MOVcc(rs, 1, 0, 0, rd, 0xf, "movvc");  }
    inline void Assembler::MOVEI  (int32_t simm11, Register rd) { MOVccI(simm11, 1, 0, 0, rd, 0x1, "move");   }
    inline void Assembler::MOVNEI (int32_t simm11, Register rd) { MOVccI(simm11, 1, 0, 0, rd, 0x9, "movne");  }
    inline void Assembler::MOVLI  (int32_t simm11, Register rd) { MOVccI(simm11, 1, 0, 0, rd, 0x3, "movl");   }
    inline void Assembler::MOVLEI (int32_t simm11, Register rd) { MOVccI(simm11, 1, 0, 0, rd, 0x2, "movle");  }
    inline void Assembler::MOVGI  (int32_t simm11, Register rd) { MOVccI(simm11, 1, 0, 0, rd, 0xa, "movg");   }
    inline void Assembler::MOVGEI (int32_t simm11, Register rd) { MOVccI(simm11, 1, 0, 0, rd, 0xb, "movge");  }
    inline void Assembler::MOVLEUI(int32_t simm11, Register rd) { MOVccI(simm11, 1, 0, 0, rd, 0x4, "movleu"); }
    inline void Assembler::MOVGUI (int32_t simm11, Register rd) { MOVccI(simm11, 1, 0, 0, rd, 0xc, "movgu");  }
    inline void Assembler::MOVCCI (int32_t simm11, Register rd) { MOVccI(simm11, 1, 0, 0, rd, 0xd, "movcc");  }
    inline void Assembler::MOVCSI (int32_t simm11, Register rd) { MOVccI(simm11, 1, 0, 0, rd, 0x5, "movcs");  }
    inline void Assembler::MOVVSI (int32_t simm11, Register rd) { MOVccI(simm11, 1, 0, 0, rd, 0x7, "movvs");  }
    inline void Assembler::MOVFEI (int32_t simm11, Register rd) { MOVccI(simm11, 0, 0, 0, rd, 0x9, "movfe");  }
    inline void Assembler::MOVFLI (int32_t simm11, Register rd) { MOVccI(simm11, 0, 0, 0, rd, 0x4, "movfl");  }
    inline void Assembler::MOVFLEI(int32_t simm11, Register rd) { MOVccI(simm11, 0, 0, 0, rd, 0xd, "movfle"); }
    inline void Assembler::MOVFGI (int32_t simm11, Register rd) { MOVccI(simm11, 0, 0, 0, rd, 0x6, "movfg");  }
    inline void Assembler::MOVFGEI(int32_t simm11, Register rd) { MOVccI(simm11, 0, 0, 0, rd, 0xb, "movfge"); }
 388
    // Conditional double-precision register move (FMOVDcc): moves rs to rd
    // when the condition selected by cond/opt_cc holds.
    inline void Assembler::FMOVDcc(Register rs, int32_t opt_cc, Register rd, int32_t cond, const char *opcode) {
        Format_4_5(rd, 0x35, cond, opt_cc, 0x2, rs);
        asm_output("%s %s, %s", opcode, gpn(rs), gpn(rd));
    }
 393
 394    inline void Assembler::FMOVDNE  (Register rs, Register rd) { FMOVDcc(rs, 0x4, rd, 0x9, "fmovdne"); }
 395    inline void Assembler::FMOVDL   (Register rs, Register rd) { FMOVDcc(rs, 0x4, rd, 0x3, "fmovdl");  }
 396    inline void Assembler::FMOVDLE  (Register rs, Register rd) { FMOVDcc(rs, 0x4, rd, 0x2, "fmovdle"); }
 397    inline void Assembler::FMOVDLEU (Register rs, Register rd) { FMOVDcc(rs, 0x4, rd, 0x4, "fmovdleu");}
 398    inline void Assembler::FMOVDG   (Register rs, Register rd) { FMOVDcc(rs, 0x4, rd, 0xa, "fmovdg");  }
 399    inline void Assembler::FMOVDGU  (Register rs, Register rd) { FMOVDcc(rs, 0x4, rd, 0xc, "fmovdgu"); }
 400    inline void Assembler::FMOVDGE  (Register rs, Register rd) { FMOVDcc(rs, 0x4, rd, 0xb, "fmovdfge");}
 401    inline void Assembler::FMOVDCC  (Register rs, Register rd) { FMOVDcc(rs, 0x4, rd, 0xd, "fmovdcc"); }
 402    inline void Assembler::FMOVDCS  (Register rs, Register rd) { FMOVDcc(rs, 0x4, rd, 0x5, "fmovdcs"); }
 403    inline void Assembler::FMOVDFNE (Register rs, Register rd) { FMOVDcc(rs, 0x0, rd, 0x1, "fmovdfne"); }
 404    inline void Assembler::FMOVDFUG (Register rs, Register rd) { FMOVDcc(rs, 0x0, rd, 0x5, "fmovdfug"); }
 405    inline void Assembler::FMOVDFUGE(Register rs, Register rd) { FMOVDcc(rs, 0x0, rd, 0xc, "fmovdfuge");}
 406    inline void Assembler::FMOVDFUL (Register rs, Register rd) { FMOVDcc(rs, 0x0, rd, 0x3, "fmovdful"); }
 407    inline void Assembler::FMOVDFULE(Register rs, Register rd) { FMOVDcc(rs, 0x0, rd, 0xe, "fmovdfule");}
 408
    // NOP, encoded as SETHI 0, %g0 (Format 2, op2 = 0x4).
    inline void Assembler::NOP() {
        Format_2(0, 0x4, 0);
        asm_output("nop");
    }

    // Read the Y register into rd (op3 0x28).
    inline void Assembler::RDY(Register rd) {
        Format_3_1(2, rd, 0x28, G0, 0, G0);
        asm_output("rdy %s", gpn(rd));
    }

    // RESTORE: pop the register window (op3 0x3d).
    inline void Assembler::RESTORE(Register rs1, Register rs2, Register rd) {
        Format_3_1(2, rd, 0x3d, rs1, 0, rs2);
        asm_output("restore");
    }
 423
    // SAVE: push a new register window (op3 0x3c); register and immediate forms.
    inline void Assembler::SAVE(Register rs1, Register rs2, Register rd) {
        IntegerOperation(rs1, rs2, rd, 0x3c, "save");
    }
    inline void Assembler::SAVEI(Register rs1, int32_t simm13, Register rd) {
        IntegerOperationI(rs1, simm13, rd, 0x3c, "save");
    }

    // SETHI: set the upper 22 bits of rd (immI's low 10 bits are dropped here
    // and supplied separately, e.g. by SET32's OR).
    inline void Assembler::SETHI(int32_t immI, Register rd) {
        Format_2A(rd, 0x4, immI >> 10);
        asm_output("sethi 0x%x, %s     ! 0x%x", immI >> 10, gpn(rd), immI);
    }
 435
    // Materialize an arbitrary 32-bit constant in rd: a single OR when it
    // fits in simm13, otherwise the classic SETHI (top 22 bits) + OR (low 10
    // bits) pair. Emission is backwards, so SETHI executes before the OR.
    inline void Assembler::SET32(int32_t immI, Register rd) {
        if (isIMM13(immI)) {
            ORI(G0, immI, rd);
        } else {
            ORI(rd, immI & 0x3FF, rd);
            SETHI(immI, rd);
        }
    }
 444
    // Shift instruction, register count form; op3 selects sll/srl/sra.
    inline void Assembler::ShiftOperation
        (Register rs1, Register rs2, Register rd, int32_t op3, const char* opcode) {
        Format_3_5(2, rd, op3, rs1, 0, rs2);
        asm_output("%s %s, %s, %s", opcode, gpn(rs1), gpn(rs2), gpn(rd));
    }

    // Shift instruction with an immediate shift count.
    inline void Assembler::ShiftOperationI
        (Register rs1, int32_t shcnt32, Register rd, int32_t op3, const char* opcode) {
        Format_3_6(2, rd, op3, rs1, shcnt32);
        asm_output("%s %s, %d, %s", opcode, gpn(rs1), shcnt32, gpn(rd));
    }

    inline void Assembler::SLL(Register rs1, Register rs2, Register rd) {
        ShiftOperation(rs1, rs2, rd, 0x25, "sll");
    }
    inline void Assembler::SRA(Register rs1, Register rs2, Register rd) {
        ShiftOperation(rs1, rs2, rd, 0x27, "sra");
    }
    inline void Assembler::SRAI(Register rs1, int32_t shcnt32, Register rd) {
        ShiftOperationI(rs1, shcnt32, rd, 0x27, "sra");
    }
    inline void Assembler::SRL(Register rs1, Register rs2, Register rd) {
        ShiftOperation(rs1, rs2, rd, 0x26, "srl");
    }
 469
    // Format-3 store (op = 3) of rd to [rs1 + rs2]; op3 selects the width.
    inline void Assembler::Store
        (Register rd, Register rs1, Register rs2, int32_t op3, const char* opcode) {
        Format_3_1(3, rd, op3, rs1, 0, rs2);
        asm_output("%s %s, [%s + %s]", opcode, gpn(rd), gpn(rs1), gpn(rs2));
    }

    // Same, but with a 13-bit signed immediate offset.
    inline void Assembler::StoreI
        (Register rd, int32_t simm13, Register rs1, int32_t op3, const char* opcode) {
        Format_3_1I(3, rd, op3, rs1, simm13);
        asm_output("%s %s, [%s + 0x%x]", opcode, gpn(rd), gpn(rs1), simm13);
    }
 481
    // Single-precision float stores (op3 0x24).
    inline void Assembler::STF(Register rd, Register rs1, Register rs2) {
        Store(rd, rs1, rs2, 0x24, "stf");
    }
    inline void Assembler::STFI(Register rd, int32_t simm13, Register rs1) {
        StoreI(rd, simm13, rs1, 0x24, "stf");
    }

    // Float store with an arbitrary 32-bit offset; an over-wide offset is
    // materialized in L0 first (emission is backwards).
    inline void Assembler::STF32(Register rd, int32_t immI, Register rs1) {
        if (isIMM13(immI)) {
            STFI(rd, immI, rs1);
        } else {
            STF(rd, L0, rs1);
            SET32(immI, L0);
        }
    }

    // Double store as two single-precision stores of the register pair
    // (rd, rd+1): low word at immI, the pair's second register at immI+4.
    inline void Assembler::STDF32(Register rd, int32_t immI, Register rs1) {
        if (isIMM13(immI+4)) {
            STFI(rd + 1, immI+4, rs1);
            STFI(rd, immI, rs1);
        } else {
            STF(rd + 1, L0, rs1);
            SET32(immI+4, L0);
            STF(rd, L0, rs1);
            SET32(immI, L0);
        }
    }
 509
    // Integer store families (word/halfword/byte); each has register-offset,
    // simm13-offset, and a *32 form that spills an over-wide offset into L0.

    // Word stores (op3 0x4).
    inline void Assembler::STW(Register rd, Register rs1, Register rs2) {
        Store(rd, rs1, rs2, 0x4, "st");
    }
    inline void Assembler::STWI(Register rd, int32_t simm13, Register rs1) {
        StoreI(rd, simm13, rs1, 0x4, "st");
    }

    inline void Assembler::STW32(Register rd, int32_t immI, Register rs1) {
        if (isIMM13(immI)) {
            STWI(rd, immI, rs1);
         } else {
            STW(rd, L0, rs1);
            SET32(immI, L0);
         }
    }

    // Halfword stores (op3 0x6).
    inline void Assembler::STH(Register rd, Register rs1, Register rs2) {
        Store(rd, rs1, rs2, 0x6, "sth");
    }
    inline void Assembler::STHI(Register rd, int32_t simm13, Register rs1) {
        StoreI(rd, simm13, rs1, 0x6, "sth");
    }

    inline void Assembler::STH32(Register rd, int32_t immI, Register rs1) {
        if (isIMM13(immI)) {
            STHI(rd, immI, rs1);
         } else {
            STH(rd, L0, rs1);
            SET32(immI, L0);
         }
    }

    // Byte stores (op3 0x5).
    inline void Assembler::STB(Register rd, Register rs1, Register rs2) {
        Store(rd, rs1, rs2, 0x5, "stb");
    }
    inline void Assembler::STBI(Register rd, int32_t simm13, Register rs1) {
        StoreI(rd, simm13, rs1, 0x5, "stb");
    }

    inline void Assembler::STB32(Register rd, int32_t immI, Register rs1) {
        if (isIMM13(immI)) {
            STBI(rd, immI, rs1);
        } else {
            STB(rd, L0, rs1);
            SET32(immI, L0);
        }
    }
 557
    // General assembly helpers.

    // Unconditional jump to absolute address t via G2. Emitted backwards, so
    // the run-time sequence is: sethi/or build t in G2, jmpl through G2, and
    // a NOP in the delay slot. Always 16 bytes, so it can be patched later.
    inline void Assembler::JMP_long_nocheck(int32_t t) {
        NOP();
        JMPL(G0, G2, G0);
        ORI(G2, t & 0x3FF, G2);
        SETHI(t, G2);
    }

    // Same, but first ensures 16 bytes of buffer headroom.
    inline void Assembler::JMP_long(int32_t t) {
        underrunProtect(16);
        JMP_long_nocheck(t);
    }

    // Emit a long jump to address 0; the real target is patched in later.
    inline void Assembler::JMP_long_placeholder() {
        JMP_long(0);
    }
 574
    // Emit the tail of a conditional-branch sequence and return the word
    // displacement the caller's Bcc/FBcc should use. If t is within 22-bit
    // branch range, only the delay-slot NOP is emitted and the true
    // displacement is returned. Otherwise an absolute jmpl through G2 is
    // emitted (backwards) with a short branch skipping over it, and the
    // returned displacement (4) targets that long-jump stub.
    inline int32_t Assembler::JCC(void *t) {
        underrunProtect(32);
        int32_t tt = ((intptr_t)t - (intptr_t)_nIns + 8) >> 2;
        if( !(isIMM22(tt)) ) {
            NOP();
            JMPL(G0, G2, G0);
            SET32((intptr_t)t, G2);
            NOP();
            BA(0, 5);
            tt = 4;
        }
        NOP();
        return tt;
    }
 589
 590    void Assembler::JMP(void *t) {
 591        if (!t) {
 592            JMP_long_placeholder();
 593        } else {
 594            int32_t tt = JCC(t);
 595            BA(0, tt);
 596        }
 597    }
 598
    // Register-to-register move, encoded as "or rs, 0, rd".
    void Assembler::MR(Register rd, Register rs) {
        underrunProtect(4);
        ORI(rs, 0, rd);
    }
 603
    // No per-backend initialization is needed on SPARC.
    void Assembler::nInit() {
    }

    // No per-assembly setup is needed on SPARC.
    void Assembler::nBeginAssembly() {
    }
 609
 610    NIns* Assembler::genPrologue()
 611    {
 612        /**
 613         * Prologue
 614         */
 615        underrunProtect(16);
 616        uint32_t stackNeeded = STACK_GRANULARITY * _activation.stackSlotsNeeded();
 617        uint32_t frameSize = stackNeeded + kcalleeAreaSize + kLinkageAreaSize;
 618        frameSize = BIT_ROUND_UP(frameSize, 8);
 619
 620        if (frameSize <= 4096)
 621            SUBI(FP, frameSize, SP);
 622        else {
 623            SUB(FP, G1, SP);
 624            ORI(G1, frameSize & 0x3FF, G1);
 625            SETHI(frameSize, G1);
 626        }
 627
 628        verbose_only(
 629        if (_logc->lcbits & LC_Native) {
 630            outputf("        0x%x:",_nIns);
 631            outputf("        patch entry:");
 632        })
 633        NIns *patchEntry = _nIns;
 634
 635        // The frame size in SAVE is faked. We will still re-caculate SP later.
 636        // We can use 0 here but it is not good for debuggers.
 637        SAVEI(SP, -148, SP);
 638
 639        // align the entry point
 640        asm_align_code();
 641
 642        return patchEntry;
 643    }
 644
 645    void Assembler::asm_align_code() {
 646        while(uintptr_t(_nIns) & 15) {
 647            NOP();
 648        }
 649    }
 650
 651    void Assembler::nFragExit(LIns* guard)
 652    {
 653        SideExit* exit = guard->record()->exit;
 654        Fragment *frag = exit->target;
 655        GuardRecord *lr;
 656        if (frag && frag->fragEntry)
 657            {
 658                JMP(frag->fragEntry);
 659                lr = 0;
 660            }
 661        else
 662            {
 663                // Target doesn't exit yet. Emit jump to epilog, and set up to patch later.
 664                if (!_epilogue)
 665                    _epilogue = genEpilogue();
 666                lr = guard->record();
 667                JMP_long((intptr_t)_epilogue);
 668                lr->jmp = _nIns;
 669            }
 670
 671        // return value is GuardRecord*
 672        SET32(int(lr), O0);
 673    }
 674
 675    NIns *Assembler::genEpilogue()
 676    {
 677        underrunProtect(12);
 678        RESTORE(G0, G0, G0); //restore
 679        JMPLI(I7, 8, G0); //ret
 680        ORI(O0, 0, I0);
 681        return  _nIns;
 682    }
 683
 684    void Assembler::asm_call(LIns* ins)
 685    {
 686        if (!ins->isop(LIR_callv)) {
 687            Register retReg = ( ins->isop(LIR_calld) ? F0 : retRegs[0] );
 688            deprecated_prepResultReg(ins, rmask(retReg));
 689        }
 690
 691        // Do this after we've handled the call result, so we don't
 692        // force the call result to be spilled unnecessarily.
 693        evictScratchRegsExcept(0);
 694
 695        const CallInfo* ci = ins->callInfo();
 696
 697        underrunProtect(8);
 698        NOP();
 699
 700        ArgType argTypes[MAXARGS];
 701        uint32_t argc = ci->getArgTypes(argTypes);
 702
 703        NanoAssert(ins->isop(LIR_callv) || ins->isop(LIR_callp) ||
 704                   ins->isop(LIR_calld));
 705        verbose_only(if (_logc->lcbits & LC_Native)
 706                     outputf("        0x%x:", _nIns);
 707                     )
 708        bool indirect = ci->isIndirect();
 709        if (!indirect) {
 710            CALL(ci);
 711        }
 712        else {
 713            argc--;
 714            Register r = findSpecificRegFor(ins->arg(argc), I0);
 715            JMPL(G0, I0, O7);
 716        }
 717
 718        Register GPRIndex = O0;
 719        uint32_t offset = kLinkageAreaSize; // start of parameters stack postion.
 720
 721        for(int i=0; i<argc; i++)
 722            {
 723                uint32_t j = argc-i-1;
 724                ArgType ty = argTypes[j];
 725                if (ty == ARGTYPE_D) {
 726                    Register r = findRegFor(ins->arg(j), FpRegs);
 727
 728                    underrunProtect(48);
 729                    // We might be calling a varargs function.
 730                    // So, make sure the GPR's are also loaded with
 731                    // the value, or the stack contains it.
 732                    if (GPRIndex <= O5) {
 733                        LDSW32(SP, offset, GPRIndex);
 734                    }
 735                    GPRIndex = GPRIndex + 1;
 736                    if (GPRIndex <= O5) {
 737                        LDSW32(SP, offset+4, GPRIndex);
 738                    }
 739                    GPRIndex = GPRIndex + 1;
 740                    STDF32(r, offset, SP);
 741                    offset += 8;
 742                } else {
 743                    if (GPRIndex > O5) {
 744                        underrunProtect(12);
 745                        Register r = findRegFor(ins->arg(j), GpRegs);
 746                        STW32(r, offset, SP);
 747                    } else {
 748                        Register r = findSpecificRegFor(ins->arg(j), GPRIndex);
 749                    }
 750                    GPRIndex = GPRIndex + 1;
 751                    offset += 4;
 752                }
 753            }
 754    }
 755
 756    Register Assembler::nRegisterAllocFromSet(RegisterMask set)
 757    {
 758        // need to implement faster way
 759        Register i = G0;
 760        while (!(set & rmask(i)))
 761            i = i + 1;
 762        _allocator.free &= ~rmask(i);
 763        return i;
 764    }
 765
    // Reset the register allocator state: clear all assignments and mark
    // every general-purpose and floating-point register as free.
    void Assembler::nRegisterResetAll(RegAlloc& a)
    {
        a.clear();
        a.free = GpRegs | FpRegs;
    }
 771
    // Retarget a previously emitted two-instruction long jump at 'branch'
    // so it lands on 'location'.  The first instruction word keeps its top
    // 10 bits and receives bits 31..10 of the target in its low 22 bits;
    // the second word receives bits 9..0 in its low 10 bits.  (This is the
    // hi/lo split produced by JMP_long — presumably a sethi/jmpl pair;
    // matches the imm22 + simm10 fields being rewritten here.)
    void Assembler::nPatchBranch(NIns* branch, NIns* location)
    {
        *(uint32_t*)&branch[0] &= 0xFFC00000;
        *(uint32_t*)&branch[0] |= ((intptr_t)location >> 10) & 0x3FFFFF;
        *(uint32_t*)&branch[1] &= 0xFFFFFC00;
        *(uint32_t*)&branch[1] |= (intptr_t)location & 0x3FF;
    }
 779
    // Register-preference hint hook; unused on SPARC.
    RegisterMask Assembler::nHint(LIns* ins)
    {
        // Never called, because no entries in nHints[] == PREFER_SPECIAL.
        NanoAssert(0);
        return 0;
    }
 786
 787    bool Assembler::canRemat(LIns* ins)
 788    {
 789        return ins->isImmI() || ins->isop(LIR_allocp);
 790    }
 791
    // Rematerialize or reload instruction i's value into register r:
    //  - LIR_allocp: recompute the stack address as FP + disp (disp built
    //    in scratch L2; the SET32 is emitted after the ADD but, since code
    //    is emitted backwards, executes before it).
    //  - integer immediate: re-load the constant directly.
    //  - otherwise: reload from the spill slot, as a double for FP
    //    registers, as a signed word for GP registers.
    void Assembler::asm_restore(LIns* i, Register r)
    {
        underrunProtect(24);
        if (i->isop(LIR_allocp)) {
            ADD(FP, L2, r);
            int32_t d = deprecated_disp(i);
            SET32(d, L2);
        }
        else if (i->isImmI()) {
            int v = i->immI();
            SET32(v, r);
        } else {
            int d = findMemFor(i);
            if (rmask(r) & FpRegs) {
                LDDF32(FP, d, r);
            } else {
                LDSW32(FP, d, r);
            }
        }
    }
 812
    // Emit a 32-bit (or truncating 8/16-bit) store of 'value' to
    // [base + dr].  Immediate values are built in scratch register L2
    // (the SET32 is emitted after the store but executes before it, since
    // code is generated backwards).  An immediate base is folded into the
    // displacement and stored relative to %g0 (absolute address).
    void Assembler::asm_store32(LOpcode op, LIns *value, int dr, LIns *base)
    {
        switch (op) {
            case LIR_sti:
            case LIR_sti2c:
            case LIR_sti2s:
                // handled by mainline code below for now
                break;
            default:
                NanoAssertMsg(0, "asm_store32 should never receive this LIR opcode");
                return;
        }

        underrunProtect(20);
        if (value->isImmI())
            {
                Register rb = getBaseReg(base, dr, GpRegs);
                int c = value->immI();
                // store L2 (which will hold the constant at runtime)
                switch (op) {
                case LIR_sti:
                    STW32(L2, dr, rb);
                    break;
                case LIR_sti2c:
                    STB32(L2, dr, rb);
                    break;
                case LIR_sti2s:
                    STH32(L2, dr, rb);
                    break;
                }
                SET32(c, L2);   // executes before the store above
            }
        else
            {
                // make sure what is in a register
                Register ra, rb;
                if (base->isImmI()) {
                    // absolute address
                    dr += base->immI();
                    ra = findRegFor(value, GpRegs);
                    rb = G0;
                } else {
                    getBaseReg2(GpRegs, value, ra, GpRegs, base, rb, dr);
                }
                switch (op) {
                case LIR_sti:
                    STW32(ra, dr, rb);
                    break;
                case LIR_sti2c:
                    STB32(ra, dr, rb);
                    break;
                case LIR_sti2s:
                    STH32(ra, dr, rb);
                    break;
                }
            }
    }
 869
    // Spill register rr to its stack slot at FP+d.  'quad' is ignored on
    // this backend: FP registers always spill as 64-bit doubles, GP
    // registers as 32-bit words.
    void Assembler::asm_spill(Register rr, int d, bool quad)
    {
        underrunProtect(24);
        (void)quad;
        NanoAssert(d);      // a spill must have a non-zero displacement
        if (rmask(rr) & FpRegs) {
            STDF32(rr, d, FP);
        } else {
            STW32(rr, d, FP);
        }
    }
 881
    // Emit a 64-bit FP load: LIR_ldd loads a double directly; LIR_ldf2d
    // loads a single-precision float and widens it to double.  The widen
    // path uses F28 as a temporary (backwards emission: the LDF32 executes
    // first, then FSTOD converts F28 into the destination).  If the result
    // lives only in a stack slot, the value is copied/converted into that
    // slot instead of a register.
    void Assembler::asm_load64(LIns* ins)
    {
        switch (ins->opcode()) {
            case LIR_ldd:
            case LIR_ldf2d:
                // handled by mainline code below for now
                break;
            default:
                NanoAssertMsg(0, "asm_load64 should never receive this LIR opcode");
                return;
        }

        underrunProtect(48);
        LIns* base = ins->oprnd1();
        int db = ins->disp();
        Register rb = getBaseReg(base, db, GpRegs);

        if (ins->isInReg()) {
            Register rr =  ins->getReg();
            asm_maybe_spill(ins, false);
            NanoAssert(rmask(rr) & FpRegs);

            if (ins->opcode() == LIR_ldd) {
                LDDF32(rb, db, rr);
            } else {
                // runtime order: load float into F28, then widen into rr
                FSTOD(F28, rr);
                LDF32(rb, db, F28);
            }
        } else {
            NanoAssert(ins->isInAr());
            int dr = arDisp(ins);

            if (ins->opcode() == LIR_ldd) {
                // don't use an fpu reg to simply load & store the value.
                asm_mmq(FP, dr, rb, db);
            } else {
                // runtime order: load float, widen in F28, store double
                // into the stack slot at FP+dr
                STDF32(F28, dr, FP);
                FSTOD(F28, F28);
                LDF32(rb, db, F28);
            }
        }

        freeResourcesOf(ins);
    }
 926
    // Emit a 64-bit store to [base + dr].  LIR_std2f narrows a double to
    // single precision via F28 (backwards emission: FDTOS executes first,
    // then STF32 stores the narrowed value).  Constant doubles are stored
    // word-by-word through scratch L2; values already in memory (LIR_ldd)
    // are copied memory-to-memory to avoid a pointless FPU round trip.
    void Assembler::asm_store64(LOpcode op, LIns* value, int dr, LIns* base)
    {
        switch (op) {
            case LIR_std:
            case LIR_std2f:
                // handled by mainline code below for now
                break;
            default:
                NanoAssertMsg(0, "asm_store64 should never receive this LIR opcode");
                return;
        }

        underrunProtect(48);
        Register rb = getBaseReg(base, dr, GpRegs);
        if (op == LIR_std2f) {
            Register rv = ( !value->isInReg()
                            ? findRegFor(value, FpRegs)
                            : value->getReg() );
            NanoAssert(rmask(rv) & FpRegs);
            // runtime order: narrow rv into F28, then store F28 as a float
            STF32(F28, dr, rb);
            FDTOS(rv, F28);
            return;
        }

        if (value->isImmD())
            {
                // if a constant 64-bit value just store it now rather than
                // generating a pointless store/load/store sequence
                // (runtime order: set hi word, store, set lo word, store)
                STW32(L2, dr+4, rb);
                SET32(value->immDlo(), L2);
                STW32(L2, dr, rb);
                SET32(value->immDhi(), L2);
                return;
            }

        if (value->isop(LIR_ldd))
            {
                // value is 64bit struct or int64_t, or maybe a double.
                // it may be live in an FPU reg.  Either way, don't
                // put it in an FPU reg just to load & store it.

                // a) if we know it's not a double, this is right.
                // b) if we guarded that its a double, this store could be on
                // the side exit, copying a non-double.
                // c) maybe its a double just being stored.  oh well.

                int da = findMemFor(value);
                asm_mmq(rb, dr, FP, da);
                return;
            }

        // if value already in a reg, use that, otherwise
        // get it into FPU regs.
        Register rv = ( !value->isInReg()
                      ? findRegFor(value, FpRegs)
                      : value->getReg() );

        STDF32(rv, dr, rb);
    }
 986
    /**
     * copy 64 bits: (rd+dd) <- (rs+ds)
     *
     * Copies via a GP scratch register, one 32-bit word at a time.
     * Backwards emission means the runtime order is: load low word,
     * store it, load high word, store it.
     */
    void Assembler::asm_mmq(Register rd, int dd, Register rs, int ds)
    {
        // value is either a 64bit struct or maybe a float
        // that isn't live in an FPU reg.  Either way, don't
        // put it in an FPU reg just to load & store it.
        Register t = registerAllocTmp(GpRegs & ~(rmask(rd)|rmask(rs)));
        STW32(t, dd+4, rd);
        LDSW32(rs, ds+4, t);
        STW32(t, dd, rd);
        LDSW32(rs, ds, t);
    }
1001
1002    Branches Assembler::asm_branch(bool branchOnFalse, LIns* cond, NIns* targ)
1003    {
1004        NIns* at = 0;
1005        LOpcode condop = cond->opcode();
1006        NanoAssert(cond->isCmp());
1007        if (isCmpDOpcode(condop))
1008            {
1009                return Branches(asm_branchd(branchOnFalse, cond, targ));
1010            }
1011
1012        underrunProtect(32);
1013        intptr_t tt = ((intptr_t)targ - (intptr_t)_nIns + 8) >> 2;
1014        // !targ means that it needs patch.
1015        if( !(isIMM22((int32_t)tt)) || !targ ) {
1016            JMP_long_nocheck((intptr_t)targ);
1017            at = _nIns;
1018            NOP();
1019            BA(0, 5);
1020            tt = 4;
1021        }
1022        NOP();
1023
1024        // produce the branch
1025        if (branchOnFalse)
1026            {
1027                if (condop == LIR_eqi)
1028                    BNE(0, tt);
1029                else if (condop == LIR_lti)
1030                    BGE(0, tt);
1031                else if (condop == LIR_lei)
1032                    BG(0, tt);
1033                else if (condop == LIR_gti)
1034                    BLE(0, tt);
1035                else if (condop == LIR_gei)
1036                    BL(0, tt);
1037                else if (condop == LIR_ltui)
1038                    BCC(0, tt);
1039                else if (condop == LIR_leui)
1040                    BGU(0, tt);
1041                else if (condop == LIR_gtui)
1042                    BLEU(0, tt);
1043                else //if (condop == LIR_geui)
1044                    BCS(0, tt);
1045            }
1046        else // op == LIR_xt
1047            {
1048                if (condop == LIR_eqi)
1049                    BE(0, tt);
1050                else if (condop == LIR_lti)
1051                    BL(0, tt);
1052                else if (condop == LIR_lei)
1053                    BLE(0, tt);
1054                else if (condop == LIR_gti)
1055                    BG(0, tt);
1056                else if (condop == LIR_gei)
1057                    BGE(0, tt);
1058                else if (condop == LIR_ltui)
1059                    BCS(0, tt);
1060                else if (condop == LIR_leui)
1061                    BLEU(0, tt);
1062                else if (condop == LIR_gtui)
1063                    BGU(0, tt);
1064                else //if (condop == LIR_geui)
1065                    BCC(0, tt);
1066            }
1067        asm_cmp(cond);
1068        return Branches(at);
1069    }
1070
    // Emit the overflow branch for an xov/jov arithmetic op.  Plain
    // add/sub overflow uses BVS on the icc overflow flag; multiply
    // overflow uses BNE, testing the flags set by asm_arith's
    // SMULCC/RDY/SRAI/SUBCC sequence (high word vs. sign extension).
    // Long/unknown targets get a patchable jump, as in asm_branch.
    // Returns the patch address, or NULL when no patching is needed.
    NIns* Assembler::asm_branch_ov(LOpcode op, NIns* targ)
    {
        NIns* at = 0;
        underrunProtect(32);
        intptr_t tt = ((intptr_t)targ - (intptr_t)_nIns + 8) >> 2;
        // !targ means that it needs patch.
        if( !(isIMM22((int32_t)tt)) || !targ ) {
            JMP_long_nocheck((intptr_t)targ);
            at = _nIns;
            NOP();
            BA(0, 5);   // conditional branch below skips this jump
            tt = 4;
        }
        NOP();          // delay slot

        if( op == LIR_mulxovi || op == LIR_muljovi )
            BNE(0, tt);
        else
            BVS(0, tt);
        return at;
    }
1092
    // Emit the integer compare that sets the icc flags consumed by the
    // branch/conditional-move emitted just before (i.e. after, at
    // runtime).  'x == 0' is strength-reduced to ANDCC r,r,%g0; other
    // immediates are built in scratch L2 (SET32 emitted after the SUBCC,
    // executed before it).
    void Assembler::asm_cmp(LIns *cond)
    {
        underrunProtect(12);

        LIns* lhs = cond->oprnd1();
        LIns* rhs = cond->oprnd2();

        NanoAssert(lhs->isI() && rhs->isI());

        // ready to issue the compare
        if (rhs->isImmI())
            {
                int c = rhs->immI();
                Register r = findRegFor(lhs, GpRegs);
                if (c == 0 && cond->isop(LIR_eqi)) {
                    ANDCC(r, r, G0);
                }
                else {
                    SUBCC(r, L2, G0);
                    SET32(c, L2);
                }
            }
        else
            {
                Register ra, rb;
                findRegFor2(GpRegs, lhs, ra, GpRegs, rhs, rb);
                SUBCC(ra, rb, G0);
            }
    }
1122
1123    void Assembler::asm_condd(LIns* ins)
1124    {
1125        // only want certain regs
1126        Register r = deprecated_prepResultReg(ins, AllowableFlagRegs);
1127        underrunProtect(8);
1128        LOpcode condop = ins->opcode();
1129        NanoAssert(isCmpDOpcode(condop));
1130        if (condop == LIR_eqd)
1131            MOVFEI(1, r);
1132        else if (condop == LIR_led)
1133            MOVFLEI(1, r);
1134        else if (condop == LIR_ltd)
1135            MOVFLI(1, r);
1136        else if (condop == LIR_ged)
1137            MOVFGEI(1, r);
1138        else // if (condop == LIR_gtd)
1139            MOVFGI(1, r);
1140        ORI(G0, 0, r);
1141        asm_cmpd(ins);
1142    }
1143
1144    void Assembler::asm_cond(LIns* ins)
1145    {
1146        underrunProtect(8);
1147        // only want certain regs
1148        LOpcode op = ins->opcode();
1149        Register r = deprecated_prepResultReg(ins, AllowableFlagRegs);
1150
1151        if (op == LIR_eqi)
1152            MOVEI(1, r);
1153        else if (op == LIR_lti)
1154            MOVLI(1, r);
1155        else if (op == LIR_lei)
1156            MOVLEI(1, r);
1157        else if (op == LIR_gti)
1158            MOVGI(1, r);
1159        else if (op == LIR_gei)
1160            MOVGEI(1, r);
1161        else if (op == LIR_ltui)
1162            MOVCSI(1, r);
1163        else if (op == LIR_leui)
1164            MOVLEUI(1, r);
1165        else if (op == LIR_gtui)
1166            MOVGUI(1, r);
1167        else // if (op == LIR_geui)
1168            MOVCCI(1, r);
1169        ORI(G0, 0, r);
1170        asm_cmp(ins);
1171    }
1172
    // Emit a 32-bit integer ALU operation.  Multiplies (and any op with a
    // non-immediate rhs) force the rhs into a register; other immediate
    // rhs values are built in scratch L2.  The lhs is placed in (or moved
    // into) the result register.  For mulxov/muljov, the extra
    // SUBCC/SRAI/RDY sequence compares the high 32 bits of the product
    // (from %y via RDY) against the sign extension of the low word, so a
    // following BNE detects signed overflow.  Remember: emitted backwards,
    // so runtime order is bottom-up within each path.
    void Assembler::asm_arith(LIns* ins)
    {
        underrunProtect(28);
        LOpcode op = ins->opcode();
        LIns* lhs = ins->oprnd1();
        LIns* rhs = ins->oprnd2();

        Register rb = deprecated_UnknownReg;
        RegisterMask allow = GpRegs;
        bool forceReg = (op == LIR_muli || op == LIR_mulxovi || op == LIR_muljovi || !rhs->isImmI());

        if (lhs != rhs && forceReg)
            {
                if ((rb = asm_binop_rhs_reg(ins)) == deprecated_UnknownReg) {
                    rb = findRegFor(rhs, allow);
                }
                allow &= ~rmask(rb);
            }
        else if ((op == LIR_addi || op == LIR_addxovi || op == LIR_addjovi) && lhs->isop(LIR_allocp) && rhs->isImmI()) {
            // add alloc+const, use lea
            // (compute FP + (slot disp + const) directly, via scratch L2)
            Register rr = deprecated_prepResultReg(ins, allow);
            int d = findMemFor(lhs) + rhs->immI();
            ADD(FP, L2, rr);
            SET32(d, L2);
            return;
        }

        Register rr = deprecated_prepResultReg(ins, allow);
        // if this is last use of lhs in reg, we can re-use result reg
        // else, lhs already has a register assigned.
        Register ra = ( !lhs->isInReg()
                      ? findSpecificRegFor(lhs, rr)
                      : lhs->deprecated_getReg() );

        if (forceReg)
            {
                if (lhs == rhs)
                    rb = ra;

                if (op == LIR_addi || op == LIR_addxovi || op == LIR_addjovi)
                    ADDCC(rr, rb, rr);
                else if (op == LIR_subi || op == LIR_subxovi || op == LIR_subjovi)
                    SUBCC(rr, rb, rr);
                else if (op == LIR_muli)
                    SMULCC(rr, rb, rr);
                else if (op == LIR_mulxovi || op == LIR_muljovi) {
                    // runtime order: SMULCC, RDY (high word -> L4),
                    // SRAI (sign of low word -> L6), SUBCC sets Z iff
                    // the product fits in 32 bits.
                    SUBCC(L4, L6, L4);
                    SRAI(rr, 31, L6);
                    RDY(L4);
                    SMULCC(rr, rb, rr);
                }
                else if (op == LIR_andi)
                    AND(rr, rb, rr);
                else if (op == LIR_ori)
                    OR(rr, rb, rr);
                else if (op == LIR_xori)
                    XOR(rr, rb, rr);
                else if (op == LIR_lshi)
                    SLL(rr, rb, rr);
                else if (op == LIR_rshi)
                    SRA(rr, rb, rr);
                else if (op == LIR_rshui)
                    SRL(rr, rb, rr);
                else
                    NanoAssertMsg(0, "Unsupported");
            }
        else
            {
                // immediate rhs: constant built in L2 (SET32 below
                // executes first at runtime)
                int c = rhs->immI();
                if (op == LIR_addi || op == LIR_addxovi || op == LIR_addjovi)
                    ADDCC(rr, L2, rr);
                else if (op == LIR_subi || op == LIR_subxovi || op == LIR_subjovi)
                    SUBCC(rr, L2, rr);
                else if (op == LIR_andi)
                    AND(rr, L2, rr);
                else if (op == LIR_ori)
                    OR(rr, L2, rr);
                else if (op == LIR_xori)
                    XOR(rr, L2, rr);
                else if (op == LIR_lshi)
                    SLL(rr, L2, rr);
                else if (op == LIR_rshi)
                    SRA(rr, L2, rr);
                else if (op == LIR_rshui)
                    SRL(rr, L2, rr);
                else
                    NanoAssertMsg(0, "Unsupported");
                SET32(c, L2);
            }

        // move lhs into the result register first (runtime order)
        if ( rr != ra )
            ORI(ra, 0, rr);
    }
1266
    // Emit integer negate (sub %g0 - x) or bitwise-not (orn %g0, x).
    // The operand is moved into the result register first (runtime order;
    // the ORI appears last because code is emitted backwards).
    void Assembler::asm_neg_not(LIns* ins)
    {
        underrunProtect(8);
        LOpcode op = ins->opcode();
        Register rr = deprecated_prepResultReg(ins, GpRegs);

        LIns* lhs = ins->oprnd1();
        // if this is last use of lhs in reg, we can re-use result reg
        // else, lhs already has a register assigned.
        Register ra = ( !lhs->isInReg()
                      ? findSpecificRegFor(lhs, rr)
                      : lhs->deprecated_getReg() );

        if (op == LIR_noti)
            ORN(G0, rr, rr);
        else
            SUB(G0, rr, rr);

        if ( rr != ra )
            ORI(ra, 0, rr);
    }
1288
    // Emit a 32-bit-or-smaller integer load from [base + d] into the
    // result register, choosing the signed/unsigned width variant that
    // matches the LIR opcode.
    void Assembler::asm_load32(LIns* ins)
    {
        underrunProtect(12);
        LOpcode op = ins->opcode();
        LIns* base = ins->oprnd1();
        int d = ins->disp();
        Register rr = deprecated_prepResultReg(ins, GpRegs);
        Register ra = getBaseReg(base, d, GpRegs);
        switch(op) {
            case LIR_lduc2ui:
                LDUB32(ra, d, rr);      // zero-extending byte load
                break;
            case LIR_ldus2ui:
                LDUH32(ra, d, rr);      // zero-extending halfword load
                break;
            case LIR_ldi:
                LDSW32(ra, d, rr);      // full word
                break;
            case LIR_ldc2i:
                LDSB32(ra, d, rr);      // sign-extending byte load
                break;
            case LIR_lds2i:
                LDSH32(ra, d, rr);      // sign-extending halfword load
                break;
            default:
                NanoAssertMsg(0, "asm_load32 should never receive this LIR opcode");
                return;
        }
    }
1318
1319    void Assembler::asm_cmov(LIns* ins)
1320    {
1321        underrunProtect(4);
1322        LOpcode op = ins->opcode();
1323        LIns* condval = ins->oprnd1();
1324        LIns* iftrue  = ins->oprnd2();
1325        LIns* iffalse = ins->oprnd3();
1326
1327        NanoAssert(condval->isCmp());
1328        NanoAssert(op == LIR_cmovi && iftrue->isI() && iffalse->isI() ||
1329                  (op == LIR_cmovd && iftrue->isD() && iffalse->isD()));
1330
1331        RegisterMask rm = (op == LIR_cmovi) ? GpRegs : FpRegs;
1332        const Register rr = deprecated_prepResultReg(ins, rm);
1333        const Register iffalsereg = findRegFor(iffalse, rm & ~rmask(rr));
1334        bool isIcc = true;
1335
1336        if (op == LIR_cmovi) {
1337            switch (condval->opcode()) {
1338                // note that these are all opposites...
1339                case LIR_eqi:  MOVNE (iffalsereg, rr); break;
1340                case LIR_lti:  MOVGE (iffalsereg, rr); break;
1341                case LIR_lei:  MOVG  (iffalsereg, rr); break;
1342                case LIR_gti:  MOVLE (iffalsereg, rr); break;
1343                case LIR_gei:  MOVL  (iffalsereg, rr); break;
1344                case LIR_ltui: MOVCC (iffalsereg, rr); break;
1345                case LIR_leui: MOVGU (iffalsereg, rr); break;
1346                case LIR_gtui: MOVLEU(iffalsereg, rr); break;
1347                case LIR_geui: MOVCS (iffalsereg, rr); break;
1348                debug_only( default: NanoAssert(0); break; )
1349            }
1350        } else {
1351            switch (condval->opcode()) {
1352                // note that these are all opposites...
1353                case LIR_eqi:  FMOVDNE  (iffalsereg, rr); break;
1354                case LIR_lti:  FMOVDGE  (iffalsereg, rr); break;
1355                case LIR_lei:  FMOVDG   (iffalsereg, rr); break;
1356                case LIR_gti:  FMOVDLE  (iffalsereg, rr); break;
1357                case LIR_gei:  FMOVDL   (iffalsereg, rr); break;
1358                case LIR_ltui: FMOVDCC  (iffalsereg, rr); break;
1359                case LIR_leui: FMOVDGU  (iffalsereg, rr); break;
1360                case LIR_gtui: FMOVDLEU (iffalsereg, rr); break;
1361                case LIR_geui: FMOVDCS  (iffalsereg, rr); break;
1362                case LIR_eqd:  FMOVDFNE (iffalsereg, rr); isIcc = false; break;
1363                case LIR_led:  FMOVDFUG (iffalsereg, rr); isIcc = false; break;
1364                case LIR_ltd:  FMOVDFUGE(iffalsereg, rr); isIcc = false; break;
1365                case LIR_ged:  FMOVDFUL (iffalsereg, rr); isIcc = false; break;
1366                case LIR_gtd:  FMOVDFULE(iffalsereg, rr); isIcc = false; break;
1367                debug

Large files files are truncated, but you can click here to view the full file