/vm/src/any/sic/nodeGen.cpp

http://github.com/ticking/self · C++ · 468 lines · 364 code · 68 blank · 36 comment · 46 complexity · 2e3d77dde6f8e93cac12256012533f24 MD5 · raw file

  1. /* Sun-$Revision: 30.11 $ */
  2. /* Copyright 1992-2006 Sun Microsystems, Inc. and Stanford University.
  3. See the LICENSE file for license information. */
  4. # ifdef SIC_COMPILER
  5. # pragma implementation "nodeGen.hh"
  6. # include "_nodeGen.cpp.incl"
  7. # define APPEND(node) current = current->append (node)
  8. # define APPEND1(node) current = current->append1(node)
  9. # define APPENDN(n, node) current = current->append(n, node)
  10. # define COMMENT(s) current = current->append(new CommentNode(s))
// The node generator for the compilation in progress (set by the constructor).
NodeGen* theNodeGen;
// Set up a node generator for one lookup/compilation: initialize the
// PReg and node subsystems, then preallocate the pseudo-registers that
// are pinned to fixed locations (perform, NLR, frame registers).
NodeGen::NodeGen(compilingLookup* l, sendDesc* sd, nmln* d) {
  initPRegs();                          // must run before any PReg is created
  initNodes();                          // must run before any Node is created
  scopeStack = new SSelfScopeBList(30); // stack of scopes being compiled
  start = current = NULL;               // no nodes generated yet
  L =l; send_desc = sd; diLink = d;
  haveStackFrame = false;
  // Pseudo-registers bound to dedicated locations.
  // NOTE(review): the two trailing `true` flags presumably mark these as
  // permanent/preallocated — confirm against the PReg constructor.
  delPR       = new PReg(NULL, PerformDelegateeLoc, true, true);
  selPR       = new PReg(NULL, PerformSelectorLoc,  true, true);
  nlrHomePR   = new PReg(NULL, NLRHomeReg,          true, true);
  nlrHomeIDPR = new PReg(NULL, NLRHomeIDReg,        true, true);
  nlrResultPR = new PReg(NULL, NLRResultReg,        true, true);
  nlrTempPR   = new PReg(NULL, NLRTempReg,          true, true);
  framePR     = new PReg(NULL, FrameReg,            true, true);
  noPR        = new NoPReg;             // sink for results nobody reads
  theNodeGen  = this;                   // publish for file-level callbacks
}
  29. void NodeGen::enterScope(SSelfScope* s) { scopeStack->push(s); }
// Pop the current scope; s must be the innermost scope (sanity check).
void NodeGen::exitScope (SSelfScope* s) {
  assert(currentScope() == s, "exiting wrong scope");
  scopeStack->pop();
}
  34. void NodeGen::prologue(bool needToFlushRegWindow, bool isAccessMethod,
  35. fint nargs) {
  36. assert(current == NULL, "prologue must be first");
  37. current = start = new PrologueNode(needToFlushRegWindow, isAccessMethod,
  38. nargs, L);
  39. }
  40. Node* NodeGen::append(Node* n) {
  41. return APPEND(n);
  42. }
  43. Node* NodeGen::comment(char* s) { return APPEND(new CommentNode(s)); }
// Emit an interrupt/stack-overflow check and record the expression
// stack with the current code scope (the check node carries exprStack,
// so the scope's send bookkeeping must include it).
void NodeGen::testStackOverflow(PRegBList* exprStack, SplitSig* sig) {
  APPEND(new InterruptCheckNode(exprStack, sig));
  ((SCodeScope*)currentScope())->addSend(exprStack);
}
  48. void NodeGen::loadBlockParent(PReg* block, PReg* dst) {
  49. // given a block, load the frame pointer of its enclosing scope
  50. APPEND(new LoadOffsetNode(block, scope_offset(), dst));
  51. }
  52. # ifdef UNUSED
  53. void NodeGen::loadSender(PReg* sp, PReg* dest) {
  54. // load frame's sender
  55. APPEND(new LoadOffsetNode(sp, frame_offset * oopSize, dest));
  56. }
  57. # endif
// Load a value saved in a stack frame (described by nmethod/NameDesc)
// into dest; nd, if given, must describe a real location, not a constant.
void NodeGen::loadSaved(PReg* frame, nmethod* nm, NameDesc* nd, PReg* dest, oop name) {
  assert(!nd || !nd->isValue(), "should have a location");
  APPEND(new LoadStackNode(frame, nm, nd, dest, name));
}
// Store val into a stack-frame slot (described by nmethod/NameDesc);
// nd must describe a real location, not a constant value.
void NodeGen::storeSaved(PReg* val, PReg* frame, nmethod* nm, NameDesc* nd, oop name) {
  assert(!nd->isValue(), "should have a location");
  APPEND(new StoreStackNode(val, frame, nm, nd, name));
}
// Store val into the slot named by path, starting the lookup at rcvr.
// checkStore requests a GC store-check on the write.
void NodeGen::pathAssign(PReg* rcvr,
                         realSlotRef* path,
                         PReg* val,
                         bool checkStore) {
  COMMENT("Begin slot assignment");
  // byte offset of the slot within its holder (untag the smi)
  int32 offset = smiOop(path->desc->data)->byte_count() - Mem_Tag;
  if (path->holder->is_object_or_map()) {
    PReg* t = new TempPReg(currentScope());
    t = loadPath(path->holder, rcvr, t);  // base address of the holder
    APPEND(new StoreOffsetNode(val, t, offset, checkStore));
  } else {
    fatal("don't support vframe lookups");
  }
}
  80. PReg* NodeGen::loadPath(lookupTarget* target,
  81. PReg* receiver,
  82. PReg* dest) {
  83. // returns register to use for base address of load
  84. if (target->is_receiver()) {
  85. return receiver;
  86. } else {
  87. assert(target->is_object(), "must be an object path search");
  88. objectLookupTarget* otarget = (objectLookupTarget*) target;
  89. if (otarget->prevTargetSlot) {
  90. pathLookup(otarget->prevTargetSlot, receiver, dest);
  91. } else {
  92. loadOop(otarget->obj, dest);
  93. }
  94. return dest;
  95. }
  96. }
// Load the value of the slot named by path into dest, starting from
// receiver (only object/map holders are supported; vframes are not).
void NodeGen::pathLookup(realSlotRef* path, PReg* receiver, PReg* dest) {
  if (path->holder->is_object_or_map()) {
    PReg* base = loadPath(path->holder, receiver, dest);
    // byte offset of the slot within its holder (untag the smi)
    fint offset = smiOop(path->desc->data)->byte_count() - Mem_Tag;
    APPEND(new LoadOffsetNode(base, offset, dest));
  } else {
    fatal("don't support vframe lookups");
  }
}
// Emit a backward branch restarting the loop at loopStart; returns the
// restart node.  Code generated afterwards is unreachable.
Node* NodeGen::restart(MergeNode* loopStart,
                       PRegBList* exprStack, SplitSig* s) {
  Node* n = APPEND(new RestartNode(exprStack, s, loopStart));
  // reset current so that any code generated after the restart will
  // be ignored in later phases since there is no path reaching it
  // Next line added by dmu 4/26/07 to try to fix bug:
  // inlined [3] loop had nsends of 0, but needed mask for call to InterruptCheck
  ((SCodeScope*)currentScope())->addSend(exprStack);
  current = new NopNode;
  return n;
}
// Emit a send to self (self is implicit in the send node, hence Unused);
// registers the expression stack with scope sc.  Returns the send node.
Node* NodeGen::selfCall(SCodeScope* sc, LookupType l, PReg* self,
                        oop sel, oop del, oop methodHolder,
                        MergeNode* nlrPoint, fint argc,
                        PRegBList* exprStack, SplitSig* sig) {
  Unused(self);
  APPEND(new SendNode(l, sel, del, methodHolder, nlrPoint, argc,
                      exprStack, sig));
  sc->addSend(exprStack);
  return current;
}
  127. Node* NodeGen::perform(SCodeScope* sc, LookupType l, PReg* self,
  128. fint argc, oop del, oop mh, MergeNode* nlrPoint,
  129. PRegBList* exprStack, SplitSig* sig) {
  130. return selfCall(sc, l, self, as_smiOop(argc), del, mh, nlrPoint, argc,
  131. exprStack, sig);
  132. }
// Emit a primitive call node for primitive p; registers the node's
// expression stack (if any) with scope sc.  Returns the prim node.
PrimNode* NodeGen::primCall(SCodeScope* sc, PrimDesc* p,
                            MergeNode* nlrPoint, fint argc,
                            PRegBList* exprStack, SplitSig* sig,
                            BlockPReg* failBlock) {
  PrimNode* pn;
  APPEND(pn = new PrimNode(p, nlrPoint, argc, exprStack, sig, failBlock));
  // pn->exprStack may differ from the argument — test the node's own copy
  if (pn->exprStack) sc->addSend(pn->exprStack);
  return pn;
}
// Emit an uncommon-branch (deoptimization trap) node; code after it is
// unreachable, so current is cleared.  Returns the uncommon node.
Node* NodeGen::uncommonBranch(PRegBList* exprStack, bool restartSend) {
  assert(SICDeferUncommonBranches, "shouldn't use uncommon traps");
  Node* n = APPEND(new UncommonNode(exprStack, restartSend));
  assert(currentScope()->isCodeScope(), "must be non-access");
  ((SCodeScope*)currentScope())->addSend(exprStack);
  current = NULL;
  return n;
}
  150. void NodeGen::nonLifoTrap(PRegBList* exprStack, SplitSig* s) {
  151. APPEND(new DeadBlockNode(exprStack, s));
  152. }
  153. void NodeGen::deadEnd() {
  154. APPEND(new DeadEndNode);
  155. current = NULL;
  156. }
  157. void NodeGen::loadOop(oop p, PReg* dest) {
  158. APPEND(new AssignNode(new_ConstPReg(currentScope(), p), dest));
  159. }
  160. void NodeGen::loadBlockOop(BlockPReg* b, SplitSig* s) {
  161. APPEND(new BlockCloneNode(b, s)); }
  162. void NodeGen::move(PReg* from, PReg* to) { APPEND(new AssignNode(from, to));}
  163. void NodeGen::zapBlock(BlockPReg* block) {
  164. assert(current, "should not generate unreachable zaps");
  165. APPEND(new BlockZapNode(block)); }
// Parameters for materializeHelper: the callback signature accepted by
// doUplevelAccesses takes no extra arguments, so materializeBlock passes
// these through file-level statics before invoking it.
static PRegBList* mlist;
static SplitSig* msig;
// Callback: recursively materialize an uplevel-accessed block r.
static void materializeHelper(PReg*, PReg* r, bool) {
  theNodeGen->materializeBlock(r, msig, mlist, true);
}
// Ensure the block r (if it is a block PReg) actually exists as an
// object, along with every block it accesses uplevel; `materialized`
// accumulates blocks already handled so recursion terminates.
void NodeGen::materializeBlock(PReg* r, SplitSig* sig,
                               PRegBList* materialized, bool recursive) {
  Unused(recursive);
  if (r->isBlockPReg() && !materialized->contains(r)) {
    BlockPReg* bpr = (BlockPReg*)r;
    bpr->isMaterialized = true;
    // make sure the block is created
    APPEND(new BlockCreateNode(bpr, sig));
    // flush if uplevel-accessed
    // done by BlockCreateNode
    // if (recursive) APPEND(new FlushNode(currentScope(), bpr, true));
    materialized->append(bpr);
    // also make sure all uplevel-accessed blocks exist
    msig = sig; mlist = materialized; // params for materializeHelper
    bpr->nscope()->doUplevelAccesses(bpr, materializeHelper);
  }
}
  188. void NodeGen::loadArg(PReg* from, PReg* to) {
  189. APPEND(new AssignNode(from, to));
  190. }
// Move `from` into outgoing argument slot argNo (-1 means the receiver
// register); builds the destination ArgSAPReg at the current bci.
void NodeGen::loadArg(fint argNo, PReg* from, bool isPrimCall) {
  Unused(isPrimCall);
  assert(currentScope()->isCodeScope(), "oops");
  SCodeScope* s = (SCodeScope*)currentScope();
  fint bci = s->bci();
  Location l = argNo == -1 ? ReceiverReg : ArgLocation(argNo);
  // weird arg numbering - 0 is 1st arg, not receiver
  // uses aren't right yet (call should have use) -fix this
  loadArg(from, new ArgSAPReg(s, l, true, false, bci, bci));
}
// Load the non-local-return registers (result and home scope ID) before
// initiating an NLR; the scope argument is currently unused.
void NodeGen::prepareNLR(PReg* result, PReg* scope, smi homeID) {
  Unused(scope);
  // set up NLR registers
  loadArg(result, nlrResultPR);
  APPEND(new LoadIntNode(homeID, nlrHomeIDPR));
  // (nlrHomePR is loaded by caller)
}
// Emit the test for whether a non-local return has reached its home
// scope: compare the home scope ID (if inlining produced one) and the
// home frame pointer.
Node* NodeGen::testNLR(smi homeID) {
  // test if NLR has reached home; the node returned is the success
  // branch (i.e. the home has been reached), current is the other
  // branch
  Node* homeIDTest = NULL;
  if (homeID) { // note: will be 0 if no inlining
    if (isImmediate(smiOop(homeID))) {
      // homeID fits in an instruction immediate: compare directly
      APPEND(new ArithRCNode(SubCCArithOp, nlrHomeIDPR, homeID, noPR));
    } else {
      // too large for an immediate: load into a temp, then compare
      APPEND(new LoadIntNode(homeID, nlrTempPR));
      APPEND(new ArithRRNode(SubCCArithOp, nlrHomeIDPR, nlrTempPR, noPR));
    }
    homeIDTest = APPEND(new BranchNode(EQBranchOp));
    // on the equal branch, also compare the home frame pointer
    APPEND1(new ArithRRNode(SubCCArithOp, nlrHomePR, framePR, noPR));
  } else {
    // no scope ID to test; just compare the home frame pointer
    APPEND (new ArithRRNode(SubCCArithOp, nlrHomePR, framePR, noPR));
  }
  Node* homeFrameTest = APPEND(new BranchNode(EQBranchOp));
  // home & homeID match
  Node* finalReturn = new NopNode;
  APPEND1(finalReturn);
  // no match, continue NLR
  Node* cont = new MergeNode("testNLR cont");
  if (homeIDTest) homeIDTest->append(cont);
  homeFrameTest->append(cont);
  current = cont;
  return finalReturn;
}
  236. void NodeGen::continueNonLocalReturn() {
  237. // continue NLR (return through caller's inline cache)
  238. APPEND(new NonLocalReturnNode(NULL, NULL));
  239. }
  240. void NodeGen::branch(MergeNode* target) {
  241. // connect current with target
  242. if (current != NULL && current != target) {
  243. current->append(target);
  244. }
  245. current = target;
  246. }
// Generate nodes for a (non-indexed) branch bytecode: branch to
// targetNode if the top of stack equals the target constant, or
// unconditionally when targetPR is NULL.  Tries branch splitting first.
void NodeGen::branchCode( MergeNode* targetNode,
                          bool isBackwards,
                          PReg* targetPR,
                          SExpr* testExpr,
                          BranchBCTargetStack* targetStack,
                          SExprStack* exprStack,
                          PRegBList* exprStackPRs,
                          SplitSig* s ) {
  // gen nodes for all but indexed branch bytecode
  // branch if top of stack == target_oop, uncond if PRs NULL
  if ( targetPR != NULL && SICBranchSplitting ) {
    // splitCondBranch returns NULL on success, else a reason string
    char* whyNot = splitCondBranch( targetNode,
                                    isBackwards,
                                    targetPR,
                                    testExpr,
                                    targetStack,
                                    exprStack,
                                    exprStackPRs,
                                    s );
    if (PrintSICBranchSplitting)
      if (!whyNot)
        lprintf("branch splitting succeeded\n");
      else
        lprintf("branch splitting failed: %s\n", whyNot);
    if ( !whyNot )
      return;            // split succeeded; no materialized test needed
  }
  // fall back: materialize the comparison and branch on it
  BranchOpCode op;
  if (targetPR == NULL)
    op = ALBranchOp;     // unconditional branch
  else {
    APPEND( new ArithRRNode( SubCCArithOp, targetPR, testExpr->preg(), noPR));
    op = EQBranchOp;
  }
  Node* condBranch = new BranchNode(op);
  append( condBranch );
  // successor 1 is the taken side of the branch
  appendBranchCodeNodes( 1,
                         isBackwards,
                         targetNode,
                         targetStack,
                         exprStack,
                         exprStackPRs,
                         s );
  // leave current on the fall-through side
  current = condBranch;
}
  292. char* NodeGen::splitCondBranch( MergeNode* targetNode,
  293. bool isBackwards,
  294. PReg* targetPR,
  295. SExpr* testExpr,
  296. BranchBCTargetStack* targetStack,
  297. SExprStack* exprStack,
  298. PRegBList* exprStackPRs,
  299. SplitSig* s ) {
  300. // try to split a conditional branch bc to avoid materializing
  301. // the boolean
  302. // local splitting only for now
  303. assert(targetPR->isConstPReg(),
  304. "cond branch must be testing for constant");
  305. oop targetOop = ((ConstPReg*)targetPR)->constant;
  306. if (!testExpr->isMergeSExpr())
  307. return "testExpr not MergeSExpr";
  308. if (!((MergeSExpr*)testExpr)->isSplittable())
  309. return "textExpr not splittable";
  310. SExprBList* exprs = ((MergeSExpr*)testExpr)->exprs;
  311. assert(testExpr->node(), "splittable implies node()");
  312. Node* preceedingMerge = testExpr->node();
  313. if (current != preceedingMerge)
  314. return "not local"; // local only for now
  315. if ( preceedingMerge->nPredecessors() != exprs->length() )
  316. return "would have to iterate over predecessors";
  317. fint i;
  318. for ( i = 0;
  319. i < exprs->length();
  320. ++i) {
  321. SExpr* e = exprs->nth(i);
  322. Node* n = e->node();
  323. if ( !preceedingMerge->isPredecessor(n) )
  324. return "merge not immediately after expr node";
  325. if ( !e->isConstantSExpr() )
  326. return "merge contains non-constant expression";
  327. }
  328. MergeNode* mergeForBranching = new MergeNode("for branching");
  329. MergeNode* mergeForFallingThrough = new MergeNode("for falling through");
  330. mergeForBranching ->setScope(currentScope());
  331. mergeForFallingThrough->setScope(currentScope());
  332. for ( i = 0;
  333. i < exprs->length();
  334. ++i) {
  335. SExpr* e = exprs->nth(i);
  336. Node* n = e->node();
  337. MergeNode* mn = e->constant() == targetOop
  338. ? mergeForBranching
  339. : mergeForFallingThrough;
  340. mn->setPrev(n);
  341. n->moveNext(preceedingMerge, mn);
  342. }
  343. while (preceedingMerge->nPredecessors())
  344. preceedingMerge->removePrev(preceedingMerge->prev(0));
  345. current = mergeForBranching;
  346. branchCode( targetNode,
  347. isBackwards,
  348. NULL,
  349. NULL,
  350. targetStack,
  351. exprStack,
  352. exprStackPRs,
  353. s);
  354. append(mergeForFallingThrough);
  355. return NULL;
  356. }
// Generate an indexed (computed, nCases-way) branch on testPR; each
// case i gets its own target node, direction flag, and target stack.
void NodeGen::branchIndexedCode(
    int32 nCases,
    MergeNode** targetNodes,
    bool* isBackwards,
    PReg* testPR,
    BranchBCTargetStack** targetStacks,
    SExprStack* exprStack,
    PRegBList* exprStackPRs,
    SplitSig* s ) {
  // generate indexed branch
  IndexedBranchNode* ib = new IndexedBranchNode(testPR, nCases);
  current->append(ib);
  for (int32 i = 0; i < nCases; ++i) {
    current = ib;   // each case hangs off the indexed-branch node
    // successor 0 is fall-through, so case i uses successor i + 1
    appendBranchCodeNodes( i + 1,
                           isBackwards[i],
                           targetNodes[i],
                           targetStacks[i],
                           exprStack,
                           exprStackPRs,
                           s );
  }
  current = ib;
}
// Attach the taken-branch path (successor whichSucc) of a branch or
// indexed-branch node: merge stack state into the target and connect
// either a restart (for backward branches) or the target merge itself.
void NodeGen::appendBranchCodeNodes( int32 whichSucc,
                                     bool isBackwards,
                                     MergeNode* target,
                                     BranchBCTargetStack* targetStack,
                                     SExprStack* exprStack,
                                     PRegBList* exprStackPRs,
                                     SplitSig* s ) {
  // use nop nodes to avoid indexed br node being pred twice of
  // same merge node if two cases goto same place
  APPENDN(whichSucc, new NopNode);
  // move stack values to targetStack
  targetStack->mergeInExprsFromStack(exprStack, target, isBackwards);
  // append to current for a fwd or back branch for a branch bc
  Node* n = isBackwards
              ? (Node*) new RestartNode(exprStackPRs, s, target)
              : target;
  append( n);
}
// Print a terse identification of this NodeGen (debugging aid).
void NodeGen::print_short() { lprintf("NodeGen %#lx", (unsigned long)this); }
  400. # endif