PageRenderTime 69ms CodeModel.GetById 29ms RepoModel.GetById 0ms app.codeStats 1ms

/vm_insnhelper.c

https://github.com/diabolo/ruby
C | 1713 lines | 1420 code | 225 blank | 68 comment | 371 complexity | 0d8ce23a4729dbcd621d7b5ffd58a14e MD5 | raw file
Possible License(s): GPL-2.0, BSD-3-Clause
  1. /**********************************************************************
  2. vm_insnhelper.c - instruction helper functions.
  3. $Author$
  4. Copyright (C) 2007 Koichi Sasada
  5. **********************************************************************/
  6. /* finish iseq array */
  7. #include "insns.inc"
  8. #include <math.h>
  9. /* control stack frame */
  10. #ifndef INLINE
  11. #define INLINE inline
  12. #endif
  13. static rb_control_frame_t *vm_get_ruby_level_caller_cfp(rb_thread_t *th, rb_control_frame_t *cfp);
static inline rb_control_frame_t *
vm_push_frame(rb_thread_t * th, const rb_iseq_t * iseq,
	      VALUE type, VALUE self, VALUE specval,
	      const VALUE *pc, VALUE *sp, VALUE *lfp,
	      int local_size)
{
    /* Push a new control frame for `th` and return it.
     *
     * `local_size` slots starting at `sp` are nil-initialized on the value
     * stack, then one extra GC-guarded slot holding `specval` is written.
     * `lfp == 0` means this frame owns its locals, so lfp is pointed at the
     * specval slot; otherwise the caller passes the enclosing frame's lfp
     * (e.g. for blocks sharing local variables).
     * Raises sysstack_error when the value stack would collide with the
     * control-frame stack. */
    rb_control_frame_t * const cfp = th->cfp - 1;
    int i;

    /* value stack grows up, control-frame stack grows down; overflow when
     * they would meet */
    if ((void *)(sp + local_size) >= (void *)cfp) {
	rb_exc_raise(sysstack_error);
    }
    th->cfp = cfp;

    /* setup vm value stack */

    /* nil initialize */
    for (i=0; i < local_size; i++) {
	*sp = Qnil;
	sp++;
    }

    /* set special val */
    *sp = GC_GUARDED_PTR(specval);

    if (lfp == 0) {
	lfp = sp;
    }

    /* setup vm control frame stack */
    cfp->pc = (VALUE *)pc;
    cfp->sp = sp + 1;
    cfp->bp = sp + 1;
    cfp->iseq = (rb_iseq_t *) iseq;
    cfp->flag = type;
    cfp->self = self;
    cfp->lfp = lfp;
    cfp->dfp = sp;   /* dfp points at the specval slot */
    cfp->proc = 0;
    cfp->me = 0;

#define COLLECT_PROFILE 0
#if COLLECT_PROFILE
    cfp->prof_time_self = clock();
    cfp->prof_time_chld = 0;
#endif

    if (VMDEBUG == 2) {
	SDR();
    }

    return cfp;
}
  58. static inline void
  59. vm_pop_frame(rb_thread_t *th)
  60. {
  61. #if COLLECT_PROFILE
  62. rb_control_frame_t *cfp = th->cfp;
  63. if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
  64. VALUE current_time = clock();
  65. rb_control_frame_t *cfp = th->cfp;
  66. cfp->prof_time_self = current_time - cfp->prof_time_self;
  67. (cfp+1)->prof_time_chld += cfp->prof_time_self;
  68. cfp->iseq->profile.count++;
  69. cfp->iseq->profile.time_cumu = cfp->prof_time_self;
  70. cfp->iseq->profile.time_self = cfp->prof_time_self - cfp->prof_time_chld;
  71. }
  72. else if (0 /* c method? */) {
  73. }
  74. #endif
  75. th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
  76. if (VMDEBUG == 2) {
  77. SDR();
  78. }
  79. }
  80. /* method dispatch */
  81. NORETURN(static void argument_error(const rb_iseq_t *iseq, int miss_argc, int correct_argc));
  82. static void
  83. argument_error(const rb_iseq_t *iseq, int miss_argc, int correct_argc)
  84. {
  85. VALUE mesg = rb_sprintf("wrong number of arguments (%d for %d)", miss_argc, correct_argc);
  86. VALUE exc = rb_exc_new3(rb_eArgError, mesg);
  87. VALUE bt = rb_make_backtrace();
  88. VALUE err_line = 0;
  89. if (iseq) {
  90. int line_no = 1;
  91. if (iseq->insn_info_size) {
  92. line_no = iseq->insn_info_table[0].line_no;
  93. }
  94. err_line = rb_sprintf("%s:%d:in `%s'",
  95. RSTRING_PTR(iseq->filename),
  96. line_no, RSTRING_PTR(iseq->name));
  97. rb_funcall(bt, rb_intern("unshift"), 1, err_line);
  98. }
  99. rb_funcall(exc, rb_intern("set_backtrace"), 1, bt);
  100. rb_exc_raise(exc);
  101. }
/* Set up callee-side arguments for a call into `iseq`.
 * Fast path: when the iseq takes only simple mandatory arguments
 * (arg_simple & 0x01) just verify the count and start at pc offset 0.
 * Otherwise defer to vm_callee_setup_arg_complex(), which handles
 * optional/rest/post/block parameters.  `ret` receives the opt_pc
 * offset at which execution should begin. */
#define VM_CALLEE_SETUP_ARG(ret, th, iseq, orig_argc, orig_argv, block) \
    if (LIKELY(iseq->arg_simple & 0x01)) { \
	/* simple check */ \
	if (orig_argc != iseq->argc) { \
	    argument_error(iseq, orig_argc, iseq->argc); \
	} \
	ret = 0; \
    } \
    else { \
	ret = vm_callee_setup_arg_complex(th, iseq, orig_argc, orig_argv, block); \
    }
static inline int
vm_callee_setup_arg_complex(rb_thread_t *th, const rb_iseq_t * iseq,
			    int orig_argc, VALUE * orig_argv,
			    const rb_block_t **block)
{
    /* Arrange the caller-supplied arguments in orig_argv[] into the local
     * slot layout of an iseq with non-simple parameters (optional, rest,
     * post and/or block parameters).  Returns the pc offset (opt_pc) at
     * which execution of the iseq should begin — nonzero when some
     * optional-argument defaulting code must run.  Raises ArgumentError
     * on arity mismatch. */
    const int m = iseq->argc;   /* number of mandatory arguments */
    int argc = orig_argc;
    VALUE *argv = orig_argv;
    rb_num_t opt_pc = 0;

    /* mark_stack_len covers the argument region while it is shuffled —
     * presumably to keep these slots GC-visible; confirm against the GC
     * mark code */
    th->mark_stack_len = argc + iseq->arg_size;

    /* mandatory */
    if (argc < (m + iseq->arg_post_len)) { /* check with post arg */
	argument_error(iseq, argc, m + iseq->arg_post_len);
    }

    argv += m;
    argc -= m;

    /* post arguments */
    if (iseq->arg_post_len) {
	if (!(orig_argc < iseq->arg_post_start)) {
	    /* remaining args overlap the post-arg destination slots:
	     * copy them aside first */
	    VALUE *new_argv = ALLOCA_N(VALUE, argc);
	    MEMCPY(new_argv, argv, VALUE, argc);
	    argv = new_argv;
	}

	/* move the trailing arguments into the post-arg slots
	 * (argc is decremented by arg_post_len in the same expression) */
	MEMCPY(&orig_argv[iseq->arg_post_start], &argv[argc -= iseq->arg_post_len],
	       VALUE, iseq->arg_post_len);
    }

    /* opt arguments */
    if (iseq->arg_opts) {
	const int opts = iseq->arg_opts - 1 /* no opt */;

	if (iseq->arg_rest == -1 && argc > opts) {
	    /* surplus args with no rest parameter to absorb them */
	    argument_error(iseq, orig_argc, m + opts + iseq->arg_post_len);
	}

	if (argc > opts) {
	    /* every optional supplied: skip all defaulting code */
	    argc -= opts;
	    argv += opts;
	    opt_pc = iseq->arg_opt_table[opts]; /* no opt */
	}
	else {
	    /* nil-fill the missing optional slots; start execution at
	     * the defaulting code for the first unsupplied optional */
	    int i;
	    for (i = argc; i<opts; i++) {
		orig_argv[i + m] = Qnil;
	    }
	    opt_pc = iseq->arg_opt_table[argc];
	    argc = 0;
	}
    }

    /* rest arguments */
    if (iseq->arg_rest != -1) {
	/* collect whatever is left into the rest array */
	orig_argv[iseq->arg_rest] = rb_ary_new4(argc, argv);
	argc = 0;
    }

    /* block arguments */
    if (block && iseq->arg_block != -1) {
	VALUE blockval = Qnil;
	const rb_block_t *blockptr = *block;

	if (argc != 0) {
	    /* leftover positional args at this point are an arity error */
	    argument_error(iseq, orig_argc, m + iseq->arg_post_len);
	}

	if (blockptr) {
	    /* make Proc object */
	    if (blockptr->proc == 0) {
		rb_proc_t *proc;
		blockval = rb_vm_make_proc(th, blockptr, rb_cProc);
		GetProcPtr(blockval, proc);
		*block = &proc->block;
	    }
	    else {
		blockval = blockptr->proc;
	    }
	}

	orig_argv[iseq->arg_block] = blockval; /* Proc or nil */
    }

    th->mark_stack_len = 0;
    return (int)opt_pc;
}
static inline int
caller_setup_args(const rb_thread_t *th, rb_control_frame_t *cfp, VALUE flag,
		  int argc, rb_iseq_t *blockiseq, rb_block_t **block)
{
    /* Caller-side argument preparation: resolve an explicit block argument
     * (`&blk`) or a literal block iseq into *block, and expand a trailing
     * splat (`*ary`) in place on the value stack.  Returns the possibly
     * adjusted argument count. */
    rb_block_t *blockptr = 0;

    if (block) {
	if (flag & VM_CALL_ARGS_BLOCKARG_BIT) {
	    /* `&blockarg` was passed: pop it and coerce to a Proc */
	    rb_proc_t *po;
	    VALUE proc;

	    proc = *(--cfp->sp);

	    if (proc != Qnil) {
		if (!rb_obj_is_proc(proc)) {
		    /* try to_proc conversion, then re-verify */
		    VALUE b = rb_check_convert_type(proc, T_DATA, "Proc", "to_proc");
		    if (NIL_P(b) || !rb_obj_is_proc(b)) {
			rb_raise(rb_eTypeError,
				 "wrong argument type %s (expected Proc)",
				 rb_obj_classname(proc));
		    }
		    proc = b;
		}
		GetProcPtr(proc, po);
		blockptr = &po->block;
		RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp)->proc = proc;
		*block = blockptr;
	    }
	}
	else if (blockiseq) {
	    /* literal block: fill in the block slot of this frame */
	    blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp);
	    blockptr->iseq = blockiseq;
	    blockptr->proc = 0;
	    *block = blockptr;
	}
    }

    /* expand top of stack? */
    if (flag & VM_CALL_ARGS_SPLAT_BIT) {
	VALUE ary = *(cfp->sp - 1);
	VALUE *ptr;
	int i;
	VALUE tmp = rb_check_convert_type(ary, T_ARRAY, "Array", "to_a");

	if (NIL_P(tmp)) {
	    /* do nothing: not array-convertible, keep it as one argument */
	}
	else {
	    /* replace the array on the stack with its elements */
	    long len = RARRAY_LEN(tmp);
	    ptr = RARRAY_PTR(tmp);
	    cfp->sp -= 1;

	    CHECK_STACK_OVERFLOW(cfp, len);

	    for (i = 0; i < len; i++) {
		*cfp->sp++ = ptr[i];
	    }
	    /* one slot (the array itself) was consumed, i slots added */
	    argc += i-1;
	}
    }

    return argc;
}
static inline VALUE
call_cfunc(VALUE (*func)(), VALUE recv,
	   int len, int argc, const VALUE *argv)
{
    /* Invoke a C-implemented method with the calling convention selected
     * by its declared arity `len`:
     *   -2 : func(recv, args_ary)  — arguments packed into one Array
     *   -1 : func(argc, argv, recv) — C-level varargs style
     *  0-15: func(recv, argv[0], ..., argv[len-1]) — fixed arity, spread
     * Raises ArgumentError when a fixed arity does not match argc, or when
     * len exceeds the supported maximum of 15. */
    /* printf("len: %d, argc: %d\n", len, argc); */

    if (len >= 0 && argc != len) {
	rb_raise(rb_eArgError, "wrong number of arguments(%d for %d)",
		 argc, len);
    }

    switch (len) {
      case -2:
	return (*func) (recv, rb_ary_new4(argc, argv));
	break;
      case -1:
	return (*func) (argc, argv, recv);
	break;
      case 0:
	return (*func) (recv);
	break;
      case 1:
	return (*func) (recv, argv[0]);
	break;
      case 2:
	return (*func) (recv, argv[0], argv[1]);
	break;
      case 3:
	return (*func) (recv, argv[0], argv[1], argv[2]);
	break;
      case 4:
	return (*func) (recv, argv[0], argv[1], argv[2], argv[3]);
	break;
      case 5:
	return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
	break;
      case 6:
	return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
			argv[5]);
	break;
      case 7:
	return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
			argv[5], argv[6]);
	break;
      case 8:
	return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
			argv[5], argv[6], argv[7]);
	break;
      case 9:
	return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
			argv[5], argv[6], argv[7], argv[8]);
	break;
      case 10:
	return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
			argv[5], argv[6], argv[7], argv[8], argv[9]);
	break;
      case 11:
	return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
			argv[5], argv[6], argv[7], argv[8], argv[9],
			argv[10]);
	break;
      case 12:
	return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
			argv[5], argv[6], argv[7], argv[8], argv[9],
			argv[10], argv[11]);
	break;
      case 13:
	return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
			argv[5], argv[6], argv[7], argv[8], argv[9], argv[10],
			argv[11], argv[12]);
	break;
      case 14:
	return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
			argv[5], argv[6], argv[7], argv[8], argv[9], argv[10],
			argv[11], argv[12], argv[13]);
	break;
      case 15:
	return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
			argv[5], argv[6], argv[7], argv[8], argv[9], argv[10],
			argv[11], argv[12], argv[13], argv[14]);
	break;
      default:
	rb_raise(rb_eArgError, "too many arguments(%d)", len);
	return Qundef;		/* not reached */
    }
}
static inline VALUE
vm_call_cfunc(rb_thread_t *th, rb_control_frame_t *reg_cfp,
	      int num, VALUE recv, const rb_block_t *blockptr,
	      const rb_method_entry_t *me)
{
    /* Call a C-function method: push a CFUNC frame, fire the C_CALL event
     * hook, dispatch through call_cfunc(), then pop the frame and fire
     * C_RETURN.  The receiver and `num` arguments are consumed from the
     * caller's value stack. */
    VALUE val = 0;
    const rb_method_definition_t *def = me->def;
    rb_control_frame_t *cfp;

    EXEC_EVENT_HOOK(th, RUBY_EVENT_C_CALL, recv, me->called_id, me->klass);

    /* CFUNC frames carry no iseq/pc; local_size 1 reserves the specval slot */
    cfp = vm_push_frame(th, 0, VM_FRAME_MAGIC_CFUNC,
			recv, (VALUE) blockptr, 0, reg_cfp->sp, 0, 1);
    cfp->me = me;
    reg_cfp->sp -= num + 1;   /* pop receiver + args from the caller frame */

    val = call_cfunc(def->body.cfunc.func, recv, (int)def->body.cfunc.argc, num, reg_cfp->sp + 1);

    /* the C function must not have left extra frames behind */
    if (reg_cfp != th->cfp + 1) {
	rb_bug("cfp consistency error - send");
    }

    vm_pop_frame(th);

    EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, recv, me->called_id, me->klass);

    return val;
}
  348. static inline VALUE
  349. vm_call_bmethod(rb_thread_t *th, VALUE recv, int argc, const VALUE *argv,
  350. const rb_block_t *blockptr, const rb_method_entry_t *me)
  351. {
  352. rb_proc_t *proc;
  353. VALUE val;
  354. /* control block frame */
  355. th->passed_me = me;
  356. GetProcPtr(me->def->body.proc, proc);
  357. val = rb_vm_invoke_proc(th, proc, recv, argc, argv, blockptr);
  358. return val;
  359. }
static inline void
vm_method_missing_args(rb_thread_t *th, VALUE *argv,
		       int num, const rb_block_t *blockptr, int opt)
{
    /* Copy receiver + `num` arguments (num + 1 values) off the value stack
     * into argv, record the method_missing reason (`opt`) and the passed
     * block on the thread, then pop those slots.
     * NOTE: `reg_cfp` looks unused but is referenced implicitly by the
     * STACK_ADDR_FROM_TOP and POPN macros. */
    rb_control_frame_t * const reg_cfp = th->cfp;
    MEMCPY(argv, STACK_ADDR_FROM_TOP(num + 1), VALUE, num + 1);
    th->method_missing_reason = opt;
    th->passed_block = blockptr;
    POPN(num + 1);
}
  370. static inline VALUE
  371. vm_method_missing(rb_thread_t *th, ID id, VALUE recv,
  372. int num, const rb_block_t *blockptr, int opt)
  373. {
  374. VALUE *argv = ALLOCA_N(VALUE, num + 1);
  375. vm_method_missing_args(th, argv, num, blockptr, opt);
  376. argv[0] = ID2SYM(id);
  377. return rb_funcall2(recv, idMethodMissing, num + 1, argv);
  378. }
static inline void
vm_setup_method(rb_thread_t *th, rb_control_frame_t *cfp,
		VALUE recv, int argc, const rb_block_t *blockptr, VALUE flag,
		const rb_method_entry_t *me)
{
    /* Prepare and push a METHOD frame for an iseq-defined method.
     * Arguments already sit on the value stack at cfp->sp - argc; they are
     * laid out into the callee's locals via VM_CALLEE_SETUP_ARG.  For tail
     * calls (VM_CALL_TAILCALL_BIT) the caller's frame is popped first and
     * the arguments are slid down into its place. */
    int opt_pc, i;
    VALUE *sp, *rsp = cfp->sp - argc;
    rb_iseq_t *iseq = me->def->body.iseq;

    VM_CALLEE_SETUP_ARG(opt_pc, th, iseq, argc, rsp, &blockptr);

    /* stack overflow check */
    CHECK_STACK_OVERFLOW(cfp, iseq->stack_max);

    sp = rsp + iseq->arg_size;

    if (LIKELY(!(flag & VM_CALL_TAILCALL_BIT))) {
	if (0) printf("local_size: %d, arg_size: %d\n",
		      iseq->local_size, iseq->arg_size);

	/* clear local variables */
	for (i = 0; i < iseq->local_size - iseq->arg_size; i++) {
	    *sp++ = Qnil;
	}

	vm_push_frame(th, iseq,
		      VM_FRAME_MAGIC_METHOD, recv, (VALUE) blockptr,
		      iseq->iseq_encoded + opt_pc, sp, 0, 0);

	/* rewind the caller's sp past receiver + args */
	cfp->sp = rsp - 1 /* recv */;
    }
    else {
	/* tail call: reuse the caller's stack region */
	VALUE *p_rsp;
	th->cfp++; /* pop cf */
	p_rsp = th->cfp->sp;

	/* copy arguments */
	for (i=0; i < (sp - rsp); i++) {
	    p_rsp[i] = rsp[i];
	}

	sp -= rsp - p_rsp;

	/* clear local variables */
	for (i = 0; i < iseq->local_size - iseq->arg_size; i++) {
	    *sp++ = Qnil;
	}

	vm_push_frame(th, iseq,
		      VM_FRAME_MAGIC_METHOD, recv, (VALUE) blockptr,
		      iseq->iseq_encoded + opt_pc, sp, 0, 0);
    }
}
static inline VALUE
vm_call_method(rb_thread_t *th, rb_control_frame_t *cfp,
	       int num, const rb_block_t *blockptr, VALUE flag,
	       ID id, const rb_method_entry_t *me, VALUE recv)
{
    /* Central method dispatch.  Selects the call strategy from the method
     * entry's definition type (iseq, cfunc, attr reader/writer, bmethod,
     * zsuper, optimized send/call, ...), enforcing visibility (private/
     * protected) and $SAFE checks when me->flag is set, and falling back
     * to method_missing when me is 0.
     * Returns Qundef when an iseq frame was pushed (the interpreter loop
     * continues in the new frame); otherwise returns the call's result. */
    VALUE val;

  start_method_dispatch:

    if (me != 0) {
	if ((me->flag == 0)) {
	  normal_method_dispatch:

	    switch (me->def->type) {
	      case VM_METHOD_TYPE_ISEQ:{
		vm_setup_method(th, cfp, recv, num, blockptr, flag, me);
		return Qundef;
	      }
	      case VM_METHOD_TYPE_NOTIMPLEMENTED:
	      case VM_METHOD_TYPE_CFUNC:{
		val = vm_call_cfunc(th, cfp, num, recv, blockptr, me);
		break;
	      }
	      case VM_METHOD_TYPE_ATTRSET:{
		/* attr_writer: exactly one argument, set the ivar */
		if (num != 1) {
		    rb_raise(rb_eArgError, "wrong number of arguments (%d for 1)", num);
		}
		val = rb_ivar_set(recv, me->def->body.attr.id, *(cfp->sp - 1));
		cfp->sp -= 2;   /* pop receiver + argument */
		break;
	      }
	      case VM_METHOD_TYPE_IVAR:{
		/* attr_reader: no arguments, read the ivar */
		if (num != 0) {
		    rb_raise(rb_eArgError, "wrong number of arguments (%d for 0)", num);
		}
		val = rb_attr_get(recv, me->def->body.attr.id);
		cfp->sp -= 1;   /* pop receiver */
		break;
	      }
	      case VM_METHOD_TYPE_MISSING:{
		/* entry explicitly marked missing: forward to
		 * method_missing with the original name prepended */
		VALUE *argv = ALLOCA_N(VALUE, num+1);
		argv[0] = ID2SYM(me->def->original_id);
		MEMCPY(argv+1, cfp->sp - num, VALUE, num);
		cfp->sp += - num - 1;
		val = rb_funcall2(recv, rb_intern("method_missing"), num+1, argv);
		break;
	      }
	      case VM_METHOD_TYPE_BMETHOD:{
		/* define_method-created method: invoke its Proc */
		VALUE *argv = ALLOCA_N(VALUE, num);
		MEMCPY(argv, cfp->sp - num, VALUE, num);
		cfp->sp += - num - 1;
		val = vm_call_bmethod(th, recv, num, argv, blockptr, me);
		break;
	      }
	      case VM_METHOD_TYPE_ZSUPER:{
		/* re-dispatch the same name starting at the superclass */
		VALUE klass = RCLASS_SUPER(me->klass);
		me = rb_method_entry(klass, id);

		if (me != 0) {
		    goto normal_method_dispatch;
		}
		else {
		    goto start_method_dispatch;
		}
	      }
	      case VM_METHOD_TYPE_OPTIMIZED:{
		switch (me->def->body.optimize_type) {
		  case OPTIMIZED_METHOD_TYPE_SEND: {
		    /* recv.send(sym, *args): extract the name, shift the
		     * arguments down one slot, and restart dispatch */
		    rb_control_frame_t *reg_cfp = cfp;
		    rb_num_t i = num - 1;
		    VALUE sym;

		    if (num == 0) {
			rb_raise(rb_eArgError, "no method name given");
		    }

		    sym = TOPN(i);
		    id = SYMBOL_P(sym) ? SYM2ID(sym) : rb_to_id(sym);
		    /* shift arguments */
		    if (i > 0) {
			MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
		    }
		    me = rb_method_entry(CLASS_OF(recv), id);
		    num -= 1;
		    DEC_SP(1);
		    flag |= VM_CALL_FCALL_BIT | VM_CALL_OPT_SEND_BIT;

		    goto start_method_dispatch;
		  }
		  case OPTIMIZED_METHOD_TYPE_CALL: {
		    /* Proc#call: invoke the proc with its own self */
		    rb_proc_t *proc;
		    int argc = num;
		    VALUE *argv = ALLOCA_N(VALUE, num);
		    GetProcPtr(recv, proc);
		    MEMCPY(argv, cfp->sp - num, VALUE, num);
		    cfp->sp -= num + 1;

		    val = rb_vm_invoke_proc(th, proc, proc->block.self, argc, argv, blockptr);
		    break;
		  }
		  default:
		    rb_bug("eval_invoke_method: unsupported optimized method type (%d)",
			   me->def->body.optimize_type);
		}
		break;
	      }
	      default:{
		rb_bug("eval_invoke_method: unsupported method type (%d)", me->def->type);
		break;
	      }
	    }
	}
	else {
	    /* me->flag carries visibility/safe-level bits */
	    int noex_safe;

	    if (!(flag & VM_CALL_FCALL_BIT) &&
		(me->flag & NOEX_MASK) & NOEX_PRIVATE) {
		/* private method called with an explicit receiver */
		int stat = NOEX_PRIVATE;

		if (flag & VM_CALL_VCALL_BIT) {
		    stat |= NOEX_VCALL;
		}
		val = vm_method_missing(th, id, recv, num, blockptr, stat);
	    }
	    else if (!(flag & VM_CALL_OPT_SEND_BIT) && (me->flag & NOEX_MASK) & NOEX_PROTECTED) {
		/* protected: caller's self must be kind_of? the defining
		 * class (unwrapping an iclass to its module first) */
		VALUE defined_class = me->klass;

		if (TYPE(defined_class) == T_ICLASS) {
		    defined_class = RBASIC(defined_class)->klass;
		}

		if (!rb_obj_is_kind_of(cfp->self, defined_class)) {
		    val = vm_method_missing(th, id, recv, num, blockptr, NOEX_PROTECTED);
		}
		else {
		    goto normal_method_dispatch;
		}
	    }
	    else if ((noex_safe = NOEX_SAFE(me->flag)) > th->safe_level &&
		     (noex_safe > 2)) {
		rb_raise(rb_eSecurityError, "calling insecure method: %s", rb_id2name(id));
	    }
	    else {
		goto normal_method_dispatch;
	    }
	}
    }
    else {
	/* method missing */
	int stat = 0;
	if (flag & VM_CALL_VCALL_BIT) {
	    stat |= NOEX_VCALL;
	}
	if (flag & VM_CALL_SUPER_BIT) {
	    stat |= NOEX_SUPER;
	}
	if (id == idMethodMissing) {
	    /* method_missing itself is missing: raise NoMethodError */
	    VALUE *argv = ALLOCA_N(VALUE, num);
	    vm_method_missing_args(th, argv, num - 1, 0, stat);
	    rb_raise_method_missing(th, num, argv, recv, stat);
	}
	else {
	    val = vm_method_missing(th, id, recv, num, blockptr, stat);
	}
    }

    RUBY_VM_CHECK_INTS();
    return val;
}
  577. /* yield */
  578. static inline int
  579. block_proc_is_lambda(const VALUE procval)
  580. {
  581. rb_proc_t *proc;
  582. if (procval) {
  583. GetProcPtr(procval, proc);
  584. return proc->is_lambda;
  585. }
  586. else {
  587. return 0;
  588. }
  589. }
static inline VALUE
vm_yield_with_cfunc(rb_thread_t *th, const rb_block_t *block,
		    VALUE self, int argc, const VALUE *argv,
		    const rb_block_t *blockargptr)
{
    /* Yield to a C-implemented block (block->iseq is actually a NODE ifunc).
     * Lambda-style blocks receive all args packed into one Array; plain
     * blocks receive argv[0] (or nil for zero args).  A block argument, if
     * present, is materialized as a Proc.  Pushes an IFUNC frame around the
     * C call and pops it manually with th->cfp++. */
    NODE *ifunc = (NODE *) block->iseq;
    VALUE val, arg, blockarg;
    int lambda = block_proc_is_lambda(block->proc);

    if (lambda) {
	arg = rb_ary_new4(argc, argv);
    }
    else if (argc == 0) {
	arg = Qnil;
    }
    else {
	arg = argv[0];
    }

    if (blockargptr) {
	if (blockargptr->proc) {
	    blockarg = blockargptr->proc;
	}
	else {
	    blockarg = rb_vm_make_proc(th, blockargptr, rb_cProc);
	}
    }
    else {
	blockarg = Qnil;
    }

    vm_push_frame(th, (rb_iseq_t *)ifunc, VM_FRAME_MAGIC_IFUNC,
		  self, (VALUE)block->dfp,
		  0, th->cfp->sp, block->lfp, 1);

    val = (*ifunc->nd_cfnc) (arg, ifunc->nd_tval, argc, argv, blockarg);

    th->cfp++;   /* pop the IFUNC frame */
    return val;
}
/*--
 * @brief Distribute already-received yield arguments among the optional,
 *        rest and post parameters of a block.
 * @pre   iseq is block style (not lambda style)
 */
static inline int
vm_yield_setup_block_args_complex(rb_thread_t *th, const rb_iseq_t *iseq,
				  int argc, VALUE *argv)
{
    /* Returns the opt_pc offset at which block execution should begin. */
    rb_num_t opt_pc = 0;
    int i;
    const int m = iseq->argc;
    const int r = iseq->arg_rest;
    int len = iseq->arg_post_len;
    int start = iseq->arg_post_start;
    int rsize = argc > m ? argc - m : 0;    /* # of arguments which did not consumed yet */
    int psize = rsize > len ? len : rsize;  /* # of post arguments */
    int osize = 0;  /* # of opt arguments */
    VALUE ary;

    /* reserves arguments for post parameters */
    rsize -= psize;

    if (iseq->arg_opts) {
	const int opts = iseq->arg_opts - 1;
	if (rsize > opts) {
	    /* all optionals filled; skip their defaulting code */
	    osize = opts;
	    opt_pc = iseq->arg_opt_table[opts];
	}
	else {
	    /* only rsize optionals filled; start at the first default */
	    osize = rsize;
	    opt_pc = iseq->arg_opt_table[rsize];
	}
    }
    rsize -= osize;

    if (0) {
	printf(" argc: %d\n", argc);
	printf("  len: %d\n", len);
	printf("start: %d\n", start);
	printf("rsize: %d\n", rsize);
    }

    if (r == -1) {
	/* copy post argument */
	MEMMOVE(&argv[start], &argv[m+osize], VALUE, psize);
    }
    else {
	/* collect leftovers into the rest array, then place post args */
	ary = rb_ary_new4(rsize, &argv[r]);

	/* copy post argument */
	MEMMOVE(&argv[start], &argv[m+rsize+osize], VALUE, psize);
	argv[r] = ary;
    }

    /* nil-fill any post-arg slots that received no value */
    for (i=psize; i<len; i++) {
	argv[start + i] = Qnil;
    }

    return (int)opt_pc;
}
static inline int
vm_yield_setup_block_args(rb_thread_t *th, const rb_iseq_t * iseq,
			  int orig_argc, VALUE *argv,
			  const rb_block_t *blockptr)
{
    /* Block-style (non-lambda) argument setup: applies implicit array
     * splatting, nil-fills or truncates to the block's arity, handles
     * optional/rest/post parameters, and binds a block parameter (&b).
     * Returns the opt_pc offset at which block execution should begin. */
    int i;
    int argc = orig_argc;
    const int m = iseq->argc;
    VALUE ary, arg0;
    int opt_pc = 0;

    th->mark_stack_len = argc;

    /*
     * yield [1, 2]
     *  => {|a|}      => a = [1, 2]
     *  => {|a, b|}   => a, b = [1, 2]
     */
    arg0 = argv[0];
    if (!(iseq->arg_simple & 0x02) &&          /* exclude {|a|} */
	(m + iseq->arg_post_len) > 0 &&        /* this process is meaningful */
	argc == 1 && !NIL_P(ary = rb_check_array_type(arg0))) { /* rhs is only an array */
	/* implicit splat: spread the single array argument */
	th->mark_stack_len = argc = RARRAY_LENINT(ary);

	CHECK_STACK_OVERFLOW(th->cfp, argc);

	MEMCPY(argv, RARRAY_PTR(ary), VALUE, argc);
    }
    else {
	argv[0] = arg0;
    }

    /* nil-fill missing mandatory parameters */
    for (i=argc; i<m; i++) {
	argv[i] = Qnil;
    }

    if (iseq->arg_rest == -1 && iseq->arg_opts == 0) {
	const int arg_size = iseq->arg_size;
	if (arg_size < argc) {
	    /*
	     * yield 1, 2
	     *  => {|a|} # truncate
	     */
	    th->mark_stack_len = argc = arg_size;
	}
    }
    else {
	int r = iseq->arg_rest;

	if (iseq->arg_post_len ||
	    iseq->arg_opts) { /* TODO: implement simple version for (iseq->arg_post_len==0 && iseq->arg_opts > 0) */
	    opt_pc = vm_yield_setup_block_args_complex(th, iseq, argc, argv);
	}
	else {
	    if (argc < r) {
		/* yield 1
		 * => {|a, b, *r|}
		 */
		for (i=argc; i<r; i++) {
		    argv[i] = Qnil;
		}
		argv[r] = rb_ary_new();
	    }
	    else {
		argv[r] = rb_ary_new4(argc-r, &argv[r]);
	    }
	}

	th->mark_stack_len = iseq->arg_size;
    }

    /* {|&b|} */
    if (iseq->arg_block != -1) {
	VALUE procval = Qnil;

	if (blockptr) {
	    if (blockptr->proc == 0) {
		procval = rb_vm_make_proc(th, blockptr, rb_cProc);
	    }
	    else {
		procval = blockptr->proc;
	    }
	}

	argv[iseq->arg_block] = procval;
    }

    th->mark_stack_len = 0;
    return opt_pc;
}
static inline int
vm_yield_setup_args(rb_thread_t * const th, const rb_iseq_t *iseq,
		    int argc, VALUE *argv,
		    const rb_block_t *blockptr, int lambda)
{
    /* Entry point for yield argument setup.  Lambdas use strict,
     * method-style argument checking (VM_CALLEE_SETUP_ARG); plain blocks
     * use the permissive block rules (splatting, padding, truncation).
     * Returns the opt_pc offset at which execution should begin. */
    if (0) { /* for debug */
	printf("     argc: %d\n", argc);
	printf("iseq argc: %d\n", iseq->argc);
	printf("iseq opts: %d\n", iseq->arg_opts);
	printf("iseq rest: %d\n", iseq->arg_rest);
	printf("iseq post: %d\n", iseq->arg_post_len);
	printf("iseq blck: %d\n", iseq->arg_block);
	printf("iseq smpl: %d\n", iseq->arg_simple);
	printf("   lambda: %s\n", lambda ? "true" : "false");
    }

    if (lambda) {
	/* call as method */
	int opt_pc;
	VM_CALLEE_SETUP_ARG(opt_pc, th, iseq, argc, argv, &blockptr);
	return opt_pc;
    }
    else {
	return vm_yield_setup_block_args(th, iseq, argc, argv, blockptr);
    }
}
static VALUE
vm_invoke_block(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_num_t num, rb_num_t flag)
{
    /* Implement `yield`: invoke the block attached to the current method
     * frame.  Raises LocalJumpError when no block is given or yield occurs
     * outside a method/class body.  For an iseq block a BLOCK frame is
     * pushed and Qundef returned (execution continues there); for a
     * C-level block (T_NODE ifunc) the value is computed directly.
     * NOTE: GET_BLOCK_PTR/GET_ISEQ/GET_CFP/GET_SP/SET_SP operate on
     * reg_cfp implicitly. */
    const rb_block_t *block = GET_BLOCK_PTR();
    rb_iseq_t *iseq;
    int argc = (int)num;
    VALUE type = GET_ISEQ()->local_iseq->type;

    if ((type != ISEQ_TYPE_METHOD && type != ISEQ_TYPE_CLASS) || block == 0) {
	rb_vm_localjump_error("no block given (yield)", Qnil, 0);
    }
    iseq = block->iseq;

    /* expand any splat argument on the stack */
    argc = caller_setup_args(th, GET_CFP(), flag, argc, 0, 0);

    if (BUILTIN_TYPE(iseq) != T_NODE) {
	int opt_pc;
	const int arg_size = iseq->arg_size;
	VALUE * const rsp = GET_SP() - argc;
	SET_SP(rsp);

	CHECK_STACK_OVERFLOW(GET_CFP(), iseq->stack_max);
	opt_pc = vm_yield_setup_args(th, iseq, argc, rsp, 0,
				     block_proc_is_lambda(block->proc));

	vm_push_frame(th, iseq,
		      VM_FRAME_MAGIC_BLOCK, block->self, (VALUE) block->dfp,
		      iseq->iseq_encoded + opt_pc, rsp + arg_size, block->lfp,
		      iseq->local_size - arg_size);

	return Qundef;
    }
    else {
	VALUE val = vm_yield_with_cfunc(th, block, block->self, argc, STACK_ADDR_FROM_TOP(argc), 0);
	POPN(argc); /* TODO: should put before C/yield? */
	return val;
    }
}
  813. /* svar */
  814. static inline NODE *
  815. lfp_svar_place(rb_thread_t *th, VALUE *lfp)
  816. {
  817. VALUE *svar;
  818. if (lfp && th->local_lfp != lfp) {
  819. svar = &lfp[-1];
  820. }
  821. else {
  822. svar = &th->local_svar;
  823. }
  824. if (NIL_P(*svar)) {
  825. *svar = (VALUE)NEW_IF(Qnil, Qnil, Qnil);
  826. }
  827. return (NODE *)*svar;
  828. }
  829. static VALUE
  830. lfp_svar_get(rb_thread_t *th, VALUE *lfp, VALUE key)
  831. {
  832. NODE *svar = lfp_svar_place(th, lfp);
  833. switch (key) {
  834. case 0:
  835. return svar->u1.value;
  836. case 1:
  837. return svar->u2.value;
  838. default: {
  839. const VALUE hash = svar->u3.value;
  840. if (hash == Qnil) {
  841. return Qnil;
  842. }
  843. else {
  844. return rb_hash_lookup(hash, key);
  845. }
  846. }
  847. }
  848. }
  849. static void
  850. lfp_svar_set(rb_thread_t *th, VALUE *lfp, VALUE key, VALUE val)
  851. {
  852. NODE *svar = lfp_svar_place(th, lfp);
  853. switch (key) {
  854. case 0:
  855. svar->u1.value = val;
  856. return;
  857. case 1:
  858. svar->u2.value = val;
  859. return;
  860. default: {
  861. VALUE hash = svar->u3.value;
  862. if (hash == Qnil) {
  863. svar->u3.value = hash = rb_hash_new();
  864. }
  865. rb_hash_aset(hash, key, val);
  866. }
  867. }
  868. }
static inline VALUE
vm_getspecial(rb_thread_t *th, VALUE *lfp, VALUE key, rb_num_t type)
{
    /* Fetch a special variable.
     * type == 0: plain svar lookup by key (Fixnum keys unboxed first).
     * Otherwise the backref (svar key 1) is consulted:
     *   type odd  -> named backref variable, selector in (type >> 1):
     *                '&' whole match, '`' pre-match, '\'' post-match,
     *                '+' last match group;
     *   type even -> numbered group $N where N = type >> 1. */
    VALUE val;

    if (type == 0) {
	VALUE k = key;
	if (FIXNUM_P(key)) {
	    k = FIX2INT(key);
	}
	val = lfp_svar_get(th, lfp, k);
    }
    else {
	VALUE backref = lfp_svar_get(th, lfp, 1);

	if (type & 0x01) {
	    switch (type >> 1) {
	      case '&':
		val = rb_reg_last_match(backref);
		break;
	      case '`':
		val = rb_reg_match_pre(backref);
		break;
	      case '\'':
		val = rb_reg_match_post(backref);
		break;
	      case '+':
		val = rb_reg_match_last(backref);
		break;
	      default:
		rb_bug("unexpected back-ref");
	    }
	}
	else {
	    val = rb_reg_nth_match((int)(type >> 1), backref);
	}
    }
    return val;
}
  906. static NODE *
  907. vm_get_cref(const rb_iseq_t *iseq, const VALUE *lfp, const VALUE *dfp)
  908. {
  909. NODE *cref = 0;
  910. while (1) {
  911. if (lfp == dfp) {
  912. cref = iseq->cref_stack;
  913. break;
  914. }
  915. else if (dfp[-1] != Qnil) {
  916. cref = (NODE *)dfp[-1];
  917. break;
  918. }
  919. dfp = GET_PREV_DFP(dfp);
  920. }
  921. if (cref == 0) {
  922. rb_bug("vm_get_cref: unreachable");
  923. }
  924. return cref;
  925. }
  926. static NODE *
  927. vm_cref_push(rb_thread_t *th, VALUE klass, int noex, rb_block_t *blockptr)
  928. {
  929. rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(th, th->cfp);
  930. NODE *cref = NEW_BLOCK(klass);
  931. cref->nd_file = 0;
  932. cref->nd_visi = noex;
  933. if (blockptr) {
  934. cref->nd_next = vm_get_cref(blockptr->iseq, blockptr->lfp, blockptr->dfp);
  935. }
  936. else if (cfp) {
  937. cref->nd_next = vm_get_cref(cfp->iseq, cfp->lfp, cfp->dfp);
  938. }
  939. return cref;
  940. }
  941. static inline VALUE
  942. vm_get_cbase(const rb_iseq_t *iseq, const VALUE *lfp, const VALUE *dfp)
  943. {
  944. NODE *cref = vm_get_cref(iseq, lfp, dfp);
  945. VALUE klass = Qundef;
  946. while (cref) {
  947. if ((klass = cref->nd_clss) != 0) {
  948. break;
  949. }
  950. cref = cref->nd_next;
  951. }
  952. return klass;
  953. }
  954. static inline VALUE
  955. vm_get_const_base(const rb_iseq_t *iseq, const VALUE *lfp, const VALUE *dfp)
  956. {
  957. NODE *cref = vm_get_cref(iseq, lfp, dfp);
  958. VALUE klass = Qundef;
  959. while (cref) {
  960. if (!(cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL) &&
  961. (klass = cref->nd_clss) != 0) {
  962. break;
  963. }
  964. cref = cref->nd_next;
  965. }
  966. return klass;
  967. }
  968. static inline void
  969. vm_check_if_namespace(VALUE klass)
  970. {
  971. switch (TYPE(klass)) {
  972. case T_CLASS:
  973. case T_MODULE:
  974. break;
  975. default:
  976. rb_raise(rb_eTypeError, "%s is not a class/module",
  977. RSTRING_PTR(rb_inspect(klass)));
  978. }
  979. }
static inline VALUE
vm_get_ev_const(rb_thread_t *th, const rb_iseq_t *iseq,
		VALUE orig_klass, ID id, int is_defined)
{
    /* Constant lookup for the evaluator.
     * orig_klass == Qnil: search the current lexical scope (cref chain),
     * triggering autoload on Qundef placeholders, then fall back to the
     * ancestry of the innermost cref class (or of self's class at
     * toplevel).  Otherwise look up `id` in orig_klass and its ancestors.
     * With is_defined nonzero, return truth of definedness instead of the
     * constant's value. */
    VALUE val;

    if (orig_klass == Qnil) {
	/* in current lexical scope */
	const NODE *cref = vm_get_cref(iseq, th->cfp->lfp, th->cfp->dfp);
	const NODE *root_cref = NULL;
	VALUE klass = orig_klass;

	while (cref && cref->nd_next) {
	    if (!(cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL)) {
		klass = cref->nd_clss;
		/* remember the innermost non-eval cref for the fallback
		 * search below */
		if (root_cref == NULL)
		    root_cref = cref;
	    }
	    cref = cref->nd_next;

	    if (!NIL_P(klass)) {
		VALUE am = 0;
	      search_continue:
		if (RCLASS_IV_TBL(klass) &&
		    st_lookup(RCLASS_IV_TBL(klass), id, &val)) {
		    if (val == Qundef) {
			/* Qundef marks an autoload entry; load it and
			 * retry once per class (am guards the loop) */
			if (am == klass) break;
			am = klass;
			rb_autoload_load(klass, id);
			goto search_continue;
		    }
		    else {
			if (is_defined) {
			    return 1;
			}
			else {
			    return val;
			}
		    }
		}
	    }
	}

	/* search self */
	if (root_cref && !NIL_P(root_cref->nd_clss)) {
	    klass = root_cref->nd_clss;
	}
	else {
	    klass = CLASS_OF(th->cfp->self);
	}

	if (is_defined) {
	    return rb_const_defined(klass, id);
	}
	else {
	    return rb_const_get(klass, id);
	}
    }
    else {
	vm_check_if_namespace(orig_klass);
	if (is_defined) {
	    return rb_const_defined_from(orig_klass, id);
	}
	else {
	    return rb_const_get_from(orig_klass, id);
	}
    }
}
/*
 * Find the class or module that owns class variables for the given cref.
 * Skips crefs whose class is nil, is a singleton class, or was pushed by
 * eval, walking outward; warns when the walk reaches the toplevel cref
 * (class variable access from toplevel).  Raises TypeError when the
 * resulting class is nil.
 */
static inline VALUE
vm_get_cvar_base(NODE *cref)
{
    VALUE klass;

    while (cref && cref->nd_next &&
	   (NIL_P(cref->nd_clss) || FL_TEST(cref->nd_clss, FL_SINGLETON) ||
	    (cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL))) {
	cref = cref->nd_next;

	if (!cref->nd_next) {
	    /* reached the outermost cref: toplevel cvar access is legal
	     * but suspicious, so only warn */
	    rb_warn("class variable access from toplevel");
	}
    }

    klass = cref->nd_clss;

    if (NIL_P(klass)) {
	rb_raise(rb_eTypeError, "no class variables available");
    }
    return klass;
}
  1061. #ifndef USE_IC_FOR_IVAR
  1062. #define USE_IC_FOR_IVAR 1
  1063. #endif
/*
 * Read instance variable `id` from obj, using the inline cache `ic`.
 *
 * Fast path (T_OBJECT only): if the cache's class matches the object's
 * class, the cached slot index is reused directly; otherwise the index
 * is looked up in the object's iv_index table and the cache is refilled.
 * An uninitialized ivar yields nil with a warning, matching rb_ivar_get
 * semantics.  Non-T_OBJECT receivers (and builds without USE_IC_FOR_IVAR)
 * fall back to rb_ivar_get.
 */
static VALUE
vm_getivar(VALUE obj, ID id, IC ic)
{
#if USE_IC_FOR_IVAR
    if (TYPE(obj) == T_OBJECT) {
	VALUE val = Qundef;
	VALUE klass = RBASIC(obj)->klass;

	if (ic->ic_class == klass) {
	    /* cache hit: index is valid for this class's ivar layout */
	    long index = ic->ic_value.index;
	    long len = ROBJECT_NUMIV(obj);
	    VALUE *ptr = ROBJECT_IVPTR(obj);

	    if (index < len) {
		val = ptr[index];
	    }
	}
	else {
	    /* cache miss: consult the class's ivar-name -> slot table
	     * and refill the cache for next time */
	    st_data_t index;
	    long len = ROBJECT_NUMIV(obj);
	    VALUE *ptr = ROBJECT_IVPTR(obj);
	    struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);

	    if (iv_index_tbl) {
		if (st_lookup(iv_index_tbl, id, &index)) {
		    if ((long)index < len) {
			val = ptr[index];
		    }
		    ic->ic_class = klass;
		    ic->ic_value.index = index;
		}
	    }
	}

	if (UNLIKELY(val == Qundef)) {
	    /* ivar slot not present or not yet assigned */
	    rb_warning("instance variable %s not initialized", rb_id2name(id));
	    val = Qnil;
	}
	return val;
    }
    else {
	return rb_ivar_get(obj, id);
    }
#else
    return rb_ivar_get(obj, id);
#endif
}
/*
 * Write instance variable `id` on obj, using the inline cache `ic`.
 *
 * Enforces $SAFE >= 4 untrusted-object and frozen-object checks first.
 * Fast path (T_OBJECT only): a cache hit with an in-range slot index
 * stores directly into the object's ivar array and returns.  On a miss,
 * the slot index is looked up and cached, then the call falls through to
 * rb_ivar_set, which also handles array growth and non-T_OBJECT types.
 */
static void
vm_setivar(VALUE obj, ID id, VALUE val, IC ic)
{
#if USE_IC_FOR_IVAR
    if (!OBJ_UNTRUSTED(obj) && rb_safe_level() >= 4) {
	rb_raise(rb_eSecurityError, "Insecure: can't modify instance variable");
    }

    if (OBJ_FROZEN(obj)) {
	rb_error_frozen("object");
    }

    if (TYPE(obj) == T_OBJECT) {
	VALUE klass = RBASIC(obj)->klass;
	st_data_t index;

	if (ic->ic_class == klass) {
	    /* cache hit: write straight into the known slot */
	    long index = ic->ic_value.index;
	    long len = ROBJECT_NUMIV(obj);
	    VALUE *ptr = ROBJECT_IVPTR(obj);

	    if (index < len) {
		ptr[index] = val;
		return; /* inline cache hit */
	    }
	}
	else {
	    /* cache miss: remember the slot index for next time */
	    struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);

	    if (iv_index_tbl && st_lookup(iv_index_tbl, (st_data_t)id, &index)) {
		ic->ic_class = klass;
		ic->ic_value.index = index;
	    }
	    /* fall through */
	}
    }
    rb_ivar_set(obj, id, val);
#else
    rb_ivar_set(obj, id, val);
#endif
}
/*
 * Look up method `id` on klass, using the inline method cache when
 * OPT_INLINE_METHOD_CACHE is enabled.  A cache entry is valid only if
 * both the class and the global VM state version match; any method
 * (re)definition bumps the version and so invalidates all caches.
 * Returns the method entry, or whatever rb_method_entry returns when
 * the method is missing (not checked here).
 */
static inline const rb_method_entry_t *
vm_method_search(VALUE id, VALUE klass, IC ic)
{
    rb_method_entry_t *me;
#if OPT_INLINE_METHOD_CACHE
    if (LIKELY(klass == ic->ic_class) &&
	LIKELY(GET_VM_STATE_VERSION() == ic->ic_vmstat)) {
	me = ic->ic_value.method;
    }
    else {
	/* miss: do the full lookup and refill the cache */
	me = rb_method_entry(klass, id);
	ic->ic_class = klass;
	ic->ic_value.method = me;
	ic->ic_vmstat = GET_VM_STATE_VERSION();
    }
#else
    me = rb_method_entry(klass, id);
#endif
    return me;
}
  1163. static inline VALUE
  1164. vm_search_normal_superclass(VALUE klass, VALUE recv)
  1165. {
  1166. if (BUILTIN_TYPE(klass) == T_CLASS) {
  1167. return RCLASS_SUPER(klass);
  1168. }
  1169. else if (BUILTIN_TYPE(klass) == T_MODULE) {
  1170. VALUE k = CLASS_OF(recv);
  1171. while (k) {
  1172. if (BUILTIN_TYPE(k) == T_ICLASS && RBASIC(k)->klass == klass) {
  1173. return RCLASS_SUPER(k);
  1174. }
  1175. k = RCLASS_SUPER(k);
  1176. }
  1177. return rb_cObject;
  1178. }
  1179. else {
  1180. rb_bug("vm_search_normal_superclass: should not be reach here");
  1181. }
  1182. }
/*
 * Compute the method id and the class to start searching from for a
 * `super` call, writing them to *idp and *klassp.
 *
 * Walks up parent iseqs until one with a klass is found; its
 * defined_method_id is the method being super'd.  If that iseq is not
 * its own local_iseq, the method was defined via define_method: zsuper
 * (no explicit arguments, sigval == 0) is rejected, and the control
 * frame stack is walked (via dfp chains) to locate the frame executing
 * that iseq so the real method entry's original_id and class can be
 * used instead.
 */
static void
vm_search_superclass(rb_control_frame_t *reg_cfp, rb_iseq_t *ip,
		     VALUE recv, VALUE sigval,
		     ID *idp, VALUE *klassp)
{
    ID id;
    VALUE klass;

    while (ip && !ip->klass) {
	ip = ip->parent_iseq;
    }

    if (ip == 0) {
	rb_raise(rb_eNoMethodError, "super called outside of method");
    }

    id = ip->defined_method_id;

    if (ip != ip->local_iseq) {
	/* defined by Module#define_method() */
	rb_control_frame_t *lcfp = GET_CFP();

	if (!sigval) {
	    /* zsuper */
	    rb_raise(rb_eRuntimeError, "implicit argument passing of super from method defined by define_method() is not supported. Specify all arguments explicitly.");
	}

	/* walk outward one dfp level at a time until the frame running
	 * the defining iseq is found */
	while (lcfp->iseq != ip) {
	    VALUE *tdfp = GET_PREV_DFP(lcfp->dfp);
	    while (1) {
		lcfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(lcfp);
		if (lcfp->dfp == tdfp) {
		    break;
		}
	    }
	}

	/* temporary measure for [Bug #2420] [Bug #3136] */
	if (!lcfp->me) {
	    rb_raise(rb_eNoMethodError, "super called outside of method");
	}

	id = lcfp->me->def->original_id;
	klass = vm_search_normal_superclass(lcfp->me->klass, recv);
    }
    else {
	klass = vm_search_normal_superclass(ip->klass, recv);
    }

    *idp = id;
    *klassp = klass;
}
  1226. static VALUE
  1227. vm_throw(rb_thread_t *th, rb_control_frame_t *reg_cfp,
  1228. rb_num_t throw_state, VALUE throwobj)
  1229. {
  1230. int state = (int)(throw_state & 0xff);
  1231. int flag = (int)(throw_state & 0x8000);
  1232. rb_num_t level = throw_state >> 16;
  1233. if (state != 0) {
  1234. VALUE *pt = 0;
  1235. if (flag != 0) {
  1236. pt = (void *) 1;
  1237. }
  1238. else {
  1239. if (state == TAG_BREAK) {
  1240. rb_control_frame_t *cfp = GET_CFP();
  1241. VALUE *dfp = GET_DFP();
  1242. int is_orphan = 1;
  1243. rb_iseq_t *base_iseq = GET_ISEQ();
  1244. search_parent:
  1245. if (cfp->iseq->type != ISEQ_TYPE_BLOCK) {
  1246. if (cfp->iseq->type == ISEQ_TYPE_CLASS) {
  1247. cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
  1248. dfp = cfp->dfp;
  1249. goto search_parent;
  1250. }
  1251. dfp = GC_GUARDED_PTR_REF((VALUE *) *dfp);
  1252. base_iseq = base_iseq->parent_iseq;
  1253. while ((VALUE *) cfp < th->stack + th->stack_size) {
  1254. if (cfp->dfp == dfp) {
  1255. goto search_parent;
  1256. }
  1257. cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
  1258. }
  1259. rb_bug("VM (throw): can't find break base.");
  1260. }
  1261. if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_LAMBDA) {
  1262. /* lambda{... break ...} */
  1263. is_orphan = 0;
  1264. pt = cfp->dfp;
  1265. state = TAG_RETURN;
  1266. }
  1267. else {
  1268. dfp = GC_GUARDED_PTR_REF((VALUE *) *dfp);
  1269. while ((VALUE *)cfp < th->stack + th->stack_size) {
  1270. if (cfp->dfp == dfp) {
  1271. VALUE epc = epc = cfp->pc - cfp->iseq->iseq_encoded;
  1272. rb_iseq_t *iseq = cfp->iseq;
  1273. int i;
  1274. for (i=0; i<iseq->catch_table_size; i++) {
  1275. struct iseq_catch_table_entry *entry = &iseq->catch_table[i];
  1276. if (entry->type == CATCH_TYPE_BREAK &&
  1277. entry->start < epc && entry->end >= epc) {
  1278. if (entry->cont == epc) {
  1279. goto found;
  1280. }
  1281. else {
  1282. break;
  1283. }
  1284. }
  1285. }
  1286. break;
  1287. found:
  1288. pt = dfp;
  1289. is_orphan = 0;
  1290. break;
  1291. }
  1292. cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
  1293. }
  1294. }
  1295. if (is_orphan) {
  1296. rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
  1297. }
  1298. }
  1299. else if (state == TAG_RETRY) {
  1300. rb_num_t i;
  1301. pt = GC_GUARDED_PTR_REF((VALUE *) * GET_DFP());
  1302. for (i = 0; i < level; i++) {
  1303. pt = GC_GUARDED_PTR_REF((VALUE *) * pt);
  1304. }
  1305. }
  1306. else if (state == TAG_RETURN) {
  1307. rb_control_frame_t *cfp = GET_CFP();
  1308. VALUE *dfp = GET_DFP();
  1309. VALUE *lfp = GET_LFP();
  1310. /* check orphan and get dfp */
  1311. while ((VALUE *) cfp < th->stack + th->stack_size) {
  1312. if (!lfp) {
  1313. lfp = cfp->lfp;
  1314. }
  1315. if (cfp->dfp == lfp && cfp->iseq->type == ISEQ_TYPE_CLASS) {
  1316. lfp = 0;
  1317. }
  1318. if (cfp->lfp == lfp) {
  1319. if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_LAMBDA) {
  1320. VALUE *tdfp = dfp;
  1321. while (lfp != tdfp) {
  1322. if (cfp->dfp == tdfp) {
  1323. /* in lambda */
  1324. dfp = cfp->dfp;
  1325. goto valid_return;
  1326. }
  1327. tdfp = GC_GUARDED_PTR_REF((VALUE *)*tdfp);
  1328. }
  1329. }
  1330. }
  1331. if (cfp->dfp == lfp && cfp->iseq->type == ISEQ_TYPE_METHOD) {
  1332. dfp = lfp;
  1333. goto valid_return;
  1334. }
  1335. cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
  1336. }
  1337. rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);
  1338. valid_return:
  1339. pt = dfp;
  1340. }
  1341. else {
  1342. rb_bug("isns(throw): unsupport throw type");
  1343. }
  1344. }
  1345. th->state = state;
  1346. return (VALUE)NEW_THROW_OBJECT(throwobj, (VALUE) pt, state);
  1347. }
  1348. else {
  1349. /* continue throw */
  1350. VALUE err = throwobj;
  1351. if (FIXNUM_P(err)) {
  1352. th->state = FIX2INT(err);
  1353. }
  1354. else if (SYMBOL_P(err)) {
  1355. th->state = TAG_THROW;
  1356. }
  1357. else if (BUILTIN_TYPE(err) == T_NODE) {
  1358. th->state = GET_THROWOBJ_STATE(err);
  1359. }
  1360. else {
  1361. th->state = TAG_RAISE;
  1362. /*th->state = FIX2INT(rb_ivar_get(err, idThrowState));*/
  1363. }
  1364. return err;
  1365. }
  1366. }
/*
 * Expand an array onto the VM value stack for massign / splat handling.
 *
 * flag bit 0x01: a splat slot is included (one extra stack slot holding
 * the leftover elements as a new array).
 * flag bit 0x02: "post" order — push nils/elements so that ary[0..-num]
 * end up appropriately for trailing (post) arguments; otherwise the
 * normal order pushes ary elements so ary[0] lands on top.
 *
 * Non-array values are first coerced with rb_ary_to_ary.  tmp_ary is
 * volatile to keep the (possibly freshly-created) array referenced on
 * the C stack while its internal pointer is being read — presumably a
 * GC guard; confirm against RARRAY_PTR usage rules.
 */
static inline void
vm_expandarray(rb_control_frame_t *cfp, VALUE ary, rb_num_t num, int flag)
{
    int is_splat = flag & 0x01;
    rb_num_t space_size = num + is_splat;
    VALUE *base = cfp->sp, *ptr;
    volatile VALUE tmp_ary;
    rb_num_t len;

    if (TYPE(ary) != T_ARRAY) {
	ary = rb_ary_to_ary(ary);
    }

    cfp->sp += space_size;

    tmp_ary = ary;
    ptr = RARRAY_PTR(ary);
    len = (rb_num_t)RARRAY_LEN(ary);

    if (flag & 0x02) {
	/* post: ..., nil ,ary[-1], ..., ary[0..-num] # top */
	rb_num_t i = 0, j;

	if (len < num) {
	    /* pad with nil when the array is shorter than requested */
	    for (i=0; i<num-len; i++) {
		*base++ = Qnil;
	    }
	}
	for (j=0; i<num; i++, j++) {
	    VALUE v = ptr[len - j - 1];
	    *base++ = v;
	}
	if (is_splat) {
	    *base = rb_ary_new4(len - j, ptr);
	}
    }
    else {
	/* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */
	rb_num_t i;
	VALUE *bptr = &base[space_size - 1];

	for (i=0; i<num; i++) {
	    if (len <= i) {
		/* ran out of elements: fill the rest with nil */
		for (; i<num; i++) {
		    *bptr-- = Qnil;
		}
		break;
	    }
	    *bptr-- = ptr[i];
	}
	if (is_splat) {
	    if (num > len) {
		*bptr = rb_ary_new();
	    }
	    else {
		*bptr = rb_ary_new4(len - num, ptr + num);
	    }
	}
    }
}
  1421. static inline int
  1422. check_cfunc(const rb_method_entry_t *me, VALUE (*func)())
  1423. {
  1424. if (me && me->def->type == VM_METHOD_TYPE_CFUNC &&
  1425. me->def->body.cfunc.func == func) {
  1426. return 1;
  1427. }
  1428. else {
  1429. return 0;
  1430. }
  1431. }
/*
 * Fast-path implementation for the opt_eq instruction (recv == obj).
 *
 * Handles Fixnum/Fixnum, Float/Float (with NaN check) and String/String
 * comparisons inline when the corresponding == has not been redefined,
 * and short-circuits the default Object#equal? (identity) via the
 * inline cache.  Returns Qundef when no fast path applies, signalling
 * the interpreter to fall back to a full method dispatch.
 */
static
#ifndef NO_BIG_INLINE
inline
#endif
VALUE
opt_eq_func(VALUE recv, VALUE obj, IC ic)
{
    if (FIXNUM_2_P(recv, obj) &&
	BASIC_OP_UNREDEFINED_P(BOP_EQ)) {
	/* two Fixnums: identity comparison suffices */
	return (recv == obj) ? Qtrue : Qfalse;
    }
    else if (!SPECIAL_CONST_P(recv) && !SPECIAL_CONST_P(obj)) {
	if (HEAP_CLASS_OF(recv) == rb_cFloat &&
	    HEAP_CLASS_OF(obj) == rb_cFloat &&
	    BASIC_OP_UNREDEFINED_P(BOP_EQ)) {
	    double a = RFLOAT_VALUE(recv);
	    double b = RFLOAT_VALUE(obj);

	    /* NaN != NaN per IEEE 754 */
	    if (isnan(a) || isnan(b)) {
		return Qfalse;
	    }
	    return  (a == b) ? Qtrue : Qfalse;
	}
	else if (HEAP_CLASS_OF(recv) == rb_cString &&
		 HEAP_CLASS_OF(obj) == rb_cString &&
		 BASIC_OP_UNREDEFINED_P(BOP_EQ)) {
	    return rb_str_equal(recv, obj);
	}
    }

    {
	/* if the receiver's == is the default Object#equal?, compare
	 * identities without dispatching */
	const rb_method_entry_t *me = vm_method_search(idEq, CLASS_OF(recv), ic);
	extern VALUE rb_obj_equal(VALUE obj1, VALUE obj2);

	if (check_cfunc(me, rb_obj_equal)) {
	    return recv == obj ? Qtrue : Qfalse;
	}
    }

    return Qundef;
}
/* Argument bundle for opt_case_dispatch_i (st_foreach callback). */
struct opt_case_dispatch_i_arg {
    VALUE obj;	/* the value being matched (case expression) */
    int label;	/* out: jump label of the matching when-clause */
};
  1473. static int
  1474. opt_case_dispatch_i(st_data_t key, st_data_t data, st_data_t p)
  1475. {
  1476. struct opt_case_dispatch_i_arg *arg = (void *)p;
  1477. if (RTEST(rb_funcall((VALUE)key, idEqq, 1, arg->obj))) {
  1478. arg->label = FIX2INT((VALUE)data);
  1479. return ST_STOP;
  1480. }
  1481. else {
  1482. return ST_CONTINUE;
  1483. }
  1484. }