
/vm_insnhelper.c

https://github.com/nazy/ruby
C | 1720 lines (truncated below)
/**********************************************************************

  vm_insnhelper.c - instruction helper functions.

  $Author$

  Copyright (C) 2007 Koichi Sasada

**********************************************************************/

/* finish iseq array */
#include "insns.inc"
#include <math.h>

/* control stack frame */

#ifndef INLINE
#define INLINE inline
#endif

static rb_control_frame_t *vm_get_ruby_level_caller_cfp(rb_thread_t *th, rb_control_frame_t *cfp);
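/*
 * vm_push_frame: push a new control frame onto the VM stack.
 * Judging from the code below, the value-stack layout it builds is
 * roughly:
 *
 *   sp[0 .. local_size-1] : locals, nil-initialized
 *   sp[local_size]        : specval (GC-guarded block/dfp pointer)
 *
 * cfp->dfp points at the specval slot; lfp does too when the caller
 * passes lfp == 0 (method frames), while block frames pass in the
 * enclosing method's lfp.
 */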
static inline rb_control_frame_t *
vm_push_frame(rb_thread_t * th, const rb_iseq_t * iseq,
              VALUE type, VALUE self, VALUE specval,
              const VALUE *pc, VALUE *sp, VALUE *lfp,
              int local_size)
{
    rb_control_frame_t * const cfp = th->cfp - 1;
    int i;

    if ((void *)(sp + local_size) >= (void *)cfp) {
        rb_exc_raise(sysstack_error);
    }
    th->cfp = cfp;

    /* setup vm value stack */

    /* nil initialize */
    for (i=0; i < local_size; i++) {
        *sp = Qnil;
        sp++;
    }

    /* set special val */
    *sp = GC_GUARDED_PTR(specval);

    if (lfp == 0) {
        lfp = sp;
    }

    /* setup vm control frame stack */

    cfp->pc = (VALUE *)pc;
    cfp->sp = sp + 1;
    cfp->bp = sp + 1;
    cfp->iseq = (rb_iseq_t *) iseq;
    cfp->flag = type;
    cfp->self = self;
    cfp->lfp = lfp;
    cfp->dfp = sp;
    cfp->block_iseq = 0;
    cfp->proc = 0;
    cfp->me = 0;

#define COLLECT_PROFILE 0
#if COLLECT_PROFILE
    cfp->prof_time_self = clock();
    cfp->prof_time_chld = 0;
#endif

    if (VMDEBUG == 2) {
        SDR();
    }

    return cfp;
}
static inline void
vm_pop_frame(rb_thread_t *th)
{
#if COLLECT_PROFILE
    rb_control_frame_t *cfp = th->cfp;

    if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        VALUE current_time = clock();
        cfp->prof_time_self = current_time - cfp->prof_time_self;
        (cfp+1)->prof_time_chld += cfp->prof_time_self;

        cfp->iseq->profile.count++;
        cfp->iseq->profile.time_cumu = cfp->prof_time_self;
        cfp->iseq->profile.time_self = cfp->prof_time_self - cfp->prof_time_chld;
    }
    else if (0 /* c method? */) {
    }
#endif
    th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);

    if (VMDEBUG == 2) {
        SDR();
    }
}
/* method dispatch */

NORETURN(static void argument_error(const rb_iseq_t *iseq, int miss_argc, int correct_argc));
static void
argument_error(const rb_iseq_t *iseq, int miss_argc, int correct_argc)
{
    VALUE mesg = rb_sprintf("wrong number of arguments (%d for %d)", miss_argc, correct_argc);
    VALUE exc = rb_exc_new3(rb_eArgError, mesg);
    VALUE bt = rb_make_backtrace();
    VALUE err_line = 0;

    if (iseq) {
        int line_no = 1;

        if (iseq->insn_info_size) {
            line_no = iseq->insn_info_table[0].line_no;
        }

        err_line = rb_sprintf("%s:%d:in `%s'",
                              RSTRING_PTR(iseq->filename),
                              line_no, RSTRING_PTR(iseq->name));
        rb_funcall(bt, rb_intern("unshift"), 1, err_line);
    }
    rb_funcall(exc, rb_intern("set_backtrace"), 1, bt);
    rb_exc_raise(exc);
}
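/*
 * Argument setup for iseq (Ruby-level) callees.  When the
 * iseq->arg_simple 0x01 bit is set, the signature has only mandatory
 * parameters, so a bare argc check suffices; otherwise
 * vm_callee_setup_arg_complex() below distributes arguments over
 * optional, rest, post and block parameters.  For example
 * (illustrative Ruby signature, not from this file):
 *
 *   def m(a, b = 1, *rest, c, &blk); end
 *
 * takes the complex path: `a` is mandatory, `b` optional, `rest` the
 * rest parameter, `c` a post parameter and `blk` the block parameter.
 * The value stored in `ret` is the opt_pc offset at which execution
 * should start, skipping defaults for already-supplied optionals.
 */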
#define VM_CALLEE_SETUP_ARG(ret, th, iseq, orig_argc, orig_argv, block) \
    if (LIKELY(iseq->arg_simple & 0x01)) { \
        /* simple check */ \
        if (orig_argc != iseq->argc) { \
            argument_error(iseq, orig_argc, iseq->argc); \
        } \
        ret = 0; \
    } \
    else { \
        ret = vm_callee_setup_arg_complex(th, iseq, orig_argc, orig_argv, block); \
    }

static inline int
vm_callee_setup_arg_complex(rb_thread_t *th, const rb_iseq_t * iseq,
                            int orig_argc, VALUE * orig_argv,
                            const rb_block_t **block)
{
    const int m = iseq->argc;
    int argc = orig_argc;
    VALUE *argv = orig_argv;
    rb_num_t opt_pc = 0;

    th->mark_stack_len = argc + iseq->arg_size;

    /* mandatory */
    if (argc < (m + iseq->arg_post_len)) { /* check with post arg */
        argument_error(iseq, argc, m + iseq->arg_post_len);
    }

    argv += m;
    argc -= m;

    /* post arguments */
    if (iseq->arg_post_len) {
        if (!(orig_argc < iseq->arg_post_start)) {
            VALUE *new_argv = ALLOCA_N(VALUE, argc);
            MEMCPY(new_argv, argv, VALUE, argc);
            argv = new_argv;
        }

        MEMCPY(&orig_argv[iseq->arg_post_start], &argv[argc -= iseq->arg_post_len],
               VALUE, iseq->arg_post_len);
    }

    /* opt arguments */
    if (iseq->arg_opts) {
        const int opts = iseq->arg_opts - 1 /* no opt */;

        if (iseq->arg_rest == -1 && argc > opts) {
            argument_error(iseq, orig_argc, m + opts + iseq->arg_post_len);
        }

        if (argc > opts) {
            argc -= opts;
            argv += opts;
            opt_pc = iseq->arg_opt_table[opts]; /* no opt */
        }
        else {
            int i;
            for (i = argc; i<opts; i++) {
                orig_argv[i + m] = Qnil;
            }
            opt_pc = iseq->arg_opt_table[argc];
            argc = 0;
        }
    }

    /* rest arguments */
    if (iseq->arg_rest != -1) {
        orig_argv[iseq->arg_rest] = rb_ary_new4(argc, argv);
        argc = 0;
    }

    /* block arguments */
    if (block && iseq->arg_block != -1) {
        VALUE blockval = Qnil;
        const rb_block_t *blockptr = *block;

        if (argc != 0) {
            argument_error(iseq, orig_argc, m + iseq->arg_post_len);
        }

        if (blockptr) {
            /* make Proc object */
            if (blockptr->proc == 0) {
                rb_proc_t *proc;
                blockval = rb_vm_make_proc(th, blockptr, rb_cProc);
                GetProcPtr(blockval, proc);
                *block = &proc->block;
            }
            else {
                blockval = blockptr->proc;
            }
        }

        orig_argv[iseq->arg_block] = blockval; /* Proc or nil */
    }

    th->mark_stack_len = 0;
    return (int)opt_pc;
}
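/*
 * Caller-side argument fixup: materialize a block argument (&blk,
 * converting non-Proc objects via #to_proc) and/or expand a trailing
 * splat (*ary) onto the value stack.  Returns the adjusted argc.
 */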
static inline int
caller_setup_args(const rb_thread_t *th, rb_control_frame_t *cfp, VALUE flag,
                  int argc, rb_iseq_t *blockiseq, rb_block_t **block)
{
    rb_block_t *blockptr = 0;

    if (block) {
        if (flag & VM_CALL_ARGS_BLOCKARG_BIT) {
            rb_proc_t *po;
            VALUE proc;

            proc = *(--cfp->sp);

            if (proc != Qnil) {
                if (!rb_obj_is_proc(proc)) {
                    VALUE b = rb_check_convert_type(proc, T_DATA, "Proc", "to_proc");
                    if (NIL_P(b) || !rb_obj_is_proc(b)) {
                        rb_raise(rb_eTypeError,
                                 "wrong argument type %s (expected Proc)",
                                 rb_obj_classname(proc));
                    }
                    proc = b;
                }
                GetProcPtr(proc, po);
                blockptr = &po->block;
                RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp)->proc = proc;
                *block = blockptr;
            }
        }
        else if (blockiseq) {
            blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp);
            blockptr->iseq = blockiseq;
            blockptr->proc = 0;
            *block = blockptr;
        }
    }

    /* expand top of stack? */
    if (flag & VM_CALL_ARGS_SPLAT_BIT) {
        VALUE ary = *(cfp->sp - 1);
        VALUE *ptr;
        int i;
        VALUE tmp = rb_check_convert_type(ary, T_ARRAY, "Array", "to_a");

        if (NIL_P(tmp)) {
            /* do nothing */
        }
        else {
            long len = RARRAY_LEN(tmp);
            ptr = RARRAY_PTR(tmp);
            cfp->sp -= 1;

            CHECK_STACK_OVERFLOW(cfp, len);

            for (i = 0; i < len; i++) {
                *cfp->sp++ = ptr[i];
            }
            argc += i-1;
        }
    }

    return argc;
}
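/*
 * Dispatch to a C function according to its declared arity:
 *   -2: func(recv, args_ary)   -- arguments collected into an Array
 *   -1: func(argc, argv, recv) -- C-level varargs convention
 *  0..15: argv spread over the parameter list
 * Any other arity is a bug in the method definition.
 */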
static inline VALUE
call_cfunc(VALUE (*func)(), VALUE recv,
           int len, int argc, const VALUE *argv)
{
    /* printf("len: %d, argc: %d\n", len, argc); */

    if (len >= 0 && argc != len) {
        rb_raise(rb_eArgError, "wrong number of arguments (%d for %d)",
                 argc, len);
    }

    switch (len) {
      case -2:
        return (*func) (recv, rb_ary_new4(argc, argv));
        break;
      case -1:
        return (*func) (argc, argv, recv);
        break;
      case 0:
        return (*func) (recv);
        break;
      case 1:
        return (*func) (recv, argv[0]);
        break;
      case 2:
        return (*func) (recv, argv[0], argv[1]);
        break;
      case 3:
        return (*func) (recv, argv[0], argv[1], argv[2]);
        break;
      case 4:
        return (*func) (recv, argv[0], argv[1], argv[2], argv[3]);
        break;
      case 5:
        return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
        break;
      case 6:
        return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
                        argv[5]);
        break;
      case 7:
        return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
                        argv[5], argv[6]);
        break;
      case 8:
        return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
                        argv[5], argv[6], argv[7]);
        break;
      case 9:
        return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
                        argv[5], argv[6], argv[7], argv[8]);
        break;
      case 10:
        return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
                        argv[5], argv[6], argv[7], argv[8], argv[9]);
        break;
      case 11:
        return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
                        argv[5], argv[6], argv[7], argv[8], argv[9],
                        argv[10]);
        break;
      case 12:
        return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
                        argv[5], argv[6], argv[7], argv[8], argv[9],
                        argv[10], argv[11]);
        break;
      case 13:
        return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
                        argv[5], argv[6], argv[7], argv[8], argv[9], argv[10],
                        argv[11], argv[12]);
        break;
      case 14:
        return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
                        argv[5], argv[6], argv[7], argv[8], argv[9], argv[10],
                        argv[11], argv[12], argv[13]);
        break;
      case 15:
        return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
                        argv[5], argv[6], argv[7], argv[8], argv[9], argv[10],
                        argv[11], argv[12], argv[13], argv[14]);
        break;
      default:
        rb_raise(rb_eArgError, "too many arguments (%d)", len);
        return Qundef; /* not reached */
    }
}
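/*
 * Call a C-implemented method: push a CFUNC frame (mainly so that
 * backtraces and GC see the callee), fire the C_CALL/C_RETURN event
 * hooks, and verify on return that the callee left the frame it was
 * given in place (the "cfp consistency" check).
 */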
static inline VALUE
vm_call_cfunc(rb_thread_t *th, rb_control_frame_t *reg_cfp,
              int num, VALUE recv, const rb_block_t *blockptr,
              const rb_method_entry_t *me)
{
    VALUE val = 0;
    const rb_method_definition_t *def = me->def;
    rb_control_frame_t *cfp;

    EXEC_EVENT_HOOK(th, RUBY_EVENT_C_CALL, recv, me->called_id, me->klass);

    cfp = vm_push_frame(th, 0, VM_FRAME_MAGIC_CFUNC,
                        recv, (VALUE) blockptr, 0, reg_cfp->sp, 0, 1);
    cfp->me = me;
    reg_cfp->sp -= num + 1;

    val = call_cfunc(def->body.cfunc.func, recv, (int)def->body.cfunc.argc, num, reg_cfp->sp + 1);

    if (reg_cfp != th->cfp + 1) {
        rb_bug("cfp consistency error - send");
    }

    vm_pop_frame(th);

    EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, recv, me->called_id, me->klass);

    return val;
}
static inline VALUE
vm_call_bmethod(rb_thread_t *th, VALUE recv, int argc, const VALUE *argv,
                const rb_block_t *blockptr, const rb_method_entry_t *me)
{
    rb_proc_t *proc;
    VALUE val;

    /* control block frame */
    th->passed_me = me;

    GetProcPtr(me->def->body.proc, proc);
    val = rb_vm_invoke_proc(th, proc, recv, argc, argv, blockptr);
    return val;
}

static inline void
vm_method_missing_args(rb_thread_t *th, VALUE *argv,
                       int num, const rb_block_t *blockptr, int opt)
{
    rb_control_frame_t * const reg_cfp = th->cfp;
    MEMCPY(argv, STACK_ADDR_FROM_TOP(num + 1), VALUE, num + 1);
    th->method_missing_reason = opt;
    th->passed_block = blockptr;
    POPN(num + 1);
}

static inline VALUE
vm_method_missing(rb_thread_t *th, ID id, VALUE recv,
                  int num, const rb_block_t *blockptr, int opt)
{
    VALUE *argv = ALLOCA_N(VALUE, num + 1);
    vm_method_missing_args(th, argv, num, blockptr, opt);
    argv[0] = ID2SYM(id);
    return rb_funcall2(recv, idMethodMissing, num + 1, argv);
}
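/*
 * Prepare an iseq method invocation.  The normal path pushes a fresh
 * METHOD frame; the tailcall path (VM_CALL_TAILCALL_BIT) instead
 * reuses the caller's frame by copying the already set-up arguments
 * down over it, so the control-frame stack does not grow.
 */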
static inline void
vm_setup_method(rb_thread_t *th, rb_control_frame_t *cfp,
                VALUE recv, int argc, const rb_block_t *blockptr, VALUE flag,
                const rb_method_entry_t *me)
{
    int opt_pc, i;
    VALUE *sp, *rsp = cfp->sp - argc;
    rb_iseq_t *iseq = me->def->body.iseq;

    VM_CALLEE_SETUP_ARG(opt_pc, th, iseq, argc, rsp, &blockptr);

    /* stack overflow check */
    CHECK_STACK_OVERFLOW(cfp, iseq->stack_max);

    sp = rsp + iseq->arg_size;

    if (LIKELY(!(flag & VM_CALL_TAILCALL_BIT))) {
        if (0) printf("local_size: %d, arg_size: %d\n",
                      iseq->local_size, iseq->arg_size);

        /* clear local variables */
        for (i = 0; i < iseq->local_size - iseq->arg_size; i++) {
            *sp++ = Qnil;
        }

        vm_push_frame(th, iseq,
                      VM_FRAME_MAGIC_METHOD, recv, (VALUE) blockptr,
                      iseq->iseq_encoded + opt_pc, sp, 0, 0);

        cfp->sp = rsp - 1 /* recv */;
    }
    else {
        VALUE *p_rsp;
        th->cfp++; /* pop cf */
        p_rsp = th->cfp->sp;

        /* copy arguments */
        for (i=0; i < (sp - rsp); i++) {
            p_rsp[i] = rsp[i];
        }

        sp -= rsp - p_rsp;

        /* clear local variables */
        for (i = 0; i < iseq->local_size - iseq->arg_size; i++) {
            *sp++ = Qnil;
        }

        vm_push_frame(th, iseq,
                      VM_FRAME_MAGIC_METHOD, recv, (VALUE) blockptr,
                      iseq->iseq_encoded + opt_pc, sp, 0, 0);
    }
}
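/*
 * Central method dispatch.  Selects on the method definition type
 * (iseq, cfunc, attr reader/writer, method_missing, bmethod, zsuper,
 * optimized send/call); private/protected visibility and $SAFE are
 * checked here as well, falling back to method_missing when a call
 * is not permitted.  Returning Qundef means an iseq frame was pushed
 * and the interpreter loop should continue into it instead of using
 * a C-level result.
 */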
static inline VALUE
vm_call_method(rb_thread_t *th, rb_control_frame_t *cfp,
               int num, const rb_block_t *blockptr, VALUE flag,
               ID id, const rb_method_entry_t *me, VALUE recv)
{
    VALUE val;

  start_method_dispatch:
    if (me != 0) {
        if ((me->flag == 0)) {
          normal_method_dispatch:
            switch (me->def->type) {
              case VM_METHOD_TYPE_ISEQ:{
                vm_setup_method(th, cfp, recv, num, blockptr, flag, me);
                return Qundef;
              }
              case VM_METHOD_TYPE_NOTIMPLEMENTED:
              case VM_METHOD_TYPE_CFUNC:{
                val = vm_call_cfunc(th, cfp, num, recv, blockptr, me);
                break;
              }
              case VM_METHOD_TYPE_ATTRSET:{
                if (num != 1) {
                    rb_raise(rb_eArgError, "wrong number of arguments (%d for 1)", num);
                }
                val = rb_ivar_set(recv, me->def->body.attr.id, *(cfp->sp - 1));
                cfp->sp -= 2;
                break;
              }
              case VM_METHOD_TYPE_IVAR:{
                if (num != 0) {
                    rb_raise(rb_eArgError, "wrong number of arguments (%d for 0)", num);
                }
                val = rb_attr_get(recv, me->def->body.attr.id);
                cfp->sp -= 1;
                break;
              }
              case VM_METHOD_TYPE_MISSING:{
                VALUE *argv = ALLOCA_N(VALUE, num+1);
                argv[0] = ID2SYM(me->def->original_id);
                MEMCPY(argv+1, cfp->sp - num, VALUE, num);
                cfp->sp += - num - 1;
                val = rb_funcall2(recv, rb_intern("method_missing"), num+1, argv);
                break;
              }
              case VM_METHOD_TYPE_BMETHOD:{
                VALUE *argv = ALLOCA_N(VALUE, num);
                MEMCPY(argv, cfp->sp - num, VALUE, num);
                cfp->sp += - num - 1;
                val = vm_call_bmethod(th, recv, num, argv, blockptr, me);
                break;
              }
              case VM_METHOD_TYPE_ZSUPER:{
                VALUE klass = RCLASS_SUPER(me->klass);
                me = rb_method_entry(klass, id);

                if (me != 0) {
                    goto normal_method_dispatch;
                }
                else {
                    goto start_method_dispatch;
                }
              }
              case VM_METHOD_TYPE_OPTIMIZED:{
                switch (me->def->body.optimize_type) {
                  case OPTIMIZED_METHOD_TYPE_SEND: {
                    rb_control_frame_t *reg_cfp = cfp;
                    rb_num_t i = num - 1;
                    VALUE sym;

                    if (num == 0) {
                        rb_raise(rb_eArgError, "no method name given");
                    }

                    sym = TOPN(i);
                    id = SYMBOL_P(sym) ? SYM2ID(sym) : rb_to_id(sym);
                    /* shift arguments */
                    if (i > 0) {
                        MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
                    }
                    me = rb_method_entry(CLASS_OF(recv), id);
                    num -= 1;
                    DEC_SP(1);
                    flag |= VM_CALL_FCALL_BIT | VM_CALL_OPT_SEND_BIT;

                    goto start_method_dispatch;
                  }
                  case OPTIMIZED_METHOD_TYPE_CALL: {
                    rb_proc_t *proc;
                    int argc = num;
                    VALUE *argv = ALLOCA_N(VALUE, num);
                    GetProcPtr(recv, proc);
                    MEMCPY(argv, cfp->sp - num, VALUE, num);
                    cfp->sp -= num + 1;

                    val = rb_vm_invoke_proc(th, proc, proc->block.self, argc, argv, blockptr);
                    break;
                  }
                  default:
                    rb_bug("eval_invoke_method: unsupported optimized method type (%d)",
                           me->def->body.optimize_type);
                }
                break;
              }
              default:{
                rb_bug("eval_invoke_method: unsupported method type (%d)", me->def->type);
                break;
              }
            }
        }
        else {
            int noex_safe;

            if (!(flag & VM_CALL_FCALL_BIT) &&
                (me->flag & NOEX_MASK) & NOEX_PRIVATE) {
                int stat = NOEX_PRIVATE;

                if (flag & VM_CALL_VCALL_BIT) {
                    stat |= NOEX_VCALL;
                }
                val = vm_method_missing(th, id, recv, num, blockptr, stat);
            }
            else if (!(flag & VM_CALL_OPT_SEND_BIT) && (me->flag & NOEX_MASK) & NOEX_PROTECTED) {
                VALUE defined_class = me->klass;

                if (TYPE(defined_class) == T_ICLASS) {
                    defined_class = RBASIC(defined_class)->klass;
                }

                if (!rb_obj_is_kind_of(cfp->self, defined_class)) {
                    val = vm_method_missing(th, id, recv, num, blockptr, NOEX_PROTECTED);
                }
                else {
                    goto normal_method_dispatch;
                }
            }
            else if ((noex_safe = NOEX_SAFE(me->flag)) > th->safe_level &&
                     (noex_safe > 2)) {
                rb_raise(rb_eSecurityError, "calling insecure method: %s", rb_id2name(id));
            }
            else {
                goto normal_method_dispatch;
            }
        }
    }
    else {
        /* method missing */
        int stat = 0;
        if (flag & VM_CALL_VCALL_BIT) {
            stat |= NOEX_VCALL;
        }
        if (flag & VM_CALL_SUPER_BIT) {
            stat |= NOEX_SUPER;
        }
        if (id == idMethodMissing) {
            VALUE *argv = ALLOCA_N(VALUE, num);
            vm_method_missing_args(th, argv, num - 1, 0, stat);
            rb_raise_method_missing(th, num, argv, recv, stat);
        }
        else {
            val = vm_method_missing(th, id, recv, num, blockptr, stat);
        }
    }

    RUBY_VM_CHECK_INTS();
    return val;
}
/* yield */

static inline int
block_proc_is_lambda(const VALUE procval)
{
    rb_proc_t *proc;

    if (procval) {
        GetProcPtr(procval, proc);
        return proc->is_lambda;
    }
    else {
        return 0;
    }
}
static inline VALUE
vm_yield_with_cfunc(rb_thread_t *th, const rb_block_t *block,
                    VALUE self, int argc, const VALUE *argv,
                    const rb_block_t *blockargptr)
{
    NODE *ifunc = (NODE *) block->iseq;
    VALUE val, arg, blockarg;
    int lambda = block_proc_is_lambda(block->proc);

    if (lambda) {
        arg = rb_ary_new4(argc, argv);
    }
    else if (argc == 0) {
        arg = Qnil;
    }
    else {
        arg = argv[0];
    }

    if (blockargptr) {
        if (blockargptr->proc) {
            blockarg = blockargptr->proc;
        }
        else {
            blockarg = rb_vm_make_proc(th, blockargptr, rb_cProc);
        }
    }
    else {
        blockarg = Qnil;
    }

    vm_push_frame(th, (rb_iseq_t *)ifunc, VM_FRAME_MAGIC_IFUNC,
                  self, (VALUE)block->dfp,
                  0, th->cfp->sp, block->lfp, 1);

    if (blockargptr) {
        th->cfp->lfp[0] = GC_GUARDED_PTR((VALUE)blockargptr);
    }
    val = (*ifunc->nd_cfnc) (arg, ifunc->nd_tval, argc, argv, blockarg);

    th->cfp++;
    return val;
}
/*--
 * @brief argument setup when any of the optional, rest and post
 *        parameters may be supplied.
 * @pre iseq is block style (not lambda style)
 */
static inline int
vm_yield_setup_block_args_complex(rb_thread_t *th, const rb_iseq_t *iseq,
                                  int argc, VALUE *argv)
{
    rb_num_t opt_pc = 0;
    int i;
    const int m = iseq->argc;
    const int r = iseq->arg_rest;
    int len = iseq->arg_post_len;
    int start = iseq->arg_post_start;
    int rsize = argc > m ? argc - m : 0;    /* # of arguments which have not been consumed yet */
    int psize = rsize > len ? len : rsize;  /* # of post arguments */
    int osize = 0;                          /* # of opt arguments */
    VALUE ary;

    /* reserves arguments for post parameters */
    rsize -= psize;

    if (iseq->arg_opts) {
        const int opts = iseq->arg_opts - 1;
        if (rsize > opts) {
            osize = opts;
            opt_pc = iseq->arg_opt_table[opts];
        }
        else {
            osize = rsize;
            opt_pc = iseq->arg_opt_table[rsize];
        }
    }
    rsize -= osize;

    if (0) {
        printf(" argc: %d\n", argc);
        printf("  len: %d\n", len);
        printf("start: %d\n", start);
        printf("rsize: %d\n", rsize);
    }

    if (r == -1) {
        /* copy post argument */
        MEMMOVE(&argv[start], &argv[m+osize], VALUE, psize);
    }
    else {
        ary = rb_ary_new4(rsize, &argv[r]);

        /* copy post argument */
        MEMMOVE(&argv[start], &argv[m+rsize+osize], VALUE, psize);
        argv[r] = ary;
    }

    for (i=psize; i<len; i++) {
        argv[start + i] = Qnil;
    }

    return (int)opt_pc;
}
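/*
 * Block (non-lambda) argument semantics differ from method calls: a
 * single Array argument is auto-splatted onto multiple parameters,
 * missing parameters become nil, and excess arguments are silently
 * truncated.  Lambdas instead take the strict, method-style path in
 * vm_yield_setup_args() below.
 */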
static inline int
vm_yield_setup_block_args(rb_thread_t *th, const rb_iseq_t * iseq,
                          int orig_argc, VALUE *argv,
                          const rb_block_t *blockptr)
{
    int i;
    int argc = orig_argc;
    const int m = iseq->argc;
    VALUE ary, arg0;
    int opt_pc = 0;

    th->mark_stack_len = argc;

    /*
     * yield [1, 2]
     *  => {|a|} => a = [1, 2]
     *  => {|a, b|} => a, b = [1, 2]
     */
    arg0 = argv[0];
    if (!(iseq->arg_simple & 0x02) &&          /* exclude {|a|} */
        (m + iseq->arg_post_len) > 0 &&        /* this process is meaningful */
        argc == 1 && !NIL_P(ary = rb_check_array_type(arg0))) { /* rhs is only an array */
        th->mark_stack_len = argc = RARRAY_LENINT(ary);

        CHECK_STACK_OVERFLOW(th->cfp, argc);

        MEMCPY(argv, RARRAY_PTR(ary), VALUE, argc);
    }
    else {
        argv[0] = arg0;
    }

    for (i=argc; i<m; i++) {
        argv[i] = Qnil;
    }

    if (iseq->arg_rest == -1 && iseq->arg_opts == 0) {
        const int arg_size = iseq->arg_size;
        if (arg_size < argc) {
            /*
             * yield 1, 2
             *  => {|a|} # truncate
             */
            th->mark_stack_len = argc = arg_size;
        }
    }
    else {
        int r = iseq->arg_rest;

        if (iseq->arg_post_len ||
            iseq->arg_opts) { /* TODO: implement simple version for (iseq->arg_post_len==0 && iseq->arg_opts > 0) */
            opt_pc = vm_yield_setup_block_args_complex(th, iseq, argc, argv);
        }
        else {
            if (argc < r) {
                /* yield 1
                 *  => {|a, b, *r|}
                 */
                for (i=argc; i<r; i++) {
                    argv[i] = Qnil;
                }
                argv[r] = rb_ary_new();
            }
            else {
                argv[r] = rb_ary_new4(argc-r, &argv[r]);
            }
        }

        th->mark_stack_len = iseq->arg_size;
    }

    /* {|&b|} */
    if (iseq->arg_block != -1) {
        VALUE procval = Qnil;

        if (blockptr) {
            if (blockptr->proc == 0) {
                procval = rb_vm_make_proc(th, blockptr, rb_cProc);
            }
            else {
                procval = blockptr->proc;
            }
        }

        argv[iseq->arg_block] = procval;
    }

    th->mark_stack_len = 0;
    return opt_pc;
}
static inline int
vm_yield_setup_args(rb_thread_t * const th, const rb_iseq_t *iseq,
                    int argc, VALUE *argv,
                    const rb_block_t *blockptr, int lambda)
{
    if (0) { /* for debug */
        printf("     argc: %d\n", argc);
        printf("iseq argc: %d\n", iseq->argc);
        printf("iseq opts: %d\n", iseq->arg_opts);
        printf("iseq rest: %d\n", iseq->arg_rest);
        printf("iseq post: %d\n", iseq->arg_post_len);
        printf("iseq blck: %d\n", iseq->arg_block);
        printf("iseq smpl: %d\n", iseq->arg_simple);
        printf("   lambda: %s\n", lambda ? "true" : "false");
    }

    if (lambda) {
        /* call as method */
        int opt_pc;
        VM_CALLEE_SETUP_ARG(opt_pc, th, iseq, argc, argv, &blockptr);
        return opt_pc;
    }
    else {
        return vm_yield_setup_block_args(th, iseq, argc, argv, blockptr);
    }
}
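/*
 * Implements `yield`: locate the block passed to the enclosing
 * method frame and invoke it, either by pushing a BLOCK frame (for
 * an iseq block) or via vm_yield_with_cfunc() (for an ifunc, i.e. a
 * C-level block).
 */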
static VALUE
vm_invoke_block(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_num_t num, rb_num_t flag)
{
    const rb_block_t *block = GET_BLOCK_PTR();
    rb_iseq_t *iseq;
    int argc = (int)num;
    VALUE type = GET_ISEQ()->local_iseq->type;

    if ((type != ISEQ_TYPE_METHOD && type != ISEQ_TYPE_CLASS) || block == 0) {
        rb_vm_localjump_error("no block given (yield)", Qnil, 0);
    }
    iseq = block->iseq;

    argc = caller_setup_args(th, GET_CFP(), flag, argc, 0, 0);

    if (BUILTIN_TYPE(iseq) != T_NODE) {
        int opt_pc;
        const int arg_size = iseq->arg_size;
        VALUE * const rsp = GET_SP() - argc;
        SET_SP(rsp);

        CHECK_STACK_OVERFLOW(GET_CFP(), iseq->stack_max);
        opt_pc = vm_yield_setup_args(th, iseq, argc, rsp, 0,
                                     block_proc_is_lambda(block->proc));

        vm_push_frame(th, iseq,
                      VM_FRAME_MAGIC_BLOCK, block->self, (VALUE) block->dfp,
                      iseq->iseq_encoded + opt_pc, rsp + arg_size, block->lfp,
                      iseq->local_size - arg_size);

        return Qundef;
    }
    else {
        VALUE val = vm_yield_with_cfunc(th, block, block->self, argc, STACK_ADDR_FROM_TOP(argc), 0);
        POPN(argc); /* TODO: should put before C/yield? */
        return val;
    }
}
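/*
 * The "svar" helpers below manage frame-local special variables,
 * apparently $_ (key 0) and $~ (key 1), with other keys spilling
 * into an overflow hash.  Storage lives in the slot just below the
 * local frame pointer, or in th->local_svar at the thread top level,
 * and is lazily allocated as a NODE.
 */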
/* svar */

static inline NODE *
lfp_svar_place(rb_thread_t *th, VALUE *lfp)
{
    VALUE *svar;

    if (lfp && th->local_lfp != lfp) {
        svar = &lfp[-1];
    }
    else {
        svar = &th->local_svar;
    }

    if (NIL_P(*svar)) {
        *svar = (VALUE)NEW_IF(Qnil, Qnil, Qnil);
    }

    return (NODE *)*svar;
}
static VALUE
lfp_svar_get(rb_thread_t *th, VALUE *lfp, VALUE key)
{
    NODE *svar = lfp_svar_place(th, lfp);

    switch (key) {
      case 0:
        return svar->u1.value;
      case 1:
        return svar->u2.value;
      default: {
        const VALUE hash = svar->u3.value;

        if (hash == Qnil) {
            return Qnil;
        }
        else {
            return rb_hash_lookup(hash, key);
        }
      }
    }
}
static void
lfp_svar_set(rb_thread_t *th, VALUE *lfp, VALUE key, VALUE val)
{
    NODE *svar = lfp_svar_place(th, lfp);

    switch (key) {
      case 0:
        svar->u1.value = val;
        return;
      case 1:
        svar->u2.value = val;
        return;
      default: {
        VALUE hash = svar->u3.value;

        if (hash == Qnil) {
            svar->u3.value = hash = rb_hash_new();
        }
        rb_hash_aset(hash, key, val);
      }
    }
}
static inline VALUE
vm_getspecial(rb_thread_t *th, VALUE *lfp, VALUE key, rb_num_t type)
{
    VALUE val;

    if (type == 0) {
        VALUE k = key;
        if (FIXNUM_P(key)) {
            k = FIX2INT(key);
        }
        val = lfp_svar_get(th, lfp, k);
    }
    else {
        VALUE backref = lfp_svar_get(th, lfp, 1);

        if (type & 0x01) {
            switch (type >> 1) {
              case '&':
                val = rb_reg_last_match(backref);
                break;
              case '`':
                val = rb_reg_match_pre(backref);
                break;
              case '\'':
                val = rb_reg_match_post(backref);
                break;
              case '+':
                val = rb_reg_match_last(backref);
                break;
              default:
                rb_bug("unexpected back-ref");
            }
        }
        else {
            val = rb_reg_nth_match((int)(type >> 1), backref);
        }
    }
    return val;
}
static NODE *
vm_get_cref0(const rb_iseq_t *iseq, const VALUE *lfp, const VALUE *dfp)
{
    while (1) {
        if (lfp == dfp) {
            if (!RUBY_VM_NORMAL_ISEQ_P(iseq)) return NULL;
            return iseq->cref_stack;
        }
        else if (dfp[-1] != Qnil) {
            return (NODE *)dfp[-1];
        }
        dfp = GET_PREV_DFP(dfp);
    }
}

static NODE *
vm_get_cref(const rb_iseq_t *iseq, const VALUE *lfp, const VALUE *dfp)
{
    NODE *cref = vm_get_cref0(iseq, lfp, dfp);

    if (cref == 0) {
        rb_bug("vm_get_cref: unreachable");
    }
    return cref;
}
static NODE *
vm_cref_push(rb_thread_t *th, VALUE klass, int noex, rb_block_t *blockptr)
{
    rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(th, th->cfp);
    NODE *cref = NEW_BLOCK(klass);
    cref->nd_file = 0;
    cref->nd_visi = noex;

    if (blockptr) {
        cref->nd_next = vm_get_cref0(blockptr->iseq, blockptr->lfp, blockptr->dfp);
    }
    else if (cfp) {
        cref->nd_next = vm_get_cref0(cfp->iseq, cfp->lfp, cfp->dfp);
    }

    return cref;
}
static inline VALUE
vm_get_cbase(const rb_iseq_t *iseq, const VALUE *lfp, const VALUE *dfp)
{
    NODE *cref = vm_get_cref(iseq, lfp, dfp);
    VALUE klass = Qundef;

    while (cref) {
        if ((klass = cref->nd_clss) != 0) {
            break;
        }
        cref = cref->nd_next;
    }

    return klass;
}

static inline VALUE
vm_get_const_base(const rb_iseq_t *iseq, const VALUE *lfp, const VALUE *dfp)
{
    NODE *cref = vm_get_cref(iseq, lfp, dfp);
    VALUE klass = Qundef;

    while (cref) {
        if (!(cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL) &&
            (klass = cref->nd_clss) != 0) {
            break;
        }
        cref = cref->nd_next;
    }

    return klass;
}
static inline void
vm_check_if_namespace(VALUE klass)
{
    switch (TYPE(klass)) {
      case T_CLASS:
      case T_MODULE:
        break;
      default:
        rb_raise(rb_eTypeError, "%s is not a class/module",
                 RSTRING_PTR(rb_inspect(klass)));
    }
}
static inline VALUE
vm_get_ev_const(rb_thread_t *th, const rb_iseq_t *iseq,
                VALUE orig_klass, ID id, int is_defined)
{
    VALUE val;

    if (orig_klass == Qnil) {
        /* in current lexical scope */
        const NODE *cref = vm_get_cref(iseq, th->cfp->lfp, th->cfp->dfp);
        const NODE *root_cref = NULL;
        VALUE klass = orig_klass;

        while (cref && cref->nd_next) {
            if (!(cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL)) {
                klass = cref->nd_clss;
                if (root_cref == NULL)
                    root_cref = cref;
            }
            cref = cref->nd_next;

            if (!NIL_P(klass)) {
                VALUE am = 0;
              search_continue:
                if (RCLASS_IV_TBL(klass) &&
                    st_lookup(RCLASS_IV_TBL(klass), id, &val)) {
                    if (val == Qundef) {
                        if (am == klass) break;
                        am = klass;
                        rb_autoload_load(klass, id);
                        goto search_continue;
                    }
                    else {
                        if (is_defined) {
                            return 1;
                        }
                        else {
                            return val;
                        }
                    }
                }
            }
        }

        /* search self */
        if (root_cref && !NIL_P(root_cref->nd_clss)) {
            klass = root_cref->nd_clss;
        }
        else {
            klass = CLASS_OF(th->cfp->self);
        }

        if (is_defined) {
            return rb_const_defined(klass, id);
        }
        else {
            return rb_const_get(klass, id);
        }
    }
    else {
        vm_check_if_namespace(orig_klass);
        if (is_defined) {
            return rb_const_defined_from(orig_klass, id);
        }
        else {
            return rb_const_get_from(orig_klass, id);
        }
    }
}
static inline VALUE
vm_get_cvar_base(NODE *cref)
{
    VALUE klass;

    while (cref && cref->nd_next &&
           (NIL_P(cref->nd_clss) || FL_TEST(cref->nd_clss, FL_SINGLETON) ||
            (cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL))) {
        cref = cref->nd_next;

        if (!cref->nd_next) {
            rb_warn("class variable access from toplevel");
        }
    }

    klass = cref->nd_clss;

    if (NIL_P(klass)) {
        rb_raise(rb_eTypeError, "no class variables available");
    }
    return klass;
}
#ifndef USE_IC_FOR_IVAR
#define USE_IC_FOR_IVAR 1
#endif
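/*
 * Instance variable access with an inline cache (IC).  For plain
 * T_OBJECTs, each class keeps an ivar-name -> slot-index table; the
 * cache memoizes that index per call site, keyed on the object's
 * class, so a hit avoids the st_lookup entirely.
 */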
static VALUE
vm_getivar(VALUE obj, ID id, IC ic)
{
#if USE_IC_FOR_IVAR
    if (TYPE(obj) == T_OBJECT) {
        VALUE val = Qundef;
        VALUE klass = RBASIC(obj)->klass;

        if (ic->ic_class == klass) {
            long index = ic->ic_value.index;
            long len = ROBJECT_NUMIV(obj);
            VALUE *ptr = ROBJECT_IVPTR(obj);

            if (index < len) {
                val = ptr[index];
            }
        }
        else {
            st_data_t index;
            long len = ROBJECT_NUMIV(obj);
            VALUE *ptr = ROBJECT_IVPTR(obj);
            struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);

            if (iv_index_tbl) {
                if (st_lookup(iv_index_tbl, id, &index)) {
                    if ((long)index < len) {
                        val = ptr[index];
                    }
                    ic->ic_class = klass;
                    ic->ic_value.index = index;
                }
            }
        }

        if (UNLIKELY(val == Qundef)) {
            rb_warning("instance variable %s not initialized", rb_id2name(id));
            val = Qnil;
        }
        return val;
    }
    else {
        return rb_ivar_get(obj, id);
    }
#else
    return rb_ivar_get(obj, id);
#endif
}
static void
vm_setivar(VALUE obj, ID id, VALUE val, IC ic)
{
#if USE_IC_FOR_IVAR
    if (!OBJ_UNTRUSTED(obj) && rb_safe_level() >= 4) {
        rb_raise(rb_eSecurityError, "Insecure: can't modify instance variable");
    }

    if (OBJ_FROZEN(obj)) {
        rb_error_frozen("object");
    }

    if (TYPE(obj) == T_OBJECT) {
        VALUE klass = RBASIC(obj)->klass;
        st_data_t index;

        if (ic->ic_class == klass) {
            long index = ic->ic_value.index;
            long len = ROBJECT_NUMIV(obj);
            VALUE *ptr = ROBJECT_IVPTR(obj);

            if (index < len) {
                ptr[index] = val;
                return; /* inline cache hit */
            }
        }
        else {
            struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);

            if (iv_index_tbl && st_lookup(iv_index_tbl, (st_data_t)id, &index)) {
                ic->ic_class = klass;
                ic->ic_value.index = index;
            }
            /* fall through */
        }
    }
    rb_ivar_set(obj, id, val);
#else
    rb_ivar_set(obj, id, val);
#endif
}
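/*
 * Method lookup with an inline cache: a cached entry is valid only
 * while both the receiver class matches and the global VM state
 * version (bumped on method definition/removal, module inclusion,
 * and the like) is unchanged.
 */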
static inline const rb_method_entry_t *
vm_method_search(VALUE id, VALUE klass, IC ic)
{
    rb_method_entry_t *me;
#if OPT_INLINE_METHOD_CACHE
    if (LIKELY(klass == ic->ic_class) &&
        LIKELY(GET_VM_STATE_VERSION() == ic->ic_vmstat)) {
        me = ic->ic_value.method;
    }
    else {
        me = rb_method_entry(klass, id);
        ic->ic_class = klass;
        ic->ic_value.method = me;
        ic->ic_vmstat = GET_VM_STATE_VERSION();
    }
#else
    me = rb_method_entry(klass, id);
#endif
    return me;
}
static inline VALUE
vm_search_normal_superclass(VALUE klass, VALUE recv)
{
    if (BUILTIN_TYPE(klass) == T_CLASS) {
        return RCLASS_SUPER(klass);
    }
    else if (BUILTIN_TYPE(klass) == T_MODULE) {
        VALUE k = CLASS_OF(recv);
        while (k) {
            if (BUILTIN_TYPE(k) == T_ICLASS && RBASIC(k)->klass == klass) {
                return RCLASS_SUPER(k);
            }
            k = RCLASS_SUPER(k);
        }
        return rb_cObject;
    }
    else {
        rb_bug("vm_search_normal_superclass: should not reach here");
    }
}
static void
vm_search_superclass(rb_control_frame_t *reg_cfp, rb_iseq_t *ip,
                     VALUE recv, VALUE sigval,
                     ID *idp, VALUE *klassp)
{
    ID id;
    VALUE klass;

    while (ip && !ip->klass) {
        ip = ip->parent_iseq;
    }

    if (ip == 0) {
        rb_raise(rb_eNoMethodError, "super called outside of method");
    }

    id = ip->defined_method_id;

    if (ip != ip->local_iseq) {
        /* defined by Module#define_method() */
        rb_control_frame_t *lcfp = GET_CFP();

        if (!sigval) {
            /* zsuper */
            rb_raise(rb_eRuntimeError, "implicit argument passing of super from method defined by define_method() is not supported. Specify all arguments explicitly.");
        }

        while (lcfp->iseq != ip) {
            VALUE *tdfp = GET_PREV_DFP(lcfp->dfp);
            while (1) {
                lcfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(lcfp);
                if (lcfp->dfp == tdfp) {
                    break;
                }
            }
        }

        /* temporary measure for [Bug #2420] [Bug #3136] */
        if (!lcfp->me) {
            rb_raise(rb_eNoMethodError, "super called outside of method");
        }

        id = lcfp->me->def->original_id;
        klass = vm_search_normal_superclass(lcfp->me->klass, recv);
    }
    else {
        klass = vm_search_normal_superclass(ip->klass, recv);
    }

    *idp = id;
    *klassp = klass;
}
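/*
 * `throw_state` packs three fields (decoded at the top of vm_throw):
 *   bits 0..7  : TAG_* state (TAG_BREAK, TAG_RETRY, TAG_RETURN, ...)
 *   bit  15    : "raw" flag, which skips the frame search below
 *   bits 16..  : level, for re-raising through nested frames
 * For break/return the code walks the control-frame stack to find
 * the frame to unwind to, raising a LocalJumpError for orphaned
 * blocks whose home frame is already gone.
 */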
static VALUE
vm_throw(rb_thread_t *th, rb_control_frame_t *reg_cfp,
         rb_num_t throw_state, VALUE throwobj)
{
    int state = (int)(throw_state & 0xff);
    int flag = (int)(throw_state & 0x8000);
    rb_num_t level = throw_state >> 16;

    if (state != 0) {
        VALUE *pt = 0;
        if (flag != 0) {
            pt = (void *) 1;
        }
        else {
            if (state == TAG_BREAK) {
                rb_control_frame_t *cfp = GET_CFP();
                VALUE *dfp = GET_DFP();
                int is_orphan = 1;
                rb_iseq_t *base_iseq = GET_ISEQ();

              search_parent:
                if (cfp->iseq->type != ISEQ_TYPE_BLOCK) {
                    if (cfp->iseq->type == ISEQ_TYPE_CLASS) {
                        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
                        dfp = cfp->dfp;
                        goto search_parent;
                    }
                    dfp = GC_GUARDED_PTR_REF((VALUE *) *dfp);
                    base_iseq = base_iseq->parent_iseq;

                    while ((VALUE *) cfp < th->stack + th->stack_size) {
                        if (cfp->dfp == dfp) {
                            goto search_parent;
                        }
                        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
                    }
                    rb_bug("VM (throw): can't find break base.");
                }

                if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_LAMBDA) {
                    /* lambda{... break ...} */
                    is_orphan = 0;
                    pt = cfp->dfp;
                    state = TAG_RETURN;
                }
                else {
                    dfp = GC_GUARDED_PTR_REF((VALUE *) *dfp);

                    while ((VALUE *)cfp < th->stack + th->stack_size) {
                        if (cfp->dfp == dfp) {
                            VALUE epc = cfp->pc - cfp->iseq->iseq_encoded;
                            rb_iseq_t *iseq = cfp->iseq;
                            int i;

                            for (i=0; i<iseq->catch_table_size; i++) {
                                struct iseq_catch_table_entry *entry = &iseq->catch_table[i];

                                if (entry->type == CATCH_TYPE_BREAK &&
                                    entry->start < epc && entry->end >= epc) {
                                    if (entry->cont == epc) {
                                        goto found;
                                    }
                                    else {
                                        break;
                                    }
                                }
                            }
                            break;

                          found:
                            pt = dfp;
                            is_orphan = 0;
                            break;
                        }
                        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
                    }
                }

                if (is_orphan) {
                    rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
                }
            }
            else if (state == TAG_RETRY) {
                rb_num_t i;
                pt = GC_GUARDED_PTR_REF((VALUE *) * GET_DFP());
                for (i = 0; i < level; i++) {
                    pt = GC_GUARDED_PTR_REF((VALUE *) * pt);
                }
            }
            else if (state == TAG_RETURN) {
                rb_control_frame_t *cfp = GET_CFP();
                VALUE *dfp = GET_DFP();
                VALUE *lfp = GET_LFP();

                /* check orphan and get dfp */
                while ((VALUE *) cfp < th->stack + th->stack_size) {
                    if (!lfp) {
                        lfp = cfp->lfp;
                    }
                    if (cfp->dfp == lfp && cfp->iseq->type == ISEQ_TYPE_CLASS) {
                        lfp = 0;
                    }

                    if (cfp->lfp == lfp) {
                        if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_LAMBDA) {
                            VALUE *tdfp = dfp;

                            while (lfp != tdfp) {
                                if (cfp->dfp == tdfp) {
                                    /* in lambda */
                                    dfp = cfp->dfp;
                                    goto valid_return;
                                }
                                tdfp = GC_GUARDED_PTR_REF((VALUE *)*tdfp);
                            }
                        }
                    }

                    if (cfp->dfp == lfp && cfp->iseq->type == ISEQ_TYPE_METHOD) {
                        dfp = lfp;
                        goto valid_return;
                    }

                    cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
                }

                rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);

              valid_return:
                pt = dfp;
            }
            else {
                rb_bug("insns(throw): unsupported throw type");
            }
        }

        th->state = state;
        return (VALUE)NEW_THROW_OBJECT(throwobj, (VALUE) pt, state);
    }
    else {
        /* continue throw */
        VALUE err = throwobj;

        if (FIXNUM_P(err)) {
            th->state = FIX2INT(err);
        }
        else if (SYMBOL_P(err)) {
            th->state = TAG_THROW;
        }
        else if (BUILTIN_TYPE(err) == T_NODE) {
            th->state = GET_THROWOBJ_STATE(err);
        }
        else {
            th->state = TAG_RAISE;
            /*th->state = FIX2INT(rb_ivar_get(err, idThrowState));*/
        }
        return err;
    }
}
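/*
 * Expand an Array for multiple assignment (a, b, *c = ...).  `num`
 * slots are filled from the array; flag bit 0x01 requests a splat
 * slot for the leftovers and bit 0x02 selects "post" order (values
 * pushed for trailing parameters, padding with nil first).
 */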
static inline void
vm_expandarray(rb_control_frame_t *cfp, VALUE ary, rb_num_t num, int flag)
{
    int is_splat = flag & 0x01;
    rb_num_t space_size = num + is_splat;
    VALUE *base = cfp->sp, *ptr;
    volatile VALUE tmp_ary;
    rb_num_t len;

    if (TYPE(ary) != T_ARRAY) {
        ary = rb_ary_to_ary(ary);
    }

    cfp->sp += space_size;

    tmp_ary = ary;
    ptr = RARRAY_PTR(ary);
    len = (rb_num_t)RARRAY_LEN(ary);

    if (flag & 0x02) {
        /* post: ..., nil, ary[-1], ..., ary[0..-num] # top */
        rb_num_t i = 0, j;

        if (len < num) {
            for (i=0; i<num-len; i++) {
                *base++ = Qnil;
            }
        }
        for (j=0; i<num; i++, j++) {
            VALUE v = ptr[len - j - 1];
            *base++ = v;
        }
        if (is_splat) {
            *base = rb_ary_new4(len - j, ptr);
        }
    }
    else {
        /* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */
        rb_num_t i;
        VALUE *bptr = &base[space_size - 1];

        for (i=0; i<num; i++) {
            if (len <= i) {
                for (; i<num; i++) {
                    *bptr-- = Qnil;
                }
                break;
            }
            *bptr-- = ptr[i];
        }
        if (is_splat) {
            if (num > len) {
                *bptr = rb_ary_new();
            }
            else {
                *bptr = rb_ary_new4(len - num, ptr + num);
            }
        }
    }
}
static inline int
check_cfunc(const rb_method_entry_t *me, VALUE (*func)())
{
    if (me && me->def->type == VM_METHOD_TYPE_CFUNC &&
        me->def->body.cfunc.func == func) {
        return 1;
    }
    else {
        return 0;
    }
}
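/*
 * Fast path for the opt_eq instruction: handle Fixnum/Float/String
 * operands inline while `==` is unredefined, and receivers whose
 * `==` is still the default rb_obj_equal by identity comparison.
 * Returns Qundef to fall back to a full method dispatch.
 */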
static
#ifndef NO_BIG_INLINE
inline
#endif
VALUE
opt_eq_func(VALUE recv, VALUE obj, IC ic)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_EQ)) {
        return (recv == obj) ? Qtrue : Qfalse;
    }
    else if (!SPECIAL_CONST_P(recv) && !SPECIAL_CONST_P(obj)) {
        if (HEAP_CLASS_OF(recv) == rb_cFloat &&
            HEAP_CLASS_OF(obj) == rb_cFloat &&
            BASIC_OP_UNREDEFINED_P(BOP_EQ)) {
            double a = RFLOAT_VALUE(recv);
            double b = RFLOAT_VALUE(obj);

            if (isnan(a) || isnan(b)) {
                return Qfalse;
            }
            return (a == b) ? Qtrue : Qfalse;
        }
        else if (HEAP_CLASS_OF(recv) == rb_cString &&
                 HEAP_CLASS_OF(obj) == rb_cString &&
                 BASIC_OP_UNREDEFINED_P(BOP_EQ)) {
            return rb_str_equal(recv, obj);
        }
    }

    {
        const rb_method_entry_t *me = vm_method_search(idEq, CLASS_OF(recv), ic);
        extern VALUE rb_obj_equal(VALUE obj1, VALUE obj2);

        if (check_cfunc(me, rb_obj_equal)) {
            return recv == obj ? Qtrue : Qfalse;
        }
    }

    return Qundef;
}
struct opt_case_dispatch_i_arg {
    VALUE obj;
    int label;
};

static int
opt_case_dispatch_i(st_data_t key, st_data_t data, st_data_t p)
{
    struct opt_case_dispatch_i_arg *arg = (void *)p;

    if (RTEST(rb_funcall((VALUE)key, idEqq, 1, arg->obj))) {
        arg->label = FIX2INT((VALUE)data);
        return ST_STOP;
    }
    else {
        return ST_CONTINUE;
    }
}