
/vm_insnhelper.c

https://github.com/wanabe/ruby
  1. /**********************************************************************
  2. vm_insnhelper.c - instruction helper functions.
  3. $Author$
  4. Copyright (C) 2007 Koichi Sasada
  5. **********************************************************************/
  6. #include "ruby/internal/config.h"
  7. #include <math.h>
  8. #include "constant.h"
  9. #include "debug_counter.h"
  10. #include "internal.h"
  11. #include "internal/class.h"
  12. #include "internal/compar.h"
  13. #include "internal/hash.h"
  14. #include "internal/numeric.h"
  15. #include "internal/proc.h"
  16. #include "internal/random.h"
  17. #include "internal/variable.h"
  18. #include "variable.h"
  19. /* finish iseq array */
  20. #include "insns.inc"
  21. #ifndef MJIT_HEADER
  22. #include "insns_info.inc"
  23. #endif
  24. extern rb_method_definition_t *rb_method_definition_create(rb_method_type_t type, ID mid);
  25. extern void rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *def, void *opts);
  26. extern int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
  27. extern VALUE rb_make_no_method_exception(VALUE exc, VALUE format, VALUE obj,
  28. int argc, const VALUE *argv, int priv);
  29. #ifndef MJIT_HEADER
  30. static const struct rb_callcache vm_empty_cc;
  31. #endif
  32. /* control stack frame */
  33. static rb_control_frame_t *vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
  34. MJIT_STATIC VALUE
  35. ruby_vm_special_exception_copy(VALUE exc)
  36. {
  37. VALUE e = rb_obj_alloc(rb_class_real(RBASIC_CLASS(exc)));
  38. rb_obj_copy_ivar(e, exc);
  39. return e;
  40. }
  41. NORETURN(static void ec_stack_overflow(rb_execution_context_t *ec, int));
  42. static void
  43. ec_stack_overflow(rb_execution_context_t *ec, int setup)
  44. {
  45. VALUE mesg = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_sysstack];
  46. ec->raised_flag = RAISED_STACKOVERFLOW;
  47. if (setup) {
  48. VALUE at = rb_ec_backtrace_object(ec);
  49. mesg = ruby_vm_special_exception_copy(mesg);
  50. rb_ivar_set(mesg, idBt, at);
  51. rb_ivar_set(mesg, idBt_locations, at);
  52. }
  53. ec->errinfo = mesg;
  54. EC_JUMP_TAG(ec, TAG_RAISE);
  55. }
  56. NORETURN(static void vm_stackoverflow(void));
  57. #ifdef MJIT_HEADER
  58. NOINLINE(static COLDFUNC void vm_stackoverflow(void));
  59. #endif
  60. static void
  61. vm_stackoverflow(void)
  62. {
  63. ec_stack_overflow(GET_EC(), TRUE);
  64. }
  65. NORETURN(MJIT_STATIC void rb_ec_stack_overflow(rb_execution_context_t *ec, int crit));
  66. MJIT_STATIC void
  67. rb_ec_stack_overflow(rb_execution_context_t *ec, int crit)
  68. {
  69. if (rb_during_gc()) {
  70. rb_bug("system stack overflow during GC. Faulty native extension?");
  71. }
  72. if (crit) {
  73. ec->raised_flag = RAISED_STACKOVERFLOW;
  74. ec->errinfo = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_stackfatal];
  75. EC_JUMP_TAG(ec, TAG_RAISE);
  76. }
  77. #ifdef USE_SIGALTSTACK
  78. ec_stack_overflow(ec, TRUE);
  79. #else
  80. ec_stack_overflow(ec, FALSE);
  81. #endif
  82. }
  83. #if VM_CHECK_MODE > 0
  84. static int
  85. callable_class_p(VALUE klass)
  86. {
  87. #if VM_CHECK_MODE >= 2
  88. if (!klass) return FALSE;
  89. switch (RB_BUILTIN_TYPE(klass)) {
  90. default:
  91. break;
  92. case T_ICLASS:
  93. if (!RB_TYPE_P(RCLASS_SUPER(klass), T_MODULE)) break;
  94. case T_MODULE:
  95. return TRUE;
  96. }
  97. while (klass) {
  98. if (klass == rb_cBasicObject) {
  99. return TRUE;
  100. }
  101. klass = RCLASS_SUPER(klass);
  102. }
  103. return FALSE;
  104. #else
  105. return klass != 0;
  106. #endif
  107. }
  108. static int
  109. callable_method_entry_p(const rb_callable_method_entry_t *cme)
  110. {
  111. if (cme == NULL) {
  112. return TRUE;
  113. }
  114. else {
  115. VM_ASSERT(IMEMO_TYPE_P((VALUE)cme, imemo_ment));
  116. if (callable_class_p(cme->defined_class)) {
  117. return TRUE;
  118. }
  119. else {
  120. return FALSE;
  121. }
  122. }
  123. }
  124. static void
  125. vm_check_frame_detail(VALUE type, int req_block, int req_me, int req_cref, VALUE specval, VALUE cref_or_me, int is_cframe, const rb_iseq_t *iseq)
  126. {
  127. unsigned int magic = (unsigned int)(type & VM_FRAME_MAGIC_MASK);
  128. enum imemo_type cref_or_me_type = imemo_env; /* impossible value */
  129. if (RB_TYPE_P(cref_or_me, T_IMEMO)) {
  130. cref_or_me_type = imemo_type(cref_or_me);
  131. }
  132. if (type & VM_FRAME_FLAG_BMETHOD) {
  133. req_me = TRUE;
  134. }
  135. if (req_block && (type & VM_ENV_FLAG_LOCAL) == 0) {
  136. rb_bug("vm_push_frame: specval (%p) should be a block_ptr on %x frame", (void *)specval, magic);
  137. }
  138. if (!req_block && (type & VM_ENV_FLAG_LOCAL) != 0) {
  139. rb_bug("vm_push_frame: specval (%p) should not be a block_ptr on %x frame", (void *)specval, magic);
  140. }
  141. if (req_me) {
  142. if (cref_or_me_type != imemo_ment) {
  143. rb_bug("vm_push_frame: (%s) should be method entry on %x frame", rb_obj_info(cref_or_me), magic);
  144. }
  145. }
  146. else {
  147. if (req_cref && cref_or_me_type != imemo_cref) {
  148. rb_bug("vm_push_frame: (%s) should be CREF on %x frame", rb_obj_info(cref_or_me), magic);
  149. }
  150. else { /* cref or Qfalse */
  151. if (cref_or_me != Qfalse && cref_or_me_type != imemo_cref) {
  152. if (((type & VM_FRAME_FLAG_LAMBDA) || magic == VM_FRAME_MAGIC_IFUNC) && (cref_or_me_type == imemo_ment)) {
  153. /* ignore */
  154. }
  155. else {
  156. rb_bug("vm_push_frame: (%s) should be false or cref on %x frame", rb_obj_info(cref_or_me), magic);
  157. }
  158. }
  159. }
  160. }
  161. if (cref_or_me_type == imemo_ment) {
  162. const rb_callable_method_entry_t *me = (const rb_callable_method_entry_t *)cref_or_me;
  163. if (!callable_method_entry_p(me)) {
  164. rb_bug("vm_push_frame: ment (%s) should be callable on %x frame.", rb_obj_info(cref_or_me), magic);
  165. }
  166. }
  167. if ((type & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY) {
  168. VM_ASSERT(iseq == NULL ||
  169. RUBY_VM_NORMAL_ISEQ_P(iseq) /* argument error. it should be fixed */);
  170. }
  171. else {
  172. VM_ASSERT(is_cframe == !RUBY_VM_NORMAL_ISEQ_P(iseq));
  173. }
  174. }
  175. static void
  176. vm_check_frame(VALUE type,
  177. VALUE specval,
  178. VALUE cref_or_me,
  179. const rb_iseq_t *iseq)
  180. {
  181. VALUE given_magic = type & VM_FRAME_MAGIC_MASK;
  182. VM_ASSERT(FIXNUM_P(type));
  183. #define CHECK(magic, req_block, req_me, req_cref, is_cframe) \
  184. case magic: \
  185. vm_check_frame_detail(type, req_block, req_me, req_cref, \
  186. specval, cref_or_me, is_cframe, iseq); \
  187. break
  188. switch (given_magic) {
  189. /* BLK ME CREF CFRAME */
  190. CHECK(VM_FRAME_MAGIC_METHOD, TRUE, TRUE, FALSE, FALSE);
  191. CHECK(VM_FRAME_MAGIC_CLASS, TRUE, FALSE, TRUE, FALSE);
  192. CHECK(VM_FRAME_MAGIC_TOP, TRUE, FALSE, TRUE, FALSE);
  193. CHECK(VM_FRAME_MAGIC_CFUNC, TRUE, TRUE, FALSE, TRUE);
  194. CHECK(VM_FRAME_MAGIC_BLOCK, FALSE, FALSE, FALSE, FALSE);
  195. CHECK(VM_FRAME_MAGIC_IFUNC, FALSE, FALSE, FALSE, TRUE);
  196. CHECK(VM_FRAME_MAGIC_EVAL, FALSE, FALSE, FALSE, FALSE);
  197. CHECK(VM_FRAME_MAGIC_RESCUE, FALSE, FALSE, FALSE, FALSE);
  198. CHECK(VM_FRAME_MAGIC_DUMMY, TRUE, FALSE, FALSE, FALSE);
  199. default:
  200. rb_bug("vm_push_frame: unknown type (%x)", (unsigned int)given_magic);
  201. }
  202. #undef CHECK
  203. }
  204. static VALUE vm_stack_canary; /* Initialized later */
  205. static bool vm_stack_canary_was_born = false;
  206. #ifndef MJIT_HEADER
  207. MJIT_FUNC_EXPORTED void
  208. rb_vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
  209. {
  210. const struct rb_control_frame_struct *reg_cfp = ec->cfp;
  211. const struct rb_iseq_struct *iseq;
  212. if (! LIKELY(vm_stack_canary_was_born)) {
  213. return; /* :FIXME: isn't it rather fatal to enter this branch? */
  214. }
  215. else if ((VALUE *)reg_cfp == ec->vm_stack + ec->vm_stack_size) {
  216. /* This is at the very beginning of a thread. cfp does not exist. */
  217. return;
  218. }
  219. else if (! (iseq = GET_ISEQ())) {
  220. return;
  221. }
  222. else if (LIKELY(sp[0] != vm_stack_canary)) {
  223. return;
  224. }
  225. else {
226. /* we are going to call methods below; squash the canary to
227. * prevent an infinite loop. */
  228. sp[0] = Qundef;
  229. }
  230. const VALUE *orig = rb_iseq_original_iseq(iseq);
  231. const VALUE *encoded = iseq->body->iseq_encoded;
  232. const ptrdiff_t pos = GET_PC() - encoded;
  233. const enum ruby_vminsn_type insn = (enum ruby_vminsn_type)orig[pos];
  234. const char *name = insn_name(insn);
  235. const VALUE iseqw = rb_iseqw_new(iseq);
  236. const VALUE inspection = rb_inspect(iseqw);
  237. const char *stri = rb_str_to_cstr(inspection);
  238. const VALUE disasm = rb_iseq_disasm(iseq);
  239. const char *strd = rb_str_to_cstr(disasm);
240. /* rb_bug() is not capable of outputting contents this large. It
241. is designed to run from a SIGSEGV handler, which tends to be
242. very restricted. */
  243. ruby_debug_printf(
  244. "We are killing the stack canary set by %s, "
  245. "at %s@pc=%"PRIdPTR"\n"
  246. "watch out the C stack trace.\n"
  247. "%s",
  248. name, stri, pos, strd);
  249. rb_bug("see above.");
  250. }
  251. #endif
  252. #define vm_check_canary(ec, sp) rb_vm_check_canary(ec, sp)
  253. #else
  254. #define vm_check_canary(ec, sp)
  255. #define vm_check_frame(a, b, c, d)
  256. #endif /* VM_CHECK_MODE > 0 */
  257. #if USE_DEBUG_COUNTER
  258. static void
  259. vm_push_frame_debug_counter_inc(
  260. const struct rb_execution_context_struct *ec,
  261. const struct rb_control_frame_struct *reg_cfp,
  262. VALUE type)
  263. {
  264. const struct rb_control_frame_struct *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(reg_cfp);
  265. RB_DEBUG_COUNTER_INC(frame_push);
  266. if (RUBY_VM_END_CONTROL_FRAME(ec) != prev_cfp) {
  267. const bool curr = VM_FRAME_RUBYFRAME_P(reg_cfp);
  268. const bool prev = VM_FRAME_RUBYFRAME_P(prev_cfp);
  269. if (prev) {
  270. if (curr) {
  271. RB_DEBUG_COUNTER_INC(frame_R2R);
  272. }
  273. else {
  274. RB_DEBUG_COUNTER_INC(frame_R2C);
  275. }
  276. }
  277. else {
  278. if (curr) {
  279. RB_DEBUG_COUNTER_INC(frame_C2R);
  280. }
  281. else {
  282. RB_DEBUG_COUNTER_INC(frame_C2C);
  283. }
  284. }
  285. }
  286. switch (type & VM_FRAME_MAGIC_MASK) {
  287. case VM_FRAME_MAGIC_METHOD: RB_DEBUG_COUNTER_INC(frame_push_method); return;
  288. case VM_FRAME_MAGIC_BLOCK: RB_DEBUG_COUNTER_INC(frame_push_block); return;
  289. case VM_FRAME_MAGIC_CLASS: RB_DEBUG_COUNTER_INC(frame_push_class); return;
  290. case VM_FRAME_MAGIC_TOP: RB_DEBUG_COUNTER_INC(frame_push_top); return;
  291. case VM_FRAME_MAGIC_CFUNC: RB_DEBUG_COUNTER_INC(frame_push_cfunc); return;
  292. case VM_FRAME_MAGIC_IFUNC: RB_DEBUG_COUNTER_INC(frame_push_ifunc); return;
  293. case VM_FRAME_MAGIC_EVAL: RB_DEBUG_COUNTER_INC(frame_push_eval); return;
  294. case VM_FRAME_MAGIC_RESCUE: RB_DEBUG_COUNTER_INC(frame_push_rescue); return;
  295. case VM_FRAME_MAGIC_DUMMY: RB_DEBUG_COUNTER_INC(frame_push_dummy); return;
  296. }
  297. rb_bug("unreachable");
  298. }
  299. #else
  300. #define vm_push_frame_debug_counter_inc(ec, cfp, t) /* void */
  301. #endif
  302. STATIC_ASSERT(VM_ENV_DATA_INDEX_ME_CREF, VM_ENV_DATA_INDEX_ME_CREF == -2);
  303. STATIC_ASSERT(VM_ENV_DATA_INDEX_SPECVAL, VM_ENV_DATA_INDEX_SPECVAL == -1);
  304. STATIC_ASSERT(VM_ENV_DATA_INDEX_FLAGS, VM_ENV_DATA_INDEX_FLAGS == -0);
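/* A rough sketch of the frame and environment layout that vm_push_frame()
 * below builds on the VM value stack (matching the three STATIC_ASSERTs above):
 *
 *   local 0 .. local (local_size-1)    <- initialized to Qnil
 *   ep[-2]  cref_or_me                 (VM_ENV_DATA_INDEX_ME_CREF)
 *   ep[-1]  specval                    (VM_ENV_DATA_INDEX_SPECVAL)
 *   ep[ 0]  type / env flags           (VM_ENV_DATA_INDEX_FLAGS)   <- cfp->ep
 *                                                                  <- cfp->sp, __bp__
 */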
  305. static void
  306. vm_push_frame(rb_execution_context_t *ec,
  307. const rb_iseq_t *iseq,
  308. VALUE type,
  309. VALUE self,
  310. VALUE specval,
  311. VALUE cref_or_me,
  312. const VALUE *pc,
  313. VALUE *sp,
  314. int local_size,
  315. int stack_max)
  316. {
  317. rb_control_frame_t *const cfp = RUBY_VM_NEXT_CONTROL_FRAME(ec->cfp);
  318. vm_check_frame(type, specval, cref_or_me, iseq);
  319. VM_ASSERT(local_size >= 0);
  320. /* check stack overflow */
  321. CHECK_VM_STACK_OVERFLOW0(cfp, sp, local_size + stack_max);
  322. vm_check_canary(ec, sp);
  323. /* setup vm value stack */
  324. /* initialize local variables */
  325. for (int i=0; i < local_size; i++) {
  326. *sp++ = Qnil;
  327. }
  328. /* setup ep with managing data */
  329. *sp++ = cref_or_me; /* ep[-2] / Qnil or T_IMEMO(cref) or T_IMEMO(ment) */
  330. *sp++ = specval /* ep[-1] / block handler or prev env ptr */;
  331. *sp++ = type; /* ep[-0] / ENV_FLAGS */
  332. /* setup new frame */
  333. *cfp = (const struct rb_control_frame_struct) {
  334. .pc = pc,
  335. .sp = sp,
  336. .iseq = iseq,
  337. .self = self,
  338. .ep = sp - 1,
  339. .block_code = NULL,
  340. .__bp__ = sp, /* Store initial value of ep as bp to skip calculation cost of bp on JIT cancellation. */
  341. #if VM_DEBUG_BP_CHECK
  342. .bp_check = sp,
  343. #endif
  344. };
  345. ec->cfp = cfp;
  346. if (VMDEBUG == 2) {
  347. SDR();
  348. }
  349. vm_push_frame_debug_counter_inc(ec, cfp, type);
  350. }
  351. /* return TRUE if the frame is finished */
  352. static inline int
  353. vm_pop_frame(rb_execution_context_t *ec, rb_control_frame_t *cfp, const VALUE *ep)
  354. {
  355. VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
  356. if (VM_CHECK_MODE >= 4) rb_gc_verify_internal_consistency();
  357. if (VMDEBUG == 2) SDR();
  358. RUBY_VM_CHECK_INTS(ec);
  359. ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
  360. return flags & VM_FRAME_FLAG_FINISH;
  361. }
  362. MJIT_STATIC void
  363. rb_vm_pop_frame(rb_execution_context_t *ec)
  364. {
  365. vm_pop_frame(ec, ec->cfp, ec->cfp->ep);
  366. }
  367. /* method dispatch */
  368. static inline VALUE
  369. rb_arity_error_new(int argc, int min, int max)
  370. {
  371. VALUE err_mess = 0;
  372. if (min == max) {
  373. err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d)", argc, min);
  374. }
  375. else if (max == UNLIMITED_ARGUMENTS) {
  376. err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d+)", argc, min);
  377. }
  378. else {
  379. err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d..%d)", argc, min, max);
  380. }
  381. return rb_exc_new3(rb_eArgError, err_mess);
  382. }
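/* For example, rb_arity_error_new(2, 1, 1) produces an ArgumentError whose
 * message is "wrong number of arguments (given 2, expected 1)", and
 * rb_arity_error_new(0, 1, UNLIMITED_ARGUMENTS) produces
 * "wrong number of arguments (given 0, expected 1+)". */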
  383. MJIT_STATIC void
  384. rb_error_arity(int argc, int min, int max)
  385. {
  386. rb_exc_raise(rb_arity_error_new(argc, min, max));
  387. }
  388. /* lvar */
  389. NOINLINE(static void vm_env_write_slowpath(const VALUE *ep, int index, VALUE v));
  390. static void
  391. vm_env_write_slowpath(const VALUE *ep, int index, VALUE v)
  392. {
393. /* remember env value forcibly */
  394. rb_gc_writebarrier_remember(VM_ENV_ENVVAL(ep));
  395. VM_FORCE_WRITE(&ep[index], v);
  396. VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_WB_REQUIRED);
  397. RB_DEBUG_COUNTER_INC(lvar_set_slowpath);
  398. }
  399. static inline void
  400. vm_env_write(const VALUE *ep, int index, VALUE v)
  401. {
  402. VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
  403. if (LIKELY((flags & VM_ENV_FLAG_WB_REQUIRED) == 0)) {
  404. VM_STACK_ENV_WRITE(ep, index, v);
  405. }
  406. else {
  407. vm_env_write_slowpath(ep, index, v);
  408. }
  409. }
  410. MJIT_STATIC VALUE
  411. rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
  412. {
  413. if (block_handler == VM_BLOCK_HANDLER_NONE) {
  414. return Qnil;
  415. }
  416. else {
  417. switch (vm_block_handler_type(block_handler)) {
  418. case block_handler_type_iseq:
  419. case block_handler_type_ifunc:
  420. return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
  421. case block_handler_type_symbol:
  422. return rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
  423. case block_handler_type_proc:
  424. return VM_BH_TO_PROC(block_handler);
  425. default:
  426. VM_UNREACHABLE(rb_vm_bh_to_procval);
  427. }
  428. }
  429. }
  430. /* svar */
  431. #if VM_CHECK_MODE > 0
  432. static int
  433. vm_svar_valid_p(VALUE svar)
  434. {
  435. if (RB_TYPE_P((VALUE)svar, T_IMEMO)) {
  436. switch (imemo_type(svar)) {
  437. case imemo_svar:
  438. case imemo_cref:
  439. case imemo_ment:
  440. return TRUE;
  441. default:
  442. break;
  443. }
  444. }
  445. rb_bug("vm_svar_valid_p: unknown type: %s", rb_obj_info(svar));
  446. return FALSE;
  447. }
  448. #endif
  449. static inline struct vm_svar *
  450. lep_svar(const rb_execution_context_t *ec, const VALUE *lep)
  451. {
  452. VALUE svar;
  453. if (lep && (ec == NULL || ec->root_lep != lep)) {
  454. svar = lep[VM_ENV_DATA_INDEX_ME_CREF];
  455. }
  456. else {
  457. svar = ec->root_svar;
  458. }
  459. VM_ASSERT(svar == Qfalse || vm_svar_valid_p(svar));
  460. return (struct vm_svar *)svar;
  461. }
  462. static inline void
  463. lep_svar_write(const rb_execution_context_t *ec, const VALUE *lep, const struct vm_svar *svar)
  464. {
  465. VM_ASSERT(vm_svar_valid_p((VALUE)svar));
  466. if (lep && (ec == NULL || ec->root_lep != lep)) {
  467. vm_env_write(lep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)svar);
  468. }
  469. else {
  470. RB_OBJ_WRITE(rb_ec_thread_ptr(ec)->self, &ec->root_svar, svar);
  471. }
  472. }
  473. static VALUE
  474. lep_svar_get(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key)
  475. {
  476. const struct vm_svar *svar = lep_svar(ec, lep);
  477. if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) return Qnil;
  478. switch (key) {
  479. case VM_SVAR_LASTLINE:
  480. return svar->lastline;
  481. case VM_SVAR_BACKREF:
  482. return svar->backref;
  483. default: {
  484. const VALUE ary = svar->others;
  485. if (NIL_P(ary)) {
  486. return Qnil;
  487. }
  488. else {
  489. return rb_ary_entry(ary, key - VM_SVAR_EXTRA_START);
  490. }
  491. }
  492. }
  493. }
  494. static struct vm_svar *
  495. svar_new(VALUE obj)
  496. {
  497. return (struct vm_svar *)rb_imemo_new(imemo_svar, Qnil, Qnil, Qnil, obj);
  498. }
  499. static void
  500. lep_svar_set(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, VALUE val)
  501. {
  502. struct vm_svar *svar = lep_svar(ec, lep);
  503. if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) {
  504. lep_svar_write(ec, lep, svar = svar_new((VALUE)svar));
  505. }
  506. switch (key) {
  507. case VM_SVAR_LASTLINE:
  508. RB_OBJ_WRITE(svar, &svar->lastline, val);
  509. return;
  510. case VM_SVAR_BACKREF:
  511. RB_OBJ_WRITE(svar, &svar->backref, val);
  512. return;
  513. default: {
  514. VALUE ary = svar->others;
  515. if (NIL_P(ary)) {
  516. RB_OBJ_WRITE(svar, &svar->others, ary = rb_ary_new());
  517. }
  518. rb_ary_store(ary, key - VM_SVAR_EXTRA_START, val);
  519. }
  520. }
  521. }
  522. static inline VALUE
  523. vm_getspecial(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, rb_num_t type)
  524. {
  525. VALUE val;
  526. if (type == 0) {
  527. val = lep_svar_get(ec, lep, key);
  528. }
  529. else {
  530. VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
  531. if (type & 0x01) {
  532. switch (type >> 1) {
  533. case '&':
  534. val = rb_reg_last_match(backref);
  535. break;
  536. case '`':
  537. val = rb_reg_match_pre(backref);
  538. break;
  539. case '\'':
  540. val = rb_reg_match_post(backref);
  541. break;
  542. case '+':
  543. val = rb_reg_match_last(backref);
  544. break;
  545. default:
  546. rb_bug("unexpected back-ref");
  547. }
  548. }
  549. else {
  550. val = rb_reg_nth_match((int)(type >> 1), backref);
  551. }
  552. }
  553. return val;
  554. }
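/* In vm_getspecial() above, `type` selects the special variable, roughly:
 *   type == 0     -> an svar slot chosen by `key`
 *                    (VM_SVAR_LASTLINE is $_, VM_SVAR_BACKREF is $~)
 *   type & 0x01   -> a named back-reference whose character is type >> 1:
 *                    '&' -> $&, '`' -> $`, '\'' -> $', '+' -> $+
 *   otherwise     -> the numbered group $N with N = type >> 1
 */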
  555. PUREFUNC(static rb_callable_method_entry_t *check_method_entry(VALUE obj, int can_be_svar));
  556. static rb_callable_method_entry_t *
  557. check_method_entry(VALUE obj, int can_be_svar)
  558. {
  559. if (obj == Qfalse) return NULL;
  560. #if VM_CHECK_MODE > 0
  561. if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_method_entry: unknown type: %s", rb_obj_info(obj));
  562. #endif
  563. switch (imemo_type(obj)) {
  564. case imemo_ment:
  565. return (rb_callable_method_entry_t *)obj;
  566. case imemo_cref:
  567. return NULL;
  568. case imemo_svar:
  569. if (can_be_svar) {
  570. return check_method_entry(((struct vm_svar *)obj)->cref_or_me, FALSE);
  571. }
  572. default:
  573. #if VM_CHECK_MODE > 0
  574. rb_bug("check_method_entry: svar should not be there:");
  575. #endif
  576. return NULL;
  577. }
  578. }
  579. MJIT_STATIC const rb_callable_method_entry_t *
  580. rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
  581. {
  582. const VALUE *ep = cfp->ep;
  583. rb_callable_method_entry_t *me;
  584. while (!VM_ENV_LOCAL_P(ep)) {
  585. if ((me = check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
  586. ep = VM_ENV_PREV_EP(ep);
  587. }
  588. return check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
  589. }
  590. static rb_iseq_t *
  591. method_entry_iseqptr(const rb_callable_method_entry_t *me)
  592. {
  593. switch (me->def->type) {
  594. case VM_METHOD_TYPE_ISEQ:
  595. return me->def->body.iseq.iseqptr;
  596. default:
  597. return NULL;
  598. }
  599. }
  600. static rb_cref_t *
  601. method_entry_cref(const rb_callable_method_entry_t *me)
  602. {
  603. switch (me->def->type) {
  604. case VM_METHOD_TYPE_ISEQ:
  605. return me->def->body.iseq.cref;
  606. default:
  607. return NULL;
  608. }
  609. }
  610. #if VM_CHECK_MODE == 0
  611. PUREFUNC(static rb_cref_t *check_cref(VALUE, int));
  612. #endif
  613. static rb_cref_t *
  614. check_cref(VALUE obj, int can_be_svar)
  615. {
  616. if (obj == Qfalse) return NULL;
  617. #if VM_CHECK_MODE > 0
  618. if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_cref: unknown type: %s", rb_obj_info(obj));
  619. #endif
  620. switch (imemo_type(obj)) {
  621. case imemo_ment:
  622. return method_entry_cref((rb_callable_method_entry_t *)obj);
  623. case imemo_cref:
  624. return (rb_cref_t *)obj;
  625. case imemo_svar:
  626. if (can_be_svar) {
  627. return check_cref(((struct vm_svar *)obj)->cref_or_me, FALSE);
  628. }
  629. default:
  630. #if VM_CHECK_MODE > 0
  631. rb_bug("check_method_entry: svar should not be there:");
  632. #endif
  633. return NULL;
  634. }
  635. }
  636. static inline rb_cref_t *
  637. vm_env_cref(const VALUE *ep)
  638. {
  639. rb_cref_t *cref;
  640. while (!VM_ENV_LOCAL_P(ep)) {
  641. if ((cref = check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return cref;
  642. ep = VM_ENV_PREV_EP(ep);
  643. }
  644. return check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
  645. }
  646. static int
  647. is_cref(const VALUE v, int can_be_svar)
  648. {
  649. if (RB_TYPE_P(v, T_IMEMO)) {
  650. switch (imemo_type(v)) {
  651. case imemo_cref:
  652. return TRUE;
  653. case imemo_svar:
  654. if (can_be_svar) return is_cref(((struct vm_svar *)v)->cref_or_me, FALSE);
  655. default:
  656. break;
  657. }
  658. }
  659. return FALSE;
  660. }
  661. static int
  662. vm_env_cref_by_cref(const VALUE *ep)
  663. {
  664. while (!VM_ENV_LOCAL_P(ep)) {
  665. if (is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) return TRUE;
  666. ep = VM_ENV_PREV_EP(ep);
  667. }
  668. return is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
  669. }
  670. static rb_cref_t *
  671. cref_replace_with_duplicated_cref_each_frame(const VALUE *vptr, int can_be_svar, VALUE parent)
  672. {
  673. const VALUE v = *vptr;
  674. rb_cref_t *cref, *new_cref;
  675. if (RB_TYPE_P(v, T_IMEMO)) {
  676. switch (imemo_type(v)) {
  677. case imemo_cref:
  678. cref = (rb_cref_t *)v;
  679. new_cref = vm_cref_dup(cref);
  680. if (parent) {
  681. RB_OBJ_WRITE(parent, vptr, new_cref);
  682. }
  683. else {
  684. VM_FORCE_WRITE(vptr, (VALUE)new_cref);
  685. }
  686. return (rb_cref_t *)new_cref;
  687. case imemo_svar:
  688. if (can_be_svar) {
  689. return cref_replace_with_duplicated_cref_each_frame((const VALUE *)&((struct vm_svar *)v)->cref_or_me, FALSE, v);
  690. }
  691. /* fall through */
  692. case imemo_ment:
  693. rb_bug("cref_replace_with_duplicated_cref_each_frame: unreachable");
  694. default:
  695. break;
  696. }
  697. }
  698. return FALSE;
  699. }
  700. static rb_cref_t *
  701. vm_cref_replace_with_duplicated_cref(const VALUE *ep)
  702. {
  703. if (vm_env_cref_by_cref(ep)) {
  704. rb_cref_t *cref;
  705. VALUE envval;
  706. while (!VM_ENV_LOCAL_P(ep)) {
  707. envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
  708. if ((cref = cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE, envval)) != NULL) {
  709. return cref;
  710. }
  711. ep = VM_ENV_PREV_EP(ep);
  712. }
  713. envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
  714. return cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE, envval);
  715. }
  716. else {
  717. rb_bug("vm_cref_dup: unreachable");
  718. }
  719. }
  720. static rb_cref_t *
  721. vm_get_cref(const VALUE *ep)
  722. {
  723. rb_cref_t *cref = vm_env_cref(ep);
  724. if (cref != NULL) {
  725. return cref;
  726. }
  727. else {
  728. rb_bug("vm_get_cref: unreachable");
  729. }
  730. }
  731. static rb_cref_t *
  732. vm_ec_cref(const rb_execution_context_t *ec)
  733. {
  734. const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
  735. if (cfp == NULL) {
  736. return NULL;
  737. }
  738. return vm_get_cref(cfp->ep);
  739. }
  740. static const rb_cref_t *
  741. vm_get_const_key_cref(const VALUE *ep)
  742. {
  743. const rb_cref_t *cref = vm_get_cref(ep);
  744. const rb_cref_t *key_cref = cref;
  745. while (cref) {
  746. if (FL_TEST(CREF_CLASS(cref), FL_SINGLETON) ||
  747. FL_TEST(CREF_CLASS(cref), RCLASS_CLONED)) {
  748. return key_cref;
  749. }
  750. cref = CREF_NEXT(cref);
  751. }
  752. /* does not include singleton class */
  753. return NULL;
  754. }
  755. void
  756. rb_vm_rewrite_cref(rb_cref_t *cref, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr)
  757. {
  758. rb_cref_t *new_cref;
  759. while (cref) {
  760. if (CREF_CLASS(cref) == old_klass) {
  761. new_cref = vm_cref_new_use_prev(new_klass, METHOD_VISI_UNDEF, FALSE, cref, FALSE);
  762. *new_cref_ptr = new_cref;
  763. return;
  764. }
  765. new_cref = vm_cref_new_use_prev(CREF_CLASS(cref), METHOD_VISI_UNDEF, FALSE, cref, FALSE);
  766. cref = CREF_NEXT(cref);
  767. *new_cref_ptr = new_cref;
  768. new_cref_ptr = (rb_cref_t **)&new_cref->next;
  769. }
  770. *new_cref_ptr = NULL;
  771. }
  772. static rb_cref_t *
  773. vm_cref_push(const rb_execution_context_t *ec, VALUE klass, const VALUE *ep, int pushed_by_eval)
  774. {
  775. rb_cref_t *prev_cref = NULL;
  776. if (ep) {
  777. prev_cref = vm_env_cref(ep);
  778. }
  779. else {
  780. rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(ec, ec->cfp);
  781. if (cfp) {
  782. prev_cref = vm_env_cref(cfp->ep);
  783. }
  784. }
  785. return vm_cref_new(klass, METHOD_VISI_PUBLIC, FALSE, prev_cref, pushed_by_eval);
  786. }
  787. static inline VALUE
  788. vm_get_cbase(const VALUE *ep)
  789. {
  790. const rb_cref_t *cref = vm_get_cref(ep);
  791. VALUE klass = Qundef;
  792. while (cref) {
  793. if ((klass = CREF_CLASS(cref)) != 0) {
  794. break;
  795. }
  796. cref = CREF_NEXT(cref);
  797. }
  798. return klass;
  799. }
  800. static inline VALUE
  801. vm_get_const_base(const VALUE *ep)
  802. {
  803. const rb_cref_t *cref = vm_get_cref(ep);
  804. VALUE klass = Qundef;
  805. while (cref) {
  806. if (!CREF_PUSHED_BY_EVAL(cref) &&
  807. (klass = CREF_CLASS(cref)) != 0) {
  808. break;
  809. }
  810. cref = CREF_NEXT(cref);
  811. }
  812. return klass;
  813. }
  814. static inline void
  815. vm_check_if_namespace(VALUE klass)
  816. {
  817. if (!RB_TYPE_P(klass, T_CLASS) && !RB_TYPE_P(klass, T_MODULE)) {
  818. rb_raise(rb_eTypeError, "%+"PRIsVALUE" is not a class/module", klass);
  819. }
  820. }
  821. static inline void
  822. vm_ensure_not_refinement_module(VALUE self)
  823. {
  824. if (RB_TYPE_P(self, T_MODULE) && FL_TEST(self, RMODULE_IS_REFINEMENT)) {
  825. rb_warn("not defined at the refinement, but at the outer class/module");
  826. }
  827. }
  828. static inline VALUE
  829. vm_get_iclass(const rb_control_frame_t *cfp, VALUE klass)
  830. {
  831. return klass;
  832. }
  833. static inline VALUE
  834. vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, bool allow_nil, int is_defined)
  835. {
  836. void rb_const_warn_if_deprecated(const rb_const_entry_t *ce, VALUE klass, ID id);
  837. VALUE val;
  838. if (orig_klass == Qnil && allow_nil) {
  839. /* in current lexical scope */
  840. const rb_cref_t *root_cref = vm_get_cref(ec->cfp->ep);
  841. const rb_cref_t *cref;
  842. VALUE klass = Qnil;
  843. while (root_cref && CREF_PUSHED_BY_EVAL(root_cref)) {
  844. root_cref = CREF_NEXT(root_cref);
  845. }
  846. cref = root_cref;
  847. while (cref && CREF_NEXT(cref)) {
  848. if (CREF_PUSHED_BY_EVAL(cref)) {
  849. klass = Qnil;
  850. }
  851. else {
  852. klass = CREF_CLASS(cref);
  853. }
  854. cref = CREF_NEXT(cref);
  855. if (!NIL_P(klass)) {
  856. VALUE av, am = 0;
  857. rb_const_entry_t *ce;
  858. search_continue:
  859. if ((ce = rb_const_lookup(klass, id))) {
  860. rb_const_warn_if_deprecated(ce, klass, id);
  861. val = ce->value;
  862. if (val == Qundef) {
  863. if (am == klass) break;
  864. am = klass;
  865. if (is_defined) return 1;
  866. if (rb_autoloading_value(klass, id, &av, NULL)) return av;
  867. rb_autoload_load(klass, id);
  868. goto search_continue;
  869. }
  870. else {
  871. if (is_defined) {
  872. return 1;
  873. }
  874. else {
  875. if (UNLIKELY(!rb_ractor_main_p())) {
  876. if (!rb_ractor_shareable_p(val)) {
  877. rb_raise(rb_eRactorIsolationError,
  878. "can not access non-shareable objects in constant %"PRIsVALUE"::%s by non-main ractor.", rb_class_path(klass), rb_id2name(id));
  879. }
  880. }
  881. return val;
  882. }
  883. }
  884. }
  885. }
  886. }
  887. /* search self */
  888. if (root_cref && !NIL_P(CREF_CLASS(root_cref))) {
  889. klass = vm_get_iclass(ec->cfp, CREF_CLASS(root_cref));
  890. }
  891. else {
  892. klass = CLASS_OF(ec->cfp->self);
  893. }
  894. if (is_defined) {
  895. return rb_const_defined(klass, id);
  896. }
  897. else {
  898. return rb_const_get(klass, id);
  899. }
  900. }
  901. else {
  902. vm_check_if_namespace(orig_klass);
  903. if (is_defined) {
  904. return rb_public_const_defined_from(orig_klass, id);
  905. }
  906. else {
  907. return rb_public_const_get_from(orig_klass, id);
  908. }
  909. }
  910. }
  911. static inline VALUE
  912. vm_get_cvar_base(const rb_cref_t *cref, const rb_control_frame_t *cfp, int top_level_raise)
  913. {
  914. VALUE klass;
  915. if (!cref) {
  916. rb_bug("vm_get_cvar_base: no cref");
  917. }
  918. while (CREF_NEXT(cref) &&
  919. (NIL_P(CREF_CLASS(cref)) || FL_TEST(CREF_CLASS(cref), FL_SINGLETON) ||
  920. CREF_PUSHED_BY_EVAL(cref))) {
  921. cref = CREF_NEXT(cref);
  922. }
  923. if (top_level_raise && !CREF_NEXT(cref)) {
  924. rb_raise(rb_eRuntimeError, "class variable access from toplevel");
  925. }
  926. klass = vm_get_iclass(cfp, CREF_CLASS(cref));
  927. if (NIL_P(klass)) {
  928. rb_raise(rb_eTypeError, "no class variables available");
  929. }
  930. return klass;
  931. }
  932. static VALUE
  933. vm_search_const_defined_class(const VALUE cbase, ID id)
  934. {
  935. if (rb_const_defined_at(cbase, id)) return cbase;
  936. if (cbase == rb_cObject) {
  937. VALUE tmp = RCLASS_SUPER(cbase);
  938. while (tmp) {
  939. if (rb_const_defined_at(tmp, id)) return tmp;
  940. tmp = RCLASS_SUPER(tmp);
  941. }
  942. }
  943. return 0;
  944. }
  945. static bool
  946. iv_index_tbl_lookup(struct st_table *iv_index_tbl, ID id, struct rb_iv_index_tbl_entry **ent)
  947. {
  948. int found;
  949. if (iv_index_tbl == NULL) return false;
  950. RB_VM_LOCK_ENTER();
  951. {
  952. found = st_lookup(iv_index_tbl, (st_data_t)id, (st_data_t *)ent);
  953. }
  954. RB_VM_LOCK_LEAVE();
  955. return found ? true : false;
  956. }
  957. ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, struct rb_iv_index_tbl_entry *ent));
  958. static inline void
  959. fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, struct rb_iv_index_tbl_entry *ent)
  960. {
  961. // fill cache
  962. if (!is_attr) {
  963. ic->entry = ent;
  964. RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
  965. }
  966. else {
  967. vm_cc_attr_index_set(cc, (int)ent->index + 1);
  968. }
  969. }
  970. ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, const rb_iseq_t *, IVC, const struct rb_callcache *, int));
  971. static inline VALUE
  972. vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
  973. {
  974. #if OPT_IC_FOR_IVAR
  975. VALUE val = Qundef;
  976. if (SPECIAL_CONST_P(obj)) {
  977. // frozen?
  978. }
  979. else if (LIKELY(is_attr ?
  980. RB_DEBUG_COUNTER_INC_UNLESS(ivar_get_ic_miss_unset, vm_cc_attr_index(cc) > 0) :
  981. RB_DEBUG_COUNTER_INC_UNLESS(ivar_get_ic_miss_serial,
  982. ic->entry && ic->entry->class_serial == RCLASS_SERIAL(RBASIC(obj)->klass)))) {
  983. uint32_t index = !is_attr ? ic->entry->index : (vm_cc_attr_index(cc) - 1);
  984. RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);
  985. if (LIKELY(BUILTIN_TYPE(obj) == T_OBJECT) &&
  986. LIKELY(index < ROBJECT_NUMIV(obj))) {
  987. val = ROBJECT_IVPTR(obj)[index];
  988. VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
  989. }
  990. else if (FL_TEST_RAW(obj, FL_EXIVAR)) {
  991. val = rb_ivar_generic_lookup_with_index(obj, id, index);
  992. }
  993. goto ret;
  994. }
  995. else {
  996. struct rb_iv_index_tbl_entry *ent;
  997. if (BUILTIN_TYPE(obj) == T_OBJECT) {
  998. struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);
  999. if (iv_index_tbl && iv_index_tbl_lookup(iv_index_tbl, id, &ent)) {
  1000. fill_ivar_cache(iseq, ic, cc, is_attr, ent);
  1001. // get value
  1002. if (ent->index < ROBJECT_NUMIV(obj)) {
  1003. val = ROBJECT_IVPTR(obj)[ent->index];
  1004. VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
  1005. }
  1006. }
  1007. }
  1008. else if (FL_TEST_RAW(obj, FL_EXIVAR)) {
  1009. struct st_table *iv_index_tbl = RCLASS_IV_INDEX_TBL(rb_obj_class(obj));
  1010. if (iv_index_tbl && iv_index_tbl_lookup(iv_index_tbl, id, &ent)) {
  1011. fill_ivar_cache(iseq, ic, cc, is_attr, ent);
  1012. val = rb_ivar_generic_lookup_with_index(obj, id, ent->index);
  1013. }
  1014. }
  1015. else {
  1016. // T_CLASS / T_MODULE
  1017. goto general_path;
  1018. }
  1019. ret:
  1020. if (LIKELY(val != Qundef)) {
  1021. return val;
  1022. }
  1023. else {
  1024. return Qnil;
  1025. }
  1026. }
  1027. general_path:
  1028. #endif /* OPT_IC_FOR_IVAR */
  1029. RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);
  1030. if (is_attr) {
  1031. return rb_attr_get(obj, id);
  1032. }
  1033. else {
  1034. return rb_ivar_get(obj, id);
  1035. }
  1036. }
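/* Inline-cache sketch for vm_getivar() above: a hit means the class serial
 * recorded in the IVC (or, for attr_reader, the attr index stored in the call
 * cache) is still valid, so the value is read directly from
 * ROBJECT_IVPTR(obj)[index].  On a miss the index is looked up again in the
 * class's iv_index_tbl, the cache is refilled via fill_ivar_cache(), and
 * anything unusual falls back to rb_ivar_get() / rb_attr_get(). */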
  1037. ALWAYS_INLINE(static VALUE vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr));
  1038. NOINLINE(static VALUE vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic));
  1039. NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc));
  1040. static VALUE
  1041. vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
  1042. {
  1043. rb_check_frozen_internal(obj);
  1044. #if OPT_IC_FOR_IVAR
  1045. if (RB_TYPE_P(obj, T_OBJECT)) {
  1046. struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);
  1047. struct rb_iv_index_tbl_entry *ent;
  1048. if (iv_index_tbl_lookup(iv_index_tbl, id, &ent)) {
  1049. if (!is_attr) {
  1050. ic->entry = ent;
  1051. RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
  1052. }
  1053. else if (ent->index >= INT_MAX) {
  1054. rb_raise(rb_eArgError, "too many instance variables");
  1055. }
  1056. else {
  1057. vm_cc_attr_index_set(cc, (int)(ent->index + 1));
  1058. }
  1059. uint32_t index = ent->index;
  1060. if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {
  1061. rb_init_iv_list(obj);
  1062. }
  1063. VALUE *ptr = ROBJECT_IVPTR(obj);
  1064. RB_OBJ_WRITE(obj, &ptr[index], val);
  1065. RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_iv_hit);
  1066. return val;
  1067. }
  1068. }
  1069. #endif
  1070. RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);
  1071. return rb_ivar_set(obj, id, val);
  1072. }
  1073. static VALUE
  1074. vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic)
  1075. {
  1076. return vm_setivar_slowpath(obj, id, val, iseq, ic, NULL, false);
  1077. }
  1078. static VALUE
  1079. vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc)
  1080. {
  1081. return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true);
  1082. }
  1083. static inline VALUE
  1084. vm_setivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
  1085. {
  1086. #if OPT_IC_FOR_IVAR
  1087. if (LIKELY(RB_TYPE_P(obj, T_OBJECT)) &&
  1088. LIKELY(!RB_OBJ_FROZEN_RAW(obj))) {
  1089. VM_ASSERT(!rb_ractor_shareable_p(obj));
  1090. if (LIKELY(
  1091. (!is_attr && RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_serial, ic->entry && ic->entry->class_serial == RCLASS_SERIAL(RBASIC(obj)->klass))) ||
  1092. ( is_attr && RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_unset, vm_cc_attr_index(cc) > 0)))) {
  1093. uint32_t index = !is_attr ? ic->entry->index : vm_cc_attr_index(cc)-1;
  1094. if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {
  1095. rb_init_iv_list(obj);
  1096. }
  1097. VALUE *ptr = ROBJECT_IVPTR(obj);
  1098. RB_OBJ_WRITE(obj, &ptr[index], val);
  1099. RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
  1100. return val; /* inline cache hit */
  1101. }
  1102. }
  1103. else {
  1104. RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
  1105. }
  1106. #endif /* OPT_IC_FOR_IVAR */
  1107. if (is_attr) {
  1108. return vm_setivar_slowpath_attr(obj, id, val, cc);
  1109. }
  1110. else {
  1111. return vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
  1112. }
  1113. }
  1114. static VALUE
  1115. update_classvariable_cache(const rb_iseq_t *iseq, VALUE klass, ID id, ICVARC ic)
  1116. {
  1117. VALUE defined_class = 0;
  1118. VALUE cvar_value = rb_cvar_find(klass, id, &defined_class);
  1119. if (RB_TYPE_P(defined_class, T_ICLASS)) {
  1120. defined_class = RBASIC(defined_class)->klass;
  1121. }
  1122. struct rb_id_table *rb_cvc_tbl = RCLASS_CVC_TBL(defined_class);
  1123. if (!rb_cvc_tbl) {
  1124. rb_bug("the cvc table should be set");
  1125. }
  1126. VALUE ent_data;
  1127. if (!rb_id_table_lookup(rb_cvc_tbl, id, &ent_data)) {
  1128. rb_bug("should have cvar cache entry");
  1129. }
  1130. struct rb_cvar_class_tbl_entry *ent = (void *)ent_data;
  1131. ent->global_cvar_state = GET_GLOBAL_CVAR_STATE();
  1132. ic->entry = ent;
  1133. RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
  1134. return cvar_value;
  1135. }
  1136. static inline VALUE
  1137. vm_getclassvariable(const rb_iseq_t *iseq, const rb_cref_t *cref, const rb_control_frame_t *cfp, ID id, ICVARC ic)
  1138. {
  1139. if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE()) {
  1140. VALUE v = Qundef;
  1141. RB_DEBUG_COUNTER_INC(cvar_read_inline_hit);
  1142. if (st_lookup(RCLASS_IV_TBL(ic->entry->class_value), (st_data_t)id, &v)) {
  1143. return v;
  1144. }
  1145. }
  1146. VALUE klass = vm_get_cvar_base(cref, cfp, 1);
  1147. return update_classvariable_cache(iseq, klass, id, ic);
  1148. }
  1149. static inline void
  1150. vm_setclassvariable(const rb_iseq_t *iseq, const rb_cref_t *cref, const rb_control_frame_t *cfp, ID id, VALUE val, ICVARC ic)
  1151. {
  1152. if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE()) {
  1153. RB_DEBUG_COUNTER_INC(cvar_write_inline_hit);
  1154. rb_class_ivar_set(ic->entry->class_value, id, val);
  1155. return;
  1156. }
  1157. VALUE klass = vm_get_cvar_base(cref, cfp, 1);
  1158. rb_cvar_set(klass, id, val);
  1159. update_classvariable_cache(iseq, klass, id, ic);
  1160. }
  1161. static inline VALUE
  1162. vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
  1163. {
  1164. return vm_getivar(obj, id, iseq, ic, NULL, FALSE);
  1165. }
  1166. static inline void
  1167. vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
  1168. {
  1169. vm_setivar(obj, id, val, iseq, ic, 0, 0);
  1170. }
  1171. static VALUE
  1172. vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
  1173. {
  1174. /* continue throw */
  1175. if (FIXNUM_P(err)) {
  1176. ec->tag->state = FIX2INT(err);
  1177. }
  1178. else if (SYMBOL_P(err)) {
  1179. ec->tag->state = TAG_THROW;
  1180. }
  1181. else if (THROW_DATA_P(err)) {
  1182. ec->tag->state = THROW_DATA_STATE((struct vm_throw_data *)err);
  1183. }
  1184. else {
  1185. ec->tag->state = TAG_RAISE;
  1186. }
  1187. return err;
  1188. }
  1189. static VALUE
  1190. vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, enum ruby_tag_type state,
  1191. const int flag, const VALUE throwobj)
  1192. {
  1193. const rb_control_frame_t *escape_cfp = NULL;
  1194. const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */
  1195. if (flag != 0) {
  1196. /* do nothing */
  1197. }
  1198. else if (state == TAG_BREAK) {
  1199. int is_orphan = 1;
  1200. const VALUE *ep = GET_EP();
  1201. const rb_iseq_t *base_iseq = GET_ISEQ();
  1202. escape_cfp = reg_cfp;
  1203. while (base_iseq->body->type != ISEQ_TYPE_BLOCK) {
  1204. if (escape_cfp->iseq->body->type == ISEQ_TYPE_CLASS) {
  1205. escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
  1206. ep = escape_cfp->ep;
  1207. base_iseq = escape_cfp->iseq;
  1208. }
  1209. else {
  1210. ep = VM_ENV_PREV_EP(ep);
  1211. base_iseq = base_iseq->body->parent_iseq;
  1212. escape_cfp = rb_vm_search_cf_from_ep(ec, escape_cfp, ep);
  1213. VM_ASSERT(escape_cfp->iseq == base_iseq);
  1214. }
  1215. }
  1216. if (VM_FRAME_LAMBDA_P(escape_cfp)) {
  1217. /* lambda{... break ...} */
  1218. is_orphan = 0;
  1219. state = TAG_RETURN;
  1220. }
  1221. else {
  1222. ep = VM_ENV_PREV_EP(ep);
  1223. while (escape_cfp < eocfp) {
  1224. if (escape_cfp->ep == ep) {
  1225. const rb_iseq_t *const iseq = escape_cfp->iseq;
  1226. const VALUE epc = escape_cfp->pc - iseq->body->iseq_encoded;
  1227. const struct iseq_catch_table *const ct = iseq->body->catch_table;
  1228. unsigned int i;
  1229. if (!ct) break;
  1230. for (i=0; i < ct->size; i++) {
  1231. const struct iseq_catch_table_entry *const entry =
  1232. UNALIGNED_MEMBER_PTR(ct, entries[i]);
  1233. if (entry->type == CATCH_TYPE_BREAK &&
  1234. entry->iseq == base_iseq &&
  1235. entry->start < epc && entry->end >= epc) {
  1236. if (entry->cont == epc) { /* found! */
  1237. is_orphan = 0;
  1238. }
  1239. break;
  1240. }
  1241. }
  1242. break;
  1243. }
  1244. escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
  1245. }
  1246. }
  1247. if (is_orphan) {
  1248. rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
  1249. }
  1250. }
  1251. else if (state == TAG_RETRY) {
  1252. const VALUE *ep = VM_ENV_PREV_EP(GET_EP());
  1253. escape_cfp = rb_vm_search_cf_from_ep(ec, reg_cfp, ep);
  1254. }
  1255. else if (state == TAG_RETURN) {
  1256. const VALUE *current_ep = GET_EP();
  1257. const VALUE *target_ep = NULL, *target_lep, *ep = current_ep;
  1258. int in_class_frame = 0;
  1259. int toplevel = 1;
  1260. escape_cfp = reg_cfp;
  1261. // find target_lep, target_ep
  1262. while (!VM_ENV_LOCAL_P(ep)) {
  1263. if (VM_ENV_FLAGS(ep, VM_FRAME_FLAG_LAMBDA) && target_ep == NULL) {
  1264. target_ep = ep;
  1265. }
  1266. ep = VM_ENV_PREV_EP(ep);
  1267. }
  1268. target_lep = ep;
  1269. while (escape_cfp < eocfp) {
  1270. const VALUE *lep = VM_CF_LEP(escape_cfp);
  1271. if (!target_lep) {
  1272. target_lep = lep;
  1273. }
  1274. if (lep == target_lep &&
  1275. VM_FRAME_RUBYFRAME_P(escape_cfp) &&
  1276. escape_cfp->iseq->body->type == ISEQ_TYPE_CLASS) {
  1277. in_class_frame = 1;
  1278. target_lep = 0;
  1279. }
  1280. if (lep == target_lep) {
  1281. if (VM_FRAME_LAMBDA_P(escape_cfp)) {
  1282. toplevel = 0;
  1283. if (in_class_frame) {
  1284. /* lambda {class A; ... return ...; end} */
  1285. goto valid_return;
  1286. }
  1287. else {
  1288. const VALUE *tep = current_ep;
  1289. while (target_lep != tep) {
  1290. if (escape_cfp->ep == tep) {
  1291. /* in lambda */
  1292. if (tep == target_ep) {
  1293. goto valid_return;
  1294. }
  1295. else {
  1296. goto unexpected_return;
  1297. }
  1298. }
  1299. tep = VM_ENV_PREV_EP(tep);
  1300. }
  1301. }
  1302. }
  1303. else if (VM_FRAME_RUBYFRAME_P(escape_cfp)) {
  1304. switch (escape_cfp->iseq->body->type) {
  1305. case ISEQ_TYPE_TOP:
  1306. case ISEQ_TYPE_MAIN:
  1307. if (toplevel) {
  1308. if (in_class_frame) goto unexpected_return;
  1309. if (target_ep == NULL) {
  1310. goto valid_return;
  1311. }
  1312. else {
  1313. goto unexpected_return;
  1314. }
  1315. }
  1316. break;
  1317. case ISEQ_TYPE_EVAL:
  1318. case ISEQ_TYPE_CLASS:
  1319. toplevel = 0;
  1320. break;
  1321. default:
  1322. break;
  1323. }
  1324. }
  1325. }
  1326. if (escape_cfp->ep == target_lep && escape_cfp->iseq->body->type == ISEQ_TYPE_METHOD) {
  1327. if (target_ep == NULL) {
  1328. goto valid_return;
  1329. }
  1330. else {
  1331. goto unexpected_return;
  1332. }
  1333. }
  1334. escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
  1335. }
  1336. unexpected_return:;
  1337. rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);
  1338. valid_return:;
  1339. /* do nothing */
  1340. }
  1341. else {
  1342. rb_bug("isns(throw): unsupported throw type");
  1343. }
  1344. ec->tag->state = state;
  1345. return (VALUE)THROW_DATA_NEW(throwobj, escape_cfp, state);
  1346. }
  1347. static VALUE
  1348. vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
  1349. rb_num_t throw_state, VALUE throwobj)
  1350. {
  1351. const int state = (int)(throw_state & VM_THROW_STATE_MASK);
  1352. const int flag = (int)(throw_state & VM_THROW_NO_ESCAPE_FLAG);
  1353. if (state != 0) {
  1354. return vm_throw_start(ec, reg_cfp, state, flag, throwobj);
  1355. }
  1356. else {
  1357. return vm_throw_continue(ec, throwobj);
  1358. }
  1359. }
  1360. static inline void
  1361. vm_expandarray(VALUE *sp, VALUE ary, rb_num_t num, int flag)
  1362. {
  1363. int is_splat = flag & 0x01;
  1364. rb_num_t space_size = num + is_splat;
  1365. VALUE *base = sp - 1;
  1366. const VALUE *ptr;
  1367. rb_num_t len;
  1368. const VALUE obj = ary;
  1369. if (!RB_TYPE_P(ary, T_ARRAY) && NIL_P(ary = rb_check_array_type(ary))) {
  1370. ary = obj;
  1371. ptr = &ary;
  1372. len = 1;
  1373. }
  1374. else {
  1375. ptr = RARRAY_CONST_PTR_TRANSIENT(ary);
  1376. len = (rb_num_t)RARRAY_LEN(ary);
  1377. }
  1378. if (space_size == 0) {
  1379. /* no space left on stack */
  1380. }
  1381. else if (flag & 0x02) {
  1382. /* post: ..., nil ,ary[-1], ..., ary[0..-num] # top */
  1383. rb_num_t i = 0, j;
  1384. if (len < num) {
  1385. for (i=0; i<num-len; i++) {
  1386. *base++ = Qnil;
  1387. }
  1388. }
  1389. for (j=0; i<num; i++, j++) {
  1390. VALUE v = ptr[len - j - 1];
  1391. *base++ = v;
  1392. }
  1393. if (is_splat) {
  1394. *base = rb_ary_new4(len - j, ptr);
  1395. }
  1396. }
  1397. else {
  1398. /* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */
  1399. rb_num_t i;
  1400. VALUE *bptr = &base[space_size - 1];
  1401. for (i=0; i<num; i++) {
  1402. if (len <= i) {
  1403. for (; i<num; i++) {
  1404. *bptr-- = Qnil;
  1405. }
  1406. break;
  1407. }
  1408. *bptr-- = ptr[i];
  1409. }
  1410. if (is_splat) {
  1411. if (num > len) {
  1412. *bptr = rb_ary_new();
  1413. }
  1414. else {
  1415. *bptr = rb_ary_new4(len - num, ptr + num);
  1416. }
  1417. }
  1418. }
  1419. RB_GC_GUARD(ary);
  1420. }
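/* vm_expandarray() backs the expandarray instruction used by multiple
 * assignment; roughly (flag & 0x01 = splat, flag & 0x02 = post args):
 *   a, b  = ary   ->  expandarray 2, 0
 *   a, *b = ary   ->  expandarray 1, 1
 *   *a, b = ary   ->  expandarray 1, 3
 * A right-hand side that is not an Array (and does not convert via
 * rb_check_array_type()) is handled as a one-element array. */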
  1421. static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
  1422. static VALUE vm_mtbl_dump(VALUE klass, ID target_mid);
  1423. static struct rb_class_cc_entries *
  1424. vm_ccs_create(VALUE klass, const rb_callable_method_entry_t *cme)
  1425. {
  1426. struct rb_class_cc_entries *ccs = ALLOC(struct rb_class_cc_entries);
  1427. #if VM_CHECK_MODE > 0
  1428. ccs->debug_sig = ~(VALUE)ccs;
  1429. #endif
  1430. ccs->capa = 0;
  1431. ccs->len = 0;
  1432. RB_OBJ_WRITE(klass, &ccs->cme, cme);
  1433. METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme);
  1434. ccs->entries = NULL;
  1435. return ccs;
  1436. }
  1437. static void
  1438. vm_ccs_push(VALUE klass, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc)
  1439. {
  1440. if (! vm_cc_markable(cc)) {
  1441. return;
  1442. }
  1443. else if (! vm_ci_markable(ci)) {
  1444. return;
  1445. }
  1446. if (UNLIKELY(ccs->len == ccs->capa)) {
  1447. if (ccs->capa == 0) {
  1448. ccs->capa = 1;
  1449. ccs->entries = ALLOC_N(struct rb_class_cc_entries_entry, ccs->capa);
  1450. }
  1451. else {
  1452. ccs->capa *= 2;
  1453. REALLOC_N(ccs->entries, struct rb_class_cc_entries_entry, ccs->capa);
  1454. }
  1455. }
  1456. VM_ASSERT(ccs->len < ccs->capa);
  1457. const int pos = ccs->len++;
  1458. RB_OBJ_WRITE(klass, &ccs->entries[pos].ci, ci);
  1459. RB_OBJ_WRITE(klass, &ccs->entries[pos].cc, cc);
  1460. if (RB_DEBUG_COUNTER_SETMAX(ccs_maxlen, ccs->len)) {
  1461. // for tuning
  1462. // vm_mtbl_dump(klass, 0);
  1463. }
  1464. }
  1465. #if VM_CHECK_MODE > 0
  1466. void
  1467. rb_vm_ccs_dump(struct rb_class_cc_entries *ccs)
  1468. {
  1469. ruby_debug_printf("ccs:%p (%d,%d)\n", (void *)ccs, ccs->len, ccs->capa);
  1470. for (int i=0; i<ccs->len; i++) {
  1471. vm_ci_dump(ccs->entries[i].ci);
  1472. rp(ccs->entries[i].cc);
  1473. }
  1474. }
  1475. static int
  1476. vm_ccs_verify(struct rb_class_cc_entries *ccs, ID mid, VALUE klass)
  1477. {
  1478. VM_ASSERT(vm_ccs_p(ccs));
  1479. VM_ASSERT(ccs->len <= ccs->capa);
  1480. for (int i=0; i<ccs->len; i++) {
  1481. const struct rb_callinfo *ci = ccs->entries[i].ci;
  1482. const struct rb_callcache *cc = ccs->entries[i].cc;
  1483. VM_ASSERT(vm_ci_p(ci));
  1484. VM_ASSERT(vm_ci_mid(ci) == mid);
  1485. VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
  1486. VM_ASSERT(vm_cc_class_check(cc, klass));
  1487. VM_ASSERT(vm_cc_cme(cc) == ccs->cme);
  1488. }
  1489. return TRUE;
  1490. }
  1491. #endif
  1492. #ifndef MJIT_HEADER
  1493. static const struct rb_callcache *
  1494. vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
  1495. {
  1496. const ID mid = vm_ci_mid(ci);
  1497. struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
  1498. struct rb_class_cc_entries *ccs = NULL;
  1499. VALUE ccs_data;
  1500. if (cc_tbl) {
  1501. if (rb_id_table_lookup(cc_tbl, mid, &ccs_data)) {
  1502. ccs = (struct rb_class_cc_entries *)ccs_data;
  1503. const int ccs_len = ccs->len;
  1504. VM_ASSERT(vm_ccs_verify(ccs, mid, klass));
  1505. if (UNLIKELY(METHOD_ENTRY_INVALIDATED(ccs->cme))) {
  1506. rb_vm_ccs_free(ccs);
  1507. rb_id_table_delete(cc_tbl, mid);
  1508. ccs = NULL;
  1509. }
  1510. else {
  1511. for (int i=0; i<ccs_len; i++) {
  1512. const struct rb_callinfo *ccs_ci = ccs->entries[i].ci;
  1513. const struct rb_callcache *ccs_cc = ccs->entries[i].cc;
  1514. VM_ASSERT(vm_ci_p(ccs_ci));
  1515. VM_ASSERT(IMEMO_TYPE_P(ccs_cc, imemo_callcache));
  1516. if (ccs_ci == ci) { // TODO: equality
  1517. RB_DEBUG_COUNTER_INC(cc_found_in_ccs);
  1518. VM_ASSERT(vm_cc_cme(ccs_cc)->called_id == mid);
  1519. VM_ASSERT(ccs_cc->klass == klass);
  1520. VM_ASSERT(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(ccs_cc)));
  1521. return ccs_cc;
  1522. }
  1523. }
  1524. }
  1525. }
  1526. }
  1527. else {
  1528. cc_tbl = RCLASS_CC_TBL(klass) = rb_id_table_create(2);
  1529. }
  1530. RB_DEBUG_COUNTER_INC(cc_not_found_in_ccs);
  1531. const rb_callable_method_entry_t *cme;
  1532. if (ccs) {
  1533. cme = ccs->cme;
  1534. cme = UNDEFINED_METHOD_ENTRY_P(cme) ? NULL : cme;
  1535. VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
  1536. }
  1537. else {
  1538. cme = rb_callable_method_entry(klass, mid);
  1539. }
  1540. VM_ASSERT(cme == NULL || IMEMO_TYPE_P(cme, imemo_ment));
  1541. if (cme == NULL) {
  1542. // undef or not found: can't cache the information
  1543. VM_ASSERT(vm_cc_cme(&vm_empty_cc) == NULL);
  1544. return &vm_empty_cc;
  1545. }
  1546. VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
  1547. const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general);
  1548. METHOD_ENTRY_CACHED_SET((struct rb_callable_method_entry_struct *)cme);
  1549. if (ccs == NULL) {
  1550. VM_ASSERT(cc_tbl != NULL);
  1551. if (LIKELY(rb_id_table_lookup(cc_tbl, mid, &ccs_data))) {
  1552. // rb_callable_method_entry() prepares ccs.
  1553. ccs = (struct rb_class_cc_entries *)ccs_data;
  1554. }
  1555. else {
  1556. // TODO: required?
  1557. ccs = vm_ccs_create(klass, cme);
  1558. rb_id_table_insert(cc_tbl, mid, (VALUE)ccs);
  1559. }
  1560. }
  1561. vm_ccs_push(klass, ccs, ci, cc);
  1562. VM_ASSERT(vm_cc_cme(cc) != NULL);
  1563. VM_ASSERT(cme->called_id == mid);
  1564. VM_ASSERT(vm_cc_cme(cc)->called_id == mid);
  1565. return cc;
  1566. }
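/* Cache layout assumed by vm_search_cc() above: each class carries an id
 * table (RCLASS_CC_TBL) keyed by method id; each entry is an
 * rb_class_cc_entries (ccs) holding the resolved cme plus a growable list of
 * (callinfo, callcache) pairs.  Entries whose cme has been invalidated are
 * dropped, a lookup that finds no method returns &vm_empty_cc, and otherwise
 * a fresh cc made by vm_cc_new() is pushed onto the ccs. */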
  1567. MJIT_FUNC_EXPORTED const struct rb_callcache *
  1568. rb_vm_search_method_slowpath(const struct rb_callinfo *ci, VALUE klass)
  1569. {
  1570. const struct rb_callcache *cc;
  1571. VM_ASSERT(RB_TYPE_P(klass, T_CLASS) || RB_TYPE_P(klass, T_ICLASS));
  1572. RB_VM_LOCK_ENTER();
  1573. {
  1574. cc = vm_search_cc(klass, ci);
  1575. VM_ASSERT(cc);
  1576. VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
  1577. VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass);
  1578. VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc)));
  1579. VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)));
  1580. VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(ci));
  1581. }
  1582. RB_VM_LOCK_LEAVE();
  1583. return cc;
  1584. }
  1585. #endif
  1586. static const struct rb_callcache *
  1587. vm_search_method_slowpath0(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
  1588. {
  1589. #if USE_DEBUG_COUNTER
  1590. const struct rb_callcache *old_cc = cd->cc;
  1591. #endif
  1592. const struct rb_callcache *cc = rb_vm_search_method_slowpath(cd->ci, klass);
  1593. #if OPT_INLINE_METHOD_CACHE
  1594. cd->cc = cc;
  1595. const struct rb_callcache *empty_cc =
  1596. #ifdef MJIT_HEADER
  1597. rb_vm_empty_cc();
  1598. #else
  1599. &vm_empty_cc;
  1600. #endif
  1601. if (cd_owner && cc != empty_cc) RB_OBJ_WRITTEN(cd_owner, Qundef, cc);
  1602. #if USE_DEBUG_COUNTER
  1603. if (old_cc == &empty_cc) {
  1604. // empty
  1605. RB_DEBUG_COUNTER_INC(mc_inline_miss_empty);
  1606. }
  1607. else if (old_cc == cc) {
  1608. RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cc);
  1609. }
  1610. else if (vm_cc_cme(old_cc) == vm_cc_cme(cc)) {
  1611. RB_DEBUG_COUNTER_INC(mc_inline_miss_same_cme);
  1612. }
  1613. else if (vm_cc_cme(old_cc) && vm_cc_cme(cc) &&
  1614. vm_cc_cme(old_cc)->def == vm_cc_cme(cc)->def) {
  1615. RB_DEBUG_COUNTER_INC(mc_inline_miss_same_def);
  1616. }
  1617. else {
  1618. RB_DEBUG_COUNTER_INC(mc_inline_miss_diff);
  1619. }
  1620. #endif
  1621. #endif // OPT_INLINE_METHOD_CACHE
  1622. VM_ASSERT(vm_cc_cme(cc) == NULL ||
  1623. vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci));
  1624. return cc;
  1625. }
  1626. #ifndef MJIT_HEADER
  1627. ALWAYS_INLINE(static const struct rb_callcache *vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass));
  1628. #endif
  1629. static const struct rb_callcache *
  1630. vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
  1631. {
  1632. const struct rb_callcache *cc = cd->cc;
  1633. #if OPT_INLINE_METHOD_CACHE
  1634. if (LIKELY(vm_cc_class_check(cc, klass))) {
  1635. if (LIKELY(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)))) {
  1636. VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
  1637. RB_DEBUG_COUNTER_INC(mc_inline_hit);
  1638. VM_ASSERT(vm_cc_cme(cc) == NULL || // not found
  1639. (vm_ci_flag(cd->ci) & VM_CALL_SUPER) || // search_super w/ define_method
  1640. vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci)); // cme->called_id == ci->mid
  1641. return cc;
  1642. }
  1643. RB_DEBUG_COUNTER_INC(mc_inline_miss_invalidated);
  1644. }
  1645. else {
  1646. RB_DEBUG_COUNTER_INC(mc_inline_miss_klass);
  1647. }
  1648. #endif
  1649. return vm_search_method_slowpath0(cd_owner, cd, klass);
  1650. }
  1651. static const struct rb_callcache *
  1652. vm_search_method(VALUE cd_owner, struct rb_call_data *cd, VALUE recv)
  1653. {
  1654. VALUE klass = CLASS_OF(recv);
  1655. VM_ASSERT(klass != Qfalse);
  1656. VM_ASSERT(RBASIC_CLASS(klass) == 0 || rb_obj_is_kind_of(klass, rb_cClass));
  1657. return vm_search_method_fastpath(cd_owner, cd, klass);
  1658. }
  1659. static inline int
  1660. check_cfunc(const rb_callable_method_entry_t *me, VALUE (*func)(ANYARGS))
  1661. {
  1662. if (! me) {
  1663. return false;
  1664. }
  1665. else {
  1666. VM_ASSERT(IMEMO_TYPE_P(me, imemo_ment));
  1667. VM_ASSERT(callable_method_entry_p(me));
  1668. VM_ASSERT(me->def);
  1669. if (me->def->type != VM_METHOD_TYPE_CFUNC) {
  1670. return false;
  1671. }
  1672. else {
  1673. return me->def->body.cfunc.func == func;
  1674. }
  1675. }
  1676. }
  1677. static inline int
  1678. vm_method_cfunc_is(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv, VALUE (*func)(ANYARGS))
  1679. {
  1680. VM_ASSERT(iseq != NULL);
  1681. const struct rb_callcache *cc = vm_search_method((VALUE)iseq, cd, recv);
  1682. return check_cfunc(vm_cc_cme(cc), func);
  1683. }
  1684. #define EQ_UNREDEFINED_P(t) BASIC_OP_UNREDEFINED_P(BOP_EQ, t##_REDEFINED_OP_FLAG)
  1685. static inline bool
  1686. FIXNUM_2_P(VALUE a, VALUE b)
  1687. {
  1688. /* FIXNUM_P(a) && FIXNUM_P(b)
  1689. * == ((a & 1) && (b & 1))
  1690. * == a & b & 1 */
  1691. SIGNED_VALUE x = a;
  1692. SIGNED_VALUE y = b;
  1693. SIGNED_VALUE z = x & y & 1;
  1694. return z == 1;
  1695. }
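/* Illustrative only (not part of the original source): with the usual Fixnum
 * tagging a Ruby integer n is encoded as the VALUE (n << 1) | 1, so 3 becomes
 * 0b0111 and 5 becomes 0b1011; then 0b0111 & 0b1011 & 1 == 1 and FIXNUM_2_P
 * returns true.  If either operand is a heap object (LSB 0), the AND clears
 * bit 0 and the result is false. */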
  1696. static inline bool
  1697. FLONUM_2_P(VALUE a, VALUE b)
  1698. {
  1699. #if USE_FLONUM
  1700. /* FLONUM_P(a) && FLONUM_P(b)
  1701. * == ((a & 3) == 2) && ((b & 3) == 2)
1702. * == !(((a ^ 2) | (b ^ 2)) & 3)
  1703. */
  1704. SIGNED_VALUE x = a;
  1705. SIGNED_VALUE y = b;
  1706. SIGNED_VALUE z = ((x ^ 2) | (y ^ 2)) & 3;
  1707. return !z;
  1708. #else
  1709. return false;
  1710. #endif
  1711. }
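/* Illustrative only: under USE_FLONUM a flonum VALUE always carries the tag
 * 0b10 in its low two bits, so x ^ 2 zeroes those bits exactly when x is a
 * flonum.  For two flonums ((x ^ 2) | (y ^ 2)) & 3 == 0 and the test passes;
 * any other tag (e.g. a fixnum's low bit 1) leaves a low bit set and the
 * test fails. */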
  1712. static VALUE
  1713. opt_equality_specialized(VALUE recv, VALUE obj)
  1714. {
  1715. if (FIXNUM_2_P(recv, obj) && EQ_UNREDEFINED_P(INTEGER)) {
  1716. goto compare_by_identity;
  1717. }
  1718. else if (FLONUM_2_P(recv, obj) && EQ_UNREDEFINED_P(FLOAT)) {
  1719. goto compare_by_identity;
  1720. }
  1721. else if (STATIC_SYM_P(recv) && STATIC_SYM_P(obj) && EQ_UNREDEFINED_P(SYMBOL)) {
  1722. goto compare_by_identity;
  1723. }
  1724. else if (SPECIAL_CONST_P(recv)) {
1725. // other special constants: no fast path here (and RBASIC_CLASS() below
1726. // must not be used on them); fall through and return Qundef
  1726. }
  1727. else if (RBASIC_CLASS(recv) == rb_cFloat && RB_FLOAT_TYPE_P(obj) && EQ_UNREDEFINED_P(FLOAT)) {
  1728. double a = RFLOAT_VALUE(recv);
  1729. double b = RFLOAT_VALUE(obj);
  1730. #if MSC_VERSION_BEFORE(1300)
  1731. if (isnan(a)) {
  1732. return Qfalse;
  1733. }
  1734. else if (isnan(b)) {
  1735. return Qfalse;
  1736. }
  1737. else
  1738. #endif
  1739. return RBOOL(a == b);
  1740. }
  1741. else if (RBASIC_CLASS(recv) == rb_cString && EQ_UNREDEFINED_P(STRING)) {
  1742. if (recv == obj) {
  1743. return Qtrue;
  1744. }
  1745. else if (RB_TYPE_P(obj, T_STRING)) {
  1746. return rb_str_eql_internal(obj, recv);
  1747. }
  1748. }
  1749. return Qundef;
  1750. compare_by_identity:
  1751. return RBOOL(recv == obj);
  1752. }
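/* Illustrative note: every fast path above is guarded by EQ_UNREDEFINED_P, so
 * it is only taken while the corresponding BOP_EQ redefinition flag is clear.
 * For example, after something like
 *
 *   class Integer
 *     def ==(other)
 *       super
 *     end
 *   end
 *
 * the INTEGER flag is set and opt_equality_specialized returns Qundef for
 * Fixnum operands, falling back to a normal method call. */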
  1753. static VALUE
  1754. opt_equality(const rb_iseq_t *cd_owner, VALUE recv, VALUE obj, CALL_DATA cd)
  1755. {
  1756. VM_ASSERT(cd_owner != NULL);
  1757. VALUE val = opt_equality_specialized(recv, obj);
  1758. if (val != Qundef) return val;
  1759. if (!vm_method_cfunc_is(cd_owner, cd, recv, rb_obj_equal)) {
  1760. return Qundef;
  1761. }
  1762. else {
  1763. return RBOOL(recv == obj);
  1764. }
  1765. }
  1766. #undef EQ_UNREDEFINED_P
  1767. #ifndef MJIT_HEADER
  1768. static inline const struct rb_callcache *gccct_method_search(rb_execution_context_t *ec, VALUE recv, ID mid, int argc); // vm_eval.c
  1769. NOINLINE(static VALUE opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid));
  1770. static VALUE
  1771. opt_equality_by_mid_slowpath(VALUE recv, VALUE obj, ID mid)
  1772. {
  1773. const struct rb_callcache *cc = gccct_method_search(GET_EC(), recv, mid, 1);
  1774. if (cc && check_cfunc(vm_cc_cme(cc), rb_obj_equal)) {
  1775. return RBOOL(recv == obj);
  1776. }
  1777. else {
  1778. return Qundef;
  1779. }
  1780. }
  1781. static VALUE
  1782. opt_equality_by_mid(VALUE recv, VALUE obj, ID mid)
  1783. {
  1784. VALUE val = opt_equality_specialized(recv, obj);
  1785. if (val != Qundef) {
  1786. return val;
  1787. }
  1788. else {
  1789. return opt_equality_by_mid_slowpath(recv, obj, mid);
  1790. }
  1791. }
  1792. VALUE
  1793. rb_equal_opt(VALUE obj1, VALUE obj2)
  1794. {
  1795. return opt_equality_by_mid(obj1, obj2, idEq);
  1796. }
  1797. VALUE
  1798. rb_eql_opt(VALUE obj1, VALUE obj2)
  1799. {
  1800. return opt_equality_by_mid(obj1, obj2, idEqlP);
  1801. }
  1802. #endif // MJIT_HEADER
  1803. extern VALUE rb_vm_call0(rb_execution_context_t *ec, VALUE, ID, int, const VALUE*, const rb_callable_method_entry_t *, int kw_splat);
  1804. static VALUE
  1805. check_match(rb_execution_context_t *ec, VALUE pattern, VALUE target, enum vm_check_match_type type)
  1806. {
  1807. switch (type) {
  1808. case VM_CHECKMATCH_TYPE_WHEN:
  1809. return pattern;
  1810. case VM_CHECKMATCH_TYPE_RESCUE:
  1811. if (!rb_obj_is_kind_of(pattern, rb_cModule)) {
  1812. rb_raise(rb_eTypeError, "class or module required for rescue clause");
  1813. }
  1814. /* fall through */
  1815. case VM_CHECKMATCH_TYPE_CASE: {
  1816. const rb_callable_method_entry_t *me =
  1817. rb_callable_method_entry_with_refinements(CLASS_OF(pattern), idEqq, NULL);
  1818. if (me) {
  1819. return rb_vm_call0(ec, pattern, idEqq, 1, &target, me, RB_NO_KEYWORDS);
  1820. }
  1821. else {
  1822. /* fallback to funcall (e.g. method_missing) */
  1823. return rb_funcallv(pattern, idEqq, 1, &target);
  1824. }
  1825. }
  1826. default:
  1827. rb_bug("check_match: unreachable");
  1828. }
  1829. }
  1830. #if MSC_VERSION_BEFORE(1300)
  1831. #define CHECK_CMP_NAN(a, b) if (isnan(a) || isnan(b)) return Qfalse;
  1832. #else
  1833. #define CHECK_CMP_NAN(a, b) /* do nothing */
  1834. #endif
  1835. static inline VALUE
  1836. double_cmp_lt(double a, double b)
  1837. {
  1838. CHECK_CMP_NAN(a, b);
  1839. return RBOOL(a < b);
  1840. }
  1841. static inline VALUE
  1842. double_cmp_le(double a, double b)
  1843. {
  1844. CHECK_CMP_NAN(a, b);
  1845. return RBOOL(a <= b);
  1846. }
  1847. static inline VALUE
  1848. double_cmp_gt(double a, double b)
  1849. {
  1850. CHECK_CMP_NAN(a, b);
  1851. return RBOOL(a > b);
  1852. }
  1853. static inline VALUE
  1854. double_cmp_ge(double a, double b)
  1855. {
  1856. CHECK_CMP_NAN(a, b);
  1857. return RBOOL(a >= b);
  1858. }
  1859. static inline VALUE *
  1860. vm_base_ptr(const rb_control_frame_t *cfp)
  1861. {
  1862. #if 0 // we may optimize and use this once we confirm it does not spoil performance on JIT.
  1863. const rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
  1864. if (cfp->iseq && VM_FRAME_RUBYFRAME_P(cfp)) {
  1865. VALUE *bp = prev_cfp->sp + cfp->iseq->body->local_table_size + VM_ENV_DATA_SIZE;
  1866. if (cfp->iseq->body->type == ISEQ_TYPE_METHOD) {
  1867. /* adjust `self' */
  1868. bp += 1;
  1869. }
  1870. #if VM_DEBUG_BP_CHECK
  1871. if (bp != cfp->bp_check) {
  1872. ruby_debug_printf("bp_check: %ld, bp: %ld\n",
  1873. (long)(cfp->bp_check - GET_EC()->vm_stack),
  1874. (long)(bp - GET_EC()->vm_stack));
  1875. rb_bug("vm_base_ptr: unreachable");
  1876. }
  1877. #endif
  1878. return bp;
  1879. }
  1880. else {
  1881. return NULL;
  1882. }
  1883. #else
  1884. return cfp->__bp__;
  1885. #endif
  1886. }
1887. /* method call processing with call_info */
  1888. #include "vm_args.c"
  1889. static inline VALUE vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc, int param_size, int local_size);
  1890. ALWAYS_INLINE(static VALUE vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me, int opt_pc, int param_size, int local_size));
  1891. static inline VALUE vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc);
  1892. static VALUE vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
  1893. static VALUE vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
  1894. static VALUE vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
  1895. static inline VALUE vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling);
  1896. static vm_call_handler vm_call_iseq_setup_func(const struct rb_callinfo *ci, const int param_size, const int local_size);
  1897. static VALUE
  1898. vm_call_iseq_setup_tailcall_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
  1899. {
  1900. RB_DEBUG_COUNTER_INC(ccf_iseq_setup_tailcall_0start);
  1901. return vm_call_iseq_setup_tailcall(ec, cfp, calling, 0);
  1902. }
  1903. static VALUE
  1904. vm_call_iseq_setup_normal_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
  1905. {
  1906. RB_DEBUG_COUNTER_INC(ccf_iseq_setup_0start);
  1907. const struct rb_callcache *cc = calling->cc;
  1908. const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
  1909. int param = iseq->body->param.size;
  1910. int local = iseq->body->local_table_size;
  1911. return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
  1912. }
  1913. MJIT_STATIC bool
  1914. rb_simple_iseq_p(const rb_iseq_t *iseq)
  1915. {
  1916. return iseq->body->param.flags.has_opt == FALSE &&
  1917. iseq->body->param.flags.has_rest == FALSE &&
  1918. iseq->body->param.flags.has_post == FALSE &&
  1919. iseq->body->param.flags.has_kw == FALSE &&
  1920. iseq->body->param.flags.has_kwrest == FALSE &&
  1921. iseq->body->param.flags.accepts_no_kwarg == FALSE &&
  1922. iseq->body->param.flags.has_block == FALSE;
  1923. }
  1924. static bool
  1925. rb_iseq_only_optparam_p(const rb_iseq_t *iseq)
  1926. {
  1927. return iseq->body->param.flags.has_opt == TRUE &&
  1928. iseq->body->param.flags.has_rest == FALSE &&
  1929. iseq->body->param.flags.has_post == FALSE &&
  1930. iseq->body->param.flags.has_kw == FALSE &&
  1931. iseq->body->param.flags.has_kwrest == FALSE &&
  1932. iseq->body->param.flags.accepts_no_kwarg == FALSE &&
  1933. iseq->body->param.flags.has_block == FALSE;
  1934. }
  1935. static bool
  1936. rb_iseq_only_kwparam_p(const rb_iseq_t *iseq)
  1937. {
  1938. return iseq->body->param.flags.has_opt == FALSE &&
  1939. iseq->body->param.flags.has_rest == FALSE &&
  1940. iseq->body->param.flags.has_post == FALSE &&
  1941. iseq->body->param.flags.has_kw == TRUE &&
  1942. iseq->body->param.flags.has_kwrest == FALSE &&
  1943. iseq->body->param.flags.has_block == FALSE;
  1944. }
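/* Illustrative examples (not from the original source) of what the three
 * predicates above accept, given the usual parameter flags:
 *   rb_simple_iseq_p:        def m(a, b);     end   # required args only
 *   rb_iseq_only_optparam_p: def m(a, b = 1); end   # required + optional
 *   rb_iseq_only_kwparam_p:  def m(a, k: 1);  end   # required + keyword
 * A rest (*args), post, **kwrest or &block parameter makes all three
 * return false. */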
  1945. // If true, cc->call needs to include `CALLER_SETUP_ARG` (i.e. can't be skipped in fastpath)
  1946. MJIT_STATIC bool
  1947. rb_splat_or_kwargs_p(const struct rb_callinfo *restrict ci)
  1948. {
  1949. return IS_ARGS_SPLAT(ci) || IS_ARGS_KW_OR_KW_SPLAT(ci);
  1950. }
  1951. static inline void
  1952. CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp,
  1953. struct rb_calling_info *restrict calling,
  1954. const struct rb_callinfo *restrict ci)
  1955. {
  1956. if (UNLIKELY(IS_ARGS_SPLAT(ci))) {
  1957. VALUE final_hash;
1958. /* This expands the splat (rest) argument onto the stack.
  1959. * So, vm_ci_flag(ci) & VM_CALL_ARGS_SPLAT is now inconsistent.
  1960. */
  1961. vm_caller_setup_arg_splat(cfp, calling);
  1962. if (!IS_ARGS_KW_OR_KW_SPLAT(ci) &&
  1963. calling->argc > 0 &&
  1964. RB_TYPE_P((final_hash = *(cfp->sp - 1)), T_HASH) &&
  1965. (((struct RHash *)final_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
  1966. *(cfp->sp - 1) = rb_hash_dup(final_hash);
  1967. calling->kw_splat = 1;
  1968. }
  1969. }
  1970. if (UNLIKELY(IS_ARGS_KW_OR_KW_SPLAT(ci))) {
  1971. if (IS_ARGS_KEYWORD(ci)) {
  1972. /* This converts VM_CALL_KWARG style to VM_CALL_KW_SPLAT style
  1973. * by creating a keyword hash.
  1974. * So, vm_ci_flag(ci) & VM_CALL_KWARG is now inconsistent.
  1975. */
  1976. vm_caller_setup_arg_kw(cfp, calling, ci);
  1977. }
  1978. else {
  1979. VALUE keyword_hash = cfp->sp[-1];
  1980. if (!RB_TYPE_P(keyword_hash, T_HASH)) {
  1981. /* Convert a non-hash keyword splat to a new hash */
  1982. cfp->sp[-1] = rb_hash_dup(rb_to_hash_type(keyword_hash));
  1983. }
  1984. else if (!IS_ARGS_KW_SPLAT_MUT(ci)) {
  1985. /* Convert a hash keyword splat to a new hash unless
  1986. * a mutable keyword splat was passed.
  1987. */
  1988. cfp->sp[-1] = rb_hash_dup(keyword_hash);
  1989. }
  1990. }
  1991. }
  1992. }
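/* Illustrative examples of the normalization above at a call site:
 *   m(*args) - the splat array is expanded onto the stack; if its last element
 *              is a Hash flagged by ruby2_keywords (RHASH_PASS_AS_KEYWORDS),
 *              a dup of it becomes a keyword splat.
 *   m(k: 1)  - the literal keyword arguments are packed into a Hash on the
 *              stack (VM_CALL_KWARG is turned into kw-splat form).
 *   m(**h)   - h is converted/dup'ed into a fresh Hash unless a mutable
 *              keyword-splat hash was passed.
 * Afterwards the VM_CALL_ARGS_SPLAT/VM_CALL_KWARG bits in ci no longer match
 * what is on the stack, as the comments inside the function note. */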
  1993. static inline void
  1994. CALLER_REMOVE_EMPTY_KW_SPLAT(struct rb_control_frame_struct *restrict cfp,
  1995. struct rb_calling_info *restrict calling,
  1996. const struct rb_callinfo *restrict ci)
  1997. {
  1998. if (UNLIKELY(calling->kw_splat)) {
  1999. /* This removes the last Hash object if it is empty.
  2000. * So, vm_ci_flag(ci) & VM_CALL_KW_SPLAT is now inconsistent.
  2001. */
  2002. if (RHASH_EMPTY_P(cfp->sp[-1])) {
  2003. cfp->sp--;
  2004. calling->argc--;
  2005. calling->kw_splat = 0;
  2006. }
  2007. }
  2008. }
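/* Illustrative example: for a call like m(**h) where h happens to be empty,
 * the (copied) empty keyword hash left by CALLER_SETUP_ARG is dropped here,
 * so the callee paths that use this helper see it as a plain m() call. */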
  2009. #define USE_OPT_HIST 0
  2010. #if USE_OPT_HIST
  2011. #define OPT_HIST_MAX 64
  2012. static int opt_hist[OPT_HIST_MAX+1];
  2013. __attribute__((destructor))
  2014. static void
  2015. opt_hist_show_results_at_exit(void)
  2016. {
  2017. for (int i=0; i<OPT_HIST_MAX; i++) {
  2018. ruby_debug_printf("opt_hist\t%d\t%d\n", i, opt_hist[i]);
  2019. }
  2020. }
  2021. #endif
  2022. static VALUE
  2023. vm_call_iseq_setup_normal_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
  2024. struct rb_calling_info *calling)
  2025. {
  2026. const struct rb_callcache *cc = calling->cc;
  2027. const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
  2028. const int lead_num = iseq->body->param.lead_num;
  2029. const int opt = calling->argc - lead_num;
  2030. const int opt_num = iseq->body->param.opt_num;
  2031. const int opt_pc = (int)iseq->body->param.opt_table[opt];
  2032. const int param = iseq->body->param.size;
  2033. const int local = iseq->body->local_table_size;
  2034. const int delta = opt_num - opt;
  2035. RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
  2036. #if USE_OPT_HIST
2037. if (opt < OPT_HIST_MAX) {
  2038. opt_hist[opt]++;
  2039. }
  2040. else {
  2041. opt_hist[OPT_HIST_MAX]++;
  2042. }
  2043. #endif
  2044. return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param - delta, local);
  2045. }
  2046. static VALUE
  2047. vm_call_iseq_setup_tailcall_opt_start(rb_execution_context_t *ec, rb_control_frame_t *cfp,
  2048. struct rb_calling_info *calling)
  2049. {
  2050. const struct rb_callcache *cc = calling->cc;
  2051. const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
  2052. const int lead_num = iseq->body->param.lead_num;
  2053. const int opt = calling->argc - lead_num;
  2054. const int opt_pc = (int)iseq->body->param.opt_table[opt];
  2055. RB_DEBUG_COUNTER_INC(ccf_iseq_opt);
  2056. #if USE_OPT_HIST
2057. if (opt < OPT_HIST_MAX) {
  2058. opt_hist[opt]++;
  2059. }
  2060. else {
  2061. opt_hist[OPT_HIST_MAX]++;
  2062. }
  2063. #endif
  2064. return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
  2065. }
  2066. static void
  2067. args_setup_kw_parameters(rb_execution_context_t *const ec, const rb_iseq_t *const iseq,
  2068. VALUE *const passed_values, const int passed_keyword_len, const VALUE *const passed_keywords,
  2069. VALUE *const locals);
  2070. static VALUE
  2071. vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
  2072. struct rb_calling_info *calling)
  2073. {
  2074. const struct rb_callinfo *ci = calling->ci;
  2075. const struct rb_callcache *cc = calling->cc;
  2076. VM_ASSERT(vm_ci_flag(ci) & VM_CALL_KWARG);
  2077. RB_DEBUG_COUNTER_INC(ccf_iseq_kw1);
  2078. const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
  2079. const struct rb_iseq_param_keyword *kw_param = iseq->body->param.keyword;
  2080. const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
  2081. const int ci_kw_len = kw_arg->keyword_len;
  2082. const VALUE * const ci_keywords = kw_arg->keywords;
  2083. VALUE *argv = cfp->sp - calling->argc;
  2084. VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
  2085. const int lead_num = iseq->body->param.lead_num;
  2086. VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
  2087. MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
  2088. args_setup_kw_parameters(ec, iseq, ci_kws, ci_kw_len, ci_keywords, klocals);
  2089. int param = iseq->body->param.size;
  2090. int local = iseq->body->local_table_size;
  2091. return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
  2092. }
  2093. static VALUE
  2094. vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t *cfp,
  2095. struct rb_calling_info *calling)
  2096. {
  2097. const struct rb_callinfo *MAYBE_UNUSED(ci) = calling->ci;
  2098. const struct rb_callcache *cc = calling->cc;
  2099. VM_ASSERT((vm_ci_flag(ci) & VM_CALL_KWARG) == 0);
  2100. RB_DEBUG_COUNTER_INC(ccf_iseq_kw2);
  2101. const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
  2102. const struct rb_iseq_param_keyword *kw_param = iseq->body->param.keyword;
  2103. VALUE * const argv = cfp->sp - calling->argc;
  2104. VALUE * const klocals = argv + kw_param->bits_start - kw_param->num;
  2105. int i;
  2106. for (i=0; i<kw_param->num; i++) {
  2107. klocals[i] = kw_param->default_values[i];
  2108. }
  2109. klocals[i] = INT2FIX(0); // kw specify flag
  2110. // NOTE:
2111. // nobody checks this value, but it should be cleared because it can
2112. // point to an invalid VALUE (a T_NONE object, a raw pointer, and so on).
  2113. int param = iseq->body->param.size;
  2114. int local = iseq->body->local_table_size;
  2115. return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
  2116. }
  2117. static inline int
  2118. vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
  2119. const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
  2120. {
  2121. const struct rb_callinfo *ci = calling->ci;
  2122. const struct rb_callcache *cc = calling->cc;
  2123. bool cacheable_ci = vm_ci_markable(ci);
  2124. if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_KW_SPLAT))) {
  2125. if (LIKELY(rb_simple_iseq_p(iseq))) {
  2126. rb_control_frame_t *cfp = ec->cfp;
  2127. CALLER_SETUP_ARG(cfp, calling, ci);
  2128. CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
  2129. if (calling->argc != iseq->body->param.lead_num) {
  2130. argument_arity_error(ec, iseq, calling->argc, iseq->body->param.lead_num, iseq->body->param.lead_num);
  2131. }
  2132. VM_ASSERT(ci == calling->ci);
  2133. VM_ASSERT(cc == calling->cc);
  2134. CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), cacheable_ci && vm_call_iseq_optimizable_p(ci, cc));
  2135. return 0;
  2136. }
  2137. else if (rb_iseq_only_optparam_p(iseq)) {
  2138. rb_control_frame_t *cfp = ec->cfp;
  2139. CALLER_SETUP_ARG(cfp, calling, ci);
  2140. CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
  2141. const int lead_num = iseq->body->param.lead_num;
  2142. const int opt_num = iseq->body->param.opt_num;
  2143. const int argc = calling->argc;
  2144. const int opt = argc - lead_num;
  2145. if (opt < 0 || opt > opt_num) {
  2146. argument_arity_error(ec, iseq, argc, lead_num, lead_num + opt_num);
  2147. }
  2148. if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
  2149. CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start,
  2150. !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
  2151. cacheable_ci && METHOD_ENTRY_CACHEABLE(vm_cc_cme(cc)));
  2152. }
  2153. else {
  2154. CC_SET_FASTPATH(cc, vm_call_iseq_setup_tailcall_opt_start,
  2155. !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
  2156. cacheable_ci && METHOD_ENTRY_CACHEABLE(vm_cc_cme(cc)));
  2157. }
  2158. /* initialize opt vars for self-references */
  2159. VM_ASSERT((int)iseq->body->param.size == lead_num + opt_num);
  2160. for (int i=argc; i<lead_num + opt_num; i++) {
  2161. argv[i] = Qnil;
  2162. }
  2163. return (int)iseq->body->param.opt_table[opt];
  2164. }
  2165. else if (rb_iseq_only_kwparam_p(iseq) && !IS_ARGS_SPLAT(ci)) {
  2166. const int lead_num = iseq->body->param.lead_num;
  2167. const int argc = calling->argc;
  2168. const struct rb_iseq_param_keyword *kw_param = iseq->body->param.keyword;
  2169. if (vm_ci_flag(ci) & VM_CALL_KWARG) {
  2170. const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
  2171. if (argc - kw_arg->keyword_len == lead_num) {
  2172. const int ci_kw_len = kw_arg->keyword_len;
  2173. const VALUE * const ci_keywords = kw_arg->keywords;
  2174. VALUE * const ci_kws = ALLOCA_N(VALUE, ci_kw_len);
  2175. MEMCPY(ci_kws, argv + lead_num, VALUE, ci_kw_len);
  2176. VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
  2177. args_setup_kw_parameters(ec, iseq, ci_kws, ci_kw_len, ci_keywords, klocals);
  2178. CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_kwarg,
  2179. cacheable_ci && METHOD_ENTRY_CACHEABLE(vm_cc_cme(cc)));
  2180. return 0;
  2181. }
  2182. }
  2183. else if (argc == lead_num) {
  2184. /* no kwarg */
  2185. VALUE *const klocals = argv + kw_param->bits_start - kw_param->num;
  2186. args_setup_kw_parameters(ec, iseq, NULL, 0, NULL, klocals);
  2187. if (klocals[kw_param->num] == INT2FIX(0)) {
  2188. /* copy from default_values */
  2189. CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_nokwarg,
  2190. cacheable_ci && METHOD_ENTRY_CACHEABLE(vm_cc_cme(cc)));
  2191. }
  2192. return 0;
  2193. }
  2194. }
  2195. }
  2196. return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_method);
  2197. }
  2198. static VALUE
  2199. vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
  2200. {
  2201. RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
  2202. const struct rb_callcache *cc = calling->cc;
  2203. const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
  2204. const int param_size = iseq->body->param.size;
  2205. const int local_size = iseq->body->local_table_size;
  2206. const int opt_pc = vm_callee_setup_arg(ec, calling, def_iseq_ptr(vm_cc_cme(cc)->def), cfp->sp - calling->argc, param_size, local_size);
  2207. return vm_call_iseq_setup_2(ec, cfp, calling, opt_pc, param_size, local_size);
  2208. }
  2209. static inline VALUE
  2210. vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling,
  2211. int opt_pc, int param_size, int local_size)
  2212. {
  2213. const struct rb_callinfo *ci = calling->ci;
  2214. const struct rb_callcache *cc = calling->cc;
  2215. if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
  2216. return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param_size, local_size);
  2217. }
  2218. else {
  2219. return vm_call_iseq_setup_tailcall(ec, cfp, calling, opt_pc);
  2220. }
  2221. }
  2222. static inline VALUE
  2223. vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const rb_callable_method_entry_t *me,
  2224. int opt_pc, int param_size, int local_size)
  2225. {
  2226. const rb_iseq_t *iseq = def_iseq_ptr(me->def);
  2227. VALUE *argv = cfp->sp - calling->argc;
  2228. VALUE *sp = argv + param_size;
  2229. cfp->sp = argv - 1 /* recv */;
  2230. vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL, calling->recv,
  2231. calling->block_handler, (VALUE)me,
  2232. iseq->body->iseq_encoded + opt_pc, sp,
  2233. local_size - param_size,
  2234. iseq->body->stack_max);
  2235. return Qundef;
  2236. }
  2237. static inline VALUE
  2238. vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, int opt_pc)
  2239. {
  2240. const struct rb_callcache *cc = calling->cc;
  2241. unsigned int i;
  2242. VALUE *argv = cfp->sp - calling->argc;
  2243. const rb_callable_method_entry_t *me = vm_cc_cme(cc);
  2244. const rb_iseq_t *iseq = def_iseq_ptr(me->def);
  2245. VALUE *src_argv = argv;
  2246. VALUE *sp_orig, *sp;
  2247. VALUE finish_flag = VM_FRAME_FINISHED_P(cfp) ? VM_FRAME_FLAG_FINISH : 0;
  2248. if (VM_BH_FROM_CFP_P(calling->block_handler, cfp)) {
  2249. struct rb_captured_block *dst_captured = VM_CFP_TO_CAPTURED_BLOCK(RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
  2250. const struct rb_captured_block *src_captured = VM_BH_TO_CAPT_BLOCK(calling->block_handler);
  2251. dst_captured->code.val = src_captured->code.val;
  2252. if (VM_BH_ISEQ_BLOCK_P(calling->block_handler)) {
  2253. calling->block_handler = VM_BH_FROM_ISEQ_BLOCK(dst_captured);
  2254. }
  2255. else {
  2256. calling->block_handler = VM_BH_FROM_IFUNC_BLOCK(dst_captured);
  2257. }
  2258. }
  2259. vm_pop_frame(ec, cfp, cfp->ep);
  2260. cfp = ec->cfp;
  2261. sp_orig = sp = cfp->sp;
  2262. /* push self */
  2263. sp[0] = calling->recv;
  2264. sp++;
  2265. /* copy arguments */
  2266. for (i=0; i < iseq->body->param.size; i++) {
  2267. *sp++ = src_argv[i];
  2268. }
  2269. vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL | finish_flag,
  2270. calling->recv, calling->block_handler, (VALUE)me,
  2271. iseq->body->iseq_encoded + opt_pc, sp,
  2272. iseq->body->local_table_size - iseq->body->param.size,
  2273. iseq->body->stack_max);
  2274. cfp->sp = sp_orig;
  2275. return Qundef;
  2276. }
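/* Sketch of the difference from the normal setup above: the tailcall variant
 * first pops the current frame (vm_pop_frame) and pushes the callee's frame in
 * its place, copying the receiver and arguments into the reclaimed stack
 * region.  So with the (off-by-default) :tailcall_optimization compile option,
 * a self-recursive call in tail position re-uses one control frame instead of
 * growing the frame stack. */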
  2277. static void
  2278. ractor_unsafe_check(void)
  2279. {
  2280. if (!rb_ractor_main_p()) {
2281. rb_raise(rb_eRactorUnsafeError, "ractor unsafe method called from non-main ractor");
  2282. }
  2283. }
  2284. static VALUE
  2285. call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2286. {
  2287. ractor_unsafe_check();
  2288. return (*func)(recv, rb_ary_new4(argc, argv));
  2289. }
  2290. static VALUE
  2291. call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2292. {
  2293. ractor_unsafe_check();
  2294. return (*func)(argc, argv, recv);
  2295. }
  2296. static VALUE
  2297. call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2298. {
  2299. ractor_unsafe_check();
  2300. VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
  2301. return (*f)(recv);
  2302. }
  2303. static VALUE
  2304. call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2305. {
  2306. ractor_unsafe_check();
  2307. VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
  2308. return (*f)(recv, argv[0]);
  2309. }
  2310. static VALUE
  2311. call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2312. {
  2313. ractor_unsafe_check();
  2314. VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
  2315. return (*f)(recv, argv[0], argv[1]);
  2316. }
  2317. static VALUE
  2318. call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2319. {
  2320. ractor_unsafe_check();
  2321. VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
  2322. return (*f)(recv, argv[0], argv[1], argv[2]);
  2323. }
  2324. static VALUE
  2325. call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2326. {
  2327. ractor_unsafe_check();
  2328. VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
  2329. return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
  2330. }
  2331. static VALUE
  2332. call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2333. {
  2334. ractor_unsafe_check();
  2335. VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
  2336. return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
  2337. }
  2338. static VALUE
  2339. call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2340. {
  2341. ractor_unsafe_check();
  2342. VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
  2343. return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
  2344. }
  2345. static VALUE
  2346. call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2347. {
  2348. ractor_unsafe_check();
  2349. VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
  2350. return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
  2351. }
  2352. static VALUE
  2353. call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2354. {
  2355. ractor_unsafe_check();
  2356. VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
  2357. return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
  2358. }
  2359. static VALUE
  2360. call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2361. {
  2362. ractor_unsafe_check();
  2363. VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
  2364. return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
  2365. }
  2366. static VALUE
  2367. call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2368. {
  2369. ractor_unsafe_check();
  2370. VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
  2371. return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
  2372. }
  2373. static VALUE
  2374. call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2375. {
  2376. ractor_unsafe_check();
  2377. VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
  2378. return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
  2379. }
  2380. static VALUE
  2381. call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2382. {
  2383. ractor_unsafe_check();
  2384. VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
  2385. return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
  2386. }
  2387. static VALUE
  2388. call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2389. {
  2390. ractor_unsafe_check();
  2391. VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
  2392. return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
  2393. }
  2394. static VALUE
  2395. call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2396. {
  2397. ractor_unsafe_check();
  2398. VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
  2399. return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
  2400. }
  2401. static VALUE
  2402. call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2403. {
  2404. ractor_unsafe_check();
  2405. VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
  2406. return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
  2407. }
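/* Note: the call_cfunc_<n> family above adapts the VM's uniform
 * (recv, argc, argv) calling convention to C functions of a fixed arity; the
 * invoker is picked from the arity a C extension declared, e.g. (illustrative)
 *   rb_define_method(klass, "m", m_impl, 2);   // -> call_cfunc_2
 *   rb_define_method(klass, "m", m_impl, -1);  // -> call_cfunc_m1
 *   rb_define_method(klass, "m", m_impl, -2);  // -> call_cfunc_m2
 * The ractor_safe_* variants below are identical except that they skip
 * ractor_unsafe_check(). */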
  2408. static VALUE
  2409. ractor_safe_call_cfunc_m2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2410. {
  2411. return (*func)(recv, rb_ary_new4(argc, argv));
  2412. }
  2413. static VALUE
  2414. ractor_safe_call_cfunc_m1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2415. {
  2416. return (*func)(argc, argv, recv);
  2417. }
  2418. static VALUE
  2419. ractor_safe_call_cfunc_0(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2420. {
  2421. VALUE(*f)(VALUE) = (VALUE(*)(VALUE))func;
  2422. return (*f)(recv);
  2423. }
  2424. static VALUE
  2425. ractor_safe_call_cfunc_1(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2426. {
  2427. VALUE(*f)(VALUE, VALUE) = (VALUE(*)(VALUE, VALUE))func;
  2428. return (*f)(recv, argv[0]);
  2429. }
  2430. static VALUE
  2431. ractor_safe_call_cfunc_2(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2432. {
  2433. VALUE(*f)(VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE))func;
  2434. return (*f)(recv, argv[0], argv[1]);
  2435. }
  2436. static VALUE
  2437. ractor_safe_call_cfunc_3(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2438. {
  2439. VALUE(*f)(VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE))func;
  2440. return (*f)(recv, argv[0], argv[1], argv[2]);
  2441. }
  2442. static VALUE
  2443. ractor_safe_call_cfunc_4(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2444. {
  2445. VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE))func;
  2446. return (*f)(recv, argv[0], argv[1], argv[2], argv[3]);
  2447. }
  2448. static VALUE
  2449. ractor_safe_call_cfunc_5(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2450. {
  2451. VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
  2452. return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
  2453. }
  2454. static VALUE
  2455. ractor_safe_call_cfunc_6(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2456. {
  2457. VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
  2458. return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
  2459. }
  2460. static VALUE
  2461. ractor_safe_call_cfunc_7(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2462. {
  2463. VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
  2464. return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
  2465. }
  2466. static VALUE
  2467. ractor_safe_call_cfunc_8(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2468. {
  2469. VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
  2470. return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
  2471. }
  2472. static VALUE
  2473. ractor_safe_call_cfunc_9(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2474. {
  2475. VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
  2476. return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
  2477. }
  2478. static VALUE
  2479. ractor_safe_call_cfunc_10(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2480. {
  2481. VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
  2482. return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
  2483. }
  2484. static VALUE
  2485. ractor_safe_call_cfunc_11(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2486. {
  2487. VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
  2488. return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
  2489. }
  2490. static VALUE
  2491. ractor_safe_call_cfunc_12(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2492. {
  2493. VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
  2494. return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
  2495. }
  2496. static VALUE
  2497. ractor_safe_call_cfunc_13(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2498. {
  2499. VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
  2500. return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
  2501. }
  2502. static VALUE
  2503. ractor_safe_call_cfunc_14(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2504. {
  2505. VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
  2506. return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
  2507. }
  2508. static VALUE
  2509. ractor_safe_call_cfunc_15(VALUE recv, int argc, const VALUE *argv, VALUE (*func)(ANYARGS))
  2510. {
  2511. VALUE(*f)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE) = (VALUE(*)(VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE))func;
  2512. return (*f)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
  2513. }
  2514. static inline int
  2515. vm_cfp_consistent_p(rb_execution_context_t *ec, const rb_control_frame_t *reg_cfp)
  2516. {
  2517. const int ov_flags = RAISED_STACKOVERFLOW;
  2518. if (LIKELY(reg_cfp == ec->cfp + 1)) return TRUE;
  2519. if (rb_ec_raised_p(ec, ov_flags)) {
  2520. rb_ec_raised_reset(ec, ov_flags);
  2521. return TRUE;
  2522. }
  2523. return FALSE;
  2524. }
  2525. #define CHECK_CFP_CONSISTENCY(func) \
  2526. (LIKELY(vm_cfp_consistent_p(ec, reg_cfp)) ? (void)0 : \
  2527. rb_bug(func ": cfp consistency error (%p, %p)", (void *)reg_cfp, (void *)(ec->cfp+1)))
  2528. static inline
  2529. const rb_method_cfunc_t *
  2530. vm_method_cfunc_entry(const rb_callable_method_entry_t *me)
  2531. {
  2532. #if VM_DEBUG_VERIFY_METHOD_CACHE
  2533. switch (me->def->type) {
  2534. case VM_METHOD_TYPE_CFUNC:
  2535. case VM_METHOD_TYPE_NOTIMPLEMENTED:
  2536. break;
  2537. # define METHOD_BUG(t) case VM_METHOD_TYPE_##t: rb_bug("wrong method type: " #t)
  2538. METHOD_BUG(ISEQ);
  2539. METHOD_BUG(ATTRSET);
  2540. METHOD_BUG(IVAR);
  2541. METHOD_BUG(BMETHOD);
  2542. METHOD_BUG(ZSUPER);
  2543. METHOD_BUG(UNDEF);
  2544. METHOD_BUG(OPTIMIZED);
  2545. METHOD_BUG(MISSING);
  2546. METHOD_BUG(REFINED);
  2547. METHOD_BUG(ALIAS);
  2548. # undef METHOD_BUG
  2549. default:
  2550. rb_bug("wrong method type: %d", me->def->type);
  2551. }
  2552. #endif
  2553. return UNALIGNED_MEMBER_PTR(me->def, body.cfunc);
  2554. }
  2555. static VALUE
  2556. vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
  2557. {
  2558. RB_DEBUG_COUNTER_INC(ccf_cfunc_with_frame);
  2559. const struct rb_callinfo *ci = calling->ci;
  2560. const struct rb_callcache *cc = calling->cc;
  2561. VALUE val;
  2562. const rb_callable_method_entry_t *me = vm_cc_cme(cc);
  2563. const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(me);
  2564. int len = cfunc->argc;
  2565. VALUE recv = calling->recv;
  2566. VALUE block_handler = calling->block_handler;
  2567. VALUE frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
  2568. int argc = calling->argc;
  2569. int orig_argc = argc;
  2570. if (UNLIKELY(calling->kw_splat)) {
  2571. frame_type |= VM_FRAME_FLAG_CFRAME_KW;
  2572. }
  2573. RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
  2574. EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, me->def->original_id, vm_ci_mid(ci), me->owner, Qundef);
  2575. vm_push_frame(ec, NULL, frame_type, recv,
  2576. block_handler, (VALUE)me,
  2577. 0, ec->cfp->sp, 0, 0);
  2578. if (len >= 0) rb_check_arity(argc, len, len);
  2579. reg_cfp->sp -= orig_argc + 1;
  2580. val = (*cfunc->invoker)(recv, argc, reg_cfp->sp + 1, cfunc->func);
  2581. CHECK_CFP_CONSISTENCY("vm_call_cfunc");
  2582. rb_vm_pop_frame(ec);
  2583. EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, recv, me->def->original_id, vm_ci_mid(ci), me->owner, val);
  2584. RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
  2585. return val;
  2586. }
  2587. static VALUE
  2588. vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
  2589. {
  2590. const struct rb_callinfo *ci = calling->ci;
  2591. RB_DEBUG_COUNTER_INC(ccf_cfunc);
  2592. CALLER_SETUP_ARG(reg_cfp, calling, ci);
  2593. CALLER_REMOVE_EMPTY_KW_SPLAT(reg_cfp, calling, ci);
  2594. CC_SET_FASTPATH(calling->cc, vm_call_cfunc_with_frame, !rb_splat_or_kwargs_p(ci) && !calling->kw_splat);
  2595. return vm_call_cfunc_with_frame(ec, reg_cfp, calling);
  2596. }
  2597. static VALUE
  2598. vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
  2599. {
  2600. const struct rb_callcache *cc = calling->cc;
  2601. RB_DEBUG_COUNTER_INC(ccf_ivar);
  2602. cfp->sp -= 1;
  2603. return vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE);
  2604. }
  2605. static VALUE
  2606. vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
  2607. {
  2608. const struct rb_callcache *cc = calling->cc;
  2609. RB_DEBUG_COUNTER_INC(ccf_attrset);
  2610. VALUE val = *(cfp->sp - 1);
  2611. cfp->sp -= 2;
  2612. return vm_setivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, val, NULL, NULL, cc, 1);
  2613. }
  2614. bool
  2615. rb_vm_call_ivar_attrset_p(const vm_call_handler ch)
  2616. {
  2617. return (ch == vm_call_ivar || ch == vm_call_attrset);
  2618. }
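/* Illustrative note: vm_call_ivar and vm_call_attrset are the optimized
 * handlers behind attribute methods, so for
 *   class Foo; attr_accessor :bar; end
 * a call to foo.bar reads the instance variable directly via vm_getivar and
 * foo.bar = x writes it via vm_setivar, without pushing a Ruby method frame. */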
  2619. static inline VALUE
  2620. vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv)
  2621. {
  2622. rb_proc_t *proc;
  2623. VALUE val;
  2624. const struct rb_callcache *cc = calling->cc;
  2625. const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
  2626. VALUE procv = cme->def->body.bmethod.proc;
  2627. if (!RB_OBJ_SHAREABLE_P(procv) &&
  2628. cme->def->body.bmethod.defined_ractor != rb_ractor_self(rb_ec_ractor_ptr(ec))) {
  2629. rb_raise(rb_eRuntimeError, "defined in a different Ractor");
  2630. }
  2631. /* control block frame */
  2632. GetProcPtr(procv, proc);
  2633. val = rb_vm_invoke_bmethod(ec, proc, calling->recv, calling->argc, argv, calling->kw_splat, calling->block_handler, vm_cc_cme(cc));
  2634. return val;
  2635. }
  2636. static VALUE
  2637. vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
  2638. {
  2639. RB_DEBUG_COUNTER_INC(ccf_bmethod);
  2640. VALUE *argv;
  2641. int argc;
  2642. const struct rb_callinfo *ci = calling->ci;
  2643. CALLER_SETUP_ARG(cfp, calling, ci);
  2644. argc = calling->argc;
  2645. argv = ALLOCA_N(VALUE, argc);
  2646. MEMCPY(argv, cfp->sp - argc, VALUE, argc);
  2647. cfp->sp += - argc - 1;
  2648. return vm_call_bmethod_body(ec, calling, argv);
  2649. }
  2650. MJIT_FUNC_EXPORTED VALUE
  2651. rb_find_defined_class_by_owner(VALUE current_class, VALUE target_owner)
  2652. {
  2653. VALUE klass = current_class;
2654. /* for a prepended Module, start the search from the origin (cover) class */
  2655. if (RB_TYPE_P(klass, T_ICLASS) && FL_TEST(klass, RICLASS_IS_ORIGIN) &&
  2656. RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
  2657. klass = RBASIC_CLASS(klass);
  2658. }
  2659. while (RTEST(klass)) {
  2660. VALUE owner = RB_TYPE_P(klass, T_ICLASS) ? RBASIC_CLASS(klass) : klass;
  2661. if (owner == target_owner) {
  2662. return klass;
  2663. }
  2664. klass = RCLASS_SUPER(klass);
  2665. }
  2666. return current_class; /* maybe module function */
  2667. }
  2668. static const rb_callable_method_entry_t *
  2669. aliased_callable_method_entry(const rb_callable_method_entry_t *me)
  2670. {
  2671. const rb_method_entry_t *orig_me = me->def->body.alias.original_me;
  2672. const rb_callable_method_entry_t *cme;
  2673. if (orig_me->defined_class == 0) {
  2674. VALUE defined_class = rb_find_defined_class_by_owner(me->defined_class, orig_me->owner);
  2675. VM_ASSERT(RB_TYPE_P(orig_me->owner, T_MODULE));
  2676. cme = rb_method_entry_complement_defined_class(orig_me, me->called_id, defined_class);
  2677. if (me->def->alias_count + me->def->complemented_count == 0) {
  2678. RB_OBJ_WRITE(me, &me->def->body.alias.original_me, cme);
  2679. }
  2680. else {
  2681. rb_method_definition_t *def =
  2682. rb_method_definition_create(VM_METHOD_TYPE_ALIAS, me->def->original_id);
  2683. rb_method_definition_set((rb_method_entry_t *)me, def, (void *)cme);
  2684. }
  2685. }
  2686. else {
  2687. cme = (const rb_callable_method_entry_t *)orig_me;
  2688. }
  2689. VM_ASSERT(callable_method_entry_p(cme));
  2690. return cme;
  2691. }
  2692. static VALUE
  2693. vm_call_alias(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
  2694. {
  2695. calling->cc = &VM_CC_ON_STACK(Qundef,
  2696. vm_call_general,
  2697. { 0 },
  2698. aliased_callable_method_entry(vm_cc_cme(calling->cc)));
  2699. return vm_call_method_each_type(ec, cfp, calling);
  2700. }
  2701. static enum method_missing_reason
  2702. ci_missing_reason(const struct rb_callinfo *ci)
  2703. {
  2704. enum method_missing_reason stat = MISSING_NOENTRY;
  2705. if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
  2706. if (vm_ci_flag(ci) & VM_CALL_FCALL) stat |= MISSING_FCALL;
  2707. if (vm_ci_flag(ci) & VM_CALL_SUPER) stat |= MISSING_SUPER;
  2708. return stat;
  2709. }
  2710. static VALUE
  2711. vm_call_symbol(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
  2712. struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE symbol)
  2713. {
  2714. ASSUME(calling->argc >= 0);
  2715. /* Also assumes CALLER_SETUP_ARG is already done. */
  2716. enum method_missing_reason missing_reason = MISSING_NOENTRY;
  2717. int argc = calling->argc;
  2718. VALUE recv = calling->recv;
  2719. VALUE klass = CLASS_OF(recv);
  2720. ID mid = rb_check_id(&symbol);
  2721. int flags = VM_CALL_FCALL |
  2722. VM_CALL_OPT_SEND |
  2723. (calling->kw_splat ? VM_CALL_KW_SPLAT : 0);
  2724. if (UNLIKELY(! mid)) {
  2725. mid = idMethodMissing;
  2726. missing_reason = ci_missing_reason(ci);
  2727. ec->method_missing_reason = missing_reason;
  2728. /* E.g. when argc == 2
  2729. *
  2730. * | | | | TOPN
  2731. * | | +------+
  2732. * | | +---> | arg1 | 0
  2733. * +------+ | +------+
  2734. * | arg1 | -+ +-> | arg0 | 1
  2735. * +------+ | +------+
  2736. * | arg0 | ---+ | sym | 2
  2737. * +------+ +------+
  2738. * | recv | | recv | 3
  2739. * --+------+--------+------+------
  2740. */
  2741. int i = argc;
  2742. CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
  2743. INC_SP(1);
  2744. MEMMOVE(&TOPN(i - 1), &TOPN(i), VALUE, i);
  2745. argc = ++calling->argc;
  2746. if (rb_method_basic_definition_p(klass, idMethodMissing)) {
  2747. /* Inadvertent symbol creation shall be forbidden, see [Feature #5112] */
  2748. TOPN(i) = symbol;
  2749. int priv = vm_ci_flag(ci) & (VM_CALL_FCALL | VM_CALL_VCALL);
  2750. const VALUE *argv = STACK_ADDR_FROM_TOP(argc);
  2751. VALUE exc = rb_make_no_method_exception(
  2752. rb_eNoMethodError, 0, recv, argc, argv, priv);
  2753. rb_exc_raise(exc);
  2754. }
  2755. else {
  2756. TOPN(i) = rb_str_intern(symbol);
  2757. }
  2758. }
  2759. calling->ci = &VM_CI_ON_STACK(mid, flags, argc, vm_ci_kwarg(ci));
  2760. calling->cc = &VM_CC_ON_STACK(klass,
  2761. vm_call_general,
  2762. { .method_missing_reason = missing_reason },
  2763. rb_callable_method_entry_with_refinements(klass, mid, NULL));
  2764. return vm_call_method(ec, reg_cfp, calling);
  2765. }
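/* Illustrative note: vm_call_symbol backs calls such as obj.send(name, ...).
 * When `name` is a String that does not correspond to any known ID (so
 * rb_check_id returns 0), the arguments are shifted as drawn above, the name
 * is inserted as the first argument and the call is redirected to
 * method_missing; if method_missing is not overridden, a NoMethodError is
 * raised directly so that no Symbol is created needlessly. */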
  2766. static VALUE
  2767. vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
  2768. {
  2769. RB_DEBUG_COUNTER_INC(ccf_opt_send);
  2770. int i;
  2771. VALUE sym;
  2772. CALLER_SETUP_ARG(reg_cfp, calling, calling->ci);
  2773. i = calling->argc - 1;
  2774. if (calling->argc == 0) {
  2775. rb_raise(rb_eArgError, "no method name given");
  2776. }
  2777. else {
  2778. sym = TOPN(i);
  2779. /* E.g. when i == 2
  2780. *
  2781. * | | | | TOPN
  2782. * +------+ | |
  2783. * | arg1 | ---+ | | 0
  2784. * +------+ | +------+
  2785. * | arg0 | -+ +-> | arg1 | 1
  2786. * +------+ | +------+
  2787. * | sym | +---> | arg0 | 2
  2788. * +------+ +------+
  2789. * | recv | | recv | 3
  2790. * --+------+--------+------+------
  2791. */
  2792. /* shift arguments */
  2793. if (i > 0) {
  2794. MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
  2795. }
  2796. calling->argc -= 1;
  2797. DEC_SP(1);
  2798. return vm_call_symbol(ec, reg_cfp, calling, calling->ci, sym);
  2799. }
  2800. }
  2801. static inline VALUE vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci, bool is_lambda, VALUE block_handler);
  2802. NOINLINE(static VALUE
  2803. vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
  2804. struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler));
  2805. static VALUE
  2806. vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
  2807. struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler)
  2808. {
  2809. int argc = calling->argc;
  2810. /* remove self */
  2811. if (argc > 0) MEMMOVE(&TOPN(argc), &TOPN(argc-1), VALUE, argc);
  2812. DEC_SP(1);
  2813. return vm_invoke_block(ec, reg_cfp, calling, ci, false, block_handler);
  2814. }
  2815. static VALUE
  2816. vm_call_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
  2817. {
  2818. RB_DEBUG_COUNTER_INC(ccf_opt_call);
  2819. const struct rb_callinfo *ci = calling->ci;
  2820. VALUE procval = calling->recv;
  2821. return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, VM_BH_FROM_PROC(procval));
  2822. }
  2823. static VALUE
  2824. vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
  2825. {
  2826. RB_DEBUG_COUNTER_INC(ccf_opt_block_call);
  2827. VALUE block_handler = VM_ENV_BLOCK_HANDLER(VM_CF_LEP(reg_cfp));
  2828. const struct rb_callinfo *ci = calling->ci;
  2829. if (BASIC_OP_UNREDEFINED_P(BOP_CALL, PROC_REDEFINED_OP_FLAG)) {
  2830. return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, block_handler);
  2831. }
  2832. else {
  2833. calling->recv = rb_vm_bh_to_procval(ec, block_handler);
  2834. calling->cc = rb_vm_search_method_slowpath(ci, CLASS_OF(calling->recv));
  2835. return vm_call_general(ec, reg_cfp, calling);
  2836. }
  2837. }
  2838. static VALUE
  2839. vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
  2840. const struct rb_callinfo *orig_ci, enum method_missing_reason reason)
  2841. {
  2842. RB_DEBUG_COUNTER_INC(ccf_method_missing);
  2843. VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
  2844. unsigned int argc;
  2845. CALLER_SETUP_ARG(reg_cfp, calling, orig_ci);
  2846. argc = calling->argc + 1;
  2847. unsigned int flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | (calling->kw_splat ? VM_CALL_KW_SPLAT : 0);
  2848. calling->argc = argc;
  2849. /* shift arguments: m(a, b, c) #=> method_missing(:m, a, b, c) */
  2850. CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
  2851. vm_check_canary(ec, reg_cfp->sp);
  2852. if (argc > 1) {
  2853. MEMMOVE(argv+1, argv, VALUE, argc-1);
  2854. }
  2855. argv[0] = ID2SYM(vm_ci_mid(orig_ci));
  2856. INC_SP(1);
  2857. ec->method_missing_reason = reason;
  2858. calling->ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci));
  2859. calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, { 0 },
  2860. rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL));
  2861. return vm_call_method(ec, reg_cfp, calling);
  2862. }
  2863. static VALUE
  2864. vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
  2865. {
  2866. return vm_call_method_missing_body(ec, reg_cfp, calling, calling->ci, vm_cc_cmethod_missing_reason(calling->cc));
  2867. }
  2868. static const rb_callable_method_entry_t *refined_method_callable_without_refinement(const rb_callable_method_entry_t *me);
  2869. static VALUE
  2870. vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, VALUE klass)
  2871. {
  2872. klass = RCLASS_SUPER(klass);
  2873. const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, vm_ci_mid(calling->ci)) : NULL;
  2874. if (cme == NULL) {
  2875. return vm_call_method_nome(ec, cfp, calling);
  2876. }
  2877. if (cme->def->type == VM_METHOD_TYPE_REFINED &&
  2878. cme->def->body.refined.orig_me) {
  2879. cme = refined_method_callable_without_refinement(cme);
  2880. }
  2881. calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, { 0 }, cme);
  2882. return vm_call_method_each_type(ec, cfp, calling);
  2883. }
  2884. static inline VALUE
  2885. find_refinement(VALUE refinements, VALUE klass)
  2886. {
  2887. if (NIL_P(refinements)) {
  2888. return Qnil;
  2889. }
  2890. return rb_hash_lookup(refinements, klass);
  2891. }
  2892. PUREFUNC(static rb_control_frame_t * current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp));
  2893. static rb_control_frame_t *
  2894. current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
  2895. {
  2896. rb_control_frame_t *top_cfp = cfp;
  2897. if (cfp->iseq && cfp->iseq->body->type == ISEQ_TYPE_BLOCK) {
  2898. const rb_iseq_t *local_iseq = cfp->iseq->body->local_iseq;
  2899. do {
  2900. cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
  2901. if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
  2902. /* TODO: orphan block */
  2903. return top_cfp;
  2904. }
  2905. } while (cfp->iseq != local_iseq);
  2906. }
  2907. return cfp;
  2908. }
  2909. static const rb_callable_method_entry_t *
  2910. refined_method_callable_without_refinement(const rb_callable_method_entry_t *me)
  2911. {
  2912. const rb_method_entry_t *orig_me = me->def->body.refined.orig_me;
  2913. const rb_callable_method_entry_t *cme;
  2914. if (orig_me->defined_class == 0) {
  2915. cme = NULL;
  2916. rb_notimplement();
  2917. }
  2918. else {
  2919. cme = (const rb_callable_method_entry_t *)orig_me;
  2920. }
  2921. VM_ASSERT(callable_method_entry_p(cme));
  2922. if (UNDEFINED_METHOD_ENTRY_P(cme)) {
  2923. cme = NULL;
  2924. }
  2925. return cme;
  2926. }
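/* Walk the cref chain for an active refinement of the callee's owner and return the refined method entry if one applies; otherwise fall back to the original (pre-refinement) method or the superclass method. */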
  2927. static const rb_callable_method_entry_t *
  2928. search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
  2929. {
  2930. ID mid = vm_ci_mid(calling->ci);
  2931. const rb_cref_t *cref = vm_get_cref(cfp->ep);
  2932. const struct rb_callcache * const cc = calling->cc;
  2933. const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
  2934. for (; cref; cref = CREF_NEXT(cref)) {
  2935. const VALUE refinement = find_refinement(CREF_REFINEMENTS(cref), vm_cc_cme(cc)->owner);
  2936. if (NIL_P(refinement)) continue;
  2937. const rb_callable_method_entry_t *const ref_me =
  2938. rb_callable_method_entry(refinement, mid);
  2939. if (ref_me) {
  2940. if (vm_cc_call(cc) == vm_call_super_method) {
  2941. const rb_control_frame_t *top_cfp = current_method_entry(ec, cfp);
  2942. const rb_callable_method_entry_t *top_me = rb_vm_frame_method_entry(top_cfp);
  2943. if (top_me && rb_method_definition_eq(ref_me->def, top_me->def)) {
  2944. continue;
  2945. }
  2946. }
  2947. if (cme->def->type != VM_METHOD_TYPE_REFINED ||
  2948. cme->def != ref_me->def) {
  2949. cme = ref_me;
  2950. }
  2951. if (ref_me->def->type != VM_METHOD_TYPE_REFINED) {
  2952. return cme;
  2953. }
  2954. }
  2955. else {
  2956. return NULL;
  2957. }
  2958. }
  2959. if (vm_cc_cme(cc)->def->body.refined.orig_me) {
  2960. return refined_method_callable_without_refinement(vm_cc_cme(cc));
  2961. }
  2962. else {
  2963. VALUE klass = RCLASS_SUPER(vm_cc_cme(cc)->defined_class);
  2964. const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, mid) : NULL;
  2965. return cme;
  2966. }
  2967. }
  2968. static VALUE
  2969. vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
  2970. {
  2971. struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, { 0 },
  2972. search_refined_method(ec, cfp, calling));
  2973. if (vm_cc_cme(ref_cc)) {
2974. calling->cc = ref_cc;
  2975. return vm_call_method(ec, cfp, calling);
  2976. }
  2977. else {
  2978. return vm_call_method_nome(ec, cfp, calling);
  2979. }
  2980. }
  2981. #define VM_CALL_METHOD_ATTR(var, func, nohook) \
  2982. if (UNLIKELY(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) { \
  2983. EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv, vm_cc_cme(cc)->def->original_id, \
  2984. vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef); \
  2985. var = func; \
  2986. EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv, vm_cc_cme(cc)->def->original_id, \
  2987. vm_ci_mid(ci), vm_cc_cme(cc)->owner, (var)); \
  2988. } \
  2989. else { \
  2990. nohook; \
  2991. var = func; \
  2992. }
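/* Dispatch on the method definition type (iseq, cfunc, attrset, ivar, bmethod, alias, optimized, refined, ...) and install the matching fastpath handler into the call cache where it is safe to do so. */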
  2993. static VALUE
  2994. vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
  2995. {
  2996. const struct rb_callinfo *ci = calling->ci;
  2997. const struct rb_callcache *cc = calling->cc;
  2998. VALUE v;
  2999. switch (vm_cc_cme(cc)->def->type) {
  3000. case VM_METHOD_TYPE_ISEQ:
  3001. CC_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE);
  3002. return vm_call_iseq_setup(ec, cfp, calling);
  3003. case VM_METHOD_TYPE_NOTIMPLEMENTED:
  3004. case VM_METHOD_TYPE_CFUNC:
  3005. CC_SET_FASTPATH(cc, vm_call_cfunc, TRUE);
  3006. return vm_call_cfunc(ec, cfp, calling);
  3007. case VM_METHOD_TYPE_ATTRSET:
  3008. CALLER_SETUP_ARG(cfp, calling, ci);
  3009. CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
  3010. rb_check_arity(calling->argc, 1, 1);
  3011. vm_cc_attr_index_set(cc, 0);
  3012. const unsigned int aset_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG);
  3013. VM_CALL_METHOD_ATTR(v,
  3014. vm_call_attrset(ec, cfp, calling),
  3015. CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
  3016. return v;
  3017. case VM_METHOD_TYPE_IVAR:
  3018. CALLER_SETUP_ARG(cfp, calling, ci);
  3019. CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
  3020. rb_check_arity(calling->argc, 0, 0);
  3021. vm_cc_attr_index_set(cc, 0);
  3022. const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT);
  3023. VM_CALL_METHOD_ATTR(v,
  3024. vm_call_ivar(ec, cfp, calling),
  3025. CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & ivar_mask)));
  3026. return v;
  3027. case VM_METHOD_TYPE_MISSING:
  3028. vm_cc_method_missing_reason_set(cc, 0);
  3029. CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
  3030. return vm_call_method_missing(ec, cfp, calling);
  3031. case VM_METHOD_TYPE_BMETHOD:
  3032. CC_SET_FASTPATH(cc, vm_call_bmethod, TRUE);
  3033. return vm_call_bmethod(ec, cfp, calling);
  3034. case VM_METHOD_TYPE_ALIAS:
  3035. CC_SET_FASTPATH(cc, vm_call_alias, TRUE);
  3036. return vm_call_alias(ec, cfp, calling);
  3037. case VM_METHOD_TYPE_OPTIMIZED:
  3038. switch (vm_cc_cme(cc)->def->body.optimize_type) {
  3039. case OPTIMIZED_METHOD_TYPE_SEND:
  3040. CC_SET_FASTPATH(cc, vm_call_opt_send, TRUE);
  3041. return vm_call_opt_send(ec, cfp, calling);
  3042. case OPTIMIZED_METHOD_TYPE_CALL:
  3043. CC_SET_FASTPATH(cc, vm_call_opt_call, TRUE);
  3044. return vm_call_opt_call(ec, cfp, calling);
  3045. case OPTIMIZED_METHOD_TYPE_BLOCK_CALL:
  3046. CC_SET_FASTPATH(cc, vm_call_opt_block_call, TRUE);
  3047. return vm_call_opt_block_call(ec, cfp, calling);
  3048. default:
  3049. rb_bug("vm_call_method: unsupported optimized method type (%d)",
  3050. vm_cc_cme(cc)->def->body.optimize_type);
  3051. }
  3052. case VM_METHOD_TYPE_UNDEF:
  3053. break;
  3054. case VM_METHOD_TYPE_ZSUPER:
  3055. return vm_call_zsuper(ec, cfp, calling, RCLASS_ORIGIN(vm_cc_cme(cc)->defined_class));
  3056. case VM_METHOD_TYPE_REFINED:
  3057. // CC_SET_FASTPATH(cc, vm_call_refined, TRUE);
  3058. // should not set FASTPATH since vm_call_refined assumes cc->call is vm_call_super_method on invokesuper.
  3059. return vm_call_refined(ec, cfp, calling);
  3060. }
  3061. rb_bug("vm_call_method: unsupported method type (%d)", vm_cc_cme(cc)->def->type);
  3062. }
  3063. NORETURN(static void vm_raise_method_missing(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE obj, int call_status));
  3064. static VALUE
  3065. vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
  3066. {
  3067. /* method missing */
  3068. const struct rb_callinfo *ci = calling->ci;
  3069. const int stat = ci_missing_reason(ci);
  3070. if (vm_ci_mid(ci) == idMethodMissing) {
  3071. rb_control_frame_t *reg_cfp = cfp;
  3072. VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
  3073. vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat);
  3074. }
  3075. else {
  3076. return vm_call_method_missing_body(ec, cfp, calling, ci, stat);
  3077. }
  3078. }
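/* Main dispatch for a resolved call cache: apply visibility rules (private requires an FCALL; protected requires the caller's self to be a kind of the defining class) before dispatching by method type; a missing method entry goes to vm_call_method_nome. */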
  3079. static inline VALUE
  3080. vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
  3081. {
  3082. const struct rb_callinfo *ci = calling->ci;
  3083. const struct rb_callcache *cc = calling->cc;
  3084. VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
  3085. if (vm_cc_cme(cc) != NULL) {
  3086. switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
  3087. case METHOD_VISI_PUBLIC: /* likely */
  3088. return vm_call_method_each_type(ec, cfp, calling);
  3089. case METHOD_VISI_PRIVATE:
  3090. if (!(vm_ci_flag(ci) & VM_CALL_FCALL)) {
  3091. enum method_missing_reason stat = MISSING_PRIVATE;
  3092. if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
  3093. vm_cc_method_missing_reason_set(cc, stat);
  3094. CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
  3095. return vm_call_method_missing(ec, cfp, calling);
  3096. }
  3097. return vm_call_method_each_type(ec, cfp, calling);
  3098. case METHOD_VISI_PROTECTED:
  3099. if (!(vm_ci_flag(ci) & VM_CALL_OPT_SEND)) {
  3100. if (!rb_obj_is_kind_of(cfp->self, vm_cc_cme(cc)->defined_class)) {
  3101. vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
  3102. return vm_call_method_missing(ec, cfp, calling);
  3103. }
  3104. else {
  3105. /* caching method info to dummy cc */
  3106. VM_ASSERT(vm_cc_cme(cc) != NULL);
  3107. struct rb_callcache cc_on_stack = *cc;
  3108. FL_SET_RAW((VALUE)&cc_on_stack, VM_CALLCACHE_UNMARKABLE);
  3109. calling->cc = &cc_on_stack;
  3110. return vm_call_method_each_type(ec, cfp, calling);
  3111. }
  3112. }
  3113. return vm_call_method_each_type(ec, cfp, calling);
  3114. default:
  3115. rb_bug("unreachable");
  3116. }
  3117. }
  3118. else {
  3119. return vm_call_method_nome(ec, cfp, calling);
  3120. }
  3121. }
  3122. static VALUE
  3123. vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
  3124. {
  3125. RB_DEBUG_COUNTER_INC(ccf_general);
  3126. return vm_call_method(ec, reg_cfp, calling);
  3127. }
  3128. void
  3129. rb_vm_cc_general(const struct rb_callcache *cc)
  3130. {
  3131. VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
  3132. VM_ASSERT(cc != vm_cc_empty());
  3133. *(vm_call_handler *)&cc->call_ = vm_call_general;
  3134. }
  3135. static VALUE
  3136. vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
  3137. {
  3138. RB_DEBUG_COUNTER_INC(ccf_super_method);
3139. /* this check is required to distinguish this function from other call handlers. */
  3140. const struct rb_callcache *cc = calling->cc;
3141. if (vm_cc_call(cc) != vm_call_super_method) rb_bug("vm_call_super_method: cc->call_ is not vm_call_super_method");
  3142. return vm_call_method(ec, reg_cfp, calling);
  3143. }
  3144. /* super */
  3145. static inline VALUE
  3146. vm_search_normal_superclass(VALUE klass)
  3147. {
  3148. if (BUILTIN_TYPE(klass) == T_ICLASS &&
  3149. FL_TEST_RAW(RBASIC(klass)->klass, RMODULE_IS_REFINEMENT)) {
  3150. klass = RBASIC(klass)->klass;
  3151. }
  3152. klass = RCLASS_ORIGIN(klass);
  3153. return RCLASS_SUPER(klass);
  3154. }
  3155. NORETURN(static void vm_super_outside(void));
  3156. static void
  3157. vm_super_outside(void)
  3158. {
  3159. rb_raise(rb_eNoMethodError, "super called outside of method");
  3160. }
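/* Resolve the target of super: validate the calling context, rebuild the call info with the original method id, and search for the method starting at the superclass of the defining class, caching the result in cd->cc. */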
  3161. static const struct rb_callcache *
  3162. vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *cd, VALUE recv)
  3163. {
  3164. VALUE current_defined_class;
  3165. const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
  3166. if (!me) {
  3167. vm_super_outside();
  3168. }
  3169. current_defined_class = me->defined_class;
  3170. if (!NIL_P(RCLASS_REFINED_CLASS(current_defined_class))) {
  3171. current_defined_class = RCLASS_REFINED_CLASS(current_defined_class);
  3172. }
  3173. if (BUILTIN_TYPE(current_defined_class) != T_MODULE &&
  3174. !FL_TEST_RAW(current_defined_class, RMODULE_INCLUDED_INTO_REFINEMENT) &&
  3175. reg_cfp->iseq != method_entry_iseqptr(me) &&
  3176. !rb_obj_is_kind_of(recv, current_defined_class)) {
  3177. VALUE m = RB_TYPE_P(current_defined_class, T_ICLASS) ?
  3178. RCLASS_INCLUDER(current_defined_class) : current_defined_class;
  3179. if (m) { /* not bound UnboundMethod */
  3180. rb_raise(rb_eTypeError,
  3181. "self has wrong type to call super in this context: "
  3182. "%"PRIsVALUE" (expected %"PRIsVALUE")",
  3183. rb_obj_class(recv), m);
  3184. }
  3185. }
  3186. if (me->def->type == VM_METHOD_TYPE_BMETHOD && (vm_ci_flag(cd->ci) & VM_CALL_ZSUPER)) {
  3187. rb_raise(rb_eRuntimeError,
  3188. "implicit argument passing of super from method defined"
  3189. " by define_method() is not supported."
  3190. " Specify all arguments explicitly.");
  3191. }
  3192. ID mid = me->def->original_id;
  3193. // update iseq. really? (TODO)
  3194. cd->ci = vm_ci_new_runtime(mid,
  3195. vm_ci_flag(cd->ci),
  3196. vm_ci_argc(cd->ci),
  3197. vm_ci_kwarg(cd->ci));
  3198. RB_OBJ_WRITTEN(reg_cfp->iseq, Qundef, cd->ci);
  3199. const struct rb_callcache *cc;
  3200. VALUE klass = vm_search_normal_superclass(me->defined_class);
  3201. if (!klass) {
  3202. /* bound instance method of module */
  3203. cc = vm_cc_new(klass, NULL, vm_call_method_missing);
  3204. RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
  3205. }
  3206. else {
  3207. cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, klass);
  3208. const rb_callable_method_entry_t *cached_cme = vm_cc_cme(cc);
  3209. // define_method can cache for different method id
  3210. if (cached_cme == NULL) {
  3211. // temporary CC. revisit it
  3212. static const struct rb_callcache *empty_cc_for_super = NULL;
  3213. if (empty_cc_for_super == NULL) {
  3214. empty_cc_for_super = vm_cc_new(0, NULL, vm_call_super_method);
  3215. FL_SET_RAW((VALUE)empty_cc_for_super, VM_CALLCACHE_UNMARKABLE);
  3216. rb_gc_register_mark_object((VALUE)empty_cc_for_super);
  3217. }
  3218. RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc = empty_cc_for_super);
  3219. }
  3220. else if (cached_cme->called_id != mid) {
  3221. const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);
  3222. cc = vm_cc_new(klass, cme, vm_call_super_method);
  3223. RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
  3224. }
  3225. else {
  3226. switch (cached_cme->def->type) {
  3227. // vm_call_refined (search_refined_method) assumes cc->call is vm_call_super_method on invokesuper
  3228. case VM_METHOD_TYPE_REFINED:
  3229. // cc->klass is superclass of receiver class. Checking cc->klass is not enough to invalidate IVC for the receiver class.
  3230. case VM_METHOD_TYPE_ATTRSET:
  3231. case VM_METHOD_TYPE_IVAR:
  3232. vm_cc_call_set(cc, vm_call_super_method); // invalidate fastpath
  3233. break;
  3234. default:
  3235. break; // use fastpath
  3236. }
  3237. }
  3238. }
  3239. return cc;
  3240. }
  3241. /* yield */
  3242. static inline int
  3243. block_proc_is_lambda(const VALUE procval)
  3244. {
  3245. rb_proc_t *proc;
  3246. if (procval) {
  3247. GetProcPtr(procval, proc);
  3248. return proc->is_lambda;
  3249. }
  3250. else {
  3251. return 0;
  3252. }
  3253. }
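/* Yield to an ifunc (C-implemented) block: push an IFUNC frame, call the C function with the block arguments, then pop the frame. */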
  3254. static VALUE
  3255. vm_yield_with_cfunc(rb_execution_context_t *ec,
  3256. const struct rb_captured_block *captured,
  3257. VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
  3258. const rb_callable_method_entry_t *me)
  3259. {
  3260. int is_lambda = FALSE; /* TODO */
  3261. VALUE val, arg, blockarg;
  3262. int frame_flag;
  3263. const struct vm_ifunc *ifunc = captured->code.ifunc;
  3264. if (is_lambda) {
  3265. arg = rb_ary_new4(argc, argv);
  3266. }
  3267. else if (argc == 0) {
  3268. arg = Qnil;
  3269. }
  3270. else {
  3271. arg = argv[0];
  3272. }
  3273. blockarg = rb_vm_bh_to_procval(ec, block_handler);
  3274. frame_flag = VM_FRAME_MAGIC_IFUNC | VM_FRAME_FLAG_CFRAME | (me ? VM_FRAME_FLAG_BMETHOD : 0);
  3275. if (kw_splat) {
  3276. frame_flag |= VM_FRAME_FLAG_CFRAME_KW;
  3277. }
  3278. vm_push_frame(ec, (const rb_iseq_t *)captured->code.ifunc,
  3279. frame_flag,
  3280. self,
  3281. VM_GUARDED_PREV_EP(captured->ep),
  3282. (VALUE)me,
  3283. 0, ec->cfp->sp, 0, 0);
  3284. val = (*ifunc->func)(arg, (VALUE)ifunc->data, argc, argv, blockarg);
  3285. rb_vm_pop_frame(ec);
  3286. return val;
  3287. }
  3288. static VALUE
  3289. vm_yield_with_symbol(rb_execution_context_t *ec, VALUE symbol, int argc, const VALUE *argv, int kw_splat, VALUE block_handler)
  3290. {
  3291. return rb_sym_proc_call(SYM2ID(symbol), argc, argv, kw_splat, rb_vm_bh_to_procval(ec, block_handler));
  3292. }
  3293. static inline int
  3294. vm_callee_setup_block_arg_arg0_splat(rb_control_frame_t *cfp, const rb_iseq_t *iseq, VALUE *argv, VALUE ary)
  3295. {
  3296. int i;
  3297. long len = RARRAY_LEN(ary);
  3298. CHECK_VM_STACK_OVERFLOW(cfp, iseq->body->param.lead_num);
  3299. for (i=0; i<len && i<iseq->body->param.lead_num; i++) {
  3300. argv[i] = RARRAY_AREF(ary, i);
  3301. }
  3302. return i;
  3303. }
  3304. static inline VALUE
  3305. vm_callee_setup_block_arg_arg0_check(VALUE *argv)
  3306. {
  3307. VALUE ary, arg0 = argv[0];
  3308. ary = rb_check_array_type(arg0);
  3309. #if 0
  3310. argv[0] = arg0;
  3311. #else
  3312. VM_ASSERT(argv[0] == arg0);
  3313. #endif
  3314. return ary;
  3315. }
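/* Set up block arguments. For simple iseqs a single Array argument may be splatted onto the leading parameters; arity mismatches are padded with nil or truncated for blocks, and raise an ArgumentError for method-style (lambda) setup. */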
  3316. static int
  3317. vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type)
  3318. {
  3319. if (rb_simple_iseq_p(iseq)) {
  3320. rb_control_frame_t *cfp = ec->cfp;
  3321. VALUE arg0;
  3322. CALLER_SETUP_ARG(cfp, calling, ci);
  3323. CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
  3324. if (arg_setup_type == arg_setup_block &&
  3325. calling->argc == 1 &&
  3326. iseq->body->param.flags.has_lead &&
  3327. !iseq->body->param.flags.ambiguous_param0 &&
  3328. !NIL_P(arg0 = vm_callee_setup_block_arg_arg0_check(argv))) {
  3329. calling->argc = vm_callee_setup_block_arg_arg0_splat(cfp, iseq, argv, arg0);
  3330. }
  3331. if (calling->argc != iseq->body->param.lead_num) {
  3332. if (arg_setup_type == arg_setup_block) {
  3333. if (calling->argc < iseq->body->param.lead_num) {
  3334. int i;
  3335. CHECK_VM_STACK_OVERFLOW(cfp, iseq->body->param.lead_num);
  3336. for (i=calling->argc; i<iseq->body->param.lead_num; i++) argv[i] = Qnil;
3337. calling->argc = iseq->body->param.lead_num; /* fill the remaining parameters with nil */
  3338. }
  3339. else if (calling->argc > iseq->body->param.lead_num) {
  3340. calling->argc = iseq->body->param.lead_num; /* simply truncate arguments */
  3341. }
  3342. }
  3343. else {
  3344. argument_arity_error(ec, iseq, calling->argc, iseq->body->param.lead_num, iseq->body->param.lead_num);
  3345. }
  3346. }
  3347. return 0;
  3348. }
  3349. else {
  3350. return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_type);
  3351. }
  3352. }
  3353. static int
  3354. vm_yield_setup_args(rb_execution_context_t *ec, const rb_iseq_t *iseq, const int argc, VALUE *argv, int kw_splat, VALUE block_handler, enum arg_setup_type arg_setup_type)
  3355. {
  3356. struct rb_calling_info calling_entry, *calling;
  3357. calling = &calling_entry;
  3358. calling->argc = argc;
  3359. calling->block_handler = block_handler;
  3360. calling->kw_splat = kw_splat;
  3361. calling->recv = Qundef;
  3362. struct rb_callinfo dummy_ci = VM_CI_ON_STACK(0, (kw_splat ? VM_CALL_KW_SPLAT : 0), 0, 0);
  3363. return vm_callee_setup_block_arg(ec, calling, &dummy_ci, iseq, argv, arg_setup_type);
  3364. }
  3365. /* ruby iseq -> ruby block */
  3366. static VALUE
  3367. vm_invoke_iseq_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
  3368. struct rb_calling_info *calling, const struct rb_callinfo *ci,
  3369. bool is_lambda, VALUE block_handler)
  3370. {
  3371. const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
  3372. const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
  3373. const int arg_size = iseq->body->param.size;
  3374. VALUE * const rsp = GET_SP() - calling->argc;
  3375. int opt_pc = vm_callee_setup_block_arg(ec, calling, ci, iseq, rsp, is_lambda ? arg_setup_method : arg_setup_block);
  3376. SET_SP(rsp);
  3377. vm_push_frame(ec, iseq,
  3378. VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0),
  3379. captured->self,
  3380. VM_GUARDED_PREV_EP(captured->ep), 0,
  3381. iseq->body->iseq_encoded + opt_pc,
  3382. rsp + arg_size,
  3383. iseq->body->local_table_size - arg_size, iseq->body->stack_max);
  3384. return Qundef;
  3385. }
  3386. static VALUE
  3387. vm_invoke_symbol_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
  3388. struct rb_calling_info *calling, const struct rb_callinfo *ci,
  3389. MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
  3390. {
  3391. if (calling->argc < 1) {
  3392. rb_raise(rb_eArgError, "no receiver given");
  3393. }
  3394. else {
  3395. VALUE symbol = VM_BH_TO_SYMBOL(block_handler);
  3396. CALLER_SETUP_ARG(reg_cfp, calling, ci);
  3397. calling->recv = TOPN(--calling->argc);
  3398. return vm_call_symbol(ec, reg_cfp, calling, ci, symbol);
  3399. }
  3400. }
  3401. static VALUE
  3402. vm_invoke_ifunc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
  3403. struct rb_calling_info *calling, const struct rb_callinfo *ci,
  3404. MAYBE_UNUSED(bool is_lambda), VALUE block_handler)
  3405. {
  3406. VALUE val;
  3407. int argc;
  3408. const struct rb_captured_block *captured = VM_BH_TO_IFUNC_BLOCK(block_handler);
  3409. CALLER_SETUP_ARG(ec->cfp, calling, ci);
  3410. CALLER_REMOVE_EMPTY_KW_SPLAT(ec->cfp, calling, ci);
  3411. argc = calling->argc;
  3412. val = vm_yield_with_cfunc(ec, captured, captured->self, argc, STACK_ADDR_FROM_TOP(argc), calling->kw_splat, calling->block_handler, NULL);
3413. POPN(argc); /* TODO: should this be done before the C call/yield? */
  3414. return val;
  3415. }
  3416. static VALUE
  3417. vm_proc_to_block_handler(VALUE procval)
  3418. {
  3419. const struct rb_block *block = vm_proc_block(procval);
  3420. switch (vm_block_type(block)) {
  3421. case block_type_iseq:
  3422. return VM_BH_FROM_ISEQ_BLOCK(&block->as.captured);
  3423. case block_type_ifunc:
  3424. return VM_BH_FROM_IFUNC_BLOCK(&block->as.captured);
  3425. case block_type_symbol:
  3426. return VM_BH_FROM_SYMBOL(block->as.symbol);
  3427. case block_type_proc:
  3428. return VM_BH_FROM_PROC(block->as.proc);
  3429. }
  3430. VM_UNREACHABLE(vm_yield_with_proc);
  3431. return Qundef;
  3432. }
  3433. static VALUE
  3434. vm_invoke_proc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
  3435. struct rb_calling_info *calling, const struct rb_callinfo *ci,
  3436. bool is_lambda, VALUE block_handler)
  3437. {
  3438. while (vm_block_handler_type(block_handler) == block_handler_type_proc) {
  3439. VALUE proc = VM_BH_TO_PROC(block_handler);
  3440. is_lambda = block_proc_is_lambda(proc);
  3441. block_handler = vm_proc_to_block_handler(proc);
  3442. }
  3443. return vm_invoke_block(ec, reg_cfp, calling, ci, is_lambda, block_handler);
  3444. }
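/* Pick the block invocation routine from the block handler type (iseq, ifunc, proc or symbol) and invoke it. */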
  3445. static inline VALUE
  3446. vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
  3447. struct rb_calling_info *calling, const struct rb_callinfo *ci,
  3448. bool is_lambda, VALUE block_handler)
  3449. {
  3450. VALUE (*func)(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
  3451. struct rb_calling_info *calling, const struct rb_callinfo *ci,
  3452. bool is_lambda, VALUE block_handler);
  3453. switch (vm_block_handler_type(block_handler)) {
  3454. case block_handler_type_iseq: func = vm_invoke_iseq_block; break;
  3455. case block_handler_type_ifunc: func = vm_invoke_ifunc_block; break;
  3456. case block_handler_type_proc: func = vm_invoke_proc_block; break;
  3457. case block_handler_type_symbol: func = vm_invoke_symbol_block; break;
  3458. default: rb_bug("vm_invoke_block: unreachable");
  3459. }
  3460. return func(ec, reg_cfp, calling, ci, is_lambda, block_handler);
  3461. }
  3462. static VALUE
  3463. vm_make_proc_with_iseq(const rb_iseq_t *blockiseq)
  3464. {
  3465. const rb_execution_context_t *ec = GET_EC();
  3466. const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
  3467. struct rb_captured_block *captured;
  3468. if (cfp == 0) {
  3469. rb_bug("vm_make_proc_with_iseq: unreachable");
  3470. }
  3471. captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
  3472. captured->code.iseq = blockiseq;
  3473. return rb_vm_make_proc(ec, captured, rb_cProc);
  3474. }
  3475. static VALUE
  3476. vm_once_exec(VALUE iseq)
  3477. {
  3478. VALUE proc = vm_make_proc_with_iseq((rb_iseq_t *)iseq);
  3479. return rb_proc_call_with_block(proc, 0, 0, Qnil);
  3480. }
  3481. static VALUE
  3482. vm_once_clear(VALUE data)
  3483. {
  3484. union iseq_inline_storage_entry *is = (union iseq_inline_storage_entry *)data;
  3485. is->once.running_thread = NULL;
  3486. return Qnil;
  3487. }
  3488. /* defined insn */
  3489. static bool
  3490. check_respond_to_missing(VALUE obj, VALUE v)
  3491. {
  3492. VALUE args[2];
  3493. VALUE r;
  3494. args[0] = obj; args[1] = Qfalse;
  3495. r = rb_check_funcall(v, idRespond_to_missing, 2, args);
  3496. if (r != Qundef && RTEST(r)) {
  3497. return true;
  3498. }
  3499. else {
  3500. return false;
  3501. }
  3502. }
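/* Implementation of the defined? keyword: each branch checks whether the given kind of expression (ivar, gvar, cvar, constant, method, yield, super, special variable) is defined. */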
  3503. static bool
  3504. vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v)
  3505. {
  3506. VALUE klass;
  3507. enum defined_type type = (enum defined_type)op_type;
  3508. switch (type) {
  3509. case DEFINED_IVAR:
  3510. return rb_ivar_defined(GET_SELF(), SYM2ID(obj));
  3511. break;
  3512. case DEFINED_GVAR:
  3513. return rb_gvar_defined(SYM2ID(obj));
  3514. break;
  3515. case DEFINED_CVAR: {
  3516. const rb_cref_t *cref = vm_get_cref(GET_EP());
  3517. klass = vm_get_cvar_base(cref, GET_CFP(), 0);
  3518. return rb_cvar_defined(klass, SYM2ID(obj));
  3519. break;
  3520. }
  3521. case DEFINED_CONST:
  3522. case DEFINED_CONST_FROM: {
  3523. bool allow_nil = type == DEFINED_CONST;
  3524. klass = v;
  3525. return vm_get_ev_const(ec, klass, SYM2ID(obj), allow_nil, true);
  3526. break;
  3527. }
  3528. case DEFINED_FUNC:
  3529. klass = CLASS_OF(v);
  3530. return rb_ec_obj_respond_to(ec, v, SYM2ID(obj), TRUE);
  3531. break;
  3532. case DEFINED_METHOD:{
  3533. VALUE klass = CLASS_OF(v);
  3534. const rb_method_entry_t *me = rb_method_entry_with_refinements(klass, SYM2ID(obj), NULL);
  3535. if (me) {
  3536. switch (METHOD_ENTRY_VISI(me)) {
  3537. case METHOD_VISI_PRIVATE:
  3538. break;
  3539. case METHOD_VISI_PROTECTED:
  3540. if (!rb_obj_is_kind_of(GET_SELF(), rb_class_real(me->defined_class))) {
  3541. break;
  3542. }
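/* fall through */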
  3543. case METHOD_VISI_PUBLIC:
  3544. return true;
  3545. break;
  3546. default:
  3547. rb_bug("vm_defined: unreachable: %u", (unsigned int)METHOD_ENTRY_VISI(me));
  3548. }
  3549. }
  3550. else {
  3551. return check_respond_to_missing(obj, v);
  3552. }
  3553. break;
  3554. }
  3555. case DEFINED_YIELD:
  3556. if (GET_BLOCK_HANDLER() != VM_BLOCK_HANDLER_NONE) {
  3557. return true;
  3558. }
  3559. break;
  3560. case DEFINED_ZSUPER:
  3561. {
  3562. const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(GET_CFP());
  3563. if (me) {
  3564. VALUE klass = vm_search_normal_superclass(me->defined_class);
  3565. ID id = me->def->original_id;
  3566. return rb_method_boundp(klass, id, 0);
  3567. }
  3568. }
  3569. break;
  3570. case DEFINED_REF:{
  3571. return vm_getspecial(ec, GET_LEP(), Qfalse, FIX2INT(obj)) != Qnil;
  3572. break;
  3573. }
  3574. default:
  3575. rb_bug("unimplemented defined? type (VM)");
  3576. break;
  3577. }
  3578. return false;
  3579. }
  3580. static const VALUE *
  3581. vm_get_ep(const VALUE *const reg_ep, rb_num_t lv)
  3582. {
  3583. rb_num_t i;
  3584. const VALUE *ep = reg_ep;
  3585. for (i = 0; i < lv; i++) {
  3586. ep = GET_PREV_EP(ep);
  3587. }
  3588. return ep;
  3589. }
  3590. static VALUE
  3591. vm_get_special_object(const VALUE *const reg_ep,
  3592. enum vm_special_object_type type)
  3593. {
  3594. switch (type) {
  3595. case VM_SPECIAL_OBJECT_VMCORE:
  3596. return rb_mRubyVMFrozenCore;
  3597. case VM_SPECIAL_OBJECT_CBASE:
  3598. return vm_get_cbase(reg_ep);
  3599. case VM_SPECIAL_OBJECT_CONST_BASE:
  3600. return vm_get_const_base(reg_ep);
  3601. default:
  3602. rb_bug("putspecialobject insn: unknown value_type %d", type);
  3603. }
  3604. }
  3605. static VALUE
  3606. vm_concat_array(VALUE ary1, VALUE ary2st)
  3607. {
  3608. const VALUE ary2 = ary2st;
  3609. VALUE tmp1 = rb_check_to_array(ary1);
  3610. VALUE tmp2 = rb_check_to_array(ary2);
  3611. if (NIL_P(tmp1)) {
  3612. tmp1 = rb_ary_new3(1, ary1);
  3613. }
  3614. if (NIL_P(tmp2)) {
  3615. tmp2 = rb_ary_new3(1, ary2);
  3616. }
  3617. if (tmp1 == ary1) {
  3618. tmp1 = rb_ary_dup(ary1);
  3619. }
  3620. return rb_ary_concat(tmp1, tmp2);
  3621. }
  3622. static VALUE
  3623. vm_splat_array(VALUE flag, VALUE ary)
  3624. {
  3625. VALUE tmp = rb_check_to_array(ary);
  3626. if (NIL_P(tmp)) {
  3627. return rb_ary_new3(1, ary);
  3628. }
  3629. else if (RTEST(flag)) {
  3630. return rb_ary_dup(tmp);
  3631. }
  3632. else {
  3633. return tmp;
  3634. }
  3635. }
  3636. static VALUE
  3637. vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
  3638. {
  3639. enum vm_check_match_type type = ((int)flag) & VM_CHECKMATCH_TYPE_MASK;
  3640. if (flag & VM_CHECKMATCH_ARRAY) {
  3641. long i;
  3642. const long n = RARRAY_LEN(pattern);
  3643. for (i = 0; i < n; i++) {
  3644. VALUE v = RARRAY_AREF(pattern, i);
  3645. VALUE c = check_match(ec, v, target, type);
  3646. if (RTEST(c)) {
  3647. return c;
  3648. }
  3649. }
  3650. return Qfalse;
  3651. }
  3652. else {
  3653. return check_match(ec, pattern, target, type);
  3654. }
  3655. }
  3656. static VALUE
  3657. vm_check_keyword(lindex_t bits, lindex_t idx, const VALUE *ep)
  3658. {
  3659. const VALUE kw_bits = *(ep - bits);
  3660. if (FIXNUM_P(kw_bits)) {
  3661. unsigned int b = (unsigned int)FIX2ULONG(kw_bits);
  3662. if ((idx < KW_SPECIFIED_BITS_MAX) && (b & (0x01 << idx)))
  3663. return Qfalse;
  3664. }
  3665. else {
  3666. VM_ASSERT(RB_TYPE_P(kw_bits, T_HASH));
  3667. if (rb_hash_has_key(kw_bits, INT2FIX(idx))) return Qfalse;
  3668. }
  3669. return Qtrue;
  3670. }
  3671. static void
  3672. vm_dtrace(rb_event_flag_t flag, rb_execution_context_t *ec)
  3673. {
  3674. if (RUBY_DTRACE_METHOD_ENTRY_ENABLED() ||
  3675. RUBY_DTRACE_METHOD_RETURN_ENABLED() ||
  3676. RUBY_DTRACE_CMETHOD_ENTRY_ENABLED() ||
  3677. RUBY_DTRACE_CMETHOD_RETURN_ENABLED()) {
  3678. switch (flag) {
  3679. case RUBY_EVENT_CALL:
  3680. RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, 0, 0);
  3681. return;
  3682. case RUBY_EVENT_C_CALL:
  3683. RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, 0, 0);
  3684. return;
  3685. case RUBY_EVENT_RETURN:
  3686. RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
  3687. return;
  3688. case RUBY_EVENT_C_RETURN:
  3689. RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, 0, 0);
  3690. return;
  3691. }
  3692. }
  3693. }
  3694. static VALUE
  3695. vm_const_get_under(ID id, rb_num_t flags, VALUE cbase)
  3696. {
  3697. VALUE ns;
  3698. if ((ns = vm_search_const_defined_class(cbase, id)) == 0) {
  3699. return ns;
  3700. }
  3701. else if (VM_DEFINECLASS_SCOPED_P(flags)) {
  3702. return rb_public_const_get_at(ns, id);
  3703. }
  3704. else {
  3705. return rb_const_get_at(ns, id);
  3706. }
  3707. }
  3708. static VALUE
  3709. vm_check_if_class(ID id, rb_num_t flags, VALUE super, VALUE klass)
  3710. {
  3711. if (!RB_TYPE_P(klass, T_CLASS)) {
  3712. return 0;
  3713. }
  3714. else if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags)) {
  3715. VALUE tmp = rb_class_real(RCLASS_SUPER(klass));
  3716. if (tmp != super) {
  3717. rb_raise(rb_eTypeError,
  3718. "superclass mismatch for class %"PRIsVALUE"",
  3719. rb_id2str(id));
  3720. }
  3721. else {
  3722. return klass;
  3723. }
  3724. }
  3725. else {
  3726. return klass;
  3727. }
  3728. }
  3729. static VALUE
  3730. vm_check_if_module(ID id, VALUE mod)
  3731. {
  3732. if (!RB_TYPE_P(mod, T_MODULE)) {
  3733. return 0;
  3734. }
  3735. else {
  3736. return mod;
  3737. }
  3738. }
  3739. static VALUE
  3740. declare_under(ID id, VALUE cbase, VALUE c)
  3741. {
  3742. rb_set_class_path_string(c, cbase, rb_id2str(id));
  3743. rb_const_set(cbase, id, c);
  3744. return c;
  3745. }
  3746. static VALUE
  3747. vm_declare_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
  3748. {
  3749. /* new class declaration */
  3750. VALUE s = VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) ? super : rb_cObject;
  3751. VALUE c = declare_under(id, cbase, rb_define_class_id(id, s));
  3752. rb_define_alloc_func(c, rb_get_alloc_func(c));
  3753. rb_class_inherited(s, c);
  3754. return c;
  3755. }
  3756. static VALUE
  3757. vm_declare_module(ID id, VALUE cbase)
  3758. {
  3759. /* new module declaration */
  3760. return declare_under(id, cbase, rb_module_new());
  3761. }
  3762. NORETURN(static void unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old));
  3763. static void
  3764. unmatched_redefinition(const char *type, VALUE cbase, ID id, VALUE old)
  3765. {
  3766. VALUE name = rb_id2str(id);
  3767. VALUE message = rb_sprintf("%"PRIsVALUE" is not a %s",
  3768. name, type);
  3769. VALUE location = rb_const_source_location_at(cbase, id);
  3770. if (!NIL_P(location)) {
  3771. rb_str_catf(message, "\n%"PRIsVALUE":%"PRIsVALUE":"
  3772. " previous definition of %"PRIsVALUE" was here",
  3773. rb_ary_entry(location, 0), rb_ary_entry(location, 1), name);
  3774. }
  3775. rb_exc_raise(rb_exc_new_str(rb_eTypeError, message));
  3776. }
  3777. static VALUE
  3778. vm_define_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
  3779. {
  3780. VALUE klass;
  3781. if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) && !RB_TYPE_P(super, T_CLASS)) {
  3782. rb_raise(rb_eTypeError,
  3783. "superclass must be an instance of Class (given an instance of %"PRIsVALUE")",
  3784. rb_obj_class(super));
  3785. }
  3786. vm_check_if_namespace(cbase);
  3787. /* find klass */
  3788. rb_autoload_load(cbase, id);
  3789. if ((klass = vm_const_get_under(id, flags, cbase)) != 0) {
  3790. if (!vm_check_if_class(id, flags, super, klass))
  3791. unmatched_redefinition("class", cbase, id, klass);
  3792. return klass;
  3793. }
  3794. else {
  3795. return vm_declare_class(id, flags, cbase, super);
  3796. }
  3797. }
  3798. static VALUE
  3799. vm_define_module(ID id, rb_num_t flags, VALUE cbase)
  3800. {
  3801. VALUE mod;
  3802. vm_check_if_namespace(cbase);
  3803. if ((mod = vm_const_get_under(id, flags, cbase)) != 0) {
  3804. if (!vm_check_if_module(id, mod))
  3805. unmatched_redefinition("module", cbase, id, mod);
  3806. return mod;
  3807. }
  3808. else {
  3809. return vm_declare_module(id, cbase);
  3810. }
  3811. }
  3812. static VALUE
  3813. vm_find_or_create_class_by_id(ID id,
  3814. rb_num_t flags,
  3815. VALUE cbase,
  3816. VALUE super)
  3817. {
  3818. rb_vm_defineclass_type_t type = VM_DEFINECLASS_TYPE(flags);
  3819. switch (type) {
  3820. case VM_DEFINECLASS_TYPE_CLASS:
  3821. /* classdef returns class scope value */
  3822. return vm_define_class(id, flags, cbase, super);
  3823. case VM_DEFINECLASS_TYPE_SINGLETON_CLASS:
  3824. /* classdef returns class scope value */
  3825. return rb_singleton_class(cbase);
  3826. case VM_DEFINECLASS_TYPE_MODULE:
  3827. /* classdef returns class scope value */
  3828. return vm_define_module(id, flags, cbase);
  3829. default:
  3830. rb_bug("unknown defineclass type: %d", (int)type);
  3831. }
  3832. }
  3833. static rb_method_visibility_t
  3834. vm_scope_visibility_get(const rb_execution_context_t *ec)
  3835. {
  3836. const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
  3837. if (!vm_env_cref_by_cref(cfp->ep)) {
  3838. return METHOD_VISI_PUBLIC;
  3839. }
  3840. else {
  3841. return CREF_SCOPE_VISI(vm_ec_cref(ec))->method_visi;
  3842. }
  3843. }
  3844. static int
  3845. vm_scope_module_func_check(const rb_execution_context_t *ec)
  3846. {
  3847. const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
  3848. if (!vm_env_cref_by_cref(cfp->ep)) {
  3849. return FALSE;
  3850. }
  3851. else {
  3852. return CREF_SCOPE_VISI(vm_ec_cref(ec))->module_func;
  3853. }
  3854. }
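/* def and define singleton method: determine the target class and visibility from the cref, add the method, and additionally define a singleton-class copy when module_function scope is in effect. */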
  3855. static void
  3856. vm_define_method(const rb_execution_context_t *ec, VALUE obj, ID id, VALUE iseqval, int is_singleton)
  3857. {
  3858. VALUE klass;
  3859. rb_method_visibility_t visi;
  3860. rb_cref_t *cref = vm_ec_cref(ec);
  3861. if (!is_singleton) {
  3862. klass = CREF_CLASS(cref);
  3863. visi = vm_scope_visibility_get(ec);
  3864. }
  3865. else { /* singleton */
  3866. klass = rb_singleton_class(obj); /* class and frozen checked in this API */
  3867. visi = METHOD_VISI_PUBLIC;
  3868. }
  3869. if (NIL_P(klass)) {
  3870. rb_raise(rb_eTypeError, "no class/module to add method");
  3871. }
  3872. rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, visi);
  3873. if (!is_singleton && vm_scope_module_func_check(ec)) {
  3874. klass = rb_singleton_class(klass);
  3875. rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, METHOD_VISI_PUBLIC);
  3876. }
  3877. }
  3878. static VALUE
  3879. vm_invokeblock_i(struct rb_execution_context_struct *ec,
  3880. struct rb_control_frame_struct *reg_cfp,
  3881. struct rb_calling_info *calling)
  3882. {
  3883. const struct rb_callinfo *ci = calling->ci;
  3884. VALUE block_handler = VM_CF_BLOCK_HANDLER(GET_CFP());
  3885. if (block_handler == VM_BLOCK_HANDLER_NONE) {
  3886. rb_vm_localjump_error("no block given (yield)", Qnil, 0);
  3887. }
  3888. else {
  3889. return vm_invoke_block(ec, GET_CFP(), calling, ci, false, block_handler);
  3890. }
  3891. }
  3892. #ifdef MJIT_HEADER
  3893. static const struct rb_callcache *
  3894. vm_search_method_wrap(const struct rb_control_frame_struct *reg_cfp, struct rb_call_data *cd, VALUE recv)
  3895. {
  3896. return vm_search_method((VALUE)reg_cfp->iseq, cd, recv);
  3897. }
  3898. static const struct rb_callcache *
  3899. vm_search_invokeblock(const struct rb_control_frame_struct *reg_cfp, struct rb_call_data *cd, VALUE recv)
  3900. {
  3901. static const struct rb_callcache cc = {
  3902. .flags = T_IMEMO | (imemo_callcache << FL_USHIFT) | VM_CALLCACHE_UNMARKABLE,
  3903. .klass = 0,
  3904. .cme_ = 0,
  3905. .call_ = vm_invokeblock_i,
  3906. .aux_ = {0},
  3907. };
  3908. return &cc;
  3909. }
  3910. # define mexp_search_method vm_search_method_wrap
  3911. # define mexp_search_super vm_search_super_method
  3912. # define mexp_search_invokeblock vm_search_invokeblock
  3913. #else
  3914. enum method_explorer_type {
  3915. mexp_search_method,
  3916. mexp_search_invokeblock,
  3917. mexp_search_super,
  3918. };
  3919. #endif
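/* Common body of the send-ish instructions (send, opt_send_without_block, invokesuper, invokeblock): look up the callee with the given method explorer, invoke it, and either return a cfunc result directly or continue execution of the newly pushed frame. */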
  3920. static
  3921. #ifndef MJIT_HEADER
  3922. inline
  3923. #endif
  3924. VALUE
  3925. vm_sendish(
  3926. struct rb_execution_context_struct *ec,
  3927. struct rb_control_frame_struct *reg_cfp,
  3928. struct rb_call_data *cd,
  3929. VALUE block_handler,
  3930. #ifdef MJIT_HEADER
  3931. const struct rb_callcache *(*method_explorer)(const struct rb_control_frame_struct *cfp, struct rb_call_data *cd, VALUE recv)
  3932. #else
  3933. enum method_explorer_type method_explorer
  3934. #endif
  3935. ) {
  3936. VALUE val = Qundef;
  3937. const struct rb_callinfo *ci = cd->ci;
  3938. const struct rb_callcache *cc;
  3939. int argc = vm_ci_argc(ci);
  3940. VALUE recv = TOPN(argc);
  3941. struct rb_calling_info calling = {
  3942. .block_handler = block_handler,
  3943. .kw_splat = IS_ARGS_KW_SPLAT(ci) > 0,
  3944. .recv = recv,
  3945. .argc = argc,
  3946. .ci = ci,
  3947. };
3948. // The enum-based branch and inlining are faster in the VM, but function pointers without inlining are faster in the JIT.
  3949. #ifdef MJIT_HEADER
  3950. calling.cc = cc = method_explorer(GET_CFP(), cd, recv);
  3951. val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
  3952. #else
  3953. switch (method_explorer) {
  3954. case mexp_search_method:
  3955. calling.cc = cc = vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, CLASS_OF(recv));
  3956. val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
  3957. break;
  3958. case mexp_search_super:
  3959. calling.cc = cc = vm_search_super_method(reg_cfp, cd, recv);
3960. calling.ci = cd->ci; // TODO: is this safe?
  3961. val = vm_cc_call(cc)(ec, GET_CFP(), &calling);
  3962. break;
  3963. case mexp_search_invokeblock:
  3964. val = vm_invokeblock_i(ec, GET_CFP(), &calling);
  3965. break;
  3966. }
  3967. #endif
  3968. if (val != Qundef) {
  3969. return val; /* CFUNC normal return */
  3970. }
  3971. else {
  3972. RESTORE_REGS(); /* CFP pushed in cc->call() */
  3973. }
  3974. #ifdef MJIT_HEADER
3975. /* When calling an ISeq that may catch an exception from JIT-ed
3976. code, we should not call mjit_exec directly, to prevent the
3977. caller frame from being canceled. That's because the caller
3978. frame may have stack values in its local variables, and
3979. cancelling the caller frame would purge them. But directly
3980. calling mjit_exec is faster... */
  3981. if (GET_ISEQ()->body->catch_except_p) {
  3982. VM_ENV_FLAGS_SET(GET_EP(), VM_FRAME_FLAG_FINISH);
  3983. return vm_exec(ec, true);
  3984. }
  3985. else if ((val = mjit_exec(ec)) == Qundef) {
  3986. VM_ENV_FLAGS_SET(GET_EP(), VM_FRAME_FLAG_FINISH);
  3987. return vm_exec(ec, false);
  3988. }
  3989. else {
  3990. return val;
  3991. }
  3992. #else
  3993. /* When calling from VM, longjmp in the callee won't purge any
  3994. JIT-ed caller frames. So it's safe to directly call
  3995. mjit_exec. */
  3996. return mjit_exec(ec);
  3997. #endif
  3998. }
  3999. static VALUE
  4000. vm_opt_str_freeze(VALUE str, int bop, ID id)
  4001. {
  4002. if (BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
  4003. return str;
  4004. }
  4005. else {
  4006. return Qundef;
  4007. }
  4008. }
4009. /* this macro is required in order to use OPTIMIZED_CMP. What a design! */
  4010. #define id_cmp idCmp
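/* Optimized newarray_max/newarray_min: when Array#max / Array#min are not redefined, scan the operand values on the stack directly instead of allocating an Array. */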
  4011. static VALUE
  4012. vm_opt_newarray_max(rb_num_t num, const VALUE *ptr)
  4013. {
  4014. if (BASIC_OP_UNREDEFINED_P(BOP_MAX, ARRAY_REDEFINED_OP_FLAG)) {
  4015. if (num == 0) {
  4016. return Qnil;
  4017. }
  4018. else {
  4019. struct cmp_opt_data cmp_opt = { 0, 0 };
  4020. VALUE result = *ptr;
  4021. rb_snum_t i = num - 1;
  4022. while (i-- > 0) {
  4023. const VALUE v = *++ptr;
  4024. if (OPTIMIZED_CMP(v, result, cmp_opt) > 0) {
  4025. result = v;
  4026. }
  4027. }
  4028. return result;
  4029. }
  4030. }
  4031. else {
  4032. VALUE ary = rb_ary_new4(num, ptr);
  4033. return rb_funcall(ary, idMax, 0);
  4034. }
  4035. }
  4036. static VALUE
  4037. vm_opt_newarray_min(rb_num_t num, const VALUE *ptr)
  4038. {
  4039. if (BASIC_OP_UNREDEFINED_P(BOP_MIN, ARRAY_REDEFINED_OP_FLAG)) {
  4040. if (num == 0) {
  4041. return Qnil;
  4042. }
  4043. else {
  4044. struct cmp_opt_data cmp_opt = { 0, 0 };
  4045. VALUE result = *ptr;
  4046. rb_snum_t i = num - 1;
  4047. while (i-- > 0) {
  4048. const VALUE v = *++ptr;
  4049. if (OPTIMIZED_CMP(v, result, cmp_opt) < 0) {
  4050. result = v;
  4051. }
  4052. }
  4053. return result;
  4054. }
  4055. }
  4056. else {
  4057. VALUE ary = rb_ary_new4(num, ptr);
  4058. return rb_funcall(ary, idMin, 0);
  4059. }
  4060. }
  4061. #undef id_cmp
  4062. #define IMEMO_CONST_CACHE_SHAREABLE IMEMO_FL_USER0
  4063. // For MJIT inlining
  4064. static inline bool
  4065. vm_inlined_ic_hit_p(VALUE flags, VALUE value, const rb_cref_t *ic_cref, rb_serial_t ic_serial, const VALUE *reg_ep)
  4066. {
  4067. if (ic_serial == GET_GLOBAL_CONSTANT_STATE() &&
  4068. ((flags & IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p())) {
  4069. VM_ASSERT((flags & IMEMO_CONST_CACHE_SHAREABLE) ? rb_ractor_shareable_p(value) : true);
  4070. return (ic_cref == NULL || // no need to check CREF
  4071. ic_cref == vm_get_cref(reg_ep));
  4072. }
  4073. return false;
  4074. }
  4075. static bool
  4076. vm_ic_hit_p(const struct iseq_inline_constant_cache_entry *ice, const VALUE *reg_ep)
  4077. {
  4078. VM_ASSERT(IMEMO_TYPE_P(ice, imemo_constcache));
  4079. return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, ice->ic_serial, reg_ep);
  4080. }
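/* Populate an inline constant cache entry with the computed value, the current cref key and the global constant serial; the entry is marked shareable when the value is Ractor-shareable. */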
  4081. static void
  4082. vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep)
  4083. {
  4084. struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)rb_imemo_new(imemo_constcache, 0, 0, 0, 0);
  4085. RB_OBJ_WRITE(ice, &ice->value, val);
  4086. ice->ic_cref = vm_get_const_key_cref(reg_ep);
  4087. ice->ic_serial = GET_GLOBAL_CONSTANT_STATE() - ruby_vm_const_missing_count;
  4088. if (rb_ractor_shareable_p(val)) ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
  4089. ruby_vm_const_missing_count = 0;
  4090. RB_OBJ_WRITE(iseq, &ic->entry, ice);
  4091. }
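/* once instruction: run the block exactly once per inline storage entry; other threads wait for the running thread to finish, while re-entry from the same thread executes the block again recursively. */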
  4092. static VALUE
  4093. vm_once_dispatch(rb_execution_context_t *ec, ISEQ iseq, ISE is)
  4094. {
  4095. rb_thread_t *th = rb_ec_thread_ptr(ec);
  4096. rb_thread_t *const RUNNING_THREAD_ONCE_DONE = (rb_thread_t *)(0x1);
  4097. again:
  4098. if (is->once.running_thread == RUNNING_THREAD_ONCE_DONE) {
  4099. return is->once.value;
  4100. }
  4101. else if (is->once.running_thread == NULL) {
  4102. VALUE val;
  4103. is->once.running_thread = th;
  4104. val = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
  4105. RB_OBJ_WRITE(ec->cfp->iseq, &is->once.value, val);
  4106. /* is->once.running_thread is cleared by vm_once_clear() */
  4107. is->once.running_thread = RUNNING_THREAD_ONCE_DONE; /* success */
  4108. return val;
  4109. }
  4110. else if (is->once.running_thread == th) {
  4111. /* recursive once */
  4112. return vm_once_exec((VALUE)iseq);
  4113. }
  4114. else {
  4115. /* waiting for finish */
  4116. RUBY_VM_CHECK_INTS(ec);
  4117. rb_thread_schedule();
  4118. goto again;
  4119. }
  4120. }
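/* case/when dispatch: look the key up in the compiled dispatch hash when === is not redefined for the relevant classes; Float keys with no fractional part are normalized to Integers. Returns 0 to fall back to ordinary checkmatch. */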
  4121. static OFFSET
  4122. vm_case_dispatch(CDHASH hash, OFFSET else_offset, VALUE key)
  4123. {
  4124. switch (OBJ_BUILTIN_TYPE(key)) {
  4125. case -1:
  4126. case T_FLOAT:
  4127. case T_SYMBOL:
  4128. case T_BIGNUM:
  4129. case T_STRING:
  4130. if (BASIC_OP_UNREDEFINED_P(BOP_EQQ,
  4131. SYMBOL_REDEFINED_OP_FLAG |
  4132. INTEGER_REDEFINED_OP_FLAG |
  4133. FLOAT_REDEFINED_OP_FLAG |
  4134. NIL_REDEFINED_OP_FLAG |
  4135. TRUE_REDEFINED_OP_FLAG |
  4136. FALSE_REDEFINED_OP_FLAG |
  4137. STRING_REDEFINED_OP_FLAG)) {
  4138. st_data_t val;
  4139. if (RB_FLOAT_TYPE_P(key)) {
  4140. double kval = RFLOAT_VALUE(key);
  4141. if (!isinf(kval) && modf(kval, &kval) == 0.0) {
  4142. key = FIXABLE(kval) ? LONG2FIX((long)kval) : rb_dbl2big(kval);
  4143. }
  4144. }
  4145. if (rb_hash_stlike_lookup(hash, key, &val)) {
  4146. return FIX2LONG((VALUE)val);
  4147. }
  4148. else {
  4149. return else_offset;
  4150. }
  4151. }
  4152. }
  4153. return 0;
  4154. }
  4155. NORETURN(static void
  4156. vm_stack_consistency_error(const rb_execution_context_t *ec,
  4157. const rb_control_frame_t *,
  4158. const VALUE *));
  4159. static void
  4160. vm_stack_consistency_error(const rb_execution_context_t *ec,
  4161. const rb_control_frame_t *cfp,
  4162. const VALUE *bp)
  4163. {
  4164. const ptrdiff_t nsp = VM_SP_CNT(ec, cfp->sp);
  4165. const ptrdiff_t nbp = VM_SP_CNT(ec, bp);
  4166. static const char stack_consistency_error[] =
  4167. "Stack consistency error (sp: %"PRIdPTRDIFF", bp: %"PRIdPTRDIFF")";
  4168. #if defined RUBY_DEVEL
  4169. VALUE mesg = rb_sprintf(stack_consistency_error, nsp, nbp);
  4170. rb_str_cat_cstr(mesg, "\n");
  4171. rb_str_append(mesg, rb_iseq_disasm(cfp->iseq));
  4172. rb_exc_fatal(rb_exc_new3(rb_eFatal, mesg));
  4173. #else
  4174. rb_bug(stack_consistency_error, nsp, nbp);
  4175. #endif
  4176. }
  4177. static VALUE
  4178. vm_opt_plus(VALUE recv, VALUE obj)
  4179. {
  4180. if (FIXNUM_2_P(recv, obj) &&
  4181. BASIC_OP_UNREDEFINED_P(BOP_PLUS, INTEGER_REDEFINED_OP_FLAG)) {
  4182. return rb_fix_plus_fix(recv, obj);
  4183. }
  4184. else if (FLONUM_2_P(recv, obj) &&
  4185. BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
  4186. return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
  4187. }
  4188. else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
  4189. return Qundef;
  4190. }
  4191. else if (RBASIC_CLASS(recv) == rb_cFloat &&
  4192. RBASIC_CLASS(obj) == rb_cFloat &&
  4193. BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
  4194. return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
  4195. }
  4196. else if (RBASIC_CLASS(recv) == rb_cString &&
  4197. RBASIC_CLASS(obj) == rb_cString &&
  4198. BASIC_OP_UNREDEFINED_P(BOP_PLUS, STRING_REDEFINED_OP_FLAG)) {
  4199. return rb_str_opt_plus(recv, obj);
  4200. }
  4201. else if (RBASIC_CLASS(recv) == rb_cArray &&
  4202. RBASIC_CLASS(obj) == rb_cArray &&
  4203. BASIC_OP_UNREDEFINED_P(BOP_PLUS, ARRAY_REDEFINED_OP_FLAG)) {
  4204. return rb_ary_plus(recv, obj);
  4205. }
  4206. else {
  4207. return Qundef;
  4208. }
  4209. }
  4210. static VALUE
  4211. vm_opt_minus(VALUE recv, VALUE obj)
  4212. {
  4213. if (FIXNUM_2_P(recv, obj) &&
  4214. BASIC_OP_UNREDEFINED_P(BOP_MINUS, INTEGER_REDEFINED_OP_FLAG)) {
  4215. return rb_fix_minus_fix(recv, obj);
  4216. }
  4217. else if (FLONUM_2_P(recv, obj) &&
  4218. BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
  4219. return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
  4220. }
  4221. else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
  4222. return Qundef;
  4223. }
  4224. else if (RBASIC_CLASS(recv) == rb_cFloat &&
  4225. RBASIC_CLASS(obj) == rb_cFloat &&
  4226. BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
  4227. return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
  4228. }
  4229. else {
  4230. return Qundef;
  4231. }
  4232. }
  4233. static VALUE
  4234. vm_opt_mult(VALUE recv, VALUE obj)
  4235. {
  4236. if (FIXNUM_2_P(recv, obj) &&
  4237. BASIC_OP_UNREDEFINED_P(BOP_MULT, INTEGER_REDEFINED_OP_FLAG)) {
  4238. return rb_fix_mul_fix(recv, obj);
  4239. }
  4240. else if (FLONUM_2_P(recv, obj) &&
  4241. BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
  4242. return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
  4243. }
  4244. else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
  4245. return Qundef;
  4246. }
  4247. else if (RBASIC_CLASS(recv) == rb_cFloat &&
  4248. RBASIC_CLASS(obj) == rb_cFloat &&
  4249. BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
  4250. return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
  4251. }
  4252. else {
  4253. return Qundef;
  4254. }
  4255. }
  4256. static VALUE
  4257. vm_opt_div(VALUE recv, VALUE obj)
  4258. {
  4259. if (FIXNUM_2_P(recv, obj) &&
  4260. BASIC_OP_UNREDEFINED_P(BOP_DIV, INTEGER_REDEFINED_OP_FLAG)) {
  4261. return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_div_fix(recv, obj);
  4262. }
  4263. else if (FLONUM_2_P(recv, obj) &&
  4264. BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
  4265. return rb_flo_div_flo(recv, obj);
  4266. }
  4267. else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
  4268. return Qundef;
  4269. }
  4270. else if (RBASIC_CLASS(recv) == rb_cFloat &&
  4271. RBASIC_CLASS(obj) == rb_cFloat &&
  4272. BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
  4273. return rb_flo_div_flo(recv, obj);
  4274. }
  4275. else {
  4276. return Qundef;
  4277. }
  4278. }
  4279. static VALUE
  4280. vm_opt_mod(VALUE recv, VALUE obj)
  4281. {
  4282. if (FIXNUM_2_P(recv, obj) &&
  4283. BASIC_OP_UNREDEFINED_P(BOP_MOD, INTEGER_REDEFINED_OP_FLAG)) {
  4284. return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_mod_fix(recv, obj);
  4285. }
  4286. else if (FLONUM_2_P(recv, obj) &&
  4287. BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
  4288. return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
  4289. }
  4290. else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
  4291. return Qundef;
  4292. }
  4293. else if (RBASIC_CLASS(recv) == rb_cFloat &&
  4294. RBASIC_CLASS(obj) == rb_cFloat &&
  4295. BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
  4296. return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
  4297. }
  4298. else {
  4299. return Qundef;
  4300. }
  4301. }
  4302. static VALUE
  4303. vm_opt_neq(const rb_iseq_t *iseq, CALL_DATA cd, CALL_DATA cd_eq, VALUE recv, VALUE obj)
  4304. {
  4305. if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not_equal)) {
  4306. VALUE val = opt_equality(iseq, recv, obj, cd_eq);
  4307. if (val != Qundef) {
  4308. return RTEST(val) ? Qfalse : Qtrue;
  4309. }
  4310. }
  4311. return Qundef;
  4312. }
  4313. static VALUE
  4314. vm_opt_lt(VALUE recv, VALUE obj)
  4315. {
  4316. if (FIXNUM_2_P(recv, obj) &&
  4317. BASIC_OP_UNREDEFINED_P(BOP_LT, INTEGER_REDEFINED_OP_FLAG)) {
  4318. return RBOOL((SIGNED_VALUE)recv < (SIGNED_VALUE)obj);
  4319. }
  4320. else if (FLONUM_2_P(recv, obj) &&
  4321. BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
  4322. return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
  4323. }
  4324. else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
  4325. return Qundef;
  4326. }
  4327. else if (RBASIC_CLASS(recv) == rb_cFloat &&
  4328. RBASIC_CLASS(obj) == rb_cFloat &&
  4329. BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
  4330. CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
  4331. return RBOOL(RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj));
  4332. }
  4333. else {
  4334. return Qundef;
  4335. }
  4336. }
  4337. static VALUE
  4338. vm_opt_le(VALUE recv, VALUE obj)
  4339. {
  4340. if (FIXNUM_2_P(recv, obj) &&
  4341. BASIC_OP_UNREDEFINED_P(BOP_LE, INTEGER_REDEFINED_OP_FLAG)) {
  4342. return RBOOL((SIGNED_VALUE)recv <= (SIGNED_VALUE)obj);
  4343. }
  4344. else if (FLONUM_2_P(recv, obj) &&
  4345. BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
  4346. return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
  4347. }
  4348. else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
  4349. return Qundef;
  4350. }
  4351. else if (RBASIC_CLASS(recv) == rb_cFloat &&
  4352. RBASIC_CLASS(obj) == rb_cFloat &&
  4353. BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
  4354. CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
  4355. return RBOOL(RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj));
  4356. }
  4357. else {
  4358. return Qundef;
  4359. }
  4360. }
  4361. static VALUE
  4362. vm_opt_gt(VALUE recv, VALUE obj)
  4363. {
  4364. if (FIXNUM_2_P(recv, obj) &&
  4365. BASIC_OP_UNREDEFINED_P(BOP_GT, INTEGER_REDEFINED_OP_FLAG)) {
  4366. return RBOOL((SIGNED_VALUE)recv > (SIGNED_VALUE)obj);
  4367. }
  4368. else if (FLONUM_2_P(recv, obj) &&
  4369. BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
  4370. return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
  4371. }
  4372. else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
  4373. return Qundef;
  4374. }
  4375. else if (RBASIC_CLASS(recv) == rb_cFloat &&
  4376. RBASIC_CLASS(obj) == rb_cFloat &&
  4377. BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
  4378. CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
  4379. return RBOOL(RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj));
  4380. }
  4381. else {
  4382. return Qundef;
  4383. }
  4384. }
  4385. static VALUE
  4386. vm_opt_ge(VALUE recv, VALUE obj)
  4387. {
  4388. if (FIXNUM_2_P(recv, obj) &&
  4389. BASIC_OP_UNREDEFINED_P(BOP_GE, INTEGER_REDEFINED_OP_FLAG)) {
  4390. return RBOOL((SIGNED_VALUE)recv >= (SIGNED_VALUE)obj);
  4391. }
  4392. else if (FLONUM_2_P(recv, obj) &&
  4393. BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
  4394. return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
  4395. }
  4396. else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
  4397. return Qundef;
  4398. }
  4399. else if (RBASIC_CLASS(recv) == rb_cFloat &&
  4400. RBASIC_CLASS(obj) == rb_cFloat &&
  4401. BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
  4402. CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
  4403. return RBOOL(RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj));
  4404. }
  4405. else {
  4406. return Qundef;
  4407. }
  4408. }
  4409. static VALUE
  4410. vm_opt_ltlt(VALUE recv, VALUE obj)
  4411. {
  4412. if (SPECIAL_CONST_P(recv)) {
  4413. return Qundef;
  4414. }
  4415. else if (RBASIC_CLASS(recv) == rb_cString &&
  4416. BASIC_OP_UNREDEFINED_P(BOP_LTLT, STRING_REDEFINED_OP_FLAG)) {
  4417. return rb_str_concat(recv, obj);
  4418. }
  4419. else if (RBASIC_CLASS(recv) == rb_cArray &&
  4420. BASIC_OP_UNREDEFINED_P(BOP_LTLT, ARRAY_REDEFINED_OP_FLAG)) {
  4421. return rb_ary_push(recv, obj);
  4422. }
  4423. else {
  4424. return Qundef;
  4425. }
  4426. }
static VALUE
vm_opt_and(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_AND, INTEGER_REDEFINED_OP_FLAG)) {
        return (recv & obj) | 1;
    }
    else {
        return Qundef;
    }
}
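
/*
 * Fast path for Integer#| on two Fixnums: ORing the raw VALUEs preserves
 * the Fixnum tag bit, so the result is already a valid Fixnum.
 */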
static VALUE
vm_opt_or(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_OR, INTEGER_REDEFINED_OP_FLAG)) {
        return recv | obj;
    }
    else {
        return Qundef;
    }
}
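
/*
 * Fast path for `[]`: Integer#[] (bit reference) when both operands are
 * Fixnums, Array#[] (with a shortcut for Fixnum indexes), and Hash#[].
 */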
static VALUE
vm_opt_aref(VALUE recv, VALUE obj)
{
    if (SPECIAL_CONST_P(recv)) {
        if (FIXNUM_2_P(recv, obj) &&
            BASIC_OP_UNREDEFINED_P(BOP_AREF, INTEGER_REDEFINED_OP_FLAG)) {
            return rb_fix_aref(recv, obj);
        }
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cArray &&
             BASIC_OP_UNREDEFINED_P(BOP_AREF, ARRAY_REDEFINED_OP_FLAG)) {
        if (FIXNUM_P(obj)) {
            return rb_ary_entry_internal(recv, FIX2LONG(obj));
        }
        else {
            return rb_ary_aref1(recv, obj);
        }
    }
    else if (RBASIC_CLASS(recv) == rb_cHash &&
             BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG)) {
        return rb_hash_aref(recv, obj);
    }
    else {
        return Qundef;
    }
}
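
/*
 * Fast path for `[]=` on Array (Fixnum index only) and Hash receivers;
 * returns the assigned value, or Qundef to fall back to a normal call.
 */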
static VALUE
vm_opt_aset(VALUE recv, VALUE obj, VALUE set)
{
    if (SPECIAL_CONST_P(recv)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cArray &&
             BASIC_OP_UNREDEFINED_P(BOP_ASET, ARRAY_REDEFINED_OP_FLAG) &&
             FIXNUM_P(obj)) {
        rb_ary_store(recv, FIX2LONG(obj), set);
        return set;
    }
    else if (RBASIC_CLASS(recv) == rb_cHash &&
             BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG)) {
        rb_hash_aset(recv, obj, set);
        return set;
    }
    else {
        return Qundef;
    }
}
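
/*
 * Variants of the `[]` / `[]=` fast paths where the key is supplied by the
 * instruction itself (presumably a string literal).  They are limited to
 * plain Hashes that do not compare_by_identity, since an identity hash
 * would care about the key object itself rather than its contents.
 */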
static VALUE
vm_opt_aref_with(VALUE recv, VALUE key)
{
    if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
        BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG) &&
        rb_hash_compare_by_id_p(recv) == Qfalse) {
        return rb_hash_aref(recv, key);
    }
    else {
        return Qundef;
    }
}

static VALUE
vm_opt_aset_with(VALUE recv, VALUE key, VALUE val)
{
    if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
        BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG) &&
        rb_hash_compare_by_id_p(recv) == Qfalse) {
        return rb_hash_aset(recv, key, val);
    }
    else {
        return Qundef;
    }
}
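
/*
 * Shared fast path for the length/size/empty? family on String, Array and
 * Hash receivers; `bop` selects which basic operation is being specialized.
 * For BOP_EMPTY_P on a String the byte length is returned, which is all
 * vm_opt_empty_p below needs for its zero test.
 */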
static VALUE
vm_opt_length(VALUE recv, int bop)
{
    if (SPECIAL_CONST_P(recv)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cString &&
             BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
        if (bop == BOP_EMPTY_P) {
            return LONG2NUM(RSTRING_LEN(recv));
        }
        else {
            return rb_str_length(recv);
        }
    }
    else if (RBASIC_CLASS(recv) == rb_cArray &&
             BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
        return LONG2NUM(RARRAY_LEN(recv));
    }
    else if (RBASIC_CLASS(recv) == rb_cHash &&
             BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
        return INT2FIX(RHASH_SIZE(recv));
    }
    else {
        return Qundef;
    }
}

static VALUE
vm_opt_empty_p(VALUE recv)
{
    switch (vm_opt_length(recv, BOP_EMPTY_P)) {
      case Qundef: return Qundef;
      case INT2FIX(0): return Qtrue;
      default: return Qfalse;
    }
}

VALUE rb_false(VALUE obj);
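
/*
 * Fast path for `nil?`: nil itself is trivially true; any other receiver
 * whose resolved method is rb_false (the default `nil?`) is false without
 * performing a call.
 */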
static VALUE
vm_opt_nil_p(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
{
    if (recv == Qnil &&
        BASIC_OP_UNREDEFINED_P(BOP_NIL_P, NIL_REDEFINED_OP_FLAG)) {
        return Qtrue;
    }
    else if (vm_method_cfunc_is(iseq, cd, recv, rb_false)) {
        return Qfalse;
    }
    else {
        return Qundef;
    }
}
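
/*
 * Successor of a Fixnum computed on the tagged representation: adding 2 to
 * the raw VALUE adds 1 to the untagged integer (see the derivation in the
 * default branch).  The two special cases are -1, whose tagged form is
 * ~0UL, and FIXNUM_MAX, whose successor must be promoted to a Bignum.
 */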
static VALUE
fix_succ(VALUE x)
{
    switch (x) {
      case ~0UL:
        /* 0xFFFF_FFFF == INT2FIX(-1)
         * `-1.succ` is of course 0. */
        return INT2FIX(0);
      case RSHIFT(~0UL, 1):
        /* 0x7FFF_FFFF == LONG2FIX(0x3FFF_FFFF)
         * 0x3FFF_FFFF + 1 == 0x4000_0000, which is a Bignum. */
        return rb_uint2big(1UL << (SIZEOF_LONG * CHAR_BIT - 2));
      default:
        /* LONG2FIX(FIX2LONG(x)+FIX2LONG(y))
         * == ((lx*2+1)/2 + (ly*2+1)/2)*2+1
         * == lx*2 + ly*2 + 1
         * == (lx*2+1) + (ly*2+1) - 1
         * == x + y - 1
         *
         * Here, if we put y := INT2FIX(1):
         *
         * == x + INT2FIX(1) - 1
         * == x + 2 .
         */
        return x + 2;
    }
}

static VALUE
vm_opt_succ(VALUE recv)
{
    if (FIXNUM_P(recv) &&
        BASIC_OP_UNREDEFINED_P(BOP_SUCC, INTEGER_REDEFINED_OP_FLAG)) {
        return fix_succ(recv);
    }
    else if (SPECIAL_CONST_P(recv)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cString &&
             BASIC_OP_UNREDEFINED_P(BOP_SUCC, STRING_REDEFINED_OP_FLAG)) {
        return rb_str_succ(recv);
    }
    else {
        return Qundef;
    }
}
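
/*
 * Fast path for `!`: applies only when the resolved method is rb_obj_not
 * (the default BasicObject#!), in which case the result is simply the
 * negated truthiness of the receiver.
 */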
static VALUE
vm_opt_not(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
{
    if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not)) {
        return RTEST(recv) ? Qfalse : Qtrue;
    }
    else {
        return Qundef;
    }
}
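
/*
 * Fast path for `=~`: both String =~ Regexp and Regexp =~ obj go straight
 * to rb_reg_match as long as the respective `=~` has not been redefined.
 */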
static VALUE
vm_opt_regexpmatch2(VALUE recv, VALUE obj)
{
    if (SPECIAL_CONST_P(recv)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cString &&
             CLASS_OF(obj) == rb_cRegexp &&
             BASIC_OP_UNREDEFINED_P(BOP_MATCH, STRING_REDEFINED_OP_FLAG)) {
        return rb_reg_match(obj, recv);
    }
    else if (RBASIC_CLASS(recv) == rb_cRegexp &&
             BASIC_OP_UNREDEFINED_P(BOP_MATCH, REGEXP_REDEFINED_OP_FLAG)) {
        return rb_reg_match(recv, obj);
    }
    else {
        return Qundef;
    }
}

rb_event_flag_t rb_iseq_event_flags(const rb_iseq_t *iseq, size_t pos);

NOINLINE(static void vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp));
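
/*
 * Fire one kind of trace event (target_event) for the current PC: first
 * the global (per-ractor) hooks, then the iseq-local hooks.  The PC is
 * bumped by one around each hook invocation because the source line is
 * calculated from PC-1.
 */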
static inline void
vm_trace_hook(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *pc,
              rb_event_flag_t pc_events, rb_event_flag_t target_event,
              rb_hook_list_t *global_hooks, rb_hook_list_t *local_hooks, VALUE val)
{
    rb_event_flag_t event = pc_events & target_event;
    VALUE self = GET_SELF();

    VM_ASSERT(rb_popcount64((uint64_t)event) == 1);

    if (event & global_hooks->events) {
        /* increment PC because source line is calculated with PC-1 */
        reg_cfp->pc++;
        vm_dtrace(event, ec);
        rb_exec_event_hook_orig(ec, global_hooks, event, self, 0, 0, 0, val, 0);
        reg_cfp->pc--;
    }

    if (local_hooks != NULL) {
        if (event & local_hooks->events) {
            /* increment PC because source line is calculated with PC-1 */
            reg_cfp->pc++;
            rb_exec_event_hook_orig(ec, local_hooks, event, self, 0, 0, 0, val, 0);
            reg_cfp->pc--;
        }
    }
}

// Return true if the given cc has a cfunc which is NOT handled by opt_send_without_block.
bool
rb_vm_opt_cfunc_p(CALL_CACHE cc, int insn)
{
    switch (insn) {
      case BIN(opt_eq):
        return check_cfunc(vm_cc_cme(cc), rb_obj_equal);
      case BIN(opt_nil_p):
        return check_cfunc(vm_cc_cme(cc), rb_false);
      case BIN(opt_not):
        return check_cfunc(vm_cc_cme(cc), rb_obj_not);
      default:
        return false;
    }
}

#define VM_TRACE_HOOK(target_event, val) do { \
    if ((pc_events & (target_event)) & enabled_flags) { \
        vm_trace_hook(ec, reg_cfp, pc, pc_events, (target_event), global_hooks, local_hooks, (val)); \
    } \
} while (0)
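
/*
 * Check which trace events are attached to the current PC and fire the
 * enabled global and iseq-local hooks in a fixed order via VM_TRACE_HOOK.
 * Returns early when no enabled hook matches this PC or when a trace hook
 * is already running (ec->trace_arg != NULL).
 */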
static void
vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
{
    const VALUE *pc = reg_cfp->pc;
    rb_event_flag_t enabled_flags = ruby_vm_event_flags & ISEQ_TRACE_EVENTS;

    if (enabled_flags == 0 && ruby_vm_event_local_num == 0) {
        return;
    }
    else {
        const rb_iseq_t *iseq = reg_cfp->iseq;
        size_t pos = pc - iseq->body->iseq_encoded;
        rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pos);
        rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;
        rb_event_flag_t local_hook_events = local_hooks != NULL ? local_hooks->events : 0;
        enabled_flags |= local_hook_events;

        VM_ASSERT((local_hook_events & ~ISEQ_TRACE_EVENTS) == 0);

        if ((pc_events & enabled_flags) == 0) {
#if 0
            /* disable trace */
            /* TODO: incomplete */
            rb_iseq_trace_set(iseq, vm_event_flags & ISEQ_TRACE_EVENTS);
#else
            /* do not disable trace because of performance problem
             * (re-enable overhead)
             */
#endif
            return;
        }
        else if (ec->trace_arg != NULL) {
            /* already tracing */
            return;
        }
        else {
            rb_hook_list_t *global_hooks = rb_ec_ractor_hooks(ec);

            if (0) {
                ruby_debug_printf("vm_trace>>%4d (%4x) - %s:%d %s\n",
                                  (int)pos,
                                  (int)pc_events,
                                  RSTRING_PTR(rb_iseq_path(iseq)),
                                  (int)rb_iseq_line_no(iseq, pos),
                                  RSTRING_PTR(rb_iseq_label(iseq)));
            }

            VM_ASSERT(reg_cfp->pc == pc);
            VM_ASSERT(pc_events != 0);
            VM_ASSERT(enabled_flags & pc_events);

            /* check traces */
            VM_TRACE_HOOK(RUBY_EVENT_CLASS | RUBY_EVENT_CALL | RUBY_EVENT_B_CALL, Qundef);
            VM_TRACE_HOOK(RUBY_EVENT_LINE, Qundef);
            VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_LINE, Qundef);
            VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_BRANCH, Qundef);
            VM_TRACE_HOOK(RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN, TOPN(0));
        }
    }
}

#if VM_CHECK_MODE > 0
NORETURN( NOINLINE( COLDFUNC
void rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)));

void
Init_vm_stack_canary(void)
{
    /* This has to be called _after_ our PRNG is properly set up. */
    int n = ruby_fill_random_bytes(&vm_stack_canary, sizeof vm_stack_canary, false);
    vm_stack_canary |= 0x01; // valid VALUE (Fixnum)
    vm_stack_canary_was_born = true;

    VM_ASSERT(n == 0);
}

#ifndef MJIT_HEADER
MJIT_FUNC_EXPORTED void
rb_vm_canary_is_found_dead(enum ruby_vminsn_type i, VALUE c)
{
    /* Because a method has already been called, why not call
     * another one. */
    const char *insn = rb_insns_name(i);
    VALUE inspection = rb_inspect(c);
    const char *str = StringValueCStr(inspection);

    rb_bug("dead canary found at %s: %s", insn, str);
}
#endif

#else
void Init_vm_stack_canary(void) { /* nothing to do */ }
#endif

/* a part of the following code is generated by this ruby script:

16.times{|i|
  typedef_args = (0...i).map{|j| "VALUE v#{j+1}"}.join(", ")
  typedef_args.prepend(", ") if i != 0
  call_args = (0...i).map{|j| "argv[#{j}]"}.join(", ")
  call_args.prepend(", ") if i != 0

  puts %Q{
static VALUE
builtin_invoker#{i}(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr#{i}_t)(rb_execution_context_t *ec, VALUE self#{typedef_args});
    return (*(rb_invoke_funcptr#{i}_t)funcptr)(ec, self#{call_args});
}}
}

puts
puts "static VALUE (* const cfunc_invokers[])(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr) = {"
16.times{|i|
  puts "    builtin_invoker#{i},"
}
puts "};"
*/

static VALUE
builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr0_t)(rb_execution_context_t *ec, VALUE self);
    return (*(rb_invoke_funcptr0_t)funcptr)(ec, self);
}

static VALUE
builtin_invoker1(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr1_t)(rb_execution_context_t *ec, VALUE self, VALUE v1);
    return (*(rb_invoke_funcptr1_t)funcptr)(ec, self, argv[0]);
}

static VALUE
builtin_invoker2(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr2_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2);
    return (*(rb_invoke_funcptr2_t)funcptr)(ec, self, argv[0], argv[1]);
}

static VALUE
builtin_invoker3(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr3_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3);
    return (*(rb_invoke_funcptr3_t)funcptr)(ec, self, argv[0], argv[1], argv[2]);
}

static VALUE
builtin_invoker4(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr4_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4);
    return (*(rb_invoke_funcptr4_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3]);
}

static VALUE
builtin_invoker5(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr5_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5);
    return (*(rb_invoke_funcptr5_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4]);
}

static VALUE
builtin_invoker6(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr6_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6);
    return (*(rb_invoke_funcptr6_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
}

static VALUE
builtin_invoker7(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr7_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7);
    return (*(rb_invoke_funcptr7_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
}

static VALUE
builtin_invoker8(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr8_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8);
    return (*(rb_invoke_funcptr8_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
}

static VALUE
builtin_invoker9(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr9_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9);
    return (*(rb_invoke_funcptr9_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
}

static VALUE
builtin_invoker10(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr10_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10);
    return (*(rb_invoke_funcptr10_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
}

static VALUE
builtin_invoker11(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr11_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11);
    return (*(rb_invoke_funcptr11_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
}

static VALUE
builtin_invoker12(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr12_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12);
    return (*(rb_invoke_funcptr12_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
}

static VALUE
builtin_invoker13(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr13_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13);
    return (*(rb_invoke_funcptr13_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
}

static VALUE
builtin_invoker14(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr14_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14);
    return (*(rb_invoke_funcptr14_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
}

static VALUE
builtin_invoker15(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr)
{
    typedef VALUE (*rb_invoke_funcptr15_t)(rb_execution_context_t *ec, VALUE self, VALUE v1, VALUE v2, VALUE v3, VALUE v4, VALUE v5, VALUE v6, VALUE v7, VALUE v8, VALUE v9, VALUE v10, VALUE v11, VALUE v12, VALUE v13, VALUE v14, VALUE v15);
    return (*(rb_invoke_funcptr15_t)funcptr)(ec, self, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
}

typedef VALUE (*builtin_invoker)(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);

static builtin_invoker
lookup_builtin_invoker(int argc)
{
    static const builtin_invoker invokers[] = {
        builtin_invoker0,
        builtin_invoker1,
        builtin_invoker2,
        builtin_invoker3,
        builtin_invoker4,
        builtin_invoker5,
        builtin_invoker6,
        builtin_invoker7,
        builtin_invoker8,
        builtin_invoker9,
        builtin_invoker10,
        builtin_invoker11,
        builtin_invoker12,
        builtin_invoker13,
        builtin_invoker14,
        builtin_invoker15,
    };

    return invokers[argc];
}
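
/*
 * Call a builtin function (struct rb_builtin_function) through the
 * argc-indexed trampoline chosen by lookup_builtin_invoker.  When the
 * enclosing iseq is flagged builtin_inline_p, a stack canary is set up and
 * checked around the call.
 */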
static inline VALUE
invoke_bf(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const struct rb_builtin_function* bf, const VALUE *argv)
{
    const bool canary_p = reg_cfp->iseq->body->builtin_inline_p; // Verify an assumption of `Primitive.attr! 'inline'`
    SETUP_CANARY(canary_p);
    VALUE ret = (*lookup_builtin_invoker(bf->argc))(ec, reg_cfp->self, argv, (rb_insn_func_t)bf->func_ptr);
    CHECK_CANARY(canary_p, BIN(invokebuiltin));
    return ret;
}

static VALUE
vm_invoke_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function* bf, const VALUE *argv)
{
    return invoke_bf(ec, cfp, bf, argv);
}
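
/*
 * Delegating variant: instead of taking an argv prepared on the stack, it
 * passes the frame's own local variables starting at start_index straight
 * through as the builtin's arguments.
 */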
static VALUE
vm_invoke_builtin_delegate(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_builtin_function *bf, unsigned int start_index)
{
    if (0) { // debug print
        fputs("vm_invoke_builtin_delegate: passing -> ", stderr);
        for (int i=0; i<bf->argc; i++) {
            ruby_debug_printf(":%s ", rb_id2name(cfp->iseq->body->local_table[i+start_index]));
        }
        ruby_debug_printf("\n" "%s %s(%d):%p\n", RUBY_FUNCTION_NAME_STRING, bf->name, bf->argc, bf->func_ptr);
    }

    if (bf->argc == 0) {
        return invoke_bf(ec, cfp, bf, NULL);
    }
    else {
        const VALUE *argv = cfp->ep - cfp->iseq->body->local_table_size - VM_ENV_DATA_SIZE + 1 + start_index;
        return invoke_bf(ec, cfp, bf, argv);
    }
}

// for __builtin_inline!()

VALUE
rb_vm_lvar_exposed(rb_execution_context_t *ec, int index)
{
    const rb_control_frame_t *cfp = ec->cfp;
    return cfp->ep[index];
}