
/vm_insnhelper.c

https://github.com/wanabe/ruby
C | 5688 lines | 4748 code | 730 blank | 210 comment | 1082 complexity | 1fdc5951c2a5719152838f55edfc1cfa MD5
Possible License(s): LGPL-2.1, AGPL-3.0, 0BSD, Unlicense, GPL-2.0, BSD-3-Clause

Large files are truncated; the listing below shows only the beginning of the file.

  1. /**********************************************************************
  2. vm_insnhelper.c - instruction helper functions.
  3. $Author$
  4. Copyright (C) 2007 Koichi Sasada
  5. **********************************************************************/
  6. #include "ruby/internal/config.h"
  7. #include <math.h>
  8. #include "constant.h"
  9. #include "debug_counter.h"
  10. #include "internal.h"
  11. #include "internal/class.h"
  12. #include "internal/compar.h"
  13. #include "internal/hash.h"
  14. #include "internal/numeric.h"
  15. #include "internal/proc.h"
  16. #include "internal/random.h"
  17. #include "internal/variable.h"
  18. #include "variable.h"
  19. /* finish iseq array */
  20. #include "insns.inc"
  21. #ifndef MJIT_HEADER
  22. #include "insns_info.inc"
  23. #endif
  24. extern rb_method_definition_t *rb_method_definition_create(rb_method_type_t type, ID mid);
  25. extern void rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *def, void *opts);
  26. extern int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
  27. extern VALUE rb_make_no_method_exception(VALUE exc, VALUE format, VALUE obj,
  28. int argc, const VALUE *argv, int priv);
  29. #ifndef MJIT_HEADER
  30. static const struct rb_callcache vm_empty_cc;
  31. #endif
  32. /* control stack frame */
  33. static rb_control_frame_t *vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
  34. MJIT_STATIC VALUE
  35. ruby_vm_special_exception_copy(VALUE exc)
  36. {
  37. VALUE e = rb_obj_alloc(rb_class_real(RBASIC_CLASS(exc)));
  38. rb_obj_copy_ivar(e, exc);
  39. return e;
  40. }
  41. NORETURN(static void ec_stack_overflow(rb_execution_context_t *ec, int));
  42. static void
  43. ec_stack_overflow(rb_execution_context_t *ec, int setup)
  44. {
  45. VALUE mesg = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_sysstack];
  46. ec->raised_flag = RAISED_STACKOVERFLOW;
  47. if (setup) {
  48. VALUE at = rb_ec_backtrace_object(ec);
  49. mesg = ruby_vm_special_exception_copy(mesg);
  50. rb_ivar_set(mesg, idBt, at);
  51. rb_ivar_set(mesg, idBt_locations, at);
  52. }
  53. ec->errinfo = mesg;
  54. EC_JUMP_TAG(ec, TAG_RAISE);
  55. }
  56. NORETURN(static void vm_stackoverflow(void));
  57. #ifdef MJIT_HEADER
  58. NOINLINE(static COLDFUNC void vm_stackoverflow(void));
  59. #endif
  60. static void
  61. vm_stackoverflow(void)
  62. {
  63. ec_stack_overflow(GET_EC(), TRUE);
  64. }
  65. NORETURN(MJIT_STATIC void rb_ec_stack_overflow(rb_execution_context_t *ec, int crit));
  66. MJIT_STATIC void
  67. rb_ec_stack_overflow(rb_execution_context_t *ec, int crit)
  68. {
  69. if (rb_during_gc()) {
  70. rb_bug("system stack overflow during GC. Faulty native extension?");
  71. }
  72. if (crit) {
  73. ec->raised_flag = RAISED_STACKOVERFLOW;
  74. ec->errinfo = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_stackfatal];
  75. EC_JUMP_TAG(ec, TAG_RAISE);
  76. }
  77. #ifdef USE_SIGALTSTACK
  78. ec_stack_overflow(ec, TRUE);
  79. #else
  80. ec_stack_overflow(ec, FALSE);
  81. #endif
  82. }
  83. #if VM_CHECK_MODE > 0
  84. static int
  85. callable_class_p(VALUE klass)
  86. {
  87. #if VM_CHECK_MODE >= 2
  88. if (!klass) return FALSE;
  89. switch (RB_BUILTIN_TYPE(klass)) {
  90. default:
  91. break;
  92. case T_ICLASS:
  93. if (!RB_TYPE_P(RCLASS_SUPER(klass), T_MODULE)) break;
  94. case T_MODULE:
  95. return TRUE;
  96. }
  97. while (klass) {
  98. if (klass == rb_cBasicObject) {
  99. return TRUE;
  100. }
  101. klass = RCLASS_SUPER(klass);
  102. }
  103. return FALSE;
  104. #else
  105. return klass != 0;
  106. #endif
  107. }
  108. static int
  109. callable_method_entry_p(const rb_callable_method_entry_t *cme)
  110. {
  111. if (cme == NULL) {
  112. return TRUE;
  113. }
  114. else {
  115. VM_ASSERT(IMEMO_TYPE_P((VALUE)cme, imemo_ment));
  116. if (callable_class_p(cme->defined_class)) {
  117. return TRUE;
  118. }
  119. else {
  120. return FALSE;
  121. }
  122. }
  123. }
  124. static void
  125. vm_check_frame_detail(VALUE type, int req_block, int req_me, int req_cref, VALUE specval, VALUE cref_or_me, int is_cframe, const rb_iseq_t *iseq)
  126. {
  127. unsigned int magic = (unsigned int)(type & VM_FRAME_MAGIC_MASK);
  128. enum imemo_type cref_or_me_type = imemo_env; /* impossible value */
  129. if (RB_TYPE_P(cref_or_me, T_IMEMO)) {
  130. cref_or_me_type = imemo_type(cref_or_me);
  131. }
  132. if (type & VM_FRAME_FLAG_BMETHOD) {
  133. req_me = TRUE;
  134. }
  135. if (req_block && (type & VM_ENV_FLAG_LOCAL) == 0) {
  136. rb_bug("vm_push_frame: specval (%p) should be a block_ptr on %x frame", (void *)specval, magic);
  137. }
  138. if (!req_block && (type & VM_ENV_FLAG_LOCAL) != 0) {
  139. rb_bug("vm_push_frame: specval (%p) should not be a block_ptr on %x frame", (void *)specval, magic);
  140. }
  141. if (req_me) {
  142. if (cref_or_me_type != imemo_ment) {
  143. rb_bug("vm_push_frame: (%s) should be method entry on %x frame", rb_obj_info(cref_or_me), magic);
  144. }
  145. }
  146. else {
  147. if (req_cref && cref_or_me_type != imemo_cref) {
  148. rb_bug("vm_push_frame: (%s) should be CREF on %x frame", rb_obj_info(cref_or_me), magic);
  149. }
  150. else { /* cref or Qfalse */
  151. if (cref_or_me != Qfalse && cref_or_me_type != imemo_cref) {
  152. if (((type & VM_FRAME_FLAG_LAMBDA) || magic == VM_FRAME_MAGIC_IFUNC) && (cref_or_me_type == imemo_ment)) {
  153. /* ignore */
  154. }
  155. else {
  156. rb_bug("vm_push_frame: (%s) should be false or cref on %x frame", rb_obj_info(cref_or_me), magic);
  157. }
  158. }
  159. }
  160. }
  161. if (cref_or_me_type == imemo_ment) {
  162. const rb_callable_method_entry_t *me = (const rb_callable_method_entry_t *)cref_or_me;
  163. if (!callable_method_entry_p(me)) {
  164. rb_bug("vm_push_frame: ment (%s) should be callable on %x frame.", rb_obj_info(cref_or_me), magic);
  165. }
  166. }
  167. if ((type & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY) {
  168. VM_ASSERT(iseq == NULL ||
  169. RUBY_VM_NORMAL_ISEQ_P(iseq) /* argument error. it should be fixed */);
  170. }
  171. else {
  172. VM_ASSERT(is_cframe == !RUBY_VM_NORMAL_ISEQ_P(iseq));
  173. }
  174. }
  175. static void
  176. vm_check_frame(VALUE type,
  177. VALUE specval,
  178. VALUE cref_or_me,
  179. const rb_iseq_t *iseq)
  180. {
  181. VALUE given_magic = type & VM_FRAME_MAGIC_MASK;
  182. VM_ASSERT(FIXNUM_P(type));
  183. #define CHECK(magic, req_block, req_me, req_cref, is_cframe) \
  184. case magic: \
  185. vm_check_frame_detail(type, req_block, req_me, req_cref, \
  186. specval, cref_or_me, is_cframe, iseq); \
  187. break
  188. switch (given_magic) {
  189. /* BLK ME CREF CFRAME */
  190. CHECK(VM_FRAME_MAGIC_METHOD, TRUE, TRUE, FALSE, FALSE);
  191. CHECK(VM_FRAME_MAGIC_CLASS, TRUE, FALSE, TRUE, FALSE);
  192. CHECK(VM_FRAME_MAGIC_TOP, TRUE, FALSE, TRUE, FALSE);
  193. CHECK(VM_FRAME_MAGIC_CFUNC, TRUE, TRUE, FALSE, TRUE);
  194. CHECK(VM_FRAME_MAGIC_BLOCK, FALSE, FALSE, FALSE, FALSE);
  195. CHECK(VM_FRAME_MAGIC_IFUNC, FALSE, FALSE, FALSE, TRUE);
  196. CHECK(VM_FRAME_MAGIC_EVAL, FALSE, FALSE, FALSE, FALSE);
  197. CHECK(VM_FRAME_MAGIC_RESCUE, FALSE, FALSE, FALSE, FALSE);
  198. CHECK(VM_FRAME_MAGIC_DUMMY, TRUE, FALSE, FALSE, FALSE);
  199. default:
  200. rb_bug("vm_push_frame: unknown type (%x)", (unsigned int)given_magic);
  201. }
  202. #undef CHECK
  203. }
  204. static VALUE vm_stack_canary; /* Initialized later */
  205. static bool vm_stack_canary_was_born = false;
  206. #ifndef MJIT_HEADER
  207. MJIT_FUNC_EXPORTED void
  208. rb_vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
  209. {
  210. const struct rb_control_frame_struct *reg_cfp = ec->cfp;
  211. const struct rb_iseq_struct *iseq;
  212. if (! LIKELY(vm_stack_canary_was_born)) {
  213. return; /* :FIXME: isn't it rather fatal to enter this branch? */
  214. }
  215. else if ((VALUE *)reg_cfp == ec->vm_stack + ec->vm_stack_size) {
  216. /* This is at the very beginning of a thread. cfp does not exist. */
  217. return;
  218. }
  219. else if (! (iseq = GET_ISEQ())) {
  220. return;
  221. }
  222. else if (LIKELY(sp[0] != vm_stack_canary)) {
  223. return;
  224. }
  225. else {
  226. /* we are going to call methods below; squash the canary to
  227. * prevent infinite loop. */
  228. sp[0] = Qundef;
  229. }
  230. const VALUE *orig = rb_iseq_original_iseq(iseq);
  231. const VALUE *encoded = iseq->body->iseq_encoded;
  232. const ptrdiff_t pos = GET_PC() - encoded;
  233. const enum ruby_vminsn_type insn = (enum ruby_vminsn_type)orig[pos];
  234. const char *name = insn_name(insn);
  235. const VALUE iseqw = rb_iseqw_new(iseq);
  236. const VALUE inspection = rb_inspect(iseqw);
  237. const char *stri = rb_str_to_cstr(inspection);
  238. const VALUE disasm = rb_iseq_disasm(iseq);
  239. const char *strd = rb_str_to_cstr(disasm);
  240. /* rb_bug() is not capable of outputting contents this large. It
  241. is designed to run from a SIGSEGV handler, which tends to be
  242. very restricted. */
  243. ruby_debug_printf(
  244. "We are killing the stack canary set by %s, "
  245. "at %s@pc=%"PRIdPTR"\n"
  246. "watch out for the C stack trace.\n"
  247. "%s",
  248. name, stri, pos, strd);
  249. rb_bug("see above.");
  250. }
  251. #endif
  252. #define vm_check_canary(ec, sp) rb_vm_check_canary(ec, sp)
  253. #else
  254. #define vm_check_canary(ec, sp)
  255. #define vm_check_frame(a, b, c, d)
  256. #endif /* VM_CHECK_MODE > 0 */
  257. #if USE_DEBUG_COUNTER
  258. static void
  259. vm_push_frame_debug_counter_inc(
  260. const struct rb_execution_context_struct *ec,
  261. const struct rb_control_frame_struct *reg_cfp,
  262. VALUE type)
  263. {
  264. const struct rb_control_frame_struct *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(reg_cfp);
  265. RB_DEBUG_COUNTER_INC(frame_push);
  266. if (RUBY_VM_END_CONTROL_FRAME(ec) != prev_cfp) {
  267. const bool curr = VM_FRAME_RUBYFRAME_P(reg_cfp);
  268. const bool prev = VM_FRAME_RUBYFRAME_P(prev_cfp);
  269. if (prev) {
  270. if (curr) {
  271. RB_DEBUG_COUNTER_INC(frame_R2R);
  272. }
  273. else {
  274. RB_DEBUG_COUNTER_INC(frame_R2C);
  275. }
  276. }
  277. else {
  278. if (curr) {
  279. RB_DEBUG_COUNTER_INC(frame_C2R);
  280. }
  281. else {
  282. RB_DEBUG_COUNTER_INC(frame_C2C);
  283. }
  284. }
  285. }
  286. switch (type & VM_FRAME_MAGIC_MASK) {
  287. case VM_FRAME_MAGIC_METHOD: RB_DEBUG_COUNTER_INC(frame_push_method); return;
  288. case VM_FRAME_MAGIC_BLOCK: RB_DEBUG_COUNTER_INC(frame_push_block); return;
  289. case VM_FRAME_MAGIC_CLASS: RB_DEBUG_COUNTER_INC(frame_push_class); return;
  290. case VM_FRAME_MAGIC_TOP: RB_DEBUG_COUNTER_INC(frame_push_top); return;
  291. case VM_FRAME_MAGIC_CFUNC: RB_DEBUG_COUNTER_INC(frame_push_cfunc); return;
  292. case VM_FRAME_MAGIC_IFUNC: RB_DEBUG_COUNTER_INC(frame_push_ifunc); return;
  293. case VM_FRAME_MAGIC_EVAL: RB_DEBUG_COUNTER_INC(frame_push_eval); return;
  294. case VM_FRAME_MAGIC_RESCUE: RB_DEBUG_COUNTER_INC(frame_push_rescue); return;
  295. case VM_FRAME_MAGIC_DUMMY: RB_DEBUG_COUNTER_INC(frame_push_dummy); return;
  296. }
  297. rb_bug("unreachable");
  298. }
  299. #else
  300. #define vm_push_frame_debug_counter_inc(ec, cfp, t) /* void */
  301. #endif
  302. STATIC_ASSERT(VM_ENV_DATA_INDEX_ME_CREF, VM_ENV_DATA_INDEX_ME_CREF == -2);
  303. STATIC_ASSERT(VM_ENV_DATA_INDEX_SPECVAL, VM_ENV_DATA_INDEX_SPECVAL == -1);
  304. STATIC_ASSERT(VM_ENV_DATA_INDEX_FLAGS, VM_ENV_DATA_INDEX_FLAGS == -0);
  305. static void
  306. vm_push_frame(rb_execution_context_t *ec,
  307. const rb_iseq_t *iseq,
  308. VALUE type,
  309. VALUE self,
  310. VALUE specval,
  311. VALUE cref_or_me,
  312. const VALUE *pc,
  313. VALUE *sp,
  314. int local_size,
  315. int stack_max)
  316. {
  317. rb_control_frame_t *const cfp = RUBY_VM_NEXT_CONTROL_FRAME(ec->cfp);
  318. vm_check_frame(type, specval, cref_or_me, iseq);
  319. VM_ASSERT(local_size >= 0);
  320. /* check stack overflow */
  321. CHECK_VM_STACK_OVERFLOW0(cfp, sp, local_size + stack_max);
  322. vm_check_canary(ec, sp);
  323. /* setup vm value stack */
  324. /* initialize local variables */
  325. for (int i=0; i < local_size; i++) {
  326. *sp++ = Qnil;
  327. }
  328. /* setup ep with managing data */
  329. *sp++ = cref_or_me; /* ep[-2] / Qnil or T_IMEMO(cref) or T_IMEMO(ment) */
  330. *sp++ = specval /* ep[-1] / block handler or prev env ptr */;
  331. *sp++ = type; /* ep[-0] / ENV_FLAGS */
  332. /* setup new frame */
  333. *cfp = (const struct rb_control_frame_struct) {
  334. .pc = pc,
  335. .sp = sp,
  336. .iseq = iseq,
  337. .self = self,
  338. .ep = sp - 1,
  339. .block_code = NULL,
  340. .__bp__ = sp, /* Store initial value of ep as bp to skip calculation cost of bp on JIT cancellation. */
  341. #if VM_DEBUG_BP_CHECK
  342. .bp_check = sp,
  343. #endif
  344. };
  345. ec->cfp = cfp;
  346. if (VMDEBUG == 2) {
  347. SDR();
  348. }
  349. vm_push_frame_debug_counter_inc(ec, cfp, type);
  350. }
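
The three trailing slots written above are exactly what the STATIC_ASSERTs at lines 302-304 pin down: ep[-2] holds cref_or_me, ep[-1] the specval, ep[0] the flags, with ep set to sp - 1. A minimal standalone sketch of that layout follows; it is not part of vm_insnhelper.c, and every DEMO_ name is invented for the illustration.

#include <assert.h>
#include <stdio.h>

typedef unsigned long DEMO_VALUE;      /* stand-in for VALUE */
#define DEMO_IDX_ME_CREF (-2)          /* cf. VM_ENV_DATA_INDEX_ME_CREF */
#define DEMO_IDX_SPECVAL (-1)          /* cf. VM_ENV_DATA_INDEX_SPECVAL */
#define DEMO_IDX_FLAGS   ( 0)          /* cf. VM_ENV_DATA_INDEX_FLAGS   */

int
main(void)
{
    DEMO_VALUE stack[16];
    DEMO_VALUE *sp = stack;

    /* locals are initialized first (two of them in this demo) */
    *sp++ = 0;
    *sp++ = 0;
    /* then the three management words, in the same order as vm_push_frame */
    *sp++ = 0xC0FFEE;                  /* cref_or_me */
    *sp++ = 0xBEEF;                    /* specval    */
    *sp++ = 0x1;                       /* flags      */

    const DEMO_VALUE *ep = sp - 1;     /* matches .ep = sp - 1 above */
    assert(ep[DEMO_IDX_ME_CREF] == 0xC0FFEE);
    assert(ep[DEMO_IDX_SPECVAL] == 0xBEEF);
    assert(ep[DEMO_IDX_FLAGS]   == 0x1);
    printf("ep[-2]=%lx ep[-1]=%lx ep[0]=%lx\n", ep[-2], ep[-1], ep[0]);
    return 0;
}
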
  351. /* return TRUE if the frame is finished */
  352. static inline int
  353. vm_pop_frame(rb_execution_context_t *ec, rb_control_frame_t *cfp, const VALUE *ep)
  354. {
  355. VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
  356. if (VM_CHECK_MODE >= 4) rb_gc_verify_internal_consistency();
  357. if (VMDEBUG == 2) SDR();
  358. RUBY_VM_CHECK_INTS(ec);
  359. ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
  360. return flags & VM_FRAME_FLAG_FINISH;
  361. }
  362. MJIT_STATIC void
  363. rb_vm_pop_frame(rb_execution_context_t *ec)
  364. {
  365. vm_pop_frame(ec, ec->cfp, ec->cfp->ep);
  366. }
  367. /* method dispatch */
  368. static inline VALUE
  369. rb_arity_error_new(int argc, int min, int max)
  370. {
  371. VALUE err_mess = 0;
  372. if (min == max) {
  373. err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d)", argc, min);
  374. }
  375. else if (max == UNLIMITED_ARGUMENTS) {
  376. err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d+)", argc, min);
  377. }
  378. else {
  379. err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d..%d)", argc, min, max);
  380. }
  381. return rb_exc_new3(rb_eArgError, err_mess);
  382. }
  383. MJIT_STATIC void
  384. rb_error_arity(int argc, int min, int max)
  385. {
  386. rb_exc_raise(rb_arity_error_new(argc, min, max));
  387. }
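
rb_arity_error_new chooses one of three message shapes depending on whether the arity is exact, unbounded above, or a bounded range. A standalone sketch of just that selection logic follows; it is not Ruby's implementation, and UNLIMITED_ARGUMENTS being -1 is an assumption of the sketch.

#include <stdio.h>

#define DEMO_UNLIMITED_ARGUMENTS (-1)  /* assumed value, for the demo only */

static void
demo_arity_message(int argc, int min, int max)
{
    if (min == max)
        printf("wrong number of arguments (given %d, expected %d)\n", argc, min);
    else if (max == DEMO_UNLIMITED_ARGUMENTS)
        printf("wrong number of arguments (given %d, expected %d+)\n", argc, min);
    else
        printf("wrong number of arguments (given %d, expected %d..%d)\n", argc, min, max);
}

int
main(void)
{
    demo_arity_message(3, 2, 2);                         /* exact arity      */
    demo_arity_message(0, 1, DEMO_UNLIMITED_ARGUMENTS);  /* open-ended arity */
    demo_arity_message(5, 1, 3);                         /* bounded range    */
    return 0;
}
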
  388. /* lvar */
  389. NOINLINE(static void vm_env_write_slowpath(const VALUE *ep, int index, VALUE v));
  390. static void
  391. vm_env_write_slowpath(const VALUE *ep, int index, VALUE v)
  392. {
  393. /* remember env value forcibly */
  394. rb_gc_writebarrier_remember(VM_ENV_ENVVAL(ep));
  395. VM_FORCE_WRITE(&ep[index], v);
  396. VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_WB_REQUIRED);
  397. RB_DEBUG_COUNTER_INC(lvar_set_slowpath);
  398. }
  399. static inline void
  400. vm_env_write(const VALUE *ep, int index, VALUE v)
  401. {
  402. VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
  403. if (LIKELY((flags & VM_ENV_FLAG_WB_REQUIRED) == 0)) {
  404. VM_STACK_ENV_WRITE(ep, index, v);
  405. }
  406. else {
  407. vm_env_write_slowpath(ep, index, v);
  408. }
  409. }
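
vm_env_write is a classic fast/slow split: when the env's WB_REQUIRED flag is clear the write is a plain store, otherwise vm_env_write_slowpath runs the GC write barrier and clears the flag so later writes take the fast path. A self-contained sketch of the same pattern with invented DEMO_ names follows; it is not the VM code.

#include <stdio.h>

#define DEMO_FLAG_WB_REQUIRED 0x1      /* stands in for VM_ENV_FLAG_WB_REQUIRED */

struct demo_env {
    unsigned long flags;
    long slots[4];
    int slowpath_hits;
};

static void
demo_env_write_slowpath(struct demo_env *env, int index, long v)
{
    env->slowpath_hits++;              /* stands in for rb_gc_writebarrier_remember() */
    env->slots[index] = v;
    env->flags &= ~(unsigned long)DEMO_FLAG_WB_REQUIRED;  /* later writes take the fast path */
}

static void
demo_env_write(struct demo_env *env, int index, long v)
{
    if ((env->flags & DEMO_FLAG_WB_REQUIRED) == 0) {
        env->slots[index] = v;         /* fast path: plain store */
    }
    else {
        demo_env_write_slowpath(env, index, v);
    }
}

int
main(void)
{
    struct demo_env env = { DEMO_FLAG_WB_REQUIRED, {0}, 0 };
    demo_env_write(&env, 0, 42);       /* slow: flag was set     */
    demo_env_write(&env, 1, 43);       /* fast: flag now cleared */
    printf("slowpath_hits=%d slots={%ld,%ld}\n",
           env.slowpath_hits, env.slots[0], env.slots[1]);
    return 0;
}
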
  410. MJIT_STATIC VALUE
  411. rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
  412. {
  413. if (block_handler == VM_BLOCK_HANDLER_NONE) {
  414. return Qnil;
  415. }
  416. else {
  417. switch (vm_block_handler_type(block_handler)) {
  418. case block_handler_type_iseq:
  419. case block_handler_type_ifunc:
  420. return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
  421. case block_handler_type_symbol:
  422. return rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
  423. case block_handler_type_proc:
  424. return VM_BH_TO_PROC(block_handler);
  425. default:
  426. VM_UNREACHABLE(rb_vm_bh_to_procval);
  427. }
  428. }
  429. }
  430. /* svar */
  431. #if VM_CHECK_MODE > 0
  432. static int
  433. vm_svar_valid_p(VALUE svar)
  434. {
  435. if (RB_TYPE_P((VALUE)svar, T_IMEMO)) {
  436. switch (imemo_type(svar)) {
  437. case imemo_svar:
  438. case imemo_cref:
  439. case imemo_ment:
  440. return TRUE;
  441. default:
  442. break;
  443. }
  444. }
  445. rb_bug("vm_svar_valid_p: unknown type: %s", rb_obj_info(svar));
  446. return FALSE;
  447. }
  448. #endif
  449. static inline struct vm_svar *
  450. lep_svar(const rb_execution_context_t *ec, const VALUE *lep)
  451. {
  452. VALUE svar;
  453. if (lep && (ec == NULL || ec->root_lep != lep)) {
  454. svar = lep[VM_ENV_DATA_INDEX_ME_CREF];
  455. }
  456. else {
  457. svar = ec->root_svar;
  458. }
  459. VM_ASSERT(svar == Qfalse || vm_svar_valid_p(svar));
  460. return (struct vm_svar *)svar;
  461. }
  462. static inline void
  463. lep_svar_write(const rb_execution_context_t *ec, const VALUE *lep, const struct vm_svar *svar)
  464. {
  465. VM_ASSERT(vm_svar_valid_p((VALUE)svar));
  466. if (lep && (ec == NULL || ec->root_lep != lep)) {
  467. vm_env_write(lep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)svar);
  468. }
  469. else {
  470. RB_OBJ_WRITE(rb_ec_thread_ptr(ec)->self, &ec->root_svar, svar);
  471. }
  472. }
  473. static VALUE
  474. lep_svar_get(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key)
  475. {
  476. const struct vm_svar *svar = lep_svar(ec, lep);
  477. if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) return Qnil;
  478. switch (key) {
  479. case VM_SVAR_LASTLINE:
  480. return svar->lastline;
  481. case VM_SVAR_BACKREF:
  482. return svar->backref;
  483. default: {
  484. const VALUE ary = svar->others;
  485. if (NIL_P(ary)) {
  486. return Qnil;
  487. }
  488. else {
  489. return rb_ary_entry(ary, key - VM_SVAR_EXTRA_START);
  490. }
  491. }
  492. }
  493. }
  494. static struct vm_svar *
  495. svar_new(VALUE obj)
  496. {
  497. return (struct vm_svar *)rb_imemo_new(imemo_svar, Qnil, Qnil, Qnil, obj);
  498. }
  499. static void
  500. lep_svar_set(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, VALUE val)
  501. {
  502. struct vm_svar *svar = lep_svar(ec, lep);
  503. if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) {
  504. lep_svar_write(ec, lep, svar = svar_new((VALUE)svar));
  505. }
  506. switch (key) {
  507. case VM_SVAR_LASTLINE:
  508. RB_OBJ_WRITE(svar, &svar->lastline, val);
  509. return;
  510. case VM_SVAR_BACKREF:
  511. RB_OBJ_WRITE(svar, &svar->backref, val);
  512. return;
  513. default: {
  514. VALUE ary = svar->others;
  515. if (NIL_P(ary)) {
  516. RB_OBJ_WRITE(svar, &svar->others, ary = rb_ary_new());
  517. }
  518. rb_ary_store(ary, key - VM_SVAR_EXTRA_START, val);
  519. }
  520. }
  521. }
  522. static inline VALUE
  523. vm_getspecial(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, rb_num_t type)
  524. {
  525. VALUE val;
  526. if (type == 0) {
  527. val = lep_svar_get(ec, lep, key);
  528. }
  529. else {
  530. VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);
  531. if (type & 0x01) {
  532. switch (type >> 1) {
  533. case '&':
  534. val = rb_reg_last_match(backref);
  535. break;
  536. case '`':
  537. val = rb_reg_match_pre(backref);
  538. break;
  539. case '\'':
  540. val = rb_reg_match_post(backref);
  541. break;
  542. case '+':
  543. val = rb_reg_match_last(backref);
  544. break;
  545. default:
  546. rb_bug("unexpected back-ref");
  547. }
  548. }
  549. else {
  550. val = rb_reg_nth_match((int)(type >> 1), backref);
  551. }
  552. }
  553. return val;
  554. }
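
vm_getspecial packs its selector into a single operand: type == 0 means an svar lookup by key, an odd type encodes one of the back-reference characters ($&, $`, $', $+) in its upper bits, and an even type encodes a numbered match group. A standalone decoder sketch of that encoding follows; it is not part of the VM.

#include <stdio.h>

static void
demo_decode_special(unsigned long type)
{
    if (type == 0) {
        printf("svar lookup by key\n");
    }
    else if (type & 0x01) {
        printf("back-reference $%c\n", (int)(type >> 1));
    }
    else {
        printf("numbered match group $%lu\n", type >> 1);
    }
}

int
main(void)
{
    demo_decode_special(0);                              /* svar slot ($_, $~, ...) */
    demo_decode_special(((unsigned long)'&' << 1) | 1);  /* $&                      */
    demo_decode_special(3UL << 1);                       /* $3                      */
    return 0;
}
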
  555. PUREFUNC(static rb_callable_method_entry_t *check_method_entry(VALUE obj, int can_be_svar));
  556. static rb_callable_method_entry_t *
  557. check_method_entry(VALUE obj, int can_be_svar)
  558. {
  559. if (obj == Qfalse) return NULL;
  560. #if VM_CHECK_MODE > 0
  561. if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_method_entry: unknown type: %s", rb_obj_info(obj));
  562. #endif
  563. switch (imemo_type(obj)) {
  564. case imemo_ment:
  565. return (rb_callable_method_entry_t *)obj;
  566. case imemo_cref:
  567. return NULL;
  568. case imemo_svar:
  569. if (can_be_svar) {
  570. return check_method_entry(((struct vm_svar *)obj)->cref_or_me, FALSE);
  571. }
  572. default:
  573. #if VM_CHECK_MODE > 0
  574. rb_bug("check_method_entry: svar should not be there:");
  575. #endif
  576. return NULL;
  577. }
  578. }
  579. MJIT_STATIC const rb_callable_method_entry_t *
  580. rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
  581. {
  582. const VALUE *ep = cfp->ep;
  583. rb_callable_method_entry_t *me;
  584. while (!VM_ENV_LOCAL_P(ep)) {
  585. if ((me = check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
  586. ep = VM_ENV_PREV_EP(ep);
  587. }
  588. return check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
  589. }
  590. static rb_iseq_t *
  591. method_entry_iseqptr(const rb_callable_method_entry_t *me)
  592. {
  593. switch (me->def->type) {
  594. case VM_METHOD_TYPE_ISEQ:
  595. return me->def->body.iseq.iseqptr;
  596. default:
  597. return NULL;
  598. }
  599. }
  600. static rb_cref_t *
  601. method_entry_cref(const rb_callable_method_entry_t *me)
  602. {
  603. switch (me->def->type) {
  604. case VM_METHOD_TYPE_ISEQ:
  605. return me->def->body.iseq.cref;
  606. default:
  607. return NULL;
  608. }
  609. }
  610. #if VM_CHECK_MODE == 0
  611. PUREFUNC(static rb_cref_t *check_cref(VALUE, int));
  612. #endif
  613. static rb_cref_t *
  614. check_cref(VALUE obj, int can_be_svar)
  615. {
  616. if (obj == Qfalse) return NULL;
  617. #if VM_CHECK_MODE > 0
  618. if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_cref: unknown type: %s", rb_obj_info(obj));
  619. #endif
  620. switch (imemo_type(obj)) {
  621. case imemo_ment:
  622. return method_entry_cref((rb_callable_method_entry_t *)obj);
  623. case imemo_cref:
  624. return (rb_cref_t *)obj;
  625. case imemo_svar:
  626. if (can_be_svar) {
  627. return check_cref(((struct vm_svar *)obj)->cref_or_me, FALSE);
  628. }
  629. default:
  630. #if VM_CHECK_MODE > 0
  631. rb_bug("check_cref: svar should not be there:");
  632. #endif
  633. return NULL;
  634. }
  635. }
  636. static inline rb_cref_t *
  637. vm_env_cref(const VALUE *ep)
  638. {
  639. rb_cref_t *cref;
  640. while (!VM_ENV_LOCAL_P(ep)) {
  641. if ((cref = check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return cref;
  642. ep = VM_ENV_PREV_EP(ep);
  643. }
  644. return check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
  645. }
  646. static int
  647. is_cref(const VALUE v, int can_be_svar)
  648. {
  649. if (RB_TYPE_P(v, T_IMEMO)) {
  650. switch (imemo_type(v)) {
  651. case imemo_cref:
  652. return TRUE;
  653. case imemo_svar:
  654. if (can_be_svar) return is_cref(((struct vm_svar *)v)->cref_or_me, FALSE);
  655. default:
  656. break;
  657. }
  658. }
  659. return FALSE;
  660. }
  661. static int
  662. vm_env_cref_by_cref(const VALUE *ep)
  663. {
  664. while (!VM_ENV_LOCAL_P(ep)) {
  665. if (is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) return TRUE;
  666. ep = VM_ENV_PREV_EP(ep);
  667. }
  668. return is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
  669. }
  670. static rb_cref_t *
  671. cref_replace_with_duplicated_cref_each_frame(const VALUE *vptr, int can_be_svar, VALUE parent)
  672. {
  673. const VALUE v = *vptr;
  674. rb_cref_t *cref, *new_cref;
  675. if (RB_TYPE_P(v, T_IMEMO)) {
  676. switch (imemo_type(v)) {
  677. case imemo_cref:
  678. cref = (rb_cref_t *)v;
  679. new_cref = vm_cref_dup(cref);
  680. if (parent) {
  681. RB_OBJ_WRITE(parent, vptr, new_cref);
  682. }
  683. else {
  684. VM_FORCE_WRITE(vptr, (VALUE)new_cref);
  685. }
  686. return (rb_cref_t *)new_cref;
  687. case imemo_svar:
  688. if (can_be_svar) {
  689. return cref_replace_with_duplicated_cref_each_frame((const VALUE *)&((struct vm_svar *)v)->cref_or_me, FALSE, v);
  690. }
  691. /* fall through */
  692. case imemo_ment:
  693. rb_bug("cref_replace_with_duplicated_cref_each_frame: unreachable");
  694. default:
  695. break;
  696. }
  697. }
  698. return FALSE;
  699. }
  700. static rb_cref_t *
  701. vm_cref_replace_with_duplicated_cref(const VALUE *ep)
  702. {
  703. if (vm_env_cref_by_cref(ep)) {
  704. rb_cref_t *cref;
  705. VALUE envval;
  706. while (!VM_ENV_LOCAL_P(ep)) {
  707. envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
  708. if ((cref = cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE, envval)) != NULL) {
  709. return cref;
  710. }
  711. ep = VM_ENV_PREV_EP(ep);
  712. }
  713. envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
  714. return cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE, envval);
  715. }
  716. else {
  717. rb_bug("vm_cref_dup: unreachable");
  718. }
  719. }
  720. static rb_cref_t *
  721. vm_get_cref(const VALUE *ep)
  722. {
  723. rb_cref_t *cref = vm_env_cref(ep);
  724. if (cref != NULL) {
  725. return cref;
  726. }
  727. else {
  728. rb_bug("vm_get_cref: unreachable");
  729. }
  730. }
  731. static rb_cref_t *
  732. vm_ec_cref(const rb_execution_context_t *ec)
  733. {
  734. const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
  735. if (cfp == NULL) {
  736. return NULL;
  737. }
  738. return vm_get_cref(cfp->ep);
  739. }
  740. static const rb_cref_t *
  741. vm_get_const_key_cref(const VALUE *ep)
  742. {
  743. const rb_cref_t *cref = vm_get_cref(ep);
  744. const rb_cref_t *key_cref = cref;
  745. while (cref) {
  746. if (FL_TEST(CREF_CLASS(cref), FL_SINGLETON) ||
  747. FL_TEST(CREF_CLASS(cref), RCLASS_CLONED)) {
  748. return key_cref;
  749. }
  750. cref = CREF_NEXT(cref);
  751. }
  752. /* does not include singleton class */
  753. return NULL;
  754. }
  755. void
  756. rb_vm_rewrite_cref(rb_cref_t *cref, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr)
  757. {
  758. rb_cref_t *new_cref;
  759. while (cref) {
  760. if (CREF_CLASS(cref) == old_klass) {
  761. new_cref = vm_cref_new_use_prev(new_klass, METHOD_VISI_UNDEF, FALSE, cref, FALSE);
  762. *new_cref_ptr = new_cref;
  763. return;
  764. }
  765. new_cref = vm_cref_new_use_prev(CREF_CLASS(cref), METHOD_VISI_UNDEF, FALSE, cref, FALSE);
  766. cref = CREF_NEXT(cref);
  767. *new_cref_ptr = new_cref;
  768. new_cref_ptr = (rb_cref_t **)&new_cref->next;
  769. }
  770. *new_cref_ptr = NULL;
  771. }
  772. static rb_cref_t *
  773. vm_cref_push(const rb_execution_context_t *ec, VALUE klass, const VALUE *ep, int pushed_by_eval)
  774. {
  775. rb_cref_t *prev_cref = NULL;
  776. if (ep) {
  777. prev_cref = vm_env_cref(ep);
  778. }
  779. else {
  780. rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(ec, ec->cfp);
  781. if (cfp) {
  782. prev_cref = vm_env_cref(cfp->ep);
  783. }
  784. }
  785. return vm_cref_new(klass, METHOD_VISI_PUBLIC, FALSE, prev_cref, pushed_by_eval);
  786. }
  787. static inline VALUE
  788. vm_get_cbase(const VALUE *ep)
  789. {
  790. const rb_cref_t *cref = vm_get_cref(ep);
  791. VALUE klass = Qundef;
  792. while (cref) {
  793. if ((klass = CREF_CLASS(cref)) != 0) {
  794. break;
  795. }
  796. cref = CREF_NEXT(cref);
  797. }
  798. return klass;
  799. }
  800. static inline VALUE
  801. vm_get_const_base(const VALUE *ep)
  802. {
  803. const rb_cref_t *cref = vm_get_cref(ep);
  804. VALUE klass = Qundef;
  805. while (cref) {
  806. if (!CREF_PUSHED_BY_EVAL(cref) &&
  807. (klass = CREF_CLASS(cref)) != 0) {
  808. break;
  809. }
  810. cref = CREF_NEXT(cref);
  811. }
  812. return klass;
  813. }
  814. static inline void
  815. vm_check_if_namespace(VALUE klass)
  816. {
  817. if (!RB_TYPE_P(klass, T_CLASS) && !RB_TYPE_P(klass, T_MODULE)) {
  818. rb_raise(rb_eTypeError, "%+"PRIsVALUE" is not a class/module", klass);
  819. }
  820. }
  821. static inline void
  822. vm_ensure_not_refinement_module(VALUE self)
  823. {
  824. if (RB_TYPE_P(self, T_MODULE) && FL_TEST(self, RMODULE_IS_REFINEMENT)) {
  825. rb_warn("not defined at the refinement, but at the outer class/module");
  826. }
  827. }
  828. static inline VALUE
  829. vm_get_iclass(const rb_control_frame_t *cfp, VALUE klass)
  830. {
  831. return klass;
  832. }
  833. static inline VALUE
  834. vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, bool allow_nil, int is_defined)
  835. {
  836. void rb_const_warn_if_deprecated(const rb_const_entry_t *ce, VALUE klass, ID id);
  837. VALUE val;
  838. if (orig_klass == Qnil && allow_nil) {
  839. /* in current lexical scope */
  840. const rb_cref_t *root_cref = vm_get_cref(ec->cfp->ep);
  841. const rb_cref_t *cref;
  842. VALUE klass = Qnil;
  843. while (root_cref && CREF_PUSHED_BY_EVAL(root_cref)) {
  844. root_cref = CREF_NEXT(root_cref);
  845. }
  846. cref = root_cref;
  847. while (cref && CREF_NEXT(cref)) {
  848. if (CREF_PUSHED_BY_EVAL(cref)) {
  849. klass = Qnil;
  850. }
  851. else {
  852. klass = CREF_CLASS(cref);
  853. }
  854. cref = CREF_NEXT(cref);
  855. if (!NIL_P(klass)) {
  856. VALUE av, am = 0;
  857. rb_const_entry_t *ce;
  858. search_continue:
  859. if ((ce = rb_const_lookup(klass, id))) {
  860. rb_const_warn_if_deprecated(ce, klass, id);
  861. val = ce->value;
  862. if (val == Qundef) {
  863. if (am == klass) break;
  864. am = klass;
  865. if (is_defined) return 1;
  866. if (rb_autoloading_value(klass, id, &av, NULL)) return av;
  867. rb_autoload_load(klass, id);
  868. goto search_continue;
  869. }
  870. else {
  871. if (is_defined) {
  872. return 1;
  873. }
  874. else {
  875. if (UNLIKELY(!rb_ractor_main_p())) {
  876. if (!rb_ractor_shareable_p(val)) {
  877. rb_raise(rb_eRactorIsolationError,
  878. "can not access non-shareable objects in constant %"PRIsVALUE"::%s by non-main ractor.", rb_class_path(klass), rb_id2name(id));
  879. }
  880. }
  881. return val;
  882. }
  883. }
  884. }
  885. }
  886. }
  887. /* search self */
  888. if (root_cref && !NIL_P(CREF_CLASS(root_cref))) {
  889. klass = vm_get_iclass(ec->cfp, CREF_CLASS(root_cref));
  890. }
  891. else {
  892. klass = CLASS_OF(ec->cfp->self);
  893. }
  894. if (is_defined) {
  895. return rb_const_defined(klass, id);
  896. }
  897. else {
  898. return rb_const_get(klass, id);
  899. }
  900. }
  901. else {
  902. vm_check_if_namespace(orig_klass);
  903. if (is_defined) {
  904. return rb_public_const_defined_from(orig_klass, id);
  905. }
  906. else {
  907. return rb_public_const_get_from(orig_klass, id);
  908. }
  909. }
  910. }
  911. static inline VALUE
  912. vm_get_cvar_base(const rb_cref_t *cref, const rb_control_frame_t *cfp, int top_level_raise)
  913. {
  914. VALUE klass;
  915. if (!cref) {
  916. rb_bug("vm_get_cvar_base: no cref");
  917. }
  918. while (CREF_NEXT(cref) &&
  919. (NIL_P(CREF_CLASS(cref)) || FL_TEST(CREF_CLASS(cref), FL_SINGLETON) ||
  920. CREF_PUSHED_BY_EVAL(cref))) {
  921. cref = CREF_NEXT(cref);
  922. }
  923. if (top_level_raise && !CREF_NEXT(cref)) {
  924. rb_raise(rb_eRuntimeError, "class variable access from toplevel");
  925. }
  926. klass = vm_get_iclass(cfp, CREF_CLASS(cref));
  927. if (NIL_P(klass)) {
  928. rb_raise(rb_eTypeError, "no class variables available");
  929. }
  930. return klass;
  931. }
  932. static VALUE
  933. vm_search_const_defined_class(const VALUE cbase, ID id)
  934. {
  935. if (rb_const_defined_at(cbase, id)) return cbase;
  936. if (cbase == rb_cObject) {
  937. VALUE tmp = RCLASS_SUPER(cbase);
  938. while (tmp) {
  939. if (rb_const_defined_at(tmp, id)) return tmp;
  940. tmp = RCLASS_SUPER(tmp);
  941. }
  942. }
  943. return 0;
  944. }
  945. static bool
  946. iv_index_tbl_lookup(struct st_table *iv_index_tbl, ID id, struct rb_iv_index_tbl_entry **ent)
  947. {
  948. int found;
  949. if (iv_index_tbl == NULL) return false;
  950. RB_VM_LOCK_ENTER();
  951. {
  952. found = st_lookup(iv_index_tbl, (st_data_t)id, (st_data_t *)ent);
  953. }
  954. RB_VM_LOCK_LEAVE();
  955. return found ? true : false;
  956. }
  957. ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, struct rb_iv_index_tbl_entry *ent));
  958. static inline void
  959. fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, struct rb_iv_index_tbl_entry *ent)
  960. {
  961. // fill cache
  962. if (!is_attr) {
  963. ic->entry = ent;
  964. RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
  965. }
  966. else {
  967. vm_cc_attr_index_set(cc, (int)ent->index + 1);
  968. }
  969. }
  970. ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, const rb_iseq_t *, IVC, const struct rb_callcache *, int));
  971. static inline VALUE
  972. vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
  973. {
  974. #if OPT_IC_FOR_IVAR
  975. VALUE val = Qundef;
  976. if (SPECIAL_CONST_P(obj)) {
  977. // frozen?
  978. }
  979. else if (LIKELY(is_attr ?
  980. RB_DEBUG_COUNTER_INC_UNLESS(ivar_get_ic_miss_unset, vm_cc_attr_index(cc) > 0) :
  981. RB_DEBUG_COUNTER_INC_UNLESS(ivar_get_ic_miss_serial,
  982. ic->entry && ic->entry->class_serial == RCLASS_SERIAL(RBASIC(obj)->klass)))) {
  983. uint32_t index = !is_attr ? ic->entry->index : (vm_cc_attr_index(cc) - 1);
  984. RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);
  985. if (LIKELY(BUILTIN_TYPE(obj) == T_OBJECT) &&
  986. LIKELY(index < ROBJECT_NUMIV(obj))) {
  987. val = ROBJECT_IVPTR(obj)[index];
  988. VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
  989. }
  990. else if (FL_TEST_RAW(obj, FL_EXIVAR)) {
  991. val = rb_ivar_generic_lookup_with_index(obj, id, index);
  992. }
  993. goto ret;
  994. }
  995. else {
  996. struct rb_iv_index_tbl_entry *ent;
  997. if (BUILTIN_TYPE(obj) == T_OBJECT) {
  998. struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);
  999. if (iv_index_tbl && iv_index_tbl_lookup(iv_index_tbl, id, &ent)) {
  1000. fill_ivar_cache(iseq, ic, cc, is_attr, ent);
  1001. // get value
  1002. if (ent->index < ROBJECT_NUMIV(obj)) {
  1003. val = ROBJECT_IVPTR(obj)[ent->index];
  1004. VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
  1005. }
  1006. }
  1007. }
  1008. else if (FL_TEST_RAW(obj, FL_EXIVAR)) {
  1009. struct st_table *iv_index_tbl = RCLASS_IV_INDEX_TBL(rb_obj_class(obj));
  1010. if (iv_index_tbl && iv_index_tbl_lookup(iv_index_tbl, id, &ent)) {
  1011. fill_ivar_cache(iseq, ic, cc, is_attr, ent);
  1012. val = rb_ivar_generic_lookup_with_index(obj, id, ent->index);
  1013. }
  1014. }
  1015. else {
  1016. // T_CLASS / T_MODULE
  1017. goto general_path;
  1018. }
  1019. ret:
  1020. if (LIKELY(val != Qundef)) {
  1021. return val;
  1022. }
  1023. else {
  1024. return Qnil;
  1025. }
  1026. }
  1027. general_path:
  1028. #endif /* OPT_IC_FOR_IVAR */
  1029. RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);
  1030. if (is_attr) {
  1031. return rb_attr_get(obj, id);
  1032. }
  1033. else {
  1034. return rb_ivar_get(obj, id);
  1035. }
  1036. }
  1037. ALWAYS_INLINE(static VALUE vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr));
  1038. NOINLINE(static VALUE vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic));
  1039. NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc));
  1040. static VALUE
  1041. vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
  1042. {
  1043. rb_check_frozen_internal(obj);
  1044. #if OPT_IC_FOR_IVAR
  1045. if (RB_TYPE_P(obj, T_OBJECT)) {
  1046. struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);
  1047. struct rb_iv_index_tbl_entry *ent;
  1048. if (iv_index_tbl_lookup(iv_index_tbl, id, &ent)) {
  1049. if (!is_attr) {
  1050. ic->entry = ent;
  1051. RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
  1052. }
  1053. else if (ent->index >= INT_MAX) {
  1054. rb_raise(rb_eArgError, "too many instance variables");
  1055. }
  1056. else {
  1057. vm_cc_attr_index_set(cc, (int)(ent->index + 1));
  1058. }
  1059. uint32_t index = ent->index;
  1060. if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {
  1061. rb_init_iv_list(obj);
  1062. }
  1063. VALUE *ptr = ROBJECT_IVPTR(obj);
  1064. RB_OBJ_WRITE(obj, &ptr[index], val);
  1065. RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_iv_hit);
  1066. return val;
  1067. }
  1068. }
  1069. #endif
  1070. RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);
  1071. return rb_ivar_set(obj, id, val);
  1072. }
  1073. static VALUE
  1074. vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic)
  1075. {
  1076. return vm_setivar_slowpath(obj, id, val, iseq, ic, NULL, false);
  1077. }
  1078. static VALUE
  1079. vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc)
  1080. {
  1081. return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true);
  1082. }
  1083. static inline VALUE
  1084. vm_setivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
  1085. {
  1086. #if OPT_IC_FOR_IVAR
  1087. if (LIKELY(RB_TYPE_P(obj, T_OBJECT)) &&
  1088. LIKELY(!RB_OBJ_FROZEN_RAW(obj))) {
  1089. VM_ASSERT(!rb_ractor_shareable_p(obj));
  1090. if (LIKELY(
  1091. (!is_attr && RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_serial, ic->entry && ic->entry->class_serial == RCLASS_SERIAL(RBASIC(obj)->klass))) ||
  1092. ( is_attr && RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_unset, vm_cc_attr_index(cc) > 0)))) {
  1093. uint32_t index = !is_attr ? ic->entry->index : vm_cc_attr_index(cc)-1;
  1094. if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {
  1095. rb_init_iv_list(obj);
  1096. }
  1097. VALUE *ptr = ROBJECT_IVPTR(obj);
  1098. RB_OBJ_WRITE(obj, &ptr[index], val);
  1099. RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
  1100. return val; /* inline cache hit */
  1101. }
  1102. }
  1103. else {
  1104. RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
  1105. }
  1106. #endif /* OPT_IC_FOR_IVAR */
  1107. if (is_attr) {
  1108. return vm_setivar_slowpath_attr(obj, id, val, cc);
  1109. }
  1110. else {
  1111. return vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
  1112. }
  1113. }
  1114. static VALUE
  1115. update_classvariable_cache(const rb_iseq_t *iseq, VALUE klass, ID id, ICVARC ic)
  1116. {
  1117. VALUE defined_class = 0;
  1118. VALUE cvar_value = rb_cvar_find(klass, id, &defined_class);
  1119. if (RB_TYPE_P(defined_class, T_ICLASS)) {
  1120. defined_class = RBASIC(defined_class)->klass;
  1121. }
  1122. struct rb_id_table *rb_cvc_tbl = RCLASS_CVC_TBL(defined_class);
  1123. if (!rb_cvc_tbl) {
  1124. rb_bug("the cvc table should be set");
  1125. }
  1126. VALUE ent_data;
  1127. if (!rb_id_table_lookup(rb_cvc_tbl, id, &ent_data)) {
  1128. rb_bug("should have cvar cache entry");
  1129. }
  1130. struct rb_cvar_class_tbl_entry *ent = (void *)ent_data;
  1131. ent->global_cvar_state = GET_GLOBAL_CVAR_STATE();
  1132. ic->entry = ent;
  1133. RB_OBJ_WRITTEN(iseq, Qundef, ent->class_value);
  1134. return cvar_value;
  1135. }
  1136. static inline VALUE
  1137. vm_getclassvariable(const rb_iseq_t *iseq, const rb_cref_t *cref, const rb_control_frame_t *cfp, ID id, ICVARC ic)
  1138. {
  1139. if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE()) {
  1140. VALUE v = Qundef;
  1141. RB_DEBUG_COUNTER_INC(cvar_read_inline_hit);
  1142. if (st_lookup(RCLASS_IV_TBL(ic->entry->class_value), (st_data_t)id, &v)) {
  1143. return v;
  1144. }
  1145. }
  1146. VALUE klass = vm_get_cvar_base(cref, cfp, 1);
  1147. return update_classvariable_cache(iseq, klass, id, ic);
  1148. }
  1149. static inline void
  1150. vm_setclassvariable(const rb_iseq_t *iseq, const rb_cref_t *cref, const rb_control_frame_t *cfp, ID id, VALUE val, ICVARC ic)
  1151. {
  1152. if (ic->entry && ic->entry->global_cvar_state == GET_GLOBAL_CVAR_STATE()) {
  1153. RB_DEBUG_COUNTER_INC(cvar_write_inline_hit);
  1154. rb_class_ivar_set(ic->entry->class_value, id, val);
  1155. return;
  1156. }
  1157. VALUE klass = vm_get_cvar_base(cref, cfp, 1);
  1158. rb_cvar_set(klass, id, val);
  1159. update_classvariable_cache(iseq, klass, id, ic);
  1160. }
  1161. static inline VALUE
  1162. vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
  1163. {
  1164. return vm_getivar(obj, id, iseq, ic, NULL, FALSE);
  1165. }
  1166. static inline void
  1167. vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
  1168. {
  1169. vm_setivar(obj, id, val, iseq, ic, 0, 0);
  1170. }
  1171. static VALUE
  1172. vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
  1173. {
  1174. /* continue throw */
  1175. if (FIXNUM_P(err)) {
  1176. ec->tag->state = FIX2INT(err);
  1177. }
  1178. else if (SYMBOL_P(err)) {
  1179. ec->tag->state = TAG_THROW;
  1180. }
  1181. else if (THROW_DATA_P(err)) {
  1182. ec->tag->state = THROW_DATA_STATE((struct vm_throw_data *)err);
  1183. }
  1184. else {
  1185. ec->tag->state = TAG_RAISE;
  1186. }
  1187. return err;
  1188. }
  1189. static VALUE
  1190. vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, enum ruby_tag_type state,
  1191. const int flag, const VALUE throwobj)
  1192. {
  1193. const rb_control_frame_t *escape_cfp = NULL;
  1194. const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */
  1195. if (flag != 0) {
  1196. /* do nothing */
  1197. }
  1198. else if (state == TAG_BREAK) {
  1199. int is_orphan = 1;
  1200. const VALUE *ep = GET_EP();
  1201. const rb_iseq_t *base_iseq = GET_ISEQ();
  1202. escape_cfp = reg_cfp;
  1203. while (base_iseq->body->type != ISEQ_TYPE_BLOCK) {
  1204. if (escape_cfp->iseq->body->type == ISEQ_TYPE_CLASS) {
  1205. escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
  1206. ep = escape_cfp->ep;
  1207. base_iseq = escape_cfp->iseq;
  1208. }
  1209. else {
  1210. ep = VM_ENV_PREV_EP(ep);
  1211. base_iseq = base_iseq->body->parent_iseq;
  1212. escape_cfp = rb_vm_search_cf_from_ep(ec, escape_cfp, ep);
  1213. VM_ASSERT(escape_cfp->iseq == base_iseq);
  1214. }
  1215. }
  1216. if (VM_FRAME_LAMBDA_P(escape_cfp)) {
  1217. /* lambda{... break ...} */
  1218. is_orphan = 0;
  1219. state = TAG_RETURN;
  1220. }
  1221. else {
  1222. ep = VM_ENV_PREV_EP(ep);
  1223. while (escape_cfp < eocfp) {
  1224. if (escape_cfp->ep == ep) {
  1225. const rb_iseq_t *const iseq = escape_cfp->iseq;
  1226. const VALUE epc = escape_cfp->pc - iseq->body->iseq_encoded;
  1227. const struct iseq_catch_table *const ct = iseq->body->catch_table;
  1228. unsigned int i;
  1229. if (!ct) break;
  1230. for (i=0; i < ct->size; i++) {
  1231. const struct iseq_catch_table_entry *const entry =
  1232. UNALIGNED_MEMBER_PTR(ct, entries[i]);
  1233. if (entry->type == CATCH_TYPE_BREAK &&
  1234. entry->iseq == base_iseq &&
  1235. entry->start < epc && entry->end >= epc) {
  1236. if (entry->cont == epc) { /* found! */
  1237. is_orphan = 0;
  1238. }
  1239. break;
  1240. }
  1241. }
  1242. break;
  1243. }
  1244. escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
  1245. }
  1246. }
  1247. if (is_orphan) {
  1248. rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
  1249. }
  1250. }
  1251. else if (state == TAG_RETRY) {
  1252. const VALUE *ep = VM_ENV_PREV_EP(GET_EP());
  1253. escape_cfp = rb_vm_search_cf_from_ep(ec, reg_cfp, ep);
  1254. }
  1255. else if (state == TAG_RETURN) {
  1256. const VALUE *current_ep = GET_EP();
  1257. const VALUE *target_ep = NULL, *target_lep, *ep = current_ep;
  1258. int in_class_frame = 0;
  1259. int toplevel = 1;
  1260. escape_cfp = reg_cfp;
  1261. // find target_lep, target_ep
  1262. while (!VM_ENV_LOCAL_P(ep)) {
  1263. if (VM_ENV_FLAGS(ep, VM_FRAME_FLAG_LAMBDA) && target_ep == NULL) {
  1264. target_ep = ep;
  1265. }
  1266. ep = VM_ENV_PREV_EP(ep);
  1267. }
  1268. target_lep = ep;
  1269. while (escape_cfp < eocfp) {
  1270. const VALUE *lep = VM_CF_LEP(escape_cfp);
  1271. if (!target_lep) {
  1272. target_lep = lep;
  1273. }
  1274. if (lep == target_lep &&
  1275. VM_FRAME_RUBYFRAME_P(escape_cfp) &&
  1276. escape_cfp->iseq->body->type == ISEQ_TYPE_CLASS) {
  1277. in_class_frame = 1;
  1278. target_lep = 0;
  1279. }
  1280. if (lep == target_lep) {
  1281. if (VM_FRAME_LAMBDA_P(escape_cfp)) {
  1282. toplevel = 0;
  1283. if (in_class_frame) {
  1284. /* lambda {class A; ... return ...; end} */
  1285. goto valid_return;
  1286. }
  1287. else {
  1288. const VALUE *tep = current_ep;
  1289. while (target_lep != tep) {
  1290. if (escape_cfp->ep == tep) {
  1291. /* in lambda */
  1292. if (tep == target_ep) {
  1293. goto valid_return;
  1294. }
  1295. else {
  1296. goto unexpected_return;
  1297. }
  1298. }
  1299. tep = VM_ENV_PREV_EP(tep);
  1300. }
  1301. }
  1302. }
  1303. else if (VM_FRAME_RUBYFRAME_P(escape_cfp)) {
  1304. switch (escape_cfp->iseq->body->type) {
  1305. case ISEQ_TYPE_TOP:
  1306. case ISEQ_TYPE_MAIN:
  1307. if (toplevel) {
  1308. if (in_class_frame) goto unexpected_return;
  1309. if (target_ep == NULL) {
  1310. goto valid_return;
  1311. }
  1312. else {
  1313. goto unexpected_return;
  1314. }
  1315. }
  1316. break;
  1317. case ISEQ_TYPE_EVAL:
  1318. case ISEQ_TYPE_CLASS:
  1319. toplevel = 0;
  1320. break;
  1321. default:
  1322. break;
  1323. }
  1324. }
  1325. }
  1326. if (escape_cfp->ep == target_lep && escape_cfp->iseq->body->type == ISEQ_TYPE_METHOD) {
  1327. if (target_ep == NULL) {
  1328. goto valid_return;
  1329. }
  1330. else {
  1331. goto unexpected_return;
  1332. }
  1333. }
  1334. escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
  1335. }
  1336. unexpected_return:;
  1337. rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);
  1338. valid_return:;
  1339. /* do nothing */
  1340. }
  1341. else {
  1342. rb_bug("insns(throw): unsupported throw type");
  1343. }
  1344. ec->tag->state = state;
  1345. return (VALUE)THROW_DATA_NEW(throwobj, escape_cfp, state);
  1346. }
  1347. static VALUE
  1348. vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
  1349. rb_num_t throw_state, VALUE throwobj)
  1350. {
  1351. const int state = (int)(throw_state & VM_THROW_STATE_MASK);
  1352. const int flag = (int)(throw_state & VM_THROW_NO_ESCAPE_FLAG);
  1353. if (state != 0) {
  1354. return vm_throw_start(ec, reg_cfp, state, flag, throwobj);
  1355. }
  1356. else {
  1357. return vm_throw_continue(ec, throwobj);
  1358. }
  1359. }
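
vm_throw splits throw_state into the tag state (low bits) and a no-escape flag before dispatching to vm_throw_start or vm_throw_continue. A standalone sketch of that split follows; the DEMO_ mask values are placeholders chosen for this example, not the real VM_THROW_STATE_MASK / VM_THROW_NO_ESCAPE_FLAG definitions.

#include <stdio.h>

/* placeholder masks for the demo; not the real definitions */
#define DEMO_STATE_MASK     0xfful
#define DEMO_NO_ESCAPE_FLAG 0x8000ul

static void
demo_split_throw_state(unsigned long throw_state)
{
    int state         = (int)(throw_state & DEMO_STATE_MASK);
    unsigned long flg = throw_state & DEMO_NO_ESCAPE_FLAG;
    if (state != 0)
        printf("start a throw: state=%d no_escape=%s\n", state, flg ? "yes" : "no");
    else
        printf("continue an in-flight throw object\n");
}

int
main(void)
{
    demo_split_throw_state(0x02);                        /* some non-zero tag state        */
    demo_split_throw_state(DEMO_NO_ESCAPE_FLAG | 0x04);  /* tag state plus no-escape flag  */
    demo_split_throw_state(0x00);                        /* state 0: continue existing err */
    return 0;
}
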
  1360. static inline void
  1361. vm_expandarray(VALUE *sp, VALUE ary, rb_num_t num, int flag)
  1362. {
  1363. int is_splat = flag & 0x01;
  1364. rb_num_t space_size = num + is_splat;
  1365. VALUE *base = sp - 1;
  1366. const VALUE *ptr;
  1367. rb_num_t len;
  1368. const VALUE obj = ary;
  1369. if (!RB_TYPE_P(ary, T_ARRAY) && NIL_P(ary = rb_check_array_type(ary))) {
  1370. ary = obj;
  1371. ptr = &ary;
  1372. len = 1;
  1373. }
  1374. else {
  1375. ptr = RARRAY_CONST_PTR_TRANSIENT(ary);
  1376. len = (rb_num_t)RARRAY_LEN(ary);
  1377. }
  1378. if (space_size == 0) {
  1379. /* no space left on stack */
  1380. }
  1381. else if (flag & 0x02) {
  1382. /* post: ..., nil ,ary[-1], ..., ary[0..-num] # top */
  1383. rb_num_t i = 0, j;
  1384. if (len < num) {
  1385. for (i=0; i<num-len; i++) {
  1386. *base++ = Qnil;
  1387. }
  1388. }
  1389. for (j=0; i<num; i++, j++) {
  1390. VALUE v = ptr[len - j - 1];
  1391. *base++ = v;
  1392. }
  1393. if (is_splat) {
  1394. *base = rb_ary_new4(len - j, ptr);
  1395. }
  1396. }
  1397. else {
  1398. /* normal: ary[num..-1], ary[num-1], ary[num-2], ..., ary[0] # top */
  1399. rb_num_t i;
  1400. VALUE *bptr = &base[space_size - 1];
  1401. for (i=0; i<num; i++) {
  1402. if (len <= i) {
  1403. for (; i<num; i++) {
  1404. *bptr-- = Qnil;
  1405. }
  1406. break;
  1407. }
  1408. *bptr-- = ptr[i];
  1409. }
  1410. if (is_splat) {
  1411. if (num > len) {
  1412. *bptr = rb_ary_new();
  1413. }
  1414. else {
  1415. *bptr = rb_ary_new4(len - num, ptr + num);
  1416. }
  1417. }
  1418. }
  1419. RB_GC_GUARD(ary);
  1420. }
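
In the non-post path above, the first num elements are written so that ary[0] ends up on top of the stack, missing elements become nil, and the optional splat slot receives whatever is left. A standalone sketch of that distribution using plain ints follows; it is not the VM code, and the splat slot here just records how many elements it would keep.

#include <stdio.h>

#define DEMO_NIL (-1)                  /* stand-in for Qnil */

static void
demo_expand_normal(const int *ary, int len, int num, int is_splat)
{
    int slots[8];                      /* slots[space-1] is the stack top */
    int space = num + is_splat;
    int *bptr = &slots[space - 1];
    int i;

    for (i = 0; i < num; i++) {
        if (len <= i) {
            for (; i < num; i++) *bptr-- = DEMO_NIL;   /* pad with nil */
            break;
        }
        *bptr-- = ary[i];              /* ary[0] lands on the stack top */
    }
    if (is_splat) {
        /* the splat slot; this sketch just records how many elements it keeps */
        *bptr = (num > len) ? 0 : len - num;
    }

    printf("bottom -> top:");
    for (i = 0; i < space; i++) printf(" %d", slots[i]);
    printf("\n");
}

int
main(void)
{
    int a[] = {10, 20, 30, 40};
    demo_expand_normal(a, 4, 2, 1);    /* x, y, *rest = [10,20,30,40]       */
    demo_expand_normal(a, 4, 6, 0);    /* six targets, so two slots get nil */
    return 0;
}
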
  1421. static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling);
  1422. static VALUE vm_mtbl_dump(VALUE klass, ID target_mid);
  1423. static struct rb_class_cc_entries *
  1424. vm_ccs_create(VALUE klass, const rb_callable_method_entry_t *cme)
  1425. {
  1426. struct rb_class_cc_entries *ccs = ALLOC(struct rb_class_cc_entries);
  1427. #if VM_CHECK_MODE > 0
  1428. ccs->debug_sig = ~(VALUE)ccs;
  1429. #endif
  1430. ccs->capa = 0;
  1431. ccs->len = 0;
  1432. RB_OBJ_WRITE(klass, &ccs->cme, cme);
  1433. METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme);
  1434. ccs->entries = NULL;
  1435. return ccs;
  1436. }
  1437. static void
  1438. vm_ccs_push(VALUE klass, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc)
  1439. {
  1440. if (! vm_cc_markable(cc)) {
  1441. return;
  1442. }
  1443. else if (! vm_ci_markable(ci)) {
  1444. return;
  1445. }
  1446. if (UNLIKELY(ccs->len == ccs->capa)) {
  1447. if (ccs->capa == 0) {
  1448. ccs->capa = 1;
  1449. ccs->entries = ALLOC_N(struct rb_class_cc_entries_entry, ccs->capa);
  1450. }
  1451. else {
  1452. ccs->capa *= 2;
  1453. REALLOC_N(ccs->entries, struct rb_class_cc_entries_entry, ccs->capa);
  1454. }
  1455. }
  1456. VM_ASSERT(ccs->len < ccs->capa);
  1457. const int pos = ccs->len++;
  1458. RB_OBJ_WRITE(klass, &ccs->entries[pos].ci, ci);
  1459. RB_OBJ_WRITE(klass, &ccs->entries[pos].cc, cc);
  1460. if (RB_DEBUG_COUNTER_SETMAX(ccs_maxlen, ccs->len)) {
  1461. // for tuning
  1462. // vm_mtbl_dump(klass, 0);
  1463. }
  1464. }
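
vm_ccs_push grows the entries array geometrically: capacity starts at 1 and doubles whenever the array fills, so pushes stay amortized O(1). A self-contained sketch of the same growth policy follows; it is not the VM code.

#include <stdio.h>
#include <stdlib.h>

struct demo_entries {
    int capa;
    int len;
    int *items;
};

static void
demo_push(struct demo_entries *e, int item)
{
    if (e->len == e->capa) {
        e->capa = (e->capa == 0) ? 1 : e->capa * 2;    /* start at 1, then double */
        int *tmp = realloc(e->items, sizeof(int) * (size_t)e->capa);
        if (!tmp) { perror("realloc"); exit(EXIT_FAILURE); }
        e->items = tmp;
    }
    e->items[e->len++] = item;
}

int
main(void)
{
    struct demo_entries e = {0, 0, NULL};
    for (int i = 0; i < 5; i++) {
        demo_push(&e, i);
        printf("len=%d capa=%d\n", e.len, e.capa);
    }
    free(e.items);
    return 0;
}
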
  1465. #if VM_CHECK_MODE > 0
  1466. void
  1467. rb_vm_ccs_dump(struct rb_class_cc_entries *ccs)
  1468. {
  1469. ruby_debug_printf("ccs:%p (%d,%d)\n", (void *)ccs, ccs->len, ccs->capa);
  1470. for (int i=0; i<ccs->len; i++) {
  1471. vm_ci_dump(ccs->entries[i].ci);
  1472. rp(ccs->entries[i].cc);
  1473. }
  1474. }
  1475. static int
  1476. vm_ccs_verify(struct rb_class_cc_entries *ccs, ID mid, VALUE klass)
  1477. {
  1478. VM_ASSERT(vm_ccs_p(ccs));
  1479. VM_ASSERT(ccs->len <= ccs->capa);
  1480. for (int i=0; i<ccs->len; i++) {
  1481. const struct rb_callinfo *ci = ccs->entries[i].ci;
  1482. const struct rb_callcache *cc = ccs->entries[i].cc;
  1483. VM_ASSERT(vm_ci_p(ci));
  1484. VM_ASSERT(vm_ci_mid(ci) == mid);
  1485. VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
  1486. VM_ASSERT(vm_cc_class_check(cc, klass));
  1487. VM_ASSERT(vm_cc_cme(cc) == ccs->cme);
  1488. }
  1489. return TRUE;
  1490. }
  1491. #endif
  1492. #ifndef MJIT_HEADER
  1493. static const struct rb_callcache *
  1494. vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci)
  1495. {
  1496. const ID mid = vm_ci_mid(ci);
  1497. struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
  1498. struct rb_class_cc_entries *ccs = NULL;
  1499. VALUE ccs_data;
  1500. if (cc_tbl) {
  1501. if (rb_id_table_lookup(cc_tbl, mid, &ccs_data)) {
  1502. ccs = (struct rb_class_cc_entries *)ccs_data;
  1503. const int ccs_len = ccs->len;
  1504. VM_ASSERT(vm_ccs_verify(ccs, mid, klass));
  1505. if (UNLIKELY(METHOD_ENTRY_INVALIDATED(ccs->cme))) {
  1506. rb_vm_ccs_free(ccs);
  1507. rb_id_table_delete(cc_tbl, mid);
  1508. ccs = NULL;
  1509. }
  1510. else {
  1511. for (int i=0; i<ccs_len; i++) {
  1512. const struct rb_callinfo *ccs_ci = ccs->entries[i].ci;
  1513. const struct rb_callcache *ccs_cc = ccs->entries[i].cc;
  1514. VM_ASSERT(vm_ci_p(ccs_ci));
  1515. VM_ASSERT(IMEMO_TYPE_P(ccs_cc, imemo_callcache));
  1516. if (ccs_ci == ci) { // TODO: equality
  1517. RB_DEBUG_COUNTER_INC(cc_found_in_ccs);
  1518. VM_ASSERT(vm_cc_cme(ccs_cc)->called_id == mid);
  1519. VM_ASSERT(ccs_cc->klass == klass);
  1520. VM_ASSERT(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(ccs_cc)));
  1521. return ccs_cc;
  1522. }
  1523. }
  1524. }
  1525. }
  1526. }
  1527. else {
  1528. cc_tbl = RCLASS_CC_TBL(klass) = rb_id_table_create(2);
  1529. }
  1530. RB_DEBUG_COUNTER_INC(cc_not_found_in_ccs);
  1531. const rb_callable_method_entry_t *cme;
  1532. if (ccs) {
  1533. cme = ccs->cme;
  1534. cme = UNDEFINED_METHOD_ENTRY_P(cme) ? NULL : cme;
  1535. VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
  1536. }
  1537. else {
  1538. cme = rb_callable_method_entry(klass, mid);
  1539. }
  1540. VM_ASSERT(cme == NULL || IMEMO_TYPE_P(cme, imemo_ment));
  1541. if (cme == NULL) {
  1542. // undef or not found: can't cache the information
  1543. VM_ASSERT(vm_cc_cme(&vm_empty_cc) == NULL);
  1544. return &vm_empty_cc;
  1545. }
  1546. VM_ASSERT(cme == rb_callable_method_entry(klass, mid));
  1547. const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general);
  1548. METHOD_ENTRY_CACHED_SET((struct rb_callable_method_entry_struct *)cme);
  1549. if (ccs == NULL) {
  1550. VM_ASSERT(cc_tbl != NULL);
  1551. if (LIKELY(rb_id_table_lookup(cc_tbl, mid, &ccs_data))) {
  1552. // rb_callable_method_entry() prepares ccs.
  1553. ccs = (struct rb_class_cc_entries *)ccs_data;
  1554. }
  1555. else {
  1556. // TODO: required?
  1557. ccs = vm_ccs_create(klass, cme);
  1558. rb_id_table_insert(cc_tbl, mid, (VALUE)ccs);
  1559. }
  1560. }
  1561. vm_ccs_push(klass, ccs, ci, cc);
  1562. VM_ASSERT(vm_cc_cme(cc) != NULL);
  1563. VM_ASSERT(cme->called_id == mid);
  1564. VM_ASSERT(vm_cc_cme(cc)->called_id == mid);
  1565. return cc;
  1566. }
  1567. MJIT_FUNC_EXPORTED const struct rb_callcache *
  1568. rb_vm_search_method_slowpath(const struct rb_callinfo *ci, VALUE klass)
  1569. {
  1570. const struct rb_callcache *cc;
  1571. VM_ASSERT(RB_TYPE_P(klass, T_CLASS) || RB_TYPE_P(klass, T_ICLASS));
  1572. RB_VM_LOCK_ENTER();
  1573. {
  1574. cc = vm_search_cc(klass, ci);
  1575. VM_ASSERT(cc);
  1576. VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
  1577. VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass);
  1578. VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc)));
  1579. VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED

Large files are truncated; the remainder of vm_insnhelper.c is omitted here.