
/erts/emulator/beam/beam_emu.c

https://github.com/erlang/otp
C | 7154 lines | 5270 code | 818 blank | 1066 comment | 944 complexity | 8fafc27c9f0239013dc4323c88a73dcb MD5
Possible License(s): BSD-3-Clause, Apache-2.0, Unlicense, LGPL-2.1
  1. /*
  2. * %CopyrightBegin%
  3. *
  4. * Copyright Ericsson AB 1996-2014. All Rights Reserved.
  5. *
  6. * Licensed under the Apache License, Version 2.0 (the "License");
  7. * you may not use this file except in compliance with the License.
  8. * You may obtain a copy of the License at
  9. *
  10. * http://www.apache.org/licenses/LICENSE-2.0
  11. *
  12. * Unless required by applicable law or agreed to in writing, software
  13. * distributed under the License is distributed on an "AS IS" BASIS,
  14. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15. * See the License for the specific language governing permissions and
  16. * limitations under the License.
  17. *
  18. * %CopyrightEnd%
  19. */
  20. #ifdef HAVE_CONFIG_H
  21. # include "config.h"
  22. #endif
  23. #include <stddef.h> /* offsetof() */
  24. #include "sys.h"
  25. #include "erl_vm.h"
  26. #include "global.h"
  27. #include "erl_process.h"
  28. #include "error.h"
  29. #include "bif.h"
  30. #include "big.h"
  31. #include "beam_load.h"
  32. #include "erl_binary.h"
  33. #include "erl_map.h"
  34. #include "erl_bits.h"
  35. #include "dist.h"
  36. #include "beam_bp.h"
  37. #include "beam_catches.h"
  38. #include "erl_thr_progress.h"
  39. #ifdef HIPE
  40. #include "hipe_mode_switch.h"
  41. #include "hipe_bif1.h"
  42. #endif
  43. #include "dtrace-wrapper.h"
  44. /* #define HARDDEBUG 1 */
  45. #if defined(NO_JUMP_TABLE)
  46. # define OpCase(OpCode) case op_##OpCode
  47. # define CountCase(OpCode) case op_count_##OpCode
  48. # define OpCode(OpCode) ((Uint*)op_##OpCode)
  49. # define Goto(Rel) {Go = (int)(UWord)(Rel); goto emulator_loop;}
  50. # define LabelAddr(Addr) &&##Addr
  51. #else
  52. # define OpCase(OpCode) lb_##OpCode
  53. # define CountCase(OpCode) lb_count_##OpCode
  54. # define Goto(Rel) goto *((void *)Rel)
  55. # define LabelAddr(Label) &&Label
  56. # define OpCode(OpCode) (&&lb_##OpCode)
  57. #endif
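
The OpCase/Goto/LabelAddr macros above select between two dispatch styles: direct threading via GCC's labels-as-values extension, or a plain switch-in-a-loop when NO_JUMP_TABLE is set. A minimal, self-contained sketch of the same idea (the opcodes and the run function are illustrative, not part of beam_emu.c):

#include <stdio.h>

enum { OP_INC, OP_PRINT, OP_HALT };

static void run(const int *code)
{
#if defined(__GNUC__)
    /* Threaded dispatch: each handler jumps straight to the next one. */
    static void *labels[] = { &&lb_inc, &&lb_print, &&lb_halt };
    long acc = 0;
# define NEXT() goto *labels[*code++]
    NEXT();
lb_inc:   acc++;                     NEXT();
lb_print: printf("%ld\n", acc);      NEXT();
lb_halt:  return;
# undef NEXT
#else
    /* Switch dispatch: one shared indirect branch for every opcode. */
    long acc = 0;
    for (;;) {
        switch (*code++) {
        case OP_INC:   acc++;                break;
        case OP_PRINT: printf("%ld\n", acc); break;
        case OP_HALT:  return;
        }
    }
#endif
}

int main(void)
{
    const int prog[] = { OP_INC, OP_INC, OP_PRINT, OP_HALT };
    run(prog);                       /* prints 2 under either build */
    return 0;
}

In BEAM the analogous opcode-to-label translation happens once, at load time, which is why process_main() must export its label addresses to the loader before execution starts (see the comment above process_main() below).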
  58. #ifdef ERTS_ENABLE_LOCK_CHECK
  59. # ifdef ERTS_SMP
  60. # define PROCESS_MAIN_CHK_LOCKS(P) \
  61. do { \
  62. if ((P)) { \
  63. erts_proc_lc_chk_only_proc_main((P)); \
  64. } \
  65. else \
  66. erts_lc_check_exact(NULL, 0); \
  67. ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); \
  68. } while (0)
  69. # define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
  70. if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN,\
  71. __FILE__, __LINE__)
  72. # define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
  73. if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
  74. # else
  75. # define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
  76. # define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
  77. # define PROCESS_MAIN_CHK_LOCKS(P) erts_lc_check_exact(NULL, 0)
  78. # endif
  79. #else
  80. # define PROCESS_MAIN_CHK_LOCKS(P)
  81. # define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
  82. # define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
  83. #endif
  84. /*
  85. * Define macros for deep checking of terms.
  86. */
  87. #if defined(HARDDEBUG)
  88. # define CHECK_TERM(T) size_object(T)
  89. # define CHECK_ARGS(PC) \
  90. do { \
  91. int i_; \
  92. int Arity_ = PC[-1]; \
  93. if (Arity_ > 0) { \
  94. CHECK_TERM(r(0)); \
  95. } \
  96. for (i_ = 1; i_ < Arity_; i_++) { \
  97. CHECK_TERM(x(i_)); \
  98. } \
  99. } while (0)
  100. #else
  101. # define CHECK_TERM(T) ASSERT(!is_CP(T))
  102. # define CHECK_ARGS(T)
  103. #endif
  104. #ifndef MAX
  105. #define MAX(x, y) (((x) > (y)) ? (x) : (y))
  106. #endif
  107. #define GET_BIF_ADDRESS(p) ((BifFunction) (((Export *) p)->code[4]))
  108. #define TermWords(t) (((t) / (sizeof(BeamInstr)/sizeof(Eterm))) + !!((t) % (sizeof(BeamInstr)/sizeof(Eterm))))
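
TermWords converts a count of Eterm-sized words into the number of BeamInstr-sized words needed to hold them, rounding up. A quick standalone check of the arithmetic (the 2:1 size ratio assumes a build where BeamInstr is twice as wide as Eterm, e.g. halfword heaps; on common builds the ratio is 1):

#include <assert.h>

#define TERM_WORDS(t, ratio) ((t) / (ratio) + !!((t) % (ratio)))

int main(void)
{
    assert(TERM_WORDS(5, 2) == 3);   /* ceil(5/2): 5 terms need 3 words */
    assert(TERM_WORDS(4, 2) == 2);   /* exact fit, no rounding */
    assert(TERM_WORDS(7, 1) == 7);   /* same-size terms: identity */
    return 0;
}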
  109. /*
110. * We reuse some of the fields in the save area in the process structure.
111. * This is safe to do, since this space is only actively used when
  112. * the process is switched out.
  113. */
  114. #define REDS_IN(p) ((p)->def_arg_reg[5])
  115. /*
116. * Add a byte offset to a pointer to Eterm. This is useful when
117. * the loader has precalculated a byte offset.
  118. */
  119. #define ADD_BYTE_OFFSET(ptr, offset) \
  120. ((Eterm *) (((unsigned char *)ptr) + (offset)))
  121. /* We don't check the range if an ordinary switch is used */
  122. #ifdef NO_JUMP_TABLE
  123. #define VALID_INSTR(IP) ((UWord)(IP) < (NUMBER_OF_OPCODES*2+10))
  124. #else
  125. #define VALID_INSTR(IP) \
  126. ((SWord)LabelAddr(emulator_loop) <= (SWord)(IP) && \
  127. (SWord)(IP) < (SWord)LabelAddr(end_emulator_loop))
  128. #endif /* NO_JUMP_TABLE */
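
VALID_INSTR relies on all handler labels sitting between emulator_loop and end_emulator_loop, so a single range test tells whether a word plausibly holds a handler address. The same check in isolation (a sketch; uintptr_t stands in for the SWord casts above):

#include <stdint.h>

static int looks_like_handler(uintptr_t ip, uintptr_t first, uintptr_t last)
{
    /* first/last correspond to LabelAddr(emulator_loop) and
     * LabelAddr(end_emulator_loop) in the macro above. */
    return first <= ip && ip < last;
}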
  129. #define SET_CP(p, ip) \
  130. ASSERT(VALID_INSTR(*(ip))); \
  131. (p)->cp = (ip)
  132. #define SET_I(ip) \
  133. ASSERT(VALID_INSTR(* (Eterm *)(ip))); \
  134. I = (ip)
  135. #define FetchArgs(S1, S2) tmp_arg1 = (S1); tmp_arg2 = (S2)
  136. /*
  137. * Store a result into a register given a destination descriptor.
  138. */
  139. #define StoreResult(Result, DestDesc) \
  140. do { \
  141. Eterm stb_reg; \
  142. stb_reg = (DestDesc); \
  143. CHECK_TERM(Result); \
  144. switch (beam_reg_tag(stb_reg)) { \
  145. case R_REG_DEF: \
  146. r(0) = (Result); break; \
  147. case X_REG_DEF: \
  148. xb(x_reg_offset(stb_reg)) = (Result); break; \
  149. default: \
  150. yb(y_reg_offset(stb_reg)) = (Result); break; \
  151. } \
  152. } while (0)
  153. #define StoreSimpleDest(Src, Dest) Dest = (Src)
  154. /*
  155. * Store a result into a register and execute the next instruction.
  156. * Dst points to the word with a destination descriptor, which MUST
  157. * be just before the next instruction.
  158. */
  159. #define StoreBifResult(Dst, Result) \
  160. do { \
  161. BeamInstr* stb_next; \
  162. Eterm stb_reg; \
  163. stb_reg = Arg(Dst); \
  164. I += (Dst) + 2; \
  165. stb_next = (BeamInstr *) *I; \
  166. CHECK_TERM(Result); \
  167. switch (beam_reg_tag(stb_reg)) { \
  168. case R_REG_DEF: \
  169. r(0) = (Result); Goto(stb_next); \
  170. case X_REG_DEF: \
  171. xb(x_reg_offset(stb_reg)) = (Result); Goto(stb_next); \
  172. default: \
  173. yb(y_reg_offset(stb_reg)) = (Result); Goto(stb_next); \
  174. } \
  175. } while (0)
  176. #define ClauseFail() goto jump_f
  177. #define SAVE_CP(X) \
  178. do { \
  179. *(X) = make_cp(c_p->cp); \
  180. c_p->cp = 0; \
  181. } while(0)
  182. #define RESTORE_CP(X) SET_CP(c_p, (BeamInstr *) cp_val(*(X)))
  183. #define ISCATCHEND(instr) ((Eterm *) *(instr) == OpCode(catch_end_y))
  184. /*
  185. * Special Beam instructions.
  186. */
  187. BeamInstr beam_apply[2];
  188. BeamInstr beam_exit[1];
  189. BeamInstr beam_continue_exit[1];
  190. BeamInstr* em_call_error_handler;
  191. BeamInstr* em_apply_bif;
  192. BeamInstr* em_call_nif;
  193. /* NOTE These should be the only variables containing trace instructions.
194. ** Sometimes tests are made on the instruction value, and sometimes
195. ** on the referring variable (one of these), and rogue references
196. ** will most likely cause chaos.
  197. */
  198. BeamInstr beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
  199. BeamInstr beam_return_trace[1]; /* OpCode(i_return_trace) */
  200. BeamInstr beam_exception_trace[1]; /* UGLY also OpCode(i_return_trace) */
  201. BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
  202. /*
  203. * All Beam instructions in numerical order.
  204. */
  205. #ifndef NO_JUMP_TABLE
  206. void** beam_ops;
  207. #endif
  208. #define SWAPIN \
  209. HTOP = HEAP_TOP(c_p); \
  210. E = c_p->stop
  211. #define SWAPOUT \
  212. HEAP_TOP(c_p) = HTOP; \
  213. c_p->stop = E
  214. /*
  215. * Use LIGHT_SWAPOUT when the called function
  216. * will call HeapOnlyAlloc() (and never HAlloc()).
  217. */
  218. #ifdef DEBUG
  219. # /* The stack pointer is used in an assertion. */
  220. # define LIGHT_SWAPOUT SWAPOUT
  221. #else
  222. # define LIGHT_SWAPOUT HEAP_TOP(c_p) = HTOP
  223. #endif
  224. /*
  225. * Use LIGHT_SWAPIN when we know that c_p->stop cannot
  226. * have been updated (i.e. if there cannot have been
  227. * a garbage-collection).
  228. */
  229. #define LIGHT_SWAPIN HTOP = HEAP_TOP(c_p)
  230. #ifdef FORCE_HEAP_FRAGS
  231. # define HEAP_SPACE_VERIFIED(Words) do { \
  232. c_p->space_verified = (Words); \
  233. c_p->space_verified_from = HTOP; \
  234. }while(0)
  235. #else
  236. # define HEAP_SPACE_VERIFIED(Words) ((void)0)
  237. #endif
  238. #define PRE_BIF_SWAPOUT(P) \
  239. HEAP_TOP((P)) = HTOP; \
  240. (P)->stop = E; \
  241. PROCESS_MAIN_CHK_LOCKS((P)); \
  242. ERTS_SMP_UNREQ_PROC_MAIN_LOCK((P))
  243. #define db(N) (N)
  244. #define tb(N) (N)
  245. #define xb(N) (*(Eterm *) (((unsigned char *)reg) + (N)))
  246. #define yb(N) (*(Eterm *) (((unsigned char *)E) + (N)))
  247. #define fb(N) (*(double *) (((unsigned char *)&(freg[0].fd)) + (N)))
  248. #define Qb(N) (N)
  249. #define Ib(N) (N)
  250. #define x(N) reg[N]
  251. #define y(N) E[N]
  252. #define r(N) x##N
  253. /*
  254. * Makes sure that there are StackNeed + HeapNeed + 1 words available
  255. * on the combined heap/stack segment, then allocates StackNeed + 1
  256. * words on the stack and saves CP.
  257. *
  258. * M is number of live registers to preserve during garbage collection
  259. */
  260. #define AH(StackNeed, HeapNeed, M) \
  261. do { \
  262. int needed; \
  263. needed = (StackNeed) + 1; \
  264. if (E - HTOP < (needed + (HeapNeed))) { \
  265. SWAPOUT; \
  266. reg[0] = r(0); \
  267. PROCESS_MAIN_CHK_LOCKS(c_p); \
  268. FCALLS -= erts_garbage_collect(c_p, needed + (HeapNeed), reg, (M)); \
  269. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \
  270. PROCESS_MAIN_CHK_LOCKS(c_p); \
  271. r(0) = reg[0]; \
  272. SWAPIN; \
  273. } \
  274. E -= needed; \
  275. SAVE_CP(E); \
  276. } while (0)
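
AH works because the stack and heap share one memory block: E grows downward, HTOP grows upward, so E - HTOP is exactly the free space and a single comparison covers both the stack and heap needs. That invariant, reduced to a sketch (names are illustrative):

#include <stddef.h>

typedef unsigned long Term;          /* stand-in for Eterm */

static int needs_gc(const Term *stop, const Term *heap_top,
                    size_t stack_need, size_t heap_need)
{
    /* Mirrors `E - HTOP < (needed + (HeapNeed))` in AH() above;
     * stop is assumed to lie above heap_top in the same block. */
    return (size_t)(stop - heap_top) < stack_need + heap_need;
}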
  277. #define Allocate(Ns, Live) AH(Ns, 0, Live)
  278. #define AllocateZero(Ns, Live) \
  279. do { Eterm* ptr; \
  280. int i = (Ns); \
  281. AH(i, 0, Live); \
  282. for (ptr = E + i; ptr > E; ptr--) { \
  283. make_blank(*ptr); \
  284. } \
  285. } while (0)
  286. #define AllocateHeap(Ns, Nh, Live) AH(Ns, Nh, Live)
  287. #define AllocateHeapZero(Ns, Nh, Live) \
  288. do { Eterm* ptr; \
  289. int i = (Ns); \
  290. AH(i, Nh, Live); \
  291. for (ptr = E + i; ptr > E; ptr--) { \
  292. make_blank(*ptr); \
  293. } \
  294. } while (0)
  295. #define AllocateInit(Ns, Live, Y) \
  296. do { AH(Ns, 0, Live); make_blank(Y); } while (0)
  297. /*
  298. * Like the AH macro, but allocates no additional heap space.
  299. */
  300. #define A(StackNeed, M) AH(StackNeed, 0, M)
  301. #define D(N) \
  302. RESTORE_CP(E); \
  303. E += (N) + 1;
  304. #define TestBinVHeap(VNh, Nh, Live) \
  305. do { \
  306. unsigned need = (Nh); \
  307. if ((E - HTOP < need) || (MSO(c_p).overhead + (VNh) >= BIN_VHEAP_SZ(c_p))) {\
  308. SWAPOUT; \
  309. reg[0] = r(0); \
  310. PROCESS_MAIN_CHK_LOCKS(c_p); \
  311. FCALLS -= erts_garbage_collect(c_p, need, reg, (Live)); \
  312. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \
  313. PROCESS_MAIN_CHK_LOCKS(c_p); \
  314. r(0) = reg[0]; \
  315. SWAPIN; \
  316. } \
  317. HEAP_SPACE_VERIFIED(need); \
  318. } while (0)
  319. /*
  320. * Check if Nh words of heap are available; if not, do a garbage collection.
  321. * Live is number of active argument registers to be preserved.
  322. */
  323. #define TestHeap(Nh, Live) \
  324. do { \
  325. unsigned need = (Nh); \
  326. if (E - HTOP < need) { \
  327. SWAPOUT; \
  328. reg[0] = r(0); \
  329. PROCESS_MAIN_CHK_LOCKS(c_p); \
  330. FCALLS -= erts_garbage_collect(c_p, need, reg, (Live)); \
  331. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \
  332. PROCESS_MAIN_CHK_LOCKS(c_p); \
  333. r(0) = reg[0]; \
  334. SWAPIN; \
  335. } \
  336. HEAP_SPACE_VERIFIED(need); \
  337. } while (0)
  338. /*
  339. * Check if Nh words of heap are available; if not, do a garbage collection.
  340. * Live is number of active argument registers to be preserved.
  341. * Takes special care to preserve Extra if a garbage collection occurs.
  342. */
  343. #define TestHeapPreserve(Nh, Live, Extra) \
  344. do { \
  345. unsigned need = (Nh); \
  346. if (E - HTOP < need) { \
  347. SWAPOUT; \
  348. reg[0] = r(0); \
  349. reg[Live] = Extra; \
  350. PROCESS_MAIN_CHK_LOCKS(c_p); \
  351. FCALLS -= erts_garbage_collect(c_p, need, reg, (Live)+1); \
  352. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \
  353. PROCESS_MAIN_CHK_LOCKS(c_p); \
  354. if (Live > 0) { \
  355. r(0) = reg[0]; \
  356. } \
  357. Extra = reg[Live]; \
  358. SWAPIN; \
  359. } \
  360. HEAP_SPACE_VERIFIED(need); \
  361. } while (0)
  362. #define TestHeapPutList(Need, Reg) \
  363. do { \
  364. TestHeap((Need), 1); \
  365. PutList(Reg, r(0), r(0), StoreSimpleDest); \
  366. CHECK_TERM(r(0)); \
  367. } while (0)
  368. #define Init(N) make_blank(yb(N))
  369. #define Init2(Y1, Y2) do { make_blank(Y1); make_blank(Y2); } while (0)
  370. #define Init3(Y1, Y2, Y3) \
  371. do { make_blank(Y1); make_blank(Y2); make_blank(Y3); } while (0)
  372. #define MakeFun(FunP, NumFree) \
  373. do { \
  374. SWAPOUT; \
  375. reg[0] = r(0); \
  376. r(0) = new_fun(c_p, reg, (ErlFunEntry *) FunP, NumFree); \
  377. SWAPIN; \
  378. } while (0)
  379. #define PutTuple(Dst, Arity) \
  380. do { \
  381. Dst = make_tuple(HTOP); \
  382. pt_arity = (Arity); \
  383. } while (0)
  384. /*
385. * Check that we haven't used up our reductions and jump to the function
386. * pointed to by the I register. If we are out of reductions, do a context switch.
  387. */
  388. #define DispatchMacro() \
  389. do { \
  390. BeamInstr* dis_next; \
  391. dis_next = (BeamInstr *) *I; \
  392. CHECK_ARGS(I); \
  393. if (FCALLS > 0 || FCALLS > neg_o_reds) { \
  394. FCALLS--; \
  395. Goto(dis_next); \
  396. } else { \
  397. goto context_switch; \
  398. } \
  399. } while (0)
  400. #define DispatchMacroFun() \
  401. do { \
  402. BeamInstr* dis_next; \
  403. dis_next = (BeamInstr *) *I; \
  404. CHECK_ARGS(I); \
  405. if (FCALLS > 0 || FCALLS > neg_o_reds) { \
  406. FCALLS--; \
  407. Goto(dis_next); \
  408. } else { \
  409. goto context_switch_fun; \
  410. } \
  411. } while (0)
  412. #define DispatchMacrox() \
  413. do { \
  414. if (FCALLS > 0) { \
  415. Eterm* dis_next; \
  416. SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]); \
  417. dis_next = (Eterm *) *I; \
  418. FCALLS--; \
  419. CHECK_ARGS(I); \
  420. Goto(dis_next); \
  421. } else if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p) \
  422. && FCALLS > neg_o_reds) { \
  423. goto save_calls1; \
  424. } else { \
  425. SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]); \
  426. CHECK_ARGS(I); \
  427. goto context_switch; \
  428. } \
  429. } while (0)
  430. #ifdef DEBUG
  431. /*
  432. * To simplify breakpoint setting, put the code in one place only and jump to it.
  433. */
  434. # define Dispatch() goto do_dispatch
  435. # define Dispatchx() goto do_dispatchx
  436. # define Dispatchfun() goto do_dispatchfun
  437. #else
  438. /*
  439. * Inline for speed.
  440. */
  441. # define Dispatch() DispatchMacro()
  442. # define Dispatchx() DispatchMacrox()
  443. # define Dispatchfun() DispatchMacroFun()
  444. #endif
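
All three Dispatch variants implement the same contract: each dispatched call costs one reduction (FCALLS), and when the budget runs out the process falls into context_switch and yields to the scheduler. The skeleton of that accounting, as a sketch (BUDGET is illustrative; the real constant is CONTEXT_REDS in erl_vm.h, and the macros above also handle the negative-budget case used by call saving):

enum { BUDGET = 2000 };              /* stand-in for CONTEXT_REDS */

static long fcalls = BUDGET;

static int dispatch_one(void)
{
    if (fcalls-- > 0)
        return 1;                    /* run the next instruction */
    fcalls = BUDGET;                 /* refilled when rescheduled */
    return 0;                        /* out of reductions: yield */
}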
  445. #define Self(R) R = c_p->common.id
  446. #define Node(R) R = erts_this_node->sysname
  447. #define Arg(N) I[(N)+1]
  448. #define Next(N) \
  449. I += (N) + 1; \
  450. ASSERT(VALID_INSTR(*I)); \
  451. Goto(*I)
  452. #define PreFetch(N, Dst) do { Dst = (BeamInstr *) *(I + N + 1); } while (0)
  453. #define NextPF(N, Dst) \
  454. I += N + 1; \
  455. ASSERT(VALID_INSTR(Dst)); \
  456. Goto(Dst)
  457. #define GetR(pos, tr) \
  458. do { \
  459. tr = Arg(pos); \
  460. switch (beam_reg_tag(tr)) { \
  461. case R_REG_DEF: tr = r(0); break; \
  462. case X_REG_DEF: tr = xb(x_reg_offset(tr)); break; \
  463. case Y_REG_DEF: ASSERT(y_reg_offset(tr) >= 1); tr = yb(y_reg_offset(tr)); break; \
  464. } \
  465. CHECK_TERM(tr); \
  466. } while (0)
  467. #define GetArg1(N, Dst) GetR((N), Dst)
  468. #define GetArg2(N, Dst1, Dst2) \
  469. do { \
  470. GetR(N, Dst1); \
  471. GetR((N)+1, Dst2); \
  472. } while (0)
  473. #define PutList(H, T, Dst, Store) \
  474. do { \
  475. HTOP[0] = (H); HTOP[1] = (T); \
  476. Store(make_list(HTOP), Dst); \
  477. HTOP += 2; \
  478. } while (0)
  479. #define Move(Src, Dst, Store) \
  480. do { \
  481. Eterm term = (Src); \
  482. Store(term, Dst); \
  483. } while (0)
  484. #define Move2(S1, D1, S2, D2) D1 = (S1); D2 = (S2)
  485. #define Move3(S1, D1, S2, D2, S3, D3) D1 = (S1); D2 = (S2); D3 = (S3)
  486. #define MoveGenDest(src, dstp) \
  487. if ((dstp) == NULL) { r(0) = (src); } else { *(dstp) = src; }
  488. #define MoveReturn(Src, Dest) \
  489. (Dest) = (Src); \
  490. I = c_p->cp; \
  491. ASSERT(VALID_INSTR(*c_p->cp)); \
  492. c_p->cp = 0; \
  493. CHECK_TERM(r(0)); \
  494. Goto(*I)
  495. #define DeallocateReturn(Deallocate) \
  496. do { \
  497. int words_to_pop = (Deallocate); \
  498. SET_I((BeamInstr *) cp_val(*E)); \
  499. E = ADD_BYTE_OFFSET(E, words_to_pop); \
  500. CHECK_TERM(r(0)); \
  501. Goto(*I); \
  502. } while (0)
  503. #define MoveDeallocateReturn(Src, Dest, Deallocate) \
  504. (Dest) = (Src); \
  505. DeallocateReturn(Deallocate)
  506. #define MoveCall(Src, Dest, CallDest, Size) \
  507. (Dest) = (Src); \
  508. SET_CP(c_p, I+Size+1); \
  509. SET_I((BeamInstr *) CallDest); \
  510. Dispatch();
  511. #define MoveCallLast(Src, Dest, CallDest, Deallocate) \
  512. (Dest) = (Src); \
  513. RESTORE_CP(E); \
  514. E = ADD_BYTE_OFFSET(E, (Deallocate)); \
  515. SET_I((BeamInstr *) CallDest); \
  516. Dispatch();
  517. #define MoveCallOnly(Src, Dest, CallDest) \
  518. (Dest) = (Src); \
  519. SET_I((BeamInstr *) CallDest); \
  520. Dispatch();
  521. #define MoveJump(Src) \
  522. r(0) = (Src); \
  523. SET_I((BeamInstr *) Arg(0)); \
  524. Goto(*I);
  525. #define GetList(Src, H, T) do { \
  526. Eterm* tmp_ptr = list_val(Src); \
  527. H = CAR(tmp_ptr); \
  528. T = CDR(tmp_ptr); } while (0)
  529. #define GetTupleElement(Src, Element, Dest) \
  530. do { \
  531. tmp_arg1 = (Eterm) COMPRESS_POINTER(((unsigned char *) tuple_val(Src)) + \
  532. (Element)); \
  533. (Dest) = (*(Eterm *) EXPAND_POINTER(tmp_arg1)); \
  534. } while (0)
  535. #define ExtractNextElement(Dest) \
  536. tmp_arg1 += sizeof(Eterm); \
  537. (Dest) = (* (Eterm *) (((unsigned char *) EXPAND_POINTER(tmp_arg1))))
  538. #define ExtractNextElement2(Dest) \
  539. do { \
  540. Eterm* ene_dstp = &(Dest); \
  541. ene_dstp[0] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[1]; \
  542. ene_dstp[1] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[2]; \
  543. tmp_arg1 += sizeof(Eterm) + sizeof(Eterm); \
  544. } while (0)
  545. #define ExtractNextElement3(Dest) \
  546. do { \
  547. Eterm* ene_dstp = &(Dest); \
  548. ene_dstp[0] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[1]; \
  549. ene_dstp[1] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[2]; \
  550. ene_dstp[2] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[3]; \
  551. tmp_arg1 += 3*sizeof(Eterm); \
  552. } while (0)
  553. #define ExtractNextElement4(Dest) \
  554. do { \
  555. Eterm* ene_dstp = &(Dest); \
  556. ene_dstp[0] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[1]; \
  557. ene_dstp[1] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[2]; \
  558. ene_dstp[2] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[3]; \
  559. ene_dstp[3] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[4]; \
  560. tmp_arg1 += 4*sizeof(Eterm); \
  561. } while (0)
  562. #define ExtractElement(Element, Dest) \
  563. do { \
  564. tmp_arg1 += (Element); \
  565. (Dest) = (* (Eterm *) EXPAND_POINTER(tmp_arg1)); \
  566. } while (0)
  567. #define EqualImmed(X, Y, Action) if (X != Y) { Action; }
  568. #define NotEqualImmed(X, Y, Action) if (X == Y) { Action; }
  569. #define EqualExact(X, Y, Action) if (!EQ(X,Y)) { Action; }
  570. #define IsLessThan(X, Y, Action) if (CMP_GE(X, Y)) { Action; }
  571. #define IsGreaterEqual(X, Y, Action) if (CMP_LT(X, Y)) { Action; }
  572. #define IsFloat(Src, Fail) if (is_not_float(Src)) { Fail; }
  573. #define IsInteger(Src, Fail) if (is_not_integer(Src)) { Fail; }
  574. #define IsNumber(X, Fail) if (is_not_integer(X) && is_not_float(X)) { Fail; }
  575. #define IsAtom(Src, Fail) if (is_not_atom(Src)) { Fail; }
  576. #define IsIntegerAllocate(Src, Need, Alive, Fail) \
  577. if (is_not_integer(Src)) { Fail; } \
  578. A(Need, Alive)
  579. #define IsNil(Src, Fail) if (is_not_nil(Src)) { Fail; }
  580. #define IsList(Src, Fail) if (is_not_list(Src) && is_not_nil(Src)) { Fail; }
  581. #define IsNonemptyList(Src, Fail) if (is_not_list(Src)) { Fail; }
  582. #define IsNonemptyListAllocate(Src, Need, Alive, Fail) \
  583. if (is_not_list(Src)) { Fail; } \
  584. A(Need, Alive)
  585. #define IsNonemptyListTestHeap(Src, Need, Alive, Fail) \
  586. if (is_not_list(Src)) { Fail; } \
  587. TestHeap(Need, Alive)
  588. #define IsTuple(X, Action) if (is_not_tuple(X)) Action
  589. #define IsArity(Pointer, Arity, Fail) \
  590. if (*(Eterm *) \
  591. EXPAND_POINTER(tmp_arg1 = (Eterm) \
  592. COMPRESS_POINTER(tuple_val(Pointer))) != (Arity)) \
  593. { \
  594. Fail; \
  595. }
  596. #define IsMap(Src, Fail) if (!is_map(Src)) { Fail; }
  597. #define GetMapElement(Src, Key, Dst, Fail) \
  598. do { \
  599. Eterm _res = get_map_element(Src, Key); \
  600. if (is_non_value(_res)) { \
  601. Fail; \
  602. } \
  603. Dst = _res; \
  604. } while (0)
  605. #define GetMapElementHash(Src, Key, Hx, Dst, Fail) \
  606. do { \
  607. Eterm _res = get_map_element_hash(Src, Key, Hx); \
  608. if (is_non_value(_res)) { \
  609. Fail; \
  610. } \
  611. Dst = _res; \
  612. } while (0)
  613. #define IsFunction(X, Action) \
  614. do { \
  615. if ( !(is_any_fun(X)) ) { \
  616. Action; \
  617. } \
  618. } while (0)
  619. #define IsFunction2(F, A, Action) \
  620. do { \
  621. if (erl_is_function(c_p, F, A) != am_true ) { \
  622. Action; \
  623. } \
  624. } while (0)
  625. #define IsTupleOfArity(Src, Arity, Fail) \
  626. do { \
  627. if (is_not_tuple(Src) || \
  628. *(Eterm *) \
  629. EXPAND_POINTER(tmp_arg1 = \
  630. (Eterm) COMPRESS_POINTER(tuple_val(Src))) != Arity) { \
  631. Fail; \
  632. } \
  633. } while (0)
  634. #define IsBoolean(X, Fail) if ((X) != am_true && (X) != am_false) { Fail; }
  635. #define IsBinary(Src, Fail) \
  636. if (is_not_binary(Src) || binary_bitsize(Src) != 0) { Fail; }
  637. #define IsBitstring(Src, Fail) \
  638. if (is_not_binary(Src)) { Fail; }
  639. #if defined(ARCH_64) && !HALFWORD_HEAP
  640. #define BsSafeMul(A, B, Fail, Target) \
  641. do { Uint64 _res = (A) * (B); \
  642. if (_res / B != A) { Fail; } \
  643. Target = _res; \
  644. } while (0)
  645. #else
  646. #define BsSafeMul(A, B, Fail, Target) \
  647. do { Uint64 _res = (Uint64)(A) * (Uint64)(B); \
  648. if ((_res >> (8*sizeof(Uint))) != 0) { Fail; } \
  649. Target = _res; \
  650. } while (0)
  651. #endif
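
BsSafeMul guards the bit-size multiplication against overflow: on 64-bit platforms it multiplies and checks that dividing the product by one factor recovers the other; on 32-bit it widens to Uint64 and checks the high half. The 64-bit check as a standalone helper (a sketch; the explicit zero guard is added here, whereas the macro relies on its callers passing a nonzero unit):

#include <stdint.h>

static int mul_no_overflow(uint64_t a, uint64_t b, uint64_t *out)
{
    uint64_t res = a * b;            /* unsigned wraparound is well defined */
    if (b != 0 && res / b != a)
        return 0;                    /* product wrapped: overflow */
    *out = res;
    return 1;
}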
  652. #define BsGetFieldSize(Bits, Unit, Fail, Target) \
  653. do { \
  654. Sint _signed_size; Uint _uint_size; \
  655. if (is_small(Bits)) { \
  656. _signed_size = signed_val(Bits); \
  657. if (_signed_size < 0) { Fail; } \
  658. _uint_size = (Uint) _signed_size; \
  659. } else { \
  660. if (!term_to_Uint(Bits, &temp_bits)) { Fail; } \
  661. _uint_size = temp_bits; \
  662. } \
  663. BsSafeMul(_uint_size, Unit, Fail, Target); \
  664. } while (0)
  665. #define BsGetUncheckedFieldSize(Bits, Unit, Fail, Target) \
  666. do { \
  667. Sint _signed_size; Uint _uint_size; \
  668. if (is_small(Bits)) { \
  669. _signed_size = signed_val(Bits); \
  670. if (_signed_size < 0) { Fail; } \
  671. _uint_size = (Uint) _signed_size; \
  672. } else { \
  673. if (!term_to_Uint(Bits, &temp_bits)) { Fail; } \
  674. _uint_size = (Uint) temp_bits; \
  675. } \
  676. Target = _uint_size * Unit; \
  677. } while (0)
  678. #define BsGetFloat2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
  679. do { \
  680. ErlBinMatchBuffer *_mb; \
  681. Eterm _result; Sint _size; \
  682. if (!is_small(Sz) || (_size = unsigned_val(Sz)) > 64) { Fail; } \
  683. _size *= ((Flags) >> 3); \
  684. TestHeap(FLOAT_SIZE_OBJECT, Live); \
  685. _mb = ms_matchbuffer(Ms); \
  686. LIGHT_SWAPOUT; \
  687. _result = erts_bs_get_float_2(c_p, _size, (Flags), _mb); \
  688. LIGHT_SWAPIN; \
  689. HEAP_SPACE_VERIFIED(0); \
  690. if (is_non_value(_result)) { Fail; } \
  691. else { Store(_result, Dst); } \
  692. } while (0)
  693. #define BsGetBinaryImm_2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
  694. do { \
  695. ErlBinMatchBuffer *_mb; \
  696. Eterm _result; \
  697. TestHeap(heap_bin_size(ERL_ONHEAP_BIN_LIMIT), Live); \
  698. _mb = ms_matchbuffer(Ms); \
  699. LIGHT_SWAPOUT; \
  700. _result = erts_bs_get_binary_2(c_p, (Sz), (Flags), _mb); \
  701. LIGHT_SWAPIN; \
  702. HEAP_SPACE_VERIFIED(0); \
  703. if (is_non_value(_result)) { Fail; } \
  704. else { Store(_result, Dst); } \
  705. } while (0)
  706. #define BsGetBinary_2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
  707. do { \
  708. ErlBinMatchBuffer *_mb; \
  709. Eterm _result; Uint _size; \
  710. BsGetFieldSize(Sz, ((Flags) >> 3), Fail, _size); \
  711. TestHeap(ERL_SUB_BIN_SIZE, Live); \
  712. _mb = ms_matchbuffer(Ms); \
  713. LIGHT_SWAPOUT; \
  714. _result = erts_bs_get_binary_2(c_p, _size, (Flags), _mb); \
  715. LIGHT_SWAPIN; \
  716. HEAP_SPACE_VERIFIED(0); \
  717. if (is_non_value(_result)) { Fail; } \
  718. else { Store(_result, Dst); } \
  719. } while (0)
  720. #define BsGetBinaryAll_2(Ms, Live, Unit, Dst, Store, Fail) \
  721. do { \
  722. ErlBinMatchBuffer *_mb; \
  723. Eterm _result; \
  724. TestHeap(ERL_SUB_BIN_SIZE, Live); \
  725. _mb = ms_matchbuffer(Ms); \
  726. if (((_mb->size - _mb->offset) % Unit) == 0) { \
  727. LIGHT_SWAPOUT; \
  728. _result = erts_bs_get_binary_all_2(c_p, _mb); \
  729. LIGHT_SWAPIN; \
  730. HEAP_SPACE_VERIFIED(0); \
  731. ASSERT(is_value(_result)); \
  732. Store(_result, Dst); \
  733. } else { \
  734. HEAP_SPACE_VERIFIED(0); \
  735. Fail; } \
  736. } while (0)
  737. #define BsSkipBits2(Ms, Bits, Unit, Fail) \
  738. do { \
  739. ErlBinMatchBuffer *_mb; \
  740. size_t new_offset; \
  741. Uint _size; \
  742. _mb = ms_matchbuffer(Ms); \
  743. BsGetFieldSize(Bits, Unit, Fail, _size); \
  744. new_offset = _mb->offset + _size; \
  745. if (new_offset <= _mb->size) { _mb->offset = new_offset; } \
  746. else { Fail; } \
  747. } while (0)
  748. #define BsSkipBitsAll2(Ms, Unit, Fail) \
  749. do { \
  750. ErlBinMatchBuffer *_mb; \
  751. _mb = ms_matchbuffer(Ms); \
  752. if (((_mb->size - _mb->offset) % Unit) == 0) {_mb->offset = _mb->size; } \
  753. else { Fail; } \
  754. } while (0)
  755. #define BsSkipBitsImm2(Ms, Bits, Fail) \
  756. do { \
  757. ErlBinMatchBuffer *_mb; \
  758. size_t new_offset; \
  759. _mb = ms_matchbuffer(Ms); \
  760. new_offset = _mb->offset + (Bits); \
  761. if (new_offset <= _mb->size) { _mb->offset = new_offset; } \
  762. else { Fail; } \
  763. } while (0)
  764. #define NewBsPutIntegerImm(Sz, Flags, Src) \
  765. do { \
  766. if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3((Src), (Sz), (Flags)))) { goto badarg; } \
  767. } while (0)
  768. #define NewBsPutInteger(Sz, Flags, Src) \
  769. do { \
  770. Sint _size; \
  771. BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
  772. if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3((Src), _size, (Flags)))) \
  773. { goto badarg; } \
  774. } while (0)
  775. #define NewBsPutFloatImm(Sz, Flags, Src) \
  776. do { \
  777. if (!erts_new_bs_put_float(c_p, (Src), (Sz), (Flags))) { goto badarg; } \
  778. } while (0)
  779. #define NewBsPutFloat(Sz, Flags, Src) \
  780. do { \
  781. Sint _size; \
  782. BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
  783. if (!erts_new_bs_put_float(c_p, (Src), _size, (Flags))) { goto badarg; } \
  784. } while (0)
  785. #define NewBsPutBinary(Sz, Flags, Src) \
  786. do { \
  787. Sint _size; \
  788. BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
  789. if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2((Src), _size))) { goto badarg; } \
  790. } while (0)
  791. #define NewBsPutBinaryImm(Sz, Src) \
  792. do { \
  793. if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2((Src), (Sz)))) { goto badarg; } \
  794. } while (0)
  795. #define NewBsPutBinaryAll(Src, Unit) \
  796. do { \
  797. if (!erts_new_bs_put_binary_all(ERL_BITS_ARGS_2((Src), (Unit)))) { goto badarg; } \
  798. } while (0)
  799. #define IsPort(Src, Fail) if (is_not_port(Src)) { Fail; }
  800. #define IsPid(Src, Fail) if (is_not_pid(Src)) { Fail; }
  801. #define IsRef(Src, Fail) if (is_not_ref(Src)) { Fail; }
  802. /*
803. * process_main() is already huge, so we want to avoid inlining functions
804. * into it, especially functions that are seldom used.
  805. */
  806. #ifdef __GNUC__
  807. # define NOINLINE __attribute__((__noinline__))
  808. #else
  809. # define NOINLINE
  810. #endif
  811. /*
  812. * The following functions are called directly by process_main().
  813. * Don't inline them.
  814. */
  815. static BifFunction translate_gc_bif(void* gcf) NOINLINE;
  816. static BeamInstr* handle_error(Process* c_p, BeamInstr* pc,
  817. Eterm* reg, BifFunction bf) NOINLINE;
  818. static BeamInstr* call_error_handler(Process* p, BeamInstr* ip,
  819. Eterm* reg, Eterm func) NOINLINE;
  820. static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity) NOINLINE;
  821. static BeamInstr* apply(Process* p, Eterm module, Eterm function,
  822. Eterm args, Eterm* reg) NOINLINE;
  823. static BeamInstr* call_fun(Process* p, int arity,
  824. Eterm* reg, Eterm args) NOINLINE;
  825. static BeamInstr* apply_fun(Process* p, Eterm fun,
  826. Eterm args, Eterm* reg) NOINLINE;
  827. static Eterm new_fun(Process* p, Eterm* reg,
  828. ErlFunEntry* fe, int num_free) NOINLINE;
  829. static Eterm new_map(Process* p, Eterm* reg, BeamInstr* I) NOINLINE;
  830. static Eterm update_map_assoc(Process* p, Eterm* reg,
  831. Eterm map, BeamInstr* I) NOINLINE;
  832. static Eterm update_map_exact(Process* p, Eterm* reg,
  833. Eterm map, BeamInstr* I) NOINLINE;
  834. static Eterm get_map_element(Eterm map, Eterm key);
  835. static Eterm get_map_element_hash(Eterm map, Eterm key, Uint32 hx);
  836. /*
  837. * Functions not directly called by process_main(). OK to inline.
  838. */
  839. static BeamInstr* next_catch(Process* c_p, Eterm *reg);
  840. static void terminate_proc(Process* c_p, Eterm Value);
  841. static Eterm add_stacktrace(Process* c_p, Eterm Value, Eterm exc);
  842. static void save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg,
  843. BifFunction bf, Eterm args);
  844. static struct StackTrace * get_trace_from_exc(Eterm exc);
  845. static Eterm make_arglist(Process* c_p, Eterm* reg, int a);
  846. void
  847. init_emulator(void)
  848. {
  849. process_main();
  850. }
  851. /*
  852. * On certain platforms, make sure that the main variables really are placed
  853. * in registers.
  854. */
  855. #if defined(__GNUC__) && defined(sparc) && !defined(DEBUG)
  856. # define REG_x0 asm("%l0")
  857. # define REG_xregs asm("%l1")
  858. # define REG_htop asm("%l2")
  859. # define REG_stop asm("%l3")
  860. # define REG_I asm("%l4")
  861. # define REG_fcalls asm("%l5")
  862. # define REG_tmp_arg1 asm("%l6")
  863. # define REG_tmp_arg2 asm("%l7")
  864. #else
  865. # define REG_x0
  866. # define REG_xregs
  867. # define REG_htop
  868. # define REG_stop
  869. # define REG_I
  870. # define REG_fcalls
  871. # define REG_tmp_arg1
  872. # define REG_tmp_arg2
  873. #endif
  874. #ifdef USE_VM_PROBES
  875. # define USE_VM_CALL_PROBES
  876. #endif
  877. #ifdef USE_VM_CALL_PROBES
  878. #define DTRACE_LOCAL_CALL(p, m, f, a) \
  879. if (DTRACE_ENABLED(local_function_entry)) { \
  880. DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
  881. DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
  882. int depth = STACK_START(p) - STACK_TOP(p); \
  883. dtrace_fun_decode(p, m, f, a, \
  884. process_name, mfa); \
  885. DTRACE3(local_function_entry, process_name, mfa, depth); \
  886. }
  887. #define DTRACE_GLOBAL_CALL(p, m, f, a) \
  888. if (DTRACE_ENABLED(global_function_entry)) { \
  889. DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
  890. DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
  891. int depth = STACK_START(p) - STACK_TOP(p); \
  892. dtrace_fun_decode(p, m, f, a, \
  893. process_name, mfa); \
  894. DTRACE3(global_function_entry, process_name, mfa, depth); \
  895. }
  896. #define DTRACE_RETURN(p, m, f, a) \
  897. if (DTRACE_ENABLED(function_return)) { \
  898. DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
  899. DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
  900. int depth = STACK_START(p) - STACK_TOP(p); \
  901. dtrace_fun_decode(p, m, f, a, \
  902. process_name, mfa); \
  903. DTRACE3(function_return, process_name, mfa, depth); \
  904. }
  905. #define DTRACE_BIF_ENTRY(p, m, f, a) \
  906. if (DTRACE_ENABLED(bif_entry)) { \
  907. DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
  908. DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
  909. dtrace_fun_decode(p, m, f, a, \
  910. process_name, mfa); \
  911. DTRACE2(bif_entry, process_name, mfa); \
  912. }
  913. #define DTRACE_BIF_RETURN(p, m, f, a) \
  914. if (DTRACE_ENABLED(bif_return)) { \
  915. DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
  916. DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
  917. dtrace_fun_decode(p, m, f, a, \
  918. process_name, mfa); \
  919. DTRACE2(bif_return, process_name, mfa); \
  920. }
  921. #define DTRACE_NIF_ENTRY(p, m, f, a) \
  922. if (DTRACE_ENABLED(nif_entry)) { \
  923. DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
  924. DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
  925. dtrace_fun_decode(p, m, f, a, \
  926. process_name, mfa); \
  927. DTRACE2(nif_entry, process_name, mfa); \
  928. }
  929. #define DTRACE_NIF_RETURN(p, m, f, a) \
  930. if (DTRACE_ENABLED(nif_return)) { \
  931. DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
  932. DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
  933. dtrace_fun_decode(p, m, f, a, \
  934. process_name, mfa); \
  935. DTRACE2(nif_return, process_name, mfa); \
  936. }
  937. #define DTRACE_GLOBAL_CALL_FROM_EXPORT(p,e) \
  938. do { \
  939. if (DTRACE_ENABLED(global_function_entry)) { \
  940. BeamInstr* fp = (BeamInstr *) (((Export *) (e))->addressv[erts_active_code_ix()]); \
  941. DTRACE_GLOBAL_CALL((p), (Eterm)fp[-3], (Eterm)fp[-2], fp[-1]); \
  942. } \
  943. } while(0)
  944. #define DTRACE_RETURN_FROM_PC(p) \
  945. do { \
  946. BeamInstr* fp; \
  947. if (DTRACE_ENABLED(function_return) && (fp = find_function_from_pc((p)->cp))) { \
  948. DTRACE_RETURN((p), (Eterm)fp[0], (Eterm)fp[1], (Uint)fp[2]); \
  949. } \
  950. } while(0)
  951. #else /* USE_VM_PROBES */
  952. #define DTRACE_LOCAL_CALL(p, m, f, a) do {} while (0)
  953. #define DTRACE_GLOBAL_CALL(p, m, f, a) do {} while (0)
  954. #define DTRACE_GLOBAL_CALL_FROM_EXPORT(p, e) do {} while (0)
  955. #define DTRACE_RETURN(p, m, f, a) do {} while (0)
  956. #define DTRACE_RETURN_FROM_PC(p) do {} while (0)
  957. #define DTRACE_BIF_ENTRY(p, m, f, a) do {} while (0)
  958. #define DTRACE_BIF_RETURN(p, m, f, a) do {} while (0)
  959. #define DTRACE_NIF_ENTRY(p, m, f, a) do {} while (0)
  960. #define DTRACE_NIF_RETURN(p, m, f, a) do {} while (0)
  961. #endif /* USE_VM_PROBES */
  962. /*
  963. * process_main() is called twice:
  964. * The first call performs some initialisation, including exporting
  965. * the instructions' C labels to the loader.
  966. * The second call starts execution of BEAM code. This call never returns.
  967. */
  968. void process_main(void)
  969. {
  970. static int init_done = 0;
  971. Process* c_p = NULL;
  972. int reds_used;
  973. #ifdef DEBUG
  974. ERTS_DECLARE_DUMMY(Eterm pid);
  975. #endif
  976. /*
  977. * X register zero; also called r(0)
  978. */
  979. register Eterm x0 REG_x0 = NIL;
  980. /* Pointer to X registers: x(1)..x(N); reg[0] is used when doing GC,
  981. * in all other cases x0 is used.
  982. */
  983. register Eterm* reg REG_xregs = NULL;
  984. /*
  985. * Top of heap (next free location); grows upwards.
  986. */
  987. register Eterm* HTOP REG_htop = NULL;
  988. /* Stack pointer. Grows downwards; points
  989. * to last item pushed (normally a saved
  990. * continuation pointer).
  991. */
  992. register Eterm* E REG_stop = NULL;
  993. /*
  994. * Pointer to next threaded instruction.
  995. */
  996. register BeamInstr *I REG_I = NULL;
  997. /* Number of reductions left. This function
  998. * returns to the scheduler when FCALLS reaches zero.
  999. */
  1000. register Sint FCALLS REG_fcalls = 0;
  1001. /*
  1002. * Temporaries used for picking up arguments for instructions.
  1003. */
  1004. register Eterm tmp_arg1 REG_tmp_arg1 = NIL;
  1005. register Eterm tmp_arg2 REG_tmp_arg2 = NIL;
  1006. #if HEAP_ON_C_STACK
  1007. Eterm tmp_big[2]; /* Temporary buffer for small bignums if HEAP_ON_C_STACK. */
  1008. #else
  1009. Eterm *tmp_big; /* Temporary buffer for small bignums if !HEAP_ON_C_STACK. */
  1010. #endif
  1011. /*
  1012. * X registers and floating point registers are located in
  1013. * scheduler specific data.
  1014. */
  1015. register FloatDef *freg;
  1016. /*
  1017. * For keeping the negative old value of 'reds' when call saving is active.
  1018. */
  1019. int neg_o_reds = 0;
  1020. Eterm (*arith_func)(Process* p, Eterm* reg, Uint live);
  1021. #ifdef ERTS_OPCODE_COUNTER_SUPPORT
  1022. static void* counting_opcodes[] = { DEFINE_COUNTING_OPCODES };
  1023. #else
  1024. #ifndef NO_JUMP_TABLE
  1025. static void* opcodes[] = { DEFINE_OPCODES };
  1026. #else
  1027. int Go;
  1028. #endif
  1029. #endif
  1030. Uint temp_bits; /* Temporary used by BsSkipBits2 & BsGetInteger2 */
  1031. Eterm pt_arity; /* Used by do_put_tuple */
  1032. Uint64 start_time = 0; /* Monitor long schedule */
  1033. BeamInstr* start_time_i = NULL;
  1034. ERL_BITS_DECLARE_STATEP; /* Has to be last declaration */
  1035. /*
  1036. * Note: In this function, we attempt to place rarely executed code towards
  1037. * the end of the function, in the hope that the cache hit rate will be better.
  1038. * The initialization code is only run once, so it is at the very end.
  1039. *
  1040. * Note: c_p->arity must be set to reflect the number of useful terms in
  1041. * c_p->arg_reg before calling the scheduler.
  1042. */
  1043. if (!init_done) {
  1044. /* This should only be reached during the init phase when only the main
  1045. * process is running. I.e. there is no race for init_done.
  1046. */
  1047. init_done = 1;
  1048. goto init_emulator;
  1049. }
  1050. c_p = NULL;
  1051. reds_used = 0;
  1052. goto do_schedule1;
  1053. do_schedule:
  1054. reds_used = REDS_IN(c_p) - FCALLS;
  1055. do_schedule1:
  1056. if (start_time != 0) {
  1057. Sint64 diff = erts_timestamp_millis() - start_time;
  1058. if (diff > 0 && (Uint) diff > erts_system_monitor_long_schedule
  1059. #ifdef ERTS_DIRTY_SCHEDULERS
  1060. && !ERTS_SCHEDULER_IS_DIRTY(c_p->scheduler_data)
  1061. #endif
  1062. ) {
  1063. BeamInstr *inptr = find_function_from_pc(start_time_i);
  1064. BeamInstr *outptr = find_function_from_pc(c_p->i);
  1065. monitor_long_schedule_proc(c_p,inptr,outptr,(Uint) diff);
  1066. }
  1067. }
  1068. PROCESS_MAIN_CHK_LOCKS(c_p);
  1069. ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
  1070. #if HALFWORD_HEAP
  1071. ASSERT(erts_get_scheduler_data()->num_tmp_heap_used == 0);
  1072. #endif
  1073. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
  1074. c_p = schedule(c_p, reds_used);
  1075. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
  1076. start_time = 0;
  1077. #ifdef DEBUG
1078. pid = c_p->common.id; /* Save for debugging purposes */
  1079. #endif
  1080. ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
  1081. PROCESS_MAIN_CHK_LOCKS(c_p);
  1082. if (erts_system_monitor_long_schedule != 0) {
  1083. start_time = erts_timestamp_millis();
  1084. start_time_i = c_p->i;
  1085. }
  1086. reg = ERTS_PROC_GET_SCHDATA(c_p)->x_reg_array;
  1087. freg = ERTS_PROC_GET_SCHDATA(c_p)->f_reg_array;
  1088. #if !HEAP_ON_C_STACK
  1089. tmp_big = ERTS_PROC_GET_SCHDATA(c_p)->beam_emu_tmp_heap;
  1090. #endif
  1091. ERL_BITS_RELOAD_STATEP(c_p);
  1092. {
  1093. int reds;
  1094. Eterm* argp;
  1095. BeamInstr *next;
  1096. int i;
  1097. argp = c_p->arg_reg;
  1098. for (i = c_p->arity - 1; i > 0; i--) {
  1099. reg[i] = argp[i];
  1100. CHECK_TERM(reg[i]);
  1101. }
  1102. /*
  1103. * We put the original reduction count in the process structure, to reduce
  1104. * the code size (referencing a field in a struct through a pointer stored
  1105. * in a register gives smaller code than referencing a global variable).
  1106. */
  1107. SET_I(c_p->i);
  1108. reds = c_p->fcalls;
  1109. if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)
  1110. && (ERTS_TRACE_FLAGS(c_p) & F_SENSITIVE) == 0) {
  1111. neg_o_reds = -reds;
  1112. FCALLS = REDS_IN(c_p) = 0;
  1113. } else {
  1114. neg_o_reds = 0;
  1115. FCALLS = REDS_IN(c_p) = reds;
  1116. }
  1117. next = (BeamInstr *) *I;
  1118. r(0) = c_p->arg_reg[0];
  1119. #ifdef HARDDEBUG
  1120. if (c_p->arity > 0) {
  1121. CHECK_TERM(r(0));
  1122. }
  1123. #endif
  1124. SWAPIN;
  1125. ASSERT(VALID_INSTR(next));
  1126. #ifdef USE_VM_PROBES
  1127. if (DTRACE_ENABLED(process_scheduled)) {
  1128. DTRACE_CHARBUF(process_buf, DTRACE_TERM_BUF_SIZE);
  1129. DTRACE_CHARBUF(fun_buf, DTRACE_TERM_BUF_SIZE);
  1130. dtrace_proc_str(c_p, process_buf);
  1131. if (ERTS_PROC_IS_EXITING(c_p)) {
  1132. strcpy(fun_buf, "<exiting>");
  1133. } else {
  1134. BeamInstr *fptr = find_function_from_pc(c_p->i);
  1135. if (fptr) {
  1136. dtrace_fun_decode(c_p, (Eterm)fptr[0],
  1137. (Eterm)fptr[1], (Uint)fptr[2],
  1138. NULL, fun_buf);
  1139. } else {
  1140. erts_snprintf(fun_buf, sizeof(DTRACE_CHARBUF_NAME(fun_buf)),
  1141. "<unknown/%p>", next);
  1142. }
  1143. }
  1144. DTRACE2(process_scheduled, process_buf, fun_buf);
  1145. }
  1146. #endif
  1147. Goto(next);
  1148. }
  1149. #if defined(DEBUG) || defined(NO_JUMP_TABLE)
  1150. emulator_loop:
  1151. #endif
  1152. #ifdef NO_JUMP_TABLE
  1153. switch (Go) {
  1154. #endif
  1155. #include "beam_hot.h"
  1156. #define STORE_ARITH_RESULT(res) StoreBifResult(2, (res));
  1157. #define ARITH_FUNC(name) erts_gc_##name
  1158. {
  1159. Eterm increment_reg_val;
  1160. Eterm increment_val;
  1161. Uint live;
  1162. Eterm result;
  1163. OpCase(i_increment_yIId):
  1164. increment_reg_val = yb(Arg(0));
  1165. goto do_increment;
  1166. OpCase(i_increment_xIId):
  1167. increment_reg_val = xb(Arg(0));
  1168. goto do_increment;
  1169. OpCase(i_increment_rIId):
  1170. increment_reg_val = r(0);
  1171. I--;
  1172. do_increment:
  1173. increment_val = Arg(1);
  1174. if (is_small(increment_reg_val)) {
  1175. Sint i = signed_val(increment_reg_val) + increment_val;
  1176. ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i));
  1177. if (MY_IS_SSMALL(i)) {
  1178. result = make_small(i);
  1179. store_result:
  1180. StoreBifResult(3, result);
  1181. }
  1182. }
  1183. live = Arg(2);
  1184. SWAPOUT;
  1185. reg[0] = r(0);
  1186. reg[live] = increment_reg_val;
  1187. reg[live+1] = make_small(increment_val);
  1188. result = erts_gc_mixed_plus(c_p, reg, live);
  1189. r(0) = reg[0];
  1190. SWAPIN;
  1191. ERTS_HOLE_CHECK(c_p);
  1192. if (is_value(result)) {
  1193. goto store_result;
  1194. }
  1195. ASSERT(c_p->freason != BADMATCH || is_value(c_p->fvalue));
  1196. goto find_func_info;
  1197. }
  1198. #define DO_BIG_ARITH(Func,Arg1,Arg2) \
  1199. do { \
  1200. Uint live = Arg(1); \
  1201. SWAPOUT; \
  1202. reg[0] = r(0); \
  1203. reg[live] = (Arg1); \
  1204. reg[live+1] = (Arg2); \
  1205. result = (Func)(c_p, reg, live); \
  1206. r(0) = reg[0]; \
  1207. SWAPIN; \
  1208. ERTS_HOLE_CHECK(c_p); \
  1209. if (is_value(result)) { \
  1210. StoreBifResult(4,result); \
  1211. } \
  1212. goto lb_Cl_error; \
  1213. } while(0)
  1214. OpCase(i_plus_jIxxd):
  1215. {
  1216. Eterm result;
  1217. if (is_both_small(xb(Arg(2)), xb(Arg(3)))) {
  1218. Sint i = signed_val(xb(Arg(2))) + signed_val(xb(Arg(3)));
  1219. ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i));
  1220. if (MY_IS_SSMALL(i)) {
  1221. result = make_small(i);
  1222. StoreBifResult(4, result);
  1223. }
  1224. }
  1225. DO_BIG_ARITH(ARITH_FUNC(mixed_plus), xb(Arg(2)), xb(Arg(3)));
  1226. }
  1227. OpCase(i_plus_jId):
  1228. {
  1229. Eterm result;
  1230. if (is_both_small(tmp_arg1, tmp_arg2)) {
  1231. Sint i = signed_val(tmp_arg1) + signed_val(tmp_arg2);
  1232. ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i));
  1233. if (MY_IS_SSMALL(i)) {
  1234. result = make_small(i);
  1235. STORE_ARITH_RESULT(result);
  1236. }
  1237. }
  1238. arith_func = ARITH_FUNC(mixed_plus);
  1239. goto do_big_arith2;
  1240. }
  1241. OpCase(i_minus_jIxxd):
  1242. {
  1243. Eterm result;
  1244. if (is_both_small(xb(Arg(2)), xb(Arg(3)))) {
  1245. Sint i = signed_val(xb(Arg(2))) - signed_val(xb(Arg(3)));
  1246. ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i));
  1247. if (MY_IS_SSMALL(i)) {
  1248. result = make_small(i);
  1249. StoreBifResult(4, result);
  1250. }
  1251. }
  1252. DO_BIG_ARITH(ARITH_FUNC(mixed_minus), xb(Arg(2)), xb(Arg(3)));
  1253. }
  1254. OpCase(i_minus_jId):
  1255. {
  1256. Eterm result;
  1257. if (is_both_small(tmp_arg1, tmp_arg2)) {
  1258. Sint i = signed_val(tmp_arg1) - signed_val(tmp_arg2);
  1259. ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i));
  1260. if (MY_IS_SSMALL(i)) {
  1261. result = make_small(i);
  1262. STORE_ARITH_RESULT(result);
  1263. }
  1264. }
  1265. arith_func = ARITH_FUNC(mixed_minus);
  1266. goto do_big_arith2;
  1267. }
  1268. OpCase(i_is_lt_f):
  1269. if (CMP_GE(tmp_arg1, tmp_arg2)) {
  1270. ClauseFail();
  1271. }
  1272. Next(1);
  1273. OpCase(i_is_ge_f):
  1274. if (CMP_LT(tmp_arg1, tmp_arg2)) {
  1275. ClauseFail();
  1276. }
  1277. Next(1);
  1278. OpCase(i_is_eq_f):
  1279. if (CMP_NE(tmp_arg1, tmp_arg2)) {
  1280. ClauseFail();
  1281. }
  1282. Next(1);
  1283. OpCase(i_is_ne_f):
  1284. if (CMP_EQ(tmp_arg1, tmp_arg2)) {
  1285. ClauseFail();
  1286. }
  1287. Next(1);
  1288. OpCase(i_is_eq_exact_f):
  1289. if (!EQ(tmp_arg1, tmp_arg2)) {
  1290. ClauseFail();
  1291. }
  1292. Next(1);
  1293. {
  1294. Eterm is_eq_exact_lit_val;
  1295. OpCase(i_is_eq_exact_literal_xfc):
  1296. is_eq_exact_lit_val = xb(Arg(0));
  1297. I++;
  1298. goto do_is_eq_exact_literal;
  1299. OpCase(i_is_eq_exact_literal_yfc):
  1300. is_eq_exact_lit_val = yb(Arg(0));
  1301. I++;
  1302. goto do_is_eq_exact_literal;
  1303. OpCase(i_is_eq_exact_literal_rfc):
  1304. is_eq_exact_lit_val = r(0);
  1305. do_is_eq_exact_literal:
  1306. if (!eq(Arg(1), is_eq_exact_lit_val)) {
  1307. ClauseFail();
  1308. }
  1309. Next(2);
  1310. }
  1311. {
  1312. Eterm is_ne_exact_lit_val;
  1313. OpCase(i_is_ne_exact_literal_xfc):
  1314. is_ne_exact_lit_val = xb(Arg(0));
  1315. I++;
  1316. goto do_is_ne_exact_literal;
  1317. OpCase(i_is_ne_exact_literal_yfc):
  1318. is_ne_exact_lit_val = yb(Arg(0));
  1319. I++;
  1320. goto do_is_ne_exact_literal;
  1321. OpCase(i_is_ne_exact_literal_rfc):
  1322. is_ne_exact_lit_val = r(0);
  1323. do_is_ne_exact_literal:
  1324. if (eq(Arg(1), is_ne_exact_lit_val)) {
  1325. ClauseFail();
  1326. }
  1327. Next(2);
  1328. }
  1329. OpCase(move_window3_xxxy): {
  1330. BeamInstr *next;
  1331. Eterm xt0, xt1, xt2;
  1332. Eterm *y = (Eterm *)(((unsigned char *)E) + (Arg(3)));
  1333. PreFetch(4, next);
  1334. xt0 = xb(Arg(0));
  1335. xt1 = xb(Arg(1));
  1336. xt2 = xb(Arg(2));
  1337. y[0] = xt0;
  1338. y[1] = xt1;
  1339. y[2] = xt2;
  1340. NextPF(4, next);
  1341. }
  1342. OpCase(move_window4_xxxxy): {
  1343. BeamInstr *next;
  1344. Eterm xt0, xt1, xt2, xt3;
  1345. Eterm *y = (Eterm *)(((unsigned char *)E) + (Arg(4)));
  1346. PreFetch(5, next);
  1347. xt0 = xb(Arg(0));
  1348. xt1 = xb(Arg(1));
  1349. xt2 = xb(Arg(2));
  1350. xt3 = xb(Arg(3));
  1351. y[0] = xt0;
  1352. y[1] = xt1;
  1353. y[2] = xt2;
  1354. y[3] = xt3;
  1355. NextPF(5, next);
  1356. }
  1357. OpCase(move_window5_xxxxxy): {
  1358. BeamInstr *next;
  1359. Eterm xt0, xt1, xt2, xt3, xt4;
  1360. Eterm *y = (Eterm *)(((unsigned char *)E) + (Arg(5)));
  1361. PreFetch(6, next);
  1362. xt0 = xb(Arg(0));
  1363. xt1 = xb(Arg(1));
  1364. xt2 = xb(Arg(2));
  1365. xt3 = xb(Arg(3));
  1366. xt4 = xb(Arg(4));
  1367. y[0] = xt0;
  1368. y[1] = xt1;
  1369. y[2] = xt2;
  1370. y[3] = xt3;
  1371. y[4] = xt4;
  1372. NextPF(6, next);
  1373. }
  1374. OpCase(i_move_call_only_fcr): {
  1375. r(0) = Arg(1);
  1376. }
  1377. /* FALL THROUGH */
  1378. OpCase(i_call_only_f): {
  1379. SET_I((BeamInstr *) Arg(0));
  1380. DTRACE_LOCAL_CALL(c_p, (Eterm)I[-3], (Eterm)I[-2], I[-1]);
  1381. Dispatch();
  1382. }
  1383. OpCase(i_move_call_last_fPcr): {
  1384. r(0) = Arg(2);
  1385. }
  1386. /* FALL THROUGH */
  1387. OpCase(i_call_last_fP): {
  1388. RESTORE_CP(E);
  1389. E = ADD_BYTE_OFFSET(E, Arg(1));
  1390. SET_I((BeamInstr *) Arg(0));
  1391. DTRACE_LOCAL_CALL(c_p, (Eterm)I[-3], (Eterm)I[-2], I[-1]);
  1392. Dispatch();
  1393. }
  1394. OpCase(i_move_call_crf): {
  1395. r(0) = Arg(0);
  1396. I++;
  1397. }
  1398. /* FALL THROUGH */
  1399. OpCase(i_call_f): {
  1400. SET_CP(c_p, I+2);
  1401. SET_I((BeamInstr *) Arg(0));
  1402. DTRACE_LOCAL_CALL(c_p, (Eterm)I[-3], (Eterm)I[-2], I[-1]);
  1403. Dispatch();
  1404. }
  1405. OpCase(i_move_call_ext_last_ePcr): {
  1406. r(0) = Arg(2);
  1407. }
  1408. /* FALL THROUGH */
  1409. OpCase(i_call_ext_last_eP):
  1410. RESTORE_CP(E);
  1411. E = ADD_BYTE_OFFSET(E, Arg(1));
  1412. /*
  1413. * Note: The pointer to the export entry is never NULL; if the module
  1414. * is not loaded, it points to code which will invoke the error handler
  1415. * (see lb_call_error_handler below).
  1416. */
  1417. DTRACE_GLOBAL_CALL_FROM_EXPORT(c_p, Arg(0));
  1418. Dispatchx();
  1419. OpCase(i_move_call_ext_cre): {
  1420. r(0) = Arg(0);
  1421. I++;
  1422. }
  1423. /* FALL THROUGH */
  1424. OpCase(i_call_ext_e):
  1425. SET_CP(c_p, I+2);
  1426. DTRACE_GLOBAL_CALL_FROM_EXPORT(c_p, Arg(0));
  1427. Dispatchx();
  1428. OpCase(i_move_call_ext_only_ecr): {
  1429. r(0) = Arg(1);
  1430. }
  1431. /* FALL THROUGH */
  1432. OpCase(i_call_ext_only_e):
  1433. DTRACE_GLOBAL_CALL_FROM_EXPORT(c_p, Arg(0));
  1434. Dispatchx();
  1435. OpCase(init_y): {
  1436. BeamInstr *next;
  1437. PreFetch(1, next);
  1438. make_blank(yb(Arg(0)));
  1439. NextPF(1, next);
  1440. }
  1441. OpCase(i_trim_I): {
  1442. BeamInstr *next;
  1443. Uint words;
  1444. Uint cp;
  1445. words = Arg(0);
  1446. cp = E[0];
  1447. PreFetch(1, next);
  1448. E += words;
  1449. E[0] = cp;
  1450. NextPF(1, next);
  1451. }
  1452. OpCase(move_x1_c): {
  1453. x(1) = Arg(0);
  1454. Next(1);
  1455. }
  1456. OpCase(move_x2_c): {
  1457. x(2) = Arg(0);
  1458. Next(1);
  1459. }
  1460. OpCase(return): {
  1461. SET_I(c_p->cp);
  1462. DTRACE_RETURN_FROM_PC(c_p);
  1463. /*
1464. * We must clear the CP to make sure that a stale value does not
1465. * create a false module dependency preventing code upgrading.
  1466. * It also means that we can use the CP in stack backtraces.
  1467. */
  1468. c_p->cp = 0;
  1469. CHECK_TERM(r(0));
  1470. HEAP_SPACE_VERIFIED(0);
  1471. Goto(*I);
  1472. }
  1473. /*
  1474. * Send is almost a standard call-BIF with two arguments, except for:
  1475. * 1) It cannot be traced.
  1476. * 2) There is no pointer to the send_2 function stored in
  1477. * the instruction.
  1478. */
  1479. OpCase(send): {
  1480. BeamInstr *next;
  1481. Eterm result;
  1482. PRE_BIF_SWAPOUT(c_p);
  1483. c_p->fcalls = FCALLS - 1;
  1484. reg[0] = r(0);
  1485. result = erl_send(c_p, r(0), x(1));
  1486. PreFetch(0, next);
  1487. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
  1488. ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
  1489. PROCESS_MAIN_CHK_LOCKS(c_p);
  1490. if (c_p->mbuf || MSO(c_p).overhead >= BIN_VHEAP_SZ(c_p)) {
  1491. result = erts_gc_after_bif_call(c_p, result, reg, 2);
  1492. r(0) = reg[0];
  1493. E = c_p->stop;
  1494. }
  1495. HTOP = HEAP_TOP(c_p);
  1496. FCALLS = c_p->fcalls;
  1497. if (is_value(result)) {
  1498. r(0) = result;
  1499. CHECK_TERM(r(0));
  1500. NextPF(0, next);
  1501. } else if (c_p->freason == TRAP) {
  1502. SET_CP(c_p, I+1);
  1503. SET_I(c_p->i);
  1504. SWAPIN;
  1505. r(0) = reg[0];
  1506. Dispatch();
  1507. }
  1508. goto find_func_info;
  1509. }
  1510. {
  1511. Eterm element_index;
  1512. Eterm element_tuple;
  1513. OpCase(i_element_xjsd):
  1514. element_tuple = xb(Arg(0));
  1515. I++;
  1516. goto do_element;
  1517. OpCase(i_element_yjsd):
  1518. element_tuple = yb(Arg(0));
  1519. I++;
  1520. goto do_element;
  1521. OpCase(i_element_rjsd):
  1522. element_tuple = r(0);
  1523. /* Fall through */
  1524. do_element:
  1525. GetArg1(1, element_index);
  1526. if (is_small(element_index) && is_tuple(element_tuple)) {
  1527. Eterm* tp = tuple_val(element_tuple);
  1528. if ((signed_val(element_index) >= 1) &&
  1529. (signed_val(element_index) <= arityval(*tp))) {
  1530. Eterm result = tp[signed_val(element_index)];
  1531. StoreBifResult(2, result);
  1532. }
  1533. }
  1534. }
  1535. /* Fall through */
  1536. OpCase(badarg_j):
  1537. badarg:
  1538. c_p->freason = BADARG;
  1539. goto lb_Cl_error;
  1540. {
  1541. Eterm fast_element_tuple;
  1542. OpCase(i_fast_element_rjId):
  1543. fast_element_tuple = r(0);
  1544. do_fast_element:
  1545. if (is_tuple(fast_element_tuple)) {
  1546. Eterm* tp = tuple_val(fast_element_tuple);
  1547. Eterm pos = Arg(1); /* Untagged integer >= 1 */
  1548. if (pos <= arityval(*tp)) {
  1549. Eterm result = tp[pos];
  1550. StoreBifResult(2, result);
  1551. }
  1552. }
  1553. goto badarg;
  1554. OpCase(i_fast_element_xjId):
  1555. fast_element_tuple = xb(Arg(0));
  1556. I++;
  1557. goto do_fast_element;
  1558. OpCase(i_fast_element_yjId):
  1559. fast_element_tuple = yb(Arg(0));
  1560. I++;
  1561. goto do_fast_element;
  1562. }
  1563. OpCase(catch_yf):
  1564. c_p->catches++;
  1565. yb(Arg(0)) = Arg(1);
  1566. Next(2);
  1567. OpCase(catch_end_y): {
  1568. c_p->catches--;
  1569. make_blank(yb(Arg(0)));
  1570. if (is_non_value(r(0))) {
  1571. if (x(1) == am_throw) {
  1572. r(0) = x(2);
  1573. } else {
  1574. if (x(1) == am_error) {
  1575. SWAPOUT;
  1576. x(2) = add_stacktrace(c_p, x(2), x(3));
  1577. SWAPIN;
  1578. }
  1579. /* only x(2) is included in the rootset here */
  1580. if (E - HTOP < 3 || c_p->mbuf) { /* Force GC in case add_stacktrace()
  1581. * created heap fragments */
  1582. SWAPOUT;
  1583. PROCESS_MAIN_CHK_LOCKS(c_p);
  1584. FCALLS -= erts_garbage_collect(c_p, 3, reg+2, 1);
  1585. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
  1586. PROCESS_MAIN_CHK_LOCKS(c_p);
  1587. SWAPIN;
  1588. }
  1589. r(0) = TUPLE2(HTOP, am_EXIT, x(2));
  1590. HTOP += 3;
  1591. }
  1592. }
  1593. CHECK_TERM(r(0));
  1594. Next(1);
  1595. }
  1596. OpCase(try_end_y): {
  1597. c_p->catches--;
  1598. make_blank(yb(Arg(0)));
  1599. if (is_non_value(r(0))) {
  1600. r(0) = x(1);
  1601. x(1) = x(2);
  1602. x(2) = x(3);
  1603. }
  1604. Next(1);
  1605. }
  1606. /*
  1607. * Skeleton for receive statement:
  1608. *
  1609. * recv_mark L1 Optional
  1610. * call make_ref/monitor Optional
  1611. * ...
  1612. * recv_set L1 Optional
  1613. * L1: <-------------------+
  1614. * <-----------+ |
  1615. * | |
  1616. * loop_rec L2 ------+---+ |
  1617. * ... | | |
  1618. * remove_message | | |
  1619. * jump L3 | | |
  1620. * ... | | |
  1621. * loop_rec_end L1 --+ | |
  1622. * L2: <---------------+ |
  1623. * wait L1 -----------------+ or wait_timeout
  1624. * timeout
  1625. *
  1626. * L3: Code after receive...
  1627. *
  1628. *
  1629. */
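
recv_mark/recv_set implement the receive optimization for fresh references: remember where the message queue ended when the reference was created, and start the later scan there, because messages that arrived earlier cannot possibly contain that reference. The bookkeeping, reduced to a sketch (the struct is illustrative; the real fields live in c_p->msg):

struct msg { struct msg *next; };

struct msg_queue {
    struct msg  *first;
    struct msg **last;          /* where the next message will be linked */
    struct msg **save;          /* current scan position of a receive */
    struct msg **saved_last;    /* queue end remembered by recv_mark */
};

static void recv_mark(struct msg_queue *q) { q->saved_last = q->last; }
static void recv_set(struct msg_queue *q)  { q->save = q->saved_last; }

The validity test in i_recv_set below (c_p->msg.mark == I+1) guards against a stale mark left by some other receive; if the mark is stale, the scan simply starts at the head of the queue.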
  1630. OpCase(recv_mark_f): {
  1631. /*
1632. * Save the current position in the message buffer and the
1633. * label for the loop_rec/2 instruction for the
1634. * receive statement.
  1635. */
  1636. c_p->msg.mark = (BeamInstr *) Arg(0);
  1637. c_p->msg.saved_last = c_p->msg.last;
  1638. Next(1);
  1639. }
  1640. OpCase(i_recv_set): {
  1641. /*
  1642. * If the mark is valid (points to the loop_rec/2
  1643. * instruction that follows), we know that the saved
  1644. * position points to the first message that could
  1645. * possibly be matched out.
  1646. *
  1647. * If the mark is invalid, we do nothing, meaning that
  1648. * we will look through all messages in the message queue.
  1649. */
  1650. if (c_p->msg.mark == (BeamInstr *) (I+1)) {
  1651. c_p->msg.save = c_p->msg.saved_last;
  1652. }
  1653. I++;
  1654. /* Fall through to the loop_rec/2 instruction */
  1655. }
  1656. /*
  1657. * Pick up the next message and place it in x(0).
  1658. * If no message, jump to a wait or wait_timeout instruction.
  1659. */
  1660. OpCase(i_loop_rec_fr):
  1661. {
  1662. BeamInstr *next;
  1663. ErlMessage* msgp;
  1664. loop_rec__:
  1665. PROCESS_MAIN_CHK_LOCKS(c_p);
  1666. msgp = PEEK_MESSAGE(c_p);
  1667. if (!msgp) {
  1668. #ifdef ERTS_SMP
  1669. erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
1670. /* Make sure messages won't pass exit signals... */
  1671. if (ERTS_PROC_PENDING_EXIT(c_p)) {
  1672. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1673. SWAPOUT;
  1674. goto do_schedule; /* Will be rescheduled for exit */
  1675. }
  1676. ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
  1677. msgp = PEEK_MESSAGE(c_p);
  1678. if (msgp)
  1679. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1680. else
  1681. #endif
  1682. {
  1683. SET_I((BeamInstr *) Arg(0));
  1684. Goto(*I); /* Jump to a wait or wait_timeout instruction */
  1685. }
  1686. }
  1687. ErtsMoveMsgAttachmentIntoProc(msgp, c_p, E, HTOP, FCALLS,
  1688. {
  1689. SWAPOUT;
  1690. reg[0] = r(0);
  1691. PROCESS_MAIN_CHK_LOCKS(c_p);
  1692. },
  1693. {
  1694. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
  1695. PROCESS_MAIN_CHK_LOCKS(c_p);
  1696. r(0) = reg[0];
  1697. SWAPIN;
  1698. });
  1699. if (is_non_value(ERL_MESSAGE_TERM(msgp))) {
  1700. /*
  1701. * A corrupt distribution message that we weren't able to decode;
  1702. * remove it...
  1703. */
  1704. ASSERT(!msgp->data.attached);
  1705. /* TODO: Add DTrace probe for this bad message situation? */
  1706. UNLINK_MESSAGE(c_p, msgp);
  1707. free_message(msgp);
  1708. goto loop_rec__;
  1709. }
  1710. PreFetch(1, next);
  1711. r(0) = ERL_MESSAGE_TERM(msgp);
  1712. NextPF(1, next);
  1713. }
  1714. /*
  1715. * Remove a (matched) message from the message queue.
  1716. */
  1717. OpCase(remove_message): {
  1718. BeamInstr *next;
  1719. ErlMessage* msgp;
  1720. PROCESS_MAIN_CHK_LOCKS(c_p);
  1721. PreFetch(0, next);
  1722. msgp = PEEK_MESSAGE(c_p);
  1723. if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) {
  1724. save_calls(c_p, &exp_receive);
  1725. }
  1726. if (ERL_MESSAGE_TOKEN(msgp) == NIL) {
  1727. #ifdef USE_VM_PROBES
  1728. if (DT_UTAG(c_p) != NIL) {
  1729. if (DT_UTAG_FLAGS(c_p) & DT_UTAG_PERMANENT) {
  1730. SEQ_TRACE_TOKEN(c_p) = am_have_dt_utag;
  1731. #ifdef DTRACE_TAG_HARDDEBUG
  1732. if (DT_UTAG_FLAGS(c_p) & DT_UTAG_SPREADING)
  1733. erts_fprintf(stderr,
  1734. "Dtrace -> (%T) stop spreading "
  1735. "tag %T with message %T\r\n",
  1736. c_p->common.id,DT_UTAG(c_p),ERL_MESSAGE_TERM(msgp));
  1737. #endif
  1738. } else {
  1739. #ifdef DTRACE_TAG_HARDDEBUG
  1740. erts_fprintf(stderr,
  1741. "Dtrace -> (%T) kill tag %T with "
  1742. "message %T\r\n",
  1743. c_p->common.id,DT_UTAG(c_p),ERL_MESSAGE_TERM(msgp));
  1744. #endif
  1745. DT_UTAG(c_p) = NIL;
  1746. SEQ_TRACE_TOKEN(c_p) = NIL;
  1747. }
  1748. } else {
  1749. #endif
  1750. SEQ_TRACE_TOKEN(c_p) = NIL;
  1751. #ifdef USE_VM_PROBES
  1752. }
  1753. DT_UTAG_FLAGS(c_p) &= ~DT_UTAG_SPREADING;
  1754. #endif
  1755. } else if (ERL_MESSAGE_TOKEN(msgp) != am_undefined) {
  1756. Eterm msg;
  1757. SEQ_TRACE_TOKEN(c_p) = ERL_MESSAGE_TOKEN(msgp);
  1758. #ifdef USE_VM_PROBES
  1759. if (ERL_MESSAGE_TOKEN(msgp) == am_have_dt_utag) {
  1760. if (DT_UTAG(c_p) == NIL) {
  1761. DT_UTAG(c_p) = ERL_MESSAGE_DT_UTAG(msgp);
  1762. }
  1763. DT_UTAG_FLAGS(c_p) |= DT_UTAG_SPREADING;
  1764. #ifdef DTRACE_TAG_HARDDEBUG
  1765. erts_fprintf(stderr,
  1766. "Dtrace -> (%T) receive tag (%T) "
  1767. "with message %T\r\n",
  1768. c_p->common.id, DT_UTAG(c_p), ERL_MESSAGE_TERM(msgp));
  1769. #endif
  1770. } else {
  1771. #endif
  1772. ASSERT(is_tuple(SEQ_TRACE_TOKEN(c_p)));
  1773. ASSERT(SEQ_TRACE_TOKEN_ARITY(c_p) == 5);
  1774. ASSERT(is_small(SEQ_TRACE_TOKEN_SERIAL(c_p)));
  1775. ASSERT(is_small(SEQ_TRACE_TOKEN_LASTCNT(c_p)));
  1776. ASSERT(is_small(SEQ_TRACE_TOKEN_FLAGS(c_p)));
  1777. ASSERT(is_pid(SEQ_TRACE_TOKEN_SENDER(c_p)));
  1778. c_p->seq_trace_lastcnt = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p));
  1779. if (c_p->seq_trace_clock < unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p))) {
  1780. c_p->seq_trace_clock = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p));
  1781. }
  1782. msg = ERL_MESSAGE_TERM(msgp);
  1783. seq_trace_output(SEQ_TRACE_TOKEN(c_p), msg, SEQ_TRACE_RECEIVE,
  1784. c_p->common.id, c_p);
  1785. #ifdef USE_VM_PROBES
  1786. }
  1787. #endif
  1788. }
  1789. #ifdef USE_VM_PROBES
  1790. if (DTRACE_ENABLED(message_receive)) {
  1791. Eterm token2 = NIL;
  1792. DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
  1793. Sint tok_label = 0;
  1794. Sint tok_lastcnt = 0;
  1795. Sint tok_serial = 0;
  1796. dtrace_proc_str(c_p, receiver_name);
  1797. token2 = SEQ_TRACE_TOKEN(c_p);
  1798. if (token2 != NIL && token2 != am_have_dt_utag) {
  1799. tok_label = signed_val(SEQ_TRACE_T_LABEL(token2));
  1800. tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token2));
  1801. tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token2));
  1802. }
  1803. DTRACE6(message_receive,
  1804. receiver_name, size_object(ERL_MESSAGE_TERM(msgp)),
  1805. c_p->msg.len - 1, tok_label, tok_lastcnt, tok_serial);
  1806. }
  1807. #endif
  1808. UNLINK_MESSAGE(c_p, msgp);
  1809. JOIN_MESSAGE(c_p);
  1810. CANCEL_TIMER(c_p);
  1811. free_message(msgp);
  1812. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
  1813. PROCESS_MAIN_CHK_LOCKS(c_p);
  1814. NextPF(0, next);
  1815. }
  1816. /*
  1817. * Advance the save pointer to the next message (the current
  1818. * message didn't match), then jump to the loop_rec instruction.
  1819. */
  1820. OpCase(loop_rec_end_f): {
  1821. SET_I((BeamInstr *) Arg(0));
  1822. SAVE_MESSAGE(c_p);
  1823. goto loop_rec__;
  1824. }
  1825. /*
  1826. * Prepare to wait for a message or a timeout, whichever occurs first.
  1827. *
  1828. * Note: In order to keep the compatibility between 32 and 64 bits
  1829. * emulators, only timeout values that can be represented in 32 bits
  1830. * (unsigned) or less are allowed.
  1831. */
  1832. OpCase(i_wait_timeout_fs): {
  1833. erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1834. /* Fall through */
  1835. }
  1836. OpCase(i_wait_timeout_locked_fs): {
  1837. Eterm timeout_value;
  1838. /*
  1839. * If we have already set the timer, we must NOT set it again. Therefore,
  1840. * we must test the F_INSLPQUEUE flag as well as the F_TIMO flag.
  1841. */
  1842. if (c_p->flags & (F_INSLPQUEUE | F_TIMO)) {
  1843. goto wait2;
  1844. }
  1845. GetArg1(1, timeout_value);
  1846. if (timeout_value != make_small(0)) {
  1847. if (timeout_value == am_infinity)
  1848. c_p->flags |= F_TIMO;
  1849. else {
  1850. int tres = erts_set_proc_timer_term(c_p, timeout_value);
  1851. if (tres == 0) {
  1852. /*
1853. * The timer routine will set c_p->i to the value in
  1854. * c_p->def_arg_reg[0]. Note that it is safe to use this
  1855. * location because there are no living x registers in
  1856. * a receive statement.
1857. * Note that for the halfword emulator, the first two elements
  1858. * of the array are used.
  1859. */
  1860. BeamInstr** pi = (BeamInstr**) c_p->def_arg_reg;
  1861. *pi = I+3;
  1862. }
1863. else { /* Wrong timeout value */
  1864. OpCase(i_wait_error_locked): {
  1865. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1866. /* Fall through */
  1867. }
  1868. OpCase(i_wait_error): {
  1869. c_p->freason = EXC_TIMEOUT_VALUE;
  1870. goto find_func_info;
  1871. }
  1872. }
  1873. }
  1874. /*
  1875. * Prepare to wait indefinitely for a new message to arrive
1876. * (or until the timeout set above expires, if falling through from above).
1877. *
1878. * When a new message arrives, control will be transferred to
1879. * the loop_rec instruction (at label L1). In case
  1880. * of timeout, control will be transferred to the timeout
  1881. * instruction following the wait_timeout instruction.
  1882. */
  1883. OpCase(wait_locked_f):
  1884. OpCase(wait_f):
  1885. wait2: {
  1886. #ifndef ERTS_SMP
  1887. if (ERTS_PROC_IS_EXITING(c_p)) {
  1888. /*
1889. * In the non-SMP case:
  1890. *
  1891. * Currently executing process might be sent an exit
  1892. * signal if it is traced by a port that it also is
  1893. * linked to, and the port terminates during the
  1894. * trace. In this case we do *not* want to clear
1895. * the active flag, which would make the process hang
  1896. * in limbo forever.
  1897. */
  1898. SWAPOUT;
  1899. goto do_schedule;
  1900. }
  1901. #endif
  1902. c_p->i = (BeamInstr *) Arg(0); /* L1 */
  1903. SWAPOUT;
  1904. c_p->arity = 0;
  1905. erts_smp_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_ACTIVE);
  1906. ASSERT(!ERTS_PROC_IS_EXITING(c_p));
  1907. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1908. c_p->current = NULL;
  1909. goto do_schedule;
  1910. }
  1911. OpCase(wait_unlocked_f): {
  1912. erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1913. goto wait2;
  1914. }
  1915. }
  1916. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1917. Next(2);
  1918. }
  1919. OpCase(i_wait_timeout_fI): {
  1920. erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1921. }
  1922. OpCase(i_wait_timeout_locked_fI):
  1923. {
  1924. /*
  1925. * If we have already set the timer, we must NOT set it again. Therefore,
  1926. * we must test the F_INSLPQUEUE flag as well as the F_TIMO flag.
  1927. */
  1928. if ((c_p->flags & (F_INSLPQUEUE | F_TIMO)) == 0) {
  1929. BeamInstr** p = (BeamInstr **) c_p->def_arg_reg;
  1930. *p = I+3;
  1931. erts_set_proc_timer_uword(c_p, Arg(1));
  1932. }
  1933. goto wait2;
  1934. }
  1935. /*
  1936. * A timeout has occurred. Reset the save pointer so that the next
  1937. * receive statement will examine the first message first.
  1938. */
  1939. OpCase(timeout_locked): {
  1940. erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
  1941. }
  1942. OpCase(timeout): {
  1943. BeamInstr *next;
  1944. PreFetch(0, next);
  1945. if (IS_TRACED_FL(c_p, F_TRACE_RECEIVE)) {
  1946. trace_receive(c_p, am_timeout);
  1947. }
  1948. if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) {
  1949. save_calls(c_p, &exp_timeout);
  1950. }
  1951. c_p->flags &= ~F_TIMO;
  1952. JOIN_MESSAGE(c_p);
  1953. NextPF(0, next);
  1954. }
  1955. {
  1956. Eterm select_val2;
  1957. OpCase(i_select_tuple_arity2_yfAAff):
  1958. select_val2 = yb(Arg(0));
  1959. goto do_select_tuple_arity2;
  1960. OpCase(i_select_tuple_arity2_xfAAff):
  1961. select_val2 = xb(Arg(0));
  1962. goto do_select_tuple_arity2;
  1963. OpCase(i_select_tuple_arity2_rfAAff):
  1964. select_val2 = r(0);
  1965. I--;
  1966. do_select_tuple_arity2:
  1967. if (is_not_tuple(select_val2)) {
  1968. goto select_val2_fail;
  1969. }
  1970. select_val2 = *tuple_val(select_val2);
  1971. goto do_select_val2;
  1972. OpCase(i_select_val2_yfccff):
  1973. select_val2 = yb(Arg(0));
  1974. goto do_select_val2;
  1975. OpCase(i_select_val2_xfccff):
  1976. select_val2 = xb(Arg(0));
  1977. goto do_select_val2;
  1978. OpCase(i_select_val2_rfccff):
  1979. select_val2 = r(0);
  1980. I--;
  1981. do_select_val2:
  1982. if (select_val2 == Arg(2)) {
  1983. I += 3;
  1984. } else if (select_val2 == Arg(3)) {
  1985. I += 4;
  1986. }
  1987. select_val2_fail:
  1988. SET_I((BeamInstr *) Arg(1));
  1989. Goto(*I);
  1990. }
  1991. {
  1992. Eterm select_val;
  1993. OpCase(i_select_tuple_arity_xfI):
  1994. select_val = xb(Arg(0));
  1995. goto do_select_tuple_arity;
  1996. OpCase(i_select_tuple_arity_yfI):
  1997. select_val = yb(Arg(0));
  1998. goto do_select_tuple_arity;
  1999. OpCase(i_select_tuple_arity_rfI):
  2000. select_val = r(0);
  2001. I--;
  2002. do_select_tuple_arity:
  2003. if (is_tuple(select_val)) {
  2004. select_val = *tuple_val(select_val);
  2005. goto do_linear_search;
  2006. }
  2007. SET_I((BeamInstr *) Arg(1));
  2008. Goto(*I);
  2009. OpCase(i_select_val_lins_xfI):
  2010. select_val = xb(Arg(0));
  2011. goto do_linear_search;
  2012. OpCase(i_select_val_lins_yfI):
  2013. select_val = yb(Arg(0));
  2014. goto do_linear_search;
  2015. OpCase(i_select_val_lins_rfI):
  2016. select_val = r(0);
  2017. I--;
  2018. do_linear_search: {
  2019. BeamInstr *vs = &Arg(3);
  2020. int ix = 0;
  2021. for(;;) {
  2022. if (vs[ix+0] >= select_val) { ix += 0; break; }
  2023. if (vs[ix+1] >= select_val) { ix += 1; break; }
  2024. ix += 2;
  2025. }
  2026. if (vs[ix] == select_val) {
  2027. I += ix + Arg(2) + 2;
  2028. }
  2029. SET_I((BeamInstr *) Arg(1));
  2030. Goto(*I);
  2031. }
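/*
 * Note (assumption): the unrolled scan above relies on the sorted
 * value array containing an entry that is >= any key it can be
 * probed with, presumably arranged by the loader, so that the loop
 * always terminates before running off the end of the array.
 */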
  2032. OpCase(i_select_val_bins_xfI):
  2033. select_val = xb(Arg(0));
  2034. goto do_binary_search;
  2035. OpCase(i_select_val_bins_yfI):
  2036. select_val = yb(Arg(0));
  2037. goto do_binary_search;
  2038. OpCase(i_select_val_bins_rfI):
  2039. select_val = r(0);
  2040. I--;
  2041. do_binary_search:
  2042. {
  2043. struct Pairs {
  2044. BeamInstr val;
  2045. BeamInstr* addr;
  2046. };
  2047. struct Pairs* low;
  2048. struct Pairs* high;
  2049. struct Pairs* mid;
  2050. int bdiff; /* int not long because the arrays aren't that large */
  2051. low = (struct Pairs *) &Arg(3);
  2052. high = low + Arg(2);
  2053. /* The pointer subtraction (high-low) below must produce
  2054. * a signed result, because high could be < low. That
  2055. * requires the compiler to insert quite a bit of code.
  2056. *
  2057. * However, high will be > low so the result will be
  2058. * positive. We can use that knowledge to optimise the
  2059. * entire sequence, from the initial comparison to the
  2060. * computation of mid.
  2061. *
  2062. * -- Mikael Pettersson, Acumem AB
  2063. *
  2064. * Original loop control code:
  2065. *
  2066. * while (low < high) {
  2067. * mid = low + (high-low) / 2;
  2068. *
  2069. */
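/*
 * Sketch of the transformed step (assuming sizeof(struct Pairs) is
 * a power of two, which holds for its two word-sized members):
 *
 *     bdiff   = (char*)high - (char*)low   (known positive here)
 *     boffset = (bdiff / 2) rounded down to a multiple of
 *               sizeof(struct Pairs)
 *     mid     = (struct Pairs*)((char*)low + boffset)
 *
 * The mask ~(sizeof(struct Pairs)-1) does the rounding without a
 * division by the struct size.
 */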
  2070. while ((bdiff = (int)((char*)high - (char*)low)) > 0) {
  2071. unsigned int boffset = ((unsigned int)bdiff >> 1) & ~(sizeof(struct Pairs)-1);
  2072. mid = (struct Pairs*)((char*)low + boffset);
  2073. if (select_val < mid->val) {
  2074. high = mid;
  2075. } else if (select_val > mid->val) {
  2076. low = mid + 1;
  2077. } else {
  2078. SET_I(mid->addr);
  2079. Goto(*I);
  2080. }
  2081. }
  2082. SET_I((BeamInstr *) Arg(1));
  2083. Goto(*I);
  2084. }
  2085. }
  2086. {
  2087. Eterm jump_on_val_zero_index;
  2088. OpCase(i_jump_on_val_zero_yfI):
  2089. jump_on_val_zero_index = yb(Arg(0));
  2090. goto do_jump_on_val_zero_index;
  2091. OpCase(i_jump_on_val_zero_xfI):
  2092. jump_on_val_zero_index = xb(Arg(0));
  2093. goto do_jump_on_val_zero_index;
  2094. OpCase(i_jump_on_val_zero_rfI):
  2095. jump_on_val_zero_index = r(0);
  2096. I--;
  2097. do_jump_on_val_zero_index:
  2098. if (is_small(jump_on_val_zero_index)) {
  2099. jump_on_val_zero_index = signed_val(jump_on_val_zero_index);
  2100. if (jump_on_val_zero_index < Arg(2)) {
  2101. SET_I((BeamInstr *) (&Arg(3))[jump_on_val_zero_index]);
  2102. Goto(*I);
  2103. }
  2104. }
  2105. SET_I((BeamInstr *) Arg(1));
  2106. Goto(*I);
  2107. }
  2108. {
  2109. Eterm jump_on_val_index;
  2110. OpCase(i_jump_on_val_yfII):
  2111. jump_on_val_index = yb(Arg(0));
  2112. goto do_jump_on_val_index;
  2113. OpCase(i_jump_on_val_xfII):
  2114. jump_on_val_index = xb(Arg(0));
  2115. goto do_jump_on_val_index;
  2116. OpCase(i_jump_on_val_rfII):
  2117. jump_on_val_index = r(0);
  2118. I--;
  2119. do_jump_on_val_index:
  2120. if (is_small(jump_on_val_index)) {
  2121. jump_on_val_index = (Uint) (signed_val(jump_on_val_index) - Arg(3));
  2122. if (jump_on_val_index < Arg(2)) {
  2123. SET_I((BeamInstr *) (&Arg(4))[jump_on_val_index]);
  2124. Goto(*I);
  2125. }
  2126. }
  2127. SET_I((BeamInstr *) Arg(1));
  2128. Goto(*I);
  2129. }
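/*
 * Note (illustrative): the Uint cast in do_jump_on_val_index makes
 * one unsigned comparison check both bounds; an index below the base
 * value Arg(3) wraps around to a huge unsigned number and fails the
 * `< Arg(2)` range test.
 */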
  2130. do_put_tuple: {
  2131. Eterm* hp = HTOP;
  2132. *hp++ = make_arityval(pt_arity);
  2133. do {
  2134. Eterm term = *I++;
  2135. switch (term & _TAG_IMMED1_MASK) {
  2136. case (R_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER:
  2137. *hp++ = r(0);
  2138. break;
  2139. case (X_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER:
  2140. *hp++ = x(term >> _TAG_IMMED1_SIZE);
  2141. break;
  2142. case (Y_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER:
  2143. *hp++ = y(term >> _TAG_IMMED1_SIZE);
  2144. break;
  2145. default:
  2146. *hp++ = term;
  2147. break;
  2148. }
  2149. } while (--pt_arity != 0);
  2150. HTOP = hp;
  2151. Goto(*I);
  2152. }
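/*
 * Illustrative sketch of the operand encoding decoded above (the
 * precise layout is the loader's business): a descriptor naming x(N)
 * would be built roughly as
 *
 *     desc = (N << _TAG_IMMED1_SIZE)
 *            | (X_REG_DEF << _TAG_PRIMARY_SIZE)
 *            | TAG_PRIMARY_HEADER;
 *
 * and any operand whose immediate-1 tag is not one of the R/X/Y
 * register tags is taken as a literal term (the default case).
 */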
  2153. OpCase(new_map_dII): {
  2154. Eterm res;
  2155. x(0) = r(0);
  2156. SWAPOUT;
  2157. res = new_map(c_p, reg, I-1);
  2158. SWAPIN;
  2159. r(0) = x(0);
  2160. StoreResult(res, Arg(0));
  2161. Next(3+Arg(2));
  2162. }
  2163. #define PUT_TERM_REG(term, desc) \
  2164. do { \
  2165. switch ((desc) & _TAG_IMMED1_MASK) { \
  2166. case (R_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER: \
  2167. r(0) = (term); \
  2168. break; \
  2169. case (X_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER: \
  2170. x((desc) >> _TAG_IMMED1_SIZE) = (term); \
  2171. break; \
  2172. case (Y_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER: \
  2173. y((desc) >> _TAG_IMMED1_SIZE) = (term); \
  2174. break; \
  2175. default: \
  2176. ASSERT(0); \
  2177. break; \
  2178. } \
  2179. } while(0)
  2180. OpCase(i_get_map_elements_fsI): {
  2181. Eterm map;
  2182. BeamInstr *fs;
  2183. Uint sz, n;
  2184. GetArg1(1, map);
  2185. /* this instruction assumes Arg1 is a map,
2186. * i.e. that it follows an is_map test if needed.
  2187. */
  2188. n = (Uint)Arg(2) / 3;
  2189. fs = &Arg(3); /* pattern fields and target registers */
  2190. if (is_flatmap(map)) {
  2191. flatmap_t *mp;
  2192. Eterm *ks;
  2193. Eterm *vs;
  2194. mp = (flatmap_t *)flatmap_val(map);
  2195. sz = flatmap_get_size(mp);
  2196. if (sz == 0) {
  2197. ClauseFail();
  2198. }
  2199. ks = flatmap_get_keys(mp);
  2200. vs = flatmap_get_values(mp);
  2201. while(sz) {
  2202. if (EQ((Eterm) fs[0], *ks)) {
  2203. PUT_TERM_REG(*vs, fs[1]);
  2204. n--;
  2205. fs += 3;
  2206. /* no more values to fetch, we are done */
  2207. if (n == 0) {
  2208. I = fs;
  2209. Next(-1);
  2210. }
  2211. }
  2212. ks++, sz--, vs++;
  2213. }
  2214. ClauseFail();
  2215. } else {
  2216. const Eterm *v;
  2217. Uint32 hx;
  2218. ASSERT(is_hashmap(map));
  2219. while(n--) {
  2220. hx = fs[2];
  2221. ASSERT(hx == hashmap_make_hash((Eterm)fs[0]));
  2222. if ((v = erts_hashmap_get(hx, (Eterm)fs[0], map)) == NULL) {
  2223. ClauseFail();
  2224. }
  2225. PUT_TERM_REG(*v, fs[1]);
  2226. fs += 3;
  2227. }
  2228. I = fs;
  2229. Next(-1);
  2230. }
  2231. }
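/*
 * Note (illustrative): the operand array fs[] consists of triples
 * { key, destination descriptor, precomputed hash }; the hash in
 * fs[2] is only consulted on the hashmap path above.
 */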
  2232. #undef PUT_TERM_REG
  2233. OpCase(update_map_assoc_jsdII): {
  2234. Eterm res;
  2235. Eterm map;
  2236. GetArg1(1, map);
  2237. x(0) = r(0);
  2238. SWAPOUT;
  2239. res = update_map_assoc(c_p, reg, map, I);
  2240. SWAPIN;
  2241. if (is_value(res)) {
  2242. r(0) = x(0);
  2243. StoreResult(res, Arg(2));
  2244. Next(5+Arg(4));
  2245. } else {
  2246. /*
  2247. * This can only happen if the code was compiled
  2248. * with the compiler in OTP 17.
  2249. */
  2250. c_p->freason = BADMAP;
  2251. c_p->fvalue = map;
  2252. goto lb_Cl_error;
  2253. }
  2254. }
  2255. OpCase(update_map_exact_jsdII): {
  2256. Eterm res;
  2257. Eterm map;
  2258. GetArg1(1, map);
  2259. x(0) = r(0);
  2260. SWAPOUT;
  2261. res = update_map_exact(c_p, reg, map, I);
  2262. SWAPIN;
  2263. if (is_value(res)) {
  2264. r(0) = x(0);
  2265. StoreResult(res, Arg(2));
  2266. Next(5+Arg(4));
  2267. } else {
  2268. goto lb_Cl_error;
  2269. }
  2270. }
  2271. /*
  2272. * All guards with zero arguments have special instructions:
  2273. * self/0
  2274. * node/0
  2275. *
  2276. * All other guard BIFs take one or two arguments.
  2277. */
  2278. /*
  2279. * Guard BIF in head. On failure, ignore the error and jump
  2280. * to the code for the next clause. We don't support tracing
  2281. * of guard BIFs.
  2282. */
  2283. OpCase(bif1_fbsd):
  2284. {
  2285. Eterm (*bf)(Process*, Eterm*);
  2286. Eterm tmp_reg[1];
  2287. Eterm result;
  2288. GetArg1(2, tmp_reg[0]);
  2289. bf = (BifFunction) Arg(1);
  2290. c_p->fcalls = FCALLS;
  2291. PROCESS_MAIN_CHK_LOCKS(c_p);
  2292. ASSERT(!ERTS_PROC_IS_EXITING(c_p));
  2293. result = (*bf)(c_p, tmp_reg);
  2294. ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
  2295. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
  2296. PROCESS_MAIN_CHK_LOCKS(c_p);
  2297. ERTS_HOLE_CHECK(c_p);
  2298. FCALLS = c_p->fcalls;
  2299. if (is_value(result)) {
  2300. StoreBifResult(3, result);
  2301. }
  2302. SET_I((BeamInstr *) Arg(0));
  2303. Goto(*I);
  2304. }
  2305. /*
  2306. * Guard BIF in body. It can fail like any BIF. No trace support.
  2307. */
  2308. OpCase(bif1_body_bsd):
  2309. {
  2310. Eterm (*bf)(Process*, Eterm*);
  2311. Eterm tmp_reg[1];
  2312. Eterm result;
  2313. GetArg1(1, tmp_reg[0]);
  2314. bf = (BifFunction) Arg(0);
  2315. c_p->fcalls = FCALLS;
  2316. PROCESS_MAIN_CHK_LOCKS(c_p);
  2317. ASSERT(!ERTS_PROC_IS_EXITING(c_p));
  2318. result = (*bf)(c_p, tmp_reg);
  2319. ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
  2320. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
  2321. PROCESS_MAIN_CHK_LOCKS(c_p);
  2322. ERTS_HOLE_CHECK(c_p);
  2323. FCALLS = c_p->fcalls;
  2324. if (is_value(result)) {
  2325. StoreBifResult(2, result);
  2326. }
  2327. reg[0] = tmp_reg[0];
  2328. SWAPOUT;
  2329. I = handle_error(c_p, I, reg, bf);
  2330. goto post_error_handling;
  2331. }
  2332. OpCase(i_gc_bif1_jIsId):
  2333. {
  2334. typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint);
  2335. GcBifFunction bf;
  2336. Eterm arg;
  2337. Eterm result;
  2338. Uint live = (Uint) Arg(3);
  2339. GetArg1(2, arg);
  2340. reg[0] = r(0);
  2341. reg[live] = arg;
  2342. bf = (GcBifFunction) Arg(1);
  2343. c_p->fcalls = FCALLS;
  2344. SWAPOUT;
  2345. PROCESS_MAIN_CHK_LOCKS(c_p);
  2346. ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
  2347. result = (*bf)(c_p, reg, live);
  2348. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
  2349. ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
  2350. PROCESS_MAIN_CHK_LOCKS(c_p);
  2351. SWAPIN;
  2352. r(0) = reg[0];
  2353. ERTS_HOLE_CHECK(c_p);
  2354. FCALLS = c_p->fcalls;
  2355. if (is_value(result)) {
  2356. StoreBifResult(4, result);
  2357. }
  2358. if (Arg(0) != 0) {
  2359. SET_I((BeamInstr *) Arg(0));
  2360. Goto(*I);
  2361. }
  2362. reg[0] = arg;
  2363. I = handle_error(c_p, I, reg, translate_gc_bif((void *) bf));
  2364. goto post_error_handling;
  2365. }
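/*
 * Note (illustrative): the argument is parked at reg[live] so that
 * it is part of the root set if the GC BIF (for example length/1)
 * needs to garbage collect; the `live` x registers plus the
 * argument(s) then survive the collection.
 */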
2366. OpCase(i_gc_bif2_jIId): /* Note: one parameter less than i_gc_bif1
2367. and i_gc_bif3. */
  2368. {
  2369. typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint);
  2370. GcBifFunction bf;
  2371. Eterm result;
  2372. Uint live = (Uint) Arg(2);
  2373. reg[0] = r(0);
  2374. reg[live++] = tmp_arg1;
  2375. reg[live] = tmp_arg2;
  2376. bf = (GcBifFunction) Arg(1);
  2377. c_p->fcalls = FCALLS;
  2378. SWAPOUT;
  2379. PROCESS_MAIN_CHK_LOCKS(c_p);
  2380. ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
  2381. result = (*bf)(c_p, reg, live);
  2382. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
  2383. ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
  2384. PROCESS_MAIN_CHK_LOCKS(c_p);
  2385. SWAPIN;
  2386. r(0) = reg[0];
  2387. ERTS_HOLE_CHECK(c_p);
  2388. FCALLS = c_p->fcalls;
  2389. if (is_value(result)) {
  2390. StoreBifResult(3, result);
  2391. }
  2392. if (Arg(0) != 0) {
  2393. SET_I((BeamInstr *) Arg(0));
  2394. Goto(*I);
  2395. }
  2396. reg[0] = tmp_arg1;
  2397. reg[1] = tmp_arg2;
  2398. I = handle_error(c_p, I, reg, translate_gc_bif((void *) bf));
  2399. goto post_error_handling;
  2400. }
  2401. OpCase(i_gc_bif3_jIsId):
  2402. {
  2403. typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint);
  2404. GcBifFunction bf;
  2405. Eterm arg;
  2406. Eterm result;
  2407. Uint live = (Uint) Arg(3);
  2408. GetArg1(2, arg);
  2409. reg[0] = r(0);
  2410. reg[live++] = arg;
  2411. reg[live++] = tmp_arg1;
  2412. reg[live] = tmp_arg2;
  2413. bf = (GcBifFunction) Arg(1);
  2414. c_p->fcalls = FCALLS;
  2415. SWAPOUT;
  2416. PROCESS_MAIN_CHK_LOCKS(c_p);
  2417. ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
  2418. result = (*bf)(c_p, reg, live);
  2419. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
  2420. ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
  2421. PROCESS_MAIN_CHK_LOCKS(c_p);
  2422. SWAPIN;
  2423. r(0) = reg[0];
  2424. ERTS_HOLE_CHECK(c_p);
  2425. FCALLS = c_p->fcalls;
  2426. if (is_value(result)) {
  2427. StoreBifResult(4, result);
  2428. }
  2429. if (Arg(0) != 0) {
  2430. SET_I((BeamInstr *) Arg(0));
  2431. Goto(*I);
  2432. }
  2433. reg[0] = arg;
  2434. reg[1] = tmp_arg1;
  2435. reg[2] = tmp_arg2;
  2436. I = handle_error(c_p, I, reg, translate_gc_bif((void *) bf));
  2437. goto post_error_handling;
  2438. }
  2439. /*
2440. * Guard BIFs and, or, and xor in guards.
  2441. */
  2442. OpCase(i_bif2_fbd):
  2443. {
  2444. Eterm tmp_reg[2] = {tmp_arg1, tmp_arg2};
  2445. Eterm (*bf)(Process*, Eterm*);
  2446. Eterm result;
  2447. bf = (BifFunction) Arg(1);
  2448. c_p->fcalls = FCALLS;
  2449. PROCESS_MAIN_CHK_LOCKS(c_p);
  2450. ASSERT(!ERTS_PROC_IS_EXITING(c_p));
  2451. result = (*bf)(c_p, tmp_reg);
  2452. ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
  2453. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
  2454. PROCESS_MAIN_CHK_LOCKS(c_p);
  2455. ERTS_HOLE_CHECK(c_p);
  2456. FCALLS = c_p->fcalls;
  2457. if (is_value(result)) {
  2458. StoreBifResult(2, result);
  2459. }
  2460. SET_I((BeamInstr *) Arg(0));
  2461. Goto(*I);
  2462. }
  2463. /*
2464. * Guard BIFs and, or, xor, and relational operators in body.
  2465. */
  2466. OpCase(i_bif2_body_bd):
  2467. {
  2468. Eterm tmp_reg[2] = {tmp_arg1, tmp_arg2};
  2469. Eterm (*bf)(Process*, Eterm*);
  2470. Eterm result;
  2471. bf = (BifFunction) Arg(0);
  2472. PROCESS_MAIN_CHK_LOCKS(c_p);
  2473. ASSERT(!ERTS_PROC_IS_EXITING(c_p));
  2474. result = (*bf)(c_p, tmp_reg);
  2475. ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
  2476. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
  2477. PROCESS_MAIN_CHK_LOCKS(c_p);
  2478. ERTS_HOLE_CHECK(c_p);
  2479. if (is_value(result)) {
  2480. ASSERT(!is_CP(result));
  2481. StoreBifResult(1, result);
  2482. }
  2483. reg[0] = tmp_arg1;
  2484. reg[1] = tmp_arg2;
  2485. SWAPOUT;
  2486. I = handle_error(c_p, I, reg, bf);
  2487. goto post_error_handling;
  2488. }
  2489. /*
  2490. * The most general BIF call. The BIF may build any amount of data
  2491. * on the heap. The result is always returned in r(0).
  2492. */
  2493. OpCase(call_bif_e):
  2494. {
  2495. Eterm (*bf)(Process*, Eterm*, BeamInstr*) = GET_BIF_ADDRESS(Arg(0));
  2496. Eterm result;
  2497. BeamInstr *next;
  2498. PRE_BIF_SWAPOUT(c_p);
  2499. c_p->fcalls = FCALLS - 1;
  2500. if (FCALLS <= 0) {
  2501. save_calls(c_p, (Export *) Arg(0));
  2502. }
  2503. PreFetch(1, next);
  2504. ASSERT(!ERTS_PROC_IS_EXITING(c_p));
  2505. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
  2506. reg[0] = r(0);
  2507. result = (*bf)(c_p, reg, I);
  2508. ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
  2509. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
  2510. ERTS_HOLE_CHECK(c_p);
  2511. ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
  2512. PROCESS_MAIN_CHK_LOCKS(c_p);
  2513. if (c_p->mbuf || MSO(c_p).overhead >= BIN_VHEAP_SZ(c_p)) {
  2514. Uint arity = ((Export *)Arg(0))->code[2];
  2515. result = erts_gc_after_bif_call(c_p, result, reg, arity);
  2516. E = c_p->stop;
  2517. }
  2518. HTOP = HEAP_TOP(c_p);
  2519. FCALLS = c_p->fcalls;
  2520. if (is_value(result)) {
  2521. r(0) = result;
  2522. CHECK_TERM(r(0));
  2523. NextPF(1, next);
  2524. } else if (c_p->freason == TRAP) {
  2525. SET_CP(c_p, I+2);
  2526. SET_I(c_p->i);
  2527. SWAPIN;
  2528. r(0) = reg[0];
  2529. Dispatch();
  2530. }
  2531. /*
  2532. * Error handling. SWAPOUT is not needed because it was done above.
  2533. */
  2534. ASSERT(c_p->stop == E);
  2535. I = handle_error(c_p, I, reg, bf);
  2536. goto post_error_handling;
  2537. }
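/*
 * Illustrative note on the TRAP convention handled above: a BIF that
 * cannot finish within its reduction budget returns THE_NON_VALUE
 * with c_p->freason set to TRAP and c_p->i pointing at continuation
 * code; execution resumes there instead of raising an error.
 */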
  2538. /*
  2539. * Arithmetic operations.
  2540. */
  2541. OpCase(i_times_jId):
  2542. {
  2543. arith_func = ARITH_FUNC(mixed_times);
  2544. goto do_big_arith2;
  2545. }
  2546. OpCase(i_m_div_jId):
  2547. {
  2548. arith_func = ARITH_FUNC(mixed_div);
  2549. goto do_big_arith2;
  2550. }
  2551. OpCase(i_int_div_jId):
  2552. {
  2553. Eterm result;
  2554. if (tmp_arg2 == SMALL_ZERO) {
  2555. goto badarith;
  2556. } else if (is_both_small(tmp_arg1, tmp_arg2)) {
  2557. Sint ires = signed_val(tmp_arg1) / signed_val(tmp_arg2);
  2558. if (MY_IS_SSMALL(ires)) {
  2559. result = make_small(ires);
  2560. STORE_ARITH_RESULT(result);
  2561. }
  2562. }
  2563. arith_func = ARITH_FUNC(int_div);
  2564. goto do_big_arith2;
  2565. }
  2566. OpCase(i_rem_jIxxd):
  2567. {
  2568. Eterm result;
  2569. if (xb(Arg(3)) == SMALL_ZERO) {
  2570. goto badarith;
  2571. } else if (is_both_small(xb(Arg(2)), xb(Arg(3)))) {
  2572. result = make_small(signed_val(xb(Arg(2))) % signed_val(xb(Arg(3))));
  2573. StoreBifResult(4, result);
  2574. }
  2575. DO_BIG_ARITH(ARITH_FUNC(int_rem),xb(Arg(2)),xb(Arg(3)));
  2576. }
  2577. OpCase(i_rem_jId):
  2578. {
  2579. Eterm result;
  2580. if (tmp_arg2 == SMALL_ZERO) {
  2581. goto badarith;
  2582. } else if (is_both_small(tmp_arg1, tmp_arg2)) {
  2583. result = make_small(signed_val(tmp_arg1) % signed_val(tmp_arg2));
  2584. STORE_ARITH_RESULT(result);
  2585. } else {
  2586. arith_func = ARITH_FUNC(int_rem);
  2587. goto do_big_arith2;
  2588. }
  2589. }
  2590. OpCase(i_band_jIxcd):
  2591. {
  2592. Eterm result;
  2593. if (is_both_small(xb(Arg(2)), Arg(3))) {
  2594. /*
  2595. * No need to untag -- TAG & TAG == TAG.
  2596. */
  2597. result = xb(Arg(2)) & Arg(3);
  2598. StoreBifResult(4, result);
  2599. }
  2600. DO_BIG_ARITH(ARITH_FUNC(band),xb(Arg(2)),Arg(3));
  2601. }
  2602. OpCase(i_band_jId):
  2603. {
  2604. Eterm result;
  2605. if (is_both_small(tmp_arg1, tmp_arg2)) {
  2606. /*
  2607. * No need to untag -- TAG & TAG == TAG.
  2608. */
  2609. result = tmp_arg1 & tmp_arg2;
  2610. STORE_ARITH_RESULT(result);
  2611. }
  2612. arith_func = ARITH_FUNC(band);
  2613. goto do_big_arith2;
  2614. }
  2615. #undef DO_BIG_ARITH
  2616. do_big_arith2:
  2617. {
  2618. Eterm result;
  2619. Uint live = Arg(1);
  2620. SWAPOUT;
  2621. reg[0] = r(0);
  2622. reg[live] = tmp_arg1;
  2623. reg[live+1] = tmp_arg2;
  2624. result = arith_func(c_p, reg, live);
  2625. r(0) = reg[0];
  2626. SWAPIN;
  2627. ERTS_HOLE_CHECK(c_p);
  2628. if (is_value(result)) {
  2629. STORE_ARITH_RESULT(result);
  2630. }
  2631. goto lb_Cl_error;
  2632. }
  2633. /*
2634. * An error occurred in an arithmetic operation or test that could
2635. * appear either in a head or in a body.
2636. * In a head, execution should continue at the failure address in Arg(0).
  2637. * In a body, Arg(0) == 0 and an exception should be raised.
  2638. */
  2639. lb_Cl_error: {
  2640. if (Arg(0) != 0) {
  2641. OpCase(jump_f): {
  2642. jump_f:
  2643. SET_I((BeamInstr *) Arg(0));
  2644. Goto(*I);
  2645. }
  2646. }
  2647. ASSERT(c_p->freason != BADMATCH || is_value(c_p->fvalue));
  2648. goto find_func_info;
  2649. }
  2650. OpCase(i_bor_jId):
  2651. {
  2652. Eterm result;
  2653. if (is_both_small(tmp_arg1, tmp_arg2)) {
  2654. /*
  2655. * No need to untag -- TAG | TAG == TAG.
  2656. */
  2657. result = tmp_arg1 | tmp_arg2;
  2658. STORE_ARITH_RESULT(result);
  2659. }
  2660. arith_func = ARITH_FUNC(bor);
  2661. goto do_big_arith2;
  2662. }
  2663. OpCase(i_bxor_jId):
  2664. {
  2665. Eterm result;
  2666. if (is_both_small(tmp_arg1, tmp_arg2)) {
  2667. /*
  2668. * We could extract the tag from one argument, but a tag extraction
  2669. * could mean a shift. Therefore, play it safe here.
  2670. */
  2671. result = make_small(signed_val(tmp_arg1) ^ signed_val(tmp_arg2));
  2672. STORE_ARITH_RESULT(result);
  2673. }
  2674. arith_func = ARITH_FUNC(bxor);
  2675. goto do_big_arith2;
  2676. }
  2677. {
  2678. Sint i;
  2679. Sint ires;
  2680. Eterm* bigp;
  2681. OpCase(i_bsr_jId):
  2682. if (is_small(tmp_arg2)) {
  2683. i = -signed_val(tmp_arg2);
  2684. if (is_small(tmp_arg1)) {
  2685. goto small_shift;
  2686. } else if (is_big(tmp_arg1)) {
  2687. if (i == 0) {
  2688. StoreBifResult(2, tmp_arg1);
  2689. }
  2690. goto big_shift;
  2691. }
  2692. } else if (is_big(tmp_arg2)) {
  2693. /*
  2694. * N bsr NegativeBigNum == N bsl MAX_SMALL
  2695. * N bsr PositiveBigNum == N bsl MIN_SMALL
  2696. */
  2697. tmp_arg2 = make_small(bignum_header_is_neg(*big_val(tmp_arg2)) ?
  2698. MAX_SMALL : MIN_SMALL);
  2699. goto do_bsl;
  2700. }
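/*
 * Worked example (illustrative): in `1 bsr (1 bsl 64)` the shift
 * count is a positive bignum, so it is clamped to MIN_SMALL and the
 * small right-shift case yields 0; `-1 bsr (1 bsl 64)` yields -1
 * the same way.
 */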
  2701. goto badarith;
  2702. OpCase(i_bsl_jId):
  2703. do_bsl:
  2704. if (is_small(tmp_arg2)) {
  2705. i = signed_val(tmp_arg2);
  2706. if (is_small(tmp_arg1)) {
  2707. small_shift:
  2708. ires = signed_val(tmp_arg1);
  2709. if (i == 0 || ires == 0) {
  2710. StoreBifResult(2, tmp_arg1);
  2711. } else if (i < 0) { /* Right shift */
  2712. i = -i;
  2713. if (i >= SMALL_BITS-1) {
  2714. tmp_arg1 = (ires < 0) ? SMALL_MINUS_ONE : SMALL_ZERO;
  2715. } else {
  2716. tmp_arg1 = make_small(ires >> i);
  2717. }
  2718. StoreBifResult(2, tmp_arg1);
  2719. } else if (i < SMALL_BITS-1) { /* Left shift */
  2720. if ((ires > 0 && ((~(Uint)0 << ((SMALL_BITS-1)-i)) & ires) == 0) ||
  2721. ((~(Uint)0 << ((SMALL_BITS-1)-i)) & ~ires) == 0) {
  2722. tmp_arg1 = make_small(ires << i);
  2723. StoreBifResult(2, tmp_arg1);
  2724. }
  2725. }
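/*
 * Note (illustrative): the masks in the left-shift test above check
 * that every bit shifted out, including the sign bit, is a copy of
 * the sign, i.e. that the shifted value still fits in a tagged
 * small; otherwise the operands fall through to the bignum shift
 * below.
 */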
  2726. tmp_arg1 = small_to_big(ires, tmp_big);
  2727. big_shift:
  2728. if (i > 0) { /* Left shift. */
  2729. ires = big_size(tmp_arg1) + (i / D_EXP);
  2730. } else { /* Right shift. */
  2731. ires = big_size(tmp_arg1);
  2732. if (ires <= (-i / D_EXP))
  2733. ires = 3; /* ??? */
  2734. else
  2735. ires -= (-i / D_EXP);
  2736. }
  2737. {
  2738. ires = BIG_NEED_SIZE(ires+1);
  2739. /*
2740. * A slightly conservative check of the size to avoid
  2741. * allocating huge amounts of memory for bignums that
  2742. * clearly would overflow the arity in the header
  2743. * word.
  2744. */
  2745. if (ires-8 > BIG_ARITY_MAX) {
  2746. c_p->freason = SYSTEM_LIMIT;
  2747. goto lb_Cl_error;
  2748. }
  2749. TestHeapPreserve(ires+1, Arg(1), tmp_arg1);
  2750. bigp = HTOP;
  2751. tmp_arg1 = big_lshift(tmp_arg1, i, bigp);
  2752. if (is_big(tmp_arg1)) {
  2753. HTOP += bignum_header_arity(*HTOP) + 1;
  2754. }
  2755. HEAP_SPACE_VERIFIED(0);
  2756. if (is_nil(tmp_arg1)) {
  2757. /*
2758. * This result must have been only slightly larger
  2759. * than allowed since it wasn't caught by the
  2760. * previous test.
  2761. */
  2762. c_p->freason = SYSTEM_LIMIT;
  2763. goto lb_Cl_error;
  2764. }
  2765. ERTS_HOLE_CHECK(c_p);
  2766. StoreBifResult(2, tmp_arg1);
  2767. }
  2768. } else if (is_big(tmp_arg1)) {
  2769. if (i == 0) {
  2770. StoreBifResult(2, tmp_arg1);
  2771. }
  2772. goto big_shift;
  2773. }
  2774. } else if (is_big(tmp_arg2)) {
  2775. if (bignum_header_is_neg(*big_val(tmp_arg2))) {
  2776. /*
  2777. * N bsl NegativeBigNum is either 0 or -1, depending on
  2778. * the sign of N. Since we don't believe this case
  2779. * is common, do the calculation with the minimum
  2780. * amount of code.
  2781. */
  2782. tmp_arg2 = make_small(MIN_SMALL);
  2783. goto do_bsl;
  2784. } else if (is_small(tmp_arg1) || is_big(tmp_arg1)) {
  2785. /*
  2786. * N bsl PositiveBigNum is too large to represent.
  2787. */
  2788. c_p->freason = SYSTEM_LIMIT;
  2789. goto lb_Cl_error;
  2790. }
  2791. /* Fall through if the left argument is not an integer. */
  2792. }
  2793. /*
  2794. * One or more non-integer arguments.
  2795. */
  2796. goto badarith;
  2797. }
  2798. OpCase(i_int_bnot_jsId):
  2799. {
  2800. Eterm bnot_val;
  2801. GetArg1(1, bnot_val);
  2802. if (is_small(bnot_val)) {
  2803. bnot_val = make_small(~signed_val(bnot_val));
  2804. } else {
  2805. Uint live = Arg(2);
  2806. SWAPOUT;
  2807. reg[0] = r(0);
  2808. reg[live] = bnot_val;
  2809. bnot_val = erts_gc_bnot(c_p, reg, live);
  2810. r(0) = reg[0];
  2811. SWAPIN;
  2812. ERTS_HOLE_CHECK(c_p);
  2813. if (is_nil(bnot_val)) {
  2814. goto lb_Cl_error;
  2815. }
  2816. }
  2817. StoreBifResult(3, bnot_val);
  2818. }
  2819. badarith:
  2820. c_p->freason = BADARITH;
  2821. goto lb_Cl_error;
  2822. OpCase(i_apply): {
  2823. BeamInstr *next;
  2824. SWAPOUT;
  2825. next = apply(c_p, r(0), x(1), x(2), reg);
  2826. SWAPIN;
  2827. if (next != NULL) {
  2828. r(0) = reg[0];
  2829. SET_CP(c_p, I+1);
  2830. SET_I(next);
  2831. Dispatch();
  2832. }
  2833. I = handle_error(c_p, I, reg, apply_3);
  2834. goto post_error_handling;
  2835. }
  2836. OpCase(i_apply_last_P): {
  2837. BeamInstr *next;
  2838. SWAPOUT;
  2839. next = apply(c_p, r(0), x(1), x(2), reg);
  2840. SWAPIN;
  2841. if (next != NULL) {
  2842. r(0) = reg[0];
  2843. SET_CP(c_p, (BeamInstr *) EXPAND_POINTER(E[0]));
  2844. E = ADD_BYTE_OFFSET(E, Arg(0));
  2845. SET_I(next);
  2846. Dispatch();
  2847. }
  2848. I = handle_error(c_p, I, reg, apply_3);
  2849. goto post_error_handling;
  2850. }
  2851. OpCase(i_apply_only): {
  2852. BeamInstr *next;
  2853. SWAPOUT;
  2854. next = apply(c_p, r(0), x(1), x(2), reg);
  2855. SWAPIN;
  2856. if (next != NULL) {
  2857. r(0) = reg[0];
  2858. SET_I(next);
  2859. Dispatch();
  2860. }
  2861. I = handle_error(c_p, I, reg, apply_3);
  2862. goto post_error_handling;
  2863. }
  2864. OpCase(apply_I): {
  2865. BeamInstr *next;
  2866. reg[0] = r(0);
  2867. SWAPOUT;
  2868. next = fixed_apply(c_p, reg, Arg(0));
  2869. SWAPIN;
  2870. if (next != NULL) {
  2871. r(0) = reg[0];
  2872. SET_CP(c_p, I+2);
  2873. SET_I(next);
  2874. Dispatch();
  2875. }
  2876. I = handle_error(c_p, I, reg, apply_3);
  2877. goto post_error_handling;
  2878. }
  2879. OpCase(apply_last_IP): {
  2880. BeamInstr *next;
  2881. reg[0] = r(0);
  2882. SWAPOUT;
  2883. next = fixed_apply(c_p, reg, Arg(0));
  2884. SWAPIN;
  2885. if (next != NULL) {
  2886. r(0) = reg[0];
  2887. SET_CP(c_p, (BeamInstr *) EXPAND_POINTER(E[0]));
  2888. E = ADD_BYTE_OFFSET(E, Arg(1));
  2889. SET_I(next);
  2890. Dispatch();
  2891. }
  2892. I = handle_error(c_p, I, reg, apply_3);
  2893. goto post_error_handling;
  2894. }
  2895. OpCase(i_apply_fun): {
  2896. BeamInstr *next;
  2897. SWAPOUT;
  2898. next = apply_fun(c_p, r(0), x(1), reg);
  2899. SWAPIN;
  2900. if (next != NULL) {
  2901. r(0) = reg[0];
  2902. SET_CP(c_p, I+1);
  2903. SET_I(next);
  2904. Dispatchfun();
  2905. }
  2906. goto find_func_info;
  2907. }
  2908. OpCase(i_apply_fun_last_P): {
  2909. BeamInstr *next;
  2910. SWAPOUT;
  2911. next = apply_fun(c_p, r(0), x(1), reg);
  2912. SWAPIN;
  2913. if (next != NULL) {
  2914. r(0) = reg[0];
  2915. SET_CP(c_p, (BeamInstr *) EXPAND_POINTER(E[0]));
  2916. E = ADD_BYTE_OFFSET(E, Arg(0));
  2917. SET_I(next);
  2918. Dispatchfun();
  2919. }
  2920. goto find_func_info;
  2921. }
  2922. OpCase(i_apply_fun_only): {
  2923. BeamInstr *next;
  2924. SWAPOUT;
  2925. next = apply_fun(c_p, r(0), x(1), reg);
  2926. SWAPIN;
  2927. if (next != NULL) {
  2928. r(0) = reg[0];
  2929. SET_I(next);
  2930. Dispatchfun();
  2931. }
  2932. goto find_func_info;
  2933. }
  2934. OpCase(i_call_fun_I): {
  2935. BeamInstr *next;
  2936. SWAPOUT;
  2937. reg[0] = r(0);
  2938. next = call_fun(c_p, Arg(0), reg, THE_NON_VALUE);
  2939. SWAPIN;
  2940. if (next != NULL) {
  2941. r(0) = reg[0];
  2942. SET_CP(c_p, I+2);
  2943. SET_I(next);
  2944. Dispatchfun();
  2945. }
  2946. goto find_func_info;
  2947. }
  2948. OpCase(i_call_fun_last_IP): {
  2949. BeamInstr *next;
  2950. SWAPOUT;
  2951. reg[0] = r(0);
  2952. next = call_fun(c_p, Arg(0), reg, THE_NON_VALUE);
  2953. SWAPIN;
  2954. if (next != NULL) {
  2955. r(0) = reg[0];
  2956. SET_CP(c_p, (BeamInstr *) EXPAND_POINTER(E[0]));
  2957. E = ADD_BYTE_OFFSET(E, Arg(1));
  2958. SET_I(next);
  2959. Dispatchfun();
  2960. }
  2961. goto find_func_info;
  2962. }
  2963. #ifdef DEBUG
  2964. /*
  2965. * Set a breakpoint here to get control just after a call instruction.
  2966. * I points to the first instruction in the called function.
  2967. *
  2968. * In gdb, use 'call dis(I-5, 1)' to show the name of the function.
  2969. */
  2970. do_dispatch:
  2971. DispatchMacro();
  2972. do_dispatchx:
  2973. DispatchMacrox();
  2974. do_dispatchfun:
  2975. DispatchMacroFun();
  2976. #endif
  2977. /*
  2978. * Jumped to from the Dispatch() macro when the reductions are used up.
  2979. *
  2980. * Since the I register points just beyond the FuncBegin instruction, we
  2981. * can get the module, function, and arity for the function being
  2982. * called from I[-3], I[-2], and I[-1] respectively.
  2983. */
  2984. context_switch_fun:
  2985. c_p->arity = I[-1] + 1;
  2986. goto context_switch2;
  2987. context_switch:
  2988. c_p->arity = I[-1];
  2989. context_switch2: /* Entry for fun calls. */
  2990. c_p->current = I-3; /* Pointer to Mod, Func, Arity */
  2991. {
  2992. Eterm* argp;
  2993. int i;
  2994. /*
  2995. * Make sure that there is enough room for the argument registers to be saved.
  2996. */
  2997. if (c_p->arity > c_p->max_arg_reg) {
  2998. /*
  2999. * Yes, this is an expensive operation, but you only pay it the first
  3000. * time you call a function with more than 6 arguments which is
  3001. * scheduled out. This is better than paying for 26 words of wasted
  3002. * space for most processes which never call functions with more than
  3003. * 6 arguments.
  3004. */
  3005. Uint size = c_p->arity * sizeof(c_p->arg_reg[0]);
  3006. if (c_p->arg_reg != c_p->def_arg_reg) {
  3007. c_p->arg_reg = (Eterm *) erts_realloc(ERTS_ALC_T_ARG_REG,
  3008. (void *) c_p->arg_reg,
  3009. size);
  3010. } else {
  3011. c_p->arg_reg = (Eterm *) erts_alloc(ERTS_ALC_T_ARG_REG, size);
  3012. }
  3013. c_p->max_arg_reg = c_p->arity;
  3014. }
  3015. /*
  3016. * Since REDS_IN(c_p) is stored in the save area (c_p->arg_reg) we must read it
  3017. * now before saving registers.
  3018. *
  3019. * The '+ 1' compensates for the last increment which was not done
3020. * (because the code for the Dispatch() macro becomes shorter that way).
  3021. */
  3022. reds_used = REDS_IN(c_p) - FCALLS + 1;
  3023. /*
  3024. * Save the argument registers and everything else.
  3025. */
  3026. argp = c_p->arg_reg;
  3027. for (i = c_p->arity - 1; i > 0; i--) {
  3028. argp[i] = reg[i];
  3029. }
  3030. c_p->arg_reg[0] = r(0);
  3031. SWAPOUT;
  3032. c_p->i = I;
  3033. goto do_schedule1;
  3034. }
  3035. OpCase(set_tuple_element_sdP): {
  3036. Eterm element;
  3037. Eterm tuple;
  3038. BeamInstr *next;
  3039. Eterm* p;
  3040. PreFetch(3, next);
  3041. GetArg2(0, element, tuple);
  3042. ASSERT(is_tuple(tuple));
  3043. p = (Eterm *) ((unsigned char *) tuple_val(tuple) + Arg(2));
  3044. *p = element;
  3045. NextPF(3, next);
  3046. }
  3047. OpCase(i_is_ne_exact_f):
  3048. if (EQ(tmp_arg1, tmp_arg2)) {
  3049. ClauseFail();
  3050. }
  3051. Next(1);
  3052. OpCase(normal_exit): {
  3053. SWAPOUT;
  3054. c_p->freason = EXC_NORMAL;
3055. c_p->arity = 0; /* In case this process is ever garbage collected again. */
  3056. ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
  3057. erts_do_exit_process(c_p, am_normal);
  3058. ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
  3059. goto do_schedule;
  3060. }
  3061. OpCase(continue_exit): {
  3062. ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
  3063. erts_continue_exit_process(c_p);
  3064. ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
  3065. goto do_schedule;
  3066. }
  3067. OpCase(raise_ss): {
  3068. /* This was not done very well in R10-0; then, we passed the tag in
  3069. the first argument and hoped that the existing c_p->ftrace was
  3070. still correct. But the ftrace-object already includes the tag
  3071. (or rather, the freason). Now, we pass the original ftrace in
  3072. the first argument. We also handle atom tags in the first
  3073. argument for backwards compatibility.
  3074. */
  3075. Eterm raise_val1;
  3076. Eterm raise_val2;
  3077. GetArg2(0, raise_val1, raise_val2);
  3078. c_p->fvalue = raise_val2;
  3079. if (c_p->freason == EXC_NULL) {
  3080. /* a safety check for the R10-0 case; should not happen */
  3081. c_p->ftrace = NIL;
  3082. c_p->freason = EXC_ERROR;
  3083. }
  3084. /* for R10-0 code, keep existing c_p->ftrace and hope it's correct */
  3085. switch (raise_val1) {
  3086. case am_throw:
  3087. c_p->freason = EXC_THROWN & ~EXF_SAVETRACE;
  3088. break;
  3089. case am_error:
  3090. c_p->freason = EXC_ERROR & ~EXF_SAVETRACE;
  3091. break;
  3092. case am_exit:
  3093. c_p->freason = EXC_EXIT & ~EXF_SAVETRACE;
  3094. break;
  3095. default:
  3096. {/* R10-1 and later
  3097. XXX note: should do sanity check on given trace if it can be
  3098. passed from a user! Currently only expecting generated calls.
  3099. */
  3100. struct StackTrace *s;
  3101. c_p->ftrace = raise_val1;
  3102. s = get_trace_from_exc(raise_val1);
  3103. if (s == NULL) {
  3104. c_p->freason = EXC_ERROR;
  3105. } else {
  3106. c_p->freason = PRIMARY_EXCEPTION(s->freason);
  3107. }
  3108. }
  3109. }
  3110. goto find_func_info;
  3111. }
  3112. {
  3113. Eterm badmatch_val;
  3114. OpCase(badmatch_y):
  3115. badmatch_val = yb(Arg(0));
  3116. goto do_badmatch;
  3117. OpCase(badmatch_x):
  3118. badmatch_val = xb(Arg(0));
  3119. goto do_badmatch;
  3120. OpCase(badmatch_r):
  3121. badmatch_val = r(0);
  3122. do_badmatch:
  3123. c_p->fvalue = badmatch_val;
  3124. c_p->freason = BADMATCH;
  3125. }
  3126. /* Fall through here */
  3127. find_func_info: {
  3128. reg[0] = r(0);
  3129. SWAPOUT;
  3130. I = handle_error(c_p, I, reg, NULL);
  3131. goto post_error_handling;
  3132. }
  3133. OpCase(call_error_handler):
  3134. /*
  3135. * At this point, I points to the code[3] in the export entry for
  3136. * a function which is not loaded.
  3137. *
  3138. * code[0]: Module
  3139. * code[1]: Function
  3140. * code[2]: Arity
  3141. * code[3]: &&call_error_handler
  3142. * code[4]: Not used
  3143. */
  3144. SWAPOUT;
  3145. reg[0] = r(0);
  3146. I = call_error_handler(c_p, I-3, reg, am_undefined_function);
  3147. r(0) = reg[0];
  3148. SWAPIN;
  3149. if (I) {
  3150. Goto(*I);
  3151. }
  3152. /* Fall through */
  3153. OpCase(error_action_code): {
  3154. handle_error:
  3155. reg[0] = r(0);
  3156. SWAPOUT;
  3157. I = handle_error(c_p, NULL, reg, NULL);
  3158. post_error_handling:
  3159. if (I == 0) {
  3160. goto do_schedule;
  3161. } else {
  3162. r(0) = reg[0];
  3163. ASSERT(!is_value(r(0)));
  3164. if (c_p->mbuf) {
  3165. erts_garbage_collect(c_p, 0, reg+1, 3);
  3166. }
  3167. SWAPIN;
  3168. Goto(*I);
  3169. }
  3170. }
  3171. {
  3172. Eterm nif_bif_result;
  3173. Eterm bif_nif_arity;
  3174. OpCase(call_nif):
  3175. {
  3176. /*
3177. * call_nif is always the first instruction in a function:
  3178. *
  3179. * I[-3]: Module
  3180. * I[-2]: Function
  3181. * I[-1]: Arity
  3182. * I[0]: &&call_nif
  3183. * I[1]: Function pointer to NIF function
  3184. * I[2]: Pointer to erl_module_nif
  3185. * I[3]: Function pointer to dirty NIF
  3186. */
  3187. BifFunction vbf;
  3188. DTRACE_NIF_ENTRY(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
  3189. c_p->current = I-3; /* current and vbf set to please handle_error */
  3190. SWAPOUT;
  3191. c_p->fcalls = FCALLS - 1;
  3192. PROCESS_MAIN_CHK_LOCKS(c_p);
  3193. bif_nif_arity = I[-1];
  3194. ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
  3195. ASSERT(!ERTS_PROC_IS_EXITING(c_p));
  3196. {
  3197. typedef Eterm NifF(struct enif_environment_t*, int argc, Eterm argv[]);
  3198. NifF* fp = vbf = (NifF*) I[1];
  3199. struct enif_environment_t env;
  3200. erts_pre_nif(&env, c_p, (struct erl_module_nif*)I[2]);
  3201. reg[0] = r(0);
  3202. nif_bif_result = (*fp)(&env, bif_nif_arity, reg);
  3203. if (env.exception_thrown)
  3204. nif_bif_result = THE_NON_VALUE;
  3205. erts_post_nif(&env);
  3206. }
  3207. ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(nif_bif_result));
  3208. PROCESS_MAIN_CHK_LOCKS(c_p);
  3209. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
  3210. DTRACE_NIF_RETURN(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
  3211. goto apply_bif_or_nif_epilogue;
  3212. OpCase(apply_bif):
  3213. /*
  3214. * At this point, I points to the code[3] in the export entry for
  3215. * the BIF:
  3216. *
  3217. * code[0]: Module
  3218. * code[1]: Function
  3219. * code[2]: Arity
  3220. * code[3]: &&apply_bif
  3221. * code[4]: Function pointer to BIF function
  3222. */
  3223. c_p->current = I-3; /* In case we apply process_info/1,2 or load_nif/1 */
  3224. c_p->i = I; /* In case we apply check_process_code/2. */
  3225. c_p->arity = 0; /* To allow garbage collection on ourselves
  3226. * (check_process_code/2).
  3227. */
  3228. DTRACE_BIF_ENTRY(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
  3229. SWAPOUT;
  3230. c_p->fcalls = FCALLS - 1;
  3231. vbf = (BifFunction) Arg(0);
  3232. PROCESS_MAIN_CHK_LOCKS(c_p);
  3233. bif_nif_arity = I[-1];
  3234. ASSERT(bif_nif_arity <= 4);
  3235. ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
  3236. reg[0] = r(0);
  3237. {
  3238. Eterm (*bf)(Process*, Eterm*, BeamInstr*) = vbf;
  3239. ASSERT(!ERTS_PROC_IS_EXITING(c_p));
  3240. nif_bif_result = (*bf)(c_p, reg, I);
  3241. ASSERT(!ERTS_PROC_IS_EXITING(c_p) ||
  3242. is_non_value(nif_bif_result));
  3243. ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
  3244. PROCESS_MAIN_CHK_LOCKS(c_p);
  3245. }
  3246. DTRACE_BIF_RETURN(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
  3247. apply_bif_or_nif_epilogue:
  3248. ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
  3249. ERTS_HOLE_CHECK(c_p);
  3250. if (c_p->mbuf) {
  3251. nif_bif_result = erts_gc_after_bif_call(c_p, nif_bif_result,
  3252. reg, bif_nif_arity);
  3253. }
  3254. SWAPIN; /* There might have been a garbage collection. */
  3255. FCALLS = c_p->fcalls;
  3256. if (is_value(nif_bif_result)) {
  3257. r(0) = nif_bif_result;
  3258. CHECK_TERM(r(0));
  3259. SET_I(c_p->cp);
  3260. c_p->cp = 0;
  3261. Goto(*I);
  3262. } else if (c_p->freason == TRAP) {
  3263. SET_I(c_p->i);
  3264. r(0) = reg[0];
  3265. if (c_p->flags & F_HIBERNATE_SCHED) {
  3266. c_p->flags &= ~F_HIBERNATE_SCHED;
  3267. goto do_schedule;
  3268. }
  3269. Dispatch();
  3270. }
  3271. I = handle_error(c_p, c_p->cp, reg, vbf);
  3272. goto post_error_handling;
  3273. }
  3274. }
  3275. OpCase(i_get_sd):
  3276. {
  3277. Eterm arg;
  3278. Eterm result;
  3279. GetArg1(0, arg);
  3280. result = erts_pd_hash_get(c_p, arg);
  3281. StoreBifResult(1, result);
  3282. }
  3283. {
  3284. Eterm case_end_val;
  3285. OpCase(case_end_x):
  3286. case_end_val = xb(Arg(0));
  3287. goto do_case_end;
  3288. OpCase(case_end_y):
  3289. case_end_val = yb(Arg(0));
  3290. goto do_case_end;
  3291. OpCase(case_end_r):
  3292. case_end_val = r(0);
  3293. do_case_end:
  3294. c_p->fvalue = case_end_val;
  3295. c_p->freason = EXC_CASE_CLAUSE;
  3296. goto find_func_info;
  3297. }
  3298. OpCase(if_end):
  3299. c_p->freason = EXC_IF_CLAUSE;
  3300. goto find_func_info;
  3301. OpCase(i_func_info_IaaI): {
  3302. c_p->freason = EXC_FUNCTION_CLAUSE;
  3303. c_p->current = I + 2;
  3304. goto handle_error;
  3305. }
  3306. OpCase(try_case_end_s):
  3307. {
  3308. Eterm try_case_end_val;
  3309. GetArg1(0, try_case_end_val);
  3310. c_p->fvalue = try_case_end_val;
  3311. c_p->freason = EXC_TRY_CLAUSE;
  3312. goto find_func_info;
  3313. }
  3314. /*
  3315. * Construction of binaries using new instructions.
  3316. */
  3317. {
  3318. Eterm new_binary;
  3319. Eterm num_bits_term;
  3320. Uint num_bits;
  3321. Uint alloc;
  3322. Uint num_bytes;
  3323. OpCase(i_bs_init_bits_heap_IIId): {
  3324. num_bits = Arg(0);
  3325. alloc = Arg(1);
  3326. I++;
  3327. goto do_bs_init_bits_known;
  3328. }
  3329. OpCase(i_bs_init_bits_IId): {
  3330. num_bits = Arg(0);
  3331. alloc = 0;
  3332. goto do_bs_init_bits_known;
  3333. }
  3334. OpCase(i_bs_init_bits_fail_heap_IjId): {
  3335. /* tmp_arg1 was fetched by an i_fetch instruction */
  3336. num_bits_term = tmp_arg1;
  3337. alloc = Arg(0);
  3338. I++;
  3339. goto do_bs_init_bits;
  3340. }
  3341. OpCase(i_bs_init_bits_fail_rjId): {
  3342. num_bits_term = r(0);
  3343. alloc = 0;
  3344. goto do_bs_init_bits;
  3345. }
  3346. OpCase(i_bs_init_bits_fail_yjId): {
  3347. num_bits_term = yb(Arg(0));
  3348. I++;
  3349. alloc = 0;
  3350. goto do_bs_init_bits;
  3351. }
  3352. OpCase(i_bs_init_bits_fail_xjId): {
  3353. num_bits_term = xb(Arg(0));
  3354. I++;
  3355. alloc = 0;
  3356. /* FALL THROUGH */
  3357. }
  3358. /* num_bits_term = Term for number of bits to build (small/big)
  3359. * alloc = Number of words to allocate on heap
  3360. * Operands: Fail Live Dst
  3361. */
  3362. do_bs_init_bits:
  3363. if (is_small(num_bits_term)) {
  3364. Sint size = signed_val(num_bits_term);
  3365. if (size < 0) {
  3366. goto badarg;
  3367. }
  3368. num_bits = (Uint) size;
  3369. } else {
  3370. Uint bits;
  3371. if (!term_to_Uint(num_bits_term, &bits)) {
  3372. c_p->freason = bits;
  3373. goto lb_Cl_error;
  3374. }
  3375. num_bits = (Eterm) bits;
  3376. }
  3377. /* num_bits = Number of bits to build
  3378. * alloc = Number of extra words to allocate on heap
  3379. * Operands: NotUsed Live Dst
  3380. */
  3381. do_bs_init_bits_known:
  3382. num_bytes = ((Uint64)num_bits+(Uint64)7) >> 3;
  3383. if (num_bits & 7) {
  3384. alloc += ERL_SUB_BIN_SIZE;
  3385. }
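/*
 * Worked example (illustrative): building a 17-bit binary gives
 * num_bytes = (17+7)>>3 = 3, and since 17 & 7 != 0 a sub-binary is
 * later wrapped around the 3-byte binary to record the exact bit
 * size.
 */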
  3386. if (num_bytes <= ERL_ONHEAP_BIN_LIMIT) {
  3387. alloc += heap_bin_size(num_bytes);
  3388. } else {
  3389. alloc += PROC_BIN_SIZE;
  3390. }
  3391. TestHeap(alloc, Arg(1));
  3392. /* num_bits = Number of bits to build
  3393. * num_bytes = Number of bytes to allocate in the binary
  3394. * alloc = Total number of words to allocate on heap
  3395. * Operands: NotUsed NotUsed Dst
  3396. */
  3397. if (num_bytes <= ERL_ONHEAP_BIN_LIMIT) {
  3398. ErlHeapBin* hb;
  3399. erts_bin_offset = 0;
  3400. erts_writable_bin = 0;
  3401. hb = (ErlHeapBin *) HTOP;
  3402. HTOP += heap_bin_size(num_bytes);
  3403. hb->thing_word = header_heap_bin(num_bytes);
  3404. hb->size = num_bytes;
  3405. erts_current_bin = (byte *) hb->data;
  3406. new_binary = make_binary(hb);
  3407. do_bits_sub_bin:
  3408. if (num_bits & 7) {
  3409. ErlSubBin* sb;
  3410. sb = (ErlSubBin *) HTOP;
  3411. HTOP += ERL_SUB_BIN_SIZE;
  3412. sb->thing_word = HEADER_SUB_BIN;
  3413. sb->size = num_bytes - 1;
  3414. sb->bitsize = num_bits & 7;
  3415. sb->offs = 0;
  3416. sb->bitoffs = 0;
  3417. sb->is_writable = 0;
  3418. sb->orig = new_binary;
  3419. new_binary = make_binary(sb);
  3420. }
  3421. HEAP_SPACE_VERIFIED(0);
  3422. StoreBifResult(2, new_binary);
  3423. } else {
  3424. Binary* bptr;
  3425. ProcBin* pb;
  3426. erts_bin_offset = 0;
  3427. erts_writable_bin = 0;
  3428. /*
  3429. * Allocate the binary struct itself.
  3430. */
  3431. bptr = erts_bin_nrml_alloc(num_bytes);
  3432. erts_refc_init(&bptr->refc, 1);
  3433. erts_current_bin = (byte *) bptr->orig_bytes;
  3434. /*
  3435. * Now allocate the ProcBin on the heap.
  3436. */
  3437. pb = (ProcBin *) HTOP;
  3438. HTOP += PROC_BIN_SIZE;
  3439. pb->thing_word = HEADER_PROC_BIN;
  3440. pb->size = num_bytes;
  3441. pb->next = MSO(c_p).first;
  3442. MSO(c_p).first = (struct erl_off_heap_header*) pb;
  3443. pb->val = bptr;
  3444. pb->bytes = (byte*) bptr->orig_bytes;
  3445. pb->flags = 0;
  3446. OH_OVERHEAD(&(MSO(c_p)), pb->size / sizeof(Eterm));
  3447. new_binary = make_binary(pb);
  3448. goto do_bits_sub_bin;
  3449. }
  3450. }
  3451. {
  3452. OpCase(i_bs_init_fail_heap_IjId): {
  3453. /* tmp_arg1 was fetched by an i_fetch instruction */
  3454. tmp_arg2 = Arg(0);
  3455. I++;
  3456. goto do_bs_init;
  3457. }
  3458. OpCase(i_bs_init_fail_rjId): {
  3459. tmp_arg1 = r(0);
  3460. tmp_arg2 = 0;
  3461. goto do_bs_init;
  3462. }
  3463. OpCase(i_bs_init_fail_yjId): {
  3464. tmp_arg1 = yb(Arg(0));
  3465. tmp_arg2 = 0;
  3466. I++;
  3467. goto do_bs_init;
  3468. }
  3469. OpCase(i_bs_init_fail_xjId): {
  3470. tmp_arg1 = xb(Arg(0));
  3471. tmp_arg2 = 0;
  3472. I++;
  3473. }
  3474. /* FALL THROUGH */
  3475. do_bs_init:
  3476. if (is_small(tmp_arg1)) {
  3477. Sint size = signed_val(tmp_arg1);
  3478. if (size < 0) {
  3479. goto badarg;
  3480. }
  3481. tmp_arg1 = (Eterm) size;
  3482. } else {
  3483. Uint bytes;
  3484. if (!term_to_Uint(tmp_arg1, &bytes)) {
  3485. c_p->freason = bytes;
  3486. goto lb_Cl_error;
  3487. }
  3488. if ((bytes >> (8*sizeof(Uint)-3)) != 0) {
  3489. goto system_limit;
  3490. }
  3491. tmp_arg1 = (Eterm) bytes;
  3492. }
  3493. if (tmp_arg1 <= ERL_ONHEAP_BIN_LIMIT) {
  3494. goto do_heap_bin_alloc;
  3495. } else {
  3496. goto do_proc_bin_alloc;
  3497. }
  3498. OpCase(i_bs_init_heap_IIId): {
  3499. tmp_arg1 = Arg(0);
  3500. tmp_arg2 = Arg(1);
  3501. I++;
  3502. goto do_proc_bin_alloc;
  3503. }
  3504. OpCase(i_bs_init_IId): {
  3505. tmp_arg1 = Arg(0);
  3506. tmp_arg2 = 0;
  3507. }
  3508. /* FALL THROUGH */
  3509. do_proc_bin_alloc: {
  3510. Binary* bptr;
  3511. ProcBin* pb;
  3512. erts_bin_offset = 0;
  3513. erts_writable_bin = 0;
  3514. TestBinVHeap(tmp_arg1 / sizeof(Eterm),
  3515. tmp_arg2 + PROC_BIN_SIZE + ERL_SUB_BIN_SIZE, Arg(1));
  3516. /*
  3517. * Allocate the binary struct itself.
  3518. */
  3519. bptr = erts_bin_nrml_alloc(tmp_arg1);
  3520. erts_refc_init(&bptr->refc, 1);
  3521. erts_current_bin = (byte *) bptr->orig_bytes;
  3522. /*
  3523. * Now allocate the ProcBin on the heap.
  3524. */
  3525. pb = (ProcBin *) HTOP;
  3526. HTOP += PROC_BIN_SIZE;
  3527. pb->thing_word = HEADER_PROC_BIN;
  3528. pb->size = tmp_arg1;
  3529. pb->next = MSO(c_p).first;
  3530. MSO(c_p).first = (struct erl_off_heap_header*) pb;
  3531. pb->val = bptr;
  3532. pb->bytes = (byte*) bptr->orig_bytes;
  3533. pb->flags = 0;
  3534. OH_OVERHEAD(&(MSO(c_p)), tmp_arg1 / sizeof(Eterm));
  3535. StoreBifResult(2, make_binary(pb));
  3536. }
  3537. OpCase(i_bs_init_heap_bin_heap_IIId): {
  3538. tmp_arg1 = Arg(0);
  3539. tmp_arg2 = Arg(1);
  3540. I++;
  3541. goto do_heap_bin_alloc;
  3542. }
  3543. OpCase(i_bs_init_heap_bin_IId): {
  3544. tmp_arg1 = Arg(0);
  3545. tmp_arg2 = 0;
  3546. }
  3547. /* Fall through */
  3548. do_heap_bin_alloc:
  3549. {
  3550. ErlHeapBin* hb;
  3551. Uint bin_need;
  3552. bin_need = heap_bin_size(tmp_arg1);
  3553. erts_bin_offset = 0;
  3554. erts_writable_bin = 0;
  3555. TestHeap(bin_need+tmp_arg2+ERL_SUB_BIN_SIZE, Arg(1));
  3556. hb = (ErlHeapBin *) HTOP;
  3557. HTOP += bin_need;
  3558. hb->thing_word = header_heap_bin(tmp_arg1);
  3559. hb->size = tmp_arg1;
  3560. erts_current_bin = (byte *) hb->data;
  3561. tmp_arg1 = make_binary(hb);
  3562. StoreBifResult(2, tmp_arg1);
  3563. }
  3564. }
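/*
 * Note on the two allocation paths above: binaries of at most
 * ERL_ONHEAP_BIN_LIMIT bytes (64 at the time of writing) are built
 * directly on the process heap as an ErlHeapBin, while larger ones
 * live in a reference-counted off-heap Binary reached through an
 * on-heap ProcBin handle, so only PROC_BIN_SIZE words of heap are
 * needed regardless of payload size (sketch):
 *
 *   if (size <= ERL_ONHEAP_BIN_LIMIT)
 *       ... ErlHeapBin built at HTOP ...
 *   else
 *       ... erts_bin_nrml_alloc(size) + ProcBin handle at HTOP ...
 */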
  3565. OpCase(i_bs_add_jId): {
  3566. Uint Unit = Arg(1);
  3567. if (is_both_small(tmp_arg1, tmp_arg2)) {
  3568. Sint Arg1 = signed_val(tmp_arg1);
  3569. Sint Arg2 = signed_val(tmp_arg2);
  3570. if (Arg1 >= 0 && Arg2 >= 0) {
  3571. BsSafeMul(Arg2, Unit, goto system_limit, tmp_arg1);
  3572. tmp_arg1 += Arg1;
  3573. store_bs_add_result:
  3574. if (MY_IS_SSMALL((Sint) tmp_arg1)) {
  3575. tmp_arg1 = make_small(tmp_arg1);
  3576. } else {
  3577. /*
  3578. * May generate a heap fragment, but in this
  3579. * particular case it is OK, since the value will be
  3580. * stored into an x register (the GC will scan x
  3581. * registers for references to heap fragments) and
3582. * there is no risk that the value will be stored into a
  3583. * location that is not scanned for heap-fragment
  3584. * references (such as the heap).
  3585. */
  3586. SWAPOUT;
  3587. tmp_arg1 = erts_make_integer(tmp_arg1, c_p);
  3588. HTOP = HEAP_TOP(c_p);
  3589. }
  3590. StoreBifResult(2, tmp_arg1);
  3591. }
  3592. goto badarg;
  3593. } else {
  3594. Uint a;
  3595. Uint b;
  3596. Uint c;
  3597. /*
  3598. * Now we know that one of the arguments is
  3599. * not a small. We must convert both arguments
  3600. * to Uints and check for errors at the same time.
  3601. *
  3602. * Error checking is tricky.
  3603. *
  3604. * If one of the arguments is not numeric or
  3605. * not positive, the error reason is BADARG.
  3606. *
  3607. * Otherwise if both arguments are numeric,
  3608. * but at least one argument does not fit in
3609. * a Uint, the reason is SYSTEM_LIMIT.
  3610. */
  3611. if (!term_to_Uint(tmp_arg1, &a)) {
  3612. if (a == BADARG) {
  3613. goto badarg;
  3614. }
  3615. if (!term_to_Uint(tmp_arg2, &b)) {
  3616. c_p->freason = b;
  3617. goto lb_Cl_error;
  3618. }
  3619. goto system_limit;
  3620. } else if (!term_to_Uint(tmp_arg2, &b)) {
  3621. c_p->freason = b;
  3622. goto lb_Cl_error;
  3623. }
  3624. /*
  3625. * The arguments are now correct and stored in a and b.
  3626. */
  3627. BsSafeMul(b, Unit, goto system_limit, c);
  3628. tmp_arg1 = a + c;
  3629. if (tmp_arg1 < a) {
  3630. /*
  3631. * If the result is less than one of the
  3632. * arguments, there must have been an overflow.
  3633. */
  3634. goto system_limit;
  3635. }
  3636. goto store_bs_add_result;
  3637. }
  3638. /* No fallthrough */
  3639. ASSERT(0);
  3640. }
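/*
 * A worked example of the overflow test above (a sketch, not
 * emulator code): for unsigned integers the sum wraps modulo 2^N,
 * so the result is smaller than either operand exactly when the
 * addition overflowed:
 *
 *   Uint a = ~(Uint) 0;   (largest Uint)
 *   Uint c = 1;
 *   Uint s = a + c;       (wraps around to 0)
 *   if (s < a) ...        (overflow detected => system_limit)
 */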
  3641. OpCase(bs_put_string_II):
  3642. {
  3643. BeamInstr *next;
  3644. PreFetch(2, next);
  3645. erts_new_bs_put_string(ERL_BITS_ARGS_2((byte *) Arg(1), Arg(0)));
  3646. NextPF(2, next);
  3647. }
  3648. /*
  3649. * tmp_arg1 = Number of bytes to build
  3650. * tmp_arg2 = Source binary
  3651. * Operands: Fail ExtraHeap Live Unit Dst
  3652. */
  3653. OpCase(i_bs_append_jIIId): {
  3654. Uint live = Arg(2);
  3655. Uint res;
  3656. SWAPOUT;
  3657. reg[0] = r(0);
  3658. reg[live] = tmp_arg2;
  3659. res = erts_bs_append(c_p, reg, live, tmp_arg1, Arg(1), Arg(3));
  3660. r(0) = reg[0];
  3661. SWAPIN;
  3662. if (is_non_value(res)) {
  3663. /* c_p->freason is already set (may be either BADARG or SYSTEM_LIMIT). */
  3664. goto lb_Cl_error;
  3665. }
  3666. StoreBifResult(4, res);
  3667. }
  3668. /*
  3669. * tmp_arg1 = Number of bytes to build
  3670. * tmp_arg2 = Source binary
  3671. * Operands: Fail Unit Dst
  3672. */
  3673. OpCase(i_bs_private_append_jId): {
  3674. Eterm res;
  3675. res = erts_bs_private_append(c_p, tmp_arg2, tmp_arg1, Arg(1));
  3676. if (is_non_value(res)) {
  3677. /* c_p->freason is already set (may be either BADARG or SYSTEM_LIMIT). */
  3678. goto lb_Cl_error;
  3679. }
  3680. StoreBifResult(2, res);
  3681. }
  3682. /*
3683. * r(0) = Initial size of writable binary
3684. * (the result is returned in r(0); no other operands)
  3685. */
  3686. OpCase(bs_init_writable): {
  3687. SWAPOUT;
  3688. r(0) = erts_bs_init_writable(c_p, r(0));
  3689. SWAPIN;
  3690. Next(0);
  3691. }
  3692. /*
3693. * Calculate the number of bytes needed to encode the source
3694. * operand to UTF-8. If the source operand is invalid (e.g. wrong
3695. * type or range) we return a nonsense integer result (some value
3696. * in the range 1-4). We can get away with that because we KNOW
3697. * that bs_put_utf8 will do full error checking.
  3698. */
  3699. OpCase(i_bs_utf8_size_sd): {
  3700. Eterm arg;
  3701. Eterm result;
  3702. GetArg1(0, arg);
  3703. if (arg < make_small(0x80UL)) {
  3704. result = make_small(1);
  3705. } else if (arg < make_small(0x800UL)) {
  3706. result = make_small(2);
  3707. } else if (arg < make_small(0x10000UL)) {
  3708. result = make_small(3);
  3709. } else {
  3710. result = make_small(4);
  3711. }
  3712. StoreBifResult(1, result);
  3713. }
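/*
 * Note on the comparisons above: they are done on *tagged* smalls.
 * make_small() is monotonic for non-negative values, so the tagged
 * comparisons order the same way as the raw code points and give
 * the standard UTF-8 lengths:
 *
 *   cp < 0x80    => 1 byte (ASCII)
 *   cp < 0x800   => 2 bytes
 *   cp < 0x10000 => 3 bytes
 *   otherwise    => 4 bytes
 */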
  3714. OpCase(i_bs_put_utf8_js): {
  3715. Eterm arg;
  3716. GetArg1(1, arg);
  3717. if (!erts_bs_put_utf8(ERL_BITS_ARGS_1(arg))) {
  3718. goto badarg;
  3719. }
  3720. Next(2);
  3721. }
  3722. /*
  3723. * Calculate the number of bytes needed to encode the source
3724. * operand to UTF-16. If the source operand is invalid (e.g. wrong
  3725. * type or range) we return a nonsense integer result (2 or 4). We
  3726. * can get away with that because we KNOW that bs_put_utf16 will do
  3727. * full error checking.
  3728. */
  3729. OpCase(i_bs_utf16_size_sd): {
  3730. Eterm arg;
  3731. Eterm result = make_small(2);
  3732. GetArg1(0, arg);
  3733. if (arg >= make_small(0x10000UL)) {
  3734. result = make_small(4);
  3735. }
  3736. StoreBifResult(1, result);
  3737. }
  3738. OpCase(i_bs_put_utf16_jIs): {
  3739. Eterm arg;
  3740. GetArg1(2, arg);
  3741. if (!erts_bs_put_utf16(ERL_BITS_ARGS_2(arg, Arg(1)))) {
  3742. goto badarg;
  3743. }
  3744. Next(3);
  3745. }
  3746. /*
  3747. * Only used for validating a value about to be stored in a binary.
  3748. */
  3749. OpCase(i_bs_validate_unicode_js): {
  3750. Eterm val;
  3751. GetArg1(1, val);
  3752. /*
  3753. * There is no need to untag the integer, but it IS necessary
  3754. * to make sure it is small (if the term is a bignum, it could
  3755. * slip through the test, and there is no further test that
3756. * would catch it, since bit syntax construction silently masks
3757. * numbers that are too large).
  3758. */
  3759. if (is_not_small(val) || val > make_small(0x10FFFFUL) ||
  3760. (make_small(0xD800UL) <= val && val <= make_small(0xDFFFUL))) {
  3761. goto badarg;
  3762. }
  3763. Next(2);
  3764. }
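/*
 * The predicate above, restated on untagged values (a sketch; the
 * instruction works directly on tagged smalls, and valid_scalar is
 * a hypothetical name, not an ERTS function):
 *
 *   int valid_scalar(Uint cp)
 *   {
 *       return cp <= 0x10FFFFUL &&
 *           !(0xD800UL <= cp && cp <= 0xDFFFUL);
 *   }
 *
 * i.e. all code points up to U+10FFFF except the UTF-16 surrogates.
 */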
  3765. /*
  3766. * Only used for validating a value matched out.
  3767. *
  3768. * tmp_arg1 = Integer to validate
  3769. * tmp_arg2 = Match context
  3770. */
  3771. OpCase(i_bs_validate_unicode_retract_j): {
  3772. /*
  3773. * There is no need to untag the integer, but it IS necessary
  3774. * to make sure it is small (a bignum pointer could fall in
  3775. * the valid range).
  3776. */
  3777. if (is_not_small(tmp_arg1) || tmp_arg1 > make_small(0x10FFFFUL) ||
  3778. (make_small(0xD800UL) <= tmp_arg1 &&
  3779. tmp_arg1 <= make_small(0xDFFFUL))) {
  3780. ErlBinMatchBuffer *mb = ms_matchbuffer(tmp_arg2);
  3781. mb->offset -= 32;
  3782. goto badarg;
  3783. }
  3784. Next(1);
  3785. }
  3786. /*
  3787. * Matching of binaries.
  3788. */
  3789. {
  3790. Eterm header;
  3791. BeamInstr *next;
  3792. Uint slots;
  3793. Eterm context;
  3794. OpCase(i_bs_start_match2_rfIId): {
  3795. context = r(0);
  3796. do_start_match:
  3797. slots = Arg(2);
  3798. if (!is_boxed(context)) {
  3799. ClauseFail();
  3800. }
  3801. PreFetch(4, next);
  3802. header = *boxed_val(context);
  3803. if (header_is_bin_matchstate(header)) {
  3804. ErlBinMatchState* ms = (ErlBinMatchState *) boxed_val(context);
  3805. Uint actual_slots = HEADER_NUM_SLOTS(header);
  3806. ms->save_offset[0] = ms->mb.offset;
  3807. if (actual_slots < slots) {
  3808. ErlBinMatchState* dst;
  3809. Uint live = Arg(1);
  3810. Uint wordsneeded = ERL_BIN_MATCHSTATE_SIZE(slots);
  3811. TestHeapPreserve(wordsneeded, live, context);
  3812. ms = (ErlBinMatchState *) boxed_val(context);
  3813. dst = (ErlBinMatchState *) HTOP;
  3814. *dst = *ms;
  3815. *HTOP = HEADER_BIN_MATCHSTATE(slots);
  3816. HTOP += wordsneeded;
  3817. HEAP_SPACE_VERIFIED(0);
  3818. StoreResult(make_matchstate(dst), Arg(3));
  3819. }
  3820. } else if (is_binary_header(header)) {
  3821. Eterm result;
  3822. Uint live = Arg(1);
  3823. Uint wordsneeded = ERL_BIN_MATCHSTATE_SIZE(slots);
  3824. TestHeapPreserve(wordsneeded, live, context);
  3825. HEAP_TOP(c_p) = HTOP;
  3826. #ifdef DEBUG
  3827. c_p->stop = E; /* Needed for checking in HeapOnlyAlloc(). */
  3828. #endif
  3829. result = erts_bs_start_match_2(c_p, context, slots);
  3830. HTOP = HEAP_TOP(c_p);
  3831. HEAP_SPACE_VERIFIED(0);
  3832. if (is_non_value(result)) {
  3833. ClauseFail();
  3834. } else {
  3835. StoreResult(result, Arg(3));
  3836. }
  3837. } else {
  3838. ClauseFail();
  3839. }
  3840. NextPF(4, next);
  3841. }
  3842. OpCase(i_bs_start_match2_xfIId): {
  3843. context = xb(Arg(0));
  3844. I++;
  3845. goto do_start_match;
  3846. }
  3847. OpCase(i_bs_start_match2_yfIId): {
  3848. context = yb(Arg(0));
  3849. I++;
  3850. goto do_start_match;
  3851. }
  3852. }
  3853. OpCase(bs_test_zero_tail2_fr): {
  3854. BeamInstr *next;
  3855. ErlBinMatchBuffer *_mb;
  3856. PreFetch(1, next);
  3857. _mb = (ErlBinMatchBuffer*) ms_matchbuffer(r(0));
  3858. if (_mb->size != _mb->offset) {
  3859. ClauseFail();
  3860. }
  3861. NextPF(1, next);
  3862. }
  3863. OpCase(bs_test_zero_tail2_fx): {
  3864. BeamInstr *next;
  3865. ErlBinMatchBuffer *_mb;
  3866. PreFetch(2, next);
  3867. _mb = (ErlBinMatchBuffer*) ms_matchbuffer(xb(Arg(1)));
  3868. if (_mb->size != _mb->offset) {
  3869. ClauseFail();
  3870. }
  3871. NextPF(2, next);
  3872. }
  3873. OpCase(bs_test_tail_imm2_frI): {
  3874. BeamInstr *next;
  3875. ErlBinMatchBuffer *_mb;
  3876. PreFetch(2, next);
  3877. _mb = ms_matchbuffer(r(0));
  3878. if (_mb->size - _mb->offset != Arg(1)) {
  3879. ClauseFail();
  3880. }
  3881. NextPF(2, next);
  3882. }
  3883. OpCase(bs_test_tail_imm2_fxI): {
  3884. BeamInstr *next;
  3885. ErlBinMatchBuffer *_mb;
  3886. PreFetch(3, next);
  3887. _mb = ms_matchbuffer(xb(Arg(1)));
  3888. if (_mb->size - _mb->offset != Arg(2)) {
  3889. ClauseFail();
  3890. }
  3891. NextPF(3, next);
  3892. }
  3893. OpCase(bs_test_unit_frI): {
  3894. BeamInstr *next;
  3895. ErlBinMatchBuffer *_mb;
  3896. PreFetch(2, next);
  3897. _mb = ms_matchbuffer(r(0));
  3898. if ((_mb->size - _mb->offset) % Arg(1)) {
  3899. ClauseFail();
  3900. }
  3901. NextPF(2, next);
  3902. }
  3903. OpCase(bs_test_unit_fxI): {
  3904. BeamInstr *next;
  3905. ErlBinMatchBuffer *_mb;
  3906. PreFetch(3, next);
  3907. _mb = ms_matchbuffer(xb(Arg(1)));
  3908. if ((_mb->size - _mb->offset) % Arg(2)) {
  3909. ClauseFail();
  3910. }
  3911. NextPF(3, next);
  3912. }
  3913. OpCase(bs_test_unit8_fr): {
  3914. BeamInstr *next;
  3915. ErlBinMatchBuffer *_mb;
  3916. PreFetch(1, next);
  3917. _mb = ms_matchbuffer(r(0));
  3918. if ((_mb->size - _mb->offset) & 7) {
  3919. ClauseFail();
  3920. }
  3921. NextPF(1, next);
  3922. }
  3923. OpCase(bs_test_unit8_fx): {
  3924. BeamInstr *next;
  3925. ErlBinMatchBuffer *_mb;
  3926. PreFetch(2, next);
  3927. _mb = ms_matchbuffer(xb(Arg(1)));
  3928. if ((_mb->size - _mb->offset) & 7) {
  3929. ClauseFail();
  3930. }
  3931. NextPF(2, next);
  3932. }
  3933. {
  3934. Eterm bs_get_integer8_context;
  3935. OpCase(i_bs_get_integer_8_rfd): {
  3936. bs_get_integer8_context = r(0);
  3937. goto do_bs_get_integer_8;
  3938. }
  3939. OpCase(i_bs_get_integer_8_xfd): {
  3940. bs_get_integer8_context = xb(Arg(0));
  3941. I++;
  3942. }
  3943. do_bs_get_integer_8: {
  3944. ErlBinMatchBuffer *_mb;
  3945. Eterm _result;
  3946. _mb = ms_matchbuffer(bs_get_integer8_context);
  3947. if (_mb->size - _mb->offset < 8) {
  3948. ClauseFail();
  3949. }
  3950. if (BIT_OFFSET(_mb->offset) != 0) {
  3951. _result = erts_bs_get_integer_2(c_p, 8, 0, _mb);
  3952. } else {
  3953. _result = make_small(_mb->base[BYTE_OFFSET(_mb->offset)]);
  3954. _mb->offset += 8;
  3955. }
  3956. StoreBifResult(1, _result);
  3957. }
  3958. }
  3959. {
  3960. Eterm bs_get_integer_16_context;
  3961. OpCase(i_bs_get_integer_16_rfd):
  3962. bs_get_integer_16_context = r(0);
  3963. goto do_bs_get_integer_16;
  3964. OpCase(i_bs_get_integer_16_xfd):
  3965. bs_get_integer_16_context = xb(Arg(0));
  3966. I++;
  3967. do_bs_get_integer_16:
  3968. {
  3969. ErlBinMatchBuffer *_mb;
  3970. Eterm _result;
  3971. _mb = ms_matchbuffer(bs_get_integer_16_context);
  3972. if (_mb->size - _mb->offset < 16) {
  3973. ClauseFail();
  3974. }
  3975. if (BIT_OFFSET(_mb->offset) != 0) {
  3976. _result = erts_bs_get_integer_2(c_p, 16, 0, _mb);
  3977. } else {
  3978. _result = make_small(get_int16(_mb->base+BYTE_OFFSET(_mb->offset)));
  3979. _mb->offset += 16;
  3980. }
  3981. StoreBifResult(1, _result);
  3982. }
  3983. }
  3984. {
  3985. Eterm bs_get_integer_32_context;
  3986. OpCase(i_bs_get_integer_32_rfId):
  3987. bs_get_integer_32_context = r(0);
  3988. goto do_bs_get_integer_32;
  3989. OpCase(i_bs_get_integer_32_xfId):
  3990. bs_get_integer_32_context = xb(Arg(0));
  3991. I++;
  3992. do_bs_get_integer_32:
  3993. {
  3994. ErlBinMatchBuffer *_mb;
  3995. Uint32 _integer;
  3996. Eterm _result;
  3997. _mb = ms_matchbuffer(bs_get_integer_32_context);
  3998. if (_mb->size - _mb->offset < 32) { ClauseFail(); }
  3999. if (BIT_OFFSET(_mb->offset) != 0) {
  4000. _integer = erts_bs_get_unaligned_uint32(_mb);
  4001. } else {
  4002. _integer = get_int32(_mb->base + _mb->offset/8);
  4003. }
  4004. _mb->offset += 32;
  4005. #if !defined(ARCH_64) || HALFWORD_HEAP
  4006. if (IS_USMALL(0, _integer)) {
  4007. #endif
  4008. _result = make_small(_integer);
  4009. #if !defined(ARCH_64) || HALFWORD_HEAP
  4010. } else {
  4011. TestHeap(BIG_UINT_HEAP_SIZE, Arg(1));
  4012. _result = uint_to_big((Uint) _integer, HTOP);
  4013. HTOP += BIG_UINT_HEAP_SIZE;
  4014. HEAP_SPACE_VERIFIED(0);
  4015. }
  4016. #endif
  4017. StoreBifResult(2, _result);
  4018. }
  4019. }
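/*
 * Why the conditional compilation above: on a 32-bit emulator a
 * small has fewer than 32 value bits (the low bits hold the tag),
 * so a fetched Uint32 may need a heap-allocated bignum; on a plain
 * 64-bit build every Uint32 fits in a small and the IS_USMALL test
 * is compiled out. Sketch of the 32-bit slow path:
 *
 *   if (!IS_USMALL(0, v)) {
 *       result = uint_to_big(v, HTOP);  (header word + one digit)
 *       HTOP += BIG_UINT_HEAP_SIZE;
 *   }
 */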
  4020. /* Operands: Size Live Fail Flags Dst */
  4021. OpCase(i_bs_get_integer_imm_rIIfId): {
  4022. tmp_arg1 = r(0);
  4023. /* Operands: Size Live Fail Flags Dst */
  4024. goto do_bs_get_integer_imm_test_heap;
  4025. }
  4026. /* Operands: x(Reg) Size Live Fail Flags Dst */
  4027. OpCase(i_bs_get_integer_imm_xIIfId): {
  4028. tmp_arg1 = xb(Arg(0));
  4029. I++;
  4030. /* Operands: Size Live Fail Flags Dst */
  4031. goto do_bs_get_integer_imm_test_heap;
  4032. }
  4033. /*
  4034. * tmp_arg1 = match context
  4035. * Operands: Size Live Fail Flags Dst
  4036. */
  4037. do_bs_get_integer_imm_test_heap: {
  4038. Uint wordsneeded;
  4039. tmp_arg2 = Arg(0);
  4040. wordsneeded = 1+WSIZE(NBYTES(tmp_arg2));
  4041. TestHeapPreserve(wordsneeded, Arg(1), tmp_arg1);
  4042. I += 2;
  4043. /* Operands: Fail Flags Dst */
  4044. goto do_bs_get_integer_imm;
  4045. }
  4046. /* Operands: Size Fail Flags Dst */
  4047. OpCase(i_bs_get_integer_small_imm_rIfId): {
  4048. tmp_arg1 = r(0);
  4049. tmp_arg2 = Arg(0);
  4050. I++;
  4051. /* Operands: Fail Flags Dst */
  4052. goto do_bs_get_integer_imm;
  4053. }
  4054. /* Operands: x(Reg) Size Fail Flags Dst */
  4055. OpCase(i_bs_get_integer_small_imm_xIfId): {
  4056. tmp_arg1 = xb(Arg(0));
  4057. tmp_arg2 = Arg(1);
  4058. I += 2;
  4059. /* Operands: Fail Flags Dst */
  4060. goto do_bs_get_integer_imm;
  4061. }
  4062. /*
  4063. * tmp_arg1 = match context
  4064. * tmp_arg2 = size of field
  4065. * Operands: Fail Flags Dst
  4066. */
  4067. do_bs_get_integer_imm: {
  4068. ErlBinMatchBuffer* mb;
  4069. Eterm result;
  4070. mb = ms_matchbuffer(tmp_arg1);
  4071. LIGHT_SWAPOUT;
  4072. result = erts_bs_get_integer_2(c_p, tmp_arg2, Arg(1), mb);
  4073. LIGHT_SWAPIN;
  4074. HEAP_SPACE_VERIFIED(0);
  4075. if (is_non_value(result)) {
  4076. ClauseFail();
  4077. }
  4078. StoreBifResult(2, result);
  4079. }
  4080. /*
  4081. * tmp_arg1 = Match context
  4082. * tmp_arg2 = Size field
  4083. * Operands: Fail Live FlagsAndUnit Dst
  4084. */
  4085. OpCase(i_bs_get_integer_fIId): {
  4086. Uint flags;
  4087. Uint size;
  4088. ErlBinMatchBuffer* mb;
  4089. Eterm result;
  4090. flags = Arg(2);
  4091. BsGetFieldSize(tmp_arg2, (flags >> 3), ClauseFail(), size);
  4092. if (size >= SMALL_BITS) {
  4093. Uint wordsneeded;
4094. /* Check the size in bits before a potential GC.
4095. * We do not want to GC and then realize we don't need
4096. * the allocated space (i.e. if the op fails).
4097. *
4098. * Remember to re-acquire the match buffer after GC.
  4099. */
  4100. mb = ms_matchbuffer(tmp_arg1);
  4101. if (mb->size - mb->offset < size) {
  4102. ClauseFail();
  4103. }
  4104. wordsneeded = 1+WSIZE(NBYTES((Uint) size));
  4105. TestHeapPreserve(wordsneeded, Arg(1), tmp_arg1);
  4106. }
  4107. mb = ms_matchbuffer(tmp_arg1);
  4108. LIGHT_SWAPOUT;
  4109. result = erts_bs_get_integer_2(c_p, size, flags, mb);
  4110. LIGHT_SWAPIN;
  4111. HEAP_SPACE_VERIFIED(0);
  4112. if (is_non_value(result)) {
  4113. ClauseFail();
  4114. }
  4115. StoreBifResult(3, result);
  4116. }
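/*
 * Note on the SMALL_BITS guard above: only a field of at least
 * SMALL_BITS bits can produce a bignum, so narrow fields skip the
 * heap reservation (and any GC it could trigger) entirely. A worked
 * example of the reservation on a 64-bit build:
 *
 *   size = 64 bits => NBYTES(64) = 8 bytes => WSIZE(8) = 1 word
 *   wordsneeded = 1 + 1   (bignum header + one digit)
 */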
  4117. {
  4118. Eterm get_utf8_context;
  4119. /* Operands: MatchContext Fail Dst */
  4120. OpCase(i_bs_get_utf8_rfd): {
  4121. get_utf8_context = r(0);
  4122. goto do_bs_get_utf8;
  4123. }
  4124. OpCase(i_bs_get_utf8_xfd): {
  4125. get_utf8_context = xb(Arg(0));
  4126. I++;
  4127. }
  4128. /*
  4129. * get_utf8_context = match_context
  4130. * Operands: Fail Dst
  4131. */
  4132. do_bs_get_utf8: {
  4133. Eterm result = erts_bs_get_utf8(ms_matchbuffer(get_utf8_context));
  4134. if (is_non_value(result)) {
  4135. ClauseFail();
  4136. }
  4137. StoreBifResult(1, result);
  4138. }
  4139. }
  4140. {
  4141. Eterm get_utf16_context;
  4142. /* Operands: MatchContext Fail Flags Dst */
  4143. OpCase(i_bs_get_utf16_rfId): {
  4144. get_utf16_context = r(0);
  4145. goto do_bs_get_utf16;
  4146. }
  4147. OpCase(i_bs_get_utf16_xfId): {
  4148. get_utf16_context = xb(Arg(0));
  4149. I++;
  4150. }
  4151. /*
  4152. * get_utf16_context = match_context
  4153. * Operands: Fail Flags Dst
  4154. */
  4155. do_bs_get_utf16: {
  4156. Eterm result = erts_bs_get_utf16(ms_matchbuffer(get_utf16_context),
  4157. Arg(1));
  4158. if (is_non_value(result)) {
  4159. ClauseFail();
  4160. }
  4161. StoreBifResult(2, result);
  4162. }
  4163. }
  4164. {
  4165. Eterm context_to_binary_context;
  4166. ErlBinMatchBuffer* mb;
  4167. ErlSubBin* sb;
  4168. Uint size;
  4169. Uint offs;
  4170. Uint orig;
  4171. Uint hole_size;
  4172. OpCase(bs_context_to_binary_r): {
  4173. context_to_binary_context = x0;
  4174. I -= 2;
  4175. goto do_context_to_binary;
  4176. }
  4177. /* Unfortunately, inlining can generate this instruction. */
  4178. OpCase(bs_context_to_binary_y): {
  4179. context_to_binary_context = yb(Arg(0));
  4180. goto do_context_to_binary0;
  4181. }
  4182. OpCase(bs_context_to_binary_x): {
  4183. context_to_binary_context = xb(Arg(0));
  4184. do_context_to_binary0:
  4185. I--;
  4186. }
  4187. do_context_to_binary:
  4188. if (is_boxed(context_to_binary_context) &&
  4189. header_is_bin_matchstate(*boxed_val(context_to_binary_context))) {
  4190. ErlBinMatchState* ms;
  4191. ms = (ErlBinMatchState *) boxed_val(context_to_binary_context);
  4192. mb = &ms->mb;
  4193. offs = ms->save_offset[0];
  4194. size = mb->size - offs;
  4195. goto do_bs_get_binary_all_reuse_common;
  4196. }
  4197. Next(2);
  4198. OpCase(i_bs_get_binary_all_reuse_rfI): {
  4199. context_to_binary_context = x0;
  4200. goto do_bs_get_binary_all_reuse;
  4201. }
  4202. OpCase(i_bs_get_binary_all_reuse_xfI): {
  4203. context_to_binary_context = xb(Arg(0));
  4204. I++;
  4205. }
  4206. do_bs_get_binary_all_reuse:
  4207. mb = ms_matchbuffer(context_to_binary_context);
  4208. size = mb->size - mb->offset;
  4209. if (size % Arg(1) != 0) {
  4210. ClauseFail();
  4211. }
  4212. offs = mb->offset;
  4213. do_bs_get_binary_all_reuse_common:
  4214. orig = mb->orig;
  4215. sb = (ErlSubBin *) boxed_val(context_to_binary_context);
  4216. hole_size = 1 + header_arity(sb->thing_word) - ERL_SUB_BIN_SIZE;
  4217. sb->thing_word = HEADER_SUB_BIN;
  4218. sb->size = BYTE_OFFSET(size);
  4219. sb->bitsize = BIT_OFFSET(size);
  4220. sb->offs = BYTE_OFFSET(offs);
  4221. sb->bitoffs = BIT_OFFSET(offs);
  4222. sb->is_writable = 0;
  4223. sb->orig = orig;
  4224. if (hole_size) {
  4225. sb[1].thing_word = make_pos_bignum_header(hole_size-1);
  4226. }
  4227. Next(2);
  4228. }
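/*
 * Note on the in-place conversion above: a match state occupies
 * more words than the ErlSubBin that replaces it, so the leftover
 * words (the "hole") are stamped with a positive bignum header. The
 * GC then skips them as opaque digits and the heap stays walkable
 * without moving anything:
 *
 *   before: [BIN_MATCHSTATE header | ... state words ...          ]
 *   after:  [SUB_BIN header | sub-bin fields | bignum header/hole ]
 */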
  4229. {
  4230. Eterm match_string_context;
  4231. OpCase(i_bs_match_string_rfII): {
  4232. match_string_context = r(0);
  4233. goto do_bs_match_string;
  4234. }
  4235. OpCase(i_bs_match_string_xfII): {
  4236. match_string_context = xb(Arg(0));
  4237. I++;
  4238. }
  4239. do_bs_match_string:
  4240. {
  4241. BeamInstr *next;
  4242. byte* bytes;
  4243. Uint bits;
  4244. ErlBinMatchBuffer* mb;
  4245. Uint offs;
  4246. PreFetch(3, next);
  4247. bits = Arg(1);
  4248. bytes = (byte *) Arg(2);
  4249. mb = ms_matchbuffer(match_string_context);
  4250. if (mb->size - mb->offset < bits) {
  4251. ClauseFail();
  4252. }
  4253. offs = mb->offset & 7;
  4254. if (offs == 0 && (bits & 7) == 0) {
  4255. if (sys_memcmp(bytes, mb->base+(mb->offset>>3), bits>>3)) {
  4256. ClauseFail();
  4257. }
  4258. } else if (erts_cmp_bits(bytes, 0, mb->base+(mb->offset>>3), mb->offset & 7, bits)) {
  4259. ClauseFail();
  4260. }
  4261. mb->offset += bits;
  4262. NextPF(3, next);
  4263. }
  4264. }
  4265. OpCase(i_bs_save2_rI): {
  4266. BeamInstr *next;
  4267. ErlBinMatchState *_ms;
  4268. PreFetch(1, next);
  4269. _ms = (ErlBinMatchState*) boxed_val((Eterm) r(0));
  4270. _ms->save_offset[Arg(0)] = _ms->mb.offset;
  4271. NextPF(1, next);
  4272. }
  4273. OpCase(i_bs_save2_xI): {
  4274. BeamInstr *next;
  4275. ErlBinMatchState *_ms;
  4276. PreFetch(2, next);
  4277. _ms = (ErlBinMatchState*) boxed_val((Eterm) xb(Arg(0)));
  4278. _ms->save_offset[Arg(1)] = _ms->mb.offset;
  4279. NextPF(2, next);
  4280. }
  4281. OpCase(i_bs_restore2_rI): {
  4282. BeamInstr *next;
  4283. ErlBinMatchState *_ms;
  4284. PreFetch(1, next);
  4285. _ms = (ErlBinMatchState*) boxed_val((Eterm) r(0));
  4286. _ms->mb.offset = _ms->save_offset[Arg(0)];
  4287. NextPF(1, next);
  4288. }
  4289. OpCase(i_bs_restore2_xI): {
  4290. BeamInstr *next;
  4291. ErlBinMatchState *_ms;
  4292. PreFetch(2, next);
  4293. _ms = (ErlBinMatchState*) boxed_val((Eterm) xb(Arg(0)));
  4294. _ms->mb.offset = _ms->save_offset[Arg(1)];
  4295. NextPF(2, next);
  4296. }
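/*
 * Note: the save/restore instructions above implement backtracking
 * in binary matching. Each match state carries a save_offset[]
 * array of bit positions; a clause saves the current position into
 * a slot before segments that may fail and restores it before
 * trying the next clause:
 *
 *   ms->save_offset[slot] = ms->mb.offset;   (i_bs_save2)
 *   ms->mb.offset = ms->save_offset[slot];   (i_bs_restore2)
 */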
  4297. #include "beam_cold.h"
  4298. /*
4299. * This instruction is probably never used (because it is combined with
4300. * a return). However, a future compiler might for some reason emit a
  4301. * deallocate not followed by a return, and that should work.
  4302. */
  4303. OpCase(deallocate_I): {
  4304. BeamInstr *next;
  4305. PreFetch(1, next);
  4306. D(Arg(0));
  4307. NextPF(1, next);
  4308. }
  4309. /*
  4310. * Trace and debugging support.
  4311. */
  4312. OpCase(return_trace): {
  4313. BeamInstr* code = (BeamInstr *) (UWord) E[0];
  4314. SWAPOUT; /* Needed for shared heap */
  4315. ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
  4316. erts_trace_return(c_p, code, r(0), E+1/*Process tracer*/);
  4317. ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
  4318. SWAPIN;
  4319. c_p->cp = NULL;
  4320. SET_I((BeamInstr *) cp_val(E[2]));
  4321. E += 3;
  4322. Goto(*I);
  4323. }
  4324. OpCase(i_generic_breakpoint): {
  4325. BeamInstr real_I;
  4326. ASSERT(I[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
  4327. SWAPOUT;
  4328. reg[0] = r(0);
  4329. real_I = erts_generic_breakpoint(c_p, I, reg);
  4330. r(0) = reg[0];
  4331. SWAPIN;
  4332. ASSERT(VALID_INSTR(real_I));
  4333. Goto(real_I);
  4334. }
  4335. OpCase(i_return_time_trace): {
  4336. BeamInstr *pc = (BeamInstr *) (UWord) E[0];
  4337. SWAPOUT;
  4338. erts_trace_time_return(c_p, pc);
  4339. SWAPIN;
  4340. c_p->cp = NULL;
  4341. SET_I((BeamInstr *) cp_val(E[1]));
  4342. E += 2;
  4343. Goto(*I);
  4344. }
  4345. OpCase(i_return_to_trace): {
  4346. if (IS_TRACED_FL(c_p, F_TRACE_RETURN_TO)) {
  4347. Uint *cpp = (Uint*) E;
  4348. for(;;) {
  4349. ASSERT(is_CP(*cpp));
  4350. if (*cp_val(*cpp) == (BeamInstr) OpCode(return_trace)) {
  4351. do ++cpp; while(is_not_CP(*cpp));
  4352. cpp += 2;
  4353. } else if (*cp_val(*cpp) == (BeamInstr) OpCode(i_return_to_trace)) {
  4354. do ++cpp; while(is_not_CP(*cpp));
  4355. } else break;
  4356. }
  4357. SWAPOUT; /* Needed for shared heap */
  4358. ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
  4359. erts_trace_return_to(c_p, cp_val(*cpp));
  4360. ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
  4361. SWAPIN;
  4362. }
  4363. c_p->cp = NULL;
  4364. SET_I((BeamInstr *) cp_val(E[0]));
  4365. E += 1;
  4366. Goto(*I);
  4367. }
  4368. /*
  4369. * New floating point instructions.
  4370. */
  4371. OpCase(fmove_ql): {
  4372. Eterm fr = Arg(1);
  4373. BeamInstr *next;
  4374. PreFetch(2, next);
  4375. GET_DOUBLE(Arg(0), *(FloatDef*)ADD_BYTE_OFFSET(freg, fr));
  4376. NextPF(2, next);
  4377. }
  4378. OpCase(fmove_dl): {
  4379. Eterm targ1;
  4380. Eterm fr = Arg(1);
  4381. BeamInstr *next;
  4382. PreFetch(2, next);
  4383. GetR(0, targ1);
  4384. /* Arg(0) == HEADER_FLONUM */
  4385. GET_DOUBLE(targ1, *(FloatDef*)ADD_BYTE_OFFSET(freg, fr));
  4386. NextPF(2, next);
  4387. }
  4388. OpCase(fmove_ld): {
  4389. Eterm fr = Arg(0);
  4390. Eterm dest = make_float(HTOP);
  4391. PUT_DOUBLE(*(FloatDef*)ADD_BYTE_OFFSET(freg, fr), HTOP);
  4392. HTOP += FLOAT_SIZE_OBJECT;
  4393. StoreBifResult(1, dest);
  4394. }
  4395. OpCase(fconv_dl): {
  4396. Eterm targ1;
  4397. Eterm fr = Arg(1);
  4398. BeamInstr *next;
  4399. GetR(0, targ1);
  4400. PreFetch(2, next);
  4401. if (is_small(targ1)) {
  4402. fb(fr) = (double) signed_val(targ1);
  4403. } else if (is_big(targ1)) {
  4404. if (big_to_double(targ1, &fb(fr)) < 0) {
  4405. goto fbadarith;
  4406. }
  4407. } else if (is_float(targ1)) {
  4408. GET_DOUBLE(targ1, *(FloatDef*)ADD_BYTE_OFFSET(freg, fr));
  4409. } else {
  4410. goto fbadarith;
  4411. }
  4412. NextPF(2, next);
  4413. }
  4414. #ifdef NO_FPE_SIGNALS
  4415. OpCase(fclearerror):
  4416. OpCase(i_fcheckerror):
  4417. erl_exit(1, "fclearerror/i_fcheckerror without fpe signals (beam_emu)");
  4418. # define ERTS_NO_FPE_CHECK_INIT ERTS_FP_CHECK_INIT
  4419. # define ERTS_NO_FPE_ERROR ERTS_FP_ERROR
  4420. #else
  4421. # define ERTS_NO_FPE_CHECK_INIT(p)
  4422. # define ERTS_NO_FPE_ERROR(p, a, b)
  4423. OpCase(fclearerror): {
  4424. BeamInstr *next;
  4425. PreFetch(0, next);
  4426. ERTS_FP_CHECK_INIT(c_p);
  4427. NextPF(0, next);
  4428. }
  4429. OpCase(i_fcheckerror): {
  4430. BeamInstr *next;
  4431. PreFetch(0, next);
  4432. ERTS_FP_ERROR(c_p, freg[0].fd, goto fbadarith);
  4433. NextPF(0, next);
  4434. }
  4435. #endif
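/*
 * Summary of the two checking strategies (a sketch; the exact macro
 * expansions live elsewhere): without FPE signals each float
 * instruction below checks its own result through ERTS_NO_FPE_ERROR,
 * morally
 *
 *   double r = a / b;
 *   if (r is not finite) goto fbadarith;   (Inf/NaN => badarith)
 *
 * while with FPE signals available those per-op checks expand to
 * nothing and fclearerror/i_fcheckerror above bracket a whole block
 * of float operations instead.
 */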
  4436. OpCase(i_fadd_lll): {
  4437. BeamInstr *next;
  4438. PreFetch(3, next);
  4439. ERTS_NO_FPE_CHECK_INIT(c_p);
  4440. fb(Arg(2)) = fb(Arg(0)) + fb(Arg(1));
  4441. ERTS_NO_FPE_ERROR(c_p, fb(Arg(2)), goto fbadarith);
  4442. NextPF(3, next);
  4443. }
  4444. OpCase(i_fsub_lll): {
  4445. BeamInstr *next;
  4446. PreFetch(3, next);
  4447. ERTS_NO_FPE_CHECK_INIT(c_p);
  4448. fb(Arg(2)) = fb(Arg(0)) - fb(Arg(1));
  4449. ERTS_NO_FPE_ERROR(c_p, fb(Arg(2)), goto fbadarith);
  4450. NextPF(3, next);
  4451. }
  4452. OpCase(i_fmul_lll): {
  4453. BeamInstr *next;
  4454. PreFetch(3, next);
  4455. ERTS_NO_FPE_CHECK_INIT(c_p);
  4456. fb(Arg(2)) = fb(Arg(0)) * fb(Arg(1));
  4457. ERTS_NO_FPE_ERROR(c_p, fb(Arg(2)), goto fbadarith);
  4458. NextPF(3, next);
  4459. }
  4460. OpCase(i_fdiv_lll): {
  4461. BeamInstr *next;
  4462. PreFetch(3, next);
  4463. ERTS_NO_FPE_CHECK_INIT(c_p);
  4464. fb(Arg(2)) = fb(Arg(0)) / fb(Arg(1));
  4465. ERTS_NO_FPE_ERROR(c_p, fb(Arg(2)), goto fbadarith);
  4466. NextPF(3, next);
  4467. }
  4468. OpCase(i_fnegate_ll): {
  4469. BeamInstr *next;
  4470. PreFetch(2, next);
  4471. ERTS_NO_FPE_CHECK_INIT(c_p);
  4472. fb(Arg(1)) = -fb(Arg(0));
  4473. ERTS_NO_FPE_ERROR(c_p, fb(Arg(1)), goto fbadarith);
  4474. NextPF(2, next);
  4475. fbadarith:
  4476. c_p->freason = BADARITH;
  4477. goto find_func_info;
  4478. }
  4479. #ifdef HIPE
  4480. {
  4481. unsigned cmd;
  4482. OpCase(hipe_trap_call): {
  4483. /*
  4484. * I[-5]: &&lb_i_func_info_IaaI
  4485. * I[-4]: Native code callee (inserted by HiPE)
  4486. * I[-3]: Module (tagged atom)
  4487. * I[-2]: Function (tagged atom)
  4488. * I[-1]: Arity (untagged integer)
  4489. * I[ 0]: &&lb_hipe_trap_call
  4490. * ... remainder of original BEAM code
  4491. */
  4492. ASSERT(I[-5] == (Uint) OpCode(i_func_info_IaaI));
  4493. c_p->hipe.u.ncallee = (void(*)(void)) I[-4];
  4494. cmd = HIPE_MODE_SWITCH_CMD_CALL | (I[-1] << 8);
  4495. ++hipe_trap_count;
  4496. goto L_hipe_mode_switch;
  4497. }
  4498. OpCase(hipe_trap_call_closure): {
  4499. ASSERT(I[-5] == (Uint) OpCode(i_func_info_IaaI));
  4500. c_p->hipe.u.ncallee = (void(*)(void)) I[-4];
  4501. cmd = HIPE_MODE_SWITCH_CMD_CALL_CLOSURE | (I[-1] << 8);
  4502. ++hipe_trap_count;
  4503. goto L_hipe_mode_switch;
  4504. }
  4505. OpCase(hipe_trap_return): {
  4506. cmd = HIPE_MODE_SWITCH_CMD_RETURN;
  4507. goto L_hipe_mode_switch;
  4508. }
  4509. OpCase(hipe_trap_throw): {
  4510. cmd = HIPE_MODE_SWITCH_CMD_THROW;
  4511. goto L_hipe_mode_switch;
  4512. }
  4513. OpCase(hipe_trap_resume): {
  4514. cmd = HIPE_MODE_SWITCH_CMD_RESUME;
  4515. goto L_hipe_mode_switch;
  4516. }
  4517. L_hipe_mode_switch:
  4518. /* XXX: this abuse of def_arg_reg[] is horrid! */
  4519. SWAPOUT;
  4520. c_p->fcalls = FCALLS;
  4521. c_p->def_arg_reg[4] = -neg_o_reds;
  4522. reg[0] = r(0);
  4523. c_p = hipe_mode_switch(c_p, cmd, reg);
  4524. reg = ERTS_PROC_GET_SCHDATA(c_p)->x_reg_array;
  4525. freg = ERTS_PROC_GET_SCHDATA(c_p)->f_reg_array;
  4526. ERL_BITS_RELOAD_STATEP(c_p);
  4527. neg_o_reds = -c_p->def_arg_reg[4];
  4528. FCALLS = c_p->fcalls;
  4529. SWAPIN;
4530. switch (c_p->def_arg_reg[3]) { /* Halfword won't work with HiPE yet! */
  4531. case HIPE_MODE_SWITCH_RES_RETURN:
  4532. ASSERT(is_value(reg[0]));
  4533. MoveReturn(reg[0], r(0));
  4534. case HIPE_MODE_SWITCH_RES_CALL_EXPORTED:
  4535. c_p->i = c_p->hipe.u.callee_exp->addressv[erts_active_code_ix()];
4536. /* fall through */
  4537. case HIPE_MODE_SWITCH_RES_CALL_BEAM:
  4538. SET_I(c_p->i);
  4539. r(0) = reg[0];
  4540. Dispatch();
  4541. case HIPE_MODE_SWITCH_RES_CALL_CLOSURE:
  4542. /* This can be used to call any function value, but currently it's
  4543. only used to call closures referring to unloaded modules. */
  4544. {
  4545. BeamInstr *next;
  4546. next = call_fun(c_p, c_p->arity - 1, reg, THE_NON_VALUE);
  4547. SWAPIN;
  4548. if (next != NULL) {
  4549. r(0) = reg[0];
  4550. SET_I(next);
  4551. Dispatchfun();
  4552. }
  4553. goto find_func_info;
  4554. }
  4555. case HIPE_MODE_SWITCH_RES_THROW:
  4556. c_p->cp = NULL;
  4557. I = handle_error(c_p, I, reg, NULL);
  4558. goto post_error_handling;
  4559. default:
  4560. erl_exit(1, "hipe_mode_switch: result %u\n", c_p->def_arg_reg[3]);
  4561. }
  4562. }
  4563. OpCase(hipe_call_count): {
  4564. /*
  4565. * I[-5]: &&lb_i_func_info_IaaI
  4566. * I[-4]: pointer to struct hipe_call_count (inserted by HiPE)
  4567. * I[-3]: Module (tagged atom)
  4568. * I[-2]: Function (tagged atom)
  4569. * I[-1]: Arity (untagged integer)
  4570. * I[ 0]: &&lb_hipe_call_count
  4571. * ... remainder of original BEAM code
  4572. */
  4573. struct hipe_call_count *hcc = (struct hipe_call_count*)I[-4];
  4574. ASSERT(I[-5] == (Uint) OpCode(i_func_info_IaaI));
  4575. ASSERT(hcc != NULL);
  4576. ASSERT(VALID_INSTR(hcc->opcode));
  4577. ++(hcc->count);
  4578. Goto(hcc->opcode);
  4579. }
  4580. #endif /* HIPE */
  4581. OpCase(i_yield):
  4582. {
  4583. /* This is safe as long as REDS_IN(c_p) is never stored
  4584. * in c_p->arg_reg[0]. It is currently stored in c_p->def_arg_reg[5],
  4585. * which may be c_p->arg_reg[5], which is close, but no banana.
  4586. */
  4587. c_p->arg_reg[0] = am_true;
  4588. c_p->arity = 1; /* One living register (the 'true' return value) */
  4589. SWAPOUT;
  4590. c_p->i = I + 1; /* Next instruction */
  4591. c_p->current = NULL;
  4592. goto do_schedule;
  4593. }
  4594. OpCase(i_hibernate): {
  4595. SWAPOUT;
  4596. if (erts_hibernate(c_p, r(0), x(1), x(2), reg)) {
  4597. c_p->flags &= ~F_HIBERNATE_SCHED;
  4598. goto do_schedule;
  4599. } else {
  4600. I = handle_error(c_p, I, reg, hibernate_3);
  4601. goto post_error_handling;
  4602. }
  4603. }
  4604. OpCase(i_debug_breakpoint): {
  4605. SWAPOUT;
  4606. reg[0] = r(0);
  4607. I = call_error_handler(c_p, I-3, reg, am_breakpoint);
  4608. r(0) = reg[0];
  4609. SWAPIN;
  4610. if (I) {
  4611. Goto(*I);
  4612. }
  4613. goto handle_error;
  4614. }
  4615. OpCase(system_limit_j):
  4616. system_limit:
  4617. c_p->freason = SYSTEM_LIMIT;
  4618. goto lb_Cl_error;
  4619. #ifdef ERTS_OPCODE_COUNTER_SUPPORT
  4620. DEFINE_COUNTING_LABELS;
  4621. #endif
  4622. #ifndef NO_JUMP_TABLE
  4623. #ifdef DEBUG
  4624. end_emulator_loop:
  4625. #endif
  4626. #endif
  4627. OpCase(int_code_end):
  4628. OpCase(label_L):
  4629. OpCase(on_load):
  4630. OpCase(line_I):
  4631. erl_exit(1, "meta op\n");
  4632. /*
  4633. * One-time initialization of Beam emulator.
  4634. */
  4635. init_emulator:
  4636. {
  4637. int i;
  4638. Export* ep;
  4639. #ifndef NO_JUMP_TABLE
  4640. #ifdef ERTS_OPCODE_COUNTER_SUPPORT
  4641. #ifdef DEBUG
  4642. counting_opcodes[op_catch_end_y] = LabelAddr(lb_catch_end_y);
  4643. #endif
  4644. counting_opcodes[op_i_func_info_IaaI] = LabelAddr(lb_i_func_info_IaaI);
  4645. beam_ops = counting_opcodes;
  4646. #else /* #ifndef ERTS_OPCODE_COUNTER_SUPPORT */
  4647. beam_ops = opcodes;
  4648. #endif /* ERTS_OPCODE_COUNTER_SUPPORT */
  4649. #endif /* NO_JUMP_TABLE */
  4650. em_call_error_handler = OpCode(call_error_handler);
  4651. em_apply_bif = OpCode(apply_bif);
  4652. em_call_nif = OpCode(call_nif);
  4653. beam_apply[0] = (BeamInstr) OpCode(i_apply);
  4654. beam_apply[1] = (BeamInstr) OpCode(normal_exit);
  4655. beam_exit[0] = (BeamInstr) OpCode(error_action_code);
  4656. beam_continue_exit[0] = (BeamInstr) OpCode(continue_exit);
  4657. beam_return_to_trace[0] = (BeamInstr) OpCode(i_return_to_trace);
  4658. beam_return_trace[0] = (BeamInstr) OpCode(return_trace);
  4659. beam_exception_trace[0] = (BeamInstr) OpCode(return_trace); /* UGLY */
  4660. beam_return_time_trace[0] = (BeamInstr) OpCode(i_return_time_trace);
  4661. /*
  4662. * Enter all BIFs into the export table.
  4663. */
  4664. for (i = 0; i < BIF_SIZE; i++) {
  4665. ep = erts_export_put(bif_table[i].module,
  4666. bif_table[i].name,
  4667. bif_table[i].arity);
  4668. bif_export[i] = ep;
  4669. ep->code[3] = (BeamInstr) OpCode(apply_bif);
  4670. ep->code[4] = (BeamInstr) bif_table[i].f;
  4671. /* XXX: set func info for bifs */
  4672. ep->fake_op_func_info_for_hipe[0] = (BeamInstr) BeamOp(op_i_func_info_IaaI);
  4673. }
  4674. return;
  4675. }
  4676. #ifdef NO_JUMP_TABLE
  4677. default:
  4678. erl_exit(1, "unexpected op code %d\n",Go);
  4679. }
  4680. #endif
  4681. return; /* Never executed */
  4682. save_calls1:
  4683. {
  4684. Eterm* dis_next;
  4685. save_calls(c_p, (Export *) Arg(0));
  4686. SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]);
  4687. dis_next = (Eterm *) *I;
  4688. FCALLS--;
  4689. Goto(dis_next);
  4690. }
  4691. }
  4692. static BifFunction
  4693. translate_gc_bif(void* gcf)
  4694. {
  4695. if (gcf == erts_gc_length_1) {
  4696. return length_1;
  4697. } else if (gcf == erts_gc_size_1) {
  4698. return size_1;
  4699. } else if (gcf == erts_gc_bit_size_1) {
  4700. return bit_size_1;
  4701. } else if (gcf == erts_gc_byte_size_1) {
  4702. return byte_size_1;
  4703. } else if (gcf == erts_gc_map_size_1) {
  4704. return map_size_1;
  4705. } else if (gcf == erts_gc_abs_1) {
  4706. return abs_1;
  4707. } else if (gcf == erts_gc_float_1) {
  4708. return float_1;
  4709. } else if (gcf == erts_gc_round_1) {
  4710. return round_1;
  4711. } else if (gcf == erts_gc_trunc_1) {
4712. return trunc_1;
  4713. } else if (gcf == erts_gc_binary_part_2) {
  4714. return binary_part_2;
  4715. } else if (gcf == erts_gc_binary_part_3) {
  4716. return binary_part_3;
  4717. } else {
  4718. erl_exit(1, "bad gc bif");
  4719. }
  4720. }
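/*
 * Usage note (illustrative): translate_gc_bif() maps a gc-wrapped
 * entry point back to the user-visible BIF so that stack traces
 * name the function the programmer actually called, e.g.
 *
 *   BifFunction bf = translate_gc_bif((void *) erts_gc_byte_size_1);
 *   (bf == byte_size_1, so the trace shows erlang:byte_size/1)
 */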
  4721. /*
  4722. * Mapping from the error code 'class tag' to atoms.
  4723. */
  4724. Eterm exception_tag[NUMBER_EXC_TAGS] = {
  4725. am_error, /* 0 */
  4726. am_exit, /* 1 */
  4727. am_throw, /* 2 */
  4728. };
  4729. /*
  4730. * Mapping from error code 'index' to atoms.
  4731. */
  4732. Eterm error_atom[NUMBER_EXIT_CODES] = {
  4733. am_internal_error, /* 0 */
  4734. am_normal, /* 1 */
  4735. am_internal_error, /* 2 */
  4736. am_badarg, /* 3 */
  4737. am_badarith, /* 4 */
  4738. am_badmatch, /* 5 */
  4739. am_function_clause, /* 6 */
  4740. am_case_clause, /* 7 */
  4741. am_if_clause, /* 8 */
  4742. am_undef, /* 9 */
  4743. am_badfun, /* 10 */
  4744. am_badarity, /* 11 */
  4745. am_timeout_value, /* 12 */
  4746. am_noproc, /* 13 */
  4747. am_notalive, /* 14 */
  4748. am_system_limit, /* 15 */
  4749. am_try_clause, /* 16 */
  4750. am_notsup, /* 17 */
  4751. am_badmap, /* 18 */
  4752. am_badkey, /* 19 */
  4753. };
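/*
 * Sketch of how the tables are indexed: c_p->freason packs a class
 * tag, flags, and an index, which the GET_EXC_* macros decode, e.g.
 *
 *   exception_tag[GET_EXC_CLASS(EXC_BADARG)]  => am_error
 *   error_atom[GET_EXC_INDEX(EXC_BADARG)]     => am_badarg
 */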
  4754. /*
  4755. * To fully understand the error handling, one must keep in mind that
  4756. * when an exception is thrown, the search for a handler can jump back
  4757. * and forth between Beam and native code. Upon each mode switch, a
  4758. * dummy handler is inserted so that if an exception reaches that point,
  4759. * the handler is invoked (like any handler) and transfers control so
  4760. * that the search for a real handler is continued in the other mode.
  4761. * Therefore, c_p->freason and c_p->fvalue must still hold the exception
  4762. * info when the handler is executed, but normalized so that creation of
  4763. * error terms and saving of the stack trace is only done once, even if
  4764. * we pass through the error handling code several times.
  4765. *
  4766. * When a new exception is raised, the current stack trace information
  4767. * is quick-saved in a small structure allocated on the heap. Depending
  4768. * on how the exception is eventually caught (perhaps by causing the
  4769. * current process to terminate), the saved information may be used to
  4770. * create a symbolic (human-readable) representation of the stack trace
  4771. * at the point of the original exception.
  4772. */
  4773. static BeamInstr*
  4774. handle_error(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf)
  4775. {
  4776. Eterm* hp;
  4777. Eterm Value = c_p->fvalue;
  4778. Eterm Args = am_true;
  4779. c_p->i = pc; /* In case we call erl_exit(). */
  4780. ASSERT(c_p->freason != TRAP); /* Should have been handled earlier. */
  4781. /*
  4782. * Check if we have an arglist for the top level call. If so, this
  4783. * is encoded in Value, so we have to dig out the real Value as well
  4784. * as the Arglist.
  4785. */
  4786. if (c_p->freason & EXF_ARGLIST) {
  4787. Eterm* tp;
  4788. ASSERT(is_tuple(Value));
  4789. tp = tuple_val(Value);
  4790. Value = tp[1];
  4791. Args = tp[2];
  4792. }
  4793. /*
  4794. * Save the stack trace info if the EXF_SAVETRACE flag is set. The
  4795. * main reason for doing this separately is to allow throws to later
  4796. * become promoted to errors without losing the original stack
  4797. * trace, even if they have passed through one or more catch and
  4798. * rethrow. It also makes the creation of symbolic stack traces much
  4799. * more modular.
  4800. */
  4801. if (c_p->freason & EXF_SAVETRACE) {
  4802. save_stacktrace(c_p, pc, reg, bf, Args);
  4803. }
  4804. /*
  4805. * Throws that are not caught are turned into 'nocatch' errors
  4806. */
  4807. if ((c_p->freason & EXF_THROWN) && (c_p->catches <= 0) ) {
  4808. hp = HAlloc(c_p, 3);
  4809. Value = TUPLE2(hp, am_nocatch, Value);
  4810. c_p->freason = EXC_ERROR;
  4811. }
  4812. /* Get the fully expanded error term */
  4813. Value = expand_error_value(c_p, c_p->freason, Value);
  4814. /* Save final error term and stabilize the exception flags so no
  4815. further expansion is done. */
  4816. c_p->fvalue = Value;
  4817. c_p->freason = PRIMARY_EXCEPTION(c_p->freason);
  4818. /* Find a handler or die */
  4819. if ((c_p->catches > 0 || IS_TRACED_FL(c_p, F_EXCEPTION_TRACE))
  4820. && !(c_p->freason & EXF_PANIC)) {
  4821. BeamInstr *new_pc;
  4822. /* The Beam handler code (catch_end or try_end) checks reg[0]
  4823. for THE_NON_VALUE to see if the previous code finished
  4824. abnormally. If so, reg[1], reg[2] and reg[3] should hold the
  4825. exception class, term and trace, respectively. (If the
  4826. handler is just a trap to native code, these registers will
  4827. be ignored.) */
  4828. reg[0] = THE_NON_VALUE;
  4829. reg[1] = exception_tag[GET_EXC_CLASS(c_p->freason)];
  4830. reg[2] = Value;
  4831. reg[3] = c_p->ftrace;
  4832. if ((new_pc = next_catch(c_p, reg))) {
  4833. c_p->cp = 0; /* To avoid keeping stale references. */
  4834. return new_pc;
  4835. }
  4836. if (c_p->catches > 0) erl_exit(1, "Catch not found");
  4837. }
  4838. ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
  4839. terminate_proc(c_p, Value);
  4840. ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
  4841. return NULL;
  4842. }
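/*
 * Example of the nocatch promotion above: an uncaught throw(foo)
 * does not terminate the process as a throw; it is rewrapped as an
 * error whose value records the original term:
 *
 *   Value = TUPLE2(hp, am_nocatch, Value);   => {nocatch, foo}
 *   c_p->freason = EXC_ERROR;
 */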
  4843. /*
  4844. * Find the nearest catch handler
  4845. */
  4846. static BeamInstr*
  4847. next_catch(Process* c_p, Eterm *reg) {
  4848. int active_catches = c_p->catches > 0;
  4849. int have_return_to_trace = 0;
  4850. Eterm *ptr, *prev, *return_to_trace_ptr = NULL;
  4851. BeamInstr i_return_trace = beam_return_trace[0];
  4852. BeamInstr i_return_to_trace = beam_return_to_trace[0];
  4853. BeamInstr i_return_time_trace = beam_return_time_trace[0];
  4854. ptr = prev = c_p->stop;
  4855. ASSERT(is_CP(*ptr));
  4856. ASSERT(ptr <= STACK_START(c_p));
  4857. if (ptr == STACK_START(c_p)) return NULL;
  4858. if ((is_not_CP(*ptr) || (*cp_val(*ptr) != i_return_trace &&
  4859. *cp_val(*ptr) != i_return_to_trace &&
  4860. *cp_val(*ptr) != i_return_time_trace ))
  4861. && c_p->cp) {
4862. /* Cannot follow cp here - code may be unloaded */
  4863. BeamInstr *cpp = c_p->cp;
  4864. if (cpp == beam_exception_trace) {
  4865. erts_trace_exception(c_p, cp_val(ptr[0]),
  4866. reg[1], reg[2], ptr+1);
  4867. /* Skip return_trace parameters */
  4868. ptr += 2;
  4869. } else if (cpp == beam_return_trace) {
  4870. /* Skip return_trace parameters */
  4871. ptr += 2;
  4872. } else if (cpp == beam_return_time_trace) {
4873. /* Skip return_time_trace parameters */
  4874. ptr += 1;
  4875. } else if (cpp == beam_return_to_trace) {
  4876. have_return_to_trace = !0; /* Record next cp */
  4877. }
  4878. }
  4879. while (ptr < STACK_START(c_p)) {
  4880. if (is_catch(*ptr)) {
  4881. if (active_catches) goto found_catch;
  4882. ptr++;
  4883. }
  4884. else if (is_CP(*ptr)) {
  4885. prev = ptr;
  4886. if (*cp_val(*prev) == i_return_trace) {
  4887. /* Skip stack frame variables */
  4888. while (++ptr, ptr < STACK_START(c_p) && is_not_CP(*ptr)) {
  4889. if (is_catch(*ptr) && active_catches) goto found_catch;
  4890. }
  4891. if (cp_val(*prev) == beam_exception_trace) {
  4892. erts_trace_exception(c_p, cp_val(ptr[0]),
  4893. reg[1], reg[2], ptr+1);
  4894. }
  4895. /* Skip return_trace parameters */
  4896. ptr += 2;
  4897. } else if (*cp_val(*prev) == i_return_to_trace) {
  4898. /* Skip stack frame variables */
  4899. while (++ptr, ptr < STACK_START(c_p) && is_not_CP(*ptr)) {
  4900. if (is_catch(*ptr) && active_catches) goto found_catch;
  4901. }
  4902. have_return_to_trace = !0; /* Record next cp */
  4903. return_to_trace_ptr = NULL;
  4904. } else if (*cp_val(*prev) == i_return_time_trace) {
  4905. /* Skip stack frame variables */
  4906. while (++ptr, ptr < STACK_START(c_p) && is_not_CP(*ptr)) {
  4907. if (is_catch(*ptr) && active_catches) goto found_catch;
  4908. }
4909. /* Skip return_time_trace parameters */
  4910. ptr += 1;
  4911. } else {
  4912. if (have_return_to_trace) {
  4913. /* Record this cp as possible return_to trace cp */
  4914. have_return_to_trace = 0;
  4915. return_to_trace_ptr = ptr;
  4916. } else return_to_trace_ptr = NULL;
  4917. ptr++;
  4918. }
  4919. } else ptr++;
  4920. }
  4921. return NULL;
  4922. found_catch:
  4923. ASSERT(ptr < STACK_START(c_p));
  4924. c_p->stop = prev;
  4925. if (IS_TRACED_FL(c_p, F_TRACE_RETURN_TO) && return_to_trace_ptr) {
4926. /* The stack frame closest to the catch contained a
4927. * return_to_trace entry, so since execution now
4928. * continues after the catch, a return_to trace message
4929. * would be appropriate.
  4930. */
  4931. erts_trace_return_to(c_p, cp_val(*return_to_trace_ptr));
  4932. }
  4933. return catch_pc(*ptr);
  4934. }
  4935. /*
  4936. * Terminating the process when an exception is not caught
  4937. */
  4938. static void
  4939. terminate_proc(Process* c_p, Eterm Value)
  4940. {
  4941. Eterm *hp;
  4942. Eterm Args = NIL;
  4943. /* Add a stacktrace if this is an error. */
  4944. if (GET_EXC_CLASS(c_p->freason) == EXTAG_ERROR) {
  4945. Value = add_stacktrace(c_p, Value, c_p->ftrace);
  4946. }
  4947. /* EXF_LOG is a primary exception flag */
  4948. if (c_p->freason & EXF_LOG) {
  4949. int alive = erts_is_alive;
  4950. erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
  4951. /* Build the format message */
  4952. erts_dsprintf(dsbufp, "Error in process ~p ");
  4953. if (alive)
  4954. erts_dsprintf(dsbufp, "on node ~p ");
  4955. erts_dsprintf(dsbufp, "with exit value:~n~p~n");
  4956. /* Build the args in reverse order */
  4957. hp = HAlloc(c_p, 2);
  4958. Args = CONS(hp, Value, Args);
  4959. if (alive) {
  4960. hp = HAlloc(c_p, 2);
  4961. Args = CONS(hp, erts_this_node->sysname, Args);
  4962. }
  4963. hp = HAlloc(c_p, 2);
  4964. Args = CONS(hp, c_p->common.id, Args);
  4965. erts_send_error_term_to_logger(c_p->group_leader, dsbufp, Args);
  4966. }
  4967. /*
  4968. * If we use a shared heap, the process will be garbage-collected.
  4969. * Must zero c_p->arity to indicate that there are no live registers.
  4970. */
  4971. c_p->arity = 0;
  4972. erts_do_exit_process(c_p, Value);
  4973. }
  4974. /*
  4975. * Build and add a symbolic stack trace to the error value.
  4976. */
  4977. static Eterm
  4978. add_stacktrace(Process* c_p, Eterm Value, Eterm exc) {
  4979. Eterm Where = build_stacktrace(c_p, exc);
  4980. Eterm* hp = HAlloc(c_p, 3);
  4981. return TUPLE2(hp, Value, Where);
  4982. }
  4983. /*
  4984. * Forming the correct error value from the internal error code.
  4985. * This does not update c_p->fvalue or c_p->freason.
  4986. */
  4987. Eterm
  4988. expand_error_value(Process* c_p, Uint freason, Eterm Value) {
  4989. Eterm* hp;
  4990. Uint r;
  4991. r = GET_EXC_INDEX(freason);
  4992. ASSERT(r < NUMBER_EXIT_CODES); /* range check */
  4993. ASSERT(is_value(Value));
  4994. switch (r) {
  4995. case (GET_EXC_INDEX(EXC_PRIMARY)):
  4996. /* Primary exceptions use fvalue as it is */
  4997. break;
  4998. case (GET_EXC_INDEX(EXC_BADMATCH)):
  4999. case (GET_EXC_INDEX(EXC_CASE_CLAUSE)):
  5000. case (GET_EXC_INDEX(EXC_TRY_CLAUSE)):
  5001. case (GET_EXC_INDEX(EXC_BADFUN)):
  5002. case (GET_EXC_INDEX(EXC_BADARITY)):
  5003. case (GET_EXC_INDEX(EXC_BADMAP)):
  5004. case (GET_EXC_INDEX(EXC_BADKEY)):
  5005. /* Some common exceptions: value -> {atom, value} */
  5006. ASSERT(is_value(Value));
  5007. hp = HAlloc(c_p, 3);
  5008. Value = TUPLE2(hp, error_atom[r], Value);
  5009. break;
  5010. default:
  5011. /* Other exceptions just use an atom as descriptor */
  5012. Value = error_atom[r];
  5013. break;
  5014. }
  5015. #ifdef DEBUG
  5016. ASSERT(Value != am_internal_error);
  5017. #endif
  5018. return Value;
  5019. }
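/*
 * Example: a failed match {ok, V} = error arrives here with freason
 * EXC_BADMATCH and Value = error, and leaves as the tuple
 *
 *   TUPLE2(hp, am_badmatch, Value)   => {badmatch, error}
 *
 * while payload-free exceptions such as EXC_UNDEF collapse to their
 * bare atom ('undef').
 */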
  5020. /*
  5021. * Quick-saving the stack trace in an internal form on the heap. Note
  5022. * that c_p->ftrace will point to a cons cell which holds the given args
  5023. * and the saved data (encoded as a bignum).
  5024. *
  5025. * There is an issue with line number information. Line number
5026. * information is associated with the address *before* an operation
5027. * that may fail or be stored on the stack. But a continuation
5028. * pointer points after its call instruction, not before. To avoid
5029. * finding the wrong line number, we'll need to adjust them so that
5030. * they point at the beginning of the call instruction or inside the
5031. * call instruction. Since it's impractical to point at the beginning,
  5032. * we'll do the simplest thing and decrement the continuation pointers
  5033. * by one.
  5034. *
  5035. * Here is an example of what can go wrong. Without the adjustment
  5036. * of continuation pointers, the call at line 42 below would seem to
  5037. * be at line 43:
  5038. *
  5039. * line 42
  5040. * call ...
  5041. * line 43
  5042. * gc_bif ...
  5043. *
  5044. * (It would be much better to put the arglist - when it exists - in the
  5045. * error value instead of in the actual trace; e.g. '{badarg, Args}'
  5046. * instead of using 'badarg' with Args in the trace. The arglist may
  5047. * contain very large values, and right now they will be kept alive as
  5048. * long as the stack trace is live. Preferably, the stack trace should
  5049. * always be small, so that it does not matter if it is long-lived.
  5050. * However, it is probably not possible to ever change the format of
  5051. * error terms.)
  5052. */
  5053. static void
  5054. save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
  5055. Eterm args) {
  5056. struct StackTrace* s;
  5057. int sz;
  5058. int depth = erts_backtrace_depth; /* max depth (never negative) */
  5059. if (depth > 0) {
  5060. /* There will always be a current function */
5061. depth--;
  5062. }
  5063. /* Create a container for the exception data */
  5064. sz = (offsetof(struct StackTrace, trace) + sizeof(BeamInstr *)*depth
  5065. + sizeof(Eterm) - 1) / sizeof(Eterm);
  5066. s = (struct StackTrace *) HAlloc(c_p, 1 + sz);
  5067. /* The following fields are inside the bignum */
  5068. s->header = make_pos_bignum_header(sz);
  5069. s->freason = c_p->freason;
  5070. s->depth = 0;
  5071. /*
  5072. * If the failure was in a BIF other than 'error', 'exit' or
  5073. * 'throw', find the bif-table index and save the argument
  5074. * registers by consing up an arglist.
  5075. */
  5076. if (bf != NULL && bf != error_1 && bf != error_2 &&
  5077. bf != exit_1 && bf != throw_1) {
  5078. int i;
  5079. int a = 0;
  5080. for (i = 0; i < BIF_SIZE; i++) {
  5081. if (bf == bif_table[i].f || bf == bif_table[i].traced) {
  5082. Export *ep = bif_export[i];
  5083. s->current = ep->code;
  5084. a = bif_table[i].arity;
  5085. break;
  5086. }
  5087. }
  5088. if (i >= BIF_SIZE) {
  5089. /*
  5090. * The Bif does not really exist (no BIF entry). It is a
  5091. * TRAP and traps are called through apply_bif, which also
  5092. * sets c_p->current (luckily).
  5093. * OR it is a NIF called by call_nif where current is also set.
  5094. */
  5095. ASSERT(c_p->current);
  5096. s->current = c_p->current;
  5097. a = s->current[2];
  5098. }
  5099. /* Save first stack entry */
  5100. ASSERT(pc);
  5101. if (depth > 0) {
  5102. s->trace[s->depth++] = pc;
  5103. depth--;
  5104. }
  5105. /* Save second stack entry if CP is valid and different from pc */
  5106. if (depth > 0 && c_p->cp != 0 && c_p->cp != pc) {
  5107. s->trace[s->depth++] = c_p->cp - 1;
  5108. depth--;
  5109. }
  5110. s->pc = NULL;
  5111. args = make_arglist(c_p, reg, a); /* Overwrite CAR(c_p->ftrace) */
  5112. } else {
  5113. s->current = c_p->current;
  5114. /*
  5115. * For a function_clause error, the arguments are in the beam
  5116. * registers, c_p->cp is valid, and c_p->current is set.
  5117. */
  5118. if ( (GET_EXC_INDEX(s->freason)) ==
  5119. (GET_EXC_INDEX(EXC_FUNCTION_CLAUSE)) ) {
  5120. int a;
  5121. ASSERT(s->current);
  5122. a = s->current[2];
  5123. args = make_arglist(c_p, reg, a); /* Overwrite CAR(c_p->ftrace) */
  5124. /* Save first stack entry */
  5125. ASSERT(c_p->cp);
  5126. if (depth > 0) {
  5127. s->trace[s->depth++] = c_p->cp - 1;
  5128. depth--;
  5129. }
  5130. s->pc = NULL; /* Ignore pc */
  5131. } else {
  5132. if (depth > 0 && c_p->cp != 0 && c_p->cp != pc) {
  5133. s->trace[s->depth++] = c_p->cp - 1;
  5134. depth--;
  5135. }
  5136. s->pc = pc;
  5137. }
  5138. }
  5139. /* Package args and stack trace */
  5140. {
  5141. Eterm *hp;
  5142. hp = HAlloc(c_p, 2);
  5143. c_p->ftrace = CONS(hp, args, make_big((Eterm *) s));
  5144. }
  5145. /* Save the actual stack trace */
  5146. erts_save_stacktrace(c_p, s, depth);
  5147. }
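/*
 * Note on the container built above: the StackTrace struct is
 * hidden on the heap behind a positive bignum header, so the GC
 * treats the raw code pointers inside it as opaque bignum digits
 * and never follows them. The size computation just rounds the
 * struct up to whole Eterms:
 *
 *   sz = (offsetof(struct StackTrace, trace)
 *         + depth * sizeof(BeamInstr *)
 *         + sizeof(Eterm) - 1) / sizeof(Eterm);
 */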
  5148. void
  5149. erts_save_stacktrace(Process* p, struct StackTrace* s, int depth)
  5150. {
  5151. if (depth > 0) {
  5152. Eterm *ptr;
  5153. BeamInstr *prev = s->depth ? s->trace[s->depth-1] : NULL;
  5154. BeamInstr i_return_trace = beam_return_trace[0];
  5155. BeamInstr i_return_to_trace = beam_return_to_trace[0];
  5156. /*
  5157. * Traverse the stack backwards and add all unique continuation
  5158. * pointers to the buffer, up to the maximum stack trace size.
  5159. *
  5160. * Skip trace stack frames.
  5161. */
  5162. ptr = p->stop;
  5163. if (ptr < STACK_START(p) &&
5164. (is_not_CP(*ptr) || (*cp_val(*ptr) != i_return_trace &&
  5165. *cp_val(*ptr) != i_return_to_trace)) &&
  5166. p->cp) {
  5167. /* Cannot follow cp here - code may be unloaded */
  5168. BeamInstr *cpp = p->cp;
  5169. if (cpp == beam_exception_trace || cpp == beam_return_trace) {
  5170. /* Skip return_trace parameters */
  5171. ptr += 2;
  5172. } else if (cpp == beam_return_to_trace) {
  5173. /* Skip return_to_trace parameters */
  5174. ptr += 1;
  5175. }
  5176. }
  5177. while (ptr < STACK_START(p) && depth > 0) {
  5178. if (is_CP(*ptr)) {
  5179. if (*cp_val(*ptr) == i_return_trace) {
  5180. /* Skip stack frame variables */
  5181. do ++ptr; while (is_not_CP(*ptr));
  5182. /* Skip return_trace parameters */
  5183. ptr += 2;
  5184. } else if (*cp_val(*ptr) == i_return_to_trace) {
  5185. /* Skip stack frame variables */
  5186. do ++ptr; while (is_not_CP(*ptr));
  5187. } else {
  5188. BeamInstr *cp = cp_val(*ptr);
  5189. if (cp != prev) {
  5190. /* Record non-duplicates only */
  5191. prev = cp;
  5192. s->trace[s->depth++] = cp - 1;
  5193. depth--;
  5194. }
  5195. ptr++;
  5196. }
  5197. } else ptr++;
  5198. }
  5199. }
  5200. }

/*
 * Getting the relevant fields from the term pointed to by ftrace
 */

static struct StackTrace *get_trace_from_exc(Eterm exc) {
    if (exc == NIL) {
	return NULL;
    } else {
	ASSERT(is_list(exc));
	return (struct StackTrace *) big_val(CDR(list_val(exc)));
    }
}

static Eterm get_args_from_exc(Eterm exc) {
    if (exc == NIL) {
	return NIL;
    } else {
	ASSERT(is_list(exc));
	return CAR(list_val(exc));
    }
}

static int is_raised_exc(Eterm exc) {
    if (exc == NIL) {
	return 0;
    } else {
	ASSERT(is_list(exc));
	return bignum_header_is_neg(*big_val(CDR(list_val(exc))));
    }
}
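
/*
 * Illustrative sketch (disabled, not part of the emulator): how the
 * three accessors above fit together. The ftrace cons cell holds the
 * argument list in its CAR and the quick-saved StackTrace blob --
 * disguised as a bignum -- in its CDR; a negated bignum header marks
 * an exception that was re-raised with a ready-made stacktrace.
 */
#if 0
static void
example_inspect_ftrace(Eterm exc)
{
    struct StackTrace* s = get_trace_from_exc(exc); /* NULL if exc == NIL */
    Eterm args = get_args_from_exc(exc);            /* NIL if exc == NIL */
    int raised = is_raised_exc(exc);                /* re-raised? */
    (void)s; (void)args; (void)raised;
}
#endif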

/*
 * Creating a list with the argument registers
 */
static Eterm
make_arglist(Process* c_p, Eterm* reg, int a) {
    Eterm args = NIL;
    Eterm* hp = HAlloc(c_p, 2*a);

    while (a > 0) {
        args = CONS(hp, reg[a-1], args);
	hp += 2;
	a--;
    }
    return args;
}
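
/*
 * Illustrative sketch (disabled, not part of the emulator): reg[a-1]
 * is consed first, so register order is preserved in the resulting
 * list.
 */
#if 0
static Eterm
example_arglist(Process* c_p, Eterm* reg)
{
    /* With reg[0] = A and reg[1] = B, builds [A, B]. */
    return make_arglist(c_p, reg, 2);
}
#endif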

/*
 * Building a symbolic representation of a saved stack trace. Note that
 * the exception object 'exc', unless NIL, points to a cons cell which
 * holds the given args and the quick-saved data (encoded as a bignum).
 *
 * If the bignum is negative, the given args is a complete stacktrace.
 */
Eterm
build_stacktrace(Process* c_p, Eterm exc) {
    struct StackTrace* s;
    Eterm args;
    int depth;
    FunctionInfo fi;
    FunctionInfo* stk;
    FunctionInfo* stkp;
    Eterm res = NIL;
    Uint heap_size;
    Eterm* hp;
    Eterm mfa;
    int i;

    if (! (s = get_trace_from_exc(exc))) {
        return NIL;
    }
#ifdef HIPE
    if (s->freason & EXF_NATIVE) {
	return hipe_build_stacktrace(c_p, s);
    }
#endif
    if (is_raised_exc(exc)) {
	return get_args_from_exc(exc);
    }

    /*
     * Find the current function. If the saved s->pc is null, then the
     * saved s->current should already contain the proper value.
     */
    if (s->pc != NULL) {
	erts_lookup_function_info(&fi, s->pc, 1);
    } else if (GET_EXC_INDEX(s->freason) ==
	       GET_EXC_INDEX(EXC_FUNCTION_CLAUSE)) {
	erts_lookup_function_info(&fi, s->current, 1);
    } else {
	erts_set_current_function(&fi, s->current);
    }

    /*
     * If fi.current is still NULL, default to the initial function
     * (e.g. spawn_link(erlang, abs, [1])).
     */
    if (fi.current == NULL) {
	erts_set_current_function(&fi, c_p->u.initial);
	args = am_true; /* Just in case */
    } else {
	args = get_args_from_exc(exc);
    }

    /*
     * Look up all saved continuation pointers and calculate
     * needed heap space.
     */
    depth = s->depth;
    stk = stkp = (FunctionInfo *) erts_alloc(ERTS_ALC_T_TMP,
					     depth*sizeof(FunctionInfo));
    heap_size = fi.needed + 2;
    for (i = 0; i < depth; i++) {
	erts_lookup_function_info(stkp, s->trace[i], 1);
	if (stkp->current) {
	    heap_size += stkp->needed + 2;
	    stkp++;
	}
    }

    /*
     * Allocate heap space and build the stacktrace.
     */
    hp = HAlloc(c_p, heap_size);
    while (stkp > stk) {
	stkp--;
	hp = erts_build_mfa_item(stkp, hp, am_true, &mfa);
	res = CONS(hp, mfa, res);
	hp += 2;
    }
    hp = erts_build_mfa_item(&fi, hp, args, &mfa);
    res = CONS(hp, mfa, res);

    erts_free(ERTS_ALC_T_TMP, (void *) stk);
    return res;
}
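
/*
 * Illustrative note (comment only): the symbolic trace built above is
 * the list seen in catch/try at the Erlang level, e.g.
 *
 *     [{lists,last,[[]],[...]}, ...]
 *
 * one item for the failing call -- carrying the saved argument list
 * (or just the arity once the trace has been passed around) --
 * followed by one {M,F,A,Loc} item per saved continuation pointer.
 */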

static BeamInstr*
call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func)
{
    Eterm* hp;
    Export* ep;
    int arity;
    Eterm args;
    Uint sz;
    int i;

    /*
     * Search for the error_handler module.
     */
    ep = erts_find_function(erts_proc_get_error_handler(p), func, 3,
			    erts_active_code_ix());
    if (ep == NULL) {		/* No error handler */
	p->current = fi;
	p->freason = EXC_UNDEF;
	return 0;
    }

    /*
     * Create a list with all arguments in the x registers.
     */
    arity = fi[2];
    sz = 2 * arity;
    if (HeapWordsLeft(p) < sz) {
	erts_garbage_collect(p, sz, reg, arity);
    }
    hp = HEAP_TOP(p);
    HEAP_TOP(p) += sz;
    args = NIL;
    for (i = arity-1; i >= 0; i--) {
	args = CONS(hp, reg[i], args);
	hp += 2;
    }

    /*
     * Set up registers for call to error_handler:<func>/3.
     */
    reg[0] = fi[0];
    reg[1] = fi[1];
    reg[2] = args;
    return ep->addressv[erts_active_code_ix()];
}
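
/*
 * Illustrative note (comment only): for a call to a missing function
 * mod:f(A, B), the registers are rewritten to {x0,x1,x2} =
 * {mod, f, [A, B]} and the returned address is that of <handler>:<func>/3
 * in whatever module the process has installed as its error handler
 * (error_handler by default), which then decides whether to load 'mod'
 * and retry or to raise undef.
 */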

static Export*
apply_setup_error_handler(Process* p, Eterm module, Eterm function, Uint arity, Eterm* reg)
{
    Export* ep;

    /*
     * Find the export table index for the error handler. Return NULL if
     * there is no error handler module.
     */
    if ((ep = erts_active_export_entry(erts_proc_get_error_handler(p),
				       am_undefined_function, 3)) == NULL) {
	return NULL;
    } else {
	int i;
	Uint sz = 2*arity;
	Eterm* hp;
	Eterm args = NIL;

	/*
	 * Always copy args from registers to a new list; this ensures
	 * that we have the same behaviour whether or not this was
	 * called from apply or fixed_apply (any additional last
	 * THIS-argument will be included, assuming that arity has been
	 * properly adjusted).
	 */
	if (HeapWordsLeft(p) < sz) {
	    erts_garbage_collect(p, sz, reg, arity);
	}
	hp = HEAP_TOP(p);
	HEAP_TOP(p) += sz;
	for (i = arity-1; i >= 0; i--) {
	    args = CONS(hp, reg[i], args);
	    hp += 2;
	}
	reg[0] = module;
	reg[1] = function;
	reg[2] = args;
    }
    return ep;
}

static BeamInstr*
apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
{
    int arity;
    Export* ep;
    Eterm tmp, this;

    /*
     * Check the arguments which should be of the form apply(Module,
     * Function, Arguments) where Function is an atom and
     * Arguments is an arity long list of terms.
     */
    if (is_not_atom(function)) {
	/*
	 * No need to test args here -- done below.
	 */
    error:
	p->freason = BADARG;

    error2:
	reg[0] = module;
	reg[1] = function;
	reg[2] = args;
	return 0;
    }

    /* The module argument may be either an atom or an abstract module
     * (currently implemented using tuples, but this might change).
     */
    this = THE_NON_VALUE;
    if (is_not_atom(module)) {
	Eterm* tp;

        if (is_not_tuple(module)) goto error;
        tp = tuple_val(module);
        if (arityval(tp[0]) < 1) goto error;
	this = module;
        module = tp[1];
        if (is_not_atom(module)) goto error;
    }

    /*
     * Walk down the 3rd parameter of apply (the argument list) and copy
     * the parameters to the x registers (reg[]). If the module argument
     * was an abstract module, add 1 to the function arity and put the
     * module argument in the n+1st x register as a THIS reference.
     */
    tmp = args;
    arity = 0;
    while (is_list(tmp)) {
	if (arity < (MAX_REG - 1)) {
	    reg[arity++] = CAR(list_val(tmp));
	    tmp = CDR(list_val(tmp));
	} else {
	    p->freason = SYSTEM_LIMIT;
	    goto error2;
	}
    }
    if (is_not_nil(tmp)) {	/* Must be well-formed list */
	goto error;
    }
    if (this != THE_NON_VALUE) {
        reg[arity++] = this;
    }

    /*
     * Get the index into the export table, or failing that the export
     * entry for the error handler.
     *
     * Note: All BIFs have export entries; thus, no special case is needed.
     */
    if ((ep = erts_active_export_entry(module, function, arity)) == NULL) {
	if ((ep = apply_setup_error_handler(p, module, function, arity, reg)) == NULL)
	    goto error;
    } else if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) {
	save_calls(p, ep);
    }
    DTRACE_GLOBAL_CALL_FROM_EXPORT(p, ep);
    return ep->addressv[erts_active_code_ix()];
}
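
/*
 * Illustrative sketch (disabled, not part of the emulator): apply()
 * either returns a code address for the emulator to jump to, or 0
 * with p->freason set.
 */
#if 0
static BeamInstr*
example_apply(Process* p, Eterm* reg, Eterm m, Eterm f, Eterm args)
{
    /* Unpacks 'args' into reg[0..arity-1], then resolves m:f/arity
     * in the active export table (falling back to the error handler). */
    return apply(p, m, f, args, reg);
}
#endif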

static BeamInstr*
fixed_apply(Process* p, Eterm* reg, Uint arity)
{
    Export* ep;
    Eterm module;
    Eterm function;

    module = reg[arity];    /* The THIS pointer already in place */
    function = reg[arity+1];

    if (is_not_atom(function)) {
    error:
	p->freason = BADARG;
	reg[0] = module;
	reg[1] = function;
	reg[2] = NIL;
	return 0;
    }

    /* The module argument may be either an atom or an abstract module
     * (currently implemented using tuples, but this might change).
     */
    if (is_not_atom(module)) {
	Eterm* tp;
        if (is_not_tuple(module)) goto error;
        tp = tuple_val(module);
        if (arityval(tp[0]) < 1) goto error;
        module = tp[1];
        if (is_not_atom(module)) goto error;
	++arity;
    }

    /*
     * Get the index into the export table, or failing that the export
     * entry for the error handler module.
     *
     * Note: All BIFs have export entries; thus, no special case is needed.
     */
    if ((ep = erts_active_export_entry(module, function, arity)) == NULL) {
	if ((ep = apply_setup_error_handler(p, module, function, arity, reg)) == NULL)
	    goto error;
    } else if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) {
	save_calls(p, ep);
    }
    DTRACE_GLOBAL_CALL_FROM_EXPORT(p, ep);
    return ep->addressv[erts_active_code_ix()];
}

int
erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* reg)
{
    int arity;
    Eterm tmp;

#ifndef ERTS_SMP
    if (ERTS_PROC_IS_EXITING(c_p)) {
	/*
	 * In the non-SMP case:
	 *
	 * The currently executing process might be sent an exit
	 * signal if it is traced by a port that it also is
	 * linked to, and the port terminates during the
	 * trace. In this case we do *not* want to clear
	 * the active flag, which will make the process hang
	 * in limbo forever. Get out of here and terminate
	 * the process...
	 */
	return -1;
    }
#endif

    if (is_not_atom(module) || is_not_atom(function)) {
	/*
	 * No need to test args here -- done below.
	 */
    error:
	c_p->freason = BADARG;

    error2:
	reg[0] = module;
	reg[1] = function;
	reg[2] = args;
	return 0;
    }

    arity = 0;
    tmp = args;
    while (is_list(tmp)) {
	if (arity < MAX_REG) {
	    tmp = CDR(list_val(tmp));
	    arity++;
	} else {
	    c_p->freason = SYSTEM_LIMIT;
	    goto error2;
	}
    }
    if (is_not_nil(tmp)) {	/* Must be well-formed list */
	goto error;
    }

    /*
     * At this point, arguments are known to be good.
     */

    if (c_p->arg_reg != c_p->def_arg_reg) {
	/* Save some memory */
	erts_free(ERTS_ALC_T_ARG_REG, c_p->arg_reg);
	c_p->arg_reg = c_p->def_arg_reg;
	c_p->max_arg_reg = sizeof(c_p->def_arg_reg)/sizeof(c_p->def_arg_reg[0]);
    }

#ifdef USE_VM_PROBES
    if (DTRACE_ENABLED(process_hibernate)) {
        DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE);
        DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE);
        dtrace_fun_decode(c_p, module, function, arity,
                          process_name, mfa);
        DTRACE2(process_hibernate, process_name, mfa);
    }
#endif
    /*
     * Arrange for the process to be resumed at the given MFA with
     * the stack cleared.
     */
    c_p->arity = 3;
    c_p->arg_reg[0] = module;
    c_p->arg_reg[1] = function;
    c_p->arg_reg[2] = args;
    c_p->stop = STACK_START(c_p);
    c_p->catches = 0;
    c_p->i = beam_apply;
    c_p->cp = (BeamInstr *) beam_apply+1;

    /*
     * If there are no waiting messages, garbage collect and
     * shrink the heap.
     */
    erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
    ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
    if (!c_p->msg.len) {
	erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
	c_p->fvalue = NIL;
	PROCESS_MAIN_CHK_LOCKS(c_p);
	erts_garbage_collect_hibernate(c_p);
	ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
	PROCESS_MAIN_CHK_LOCKS(c_p);
	erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
#ifndef ERTS_SMP
	if (ERTS_PROC_IS_EXITING(c_p)) {
	    /*
	     * See the comment at the beginning of this function...
	     *
	     * This second test is needed since gc might be traced.
	     */
	    return -1;
	}
#else /* ERTS_SMP */
	ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
	if (!c_p->msg.len)
#endif
	    erts_smp_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_ACTIVE);
	ASSERT(!ERTS_PROC_IS_EXITING(c_p));
    }
    erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
    c_p->current = bif_export[BIF_hibernate_3]->code;
    c_p->flags |= F_HIBERNATE_SCHED; /* Needed also when woken! */
    return 1;
}
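
/*
 * Illustrative note (comment only): this is the machinery behind
 * erlang:hibernate(M, F, A). After a successful return the process
 * has an empty stack, no catch frames, and typically a minimally
 * sized heap (it is shrunk by a hibernate GC when no messages are
 * waiting); when a message arrives the process is rescheduled and
 * resumes in M:F with the saved arguments, much as if it had been
 * freshly spawned there.
 */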

static BeamInstr*
call_fun(Process* p,	/* Current process. */
	 int arity,	/* Number of arguments for Fun. */
	 Eterm* reg,	/* Contents of registers. */
	 Eterm args)	/* THE_NON_VALUE or pre-built list of arguments. */
{
    Eterm fun = reg[arity];
    Eterm hdr;
    int i;
    Eterm* hp;

    if (!is_boxed(fun)) {
	goto badfun;
    }
    hdr = *boxed_val(fun);

    if (is_fun_header(hdr)) {
	ErlFunThing* funp = (ErlFunThing *) fun_val(fun);
	ErlFunEntry* fe;
	BeamInstr* code_ptr;
	Eterm* var_ptr;
	int actual_arity;
	unsigned num_free;

	fe = funp->fe;
	num_free = funp->num_free;
	code_ptr = fe->address;
	actual_arity = (int) code_ptr[-1];

	if (actual_arity == arity+num_free) {
	    DTRACE_LOCAL_CALL(p, (Eterm)code_ptr[-3],
			      (Eterm)code_ptr[-2],
			      code_ptr[-1]);
	    if (num_free == 0) {
		return code_ptr;
	    } else {
		var_ptr = funp->env;
		reg += arity;
		i = 0;
		do {
		    reg[i] = var_ptr[i];
		    i++;
		} while (i < num_free);
		reg[i] = fun;
		return code_ptr;
	    }
	} else {
	    /*
	     * Something wrong here. First build a list of the arguments.
	     */
	    if (is_non_value(args)) {
		Uint sz = 2 * arity;
		args = NIL;
		if (HeapWordsLeft(p) < sz) {
		    erts_garbage_collect(p, sz, reg, arity+1);
		    fun = reg[arity];
		}
		hp = HEAP_TOP(p);
		HEAP_TOP(p) += sz;
		for (i = arity-1; i >= 0; i--) {
		    args = CONS(hp, reg[i], args);
		    hp += 2;
		}
	    }

	    if (actual_arity >= 0) {
		/*
		 * There is a fun defined, but the call has the wrong arity.
		 */
		hp = HAlloc(p, 3);
		p->freason = EXC_BADARITY;
		p->fvalue = TUPLE2(hp, fun, args);
		return NULL;
	    } else {
		Export* ep;
		Module* modp;
		Eterm module;
		ErtsCodeIndex code_ix = erts_active_code_ix();

		/*
		 * No arity. There is no module loaded that defines the fun,
		 * either because the fun is newly created from the external
		 * representation (the module has never been loaded),
		 * or the module defining the fun has been unloaded.
		 */
		module = fe->module;

		if ((modp = erts_get_module(module, code_ix)) != NULL
		    && modp->curr.code != NULL) {
		    /*
		     * There is a module loaded, but obviously the fun is not
		     * defined in it. We must not call the error_handler
		     * (or we will get into an infinite loop).
		     */
		    goto badfun;
		}

		/*
		 * No current code for this module. Call the error_handler module
		 * to attempt loading the module.
		 */
		ep = erts_find_function(erts_proc_get_error_handler(p),
					am_undefined_lambda, 3, code_ix);
		if (ep == NULL) {	/* No error handler */
		    p->current = NULL;
		    p->freason = EXC_UNDEF;
		    return NULL;
		}
		reg[0] = module;
		reg[1] = fun;
		reg[2] = args;
		reg[3] = NIL;
		return ep->addressv[erts_active_code_ix()];
	    }
	}
    } else if (is_export_header(hdr)) {
	Export *ep;
	int actual_arity;

	ep = *((Export **) (export_val(fun) + 1));
	actual_arity = (int) ep->code[2];

	if (arity == actual_arity) {
	    DTRACE_GLOBAL_CALL(p, ep->code[0], ep->code[1], (Uint)ep->code[2]);
	    return ep->addressv[erts_active_code_ix()];
	} else {
	    /*
	     * Wrong arity. First build a list of the arguments.
	     */
	    if (is_non_value(args)) {
		args = NIL;
		hp = HAlloc(p, arity*2);
		for (i = arity-1; i >= 0; i--) {
		    args = CONS(hp, reg[i], args);
		    hp += 2;
		}
	    }
	    hp = HAlloc(p, 3);
	    p->freason = EXC_BADARITY;
	    p->fvalue = TUPLE2(hp, fun, args);
	    return NULL;
	}
    } else {
    badfun:
	p->current = NULL;
	p->freason = EXC_BADFUN;
	p->fvalue = fun;
	return NULL;
    }
}
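
/*
 * Illustrative note (comment only): calling F = fun(X) -> X end with
 * two arguments takes the "wrong arity" path above and raises
 * {badarity, {F, [A1, A2]}}; applying something that is not a fun at
 * all takes the badfun path and raises {badfun, Term}.
 */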

static BeamInstr*
apply_fun(Process* p, Eterm fun, Eterm args, Eterm* reg)
{
    int arity;
    Eterm tmp;

    /*
     * Walk down the 3rd parameter of apply (the argument list) and copy
     * the parameters to the x registers (reg[]).
     */
    tmp = args;
    arity = 0;
    while (is_list(tmp)) {
	if (arity < MAX_REG-1) {
	    reg[arity++] = CAR(list_val(tmp));
	    tmp = CDR(list_val(tmp));
	} else {
	    p->freason = SYSTEM_LIMIT;
	    return NULL;
	}
    }

    if (is_not_nil(tmp)) {	/* Must be well-formed list */
	p->freason = EXC_UNDEF;
	return NULL;
    }
    reg[arity] = fun;
    return call_fun(p, arity, reg, args);
}

static Eterm
new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free)
{
    unsigned needed = ERL_FUN_SIZE + num_free;
    ErlFunThing* funp;
    Eterm* hp;
    int i;

    if (HEAP_LIMIT(p) - HEAP_TOP(p) <= needed) {
	PROCESS_MAIN_CHK_LOCKS(p);
	erts_garbage_collect(p, needed, reg, num_free);
	ERTS_VERIFY_UNUSED_TEMP_ALLOC(p);
	PROCESS_MAIN_CHK_LOCKS(p);
    }
    hp = p->htop;
    p->htop = hp + needed;
    funp = (ErlFunThing *) hp;
    hp = funp->env;
    erts_refc_inc(&fe->refc, 2);
    funp->thing_word = HEADER_FUN;
    funp->next = MSO(p).first;
    MSO(p).first = (struct erl_off_heap_header*) funp;
    funp->fe = fe;
    funp->num_free = num_free;
    funp->creator = p->common.id;
#ifdef HIPE
    funp->native_address = fe->native_address;
#endif
    funp->arity = (int)fe->address[-1] - num_free;
    for (i = 0; i < num_free; i++) {
	*hp++ = reg[i];
    }
    return make_fun(funp);
}
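
/*
 * Illustrative note (comment only): for fun(X) -> X + Y end with one
 * free variable Y, new_fun() is reached with num_free == 1 and
 * reg[0] == Y; the resulting ErlFunThing stores Y in funp->env[0] and
 * has funp->arity == 1, since the free variables are appended after
 * the declared parameters when the fun is eventually called (see
 * call_fun() above).
 */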

static Eterm get_map_element(Eterm map, Eterm key)
{
    Uint32 hx;
    const Eterm *vs;

    if (is_flatmap(map)) {
	flatmap_t *mp;
	Eterm *ks;
	Uint i;
	Uint n;

	mp = (flatmap_t *)flatmap_val(map);
	ks = flatmap_get_keys(mp);
	vs = flatmap_get_values(mp);
	n  = flatmap_get_size(mp);
	if (is_immed(key)) {
	    for (i = 0; i < n; i++) {
		if (ks[i] == key) {
		    return vs[i];
		}
	    }
	} else {
	    for (i = 0; i < n; i++) {
		if (EQ(ks[i], key)) {
		    return vs[i];
		}
	    }
	}
	return THE_NON_VALUE;
    }
    ASSERT(is_hashmap(map));
    hx = hashmap_make_hash(key);
    vs = erts_hashmap_get(hx, key, map);
    return vs ? *vs : THE_NON_VALUE;
}
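
/*
 * Illustrative sketch (disabled, not part of the emulator): both map
 * representations are probed through the same entry point. Small maps
 * are scanned linearly, with a pointer-equality fast path for
 * immediate keys; large maps go through the HAMT lookup.
 */
#if 0
static int
example_map_has_key(Eterm map, Eterm key)
{
    return !is_non_value(get_map_element(map, key));
}
#endif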

static Eterm get_map_element_hash(Eterm map, Eterm key, Uint32 hx)
{
    const Eterm *vs;

    if (is_flatmap(map)) {
	flatmap_t *mp;
	Eterm *ks;
	Uint i;
	Uint n;

	mp = (flatmap_t *)flatmap_val(map);
	ks = flatmap_get_keys(mp);
	vs = flatmap_get_values(mp);
	n  = flatmap_get_size(mp);
	if (is_immed(key)) {
	    for (i = 0; i < n; i++) {
		if (ks[i] == key) {
		    return vs[i];
		}
	    }
	} else {
	    for (i = 0; i < n; i++) {
		if (EQ(ks[i], key)) {
		    return vs[i];
		}
	    }
	}
	return THE_NON_VALUE;
    }
    ASSERT(is_hashmap(map));
    ASSERT(hx == hashmap_make_hash(key));
    vs = erts_hashmap_get(hx, key, map);
    return vs ? *vs : THE_NON_VALUE;
}

#define GET_TERM(term, dest) \
do { \
    Eterm src = (Eterm)(term); \
    switch (src & _TAG_IMMED1_MASK) { \
    case (R_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER: \
	dest = x(0); \
	break; \
    case (X_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER: \
	dest = x(src >> _TAG_IMMED1_SIZE); \
	break; \
    case (Y_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER: \
	dest = y(src >> _TAG_IMMED1_SIZE); \
	break; \
    default: \
	dest = src; \
	break; \
    } \
} while(0)
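
/*
 * Illustrative note (comment only): GET_TERM decodes one packed
 * operand of the map-building instructions below. An operand tagged
 * as an x or y register is replaced by that register's current
 * contents; an operand that is already an immediate term (an atom or
 * a small integer, say) is used as-is.
 */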

static Eterm
new_map(Process* p, Eterm* reg, BeamInstr* I)
{
    Uint n = Arg(3);
    Uint i;
    Uint need = n + 1 /* hdr */ + 1 /* size */ + 1 /* ptr */ + 1 /* arity */;
    Eterm keys;
    Eterm *mhp,*thp;
    Eterm *E;
    BeamInstr *ptr;
    flatmap_t *mp;
    ErtsHeapFactory factory;

    ptr = &Arg(4);

    if (n > 2*MAP_SMALL_MAP_LIMIT) {
	Eterm res;
	if (HeapWordsLeft(p) < n) {
	    erts_garbage_collect(p, n, reg, Arg(2));
	}

	mhp = p->htop;
	thp = p->htop;
	E   = p->stop;

	for (i = 0; i < n/2; i++) {
	    GET_TERM(*ptr++, *mhp++);
	    GET_TERM(*ptr++, *mhp++);
	}

	p->htop = mhp;

	erts_factory_proc_init(&factory, p);
	res = erts_hashmap_from_array(&factory, thp, n/2, 0);
	erts_factory_close(&factory);
	if (p->mbuf) {
	    Uint live = Arg(2);
	    reg[live] = res;
	    erts_garbage_collect(p, 0, reg, live+1);
	    res       = reg[live];
	    E         = p->stop;
	}
	return res;
    }

    if (HeapWordsLeft(p) < need) {
	erts_garbage_collect(p, need, reg, Arg(2));
    }

    thp    = p->htop;
    mhp    = thp + 1 + n/2;
    E      = p->stop;
    keys   = make_tuple(thp);
    *thp++ = make_arityval(n/2);

    mp = (flatmap_t *)mhp; mhp += MAP_HEADER_FLATMAP_SZ;
    mp->thing_word = MAP_HEADER_FLATMAP;
    mp->size = n/2;
    mp->keys = keys;

    for (i = 0; i < n/2; i++) {
	GET_TERM(*ptr++, *thp++);
	GET_TERM(*ptr++, *mhp++);
    }
    p->htop = mhp;
    return make_flatmap(mp);
}
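
/*
 * Illustrative note (comment only, describing the expected compiler
 * contract): a construction such as #{a => 1, b => 2} compiles to a
 * new_map instruction whose operand list holds the key/value operands
 * in order. With few enough pairs the small-map path above builds a
 * flatmap, relying on the compiler to have emitted the keys in sorted
 * order; anything above MAP_SMALL_MAP_LIMIT pairs is handed to
 * erts_hashmap_from_array() instead.
 */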

static Eterm
update_map_assoc(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
{
    Uint n;
    Uint num_old;
    Uint num_updates;
    Uint need;
    flatmap_t *old_mp, *mp;
    Eterm res;
    Eterm* hp;
    Eterm* E;
    Eterm* old_keys;
    Eterm* old_vals;
    BeamInstr* new_p;
    Eterm new_key;
    Eterm* kp;

    new_p = &Arg(5);
    num_updates = Arg(4) / 2;

    if (is_not_flatmap(map)) {
	Uint32 hx;
	Eterm val;

	/* apparently the compiler does not emit is_map instructions,
	 * bad compiler */

	if (is_not_hashmap(map))
	    return THE_NON_VALUE;

	res = map;
	E = p->stop;
	while(num_updates--) {
	    /* assoc can't fail */
	    GET_TERM(new_p[0], new_key);
	    GET_TERM(new_p[1], val);
	    hx = hashmap_make_hash(new_key);

	    res = erts_hashmap_insert(p, hx, new_key, val, res, 0);
	    if (p->mbuf) {
		Uint live = Arg(3);
		reg[live] = res;
		erts_garbage_collect(p, 0, reg, live+1);
		res       = reg[live];
		E         = p->stop;
	    }

	    new_p += 2;
	}
	return res;
    }

    old_mp  = (flatmap_t *) flatmap_val(map);
    num_old = flatmap_get_size(old_mp);

    /*
     * If the old map is empty, create a new map.
     */

    if (num_old == 0) {
	return new_map(p, reg, I+1);
    }

    /*
     * Allocate heap space for the worst case (i.e. all keys in the
     * update list are new).
     */

    need = 2*(num_old+num_updates) + 1 + MAP_HEADER_FLATMAP_SZ;
    if (HeapWordsLeft(p) < need) {
	Uint live = Arg(3);
	reg[live] = map;
	erts_garbage_collect(p, need, reg, live+1);
	map      = reg[live];
	old_mp   = (flatmap_t *)flatmap_val(map);
    }

    /*
     * Build the skeleton for the map, ready to be filled in.
     *
     * +-----------------------------------+
     * | (Space for arityval for keys)     | <-----------+
     * +-----------------------------------+             |
     * | (Space for key 1)                 |             |    <-- kp
     * +-----------------------------------+             |
     *        .                                          |
     *        .                                          |
     *        .                                          |
     * +-----------------------------------+             |
     * | (Space for last key)              |             |
     * +-----------------------------------+             |
     * | MAP_HEADER                        |             |
     * +-----------------------------------+             |
     * | (Space for number of keys/values) |             |
     * +-----------------------------------+             |
     * | Boxed tuple pointer            >----------------+
     * +-----------------------------------+
     * | (Space for value 1)               |                  <-- hp
     * +-----------------------------------+
     */

    E = p->stop;
    kp = p->htop + 1;		/* Point to first key */
    hp = kp + num_old + num_updates;

    res = make_flatmap(hp);
    mp = (flatmap_t *)hp;
    hp += MAP_HEADER_FLATMAP_SZ;
    mp->thing_word = MAP_HEADER_FLATMAP;
    mp->keys = make_tuple(kp-1);

    old_vals = flatmap_get_values(old_mp);
    old_keys = flatmap_get_keys(old_mp);

    GET_TERM(*new_p, new_key);
    n = num_updates;

    /*
     * Fill in keys and values, until we run out of either updates
     * or old values and keys.
     */
    for (;;) {
	Eterm key;
	Sint c;

	ASSERT(kp < (Eterm *)mp);
	key = *old_keys;
	if ((c = CMP_TERM(key, new_key)) < 0) {
	    /* Copy old key and value */
	    *kp++ = key;
	    *hp++ = *old_vals;
	    old_keys++, old_vals++, num_old--;
	} else {		/* Replace or insert new */
	    GET_TERM(new_p[1], *hp++);
	    if (c > 0) {	/* If the new key was not in the old map */
		*kp++ = new_key;
	    } else {		/* If replacement */
		*kp++ = key;
		old_keys++, old_vals++, num_old--;
	    }
	    n--;
	    if (n == 0) {
		break;
	    } else {
		new_p += 2;
		GET_TERM(*new_p, new_key);
	    }
	}
	if (num_old == 0) {
	    break;
	}
    }

    /*
     * At this point, we have run out of either old keys and values,
     * or the update list. In other words, at least one of n and
     * num_old must be zero.
     */

    if (n > 0) {
	/*
	 * All old keys and values have been copied, but there
	 * are still new keys and values in the update list that
	 * must be copied.
	 */
	ASSERT(num_old == 0);
	while (n-- > 0) {
	    GET_TERM(new_p[0], *kp++);
	    GET_TERM(new_p[1], *hp++);
	    new_p += 2;
	}
    } else {
	/*
	 * All updates are now done. We may still have old
	 * keys and values that we must copy.
	 */
	ASSERT(n == 0);
	while (num_old-- > 0) {
	    ASSERT(kp < (Eterm *)mp);
	    *kp++ = *old_keys++;
	    *hp++ = *old_vals++;
	}
    }

    /*
     * Calculate how many slots remain unused at the end of the
     * key tuple and cover them with a bignum header, so that they
     * are skipped as a single valid term.
     */
    if ((n = (Eterm *)mp - kp) > 0) {
	*kp = make_pos_bignum_header(n-1);
    }

    /*
     * Fill in the size of the map in both the key tuple and in the map.
     */

    n = kp - p->htop - 1;	/* Actual number of keys/values */
    *p->htop = make_arityval(n);
    p->htop  = hp;
    mp->size = n;

    /* The expensive case, need to build a hashmap */
    if (n > MAP_SMALL_MAP_LIMIT) {
	res = erts_hashmap_from_ks_and_vs(p,flatmap_get_keys(mp),flatmap_get_values(mp),n);
	if (p->mbuf) {
	    Uint live = Arg(3);
	    reg[live] = res;
	    erts_garbage_collect(p, 0, reg, live+1);
	    res       = reg[live];
	}
    }
    return res;
}
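
/*
 * Illustrative note (comment only): evaluating Map#{K => V} compiles
 * to an update_map_assoc instruction. For flatmaps the merge above is
 * a single linear pass over the old sorted key tuple and the update
 * list (which the CMP_TERM-based merge assumes is sorted as well), so
 * the cost is O(num_old + num_updates) rather than one full lookup
 * per updated key.
 */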

/*
 * Update values for keys that already exist in the map.
 */

static Eterm
update_map_exact(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
{
    Uint n;
    Uint i;
    Uint num_old;
    Uint need;
    flatmap_t *old_mp, *mp;
    Eterm res;
    Eterm* hp;
    Eterm* E;
    Eterm* old_keys;
    Eterm* old_vals;
    BeamInstr* new_p;
    Eterm new_key;

    new_p = &Arg(5);
    n = Arg(4) / 2;		/* Number of values to be updated */
    ASSERT(n > 0);

    if (is_not_flatmap(map)) {
	Uint32 hx;
	Eterm val;

	/* apparently the compiler does not emit is_map instructions,
	 * bad compiler */

	if (is_not_hashmap(map)) {
	    p->freason = BADMAP;
	    p->fvalue = map;
	    return THE_NON_VALUE;
	}

	res = map;
	E = p->stop;
	while(n--) {
	    GET_TERM(new_p[0], new_key);
	    GET_TERM(new_p[1], val);
	    hx = hashmap_make_hash(new_key);

	    res = erts_hashmap_insert(p, hx, new_key, val, res, 1);
	    if (is_non_value(res)) {
		p->fvalue = new_key;
		p->freason = BADKEY;
		return res;
	    }

	    if (p->mbuf) {
		Uint live = Arg(3);
		reg[live] = res;
		erts_garbage_collect(p, 0, reg, live+1);
		res       = reg[live];
		E         = p->stop;
	    }

	    new_p += 2;
	}
	return res;
    }

    old_mp = (flatmap_t *) flatmap_val(map);
    num_old = flatmap_get_size(old_mp);

    /*
     * If the old map is empty, fail.
     */

    if (num_old == 0) {
	E = p->stop;
	p->freason = BADKEY;
	GET_TERM(new_p[0], p->fvalue);
	return THE_NON_VALUE;
    }

    /*
     * Allocate the exact heap space needed.
     */

    need = num_old + MAP_HEADER_FLATMAP_SZ;
    if (HeapWordsLeft(p) < need) {
	Uint live = Arg(3);
	reg[live] = map;
	erts_garbage_collect(p, need, reg, live+1);
	map      = reg[live];
	old_mp   = (flatmap_t *)flatmap_val(map);
    }

    /*
     * Update map, keeping the old key tuple.
     */

    hp = p->htop;
    E = p->stop;
    old_vals = flatmap_get_values(old_mp);
    old_keys = flatmap_get_keys(old_mp);

    res = make_flatmap(hp);
    mp = (flatmap_t *)hp;
    hp += MAP_HEADER_FLATMAP_SZ;
    mp->thing_word = MAP_HEADER_FLATMAP;
    mp->size = num_old;
    mp->keys = old_mp->keys;

    /* Get array of key/value pairs to be updated */
    GET_TERM(*new_p, new_key);

    /* Update all values */
    for (i = 0; i < num_old; i++) {
	if (!EQ(*old_keys, new_key)) {
	    /* Not same keys */
	    *hp++ = *old_vals;
	} else {
	    GET_TERM(new_p[1], *hp);
	    hp++;
	    n--;
	    if (n == 0) {
		/*
		 * All updates done. Copy remaining values
		 * and return the result.
		 */
		for (i++, old_vals++; i < num_old; i++) {
		    *hp++ = *old_vals++;
		}
		ASSERT(hp == p->htop + need);
		p->htop = hp;
		return res;
	    } else {
		new_p += 2;
		GET_TERM(*new_p, new_key);
	    }
	}
	old_vals++, old_keys++;
    }

    /*
     * Updates left. That means that at least one of the keys in the
     * update list did not previously exist.
     */
    ASSERT(hp == p->htop + need);
    p->freason = BADKEY;
    p->fvalue = new_key;
    return THE_NON_VALUE;
}
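
/*
 * Illustrative note (comment only): Map#{K := V} compiles to an
 * update_map_exact instruction. Unlike the assoc form it reuses the
 * old key tuple (only the value array is copied), and it raises
 * {badkey, K} if any key in the update list is absent, or
 * {badmap, Term} if the subject is not a map at all.
 */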

#undef GET_TERM

int catchlevel(Process *p)
{
    return p->catches;
}

/*
 * Check if the given function is built-in (i.e. a BIF implemented in C).
 *
 * Returns 0 if not built-in, and a non-zero value if built-in.
 */
int
erts_is_builtin(Eterm Mod, Eterm Name, int arity)
{
    Export e;
    Export* ep;

    e.code[0] = Mod;
    e.code[1] = Name;
    e.code[2] = arity;

    if ((ep = export_get(&e)) == NULL) {
	return 0;
    }
    return ep->addressv[erts_active_code_ix()] == ep->code+3
	&& (ep->code[3] == (BeamInstr) em_apply_bif);
}
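
/*
 * Illustrative sketch (disabled, not part of the emulator): probing
 * for a BIF. The atoms used here are assumed to be available as
 * preloaded am_* constants.
 */
#if 0
static int
example_is_builtin(void)
{
    /* Non-zero for a C-implemented function such as erlang:node/0. */
    return erts_is_builtin(am_erlang, am_node, 0);
}
#endif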

/*
 * Return the current number of reductions for the given process.
 * To get the total number of reductions, p->reds must be added.
 */
Uint
erts_current_reductions(Process *current, Process *p)
{
    if (current != p) {
	return 0;
    } else if (current->fcalls < 0 && ERTS_PROC_GET_SAVED_CALLS_BUF(current)) {
	return -current->fcalls;
    } else {
	return REDS_IN(current) - current->fcalls;
    }
}
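
/*
 * Illustrative note (comment only): fcalls counts down from the
 * reduction budget the process was scheduled in with, so the work
 * done so far in this time slice is REDS_IN(p) - fcalls. When call
 * saving is active the emulator instead counts fcalls down from zero,
 * hence the negation in the middle branch above.
 */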

int
erts_beam_jump_table(void)
{
#if defined(NO_JUMP_TABLE)
    return 0;
#else
    return 1;
#endif
}