PageRenderTime 63ms CodeModel.GetById 18ms RepoModel.GetById 0ms app.codeStats 0ms

/erts/emulator/beam/erl_alloc.c

https://github.com/misterXavier/otp
C | 3192 lines | 2749 code | 361 blank | 82 comment | 479 complexity | 1e3753b3e78a88fc8646c9d35b6d1dfd MD5 | raw file
Possible License(s): LGPL-2.1, MPL-2.0-no-copyleft-exception, BSD-2-Clause
  1. /*
  2. * %CopyrightBegin%
  3. *
  4. * Copyright Ericsson AB 2002-2010. All Rights Reserved.
  5. *
  6. * The contents of this file are subject to the Erlang Public License,
  7. * Version 1.1, (the "License"); you may not use this file except in
  8. * compliance with the License. You should have received a copy of the
  9. * Erlang Public License along with this software. If not, it can be
  10. * retrieved online at http://www.erlang.org/.
  11. *
  12. * Software distributed under the License is distributed on an "AS IS"
  13. * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
  14. * the License for the specific language governing rights and limitations
  15. * under the License.
  16. *
  17. * %CopyrightEnd%
  18. */
  19. /*
  20. * Description: Management of memory allocators.
  21. *
  22. * Author: Rickard Green
  23. */
  24. #ifdef HAVE_CONFIG_H
  25. # include "config.h"
  26. #endif
  27. #define ERTS_ALLOC_C__
  28. #define ERTS_ALC_INTERNAL__
  29. #include "sys.h"
  30. #define ERL_THREADS_EMU_INTERNAL__
  31. #include "erl_threads.h"
  32. #include "global.h"
  33. #include "erl_db.h"
  34. #include "erl_binary.h"
  35. #include "erl_bits.h"
  36. #include "erl_instrument.h"
  37. #include "erl_mseg.h"
  38. #ifdef ELIB_ALLOC_IS_CLIB
  39. #include "erl_version.h"
  40. #endif
  41. #include "erl_monitors.h"
  42. #include "erl_bif_timer.h"
  43. #if defined(ERTS_ALC_T_DRV_SEL_D_STATE) || defined(ERTS_ALC_T_DRV_EV_D_STATE)
  44. #include "erl_check_io.h"
  45. #endif
  46. #define GET_ERL_GF_ALLOC_IMPL
  47. #include "erl_goodfit_alloc.h"
  48. #define GET_ERL_BF_ALLOC_IMPL
  49. #include "erl_bestfit_alloc.h"
  50. #define GET_ERL_AF_ALLOC_IMPL
  51. #include "erl_afit_alloc.h"
  52. #define ERTS_ALC_DEFAULT_MAX_THR_PREF 16
  53. #if defined(SMALL_MEMORY) || defined(PURIFY) || defined(VALGRIND)
  54. #define AU_ALLOC_DEFAULT_ENABLE(X) 0
  55. #else
  56. #define AU_ALLOC_DEFAULT_ENABLE(X) (X)
  57. #endif
  58. #ifdef DEBUG
  59. static Uint install_debug_functions(void);
  60. #endif
  61. extern void elib_ensure_initialized(void);
  62. ErtsAllocatorFunctions_t erts_allctrs[ERTS_ALC_A_MAX+1];
  63. ErtsAllocatorInfo_t erts_allctrs_info[ERTS_ALC_A_MAX+1];
  64. ErtsAllocatorThrSpec_t erts_allctr_thr_spec[ERTS_ALC_A_MAX+1];
  65. #define ERTS_MIN(A, B) ((A) < (B) ? (A) : (B))
  66. #define ERTS_MAX(A, B) ((A) > (B) ? (A) : (B))
/*
 * Backing storage for one allocator instance.  A union over all three
 * allocator-state types, each paired with a char array that pads the
 * union to a cache-line-aligned size (ERTS_ALC_CACHE_LINE_ALIGN_SIZE),
 * so instances laid out in an array start on separate cache lines.
 */
typedef union {
    GFAllctr_t gfa;   /* goodfit state */
    char align_gfa[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(GFAllctr_t))];
    BFAllctr_t bfa;   /* bestfit state */
    char align_bfa[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(BFAllctr_t))];
    AFAllctr_t afa;   /* afit state */
    char align_afa[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(AFAllctr_t))];
} ErtsAllocatorState_t;
  75. static ErtsAllocatorState_t sl_alloc_state;
  76. static ErtsAllocatorState_t std_alloc_state;
  77. static ErtsAllocatorState_t ll_alloc_state;
  78. static ErtsAllocatorState_t temp_alloc_state;
  79. static ErtsAllocatorState_t eheap_alloc_state;
  80. static ErtsAllocatorState_t binary_alloc_state;
  81. static ErtsAllocatorState_t ets_alloc_state;
  82. static ErtsAllocatorState_t driver_alloc_state;
  83. ErtsAlcType_t erts_fix_core_allocator_ix;
  84. #ifdef ERTS_ALC_N_MIN_A_FIXED_SIZE
  85. static void *(*fix_core_allocator)(ErtsAlcType_t, void *, Uint);
  86. static void *fix_core_extra;
  87. static void *fix_core_alloc(Uint size)
  88. {
  89. void *res;
  90. res = (*fix_core_allocator)(ERTS_ALC_T_UNDEF, fix_core_extra, size);
  91. if (erts_mtrace_enabled)
  92. erts_mtrace_crr_alloc(res,
  93. ERTS_ALC_A_FIXED_SIZE,
  94. erts_fix_core_allocator_ix,
  95. size);
  96. return res;
  97. }
  98. #endif
/* Block placement strategies selectable per allocator; each maps to one
 * of the erl_*_alloc backends included above. */
enum allctr_type {
    GOODFIT,   /* erl_goodfit_alloc */
    BESTFIT,   /* erl_bestfit_alloc (address order when init.bf.ao is set) */
    AFIT       /* erl_afit_alloc */
};
/*
 * Per-allocator configuration, filled in from built-in defaults
 * (set_default_*_alloc_opts) and then overridden by +M command line
 * arguments (handle_au_arg).
 */
struct au_init {
    int enable;              /* start this allocator at all? */
    int thr_spec;            /* 0: one shared instance;
                              * > 0: thread-specific instances;
                              * < 0: thread-preferred instances
                              * (sign convention per adjust_tpref() and
                              * set_au_allocator()) */
    enum allctr_type atype;  /* placement strategy */
    struct {
        AllctrInit_t util;   /* common alloc_util parameters */
        GFAllctrInit_t gf;   /* goodfit-specific parameters */
        BFAllctrInit_t bf;   /* bestfit-specific parameters */
        AFAllctrInit_t af;   /* afit-specific parameters */
    } init;
    struct {
        /* Non-zero while the corresponding init.util field still holds
         * its built-in default; cleared by handle_au_arg() when the user
         * sets the value, so adjust_tpref() only scales defaults. */
        int mmbcs;
        int lmbcs;
        int smbcs;
        int mmmbc;
    } default_;
};
/* Aggregate initializer for the four-member `init` sub-struct of
 * struct au_init (util, gf, bf, af defaults in declaration order). */
#define DEFAULT_ALLCTR_INIT { \
ERTS_DEFAULT_ALLCTR_INIT, \
ERTS_DEFAULT_GF_ALLCTR_INIT, \
ERTS_DEFAULT_BF_ALLCTR_INIT, \
ERTS_DEFAULT_AF_ALLCTR_INIT \
}
/*
 * Everything erts_alloc_init()/handle_args() needs gathered in one
 * struct: global knobs plus one au_init per alloc_util allocator.
 */
typedef struct {
    int erts_alloc_config;   /* set when running under erts_alloc_config;
                              * forces thr_spec off (see erts_alloc_init) */
#if HAVE_ERTS_MSEG
    ErtsMsegInit_t mseg;     /* mseg (mmap segment) allocator options */
#endif
    int trim_threshold;      /* passed to sys_alloc_opt() */
    int top_pad;             /* passed to sys_alloc_opt() */
    AlcUInit_t alloc_util;   /* global alloc_util options */
    struct {                 /* instrumentation options */
        int stat;
        int map;
        char *mtrace;        /* mtrace output target */
        char *nodename;
    } instr;
    struct au_init sl_alloc;      /* short-lived data */
    struct au_init std_alloc;     /* standard */
    struct au_init ll_alloc;      /* long-lived data */
    struct au_init temp_alloc;    /* temporary data */
    struct au_init eheap_alloc;   /* Erlang process heaps */
    struct au_init binary_alloc;  /* binaries */
    struct au_init ets_alloc;     /* ETS tables */
    struct au_init driver_alloc;  /* driver data */
} erts_alc_hndl_args_init_t;
/* Baseline au_init value: disabled, not thread-specific, goodfit, all
 * default_ flags set (so adjust_tpref() may still scale the carriers). */
#define ERTS_AU_INIT__ {0, 0, GOODFIT, DEFAULT_ALLCTR_INIT, {1,1,1,1}}
/* Reset *IP to the baseline above (multi-statement do/while(0) macro). */
#define SET_DEFAULT_ALLOC_OPTS(IP) \
do { \
struct au_init aui__ = ERTS_AU_INIT__; \
sys_memcpy((void *) (IP), (void *) &aui__, sizeof(struct au_init)); \
} while (0)
  156. static void
  157. set_default_sl_alloc_opts(struct au_init *ip)
  158. {
  159. SET_DEFAULT_ALLOC_OPTS(ip);
  160. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  161. ip->thr_spec = 1;
  162. ip->atype = GOODFIT;
  163. ip->init.util.name_prefix = "sl_";
  164. ip->init.util.mmmbc = 5;
  165. ip->init.util.alloc_no = ERTS_ALC_A_SHORT_LIVED;
  166. #ifndef SMALL_MEMORY
  167. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  168. #else
  169. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  170. #endif
  171. ip->init.util.ts = ERTS_ALC_MTA_SHORT_LIVED;
  172. ip->init.util.rsbcst = 80;
  173. }
  174. static void
  175. set_default_std_alloc_opts(struct au_init *ip)
  176. {
  177. SET_DEFAULT_ALLOC_OPTS(ip);
  178. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  179. ip->thr_spec = 1;
  180. ip->atype = BESTFIT;
  181. ip->init.util.name_prefix = "std_";
  182. ip->init.util.mmmbc = 5;
  183. ip->init.util.alloc_no = ERTS_ALC_A_STANDARD;
  184. #ifndef SMALL_MEMORY
  185. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  186. #else
  187. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  188. #endif
  189. ip->init.util.ts = ERTS_ALC_MTA_STANDARD;
  190. }
  191. static void
  192. set_default_ll_alloc_opts(struct au_init *ip)
  193. {
  194. SET_DEFAULT_ALLOC_OPTS(ip);
  195. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  196. ip->thr_spec = 0;
  197. ip->atype = BESTFIT;
  198. ip->init.bf.ao = 1;
  199. ip->init.util.ramv = 0;
  200. ip->init.util.mmsbc = 0;
  201. ip->init.util.mmmbc = 0;
  202. ip->init.util.sbct = ~((UWord) 0);
  203. ip->init.util.name_prefix = "ll_";
  204. ip->init.util.alloc_no = ERTS_ALC_A_LONG_LIVED;
  205. #ifndef SMALL_MEMORY
  206. ip->init.util.mmbcs = 2*1024*1024; /* Main carrier size */
  207. #else
  208. ip->init.util.mmbcs = 1*1024*1024; /* Main carrier size */
  209. #endif
  210. ip->init.util.ts = ERTS_ALC_MTA_LONG_LIVED;
  211. ip->init.util.asbcst = 0;
  212. ip->init.util.rsbcst = 0;
  213. ip->init.util.rsbcmt = 0;
  214. ip->init.util.rmbcmt = 0;
  215. }
  216. static void
  217. set_default_temp_alloc_opts(struct au_init *ip)
  218. {
  219. SET_DEFAULT_ALLOC_OPTS(ip);
  220. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  221. ip->thr_spec = 1;
  222. ip->atype = AFIT;
  223. ip->init.util.name_prefix = "temp_";
  224. ip->init.util.alloc_no = ERTS_ALC_A_TEMPORARY;
  225. #ifndef SMALL_MEMORY
  226. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  227. #else
  228. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  229. #endif
  230. ip->init.util.ts = ERTS_ALC_MTA_TEMPORARY;
  231. ip->init.util.rsbcst = 90;
  232. ip->init.util.rmbcmt = 100;
  233. }
  234. static void
  235. set_default_eheap_alloc_opts(struct au_init *ip)
  236. {
  237. SET_DEFAULT_ALLOC_OPTS(ip);
  238. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  239. ip->thr_spec = 1;
  240. ip->atype = GOODFIT;
  241. ip->init.util.mmmbc = 100;
  242. ip->init.util.name_prefix = "eheap_";
  243. ip->init.util.alloc_no = ERTS_ALC_A_EHEAP;
  244. #ifndef SMALL_MEMORY
  245. ip->init.util.mmbcs = 512*1024; /* Main carrier size */
  246. #else
  247. ip->init.util.mmbcs = 256*1024; /* Main carrier size */
  248. #endif
  249. ip->init.util.ts = ERTS_ALC_MTA_EHEAP;
  250. ip->init.util.rsbcst = 50;
  251. }
  252. static void
  253. set_default_binary_alloc_opts(struct au_init *ip)
  254. {
  255. SET_DEFAULT_ALLOC_OPTS(ip);
  256. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  257. ip->thr_spec = 1;
  258. ip->atype = BESTFIT;
  259. ip->init.util.mmmbc = 50;
  260. ip->init.util.name_prefix = "binary_";
  261. ip->init.util.alloc_no = ERTS_ALC_A_BINARY;
  262. #ifndef SMALL_MEMORY
  263. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  264. #else
  265. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  266. #endif
  267. ip->init.util.ts = ERTS_ALC_MTA_BINARY;
  268. }
  269. static void
  270. set_default_ets_alloc_opts(struct au_init *ip)
  271. {
  272. SET_DEFAULT_ALLOC_OPTS(ip);
  273. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  274. ip->thr_spec = 1;
  275. ip->atype = BESTFIT;
  276. ip->init.util.mmmbc = 100;
  277. ip->init.util.name_prefix = "ets_";
  278. ip->init.util.alloc_no = ERTS_ALC_A_ETS;
  279. #ifndef SMALL_MEMORY
  280. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  281. #else
  282. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  283. #endif
  284. ip->init.util.ts = ERTS_ALC_MTA_ETS;
  285. }
  286. static void
  287. set_default_driver_alloc_opts(struct au_init *ip)
  288. {
  289. SET_DEFAULT_ALLOC_OPTS(ip);
  290. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  291. ip->thr_spec = 1;
  292. ip->atype = BESTFIT;
  293. ip->init.util.name_prefix = "driver_";
  294. ip->init.util.alloc_no = ERTS_ALC_A_DRIVER;
  295. #ifndef SMALL_MEMORY
  296. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  297. #else
  298. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  299. #endif
  300. ip->init.util.ts = ERTS_ALC_MTA_DRIVER;
  301. }
  302. #ifdef ERTS_SMP
/*
 * Convert a thread-specific request into a thread-preferred configuration:
 * clamp the instance count to the scheduler count (or the built-in cap
 * ERTS_ALC_DEFAULT_MAX_THR_PREF when the user gave no explicit amount),
 * negate thr_spec to mark "thread preferred", and scale down the carrier
 * size defaults so total memory use does not explode with many instances.
 * Fields the user set explicitly (default_ flag cleared) are left alone.
 */
static void
adjust_tpref(struct au_init *ip, int no_sched)
{
if (ip->thr_spec) {
Uint allocs;
if (ip->thr_spec < 0) {/* User specified amount */
/* NOTE(review): abs() of INT_MIN is undefined behavior, and
 * handle_au_arg() can store INT_MIN here on overflow — confirm. */
allocs = abs(ip->thr_spec);
/* NOTE(review): Uint vs int comparison; relies on no_sched > 0. */
if (allocs > no_sched)
allocs = no_sched;
}
else if (no_sched > ERTS_ALC_DEFAULT_MAX_THR_PREF)
allocs = ERTS_ALC_DEFAULT_MAX_THR_PREF;
else
allocs = no_sched;
if (allocs <= 1)
ip->thr_spec = 0; /* one instance: plain (locked) allocator */
else {
ip->thr_spec = (int) allocs;
ip->thr_spec *= -1; /* thread preferred */
/* If default ... */
/* ... shrink main multi-block carrier size */
if (ip->default_.mmbcs)
ip->init.util.mmbcs /= ERTS_MIN(4, allocs);
/* ... shrink largest multi-block carrier size */
if (ip->default_.lmbcs)
ip->init.util.lmbcs /= ERTS_MIN(2, allocs);
/* ... shrink smallest multi-block carrier size */
if (ip->default_.smbcs)
ip->init.util.smbcs /= ERTS_MIN(4, allocs);
/* ... and with more than two allocators shrink
max mseg multi-block carriers (floor of 3) */
if (ip->default_.mmmbc && allocs > 2) {
ip->init.util.mmmbc /= ERTS_MIN(4, allocs - 1);
if (ip->init.util.mmmbc < 3)
ip->init.util.mmmbc = 3;
}
}
}
}
  342. #endif
  343. static void handle_args(int *, char **, erts_alc_hndl_args_init_t *);
  344. static void
  345. set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init);
  346. static void
  347. start_au_allocator(ErtsAlcType_t alctr_n,
  348. struct au_init *init,
  349. ErtsAllocatorState_t *state);
/* The afit strategy is only suitable for temp_alloc; silently fall back
 * to goodfit for allocators that must not use it (see erts_alloc_init). */
static void
refuse_af_strategy(struct au_init *init)
{
if (init->atype == AFIT)
init->atype = GOODFIT;
}
  356. static void init_thr_ix(int static_ixs);
/*
 * Top-level allocator initialization, called once during emulator boot.
 * Order matters throughout: defaults -> command line -> SMP adjustments
 * -> strategy sanity -> backend init -> install entry points -> start
 * instances -> instrumentation.  The eaiop parameter is accepted but not
 * used in this function.
 */
void
erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
{
    UWord extra_block_size = 0;
    int i;
    erts_alc_hndl_args_init_t init = {
    0, /* erts_alloc_config off by default */
#if HAVE_ERTS_MSEG
    ERTS_MSEG_INIT_DEFAULT_INITIALIZER,
#endif
    ERTS_DEFAULT_TRIM_THRESHOLD,
    ERTS_DEFAULT_TOP_PAD,
    ERTS_DEFAULT_ALCU_INIT
    };
    erts_sys_alloc_init();
    init_thr_ix(erts_no_schedulers);
    erts_init_utils_mem();
    /* Built-in defaults for every alloc_util allocator... */
    set_default_sl_alloc_opts(&init.sl_alloc);
    set_default_std_alloc_opts(&init.std_alloc);
    set_default_ll_alloc_opts(&init.ll_alloc);
    set_default_temp_alloc_opts(&init.temp_alloc);
    set_default_eheap_alloc_opts(&init.eheap_alloc);
    set_default_binary_alloc_opts(&init.binary_alloc);
    set_default_ets_alloc_opts(&init.ets_alloc);
    set_default_driver_alloc_opts(&init.driver_alloc);
    /* ... then user overrides from the +M command line switches. */
    if (argc && argv)
    handle_args(argc, argv, &init);
    /* A single scheduler needs no per-thread instances (temp_alloc is
     * handled separately in the ERTS_SMP/else branches below). */
    if (erts_no_schedulers <= 1) {
    init.sl_alloc.thr_spec = 0;
    init.std_alloc.thr_spec = 0;
    init.ll_alloc.thr_spec = 0;
    init.eheap_alloc.thr_spec = 0;
    init.binary_alloc.thr_spec = 0;
    init.ets_alloc.thr_spec = 0;
    init.driver_alloc.thr_spec = 0;
    }
    if (init.erts_alloc_config) {
    /* Adjust flags that erts_alloc_config won't like */
    init.temp_alloc.thr_spec = 0;
    init.sl_alloc.thr_spec = 0;
    init.std_alloc.thr_spec = 0;
    init.ll_alloc.thr_spec = 0;
    init.eheap_alloc.thr_spec = 0;
    init.binary_alloc.thr_spec = 0;
    init.ets_alloc.thr_spec = 0;
    init.driver_alloc.thr_spec = 0;
    }
#ifdef ERTS_SMP
    /* Only temp_alloc can use thread specific interface */
    if (init.temp_alloc.thr_spec)
    init.temp_alloc.thr_spec = erts_no_schedulers;
    /* Others must use thread preferred interface */
    adjust_tpref(&init.sl_alloc, erts_no_schedulers);
    adjust_tpref(&init.std_alloc, erts_no_schedulers);
    adjust_tpref(&init.ll_alloc, erts_no_schedulers);
    adjust_tpref(&init.eheap_alloc, erts_no_schedulers);
    adjust_tpref(&init.binary_alloc, erts_no_schedulers);
    adjust_tpref(&init.ets_alloc, erts_no_schedulers);
    adjust_tpref(&init.driver_alloc, erts_no_schedulers);
#else
    /* No thread specific if not smp */
    init.temp_alloc.thr_spec = 0;
#endif
    /*
     * The following allocators cannot be run with afit strategy.
     * Make sure they don't...
     */
    refuse_af_strategy(&init.sl_alloc);
    refuse_af_strategy(&init.std_alloc);
    refuse_af_strategy(&init.ll_alloc);
    refuse_af_strategy(&init.eheap_alloc);
    refuse_af_strategy(&init.binary_alloc);
    refuse_af_strategy(&init.ets_alloc);
    refuse_af_strategy(&init.driver_alloc);
#ifdef ERTS_SMP
    /* afit on temp_alloc is only OK when it is thread specific. */
    if (!init.temp_alloc.thr_spec)
    refuse_af_strategy(&init.temp_alloc);
#endif
    /* Initialize the backends before any allocator instance is started. */
    erts_mtrace_pre_init();
#if HAVE_ERTS_MSEG
    erts_mseg_init(&init.mseg);
#endif
    erts_alcu_init(&init.alloc_util);
    erts_afalc_init();
    erts_bfalc_init();
    erts_gfalc_init();
    /* Clear all entry-point tables before installing anything. */
    for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
    erts_allctrs[i].alloc = NULL;
    erts_allctrs[i].realloc = NULL;
    erts_allctrs[i].free = NULL;
    erts_allctrs[i].extra = NULL;
    erts_allctrs_info[i].alloc_util = 0;
    erts_allctrs_info[i].enabled = 0;
    erts_allctrs_info[i].thr_spec = 0;
    erts_allctrs_info[i].extra = NULL;
    }
#ifdef ERTS_ALC_N_MIN_A_FIXED_SIZE
#if !defined(PURIFY) && !defined(VALGRIND)
    erts_allctrs[ERTS_ALC_A_FIXED_SIZE].alloc = erts_fix_alloc;
    erts_allctrs[ERTS_ALC_A_FIXED_SIZE].realloc = erts_fix_realloc;
    erts_allctrs[ERTS_ALC_A_FIXED_SIZE].free = erts_fix_free;
    erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].enabled = 1;
#else
    /* Under purify/valgrind route fix_alloc straight to sys_alloc so the
     * tools can track every block individually. */
    erts_allctrs[ERTS_ALC_A_FIXED_SIZE].alloc = erts_sys_alloc;
    erts_allctrs[ERTS_ALC_A_FIXED_SIZE].realloc = erts_sys_realloc;
    erts_allctrs[ERTS_ALC_A_FIXED_SIZE].free = erts_sys_free;
    erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].enabled = 0;
#endif
#endif
    erts_allctrs[ERTS_ALC_A_SYSTEM].alloc = erts_sys_alloc;
    erts_allctrs[ERTS_ALC_A_SYSTEM].realloc = erts_sys_realloc;
    erts_allctrs[ERTS_ALC_A_SYSTEM].free = erts_sys_free;
    erts_allctrs_info[ERTS_ALC_A_SYSTEM].enabled = 1;
    /* Install entry points for each alloc_util allocator. */
    set_au_allocator(ERTS_ALC_A_TEMPORARY, &init.temp_alloc);
    set_au_allocator(ERTS_ALC_A_SHORT_LIVED, &init.sl_alloc);
    set_au_allocator(ERTS_ALC_A_STANDARD, &init.std_alloc);
    set_au_allocator(ERTS_ALC_A_LONG_LIVED, &init.ll_alloc);
    set_au_allocator(ERTS_ALC_A_EHEAP, &init.eheap_alloc);
    set_au_allocator(ERTS_ALC_A_BINARY, &init.binary_alloc);
    set_au_allocator(ERTS_ALC_A_ETS, &init.ets_alloc);
    set_au_allocator(ERTS_ALC_A_DRIVER, &init.driver_alloc);
    /* Sanity check: every allocator slot must be fully populated. */
    for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
    if (!erts_allctrs[i].alloc)
    erl_exit(ERTS_ABORT_EXIT,
    "Missing alloc function for %s\n", ERTS_ALC_A2AD(i));
    if (!erts_allctrs[i].realloc)
    erl_exit(ERTS_ABORT_EXIT,
    "Missing realloc function for %s\n", ERTS_ALC_A2AD(i));
    if (!erts_allctrs[i].free)
    erl_exit(ERTS_ABORT_EXIT,
    "Missing free function for %s\n", ERTS_ALC_A2AD(i));
    }
    sys_alloc_opt(SYS_ALLOC_OPT_TRIM_THRESHOLD, init.trim_threshold);
    sys_alloc_opt(SYS_ALLOC_OPT_TOP_PAD, init.top_pad);
    /* Pick the allocator that backs fix_alloc carriers. */
    if (erts_allctrs_info[ERTS_FIX_CORE_ALLOCATOR].enabled)
    erts_fix_core_allocator_ix = ERTS_FIX_CORE_ALLOCATOR;
    else
    erts_fix_core_allocator_ix = ERTS_ALC_A_SYSTEM;
    erts_mtrace_init(init.instr.mtrace, init.instr.nodename);
    /* Start the allocator instances (temp first; see tspec handling). */
    start_au_allocator(ERTS_ALC_A_TEMPORARY,
    &init.temp_alloc,
    &temp_alloc_state);
    start_au_allocator(ERTS_ALC_A_SHORT_LIVED,
    &init.sl_alloc,
    &sl_alloc_state);
    start_au_allocator(ERTS_ALC_A_STANDARD,
    &init.std_alloc,
    &std_alloc_state);
    start_au_allocator(ERTS_ALC_A_LONG_LIVED,
    &init.ll_alloc,
    &ll_alloc_state);
    start_au_allocator(ERTS_ALC_A_EHEAP,
    &init.eheap_alloc,
    &eheap_alloc_state);
    start_au_allocator(ERTS_ALC_A_BINARY,
    &init.binary_alloc,
    &binary_alloc_state);
    start_au_allocator(ERTS_ALC_A_ETS,
    &init.ets_alloc,
    &ets_alloc_state);
    start_au_allocator(ERTS_ALC_A_DRIVER,
    &init.driver_alloc,
    &driver_alloc_state);
    /* Must be read after start_au_allocator() has filled in .extra. */
    fix_core_allocator = erts_allctrs[erts_fix_core_allocator_ix].alloc;
    fix_core_extra = erts_allctrs[erts_fix_core_allocator_ix].extra;
    erts_mtrace_install_wrapper_functions();
    extra_block_size += erts_instr_init(init.instr.stat, init.instr.map);
#ifdef DEBUG
    extra_block_size += install_debug_functions();
#endif
#ifdef ERTS_ALC_N_MIN_A_FIXED_SIZE
    erts_init_fix_alloc(extra_block_size, fix_core_alloc);
#if !defined(PURIFY) && !defined(VALGRIND)
    /* Register the fixed block size for every fix_alloc-managed type. */
    erts_set_fix_size(ERTS_ALC_T_PROC, sizeof(Process));
    erts_set_fix_size(ERTS_ALC_T_DB_TABLE, sizeof(DbTable));
    erts_set_fix_size(ERTS_ALC_T_ATOM, sizeof(Atom));
    erts_set_fix_size(ERTS_ALC_T_EXPORT, sizeof(Export));
    erts_set_fix_size(ERTS_ALC_T_MODULE, sizeof(Module));
    erts_set_fix_size(ERTS_ALC_T_REG_PROC, sizeof(RegProc));
    erts_set_fix_size(ERTS_ALC_T_MONITOR_SH, ERTS_MONITOR_SH_SIZE*sizeof(Uint));
    erts_set_fix_size(ERTS_ALC_T_NLINK_SH, ERTS_LINK_SH_SIZE*sizeof(Uint));
    erts_set_fix_size(ERTS_ALC_T_FUN_ENTRY, sizeof(ErlFunEntry));
#ifdef ERTS_ALC_T_DRV_EV_D_STATE
    erts_set_fix_size(ERTS_ALC_T_DRV_EV_D_STATE,
    sizeof(ErtsDrvEventDataState));
#endif
#ifdef ERTS_ALC_T_DRV_SEL_D_STATE
    erts_set_fix_size(ERTS_ALC_T_DRV_SEL_D_STATE,
    sizeof(ErtsDrvSelectDataState));
#endif
#endif
#endif
}
/*
 * Install the alloc/realloc/free entry points for allocator alctr_n
 * according to its au_init configuration.  Chooses between the plain,
 * thread-safe (ts), thread-specific and thread-preferred alloc_util
 * entry points.  Does not create allocator instances —
 * start_au_allocator() does that afterwards.
 */
static void
set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init)
{
ErtsAllocatorFunctions_t *af = &erts_allctrs[alctr_n];
ErtsAllocatorInfo_t *ai = &erts_allctrs_info[alctr_n];
ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[alctr_n];
/* Disabled allocators fall straight through to sys_alloc. */
if (!init->enable) {
af->alloc = erts_sys_alloc;
af->realloc = erts_sys_realloc;
af->free = erts_sys_free;
af->extra = NULL;
ai->alloc_util = 0;
ai->enabled = 0;
ai->extra = NULL;
return;
}
tspec->enabled = 0;
tspec->all_thr_safe = 0;
ai->thr_spec = 0;
#ifdef USE_THREADS
if (init->thr_spec) {
if (init->thr_spec > 0) {
/* thread specific: one instance per thread, no locking */
af->alloc = erts_alcu_alloc_thr_spec;
if (init->init.util.ramv)
af->realloc = erts_alcu_realloc_mv_thr_spec;
else
af->realloc = erts_alcu_realloc_thr_spec;
af->free = erts_alcu_free_thr_spec;
}
else {
/* thread preferred (thr_spec < 0): any thread may touch any
 * instance, so all instances must be thread safe */
af->alloc = erts_alcu_alloc_thr_pref;
if (init->init.util.ramv)
af->realloc = erts_alcu_realloc_mv_thr_pref;
else
af->realloc = erts_alcu_realloc_thr_pref;
af->free = erts_alcu_free_thr_pref;
tspec->all_thr_safe = 1;
}
tspec->enabled = 1;
tspec->size = abs(init->thr_spec) + 1; /* +1 for the shared slot 0 */
ai->thr_spec = tspec->size;
}
else if (init->init.util.ts) {
/* single locked (thread safe) instance */
af->alloc = erts_alcu_alloc_ts;
if (init->init.util.ramv)
af->realloc = erts_alcu_realloc_mv_ts;
else
af->realloc = erts_alcu_realloc_ts;
af->free = erts_alcu_free_ts;
}
else
#endif
{
/* single unlocked instance */
af->alloc = erts_alcu_alloc;
if (init->init.util.ramv)
af->realloc = erts_alcu_realloc_mv;
else
af->realloc = erts_alcu_realloc;
af->free = erts_alcu_free;
}
af->extra = NULL;
ai->alloc_util = 1;
ai->enabled = 1;
}
/*
 * Create and start the allocator instance(s) for alctr_n.  For plain
 * allocators a single instance lives in the statically allocated *state.
 * For thr_spec/thr_pref allocators, a pointer table plus tspec->size - 1
 * extra instances are carved out of one sys_alloc block (cache-line
 * aligned); slot 0 is the shared/fallback instance (unused — NULL — for
 * thread preferred).  On success the started instance(s) end up in
 * af->extra / ai->extra.
 */
static void
start_au_allocator(ErtsAlcType_t alctr_n,
struct au_init *init,
ErtsAllocatorState_t *state)
{
int i;
int size = 1;
void *as0;
enum allctr_type atype;
ErtsAllocatorFunctions_t *af = &erts_allctrs[alctr_n];
ErtsAllocatorInfo_t *ai = &erts_allctrs_info[alctr_n];
ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[alctr_n];
if (!init->enable)
return;
if (init->thr_spec) {
/* One block holds the pointer table (tspec->size + 1 entries),
 * the per-thread instance states, and cache-line alignment slack. */
void *states = erts_sys_alloc(0,
NULL,
((sizeof(Allctr_t *)
* (tspec->size + 1))
+ (sizeof(ErtsAllocatorState_t)
* tspec->size)
+ ERTS_CACHE_LINE_SIZE - 1));
if (!states)
erl_exit(ERTS_ABORT_EXIT,
"Failed to allocate allocator states for %salloc\n",
init->init.util.name_prefix);
tspec->allctr = (Allctr_t **) states;
states = ((char *) states) + sizeof(Allctr_t *) * (tspec->size + 1);
/* Round the instance array up to the next cache-line boundary. */
states = ((((UWord) states) & ERTS_CACHE_LINE_MASK)
? (void *) ((((UWord) states) & ~ERTS_CACHE_LINE_MASK)
+ ERTS_CACHE_LINE_SIZE)
: (void *) states);
/* Slot 0: shared instance for thread specific (>0); unused (NULL)
 * for thread preferred (<0). */
tspec->allctr[0] = init->thr_spec > 0 ? (Allctr_t *) state : (Allctr_t *) NULL;
size = tspec->size;
for (i = 1; i < size; i++)
tspec->allctr[i] = (Allctr_t *)
&((ErtsAllocatorState_t *) states)[i-1];
}
for (i = 0; i < size; i++) {
void *as;
atype = init->atype;
if (!init->thr_spec)
as0 = state;
else {
as0 = (void *) tspec->allctr[i];
if (!as0)
continue; /* thread preferred: no shared slot-0 instance */
if (i == 0) {
/* The shared instance must be thread safe and cannot be afit. */
if (atype == AFIT)
atype = GOODFIT;
init->init.util.ts = 1;
}
else {
if (init->thr_spec < 0) {
/* thread preferred: locked instances, tpref = count */
init->init.util.ts = 1;
init->init.util.tspec = 0;
init->init.util.tpref = -1*init->thr_spec;
}
else {
/* thread specific: unlocked, tspec = count + 1 */
init->init.util.ts = 0;
init->init.util.tspec = init->thr_spec + 1;
init->init.util.tpref = 0;
}
}
}
switch (atype) {
case GOODFIT:
as = (void *) erts_gfalc_start((GFAllctr_t *) as0,
&init->init.gf,
&init->init.util);
break;
case BESTFIT:
as = (void *) erts_bfalc_start((BFAllctr_t *) as0,
&init->init.bf,
&init->init.util);
break;
case AFIT:
as = (void *) erts_afalc_start((AFAllctr_t *) as0,
&init->init.af,
&init->init.util);
break;
default:
as = NULL;
ASSERT(0);
}
if (!as)
erl_exit(ERTS_ABORT_EXIT,
"Failed to start %salloc\n", init->init.util.name_prefix);
ASSERT(as == (void *) as0);
af->extra = as;
}
if (init->thr_spec) {
/* For multi-instance allocators extra points at the tspec struct. */
af->extra = tspec;
init->init.util.ts = 1;
}
ai->extra = af->extra;
}
  711. static void bad_param(char *param_start, char *param_end)
  712. {
  713. size_t len = param_end - param_start;
  714. char param[100];
  715. if (len > 99)
  716. len = 99;
  717. sys_memcpy((void *) param, (void *) param_start, len);
  718. param[len] = '\0';
  719. erts_fprintf(stderr, "bad \"%s\" parameter\n", param);
  720. erts_usage();
  721. }
  722. static void bad_value(char *param_start, char *param_end, char *value)
  723. {
  724. size_t len = param_end - param_start;
  725. char param[100];
  726. if (len > 99)
  727. len = 99;
  728. sys_memcpy((void *) param, (void *) param_start, len);
  729. param[len] = '\0';
  730. erts_fprintf(stderr, "bad \"%s\" value: %s\n", param, value);
  731. erts_usage();
  732. }
  733. /* Get arg marks argument as handled by
  734. putting NULL in argv */
  735. static char *
  736. get_value(char* rest, char** argv, int* ip)
  737. {
  738. char *param = argv[*ip]+1;
  739. argv[*ip] = NULL;
  740. if (*rest == '\0') {
  741. char *next = argv[*ip + 1];
  742. if (next[0] == '-'
  743. && next[1] == '-'
  744. && next[2] == '\0') {
  745. bad_value(param, rest, "");
  746. }
  747. (*ip)++;
  748. argv[*ip] = NULL;
  749. return next;
  750. }
  751. return rest;
  752. }
  753. static ERTS_INLINE int
  754. has_prefix(const char *prefix, const char *string)
  755. {
  756. int i;
  757. for (i = 0; prefix[i]; i++)
  758. if (prefix[i] != string[i])
  759. return 0;
  760. return 1;
  761. }
  762. static int
  763. get_bool_value(char *param_end, char** argv, int* ip)
  764. {
  765. char *param = argv[*ip]+1;
  766. char *value = get_value(param_end, argv, ip);
  767. if (strcmp(value, "true") == 0)
  768. return 1;
  769. else if (strcmp(value, "false") == 0)
  770. return 0;
  771. else
  772. bad_value(param, param_end, value);
  773. return -1;
  774. }
  775. static Uint
  776. get_kb_value(char *param_end, char** argv, int* ip)
  777. {
  778. Sint tmp;
  779. Uint max = ((~((Uint) 0))/1024) + 1;
  780. char *rest;
  781. char *param = argv[*ip]+1;
  782. char *value = get_value(param_end, argv, ip);
  783. errno = 0;
  784. tmp = (Sint) strtol(value, &rest, 10);
  785. if (errno != 0 || rest == value || tmp < 0 || max < ((Uint) tmp))
  786. bad_value(param, param_end, value);
  787. if (max == (Uint) tmp)
  788. return ~((Uint) 0);
  789. else
  790. return ((Uint) tmp)*1024;
  791. }
  792. static Uint
  793. get_amount_value(char *param_end, char** argv, int* ip)
  794. {
  795. Sint tmp;
  796. char *rest;
  797. char *param = argv[*ip]+1;
  798. char *value = get_value(param_end, argv, ip);
  799. errno = 0;
  800. tmp = (Sint) strtol(value, &rest, 10);
  801. if (errno != 0 || rest == value || tmp < 0)
  802. bad_value(param, param_end, value);
  803. return (Uint) tmp;
  804. }
  805. static int
  806. get_bool_or_possitive_amount_value(int *bool, Uint *amount,
  807. char *param_end, char** argv, int* ip)
  808. {
  809. char *param = argv[*ip]+1;
  810. char *value = get_value(param_end, argv, ip);
  811. if (strcmp(value, "true") == 0) {
  812. *bool = 1;
  813. return 1;
  814. }
  815. else if (strcmp(value, "false") == 0) {
  816. *bool = 0;
  817. return 1;
  818. }
  819. else {
  820. Sint tmp;
  821. char *rest;
  822. errno = 0;
  823. tmp = (Sint) strtol(value, &rest, 10);
  824. if (errno != 0 || rest == value || tmp <= 0) {
  825. bad_value(param, param_end, value);
  826. return -1;
  827. }
  828. *amount = (Uint) tmp;
  829. return 0;
  830. }
  831. }
/*
 * Handle one +M<X><subparam> alloc_util allocator argument for the
 * allocator described by auip.  sub_param points at the sub-parameter
 * name; value parsing (and argv consumption) is delegated to the
 * get_*_value helpers.  Unknown sub-parameters abort via bad_param.
 */
static void
handle_au_arg(struct au_init *auip,
char* sub_param,
char** argv,
int* ip)
{
char *param = argv[*ip]+1;
switch (sub_param[0]) {
case 'a':
/* asbcst: absolute singleblock carrier shrink threshold (KB) */
if(has_prefix("asbcst", sub_param)) {
auip->init.util.asbcst = get_kb_value(sub_param + 6, argv, ip);
}
/* as: allocation strategy (bf | aobf | gf | af) */
else if(has_prefix("as", sub_param)) {
char *alg = get_value(sub_param + 2, argv, ip);
if (strcmp("bf", alg) == 0) {
auip->atype = BESTFIT;
auip->init.bf.ao = 0;
}
else if (strcmp("aobf", alg) == 0) {
auip->atype = BESTFIT;
auip->init.bf.ao = 1; /* address order bestfit */
}
else if (strcmp("gf", alg) == 0) {
auip->atype = GOODFIT;
}
else if (strcmp("af", alg) == 0) {
auip->atype = AFIT;
}
else {
bad_value(param, sub_param + 1, alg);
}
}
else
goto bad_switch;
break;
case 'e':
/* e: enable/disable this allocator */
auip->enable = get_bool_value(sub_param+1, argv, ip);
break;
case 'l':
/* lmbcs: largest multiblock carrier size (KB); clears default flag */
if (has_prefix("lmbcs", sub_param)) {
auip->default_.lmbcs = 0;
auip->init.util.lmbcs = get_kb_value(sub_param + 5, argv, ip);
}
else
goto bad_switch;
break;
case 'm':
/* mbcgs: multiblock carrier growth stages */
if (has_prefix("mbcgs", sub_param)) {
auip->init.util.mbcgs = get_amount_value(sub_param + 5, argv, ip);
}
/* mbsd: max block search depth (goodfit only), floored at 1 */
else if (has_prefix("mbsd", sub_param)) {
auip->init.gf.mbsd = get_amount_value(sub_param + 4, argv, ip);
if (auip->init.gf.mbsd < 1)
auip->init.gf.mbsd = 1;
}
/* mmbcs: main multiblock carrier size (KB); clears default flag */
else if (has_prefix("mmbcs", sub_param)) {
auip->default_.mmbcs = 0;
auip->init.util.mmbcs = get_kb_value(sub_param + 5, argv, ip);
}
/* mmmbc: max mseg multiblock carriers; clears default flag */
else if (has_prefix("mmmbc", sub_param)) {
auip->default_.mmmbc = 0;
auip->init.util.mmmbc = get_amount_value(sub_param + 5, argv, ip);
}
/* mmsbc: max mseg singleblock carriers */
else if (has_prefix("mmsbc", sub_param)) {
auip->init.util.mmsbc = get_amount_value(sub_param + 5, argv, ip);
}
else
goto bad_switch;
break;
case 'r':
/* rsbcmt: relative singleblock carrier move threshold (% capped at 100) */
if(has_prefix("rsbcmt", sub_param)) {
auip->init.util.rsbcmt = get_amount_value(sub_param + 6, argv, ip);
if (auip->init.util.rsbcmt > 100)
auip->init.util.rsbcmt = 100;
}
/* rsbcst: relative singleblock carrier shrink threshold (% capped at 100) */
else if(has_prefix("rsbcst", sub_param)) {
auip->init.util.rsbcst = get_amount_value(sub_param + 6, argv, ip);
if (auip->init.util.rsbcst > 100)
auip->init.util.rsbcst = 100;
}
/* rmbcmt: relative multiblock carrier move threshold (% capped at 100) */
else if (has_prefix("rmbcmt", sub_param)) {
auip->init.util.rmbcmt = get_amount_value(sub_param + 6, argv, ip);
if (auip->init.util.rmbcmt > 100)
auip->init.util.rmbcmt = 100;
}
/* ramv: realloc always moves */
else if (has_prefix("ramv", sub_param)) {
auip->init.util.ramv = get_bool_value(sub_param + 4, argv, ip);
}
else
goto bad_switch;
break;
case 's':
/* sbct: singleblock carrier threshold (KB) */
if(has_prefix("sbct", sub_param)) {
auip->init.util.sbct = get_kb_value(sub_param + 4, argv, ip);
}
/* smbcs: smallest multiblock carrier size (KB); clears default flag */
else if (has_prefix("smbcs", sub_param)) {
auip->default_.smbcs = 0;
auip->init.util.smbcs = get_kb_value(sub_param + 5, argv, ip);
}
else
goto bad_switch;
break;
case 't': {
/* t: thread-specific instances, as bool or positive amount.
 * An amount is stored negated (pending adjust_tpref()); values that
 * overflow int when cast are pinned to INT_MIN. */
Uint no;
int enable;
int res = get_bool_or_possitive_amount_value(&enable,
&no,
sub_param+1,
argv,
ip);
if (res > 0)
auip->thr_spec = enable ? 1 : 0;
else if (res == 0) {
int allocs = (int) no;
if (allocs < 0)
allocs = INT_MIN;
else {
allocs *= -1;
}
auip->thr_spec = allocs;
}
break;
}
default:
bad_switch:
bad_param(param, sub_param);
}
}
/*
 * Parse and consume erts_alloc related "-M..." command line switches.
 *
 * argc/argv: emulator argument vector; handled arguments are marked
 *            NULL and compacted away before returning, so *argc is
 *            updated to the number of remaining (unhandled) arguments.
 * init:      aggregate of per-allocator au_init configurations and
 *            global (mseg/sys_alloc/instrumentation) settings that
 *            this function fills in.
 *
 * Helpers such as get_value()/get_kb_value()/get_amount_value() advance
 * the index i themselves when a switch takes its value in the next
 * argv slot; bad_param()/bad_value() do not return.
 */
static void
handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
{
    /* Table of all alloc_util based allocators, used by switches that
     * apply to every allocator at once ("-Mea ...", "-Mu..."). */
    struct au_init *aui[] = {
        &init->binary_alloc,
        &init->std_alloc,
        &init->ets_alloc,
        &init->eheap_alloc,
        &init->ll_alloc,
        &init->driver_alloc,
        &init->sl_alloc,
        &init->temp_alloc
    };
    int aui_sz = (int) sizeof(aui)/sizeof(aui[0]);
    char *arg;
    char *rest;
    int i, j;

    i = 1;

    ASSERT(argc && argv && init);

    while (i < *argc) {
        if(argv[i][0] == '-') {
            char *param = argv[i]+1;
            switch (argv[i][1]) {
            case 'M':
                /* Allocator switches: third character selects the
                 * allocator (or subsystem) the switch applies to. */
                switch (argv[i][2]) {
                case 'B':       /* binary_alloc */
                    handle_au_arg(&init->binary_alloc, &argv[i][3], argv, &i);
                    break;
                case 'D':       /* std_alloc */
                    handle_au_arg(&init->std_alloc, &argv[i][3], argv, &i);
                    break;
                case 'E':       /* ets_alloc */
                    handle_au_arg(&init->ets_alloc, &argv[i][3], argv, &i);
                    break;
                case 'F': /* fix_alloc */
                    /* Only "-MFe true" is accepted; fix_alloc cannot be
                     * disabled via the command line. */
                    if (has_prefix("e", param+2)) {
                        arg = get_value(param+3, argv, &i);
                        if (strcmp("true", arg) != 0)
                            bad_value(param, param+3, arg);
                    }
                    else
                        bad_param(param, param+2);
                    break;
                case 'H':       /* eheap_alloc */
                    handle_au_arg(&init->eheap_alloc, &argv[i][3], argv, &i);
                    break;
                case 'L':       /* ll_alloc (long lived data) */
                    handle_au_arg(&init->ll_alloc, &argv[i][3], argv, &i);
                    break;
                case 'M':       /* mseg_alloc options */
                    /* When mseg is not compiled in, the value is still
                     * parsed (and i advanced) but the result discarded. */
                    if (has_prefix("amcbf", argv[i]+3)) {
#if HAVE_ERTS_MSEG
                        init->mseg.amcbf =
#endif
                            get_kb_value(argv[i]+8, argv, &i);
                    }
                    else if (has_prefix("rmcbf", argv[i]+3)) {
#if HAVE_ERTS_MSEG
                        init->mseg.rmcbf =
#endif
                            get_amount_value(argv[i]+8, argv, &i);
                    }
                    else if (has_prefix("mcs", argv[i]+3)) {
#if HAVE_ERTS_MSEG
                        init->mseg.mcs =
#endif
                            get_amount_value(argv[i]+6, argv, &i);
                    }
                    else if (has_prefix("cci", argv[i]+3)) {
#if HAVE_ERTS_MSEG
                        init->mseg.cci =
#endif
                            get_amount_value(argv[i]+6, argv, &i);
                    }
                    else {
                        bad_param(param, param+2);
                    }
                    break;
                case 'R':       /* driver_alloc */
                    handle_au_arg(&init->driver_alloc, &argv[i][3], argv, &i);
                    break;
                case 'S':       /* sl_alloc (short lived data) */
                    handle_au_arg(&init->sl_alloc, &argv[i][3], argv, &i);
                    break;
                case 'T':       /* temp_alloc */
                    handle_au_arg(&init->temp_alloc, &argv[i][3], argv, &i);
                    break;
                case 'Y': { /* sys_alloc */
                    if (has_prefix("tt", param+2)) {
                        /* set trim threshold */
                        arg = get_value(param+4, argv, &i);
                        errno = 0;
                        init->trim_threshold = (int) strtol(arg, &rest, 10);
                        /* Value is given in KB and multiplied by 1024
                         * below; reject anything that would overflow. */
                        if (errno != 0
                            || rest == arg
                            || init->trim_threshold < 0
                            || (INT_MAX/1024) < init->trim_threshold) {
                            bad_value(param, param+4, arg);
                        }
                        VERBOSE(DEBUG_SYSTEM,
                                ("using trim threshold: %d\n",
                                 init->trim_threshold));
                        init->trim_threshold *= 1024;
                    }
                    else if (has_prefix("tp", param+2)) {
                        /* set top pad */
                        arg = get_value(param+4, argv, &i);
                        errno = 0;
                        init->top_pad = (int) strtol(arg, &rest, 10);
                        if (errno != 0
                            || rest == arg
                            || init->top_pad < 0
                            || (INT_MAX/1024) < init->top_pad) {
                            bad_value(param, param+4, arg);
                        }
                        VERBOSE(DEBUG_SYSTEM,
                                ("using top pad: %d\n",init->top_pad));
                        init->top_pad *= 1024;
                    }
                    else if (has_prefix("m", param+2)) {
                        /* Has been handled by erlexec */
                        (void) get_value(param+3, argv, &i);
                    }
                    else if (has_prefix("e", param+2)) {
                        /* sys_alloc cannot be disabled. */
                        arg = get_value(param+3, argv, &i);
                        if (strcmp("true", arg) != 0)
                            bad_value(param, param+3, arg);
                    }
                    else
                        bad_param(param, param+2);
                    break;
                }
                case 'e':       /* "-Mea ..." preset configurations */
                    switch (argv[i][3]) {
                    case 'a': {
                        int a;
                        arg = get_value(argv[i]+4, argv, &i);
                        if (strcmp("min", arg) == 0) {
                            /* Disable all alloc_util allocators. */
                            for (a = 0; a < aui_sz; a++)
                                aui[a]->enable = 0;
                        }
                        else if (strcmp("max", arg) == 0) {
                            /* Enable all alloc_util allocators. */
                            for (a = 0; a < aui_sz; a++)
                                aui[a]->enable = 1;
                        }
                        else if (strcmp("config", arg) == 0) {
                            init->erts_alloc_config = 1;
                        }
                        else if (strcmp("r9c", arg) == 0
                                 || strcmp("r10b", arg) == 0
                                 || strcmp("r11b", arg) == 0) {
                            /* Emulate the allocator setup of an older
                             * release: reset everything to defaults,
                             * then apply the release specific tweaks. */
                            set_default_sl_alloc_opts(&init->sl_alloc);
                            set_default_std_alloc_opts(&init->std_alloc);
                            set_default_ll_alloc_opts(&init->ll_alloc);
                            set_default_temp_alloc_opts(&init->temp_alloc);
                            set_default_eheap_alloc_opts(&init->eheap_alloc);
                            set_default_binary_alloc_opts(&init->binary_alloc);
                            set_default_ets_alloc_opts(&init->ets_alloc);
                            set_default_driver_alloc_opts(&init->driver_alloc);
                            init->driver_alloc.enable = 0;
                            if (strcmp("r9c", arg) == 0) {
                                init->sl_alloc.enable = 0;
                                init->std_alloc.enable = 0;
                                init->binary_alloc.enable = 0;
                                init->ets_alloc.enable = 0;
                            }

                            /* Old releases: no thread specific instances,
                             * no realloc-always-moves, small max mbc. */
                            for (a = 0; a < aui_sz; a++) {
                                aui[a]->thr_spec = 0;
                                aui[a]->init.util.ramv = 0;
                                aui[a]->init.util.mmmbc = 10;
                                aui[a]->init.util.lmbcs = 5*1024*1024;
                            }
                        }
                        else {
                            bad_param(param, param+3);
                        }
                        break;
                    }
                    default:
                        bad_param(param, param+1);
                    }
                    break;
                case 'i':       /* instrumentation options */
                    switch (argv[i][3]) {
                    case 's':   /* statistics */
                        arg = get_value(argv[i]+4, argv, &i);
                        if (strcmp("true", arg) == 0)
                            init->instr.stat = 1;
                        else if (strcmp("false", arg) == 0)
                            init->instr.stat = 0;
                        else
                            bad_value(param, param+3, arg);
                        break;
                    case 'm':   /* memory map */
                        arg = get_value(argv[i]+4, argv, &i);
                        if (strcmp("true", arg) == 0)
                            init->instr.map = 1;
                        else if (strcmp("false", arg) == 0)
                            init->instr.map = 0;
                        else
                            bad_value(param, param+3, arg);
                        break;
                    case 't':   /* memory trace; value is the trace spec */
                        init->instr.mtrace = get_value(argv[i]+4, argv, &i);
                        break;
                    default:
                        bad_param(param, param+2);
                    }
                    break;
                case 'u':       /* alloc_util global options */
                    if (has_prefix("ycs", argv[i]+3)) {
                        init->alloc_util.ycs
                            = get_kb_value(argv[i]+6, argv, &i);
                    }
                    else if (has_prefix("mmc", argv[i]+3)) {
                        init->alloc_util.mmc
                            = get_amount_value(argv[i]+6, argv, &i);
                    }
                    else {
                        /* Apply the sub-parameter to every alloc_util
                         * allocator. handle_au_arg() may consume the next
                         * argv slot and NULL out handled entries, so the
                         * consumed slots are restored before each
                         * iteration to re-parse the same switch. */
                        int a;
                        int start = i;
                        char *param = argv[i];
                        char *val = i+1 < *argc ? argv[i+1] : NULL;
                        for (a = 0; a < aui_sz; a++) {
                            if (a > 0) {
                                ASSERT(i == start || i == start+1);
                                argv[start] = param;
                                if (i != start)
                                    argv[start + 1] = val;
                                i = start;
                            }
                            handle_au_arg(aui[a], &argv[i][3], argv, &i);
                        }
                    }
                    break;
                default:
                    bad_param(param, param+1);
                }
                break;
            case '-':
                if (argv[i][2] == '\0') {
                    /* End of system flags reached */
                    if (init->instr.mtrace
                        /* || init->instr.stat
                           || init->instr.map */) {
                        /* Memory tracing needs the node name; scan the
                         * remaining (non-system) arguments for it. */
                        while (i < *argc) {
                            if(strcmp(argv[i], "-sname") == 0
                               || strcmp(argv[i], "-name") == 0) {
                                if (i + 1 < *argc) {
                                    init->instr.nodename = argv[i+1];
                                    break;
                                }
                            }
                            i++;
                        }
                    }
                    goto args_parsed;
                }
                break;
            default:
                break;
            }
        }
        i++;
    }

 args_parsed:
    /* Handled arguments have been marked with NULL. Slide arguments
       not handled towards the beginning of argv. */
    for (i = 0, j = 0; i < *argc; i++) {
        if (argv[i])
            argv[j++] = argv[i];
    }
    *argc = j;
}
  1234. static char *type_no_str(ErtsAlcType_t n)
  1235. {
  1236. #if ERTS_ALC_N_MIN != 0
  1237. if (n < ERTS_ALC_N_MIN)
  1238. return NULL;
  1239. #endif
  1240. if (n > ERTS_ALC_N_MAX)
  1241. return NULL;
  1242. return (char *) ERTS_ALC_N2TD(n);
  1243. }
/* Descriptive string for an allocation type (NULL if out of range). */
#define type_str(T) type_no_str(ERTS_ALC_T2N((T)))

/* Thread specific data key holding this thread's allocator index. */
erts_tsd_key_t thr_ix_key;
/* Protects last_thr_ix when handing out dynamic indices. */
erts_spinlock_t alloc_thr_ix_lock;
/* Most recently handed out dynamic thread index. */
int last_thr_ix;
/* First index available for dynamic assignment (above the static ones). */
int first_dyn_thr_ix;
  1249. static void
  1250. init_thr_ix(int static_ixs)
  1251. {
  1252. erts_tsd_key_create(&thr_ix_key);
  1253. erts_spinlock_init(&alloc_thr_ix_lock, "alloc_thr_ix_lock");
  1254. last_thr_ix = -4711;
  1255. first_dyn_thr_ix = static_ixs+1;
  1256. }
  1257. int
  1258. erts_alc_get_thr_ix(void)
  1259. {
  1260. int ix = (int)(long) erts_tsd_get(thr_ix_key);
  1261. if (ix == 0) {
  1262. erts_spin_lock(&alloc_thr_ix_lock);
  1263. last_thr_ix++;
  1264. if (last_thr_ix < 0)
  1265. last_thr_ix = first_dyn_thr_ix;
  1266. ix = last_thr_ix;
  1267. erts_spin_unlock(&alloc_thr_ix_lock);
  1268. erts_tsd_set(thr_ix_key, (void *)(long) ix);
  1269. }
  1270. ASSERT(ix > 0);
  1271. return ix;
  1272. }
  1273. void erts_alloc_reg_scheduler_id(Uint id)
  1274. {
  1275. int ix = (int) id;
  1276. ASSERT(0 < ix && ix <= first_dyn_thr_ix);
  1277. ASSERT(0 == (int) (long) erts_tsd_get(thr_ix_key));
  1278. erts_tsd_set(thr_ix_key, (void *)(long) ix);
  1279. }
  1280. __decl_noreturn void
  1281. erts_alc_fatal_error(int error, int func, ErtsAlcType_t n, ...)
  1282. {
  1283. char buf[10];
  1284. char *t_str;
  1285. char *allctr_str;
  1286. ASSERT(n >= ERTS_ALC_N_MIN);
  1287. ASSERT(n <= ERTS_ALC_N_MAX);
  1288. if (n < ERTS_ALC_N_MIN || ERTS_ALC_N_MAX < n)
  1289. allctr_str = "UNKNOWN";
  1290. else {
  1291. ErtsAlcType_t a = ERTS_ALC_T2A(ERTS_ALC_N2T(n));
  1292. if (erts_allctrs_info[a].enabled)
  1293. allctr_str = (char *) ERTS_ALC_A2AD(a);
  1294. else
  1295. allctr_str = (char *) ERTS_ALC_A2AD(ERTS_ALC_A_SYSTEM);
  1296. }
  1297. t_str = type_no_str(n);
  1298. if (!t_str) {
  1299. sprintf(buf, "%d", (int) n);
  1300. t_str = buf;
  1301. }
  1302. switch (error) {
  1303. case ERTS_ALC_E_NOTSUP: {
  1304. char *op_str;
  1305. switch (func) {
  1306. case ERTS_ALC_O_ALLOC: op_str = "alloc"; break;
  1307. case ERTS_ALC_O_REALLOC: op_str = "realloc"; break;
  1308. case ERTS_ALC_O_FREE: op_str = "free"; break;
  1309. default: op_str = "UNKNOWN"; break;
  1310. }
  1311. erl_exit(ERTS_ABORT_EXIT,
  1312. "%s: %s operation not supported (memory type: \"%s\")\n",
  1313. allctr_str, op_str, t_str);
  1314. break;
  1315. }
  1316. case ERTS_ALC_E_NOMEM: {
  1317. Uint size;
  1318. va_list argp;
  1319. char *op = func == ERTS_ALC_O_REALLOC ? "reallocate" : "allocate";
  1320. va_start(argp, n);
  1321. size = va_arg(argp, Uint);
  1322. va_end(argp);
  1323. erl_exit(1,
  1324. "%s: Cannot %s %lu bytes of memory (of type \"%s\").\n",
  1325. allctr_str, op, size, t_str);
  1326. break;
  1327. }
  1328. case ERTS_ALC_E_NOALLCTR:
  1329. erl_exit(ERTS_ABORT_EXIT,
  1330. "erts_alloc: Unknown allocator type: %d\n",
  1331. ERTS_ALC_T2A(ERTS_ALC_N2T(n)));
  1332. break;
  1333. default:
  1334. erl_exit(ERTS_ABORT_EXIT, "erts_alloc: Unknown error: %d\n", error);
  1335. break;
  1336. }
  1337. }
/* Out-of-memory on alloc for an allocation type; never returns. */
__decl_noreturn void
erts_alloc_enomem(ErtsAlcType_t type, Uint size)
{
    erts_alloc_n_enomem(ERTS_ALC_T2N(type), size);
}
/* Out-of-memory on alloc for a type *number*; never returns. */
__decl_noreturn void
erts_alloc_n_enomem(ErtsAlcType_t n, Uint size)
{
    erts_alc_fatal_error(ERTS_ALC_E_NOMEM, ERTS_ALC_O_ALLOC, n, size);
}
/* Out-of-memory on realloc for an allocation type; never returns.
 * (ptr is only part of the signature for symmetry; it is not reported.) */
__decl_noreturn void
erts_realloc_enomem(ErtsAlcType_t type, void *ptr, Uint size)
{
    erts_realloc_n_enomem(ERTS_ALC_T2N(type), ptr, size);
}
/* Out-of-memory on realloc for a type *number*; never returns. */
__decl_noreturn void
erts_realloc_n_enomem(ErtsAlcType_t n, void *ptr, Uint size)
{
    erts_alc_fatal_error(ERTS_ALC_E_NOMEM, ERTS_ALC_O_REALLOC, n, size);
}
  1358. static ERTS_INLINE Uint
  1359. alcu_size(ErtsAlcType_t ai)
  1360. {
  1361. Uint res = 0;
  1362. ASSERT(erts_allctrs_info[ai].enabled);
  1363. ASSERT(erts_allctrs_info[ai].alloc_util);
  1364. if (!erts_allctrs_info[ai].thr_spec) {
  1365. Allctr_t *allctr = erts_allctrs_info[ai].extra;
  1366. AllctrSize_t asize;
  1367. erts_alcu_current_size(allctr, &asize);
  1368. res += asize.blocks;
  1369. }
  1370. else {
  1371. ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[ai];
  1372. int i;
  1373. ASSERT(tspec->all_thr_safe);
  1374. ASSERT(tspec->enabled);
  1375. for (i = tspec->size - 1; i >= 0; i--) {
  1376. Allctr_t *allctr = tspec->allctr[i];
  1377. AllctrSize_t asize;
  1378. if (allctr) {
  1379. erts_alcu_current_size(allctr, &asize);
  1380. res += asize.blocks;
  1381. }
  1382. }
  1383. }
  1384. return res;
  1385. }
/*
 * Implementation of erlang:memory/0,1 and the "=memory" dump section.
 *
 * print_to_p/print_to_arg: when non-NULL, the result is also printed
 *                          via erts_print() (crash dump / info channel).
 * proc:                    when non-NULL, a term result is built on this
 *                          process' heap; caller holds (and gets back)
 *                          its main lock.
 * earg:                    THE_NON_VALUE => report everything; otherwise
 *                          an atom or list of atoms selecting the values.
 *
 * Returns the built term, NIL for an empty request, am_badarg for an
 * unknown key, or am_notsup when a required allocator is disabled.
 */
Eterm
erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
{
/* True when we must walk *all* alloc_util allocators to compute the
 * total (i.e. no instrumentation totals available). */
#define ERTS_MEM_NEED_ALL_ALCU (!erts_instr_stat && want_tot_or_sys)
    ErtsFixInfo efi;
    /* Which values were requested... */
    struct {
        int total;
        int processes;
        int processes_used;
        int system;
        int atom;
        int atom_used;
        int binary;
        int code;
        int ets;
        int maximum;
    } want = {0};
    /* ...and their computed sizes (bytes). */
    struct {
        Uint total;
        Uint processes;
        Uint processes_used;
        Uint system;
        Uint atom;
        Uint atom_used;
        Uint binary;
        Uint code;
        Uint ets;
        Uint maximum;
    } size = {0};
    /* Parallel arrays: result key atoms and pointers into 'size'. */
    Eterm atoms[sizeof(size)/sizeof(Uint)];
    Uint *uintps[sizeof(size)/sizeof(Uint)];
    Eterm euints[sizeof(size)/sizeof(Uint)];
    int need_atom;  /* NOTE(review): assigned below but apparently never read */
    int want_tot_or_sys;
    int length;
    Eterm res = THE_NON_VALUE;
    ErtsAlcType_t ai;
    int only_one_value = 0;

    /* Figure out whats wanted... */

    length = 0;
    if (is_non_value(earg)) { /* i.e. wants all */
        want.total = 1;
        atoms[length] = am_total;
        uintps[length++] = &size.total;

        want.processes = 1;
        atoms[length] = am_processes;
        uintps[length++] = &size.processes;

        want.processes_used = 1;
        atoms[length] = am_processes_used;
        uintps[length++] = &size.processes_used;

        want.system = 1;
        atoms[length] = am_system;
        uintps[length++] = &size.system;

        want.atom = 1;
        atoms[length] = am_atom;
        uintps[length++] = &size.atom;

        want.atom_used = 1;
        atoms[length] = am_atom_used;
        uintps[length++] = &size.atom_used;

        want.binary = 1;
        atoms[length] = am_binary;
        uintps[length++] = &size.binary;

        want.code = 1;
        atoms[length] = am_code;
        uintps[length++] = &size.code;

        want.ets = 1;
        atoms[length] = am_ets;
        uintps[length++] = &size.ets;

        /* 'maximum' is only meaningful with instrumentation statistics. */
        want.maximum = erts_instr_stat;
        if (want.maximum) {
            atoms[length] = am_maximum;
            uintps[length++] = &size.maximum;
        }
    }
    else {
        /* Selective request: earg is an atom (single value result) or a
         * list of atoms (2-tuple list result). A lone atom is wrapped in
         * a temporary one-element list so both cases share the loop. */
        DeclareTmpHeapNoproc(tmp_heap,2);
        Eterm wanted_list;

        if (is_nil(earg))
            return NIL;

        UseTmpHeapNoproc(2);

        if (is_not_atom(earg))
            wanted_list = earg;
        else {
            wanted_list = CONS(&tmp_heap[0], earg, NIL);
            only_one_value = 1;
        }

        while (is_list(wanted_list)) {
            switch (CAR(list_val(wanted_list))) {
            case am_total:
                if (!want.total) {
                    want.total = 1;
                    atoms[length] = am_total;
                    uintps[length++] = &size.total;
                }
                break;
            case am_processes:
                if (!want.processes) {
                    want.processes = 1;
                    atoms[length] = am_processes;
                    uintps[length++] = &size.processes;
                }
                break;
            case am_processes_used:
                if (!want.processes_used) {
                    want.processes_used = 1;
                    atoms[length] = am_processes_used;
                    uintps[length++] = &size.processes_used;
                }
                break;
            case am_system:
                if (!want.system) {
                    want.system = 1;
                    atoms[length] = am_system;
                    uintps[length++] = &size.system;
                }
                break;
            case am_atom:
                if (!want.atom) {
                    want.atom = 1;
                    atoms[length] = am_atom;
                    uintps[length++] = &size.atom;
                }
                break;
            case am_atom_used:
                if (!want.atom_used) {
                    want.atom_used = 1;
                    atoms[length] = am_atom_used;
                    uintps[length++] = &size.atom_used;
                }
                break;
            case am_binary:
                if (!want.binary) {
                    want.binary = 1;
                    atoms[length] = am_binary;
                    uintps[length++] = &size.binary;
                }
                break;
            case am_code:
                if (!want.code) {
                    want.code = 1;
                    atoms[length] = am_code;
                    uintps[length++] = &size.code;
                }
                break;
            case am_ets:
                if (!want.ets) {
                    want.ets = 1;
                    atoms[length] = am_ets;
                    uintps[length++] = &size.ets;
                }
                break;
            case am_maximum:
                /* Only available with instrumentation statistics. */
                if (erts_instr_stat) {
                    if (!want.maximum) {
                        want.maximum = 1;
                        atoms[length] = am_maximum;
                        uintps[length++] = &size.maximum;
                    }
                } else {
                    UnUseTmpHeapNoproc(2);
                    return am_badarg;
                }
                break;
            default:
                UnUseTmpHeapNoproc(2);
                return am_badarg;
            }
            wanted_list = CDR(list_val(wanted_list));
        }
        UnUseTmpHeapNoproc(2);
        if (is_not_nil(wanted_list))
            return am_badarg;   /* improper list */
    }

    /* All alloc_util allocators *have* to be enabled */

    for (ai = ERTS_ALC_A_MIN; ai <= ERTS_ALC_A_MAX; ai++) {
        switch (ai) {
        case ERTS_ALC_A_SYSTEM:
        case ERTS_ALC_A_FIXED_SIZE:
            break;
        default:
            if (!erts_allctrs_info[ai].enabled
                || !erts_allctrs_info[ai].alloc_util) {
                return am_notsup;
            }
            break;
        }
    }

    ASSERT(length <= sizeof(atoms)/sizeof(Eterm));
    ASSERT(length <= sizeof(euints)/sizeof(Eterm));
    ASSERT(length <= sizeof(uintps)/sizeof(Uint));

    if (proc) {
        ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN
                           == erts_proc_lc_my_proc_locks(proc));
        /* We'll need locks early in the lock order */
        erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
    }

    /* Calculate values needed... */

    want_tot_or_sys = want.total || want.system;
    need_atom = ERTS_MEM_NEED_ALL_ALCU || want.atom;

    if (ERTS_MEM_NEED_ALL_ALCU) {
        /* Sum every alloc_util allocator; stash the per-category sizes
         * we pass by (eheap/ets/binary) so they need not be re-read. */
        size.total = 0;

        for (ai = ERTS_ALC_A_MIN; ai <= ERTS_ALC_A_MAX; ai++) {
            if (erts_allctrs_info[ai].alloc_util) {
                Uint *save;
                Uint asz;
                switch (ai) {
                case ERTS_ALC_A_TEMPORARY:
                    /*
                     * Often not thread safe and usually never
                     * contain any allocated memory.
                     */
                    continue;
                case ERTS_ALC_A_EHEAP:
                    save = &size.processes;
                    break;
                case ERTS_ALC_A_ETS:
                    save = &size.ets;
                    break;
                case ERTS_ALC_A_BINARY:
                    save = &size.binary;
                    break;
                default:
                    save = NULL;
                    break;
                }
                asz = alcu_size(ai);
                if (save)
                    *save = asz;
                size.total += asz;
            }
        }
    }

    if (want_tot_or_sys || want.processes || want.processes_used) {
        Uint tmp;
        /* Process memory = eheap allocator + process table + bif timers
         * + links/monitors + assorted fix_alloc pools. */
        if (ERTS_MEM_NEED_ALL_ALCU)
            tmp = size.processes;       /* already computed above */
        else
            tmp = alcu_size(ERTS_ALC_A_EHEAP);
        tmp += erts_max_processes*sizeof(Process*);
#ifdef HYBRID
        tmp += erts_max_processes*sizeof(Process*);
#endif
        tmp += erts_bif_timer_memory_size();
        tmp += erts_tot_link_lh_size();

        size.processes = size.processes_used = tmp;

        /* 'processes' counts reserved fix_alloc space, 'processes_used'
         * only the used part. */
        erts_fix_info(ERTS_ALC_T_NLINK_SH, &efi);
        size.processes += efi.total;
        size.processes_used += efi.used;

        erts_fix_info(ERTS_ALC_T_MONITOR_SH, &efi);
        size.processes += efi.total;
        size.processes_used += efi.used;

        erts_fix_info(ERTS_ALC_T_PROC, &efi);
        size.processes += efi.total;
        size.processes_used += efi.used;

        erts_fix_info(ERTS_ALC_T_REG_PROC, &efi);
        size.processes += efi.total;
        size.processes_used += efi.used;
    }

    if (want.atom || want.atom_used) {
        Uint reserved_atom_space, atom_space;
        erts_atom_get_text_space_sizes(&reserved_atom_space, &atom_space);
        size.atom = size.atom_used = atom_table_sz();
        erts_fix_info(ERTS_ALC_T_ATOM, &efi);

        if (want.atom) {
            size.atom += reserved_atom_space;
            size.atom += efi.total;
        }

        if (want.atom_used) {
            size.atom_used += atom_space;
            size.atom_used += efi.used;
        }
    }

    if (!ERTS_MEM_NEED_ALL_ALCU && want.binary)
        size.binary = alcu_size(ERTS_ALC_A_BINARY);

    if (want.code) {
        /* Code = module/export/fun tables + module ranges + loaded code. */
        size.code = module_table_sz();
        erts_fix_info(ERTS_ALC_T_MODULE, &efi);
        size.code += efi.used;
        size.code += export_table_sz();
        erts_fix_info(ERTS_ALC_T_EXPORT, &efi);
        size.code += efi.used;
        size.code += erts_fun_table_sz();
        erts_fix_info(ERTS_ALC_T_FUN_ENTRY, &efi);
        size.code += efi.used;
        size.code += allocated_modules*sizeof(Range);
        size.code += erts_total_code_size;
    }

    if (want.ets) {
        if (!ERTS_MEM_NEED_ALL_ALCU)
            size.ets = alcu_size(ERTS_ALC_A_ETS);
        size.ets += erts_get_ets_misc_mem_size();
    }

    if (erts_instr_stat && (want_tot_or_sys || want.maximum)) {
        /* Instrumentation keeps exact totals; prefer them. */
        if (want_tot_or_sys) {
            size.total = erts_instr_get_total();
            size.system = size.total - size.processes;
        }
        size.maximum = erts_instr_get_max_total();
    }
    else if (want_tot_or_sys) {
        size.system = size.total - size.processes;
    }

    if (print_to_p) {
        int i;
        int to = *print_to_p;
        void *arg = print_to_arg;

        /* Print result... */
        erts_print(to, arg, "=memory\n");
        for (i = 0; i < length; i++)
            erts_print(to, arg, "%T: %bpu\n", atoms[i], *uintps[i]);
    }

    if (proc) {
        /* Build erlang term result... */
        Uint *hp;
        Uint hsz;

        erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);

        if (only_one_value) {
            /* Single atom requested: return the bare integer. */
            ASSERT(length == 1);
            hsz = 0;
            erts_bld_uint(NULL, &hsz, *uintps[0]);
            hp = hsz ? HAlloc((Process *) proc, hsz) : NULL;
            res = erts_bld_uint(&hp, NULL, *uintps[0]);
        }
        else {
            /* Two-pass build: first pass (hpp == NULL) only sizes the
             * term, second pass writes it into the allocated heap. */
            Uint **hpp = NULL;
            Uint *hszp = &hsz;
            hsz = 0;

            while (1) {
                int i;
                for (i = 0; i < length; i++)
                    euints[i] = erts_bld_uint(hpp, hszp, *uintps[i]);
                res = erts_bld_2tup_list(hpp, hszp, length, atoms, euints);
                if (hpp)
                    break;
                hp = HAlloc((Process *) proc, hsz);
                hpp = &hp;
                hszp = NULL;
            }
        }
    }

    return res;

#undef ERTS_MEM_NEED_ALL_ALCU
}
/* One entry of the allocated_areas result: a named value with either
 * one (arity 2) or two (arity 3) unsigned integers attached. */
struct aa_values {
    Uint arity;         /* 2 or 3: result tuple size incl. the name atom */
    const char *name;   /* area name (becomes an atom in the term result) */
    Uint ui[2];         /* ui[0] always valid; ui[1] only when arity == 3 */
};
/*
 * Implementation of erlang:system_info(allocated_areas) and the
 * "=allocated_areas" dump section: collect the sizes of the emulator's
 * statically known memory areas.
 *
 * print_to_p/print_to_arg: when non-NULL, print the table via erts_print().
 * proc:                    when non-NULL, build a list-of-tuples term on
 *                          this process' heap (caller holds its main lock).
 */
Eterm
erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
{
/* Upper bound on table entries: ~20 fixed areas + one per fix_alloc type. */
#define MAX_AA_VALUES \
    (20 + (ERTS_ALC_N_MAX_A_FIXED_SIZE - ERTS_ALC_N_MIN_A_FIXED_SIZE + 1))

    struct aa_values values[MAX_AA_VALUES];
    Eterm res = THE_NON_VALUE;
    int i, length;
    ErtsFixInfo efi;
    Uint reserved_atom_space, atom_space;

    if (proc) {
        ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN
                           == erts_proc_lc_my_proc_locks(proc));

        /* We'll need locks early in the lock order */
        erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
    }

    /* Fill the table; i indexes the next free entry. */
    i = 0;

    if (erts_instr_stat) {
        /* Exact totals are only known with instrumentation enabled. */
        values[i].arity = 2;
        values[i].name = "total";
        values[i].ui[0] = erts_instr_get_total();
        i++;

        values[i].arity = 2;
        values[i].name = "maximum";
        values[i].ui[0] = erts_instr_get_max_total();
        i++;
    }

    values[i].arity = 2;
    values[i].name = "sys_misc";
    values[i].ui[0] = erts_sys_misc_mem_sz();
    i++;

    values[i].arity = 2;
    values[i].name = "static";
    values[i].ui[0] =
        erts_max_ports*sizeof(Port)             /* Port table */
        + erts_timer_wheel_memory_size()        /* Timer wheel */
#ifdef SYS_TMP_BUF_SIZE
        + SYS_TMP_BUF_SIZE      /* tmp_buf in sys on vxworks & ose */
#endif
        ;
    i++;

    erts_atom_get_text_space_sizes(&reserved_atom_space, &atom_space);

    /* Arity-3 entry: reserved and actually used atom text space. */
    values[i].arity = 3;
    values[i].name = "atom_space";
    values[i].ui[0] = reserved_atom_space;
    values[i].ui[1] = atom_space;
    i++;

    values[i].arity = 2;
    values[i].name = "atom_table";
    values[i].ui[0] = atom_table_sz();
    i++;

    values[i].arity = 2;
    values[i].name = "module_table";
    values[i].ui[0] = module_table_sz();
    i++;

    values[i].arity = 2;
    values[i].name = "export_table";
    values[i].ui[0] = export_table_sz();
    i++;

    values[i].arity = 2;
    values[i].name = "register_table";
    values[i].ui[0] = process_reg_sz();
    i++;

    values[i].arity = 2;
    values[i].name = "fun_table";
    values[i].ui[0] = erts_fun_table_sz();
    i++;

    values[i].arity = 2;
    values[i].name = "module_refs";
    values[i].ui[0] = allocated_modules*sizeof(Range);
    i++;

    values[i].arity = 2;
    values[i].name = "loaded_code";
    values[i].ui[0] = erts_total_code_size;
    i++;

    values[i].arity = 2;
    values[i].name = "dist_table";
    values[i].ui[0] = erts_dist_table_size();
    i++;

    values[i].arity = 2;
    values[i].name = "node_table";
    values[i].ui[0] = erts_node_table_size();
    i++;

    values[i].arity = 2;
    values[i].name = "bits_bufs_size";
    values[i].ui[0] = erts_bits_bufs_size();
    i++;

    values[i].arity = 2;
    values[i].name = "bif_timer";
    values[i].ui[0] = erts_bif_timer_memory_size();
    i++;

    values[i].arity = 2;
    values[i].name = "link_lh";
    values[i].ui[0] = erts_tot_link_lh_size();
    i++;

    {
        /* One arity-3 (total/used) entry per fixed-size allocation type. */
        Uint n;

        for (n = ERTS_ALC_N_MIN_A_FIXED_SIZE;
             n <= ERTS_ALC_N_MAX_A_FIXED_SIZE;
             n++) {
            erts_fix_info(ERTS_ALC_N2T(n), &efi);

            values[i].arity = 3;
            values[i].name = ERTS_ALC_N2TD(n);
            values[i].ui[0] = efi.total;
            values[i].ui[1] = efi.used;
            i++;
        }
    }

    length = i;
    ASSERT(length <= MAX_AA_VALUES);

    if (print_to_p) {
        /* Print result... */
        int to = *print_to_p;
        void *arg = print_to_arg;

        erts_print(to, arg, "=allocated_areas\n");
        for (i = 0; i < length; i++) {
            switch (values[i].arity) {
            case 2:
                erts_print(to, arg, "%s: %bpu\n",
                           values[i].name, values[i].ui[0]);
                break;
            case 3:
                erts_print(to, arg, "%s: %bpu %bpu\n",
                           values[i].name, values[i].ui[0], values[i].ui[1]);
                break;
            default:
                erts_print(to, arg, "ERROR: internal_error\n");
                ASSERT(0);
                /* NOTE(review): this return path does not re-acquire the
                 * main proc lock released above when proc != NULL --
                 * confirm whether callers can hit it with proc set. */
                return am_internal_error;
            }
        }
    }

    if (proc) {
        /* Build erlang term result... */
        Eterm tuples[MAX_AA_VALUES];
        Uint *hp;
        Uint **hpp;
        Uint hsz;
        Uint *hszp;

        erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);

        /* Two-pass build: first pass (hpp == NULL) sizes the term,
         * second pass writes it into the allocated heap. */
        hpp = NULL;
        hsz = 0;
        hszp = &hsz;

        while (1) {
            int i;
            for (i = 0; i < length; i++) {
                Eterm atom;
                if (hpp)
                    atom = am_atom_put(values[i].name,
                                       (int) strlen(values[i].name));
                else
                    atom = am_true; /* placeholder; atoms need no heap */
                switch (values[i].arity) {
                case 2:
                    tuples[i] = erts_bld_tuple(hpp, hszp, 2,
                                               atom,
                                               erts_bld_uint(hpp, hszp,
                                                             values[i].ui[0]));
                    break;
                case 3:
                    tuples[i] = erts_bld_tuple(hpp, hszp, 3,
                                               atom,
                                               erts_bld_uint(hpp, hszp,
                                                             values[i].ui[0]),
                                               erts_bld_uint(hpp, hszp,
                                                             values[i].ui[1]));
                    break;
                default:
                    ASSERT(0);
                    return am_internal_error;
                }
            }
            res = erts_bld_list(hpp, hszp, length, tuples);
            if (hpp)
                break;
            hp = HAlloc((Process *) proc, hsz);
            hpp = &hp;
            hszp = NULL;
        }
    }

    return res;
#undef MAX_AA_VALUES
}
  1917. Eterm
  1918. erts_alloc_util_allocators(void *proc)
  1919. {
  1920. Eterm res;
  1921. Uint *hp;
  1922. Uint sz;
  1923. int i;
  1924. /*
  1925. * Currently all allocators except sys_alloc and fix_alloc are
  1926. * alloc_util allocators.
  1927. */
  1928. sz = ((ERTS_ALC_A_MAX + 1 - ERTS_ALC_A_MIN) - 2)*2;
  1929. ASSERT(sz > 0);
  1930. hp = HAlloc((Process *) proc, sz);
  1931. res = NIL;
  1932. for (i = ERTS_ALC_A_MAX; i >= ERTS_ALC_A_MIN; i--) {
  1933. switch (i) {
  1934. case ERTS_ALC_A_SYSTEM:
  1935. case ERTS_ALC_A_FIXED_SIZE:
  1936. break;
  1937. default: {
  1938. char *alc_str = (char *) ERTS_ALC_A2AD(i);
  1939. Eterm alc = am_atom_put(alc_str, sys_strlen(alc_str));
  1940. res = CONS(hp, alc, res);
  1941. hp += 2;
  1942. break;
  1943. }
  1944. }
  1945. }
  1946. return res;
  1947. }
/*
 * Build an Erlang term describing the allocator named by the atom
 * 'which_alloc' (used by erlang:system_info({allocator,_})-style queries).
 *
 * proc       - the calling process (passed as void *; used for HAlloc)
 * which_alloc- atom naming the allocator ("sys_alloc", "mseg_alloc", ...)
 * only_sz    - if non-zero, report size information only (erts_alcu_sz_info)
 *
 * Returns am_undefined for an unknown name, am_false for a known but
 * disabled allocator, otherwise the info term.
 *
 * Term building uses the usual erts_bld_* two-pass pattern: first pass with
 * szp set (hpp == NULL) only computes the heap need, second pass with hpp
 * set builds the term on heap allocated via ERTS_AIT_HALLOC.
 */
Eterm
erts_allocator_info_term(void *proc, Eterm which_alloc, int only_sz)
{
#define ERTS_AIT_RET(R) \
    do { res = (R); goto done; } while (0)
#define ERTS_AIT_HALLOC(P, S) \
    do { hp = HAlloc((P), (S)); hp_end = hp + (S); } while (0)

    ErtsAlcType_t i;
    Uint sz = 0;
    Uint *hp = NULL;
    Uint *hp_end = NULL;
    Eterm res = am_undefined;

    if (is_not_atom(which_alloc))
	goto done;

    for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
	if (erts_is_atom_str((char *) ERTS_ALC_A2AD(i), which_alloc)) {
	    if (!erts_allctrs_info[i].enabled)
		ERTS_AIT_RET(am_false);
	    else {
		if (erts_allctrs_info[i].alloc_util) {
		    Eterm ires, tmp;
		    Eterm **hpp;
		    Uint *szp;
		    Eterm (*info_func)(Allctr_t *,
				       int,
				       int *,
				       void *,
				       Uint **,
				       Uint *);

		    /* Size-only queries use the cheaper sz_info variant */
		    info_func = (only_sz
				 ? erts_alcu_sz_info
				 : erts_alcu_info);

		    if (erts_allctrs_info[i].thr_spec) {
			/* Thread-specific allocator: one info tuple per
			 * instance. If any instance is not thread safe we
			 * must block the system while inspecting it. */
			ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[i];
			int j;
			int block_system = !tspec->all_thr_safe;

			if (block_system) {
			    erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
			    erts_smp_block_system(0);
			}
			ASSERT(tspec->enabled);

			szp = &sz;
			hpp = NULL;

			while (1) {
			    ires = NIL;
			    for (j = tspec->size - 1; j >= 0; j--) {
				Allctr_t *allctr = tspec->allctr[j];
				if (allctr) {
				    /* {instance, J, Info} */
				    tmp = erts_bld_tuple(hpp,
							 szp,
							 3,
							 erts_bld_atom(hpp,
								       szp,
								       "instance"),
							 make_small((Uint) j),
							 (*info_func)(allctr,
								      hpp != NULL,
								      NULL,
								      NULL,
								      hpp,
								      szp));
				    ires = erts_bld_cons(hpp, szp, tmp, ires);
				}
			    }
			    if (hpp)
				break;	/* second (build) pass done */
			    ERTS_AIT_HALLOC((Process *) proc, sz);
			    hpp = &hp;
			    szp = NULL;
			}

			if (block_system) {
			    erts_smp_release_system();
			    erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
			}
		    }
		    else {
			/* Single-instance alloc_util allocator:
			 * [{instance, 0, Info}] */
			Allctr_t *allctr = erts_allctrs_info[i].extra;
			szp = &sz;
			hpp = NULL;
			while (1) {
			    ires = NIL;
			    tmp = erts_bld_tuple(hpp,
						 szp,
						 3,
						 erts_bld_atom(hpp,
							       szp,
							       "instance"),
						 make_small((Uint) 0),
						 (*info_func)(allctr,
							      hpp != NULL,
							      NULL,
							      NULL,
							      hpp,
							      szp));
			    ires = erts_bld_cons(hpp, szp, tmp, ires);
			    if (hpp)
				break;
			    ERTS_AIT_HALLOC((Process *) proc, sz);
			    hpp = &hp;
			    szp = NULL;
			}
		    }
		    ERTS_AIT_RET(ires);
		}
		else {
		    /* Not an alloc_util allocator; only sys_alloc and
		     * fix_alloc have hand-written info here. */
		    Eterm *szp, **hpp;

		    switch (i) {
		    case ERTS_ALC_A_SYSTEM: {
			SysAllocStat sas;
			Eterm opts_am;
			Eterm opts;
			Eterm as[4]; /* Ok even if !HEAP_ON_C_STACK, not really heap data on stack */
			Eterm ts[4]; /* Ok even if !HEAP_ON_C_STACK, not really heap data on stack */
			int l;

			if (only_sz)
			    ERTS_AIT_RET(NIL);

			sys_alloc_stat(&sas);
			opts_am = am_atom_put("options", 7);

			szp = &sz;
			hpp = NULL;

		    restart_sys_alloc:
			l = 0;
			as[l] = am_atom_put("e", 1);
			ts[l++] = am_true;
#ifdef ELIB_ALLOC_IS_CLIB
			as[l] = am_atom_put("m", 1);
			ts[l++] = am_atom_put("elib", 4);
#else
			as[l] = am_atom_put("m", 1);
			ts[l++] = am_atom_put("libc", 4);
#endif
			if(sas.trim_threshold >= 0) {
			    as[l] = am_atom_put("tt", 2);
			    ts[l++] = erts_bld_uint(hpp, szp,
						    (Uint) sas.trim_threshold);
			}
			if(sas.top_pad >= 0) {
			    as[l] = am_atom_put("tp", 2);
			    ts[l++] = erts_bld_uint(hpp, szp, (Uint) sas.top_pad);
			}

			opts = erts_bld_2tup_list(hpp, szp, l, as, ts);
			res = erts_bld_2tup_list(hpp, szp, 1, &opts_am, &opts);

			if (szp) {
			    /* size pass done; allocate and rebuild */
			    ERTS_AIT_HALLOC((Process *) proc, sz);
			    szp = NULL;
			    hpp = &hp;
			    goto restart_sys_alloc;
			}

			ERTS_AIT_RET(res);
		    }
		    case ERTS_ALC_A_FIXED_SIZE: {
			ErtsAlcType_t n;
			Eterm as[2], vs[2];

			if (only_sz)
			    ERTS_AIT_RET(NIL);

			as[0] = am_atom_put("options", 7);
			as[1] = am_atom_put("pools", 5);

			szp = &sz;
			hpp = NULL;

		    restart_fix_alloc:

			vs[0] = erts_bld_cons(hpp, szp,
					      erts_bld_tuple(hpp, szp, 2,
							     am_atom_put("e",
									 1),
							     am_true),
					      NIL);

			/* one {TypeName, Total, Used} tuple per fix type */
			vs[1] = NIL;
			for (n = ERTS_ALC_N_MIN_A_FIXED_SIZE;
			     n <= ERTS_ALC_N_MAX_A_FIXED_SIZE;
			     n++) {
			    ErtsFixInfo efi;
			    erts_fix_info(ERTS_ALC_N2T(n), &efi);

			    vs[1] = erts_bld_cons(
				hpp, szp,
				erts_bld_tuple(
				    hpp, szp, 3,
				    am_atom_put((char *) ERTS_ALC_N2TD(n),
						strlen(ERTS_ALC_N2TD(n))),
				    erts_bld_uint(hpp, szp, efi.total),
				    erts_bld_uint(hpp, szp, efi.used)),
				vs[1]);
			}

			res = erts_bld_2tup_list(hpp, szp, 2, as, vs);

			if (szp) {
			    ERTS_AIT_HALLOC((Process *) proc, sz);
			    szp = NULL;
			    hpp = &hp;
			    goto restart_fix_alloc;
			}

			ERTS_AIT_RET(res);
		    }
		    default:
			ASSERT(0);
			goto done;
		    }
		}
	    }
	}
    }

    if (ERTS_IS_ATOM_STR("mseg_alloc", which_alloc)) {
#if HAVE_ERTS_MSEG
	if (only_sz)
	    ERTS_AIT_RET(NIL);
	erts_mseg_info(NULL, NULL, 0, NULL, &sz);
	if (sz)
	    ERTS_AIT_HALLOC((Process *) proc, sz);
	ERTS_AIT_RET(erts_mseg_info(NULL, NULL, 1, &hp, NULL));
#else
	ERTS_AIT_RET(am_false);
#endif
    }
    else if (ERTS_IS_ATOM_STR("alloc_util", which_alloc)) {
	if (only_sz)
	    ERTS_AIT_RET(NIL);
	erts_alcu_au_info_options(NULL, NULL, NULL, &sz);
	if (sz)
	    ERTS_AIT_HALLOC((Process *) proc, sz);
	ERTS_AIT_RET(erts_alcu_au_info_options(NULL, NULL, &hp, NULL));
    }

 done:
    if (hp) {
	/* give back the unused tail of the allocated heap */
	ASSERT(hp_end >= hp);
	HRelease((Process *) proc, hp_end, hp);
    }
    return res;

#undef ERTS_AIT_RET
#undef ERTS_AIT_HALLOC
}
/*
 * Print allocator information (for crash dumps and the =allocator sections
 * of break-menu output) via erts_print() to 'to'/'arg'.
 *
 * Caller must have the system blocked (or be crash dumping), as asserted
 * below, since allocator instances are inspected without their locks.
 */
void
erts_allocator_info(int to, void *arg)
{
    ErtsAlcType_t a;

    ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0)
		       || (ERTS_IS_CRASH_DUMPING
			   && erts_smp_is_system_blocked(ERTS_BS_FLG_ALLOW_GC)));

    for (a = ERTS_ALC_A_MIN; a <= ERTS_ALC_A_MAX; a++) {
	int ai;
	/* for non-thr_spec allocators this loop runs exactly once (ai == 0);
	 * for thr_spec ones it visits every instance */
	for (ai = 0; ai == 0 || ai < erts_allctrs_info[a].thr_spec; ai++) {
	    if (erts_allctrs_info[a].thr_spec) {
		if (!erts_allctr_thr_spec[a].allctr[ai])
		    continue;
		erts_print(to, arg, "=allocator:%s[%d]\n",
			   ERTS_ALC_A2AD(a), ai);
	    }
	    else {
		erts_print(to, arg, "=allocator:%s\n", ERTS_ALC_A2AD(a));
	    }
	    if (!erts_allctrs_info[a].enabled)
		erts_print(to, arg, "option e: false\n");
	    else {
		if (erts_allctrs_info[a].alloc_util) {
		    void *as;
		    if (!erts_allctrs_info[a].thr_spec)
			as = erts_allctrs_info[a].extra;
		    else {
			ASSERT(erts_allctr_thr_spec[a].enabled);
			as = erts_allctr_thr_spec[a].allctr[ai];
		    }
		    /* Binary alloc has its own thread safety... */
		    erts_alcu_info(as, 0, &to, arg, NULL, NULL);
		}
		else {
		    switch (a) {
		    case ERTS_ALC_A_SYSTEM: {
			SysAllocStat sas;
			erts_print(to, arg, "option e: true\n");
#ifdef ELIB_ALLOC_IS_CLIB
			erts_print(to, arg, "option m: elib\n");
#else
			erts_print(to, arg, "option m: libc\n");
#endif
			sys_alloc_stat(&sas);
			if(sas.trim_threshold >= 0)
			    erts_print(to, arg, "option tt: %d\n", sas.trim_threshold);
			if(sas.top_pad >= 0)
			    erts_print(to, arg, "option tp: %d\n", sas.top_pad);
			break;
		    }
		    case ERTS_ALC_A_FIXED_SIZE: {
			ErtsAlcType_t n;
			erts_print(to, arg, "option e: true\n");
			/* one line per fixed-size pool: total and used */
			for (n = ERTS_ALC_N_MIN_A_FIXED_SIZE;
			     n <= ERTS_ALC_N_MAX_A_FIXED_SIZE;
			     n++) {
			    ErtsFixInfo efi;
			    erts_fix_info(ERTS_ALC_N2T(n), &efi);
			    erts_print(to, arg, "%s: %lu %lu\n",
				       ERTS_ALC_N2TD(n),
				       efi.total,
				       efi.used);
			}
			break;
		    }
		    default:
			ASSERT(0);
			break;
		    }
		}
	    }
	}
    }

#if HAVE_ERTS_MSEG
    erts_print(to, arg, "=allocator:mseg_alloc\n");
    erts_mseg_info(&to, arg, 0, NULL, NULL);
#endif

    erts_print(to, arg, "=allocator:alloc_util\n");
    erts_alcu_au_info_options(&to, arg, NULL, NULL);

    erts_print(to, arg, "=allocator:instr\n");
    erts_print(to, arg, "option m: %s\n",
	       erts_instr_memory_map ? "true" : "false");
    erts_print(to, arg, "option s: %s\n",
	       erts_instr_stat ? "true" : "false");
    erts_print(to, arg, "option t: %s\n",
	       erts_mtrace_enabled ? "true" : "false");
}
/*
 * Build the term returned by erlang:system_info(allocator):
 * {Allocator, Version, Features, Settings}.
 *
 * Uses the erts_bld_* two-pass pattern: first pass (szp set) computes the
 * heap need, then heap is allocated and the 'goto bld_term' retries the
 * whole construction with hpp set to actually build the term.
 */
Eterm
erts_allocator_options(void *proc)
{
#if HAVE_ERTS_MSEG
    int use_mseg = 0;	/* counted per pass; only tested for truthiness */
#endif
    Uint sz, *szp, *hp, **hpp;
    Eterm res, features, settings;
    /* room for all allocators + mseg_alloc + alloc_util + instr + slack */
    Eterm atoms[ERTS_ALC_A_MAX-ERTS_ALC_A_MIN+5];
    Uint terms[ERTS_ALC_A_MAX-ERTS_ALC_A_MIN+5];
    int a, length;
    SysAllocStat sas;
    Uint *endp = NULL;

    sys_alloc_stat(&sas);

    /* First find out the heap size needed ... */
    hpp = NULL;
    szp = &sz;
    sz = 0;

 bld_term:
    length = 0;
    features = NIL;
    settings = NIL;

    for (a = ERTS_ALC_A_MIN; a <= ERTS_ALC_A_MAX; a++) {
	Eterm tmp = NIL;
	atoms[length] = am_atom_put((char *) ERTS_ALC_A2AD(a),
				    strlen(ERTS_ALC_A2AD(a)));
	if (erts_allctrs_info[a].enabled) {
	    if (erts_allctrs_info[a].alloc_util) {
		Allctr_t *allctr;
#if HAVE_ERTS_MSEG
		use_mseg++;
#endif
		if (erts_allctr_thr_spec[a].enabled)
		    allctr = erts_allctr_thr_spec[a].allctr[1];
		else
		    allctr = erts_allctrs_info[a].extra;
		tmp = erts_alcu_info_options(allctr, NULL, NULL, hpp, szp);
	    }
	    else {
		/* hand-written option lists for non-alloc_util allocators */
		int l = 0;
		Eterm as[4];
		Eterm ts[4];

		as[l] = am_atom_put("e", 1);
		ts[l++] = am_true;

		switch (a) {
		case ERTS_ALC_A_SYSTEM:
#ifdef ELIB_ALLOC_IS_CLIB
		    as[l] = am_atom_put("m", 1);
		    ts[l++] = am_atom_put("elib", 4);
#else
		    as[l] = am_atom_put("m", 1);
		    ts[l++] = am_atom_put("libc", 4);
#endif
		    if(sas.trim_threshold >= 0) {
			as[l] = am_atom_put("tt", 2);
			ts[l++] = erts_bld_uint(hpp, szp,
						(Uint) sas.trim_threshold);
		    }
		    if(sas.top_pad >= 0) {
			as[l] = am_atom_put("tp", 2);
			ts[l++] = erts_bld_uint(hpp, szp, (Uint) sas.top_pad);
		    }
		    break;
		default:
		    break;
		}

		tmp = erts_bld_2tup_list(hpp, szp, l, as, ts);
	    }
	}
	else {
	    /* disabled allocator: [{e, false}] */
	    Eterm atom = am_atom_put("e", 1);
	    Eterm term = am_false;
	    tmp = erts_bld_2tup_list(hpp, szp, 1, &atom, &term);
	}

	terms[length++] = tmp;
    }

#if HAVE_ERTS_MSEG
    if (use_mseg) {
	atoms[length] = am_atom_put("mseg_alloc", 10);
	terms[length++] = erts_mseg_info_options(NULL, NULL, hpp, szp);
    }
#endif

    atoms[length] = am_atom_put("alloc_util", 10);
    terms[length++] = erts_alcu_au_info_options(NULL, NULL, hpp, szp);

    {
	/* instrumentation settings: memory map, stat, trace */
	Eterm o[3], v[3];
	o[0] = am_atom_put("m", 1);
	v[0] = erts_instr_memory_map ? am_true : am_false;
	o[1] = am_atom_put("s", 1);
	v[1] = erts_instr_stat ? am_true : am_false;
	o[2] = am_atom_put("t", 1);
	v[2] = erts_mtrace_enabled ? am_true : am_false;

	atoms[length] = am_atom_put("instr", 5);
	terms[length++] = erts_bld_2tup_list(hpp, szp, 3, o, v);
    }

    settings = erts_bld_2tup_list(hpp, szp, length, atoms, terms);

    /* features: list of enabled allocator names (+ mseg_alloc) */
    length = 0;
    for (a = ERTS_ALC_A_MIN; a <= ERTS_ALC_A_MAX; a++) {
	if (erts_allctrs_info[a].enabled) {
	    terms[length++] = am_atom_put((char *) ERTS_ALC_A2AD(a),
					  strlen(ERTS_ALC_A2AD(a)));
	}
    }

#if HAVE_ERTS_MSEG
    if (use_mseg)
	terms[length++] = am_atom_put("mseg_alloc", 10);
#endif

    features = length ? erts_bld_list(hpp, szp, length, terms) : NIL;

#if defined(ELIB_ALLOC_IS_CLIB)
    {
	/* elib malloc: version parsed out of ERLANG_VERSION */
	Eterm version;
	int i;
	int ver[5];
	i = sscanf(ERLANG_VERSION,
		   "%d.%d.%d.%d.%d",
		   &ver[0], &ver[1], &ver[2], &ver[3], &ver[4]);

	version = NIL;
	for(i--; i >= 0; i--)
	    version = erts_bld_cons(hpp, szp, make_small(ver[i]), version);

	res = erts_bld_tuple(hpp, szp, 4,
			     am_elib_malloc, version, features, settings);
    }
#elif defined(__GLIBC__)
    {
	Eterm AM_glibc = am_atom_put("glibc", 5);
	Eterm version;

	version = erts_bld_cons(hpp,
				szp,
				make_small(__GLIBC__),
#ifdef __GLIBC_MINOR__
				erts_bld_cons(hpp,
					      szp,
					      make_small(__GLIBC_MINOR__),
					      NIL)
#else
				NIL
#endif
	    );

	res = erts_bld_tuple(hpp, szp, 4,
			     AM_glibc, version, features, settings);
    }
#else /* unknown allocator */
    res = erts_bld_tuple(hpp, szp, 4,
			 am_undefined, NIL, features, settings);
#endif

    if (szp) {
	/* ... and then build the term */
	hp = HAlloc((Process *) proc, sz);
	endp = hp + sz;
	hpp = &hp;
	szp = NULL;
	goto bld_term;
    }

    /* return the unused heap tail */
    ASSERT(endp >= hp);
    HRelease((Process *) proc, endp, hp);

    return res;
}
  2420. /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
  2421. * Deprecated functions *
  2422. * *
  2423. * These functions are still defined since "non-OTP linked in drivers" may *
  2424. * contain (illegal) calls to them. *
  2425. \* */
  2426. /* --- DO *NOT* USE THESE FUNCTIONS --- */
  2427. void *sys_alloc(Uint sz)
  2428. { return erts_alloc_fnf(ERTS_ALC_T_UNDEF, sz); }
  2429. void *sys_realloc(void *ptr, Uint sz)
  2430. { return erts_realloc_fnf(ERTS_ALC_T_UNDEF, ptr, sz); }
  2431. void sys_free(void *ptr)
  2432. { erts_free(ERTS_ALC_T_UNDEF, ptr); }
  2433. void *safe_alloc(Uint sz)
  2434. { return erts_alloc(ERTS_ALC_T_UNDEF, sz); }
  2435. void *safe_realloc(void *ptr, Uint sz)
  2436. { return erts_realloc(ERTS_ALC_T_UNDEF, ptr, sz); }
  2437. /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
  2438. * NOTE: erts_alc_test() is only supposed to be used for testing. *
  2439. * *
  2440. * Keep alloc_SUITE_data/allocator_test.h updated if changes are made *
  2441. * to erts_alc_test() *
  2442. \* */
  2443. #define ERTS_ALC_TEST_ABORT erl_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error\n")
/*
 * Test hook used by the alloc/allocator test suites (see
 * alloc_SUITE_data/allocator_test.h). The high byte of 'op' selects a
 * subsystem test dispatcher; ops 0xf?? implement generic helpers
 * (alloc/realloc/free on a given Allctr_t, allocator start/stop, and
 * thread/mutex/condvar primitives). Returns op-specific values; returns
 * ~0UL and asserts on an unknown op.
 */
unsigned long erts_alc_test(unsigned long op,
			    unsigned long a1,
			    unsigned long a2,
			    unsigned long a3)
{
    switch (op >> 8) {
    /* delegate to subsystem test entry points */
    case 0x0:	return erts_alcu_test(op,  a1, a2);
    case 0x1:	return erts_gfalc_test(op, a1, a2);
    case 0x2:	return erts_bfalc_test(op, a1, a2);
    case 0x3:	return erts_afalc_test(op, a1, a2);
    case 0x4:	return erts_mseg_test(op,  a1, a2, a3);
    case 0xf:
	switch (op) {
	case 0xf00:
	    /* alloc on allocator a1; use locked entry point if needed */
#ifdef USE_THREADS
	    if (((Allctr_t *) a1)->thread_safe)
		return (unsigned long) erts_alcu_alloc_ts(ERTS_ALC_T_UNDEF,
							  (void *) a1,
							  (Uint) a2);
	    else
#endif
		return (unsigned long) erts_alcu_alloc(ERTS_ALC_T_UNDEF,
						       (void *) a1,
						       (Uint) a2);
	case 0xf01:
	    /* realloc on allocator a1 */
#ifdef USE_THREADS
	    if (((Allctr_t *) a1)->thread_safe)
		return (unsigned long) erts_alcu_realloc_ts(ERTS_ALC_T_UNDEF,
							    (void *) a1,
							    (void *) a2,
							    (Uint) a3);
	    else
#endif
		return (unsigned long) erts_alcu_realloc(ERTS_ALC_T_UNDEF,
							 (void *) a1,
							 (void *) a2,
							 (Uint) a3);
	case 0xf02:
	    /* free on allocator a1 */
#ifdef USE_THREADS
	    if (((Allctr_t *) a1)->thread_safe)
		erts_alcu_free_ts(ERTS_ALC_T_UNDEF, (void *) a1, (void *) a2);
	    else
#endif
		erts_alcu_free(ERTS_ALC_T_UNDEF, (void *) a1, (void *) a2);
	    return 0;
	case 0xf03: {
	    /* start a new allocator: a1 = name prefix, a2 = thread safe,
	     * a3 = optional "-t..." argv overriding type/options */
	    Allctr_t *allctr;
	    struct au_init init;

	    SET_DEFAULT_ALLOC_OPTS(&init);
	    init.enable = 1;
	    init.atype = GOODFIT;
	    init.init.util.name_prefix = (char *) a1;
	    init.init.util.ts = a2 ? 1 : 0;

	    if ((char **) a3) {
		char **argv = (char **) a3;
		int i = 0;
		while (argv[i]) {
		    if (argv[i][0] == '-' && argv[i][1] == 't')
			handle_au_arg(&init, &argv[i][2], argv, &i);
		    else
			return (unsigned long) NULL;
		    i++;
		}
	    }

	    switch (init.atype) {
	    case GOODFIT:
		allctr = erts_gfalc_start((GFAllctr_t *)
					  erts_alloc(ERTS_ALC_T_UNDEF,
						     sizeof(GFAllctr_t)),
					  &init.init.gf,
					  &init.init.util);
		break;
	    case BESTFIT:
		allctr = erts_bfalc_start((BFAllctr_t *)
					  erts_alloc(ERTS_ALC_T_UNDEF,
						     sizeof(BFAllctr_t)),
					  &init.init.bf,
					  &init.init.util);
		break;
	    case AFIT:
		allctr = erts_afalc_start((AFAllctr_t *)
					  erts_alloc(ERTS_ALC_T_UNDEF,
						     sizeof(AFAllctr_t)),
					  &init.init.af,
					  &init.init.util);
		break;
	    default:
		ASSERT(0);
		allctr = NULL;
		break;
	    }

	    return (unsigned long) allctr;
	}
	case 0xf04:
	    /* stop and free an allocator started via 0xf03 */
	    erts_alcu_stop((Allctr_t *) a1);
	    erts_free(ERTS_ALC_T_UNDEF, (void *) a1);
	    break;
#ifdef USE_THREADS
	case 0xf05: return (unsigned long) 1;	/* threads supported */
	case 0xf06: return (unsigned long) ((Allctr_t *) a1)->thread_safe;
#ifdef ETHR_NO_FORKSAFETY
	case 0xf07: return (unsigned long) 0;
#else
	case 0xf07: return (unsigned long) ((Allctr_t *) a1)->thread_safe;
#endif
	case 0xf08: {
	    /* create a mutex */
	    ethr_mutex *mtx = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(ethr_mutex));
	    if (ethr_mutex_init(mtx) != 0)
		ERTS_ALC_TEST_ABORT;
	    return (unsigned long) mtx;
	}
	case 0xf09: {
	    /* destroy a mutex */
	    ethr_mutex *mtx = (ethr_mutex *) a1;
	    if (ethr_mutex_destroy(mtx) != 0)
		ERTS_ALC_TEST_ABORT;
	    erts_free(ERTS_ALC_T_UNDEF, (void *) mtx);
	    break;
	}
	case 0xf0a:
	    /* lock mutex */
	    if (ethr_mutex_lock((ethr_mutex *) a1) != 0)
		ERTS_ALC_TEST_ABORT;
	    break;
	case 0xf0b:
	    /* unlock mutex */
	    if (ethr_mutex_unlock((ethr_mutex *) a1) != 0)
		ERTS_ALC_TEST_ABORT;
	    break;
	case 0xf0c: {
	    /* create a condition variable */
	    ethr_cond *cnd = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(ethr_cond));
	    if (ethr_cond_init(cnd) != 0)
		ERTS_ALC_TEST_ABORT;
	    return (unsigned long) cnd;
	}
	case 0xf0d: {
	    /* destroy a condition variable */
	    ethr_cond *cnd = (ethr_cond *) a1;
	    if (ethr_cond_destroy(cnd) != 0)
		ERTS_ALC_TEST_ABORT;
	    erts_free(ERTS_ALC_T_UNDEF, (void *) cnd);
	    break;
	}
	case 0xf0e:
	    /* broadcast condition variable */
	    if (ethr_cond_broadcast((ethr_cond *) a1) != 0)
		ERTS_ALC_TEST_ABORT;
	    break;
	case 0xf0f: {
	    /* wait on condvar a1 with mutex a2; retry on EINTR */
	    int res;
	    do {
		res = ethr_cond_wait((ethr_cond *) a1, (ethr_mutex *) a2);
	    } while (res == EINTR);
	    if (res != 0)
		ERTS_ALC_TEST_ABORT;
	    break;
	}
	case 0xf10: {
	    /* spawn a thread running a1(a2) */
	    ethr_tid *tid = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(ethr_tid));
	    if (ethr_thr_create(tid,
				(void * (*)(void *)) a1,
				(void *) a2,
				NULL) != 0)
		ERTS_ALC_TEST_ABORT;
	    return (unsigned long) tid;
	}
	case 0xf11: {
	    /* join thread a1 */
	    ethr_tid *tid = (ethr_tid *) a1;
	    if (ethr_thr_join(*tid, NULL) != 0)
		ERTS_ALC_TEST_ABORT;
	    erts_free(ERTS_ALC_T_UNDEF, (void *) tid);
	    break;
	}
	case 0xf12:
	    /* exit current thread; ethr_thr_exit() does not return */
	    ethr_thr_exit((void *) a1);
	    ERTS_ALC_TEST_ABORT;
	    break;
#endif /* #ifdef USE_THREADS */
	default:
	    break;
	}
	return (unsigned long) 0;
    default:
	break;
    }

    ASSERT(0);
    return ~((unsigned long) 0);
}
  2627. #ifdef DEBUG
  2628. /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
  2629. * Debug stuff *
  2630. \* */
  2631. #if 0
  2632. #define PRINT_OPS
  2633. #else
  2634. #undef PRINT_OPS
  2635. #endif
  2636. #define FENCE_SZ (3*sizeof(UWord))
  2637. #if defined(ARCH_64)
  2638. #define FENCE_PATTERN 0xABCDEF97ABCDEF97
  2639. #else
  2640. #define FENCE_PATTERN 0xABCDEF97
  2641. #endif
  2642. #define TYPE_PATTERN_MASK ERTS_ALC_N_MASK
  2643. #define TYPE_PATTERN_SHIFT 16
  2644. #define FIXED_FENCE_PATTERN_MASK \
  2645. (~((UWord) (TYPE_PATTERN_MASK << TYPE_PATTERN_SHIFT)))
  2646. #define FIXED_FENCE_PATTERN \
  2647. (FENCE_PATTERN & FIXED_FENCE_PATTERN_MASK)
  2648. #define MK_PATTERN(T) \
  2649. (FIXED_FENCE_PATTERN | (((T) & TYPE_PATTERN_MASK) << TYPE_PATTERN_SHIFT))
  2650. #define GET_TYPE_OF_PATTERN(P) \
  2651. (((P) >> TYPE_PATTERN_SHIFT) & TYPE_PATTERN_MASK)
  2652. #ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
  2653. static void *check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func);
  2654. void check_allocated_block( Uint type, void *blk)
  2655. {
  2656. Uint dummy;
  2657. check_memory_fence(blk, &dummy, ERTS_ALC_T2N(type), ERTS_ALC_O_FREE);
  2658. }
/*
 * Hard-debug consistency check: for every alloc_util allocator, run its
 * check_mbc callback over all multiblock carriers. No-op before the
 * emulator has finished initialization.
 */
void check_allocators(void)
{
    int i;
    if (!erts_initialized)
	return;
    for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; ++i) {
	if (erts_allctrs_info[i].alloc_util) {
	    /* erts_allctrs[i].extra holds the saved real callbacks
	     * (see real_allctrs / install_debug_functions below) */
	    ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) erts_allctrs[i].extra;
	    Allctr_t *allctr = real_af->extra;
	    Carrier_t *ct;
#ifdef USE_THREADS
	    /* take the allocator lock while walking its carrier list */
	    if (allctr->thread_safe)
		erts_mtx_lock(&allctr->mutex);
#endif

	    if (allctr->check_mbc) {
		for (ct = allctr->mbc_list.first; ct; ct = ct->next) {
		    fprintf(stderr,"Checking allocator %d\r\n",i);
		    allctr->check_mbc(allctr,ct);
		}
	    }
#ifdef USE_THREADS
	    if (allctr->thread_safe)
		erts_mtx_unlock(&allctr->mutex);
#endif
	}
    }
}
  2686. #endif
  2687. static void *
  2688. set_memory_fence(void *ptr, Uint sz, ErtsAlcType_t n)
  2689. {
  2690. UWord *ui_ptr;
  2691. UWord pattern;
  2692. if (!ptr)
  2693. return NULL;
  2694. ui_ptr = (UWord *) ptr;
  2695. pattern = MK_PATTERN(n);
  2696. *(ui_ptr++) = sz;
  2697. *(ui_ptr++) = pattern;
  2698. memcpy((void *) (((char *) ui_ptr)+sz), (void *) &pattern, sizeof(UWord));
  2699. return (void *) ui_ptr;
  2700. }
/*
 * Validate the fences written by set_memory_fence() around user pointer
 * 'ptr'. Stores the recorded user size in *size and returns the real
 * (outer) block start. Aborts the emulator via erl_exit() if a fence is
 * clobbered or if the block was allocated as a different type than 'n'
 * ('func' names the operation for the error message).
 */
static void *
check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
{
    Uint sz;
    Uint found_type;
    UWord pre_pattern;
    UWord post_pattern;
    UWord *ui_ptr;

    if (!ptr)
	return NULL;

    ui_ptr = (UWord *) ptr;
    pre_pattern = *(--ui_ptr);	/* type-tagged pattern word */
    *size = sz = *(--ui_ptr);	/* recorded user size */

    found_type = GET_TYPE_OF_PATTERN(pre_pattern);
    if (pre_pattern != MK_PATTERN(n)) {
	/* only abort here if the fixed bits are wrong; a pure type
	 * mismatch is diagnosed below together with the post fence */
	if ((FIXED_FENCE_PATTERN_MASK & pre_pattern) != FIXED_FENCE_PATTERN)
	    erl_exit(ERTS_ABORT_EXIT,
		     "ERROR: Fence at beginning of memory block (p=0x%u) "
		     "clobbered.\n",
		     (unsigned long) ptr);
    }

    /* trailing fence may be unaligned; copy it out */
    memcpy((void *) &post_pattern, (void *) (((char *)ptr)+sz), sizeof(UWord));

    if (post_pattern != MK_PATTERN(n)
	|| pre_pattern != post_pattern) {
	char fbuf[10];
	char obuf[10];
	char *ftype;
	char *otype;
	char *op_str;

	if ((FIXED_FENCE_PATTERN_MASK & post_pattern) != FIXED_FENCE_PATTERN)
	    erl_exit(ERTS_ABORT_EXIT,
		     "ERROR: Fence at end of memory block (p=0x%u, sz=%u) "
		     "clobbered.\n",
		     (unsigned long) ptr, (unsigned long) sz);
	if (found_type != GET_TYPE_OF_PATTERN(post_pattern))
	    erl_exit(ERTS_ABORT_EXIT,
		     "ERROR: Fence around memory block (p=0x%u, sz=%u) "
		     "clobbered.\n",
		     (unsigned long) ptr, (unsigned long) sz);

	/* fences intact but type differs: allocated as one type,
	 * now operated on as another */
	ftype = type_no_str(found_type);
	if (!ftype) {
	    sprintf(fbuf, "%d", (int) found_type);
	    ftype = fbuf;
	}
	otype = type_no_str(n);
	if (!otype) {
	    sprintf(obuf, "%d", (int) n);
	    otype = obuf;
	}

	switch (func) {
	case ERTS_ALC_O_ALLOC:		op_str = "allocated";	break;
	case ERTS_ALC_O_REALLOC:	op_str = "reallocated";	break;
	case ERTS_ALC_O_FREE:		op_str = "freed";	break;
	default:			op_str = "???";		break;
	}

	erl_exit(ERTS_ABORT_EXIT,
		 "ERROR: Memory block (p=0x%u, sz=%u) allocated as type \"%s\","
		 " but %s as type \"%s\".\n",
		 (unsigned long) ptr, (unsigned long) sz, ftype, op_str, otype);
    }

    return (void *) ui_ptr;
}
  2763. static ErtsAllocatorFunctions_t real_allctrs[ERTS_ALC_A_MAX+1];
  2764. static void *
  2765. debug_alloc(ErtsAlcType_t n, void *extra, Uint size)
  2766. {
  2767. ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
  2768. Uint dsize;
  2769. void *res;
  2770. ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);
  2771. dsize = size + FENCE_SZ;
  2772. res = (*real_af->alloc)(n, real_af->extra, dsize);
  2773. res = set_memory_fence(res, size, n);
  2774. #ifdef PRINT_OPS
  2775. fprintf(stderr, "0x%lx = alloc(%s, %lu)\r\n",
  2776. (Uint) res, ERTS_ALC_N2TD(n), size);
  2777. #endif
  2778. return res;
  2779. }
  2780. static void *
  2781. debug_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
  2782. {
  2783. ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
  2784. Uint dsize;
  2785. Uint old_size;
  2786. void *dptr;
  2787. void *res;
  2788. ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);
  2789. dsize = size + FENCE_SZ;
  2790. dptr = check_memory_fence(ptr, &old_size, n, ERTS_ALC_O_REALLOC);
  2791. if (old_size > size)
  2792. sys_memset((void *) (((char *) ptr) + size),
  2793. 0xf,
  2794. sizeof(Uint) + old_size - size);
  2795. res = (*real_af->realloc)(n, real_af->extra, dptr, dsize);
  2796. res = set_memory_fence(res, size, n);
  2797. #ifdef PRINT_OPS
  2798. fprintf(stderr, "0x%lx = realloc(%s, 0x%lx, %lu)\r\n",
  2799. (Uint) res, ERTS_ALC_N2TD(n), (Uint) ptr, size);
  2800. #endif
  2801. return res;
  2802. }
  2803. static void
  2804. debug_free(ErtsAlcType_t n, void *extra, void *ptr)
  2805. {
  2806. ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
  2807. void *dptr;
  2808. Uint size;
  2809. ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);
  2810. dptr = check_memory_fence(ptr, &size, n, ERTS_ALC_O_FREE);
  2811. sys_memset((void *) dptr, n, size + FENCE_SZ);
  2812. (*real_af->free)(n, real_af->extra, dptr);
  2813. #ifdef PRINT_OPS
  2814. fprintf(stderr, "free(%s, 0x%lx)\r\n", ERTS_ALC_N2TD(n), (Uint) ptr);
  2815. #endif
  2816. }
  2817. static Uint
  2818. install_debug_functions(void)
  2819. {
  2820. int i;
  2821. ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));
  2822. sys_memcpy((void *)real_allctrs,(void *)erts_allctrs,sizeof(erts_allctrs));
  2823. for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
  2824. erts_allctrs[i].alloc = debug_alloc;
  2825. erts_allctrs[i].realloc = debug_realloc;
  2826. erts_allctrs[i].free = debug_free;
  2827. erts_allctrs[i].extra = (void *) &real_allctrs[i];
  2828. }
  2829. return FENCE_SZ;
  2830. }
  2831. #endif /* #ifdef DEBUG */