
/erts/emulator/beam/erl_alloc.c

https://github.com/vladdu/otp
C | 4095 lines | 3523 code | 456 blank | 116 comment
Possible License(s): BSD-3-Clause, Apache-2.0, Unlicense
  1. /*
  2. * %CopyrightBegin%
  3. *
  4. * Copyright Ericsson AB 2002-2020. All Rights Reserved.
  5. *
  6. * Licensed under the Apache License, Version 2.0 (the "License");
  7. * you may not use this file except in compliance with the License.
  8. * You may obtain a copy of the License at
  9. *
  10. * http://www.apache.org/licenses/LICENSE-2.0
  11. *
  12. * Unless required by applicable law or agreed to in writing, software
  13. * distributed under the License is distributed on an "AS IS" BASIS,
  14. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15. * See the License for the specific language governing permissions and
  16. * limitations under the License.
  17. *
  18. * %CopyrightEnd%
  19. */
  20. /*
  21. * Description: Management of memory allocators.
  22. *
  23. * Author: Rickard Green
  24. */
  25. #ifdef HAVE_CONFIG_H
  26. # include "config.h"
  27. #endif
  28. #define ERTS_ALLOC_C__
  29. #define ERTS_ALC_INTERNAL__
  30. #define ERTS_WANT_MEM_MAPPERS
  31. #include "sys.h"
  32. #define ERL_THREADS_EMU_INTERNAL__
  33. #include "erl_threads.h"
  34. #include "global.h"
  35. #include "erl_db.h"
  36. #include "erl_binary.h"
  37. #include "erl_bits.h"
  38. #include "erl_mtrace.h"
  39. #include "erl_mseg.h"
  40. #include "erl_monitor_link.h"
  41. #include "erl_hl_timer.h"
  42. #include "erl_cpu_topology.h"
  43. #include "erl_thr_queue.h"
  44. #include "erl_nfunc_sched.h"
  45. #if defined(ERTS_ALC_T_DRV_SEL_D_STATE) || defined(ERTS_ALC_T_DRV_EV_D_STATE)
  46. #include "erl_check_io.h"
  47. #endif
  48. #include "erl_bif_unique.h"
  49. #define GET_ERL_GF_ALLOC_IMPL
  50. #include "erl_goodfit_alloc.h"
  51. #define GET_ERL_BF_ALLOC_IMPL
  52. #include "erl_bestfit_alloc.h"
  53. #define GET_ERL_AF_ALLOC_IMPL
  54. #include "erl_afit_alloc.h"
  55. #define GET_ERL_AOFF_ALLOC_IMPL
  56. #include "erl_ao_firstfit_alloc.h"
  57. #if ERTS_MAX_NO_OF_SCHEDULERS > ERTS_AU_MAX_PREF_ALLOC_INSTANCES
  58. # error "Too many schedulers; cannot create that many pref alloc instances"
  59. #endif
  60. #define ERTS_ALC_DEFAULT_MAX_THR_PREF ERTS_MAX_NO_OF_SCHEDULERS
  61. #if defined(SMALL_MEMORY) || defined(VALGRIND) || defined(ADDRESS_SANITIZER)
  62. #define AU_ALLOC_DEFAULT_ENABLE(X) 0
  63. #else
  64. #define AU_ALLOC_DEFAULT_ENABLE(X) (X)
  65. #endif
  66. #define ERTS_ALC_DEFAULT_ENABLED_ACUL 60
  67. #define ERTS_ALC_DEFAULT_ENABLED_ACUL_EHEAP_ALLOC 45
  68. #define ERTS_ALC_DEFAULT_ENABLED_ACUL_LL_ALLOC 85
  69. #define ERTS_ALC_DEFAULT_ACUL ERTS_ALC_DEFAULT_ENABLED_ACUL
  70. #define ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC ERTS_ALC_DEFAULT_ENABLED_ACUL_EHEAP_ALLOC
  71. #define ERTS_ALC_DEFAULT_ACUL_LL_ALLOC ERTS_ALC_DEFAULT_ENABLED_ACUL_LL_ALLOC
  72. #ifdef DEBUG
  73. static Uint install_debug_functions(void);
  74. #if 0
  75. #define HARD_DEBUG
  76. #ifdef __GNUC__
  77. #warning "* * * * * * * * * * * * * *"
  78. #warning "* HARD DEBUG IS ENABLED! *"
  79. #warning "* * * * * * * * * * * * * *"
  80. #endif
  81. #endif
  82. #endif
  83. static int lock_all_physical_memory = 0;
  84. ErtsAllocatorFunctions_t ERTS_WRITE_UNLIKELY(erts_allctrs[ERTS_ALC_A_MAX+1]);
  85. ErtsAllocatorInfo_t erts_allctrs_info[ERTS_ALC_A_MAX+1];
  86. ErtsAllocatorThrSpec_t ERTS_WRITE_UNLIKELY(erts_allctr_thr_spec[ERTS_ALC_A_MAX+1]);
  87. #define ERTS_MIN(A, B) ((A) < (B) ? (A) : (B))
  88. #define ERTS_MAX(A, B) ((A) > (B) ? (A) : (B))
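/*
 * Allocator state area big enough for any of the strategy
 * implementations (goodfit, bestfit, afit, aoff), cache line aligned.
 * One static state per allocator class is declared below; additional
 * thread specific instances are allocated in start_au_allocator().
 */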
  89. typedef union {
  90. GFAllctr_t gfa;
  91. char align_gfa[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(GFAllctr_t))];
  92. BFAllctr_t bfa;
  93. char align_bfa[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(BFAllctr_t))];
  94. AFAllctr_t afa;
  95. char align_afa[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(AFAllctr_t))];
  96. AOFFAllctr_t aoffa;
  97. char align_aoffa[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(AOFFAllctr_t))];
  98. } ErtsAllocatorState_t erts_align_attribute(ERTS_CACHE_LINE_SIZE);
  99. static ErtsAllocatorState_t std_alloc_state;
  100. static ErtsAllocatorState_t ll_alloc_state;
  101. static ErtsAllocatorState_t sl_alloc_state;
  102. static ErtsAllocatorState_t temp_alloc_state;
  103. static ErtsAllocatorState_t eheap_alloc_state;
  104. static ErtsAllocatorState_t binary_alloc_state;
  105. static ErtsAllocatorState_t ets_alloc_state;
  106. static ErtsAllocatorState_t driver_alloc_state;
  107. static ErtsAllocatorState_t fix_alloc_state;
  108. static ErtsAllocatorState_t literal_alloc_state;
  109. static ErtsAllocatorState_t test_alloc_state;
  110. enum {
  111. ERTS_ALC_INFO_A_ALLOC_UTIL = ERTS_ALC_A_MAX + 1,
  112. ERTS_ALC_INFO_A_MSEG_ALLOC,
  113. ERTS_ALC_INFO_A_ERTS_MMAP,
  114. ERTS_ALC_INFO_A_END
  115. };
  116. typedef struct {
  117. erts_atomic32_t refc;
  118. int only_sz;
  119. int internal;
  120. Uint req_sched;
  121. Process *proc;
  122. ErtsIRefStorage iref;
  123. int allocs[ERTS_ALC_INFO_A_END - ERTS_ALC_A_MIN + 1];
  124. } ErtsAllocInfoReq;
  125. ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(aireq,
  126. ErtsAllocInfoReq,
  127. 5,
  128. ERTS_ALC_T_AINFO_REQ)
  129. ErtsAlcType_t erts_fix_core_allocator_ix;
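/*
 * Start-up configuration for one alloc_util allocator: whether it is
 * enabled, whether it may run thread specific or thread preferred
 * instances, the allocation strategy, and the strategy specific init
 * parameters. The default_ flags record which carrier sizes are still
 * at their defaults and thus may be adjusted by adjust_tpref().
 */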
  130. struct au_init {
  131. int enable;
  132. int thr_spec;
  133. int disable_allowed;
  134. int thr_spec_allowed;
  135. int carrier_migration_allowed;
  136. ErtsAlcStrat_t astrat;
  137. struct {
  138. AllctrInit_t util;
  139. GFAllctrInit_t gf;
  140. BFAllctrInit_t bf;
  141. AFAllctrInit_t af;
  142. AOFFAllctrInit_t aoff;
  143. } init;
  144. struct {
  145. int mmbcs;
  146. int lmbcs;
  147. int smbcs;
  148. int mmmbc;
  149. } default_;
  150. };
  151. #define DEFAULT_ALLCTR_INIT { \
  152. ERTS_DEFAULT_ALLCTR_INIT, \
  153. ERTS_DEFAULT_GF_ALLCTR_INIT, \
  154. ERTS_DEFAULT_BF_ALLCTR_INIT, \
  155. ERTS_DEFAULT_AF_ALLCTR_INIT, \
  156. ERTS_DEFAULT_AOFF_ALLCTR_INIT \
  157. }
  158. typedef struct {
  159. int erts_alloc_config;
  160. #if HAVE_ERTS_MSEG
  161. ErtsMsegInit_t mseg;
  162. #endif
  163. int trim_threshold;
  164. int top_pad;
  165. AlcUInit_t alloc_util;
  166. struct {
  167. char *mtrace;
  168. char *nodename;
  169. } instr;
  170. struct au_init sl_alloc;
  171. struct au_init std_alloc;
  172. struct au_init ll_alloc;
  173. struct au_init temp_alloc;
  174. struct au_init eheap_alloc;
  175. struct au_init binary_alloc;
  176. struct au_init ets_alloc;
  177. struct au_init driver_alloc;
  178. struct au_init fix_alloc;
  179. struct au_init literal_alloc;
  180. struct au_init test_alloc;
  181. } erts_alc_hndl_args_init_t;
  182. #define ERTS_AU_INIT__ {0, 0, 1, 1, 1, \
  183. ERTS_ALC_S_GOODFIT, DEFAULT_ALLCTR_INIT, \
  184. {1,1,1,1}}
  185. #define SET_DEFAULT_ALLOC_OPTS(IP) \
  186. do { \
  187. struct au_init aui__ = ERTS_AU_INIT__; \
  188. sys_memcpy((void *) (IP), (void *) &aui__, sizeof(struct au_init)); \
  189. } while (0)
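/*
 * The set_default_*_alloc_opts() functions below fill in the default
 * settings for each allocator class: strategy, main multi-block
 * carrier size (mmbcs), abandon carrier utilization limit (acul), etc.
 * They may later be overridden by +M* arguments in handle_args().
 */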
  190. static void
  191. set_default_sl_alloc_opts(struct au_init *ip)
  192. {
  193. SET_DEFAULT_ALLOC_OPTS(ip);
  194. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  195. ip->thr_spec = 1;
  196. ip->astrat = ERTS_ALC_S_GOODFIT;
  197. ip->init.util.name_prefix = "sl_";
  198. ip->init.util.alloc_no = ERTS_ALC_A_SHORT_LIVED;
  199. ip->init.util.cp = ERTS_ALC_A_SHORT_LIVED;
  200. #ifndef SMALL_MEMORY
  201. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  202. #else
  203. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  204. #endif
  205. ip->init.util.ts = ERTS_ALC_MTA_SHORT_LIVED;
  206. ip->init.util.rsbcst = 80;
  207. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
  208. }
  209. static void
  210. set_default_std_alloc_opts(struct au_init *ip)
  211. {
  212. SET_DEFAULT_ALLOC_OPTS(ip);
  213. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  214. ip->thr_spec = 1;
  215. ip->astrat = ERTS_ALC_S_BESTFIT;
  216. ip->init.util.name_prefix = "std_";
  217. ip->init.util.alloc_no = ERTS_ALC_A_STANDARD;
  218. ip->init.util.cp = ERTS_ALC_A_STANDARD;
  219. #ifndef SMALL_MEMORY
  220. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  221. #else
  222. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  223. #endif
  224. ip->init.util.ts = ERTS_ALC_MTA_STANDARD;
  225. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
  226. }
  227. static void
  228. set_default_ll_alloc_opts(struct au_init *ip)
  229. {
  230. SET_DEFAULT_ALLOC_OPTS(ip);
  231. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  232. ip->thr_spec = 0;
  233. ip->astrat = ERTS_ALC_S_BESTFIT;
  234. ip->init.bf.ao = 1;
  235. ip->init.util.ramv = 0;
  236. ip->init.util.mmsbc = 0;
  237. ip->init.util.sbct = ~((UWord) 0);
  238. ip->init.util.name_prefix = "ll_";
  239. ip->init.util.alloc_no = ERTS_ALC_A_LONG_LIVED;
  240. ip->init.util.cp = ERTS_ALC_A_LONG_LIVED;
  241. #ifndef SMALL_MEMORY
  242. ip->init.util.mmbcs = 2*1024*1024; /* Main carrier size */
  243. #else
  244. ip->init.util.mmbcs = 1*1024*1024; /* Main carrier size */
  245. #endif
  246. ip->init.util.ts = ERTS_ALC_MTA_LONG_LIVED;
  247. ip->init.util.asbcst = 0;
  248. ip->init.util.rsbcst = 0;
  249. ip->init.util.rsbcmt = 0;
  250. ip->init.util.rmbcmt = 0;
  251. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL_LL_ALLOC;
  252. }
  253. static void
  254. set_default_literal_alloc_opts(struct au_init *ip)
  255. {
  256. SET_DEFAULT_ALLOC_OPTS(ip);
  257. #ifdef ADDRESS_SANITIZER
  258. ip->enable = 0;
  259. #else
  260. ip->enable = 1;
  261. #endif
  262. ip->thr_spec = 0;
  263. ip->disable_allowed = 0;
  264. ip->thr_spec_allowed = 0;
  265. ip->carrier_migration_allowed = 0;
  266. ip->astrat = ERTS_ALC_S_BESTFIT;
  267. ip->init.bf.ao = 1;
  268. ip->init.util.ramv = 0;
  269. ip->init.util.mmsbc = 0;
  270. ip->init.util.sbct = ~((UWord) 0);
  271. ip->init.util.name_prefix = "literal_";
  272. ip->init.util.alloc_no = ERTS_ALC_A_LITERAL;
  273. #ifndef SMALL_MEMORY
  274. ip->init.util.mmbcs = 1024*1024; /* Main carrier size */
  275. #else
  276. ip->init.util.mmbcs = 256*1024; /* Main carrier size */
  277. #endif
  278. ip->init.util.ts = ERTS_ALC_MTA_LITERAL;
  279. ip->init.util.asbcst = 0;
  280. ip->init.util.rsbcst = 0;
  281. ip->init.util.rsbcmt = 0;
  282. ip->init.util.rmbcmt = 0;
  283. ip->init.util.acul = 0;
  284. #if defined(ARCH_32)
  285. # if HAVE_ERTS_MSEG
  286. ip->init.util.mseg_alloc = &erts_alcu_literal_32_mseg_alloc;
  287. ip->init.util.mseg_realloc = &erts_alcu_literal_32_mseg_realloc;
  288. ip->init.util.mseg_dealloc = &erts_alcu_literal_32_mseg_dealloc;
  289. # endif
  290. ip->init.util.sys_alloc = &erts_alcu_literal_32_sys_alloc;
  291. ip->init.util.sys_realloc = &erts_alcu_literal_32_sys_realloc;
  292. ip->init.util.sys_dealloc = &erts_alcu_literal_32_sys_dealloc;
  293. #elif defined(ARCH_64)
  294. # ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION
  295. ip->init.util.mseg_alloc = &erts_alcu_mmapper_mseg_alloc;
  296. ip->init.util.mseg_realloc = &erts_alcu_mmapper_mseg_realloc;
  297. ip->init.util.mseg_dealloc = &erts_alcu_mmapper_mseg_dealloc;
  298. ip->init.util.mseg_mmapper = &erts_literal_mmapper;
  299. # endif
  300. #else
  301. # error Unknown architecture
  302. #endif
  303. }
  304. static void
  305. set_default_temp_alloc_opts(struct au_init *ip)
  306. {
  307. SET_DEFAULT_ALLOC_OPTS(ip);
  308. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  309. ip->thr_spec = 1;
  310. ip->disable_allowed = 0;
  311. ip->carrier_migration_allowed = 0;
  312. ip->astrat = ERTS_ALC_S_AFIT;
  313. ip->init.util.name_prefix = "temp_";
  314. ip->init.util.alloc_no = ERTS_ALC_A_TEMPORARY;
  315. #ifndef SMALL_MEMORY
  316. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  317. #else
  318. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  319. #endif
  320. ip->init.util.ts = ERTS_ALC_MTA_TEMPORARY;
  321. ip->init.util.rsbcst = 90;
  322. ip->init.util.rmbcmt = 100;
  323. }
  324. static void
  325. set_default_eheap_alloc_opts(struct au_init *ip)
  326. {
  327. SET_DEFAULT_ALLOC_OPTS(ip);
  328. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  329. ip->thr_spec = 1;
  330. ip->astrat = ERTS_ALC_S_GOODFIT;
  331. ip->init.util.name_prefix = "eheap_";
  332. ip->init.util.alloc_no = ERTS_ALC_A_EHEAP;
  333. ip->init.util.cp = ERTS_ALC_A_EHEAP;
  334. #ifndef SMALL_MEMORY
  335. ip->init.util.mmbcs = 512*1024; /* Main carrier size */
  336. #else
  337. ip->init.util.mmbcs = 256*1024; /* Main carrier size */
  338. #endif
  339. ip->init.util.ts = ERTS_ALC_MTA_EHEAP;
  340. ip->init.util.rsbcst = 50;
  341. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC;
  342. }
  343. static void
  344. set_default_binary_alloc_opts(struct au_init *ip)
  345. {
  346. SET_DEFAULT_ALLOC_OPTS(ip);
  347. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  348. ip->thr_spec = 1;
  349. ip->astrat = ERTS_ALC_S_BESTFIT;
  350. ip->init.util.name_prefix = "binary_";
  351. ip->init.util.alloc_no = ERTS_ALC_A_BINARY;
  352. ip->init.util.cp = ERTS_ALC_A_BINARY;
  353. #ifndef SMALL_MEMORY
  354. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  355. #else
  356. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  357. #endif
  358. ip->init.util.ts = ERTS_ALC_MTA_BINARY;
  359. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
  360. ip->init.util.atags = 1;
  361. }
  362. static void
  363. set_default_ets_alloc_opts(struct au_init *ip)
  364. {
  365. SET_DEFAULT_ALLOC_OPTS(ip);
  366. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  367. ip->thr_spec = 1;
  368. ip->astrat = ERTS_ALC_S_BESTFIT;
  369. ip->init.util.name_prefix = "ets_";
  370. ip->init.util.alloc_no = ERTS_ALC_A_ETS;
  371. ip->init.util.cp = ERTS_ALC_A_ETS;
  372. #ifndef SMALL_MEMORY
  373. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  374. #else
  375. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  376. #endif
  377. ip->init.util.ts = ERTS_ALC_MTA_ETS;
  378. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
  379. }
  380. static void
  381. set_default_driver_alloc_opts(struct au_init *ip)
  382. {
  383. SET_DEFAULT_ALLOC_OPTS(ip);
  384. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  385. ip->thr_spec = 1;
  386. ip->astrat = ERTS_ALC_S_BESTFIT;
  387. ip->init.util.name_prefix = "driver_";
  388. ip->init.util.alloc_no = ERTS_ALC_A_DRIVER;
  389. ip->init.util.cp = ERTS_ALC_A_DRIVER;
  390. #ifndef SMALL_MEMORY
  391. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  392. #else
  393. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  394. #endif
  395. ip->init.util.ts = ERTS_ALC_MTA_DRIVER;
  396. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
  397. ip->init.util.atags = 1;
  398. }
  399. static void
  400. set_default_fix_alloc_opts(struct au_init *ip,
  401. size_t *fix_type_sizes)
  402. {
  403. SET_DEFAULT_ALLOC_OPTS(ip);
  404. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  405. ip->thr_spec = 1;
  406. ip->astrat = ERTS_ALC_S_BESTFIT;
  407. ip->init.bf.ao = 1;
  408. ip->init.util.name_prefix = "fix_";
  409. ip->init.util.fix_type_size = fix_type_sizes;
  410. ip->init.util.alloc_no = ERTS_ALC_A_FIXED_SIZE;
  411. ip->init.util.cp = ERTS_ALC_A_FIXED_SIZE;
  412. #ifndef SMALL_MEMORY
  413. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  414. #else
  415. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  416. #endif
  417. ip->init.util.ts = ERTS_ALC_MTA_FIXED_SIZE;
  418. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
  419. }
  420. static void
  421. set_default_test_alloc_opts(struct au_init *ip)
  422. {
  423. SET_DEFAULT_ALLOC_OPTS(ip);
  424. ip->enable = 0; /* Disabled by default */
  425. ip->thr_spec = -1 * erts_no_schedulers;
  426. ip->astrat = ERTS_ALC_S_FIRSTFIT;
  427. ip->init.aoff.crr_order = FF_AOFF;
  428. ip->init.aoff.blk_order = FF_BF;
  429. ip->init.util.name_prefix = "test_";
  430. ip->init.util.alloc_no = ERTS_ALC_A_TEST;
  431. ip->init.util.cp = ERTS_ALC_A_TEST;
  432. ip->init.util.mmbcs = 0; /* Main carrier size */
  433. ip->init.util.ts = ERTS_ALC_MTA_TEST;
  434. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
  435. ip->init.util.atags = 1;
  436. /* Use a constant minimal MBC size */
  437. #if ERTS_SA_MB_CARRIERS
  438. ip->init.util.smbcs = ERTS_SACRR_UNIT_SZ;
  439. ip->init.util.lmbcs = ERTS_SACRR_UNIT_SZ;
  440. ip->init.util.sbct = ERTS_SACRR_UNIT_SZ;
  441. #else
  442. ip->init.util.smbcs = 1 << 12;
  443. ip->init.util.lmbcs = 1 << 12;
  444. ip->init.util.sbct = 1 << 12;
  445. #endif
  446. }
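/*
 * Switch a thread specific allocator over to the thread preferred
 * interface (negative thr_spec, one instance per scheduler) and shrink
 * the default carrier sizes so that memory use does not grow linearly
 * with the number of instances.
 */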
  447. static void
  448. adjust_tpref(struct au_init *ip, int no_sched)
  449. {
  450. if (ip->thr_spec) {
  451. ip->thr_spec = no_sched;
  452. ip->thr_spec *= -1; /* thread preferred */
  453. /* If default ... */
  454. /* ... shrink main multi-block carrier size */
  455. if (ip->default_.mmbcs)
  456. ip->init.util.mmbcs /= ERTS_MIN(4, no_sched);
  457. /* ... shrink largest multi-block carrier size */
  458. if (ip->default_.lmbcs)
  459. ip->init.util.lmbcs /= ERTS_MIN(2, no_sched);
  460. /* ... shrink smallest multi-block carrier size */
  461. if (ip->default_.smbcs)
  462. ip->init.util.smbcs /= ERTS_MIN(4, no_sched);
  463. }
  464. }
  465. static void handle_args(int *, char **, erts_alc_hndl_args_init_t *);
  466. static void
  467. set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init, int ncpu);
  468. static void
  469. start_au_allocator(ErtsAlcType_t alctr_n,
  470. struct au_init *init,
  471. ErtsAllocatorState_t *state);
  472. static void
  473. refuse_af_strategy(struct au_init *init)
  474. {
  475. if (init->astrat == ERTS_ALC_S_AFIT)
  476. init->astrat = ERTS_ALC_S_GOODFIT;
  477. }
  478. #ifdef HARD_DEBUG
  479. static void hdbg_init(void);
  480. #endif
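/*
 * Grow the fixed size block types when the debug wrapper functions
 * (see install_debug_functions() in erts_alloc_init()) need extra
 * space in each block.
 */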
  481. static void adjust_fix_alloc_sizes(UWord extra_block_size)
  482. {
  483. if (extra_block_size && erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].enabled) {
  484. int j;
  485. if (erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].thr_spec) {
  486. int i;
  487. ErtsAllocatorThrSpec_t* tspec;
  488. tspec = &erts_allctr_thr_spec[ERTS_ALC_A_FIXED_SIZE];
  489. ASSERT(tspec->enabled);
  490. for (i=0; i < tspec->size; i++) {
  491. Allctr_t* allctr = tspec->allctr[i];
  492. for (j=0; j < ERTS_ALC_NO_FIXED_SIZES; ++j) {
  493. size_t size = allctr->fix[j].type_size;
  494. size = MAX(size + extra_block_size,
  495. sizeof(ErtsAllctrDDBlock_t));
  496. allctr->fix[j].type_size = size;
  497. }
  498. }
  499. }
  500. else
  501. {
  502. Allctr_t* allctr = erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra;
  503. for (j=0; j < ERTS_ALC_NO_FIXED_SIZES; ++j) {
  504. size_t size = allctr->fix[j].type_size;
  505. size = MAX(size + extra_block_size,
  506. sizeof(ErtsAllctrDDBlock_t));
  507. allctr->fix[j].type_size = size;
  508. }
  509. }
  510. }
  511. }
  512. static ERTS_INLINE int
  513. strategy_support_carrier_migration(struct au_init *auip)
  514. {
  515. /*
  516. * Currently only aoff* and ageff* support carrier
  517. * migration, i.e., type AOFIRSTFIT.
  518. */
  519. return auip->astrat == ERTS_ALC_S_FIRSTFIT;
  520. }
  521. static ERTS_INLINE void
  522. adjust_carrier_migration_support(struct au_init *auip)
  523. {
  524. if (auip->init.util.acul) {
  525. auip->thr_spec = -1; /* Need thread preferred */
  526. /*
  527. * If strategy cannot handle carrier migration,
  528. * default to a strategy that can...
  529. */
  530. if (!strategy_support_carrier_migration(auip)) {
  531. /* Default to aoffcbf */
  532. auip->astrat = ERTS_ALC_S_FIRSTFIT;
  533. auip->init.aoff.crr_order = FF_AOFF;
  534. auip->init.aoff.blk_order = FF_BF;
  535. }
  536. }
  537. }
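/*
 * Top level allocator initialization: set per allocator defaults,
 * parse the +M* command line arguments, adjust for carrier migration
 * and thread preference, initialize mseg/alloc_util and the strategy
 * implementations, and finally install and start every enabled
 * allocator instance.
 */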
  538. void
  539. erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
  540. {
  541. UWord extra_block_size = 0;
  542. int i, ncpu;
  543. erts_alc_hndl_args_init_t init = {
  544. 0,
  545. #if HAVE_ERTS_MSEG
  546. ERTS_MSEG_INIT_DEFAULT_INITIALIZER,
  547. #endif
  548. ERTS_DEFAULT_TRIM_THRESHOLD,
  549. ERTS_DEFAULT_TOP_PAD,
  550. ERTS_DEFAULT_ALCU_INIT,
  551. };
  552. size_t fix_type_sizes[ERTS_ALC_NO_FIXED_SIZES] = {0};
  553. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_PROC)]
  554. = sizeof(Process);
  555. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MONITOR)]
  556. = sizeof(ErtsMonitorDataHeap);
  557. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_LINK)]
  558. = sizeof(ErtsLinkData);
  559. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_DRV_SEL_D_STATE)]
  560. = sizeof(ErtsDrvSelectDataState);
  561. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_NIF_SEL_D_STATE)]
  562. = sizeof(ErtsNifSelectDataState);
  563. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MSG_REF)]
  564. = sizeof(ErtsMessageRef);
  565. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_THR_Q_EL_SL)]
  566. = sizeof(ErtsThrQElement_t);
  567. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_LL_PTIMER)]
  568. = erts_timer_type_size(ERTS_ALC_T_LL_PTIMER);
  569. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_HL_PTIMER)]
  570. = erts_timer_type_size(ERTS_ALC_T_HL_PTIMER);
  571. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_BIF_TIMER)]
  572. = erts_timer_type_size(ERTS_ALC_T_BIF_TIMER);
  573. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MREF_NSCHED_ENT)]
  574. = sizeof(ErtsNSchedMagicRefTableEntry);
  575. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_PREF_NSCHED_ENT)]
  576. = sizeof(ErtsNSchedPidRefTableEntry);
  577. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MINDIRECTION)]
  578. = ERTS_MAGIC_BIN_UNALIGNED_SIZE(sizeof(ErtsMagicIndirectionWord));
  579. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_RECV_MARK_BLK)]
  580. = sizeof(ErtsRecvMarkerBlock);
  581. #ifdef HARD_DEBUG
  582. hdbg_init();
  583. #endif
  584. lock_all_physical_memory = 0;
  585. ncpu = eaiop->ncpu;
  586. if (ncpu < 1)
  587. ncpu = 1;
  588. erts_tsd_key_create(&erts_allctr_prelock_tsd_key,
  589. "erts_allctr_prelock_tsd_key");
  590. erts_sys_alloc_init();
  591. erts_init_utils_mem();
  592. set_default_sl_alloc_opts(&init.sl_alloc);
  593. set_default_std_alloc_opts(&init.std_alloc);
  594. set_default_ll_alloc_opts(&init.ll_alloc);
  595. set_default_temp_alloc_opts(&init.temp_alloc);
  596. set_default_eheap_alloc_opts(&init.eheap_alloc);
  597. set_default_binary_alloc_opts(&init.binary_alloc);
  598. set_default_ets_alloc_opts(&init.ets_alloc);
  599. set_default_driver_alloc_opts(&init.driver_alloc);
  600. set_default_fix_alloc_opts(&init.fix_alloc,
  601. fix_type_sizes);
  602. set_default_literal_alloc_opts(&init.literal_alloc);
  603. set_default_test_alloc_opts(&init.test_alloc);
  604. if (argc && argv)
  605. handle_args(argc, argv, &init);
  606. if (lock_all_physical_memory) {
  607. #ifdef HAVE_MLOCKALL
  608. errno = 0;
  609. if (mlockall(MCL_CURRENT|MCL_FUTURE) != 0) {
  610. int err = errno;
  611. const char *errstr = err ? strerror(err) : "unknown";
  612. erts_exit(1, "Failed to lock physical memory: %s (%d)\n",
  613. errstr, err);
  614. }
  615. #else
  616. erts_exit(1, "Failed to lock physical memory: Not supported\n");
  617. #endif
  618. }
  619. /* Make adjustments for carrier migration support */
  620. init.temp_alloc.init.util.acul = 0;
  621. adjust_carrier_migration_support(&init.sl_alloc);
  622. adjust_carrier_migration_support(&init.std_alloc);
  623. adjust_carrier_migration_support(&init.ll_alloc);
  624. adjust_carrier_migration_support(&init.eheap_alloc);
  625. adjust_carrier_migration_support(&init.binary_alloc);
  626. adjust_carrier_migration_support(&init.ets_alloc);
  627. adjust_carrier_migration_support(&init.driver_alloc);
  628. adjust_carrier_migration_support(&init.fix_alloc);
  629. adjust_carrier_migration_support(&init.literal_alloc);
  630. if (init.erts_alloc_config) {
  631. /* Adjust flags that erts_alloc_config won't like */
  632. /* No thread specific instances */
  633. init.temp_alloc.thr_spec = 0;
  634. init.sl_alloc.thr_spec = 0;
  635. init.std_alloc.thr_spec = 0;
  636. init.ll_alloc.thr_spec = 0;
  637. init.eheap_alloc.thr_spec = 0;
  638. init.binary_alloc.thr_spec = 0;
  639. init.ets_alloc.thr_spec = 0;
  640. init.driver_alloc.thr_spec = 0;
  641. init.fix_alloc.thr_spec = 0;
  642. init.literal_alloc.thr_spec = 0;
  643. /* No carrier migration */
  644. init.temp_alloc.init.util.acul = 0;
  645. init.sl_alloc.init.util.acul = 0;
  646. init.std_alloc.init.util.acul = 0;
  647. init.ll_alloc.init.util.acul = 0;
  648. init.eheap_alloc.init.util.acul = 0;
  649. init.binary_alloc.init.util.acul = 0;
  650. init.ets_alloc.init.util.acul = 0;
  651. init.driver_alloc.init.util.acul = 0;
  652. init.fix_alloc.init.util.acul = 0;
  653. init.literal_alloc.init.util.acul = 0;
  654. }
  655. /* Only temp_alloc can use thread specific interface */
  656. if (init.temp_alloc.thr_spec)
  657. init.temp_alloc.thr_spec = erts_no_schedulers;
  658. /* Others must use thread preferred interface */
  659. adjust_tpref(&init.sl_alloc, erts_no_schedulers);
  660. adjust_tpref(&init.std_alloc, erts_no_schedulers);
  661. adjust_tpref(&init.ll_alloc, erts_no_schedulers);
  662. adjust_tpref(&init.eheap_alloc, erts_no_schedulers);
  663. adjust_tpref(&init.binary_alloc, erts_no_schedulers);
  664. adjust_tpref(&init.ets_alloc, erts_no_schedulers);
  665. adjust_tpref(&init.driver_alloc, erts_no_schedulers);
  666. adjust_tpref(&init.fix_alloc, erts_no_schedulers);
  667. adjust_tpref(&init.literal_alloc, erts_no_schedulers);
  668. /*
  669. * The following allocators cannot be run with afit strategy.
  670. * Make sure they don't...
  671. */
  672. refuse_af_strategy(&init.sl_alloc);
  673. refuse_af_strategy(&init.std_alloc);
  674. refuse_af_strategy(&init.ll_alloc);
  675. refuse_af_strategy(&init.eheap_alloc);
  676. refuse_af_strategy(&init.binary_alloc);
  677. refuse_af_strategy(&init.ets_alloc);
  678. refuse_af_strategy(&init.driver_alloc);
  679. refuse_af_strategy(&init.fix_alloc);
  680. refuse_af_strategy(&init.literal_alloc);
  681. if (!init.temp_alloc.thr_spec)
  682. refuse_af_strategy(&init.temp_alloc);
  683. erts_mtrace_pre_init();
  684. #if HAVE_ERTS_MSEG
  685. init.mseg.nos = erts_no_schedulers;
  686. erts_mseg_init(&init.mseg);
  687. #endif
  688. erts_alcu_init(&init.alloc_util);
  689. erts_afalc_init();
  690. erts_bfalc_init();
  691. erts_gfalc_init();
  692. erts_aoffalc_init();
  693. for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
  694. erts_allctrs[i].alloc = NULL;
  695. erts_allctrs[i].realloc = NULL;
  696. erts_allctrs[i].free = NULL;
  697. erts_allctrs[i].extra = NULL;
  698. erts_allctrs_info[i].alloc_util = 0;
  699. erts_allctrs_info[i].enabled = 0;
  700. erts_allctrs_info[i].thr_spec = 0;
  701. erts_allctrs_info[i].extra = NULL;
  702. }
  703. erts_allctrs[ERTS_ALC_A_SYSTEM].alloc = erts_sys_alloc;
  704. erts_allctrs[ERTS_ALC_A_SYSTEM].realloc = erts_sys_realloc;
  705. erts_allctrs[ERTS_ALC_A_SYSTEM].free = erts_sys_free;
  706. erts_allctrs_info[ERTS_ALC_A_SYSTEM].enabled = 1;
  707. set_au_allocator(ERTS_ALC_A_TEMPORARY, &init.temp_alloc, ncpu);
  708. set_au_allocator(ERTS_ALC_A_SHORT_LIVED, &init.sl_alloc, ncpu);
  709. set_au_allocator(ERTS_ALC_A_STANDARD, &init.std_alloc, ncpu);
  710. set_au_allocator(ERTS_ALC_A_LONG_LIVED, &init.ll_alloc, ncpu);
  711. set_au_allocator(ERTS_ALC_A_EHEAP, &init.eheap_alloc, ncpu);
  712. set_au_allocator(ERTS_ALC_A_BINARY, &init.binary_alloc, ncpu);
  713. set_au_allocator(ERTS_ALC_A_ETS, &init.ets_alloc, ncpu);
  714. set_au_allocator(ERTS_ALC_A_DRIVER, &init.driver_alloc, ncpu);
  715. set_au_allocator(ERTS_ALC_A_FIXED_SIZE, &init.fix_alloc, ncpu);
  716. set_au_allocator(ERTS_ALC_A_LITERAL, &init.literal_alloc, ncpu);
  717. set_au_allocator(ERTS_ALC_A_TEST, &init.test_alloc, ncpu);
  718. for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
  719. if (!erts_allctrs[i].alloc)
  720. erts_exit(ERTS_ABORT_EXIT,
  721. "Missing alloc function for %s\n", ERTS_ALC_A2AD(i));
  722. if (!erts_allctrs[i].realloc)
  723. erts_exit(ERTS_ABORT_EXIT,
  724. "Missing realloc function for %s\n", ERTS_ALC_A2AD(i));
  725. if (!erts_allctrs[i].free)
  726. erts_exit(ERTS_ABORT_EXIT,
  727. "Missing free function for %s\n", ERTS_ALC_A2AD(i));
  728. }
  729. sys_alloc_opt(SYS_ALLOC_OPT_TRIM_THRESHOLD, init.trim_threshold);
  730. sys_alloc_opt(SYS_ALLOC_OPT_TOP_PAD, init.top_pad);
  731. erts_mtrace_init(init.instr.mtrace, init.instr.nodename);
  732. start_au_allocator(ERTS_ALC_A_TEMPORARY,
  733. &init.temp_alloc,
  734. &temp_alloc_state);
  735. start_au_allocator(ERTS_ALC_A_SHORT_LIVED,
  736. &init.sl_alloc,
  737. &sl_alloc_state);
  738. start_au_allocator(ERTS_ALC_A_STANDARD,
  739. &init.std_alloc,
  740. &std_alloc_state);
  741. start_au_allocator(ERTS_ALC_A_LONG_LIVED,
  742. &init.ll_alloc,
  743. &ll_alloc_state);
  744. start_au_allocator(ERTS_ALC_A_EHEAP,
  745. &init.eheap_alloc,
  746. &eheap_alloc_state);
  747. start_au_allocator(ERTS_ALC_A_BINARY,
  748. &init.binary_alloc,
  749. &binary_alloc_state);
  750. start_au_allocator(ERTS_ALC_A_ETS,
  751. &init.ets_alloc,
  752. &ets_alloc_state);
  753. start_au_allocator(ERTS_ALC_A_DRIVER,
  754. &init.driver_alloc,
  755. &driver_alloc_state);
  756. start_au_allocator(ERTS_ALC_A_FIXED_SIZE,
  757. &init.fix_alloc,
  758. &fix_alloc_state);
  759. start_au_allocator(ERTS_ALC_A_LITERAL,
  760. &init.literal_alloc,
  761. &literal_alloc_state);
  762. start_au_allocator(ERTS_ALC_A_TEST,
  763. &init.test_alloc,
  764. &test_alloc_state);
  765. erts_mtrace_install_wrapper_functions();
  766. init_aireq_alloc();
  767. #ifdef DEBUG
  768. extra_block_size += install_debug_functions();
  769. #endif
  770. adjust_fix_alloc_sizes(extra_block_size);
  771. }
  772. void
  773. erts_alloc_late_init(void)
  774. {
  775. }
  776. static void *
  777. erts_realloc_fixed_size(ErtsAlcType_t type, void *extra, void *p, Uint size)
  778. {
  779. erts_exit(ERTS_ABORT_EXIT,
  780. "Attempt to reallocate a block of the fixed size type %s\n",
  781. ERTS_ALC_T2TD(type));
  782. }
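/*
 * Install the alloc/realloc/free callbacks for one allocator in
 * erts_allctrs[], selecting the thread specific, thread preferred or
 * plain thread safe alloc_util entry points depending on the
 * configuration; disabled allocators fall back to sys_alloc.
 */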
  783. static void
  784. set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init, int ncpu)
  785. {
  786. ErtsAllocatorFunctions_t *af = &erts_allctrs[alctr_n];
  787. ErtsAllocatorInfo_t *ai = &erts_allctrs_info[alctr_n];
  788. ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[alctr_n];
  789. /*
  790. * Some allocators are forced on if halfword heap is used.
  791. */
  792. if (init->init.util.force)
  793. init->enable = 1;
  794. tspec->enabled = 0;
  795. tspec->dd = 0;
  796. tspec->aix = alctr_n;
  797. tspec->size = 0;
  798. ai->thr_spec = 0;
  799. if (!init->enable) {
  800. af->alloc = erts_sys_alloc;
  801. af->realloc = erts_sys_realloc;
  802. af->free = erts_sys_free;
  803. af->extra = NULL;
  804. ai->alloc_util = 0;
  805. ai->enabled = 0;
  806. ai->extra = NULL;
  807. return;
  808. }
  809. if (init->thr_spec) {
  810. if (init->thr_spec > 0) {
  811. af->alloc = erts_alcu_alloc_thr_spec;
  812. if (init->init.util.fix_type_size)
  813. af->realloc = erts_realloc_fixed_size;
  814. else if (init->init.util.ramv)
  815. af->realloc = erts_alcu_realloc_mv_thr_spec;
  816. else
  817. af->realloc = erts_alcu_realloc_thr_spec;
  818. af->free = erts_alcu_free_thr_spec;
  819. }
  820. else {
  821. af->alloc = erts_alcu_alloc_thr_pref;
  822. if (init->init.util.fix_type_size)
  823. af->realloc = erts_realloc_fixed_size;
  824. else if (init->init.util.ramv)
  825. af->realloc = erts_alcu_realloc_mv_thr_pref;
  826. else
  827. af->realloc = erts_alcu_realloc_thr_pref;
  828. af->free = erts_alcu_free_thr_pref;
  829. tspec->dd = 1;
  830. }
  831. tspec->enabled = 1;
  832. tspec->size = abs(init->thr_spec) + 1;
  833. ai->thr_spec = tspec->size;
  834. }
  835. else
  836. if (init->init.util.ts) {
  837. af->alloc = erts_alcu_alloc_ts;
  838. if (init->init.util.fix_type_size)
  839. af->realloc = erts_realloc_fixed_size;
  840. else if (init->init.util.ramv)
  841. af->realloc = erts_alcu_realloc_mv_ts;
  842. else
  843. af->realloc = erts_alcu_realloc_ts;
  844. af->free = erts_alcu_free_ts;
  845. }
  846. else
  847. {
  848. erts_exit(ERTS_ABORT_EXIT, "%salloc is not thread safe\n",
  849. init->init.util.name_prefix);
  850. }
  851. af->extra = NULL;
  852. ai->alloc_util = 1;
  853. ai->enabled = 1;
  854. }
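/*
 * Allocate cache line aligned state and fix list memory for all
 * instances of one allocator and start each instance with the chosen
 * strategy implementation.
 */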
  855. static void
  856. start_au_allocator(ErtsAlcType_t alctr_n,
  857. struct au_init *init,
  858. ErtsAllocatorState_t *state)
  859. {
  860. int i;
  861. int size = 1;
  862. void *as0;
  863. ErtsAlcStrat_t astrat;
  864. ErtsAllocatorFunctions_t *af = &erts_allctrs[alctr_n];
  865. ErtsAllocatorInfo_t *ai = &erts_allctrs_info[alctr_n];
  866. ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[alctr_n];
  867. ErtsAlcFixList_t *fix_lists = NULL;
  868. size_t fix_list_size = 0;
  869. if (!init->enable)
  870. return;
  871. if (init->thr_spec) {
  872. char *states = erts_sys_alloc(0,
  873. NULL,
  874. ((sizeof(Allctr_t *)
  875. * (tspec->size + 1))
  876. + (sizeof(ErtsAllocatorState_t)
  877. * tspec->size)
  878. + ERTS_CACHE_LINE_SIZE - 1));
  879. if (!states)
  880. erts_exit(ERTS_ABORT_EXIT,
  881. "Failed to allocate allocator states for %salloc\n",
  882. init->init.util.name_prefix);
  883. tspec->allctr = (Allctr_t **) states;
  884. states += sizeof(Allctr_t *) * (tspec->size + 1);
  885. states = ((((UWord) states) & ERTS_CACHE_LINE_MASK)
  886. ? (char *) ((((UWord) states) & ~ERTS_CACHE_LINE_MASK)
  887. + ERTS_CACHE_LINE_SIZE)
  888. : (char *) states);
  889. tspec->allctr[0] = (Allctr_t *) state;
  890. size = tspec->size;
  891. for (i = 1; i < size; i++)
  892. tspec->allctr[i] = (Allctr_t *)
  893. &((ErtsAllocatorState_t *) states)[i-1];
  894. }
  895. if (init->init.util.fix_type_size) {
  896. size_t tot_fix_list_size;
  897. fix_list_size = sizeof(ErtsAlcFixList_t)*ERTS_ALC_NO_FIXED_SIZES;
  898. fix_list_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(fix_list_size);
  899. tot_fix_list_size = fix_list_size;
  900. if (init->thr_spec)
  901. tot_fix_list_size *= tspec->size;
  902. fix_lists = erts_sys_alloc(0,
  903. NULL,
  904. (tot_fix_list_size
  905. + ERTS_CACHE_LINE_SIZE - 1));
  906. if (!fix_lists)
  907. erts_exit(ERTS_ABORT_EXIT,
  908. "Failed to allocate fix lists for %salloc\n",
  909. init->init.util.name_prefix);
  910. if (((UWord) fix_lists) & ERTS_CACHE_LINE_MASK)
  911. fix_lists = ((ErtsAlcFixList_t *)
  912. ((((UWord) fix_lists) & ~ERTS_CACHE_LINE_MASK)
  913. + ERTS_CACHE_LINE_SIZE));
  914. }
  915. for (i = 0; i < size; i++) {
  916. Allctr_t *as;
  917. astrat = init->astrat;
  918. if (!init->thr_spec)
  919. as0 = state;
  920. else {
  921. as0 = (void *) tspec->allctr[i];
  922. if (!as0)
  923. continue;
  924. if (init->thr_spec < 0) {
  925. init->init.util.ts = i == 0;
  926. init->init.util.tspec = 0;
  927. init->init.util.tpref = -1*init->thr_spec + 1;
  928. }
  929. else {
  930. if (i != 0)
  931. init->init.util.ts = 0;
  932. else {
  933. if (astrat == ERTS_ALC_S_AFIT)
  934. astrat = ERTS_ALC_S_GOODFIT;
  935. init->init.util.ts = 1;
  936. }
  937. init->init.util.tspec = init->thr_spec + 1;
  938. init->init.util.tpref = 0;
  939. }
  940. }
  941. if (fix_lists) {
  942. init->init.util.fix = fix_lists;
  943. fix_lists = ((ErtsAlcFixList_t *)
  944. (((char *) fix_lists) + fix_list_size));
  945. }
  946. init->init.util.alloc_strat = astrat;
  947. init->init.util.ix = i;
  948. switch (astrat) {
  949. case ERTS_ALC_S_GOODFIT:
  950. as = erts_gfalc_start((GFAllctr_t *) as0,
  951. &init->init.gf,
  952. &init->init.util);
  953. break;
  954. case ERTS_ALC_S_BESTFIT:
  955. as = erts_bfalc_start((BFAllctr_t *) as0,
  956. &init->init.bf,
  957. &init->init.util);
  958. break;
  959. case ERTS_ALC_S_AFIT:
  960. as = erts_afalc_start((AFAllctr_t *) as0,
  961. &init->init.af,
  962. &init->init.util);
  963. break;
  964. case ERTS_ALC_S_FIRSTFIT:
  965. as = erts_aoffalc_start((AOFFAllctr_t *) as0,
  966. &init->init.aoff,
  967. &init->init.util);
  968. break;
  969. default:
  970. as = NULL;
  971. ASSERT(0);
  972. }
  973. if (!as)
  974. erts_exit(ERTS_ABORT_EXIT,
  975. "Failed to start %salloc\n", init->init.util.name_prefix);
  976. ASSERT(as == (void *) as0);
  977. af->extra = as;
  978. }
  979. if (init->thr_spec)
  980. af->extra = tspec;
  981. ai->extra = af->extra;
  982. }
  983. static void bad_param(char *param_start, char *param_end)
  984. {
  985. size_t len = param_end - param_start;
  986. char param[100];
  987. if (len > 99)
  988. len = 99;
  989. sys_memcpy((void *) param, (void *) param_start, len);
  990. param[len] = '\0';
  991. erts_fprintf(stderr, "bad \"%s\" parameter\n", param);
  992. erts_usage();
  993. }
  994. static void bad_value(char *param_start, char *param_end, char *value)
  995. {
  996. size_t len = param_end - param_start;
  997. char param[100];
  998. if (len > 99)
  999. len = 99;
  1000. sys_memcpy((void *) param, (void *) param_start, len);
  1001. param[len] = '\0';
  1002. erts_fprintf(stderr, "bad \"%s\" value: %s\n", param, value);
  1003. erts_usage();
  1004. }
  1005. /* Get arg marks argument as handled by
  1006. putting NULL in argv */
  1007. static char *
  1008. get_value(char* rest, char** argv, int* ip)
  1009. {
  1010. char *param = argv[*ip]+1;
  1011. argv[*ip] = NULL;
  1012. if (*rest == '\0') {
  1013. char *next = argv[*ip + 1];
  1014. if (next[0] == '-'
  1015. && next[1] == '-'
  1016. && next[2] == '\0') {
  1017. bad_value(param, rest, "");
  1018. }
  1019. (*ip)++;
  1020. argv[*ip] = NULL;
  1021. return next;
  1022. }
  1023. return rest;
  1024. }
  1025. static ERTS_INLINE int
  1026. has_prefix(const char *prefix, const char *string)
  1027. {
  1028. int i;
  1029. for (i = 0; prefix[i]; i++)
  1030. if (prefix[i] != string[i])
  1031. return 0;
  1032. return 1;
  1033. }
  1034. static int
  1035. get_bool_value(char *param_end, char** argv, int* ip)
  1036. {
  1037. char *param = argv[*ip]+1;
  1038. char *value = get_value(param_end, argv, ip);
  1039. if (sys_strcmp(value, "true") == 0)
  1040. return 1;
  1041. else if (sys_strcmp(value, "false") == 0)
  1042. return 0;
  1043. else
  1044. bad_value(param, param_end, value);
  1045. return -1;
  1046. }
  1047. static Uint kb_to_bytes(Sint kb, Uint *bytes)
  1048. {
  1049. const Uint max = ((~((Uint) 0))/1024) + 1;
  1050. if (kb < 0 || (Uint)kb > max)
  1051. return 0;
  1052. if ((Uint)kb == max)
  1053. *bytes = ~((Uint) 0);
  1054. else
  1055. *bytes = ((Uint) kb)*1024;
  1056. return 1;
  1057. }
  1058. static Uint
  1059. get_kb_value(char *param_end, char** argv, int* ip)
  1060. {
  1061. Sint tmp;
  1062. Uint bytes = 0;
  1063. char *rest;
  1064. char *param = argv[*ip]+1;
  1065. char *value = get_value(param_end, argv, ip);
  1066. errno = 0;
  1067. tmp = (Sint) ErtsStrToSint(value, &rest, 10);
  1068. if (errno != 0 || rest == value || !kb_to_bytes(tmp, &bytes))
  1069. bad_value(param, param_end, value);
  1070. return bytes;
  1071. }
  1072. static UWord
  1073. get_mb_value(char *param_end, char** argv, int* ip)
  1074. {
  1075. SWord tmp;
  1076. UWord max = ((~((UWord) 0))/(1024*1024)) + 1;
  1077. char *rest;
  1078. char *param = argv[*ip]+1;
  1079. char *value = get_value(param_end, argv, ip);
  1080. errno = 0;
  1081. tmp = (SWord) ErtsStrToSint(value, &rest, 10);
  1082. if (errno != 0 || rest == value || tmp < 0 || max < ((UWord) tmp))
  1083. bad_value(param, param_end, value);
  1084. if (max == (UWord) tmp)
  1085. return ~((UWord) 0);
  1086. else
  1087. return ((UWord) tmp)*1024*1024;
  1088. }
  1089. #if 0
  1090. static Uint
  1091. get_byte_value(char *param_end, char** argv, int* ip)
  1092. {
  1093. Sint tmp;
  1094. char *rest;
  1095. char *param = argv[*ip]+1;
  1096. char *value = get_value(param_end, argv, ip);
  1097. errno = 0;
  1098. tmp = (Sint) ErtsStrToSint(value, &rest, 10);
  1099. if (errno != 0 || rest == value || tmp < 0)
  1100. bad_value(param, param_end, value);
  1101. return (Uint) tmp;
  1102. }
  1103. #endif
  1104. static Uint
  1105. get_amount_value(char *param_end, char** argv, int* ip)
  1106. {
  1107. Sint tmp;
  1108. char *rest;
  1109. char *param = argv[*ip]+1;
  1110. char *value = get_value(param_end, argv, ip);
  1111. errno = 0;
  1112. tmp = (Sint) ErtsStrToSint(value, &rest, 10);
  1113. if (errno != 0 || rest == value || tmp < 0)
  1114. bad_value(param, param_end, value);
  1115. return (Uint) tmp;
  1116. }
  1117. static Uint
  1118. get_acul_value(struct au_init *auip, char *param_end, char** argv, int* ip)
  1119. {
  1120. Sint tmp;
  1121. char *rest;
  1122. char *param = argv[*ip]+1;
  1123. char *value = get_value(param_end, argv, ip);
  1124. if (sys_strcmp(value, "de") == 0) {
  1125. switch (auip->init.util.alloc_no) {
  1126. case ERTS_ALC_A_LONG_LIVED:
  1127. return ERTS_ALC_DEFAULT_ENABLED_ACUL_LL_ALLOC;
  1128. case ERTS_ALC_A_EHEAP:
  1129. return ERTS_ALC_DEFAULT_ENABLED_ACUL_EHEAP_ALLOC;
  1130. default:
  1131. return ERTS_ALC_DEFAULT_ENABLED_ACUL;
  1132. }
  1133. }
  1134. errno = 0;
  1135. tmp = (Sint) ErtsStrToSint(value, &rest, 10);
  1136. if (errno != 0 || rest == value || tmp < 0 || 100 < tmp)
  1137. bad_value(param, param_end, value);
  1138. return (Uint) tmp;
  1139. }
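/*
 * Parse one +M<S><subparam> argument for a specific allocator, e.g.
 * +MBas aobf (strategy), +MBacul de (abandon carrier utilization
 * limit) and +MBmmbcs 1024 (main multi-block carrier size in KB)
 * for binary_alloc.
 */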
  1140. static void
  1141. handle_au_arg(struct au_init *auip,
  1142. char* sub_param,
  1143. char** argv,
  1144. int* ip,
  1145. int u_switch)
  1146. {
  1147. char *param = argv[*ip]+1;
  1148. switch (sub_param[0]) {
  1149. case 'a':
  1150. if (sub_param[1] == 'c') { /* Migration parameters "ac*" */
  1151. UWord value;
  1152. UWord* wp;
  1153. if (!auip->carrier_migration_allowed && !u_switch)
  1154. goto bad_switch;
  1155. if (has_prefix("acul", sub_param)) {
  1156. value = get_acul_value(auip, sub_param + 4, argv, ip);
  1157. wp = &auip->init.util.acul;
  1158. }
  1159. else if (has_prefix("acnl", sub_param)) {
  1160. value = get_amount_value(sub_param + 4, argv, ip);
  1161. wp = &auip->init.util.acnl;
  1162. }
  1163. else if (has_prefix("acfml", sub_param)) {
  1164. value = get_amount_value(sub_param + 5, argv, ip);
  1165. wp = &auip->init.util.acfml;
  1166. }
  1167. else
  1168. goto bad_switch;
  1169. if (auip->carrier_migration_allowed)
  1170. *wp = value;
  1171. }
  1172. else if(has_prefix("asbcst", sub_param)) {
  1173. auip->init.util.asbcst = get_kb_value(sub_param + 6, argv, ip);
  1174. }
  1175. else if(has_prefix("as", sub_param)) {
  1176. char *alg = get_value(sub_param + 2, argv, ip);
  1177. if (sys_strcmp("bf", alg) == 0) {
  1178. auip->astrat = ERTS_ALC_S_BESTFIT;
  1179. auip->init.bf.ao = 0;
  1180. }
  1181. else if (sys_strcmp("aobf", alg) == 0) {
  1182. auip->astrat = ERTS_ALC_S_BESTFIT;
  1183. auip->init.bf.ao = 1;
  1184. }
  1185. else if (sys_strcmp("gf", alg) == 0) {
  1186. auip->astrat = ERTS_ALC_S_GOODFIT;
  1187. }
  1188. else if (sys_strcmp("af", alg) == 0) {
  1189. auip->astrat = ERTS_ALC_S_AFIT;
  1190. }
  1191. else if (sys_strcmp("aoff", alg) == 0) {
  1192. auip->astrat = ERTS_ALC_S_FIRSTFIT;
  1193. auip->init.aoff.crr_order = FF_AOFF;
  1194. auip->init.aoff.blk_order = FF_AOFF;
  1195. }
  1196. else if (sys_strcmp("aoffcbf", alg) == 0) {
  1197. auip->astrat = ERTS_ALC_S_FIRSTFIT;
  1198. auip->init.aoff.crr_order = FF_AOFF;
  1199. auip->init.aoff.blk_order = FF_BF;
  1200. }
  1201. else if (sys_strcmp("aoffcaobf", alg) == 0) {
  1202. auip->astrat = ERTS_ALC_S_FIRSTFIT;
  1203. auip->init.aoff.crr_order = FF_AOFF;
  1204. auip->init.aoff.blk_order = FF_AOBF;
  1205. }
  1206. else if (sys_strcmp("ageffcaoff", alg) == 0) {
  1207. auip->astrat = ERTS_ALC_S_FIRSTFIT;
  1208. auip->init.aoff.crr_order = FF_AGEFF;
  1209. auip->init.aoff.blk_order = FF_AOFF;
  1210. }
  1211. else if (sys_strcmp("ageffcbf", alg) == 0) {
  1212. auip->astrat = ERTS_ALC_S_FIRSTFIT;
  1213. auip->init.aoff.crr_order = FF_AGEFF;
  1214. auip->init.aoff.blk_order = FF_BF;
  1215. }
  1216. else if (sys_strcmp("ageffcaobf", alg) == 0) {
  1217. auip->astrat = ERTS_ALC_S_FIRSTFIT;
  1218. auip->init.aoff.crr_order = FF_AGEFF;
  1219. auip->init.aoff.blk_order = FF_AOBF;
  1220. }
  1221. else {
  1222. if (auip->init.util.alloc_no == ERTS_ALC_A_TEST
  1223. && sys_strcmp("chaosff", alg) == 0) {
  1224. auip->astrat = ERTS_ALC_S_FIRSTFIT;
  1225. auip->init.aoff.crr_order = FF_CHAOS;
  1226. auip->init.aoff.blk_order = FF_CHAOS;
  1227. }
  1228. else {
  1229. bad_value(param, sub_param + 1, alg);
  1230. }
  1231. }
  1232. if (!strategy_support_carrier_migration(auip))
  1233. auip->init.util.acul = 0;
  1234. } else if (has_prefix("atags", sub_param)) {
  1235. auip->init.util.atags = get_bool_value(sub_param + 5, argv, ip);
  1236. }
  1237. else
  1238. goto bad_switch;
  1239. break;
  1240. case 'c': {
  1241. if (has_prefix("cp", sub_param)) {
  1242. char *param, *param_end, *value;
  1243. int cp;
  1244. if (!auip->carrier_migration_allowed && !u_switch)
  1245. goto bad_switch;
  1246. param = argv[*ip]+1;
  1247. param_end = sub_param + 2;
  1248. value = get_value(param_end, argv, ip);
  1249. if (value[0] == '\0' || value[1] != '\0')
  1250. bad_value(param, param_end, value);
  1251. switch (value[0]) {
  1252. case 'B': cp = ERTS_ALC_A_BINARY; break;
  1253. case 'D': cp = ERTS_ALC_A_STANDARD; break;
  1254. case 'E': cp = ERTS_ALC_A_ETS; break;
  1255. case 'F': cp = ERTS_ALC_A_FIXED_SIZE; break;
  1256. case 'H': cp = ERTS_ALC_A_EHEAP; break;
  1257. case 'L': cp = ERTS_ALC_A_LONG_LIVED; break;
  1258. case 'R': cp = ERTS_ALC_A_DRIVER; break;
  1259. case 'S': cp = ERTS_ALC_A_SHORT_LIVED; break;
  1260. case '@': cp = ERTS_ALC_COMMON_CPOOL_IX; break;
  1261. case ':': cp = auip->init.util.alloc_no; break;
  1262. default: cp = -1;
  1263. bad_value(param, param_end, value);
  1264. break;
  1265. }
  1266. if (auip->carrier_migration_allowed)
  1267. auip->init.util.cp = cp;
  1268. }
  1269. else
  1270. goto bad_switch;
  1271. break;
  1272. }
  1273. case 'e': {
  1274. int e = get_bool_value(sub_param + 1, argv, ip);
  1275. if (!auip->disable_allowed && !e) {
  1276. if (!u_switch)
  1277. bad_value(param, sub_param + 1, "false");
  1278. else
  1279. ASSERT(auip->enable); /* ignore */
  1280. }
  1281. else auip->enable = e;
  1282. break;
  1283. }
  1284. case 'l':
  1285. if (has_prefix("lmbcs", sub_param)) {
  1286. auip->default_.lmbcs = 0;
  1287. auip->init.util.lmbcs = get_kb_value(sub_param + 5, argv, ip);
  1288. }
  1289. else
  1290. goto bad_switch;
  1291. break;
  1292. case 'm':
  1293. if (has_prefix("mbcgs", sub_param)) {
  1294. auip->init.util.mbcgs = get_amount_value(sub_param + 5, argv, ip);
  1295. }
  1296. else if (has_prefix("mbsd", sub_param)) {
  1297. auip->init.gf.mbsd = get_amount_value(sub_param + 4, argv, ip);
  1298. if (auip->init.gf.mbsd < 1)
  1299. auip->init.gf.mbsd = 1;
  1300. }
  1301. else if (has_prefix("mmbcs", sub_param)) {
  1302. auip->default_.mmbcs = 0;
  1303. auip->init.util.mmbcs = get_kb_value(sub_param + 5, argv, ip);
  1304. }
  1305. else if (has_prefix("mmmbc", sub_param)) {
  1306. auip->default_.mmmbc = 0;
  1307. auip->init.util.mmmbc = get_amount_value(sub_param + 5, argv, ip);
  1308. }
  1309. else if (has_prefix("mmsbc", sub_param)) {
  1310. auip->init.util.mmsbc = get_amount_value(sub_param + 5, argv, ip);
  1311. }
  1312. else
  1313. goto bad_switch;
  1314. break;
  1315. case 'r':
  1316. if(has_prefix("rsbcmt", sub_param)) {
  1317. auip->init.util.rsbcmt = get_amount_value(sub_param + 6, argv, ip);
  1318. if (auip->init.util.rsbcmt > 100)
  1319. auip->init.util.rsbcmt = 100;
  1320. }
  1321. else if(has_prefix("rsbcst", sub_param)) {
  1322. auip->init.util.rsbcst = get_amount_value(sub_param + 6, argv, ip);
  1323. if (auip->init.util.rsbcst > 100)
  1324. auip->init.util.rsbcst = 100;
  1325. }
  1326. else if (has_prefix("rmbcmt", sub_param)) {
  1327. auip->init.util.rmbcmt = get_amount_value(sub_param + 6, argv, ip);
  1328. if (auip->init.util.rmbcmt > 100)
  1329. auip->init.util.rmbcmt = 100;
  1330. }
  1331. else if (has_prefix("ramv", sub_param)) {
  1332. auip->init.util.ramv = get_bool_value(sub_param + 4, argv, ip);
  1333. }
  1334. else
  1335. goto bad_switch;
  1336. break;
  1337. case 's':
  1338. if(has_prefix("sbct", sub_param)) {
  1339. auip->init.util.sbct = get_kb_value(sub_param + 4, argv, ip);
  1340. }
  1341. else if (has_prefix("smbcs", sub_param)) {
  1342. auip->default_.smbcs = 0;
  1343. auip->init.util.smbcs = get_kb_value(sub_param + 5, argv, ip);
  1344. }
  1345. else
  1346. goto bad_switch;
  1347. break;
  1348. case 't': {
  1349. int res = get_bool_value(sub_param+1, argv, ip);
  1350. if (res > 0) {
  1351. if (!auip->thr_spec_allowed) {
  1352. if (!u_switch)
  1353. bad_value(param, sub_param + 1, "true");
  1354. else
  1355. ASSERT(!auip->thr_spec); /* ignore */
  1356. }
  1357. else
  1358. auip->thr_spec = 1;
  1359. break;
  1360. }
  1361. else if (res == 0) {
  1362. auip->thr_spec = 0;
  1363. auip->init.util.acul = 0;
  1364. break;
  1365. }
  1366. goto bad_switch;
  1367. }
  1368. default:
  1369. bad_switch:
  1370. bad_param(param, sub_param);
  1371. }
  1372. }
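/*
 * Walk argv looking for +M* emulator flags; per allocator flags are
 * dispatched to handle_au_arg() while mseg, sys_alloc, alloc_util,
 * instrumentation and +Mlpm flags are handled here. Handled arguments
 * are marked with NULL and compacted away before returning.
 */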
  1373. static void
  1374. handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
  1375. {
  1376. struct au_init *aui[] = {
  1377. &init->binary_alloc,
  1378. &init->std_alloc,
  1379. &init->ets_alloc,
  1380. &init->eheap_alloc,
  1381. &init->ll_alloc,
  1382. &init->driver_alloc,
  1383. &init->fix_alloc,
  1384. &init->sl_alloc
  1385. /* test_alloc not affected by +Mea??? or +Mu??? */
  1386. };
  1387. int aui_sz = (int) sizeof(aui)/sizeof(aui[0]);
  1388. char *arg;
  1389. char *rest;
  1390. int i, j;
  1391. i = 1;
  1392. ASSERT(argc && argv && init);
  1393. while (i < *argc) {
  1394. if(argv[i][0] == '-') {
  1395. char *param = argv[i]+1;
  1396. switch (argv[i][1]) {
  1397. case 'M':
  1398. switch (argv[i][2]) {
  1399. case 'B':
  1400. handle_au_arg(&init->binary_alloc, &argv[i][3], argv, &i, 0);
  1401. break;
  1402. case 'I':
  1403. if (has_prefix("scs", argv[i]+3)) {
  1404. #if HAVE_ERTS_MSEG
  1405. init->mseg.literal_mmap.scs =
  1406. #endif
  1407. get_mb_value(argv[i]+6, argv, &i);
  1408. }
  1409. else
  1410. handle_au_arg(&init->literal_alloc, &argv[i][3], argv, &i, 0);
  1411. break;
  1412. case 'D':
  1413. handle_au_arg(&init->std_alloc, &argv[i][3], argv, &i, 0);
  1414. break;
  1415. case 'E':
  1416. handle_au_arg(&init->ets_alloc, &argv[i][3], argv, &i, 0);
  1417. break;
  1418. case 'F':
  1419. handle_au_arg(&init->fix_alloc, &argv[i][3], argv, &i, 0);
  1420. break;
  1421. case 'H':
  1422. handle_au_arg(&init->eheap_alloc, &argv[i][3], argv, &i, 0);
  1423. break;
  1424. case 'L':
  1425. handle_au_arg(&init->ll_alloc, &argv[i][3], argv, &i, 0);
  1426. break;
  1427. case 'M':
  1428. if (has_prefix("amcbf", argv[i]+3)) {
  1429. #if HAVE_ERTS_MSEG
  1430. init->mseg.amcbf =
  1431. #endif
  1432. get_kb_value(argv[i]+8, argv, &i);
  1433. }
  1434. else if (has_prefix("rmcbf", argv[i]+3)) {
  1435. #if HAVE_ERTS_MSEG
  1436. init->mseg.rmcbf =
  1437. #endif
  1438. get_amount_value(argv[i]+8, argv, &i);
  1439. }
  1440. else if (has_prefix("mcs", argv[i]+3)) {
  1441. #if HAVE_ERTS_MSEG
  1442. init->mseg.mcs =
  1443. #endif
  1444. get_amount_value(argv[i]+6, argv, &i);
  1445. }
  1446. else if (has_prefix("scs", argv[i]+3)) {
  1447. #if HAVE_ERTS_MSEG
  1448. init->mseg.dflt_mmap.scs =
  1449. #endif
  1450. get_mb_value(argv[i]+6, argv, &i);
  1451. }
  1452. else if (has_prefix("sco", argv[i]+3)) {
  1453. #if HAVE_ERTS_MSEG
  1454. init->mseg.dflt_mmap.sco =
  1455. #endif
  1456. get_bool_value(argv[i]+6, argv, &i);
  1457. }
  1458. else if (has_prefix("scrpm", argv[i]+3)) {
  1459. #if HAVE_ERTS_MSEG
  1460. init->mseg.dflt_mmap.scrpm =
  1461. #endif
  1462. get_bool_value(argv[i]+8, argv, &i);
  1463. }
  1464. else if (has_prefix("scrfsd", argv[i]+3)) {
  1465. #if HAVE_ERTS_MSEG
  1466. init->mseg.dflt_mmap.scrfsd =
  1467. #endif
  1468. get_amount_value(argv[i]+9, argv, &i);
  1469. }
  1470. else {
  1471. bad_param(param, param+2);
  1472. }
  1473. break;
  1474. case 'R':
  1475. handle_au_arg(&init->driver_alloc, &argv[i][3], argv, &i, 0);
  1476. break;
  1477. case 'S':
  1478. handle_au_arg(&init->sl_alloc, &argv[i][3], argv, &i, 0);
  1479. break;
  1480. case 'T':
  1481. handle_au_arg(&init->temp_alloc, &argv[i][3], argv, &i, 0);
  1482. break;
  1483. case 'Z':
  1484. handle_au_arg(&init->test_alloc, &argv[i][3], argv, &i, 0);
  1485. break;
  1486. case 'Y': { /* sys_alloc */
  1487. if (has_prefix("tt", param+2)) {
  1488. /* set trim threshold */
  1489. arg = get_value(param+4, argv, &i);
  1490. errno = 0;
  1491. init->trim_threshold = (int) strtol(arg, &rest, 10);
  1492. if (errno != 0
  1493. || rest == arg
  1494. || init->trim_threshold < 0
  1495. || (INT_MAX/1024) < init->trim_threshold) {
  1496. bad_value(param, param+4, arg);
  1497. }
  1498. VERBOSE(DEBUG_SYSTEM,
  1499. ("using trim threshold: %d\n",
  1500. init->trim_threshold));
  1501. init->trim_threshold *= 1024;
  1502. }
  1503. else if (has_prefix("tp", param+2)) {
  1504. /* set top pad */
  1505. arg = get_value(param+4, argv, &i);
  1506. errno = 0;
  1507. init->top_pad = (int) strtol(arg, &rest, 10);
  1508. if (errno != 0
  1509. || rest == arg
  1510. || init->top_pad < 0
  1511. || (INT_MAX/1024) < init->top_pad) {
  1512. bad_value(param, param+4, arg);
  1513. }
  1514. VERBOSE(DEBUG_SYSTEM,
  1515. ("using top pad: %d\n",init->top_pad));
  1516. init->top_pad *= 1024;
  1517. }
  1518. else if (has_prefix("m", param+2)) {
  1519. /* Has been handled by erlexec */
  1520. (void) get_value(param+3, argv, &i);
  1521. }
  1522. else if (has_prefix("e", param+2)) {
  1523. arg = get_value(param+3, argv, &i);
  1524. if (sys_strcmp("true", arg) != 0)
  1525. bad_value(param, param+3, arg);
  1526. }
  1527. else
  1528. bad_param(param, param+2);
  1529. break;
  1530. }
  1531. case 'e':
  1532. switch (argv[i][3]) {
  1533. case 'a': {
  1534. int a;
  1535. arg = get_value(argv[i]+4, argv, &i);
  1536. if (sys_strcmp("min", arg) == 0) {
  1537. for (a = 0; a < aui_sz; a++)
  1538. aui[a]->enable = 0;
  1539. }
  1540. else if (sys_strcmp("max", arg) == 0) {
  1541. for (a = 0; a < aui_sz; a++)
  1542. aui[a]->enable = 1;
  1543. }
  1544. else if (sys_strcmp("config", arg) == 0) {
  1545. init->erts_alloc_config = 1;
  1546. }
  1547. else if (sys_strcmp("r9c", arg) == 0
  1548. || sys_strcmp("r10b", arg) == 0
  1549. || sys_strcmp("r11b", arg) == 0) {
  1550. set_default_sl_alloc_opts(&init->sl_alloc);
  1551. set_default_std_alloc_opts(&init->std_alloc);
  1552. set_default_ll_alloc_opts(&init->ll_alloc);
  1553. set_default_temp_alloc_opts(&init->temp_alloc);
  1554. set_default_eheap_alloc_opts(&init->eheap_alloc);
  1555. set_default_binary_alloc_opts(&init->binary_alloc);
  1556. set_default_ets_alloc_opts(&init->ets_alloc);
  1557. set_default_driver_alloc_opts(&init->driver_alloc);
  1558. set_default_driver_alloc_opts(&init->fix_alloc);
  1559. init->driver_alloc.enable = 0;
  1560. if (sys_strcmp("r9c", arg) == 0) {
  1561. init->sl_alloc.enable = 0;
  1562. init->std_alloc.enable = 0;
  1563. init->binary_alloc.enable = 0;
  1564. init->ets_alloc.enable = 0;
  1565. }
  1566. for (a = 0; a < aui_sz; a++) {
  1567. aui[a]->thr_spec = 0;
  1568. aui[a]->init.util.acul = 0;
  1569. aui[a]->init.util.ramv = 0;
  1570. aui[a]->init.util.lmbcs = 5*1024*1024;
  1571. }
  1572. }
  1573. else {
  1574. bad_param(param, param+3);
  1575. }
  1576. break;
  1577. }
  1578. default:
  1579. bad_param(param, param+1);
  1580. }
  1581. break;
  1582. case 'i':
  1583. switch (argv[i][3]) {
  1584. case 't':
  1585. init->instr.mtrace = get_value(argv[i]+4, argv, &i);
  1586. break;
  1587. default:
  1588. bad_param(param, param+2);
  1589. }
  1590. break;
  1591. case 'l':
  1592. if (has_prefix("pm", param+2)) {
  1593. arg = get_value(argv[i]+5, argv, &i);
  1594. if (sys_strcmp("all", arg) == 0)
  1595. lock_all_physical_memory = 1;
  1596. else if (sys_strcmp("no", arg) == 0)
  1597. lock_all_physical_memory = 0;
  1598. else
  1599. bad_value(param, param+4, arg);
  1600. break;
  1601. }
  1602. bad_param(param, param+2);
  1603. break;
  1604. case 'u':
  1605. if (has_prefix("ycs", argv[i]+3)) {
  1606. init->alloc_util.ycs
  1607. = get_kb_value(argv[i]+6, argv, &i);
  1608. }
  1609. else if (has_prefix("mmc", argv[i]+3)) {
  1610. init->alloc_util.mmc
  1611. = get_amount_value(argv[i]+6, argv, &i);
  1612. }
  1613. else if (has_prefix("sac", argv[i]+3)) {
  1614. init->alloc_util.sac
  1615. = get_bool_value(argv[i]+6, argv, &i);
  1616. }
  1617. else {
  1618. int a;
  1619. int start = i;
  1620. char *param = argv[i];
  1621. char *val = i+1 < *argc ? argv[i+1] : NULL;
  1622. for (a = 0; a < aui_sz; a++) {
  1623. if (a > 0) {
  1624. ASSERT(i == start || i == start+1);
  1625. argv[start] = param;
  1626. if (i != start)
  1627. argv[start + 1] = val;
  1628. i = start;
  1629. }
  1630. handle_au_arg(aui[a], &argv[i][3], argv, &i, 1);
  1631. }
  1632. }
  1633. break;
  1634. default:
  1635. bad_param(param, param+1);
  1636. }
  1637. break;
  1638. case '-':
  1639. if (argv[i][2] == '\0') {
  1640. /* End of system flags reached */
  1641. if (init->instr.mtrace) {
  1642. while (i < *argc) {
  1643. if(sys_strcmp(argv[i], "-sname") == 0
  1644. || sys_strcmp(argv[i], "-name") == 0) {
1645. if (i + 1 < *argc) {
  1646. init->instr.nodename = argv[i+1];
  1647. break;
  1648. }
  1649. }
  1650. i++;
  1651. }
  1652. }
  1653. goto args_parsed;
  1654. }
  1655. break;
  1656. default:
  1657. break;
  1658. }
  1659. }
  1660. i++;
  1661. }
  1662. args_parsed:
  1663. /* Handled arguments have been marked with NULL. Slide arguments
  1664. not handled towards the beginning of argv. */
  1665. for (i = 0, j = 0; i < *argc; i++) {
  1666. if (argv[i])
  1667. argv[j++] = argv[i];
  1668. }
  1669. *argc = j;
  1670. }
  1671. static char *type_no_str(ErtsAlcType_t n)
  1672. {
  1673. #if ERTS_ALC_N_MIN != 0
  1674. if (n < ERTS_ALC_N_MIN)
  1675. return NULL;
  1676. #endif
  1677. if (n > ERTS_ALC_N_MAX)
  1678. return NULL;
  1679. return (char *) ERTS_ALC_N2TD(n);
  1680. }
  1681. #define type_str(T) type_no_str(ERTS_ALC_T2N((T)))
  1682. void
  1683. erts_alloc_register_scheduler(void *vesdp)
  1684. {
  1685. ErtsSchedulerData *esdp = (ErtsSchedulerData *) vesdp;
  1686. int ix = (int) esdp->no;
  1687. int aix;
  1688. ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
  1689. for (aix = ERTS_ALC_A_MIN; aix <= ERTS_ALC_A_MAX; aix++) {
  1690. ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[aix];
  1691. esdp->alloc_data.deallctr[aix] = NULL;
  1692. esdp->alloc_data.pref_ix[aix] = -1;
  1693. if (tspec->enabled) {
  1694. if (!tspec->dd)
  1695. esdp->alloc_data.pref_ix[aix] = ix;
  1696. else {
  1697. Allctr_t *allctr = tspec->allctr[ix];
  1698. ASSERT(allctr);
  1699. esdp->alloc_data.deallctr[aix] = allctr;
  1700. esdp->alloc_data.pref_ix[aix] = ix;
  1701. }
  1702. }
  1703. }
  1704. }
  1705. void
  1706. erts_alloc_scheduler_handle_delayed_dealloc(void *vesdp,
  1707. int *need_thr_progress,
  1708. ErtsThrPrgrVal *thr_prgr_p,
  1709. int *more_work)
  1710. {
  1711. ErtsSchedulerData *esdp = (ErtsSchedulerData *) vesdp;
  1712. int aix;
  1713. for (aix = ERTS_ALC_A_MIN; aix <= ERTS_ALC_A_MAX; aix++) {
  1714. Allctr_t *allctr;
  1715. if (esdp)
  1716. allctr = esdp->alloc_data.deallctr[aix];
  1717. else {
  1718. ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[aix];
  1719. if (tspec->enabled && tspec->dd)
  1720. allctr = tspec->allctr[0];
  1721. else
  1722. allctr = NULL;
  1723. }
  1724. if (allctr) {
  1725. erts_alcu_check_delayed_dealloc(allctr,
  1726. 1,
  1727. need_thr_progress,
  1728. thr_prgr_p,
  1729. more_work);
  1730. }
  1731. }
  1732. }
  1733. erts_aint32_t
  1734. erts_alloc_fix_alloc_shrink(int ix, erts_aint32_t flgs)
  1735. {
  1736. ErtsAllocatorThrSpec_t *tspec;
  1737. tspec = &erts_allctr_thr_spec[ERTS_ALC_A_FIXED_SIZE];
  1738. if (erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].thr_spec && tspec->enabled)
  1739. return erts_alcu_fix_alloc_shrink(tspec->allctr[ix], flgs);
  1740. if (ix == 0 && erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra)
  1741. return erts_alcu_fix_alloc_shrink(
  1742. erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra, flgs);
  1743. return 0;
  1744. }
  1745. static void
  1746. no_verify(Allctr_t *allctr)
  1747. {
  1748. }
  1749. erts_alloc_verify_func_t
  1750. erts_alloc_get_verify_unused_temp_alloc(Allctr_t **allctr)
  1751. {
  1752. if (erts_allctrs_info[ERTS_ALC_A_TEMPORARY].alloc_util
  1753. && erts_allctrs_info[ERTS_ALC_A_TEMPORARY].thr_spec) {
  1754. ErtsAllocatorThrSpec_t *tspec;
  1755. int ix = ERTS_ALC_GET_THR_IX();
  1756. tspec = &erts_allctr_thr_spec[ERTS_ALC_A_TEMPORARY];
  1757. if (ix < tspec->size) {
  1758. *allctr = tspec->allctr[ix];
  1759. return erts_alcu_verify_unused;
  1760. }
  1761. }
  1762. *allctr = NULL;
  1763. return no_verify;
  1764. }
  1765. __decl_noreturn void
  1766. erts_alc_fatal_error(int error, int func, ErtsAlcType_t n, ...)
  1767. {
  1768. char buf[10];
  1769. char *t_str;
  1770. char *allctr_str;
  1771. ASSERT(n >= ERTS_ALC_N_MIN);
  1772. ASSERT(n <= ERTS_ALC_N_MAX);
  1773. if (n < ERTS_ALC_N_MIN || ERTS_ALC_N_MAX < n)
  1774. allctr_str = "UNKNOWN";
  1775. else {
  1776. ErtsAlcType_t a = ERTS_ALC_T2A(ERTS_ALC_N2T(n));
  1777. if (erts_allctrs_info[a].enabled)
  1778. allctr_str = (char *) ERTS_ALC_A2AD(a);
  1779. else
  1780. allctr_str = (char *) ERTS_ALC_A2AD(ERTS_ALC_A_SYSTEM);
  1781. }
  1782. t_str = type_no_str(n);
  1783. if (!t_str) {
  1784. erts_snprintf(buf, sizeof(buf), "%d", (int) n);
  1785. t_str = buf;
  1786. }
  1787. switch (error) {
  1788. case ERTS_ALC_E_NOTSUP: {
  1789. char *op_str;
  1790. switch (func) {
  1791. case ERTS_ALC_O_ALLOC: op_str = "alloc"; break;
  1792. case ERTS_ALC_O_REALLOC: op_str = "realloc"; break;
  1793. case ERTS_ALC_O_FREE: op_str = "free"; break;
  1794. default: op_str = "UNKNOWN"; break;
  1795. }
  1796. erts_exit(ERTS_ABORT_EXIT,
  1797. "%s: %s operation not supported (memory type: \"%s\")\n",
  1798. allctr_str, op_str, t_str);
  1799. break;
  1800. }
  1801. case ERTS_ALC_E_NOMEM: {
  1802. Uint size;
  1803. va_list argp;
  1804. char *op = func == ERTS_ALC_O_REALLOC ? "reallocate" : "allocate";
  1805. va_start(argp, n);
  1806. size = va_arg(argp, Uint);
  1807. va_end(argp);
  1808. erts_exit(ERTS_DUMP_EXIT,
  1809. "%s: Cannot %s %lu bytes of memory (of type \"%s\").\n",
  1810. allctr_str, op, size, t_str);
  1811. break;
  1812. }
  1813. case ERTS_ALC_E_NOALLCTR:
  1814. erts_exit(ERTS_ABORT_EXIT,
  1815. "erts_alloc: Unknown allocator type: %d\n",
  1816. ERTS_ALC_T2A(ERTS_ALC_N2T(n)));
  1817. break;
  1818. default:
  1819. erts_exit(ERTS_ABORT_EXIT, "erts_alloc: Unknown error: %d\n", error);
  1820. break;
  1821. }
  1822. }
  1823. __decl_noreturn void
  1824. erts_alloc_enomem(ErtsAlcType_t type, Uint size)
  1825. {
  1826. erts_alloc_n_enomem(ERTS_ALC_T2N(type), size);
  1827. }
  1828. __decl_noreturn void
  1829. erts_alloc_n_enomem(ErtsAlcType_t n, Uint size)
  1830. {
  1831. erts_alc_fatal_error(ERTS_ALC_E_NOMEM, ERTS_ALC_O_ALLOC, n, size);
  1832. }
  1833. __decl_noreturn void
  1834. erts_realloc_enomem(ErtsAlcType_t type, void *ptr, Uint size)
  1835. {
  1836. erts_realloc_n_enomem(ERTS_ALC_T2N(type), ptr, size);
  1837. }
  1838. __decl_noreturn void
  1839. erts_realloc_n_enomem(ErtsAlcType_t n, void *ptr, Uint size)
  1840. {
  1841. erts_alc_fatal_error(ERTS_ALC_E_NOMEM, ERTS_ALC_O_REALLOC, n, size);
  1842. }
  1843. static ERTS_INLINE UWord
  1844. alcu_size(ErtsAlcType_t alloc_no, ErtsAlcUFixInfo_t *fi, int fisz)
  1845. {
  1846. UWord res;
  1847. int ai;
  1848. if (!erts_allctrs_info[alloc_no].thr_spec) {
  1849. AllctrSize_t size;
  1850. Allctr_t *allctr;
  1851. allctr = erts_allctrs_info[alloc_no].extra;
  1852. erts_alcu_current_size(allctr, &size, fi, fisz);
  1853. return size.blocks;
  1854. }
  1855. res = 0;
  1856. /* Thread-specific allocators can migrate carriers across types, so we have
  1857. * to visit every allocator type to gather information on blocks that were
  1858. * allocated by us. */
  1859. for (ai = ERTS_ALC_A_MIN; ai < ERTS_ALC_A_MAX; ai++) {
  1860. ErtsAllocatorThrSpec_t *tspec;
  1861. Allctr_t *allctr;
  1862. int i;
  1863. if (!erts_allctrs_info[ai].thr_spec) {
  1864. continue;
  1865. }
  1866. tspec = &erts_allctr_thr_spec[ai];
  1867. ASSERT(tspec->enabled);
  1868. for (i = tspec->size - 1; i >= 0; i--) {
  1869. allctr = tspec->allctr[i];
  1870. if (allctr) {
  1871. AllctrSize_t size;
  1872. if (ai == alloc_no) {
  1873. erts_alcu_current_size(allctr, &size, fi, fisz);
  1874. } else {
  1875. erts_alcu_foreign_size(allctr, alloc_no, &size);
  1876. }
  1877. ASSERT(((SWord)size.blocks) >= 0);
  1878. res += size.blocks;
  1879. }
  1880. }
  1881. }
  1882. return res;
  1883. }
  1884. static ERTS_INLINE void
  1885. add_fix_values(UWord *ap, UWord *up, ErtsAlcUFixInfo_t *fi, ErtsAlcType_t type)
  1886. {
  1887. int ix = ERTS_ALC_T2N(type) - ERTS_ALC_N_MIN_A_FIXED_SIZE;
  1888. ASSERT(0 <= ix && ix < ERTS_ALC_NO_FIXED_SIZES);
  1889. *ap += (UWord) fi[ix].allocated;
  1890. *up += (UWord) fi[ix].used;
  1891. }
  1892. Eterm
  1893. erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg)
  1894. {
  1895. /*
  1896. * NOTE! When updating this function, make sure to also update
  1897. * erlang:memory/[0,1] in $ERL_TOP/erts/preloaded/src/erlang.erl
  1898. */
  1899. #define ERTS_MEM_NEED_ALL_ALCU (want_tot_or_sys)
  1900. struct {
  1901. int total;
  1902. int processes;
  1903. int processes_used;
  1904. int system;
  1905. int atom;
  1906. int atom_used;
  1907. int binary;
  1908. int code;
  1909. int ets;
  1910. } want = {0};
  1911. struct {
  1912. UWord total;
  1913. UWord processes;
  1914. UWord processes_used;
  1915. UWord system;
  1916. UWord atom;
  1917. UWord atom_used;
  1918. UWord binary;
  1919. UWord code;
  1920. UWord ets;
  1921. } size = {0};
  1922. Eterm atoms[sizeof(size)/sizeof(UWord)];
  1923. UWord *uintps[sizeof(size)/sizeof(UWord)];
  1924. Eterm euints[sizeof(size)/sizeof(UWord)];
  1925. int want_tot_or_sys;
  1926. int length;
  1927. Eterm res = THE_NON_VALUE;
  1928. ErtsAlcType_t ai;
  1929. int only_one_value = 0;
  1930. ErtsAlcUFixInfo_t fi[ERTS_ALC_NO_FIXED_SIZES] = {{0,0}};
  1931. ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
1932. /* Figure out what's wanted... */
  1933. length = 0;
  1934. if (is_non_value(earg)) { /* i.e. wants all */
  1935. want.total = 1;
  1936. atoms[length] = am_total;
  1937. uintps[length++] = &size.total;
  1938. want.processes = 1;
  1939. atoms[length] = am_processes;
  1940. uintps[length++] = &size.processes;
  1941. want.processes_used = 1;
  1942. atoms[length] = am_processes_used;
  1943. uintps[length++] = &size.processes_used;
  1944. want.system = 1;
  1945. atoms[length] = am_system;
  1946. uintps[length++] = &size.system;
  1947. want.atom = 1;
  1948. atoms[length] = am_atom;
  1949. uintps[length++] = &size.atom;
  1950. want.atom_used = 1;
  1951. atoms[length] = am_atom_used;
  1952. uintps[length++] = &size.atom_used;
  1953. want.binary = 1;
  1954. atoms[length] = am_binary;
  1955. uintps[length++] = &size.binary;
  1956. want.code = 1;
  1957. atoms[length] = am_code;
  1958. uintps[length++] = &size.code;
  1959. want.ets = 1;
  1960. atoms[length] = am_ets;
  1961. uintps[length++] = &size.ets;
  1962. }
  1963. else {
  1964. DeclareTmpHeapNoproc(tmp_heap,2);
  1965. Eterm wanted_list;
  1966. if (is_nil(earg))
  1967. return NIL;
  1968. UseTmpHeapNoproc(2);
  1969. if (is_not_atom(earg))
  1970. wanted_list = earg;
  1971. else {
  1972. wanted_list = CONS(&tmp_heap[0], earg, NIL);
  1973. only_one_value = 1;
  1974. }
  1975. while (is_list(wanted_list)) {
  1976. switch (CAR(list_val(wanted_list))) {
  1977. case am_total:
  1978. if (!want.total) {
  1979. want.total = 1;
  1980. atoms[length] = am_total;
  1981. uintps[length++] = &size.total;
  1982. }
  1983. break;
  1984. case am_processes:
  1985. if (!want.processes) {
  1986. want.processes = 1;
  1987. atoms[length] = am_processes;
  1988. uintps[length++] = &size.processes;
  1989. }
  1990. break;
  1991. case am_processes_used:
  1992. if (!want.processes_used) {
  1993. want.processes_used = 1;
  1994. atoms[length] = am_processes_used;
  1995. uintps[length++] = &size.processes_used;
  1996. }
  1997. break;
  1998. case am_system:
  1999. if (!want.system) {
  2000. want.system = 1;
  2001. atoms[length] = am_system;
  2002. uintps[length++] = &size.system;
  2003. }
  2004. break;
  2005. case am_atom:
  2006. if (!want.atom) {
  2007. want.atom = 1;
  2008. atoms[length] = am_atom;
  2009. uintps[length++] = &size.atom;
  2010. }
  2011. break;
  2012. case am_atom_used:
  2013. if (!want.atom_used) {
  2014. want.atom_used = 1;
  2015. atoms[length] = am_atom_used;
  2016. uintps[length++] = &size.atom_used;
  2017. }
  2018. break;
  2019. case am_binary:
  2020. if (!want.binary) {
  2021. want.binary = 1;
  2022. atoms[length] = am_binary;
  2023. uintps[length++] = &size.binary;
  2024. }
  2025. break;
  2026. case am_code:
  2027. if (!want.code) {
  2028. want.code = 1;
  2029. atoms[length] = am_code;
  2030. uintps[length++] = &size.code;
  2031. }
  2032. break;
  2033. case am_ets:
  2034. if (!want.ets) {
  2035. want.ets = 1;
  2036. atoms[length] = am_ets;
  2037. uintps[length++] = &size.ets;
  2038. }
  2039. break;
  2040. default:
  2041. UnUseTmpHeapNoproc(2);
  2042. return am_badarg;
  2043. }
  2044. wanted_list = CDR(list_val(wanted_list));
  2045. }
  2046. UnUseTmpHeapNoproc(2);
  2047. if (is_not_nil(wanted_list))
  2048. return am_badarg;
  2049. }
  2050. /* All alloc_util allocators *have* to be enabled, except test_alloc */
  2051. for (ai = ERTS_ALC_A_MIN; ai <= ERTS_ALC_A_MAX; ai++) {
  2052. switch (ai) {
  2053. case ERTS_ALC_A_SYSTEM:
  2054. case ERTS_ALC_A_TEST:
  2055. break;
  2056. default:
  2057. if (!erts_allctrs_info[ai].enabled
  2058. || !erts_allctrs_info[ai].alloc_util) {
  2059. return am_notsup;
  2060. }
  2061. break;
  2062. }
  2063. }
  2064. ASSERT(length <= sizeof(atoms)/sizeof(Eterm));
  2065. ASSERT(length <= sizeof(euints)/sizeof(Eterm));
  2066. ASSERT(length <= sizeof(uintps)/sizeof(UWord));
  2067. if (proc) {
  2068. ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN
  2069. == erts_proc_lc_my_proc_locks(proc));
  2070. /* We'll need locks early in the lock order */
  2071. erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
  2072. }
  2073. /* Calculate values needed... */
  2074. want_tot_or_sys = want.total || want.system;
  2075. if (ERTS_MEM_NEED_ALL_ALCU) {
  2076. size.total = 0;
  2077. for (ai = ERTS_ALC_A_MIN; ai <= ERTS_ALC_A_MAX; ai++) {
  2078. if (erts_allctrs_info[ai].alloc_util) {
  2079. UWord *save;
  2080. UWord asz;
  2081. switch (ai) {
  2082. case ERTS_ALC_A_TEMPORARY:
  2083. /*
2084. * Often not thread safe and usually never
2085. * contains any allocated memory.
  2086. */
  2087. continue;
  2088. case ERTS_ALC_A_TEST:
  2089. continue;
  2090. case ERTS_ALC_A_EHEAP:
  2091. save = &size.processes;
  2092. break;
  2093. case ERTS_ALC_A_ETS:
  2094. save = &size.ets;
  2095. break;
  2096. case ERTS_ALC_A_BINARY:
  2097. save = &size.binary;
  2098. break;
  2099. case ERTS_ALC_A_FIXED_SIZE:
  2100. asz = alcu_size(ai, fi, ERTS_ALC_NO_FIXED_SIZES);
  2101. size.total += asz;
  2102. continue;
  2103. default:
  2104. save = NULL;
  2105. break;
  2106. }
  2107. asz = alcu_size(ai, NULL, 0);
  2108. if (save)
  2109. *save = asz;
  2110. size.total += asz;
  2111. }
  2112. }
  2113. }
  2114. if (want_tot_or_sys || want.processes || want.processes_used) {
  2115. UWord tmp;
  2116. if (ERTS_MEM_NEED_ALL_ALCU)
  2117. tmp = size.processes;
  2118. else {
  2119. alcu_size(ERTS_ALC_A_FIXED_SIZE,
  2120. fi, ERTS_ALC_NO_FIXED_SIZES);
  2121. tmp = alcu_size(ERTS_ALC_A_EHEAP, NULL, 0);
  2122. }
  2123. tmp += erts_ptab_mem_size(&erts_proc);
  2124. tmp += erts_bif_timer_memory_size();
  2125. size.processes = size.processes_used = tmp;
  2126. add_fix_values(&size.processes,
  2127. &size.processes_used,
  2128. fi,
  2129. ERTS_ALC_T_PROC);
  2130. add_fix_values(&size.processes,
  2131. &size.processes_used,
  2132. fi,
  2133. ERTS_ALC_T_MONITOR);
  2134. add_fix_values(&size.processes,
  2135. &size.processes_used,
  2136. fi,
  2137. ERTS_ALC_T_LINK);
  2138. add_fix_values(&size.processes,
  2139. &size.processes_used,
  2140. fi,
  2141. ERTS_ALC_T_MSG_REF);
  2142. add_fix_values(&size.processes,
  2143. &size.processes_used,
  2144. fi,
  2145. ERTS_ALC_T_LL_PTIMER);
  2146. add_fix_values(&size.processes,
  2147. &size.processes_used,
  2148. fi,
  2149. ERTS_ALC_T_HL_PTIMER);
  2150. add_fix_values(&size.processes,
  2151. &size.processes_used,
  2152. fi,
  2153. ERTS_ALC_T_BIF_TIMER);
  2154. }
  2155. if (want.atom || want.atom_used) {
  2156. Uint reserved_atom_space, atom_space;
  2157. erts_atom_get_text_space_sizes(&reserved_atom_space, &atom_space);
  2158. size.atom = size.atom_used = atom_table_sz();
  2159. if (want.atom)
  2160. size.atom += reserved_atom_space;
  2161. if (want.atom_used)
  2162. size.atom_used += atom_space;
  2163. }
  2164. if (!ERTS_MEM_NEED_ALL_ALCU && want.binary)
  2165. size.binary = alcu_size(ERTS_ALC_A_BINARY, NULL, 0);
  2166. if (want.code) {
  2167. size.code = module_table_sz();
  2168. size.code += export_table_sz();
  2169. size.code += export_entries_sz();
  2170. size.code += erts_fun_table_sz();
  2171. size.code += erts_ranges_sz();
  2172. size.code += erts_total_code_size;
  2173. }
  2174. if (want.ets) {
  2175. if (!ERTS_MEM_NEED_ALL_ALCU)
  2176. size.ets = alcu_size(ERTS_ALC_A_ETS, NULL, 0);
  2177. size.ets += erts_get_ets_misc_mem_size();
  2178. }
  2179. if (want_tot_or_sys) {
  2180. #ifdef BEAMASM
  2181. /* The JIT allocates code on its own because of W^X restrictions, so we
  2182. * need to bump the total size accordingly. */
  2183. size.total += erts_total_code_size;
  2184. #endif
  2185. ASSERT(size.total >= size.processes);
  2186. size.system = size.total - size.processes;
  2187. }
  2188. if (print_to_p) {
  2189. int i;
  2190. fmtfn_t to = *print_to_p;
  2191. void *arg = print_to_arg;
  2192. /* Print result... */
  2193. erts_print(to, arg, "=memory\n");
  2194. for (i = 0; i < length; i++)
  2195. erts_print(to, arg, "%T: %bpu\n", atoms[i], *uintps[i]);
  2196. }
  2197. if (proc) {
  2198. /* Build erlang term result... */
  2199. Uint *hp;
  2200. Uint hsz;
  2201. erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
  2202. if (only_one_value) {
  2203. ASSERT(length == 1);
  2204. hsz = 0;
  2205. erts_bld_uword(NULL, &hsz, *uintps[0]);
  2206. hp = hsz ? HAlloc((Process *) proc, hsz) : NULL;
  2207. res = erts_bld_uword(&hp, NULL, *uintps[0]);
  2208. }
  2209. else {
  2210. Uint **hpp = NULL;
  2211. Uint *hszp = &hsz;
  2212. hsz = 0;
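/* Two-pass term building: the first pass runs with hpp == NULL and only
 * accumulates the required heap size in hsz; the second pass runs with a
 * freshly allocated heap and actually builds the result term. */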
  2213. while (1) {
  2214. int i;
  2215. for (i = 0; i < length; i++)
  2216. euints[i] = erts_bld_uword(hpp, hszp, *uintps[i]);
  2217. res = erts_bld_2tup_list(hpp, hszp, length, atoms, euints);
  2218. if (hpp)
  2219. break;
  2220. hp = HAlloc((Process *) proc, hsz);
  2221. hpp = &hp;
  2222. hszp = NULL;
  2223. }
  2224. }
  2225. }
  2226. return res;
  2227. #undef ERTS_MEM_NEED_ALL_ALCU
  2228. }
  2229. struct aa_values {
  2230. Uint arity;
  2231. const char *name;
  2232. Uint ui[2];
  2233. };
  2234. Eterm
  2235. erts_allocated_areas(fmtfn_t *print_to_p, void *print_to_arg, void *proc)
  2236. {
  2237. #define MAX_AA_VALUES (24)
  2238. struct aa_values values[MAX_AA_VALUES];
  2239. Eterm res = THE_NON_VALUE;
  2240. int i, length;
  2241. Uint reserved_atom_space, atom_space;
  2242. if (proc) {
  2243. ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN
  2244. == erts_proc_lc_my_proc_locks(proc));
  2245. /* We'll need locks early in the lock order */
  2246. erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
  2247. }
  2248. i = 0;
  2249. values[i].arity = 2;
  2250. values[i].name = "sys_misc";
  2251. values[i].ui[0] = erts_sys_misc_mem_sz();
  2252. i++;
  2253. values[i].arity = 2;
  2254. values[i].name = "static";
  2255. values[i].ui[0] =
  2256. sizeof(ErtsPTab)*2 /* proc & port tables */
  2257. + erts_timer_wheel_memory_size(); /* Timer wheel */
  2258. i++;
  2259. erts_atom_get_text_space_sizes(&reserved_atom_space, &atom_space);
  2260. values[i].arity = 3;
  2261. values[i].name = "atom_space";
  2262. values[i].ui[0] = reserved_atom_space;
  2263. values[i].ui[1] = atom_space;
  2264. i++;
  2265. values[i].arity = 2;
  2266. values[i].name = "atom_table";
  2267. values[i].ui[0] = atom_table_sz();
  2268. i++;
  2269. values[i].arity = 2;
  2270. values[i].name = "module_table";
  2271. values[i].ui[0] = module_table_sz();
  2272. i++;
  2273. values[i].arity = 2;
  2274. values[i].name = "export_table";
  2275. values[i].ui[0] = export_table_sz();
  2276. i++;
  2277. values[i].arity = 2;
  2278. values[i].name = "export_list";
  2279. values[i].ui[0] = export_entries_sz();
  2280. i++;
  2281. values[i].arity = 2;
  2282. values[i].name = "register_table";
  2283. values[i].ui[0] = process_reg_sz();
  2284. i++;
  2285. values[i].arity = 2;
  2286. values[i].name = "fun_table";
  2287. values[i].ui[0] = erts_fun_table_sz();
  2288. i++;
  2289. values[i].arity = 2;
  2290. values[i].name = "module_refs";
  2291. values[i].ui[0] = erts_ranges_sz();
  2292. i++;
  2293. values[i].arity = 2;
  2294. values[i].name = "loaded_code";
  2295. values[i].ui[0] = erts_total_code_size;
  2296. i++;
  2297. values[i].arity = 2;
  2298. values[i].name = "dist_table";
  2299. values[i].ui[0] = erts_dist_table_size();
  2300. i++;
  2301. values[i].arity = 2;
  2302. values[i].name = "node_table";
  2303. values[i].ui[0] = erts_node_table_size();
  2304. i++;
  2305. values[i].arity = 2;
  2306. values[i].name = "bits_bufs_size";
  2307. values[i].ui[0] = erts_bits_bufs_size();
  2308. i++;
  2309. values[i].arity = 2;
  2310. values[i].name = "bif_timer";
  2311. values[i].ui[0] = erts_bif_timer_memory_size();
  2312. i++;
  2313. values[i].arity = 2;
  2314. values[i].name = "process_table";
  2315. values[i].ui[0] = erts_ptab_mem_size(&erts_proc);
  2316. i++;
  2317. values[i].arity = 2;
  2318. values[i].name = "port_table";
  2319. values[i].ui[0] = erts_ptab_mem_size(&erts_port);
  2320. i++;
  2321. values[i].arity = 2;
  2322. values[i].name = "ets_misc";
  2323. values[i].ui[0] = erts_get_ets_misc_mem_size();
  2324. i++;
2325. /* Data not allocated by any alloc_util allocators must be summed into
  2326. * the "total" figure in erlang:memory/0,1. */
  2327. values[i].arity = 2;
  2328. values[i].name = "external_alloc";
  2329. #ifdef BEAMASM
  2330. values[i].ui[0] = erts_total_code_size;
  2331. #else
  2332. values[i].ui[0] = 0;
  2333. #endif
  2334. i++;
  2335. length = i;
  2336. ASSERT(length <= MAX_AA_VALUES);
  2337. if (print_to_p) {
  2338. /* Print result... */
  2339. fmtfn_t to = *print_to_p;
  2340. void *arg = print_to_arg;
  2341. erts_print(to, arg, "=allocated_areas\n");
  2342. for (i = 0; i < length; i++) {
  2343. switch (values[i].arity) {
  2344. case 2:
  2345. erts_print(to, arg, "%s: %beu\n",
  2346. values[i].name, values[i].ui[0]);
  2347. break;
  2348. case 3:
  2349. erts_print(to, arg, "%s: %beu %beu\n",
  2350. values[i].name, values[i].ui[0], values[i].ui[1]);
  2351. break;
  2352. default:
  2353. erts_print(to, arg, "ERROR: internal_error\n");
  2354. ASSERT(0);
  2355. return am_internal_error;
  2356. }
  2357. }
  2358. }
  2359. if (proc) {
  2360. /* Build erlang term result... */
  2361. Eterm tuples[MAX_AA_VALUES];
  2362. Uint *hp;
  2363. Uint **hpp;
  2364. Uint hsz;
  2365. Uint *hszp;
  2366. erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
  2367. hpp = NULL;
  2368. hsz = 0;
  2369. hszp = &hsz;
  2370. while (1) {
  2371. int i;
  2372. for (i = 0; i < length; i++) {
  2373. Eterm atom;
  2374. if (hpp)
  2375. atom = am_atom_put(values[i].name,
  2376. (int) sys_strlen(values[i].name));
  2377. else
  2378. atom = am_true;
  2379. switch (values[i].arity) {
  2380. case 2:
  2381. tuples[i] = erts_bld_tuple(hpp, hszp, 2,
  2382. atom,
  2383. erts_bld_uint(hpp, hszp,
  2384. values[i].ui[0]));
  2385. break;
  2386. case 3:
  2387. tuples[i] = erts_bld_tuple(hpp, hszp, 3,
  2388. atom,
  2389. erts_bld_uint(hpp, hszp,
  2390. values[i].ui[0]),
  2391. erts_bld_uint(hpp, hszp,
  2392. values[i].ui[1]));
  2393. break;
  2394. default:
  2395. ASSERT(0);
  2396. return am_internal_error;
  2397. }
  2398. }
  2399. res = erts_bld_list(hpp, hszp, length, tuples);
  2400. if (hpp)
  2401. break;
  2402. hp = HAlloc((Process *) proc, hsz);
  2403. hpp = &hp;
  2404. hszp = NULL;
  2405. }
  2406. }
  2407. return res;
  2408. #undef MAX_AA_VALUES
  2409. }
  2410. Eterm
  2411. erts_alloc_util_allocators(void *proc)
  2412. {
  2413. Eterm res;
  2414. Uint *hp;
  2415. Uint sz;
  2416. int i;
  2417. /*
  2418. * Currently all allocators except sys_alloc are
  2419. * alloc_util allocators.
2420. * Also hide test_alloc, which is disabled by default
2421. * and is only intended for our own testing.
  2422. */
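/* Two entries (sys_alloc and test_alloc) are excluded, and each list cell
 * consed below consumes two heap words, hence the "- 2" and "* 2". */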
  2423. sz = ((ERTS_ALC_A_MAX + 1 - ERTS_ALC_A_MIN) - 2)*2;
  2424. ASSERT(sz > 0);
  2425. hp = HAlloc((Process *) proc, sz);
  2426. res = NIL;
  2427. for (i = ERTS_ALC_A_MAX; i >= ERTS_ALC_A_MIN; i--) {
  2428. switch (i) {
  2429. case ERTS_ALC_A_SYSTEM:
  2430. case ERTS_ALC_A_TEST:
  2431. break;
  2432. default: {
  2433. char *alc_str = (char *) ERTS_ALC_A2AD(i);
  2434. Eterm alc = am_atom_put(alc_str, sys_strlen(alc_str));
  2435. res = CONS(hp, alc, res);
  2436. hp += 2;
  2437. break;
  2438. }
  2439. }
  2440. }
  2441. return res;
  2442. }
  2443. void
  2444. erts_allocator_info(fmtfn_t to, void *arg)
  2445. {
  2446. ErtsAlcType_t a;
  2447. ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
  2448. for (a = ERTS_ALC_A_MIN; a <= ERTS_ALC_A_MAX; a++) {
  2449. int ai;
  2450. for (ai = 0; ai == 0 || ai < erts_allctrs_info[a].thr_spec; ai++) {
  2451. if (erts_allctrs_info[a].thr_spec) {
  2452. if (!erts_allctr_thr_spec[a].allctr[ai])
  2453. continue;
  2454. erts_print(to, arg, "=allocator:%s[%d]\n",
  2455. ERTS_ALC_A2AD(a), ai);
  2456. }
  2457. else {
  2458. erts_print(to, arg, "=allocator:%s\n", ERTS_ALC_A2AD(a));
  2459. }
  2460. if (!erts_allctrs_info[a].enabled)
  2461. erts_print(to, arg, "option e: false\n");
  2462. else {
  2463. if (erts_allctrs_info[a].alloc_util) {
  2464. void *as;
  2465. if (!erts_allctrs_info[a].thr_spec)
  2466. as = erts_allctrs_info[a].extra;
  2467. else {
  2468. ASSERT(erts_allctr_thr_spec[a].enabled);
  2469. as = erts_allctr_thr_spec[a].allctr[ai];
  2470. }
  2471. /* Binary alloc has its own thread safety... */
  2472. erts_alcu_info(as, 0, 0, &to, arg, NULL, NULL);
  2473. }
  2474. else {
  2475. switch (a) {
  2476. case ERTS_ALC_A_SYSTEM: {
  2477. SysAllocStat sas;
  2478. erts_print(to, arg, "option e: true\n");
  2479. erts_print(to, arg, "option m: libc\n");
  2480. sys_alloc_stat(&sas);
  2481. if(sas.trim_threshold >= 0)
  2482. erts_print(to, arg, "option tt: %d\n", sas.trim_threshold);
  2483. if(sas.top_pad >= 0)
  2484. erts_print(to, arg, "option tp: %d\n", sas.top_pad);
  2485. break;
  2486. }
  2487. default:
  2488. ASSERT(0);
  2489. break;
  2490. }
  2491. }
  2492. }
  2493. }
  2494. }
  2495. #if HAVE_ERTS_MSEG
  2496. {
  2497. struct erts_mmap_info_struct emis;
  2498. int max = (int) erts_no_schedulers;
  2499. int i;
  2500. for (i = 0; i <= max; i++) {
  2501. erts_print(to, arg, "=allocator:mseg_alloc[%d]\n", i);
  2502. erts_mseg_info(i, &to, arg, 0, 0, NULL, NULL);
  2503. }
  2504. erts_print(to, arg, "=allocator:erts_mmap.default_mmap\n");
  2505. erts_mmap_info(&erts_dflt_mmapper, &to, arg, NULL, NULL, &emis);
  2506. #if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
  2507. erts_print(to, arg, "=allocator:erts_mmap.literal_mmap\n");
  2508. erts_mmap_info(&erts_literal_mmapper, &to, arg, NULL, NULL, &emis);
  2509. #endif
  2510. }
  2511. #endif
  2512. erts_print(to, arg, "=allocator:alloc_util\n");
  2513. erts_alcu_au_info_options(&to, arg, NULL, NULL);
  2514. erts_print(to, arg, "=allocator:instr\n");
  2515. erts_print(to, arg, "option t: %s\n",
  2516. erts_mtrace_enabled ? "true" : "false");
  2517. }
  2518. Eterm
  2519. erts_allocator_options(void *proc)
  2520. {
  2521. #if HAVE_ERTS_MSEG
  2522. int use_mseg = 0;
  2523. #endif
  2524. Uint sz, *szp, *hp, **hpp;
  2525. Eterm res, features, settings;
  2526. Eterm atoms[ERTS_ALC_A_MAX-ERTS_ALC_A_MIN+7];
  2527. Uint terms[ERTS_ALC_A_MAX-ERTS_ALC_A_MIN+7];
  2528. int a, length;
  2529. SysAllocStat sas;
  2530. Uint *endp = NULL;
  2531. sys_alloc_stat(&sas);
  2532. /* First find out the heap size needed ... */
  2533. hpp = NULL;
  2534. szp = &sz;
  2535. sz = 0;
  2536. bld_term:
  2537. length = 0;
  2538. features = NIL;
  2539. settings = NIL;
  2540. for (a = ERTS_ALC_A_MIN; a <= ERTS_ALC_A_MAX; a++) {
  2541. Eterm tmp = NIL;
  2542. atoms[length] = am_atom_put((char *) ERTS_ALC_A2AD(a),
  2543. sys_strlen(ERTS_ALC_A2AD(a)));
  2544. if (erts_allctrs_info[a].enabled) {
  2545. if (erts_allctrs_info[a].alloc_util) {
  2546. Allctr_t *allctr;
  2547. #if HAVE_ERTS_MSEG
  2548. use_mseg++;
  2549. #endif
  2550. if (erts_allctr_thr_spec[a].enabled)
  2551. allctr = erts_allctr_thr_spec[a].allctr[0];
  2552. else
  2553. allctr = erts_allctrs_info[a].extra;
  2554. tmp = erts_alcu_info_options(allctr, NULL, NULL, hpp, szp);
  2555. }
  2556. else {
  2557. int l = 0;
  2558. Eterm as[4];
  2559. Eterm ts[4];
  2560. as[l] = ERTS_MAKE_AM("e");
  2561. ts[l++] = am_true;
  2562. switch (a) {
  2563. case ERTS_ALC_A_SYSTEM:
  2564. as[l] = ERTS_MAKE_AM("m");
  2565. ts[l++] = ERTS_MAKE_AM("libc");
  2566. if(sas.trim_threshold >= 0) {
  2567. as[l] = ERTS_MAKE_AM("tt");
  2568. ts[l++] = erts_bld_uint(hpp, szp,
  2569. (Uint) sas.trim_threshold);
  2570. }
  2571. if(sas.top_pad >= 0) {
  2572. as[l] = ERTS_MAKE_AM("tp");
  2573. ts[l++] = erts_bld_uint(hpp, szp, (Uint) sas.top_pad);
  2574. }
  2575. break;
  2576. default:
  2577. break;
  2578. }
  2579. tmp = erts_bld_2tup_list(hpp, szp, l, as, ts);
  2580. }
  2581. }
  2582. else {
  2583. Eterm atom = ERTS_MAKE_AM("e");
  2584. Eterm term = am_false;
  2585. tmp = erts_bld_2tup_list(hpp, szp, 1, &atom, &term);
  2586. }
  2587. terms[length++] = tmp;
  2588. }
  2589. #if HAVE_ERTS_MSEG
  2590. if (use_mseg) {
  2591. atoms[length] = ERTS_MAKE_AM("mseg_alloc");
  2592. terms[length++] = erts_mseg_info_options(0, NULL, NULL, hpp, szp);
  2593. }
  2594. #endif
  2595. atoms[length] = ERTS_MAKE_AM("alloc_util");
  2596. terms[length++] = erts_alcu_au_info_options(NULL, NULL, hpp, szp);
  2597. #if HAVE_ERTS_MMAP
  2598. atoms[length] = ERTS_MAKE_AM("erts_mmap");
  2599. terms[length++] = erts_mmap_info_options(&erts_dflt_mmapper, NULL, NULL,
  2600. NULL, hpp, szp);
  2601. #endif
  2602. {
  2603. Eterm o[1], v[1];
  2604. o[0] = ERTS_MAKE_AM("t");
  2605. v[0] = erts_mtrace_enabled ? am_true : am_false;
  2606. atoms[length] = ERTS_MAKE_AM("instr");
  2607. terms[length++] = erts_bld_2tup_list(hpp, szp, 1, o, v);
  2608. }
  2609. atoms[length] = ERTS_MAKE_AM("lock_physical_memory");
  2610. terms[length++] = (lock_all_physical_memory ? am_all : am_no);
  2611. settings = erts_bld_2tup_list(hpp, szp, length, atoms, terms);
  2612. length = 0;
  2613. for (a = ERTS_ALC_A_MIN; a <= ERTS_ALC_A_MAX; a++) {
  2614. if (erts_allctrs_info[a].enabled) {
  2615. terms[length++] = am_atom_put((char *) ERTS_ALC_A2AD(a),
  2616. sys_strlen(ERTS_ALC_A2AD(a)));
  2617. }
  2618. }
  2619. #if HAVE_ERTS_MSEG
  2620. if (use_mseg)
  2621. terms[length++] = ERTS_MAKE_AM("mseg_alloc");
  2622. #endif
  2623. #if ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
  2624. terms[length++] = ERTS_MAKE_AM("sys_aligned_alloc");
  2625. #endif
  2626. #if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
  2627. terms[length++] = ERTS_MAKE_AM("literal_mmap");
  2628. #endif
  2629. features = length ? erts_bld_list(hpp, szp, length, terms) : NIL;
  2630. #if defined(__GLIBC__)
  2631. {
  2632. Eterm AM_glibc = ERTS_MAKE_AM("glibc");
  2633. Eterm version;
  2634. version = erts_bld_cons(hpp,
  2635. szp,
  2636. make_small(__GLIBC__),
  2637. #ifdef __GLIBC_MINOR__
  2638. erts_bld_cons(hpp,
  2639. szp,
  2640. make_small(__GLIBC_MINOR__),
  2641. NIL)
  2642. #else
  2643. NIL
  2644. #endif
  2645. );
  2646. res = erts_bld_tuple(hpp, szp, 4,
  2647. AM_glibc, version, features, settings);
  2648. }
  2649. #else /* unknown allocator */
  2650. res = erts_bld_tuple(hpp, szp, 4,
  2651. am_undefined, NIL, features, settings);
  2652. #endif
  2653. if (szp) {
  2654. /* ... and then build the term */
  2655. hp = HAlloc((Process *) proc, sz);
  2656. endp = hp + sz;
  2657. hpp = &hp;
  2658. szp = NULL;
  2659. goto bld_term;
  2660. }
  2661. ASSERT(endp >= hp);
  2662. HRelease((Process *) proc, endp, hp);
  2663. return res;
  2664. }
  2665. void *erts_alloc_permanent_cache_aligned(ErtsAlcType_t type, Uint size)
  2666. {
  2667. UWord v = (UWord) erts_alloc(type, size + (ERTS_CACHE_LINE_SIZE-1)
  2668. #ifdef VALGRIND
  2669. + sizeof(UWord)
  2670. #endif
  2671. );
  2672. #ifdef VALGRIND
  2673. { /* Link them to avoid Leak_PossiblyLost */
  2674. static UWord* first_in_list = NULL;
  2675. *(UWord**)v = first_in_list;
  2676. first_in_list = (UWord*) v;
  2677. v += sizeof(UWord);
  2678. }
  2679. #endif
  2680. if (v & ERTS_CACHE_LINE_MASK) {
  2681. v = (v & ~ERTS_CACHE_LINE_MASK) + ERTS_CACHE_LINE_SIZE;
  2682. }
  2683. ASSERT((v & ERTS_CACHE_LINE_MASK) == 0);
  2684. return (void*)v;
  2685. }
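/* Illustrative arithmetic for the rounding above, assuming a 64-byte cache
 * line (ERTS_CACHE_LINE_SIZE == 64, ERTS_CACHE_LINE_MASK == 63): an
 * unaligned allocation at 0x1013 is rounded up to (0x1013 & ~63) + 64 ==
 * 0x1040. The extra ERTS_CACHE_LINE_SIZE-1 bytes requested from erts_alloc()
 * guarantee that the rounded-up address still fits within the allocation. */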
  2686. static void
  2687. reply_alloc_info(void *vair)
  2688. {
  2689. ErtsAllocInfoReq *air = (ErtsAllocInfoReq *) vair;
  2690. Uint sched_id = erts_get_scheduler_id();
  2691. int global_instances = air->req_sched == sched_id;
  2692. ErtsProcLocks rp_locks;
  2693. Process *rp = air->proc;
  2694. Eterm ref_copy = NIL, ai_list, msg = NIL;
  2695. Eterm *hp = NULL, *hp_start = NULL, *hp_end = NULL;
  2696. Eterm **hpp;
  2697. Uint sz, *szp;
  2698. ErlOffHeap *ohp = NULL;
  2699. ErtsMessage *mp = NULL;
  2700. #if HAVE_ERTS_MMAP
  2701. struct erts_mmap_info_struct mmap_info_dflt;
  2702. # if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
  2703. struct erts_mmap_info_struct mmap_info_literal;
  2704. # endif
  2705. #endif
  2706. int i;
  2707. Eterm (*info_func)(Allctr_t *,
  2708. int,
  2709. int,
  2710. fmtfn_t *,
  2711. void *,
  2712. Uint **,
  2713. Uint *) = (air->only_sz
  2714. ? erts_alcu_sz_info
  2715. : erts_alcu_info);
  2716. rp_locks = air->req_sched == sched_id ? ERTS_PROC_LOCK_MAIN : 0;
  2717. sz = 0;
  2718. hpp = NULL;
  2719. szp = &sz;
  2720. while (1) {
  2721. if (hpp)
  2722. ref_copy = erts_iref_storage_make_ref(&air->iref,
  2723. hpp, ohp, 0);
  2724. else
  2725. *szp += erts_iref_storage_heap_size(&air->iref);
  2726. ai_list = NIL;
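/* Find the end of the requested allocator list, then walk it backwards so
 * that consing onto ai_list preserves the original request order. */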
  2727. for (i = 0; air->allocs[i] != ERTS_ALC_A_INVALID; i++);
  2728. for (i--; i >= 0; i--) {
  2729. int ai = air->allocs[i];
  2730. Allctr_t *allctr;
  2731. Eterm ainfo;
  2732. Eterm alloc_atom;
  2733. if (global_instances) {
  2734. switch (ai) {
  2735. case ERTS_ALC_A_SYSTEM: {
  2736. alloc_atom = erts_bld_atom(hpp, szp, "sys_alloc");
  2737. ainfo = NIL;
  2738. if (!air->only_sz) {
  2739. SysAllocStat sas;
  2740. if (hpp)
  2741. sys_alloc_stat(&sas);
  2742. if (szp) {
2743. /* ensure enough heap */
  2744. sas.top_pad = INT_MAX;
  2745. sas.trim_threshold = INT_MAX;
  2746. }
  2747. if (sas.top_pad >= 0) {
  2748. ainfo = erts_bld_cons(
  2749. hpp, szp,
  2750. erts_bld_tuple(
  2751. hpp, szp, 2,
  2752. erts_bld_atom(hpp, szp, "tp"),
  2753. erts_bld_uint(
  2754. hpp, szp,
  2755. (Uint) sas.top_pad)),
  2756. ainfo);
  2757. }
  2758. if (sas.trim_threshold >= 0) {
  2759. ainfo = erts_bld_cons(
  2760. hpp, szp,
  2761. erts_bld_tuple(
  2762. hpp, szp, 2,
  2763. erts_bld_atom(hpp, szp, "tt"),
  2764. erts_bld_uint(
  2765. hpp, szp,
  2766. (Uint) sas.trim_threshold)),
  2767. ainfo);
  2768. }
  2769. ainfo = erts_bld_cons(hpp, szp,
  2770. erts_bld_tuple(
  2771. hpp, szp, 2,
  2772. erts_bld_atom(hpp, szp,
  2773. "m"),
  2774. erts_bld_atom(hpp, szp,
  2775. "libc")),
  2776. ainfo);
  2777. ainfo = erts_bld_cons(hpp, szp,
  2778. erts_bld_tuple(
  2779. hpp, szp, 2,
  2780. erts_bld_atom(hpp, szp,
  2781. "e"),
  2782. am_true),
  2783. ainfo);
  2784. ainfo = erts_bld_tuple(hpp, szp, 2,
  2785. erts_bld_atom(hpp, szp,
  2786. "options"),
  2787. ainfo);
  2788. ainfo = erts_bld_cons(hpp, szp,ainfo,NIL);
  2789. }
  2790. ainfo = erts_bld_tuple(hpp, szp, 3,
  2791. alloc_atom,
  2792. make_small(0),
  2793. ainfo);
  2794. break;
  2795. }
  2796. case ERTS_ALC_INFO_A_ALLOC_UTIL:
  2797. alloc_atom = erts_bld_atom(hpp, szp, "alloc_util");
  2798. ainfo = (air->only_sz
  2799. ? NIL
  2800. : erts_alcu_au_info_options(NULL, NULL,
  2801. hpp, szp));
  2802. ainfo = erts_bld_tuple(hpp, szp, 3,
  2803. alloc_atom,
  2804. make_small(0),
  2805. ainfo);
  2806. break;
  2807. case ERTS_ALC_INFO_A_ERTS_MMAP:
  2808. alloc_atom = erts_bld_atom(hpp, szp, "erts_mmap");
  2809. #if HAVE_ERTS_MMAP
  2810. ainfo = (air->only_sz ? NIL :
  2811. erts_mmap_info(&erts_dflt_mmapper, NULL, NULL,
  2812. hpp, szp, &mmap_info_dflt));
  2813. ainfo = erts_bld_tuple3(hpp, szp,
  2814. alloc_atom,
  2815. erts_bld_atom(hpp,szp,"default_mmap"),
  2816. ainfo);
  2817. # if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
  2818. ai_list = erts_bld_cons(hpp, szp,
  2819. ainfo, ai_list);
  2820. ainfo = (air->only_sz ? NIL :
  2821. erts_mmap_info(&erts_literal_mmapper, NULL, NULL,
  2822. hpp, szp, &mmap_info_literal));
  2823. ainfo = erts_bld_tuple3(hpp, szp,
  2824. alloc_atom,
  2825. erts_bld_atom(hpp,szp,"literal_mmap"),
  2826. ainfo);
  2827. # endif
  2828. #else /* !HAVE_ERTS_MMAP */
  2829. ainfo = erts_bld_tuple2(hpp, szp, alloc_atom,
  2830. am_false);
  2831. #endif
  2832. break;
  2833. case ERTS_ALC_INFO_A_MSEG_ALLOC:
  2834. alloc_atom = erts_bld_atom(hpp, szp, "mseg_alloc");
  2835. #if HAVE_ERTS_MSEG
  2836. ainfo = erts_mseg_info(0, NULL, NULL, hpp != NULL,
  2837. air->only_sz, hpp, szp);
  2838. ainfo = erts_bld_tuple3(hpp, szp,
  2839. alloc_atom,
  2840. make_small(0),
  2841. ainfo);
  2842. #else
  2843. ainfo = erts_bld_tuple2(hpp, szp, alloc_atom,
  2844. am_false);
  2845. #endif
  2846. break;
  2847. default:
  2848. alloc_atom = erts_bld_atom(hpp, szp,
  2849. (char *) ERTS_ALC_A2AD(ai));
  2850. if (!erts_allctrs_info[ai].enabled)
  2851. ainfo = erts_bld_tuple(hpp, szp, 2, alloc_atom,
  2852. am_false);
  2853. else if (erts_allctrs_info[ai].alloc_util) {
  2854. if (erts_allctrs_info[ai].thr_spec)
  2855. allctr = erts_allctr_thr_spec[ai].allctr[0];
  2856. else
  2857. allctr = erts_allctrs_info[ai].extra;
  2858. ainfo = info_func(allctr, air->internal, hpp != NULL,
  2859. NULL, NULL, hpp, szp);
  2860. ainfo = erts_bld_tuple(hpp, szp, 3, alloc_atom,
  2861. make_small(0), ainfo);
  2862. }
  2863. else {
  2864. erts_exit(ERTS_ABORT_EXIT, "%s:%d: internal error\n",
  2865. __FILE__, __LINE__);
  2866. }
  2867. }
  2868. ai_list = erts_bld_cons(hpp, szp,
  2869. ainfo, ai_list);
  2870. }
  2871. switch (ai) {
  2872. case ERTS_ALC_A_SYSTEM:
  2873. case ERTS_ALC_INFO_A_ALLOC_UTIL:
  2874. case ERTS_ALC_INFO_A_ERTS_MMAP:
  2875. break;
  2876. case ERTS_ALC_INFO_A_MSEG_ALLOC:
  2877. #if HAVE_ERTS_MSEG
  2878. alloc_atom = erts_bld_atom(hpp, szp, "mseg_alloc");
  2879. ainfo = erts_mseg_info(sched_id, NULL, NULL,
  2880. hpp != NULL, air->only_sz, hpp, szp);
  2881. ainfo = erts_bld_tuple(hpp, szp, 3,
  2882. alloc_atom,
  2883. make_small(sched_id),
  2884. ainfo);
  2885. ai_list = erts_bld_cons(hpp, szp, ainfo, ai_list);
  2886. #endif
  2887. break;
  2888. default:
  2889. if (erts_allctrs_info[ai].thr_spec) {
  2890. alloc_atom = erts_bld_atom(hpp, szp,
  2891. (char *) ERTS_ALC_A2AD(ai));
  2892. allctr = erts_allctr_thr_spec[ai].allctr[sched_id];
  2893. ainfo = info_func(allctr, air->internal, hpp != NULL, NULL,
  2894. NULL, hpp, szp);
  2895. ai_list = erts_bld_cons(hpp, szp,
  2896. erts_bld_tuple(
  2897. hpp, szp,
  2898. 3,
  2899. alloc_atom,
  2900. make_small(sched_id),
  2901. ainfo),
  2902. ai_list);
  2903. }
  2904. break;
  2905. }
  2906. msg = erts_bld_tuple(hpp, szp,
  2907. 3,
  2908. ref_copy,
  2909. make_small(sched_id),
  2910. ai_list);
  2911. }
  2912. if (hpp)
  2913. break;
  2914. mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
  2915. hp_start = hp;
  2916. hp_end = hp + sz;
  2917. szp = NULL;
  2918. hpp = &hp;
  2919. }
  2920. if (hp != hp_end)
  2921. erts_shrink_message_heap(&mp, rp, hp_start, hp, hp_end, &msg, 1);
  2922. erts_queue_message(rp, rp_locks, mp, msg, am_system);
  2923. if (air->req_sched == sched_id)
  2924. rp_locks &= ~ERTS_PROC_LOCK_MAIN;
  2925. erts_proc_unlock(rp, rp_locks);
  2926. erts_proc_dec_refc(rp);
  2927. if (erts_atomic32_dec_read_nob(&air->refc) == 0) {
  2928. erts_iref_storage_clean(&air->iref);
  2929. aireq_free(air);
  2930. }
  2931. }
  2932. int
  2933. erts_request_alloc_info(struct process *c_p,
  2934. Eterm ref,
  2935. Eterm allocs,
  2936. int only_sz,
  2937. int internal)
  2938. {
  2939. ErtsAllocInfoReq *air = aireq_alloc();
  2940. Eterm req_ai[ERTS_ALC_INFO_A_END] = {0};
  2941. Eterm alist;
  2942. int airix = 0, ai;
  2943. air->req_sched = erts_get_scheduler_id();
  2944. air->only_sz = only_sz;
  2945. air->internal = internal;
  2946. air->proc = c_p;
  2947. if (is_not_internal_ref(ref))
  2948. return 0;
  2949. erts_iref_storage_save(&air->iref, ref);
  2950. if (is_not_list(allocs))
  2951. return 0;
  2952. alist = allocs;
  2953. while (is_list(alist)) {
  2954. int saved = 0;
  2955. Eterm* consp = list_val(alist);
  2956. Eterm alloc = CAR(consp);
  2957. for (ai = ERTS_ALC_A_MIN; ai <= ERTS_ALC_A_MAX; ai++)
  2958. if (erts_is_atom_str(erts_alc_a2ad[ai], alloc, 0))
  2959. goto save_alloc;
  2960. if (erts_is_atom_str("mseg_alloc", alloc, 0)) {
  2961. ai = ERTS_ALC_INFO_A_MSEG_ALLOC;
  2962. goto save_alloc;
  2963. }
  2964. if (erts_is_atom_str("erts_mmap", alloc, 0)) {
  2965. ai = ERTS_ALC_INFO_A_ERTS_MMAP;
  2966. goto save_alloc;
  2967. }
  2968. if (erts_is_atom_str("alloc_util", alloc, 0)) {
  2969. ai = ERTS_ALC_INFO_A_ALLOC_UTIL;
  2970. save_alloc:
  2971. if (req_ai[ai])
  2972. return 0;
  2973. air->allocs[airix++] = ai;
  2974. req_ai[ai] = 1;
  2975. saved = 1;
  2976. }
  2977. if (!saved)
  2978. return 0;
  2979. alist = CDR(consp);
  2980. }
  2981. if (is_not_nil(alist))
  2982. return 0;
  2983. air->allocs[airix] = ERTS_ALC_A_INVALID;
  2984. erts_atomic32_init_nob(&air->refc,
  2985. (erts_aint32_t) erts_no_schedulers);
  2986. erts_proc_add_refc(c_p, (Sint) erts_no_schedulers);
  2987. if (erts_no_schedulers > 1)
  2988. erts_schedule_multi_misc_aux_work(1,
  2989. erts_no_schedulers,
  2990. reply_alloc_info,
  2991. (void *) air);
  2992. reply_alloc_info((void *) air);
  2993. return 1;
  2994. }
  2995. Eterm erts_alloc_set_dyn_param(Process* c_p, Eterm tuple)
  2996. {
  2997. ErtsAllocatorThrSpec_t *tspec;
  2998. ErtsAlcType_t ai;
  2999. Allctr_t* allctr;
  3000. Eterm* tp;
  3001. Eterm res;
  3002. if (!is_tuple_arity(tuple, 3))
  3003. goto badarg;
  3004. tp = tuple_val(tuple);
  3005. /*
  3006. * Ex: {ets_alloc, sbct, 256000}
  3007. */
  3008. if (!is_atom(tp[1]) || !is_atom(tp[2]) || !is_integer(tp[3]))
  3009. goto badarg;
  3010. for (ai = ERTS_ALC_A_MIN; ai <= ERTS_ALC_A_MAX; ai++)
  3011. if (erts_is_atom_str(erts_alc_a2ad[ai], tp[1], 0))
  3012. break;
  3013. if (ai > ERTS_ALC_A_MAX)
  3014. goto badarg;
  3015. if (!erts_allctrs_info[ai].enabled ||
  3016. !erts_allctrs_info[ai].alloc_util) {
  3017. return am_notsup;
  3018. }
  3019. if (tp[2] == am_sbct) {
  3020. Uint sbct;
  3021. int i, ok;
  3022. if (!term_to_Uint(tp[3], &sbct))
  3023. goto badarg;
  3024. tspec = &erts_allctr_thr_spec[ai];
  3025. if (tspec->enabled) {
  3026. ok = 0;
  3027. for (i = 0; i < tspec->size; i++) {
  3028. allctr = tspec->allctr[i];
  3029. ok |= allctr->try_set_dyn_param(allctr, am_sbct, sbct);
  3030. }
  3031. }
  3032. else {
  3033. allctr = erts_allctrs_info[ai].extra;
  3034. ok = allctr->try_set_dyn_param(allctr, am_sbct, sbct);
  3035. }
  3036. return ok ? am_ok : am_notsup;
  3037. }
  3038. return am_notsup;
  3039. badarg:
  3040. ERTS_BIF_PREP_ERROR(res, c_p, EXC_BADARG);
  3041. return res;
  3042. }
  3043. /*
  3044. * The allocator wrapper prelocking stuff below is about the locking order.
  3045. * It only affects wrappers (erl_mtrace.c) that keep locks during
  3046. * alloc/realloc/free.
  3047. *
3048. * Some query functions in erl_alloc_util.c lock the allocator mutex and then
3049. * use erts_printf, which in turn may call the sys allocator through the wrappers.
3050. * To avoid breaking the locking order, these query functions first "pre-lock" all
3051. * allocator wrappers.
  3052. */
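/*
 * Minimal sketch (not actual emulator code) of the ordering a query function
 * is expected to follow, assuming a hypothetical dump_allctr_info() helper
 * that may end up calling erts_printf():
 *
 *     erts_allctr_wrapper_pre_lock();    -- all wrapper locks taken first
 *     erts_mtx_lock(&allctr->mutex);     -- then the allocator mutex
 *     dump_allctr_info(allctr);          -- may reach sys_alloc via a wrapper
 *     erts_mtx_unlock(&allctr->mutex);
 *     erts_allctr_wrapper_pre_unlock();  -- wrapper locks released last
 */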
  3053. ErtsAllocatorWrapper_t *erts_allctr_wrappers;
  3054. int erts_allctr_wrapper_prelocked = 0;
  3055. erts_tsd_key_t erts_allctr_prelock_tsd_key;
  3056. void erts_allctr_wrapper_prelock_init(ErtsAllocatorWrapper_t* wrapper)
  3057. {
  3058. ASSERT(wrapper->lock && wrapper->unlock);
  3059. wrapper->next = erts_allctr_wrappers;
  3060. erts_allctr_wrappers = wrapper;
  3061. }
  3062. void erts_allctr_wrapper_pre_lock(void)
  3063. {
  3064. if (erts_allctr_wrappers) {
  3065. ErtsAllocatorWrapper_t* wrapper = erts_allctr_wrappers;
  3066. for ( ; wrapper; wrapper = wrapper->next) {
  3067. wrapper->lock();
  3068. }
  3069. ASSERT(!erts_allctr_wrapper_prelocked);
  3070. erts_allctr_wrapper_prelocked = 1;
  3071. erts_tsd_set(erts_allctr_prelock_tsd_key, (void*)1);
  3072. }
  3073. }
  3074. void erts_allctr_wrapper_pre_unlock(void)
  3075. {
  3076. if (erts_allctr_wrappers) {
  3077. ErtsAllocatorWrapper_t* wrapper = erts_allctr_wrappers;
  3078. erts_allctr_wrapper_prelocked = 0;
  3079. erts_tsd_set(erts_allctr_prelock_tsd_key, (void*)0);
  3080. for ( ; wrapper; wrapper = wrapper->next) {
  3081. wrapper->unlock();
  3082. }
  3083. }
  3084. }
  3085. /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
  3086. * NOTE: erts_alc_test() is only supposed to be used for testing. *
  3087. * *
  3088. * Keep alloc_SUITE_data/allocator_test.h updated if changes are made *
  3089. * to erts_alc_test() *
  3090. \* */
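/* The high byte of op selects the subsystem under test: 0x0 alloc_util,
 * 0x1 goodfit, 0x2 bestfit, 0x3 afit, 0x4 mseg, 0x5 aoff first fit, and
 * 0xf the local operations below. For example, op 0x101 is dispatched as
 * erts_gfalc_test(0x101, a1, a2). */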
  3091. #define ERTS_ALC_TEST_ABORT erts_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error\n")
  3092. UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
  3093. {
  3094. switch (op >> 8) {
  3095. case 0x0: return erts_alcu_test(op, a1, a2);
  3096. case 0x1: return erts_gfalc_test(op, a1, a2);
  3097. case 0x2: return erts_bfalc_test(op, a1, a2);
  3098. case 0x3: return erts_afalc_test(op, a1, a2);
  3099. case 0x4: return erts_mseg_test(op, a1, a2, a3);
  3100. case 0x5: return erts_aoffalc_test(op, a1, a2);
  3101. case 0xf:
  3102. switch (op) {
  3103. case 0xf00:
  3104. if (((Allctr_t *) a1)->thread_safe)
  3105. return (UWord) erts_alcu_alloc_ts(ERTS_ALC_T_TEST,
  3106. (void *) a1,
  3107. (Uint) a2);
  3108. else
  3109. return (UWord) erts_alcu_alloc(ERTS_ALC_T_TEST,
  3110. (void *) a1,
  3111. (Uint) a2);
  3112. case 0xf01:
  3113. if (((Allctr_t *) a1)->thread_safe)
  3114. return (UWord) erts_alcu_realloc_ts(ERTS_ALC_T_TEST,
  3115. (void *) a1,
  3116. (void *) a2,
  3117. (Uint) a3);
  3118. else
  3119. return (UWord) erts_alcu_realloc(ERTS_ALC_T_TEST,
  3120. (void *) a1,
  3121. (void *) a2,
  3122. (Uint) a3);
  3123. case 0xf02:
  3124. if (((Allctr_t *) a1)->thread_safe)
  3125. erts_alcu_free_ts(ERTS_ALC_T_TEST, (void *) a1, (void *) a2);
  3126. else
  3127. erts_alcu_free(ERTS_ALC_T_TEST, (void *) a1, (void *) a2);
  3128. return 0;
  3129. case 0xf03: {
  3130. Allctr_t *allctr;
  3131. struct au_init init;
  3132. SET_DEFAULT_ALLOC_OPTS(&init);
  3133. init.enable = 1;
  3134. init.astrat = ERTS_ALC_S_GOODFIT;
  3135. init.init.util.name_prefix = (char *) a1;
  3136. init.init.util.alloc_no = ERTS_ALC_A_TEST;
  3137. init.init.util.alloc_strat = init.astrat;
  3138. init.init.util.ts = 1;
  3139. if ((char **) a3) {
  3140. char **argv = (char **) a3;
  3141. int i = 0;
  3142. while (argv[i]) {
  3143. if (argv[i][0] == '-' && argv[i][1] == 't')
  3144. handle_au_arg(&init, &argv[i][2], argv, &i, 0);
  3145. else
  3146. return (UWord) NULL;
  3147. i++;
  3148. }
  3149. }
  3150. switch (init.astrat) {
  3151. case ERTS_ALC_S_GOODFIT:
  3152. allctr = erts_gfalc_start((GFAllctr_t *)
  3153. erts_alloc(ERTS_ALC_T_TEST,
  3154. sizeof(GFAllctr_t)),
  3155. &init.init.gf,
  3156. &init.init.util);
  3157. break;
  3158. case ERTS_ALC_S_BESTFIT:
  3159. allctr = erts_bfalc_start((BFAllctr_t *)
  3160. erts_alloc(ERTS_ALC_T_TEST,
  3161. sizeof(BFAllctr_t)),
  3162. &init.init.bf,
  3163. &init.init.util);
  3164. break;
  3165. case ERTS_ALC_S_AFIT:
  3166. allctr = erts_afalc_start((AFAllctr_t *)
  3167. erts_alloc(ERTS_ALC_T_TEST,
  3168. sizeof(AFAllctr_t)),
  3169. &init.init.af,
  3170. &init.init.util);
  3171. break;
  3172. case ERTS_ALC_S_FIRSTFIT:
  3173. allctr = erts_aoffalc_start((AOFFAllctr_t *)
  3174. erts_alloc(ERTS_ALC_T_TEST,
  3175. sizeof(AOFFAllctr_t)),
  3176. &init.init.aoff,
  3177. &init.init.util);
  3178. break;
  3179. default:
  3180. ASSERT(0);
  3181. allctr = NULL;
  3182. break;
  3183. }
  3184. return (UWord) allctr;
  3185. }
  3186. case 0xf04:
  3187. erts_alcu_stop((Allctr_t *) a1);
  3188. erts_free(ERTS_ALC_T_TEST, (void *) a1);
  3189. break;
  3190. case 0xf05: return (UWord) 1;
  3191. case 0xf06: return (UWord) ((Allctr_t *) a1)->thread_safe;
  3192. #ifdef ETHR_NO_FORKSAFETY
  3193. case 0xf07: return (UWord) 0;
  3194. #else
  3195. case 0xf07: return (UWord) ((Allctr_t *) a1)->thread_safe;
  3196. #endif
  3197. case 0xf08: {
  3198. ethr_mutex *mtx = erts_alloc(ERTS_ALC_T_TEST, sizeof(ethr_mutex));
  3199. if (ethr_mutex_init(mtx) != 0)
  3200. ERTS_ALC_TEST_ABORT;
  3201. return (UWord) mtx;
  3202. }
  3203. case 0xf09: {
  3204. ethr_mutex *mtx = (ethr_mutex *) a1;
  3205. if (ethr_mutex_destroy(mtx) != 0)
  3206. ERTS_ALC_TEST_ABORT;
  3207. erts_free(ERTS_ALC_T_TEST, (void *) mtx);
  3208. break;
  3209. }
  3210. case 0xf0a:
  3211. ethr_mutex_lock((ethr_mutex *) a1);
  3212. break;
  3213. case 0xf0b:
  3214. ethr_mutex_unlock((ethr_mutex *) a1);
  3215. break;
  3216. case 0xf0c: {
  3217. ethr_cond *cnd = erts_alloc(ERTS_ALC_T_TEST, sizeof(ethr_cond));
  3218. if (ethr_cond_init(cnd) != 0)
  3219. ERTS_ALC_TEST_ABORT;
  3220. return (UWord) cnd;
  3221. }
  3222. case 0xf0d: {
  3223. ethr_cond *cnd = (ethr_cond *) a1;
  3224. if (ethr_cond_destroy(cnd) != 0)
  3225. ERTS_ALC_TEST_ABORT;
  3226. erts_free(ERTS_ALC_T_TEST, (void *) cnd);
  3227. break;
  3228. }
  3229. case 0xf0e:
  3230. ethr_cond_broadcast((ethr_cond *) a1);
  3231. break;
  3232. case 0xf0f: {
  3233. int res;
  3234. do {
  3235. res = ethr_cond_wait((ethr_cond *) a1, (ethr_mutex *) a2);
  3236. } while (res == EINTR);
  3237. break;
  3238. }
  3239. case 0xf10: {
  3240. ethr_tid *tid = erts_alloc(ERTS_ALC_T_TEST, sizeof(ethr_tid));
  3241. if (ethr_thr_create(tid,
  3242. (void * (*)(void *)) a1,
  3243. (void *) a2,
  3244. NULL) != 0)
  3245. ERTS_ALC_TEST_ABORT;
  3246. return (UWord) tid;
  3247. }
  3248. case 0xf11: {
  3249. ethr_tid *tid = (ethr_tid *) a1;
  3250. if (ethr_thr_join(*tid, NULL) != 0)
  3251. ERTS_ALC_TEST_ABORT;
  3252. erts_free(ERTS_ALC_T_TEST, (void *) tid);
  3253. break;
  3254. }
  3255. case 0xf12:
  3256. ethr_thr_exit((void *) a1);
  3257. ERTS_ALC_TEST_ABORT;
  3258. break;
  3259. case 0xf13: return (UWord) 1;
  3260. case 0xf14: return (UWord) erts_alloc(ERTS_ALC_T_TEST, (Uint)a1);
  3261. case 0xf15: erts_free(ERTS_ALC_T_TEST, (void*)a1); return 0;
  3262. case 0xf16: return (UWord) erts_realloc(ERTS_ALC_T_TEST, (void*)a1, (Uint)a2);
  3263. case 0xf17: {
  3264. Uint extra_hdr_sz = UNIT_CEILING((Uint)a1);
  3265. ErtsAllocatorThrSpec_t* ts = &erts_allctr_thr_spec[ERTS_ALC_A_TEST];
  3266. Uint offset = ts->allctr[0]->mbc_header_size;
  3267. void* orig_creating_mbc = ts->allctr[0]->creating_mbc;
  3268. void* orig_destroying_mbc = ts->allctr[0]->destroying_mbc;
  3269. void* new_creating_mbc = *(void**)a2; /* inout arg */
  3270. void* new_destroying_mbc = *(void**)a3; /* inout arg */
  3271. int i;
  3272. for (i=0; i < ts->size; i++) {
  3273. Allctr_t* ap = ts->allctr[i];
  3274. if (ap->mbc_header_size != offset
  3275. || ap->creating_mbc != orig_creating_mbc
  3276. || ap->destroying_mbc != orig_destroying_mbc
  3277. || ap->mbc_list.first != NULL)
  3278. return -1;
  3279. }
  3280. for (i=0; i < ts->size; i++) {
  3281. ts->allctr[i]->mbc_header_size += extra_hdr_sz;
  3282. ts->allctr[i]->creating_mbc = new_creating_mbc;
  3283. ts->allctr[i]->destroying_mbc = new_destroying_mbc;
  3284. }
  3285. *(void**)a2 = orig_creating_mbc;
  3286. *(void**)a3 = orig_destroying_mbc;
  3287. return offset;
  3288. }
  3289. case 0xf18: {
  3290. ErtsAllocatorThrSpec_t* ts = &erts_allctr_thr_spec[ERTS_ALC_A_TEST];
  3291. return ts->allctr[0]->largest_mbc_size;
  3292. }
  3293. default:
  3294. break;
  3295. }
  3296. return (UWord) 0;
  3297. default:
  3298. break;
  3299. }
  3300. ASSERT(0);
  3301. return ~((UWord) 0);
  3302. }
  3303. #ifdef DEBUG
  3304. /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
  3305. * Debug stuff *
  3306. \* */
  3307. #if 0
  3308. #define PRINT_OPS
  3309. #else
  3310. #undef PRINT_OPS
  3311. #endif
  3312. #ifdef HARD_DEBUG
  3313. #define FENCE_SZ (4*sizeof(UWord))
  3314. #else
  3315. #define FENCE_SZ (3*sizeof(UWord))
  3316. #endif
  3317. #if defined(ARCH_64)
  3318. #define FENCE_PATTERN 0xABCDEF97ABCDEF97
  3319. #else
  3320. #define FENCE_PATTERN 0xABCDEF97
  3321. #endif
  3322. #define TYPE_PATTERN_MASK ERTS_ALC_N_MASK
  3323. #define TYPE_PATTERN_SHIFT 16
  3324. #define FIXED_FENCE_PATTERN_MASK \
  3325. (~((UWord) (TYPE_PATTERN_MASK << TYPE_PATTERN_SHIFT)))
  3326. #define FIXED_FENCE_PATTERN \
  3327. (FENCE_PATTERN & FIXED_FENCE_PATTERN_MASK)
  3328. #define MK_PATTERN(T) \
  3329. (FIXED_FENCE_PATTERN | (((T) & TYPE_PATTERN_MASK) << TYPE_PATTERN_SHIFT))
  3330. #define GET_TYPE_OF_PATTERN(P) \
  3331. (((P) >> TYPE_PATTERN_SHIFT) & TYPE_PATTERN_MASK)
  3332. #ifdef HARD_DEBUG
  3333. #define ERL_ALC_HDBG_MAX_MBLK 100000
  3334. #define ERTS_ALC_O_CHECK -1
  3335. typedef struct hdbg_mblk_ hdbg_mblk;
  3336. struct hdbg_mblk_ {
  3337. hdbg_mblk *next;
  3338. hdbg_mblk *prev;
  3339. void *p;
  3340. Uint s;
  3341. ErtsAlcType_t n;
  3342. };
  3343. static hdbg_mblk hdbg_mblks[ERL_ALC_HDBG_MAX_MBLK];
  3344. static hdbg_mblk *free_hdbg_mblks;
  3345. static hdbg_mblk *used_hdbg_mblks;
  3346. static erts_mtx_t hdbg_mblk_mtx;
  3347. static void
  3348. hdbg_init(void)
  3349. {
  3350. int i;
  3351. for (i = 0; i < ERL_ALC_HDBG_MAX_MBLK-1; i++)
  3352. hdbg_mblks[i].next = &hdbg_mblks[i+1];
  3353. hdbg_mblks[ERL_ALC_HDBG_MAX_MBLK-1].next = NULL;
  3354. free_hdbg_mblks = &hdbg_mblks[0];
  3355. used_hdbg_mblks = NULL;
  3356. erts_mtx_init(&hdbg_mblk_mtx, "erts_alloc_hard_debug", NIL,
  3357. ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
  3358. }
  3359. static void *check_memory_fence(void *ptr,
  3360. Uint *size,
  3361. ErtsAlcType_t n,
  3362. int func);
  3363. void erts_hdbg_chk_blks(void);
  3364. void
  3365. erts_hdbg_chk_blks(void)
  3366. {
  3367. hdbg_mblk *mblk;
  3368. erts_mtx_lock(&hdbg_mblk_mtx);
  3369. for (mblk = used_hdbg_mblks; mblk; mblk = mblk->next) {
  3370. Uint sz;
  3371. check_memory_fence(mblk->p, &sz, mblk->n, ERTS_ALC_O_CHECK);
  3372. ASSERT(sz == mblk->s);
  3373. }
  3374. erts_mtx_unlock(&hdbg_mblk_mtx);
  3375. }
  3376. static hdbg_mblk *
  3377. hdbg_alloc(void *p, Uint s, ErtsAlcType_t n)
  3378. {
  3379. hdbg_mblk *mblk;
  3380. erts_mtx_lock(&hdbg_mblk_mtx);
  3381. mblk = free_hdbg_mblks;
  3382. if (!mblk) {
  3383. erts_fprintf(stderr,
  3384. "Ran out of debug blocks; please increase "
  3385. "ERL_ALC_HDBG_MAX_MBLK=%d and recompile!\n",
  3386. ERL_ALC_HDBG_MAX_MBLK);
  3387. abort();
  3388. }
  3389. free_hdbg_mblks = mblk->next;
  3390. mblk->p = p;
  3391. mblk->s = s;
  3392. mblk->n = n;
  3393. mblk->next = used_hdbg_mblks;
  3394. mblk->prev = NULL;
  3395. if (used_hdbg_mblks)
  3396. used_hdbg_mblks->prev = mblk;
  3397. used_hdbg_mblks = mblk;
  3398. erts_mtx_unlock(&hdbg_mblk_mtx);
  3399. return (void *) mblk;
  3400. }
  3401. static void
  3402. hdbg_free(hdbg_mblk *mblk)
  3403. {
  3404. erts_mtx_lock(&hdbg_mblk_mtx);
  3405. if (mblk->next)
  3406. mblk->next->prev = mblk->prev;
  3407. if (mblk->prev)
  3408. mblk->prev->next = mblk->next;
  3409. else
  3410. used_hdbg_mblks = mblk->next;
  3411. mblk->next = free_hdbg_mblks;
  3412. free_hdbg_mblks = mblk;
  3413. erts_mtx_unlock(&hdbg_mblk_mtx);
  3414. }
  3415. #endif
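/*
 * Hooks used when alloc_util itself is built with hard-debug checking:
 * check_allocated_block() verifies a single block's fence, and
 * check_allocators() asks every alloc_util allocator instance to
 * self-check its multiblock carriers (taking the allocator mutex when
 * the instance is thread safe).
 */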
  3416. #ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
  3417. static void *check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func);
3418. void check_allocated_block(Uint type, void *blk)
  3419. {
  3420. Uint dummy;
  3421. check_memory_fence(blk, &dummy, ERTS_ALC_T2N(type), ERTS_ALC_O_FREE);
  3422. }
  3423. void check_allocators(void)
  3424. {
  3425. int i;
  3426. if (!erts_initialized)
  3427. return;
  3428. for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; ++i) {
  3429. if (erts_allctrs_info[i].alloc_util) {
  3430. ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) erts_allctrs[i].extra;
  3431. Allctr_t *allctr = real_af->extra;
  3432. Carrier_t *ct;
  3433. if (allctr->thread_safe)
  3434. erts_mtx_lock(&allctr->mutex);
  3435. if (allctr->check_mbc) {
  3436. for (ct = allctr->mbc_list.first; ct; ct = ct->next) {
  3437. fprintf(stderr,"Checking allocator %d\r\n",i);
  3438. allctr->check_mbc(allctr,ct);
  3439. }
  3440. }
  3441. if (allctr->thread_safe)
  3442. erts_mtx_unlock(&allctr->mutex);
  3443. }
  3444. }
  3445. }
  3446. #endif
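/*
 * Write the debug fence around a freshly allocated block: the header
 * words (size and pre-pattern, plus the hdbg_mblk pointer under
 * HARD_DEBUG) go in front of the block, and the pattern word is copied
 * byte-wise after it. Returns the pointer handed to the caller, i.e.
 * the address just past the header words.
 */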
  3447. static void *
  3448. set_memory_fence(void *ptr, Uint sz, ErtsAlcType_t n)
  3449. {
  3450. UWord *ui_ptr;
  3451. UWord pattern;
  3452. #ifdef HARD_DEBUG
  3453. hdbg_mblk **mblkpp;
  3454. #endif
  3455. if (!ptr)
  3456. return NULL;
  3457. ui_ptr = (UWord *) ptr;
  3458. pattern = MK_PATTERN(n);
  3459. #ifdef HARD_DEBUG
  3460. mblkpp = (hdbg_mblk **) ui_ptr++;
  3461. #endif
  3462. *(ui_ptr++) = sz;
  3463. *(ui_ptr++) = pattern;
  3464. sys_memcpy((void *) (((char *) ui_ptr)+sz), (void *) &pattern, sizeof(UWord));
  3465. #ifdef HARD_DEBUG
  3466. *mblkpp = hdbg_alloc((void *) ui_ptr, sz, n);
  3467. #endif
  3468. return (void *) ui_ptr;
  3469. }
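/*
 * Validate the fence around a block on realloc/free (and, under
 * HARD_DEBUG, on explicit checks). On any mismatch the emulator is
 * aborted with a diagnostic; otherwise the original block size is
 * reported through *size and the real (unshifted) allocation pointer
 * is returned so it can be passed on to the underlying allocator.
 */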
  3470. static void *
  3471. check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
  3472. {
  3473. Uint sz;
  3474. Uint found_type;
  3475. UWord pre_pattern, expected_pattern;
  3476. UWord post_pattern;
  3477. UWord *ui_ptr;
  3478. #ifdef HARD_DEBUG
  3479. hdbg_mblk *mblk;
  3480. #endif
  3481. if (!ptr)
  3482. return NULL;
  3483. expected_pattern = MK_PATTERN(n);
  3484. ui_ptr = (UWord *) ptr;
  3485. pre_pattern = *(--ui_ptr);
  3486. *size = sz = *(--ui_ptr);
  3487. #ifdef HARD_DEBUG
  3488. mblk = (hdbg_mblk *) *(--ui_ptr);
  3489. #endif
  3490. found_type = GET_TYPE_OF_PATTERN(pre_pattern);
  3491. if (found_type != n) {
3492. erts_exit(ERTS_ABORT_EXIT, "ERROR: Mismatching allocator types"
3493.           " used in alloc and free\n");
  3494. }
  3495. if (pre_pattern != expected_pattern) {
  3496. if ((FIXED_FENCE_PATTERN_MASK & pre_pattern) != FIXED_FENCE_PATTERN)
  3497. erts_exit(ERTS_ABORT_EXIT,
  3498. "ERROR: Fence at beginning of memory block (p=0x%u) "
  3499. "clobbered.\n",
  3500. (UWord) ptr);
  3501. }
  3502. sys_memcpy((void *) &post_pattern, (void *) (((char *)ptr)+sz), sizeof(UWord));
  3503. if (post_pattern != expected_pattern || pre_pattern != post_pattern) {
  3504. char fbuf[10];
  3505. char obuf[10];
  3506. char *ftype;
  3507. char *otype;
  3508. char *op_str;
  3509. if ((FIXED_FENCE_PATTERN_MASK & post_pattern) != FIXED_FENCE_PATTERN)
  3510. erts_exit(ERTS_ABORT_EXIT,
  3511. "ERROR: Fence at end of memory block (p=0x%u, sz=%u) "
  3512. "clobbered.\n",
  3513. (UWord) ptr, (UWord) sz);
  3514. if (found_type != GET_TYPE_OF_PATTERN(post_pattern))
  3515. erts_exit(ERTS_ABORT_EXIT,
  3516. "ERROR: Fence around memory block (p=0x%u, sz=%u) "
  3517. "clobbered.\n",
  3518. (UWord) ptr, (UWord) sz);
  3519. ftype = type_no_str(found_type);
  3520. if (!ftype) {
  3521. erts_snprintf(fbuf, sizeof(fbuf), "%d", (int) found_type);
  3522. ftype = fbuf;
  3523. }
  3524. otype = type_no_str(n);
  3525. if (!otype) {
  3526. erts_snprintf(obuf, sizeof(obuf), "%d", (int) n);
  3527. otype = obuf;
  3528. }
  3529. switch (func) {
  3530. case ERTS_ALC_O_ALLOC: op_str = "allocated"; break;
  3531. case ERTS_ALC_O_REALLOC: op_str = "reallocated"; break;
  3532. case ERTS_ALC_O_FREE: op_str = "freed"; break;
  3533. default: op_str = "???"; break;
  3534. }
  3535. erts_exit(ERTS_ABORT_EXIT,
  3536. "ERROR: Memory block (p=0x%u, sz=%u) allocated as type \"%s\","
  3537. " but %s as type \"%s\".\n",
  3538. (UWord) ptr, (UWord) sz, ftype, op_str, otype);
  3539. }
  3540. #ifdef HARD_DEBUG
  3541. switch (func) {
  3542. case ERTS_ALC_O_REALLOC:
  3543. case ERTS_ALC_O_FREE:
  3544. hdbg_free(mblk);
  3545. break;
  3546. default:
  3547. break;
  3548. }
  3549. #endif
  3550. return (void *) ui_ptr;
  3551. }
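/*
 * Debug wrappers installed by install_debug_functions() below. Each
 * wrapper forwards to the real allocator saved in real_allctrs[],
 * adding FENCE_SZ bytes to every request and placing/validating the
 * fence around the block (a summary of the code that follows, for
 * orientation).
 */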
  3552. static ErtsAllocatorFunctions_t real_allctrs[ERTS_ALC_A_MAX+1];
  3553. static void *
  3554. debug_alloc(ErtsAlcType_t type, void *extra, Uint size)
  3555. {
  3556. ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
  3557. ErtsAlcType_t n;
  3558. Uint dsize;
  3559. void *res;
  3560. #ifdef HARD_DEBUG
  3561. erts_hdbg_chk_blks();
  3562. #endif
  3563. n = ERTS_ALC_T2N(type);
  3564. ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);
  3565. dsize = size + FENCE_SZ;
  3566. res = (*real_af->alloc)(type, real_af->extra, dsize);
  3567. res = set_memory_fence(res, size, n);
  3568. #ifdef PRINT_OPS
  3569. fprintf(stderr, "0x%lx = alloc(%s, %lu)\r\n",
  3570. (Uint) res, ERTS_ALC_N2TD(n), size);
  3571. #endif
  3572. return res;
  3573. }
  3574. static void *
  3575. debug_realloc(ErtsAlcType_t type, void *extra, void *ptr, Uint size)
  3576. {
  3577. ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
  3578. ErtsAlcType_t n;
  3579. Uint dsize;
  3580. Uint old_size;
  3581. void *dptr;
  3582. void *res;
  3583. n = ERTS_ALC_T2N(type);
  3584. ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);
  3585. dsize = size + FENCE_SZ;
  3586. dptr = check_memory_fence(ptr, &old_size, n, ERTS_ALC_O_REALLOC);
  3587. #ifdef HARD_DEBUG
  3588. erts_hdbg_chk_blks();
  3589. #endif
  3590. if (ptr && old_size > size)
  3591. sys_memset((void *) (((char *) ptr) + size),
  3592. 0xf,
  3593. sizeof(Uint) + old_size - size);
  3594. res = (*real_af->realloc)(type, real_af->extra, dptr, dsize);
  3595. res = set_memory_fence(res, size, n);
  3596. #ifdef PRINT_OPS
  3597. fprintf(stderr, "0x%lx = realloc(%s, 0x%lx, %lu)\r\n",
  3598. (Uint) res, ERTS_ALC_N2TD(n), (Uint) ptr, size);
  3599. #endif
  3600. return res;
  3601. }
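/*
 * On free the whole block, including its fence words, is overwritten
 * with a byte pattern derived from the allocation type, presumably to
 * make use-after-free reads stand out.
 */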
  3602. static void
  3603. debug_free(ErtsAlcType_t type, void *extra, void *ptr)
  3604. {
  3605. ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
  3606. ErtsAlcType_t n;
  3607. void *dptr;
  3608. Uint size;
  3609. int free_pattern;
  3610. n = ERTS_ALC_T2N(type);
  3611. free_pattern = n;
  3612. ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);
  3613. if (!ptr)
  3614. return;
  3615. dptr = check_memory_fence(ptr, &size, n, ERTS_ALC_O_FREE);
  3616. sys_memset((void *) dptr, free_pattern, size + FENCE_SZ);
  3617. (*real_af->free)(type, real_af->extra, dptr);
  3618. #ifdef PRINT_OPS
  3619. fprintf(stderr, "free(%s, 0x%lx)\r\n", ERTS_ALC_N2TD(n), (Uint) ptr);
  3620. #endif
  3621. #ifdef HARD_DEBUG
  3622. erts_hdbg_chk_blks();
  3623. #endif
  3624. }
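/*
 * Redirect all allocator entry points through the debug wrappers. The
 * original dispatch table is copied into real_allctrs[] first, and the
 * returned FENCE_SZ value lets the caller account for the per-block
 * overhead added by the fences.
 */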
  3625. static Uint
  3626. install_debug_functions(void)
  3627. {
  3628. int i;
  3629. ERTS_CT_ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));
  3630. sys_memcpy((void *)real_allctrs,(void *)erts_allctrs,sizeof(erts_allctrs));
  3631. for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
  3632. erts_allctrs[i].alloc = debug_alloc;
  3633. erts_allctrs[i].realloc = debug_realloc;
  3634. erts_allctrs[i].free = debug_free;
  3635. erts_allctrs[i].extra = (void *) &real_allctrs[i];
  3636. }
  3637. return FENCE_SZ;
  3638. }
  3639. #endif /* #ifdef DEBUG */