PageRenderTime 35ms CodeModel.GetById 18ms RepoModel.GetById 0ms app.codeStats 1ms

/erts/emulator/beam/erl_alloc.c

https://github.com/erlang/otp
C | 4098 lines | 3526 code | 456 blank | 116 comment | 604 complexity | 6b7fd8ea08df64813bd3ef9cbaaf3a6e MD5 | raw file
Possible License(s): BSD-3-Clause, Apache-2.0, Unlicense, LGPL-2.1
  1. /*
  2. * %CopyrightBegin%
  3. *
  4. * Copyright Ericsson AB 2002-2020. All Rights Reserved.
  5. *
  6. * Licensed under the Apache License, Version 2.0 (the "License");
  7. * you may not use this file except in compliance with the License.
  8. * You may obtain a copy of the License at
  9. *
  10. * http://www.apache.org/licenses/LICENSE-2.0
  11. *
  12. * Unless required by applicable law or agreed to in writing, software
  13. * distributed under the License is distributed on an "AS IS" BASIS,
  14. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15. * See the License for the specific language governing permissions and
  16. * limitations under the License.
  17. *
  18. * %CopyrightEnd%
  19. */
  20. /*
  21. * Description: Management of memory allocators.
  22. *
  23. * Author: Rickard Green
  24. */
  25. #ifdef HAVE_CONFIG_H
  26. # include "config.h"
  27. #endif
  28. #define ERTS_ALLOC_C__
  29. #define ERTS_ALC_INTERNAL__
  30. #define ERTS_WANT_MEM_MAPPERS
  31. #include "sys.h"
  32. #define ERL_THREADS_EMU_INTERNAL__
  33. #include "erl_threads.h"
  34. #include "global.h"
  35. #include "erl_db.h"
  36. #include "erl_binary.h"
  37. #include "erl_bits.h"
  38. #include "erl_mtrace.h"
  39. #include "erl_mseg.h"
  40. #include "erl_monitor_link.h"
  41. #include "erl_hl_timer.h"
  42. #include "erl_cpu_topology.h"
  43. #include "erl_thr_queue.h"
  44. #include "erl_nfunc_sched.h"
  45. #if defined(ERTS_ALC_T_DRV_SEL_D_STATE) || defined(ERTS_ALC_T_DRV_EV_D_STATE)
  46. #include "erl_check_io.h"
  47. #endif
  48. #include "erl_bif_unique.h"
  49. #define GET_ERL_GF_ALLOC_IMPL
  50. #include "erl_goodfit_alloc.h"
  51. #define GET_ERL_BF_ALLOC_IMPL
  52. #include "erl_bestfit_alloc.h"
  53. #define GET_ERL_AF_ALLOC_IMPL
  54. #include "erl_afit_alloc.h"
  55. #define GET_ERL_AOFF_ALLOC_IMPL
  56. #include "erl_ao_firstfit_alloc.h"
  57. #if ERTS_MAX_NO_OF_SCHEDULERS > ERTS_AU_MAX_PREF_ALLOC_INSTANCES
  58. # error "Too many schedulers; cannot create that many pref alloc instances"
  59. #endif
  60. #define ERTS_ALC_DEFAULT_MAX_THR_PREF ERTS_MAX_NO_OF_SCHEDULERS
  61. #if defined(SMALL_MEMORY) || defined(VALGRIND) || defined(ADDRESS_SANITIZER)
  62. #define AU_ALLOC_DEFAULT_ENABLE(X) 0
  63. #else
  64. #define AU_ALLOC_DEFAULT_ENABLE(X) (X)
  65. #endif
  66. #define ERTS_ALC_DEFAULT_ENABLED_ACUL 60
  67. #define ERTS_ALC_DEFAULT_ENABLED_ACUL_EHEAP_ALLOC 45
  68. #define ERTS_ALC_DEFAULT_ENABLED_ACUL_LL_ALLOC 85
  69. #define ERTS_ALC_DEFAULT_ACUL ERTS_ALC_DEFAULT_ENABLED_ACUL
  70. #define ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC ERTS_ALC_DEFAULT_ENABLED_ACUL_EHEAP_ALLOC
  71. #define ERTS_ALC_DEFAULT_ACUL_LL_ALLOC ERTS_ALC_DEFAULT_ENABLED_ACUL_LL_ALLOC
  72. #ifdef DEBUG
  73. static Uint install_debug_functions(void);
  74. #if 0
  75. #define HARD_DEBUG
  76. #ifdef __GNUC__
  77. #warning "* * * * * * * * * * * * * *"
  78. #warning "* HARD DEBUG IS ENABLED! *"
  79. #warning "* * * * * * * * * * * * * *"
  80. #endif
  81. #endif
  82. #endif
/* When non-zero (set during argument handling), all physical memory is
 * locked with mlockall() in erts_alloc_init(). */
static int lock_all_physical_memory = 0;

/* Allocator callback table, info table, and thread-specific/preferred
 * instance table; all indexed by allocator number (ERTS_ALC_A_*). */
ErtsAllocatorFunctions_t ERTS_WRITE_UNLIKELY(erts_allctrs[ERTS_ALC_A_MAX+1]);
ErtsAllocatorInfo_t erts_allctrs_info[ERTS_ALC_A_MAX+1];
ErtsAllocatorThrSpec_t ERTS_WRITE_UNLIKELY(erts_allctr_thr_spec[ERTS_ALC_A_MAX+1]);

#define ERTS_MIN(A, B) ((A) < (B) ? (A) : (B))
#define ERTS_MAX(A, B) ((A) > (B) ? (A) : (B))

/* Backing storage for one allocator instance: a union over every concrete
 * allocator state type, each padded to a whole number of cache lines and
 * the union itself cache-line aligned to avoid false sharing. */
typedef union {
    GFAllctr_t gfa;
    char align_gfa[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(GFAllctr_t))];
    BFAllctr_t bfa;
    char align_bfa[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(BFAllctr_t))];
    AFAllctr_t afa;
    char align_afa[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(AFAllctr_t))];
    AOFFAllctr_t aoffa;
    char align_aoffa[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(AOFFAllctr_t))];
} ErtsAllocatorState_t erts_align_attribute(ERTS_CACHE_LINE_SIZE);

/* Statically allocated state for instance 0 of each built-in allocator;
 * passed to start_au_allocator() from erts_alloc_init(). */
static ErtsAllocatorState_t std_alloc_state;
static ErtsAllocatorState_t ll_alloc_state;
static ErtsAllocatorState_t sl_alloc_state;
static ErtsAllocatorState_t temp_alloc_state;
static ErtsAllocatorState_t eheap_alloc_state;
static ErtsAllocatorState_t binary_alloc_state;
static ErtsAllocatorState_t ets_alloc_state;
static ErtsAllocatorState_t driver_alloc_state;
static ErtsAllocatorState_t fix_alloc_state;
static ErtsAllocatorState_t literal_alloc_state;
static ErtsAllocatorState_t test_alloc_state;
/* Pseudo allocator numbers used only by the info/reporting machinery;
 * they extend the real ERTS_ALC_A_* range. */
enum {
    ERTS_ALC_INFO_A_ALLOC_UTIL = ERTS_ALC_A_MAX + 1,
    ERTS_ALC_INFO_A_MSEG_ALLOC,
    ERTS_ALC_INFO_A_ERTS_MMAP,
    ERTS_ALC_INFO_A_END
};

/* State for one asynchronous allocator-info request, shared between the
 * schedulers that collect per-instance data (refc counts outstanding
 * contributions; allocs[] selects which allocators to report on). */
typedef struct {
    erts_atomic32_t refc;       /* remaining scheduler contributions */
    int only_sz;                /* report sizes only */
    int internal;
    Uint req_sched;             /* scheduler id of the requester */
    Process *proc;              /* process to send the reply to */
    ErtsIRefStorage iref;
    int allocs[ERTS_ALC_INFO_A_END - ERTS_ALC_A_MIN + 1];
} ErtsAllocInfoReq;

/* Generates the aireq_alloc()/aireq_free() quick-alloc pool (5 preallocated
 * entries) used for ErtsAllocInfoReq structures. */
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(aireq,
                                 ErtsAllocInfoReq,
                                 5,
                                 ERTS_ALC_T_AINFO_REQ)

ErtsAlcType_t erts_fix_core_allocator_ix;
/* Start-up configuration for one alloc_util allocator, filled in by the
 * set_default_*_alloc_opts() functions and then adjusted by command-line
 * handling before the allocator is started. */
struct au_init {
    int enable;                     /* start this allocator at all? */
    int thr_spec;                   /* >0: thread specific; <0: thread preferred */
    int disable_allowed;            /* may be disabled via command line */
    int thr_spec_allowed;           /* thr_spec may be changed via command line */
    int carrier_migration_allowed;
    ErtsAlcStrat_t astrat;          /* allocation strategy */
    struct {
        AllctrInit_t util;          /* common alloc_util parameters */
        GFAllctrInit_t gf;          /* goodfit-specific */
        BFAllctrInit_t bf;          /* bestfit-specific */
        AFAllctrInit_t af;          /* afit-specific */
        AOFFAllctrInit_t aoff;      /* aoff/firstfit-specific */
    } init;
    struct {
        /* Flags: non-zero while the corresponding carrier-size parameter
         * still holds its default value (see adjust_tpref()). */
        int mmbcs;
        int lmbcs;
        int smbcs;
        int mmmbc;
    } default_;
};

/* Default initializer for the strategy-specific init sub-struct above. */
#define DEFAULT_ALLCTR_INIT { \
ERTS_DEFAULT_ALLCTR_INIT, \
ERTS_DEFAULT_GF_ALLCTR_INIT, \
ERTS_DEFAULT_BF_ALLCTR_INIT, \
ERTS_DEFAULT_AF_ALLCTR_INIT, \
ERTS_DEFAULT_AOFF_ALLCTR_INIT \
}
/* Aggregate of all allocator start-up configuration, built on the stack in
 * erts_alloc_init() and passed through handle_args() for command-line
 * adjustment. */
typedef struct {
    int erts_alloc_config;          /* running under erts_alloc_config? */
#if HAVE_ERTS_MSEG
    ErtsMsegInit_t mseg;
#endif
    int trim_threshold;             /* sys_alloc trim threshold */
    int top_pad;                    /* sys_alloc top pad */
    AlcUInit_t alloc_util;
    struct {
        char *mtrace;               /* memory-trace file name */
        char *nodename;
    } instr;
    struct au_init sl_alloc;
    struct au_init std_alloc;
    struct au_init ll_alloc;
    struct au_init temp_alloc;
    struct au_init eheap_alloc;
    struct au_init binary_alloc;
    struct au_init ets_alloc;
    struct au_init driver_alloc;
    struct au_init fix_alloc;
    struct au_init literal_alloc;
    struct au_init test_alloc;
} erts_alc_hndl_args_init_t;

/* Baseline au_init value: disabled, not thread specific, disable/thr_spec
 * changes and carrier migration allowed, goodfit strategy, default carrier
 * sizes still marked as defaults. */
#define ERTS_AU_INIT__ {0, 0, 1, 1, 1, \
ERTS_ALC_S_GOODFIT, DEFAULT_ALLCTR_INIT, \
{1,1,1,1}}

/* Reset *(IP) to ERTS_AU_INIT__ by copying a fresh baseline over it. */
#define SET_DEFAULT_ALLOC_OPTS(IP) \
do { \
struct au_init aui__ = ERTS_AU_INIT__; \
sys_memcpy((void *) (IP), (void *) &aui__, sizeof(struct au_init)); \
} while (0)
  190. static void
  191. set_default_sl_alloc_opts(struct au_init *ip)
  192. {
  193. SET_DEFAULT_ALLOC_OPTS(ip);
  194. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  195. ip->thr_spec = 1;
  196. ip->astrat = ERTS_ALC_S_GOODFIT;
  197. ip->init.util.name_prefix = "sl_";
  198. ip->init.util.alloc_no = ERTS_ALC_A_SHORT_LIVED;
  199. ip->init.util.cp = ERTS_ALC_A_SHORT_LIVED;
  200. #ifndef SMALL_MEMORY
  201. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  202. #else
  203. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  204. #endif
  205. ip->init.util.ts = ERTS_ALC_MTA_SHORT_LIVED;
  206. ip->init.util.rsbcst = 80;
  207. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
  208. }
  209. static void
  210. set_default_std_alloc_opts(struct au_init *ip)
  211. {
  212. SET_DEFAULT_ALLOC_OPTS(ip);
  213. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  214. ip->thr_spec = 1;
  215. ip->astrat = ERTS_ALC_S_BESTFIT;
  216. ip->init.util.name_prefix = "std_";
  217. ip->init.util.alloc_no = ERTS_ALC_A_STANDARD;
  218. ip->init.util.cp = ERTS_ALC_A_STANDARD;
  219. #ifndef SMALL_MEMORY
  220. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  221. #else
  222. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  223. #endif
  224. ip->init.util.ts = ERTS_ALC_MTA_STANDARD;
  225. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
  226. }
  227. static void
  228. set_default_ll_alloc_opts(struct au_init *ip)
  229. {
  230. SET_DEFAULT_ALLOC_OPTS(ip);
  231. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  232. ip->thr_spec = 0;
  233. ip->astrat = ERTS_ALC_S_BESTFIT;
  234. ip->init.bf.ao = 1;
  235. ip->init.util.ramv = 0;
  236. ip->init.util.mmsbc = 0;
  237. ip->init.util.sbct = ~((UWord) 0);
  238. ip->init.util.name_prefix = "ll_";
  239. ip->init.util.alloc_no = ERTS_ALC_A_LONG_LIVED;
  240. ip->init.util.cp = ERTS_ALC_A_LONG_LIVED;
  241. #ifndef SMALL_MEMORY
  242. ip->init.util.mmbcs = 2*1024*1024; /* Main carrier size */
  243. #else
  244. ip->init.util.mmbcs = 1*1024*1024; /* Main carrier size */
  245. #endif
  246. ip->init.util.ts = ERTS_ALC_MTA_LONG_LIVED;
  247. ip->init.util.asbcst = 0;
  248. ip->init.util.rsbcst = 0;
  249. ip->init.util.rsbcmt = 0;
  250. ip->init.util.rmbcmt = 0;
  251. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL_LL_ALLOC;
  252. }
/*
 * Fill in the default configuration for literal_alloc (literal data).
 * Always enabled (except under ASan), never thread specific, never
 * migrates carriers, and uses address-order bestfit. The low-level
 * alloc callbacks are architecture dependent: on 32-bit, dedicated
 * literal sys/mseg callbacks; on 64-bit, the literal mmapper (when OS
 * memory reservation is available) so literals end up in a reserved
 * virtual range.
 */
static void
set_default_literal_alloc_opts(struct au_init *ip)
{
    SET_DEFAULT_ALLOC_OPTS(ip);
#ifdef ADDRESS_SANITIZER
    ip->enable = 0;
#else
    ip->enable = 1;
#endif
    ip->thr_spec = 0;
    /* Lock down command-line overrides: literal_alloc must stay enabled,
     * non-thread-specific, and must not migrate carriers. */
    ip->disable_allowed = 0;
    ip->thr_spec_allowed = 0;
    ip->carrier_migration_allowed = 0;
    ip->astrat = ERTS_ALC_S_BESTFIT;
    ip->init.bf.ao = 1;
    ip->init.util.ramv = 0;
    ip->init.util.mmsbc = 0;
    /* Effectively disable single-block carriers. */
    ip->init.util.sbct = ~((UWord) 0);
    ip->init.util.name_prefix = "literal_";
    ip->init.util.alloc_no = ERTS_ALC_A_LITERAL;
#ifndef SMALL_MEMORY
    ip->init.util.mmbcs = 1024*1024; /* Main carrier size */
#else
    ip->init.util.mmbcs = 256*1024; /* Main carrier size */
#endif
    ip->init.util.ts = ERTS_ALC_MTA_LITERAL;
    /* All shrink/relocation thresholds and carrier-abandoning off. */
    ip->init.util.asbcst = 0;
    ip->init.util.rsbcst = 0;
    ip->init.util.rsbcmt = 0;
    ip->init.util.rmbcmt = 0;
    ip->init.util.acul = 0;
#if defined(ARCH_32)
    /* 32-bit: dedicated literal allocation callbacks. */
# if HAVE_ERTS_MSEG
    ip->init.util.mseg_alloc = &erts_alcu_literal_32_mseg_alloc;
    ip->init.util.mseg_realloc = &erts_alcu_literal_32_mseg_realloc;
    ip->init.util.mseg_dealloc = &erts_alcu_literal_32_mseg_dealloc;
# endif
    ip->init.util.sys_alloc = &erts_alcu_literal_32_sys_alloc;
    ip->init.util.sys_realloc = &erts_alcu_literal_32_sys_realloc;
    ip->init.util.sys_dealloc = &erts_alcu_literal_32_sys_dealloc;
#elif defined(ARCH_64)
# ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION
    /* 64-bit with OS memory reservation: route through the literal mmapper. */
    ip->init.util.mseg_alloc = &erts_alcu_mmapper_mseg_alloc;
    ip->init.util.mseg_realloc = &erts_alcu_mmapper_mseg_realloc;
    ip->init.util.mseg_dealloc = &erts_alcu_mmapper_mseg_dealloc;
    ip->init.util.mseg_mmapper = &erts_literal_mmapper;
# endif
#else
# error Unknown architecture
#endif
}
  304. static void
  305. set_default_temp_alloc_opts(struct au_init *ip)
  306. {
  307. SET_DEFAULT_ALLOC_OPTS(ip);
  308. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  309. ip->thr_spec = 1;
  310. ip->disable_allowed = 0;
  311. ip->carrier_migration_allowed = 0;
  312. ip->astrat = ERTS_ALC_S_AFIT;
  313. ip->init.util.name_prefix = "temp_";
  314. ip->init.util.alloc_no = ERTS_ALC_A_TEMPORARY;
  315. #ifndef SMALL_MEMORY
  316. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  317. #else
  318. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  319. #endif
  320. ip->init.util.ts = ERTS_ALC_MTA_TEMPORARY;
  321. ip->init.util.rsbcst = 90;
  322. ip->init.util.rmbcmt = 100;
  323. }
  324. static void
  325. set_default_eheap_alloc_opts(struct au_init *ip)
  326. {
  327. SET_DEFAULT_ALLOC_OPTS(ip);
  328. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  329. ip->thr_spec = 1;
  330. ip->astrat = ERTS_ALC_S_GOODFIT;
  331. ip->init.util.name_prefix = "eheap_";
  332. ip->init.util.alloc_no = ERTS_ALC_A_EHEAP;
  333. ip->init.util.cp = ERTS_ALC_A_EHEAP;
  334. #ifndef SMALL_MEMORY
  335. ip->init.util.mmbcs = 512*1024; /* Main carrier size */
  336. #else
  337. ip->init.util.mmbcs = 256*1024; /* Main carrier size */
  338. #endif
  339. ip->init.util.ts = ERTS_ALC_MTA_EHEAP;
  340. ip->init.util.rsbcst = 50;
  341. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC;
  342. }
  343. static void
  344. set_default_binary_alloc_opts(struct au_init *ip)
  345. {
  346. SET_DEFAULT_ALLOC_OPTS(ip);
  347. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  348. ip->thr_spec = 1;
  349. ip->astrat = ERTS_ALC_S_BESTFIT;
  350. ip->init.util.name_prefix = "binary_";
  351. ip->init.util.alloc_no = ERTS_ALC_A_BINARY;
  352. ip->init.util.cp = ERTS_ALC_A_BINARY;
  353. #ifndef SMALL_MEMORY
  354. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  355. #else
  356. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  357. #endif
  358. ip->init.util.ts = ERTS_ALC_MTA_BINARY;
  359. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
  360. ip->init.util.atags = 1;
  361. }
  362. static void
  363. set_default_ets_alloc_opts(struct au_init *ip)
  364. {
  365. SET_DEFAULT_ALLOC_OPTS(ip);
  366. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  367. ip->thr_spec = 1;
  368. ip->astrat = ERTS_ALC_S_BESTFIT;
  369. ip->init.util.name_prefix = "ets_";
  370. ip->init.util.alloc_no = ERTS_ALC_A_ETS;
  371. ip->init.util.cp = ERTS_ALC_A_ETS;
  372. ip->init.util.mmbc0 = 0;
  373. #ifndef SMALL_MEMORY
  374. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  375. #else
  376. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  377. #endif
  378. ip->init.util.ts = ERTS_ALC_MTA_ETS;
  379. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
  380. }
  381. static void
  382. set_default_driver_alloc_opts(struct au_init *ip)
  383. {
  384. SET_DEFAULT_ALLOC_OPTS(ip);
  385. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  386. ip->thr_spec = 1;
  387. ip->astrat = ERTS_ALC_S_BESTFIT;
  388. ip->init.util.name_prefix = "driver_";
  389. ip->init.util.alloc_no = ERTS_ALC_A_DRIVER;
  390. ip->init.util.cp = ERTS_ALC_A_DRIVER;
  391. #ifndef SMALL_MEMORY
  392. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  393. #else
  394. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  395. #endif
  396. ip->init.util.ts = ERTS_ALC_MTA_DRIVER;
  397. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
  398. ip->init.util.atags = 1;
  399. }
  400. static void
  401. set_default_fix_alloc_opts(struct au_init *ip,
  402. size_t *fix_type_sizes)
  403. {
  404. SET_DEFAULT_ALLOC_OPTS(ip);
  405. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  406. ip->thr_spec = 1;
  407. ip->astrat = ERTS_ALC_S_BESTFIT;
  408. ip->init.bf.ao = 1;
  409. ip->init.util.name_prefix = "fix_";
  410. ip->init.util.fix_type_size = fix_type_sizes;
  411. ip->init.util.alloc_no = ERTS_ALC_A_FIXED_SIZE;
  412. ip->init.util.cp = ERTS_ALC_A_FIXED_SIZE;
  413. #ifndef SMALL_MEMORY
  414. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  415. #else
  416. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  417. #endif
  418. ip->init.util.ts = ERTS_ALC_MTA_FIXED_SIZE;
  419. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
  420. }
  421. static void
  422. set_default_test_alloc_opts(struct au_init *ip)
  423. {
  424. SET_DEFAULT_ALLOC_OPTS(ip);
  425. ip->enable = 0; /* Disabled by default */
  426. ip->thr_spec = -1 * erts_no_schedulers;
  427. ip->astrat = ERTS_ALC_S_FIRSTFIT;
  428. ip->init.aoff.crr_order = FF_AOFF;
  429. ip->init.aoff.blk_order = FF_BF;
  430. ip->init.util.name_prefix = "test_";
  431. ip->init.util.alloc_no = ERTS_ALC_A_TEST;
  432. ip->init.util.cp = ERTS_ALC_A_TEST;
  433. ip->init.util.mmbcs = 0; /* Main carrier size */
  434. ip->init.util.ts = ERTS_ALC_MTA_TEST;
  435. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
  436. ip->init.util.atags = 1;
  437. /* Use a constant minimal MBC size */
  438. #if ERTS_SA_MB_CARRIERS
  439. ip->init.util.smbcs = ERTS_SACRR_UNIT_SZ;
  440. ip->init.util.lmbcs = ERTS_SACRR_UNIT_SZ;
  441. ip->init.util.sbct = ERTS_SACRR_UNIT_SZ;
  442. #else
  443. ip->init.util.smbcs = 1 << 12;
  444. ip->init.util.lmbcs = 1 << 12;
  445. ip->init.util.sbct = 1 << 12;
  446. #endif
  447. }
  448. static void
  449. adjust_tpref(struct au_init *ip, int no_sched)
  450. {
  451. if (ip->thr_spec) {
  452. ip->thr_spec = no_sched;
  453. ip->thr_spec *= -1; /* thread preferred */
  454. /* If default ... */
  455. /* ... shrink main multi-block carrier size */
  456. if (ip->default_.mmbcs)
  457. ip->init.util.mmbcs /= ERTS_MIN(4, no_sched);
  458. /* ... shrink largest multi-block carrier size */
  459. if (ip->default_.lmbcs)
  460. ip->init.util.lmbcs /= ERTS_MIN(2, no_sched);
  461. /* ... shrink smallest multi-block carrier size */
  462. if (ip->default_.smbcs)
  463. ip->init.util.smbcs /= ERTS_MIN(4, no_sched);
  464. }
  465. }
  466. static void handle_args(int *, char **, erts_alc_hndl_args_init_t *);
  467. static void
  468. set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init, int ncpu);
  469. static void
  470. start_au_allocator(ErtsAlcType_t alctr_n,
  471. struct au_init *init,
  472. ErtsAllocatorState_t *state);
  473. static void
  474. refuse_af_strategy(struct au_init *init)
  475. {
  476. if (init->astrat == ERTS_ALC_S_AFIT)
  477. init->astrat = ERTS_ALC_S_GOODFIT;
  478. }
  479. #ifdef HARD_DEBUG
  480. static void hdbg_init(void);
  481. #endif
/*
 * Grow every fixed-size type's block size by extra_block_size (extra room
 * requested by e.g. the debug wrapper functions), clamped so each block
 * can still hold an ErtsAllctrDDBlock_t. Applied to every thread-specific
 * instance of fix_alloc, or to the single global instance when fix_alloc
 * is not thread specific. No-op when extra_block_size is 0 or fix_alloc
 * is disabled.
 */
static void adjust_fix_alloc_sizes(UWord extra_block_size)
{
    if (extra_block_size && erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].enabled) {
        int j;
        if (erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].thr_spec) {
            int i;
            ErtsAllocatorThrSpec_t* tspec;
            tspec = &erts_allctr_thr_spec[ERTS_ALC_A_FIXED_SIZE];
            ASSERT(tspec->enabled);
            /* Adjust every per-thread instance. */
            for (i=0; i < tspec->size; i++) {
                Allctr_t* allctr = tspec->allctr[i];
                for (j=0; j < ERTS_ALC_NO_FIXED_SIZES; ++j) {
                    size_t size = allctr->fix[j].type_size;
                    /* Keep at least room for a delayed-dealloc block. */
                    size = MAX(size + extra_block_size,
                               sizeof(ErtsAllctrDDBlock_t));
                    allctr->fix[j].type_size = size;
                }
            }
        }
        else
        {
            /* Single global instance. */
            Allctr_t* allctr = erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra;
            for (j=0; j < ERTS_ALC_NO_FIXED_SIZES; ++j) {
                size_t size = allctr->fix[j].type_size;
                size = MAX(size + extra_block_size,
                           sizeof(ErtsAllctrDDBlock_t));
                allctr->fix[j].type_size = size;
            }
        }
    }
}
  513. static ERTS_INLINE int
  514. strategy_support_carrier_migration(struct au_init *auip)
  515. {
  516. /*
  517. * Currently only aoff* and ageff* support carrier
  518. * migration, i.e, type AOFIRSTFIT.
  519. */
  520. return auip->astrat == ERTS_ALC_S_FIRSTFIT;
  521. }
  522. static ERTS_INLINE void
  523. adjust_carrier_migration_support(struct au_init *auip)
  524. {
  525. if (auip->init.util.acul) {
  526. auip->thr_spec = -1; /* Need thread preferred */
  527. /*
  528. * If strategy cannot handle carrier migration,
  529. * default to a strategy that can...
  530. */
  531. if (!strategy_support_carrier_migration(auip)) {
  532. /* Default to aoffcbf */
  533. auip->astrat = ERTS_ALC_S_FIRSTFIT;
  534. auip->init.aoff.crr_order = FF_AOFF;
  535. auip->init.aoff.blk_order = FF_BF;
  536. }
  537. }
  538. }
/*
 * Initialize the whole erts_alloc framework: build the default
 * configuration for every allocator, let command-line arguments adjust it,
 * normalize the result (carrier migration, thread preference, strategy
 * constraints), then initialize the underlying subsystems and start each
 * allocator. Called once during emulator start-up; the order of the steps
 * below is significant.
 *
 * argc/argv: emulator command line (may be NULL; then no argument handling).
 * eaiop:     start-up options; only ncpu is read here.
 */
void
erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
{
    UWord extra_block_size = 0;
    int i, ncpu;
    erts_alc_hndl_args_init_t init = {
        0,
#if HAVE_ERTS_MSEG
        ERTS_MSEG_INIT_DEFAULT_INITIALIZER,
#endif
        ERTS_DEFAULT_TRIM_THRESHOLD,
        ERTS_DEFAULT_TOP_PAD,
        ERTS_DEFAULT_ALCU_INIT,
    };
    /* Block sizes for each fixed-size type served by fix_alloc. */
    size_t fix_type_sizes[ERTS_ALC_NO_FIXED_SIZES] = {0};
    fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_PROC)]
        = sizeof(Process);
    fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MONITOR)]
        = sizeof(ErtsMonitorDataHeap);
    fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_LINK)]
        = sizeof(ErtsILink);
    fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_DRV_SEL_D_STATE)]
        = sizeof(ErtsDrvSelectDataState);
    fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_NIF_SEL_D_STATE)]
        = sizeof(ErtsNifSelectDataState);
    fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MSG_REF)]
        = sizeof(ErtsMessageRef);
    fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_THR_Q_EL_SL)]
        = sizeof(ErtsThrQElement_t);
    fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_LL_PTIMER)]
        = erts_timer_type_size(ERTS_ALC_T_LL_PTIMER);
    fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_HL_PTIMER)]
        = erts_timer_type_size(ERTS_ALC_T_HL_PTIMER);
    fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_BIF_TIMER)]
        = erts_timer_type_size(ERTS_ALC_T_BIF_TIMER);
    fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MREF_NSCHED_ENT)]
        = sizeof(ErtsNSchedMagicRefTableEntry);
    fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_PREF_NSCHED_ENT)]
        = sizeof(ErtsNSchedPidRefTableEntry);
    fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MINDIRECTION)]
        = ERTS_MAGIC_BIN_UNALIGNED_SIZE(sizeof(ErtsMagicIndirectionWord));
    fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_RECV_MARK_BLK)]
        = sizeof(ErtsRecvMarkerBlock);
    fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_SIGQ_BUFFERS)]
        = sizeof(ErtsSignalInQueueBufferArray);
#ifdef HARD_DEBUG
    hdbg_init();
#endif
    lock_all_physical_memory = 0;
    ncpu = eaiop->ncpu;
    if (ncpu < 1)
        ncpu = 1;
    erts_tsd_key_create(&erts_allctr_prelock_tsd_key,
                        "erts_allctr_prelock_tsd_key");
    erts_sys_alloc_init();
    erts_init_utils_mem();
    /* Build the default configuration for every allocator; handle_args()
     * below may then override parts of it from the command line. */
    set_default_sl_alloc_opts(&init.sl_alloc);
    set_default_std_alloc_opts(&init.std_alloc);
    set_default_ll_alloc_opts(&init.ll_alloc);
    set_default_temp_alloc_opts(&init.temp_alloc);
    set_default_eheap_alloc_opts(&init.eheap_alloc);
    set_default_binary_alloc_opts(&init.binary_alloc);
    set_default_ets_alloc_opts(&init.ets_alloc);
    set_default_driver_alloc_opts(&init.driver_alloc);
    set_default_fix_alloc_opts(&init.fix_alloc,
                               fix_type_sizes);
    set_default_literal_alloc_opts(&init.literal_alloc);
    set_default_test_alloc_opts(&init.test_alloc);
    if (argc && argv)
        handle_args(argc, argv, &init);
    /* handle_args() may have requested that all physical memory be locked. */
    if (lock_all_physical_memory) {
#ifdef HAVE_MLOCKALL
        errno = 0;
        if (mlockall(MCL_CURRENT|MCL_FUTURE) != 0) {
            int err = errno;
            const char *errstr = err ? strerror(err) : "unknown";
            erts_exit(1, "Failed to lock physical memory: %s (%d)\n",
                      errstr, err);
        }
#else
        erts_exit(1, "Failed to lock physical memory: Not supported\n");
#endif
    }
    /* Make adjustments for carrier migration support */
    init.temp_alloc.init.util.acul = 0; /* temp_alloc never migrates carriers */
    adjust_carrier_migration_support(&init.sl_alloc);
    adjust_carrier_migration_support(&init.std_alloc);
    adjust_carrier_migration_support(&init.ll_alloc);
    adjust_carrier_migration_support(&init.eheap_alloc);
    adjust_carrier_migration_support(&init.binary_alloc);
    adjust_carrier_migration_support(&init.ets_alloc);
    adjust_carrier_migration_support(&init.driver_alloc);
    adjust_carrier_migration_support(&init.fix_alloc);
    adjust_carrier_migration_support(&init.literal_alloc);
    if (init.erts_alloc_config) {
        /* Adjust flags that erts_alloc_config won't like */
        /* No thread specific instances */
        init.temp_alloc.thr_spec = 0;
        init.sl_alloc.thr_spec = 0;
        init.std_alloc.thr_spec = 0;
        init.ll_alloc.thr_spec = 0;
        init.eheap_alloc.thr_spec = 0;
        init.binary_alloc.thr_spec = 0;
        init.ets_alloc.thr_spec = 0;
        init.driver_alloc.thr_spec = 0;
        init.fix_alloc.thr_spec = 0;
        init.literal_alloc.thr_spec = 0;
        /* No carrier migration */
        init.temp_alloc.init.util.acul = 0;
        init.sl_alloc.init.util.acul = 0;
        init.std_alloc.init.util.acul = 0;
        init.ll_alloc.init.util.acul = 0;
        init.eheap_alloc.init.util.acul = 0;
        init.binary_alloc.init.util.acul = 0;
        init.ets_alloc.init.util.acul = 0;
        init.driver_alloc.init.util.acul = 0;
        init.fix_alloc.init.util.acul = 0;
        init.literal_alloc.init.util.acul = 0;
    }
    /* Only temp_alloc can use thread specific interface */
    if (init.temp_alloc.thr_spec)
        init.temp_alloc.thr_spec = erts_no_schedulers;
    /* Others must use thread preferred interface */
    adjust_tpref(&init.sl_alloc, erts_no_schedulers);
    adjust_tpref(&init.std_alloc, erts_no_schedulers);
    adjust_tpref(&init.ll_alloc, erts_no_schedulers);
    adjust_tpref(&init.eheap_alloc, erts_no_schedulers);
    adjust_tpref(&init.binary_alloc, erts_no_schedulers);
    adjust_tpref(&init.ets_alloc, erts_no_schedulers);
    adjust_tpref(&init.driver_alloc, erts_no_schedulers);
    adjust_tpref(&init.fix_alloc, erts_no_schedulers);
    adjust_tpref(&init.literal_alloc, erts_no_schedulers);
    /*
     * The following allocators cannot be run with afit strategy.
     * Make sure they don't...
     */
    refuse_af_strategy(&init.sl_alloc);
    refuse_af_strategy(&init.std_alloc);
    refuse_af_strategy(&init.ll_alloc);
    refuse_af_strategy(&init.eheap_alloc);
    refuse_af_strategy(&init.binary_alloc);
    refuse_af_strategy(&init.ets_alloc);
    refuse_af_strategy(&init.driver_alloc);
    refuse_af_strategy(&init.fix_alloc);
    refuse_af_strategy(&init.literal_alloc);
    if (!init.temp_alloc.thr_spec)
        refuse_af_strategy(&init.temp_alloc);
    /* Initialize the underlying subsystems before starting allocators. */
    erts_mtrace_pre_init();
#if HAVE_ERTS_MSEG
    init.mseg.nos = erts_no_schedulers;
    erts_mseg_init(&init.mseg);
#endif
    erts_alcu_init(&init.alloc_util);
    erts_afalc_init();
    erts_bfalc_init();
    erts_gfalc_init();
    erts_aoffalc_init();
    /* Clear the global allocator tables before filling them in. */
    for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
        erts_allctrs[i].alloc = NULL;
        erts_allctrs[i].realloc = NULL;
        erts_allctrs[i].free = NULL;
        erts_allctrs[i].extra = NULL;
        erts_allctrs_info[i].alloc_util = 0;
        erts_allctrs_info[i].enabled = 0;
        erts_allctrs_info[i].thr_spec = 0;
        erts_allctrs_info[i].extra = NULL;
    }
    /* sys_alloc is wired directly to the system allocator. */
    erts_allctrs[ERTS_ALC_A_SYSTEM].alloc = erts_sys_alloc;
    erts_allctrs[ERTS_ALC_A_SYSTEM].realloc = erts_sys_realloc;
    erts_allctrs[ERTS_ALC_A_SYSTEM].free = erts_sys_free;
    erts_allctrs_info[ERTS_ALC_A_SYSTEM].enabled = 1;
    /* Install the callback tables for each alloc_util allocator. */
    set_au_allocator(ERTS_ALC_A_TEMPORARY, &init.temp_alloc, ncpu);
    set_au_allocator(ERTS_ALC_A_SHORT_LIVED, &init.sl_alloc, ncpu);
    set_au_allocator(ERTS_ALC_A_STANDARD, &init.std_alloc, ncpu);
    set_au_allocator(ERTS_ALC_A_LONG_LIVED, &init.ll_alloc, ncpu);
    set_au_allocator(ERTS_ALC_A_EHEAP, &init.eheap_alloc, ncpu);
    set_au_allocator(ERTS_ALC_A_BINARY, &init.binary_alloc, ncpu);
    set_au_allocator(ERTS_ALC_A_ETS, &init.ets_alloc, ncpu);
    set_au_allocator(ERTS_ALC_A_DRIVER, &init.driver_alloc, ncpu);
    set_au_allocator(ERTS_ALC_A_FIXED_SIZE, &init.fix_alloc, ncpu);
    set_au_allocator(ERTS_ALC_A_LITERAL, &init.literal_alloc, ncpu);
    set_au_allocator(ERTS_ALC_A_TEST, &init.test_alloc, ncpu);
    /* Sanity check: every allocator must have a complete callback table. */
    for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
        if (!erts_allctrs[i].alloc)
            erts_exit(ERTS_ABORT_EXIT,
                      "Missing alloc function for %s\n", ERTS_ALC_A2AD(i));
        if (!erts_allctrs[i].realloc)
            erts_exit(ERTS_ABORT_EXIT,
                      "Missing realloc function for %s\n", ERTS_ALC_A2AD(i));
        if (!erts_allctrs[i].free)
            erts_exit(ERTS_ABORT_EXIT,
                      "Missing free function for %s\n", ERTS_ALC_A2AD(i));
    }
    sys_alloc_opt(SYS_ALLOC_OPT_TRIM_THRESHOLD, init.trim_threshold);
    sys_alloc_opt(SYS_ALLOC_OPT_TOP_PAD, init.top_pad);
    erts_mtrace_init(init.instr.mtrace, init.instr.nodename);
    /* Start each allocator, handing it its statically allocated state. */
    start_au_allocator(ERTS_ALC_A_TEMPORARY,
                       &init.temp_alloc,
                       &temp_alloc_state);
    start_au_allocator(ERTS_ALC_A_SHORT_LIVED,
                       &init.sl_alloc,
                       &sl_alloc_state);
    start_au_allocator(ERTS_ALC_A_STANDARD,
                       &init.std_alloc,
                       &std_alloc_state);
    start_au_allocator(ERTS_ALC_A_LONG_LIVED,
                       &init.ll_alloc,
                       &ll_alloc_state);
    start_au_allocator(ERTS_ALC_A_EHEAP,
                       &init.eheap_alloc,
                       &eheap_alloc_state);
    start_au_allocator(ERTS_ALC_A_BINARY,
                       &init.binary_alloc,
                       &binary_alloc_state);
    start_au_allocator(ERTS_ALC_A_ETS,
                       &init.ets_alloc,
                       &ets_alloc_state);
    start_au_allocator(ERTS_ALC_A_DRIVER,
                       &init.driver_alloc,
                       &driver_alloc_state);
    start_au_allocator(ERTS_ALC_A_FIXED_SIZE,
                       &init.fix_alloc,
                       &fix_alloc_state);
    start_au_allocator(ERTS_ALC_A_LITERAL,
                       &init.literal_alloc,
                       &literal_alloc_state);
    start_au_allocator(ERTS_ALC_A_TEST,
                       &init.test_alloc,
                       &test_alloc_state);
    erts_mtrace_install_wrapper_functions();
    init_aireq_alloc();
#ifdef DEBUG
    /* Debug wrappers may need extra room in every fixed-size block. */
    extra_block_size += install_debug_functions();
#endif
    adjust_fix_alloc_sizes(extra_block_size);
}
/* Late-stage allocator initialization hook; intentionally empty — nothing
 * currently needs to run here, but the entry point is kept for callers. */
void
erts_alloc_late_init(void)
{
}
  779. static void *
  780. erts_realloc_fixed_size(ErtsAlcType_t type, void *extra, void *p, Uint size)
  781. {
  782. erts_exit(ERTS_ABORT_EXIT,
  783. "Attempt to reallocate a block of the fixed size type %s\n",
  784. ERTS_ALC_T2TD(type));
  785. }
/*
 * Install the global callback table (erts_allctrs[alctr_n]) and info/thread
 * tables for one alloc_util allocator, according to its configuration:
 *
 *   - disabled          -> fall back to the plain system allocator
 *   - thr_spec > 0      -> thread specific interface
 *   - thr_spec < 0      -> thread preferred interface (delayed dealloc on)
 *   - otherwise         -> thread safe (locked) interface; a configuration
 *                          with none of these aborts the emulator
 *
 * Fixed-size allocators get erts_realloc_fixed_size() as their realloc
 * callback; realloc-always-moves (ramv) selects the *_mv_* variants.
 * ncpu is currently unused in this function.
 */
static void
set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init, int ncpu)
{
    ErtsAllocatorFunctions_t *af = &erts_allctrs[alctr_n];
    ErtsAllocatorInfo_t *ai = &erts_allctrs_info[alctr_n];
    ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[alctr_n];
    /*
     * Some allocators are forced on if halfword heap is used.
     */
    if (init->init.util.force)
        init->enable = 1;
    tspec->enabled = 0;
    tspec->dd = 0;
    tspec->aix = alctr_n;
    tspec->size = 0;
    ai->thr_spec = 0;
    if (!init->enable) {
        /* Disabled: route everything to the system allocator. */
        af->alloc = erts_sys_alloc;
        af->realloc = erts_sys_realloc;
        af->free = erts_sys_free;
        af->extra = NULL;
        ai->alloc_util = 0;
        ai->enabled = 0;
        ai->extra = NULL;
        return;
    }
    if (init->thr_spec) {
        if (init->thr_spec > 0) {
            /* Thread specific interface. */
            af->alloc = erts_alcu_alloc_thr_spec;
            if (init->init.util.fix_type_size)
                af->realloc = erts_realloc_fixed_size;
            else if (init->init.util.ramv)
                af->realloc = erts_alcu_realloc_mv_thr_spec;
            else
                af->realloc = erts_alcu_realloc_thr_spec;
            af->free = erts_alcu_free_thr_spec;
        }
        else {
            /* Thread preferred interface, with delayed dealloc. */
            af->alloc = erts_alcu_alloc_thr_pref;
            if (init->init.util.fix_type_size)
                af->realloc = erts_realloc_fixed_size;
            else if (init->init.util.ramv)
                af->realloc = erts_alcu_realloc_mv_thr_pref;
            else
                af->realloc = erts_alcu_realloc_thr_pref;
            af->free = erts_alcu_free_thr_pref;
            tspec->dd = 1;
        }
        tspec->enabled = 1;
        /* +1: instance 0 is shared/fallback, 1..n are per thread. */
        tspec->size = abs(init->thr_spec) + 1;
        ai->thr_spec = tspec->size;
    }
    else
    if (init->init.util.ts) {
        /* Single thread-safe (locked) instance. */
        af->alloc = erts_alcu_alloc_ts;
        if (init->init.util.fix_type_size)
            af->realloc = erts_realloc_fixed_size;
        else if (init->init.util.ramv)
            af->realloc = erts_alcu_realloc_mv_ts;
        else
            af->realloc = erts_alcu_realloc_ts;
        af->free = erts_alcu_free_ts;
    }
    else
    {
        erts_exit(ERTS_ABORT_EXIT, "%salloc is not thread safe\n",
                  init->init.util.name_prefix);
    }
    af->extra = NULL;
    ai->alloc_util = 1;
    ai->enabled = 1;
}
static void
start_au_allocator(ErtsAlcType_t alctr_n,
		   struct au_init *init,
		   ErtsAllocatorState_t *state)
{
    /* Start the allocator instance(s) for allocator number alctr_n.
     * For thread-spec/thread-pref allocators, tspec->size instances are
     * started: instance 0 lives in *state, the others in a cache-line
     * aligned block allocated here. Otherwise a single instance in
     * *state is started. Aborts the emulator on any failure. */
    int i;
    int size = 1;
    void *as0;
    ErtsAlcStrat_t astrat;
    ErtsAllocatorFunctions_t *af = &erts_allctrs[alctr_n];
    ErtsAllocatorInfo_t *ai = &erts_allctrs_info[alctr_n];
    ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[alctr_n];
    ErtsAlcFixList_t *fix_lists = NULL;
    size_t fix_list_size = 0;

    if (!init->enable)
	return;

    if (init->thr_spec) {
	/* One pointer slot per instance (plus one) followed by the
	 * instance states; over-allocate by one cache line so that the
	 * state array can be aligned below. */
	char *states = erts_sys_alloc(0,
				      NULL,
				      ((sizeof(Allctr_t *)
					* (tspec->size + 1))
				       + (sizeof(ErtsAllocatorState_t)
					  * tspec->size)
				       + ERTS_CACHE_LINE_SIZE - 1));
	if (!states)
	    erts_exit(ERTS_ABORT_EXIT,
		     "Failed to allocate allocator states for %salloc\n",
		     init->init.util.name_prefix);
	tspec->allctr = (Allctr_t **) states;
	states += sizeof(Allctr_t *) * (tspec->size + 1);
	/* Round the state array up to a cache-line boundary. */
	states = ((((UWord) states) & ERTS_CACHE_LINE_MASK)
		  ? (char *) ((((UWord) states) & ~ERTS_CACHE_LINE_MASK)
			      + ERTS_CACHE_LINE_SIZE)
		  : (char *) states);
	/* Instance 0 uses the statically provided state. */
	tspec->allctr[0] = (Allctr_t *) state;
	size = tspec->size;
	for (i = 1; i < size; i++)
	    tspec->allctr[i] = (Allctr_t *)
		&((ErtsAllocatorState_t *) states)[i-1];
    }

    if (init->init.util.fix_type_size) {
	size_t tot_fix_list_size;
	/* One cache-line aligned fix list per instance. */
	fix_list_size = sizeof(ErtsAlcFixList_t)*ERTS_ALC_NO_FIXED_SIZES;
	fix_list_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(fix_list_size);
	tot_fix_list_size = fix_list_size;
	if (init->thr_spec)
	    tot_fix_list_size *= tspec->size;
	fix_lists = erts_sys_alloc(0,
				   NULL,
				   (tot_fix_list_size
				    + ERTS_CACHE_LINE_SIZE - 1));
	if (!fix_lists)
	    erts_exit(ERTS_ABORT_EXIT,
		     "Failed to allocate fix lists for %salloc\n",
		     init->init.util.name_prefix);
	if (((UWord) fix_lists) & ERTS_CACHE_LINE_MASK)
	    fix_lists = ((ErtsAlcFixList_t *)
			 ((((UWord) fix_lists) & ~ERTS_CACHE_LINE_MASK)
			  + ERTS_CACHE_LINE_SIZE));
    }

    for (i = 0; i < size; i++) {
	Allctr_t *as;
	astrat = init->astrat;

	if (!init->thr_spec)
	    as0 = state;
	else {
	    as0 = (void *) tspec->allctr[i];
	    if (!as0)
		continue;
	    if (init->thr_spec < 0) {
		/* Thread-preferred mode: only instance 0 is locked. */
		init->init.util.ts = i == 0;
		init->init.util.tspec = 0;
		init->init.util.tpref = -1*init->thr_spec + 1;
	    }
	    else {
		if (i != 0)
		    init->init.util.ts = 0;
		else {
		    /* Instance 0 is the locked fallback; afit is swapped
		     * for goodfit on it (presumably afit is unsuitable
		     * for a shared instance — confirm). */
		    if (astrat == ERTS_ALC_S_AFIT)
			astrat = ERTS_ALC_S_GOODFIT;
		    init->init.util.ts = 1;
		}
		init->init.util.tspec = init->thr_spec + 1;
		init->init.util.tpref = 0;
	    }
	}

	if (fix_lists) {
	    /* Hand this instance its fix list and advance the cursor. */
	    init->init.util.fix = fix_lists;
	    fix_lists = ((ErtsAlcFixList_t *)
			 (((char *) fix_lists) + fix_list_size));
	}

	init->init.util.alloc_strat = astrat;
	init->init.util.ix = i;

	/* Start the instance in-place in as0 using the selected
	 * allocation strategy. */
	switch (astrat) {
	case ERTS_ALC_S_GOODFIT:
	    as = erts_gfalc_start((GFAllctr_t *) as0,
					   &init->init.gf,
					   &init->init.util);
	    break;
	case ERTS_ALC_S_BESTFIT:
	    as = erts_bfalc_start((BFAllctr_t *) as0,
					   &init->init.bf,
					   &init->init.util);
	    break;
	case ERTS_ALC_S_AFIT:
	    as = erts_afalc_start((AFAllctr_t *) as0,
					   &init->init.af,
					   &init->init.util);
	    break;
	case ERTS_ALC_S_FIRSTFIT:
	    as = erts_aoffalc_start((AOFFAllctr_t *) as0,
					     &init->init.aoff,
					     &init->init.util);
	    break;
	default:
	    as = NULL;
	    ASSERT(0);
	}

	if (!as)
	    erts_exit(ERTS_ABORT_EXIT,
		     "Failed to start %salloc\n", init->init.util.name_prefix);

	ASSERT(as == (void *) as0);
	af->extra = as;
    }

    if (init->thr_spec)
	af->extra = tspec;

    ai->extra = af->extra;
}
  986. static void bad_param(char *param_start, char *param_end)
  987. {
  988. size_t len = param_end - param_start;
  989. char param[100];
  990. if (len > 99)
  991. len = 99;
  992. sys_memcpy((void *) param, (void *) param_start, len);
  993. param[len] = '\0';
  994. erts_fprintf(stderr, "bad \"%s\" parameter\n", param);
  995. erts_usage();
  996. }
  997. static void bad_value(char *param_start, char *param_end, char *value)
  998. {
  999. size_t len = param_end - param_start;
  1000. char param[100];
  1001. if (len > 99)
  1002. len = 99;
  1003. sys_memcpy((void *) param, (void *) param_start, len);
  1004. param[len] = '\0';
  1005. erts_fprintf(stderr, "bad \"%s\" value: %s\n", param, value);
  1006. erts_usage();
  1007. }
  1008. /* Get arg marks argument as handled by
  1009. putting NULL in argv */
  1010. static char *
  1011. get_value(char* rest, char** argv, int* ip)
  1012. {
  1013. char *param = argv[*ip]+1;
  1014. argv[*ip] = NULL;
  1015. if (*rest == '\0') {
  1016. char *next = argv[*ip + 1];
  1017. if (next[0] == '-'
  1018. && next[1] == '-'
  1019. && next[2] == '\0') {
  1020. bad_value(param, rest, "");
  1021. }
  1022. (*ip)++;
  1023. argv[*ip] = NULL;
  1024. return next;
  1025. }
  1026. return rest;
  1027. }
  1028. static ERTS_INLINE int
  1029. has_prefix(const char *prefix, const char *string)
  1030. {
  1031. int i;
  1032. for (i = 0; prefix[i]; i++)
  1033. if (prefix[i] != string[i])
  1034. return 0;
  1035. return 1;
  1036. }
  1037. static int
  1038. get_bool_value(char *param_end, char** argv, int* ip)
  1039. {
  1040. char *param = argv[*ip]+1;
  1041. char *value = get_value(param_end, argv, ip);
  1042. if (sys_strcmp(value, "true") == 0)
  1043. return 1;
  1044. else if (sys_strcmp(value, "false") == 0)
  1045. return 0;
  1046. else
  1047. bad_value(param, param_end, value);
  1048. return -1;
  1049. }
  1050. static Uint kb_to_bytes(Sint kb, Uint *bytes)
  1051. {
  1052. const Uint max = ((~((Uint) 0))/1024) + 1;
  1053. if (kb < 0 || (Uint)kb > max)
  1054. return 0;
  1055. if ((Uint)kb == max)
  1056. *bytes = ~((Uint) 0);
  1057. else
  1058. *bytes = ((Uint) kb)*1024;
  1059. return 1;
  1060. }
  1061. static Uint
  1062. get_kb_value(char *param_end, char** argv, int* ip)
  1063. {
  1064. Sint tmp;
  1065. Uint bytes = 0;
  1066. char *rest;
  1067. char *param = argv[*ip]+1;
  1068. char *value = get_value(param_end, argv, ip);
  1069. errno = 0;
  1070. tmp = (Sint) ErtsStrToSint(value, &rest, 10);
  1071. if (errno != 0 || rest == value || !kb_to_bytes(tmp, &bytes))
  1072. bad_value(param, param_end, value);
  1073. return bytes;
  1074. }
  1075. static UWord
  1076. get_mb_value(char *param_end, char** argv, int* ip)
  1077. {
  1078. SWord tmp;
  1079. UWord max = ((~((UWord) 0))/(1024*1024)) + 1;
  1080. char *rest;
  1081. char *param = argv[*ip]+1;
  1082. char *value = get_value(param_end, argv, ip);
  1083. errno = 0;
  1084. tmp = (SWord) ErtsStrToSint(value, &rest, 10);
  1085. if (errno != 0 || rest == value || tmp < 0 || max < ((UWord) tmp))
  1086. bad_value(param, param_end, value);
  1087. if (max == (UWord) tmp)
  1088. return ~((UWord) 0);
  1089. else
  1090. return ((UWord) tmp)*1024*1024;
  1091. }
#if 0
/* Currently unused: parser for a plain byte-valued option. Kept
 * disabled under "#if 0" — presumably retained for future option
 * parsing; confirm before removing. */
static Uint
get_byte_value(char *param_end, char** argv, int* ip)
{
    Sint tmp;
    char *rest;
    char *param = argv[*ip]+1;
    char *value = get_value(param_end, argv, ip);

    errno = 0;
    tmp = (Sint) ErtsStrToSint(value, &rest, 10);
    if (errno != 0 || rest == value || tmp < 0)
	bad_value(param, param_end, value);
    return (Uint) tmp;
}
#endif
  1107. static Uint
  1108. get_amount_value(char *param_end, char** argv, int* ip)
  1109. {
  1110. Sint tmp;
  1111. char *rest;
  1112. char *param = argv[*ip]+1;
  1113. char *value = get_value(param_end, argv, ip);
  1114. errno = 0;
  1115. tmp = (Sint) ErtsStrToSint(value, &rest, 10);
  1116. if (errno != 0 || rest == value || tmp < 0)
  1117. bad_value(param, param_end, value);
  1118. return (Uint) tmp;
  1119. }
  1120. static Uint
  1121. get_acul_value(struct au_init *auip, char *param_end, char** argv, int* ip)
  1122. {
  1123. Sint tmp;
  1124. char *rest;
  1125. char *param = argv[*ip]+1;
  1126. char *value = get_value(param_end, argv, ip);
  1127. if (sys_strcmp(value, "de") == 0) {
  1128. switch (auip->init.util.alloc_no) {
  1129. case ERTS_ALC_A_LONG_LIVED:
  1130. return ERTS_ALC_DEFAULT_ENABLED_ACUL_LL_ALLOC;
  1131. case ERTS_ALC_A_EHEAP:
  1132. return ERTS_ALC_DEFAULT_ENABLED_ACUL_EHEAP_ALLOC;
  1133. default:
  1134. return ERTS_ALC_DEFAULT_ENABLED_ACUL;
  1135. }
  1136. }
  1137. errno = 0;
  1138. tmp = (Sint) ErtsStrToSint(value, &rest, 10);
  1139. if (errno != 0 || rest == value || tmp < 0 || 100 < tmp)
  1140. bad_value(param, param_end, value);
  1141. return (Uint) tmp;
  1142. }
static void
handle_au_arg(struct au_init *auip,
	      char* sub_param,
	      char** argv,
	      int* ip,
	      int u_switch)
{
    /* Parse one alloc_util sub-switch ("+M<S><subparam> <value>") for
     * the allocator described by auip and store the parsed value in
     * auip->init. u_switch is non-zero when the option arrived via the
     * "+Mu..." apply-to-all-allocators form; in that case options that
     * do not apply to this particular allocator are still parsed (so the
     * value is consumed) but silently ignored rather than rejected.
     * Exits via bad_param()/bad_value() (which do not return) on
     * malformed input. */
    char *param = argv[*ip]+1;

    switch (sub_param[0]) {
    case 'a':
	if (sub_param[1] == 'c') { /* Migration parameters "ac*" */
	    UWord value;
	    UWord* wp;
	    if (!auip->carrier_migration_allowed && !u_switch)
		goto bad_switch;
	    if (has_prefix("acul", sub_param)) {
		value = get_acul_value(auip, sub_param + 4, argv, ip);
		wp = &auip->init.util.acul;
	    }
	    else if (has_prefix("acnl", sub_param)) {
		value = get_amount_value(sub_param + 4, argv, ip);
		wp = &auip->init.util.acnl;
	    }
	    else if (has_prefix("acfml", sub_param)) {
		value = get_amount_value(sub_param + 5, argv, ip);
		wp = &auip->init.util.acfml;
	    }
	    else
		goto bad_switch;
	    /* Value is always consumed above, but only applied when
	     * carrier migration is allowed for this allocator. */
	    if (auip->carrier_migration_allowed)
		*wp = value;
	}
	else if(has_prefix("asbcst", sub_param)) {
	    auip->init.util.asbcst = get_kb_value(sub_param + 6, argv, ip);
	}
	else if(has_prefix("as", sub_param)) {
	    /* Allocation strategy selection. */
	    char *alg = get_value(sub_param + 2, argv, ip);
	    if (sys_strcmp("bf", alg) == 0) {
		auip->astrat = ERTS_ALC_S_BESTFIT;
		auip->init.bf.ao = 0;
	    }
	    else if (sys_strcmp("aobf", alg) == 0) {
		auip->astrat = ERTS_ALC_S_BESTFIT;
		auip->init.bf.ao = 1;
	    }
	    else if (sys_strcmp("gf", alg) == 0) {
		auip->astrat = ERTS_ALC_S_GOODFIT;
	    }
	    else if (sys_strcmp("af", alg) == 0) {
		auip->astrat = ERTS_ALC_S_AFIT;
	    }
	    else if (sys_strcmp("aoff", alg) == 0) {
		auip->astrat = ERTS_ALC_S_FIRSTFIT;
		auip->init.aoff.crr_order = FF_AOFF;
		auip->init.aoff.blk_order = FF_AOFF;
	    }
	    else if (sys_strcmp("aoffcbf", alg) == 0) {
		auip->astrat = ERTS_ALC_S_FIRSTFIT;
		auip->init.aoff.crr_order = FF_AOFF;
		auip->init.aoff.blk_order = FF_BF;
	    }
	    else if (sys_strcmp("aoffcaobf", alg) == 0) {
		auip->astrat = ERTS_ALC_S_FIRSTFIT;
		auip->init.aoff.crr_order = FF_AOFF;
		auip->init.aoff.blk_order = FF_AOBF;
	    }
	    else if (sys_strcmp("ageffcaoff", alg) == 0) {
		auip->astrat = ERTS_ALC_S_FIRSTFIT;
		auip->init.aoff.crr_order = FF_AGEFF;
		auip->init.aoff.blk_order = FF_AOFF;
	    }
	    else if (sys_strcmp("ageffcbf", alg) == 0) {
		auip->astrat = ERTS_ALC_S_FIRSTFIT;
		auip->init.aoff.crr_order = FF_AGEFF;
		auip->init.aoff.blk_order = FF_BF;
	    }
	    else if (sys_strcmp("ageffcaobf", alg) == 0) {
		auip->astrat = ERTS_ALC_S_FIRSTFIT;
		auip->init.aoff.crr_order = FF_AGEFF;
		auip->init.aoff.blk_order = FF_AOBF;
	    }
	    else {
		/* "chaosff" is only accepted for the test allocator. */
		if (auip->init.util.alloc_no == ERTS_ALC_A_TEST
		    && sys_strcmp("chaosff", alg) == 0) {
		    auip->astrat = ERTS_ALC_S_FIRSTFIT;
		    auip->init.aoff.crr_order = FF_CHAOS;
		    auip->init.aoff.blk_order = FF_CHAOS;
		}
		else {
		    bad_value(param, sub_param + 1, alg);
		}
	    }
	    /* Strategies that cannot migrate carriers disable acul. */
	    if (!strategy_support_carrier_migration(auip))
		auip->init.util.acul = 0;
	} else if (has_prefix("atags", sub_param)) {
	    auip->init.util.atags = get_bool_value(sub_param + 5, argv, ip);
	}
	else
	    goto bad_switch;
	break;
    case 'c': {
	if (has_prefix("cp", sub_param)) {
	    /* Carrier pool selection. */
	    char *param, *param_end, *value; /* NOTE: shadows outer param */
	    int cp;
	    if (!auip->carrier_migration_allowed && !u_switch)
		goto bad_switch;
	    param = argv[*ip]+1;
	    param_end = sub_param + 2;
	    value = get_value(param_end, argv, ip);
	    /* The value must be exactly one character. */
	    if (value[0] == '\0' || value[1] != '\0')
		bad_value(param, param_end, value);
	    switch (value[0]) {
	    case 'B': cp = ERTS_ALC_A_BINARY; break;
	    case 'D': cp = ERTS_ALC_A_STANDARD; break;
	    case 'E': cp = ERTS_ALC_A_ETS; break;
	    case 'F': cp = ERTS_ALC_A_FIXED_SIZE; break;
	    case 'H': cp = ERTS_ALC_A_EHEAP; break;
	    case 'L': cp = ERTS_ALC_A_LONG_LIVED; break;
	    case 'R': cp = ERTS_ALC_A_DRIVER; break;
	    case 'S': cp = ERTS_ALC_A_SHORT_LIVED; break;
	    case '@': cp = ERTS_ALC_COMMON_CPOOL_IX; break;
	    case ':': cp = auip->init.util.alloc_no; break;
	    default: cp = -1;
		bad_value(param, param_end, value);
		break;
	    }
	    if (auip->carrier_migration_allowed)
		auip->init.util.cp = cp;
	}
	else
	    goto bad_switch;
	break;
    }
    case 'e': {
	/* Enable/disable this allocator. */
	int e = get_bool_value(sub_param + 1, argv, ip);
        if (!auip->disable_allowed && !e) {
            if (!u_switch)
                bad_value(param, sub_param + 1, "false");
	    else
		ASSERT(auip->enable); /* ignore */
	}
	else auip->enable = e;
	break;
    }
    case 'l':
	if (has_prefix("lmbcs", sub_param)) {
	    auip->default_.lmbcs = 0;
	    auip->init.util.lmbcs = get_kb_value(sub_param + 5, argv, ip);
	}
	else
	    goto bad_switch;
	break;
    case 'm':
	if (has_prefix("mbcgs", sub_param)) {
	    auip->init.util.mbcgs = get_amount_value(sub_param + 5, argv, ip);
	}
	else if (has_prefix("mbsd", sub_param)) {
	    /* Goodfit-specific; clamped to at least 1. */
	    auip->init.gf.mbsd = get_amount_value(sub_param + 4, argv, ip);
	    if (auip->init.gf.mbsd < 1)
		auip->init.gf.mbsd = 1;
	}
	else if (has_prefix("mmbcs", sub_param)) {
	    auip->default_.mmbcs = 0;
	    auip->init.util.mmbcs = get_kb_value(sub_param + 5, argv, ip);
	}
	else if (has_prefix("mmmbc", sub_param)) {
	    auip->default_.mmmbc = 0;
	    auip->init.util.mmmbc = get_amount_value(sub_param + 5, argv, ip);
	}
	else if (has_prefix("mmsbc", sub_param)) {
	    auip->init.util.mmsbc = get_amount_value(sub_param + 5, argv, ip);
	}
	else
	    goto bad_switch;
	break;
    case 'r':
	/* Percentage values below are clamped to at most 100. */
	if(has_prefix("rsbcmt", sub_param)) {
	    auip->init.util.rsbcmt = get_amount_value(sub_param + 6, argv, ip);
	    if (auip->init.util.rsbcmt > 100)
		auip->init.util.rsbcmt = 100;
	}
	else if(has_prefix("rsbcst", sub_param)) {
	    auip->init.util.rsbcst = get_amount_value(sub_param + 6, argv, ip);
	    if (auip->init.util.rsbcst > 100)
		auip->init.util.rsbcst = 100;
	}
	else if (has_prefix("rmbcmt", sub_param)) {
	    auip->init.util.rmbcmt = get_amount_value(sub_param + 6, argv, ip);
	    if (auip->init.util.rmbcmt > 100)
		auip->init.util.rmbcmt = 100;
	}
	else if (has_prefix("ramv", sub_param)) {
	    auip->init.util.ramv = get_bool_value(sub_param + 4, argv, ip);
	}
	else
	    goto bad_switch;
	break;
    case 's':
	if(has_prefix("sbct", sub_param)) {
	    auip->init.util.sbct = get_kb_value(sub_param + 4, argv, ip);
	}
	else if (has_prefix("smbcs", sub_param)) {
	    auip->default_.smbcs = 0;
	    auip->init.util.smbcs = get_kb_value(sub_param + 5, argv, ip);
	}
	else
	    goto bad_switch;
	break;
    case 't': {
	/* Thread-specific instance toggle. */
	int res = get_bool_value(sub_param+1, argv, ip);
	if (res > 0) {
	    if (!auip->thr_spec_allowed) {
		if (!u_switch)
		    bad_value(param, sub_param + 1, "true");
		else
		    ASSERT(!auip->thr_spec); /* ignore */
	    }
	    else
		auip->thr_spec = 1;
	    break;
	}
	else if (res == 0) {
	    auip->thr_spec = 0;
	    auip->init.util.acul = 0;
	    break;
	}
	goto bad_switch;
    }
    default:
    bad_switch:
	bad_param(param, sub_param);
    }
}
static void
handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
{
    /* Parse all "+M..." emulator memory switches in argv and fill in
     * *init accordingly. Handled arguments are marked with NULL (by
     * get_value() and the parsers it backs) and finally compacted away;
     * *argc is updated to the number of remaining arguments. Parsing
     * stops at the "--" end-of-flags marker. Exits via
     * bad_param()/bad_value() on malformed input. */
    struct au_init *aui[] = {
	&init->binary_alloc,
	&init->std_alloc,
	&init->ets_alloc,
	&init->eheap_alloc,
	&init->ll_alloc,
	&init->driver_alloc,
	&init->fix_alloc,
	&init->sl_alloc
	/* test_alloc not affected by +Mea??? or +Mu??? */
    };
    int aui_sz = (int) sizeof(aui)/sizeof(aui[0]);
    char *arg;
    char *rest;
    int i, j;

    i = 1;

    ASSERT(argc && argv && init);

    while (i < *argc) {
	if(argv[i][0] == '-') {
	    char *param = argv[i]+1;
	    switch (argv[i][1]) {
	    case 'M':
		switch (argv[i][2]) {
		case 'B': /* binary_alloc */
		    handle_au_arg(&init->binary_alloc, &argv[i][3], argv, &i, 0);
		    break;
		case 'I': /* literal_alloc */
		    if (has_prefix("scs", argv[i]+3)) {
			/* Literal mmap super carrier size. The value is
			 * parsed (and consumed) even without mseg. */
#if HAVE_ERTS_MSEG
			init->mseg.literal_mmap.scs =
#endif
			    get_mb_value(argv[i]+6, argv, &i);
		    }
		    else
			handle_au_arg(&init->literal_alloc, &argv[i][3], argv, &i, 0);
		    break;
		case 'D': /* std_alloc */
		    handle_au_arg(&init->std_alloc, &argv[i][3], argv, &i, 0);
		    break;
		case 'E': /* ets_alloc */
		    handle_au_arg(&init->ets_alloc, &argv[i][3], argv, &i, 0);
		    break;
		case 'F': /* fix_alloc */
		    handle_au_arg(&init->fix_alloc, &argv[i][3], argv, &i, 0);
		    break;
		case 'H': /* eheap_alloc */
		    handle_au_arg(&init->eheap_alloc, &argv[i][3], argv, &i, 0);
		    break;
		case 'L': /* ll_alloc */
		    handle_au_arg(&init->ll_alloc, &argv[i][3], argv, &i, 0);
		    break;
		case 'M': /* mseg options; values are parsed (consumed)
			   * even when mseg support is compiled out. */
		    if (has_prefix("amcbf", argv[i]+3)) {
#if HAVE_ERTS_MSEG
			init->mseg.amcbf =
#endif
			    get_kb_value(argv[i]+8, argv, &i);
		    }
		    else if (has_prefix("rmcbf", argv[i]+3)) {
#if HAVE_ERTS_MSEG
			init->mseg.rmcbf =
#endif
			    get_amount_value(argv[i]+8, argv, &i);
		    }
		    else if (has_prefix("mcs", argv[i]+3)) {
#if HAVE_ERTS_MSEG
			init->mseg.mcs =
#endif
			    get_amount_value(argv[i]+6, argv, &i);
		    }
		    else if (has_prefix("scs", argv[i]+3)) {
#if HAVE_ERTS_MSEG
			init->mseg.dflt_mmap.scs =
#endif
			    get_mb_value(argv[i]+6, argv, &i);
		    }
		    else if (has_prefix("sco", argv[i]+3)) {
#if HAVE_ERTS_MSEG
			init->mseg.dflt_mmap.sco =
#endif
			    get_bool_value(argv[i]+6, argv, &i);
		    }
		    else if (has_prefix("scrpm", argv[i]+3)) {
#if HAVE_ERTS_MSEG
			init->mseg.dflt_mmap.scrpm =
#endif
			    get_bool_value(argv[i]+8, argv, &i);
		    }
		    else if (has_prefix("scrfsd", argv[i]+3)) {
#if HAVE_ERTS_MSEG
			init->mseg.dflt_mmap.scrfsd =
#endif
			    get_amount_value(argv[i]+9, argv, &i);
		    }
		    else {
			bad_param(param, param+2);
		    }
		    break;
		case 'R': /* driver_alloc */
		    handle_au_arg(&init->driver_alloc, &argv[i][3], argv, &i, 0);
		    break;
		case 'S': /* sl_alloc */
		    handle_au_arg(&init->sl_alloc, &argv[i][3], argv, &i, 0);
		    break;
		case 'T': /* temp_alloc */
		    handle_au_arg(&init->temp_alloc, &argv[i][3], argv, &i, 0);
		    break;
		case 'Z': /* test_alloc */
		    handle_au_arg(&init->test_alloc, &argv[i][3], argv, &i, 0);
		    break;
		case 'Y': { /* sys_alloc */
		    if (has_prefix("tt", param+2)) {
			/* set trim threshold */
			arg = get_value(param+4, argv, &i);
			errno = 0;
			init->trim_threshold = (int) strtol(arg, &rest, 10);
			if (errno != 0
			    || rest == arg
			    || init->trim_threshold < 0
			    || (INT_MAX/1024) < init->trim_threshold) {
			    bad_value(param, param+4, arg);
			}
			VERBOSE(DEBUG_SYSTEM,
				("using trim threshold: %d\n",
				 init->trim_threshold));
			init->trim_threshold *= 1024;
		    }
		    else if (has_prefix("tp", param+2)) {
			/* set top pad */
			arg = get_value(param+4, argv, &i);
			errno = 0;
			init->top_pad = (int) strtol(arg, &rest, 10);
			if (errno != 0
			    || rest == arg
			    || init->top_pad < 0
			    || (INT_MAX/1024) < init->top_pad) {
			    bad_value(param, param+4, arg);
			}
			VERBOSE(DEBUG_SYSTEM,
				("using top pad: %d\n",init->top_pad));
			init->top_pad *= 1024;
		    }
		    else if (has_prefix("m", param+2)) {
			/* Has been handled by erlexec */
			(void) get_value(param+3, argv, &i);
		    }
		    else if (has_prefix("e", param+2)) {
			/* Only "true" is accepted; sys_alloc cannot be
			 * disabled. */
			arg = get_value(param+3, argv, &i);
			if (sys_strcmp("true", arg) != 0)
			    bad_value(param, param+3, arg);
		    }
		    else
			bad_param(param, param+2);
		    break;
		}
		case 'e':
		    switch (argv[i][3]) {
		    case 'a': {
			int a;
			arg = get_value(argv[i]+4, argv, &i);
			if (sys_strcmp("min", arg) == 0) {
			    /* Disable all optional allocators. */
			    for (a = 0; a < aui_sz; a++)
				aui[a]->enable = 0;
			}
			else if (sys_strcmp("max", arg) == 0) {
			    /* Enable all allocators. */
			    for (a = 0; a < aui_sz; a++)
				aui[a]->enable = 1;
			}
			else if (sys_strcmp("config", arg) == 0) {
			    init->erts_alloc_config = 1;
			}
			else if (sys_strcmp("r9c", arg) == 0
				 || sys_strcmp("r10b", arg) == 0
				 || sys_strcmp("r11b", arg) == 0) {
			    /* Legacy release compatibility settings. */
			    set_default_sl_alloc_opts(&init->sl_alloc);
			    set_default_std_alloc_opts(&init->std_alloc);
			    set_default_ll_alloc_opts(&init->ll_alloc);
			    set_default_temp_alloc_opts(&init->temp_alloc);
			    set_default_eheap_alloc_opts(&init->eheap_alloc);
			    set_default_binary_alloc_opts(&init->binary_alloc);
			    set_default_ets_alloc_opts(&init->ets_alloc);
			    set_default_driver_alloc_opts(&init->driver_alloc);
			    set_default_driver_alloc_opts(&init->fix_alloc);
			    init->driver_alloc.enable = 0;
			    if (sys_strcmp("r9c", arg) == 0) {
				init->sl_alloc.enable = 0;
				init->std_alloc.enable = 0;
				init->binary_alloc.enable = 0;
				init->ets_alloc.enable = 0;
			    }

			    for (a = 0; a < aui_sz; a++) {
				aui[a]->thr_spec = 0;
				aui[a]->init.util.acul = 0;
				aui[a]->init.util.ramv = 0;
				aui[a]->init.util.lmbcs = 5*1024*1024;
			    }
			}
			else {
			    bad_param(param, param+3);
			}
			break;
		    }
		    default:
			bad_param(param, param+1);
		    }
		    break;
		case 'i':
		    switch (argv[i][3]) {
		    case 't':
			init->instr.mtrace = get_value(argv[i]+4, argv, &i);
			break;
		    default:
			bad_param(param, param+2);
		    }
		    break;
		case 'l':
		    if (has_prefix("pm", param+2)) {
			arg = get_value(argv[i]+5, argv, &i);
			if (sys_strcmp("all", arg) == 0)
			    lock_all_physical_memory = 1;
			else if (sys_strcmp("no", arg) == 0)
			    lock_all_physical_memory = 0;
			else
			    bad_value(param, param+4, arg);
			break;
		    }
		    bad_param(param, param+2);
		    break;
		case 'u':
		    if (has_prefix("ycs", argv[i]+3)) {
			init->alloc_util.ycs
			    = get_kb_value(argv[i]+6, argv, &i);
		    }
		    else if (has_prefix("mmc", argv[i]+3)) {
			init->alloc_util.mmc
			    = get_amount_value(argv[i]+6, argv, &i);
		    }
		    else if (has_prefix("sac", argv[i]+3)) {
			init->alloc_util.sac
			    = get_bool_value(argv[i]+6, argv, &i);
		    }
		    else {
			/* Apply an allocator sub-switch to every
			 * allocator in aui[]. The argv slots consumed by
			 * one round are restored before the next so the
			 * same switch/value pair is re-parsed for each
			 * allocator. */
			int a;
			int start = i;
			char *param = argv[i];
			char *val = i+1 < *argc ? argv[i+1] : NULL;
			for (a = 0; a < aui_sz; a++) {
			    if (a > 0) {
				ASSERT(i == start || i == start+1);
				argv[start] = param;
				if (i != start)
				    argv[start + 1] = val;
				i = start;
			    }
			    handle_au_arg(aui[a], &argv[i][3], argv, &i, 1);
			}
		    }
		    break;
		default:
		    bad_param(param, param+1);
		}
		break;
	    case '-':
		if (argv[i][2] == '\0') {
		    /* End of system flags reached */
		    if (init->instr.mtrace) {
			/* Scan the remaining arguments for the node
			 * name, needed by memory tracing. */
			while (i < *argc) {
			    if(sys_strcmp(argv[i], "-sname") == 0
			       || sys_strcmp(argv[i], "-name") == 0) {
				if (i + 1 <*argc) {
				    init->instr.nodename = argv[i+1];
				    break;
				}
			    }
			    i++;
			}
		    }
		    goto args_parsed;
		}
		break;
	    default:
		break;
	    }
	}
	i++;
    }

 args_parsed:
    /* Handled arguments have been marked with NULL. Slide arguments
       not handled towards the beginning of argv. */
    for (i = 0, j = 0; i < *argc; i++) {
	if (argv[i])
	    argv[j++] = argv[i];
    }
    *argc = j;
}
  1674. static char *type_no_str(ErtsAlcType_t n)
  1675. {
  1676. #if ERTS_ALC_N_MIN != 0
  1677. if (n < ERTS_ALC_N_MIN)
  1678. return NULL;
  1679. #endif
  1680. if (n > ERTS_ALC_N_MAX)
  1681. return NULL;
  1682. return (char *) ERTS_ALC_N2TD(n);
  1683. }
  1684. #define type_str(T) type_no_str(ERTS_ALC_T2N((T)))
  1685. void
  1686. erts_alloc_register_scheduler(void *vesdp)
  1687. {
  1688. ErtsSchedulerData *esdp = (ErtsSchedulerData *) vesdp;
  1689. int ix = (int) esdp->no;
  1690. int aix;
  1691. ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
  1692. for (aix = ERTS_ALC_A_MIN; aix <= ERTS_ALC_A_MAX; aix++) {
  1693. ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[aix];
  1694. esdp->alloc_data.deallctr[aix] = NULL;
  1695. esdp->alloc_data.pref_ix[aix] = -1;
  1696. if (tspec->enabled) {
  1697. if (!tspec->dd)
  1698. esdp->alloc_data.pref_ix[aix] = ix;
  1699. else {
  1700. Allctr_t *allctr = tspec->allctr[ix];
  1701. ASSERT(allctr);
  1702. esdp->alloc_data.deallctr[aix] = allctr;
  1703. esdp->alloc_data.pref_ix[aix] = ix;
  1704. }
  1705. }
  1706. }
  1707. }
  1708. void
  1709. erts_alloc_scheduler_handle_delayed_dealloc(void *vesdp,
  1710. int *need_thr_progress,
  1711. ErtsThrPrgrVal *thr_prgr_p,
  1712. int *more_work)
  1713. {
  1714. ErtsSchedulerData *esdp = (ErtsSchedulerData *) vesdp;
  1715. int aix;
  1716. for (aix = ERTS_ALC_A_MIN; aix <= ERTS_ALC_A_MAX; aix++) {
  1717. Allctr_t *allctr;
  1718. if (esdp)
  1719. allctr = esdp->alloc_data.deallctr[aix];
  1720. else {
  1721. ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[aix];
  1722. if (tspec->enabled && tspec->dd)
  1723. allctr = tspec->allctr[0];
  1724. else
  1725. allctr = NULL;
  1726. }
  1727. if (allctr) {
  1728. erts_alcu_check_delayed_dealloc(allctr,
  1729. 1,
  1730. need_thr_progress,
  1731. thr_prgr_p,
  1732. more_work);
  1733. }
  1734. }
  1735. }
  1736. erts_aint32_t
  1737. erts_alloc_fix_alloc_shrink(int ix, erts_aint32_t flgs)
  1738. {
  1739. ErtsAllocatorThrSpec_t *tspec;
  1740. tspec = &erts_allctr_thr_spec[ERTS_ALC_A_FIXED_SIZE];
  1741. if (erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].thr_spec && tspec->enabled)
  1742. return erts_alcu_fix_alloc_shrink(tspec->allctr[ix], flgs);
  1743. if (ix == 0 && erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra)
  1744. return erts_alcu_fix_alloc_shrink(
  1745. erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra, flgs);
  1746. return 0;
  1747. }
static void
no_verify(Allctr_t *allctr)
{
    /* Intentionally empty: returned as the verify callback by
     * erts_alloc_get_verify_unused_temp_alloc() when no verification of
     * unused temp-alloc blocks is possible. */
}
  1752. erts_alloc_verify_func_t
  1753. erts_alloc_get_verify_unused_temp_alloc(Allctr_t **allctr)
  1754. {
  1755. if (erts_allctrs_info[ERTS_ALC_A_TEMPORARY].alloc_util
  1756. && erts_allctrs_info[ERTS_ALC_A_TEMPORARY].thr_spec) {
  1757. ErtsAllocatorThrSpec_t *tspec;
  1758. int ix = ERTS_ALC_GET_THR_IX();
  1759. tspec = &erts_allctr_thr_spec[ERTS_ALC_A_TEMPORARY];
  1760. if (ix < tspec->size) {
  1761. *allctr = tspec->allctr[ix];
  1762. return erts_alcu_verify_unused;
  1763. }
  1764. }
  1765. *allctr = NULL;
  1766. return no_verify;
  1767. }
__decl_noreturn void
erts_alc_fatal_error(int error, int func, ErtsAlcType_t n, ...)
{
    /* Terminate the emulator with a diagnostic for a fatal allocation
     * error.
     *   error - one of the ERTS_ALC_E_* error codes
     *   func  - the ERTS_ALC_O_* operation that failed
     *   n     - allocation type number; for ERTS_ALC_E_NOMEM a Uint
     *           size argument follows in the varargs
     * Never returns: every branch calls erts_exit(). */
    char buf[10];
    char *t_str;
    char *allctr_str;

    ASSERT(n >= ERTS_ALC_N_MIN);
    ASSERT(n <= ERTS_ALC_N_MAX);

    /* Defensive for release builds: an out-of-range type number still
     * yields a printable allocator name. */
    if (n < ERTS_ALC_N_MIN || ERTS_ALC_N_MAX < n)
	allctr_str = "UNKNOWN";
    else {
	ErtsAlcType_t a = ERTS_ALC_T2A(ERTS_ALC_N2T(n));
	if (erts_allctrs_info[a].enabled)
	    allctr_str = (char *) ERTS_ALC_A2AD(a);
	else
	    allctr_str = (char *) ERTS_ALC_A2AD(ERTS_ALC_A_SYSTEM);
    }

    t_str = type_no_str(n);
    if (!t_str) {
	/* No description available; print the raw type number. */
	erts_snprintf(buf, sizeof(buf), "%d", (int) n);
	t_str = buf;
    }

    switch (error) {
    case ERTS_ALC_E_NOTSUP: {
	char *op_str;
	switch (func) {
	case ERTS_ALC_O_ALLOC:		op_str = "alloc";	break;
	case ERTS_ALC_O_REALLOC:	op_str = "realloc";	break;
	case ERTS_ALC_O_FREE:		op_str = "free";	break;
	default:			op_str = "UNKNOWN";	break;
	}
	erts_exit(ERTS_ABORT_EXIT,
		 "%s: %s operation not supported (memory type: \"%s\")\n",
		 allctr_str, op_str, t_str);
	break;
    }
    case ERTS_ALC_E_NOMEM: {
	Uint size;
	va_list argp;
	char *op = func == ERTS_ALC_O_REALLOC ? "reallocate" : "allocate";

	va_start(argp, n);
	size = va_arg(argp, Uint);
	va_end(argp);

	/* Out of memory: dump-and-exit rather than abort. */
	erts_exit(ERTS_DUMP_EXIT,
		 "%s: Cannot %s %lu bytes of memory (of type \"%s\").\n",
		 allctr_str, op, size, t_str);
	break;
    }
    case ERTS_ALC_E_NOALLCTR:
	erts_exit(ERTS_ABORT_EXIT,
		 "erts_alloc: Unknown allocator type: %d\n",
		 ERTS_ALC_T2A(ERTS_ALC_N2T(n)));
	break;
    default:
	erts_exit(ERTS_ABORT_EXIT, "erts_alloc: Unknown error: %d\n", error);
	break;
    }
}
__decl_noreturn void
erts_alloc_enomem(ErtsAlcType_t type, Uint size)
{
    /* Out of memory on alloc for allocation type 'type' (size bytes
     * requested); converts the type to its number and never returns. */
    erts_alloc_n_enomem(ERTS_ALC_T2N(type), size);
}
__decl_noreturn void
erts_alloc_n_enomem(ErtsAlcType_t n, Uint size)
{
    /* Out of memory on alloc for allocation type number n; delegates to
     * erts_alc_fatal_error() and never returns. */
    erts_alc_fatal_error(ERTS_ALC_E_NOMEM, ERTS_ALC_O_ALLOC, n, size);
}
__decl_noreturn void
erts_realloc_enomem(ErtsAlcType_t type, void *ptr, Uint size)
{
    /* Out of memory on realloc for allocation type 'type'; converts the
     * type to its number and never returns. */
    erts_realloc_n_enomem(ERTS_ALC_T2N(type), ptr, size);
}
__decl_noreturn void
erts_realloc_n_enomem(ErtsAlcType_t n, void *ptr, Uint size)
{
    /* Out of memory on realloc for allocation type number n; delegates
     * to erts_alc_fatal_error() and never returns. ptr is unused here
     * as the error report only includes the requested size. */
    erts_alc_fatal_error(ERTS_ALC_E_NOMEM, ERTS_ALC_O_REALLOC, n, size);
}
/*
 * Return the total number of block bytes currently allocated under
 * allocator type alloc_no.
 *
 * For a non-thread-specific allocator this is a single query on its
 * one instance. For thread-specific allocators, carriers can migrate
 * between instances of *different* allocator types, so every
 * thread-specific allocator is visited and asked either for its own
 * current size (when it is alloc_no itself) or for the size of blocks
 * it holds on behalf of alloc_no (foreign size).
 *
 * fi/fisz: optional fixed-size allocation info accumulator passed
 * through to erts_alcu_current_size (may be NULL/0).
 */
static ERTS_INLINE UWord
alcu_size(ErtsAlcType_t alloc_no, ErtsAlcUFixInfo_t *fi, int fisz)
{
    UWord res;
    int ai;
    if (!erts_allctrs_info[alloc_no].thr_spec) {
        /* Single global instance: query it directly. */
        AllctrSize_t size;
        Allctr_t *allctr;
        allctr = erts_allctrs_info[alloc_no].extra;
        erts_alcu_current_size(allctr, &size, fi, fisz);
        return size.blocks;
    }
    res = 0;
    /* Thread-specific allocators can migrate carriers across types, so we have
     * to visit every allocator type to gather information on blocks that were
     * allocated by us. */
    /* NOTE(review): this loop uses `<` whereas sibling loops in this file
     * iterate with `<= ERTS_ALC_A_MAX`; presumably the last allocator number
     * is deliberately excluded here — confirm against erl_alloc_types. */
    for (ai = ERTS_ALC_A_MIN; ai < ERTS_ALC_A_MAX; ai++) {
        ErtsAllocatorThrSpec_t *tspec;
        Allctr_t *allctr;
        int i;
        if (!erts_allctrs_info[ai].thr_spec) {
            continue;
        }
        tspec = &erts_allctr_thr_spec[ai];
        ASSERT(tspec->enabled);
        /* Instance 0 is the global instance; the rest are per-scheduler. */
        for (i = tspec->size - 1; i >= 0; i--) {
            allctr = tspec->allctr[i];
            if (allctr) {
                AllctrSize_t size;
                if (ai == alloc_no) {
                    /* Our own type: full current size (and fix info). */
                    erts_alcu_current_size(allctr, &size, fi, fisz);
                } else {
                    /* Other type: only blocks it carries for alloc_no. */
                    erts_alcu_foreign_size(allctr, alloc_no, &size);
                }
                ASSERT(((SWord)size.blocks) >= 0);
                res += size.blocks;
            }
        }
    }
    return res;
}
  1887. static ERTS_INLINE void
  1888. add_fix_values(UWord *ap, UWord *up, ErtsAlcUFixInfo_t *fi, ErtsAlcType_t type)
  1889. {
  1890. int ix = ERTS_ALC_T2N(type) - ERTS_ALC_N_MIN_A_FIXED_SIZE;
  1891. ASSERT(0 <= ix && ix < ERTS_ALC_NO_FIXED_SIZES);
  1892. *ap += (UWord) fi[ix].allocated;
  1893. *up += (UWord) fi[ix].used;
  1894. }
/*
 * Implements erlang:memory/0,1 and the "=memory" crash-dump section.
 *
 * print_to_p/print_to_arg: when non-NULL, the figures are printed via
 *     erts_print (crash dump / etp use).
 * proc: when non-NULL, an Erlang term result is built on this process'
 *     heap (it must hold — and will temporarily drop — its main lock).
 * earg: THE_NON_VALUE for "all categories", an atom for a single
 *     category, or a list of category atoms.
 *
 * Returns the built term, NIL for an empty request, am_badarg for an
 * unknown category, or am_notsup when a required allocator is disabled.
 * Caller must have blocked thread progress (asserted below).
 */
Eterm
erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg)
{
    /*
     * NOTE! When updating this function, make sure to also update
     * erlang:memory/[0,1] in $ERL_TOP/erts/preloaded/src/erlang.erl
     */
    /* "total" and "system" require summing over all alloc_util allocators. */
#define ERTS_MEM_NEED_ALL_ALCU (want_tot_or_sys)
    /* Which categories were requested... */
    struct {
        int total;
        int processes;
        int processes_used;
        int system;
        int atom;
        int atom_used;
        int binary;
        int code;
        int ets;
    } want = {0};
    /* ...and the byte counts gathered for each of them. */
    struct {
        UWord total;
        UWord processes;
        UWord processes_used;
        UWord system;
        UWord atom;
        UWord atom_used;
        UWord binary;
        UWord code;
        UWord ets;
    } size = {0};
    /* Parallel arrays: category atom, pointer into `size`, built Eterm.
     * NOTE(review): the uintps ASSERT below divides by sizeof(UWord) although
     * uintps holds pointers — assumes sizeof(UWord) == sizeof(UWord *) on all
     * supported platforms. */
    Eterm atoms[sizeof(size)/sizeof(UWord)];
    UWord *uintps[sizeof(size)/sizeof(UWord)];
    Eterm euints[sizeof(size)/sizeof(UWord)];
    int want_tot_or_sys;
    int length;
    Eterm res = THE_NON_VALUE;
    ErtsAlcType_t ai;
    int only_one_value = 0;   /* single atom arg => return bare integer */
    ErtsAlcUFixInfo_t fi[ERTS_ALC_NO_FIXED_SIZES] = {{0,0}};
    ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
    /* Figure out what's wanted... */
    length = 0;
    if (is_non_value(earg)) { /* i.e. wants all */
        want.total = 1;
        atoms[length] = am_total;
        uintps[length++] = &size.total;
        want.processes = 1;
        atoms[length] = am_processes;
        uintps[length++] = &size.processes;
        want.processes_used = 1;
        atoms[length] = am_processes_used;
        uintps[length++] = &size.processes_used;
        want.system = 1;
        atoms[length] = am_system;
        uintps[length++] = &size.system;
        want.atom = 1;
        atoms[length] = am_atom;
        uintps[length++] = &size.atom;
        want.atom_used = 1;
        atoms[length] = am_atom_used;
        uintps[length++] = &size.atom_used;
        want.binary = 1;
        atoms[length] = am_binary;
        uintps[length++] = &size.binary;
        want.code = 1;
        atoms[length] = am_code;
        uintps[length++] = &size.code;
        want.ets = 1;
        atoms[length] = am_ets;
        uintps[length++] = &size.ets;
    }
    else {
        /* Normalize a single atom into a one-element temporary list so
         * the same parsing loop handles both argument shapes. */
        DeclareTmpHeapNoproc(tmp_heap,2);
        Eterm wanted_list;
        if (is_nil(earg))
            return NIL;
        UseTmpHeapNoproc(2);
        if (is_not_atom(earg))
            wanted_list = earg;
        else {
            wanted_list = CONS(&tmp_heap[0], earg, NIL);
            only_one_value = 1;
        }
        /* Each recognized atom is registered at most once (duplicates in
         * the request are ignored). */
        while (is_list(wanted_list)) {
            switch (CAR(list_val(wanted_list))) {
            case am_total:
                if (!want.total) {
                    want.total = 1;
                    atoms[length] = am_total;
                    uintps[length++] = &size.total;
                }
                break;
            case am_processes:
                if (!want.processes) {
                    want.processes = 1;
                    atoms[length] = am_processes;
                    uintps[length++] = &size.processes;
                }
                break;
            case am_processes_used:
                if (!want.processes_used) {
                    want.processes_used = 1;
                    atoms[length] = am_processes_used;
                    uintps[length++] = &size.processes_used;
                }
                break;
            case am_system:
                if (!want.system) {
                    want.system = 1;
                    atoms[length] = am_system;
                    uintps[length++] = &size.system;
                }
                break;
            case am_atom:
                if (!want.atom) {
                    want.atom = 1;
                    atoms[length] = am_atom;
                    uintps[length++] = &size.atom;
                }
                break;
            case am_atom_used:
                if (!want.atom_used) {
                    want.atom_used = 1;
                    atoms[length] = am_atom_used;
                    uintps[length++] = &size.atom_used;
                }
                break;
            case am_binary:
                if (!want.binary) {
                    want.binary = 1;
                    atoms[length] = am_binary;
                    uintps[length++] = &size.binary;
                }
                break;
            case am_code:
                if (!want.code) {
                    want.code = 1;
                    atoms[length] = am_code;
                    uintps[length++] = &size.code;
                }
                break;
            case am_ets:
                if (!want.ets) {
                    want.ets = 1;
                    atoms[length] = am_ets;
                    uintps[length++] = &size.ets;
                }
                break;
            default:
                UnUseTmpHeapNoproc(2);
                return am_badarg;
            }
            wanted_list = CDR(list_val(wanted_list));
        }
        UnUseTmpHeapNoproc(2);
        /* Improper list tail => badarg. */
        if (is_not_nil(wanted_list))
            return am_badarg;
    }
    /* All alloc_util allocators *have* to be enabled, except test_alloc */
    for (ai = ERTS_ALC_A_MIN; ai <= ERTS_ALC_A_MAX; ai++) {
        switch (ai) {
        case ERTS_ALC_A_SYSTEM:
        case ERTS_ALC_A_TEST:
            break;
        default:
            if (!erts_allctrs_info[ai].enabled
                || !erts_allctrs_info[ai].alloc_util) {
                return am_notsup;
            }
            break;
        }
    }
    ASSERT(length <= sizeof(atoms)/sizeof(Eterm));
    ASSERT(length <= sizeof(euints)/sizeof(Eterm));
    ASSERT(length <= sizeof(uintps)/sizeof(UWord));
    if (proc) {
        ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN
                       == erts_proc_lc_my_proc_locks(proc));
        /* We'll need locks early in the lock order */
        erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
    }
    /* Calculate values needed... */
    want_tot_or_sys = want.total || want.system;
    if (ERTS_MEM_NEED_ALL_ALCU) {
        /* Sum every alloc_util allocator into size.total, routing a few
         * specific allocators into their own category as we go. */
        size.total = 0;
        for (ai = ERTS_ALC_A_MIN; ai <= ERTS_ALC_A_MAX; ai++) {
            if (erts_allctrs_info[ai].alloc_util) {
                UWord *save;
                UWord asz;
                switch (ai) {
                case ERTS_ALC_A_TEMPORARY:
                     /*
                      * Often not thread safe and usually never
                      * contain any allocated memory.
                      */
                    continue;
                case ERTS_ALC_A_TEST:
                    continue;
                case ERTS_ALC_A_EHEAP:
                    save = &size.processes;
                    break;
                case ERTS_ALC_A_ETS:
                    save = &size.ets;
                    break;
                case ERTS_ALC_A_BINARY:
                    save = &size.binary;
                    break;
                case ERTS_ALC_A_FIXED_SIZE:
                    /* Collect fixed-size info; processes figures are
                     * derived from fi[] further below. */
                    asz = alcu_size(ai, fi, ERTS_ALC_NO_FIXED_SIZES);
                    size.total += asz;
                    continue;
                default:
                    save = NULL;
                    break;
                }
                asz = alcu_size(ai, NULL, 0);
                if (save)
                    *save = asz;
                size.total += asz;
            }
        }
    }
    if (want_tot_or_sys || want.processes || want.processes_used) {
        UWord tmp;
        /* size.processes was already filled in by the all-allocator sweep
         * above; otherwise query just the heap/fixed-size allocators. */
        if (ERTS_MEM_NEED_ALL_ALCU)
            tmp = size.processes;
        else {
            alcu_size(ERTS_ALC_A_FIXED_SIZE,
                      fi, ERTS_ALC_NO_FIXED_SIZES);
            tmp = alcu_size(ERTS_ALC_A_EHEAP, NULL, 0);
        }
        tmp += erts_ptab_mem_size(&erts_proc);
        tmp += erts_bif_timer_memory_size();
        size.processes = size.processes_used = tmp;
        /* Fixed-size types attributed to processes: "processes" gets the
         * allocated figure, "processes_used" the actually-used figure. */
        add_fix_values(&size.processes,
                       &size.processes_used,
                       fi,
                       ERTS_ALC_T_PROC);
        add_fix_values(&size.processes,
                       &size.processes_used,
                       fi,
                       ERTS_ALC_T_MONITOR);
        add_fix_values(&size.processes,
                       &size.processes_used,
                       fi,
                       ERTS_ALC_T_LINK);
        add_fix_values(&size.processes,
                       &size.processes_used,
                       fi,
                       ERTS_ALC_T_MSG_REF);
        add_fix_values(&size.processes,
                       &size.processes_used,
                       fi,
                       ERTS_ALC_T_LL_PTIMER);
        add_fix_values(&size.processes,
                       &size.processes_used,
                       fi,
                       ERTS_ALC_T_HL_PTIMER);
        add_fix_values(&size.processes,
                       &size.processes_used,
                       fi,
                       ERTS_ALC_T_BIF_TIMER);
    }
    if (want.atom || want.atom_used) {
        /* Atom table plus reserved (atom) vs actually-used (atom_used)
         * atom text space. */
        Uint reserved_atom_space, atom_space;
        erts_atom_get_text_space_sizes(&reserved_atom_space, &atom_space);
        size.atom = size.atom_used = atom_table_sz();
        if (want.atom)
            size.atom += reserved_atom_space;
        if (want.atom_used)
            size.atom_used += atom_space;
    }
    /* binary/ets were filled by the all-allocator sweep when it ran. */
    if (!ERTS_MEM_NEED_ALL_ALCU && want.binary)
        size.binary = alcu_size(ERTS_ALC_A_BINARY, NULL, 0);
    if (want.code) {
        size.code = module_table_sz();
        size.code += export_table_sz();
        size.code += export_entries_sz();
        size.code += erts_fun_table_sz();
        size.code += erts_ranges_sz();
        size.code += erts_total_code_size;
    }
    if (want.ets) {
        if (!ERTS_MEM_NEED_ALL_ALCU)
            size.ets = alcu_size(ERTS_ALC_A_ETS, NULL, 0);
        size.ets += erts_get_ets_misc_mem_size();
    }
    if (want_tot_or_sys) {
#ifdef BEAMASM
        /* The JIT allocates code on its own because of W^X restrictions, so we
         * need to bump the total size accordingly. */
        size.total += erts_total_code_size;
#endif
        ASSERT(size.total >= size.processes);
        /* "system" is everything not attributed to processes. */
        size.system = size.total - size.processes;
    }
    if (print_to_p) {
        int i;
        fmtfn_t to = *print_to_p;
        void *arg = print_to_arg;
        /* Print result... */
        erts_print(to, arg, "=memory\n");
        for (i = 0; i < length; i++)
            erts_print(to, arg, "%T: %bpu\n", atoms[i], *uintps[i]);
    }
    if (proc) {
        /* Build erlang term result... */
        Uint *hp;
        Uint hsz;
        erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
        if (only_one_value) {
            /* Single category requested with an atom: return the bare
             * integer rather than a [{Atom, Value}] list. */
            ASSERT(length == 1);
            hsz = 0;
            erts_bld_uword(NULL, &hsz, *uintps[0]);
            hp = hsz ? HAlloc((Process *) proc, hsz) : NULL;
            res = erts_bld_uword(&hp, NULL, *uintps[0]);
        }
        else {
            /* Two-pass build: first pass (hpp == NULL) sizes the heap
             * need, second pass actually builds the term. */
            Uint **hpp = NULL;
            Uint *hszp = &hsz;
            hsz = 0;
            while (1) {
                int i;
                for (i = 0; i < length; i++)
                    euints[i] = erts_bld_uword(hpp, hszp, *uintps[i]);
                res = erts_bld_2tup_list(hpp, hszp, length, atoms, euints);
                if (hpp)
                    break;
                hp = HAlloc((Process *) proc, hsz);
                hpp = &hp;
                hszp = NULL;
            }
        }
    }
    return res;
#undef ERTS_MEM_NEED_ALL_ALCU
}
/* One entry of the "=allocated_areas" output: a named figure with either
 * one (arity == 2) or two (arity == 3) unsigned integer values. */
struct aa_values {
    Uint arity;       /* 2 or 3: tuple size including the name */
    const char *name; /* area name, e.g. "atom_space" */
    Uint ui[2];       /* ui[0] always set; ui[1] only when arity == 3 */
};
/*
 * Implements erlang:system_info(allocated_areas) and the
 * "=allocated_areas" crash-dump section: memory used by the runtime
 * that is *not* tracked per alloc_util allocator.
 *
 * print_to_p/print_to_arg: when non-NULL, print the table via erts_print.
 * proc: when non-NULL, build a term result on this process' heap (its
 *     main lock is held by the caller and temporarily dropped here).
 *
 * Returns the built list of tuples, THE_NON_VALUE when no term was
 * requested, or am_internal_error on an impossible arity.
 */
Eterm
erts_allocated_areas(fmtfn_t *print_to_p, void *print_to_arg, void *proc)
{
#define MAX_AA_VALUES (24)
    struct aa_values values[MAX_AA_VALUES];
    Eterm res = THE_NON_VALUE;
    int i, length;
    Uint reserved_atom_space, atom_space;
    if (proc) {
        ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN
                       == erts_proc_lc_my_proc_locks(proc));
        /* We'll need locks early in the lock order */
        erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
    }
    /* Fill the table one entry at a time; i counts entries. */
    i = 0;
    values[i].arity = 2;
    values[i].name = "sys_misc";
    values[i].ui[0] = erts_sys_misc_mem_sz();
    i++;
    values[i].arity = 2;
    values[i].name = "static";
    values[i].ui[0] =
        sizeof(ErtsPTab)*2              /* proc & port tables */
        + erts_timer_wheel_memory_size(); /* Timer wheel */
    i++;
    /* atom_space is the one three-valued entry: reserved and used. */
    erts_atom_get_text_space_sizes(&reserved_atom_space, &atom_space);
    values[i].arity = 3;
    values[i].name = "atom_space";
    values[i].ui[0] = reserved_atom_space;
    values[i].ui[1] = atom_space;
    i++;
    values[i].arity = 2;
    values[i].name = "atom_table";
    values[i].ui[0] = atom_table_sz();
    i++;
    values[i].arity = 2;
    values[i].name = "module_table";
    values[i].ui[0] = module_table_sz();
    i++;
    values[i].arity = 2;
    values[i].name = "export_table";
    values[i].ui[0] = export_table_sz();
    i++;
    values[i].arity = 2;
    values[i].name = "export_list";
    values[i].ui[0] = export_entries_sz();
    i++;
    values[i].arity = 2;
    values[i].name = "register_table";
    values[i].ui[0] = process_reg_sz();
    i++;
    values[i].arity = 2;
    values[i].name = "fun_table";
    values[i].ui[0] = erts_fun_table_sz();
    i++;
    values[i].arity = 2;
    values[i].name = "module_refs";
    values[i].ui[0] = erts_ranges_sz();
    i++;
    values[i].arity = 2;
    values[i].name = "loaded_code";
    values[i].ui[0] = erts_total_code_size;
    i++;
    values[i].arity = 2;
    values[i].name = "dist_table";
    values[i].ui[0] = erts_dist_table_size();
    i++;
    values[i].arity = 2;
    values[i].name = "node_table";
    values[i].ui[0] = erts_node_table_size();
    i++;
    values[i].arity = 2;
    values[i].name = "bits_bufs_size";
    values[i].ui[0] = erts_bits_bufs_size();
    i++;
    values[i].arity = 2;
    values[i].name = "bif_timer";
    values[i].ui[0] = erts_bif_timer_memory_size();
    i++;
    values[i].arity = 2;
    values[i].name = "process_table";
    values[i].ui[0] = erts_ptab_mem_size(&erts_proc);
    i++;
    values[i].arity = 2;
    values[i].name = "port_table";
    values[i].ui[0] = erts_ptab_mem_size(&erts_port);
    i++;
    values[i].arity = 2;
    values[i].name = "ets_misc";
    values[i].ui[0] = erts_get_ets_misc_mem_size();
    i++;
    /* Data not allocated by any alloc_util allocators, must be summed into
     * the "total" figure in erlang:memory/0,1. */
    values[i].arity = 2;
    values[i].name = "external_alloc";
#ifdef BEAMASM
    /* JIT code is allocated outside alloc_util (W^X). */
    values[i].ui[0] = erts_total_code_size;
#else
    values[i].ui[0] = 0;
#endif
    i++;
    length = i;
    ASSERT(length <= MAX_AA_VALUES);
    if (print_to_p) {
        /* Print result... */
        fmtfn_t to = *print_to_p;
        void *arg = print_to_arg;
        erts_print(to, arg, "=allocated_areas\n");
        for (i = 0; i < length; i++) {
            switch (values[i].arity) {
            case 2:
                erts_print(to, arg, "%s: %beu\n",
                           values[i].name, values[i].ui[0]);
                break;
            case 3:
                erts_print(to, arg, "%s: %beu %beu\n",
                           values[i].name, values[i].ui[0], values[i].ui[1]);
                break;
            default:
                erts_print(to, arg, "ERROR: internal_error\n");
                ASSERT(0);
                return am_internal_error;
            }
        }
    }
    if (proc) {
        /* Build erlang term result... */
        Eterm tuples[MAX_AA_VALUES];
        Uint *hp;
        Uint **hpp;
        Uint hsz;
        Uint *hszp;
        erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
        /* Two-pass build: first pass (hpp == NULL) only accumulates the
         * heap size need in hsz, second pass builds onto the heap. */
        hpp = NULL;
        hsz = 0;
        hszp = &hsz;
        while (1) {
            int i;
            for (i = 0; i < length; i++) {
                Eterm atom;
                if (hpp)
                    atom = am_atom_put(values[i].name,
                                       (int) sys_strlen(values[i].name));
                else
                    /* Size pass: any immediate stands in for the atom. */
                    atom = am_true;
                switch (values[i].arity) {
                case 2:
                    tuples[i] = erts_bld_tuple(hpp, hszp, 2,
                                               atom,
                                               erts_bld_uint(hpp, hszp,
                                                             values[i].ui[0]));
                    break;
                case 3:
                    tuples[i] = erts_bld_tuple(hpp, hszp, 3,
                                               atom,
                                               erts_bld_uint(hpp, hszp,
                                                             values[i].ui[0]),
                                               erts_bld_uint(hpp, hszp,
                                                             values[i].ui[1]));
                    break;
                default:
                    ASSERT(0);
                    return am_internal_error;
                }
            }
            res = erts_bld_list(hpp, hszp, length, tuples);
            if (hpp)
                break;
            hp = HAlloc((Process *) proc, hsz);
            hpp = &hp;
            hszp = NULL;
        }
    }
    return res;
#undef MAX_AA_VALUES
}
  2413. Eterm
  2414. erts_alloc_util_allocators(void *proc)
  2415. {
  2416. Eterm res;
  2417. Uint *hp;
  2418. Uint sz;
  2419. int i;
  2420. /*
  2421. * Currently all allocators except sys_alloc are
  2422. * alloc_util allocators.
  2423. * Also hide test_alloc which is disabled by default
  2424. * and only intended for our own testing.
  2425. */
  2426. sz = ((ERTS_ALC_A_MAX + 1 - ERTS_ALC_A_MIN) - 2)*2;
  2427. ASSERT(sz > 0);
  2428. hp = HAlloc((Process *) proc, sz);
  2429. res = NIL;
  2430. for (i = ERTS_ALC_A_MAX; i >= ERTS_ALC_A_MIN; i--) {
  2431. switch (i) {
  2432. case ERTS_ALC_A_SYSTEM:
  2433. case ERTS_ALC_A_TEST:
  2434. break;
  2435. default: {
  2436. char *alc_str = (char *) ERTS_ALC_A2AD(i);
  2437. Eterm alc = am_atom_put(alc_str, sys_strlen(alc_str));
  2438. res = CONS(hp, alc, res);
  2439. hp += 2;
  2440. break;
  2441. }
  2442. }
  2443. }
  2444. return res;
  2445. }
/*
 * Print the full "=allocator:..." crash-dump/etp sections: one section
 * per allocator instance, followed by mseg/erts_mmap, global alloc_util
 * options and the instrumentation flag.
 *
 * Caller must have blocked thread progress (asserted below).
 */
void
erts_allocator_info(fmtfn_t to, void *arg)
{
    ErtsAlcType_t a;
    ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
    for (a = ERTS_ALC_A_MIN; a <= ERTS_ALC_A_MAX; a++) {
        int ai;
        /* The `ai == 0 ||` makes the loop run exactly once for
         * non-thread-specific allocators (thr_spec == 0), and once per
         * instance otherwise. */
        for (ai = 0; ai == 0 || ai < erts_allctrs_info[a].thr_spec; ai++) {
            if (erts_allctrs_info[a].thr_spec) {
                if (!erts_allctr_thr_spec[a].allctr[ai])
                    continue;
                erts_print(to, arg, "=allocator:%s[%d]\n",
                           ERTS_ALC_A2AD(a), ai);
            }
            else {
                erts_print(to, arg, "=allocator:%s\n", ERTS_ALC_A2AD(a));
            }
            if (!erts_allctrs_info[a].enabled)
                erts_print(to, arg, "option e: false\n");
            else {
                if (erts_allctrs_info[a].alloc_util) {
                    void *as;
                    if (!erts_allctrs_info[a].thr_spec)
                        as = erts_allctrs_info[a].extra;
                    else {
                        ASSERT(erts_allctr_thr_spec[a].enabled);
                        as = erts_allctr_thr_spec[a].allctr[ai];
                    }
                    /* Binary alloc has its own thread safety... */
                    erts_alcu_info(as, 0, 0, &to, arg, NULL, NULL);
                }
                else {
                    /* Only sys_alloc is enabled but not alloc_util based. */
                    switch (a) {
                    case ERTS_ALC_A_SYSTEM: {
                        SysAllocStat sas;
                        erts_print(to, arg, "option e: true\n");
                        erts_print(to, arg, "option m: libc\n");
                        sys_alloc_stat(&sas);
                        /* Negative values mean "not available". */
                        if(sas.trim_threshold >= 0)
                            erts_print(to, arg, "option tt: %d\n", sas.trim_threshold);
                        if(sas.top_pad >= 0)
                            erts_print(to, arg, "option tp: %d\n", sas.top_pad);
                        break;
                    }
                    default:
                        ASSERT(0);
                        break;
                    }
                }
            }
        }
    }
#if HAVE_ERTS_MSEG
    {
        struct erts_mmap_info_struct emis;
        /* Instance 0 is the global mseg instance; 1..max per scheduler. */
        int max = (int) erts_no_schedulers;
        int i;
        for (i = 0; i <= max; i++) {
            erts_print(to, arg, "=allocator:mseg_alloc[%d]\n", i);
            erts_mseg_info(i, &to, arg, 0, 0, NULL, NULL);
        }
        erts_print(to, arg, "=allocator:erts_mmap.default_mmap\n");
        erts_mmap_info(&erts_dflt_mmapper, &to, arg, NULL, NULL, &emis);
#if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
        erts_print(to, arg, "=allocator:erts_mmap.literal_mmap\n");
        erts_mmap_info(&erts_literal_mmapper, &to, arg, NULL, NULL, &emis);
#endif
    }
#endif
    erts_print(to, arg, "=allocator:alloc_util\n");
    erts_alcu_au_info_options(&to, arg, NULL, NULL);
    erts_print(to, arg, "=allocator:instr\n");
    erts_print(to, arg, "option t: %s\n",
               erts_mtrace_enabled ? "true" : "false");
}
/*
 * Implements erlang:system_info(allocator): builds, on proc's heap, a
 * 4-tuple {AllocatorImpl, Version, Features, Settings}.
 *
 * Uses the classic two-pass pattern: first pass with szp set computes
 * the heap need, then memory is allocated and the `goto bld_term`
 * re-runs the same code with hpp set to actually build the term.
 */
Eterm
erts_allocator_options(void *proc)
{
#if HAVE_ERTS_MSEG
    int use_mseg = 0;   /* counts alloc_util allocators => mseg users */
#endif
    Uint sz, *szp, *hp, **hpp;
    Eterm res, features, settings;
    /* One slot per allocator plus up to 7 extra entries (mseg_alloc,
     * alloc_util, erts_mmap, instr, lock_physical_memory, ...). */
    Eterm atoms[ERTS_ALC_A_MAX-ERTS_ALC_A_MIN+7];
    Uint terms[ERTS_ALC_A_MAX-ERTS_ALC_A_MIN+7];
    int a, length;
    SysAllocStat sas;
    Uint *endp = NULL;
    sys_alloc_stat(&sas);
    /* First find out the heap size needed ... */
    hpp = NULL;
    szp = &sz;
    sz = 0;
 bld_term:
    length = 0;
    features = NIL;
    settings = NIL;
    /* Per-allocator settings: {AllocatorName, Options}. */
    for (a = ERTS_ALC_A_MIN; a <= ERTS_ALC_A_MAX; a++) {
        Eterm tmp = NIL;
        atoms[length] = am_atom_put((char *) ERTS_ALC_A2AD(a),
                                    sys_strlen(ERTS_ALC_A2AD(a)));
        if (erts_allctrs_info[a].enabled) {
            if (erts_allctrs_info[a].alloc_util) {
                Allctr_t *allctr;
#if HAVE_ERTS_MSEG
                use_mseg++;
#endif
                /* Instance 0 represents the options of all instances. */
                if (erts_allctr_thr_spec[a].enabled)
                    allctr = erts_allctr_thr_spec[a].allctr[0];
                else
                    allctr = erts_allctrs_info[a].extra;
                tmp = erts_alcu_info_options(allctr, NULL, NULL, hpp, szp);
            }
            else {
                /* Non-alloc_util (sys_alloc): hand-build the option list. */
                int l = 0;
                Eterm as[4];
                Eterm ts[4];
                as[l] = ERTS_MAKE_AM("e");
                ts[l++] = am_true;
                switch (a) {
                case ERTS_ALC_A_SYSTEM:
                    as[l] = ERTS_MAKE_AM("m");
                    ts[l++] = ERTS_MAKE_AM("libc");
                    /* Negative values mean "not available". */
                    if(sas.trim_threshold >= 0) {
                        as[l] = ERTS_MAKE_AM("tt");
                        ts[l++] = erts_bld_uint(hpp, szp,
                                                (Uint) sas.trim_threshold);
                    }
                    if(sas.top_pad >= 0) {
                        as[l] = ERTS_MAKE_AM("tp");
                        ts[l++] = erts_bld_uint(hpp, szp, (Uint) sas.top_pad);
                    }
                    break;
                default:
                    break;
                }
                tmp = erts_bld_2tup_list(hpp, szp, l, as, ts);
            }
        }
        else {
            /* Disabled allocator: just [{e, false}]. */
            Eterm atom = ERTS_MAKE_AM("e");
            Eterm term = am_false;
            tmp = erts_bld_2tup_list(hpp, szp, 1, &atom, &term);
        }
        terms[length++] = tmp;
    }
#if HAVE_ERTS_MSEG
    if (use_mseg) {
        atoms[length] = ERTS_MAKE_AM("mseg_alloc");
        terms[length++] = erts_mseg_info_options(0, NULL, NULL, hpp, szp);
    }
#endif
    atoms[length] = ERTS_MAKE_AM("alloc_util");
    terms[length++] = erts_alcu_au_info_options(NULL, NULL, hpp, szp);
#if HAVE_ERTS_MMAP
    atoms[length] = ERTS_MAKE_AM("erts_mmap");
    terms[length++] = erts_mmap_info_options(&erts_dflt_mmapper, NULL, NULL,
                                             NULL, hpp, szp);
#endif
    {
        Eterm o[1], v[1];
        o[0] = ERTS_MAKE_AM("t");
        v[0] = erts_mtrace_enabled ? am_true : am_false;
        atoms[length] = ERTS_MAKE_AM("instr");
        terms[length++] = erts_bld_2tup_list(hpp, szp, 1, o, v);
    }
    atoms[length] = ERTS_MAKE_AM("lock_physical_memory");
    terms[length++] = (lock_all_physical_memory ? am_all : am_no);
    settings = erts_bld_2tup_list(hpp, szp, length, atoms, terms);
    /* Features: the names of everything that is enabled/available. */
    length = 0;
    for (a = ERTS_ALC_A_MIN; a <= ERTS_ALC_A_MAX; a++) {
        if (erts_allctrs_info[a].enabled) {
            terms[length++] = am_atom_put((char *) ERTS_ALC_A2AD(a),
                                          sys_strlen(ERTS_ALC_A2AD(a)));
        }
    }
#if HAVE_ERTS_MSEG
    if (use_mseg)
        terms[length++] = ERTS_MAKE_AM("mseg_alloc");
#endif
#if ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
    terms[length++] = ERTS_MAKE_AM("sys_aligned_alloc");
#endif
#if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
    terms[length++] = ERTS_MAKE_AM("literal_mmap");
#endif
    features = length ? erts_bld_list(hpp, szp, length, terms) : NIL;
#if defined(__GLIBC__)
    {
        /* Report the underlying libc as {glibc, [Major, Minor]}. */
        Eterm AM_glibc = ERTS_MAKE_AM("glibc");
        Eterm version;
        version = erts_bld_cons(hpp,
                                szp,
                                make_small(__GLIBC__),
#ifdef __GLIBC_MINOR__
                                erts_bld_cons(hpp,
                                              szp,
                                              make_small(__GLIBC_MINOR__),
                                              NIL)
#else
                                NIL
#endif
            );
        res = erts_bld_tuple(hpp, szp, 4,
                             AM_glibc, version, features, settings);
    }
#else /* unknown allocator */
    res = erts_bld_tuple(hpp, szp, 4,
                         am_undefined, NIL, features, settings);
#endif
    if (szp) {
        /* ... and then build the term */
        hp = HAlloc((Process *) proc, sz);
        endp = hp + sz;
        hpp = &hp;
        szp = NULL;
        goto bld_term;
    }
    /* Return any over-estimated heap words. */
    ASSERT(endp >= hp);
    HRelease((Process *) proc, endp, hp);
    return res;
}
  2668. void *erts_alloc_permanent_cache_aligned(ErtsAlcType_t type, Uint size)
  2669. {
  2670. UWord v = (UWord) erts_alloc(type, size + (ERTS_CACHE_LINE_SIZE-1)
  2671. #ifdef VALGRIND
  2672. + sizeof(UWord)
  2673. #endif
  2674. );
  2675. #ifdef VALGRIND
  2676. { /* Link them to avoid Leak_PossiblyLost */
  2677. static UWord* first_in_list = NULL;
  2678. *(UWord**)v = first_in_list;
  2679. first_in_list = (UWord*) v;
  2680. v += sizeof(UWord);
  2681. }
  2682. #endif
  2683. if (v & ERTS_CACHE_LINE_MASK) {
  2684. v = (v & ~ERTS_CACHE_LINE_MASK) + ERTS_CACHE_LINE_SIZE;
  2685. }
  2686. ASSERT((v & ERTS_CACHE_LINE_MASK) == 0);
  2687. return (void*)v;
  2688. }
/*
 * Scheduler-bound callback answering an erts_request_alloc_info()
 * request: builds an {Ref, SchedId, InfoList} message for the requesting
 * process with allocator info (or only sizes when air->only_sz) for each
 * allocator listed in air->allocs.
 *
 * Each scheduler runs this once; only the request scheduler
 * (global_instances) also reports the global/instance-0 data. The last
 * scheduler to finish (refc hitting zero) frees the request.
 */
static void
reply_alloc_info(void *vair)
{
    ErtsAllocInfoReq *air = (ErtsAllocInfoReq *) vair;
    Uint sched_id = erts_get_scheduler_id();
    /* Only the scheduler the request was issued on reports globals. */
    int global_instances = air->req_sched == sched_id;
    ErtsProcLocks rp_locks;
    Process *rp = air->proc;
    Eterm ref_copy = NIL, ai_list, msg = NIL;
    Eterm *hp = NULL, *hp_start = NULL, *hp_end = NULL;
    Eterm **hpp;
    Uint sz, *szp;
    ErlOffHeap *ohp = NULL;
    ErtsMessage *mp = NULL;
#if HAVE_ERTS_MMAP
    struct erts_mmap_info_struct mmap_info_dflt;
# if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
    struct erts_mmap_info_struct mmap_info_literal;
# endif
#endif
    int i;
    /* Pick the size-only or full-info variant once, up front. */
    Eterm (*info_func)(Allctr_t *,
                       int,
                       int,
                       fmtfn_t *,
                       void *,
                       Uint **,
                       Uint *) = (air->only_sz
                                  ? erts_alcu_sz_info
                                  : erts_alcu_info);
    /* On the requesting scheduler we already hold the main lock. */
    rp_locks = air->req_sched == sched_id ? ERTS_PROC_LOCK_MAIN : 0;
    sz = 0;
    hpp = NULL;
    szp = &sz;
    /* Two-pass build: size pass (szp set), then heap pass (hpp set). */
    while (1) {
        if (hpp)
            ref_copy = erts_iref_storage_make_ref(&air->iref,
                                                  hpp, ohp, 0);
        else
            *szp += erts_iref_storage_heap_size(&air->iref);
        ai_list = NIL;
        /* Find the end of the allocator list, then walk it backwards so
         * consing yields the original order. */
        for (i = 0; air->allocs[i] != ERTS_ALC_A_INVALID; i++);
        for (i--; i >= 0; i--) {
            int ai = air->allocs[i];
            Allctr_t *allctr;
            Eterm ainfo;
            Eterm alloc_atom;
            if (global_instances) {
                switch (ai) {
                case ERTS_ALC_A_SYSTEM: {
                    alloc_atom = erts_bld_atom(hpp, szp, "sys_alloc");
                    ainfo = NIL;
                    if (!air->only_sz) {
                        SysAllocStat sas;
                        if (hpp)
                            sys_alloc_stat(&sas);
                        if (szp) {
                            /* ensure enough heap */
                            sas.top_pad = INT_MAX;
                            sas.trim_threshold = INT_MAX;
                        }
                        if (sas.top_pad >= 0) {
                            ainfo = erts_bld_cons(
                                hpp, szp,
                                erts_bld_tuple(
                                    hpp, szp, 2,
                                    erts_bld_atom(hpp, szp, "tp"),
                                    erts_bld_uint(
                                        hpp, szp,
                                        (Uint) sas.top_pad)),
                                ainfo);
                        }
                        if (sas.trim_threshold >= 0) {
                            ainfo = erts_bld_cons(
                                hpp, szp,
                                erts_bld_tuple(
                                    hpp, szp, 2,
                                    erts_bld_atom(hpp, szp, "tt"),
                                    erts_bld_uint(
                                        hpp, szp,
                                        (Uint) sas.trim_threshold)),
                                ainfo);
                        }
                        ainfo = erts_bld_cons(hpp, szp,
                                              erts_bld_tuple(
                                                  hpp, szp, 2,
                                                  erts_bld_atom(hpp, szp,
                                                                "m"),
                                                  erts_bld_atom(hpp, szp,
                                                                "libc")),
                                              ainfo);
                        ainfo = erts_bld_cons(hpp, szp,
                                              erts_bld_tuple(
                                                  hpp, szp, 2,
                                                  erts_bld_atom(hpp, szp,
                                                                "e"),
                                                  am_true),
                                              ainfo);
                        ainfo = erts_bld_tuple(hpp, szp, 2,
                                               erts_bld_atom(hpp, szp,
                                                             "options"),
                                               ainfo);
                        ainfo = erts_bld_cons(hpp, szp,ainfo,NIL);
                    }
                    ainfo = erts_bld_tuple(hpp, szp, 3,
                                           alloc_atom,
                                           make_small(0),
                                           ainfo);
                    break;
                }
                case ERTS_ALC_INFO_A_ALLOC_UTIL:
                    alloc_atom = erts_bld_atom(hpp, szp, "alloc_util");
                    ainfo = (air->only_sz
                             ? NIL
                             : erts_alcu_au_info_options(NULL, NULL,
                                                         hpp, szp));
                    ainfo = erts_bld_tuple(hpp, szp, 3,
                                           alloc_atom,
                                           make_small(0),
                                           ainfo);
                    break;
                case ERTS_ALC_INFO_A_ERTS_MMAP:
                    alloc_atom = erts_bld_atom(hpp, szp, "erts_mmap");
#if HAVE_ERTS_MMAP
                    ainfo = (air->only_sz ? NIL :
                             erts_mmap_info(&erts_dflt_mmapper, NULL, NULL,
                                            hpp, szp, &mmap_info_dflt));
                    ainfo = erts_bld_tuple3(hpp, szp,
                                            alloc_atom,
                                            erts_bld_atom(hpp,szp,"default_mmap"),
                                            ainfo);
# if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
                    /* Two entries for this case: default and literal mmap. */
                    ai_list = erts_bld_cons(hpp, szp,
                                            ainfo, ai_list);
                    ainfo = (air->only_sz ? NIL :
                             erts_mmap_info(&erts_literal_mmapper, NULL, NULL,
                                            hpp, szp, &mmap_info_literal));
                    ainfo = erts_bld_tuple3(hpp, szp,
                                            alloc_atom,
                                            erts_bld_atom(hpp,szp,"literal_mmap"),
                                            ainfo);
# endif
#else  /* !HAVE_ERTS_MMAP */
                    ainfo = erts_bld_tuple2(hpp, szp, alloc_atom,
                                            am_false);
#endif
                    break;
                case ERTS_ALC_INFO_A_MSEG_ALLOC:
                    alloc_atom = erts_bld_atom(hpp, szp, "mseg_alloc");
#if HAVE_ERTS_MSEG
                    ainfo = erts_mseg_info(0, NULL, NULL, hpp != NULL,
                                           air->only_sz, hpp, szp);
                    ainfo = erts_bld_tuple3(hpp, szp,
                                            alloc_atom,
                                            make_small(0),
                                            ainfo);
#else
                    ainfo = erts_bld_tuple2(hpp, szp, alloc_atom,
                                            am_false);
#endif
                    break;
                default:
                    alloc_atom = erts_bld_atom(hpp, szp,
                                               (char *) ERTS_ALC_A2AD(ai));
                    if (!erts_allctrs_info[ai].enabled)
                        ainfo = erts_bld_tuple(hpp, szp, 2, alloc_atom,
                                               am_false);
                    else if (erts_allctrs_info[ai].alloc_util) {
                        /* Instance 0 is the global instance. */
                        if (erts_allctrs_info[ai].thr_spec)
                            allctr = erts_allctr_thr_spec[ai].allctr[0];
                        else
                            allctr = erts_allctrs_info[ai].extra;
                        ainfo = info_func(allctr, air->internal, hpp != NULL,
                                          NULL, NULL, hpp, szp);
                        ainfo = erts_bld_tuple(hpp, szp, 3, alloc_atom,
                                               make_small(0), ainfo);
                    }
                    else {
                        erts_exit(ERTS_ABORT_EXIT, "%s:%d: internal error\n",
                                  __FILE__, __LINE__);
                    }
                }
                ai_list = erts_bld_cons(hpp, szp,
                                        ainfo, ai_list);
            }
            /* Per-scheduler instance data (every scheduler reports this). */
            switch (ai) {
            case ERTS_ALC_A_SYSTEM:
            case ERTS_ALC_INFO_A_ALLOC_UTIL:
            case ERTS_ALC_INFO_A_ERTS_MMAP:
                break;
            case ERTS_ALC_INFO_A_MSEG_ALLOC:
#if HAVE_ERTS_MSEG
                alloc_atom = erts_bld_atom(hpp, szp, "mseg_alloc");
                ainfo = erts_mseg_info(sched_id, NULL, NULL,
                                       hpp != NULL, air->only_sz, hpp, szp);
                ainfo = erts_bld_tuple(hpp, szp, 3,
                                       alloc_atom,
                                       make_small(sched_id),
                                       ainfo);
                ai_list = erts_bld_cons(hpp, szp, ainfo, ai_list);
#endif
                break;
            default:
                if (erts_allctrs_info[ai].thr_spec) {
                    alloc_atom = erts_bld_atom(hpp, szp,
                                               (char *) ERTS_ALC_A2AD(ai));
                    allctr = erts_allctr_thr_spec[ai].allctr[sched_id];
                    ainfo = info_func(allctr, air->internal, hpp != NULL, NULL,
                                      NULL, hpp, szp);
                    ai_list = erts_bld_cons(hpp, szp,
                                            erts_bld_tuple(
                                                hpp, szp,
                                                3,
                                                alloc_atom,
                                                make_small(sched_id),
                                                ainfo),
                                            ai_list);
                }
                break;
            }
            /* NOTE(review): msg is (re)built on every loop iteration here;
             * only the last iteration's value is used — looks like it could
             * live after the loop. Confirm against upstream before moving. */
            msg = erts_bld_tuple(hpp, szp,
                                 3,
                                 ref_copy,
                                 make_small(sched_id),
                                 ai_list);
        }
        if (hpp)
            break;
        /* Size pass done: allocate the message heap and rebuild for real. */
        mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
        hp_start = hp;
        hp_end = hp + sz;
        szp = NULL;
        hpp = &hp;
    }
    /* Return over-estimated heap words before queueing the message. */
    if (hp != hp_end)
        erts_shrink_message_heap(&mp, rp, hp_start, hp, hp_end, &msg, 1);
    erts_queue_message(rp, rp_locks, mp, msg, am_system);
    if (air->req_sched == sched_id)
        rp_locks &= ~ERTS_PROC_LOCK_MAIN;   /* caller keeps the main lock */
    erts_proc_unlock(rp, rp_locks);
    erts_proc_dec_refc(rp);
    /* Last scheduler to answer cleans up the shared request. */
    if (erts_atomic32_dec_read_nob(&air->refc) == 0) {
        erts_iref_storage_clean(&air->iref);
        aireq_free(air);
    }
}
  2935. int
  2936. erts_request_alloc_info(struct process *c_p,
  2937. Eterm ref,
  2938. Eterm allocs,
  2939. int only_sz,
  2940. int internal)
  2941. {
  2942. ErtsAllocInfoReq *air = aireq_alloc();
  2943. Eterm req_ai[ERTS_ALC_INFO_A_END] = {0};
  2944. Eterm alist;
  2945. int airix = 0, ai;
  2946. air->req_sched = erts_get_scheduler_id();
  2947. air->only_sz = only_sz;
  2948. air->internal = internal;
  2949. air->proc = c_p;
  2950. if (is_not_internal_ref(ref))
  2951. return 0;
  2952. erts_iref_storage_save(&air->iref, ref);
  2953. if (is_not_list(allocs))
  2954. return 0;
  2955. alist = allocs;
  2956. while (is_list(alist)) {
  2957. int saved = 0;
  2958. Eterm* consp = list_val(alist);
  2959. Eterm alloc = CAR(consp);
  2960. for (ai = ERTS_ALC_A_MIN; ai <= ERTS_ALC_A_MAX; ai++)
  2961. if (erts_is_atom_str(erts_alc_a2ad[ai], alloc, 0))
  2962. goto save_alloc;
  2963. if (erts_is_atom_str("mseg_alloc", alloc, 0)) {
  2964. ai = ERTS_ALC_INFO_A_MSEG_ALLOC;
  2965. goto save_alloc;
  2966. }
  2967. if (erts_is_atom_str("erts_mmap", alloc, 0)) {
  2968. ai = ERTS_ALC_INFO_A_ERTS_MMAP;
  2969. goto save_alloc;
  2970. }
  2971. if (erts_is_atom_str("alloc_util", alloc, 0)) {
  2972. ai = ERTS_ALC_INFO_A_ALLOC_UTIL;
  2973. save_alloc:
  2974. if (req_ai[ai])
  2975. return 0;
  2976. air->allocs[airix++] = ai;
  2977. req_ai[ai] = 1;
  2978. saved = 1;
  2979. }
  2980. if (!saved)
  2981. return 0;
  2982. alist = CDR(consp);
  2983. }
  2984. if (is_not_nil(alist))
  2985. return 0;
  2986. air->allocs[airix] = ERTS_ALC_A_INVALID;
  2987. erts_atomic32_init_nob(&air->refc,
  2988. (erts_aint32_t) erts_no_schedulers);
  2989. erts_proc_add_refc(c_p, (Sint) erts_no_schedulers);
  2990. if (erts_no_schedulers > 1)
  2991. erts_schedule_multi_misc_aux_work(1,
  2992. erts_no_schedulers,
  2993. reply_alloc_info,
  2994. (void *) air);
  2995. reply_alloc_info((void *) air);
  2996. return 1;
  2997. }
  2998. Eterm erts_alloc_set_dyn_param(Process* c_p, Eterm tuple)
  2999. {
  3000. ErtsAllocatorThrSpec_t *tspec;
  3001. ErtsAlcType_t ai;
  3002. Allctr_t* allctr;
  3003. Eterm* tp;
  3004. Eterm res;
  3005. if (!is_tuple_arity(tuple, 3))
  3006. goto badarg;
  3007. tp = tuple_val(tuple);
  3008. /*
  3009. * Ex: {ets_alloc, sbct, 256000}
  3010. */
  3011. if (!is_atom(tp[1]) || !is_atom(tp[2]) || !is_integer(tp[3]))
  3012. goto badarg;
  3013. for (ai = ERTS_ALC_A_MIN; ai <= ERTS_ALC_A_MAX; ai++)
  3014. if (erts_is_atom_str(erts_alc_a2ad[ai], tp[1], 0))
  3015. break;
  3016. if (ai > ERTS_ALC_A_MAX)
  3017. goto badarg;
  3018. if (!erts_allctrs_info[ai].enabled ||
  3019. !erts_allctrs_info[ai].alloc_util) {
  3020. return am_notsup;
  3021. }
  3022. if (tp[2] == am_sbct) {
  3023. Uint sbct;
  3024. int i, ok;
  3025. if (!term_to_Uint(tp[3], &sbct))
  3026. goto badarg;
  3027. tspec = &erts_allctr_thr_spec[ai];
  3028. if (tspec->enabled) {
  3029. ok = 0;
  3030. for (i = 0; i < tspec->size; i++) {
  3031. allctr = tspec->allctr[i];
  3032. ok |= allctr->try_set_dyn_param(allctr, am_sbct, sbct);
  3033. }
  3034. }
  3035. else {
  3036. allctr = erts_allctrs_info[ai].extra;
  3037. ok = allctr->try_set_dyn_param(allctr, am_sbct, sbct);
  3038. }
  3039. return ok ? am_ok : am_notsup;
  3040. }
  3041. return am_notsup;
  3042. badarg:
  3043. ERTS_BIF_PREP_ERROR(res, c_p, EXC_BADARG);
  3044. return res;
  3045. }
  3046. /*
  3047. * The allocator wrapper prelocking stuff below is about the locking order.
  3048. * It only affects wrappers (erl_mtrace.c) that keep locks during
  3049. * alloc/realloc/free.
  3050. *
  3051. * Some query functions in erl_alloc_util.c lock the allocator mutex and then
  3052. * use erts_printf that in turn may call the sys allocator through the wrappers.
  3053. * To avoid breaking locking order these query functions first "pre-locks" all
  3054. * allocator wrappers.
  3055. */
/* Head of the singly linked list of registered allocator wrappers
 * (see erl_mtrace.c); wrappers are pushed by
 * erts_allctr_wrapper_prelock_init(). */
ErtsAllocatorWrapper_t *erts_allctr_wrappers;
/* Non-zero while some thread holds all wrapper locks via
 * erts_allctr_wrapper_pre_lock(). */
int erts_allctr_wrapper_prelocked = 0;
/* Thread-specific flag identifying the thread that holds the pre-locks. */
erts_tsd_key_t erts_allctr_prelock_tsd_key;
/* Register an allocator wrapper by pushing it onto the global wrapper
 * list. The wrapper must supply both lock and unlock callbacks. */
void erts_allctr_wrapper_prelock_init(ErtsAllocatorWrapper_t* wrapper)
{
    ASSERT(wrapper->lock && wrapper->unlock);
    wrapper->next = erts_allctr_wrappers;
    erts_allctr_wrappers = wrapper;
}
  3065. void erts_allctr_wrapper_pre_lock(void)
  3066. {
  3067. if (erts_allctr_wrappers) {
  3068. ErtsAllocatorWrapper_t* wrapper = erts_allctr_wrappers;
  3069. for ( ; wrapper; wrapper = wrapper->next) {
  3070. wrapper->lock();
  3071. }
  3072. ASSERT(!erts_allctr_wrapper_prelocked);
  3073. erts_allctr_wrapper_prelocked = 1;
  3074. erts_tsd_set(erts_allctr_prelock_tsd_key, (void*)1);
  3075. }
  3076. }
  3077. void erts_allctr_wrapper_pre_unlock(void)
  3078. {
  3079. if (erts_allctr_wrappers) {
  3080. ErtsAllocatorWrapper_t* wrapper = erts_allctr_wrappers;
  3081. erts_allctr_wrapper_prelocked = 0;
  3082. erts_tsd_set(erts_allctr_prelock_tsd_key, (void*)0);
  3083. for ( ; wrapper; wrapper = wrapper->next) {
  3084. wrapper->unlock();
  3085. }
  3086. }
  3087. }
  3088. /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
  3089. * NOTE: erts_alc_test() is only supposed to be used for testing. *
  3090. * *
  3091. * Keep alloc_SUITE_data/allocator_test.h updated if changes are made *
  3092. * to erts_alc_test() *
  3093. \* */
  3094. #define ERTS_ALC_TEST_ABORT erts_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error\n")
/*
 * Test entry point used only by the allocator test suites
 * (see alloc_SUITE_data/allocator_test.h).
 *
 * The high byte of 'op' selects a subsystem (alloc_util, goodfit,
 * bestfit, afit, mseg, aoff, or the 0xf "misc" group below); the low
 * bits select the operation. a1..a3 are operation-specific arguments.
 * Returns an operation-specific value, or ~0 on unknown op.
 */
UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
{
    switch (op >> 8) {
    case 0x0: return erts_alcu_test(op, a1, a2);
    case 0x1: return erts_gfalc_test(op, a1, a2);
    case 0x2: return erts_bfalc_test(op, a1, a2);
    case 0x3: return erts_afalc_test(op, a1, a2);
    case 0x4: return erts_mseg_test(op, a1, a2, a3);
    case 0x5: return erts_aoffalc_test(op, a1, a2);
    case 0xf:
	switch (op) {
	case 0xf00:
	    /* Allocate a2 bytes from allocator a1 (ts variant if needed). */
	    if (((Allctr_t *) a1)->thread_safe)
		return (UWord) erts_alcu_alloc_ts(ERTS_ALC_T_TEST,
						  (void *) a1,
						  (Uint) a2);
	    else
		return (UWord) erts_alcu_alloc(ERTS_ALC_T_TEST,
					       (void *) a1,
					       (Uint) a2);
	case 0xf01:
	    /* Reallocate block a2 in allocator a1 to a3 bytes. */
	    if (((Allctr_t *) a1)->thread_safe)
		return (UWord) erts_alcu_realloc_ts(ERTS_ALC_T_TEST,
						    (void *) a1,
						    (void *) a2,
						    (Uint) a3);
	    else
		return (UWord) erts_alcu_realloc(ERTS_ALC_T_TEST,
						 (void *) a1,
						 (void *) a2,
						 (Uint) a3);
	case 0xf02:
	    /* Free block a2 in allocator a1. */
	    if (((Allctr_t *) a1)->thread_safe)
		erts_alcu_free_ts(ERTS_ALC_T_TEST, (void *) a1, (void *) a2);
	    else
		erts_alcu_free(ERTS_ALC_T_TEST, (void *) a1, (void *) a2);
	    return 0;
	case 0xf03: {
	    /* Start a fresh test allocator.
	     * a1 = name prefix, a3 = optional "-t..." style argv to tweak
	     * the configuration via handle_au_arg(). Returns the Allctr_t*
	     * or NULL on bad arguments. */
	    Allctr_t *allctr;
	    struct au_init init;

	    SET_DEFAULT_ALLOC_OPTS(&init);
	    init.enable = 1;
	    init.astrat = ERTS_ALC_S_GOODFIT;
	    init.init.util.name_prefix = (char *) a1;
	    init.init.util.alloc_no = ERTS_ALC_A_TEST;
	    init.init.util.alloc_strat = init.astrat;
	    init.init.util.ts = 1;

	    if ((char **) a3) {
		char **argv = (char **) a3;
		int i = 0;
		while (argv[i]) {
		    if (argv[i][0] == '-' && argv[i][1] == 't')
			handle_au_arg(&init, &argv[i][2], argv, &i, 0);
		    else
			return (UWord) NULL;
		    i++;
		}
	    }

	    /* Start the allocator with whatever strategy the argv selected. */
	    switch (init.astrat) {
	    case ERTS_ALC_S_GOODFIT:
		allctr = erts_gfalc_start((GFAllctr_t *)
					  erts_alloc(ERTS_ALC_T_TEST,
						     sizeof(GFAllctr_t)),
					  &init.init.gf,
					  &init.init.util);
		break;
	    case ERTS_ALC_S_BESTFIT:
		allctr = erts_bfalc_start((BFAllctr_t *)
					  erts_alloc(ERTS_ALC_T_TEST,
						     sizeof(BFAllctr_t)),
					  &init.init.bf,
					  &init.init.util);
		break;
	    case ERTS_ALC_S_AFIT:
		allctr = erts_afalc_start((AFAllctr_t *)
					  erts_alloc(ERTS_ALC_T_TEST,
						     sizeof(AFAllctr_t)),
					  &init.init.af,
					  &init.init.util);
		break;
	    case ERTS_ALC_S_FIRSTFIT:
		allctr = erts_aoffalc_start((AOFFAllctr_t *)
					    erts_alloc(ERTS_ALC_T_TEST,
						       sizeof(AOFFAllctr_t)),
					    &init.init.aoff,
					    &init.init.util);
		break;
	    default:
		ASSERT(0);
		allctr = NULL;
		break;
	    }

	    return (UWord) allctr;
	}
	case 0xf04:
	    /* Stop and deallocate a test allocator started by 0xf03. */
	    erts_alcu_stop((Allctr_t *) a1);
	    erts_free(ERTS_ALC_T_TEST, (void *) a1);
	    break;
	case 0xf05: return (UWord) 1; /* threads available */
	case 0xf06: return (UWord) ((Allctr_t *) a1)->thread_safe;
#ifdef ETHR_NO_FORKSAFETY
	case 0xf07: return (UWord) 0;
#else
	case 0xf07: return (UWord) ((Allctr_t *) a1)->thread_safe;
#endif
	case 0xf08: {
	    /* Create a mutex for the test suite. */
	    ethr_mutex *mtx = erts_alloc(ERTS_ALC_T_TEST, sizeof(ethr_mutex));
	    if (ethr_mutex_init(mtx) != 0)
		ERTS_ALC_TEST_ABORT;
	    return (UWord) mtx;
	}
	case 0xf09: {
	    /* Destroy a mutex created by 0xf08. */
	    ethr_mutex *mtx = (ethr_mutex *) a1;
	    if (ethr_mutex_destroy(mtx) != 0)
		ERTS_ALC_TEST_ABORT;
	    erts_free(ERTS_ALC_T_TEST, (void *) mtx);
	    break;
	}
	case 0xf0a:
	    ethr_mutex_lock((ethr_mutex *) a1);
	    break;
	case 0xf0b:
	    ethr_mutex_unlock((ethr_mutex *) a1);
	    break;
	case 0xf0c: {
	    /* Create a condition variable. */
	    ethr_cond *cnd = erts_alloc(ERTS_ALC_T_TEST, sizeof(ethr_cond));
	    if (ethr_cond_init(cnd) != 0)
		ERTS_ALC_TEST_ABORT;
	    return (UWord) cnd;
	}
	case 0xf0d: {
	    /* Destroy a condition variable created by 0xf0c. */
	    ethr_cond *cnd = (ethr_cond *) a1;
	    if (ethr_cond_destroy(cnd) != 0)
		ERTS_ALC_TEST_ABORT;
	    erts_free(ERTS_ALC_T_TEST, (void *) cnd);
	    break;
	}
	case 0xf0e:
	    ethr_cond_broadcast((ethr_cond *) a1);
	    break;
	case 0xf0f: {
	    /* Wait on condition a1 with mutex a2; retry on EINTR. */
	    int res;
	    do {
		res = ethr_cond_wait((ethr_cond *) a1, (ethr_mutex *) a2);
	    } while (res == EINTR);
	    break;
	}
	case 0xf10: {
	    /* Spawn a thread running function a1 with argument a2. */
	    ethr_tid *tid = erts_alloc(ERTS_ALC_T_TEST, sizeof(ethr_tid));
	    if (ethr_thr_create(tid,
				(void * (*)(void *)) a1,
				(void *) a2,
				NULL) != 0)
		ERTS_ALC_TEST_ABORT;
	    return (UWord) tid;
	}
	case 0xf11: {
	    /* Join a thread created by 0xf10 and free its tid. */
	    ethr_tid *tid = (ethr_tid *) a1;
	    if (ethr_thr_join(*tid, NULL) != 0)
		ERTS_ALC_TEST_ABORT;
	    erts_free(ERTS_ALC_T_TEST, (void *) tid);
	    break;
	}
	case 0xf12:
	    /* Exit the calling thread; must not return. */
	    ethr_thr_exit((void *) a1);
	    ERTS_ALC_TEST_ABORT;
	    break;
	case 0xf13: return (UWord) 1; /* dynamic thread support */
	case 0xf14: return (UWord) erts_alloc(ERTS_ALC_T_TEST, (Uint)a1);
	case 0xf15: erts_free(ERTS_ALC_T_TEST, (void*)a1); return 0;
	case 0xf16: return (UWord) erts_realloc(ERTS_ALC_T_TEST, (void*)a1, (Uint)a2);
	case 0xf17: {
	    /* Install custom creating/destroying-MBC callbacks and grow the
	     * MBC header by a1 bytes on every ERTS_ALC_A_TEST instance.
	     * a2/a3 are in-out: new callbacks in, original callbacks out.
	     * Fails (-1) unless all instances are pristine and identical.
	     * Returns the original header size (offset). */
	    Uint extra_hdr_sz = UNIT_CEILING((Uint)a1);
	    ErtsAllocatorThrSpec_t* ts = &erts_allctr_thr_spec[ERTS_ALC_A_TEST];
	    Uint offset = ts->allctr[0]->mbc_header_size;
	    void* orig_creating_mbc = ts->allctr[0]->creating_mbc;
	    void* orig_destroying_mbc = ts->allctr[0]->destroying_mbc;
	    void* new_creating_mbc = *(void**)a2; /* inout arg */
	    void* new_destroying_mbc = *(void**)a3; /* inout arg */
	    int i;

	    /* All instances must agree and have no carriers yet. */
	    for (i=0; i < ts->size; i++) {
		Allctr_t* ap = ts->allctr[i];
		if (ap->mbc_header_size != offset
		    || ap->creating_mbc != orig_creating_mbc
		    || ap->destroying_mbc != orig_destroying_mbc
		    || ap->mbc_list.first != NULL)
		    return -1;
	    }
	    for (i=0; i < ts->size; i++) {
		ts->allctr[i]->mbc_header_size += extra_hdr_sz;
		ts->allctr[i]->creating_mbc = new_creating_mbc;
		ts->allctr[i]->destroying_mbc = new_destroying_mbc;
	    }
	    *(void**)a2 = orig_creating_mbc;
	    *(void**)a3 = orig_destroying_mbc;
	    return offset;
	}
	case 0xf18: {
	    ErtsAllocatorThrSpec_t* ts = &erts_allctr_thr_spec[ERTS_ALC_A_TEST];
	    return ts->allctr[0]->largest_mbc_size;
	}
	default:
	    break;
	}
	return (UWord) 0;
    default:
	break;
    }

    /* Unknown subsystem selector. */
    ASSERT(0);
    return ~((UWord) 0);
}
#ifdef DEBUG
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
 * Debug stuff                                                               *
\* */

/* Toggle verbose logging of every alloc/realloc/free to stderr. */
#if 0
#define PRINT_OPS
#else
#undef PRINT_OPS
#endif

/* Total fence overhead per block, in bytes:
 * [hdbg ptr (HARD_DEBUG only)][size word][pre pattern] user data [post pattern] */
#ifdef HARD_DEBUG
#define FENCE_SZ (4*sizeof(UWord))
#else
#define FENCE_SZ (3*sizeof(UWord))
#endif

/* Word-sized sentinel written before and after each block. */
#if defined(ARCH_64)
#define FENCE_PATTERN 0xABCDEF97ABCDEF97
#else
#define FENCE_PATTERN 0xABCDEF97
#endif

/* The allocation type number is embedded into the fence word at
 * TYPE_PATTERN_SHIFT so alloc/free type mismatches can be detected. */
#define TYPE_PATTERN_MASK ERTS_ALC_N_MASK
#define TYPE_PATTERN_SHIFT 16

/* Fence bits that never vary (everything except the embedded type). */
#define FIXED_FENCE_PATTERN_MASK \
  (~((UWord) (TYPE_PATTERN_MASK << TYPE_PATTERN_SHIFT)))
#define FIXED_FENCE_PATTERN \
  (FENCE_PATTERN & FIXED_FENCE_PATTERN_MASK)

/* Build a fence word for type T / extract the type from a fence word. */
#define MK_PATTERN(T) \
  (FIXED_FENCE_PATTERN | (((T) & TYPE_PATTERN_MASK) << TYPE_PATTERN_SHIFT))

#define GET_TYPE_OF_PATTERN(P) \
  (((P) >> TYPE_PATTERN_SHIFT) & TYPE_PATTERN_MASK)

#ifdef HARD_DEBUG

#define ERL_ALC_HDBG_MAX_MBLK 100000
#define ERTS_ALC_O_CHECK -1

/* Descriptor tracking one live allocation for hard-debug fence sweeps. */
typedef struct hdbg_mblk_ hdbg_mblk;
struct hdbg_mblk_ {
    hdbg_mblk *next;
    hdbg_mblk *prev;
    void *p;          /* user pointer of the tracked block */
    Uint s;           /* requested size */
    ErtsAlcType_t n;  /* allocation type number */
};

/* Statically sized pool of descriptors plus free/used lists, guarded
 * by hdbg_mblk_mtx. */
static hdbg_mblk hdbg_mblks[ERL_ALC_HDBG_MAX_MBLK];
static hdbg_mblk *free_hdbg_mblks;
static hdbg_mblk *used_hdbg_mblks;
static erts_mtx_t hdbg_mblk_mtx;
  3350. static void
  3351. hdbg_init(void)
  3352. {
  3353. int i;
  3354. for (i = 0; i < ERL_ALC_HDBG_MAX_MBLK-1; i++)
  3355. hdbg_mblks[i].next = &hdbg_mblks[i+1];
  3356. hdbg_mblks[ERL_ALC_HDBG_MAX_MBLK-1].next = NULL;
  3357. free_hdbg_mblks = &hdbg_mblks[0];
  3358. used_hdbg_mblks = NULL;
  3359. erts_mtx_init(&hdbg_mblk_mtx, "erts_alloc_hard_debug", NIL,
  3360. ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
  3361. }
/* Forward declarations; definitions follow later in this file. */
static void *check_memory_fence(void *ptr,
				Uint *size,
				ErtsAlcType_t n,
				int func);
void erts_hdbg_chk_blks(void);
  3367. void
  3368. erts_hdbg_chk_blks(void)
  3369. {
  3370. hdbg_mblk *mblk;
  3371. erts_mtx_lock(&hdbg_mblk_mtx);
  3372. for (mblk = used_hdbg_mblks; mblk; mblk = mblk->next) {
  3373. Uint sz;
  3374. check_memory_fence(mblk->p, &sz, mblk->n, ERTS_ALC_O_CHECK);
  3375. ASSERT(sz == mblk->s);
  3376. }
  3377. erts_mtx_unlock(&hdbg_mblk_mtx);
  3378. }
  3379. static hdbg_mblk *
  3380. hdbg_alloc(void *p, Uint s, ErtsAlcType_t n)
  3381. {
  3382. hdbg_mblk *mblk;
  3383. erts_mtx_lock(&hdbg_mblk_mtx);
  3384. mblk = free_hdbg_mblks;
  3385. if (!mblk) {
  3386. erts_fprintf(stderr,
  3387. "Ran out of debug blocks; please increase "
  3388. "ERL_ALC_HDBG_MAX_MBLK=%d and recompile!\n",
  3389. ERL_ALC_HDBG_MAX_MBLK);
  3390. abort();
  3391. }
  3392. free_hdbg_mblks = mblk->next;
  3393. mblk->p = p;
  3394. mblk->s = s;
  3395. mblk->n = n;
  3396. mblk->next = used_hdbg_mblks;
  3397. mblk->prev = NULL;
  3398. if (used_hdbg_mblks)
  3399. used_hdbg_mblks->prev = mblk;
  3400. used_hdbg_mblks = mblk;
  3401. erts_mtx_unlock(&hdbg_mblk_mtx);
  3402. return (void *) mblk;
  3403. }
  3404. static void
  3405. hdbg_free(hdbg_mblk *mblk)
  3406. {
  3407. erts_mtx_lock(&hdbg_mblk_mtx);
  3408. if (mblk->next)
  3409. mblk->next->prev = mblk->prev;
  3410. if (mblk->prev)
  3411. mblk->prev->next = mblk->next;
  3412. else
  3413. used_hdbg_mblks = mblk->next;
  3414. mblk->next = free_hdbg_mblks;
  3415. free_hdbg_mblks = mblk;
  3416. erts_mtx_unlock(&hdbg_mblk_mtx);
  3417. }
  3418. #endif
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
static void *check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func);

/* Validate the debug fence of a single live block allocated as 'type'.
 * NOTE(review): passes ERTS_ALC_O_FREE as the operation code even though
 * the block is not being freed here — presumably intentional for this
 * debug build; confirm against check_memory_fence()'s HARD_DEBUG path. */
void check_allocated_block( Uint type, void *blk)
{
    Uint dummy;
    check_memory_fence(blk, &dummy, ERTS_ALC_T2N(type), ERTS_ALC_O_FREE);
}
/* Walk every enabled alloc_util allocator and run its check_mbc callback
 * (when present) on each multi-block carrier. Locks each thread-safe
 * allocator's mutex around the check. No-op before the emulator is
 * fully initialized. */
void check_allocators(void)
{
    int i;
    if (!erts_initialized)
	return;
    for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; ++i) {
	if (erts_allctrs_info[i].alloc_util) {
	    /* erts_allctrs[i].extra points at the saved real callbacks
	     * (see real_allctrs below); their extra is the Allctr_t. */
	    ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) erts_allctrs[i].extra;
	    Allctr_t *allctr = real_af->extra;
	    Carrier_t *ct;
	    if (allctr->thread_safe)
		erts_mtx_lock(&allctr->mutex);

	    if (allctr->check_mbc) {
		for (ct = allctr->mbc_list.first; ct; ct = ct->next) {
		    fprintf(stderr,"Checking allocator %d\r\n",i);
		    allctr->check_mbc(allctr,ct);
		}
	    }
	    if (allctr->thread_safe)
		erts_mtx_unlock(&allctr->mutex);
	}
    }
}
#endif
/* Write debug fences around a freshly (re)allocated block of 'sz' user
 * bytes of type 'n' and return the adjusted user pointer.
 *
 * Raw block layout (each field one UWord, see FENCE_SZ):
 *   [hdbg descriptor ptr (HARD_DEBUG only)][sz][pre pattern] user data [post pattern]
 * The post pattern is written with memcpy since the end of the user area
 * need not be UWord-aligned. Returns NULL when ptr is NULL (allocation
 * failure passthrough). */
static void *
set_memory_fence(void *ptr, Uint sz, ErtsAlcType_t n)
{
    UWord *ui_ptr;
    UWord pattern;
#ifdef HARD_DEBUG
    hdbg_mblk **mblkpp;
#endif

    if (!ptr)
	return NULL;

    ui_ptr = (UWord *) ptr;
    pattern = MK_PATTERN(n);

#ifdef HARD_DEBUG
    mblkpp = (hdbg_mblk **) ui_ptr++;
#endif

    *(ui_ptr++) = sz;
    *(ui_ptr++) = pattern;
    sys_memcpy((void *) (((char *) ui_ptr)+sz), (void *) &pattern, sizeof(UWord));

#ifdef HARD_DEBUG
    /* Register the block for erts_hdbg_chk_blks() sweeps. */
    *mblkpp = hdbg_alloc((void *) ui_ptr, sz, n);
#endif

    return (void *) ui_ptr;
}
/* Verify the fences around user pointer 'ptr' for operation 'func'
 * (ERTS_ALC_O_ALLOC/REALLOC/FREE or ERTS_ALC_O_CHECK) on type 'n'.
 *
 * Aborts the emulator with a diagnostic on any corruption or on an
 * alloc/free type mismatch. On success stores the recorded user size in
 * *size and returns the start of the raw block (the pointer that must be
 * handed back to the real allocator). Returns NULL when ptr is NULL. */
static void *
check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
{
    Uint sz;
    Uint found_type;
    UWord pre_pattern, expected_pattern;
    UWord post_pattern;
    UWord *ui_ptr;
#ifdef HARD_DEBUG
    hdbg_mblk *mblk;
#endif

    if (!ptr)
	return NULL;

    expected_pattern = MK_PATTERN(n);

    /* Walk backwards over the fence header written by set_memory_fence(). */
    ui_ptr = (UWord *) ptr;
    pre_pattern = *(--ui_ptr);
    *size = sz = *(--ui_ptr);
#ifdef HARD_DEBUG
    mblk = (hdbg_mblk *) *(--ui_ptr);
#endif

    found_type = GET_TYPE_OF_PATTERN(pre_pattern);

    if (found_type != n) {
        erts_exit(ERTS_ABORT_EXIT, "ERROR: Miss matching allocator types"
                  " used in alloc and free\n");
    }

    if (pre_pattern != expected_pattern) {
	if ((FIXED_FENCE_PATTERN_MASK & pre_pattern) != FIXED_FENCE_PATTERN)
	    erts_exit(ERTS_ABORT_EXIT,
		     "ERROR: Fence at beginning of memory block (p=0x%u) "
		     "clobbered.\n",
		     (UWord) ptr);
    }

    /* The post fence may be unaligned; read it with memcpy. */
    sys_memcpy((void *) &post_pattern, (void *) (((char *)ptr)+sz), sizeof(UWord));

    if (post_pattern != expected_pattern || pre_pattern != post_pattern) {
	char fbuf[10];
	char obuf[10];
	char *ftype;
	char *otype;
	char *op_str;

	if ((FIXED_FENCE_PATTERN_MASK & post_pattern) != FIXED_FENCE_PATTERN)
	    erts_exit(ERTS_ABORT_EXIT,
		     "ERROR: Fence at end of memory block (p=0x%u, sz=%u) "
		     "clobbered.\n",
		     (UWord) ptr, (UWord) sz);
	if (found_type != GET_TYPE_OF_PATTERN(post_pattern))
	    erts_exit(ERTS_ABORT_EXIT,
		     "ERROR: Fence around memory block (p=0x%u, sz=%u) "
		     "clobbered.\n",
		     (UWord) ptr, (UWord) sz);

	/* Fall back to numeric type ids if no name is known. */
	ftype = type_no_str(found_type);
	if (!ftype) {
	    erts_snprintf(fbuf, sizeof(fbuf), "%d", (int) found_type);
	    ftype = fbuf;
	}
	otype = type_no_str(n);
	if (!otype) {
	    erts_snprintf(obuf, sizeof(obuf), "%d", (int) n);
	    otype = obuf;
	}

	switch (func) {
	case ERTS_ALC_O_ALLOC:		op_str = "allocated";	break;
	case ERTS_ALC_O_REALLOC:	op_str = "reallocated";	break;
	case ERTS_ALC_O_FREE:		op_str = "freed";	break;
	default:			op_str = "???";		break;
	}

	erts_exit(ERTS_ABORT_EXIT,
		 "ERROR: Memory block (p=0x%u, sz=%u) allocated as type \"%s\","
		 " but %s as type \"%s\".\n",
		 (UWord) ptr, (UWord) sz, ftype, op_str, otype);
    }

#ifdef HARD_DEBUG
    /* Realloc and free retire the tracking descriptor; a realloc'd block
     * gets a fresh one from set_memory_fence(). */
    switch (func) {
    case ERTS_ALC_O_REALLOC:
    case ERTS_ALC_O_FREE:
	hdbg_free(mblk);
	break;
    default:
	break;
    }
#endif

    return (void *) ui_ptr;
}
  3555. static ErtsAllocatorFunctions_t real_allctrs[ERTS_ALC_A_MAX+1];
/* Debug wrapper for alloc: allocates FENCE_SZ extra bytes via the real
 * allocator (saved in 'extra'), writes the fences and returns the fenced
 * user pointer (NULL passthrough on allocation failure). */
static void *
debug_alloc(ErtsAlcType_t type, void *extra, Uint size)
{
    ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
    ErtsAlcType_t n;
    Uint dsize;
    void *res;

#ifdef HARD_DEBUG
    erts_hdbg_chk_blks();
#endif

    n = ERTS_ALC_T2N(type);

    ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);
    dsize = size + FENCE_SZ;
    res = (*real_af->alloc)(type, real_af->extra, dsize);

    res = set_memory_fence(res, size, n);

#ifdef PRINT_OPS
    fprintf(stderr, "0x%lx = alloc(%s, %lu)\r\n",
	    (Uint) res, ERTS_ALC_N2TD(n), size);
#endif

    return res;
}
/* Debug wrapper for realloc: validates the old block's fences, poisons
 * the tail being truncated away (shrinking reallocs), forwards to the
 * real realloc with the fence overhead added, and re-fences the result. */
static void *
debug_realloc(ErtsAlcType_t type, void *extra, void *ptr, Uint size)
{
    ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
    ErtsAlcType_t n;
    Uint dsize;
    Uint old_size;
    void *dptr;
    void *res;

    n = ERTS_ALC_T2N(type);

    ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);

    dsize = size + FENCE_SZ;
    /* Validates fences and (HARD_DEBUG) retires the tracking descriptor. */
    dptr = check_memory_fence(ptr, &old_size, n, ERTS_ALC_O_REALLOC);

#ifdef HARD_DEBUG
    erts_hdbg_chk_blks();
#endif

    if (ptr && old_size > size)
	/* Poison the truncated tail (plus the start of the old post fence,
	 * which is rewritten below) to catch use of stale data. */
	sys_memset((void *) (((char *) ptr) + size),
		   0xf,
		   sizeof(Uint) + old_size - size);

    res = (*real_af->realloc)(type, real_af->extra, dptr, dsize);

    res = set_memory_fence(res, size, n);

#ifdef PRINT_OPS
    fprintf(stderr, "0x%lx = realloc(%s, 0x%lx, %lu)\r\n",
	    (Uint) res, ERTS_ALC_N2TD(n), (Uint) ptr, size);
#endif

    return res;
}
/* Debug wrapper for free: validates the fences, fills the whole raw block
 * (fences included) with the type number as a poison pattern, then
 * forwards the raw pointer to the real free. free(NULL) is a no-op. */
static void
debug_free(ErtsAlcType_t type, void *extra, void *ptr)
{
    ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
    ErtsAlcType_t n;
    void *dptr;
    Uint size;
    int free_pattern;

    n = ERTS_ALC_T2N(type);
    free_pattern = n;

    ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);

    if (!ptr)
        return;

    dptr = check_memory_fence(ptr, &size, n, ERTS_ALC_O_FREE);

    sys_memset((void *) dptr, free_pattern, size + FENCE_SZ);

    (*real_af->free)(type, real_af->extra, dptr);

#ifdef PRINT_OPS
    fprintf(stderr, "free(%s, 0x%lx)\r\n", ERTS_ALC_N2TD(n), (Uint) ptr);
#endif

#ifdef HARD_DEBUG
    erts_hdbg_chk_blks();
#endif

}
  3628. static Uint
  3629. install_debug_functions(void)
  3630. {
  3631. int i;
  3632. ERTS_CT_ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));
  3633. sys_memcpy((void *)real_allctrs,(void *)erts_allctrs,sizeof(erts_allctrs));
  3634. for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
  3635. erts_allctrs[i].alloc = debug_alloc;
  3636. erts_allctrs[i].realloc = debug_realloc;
  3637. erts_allctrs[i].free = debug_free;
  3638. erts_allctrs[i].extra = (void *) &real_allctrs[i];
  3639. }
  3640. return FENCE_SZ;
  3641. }
  3642. #endif /* #ifdef DEBUG */