
/erts/emulator/beam/erl_alloc.c

http://github.com/erlang/otp
  1. /*
  2. * %CopyrightBegin%
  3. *
  4. * Copyright Ericsson AB 2002-2020. All Rights Reserved.
  5. *
  6. * Licensed under the Apache License, Version 2.0 (the "License");
  7. * you may not use this file except in compliance with the License.
  8. * You may obtain a copy of the License at
  9. *
  10. * http://www.apache.org/licenses/LICENSE-2.0
  11. *
  12. * Unless required by applicable law or agreed to in writing, software
  13. * distributed under the License is distributed on an "AS IS" BASIS,
  14. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15. * See the License for the specific language governing permissions and
  16. * limitations under the License.
  17. *
  18. * %CopyrightEnd%
  19. */
  20. /*
  21. * Description: Management of memory allocators.
  22. *
  23. * Author: Rickard Green
  24. */
  25. #ifdef HAVE_CONFIG_H
  26. # include "config.h"
  27. #endif
  28. #define ERTS_ALLOC_C__
  29. #define ERTS_ALC_INTERNAL__
  30. #define ERTS_WANT_MEM_MAPPERS
  31. #include "sys.h"
  32. #define ERL_THREADS_EMU_INTERNAL__
  33. #include "erl_threads.h"
  34. #include "global.h"
  35. #include "erl_db.h"
  36. #include "erl_binary.h"
  37. #include "erl_bits.h"
  38. #include "erl_mtrace.h"
  39. #include "erl_mseg.h"
  40. #include "erl_monitor_link.h"
  41. #include "erl_hl_timer.h"
  42. #include "erl_cpu_topology.h"
  43. #include "erl_thr_queue.h"
  44. #include "erl_nfunc_sched.h"
  45. #if defined(ERTS_ALC_T_DRV_SEL_D_STATE) || defined(ERTS_ALC_T_DRV_EV_D_STATE)
  46. #include "erl_check_io.h"
  47. #endif
  48. #include "erl_bif_unique.h"
  49. #define GET_ERL_GF_ALLOC_IMPL
  50. #include "erl_goodfit_alloc.h"
  51. #define GET_ERL_BF_ALLOC_IMPL
  52. #include "erl_bestfit_alloc.h"
  53. #define GET_ERL_AF_ALLOC_IMPL
  54. #include "erl_afit_alloc.h"
  55. #define GET_ERL_AOFF_ALLOC_IMPL
  56. #include "erl_ao_firstfit_alloc.h"
  57. #if ERTS_MAX_NO_OF_SCHEDULERS > ERTS_AU_MAX_PREF_ALLOC_INSTANCES
  58. # error "Too many schedulers; cannot create that many pref alloc instances"
  59. #endif
  60. #define ERTS_ALC_DEFAULT_MAX_THR_PREF ERTS_MAX_NO_OF_SCHEDULERS
  61. #if defined(SMALL_MEMORY) || defined(PURIFY) || defined(VALGRIND)
  62. #define AU_ALLOC_DEFAULT_ENABLE(X) 0
  63. #else
  64. #define AU_ALLOC_DEFAULT_ENABLE(X) (X)
  65. #endif
  66. #define ERTS_ALC_DEFAULT_ENABLED_ACUL 60
  67. #define ERTS_ALC_DEFAULT_ENABLED_ACUL_EHEAP_ALLOC 45
  68. #define ERTS_ALC_DEFAULT_ENABLED_ACUL_LL_ALLOC 85
  69. #define ERTS_ALC_DEFAULT_ACUL ERTS_ALC_DEFAULT_ENABLED_ACUL
  70. #define ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC ERTS_ALC_DEFAULT_ENABLED_ACUL_EHEAP_ALLOC
  71. #define ERTS_ALC_DEFAULT_ACUL_LL_ALLOC ERTS_ALC_DEFAULT_ENABLED_ACUL_LL_ALLOC
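/*
 * "ACUL" is the abandon carrier utilization limit: when a multi-block
 * carrier's utilization falls below this percentage it may be abandoned
 * and later picked up by another scheduler's allocator instance (carrier
 * migration). The process-heap allocator uses a lower default (45%) and
 * the long-lived allocator a higher one (85%) than the general 60%.
 */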
  72. #ifdef DEBUG
  73. static Uint install_debug_functions(void);
  74. #if 0
  75. #define HARD_DEBUG
  76. #ifdef __GNUC__
  77. #warning "* * * * * * * * * * * * * *"
  78. #warning "* HARD DEBUG IS ENABLED! *"
  79. #warning "* * * * * * * * * * * * * *"
  80. #endif
  81. #endif
  82. #endif
  83. static int lock_all_physical_memory = 0;
  84. ErtsAllocatorFunctions_t ERTS_WRITE_UNLIKELY(erts_allctrs[ERTS_ALC_A_MAX+1]);
  85. ErtsAllocatorInfo_t erts_allctrs_info[ERTS_ALC_A_MAX+1];
  86. ErtsAllocatorThrSpec_t ERTS_WRITE_UNLIKELY(erts_allctr_thr_spec[ERTS_ALC_A_MAX+1]);
  87. #define ERTS_MIN(A, B) ((A) < (B) ? (A) : (B))
  88. #define ERTS_MAX(A, B) ((A) > (B) ? (A) : (B))
  89. typedef union {
  90. GFAllctr_t gfa;
  91. char align_gfa[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(GFAllctr_t))];
  92. BFAllctr_t bfa;
  93. char align_bfa[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(BFAllctr_t))];
  94. AFAllctr_t afa;
  95. char align_afa[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(AFAllctr_t))];
  96. AOFFAllctr_t aoffa;
  97. char align_aoffa[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(AOFFAllctr_t))];
  98. } ErtsAllocatorState_t erts_align_attribute(ERTS_CACHE_LINE_SIZE);
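/*
 * A single state union big enough for whichever allocator implementation
 * (goodfit, bestfit, afit or aoff) gets chosen at start-up, cache-line
 * aligned to avoid false sharing between allocator instances.
 */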
  99. static ErtsAllocatorState_t std_alloc_state;
  100. static ErtsAllocatorState_t ll_alloc_state;
  101. static ErtsAllocatorState_t sl_alloc_state;
  102. static ErtsAllocatorState_t temp_alloc_state;
  103. static ErtsAllocatorState_t eheap_alloc_state;
  104. static ErtsAllocatorState_t binary_alloc_state;
  105. static ErtsAllocatorState_t ets_alloc_state;
  106. static ErtsAllocatorState_t driver_alloc_state;
  107. static ErtsAllocatorState_t fix_alloc_state;
  108. static ErtsAllocatorState_t literal_alloc_state;
  109. #ifdef ERTS_ALC_A_EXEC
  110. static ErtsAllocatorState_t exec_alloc_state;
  111. #endif
  112. static ErtsAllocatorState_t test_alloc_state;
  113. enum {
  114. ERTS_ALC_INFO_A_ALLOC_UTIL = ERTS_ALC_A_MAX + 1,
  115. ERTS_ALC_INFO_A_MSEG_ALLOC,
  116. ERTS_ALC_INFO_A_ERTS_MMAP,
  117. ERTS_ALC_INFO_A_DISABLED_EXEC, /* fake a disabled "exec_alloc" */
  118. ERTS_ALC_INFO_A_END
  119. };
  120. typedef struct {
  121. erts_atomic32_t refc;
  122. int only_sz;
  123. int internal;
  124. Uint req_sched;
  125. Process *proc;
  126. ErtsIRefStorage iref;
  127. int allocs[ERTS_ALC_INFO_A_END - ERTS_ALC_A_MIN + 1];
  128. } ErtsAllocInfoReq;
  129. ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(aireq,
  130. ErtsAllocInfoReq,
  131. 5,
  132. ERTS_ALC_T_AINFO_REQ)
  133. ErtsAlcType_t erts_fix_core_allocator_ix;
  134. struct au_init {
  135. int enable;
  136. int thr_spec;
  137. int disable_allowed;
  138. int thr_spec_allowed;
  139. int carrier_migration_allowed;
  140. ErtsAlcStrat_t astrat;
  141. struct {
  142. AllctrInit_t util;
  143. GFAllctrInit_t gf;
  144. BFAllctrInit_t bf;
  145. AFAllctrInit_t af;
  146. AOFFAllctrInit_t aoff;
  147. } init;
  148. struct {
  149. int mmbcs;
  150. int lmbcs;
  151. int smbcs;
  152. int mmmbc;
  153. } default_;
  154. };
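/*
 * Start-up parameters for one alloc_util allocator. The default_ flags
 * track which carrier sizes are still at their default values, so that
 * adjust_tpref() only scales sizes the user has not set explicitly on
 * the command line.
 */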
  155. #define DEFAULT_ALLCTR_INIT { \
  156. ERTS_DEFAULT_ALLCTR_INIT, \
  157. ERTS_DEFAULT_GF_ALLCTR_INIT, \
  158. ERTS_DEFAULT_BF_ALLCTR_INIT, \
  159. ERTS_DEFAULT_AF_ALLCTR_INIT, \
  160. ERTS_DEFAULT_AOFF_ALLCTR_INIT \
  161. }
  162. typedef struct {
  163. int erts_alloc_config;
  164. #if HAVE_ERTS_MSEG
  165. ErtsMsegInit_t mseg;
  166. #endif
  167. int trim_threshold;
  168. int top_pad;
  169. AlcUInit_t alloc_util;
  170. struct {
  171. char *mtrace;
  172. char *nodename;
  173. } instr;
  174. struct au_init sl_alloc;
  175. struct au_init std_alloc;
  176. struct au_init ll_alloc;
  177. struct au_init temp_alloc;
  178. struct au_init eheap_alloc;
  179. struct au_init binary_alloc;
  180. struct au_init ets_alloc;
  181. struct au_init driver_alloc;
  182. struct au_init fix_alloc;
  183. struct au_init literal_alloc;
  184. struct au_init exec_alloc;
  185. struct au_init test_alloc;
  186. } erts_alc_hndl_args_init_t;
  187. #define ERTS_AU_INIT__ {0, 0, 1, 1, 1, \
  188. ERTS_ALC_S_GOODFIT, DEFAULT_ALLCTR_INIT, \
  189. {1,1,1,1}}
  190. #define SET_DEFAULT_ALLOC_OPTS(IP) \
  191. do { \
  192. struct au_init aui__ = ERTS_AU_INIT__; \
  193. sys_memcpy((void *) (IP), (void *) &aui__, sizeof(struct au_init)); \
  194. } while (0)
  195. static void
  196. set_default_sl_alloc_opts(struct au_init *ip)
  197. {
  198. SET_DEFAULT_ALLOC_OPTS(ip);
  199. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  200. ip->thr_spec = 1;
  201. ip->astrat = ERTS_ALC_S_GOODFIT;
  202. ip->init.util.name_prefix = "sl_";
  203. ip->init.util.alloc_no = ERTS_ALC_A_SHORT_LIVED;
  204. #ifndef SMALL_MEMORY
  205. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  206. #else
  207. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  208. #endif
  209. ip->init.util.ts = ERTS_ALC_MTA_SHORT_LIVED;
  210. ip->init.util.rsbcst = 80;
  211. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
  212. }
  213. static void
  214. set_default_std_alloc_opts(struct au_init *ip)
  215. {
  216. SET_DEFAULT_ALLOC_OPTS(ip);
  217. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  218. ip->thr_spec = 1;
  219. ip->astrat = ERTS_ALC_S_BESTFIT;
  220. ip->init.util.name_prefix = "std_";
  221. ip->init.util.alloc_no = ERTS_ALC_A_STANDARD;
  222. #ifndef SMALL_MEMORY
  223. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  224. #else
  225. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  226. #endif
  227. ip->init.util.ts = ERTS_ALC_MTA_STANDARD;
  228. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
  229. }
  230. static void
  231. set_default_ll_alloc_opts(struct au_init *ip)
  232. {
  233. SET_DEFAULT_ALLOC_OPTS(ip);
  234. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  235. ip->thr_spec = 0;
  236. ip->astrat = ERTS_ALC_S_BESTFIT;
  237. ip->init.bf.ao = 1;
  238. ip->init.util.ramv = 0;
  239. ip->init.util.mmsbc = 0;
  240. ip->init.util.sbct = ~((UWord) 0);
  241. ip->init.util.name_prefix = "ll_";
  242. ip->init.util.alloc_no = ERTS_ALC_A_LONG_LIVED;
  243. #ifndef SMALL_MEMORY
  244. ip->init.util.mmbcs = 2*1024*1024; /* Main carrier size */
  245. #else
  246. ip->init.util.mmbcs = 1*1024*1024; /* Main carrier size */
  247. #endif
  248. ip->init.util.ts = ERTS_ALC_MTA_LONG_LIVED;
  249. ip->init.util.asbcst = 0;
  250. ip->init.util.rsbcst = 0;
  251. ip->init.util.rsbcmt = 0;
  252. ip->init.util.rmbcmt = 0;
  253. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL_LL_ALLOC;
  254. }
  255. static void
  256. set_default_literal_alloc_opts(struct au_init *ip)
  257. {
  258. SET_DEFAULT_ALLOC_OPTS(ip);
  259. ip->enable = 1;
  260. ip->thr_spec = 0;
  261. ip->disable_allowed = 0;
  262. ip->thr_spec_allowed = 0;
  263. ip->carrier_migration_allowed = 0;
  264. ip->astrat = ERTS_ALC_S_BESTFIT;
  265. ip->init.bf.ao = 1;
  266. ip->init.util.ramv = 0;
  267. ip->init.util.mmsbc = 0;
  268. ip->init.util.sbct = ~((UWord) 0);
  269. ip->init.util.name_prefix = "literal_";
  270. ip->init.util.alloc_no = ERTS_ALC_A_LITERAL;
  271. #ifndef SMALL_MEMORY
  272. ip->init.util.mmbcs = 1024*1024; /* Main carrier size */
  273. #else
  274. ip->init.util.mmbcs = 256*1024; /* Main carrier size */
  275. #endif
  276. ip->init.util.ts = ERTS_ALC_MTA_LITERAL;
  277. ip->init.util.asbcst = 0;
  278. ip->init.util.rsbcst = 0;
  279. ip->init.util.rsbcmt = 0;
  280. ip->init.util.rmbcmt = 0;
  281. ip->init.util.acul = 0;
  282. #if defined(ARCH_32)
  283. # if HAVE_ERTS_MSEG
  284. ip->init.util.mseg_alloc = &erts_alcu_literal_32_mseg_alloc;
  285. ip->init.util.mseg_realloc = &erts_alcu_literal_32_mseg_realloc;
  286. ip->init.util.mseg_dealloc = &erts_alcu_literal_32_mseg_dealloc;
  287. # endif
  288. ip->init.util.sys_alloc = &erts_alcu_literal_32_sys_alloc;
  289. ip->init.util.sys_realloc = &erts_alcu_literal_32_sys_realloc;
  290. ip->init.util.sys_dealloc = &erts_alcu_literal_32_sys_dealloc;
  291. #elif defined(ARCH_64)
  292. # ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION
  293. ip->init.util.mseg_alloc = &erts_alcu_mmapper_mseg_alloc;
  294. ip->init.util.mseg_realloc = &erts_alcu_mmapper_mseg_realloc;
  295. ip->init.util.mseg_dealloc = &erts_alcu_mmapper_mseg_dealloc;
  296. ip->init.util.mseg_mmapper = &erts_literal_mmapper;
  297. # endif
  298. #else
  299. # error Unknown architecture
  300. #endif
  301. }
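/*
 * literal_alloc can neither be disabled nor made thread specific, and its
 * carriers never migrate; literal areas are expected to stay at stable
 * addresses for as long as the code referencing them is loaded. On 32-bit
 * it uses the dedicated literal sys/mseg callbacks, on 64-bit the literal
 * mmapper when the OS supports physical memory reservation.
 */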
  302. #ifdef ERTS_ALC_A_EXEC
  303. static void
  304. set_default_exec_alloc_opts(struct au_init *ip)
  305. {
  306. SET_DEFAULT_ALLOC_OPTS(ip);
  307. ip->enable = 1;
  308. ip->thr_spec = 0;
  309. ip->disable_allowed = 0;
  310. ip->thr_spec_allowed = 0;
  311. ip->carrier_migration_allowed = 0;
  312. ip->astrat = ERTS_ALC_S_BESTFIT;
  313. ip->init.bf.ao = 1;
  314. ip->init.util.ramv = 0;
  315. ip->init.util.mmsbc = 0;
  316. ip->init.util.sbct = ~((UWord) 0);
  317. ip->init.util.name_prefix = "exec_";
  318. ip->init.util.alloc_no = ERTS_ALC_A_EXEC;
  319. ip->init.util.mmbcs = 0; /* No main carrier */
  320. ip->init.util.ts = ERTS_ALC_MTA_EXEC;
  321. ip->init.util.asbcst = 0;
  322. ip->init.util.rsbcst = 0;
  323. ip->init.util.rsbcmt = 0;
  324. ip->init.util.rmbcmt = 0;
  325. ip->init.util.acul = 0;
  326. ip->init.util.mseg_alloc = &erts_alcu_exec_mseg_alloc;
  327. ip->init.util.mseg_realloc = &erts_alcu_exec_mseg_realloc;
  328. ip->init.util.mseg_dealloc = &erts_alcu_exec_mseg_dealloc;
  329. }
  330. #endif /* ERTS_ALC_A_EXEC */
  331. static void
  332. set_default_temp_alloc_opts(struct au_init *ip)
  333. {
  334. SET_DEFAULT_ALLOC_OPTS(ip);
  335. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  336. ip->thr_spec = 1;
  337. ip->disable_allowed = 0;
  338. ip->carrier_migration_allowed = 0;
  339. ip->astrat = ERTS_ALC_S_AFIT;
  340. ip->init.util.name_prefix = "temp_";
  341. ip->init.util.alloc_no = ERTS_ALC_A_TEMPORARY;
  342. #ifndef SMALL_MEMORY
  343. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  344. #else
  345. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  346. #endif
  347. ip->init.util.ts = ERTS_ALC_MTA_TEMPORARY;
  348. ip->init.util.rsbcst = 90;
  349. ip->init.util.rmbcmt = 100;
  350. }
  351. static void
  352. set_default_eheap_alloc_opts(struct au_init *ip)
  353. {
  354. SET_DEFAULT_ALLOC_OPTS(ip);
  355. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  356. ip->thr_spec = 1;
  357. ip->astrat = ERTS_ALC_S_GOODFIT;
  358. ip->init.util.name_prefix = "eheap_";
  359. ip->init.util.alloc_no = ERTS_ALC_A_EHEAP;
  360. #ifndef SMALL_MEMORY
  361. ip->init.util.mmbcs = 512*1024; /* Main carrier size */
  362. #else
  363. ip->init.util.mmbcs = 256*1024; /* Main carrier size */
  364. #endif
  365. ip->init.util.ts = ERTS_ALC_MTA_EHEAP;
  366. ip->init.util.rsbcst = 50;
  367. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC;
  368. }
  369. static void
  370. set_default_binary_alloc_opts(struct au_init *ip)
  371. {
  372. SET_DEFAULT_ALLOC_OPTS(ip);
  373. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  374. ip->thr_spec = 1;
  375. ip->astrat = ERTS_ALC_S_BESTFIT;
  376. ip->init.util.name_prefix = "binary_";
  377. ip->init.util.alloc_no = ERTS_ALC_A_BINARY;
  378. #ifndef SMALL_MEMORY
  379. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  380. #else
  381. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  382. #endif
  383. ip->init.util.ts = ERTS_ALC_MTA_BINARY;
  384. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
  385. ip->init.util.atags = 1;
  386. }
  387. static void
  388. set_default_ets_alloc_opts(struct au_init *ip)
  389. {
  390. SET_DEFAULT_ALLOC_OPTS(ip);
  391. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  392. ip->thr_spec = 1;
  393. ip->astrat = ERTS_ALC_S_BESTFIT;
  394. ip->init.util.name_prefix = "ets_";
  395. ip->init.util.alloc_no = ERTS_ALC_A_ETS;
  396. #ifndef SMALL_MEMORY
  397. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  398. #else
  399. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  400. #endif
  401. ip->init.util.ts = ERTS_ALC_MTA_ETS;
  402. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
  403. }
  404. static void
  405. set_default_driver_alloc_opts(struct au_init *ip)
  406. {
  407. SET_DEFAULT_ALLOC_OPTS(ip);
  408. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  409. ip->thr_spec = 1;
  410. ip->astrat = ERTS_ALC_S_BESTFIT;
  411. ip->init.util.name_prefix = "driver_";
  412. ip->init.util.alloc_no = ERTS_ALC_A_DRIVER;
  413. #ifndef SMALL_MEMORY
  414. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  415. #else
  416. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  417. #endif
  418. ip->init.util.ts = ERTS_ALC_MTA_DRIVER;
  419. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
  420. ip->init.util.atags = 1;
  421. }
  422. static void
  423. set_default_fix_alloc_opts(struct au_init *ip,
  424. size_t *fix_type_sizes)
  425. {
  426. SET_DEFAULT_ALLOC_OPTS(ip);
  427. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  428. ip->thr_spec = 1;
  429. ip->astrat = ERTS_ALC_S_BESTFIT;
  430. ip->init.bf.ao = 1;
  431. ip->init.util.name_prefix = "fix_";
  432. ip->init.util.fix_type_size = fix_type_sizes;
  433. ip->init.util.alloc_no = ERTS_ALC_A_FIXED_SIZE;
  434. #ifndef SMALL_MEMORY
  435. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  436. #else
  437. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  438. #endif
  439. ip->init.util.ts = ERTS_ALC_MTA_FIXED_SIZE;
  440. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
  441. }
  442. static void
  443. set_default_test_alloc_opts(struct au_init *ip)
  444. {
  445. SET_DEFAULT_ALLOC_OPTS(ip);
  446. ip->enable = 0; /* Disabled by default */
  447. ip->thr_spec = -1 * erts_no_schedulers;
  448. ip->astrat = ERTS_ALC_S_FIRSTFIT;
  449. ip->init.aoff.crr_order = FF_AOFF;
  450. ip->init.aoff.blk_order = FF_BF;
  451. ip->init.util.name_prefix = "test_";
  452. ip->init.util.alloc_no = ERTS_ALC_A_TEST;
  453. ip->init.util.mmbcs = 0; /* Main carrier size */
  454. ip->init.util.ts = ERTS_ALC_MTA_TEST;
  455. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
  456. ip->init.util.atags = 1;
  457. /* Use a constant minimal MBC size */
  458. #if ERTS_SA_MB_CARRIERS
  459. ip->init.util.smbcs = ERTS_SACRR_UNIT_SZ;
  460. ip->init.util.lmbcs = ERTS_SACRR_UNIT_SZ;
  461. ip->init.util.sbct = ERTS_SACRR_UNIT_SZ;
  462. #else
  463. ip->init.util.smbcs = 1 << 12;
  464. ip->init.util.lmbcs = 1 << 12;
  465. ip->init.util.sbct = 1 << 12;
  466. #endif
  467. }
  468. static void
  469. adjust_tpref(struct au_init *ip, int no_sched)
  470. {
  471. if (ip->thr_spec) {
  472. ip->thr_spec = no_sched;
  473. ip->thr_spec *= -1; /* thread preferred */
  474. /* If default ... */
  475. /* ... shrink main multi-block carrier size */
  476. if (ip->default_.mmbcs)
  477. ip->init.util.mmbcs /= ERTS_MIN(4, no_sched);
  478. /* ... shrink largest multi-block carrier size */
  479. if (ip->default_.lmbcs)
  480. ip->init.util.lmbcs /= ERTS_MIN(2, no_sched);
  481. /* ... shrink smallest multi-block carrier size */
  482. if (ip->default_.smbcs)
  483. ip->init.util.smbcs /= ERTS_MIN(4, no_sched);
  484. }
  485. }
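/*
 * A negative thr_spec means "thread preferred": one instance per scheduler
 * plus a locked fallback instance. Since memory then gets spread over many
 * instances, the default carrier sizes are shrunk accordingly.
 */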
  486. static void handle_args(int *, char **, erts_alc_hndl_args_init_t *);
  487. static void
  488. set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init, int ncpu);
  489. static void
  490. start_au_allocator(ErtsAlcType_t alctr_n,
  491. struct au_init *init,
  492. ErtsAllocatorState_t *state);
  493. static void
  494. refuse_af_strategy(struct au_init *init)
  495. {
  496. if (init->astrat == ERTS_ALC_S_AFIT)
  497. init->astrat = ERTS_ALC_S_GOODFIT;
  498. }
  499. #ifdef HARD_DEBUG
  500. static void hdbg_init(void);
  501. #endif
  502. static void adjust_fix_alloc_sizes(UWord extra_block_size)
  503. {
  504. if (extra_block_size && erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].enabled) {
  505. int j;
  506. if (erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].thr_spec) {
  507. int i;
  508. ErtsAllocatorThrSpec_t* tspec;
  509. tspec = &erts_allctr_thr_spec[ERTS_ALC_A_FIXED_SIZE];
  510. ASSERT(tspec->enabled);
  511. for (i=0; i < tspec->size; i++) {
  512. Allctr_t* allctr = tspec->allctr[i];
  513. for (j=0; j < ERTS_ALC_NO_FIXED_SIZES; ++j) {
  514. size_t size = allctr->fix[j].type_size;
  515. size = MAX(size + extra_block_size,
  516. sizeof(ErtsAllctrDDBlock_t));
  517. allctr->fix[j].type_size = size;
  518. }
  519. }
  520. }
  521. else
  522. {
  523. Allctr_t* allctr = erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra;
  524. for (j=0; j < ERTS_ALC_NO_FIXED_SIZES; ++j) {
  525. size_t size = allctr->fix[j].type_size;
  526. size = MAX(size + extra_block_size,
  527. sizeof(ErtsAllctrDDBlock_t));
  528. allctr->fix[j].type_size = size;
  529. }
  530. }
  531. }
  532. }
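/*
 * The debug wrappers installed by install_debug_functions() pad every
 * block; each fixed-size type therefore grows by that padding, but never
 * below sizeof(ErtsAllctrDDBlock_t) so a freed block can still carry the
 * delayed-deallocation header.
 */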
  533. static ERTS_INLINE int
  534. strategy_support_carrier_migration(struct au_init *auip)
  535. {
  536. /*
  537. * Currently only aoff* and ageff* support carrier
  538. * migration, i.e, type AOFIRSTFIT.
  539. */
  540. return auip->astrat == ERTS_ALC_S_FIRSTFIT;
  541. }
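/*
 * Enabling a utilization limit (acul) implies carrier migration, which
 * requires thread-preferred instances and a first-fit strategy; allocators
 * configured with anything else are switched to aoffcbf here.
 */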
  542. static ERTS_INLINE void
  543. adjust_carrier_migration_support(struct au_init *auip)
  544. {
  545. if (auip->init.util.acul) {
  546. auip->thr_spec = -1; /* Need thread preferred */
  547. /*
  548. * If strategy cannot handle carrier migration,
  549. * default to a strategy that can...
  550. */
  551. if (!strategy_support_carrier_migration(auip)) {
  552. /* Default to aoffcbf */
  553. auip->astrat = ERTS_ALC_S_FIRSTFIT;
  554. auip->init.aoff.crr_order = FF_AOFF;
  555. auip->init.aoff.blk_order = FF_BF;
  556. }
  557. }
  558. }
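/*
 * erts_alloc_init() runs very early during emulator boot: it fills in the
 * per-allocator defaults, applies any +M* command line arguments, adjusts
 * for carrier migration and thread-preferred operation, initializes
 * mseg/alloc_util and the strategy implementations, installs the sys_alloc
 * fallback, and finally starts every enabled allocator (plus the debug
 * wrappers in DEBUG builds).
 */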
  559. void
  560. erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
  561. {
  562. UWord extra_block_size = 0;
  563. int i, ncpu;
  564. erts_alc_hndl_args_init_t init = {
  565. 0,
  566. #if HAVE_ERTS_MSEG
  567. ERTS_MSEG_INIT_DEFAULT_INITIALIZER,
  568. #endif
  569. ERTS_DEFAULT_TRIM_THRESHOLD,
  570. ERTS_DEFAULT_TOP_PAD,
  571. ERTS_DEFAULT_ALCU_INIT,
  572. };
  573. size_t fix_type_sizes[ERTS_ALC_NO_FIXED_SIZES] = {0};
  574. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_PROC)]
  575. = sizeof(Process);
  576. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MONITOR)]
  577. = sizeof(ErtsMonitorDataHeap);
  578. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_LINK)]
  579. = sizeof(ErtsLinkData);
  580. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_DRV_SEL_D_STATE)]
  581. = sizeof(ErtsDrvSelectDataState);
  582. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_NIF_SEL_D_STATE)]
  583. = sizeof(ErtsNifSelectDataState);
  584. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MSG_REF)]
  585. = sizeof(ErtsMessageRef);
  586. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_THR_Q_EL_SL)]
  587. = sizeof(ErtsThrQElement_t);
  588. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_LL_PTIMER)]
  589. = erts_timer_type_size(ERTS_ALC_T_LL_PTIMER);
  590. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_HL_PTIMER)]
  591. = erts_timer_type_size(ERTS_ALC_T_HL_PTIMER);
  592. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_BIF_TIMER)]
  593. = erts_timer_type_size(ERTS_ALC_T_BIF_TIMER);
  594. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MREF_NSCHED_ENT)]
  595. = sizeof(ErtsNSchedMagicRefTableEntry);
  596. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MINDIRECTION)]
  597. = ERTS_MAGIC_BIN_UNALIGNED_SIZE(sizeof(ErtsMagicIndirectionWord));
  598. #ifdef HARD_DEBUG
  599. hdbg_init();
  600. #endif
  601. lock_all_physical_memory = 0;
  602. ncpu = eaiop->ncpu;
  603. if (ncpu < 1)
  604. ncpu = 1;
  605. erts_tsd_key_create(&erts_allctr_prelock_tsd_key,
  606. "erts_allctr_prelock_tsd_key");
  607. erts_sys_alloc_init();
  608. erts_init_utils_mem();
  609. set_default_sl_alloc_opts(&init.sl_alloc);
  610. set_default_std_alloc_opts(&init.std_alloc);
  611. set_default_ll_alloc_opts(&init.ll_alloc);
  612. set_default_temp_alloc_opts(&init.temp_alloc);
  613. set_default_eheap_alloc_opts(&init.eheap_alloc);
  614. set_default_binary_alloc_opts(&init.binary_alloc);
  615. set_default_ets_alloc_opts(&init.ets_alloc);
  616. set_default_driver_alloc_opts(&init.driver_alloc);
  617. set_default_fix_alloc_opts(&init.fix_alloc,
  618. fix_type_sizes);
  619. set_default_literal_alloc_opts(&init.literal_alloc);
  620. #ifdef ERTS_ALC_A_EXEC
  621. set_default_exec_alloc_opts(&init.exec_alloc);
  622. #endif
  623. set_default_test_alloc_opts(&init.test_alloc);
  624. if (argc && argv)
  625. handle_args(argc, argv, &init);
  626. if (lock_all_physical_memory) {
  627. #ifdef HAVE_MLOCKALL
  628. errno = 0;
  629. if (mlockall(MCL_CURRENT|MCL_FUTURE) != 0) {
  630. int err = errno;
  631. char *errstr = err ? strerror(err) : "unknown";
  632. erts_exit(1, "Failed to lock physical memory: %s (%d)\n",
  633. errstr, err);
  634. }
  635. #else
  636. erts_exit(1, "Failed to lock physical memory: Not supported\n");
  637. #endif
  638. }
  639. /* Make adjustments for carrier migration support */
  640. init.temp_alloc.init.util.acul = 0;
  641. adjust_carrier_migration_support(&init.sl_alloc);
  642. adjust_carrier_migration_support(&init.std_alloc);
  643. adjust_carrier_migration_support(&init.ll_alloc);
  644. adjust_carrier_migration_support(&init.eheap_alloc);
  645. adjust_carrier_migration_support(&init.binary_alloc);
  646. adjust_carrier_migration_support(&init.ets_alloc);
  647. adjust_carrier_migration_support(&init.driver_alloc);
  648. adjust_carrier_migration_support(&init.fix_alloc);
  649. adjust_carrier_migration_support(&init.literal_alloc);
  650. #ifdef ERTS_ALC_A_EXEC
  651. adjust_carrier_migration_support(&init.exec_alloc);
  652. #endif
  653. if (init.erts_alloc_config) {
  654. /* Adjust flags that erts_alloc_config won't like */
  655. /* No thread specific instances */
  656. init.temp_alloc.thr_spec = 0;
  657. init.sl_alloc.thr_spec = 0;
  658. init.std_alloc.thr_spec = 0;
  659. init.ll_alloc.thr_spec = 0;
  660. init.eheap_alloc.thr_spec = 0;
  661. init.binary_alloc.thr_spec = 0;
  662. init.ets_alloc.thr_spec = 0;
  663. init.driver_alloc.thr_spec = 0;
  664. init.fix_alloc.thr_spec = 0;
  665. init.literal_alloc.thr_spec = 0;
  666. #ifdef ERTS_ALC_A_EXEC
  667. init.exec_alloc.thr_spec = 0;
  668. #endif
  669. /* No carrier migration */
  670. init.temp_alloc.init.util.acul = 0;
  671. init.sl_alloc.init.util.acul = 0;
  672. init.std_alloc.init.util.acul = 0;
  673. init.ll_alloc.init.util.acul = 0;
  674. init.eheap_alloc.init.util.acul = 0;
  675. init.binary_alloc.init.util.acul = 0;
  676. init.ets_alloc.init.util.acul = 0;
  677. init.driver_alloc.init.util.acul = 0;
  678. init.fix_alloc.init.util.acul = 0;
  679. init.literal_alloc.init.util.acul = 0;
  680. #ifdef ERTS_ALC_A_EXEC
  681. init.exec_alloc.init.util.acul = 0;
  682. #endif
  683. }
  684. /* Only temp_alloc can use thread specific interface */
  685. if (init.temp_alloc.thr_spec)
  686. init.temp_alloc.thr_spec = erts_no_schedulers;
  687. /* Others must use thread preferred interface */
  688. adjust_tpref(&init.sl_alloc, erts_no_schedulers);
  689. adjust_tpref(&init.std_alloc, erts_no_schedulers);
  690. adjust_tpref(&init.ll_alloc, erts_no_schedulers);
  691. adjust_tpref(&init.eheap_alloc, erts_no_schedulers);
  692. adjust_tpref(&init.binary_alloc, erts_no_schedulers);
  693. adjust_tpref(&init.ets_alloc, erts_no_schedulers);
  694. adjust_tpref(&init.driver_alloc, erts_no_schedulers);
  695. adjust_tpref(&init.fix_alloc, erts_no_schedulers);
  696. adjust_tpref(&init.literal_alloc, erts_no_schedulers);
  697. #ifdef ERTS_ALC_A_EXEC
  698. adjust_tpref(&init.exec_alloc, erts_no_schedulers);
  699. #endif
  700. /*
  701. * The following allocators cannot be run with afit strategy.
  702. * Make sure they don't...
  703. */
  704. refuse_af_strategy(&init.sl_alloc);
  705. refuse_af_strategy(&init.std_alloc);
  706. refuse_af_strategy(&init.ll_alloc);
  707. refuse_af_strategy(&init.eheap_alloc);
  708. refuse_af_strategy(&init.binary_alloc);
  709. refuse_af_strategy(&init.ets_alloc);
  710. refuse_af_strategy(&init.driver_alloc);
  711. refuse_af_strategy(&init.fix_alloc);
  712. refuse_af_strategy(&init.literal_alloc);
  713. #ifdef ERTS_ALC_A_EXEC
  714. refuse_af_strategy(&init.exec_alloc);
  715. #endif
  716. if (!init.temp_alloc.thr_spec)
  717. refuse_af_strategy(&init.temp_alloc);
  718. erts_mtrace_pre_init();
  719. #if HAVE_ERTS_MSEG
  720. init.mseg.nos = erts_no_schedulers;
  721. erts_mseg_init(&init.mseg);
  722. #endif
  723. erts_alcu_init(&init.alloc_util);
  724. erts_afalc_init();
  725. erts_bfalc_init();
  726. erts_gfalc_init();
  727. erts_aoffalc_init();
  728. for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
  729. erts_allctrs[i].alloc = NULL;
  730. erts_allctrs[i].realloc = NULL;
  731. erts_allctrs[i].free = NULL;
  732. erts_allctrs[i].extra = NULL;
  733. erts_allctrs_info[i].alloc_util = 0;
  734. erts_allctrs_info[i].enabled = 0;
  735. erts_allctrs_info[i].thr_spec = 0;
  736. erts_allctrs_info[i].extra = NULL;
  737. }
  738. erts_allctrs[ERTS_ALC_A_SYSTEM].alloc = erts_sys_alloc;
  739. erts_allctrs[ERTS_ALC_A_SYSTEM].realloc = erts_sys_realloc;
  740. erts_allctrs[ERTS_ALC_A_SYSTEM].free = erts_sys_free;
  741. erts_allctrs_info[ERTS_ALC_A_SYSTEM].enabled = 1;
  742. set_au_allocator(ERTS_ALC_A_TEMPORARY, &init.temp_alloc, ncpu);
  743. set_au_allocator(ERTS_ALC_A_SHORT_LIVED, &init.sl_alloc, ncpu);
  744. set_au_allocator(ERTS_ALC_A_STANDARD, &init.std_alloc, ncpu);
  745. set_au_allocator(ERTS_ALC_A_LONG_LIVED, &init.ll_alloc, ncpu);
  746. set_au_allocator(ERTS_ALC_A_EHEAP, &init.eheap_alloc, ncpu);
  747. set_au_allocator(ERTS_ALC_A_BINARY, &init.binary_alloc, ncpu);
  748. set_au_allocator(ERTS_ALC_A_ETS, &init.ets_alloc, ncpu);
  749. set_au_allocator(ERTS_ALC_A_DRIVER, &init.driver_alloc, ncpu);
  750. set_au_allocator(ERTS_ALC_A_FIXED_SIZE, &init.fix_alloc, ncpu);
  751. set_au_allocator(ERTS_ALC_A_LITERAL, &init.literal_alloc, ncpu);
  752. #ifdef ERTS_ALC_A_EXEC
  753. set_au_allocator(ERTS_ALC_A_EXEC, &init.exec_alloc, ncpu);
  754. #endif
  755. set_au_allocator(ERTS_ALC_A_TEST, &init.test_alloc, ncpu);
  756. for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
  757. if (!erts_allctrs[i].alloc)
  758. erts_exit(ERTS_ABORT_EXIT,
  759. "Missing alloc function for %s\n", ERTS_ALC_A2AD(i));
  760. if (!erts_allctrs[i].realloc)
  761. erts_exit(ERTS_ABORT_EXIT,
  762. "Missing realloc function for %s\n", ERTS_ALC_A2AD(i));
  763. if (!erts_allctrs[i].free)
  764. erts_exit(ERTS_ABORT_EXIT,
  765. "Missing free function for %s\n", ERTS_ALC_A2AD(i));
  766. }
  767. sys_alloc_opt(SYS_ALLOC_OPT_TRIM_THRESHOLD, init.trim_threshold);
  768. sys_alloc_opt(SYS_ALLOC_OPT_TOP_PAD, init.top_pad);
  769. erts_mtrace_init(init.instr.mtrace, init.instr.nodename);
  770. start_au_allocator(ERTS_ALC_A_TEMPORARY,
  771. &init.temp_alloc,
  772. &temp_alloc_state);
  773. start_au_allocator(ERTS_ALC_A_SHORT_LIVED,
  774. &init.sl_alloc,
  775. &sl_alloc_state);
  776. start_au_allocator(ERTS_ALC_A_STANDARD,
  777. &init.std_alloc,
  778. &std_alloc_state);
  779. start_au_allocator(ERTS_ALC_A_LONG_LIVED,
  780. &init.ll_alloc,
  781. &ll_alloc_state);
  782. start_au_allocator(ERTS_ALC_A_EHEAP,
  783. &init.eheap_alloc,
  784. &eheap_alloc_state);
  785. start_au_allocator(ERTS_ALC_A_BINARY,
  786. &init.binary_alloc,
  787. &binary_alloc_state);
  788. start_au_allocator(ERTS_ALC_A_ETS,
  789. &init.ets_alloc,
  790. &ets_alloc_state);
  791. start_au_allocator(ERTS_ALC_A_DRIVER,
  792. &init.driver_alloc,
  793. &driver_alloc_state);
  794. start_au_allocator(ERTS_ALC_A_FIXED_SIZE,
  795. &init.fix_alloc,
  796. &fix_alloc_state);
  797. start_au_allocator(ERTS_ALC_A_LITERAL,
  798. &init.literal_alloc,
  799. &literal_alloc_state);
  800. #ifdef ERTS_ALC_A_EXEC
  801. start_au_allocator(ERTS_ALC_A_EXEC,
  802. &init.exec_alloc,
  803. &exec_alloc_state);
  804. #endif
  805. start_au_allocator(ERTS_ALC_A_TEST,
  806. &init.test_alloc,
  807. &test_alloc_state);
  808. erts_mtrace_install_wrapper_functions();
  809. init_aireq_alloc();
  810. #ifdef DEBUG
  811. extra_block_size += install_debug_functions();
  812. #endif
  813. adjust_fix_alloc_sizes(extra_block_size);
  814. }
  815. void
  816. erts_alloc_late_init(void)
  817. {
  818. }
  819. static void *
  820. erts_realloc_fixed_size(ErtsAlcType_t type, void *extra, void *p, Uint size)
  821. {
  822. erts_exit(ERTS_ABORT_EXIT,
  823. "Attempt to reallocate a block of the fixed size type %s\n",
  824. ERTS_ALC_T2TD(type));
  825. }
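/*
 * set_au_allocator() wires up the dispatch table for one allocator: a
 * disabled allocator falls back to plain sys_alloc, a thread-specific or
 * thread-preferred one gets the *_thr_spec/*_thr_pref entry points, and
 * anything else must at least be thread safe (the *_ts entry points).
 */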
  826. static void
  827. set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init, int ncpu)
  828. {
  829. ErtsAllocatorFunctions_t *af = &erts_allctrs[alctr_n];
  830. ErtsAllocatorInfo_t *ai = &erts_allctrs_info[alctr_n];
  831. ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[alctr_n];
  832. /*
  833. * Some allocators are forced on if halfword heap is used.
  834. */
  835. if (init->init.util.force)
  836. init->enable = 1;
  837. tspec->enabled = 0;
  838. tspec->dd = 0;
  839. tspec->aix = alctr_n;
  840. tspec->size = 0;
  841. ai->thr_spec = 0;
  842. if (!init->enable) {
  843. af->alloc = erts_sys_alloc;
  844. af->realloc = erts_sys_realloc;
  845. af->free = erts_sys_free;
  846. af->extra = NULL;
  847. ai->alloc_util = 0;
  848. ai->enabled = 0;
  849. ai->extra = NULL;
  850. return;
  851. }
  852. if (init->thr_spec) {
  853. if (init->thr_spec > 0) {
  854. af->alloc = erts_alcu_alloc_thr_spec;
  855. if (init->init.util.fix_type_size)
  856. af->realloc = erts_realloc_fixed_size;
  857. else if (init->init.util.ramv)
  858. af->realloc = erts_alcu_realloc_mv_thr_spec;
  859. else
  860. af->realloc = erts_alcu_realloc_thr_spec;
  861. af->free = erts_alcu_free_thr_spec;
  862. }
  863. else {
  864. af->alloc = erts_alcu_alloc_thr_pref;
  865. if (init->init.util.fix_type_size)
  866. af->realloc = erts_realloc_fixed_size;
  867. else if (init->init.util.ramv)
  868. af->realloc = erts_alcu_realloc_mv_thr_pref;
  869. else
  870. af->realloc = erts_alcu_realloc_thr_pref;
  871. af->free = erts_alcu_free_thr_pref;
  872. tspec->dd = 1;
  873. }
  874. tspec->enabled = 1;
  875. tspec->size = abs(init->thr_spec) + 1;
  876. ai->thr_spec = tspec->size;
  877. }
  878. else
  879. if (init->init.util.ts) {
  880. af->alloc = erts_alcu_alloc_ts;
  881. if (init->init.util.fix_type_size)
  882. af->realloc = erts_realloc_fixed_size;
  883. else if (init->init.util.ramv)
  884. af->realloc = erts_alcu_realloc_mv_ts;
  885. else
  886. af->realloc = erts_alcu_realloc_ts;
  887. af->free = erts_alcu_free_ts;
  888. }
  889. else
  890. {
  891. erts_exit(ERTS_ABORT_EXIT, "%salloc is not thread safe\n",
  892. init->init.util.name_prefix);
  893. }
  894. af->extra = NULL;
  895. ai->alloc_util = 1;
  896. ai->enabled = 1;
  897. }
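/*
 * start_au_allocator() carves out cache-line aligned state and fix-type
 * lists for every instance and then starts one Allctr_t per instance with
 * the selected strategy. For thread-preferred allocators only instance 0
 * is started thread safe (locked); it serves threads without a scheduler.
 */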
  898. static void
  899. start_au_allocator(ErtsAlcType_t alctr_n,
  900. struct au_init *init,
  901. ErtsAllocatorState_t *state)
  902. {
  903. int i;
  904. int size = 1;
  905. void *as0;
  906. ErtsAlcStrat_t astrat;
  907. ErtsAllocatorFunctions_t *af = &erts_allctrs[alctr_n];
  908. ErtsAllocatorInfo_t *ai = &erts_allctrs_info[alctr_n];
  909. ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[alctr_n];
  910. ErtsAlcFixList_t *fix_lists = NULL;
  911. size_t fix_list_size = 0;
  912. if (!init->enable)
  913. return;
  914. if (init->thr_spec) {
  915. char *states = erts_sys_alloc(0,
  916. NULL,
  917. ((sizeof(Allctr_t *)
  918. * (tspec->size + 1))
  919. + (sizeof(ErtsAllocatorState_t)
  920. * tspec->size)
  921. + ERTS_CACHE_LINE_SIZE - 1));
  922. if (!states)
  923. erts_exit(ERTS_ABORT_EXIT,
  924. "Failed to allocate allocator states for %salloc\n",
  925. init->init.util.name_prefix);
  926. tspec->allctr = (Allctr_t **) states;
  927. states += sizeof(Allctr_t *) * (tspec->size + 1);
  928. states = ((((UWord) states) & ERTS_CACHE_LINE_MASK)
  929. ? (char *) ((((UWord) states) & ~ERTS_CACHE_LINE_MASK)
  930. + ERTS_CACHE_LINE_SIZE)
  931. : (char *) states);
  932. tspec->allctr[0] = (Allctr_t *) state;
  933. size = tspec->size;
  934. for (i = 1; i < size; i++)
  935. tspec->allctr[i] = (Allctr_t *)
  936. &((ErtsAllocatorState_t *) states)[i-1];
  937. }
  938. if (init->init.util.fix_type_size) {
  939. size_t tot_fix_list_size;
  940. fix_list_size = sizeof(ErtsAlcFixList_t)*ERTS_ALC_NO_FIXED_SIZES;
  941. fix_list_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(fix_list_size);
  942. tot_fix_list_size = fix_list_size;
  943. if (init->thr_spec)
  944. tot_fix_list_size *= tspec->size;
  945. fix_lists = erts_sys_alloc(0,
  946. NULL,
  947. (tot_fix_list_size
  948. + ERTS_CACHE_LINE_SIZE - 1));
  949. if (!fix_lists)
  950. erts_exit(ERTS_ABORT_EXIT,
  951. "Failed to allocate fix lists for %salloc\n",
  952. init->init.util.name_prefix);
  953. if (((UWord) fix_lists) & ERTS_CACHE_LINE_MASK)
  954. fix_lists = ((ErtsAlcFixList_t *)
  955. ((((UWord) fix_lists) & ~ERTS_CACHE_LINE_MASK)
  956. + ERTS_CACHE_LINE_SIZE));
  957. }
  958. for (i = 0; i < size; i++) {
  959. Allctr_t *as;
  960. astrat = init->astrat;
  961. if (!init->thr_spec)
  962. as0 = state;
  963. else {
  964. as0 = (void *) tspec->allctr[i];
  965. if (!as0)
  966. continue;
  967. if (init->thr_spec < 0) {
  968. init->init.util.ts = i == 0;
  969. init->init.util.tspec = 0;
  970. init->init.util.tpref = -1*init->thr_spec + 1;
  971. }
  972. else {
  973. if (i != 0)
  974. init->init.util.ts = 0;
  975. else {
  976. if (astrat == ERTS_ALC_S_AFIT)
  977. astrat = ERTS_ALC_S_GOODFIT;
  978. init->init.util.ts = 1;
  979. }
  980. init->init.util.tspec = init->thr_spec + 1;
  981. init->init.util.tpref = 0;
  982. }
  983. }
  984. if (fix_lists) {
  985. init->init.util.fix = fix_lists;
  986. fix_lists = ((ErtsAlcFixList_t *)
  987. (((char *) fix_lists) + fix_list_size));
  988. }
  989. init->init.util.alloc_strat = astrat;
  990. init->init.util.ix = i;
  991. switch (astrat) {
  992. case ERTS_ALC_S_GOODFIT:
  993. as = erts_gfalc_start((GFAllctr_t *) as0,
  994. &init->init.gf,
  995. &init->init.util);
  996. break;
  997. case ERTS_ALC_S_BESTFIT:
  998. as = erts_bfalc_start((BFAllctr_t *) as0,
  999. &init->init.bf,
  1000. &init->init.util);
  1001. break;
  1002. case ERTS_ALC_S_AFIT:
  1003. as = erts_afalc_start((AFAllctr_t *) as0,
  1004. &init->init.af,
  1005. &init->init.util);
  1006. break;
  1007. case ERTS_ALC_S_FIRSTFIT:
  1008. as = erts_aoffalc_start((AOFFAllctr_t *) as0,
  1009. &init->init.aoff,
  1010. &init->init.util);
  1011. break;
  1012. default:
  1013. as = NULL;
  1014. ASSERT(0);
  1015. }
  1016. if (!as)
  1017. erts_exit(ERTS_ABORT_EXIT,
  1018. "Failed to start %salloc\n", init->init.util.name_prefix);
  1019. ASSERT(as == (void *) as0);
  1020. af->extra = as;
  1021. }
  1022. if (init->thr_spec)
  1023. af->extra = tspec;
  1024. ai->extra = af->extra;
  1025. }
  1026. static void bad_param(char *param_start, char *param_end)
  1027. {
  1028. size_t len = param_end - param_start;
  1029. char param[100];
  1030. if (len > 99)
  1031. len = 99;
  1032. sys_memcpy((void *) param, (void *) param_start, len);
  1033. param[len] = '\0';
  1034. erts_fprintf(stderr, "bad \"%s\" parameter\n", param);
  1035. erts_usage();
  1036. }
  1037. static void bad_value(char *param_start, char *param_end, char *value)
  1038. {
  1039. size_t len = param_end - param_start;
  1040. char param[100];
  1041. if (len > 99)
  1042. len = 99;
  1043. sys_memcpy((void *) param, (void *) param_start, len);
  1044. param[len] = '\0';
  1045. erts_fprintf(stderr, "bad \"%s\" value: %s\n", param, value);
  1046. erts_usage();
  1047. }
1048. /* get_value() marks an argument as handled by
1049. putting NULL in its argv slot */
  1050. static char *
  1051. get_value(char* rest, char** argv, int* ip)
  1052. {
  1053. char *param = argv[*ip]+1;
  1054. argv[*ip] = NULL;
  1055. if (*rest == '\0') {
  1056. char *next = argv[*ip + 1];
  1057. if (next[0] == '-'
  1058. && next[1] == '-'
  1059. && next[2] == '\0') {
  1060. bad_value(param, rest, "");
  1061. }
  1062. (*ip)++;
  1063. argv[*ip] = NULL;
  1064. return next;
  1065. }
  1066. return rest;
  1067. }
  1068. static ERTS_INLINE int
  1069. has_prefix(const char *prefix, const char *string)
  1070. {
  1071. int i;
  1072. for (i = 0; prefix[i]; i++)
  1073. if (prefix[i] != string[i])
  1074. return 0;
  1075. return 1;
  1076. }
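/*
 * The get_*_value() helpers below parse the different kinds of +M values:
 * booleans, kilobyte and megabyte sizes (saturating at the word-size
 * maximum), plain amounts, and the acul percentage (0..100, or "de" for
 * the per-allocator default).
 */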
  1077. static int
  1078. get_bool_value(char *param_end, char** argv, int* ip)
  1079. {
  1080. char *param = argv[*ip]+1;
  1081. char *value = get_value(param_end, argv, ip);
  1082. if (sys_strcmp(value, "true") == 0)
  1083. return 1;
  1084. else if (sys_strcmp(value, "false") == 0)
  1085. return 0;
  1086. else
  1087. bad_value(param, param_end, value);
  1088. return -1;
  1089. }
  1090. static Uint kb_to_bytes(Sint kb, Uint *bytes)
  1091. {
  1092. const Uint max = ((~((Uint) 0))/1024) + 1;
  1093. if (kb < 0 || (Uint)kb > max)
  1094. return 0;
  1095. if ((Uint)kb == max)
  1096. *bytes = ~((Uint) 0);
  1097. else
  1098. *bytes = ((Uint) kb)*1024;
  1099. return 1;
  1100. }
  1101. static Uint
  1102. get_kb_value(char *param_end, char** argv, int* ip)
  1103. {
  1104. Sint tmp;
  1105. Uint bytes = 0;
  1106. char *rest;
  1107. char *param = argv[*ip]+1;
  1108. char *value = get_value(param_end, argv, ip);
  1109. errno = 0;
  1110. tmp = (Sint) ErtsStrToSint(value, &rest, 10);
  1111. if (errno != 0 || rest == value || !kb_to_bytes(tmp, &bytes))
  1112. bad_value(param, param_end, value);
  1113. return bytes;
  1114. }
  1115. static UWord
  1116. get_mb_value(char *param_end, char** argv, int* ip)
  1117. {
  1118. SWord tmp;
  1119. UWord max = ((~((UWord) 0))/(1024*1024)) + 1;
  1120. char *rest;
  1121. char *param = argv[*ip]+1;
  1122. char *value = get_value(param_end, argv, ip);
  1123. errno = 0;
  1124. tmp = (SWord) ErtsStrToSint(value, &rest, 10);
  1125. if (errno != 0 || rest == value || tmp < 0 || max < ((UWord) tmp))
  1126. bad_value(param, param_end, value);
  1127. if (max == (UWord) tmp)
  1128. return ~((UWord) 0);
  1129. else
  1130. return ((UWord) tmp)*1024*1024;
  1131. }
  1132. #if 0
  1133. static Uint
  1134. get_byte_value(char *param_end, char** argv, int* ip)
  1135. {
  1136. Sint tmp;
  1137. char *rest;
  1138. char *param = argv[*ip]+1;
  1139. char *value = get_value(param_end, argv, ip);
  1140. errno = 0;
  1141. tmp = (Sint) ErtsStrToSint(value, &rest, 10);
  1142. if (errno != 0 || rest == value || tmp < 0)
  1143. bad_value(param, param_end, value);
  1144. return (Uint) tmp;
  1145. }
  1146. #endif
  1147. static Uint
  1148. get_amount_value(char *param_end, char** argv, int* ip)
  1149. {
  1150. Sint tmp;
  1151. char *rest;
  1152. char *param = argv[*ip]+1;
  1153. char *value = get_value(param_end, argv, ip);
  1154. errno = 0;
  1155. tmp = (Sint) ErtsStrToSint(value, &rest, 10);
  1156. if (errno != 0 || rest == value || tmp < 0)
  1157. bad_value(param, param_end, value);
  1158. return (Uint) tmp;
  1159. }
  1160. static Uint
  1161. get_acul_value(struct au_init *auip, char *param_end, char** argv, int* ip)
  1162. {
  1163. Sint tmp;
  1164. char *rest;
  1165. char *param = argv[*ip]+1;
  1166. char *value = get_value(param_end, argv, ip);
  1167. if (sys_strcmp(value, "de") == 0) {
  1168. switch (auip->init.util.alloc_no) {
  1169. case ERTS_ALC_A_LONG_LIVED:
  1170. return ERTS_ALC_DEFAULT_ENABLED_ACUL_LL_ALLOC;
  1171. case ERTS_ALC_A_EHEAP:
  1172. return ERTS_ALC_DEFAULT_ENABLED_ACUL_EHEAP_ALLOC;
  1173. default:
  1174. return ERTS_ALC_DEFAULT_ENABLED_ACUL;
  1175. }
  1176. }
  1177. errno = 0;
  1178. tmp = (Sint) ErtsStrToSint(value, &rest, 10);
  1179. if (errno != 0 || rest == value || tmp < 0 || 100 < tmp)
  1180. bad_value(param, param_end, value);
  1181. return (Uint) tmp;
  1182. }
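/*
 * handle_au_arg() handles one "+M<S><P> <V>" sub-parameter for a single
 * alloc_util allocator: strategy (as*), carrier sizes (mmbcs/smbcs/lmbcs),
 * single-block and mbc thresholds (sbct, rsbc*, rmbcmt, mbcgs, mbsd),
 * migration limits (acul/acnl/acfml), enable (e), thread specific (t) and
 * allocation tagging (atags). When called via the +Mu catch-all
 * (u_switch != 0), settings a particular allocator does not allow are
 * ignored rather than rejected.
 */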
  1183. static void
  1184. handle_au_arg(struct au_init *auip,
  1185. char* sub_param,
  1186. char** argv,
  1187. int* ip,
  1188. int u_switch)
  1189. {
  1190. char *param = argv[*ip]+1;
  1191. switch (sub_param[0]) {
  1192. case 'a':
  1193. if (sub_param[1] == 'c') { /* Migration parameters "ac*" */
  1194. UWord value;
  1195. UWord* wp;
  1196. if (!auip->carrier_migration_allowed && !u_switch)
  1197. goto bad_switch;
  1198. if (has_prefix("acul", sub_param)) {
  1199. value = get_acul_value(auip, sub_param + 4, argv, ip);
  1200. wp = &auip->init.util.acul;
  1201. }
  1202. else if (has_prefix("acnl", sub_param)) {
  1203. value = get_amount_value(sub_param + 4, argv, ip);
  1204. wp = &auip->init.util.acnl;
  1205. }
  1206. else if (has_prefix("acfml", sub_param)) {
  1207. value = get_amount_value(sub_param + 5, argv, ip);
  1208. wp = &auip->init.util.acfml;
  1209. }
  1210. else
  1211. goto bad_switch;
  1212. if (auip->carrier_migration_allowed)
  1213. *wp = value;
  1214. }
  1215. else if(has_prefix("asbcst", sub_param)) {
  1216. auip->init.util.asbcst = get_kb_value(sub_param + 6, argv, ip);
  1217. }
  1218. else if(has_prefix("as", sub_param)) {
  1219. char *alg = get_value(sub_param + 2, argv, ip);
  1220. if (sys_strcmp("bf", alg) == 0) {
  1221. auip->astrat = ERTS_ALC_S_BESTFIT;
  1222. auip->init.bf.ao = 0;
  1223. }
  1224. else if (sys_strcmp("aobf", alg) == 0) {
  1225. auip->astrat = ERTS_ALC_S_BESTFIT;
  1226. auip->init.bf.ao = 1;
  1227. }
  1228. else if (sys_strcmp("gf", alg) == 0) {
  1229. auip->astrat = ERTS_ALC_S_GOODFIT;
  1230. }
  1231. else if (sys_strcmp("af", alg) == 0) {
  1232. auip->astrat = ERTS_ALC_S_AFIT;
  1233. }
  1234. else if (sys_strcmp("aoff", alg) == 0) {
  1235. auip->astrat = ERTS_ALC_S_FIRSTFIT;
  1236. auip->init.aoff.crr_order = FF_AOFF;
  1237. auip->init.aoff.blk_order = FF_AOFF;
  1238. }
  1239. else if (sys_strcmp("aoffcbf", alg) == 0) {
  1240. auip->astrat = ERTS_ALC_S_FIRSTFIT;
  1241. auip->init.aoff.crr_order = FF_AOFF;
  1242. auip->init.aoff.blk_order = FF_BF;
  1243. }
  1244. else if (sys_strcmp("aoffcaobf", alg) == 0) {
  1245. auip->astrat = ERTS_ALC_S_FIRSTFIT;
  1246. auip->init.aoff.crr_order = FF_AOFF;
  1247. auip->init.aoff.blk_order = FF_AOBF;
  1248. }
  1249. else if (sys_strcmp("ageffcaoff", alg) == 0) {
  1250. auip->astrat = ERTS_ALC_S_FIRSTFIT;
  1251. auip->init.aoff.crr_order = FF_AGEFF;
  1252. auip->init.aoff.blk_order = FF_AOFF;
  1253. }
  1254. else if (sys_strcmp("ageffcbf", alg) == 0) {
  1255. auip->astrat = ERTS_ALC_S_FIRSTFIT;
  1256. auip->init.aoff.crr_order = FF_AGEFF;
  1257. auip->init.aoff.blk_order = FF_BF;
  1258. }
  1259. else if (sys_strcmp("ageffcaobf", alg) == 0) {
  1260. auip->astrat = ERTS_ALC_S_FIRSTFIT;
  1261. auip->init.aoff.crr_order = FF_AGEFF;
  1262. auip->init.aoff.blk_order = FF_AOBF;
  1263. }
  1264. else {
  1265. if (auip->init.util.alloc_no == ERTS_ALC_A_TEST
  1266. && sys_strcmp("chaosff", alg) == 0) {
  1267. auip->astrat = ERTS_ALC_S_FIRSTFIT;
  1268. auip->init.aoff.crr_order = FF_CHAOS;
  1269. auip->init.aoff.blk_order = FF_CHAOS;
  1270. }
  1271. else {
  1272. bad_value(param, sub_param + 1, alg);
  1273. }
  1274. }
  1275. if (!strategy_support_carrier_migration(auip))
  1276. auip->init.util.acul = 0;
  1277. } else if (has_prefix("atags", sub_param)) {
  1278. auip->init.util.atags = get_bool_value(sub_param + 5, argv, ip);
  1279. }
  1280. else
  1281. goto bad_switch;
  1282. break;
  1283. case 'e': {
  1284. int e = get_bool_value(sub_param + 1, argv, ip);
  1285. if (!auip->disable_allowed && !e) {
  1286. if (!u_switch)
  1287. bad_value(param, sub_param + 1, "false");
  1288. else
  1289. ASSERT(auip->enable); /* ignore */
  1290. }
  1291. else auip->enable = e;
  1292. break;
  1293. }
  1294. case 'l':
  1295. if (has_prefix("lmbcs", sub_param)) {
  1296. auip->default_.lmbcs = 0;
  1297. auip->init.util.lmbcs = get_kb_value(sub_param + 5, argv, ip);
  1298. }
  1299. else
  1300. goto bad_switch;
  1301. break;
  1302. case 'm':
  1303. if (has_prefix("mbcgs", sub_param)) {
  1304. auip->init.util.mbcgs = get_amount_value(sub_param + 5, argv, ip);
  1305. }
  1306. else if (has_prefix("mbsd", sub_param)) {
  1307. auip->init.gf.mbsd = get_amount_value(sub_param + 4, argv, ip);
  1308. if (auip->init.gf.mbsd < 1)
  1309. auip->init.gf.mbsd = 1;
  1310. }
  1311. else if (has_prefix("mmbcs", sub_param)) {
  1312. auip->default_.mmbcs = 0;
  1313. auip->init.util.mmbcs = get_kb_value(sub_param + 5, argv, ip);
  1314. }
  1315. else if (has_prefix("mmmbc", sub_param)) {
  1316. auip->default_.mmmbc = 0;
  1317. auip->init.util.mmmbc = get_amount_value(sub_param + 5, argv, ip);
  1318. }
  1319. else if (has_prefix("mmsbc", sub_param)) {
  1320. auip->init.util.mmsbc = get_amount_value(sub_param + 5, argv, ip);
  1321. }
  1322. else
  1323. goto bad_switch;
  1324. break;
  1325. case 'r':
  1326. if(has_prefix("rsbcmt", sub_param)) {
  1327. auip->init.util.rsbcmt = get_amount_value(sub_param + 6, argv, ip);
  1328. if (auip->init.util.rsbcmt > 100)
  1329. auip->init.util.rsbcmt = 100;
  1330. }
  1331. else if(has_prefix("rsbcst", sub_param)) {
  1332. auip->init.util.rsbcst = get_amount_value(sub_param + 6, argv, ip);
  1333. if (auip->init.util.rsbcst > 100)
  1334. auip->init.util.rsbcst = 100;
  1335. }
  1336. else if (has_prefix("rmbcmt", sub_param)) {
  1337. auip->init.util.rmbcmt = get_amount_value(sub_param + 6, argv, ip);
  1338. if (auip->init.util.rmbcmt > 100)
  1339. auip->init.util.rmbcmt = 100;
  1340. }
  1341. else if (has_prefix("ramv", sub_param)) {
  1342. auip->init.util.ramv = get_bool_value(sub_param + 4, argv, ip);
  1343. }
  1344. else
  1345. goto bad_switch;
  1346. break;
  1347. case 's':
  1348. if(has_prefix("sbct", sub_param)) {
  1349. auip->init.util.sbct = get_kb_value(sub_param + 4, argv, ip);
  1350. }
  1351. else if (has_prefix("smbcs", sub_param)) {
  1352. auip->default_.smbcs = 0;
  1353. auip->init.util.smbcs = get_kb_value(sub_param + 5, argv, ip);
  1354. }
  1355. else
  1356. goto bad_switch;
  1357. break;
  1358. case 't': {
  1359. int res = get_bool_value(sub_param+1, argv, ip);
  1360. if (res > 0) {
  1361. if (!auip->thr_spec_allowed) {
  1362. if (!u_switch)
  1363. bad_value(param, sub_param + 1, "true");
  1364. else
  1365. ASSERT(!auip->thr_spec); /* ignore */
  1366. }
  1367. else
  1368. auip->thr_spec = 1;
  1369. break;
  1370. }
  1371. else if (res == 0) {
  1372. auip->thr_spec = 0;
  1373. auip->init.util.acul = 0;
  1374. break;
  1375. }
  1376. goto bad_switch;
  1377. }
  1378. default:
  1379. bad_switch:
  1380. bad_param(param, sub_param);
  1381. }
  1382. }
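/*
 * handle_args() scans argv for "+M..." emulator flags, for example:
 *
 *   erl +MBas aobf +MBacul 0   binary_alloc: aobf strategy, no migration
 *   erl +Muacul de             all alloc_util allocators: default ACUL
 *   erl +Mea min               disable all allocators that may be disabled
 *
 * Consumed arguments are replaced by NULL and compacted away at the end,
 * so later argument parsing never sees them.
 */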
  1383. static void
  1384. handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
  1385. {
  1386. struct au_init *aui[] = {
  1387. &init->binary_alloc,
  1388. &init->std_alloc,
  1389. &init->ets_alloc,
  1390. &init->eheap_alloc,
  1391. &init->ll_alloc,
  1392. &init->driver_alloc,
  1393. &init->fix_alloc,
  1394. &init->sl_alloc
  1395. /* test_alloc not affected by +Mea??? or +Mu??? */
  1396. };
  1397. int aui_sz = (int) sizeof(aui)/sizeof(aui[0]);
  1398. char *arg;
  1399. char *rest;
  1400. int i, j;
  1401. i = 1;
  1402. ASSERT(argc && argv && init);
  1403. while (i < *argc) {
  1404. if(argv[i][0] == '-') {
  1405. char *param = argv[i]+1;
  1406. switch (argv[i][1]) {
  1407. case 'M':
  1408. switch (argv[i][2]) {
  1409. case 'B':
  1410. handle_au_arg(&init->binary_alloc, &argv[i][3], argv, &i, 0);
  1411. break;
  1412. case 'I':
  1413. if (has_prefix("scs", argv[i]+3)) {
  1414. #if HAVE_ERTS_MSEG
  1415. init->mseg.literal_mmap.scs =
  1416. #endif
  1417. get_mb_value(argv[i]+6, argv, &i);
  1418. }
  1419. else
  1420. handle_au_arg(&init->literal_alloc, &argv[i][3], argv, &i, 0);
  1421. break;
  1422. case 'X':
  1423. if (has_prefix("scs", argv[i]+3)) {
  1424. /* Ignore obsolete */
  1425. (void) get_mb_value(argv[i]+6, argv, &i);
  1426. }
  1427. else
  1428. handle_au_arg(&init->exec_alloc, &argv[i][3], argv, &i, 0);
  1429. break;
  1430. case 'D':
  1431. handle_au_arg(&init->std_alloc, &argv[i][3], argv, &i, 0);
  1432. break;
  1433. case 'E':
  1434. handle_au_arg(&init->ets_alloc, &argv[i][3], argv, &i, 0);
  1435. break;
  1436. case 'F':
  1437. handle_au_arg(&init->fix_alloc, &argv[i][3], argv, &i, 0);
  1438. break;
  1439. case 'H':
  1440. handle_au_arg(&init->eheap_alloc, &argv[i][3], argv, &i, 0);
  1441. break;
  1442. case 'L':
  1443. handle_au_arg(&init->ll_alloc, &argv[i][3], argv, &i, 0);
  1444. break;
  1445. case 'M':
  1446. if (has_prefix("amcbf", argv[i]+3)) {
  1447. #if HAVE_ERTS_MSEG
  1448. init->mseg.amcbf =
  1449. #endif
  1450. get_kb_value(argv[i]+8, argv, &i);
  1451. }
  1452. else if (has_prefix("rmcbf", argv[i]+3)) {
  1453. #if HAVE_ERTS_MSEG
  1454. init->mseg.rmcbf =
  1455. #endif
  1456. get_amount_value(argv[i]+8, argv, &i);
  1457. }
  1458. else if (has_prefix("mcs", argv[i]+3)) {
  1459. #if HAVE_ERTS_MSEG
  1460. init->mseg.mcs =
  1461. #endif
  1462. get_amount_value(argv[i]+6, argv, &i);
  1463. }
  1464. else if (has_prefix("scs", argv[i]+3)) {
  1465. #if HAVE_ERTS_MSEG
  1466. init->mseg.dflt_mmap.scs =
  1467. #endif
  1468. get_mb_value(argv[i]+6, argv, &i);
  1469. }
  1470. else if (has_prefix("sco", argv[i]+3)) {
  1471. #if HAVE_ERTS_MSEG
  1472. init->mseg.dflt_mmap.sco =
  1473. #endif
  1474. get_bool_value(argv[i]+6, argv, &i);
  1475. }
  1476. else if (has_prefix("scrpm", argv[i]+3)) {
  1477. #if HAVE_ERTS_MSEG
  1478. init->mseg.dflt_mmap.scrpm =
  1479. #endif
  1480. get_bool_value(argv[i]+8, argv, &i);
  1481. }
  1482. else if (has_prefix("scrfsd", argv[i]+3)) {
  1483. #if HAVE_ERTS_MSEG
  1484. init->mseg.dflt_mmap.scrfsd =
  1485. #endif
  1486. get_amount_value(argv[i]+9, argv, &i);
  1487. }
  1488. else {
  1489. bad_param(param, param+2);
  1490. }
  1491. break;
  1492. case 'R':
  1493. handle_au_arg(&init->driver_alloc, &argv[i][3], argv, &i, 0);
  1494. break;
  1495. case 'S':
  1496. handle_au_arg(&init->sl_alloc, &argv[i][3], argv, &i, 0);
  1497. break;
  1498. case 'T':
  1499. handle_au_arg(&init->temp_alloc, &argv[i][3], argv, &i, 0);
  1500. break;
  1501. case 'Z':
  1502. handle_au_arg(&init->test_alloc, &argv[i][3], argv, &i, 0);
  1503. break;
  1504. case 'Y': { /* sys_alloc */
  1505. if (has_prefix("tt", param+2)) {
  1506. /* set trim threshold */
  1507. arg = get_value(param+4, argv, &i);
  1508. errno = 0;
  1509. init->trim_threshold = (int) strtol(arg, &rest, 10);
  1510. if (errno != 0
  1511. || rest == arg
  1512. || init->trim_threshold < 0
  1513. || (INT_MAX/1024) < init->trim_threshold) {
  1514. bad_value(param, param+4, arg);
  1515. }
  1516. VERBOSE(DEBUG_SYSTEM,
  1517. ("using trim threshold: %d\n",
  1518. init->trim_threshold));
  1519. init->trim_threshold *= 1024;
  1520. }
  1521. else if (has_prefix("tp", param+2)) {
  1522. /* set top pad */
  1523. arg = get_value(param+4, argv, &i);
  1524. errno = 0;
  1525. init->top_pad = (int) strtol(arg, &rest, 10);
  1526. if (errno != 0
  1527. || rest == arg
  1528. || init->top_pad < 0
  1529. || (INT_MAX/1024) < init->top_pad) {
  1530. bad_value(param, param+4, arg);
  1531. }
  1532. VERBOSE(DEBUG_SYSTEM,
  1533. ("using top pad: %d\n",init->top_pad));
  1534. init->top_pad *= 1024;
  1535. }
  1536. else if (has_prefix("m", param+2)) {
  1537. /* Has been handled by erlexec */
  1538. (void) get_value(param+3, argv, &i);
  1539. }
  1540. else if (has_prefix("e", param+2)) {
  1541. arg = get_value(param+3, argv, &i);
  1542. if (sys_strcmp("true", arg) != 0)
  1543. bad_value(param, param+3, arg);
  1544. }
  1545. else
  1546. bad_param(param, param+2);
  1547. break;
  1548. }
  1549. case 'e':
  1550. switch (argv[i][3]) {
  1551. case 'a': {
  1552. int a;
  1553. arg = get_value(argv[i]+4, argv, &i);
  1554. if (sys_strcmp("min", arg) == 0) {
  1555. for (a = 0; a < aui_sz; a++)
  1556. aui[a]->enable = 0;
  1557. }
  1558. else if (sys_strcmp("max", arg) == 0) {
  1559. for (a = 0; a < aui_sz; a++)
  1560. aui[a]->enable = 1;
  1561. }
  1562. else if (sys_strcmp("config", arg) == 0) {
  1563. init->erts_alloc_config = 1;
  1564. }
  1565. else if (sys_strcmp("r9c", arg) == 0
  1566. || sys_strcmp("r10b", arg) == 0
  1567. || sys_strcmp("r11b", arg) == 0) {
  1568. set_default_sl_alloc_opts(&init->sl_alloc);
  1569. set_default_std_alloc_opts(&init->std_alloc);
  1570. set_default_ll_alloc_opts(&init->ll_alloc);
  1571. set_default_temp_alloc_opts(&init->temp_alloc);
  1572. set_default_eheap_alloc_opts(&init->eheap_alloc);
  1573. set_default_binary_alloc_opts(&init->binary_alloc);
  1574. set_default_ets_alloc_opts(&init->ets_alloc);
  1575. set_default_driver_alloc_opts(&init->driver_alloc);
  1576. set_default_driver_alloc_opts(&init->fix_alloc);
  1577. init->driver_alloc.enable = 0;
  1578. if (sys_strcmp("r9c", arg) == 0) {
  1579. init->sl_alloc.enable = 0;
  1580. init->std_alloc.enable = 0;
  1581. init->binary_alloc.enable = 0;
  1582. init->ets_alloc.enable = 0;
  1583. }
  1584. for (a = 0; a < aui_sz; a++) {
  1585. aui[a]->thr_spec = 0;
  1586. aui[a]->init.util.acul = 0;
  1587. aui[a]->init.util.ramv = 0;
  1588. aui[a]->init.util.lmbcs = 5*1024*1024;
  1589. }
  1590. }
  1591. else {
  1592. bad_param(param, param+3);
  1593. }
  1594. break;
  1595. }
  1596. default:
  1597. bad_param(param, param+1);
  1598. }
  1599. break;
  1600. case 'i':
  1601. switch (argv[i][3]) {
  1602. case 't':
  1603. init->instr.mtrace = get_value(argv[i]+4, argv, &i);
  1604. break;
  1605. default:
  1606. bad_param(param, param+2);
  1607. }
  1608. break;
  1609. case 'l':
  1610. if (has_prefix("pm", param+2)) {
  1611. arg = get_value(argv[i]+5, argv, &i);
  1612. if (sys_strcmp("all", arg) == 0)
  1613. lock_all_physical_memory = 1;
  1614. else if (sys_strcmp("no", arg) == 0)
  1615. lock_all_physical_memory = 0;
  1616. else
  1617. bad_value(param, param+4, arg);
  1618. break;
  1619. }
  1620. bad_param(param, param+2);
  1621. break;
  1622. case 'u':
  1623. if (has_prefix("ycs", argv[i]+3)) {
  1624. init->alloc_util.ycs
  1625. = get_kb_value(argv[i]+6, argv, &i);
  1626. }
  1627. else if (has_prefix("mmc", argv[i]+3)) {
  1628. init->alloc_util.mmc
  1629. = get_amount_value(argv[i]+6, argv, &i);
  1630. }
  1631. else if (has_prefix("sac", argv[i]+3)) {
  1632. init->alloc_util.sac
  1633. = get_bool_value(argv[i]+6, argv, &i);
  1634. }
  1635. else {
  1636. int a;
  1637. int start = i;
  1638. char *param = argv[i];
  1639. char *val = i+1 < *argc ? argv[i+1] : NULL;
  1640. for (a = 0; a < aui_sz; a++) {
  1641. if (a > 0) {
  1642. ASSERT(i == start || i == start+1);
  1643. argv[start] = param;
  1644. if (i != start)
  1645. argv[start + 1] = val;
  1646. i = start;
  1647. }
  1648. handle_au_arg(aui[a], &argv[i][3], argv, &i, 1);
  1649. }
  1650. }
  1651. break;
  1652. default:
  1653. bad_param(param, param+1);
  1654. }
  1655. break;
  1656. case '-':
  1657. if (argv[i][2] == '\0') {
  1658. /* End of system flags reached */
  1659. if (init->instr.mtrace) {
  1660. while (i < *argc) {
  1661. if(sys_strcmp(argv[i], "-sname") == 0
  1662. || sys_strcmp(argv[i], "-name") == 0) {
  1663. if (i + 1 < *argc) {
  1664. init->instr.nodename = argv[i+1];
  1665. break;
  1666. }
  1667. }
  1668. i++;
  1669. }
  1670. }
  1671. goto args_parsed;
  1672. }
  1673. break;
  1674. default:
  1675. break;
  1676. }
  1677. }
  1678. i++;
  1679. }
  1680. args_parsed:
  1681. /* Handled arguments have been marked with NULL. Slide arguments
  1682. not handled towards the beginning of argv. */
  1683. for (i = 0, j = 0; i < *argc; i++) {
  1684. if (argv[i])
  1685. argv[j++] = argv[i];
  1686. }
  1687. *argc = j;
  1688. }
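/* Map an allocator type number to its type-description string; returns NULL
 * for numbers outside the valid range. */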
  1689. static char *type_no_str(ErtsAlcType_t n)
  1690. {
  1691. #if ERTS_ALC_N_MIN != 0
  1692. if (n < ERTS_ALC_N_MIN)
  1693. return NULL;
  1694. #endif
  1695. if (n > ERTS_ALC_N_MAX)
  1696. return NULL;
  1697. return (char *) ERTS_ALC_N2TD(n);
  1698. }
  1699. #define type_str(T) type_no_str(ERTS_ALC_T2N((T)))
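/* Record, for every allocator type, which thread-specific instance this
 * (normal) scheduler should prefer and which instance handles its delayed
 * deallocations. */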
  1700. void
  1701. erts_alloc_register_scheduler(void *vesdp)
  1702. {
  1703. ErtsSchedulerData *esdp = (ErtsSchedulerData *) vesdp;
  1704. int ix = (int) esdp->no;
  1705. int aix;
  1706. ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
  1707. for (aix = ERTS_ALC_A_MIN; aix <= ERTS_ALC_A_MAX; aix++) {
  1708. ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[aix];
  1709. esdp->alloc_data.deallctr[aix] = NULL;
  1710. esdp->alloc_data.pref_ix[aix] = -1;
  1711. if (tspec->enabled) {
  1712. if (!tspec->dd)
  1713. esdp->alloc_data.pref_ix[aix] = ix;
  1714. else {
  1715. Allctr_t *allctr = tspec->allctr[ix];
  1716. ASSERT(allctr);
  1717. esdp->alloc_data.deallctr[aix] = allctr;
  1718. esdp->alloc_data.pref_ix[aix] = ix;
  1719. }
  1720. }
  1721. }
  1722. }
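/* Process the delayed-deallocation queues of all allocator instances bound to
 * this scheduler (or of the global instances when no scheduler data is given),
 * reporting whether more work or further thread progress is needed. */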
  1723. void
  1724. erts_alloc_scheduler_handle_delayed_dealloc(void *vesdp,
  1725. int *need_thr_progress,
  1726. ErtsThrPrgrVal *thr_prgr_p,
  1727. int *more_work)
  1728. {
  1729. ErtsSchedulerData *esdp = (ErtsSchedulerData *) vesdp;
  1730. int aix;
  1731. for (aix = ERTS_ALC_A_MIN; aix <= ERTS_ALC_A_MAX; aix++) {
  1732. Allctr_t *allctr;
  1733. if (esdp)
  1734. allctr = esdp->alloc_data.deallctr[aix];
  1735. else {
  1736. ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[aix];
  1737. if (tspec->enabled && tspec->dd)
  1738. allctr = tspec->allctr[0];
  1739. else
  1740. allctr = NULL;
  1741. }
  1742. if (allctr) {
  1743. erts_alcu_check_delayed_dealloc(allctr,
  1744. 1,
  1745. need_thr_progress,
  1746. thr_prgr_p,
  1747. more_work);
  1748. }
  1749. }
  1750. }
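/* Shrink the cached fix-size blocks of the fix_alloc instance belonging to
 * scheduler ix (or of the global instance when fix_alloc is not thread
 * specific). */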
  1751. erts_aint32_t
  1752. erts_alloc_fix_alloc_shrink(int ix, erts_aint32_t flgs)
  1753. {
  1754. ErtsAllocatorThrSpec_t *tspec;
  1755. tspec = &erts_allctr_thr_spec[ERTS_ALC_A_FIXED_SIZE];
  1756. if (erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].thr_spec && tspec->enabled)
  1757. return erts_alcu_fix_alloc_shrink(tspec->allctr[ix], flgs);
  1758. if (ix == 0 && erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra)
  1759. return erts_alcu_fix_alloc_shrink(
  1760. erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra, flgs);
  1761. return 0;
  1762. }
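/* Return a callback that verifies that the calling scheduler's temp_alloc
 * instance holds no allocations; falls back to a no-op when temp_alloc is not
 * a thread-specific alloc_util allocator. */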
  1763. static void
  1764. no_verify(Allctr_t *allctr)
  1765. {
  1766. }
  1767. erts_alloc_verify_func_t
  1768. erts_alloc_get_verify_unused_temp_alloc(Allctr_t **allctr)
  1769. {
  1770. if (erts_allctrs_info[ERTS_ALC_A_TEMPORARY].alloc_util
  1771. && erts_allctrs_info[ERTS_ALC_A_TEMPORARY].thr_spec) {
  1772. ErtsAllocatorThrSpec_t *tspec;
  1773. int ix = ERTS_ALC_GET_THR_IX();
  1774. tspec = &erts_allctr_thr_spec[ERTS_ALC_A_TEMPORARY];
  1775. if (ix < tspec->size) {
  1776. *allctr = tspec->allctr[ix];
  1777. return erts_alcu_verify_unused;
  1778. }
  1779. }
  1780. *allctr = NULL;
  1781. return no_verify;
  1782. }
  1783. __decl_noreturn void
  1784. erts_alc_fatal_error(int error, int func, ErtsAlcType_t n, ...)
  1785. {
  1786. char buf[10];
  1787. char *t_str;
  1788. char *allctr_str;
  1789. ASSERT(n >= ERTS_ALC_N_MIN);
  1790. ASSERT(n <= ERTS_ALC_N_MAX);
  1791. if (n < ERTS_ALC_N_MIN || ERTS_ALC_N_MAX < n)
  1792. allctr_str = "UNKNOWN";
  1793. else {
  1794. ErtsAlcType_t a = ERTS_ALC_T2A(ERTS_ALC_N2T(n));
  1795. if (erts_allctrs_info[a].enabled)
  1796. allctr_str = (char *) ERTS_ALC_A2AD(a);
  1797. else
  1798. allctr_str = (char *) ERTS_ALC_A2AD(ERTS_ALC_A_SYSTEM);
  1799. }
  1800. t_str = type_no_str(n);
  1801. if (!t_str) {
  1802. erts_snprintf(buf, sizeof(buf), "%d", (int) n);
  1803. t_str = buf;
  1804. }
  1805. switch (error) {
  1806. case ERTS_ALC_E_NOTSUP: {
  1807. char *op_str;
  1808. switch (func) {
  1809. case ERTS_ALC_O_ALLOC: op_str = "alloc"; break;
  1810. case ERTS_ALC_O_REALLOC: op_str = "realloc"; break;
  1811. case ERTS_ALC_O_FREE: op_str = "free"; break;
  1812. default: op_str = "UNKNOWN"; break;
  1813. }
  1814. erts_exit(ERTS_ABORT_EXIT,
  1815. "%s: %s operation not supported (memory type: \"%s\")\n",
  1816. allctr_str, op_str, t_str);
  1817. break;
  1818. }
  1819. case ERTS_ALC_E_NOMEM: {
  1820. Uint size;
  1821. va_list argp;
  1822. char *op = func == ERTS_ALC_O_REALLOC ? "reallocate" : "allocate";
  1823. va_start(argp, n);
  1824. size = va_arg(argp, Uint);
  1825. va_end(argp);
  1826. erts_exit(ERTS_DUMP_EXIT,
  1827. "%s: Cannot %s %lu bytes of memory (of type \"%s\").\n",
  1828. allctr_str, op, size, t_str);
  1829. break;
  1830. }
  1831. case ERTS_ALC_E_NOALLCTR:
  1832. erts_exit(ERTS_ABORT_EXIT,
  1833. "erts_alloc: Unknown allocator type: %d\n",
  1834. ERTS_ALC_T2A(ERTS_ALC_N2T(n)));
  1835. break;
  1836. default:
  1837. erts_exit(ERTS_ABORT_EXIT, "erts_alloc: Unknown error: %d\n", error);
  1838. break;
  1839. }
  1840. }
  1841. __decl_noreturn void
  1842. erts_alloc_enomem(ErtsAlcType_t type, Uint size)
  1843. {
  1844. erts_alloc_n_enomem(ERTS_ALC_T2N(type), size);
  1845. }
  1846. __decl_noreturn void
  1847. erts_alloc_n_enomem(ErtsAlcType_t n, Uint size)
  1848. {
  1849. erts_alc_fatal_error(ERTS_ALC_E_NOMEM, ERTS_ALC_O_ALLOC, n, size);
  1850. }
  1851. __decl_noreturn void
  1852. erts_realloc_enomem(ErtsAlcType_t type, void *ptr, Uint size)
  1853. {
  1854. erts_realloc_n_enomem(ERTS_ALC_T2N(type), ptr, size);
  1855. }
  1856. __decl_noreturn void
  1857. erts_realloc_n_enomem(ErtsAlcType_t n, void *ptr, Uint size)
  1858. {
  1859. erts_alc_fatal_error(ERTS_ALC_E_NOMEM, ERTS_ALC_O_REALLOC, n, size);
  1860. }
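/* Return the number of block bytes currently allocated as type alloc_no,
 * summing over all thread-specific instances when applicable and optionally
 * collecting fix-size statistics into fi. */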
  1861. static ERTS_INLINE UWord
  1862. alcu_size(ErtsAlcType_t alloc_no, ErtsAlcUFixInfo_t *fi, int fisz)
  1863. {
  1864. UWord res;
  1865. int ai;
  1866. if (!erts_allctrs_info[alloc_no].thr_spec) {
  1867. AllctrSize_t size;
  1868. Allctr_t *allctr;
  1869. allctr = erts_allctrs_info[alloc_no].extra;
  1870. erts_alcu_current_size(allctr, &size, fi, fisz);
  1871. return size.blocks;
  1872. }
  1873. res = 0;
  1874. /* Thread-specific allocators can migrate carriers across types, so we have
  1875. * to visit every allocator type to gather information on blocks that were
  1876. * allocated by us. */
  1877. for (ai = ERTS_ALC_A_MIN; ai < ERTS_ALC_A_MAX; ai++) {
  1878. ErtsAllocatorThrSpec_t *tspec;
  1879. Allctr_t *allctr;
  1880. int i;
  1881. if (!erts_allctrs_info[ai].thr_spec) {
  1882. continue;
  1883. }
  1884. tspec = &erts_allctr_thr_spec[ai];
  1885. ASSERT(tspec->enabled);
  1886. for (i = tspec->size - 1; i >= 0; i--) {
  1887. allctr = tspec->allctr[i];
  1888. if (allctr) {
  1889. AllctrSize_t size;
  1890. if (ai == alloc_no) {
  1891. erts_alcu_current_size(allctr, &size, fi, fisz);
  1892. } else {
  1893. erts_alcu_foreign_size(allctr, alloc_no, &size);
  1894. }
  1895. ASSERT(((SWord)size.blocks) >= 0);
  1896. res += size.blocks;
  1897. }
  1898. }
  1899. }
  1900. return res;
  1901. }
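/* Add the allocated and used byte counts recorded for the fixed-size type
 * 'type' to *ap and *up respectively. */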
  1902. static ERTS_INLINE void
  1903. add_fix_values(UWord *ap, UWord *up, ErtsAlcUFixInfo_t *fi, ErtsAlcType_t type)
  1904. {
  1905. int ix = ERTS_ALC_T2N(type) - ERTS_ALC_N_MIN_A_FIXED_SIZE;
  1906. ASSERT(0 <= ix && ix < ERTS_ALC_NO_FIXED_SIZES);
  1907. *ap += (UWord) fi[ix].allocated;
  1908. *up += (UWord) fi[ix].used;
  1909. }
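/* Collect the memory figures reported by erlang:memory/[0,1] (kept in sync
 * with this function, see the NOTE below), optionally printing them as the
 * "=memory" section and/or building an Erlang term for 'proc'. */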
  1910. Eterm
  1911. erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg)
  1912. {
  1913. /*
  1914. * NOTE! When updating this function, make sure to also update
  1915. * erlang:memory/[0,1] in $ERL_TOP/erts/preloaded/src/erlang.erl
  1916. */
  1917. #define ERTS_MEM_NEED_ALL_ALCU (want_tot_or_sys)
  1918. struct {
  1919. int total;
  1920. int processes;
  1921. int processes_used;
  1922. int system;
  1923. int atom;
  1924. int atom_used;
  1925. int binary;
  1926. int code;
  1927. int ets;
  1928. } want = {0};
  1929. struct {
  1930. UWord total;
  1931. UWord processes;
  1932. UWord processes_used;
  1933. UWord system;
  1934. UWord atom;
  1935. UWord atom_used;
  1936. UWord binary;
  1937. UWord code;
  1938. UWord ets;
  1939. } size = {0};
  1940. Eterm atoms[sizeof(size)/sizeof(UWord)];
  1941. UWord *uintps[sizeof(size)/sizeof(UWord)];
  1942. Eterm euints[sizeof(size)/sizeof(UWord)];
  1943. int want_tot_or_sys;
  1944. int length;
  1945. Eterm res = THE_NON_VALUE;
  1946. ErtsAlcType_t ai;
  1947. int only_one_value = 0;
  1948. ErtsAlcUFixInfo_t fi[ERTS_ALC_NO_FIXED_SIZES] = {{0,0}};
  1949. ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
  1950. /* Figure out what's wanted... */
  1951. length = 0;
  1952. if (is_non_value(earg)) { /* i.e. wants all */
  1953. want.total = 1;
  1954. atoms[length] = am_total;
  1955. uintps[length++] = &size.total;
  1956. want.processes = 1;
  1957. atoms[length] = am_processes;
  1958. uintps[length++] = &size.processes;
  1959. want.processes_used = 1;
  1960. atoms[length] = am_processes_used;
  1961. uintps[length++] = &size.processes_used;
  1962. want.system = 1;
  1963. atoms[length] = am_system;
  1964. uintps[length++] = &size.system;
  1965. want.atom = 1;
  1966. atoms[length] = am_atom;
  1967. uintps[length++] = &size.atom;
  1968. want.atom_used = 1;
  1969. atoms[length] = am_atom_used;
  1970. uintps[length++] = &size.atom_used;
  1971. want.binary = 1;
  1972. atoms[length] = am_binary;
  1973. uintps[length++] = &size.binary;
  1974. want.code = 1;
  1975. atoms[length] = am_code;
  1976. uintps[length++] = &size.code;
  1977. want.ets = 1;
  1978. atoms[length] = am_ets;
  1979. uintps[length++] = &size.ets;
  1980. }
  1981. else {
  1982. DeclareTmpHeapNoproc(tmp_heap,2);
  1983. Eterm wanted_list;
  1984. if (is_nil(earg))
  1985. return NIL;
  1986. UseTmpHeapNoproc(2);
  1987. if (is_not_atom(earg))
  1988. wanted_list = earg;
  1989. else {
  1990. wanted_list = CONS(&tmp_heap[0], earg, NIL);
  1991. only_one_value = 1;
  1992. }
  1993. while (is_list(wanted_list)) {
  1994. switch (CAR(list_val(wanted_list))) {
  1995. case am_total:
  1996. if (!want.total) {
  1997. want.total = 1;
  1998. atoms[length] = am_total;
  1999. uintps[length++] = &size.total;
  2000. }
  2001. break;
  2002. case am_processes:
  2003. if (!want.processes) {
  2004. want.processes = 1;
  2005. atoms[length] = am_processes;
  2006. uintps[length++] = &size.processes;
  2007. }
  2008. break;
  2009. case am_processes_used:
  2010. if (!want.processes_used) {
  2011. want.processes_used = 1;
  2012. atoms[length] = am_processes_used;
  2013. uintps[length++] = &size.processes_used;
  2014. }
  2015. break;
  2016. case am_system:
  2017. if (!want.system) {
  2018. want.system = 1;
  2019. atoms[length] = am_system;
  2020. uintps[length++] = &size.system;
  2021. }
  2022. break;
  2023. case am_atom:
  2024. if (!want.atom) {
  2025. want.atom = 1;
  2026. atoms[length] = am_atom;
  2027. uintps[length++] = &size.atom;
  2028. }
  2029. break;
  2030. case am_atom_used:
  2031. if (!want.atom_used) {
  2032. want.atom_used = 1;
  2033. atoms[length] = am_atom_used;
  2034. uintps[length++] = &size.atom_used;
  2035. }
  2036. break;
  2037. case am_binary:
  2038. if (!want.binary) {
  2039. want.binary = 1;
  2040. atoms[length] = am_binary;
  2041. uintps[length++] = &size.binary;
  2042. }
  2043. break;
  2044. case am_code:
  2045. if (!want.code) {
  2046. want.code = 1;
  2047. atoms[length] = am_code;
  2048. uintps[length++] = &size.code;
  2049. }
  2050. break;
  2051. case am_ets:
  2052. if (!want.ets) {
  2053. want.ets = 1;
  2054. atoms[length] = am_ets;
  2055. uintps[length++] = &size.ets;
  2056. }
  2057. break;
  2058. default:
  2059. UnUseTmpHeapNoproc(2);
  2060. return am_badarg;
  2061. }
  2062. wanted_list = CDR(list_val(wanted_list));
  2063. }
  2064. UnUseTmpHeapNoproc(2);
  2065. if (is_not_nil(wanted_list))
  2066. return am_badarg;
  2067. }
  2068. /* All alloc_util allocators *have* to be enabled, except test_alloc */
  2069. for (ai = ERTS_ALC_A_MIN; ai <= ERTS_ALC_A_MAX; ai++) {
  2070. switch (ai) {
  2071. case ERTS_ALC_A_SYSTEM:
  2072. case ERTS_ALC_A_TEST:
  2073. break;
  2074. default:
  2075. if (!erts_allctrs_info[ai].enabled
  2076. || !erts_allctrs_info[ai].alloc_util) {
  2077. return am_notsup;
  2078. }
  2079. break;
  2080. }
  2081. }
  2082. ASSERT(length <= sizeof(atoms)/sizeof(Eterm));
  2083. ASSERT(length <= sizeof(euints)/sizeof(Eterm));
  2084. ASSERT(length <= sizeof(uintps)/sizeof(UWord));
  2085. if (proc) {
  2086. ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN
  2087. == erts_proc_lc_my_proc_locks(proc));
  2088. /* We'll need locks early in the lock order */
  2089. erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
  2090. }
  2091. /* Calculate values needed... */
  2092. want_tot_or_sys = want.total || want.system;
  2093. if (ERTS_MEM_NEED_ALL_ALCU) {
  2094. size.total = 0;
  2095. for (ai = ERTS_ALC_A_MIN; ai <= ERTS_ALC_A_MAX; ai++) {
  2096. if (erts_allctrs_info[ai].alloc_util) {
  2097. UWord *save;
  2098. UWord asz;
  2099. switch (ai) {
  2100. case ERTS_ALC_A_TEMPORARY:
  2101. /*
  2102. * Usually not thread safe and normally does not
  2103. * contain any allocated memory.
  2104. */
  2105. continue;
  2106. case ERTS_ALC_A_TEST:
  2107. continue;
  2108. case ERTS_ALC_A_EHEAP:
  2109. save = &size.processes;
  2110. break;
  2111. case ERTS_ALC_A_ETS:
  2112. save = &size.ets;
  2113. break;
  2114. case ERTS_ALC_A_BINARY:
  2115. save = &size.binary;
  2116. break;
  2117. case ERTS_ALC_A_FIXED_SIZE:
  2118. asz = alcu_size(ai, fi, ERTS_ALC_NO_FIXED_SIZES);
  2119. size.total += asz;
  2120. continue;
  2121. default:
  2122. save = NULL;
  2123. break;
  2124. }
  2125. asz = alcu_size(ai, NULL, 0);
  2126. if (save)
  2127. *save = asz;
  2128. size.total += asz;
  2129. }
  2130. }
  2131. }
  2132. if (want_tot_or_sys || want.processes || want.processes_used) {
  2133. UWord tmp;
  2134. if (ERTS_MEM_NEED_ALL_ALCU)
  2135. tmp = size.processes;
  2136. else {
  2137. alcu_size(ERTS_ALC_A_FIXED_SIZE,
  2138. fi, ERTS_ALC_NO_FIXED_SIZES);
  2139. tmp = alcu_size(ERTS_ALC_A_EHEAP, NULL, 0);
  2140. }
  2141. tmp += erts_ptab_mem_size(&erts_proc);
  2142. tmp += erts_bif_timer_memory_size();
  2143. size.processes = size.processes_used = tmp;
  2144. add_fix_values(&size.processes,
  2145. &size.processes_used,
  2146. fi,
  2147. ERTS_ALC_T_PROC);
  2148. add_fix_values(&size.processes,
  2149. &size.processes_used,
  2150. fi,
  2151. ERTS_ALC_T_MONITOR);
  2152. add_fix_values(&size.processes,
  2153. &size.processes_used,
  2154. fi,
  2155. ERTS_ALC_T_LINK);
  2156. add_fix_values(&size.processes,
  2157. &size.processes_used,
  2158. fi,
  2159. ERTS_ALC_T_MSG_REF);
  2160. add_fix_values(&size.processes,
  2161. &size.processes_used,
  2162. fi,
  2163. ERTS_ALC_T_LL_PTIMER);
  2164. add_fix_values(&size.processes,
  2165. &size.processes_used,
  2166. fi,
  2167. ERTS_ALC_T_HL_PTIMER);
  2168. add_fix_values(&size.processes,
  2169. &size.processes_used,
  2170. fi,
  2171. ERTS_ALC_T_BIF_TIMER);
  2172. }
  2173. if (want.atom || want.atom_used) {
  2174. Uint reserved_atom_space, atom_space;
  2175. erts_atom_get_text_space_sizes(&reserved_atom_space, &atom_space);
  2176. size.atom = size.atom_used = atom_table_sz();
  2177. if (want.atom)
  2178. size.atom += reserved_atom_space;
  2179. if (want.atom_used)
  2180. size.atom_used += atom_space;
  2181. }
  2182. if (!ERTS_MEM_NEED_ALL_ALCU && want.binary)
  2183. size.binary = alcu_size(ERTS_ALC_A_BINARY, NULL, 0);
  2184. if (want.code) {
  2185. size.code = module_table_sz();
  2186. size.code += export_table_sz();
  2187. size.code += export_entries_sz();
  2188. size.code += erts_fun_table_sz();
  2189. size.code += erts_ranges_sz();
  2190. size.code += erts_total_code_size;
  2191. }
  2192. if (want.ets) {
  2193. if (!ERTS_MEM_NEED_ALL_ALCU)
  2194. size.ets = alcu_size(ERTS_ALC_A_ETS, NULL, 0);
  2195. size.ets += erts_get_ets_misc_mem_size();
  2196. }
  2197. if (want_tot_or_sys) {
  2198. ASSERT(size.total >= size.processes);
  2199. size.system = size.total - size.processes;
  2200. }
  2201. if (print_to_p) {
  2202. int i;
  2203. fmtfn_t to = *print_to_p;
  2204. void *arg = print_to_arg;
  2205. /* Print result... */
  2206. erts_print(to, arg, "=memory\n");
  2207. for (i = 0; i < length; i++)
  2208. erts_print(to, arg, "%T: %bpu\n", atoms[i], *uintps[i]);
  2209. }
  2210. if (proc) {
  2211. /* Build erlang term result... */
  2212. Uint *hp;
  2213. Uint hsz;
  2214. erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
  2215. if (only_one_value) {
  2216. ASSERT(length == 1);
  2217. hsz = 0;
  2218. erts_bld_uword(NULL, &hsz, *uintps[0]);
  2219. hp = hsz ? HAlloc((Process *) proc, hsz) : NULL;
  2220. res = erts_bld_uword(&hp, NULL, *uintps[0]);
  2221. }
  2222. else {
  2223. Uint **hpp = NULL;
  2224. Uint *hszp = &hsz;
  2225. hsz = 0;
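/* Two passes over the same build code: the first (hpp == NULL) only
 * accumulates the needed heap size in hsz, the second builds the result term
 * on the allocated heap. */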
  2226. while (1) {
  2227. int i;
  2228. for (i = 0; i < length; i++)
  2229. euints[i] = erts_bld_uword(hpp, hszp, *uintps[i]);
  2230. res = erts_bld_2tup_list(hpp, hszp, length, atoms, euints);
  2231. if (hpp)
  2232. break;
  2233. hp = HAlloc((Process *) proc, hsz);
  2234. hpp = &hp;
  2235. hszp = NULL;
  2236. }
  2237. }
  2238. }
  2239. return res;
  2240. #undef ERTS_MEM_NEED_ALL_ALCU
  2241. }
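/* Report sizes of memory areas maintained outside the allocators proper
 * (atom text, tables, timer wheel, loaded code, ...), either printed as the
 * "=allocated_areas" section or returned as a list of tuples. */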
  2242. struct aa_values {
  2243. Uint arity;
  2244. const char *name;
  2245. Uint ui[2];
  2246. };
  2247. Eterm
  2248. erts_allocated_areas(fmtfn_t *print_to_p, void *print_to_arg, void *proc)
  2249. {
  2250. #define MAX_AA_VALUES (24)
  2251. struct aa_values values[MAX_AA_VALUES];
  2252. Eterm res = THE_NON_VALUE;
  2253. int i, length;
  2254. Uint reserved_atom_space, atom_space;
  2255. if (proc) {
  2256. ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN
  2257. == erts_proc_lc_my_proc_locks(proc));
  2258. /* We'll need locks early in the lock order */
  2259. erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
  2260. }
  2261. i = 0;
  2262. values[i].arity = 2;
  2263. values[i].name = "sys_misc";
  2264. values[i].ui[0] = erts_sys_misc_mem_sz();
  2265. i++;
  2266. values[i].arity = 2;
  2267. values[i].name = "static";
  2268. values[i].ui[0] =
  2269. sizeof(ErtsPTab)*2 /* proc & port tables */
  2270. + erts_timer_wheel_memory_size(); /* Timer wheel */
  2271. i++;
  2272. erts_atom_get_text_space_sizes(&reserved_atom_space, &atom_space);
  2273. values[i].arity = 3;
  2274. values[i].name = "atom_space";
  2275. values[i].ui[0] = reserved_atom_space;
  2276. values[i].ui[1] = atom_space;
  2277. i++;
  2278. values[i].arity = 2;
  2279. values[i].name = "atom_table";
  2280. values[i].ui[0] = atom_table_sz();
  2281. i++;
  2282. values[i].arity = 2;
  2283. values[i].name = "module_table";
  2284. values[i].ui[0] = module_table_sz();
  2285. i++;
  2286. values[i].arity = 2;
  2287. values[i].name = "export_table";
  2288. values[i].ui[0] = export_table_sz();
  2289. i++;
  2290. values[i].arity = 2;
  2291. values[i].name = "export_list";
  2292. values[i].ui[0] = export_entries_sz();
  2293. i++;
  2294. values[i].arity = 2;
  2295. values[i].name = "register_table";
  2296. values[i].ui[0] = process_reg_sz();
  2297. i++;
  2298. values[i].arity = 2;
  2299. values[i].name = "fun_table";
  2300. values[i].ui[0] = erts_fun_table_sz();
  2301. i++;
  2302. values[i].arity = 2;
  2303. values[i].name = "module_refs";
  2304. values[i].ui[0] = erts_ranges_sz();
  2305. i++;
  2306. values[i].arity = 2;
  2307. values[i].name = "loaded_code";
  2308. values[i].ui[0] = erts_total_code_size;
  2309. i++;
  2310. values[i].arity = 2;
  2311. values[i].name = "dist_table";
  2312. values[i].ui[0] = erts_dist_table_size();
  2313. i++;
  2314. values[i].arity = 2;
  2315. values[i].name = "node_table";
  2316. values[i].ui[0] = erts_node_table_size();
  2317. i++;
  2318. values[i].arity = 2;
  2319. values[i].name = "bits_bufs_size";
  2320. values[i].ui[0] = erts_bits_bufs_size();
  2321. i++;
  2322. values[i].arity = 2;
  2323. values[i].name = "bif_timer";
  2324. values[i].ui[0] = erts_bif_timer_memory_size();
  2325. i++;
  2326. values[i].arity = 2;
  2327. values[i].name = "process_table";
  2328. values[i].ui[0] = erts_ptab_mem_size(&erts_proc);
  2329. i++;
  2330. values[i].arity = 2;
  2331. values[i].name = "port_table";
  2332. values[i].ui[0] = erts_ptab_mem_size(&erts_port);
  2333. i++;
  2334. values[i].arity = 2;
  2335. values[i].name = "ets_misc";
  2336. values[i].ui[0] = erts_get_ets_misc_mem_size();
  2337. i++;
  2338. length = i;
  2339. ASSERT(length <= MAX_AA_VALUES);
  2340. if (print_to_p) {
  2341. /* Print result... */
  2342. fmtfn_t to = *print_to_p;
  2343. void *arg = print_to_arg;
  2344. erts_print(to, arg, "=allocated_areas\n");
  2345. for (i = 0; i < length; i++) {
  2346. switch (values[i].arity) {
  2347. case 2:
  2348. erts_print(to, arg, "%s: %beu\n",
  2349. values[i].name, values[i].ui[0]);
  2350. break;
  2351. case 3:
  2352. erts_print(to, arg, "%s: %beu %beu\n",
  2353. values[i].name, values[i].ui[0], values[i].ui[1]);
  2354. break;
  2355. default:
  2356. erts_print(to, arg, "ERROR: internal_error\n");
  2357. ASSERT(0);
  2358. return am_internal_error;
  2359. }
  2360. }
  2361. }
  2362. if (proc) {
  2363. /* Build erlang term result... */
  2364. Eterm tuples[MAX_AA_VALUES];
  2365. Uint *hp;
  2366. Uint **hpp;
  2367. Uint hsz;
  2368. Uint *hszp;
  2369. erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
  2370. hpp = NULL;
  2371. hsz = 0;
  2372. hszp = &hsz;
  2373. while (1) {
  2374. int i;
  2375. for (i = 0; i < length; i++) {
  2376. Eterm atom;
  2377. if (hpp)
  2378. atom = am_atom_put(values[i].name,
  2379. (int) sys_strlen(values[i].name));
  2380. else
  2381. atom = am_true;
  2382. switch (values[i].arity) {
  2383. case 2:
  2384. tuples[i] = erts_bld_tuple(hpp, hszp, 2,
  2385. atom,
  2386. erts_bld_uint(hpp, hszp,
  2387. values[i].ui[0]));
  2388. break;
  2389. case 3:
  2390. tuples[i] = erts_bld_tuple(hpp, hszp, 3,
  2391. atom,
  2392. erts_bld_uint(hpp, hszp,
  2393. values[i].ui[0]),
  2394. erts_bld_uint(hpp, hszp,
  2395. values[i].ui[1]));
  2396. break;
  2397. default:
  2398. ASSERT(0);
  2399. return am_internal_error;
  2400. }
  2401. }
  2402. res = erts_bld_list(hpp, hszp, length, tuples);
  2403. if (hpp)
  2404. break;
  2405. hp = HAlloc((Process *) proc, hsz);
  2406. hpp = &hp;
  2407. hszp = NULL;
  2408. }
  2409. }
  2410. return res;
  2411. #undef MAX_AA_VALUES
  2412. }
  2413. Eterm
  2414. erts_alloc_util_allocators(void *proc)
  2415. {
  2416. Eterm res;
  2417. Uint *hp;
  2418. Uint sz;
  2419. int i;
  2420. /*
  2421. * Currently all allocators except sys_alloc are
  2422. * alloc_util allocators.
  2423. * Also hide test_alloc which is disabled by default
  2424. * and only intended for our own testing.
  2425. */
  2426. sz = ((ERTS_ALC_A_MAX + 1 - ERTS_ALC_A_MIN) - 2)*2;
  2427. ASSERT(sz > 0);
  2428. hp = HAlloc((Process *) proc, sz);
  2429. res = NIL;
  2430. for (i = ERTS_ALC_A_MAX; i >= ERTS_ALC_A_MIN; i--) {
  2431. switch (i) {
  2432. case ERTS_ALC_A_SYSTEM:
  2433. case ERTS_ALC_A_TEST:
  2434. break;
  2435. default: {
  2436. char *alc_str = (char *) ERTS_ALC_A2AD(i);
  2437. Eterm alc = am_atom_put(alc_str, sys_strlen(alc_str));
  2438. res = CONS(hp, alc, res);
  2439. hp += 2;
  2440. break;
  2441. }
  2442. }
  2443. }
  2444. return res;
  2445. }
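/* Print the configuration of every enabled allocator instance (and of
 * mseg_alloc/erts_mmap when present) as "=allocator:..." sections. */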
  2446. void
  2447. erts_allocator_info(fmtfn_t to, void *arg)
  2448. {
  2449. ErtsAlcType_t a;
  2450. ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
  2451. for (a = ERTS_ALC_A_MIN; a <= ERTS_ALC_A_MAX; a++) {
  2452. int ai;
  2453. for (ai = 0; ai == 0 || ai < erts_allctrs_info[a].thr_spec; ai++) {
  2454. if (erts_allctrs_info[a].thr_spec) {
  2455. if (!erts_allctr_thr_spec[a].allctr[ai])
  2456. continue;
  2457. erts_print(to, arg, "=allocator:%s[%d]\n",
  2458. ERTS_ALC_A2AD(a), ai);
  2459. }
  2460. else {
  2461. erts_print(to, arg, "=allocator:%s\n", ERTS_ALC_A2AD(a));
  2462. }
  2463. if (!erts_allctrs_info[a].enabled)
  2464. erts_print(to, arg, "option e: false\n");
  2465. else {
  2466. if (erts_allctrs_info[a].alloc_util) {
  2467. void *as;
  2468. if (!erts_allctrs_info[a].thr_spec)
  2469. as = erts_allctrs_info[a].extra;
  2470. else {
  2471. ASSERT(erts_allctr_thr_spec[a].enabled);
  2472. as = erts_allctr_thr_spec[a].allctr[ai];
  2473. }
  2474. /* Binary alloc has its own thread safety... */
  2475. erts_alcu_info(as, 0, 0, &to, arg, NULL, NULL);
  2476. }
  2477. else {
  2478. switch (a) {
  2479. case ERTS_ALC_A_SYSTEM: {
  2480. SysAllocStat sas;
  2481. erts_print(to, arg, "option e: true\n");
  2482. erts_print(to, arg, "option m: libc\n");
  2483. sys_alloc_stat(&sas);
  2484. if(sas.trim_threshold >= 0)
  2485. erts_print(to, arg, "option tt: %d\n", sas.trim_threshold);
  2486. if(sas.top_pad >= 0)
  2487. erts_print(to, arg, "option tp: %d\n", sas.top_pad);
  2488. break;
  2489. }
  2490. default:
  2491. ASSERT(0);
  2492. break;
  2493. }
  2494. }
  2495. }
  2496. }
  2497. }
  2498. #if HAVE_ERTS_MSEG
  2499. {
  2500. struct erts_mmap_info_struct emis;
  2501. int max = (int) erts_no_schedulers;
  2502. int i;
  2503. for (i = 0; i <= max; i++) {
  2504. erts_print(to, arg, "=allocator:mseg_alloc[%d]\n", i);
  2505. erts_mseg_info(i, &to, arg, 0, 0, NULL, NULL);
  2506. }
  2507. erts_print(to, arg, "=allocator:erts_mmap.default_mmap\n");
  2508. erts_mmap_info(&erts_dflt_mmapper, &to, arg, NULL, NULL, &emis);
  2509. #if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
  2510. erts_print(to, arg, "=allocator:erts_mmap.literal_mmap\n");
  2511. erts_mmap_info(&erts_literal_mmapper, &to, arg, NULL, NULL, &emis);
  2512. #endif
  2513. }
  2514. #endif
  2515. erts_print(to, arg, "=allocator:alloc_util\n");
  2516. erts_alcu_au_info_options(&to, arg, NULL, NULL);
  2517. erts_print(to, arg, "=allocator:instr\n");
  2518. erts_print(to, arg, "option t: %s\n",
  2519. erts_mtrace_enabled ? "true" : "false");
  2520. }
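/* Build a 4-tuple {AllocImplementation, Version, Features, Settings}
 * describing the underlying libc allocator (glibc version when built against
 * glibc) together with the options of every erts allocator. */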
  2521. Eterm
  2522. erts_allocator_options(void *proc)
  2523. {
  2524. #if HAVE_ERTS_MSEG
  2525. int use_mseg = 0;
  2526. #endif
  2527. Uint sz, *szp, *hp, **hpp;
  2528. Eterm res, features, settings;
  2529. Eterm atoms[ERTS_ALC_A_MAX-ERTS_ALC_A_MIN+7];
  2530. Uint terms[ERTS_ALC_A_MAX-ERTS_ALC_A_MIN+7];
  2531. int a, length;
  2532. SysAllocStat sas;
  2533. Uint *endp = NULL;
  2534. sys_alloc_stat(&sas);
  2535. /* First find out the heap size needed ... */
  2536. hpp = NULL;
  2537. szp = &sz;
  2538. sz = 0;
  2539. bld_term:
  2540. length = 0;
  2541. features = NIL;
  2542. settings = NIL;
  2543. for (a = ERTS_ALC_A_MIN; a <= ERTS_ALC_A_MAX; a++) {
  2544. Eterm tmp = NIL;
  2545. atoms[length] = am_atom_put((char *) ERTS_ALC_A2AD(a),
  2546. sys_strlen(ERTS_ALC_A2AD(a)));
  2547. if (erts_allctrs_info[a].enabled) {
  2548. if (erts_allctrs_info[a].alloc_util) {
  2549. Allctr_t *allctr;
  2550. #if HAVE_ERTS_MSEG
  2551. use_mseg++;
  2552. #endif
  2553. if (erts_allctr_thr_spec[a].enabled)
  2554. allctr = erts_allctr_thr_spec[a].allctr[0];
  2555. else
  2556. allctr = erts_allctrs_info[a].extra;
  2557. tmp = erts_alcu_info_options(allctr, NULL, NULL, hpp, szp);
  2558. }
  2559. else {
  2560. int l = 0;
  2561. Eterm as[4];
  2562. Eterm ts[4];
  2563. as[l] = ERTS_MAKE_AM("e");
  2564. ts[l++] = am_true;
  2565. switch (a) {
  2566. case ERTS_ALC_A_SYSTEM:
  2567. as[l] = ERTS_MAKE_AM("m");
  2568. ts[l++] = ERTS_MAKE_AM("libc");
  2569. if(sas.trim_threshold >= 0) {
  2570. as[l] = ERTS_MAKE_AM("tt");
  2571. ts[l++] = erts_bld_uint(hpp, szp,
  2572. (Uint) sas.trim_threshold);
  2573. }
  2574. if(sas.top_pad >= 0) {
  2575. as[l] = ERTS_MAKE_AM("tp");
  2576. ts[l++] = erts_bld_uint(hpp, szp, (Uint) sas.top_pad);
  2577. }
  2578. break;
  2579. default:
  2580. break;
  2581. }
  2582. tmp = erts_bld_2tup_list(hpp, szp, l, as, ts);
  2583. }
  2584. }
  2585. else {
  2586. Eterm atom = ERTS_MAKE_AM("e");
  2587. Eterm term = am_false;
  2588. tmp = erts_bld_2tup_list(hpp, szp, 1, &atom, &term);
  2589. }
  2590. terms[length++] = tmp;
  2591. }
  2592. #if HAVE_ERTS_MSEG
  2593. if (use_mseg) {
  2594. atoms[length] = ERTS_MAKE_AM("mseg_alloc");
  2595. terms[length++] = erts_mseg_info_options(0, NULL, NULL, hpp, szp);
  2596. }
  2597. #endif
  2598. atoms[length] = ERTS_MAKE_AM("alloc_util");
  2599. terms[length++] = erts_alcu_au_info_options(NULL, NULL, hpp, szp);
  2600. #if HAVE_ERTS_MMAP
  2601. atoms[length] = ERTS_MAKE_AM("erts_mmap");
  2602. terms[length++] = erts_mmap_info_options(&erts_dflt_mmapper, NULL, NULL,
  2603. NULL, hpp, szp);
  2604. #endif
  2605. {
  2606. Eterm o[1], v[1];
  2607. o[0] = ERTS_MAKE_AM("t");
  2608. v[0] = erts_mtrace_enabled ? am_true : am_false;
  2609. atoms[length] = ERTS_MAKE_AM("instr");
  2610. terms[length++] = erts_bld_2tup_list(hpp, szp, 1, o, v);
  2611. }
  2612. atoms[length] = ERTS_MAKE_AM("lock_physical_memory");
  2613. terms[length++] = (lock_all_physical_memory ? am_all : am_no);
  2614. settings = erts_bld_2tup_list(hpp, szp, length, atoms, terms);
  2615. length = 0;
  2616. for (a = ERTS_ALC_A_MIN; a <= ERTS_ALC_A_MAX; a++) {
  2617. if (erts_allctrs_info[a].enabled) {
  2618. terms[length++] = am_atom_put((char *) ERTS_ALC_A2AD(a),
  2619. sys_strlen(ERTS_ALC_A2AD(a)));
  2620. }
  2621. }
  2622. #if HAVE_ERTS_MSEG
  2623. if (use_mseg)
  2624. terms[length++] = ERTS_MAKE_AM("mseg_alloc");
  2625. #endif
  2626. #if ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
  2627. terms[length++] = ERTS_MAKE_AM("sys_aligned_alloc");
  2628. #endif
  2629. #if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
  2630. terms[length++] = ERTS_MAKE_AM("literal_mmap");
  2631. #endif
  2632. features = length ? erts_bld_list(hpp, szp, length, terms) : NIL;
  2633. #if defined(__GLIBC__)
  2634. {
  2635. Eterm AM_glibc = ERTS_MAKE_AM("glibc");
  2636. Eterm version;
  2637. version = erts_bld_cons(hpp,
  2638. szp,
  2639. make_small(__GLIBC__),
  2640. #ifdef __GLIBC_MINOR__
  2641. erts_bld_cons(hpp,
  2642. szp,
  2643. make_small(__GLIBC_MINOR__),
  2644. NIL)
  2645. #else
  2646. NIL
  2647. #endif
  2648. );
  2649. res = erts_bld_tuple(hpp, szp, 4,
  2650. AM_glibc, version, features, settings);
  2651. }
  2652. #else /* unknown allocator */
  2653. res = erts_bld_tuple(hpp, szp, 4,
  2654. am_undefined, NIL, features, settings);
  2655. #endif
  2656. if (szp) {
  2657. /* ... and then build the term */
  2658. hp = HAlloc((Process *) proc, sz);
  2659. endp = hp + sz;
  2660. hpp = &hp;
  2661. szp = NULL;
  2662. goto bld_term;
  2663. }
  2664. ASSERT(endp >= hp);
  2665. HRelease((Process *) proc, endp, hp);
  2666. return res;
  2667. }
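/* Allocate a block aligned on a cache-line boundary by over-allocating
 * ERTS_CACHE_LINE_SIZE - 1 bytes and rounding the returned address up; as the
 * name suggests, the block is meant to live for the remaining lifetime of the
 * VM (no corresponding free is provided here). */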
  2668. void *erts_alloc_permanent_cache_aligned(ErtsAlcType_t type, Uint size)
  2669. {
  2670. UWord v = (UWord) erts_alloc(type, size + (ERTS_CACHE_LINE_SIZE-1)
  2671. #ifdef VALGRIND
  2672. + sizeof(UWord)
  2673. #endif
  2674. );
  2675. #ifdef VALGRIND
  2676. { /* Link them to avoid Leak_PossiblyLost */
  2677. static UWord* first_in_list = NULL;
  2678. *(UWord**)v = first_in_list;
  2679. first_in_list = (UWord*) v;
  2680. v += sizeof(UWord);
  2681. }
  2682. #endif
  2683. if (v & ERTS_CACHE_LINE_MASK) {
  2684. v = (v & ~ERTS_CACHE_LINE_MASK) + ERTS_CACHE_LINE_SIZE;
  2685. }
  2686. ASSERT((v & ERTS_CACHE_LINE_MASK) == 0);
  2687. return (void*)v;
  2688. }
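/* Runs on every scheduler on behalf of erts_request_alloc_info(): builds a
 * {Ref, SchedulerId, InfoList} message covering this scheduler's allocator
 * instances (the requesting scheduler also covers the global instances) using
 * the usual size-pass/build-pass scheme, and sends it to the requesting
 * process. The last scheduler to finish frees the request structure. */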
  2689. static void
  2690. reply_alloc_info(void *vair)
  2691. {
  2692. ErtsAllocInfoReq *air = (ErtsAllocInfoReq *) vair;
  2693. Uint sched_id = erts_get_scheduler_id();
  2694. int global_instances = air->req_sched == sched_id;
  2695. ErtsProcLocks rp_locks;
  2696. Process *rp = air->proc;
  2697. Eterm ref_copy = NIL, ai_list, msg = NIL;
  2698. Eterm *hp = NULL, *hp_start = NULL, *hp_end = NULL;
  2699. Eterm **hpp;
  2700. Uint sz, *szp;
  2701. ErlOffHeap *ohp = NULL;
  2702. ErtsMessage *mp = NULL;
  2703. #if HAVE_ERTS_MMAP
  2704. struct erts_mmap_info_struct mmap_info_dflt;
  2705. # if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
  2706. struct erts_mmap_info_struct mmap_info_literal;
  2707. # endif
  2708. #endif
  2709. int i;
  2710. Eterm (*info_func)(Allctr_t *,
  2711. int,
  2712. int,
  2713. fmtfn_t *,
  2714. void *,
  2715. Uint **,
  2716. Uint *) = (air->only_sz
  2717. ? erts_alcu_sz_info
  2718. : erts_alcu_info);
  2719. rp_locks = air->req_sched == sched_id ? ERTS_PROC_LOCK_MAIN : 0;
  2720. sz = 0;
  2721. hpp = NULL;
  2722. szp = &sz;
  2723. while (1) {
  2724. if (hpp)
  2725. ref_copy = erts_iref_storage_make_ref(&air->iref,
  2726. hpp, ohp, 0);
  2727. else
  2728. *szp += erts_iref_storage_heap_size(&air->iref);
  2729. ai_list = NIL;
  2730. for (i = 0; air->allocs[i] != ERTS_ALC_A_INVALID; i++);
  2731. for (i--; i >= 0; i--) {
  2732. int ai = air->allocs[i];
  2733. Allctr_t *allctr;
  2734. Eterm ainfo;
  2735. Eterm alloc_atom;
  2736. if (global_instances) {
  2737. switch (ai) {
  2738. case ERTS_ALC_A_SYSTEM: {
  2739. alloc_atom = erts_bld_atom(hpp, szp, "sys_alloc");
  2740. ainfo = NIL;
  2741. if (!air->only_sz) {
  2742. SysAllocStat sas;
  2743. if (hpp)
  2744. sys_alloc_stat(&sas);
  2745. if (szp) {
  2746. /* ensure enough heap */
  2747. sas.top_pad = INT_MAX;
  2748. sas.trim_threshold = INT_MAX;
  2749. }
  2750. if (sas.top_pad >= 0) {
  2751. ainfo = erts_bld_cons(
  2752. hpp, szp,
  2753. erts_bld_tuple(
  2754. hpp, szp, 2,
  2755. erts_bld_atom(hpp, szp, "tp"),
  2756. erts_bld_uint(
  2757. hpp, szp,
  2758. (Uint) sas.top_pad)),
  2759. ainfo);
  2760. }
  2761. if (sas.trim_threshold >= 0) {
  2762. ainfo = erts_bld_cons(
  2763. hpp, szp,
  2764. erts_bld_tuple(
  2765. hpp, szp, 2,
  2766. erts_bld_atom(hpp, szp, "tt"),
  2767. erts_bld_uint(
  2768. hpp, szp,
  2769. (Uint) sas.trim_threshold)),
  2770. ainfo);
  2771. }
  2772. ainfo = erts_bld_cons(hpp, szp,
  2773. erts_bld_tuple(
  2774. hpp, szp, 2,
  2775. erts_bld_atom(hpp, szp,
  2776. "m"),
  2777. erts_bld_atom(hpp, szp,
  2778. "libc")),
  2779. ainfo);
  2780. ainfo = erts_bld_cons(hpp, szp,
  2781. erts_bld_tuple(
  2782. hpp, szp, 2,
  2783. erts_bld_atom(hpp, szp,
  2784. "e"),
  2785. am_true),
  2786. ainfo);
  2787. ainfo = erts_bld_tuple(hpp, szp, 2,
  2788. erts_bld_atom(hpp, szp,
  2789. "options"),
  2790. ainfo);
  2791. ainfo = erts_bld_cons(hpp, szp,ainfo,NIL);
  2792. }
  2793. ainfo = erts_bld_tuple(hpp, szp, 3,
  2794. alloc_atom,
  2795. make_small(0),
  2796. ainfo);
  2797. break;
  2798. }
  2799. case ERTS_ALC_INFO_A_ALLOC_UTIL:
  2800. alloc_atom = erts_bld_atom(hpp, szp, "alloc_util");
  2801. ainfo = (air->only_sz
  2802. ? NIL
  2803. : erts_alcu_au_info_options(NULL, NULL,
  2804. hpp, szp));
  2805. ainfo = erts_bld_tuple(hpp, szp, 3,
  2806. alloc_atom,
  2807. make_small(0),
  2808. ainfo);
  2809. break;
  2810. case ERTS_ALC_INFO_A_ERTS_MMAP:
  2811. alloc_atom = erts_bld_atom(hpp, szp, "erts_mmap");
  2812. #if HAVE_ERTS_MMAP
  2813. ainfo = (air->only_sz ? NIL :
  2814. erts_mmap_info(&erts_dflt_mmapper, NULL, NULL,
  2815. hpp, szp, &mmap_info_dflt));
  2816. ainfo = erts_bld_tuple3(hpp, szp,
  2817. alloc_atom,
  2818. erts_bld_atom(hpp,szp,"default_mmap"),
  2819. ainfo);
  2820. # if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
  2821. ai_list = erts_bld_cons(hpp, szp,
  2822. ainfo, ai_list);
  2823. ainfo = (air->only_sz ? NIL :
  2824. erts_mmap_info(&erts_literal_mmapper, NULL, NULL,
  2825. hpp, szp, &mmap_info_literal));
  2826. ainfo = erts_bld_tuple3(hpp, szp,
  2827. alloc_atom,
  2828. erts_bld_atom(hpp,szp,"literal_mmap"),
  2829. ainfo);
  2830. # endif
  2831. #else /* !HAVE_ERTS_MMAP */
  2832. ainfo = erts_bld_tuple2(hpp, szp, alloc_atom,
  2833. am_false);
  2834. #endif
  2835. break;
  2836. case ERTS_ALC_INFO_A_MSEG_ALLOC:
  2837. alloc_atom = erts_bld_atom(hpp, szp, "mseg_alloc");
  2838. #if HAVE_ERTS_MSEG
  2839. ainfo = erts_mseg_info(0, NULL, NULL, hpp != NULL,
  2840. air->only_sz, hpp, szp);
  2841. ainfo = erts_bld_tuple3(hpp, szp,
  2842. alloc_atom,
  2843. make_small(0),
  2844. ainfo);
  2845. #else
  2846. ainfo = erts_bld_tuple2(hpp, szp, alloc_atom,
  2847. am_false);
  2848. #endif
  2849. break;
  2850. #ifndef ERTS_ALC_A_EXEC
  2851. case ERTS_ALC_INFO_A_DISABLED_EXEC:
  2852. alloc_atom = erts_bld_atom(hpp, szp, "exec_alloc");
  2853. ainfo = erts_bld_tuple2(hpp, szp, alloc_atom, am_false);
  2854. break;
  2855. #endif
  2856. default:
  2857. alloc_atom = erts_bld_atom(hpp, szp,
  2858. (char *) ERTS_ALC_A2AD(ai));
  2859. if (!erts_allctrs_info[ai].enabled)
  2860. ainfo = erts_bld_tuple(hpp, szp, 2, alloc_atom,
  2861. am_false);
  2862. else if (erts_allctrs_info[ai].alloc_util) {
  2863. if (erts_allctrs_info[ai].thr_spec)
  2864. allctr = erts_allctr_thr_spec[ai].allctr[0];
  2865. else
  2866. allctr = erts_allctrs_info[ai].extra;
  2867. ainfo = info_func(allctr, air->internal, hpp != NULL,
  2868. NULL, NULL, hpp, szp);
  2869. ainfo = erts_bld_tuple(hpp, szp, 3, alloc_atom,
  2870. make_small(0), ainfo);
  2871. }
  2872. else {
  2873. erts_exit(ERTS_ABORT_EXIT, "%s:%d: internal error\n",
  2874. __FILE__, __LINE__);
  2875. }
  2876. }
  2877. ai_list = erts_bld_cons(hpp, szp,
  2878. ainfo, ai_list);
  2879. }
  2880. switch (ai) {
  2881. case ERTS_ALC_A_SYSTEM:
  2882. case ERTS_ALC_INFO_A_ALLOC_UTIL:
  2883. case ERTS_ALC_INFO_A_ERTS_MMAP:
  2884. case ERTS_ALC_INFO_A_DISABLED_EXEC:
  2885. break;
  2886. case ERTS_ALC_INFO_A_MSEG_ALLOC:
  2887. #if HAVE_ERTS_MSEG
  2888. alloc_atom = erts_bld_atom(hpp, szp, "mseg_alloc");
  2889. ainfo = erts_mseg_info(sched_id, NULL, NULL,
  2890. hpp != NULL, air->only_sz, hpp, szp);
  2891. ainfo = erts_bld_tuple(hpp, szp, 3,
  2892. alloc_atom,
  2893. make_small(sched_id),
  2894. ainfo);
  2895. ai_list = erts_bld_cons(hpp, szp, ainfo, ai_list);
  2896. #endif
  2897. break;
  2898. default:
  2899. if (erts_allctrs_info[ai].thr_spec) {
  2900. alloc_atom = erts_bld_atom(hpp, szp,
  2901. (char *) ERTS_ALC_A2AD(ai));
  2902. allctr = erts_allctr_thr_spec[ai].allctr[sched_id];
  2903. ainfo = info_func(allctr, air->internal, hpp != NULL, NULL,
  2904. NULL, hpp, szp);
  2905. ai_list = erts_bld_cons(hpp, szp,
  2906. erts_bld_tuple(
  2907. hpp, szp,
  2908. 3,
  2909. alloc_atom,
  2910. make_small(sched_id),
  2911. ainfo),
  2912. ai_list);
  2913. }
  2914. break;
  2915. }
  2916. msg = erts_bld_tuple(hpp, szp,
  2917. 3,
  2918. ref_copy,
  2919. make_small(sched_id),
  2920. ai_list);
  2921. }
  2922. if (hpp)
  2923. break;
  2924. mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
  2925. hp_start = hp;
  2926. hp_end = hp + sz;
  2927. szp = NULL;
  2928. hpp = &hp;
  2929. }
  2930. if (hp != hp_end)
  2931. erts_shrink_message_heap(&mp, rp, hp_start, hp, hp_end, &msg, 1);
  2932. erts_queue_message(rp, rp_locks, mp, msg, am_system);
  2933. if (air->req_sched == sched_id)
  2934. rp_locks &= ~ERTS_PROC_LOCK_MAIN;
  2935. erts_proc_unlock(rp, rp_locks);
  2936. erts_proc_dec_refc(rp);
  2937. if (erts_atomic32_dec_read_nob(&air->refc) == 0) {
  2938. erts_iref_storage_clean(&air->iref);
  2939. aireq_free(air);
  2940. }
  2941. }
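/* Validate the requested list of allocator names and schedule
 * reply_alloc_info() on every scheduler (running it directly on the calling
 * one). Returns 0 on malformed input, 1 when the request has been
 * dispatched. */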
  2942. int
  2943. erts_request_alloc_info(struct process *c_p,
  2944. Eterm ref,
  2945. Eterm allocs,
  2946. int only_sz,
  2947. int internal)
  2948. {
  2949. ErtsAllocInfoReq *air = aireq_alloc();
  2950. Eterm req_ai[ERTS_ALC_INFO_A_END] = {0};
  2951. Eterm alist;
  2952. int airix = 0, ai;
  2953. air->req_sched = erts_get_scheduler_id();
  2954. air->only_sz = only_sz;
  2955. air->internal = internal;
  2956. air->proc = c_p;
  2957. if (is_not_internal_ref(ref))
  2958. return 0;
  2959. erts_iref_storage_save(&air->iref, ref);
  2960. if (is_not_list(allocs))
  2961. return 0;
  2962. alist = allocs;
  2963. while (is_list(alist)) {
  2964. int saved = 0;
  2965. Eterm* consp = list_val(alist);
  2966. Eterm alloc = CAR(consp);
  2967. for (ai = ERTS_ALC_A_MIN; ai <= ERTS_ALC_A_MAX; ai++)
  2968. if (erts_is_atom_str(erts_alc_a2ad[ai], alloc, 0))
  2969. goto save_alloc;
  2970. if (erts_is_atom_str("mseg_alloc", alloc, 0)) {
  2971. ai = ERTS_ALC_INFO_A_MSEG_ALLOC;
  2972. goto save_alloc;
  2973. }
  2974. if (erts_is_atom_str("erts_mmap", alloc, 0)) {
  2975. ai = ERTS_ALC_INFO_A_ERTS_MMAP;
  2976. goto save_alloc;
  2977. }
  2978. #ifndef ERTS_ALC_A_EXEC
  2979. if (erts_is_atom_str("exec_alloc", alloc, 0)) {
  2980. ai = ERTS_ALC_INFO_A_DISABLED_EXEC;
  2981. goto save_alloc;
  2982. }
  2983. #endif
  2984. if (erts_is_atom_str("alloc_util", alloc, 0)) {
  2985. ai = ERTS_ALC_INFO_A_ALLOC_UTIL;
  2986. save_alloc:
  2987. if (req_ai[ai])
  2988. return 0;
  2989. air->allocs[airix++] = ai;
  2990. req_ai[ai] = 1;
  2991. saved = 1;
  2992. }
  2993. if (!saved)
  2994. return 0;
  2995. alist = CDR(consp);
  2996. }
  2997. if (is_not_nil(alist))
  2998. return 0;
  2999. air->allocs[airix] = ERTS_ALC_A_INVALID;
  3000. erts_atomic32_init_nob(&air->refc,
  3001. (erts_aint32_t) erts_no_schedulers);
  3002. erts_proc_add_refc(c_p, (Sint) erts_no_schedulers);
  3003. if (erts_no_schedulers > 1)
  3004. erts_schedule_multi_misc_aux_work(1,
  3005. erts_no_schedulers,
  3006. reply_alloc_info,
  3007. (void *) air);
  3008. reply_alloc_info((void *) air);
  3009. return 1;
  3010. }
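/* Set a dynamic parameter, given as {Allocator, Param, Value}, on a running
 * alloc_util allocator; only the 'sbct' parameter is handled here, anything
 * else yields 'notsup'. */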
  3011. Eterm erts_alloc_set_dyn_param(Process* c_p, Eterm tuple)
  3012. {
  3013. ErtsAllocatorThrSpec_t *tspec;
  3014. ErtsAlcType_t ai;
  3015. Allctr_t* allctr;
  3016. Eterm* tp;
  3017. Eterm res;
  3018. if (!is_tuple_arity(tuple, 3))
  3019. goto badarg;
  3020. tp = tuple_val(tuple);
  3021. /*
  3022. * Ex: {ets_alloc, sbct, 256000}
  3023. */
  3024. if (!is_atom(tp[1]) || !is_atom(tp[2]) || !is_integer(tp[3]))
  3025. goto badarg;
  3026. for (ai = ERTS_ALC_A_MIN; ai <= ERTS_ALC_A_MAX; ai++)
  3027. if (erts_is_atom_str(erts_alc_a2ad[ai], tp[1], 0))
  3028. break;
  3029. if (ai > ERTS_ALC_A_MAX)
  3030. goto badarg;
  3031. if (!erts_allctrs_info[ai].enabled ||
  3032. !erts_allctrs_info[ai].alloc_util) {
  3033. return am_notsup;
  3034. }
  3035. if (tp[2] == am_sbct) {
  3036. Uint sbct;
  3037. int i, ok;
  3038. if (!term_to_Uint(tp[3], &sbct))
  3039. goto badarg;
  3040. tspec = &erts_allctr_thr_spec[ai];
  3041. if (tspec->enabled) {
  3042. ok = 0;
  3043. for (i = 0; i < tspec->size; i++) {
  3044. allctr = tspec->allctr[i];
  3045. ok |= allctr->try_set_dyn_param(allctr, am_sbct, sbct);
  3046. }
  3047. }
  3048. else {
  3049. allctr = erts_allctrs_info[ai].extra;
  3050. ok = allctr->try_set_dyn_param(allctr, am_sbct, sbct);
  3051. }
  3052. return ok ? am_ok : am_notsup;
  3053. }
  3054. return am_notsup;
  3055. badarg:
  3056. ERTS_BIF_PREP_ERROR(res, c_p, EXC_BADARG);
  3057. return res;
  3058. }
  3059. /*
  3060. * The allocator wrapper prelocking stuff below is about the locking order.
  3061. * It only affects wrappers (erl_mtrace.c) that keep locks during
  3062. * alloc/realloc/free.
  3063. *
  3064. * Some query functions in erl_alloc_util.c lock the allocator mutex and then
  3065. * use erts_printf, which in turn may call the sys allocator through the wrappers.
  3066. * To avoid breaking the locking order, these query functions first "pre-lock" all
  3067. * allocator wrappers.
  3068. */
  3069. ErtsAllocatorWrapper_t *erts_allctr_wrappers;
  3070. int erts_allctr_wrapper_prelocked = 0;
  3071. erts_tsd_key_t erts_allctr_prelock_tsd_key;
  3072. void erts_allctr_wrapper_prelock_init(ErtsAllocatorWrapper_t* wrapper)
  3073. {
  3074. ASSERT(wrapper->lock && wrapper->unlock);
  3075. wrapper->next = erts_allctr_wrappers;
  3076. erts_allctr_wrappers = wrapper;
  3077. }
  3078. void erts_allctr_wrapper_pre_lock(void)
  3079. {
  3080. if (erts_allctr_wrappers) {
  3081. ErtsAllocatorWrapper_t* wrapper = erts_allctr_wrappers;
  3082. for ( ; wrapper; wrapper = wrapper->next) {
  3083. wrapper->lock();
  3084. }
  3085. ASSERT(!erts_allctr_wrapper_prelocked);
  3086. erts_allctr_wrapper_prelocked = 1;
  3087. erts_tsd_set(erts_allctr_prelock_tsd_key, (void*)1);
  3088. }
  3089. }
  3090. void erts_allctr_wrapper_pre_unlock(void)
  3091. {
  3092. if (erts_allctr_wrappers) {
  3093. ErtsAllocatorWrapper_t* wrapper = erts_allctr_wrappers;
  3094. erts_allctr_wrapper_prelocked = 0;
  3095. erts_tsd_set(erts_allctr_prelock_tsd_key, (void*)0);
  3096. for ( ; wrapper; wrapper = wrapper->next) {
  3097. wrapper->unlock();
  3098. }
  3099. }
  3100. }
  3101. /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
  3102. * NOTE: erts_alc_test() is only supposed to be used for testing. *
  3103. * *
  3104. * Keep alloc_SUITE_data/allocator_test.h updated if changes are made *
  3105. * to erts_alc_test() *
  3106. \* */
  3107. #define ERTS_ALC_TEST_ABORT erts_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error\n", __FILE__, __LINE__)
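/* The high byte of 'op' selects the subsystem under test: 0x0 alloc_util,
 * 0x1 goodfit, 0x2 bestfit, 0x3 afit, 0x4 mseg, 0x5 aoff; 0xf00-0xfff are
 * generic helpers (allocate/free, mutexes, threads, ...) used by the test
 * suites. */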
  3108. UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
  3109. {
  3110. switch (op >> 8) {
  3111. case 0x0: return erts_alcu_test(op, a1, a2);
  3112. case 0x1: return erts_gfalc_test(op, a1, a2);
  3113. case 0x2: return erts_bfalc_test(op, a1, a2);
  3114. case 0x3: return erts_afalc_test(op, a1, a2);
  3115. case 0x4: return erts_mseg_test(op, a1, a2, a3);
  3116. case 0x5: return erts_aoffalc_test(op, a1, a2);
  3117. case 0xf:
  3118. switch (op) {
  3119. case 0xf00:
  3120. if (((Allctr_t *) a1)->thread_safe)
  3121. return (UWord) erts_alcu_alloc_ts(ERTS_ALC_T_TEST,
  3122. (void *) a1,
  3123. (Uint) a2);
  3124. else
  3125. return (UWord) erts_alcu_alloc(ERTS_ALC_T_TEST,
  3126. (void *) a1,
  3127. (Uint) a2);
  3128. case 0xf01:
  3129. if (((Allctr_t *) a1)->thread_safe)
  3130. return (UWord) erts_alcu_realloc_ts(ERTS_ALC_T_TEST,
  3131. (void *) a1,
  3132. (void *) a2,
  3133. (Uint) a3);
  3134. else
  3135. return (UWord) erts_alcu_realloc(ERTS_ALC_T_TEST,
  3136. (void *) a1,
  3137. (void *) a2,
  3138. (Uint) a3);
  3139. case 0xf02:
  3140. if (((Allctr_t *) a1)->thread_safe)
  3141. erts_alcu_free_ts(ERTS_ALC_T_TEST, (void *) a1, (void *) a2);
  3142. else
  3143. erts_alcu_free(ERTS_ALC_T_TEST, (void *) a1, (void *) a2);
  3144. return 0;
  3145. case 0xf03: {
  3146. Allctr_t *allctr;
  3147. struct au_init init;
  3148. SET_DEFAULT_ALLOC_OPTS(&init);
  3149. init.enable = 1;
  3150. init.astrat = ERTS_ALC_S_GOODFIT;
  3151. init.init.util.name_prefix = (char *) a1;
  3152. init.init.util.alloc_no = ERTS_ALC_A_TEST;
  3153. init.init.util.alloc_strat = init.astrat;
  3154. init.init.util.ts = 1;
  3155. if ((char **) a3) {
  3156. char **argv = (char **) a3;
  3157. int i = 0;
  3158. while (argv[i]) {
  3159. if (argv[i][0] == '-' && argv[i][1] == 't')
  3160. handle_au_arg(&init, &argv[i][2], argv, &i, 0);
  3161. else
  3162. return (UWord) NULL;
  3163. i++;
  3164. }
  3165. }
  3166. switch (init.astrat) {
  3167. case ERTS_ALC_S_GOODFIT:
  3168. allctr = erts_gfalc_start((GFAllctr_t *)
  3169. erts_alloc(ERTS_ALC_T_TEST,
  3170. sizeof(GFAllctr_t)),
  3171. &init.init.gf,
  3172. &init.init.util);
  3173. break;
  3174. case ERTS_ALC_S_BESTFIT:
  3175. allctr = erts_bfalc_start((BFAllctr_t *)
  3176. erts_alloc(ERTS_ALC_T_TEST,
  3177. sizeof(BFAllctr_t)),
  3178. &init.init.bf,
  3179. &init.init.util);
  3180. break;
  3181. case ERTS_ALC_S_AFIT:
  3182. allctr = erts_afalc_start((AFAllctr_t *)
  3183. erts_alloc(ERTS_ALC_T_TEST,
  3184. sizeof(AFAllctr_t)),
  3185. &init.init.af,
  3186. &init.init.util);
  3187. break;
  3188. case ERTS_ALC_S_FIRSTFIT:
  3189. allctr = erts_aoffalc_start((AOFFAllctr_t *)
  3190. erts_alloc(ERTS_ALC_T_TEST,
  3191. sizeof(AOFFAllctr_t)),
  3192. &init.init.aoff,
  3193. &init.init.util);
  3194. break;
  3195. default:
  3196. ASSERT(0);
  3197. allctr = NULL;
  3198. break;
  3199. }
  3200. return (UWord) allctr;
  3201. }
  3202. case 0xf04:
  3203. erts_alcu_stop((Allctr_t *) a1);
  3204. erts_free(ERTS_ALC_T_TEST, (void *) a1);
  3205. break;
  3206. case 0xf05: return (UWord) 1;
  3207. case 0xf06: return (UWord) ((Allctr_t *) a1)->thread_safe;
  3208. #ifdef ETHR_NO_FORKSAFETY
  3209. case 0xf07: return (UWord) 0;
  3210. #else
  3211. case 0xf07: return (UWord) ((Allctr_t *) a1)->thread_safe;
  3212. #endif
  3213. case 0xf08: {
  3214. ethr_mutex *mtx = erts_alloc(ERTS_ALC_T_TEST, sizeof(ethr_mutex));
  3215. if (ethr_mutex_init(mtx) != 0)
  3216. ERTS_ALC_TEST_ABORT;
  3217. return (UWord) mtx;
  3218. }
  3219. case 0xf09: {
  3220. ethr_mutex *mtx = (ethr_mutex *) a1;
  3221. if (ethr_mutex_destroy(mtx) != 0)
  3222. ERTS_ALC_TEST_ABORT;
  3223. erts_free(ERTS_ALC_T_TEST, (void *) mtx);
  3224. break;
  3225. }
  3226. case 0xf0a:
  3227. ethr_mutex_lock((ethr_mutex *) a1);
  3228. break;
  3229. case 0xf0b:
  3230. ethr_mutex_unlock((ethr_mutex *) a1);
  3231. break;
  3232. case 0xf0c: {
  3233. ethr_cond *cnd = erts_alloc(ERTS_ALC_T_TEST, sizeof(ethr_cond));
  3234. if (ethr_cond_init(cnd) != 0)
  3235. ERTS_ALC_TEST_ABORT;
  3236. return (UWord) cnd;
  3237. }
  3238. case 0xf0d: {
  3239. ethr_cond *cnd = (ethr_cond *) a1;
  3240. if (ethr_cond_destroy(cnd) != 0)
  3241. ERTS_ALC_TEST_ABORT;
  3242. erts_free(ERTS_ALC_T_TEST, (void *) cnd);
  3243. break;
  3244. }
  3245. case 0xf0e:
  3246. ethr_cond_broadcast((ethr_cond *) a1);
  3247. break;
  3248. case 0xf0f: {
  3249. int res;
  3250. do {
  3251. res = ethr_cond_wait((ethr_cond *) a1, (ethr_mutex *) a2);
  3252. } while (res == EINTR);
  3253. break;
  3254. }
  3255. case 0xf10: {
  3256. ethr_tid *tid = erts_alloc(ERTS_ALC_T_TEST, sizeof(ethr_tid));
  3257. if (ethr_thr_create(tid,
  3258. (void * (*)(void *)) a1,
  3259. (void *) a2,
  3260. NULL) != 0)
  3261. ERTS_ALC_TEST_ABORT;
  3262. return (UWord) tid;
  3263. }
  3264. case 0xf11: {
  3265. ethr_tid *tid = (ethr_tid *) a1;
  3266. if (ethr_thr_join(*tid, NULL) != 0)
  3267. ERTS_ALC_TEST_ABORT;
  3268. erts_free(ERTS_ALC_T_TEST, (void *) tid);
  3269. break;
  3270. }
  3271. case 0xf12:
  3272. ethr_thr_exit((void *) a1);
  3273. ERTS_ALC_TEST_ABORT;
  3274. break;
  3275. case 0xf13: return (UWord) 1;
  3276. case 0xf14: return (UWord) erts_alloc(ERTS_ALC_T_TEST, (Uint)a1);
  3277. case 0xf15: erts_free(ERTS_ALC_T_TEST, (void*)a1); return 0;
  3278. case 0xf16: return (UWord) erts_realloc(ERTS_ALC_T_TEST, (void*)a1, (Uint)a2);
  3279. case 0xf17: {
  3280. Uint extra_hdr_sz = UNIT_CEILING((Uint)a1);
  3281. ErtsAllocatorThrSpec_t* ts = &erts_allctr_thr_spec[ERTS_ALC_A_TEST];
  3282. Uint offset = ts->allctr[0]->mbc_header_size;
  3283. void* orig_creating_mbc = ts->allctr[0]->creating_mbc;
  3284. void* orig_destroying_mbc = ts->allctr[0]->destroying_mbc;
  3285. void* new_creating_mbc = *(void**)a2; /* inout arg */
  3286. void* new_destroying_mbc = *(void**)a3; /* inout arg */
  3287. int i;
  3288. for (i=0; i < ts->size; i++) {
  3289. Allctr_t* ap = ts->allctr[i];
  3290. if (ap->mbc_header_size != offset
  3291. || ap->creating_mbc != orig_creating_mbc
  3292. || ap->destroying_mbc != orig_destroying_mbc
  3293. || ap->mbc_list.first != NULL)
  3294. return -1;
  3295. }
  3296. for (i=0; i < ts->size; i++) {
  3297. ts->allctr[i]->mbc_header_size += extra_hdr_sz;
  3298. ts->allctr[i]->creating_mbc = new_creating_mbc;
  3299. ts->allctr[i]->destroying_mbc = new_destroying_mbc;
  3300. }
  3301. *(void**)a2 = orig_creating_mbc;
  3302. *(void**)a3 = orig_destroying_mbc;
  3303. return offset;
  3304. }
  3305. case 0xf18: {
  3306. ErtsAllocatorThrSpec_t* ts = &erts_allctr_thr_spec[ERTS_ALC_A_TEST];
  3307. return ts->allctr[0]->largest_mbc_size;
  3308. }
  3309. default:
  3310. break;
  3311. }
  3312. return (UWord) 0;
  3313. default:
  3314. break;
  3315. }
  3316. ASSERT(0);
  3317. return ~((UWord) 0);
  3318. }
  3319. #ifdef DEBUG
  3320. /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
  3321. * Debug stuff *
  3322. \* */
  3323. #if 0
  3324. #define PRINT_OPS
  3325. #else
  3326. #undef PRINT_OPS
  3327. #endif
  3328. #ifdef HARD_DEBUG
  3329. #define FENCE_SZ (4*sizeof(UWord))
  3330. #else
  3331. #define FENCE_SZ (3*sizeof(UWord))
  3332. #endif
  3333. #if defined(ARCH_64)
  3334. #define FENCE_PATTERN 0xABCDEF97ABCDEF97
  3335. #else
  3336. #define FENCE_PATTERN 0xABCDEF97
  3337. #endif
  3338. #define TYPE_PATTERN_MASK ERTS_ALC_N_MASK
  3339. #define TYPE_PATTERN_SHIFT 16
  3340. #define FIXED_FENCE_PATTERN_MASK \
  3341. (~((UWord) (TYPE_PATTERN_MASK << TYPE_PATTERN_SHIFT)))
  3342. #define FIXED_FENCE_PATTERN \
  3343. (FENCE_PATTERN & FIXED_FENCE_PATTERN_MASK)
  3344. #define MK_PATTERN(T) \
  3345. (FIXED_FENCE_PATTERN | (((T) & TYPE_PATTERN_MASK) << TYPE_PATTERN_SHIFT))
  3346. #define GET_TYPE_OF_PATTERN(P) \
  3347. (((P) >> TYPE_PATTERN_SHIFT) & TYPE_PATTERN_MASK)
  3348. #ifdef HARD_DEBUG
  3349. #define ERL_ALC_HDBG_MAX_MBLK 100000
  3350. #define ERTS_ALC_O_CHECK -1
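/* With HARD_DEBUG every live debug block is kept on a doubly linked list of
 * hdbg_mblk entries so that erts_hdbg_chk_blks() can re-verify all memory
 * fences on demand. */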
  3351. typedef struct hdbg_mblk_ hdbg_mblk;
  3352. struct hdbg_mblk_ {
  3353. hdbg_mblk *next;
  3354. hdbg_mblk *prev;
  3355. void *p;
  3356. Uint s;
  3357. ErtsAlcType_t n;
  3358. };
  3359. static hdbg_mblk hdbg_mblks[ERL_ALC_HDBG_MAX_MBLK];
  3360. static hdbg_mblk *free_hdbg_mblks;
  3361. static hdbg_mblk *used_hdbg_mblks;
  3362. static erts_mtx_t hdbg_mblk_mtx;
  3363. static void
  3364. hdbg_init(void)
  3365. {
  3366. int i;
  3367. for (i = 0; i < ERL_ALC_HDBG_MAX_MBLK-1; i++)
  3368. hdbg_mblks[i].next = &hdbg_mblks[i+1];
  3369. hdbg_mblks[ERL_ALC_HDBG_MAX_MBLK-1].next = NULL;
  3370. free_hdbg_mblks = &hdbg_mblks[0];
  3371. used_hdbg_mblks = NULL;
  3372. erts_mtx_init(&hdbg_mblk_mtx, "erts_alloc_hard_debug", NIL,
  3373. ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
  3374. }
  3375. static void *check_memory_fence(void *ptr,
  3376. Uint *size,
  3377. ErtsAlcType_t n,
  3378. int func);
  3379. void erts_hdbg_chk_blks(void);
  3380. void
  3381. erts_hdbg_chk_blks(void)
  3382. {
  3383. hdbg_mblk *mblk;
  3384. erts_mtx_lock(&hdbg_mblk_mtx);
  3385. for (mblk = used_hdbg_mblks; mblk; mblk = mblk->next) {
  3386. Uint sz;
  3387. check_memory_fence(mblk->p, &sz, mblk->n, ERTS_ALC_O_CHECK);
  3388. ASSERT(sz == mblk->s);
  3389. }
  3390. erts_mtx_unlock(&hdbg_mblk_mtx);
  3391. }
  3392. static hdbg_mblk *
  3393. hdbg_alloc(void *p, Uint s, ErtsAlcType_t n)
  3394. {
  3395. hdbg_mblk *mblk;
  3396. erts_mtx_lock(&hdbg_mblk_mtx);
  3397. mblk = free_hdbg_mblks;
  3398. if (!mblk) {
  3399. erts_fprintf(stderr,
  3400. "Ran out of debug blocks; please increase "
  3401. "ERL_ALC_HDBG_MAX_MBLK=%d and recompile!\n",
  3402. ERL_ALC_HDBG_MAX_MBLK);
  3403. abort();
  3404. }
  3405. free_hdbg_mblks = mblk->next;
  3406. mblk->p = p;
  3407. mblk->s = s;
  3408. mblk->n = n;
  3409. mblk->next = used_hdbg_mblks;
  3410. mblk->prev = NULL;
  3411. if (used_hdbg_mblks)
  3412. used_hdbg_mblks->prev = mblk;
  3413. used_hdbg_mblks = mblk;
  3414. erts_mtx_unlock(&hdbg_mblk_mtx);
  3415. return (void *) mblk;
  3416. }
  3417. static void
  3418. hdbg_free(hdbg_mblk *mblk)
  3419. {
  3420. erts_mtx_lock(&hdbg_mblk_mtx);
  3421. if (mblk->next)
  3422. mblk->next->prev = mblk->prev;
  3423. if (mblk->prev)
  3424. mblk->prev->next = mblk->next;
  3425. else
  3426. used_hdbg_mblks = mblk->next;
  3427. mblk->next = free_hdbg_mblks;
  3428. free_hdbg_mblks = mblk;
  3429. erts_mtx_unlock(&hdbg_mblk_mtx);
  3430. }
  3431. #endif
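/*
 * Hooks compiled in when alloc_util itself is built with hard debugging:
 * check_allocated_block() re-verifies the fence of a single block, and
 * check_allocators() walks every alloc_util based allocator and lets it
 * consistency check all of its multiblock carriers (taking the instance
 * mutex when the instance is thread safe).
 */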
  3432. #ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
  3433. static void *check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func);
3434. void check_allocated_block(Uint type, void *blk)
  3435. {
  3436. Uint dummy;
  3437. check_memory_fence(blk, &dummy, ERTS_ALC_T2N(type), ERTS_ALC_O_FREE);
  3438. }
  3439. void check_allocators(void)
  3440. {
  3441. int i;
  3442. if (!erts_initialized)
  3443. return;
  3444. for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; ++i) {
  3445. if (erts_allctrs_info[i].alloc_util) {
  3446. ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) erts_allctrs[i].extra;
  3447. Allctr_t *allctr = real_af->extra;
  3448. Carrier_t *ct;
  3449. if (allctr->thread_safe)
  3450. erts_mtx_lock(&allctr->mutex);
  3451. if (allctr->check_mbc) {
  3452. for (ct = allctr->mbc_list.first; ct; ct = ct->next) {
3453. fprintf(stderr, "Checking allocator %d\r\n", i);
3454. allctr->check_mbc(allctr, ct);
  3455. }
  3456. }
  3457. if (allctr->thread_safe)
  3458. erts_mtx_unlock(&allctr->mutex);
  3459. }
  3460. }
  3461. }
  3462. #endif
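/*
 * set_memory_fence() takes the raw block obtained from the real
 * allocator, stores the requested size and the type-tagged pattern in
 * front of the payload, copies the same pattern just past the payload,
 * and returns the payload address that the caller will actually use.
 */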
  3463. static void *
  3464. set_memory_fence(void *ptr, Uint sz, ErtsAlcType_t n)
  3465. {
  3466. UWord *ui_ptr;
  3467. UWord pattern;
  3468. #ifdef HARD_DEBUG
  3469. hdbg_mblk **mblkpp;
  3470. #endif
  3471. if (!ptr)
  3472. return NULL;
  3473. ui_ptr = (UWord *) ptr;
  3474. pattern = MK_PATTERN(n);
  3475. #ifdef HARD_DEBUG
  3476. mblkpp = (hdbg_mblk **) ui_ptr++;
  3477. #endif
  3478. *(ui_ptr++) = sz;
  3479. *(ui_ptr++) = pattern;
  3480. sys_memcpy((void *) (((char *) ui_ptr)+sz), (void *) &pattern, sizeof(UWord));
  3481. #ifdef HARD_DEBUG
  3482. *mblkpp = hdbg_alloc((void *) ui_ptr, sz, n);
  3483. #endif
  3484. return (void *) ui_ptr;
  3485. }
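/*
 * check_memory_fence() is the inverse of set_memory_fence(): it checks
 * both patterns and the type number embedded in them, aborting the
 * emulator with a diagnostic if the head fence, the tail fence, or the
 * allocation type does not match. On success it reports the original
 * size via *size and returns the underlying (raw) block address.
 */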
  3486. static void *
  3487. check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
  3488. {
  3489. Uint sz;
  3490. Uint found_type;
  3491. UWord pre_pattern, expected_pattern;
  3492. UWord post_pattern;
  3493. UWord *ui_ptr;
  3494. #ifdef HARD_DEBUG
  3495. hdbg_mblk *mblk;
  3496. #endif
  3497. if (!ptr)
  3498. return NULL;
  3499. expected_pattern = MK_PATTERN(n);
  3500. ui_ptr = (UWord *) ptr;
  3501. pre_pattern = *(--ui_ptr);
  3502. *size = sz = *(--ui_ptr);
  3503. #ifdef HARD_DEBUG
  3504. mblk = (hdbg_mblk *) *(--ui_ptr);
  3505. #endif
  3506. found_type = GET_TYPE_OF_PATTERN(pre_pattern);
  3507. if (found_type != n) {
3508. erts_exit(ERTS_ABORT_EXIT, "ERROR: Mismatching allocator types"
3509. " used in alloc and free\n");
  3510. }
  3511. if (pre_pattern != expected_pattern) {
  3512. if ((FIXED_FENCE_PATTERN_MASK & pre_pattern) != FIXED_FENCE_PATTERN)
  3513. erts_exit(ERTS_ABORT_EXIT,
  3514. "ERROR: Fence at beginning of memory block (p=0x%u) "
  3515. "clobbered.\n",
  3516. (UWord) ptr);
  3517. }
  3518. sys_memcpy((void *) &post_pattern, (void *) (((char *)ptr)+sz), sizeof(UWord));
  3519. if (post_pattern != expected_pattern || pre_pattern != post_pattern) {
  3520. char fbuf[10];
  3521. char obuf[10];
  3522. char *ftype;
  3523. char *otype;
  3524. char *op_str;
  3525. if ((FIXED_FENCE_PATTERN_MASK & post_pattern) != FIXED_FENCE_PATTERN)
  3526. erts_exit(ERTS_ABORT_EXIT,
  3527. "ERROR: Fence at end of memory block (p=0x%u, sz=%u) "
  3528. "clobbered.\n",
  3529. (UWord) ptr, (UWord) sz);
  3530. if (found_type != GET_TYPE_OF_PATTERN(post_pattern))
  3531. erts_exit(ERTS_ABORT_EXIT,
  3532. "ERROR: Fence around memory block (p=0x%u, sz=%u) "
  3533. "clobbered.\n",
  3534. (UWord) ptr, (UWord) sz);
  3535. ftype = type_no_str(found_type);
  3536. if (!ftype) {
  3537. erts_snprintf(fbuf, sizeof(fbuf), "%d", (int) found_type);
  3538. ftype = fbuf;
  3539. }
  3540. otype = type_no_str(n);
  3541. if (!otype) {
  3542. erts_snprintf(obuf, sizeof(obuf), "%d", (int) n);
  3543. otype = obuf;
  3544. }
  3545. switch (func) {
  3546. case ERTS_ALC_O_ALLOC: op_str = "allocated"; break;
  3547. case ERTS_ALC_O_REALLOC: op_str = "reallocated"; break;
  3548. case ERTS_ALC_O_FREE: op_str = "freed"; break;
  3549. default: op_str = "???"; break;
  3550. }
  3551. erts_exit(ERTS_ABORT_EXIT,
  3552. "ERROR: Memory block (p=0x%u, sz=%u) allocated as type \"%s\","
  3553. " but %s as type \"%s\".\n",
  3554. (UWord) ptr, (UWord) sz, ftype, op_str, otype);
  3555. }
  3556. #ifdef HARD_DEBUG
  3557. switch (func) {
  3558. case ERTS_ALC_O_REALLOC:
  3559. case ERTS_ALC_O_FREE:
  3560. hdbg_free(mblk);
  3561. break;
  3562. default:
  3563. break;
  3564. }
  3565. #endif
  3566. return (void *) ui_ptr;
  3567. }
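/*
 * The debug wrappers below are installed in the erts_allctrs dispatch
 * table by install_debug_functions(); the original callbacks are saved
 * in real_allctrs. The alloc and realloc wrappers grow each request by
 * FENCE_SZ, delegate to the saved callback, and then write or validate
 * the fences around the payload.
 */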
  3568. static ErtsAllocatorFunctions_t real_allctrs[ERTS_ALC_A_MAX+1];
  3569. static void *
  3570. debug_alloc(ErtsAlcType_t type, void *extra, Uint size)
  3571. {
  3572. ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
  3573. ErtsAlcType_t n;
  3574. Uint dsize;
  3575. void *res;
  3576. #ifdef HARD_DEBUG
  3577. erts_hdbg_chk_blks();
  3578. #endif
  3579. n = ERTS_ALC_T2N(type);
  3580. ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);
  3581. dsize = size + FENCE_SZ;
  3582. res = (*real_af->alloc)(type, real_af->extra, dsize);
  3583. res = set_memory_fence(res, size, n);
  3584. #ifdef PRINT_OPS
  3585. fprintf(stderr, "0x%lx = alloc(%s, %lu)\r\n",
  3586. (Uint) res, ERTS_ALC_N2TD(n), size);
  3587. #endif
  3588. return res;
  3589. }
  3590. static void *
  3591. debug_realloc(ErtsAlcType_t type, void *extra, void *ptr, Uint size)
  3592. {
  3593. ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
  3594. ErtsAlcType_t n;
  3595. Uint dsize;
  3596. Uint old_size;
  3597. void *dptr;
  3598. void *res;
  3599. n = ERTS_ALC_T2N(type);
  3600. ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);
  3601. dsize = size + FENCE_SZ;
  3602. dptr = check_memory_fence(ptr, &old_size, n, ERTS_ALC_O_REALLOC);
  3603. #ifdef HARD_DEBUG
  3604. erts_hdbg_chk_blks();
  3605. #endif
  3606. if (ptr && old_size > size)
  3607. sys_memset((void *) (((char *) ptr) + size),
  3608. 0xf,
  3609. sizeof(Uint) + old_size - size);
  3610. res = (*real_af->realloc)(type, real_af->extra, dptr, dsize);
  3611. res = set_memory_fence(res, size, n);
  3612. #ifdef PRINT_OPS
  3613. fprintf(stderr, "0x%lx = realloc(%s, 0x%lx, %lu)\r\n",
  3614. (Uint) res, ERTS_ALC_N2TD(n), (Uint) ptr, size);
  3615. #endif
  3616. return res;
  3617. }
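/*
 * debug_free() fills the whole block (fences included) with the
 * allocation type number before handing it back, so stale pointers are
 * more likely to be noticed; for executable memory on x86/x86_64 the
 * fill byte is 0x0f instead, which the code treats as an illegal
 * instruction pattern.
 */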
  3618. static void
  3619. debug_free(ErtsAlcType_t type, void *extra, void *ptr)
  3620. {
  3621. ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
  3622. ErtsAlcType_t n;
  3623. void *dptr;
  3624. Uint size;
  3625. int free_pattern;
  3626. n = ERTS_ALC_T2N(type);
  3627. free_pattern = n;
  3628. ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);
  3629. if (!ptr)
  3630. return;
  3631. dptr = check_memory_fence(ptr, &size, n, ERTS_ALC_O_FREE);
  3632. #ifdef ERTS_ALC_A_EXEC
  3633. # if defined(__i386__) || defined(__x86_64__)
  3634. if (ERTS_ALC_T2A(ERTS_ALC_N2T(n)) == ERTS_ALC_A_EXEC) {
  3635. free_pattern = 0x0f; /* Illegal instruction */
  3636. }
  3637. # endif
  3638. #endif
  3639. sys_memset((void *) dptr, free_pattern, size + FENCE_SZ);
  3640. (*real_af->free)(type, real_af->extra, dptr);
  3641. #ifdef PRINT_OPS
  3642. fprintf(stderr, "free(%s, 0x%lx)\r\n", ERTS_ALC_N2TD(n), (Uint) ptr);
  3643. #endif
  3644. #ifdef HARD_DEBUG
  3645. erts_hdbg_chk_blks();
  3646. #endif
  3647. }
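/*
 * install_debug_functions() redirects every allocator slot in
 * erts_allctrs to the debug wrappers above, saving the original
 * callbacks in real_allctrs, and returns the per-block overhead
 * (FENCE_SZ) so the caller can account for the extra space.
 */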
  3648. static Uint
  3649. install_debug_functions(void)
  3650. {
  3651. int i;
  3652. ERTS_CT_ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));
3653. sys_memcpy((void *) real_allctrs, (void *) erts_allctrs, sizeof(erts_allctrs));
  3654. for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
  3655. erts_allctrs[i].alloc = debug_alloc;
  3656. erts_allctrs[i].realloc = debug_realloc;
  3657. erts_allctrs[i].free = debug_free;
  3658. erts_allctrs[i].extra = (void *) &real_allctrs[i];
  3659. }
  3660. return FENCE_SZ;
  3661. }
  3662. #endif /* #ifdef DEBUG */