
/erts/emulator/beam/erl_alloc.c

https://github.com/bsmr-erlang/otp
C | 4125 lines | 3555 code | 457 blank | 113 comment | 601 complexity | 3b54aac3d465911f8bc3eb46ca1fc489 MD5
Possible License(s): BSD-3-Clause, LGPL-2.1, MPL-2.0-no-copyleft-exception, Apache-2.0
  1. /*
  2. * %CopyrightBegin%
  3. *
  4. * Copyright Ericsson AB 2002-2018. All Rights Reserved.
  5. *
  6. * Licensed under the Apache License, Version 2.0 (the "License");
  7. * you may not use this file except in compliance with the License.
  8. * You may obtain a copy of the License at
  9. *
  10. * http://www.apache.org/licenses/LICENSE-2.0
  11. *
  12. * Unless required by applicable law or agreed to in writing, software
  13. * distributed under the License is distributed on an "AS IS" BASIS,
  14. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15. * See the License for the specific language governing permissions and
  16. * limitations under the License.
  17. *
  18. * %CopyrightEnd%
  19. */
  20. /*
  21. * Description: Management of memory allocators.
  22. *
  23. * Author: Rickard Green
  24. */
  25. #ifdef HAVE_CONFIG_H
  26. # include "config.h"
  27. #endif
  28. #define ERTS_ALLOC_C__
  29. #define ERTS_ALC_INTERNAL__
  30. #define ERTS_WANT_MEM_MAPPERS
  31. #include "sys.h"
  32. #define ERL_THREADS_EMU_INTERNAL__
  33. #include "erl_threads.h"
  34. #include "global.h"
  35. #include "erl_db.h"
  36. #include "erl_binary.h"
  37. #include "erl_bits.h"
  38. #include "erl_mtrace.h"
  39. #include "erl_mseg.h"
  40. #include "erl_monitor_link.h"
  41. #include "erl_hl_timer.h"
  42. #include "erl_cpu_topology.h"
  43. #include "erl_thr_queue.h"
  44. #include "erl_nfunc_sched.h"
  45. #if defined(ERTS_ALC_T_DRV_SEL_D_STATE) || defined(ERTS_ALC_T_DRV_EV_D_STATE)
  46. #include "erl_check_io.h"
  47. #endif
  48. #include "erl_bif_unique.h"
  49. #define GET_ERL_GF_ALLOC_IMPL
  50. #include "erl_goodfit_alloc.h"
  51. #define GET_ERL_BF_ALLOC_IMPL
  52. #include "erl_bestfit_alloc.h"
  53. #define GET_ERL_AF_ALLOC_IMPL
  54. #include "erl_afit_alloc.h"
  55. #define GET_ERL_AOFF_ALLOC_IMPL
  56. #include "erl_ao_firstfit_alloc.h"
  57. #if ERTS_MAX_NO_OF_SCHEDULERS > ERTS_AU_MAX_PREF_ALLOC_INSTANCES
  58. # error "Too many schedulers; cannot create that many pref alloc instances"
  59. #endif
  60. #define ERTS_ALC_DEFAULT_MAX_THR_PREF ERTS_MAX_NO_OF_SCHEDULERS
  61. #if defined(SMALL_MEMORY) || defined(PURIFY) || defined(VALGRIND)
  62. #define AU_ALLOC_DEFAULT_ENABLE(X) 0
  63. #else
  64. #define AU_ALLOC_DEFAULT_ENABLE(X) (X)
  65. #endif
  66. #define ERTS_ALC_DEFAULT_ENABLED_ACUL 60
  67. #define ERTS_ALC_DEFAULT_ENABLED_ACUL_EHEAP_ALLOC 45
  68. #define ERTS_ALC_DEFAULT_ENABLED_ACUL_LL_ALLOC 85
  69. #define ERTS_ALC_DEFAULT_ACUL ERTS_ALC_DEFAULT_ENABLED_ACUL
  70. #define ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC ERTS_ALC_DEFAULT_ENABLED_ACUL_EHEAP_ALLOC
  71. #define ERTS_ALC_DEFAULT_ACUL_LL_ALLOC ERTS_ALC_DEFAULT_ENABLED_ACUL_LL_ALLOC
  72. #ifdef DEBUG
  73. static Uint install_debug_functions(void);
  74. #if 0
  75. #define HARD_DEBUG
  76. #ifdef __GNUC__
  77. #warning "* * * * * * * * * * * * * *"
  78. #warning "* HARD DEBUG IS ENABLED! *"
  79. #warning "* * * * * * * * * * * * * *"
  80. #endif
  81. #endif
  82. #endif
  83. static int lock_all_physical_memory = 0;
  84. ErtsAllocatorFunctions_t ERTS_WRITE_UNLIKELY(erts_allctrs[ERTS_ALC_A_MAX+1]);
  85. ErtsAllocatorInfo_t erts_allctrs_info[ERTS_ALC_A_MAX+1];
  86. ErtsAllocatorThrSpec_t ERTS_WRITE_UNLIKELY(erts_allctr_thr_spec[ERTS_ALC_A_MAX+1]);
  87. #define ERTS_MIN(A, B) ((A) < (B) ? (A) : (B))
  88. #define ERTS_MAX(A, B) ((A) > (B) ? (A) : (B))
  89. typedef union {
  90. GFAllctr_t gfa;
  91. char align_gfa[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(GFAllctr_t))];
  92. BFAllctr_t bfa;
  93. char align_bfa[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(BFAllctr_t))];
  94. AFAllctr_t afa;
  95. char align_afa[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(AFAllctr_t))];
  96. AOFFAllctr_t aoffa;
  97. char align_aoffa[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(AOFFAllctr_t))];
  98. } ErtsAllocatorState_t erts_align_attribute(ERTS_CACHE_LINE_SIZE);
  99. static ErtsAllocatorState_t std_alloc_state;
  100. static ErtsAllocatorState_t ll_alloc_state;
  101. static ErtsAllocatorState_t sl_alloc_state;
  102. static ErtsAllocatorState_t temp_alloc_state;
  103. static ErtsAllocatorState_t eheap_alloc_state;
  104. static ErtsAllocatorState_t binary_alloc_state;
  105. static ErtsAllocatorState_t ets_alloc_state;
  106. static ErtsAllocatorState_t driver_alloc_state;
  107. static ErtsAllocatorState_t fix_alloc_state;
  108. static ErtsAllocatorState_t literal_alloc_state;
  109. #ifdef ERTS_ALC_A_EXEC
  110. static ErtsAllocatorState_t exec_alloc_state;
  111. #endif
  112. static ErtsAllocatorState_t test_alloc_state;
  113. enum {
  114. ERTS_ALC_INFO_A_ALLOC_UTIL = ERTS_ALC_A_MAX + 1,
  115. ERTS_ALC_INFO_A_MSEG_ALLOC,
  116. ERTS_ALC_INFO_A_ERTS_MMAP,
  117. ERTS_ALC_INFO_A_DISABLED_EXEC, /* fake a disabled "exec_alloc" */
  118. ERTS_ALC_INFO_A_END
  119. };
  120. typedef struct {
  121. erts_atomic32_t refc;
  122. int only_sz;
  123. int internal;
  124. Uint req_sched;
  125. Process *proc;
  126. ErtsIRefStorage iref;
  127. int allocs[ERTS_ALC_INFO_A_END - ERTS_ALC_A_MIN + 1];
  128. } ErtsAllocInfoReq;
  129. ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(aireq,
  130. ErtsAllocInfoReq,
  131. 5,
  132. ERTS_ALC_T_AINFO_REQ)
  133. ErtsAlcType_t erts_fix_core_allocator_ix;
  134. struct au_init {
  135. int enable;
  136. int thr_spec;
  137. int disable_allowed;
  138. int thr_spec_allowed;
  139. int carrier_migration_allowed;
  140. ErtsAlcStrat_t astrat;
  141. struct {
  142. AllctrInit_t util;
  143. GFAllctrInit_t gf;
  144. BFAllctrInit_t bf;
  145. AFAllctrInit_t af;
  146. AOFFAllctrInit_t aoff;
  147. } init;
  148. struct {
  149. int mmbcs;
  150. int lmbcs;
  151. int smbcs;
  152. int mmmbc;
  153. } default_;
  154. };
  155. #define DEFAULT_ALLCTR_INIT { \
  156. ERTS_DEFAULT_ALLCTR_INIT, \
  157. ERTS_DEFAULT_GF_ALLCTR_INIT, \
  158. ERTS_DEFAULT_BF_ALLCTR_INIT, \
  159. ERTS_DEFAULT_AF_ALLCTR_INIT, \
  160. ERTS_DEFAULT_AOFF_ALLCTR_INIT \
  161. }
  162. typedef struct {
  163. int erts_alloc_config;
  164. #if HAVE_ERTS_MSEG
  165. ErtsMsegInit_t mseg;
  166. #endif
  167. int trim_threshold;
  168. int top_pad;
  169. AlcUInit_t alloc_util;
  170. struct {
  171. char *mtrace;
  172. char *nodename;
  173. } instr;
  174. struct au_init sl_alloc;
  175. struct au_init std_alloc;
  176. struct au_init ll_alloc;
  177. struct au_init temp_alloc;
  178. struct au_init eheap_alloc;
  179. struct au_init binary_alloc;
  180. struct au_init ets_alloc;
  181. struct au_init driver_alloc;
  182. struct au_init fix_alloc;
  183. struct au_init literal_alloc;
  184. struct au_init exec_alloc;
  185. struct au_init test_alloc;
  186. } erts_alc_hndl_args_init_t;
  187. #define ERTS_AU_INIT__ {0, 0, 1, 1, 1, \
  188. ERTS_ALC_S_GOODFIT, DEFAULT_ALLCTR_INIT, \
  189. {1,1,1,1}}
  190. #define SET_DEFAULT_ALLOC_OPTS(IP) \
  191. do { \
  192. struct au_init aui__ = ERTS_AU_INIT__; \
  193. sys_memcpy((void *) (IP), (void *) &aui__, sizeof(struct au_init)); \
  194. } while (0)
  195. static void
  196. set_default_sl_alloc_opts(struct au_init *ip)
  197. {
  198. SET_DEFAULT_ALLOC_OPTS(ip);
  199. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  200. ip->thr_spec = 1;
  201. ip->astrat = ERTS_ALC_S_GOODFIT;
  202. ip->init.util.name_prefix = "sl_";
  203. ip->init.util.alloc_no = ERTS_ALC_A_SHORT_LIVED;
  204. #ifndef SMALL_MEMORY
  205. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  206. #else
  207. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  208. #endif
  209. ip->init.util.ts = ERTS_ALC_MTA_SHORT_LIVED;
  210. ip->init.util.rsbcst = 80;
  211. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
  212. }
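/*
 * For illustration: each field set above maps to a "+MS<subswitch>" emulator
 * flag handled by handle_args()/handle_au_arg() later in this file ('S'
 * selects sl_alloc). Assuming the erl launcher forwards "+M..." flags to the
 * emulator as "-M...", a command line such as
 *
 *     erl +MSmmbcs 256 +MSrsbcst 85
 *
 * would override the defaults above with a 256 KB main multi-block carrier
 * (get_kb_value() reads kilobytes) and an 85 % sbc realloc shrink threshold.
 */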
  213. static void
  214. set_default_std_alloc_opts(struct au_init *ip)
  215. {
  216. SET_DEFAULT_ALLOC_OPTS(ip);
  217. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  218. ip->thr_spec = 1;
  219. ip->astrat = ERTS_ALC_S_BESTFIT;
  220. ip->init.util.name_prefix = "std_";
  221. ip->init.util.alloc_no = ERTS_ALC_A_STANDARD;
  222. #ifndef SMALL_MEMORY
  223. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  224. #else
  225. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  226. #endif
  227. ip->init.util.ts = ERTS_ALC_MTA_STANDARD;
  228. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
  229. }
  230. static void
  231. set_default_ll_alloc_opts(struct au_init *ip)
  232. {
  233. SET_DEFAULT_ALLOC_OPTS(ip);
  234. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  235. ip->thr_spec = 0;
  236. ip->astrat = ERTS_ALC_S_BESTFIT;
  237. ip->init.bf.ao = 1;
  238. ip->init.util.ramv = 0;
  239. ip->init.util.mmsbc = 0;
  240. ip->init.util.sbct = ~((UWord) 0);
  241. ip->init.util.name_prefix = "ll_";
  242. ip->init.util.alloc_no = ERTS_ALC_A_LONG_LIVED;
  243. #ifndef SMALL_MEMORY
  244. ip->init.util.mmbcs = 2*1024*1024; /* Main carrier size */
  245. #else
  246. ip->init.util.mmbcs = 1*1024*1024; /* Main carrier size */
  247. #endif
  248. ip->init.util.ts = ERTS_ALC_MTA_LONG_LIVED;
  249. ip->init.util.asbcst = 0;
  250. ip->init.util.rsbcst = 0;
  251. ip->init.util.rsbcmt = 0;
  252. ip->init.util.rmbcmt = 0;
  253. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL_LL_ALLOC;
  254. }
  255. static void
  256. set_default_literal_alloc_opts(struct au_init *ip)
  257. {
  258. SET_DEFAULT_ALLOC_OPTS(ip);
  259. ip->enable = 1;
  260. ip->thr_spec = 0;
  261. ip->disable_allowed = 0;
  262. ip->thr_spec_allowed = 0;
  263. ip->carrier_migration_allowed = 0;
  264. ip->astrat = ERTS_ALC_S_BESTFIT;
  265. ip->init.bf.ao = 1;
  266. ip->init.util.ramv = 0;
  267. ip->init.util.mmsbc = 0;
  268. ip->init.util.sbct = ~((UWord) 0);
  269. ip->init.util.name_prefix = "literal_";
  270. ip->init.util.alloc_no = ERTS_ALC_A_LITERAL;
  271. #ifndef SMALL_MEMORY
  272. ip->init.util.mmbcs = 1024*1024; /* Main carrier size */
  273. #else
  274. ip->init.util.mmbcs = 256*1024; /* Main carrier size */
  275. #endif
  276. ip->init.util.ts = ERTS_ALC_MTA_LITERAL;
  277. ip->init.util.asbcst = 0;
  278. ip->init.util.rsbcst = 0;
  279. ip->init.util.rsbcmt = 0;
  280. ip->init.util.rmbcmt = 0;
  281. ip->init.util.acul = 0;
  282. #if defined(ARCH_32)
  283. # if HAVE_ERTS_MSEG
  284. ip->init.util.mseg_alloc = &erts_alcu_literal_32_mseg_alloc;
  285. ip->init.util.mseg_realloc = &erts_alcu_literal_32_mseg_realloc;
  286. ip->init.util.mseg_dealloc = &erts_alcu_literal_32_mseg_dealloc;
  287. # endif
  288. ip->init.util.sys_alloc = &erts_alcu_literal_32_sys_alloc;
  289. ip->init.util.sys_realloc = &erts_alcu_literal_32_sys_realloc;
  290. ip->init.util.sys_dealloc = &erts_alcu_literal_32_sys_dealloc;
  291. #elif defined(ARCH_64)
  292. # ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION
  293. ip->init.util.mseg_alloc = &erts_alcu_mmapper_mseg_alloc;
  294. ip->init.util.mseg_realloc = &erts_alcu_mmapper_mseg_realloc;
  295. ip->init.util.mseg_dealloc = &erts_alcu_mmapper_mseg_dealloc;
  296. ip->init.util.mseg_mmapper = &erts_literal_mmapper;
  297. # endif
  298. #else
  299. # error Unknown architecture
  300. #endif
  301. }
  302. #ifdef ERTS_ALC_A_EXEC
  303. static void
  304. set_default_exec_alloc_opts(struct au_init *ip)
  305. {
  306. SET_DEFAULT_ALLOC_OPTS(ip);
  307. ip->enable = 1;
  308. ip->thr_spec = 0;
  309. ip->disable_allowed = 0;
  310. ip->thr_spec_allowed = 0;
  311. ip->carrier_migration_allowed = 0;
  312. ip->astrat = ERTS_ALC_S_BESTFIT;
  313. ip->init.bf.ao = 1;
  314. ip->init.util.ramv = 0;
  315. ip->init.util.mmsbc = 0;
  316. ip->init.util.sbct = ~((UWord) 0);
  317. ip->init.util.name_prefix = "exec_";
  318. ip->init.util.alloc_no = ERTS_ALC_A_EXEC;
  319. ip->init.util.mmbcs = 0; /* No main carrier */
  320. ip->init.util.ts = ERTS_ALC_MTA_EXEC;
  321. ip->init.util.asbcst = 0;
  322. ip->init.util.rsbcst = 0;
  323. ip->init.util.rsbcmt = 0;
  324. ip->init.util.rmbcmt = 0;
  325. ip->init.util.acul = 0;
  326. ip->init.util.mseg_alloc = &erts_alcu_exec_mseg_alloc;
  327. ip->init.util.mseg_realloc = &erts_alcu_exec_mseg_realloc;
  328. ip->init.util.mseg_dealloc = &erts_alcu_exec_mseg_dealloc;
  329. }
  330. #endif /* ERTS_ALC_A_EXEC */
  331. static void
  332. set_default_temp_alloc_opts(struct au_init *ip)
  333. {
  334. SET_DEFAULT_ALLOC_OPTS(ip);
  335. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  336. ip->thr_spec = 1;
  337. ip->disable_allowed = 0;
  338. ip->carrier_migration_allowed = 0;
  339. ip->astrat = ERTS_ALC_S_AFIT;
  340. ip->init.util.name_prefix = "temp_";
  341. ip->init.util.alloc_no = ERTS_ALC_A_TEMPORARY;
  342. #ifndef SMALL_MEMORY
  343. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  344. #else
  345. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  346. #endif
  347. ip->init.util.ts = ERTS_ALC_MTA_TEMPORARY;
  348. ip->init.util.rsbcst = 90;
  349. ip->init.util.rmbcmt = 100;
  350. }
  351. static void
  352. set_default_eheap_alloc_opts(struct au_init *ip)
  353. {
  354. SET_DEFAULT_ALLOC_OPTS(ip);
  355. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  356. ip->thr_spec = 1;
  357. ip->astrat = ERTS_ALC_S_GOODFIT;
  358. ip->init.util.name_prefix = "eheap_";
  359. ip->init.util.alloc_no = ERTS_ALC_A_EHEAP;
  360. #ifndef SMALL_MEMORY
  361. ip->init.util.mmbcs = 512*1024; /* Main carrier size */
  362. #else
  363. ip->init.util.mmbcs = 256*1024; /* Main carrier size */
  364. #endif
  365. ip->init.util.ts = ERTS_ALC_MTA_EHEAP;
  366. ip->init.util.rsbcst = 50;
  367. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC;
  368. }
  369. static void
  370. set_default_binary_alloc_opts(struct au_init *ip)
  371. {
  372. SET_DEFAULT_ALLOC_OPTS(ip);
  373. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  374. ip->thr_spec = 1;
  375. ip->astrat = ERTS_ALC_S_BESTFIT;
  376. ip->init.util.name_prefix = "binary_";
  377. ip->init.util.alloc_no = ERTS_ALC_A_BINARY;
  378. #ifndef SMALL_MEMORY
  379. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  380. #else
  381. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  382. #endif
  383. ip->init.util.ts = ERTS_ALC_MTA_BINARY;
  384. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
  385. ip->init.util.atags = 1;
  386. }
  387. static void
  388. set_default_ets_alloc_opts(struct au_init *ip)
  389. {
  390. SET_DEFAULT_ALLOC_OPTS(ip);
  391. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  392. ip->thr_spec = 1;
  393. ip->astrat = ERTS_ALC_S_BESTFIT;
  394. ip->init.util.name_prefix = "ets_";
  395. ip->init.util.alloc_no = ERTS_ALC_A_ETS;
  396. #ifndef SMALL_MEMORY
  397. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  398. #else
  399. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  400. #endif
  401. ip->init.util.ts = ERTS_ALC_MTA_ETS;
  402. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
  403. }
  404. static void
  405. set_default_driver_alloc_opts(struct au_init *ip)
  406. {
  407. SET_DEFAULT_ALLOC_OPTS(ip);
  408. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  409. ip->thr_spec = 1;
  410. ip->astrat = ERTS_ALC_S_BESTFIT;
  411. ip->init.util.name_prefix = "driver_";
  412. ip->init.util.alloc_no = ERTS_ALC_A_DRIVER;
  413. #ifndef SMALL_MEMORY
  414. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  415. #else
  416. ip->init.util.mmbcs = 32*1024; /* Main carrier size */
  417. #endif
  418. ip->init.util.ts = ERTS_ALC_MTA_DRIVER;
  419. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
  420. ip->init.util.atags = 1;
  421. }
  422. static void
  423. set_default_fix_alloc_opts(struct au_init *ip,
  424. size_t *fix_type_sizes)
  425. {
  426. SET_DEFAULT_ALLOC_OPTS(ip);
  427. ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
  428. ip->thr_spec = 1;
  429. ip->astrat = ERTS_ALC_S_BESTFIT;
  430. ip->init.bf.ao = 1;
  431. ip->init.util.name_prefix = "fix_";
  432. ip->init.util.fix_type_size = fix_type_sizes;
  433. ip->init.util.alloc_no = ERTS_ALC_A_FIXED_SIZE;
  434. #ifndef SMALL_MEMORY
  435. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  436. #else
  437. ip->init.util.mmbcs = 128*1024; /* Main carrier size */
  438. #endif
  439. ip->init.util.ts = ERTS_ALC_MTA_FIXED_SIZE;
  440. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
  441. }
  442. static void
  443. set_default_test_alloc_opts(struct au_init *ip)
  444. {
  445. SET_DEFAULT_ALLOC_OPTS(ip);
  446. ip->enable = 0; /* Disabled by default */
  447. ip->thr_spec = -1 * erts_no_schedulers;
  448. ip->astrat = ERTS_ALC_S_FIRSTFIT;
  449. ip->init.aoff.crr_order = FF_AOFF;
  450. ip->init.aoff.blk_order = FF_BF;
  451. ip->init.util.name_prefix = "test_";
  452. ip->init.util.alloc_no = ERTS_ALC_A_TEST;
  453. ip->init.util.mmbcs = 0; /* Main carrier size */
  454. ip->init.util.ts = ERTS_ALC_MTA_TEST;
  455. ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
  456. ip->init.util.atags = 1;
  457. /* Use a constant minimal MBC size */
  458. #if ERTS_SA_MB_CARRIERS
  459. ip->init.util.smbcs = ERTS_SACRR_UNIT_SZ;
  460. ip->init.util.lmbcs = ERTS_SACRR_UNIT_SZ;
  461. ip->init.util.sbct = ERTS_SACRR_UNIT_SZ;
  462. #else
  463. ip->init.util.smbcs = 1 << 12;
  464. ip->init.util.lmbcs = 1 << 12;
  465. ip->init.util.sbct = 1 << 12;
  466. #endif
  467. }
  468. static void
  469. adjust_tpref(struct au_init *ip, int no_sched)
  470. {
  471. if (ip->thr_spec) {
  472. ip->thr_spec = no_sched;
  473. ip->thr_spec *= -1; /* thread preferred */
  474. /* If default ... */
  475. /* ... shrink main multi-block carrier size */
  476. if (ip->default_.mmbcs)
  477. ip->init.util.mmbcs /= ERTS_MIN(4, no_sched);
  478. /* ... shrink largest multi-block carrier size */
  479. if (ip->default_.lmbcs)
  480. ip->init.util.lmbcs /= ERTS_MIN(2, no_sched);
  481. /* ... shrink smallest multi-block carrier size */
  482. if (ip->default_.smbcs)
  483. ip->init.util.smbcs /= ERTS_MIN(4, no_sched);
  484. }
  485. }
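/*
 * Worked example, assuming the defaults above and 8 schedulers: adjust_tpref()
 * turns thr_spec = 1 into thr_spec = -8 (thread preferred), and since the
 * default_ flags are still set, the per-instance carrier sizes shrink, e.g.
 * sl_alloc's 128 KB mmbcs becomes 128/ERTS_MIN(4, 8) = 32 KB per instance
 * while lmbcs is halved (ERTS_MIN(2, 8) = 2).
 */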
  486. static void handle_args(int *, char **, erts_alc_hndl_args_init_t *);
  487. static void
  488. set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init, int ncpu);
  489. static void
  490. start_au_allocator(ErtsAlcType_t alctr_n,
  491. struct au_init *init,
  492. ErtsAllocatorState_t *state);
  493. static void
  494. refuse_af_strategy(struct au_init *init)
  495. {
  496. if (init->astrat == ERTS_ALC_S_AFIT)
  497. init->astrat = ERTS_ALC_S_GOODFIT;
  498. }
  499. #ifdef HARD_DEBUG
  500. static void hdbg_init(void);
  501. #endif
  502. static void adjust_fix_alloc_sizes(UWord extra_block_size)
  503. {
  504. if (extra_block_size && erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].enabled) {
  505. int j;
  506. if (erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].thr_spec) {
  507. int i;
  508. ErtsAllocatorThrSpec_t* tspec;
  509. tspec = &erts_allctr_thr_spec[ERTS_ALC_A_FIXED_SIZE];
  510. ASSERT(tspec->enabled);
  511. for (i=0; i < tspec->size; i++) {
  512. Allctr_t* allctr = tspec->allctr[i];
  513. for (j=0; j < ERTS_ALC_NO_FIXED_SIZES; ++j) {
  514. size_t size = allctr->fix[j].type_size;
  515. size = MAX(size + extra_block_size,
  516. sizeof(ErtsAllctrDDBlock_t));
  517. allctr->fix[j].type_size = size;
  518. }
  519. }
  520. }
  521. else
  522. {
  523. Allctr_t* allctr = erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra;
  524. for (j=0; j < ERTS_ALC_NO_FIXED_SIZES; ++j) {
  525. size_t size = allctr->fix[j].type_size;
  526. size = MAX(size + extra_block_size,
  527. sizeof(ErtsAllctrDDBlock_t));
  528. allctr->fix[j].type_size = size;
  529. }
  530. }
  531. }
  532. }
  533. static ERTS_INLINE int
  534. strategy_support_carrier_migration(struct au_init *auip)
  535. {
  536. /*
  537. * Currently only aoff* and ageff* support carrier
  538. * migration, i.e, type AOFIRSTFIT.
  539. */
  540. return auip->astrat == ERTS_ALC_S_FIRSTFIT;
  541. }
  542. static ERTS_INLINE void
  543. adjust_carrier_migration_support(struct au_init *auip)
  544. {
  545. if (auip->init.util.acul) {
  546. auip->thr_spec = -1; /* Need thread preferred */
  547. /*
  548. * If strategy cannot handle carrier migration,
  549. * default to a strategy that can...
  550. */
  551. if (!strategy_support_carrier_migration(auip)) {
  552. /* Default to aoffcbf */
  553. auip->astrat = ERTS_ALC_S_FIRSTFIT;
  554. auip->init.aoff.crr_order = FF_AOFF;
  555. auip->init.aoff.blk_order = FF_BF;
  556. }
  557. }
  558. }
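/*
 * Example of the adjustment above using this file's defaults: binary_alloc is
 * initialized with astrat = ERTS_ALC_S_BESTFIT and acul = 60. Best fit cannot
 * migrate carriers, so adjust_carrier_migration_support() switches it to the
 * aoffcbf first-fit variant (crr_order FF_AOFF, blk_order FF_BF) and forces
 * thr_spec = -1 so the abandoned-carrier utilization limit can take effect.
 */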
  559. void
  560. erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
  561. {
  562. UWord extra_block_size = 0;
  563. int i, ncpu;
  564. erts_alc_hndl_args_init_t init = {
  565. 0,
  566. #if HAVE_ERTS_MSEG
  567. ERTS_MSEG_INIT_DEFAULT_INITIALIZER,
  568. #endif
  569. ERTS_DEFAULT_TRIM_THRESHOLD,
  570. ERTS_DEFAULT_TOP_PAD,
  571. ERTS_DEFAULT_ALCU_INIT,
  572. };
  573. size_t fix_type_sizes[ERTS_ALC_NO_FIXED_SIZES] = {0};
  574. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_PROC)]
  575. = sizeof(Process);
  576. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MONITOR)]
  577. = sizeof(ErtsMonitorDataHeap);
  578. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_LINK)]
  579. = sizeof(ErtsLinkData);
  580. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_DRV_SEL_D_STATE)]
  581. = sizeof(ErtsDrvSelectDataState);
  582. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_NIF_SEL_D_STATE)]
  583. = sizeof(ErtsNifSelectDataState);
  584. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MSG_REF)]
  585. = sizeof(ErtsMessageRef);
  586. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_THR_Q_EL_SL)]
  587. = sizeof(ErtsThrQElement_t);
  588. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_LL_PTIMER)]
  589. = erts_timer_type_size(ERTS_ALC_T_LL_PTIMER);
  590. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_HL_PTIMER)]
  591. = erts_timer_type_size(ERTS_ALC_T_HL_PTIMER);
  592. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_BIF_TIMER)]
  593. = erts_timer_type_size(ERTS_ALC_T_BIF_TIMER);
  594. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_NIF_EXP_TRACE)]
  595. = sizeof(NifExportTrace);
  596. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MREF_NSCHED_ENT)]
  597. = sizeof(ErtsNSchedMagicRefTableEntry);
  598. fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MINDIRECTION)]
  599. = ERTS_MAGIC_BIN_UNALIGNED_SIZE(sizeof(ErtsMagicIndirectionWord));
  600. #ifdef HARD_DEBUG
  601. hdbg_init();
  602. #endif
  603. lock_all_physical_memory = 0;
  604. ncpu = eaiop->ncpu;
  605. if (ncpu < 1)
  606. ncpu = 1;
  607. erts_tsd_key_create(&erts_allctr_prelock_tsd_key,
  608. "erts_allctr_prelock_tsd_key");
  609. erts_sys_alloc_init();
  610. erts_init_utils_mem();
  611. set_default_sl_alloc_opts(&init.sl_alloc);
  612. set_default_std_alloc_opts(&init.std_alloc);
  613. set_default_ll_alloc_opts(&init.ll_alloc);
  614. set_default_temp_alloc_opts(&init.temp_alloc);
  615. set_default_eheap_alloc_opts(&init.eheap_alloc);
  616. set_default_binary_alloc_opts(&init.binary_alloc);
  617. set_default_ets_alloc_opts(&init.ets_alloc);
  618. set_default_driver_alloc_opts(&init.driver_alloc);
  619. set_default_fix_alloc_opts(&init.fix_alloc,
  620. fix_type_sizes);
  621. set_default_literal_alloc_opts(&init.literal_alloc);
  622. #ifdef ERTS_ALC_A_EXEC
  623. set_default_exec_alloc_opts(&init.exec_alloc);
  624. #endif
  625. set_default_test_alloc_opts(&init.test_alloc);
  626. if (argc && argv)
  627. handle_args(argc, argv, &init);
  628. if (lock_all_physical_memory) {
  629. #ifdef HAVE_MLOCKALL
  630. errno = 0;
  631. if (mlockall(MCL_CURRENT|MCL_FUTURE) != 0) {
  632. int err = errno;
  633. char *errstr = err ? strerror(err) : "unknown";
  634. erts_exit(1, "Failed to lock physical memory: %s (%d)\n",
  635. errstr, err);
  636. }
  637. #else
  638. erts_exit(1, "Failed to lock physical memory: Not supported\n");
  639. #endif
  640. }
  641. /* Make adjustments for carrier migration support */
  642. init.temp_alloc.init.util.acul = 0;
  643. adjust_carrier_migration_support(&init.sl_alloc);
  644. adjust_carrier_migration_support(&init.std_alloc);
  645. adjust_carrier_migration_support(&init.ll_alloc);
  646. adjust_carrier_migration_support(&init.eheap_alloc);
  647. adjust_carrier_migration_support(&init.binary_alloc);
  648. adjust_carrier_migration_support(&init.ets_alloc);
  649. adjust_carrier_migration_support(&init.driver_alloc);
  650. adjust_carrier_migration_support(&init.fix_alloc);
  651. adjust_carrier_migration_support(&init.literal_alloc);
  652. #ifdef ERTS_ALC_A_EXEC
  653. adjust_carrier_migration_support(&init.exec_alloc);
  654. #endif
  655. if (init.erts_alloc_config) {
  656. /* Adjust flags that erts_alloc_config won't like */
  657. /* No thread specific instances */
  658. init.temp_alloc.thr_spec = 0;
  659. init.sl_alloc.thr_spec = 0;
  660. init.std_alloc.thr_spec = 0;
  661. init.ll_alloc.thr_spec = 0;
  662. init.eheap_alloc.thr_spec = 0;
  663. init.binary_alloc.thr_spec = 0;
  664. init.ets_alloc.thr_spec = 0;
  665. init.driver_alloc.thr_spec = 0;
  666. init.fix_alloc.thr_spec = 0;
  667. init.literal_alloc.thr_spec = 0;
  668. #ifdef ERTS_ALC_A_EXEC
  669. init.exec_alloc.thr_spec = 0;
  670. #endif
  671. /* No carrier migration */
  672. init.temp_alloc.init.util.acul = 0;
  673. init.sl_alloc.init.util.acul = 0;
  674. init.std_alloc.init.util.acul = 0;
  675. init.ll_alloc.init.util.acul = 0;
  676. init.eheap_alloc.init.util.acul = 0;
  677. init.binary_alloc.init.util.acul = 0;
  678. init.ets_alloc.init.util.acul = 0;
  679. init.driver_alloc.init.util.acul = 0;
  680. init.fix_alloc.init.util.acul = 0;
  681. init.literal_alloc.init.util.acul = 0;
  682. #ifdef ERTS_ALC_A_EXEC
  683. init.exec_alloc.init.util.acul = 0;
  684. #endif
  685. }
  686. /* Only temp_alloc can use thread specific interface */
  687. if (init.temp_alloc.thr_spec)
  688. init.temp_alloc.thr_spec = erts_no_schedulers;
  689. /* Others must use thread preferred interface */
  690. adjust_tpref(&init.sl_alloc, erts_no_schedulers);
  691. adjust_tpref(&init.std_alloc, erts_no_schedulers);
  692. adjust_tpref(&init.ll_alloc, erts_no_schedulers);
  693. adjust_tpref(&init.eheap_alloc, erts_no_schedulers);
  694. adjust_tpref(&init.binary_alloc, erts_no_schedulers);
  695. adjust_tpref(&init.ets_alloc, erts_no_schedulers);
  696. adjust_tpref(&init.driver_alloc, erts_no_schedulers);
  697. adjust_tpref(&init.fix_alloc, erts_no_schedulers);
  698. adjust_tpref(&init.literal_alloc, erts_no_schedulers);
  699. #ifdef ERTS_ALC_A_EXEC
  700. adjust_tpref(&init.exec_alloc, erts_no_schedulers);
  701. #endif
  702. /*
  703. * The following allocators cannot be run with afit strategy.
  704. * Make sure they don't...
  705. */
  706. refuse_af_strategy(&init.sl_alloc);
  707. refuse_af_strategy(&init.std_alloc);
  708. refuse_af_strategy(&init.ll_alloc);
  709. refuse_af_strategy(&init.eheap_alloc);
  710. refuse_af_strategy(&init.binary_alloc);
  711. refuse_af_strategy(&init.ets_alloc);
  712. refuse_af_strategy(&init.driver_alloc);
  713. refuse_af_strategy(&init.fix_alloc);
  714. refuse_af_strategy(&init.literal_alloc);
  715. #ifdef ERTS_ALC_A_EXEC
  716. refuse_af_strategy(&init.exec_alloc);
  717. #endif
  718. if (!init.temp_alloc.thr_spec)
  719. refuse_af_strategy(&init.temp_alloc);
  720. erts_mtrace_pre_init();
  721. #if HAVE_ERTS_MSEG
  722. init.mseg.nos = erts_no_schedulers;
  723. erts_mseg_init(&init.mseg);
  724. #endif
  725. erts_alcu_init(&init.alloc_util);
  726. erts_afalc_init();
  727. erts_bfalc_init();
  728. erts_gfalc_init();
  729. erts_aoffalc_init();
  730. for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
  731. erts_allctrs[i].alloc = NULL;
  732. erts_allctrs[i].realloc = NULL;
  733. erts_allctrs[i].free = NULL;
  734. erts_allctrs[i].extra = NULL;
  735. erts_allctrs_info[i].alloc_util = 0;
  736. erts_allctrs_info[i].enabled = 0;
  737. erts_allctrs_info[i].thr_spec = 0;
  738. erts_allctrs_info[i].extra = NULL;
  739. }
  740. erts_allctrs[ERTS_ALC_A_SYSTEM].alloc = erts_sys_alloc;
  741. erts_allctrs[ERTS_ALC_A_SYSTEM].realloc = erts_sys_realloc;
  742. erts_allctrs[ERTS_ALC_A_SYSTEM].free = erts_sys_free;
  743. erts_allctrs_info[ERTS_ALC_A_SYSTEM].enabled = 1;
  744. set_au_allocator(ERTS_ALC_A_TEMPORARY, &init.temp_alloc, ncpu);
  745. set_au_allocator(ERTS_ALC_A_SHORT_LIVED, &init.sl_alloc, ncpu);
  746. set_au_allocator(ERTS_ALC_A_STANDARD, &init.std_alloc, ncpu);
  747. set_au_allocator(ERTS_ALC_A_LONG_LIVED, &init.ll_alloc, ncpu);
  748. set_au_allocator(ERTS_ALC_A_EHEAP, &init.eheap_alloc, ncpu);
  749. set_au_allocator(ERTS_ALC_A_BINARY, &init.binary_alloc, ncpu);
  750. set_au_allocator(ERTS_ALC_A_ETS, &init.ets_alloc, ncpu);
  751. set_au_allocator(ERTS_ALC_A_DRIVER, &init.driver_alloc, ncpu);
  752. set_au_allocator(ERTS_ALC_A_FIXED_SIZE, &init.fix_alloc, ncpu);
  753. set_au_allocator(ERTS_ALC_A_LITERAL, &init.literal_alloc, ncpu);
  754. #ifdef ERTS_ALC_A_EXEC
  755. set_au_allocator(ERTS_ALC_A_EXEC, &init.exec_alloc, ncpu);
  756. #endif
  757. set_au_allocator(ERTS_ALC_A_TEST, &init.test_alloc, ncpu);
  758. for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
  759. if (!erts_allctrs[i].alloc)
  760. erts_exit(ERTS_ABORT_EXIT,
  761. "Missing alloc function for %s\n", ERTS_ALC_A2AD(i));
  762. if (!erts_allctrs[i].realloc)
  763. erts_exit(ERTS_ABORT_EXIT,
  764. "Missing realloc function for %s\n", ERTS_ALC_A2AD(i));
  765. if (!erts_allctrs[i].free)
  766. erts_exit(ERTS_ABORT_EXIT,
  767. "Missing free function for %s\n", ERTS_ALC_A2AD(i));
  768. }
  769. sys_alloc_opt(SYS_ALLOC_OPT_TRIM_THRESHOLD, init.trim_threshold);
  770. sys_alloc_opt(SYS_ALLOC_OPT_TOP_PAD, init.top_pad);
  771. erts_mtrace_init(init.instr.mtrace, init.instr.nodename);
  772. start_au_allocator(ERTS_ALC_A_TEMPORARY,
  773. &init.temp_alloc,
  774. &temp_alloc_state);
  775. start_au_allocator(ERTS_ALC_A_SHORT_LIVED,
  776. &init.sl_alloc,
  777. &sl_alloc_state);
  778. start_au_allocator(ERTS_ALC_A_STANDARD,
  779. &init.std_alloc,
  780. &std_alloc_state);
  781. start_au_allocator(ERTS_ALC_A_LONG_LIVED,
  782. &init.ll_alloc,
  783. &ll_alloc_state);
  784. start_au_allocator(ERTS_ALC_A_EHEAP,
  785. &init.eheap_alloc,
  786. &eheap_alloc_state);
  787. start_au_allocator(ERTS_ALC_A_BINARY,
  788. &init.binary_alloc,
  789. &binary_alloc_state);
  790. start_au_allocator(ERTS_ALC_A_ETS,
  791. &init.ets_alloc,
  792. &ets_alloc_state);
  793. start_au_allocator(ERTS_ALC_A_DRIVER,
  794. &init.driver_alloc,
  795. &driver_alloc_state);
  796. start_au_allocator(ERTS_ALC_A_FIXED_SIZE,
  797. &init.fix_alloc,
  798. &fix_alloc_state);
  799. start_au_allocator(ERTS_ALC_A_LITERAL,
  800. &init.literal_alloc,
  801. &literal_alloc_state);
  802. #ifdef ERTS_ALC_A_EXEC
  803. start_au_allocator(ERTS_ALC_A_EXEC,
  804. &init.exec_alloc,
  805. &exec_alloc_state);
  806. #endif
  807. start_au_allocator(ERTS_ALC_A_TEST,
  808. &init.test_alloc,
  809. &test_alloc_state);
  810. erts_mtrace_install_wrapper_functions();
  811. init_aireq_alloc();
  812. #ifdef DEBUG
  813. extra_block_size += install_debug_functions();
  814. #endif
  815. adjust_fix_alloc_sizes(extra_block_size);
  816. }
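/*
 * Summary of erts_alloc_init() above, for orientation: (1) fill in
 * fix_type_sizes and the per-allocator defaults, (2) apply "+M..." command-line
 * overrides via handle_args(), (3) adjust for carrier migration,
 * erts_alloc_config and thread preference, (4) initialize mseg, alloc_util and
 * the four strategy implementations, (5) wire the erts_allctrs dispatch table
 * with set_au_allocator(), (6) start each enabled instance with
 * start_au_allocator(), and (7) in DEBUG builds, grow the fixed-size block
 * sizes via adjust_fix_alloc_sizes() to make room for the debug wrappers.
 */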
  817. void
  818. erts_alloc_late_init(void)
  819. {
  820. }
  821. static void *
  822. erts_realloc_fixed_size(ErtsAlcType_t type, void *extra, void *p, Uint size)
  823. {
  824. erts_exit(ERTS_ABORT_EXIT,
  825. "Attempt to reallocate a block of the fixed size type %s\n",
  826. ERTS_ALC_T2TD(type));
  827. }
  828. static void
  829. set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init, int ncpu)
  830. {
  831. ErtsAllocatorFunctions_t *af = &erts_allctrs[alctr_n];
  832. ErtsAllocatorInfo_t *ai = &erts_allctrs_info[alctr_n];
  833. ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[alctr_n];
  834. /*
  835. * Some allocators are forced on if halfword heap is used.
  836. */
  837. if (init->init.util.force)
  838. init->enable = 1;
  839. tspec->enabled = 0;
  840. tspec->dd = 0;
  841. tspec->aix = alctr_n;
  842. tspec->size = 0;
  843. ai->thr_spec = 0;
  844. if (!init->enable) {
  845. af->alloc = erts_sys_alloc;
  846. af->realloc = erts_sys_realloc;
  847. af->free = erts_sys_free;
  848. af->extra = NULL;
  849. ai->alloc_util = 0;
  850. ai->enabled = 0;
  851. ai->extra = NULL;
  852. return;
  853. }
  854. if (init->thr_spec) {
  855. if (init->thr_spec > 0) {
  856. af->alloc = erts_alcu_alloc_thr_spec;
  857. if (init->init.util.fix_type_size)
  858. af->realloc = erts_realloc_fixed_size;
  859. else if (init->init.util.ramv)
  860. af->realloc = erts_alcu_realloc_mv_thr_spec;
  861. else
  862. af->realloc = erts_alcu_realloc_thr_spec;
  863. af->free = erts_alcu_free_thr_spec;
  864. }
  865. else {
  866. af->alloc = erts_alcu_alloc_thr_pref;
  867. if (init->init.util.fix_type_size)
  868. af->realloc = erts_realloc_fixed_size;
  869. else if (init->init.util.ramv)
  870. af->realloc = erts_alcu_realloc_mv_thr_pref;
  871. else
  872. af->realloc = erts_alcu_realloc_thr_pref;
  873. af->free = erts_alcu_free_thr_pref;
  874. tspec->dd = 1;
  875. }
  876. tspec->enabled = 1;
  877. tspec->size = abs(init->thr_spec) + 1;
  878. ai->thr_spec = tspec->size;
  879. }
  880. else
  881. if (init->init.util.ts) {
  882. af->alloc = erts_alcu_alloc_ts;
  883. if (init->init.util.fix_type_size)
  884. af->realloc = erts_realloc_fixed_size;
  885. else if (init->init.util.ramv)
  886. af->realloc = erts_alcu_realloc_mv_ts;
  887. else
  888. af->realloc = erts_alcu_realloc_ts;
  889. af->free = erts_alcu_free_ts;
  890. }
  891. else
  892. {
  893. erts_exit(ERTS_ABORT_EXIT, "%salloc is not thread safe\n",
  894. init->init.util.name_prefix);
  895. }
  896. af->extra = NULL;
  897. ai->alloc_util = 1;
  898. ai->enabled = 1;
  899. }
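/*
 * The selection above can be read as a small decision table (a sketch, not a
 * spec): thr_spec > 0 picks the *_thr_spec entry points; thr_spec < 0 picks
 * *_thr_pref and enables delayed dealloc (tspec->dd = 1); thr_spec == 0 with
 * init.util.ts set picks the locked *_ts entry points; anything else aborts as
 * not thread safe. Allocators with fixed-size types always get
 * erts_realloc_fixed_size(), which aborts on any realloc attempt.
 */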
  900. static void
  901. start_au_allocator(ErtsAlcType_t alctr_n,
  902. struct au_init *init,
  903. ErtsAllocatorState_t *state)
  904. {
  905. int i;
  906. int size = 1;
  907. void *as0;
  908. ErtsAlcStrat_t astrat;
  909. ErtsAllocatorFunctions_t *af = &erts_allctrs[alctr_n];
  910. ErtsAllocatorInfo_t *ai = &erts_allctrs_info[alctr_n];
  911. ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[alctr_n];
  912. ErtsAlcFixList_t *fix_lists = NULL;
  913. size_t fix_list_size = 0;
  914. if (!init->enable)
  915. return;
  916. if (init->thr_spec) {
  917. char *states = erts_sys_alloc(0,
  918. NULL,
  919. ((sizeof(Allctr_t *)
  920. * (tspec->size + 1))
  921. + (sizeof(ErtsAllocatorState_t)
  922. * tspec->size)
  923. + ERTS_CACHE_LINE_SIZE - 1));
  924. if (!states)
  925. erts_exit(ERTS_ABORT_EXIT,
  926. "Failed to allocate allocator states for %salloc\n",
  927. init->init.util.name_prefix);
  928. tspec->allctr = (Allctr_t **) states;
  929. states += sizeof(Allctr_t *) * (tspec->size + 1);
  930. states = ((((UWord) states) & ERTS_CACHE_LINE_MASK)
  931. ? (char *) ((((UWord) states) & ~ERTS_CACHE_LINE_MASK)
  932. + ERTS_CACHE_LINE_SIZE)
  933. : (char *) states);
  934. tspec->allctr[0] = (Allctr_t *) state;
  935. size = tspec->size;
  936. for (i = 1; i < size; i++)
  937. tspec->allctr[i] = (Allctr_t *)
  938. &((ErtsAllocatorState_t *) states)[i-1];
  939. }
  940. if (init->init.util.fix_type_size) {
  941. size_t tot_fix_list_size;
  942. fix_list_size = sizeof(ErtsAlcFixList_t)*ERTS_ALC_NO_FIXED_SIZES;
  943. fix_list_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(fix_list_size);
  944. tot_fix_list_size = fix_list_size;
  945. if (init->thr_spec)
  946. tot_fix_list_size *= tspec->size;
  947. fix_lists = erts_sys_alloc(0,
  948. NULL,
  949. (tot_fix_list_size
  950. + ERTS_CACHE_LINE_SIZE - 1));
  951. if (!fix_lists)
  952. erts_exit(ERTS_ABORT_EXIT,
  953. "Failed to allocate fix lists for %salloc\n",
  954. init->init.util.name_prefix);
  955. if (((UWord) fix_lists) & ERTS_CACHE_LINE_MASK)
  956. fix_lists = ((ErtsAlcFixList_t *)
  957. ((((UWord) fix_lists) & ~ERTS_CACHE_LINE_MASK)
  958. + ERTS_CACHE_LINE_SIZE));
  959. }
  960. for (i = 0; i < size; i++) {
  961. Allctr_t *as;
  962. astrat = init->astrat;
  963. if (!init->thr_spec)
  964. as0 = state;
  965. else {
  966. as0 = (void *) tspec->allctr[i];
  967. if (!as0)
  968. continue;
  969. if (init->thr_spec < 0) {
  970. init->init.util.ts = i == 0;
  971. init->init.util.tspec = 0;
  972. init->init.util.tpref = -1*init->thr_spec + 1;
  973. }
  974. else {
  975. if (i != 0)
  976. init->init.util.ts = 0;
  977. else {
  978. if (astrat == ERTS_ALC_S_AFIT)
  979. astrat = ERTS_ALC_S_GOODFIT;
  980. init->init.util.ts = 1;
  981. }
  982. init->init.util.tspec = init->thr_spec + 1;
  983. init->init.util.tpref = 0;
  984. }
  985. }
  986. if (fix_lists) {
  987. init->init.util.fix = fix_lists;
  988. fix_lists = ((ErtsAlcFixList_t *)
  989. (((char *) fix_lists) + fix_list_size));
  990. }
  991. init->init.util.alloc_strat = astrat;
  992. init->init.util.ix = i;
  993. switch (astrat) {
  994. case ERTS_ALC_S_GOODFIT:
  995. as = erts_gfalc_start((GFAllctr_t *) as0,
  996. &init->init.gf,
  997. &init->init.util);
  998. break;
  999. case ERTS_ALC_S_BESTFIT:
  1000. as = erts_bfalc_start((BFAllctr_t *) as0,
  1001. &init->init.bf,
  1002. &init->init.util);
  1003. break;
  1004. case ERTS_ALC_S_AFIT:
  1005. as = erts_afalc_start((AFAllctr_t *) as0,
  1006. &init->init.af,
  1007. &init->init.util);
  1008. break;
  1009. case ERTS_ALC_S_FIRSTFIT:
  1010. as = erts_aoffalc_start((AOFFAllctr_t *) as0,
  1011. &init->init.aoff,
  1012. &init->init.util);
  1013. break;
  1014. default:
  1015. as = NULL;
  1016. ASSERT(0);
  1017. }
  1018. if (!as)
  1019. erts_exit(ERTS_ABORT_EXIT,
  1020. "Failed to start %salloc\n", init->init.util.name_prefix);
  1021. ASSERT(as == (void *) as0);
  1022. af->extra = as;
  1023. }
  1024. if (init->thr_spec)
  1025. af->extra = tspec;
  1026. ai->extra = af->extra;
  1027. }
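/*
 * Rough layout of the "states" block allocated above for a thread-spec/pref
 * allocator (a sketch): an array of tspec->size + 1 Allctr_t pointers, then,
 * rounded up to the next cache line, an array of ErtsAllocatorState_t slots.
 * tspec->allctr[0] points at the statically allocated *state argument, and
 * allctr[i] for i >= 1 at slot i-1 inside this block.
 */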
  1028. static void bad_param(char *param_start, char *param_end)
  1029. {
  1030. size_t len = param_end - param_start;
  1031. char param[100];
  1032. if (len > 99)
  1033. len = 99;
  1034. sys_memcpy((void *) param, (void *) param_start, len);
  1035. param[len] = '\0';
  1036. erts_fprintf(stderr, "bad \"%s\" parameter\n", param);
  1037. erts_usage();
  1038. }
  1039. static void bad_value(char *param_start, char *param_end, char *value)
  1040. {
  1041. size_t len = param_end - param_start;
  1042. char param[100];
  1043. if (len > 99)
  1044. len = 99;
  1045. sys_memcpy((void *) param, (void *) param_start, len);
  1046. param[len] = '\0';
  1047. erts_fprintf(stderr, "bad \"%s\" value: %s\n", param, value);
  1048. erts_usage();
  1049. }
  1050. /* Get arg marks argument as handled by
  1051. putting NULL in argv */
  1052. static char *
  1053. get_value(char* rest, char** argv, int* ip)
  1054. {
  1055. char *param = argv[*ip]+1;
  1056. argv[*ip] = NULL;
  1057. if (*rest == '\0') {
  1058. char *next = argv[*ip + 1];
  1059. if (next[0] == '-'
  1060. && next[1] == '-'
  1061. && next[2] == '\0') {
  1062. bad_value(param, rest, "");
  1063. }
  1064. (*ip)++;
  1065. argv[*ip] = NULL;
  1066. return next;
  1067. }
  1068. return rest;
  1069. }
  1070. static ERTS_INLINE int
  1071. has_prefix(const char *prefix, const char *string)
  1072. {
  1073. int i;
  1074. for (i = 0; prefix[i]; i++)
  1075. if (prefix[i] != string[i])
  1076. return 0;
  1077. return 1;
  1078. }
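/*
 * Note that has_prefix() only compares the characters of `prefix`; e.g.
 * has_prefix("as", "asbcst") is also true, which is why the callers below test
 * the longer sub-switches ("asbcst") before the shorter ones ("as").
 */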
  1079. static int
  1080. get_bool_value(char *param_end, char** argv, int* ip)
  1081. {
  1082. char *param = argv[*ip]+1;
  1083. char *value = get_value(param_end, argv, ip);
  1084. if (sys_strcmp(value, "true") == 0)
  1085. return 1;
  1086. else if (sys_strcmp(value, "false") == 0)
  1087. return 0;
  1088. else
  1089. bad_value(param, param_end, value);
  1090. return -1;
  1091. }
  1092. static Uint kb_to_bytes(Sint kb, Uint *bytes)
  1093. {
  1094. const Uint max = ((~((Uint) 0))/1024) + 1;
  1095. if (kb < 0 || (Uint)kb > max)
  1096. return 0;
  1097. if ((Uint)kb == max)
  1098. *bytes = ~((Uint) 0);
  1099. else
  1100. *bytes = ((Uint) kb)*1024;
  1101. return 1;
  1102. }
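/*
 * Worked examples: kb_to_bytes(128, &b) stores 131072; a negative kb, or one
 * larger than (~(Uint) 0)/1024 + 1, is rejected (returns 0); kb equal to that
 * maximum saturates *bytes to ~(Uint) 0 instead of overflowing.
 */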
  1103. static Uint
  1104. get_kb_value(char *param_end, char** argv, int* ip)
  1105. {
  1106. Sint tmp;
  1107. Uint bytes = 0;
  1108. char *rest;
  1109. char *param = argv[*ip]+1;
  1110. char *value = get_value(param_end, argv, ip);
  1111. errno = 0;
  1112. tmp = (Sint) ErtsStrToSint(value, &rest, 10);
  1113. if (errno != 0 || rest == value || !kb_to_bytes(tmp, &bytes))
  1114. bad_value(param, param_end, value);
  1115. return bytes;
  1116. }
  1117. static UWord
  1118. get_mb_value(char *param_end, char** argv, int* ip)
  1119. {
  1120. SWord tmp;
  1121. UWord max = ((~((UWord) 0))/(1024*1024)) + 1;
  1122. char *rest;
  1123. char *param = argv[*ip]+1;
  1124. char *value = get_value(param_end, argv, ip);
  1125. errno = 0;
  1126. tmp = (SWord) ErtsStrToSint(value, &rest, 10);
  1127. if (errno != 0 || rest == value || tmp < 0 || max < ((UWord) tmp))
  1128. bad_value(param, param_end, value);
  1129. if (max == (UWord) tmp)
  1130. return ~((UWord) 0);
  1131. else
  1132. return ((UWord) tmp)*1024*1024;
  1133. }
  1134. #if 0
  1135. static Uint
  1136. get_byte_value(char *param_end, char** argv, int* ip)
  1137. {
  1138. Sint tmp;
  1139. char *rest;
  1140. char *param = argv[*ip]+1;
  1141. char *value = get_value(param_end, argv, ip);
  1142. errno = 0;
  1143. tmp = (Sint) ErtsStrToSint(value, &rest, 10);
  1144. if (errno != 0 || rest == value || tmp < 0)
  1145. bad_value(param, param_end, value);
  1146. return (Uint) tmp;
  1147. }
  1148. #endif
  1149. static Uint
  1150. get_amount_value(char *param_end, char** argv, int* ip)
  1151. {
  1152. Sint tmp;
  1153. char *rest;
  1154. char *param = argv[*ip]+1;
  1155. char *value = get_value(param_end, argv, ip);
  1156. errno = 0;
  1157. tmp = (Sint) ErtsStrToSint(value, &rest, 10);
  1158. if (errno != 0 || rest == value || tmp < 0)
  1159. bad_value(param, param_end, value);
  1160. return (Uint) tmp;
  1161. }
  1162. static Uint
  1163. get_acul_value(struct au_init *auip, char *param_end, char** argv, int* ip)
  1164. {
  1165. Sint tmp;
  1166. char *rest;
  1167. char *param = argv[*ip]+1;
  1168. char *value = get_value(param_end, argv, ip);
  1169. if (sys_strcmp(value, "de") == 0) {
  1170. switch (auip->init.util.alloc_no) {
  1171. case ERTS_ALC_A_LONG_LIVED:
  1172. return ERTS_ALC_DEFAULT_ENABLED_ACUL_LL_ALLOC;
  1173. case ERTS_ALC_A_EHEAP:
  1174. return ERTS_ALC_DEFAULT_ENABLED_ACUL_EHEAP_ALLOC;
  1175. default:
  1176. return ERTS_ALC_DEFAULT_ENABLED_ACUL;
  1177. }
  1178. }
  1179. errno = 0;
  1180. tmp = (Sint) ErtsStrToSint(value, &rest, 10);
  1181. if (errno != 0 || rest == value || tmp < 0 || 100 < tmp)
  1182. bad_value(param, param_end, value);
  1183. return (Uint) tmp;
  1184. }
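/*
 * Example: "+MHacul de" resolves to 45 (eheap_alloc), "+MLacul de" to 85
 * (ll_alloc), and "de" for any other allocator to 60; an explicit numeric
 * value such as "+MBacul 20" must lie in 0..100 or bad_value() is called.
 */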
  1185. static void
  1186. handle_au_arg(struct au_init *auip,
  1187. char* sub_param,
  1188. char** argv,
  1189. int* ip,
  1190. int u_switch)
  1191. {
  1192. char *param = argv[*ip]+1;
  1193. switch (sub_param[0]) {
  1194. case 'a':
  1195. if (sub_param[1] == 'c') { /* Migration parameters "ac*" */
  1196. UWord value;
  1197. UWord* wp;
  1198. if (!auip->carrier_migration_allowed && !u_switch)
  1199. goto bad_switch;
  1200. if (has_prefix("acul", sub_param)) {
  1201. value = get_acul_value(auip, sub_param + 4, argv, ip);
  1202. wp = &auip->init.util.acul;
  1203. }
  1204. else if (has_prefix("acnl", sub_param)) {
  1205. value = get_amount_value(sub_param + 4, argv, ip);
  1206. wp = &auip->init.util.acnl;
  1207. }
  1208. else if (has_prefix("acfml", sub_param)) {
  1209. value = get_amount_value(sub_param + 5, argv, ip);
  1210. wp = &auip->init.util.acfml;
  1211. }
  1212. else
  1213. goto bad_switch;
  1214. if (auip->carrier_migration_allowed)
  1215. *wp = value;
  1216. }
  1217. else if(has_prefix("asbcst", sub_param)) {
  1218. auip->init.util.asbcst = get_kb_value(sub_param + 6, argv, ip);
  1219. }
  1220. else if(has_prefix("as", sub_param)) {
  1221. char *alg = get_value(sub_param + 2, argv, ip);
  1222. if (sys_strcmp("bf", alg) == 0) {
  1223. auip->astrat = ERTS_ALC_S_BESTFIT;
  1224. auip->init.bf.ao = 0;
  1225. }
  1226. else if (sys_strcmp("aobf", alg) == 0) {
  1227. auip->astrat = ERTS_ALC_S_BESTFIT;
  1228. auip->init.bf.ao = 1;
  1229. }
  1230. else if (sys_strcmp("gf", alg) == 0) {
  1231. auip->astrat = ERTS_ALC_S_GOODFIT;
  1232. }
  1233. else if (sys_strcmp("af", alg) == 0) {
  1234. auip->astrat = ERTS_ALC_S_AFIT;
  1235. }
  1236. else if (sys_strcmp("aoff", alg) == 0) {
  1237. auip->astrat = ERTS_ALC_S_FIRSTFIT;
  1238. auip->init.aoff.crr_order = FF_AOFF;
  1239. auip->init.aoff.blk_order = FF_AOFF;
  1240. }
  1241. else if (sys_strcmp("aoffcbf", alg) == 0) {
  1242. auip->astrat = ERTS_ALC_S_FIRSTFIT;
  1243. auip->init.aoff.crr_order = FF_AOFF;
  1244. auip->init.aoff.blk_order = FF_BF;
  1245. }
  1246. else if (sys_strcmp("aoffcaobf", alg) == 0) {
  1247. auip->astrat = ERTS_ALC_S_FIRSTFIT;
  1248. auip->init.aoff.crr_order = FF_AOFF;
  1249. auip->init.aoff.blk_order = FF_AOBF;
  1250. }
  1251. else if (sys_strcmp("ageffcaoff", alg) == 0) {
  1252. auip->astrat = ERTS_ALC_S_FIRSTFIT;
  1253. auip->init.aoff.crr_order = FF_AGEFF;
  1254. auip->init.aoff.blk_order = FF_AOFF;
  1255. }
  1256. else if (sys_strcmp("ageffcbf", alg) == 0) {
  1257. auip->astrat = ERTS_ALC_S_FIRSTFIT;
  1258. auip->init.aoff.crr_order = FF_AGEFF;
  1259. auip->init.aoff.blk_order = FF_BF;
  1260. }
  1261. else if (sys_strcmp("ageffcaobf", alg) == 0) {
  1262. auip->astrat = ERTS_ALC_S_FIRSTFIT;
  1263. auip->init.aoff.crr_order = FF_AGEFF;
  1264. auip->init.aoff.blk_order = FF_AOBF;
  1265. }
  1266. else {
  1267. if (auip->init.util.alloc_no == ERTS_ALC_A_TEST
  1268. && sys_strcmp("chaosff", alg) == 0) {
  1269. auip->astrat = ERTS_ALC_S_FIRSTFIT;
  1270. auip->init.aoff.crr_order = FF_CHAOS;
  1271. auip->init.aoff.blk_order = FF_CHAOS;
  1272. }
  1273. else {
  1274. bad_value(param, sub_param + 1, alg);
  1275. }
  1276. }
  1277. if (!strategy_support_carrier_migration(auip))
  1278. auip->init.util.acul = 0;
  1279. } else if (has_prefix("atags", sub_param)) {
  1280. auip->init.util.atags = get_bool_value(sub_param + 5, argv, ip);
  1281. }
  1282. else
  1283. goto bad_switch;
  1284. break;
  1285. case 'e': {
  1286. int e = get_bool_value(sub_param + 1, argv, ip);
  1287. if (!auip->disable_allowed && !e) {
  1288. if (!u_switch)
  1289. bad_value(param, sub_param + 1, "false");
  1290. else
  1291. ASSERT(auip->enable); /* ignore */
  1292. }
  1293. else auip->enable = e;
  1294. break;
  1295. }
  1296. case 'l':
  1297. if (has_prefix("lmbcs", sub_param)) {
  1298. auip->default_.lmbcs = 0;
  1299. auip->init.util.lmbcs = get_kb_value(sub_param + 5, argv, ip);
  1300. }
  1301. else
  1302. goto bad_switch;
  1303. break;
  1304. case 'm':
  1305. if (has_prefix("mbcgs", sub_param)) {
  1306. auip->init.util.mbcgs = get_amount_value(sub_param + 5, argv, ip);
  1307. }
  1308. else if (has_prefix("mbsd", sub_param)) {
  1309. auip->init.gf.mbsd = get_amount_value(sub_param + 4, argv, ip);
  1310. if (auip->init.gf.mbsd < 1)
  1311. auip->init.gf.mbsd = 1;
  1312. }
  1313. else if (has_prefix("mmbcs", sub_param)) {
  1314. auip->default_.mmbcs = 0;
  1315. auip->init.util.mmbcs = get_kb_value(sub_param + 5, argv, ip);
  1316. }
  1317. else if (has_prefix("mmmbc", sub_param)) {
  1318. auip->default_.mmmbc = 0;
  1319. auip->init.util.mmmbc = get_amount_value(sub_param + 5, argv, ip);
  1320. }
  1321. else if (has_prefix("mmsbc", sub_param)) {
  1322. auip->init.util.mmsbc = get_amount_value(sub_param + 5, argv, ip);
  1323. }
  1324. else
  1325. goto bad_switch;
  1326. break;
  1327. case 'r':
  1328. if(has_prefix("rsbcmt", sub_param)) {
  1329. auip->init.util.rsbcmt = get_amount_value(sub_param + 6, argv, ip);
  1330. if (auip->init.util.rsbcmt > 100)
  1331. auip->init.util.rsbcmt = 100;
  1332. }
  1333. else if(has_prefix("rsbcst", sub_param)) {
  1334. auip->init.util.rsbcst = get_amount_value(sub_param + 6, argv, ip);
  1335. if (auip->init.util.rsbcst > 100)
  1336. auip->init.util.rsbcst = 100;
  1337. }
  1338. else if (has_prefix("rmbcmt", sub_param)) {
  1339. auip->init.util.rmbcmt = get_amount_value(sub_param + 6, argv, ip);
  1340. if (auip->init.util.rmbcmt > 100)
  1341. auip->init.util.rmbcmt = 100;
  1342. }
  1343. else if (has_prefix("ramv", sub_param)) {
  1344. auip->init.util.ramv = get_bool_value(sub_param + 4, argv, ip);
  1345. }
  1346. else
  1347. goto bad_switch;
  1348. break;
  1349. case 's':
  1350. if(has_prefix("sbct", sub_param)) {
  1351. auip->init.util.sbct = get_kb_value(sub_param + 4, argv, ip);
  1352. }
  1353. else if (has_prefix("smbcs", sub_param)) {
  1354. auip->default_.smbcs = 0;
  1355. auip->init.util.smbcs = get_kb_value(sub_param + 5, argv, ip);
  1356. }
  1357. else
  1358. goto bad_switch;
  1359. break;
  1360. case 't': {
  1361. int res = get_bool_value(sub_param+1, argv, ip);
  1362. if (res > 0) {
  1363. if (!auip->thr_spec_allowed) {
  1364. if (!u_switch)
  1365. bad_value(param, sub_param + 1, "true");
  1366. else
  1367. ASSERT(!auip->thr_spec); /* ignore */
  1368. }
  1369. else
  1370. auip->thr_spec = 1;
  1371. break;
  1372. }
  1373. else if (res == 0) {
  1374. auip->thr_spec = 0;
  1375. auip->init.util.acul = 0;
  1376. break;
  1377. }
  1378. goto bad_switch;
  1379. }
  1380. default:
  1381. bad_switch:
  1382. bad_param(param, sub_param);
  1383. }
  1384. }
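/*
 * A few concrete flag forms accepted by the switch above, assuming the usual
 * "+M<allocator letter><sub-switch> <value>" shape from the command line:
 *
 *     +MBas aobf       binary_alloc: address-order best fit
 *     +MHas aoffcaobf  eheap_alloc: AOFF carrier order, AOBF block order
 *     +MEt false       ets_alloc: no thread-specific instances (also clears acul)
 *     +MFe false       accepted (fix_alloc may be disabled), while "+MIe false"
 *                      is refused since literal_alloc sets disable_allowed = 0
 */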
  1385. static void
  1386. handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
  1387. {
  1388. struct au_init *aui[] = {
  1389. &init->binary_alloc,
  1390. &init->std_alloc,
  1391. &init->ets_alloc,
  1392. &init->eheap_alloc,
  1393. &init->ll_alloc,
  1394. &init->driver_alloc,
  1395. &init->fix_alloc,
  1396. &init->sl_alloc
  1397. /* test_alloc not affected by +Mea??? or +Mu??? */
  1398. };
  1399. int aui_sz = (int) sizeof(aui)/sizeof(aui[0]);
  1400. char *arg;
  1401. char *rest;
  1402. int i, j;
  1403. i = 1;
  1404. ASSERT(argc && argv && init);
  1405. while (i < *argc) {
  1406. if(argv[i][0] == '-') {
  1407. char *param = argv[i]+1;
  1408. switch (argv[i][1]) {
  1409. case 'M':
  1410. switch (argv[i][2]) {
  1411. case 'B':
  1412. handle_au_arg(&init->binary_alloc, &argv[i][3], argv, &i, 0);
  1413. break;
  1414. case 'I':
  1415. if (has_prefix("scs", argv[i]+3)) {
  1416. #if HAVE_ERTS_MSEG
  1417. init->mseg.literal_mmap.scs =
  1418. #endif
  1419. get_mb_value(argv[i]+6, argv, &i);
  1420. }
  1421. else
  1422. handle_au_arg(&init->literal_alloc, &argv[i][3], argv, &i, 0);
  1423. break;
  1424. case 'X':
  1425. if (has_prefix("scs", argv[i]+3)) {
  1426. /* Ignore obsolete */
  1427. (void) get_mb_value(argv[i]+6, argv, &i);
  1428. }
  1429. else
  1430. handle_au_arg(&init->exec_alloc, &argv[i][3], argv, &i, 0);
  1431. break;
  1432. case 'D':
  1433. handle_au_arg(&init->std_alloc, &argv[i][3], argv, &i, 0);
  1434. break;
  1435. case 'E':
  1436. handle_au_arg(&init->ets_alloc, &argv[i][3], argv, &i, 0);
  1437. break;
  1438. case 'F':
  1439. handle_au_arg(&init->fix_alloc, &argv[i][3], argv, &i, 0);
  1440. break;
  1441. case 'H':
  1442. handle_au_arg(&init->eheap_alloc, &argv[i][3], argv, &i, 0);
  1443. break;
  1444. case 'L':
  1445. handle_au_arg(&init->ll_alloc, &argv[i][3], argv, &i, 0);
  1446. break;
  1447. case 'M':
  1448. if (has_prefix("amcbf", argv[i]+3)) {
  1449. #if HAVE_ERTS_MSEG
  1450. init->mseg.amcbf =
  1451. #endif
  1452. get_kb_value(argv[i]+8, argv, &i);
  1453. }
  1454. else if (has_prefix("rmcbf", argv[i]+3)) {
  1455. #if HAVE_ERTS_MSEG
  1456. init->mseg.rmcbf =
  1457. #endif
  1458. get_amount_value(argv[i]+8, argv, &i);
  1459. }
  1460. else if (has_prefix("mcs", argv[i]+3)) {
  1461. #if HAVE_ERTS_MSEG
  1462. init->mseg.mcs =
  1463. #endif
  1464. get_amount_value(argv[i]+6, argv, &i);
  1465. }
  1466. else if (has_prefix("scs", argv[i]+3)) {
  1467. #if HAVE_ERTS_MSEG
  1468. init->mseg.dflt_mmap.scs =
  1469. #endif
  1470. get_mb_value(argv[i]+6, argv, &i);
  1471. }
  1472. else if (has_prefix("sco", argv[i]+3)) {
  1473. #if HAVE_ERTS_MSEG
  1474. init->mseg.dflt_mmap.sco =
  1475. #endif
  1476. get_bool_value(argv[i]+6, argv, &i);
  1477. }
  1478. else if (has_prefix("scrpm", argv[i]+3)) {
  1479. #if HAVE_ERTS_MSEG
  1480. init->mseg.dflt_mmap.scrpm =
  1481. #endif
  1482. get_bool_value(argv[i]+8, argv, &i);
  1483. }
  1484. else if (has_prefix("scrfsd", argv[i]+3)) {
  1485. #if HAVE_ERTS_MSEG
  1486. init->mseg.dflt_mmap.scrfsd =
  1487. #endif
  1488. get_amount_value(argv[i]+9, argv, &i);
  1489. }
  1490. else {
  1491. bad_param(param, param+2);
  1492. }
  1493. break;
  1494. case 'R':
  1495. handle_au_arg(&init->driver_alloc, &argv[i][3], argv, &i, 0);
  1496. break;
  1497. case 'S':
  1498. handle_au_arg(&init->sl_alloc, &argv[i][3], argv, &i, 0);
  1499. break;
  1500. case 'T':
  1501. handle_au_arg(&init->temp_alloc, &argv[i][3], argv, &i, 0);
  1502. break;
  1503. case 'Z':
  1504. handle_au_arg(&init->test_alloc, &argv[i][3], argv, &i, 0);
  1505. break;
  1506. case 'Y': { /* sys_alloc */
  1507. if (has_prefix("tt", param+2)) {
  1508. /* set trim threshold */
  1509. arg = get_value(param+4, argv, &i);
  1510. errno = 0;
  1511. init->trim_threshold = (int) strtol(arg, &rest, 10);
  1512. if (errno != 0
  1513. || rest == arg
  1514. || init->trim_threshold < 0
  1515. || (INT_MAX/1024) < init->trim_threshold) {
  1516. bad_value(param, param+4, arg);
  1517. }
  1518. VERBOSE(DEBUG_SYSTEM,
  1519. ("using trim threshold: %d\n",
  1520. init->trim_threshold));
  1521. init->trim_threshold *= 1024;
  1522. }
  1523. else if (has_prefix("tp", param+2)) {
  1524. /* set top pad */
  1525. arg = get_value(param+4, argv, &i);
  1526. errno = 0;
  1527. init->top_pad = (int) strtol(arg, &rest, 10);
  1528. if (errno != 0
  1529. || rest == arg
  1530. || init->top_pad < 0
  1531. || (INT_MAX/1024) < init->top_pad) {
  1532. bad_value(param, param+4, arg);
  1533. }
  1534. VERBOSE(DEBUG_SYSTEM,
  1535. ("using top pad: %d\n",init->top_pad));
  1536. init->top_pad *= 1024;
  1537. }
  1538. else if (has_prefix("m", param+2)) {
  1539. /* Has been handled by erlexec */
  1540. (void) get_value(param+3, argv, &i);
  1541. }
  1542. else if (has_prefix("e", param+2)) {
  1543. arg = get_value(param+3, argv, &i);
  1544. if (sys_strcmp("true", arg) != 0)
  1545. bad_value(param, param+3, arg);
  1546. }
  1547. else
  1548. bad_param(param, param+2);
  1549. break;
  1550. }
  1551. case 'e':
  1552. switch (argv[i][3]) {
  1553. case 'a': {
  1554. int a;
  1555. arg = get_value(argv[i]+4, argv, &i);
  1556. if (sys_strcmp("min", arg) == 0) {
  1557. for (a = 0; a < aui_sz; a++)
  1558. aui[a]->enable = 0;
  1559. }
  1560. else if (sys_strcmp("max", arg) == 0) {
  1561. for (a = 0; a < aui_sz; a++)
  1562. aui[a]->enable = 1;
  1563. }
  1564. else if (sys_strcmp("config", arg) == 0) {
  1565. init->erts_alloc_config = 1;
  1566. }
  1567. else if (sys_strcmp("r9c", arg) == 0
  1568. || sys_strcmp("r10b", arg) == 0
  1569. || sys_strcmp("r11b", arg) == 0) {
  1570. set_default_sl_alloc_opts(&init->sl_alloc);
  1571. set_default_std_alloc_opts(&init->std_alloc);
  1572. set_default_ll_alloc_opts(&init->ll_alloc);
  1573. set_default_temp_alloc_opts(&init->temp_alloc);
  1574. set_default_eheap_alloc_opts(&init->eheap_alloc);
  1575. set_default_binary_alloc_opts(&init->binary_alloc);
  1576. set_default_ets_alloc_opts(&init->ets_alloc);
  1577. set_default_driver_alloc_opts(&init->driver_alloc);
  1578. set_default_driver_alloc_opts(&init->fix_alloc);
  1579. init->driver_alloc.enable = 0;
  1580. if (sys_strcmp("r9c", arg) == 0) {
  1581. init->sl_alloc.enable = 0;
  1582. init->std_alloc.enable = 0;
  1583. init->binary_alloc.enable = 0;
  1584. init->ets_alloc.enable = 0;
  1585. }
  1586. for (a = 0; a < aui_sz; a++) {
  1587. aui[a]->thr_spec = 0;
  1588. aui[a]->init.util.acul = 0;
  1589. aui[a]->init.util.ramv = 0;
  1590. aui[a]->init.util.lmbcs = 5*1024*1024;
  1591. }
  1592. }
  1593. else {
  1594. bad_param(param, param+3);
  1595. }
  1596. break;
  1597. }
  1598. default:
  1599. bad_param(param, param+1);
  1600. }
  1601. break;
  1602. case 'i':
  1603. switch (argv[i][3]) {
  1604. case 't':
  1605. init->instr.mtrace = get_value(argv[i]+4, argv, &i);
  1606. break;
  1607. default:
  1608. bad_param(param, param+2);
  1609. }
  1610. break;
  1611. case 'l':
  1612. if (has_prefix("pm", param+2)) {
  1613. arg = get_value(argv[i]+5, argv, &i);
  1614. if (sys_strcmp("all", arg) == 0)
  1615. lock_all_physical_memory = 1;
  1616. else if (sys_strcmp("no", arg) == 0)
  1617. lock_all_physical_memory = 0;
  1618. else
  1619. bad_value(param, param+4, arg);
  1620. break;
  1621. }
  1622. bad_param(param, param+2);
  1623. break;
  1624. case 'u':
  1625. if (has_prefix("ycs", argv[i]+3)) {
  1626. init->alloc_util.ycs
  1627. = get_kb_value(argv[i]+6, argv, &i);
  1628. }
  1629. else if (has_prefix("mmc", argv[i]+3)) {
  1630. init->alloc_util.mmc
  1631. = get_amount_value(argv[i]+6, argv, &i);
  1632. }
  1633. else if (has_prefix("sac", argv[i]+3)) {
  1634. init->alloc_util.sac
  1635. = get_bool_value(argv[i]+6, argv, &i);
  1636. }
  1637. else {
  1638. int a;
  1639. int start = i;
  1640. char *param = argv[i];
  1641. char *val = i+1 < *argc ? argv[i+1] : NULL;
  1642. for (a = 0; a < aui_sz; a++) {
  1643. if (a > 0) {
  1644. ASSERT(i == start || i == start+1);
  1645. argv[start] = param;
  1646. if (i != start)
  1647. argv[start + 1] = val;
  1648. i = start;
  1649. }
  1650. handle_au_arg(aui[a], &argv[i][3], argv, &i, 1);
  1651. }
  1652. }
  1653. break;
  1654. default:
  1655. bad_param(param, param+1);
  1656. }
  1657. break;
  1658. case '-':
  1659. if (argv[i][2] == '\0') {
  1660. /* End of system flags reached */
  1661. if (init->instr.mtrace) {
  1662. while (i < *argc) {
  1663. if(sys_strcmp(argv[i], "-sname") == 0
  1664. || sys_strcmp(argv[i], "-name") == 0) {
1665. if (i + 1 < *argc) {
  1666. init->instr.nodename = argv[i+1];
  1667. break;
  1668. }
  1669. }
  1670. i++;
  1671. }
  1672. }
  1673. goto args_parsed;
  1674. }
  1675. break;
  1676. default:
  1677. break;
  1678. }
  1679. }
  1680. i++;
  1681. }
  1682. args_parsed:
  1683. /* Handled arguments have been marked with NULL. Slide arguments
  1684. not handled towards the beginning of argv. */
  1685. for (i = 0, j = 0; i < *argc; i++) {
  1686. if (argv[i])
  1687. argv[j++] = argv[i];
  1688. }
  1689. *argc = j;
  1690. }
  1691. static char *type_no_str(ErtsAlcType_t n)
  1692. {
  1693. #if ERTS_ALC_N_MIN != 0
  1694. if (n < ERTS_ALC_N_MIN)
  1695. return NULL;
  1696. #endif
  1697. if (n > ERTS_ALC_N_MAX)
  1698. return NULL;
  1699. return (char *) ERTS_ALC_N2TD(n);
  1700. }
  1701. #define type_str(T) type_no_str(ERTS_ALC_T2N((T)))
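/*
 * Called when a normal scheduler registers itself: for each enabled
 * thread-specific allocator type the scheduler's own instance is cached as
 * its preferred instance (pref_ix), and when the type uses delayed dealloc
 * (tspec->dd) that instance is also recorded as the one that handles this
 * scheduler's delayed deallocations (deallctr).
 */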
  1702. void
  1703. erts_alloc_register_scheduler(void *vesdp)
  1704. {
  1705. ErtsSchedulerData *esdp = (ErtsSchedulerData *) vesdp;
  1706. int ix = (int) esdp->no;
  1707. int aix;
  1708. ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
  1709. for (aix = ERTS_ALC_A_MIN; aix <= ERTS_ALC_A_MAX; aix++) {
  1710. ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[aix];
  1711. esdp->alloc_data.deallctr[aix] = NULL;
  1712. esdp->alloc_data.pref_ix[aix] = -1;
  1713. if (tspec->enabled) {
  1714. if (!tspec->dd)
  1715. esdp->alloc_data.pref_ix[aix] = ix;
  1716. else {
  1717. Allctr_t *allctr = tspec->allctr[ix];
  1718. ASSERT(allctr);
  1719. esdp->alloc_data.deallctr[aix] = allctr;
  1720. esdp->alloc_data.pref_ix[aix] = ix;
  1721. }
  1722. }
  1723. }
  1724. }
  1725. void
  1726. erts_alloc_scheduler_handle_delayed_dealloc(void *vesdp,
  1727. int *need_thr_progress,
  1728. ErtsThrPrgrVal *thr_prgr_p,
  1729. int *more_work)
  1730. {
  1731. ErtsSchedulerData *esdp = (ErtsSchedulerData *) vesdp;
  1732. int aix;
  1733. for (aix = ERTS_ALC_A_MIN; aix <= ERTS_ALC_A_MAX; aix++) {
  1734. Allctr_t *allctr;
  1735. if (esdp)
  1736. allctr = esdp->alloc_data.deallctr[aix];
  1737. else {
  1738. ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[aix];
  1739. if (tspec->enabled && tspec->dd)
  1740. allctr = tspec->allctr[0];
  1741. else
  1742. allctr = NULL;
  1743. }
  1744. if (allctr) {
  1745. erts_alcu_check_delayed_dealloc(allctr,
  1746. 1,
  1747. need_thr_progress,
  1748. thr_prgr_p,
  1749. more_work);
  1750. }
  1751. }
  1752. }
  1753. erts_aint32_t
  1754. erts_alloc_fix_alloc_shrink(int ix, erts_aint32_t flgs)
  1755. {
  1756. ErtsAllocatorThrSpec_t *tspec;
  1757. tspec = &erts_allctr_thr_spec[ERTS_ALC_A_FIXED_SIZE];
  1758. if (erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].thr_spec && tspec->enabled)
  1759. return erts_alcu_fix_alloc_shrink(tspec->allctr[ix], flgs);
  1760. if (ix == 0 && erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra)
  1761. return erts_alcu_fix_alloc_shrink(
  1762. erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra, flgs);
  1763. return 0;
  1764. }
  1765. static void
  1766. no_verify(Allctr_t *allctr)
  1767. {
  1768. }
  1769. erts_alloc_verify_func_t
  1770. erts_alloc_get_verify_unused_temp_alloc(Allctr_t **allctr)
  1771. {
  1772. if (erts_allctrs_info[ERTS_ALC_A_TEMPORARY].alloc_util
  1773. && erts_allctrs_info[ERTS_ALC_A_TEMPORARY].thr_spec) {
  1774. ErtsAllocatorThrSpec_t *tspec;
  1775. int ix = ERTS_ALC_GET_THR_IX();
  1776. tspec = &erts_allctr_thr_spec[ERTS_ALC_A_TEMPORARY];
  1777. if (ix < tspec->size) {
  1778. *allctr = tspec->allctr[ix];
  1779. return erts_alcu_verify_unused;
  1780. }
  1781. }
  1782. *allctr = NULL;
  1783. return no_verify;
  1784. }
  1785. __decl_noreturn void
  1786. erts_alc_fatal_error(int error, int func, ErtsAlcType_t n, ...)
  1787. {
  1788. char buf[10];
  1789. char *t_str;
  1790. char *allctr_str;
  1791. ASSERT(n >= ERTS_ALC_N_MIN);
  1792. ASSERT(n <= ERTS_ALC_N_MAX);
  1793. if (n < ERTS_ALC_N_MIN || ERTS_ALC_N_MAX < n)
  1794. allctr_str = "UNKNOWN";
  1795. else {
  1796. ErtsAlcType_t a = ERTS_ALC_T2A(ERTS_ALC_N2T(n));
  1797. if (erts_allctrs_info[a].enabled)
  1798. allctr_str = (char *) ERTS_ALC_A2AD(a);
  1799. else
  1800. allctr_str = (char *) ERTS_ALC_A2AD(ERTS_ALC_A_SYSTEM);
  1801. }
  1802. t_str = type_no_str(n);
  1803. if (!t_str) {
  1804. erts_snprintf(buf, sizeof(buf), "%d", (int) n);
  1805. t_str = buf;
  1806. }
  1807. switch (error) {
  1808. case ERTS_ALC_E_NOTSUP: {
  1809. char *op_str;
  1810. switch (func) {
  1811. case ERTS_ALC_O_ALLOC: op_str = "alloc"; break;
  1812. case ERTS_ALC_O_REALLOC: op_str = "realloc"; break;
  1813. case ERTS_ALC_O_FREE: op_str = "free"; break;
  1814. default: op_str = "UNKNOWN"; break;
  1815. }
  1816. erts_exit(ERTS_ABORT_EXIT,
  1817. "%s: %s operation not supported (memory type: \"%s\")\n",
  1818. allctr_str, op_str, t_str);
  1819. break;
  1820. }
  1821. case ERTS_ALC_E_NOMEM: {
  1822. Uint size;
  1823. va_list argp;
  1824. char *op = func == ERTS_ALC_O_REALLOC ? "reallocate" : "allocate";
  1825. va_start(argp, n);
  1826. size = va_arg(argp, Uint);
  1827. va_end(argp);
  1828. erts_exit(ERTS_DUMP_EXIT,
  1829. "%s: Cannot %s %lu bytes of memory (of type \"%s\").\n",
  1830. allctr_str, op, size, t_str);
  1831. break;
  1832. }
  1833. case ERTS_ALC_E_NOALLCTR:
  1834. erts_exit(ERTS_ABORT_EXIT,
  1835. "erts_alloc: Unknown allocator type: %d\n",
  1836. ERTS_ALC_T2A(ERTS_ALC_N2T(n)));
  1837. break;
  1838. default:
  1839. erts_exit(ERTS_ABORT_EXIT, "erts_alloc: Unknown error: %d\n", error);
  1840. break;
  1841. }
  1842. }
  1843. __decl_noreturn void
  1844. erts_alloc_enomem(ErtsAlcType_t type, Uint size)
  1845. {
  1846. erts_alloc_n_enomem(ERTS_ALC_T2N(type), size);
  1847. }
  1848. __decl_noreturn void
  1849. erts_alloc_n_enomem(ErtsAlcType_t n, Uint size)
  1850. {
  1851. erts_alc_fatal_error(ERTS_ALC_E_NOMEM, ERTS_ALC_O_ALLOC, n, size);
  1852. }
  1853. __decl_noreturn void
  1854. erts_realloc_enomem(ErtsAlcType_t type, void *ptr, Uint size)
  1855. {
  1856. erts_realloc_n_enomem(ERTS_ALC_T2N(type), ptr, size);
  1857. }
  1858. __decl_noreturn void
  1859. erts_realloc_n_enomem(ErtsAlcType_t n, void *ptr, Uint size)
  1860. {
  1861. erts_alc_fatal_error(ERTS_ALC_E_NOMEM, ERTS_ALC_O_REALLOC, n, size);
  1862. }
  1863. static ERTS_INLINE UWord
  1864. alcu_size(ErtsAlcType_t alloc_no, ErtsAlcUFixInfo_t *fi, int fisz)
  1865. {
  1866. UWord res;
  1867. int ai;
  1868. if (!erts_allctrs_info[alloc_no].thr_spec) {
  1869. AllctrSize_t size;
  1870. Allctr_t *allctr;
  1871. allctr = erts_allctrs_info[alloc_no].extra;
  1872. erts_alcu_current_size(allctr, &size, fi, fisz);
  1873. return size.blocks;
  1874. }
  1875. res = 0;
  1876. /* Thread-specific allocators can migrate carriers across types, so we have
  1877. * to visit every allocator type to gather information on blocks that were
  1878. * allocated by us. */
  1879. for (ai = ERTS_ALC_A_MIN; ai < ERTS_ALC_A_MAX; ai++) {
  1880. ErtsAllocatorThrSpec_t *tspec;
  1881. Allctr_t *allctr;
  1882. int i;
  1883. if (!erts_allctrs_info[ai].thr_spec) {
  1884. continue;
  1885. }
  1886. tspec = &erts_allctr_thr_spec[ai];
  1887. ASSERT(tspec->enabled);
  1888. for (i = tspec->size - 1; i >= 0; i--) {
  1889. allctr = tspec->allctr[i];
  1890. if (allctr) {
  1891. AllctrSize_t size;
  1892. if (ai == alloc_no) {
  1893. erts_alcu_current_size(allctr, &size, fi, fisz);
  1894. } else {
  1895. erts_alcu_foreign_size(allctr, alloc_no, &size);
  1896. }
  1897. ASSERT(((SWord)size.blocks) >= 0);
  1898. res += size.blocks;
  1899. }
  1900. }
  1901. }
  1902. return res;
  1903. }
  1904. static ERTS_INLINE void
  1905. add_fix_values(UWord *ap, UWord *up, ErtsAlcUFixInfo_t *fi, ErtsAlcType_t type)
  1906. {
  1907. int ix = ERTS_ALC_T2N(type) - ERTS_ALC_N_MIN_A_FIXED_SIZE;
  1908. ASSERT(0 <= ix && ix < ERTS_ALC_NO_FIXED_SIZES);
  1909. *ap += (UWord) fi[ix].allocated;
  1910. *up += (UWord) fi[ix].used;
  1911. }
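/*
 * Backend of erlang:memory/0,1; also prints the "=memory" section when
 * print_to_p is given. E.g. erlang:memory(processes) arrives here with earg
 * set to the atom 'processes'; only_one_value is then set and a single
 * integer is returned instead of a list of {Type, Size} tuples.
 */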
  1912. Eterm
  1913. erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg)
  1914. {
  1915. /*
  1916. * NOTE! When updating this function, make sure to also update
  1917. * erlang:memory/[0,1] in $ERL_TOP/erts/preloaded/src/erlang.erl
  1918. */
  1919. #define ERTS_MEM_NEED_ALL_ALCU (want_tot_or_sys)
  1920. struct {
  1921. int total;
  1922. int processes;
  1923. int processes_used;
  1924. int system;
  1925. int atom;
  1926. int atom_used;
  1927. int binary;
  1928. int code;
  1929. int ets;
  1930. } want = {0};
  1931. struct {
  1932. UWord total;
  1933. UWord processes;
  1934. UWord processes_used;
  1935. UWord system;
  1936. UWord atom;
  1937. UWord atom_used;
  1938. UWord binary;
  1939. UWord code;
  1940. UWord ets;
  1941. } size = {0};
  1942. Eterm atoms[sizeof(size)/sizeof(UWord)];
  1943. UWord *uintps[sizeof(size)/sizeof(UWord)];
  1944. Eterm euints[sizeof(size)/sizeof(UWord)];
  1945. int want_tot_or_sys;
  1946. int length;
  1947. Eterm res = THE_NON_VALUE;
  1948. ErtsAlcType_t ai;
  1949. int only_one_value = 0;
  1950. ErtsAlcUFixInfo_t fi[ERTS_ALC_NO_FIXED_SIZES] = {{0,0}};
  1951. ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
1952. /* Figure out what's wanted... */
  1953. length = 0;
  1954. if (is_non_value(earg)) { /* i.e. wants all */
  1955. want.total = 1;
  1956. atoms[length] = am_total;
  1957. uintps[length++] = &size.total;
  1958. want.processes = 1;
  1959. atoms[length] = am_processes;
  1960. uintps[length++] = &size.processes;
  1961. want.processes_used = 1;
  1962. atoms[length] = am_processes_used;
  1963. uintps[length++] = &size.processes_used;
  1964. want.system = 1;
  1965. atoms[length] = am_system;
  1966. uintps[length++] = &size.system;
  1967. want.atom = 1;
  1968. atoms[length] = am_atom;
  1969. uintps[length++] = &size.atom;
  1970. want.atom_used = 1;
  1971. atoms[length] = am_atom_used;
  1972. uintps[length++] = &size.atom_used;
  1973. want.binary = 1;
  1974. atoms[length] = am_binary;
  1975. uintps[length++] = &size.binary;
  1976. want.code = 1;
  1977. atoms[length] = am_code;
  1978. uintps[length++] = &size.code;
  1979. want.ets = 1;
  1980. atoms[length] = am_ets;
  1981. uintps[length++] = &size.ets;
  1982. }
  1983. else {
  1984. DeclareTmpHeapNoproc(tmp_heap,2);
  1985. Eterm wanted_list;
  1986. if (is_nil(earg))
  1987. return NIL;
  1988. UseTmpHeapNoproc(2);
  1989. if (is_not_atom(earg))
  1990. wanted_list = earg;
  1991. else {
  1992. wanted_list = CONS(&tmp_heap[0], earg, NIL);
  1993. only_one_value = 1;
  1994. }
  1995. while (is_list(wanted_list)) {
  1996. switch (CAR(list_val(wanted_list))) {
  1997. case am_total:
  1998. if (!want.total) {
  1999. want.total = 1;
  2000. atoms[length] = am_total;
  2001. uintps[length++] = &size.total;
  2002. }
  2003. break;
  2004. case am_processes:
  2005. if (!want.processes) {
  2006. want.processes = 1;
  2007. atoms[length] = am_processes;
  2008. uintps[length++] = &size.processes;
  2009. }
  2010. break;
  2011. case am_processes_used:
  2012. if (!want.processes_used) {
  2013. want.processes_used = 1;
  2014. atoms[length] = am_processes_used;
  2015. uintps[length++] = &size.processes_used;
  2016. }
  2017. break;
  2018. case am_system:
  2019. if (!want.system) {
  2020. want.system = 1;
  2021. atoms[length] = am_system;
  2022. uintps[length++] = &size.system;
  2023. }
  2024. break;
  2025. case am_atom:
  2026. if (!want.atom) {
  2027. want.atom = 1;
  2028. atoms[length] = am_atom;
  2029. uintps[length++] = &size.atom;
  2030. }
  2031. break;
  2032. case am_atom_used:
  2033. if (!want.atom_used) {
  2034. want.atom_used = 1;
  2035. atoms[length] = am_atom_used;
  2036. uintps[length++] = &size.atom_used;
  2037. }
  2038. break;
  2039. case am_binary:
  2040. if (!want.binary) {
  2041. want.binary = 1;
  2042. atoms[length] = am_binary;
  2043. uintps[length++] = &size.binary;
  2044. }
  2045. break;
  2046. case am_code:
  2047. if (!want.code) {
  2048. want.code = 1;
  2049. atoms[length] = am_code;
  2050. uintps[length++] = &size.code;
  2051. }
  2052. break;
  2053. case am_ets:
  2054. if (!want.ets) {
  2055. want.ets = 1;
  2056. atoms[length] = am_ets;
  2057. uintps[length++] = &size.ets;
  2058. }
  2059. break;
  2060. default:
  2061. UnUseTmpHeapNoproc(2);
  2062. return am_badarg;
  2063. }
  2064. wanted_list = CDR(list_val(wanted_list));
  2065. }
  2066. UnUseTmpHeapNoproc(2);
  2067. if (is_not_nil(wanted_list))
  2068. return am_badarg;
  2069. }
  2070. /* All alloc_util allocators *have* to be enabled, except test_alloc */
  2071. for (ai = ERTS_ALC_A_MIN; ai <= ERTS_ALC_A_MAX; ai++) {
  2072. switch (ai) {
  2073. case ERTS_ALC_A_SYSTEM:
  2074. case ERTS_ALC_A_TEST:
  2075. break;
  2076. default:
  2077. if (!erts_allctrs_info[ai].enabled
  2078. || !erts_allctrs_info[ai].alloc_util) {
  2079. return am_notsup;
  2080. }
  2081. break;
  2082. }
  2083. }
  2084. ASSERT(length <= sizeof(atoms)/sizeof(Eterm));
  2085. ASSERT(length <= sizeof(euints)/sizeof(Eterm));
  2086. ASSERT(length <= sizeof(uintps)/sizeof(UWord));
  2087. if (proc) {
  2088. ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN
  2089. == erts_proc_lc_my_proc_locks(proc));
  2090. /* We'll need locks early in the lock order */
  2091. erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
  2092. }
  2093. /* Calculate values needed... */
  2094. want_tot_or_sys = want.total || want.system;
  2095. if (ERTS_MEM_NEED_ALL_ALCU) {
  2096. size.total = 0;
  2097. for (ai = ERTS_ALC_A_MIN; ai <= ERTS_ALC_A_MAX; ai++) {
  2098. if (erts_allctrs_info[ai].alloc_util) {
  2099. UWord *save;
  2100. UWord asz;
  2101. switch (ai) {
  2102. case ERTS_ALC_A_TEMPORARY:
2103. /*
2104. * The temporary allocator is often not thread safe and usually never
2105. * contains any allocated memory.
2106. */
  2107. continue;
  2108. case ERTS_ALC_A_TEST:
  2109. continue;
  2110. case ERTS_ALC_A_EHEAP:
  2111. save = &size.processes;
  2112. break;
  2113. case ERTS_ALC_A_ETS:
  2114. save = &size.ets;
  2115. break;
  2116. case ERTS_ALC_A_BINARY:
  2117. save = &size.binary;
  2118. break;
  2119. case ERTS_ALC_A_FIXED_SIZE:
  2120. asz = alcu_size(ai, fi, ERTS_ALC_NO_FIXED_SIZES);
  2121. size.total += asz;
  2122. continue;
  2123. default:
  2124. save = NULL;
  2125. break;
  2126. }
  2127. asz = alcu_size(ai, NULL, 0);
  2128. if (save)
  2129. *save = asz;
  2130. size.total += asz;
  2131. }
  2132. }
  2133. }
  2134. if (want_tot_or_sys || want.processes || want.processes_used) {
  2135. UWord tmp;
  2136. if (ERTS_MEM_NEED_ALL_ALCU)
  2137. tmp = size.processes;
  2138. else {
  2139. alcu_size(ERTS_ALC_A_FIXED_SIZE,
  2140. fi, ERTS_ALC_NO_FIXED_SIZES);
  2141. tmp = alcu_size(ERTS_ALC_A_EHEAP, NULL, 0);
  2142. }
  2143. tmp += erts_ptab_mem_size(&erts_proc);
  2144. tmp += erts_bif_timer_memory_size();
  2145. size.processes = size.processes_used = tmp;
  2146. add_fix_values(&size.processes,
  2147. &size.processes_used,
  2148. fi,
  2149. ERTS_ALC_T_PROC);
  2150. add_fix_values(&size.processes,
  2151. &size.processes_used,
  2152. fi,
  2153. ERTS_ALC_T_MONITOR);
  2154. add_fix_values(&size.processes,
  2155. &size.processes_used,
  2156. fi,
  2157. ERTS_ALC_T_LINK);
  2158. add_fix_values(&size.processes,
  2159. &size.processes_used,
  2160. fi,
  2161. ERTS_ALC_T_MSG_REF);
  2162. add_fix_values(&size.processes,
  2163. &size.processes_used,
  2164. fi,
  2165. ERTS_ALC_T_LL_PTIMER);
  2166. add_fix_values(&size.processes,
  2167. &size.processes_used,
  2168. fi,
  2169. ERTS_ALC_T_HL_PTIMER);
  2170. add_fix_values(&size.processes,
  2171. &size.processes_used,
  2172. fi,
  2173. ERTS_ALC_T_BIF_TIMER);
  2174. add_fix_values(&size.processes,
  2175. &size.processes_used,
  2176. fi,
  2177. ERTS_ALC_T_NIF_EXP_TRACE);
  2178. }
  2179. if (want.atom || want.atom_used) {
  2180. Uint reserved_atom_space, atom_space;
  2181. erts_atom_get_text_space_sizes(&reserved_atom_space, &atom_space);
  2182. size.atom = size.atom_used = atom_table_sz();
  2183. if (want.atom)
  2184. size.atom += reserved_atom_space;
  2185. if (want.atom_used)
  2186. size.atom_used += atom_space;
  2187. }
  2188. if (!ERTS_MEM_NEED_ALL_ALCU && want.binary)
  2189. size.binary = alcu_size(ERTS_ALC_A_BINARY, NULL, 0);
  2190. if (want.code) {
  2191. size.code = module_table_sz();
  2192. size.code += export_table_sz();
  2193. size.code += export_entries_sz();
  2194. size.code += erts_fun_table_sz();
  2195. size.code += erts_ranges_sz();
  2196. size.code += erts_total_code_size;
  2197. }
  2198. if (want.ets) {
  2199. if (!ERTS_MEM_NEED_ALL_ALCU)
  2200. size.ets = alcu_size(ERTS_ALC_A_ETS, NULL, 0);
  2201. size.ets += erts_get_ets_misc_mem_size();
  2202. }
  2203. if (want_tot_or_sys) {
  2204. ASSERT(size.total >= size.processes);
  2205. size.system = size.total - size.processes;
  2206. }
  2207. if (print_to_p) {
  2208. int i;
  2209. fmtfn_t to = *print_to_p;
  2210. void *arg = print_to_arg;
  2211. /* Print result... */
  2212. erts_print(to, arg, "=memory\n");
  2213. for (i = 0; i < length; i++)
  2214. erts_print(to, arg, "%T: %bpu\n", atoms[i], *uintps[i]);
  2215. }
  2216. if (proc) {
  2217. /* Build erlang term result... */
  2218. Uint *hp;
  2219. Uint hsz;
  2220. erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
  2221. if (only_one_value) {
  2222. ASSERT(length == 1);
  2223. hsz = 0;
  2224. erts_bld_uword(NULL, &hsz, *uintps[0]);
  2225. hp = hsz ? HAlloc((Process *) proc, hsz) : NULL;
  2226. res = erts_bld_uword(&hp, NULL, *uintps[0]);
  2227. }
  2228. else {
  2229. Uint **hpp = NULL;
  2230. Uint *hszp = &hsz;
  2231. hsz = 0;
  2232. while (1) {
  2233. int i;
  2234. for (i = 0; i < length; i++)
  2235. euints[i] = erts_bld_uword(hpp, hszp, *uintps[i]);
  2236. res = erts_bld_2tup_list(hpp, hszp, length, atoms, euints);
  2237. if (hpp)
  2238. break;
  2239. hp = HAlloc((Process *) proc, hsz);
  2240. hpp = &hp;
  2241. hszp = NULL;
  2242. }
  2243. }
  2244. }
  2245. return res;
  2246. #undef ERTS_MEM_NEED_ALL_ALCU
  2247. }
  2248. struct aa_values {
  2249. Uint arity;
  2250. const char *name;
  2251. Uint ui[2];
  2252. };
  2253. Eterm
  2254. erts_allocated_areas(fmtfn_t *print_to_p, void *print_to_arg, void *proc)
  2255. {
  2256. #define MAX_AA_VALUES (24)
  2257. struct aa_values values[MAX_AA_VALUES];
  2258. Eterm res = THE_NON_VALUE;
  2259. int i, length;
  2260. Uint reserved_atom_space, atom_space;
  2261. if (proc) {
  2262. ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN
  2263. == erts_proc_lc_my_proc_locks(proc));
  2264. /* We'll need locks early in the lock order */
  2265. erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
  2266. }
  2267. i = 0;
  2268. values[i].arity = 2;
  2269. values[i].name = "sys_misc";
  2270. values[i].ui[0] = erts_sys_misc_mem_sz();
  2271. i++;
  2272. values[i].arity = 2;
  2273. values[i].name = "static";
  2274. values[i].ui[0] =
  2275. sizeof(ErtsPTab)*2 /* proc & port tables */
  2276. + erts_timer_wheel_memory_size(); /* Timer wheel */
  2277. i++;
  2278. erts_atom_get_text_space_sizes(&reserved_atom_space, &atom_space);
  2279. values[i].arity = 3;
  2280. values[i].name = "atom_space";
  2281. values[i].ui[0] = reserved_atom_space;
  2282. values[i].ui[1] = atom_space;
  2283. i++;
  2284. values[i].arity = 2;
  2285. values[i].name = "atom_table";
  2286. values[i].ui[0] = atom_table_sz();
  2287. i++;
  2288. values[i].arity = 2;
  2289. values[i].name = "module_table";
  2290. values[i].ui[0] = module_table_sz();
  2291. i++;
  2292. values[i].arity = 2;
  2293. values[i].name = "export_table";
  2294. values[i].ui[0] = export_table_sz();
  2295. i++;
  2296. values[i].arity = 2;
  2297. values[i].name = "export_list";
  2298. values[i].ui[0] = export_entries_sz();
  2299. i++;
  2300. values[i].arity = 2;
  2301. values[i].name = "register_table";
  2302. values[i].ui[0] = process_reg_sz();
  2303. i++;
  2304. values[i].arity = 2;
  2305. values[i].name = "fun_table";
  2306. values[i].ui[0] = erts_fun_table_sz();
  2307. i++;
  2308. values[i].arity = 2;
  2309. values[i].name = "module_refs";
  2310. values[i].ui[0] = erts_ranges_sz();
  2311. i++;
  2312. values[i].arity = 2;
  2313. values[i].name = "loaded_code";
  2314. values[i].ui[0] = erts_total_code_size;
  2315. i++;
  2316. values[i].arity = 2;
  2317. values[i].name = "dist_table";
  2318. values[i].ui[0] = erts_dist_table_size();
  2319. i++;
  2320. values[i].arity = 2;
  2321. values[i].name = "node_table";
  2322. values[i].ui[0] = erts_node_table_size();
  2323. i++;
  2324. values[i].arity = 2;
  2325. values[i].name = "bits_bufs_size";
  2326. values[i].ui[0] = erts_bits_bufs_size();
  2327. i++;
  2328. values[i].arity = 2;
  2329. values[i].name = "bif_timer";
  2330. values[i].ui[0] = erts_bif_timer_memory_size();
  2331. i++;
  2332. values[i].arity = 2;
  2333. values[i].name = "process_table";
  2334. values[i].ui[0] = erts_ptab_mem_size(&erts_proc);
  2335. i++;
  2336. values[i].arity = 2;
  2337. values[i].name = "port_table";
  2338. values[i].ui[0] = erts_ptab_mem_size(&erts_port);
  2339. i++;
  2340. values[i].arity = 2;
  2341. values[i].name = "ets_misc";
  2342. values[i].ui[0] = erts_get_ets_misc_mem_size();
  2343. i++;
  2344. length = i;
  2345. ASSERT(length <= MAX_AA_VALUES);
  2346. if (print_to_p) {
  2347. /* Print result... */
  2348. fmtfn_t to = *print_to_p;
  2349. void *arg = print_to_arg;
  2350. erts_print(to, arg, "=allocated_areas\n");
  2351. for (i = 0; i < length; i++) {
  2352. switch (values[i].arity) {
  2353. case 2:
  2354. erts_print(to, arg, "%s: %beu\n",
  2355. values[i].name, values[i].ui[0]);
  2356. break;
  2357. case 3:
  2358. erts_print(to, arg, "%s: %beu %beu\n",
  2359. values[i].name, values[i].ui[0], values[i].ui[1]);
  2360. break;
  2361. default:
  2362. erts_print(to, arg, "ERROR: internal_error\n");
  2363. ASSERT(0);
  2364. return am_internal_error;
  2365. }
  2366. }
  2367. }
  2368. if (proc) {
  2369. /* Build erlang term result... */
  2370. Eterm tuples[MAX_AA_VALUES];
  2371. Uint *hp;
  2372. Uint **hpp;
  2373. Uint hsz;
  2374. Uint *hszp;
  2375. erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
  2376. hpp = NULL;
  2377. hsz = 0;
  2378. hszp = &hsz;
  2379. while (1) {
  2380. int i;
  2381. for (i = 0; i < length; i++) {
  2382. Eterm atom;
  2383. if (hpp)
  2384. atom = am_atom_put(values[i].name,
  2385. (int) sys_strlen(values[i].name));
  2386. else
  2387. atom = am_true;
  2388. switch (values[i].arity) {
  2389. case 2:
  2390. tuples[i] = erts_bld_tuple(hpp, hszp, 2,
  2391. atom,
  2392. erts_bld_uint(hpp, hszp,
  2393. values[i].ui[0]));
  2394. break;
  2395. case 3:
  2396. tuples[i] = erts_bld_tuple(hpp, hszp, 3,
  2397. atom,
  2398. erts_bld_uint(hpp, hszp,
  2399. values[i].ui[0]),
  2400. erts_bld_uint(hpp, hszp,
  2401. values[i].ui[1]));
  2402. break;
  2403. default:
  2404. ASSERT(0);
  2405. return am_internal_error;
  2406. }
  2407. }
  2408. res = erts_bld_list(hpp, hszp, length, tuples);
  2409. if (hpp)
  2410. break;
  2411. hp = HAlloc((Process *) proc, hsz);
  2412. hpp = &hp;
  2413. hszp = NULL;
  2414. }
  2415. }
  2416. return res;
  2417. #undef MAX_AA_VALUES
  2418. }
  2419. Eterm
  2420. erts_alloc_util_allocators(void *proc)
  2421. {
  2422. Eterm res;
  2423. Uint *hp;
  2424. Uint sz;
  2425. int i;
  2426. /*
  2427. * Currently all allocators except sys_alloc are
  2428. * alloc_util allocators.
  2429. * Also hide test_alloc which is disabled by default
  2430. * and only intended for our own testing.
  2431. */
  2432. sz = ((ERTS_ALC_A_MAX + 1 - ERTS_ALC_A_MIN) - 2)*2;
  2433. ASSERT(sz > 0);
  2434. hp = HAlloc((Process *) proc, sz);
  2435. res = NIL;
  2436. for (i = ERTS_ALC_A_MAX; i >= ERTS_ALC_A_MIN; i--) {
  2437. switch (i) {
  2438. case ERTS_ALC_A_SYSTEM:
  2439. case ERTS_ALC_A_TEST:
  2440. break;
  2441. default: {
  2442. char *alc_str = (char *) ERTS_ALC_A2AD(i);
  2443. Eterm alc = am_atom_put(alc_str, sys_strlen(alc_str));
  2444. res = CONS(hp, alc, res);
  2445. hp += 2;
  2446. break;
  2447. }
  2448. }
  2449. }
  2450. return res;
  2451. }
  2452. void
  2453. erts_allocator_info(fmtfn_t to, void *arg)
  2454. {
  2455. ErtsAlcType_t a;
  2456. ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
  2457. for (a = ERTS_ALC_A_MIN; a <= ERTS_ALC_A_MAX; a++) {
  2458. int ai;
  2459. for (ai = 0; ai == 0 || ai < erts_allctrs_info[a].thr_spec; ai++) {
  2460. if (erts_allctrs_info[a].thr_spec) {
  2461. if (!erts_allctr_thr_spec[a].allctr[ai])
  2462. continue;
  2463. erts_print(to, arg, "=allocator:%s[%d]\n",
  2464. ERTS_ALC_A2AD(a), ai);
  2465. }
  2466. else {
  2467. erts_print(to, arg, "=allocator:%s\n", ERTS_ALC_A2AD(a));
  2468. }
  2469. if (!erts_allctrs_info[a].enabled)
  2470. erts_print(to, arg, "option e: false\n");
  2471. else {
  2472. if (erts_allctrs_info[a].alloc_util) {
  2473. void *as;
  2474. if (!erts_allctrs_info[a].thr_spec)
  2475. as = erts_allctrs_info[a].extra;
  2476. else {
  2477. ASSERT(erts_allctr_thr_spec[a].enabled);
  2478. as = erts_allctr_thr_spec[a].allctr[ai];
  2479. }
  2480. /* Binary alloc has its own thread safety... */
  2481. erts_alcu_info(as, 0, 0, &to, arg, NULL, NULL);
  2482. }
  2483. else {
  2484. switch (a) {
  2485. case ERTS_ALC_A_SYSTEM: {
  2486. SysAllocStat sas;
  2487. erts_print(to, arg, "option e: true\n");
  2488. erts_print(to, arg, "option m: libc\n");
  2489. sys_alloc_stat(&sas);
  2490. if(sas.trim_threshold >= 0)
  2491. erts_print(to, arg, "option tt: %d\n", sas.trim_threshold);
  2492. if(sas.top_pad >= 0)
  2493. erts_print(to, arg, "option tp: %d\n", sas.top_pad);
  2494. break;
  2495. }
  2496. default:
  2497. ASSERT(0);
  2498. break;
  2499. }
  2500. }
  2501. }
  2502. }
  2503. }
  2504. #if HAVE_ERTS_MSEG
  2505. {
  2506. struct erts_mmap_info_struct emis;
  2507. int max = (int) erts_no_schedulers;
  2508. int i;
  2509. for (i = 0; i <= max; i++) {
  2510. erts_print(to, arg, "=allocator:mseg_alloc[%d]\n", i);
  2511. erts_mseg_info(i, &to, arg, 0, 0, NULL, NULL);
  2512. }
  2513. erts_print(to, arg, "=allocator:erts_mmap.default_mmap\n");
  2514. erts_mmap_info(&erts_dflt_mmapper, &to, arg, NULL, NULL, &emis);
  2515. #if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
  2516. erts_print(to, arg, "=allocator:erts_mmap.literal_mmap\n");
  2517. erts_mmap_info(&erts_literal_mmapper, &to, arg, NULL, NULL, &emis);
  2518. #endif
  2519. }
  2520. #endif
  2521. erts_print(to, arg, "=allocator:alloc_util\n");
  2522. erts_alcu_au_info_options(&to, arg, NULL, NULL);
  2523. erts_print(to, arg, "=allocator:instr\n");
  2524. erts_print(to, arg, "option t: %s\n",
  2525. erts_mtrace_enabled ? "true" : "false");
  2526. }
  2527. Eterm
  2528. erts_allocator_options(void *proc)
  2529. {
  2530. #if HAVE_ERTS_MSEG
  2531. int use_mseg = 0;
  2532. #endif
  2533. Uint sz, *szp, *hp, **hpp;
  2534. Eterm res, features, settings;
  2535. Eterm atoms[ERTS_ALC_A_MAX-ERTS_ALC_A_MIN+7];
  2536. Uint terms[ERTS_ALC_A_MAX-ERTS_ALC_A_MIN+7];
  2537. int a, length;
  2538. SysAllocStat sas;
  2539. Uint *endp = NULL;
  2540. sys_alloc_stat(&sas);
  2541. /* First find out the heap size needed ... */
  2542. hpp = NULL;
  2543. szp = &sz;
  2544. sz = 0;
  2545. bld_term:
  2546. length = 0;
  2547. features = NIL;
  2548. settings = NIL;
  2549. for (a = ERTS_ALC_A_MIN; a <= ERTS_ALC_A_MAX; a++) {
  2550. Eterm tmp = NIL;
  2551. atoms[length] = am_atom_put((char *) ERTS_ALC_A2AD(a),
  2552. sys_strlen(ERTS_ALC_A2AD(a)));
  2553. if (erts_allctrs_info[a].enabled) {
  2554. if (erts_allctrs_info[a].alloc_util) {
  2555. Allctr_t *allctr;
  2556. #if HAVE_ERTS_MSEG
  2557. use_mseg++;
  2558. #endif
  2559. if (erts_allctr_thr_spec[a].enabled)
  2560. allctr = erts_allctr_thr_spec[a].allctr[0];
  2561. else
  2562. allctr = erts_allctrs_info[a].extra;
  2563. tmp = erts_alcu_info_options(allctr, NULL, NULL, hpp, szp);
  2564. }
  2565. else {
  2566. int l = 0;
  2567. Eterm as[4];
  2568. Eterm ts[4];
  2569. as[l] = ERTS_MAKE_AM("e");
  2570. ts[l++] = am_true;
  2571. switch (a) {
  2572. case ERTS_ALC_A_SYSTEM:
  2573. as[l] = ERTS_MAKE_AM("m");
  2574. ts[l++] = ERTS_MAKE_AM("libc");
  2575. if(sas.trim_threshold >= 0) {
  2576. as[l] = ERTS_MAKE_AM("tt");
  2577. ts[l++] = erts_bld_uint(hpp, szp,
  2578. (Uint) sas.trim_threshold);
  2579. }
  2580. if(sas.top_pad >= 0) {
  2581. as[l] = ERTS_MAKE_AM("tp");
  2582. ts[l++] = erts_bld_uint(hpp, szp, (Uint) sas.top_pad);
  2583. }
  2584. break;
  2585. default:
  2586. break;
  2587. }
  2588. tmp = erts_bld_2tup_list(hpp, szp, l, as, ts);
  2589. }
  2590. }
  2591. else {
  2592. Eterm atom = ERTS_MAKE_AM("e");
  2593. Eterm term = am_false;
  2594. tmp = erts_bld_2tup_list(hpp, szp, 1, &atom, &term);
  2595. }
  2596. terms[length++] = tmp;
  2597. }
  2598. #if HAVE_ERTS_MSEG
  2599. if (use_mseg) {
  2600. atoms[length] = ERTS_MAKE_AM("mseg_alloc");
  2601. terms[length++] = erts_mseg_info_options(0, NULL, NULL, hpp, szp);
  2602. }
  2603. #endif
  2604. atoms[length] = ERTS_MAKE_AM("alloc_util");
  2605. terms[length++] = erts_alcu_au_info_options(NULL, NULL, hpp, szp);
  2606. #if HAVE_ERTS_MMAP
  2607. atoms[length] = ERTS_MAKE_AM("erts_mmap");
  2608. terms[length++] = erts_mmap_info_options(&erts_dflt_mmapper, NULL, NULL,
  2609. NULL, hpp, szp);
  2610. #endif
  2611. {
  2612. Eterm o[1], v[1];
  2613. o[0] = ERTS_MAKE_AM("t");
  2614. v[0] = erts_mtrace_enabled ? am_true : am_false;
  2615. atoms[length] = ERTS_MAKE_AM("instr");
  2616. terms[length++] = erts_bld_2tup_list(hpp, szp, 1, o, v);
  2617. }
  2618. atoms[length] = ERTS_MAKE_AM("lock_physical_memory");
  2619. terms[length++] = (lock_all_physical_memory ? am_all : am_no);
  2620. settings = erts_bld_2tup_list(hpp, szp, length, atoms, terms);
  2621. length = 0;
  2622. for (a = ERTS_ALC_A_MIN; a <= ERTS_ALC_A_MAX; a++) {
  2623. if (erts_allctrs_info[a].enabled) {
  2624. terms[length++] = am_atom_put((char *) ERTS_ALC_A2AD(a),
  2625. sys_strlen(ERTS_ALC_A2AD(a)));
  2626. }
  2627. }
  2628. #if HAVE_ERTS_MSEG
  2629. if (use_mseg)
  2630. terms[length++] = ERTS_MAKE_AM("mseg_alloc");
  2631. #endif
  2632. #if ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
  2633. terms[length++] = ERTS_MAKE_AM("sys_aligned_alloc");
  2634. #endif
  2635. #if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
  2636. terms[length++] = ERTS_MAKE_AM("literal_mmap");
  2637. #endif
  2638. features = length ? erts_bld_list(hpp, szp, length, terms) : NIL;
  2639. #if defined(__GLIBC__)
  2640. {
  2641. Eterm AM_glibc = ERTS_MAKE_AM("glibc");
  2642. Eterm version;
  2643. version = erts_bld_cons(hpp,
  2644. szp,
  2645. make_small(__GLIBC__),
  2646. #ifdef __GLIBC_MINOR__
  2647. erts_bld_cons(hpp,
  2648. szp,
  2649. make_small(__GLIBC_MINOR__),
  2650. NIL)
  2651. #else
  2652. NIL
  2653. #endif
  2654. );
  2655. res = erts_bld_tuple(hpp, szp, 4,
  2656. AM_glibc, version, features, settings);
  2657. }
  2658. #else /* unknown allocator */
  2659. res = erts_bld_tuple(hpp, szp, 4,
  2660. am_undefined, NIL, features, settings);
  2661. #endif
  2662. if (szp) {
  2663. /* ... and then build the term */
  2664. hp = HAlloc((Process *) proc, sz);
  2665. endp = hp + sz;
  2666. hpp = &hp;
  2667. szp = NULL;
  2668. goto bld_term;
  2669. }
  2670. ASSERT(endp >= hp);
  2671. HRelease((Process *) proc, endp, hp);
  2672. return res;
  2673. }
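/*
 * Over-allocates by ERTS_CACHE_LINE_SIZE - 1 bytes and rounds the returned
 * pointer up to the next cache-line boundary. The block is "permanent": it
 * is never freed, so the alignment padding is intentionally lost.
 */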
  2674. void *erts_alloc_permanent_cache_aligned(ErtsAlcType_t type, Uint size)
  2675. {
  2676. UWord v = (UWord) erts_alloc(type, size + (ERTS_CACHE_LINE_SIZE-1)
  2677. #ifdef VALGRIND
  2678. + sizeof(UWord)
  2679. #endif
  2680. );
  2681. #ifdef VALGRIND
  2682. { /* Link them to avoid Leak_PossiblyLost */
  2683. static UWord* first_in_list = NULL;
  2684. *(UWord**)v = first_in_list;
  2685. first_in_list = (UWord*) v;
  2686. v += sizeof(UWord);
  2687. }
  2688. #endif
  2689. if (v & ERTS_CACHE_LINE_MASK) {
  2690. v = (v & ~ERTS_CACHE_LINE_MASK) + ERTS_CACHE_LINE_SIZE;
  2691. }
  2692. ASSERT((v & ERTS_CACHE_LINE_MASK) == 0);
  2693. return (void*)v;
  2694. }
  2695. static void
  2696. reply_alloc_info(void *vair)
  2697. {
  2698. ErtsAllocInfoReq *air = (ErtsAllocInfoReq *) vair;
  2699. Uint sched_id = erts_get_scheduler_id();
  2700. int global_instances = air->req_sched == sched_id;
  2701. ErtsProcLocks rp_locks;
  2702. Process *rp = air->proc;
  2703. Eterm ref_copy = NIL, ai_list, msg = NIL;
  2704. Eterm *hp = NULL, *hp_start = NULL, *hp_end = NULL;
  2705. Eterm **hpp;
  2706. Uint sz, *szp;
  2707. ErlOffHeap *ohp = NULL;
  2708. ErtsMessage *mp = NULL;
  2709. #if HAVE_ERTS_MMAP
  2710. struct erts_mmap_info_struct mmap_info_dflt;
  2711. # if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
  2712. struct erts_mmap_info_struct mmap_info_literal;
  2713. # endif
  2714. #endif
  2715. int i;
  2716. Eterm (*info_func)(Allctr_t *,
  2717. int,
  2718. int,
  2719. fmtfn_t *,
  2720. void *,
  2721. Uint **,
  2722. Uint *) = (air->only_sz
  2723. ? erts_alcu_sz_info
  2724. : erts_alcu_info);
  2725. rp_locks = air->req_sched == sched_id ? ERTS_PROC_LOCK_MAIN : 0;
  2726. sz = 0;
  2727. hpp = NULL;
  2728. szp = &sz;
  2729. while (1) {
  2730. if (hpp)
  2731. ref_copy = erts_iref_storage_make_ref(&air->iref,
  2732. hpp, ohp, 0);
  2733. else
  2734. *szp += erts_iref_storage_heap_size(&air->iref);
  2735. ai_list = NIL;
  2736. for (i = 0; air->allocs[i] != ERTS_ALC_A_INVALID; i++);
  2737. for (i--; i >= 0; i--) {
  2738. int ai = air->allocs[i];
  2739. Allctr_t *allctr;
  2740. Eterm ainfo;
  2741. Eterm alloc_atom;
  2742. if (global_instances) {
  2743. switch (ai) {
  2744. case ERTS_ALC_A_SYSTEM: {
  2745. alloc_atom = erts_bld_atom(hpp, szp, "sys_alloc");
  2746. ainfo = NIL;
  2747. if (!air->only_sz) {
  2748. SysAllocStat sas;
  2749. if (hpp)
  2750. sys_alloc_stat(&sas);
  2751. if (szp) {
2752. /* ensure enough heap */
  2753. sas.top_pad = INT_MAX;
  2754. sas.trim_threshold = INT_MAX;
  2755. }
  2756. if (sas.top_pad >= 0) {
  2757. ainfo = erts_bld_cons(
  2758. hpp, szp,
  2759. erts_bld_tuple(
  2760. hpp, szp, 2,
  2761. erts_bld_atom(hpp, szp, "tp"),
  2762. erts_bld_uint(
  2763. hpp, szp,
  2764. (Uint) sas.top_pad)),
  2765. ainfo);
  2766. }
  2767. if (sas.trim_threshold >= 0) {
  2768. ainfo = erts_bld_cons(
  2769. hpp, szp,
  2770. erts_bld_tuple(
  2771. hpp, szp, 2,
  2772. erts_bld_atom(hpp, szp, "tt"),
  2773. erts_bld_uint(
  2774. hpp, szp,
  2775. (Uint) sas.trim_threshold)),
  2776. ainfo);
  2777. }
  2778. ainfo = erts_bld_cons(hpp, szp,
  2779. erts_bld_tuple(
  2780. hpp, szp, 2,
  2781. erts_bld_atom(hpp, szp,
  2782. "m"),
  2783. erts_bld_atom(hpp, szp,
  2784. "libc")),
  2785. ainfo);
  2786. ainfo = erts_bld_cons(hpp, szp,
  2787. erts_bld_tuple(
  2788. hpp, szp, 2,
  2789. erts_bld_atom(hpp, szp,
  2790. "e"),
  2791. am_true),
  2792. ainfo);
  2793. ainfo = erts_bld_tuple(hpp, szp, 2,
  2794. erts_bld_atom(hpp, szp,
  2795. "options"),
  2796. ainfo);
  2797. ainfo = erts_bld_cons(hpp, szp,ainfo,NIL);
  2798. }
  2799. ainfo = erts_bld_tuple(hpp, szp, 3,
  2800. alloc_atom,
  2801. make_small(0),
  2802. ainfo);
  2803. break;
  2804. }
  2805. case ERTS_ALC_INFO_A_ALLOC_UTIL:
  2806. alloc_atom = erts_bld_atom(hpp, szp, "alloc_util");
  2807. ainfo = (air->only_sz
  2808. ? NIL
  2809. : erts_alcu_au_info_options(NULL, NULL,
  2810. hpp, szp));
  2811. ainfo = erts_bld_tuple(hpp, szp, 3,
  2812. alloc_atom,
  2813. make_small(0),
  2814. ainfo);
  2815. break;
  2816. case ERTS_ALC_INFO_A_ERTS_MMAP:
  2817. alloc_atom = erts_bld_atom(hpp, szp, "erts_mmap");
  2818. #if HAVE_ERTS_MMAP
  2819. ainfo = (air->only_sz ? NIL :
  2820. erts_mmap_info(&erts_dflt_mmapper, NULL, NULL,
  2821. hpp, szp, &mmap_info_dflt));
  2822. ainfo = erts_bld_tuple3(hpp, szp,
  2823. alloc_atom,
  2824. erts_bld_atom(hpp,szp,"default_mmap"),
  2825. ainfo);
  2826. # if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
  2827. ai_list = erts_bld_cons(hpp, szp,
  2828. ainfo, ai_list);
  2829. ainfo = (air->only_sz ? NIL :
  2830. erts_mmap_info(&erts_literal_mmapper, NULL, NULL,
  2831. hpp, szp, &mmap_info_literal));
  2832. ainfo = erts_bld_tuple3(hpp, szp,
  2833. alloc_atom,
  2834. erts_bld_atom(hpp,szp,"literal_mmap"),
  2835. ainfo);
  2836. # endif
  2837. #else /* !HAVE_ERTS_MMAP */
  2838. ainfo = erts_bld_tuple2(hpp, szp, alloc_atom,
  2839. am_false);
  2840. #endif
  2841. break;
  2842. case ERTS_ALC_INFO_A_MSEG_ALLOC:
  2843. alloc_atom = erts_bld_atom(hpp, szp, "mseg_alloc");
  2844. #if HAVE_ERTS_MSEG
  2845. ainfo = erts_mseg_info(0, NULL, NULL, hpp != NULL,
  2846. air->only_sz, hpp, szp);
  2847. ainfo = erts_bld_tuple3(hpp, szp,
  2848. alloc_atom,
  2849. make_small(0),
  2850. ainfo);
  2851. #else
  2852. ainfo = erts_bld_tuple2(hpp, szp, alloc_atom,
  2853. am_false);
  2854. #endif
  2855. break;
  2856. #ifndef ERTS_ALC_A_EXEC
  2857. case ERTS_ALC_INFO_A_DISABLED_EXEC:
  2858. alloc_atom = erts_bld_atom(hpp, szp, "exec_alloc");
  2859. ainfo = erts_bld_tuple2(hpp, szp, alloc_atom, am_false);
  2860. break;
  2861. #endif
  2862. default:
  2863. alloc_atom = erts_bld_atom(hpp, szp,
  2864. (char *) ERTS_ALC_A2AD(ai));
  2865. if (!erts_allctrs_info[ai].enabled)
  2866. ainfo = erts_bld_tuple(hpp, szp, 2, alloc_atom,
  2867. am_false);
  2868. else if (erts_allctrs_info[ai].alloc_util) {
  2869. if (erts_allctrs_info[ai].thr_spec)
  2870. allctr = erts_allctr_thr_spec[ai].allctr[0];
  2871. else
  2872. allctr = erts_allctrs_info[ai].extra;
  2873. ainfo = info_func(allctr, air->internal, hpp != NULL,
  2874. NULL, NULL, hpp, szp);
  2875. ainfo = erts_bld_tuple(hpp, szp, 3, alloc_atom,
  2876. make_small(0), ainfo);
  2877. }
  2878. else {
  2879. erts_exit(ERTS_ABORT_EXIT, "%s:%d: internal error\n",
  2880. __FILE__, __LINE__);
  2881. }
  2882. }
  2883. ai_list = erts_bld_cons(hpp, szp,
  2884. ainfo, ai_list);
  2885. }
  2886. switch (ai) {
  2887. case ERTS_ALC_A_SYSTEM:
  2888. case ERTS_ALC_INFO_A_ALLOC_UTIL:
  2889. case ERTS_ALC_INFO_A_ERTS_MMAP:
  2890. case ERTS_ALC_INFO_A_DISABLED_EXEC:
  2891. break;
  2892. case ERTS_ALC_INFO_A_MSEG_ALLOC:
  2893. #if HAVE_ERTS_MSEG
  2894. alloc_atom = erts_bld_atom(hpp, szp, "mseg_alloc");
  2895. ainfo = erts_mseg_info(sched_id, NULL, NULL,
  2896. hpp != NULL, air->only_sz, hpp, szp);
  2897. ainfo = erts_bld_tuple(hpp, szp, 3,
  2898. alloc_atom,
  2899. make_small(sched_id),
  2900. ainfo);
  2901. ai_list = erts_bld_cons(hpp, szp, ainfo, ai_list);
  2902. #endif
  2903. break;
  2904. default:
  2905. if (erts_allctrs_info[ai].thr_spec) {
  2906. alloc_atom = erts_bld_atom(hpp, szp,
  2907. (char *) ERTS_ALC_A2AD(ai));
  2908. allctr = erts_allctr_thr_spec[ai].allctr[sched_id];
  2909. ainfo = info_func(allctr, air->internal, hpp != NULL, NULL,
  2910. NULL, hpp, szp);
  2911. ai_list = erts_bld_cons(hpp, szp,
  2912. erts_bld_tuple(
  2913. hpp, szp,
  2914. 3,
  2915. alloc_atom,
  2916. make_small(sched_id),
  2917. ainfo),
  2918. ai_list);
  2919. }
  2920. break;
  2921. }
  2922. msg = erts_bld_tuple(hpp, szp,
  2923. 3,
  2924. ref_copy,
  2925. make_small(sched_id),
  2926. ai_list);
  2927. }
  2928. if (hpp)
  2929. break;
  2930. mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
  2931. hp_start = hp;
  2932. hp_end = hp + sz;
  2933. szp = NULL;
  2934. hpp = &hp;
  2935. }
  2936. if (hp != hp_end)
  2937. erts_shrink_message_heap(&mp, rp, hp_start, hp, hp_end, &msg, 1);
  2938. erts_queue_message(rp, rp_locks, mp, msg, am_system);
  2939. if (air->req_sched == sched_id)
  2940. rp_locks &= ~ERTS_PROC_LOCK_MAIN;
  2941. erts_proc_unlock(rp, rp_locks);
  2942. erts_proc_dec_refc(rp);
  2943. if (erts_atomic32_dec_read_nob(&air->refc) == 0) {
  2944. erts_iref_storage_clean(&air->iref);
  2945. aireq_free(air);
  2946. }
  2947. }
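/*
 * erts_request_alloc_info() fans the request out to all schedulers; each
 * scheduler runs reply_alloc_info() above and sends its own
 * {Ref, SchedulerId, InfoList} message back to the requesting process.
 * air->refc counts the outstanding replies and the request structure is
 * freed when the last scheduler has answered.
 */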
  2948. int
  2949. erts_request_alloc_info(struct process *c_p,
  2950. Eterm ref,
  2951. Eterm allocs,
  2952. int only_sz,
  2953. int internal)
  2954. {
  2955. ErtsAllocInfoReq *air = aireq_alloc();
  2956. Eterm req_ai[ERTS_ALC_INFO_A_END] = {0};
  2957. Eterm alist;
  2958. int airix = 0, ai;
  2959. air->req_sched = erts_get_scheduler_id();
  2960. air->only_sz = only_sz;
  2961. air->internal = internal;
  2962. air->proc = c_p;
  2963. if (is_not_internal_ref(ref))
  2964. return 0;
  2965. erts_iref_storage_save(&air->iref, ref);
  2966. if (is_not_list(allocs))
  2967. return 0;
  2968. alist = allocs;
  2969. while (is_list(alist)) {
  2970. int saved = 0;
  2971. Eterm* consp = list_val(alist);
  2972. Eterm alloc = CAR(consp);
  2973. for (ai = ERTS_ALC_A_MIN; ai <= ERTS_ALC_A_MAX; ai++)
  2974. if (erts_is_atom_str(erts_alc_a2ad[ai], alloc, 0))
  2975. goto save_alloc;
  2976. if (erts_is_atom_str("mseg_alloc", alloc, 0)) {
  2977. ai = ERTS_ALC_INFO_A_MSEG_ALLOC;
  2978. goto save_alloc;
  2979. }
  2980. if (erts_is_atom_str("erts_mmap", alloc, 0)) {
  2981. ai = ERTS_ALC_INFO_A_ERTS_MMAP;
  2982. goto save_alloc;
  2983. }
  2984. #ifndef ERTS_ALC_A_EXEC
  2985. if (erts_is_atom_str("exec_alloc", alloc, 0)) {
  2986. ai = ERTS_ALC_INFO_A_DISABLED_EXEC;
  2987. goto save_alloc;
  2988. }
  2989. #endif
  2990. if (erts_is_atom_str("alloc_util", alloc, 0)) {
  2991. ai = ERTS_ALC_INFO_A_ALLOC_UTIL;
  2992. save_alloc:
  2993. if (req_ai[ai])
  2994. return 0;
  2995. air->allocs[airix++] = ai;
  2996. req_ai[ai] = 1;
  2997. saved = 1;
  2998. }
  2999. if (!saved)
  3000. return 0;
  3001. alist = CDR(consp);
  3002. }
  3003. if (is_not_nil(alist))
  3004. return 0;
  3005. air->allocs[airix] = ERTS_ALC_A_INVALID;
  3006. erts_atomic32_init_nob(&air->refc,
  3007. (erts_aint32_t) erts_no_schedulers);
  3008. erts_proc_add_refc(c_p, (Sint) erts_no_schedulers);
  3009. if (erts_no_schedulers > 1)
  3010. erts_schedule_multi_misc_aux_work(1,
  3011. erts_no_schedulers,
  3012. reply_alloc_info,
  3013. (void *) air);
  3014. reply_alloc_info((void *) air);
  3015. return 1;
  3016. }
  3017. Eterm erts_alloc_set_dyn_param(Process* c_p, Eterm tuple)
  3018. {
  3019. ErtsAllocatorThrSpec_t *tspec;
  3020. ErtsAlcType_t ai;
  3021. Allctr_t* allctr;
  3022. Eterm* tp;
  3023. Eterm res;
  3024. if (!is_tuple_arity(tuple, 3))
  3025. goto badarg;
  3026. tp = tuple_val(tuple);
  3027. /*
  3028. * Ex: {ets_alloc, sbct, 256000}
  3029. */
  3030. if (!is_atom(tp[1]) || !is_atom(tp[2]) || !is_integer(tp[3]))
  3031. goto badarg;
  3032. for (ai = ERTS_ALC_A_MIN; ai <= ERTS_ALC_A_MAX; ai++)
  3033. if (erts_is_atom_str(erts_alc_a2ad[ai], tp[1], 0))
  3034. break;
  3035. if (ai > ERTS_ALC_A_MAX)
  3036. goto badarg;
  3037. if (!erts_allctrs_info[ai].enabled ||
  3038. !erts_allctrs_info[ai].alloc_util) {
  3039. return am_notsup;
  3040. }
  3041. if (tp[2] == am_sbct) {
  3042. Uint sbct;
  3043. int i, ok;
  3044. if (!term_to_Uint(tp[3], &sbct))
  3045. goto badarg;
  3046. tspec = &erts_allctr_thr_spec[ai];
  3047. if (tspec->enabled) {
  3048. ok = 0;
  3049. for (i = 0; i < tspec->size; i++) {
  3050. allctr = tspec->allctr[i];
  3051. ok |= allctr->try_set_dyn_param(allctr, am_sbct, sbct);
  3052. }
  3053. }
  3054. else {
  3055. allctr = erts_allctrs_info[ai].extra;
  3056. ok = allctr->try_set_dyn_param(allctr, am_sbct, sbct);
  3057. }
  3058. return ok ? am_ok : am_notsup;
  3059. }
  3060. return am_notsup;
  3061. badarg:
  3062. ERTS_BIF_PREP_ERROR(res, c_p, EXC_BADARG);
  3063. return res;
  3064. }
  3065. /*
  3066. * The allocator wrapper prelocking stuff below is about the locking order.
  3067. * It only affects wrappers (erl_mtrace.c) that keep locks during
  3068. * alloc/realloc/free.
  3069. *
  3070. * Some query functions in erl_alloc_util.c lock the allocator mutex and then
  3071. * use erts_printf that in turn may call the sys allocator through the wrappers.
3072. * To avoid breaking the locking order these query functions first "pre-lock" all
  3073. * allocator wrappers.
  3074. */
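/*
 * A minimal usage sketch (the wrapper callbacks below are hypothetical;
 * only the lock/unlock fields and the functions defined in this file are
 * real):
 *
 *     static ErtsAllocatorWrapper_t my_wrapper;
 *     my_wrapper.lock = my_lock;        (locks the wrapper's own mutex)
 *     my_wrapper.unlock = my_unlock;
 *     erts_allctr_wrapper_prelock_init(&my_wrapper);
 *     ...
 *     erts_allctr_wrapper_pre_lock();   (before a query that may printf
 *                                        and reach sys_alloc via wrappers)
 *     ... query code ...
 *     erts_allctr_wrapper_pre_unlock();
 */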
  3075. ErtsAllocatorWrapper_t *erts_allctr_wrappers;
  3076. int erts_allctr_wrapper_prelocked = 0;
  3077. erts_tsd_key_t erts_allctr_prelock_tsd_key;
  3078. void erts_allctr_wrapper_prelock_init(ErtsAllocatorWrapper_t* wrapper)
  3079. {
  3080. ASSERT(wrapper->lock && wrapper->unlock);
  3081. wrapper->next = erts_allctr_wrappers;
  3082. erts_allctr_wrappers = wrapper;
  3083. }
  3084. void erts_allctr_wrapper_pre_lock(void)
  3085. {
  3086. if (erts_allctr_wrappers) {
  3087. ErtsAllocatorWrapper_t* wrapper = erts_allctr_wrappers;
  3088. for ( ; wrapper; wrapper = wrapper->next) {
  3089. wrapper->lock();
  3090. }
  3091. ASSERT(!erts_allctr_wrapper_prelocked);
  3092. erts_allctr_wrapper_prelocked = 1;
  3093. erts_tsd_set(erts_allctr_prelock_tsd_key, (void*)1);
  3094. }
  3095. }
  3096. void erts_allctr_wrapper_pre_unlock(void)
  3097. {
  3098. if (erts_allctr_wrappers) {
  3099. ErtsAllocatorWrapper_t* wrapper = erts_allctr_wrappers;
  3100. erts_allctr_wrapper_prelocked = 0;
  3101. erts_tsd_set(erts_allctr_prelock_tsd_key, (void*)0);
  3102. for ( ; wrapper; wrapper = wrapper->next) {
  3103. wrapper->unlock();
  3104. }
  3105. }
  3106. }
  3107. /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
  3108. * NOTE: erts_alc_test() is only supposed to be used for testing. *
  3109. * *
  3110. * Keep alloc_SUITE_data/allocator_test.h updated if changes are made *
  3111. * to erts_alc_test() *
  3112. \* */
3113. #define ERTS_ALC_TEST_ABORT erts_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error\n", __FILE__, __LINE__)
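/*
 * The high byte of 'op' selects the subsystem under test: 0x0 alloc_util,
 * 0x1 goodfit, 0x2 bestfit, 0x3 afit, 0x4 mseg, 0x5 aoff first fit, and
 * 0xf generic helpers (threads, mutexes, condition variables and a
 * stand-alone test allocator instance) used by alloc_SUITE.
 */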
  3114. UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
  3115. {
  3116. switch (op >> 8) {
  3117. case 0x0: return erts_alcu_test(op, a1, a2);
  3118. case 0x1: return erts_gfalc_test(op, a1, a2);
  3119. case 0x2: return erts_bfalc_test(op, a1, a2);
  3120. case 0x3: return erts_afalc_test(op, a1, a2);
  3121. case 0x4: return erts_mseg_test(op, a1, a2, a3);
  3122. case 0x5: return erts_aoffalc_test(op, a1, a2);
  3123. case 0xf:
  3124. switch (op) {
  3125. case 0xf00:
  3126. if (((Allctr_t *) a1)->thread_safe)
  3127. return (UWord) erts_alcu_alloc_ts(ERTS_ALC_T_TEST,
  3128. (void *) a1,
  3129. (Uint) a2);
  3130. else
  3131. return (UWord) erts_alcu_alloc(ERTS_ALC_T_TEST,
  3132. (void *) a1,
  3133. (Uint) a2);
  3134. case 0xf01:
  3135. if (((Allctr_t *) a1)->thread_safe)
  3136. return (UWord) erts_alcu_realloc_ts(ERTS_ALC_T_TEST,
  3137. (void *) a1,
  3138. (void *) a2,
  3139. (Uint) a3);
  3140. else
  3141. return (UWord) erts_alcu_realloc(ERTS_ALC_T_TEST,
  3142. (void *) a1,
  3143. (void *) a2,
  3144. (Uint) a3);
  3145. case 0xf02:
  3146. if (((Allctr_t *) a1)->thread_safe)
  3147. erts_alcu_free_ts(ERTS_ALC_T_TEST, (void *) a1, (void *) a2);
  3148. else
  3149. erts_alcu_free(ERTS_ALC_T_TEST, (void *) a1, (void *) a2);
  3150. return 0;
  3151. case 0xf03: {
  3152. Allctr_t *allctr;
  3153. struct au_init init;
  3154. SET_DEFAULT_ALLOC_OPTS(&init);
  3155. init.enable = 1;
  3156. init.astrat = ERTS_ALC_S_GOODFIT;
  3157. init.init.util.name_prefix = (char *) a1;
  3158. init.init.util.alloc_no = ERTS_ALC_A_TEST;
  3159. init.init.util.alloc_strat = init.astrat;
  3160. init.init.util.ts = 1;
  3161. if ((char **) a3) {
  3162. char **argv = (char **) a3;
  3163. int i = 0;
  3164. while (argv[i]) {
  3165. if (argv[i][0] == '-' && argv[i][1] == 't')
  3166. handle_au_arg(&init, &argv[i][2], argv, &i, 0);
  3167. else
  3168. return (UWord) NULL;
  3169. i++;
  3170. }
  3171. }
  3172. switch (init.astrat) {
  3173. case ERTS_ALC_S_GOODFIT:
  3174. allctr = erts_gfalc_start((GFAllctr_t *)
  3175. erts_alloc(ERTS_ALC_T_TEST,
  3176. sizeof(GFAllctr_t)),
  3177. &init.init.gf,
  3178. &init.init.util);
  3179. break;
  3180. case ERTS_ALC_S_BESTFIT:
  3181. allctr = erts_bfalc_start((BFAllctr_t *)
  3182. erts_alloc(ERTS_ALC_T_TEST,
  3183. sizeof(BFAllctr_t)),
  3184. &init.init.bf,
  3185. &init.init.util);
  3186. break;
  3187. case ERTS_ALC_S_AFIT:
  3188. allctr = erts_afalc_start((AFAllctr_t *)
  3189. erts_alloc(ERTS_ALC_T_TEST,
  3190. sizeof(AFAllctr_t)),
  3191. &init.init.af,
  3192. &init.init.util);
  3193. break;
  3194. case ERTS_ALC_S_FIRSTFIT:
  3195. allctr = erts_aoffalc_start((AOFFAllctr_t *)
  3196. erts_alloc(ERTS_ALC_T_TEST,
  3197. sizeof(AOFFAllctr_t)),
  3198. &init.init.aoff,
  3199. &init.init.util);
  3200. break;
  3201. default:
  3202. ASSERT(0);
  3203. allctr = NULL;
  3204. break;
  3205. }
  3206. return (UWord) allctr;
  3207. }
  3208. case 0xf04:
  3209. erts_alcu_stop((Allctr_t *) a1);
  3210. erts_free(ERTS_ALC_T_TEST, (void *) a1);
  3211. break;
  3212. case 0xf05: return (UWord) 1;
  3213. case 0xf06: return (UWord) ((Allctr_t *) a1)->thread_safe;
  3214. #ifdef ETHR_NO_FORKSAFETY
  3215. case 0xf07: return (UWord) 0;
  3216. #else
  3217. case 0xf07: return (UWord) ((Allctr_t *) a1)->thread_safe;
  3218. #endif
  3219. case 0xf08: {
  3220. ethr_mutex *mtx = erts_alloc(ERTS_ALC_T_TEST, sizeof(ethr_mutex));
  3221. if (ethr_mutex_init(mtx) != 0)
  3222. ERTS_ALC_TEST_ABORT;
  3223. return (UWord) mtx;
  3224. }
  3225. case 0xf09: {
  3226. ethr_mutex *mtx = (ethr_mutex *) a1;
  3227. if (ethr_mutex_destroy(mtx) != 0)
  3228. ERTS_ALC_TEST_ABORT;
  3229. erts_free(ERTS_ALC_T_TEST, (void *) mtx);
  3230. break;
  3231. }
  3232. case 0xf0a:
  3233. ethr_mutex_lock((ethr_mutex *) a1);
  3234. break;
  3235. case 0xf0b:
  3236. ethr_mutex_unlock((ethr_mutex *) a1);
  3237. break;
  3238. case 0xf0c: {
  3239. ethr_cond *cnd = erts_alloc(ERTS_ALC_T_TEST, sizeof(ethr_cond));
  3240. if (ethr_cond_init(cnd) != 0)
  3241. ERTS_ALC_TEST_ABORT;
  3242. return (UWord) cnd;
  3243. }
  3244. case 0xf0d: {
  3245. ethr_cond *cnd = (ethr_cond *) a1;
  3246. if (ethr_cond_destroy(cnd) != 0)
  3247. ERTS_ALC_TEST_ABORT;
  3248. erts_free(ERTS_ALC_T_TEST, (void *) cnd);
  3249. break;
  3250. }
  3251. case 0xf0e:
  3252. ethr_cond_broadcast((ethr_cond *) a1);
  3253. break;
  3254. case 0xf0f: {
  3255. int res;
  3256. do {
  3257. res = ethr_cond_wait((ethr_cond *) a1, (ethr_mutex *) a2);
  3258. } while (res == EINTR);
  3259. break;
  3260. }
  3261. case 0xf10: {
  3262. ethr_tid *tid = erts_alloc(ERTS_ALC_T_TEST, sizeof(ethr_tid));
  3263. if (ethr_thr_create(tid,
  3264. (void * (*)(void *)) a1,
  3265. (void *) a2,
  3266. NULL) != 0)
  3267. ERTS_ALC_TEST_ABORT;
  3268. return (UWord) tid;
  3269. }
  3270. case 0xf11: {
  3271. ethr_tid *tid = (ethr_tid *) a1;
  3272. if (ethr_thr_join(*tid, NULL) != 0)
  3273. ERTS_ALC_TEST_ABORT;
  3274. erts_free(ERTS_ALC_T_TEST, (void *) tid);
  3275. break;
  3276. }
  3277. case 0xf12:
  3278. ethr_thr_exit((void *) a1);
  3279. ERTS_ALC_TEST_ABORT;
  3280. break;
  3281. case 0xf13: return (UWord) 1;
  3282. case 0xf14: return (UWord) erts_alloc(ERTS_ALC_T_TEST, (Uint)a1);
  3283. case 0xf15: erts_free(ERTS_ALC_T_TEST, (void*)a1); return 0;
  3284. case 0xf16: return (UWord) erts_realloc(ERTS_ALC_T_TEST, (void*)a1, (Uint)a2);
  3285. case 0xf17: {
  3286. Uint extra_hdr_sz = UNIT_CEILING((Uint)a1);
  3287. ErtsAllocatorThrSpec_t* ts = &erts_allctr_thr_spec[ERTS_ALC_A_TEST];
  3288. Uint offset = ts->allctr[0]->mbc_header_size;
  3289. void* orig_creating_mbc = ts->allctr[0]->creating_mbc;
  3290. void* orig_destroying_mbc = ts->allctr[0]->destroying_mbc;
  3291. void* new_creating_mbc = *(void**)a2; /* inout arg */
  3292. void* new_destroying_mbc = *(void**)a3; /* inout arg */
  3293. int i;
  3294. for (i=0; i < ts->size; i++) {
  3295. Allctr_t* ap = ts->allctr[i];
  3296. if (ap->mbc_header_size != offset
  3297. || ap->creating_mbc != orig_creating_mbc
  3298. || ap->destroying_mbc != orig_destroying_mbc
  3299. || ap->mbc_list.first != NULL)
  3300. return -1;
  3301. }
  3302. for (i=0; i < ts->size; i++) {
  3303. ts->allctr[i]->mbc_header_size += extra_hdr_sz;
  3304. ts->allctr[i]->creating_mbc = new_creating_mbc;
  3305. ts->allctr[i]->destroying_mbc = new_destroying_mbc;
  3306. }
  3307. *(void**)a2 = orig_creating_mbc;
  3308. *(void**)a3 = orig_destroying_mbc;
  3309. return offset;
  3310. }
  3311. case 0xf18: {
  3312. ErtsAllocatorThrSpec_t* ts = &erts_allctr_thr_spec[ERTS_ALC_A_TEST];
  3313. return ts->allctr[0]->largest_mbc_size;
  3314. }
  3315. default:
  3316. break;
  3317. }
  3318. return (UWord) 0;
  3319. default:
  3320. break;
  3321. }
  3322. ASSERT(0);
  3323. return ~((UWord) 0);
  3324. }
  3325. #ifdef DEBUG
  3326. /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
  3327. * Debug stuff *
  3328. \* */
  3329. #if 0
  3330. #define PRINT_OPS
  3331. #else
  3332. #undef PRINT_OPS
  3333. #endif
  3334. #ifdef HARD_DEBUG
  3335. #define FENCE_SZ (4*sizeof(UWord))
  3336. #else
  3337. #define FENCE_SZ (3*sizeof(UWord))
  3338. #endif
  3339. #if defined(ARCH_64)
  3340. #define FENCE_PATTERN 0xABCDEF97ABCDEF97
  3341. #else
  3342. #define FENCE_PATTERN 0xABCDEF97
  3343. #endif
  3344. #define TYPE_PATTERN_MASK ERTS_ALC_N_MASK
  3345. #define TYPE_PATTERN_SHIFT 16
  3346. #define FIXED_FENCE_PATTERN_MASK \
  3347. (~((UWord) (TYPE_PATTERN_MASK << TYPE_PATTERN_SHIFT)))
  3348. #define FIXED_FENCE_PATTERN \
  3349. (FENCE_PATTERN & FIXED_FENCE_PATTERN_MASK)
  3350. #define MK_PATTERN(T) \
  3351. (FIXED_FENCE_PATTERN | (((T) & TYPE_PATTERN_MASK) << TYPE_PATTERN_SHIFT))
  3352. #define GET_TYPE_OF_PATTERN(P) \
  3353. (((P) >> TYPE_PATTERN_SHIFT) & TYPE_PATTERN_MASK)
  3354. #ifdef HARD_DEBUG
  3355. #define ERL_ALC_HDBG_MAX_MBLK 100000
  3356. #define ERTS_ALC_O_CHECK -1
  3357. typedef struct hdbg_mblk_ hdbg_mblk;
  3358. struct hdbg_mblk_ {
  3359. hdbg_mblk *next;
  3360. hdbg_mblk *prev;
  3361. void *p;
  3362. Uint s;
  3363. ErtsAlcType_t n;
  3364. };
  3365. static hdbg_mblk hdbg_mblks[ERL_ALC_HDBG_MAX_MBLK];
  3366. static hdbg_mblk *free_hdbg_mblks;
  3367. static hdbg_mblk *used_hdbg_mblks;
  3368. static erts_mtx_t hdbg_mblk_mtx;
  3369. static void
  3370. hdbg_init(void)
  3371. {
  3372. int i;
  3373. for (i = 0; i < ERL_ALC_HDBG_MAX_MBLK-1; i++)
  3374. hdbg_mblks[i].next = &hdbg_mblks[i+1];
  3375. hdbg_mblks[ERL_ALC_HDBG_MAX_MBLK-1].next = NULL;
  3376. free_hdbg_mblks = &hdbg_mblks[0];
  3377. used_hdbg_mblks = NULL;
  3378. erts_mtx_init(&hdbg_mblk_mtx, "erts_alloc_hard_debug", NIL,
  3379. ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
  3380. }
  3381. static void *check_memory_fence(void *ptr,
  3382. Uint *size,
  3383. ErtsAlcType_t n,
  3384. int func);
  3385. void erts_hdbg_chk_blks(void);
  3386. void
  3387. erts_hdbg_chk_blks(void)
  3388. {
  3389. hdbg_mblk *mblk;
  3390. erts_mtx_lock(&hdbg_mblk_mtx);
  3391. for (mblk = used_hdbg_mblks; mblk; mblk = mblk->next) {
  3392. Uint sz;
  3393. check_memory_fence(mblk->p, &sz, mblk->n, ERTS_ALC_O_CHECK);
  3394. ASSERT(sz == mblk->s);
  3395. }
  3396. erts_mtx_unlock(&hdbg_mblk_mtx);
  3397. }
static hdbg_mblk *
hdbg_alloc(void *p, Uint s, ErtsAlcType_t n)
{
    hdbg_mblk *mblk;

    erts_mtx_lock(&hdbg_mblk_mtx);
    mblk = free_hdbg_mblks;
    if (!mblk) {
        erts_fprintf(stderr,
                     "Ran out of debug blocks; please increase "
                     "ERL_ALC_HDBG_MAX_MBLK=%d and recompile!\n",
                     ERL_ALC_HDBG_MAX_MBLK);
        abort();
    }
    free_hdbg_mblks = mblk->next;

    mblk->p = p;
    mblk->s = s;
    mblk->n = n;

    mblk->next = used_hdbg_mblks;
    mblk->prev = NULL;
    if (used_hdbg_mblks)
        used_hdbg_mblks->prev = mblk;
    used_hdbg_mblks = mblk;
    erts_mtx_unlock(&hdbg_mblk_mtx);
    return (void *) mblk;
}

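/*
 * Unlink a record from the used list and return it to the free list; called
 * when the corresponding block is freed or reallocated.
 */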
static void
hdbg_free(hdbg_mblk *mblk)
{
    erts_mtx_lock(&hdbg_mblk_mtx);
    if (mblk->next)
        mblk->next->prev = mblk->prev;
    if (mblk->prev)
        mblk->prev->next = mblk->next;
    else
        used_hdbg_mblks = mblk->next;

    mblk->next = free_hdbg_mblks;
    free_hdbg_mblks = mblk;
    erts_mtx_unlock(&hdbg_mblk_mtx);
}

#endif

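/*
 * With ERTS_ALLOC_UTIL_HARD_DEBUG, check_allocators() walks every
 * alloc_util instance and runs its check_mbc() callback on each multiblock
 * carrier, locking the instance first if it is thread safe.
 * check_allocated_block() verifies the fences of a single block.
 */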
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
static void *check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func);

void check_allocated_block(Uint type, void *blk)
{
    Uint dummy;
    check_memory_fence(blk, &dummy, ERTS_ALC_T2N(type), ERTS_ALC_O_FREE);
}

void check_allocators(void)
{
    int i;
    if (!erts_initialized)
        return;
    for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; ++i) {
        if (erts_allctrs_info[i].alloc_util) {
            ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) erts_allctrs[i].extra;
            Allctr_t *allctr = real_af->extra;
            Carrier_t *ct;
            if (allctr->thread_safe)
                erts_mtx_lock(&allctr->mutex);

            if (allctr->check_mbc) {
                for (ct = allctr->mbc_list.first; ct; ct = ct->next) {
                    fprintf(stderr, "Checking allocator %d\r\n", i);
                    allctr->check_mbc(allctr, ct);
                }
            }
            if (allctr->thread_safe)
                erts_mtx_unlock(&allctr->mutex);
        }
    }
}
#endif

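/*
 * Layout produced by set_memory_fence() (the hdbg_mblk pointer word is only
 * present under HARD_DEBUG):
 *
 *   [hdbg_mblk*] [size] [pre pattern] [user data ...] [post pattern]
 *                                     ^
 *                                     pointer returned to the caller
 *
 * The post pattern is written with sys_memcpy(), presumably because the end
 * of the user data need not be UWord aligned.
 */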
static void *
set_memory_fence(void *ptr, Uint sz, ErtsAlcType_t n)
{
    UWord *ui_ptr;
    UWord pattern;
#ifdef HARD_DEBUG
    hdbg_mblk **mblkpp;
#endif

    if (!ptr)
        return NULL;

    ui_ptr = (UWord *) ptr;
    pattern = MK_PATTERN(n);

#ifdef HARD_DEBUG
    mblkpp = (hdbg_mblk **) ui_ptr++;
#endif

    *(ui_ptr++) = sz;
    *(ui_ptr++) = pattern;
    sys_memcpy((void *) (((char *) ui_ptr)+sz), (void *) &pattern, sizeof(UWord));

#ifdef HARD_DEBUG
    *mblkpp = hdbg_alloc((void *) ui_ptr, sz, n);
#endif

    return (void *) ui_ptr;
}

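/*
 * check_memory_fence() is the inverse of set_memory_fence(): it reads back
 * the hidden header words, verifies both patterns and the type number
 * encoded in them, and returns the start of the real (outer) block so the
 * wrapped allocator can be called on it. Any mismatch aborts the emulator
 * with a message describing what was clobbered or misused.
 */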
static void *
check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
{
    Uint sz;
    Uint found_type;
    UWord pre_pattern, expected_pattern;
    UWord post_pattern;
    UWord *ui_ptr;
#ifdef HARD_DEBUG
    hdbg_mblk *mblk;
#endif

    if (!ptr)
        return NULL;

    expected_pattern = MK_PATTERN(n);

    ui_ptr = (UWord *) ptr;
    pre_pattern = *(--ui_ptr);
    *size = sz = *(--ui_ptr);
#ifdef HARD_DEBUG
    mblk = (hdbg_mblk *) *(--ui_ptr);
#endif

    found_type = GET_TYPE_OF_PATTERN(pre_pattern);

    if (found_type != n) {
        erts_exit(ERTS_ABORT_EXIT, "ERROR: Mismatching allocator types"
                  " used in alloc and free\n");
    }

    if (pre_pattern != expected_pattern) {
        if ((FIXED_FENCE_PATTERN_MASK & pre_pattern) != FIXED_FENCE_PATTERN)
            erts_exit(ERTS_ABORT_EXIT,
                      "ERROR: Fence at beginning of memory block (p=0x%u) "
                      "clobbered.\n",
                      (UWord) ptr);
    }

    sys_memcpy((void *) &post_pattern, (void *) (((char *)ptr)+sz), sizeof(UWord));

    if (post_pattern != expected_pattern || pre_pattern != post_pattern) {
        char fbuf[10];
        char obuf[10];
        char *ftype;
        char *otype;
        char *op_str;

        if ((FIXED_FENCE_PATTERN_MASK & post_pattern) != FIXED_FENCE_PATTERN)
            erts_exit(ERTS_ABORT_EXIT,
                      "ERROR: Fence at end of memory block (p=0x%u, sz=%u) "
                      "clobbered.\n",
                      (UWord) ptr, (UWord) sz);
        if (found_type != GET_TYPE_OF_PATTERN(post_pattern))
            erts_exit(ERTS_ABORT_EXIT,
                      "ERROR: Fence around memory block (p=0x%u, sz=%u) "
                      "clobbered.\n",
                      (UWord) ptr, (UWord) sz);

        ftype = type_no_str(found_type);
        if (!ftype) {
            erts_snprintf(fbuf, sizeof(fbuf), "%d", (int) found_type);
            ftype = fbuf;
        }
        otype = type_no_str(n);
        if (!otype) {
            erts_snprintf(obuf, sizeof(obuf), "%d", (int) n);
            otype = obuf;
        }

        switch (func) {
        case ERTS_ALC_O_ALLOC:   op_str = "allocated";   break;
        case ERTS_ALC_O_REALLOC: op_str = "reallocated"; break;
        case ERTS_ALC_O_FREE:    op_str = "freed";       break;
        default:                 op_str = "???";         break;
        }

        erts_exit(ERTS_ABORT_EXIT,
                  "ERROR: Memory block (p=0x%u, sz=%u) allocated as type \"%s\","
                  " but %s as type \"%s\".\n",
                  (UWord) ptr, (UWord) sz, ftype, op_str, otype);
    }

#ifdef HARD_DEBUG
    switch (func) {
    case ERTS_ALC_O_REALLOC:
    case ERTS_ALC_O_FREE:
        hdbg_free(mblk);
        break;
    default:
        break;
    }
#endif

    return (void *) ui_ptr;
}

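/*
 * Debug wrappers installed by install_debug_functions() below. The original
 * callbacks are saved in real_allctrs[]; each wrapper asks the real
 * allocator for FENCE_SZ extra bytes and places fences around the block
 * handed back to the caller.
 */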
static ErtsAllocatorFunctions_t real_allctrs[ERTS_ALC_A_MAX+1];

static void *
debug_alloc(ErtsAlcType_t type, void *extra, Uint size)
{
    ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
    ErtsAlcType_t n;
    Uint dsize;
    void *res;

#ifdef HARD_DEBUG
    erts_hdbg_chk_blks();
#endif

    n = ERTS_ALC_T2N(type);

    ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);
    dsize = size + FENCE_SZ;
    res = (*real_af->alloc)(type, real_af->extra, dsize);

    res = set_memory_fence(res, size, n);

#ifdef PRINT_OPS
    fprintf(stderr, "0x%lx = alloc(%s, %lu)\r\n",
            (Uint) res, ERTS_ALC_N2TD(n), size);
#endif

    return res;
}

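/*
 * On realloc the old block's fences are checked first. If the block shrinks,
 * the tail that is about to disappear is overwritten with 0xf bytes,
 * presumably to make continued use of the discarded area easier to notice.
 */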
static void *
debug_realloc(ErtsAlcType_t type, void *extra, void *ptr, Uint size)
{
    ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
    ErtsAlcType_t n;
    Uint dsize;
    Uint old_size;
    void *dptr;
    void *res;

    n = ERTS_ALC_T2N(type);

    ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);

    dsize = size + FENCE_SZ;
    dptr = check_memory_fence(ptr, &old_size, n, ERTS_ALC_O_REALLOC);

#ifdef HARD_DEBUG
    erts_hdbg_chk_blks();
#endif

    if (ptr && old_size > size)
        sys_memset((void *) (((char *) ptr) + size),
                   0xf,
                   sizeof(Uint) + old_size - size);

    res = (*real_af->realloc)(type, real_af->extra, dptr, dsize);

    res = set_memory_fence(res, size, n);

#ifdef PRINT_OPS
    fprintf(stderr, "0x%lx = realloc(%s, 0x%lx, %lu)\r\n",
            (Uint) res, ERTS_ALC_N2TD(n), (Uint) ptr, size);
#endif

    return res;
}

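/*
 * On free the whole block, fences included, is filled with a junk pattern
 * (the type number, or 0x0f for executable memory on x86) before it is
 * handed back to the real allocator, making later use of the freed block
 * easier to notice.
 */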
static void
debug_free(ErtsAlcType_t type, void *extra, void *ptr)
{
    ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
    ErtsAlcType_t n;
    void *dptr;
    Uint size;
    int free_pattern;

    n = ERTS_ALC_T2N(type);
    free_pattern = n;

    ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);

    if (!ptr)
        return;

    dptr = check_memory_fence(ptr, &size, n, ERTS_ALC_O_FREE);

#ifdef ERTS_ALC_A_EXEC
# if defined(__i386__) || defined(__x86_64__)
    if (ERTS_ALC_T2A(ERTS_ALC_N2T(n)) == ERTS_ALC_A_EXEC) {
        free_pattern = 0x0f; /* Illegal instruction */
    }
# endif
#endif
    sys_memset((void *) dptr, free_pattern, size + FENCE_SZ);

    (*real_af->free)(type, real_af->extra, dptr);

#ifdef PRINT_OPS
    fprintf(stderr, "free(%s, 0x%lx)\r\n", ERTS_ALC_N2TD(n), (Uint) ptr);
#endif

#ifdef HARD_DEBUG
    erts_hdbg_chk_blks();
#endif
}

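/*
 * install_debug_functions() redirects every allocator's callbacks to the
 * debug wrappers above, keeping the originals in real_allctrs[], and
 * returns FENCE_SZ, the extra per-block overhead the wrappers introduce.
 */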
static Uint
install_debug_functions(void)
{
    int i;
    ERTS_CT_ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));

    sys_memcpy((void *) real_allctrs, (void *) erts_allctrs, sizeof(erts_allctrs));

    for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
        erts_allctrs[i].alloc   = debug_alloc;
        erts_allctrs[i].realloc = debug_realloc;
        erts_allctrs[i].free    = debug_free;
        erts_allctrs[i].extra   = (void *) &real_allctrs[i];
    }
    return FENCE_SZ;
}

#endif /* #ifdef DEBUG */