
/erts/emulator/beam/erl_alloc_util.c

https://github.com/bsmr-erlang/otp
C | 8251 lines | 6391 code | 1444 blank | 416 comment | 1244 complexity | b9fbce4ac17882801f6d4ee963dfc2ea MD5
Possible License(s): BSD-3-Clause, LGPL-2.1, MPL-2.0-no-copyleft-exception, Apache-2.0
  1. /*
  2. * %CopyrightBegin%
  3. *
  4. * Copyright Ericsson AB 2002-2018. All Rights Reserved.
  5. *
  6. * Licensed under the Apache License, Version 2.0 (the "License");
  7. * you may not use this file except in compliance with the License.
  8. * You may obtain a copy of the License at
  9. *
  10. * http://www.apache.org/licenses/LICENSE-2.0
  11. *
  12. * Unless required by applicable law or agreed to in writing, software
  13. * distributed under the License is distributed on an "AS IS" BASIS,
  14. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15. * See the License for the specific language governing permissions and
  16. * limitations under the License.
  17. *
  18. * %CopyrightEnd%
  19. */
  20. /*
  21. * Description: A memory allocator utility. This utility provides
  22. * management of (multiple) memory segments, coalescing of
  23. * free blocks, etc. Allocators are implemented through a
  24. * callback interface that is called by this utility. The only
  25. * task the callback module has to perform is to supervise the
  26. * free blocks.
  27. *
  28. * Author: Rickard Green
  29. */
  30. /*
  31. * Alloc util will enforce 8 byte alignment if sys_alloc and mseg_alloc at
  32. * least enforce 8 byte alignment. If sys_alloc only enforces 4 byte
  33. * alignment, then alloc util will do so too.
  34. */
  35. #ifdef HAVE_CONFIG_H
  36. # include "config.h"
  37. #endif
  38. #include "global.h"
  39. #include "big.h"
  40. #include "erl_mmap.h"
  41. #include "erl_mtrace.h"
  42. #define GET_ERL_ALLOC_UTIL_IMPL
  43. #include "erl_alloc_util.h"
  44. #include "erl_mseg.h"
  45. #include "erl_threads.h"
  46. #include "erl_thr_progress.h"
  47. #include "erl_bif_unique.h"
  48. #include "erl_nif.h"
  49. #ifdef ERTS_ENABLE_LOCK_COUNT
  50. #include "erl_lock_count.h"
  51. #endif
  52. #include "lttng-wrapper.h"
  53. #if defined(ERTS_ALLOC_UTIL_HARD_DEBUG) && defined(__GNUC__)
  54. #warning "* * * * * * * * * *"
  55. #warning "* * * * * * * * * *"
  56. #warning "* * NOTE: * *"
  57. #warning "* * Hard debug * *"
  58. #warning "* * is enabled! * *"
  59. #warning "* * * * * * * * * *"
  60. #warning "* * * * * * * * * *"
  61. #endif
  62. #define ERTS_ALCU_DD_OPS_LIM_HIGH 20
  63. #define ERTS_ALCU_DD_OPS_LIM_LOW 2
  64. /* Fix alloc limit */
  65. #define ERTS_ALCU_FIX_MAX_LIST_SZ 1000
  66. #define ERTS_ALC_FIX_MAX_SHRINK_OPS 30
  67. #define ALLOC_ZERO_EQ_NULL 0
  68. #ifndef ERTS_MSEG_FLG_2POW
  69. # define ERTS_MSEG_FLG_2POW 0
  70. #endif
  71. #ifndef ERTS_MSEG_FLG_NONE
  72. # define ERTS_MSEG_FLG_NONE 0
  73. #endif
  74. static int atoms_initialized = 0;
  75. static int initialized = 0;
  76. #define INV_SYS_ALLOC_CARRIER_MASK ((UWord) (sys_alloc_carrier_size - 1))
  77. #define SYS_ALLOC_CARRIER_MASK (~INV_SYS_ALLOC_CARRIER_MASK)
  78. #define SYS_ALLOC_CARRIER_FLOOR(X) ((X) & SYS_ALLOC_CARRIER_MASK)
  79. #define SYS_ALLOC_CARRIER_CEILING(X) \
  80. SYS_ALLOC_CARRIER_FLOOR((X) + INV_SYS_ALLOC_CARRIER_MASK)
  81. #define SYS_PAGE_SIZE (sys_page_size)
  82. #define SYS_PAGE_SZ_MASK ((UWord)(SYS_PAGE_SIZE - 1))
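/* Editorial sketch of the carrier-size rounding macros above, assuming
 * sys_alloc_carrier_size is a power of two: the inverted mask selects the
 * offset within a carrier, while the mask selects the carrier multiple.
 * The numbers below are made up for illustration only. */
#if 0
#include <assert.h>
typedef unsigned long demo_uword;
int main(void)
{
    demo_uword carrier_size = (demo_uword)1 << 18;   /* assumed carrier size          */
    demo_uword inv_mask = carrier_size - 1;          /* cf. INV_SYS_ALLOC_CARRIER_MASK */
    demo_uword mask = ~inv_mask;                     /* cf. SYS_ALLOC_CARRIER_MASK     */
    demo_uword sz = 300000;
    assert((sz & mask) == 262144);                   /* FLOOR: round down              */
    assert(((sz + inv_mask) & mask) == 524288);      /* CEILING: round up              */
    return 0;
}
#endif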
  83. #if 0
  84. /* Can be useful for debugging */
  85. #define MBC_REALLOC_ALWAYS_MOVES
  86. #endif
  87. /* alloc_util global parameters */
  88. static Uint sys_alloc_carrier_size;
  89. static Uint sys_page_size;
  90. #if HAVE_ERTS_MSEG
  91. static Uint max_mseg_carriers;
  92. #endif
  93. static int allow_sys_alloc_carriers;
  94. #define ONE_GIGA (1000000000)
  95. #define ERTS_ALC_CC_GIGA_VAL(CC) ((CC) / ONE_GIGA)
  96. #define ERTS_ALC_CC_VAL(CC) ((CC) % ONE_GIGA)
  97. #define INC_CC(CC) ((CC)++)
  98. #define DEC_CC(CC) ((CC)--)
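/* Editorial sketch: the call counters above are reported as a
 * {giga_count, count} pair through ERTS_ALC_CC_GIGA_VAL and ERTS_ALC_CC_VAL;
 * the full count is giga_count * 10^9 + count. */
#if 0
#include <assert.h>
int main(void)
{
    unsigned long long cc = 7250000123ULL;          /* example call counter  */
    unsigned long long giga = cc / 1000000000ULL;   /* ERTS_ALC_CC_GIGA_VAL  */
    unsigned long long rest = cc % 1000000000ULL;   /* ERTS_ALC_CC_VAL       */
    assert(giga == 7 && rest == 250000123ULL);
    assert(giga * 1000000000ULL + rest == cc);      /* recombined full count */
    return 0;
}
#endif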
  99. /* Multi block carrier (MBC) memory layout in OTP 22:
  100. Empty MBC:
  101. [Carrier_t|pad|Block_t L0T0|fhdr| free... ]
  102. MBC after allocating first block:
  103. [Carrier_t|pad|Block_t 0000| udata |pad|Block_t L0T0|fhdr| free... ]
  104. MBC after allocating second block:
  105. [Carrier_t|pad|Block_t 0000| udata |pad|Block_t 0000| udata |pad|Block_t L0T0|fhdr| free... ]
  106. MBC after deallocating first block:
  107. [Carrier_t|pad|Block_t 00T0|fhdr| free |FreeBlkFtr_t|Block_t 0P00| udata |pad|Block_t L0T0|fhdr| free... ]
  108. MBC after allocating first block, with allocation tagging enabled:
  109. [Carrier_t|pad|Block_t 000A| udata |atag|pad|Block_t L0T0|fhdr| free... ]
  110. udata = Allocated user data
  111. atag = A tag with basic metadata about this allocation
  112. pad = Padding to ensure correct alignment for user data
  113. fhdr = Allocator specific header to keep track of free block
  114. free = Unused free memory
  115. T = This block is free (THIS_FREE_BLK_HDR_FLG)
  116. P = Previous block is free (PREV_FREE_BLK_HDR_FLG)
  117. L = Last block in carrier (LAST_BLK_HDR_FLG)
  118. A = Block has an allocation tag footer, only valid for allocated blocks
  119. (ATAG_BLK_HDR_FLG)
  120. */
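/* Editorial sketch of the layout described above: walking every block in a
 * multi block carrier with the block macros defined further down in this
 * file (MBC_TO_FIRST_BLK, IS_FREE_BLK, MBC_BLK_SZ, IS_LAST_BLK, NXT_BLK).
 * Kept under #if 0 as illustration only; it is not used by the allocator. */
#if 0
static void walk_mbc_blocks(Allctr_t *allctr, Carrier_t *crr)
{
    Block_t *blk = MBC_TO_FIRST_BLK(allctr, crr);
    for (;;) {
        UWord sz = MBC_BLK_SZ(blk);        /* header + udata (+ atag) + pad      */
        int is_free = IS_FREE_BLK(blk);    /* THIS_FREE_BLK_HDR_FLG ("T") set?   */
        (void) sz; (void) is_free;
        if (IS_LAST_BLK(blk))              /* LAST_BLK_HDR_FLG ("L") ends the walk */
            break;
        blk = NXT_BLK(blk);                /* next block starts right after this one */
    }
}
#endif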
  121. /* Single block carrier (SBC):
  122. [Carrier_t|pad|Block_t 1110| udata... ]
  123. [Carrier_t|pad|Block_t 111A| udata | atag]
  124. */
  125. /* Allocation tags ...
  126. *
  127. * These are added to the footer of every block when enabled. Currently they
  128. * consist of the allocation type and an atom identifying the allocating
  129. * driver/nif (or 'system' if that can't be determined), but the format is not
  130. * supposed to be set in stone.
  131. *
  132. * The packing scheme requires that the atom values are small enough to fit
  133. * into a word with ERTS_ALC_N_BITS to spare. Users must check for overflow
  134. * before MAKE_ATAG(). */
  135. typedef UWord alcu_atag_t;
  136. #define MAKE_ATAG(IdAtom, TypeNum) \
  137. (ASSERT((TypeNum) >= ERTS_ALC_N_MIN && (TypeNum) <= ERTS_ALC_N_MAX), \
  138. ASSERT(atom_val(IdAtom) <= MAX_ATAG_ATOM_ID), \
  139. (atom_val(IdAtom) << ERTS_ALC_N_BITS) | (TypeNum))
  140. #define ATAG_ID(AT) (make_atom((AT) >> ERTS_ALC_N_BITS))
  141. #define ATAG_TYPE(AT) ((AT) & ERTS_ALC_N_MASK)
  142. #define MAX_ATAG_ATOM_ID (ERTS_UWORD_MAX >> ERTS_ALC_N_BITS)
  143. #define DBG_IS_VALID_ATAG(AT) \
  144. (ATAG_TYPE(AT) >= ERTS_ALC_N_MIN && \
  145. ATAG_TYPE(AT) <= ERTS_ALC_N_MAX && \
  146. ATAG_ID(AT) <= MAX_ATAG_ATOM_ID)
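/* Editorial sketch of the tag packing used by MAKE_ATAG/ATAG_ID/ATAG_TYPE
 * above: the allocation type number lives in the low ERTS_ALC_N_BITS bits and
 * the atom value in the remaining high bits. The constants and values below
 * are made up for illustration. */
#if 0
#include <assert.h>
#include <stdint.h>
#define DEMO_N_BITS 9                                 /* assumed type field width */
#define DEMO_N_MASK ((1u << DEMO_N_BITS) - 1)
int main(void)
{
    uintptr_t atom_ix = 12345;                        /* hypothetical atom value  */
    uintptr_t type_no = 17;                           /* hypothetical alloc type  */
    uintptr_t tag = (atom_ix << DEMO_N_BITS) | type_no;   /* cf. MAKE_ATAG        */
    assert((tag & DEMO_N_MASK) == type_no);           /* cf. ATAG_TYPE            */
    assert((tag >> DEMO_N_BITS) == atom_ix);          /* cf. ATAG_ID              */
    return 0;
}
#endif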
  147. /* Blocks ... */
  148. #define UNUSED0_BLK_FTR_FLG (((UWord) 1) << 0)
  149. #define UNUSED1_BLK_FTR_FLG (((UWord) 1) << 1)
  150. #define UNUSED2_BLK_FTR_FLG (((UWord) 1) << 2)
  151. #if MBC_ABLK_OFFSET_BITS
  152. # define ABLK_HDR_SZ (offsetof(Block_t,u))
  153. #else
  154. # define ABLK_HDR_SZ (sizeof(Block_t))
  155. #endif
  156. #define FBLK_FTR_SZ (sizeof(FreeBlkFtr_t))
  157. #define BLK_HAS_ATAG(B) \
  158. (!!((B)->bhdr & ATAG_BLK_HDR_FLG))
  159. #define GET_BLK_ATAG(B) \
  160. (ASSERT(BLK_HAS_ATAG(B)), \
  161. ((alcu_atag_t *) (((char *) (B)) + (BLK_SZ(B))))[-1])
  162. #define SET_BLK_ATAG(B, T) \
  163. ((B)->bhdr |= ATAG_BLK_HDR_FLG, \
  164. ((alcu_atag_t *) (((char *) (B)) + (BLK_SZ(B))))[-1] = (T))
  165. #define BLK_ATAG_SZ(AP) ((AP)->atags ? sizeof(alcu_atag_t) : 0)
  166. #define UMEMSZ2BLKSZ(AP, SZ) \
  167. (ABLK_HDR_SZ + BLK_ATAG_SZ(AP) + (SZ) <= (AP)->min_block_size \
  168. ? (AP)->min_block_size \
  169. : UNIT_CEILING(ABLK_HDR_SZ + BLK_ATAG_SZ(AP) + (SZ)))
  170. #define UMEM2BLK(P) ((Block_t *) (((char *) (P)) - ABLK_HDR_SZ))
  171. #define BLK2UMEM(P) ((void *) (((char *) (P)) + ABLK_HDR_SZ))
  172. #define PREV_BLK_SZ(B) ((UWord) (((FreeBlkFtr_t *)(B))[-1]))
  173. #define SET_BLK_SZ_FTR(B, SZ) \
  174. (((FreeBlkFtr_t *) (((char *) (B)) + (SZ)))[-1] = (SZ))
  175. #define SET_MBC_ABLK_SZ(B, SZ) \
  176. (ASSERT(((SZ) & BLK_FLG_MASK) == 0), \
  177. (B)->bhdr = (((B)->bhdr) & ~MBC_ABLK_SZ_MASK) | (SZ))
  178. #define SET_MBC_FBLK_SZ(B, SZ) \
  179. (ASSERT(((SZ) & BLK_FLG_MASK) == 0), \
  180. (B)->bhdr = (((B)->bhdr) & ~MBC_FBLK_SZ_MASK) | (SZ))
  181. #define SET_SBC_BLK_SZ(B, SZ) \
  182. (ASSERT(((SZ) & BLK_FLG_MASK) == 0), \
  183. (B)->bhdr = (((B)->bhdr) & ~SBC_BLK_SZ_MASK) | (SZ))
  184. #define SET_PREV_BLK_FREE(AP,B) \
  185. (ASSERT(!IS_MBC_FIRST_BLK(AP,B)), \
  186. ASSERT(!IS_FREE_BLK(B)), \
  187. (B)->bhdr |= PREV_FREE_BLK_HDR_FLG)
  188. #define SET_PREV_BLK_ALLOCED(B) \
  189. ((B)->bhdr &= ~PREV_FREE_BLK_HDR_FLG)
  190. #define SET_LAST_BLK(B) \
  191. ((B)->bhdr |= LAST_BLK_HDR_FLG)
  192. #define SET_NOT_LAST_BLK(B) \
  193. ((B)->bhdr &= ~LAST_BLK_HDR_FLG)
  194. #define SBH_THIS_FREE THIS_FREE_BLK_HDR_FLG
  195. #define SBH_PREV_FREE PREV_FREE_BLK_HDR_FLG
  196. #define SBH_LAST_BLK LAST_BLK_HDR_FLG
  197. #if MBC_ABLK_OFFSET_BITS
  198. # define MBC_SZ_MAX_LIMIT ((((UWord)1 << MBC_ABLK_OFFSET_BITS) - 1) << ERTS_SUPER_ALIGN_BITS)
  199. # define BLK_CARRIER_OFFSET(B, C) (((char*)(B) - (char*)(C)) >> ERTS_SACRR_UNIT_SHIFT)
  200. # define SET_MBC_ABLK_HDR(B, Sz, F, C) \
  201. (ASSERT(((Sz) & ~MBC_ABLK_SZ_MASK) == 0), \
  202. ASSERT(!((UWord)(F) & (~BLK_FLG_MASK|THIS_FREE_BLK_HDR_FLG))), \
  203. (B)->bhdr = ((Sz) | (F) | (BLK_CARRIER_OFFSET(B,C) << MBC_ABLK_OFFSET_SHIFT)))
  204. # define SET_MBC_FBLK_HDR(B, Sz, F, C) \
  205. (ASSERT(((Sz) & ~MBC_FBLK_SZ_MASK) == 0), \
  206. ASSERT(((UWord)(F) & (~BLK_FLG_MASK|THIS_FREE_BLK_HDR_FLG|PREV_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \
  207. (B)->bhdr = ((Sz) | (F)), \
  208. (B)->u.carrier = (C))
  209. # define IS_MBC_FIRST_ABLK(AP,B) \
  210. ((((UWord)(B) & ~ERTS_SACRR_UNIT_MASK) == MBC_HEADER_SIZE(AP)) \
  211. && ((B)->bhdr & MBC_ABLK_OFFSET_MASK) == 0)
  212. # define IS_MBC_FIRST_FBLK(AP,B) \
  213. ((char*)(B) == (char*)((B)->u.carrier) + MBC_HEADER_SIZE(AP))
  214. # define IS_MBC_FIRST_BLK(AP,B) \
  215. (IS_FREE_BLK(B) ? IS_MBC_FIRST_FBLK(AP,B) : IS_MBC_FIRST_ABLK(AP,B))
  216. # define SET_BLK_FREE(B) \
  217. (ASSERT(!IS_PREV_BLK_FREE(B)), \
  218. (B)->u.carrier = ABLK_TO_MBC(B), \
  219. (B)->bhdr &= (MBC_ABLK_SZ_MASK|LAST_BLK_HDR_FLG), \
  220. (B)->bhdr |= THIS_FREE_BLK_HDR_FLG)
  221. # define SET_BLK_ALLOCED(B) \
  222. (ASSERT(((B)->bhdr & (MBC_ABLK_OFFSET_MASK|THIS_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \
  223. (B)->bhdr &= ~THIS_FREE_BLK_HDR_FLG, \
  224. (B)->bhdr |= (BLK_CARRIER_OFFSET(B,(B)->u.carrier) << MBC_ABLK_OFFSET_SHIFT))
  225. #else /* !MBC_ABLK_OFFSET_BITS */
  226. # define MBC_SZ_MAX_LIMIT ((UWord)~0)
  227. # define SET_MBC_ABLK_HDR(B, Sz, F, C) \
  228. (ASSERT(((Sz) & BLK_FLG_MASK) == 0), \
  229. ASSERT(((F) & ~BLK_FLG_MASK) == 0), \
  230. ASSERT(!((UWord)(F) & (~BLK_FLG_MASK|THIS_FREE_BLK_HDR_FLG))), \
  231. (B)->bhdr = ((Sz) | (F)), \
  232. (B)->carrier = (C))
  233. # define SET_MBC_FBLK_HDR(B, Sz, F, C) \
  234. (ASSERT(((Sz) & BLK_FLG_MASK) == 0), \
  235. ASSERT(((F) & ~BLK_FLG_MASK) == 0), \
  236. ASSERT(((UWord)(F) & (~BLK_FLG_MASK|THIS_FREE_BLK_HDR_FLG|PREV_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \
  237. (B)->bhdr = ((Sz) | (F)), \
  238. (B)->carrier = (C))
  239. # define IS_MBC_FIRST_BLK(AP,B) \
  240. ((char*)(B) == (char*)((B)->carrier) + MBC_HEADER_SIZE(AP))
  241. # define IS_MBC_FIRST_ABLK(AP,B) IS_MBC_FIRST_BLK(AP,B)
  242. # define IS_MBC_FIRST_FBLK(AP,B) IS_MBC_FIRST_BLK(AP,B)
  243. # define SET_BLK_FREE(B) \
  244. (ASSERT(!IS_PREV_BLK_FREE(B)), \
  245. (B)->bhdr |= THIS_FREE_BLK_HDR_FLG)
  246. # define SET_BLK_ALLOCED(B) \
  247. ((B)->bhdr &= ~THIS_FREE_BLK_HDR_FLG)
  248. #endif /* !MBC_ABLK_OFFSET_BITS */
  249. #define SET_SBC_BLK_HDR(B, Sz) \
  250. (ASSERT(((Sz) & BLK_FLG_MASK) == 0), (B)->bhdr = ((Sz) | (SBC_BLK_HDR_FLG)))
  251. #define BLK_UMEM_SZ(B) \
  252. (BLK_SZ(B) - (ABLK_HDR_SZ))
  253. #define IS_PREV_BLK_FREE(B) \
  254. ((B)->bhdr & PREV_FREE_BLK_HDR_FLG)
  255. #define IS_PREV_BLK_ALLOCED(B) \
  256. (!IS_PREV_BLK_FREE((B)))
  257. #define IS_ALLOCED_BLK(B) \
  258. (!IS_FREE_BLK((B)))
  259. #define IS_LAST_BLK(B) \
  260. ((B)->bhdr & LAST_BLK_HDR_FLG)
  261. #define IS_NOT_LAST_BLK(B) \
  262. (!IS_LAST_BLK((B)))
  263. #define GET_LAST_BLK_HDR_FLG(B) \
  264. ((B)->bhdr & LAST_BLK_HDR_FLG)
  265. #define GET_THIS_FREE_BLK_HDR_FLG(B) \
  266. ((B)->bhdr & THIS_FREE_BLK_HDR_FLG)
  267. #define GET_PREV_FREE_BLK_HDR_FLG(B) \
  268. ((B)->bhdr & PREV_FREE_BLK_HDR_FLG)
  269. #define GET_BLK_HDR_FLGS(B) \
  270. ((B)->bhdr & BLK_FLG_MASK)
  271. #define NXT_BLK(B) \
  272. (ASSERT(IS_MBC_BLK(B)), \
  273. (Block_t *) (((char *) (B)) + MBC_BLK_SZ((B))))
  274. #define PREV_BLK(B) \
  275. ((Block_t *) (((char *) (B)) - PREV_BLK_SZ((B))))
  276. #define BLK_AFTER(B,Sz) \
  277. ((Block_t *) (((char *) (B)) + (Sz)))
  278. #define BLK_SZ(B) ((B)->bhdr & (((B)->bhdr & THIS_FREE_BLK_HDR_FLG) ? MBC_FBLK_SZ_MASK : MBC_ABLK_SZ_MASK))
  279. /* Carriers ... */
  280. /* #define ERTS_ALC_CPOOL_DEBUG */
  281. #if defined(DEBUG) && !defined(ERTS_ALC_CPOOL_DEBUG)
  282. # define ERTS_ALC_CPOOL_DEBUG
  283. #endif
  284. #ifdef ERTS_ALC_CPOOL_DEBUG
  285. # define ERTS_ALC_CPOOL_ASSERT(A) \
  286. ((void) ((A) \
  287. ? 1 \
  288. : (erts_alcu_assert_failed(#A, \
  289. (char *) __FILE__, \
  290. __LINE__, \
  291. (char *) __func__), \
  292. 0)))
  293. #else
  294. # define ERTS_ALC_CPOOL_ASSERT(A) ((void) 1)
  295. #endif
  296. #define ERTS_ALC_IS_CPOOL_ENABLED(A) ((A)->cpool.util_limit)
  297. #define ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON 1000
  298. #define ERTS_ALC_CPOOL_ALLOC_OP_INC 8
  299. #define ERTS_ALC_CPOOL_FREE_OP_DEC 10
  300. #define ERTS_ALC_CPOOL_ALLOC_OP(A) \
  301. do { \
  302. if ((A)->cpool.disable_abandon < ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON) { \
  303. (A)->cpool.disable_abandon += ERTS_ALC_CPOOL_ALLOC_OP_INC; \
  304. if ((A)->cpool.disable_abandon > ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON) \
  305. (A)->cpool.disable_abandon = ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON; \
  306. } \
  307. } while (0)
  308. #if ERTS_ALC_CPOOL_ALLOC_OP_INC >= ERTS_ALC_CPOOL_FREE_OP_DEC
  309. # error "Implementation assumes ERTS_ALC_CPOOL_ALLOC_OP_INC < ERTS_ALC_CPOOL_FREE_OP_DEC"
  310. #endif
  311. #define ERTS_ALC_CPOOL_REALLOC_OP(A) \
  312. do { \
  313. if ((A)->cpool.disable_abandon) { \
  314. (A)->cpool.disable_abandon -= (ERTS_ALC_CPOOL_FREE_OP_DEC \
  315. - ERTS_ALC_CPOOL_ALLOC_OP_INC); \
  316. if ((A)->cpool.disable_abandon < 0) \
  317. (A)->cpool.disable_abandon = 0; \
  318. } \
  319. } while (0)
  320. #define ERTS_ALC_CPOOL_FREE_OP(A) \
  321. do { \
  322. if ((A)->cpool.disable_abandon) { \
  323. (A)->cpool.disable_abandon -= ERTS_ALC_CPOOL_FREE_OP_DEC; \
  324. if ((A)->cpool.disable_abandon < 0) \
  325. (A)->cpool.disable_abandon = 0; \
  326. } \
  327. } while (0)
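/* Editorial sketch of the disable_abandon bookkeeping above: each allocation
 * adds ERTS_ALC_CPOOL_ALLOC_OP_INC (saturating at
 * ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON), each free subtracts
 * ERTS_ALC_CPOOL_FREE_OP_DEC and each realloc subtracts their difference,
 * both floored at zero, so the counter only drains when frees dominate. */
#if 0
static int demo_disable_abandon = 0;
static void demo_alloc_op(void)
{
    if (demo_disable_abandon < 1000) {
        demo_disable_abandon += 8;
        if (demo_disable_abandon > 1000)
            demo_disable_abandon = 1000;
    }
}
static void demo_free_op(void)
{
    if (demo_disable_abandon) {
        demo_disable_abandon -= 10;
        if (demo_disable_abandon < 0)
            demo_disable_abandon = 0;
    }
}
static void demo_realloc_op(void)
{
    if (demo_disable_abandon) {
        demo_disable_abandon -= (10 - 8);
        if (demo_disable_abandon < 0)
            demo_disable_abandon = 0;
    }
}
#endif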
  328. #define ERTS_CRR_ALCTR_FLG_IN_POOL (((erts_aint_t) 1) << 0)
  329. #define ERTS_CRR_ALCTR_FLG_BUSY (((erts_aint_t) 1) << 1)
  330. #define ERTS_CRR_ALCTR_FLG_HOMECOMING (((erts_aint_t) 1) << 2)
  331. #define ERTS_CRR_ALCTR_FLG_MASK (ERTS_CRR_ALCTR_FLG_IN_POOL | \
  332. ERTS_CRR_ALCTR_FLG_BUSY | \
  333. ERTS_CRR_ALCTR_FLG_HOMECOMING)
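/* Editorial sketch of the tagged pointer scheme above: a carrier's allctr
 * word stores the owning allocator's address with the three low bits reused
 * for IN_POOL, BUSY and HOMECOMING; masking with ~ERTS_CRR_ALCTR_FLG_MASK
 * recovers the pointer, which relies on the allocator structures being
 * aligned so those bits are always zero. The address below is made up. */
#if 0
#include <assert.h>
#include <stdint.h>
int main(void)
{
    uintptr_t allctr_addr = (uintptr_t)0xA0000u;   /* hypothetical aligned Allctr_t */
    uintptr_t in_pool = (uintptr_t)1 << 0;
    uintptr_t busy    = (uintptr_t)1 << 1;
    uintptr_t mask    = in_pool | busy | ((uintptr_t)1 << 2);
    uintptr_t word    = allctr_addr | in_pool | busy;
    assert((word & ~mask) == allctr_addr);         /* pointer bits intact */
    assert((word & in_pool) && (word & busy));     /* flags readable      */
    return 0;
}
#endif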
  334. #define SBC_HEADER_SIZE \
  335. (UNIT_CEILING(offsetof(Carrier_t, cpool) \
  336. + ABLK_HDR_SZ) \
  337. - ABLK_HDR_SZ)
  338. #define MBC_HEADER_SIZE(AP) ((AP)->mbc_header_size)
  339. #define MSEG_CARRIER_HDR_FLAG (((UWord) 1) << 0)
  340. #define SBC_CARRIER_HDR_FLAG (((UWord) 1) << 1)
  341. #define SCH_SYS_ALLOC 0
  342. #define SCH_MSEG MSEG_CARRIER_HDR_FLAG
  343. #define SCH_MBC 0
  344. #define SCH_SBC SBC_CARRIER_HDR_FLAG
  345. #define SET_CARRIER_HDR(C, Sz, F, AP) \
  346. (ASSERT(((Sz) & CRR_FLG_MASK) == 0), (C)->chdr = ((Sz) | (F)), \
  347. erts_atomic_init_nob(&(C)->allctr, (erts_aint_t) (AP)))
  348. #define BLK_TO_SBC(B) \
  349. ((Carrier_t *) (((char *) (B)) - SBC_HEADER_SIZE))
  350. #define FIRST_BLK_TO_MBC(AP, B) \
  351. ((Carrier_t *) (((char *) (B)) - MBC_HEADER_SIZE(AP)))
  352. #define MBC_TO_FIRST_BLK(AP, P) \
  353. ((Block_t *) (((char *) (P)) + MBC_HEADER_SIZE(AP)))
  354. #define SBC2BLK(AP, P) \
  355. ((Block_t *) (((char *) (P)) + SBC_HEADER_SIZE))
  356. #define SBC2UMEM(AP, P) \
  357. ((void *) (((char *) (P)) + (SBC_HEADER_SIZE + ABLK_HDR_SZ)))
  358. #define IS_MSEG_CARRIER(C) \
  359. ((C)->chdr & MSEG_CARRIER_HDR_FLAG)
  360. #define IS_SYS_ALLOC_CARRIER(C) \
  361. (!IS_MSEG_CARRIER((C)))
  362. #define IS_SB_CARRIER(C) \
  363. ((C)->chdr & SBC_CARRIER_HDR_FLAG)
  364. #define IS_MB_CARRIER(C) \
  365. (!IS_SB_CARRIER((C)))
  366. #define SET_CARRIER_SZ(C, SZ) \
  367. (ASSERT(((SZ) & CRR_FLG_MASK) == 0), \
  368. ((C)->chdr = ((C)->chdr & CRR_FLG_MASK) | (SZ)))
  369. #define CFLG_SBC (1 << 0)
  370. #define CFLG_MBC (1 << 1)
  371. #define CFLG_FORCE_MSEG (1 << 2)
  372. #define CFLG_FORCE_SYS_ALLOC (1 << 3)
  373. #define CFLG_FORCE_SIZE (1 << 4)
  374. #define CFLG_MAIN_CARRIER (1 << 5)
  375. #define CFLG_NO_CPOOL (1 << 6)
  376. #ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
  377. static void check_blk_carrier(Allctr_t *, Block_t *);
  378. #define HARD_CHECK_BLK_CARRIER(A, B) check_blk_carrier((A), (B))
  379. #else
  380. #define HARD_CHECK_BLK_CARRIER(A, B)
  381. #endif
  382. /* Statistics updating ... */
  383. #ifdef DEBUG
  384. #define DEBUG_CHECK_CARRIER_NO_SZ(AP) \
  385. ASSERT(((AP)->sbcs.curr.norm.mseg.no \
  386. && (AP)->sbcs.curr.norm.mseg.size) \
  387. || (!(AP)->sbcs.curr.norm.mseg.no \
  388. && !(AP)->sbcs.curr.norm.mseg.size)); \
  389. ASSERT(((AP)->sbcs.curr.norm.sys_alloc.no \
  390. && (AP)->sbcs.curr.norm.sys_alloc.size) \
  391. || (!(AP)->sbcs.curr.norm.sys_alloc.no \
  392. && !(AP)->sbcs.curr.norm.sys_alloc.size)); \
  393. ASSERT(((AP)->mbcs.curr.norm.mseg.no \
  394. && (AP)->mbcs.curr.norm.mseg.size) \
  395. || (!(AP)->mbcs.curr.norm.mseg.no \
  396. && !(AP)->mbcs.curr.norm.mseg.size)); \
  397. ASSERT(((AP)->mbcs.curr.norm.sys_alloc.no \
  398. && (AP)->mbcs.curr.norm.sys_alloc.size) \
  399. || (!(AP)->mbcs.curr.norm.sys_alloc.no \
  400. && !(AP)->mbcs.curr.norm.sys_alloc.size));
  401. #else
  402. #define DEBUG_CHECK_CARRIER_NO_SZ(AP)
  403. #endif
  404. #define STAT_SBC_ALLOC(AP, BSZ) \
  405. (AP)->sbcs.blocks.curr.size += (BSZ); \
  406. if ((AP)->sbcs.blocks.max.size < (AP)->sbcs.blocks.curr.size) \
  407. (AP)->sbcs.blocks.max.size = (AP)->sbcs.blocks.curr.size; \
  408. if ((AP)->sbcs.max.no < ((AP)->sbcs.curr.norm.mseg.no \
  409. + (AP)->sbcs.curr.norm.sys_alloc.no)) \
  410. (AP)->sbcs.max.no = ((AP)->sbcs.curr.norm.mseg.no \
  411. + (AP)->sbcs.curr.norm.sys_alloc.no); \
  412. if ((AP)->sbcs.max.size < ((AP)->sbcs.curr.norm.mseg.size \
  413. + (AP)->sbcs.curr.norm.sys_alloc.size)) \
  414. (AP)->sbcs.max.size = ((AP)->sbcs.curr.norm.mseg.size \
  415. + (AP)->sbcs.curr.norm.sys_alloc.size)
  416. #define STAT_MSEG_SBC_ALLOC(AP, CSZ, BSZ) \
  417. do { \
  418. (AP)->sbcs.curr.norm.mseg.no++; \
  419. (AP)->sbcs.curr.norm.mseg.size += (CSZ); \
  420. STAT_SBC_ALLOC((AP), (BSZ)); \
  421. DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
  422. } while (0)
  423. #define STAT_SYS_ALLOC_SBC_ALLOC(AP, CSZ, BSZ) \
  424. do { \
  425. (AP)->sbcs.curr.norm.sys_alloc.no++; \
  426. (AP)->sbcs.curr.norm.sys_alloc.size += (CSZ); \
  427. STAT_SBC_ALLOC((AP), (BSZ)); \
  428. DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
  429. } while (0)
  430. #define STAT_SBC_FREE(AP, BSZ) \
  431. ASSERT((AP)->sbcs.blocks.curr.size >= (BSZ)); \
  432. (AP)->sbcs.blocks.curr.size -= (BSZ)
  433. #define STAT_MSEG_SBC_FREE(AP, CSZ, BSZ) \
  434. do { \
  435. ASSERT((AP)->sbcs.curr.norm.mseg.no > 0); \
  436. (AP)->sbcs.curr.norm.mseg.no--; \
  437. ASSERT((AP)->sbcs.curr.norm.mseg.size >= (CSZ)); \
  438. (AP)->sbcs.curr.norm.mseg.size -= (CSZ); \
  439. STAT_SBC_FREE((AP), (BSZ)); \
  440. DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
  441. } while (0)
  442. #define STAT_SYS_ALLOC_SBC_FREE(AP, CSZ, BSZ) \
  443. do { \
  444. ASSERT((AP)->sbcs.curr.norm.sys_alloc.no > 0); \
  445. (AP)->sbcs.curr.norm.sys_alloc.no--; \
  446. ASSERT((AP)->sbcs.curr.norm.sys_alloc.size >= (CSZ)); \
  447. (AP)->sbcs.curr.norm.sys_alloc.size -= (CSZ); \
  448. STAT_SBC_FREE((AP), (BSZ)); \
  449. DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
  450. } while (0)
  451. #define STAT_MBC_ALLOC(AP) \
  452. if ((AP)->mbcs.max.no < ((AP)->mbcs.curr.norm.mseg.no \
  453. + (AP)->mbcs.curr.norm.sys_alloc.no)) \
  454. (AP)->mbcs.max.no = ((AP)->mbcs.curr.norm.mseg.no \
  455. + (AP)->mbcs.curr.norm.sys_alloc.no); \
  456. if ((AP)->mbcs.max.size < ((AP)->mbcs.curr.norm.mseg.size \
  457. + (AP)->mbcs.curr.norm.sys_alloc.size)) \
  458. (AP)->mbcs.max.size = ((AP)->mbcs.curr.norm.mseg.size \
  459. + (AP)->mbcs.curr.norm.sys_alloc.size)
  460. #define STAT_MSEG_MBC_ALLOC(AP, CSZ) \
  461. do { \
  462. (AP)->mbcs.curr.norm.mseg.no++; \
  463. (AP)->mbcs.curr.norm.mseg.size += (CSZ); \
  464. STAT_MBC_ALLOC((AP)); \
  465. DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
  466. } while (0)
  467. #define STAT_SYS_ALLOC_MBC_ALLOC(AP, CSZ) \
  468. do { \
  469. (AP)->mbcs.curr.norm.sys_alloc.no++; \
  470. (AP)->mbcs.curr.norm.sys_alloc.size += (CSZ); \
  471. STAT_MBC_ALLOC((AP)); \
  472. DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
  473. } while (0)
  474. #define STAT_MBC_CPOOL_FETCH(AP, CRR) \
  475. do { \
  476. UWord csz__ = CARRIER_SZ((CRR)); \
  477. if (IS_MSEG_CARRIER((CRR))) \
  478. STAT_MSEG_MBC_ALLOC((AP), csz__); \
  479. else \
  480. STAT_SYS_ALLOC_MBC_ALLOC((AP), csz__); \
  481. set_new_allctr_abandon_limit(AP); \
  482. (AP)->mbcs.blocks.curr.no += (CRR)->cpool.blocks[(AP)->alloc_no]; \
  483. if ((AP)->mbcs.blocks.max.no < (AP)->mbcs.blocks.curr.no) \
  484. (AP)->mbcs.blocks.max.no = (AP)->mbcs.blocks.curr.no; \
  485. (AP)->mbcs.blocks.curr.size += \
  486. (CRR)->cpool.blocks_size[(AP)->alloc_no]; \
  487. if ((AP)->mbcs.blocks.max.size < (AP)->mbcs.blocks.curr.size) \
  488. (AP)->mbcs.blocks.max.size = (AP)->mbcs.blocks.curr.size; \
  489. } while (0)
  490. #define STAT_MSEG_MBC_FREE(AP, CSZ) \
  491. do { \
  492. ASSERT((AP)->mbcs.curr.norm.mseg.no > 0); \
  493. (AP)->mbcs.curr.norm.mseg.no--; \
  494. ASSERT((AP)->mbcs.curr.norm.mseg.size >= (CSZ)); \
  495. (AP)->mbcs.curr.norm.mseg.size -= (CSZ); \
  496. DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
  497. } while (0)
  498. #define STAT_SYS_ALLOC_MBC_FREE(AP, CSZ) \
  499. do { \
  500. ASSERT((AP)->mbcs.curr.norm.sys_alloc.no > 0); \
  501. (AP)->mbcs.curr.norm.sys_alloc.no--; \
  502. ASSERT((AP)->mbcs.curr.norm.sys_alloc.size >= (CSZ)); \
  503. (AP)->mbcs.curr.norm.sys_alloc.size -= (CSZ); \
  504. DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
  505. } while (0)
  506. #define STAT_MBC_FREE(AP, CRR) \
  507. do { \
  508. UWord csz__ = CARRIER_SZ((CRR)); \
  509. if (IS_MSEG_CARRIER((CRR))) { \
  510. STAT_MSEG_MBC_FREE((AP), csz__); \
  511. } else { \
  512. STAT_SYS_ALLOC_MBC_FREE((AP), csz__); \
  513. } \
  514. set_new_allctr_abandon_limit(AP); \
  515. } while (0)
  516. #define STAT_MBC_ABANDON(AP, CRR) \
  517. do { \
  518. STAT_MBC_FREE(AP, CRR); \
  519. ERTS_ALC_CPOOL_ASSERT((AP)->mbcs.blocks.curr.no \
  520. >= (CRR)->cpool.blocks[(AP)->alloc_no]); \
  521. (AP)->mbcs.blocks.curr.no -= (CRR)->cpool.blocks[(AP)->alloc_no]; \
  522. ERTS_ALC_CPOOL_ASSERT((AP)->mbcs.blocks.curr.size \
  523. >= (CRR)->cpool.blocks_size[(AP)->alloc_no]); \
  524. (AP)->mbcs.blocks.curr.size -= (CRR)->cpool.blocks_size[(AP)->alloc_no]; \
  525. } while (0)
  526. #define STAT_MBC_BLK_ALLOC_CRR(AP, CRR, BSZ) \
  527. do { \
  528. (CRR)->cpool.blocks[(AP)->alloc_no]++; \
  529. (CRR)->cpool.blocks_size[(AP)->alloc_no] += (BSZ); \
  530. (CRR)->cpool.total_blocks_size += (BSZ); \
  531. } while (0)
  532. #define STAT_MBC_BLK_ALLOC(AP, CRR, BSZ, FLGS) \
  533. do { \
  534. CarriersStats_t *cstats__ = &(AP)->mbcs; \
  535. cstats__->blocks.curr.no++; \
  536. if (cstats__->blocks.max.no < cstats__->blocks.curr.no) \
  537. cstats__->blocks.max.no = cstats__->blocks.curr.no; \
  538. cstats__->blocks.curr.size += (BSZ); \
  539. if (cstats__->blocks.max.size < cstats__->blocks.curr.size) \
  540. cstats__->blocks.max.size = cstats__->blocks.curr.size; \
  541. STAT_MBC_BLK_ALLOC_CRR((AP), (CRR), (BSZ)); \
  542. } while (0)
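/* Editorial sketch of the statistics pattern used by the STAT_* macros above:
 * each counter keeps a current value and a high-water mark that is raised
 * whenever the current value exceeds it; frees lower only the current value. */
#if 0
typedef struct { unsigned long curr, max; } demo_stat_t;
static void demo_stat_add(demo_stat_t *s, unsigned long n)
{
    s->curr += n;
    if (s->max < s->curr)
        s->max = s->curr;          /* high-water mark only ever grows */
}
static void demo_stat_sub(demo_stat_t *s, unsigned long n)
{
    s->curr -= n;                  /* max is intentionally left untouched */
}
#endif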
  543. static ERTS_INLINE int
  544. stat_cpool_mbc_blk_free(Allctr_t *allctr,
  545. ErtsAlcType_t type,
  546. Carrier_t *crr,
  547. Carrier_t **busy_pcrr_pp,
  548. UWord blksz)
  549. {
  550. Allctr_t *orig_allctr;
  551. int alloc_no;
  552. alloc_no = ERTS_ALC_T2A(type);
  553. ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks[alloc_no] > 0);
  554. crr->cpool.blocks[alloc_no]--;
  555. ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks_size[alloc_no] >= blksz);
  556. crr->cpool.blocks_size[alloc_no] -= blksz;
  557. ERTS_ALC_CPOOL_ASSERT(crr->cpool.total_blocks_size >= blksz);
  558. crr->cpool.total_blocks_size -= blksz;
  559. if (allctr->alloc_no == alloc_no && (!busy_pcrr_pp || !*busy_pcrr_pp)) {
  560. /* This is a local block, so we should not update the pool
  561. * statistics. */
  562. return 0;
  563. }
  564. /* This is either a foreign block that's been fetched from the pool, or any
  565. * block that's in the pool. The carrier's owner keeps the statistics for
  566. * both pooled and foreign blocks. */
  567. orig_allctr = crr->cpool.orig_allctr;
  568. ERTS_ALC_CPOOL_ASSERT(alloc_no != allctr->alloc_no ||
  569. (crr == *busy_pcrr_pp && allctr == orig_allctr));
  570. #ifdef ERTS_ALC_CPOOL_DEBUG
  571. ERTS_ALC_CPOOL_ASSERT(
  572. erts_atomic_dec_read_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no]) >= 0);
  573. ERTS_ALC_CPOOL_ASSERT(
  574. erts_atomic_add_read_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no],
  575. -((erts_aint_t) blksz)) >= 0);
  576. #else
  577. erts_atomic_dec_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no]);
  578. erts_atomic_add_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no],
  579. -((erts_aint_t) blksz));
  580. #endif
  581. return 1;
  582. }
  583. #define STAT_MBC_BLK_FREE(AP, TYPE, CRR, BPCRRPP, BSZ, FLGS) \
  584. do { \
  585. if (!stat_cpool_mbc_blk_free((AP), (TYPE), (CRR), (BPCRRPP), (BSZ))) { \
  586. CarriersStats_t *cstats__ = &(AP)->mbcs; \
  587. ASSERT(cstats__->blocks.curr.no > 0); \
  588. cstats__->blocks.curr.no--; \
  589. ASSERT(cstats__->blocks.curr.size >= (BSZ)); \
  590. cstats__->blocks.curr.size -= (BSZ); \
  591. } \
  592. } while (0)
  593. /* Debug stuff... */
  594. #ifdef DEBUG
  595. static UWord carrier_alignment;
  596. #define DEBUG_SAVE_ALIGNMENT(C) \
  597. do { \
  598. UWord algnmnt__ = sizeof(Unit_t) - (((UWord) (C)) % sizeof(Unit_t));\
  599. carrier_alignment = MIN(carrier_alignment, algnmnt__); \
  600. ASSERT(((UWord) (C)) % sizeof(UWord) == 0); \
  601. } while (0)
  602. #define DEBUG_CHECK_ALIGNMENT(P) \
  603. do { \
  604. ASSERT(sizeof(Unit_t) - (((UWord) (P)) % sizeof(Unit_t)) \
  605. >= carrier_alignment); \
  606. ASSERT(((UWord) (P)) % sizeof(UWord) == 0); \
  607. } while (0)
  608. #else
  609. #define DEBUG_SAVE_ALIGNMENT(C)
  610. #define DEBUG_CHECK_ALIGNMENT(P)
  611. #endif
  612. #ifdef DEBUG
  613. # define IS_ACTUALLY_BLOCKING (erts_thr_progress_is_blocking())
  614. #define ERTS_ALCU_DBG_CHK_THR_ACCESS(A) \
  615. do { \
  616. if (!(A)->thread_safe && !IS_ACTUALLY_BLOCKING) { \
  617. if (!(A)->debug.saved_tid) { \
  618. (A)->debug.tid = erts_thr_self(); \
  619. (A)->debug.saved_tid = 1; \
  620. } \
  621. else { \
  622. ERTS_LC_ASSERT( \
  623. ethr_equal_tids((A)->debug.tid, erts_thr_self())); \
  624. } \
  625. } \
  626. } while (0)
  627. #else
  628. #define ERTS_ALCU_DBG_CHK_THR_ACCESS(A)
  629. #endif
  630. static void make_name_atoms(Allctr_t *allctr);
  631. static Block_t *create_carrier(Allctr_t *, Uint, UWord);
  632. static void destroy_carrier(Allctr_t *, Block_t *, Carrier_t **);
  633. static void mbc_free(Allctr_t *allctr, ErtsAlcType_t type, void *p, Carrier_t **busy_pcrr_pp);
  634. static void dealloc_block(Allctr_t *, ErtsAlcType_t, Uint32, void *, ErtsAlcFixList_t *);
  635. static alcu_atag_t determine_alloc_tag(Allctr_t *allocator, ErtsAlcType_t type)
  636. {
  637. ErtsSchedulerData *esdp;
  638. Eterm id;
  639. ERTS_CT_ASSERT(_unchecked_atom_val(am_system) <= MAX_ATAG_ATOM_ID);
  640. ASSERT(allocator->atags);
  641. esdp = erts_get_scheduler_data();
  642. id = am_system;
  643. if (esdp) {
  644. if (esdp->current_nif) {
  645. Module *mod = erts_nif_get_module((esdp->current_nif)->mod_nif);
  646. /* Mod can be NULL if a resource destructor allocates memory after
  647. * the module has been unloaded. */
  648. if (mod) {
  649. id = make_atom(mod->module);
  650. }
  651. } else if (esdp->current_port) {
  652. Port *p = esdp->current_port;
  653. id = (p->drv_ptr)->name_atom;
  654. }
  655. /* We fall back to 'system' if we can't pack the driver/NIF name into
  656. * the tag. This may be a bit misleading but we've made no promises
  657. * that the information is complete.
  658. *
  659. * This can only happen on 32-bit emulators when a new driver/NIF has
  660. * been loaded *after* 16 million atoms have been used, and supporting
  661. * that fringe case is not worth an extra word. 64-bit emulators are
  662. * unaffected since the atom cache limits atom indexes to 32 bits. */
  663. if(MAX_ATOM_TABLE_SIZE > MAX_ATAG_ATOM_ID) {
  664. if (atom_val(id) > MAX_ATAG_ATOM_ID) {
  665. id = am_system;
  666. }
  667. }
  668. }
  669. return MAKE_ATAG(id, ERTS_ALC_T2N(type));
  670. }
  671. static void set_alloc_tag(Allctr_t *allocator, void *p, alcu_atag_t tag)
  672. {
  673. Block_t *block;
  674. ASSERT(DBG_IS_VALID_ATAG(tag));
  675. ASSERT(allocator->atags && p);
  676. (void)allocator;
  677. block = UMEM2BLK(p);
  678. SET_BLK_ATAG(block, tag);
  679. }
  680. /* internal data... */
  681. #if 0
  682. static ERTS_INLINE void *
  683. internal_alloc(UWord size)
  684. {
  685. void *res = erts_sys_alloc(0, NULL, size);
  686. if (!res)
  687. erts_alloc_enomem(ERTS_ALC_T_UNDEF, size);
  688. return res;
  689. }
  690. static ERTS_INLINE void *
  691. internal_realloc(void *ptr, UWord size)
  692. {
  693. void *res = erts_sys_realloc(0, NULL, ptr, size);
  694. if (!res)
  695. erts_alloc_enomem(ERTS_ALC_T_UNDEF, size);
  696. return res;
  697. }
  698. static ERTS_INLINE void
  699. internal_free(void *ptr)
  700. {
  701. erts_sys_free(0, NULL, ptr);
  702. }
  703. #endif
  704. #ifdef ARCH_32
  705. /*
  706. * Bit vector for the entire 32-bit virtual address space
  707. * with one bit for each super aligned memory segment.
  708. */
  709. #define VSPACE_MAP_BITS (1 << (32 - ERTS_MMAP_SUPERALIGNED_BITS))
  710. #define VSPACE_MAP_SZ (VSPACE_MAP_BITS / ERTS_VSPACE_WORD_BITS)
  711. static ERTS_INLINE void set_bit(UWord* map, Uint ix)
  712. {
  713. ASSERT(ix / ERTS_VSPACE_WORD_BITS < VSPACE_MAP_SZ);
  714. map[ix / ERTS_VSPACE_WORD_BITS]
  715. |= ((UWord)1 << (ix % ERTS_VSPACE_WORD_BITS));
  716. }
  717. static ERTS_INLINE void clr_bit(UWord* map, Uint ix)
  718. {
  719. ASSERT(ix / ERTS_VSPACE_WORD_BITS < VSPACE_MAP_SZ);
  720. map[ix / ERTS_VSPACE_WORD_BITS]
  721. &= ~((UWord)1 << (ix % ERTS_VSPACE_WORD_BITS));
  722. }
  723. #ifdef DEBUG
  724. static ERTS_INLINE int is_bit_set(UWord* map, Uint ix)
  725. {
  726. ASSERT(ix / ERTS_VSPACE_WORD_BITS < VSPACE_MAP_SZ);
  727. return map[ix / ERTS_VSPACE_WORD_BITS]
  728. & ((UWord)1 << (ix % ERTS_VSPACE_WORD_BITS));
  729. }
  730. #endif
  731. UWord erts_literal_vspace_map[VSPACE_MAP_SZ];
  732. static void set_literal_range(void* start, Uint size)
  733. {
  734. Uint ix = (UWord)start >> ERTS_MMAP_SUPERALIGNED_BITS;
  735. Uint n = size >> ERTS_MMAP_SUPERALIGNED_BITS;
  736. ASSERT(!((UWord)start & ERTS_INV_SUPERALIGNED_MASK));
  737. ASSERT(!((UWord)size & ERTS_INV_SUPERALIGNED_MASK));
  738. ASSERT(n);
  739. while (n--) {
  740. ASSERT(!is_bit_set(erts_literal_vspace_map, ix));
  741. set_bit(erts_literal_vspace_map, ix);
  742. ix++;
  743. }
  744. }
  745. static void clear_literal_range(void* start, Uint size)
  746. {
  747. Uint ix = (UWord)start >> ERTS_MMAP_SUPERALIGNED_BITS;
  748. Uint n = size >> ERTS_MMAP_SUPERALIGNED_BITS;
  749. ASSERT(!((UWord)start & ERTS_INV_SUPERALIGNED_MASK));
  750. ASSERT(!((UWord)size & ERTS_INV_SUPERALIGNED_MASK));
  751. ASSERT(n);
  752. while (n--) {
  753. ASSERT(is_bit_set(erts_literal_vspace_map, ix));
  754. clr_bit(erts_literal_vspace_map, ix);
  755. ix++;
  756. }
  757. }
  758. #endif /* ARCH_32 */
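/* Editorial sketch of the literal vspace map above (32-bit emulators only):
 * one bit per superaligned segment, indexed by address >> superalignment
 * bits. The segment size below is an assumption for illustration. */
#if 0
#include <assert.h>
#include <stdint.h>
#define DEMO_SEG_BITS 20                                 /* assumed 1 MB segments */
#define DEMO_WORD_BITS (8 * sizeof(uintptr_t))
#define DEMO_MAP_BITS ((uint32_t)1 << (32 - DEMO_SEG_BITS))
static uintptr_t demo_map[DEMO_MAP_BITS / (8 * sizeof(uintptr_t))];
int main(void)
{
    uint32_t addr = 0x4030000u;                          /* any mapped address    */
    uint32_t ix = addr >> DEMO_SEG_BITS;                 /* segment index         */
    demo_map[ix / DEMO_WORD_BITS] |= (uintptr_t)1 << (ix % DEMO_WORD_BITS);
    assert(demo_map[ix / DEMO_WORD_BITS] & ((uintptr_t)1 << (ix % DEMO_WORD_BITS)));
    return 0;
}
#endif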
  759. /* mseg ... */
  760. #if HAVE_ERTS_MSEG
  761. static void*
  762. erts_alcu_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags)
  763. {
  764. void *res;
  765. UWord size = (UWord) *size_p;
  766. res = erts_mseg_alloc_opt(allctr->alloc_no, &size, flags, &allctr->mseg_opt);
  767. *size_p = (Uint) size;
  768. INC_CC(allctr->calls.mseg_alloc);
  769. return res;
  770. }
  771. static void*
  772. erts_alcu_mseg_realloc(Allctr_t *allctr, void *seg,
  773. Uint old_size, Uint *new_size_p)
  774. {
  775. void *res;
  776. UWord new_size = (UWord) *new_size_p;
  777. res = erts_mseg_realloc_opt(allctr->alloc_no, seg, (UWord) old_size, &new_size,
  778. ERTS_MSEG_FLG_NONE, &allctr->mseg_opt);
  779. *new_size_p = (Uint) new_size;
  780. INC_CC(allctr->calls.mseg_realloc);
  781. return res;
  782. }
  783. static void
  784. erts_alcu_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size, Uint flags)
  785. {
  786. erts_mseg_dealloc_opt(allctr->alloc_no, seg, (UWord) size, flags, &allctr->mseg_opt);
  787. INC_CC(allctr->calls.mseg_dealloc);
  788. }
  789. #if defined(ARCH_32)
  790. void*
  791. erts_alcu_literal_32_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags)
  792. {
  793. void* res;
  794. Uint sz = ERTS_SUPERALIGNED_CEILING(*size_p);
  795. ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL &&
  796. allctr->t == 0);
  797. ERTS_LC_ASSERT(allctr->thread_safe);
  798. res = erts_alcu_mseg_alloc(allctr, &sz, flags);
  799. if (res) {
  800. set_literal_range(res, sz);
  801. *size_p = sz;
  802. }
  803. return res;
  804. }
  805. void*
  806. erts_alcu_literal_32_mseg_realloc(Allctr_t *allctr, void *seg,
  807. Uint old_size, Uint *new_size_p)
  808. {
  809. void* res;
  810. Uint new_sz = ERTS_SUPERALIGNED_CEILING(*new_size_p);
  811. ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL &&
  812. allctr->t == 0);
  813. ERTS_LC_ASSERT(allctr->thread_safe);
  814. if (seg && old_size)
  815. clear_literal_range(seg, old_size);
  816. res = erts_alcu_mseg_realloc(allctr, seg, old_size, &new_sz);
  817. if (res) {
  818. set_literal_range(res, new_sz);
  819. *new_size_p = new_sz;
  820. }
  821. return res;
  822. }
  823. void
  824. erts_alcu_literal_32_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size,
  825. Uint flags)
  826. {
  827. ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL &&
  828. allctr->t == 0);
  829. ERTS_LC_ASSERT(allctr->thread_safe);
  830. erts_alcu_mseg_dealloc(allctr, seg, size, flags);
  831. clear_literal_range(seg, size);
  832. }
  833. #elif defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
  834. /* For allocators that have their own mmapper (super carrier),
  835. * like literal_alloc.
  836. */
  837. void*
  838. erts_alcu_mmapper_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags)
  839. {
  840. void* res;
  841. UWord size = (UWord) *size_p;
  842. Uint32 mmap_flags = ERTS_MMAPFLG_SUPERCARRIER_ONLY;
  843. if (flags & ERTS_MSEG_FLG_2POW)
  844. mmap_flags |= ERTS_MMAPFLG_SUPERALIGNED;
  845. res = erts_mmap(allctr->mseg_mmapper, mmap_flags, &size);
  846. *size_p = (Uint)size;
  847. INC_CC(allctr->calls.mseg_alloc);
  848. return res;
  849. }
  850. void*
  851. erts_alcu_mmapper_mseg_realloc(Allctr_t *allctr, void *seg,
  852. Uint old_size, Uint *new_size_p)
  853. {
  854. void *res;
  855. UWord new_size = (UWord) *new_size_p;
  856. res = erts_mremap(allctr->mseg_mmapper, ERTS_MSEG_FLG_NONE, seg, old_size, &new_size);
  857. *new_size_p = (Uint) new_size;
  858. INC_CC(allctr->calls.mseg_realloc);
  859. return res;
  860. }
  861. void
  862. erts_alcu_mmapper_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size,
  863. Uint flags)
  864. {
  865. Uint32 mmap_flags = ERTS_MMAPFLG_SUPERCARRIER_ONLY;
  866. if (flags & ERTS_MSEG_FLG_2POW)
  867. mmap_flags |= ERTS_MMAPFLG_SUPERALIGNED;
  868. erts_munmap(allctr->mseg_mmapper, mmap_flags, seg, (UWord)size);
  869. INC_CC(allctr->calls.mseg_dealloc);
  870. }
  871. #endif /* ARCH_64 && ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION */
  872. #if defined(ERTS_ALC_A_EXEC)
  873. /*
  874. * For exec_alloc, which needs memory with PROT_EXEC
  875. */
  876. void*
  877. erts_alcu_exec_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags)
  878. {
  879. void* res = erts_alcu_mseg_alloc(allctr, size_p, flags);
  880. if (res) {
  881. int r = mprotect(res, *size_p, PROT_EXEC | PROT_READ | PROT_WRITE);
  882. ASSERT(r == 0); (void)r;
  883. }
  884. return res;
  885. }
  886. void*
  887. erts_alcu_exec_mseg_realloc(Allctr_t *allctr, void *seg,
  888. Uint old_size, Uint *new_size_p)
  889. {
  890. void *res;
  891. if (seg && old_size) {
  892. int r = mprotect(seg, old_size, PROT_READ | PROT_WRITE);
  893. ASSERT(r == 0); (void)r;
  894. }
  895. res = erts_alcu_mseg_realloc(allctr, seg, old_size, new_size_p);
  896. if (res) {
  897. int r = mprotect(res, *new_size_p, PROT_EXEC | PROT_READ | PROT_WRITE);
  898. ASSERT(r == 0); (void)r;
  899. }
  900. return res;
  901. }
  902. void
  903. erts_alcu_exec_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size, Uint flags)
  904. {
  905. int r = mprotect(seg, size, PROT_READ | PROT_WRITE);
  906. ASSERT(r == 0); (void)r;
  907. erts_alcu_mseg_dealloc(allctr, seg, size, flags);
  908. }
  909. #endif /* ERTS_ALC_A_EXEC */
  910. #endif /* HAVE_ERTS_MSEG */
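/* Editorial sketch of the exec_alloc pattern above: carrier memory is
 * obtained normally and then flipped to PROT_EXEC|PROT_READ|PROT_WRITE with
 * mprotect() (and back to plain read/write before it is reused or released).
 * POSIX-only toy, not part of the allocator. */
#if 0
#include <sys/mman.h>
#include <stddef.h>
#include <assert.h>
int main(void)
{
    size_t sz = 4096;
    void *p = mmap(NULL, sz, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    assert(p != MAP_FAILED);
    assert(mprotect(p, sz, PROT_EXEC | PROT_READ | PROT_WRITE) == 0); /* executable */
    assert(mprotect(p, sz, PROT_READ | PROT_WRITE) == 0);             /* back to data */
    munmap(p, sz);
    return 0;
}
#endif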
  911. static void*
  912. erts_alcu_sys_alloc(Allctr_t *allctr, Uint* size_p, int superalign)
  913. {
  914. void *res;
  915. const Uint size = *size_p;
  916. #if ERTS_SA_MB_CARRIERS && ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
  917. if (superalign)
  918. res = erts_sys_aligned_alloc(ERTS_SACRR_UNIT_SZ, size);
  919. else
  920. #endif
  921. res = erts_sys_alloc(0, NULL, size);
  922. INC_CC(allctr->calls.sys_alloc);
  923. if (erts_mtrace_enabled)
  924. erts_mtrace_crr_alloc(res, allctr->alloc_no, ERTS_ALC_A_SYSTEM, size);
  925. return res;
  926. }
  927. static void*
  928. erts_alcu_sys_realloc(Allctr_t *allctr, void *ptr, Uint *size_p, Uint old_size, int superalign)
  929. {
  930. void *res;
  931. const Uint size = *size_p;
  932. #if ERTS_SA_MB_CARRIERS && ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
  933. if (superalign)
  934. res = erts_sys_aligned_realloc(ERTS_SACRR_UNIT_SZ, ptr, size, old_size);
  935. else
  936. #endif
  937. res = erts_sys_realloc(0, NULL, ptr, size);
  938. INC_CC(allctr->calls.sys_realloc);
  939. if (erts_mtrace_enabled)
  940. erts_mtrace_crr_realloc(res,
  941. allctr->alloc_no,
  942. ERTS_ALC_A_SYSTEM,
  943. ptr,
  944. size);
  945. return res;
  946. }
  947. static void
  948. erts_alcu_sys_dealloc(Allctr_t *allctr, void *ptr, Uint size, int superalign)
  949. {
  950. #if ERTS_SA_MB_CARRIERS && ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
  951. if (superalign)
  952. erts_sys_aligned_free(ERTS_SACRR_UNIT_SZ, ptr);
  953. else
  954. #endif
  955. erts_sys_free(0, NULL, ptr);
  956. INC_CC(allctr->calls.sys_free);
  957. if (erts_mtrace_enabled)
  958. erts_mtrace_crr_free(allctr->alloc_no, ERTS_ALC_A_SYSTEM, ptr);
  959. }
  960. #ifdef ARCH_32
  961. void*
  962. erts_alcu_literal_32_sys_alloc(Allctr_t *allctr, Uint* size_p, int superalign)
  963. {
  964. void* res;
  965. Uint size = ERTS_SUPERALIGNED_CEILING(*size_p);
  966. ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL &&
  967. allctr->t == 0);
  968. ERTS_LC_ASSERT(allctr->thread_safe);
  969. res = erts_alcu_sys_alloc(allctr, &size, 1);
  970. if (res) {
  971. set_literal_range(res, size);
  972. *size_p = size;
  973. }
  974. return res;
  975. }
  976. void*
  977. erts_alcu_literal_32_sys_realloc(Allctr_t *allctr, void *ptr, Uint* size_p, Uint old_size, int superalign)
  978. {
  979. void* res;
  980. Uint size = ERTS_SUPERALIGNED_CEILING(*size_p);
  981. ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL &&
  982. allctr->t == 0);
  983. ERTS_LC_ASSERT(allctr->thread_safe);
  984. if (ptr && old_size)
  985. clear_literal_range(ptr, old_size);
  986. res = erts_alcu_sys_realloc(allctr, ptr, &size, old_size, 1);
  987. if (res) {
  988. set_literal_range(res, size);
  989. *size_p = size;
  990. }
  991. return res;
  992. }
  993. void
  994. erts_alcu_literal_32_sys_dealloc(Allctr_t *allctr, void *ptr, Uint size, int superalign)
  995. {
  996. ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL &&
  997. allctr->t == 0);
  998. ERTS_LC_ASSERT(allctr->thread_safe);
  999. erts_alcu_sys_dealloc(allctr, ptr, size, 1);
  1000. clear_literal_range(ptr, size);
  1001. }
  1002. #endif /* ARCH_32 */
  1003. static Uint
  1004. get_next_mbc_size(Allctr_t *allctr)
  1005. {
  1006. Uint size;
  1007. int cs = (allctr->mbcs.curr.norm.mseg.no
  1008. + allctr->mbcs.curr.norm.sys_alloc.no
  1009. - (allctr->main_carrier ? 1 : 0));
  1010. ASSERT(cs >= 0);
  1011. ASSERT(allctr->largest_mbc_size >= allctr->smallest_mbc_size);
  1012. if (cs >= allctr->mbc_growth_stages)
  1013. size = allctr->largest_mbc_size;
  1014. else
  1015. size = ((cs*(allctr->largest_mbc_size - allctr->smallest_mbc_size)
  1016. / allctr->mbc_growth_stages)
  1017. + allctr->smallest_mbc_size);
  1018. if (size < allctr->min_mbc_size)
  1019. size = allctr->min_mbc_size;
  1020. return size;
  1021. }
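/* Editorial sketch of get_next_mbc_size() above: carrier sizes grow linearly
 * from smallest_mbc_size to largest_mbc_size over mbc_growth_stages carriers
 * and then stay at the largest size. The parameters below are made up. */
#if 0
static unsigned long demo_next_mbc_size(int carriers)
{
    unsigned long smallest = 256 * 1024, largest = 8 * 1024 * 1024;
    int stages = 10;
    if (carriers >= stages)
        return largest;                                 /* fully grown        */
    return smallest + (unsigned long)carriers * (largest - smallest) / stages;
}
#endif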
  1022. static ERTS_INLINE void
  1023. link_carrier(CarrierList_t *cl, Carrier_t *crr)
  1024. {
  1025. crr->next = NULL;
  1026. if (!cl->last) {
  1027. ASSERT(!cl->first);
  1028. cl->first = cl->last = crr;
  1029. crr->prev = NULL;
  1030. }
  1031. else {
  1032. ASSERT(cl->first);
  1033. ASSERT(!cl->first->prev);
  1034. ASSERT(cl->last);
  1035. ASSERT(!cl->last->next);
  1036. crr->prev = cl->last;
  1037. cl->last->next = crr;
  1038. cl->last = crr;
  1039. }
  1040. ASSERT(crr->next != crr);
  1041. ASSERT(crr->prev != crr);
  1042. }
  1043. static ERTS_INLINE void
  1044. relink_carrier(CarrierList_t *cl, Carrier_t *crr)
  1045. {
  1046. if (crr->next) {
  1047. if (crr->next->prev != crr)
  1048. crr->next->prev = crr;
  1049. }
  1050. else if (cl->last != crr)
  1051. cl->last = crr;
  1052. if (crr->prev) {
  1053. if (crr->prev->next != crr)
  1054. crr->prev->next = crr;
  1055. }
  1056. else if (cl->first != crr)
  1057. cl->first = crr;
  1058. }
  1059. static ERTS_INLINE void
  1060. unlink_carrier(CarrierList_t *cl, Carrier_t *crr)
  1061. {
  1062. ASSERT(crr->next != crr);
  1063. ASSERT(crr->prev != crr);
  1064. if (cl->first == crr) {
  1065. ASSERT(!crr->prev);
  1066. cl->first = crr->next;
  1067. }
  1068. else {
  1069. ASSERT(crr->prev);
  1070. crr->prev->next = crr->next;
  1071. }
  1072. if (cl->last == crr) {
  1073. ASSERT(!crr->next);
  1074. cl->last = crr->prev;
  1075. }
  1076. else {
  1077. ASSERT(crr->next);
  1078. crr->next->prev = crr->prev;
  1079. }
  1080. #ifdef DEBUG
  1081. crr->next = crr;
  1082. crr->prev = crr;
  1083. #endif
  1084. }
  1085. static ERTS_INLINE int is_abandoned(Carrier_t *crr)
  1086. {
  1087. return crr->cpool.state != ERTS_MBC_IS_HOME;
  1088. }
  1089. static ERTS_INLINE void
  1090. unlink_abandoned_carrier(Carrier_t *crr)
  1091. {
  1092. if (crr->cpool.state == ERTS_MBC_WAS_POOLED) {
  1093. aoff_remove_pooled_mbc(crr->cpool.orig_allctr, crr);
  1094. }
  1095. }
  1096. static ERTS_INLINE void
  1097. clear_busy_pool_carrier(Allctr_t *allctr, Carrier_t *crr)
  1098. {
  1099. if (crr) {
  1100. erts_aint_t max_size;
  1101. erts_aint_t iallctr;
  1102. max_size = (erts_aint_t) allctr->largest_fblk_in_mbc(allctr, crr);
  1103. erts_atomic_set_nob(&crr->cpool.max_size, max_size);
  1104. iallctr = erts_atomic_read_nob(&crr->allctr);
  1105. ERTS_ALC_CPOOL_ASSERT((iallctr & ~ERTS_CRR_ALCTR_FLG_HOMECOMING)
  1106. == ((erts_aint_t)allctr |
  1107. ERTS_CRR_ALCTR_FLG_IN_POOL |
  1108. ERTS_CRR_ALCTR_FLG_BUSY));
  1109. iallctr &= ~ERTS_CRR_ALCTR_FLG_BUSY;
  1110. erts_atomic_set_relb(&crr->allctr, iallctr);
  1111. }
  1112. }
  1113. #if 0
  1114. #define ERTS_DBG_CHK_FIX_LIST(A, FIX, IX, B) \
  1115. do { if ((FIX)) chk_fix_list((A), (FIX), (IX), (B)); } while (0)
  1116. static void
  1117. chk_fix_list(Allctr_t *allctr, ErtsAlcFixList_t *fix, int ix, int before)
  1118. {
  1119. void *p;
  1120. int n;
  1121. for (n = 0, p = fix[ix].list; p; p = *((void **) p))
  1122. n++;
  1123. if (n != fix[ix].list_size) {
  1124. erts_fprintf(stderr, "FOUND IT ts=%d, sched=%d, ix=%d, n=%d, ls=%d %s!\n",
  1125. allctr->thread_safe, allctr->ix, ix, n, fix[ix].list_size, before ? "before" : "after");
  1126. abort();
  1127. }
  1128. }
  1129. #else
  1130. #define ERTS_DBG_CHK_FIX_LIST(A, FIX, IX, B)
  1131. #endif
  1132. static ERTS_INLINE Allctr_t *get_pref_allctr(void *extra);
  1133. static void *mbc_alloc(Allctr_t *allctr, Uint size);
  1134. static ERTS_INLINE void
  1135. sched_fix_shrink(Allctr_t *allctr, int on)
  1136. {
  1137. if (on && !allctr->fix_shrink_scheduled) {
  1138. allctr->fix_shrink_scheduled = 1;
  1139. erts_set_aux_work_timeout(allctr->ix,
  1140. (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
  1141. | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC),
  1142. 1);
  1143. }
  1144. else if (!on && allctr->fix_shrink_scheduled) {
  1145. allctr->fix_shrink_scheduled = 0;
  1146. erts_set_aux_work_timeout(allctr->ix,
  1147. (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
  1148. | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC),
  1149. 0);
  1150. }
  1151. }
  1152. static ERTS_INLINE void
  1153. fix_cpool_check_shrink(Allctr_t *allctr,
  1154. ErtsAlcType_t type,
  1155. ErtsAlcFixList_t *fix,
  1156. Carrier_t **busy_pcrr_pp)
  1157. {
  1158. if (fix->u.cpool.shrink_list > 0) {
  1159. if (fix->list_size == 0)
  1160. fix->u.cpool.shrink_list = 0;
  1161. else {
  1162. void *p;
  1163. if (busy_pcrr_pp) {
  1164. clear_busy_pool_carrier(allctr, *busy_pcrr_pp);
  1165. *busy_pcrr_pp = NULL;
  1166. }
  1167. fix->u.cpool.shrink_list--;
  1168. p = fix->list;
  1169. fix->list = *((void **) p);
  1170. fix->list_size--;
  1171. if (fix->u.cpool.min_list_size > fix->list_size)
  1172. fix->u.cpool.min_list_size = fix->list_size;
  1173. dealloc_block(allctr, type, DEALLOC_FLG_FIX_SHRINK, p, fix);
  1174. }
  1175. }
  1176. }
  1177. static ERTS_INLINE void *
  1178. fix_cpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)
  1179. {
  1180. void *res;
  1181. ErtsAlcFixList_t *fix;
  1182. fix = &allctr->fix[ERTS_ALC_FIX_TYPE_IX(type)];
  1183. ASSERT(type == fix->type && size == fix->type_size);
  1184. ASSERT(size >= sizeof(ErtsAllctrDDBlock_t));
  1185. res = fix->list;
  1186. if (res) {
  1187. fix->list = *((void **) res);
  1188. fix->list_size--;
  1189. if (fix->u.cpool.min_list_size > fix->list_size)
  1190. fix->u.cpool.min_list_size = fix->list_size;
  1191. fix->u.cpool.used++;
  1192. fix_cpool_check_shrink(allctr, type, fix, NULL);
  1193. return res;
  1194. }
  1195. if (size >= allctr->sbc_threshold) {
  1196. Block_t *blk;
  1197. blk = create_carrier(allctr, size, CFLG_SBC);
  1198. res = blk ? BLK2UMEM(blk) : NULL;
  1199. }
  1200. else
  1201. res = mbc_alloc(allctr, size);
  1202. if (res) {
  1203. fix->u.cpool.used++;
  1204. fix->u.cpool.allocated++;
  1205. }
  1206. return res;
  1207. }
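/* Editorial sketch of the fix-size free list handled above and in
 * fix_cpool_free(): free chunks are threaded through their own first word,
 * so a push or pop costs one pointer store/load and no extra memory. */
#if 0
#include <assert.h>
#include <stdlib.h>
static void *demo_list = NULL;
static void demo_push(void *p) { *(void **)p = demo_list; demo_list = p; }
static void *demo_pop(void)
{
    void *p = demo_list;
    if (p)
        demo_list = *(void **)p;   /* next pointer lives in the chunk itself */
    return p;
}
int main(void)
{
    void *a = malloc(32), *b = malloc(32);
    demo_push(a); demo_push(b);
    assert(demo_pop() == b && demo_pop() == a && demo_pop() == NULL);
    free(a); free(b);
    return 0;
}
#endif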
  1208. static ERTS_INLINE void
  1209. fix_cpool_free(Allctr_t *allctr,
  1210. ErtsAlcType_t type,
  1211. Uint32 flags,
  1212. void *p,
  1213. Carrier_t **busy_pcrr_pp)
  1214. {
  1215. ErtsAlcFixList_t *fix;
  1216. Allctr_t *fix_allctr;
  1217. /* If this isn't a fix allocator we need to update the fix list of our
  1218. * neighboring fix_alloc to keep the statistics consistent. */
  1219. if (!allctr->fix) {
  1220. ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[ERTS_ALC_A_FIXED_SIZE];
  1221. fix_allctr = get_pref_allctr(tspec);
  1222. ASSERT(!fix_allctr->thread_safe);
  1223. ASSERT(allctr != fix_allctr);
  1224. }
  1225. else {
  1226. fix_allctr = allctr;
  1227. }
  1228. ASSERT(ERTS_ALC_IS_CPOOL_ENABLED(fix_allctr));
  1229. ASSERT(ERTS_ALC_IS_CPOOL_ENABLED(allctr));
  1230. fix = &fix_allctr->fix[ERTS_ALC_FIX_TYPE_IX(type)];
  1231. ASSERT(type == fix->type);
  1232. if (!(flags & DEALLOC_FLG_FIX_SHRINK)) {
  1233. fix->u.cpool.used--;
  1234. }
  1235. /* We don't want foreign blocks to be long-lived, so we skip recycling if
  1236. * allctr != fix_allctr. */
  1237. if (allctr == fix_allctr
  1238. && (!busy_pcrr_pp || !*busy_pcrr_pp)
  1239. && !fix->u.cpool.shrink_list
  1240. && fix->list_size < ERTS_ALCU_FIX_MAX_LIST_SZ) {
  1241. *((void **) p) = fix->list;
  1242. fix->list = p;
  1243. fix->list_size++;
  1244. sched_fix_shrink(allctr, 1);
  1245. }
  1246. else {
  1247. Block_t *blk = UMEM2BLK(p);
  1248. if (IS_SBC_BLK(blk))
  1249. destroy_carrier(allctr, blk, NULL);
  1250. else
  1251. mbc_free(allctr, type, p, busy_pcrr_pp);
  1252. fix->u.cpool.allocated--;
  1253. fix_cpool_check_shrink(allctr, type, fix, busy_pcrr_pp);
  1254. }
  1255. }
  1256. static ERTS_INLINE erts_aint32_t
  1257. fix_cpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
  1258. {
  1259. int all_empty = 1;
  1260. erts_aint32_t res = 0;
  1261. int ix, o;
  1262. int flush = flgs == 0;
  1263. if (allctr->thread_safe)
  1264. erts_mtx_lock(&allctr->mutex);
  1265. for (ix = 0; ix < ERTS_ALC_NO_FIXED_SIZES; ix++) {
  1266. ErtsAlcFixList_t *fix = &allctr->fix[ix];
  1267. ErtsAlcType_t type;
  1268. ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);
  1269. if (flush)
  1270. fix->u.cpool.shrink_list = fix->list_size;
  1271. else if (flgs & ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM) {
  1272. fix->u.cpool.shrink_list = fix->u.cpool.min_list_size;
  1273. fix->u.cpool.min_list_size = fix->list_size;
  1274. }
  1275. type = ERTS_ALC_N2T((ErtsAlcType_t) (ix + ERTS_ALC_N_MIN_A_FIXED_SIZE));
  1276. for (o = 0; o < ERTS_ALC_FIX_MAX_SHRINK_OPS || flush; o++) {
  1277. void *ptr;
  1278. if (fix->u.cpool.shrink_list == 0)
  1279. break;
  1280. if (fix->list_size == 0) {
  1281. fix->u.cpool.shrink_list = 0;
  1282. break;
  1283. }
  1284. ptr = fix->list;
  1285. fix->list = *((void **) ptr);
  1286. fix->list_size--;
  1287. fix->u.cpool.shrink_list--;
  1288. dealloc_block(allctr, type, DEALLOC_FLG_FIX_SHRINK, ptr, fix);
  1289. }
  1290. if (fix->u.cpool.min_list_size > fix->list_size)
  1291. fix->u.cpool.min_list_size = fix->list_size;
  1292. if (fix->list_size != 0) {
  1293. if (fix->u.cpool.shrink_list > 0)
  1294. res |= ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC;
  1295. all_empty = 0;
  1296. }
  1297. }
  1298. if (all_empty)
  1299. sched_fix_shrink(allctr, 0);
  1300. if (allctr->thread_safe)
  1301. erts_mtx_unlock(&allctr->mutex);
  1302. return res;
  1303. }
  1304. static ERTS_INLINE void *
  1305. fix_nocpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)
  1306. {
  1307. ErtsAlcFixList_t *fix;
  1308. void *res;
  1309. fix = &allctr->fix[ERTS_ALC_FIX_TYPE_IX(type)];
  1310. ASSERT(type == fix->type && size == fix->type_size);
  1311. ASSERT(size >= sizeof(ErtsAllctrDDBlock_t));
  1312. ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);
  1313. fix->u.nocpool.used++;
  1314. res = fix->list;
  1315. if (res) {
  1316. fix->list_size--;
  1317. fix->list = *((void **) res);
  1318. if (fix->list && fix->u.nocpool.allocated > fix->u.nocpool.limit) {
  1319. Block_t *blk;
  1320. void *p = fix->list;
  1321. fix->list = *((void **) p);
  1322. fix->list_size--;
  1323. blk = UMEM2BLK(p);
  1324. if (IS_SBC_BLK(blk))
  1325. destroy_carrier(allctr, blk, NULL);
  1326. else
  1327. mbc_free(allctr, type, p, NULL);
  1328. fix->u.nocpool.allocated--;
  1329. }
  1330. ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
  1331. return res;
  1332. }
  1333. if (fix->u.nocpool.limit < fix->u.nocpool.used)
  1334. fix->u.nocpool.limit = fix->u.nocpool.used;
  1335. if (fix->u.nocpool.max_used < fix->u.nocpool.used)
  1336. fix->u.nocpool.max_used = fix->u.nocpool.used;
  1337. fix->u.nocpool.allocated++;
  1338. if (size >= allctr->sbc_threshold) {
  1339. Block_t *blk;
  1340. blk = create_carrier(allctr, size, CFLG_SBC);
  1341. res = blk ? BLK2UMEM(blk) : NULL;
  1342. }
  1343. else
  1344. res = mbc_alloc(allctr, size);
  1345. if (!res) {
  1346. fix->u.nocpool.allocated--;
  1347. fix->u.nocpool.used--;
  1348. }
  1349. return res;
  1350. }
  1351. static ERTS_INLINE void
  1352. fix_nocpool_free(Allctr_t *allctr,
  1353. ErtsAlcType_t type,
  1354. void *p)
  1355. {
  1356. Block_t *blk;
  1357. ErtsAlcFixList_t *fix;
  1358. fix = &allctr->fix[ERTS_ALC_T2N(type) - ERTS_ALC_N_MIN_A_FIXED_SIZE];
  1359. ASSERT(fix->type == type);
  1360. ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);
  1361. fix->u.nocpool.used--;
  1362. if (fix->u.nocpool.allocated < fix->u.nocpool.limit
  1363. && fix->list_size < ERTS_ALCU_FIX_MAX_LIST_SZ) {
  1364. *((void **) p) = fix->list;
  1365. fix->list = p;
  1366. fix->list_size++;
  1367. sched_fix_shrink(allctr, 1);
  1368. ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
  1369. return;
  1370. }
  1371. fix->u.nocpool.allocated--;
  1372. if (fix->list && fix->u.nocpool.allocated > fix->u.nocpool.limit) {
  1373. blk = UMEM2BLK(p);
  1374. if (IS_SBC_BLK(blk))
  1375. destroy_carrier(allctr, blk, NULL);
  1376. else
  1377. mbc_free(allctr, type, p, NULL);
  1378. p = fix->list;
  1379. fix->list = *((void **) p);
  1380. fix->list_size--;
  1381. fix->u.nocpool.allocated--;
  1382. }
  1383. blk = UMEM2BLK(p);
  1384. if (IS_SBC_BLK(blk))
  1385. destroy_carrier(allctr, blk, NULL);
  1386. else
  1387. mbc_free(allctr, type, p, NULL);
  1388. ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
  1389. }
  1390. static ERTS_INLINE erts_aint32_t
  1391. fix_nocpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
  1392. {
  1393. int all_empty = 1;
  1394. erts_aint32_t res = 0;
  1395. int ix, o;
  1396. int flush = flgs == 0;
  1397. if (allctr->thread_safe)
  1398. erts_mtx_lock(&allctr->mutex);
  1399. for (ix = 0; ix < ERTS_ALC_NO_FIXED_SIZES; ix++) {
  1400. ErtsAlcFixList_t *fix = &allctr->fix[ix];
  1401. ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);
  1402. if (flgs & ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM) {
  1403. fix->u.nocpool.limit = fix->u.nocpool.max_used;
  1404. if (fix->u.nocpool.limit < fix->u.nocpool.used)
  1405. fix->u.nocpool.limit = fix->u.nocpool.used;
  1406. fix->u.nocpool.max_used = fix->u.nocpool.used;
  1407. ASSERT(fix->u.nocpool.limit >= 0);
  1408. }
  1409. if (flush) {
  1410. fix->u.nocpool.limit = 0;
  1411. fix->u.nocpool.max_used = fix->u.nocpool.used;
  1412. ASSERT(fix->u.nocpool.limit >= 0);
  1413. }
  1414. for (o = 0; o < ERTS_ALC_FIX_MAX_SHRINK_OPS || flush; o++) {
  1415. void *ptr;
  1416. if (!flush && fix->u.nocpool.limit >= fix->u.nocpool.allocated)
  1417. break;
  1418. if (fix->list_size == 0)
  1419. break;
  1420. ptr = fix->list;
  1421. fix->list = *((void **) ptr);
  1422. fix->list_size--;
  1423. dealloc_block(allctr, fix->type, 0, ptr, NULL);
  1424. fix->u.nocpool.allocated--;
  1425. }
  1426. if (fix->list_size != 0) {
  1427. if (fix->u.nocpool.limit < fix->u.nocpool.allocated)
  1428. res |= ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC;
  1429. all_empty = 0;
  1430. }
  1431. ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
  1432. }
  1433. if (all_empty)
  1434. sched_fix_shrink(allctr, 0);
  1435. if (allctr->thread_safe)
  1436. erts_mtx_unlock(&allctr->mutex);
  1437. return res;
  1438. }
  1439. erts_aint32_t
  1440. erts_alcu_fix_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
  1441. {
  1442. if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
  1443. return fix_cpool_alloc_shrink(allctr, flgs);
  1444. else
  1445. return fix_nocpool_alloc_shrink(allctr, flgs);
  1446. }
  1447. static void dealloc_carrier(Allctr_t *allctr, Carrier_t *crr, int superaligned);
  1448. static ERTS_INLINE void
  1449. dealloc_mbc(Allctr_t *allctr, Carrier_t *crr)
  1450. {
  1451. ASSERT(IS_MB_CARRIER(crr));
  1452. if (allctr->destroying_mbc)
  1453. allctr->destroying_mbc(allctr, crr);
  1454. dealloc_carrier(allctr, crr, 1);
  1455. }
  1456. static UWord allctr_abandon_limit(Allctr_t *allctr);
  1457. static void set_new_allctr_abandon_limit(Allctr_t*);
  1458. static void abandon_carrier(Allctr_t*, Carrier_t*);
  1459. static void poolify_my_carrier(Allctr_t*, Carrier_t*);
  1460. static void enqueue_homecoming(Allctr_t*, Carrier_t*);
  1461. static ERTS_INLINE Allctr_t*
  1462. get_pref_allctr(void *extra)
  1463. {
  1464. ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
  1465. int pref_ix;
  1466. pref_ix = ERTS_ALC_GET_THR_IX();
  1467. ERTS_CT_ASSERT(sizeof(UWord) == sizeof(Allctr_t *));
  1468. ASSERT(0 <= pref_ix && pref_ix < tspec->size);
  1469. return tspec->allctr[pref_ix];
  1470. }
  1471. #define ERTS_ALC_TS_PREF_LOCK_IF_USED (1)
  1472. #define ERTS_ALC_TS_PREF_LOCK_NO (0)
  1473. /* SMP note:
  1474. * get_used_allctr() must be safe WITHOUT locking the allocator while
  1475. * concurrent threads may be updating adjacent blocks.
  1476. * We rely on getting a consistent result (without atomic op) when reading
  1477. * the block header word even if a concurrent thread is updating
  1478. * the "PREV_FREE" flag bit.
  1479. */
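/*
 * Illustrative sketch (not part of the original source): a typical
 * deallocation path first resolves the owning allocator and only then
 * takes that allocator's lock:
 *
 *     Carrier_t *busy_pcrr_p;
 *     UWord used_sz;
 *     Allctr_t *used = get_used_allctr(pref_allctr,
 *                                      ERTS_ALC_TS_PREF_LOCK_IF_USED,
 *                                      p, &used_sz, &busy_pcrr_p);
 *
 * 'used' may differ from 'pref_allctr' if the carrier has migrated; the
 * block is then redirected to the owner's delayed-dealloc queue.
 */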
  1480. static ERTS_INLINE Allctr_t*
  1481. get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep,
  1482. Carrier_t **busy_pcrr_pp)
  1483. {
  1484. Block_t* blk = UMEM2BLK(p);
  1485. Carrier_t *crr;
  1486. erts_aint_t iallctr;
  1487. Allctr_t *used_allctr;
  1488. *busy_pcrr_pp = NULL;
  1489. if (IS_SBC_BLK(blk)) {
  1490. crr = BLK_TO_SBC(blk);
  1491. if (sizep)
  1492. *sizep = SBC_BLK_SZ(blk) - ABLK_HDR_SZ;
  1493. iallctr = erts_atomic_read_dirty(&crr->allctr);
  1494. }
  1495. else {
  1496. crr = ABLK_TO_MBC(blk);
  1497. if (sizep)
  1498. *sizep = MBC_ABLK_SZ(blk) - ABLK_HDR_SZ;
  1499. if (!ERTS_ALC_IS_CPOOL_ENABLED(pref_allctr))
  1500. iallctr = erts_atomic_read_dirty(&crr->allctr);
  1501. else {
  1502. int locked_pref_allctr = 0;
  1503. iallctr = erts_atomic_read_ddrb(&crr->allctr);
  1504. if (ERTS_ALC_TS_PREF_LOCK_IF_USED == pref_lock
  1505. && pref_allctr->thread_safe) {
  1506. used_allctr = (Allctr_t *) (iallctr & ~ERTS_CRR_ALCTR_FLG_MASK);
  1507. if (pref_allctr == used_allctr) {
  1508. erts_mtx_lock(&pref_allctr->mutex);
  1509. locked_pref_allctr = 1;
  1510. }
  1511. }
  1512. while ((iallctr & ((~ERTS_CRR_ALCTR_FLG_MASK)|ERTS_CRR_ALCTR_FLG_IN_POOL))
  1513. == (((erts_aint_t) pref_allctr)|ERTS_CRR_ALCTR_FLG_IN_POOL)) {
  1514. erts_aint_t act;
  1515. ERTS_ALC_CPOOL_ASSERT(!(iallctr & ERTS_CRR_ALCTR_FLG_BUSY));
  1516. if (iallctr & ERTS_CRR_ALCTR_FLG_HOMECOMING) {
  1517. /*
  1518. * This carrier has just been given back to us by writing
  1519. * to crr->allctr with a write barrier (see abandon_carrier).
  1520. *
  1521. * We need a matching read barrier to guarantee a correct view
  1522. * of the carrier for deallocation work.
  1523. */
  1524. act = erts_atomic_cmpxchg_rb(&crr->allctr,
  1525. iallctr|ERTS_CRR_ALCTR_FLG_BUSY,
  1526. iallctr);
  1527. }
  1528. else {
  1529. act = erts_atomic_cmpxchg_ddrb(&crr->allctr,
  1530. iallctr|ERTS_CRR_ALCTR_FLG_BUSY,
  1531. iallctr);
  1532. }
  1533. if (act == iallctr) {
  1534. *busy_pcrr_pp = crr;
  1535. break;
  1536. }
  1537. iallctr = act;
  1538. }
  1539. used_allctr = (Allctr_t *) (iallctr & ~ERTS_CRR_ALCTR_FLG_MASK);
  1540. if (ERTS_ALC_TS_PREF_LOCK_IF_USED == pref_lock) {
  1541. if (locked_pref_allctr && used_allctr != pref_allctr) {
  1542. /* Was taken out of pool; now owned by someone else */
  1543. erts_mtx_unlock(&pref_allctr->mutex);
  1544. }
  1545. }
  1546. return used_allctr;
  1547. }
  1548. }
  1549. used_allctr = (Allctr_t *) (iallctr & ~ERTS_CRR_ALCTR_FLG_MASK);
  1550. if (ERTS_ALC_TS_PREF_LOCK_IF_USED == pref_lock
  1551. && used_allctr == pref_allctr
  1552. && pref_allctr->thread_safe) {
  1553. erts_mtx_lock(&pref_allctr->mutex);
  1554. }
  1555. return used_allctr;
  1556. }
  1557. static void
  1558. init_dd_queue(ErtsAllctrDDQueue_t *ddq)
  1559. {
  1560. erts_atomic_init_nob(&ddq->tail.data.marker.u.atmc_next, ERTS_AINT_NULL);
  1561. erts_atomic_init_nob(&ddq->tail.data.last,
  1562. (erts_aint_t) &ddq->tail.data.marker);
  1563. erts_atomic_init_nob(&ddq->tail.data.um_refc[0], 0);
  1564. erts_atomic_init_nob(&ddq->tail.data.um_refc[1], 0);
  1565. erts_atomic32_init_nob(&ddq->tail.data.um_refc_ix, 0);
  1566. ddq->head.first = &ddq->tail.data.marker;
  1567. ddq->head.unref_end = &ddq->tail.data.marker;
  1568. ddq->head.next.thr_progress = erts_thr_progress_current();
  1569. ddq->head.next.thr_progress_reached = 1;
  1570. ddq->head.next.um_refc_ix = 1;
  1571. ddq->head.next.unref_end = &ddq->tail.data.marker;
  1572. ddq->head.used_marker = 1;
  1573. }
  1574. static ERTS_INLINE int
  1575. ddq_managed_thread_enqueue(ErtsAllctrDDQueue_t *ddq, void *ptr, int cinit)
  1576. {
  1577. erts_aint_t itmp;
  1578. ErtsAllctrDDBlock_t *enq, *this = ptr;
  1579. erts_atomic_init_nob(&this->u.atmc_next, ERTS_AINT_NULL);
  1580. /* Enqueue at end of list... */
  1581. enq = (ErtsAllctrDDBlock_t *) erts_atomic_read_nob(&ddq->tail.data.last);
  1582. itmp = erts_atomic_cmpxchg_relb(&enq->u.atmc_next,
  1583. (erts_aint_t) this,
  1584. ERTS_AINT_NULL);
  1585. if (itmp == ERTS_AINT_NULL) {
  1586. /* We are required to move the last pointer */
  1587. #ifdef DEBUG
  1588. ASSERT(ERTS_AINT_NULL == erts_atomic_read_nob(&this->u.atmc_next));
  1589. ASSERT(((erts_aint_t) enq)
  1590. == erts_atomic_xchg_relb(&ddq->tail.data.last,
  1591. (erts_aint_t) this));
  1592. #else
  1593. erts_atomic_set_relb(&ddq->tail.data.last, (erts_aint_t) this);
  1594. #endif
  1595. return 1;
  1596. }
  1597. else {
  1598. /*
  1599. * We *need* to insert the element somewhere between the
  1600. * last element we read earlier and the actual last element.
  1601. */
  1602. int i = cinit;
  1603. while (1) {
  1604. erts_aint_t itmp2;
  1605. erts_atomic_set_nob(&this->u.atmc_next, itmp);
  1606. itmp2 = erts_atomic_cmpxchg_relb(&enq->u.atmc_next,
  1607. (erts_aint_t) this,
  1608. itmp);
  1609. if (itmp == itmp2)
  1610. return 0; /* inserted this */
  1611. if ((i & 1) == 0)
  1612. itmp = itmp2;
  1613. else {
  1614. enq = (ErtsAllctrDDBlock_t *) itmp2;
  1615. itmp = erts_atomic_read_acqb(&enq->u.atmc_next);
  1616. ASSERT(itmp != ERTS_AINT_NULL);
  1617. }
  1618. i++;
  1619. }
  1620. }
  1621. }
  1622. static ERTS_INLINE erts_aint_t
  1623. check_insert_marker(ErtsAllctrDDQueue_t *ddq, erts_aint_t ilast)
  1624. {
  1625. if (!ddq->head.used_marker
  1626. && ddq->head.unref_end == (ErtsAllctrDDBlock_t *) ilast) {
  1627. erts_aint_t itmp;
  1628. ErtsAllctrDDBlock_t *last = (ErtsAllctrDDBlock_t *) ilast;
  1629. erts_atomic_init_nob(&ddq->tail.data.marker.u.atmc_next, ERTS_AINT_NULL);
  1630. itmp = erts_atomic_cmpxchg_relb(&last->u.atmc_next,
  1631. (erts_aint_t) &ddq->tail.data.marker,
  1632. ERTS_AINT_NULL);
  1633. if (itmp == ERTS_AINT_NULL) {
  1634. ilast = (erts_aint_t) &ddq->tail.data.marker;
  1635. ddq->head.used_marker = !0;
  1636. erts_atomic_set_relb(&ddq->tail.data.last, ilast);
  1637. }
  1638. }
  1639. return ilast;
  1640. }
  1641. static ERTS_INLINE int
  1642. ddq_enqueue(ErtsAllctrDDQueue_t *ddq, void *ptr, int cinit)
  1643. {
  1644. int last_elem;
  1645. int um_refc_ix = 0;
  1646. int managed_thread = erts_thr_progress_is_managed_thread();
  1647. if (!managed_thread) {
  1648. um_refc_ix = erts_atomic32_read_acqb(&ddq->tail.data.um_refc_ix);
  1649. while (1) {
  1650. int tmp_um_refc_ix;
  1651. erts_atomic_inc_acqb(&ddq->tail.data.um_refc[um_refc_ix]);
  1652. tmp_um_refc_ix = erts_atomic32_read_acqb(&ddq->tail.data.um_refc_ix);
  1653. if (tmp_um_refc_ix == um_refc_ix)
  1654. break;
  1655. erts_atomic_dec_relb(&ddq->tail.data.um_refc[um_refc_ix]);
  1656. um_refc_ix = tmp_um_refc_ix;
  1657. }
  1658. }
  1659. last_elem = ddq_managed_thread_enqueue(ddq, ptr, cinit);
  1660. if (!managed_thread)
  1661. erts_atomic_dec_relb(&ddq->tail.data.um_refc[um_refc_ix]);
  1662. return last_elem;
  1663. }
  1664. static ERTS_INLINE void *
  1665. ddq_dequeue(ErtsAllctrDDQueue_t *ddq)
  1666. {
  1667. ErtsAllctrDDBlock_t *blk;
  1668. if (ddq->head.first == ddq->head.unref_end)
  1669. return NULL;
  1670. blk = ddq->head.first;
  1671. if (blk == &ddq->tail.data.marker) {
  1672. ASSERT(ddq->head.used_marker);
  1673. ddq->head.used_marker = 0;
  1674. blk = ((ErtsAllctrDDBlock_t *)
  1675. erts_atomic_read_nob(&blk->u.atmc_next));
  1676. if (blk == ddq->head.unref_end) {
  1677. ddq->head.first = blk;
  1678. return NULL;
  1679. }
  1680. }
  1681. ddq->head.first = ((ErtsAllctrDDBlock_t *)
  1682. erts_atomic_read_nob(&blk->u.atmc_next));
  1683. ASSERT(ddq->head.first);
  1684. return (void *) blk;
  1685. }
  1686. static int
  1687. ddq_check_incoming(ErtsAllctrDDQueue_t *ddq)
  1688. {
  1689. erts_aint_t ilast = erts_atomic_read_nob(&ddq->tail.data.last);
  1690. if (((ErtsAllctrDDBlock_t *) ilast) == &ddq->tail.data.marker
  1691. && ddq->head.first == &ddq->tail.data.marker) {
  1692. /* Nothing more to do... */
  1693. return 0;
  1694. }
  1695. if (ddq->head.next.thr_progress_reached
  1696. || erts_thr_progress_has_reached(ddq->head.next.thr_progress)) {
  1697. int um_refc_ix;
  1698. ddq->head.next.thr_progress_reached = 1;
  1699. um_refc_ix = ddq->head.next.um_refc_ix;
  1700. if (erts_atomic_read_nob(&ddq->tail.data.um_refc[um_refc_ix]) == 0) {
  1701. /* Move unreferenced end pointer forward... */
  1702. ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
  1703. ddq->head.unref_end = ddq->head.next.unref_end;
  1704. ilast = check_insert_marker(ddq, ilast);
  1705. if (ddq->head.unref_end != (ErtsAllctrDDBlock_t *) ilast) {
  1706. ddq->head.next.unref_end = (ErtsAllctrDDBlock_t *) ilast;
  1707. ddq->head.next.thr_progress = erts_thr_progress_later(NULL);
  1708. erts_atomic32_set_relb(&ddq->tail.data.um_refc_ix,
  1709. um_refc_ix);
  1710. ddq->head.next.um_refc_ix = um_refc_ix == 0 ? 1 : 0;
  1711. ddq->head.next.thr_progress_reached = 0;
  1712. }
  1713. }
  1714. }
  1715. return 1;
  1716. }
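/*
 * Drain sketch (illustrative only; handle_delayed_dealloc() below is the
 * real, more careful implementation): the owning thread dequeues until the
 * visible part of the queue is empty and then probes for incoming work:
 *
 *     void *ptr;
 *     while ((ptr = ddq_dequeue(&allctr->dd.q)) != NULL)
 *         dealloc_block(allctr, type, flags, ptr, NULL);
 *     if (ddq_check_incoming(&allctr->dd.q))
 *         request_thread_progress_and_retry_later();
 *
 * Here 'type', 'flags' and request_thread_progress_and_retry_later() are
 * placeholders for what the caller would actually do.
 */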
  1717. static ERTS_INLINE void
  1718. store_earliest_thr_prgr(ErtsThrPrgrVal *prev_val, ErtsAllctrDDQueue_t *ddq)
  1719. {
  1720. if (!ddq->head.next.thr_progress_reached
  1721. && (*prev_val == ERTS_THR_PRGR_INVALID
  1722. || erts_thr_progress_cmp(ddq->head.next.thr_progress,
  1723. *prev_val) < 0)) {
  1724. *prev_val = ddq->head.next.thr_progress;
  1725. }
  1726. }
  1727. static void
  1728. check_pending_dealloc_carrier(Allctr_t *allctr,
  1729. int *need_thr_progress,
  1730. ErtsThrPrgrVal *thr_prgr_p,
  1731. int *need_more_work);
  1732. static void
  1733. handle_delayed_fix_dealloc(Allctr_t *allctr, ErtsAlcType_t type, Uint32 flags,
  1734. void *ptr)
  1735. {
  1736. ASSERT(ERTS_ALC_IS_FIX_TYPE(type));
  1737. if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr))
  1738. fix_nocpool_free(allctr, type, ptr);
  1739. else {
  1740. Block_t *blk = UMEM2BLK(ptr);
  1741. Carrier_t *busy_pcrr_p;
  1742. Allctr_t *used_allctr;
  1743. if (IS_SBC_BLK(blk)) {
  1744. busy_pcrr_p = NULL;
  1745. goto doit;
  1746. }
  1747. used_allctr = get_used_allctr(allctr, ERTS_ALC_TS_PREF_LOCK_NO, ptr,
  1748. NULL, &busy_pcrr_p);
  1749. if (used_allctr == allctr) {
  1750. doit:
  1751. fix_cpool_free(allctr, type, flags, ptr, &busy_pcrr_p);
  1752. clear_busy_pool_carrier(allctr, busy_pcrr_p);
  1753. }
  1754. else {
  1755. /* Carrier migrated; need to redirect block to new owner... */
  1756. ErtsAllctrDDBlock_t *dd_block;
  1757. int cinit;
  1758. dd_block = (ErtsAllctrDDBlock_t*)ptr;
  1759. dd_block->flags = flags;
  1760. dd_block->type = type;
  1761. ERTS_ALC_CPOOL_ASSERT(!busy_pcrr_p);
  1762. DEC_CC(allctr->calls.this_free);
  1763. cinit = used_allctr->dd.ix - allctr->dd.ix;
  1764. if (ddq_enqueue(&used_allctr->dd.q, ptr, cinit))
  1765. erts_alloc_notify_delayed_dealloc(used_allctr->ix);
  1766. }
  1767. }
  1768. }
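/*
 * In handle_delayed_fix_dealloc() above (and in dealloc_block() below),
 * when the enclosing carrier has migrated to another allocator instance
 * the freed block itself is reused as an ErtsAllctrDDBlock_t carrying its
 * type and flags, and is enqueued on the new owner's delayed-dealloc
 * queue instead of being freed locally; the owner is then notified that
 * delayed deallocation work is pending.
 */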
  1769. static void schedule_dealloc_carrier(Allctr_t*, Carrier_t*);
  1770. static void dealloc_my_carrier(Allctr_t*, Carrier_t*);
  1771. static ERTS_INLINE int
  1772. handle_delayed_dealloc(Allctr_t *allctr,
  1773. int allctr_locked,
  1774. int use_limit,
  1775. int ops_limit,
  1776. int *need_thr_progress,
  1777. ErtsThrPrgrVal *thr_prgr_p,
  1778. int *need_more_work)
  1779. {
  1780. int need_thr_prgr = 0;
  1781. int need_mr_wrk = 0;
  1782. int have_checked_incoming = 0;
  1783. int ops = 0;
  1784. int res;
  1785. ErtsAllctrDDQueue_t *ddq;
  1786. if (allctr->thread_safe && !allctr_locked)
  1787. erts_mtx_lock(&allctr->mutex);
  1788. ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
  1789. ddq = &allctr->dd.q;
  1790. res = 0;
  1791. while (1) {
  1792. Block_t *blk;
  1793. void *ptr;
  1794. if (use_limit && ++ops > ops_limit) {
  1795. if (ddq->head.first != ddq->head.unref_end) {
  1796. need_mr_wrk = 1;
  1797. if (need_more_work)
  1798. *need_more_work |= 1;
  1799. }
  1800. break;
  1801. }
  1802. dequeue:
  1803. ptr = ddq_dequeue(ddq);
  1804. if (!ptr) {
  1805. if (have_checked_incoming)
  1806. break;
  1807. need_thr_prgr = ddq_check_incoming(ddq);
  1808. if (need_thr_progress) {
  1809. *need_thr_progress |= need_thr_prgr;
  1810. if (need_thr_prgr)
  1811. store_earliest_thr_prgr(thr_prgr_p, ddq);
  1812. }
  1813. have_checked_incoming = 1;
  1814. goto dequeue;
  1815. }
  1816. res = 1;
  1817. blk = UMEM2BLK(ptr);
  1818. if (blk->bhdr == HOMECOMING_MBC_BLK_HDR) {
  1819. /*
  1820. * A multiblock carrier that previously was migrated away
  1821. * from us has been sent back to us, either because
  1822. * - it became empty and we need to deallocate it, or
  1823. * - it was inserted into the pool and we need to update our pooled_tree
  1824. */
  1825. Carrier_t *crr = ErtsContainerStruct(blk, Carrier_t,
  1826. cpool.homecoming_dd.blk);
  1827. Block_t* first_blk = MBC_TO_FIRST_BLK(allctr, crr);
  1828. erts_aint_t iallctr;
  1829. ERTS_ALC_CPOOL_ASSERT(ERTS_ALC_IS_CPOOL_ENABLED(allctr));
  1830. ERTS_ALC_CPOOL_ASSERT(allctr == crr->cpool.orig_allctr);
  1831. iallctr = erts_atomic_read_nob(&crr->allctr);
  1832. ASSERT(iallctr & ERTS_CRR_ALCTR_FLG_HOMECOMING);
  1833. while (1) {
  1834. if ((iallctr & (~ERTS_CRR_ALCTR_FLG_MASK |
  1835. ERTS_CRR_ALCTR_FLG_IN_POOL))
  1836. == (erts_aint_t)allctr) {
  1837. /*
  1838. * Carrier is home (mine and not in pool)
  1839. */
  1840. ASSERT(!(iallctr & ERTS_CRR_ALCTR_FLG_BUSY));
  1841. erts_atomic_set_nob(&crr->allctr, (erts_aint_t)allctr);
  1842. if (IS_FREE_LAST_MBC_BLK(first_blk))
  1843. dealloc_my_carrier(allctr, crr);
  1844. else
  1845. ASSERT(crr->cpool.state == ERTS_MBC_IS_HOME);
  1846. }
  1847. else {
  1848. erts_aint_t exp = iallctr;
  1849. erts_aint_t want = iallctr & ~ERTS_CRR_ALCTR_FLG_HOMECOMING;
  1850. iallctr = erts_atomic_cmpxchg_nob(&crr->allctr,
  1851. want,
  1852. exp);
  1853. if (iallctr != exp)
  1854. continue; /* retry */
  1855. ASSERT(crr->cpool.state != ERTS_MBC_IS_HOME);
  1856. unlink_abandoned_carrier(crr);
  1857. if (iallctr & ERTS_CRR_ALCTR_FLG_IN_POOL)
  1858. poolify_my_carrier(allctr, crr);
  1859. else
  1860. crr->cpool.state = ERTS_MBC_WAS_TRAITOR;
  1861. }
  1862. break;
  1863. }
  1864. }
  1865. else {
  1866. ErtsAllctrDDBlock_t *dd_block;
  1867. ErtsAlcType_t type;
  1868. Uint32 flags;
  1869. dd_block = (ErtsAllctrDDBlock_t*)ptr;
  1870. flags = dd_block->flags;
  1871. type = dd_block->type;
  1872. flags |= DEALLOC_FLG_REDIRECTED;
  1873. ASSERT(IS_SBC_BLK(blk) || (ABLK_TO_MBC(blk) !=
  1874. ErtsContainerStruct(blk, Carrier_t,
  1875. cpool.homecoming_dd.blk)));
  1876. INC_CC(allctr->calls.this_free);
  1877. if (ERTS_ALC_IS_FIX_TYPE(type)) {
  1878. handle_delayed_fix_dealloc(allctr, type, flags, ptr);
  1879. } else {
  1880. dealloc_block(allctr, type, flags, ptr, NULL);
  1881. }
  1882. }
  1883. }
  1884. if (need_thr_progress && !(need_thr_prgr | need_mr_wrk)) {
  1885. need_thr_prgr = ddq_check_incoming(ddq);
  1886. *need_thr_progress |= need_thr_prgr;
  1887. if (need_thr_prgr)
  1888. store_earliest_thr_prgr(thr_prgr_p, ddq);
  1889. }
  1890. if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
  1891. check_pending_dealloc_carrier(allctr,
  1892. need_thr_progress,
  1893. thr_prgr_p,
  1894. need_more_work);
  1895. if (allctr->thread_safe && !allctr_locked)
  1896. erts_mtx_unlock(&allctr->mutex);
  1897. return res;
  1898. }
  1899. static ERTS_INLINE void
  1900. enqueue_dealloc_other_instance(ErtsAlcType_t type,
  1901. Allctr_t *allctr,
  1902. void *ptr,
  1903. int cinit)
  1904. {
  1905. ErtsAllctrDDBlock_t *dd_block = ((ErtsAllctrDDBlock_t*)ptr);
  1906. dd_block->type = type;
  1907. dd_block->flags = 0;
  1908. if (ddq_enqueue(&allctr->dd.q, ptr, cinit))
  1909. erts_alloc_notify_delayed_dealloc(allctr->ix);
  1910. }
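/*
 * Callers derive 'cinit' from the difference between the two allocator
 * instances' dd.ix values (see dealloc_block() and
 * handle_delayed_fix_dealloc()). It only seeds the retry counter in
 * ddq_managed_thread_enqueue(), whose parity decides whether a failed
 * insert retries at the same node or advances along the chain, presumably
 * so that concurrent enqueuers spread out over different insertion points.
 */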
  1911. static ERTS_INLINE void
  1912. update_pooled_tree(Allctr_t *allctr, Carrier_t *crr, Uint blk_sz)
  1913. {
  1914. if (allctr == crr->cpool.orig_allctr && crr->cpool.state == ERTS_MBC_WAS_POOLED) {
  1915. /*
  1916. * Update pooled_tree with a potentially new (larger) max_sz
  1917. */
  1918. AOFF_RBTree_t* crr_node = &crr->cpool.pooled;
  1919. if (blk_sz > crr_node->hdr.bhdr) {
  1920. crr_node->hdr.bhdr = blk_sz;
  1921. erts_aoff_larger_max_size(crr_node);
  1922. }
  1923. }
  1924. }
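/*
 * check_abandon_carrier() below considers handing the carrier of a newly
 * freed block over to the shared pool. It bails out early if the carrier
 * pool is disabled, abandoning is temporarily disabled, the allocator
 * still carries more live block data than its abandon limit, the pool has
 * reached its carrier limit, the carrier is the main carrier, the carrier
 * itself holds too much live data, required thread progress has not yet
 * been reached, or the carrier's largest free block is too small to be
 * worth advertising in the pool.
 */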
  1925. static ERTS_INLINE void
  1926. check_abandon_carrier(Allctr_t *allctr, Block_t *fblk, Carrier_t **busy_pcrr_pp)
  1927. {
  1928. Carrier_t *crr;
  1929. UWord ncrr_in_pool, largest_fblk;
  1930. if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr))
  1931. return;
  1932. ASSERT(allctr->cpool.abandon_limit == allctr_abandon_limit(allctr));
  1933. ASSERT(erts_thr_progress_is_managed_thread());
  1934. if (allctr->cpool.disable_abandon)
  1935. return;
  1936. if (allctr->mbcs.blocks.curr.size > allctr->cpool.abandon_limit)
  1937. return;
  1938. ncrr_in_pool = erts_atomic_read_nob(&allctr->cpool.stat.no_carriers);
  1939. if (ncrr_in_pool >= allctr->cpool.in_pool_limit)
  1940. return;
  1941. crr = FBLK_TO_MBC(fblk);
  1942. if (allctr->main_carrier == crr)
  1943. return;
  1944. if (crr->cpool.total_blocks_size > crr->cpool.abandon_limit)
  1945. return;
  1946. if (crr->cpool.thr_prgr != ERTS_THR_PRGR_INVALID
  1947. && !erts_thr_progress_has_reached(crr->cpool.thr_prgr))
  1948. return;
  1949. largest_fblk = allctr->largest_fblk_in_mbc(allctr, crr);
  1950. if (largest_fblk < allctr->cpool.fblk_min_limit)
  1951. return;
  1952. erts_atomic_set_nob(&crr->cpool.max_size, largest_fblk);
  1953. abandon_carrier(allctr, crr);
  1954. }
  1955. void
  1956. erts_alcu_check_delayed_dealloc(Allctr_t *allctr,
  1957. int limit,
  1958. int *need_thr_progress,
  1959. ErtsThrPrgrVal *thr_prgr_p,
  1960. int *more_work)
  1961. {
  1962. handle_delayed_dealloc(allctr,
  1963. 0,
  1964. limit,
  1965. ERTS_ALCU_DD_OPS_LIM_HIGH,
  1966. need_thr_progress,
  1967. thr_prgr_p,
  1968. more_work);
  1969. }
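/*
 * Hypothetical call site (for exposition only): auxiliary work can drain
 * a bounded number of delayed deallocations and learn whether more work
 * or further thread progress is needed:
 *
 *     int need_thr_prgr = 0, more_work = 0;
 *     ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID;
 *     erts_alcu_check_delayed_dealloc(allctr, 1, &need_thr_prgr,
 *                                     &wakeup, &more_work);
 */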
  1970. #define ERTS_ALCU_HANDLE_DD_IN_OP(Allctr, Locked) \
  1971. handle_delayed_dealloc((Allctr), (Locked), 1, \
  1972. ERTS_ALCU_DD_OPS_LIM_LOW, NULL, NULL, NULL)
  1973. static void
  1974. dealloc_block(Allctr_t *allctr, ErtsAlcType_t type, Uint32 flags, void *ptr,
  1975. ErtsAlcFixList_t *fix)
  1976. {
  1977. Block_t *blk = UMEM2BLK(ptr);
  1978. ASSERT(!fix || type == fix->type);
  1979. ERTS_LC_ASSERT(!allctr->thread_safe
  1980. || erts_lc_mtx_is_locked(&allctr->mutex));
  1981. if (IS_SBC_BLK(blk)) {
  1982. destroy_carrier(allctr, blk, NULL);
  1983. if (fix && ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
  1984. if (!(flags & DEALLOC_FLG_FIX_SHRINK))
  1985. fix->u.cpool.used--;
  1986. fix->u.cpool.allocated--;
  1987. }
  1988. }
  1989. else if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr))
  1990. mbc_free(allctr, type, ptr, NULL);
  1991. else {
  1992. Carrier_t *busy_pcrr_p;
  1993. Allctr_t *used_allctr;
  1994. used_allctr = get_used_allctr(allctr, ERTS_ALC_TS_PREF_LOCK_NO, ptr,
  1995. NULL, &busy_pcrr_p);
  1996. if (used_allctr == allctr) {
  1997. if (fix) {
  1998. if (!(flags & DEALLOC_FLG_FIX_SHRINK))
  1999. fix->u.cpool.used--;
  2000. fix->u.cpool.allocated--;
  2001. }
  2002. mbc_free(allctr, type, ptr, &busy_pcrr_p);
  2003. clear_busy_pool_carrier(allctr, busy_pcrr_p);
  2004. }
  2005. else {
  2006. /* Carrier migrated; need to redirect block to new owner... */
  2007. ErtsAllctrDDBlock_t *dd_block;
  2008. int cinit;
  2009. dd_block = (ErtsAllctrDDBlock_t*)ptr;
  2010. dd_block->flags = flags;
  2011. dd_block->type = type;
  2012. ERTS_ALC_CPOOL_ASSERT(!busy_pcrr_p);
  2013. if (flags & DEALLOC_FLG_REDIRECTED)
  2014. DEC_CC(allctr->calls.this_free);
  2015. cinit = used_allctr->dd.ix - allctr->dd.ix;
  2016. if (ddq_enqueue(&used_allctr->dd.q, ptr, cinit))
  2017. erts_alloc_notify_delayed_dealloc(used_allctr->ix);
  2018. }
  2019. }
  2020. }
  2021. /* Multi block carrier alloc/realloc/free ... */
  2022. /* NOTE! mbc_alloc() may, in case of memory shortage, place the requested
  2023. * block in an sbc.
  2024. */
  2025. static ERTS_INLINE void *
  2026. mbc_alloc_block(Allctr_t *allctr, Uint size, Uint *blk_szp)
  2027. {
  2028. Block_t *blk;
  2029. Uint get_blk_sz;
  2030. ASSERT(size);
  2031. ASSERT(size < allctr->sbc_threshold);
  2032. *blk_szp = get_blk_sz = UMEMSZ2BLKSZ(allctr, size);
  2033. blk = (*allctr->get_free_block)(allctr, get_blk_sz, NULL, 0);
  2034. if (!blk) {
  2035. blk = create_carrier(allctr, get_blk_sz, CFLG_MBC);
  2036. #if !ERTS_SUPER_ALIGNED_MSEG_ONLY
  2037. if (!blk) {
  2038. /* Emergency! We couldn't create the carrier as we wanted.
  2039. Try to place it in a sys_alloced sbc. */
  2040. blk = create_carrier(allctr,
  2041. size,
  2042. (CFLG_SBC
  2043. | CFLG_FORCE_SIZE
  2044. | CFLG_FORCE_SYS_ALLOC));
  2045. }
  2046. #endif
  2047. }
  2048. #ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
  2049. if (IS_MBC_BLK(blk)) {
  2050. (*allctr->link_free_block)(allctr, blk);
  2051. HARD_CHECK_BLK_CARRIER(allctr, blk);
  2052. (*allctr->unlink_free_block)(allctr, blk);
  2053. }
  2054. #endif
  2055. return blk;
  2056. }
  2057. static ERTS_INLINE void
  2058. mbc_alloc_finalize(Allctr_t *allctr,
  2059. Block_t *blk,
  2060. Uint org_blk_sz,
  2061. UWord flags,
  2062. Carrier_t *crr,
  2063. Uint want_blk_sz,
  2064. int valid_blk_info)
  2065. {
  2066. Uint blk_sz;
  2067. Uint nxt_blk_sz;
  2068. Block_t *nxt_blk;
  2069. UWord prev_free_flg = flags & PREV_FREE_BLK_HDR_FLG;
  2070. ASSERT(org_blk_sz >= want_blk_sz);
  2071. ASSERT(blk);
  2072. #ifdef DEBUG
  2073. nxt_blk = NULL;
  2074. #endif
  2075. if (org_blk_sz - allctr->min_block_size >= want_blk_sz) {
  2076. /* Shrink block... */
  2077. blk_sz = want_blk_sz;
  2078. nxt_blk_sz = org_blk_sz - blk_sz;
  2079. SET_MBC_ABLK_HDR(blk, blk_sz, prev_free_flg, crr);
  2080. nxt_blk = BLK_AFTER(blk, blk_sz);
  2081. SET_MBC_FBLK_HDR(nxt_blk, nxt_blk_sz,
  2082. SBH_THIS_FREE|(flags & LAST_BLK_HDR_FLG),
  2083. crr);
  2084. if (!(flags & LAST_BLK_HDR_FLG)) {
  2085. SET_BLK_SZ_FTR(nxt_blk, nxt_blk_sz);
  2086. if (!valid_blk_info) {
  2087. Block_t *nxt_nxt_blk = BLK_AFTER(nxt_blk, nxt_blk_sz);
  2088. SET_PREV_BLK_FREE(allctr, nxt_nxt_blk);
  2089. }
  2090. }
  2091. (*allctr->link_free_block)(allctr, nxt_blk);
  2092. ASSERT(IS_NOT_LAST_BLK(blk));
  2093. ASSERT(IS_FREE_BLK(nxt_blk));
  2094. ASSERT((flags & LAST_BLK_HDR_FLG)
  2095. ? IS_LAST_BLK(nxt_blk)
  2096. : IS_NOT_LAST_BLK(nxt_blk));
  2097. ASSERT((flags & LAST_BLK_HDR_FLG)
  2098. || nxt_blk == PREV_BLK(NXT_BLK(nxt_blk)));
  2099. ASSERT((flags & LAST_BLK_HDR_FLG)
  2100. || IS_PREV_BLK_FREE(NXT_BLK(nxt_blk)));
  2101. ASSERT(nxt_blk_sz == MBC_BLK_SZ(nxt_blk));
  2102. ASSERT(nxt_blk_sz % sizeof(Unit_t) == 0);
  2103. ASSERT(nxt_blk_sz >= allctr->min_block_size);
  2104. ASSERT(ABLK_TO_MBC(blk) == crr);
  2105. ASSERT(FBLK_TO_MBC(nxt_blk) == crr);
  2106. }
  2107. else {
  2108. ASSERT(org_blk_sz <= MBC_ABLK_SZ_MASK);
  2109. blk_sz = org_blk_sz;
  2110. if (flags & LAST_BLK_HDR_FLG) {
  2111. if (valid_blk_info)
  2112. SET_BLK_ALLOCED(blk);
  2113. else
  2114. SET_MBC_ABLK_HDR(blk, blk_sz, SBH_LAST_BLK|prev_free_flg, crr);
  2115. }
  2116. else {
  2117. if (valid_blk_info)
  2118. SET_BLK_ALLOCED(blk);
  2119. else
  2120. SET_MBC_ABLK_HDR(blk, blk_sz, prev_free_flg, crr);
  2121. nxt_blk = BLK_AFTER(blk, blk_sz);
  2122. SET_PREV_BLK_ALLOCED(nxt_blk);
  2123. }
  2124. ASSERT((flags & LAST_BLK_HDR_FLG)
  2125. ? IS_LAST_BLK(blk)
  2126. : IS_NOT_LAST_BLK(blk));
  2127. ASSERT(ABLK_TO_MBC(blk) == crr);
  2128. }
  2129. ERTS_ALC_CPOOL_ALLOC_OP(allctr);
  2130. STAT_MBC_BLK_ALLOC(allctr, crr, blk_sz, alcu_flgs);
  2131. ASSERT(IS_ALLOCED_BLK(blk));
  2132. ASSERT(blk_sz == MBC_BLK_SZ(blk));
  2133. ASSERT(blk_sz % sizeof(Unit_t) == 0);
  2134. ASSERT(blk_sz >= allctr->min_block_size);
  2135. ASSERT(blk_sz >= want_blk_sz);
  2136. ASSERT(IS_MBC_BLK(blk));
  2137. ASSERT(!nxt_blk || IS_PREV_BLK_ALLOCED(nxt_blk));
  2138. ASSERT(!nxt_blk || IS_MBC_BLK(nxt_blk));
  2139. HARD_CHECK_BLK_CARRIER(allctr, blk);
  2140. }
  2141. static void *
  2142. mbc_alloc(Allctr_t *allctr, Uint size)
  2143. {
  2144. Block_t *blk;
  2145. Uint blk_sz;
  2146. blk = mbc_alloc_block(allctr, size, &blk_sz);
  2147. if (!blk)
  2148. return NULL;
  2149. if (IS_MBC_BLK(blk))
  2150. mbc_alloc_finalize(allctr,
  2151. blk,
  2152. MBC_FBLK_SZ(blk),
  2153. GET_BLK_HDR_FLGS(blk),
  2154. FBLK_TO_MBC(blk),
  2155. blk_sz,
  2156. 1);
  2157. return BLK2UMEM(blk);
  2158. }
  2159. typedef struct {
  2160. char *ptr;
  2161. UWord size;
  2162. } ErtsMemDiscardRegion;
  2163. /* Construct a discard region for the user memory of a free block, letting the
  2164. * OS reclaim its physical memory when required.
  2165. *
  2166. * Note that we're ignoring both the footer and everything that comes before
  2167. * the minimum block size as the allocator uses those areas to manage the
  2168. * block. */
  2169. static void ERTS_INLINE
  2170. mem_discard_start(Allctr_t *allocator, Block_t *block,
  2171. ErtsMemDiscardRegion *out)
  2172. {
  2173. UWord size = BLK_SZ(block);
  2174. ASSERT(size >= allocator->min_block_size);
  2175. if (size > (allocator->min_block_size + FBLK_FTR_SZ)) {
  2176. out->size = size - allocator->min_block_size - FBLK_FTR_SZ;
  2177. } else {
  2178. out->size = 0;
  2179. }
  2180. out->ptr = (char*)block + allocator->min_block_size;
  2181. }
  2182. /* Expands a discard region into a neighboring free block, allowing us to
  2183. * discard the block header and first page.
  2184. *
  2185. * This is very important in small-allocation scenarios where no single block
  2186. * is large enough to be discarded on its own. */
  2187. static void ERTS_INLINE
  2188. mem_discard_coalesce(Allctr_t *allocator, Block_t *neighbor,
  2189. ErtsMemDiscardRegion *region)
  2190. {
  2191. char *neighbor_start;
  2192. ASSERT(IS_FREE_BLK(neighbor));
  2193. neighbor_start = (char*)neighbor;
  2194. if (region->ptr >= neighbor_start) {
  2195. char *region_start_page;
  2196. region_start_page = region->ptr - SYS_PAGE_SIZE;
  2197. region_start_page = (char*)((UWord)region_start_page & ~SYS_PAGE_SZ_MASK);
  2198. /* Expand if our first page begins within the previous free block's
  2199. * unused data. */
  2200. if (region_start_page >= (neighbor_start + allocator->min_block_size)) {
  2201. region->size += (region->ptr - region_start_page) - FBLK_FTR_SZ;
  2202. region->ptr = region_start_page;
  2203. }
  2204. } else {
  2205. char *region_end_page;
  2206. UWord neighbor_size;
  2207. ASSERT(region->ptr <= neighbor_start);
  2208. region_end_page = region->ptr + region->size + SYS_PAGE_SIZE;
  2209. region_end_page = (char*)((UWord)region_end_page & ~SYS_PAGE_SZ_MASK);
  2210. neighbor_size = BLK_SZ(neighbor) - FBLK_FTR_SZ;
  2211. /* Expand if our last page ends anywhere within the next free block,
  2212. * sans the footer we'll inherit. */
  2213. if (region_end_page < neighbor_start + neighbor_size) {
  2214. region->size += region_end_page - (region->ptr + region->size);
  2215. }
  2216. }
  2217. }
  2218. static void ERTS_INLINE
  2219. mem_discard_finish(Allctr_t *allocator, Block_t *block,
  2220. ErtsMemDiscardRegion *region)
  2221. {
  2222. #ifdef DEBUG
  2223. char *block_start, *block_end;
  2224. UWord block_size;
  2225. block_size = BLK_SZ(block);
  2226. /* Ensure that the region is completely covered by the legal area of the
  2227. * free block. This must hold even when the region is too small to be
  2228. * discarded. */
  2229. if (region->size > 0) {
  2230. ASSERT(block_size > allocator->min_block_size + FBLK_FTR_SZ);
  2231. block_start = (char*)block + allocator->min_block_size;
  2232. block_end = (char*)block + block_size - FBLK_FTR_SZ;
  2233. ASSERT(region->size == 0 ||
  2234. (region->ptr + region->size <= block_end &&
  2235. region->ptr >= block_start &&
  2236. region->size <= block_size));
  2237. }
  2238. #else
  2239. (void)allocator;
  2240. (void)block;
  2241. #endif
  2242. if (region->size > SYS_PAGE_SIZE) {
  2243. UWord align_offset, size;
  2244. char *ptr;
  2245. align_offset = SYS_PAGE_SIZE - ((UWord)region->ptr & SYS_PAGE_SZ_MASK);
  2246. size = (region->size - align_offset) & ~SYS_PAGE_SZ_MASK;
  2247. ptr = region->ptr + align_offset;
  2248. if (size > 0) {
  2249. ASSERT(!((UWord)ptr & SYS_PAGE_SZ_MASK));
  2250. ASSERT(!(size & SYS_PAGE_SZ_MASK));
  2251. erts_mem_discard(ptr, size);
  2252. }
  2253. }
  2254. }
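/*
 * Usage sketch (simplified; mbc_free() below does this for real): a
 * discard region is created for the block being freed, grown over each
 * free neighbor it is coalesced with, and only handed to the OS once the
 * final block is linked in:
 *
 *     ErtsMemDiscardRegion region;
 *     mem_discard_start(allctr, blk, &region);
 *     mem_discard_coalesce(allctr, neighbor_blk, &region);
 *     mem_discard_finish(allctr, coalesced_blk, &region);
 *
 * Here neighbor_blk and coalesced_blk stand for the adjacent free block
 * and the resulting merged block, respectively.
 */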
  2255. static void
  2256. carrier_mem_discard_free_blocks(Allctr_t *allocator, Carrier_t *carrier)
  2257. {
  2258. static const int MAX_BLOCKS_TO_DISCARD = 100;
  2259. Block_t *block;
  2260. int i;
  2261. block = allocator->first_fblk_in_mbc(allocator, carrier);
  2262. i = 0;
  2263. while (block != NULL && i < MAX_BLOCKS_TO_DISCARD) {
  2264. ErtsMemDiscardRegion region;
  2265. ASSERT(IS_FREE_BLK(block));
  2266. mem_discard_start(allocator, block, &region);
  2267. mem_discard_finish(allocator, block, &region);
  2268. block = allocator->next_fblk_in_mbc(allocator, carrier, block);
  2269. i++;
  2270. }
  2271. }
  2272. static void
  2273. mbc_free(Allctr_t *allctr, ErtsAlcType_t type, void *p, Carrier_t **busy_pcrr_pp)
  2274. {
  2275. ErtsMemDiscardRegion discard_region = {0};
  2276. int discard;
  2277. Uint is_first_blk;
  2278. Uint is_last_blk;
  2279. Uint blk_sz;
  2280. Block_t *blk;
  2281. Block_t *nxt_blk;
  2282. Carrier_t *crr;
  2283. ASSERT(p);
  2284. blk = UMEM2BLK(p);
  2285. blk_sz = MBC_ABLK_SZ(blk);
  2286. ASSERT(IS_MBC_BLK(blk));
  2287. ASSERT(blk_sz >= allctr->min_block_size);
  2288. #ifndef DEBUG
  2289. /* We want to mark freed blocks as reclaimable to the OS, but it's a fairly
  2290. * expensive operation which doesn't do much good if we use it again soon
  2291. * after, so we limit it to deallocations on pooled carriers. */
  2292. discard = busy_pcrr_pp && *busy_pcrr_pp;
  2293. #else
  2294. /* Always discard in debug mode, regardless of whether we're in the pool or
  2295. * not. */
  2296. discard = 1;
  2297. #endif
  2298. if (discard) {
  2299. mem_discard_start(allctr, blk, &discard_region);
  2300. }
  2301. HARD_CHECK_BLK_CARRIER(allctr, blk);
  2302. crr = ABLK_TO_MBC(blk);
  2303. ERTS_ALC_CPOOL_FREE_OP(allctr);
  2304. STAT_MBC_BLK_FREE(allctr, type, crr, busy_pcrr_pp, blk_sz, alcu_flgs);
  2305. is_first_blk = IS_MBC_FIRST_ABLK(allctr, blk);
  2306. is_last_blk = IS_LAST_BLK(blk);
  2307. if (IS_PREV_BLK_FREE(blk)) {
  2308. ASSERT(!is_first_blk);
  2309. /* Coalesce with previous block... */
  2310. blk = PREV_BLK(blk);
  2311. (*allctr->unlink_free_block)(allctr, blk);
  2312. if (discard) {
  2313. mem_discard_coalesce(allctr, blk, &discard_region);
  2314. }
  2315. blk_sz += MBC_FBLK_SZ(blk);
  2316. is_first_blk = IS_MBC_FIRST_FBLK(allctr, blk);
  2317. SET_MBC_FBLK_SZ(blk, blk_sz);
  2318. }
  2319. else {
  2320. SET_BLK_FREE(blk);
  2321. }
  2322. if (is_last_blk)
  2323. SET_LAST_BLK(blk);
  2324. else {
  2325. nxt_blk = BLK_AFTER(blk, blk_sz);
  2326. if (IS_FREE_BLK(nxt_blk)) {
  2327. /* Coalesce with next block... */
  2328. (*allctr->unlink_free_block)(allctr, nxt_blk);
  2329. if (discard) {
  2330. mem_discard_coalesce(allctr, nxt_blk, &discard_region);
  2331. }
  2332. blk_sz += MBC_FBLK_SZ(nxt_blk);
  2333. SET_MBC_FBLK_SZ(blk, blk_sz);
  2334. is_last_blk = IS_LAST_BLK(nxt_blk);
  2335. if (is_last_blk)
  2336. SET_LAST_BLK(blk);
  2337. else {
  2338. SET_NOT_LAST_BLK(blk);
  2339. SET_BLK_SZ_FTR(blk, blk_sz);
  2340. }
  2341. }
  2342. else {
  2343. SET_PREV_BLK_FREE(allctr, nxt_blk);
  2344. SET_NOT_LAST_BLK(blk);
  2345. SET_BLK_SZ_FTR(blk, blk_sz);
  2346. }
  2347. }
  2348. ASSERT(IS_FREE_BLK(blk));
  2349. ASSERT(!is_last_blk == !IS_LAST_BLK(blk));
  2350. ASSERT(!is_first_blk == !IS_MBC_FIRST_FBLK(allctr, blk));
  2351. ASSERT(is_first_blk || IS_PREV_BLK_ALLOCED(blk));
  2352. ASSERT(is_last_blk || IS_PREV_BLK_FREE(NXT_BLK(blk)));
  2353. ASSERT(blk_sz == MBC_BLK_SZ(blk));
  2354. ASSERT(is_last_blk || blk == PREV_BLK(NXT_BLK(blk)));
  2355. ASSERT(blk_sz % sizeof(Unit_t) == 0);
  2356. ASSERT(IS_MBC_BLK(blk));
  2357. if (is_first_blk && is_last_blk && crr != allctr->main_carrier) {
  2358. destroy_carrier(allctr, blk, busy_pcrr_pp);
  2359. }
  2360. else {
  2361. (*allctr->link_free_block)(allctr, blk);
  2362. HARD_CHECK_BLK_CARRIER(allctr, blk);
  2363. if (discard) {
  2364. mem_discard_finish(allctr, blk, &discard_region);
  2365. }
  2366. if (busy_pcrr_pp && *busy_pcrr_pp) {
  2367. update_pooled_tree(allctr, crr, blk_sz);
  2368. } else {
  2369. check_abandon_carrier(allctr, blk, busy_pcrr_pp);
  2370. }
  2371. }
  2372. }
  2373. static void *
  2374. mbc_realloc(Allctr_t *allctr, ErtsAlcType_t type, void *p, Uint size,
  2375. Uint32 alcu_flgs, Carrier_t **busy_pcrr_pp)
  2376. {
  2377. void *new_p;
  2378. Uint old_blk_sz;
  2379. Block_t *blk;
  2380. #ifndef MBC_REALLOC_ALWAYS_MOVES
  2381. Block_t *new_blk, *cand_blk;
  2382. Uint cand_blk_sz;
  2383. Uint blk_sz, get_blk_sz;
  2384. Block_t *nxt_blk;
  2385. Uint nxt_blk_sz;
  2386. Uint is_last_blk;
  2387. #endif /* #ifndef MBC_REALLOC_ALWAYS_MOVES */
  2388. ASSERT(p);
  2389. ASSERT(size);
  2390. ASSERT(size < allctr->sbc_threshold);
  2391. blk = (Block_t *) UMEM2BLK(p);
  2392. old_blk_sz = MBC_ABLK_SZ(blk);
  2393. ASSERT(old_blk_sz >= allctr->min_block_size);
  2394. #ifdef MBC_REALLOC_ALWAYS_MOVES
  2395. if (alcu_flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
  2396. return NULL;
  2397. #else /* !MBC_REALLOC_ALWAYS_MOVES */
  2398. if (busy_pcrr_pp && *busy_pcrr_pp) {
  2399. /*
  2400. * Don't want to use carrier in pool
  2401. */
  2402. new_p = mbc_alloc(allctr, size);
  2403. if (!new_p)
  2404. return NULL;
  2405. new_blk = UMEM2BLK(new_p);
  2406. ASSERT(!(IS_MBC_BLK(new_blk) && ABLK_TO_MBC(new_blk) == *busy_pcrr_pp));
  2407. sys_memcpy(new_p, p, MIN(size, old_blk_sz - ABLK_HDR_SZ));
  2408. mbc_free(allctr, type, p, busy_pcrr_pp);
  2409. return new_p;
  2410. }
  2411. get_blk_sz = blk_sz = UMEMSZ2BLKSZ(allctr, size);
  2412. ASSERT(IS_ALLOCED_BLK(blk));
  2413. ASSERT(IS_MBC_BLK(blk));
  2414. is_last_blk = IS_LAST_BLK(blk);
  2415. if (old_blk_sz == blk_sz)
  2416. return p;
  2417. else if (blk_sz < old_blk_sz) {
  2418. /* Shrink block... */
  2419. Carrier_t* crr;
  2420. Block_t *nxt_nxt_blk;
  2421. Uint diff_sz_val = old_blk_sz - blk_sz;
  2422. Uint old_blk_sz_val = old_blk_sz;
  2423. if (get_blk_sz >= old_blk_sz)
  2424. return p;
  2425. if (diff_sz_val >= (~((Uint) 0) / 100)) {
  2426. /* div both by 128 */
  2427. old_blk_sz_val >>= 7;
  2428. diff_sz_val >>= 7;
  2429. }
  2430. /* Avoid fragmentation by moving the block if it is shrunk much */
  2431. if (100*diff_sz_val > allctr->mbc_move_threshold*old_blk_sz_val) {
  2432. if (alcu_flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
  2433. return NULL;
  2434. cand_blk_sz = old_blk_sz;
  2435. if (!IS_PREV_BLK_FREE(blk)) {
  2436. cand_blk = blk;
  2437. }
  2438. else {
  2439. ASSERT(!IS_MBC_FIRST_ABLK(allctr, blk));
  2440. cand_blk = PREV_BLK(blk);
  2441. cand_blk_sz += PREV_BLK_SZ(blk);
  2442. }
  2443. if (!is_last_blk) {
  2444. nxt_blk = BLK_AFTER(blk, old_blk_sz);
  2445. if (IS_FREE_BLK(nxt_blk))
  2446. cand_blk_sz += MBC_FBLK_SZ(nxt_blk);
  2447. }
  2448. new_blk = (*allctr->get_free_block)(allctr,
  2449. get_blk_sz,
  2450. cand_blk,
  2451. cand_blk_sz);
  2452. if (new_blk || cand_blk != blk)
  2453. goto move_into_new_blk;
  2454. }
  2455. /* Shrink at current location */
  2456. nxt_blk_sz = old_blk_sz - blk_sz;
  2457. if ((is_last_blk || IS_ALLOCED_BLK(BLK_AFTER(blk,old_blk_sz)))
  2458. && (nxt_blk_sz < allctr->min_block_size))
  2459. return p;
  2460. HARD_CHECK_BLK_CARRIER(allctr, blk);
  2461. nxt_nxt_blk = BLK_AFTER(blk, old_blk_sz);
  2462. SET_MBC_ABLK_SZ(blk, blk_sz);
  2463. SET_NOT_LAST_BLK(blk);
  2464. nxt_blk = BLK_AFTER(blk, blk_sz);
  2465. crr = ABLK_TO_MBC(blk);
  2466. ERTS_ALC_CPOOL_REALLOC_OP(allctr);
  2467. STAT_MBC_BLK_FREE(allctr, type, crr, NULL, old_blk_sz, alcu_flgs);
  2468. STAT_MBC_BLK_ALLOC(allctr, crr, blk_sz, alcu_flgs);
  2469. ASSERT(MBC_BLK_SZ(blk) >= allctr->min_block_size);
  2470. if (!is_last_blk) {
  2471. if (IS_FREE_BLK(nxt_nxt_blk)) {
  2472. /* Coalesce with next free block... */
  2473. nxt_blk_sz += MBC_FBLK_SZ(nxt_nxt_blk);
  2474. (*allctr->unlink_free_block)(allctr, nxt_nxt_blk);
  2475. is_last_blk = GET_LAST_BLK_HDR_FLG(nxt_nxt_blk);
  2476. }
  2477. else {
  2478. SET_PREV_BLK_FREE(allctr, nxt_nxt_blk);
  2479. }
  2480. SET_BLK_SZ_FTR(nxt_blk, nxt_blk_sz);
  2481. }
  2482. SET_MBC_FBLK_HDR(nxt_blk, nxt_blk_sz,
  2483. SBH_THIS_FREE | (is_last_blk ? SBH_LAST_BLK : 0),
  2484. crr);
  2485. (*allctr->link_free_block)(allctr, nxt_blk);
  2486. ASSERT(IS_ALLOCED_BLK(blk));
  2487. ASSERT(blk_sz == MBC_BLK_SZ(blk));
  2488. ASSERT(blk_sz % sizeof(Unit_t) == 0);
  2489. ASSERT(blk_sz >= allctr->min_block_size);
  2490. ASSERT(blk_sz >= size + ABLK_HDR_SZ);
  2491. ASSERT(IS_MBC_BLK(blk));
  2492. ASSERT(IS_FREE_BLK(nxt_blk));
  2493. ASSERT(IS_PREV_BLK_ALLOCED(nxt_blk));
  2494. ASSERT(nxt_blk_sz == MBC_BLK_SZ(nxt_blk));
  2495. ASSERT(nxt_blk_sz % sizeof(Unit_t) == 0);
  2496. ASSERT(nxt_blk_sz >= allctr->min_block_size);
  2497. ASSERT(IS_MBC_BLK(nxt_blk));
  2498. ASSERT(is_last_blk ? IS_LAST_BLK(nxt_blk) : IS_NOT_LAST_BLK(nxt_blk));
  2499. ASSERT(is_last_blk || nxt_blk == PREV_BLK(NXT_BLK(nxt_blk)));
  2500. ASSERT(is_last_blk || IS_PREV_BLK_FREE(NXT_BLK(nxt_blk)));
  2501. ASSERT(FBLK_TO_MBC(nxt_blk) == crr);
  2502. HARD_CHECK_BLK_CARRIER(allctr, blk);
  2503. check_abandon_carrier(allctr, nxt_blk, NULL);
  2504. return p;
  2505. }
  2506. /* Need larger block... */
  2507. if (!is_last_blk) {
  2508. nxt_blk = BLK_AFTER(blk, old_blk_sz);
  2509. nxt_blk_sz = MBC_BLK_SZ(nxt_blk);
  2510. if (IS_FREE_BLK(nxt_blk) && get_blk_sz <= old_blk_sz + nxt_blk_sz) {
  2511. Carrier_t* crr = ABLK_TO_MBC(blk);
  2512. /* Grow into next block... */
  2513. HARD_CHECK_BLK_CARRIER(allctr, blk);
  2514. (*allctr->unlink_free_block)(allctr, nxt_blk);
  2515. nxt_blk_sz -= blk_sz - old_blk_sz;
  2516. is_last_blk = IS_LAST_BLK(nxt_blk);
  2517. if (nxt_blk_sz < allctr->min_block_size) {
  2518. blk_sz += nxt_blk_sz;
  2519. SET_MBC_ABLK_SZ(blk, blk_sz);
  2520. if (is_last_blk) {
  2521. SET_LAST_BLK(blk);
  2522. #ifdef DEBUG
  2523. nxt_blk = NULL;
  2524. #endif
  2525. }
  2526. else {
  2527. nxt_blk = BLK_AFTER(blk, blk_sz);
  2528. SET_PREV_BLK_ALLOCED(nxt_blk);
  2529. #ifdef DEBUG
  2530. is_last_blk = IS_LAST_BLK(nxt_blk);
  2531. nxt_blk_sz = MBC_BLK_SZ(nxt_blk);
  2532. #endif
  2533. }
  2534. }
  2535. else {
  2536. SET_MBC_ABLK_SZ(blk, blk_sz);
  2537. nxt_blk = BLK_AFTER(blk, blk_sz);
  2538. SET_MBC_FBLK_HDR(nxt_blk, nxt_blk_sz, SBH_THIS_FREE, crr);
  2539. if (is_last_blk)
  2540. SET_LAST_BLK(nxt_blk);
  2541. else
  2542. SET_BLK_SZ_FTR(nxt_blk, nxt_blk_sz);
  2543. (*allctr->link_free_block)(allctr, nxt_blk);
  2544. ASSERT(IS_FREE_BLK(nxt_blk));
  2545. ASSERT(FBLK_TO_MBC(nxt_blk) == crr);
  2546. }
  2547. ERTS_ALC_CPOOL_REALLOC_OP(allctr);
  2548. STAT_MBC_BLK_FREE(allctr, type, crr, NULL, old_blk_sz, alcu_flgs);
  2549. STAT_MBC_BLK_ALLOC(allctr, crr, blk_sz, alcu_flgs);
  2550. ASSERT(IS_ALLOCED_BLK(blk));
  2551. ASSERT(blk_sz == MBC_BLK_SZ(blk));
  2552. ASSERT(blk_sz % sizeof(Unit_t) == 0);
  2553. ASSERT(blk_sz >= allctr->min_block_size);
  2554. ASSERT(blk_sz >= size + ABLK_HDR_SZ);
  2555. ASSERT(IS_MBC_BLK(blk));
  2556. ASSERT(!nxt_blk || IS_PREV_BLK_ALLOCED(nxt_blk));
  2557. ASSERT(!nxt_blk || nxt_blk_sz == MBC_BLK_SZ(nxt_blk));
  2558. ASSERT(!nxt_blk || nxt_blk_sz % sizeof(Unit_t) == 0);
  2559. ASSERT(!nxt_blk || nxt_blk_sz >= allctr->min_block_size);
  2560. ASSERT(!nxt_blk || IS_MBC_BLK(nxt_blk));
  2561. ASSERT(!nxt_blk || (is_last_blk
  2562. ? IS_LAST_BLK(nxt_blk)
  2563. : IS_NOT_LAST_BLK(nxt_blk)));
  2564. ASSERT(!nxt_blk || is_last_blk
  2565. || IS_ALLOCED_BLK(nxt_blk)
  2566. || nxt_blk == PREV_BLK(NXT_BLK(nxt_blk)));
  2567. ASSERT(!nxt_blk || is_last_blk
  2568. || IS_ALLOCED_BLK(nxt_blk)
  2569. || IS_PREV_BLK_FREE(NXT_BLK(nxt_blk)));
  2570. HARD_CHECK_BLK_CARRIER(allctr, blk);
  2571. return p;
  2572. }
  2573. }
  2574. if (alcu_flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
  2575. return NULL;
  2576. /* Need to grow in another block */
  2577. if (!IS_PREV_BLK_FREE(blk)) {
  2578. cand_blk = NULL;
  2579. cand_blk_sz = 0;
  2580. }
  2581. else {
  2582. ASSERT(!IS_MBC_FIRST_ABLK(allctr, blk));
  2583. cand_blk = PREV_BLK(blk);
  2584. cand_blk_sz = old_blk_sz + PREV_BLK_SZ(blk);
  2585. if (!is_last_blk) {
  2586. nxt_blk = BLK_AFTER(blk, old_blk_sz);
  2587. if (IS_FREE_BLK(nxt_blk))
  2588. cand_blk_sz += MBC_FBLK_SZ(nxt_blk);
  2589. }
  2590. }
  2591. if (cand_blk_sz < get_blk_sz) {
  2592. /* We won't fit in cand_blk; get a new one */
  2593. #endif /* !MBC_REALLOC_ALWAYS_MOVES */
  2594. new_p = mbc_alloc(allctr, size);
  2595. if (!new_p)
  2596. return NULL;
  2597. sys_memcpy(new_p, p, MIN(size, old_blk_sz - ABLK_HDR_SZ));
  2598. mbc_free(allctr, type, p, busy_pcrr_pp);
  2599. return new_p;
  2600. #ifndef MBC_REALLOC_ALWAYS_MOVES
  2601. }
  2602. else {
  2603. /* We will at least fit in cand_blk */
  2604. new_blk = (*allctr->get_free_block)(allctr,
  2605. get_blk_sz,
  2606. cand_blk,
  2607. cand_blk_sz);
  2608. move_into_new_blk:
  2609. /*
  2610. * new_blk and cand_blk have to be correctly set
  2611. * when jumping to this label.
  2612. */
  2613. if (new_blk) {
  2614. mbc_alloc_finalize(allctr,
  2615. new_blk,
  2616. MBC_FBLK_SZ(new_blk),
  2617. GET_BLK_HDR_FLGS(new_blk),
  2618. FBLK_TO_MBC(new_blk),
  2619. blk_sz,
  2620. 1);
  2621. new_p = BLK2UMEM(new_blk);
  2622. sys_memcpy(new_p, p, MIN(size, old_blk_sz - ABLK_HDR_SZ));
  2623. mbc_free(allctr, type, p, NULL);
  2624. return new_p;
  2625. }
  2626. else {
  2627. Carrier_t* crr;
  2628. Uint new_blk_sz;
  2629. UWord new_blk_flgs;
  2630. Uint prev_blk_sz;
  2631. Uint blk_cpy_sz;
  2632. ASSERT(IS_PREV_BLK_FREE(blk));
  2633. ASSERT(cand_blk == PREV_BLK(blk));
  2634. prev_blk_sz = PREV_BLK_SZ(blk);
  2635. new_blk = cand_blk;
  2636. new_blk_sz = prev_blk_sz + old_blk_sz;
  2637. new_blk_flgs = GET_BLK_HDR_FLGS(new_blk);
  2638. HARD_CHECK_BLK_CARRIER(allctr, blk);
  2639. (*allctr->unlink_free_block)(allctr, new_blk); /* prev */
  2640. if (is_last_blk)
  2641. new_blk_flgs |= LAST_BLK_HDR_FLG;
  2642. else {
  2643. nxt_blk = BLK_AFTER(blk, old_blk_sz);
  2644. if (IS_FREE_BLK(nxt_blk)) {
  2645. new_blk_flgs |= GET_LAST_BLK_HDR_FLG(nxt_blk);
  2646. new_blk_sz += MBC_FBLK_SZ(nxt_blk);
  2647. (*allctr->unlink_free_block)(allctr, nxt_blk);
  2648. }
  2649. }
  2650. /*
  2651. * Copy the user data first, then update the new blocks in
  2652. * mbc_alloc_finalize(). mbc_alloc_finalize() may write headers at the
  2653. * old location of the user data; therefore, the order is important.
  2654. */
  2655. new_p = BLK2UMEM(new_blk);
  2656. blk_cpy_sz = MIN(blk_sz, old_blk_sz);
  2657. crr = FBLK_TO_MBC(new_blk);
  2658. if (prev_blk_sz >= blk_cpy_sz)
  2659. sys_memcpy(new_p, p, blk_cpy_sz - ABLK_HDR_SZ);
  2660. else
  2661. sys_memmove(new_p, p, blk_cpy_sz - ABLK_HDR_SZ);
  2662. mbc_alloc_finalize(allctr,
  2663. new_blk,
  2664. new_blk_sz,
  2665. new_blk_flgs,
  2666. crr,
  2667. blk_sz,
  2668. 0);
  2669. ERTS_ALC_CPOOL_FREE_OP(allctr);
  2670. STAT_MBC_BLK_FREE(allctr, type, crr, NULL, old_blk_sz, alcu_flgs);
  2671. return new_p;
  2672. }
  2673. }
  2674. #endif /* !MBC_REALLOC_ALWAYS_MOVES */
  2675. }
  2676. #define ERTS_ALC_MAX_DEALLOC_CARRIER 10
  2677. #define ERTS_ALC_CPOOL_MAX_FETCH_INSPECT 100
  2678. #define ERTS_ALC_CPOOL_MAX_FAILED_STAT_READS 3
  2679. #define ERTS_ALC_CPOOL_PTR_MOD_MRK (((erts_aint_t) 1) << 0)
  2680. #define ERTS_ALC_CPOOL_PTR_DEL_MRK (((erts_aint_t) 1) << 1)
  2681. #define ERTS_ALC_CPOOL_PTR_MRKS \
  2682. (ERTS_ALC_CPOOL_PTR_MOD_MRK | ERTS_ALC_CPOOL_PTR_DEL_MRK)
  2683. /*
  2684. * When setting multiple mod markers we always
  2685. * set them in pointer order, and always on
  2686. * next pointers before prev pointers.
  2687. */
  2688. typedef union {
  2689. ErtsAlcCPoolData_t sentinel;
  2690. char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAlcCPoolData_t))];
  2691. } ErtsAlcCrrPool_t;
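/*
 * The union above pads each pool sentinel out to a multiple of the cache
 * line size (via ERTS_ALC_CACHE_LINE_ALIGN_SIZE), presumably so that the
 * heavily contended sentinel pointers do not share a cache line with
 * unrelated data.
 */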
  2692. #if ERTS_ALC_A_INVALID != 0
  2693. # error "Carrier pool implementation assumes ERTS_ALC_A_INVALID == 0"
  2694. #endif
  2695. #if ERTS_ALC_A_MIN <= ERTS_ALC_A_INVALID
  2696. # error "Carrier pool implementation assumes ERTS_ALC_A_MIN > ERTS_ALC_A_INVALID"
  2697. #endif
  2698. /* The pools are only allowed to be manipulated by managed threads except in
  2699. * the alloc_SUITE:cpool test, where only test_carrier_pool is used. */
  2700. static ErtsAlcCrrPool_t firstfit_carrier_pool;
  2701. static ErtsAlcCrrPool_t test_carrier_pool;
  2702. #define ERTS_ALC_CPOOL_MAX_BACKOFF (1 << 8)
  2703. static int
  2704. backoff(int n)
  2705. {
  2706. int i;
  2707. for (i = 0; i < n; i++)
  2708. ERTS_SPIN_BODY;
  2709. if (n >= ERTS_ALC_CPOOL_MAX_BACKOFF)
  2710. return ERTS_ALC_CPOOL_MAX_BACKOFF;
  2711. else
  2712. return n << 1;
  2713. }
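/*
 * Typical usage, as in the retry loops below: start with b = 1 and rebind
 * b = backoff(b) after every failed attempt; the spin count then doubles
 * on each retry until it saturates at ERTS_ALC_CPOOL_MAX_BACKOFF.
 */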
  2714. static int
  2715. cpool_dbg_is_in_pool(Allctr_t *allctr, Carrier_t *crr)
  2716. {
  2717. ErtsAlcCPoolData_t *sentinel = allctr->cpool.sentinel;
  2718. ErtsAlcCPoolData_t *cpdp = sentinel;
  2719. Carrier_t *tmp_crr;
  2720. while (1) {
  2721. cpdp = (ErtsAlcCPoolData_t *) (erts_atomic_read_ddrb(&cpdp->next) & ~CRR_FLG_MASK);
  2722. if (cpdp == sentinel)
  2723. return 0;
  2724. tmp_crr = (Carrier_t *) (((char *) cpdp) - offsetof(Carrier_t, cpool));
  2725. if (tmp_crr == crr)
  2726. return 1;
  2727. }
  2728. }
  2729. static int
  2730. cpool_is_empty(Allctr_t *allctr)
  2731. {
  2732. ErtsAlcCPoolData_t *sentinel = allctr->cpool.sentinel;
  2733. return ((erts_atomic_read_rb(&sentinel->next) == (erts_aint_t) sentinel)
  2734. && (erts_atomic_read_rb(&sentinel->prev) == (erts_aint_t) sentinel));
  2735. }
  2736. static ERTS_INLINE ErtsAlcCPoolData_t *
  2737. cpool_aint2cpd(erts_aint_t aint)
  2738. {
  2739. return (ErtsAlcCPoolData_t *) (aint & ~ERTS_ALC_CPOOL_PTR_MRKS);
  2740. }
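/*
 * Illustrative example (addresses made up): a pool 'next'/'prev' word
 * stores an aligned ErtsAlcCPoolData_t pointer with the two low bits
 * reserved for markers. A pointer 0x7f3c00 with its mod marker set is
 * stored as 0x7f3c01 (ERTS_ALC_CPOOL_PTR_MOD_MRK) and with its delete
 * marker set as 0x7f3c02 (ERTS_ALC_CPOOL_PTR_DEL_MRK); cpool_aint2cpd()
 * masks both bits off to recover the pointer.
 */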
  2741. static ERTS_INLINE erts_aint_t
  2742. cpool_read(erts_atomic_t *aptr)
  2743. {
  2744. return erts_atomic_read_acqb(aptr);
  2745. }
  2746. static ERTS_INLINE void
  2747. cpool_init(erts_atomic_t *aptr, erts_aint_t val)
  2748. {
  2749. erts_atomic_set_nob(aptr, val);
  2750. }
  2751. static ERTS_INLINE void
  2752. cpool_set_mod_marked(erts_atomic_t *aptr, erts_aint_t new, erts_aint_t old)
  2753. {
  2754. #ifdef ERTS_ALC_CPOOL_DEBUG
  2755. erts_aint_t act = erts_atomic_xchg_relb(aptr, new);
  2756. ERTS_ALC_CPOOL_ASSERT(act == (old | ERTS_ALC_CPOOL_PTR_MOD_MRK));
  2757. #else
  2758. erts_atomic_set_relb(aptr, new);
  2759. #endif
  2760. }
  2761. static ERTS_INLINE erts_aint_t
  2762. cpool_try_mod_mark_exp(erts_atomic_t *aptr, erts_aint_t exp)
  2763. {
  2764. ERTS_ALC_CPOOL_ASSERT((exp & ERTS_ALC_CPOOL_PTR_MOD_MRK) == 0);
  2765. return erts_atomic_cmpxchg_nob(aptr, exp | ERTS_ALC_CPOOL_PTR_MOD_MRK, exp);
  2766. }
  2767. static ERTS_INLINE erts_aint_t
  2768. cpool_mod_mark_exp(erts_atomic_t *aptr, erts_aint_t exp)
  2769. {
  2770. int b;
  2771. erts_aint_t act;
  2772. ERTS_ALC_CPOOL_ASSERT((exp & ERTS_ALC_CPOOL_PTR_MOD_MRK) == 0);
  2773. while (1) {
  2774. act = erts_atomic_cmpxchg_nob(aptr,
  2775. exp | ERTS_ALC_CPOOL_PTR_MOD_MRK,
  2776. exp);
  2777. if (act == exp)
  2778. return exp;
  2779. b = 1;
  2780. do {
  2781. if ((act & ~ERTS_ALC_CPOOL_PTR_MOD_MRK) != exp)
  2782. return act;
  2783. b = backoff(b);
  2784. act = erts_atomic_read_nob(aptr);
  2785. } while (act != exp);
  2786. }
  2787. }
  2788. static ERTS_INLINE erts_aint_t
  2789. cpool_mod_mark(erts_atomic_t *aptr)
  2790. {
  2791. int b;
  2792. erts_aint_t act, exp;
  2793. act = cpool_read(aptr);
  2794. while (1) {
  2795. b = 1;
  2796. while (act & ERTS_ALC_CPOOL_PTR_MOD_MRK) {
  2797. b = backoff(b);
  2798. act = erts_atomic_read_nob(aptr);
  2799. }
  2800. exp = act;
  2801. act = erts_atomic_cmpxchg_acqb(aptr,
  2802. exp | ERTS_ALC_CPOOL_PTR_MOD_MRK,
  2803. exp);
  2804. if (act == exp)
  2805. return exp;
  2806. }
  2807. }
  2808. static void
  2809. cpool_insert(Allctr_t *allctr, Carrier_t *crr)
  2810. {
  2811. ErtsAlcCPoolData_t *cpd1p, *cpd2p;
  2812. erts_aint_t val;
  2813. ErtsAlcCPoolData_t *sentinel = allctr->cpool.sentinel;
  2814. Allctr_t *orig_allctr = crr->cpool.orig_allctr;
  2815. ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_TEST /* testcase */
  2816. || erts_thr_progress_is_managed_thread());
  2817. {
  2818. int alloc_no = allctr->alloc_no;
  2819. ERTS_ALC_CPOOL_ASSERT(
  2820. erts_atomic_read_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no]) >= 0 &&
  2821. crr->cpool.blocks_size[alloc_no] >= 0);
  2822. ERTS_ALC_CPOOL_ASSERT(
  2823. erts_atomic_read_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no]) >= 0 &&
  2824. crr->cpool.blocks[alloc_no] >= 0);
  2825. /* We only modify the counter for our current type since the others are
  2826. * conceptually still in the pool. */
  2827. erts_atomic_add_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no],
  2828. ((erts_aint_t) crr->cpool.blocks_size[alloc_no]));
  2829. erts_atomic_add_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no],
  2830. ((erts_aint_t) crr->cpool.blocks[alloc_no]));
  2831. }
  2832. erts_atomic_add_nob(&orig_allctr->cpool.stat.carriers_size,
  2833. (erts_aint_t) CARRIER_SZ(crr));
  2834. erts_atomic_inc_nob(&orig_allctr->cpool.stat.no_carriers);
  2835. /*
  2836. * We search in 'next' direction and begin by passing
  2837. * one element before trying to insert. This is in order to
  2838. * avoid contention with threads fetching elements.
  2839. */
  2840. val = cpool_read(&sentinel->next);
  2841. /* Find a predecessor-to-be, and set mod marker on its next ptr */
  2842. while (1) {
  2843. cpd1p = cpool_aint2cpd(val);
  2844. if (cpd1p == sentinel) {
  2845. val = cpool_mod_mark(&cpd1p->next);
  2846. break;
  2847. }
  2848. val = cpool_read(&cpd1p->next);
  2849. if (!(val & ERTS_ALC_CPOOL_PTR_MRKS)) {
  2850. erts_aint_t tmp = cpool_try_mod_mark_exp(&cpd1p->next, val);
  2851. if (tmp == val) {
  2852. val = tmp;
  2853. break;
  2854. }
  2855. val = tmp;
  2856. }
  2857. }
  2858. /* Set mod marker on prev ptr of the to-be successor */
  2859. cpd2p = cpool_aint2cpd(val);
  2860. cpool_init(&crr->cpool.next, (erts_aint_t) cpd2p);
  2861. cpool_init(&crr->cpool.prev, (erts_aint_t) cpd1p);
  2862. val = (erts_aint_t) cpd1p;
  2863. while (1) {
  2864. int b;
  2865. erts_aint_t tmp;
  2866. tmp = cpool_mod_mark_exp(&cpd2p->prev, val);
  2867. if (tmp == val)
  2868. break;
  2869. b = 1;
  2870. do {
  2871. b = backoff(b);
  2872. tmp = cpool_read(&cpd2p->prev);
  2873. } while (tmp != val);
  2874. }
  2875. /* Write pointers to this element in successor and predecessor */
  2876. cpool_set_mod_marked(&cpd1p->next,
  2877. (erts_aint_t) &crr->cpool,
  2878. (erts_aint_t) cpd2p);
  2879. cpool_set_mod_marked(&cpd2p->prev,
  2880. (erts_aint_t) &crr->cpool,
  2881. (erts_aint_t) cpd1p);
  2882. LTTNG3(carrier_pool_put, ERTS_ALC_A2AD(allctr->alloc_no), allctr->ix, CARRIER_SZ(crr));
  2883. }
  2884. static void
  2885. cpool_delete(Allctr_t *allctr, Allctr_t *prev_allctr, Carrier_t *crr)
  2886. {
  2887. ErtsAlcCPoolData_t *cpd1p, *cpd2p;
  2888. erts_aint_t val;
  2889. #ifdef ERTS_ALC_CPOOL_DEBUG
  2890. ErtsAlcCPoolData_t *sentinel = allctr->cpool.sentinel;
  2891. #endif
  2892. ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_TEST /* testcase */
  2893. || erts_thr_progress_is_managed_thread());
  2894. ERTS_ALC_CPOOL_ASSERT(sentinel != &crr->cpool);
  2895. /* Set mod marker on next ptr of our predecessor */
  2896. val = (erts_aint_t) &crr->cpool;
  2897. while (1) {
  2898. erts_aint_t tmp;
  2899. cpd1p = cpool_aint2cpd(cpool_read(&crr->cpool.prev));
  2900. tmp = cpool_mod_mark_exp(&cpd1p->next, val);
  2901. if (tmp == val)
  2902. break;
  2903. }
  2904. /* Set mod marker on our next ptr */
  2905. val = cpool_mod_mark(&crr->cpool.next);
  2906. /* Set mod marker on the prev ptr of our successor */
  2907. cpd2p = cpool_aint2cpd(val);
  2908. val = (erts_aint_t) &crr->cpool;
  2909. while (1) {
  2910. int b;
  2911. erts_aint_t tmp;
  2912. tmp = cpool_mod_mark_exp(&cpd2p->prev, val);
  2913. if (tmp == val)
  2914. break;
  2915. b = 1;
  2916. do {
  2917. b = backoff(b);
  2918. tmp = cpool_read(&cpd2p->prev);
  2919. } while (tmp != val);
  2920. }
  2921. /* Set mod marker on our prev ptr */
  2922. val = (erts_aint_t) cpd1p;
  2923. while (1) {
  2924. int b;
  2925. erts_aint_t tmp;
  2926. tmp = cpool_mod_mark_exp(&crr->cpool.prev, val);
  2927. if (tmp == val)
  2928. break;
  2929. b = 1;
  2930. do {
  2931. b = backoff(b);
  2932. tmp = cpool_read(&cpd2p->prev);
  2933. } while (tmp != val);
  2934. }
  2935. /* Write pointers past this element in predecessor and successor */
  2936. cpool_set_mod_marked(&cpd1p->next,
  2937. (erts_aint_t) cpd2p,
  2938. (erts_aint_t) &crr->cpool);
  2939. cpool_set_mod_marked(&cpd2p->prev,
  2940. (erts_aint_t) cpd1p,
  2941. (erts_aint_t) &crr->cpool);
  2942. /* Replace mod markers with delete markers on this element */
  2943. cpool_set_mod_marked(&crr->cpool.next,
  2944. ((erts_aint_t) cpd2p) | ERTS_ALC_CPOOL_PTR_DEL_MRK,
  2945. ((erts_aint_t) cpd2p) | ERTS_ALC_CPOOL_PTR_MOD_MRK);
  2946. cpool_set_mod_marked(&crr->cpool.prev,
  2947. ((erts_aint_t) cpd1p) | ERTS_ALC_CPOOL_PTR_DEL_MRK,
  2948. ((erts_aint_t) cpd1p) | ERTS_ALC_CPOOL_PTR_MOD_MRK);
  2949. crr->cpool.thr_prgr = erts_thr_progress_later(NULL);
  2950. {
  2951. Allctr_t *orig_allctr = crr->cpool.orig_allctr;
  2952. int alloc_no = allctr->alloc_no;
  2953. ERTS_ALC_CPOOL_ASSERT(orig_allctr == prev_allctr);
  2954. ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks_size[alloc_no] <=
  2955. erts_atomic_read_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no]));
  2956. ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks[alloc_no] <=
  2957. erts_atomic_read_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no]));
  2958. /* We only modify the counters for our current type since the others
  2959. * were, conceptually, never taken out of the pool. */
  2960. erts_atomic_add_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no],
  2961. -((erts_aint_t) crr->cpool.blocks_size[alloc_no]));
  2962. erts_atomic_add_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no],
  2963. -((erts_aint_t) crr->cpool.blocks[alloc_no]));
  2964. erts_atomic_add_nob(&orig_allctr->cpool.stat.carriers_size,
  2965. -((erts_aint_t) CARRIER_SZ(crr)));
  2966. erts_atomic_dec_wb(&orig_allctr->cpool.stat.no_carriers);
  2967. }
  2968. }
  2969. static Carrier_t *
  2970. cpool_fetch(Allctr_t *allctr, UWord size)
  2971. {
  2972. int i, seen_sentinel;
  2973. Carrier_t *crr;
  2974. Carrier_t *reinsert_crr = NULL;
  2975. ErtsAlcCPoolData_t *cpdp;
  2976. ErtsAlcCPoolData_t *cpool_entrance = NULL;
  2977. ErtsAlcCPoolData_t *sentinel;
  2978. ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_TEST /* testcase */
  2979. || erts_thr_progress_is_managed_thread());
  2980. i = ERTS_ALC_CPOOL_MAX_FETCH_INSPECT;
  2981. LTTNG3(carrier_pool_get, ERTS_ALC_A2AD(allctr->alloc_no), allctr->ix, (unsigned long)size);
  2982. /*
  2983. * Search my own pooled_tree,
  2984. * i.e. my abandoned carriers that were in the pool last time I checked.
  2985. */
  2986. do {
  2987. erts_aint_t exp, act;
  2988. crr = aoff_lookup_pooled_mbc(allctr, size);
  2989. if (!crr)
  2990. break;
  2991. ASSERT(crr->cpool.state == ERTS_MBC_WAS_POOLED);
  2992. ASSERT(crr->cpool.orig_allctr == allctr);
  2993. aoff_remove_pooled_mbc(allctr, crr);
  2994. exp = erts_atomic_read_nob(&crr->allctr);
  2995. if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) {
  2996. ASSERT((exp & ~ERTS_CRR_ALCTR_FLG_MASK) == (erts_aint_t)allctr);
  2997. if (erts_atomic_read_nob(&crr->cpool.max_size) < size) {
  2998. /*
  2999. * This carrier has been fetched and inserted back again
  3000. * by a foreign allocator. That's why it has a stale search size.
  3001. */
  3002. ASSERT(exp & ERTS_CRR_ALCTR_FLG_HOMECOMING);
  3003. crr->cpool.pooled.hdr.bhdr = erts_atomic_read_nob(&crr->cpool.max_size);
  3004. aoff_add_pooled_mbc(allctr, crr);
  3005. INC_CC(allctr->cpool.stat.skip_size);
  3006. continue;
  3007. }
  3008. else if (exp & ERTS_CRR_ALCTR_FLG_BUSY) {
  3009. /*
  3010. * This must be our own carrier as part of a realloc call.
  3011. * Skip it to make things simpler.
  3012. * We must wait to re-insert it so that the lookup does not find it again.
  3013. */
  3014. ASSERT(!reinsert_crr);
  3015. reinsert_crr = crr;
  3016. INC_CC(allctr->cpool.stat.skip_busy);
  3017. continue;
  3018. }
  3019. /* Try to fetch it... */
  3020. act = erts_atomic_cmpxchg_mb(&crr->allctr,
  3021. exp & ~ERTS_CRR_ALCTR_FLG_IN_POOL,
  3022. exp);
  3023. if (act == exp) {
  3024. cpool_delete(allctr, allctr, crr);
  3025. crr->cpool.state = ERTS_MBC_IS_HOME;
  3026. if (reinsert_crr)
  3027. aoff_add_pooled_mbc(allctr, reinsert_crr);
  3028. return crr;
  3029. }
  3030. exp = act;
  3031. INC_CC(allctr->cpool.stat.skip_race);
  3032. }
  3033. else
  3034. INC_CC(allctr->cpool.stat.skip_not_pooled);
  3035. /* Not in pool anymore */
  3036. ASSERT(!(exp & ERTS_CRR_ALCTR_FLG_BUSY));
  3037. crr->cpool.state = ERTS_MBC_WAS_TRAITOR;
  3038. }while (--i > 0);
  3039. if (reinsert_crr)
  3040. aoff_add_pooled_mbc(allctr, reinsert_crr);
  3041. /*
  3042. * Try to find a nice cpool_entrance
  3043. */
  3044. while (allctr->cpool.pooled_tree) {
  3045. erts_aint_t iallctr;
  3046. crr = ErtsContainerStruct(allctr->cpool.pooled_tree, Carrier_t, cpool.pooled);
  3047. iallctr = erts_atomic_read_nob(&crr->allctr);
  3048. if (iallctr & ERTS_CRR_ALCTR_FLG_IN_POOL) {
  3049. cpool_entrance = &crr->cpool;
  3050. break;
  3051. }
  3052. /* Not in pool anymore */
  3053. ASSERT(!(iallctr & ERTS_CRR_ALCTR_FLG_BUSY));
  3054. aoff_remove_pooled_mbc(allctr, crr);
  3055. crr->cpool.state = ERTS_MBC_WAS_TRAITOR;
  3056. if (--i <= 0) {
  3057. INC_CC(allctr->cpool.stat.fail_pooled);
  3058. return NULL;
  3059. }
  3060. }
  3061. /*
  3062. * Finally, search the shared pool and try to employ foreign carriers
  3063. */
  3064. sentinel = allctr->cpool.sentinel;
  3065. if (cpool_entrance) {
  3066. /*
  3067. * We saw a pooled carrier above; use it as the entrance into the pool
  3068. */
  3069. }
  3070. else {
  3071. /*
  3072. * No pooled carrier seen above. Start search at cpool sentinel,
  3073. * but begin by passing one element before trying to fetch.
  3074. * This is in order to avoid contention with threads inserting elements.
  3075. */
  3076. cpool_entrance = cpool_aint2cpd(cpool_read(&sentinel->prev));
  3077. if (cpool_entrance == sentinel)
  3078. goto check_dc_list;
  3079. }
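  /* Walk the pool backwards (following prev pointers) from the entrance and
   * try to steal the first carrier that is still in the pool, not busy, and
   * has a large enough max block size. The inspection budget i is shared
   * with the search phases above. */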
  3080. cpdp = cpool_entrance;
  3081. seen_sentinel = 0;
  3082. do {
  3083. erts_aint_t exp;
  3084. cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev));
  3085. if (cpdp == sentinel) {
  3086. if (seen_sentinel) {
  3087. /* We have been here before; cpool_entrance must have been removed */
  3088. INC_CC(allctr->cpool.stat.entrance_removed);
  3089. break;
  3090. }
  3091. seen_sentinel = 1;
  3092. continue;
  3093. }
  3094. ASSERT(cpdp != cpool_entrance || seen_sentinel);
  3095. crr = ErtsContainerStruct(cpdp, Carrier_t, cpool);
  3096. exp = erts_atomic_read_rb(&crr->allctr);
  3097. if (erts_atomic_read_nob(&cpdp->max_size) < size) {
  3098. INC_CC(allctr->cpool.stat.skip_size);
  3099. }
  3100. else if ((exp & (ERTS_CRR_ALCTR_FLG_IN_POOL | ERTS_CRR_ALCTR_FLG_BUSY))
  3101. == ERTS_CRR_ALCTR_FLG_IN_POOL) {
  3102. erts_aint_t act;
  3103. erts_aint_t want = (((erts_aint_t) allctr)
  3104. | (exp & ERTS_CRR_ALCTR_FLG_HOMECOMING));
  3105. /* Try to fetch it... */
  3106. act = erts_atomic_cmpxchg_mb(&crr->allctr, want, exp);
  3107. if (act == exp) {
  3108. cpool_delete(allctr, ((Allctr_t *) (act & ~ERTS_CRR_ALCTR_FLG_MASK)), crr);
  3109. if (crr->cpool.orig_allctr == allctr) {
  3110. unlink_abandoned_carrier(crr);
  3111. crr->cpool.state = ERTS_MBC_IS_HOME;
  3112. }
  3113. return crr;
  3114. }
  3115. }
  3116. if (exp & ERTS_CRR_ALCTR_FLG_BUSY)
  3117. INC_CC(allctr->cpool.stat.skip_busy);
  3118. else
  3119. INC_CC(allctr->cpool.stat.skip_race);
  3120. if (--i <= 0) {
  3121. INC_CC(allctr->cpool.stat.fail_shared);
  3122. return NULL;
  3123. }
  3124. }while (cpdp != cpool_entrance);
  3125. check_dc_list:
  3126. /* Lastly, check our own pending dealloc carrier list... */
  3127. crr = allctr->cpool.dc_list.last;
  3128. while (crr) {
  3129. if (erts_atomic_read_nob(&crr->cpool.max_size) >= size) {
  3130. Block_t* blk;
  3131. unlink_carrier(&allctr->cpool.dc_list, crr);
  3132. ERTS_ALC_CPOOL_ASSERT(erts_atomic_read_nob(&crr->allctr)
  3133. == ((erts_aint_t) allctr));
  3134. blk = MBC_TO_FIRST_BLK(allctr, crr);
  3135. ASSERT(FBLK_TO_MBC(blk) == crr);
  3136. allctr->link_free_block(allctr, blk);
  3137. return crr;
  3138. }
  3139. crr = crr->prev;
  3140. if (--i <= 0) {
  3141. INC_CC(allctr->cpool.stat.fail_pend_dealloc);
  3142. return NULL;
  3143. }
  3144. }
  3145. if (i != ERTS_ALC_CPOOL_MAX_FETCH_INSPECT)
  3146. INC_CC(allctr->cpool.stat.fail);
  3147. return NULL;
  3148. }
  3149. static void
  3150. check_pending_dealloc_carrier(Allctr_t *allctr,
  3151. int *need_thr_progress,
  3152. ErtsThrPrgrVal *thr_prgr_p,
  3153. int *need_more_work)
  3154. {
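  /* Deallocate carriers on the pending dealloc list whose recorded thread
   * progress has been reached, at most ERTS_ALC_MAX_DEALLOC_CARRIER of them
   * per call. If carriers remain, the out parameters report whether the
   * caller should simply retry or wait for further thread progress. */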
  3155. Carrier_t *crr = allctr->cpool.dc_list.first;
  3156. if (crr) {
  3157. ErtsThrPrgrVal current = erts_thr_progress_current();
  3158. int i = 0;
  3159. do {
  3160. Carrier_t *dcrr;
  3161. if (!erts_thr_progress_has_reached_this(current, crr->cpool.thr_prgr))
  3162. break;
  3163. dcrr = crr;
  3164. crr = crr->next;
  3165. dealloc_mbc(allctr, dcrr);
  3166. i++;
  3167. } while (crr && i < ERTS_ALC_MAX_DEALLOC_CARRIER);
  3168. allctr->cpool.dc_list.first = crr;
  3169. if (!crr)
  3170. allctr->cpool.dc_list.last = NULL;
  3171. else {
  3172. crr->prev = NULL;
  3173. if (need_more_work) {
  3174. ERTS_ALC_CPOOL_ASSERT(need_thr_progress && thr_prgr_p);
  3175. if (erts_thr_progress_has_reached_this(current, crr->cpool.thr_prgr))
  3176. *need_more_work = 1;
  3177. else {
  3178. *need_thr_progress = 1;
  3179. if (*thr_prgr_p == ERTS_THR_PRGR_INVALID
  3180. || erts_thr_progress_cmp(crr->cpool.thr_prgr,
  3181. *thr_prgr_p) < 0) {
  3182. *thr_prgr_p = crr->cpool.thr_prgr;
  3183. }
  3184. }
  3185. }
  3186. }
  3187. }
  3188. }
  3189. static void
  3190. schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr)
  3191. {
  3192. Allctr_t *orig_allctr;
  3193. ASSERT(IS_MB_CARRIER(crr));
  3194. if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
  3195. dealloc_mbc(allctr, crr);
  3196. return;
  3197. }
  3198. orig_allctr = crr->cpool.orig_allctr;
  3199. if (allctr == orig_allctr) {
  3200. if (!(erts_atomic_read_nob(&crr->allctr) & ERTS_CRR_ALCTR_FLG_HOMECOMING)) {
  3201. dealloc_my_carrier(allctr, crr);
  3202. }
  3203. /*else
  3204. * The carrier was abandoned earlier by another thread and
  3205. * is still waiting for us in the dd-queue.
  3206. * handle_delayed_dealloc() will handle it when crr is dequeued.
  3207. */
  3208. }
  3209. else {
  3210. /*
  3211. * We send the carrier to its origin for deallocation.
  3212. * This is in order:
  3213. * - not to complicate things for the thread specific
  3214. * instances of mseg_alloc, and
  3215. * - to ensure that we always only reuse empty carriers
  3216. * originating from our own thread specific mseg_alloc
  3217. * instance which is beneficial on NUMA systems.
  3218. */
  3219. erts_aint_t iallctr;
  3220. #ifdef ERTS_ALC_CPOOL_DEBUG
  3221. Block_t* first_blk = MBC_TO_FIRST_BLK(allctr, crr);
  3222. ERTS_ALC_CPOOL_ASSERT(IS_FREE_LAST_MBC_BLK(first_blk));
  3223. ERTS_ALC_CPOOL_ASSERT(IS_MBC_FIRST_ABLK(allctr, first_blk));
  3224. ERTS_ALC_CPOOL_ASSERT(crr == FBLK_TO_MBC(first_blk));
  3225. ERTS_ALC_CPOOL_ASSERT(crr == FIRST_BLK_TO_MBC(allctr, first_blk));
  3226. ERTS_ALC_CPOOL_ASSERT((erts_atomic_read_nob(&crr->allctr)
  3227. & ~ERTS_CRR_ALCTR_FLG_HOMECOMING)
  3228. == (erts_aint_t) allctr);
  3229. #endif
  3230. iallctr = (erts_aint_t)orig_allctr | ERTS_CRR_ALCTR_FLG_HOMECOMING;
  3231. if (!(erts_atomic_xchg_nob(&crr->allctr, iallctr)
  3232. & ERTS_CRR_ALCTR_FLG_HOMECOMING)) {
  3233. enqueue_homecoming(allctr, crr);
  3234. }
  3235. }
  3236. }
  3237. static void dealloc_my_carrier(Allctr_t *allctr, Carrier_t *crr)
  3238. {
  3239. Block_t *blk;
  3240. int check_pending_dealloc;
  3241. erts_aint_t max_size;
  3242. ERTS_ALC_CPOOL_ASSERT(allctr == crr->cpool.orig_allctr);
  3243. if (is_abandoned(crr)) {
  3244. unlink_abandoned_carrier(crr);
  3245. crr->cpool.state = ERTS_MBC_IS_HOME;
  3246. }
  3247. if (crr->cpool.thr_prgr == ERTS_THR_PRGR_INVALID
  3248. || erts_thr_progress_has_reached(crr->cpool.thr_prgr)) {
  3249. dealloc_mbc(allctr, crr);
  3250. return;
  3251. }
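  /* Thread progress has not been reached yet, so the carrier cannot be
   * deallocated right away. Record its (single, free) block size and put
   * it last on the pending dealloc list for later processing. */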
  3252. blk = MBC_TO_FIRST_BLK(allctr, crr);
  3253. ASSERT(IS_FREE_LAST_MBC_BLK(blk));
  3254. max_size = (erts_aint_t) MBC_FBLK_SZ(blk);
  3255. erts_atomic_set_nob(&crr->cpool.max_size, max_size);
  3256. crr->next = NULL;
  3257. crr->prev = allctr->cpool.dc_list.last;
  3258. if (allctr->cpool.dc_list.last) {
  3259. check_pending_dealloc = 1;
  3260. allctr->cpool.dc_list.last->next = crr;
  3261. }
  3262. else {
  3263. check_pending_dealloc = 0;
  3264. allctr->cpool.dc_list.first = crr;
  3265. }
  3266. allctr->cpool.dc_list.last = crr;
  3267. if (check_pending_dealloc)
  3268. check_pending_dealloc_carrier(allctr, NULL, NULL, NULL);
  3269. erts_alloc_ensure_handle_delayed_dealloc_call(allctr->ix);
  3270. }
  3271. static ERTS_INLINE void
  3272. cpool_init_carrier_data(Allctr_t *allctr, Carrier_t *crr)
  3273. {
  3274. crr->cpool.homecoming_dd.blk.bhdr = HOMECOMING_MBC_BLK_HDR;
  3275. erts_atomic_init_nob(&crr->cpool.next, ERTS_AINT_NULL);
  3276. erts_atomic_init_nob(&crr->cpool.prev, ERTS_AINT_NULL);
  3277. crr->cpool.orig_allctr = allctr;
  3278. crr->cpool.thr_prgr = ERTS_THR_PRGR_INVALID;
  3279. erts_atomic_init_nob(&crr->cpool.max_size, 0);
  3280. sys_memset(&crr->cpool.blocks_size, 0, sizeof(crr->cpool.blocks_size));
  3281. sys_memset(&crr->cpool.blocks, 0, sizeof(crr->cpool.blocks));
  3282. crr->cpool.total_blocks_size = 0;
  3283. if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr))
  3284. crr->cpool.abandon_limit = 0;
  3285. else {
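  /* The abandon limit is csz * util_limit / 100, where util_limit is the
   * acul percentage. The multiplication and division are ordered so that
   * the intermediate value does not overflow a UWord. E.g. a 1 MB carrier
   * with acul=60 gives 1048576*60/100 = 629145 bytes. */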
  3286. UWord csz = CARRIER_SZ(crr);
  3287. UWord limit = csz*allctr->cpool.util_limit;
  3288. if (limit > csz)
  3289. limit /= 100;
  3290. else
  3291. limit = (csz/100)*allctr->cpool.util_limit;
  3292. crr->cpool.abandon_limit = limit;
  3293. }
  3294. crr->cpool.state = ERTS_MBC_IS_HOME;
  3295. }
  3296. static UWord
  3297. allctr_abandon_limit(Allctr_t *allctr)
  3298. {
  3299. UWord limit;
  3300. UWord csz;
  3301. csz = allctr->mbcs.curr.norm.mseg.size;
  3302. csz += allctr->mbcs.curr.norm.sys_alloc.size;
  3303. limit = csz*allctr->cpool.util_limit;
  3304. if (limit > csz)
  3305. limit /= 100;
  3306. else
  3307. limit = (csz/100)*allctr->cpool.util_limit;
  3308. return limit;
  3309. }
  3310. static void ERTS_INLINE
  3311. set_new_allctr_abandon_limit(Allctr_t *allctr)
  3312. {
  3313. allctr->cpool.abandon_limit = allctr_abandon_limit(allctr);
  3314. }
  3315. static void
  3316. abandon_carrier(Allctr_t *allctr, Carrier_t *crr)
  3317. {
  3318. erts_aint_t iallctr;
  3319. STAT_MBC_ABANDON(allctr, crr);
  3320. unlink_carrier(&allctr->mbc_list, crr);
  3321. allctr->remove_mbc(allctr, crr);
  3322. /* Mark our free blocks as unused and reclaimable to the OS. */
  3323. carrier_mem_discard_free_blocks(allctr, crr);
  3324. cpool_insert(allctr, crr);
  3325. iallctr = erts_atomic_read_nob(&crr->allctr);
  3326. if (allctr == crr->cpool.orig_allctr) {
  3327. /* preserve HOMECOMING flag */
  3328. ASSERT((iallctr & ~ERTS_CRR_ALCTR_FLG_HOMECOMING) == (erts_aint_t)allctr);
  3329. erts_atomic_set_wb(&crr->allctr, iallctr | ERTS_CRR_ALCTR_FLG_IN_POOL);
  3330. poolify_my_carrier(allctr, crr);
  3331. }
  3332. else {
  3333. ASSERT((iallctr & ~ERTS_CRR_ALCTR_FLG_HOMECOMING) == (erts_aint_t)allctr);
  3334. iallctr = ((erts_aint_t)crr->cpool.orig_allctr |
  3335. ERTS_CRR_ALCTR_FLG_HOMECOMING |
  3336. ERTS_CRR_ALCTR_FLG_IN_POOL);
  3337. if (!(erts_atomic_xchg_wb(&crr->allctr, iallctr)
  3338. & ERTS_CRR_ALCTR_FLG_HOMECOMING)) {
  3339. enqueue_homecoming(allctr, crr);
  3340. }
  3341. }
  3342. }
  3343. static void
  3344. enqueue_homecoming(Allctr_t* allctr, Carrier_t* crr)
  3345. {
  3346. Allctr_t* orig_allctr = crr->cpool.orig_allctr;
  3347. const int cinit = orig_allctr->dd.ix - allctr->dd.ix;
  3348. Block_t* dd_blk = &crr->cpool.homecoming_dd.blk;
  3349. /*
  3350. * The receiver will recognize this as a carrier
  3351. * (and not a block, which is the common case)
  3352. * since the block header is HOMECOMING_MBC_BLK_HDR.
  3353. */
  3354. ASSERT(dd_blk->bhdr == HOMECOMING_MBC_BLK_HDR);
  3355. if (ddq_enqueue(&orig_allctr->dd.q, BLK2UMEM(dd_blk), cinit))
  3356. erts_alloc_notify_delayed_dealloc(orig_allctr->ix);
  3357. }
  3358. static void
  3359. poolify_my_carrier(Allctr_t *allctr, Carrier_t *crr)
  3360. {
  3361. ERTS_ALC_CPOOL_ASSERT(allctr == crr->cpool.orig_allctr);
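  /* The pooled-tree node reuses its block header field as the search key:
   * the carrier's largest free block size. aoff_lookup_pooled_mbc() later
   * matches fetch requests against this value. */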
  3362. crr->cpool.pooled.hdr.bhdr = erts_atomic_read_nob(&crr->cpool.max_size);
  3363. aoff_add_pooled_mbc(allctr, crr);
  3364. crr->cpool.state = ERTS_MBC_WAS_POOLED;
  3365. }
  3366. static void
  3367. cpool_read_stat(Allctr_t *allctr, int alloc_no,
  3368. UWord *nocp, UWord *cszp, UWord *nobp, UWord *bszp)
  3369. {
  3370. int i;
  3371. UWord noc = 0, csz = 0, nob = 0, bsz = 0;
  3372. /*
  3373. * We try to get consistent values, but after
  3374. * ERTS_ALC_CPOOL_MAX_FAILED_STAT_READS failed
  3375. * tries we give up and present what we got...
  3376. */
  3377. for (i = 0; i <= ERTS_ALC_CPOOL_MAX_FAILED_STAT_READS; i++) {
  3378. UWord tnoc, tcsz, tnob, tbsz;
  3379. tnoc = (UWord) (nocp
  3380. ? erts_atomic_read_nob(&allctr->cpool.stat.no_carriers)
  3381. : 0);
  3382. tcsz = (UWord) (cszp
  3383. ? erts_atomic_read_nob(&allctr->cpool.stat.carriers_size)
  3384. : 0);
  3385. tnob = (UWord) (nobp
  3386. ? erts_atomic_read_nob(&allctr->cpool.stat.no_blocks[alloc_no])
  3387. : 0);
  3388. tbsz = (UWord) (bszp
  3389. ? erts_atomic_read_nob(&allctr->cpool.stat.blocks_size[alloc_no])
  3390. : 0);
  3391. if (tnoc == noc && tcsz == csz && tnob == nob && tbsz == bsz)
  3392. break;
  3393. noc = tnoc;
  3394. csz = tcsz;
  3395. nob = tnob;
  3396. bsz = tbsz;
  3397. ERTS_THR_READ_MEMORY_BARRIER;
  3398. }
  3399. if (nocp)
  3400. *nocp = noc;
  3401. if (cszp)
  3402. *cszp = csz;
  3403. if (nobp)
  3404. *nobp = nob;
  3405. if (bszp)
  3406. *bszp = bsz;
  3407. }
  3408. #ifdef DEBUG
  3409. #if ERTS_SA_MB_CARRIERS
  3410. #define ASSERT_ERTS_SACRR_UNIT_SIZE_MULTIPLE(CSZ) ASSERT((CSZ) % ERTS_SACRR_UNIT_SZ == 0)
  3411. #else
  3412. #define ASSERT_ERTS_SACRR_UNIT_SIZE_MULTIPLE(CSZ)
  3413. #endif
  3414. static void CHECK_1BLK_CARRIER(Allctr_t* A, int SBC, int MSEGED, Carrier_t* C,
  3415. UWord CSZ, Block_t* B, UWord BSZ)
  3416. {
  3417. ASSERT(IS_LAST_BLK((B)));
  3418. ASSERT((CSZ) == CARRIER_SZ((C)));
  3419. ASSERT((BSZ) % sizeof(Unit_t) == 0);
  3420. if ((SBC)) {
  3421. ASSERT((BSZ) == SBC_BLK_SZ((B)));
  3422. ASSERT((char*)B == (char*)C + SBC_HEADER_SIZE);
  3423. ASSERT(IS_SBC_BLK((B)));
  3424. ASSERT(IS_SB_CARRIER((C)));
  3425. }
  3426. else {
  3427. ASSERT(IS_FREE_BLK(B));
  3428. ASSERT((BSZ) == MBC_FBLK_SZ((B)));
  3429. ASSERT(IS_MBC_FIRST_FBLK(A, (B)));
  3430. ASSERT(IS_MBC_BLK((B)));
  3431. ASSERT(IS_MB_CARRIER((C)));
  3432. ASSERT(FBLK_TO_MBC(B) == (C));
  3433. if ((MSEGED)) {
  3434. ASSERT_ERTS_SACRR_UNIT_SIZE_MULTIPLE((CSZ));
  3435. }
  3436. }
  3437. if ((MSEGED)) {
  3438. ASSERT(IS_MSEG_CARRIER((C)));
  3439. }
  3440. else {
  3441. ASSERT(IS_SYS_ALLOC_CARRIER((C)));
  3442. ASSERT((CSZ) % sizeof(Unit_t) == 0);
  3443. }
  3444. }
  3445. #else
  3446. #define CHECK_1BLK_CARRIER(A, SBC, MSEGED, C, CSZ, B, BSZ)
  3447. #endif
  3448. static Block_t *
  3449. create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
  3450. {
  3451. Block_t *blk;
  3452. Carrier_t *crr;
  3453. Uint blk_sz, bcrr_sz, crr_sz;
  3454. #if HAVE_ERTS_MSEG
  3455. int have_tried_sys_alloc = 0, have_tried_mseg = 0;
  3456. Uint mseg_flags;
  3457. #endif
  3458. #ifdef DEBUG
  3459. int is_mseg = 0;
  3460. #endif
  3461. if ((ERTS_SUPER_ALIGNED_MSEG_ONLY && (flags & CFLG_MBC))
  3462. || !allow_sys_alloc_carriers) {
  3463. flags |= CFLG_FORCE_MSEG;
  3464. flags &= ~CFLG_FORCE_SYS_ALLOC;
  3465. #if !HAVE_ERTS_MSEG
  3466. return NULL;
  3467. #endif
  3468. }
  3469. flags |= allctr->crr_set_flgs;
  3470. flags &= ~allctr->crr_clr_flgs;
  3471. ASSERT((flags & CFLG_SBC && !(flags & CFLG_MBC))
  3472. || (flags & CFLG_MBC && !(flags & CFLG_SBC)));
  3473. ASSERT(!(flags & CFLG_FORCE_MSEG && flags & CFLG_FORCE_SYS_ALLOC));
  3474. if (umem_sz > (ERTS_UINT_MAX - ERTS_UINT_MAX/100)) {
  3475. /* Do an overly conservative _overflow_ check here so we don't
  3476. * have to deal with it from here on. I guess we could be more accurate
  3477. * but I don't think the need to allocate over 99% of the address space
  3478. * will ever arise on any machine, whether 32 or 64 bit.
  3479. */
  3480. return NULL;
  3481. }
  3482. if (flags & CFLG_MAIN_CARRIER) {
  3483. ASSERT(flags & CFLG_MBC);
  3484. ASSERT(flags & CFLG_NO_CPOOL);
  3485. ASSERT(umem_sz == allctr->main_carrier_size);
  3486. ERTS_UNDEF(blk_sz, 0);
  3487. if (allctr->main_carrier_size < allctr->min_mbc_size)
  3488. allctr->main_carrier_size = allctr->min_mbc_size;
  3489. crr_sz = bcrr_sz = allctr->main_carrier_size;
  3490. }
  3491. else {
  3492. ERTS_UNDEF(bcrr_sz, 0);
  3493. ERTS_UNDEF(crr_sz, 0);
  3494. blk_sz = UMEMSZ2BLKSZ(allctr, umem_sz);
  3495. }
  3496. allctr->cpool.disable_abandon = ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON;
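  /* For multiblock carriers, first try to reuse an abandoned carrier from
   * the carrier pool before creating a new one. */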
  3497. if ((flags & (CFLG_MBC|CFLG_NO_CPOOL)) == CFLG_MBC
  3498. && ERTS_ALC_IS_CPOOL_ENABLED(allctr)
  3499. && erts_thr_progress_is_managed_thread()) {
  3500. crr = cpool_fetch(allctr, blk_sz);
  3501. if (crr) {
  3502. STAT_MBC_CPOOL_FETCH(allctr, crr);
  3503. INC_CC(allctr->cpool.stat.fetch);
  3504. link_carrier(&allctr->mbc_list, crr);
  3505. (*allctr->add_mbc)(allctr, crr);
  3506. blk = (*allctr->get_free_block)(allctr, blk_sz, NULL, 0);
  3507. ASSERT(blk);
  3508. return blk;
  3509. }
  3510. }
  3511. #if HAVE_ERTS_MSEG
  3512. if (flags & CFLG_FORCE_SYS_ALLOC)
  3513. goto try_sys_alloc;
  3514. if (flags & CFLG_FORCE_MSEG)
  3515. goto try_mseg;
  3516. if (erts_mseg_no(&allctr->mseg_opt) >= max_mseg_carriers)
  3517. goto try_sys_alloc;
  3518. if (flags & CFLG_SBC) {
  3519. if (allctr->sbcs.curr.norm.mseg.no >= allctr->max_mseg_sbcs)
  3520. goto try_sys_alloc;
  3521. }
  3522. #if !ERTS_SUPER_ALIGNED_MSEG_ONLY
  3523. else {
  3524. if (allctr->mbcs.curr.norm.mseg.no >= allctr->max_mseg_mbcs)
  3525. goto try_sys_alloc;
  3526. }
  3527. #endif
  3528. try_mseg:
  3529. if (flags & CFLG_SBC) {
  3530. crr_sz = blk_sz + SBC_HEADER_SIZE;
  3531. mseg_flags = ERTS_MSEG_FLG_NONE;
  3532. }
  3533. else {
  3534. if (!(flags & CFLG_MAIN_CARRIER)) {
  3535. crr_sz = (*allctr->get_next_mbc_size)(allctr);
  3536. if (crr_sz < MBC_HEADER_SIZE(allctr) + blk_sz)
  3537. crr_sz = MBC_HEADER_SIZE(allctr) + blk_sz;
  3538. }
  3539. mseg_flags = ERTS_MSEG_FLG_2POW;
  3540. }
  3541. crr = (Carrier_t *) allctr->mseg_alloc(allctr, &crr_sz, mseg_flags);
  3542. if (!crr) {
  3543. have_tried_mseg = 1;
  3544. if (!(have_tried_sys_alloc || flags & CFLG_FORCE_MSEG))
  3545. goto try_sys_alloc;
  3546. return NULL;
  3547. }
  3548. #ifdef DEBUG
  3549. is_mseg = 1;
  3550. #endif
  3551. if (flags & CFLG_SBC) {
  3552. SET_CARRIER_HDR(crr, crr_sz, SCH_MSEG|SCH_SBC, allctr);
  3553. STAT_MSEG_SBC_ALLOC(allctr, crr_sz, blk_sz);
  3554. goto sbc_final_touch;
  3555. }
  3556. else {
  3557. #ifndef ARCH_64
  3558. ASSERT(crr_sz <= MBC_SZ_MAX_LIMIT);
  3559. #endif
  3560. SET_CARRIER_HDR(crr, crr_sz, SCH_MSEG|SCH_MBC, allctr);
  3561. STAT_MSEG_MBC_ALLOC(allctr, crr_sz);
  3562. goto mbc_final_touch;
  3563. }
  3564. try_sys_alloc:
  3565. #endif /* #if HAVE_ERTS_MSEG */
  3566. if (flags & CFLG_SBC) {
  3567. bcrr_sz = blk_sz + SBC_HEADER_SIZE;
  3568. }
  3569. else if (!(flags & CFLG_MAIN_CARRIER)) {
  3570. bcrr_sz = MBC_HEADER_SIZE(allctr) + blk_sz;
  3571. if (bcrr_sz < allctr->smallest_mbc_size)
  3572. bcrr_sz = allctr->smallest_mbc_size;
  3573. }
  3574. crr_sz = (flags & CFLG_FORCE_SIZE
  3575. ? UNIT_CEILING(bcrr_sz)
  3576. : SYS_ALLOC_CARRIER_CEILING(bcrr_sz));
  3577. crr = (Carrier_t *) allctr->sys_alloc(allctr, &crr_sz, flags & CFLG_MBC);
  3578. if (!crr) {
  3579. if (crr_sz > UNIT_CEILING(bcrr_sz)) {
  3580. crr_sz = UNIT_CEILING(bcrr_sz);
  3581. crr = (Carrier_t *) allctr->sys_alloc(allctr, &crr_sz, flags & CFLG_MBC);
  3582. }
  3583. if (!crr) {
  3584. #if HAVE_ERTS_MSEG
  3585. have_tried_sys_alloc = 1;
  3586. if (!(have_tried_mseg || flags & CFLG_FORCE_SYS_ALLOC))
  3587. goto try_mseg;
  3588. #endif
  3589. return NULL;
  3590. }
  3591. }
  3592. if (flags & CFLG_SBC) {
  3593. SET_CARRIER_HDR(crr, crr_sz, SCH_SYS_ALLOC|SCH_SBC, allctr);
  3594. STAT_SYS_ALLOC_SBC_ALLOC(allctr, crr_sz, blk_sz);
  3595. #if HAVE_ERTS_MSEG
  3596. sbc_final_touch:
  3597. #endif
  3598. blk = SBC2BLK(allctr, crr);
  3599. SET_SBC_BLK_HDR(blk, blk_sz);
  3600. link_carrier(&allctr->sbc_list, crr);
  3601. CHECK_1BLK_CARRIER(allctr, 1, is_mseg, crr, crr_sz, blk, blk_sz);
  3602. }
  3603. else {
  3604. SET_CARRIER_HDR(crr, crr_sz, SCH_SYS_ALLOC|SCH_MBC, allctr);
  3605. STAT_SYS_ALLOC_MBC_ALLOC(allctr, crr_sz);
  3606. #if HAVE_ERTS_MSEG
  3607. mbc_final_touch:
  3608. #endif
  3609. set_new_allctr_abandon_limit(allctr);
  3610. blk = MBC_TO_FIRST_BLK(allctr, crr);
  3611. blk_sz = UNIT_FLOOR(crr_sz - MBC_HEADER_SIZE(allctr));
  3612. SET_MBC_FBLK_HDR(blk, blk_sz, SBH_THIS_FREE|SBH_LAST_BLK, crr);
  3613. if (flags & CFLG_MAIN_CARRIER) {
  3614. ASSERT(!allctr->main_carrier);
  3615. allctr->main_carrier = crr;
  3616. }
  3617. cpool_init_carrier_data(allctr, crr);
  3618. link_carrier(&allctr->mbc_list, crr);
  3619. CHECK_1BLK_CARRIER(allctr, 0, is_mseg, crr, crr_sz, blk, blk_sz);
  3620. if (allctr->creating_mbc)
  3621. (*allctr->creating_mbc)(allctr, crr);
  3622. }
  3623. #ifdef USE_LTTNG_VM_TRACEPOINTS
  3624. if (LTTNG_ENABLED(carrier_create)) {
  3625. lttng_decl_carrier_stats(mbc_stats);
  3626. lttng_decl_carrier_stats(sbc_stats);
  3627. LTTNG_CARRIER_STATS_TO_LTTNG_STATS(&(allctr->mbcs), mbc_stats);
  3628. LTTNG_CARRIER_STATS_TO_LTTNG_STATS(&(allctr->sbcs), sbc_stats);
  3629. LTTNG5(carrier_create,
  3630. ERTS_ALC_A2AD(allctr->alloc_no),
  3631. allctr->ix,
  3632. crr_sz,
  3633. mbc_stats,
  3634. sbc_stats);
  3635. }
  3636. #endif
  3637. DEBUG_SAVE_ALIGNMENT(crr);
  3638. return blk;
  3639. }
  3640. static Block_t *
  3641. resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
  3642. {
  3643. Block_t *new_blk;
  3644. Carrier_t *new_crr, *old_crr;
  3645. UWord create_flags;
  3646. Uint old_crr_sz, old_blk_sz, new_blk_sz, new_crr_sz;
  3647. Uint new_bcrr_sz;
  3648. if (flags & CFLG_MBC) {
  3649. ASSERT(0);
  3650. return NULL;
  3651. }
  3652. ASSERT(flags & CFLG_SBC);
  3653. create_flags = flags|CFLG_SBC;
  3654. HARD_CHECK_BLK_CARRIER(allctr, old_blk);
  3655. old_blk_sz = SBC_BLK_SZ(old_blk);
  3656. old_crr = BLK_TO_SBC(old_blk);
  3657. old_crr_sz = CARRIER_SZ(old_crr);
  3658. ASSERT(IS_SB_CARRIER(old_crr));
  3659. ASSERT(IS_SBC_BLK(old_blk));
  3660. new_blk_sz = UMEMSZ2BLKSZ(allctr, umem_sz);
  3661. #if HAVE_ERTS_MSEG
  3662. if (IS_MSEG_CARRIER(old_crr)) {
  3663. STAT_MSEG_SBC_FREE(allctr, old_crr_sz, old_blk_sz);
  3664. if (!(flags & CFLG_FORCE_SYS_ALLOC)) {
  3665. new_crr_sz = new_blk_sz + SBC_HEADER_SIZE;
  3666. new_crr_sz = ERTS_SACRR_UNIT_CEILING(new_crr_sz);
  3667. new_crr = (Carrier_t *) allctr->mseg_realloc(allctr,
  3668. old_crr,
  3669. old_crr_sz,
  3670. &new_crr_sz);
  3671. if (new_crr) {
  3672. SET_CARRIER_SZ(new_crr, new_crr_sz);
  3673. new_blk = SBC2BLK(allctr, new_crr);
  3674. SET_SBC_BLK_SZ(new_blk, new_blk_sz);
  3675. STAT_MSEG_SBC_ALLOC(allctr, new_crr_sz, new_blk_sz);
  3676. relink_carrier(&allctr->sbc_list, new_crr);
  3677. CHECK_1BLK_CARRIER(allctr, 1, 1, new_crr, new_crr_sz,
  3678. new_blk, new_blk_sz);
  3679. DEBUG_SAVE_ALIGNMENT(new_crr);
  3680. return new_blk;
  3681. }
  3682. create_flags |= CFLG_FORCE_SYS_ALLOC; /* since mseg_realloc()
  3683. failed */
  3684. }
  3685. new_blk = create_carrier(allctr, umem_sz, create_flags);
  3686. if (new_blk) {
  3687. sys_memcpy((void *) BLK2UMEM(new_blk),
  3688. (void *) BLK2UMEM(old_blk),
  3689. MIN(new_blk_sz, old_blk_sz) - ABLK_HDR_SZ);
  3690. unlink_carrier(&allctr->sbc_list, old_crr);
  3691. allctr->mseg_dealloc(allctr, old_crr, old_crr_sz, ERTS_MSEG_FLG_NONE);
  3692. }
  3693. else {
  3694. /* Old carrier unchanged; restore stat */
  3695. STAT_MSEG_SBC_ALLOC(allctr, old_crr_sz, old_blk_sz);
  3696. }
  3697. return new_blk;
  3698. }
  3699. else {
  3700. if (!(flags & CFLG_FORCE_MSEG)) {
  3701. #endif /* #if HAVE_ERTS_MSEG */
  3702. new_bcrr_sz = new_blk_sz + SBC_HEADER_SIZE;
  3703. new_crr_sz = (flags & CFLG_FORCE_SIZE
  3704. ? UNIT_CEILING(new_bcrr_sz)
  3705. : SYS_ALLOC_CARRIER_CEILING(new_bcrr_sz));
  3706. new_crr = (Carrier_t *) allctr->sys_realloc(allctr,
  3707. (void *) old_crr,
  3708. &new_crr_sz,
  3709. old_crr_sz,
  3710. 0);
  3711. if (new_crr) {
  3712. sys_realloc_success:
  3713. SET_CARRIER_SZ(new_crr, new_crr_sz);
  3714. new_blk = SBC2BLK(allctr, new_crr);
  3715. SET_SBC_BLK_SZ(new_blk, new_blk_sz);
  3716. STAT_SYS_ALLOC_SBC_FREE(allctr, old_crr_sz, old_blk_sz);
  3717. STAT_SYS_ALLOC_SBC_ALLOC(allctr, new_crr_sz, new_blk_sz);
  3718. relink_carrier(&allctr->sbc_list, new_crr);
  3719. CHECK_1BLK_CARRIER(allctr, 1, 0, new_crr, new_crr_sz,
  3720. new_blk, new_blk_sz);
  3721. DEBUG_SAVE_ALIGNMENT(new_crr);
  3722. return new_blk;
  3723. }
  3724. else if (new_crr_sz > UNIT_CEILING(new_bcrr_sz)) {
  3725. new_crr_sz = new_blk_sz + SBC_HEADER_SIZE;
  3726. new_crr_sz = UNIT_CEILING(new_crr_sz);
  3727. new_crr = (Carrier_t *) allctr->sys_realloc(allctr,
  3728. (void *) old_crr,
  3729. &new_crr_sz,
  3730. old_crr_sz,
  3731. 0);
  3732. if (new_crr)
  3733. goto sys_realloc_success;
  3734. }
  3735. #if !HAVE_ERTS_MSEG
  3736. return NULL;
  3737. #else
  3738. create_flags |= CFLG_FORCE_MSEG; /* Since sys_realloc() failed */
  3739. }
  3740. STAT_SYS_ALLOC_SBC_FREE(allctr, old_crr_sz, old_blk_sz);
  3741. new_blk = create_carrier(allctr, umem_sz, create_flags);
  3742. if (new_blk) {
  3743. sys_memcpy((void *) BLK2UMEM(new_blk),
  3744. (void *) BLK2UMEM(old_blk),
  3745. MIN(new_blk_sz, old_blk_sz) - ABLK_HDR_SZ);
  3746. unlink_carrier(&allctr->sbc_list, old_crr);
  3747. allctr->sys_dealloc(allctr, old_crr, CARRIER_SZ(old_crr), 0);
  3748. }
  3749. else {
  3750. /* Old carrier unchanged; restore... */
  3751. STAT_SYS_ALLOC_SBC_ALLOC(allctr, old_crr_sz, old_blk_sz);
  3752. }
  3753. return new_blk;
  3754. }
  3755. #endif
  3756. }
  3757. static void
  3758. dealloc_carrier(Allctr_t *allctr, Carrier_t *crr, int superaligned)
  3759. {
  3760. #if HAVE_ERTS_MSEG
  3761. if (IS_MSEG_CARRIER(crr))
  3762. allctr->mseg_dealloc(allctr, crr, CARRIER_SZ(crr),
  3763. (superaligned
  3764. ? ERTS_MSEG_FLG_2POW
  3765. : ERTS_MSEG_FLG_NONE));
  3766. else
  3767. #endif
  3768. allctr->sys_dealloc(allctr, crr, CARRIER_SZ(crr), superaligned);
  3769. }
  3770. static void
  3771. destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp)
  3772. {
  3773. Uint crr_sz;
  3774. Carrier_t *crr;
  3775. if (IS_SBC_BLK(blk)) {
  3776. Uint blk_sz = SBC_BLK_SZ(blk);
  3777. crr = BLK_TO_SBC(blk);
  3778. crr_sz = CARRIER_SZ(crr);
  3779. ASSERT(IS_LAST_BLK(blk));
  3780. HARD_CHECK_BLK_CARRIER(allctr, blk);
  3781. #if HAVE_ERTS_MSEG
  3782. if (IS_MSEG_CARRIER(crr)) {
  3783. STAT_MSEG_SBC_FREE(allctr, crr_sz, blk_sz);
  3784. }
  3785. else
  3786. #endif
  3787. STAT_SYS_ALLOC_SBC_FREE(allctr, crr_sz, blk_sz);
  3788. unlink_carrier(&allctr->sbc_list, crr);
  3789. dealloc_carrier(allctr, crr, 0);
  3790. }
  3791. else {
  3792. ASSERT(IS_MBC_FIRST_FBLK(allctr, blk));
  3793. crr = FIRST_BLK_TO_MBC(allctr, blk);
  3794. #ifdef DEBUG
  3795. if (!allctr->stopped) {
  3796. ASSERT(IS_LAST_BLK(blk));
  3797. #ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
  3798. (*allctr->link_free_block)(allctr, blk);
  3799. HARD_CHECK_BLK_CARRIER(allctr, blk);
  3800. (*allctr->unlink_free_block)(allctr, blk);
  3801. #endif
  3802. }
  3803. #endif
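  /* If the carrier was fetched from the pool in a busy state for this
   * operation, clear the BUSY and IN_POOL flags and unlink it from the
   * pool before scheduling its destruction. */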
  3804. if (busy_pcrr_pp && *busy_pcrr_pp) {
  3805. erts_aint_t iallctr = erts_atomic_read_nob(&crr->allctr);
  3806. ERTS_ALC_CPOOL_ASSERT(*busy_pcrr_pp == crr);
  3807. ERTS_ALC_CPOOL_ASSERT((iallctr & ~ERTS_CRR_ALCTR_FLG_HOMECOMING)
  3808. == (((erts_aint_t) allctr)
  3809. | ERTS_CRR_ALCTR_FLG_IN_POOL
  3810. | ERTS_CRR_ALCTR_FLG_BUSY));
  3811. ERTS_ALC_CPOOL_ASSERT(allctr == crr->cpool.orig_allctr);
  3812. *busy_pcrr_pp = NULL;
  3813. erts_atomic_set_nob(&crr->allctr,
  3814. (iallctr & ~(ERTS_CRR_ALCTR_FLG_IN_POOL |
  3815. ERTS_CRR_ALCTR_FLG_BUSY)));
  3816. cpool_delete(allctr, allctr, crr);
  3817. }
  3818. else
  3819. {
  3820. unlink_carrier(&allctr->mbc_list, crr);
  3821. STAT_MBC_FREE(allctr, crr);
  3822. if (allctr->remove_mbc)
  3823. allctr->remove_mbc(allctr, crr);
  3824. }
  3825. #ifdef USE_LTTNG_VM_TRACEPOINTS
  3826. if (LTTNG_ENABLED(carrier_destroy)) {
  3827. lttng_decl_carrier_stats(mbc_stats);
  3828. lttng_decl_carrier_stats(sbc_stats);
  3829. LTTNG_CARRIER_STATS_TO_LTTNG_STATS(&(allctr->mbcs), mbc_stats);
  3830. LTTNG_CARRIER_STATS_TO_LTTNG_STATS(&(allctr->sbcs), sbc_stats);
  3831. LTTNG5(carrier_destroy,
  3832. ERTS_ALC_A2AD(allctr->alloc_no),
  3833. allctr->ix,
  3834. CARRIER_SZ(crr),
  3835. mbc_stats,
  3836. sbc_stats);
  3837. }
  3838. #endif
  3839. schedule_dealloc_carrier(allctr, crr);
  3840. }
  3841. }
  3842. /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
  3843. * Info stuff *
  3844. \* */
  3845. static struct {
  3846. Eterm versions;
  3847. Eterm options;
  3848. Eterm e;
  3849. Eterm t;
  3850. Eterm ramv;
  3851. Eterm atags;
  3852. #if HAVE_ERTS_MSEG
  3853. Eterm asbcst;
  3854. Eterm rsbcst;
  3855. #endif
  3856. Eterm rsbcmt;
  3857. Eterm rmbcmt;
  3858. Eterm mmbcs;
  3859. Eterm msbclt;
  3860. #if HAVE_ERTS_MSEG
  3861. Eterm mmsbc;
  3862. Eterm mmmbc;
  3863. #endif
  3864. Eterm lmbcs;
  3865. Eterm smbcs;
  3866. Eterm mbcgs;
  3867. Eterm acul;
  3868. Eterm acnl;
  3869. Eterm acfml;
  3870. #if HAVE_ERTS_MSEG
  3871. Eterm mmc;
  3872. #endif
  3873. Eterm ycs;
  3874. Eterm sac;
  3875. Eterm fix_types;
  3876. Eterm mbcs;
  3877. Eterm mbcs_pool;
  3878. Eterm fetch;
  3879. Eterm fail_pooled;
  3880. Eterm fail_shared;
  3881. Eterm fail_pend_dealloc;
  3882. Eterm fail;
  3883. Eterm skip_size;
  3884. Eterm skip_busy;
  3885. Eterm skip_not_pooled;
  3886. Eterm skip_homecoming;
  3887. Eterm skip_race;
  3888. Eterm entrance_removed;
  3889. Eterm sbcs;
  3890. Eterm sys_alloc_carriers_size;
  3891. #if HAVE_ERTS_MSEG
  3892. Eterm mseg_alloc_carriers_size;
  3893. #endif
  3894. Eterm carriers_size;
  3895. Eterm sys_alloc_carriers;
  3896. #if HAVE_ERTS_MSEG
  3897. Eterm mseg_alloc_carriers;
  3898. #endif
  3899. Eterm carriers;
  3900. Eterm blocks_size;
  3901. Eterm blocks;
  3902. Eterm foreign_blocks;
  3903. Eterm calls;
  3904. Eterm sys_alloc;
  3905. Eterm sys_free;
  3906. Eterm sys_realloc;
  3907. #if HAVE_ERTS_MSEG
  3908. Eterm mseg_alloc;
  3909. Eterm mseg_dealloc;
  3910. Eterm mseg_realloc;
  3911. #endif
  3912. #ifdef DEBUG
  3913. Eterm end_of_atoms;
  3914. #endif
  3915. } am;
  3916. static Eterm alloc_type_atoms[ERTS_ALC_N_MAX + 1];
  3917. static ERTS_INLINE void atom_init(Eterm *atom, char *name)
  3918. {
  3919. *atom = am_atom_put(name, sys_strlen(name));
  3920. }
  3921. #define AM_INIT(AM) atom_init(&am.AM, #AM)
  3922. static erts_mtx_t init_atoms_mtx;
  3923. static void
  3924. init_atoms(Allctr_t *allctr)
  3925. {
  3926. erts_mtx_lock(&init_atoms_mtx);
  3927. if (!atoms_initialized) {
  3928. int ix;
  3929. #ifdef DEBUG
  3930. Eterm *atom;
  3931. for (atom = (Eterm *) &am; atom <= &am.end_of_atoms; atom++) {
  3932. *atom = THE_NON_VALUE;
  3933. }
  3934. #endif
  3935. AM_INIT(versions);
  3936. AM_INIT(options);
  3937. AM_INIT(e);
  3938. AM_INIT(t);
  3939. AM_INIT(ramv);
  3940. AM_INIT(atags);
  3941. #if HAVE_ERTS_MSEG
  3942. AM_INIT(asbcst);
  3943. AM_INIT(rsbcst);
  3944. #endif
  3945. AM_INIT(rsbcmt);
  3946. AM_INIT(rmbcmt);
  3947. AM_INIT(mmbcs);
  3948. AM_INIT(msbclt);
  3949. #if HAVE_ERTS_MSEG
  3950. AM_INIT(mmsbc);
  3951. AM_INIT(mmmbc);
  3952. #endif
  3953. AM_INIT(lmbcs);
  3954. AM_INIT(smbcs);
  3955. AM_INIT(mbcgs);
  3956. AM_INIT(acul);
  3957. AM_INIT(acnl);
  3958. AM_INIT(acfml);
  3959. #if HAVE_ERTS_MSEG
  3960. AM_INIT(mmc);
  3961. #endif
  3962. AM_INIT(ycs);
  3963. AM_INIT(sac);
  3964. AM_INIT(fix_types);
  3965. AM_INIT(mbcs);
  3966. AM_INIT(mbcs_pool);
  3967. AM_INIT(fetch);
  3968. AM_INIT(fail_pooled);
  3969. AM_INIT(fail_shared);
  3970. AM_INIT(fail_pend_dealloc);
  3971. AM_INIT(fail);
  3972. AM_INIT(skip_size);
  3973. AM_INIT(skip_busy);
  3974. AM_INIT(skip_not_pooled);
  3975. AM_INIT(skip_homecoming);
  3976. AM_INIT(skip_race);
  3977. AM_INIT(entrance_removed);
  3978. AM_INIT(sbcs);
  3979. AM_INIT(sys_alloc_carriers_size);
  3980. #if HAVE_ERTS_MSEG
  3981. AM_INIT(mseg_alloc_carriers_size);
  3982. #endif
  3983. AM_INIT(carriers_size);
  3984. AM_INIT(sys_alloc_carriers);
  3985. #if HAVE_ERTS_MSEG
  3986. AM_INIT(mseg_alloc_carriers);
  3987. #endif
  3988. AM_INIT(carriers);
  3989. AM_INIT(blocks_size);
  3990. AM_INIT(blocks);
  3991. AM_INIT(foreign_blocks);
  3992. AM_INIT(calls);
  3993. AM_INIT(sys_alloc);
  3994. AM_INIT(sys_free);
  3995. AM_INIT(sys_realloc);
  3996. #if HAVE_ERTS_MSEG
  3997. AM_INIT(mseg_alloc);
  3998. AM_INIT(mseg_dealloc);
  3999. AM_INIT(mseg_realloc);
  4000. #endif
  4001. #ifdef DEBUG
  4002. for (atom = (Eterm *) &am; atom < &am.end_of_atoms; atom++) {
  4003. ASSERT(*atom != THE_NON_VALUE);
  4004. }
  4005. #endif
  4006. for (ix = ERTS_ALC_N_MIN; ix <= ERTS_ALC_N_MAX; ix++) {
  4007. const char *name = ERTS_ALC_N2TD(ix);
  4008. size_t len = sys_strlen(name);
  4009. alloc_type_atoms[ix] = am_atom_put(name, len);
  4010. }
  4011. }
  4012. if (allctr && !allctr->atoms_initialized) {
  4013. make_name_atoms(allctr);
  4014. (*allctr->init_atoms)();
  4015. allctr->atoms_initialized = 1;
  4016. }
  4017. atoms_initialized = 1;
  4018. erts_mtx_unlock(&init_atoms_mtx);
  4019. }
  4020. static ERTS_INLINE void
  4021. ensure_atoms_initialized(Allctr_t *allctr)
  4022. {
  4023. if (!allctr || !allctr->atoms_initialized)
  4024. init_atoms(allctr);
  4025. }
  4026. #define bld_uint erts_bld_uint
  4027. #define bld_cons erts_bld_cons
  4028. #define bld_tuple erts_bld_tuple
  4029. #define bld_string erts_bld_string
  4030. /*
  4031. * bld_unstable_uint() (instead of bld_uint()) is used when values may
  4032. * change between the size check and the actual build. This is because a value
  4033. * that would fit in a small when the size check is done may need to be built
  4034. * as a big when the actual build is performed. The caller is required to
  4035. * HRelease after the build.
  4036. *
  4037. * Note, bld_unstable_uint() should have been called bld_unstable_uword()
  4038. * but we do not want to rename it...
  4039. */
  4040. static ERTS_INLINE Eterm
  4041. bld_unstable_uint(Uint **hpp, Uint *szp, UWord ui)
  4042. {
  4043. Eterm res = THE_NON_VALUE;
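  /* Always reserve heap space for the largest possible bignum, since the
   * value may have grown between the sizing pass and the build pass. */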
  4044. if (szp)
  4045. *szp += BIG_UWORD_HEAP_SIZE(~((UWord) 0));
  4046. if (hpp) {
  4047. if (IS_USMALL(0, ui))
  4048. res = make_small(ui);
  4049. else {
  4050. res = uword_to_big(ui, *hpp);
  4051. *hpp += BIG_UWORD_HEAP_SIZE(ui);
  4052. }
  4053. }
  4054. return res;
  4055. }
  4056. static ERTS_INLINE void
  4057. add_2tup(Uint **hpp, Uint *szp, Eterm *lp, Eterm el1, Eterm el2)
  4058. {
  4059. *lp = bld_cons(hpp, szp, bld_tuple(hpp, szp, 2, el1, el2), *lp);
  4060. }
  4061. static ERTS_INLINE void
  4062. add_3tup(Uint **hpp, Uint *szp, Eterm *lp, Eterm el1, Eterm el2, Eterm el3)
  4063. {
  4064. *lp = bld_cons(hpp, szp, bld_tuple(hpp, szp, 3, el1, el2, el3), *lp);
  4065. }
  4066. static ERTS_INLINE void
  4067. add_4tup(Uint **hpp, Uint *szp, Eterm *lp,
  4068. Eterm el1, Eterm el2, Eterm el3, Eterm el4)
  4069. {
  4070. *lp =
  4071. bld_cons(hpp, szp, bld_tuple(hpp, szp, 4, el1, el2, el3, el4), *lp);
  4072. }
  4073. static ERTS_INLINE void
  4074. add_fix_types(Allctr_t *allctr, int internal, Uint **hpp, Uint *szp,
  4075. Eterm *lp, Eterm fix)
  4076. {
  4077. if (allctr->fix) {
  4078. if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr))
  4079. add_2tup(hpp, szp, lp, am.fix_types, fix);
  4080. else if (internal)
  4081. add_3tup(hpp, szp, lp,
  4082. am.fix_types,
  4083. erts_bld_uword(hpp, szp, ~((UWord) 0)),
  4084. fix);
  4085. }
  4086. }
  4087. static Eterm
  4088. sz_info_fix(Allctr_t *allctr,
  4089. int internal,
  4090. fmtfn_t *print_to_p,
  4091. void *print_to_arg,
  4092. Uint **hpp,
  4093. Uint *szp)
  4094. {
  4095. Eterm res;
  4096. int ix;
  4097. ASSERT(allctr->fix);
  4098. res = NIL;
  4099. if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
  4100. if (internal) {
  4101. for (ix = ERTS_ALC_NO_FIXED_SIZES-1; ix >= 0; ix--) {
  4102. ErtsAlcFixList_t *fix = &allctr->fix[ix];
  4103. UWord alloced = fix->type_size * fix->u.cpool.allocated;
  4104. UWord used = fix->type_size * fix->u.cpool.used;
  4105. if (print_to_p) {
  4106. fmtfn_t to = *print_to_p;
  4107. void *arg = print_to_arg;
  4108. erts_print(to,
  4109. arg,
  4110. "fix type internal: %s %bpu %bpu\n",
  4111. (char *) ERTS_ALC_T2TD(fix->type),
  4112. alloced,
  4113. used);
  4114. }
  4115. if (hpp || szp) {
  4116. add_3tup(hpp, szp, &res,
  4117. alloc_type_atoms[ERTS_ALC_T2N(fix->type)],
  4118. bld_unstable_uint(hpp, szp, alloced),
  4119. bld_unstable_uint(hpp, szp, used));
  4120. }
  4121. }
  4122. }
  4123. }
  4124. else {
  4125. for (ix = ERTS_ALC_NO_FIXED_SIZES-1; ix >= 0; ix--) {
  4126. ErtsAlcFixList_t *fix = &allctr->fix[ix];
  4127. UWord alloced = fix->type_size * fix->u.nocpool.allocated;
  4128. UWord used = fix->type_size*fix->u.nocpool.used;
  4129. if (print_to_p) {
  4130. fmtfn_t to = *print_to_p;
  4131. void *arg = print_to_arg;
  4132. erts_print(to,
  4133. arg,
  4134. "fix type: %s %bpu %bpu\n",
  4135. (char *) ERTS_ALC_T2TD(fix->type),
  4136. alloced,
  4137. used);
  4138. }
  4139. if (hpp || szp) {
  4140. add_3tup(hpp, szp, &res,
  4141. alloc_type_atoms[ERTS_ALC_T2N(fix->type)],
  4142. bld_unstable_uint(hpp, szp, alloced),
  4143. bld_unstable_uint(hpp, szp, used));
  4144. }
  4145. }
  4146. }
  4147. return res;
  4148. }
  4149. static Eterm
  4150. sz_info_carriers(Allctr_t *allctr,
  4151. CarriersStats_t *cs,
  4152. char *prefix,
  4153. fmtfn_t *print_to_p,
  4154. void *print_to_arg,
  4155. Uint **hpp,
  4156. Uint *szp)
  4157. {
  4158. Eterm res = THE_NON_VALUE;
  4159. UWord curr_size = cs->curr.norm.mseg.size + cs->curr.norm.sys_alloc.size;
  4160. if (print_to_p) {
  4161. fmtfn_t to = *print_to_p;
  4162. void *arg = print_to_arg;
  4163. erts_print(to,
  4164. arg,
  4165. "%sblocks size: %bpu %bpu %bpu\n",
  4166. prefix,
  4167. cs->blocks.curr.size,
  4168. cs->blocks.max.size,
  4169. cs->blocks.max_ever.size);
  4170. erts_print(to,
  4171. arg,
  4172. "%scarriers size: %bpu %bpu %bpu\n",
  4173. prefix,
  4174. curr_size,
  4175. cs->max.size,
  4176. cs->max_ever.size);
  4177. }
  4178. if (hpp || szp) {
  4179. res = NIL;
  4180. add_4tup(hpp, szp, &res,
  4181. am.carriers_size,
  4182. bld_unstable_uint(hpp, szp, curr_size),
  4183. bld_unstable_uint(hpp, szp, cs->max.size),
  4184. bld_unstable_uint(hpp, szp, cs->max_ever.size));
  4185. add_4tup(hpp, szp, &res,
  4186. am.blocks_size,
  4187. bld_unstable_uint(hpp, szp, cs->blocks.curr.size),
  4188. bld_unstable_uint(hpp, szp, cs->blocks.max.size),
  4189. bld_unstable_uint(hpp, szp, cs->blocks.max_ever.size));
  4190. }
  4191. return res;
  4192. }
  4193. static Eterm
  4194. info_cpool(Allctr_t *allctr,
  4195. int sz_only,
  4196. char *prefix,
  4197. fmtfn_t *print_to_p,
  4198. void *print_to_arg,
  4199. Uint **hpp,
  4200. Uint *szp)
  4201. {
  4202. Eterm res = THE_NON_VALUE;
  4203. UWord noc, csz, nob, bsz;
  4204. noc = csz = nob = bsz = ~0;
  4205. if (print_to_p || hpp) {
  4206. if (sz_only)
  4207. cpool_read_stat(allctr, allctr->alloc_no, NULL, &csz, NULL, &bsz);
  4208. else
  4209. cpool_read_stat(allctr, allctr->alloc_no, &noc, &csz, &nob, &bsz);
  4210. }
  4211. if (print_to_p) {
  4212. fmtfn_t to = *print_to_p;
  4213. void *arg = print_to_arg;
  4214. if (!sz_only)
  4215. erts_print(to, arg, "%sblocks: %bpu\n", prefix, nob);
  4216. erts_print(to, arg, "%sblocks size: %bpu\n", prefix, bsz);
  4217. if (!sz_only)
  4218. erts_print(to, arg, "%scarriers: %bpu\n", prefix, noc);
  4219. erts_print(to, arg, "%scarriers size: %bpu\n", prefix, csz);
  4220. }
  4221. if (hpp || szp) {
  4222. Eterm foreign_blocks;
  4223. int i;
  4224. foreign_blocks = NIL;
  4225. res = NIL;
  4226. if (!sz_only) {
  4227. add_3tup(hpp, szp, &res, am.fail_pooled,
  4228. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.fail_pooled)),
  4229. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.fail_pooled)));
  4230. add_3tup(hpp, szp, &res, am.fail_shared,
  4231. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.fail_shared)),
  4232. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.fail_shared)));
  4233. add_3tup(hpp, szp, &res, am.fail_pend_dealloc,
  4234. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.fail_pend_dealloc)),
  4235. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.fail_pend_dealloc)));
  4236. add_3tup(hpp, szp, &res, am.fail,
  4237. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.fail)),
  4238. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.fail)));
  4239. add_3tup(hpp, szp, &res, am.fetch,
  4240. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.fetch)),
  4241. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.fetch)));
  4242. add_3tup(hpp, szp, &res, am.skip_size,
  4243. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.skip_size)),
  4244. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.skip_size)));
  4245. add_3tup(hpp, szp, &res, am.skip_busy,
  4246. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.skip_busy)),
  4247. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.skip_busy)));
  4248. add_3tup(hpp, szp, &res, am.skip_not_pooled,
  4249. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.skip_not_pooled)),
  4250. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.skip_not_pooled)));
  4251. add_3tup(hpp, szp, &res, am.skip_homecoming,
  4252. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.skip_homecoming)),
  4253. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.skip_homecoming)));
  4254. add_3tup(hpp, szp, &res, am.skip_race,
  4255. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.skip_race)),
  4256. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.skip_race)));
  4257. add_3tup(hpp, szp, &res, am.entrance_removed,
  4258. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.entrance_removed)),
  4259. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.entrance_removed)));
  4260. }
  4261. add_2tup(hpp, szp, &res,
  4262. am.carriers_size,
  4263. bld_unstable_uint(hpp, szp, csz));
  4264. if (!sz_only) {
  4265. add_2tup(hpp, szp, &res,
  4266. am.carriers,
  4267. bld_unstable_uint(hpp, szp, noc));
  4268. }
  4269. add_2tup(hpp, szp, &res,
  4270. am.blocks_size,
  4271. bld_unstable_uint(hpp, szp, bsz));
  4272. if (!sz_only) {
  4273. add_2tup(hpp, szp, &res,
  4274. am.blocks,
  4275. bld_unstable_uint(hpp, szp, nob));
  4276. }
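  /* Report statistics for foreign blocks, i.e. blocks of other allocator
   * types that currently reside in this allocator's carriers. */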
  4277. for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
  4278. const char *name_str;
  4279. Eterm name, info;
  4280. if (i == allctr->alloc_no) {
  4281. continue;
  4282. }
  4283. cpool_read_stat(allctr, i, NULL, NULL, &nob, &bsz);
  4284. if (bsz == 0 && (nob == 0 || sz_only)) {
  4285. continue;
  4286. }
  4287. name_str = ERTS_ALC_A2AD(i);
  4288. info = NIL;
  4289. add_2tup(hpp, szp, &info,
  4290. am.blocks_size,
  4291. bld_unstable_uint(hpp, szp, bsz));
  4292. if (!sz_only) {
  4293. add_2tup(hpp, szp, &info,
  4294. am.blocks,
  4295. bld_unstable_uint(hpp, szp, nob));
  4296. }
  4297. name = am_atom_put(name_str, sys_strlen(name_str));
  4298. add_2tup(hpp, szp, &foreign_blocks, name, info);
  4299. }
  4300. add_2tup(hpp, szp, &res, am.foreign_blocks, foreign_blocks);
  4301. }
  4302. return res;
  4303. }
  4304. static Eterm
  4305. info_carriers(Allctr_t *allctr,
  4306. CarriersStats_t *cs,
  4307. char *prefix,
  4308. fmtfn_t *print_to_p,
  4309. void *print_to_arg,
  4310. Uint **hpp,
  4311. Uint *szp)
  4312. {
  4313. Eterm res = THE_NON_VALUE;
  4314. UWord curr_no, curr_size;
  4315. curr_no = cs->curr.norm.mseg.no + cs->curr.norm.sys_alloc.no;
  4316. curr_size = cs->curr.norm.mseg.size + cs->curr.norm.sys_alloc.size;
  4317. if (print_to_p) {
  4318. fmtfn_t to = *print_to_p;
  4319. void *arg = print_to_arg;
  4320. erts_print(to,
  4321. arg,
  4322. "%sblocks: %bpu %bpu %bpu\n",
  4323. prefix,
  4324. cs->blocks.curr.no,
  4325. cs->blocks.max.no,
  4326. cs->blocks.max_ever.no);
  4327. erts_print(to,
  4328. arg,
  4329. "%sblocks size: %bpu %bpu %bpu\n",
  4330. prefix,
  4331. cs->blocks.curr.size,
  4332. cs->blocks.max.size,
  4333. cs->blocks.max_ever.size);
  4334. erts_print(to,
  4335. arg,
  4336. "%scarriers: %bpu %bpu %bpu\n",
  4337. prefix,
  4338. curr_no,
  4339. cs->max.no,
  4340. cs->max_ever.no);
  4341. #if HAVE_ERTS_MSEG
  4342. erts_print(to,
  4343. arg,
  4344. "%smseg carriers: %bpu\n",
  4345. prefix,
  4346. cs->curr.norm.mseg.no);
  4347. #endif
  4348. erts_print(to,
  4349. arg,
  4350. "%ssys_alloc carriers: %bpu\n",
  4351. prefix,
  4352. cs->curr.norm.sys_alloc.no);
  4353. erts_print(to,
  4354. arg,
  4355. "%scarriers size: %bpu %bpu %bpu\n",
  4356. prefix,
  4357. curr_size,
  4358. cs->max.size,
  4359. cs->max_ever.size);
  4360. #if HAVE_ERTS_MSEG
  4361. erts_print(to,
  4362. arg,
  4363. "%smseg carriers size: %bpu\n",
  4364. prefix,
  4365. cs->curr.norm.mseg.size);
  4366. #endif
  4367. erts_print(to,
  4368. arg,
  4369. "%ssys_alloc carriers size: %bpu\n",
  4370. prefix,
  4371. cs->curr.norm.sys_alloc.size);
  4372. }
  4373. if (hpp || szp) {
  4374. res = NIL;
  4375. add_2tup(hpp, szp, &res,
  4376. am.sys_alloc_carriers_size,
  4377. bld_unstable_uint(hpp, szp, cs->curr.norm.sys_alloc.size));
  4378. #if HAVE_ERTS_MSEG
  4379. add_2tup(hpp, szp, &res,
  4380. am.mseg_alloc_carriers_size,
  4381. bld_unstable_uint(hpp, szp, cs->curr.norm.mseg.size));
  4382. #endif
  4383. add_4tup(hpp, szp, &res,
  4384. am.carriers_size,
  4385. bld_unstable_uint(hpp, szp, curr_size),
  4386. bld_unstable_uint(hpp, szp, cs->max.size),
  4387. bld_unstable_uint(hpp, szp, cs->max_ever.size));
  4388. add_2tup(hpp, szp, &res,
  4389. am.sys_alloc_carriers,
  4390. bld_unstable_uint(hpp, szp, cs->curr.norm.sys_alloc.no));
  4391. #if HAVE_ERTS_MSEG
  4392. add_2tup(hpp, szp, &res,
  4393. am.mseg_alloc_carriers,
  4394. bld_unstable_uint(hpp, szp, cs->curr.norm.mseg.no));
  4395. #endif
  4396. add_4tup(hpp, szp, &res,
  4397. am.carriers,
  4398. bld_unstable_uint(hpp, szp, curr_no),
  4399. bld_unstable_uint(hpp, szp, cs->max.no),
  4400. bld_unstable_uint(hpp, szp, cs->max_ever.no));
  4401. add_4tup(hpp, szp, &res,
  4402. am.blocks_size,
  4403. bld_unstable_uint(hpp, szp, cs->blocks.curr.size),
  4404. bld_unstable_uint(hpp, szp, cs->blocks.max.size),
  4405. bld_unstable_uint(hpp, szp, cs->blocks.max_ever.size));
  4406. add_4tup(hpp, szp, &res,
  4407. am.blocks,
  4408. bld_unstable_uint(hpp, szp, cs->blocks.curr.no),
  4409. bld_unstable_uint(hpp, szp, cs->blocks.max.no),
  4410. bld_unstable_uint(hpp, szp, cs->blocks.max_ever.no));
  4411. }
  4412. return res;
  4413. }
  4414. static void
  4415. make_name_atoms(Allctr_t *allctr)
  4416. {
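  /* Build the '<prefix>alloc', '<prefix>realloc' and '<prefix>free' atoms
   * used in call statistics output (see info_calls()); e.g. a name_prefix
   * of "sl_" yields 'sl_alloc', 'sl_realloc' and 'sl_free'. */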
  4417. char alloc[] = "alloc";
  4418. char realloc[] = "realloc";
  4419. char free[] = "free";
  4420. char buf[MAX_ATOM_CHARACTERS];
  4421. size_t prefix_len = sys_strlen(allctr->name_prefix);
  4422. if (prefix_len > MAX_ATOM_CHARACTERS + sizeof(realloc) - 1)
  4423. erts_exit(ERTS_ERROR_EXIT,"Too long allocator name: %salloc\n",allctr->name_prefix);
  4424. sys_memcpy((void *) buf, (void *) allctr->name_prefix, prefix_len);
  4425. sys_memcpy((void *) &buf[prefix_len], (void *) alloc, sizeof(alloc) - 1);
  4426. allctr->name.alloc = am_atom_put(buf, prefix_len + sizeof(alloc) - 1);
  4427. sys_memcpy((void *) &buf[prefix_len], (void *) realloc, sizeof(realloc) - 1);
  4428. allctr->name.realloc = am_atom_put(buf, prefix_len + sizeof(realloc) - 1);
  4429. sys_memcpy((void *) &buf[prefix_len], (void *) free, sizeof(free) - 1);
  4430. allctr->name.free = am_atom_put(buf, prefix_len + sizeof(free) - 1);
  4431. }
  4432. static Eterm
  4433. info_calls(Allctr_t *allctr,
  4434. fmtfn_t *print_to_p,
  4435. void *print_to_arg,
  4436. Uint **hpp,
  4437. Uint *szp)
  4438. {
  4439. Eterm res = THE_NON_VALUE;
  4440. if (print_to_p) {
  4441. #define PRINT_CC_4(TO, TOA, NAME, CC) \
  4442. erts_print(TO, TOA, "%s calls: %b64u\n", NAME, CC)
  4443. #define PRINT_CC_5(TO, TOA, PRFX, NAME, CC) \
  4444. erts_print(TO, TOA, "%s%s calls: %b64u\n",PRFX,NAME,CC)
  4445. char *prefix = allctr->name_prefix;
  4446. fmtfn_t to = *print_to_p;
  4447. void *arg = print_to_arg;
  4448. PRINT_CC_5(to, arg, prefix, "alloc", allctr->calls.this_alloc);
  4449. PRINT_CC_5(to, arg, prefix, "free", allctr->calls.this_free);
  4450. PRINT_CC_5(to, arg, prefix, "realloc", allctr->calls.this_realloc);
  4451. #if HAVE_ERTS_MSEG
  4452. PRINT_CC_4(to, arg, "mseg_alloc", allctr->calls.mseg_alloc);
  4453. PRINT_CC_4(to, arg, "mseg_dealloc", allctr->calls.mseg_dealloc);
  4454. PRINT_CC_4(to, arg, "mseg_realloc", allctr->calls.mseg_realloc);
  4455. #endif
  4456. PRINT_CC_4(to, arg, "sys_alloc", allctr->calls.sys_alloc);
  4457. PRINT_CC_4(to, arg, "sys_free", allctr->calls.sys_free);
  4458. PRINT_CC_4(to, arg, "sys_realloc", allctr->calls.sys_realloc);
  4459. #undef PRINT_CC_4
  4460. #undef PRINT_CC_5
  4461. }
  4462. if (hpp || szp) {
  4463. ASSERT(allctr->name.alloc != THE_NON_VALUE);
  4464. ASSERT(allctr->name.realloc != THE_NON_VALUE);
  4465. ASSERT(allctr->name.free != THE_NON_VALUE);
  4466. res = NIL;
  4467. add_3tup(hpp, szp, &res,
  4468. am.sys_realloc,
  4469. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.sys_realloc)),
  4470. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.sys_realloc)));
  4471. add_3tup(hpp, szp, &res,
  4472. am.sys_free,
  4473. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.sys_free)),
  4474. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.sys_free)));
  4475. add_3tup(hpp, szp, &res,
  4476. am.sys_alloc,
  4477. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.sys_alloc)),
  4478. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.sys_alloc)));
  4479. #if HAVE_ERTS_MSEG
  4480. add_3tup(hpp, szp, &res,
  4481. am.mseg_realloc,
  4482. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.mseg_realloc)),
  4483. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.mseg_realloc)));
  4484. add_3tup(hpp, szp, &res,
  4485. am.mseg_dealloc,
  4486. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.mseg_dealloc)),
  4487. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.mseg_dealloc)));
  4488. add_3tup(hpp, szp, &res,
  4489. am.mseg_alloc,
  4490. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.mseg_alloc)),
  4491. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.mseg_alloc)));
  4492. #endif
  4493. add_3tup(hpp, szp, &res,
  4494. allctr->name.realloc,
  4495. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.this_realloc)),
  4496. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.this_realloc)));
  4497. add_3tup(hpp, szp, &res,
  4498. allctr->name.free,
  4499. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.this_free)),
  4500. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.this_free)));
  4501. add_3tup(hpp, szp, &res,
  4502. allctr->name.alloc,
  4503. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.this_alloc)),
  4504. bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.this_alloc)));
  4505. }
  4506. return res;
  4507. }
  4508. static Eterm
  4509. info_options(Allctr_t *allctr,
  4510. fmtfn_t *print_to_p,
  4511. void *print_to_arg,
  4512. Uint **hpp,
  4513. Uint *szp)
  4514. {
  4515. Eterm res = THE_NON_VALUE;
  4516. UWord acul, acnl, acfml;
  4517. if (!allctr) {
  4518. if (print_to_p)
  4519. erts_print(*print_to_p, print_to_arg, "option e: false\n");
  4520. if (hpp || szp) {
  4521. res = NIL;
  4522. add_2tup(hpp, szp, &res, am.e, am_false);
  4523. }
  4524. return res;
  4525. }
  4526. acul = allctr->cpool.util_limit;
  4527. acnl = allctr->cpool.in_pool_limit;
  4528. acfml = allctr->cpool.fblk_min_limit;
  4529. if (print_to_p) {
  4530. char topt[21]; /* Enough for any 64-bit integer */
  4531. if (allctr->t)
  4532. erts_snprintf(&topt[0], sizeof(topt), "%d", allctr->t);
  4533. else
  4534. erts_snprintf(&topt[0], sizeof(topt), "false");
  4535. erts_print(*print_to_p,
  4536. print_to_arg,
  4537. "option e: true\n"
  4538. "option t: %s\n"
  4539. "option ramv: %s\n"
  4540. "option atags: %s\n"
  4541. "option sbct: %beu\n"
  4542. #if HAVE_ERTS_MSEG
  4543. "option asbcst: %bpu\n"
  4544. "option rsbcst: %bpu\n"
  4545. #endif
  4546. "option rsbcmt: %beu\n"
  4547. "option rmbcmt: %beu\n"
  4548. "option mmbcs: %beu\n"
  4549. #if HAVE_ERTS_MSEG
  4550. "option mmsbc: %beu\n"
  4551. "option mmmbc: %beu\n"
  4552. #endif
  4553. "option lmbcs: %beu\n"
  4554. "option smbcs: %beu\n"
  4555. "option mbcgs: %beu\n"
  4556. "option acul: %bpu\n",
  4557. topt,
  4558. allctr->ramv ? "true" : "false",
  4559. allctr->atags ? "true" : "false",
  4560. allctr->sbc_threshold,
  4561. #if HAVE_ERTS_MSEG
  4562. allctr->mseg_opt.abs_shrink_th,
  4563. allctr->mseg_opt.rel_shrink_th,
  4564. #endif
  4565. allctr->sbc_move_threshold,
  4566. allctr->mbc_move_threshold,
  4567. allctr->main_carrier_size,
  4568. #if HAVE_ERTS_MSEG
  4569. allctr->max_mseg_sbcs,
  4570. allctr->max_mseg_mbcs,
  4571. #endif
  4572. allctr->largest_mbc_size,
  4573. allctr->smallest_mbc_size,
  4574. allctr->mbc_growth_stages,
  4575. acul);
  4576. }
  4577. res = (*allctr->info_options)(allctr, "option ", print_to_p, print_to_arg,
  4578. hpp, szp);
  4579. if (hpp || szp) {
  4580. add_2tup(hpp, szp, &res,
  4581. am.acfml,
  4582. bld_uint(hpp, szp, acfml));
  4583. add_2tup(hpp, szp, &res,
  4584. am.acnl,
  4585. bld_uint(hpp, szp, acnl));
  4586. add_2tup(hpp, szp, &res,
  4587. am.acul,
  4588. bld_uint(hpp, szp, acul));
  4589. add_2tup(hpp, szp, &res,
  4590. am.mbcgs,
  4591. bld_uint(hpp, szp, allctr->mbc_growth_stages));
  4592. add_2tup(hpp, szp, &res,
  4593. am.smbcs,
  4594. bld_uint(hpp, szp, allctr->smallest_mbc_size));
  4595. add_2tup(hpp, szp, &res,
  4596. am.lmbcs,
  4597. bld_uint(hpp, szp, allctr->largest_mbc_size));
  4598. #if HAVE_ERTS_MSEG
  4599. add_2tup(hpp, szp, &res,
  4600. am.mmsbc,
  4601. bld_uint(hpp, szp, allctr->max_mseg_sbcs));
  4602. add_2tup(hpp, szp, &res,
  4603. am.mmmbc,
  4604. bld_uint(hpp, szp, allctr->max_mseg_mbcs));
  4605. #endif
  4606. add_2tup(hpp, szp, &res,
  4607. am.mmbcs,
  4608. bld_uint(hpp, szp, allctr->main_carrier_size));
  4609. add_2tup(hpp, szp, &res,
  4610. am.rmbcmt,
  4611. bld_uint(hpp, szp, allctr->mbc_move_threshold));
  4612. add_2tup(hpp, szp, &res,
  4613. am.rsbcmt,
  4614. bld_uint(hpp, szp, allctr->sbc_move_threshold));
  4615. #if HAVE_ERTS_MSEG
  4616. add_2tup(hpp, szp, &res,
  4617. am.rsbcst,
  4618. bld_uint(hpp, szp, allctr->mseg_opt.rel_shrink_th));
  4619. add_2tup(hpp, szp, &res,
  4620. am.asbcst,
  4621. bld_uint(hpp, szp, allctr->mseg_opt.abs_shrink_th));
  4622. #endif
  4623. add_2tup(hpp, szp, &res,
  4624. am_sbct,
  4625. bld_uint(hpp, szp, allctr->sbc_threshold));
  4626. add_2tup(hpp, szp, &res, am.ramv, allctr->ramv ? am_true : am_false);
  4627. add_2tup(hpp, szp, &res, am.atags, allctr->atags ? am_true : am_false);
  4628. add_2tup(hpp, szp, &res, am.t, (allctr->t ? am_true : am_false));
  4629. add_2tup(hpp, szp, &res, am.e, am_true);
  4630. }
  4631. return res;
  4632. }
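/*
 * Statistics helpers for the info functions below (editor note): 'max' holds
 * the peak within the current max-period and is reset when info is requested
 * with begin_max_period set, while 'max_ever' holds the peak since the
 * allocator instance was started.
 */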
  4633. static ERTS_INLINE void
  4634. update_max_ever_values(CarriersStats_t *cs)
  4635. {
  4636. if (cs->max_ever.no < cs->max.no)
  4637. cs->max_ever.no = cs->max.no;
  4638. if (cs->max_ever.size < cs->max.size)
  4639. cs->max_ever.size = cs->max.size;
  4640. if (cs->blocks.max_ever.no < cs->blocks.max.no)
  4641. cs->blocks.max_ever.no = cs->blocks.max.no;
  4642. if (cs->blocks.max_ever.size < cs->blocks.max.size)
  4643. cs->blocks.max_ever.size = cs->blocks.max.size;
  4644. }
  4645. static ERTS_INLINE void
  4646. reset_max_values(CarriersStats_t *cs)
  4647. {
  4648. cs->max.no = cs->curr.norm.mseg.no + cs->curr.norm.sys_alloc.no;
  4649. cs->max.size = cs->curr.norm.mseg.size + cs->curr.norm.sys_alloc.size;
  4650. cs->blocks.max.no = cs->blocks.curr.no;
  4651. cs->blocks.max.size = cs->blocks.curr.size;
  4652. }
  4653. /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
4654.  * Exported functions                                                        *
4655. \*                                                                           */
  4656. Eterm
  4657. erts_alcu_au_info_options(fmtfn_t *print_to_p, void *print_to_arg,
  4658. Uint **hpp, Uint *szp)
  4659. {
  4660. Eterm res = THE_NON_VALUE;
  4661. if (print_to_p) {
  4662. erts_print(*print_to_p,
  4663. print_to_arg,
  4664. #if HAVE_ERTS_MSEG
  4665. "option mmc: %beu\n"
  4666. #endif
  4667. "option ycs: %beu\n"
  4668. "option sac: %s\n",
  4669. #if HAVE_ERTS_MSEG
  4670. max_mseg_carriers,
  4671. #endif
  4672. sys_alloc_carrier_size,
  4673. allow_sys_alloc_carriers ? "true" : "false");
  4674. }
  4675. if (hpp || szp) {
  4676. res = NIL;
  4677. ensure_atoms_initialized(NULL);
  4678. add_2tup(hpp, szp, &res,
  4679. am.sac,
  4680. allow_sys_alloc_carriers ? am_true : am_false);
  4681. add_2tup(hpp, szp, &res,
  4682. am.ycs,
  4683. bld_uint(hpp, szp, sys_alloc_carrier_size));
  4684. #if HAVE_ERTS_MSEG
  4685. add_2tup(hpp, szp, &res,
  4686. am.mmc,
  4687. bld_uint(hpp, szp, max_mseg_carriers));
  4688. #endif
  4689. }
  4690. return res;
  4691. }
  4692. Eterm
  4693. erts_alcu_info_options(Allctr_t *allctr,
  4694. fmtfn_t *print_to_p,
  4695. void *print_to_arg,
  4696. Uint **hpp,
  4697. Uint *szp)
  4698. {
  4699. Eterm res;
  4700. if (hpp || szp)
  4701. ensure_atoms_initialized(allctr);
  4702. if (allctr->thread_safe) {
  4703. erts_allctr_wrapper_pre_lock();
  4704. erts_mtx_lock(&allctr->mutex);
  4705. }
  4706. res = info_options(allctr, print_to_p, print_to_arg, hpp, szp);
  4707. if (allctr->thread_safe) {
  4708. erts_mtx_unlock(&allctr->mutex);
  4709. erts_allctr_wrapper_pre_unlock();
  4710. }
  4711. return res;
  4712. }
  4713. /* ----------------------------------------------------------------------- */
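/*
 * erts_alcu_sz_info() returns only size information (carrier and block
 * sizes, optionally per fix type), which is cheaper to gather than the full
 * erts_alcu_info() below that also includes versions, option settings and
 * call counts.
 */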
  4714. Eterm
  4715. erts_alcu_sz_info(Allctr_t *allctr,
  4716. int internal,
  4717. int begin_max_period,
  4718. fmtfn_t *print_to_p,
  4719. void *print_to_arg,
  4720. Uint **hpp,
  4721. Uint *szp)
  4722. {
  4723. Eterm res, mbcs, sbcs, fix = THE_NON_VALUE;
  4724. Eterm mbcs_pool;
  4725. res = THE_NON_VALUE;
  4726. if (!allctr) {
  4727. if (print_to_p)
  4728. erts_print(*print_to_p, print_to_arg, "false\n");
  4729. if (szp)
  4730. *szp = 0;
  4731. return am_false;
  4732. }
  4733. if (hpp || szp)
  4734. ensure_atoms_initialized(allctr);
  4735. if (allctr->thread_safe) {
  4736. erts_allctr_wrapper_pre_lock();
  4737. erts_mtx_lock(&allctr->mutex);
  4738. }
  4739. ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
  4740. /* Update sbc values not continuously updated */
  4741. allctr->sbcs.blocks.curr.no
  4742. = allctr->sbcs.curr.norm.mseg.no + allctr->sbcs.curr.norm.sys_alloc.no;
  4743. allctr->sbcs.blocks.max.no = allctr->sbcs.max.no;
  4744. update_max_ever_values(&allctr->mbcs);
  4745. update_max_ever_values(&allctr->sbcs);
  4746. if (allctr->fix)
  4747. fix = sz_info_fix(allctr, internal, print_to_p, print_to_arg, hpp, szp);
  4748. mbcs = sz_info_carriers(allctr, &allctr->mbcs, "mbcs ", print_to_p,
  4749. print_to_arg, hpp, szp);
  4750. if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
  4751. mbcs_pool = info_cpool(allctr, 1, "mbcs_pool ", print_to_p,
  4752. print_to_arg, hpp, szp);
  4753. else
  4754. mbcs_pool = THE_NON_VALUE; /* shut up annoying warning... */
  4755. sbcs = sz_info_carriers(allctr, &allctr->sbcs, "sbcs ", print_to_p,
  4756. print_to_arg, hpp, szp);
  4757. if (hpp || szp) {
  4758. res = NIL;
  4759. add_2tup(hpp, szp, &res, am.sbcs, sbcs);
  4760. if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
  4761. add_2tup(hpp, szp, &res, am.mbcs_pool, mbcs_pool);
  4762. add_2tup(hpp, szp, &res, am.mbcs, mbcs);
  4763. add_fix_types(allctr, internal, hpp, szp, &res, fix);
  4764. }
  4765. if (begin_max_period) {
  4766. reset_max_values(&allctr->mbcs);
  4767. reset_max_values(&allctr->sbcs);
  4768. }
  4769. if (allctr->thread_safe) {
  4770. erts_mtx_unlock(&allctr->mutex);
  4771. erts_allctr_wrapper_pre_unlock();
  4772. }
  4773. return res;
  4774. }
  4775. Eterm
  4776. erts_alcu_info(Allctr_t *allctr,
  4777. int internal,
  4778. int begin_max_period,
  4779. fmtfn_t *print_to_p,
  4780. void *print_to_arg,
  4781. Uint **hpp,
  4782. Uint *szp)
  4783. {
  4784. Eterm res, sett, mbcs, sbcs, calls, fix = THE_NON_VALUE;
  4785. Eterm mbcs_pool;
  4786. res = THE_NON_VALUE;
  4787. if (!allctr) {
  4788. if (print_to_p)
  4789. erts_print(*print_to_p, print_to_arg, "false\n");
  4790. if (szp)
  4791. *szp = 0;
  4792. return am_false;
  4793. }
  4794. if (hpp || szp)
  4795. ensure_atoms_initialized(allctr);
  4796. if (allctr->thread_safe) {
  4797. erts_allctr_wrapper_pre_lock();
  4798. erts_mtx_lock(&allctr->mutex);
  4799. }
  4800. ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
  4801. /* Update sbc values not continuously updated */
  4802. allctr->sbcs.blocks.curr.no
  4803. = allctr->sbcs.curr.norm.mseg.no + allctr->sbcs.curr.norm.sys_alloc.no;
  4804. allctr->sbcs.blocks.max.no = allctr->sbcs.max.no;
  4805. update_max_ever_values(&allctr->mbcs);
  4806. update_max_ever_values(&allctr->sbcs);
  4807. if (print_to_p) {
  4808. erts_print(*print_to_p,
  4809. print_to_arg,
  4810. "versions: %s %s\n",
  4811. allctr->vsn_str,
  4812. ERTS_ALCU_VSN_STR);
  4813. }
  4814. sett = info_options(allctr, print_to_p, print_to_arg, hpp, szp);
  4815. if (allctr->fix)
  4816. fix = sz_info_fix(allctr, internal, print_to_p, print_to_arg, hpp, szp);
  4817. mbcs = info_carriers(allctr, &allctr->mbcs, "mbcs ", print_to_p,
  4818. print_to_arg, hpp, szp);
  4819. if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
  4820. mbcs_pool = info_cpool(allctr, 0, "mbcs_pool ", print_to_p,
  4821. print_to_arg, hpp, szp);
  4822. else
  4823. mbcs_pool = THE_NON_VALUE; /* shut up annoying warning... */
  4824. sbcs = info_carriers(allctr, &allctr->sbcs, "sbcs ", print_to_p,
  4825. print_to_arg, hpp, szp);
  4826. calls = info_calls(allctr, print_to_p, print_to_arg, hpp, szp);
  4827. if (hpp || szp) {
  4828. res = NIL;
  4829. add_2tup(hpp, szp, &res, am.calls, calls);
  4830. add_2tup(hpp, szp, &res, am.sbcs, sbcs);
  4831. if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
  4832. add_2tup(hpp, szp, &res, am.mbcs_pool, mbcs_pool);
  4833. add_2tup(hpp, szp, &res, am.mbcs, mbcs);
  4834. add_fix_types(allctr, internal, hpp, szp, &res, fix);
  4835. add_2tup(hpp, szp, &res, am.options, sett);
  4836. add_3tup(hpp, szp, &res,
  4837. am.versions,
  4838. bld_string(hpp, szp, allctr->vsn_str),
4839. bld_string(hpp, szp, ERTS_ALCU_VSN_STR));
  4840. }
  4841. if (begin_max_period) {
  4842. reset_max_values(&allctr->mbcs);
  4843. reset_max_values(&allctr->sbcs);
  4844. }
  4845. if (allctr->thread_safe) {
  4846. erts_mtx_unlock(&allctr->mutex);
  4847. erts_allctr_wrapper_pre_unlock();
  4848. }
  4849. return res;
  4850. }
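/* Report the carrier and block sizes accounted to allocator type 'alloc_no'
 * in this instance's carrier pool statistics. Only meaningful when carrier
 * migration is enabled; otherwise zero is reported. */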
  4851. void
  4852. erts_alcu_foreign_size(Allctr_t *allctr, ErtsAlcType_t alloc_no, AllctrSize_t *size)
  4853. {
  4854. if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
  4855. UWord csz, bsz;
  4856. cpool_read_stat(allctr, alloc_no, NULL, &csz, NULL, &bsz);
  4857. size->carriers = csz;
  4858. size->blocks = bsz;
  4859. } else {
  4860. size->carriers = 0;
  4861. size->blocks = 0;
  4862. }
  4863. }
  4864. void
  4865. erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size, ErtsAlcUFixInfo_t *fi, int fisz)
  4866. {
  4867. if (allctr->thread_safe)
  4868. erts_mtx_lock(&allctr->mutex);
  4869. size->carriers = allctr->mbcs.curr.norm.mseg.size;
  4870. size->carriers += allctr->mbcs.curr.norm.sys_alloc.size;
  4871. size->carriers += allctr->sbcs.curr.norm.mseg.size;
  4872. size->carriers += allctr->sbcs.curr.norm.sys_alloc.size;
  4873. size->blocks = allctr->mbcs.blocks.curr.size;
  4874. size->blocks += allctr->sbcs.blocks.curr.size;
  4875. if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
  4876. UWord csz, bsz;
  4877. cpool_read_stat(allctr, allctr->alloc_no, NULL, &csz, NULL, &bsz);
  4878. size->blocks += bsz;
  4879. size->carriers += csz;
  4880. }
  4881. if (fi) {
  4882. int ix;
  4883. for (ix = 0; ix < fisz; ix++) {
  4884. if (allctr->fix) {
  4885. if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
  4886. fi[ix].allocated += (allctr->fix[ix].type_size
  4887. * allctr->fix[ix].u.cpool.allocated);
  4888. fi[ix].used += (allctr->fix[ix].type_size
  4889. * allctr->fix[ix].u.cpool.used);
  4890. }
  4891. else {
  4892. fi[ix].allocated += (allctr->fix[ix].type_size
  4893. * allctr->fix[ix].u.nocpool.allocated);
  4894. fi[ix].used += (allctr->fix[ix].type_size
  4895. * allctr->fix[ix].u.nocpool.used);
  4896. }
  4897. }
  4898. }
  4899. }
  4900. if (allctr->thread_safe)
  4901. erts_mtx_unlock(&allctr->mutex);
  4902. }
  4903. /* ----------------------------------------------------------------------- */
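/*
 * Allocation entry points. do_erts_alcu_alloc() dispatches a request:
 * fix-sized types go through the fix allocator, requests of at least
 * sbc_threshold bytes get a single block carrier of their own, and
 * everything else is placed in a multiblock carrier via mbc_alloc().
 */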
  4904. static ERTS_INLINE void *
  4905. do_erts_alcu_alloc(ErtsAlcType_t type, Allctr_t *allctr, Uint size)
  4906. {
  4907. void *res;
  4908. ASSERT(initialized);
  4909. ASSERT(allctr);
  4910. ERTS_LC_ASSERT(!allctr->thread_safe
  4911. || erts_lc_mtx_is_locked(&allctr->mutex));
  4912. ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
  4913. /* Reject sizes that can't fit into the header word. */
  4914. if (size > ~BLK_FLG_MASK) {
  4915. return NULL;
  4916. }
  4917. #if ALLOC_ZERO_EQ_NULL
  4918. if (!size)
  4919. return NULL;
  4920. #endif
  4921. INC_CC(allctr->calls.this_alloc);
  4922. if (allctr->fix) {
  4923. if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
  4924. return fix_cpool_alloc(allctr, type, size);
  4925. else
  4926. return fix_nocpool_alloc(allctr, type, size);
  4927. }
  4928. if (size >= allctr->sbc_threshold) {
  4929. Block_t *blk;
  4930. blk = create_carrier(allctr, size, CFLG_SBC);
  4931. res = blk ? BLK2UMEM(blk) : NULL;
  4932. }
  4933. else
  4934. res = mbc_alloc(allctr, size);
  4935. return res;
  4936. }
  4937. void *erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size)
  4938. {
  4939. Allctr_t *allctr = (Allctr_t *) extra;
  4940. void *res;
  4941. ASSERT(!"This is not thread safe");
  4942. res = do_erts_alcu_alloc(type, allctr, size);
  4943. if (allctr->atags && res) {
  4944. set_alloc_tag(allctr, res, determine_alloc_tag(allctr, type));
  4945. }
  4946. DEBUG_CHECK_ALIGNMENT(res);
  4947. return res;
  4948. }
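/*
 * The _ts, _thr_spec and _thr_pref variants below differ only in how the
 * allocator instance is selected and locked: _ts locks the instance's mutex
 * around the operation, _thr_spec picks the scheduler-specific instance for
 * the calling thread, and _thr_pref uses the calling scheduler's preferred
 * instance and also services its delayed-dealloc queue.
 */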
  4949. void *
  4950. erts_alcu_alloc_ts(ErtsAlcType_t type, void *extra, Uint size)
  4951. {
  4952. Allctr_t *allctr = (Allctr_t *) extra;
  4953. alcu_atag_t tag = 0;
  4954. void *res;
  4955. if (allctr->atags) {
  4956. tag = determine_alloc_tag(allctr, type);
  4957. }
  4958. erts_mtx_lock(&allctr->mutex);
  4959. res = do_erts_alcu_alloc(type, allctr, size);
  4960. if (allctr->atags && res) {
  4961. set_alloc_tag(allctr, res, tag);
  4962. }
  4963. erts_mtx_unlock(&allctr->mutex);
  4964. DEBUG_CHECK_ALIGNMENT(res);
  4965. return res;
  4966. }
  4967. void *
  4968. erts_alcu_alloc_thr_spec(ErtsAlcType_t type, void *extra, Uint size)
  4969. {
  4970. ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
  4971. int ix;
  4972. alcu_atag_t tag = 0;
  4973. Allctr_t *allctr;
  4974. void *res;
  4975. ix = ERTS_ALC_GET_THR_IX();
  4976. ASSERT(0 <= ix && ix < tspec->size);
  4977. allctr = tspec->allctr[ix];
  4978. if (allctr->atags) {
  4979. tag = determine_alloc_tag(allctr, type);
  4980. }
  4981. if (allctr->thread_safe)
  4982. erts_mtx_lock(&allctr->mutex);
  4983. res = do_erts_alcu_alloc(type, allctr, size);
  4984. if (allctr->atags && res) {
  4985. set_alloc_tag(allctr, res, tag);
  4986. }
  4987. if (allctr->thread_safe)
  4988. erts_mtx_unlock(&allctr->mutex);
  4989. DEBUG_CHECK_ALIGNMENT(res);
  4990. return res;
  4991. }
  4992. void *
  4993. erts_alcu_alloc_thr_pref(ErtsAlcType_t type, void *extra, Uint size)
  4994. {
  4995. Allctr_t *pref_allctr;
  4996. alcu_atag_t tag = 0;
  4997. void *res;
  4998. pref_allctr = get_pref_allctr(extra);
  4999. if (pref_allctr->atags) {
  5000. tag = determine_alloc_tag(pref_allctr, type);
  5001. }
  5002. if (pref_allctr->thread_safe)
  5003. erts_mtx_lock(&pref_allctr->mutex);
  5004. ASSERT(pref_allctr->dd.use);
  5005. ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1);
  5006. ERTS_ALCU_DBG_CHK_THR_ACCESS(pref_allctr);
  5007. res = do_erts_alcu_alloc(type, pref_allctr, size);
  5008. if (!res && ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1)) {
  5009. /* Cleaned up a bit more; try one more time... */
  5010. res = do_erts_alcu_alloc(type, pref_allctr, size);
  5011. }
  5012. if (pref_allctr->atags && res) {
  5013. set_alloc_tag(pref_allctr, res, tag);
  5014. }
  5015. if (pref_allctr->thread_safe)
  5016. erts_mtx_unlock(&pref_allctr->mutex);
  5017. DEBUG_CHECK_ALIGNMENT(res);
  5018. return res;
  5019. }
  5020. /* ------------------------------------------------------------------------- */
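/*
 * Deallocation entry points. do_erts_alcu_free() mirrors the allocation
 * dispatch: fix-sized types are returned to the fix lists, single block
 * carriers are destroyed outright, and multiblock carrier blocks are freed
 * (and possibly coalesced) via mbc_free().
 */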
  5021. static ERTS_INLINE void
  5022. do_erts_alcu_free(ErtsAlcType_t type, Allctr_t *allctr, void *p,
  5023. Carrier_t **busy_pcrr_pp)
  5024. {
  5025. ASSERT(initialized);
  5026. ASSERT(allctr);
  5027. ERTS_LC_ASSERT(!allctr->thread_safe
  5028. || erts_lc_mtx_is_locked(&allctr->mutex));
  5029. ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
  5030. if (p) {
  5031. INC_CC(allctr->calls.this_free);
  5032. if (ERTS_ALC_IS_FIX_TYPE(type)) {
  5033. if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
  5034. fix_cpool_free(allctr, type, 0, p, busy_pcrr_pp);
  5035. else
  5036. fix_nocpool_free(allctr, type, p);
  5037. }
  5038. else {
  5039. Block_t *blk = UMEM2BLK(p);
  5040. if (IS_SBC_BLK(blk))
  5041. destroy_carrier(allctr, blk, NULL);
  5042. else
  5043. mbc_free(allctr, type, p, busy_pcrr_pp);
  5044. }
  5045. }
  5046. }
  5047. void erts_alcu_free(ErtsAlcType_t type, void *extra, void *p)
  5048. {
  5049. Allctr_t *allctr = (Allctr_t *) extra;
  5050. do_erts_alcu_free(type, allctr, p, NULL);
  5051. }
  5052. void
  5053. erts_alcu_free_ts(ErtsAlcType_t type, void *extra, void *p)
  5054. {
  5055. Allctr_t *allctr = (Allctr_t *) extra;
  5056. erts_mtx_lock(&allctr->mutex);
  5057. do_erts_alcu_free(type, allctr, p, NULL);
  5058. erts_mtx_unlock(&allctr->mutex);
  5059. }
  5060. void
  5061. erts_alcu_free_thr_spec(ErtsAlcType_t type, void *extra, void *p)
  5062. {
  5063. ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
  5064. int ix;
  5065. Allctr_t *allctr;
  5066. ix = ERTS_ALC_GET_THR_IX();
  5067. ASSERT(0 <= ix && ix < tspec->size);
  5068. allctr = tspec->allctr[ix];
  5069. if (allctr->thread_safe)
  5070. erts_mtx_lock(&allctr->mutex);
  5071. do_erts_alcu_free(type, allctr, p, NULL);
  5072. if (allctr->thread_safe)
  5073. erts_mtx_unlock(&allctr->mutex);
  5074. }
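/*
 * In the thread-preferred case the block may be owned by another allocator
 * instance. If so, the deallocation is enqueued on the owning instance's
 * delayed-dealloc queue instead of being performed here.
 */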
  5075. void
  5076. erts_alcu_free_thr_pref(ErtsAlcType_t type, void *extra, void *p)
  5077. {
  5078. if (p) {
  5079. Carrier_t *busy_pcrr_p;
  5080. Allctr_t *pref_allctr, *used_allctr;
  5081. pref_allctr = get_pref_allctr(extra);
  5082. used_allctr = get_used_allctr(pref_allctr, ERTS_ALC_TS_PREF_LOCK_IF_USED,
  5083. p, NULL, &busy_pcrr_p);
  5084. if (pref_allctr != used_allctr) {
  5085. enqueue_dealloc_other_instance(type,
  5086. used_allctr,
  5087. p,
  5088. (used_allctr->dd.ix
  5089. - pref_allctr->dd.ix));
  5090. }
  5091. else {
  5092. ERTS_ALCU_DBG_CHK_THR_ACCESS(used_allctr);
  5093. do_erts_alcu_free(type, used_allctr, p, &busy_pcrr_p);
  5094. clear_busy_pool_carrier(used_allctr, busy_pcrr_p);
  5095. if (pref_allctr->thread_safe)
  5096. erts_mtx_unlock(&pref_allctr->mutex);
  5097. }
  5098. }
  5099. }
  5100. /* ------------------------------------------------------------------------- */
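/*
 * Reallocation. A NULL pointer behaves as an allocation. Blocks that grow
 * past sbc_threshold are moved into a single block carrier; an SBC block
 * that shrinks below the threshold is only moved into a multiblock carrier
 * when moving would reclaim at least sbc_move_threshold (rsbcmt) percent of
 * the carrier, otherwise the carrier is resized in place.
 */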
  5101. static ERTS_INLINE void *
  5102. do_erts_alcu_realloc(ErtsAlcType_t type,
  5103. Allctr_t *allctr,
  5104. void *p,
  5105. Uint size,
  5106. Uint32 alcu_flgs,
  5107. Carrier_t **busy_pcrr_pp)
  5108. {
  5109. Block_t *blk;
  5110. void *res;
  5111. ASSERT(initialized);
  5112. ASSERT(allctr);
  5113. ERTS_LC_ASSERT(!allctr->thread_safe
  5114. || erts_lc_mtx_is_locked(&allctr->mutex));
  5115. ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
  5116. if (!p) {
  5117. res = do_erts_alcu_alloc(type, allctr, size);
  5118. INC_CC(allctr->calls.this_realloc);
  5119. DEC_CC(allctr->calls.this_alloc);
  5120. return res;
  5121. }
  5122. /* Reject sizes that can't fit into the header word. */
  5123. if (size > ~BLK_FLG_MASK) {
  5124. return NULL;
  5125. }
  5126. #if ALLOC_ZERO_EQ_NULL
  5127. if (!size) {
  5128. ASSERT(p);
  5129. do_erts_alcu_free(type, allctr, p, busy_pcrr_pp);
  5130. INC_CC(allctr->calls.this_realloc);
  5131. DEC_CC(allctr->calls.this_free);
  5132. return NULL;
  5133. }
  5134. #endif
  5135. INC_CC(allctr->calls.this_realloc);
  5136. blk = UMEM2BLK(p);
  5137. if (size < allctr->sbc_threshold) {
  5138. if (IS_MBC_BLK(blk))
  5139. res = mbc_realloc(allctr, type, p, size, alcu_flgs, busy_pcrr_pp);
  5140. else {
  5141. Uint used_sz = SBC_HEADER_SIZE + ABLK_HDR_SZ + size;
  5142. Uint crr_sz;
  5143. Uint diff_sz_val;
  5144. Uint crr_sz_val;
  5145. #if HAVE_ERTS_MSEG
  5146. if (IS_SYS_ALLOC_CARRIER(BLK_TO_SBC(blk)))
  5147. #endif
  5148. crr_sz = SYS_ALLOC_CARRIER_CEILING(used_sz);
  5149. #if HAVE_ERTS_MSEG
  5150. else
  5151. crr_sz = ERTS_SACRR_UNIT_CEILING(used_sz);
  5152. #endif
  5153. diff_sz_val = crr_sz - used_sz;
  5154. if (diff_sz_val < (~((Uint) 0) / 100))
  5155. crr_sz_val = crr_sz;
  5156. else {
  5157. /* div both by 128 */
  5158. crr_sz_val = crr_sz >> 7;
  5159. /* A sys_alloc carrier could potentially be
  5160. smaller than 128 bytes (but not likely) */
  5161. if (crr_sz_val == 0)
  5162. goto do_carrier_resize;
  5163. diff_sz_val >>= 7;
  5164. }
  5165. if (100*diff_sz_val < allctr->sbc_move_threshold*crr_sz_val)
  5166. /* Data won't be copied into a new carrier... */
  5167. goto do_carrier_resize;
  5168. else if (alcu_flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
  5169. return NULL;
  5170. res = mbc_alloc(allctr, size);
  5171. if (res) {
  5172. sys_memcpy((void*) res,
  5173. (void*) p,
  5174. MIN(SBC_BLK_SZ(blk) - ABLK_HDR_SZ, size));
  5175. destroy_carrier(allctr, blk, NULL);
  5176. }
  5177. }
  5178. }
  5179. else {
  5180. Block_t *new_blk;
  5181. if(IS_SBC_BLK(blk)) {
  5182. do_carrier_resize:
  5183. new_blk = resize_carrier(allctr, blk, size, CFLG_SBC);
  5184. res = new_blk ? BLK2UMEM(new_blk) : NULL;
  5185. }
  5186. else if (alcu_flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
  5187. return NULL;
  5188. else {
  5189. new_blk = create_carrier(allctr, size, CFLG_SBC);
  5190. if (new_blk) {
  5191. res = BLK2UMEM(new_blk);
  5192. sys_memcpy((void *) res,
  5193. (void *) p,
  5194. MIN(MBC_ABLK_SZ(blk) - ABLK_HDR_SZ, size));
  5195. mbc_free(allctr, type, p, busy_pcrr_pp);
  5196. }
  5197. else
  5198. res = NULL;
  5199. }
  5200. }
  5201. return res;
  5202. }
  5203. void *
  5204. erts_alcu_realloc(ErtsAlcType_t type, void *extra, void *p, Uint size)
  5205. {
  5206. Allctr_t *allctr = (Allctr_t *)extra;
  5207. void *res;
  5208. res = do_erts_alcu_realloc(type, allctr, p, size, 0, NULL);
  5209. DEBUG_CHECK_ALIGNMENT(res);
  5210. if (allctr->atags && res) {
  5211. set_alloc_tag(allctr, res, determine_alloc_tag(allctr, type));
  5212. }
  5213. return res;
  5214. }
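/*
 * The _mv ("move") variants are used when "realloc always moves" (ramv)
 * behaviour is requested. They try to allocate a fresh block and copy the
 * data first, which forces reallocations to move and thereby counteracts
 * fragmentation; only if that allocation fails do they fall back to an
 * ordinary realloc.
 */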
  5215. void *
  5216. erts_alcu_realloc_mv(ErtsAlcType_t type, void *extra, void *p, Uint size)
  5217. {
  5218. Allctr_t *allctr = (Allctr_t *)extra;
  5219. void *res;
  5220. res = do_erts_alcu_alloc(type, allctr, size);
  5221. if (!res)
  5222. res = do_erts_alcu_realloc(type, allctr, p, size, 0, NULL);
  5223. else {
  5224. Block_t *blk;
  5225. size_t cpy_size;
  5226. blk = UMEM2BLK(p);
  5227. cpy_size = BLK_SZ(blk) - ABLK_HDR_SZ;
  5228. if (cpy_size > size)
  5229. cpy_size = size;
  5230. sys_memcpy(res, p, cpy_size);
  5231. do_erts_alcu_free(type, allctr, p, NULL);
  5232. }
  5233. DEBUG_CHECK_ALIGNMENT(res);
  5234. if (allctr->atags && res) {
  5235. set_alloc_tag(allctr, res, determine_alloc_tag(allctr, type));
  5236. }
  5237. return res;
  5238. }
  5239. void *
  5240. erts_alcu_realloc_ts(ErtsAlcType_t type, void *extra, void *ptr, Uint size)
  5241. {
  5242. Allctr_t *allctr = (Allctr_t *) extra;
  5243. alcu_atag_t tag = 0;
  5244. void *res;
  5245. if (allctr->atags) {
  5246. tag = determine_alloc_tag(allctr, type);
  5247. }
  5248. erts_mtx_lock(&allctr->mutex);
  5249. res = do_erts_alcu_realloc(type, allctr, ptr, size, 0, NULL);
  5250. if (allctr->atags && res) {
  5251. set_alloc_tag(allctr, res, tag);
  5252. }
  5253. erts_mtx_unlock(&allctr->mutex);
  5254. DEBUG_CHECK_ALIGNMENT(res);
  5255. return res;
  5256. }
  5257. void *
  5258. erts_alcu_realloc_mv_ts(ErtsAlcType_t type, void *extra, void *p, Uint size)
  5259. {
  5260. Allctr_t *allctr = (Allctr_t *) extra;
  5261. alcu_atag_t tag = 0;
  5262. void *res;
  5263. if (allctr->atags) {
  5264. tag = determine_alloc_tag(allctr, type);
  5265. }
  5266. erts_mtx_lock(&allctr->mutex);
  5267. res = do_erts_alcu_alloc(type, allctr, size);
  5268. if (!res)
  5269. res = do_erts_alcu_realloc(type, allctr, p, size, 0, NULL);
  5270. else {
  5271. Block_t *blk;
  5272. size_t cpy_size;
  5273. blk = UMEM2BLK(p);
  5274. cpy_size = BLK_SZ(blk) - ABLK_HDR_SZ;
  5275. if (cpy_size > size)
  5276. cpy_size = size;
  5277. sys_memcpy(res, p, cpy_size);
  5278. do_erts_alcu_free(type, allctr, p, NULL);
  5279. }
  5280. if (allctr->atags && res) {
  5281. set_alloc_tag(allctr, res, tag);
  5282. }
  5283. erts_mtx_unlock(&allctr->mutex);
  5284. DEBUG_CHECK_ALIGNMENT(res);
  5285. return res;
  5286. }
  5287. void *
  5288. erts_alcu_realloc_thr_spec(ErtsAlcType_t type, void *extra,
  5289. void *ptr, Uint size)
  5290. {
  5291. ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
  5292. int ix;
  5293. alcu_atag_t tag = 0;
  5294. Allctr_t *allctr;
  5295. void *res;
  5296. ix = ERTS_ALC_GET_THR_IX();
  5297. ASSERT(0 <= ix && ix < tspec->size);
  5298. allctr = tspec->allctr[ix];
  5299. if (allctr->atags) {
  5300. tag = determine_alloc_tag(allctr, type);
  5301. }
  5302. if (allctr->thread_safe)
  5303. erts_mtx_lock(&allctr->mutex);
  5304. res = do_erts_alcu_realloc(type, allctr, ptr, size, 0, NULL);
  5305. if (allctr->atags && res) {
  5306. set_alloc_tag(allctr, res, tag);
  5307. }
  5308. if (allctr->thread_safe)
  5309. erts_mtx_unlock(&allctr->mutex);
  5310. DEBUG_CHECK_ALIGNMENT(res);
  5311. return res;
  5312. }
  5313. void *
  5314. erts_alcu_realloc_mv_thr_spec(ErtsAlcType_t type, void *extra,
  5315. void *ptr, Uint size)
  5316. {
  5317. ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
  5318. int ix;
  5319. alcu_atag_t tag = 0;
  5320. Allctr_t *allctr;
  5321. void *res;
  5322. ix = ERTS_ALC_GET_THR_IX();
  5323. ASSERT(0 <= ix && ix < tspec->size);
  5324. allctr = tspec->allctr[ix];
  5325. if (allctr->atags) {
  5326. tag = determine_alloc_tag(allctr, type);
  5327. }
  5328. if (allctr->thread_safe)
  5329. erts_mtx_lock(&allctr->mutex);
  5330. res = do_erts_alcu_alloc(type, allctr, size);
  5331. if (!res) {
  5332. res = do_erts_alcu_realloc(type, allctr, ptr, size, 0, NULL);
  5333. }
  5334. else {
  5335. Block_t *blk;
  5336. size_t cpy_size;
  5337. blk = UMEM2BLK(ptr);
  5338. cpy_size = BLK_SZ(blk) - ABLK_HDR_SZ;
  5339. if (cpy_size > size)
  5340. cpy_size = size;
  5341. sys_memcpy(res, ptr, cpy_size);
  5342. do_erts_alcu_free(type, allctr, ptr, NULL);
  5343. }
  5344. if (allctr->atags && res) {
  5345. set_alloc_tag(allctr, res, tag);
  5346. }
  5347. if (allctr->thread_safe)
  5348. erts_mtx_unlock(&allctr->mutex);
  5349. DEBUG_CHECK_ALIGNMENT(res);
  5350. return res;
  5351. }
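/*
 * Thread-preferred realloc: when the block is owned by the preferred
 * (calling scheduler's) instance and we are not forced to move, realloc in
 * place. Otherwise allocate from the preferred instance, copy the old
 * contents, and either free the old block locally or enqueue a delayed
 * deallocation on the instance that owns it.
 */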
  5352. static ERTS_INLINE void *
  5353. realloc_thr_pref(ErtsAlcType_t type, Allctr_t *pref_allctr, void *p, Uint size,
  5354. int force_move)
  5355. {
  5356. void *res;
  5357. Allctr_t *used_allctr;
  5358. UWord old_user_size;
  5359. Carrier_t *busy_pcrr_p;
  5360. alcu_atag_t tag = 0;
  5361. int retried;
  5362. if (pref_allctr->atags) {
  5363. tag = determine_alloc_tag(pref_allctr, type);
  5364. }
  5365. if (pref_allctr->thread_safe)
  5366. erts_mtx_lock(&pref_allctr->mutex);
  5367. ASSERT(pref_allctr->dd.use);
  5368. ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1);
  5369. retried = 0;
  5370. restart:
  5371. used_allctr = get_used_allctr(pref_allctr, ERTS_ALC_TS_PREF_LOCK_NO,
  5372. p, &old_user_size, &busy_pcrr_p);
  5373. ASSERT(used_allctr && pref_allctr);
  5374. if (!force_move && used_allctr == pref_allctr) {
  5375. ERTS_ALCU_DBG_CHK_THR_ACCESS(used_allctr);
  5376. res = do_erts_alcu_realloc(type,
  5377. used_allctr,
  5378. p,
  5379. size,
  5380. 0,
  5381. &busy_pcrr_p);
  5382. clear_busy_pool_carrier(used_allctr, busy_pcrr_p);
  5383. if (!res && !retried && ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1)) {
  5384. /* Cleaned up a bit more; try one more time... */
  5385. retried = 1;
  5386. goto restart;
  5387. }
  5388. if (pref_allctr->atags && res) {
  5389. set_alloc_tag(pref_allctr, res, tag);
  5390. }
  5391. if (pref_allctr->thread_safe)
  5392. erts_mtx_unlock(&pref_allctr->mutex);
  5393. }
  5394. else {
  5395. res = do_erts_alcu_alloc(type, pref_allctr, size);
  5396. if (!res)
  5397. goto unlock_ts_return;
  5398. else {
  5399. if (pref_allctr->atags) {
  5400. set_alloc_tag(pref_allctr, res, tag);
  5401. }
  5402. DEBUG_CHECK_ALIGNMENT(res);
  5403. if (used_allctr != pref_allctr) {
  5404. if (pref_allctr->thread_safe)
  5405. erts_mtx_unlock(&pref_allctr->mutex);
  5406. sys_memcpy(res, p, MIN(size, old_user_size));
  5407. enqueue_dealloc_other_instance(type,
  5408. used_allctr,
  5409. p,
  5410. (used_allctr->dd.ix
  5411. - pref_allctr->dd.ix));
  5412. }
  5413. else {
  5414. sys_memcpy(res, p, MIN(size, old_user_size));
  5415. do_erts_alcu_free(type, used_allctr, p, &busy_pcrr_p);
  5416. ASSERT(pref_allctr == used_allctr);
  5417. clear_busy_pool_carrier(used_allctr, busy_pcrr_p);
  5418. unlock_ts_return:
  5419. if (pref_allctr->thread_safe)
  5420. erts_mtx_unlock(&pref_allctr->mutex);
  5421. }
  5422. }
  5423. }
  5424. DEBUG_CHECK_ALIGNMENT(res);
  5425. return res;
  5426. }
  5427. void *
  5428. erts_alcu_realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size)
  5429. {
  5430. if (p) {
  5431. Allctr_t *pref_allctr = get_pref_allctr(extra);
  5432. return realloc_thr_pref(type, pref_allctr, p, size, 0);
  5433. }
  5434. return erts_alcu_alloc_thr_pref(type, extra, size);
  5435. }
  5436. void *
  5437. erts_alcu_realloc_mv_thr_pref(ErtsAlcType_t type, void *extra,
  5438. void *p, Uint size)
  5439. {
  5440. if (p) {
  5441. Allctr_t *pref_allctr = get_pref_allctr(extra);
  5442. return realloc_thr_pref(type, pref_allctr, p, size, 1);
  5443. }
  5444. return erts_alcu_alloc_thr_pref(type, extra, size);
  5445. }
  5446. static Uint adjust_sbct(Allctr_t* allctr, Uint sbct)
  5447. {
  5448. #ifndef ARCH_64
  5449. if (sbct > 0) {
  5450. Uint max_mbc_block_sz = UNIT_CEILING(sbct - 1 + ABLK_HDR_SZ);
  5451. if (max_mbc_block_sz + UNIT_FLOOR(allctr->min_block_size - 1) > MBC_ABLK_SZ_MASK
  5452. || max_mbc_block_sz < sbct) { /* wrap around */
  5453. /*
  5454. * By limiting sbc_threshold to (hard limit - min_block_size)
  5455. * we avoid having to split off free "residue blocks"
  5456. * smaller than min_block_size.
  5457. */
  5458. max_mbc_block_sz = MBC_ABLK_SZ_MASK - UNIT_FLOOR(allctr->min_block_size - 1);
  5459. sbct = max_mbc_block_sz - ABLK_HDR_SZ + 1;
  5460. }
  5461. }
  5462. #endif
  5463. return sbct;
  5464. }
  5465. int erts_alcu_try_set_dyn_param(Allctr_t* allctr, Eterm param, Uint value)
  5466. {
  5467. const Uint MIN_DYN_SBCT = 4000; /* a lame catastrophe prevention */
  5468. if (param == am_sbct && value >= MIN_DYN_SBCT) {
  5469. allctr->sbc_threshold = adjust_sbct(allctr, value);
  5470. return 1;
  5471. }
  5472. return 0;
  5473. }
  5474. /* ------------------------------------------------------------------------- */
  5475. int
  5476. erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
  5477. {
  5478. /* erts_alcu_start assumes that allctr has been zeroed */
  5479. int i;
  5480. if (((UWord)allctr & ERTS_CRR_ALCTR_FLG_MASK) != 0) {
  5481. erts_exit(ERTS_ABORT_EXIT, "%s:%d:erts_alcu_start: Alignment error\n",
  5482. __FILE__, __LINE__);
  5483. }
  5484. /* The various fields packed into the header word must not overlap */
  5485. ERTS_CT_ASSERT(!(MBC_ABLK_OFFSET_MASK & MBC_ABLK_SZ_MASK));
  5486. ERTS_CT_ASSERT(!(MBC_ABLK_OFFSET_MASK & BLK_FLG_MASK));
  5487. ERTS_CT_ASSERT(!(MBC_ABLK_SZ_MASK & BLK_FLG_MASK));
  5488. ERTS_CT_ASSERT(!(MBC_FBLK_SZ_MASK & BLK_FLG_MASK));
  5489. ERTS_CT_ASSERT(!(SBC_BLK_SZ_MASK & BLK_FLG_MASK));
  5490. ERTS_CT_ASSERT(!(CRR_SZ_MASK & CRR_FLG_MASK));
  5491. if (!initialized)
  5492. goto error;
  5493. #if HAVE_ERTS_MSEG
  5494. sys_memcpy((void *) &allctr->mseg_opt,
  5495. (void *) &erts_mseg_default_opt,
  5496. sizeof(ErtsMsegOpt_t));
  5497. if (init->tspec || init->tpref)
  5498. allctr->mseg_opt.sched_spec = 1;
  5499. #endif /* HAVE_ERTS_MSEG */
  5500. allctr->name_prefix = init->name_prefix;
  5501. if (!allctr->name_prefix)
  5502. goto error;
  5503. allctr->ix = init->ix;
  5504. allctr->alloc_no = init->alloc_no;
  5505. allctr->alloc_strat = init->alloc_strat;
  5506. ASSERT(allctr->alloc_no >= ERTS_ALC_A_MIN &&
  5507. allctr->alloc_no <= ERTS_ALC_A_MAX);
  5508. if (allctr->alloc_no < ERTS_ALC_A_MIN
  5509. || ERTS_ALC_A_MAX < allctr->alloc_no)
  5510. allctr->alloc_no = ERTS_ALC_A_INVALID;
  5511. if (!allctr->vsn_str)
  5512. goto error;
  5513. allctr->name.alloc = THE_NON_VALUE;
  5514. allctr->name.realloc = THE_NON_VALUE;
  5515. allctr->name.free = THE_NON_VALUE;
  5516. if (init->tspec)
  5517. allctr->t = init->tspec;
  5518. else if (init->tpref)
  5519. allctr->t = init->tpref;
  5520. else
  5521. allctr->t = 0;
  5522. allctr->ramv = init->ramv;
  5523. allctr->atags = init->atags;
  5524. allctr->main_carrier_size = init->mmbcs;
  5525. #if HAVE_ERTS_MSEG
  5526. allctr->mseg_opt.abs_shrink_th = init->asbcst;
  5527. allctr->mseg_opt.rel_shrink_th = init->rsbcst;
  5528. #endif
  5529. allctr->sbc_move_threshold = init->rsbcmt;
  5530. allctr->mbc_move_threshold = init->rmbcmt;
  5531. #if HAVE_ERTS_MSEG
  5532. allctr->max_mseg_sbcs = init->mmsbc;
  5533. # if ERTS_SUPER_ALIGNED_MSEG_ONLY
  5534. allctr->max_mseg_mbcs = ~(Uint)0;
  5535. # else
  5536. allctr->max_mseg_mbcs = init->mmmbc;
  5537. # endif
  5538. #endif
  5539. allctr->largest_mbc_size = MAX(init->lmbcs, init->smbcs);
  5540. #ifndef ARCH_64
  5541. if (allctr->largest_mbc_size > MBC_SZ_MAX_LIMIT) {
  5542. allctr->largest_mbc_size = MBC_SZ_MAX_LIMIT;
  5543. }
  5544. #endif
  5545. allctr->smallest_mbc_size = init->smbcs;
  5546. allctr->mbc_growth_stages = MAX(1, init->mbcgs);
  5547. if (allctr->min_block_size < ABLK_HDR_SZ)
  5548. goto error;
  5549. allctr->min_block_size = UNIT_CEILING(allctr->min_block_size
  5550. + sizeof(FreeBlkFtr_t));
  5551. if (init->tpref) {
  5552. Uint sz = ABLK_HDR_SZ;
  5553. sz += sizeof(ErtsAllctrDDBlock_t);
  5554. sz = UNIT_CEILING(sz);
  5555. if (sz > allctr->min_block_size)
  5556. allctr->min_block_size = sz;
  5557. }
  5558. allctr->cpool.pooled_tree = NULL;
  5559. allctr->cpool.dc_list.first = NULL;
  5560. allctr->cpool.dc_list.last = NULL;
  5561. allctr->cpool.abandon_limit = 0;
  5562. allctr->cpool.disable_abandon = 0;
  5563. for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
  5564. erts_atomic_init_nob(&allctr->cpool.stat.blocks_size[i], 0);
  5565. erts_atomic_init_nob(&allctr->cpool.stat.no_blocks[i], 0);
  5566. }
  5567. erts_atomic_init_nob(&allctr->cpool.stat.carriers_size, 0);
  5568. erts_atomic_init_nob(&allctr->cpool.stat.no_carriers, 0);
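/* Carrier migration (the abandoned carrier pool) is only enabled for
 * instances that are not thread safe and that configure both a utilization
 * limit (acul) and an in-pool limit (acnl). */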
  5569. if (!init->ts && init->acul && init->acnl) {
  5570. ASSERT(allctr->add_mbc);
  5571. ASSERT(allctr->remove_mbc);
  5572. ASSERT(allctr->largest_fblk_in_mbc);
  5573. ASSERT(allctr->first_fblk_in_mbc);
  5574. ASSERT(allctr->next_fblk_in_mbc);
  5575. allctr->cpool.util_limit = init->acul;
  5576. allctr->cpool.in_pool_limit = init->acnl;
  5577. allctr->cpool.fblk_min_limit = init->acfml;
  5578. if (allctr->alloc_strat == ERTS_ALC_S_FIRSTFIT) {
  5579. allctr->cpool.sentinel = &firstfit_carrier_pool.sentinel;
  5580. }
  5581. else if (allctr->alloc_no != ERTS_ALC_A_TEST) {
  5582. ERTS_INTERNAL_ERROR("Impossible carrier migration config.");
  5583. }
  5584. }
  5585. else {
  5586. allctr->cpool.util_limit = 0;
  5587. allctr->cpool.in_pool_limit = 0;
  5588. allctr->cpool.fblk_min_limit = 0;
  5589. }
  5590. /* The invasive tests don't really care whether the pool is enabled or not,
  5591. * so we need to set this unconditionally for this allocator type. */
  5592. if (allctr->alloc_no == ERTS_ALC_A_TEST) {
  5593. allctr->cpool.sentinel = &test_carrier_pool.sentinel;
  5594. }
  5595. allctr->sbc_threshold = adjust_sbct(allctr, init->sbct);
  5596. #if HAVE_ERTS_MSEG
  5597. if (allctr->mseg_opt.abs_shrink_th > ~((UWord) 0) / 100)
  5598. allctr->mseg_opt.abs_shrink_th = ~((UWord) 0) / 100;
  5599. #endif
  5600. if (init->ts) {
  5601. allctr->thread_safe = 1;
  5602. erts_mtx_init(&allctr->mutex, "alcu_allocator", make_small(allctr->alloc_no),
  5603. ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
  5604. #ifdef DEBUG
  5605. allctr->debug.saved_tid = 0;
  5606. #endif
  5607. }
  5608. if(!allctr->get_free_block
  5609. || !allctr->link_free_block
  5610. || !allctr->unlink_free_block
  5611. || !allctr->info_options)
  5612. goto error;
  5613. if (!allctr->get_next_mbc_size)
  5614. allctr->get_next_mbc_size = get_next_mbc_size;
  5615. if (allctr->mbc_header_size < sizeof(Carrier_t))
  5616. goto error;
  5617. allctr->dd.use = 0;
  5618. if (init->tpref) {
  5619. allctr->dd.use = 1;
  5620. init_dd_queue(&allctr->dd.q);
  5621. allctr->dd.ix = init->ix;
  5622. }
  5623. allctr->mbc_header_size = (UNIT_CEILING(allctr->mbc_header_size
  5624. + ABLK_HDR_SZ)
  5625. - ABLK_HDR_SZ);
  5626. if (init->sys_alloc) {
  5627. ASSERT(init->sys_realloc && init->sys_dealloc);
  5628. allctr->sys_alloc = init->sys_alloc;
  5629. allctr->sys_realloc = init->sys_realloc;
  5630. allctr->sys_dealloc = init->sys_dealloc;
  5631. }
  5632. else {
  5633. ASSERT(!init->sys_realloc && !init->sys_dealloc);
  5634. allctr->sys_alloc = &erts_alcu_sys_alloc;
  5635. allctr->sys_realloc = &erts_alcu_sys_realloc;
  5636. allctr->sys_dealloc = &erts_alcu_sys_dealloc;
  5637. }
  5638. allctr->try_set_dyn_param = &erts_alcu_try_set_dyn_param;
  5639. #if HAVE_ERTS_MSEG
  5640. if (init->mseg_alloc) {
  5641. ASSERT(init->mseg_realloc && init->mseg_dealloc);
  5642. allctr->mseg_alloc = init->mseg_alloc;
  5643. allctr->mseg_realloc = init->mseg_realloc;
  5644. allctr->mseg_dealloc = init->mseg_dealloc;
  5645. allctr->mseg_mmapper = init->mseg_mmapper;
  5646. }
  5647. else {
  5648. ASSERT(!init->mseg_realloc && !init->mseg_dealloc);
  5649. allctr->mseg_alloc = &erts_alcu_mseg_alloc;
  5650. allctr->mseg_realloc = &erts_alcu_mseg_realloc;
  5651. allctr->mseg_dealloc = &erts_alcu_mseg_dealloc;
  5652. }
  5653. /* If a custom carrier alloc function is specified, make sure it's used */
  5654. if (init->mseg_alloc && !init->sys_alloc) {
  5655. allctr->crr_set_flgs = CFLG_FORCE_MSEG;
  5656. allctr->crr_clr_flgs = CFLG_FORCE_SYS_ALLOC;
  5657. }
  5658. else if (!init->mseg_alloc && init->sys_alloc) {
  5659. allctr->crr_set_flgs = CFLG_FORCE_SYS_ALLOC;
  5660. allctr->crr_clr_flgs = CFLG_FORCE_MSEG;
  5661. }
  5662. #endif
  5663. if (allctr->main_carrier_size) {
  5664. Block_t *blk;
  5665. blk = create_carrier(allctr,
  5666. allctr->main_carrier_size,
  5667. (ERTS_SUPER_ALIGNED_MSEG_ONLY
  5668. ? CFLG_FORCE_MSEG : CFLG_FORCE_SYS_ALLOC)
  5669. | CFLG_MBC
  5670. | CFLG_FORCE_SIZE
  5671. | CFLG_NO_CPOOL
  5672. | CFLG_MAIN_CARRIER);
  5673. if (!blk) {
  5674. if (allctr->thread_safe)
  5675. erts_mtx_destroy(&allctr->mutex);
  5676. erts_exit(ERTS_ABORT_EXIT,
  5677. "Failed to create main carrier for %salloc\n",
  5678. init->name_prefix);
  5679. }
  5680. (*allctr->link_free_block)(allctr, blk);
  5681. HARD_CHECK_BLK_CARRIER(allctr, blk);
  5682. }
  5683. if (init->fix) {
  5684. int i;
  5685. allctr->fix = init->fix;
  5686. allctr->fix_shrink_scheduled = 0;
  5687. for (i = 0; i < ERTS_ALC_NO_FIXED_SIZES; i++) {
  5688. allctr->fix[i].type_size = init->fix_type_size[i];
  5689. allctr->fix[i].type = ERTS_ALC_N2T(i + ERTS_ALC_N_MIN_A_FIXED_SIZE);
  5690. allctr->fix[i].list_size = 0;
  5691. allctr->fix[i].list = NULL;
  5692. if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
  5693. allctr->fix[i].u.cpool.min_list_size = 0;
  5694. allctr->fix[i].u.cpool.shrink_list = 0;
  5695. allctr->fix[i].u.cpool.allocated = 0;
  5696. allctr->fix[i].u.cpool.used = 0;
  5697. }
  5698. else {
  5699. allctr->fix[i].u.nocpool.max_used = 0;
  5700. allctr->fix[i].u.nocpool.limit = 0;
  5701. allctr->fix[i].u.nocpool.allocated = 0;
  5702. allctr->fix[i].u.nocpool.used = 0;
  5703. }
  5704. }
  5705. }
  5706. return 1;
  5707. error:
  5708. if (allctr->thread_safe)
  5709. erts_mtx_destroy(&allctr->mutex);
  5710. return 0;
  5711. }
  5712. /* ------------------------------------------------------------------------- */
  5713. void
  5714. erts_alcu_stop(Allctr_t *allctr)
  5715. {
  5716. allctr->stopped = 1;
  5717. while (allctr->sbc_list.first)
  5718. destroy_carrier(allctr, SBC2BLK(allctr, allctr->sbc_list.first), NULL);
  5719. while (allctr->mbc_list.first)
  5720. destroy_carrier(allctr, MBC_TO_FIRST_BLK(allctr, allctr->mbc_list.first), NULL);
  5721. if (allctr->thread_safe)
  5722. erts_mtx_destroy(&allctr->mutex);
  5723. }
  5724. /* ------------------------------------------------------------------------- */
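/*
 * Global, once-per-VM initialization: sets up the sentinels of the shared
 * carrier pools and global parameters such as the sys_alloc carrier size.
 */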
  5725. void
  5726. erts_alcu_init(AlcUInit_t *init)
  5727. {
  5728. ErtsAlcCPoolData_t *sentinel;
  5729. sentinel = &firstfit_carrier_pool.sentinel;
  5730. erts_atomic_init_nob(&sentinel->next, (erts_aint_t) sentinel);
  5731. erts_atomic_init_nob(&sentinel->prev, (erts_aint_t) sentinel);
  5732. sentinel = &test_carrier_pool.sentinel;
  5733. erts_atomic_init_nob(&sentinel->next, (erts_aint_t) sentinel);
  5734. erts_atomic_init_nob(&sentinel->prev, (erts_aint_t) sentinel);
  5735. ERTS_CT_ASSERT(SBC_BLK_SZ_MASK == MBC_FBLK_SZ_MASK); /* see BLK_SZ */
  5736. #if HAVE_ERTS_MSEG
  5737. ASSERT(erts_mseg_unit_size() == ERTS_SACRR_UNIT_SZ);
  5738. max_mseg_carriers = init->mmc;
  5739. sys_alloc_carrier_size = ERTS_SACRR_UNIT_CEILING(init->ycs);
  5740. #else /* #if HAVE_ERTS_MSEG */
  5741. sys_alloc_carrier_size = ((init->ycs + 4095) / 4096) * 4096;
  5742. #endif
  5743. allow_sys_alloc_carriers = init->sac;
  5744. sys_page_size = erts_sys_get_page_size();
  5745. #ifdef DEBUG
  5746. carrier_alignment = sizeof(Unit_t);
  5747. #endif
  5748. erts_mtx_init(&init_atoms_mtx, "alcu_init_atoms", NIL,
  5749. ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
  5750. atoms_initialized = 0;
  5751. initialized = 1;
  5752. }
  5753. /* ------------------------------------------------------------------------- */
5754. /* Allocation histograms and carrier information are gathered by walking through
  5755. * all carriers associated with each allocator instance. This is done as
  5756. * aux_yield_work on the scheduler that owns each instance.
  5757. *
  5758. * Yielding is implemented by temporarily inserting a "dummy carrier" at the
  5759. * last position. It's permanently "busy" so it won't get picked up by someone
  5760. * else when in the carrier pool, and we never make the employer aware of it
  5761. * through callbacks so we can't accidentally allocate on it.
  5762. *
  5763. * Plain malloc/free is used to guarantee we won't allocate with the allocator
  5764. * we're scanning. */
  5765. /* Yield between carriers once this many blocks have been processed. Note that
  5766. * a single carrier scan may exceed this figure. */
  5767. #ifndef DEBUG
  5768. #define BLOCKSCAN_REDUCTIONS (8000)
  5769. #else
  5770. #define BLOCKSCAN_REDUCTIONS (400)
  5771. #endif
  5772. /* Abort a single carrier scan after this many blocks to prevent really large
  5773. * MBCs from blocking forever. */
  5774. #define BLOCKSCAN_BAILOUT_THRESHOLD (16000)
  5775. typedef struct alcu_blockscan {
  5776. /* A per-scheduler list used when multiple scans have been queued. The
  5777. * current scanner will always run until completion/abort before moving on
  5778. * to the next. */
  5779. struct alcu_blockscan *scanner_queue;
  5780. Allctr_t *allocator;
  5781. Process *process;
  5782. int (*current_op)(struct alcu_blockscan *scanner);
  5783. int (*next_op)(struct alcu_blockscan *scanner);
  5784. int reductions;
  5785. ErtsAlcCPoolData_t *cpool_cursor;
  5786. CarrierList_t *current_clist;
  5787. Carrier_t *clist_cursor;
  5788. Carrier_t dummy_carrier;
  5789. /* Called if the process that started this job dies before we're done. */
  5790. void (*abort)(void *user_data);
  5791. /* Called on each carrier. The callback must return the number of blocks
  5792. * scanned to yield properly between carriers.
  5793. *
  5794. * Note that it's not possible to "yield back" into a carrier. */
  5795. int (*scan)(Allctr_t *, void *user_data, Carrier_t *);
  5796. /* Called when all carriers have been scanned. The callback may return
  5797. * non-zero to yield. */
  5798. int (*finish)(void *user_data);
  5799. void *user_data;
  5800. } blockscan_t;
  5801. static Carrier_t *blockscan_restore_clist_cursor(blockscan_t *state)
  5802. {
  5803. Carrier_t *cursor = state->clist_cursor;
  5804. ASSERT(state->clist_cursor == (state->current_clist)->first ||
  5805. state->clist_cursor == &state->dummy_carrier);
  5806. if (cursor == &state->dummy_carrier) {
  5807. cursor = cursor->next;
  5808. unlink_carrier(state->current_clist, state->clist_cursor);
  5809. }
  5810. return cursor;
  5811. }
  5812. static void blockscan_save_clist_cursor(blockscan_t *state, Carrier_t *after)
  5813. {
  5814. ASSERT(state->clist_cursor == (state->current_clist)->first ||
  5815. state->clist_cursor == &state->dummy_carrier);
  5816. state->clist_cursor = &state->dummy_carrier;
  5817. (state->clist_cursor)->next = after->next;
  5818. (state->clist_cursor)->prev = after;
  5819. relink_carrier(state->current_clist, state->clist_cursor);
  5820. }
  5821. static int blockscan_clist_yielding(blockscan_t *state)
  5822. {
  5823. Carrier_t *cursor = blockscan_restore_clist_cursor(state);
  5824. if (ERTS_PROC_IS_EXITING(state->process)) {
  5825. return 0;
  5826. }
  5827. while (cursor) {
  5828. /* Skip dummy carriers inserted by another (concurrent) block scan.
  5829. * This can happen when scanning thread-safe allocators from multiple
  5830. * schedulers. */
  5831. if (CARRIER_SZ(cursor) > 0) {
  5832. int blocks_scanned = state->scan(state->allocator,
  5833. state->user_data,
  5834. cursor);
  5835. state->reductions -= blocks_scanned;
  5836. if (state->reductions <= 0) {
  5837. blockscan_save_clist_cursor(state, cursor);
  5838. return 1;
  5839. }
  5840. }
  5841. cursor = cursor->next;
  5842. }
  5843. return 0;
  5844. }
  5845. static ErtsAlcCPoolData_t *blockscan_restore_cpool_cursor(blockscan_t *state)
  5846. {
  5847. ErtsAlcCPoolData_t *cursor;
  5848. cursor = cpool_aint2cpd(cpool_read(&(state->cpool_cursor)->next));
  5849. if (state->cpool_cursor == &state->dummy_carrier.cpool) {
  5850. cpool_delete(state->allocator, state->allocator, &state->dummy_carrier);
  5851. }
  5852. return cursor;
  5853. }
  5854. static void blockscan_save_cpool_cursor(blockscan_t *state,
  5855. ErtsAlcCPoolData_t *after)
  5856. {
  5857. ErtsAlcCPoolData_t *dummy_carrier, *prev_carrier, *next_carrier;
  5858. dummy_carrier = &state->dummy_carrier.cpool;
  5859. next_carrier = cpool_aint2cpd(cpool_mod_mark(&after->next));
  5860. prev_carrier = cpool_aint2cpd(cpool_mod_mark(&next_carrier->prev));
  5861. cpool_init(&dummy_carrier->next, (erts_aint_t)next_carrier);
  5862. cpool_init(&dummy_carrier->prev, (erts_aint_t)prev_carrier);
  5863. cpool_set_mod_marked(&prev_carrier->next,
  5864. (erts_aint_t)dummy_carrier,
  5865. (erts_aint_t)next_carrier);
  5866. cpool_set_mod_marked(&next_carrier->prev,
  5867. (erts_aint_t)dummy_carrier,
  5868. (erts_aint_t)prev_carrier);
  5869. state->cpool_cursor = dummy_carrier;
  5870. }
  5871. static int blockscan_cpool_yielding(blockscan_t *state)
  5872. {
  5873. ErtsAlcCPoolData_t *sentinel, *cursor;
  5874. sentinel = (state->allocator)->cpool.sentinel;
  5875. cursor = blockscan_restore_cpool_cursor(state);
  5876. if (ERTS_PROC_IS_EXITING(state->process)) {
  5877. return 0;
  5878. }
  5879. while (cursor != sentinel) {
  5880. Carrier_t *carrier;
  5881. erts_aint_t exp;
  5882. /* When a deallocation happens on a pooled carrier it will be routed to
  5883. * its owner, so the only way to be sure that it isn't modified while
  5884. * scanning is to skip all carriers that aren't ours. The deallocations
  5885. * deferred to us will get handled when we're done. */
  5886. while (cursor->orig_allctr != state->allocator) {
  5887. cursor = cpool_aint2cpd(cpool_read(&cursor->next));
  5888. if (cursor == sentinel) {
  5889. return 0;
  5890. }
  5891. }
  5892. carrier = ErtsContainerStruct(cursor, Carrier_t, cpool);
  5893. exp = erts_atomic_read_rb(&carrier->allctr);
  5894. if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) {
  5895. ASSERT(state->allocator == (Allctr_t*)(exp & ~ERTS_CRR_ALCTR_FLG_MASK));
  5896. ASSERT(!(exp & ERTS_CRR_ALCTR_FLG_BUSY));
  5897. if (erts_atomic_cmpxchg_acqb(&carrier->allctr,
  5898. exp | ERTS_CRR_ALCTR_FLG_BUSY,
  5899. exp) == exp) {
  5900. /* Skip dummy carriers inserted by another (concurrent) block
  5901. * scan. This can happen when scanning thread-safe allocators
  5902. * from multiple schedulers. */
  5903. if (CARRIER_SZ(carrier) > 0) {
  5904. int blocks_scanned = state->scan(state->allocator,
  5905. state->user_data,
  5906. carrier);
  5907. state->reductions -= blocks_scanned;
  5908. if (state->reductions <= 0) {
  5909. blockscan_save_cpool_cursor(state, cursor);
  5910. erts_atomic_set_relb(&carrier->allctr, exp);
  5911. return 1;
  5912. }
  5913. }
  5914. erts_atomic_set_relb(&carrier->allctr, exp);
  5915. }
  5916. }
  5917. cursor = cpool_aint2cpd(cpool_read(&cursor->next));
  5918. }
  5919. return 0;
  5920. }
  5921. static int blockscan_yield_helper(blockscan_t *state,
  5922. int (*yielding_op)(blockscan_t*))
  5923. {
  5924. /* Note that we don't check whether to abort here; only yielding_op knows
  5925. * whether the carrier is still in the list/pool. */
  5926. if ((state->allocator)->thread_safe) {
  5927. /* Locked scans have to be as short as possible. */
  5928. state->reductions = 1;
  5929. erts_mtx_lock(&(state->allocator)->mutex);
  5930. } else {
  5931. state->reductions = BLOCKSCAN_REDUCTIONS;
  5932. }
  5933. if (yielding_op(state)) {
  5934. state->next_op = state->current_op;
  5935. }
  5936. if ((state->allocator)->thread_safe) {
  5937. erts_mtx_unlock(&(state->allocator)->mutex);
  5938. }
  5939. return 1;
  5940. }
  5941. /* */
  5942. static int blockscan_finish(blockscan_t *state)
  5943. {
  5944. if (ERTS_PROC_IS_EXITING(state->process)) {
  5945. state->abort(state->user_data);
  5946. return 0;
  5947. }
  5948. state->current_op = blockscan_finish;
  5949. return state->finish(state->user_data);
  5950. }
  5951. static int blockscan_sweep_sbcs(blockscan_t *state)
  5952. {
  5953. if (state->current_op != blockscan_sweep_sbcs) {
  5954. SET_CARRIER_HDR(&state->dummy_carrier, 0, SCH_SBC, state->allocator);
  5955. state->current_clist = &(state->allocator)->sbc_list;
  5956. state->clist_cursor = (state->current_clist)->first;
  5957. }
  5958. state->current_op = blockscan_sweep_sbcs;
  5959. state->next_op = blockscan_finish;
  5960. return blockscan_yield_helper(state, blockscan_clist_yielding);
  5961. }
  5962. static int blockscan_sweep_mbcs(blockscan_t *state)
  5963. {
  5964. if (state->current_op != blockscan_sweep_mbcs) {
  5965. SET_CARRIER_HDR(&state->dummy_carrier, 0, SCH_MBC, state->allocator);
  5966. state->current_clist = &(state->allocator)->mbc_list;
  5967. state->clist_cursor = (state->current_clist)->first;
  5968. }
  5969. state->current_op = blockscan_sweep_mbcs;
  5970. state->next_op = blockscan_sweep_sbcs;
  5971. return blockscan_yield_helper(state, blockscan_clist_yielding);
  5972. }
  5973. static int blockscan_sweep_cpool(blockscan_t *state)
  5974. {
  5975. if (state->current_op != blockscan_sweep_cpool) {
  5976. SET_CARRIER_HDR(&state->dummy_carrier, 0, SCH_MBC, state->allocator);
  5977. state->cpool_cursor = (state->allocator)->cpool.sentinel;
  5978. }
  5979. state->current_op = blockscan_sweep_cpool;
  5980. state->next_op = blockscan_sweep_mbcs;
  5981. return blockscan_yield_helper(state, blockscan_cpool_yielding);
  5982. }
  5983. static int blockscan_get_specific_allocator(int allocator_num,
  5984. int sched_id,
  5985. Allctr_t **out)
  5986. {
  5987. ErtsAllocatorInfo_t *ai;
  5988. Allctr_t *allocator;
  5989. ASSERT(allocator_num >= ERTS_ALC_A_MIN &&
  5990. allocator_num <= ERTS_ALC_A_MAX);
  5991. ASSERT(sched_id >= 0 && sched_id <= erts_no_schedulers);
  5992. ai = &erts_allctrs_info[allocator_num];
  5993. if (!ai->enabled || !ai->alloc_util) {
  5994. return 0;
  5995. }
  5996. if (!ai->thr_spec) {
  5997. if (sched_id != 0) {
  5998. /* Only thread-specific allocators can be scanned on a specific
  5999. * scheduler. */
  6000. return 0;
  6001. }
  6002. allocator = (Allctr_t*)ai->extra;
  6003. ASSERT(allocator->thread_safe);
  6004. } else {
  6005. ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t*)ai->extra;
  6006. ASSERT(sched_id < tspec->size);
  6007. allocator = tspec->allctr[sched_id];
  6008. }
  6009. *out = allocator;
  6010. return 1;
  6011. }
  6012. static void blockscan_sched_trampoline(void *arg)
  6013. {
  6014. ErtsAlcuBlockscanYieldData *yield;
  6015. ErtsSchedulerData *esdp;
  6016. blockscan_t *scanner;
  6017. esdp = erts_get_scheduler_data();
  6018. scanner = (blockscan_t*)arg;
  6019. yield = ERTS_SCHED_AUX_YIELD_DATA(esdp, alcu_blockscan);
  6020. ASSERT((yield->last == NULL) == (yield->current == NULL));
  6021. if (yield->last != NULL) {
  6022. blockscan_t *prev_scanner = yield->last;
  6023. ASSERT(prev_scanner->scanner_queue == NULL);
  6024. prev_scanner->scanner_queue = scanner;
  6025. } else {
  6026. yield->current = scanner;
  6027. }
  6028. scanner->scanner_queue = NULL;
  6029. yield->last = scanner;
  6030. erts_notify_new_aux_yield_work(esdp);
  6031. }
  6032. static void blockscan_dispatch(blockscan_t *scanner, Process *owner,
  6033. Allctr_t *allocator, int sched_id)
  6034. {
  6035. ASSERT(erts_get_scheduler_id() != 0);
  6036. if (sched_id == 0) {
  6037. /* Global instances are always handled on the current scheduler. */
  6038. sched_id = ERTS_ALC_GET_THR_IX();
  6039. ASSERT(allocator->thread_safe);
  6040. }
  6041. scanner->allocator = allocator;
  6042. scanner->process = owner;
  6043. erts_proc_inc_refc(scanner->process);
  6044. cpool_init_carrier_data(scanner->allocator, &scanner->dummy_carrier);
  6045. erts_atomic_init_nob(&(scanner->dummy_carrier).allctr,
  6046. (erts_aint_t)allocator | ERTS_CRR_ALCTR_FLG_BUSY);
  6047. if (ERTS_ALC_IS_CPOOL_ENABLED(scanner->allocator)) {
  6048. scanner->next_op = blockscan_sweep_cpool;
  6049. } else {
  6050. scanner->next_op = blockscan_sweep_mbcs;
  6051. }
  6052. /* Aux yield jobs can only be set up while running on the scheduler that
  6053. * services them, so we move there before continuing.
  6054. *
  6055. * We can't drive the scan itself through this since the scheduler will
  6056. * always finish *all* misc aux work in one go which makes it impossible to
  6057. * yield. */
  6058. erts_schedule_misc_aux_work(sched_id, blockscan_sched_trampoline, scanner);
  6059. }
  6060. int erts_handle_yielded_alcu_blockscan(ErtsSchedulerData *esdp,
  6061. ErtsAlcuBlockscanYieldData *yield)
  6062. {
  6063. blockscan_t *scanner = yield->current;
  6064. (void)esdp;
  6065. ASSERT((yield->last == NULL) == (yield->current == NULL));
  6066. if (scanner) {
  6067. if (scanner->next_op(scanner)) {
  6068. return 1;
  6069. }
  6070. ASSERT(ERTS_PROC_IS_EXITING(scanner->process) ||
  6071. scanner->current_op == blockscan_finish);
  6072. yield->current = scanner->scanner_queue;
  6073. if (yield->current == NULL) {
  6074. ASSERT(scanner == yield->last);
  6075. yield->last = NULL;
  6076. }
  6077. erts_proc_dec_refc(scanner->process);
  6078. /* Plain free is intentional. */
  6079. free(scanner);
  6080. return yield->current != NULL;
  6081. }
  6082. return 0;
  6083. }
  6084. void erts_alcu_sched_spec_data_init(ErtsSchedulerData *esdp)
  6085. {
  6086. ErtsAlcuBlockscanYieldData *yield;
  6087. yield = ERTS_SCHED_AUX_YIELD_DATA(esdp, alcu_blockscan);
  6088. yield->current = NULL;
  6089. yield->last = NULL;
  6090. }
  6091. /* ------------------------------------------------------------------------- */
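/*
 * Branch-free integer base-2 logarithm using a de Bruijn multiply and
 * lookup table. Used below to map block sizes to histogram slots.
 */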
  6092. static ERTS_INLINE int u64_log2(Uint64 v)
  6093. {
  6094. static const int log2_tab64[64] = {
  6095. 63, 0, 58, 1, 59, 47, 53, 2,
  6096. 60, 39, 48, 27, 54, 33, 42, 3,
  6097. 61, 51, 37, 40, 49, 18, 28, 20,
  6098. 55, 30, 34, 11, 43, 14, 22, 4,
  6099. 62, 57, 46, 52, 38, 26, 32, 41,
  6100. 50, 36, 17, 19, 29, 10, 13, 21,
  6101. 56, 45, 25, 31, 35, 16, 9, 12,
  6102. 44, 24, 15, 8, 23, 7, 6, 5};
  6103. v |= v >> 1;
  6104. v |= v >> 2;
  6105. v |= v >> 4;
  6106. v |= v >> 8;
  6107. v |= v >> 16;
  6108. v |= v >> 32;
  6109. return log2_tab64[((Uint64)((v - (v >> 1))*0x07EDD5E59A4E28C2)) >> 58];
  6110. }
  6111. /* ------------------------------------------------------------------------- */
  6112. typedef struct hist_tree__ {
  6113. struct hist_tree__ *parent;
  6114. struct hist_tree__ *left;
  6115. struct hist_tree__ *right;
  6116. int is_red;
  6117. alcu_atag_t tag;
  6118. UWord histogram[1];
  6119. } hist_tree_t;
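/* The ERTS_RBT_* defines below instantiate a red-black tree (erl_rbtree.h)
 * keyed on the allocation tag. Each node carries a per-tag histogram; the
 * histogram[1] member is over-allocated to hist_slot_count entries when a
 * node is created. */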
  6120. #define ERTS_RBT_PREFIX hist_tree
  6121. #define ERTS_RBT_T hist_tree_t
  6122. #define ERTS_RBT_KEY_T UWord
  6123. #define ERTS_RBT_FLAGS_T int
  6124. #define ERTS_RBT_INIT_EMPTY_TNODE(T) ((void)0)
  6125. #define ERTS_RBT_IS_RED(T) ((T)->is_red)
  6126. #define ERTS_RBT_SET_RED(T) ((T)->is_red = 1)
  6127. #define ERTS_RBT_IS_BLACK(T) (!ERTS_RBT_IS_RED(T))
  6128. #define ERTS_RBT_SET_BLACK(T) ((T)->is_red = 0)
  6129. #define ERTS_RBT_GET_FLAGS(T) ((T)->is_red)
  6130. #define ERTS_RBT_SET_FLAGS(T, F) ((T)->is_red = F)
  6131. #define ERTS_RBT_GET_PARENT(T) ((T)->parent)
  6132. #define ERTS_RBT_SET_PARENT(T, P) ((T)->parent = P)
  6133. #define ERTS_RBT_GET_RIGHT(T) ((T)->right)
  6134. #define ERTS_RBT_SET_RIGHT(T, R) ((T)->right = (R))
  6135. #define ERTS_RBT_GET_LEFT(T) ((T)->left)
  6136. #define ERTS_RBT_SET_LEFT(T, L) ((T)->left = (L))
  6137. #define ERTS_RBT_GET_KEY(T) ((T)->tag)
  6138. #define ERTS_RBT_IS_LT(KX, KY) (KX < KY)
  6139. #define ERTS_RBT_IS_EQ(KX, KY) (KX == KY)
  6140. #define ERTS_RBT_WANT_FOREACH_DESTROY_YIELDING
  6141. #define ERTS_RBT_WANT_FOREACH_DESTROY
  6142. #define ERTS_RBT_WANT_INSERT
  6143. #define ERTS_RBT_WANT_LOOKUP
  6144. #define ERTS_RBT_UNDEF
  6145. #include "erl_rbtree.h"
  6146. typedef struct {
  6147. blockscan_t common;
  6148. ErtsIRefStorage iref;
  6149. Process *process;
  6150. hist_tree_rbt_yield_state_t hist_tree_yield;
  6151. hist_tree_t *hist_tree;
  6152. UWord hist_count;
  6153. UWord hist_slot_start;
  6154. int hist_slot_count;
  6155. UWord unscanned_size;
  6156. ErtsHeapFactory msg_factory;
  6157. int building_result;
  6158. Eterm result_list;
  6159. } gather_ahist_t;
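/*
 * Bumps the histogram slot of 'tag' that corresponds to 'size'. Slots grow
 * logarithmically from hist_slot_start, and oversized blocks are clamped into
 * the last slot. New tags get a zero-filled node from plain calloc, presumably
 * to keep the bookkeeping off the allocators that are being scanned.
 */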
  6160. static void gather_ahist_update(gather_ahist_t *state, UWord tag, UWord size)
  6161. {
  6162. hist_tree_t *hist_node;
  6163. UWord size_interval;
  6164. int hist_slot;
  6165. hist_node = hist_tree_rbt_lookup(state->hist_tree, tag);
  6166. if (hist_node == NULL) {
  6167. /* Plain calloc is intentional. */
  6168. hist_node = (hist_tree_t*)calloc(1, sizeof(hist_tree_t) +
  6169. (state->hist_slot_count - 1) *
  6170. sizeof(hist_node->histogram[0]));
  6171. hist_node->tag = tag;
  6172. hist_tree_rbt_insert(&state->hist_tree, hist_node);
  6173. state->hist_count++;
  6174. }
  6175. size_interval = (size / state->hist_slot_start);
  6176. size_interval = u64_log2(size_interval + 1);
  6177. hist_slot = MIN(size_interval, state->hist_slot_count - 1);
  6178. hist_node->histogram[hist_slot]++;
  6179. }
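/*
 * Carrier scan callback for the allocation histogram gatherer. A single-block
 * carrier contributes at most one (tagged) block; a multi-block carrier is
 * walked block by block until the last block, or until
 * BLOCKSCAN_BAILOUT_THRESHOLD blocks have been visited, in which case the
 * remaining bytes are counted as unscanned. Returns the number of blocks
 * visited.
 */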
  6180. static int gather_ahist_scan(Allctr_t *allocator,
  6181. void *user_data,
  6182. Carrier_t *carrier)
  6183. {
  6184. gather_ahist_t *state;
  6185. int blocks_scanned;
  6186. Block_t *block;
  6187. state = (gather_ahist_t*)user_data;
  6188. blocks_scanned = 1;
  6189. if (IS_SB_CARRIER(carrier)) {
  6190. alcu_atag_t tag;
  6191. block = SBC2BLK(allocator, carrier);
  6192. if (BLK_HAS_ATAG(block)) {
  6193. tag = GET_BLK_ATAG(block);
  6194. ASSERT(DBG_IS_VALID_ATAG(tag));
  6195. gather_ahist_update(state, tag, SBC_BLK_SZ(block));
  6196. }
  6197. } else {
  6198. UWord scanned_bytes = MBC_HEADER_SIZE(allocator);
  6199. ASSERT(IS_MB_CARRIER(carrier));
  6200. block = MBC_TO_FIRST_BLK(allocator, carrier);
  6201. while (1) {
  6202. UWord block_size = MBC_BLK_SZ(block);
  6203. if (IS_ALLOCED_BLK(block) && BLK_HAS_ATAG(block)) {
  6204. alcu_atag_t tag = GET_BLK_ATAG(block);
  6205. ASSERT(DBG_IS_VALID_ATAG(tag));
  6206. gather_ahist_update(state, tag, block_size);
  6207. }
  6208. scanned_bytes += block_size;
  6209. if (blocks_scanned >= BLOCKSCAN_BAILOUT_THRESHOLD) {
  6210. state->unscanned_size += CARRIER_SZ(carrier) - scanned_bytes;
  6211. break;
  6212. } else if (IS_LAST_BLK(block)) {
  6213. break;
  6214. }
  6215. block = NXT_BLK(block);
  6216. blocks_scanned++;
  6217. }
  6218. }
  6219. return blocks_scanned;
  6220. }
  6221. static int gather_ahist_append_result(hist_tree_t *node, void *arg, Sint reds)
  6222. {
  6223. gather_ahist_t *state = (gather_ahist_t*)arg;
  6224. Eterm histogram_tuple, tag_tuple;
  6225. Eterm *hp;
  6226. int ix;
  6227. ASSERT(state->building_result);
  6228. hp = erts_produce_heap(&state->msg_factory, 7 + state->hist_slot_count, 0);
  6229. hp[0] = make_arityval(state->hist_slot_count);
  6230. for (ix = 0; ix < state->hist_slot_count; ix++) {
  6231. hp[1 + ix] = make_small(node->histogram[ix]);
  6232. }
  6233. histogram_tuple = make_tuple(hp);
  6234. hp += 1 + state->hist_slot_count;
  6235. hp[0] = make_arityval(3);
  6236. hp[1] = ATAG_ID(node->tag);
  6237. hp[2] = alloc_type_atoms[ATAG_TYPE(node->tag)];
  6238. hp[3] = histogram_tuple;
  6239. tag_tuple = make_tuple(hp);
  6240. hp += 4;
  6241. state->result_list = CONS(hp, tag_tuple, state->result_list);
  6242. /* Plain free is intentional. */
  6243. free(node);
  6244. return 1;
  6245. }
  6246. static void gather_ahist_send(gather_ahist_t *state)
  6247. {
  6248. Eterm result_tuple, unscanned_size, task_ref;
  6249. Uint term_size;
  6250. Eterm *hp;
  6251. ASSERT((state->result_list == NIL) ^ (state->hist_count > 0));
  6252. ASSERT(state->building_result);
  6253. term_size = 4 + erts_iref_storage_heap_size(&state->iref);
  6254. term_size += IS_USMALL(0, state->unscanned_size) ? 0 : BIG_UINT_HEAP_SIZE;
  6255. hp = erts_produce_heap(&state->msg_factory, term_size, 0);
  6256. task_ref = erts_iref_storage_make_ref(&state->iref, &hp,
  6257. &(state->msg_factory.message)->hfrag.off_heap, 0);
  6258. unscanned_size = bld_unstable_uint(&hp, NULL, state->unscanned_size);
  6259. hp[0] = make_arityval(3);
  6260. hp[1] = task_ref;
  6261. hp[2] = unscanned_size;
  6262. hp[3] = state->result_list;
  6263. result_tuple = make_tuple(hp);
  6264. erts_factory_trim_and_close(&state->msg_factory, &result_tuple, 1);
  6265. erts_queue_message(state->process, 0, state->msg_factory.message,
  6266. result_tuple, am_system);
  6267. }
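/*
 * Finish callback: the first call sets up a self-contained message sized to
 * hold the whole result, after which the histogram tree is converted into a
 * list of {TagId, TypeAtom, {Histogram}} tuples. Returns 1 while the yielding
 * tree destruction has more work to do, and 0 once the result has been sent.
 */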
  6268. static int gather_ahist_finish(void *arg)
  6269. {
  6270. gather_ahist_t *state = (gather_ahist_t*)arg;
  6271. if (!state->building_result) {
  6272. ErtsMessage *message;
  6273. Uint minimum_size;
  6274. Eterm *hp;
  6275. /* {Ref, UnscannedSize, [{TagId, TypeAtom, {Histogram}} | Rest]} */
  6276. minimum_size = 4 + erts_iref_storage_heap_size(&state->iref) +
  6277. state->hist_count * (7 + state->hist_slot_count);
  6278. message = erts_alloc_message(minimum_size, &hp);
  6279. erts_factory_selfcontained_message_init(&state->msg_factory,
  6280. message, hp);
  6281. ERTS_RBT_YIELD_STAT_INIT(&state->hist_tree_yield);
  6282. state->result_list = NIL;
  6283. state->building_result = 1;
  6284. }
  6285. if (!hist_tree_rbt_foreach_destroy_yielding(&state->hist_tree,
  6286. &gather_ahist_append_result,
  6287. state,
  6288. &state->hist_tree_yield,
  6289. BLOCKSCAN_REDUCTIONS)) {
  6290. return 1;
  6291. }
  6292. gather_ahist_send(state);
  6293. return 0;
  6294. }
  6295. static int gather_ahist_destroy_result(hist_tree_t *node, void *arg, Sint reds)
  6296. {
  6297. (void)arg;
  6298. free(node);
  6299. return 1;
  6300. }
  6301. static void gather_ahist_abort(void *arg)
  6302. {
  6303. gather_ahist_t *state = (gather_ahist_t*)arg;
  6304. if (state->building_result) {
  6305. erts_factory_undo(&state->msg_factory);
  6306. }
  6307. hist_tree_rbt_foreach_destroy(&state->hist_tree,
  6308. &gather_ahist_destroy_result,
  6309. NULL);
  6310. }
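/*
 * Starts an allocation-tag histogram scan of the allocator instance given by
 * (allocator_num, sched_id) on behalf of process 'p'. The result is delivered
 * later as a message identified by 'ref'. Returns 0 if the allocator instance
 * cannot be resolved, otherwise 1.
 */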
  6311. int erts_alcu_gather_alloc_histograms(Process *p, int allocator_num,
  6312. int sched_id, int hist_width,
  6313. UWord hist_start, Eterm ref)
  6314. {
  6315. gather_ahist_t *gather_state;
  6316. blockscan_t *scanner;
  6317. Allctr_t *allocator;
  6318. ASSERT(is_internal_ref(ref));
  6319. if (!blockscan_get_specific_allocator(allocator_num,
  6320. sched_id,
  6321. &allocator)) {
  6322. return 0;
  6323. }
  6324. ensure_atoms_initialized(allocator);
  6325. /* Plain calloc is intentional. */
  6326. gather_state = (gather_ahist_t*)calloc(1, sizeof(gather_ahist_t));
  6327. scanner = &gather_state->common;
  6328. scanner->abort = gather_ahist_abort;
  6329. scanner->scan = gather_ahist_scan;
  6330. scanner->finish = gather_ahist_finish;
  6331. scanner->user_data = gather_state;
  6332. erts_iref_storage_save(&gather_state->iref, ref);
  6333. gather_state->hist_slot_start = hist_start;
  6334. gather_state->hist_slot_count = hist_width;
  6335. gather_state->process = p;
  6336. blockscan_dispatch(scanner, p, allocator, sched_id);
  6337. return 1;
  6338. }
  6339. /* ------------------------------------------------------------------------- */
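/*
 * Carrier info gathering: one chist_node_t is built per scanned carrier,
 * recording its size, flags, allocated size/count, and a histogram of free
 * block sizes. The nodes are later turned into a result message for the
 * requesting process.
 */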
  6340. typedef struct chist_node__ {
  6341. struct chist_node__ *next;
  6342. UWord carrier_size;
  6343. UWord unscanned_size;
  6344. UWord allocated_size;
  6345. /* BLOCKSCAN_BAILOUT_THRESHOLD guarantees we won't overflow this or the
  6346. * counters in the free block histogram. */
  6347. int allocated_count;
  6348. int flags;
  6349. int histogram[1];
  6350. } chist_node_t;
  6351. typedef struct {
  6352. blockscan_t common;
  6353. ErtsIRefStorage iref;
  6354. Process *process;
  6355. Eterm allocator_desc;
  6356. chist_node_t *info_list;
  6357. UWord info_count;
  6358. UWord hist_slot_start;
  6359. int hist_slot_count;
  6360. ErtsHeapFactory msg_factory;
  6361. int building_result;
  6362. Eterm result_list;
  6363. } gather_cinfo_t;
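/*
 * Carrier scan callback for carrier info gathering. Allocates a fresh node
 * with plain calloc, records the carrier's flags (sans the BUSY flag the
 * scanner itself has set), its total size, allocated size/count, and (for
 * multi-block carriers) a histogram of free block sizes, bailing out after
 * BLOCKSCAN_BAILOUT_THRESHOLD blocks with the remainder counted as unscanned.
 */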
  6364. static int gather_cinfo_scan(Allctr_t *allocator,
  6365. void *user_data,
  6366. Carrier_t *carrier)
  6367. {
  6368. gather_cinfo_t *state;
  6369. chist_node_t *node;
  6370. int blocks_scanned;
  6371. Block_t *block;
  6372. state = (gather_cinfo_t*)user_data;
  6373. node = calloc(1, sizeof(chist_node_t) +
  6374. (state->hist_slot_count - 1) *
  6375. sizeof(node->histogram[0]));
  6376. blocks_scanned = 1;
  6377. /* ERTS_CRR_ALCTR_FLG_BUSY is ignored since we've set it ourselves and it
  6378. * would be misleading to include it. */
  6379. node->flags = erts_atomic_read_rb(&carrier->allctr) &
  6380. (ERTS_CRR_ALCTR_FLG_MASK & ~ERTS_CRR_ALCTR_FLG_BUSY);
  6381. node->carrier_size = CARRIER_SZ(carrier);
  6382. if (IS_SB_CARRIER(carrier)) {
  6383. UWord block_size;
  6384. block = SBC2BLK(allocator, carrier);
  6385. block_size = SBC_BLK_SZ(block);
  6386. node->allocated_size = block_size;
  6387. node->allocated_count = 1;
  6388. } else {
  6389. UWord scanned_bytes = MBC_HEADER_SIZE(allocator);
  6390. block = MBC_TO_FIRST_BLK(allocator, carrier);
  6391. while (1) {
  6392. UWord block_size = MBC_BLK_SZ(block);
  6393. scanned_bytes += block_size;
  6394. if (IS_ALLOCED_BLK(block)) {
  6395. node->allocated_size += block_size;
  6396. node->allocated_count++;
  6397. } else {
  6398. UWord size_interval;
  6399. int hist_slot;
  6400. size_interval = (block_size / state->hist_slot_start);
  6401. size_interval = u64_log2(size_interval + 1);
  6402. hist_slot = MIN(size_interval, state->hist_slot_count - 1);
  6403. node->histogram[hist_slot]++;
  6404. }
  6405. if (blocks_scanned >= BLOCKSCAN_BAILOUT_THRESHOLD) {
  6406. node->unscanned_size += CARRIER_SZ(carrier) - scanned_bytes;
  6407. break;
  6408. } else if (IS_LAST_BLK(block)) {
  6409. break;
  6410. }
  6411. block = NXT_BLK(block);
  6412. blocks_scanned++;
  6413. }
  6414. }
  6415. node->next = state->info_list;
  6416. state->info_list = node;
  6417. state->info_count++;
  6418. return blocks_scanned;
  6419. }
  6420. static void gather_cinfo_append_result(gather_cinfo_t *state,
  6421. chist_node_t *info)
  6422. {
  6423. Eterm carrier_size, unscanned_size, allocated_size;
  6424. Eterm histogram_tuple, carrier_tuple;
  6425. Uint term_size;
  6426. Eterm *hp;
  6427. int ix;
  6428. ASSERT(state->building_result);
  6429. term_size = 11 + state->hist_slot_count;
  6430. term_size += IS_USMALL(0, info->carrier_size) ? 0 : BIG_UINT_HEAP_SIZE;
  6431. term_size += IS_USMALL(0, info->unscanned_size) ? 0 : BIG_UINT_HEAP_SIZE;
  6432. term_size += IS_USMALL(0, info->allocated_size) ? 0 : BIG_UINT_HEAP_SIZE;
  6433. hp = erts_produce_heap(&state->msg_factory, term_size, 0);
  6434. hp[0] = make_arityval(state->hist_slot_count);
  6435. for (ix = 0; ix < state->hist_slot_count; ix++) {
  6436. hp[1 + ix] = make_small(info->histogram[ix]);
  6437. }
  6438. histogram_tuple = make_tuple(hp);
  6439. hp += 1 + state->hist_slot_count;
  6440. carrier_size = bld_unstable_uint(&hp, NULL, info->carrier_size);
  6441. unscanned_size = bld_unstable_uint(&hp, NULL, info->unscanned_size);
  6442. allocated_size = bld_unstable_uint(&hp, NULL, info->allocated_size);
  6443. hp[0] = make_arityval(7);
  6444. hp[1] = state->allocator_desc;
  6445. hp[2] = carrier_size;
  6446. hp[3] = unscanned_size;
  6447. hp[4] = allocated_size;
  6448. hp[5] = make_small(info->allocated_count);
  6449. hp[6] = (info->flags & ERTS_CRR_ALCTR_FLG_IN_POOL) ? am_true : am_false;
  6450. hp[7] = histogram_tuple;
  6451. carrier_tuple = make_tuple(hp);
  6452. hp += 8;
  6453. state->result_list = CONS(hp, carrier_tuple, state->result_list);
  6454. free(info);
  6455. }
  6456. static void gather_cinfo_send(gather_cinfo_t *state)
  6457. {
  6458. Eterm result_tuple, task_ref;
  6459. int term_size;
  6460. Eterm *hp;
  6461. ASSERT((state->result_list == NIL) ^ (state->info_count > 0));
  6462. ASSERT(state->building_result);
  6463. term_size = 3 + erts_iref_storage_heap_size(&state->iref);
  6464. hp = erts_produce_heap(&state->msg_factory, term_size, 0);
  6465. task_ref = erts_iref_storage_make_ref(&state->iref, &hp,
  6466. &(state->msg_factory.message)->hfrag.off_heap, 0);
  6467. hp[0] = make_arityval(2);
  6468. hp[1] = task_ref;
  6469. hp[2] = state->result_list;
  6470. result_tuple = make_tuple(hp);
  6471. erts_factory_trim_and_close(&state->msg_factory, &result_tuple, 1);
  6472. erts_queue_message(state->process, 0, state->msg_factory.message,
  6473. result_tuple, am_system);
  6474. }
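/*
 * Finish callback: the first call sets up the result message, after which the
 * info list is drained into carrier tuples, yielding after roughly
 * BLOCKSCAN_REDUCTIONS entries per invocation. Returns 1 while entries remain
 * and 0 once the result has been sent.
 */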
  6475. static int gather_cinfo_finish(void *arg)
  6476. {
  6477. gather_cinfo_t *state = (gather_cinfo_t*)arg;
  6478. int reductions = BLOCKSCAN_REDUCTIONS;
  6479. if (!state->building_result) {
  6480. ErtsMessage *message;
  6481. Uint minimum_size;
  6482. Eterm *hp;
  6483. /* {Ref, [{AllocatorDesc, CarrierSize, UnscannedSize, AllocatedSize,
  6484. * AllocatedCount, InPool, {FreeBlockHistogram}} | Rest]} */
  6485. minimum_size = 3 + erts_iref_storage_heap_size(&state->iref) +
  6486. state->info_count * (11 + state->hist_slot_count);
  6487. message = erts_alloc_message(minimum_size, &hp);
  6488. erts_factory_selfcontained_message_init(&state->msg_factory,
  6489. message, hp);
  6490. state->result_list = NIL;
  6491. state->building_result = 1;
  6492. }
  6493. while (state->info_list) {
  6494. chist_node_t *current = state->info_list;
  6495. state->info_list = current->next;
  6496. gather_cinfo_append_result(state, current);
  6497. if (reductions-- <= 0) {
  6498. return 1;
  6499. }
  6500. }
  6501. gather_cinfo_send(state);
  6502. return 0;
  6503. }
  6504. static void gather_cinfo_abort(void *arg)
  6505. {
  6506. gather_cinfo_t *state = (gather_cinfo_t*)arg;
  6507. if (state->building_result) {
  6508. erts_factory_undo(&state->msg_factory);
  6509. }
  6510. while (state->info_list) {
  6511. chist_node_t *current = state->info_list;
  6512. state->info_list = current->next;
  6513. free(current);
  6514. }
  6515. }
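/*
 * Starts a carrier info scan of the allocator instance given by
 * (allocator_num, sched_id) on behalf of process 'p'; note that hist_start is
 * doubled before being stored as hist_slot_start. The result is delivered
 * later as a message identified by 'ref'. Returns 0 if the allocator instance
 * cannot be resolved, otherwise 1.
 */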
  6516. int erts_alcu_gather_carrier_info(struct process *p, int allocator_num,
  6517. int sched_id, int hist_width,
  6518. UWord hist_start, Eterm ref)
  6519. {
  6520. gather_cinfo_t *gather_state;
  6521. blockscan_t *scanner;
  6522. const char *allocator_desc;
  6523. Allctr_t *allocator;
  6524. ASSERT(is_internal_ref(ref));
  6525. if (!blockscan_get_specific_allocator(allocator_num,
  6526. sched_id,
  6527. &allocator)) {
  6528. return 0;
  6529. }
  6530. allocator_desc = ERTS_ALC_A2AD(allocator_num);
  6531. /* Plain calloc is intentional. */
  6532. gather_state = (gather_cinfo_t*)calloc(1, sizeof(gather_cinfo_t));
  6533. scanner = &gather_state->common;
  6534. scanner->abort = gather_cinfo_abort;
  6535. scanner->scan = gather_cinfo_scan;
  6536. scanner->finish = gather_cinfo_finish;
  6537. scanner->user_data = gather_state;
  6538. gather_state->allocator_desc = erts_atom_put((byte *)allocator_desc,
  6539. sys_strlen(allocator_desc),
  6540. ERTS_ATOM_ENC_LATIN1, 1);
  6541. erts_iref_storage_save(&gather_state->iref, ref);
  6542. gather_state->hist_slot_start = hist_start * 2;
  6543. gather_state->hist_slot_count = hist_width;
  6544. gather_state->process = p;
  6545. blockscan_dispatch(scanner, p, allocator, sched_id);
  6546. return 1;
  6547. }
  6548. /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
  6549. * NOTE: erts_alcu_test() is only supposed to be used for testing. *
  6550. * *
  6551. * Keep alloc_SUITE_data/allocator_test.h updated if changes are made *
  6552. * to erts_alcu_test() *
  6553. \* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
  6554. UWord
  6555. erts_alcu_test(UWord op, UWord a1, UWord a2)
  6556. {
  6557. switch (op) {
  6558. case 0x000: return (UWord) BLK_SZ((Block_t *) a1);
  6559. case 0x001: return (UWord) BLK_UMEM_SZ((Block_t *) a1);
  6560. case 0x002: return (UWord) IS_PREV_BLK_FREE((Block_t *) a1);
  6561. case 0x003: return (UWord) IS_FREE_BLK((Block_t *) a1);
  6562. case 0x004: return (UWord) IS_LAST_BLK((Block_t *) a1);
  6563. case 0x005: return (UWord) UMEM2BLK((void *) a1);
  6564. case 0x006: return (UWord) BLK2UMEM((Block_t *) a1);
  6565. case 0x007: return (UWord) IS_SB_CARRIER((Carrier_t *) a1);
  6566. case 0x008: return (UWord) IS_SBC_BLK((Block_t *) a1);
  6567. case 0x009: return (UWord) IS_MB_CARRIER((Carrier_t *) a1);
  6568. case 0x00a: return (UWord) IS_MSEG_CARRIER((Carrier_t *) a1);
  6569. case 0x00b: return (UWord) CARRIER_SZ((Carrier_t *) a1);
  6570. case 0x00c: return (UWord) SBC2BLK((Allctr_t *) a1,
  6571. (Carrier_t *) a2);
  6572. case 0x00d: return (UWord) BLK_TO_SBC((Block_t *) a2);
  6573. case 0x00e: return (UWord) MBC_TO_FIRST_BLK((Allctr_t *) a1,
  6574. (Carrier_t *) a2);
  6575. case 0x00f: return (UWord) FIRST_BLK_TO_MBC((Allctr_t *) a1,
  6576. (Block_t *) a2);
  6577. case 0x010: return (UWord) ((Allctr_t *) a1)->mbc_list.first;
  6578. case 0x011: return (UWord) ((Allctr_t *) a1)->mbc_list.last;
  6579. case 0x012: return (UWord) ((Allctr_t *) a1)->sbc_list.first;
  6580. case 0x013: return (UWord) ((Allctr_t *) a1)->sbc_list.last;
  6581. case 0x014: return (UWord) ((Carrier_t *) a1)->next;
  6582. case 0x015: return (UWord) ((Carrier_t *) a1)->prev;
  6583. case 0x016: return (UWord) ABLK_HDR_SZ;
  6584. case 0x017: return (UWord) ((Allctr_t *) a1)->min_block_size;
  6585. case 0x018: return (UWord) NXT_BLK((Block_t *) a1);
  6586. case 0x019: return (UWord) PREV_BLK((Block_t *) a1);
  6587. case 0x01a: return (UWord) IS_MBC_FIRST_BLK((Allctr_t*)a1, (Block_t *) a2);
  6588. case 0x01b: return (UWord) sizeof(Unit_t);
  6589. case 0x01c: return (UWord) BLK_TO_MBC((Block_t*) a1);
  6590. case 0x01d: ((Allctr_t*) a1)->add_mbc((Allctr_t*)a1, (Carrier_t*)a2); break;
  6591. case 0x01e: ((Allctr_t*) a1)->remove_mbc((Allctr_t*)a1, (Carrier_t*)a2); break;
  6592. case 0x01f: return (UWord) sizeof(ErtsAlcCrrPool_t);
  6593. case 0x020:
  6594. SET_CARRIER_HDR((Carrier_t *) a2, 0, SCH_SYS_ALLOC|SCH_MBC, (Allctr_t *) a1);
  6595. cpool_init_carrier_data((Allctr_t *) a1, (Carrier_t *) a2);
  6596. return (UWord) a2;
  6597. case 0x021:
  6598. cpool_insert((Allctr_t *) a1, (Carrier_t *) a2);
  6599. return (UWord) a2;
  6600. case 0x022:
  6601. cpool_delete((Allctr_t *) a1, (Allctr_t *) a1, (Carrier_t *) a2);
  6602. return (UWord) a2;
  6603. case 0x023: return (UWord) cpool_is_empty((Allctr_t *) a1);
  6604. case 0x024: return (UWord) cpool_dbg_is_in_pool((Allctr_t *) a1, (Carrier_t *) a2);
  6605. case 0x025: /* UMEM2BLK_TEST*/
  6606. #ifdef DEBUG
  6607. # ifdef HARD_DEBUG
  6608. return (UWord)UMEM2BLK(a1-3*sizeof(UWord));
  6609. # else
  6610. return (UWord)UMEM2BLK(a1-2*sizeof(UWord));
  6611. # endif
  6612. #else
  6613. return (UWord)UMEM2BLK(a1);
  6614. #endif
  6615. default: ASSERT(0); return ~((UWord) 0);
  6616. }
  6617. return 0;
  6618. }
  6619. /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
  6620. * Debug functions *
  6621. \* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
  6622. void
  6623. erts_alcu_assert_failed(char* expr, char* file, int line, char *func)
  6624. {
  6625. fflush(stdout);
  6626. fprintf(stderr, "%s:%d:%s(): Assertion failed: %s\n",
  6627. file, line, func, expr);
  6628. fflush(stderr);
  6629. #if defined(__WIN__) || defined(__WIN32__)
  6630. DebugBreak();
  6631. #else
  6632. abort();
  6633. #endif
  6634. }
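/*
 * Aborts the emulator if the allocator still holds allocated blocks when it
 * is expected to be completely unused.
 */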
  6635. void
  6636. erts_alcu_verify_unused(Allctr_t *allctr)
  6637. {
  6638. UWord no;
  6639. no = allctr->sbcs.curr.norm.mseg.no;
  6640. no += allctr->sbcs.curr.norm.sys_alloc.no;
  6641. no += allctr->mbcs.blocks.curr.no;
  6642. if (no) {
  6643. UWord sz = allctr->sbcs.blocks.curr.size;
  6644. sz += allctr->mbcs.blocks.curr.size;
  6645. erts_exit(ERTS_ABORT_EXIT,
  6646. "%salloc() used when expected to be unused!\n"
  6647. "Total amount of blocks allocated: %bpu\n"
  6648. "Total amount of bytes allocated: %bpu\n",
  6649. allctr->name_prefix, no, sz);
  6650. }
  6651. }
  6652. void
  6653. erts_alcu_verify_unused_ts(Allctr_t *allctr)
  6654. {
  6655. erts_mtx_lock(&allctr->mutex);
  6656. erts_alcu_verify_unused(allctr);
  6657. erts_mtx_unlock(&allctr->mutex);
  6658. }
  6659. #ifdef DEBUG
  6660. int is_sbc_blk(Block_t* blk)
  6661. {
  6662. return IS_SBC_BLK(blk);
  6663. }
  6664. #endif
  6665. #ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
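/*
 * Hard-debug consistency check of the carrier containing 'iblk'. For a
 * multi-block carrier the block list is walked one full lap, verifying block
 * sizes, free/allocated flags and neighbour links; finally the carrier's
 * linkage in the allocator's carrier list is checked.
 */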
  6666. static void
  6667. check_blk_carrier(Allctr_t *allctr, Block_t *iblk)
  6668. {
  6669. Carrier_t *crr;
  6670. CarrierList_t *cl;
  6671. if (IS_SBC_BLK(iblk)) {
  6672. Carrier_t *sbc = BLK_TO_SBC(iblk);
  6673. ASSERT(SBC2BLK(allctr, sbc) == iblk);
  6674. ASSERT(CARRIER_SZ(sbc) - SBC_HEADER_SIZE >= SBC_BLK_SZ(iblk));
  6675. crr = sbc;
  6676. cl = &allctr->sbc_list;
  6677. }
  6678. else {
  6679. Block_t *prev_blk = NULL;
  6680. Block_t *blk;
  6681. char *carrier_end;
  6682. Uint is_free_blk;
  6683. Uint tot_blk_sz;
  6684. Uint blk_sz;
  6685. int has_wrapped_around = 0;
  6686. blk = iblk;
  6687. tot_blk_sz = 0;
  6688. crr = BLK_TO_MBC(blk);
  6689. ASSERT(IS_MB_CARRIER(crr));
  6690. /* Step around the carrier one whole lap starting at 'iblk'
  6691. */
  6692. while (1) {
  6693. ASSERT(IS_MBC_BLK(blk));
  6694. ASSERT(BLK_TO_MBC(blk) == crr);
  6695. if (prev_blk) {
  6696. ASSERT(NXT_BLK(prev_blk) == blk);
  6697. if (IS_FREE_BLK(prev_blk)) {
  6698. ASSERT(IS_PREV_BLK_FREE(blk));
  6699. ASSERT(prev_blk == PREV_BLK(blk));
  6700. }
  6701. else {
  6702. ASSERT(IS_PREV_BLK_ALLOCED(blk));
  6703. }
  6704. }
  6705. if (has_wrapped_around) {
  6706. ASSERT(((Block_t *) crr) < blk);
  6707. if (blk == iblk)
  6708. break;
  6709. ASSERT(blk < iblk);
  6710. }
  6711. else
  6712. ASSERT(blk >= iblk);
  6713. blk_sz = MBC_BLK_SZ(blk);
  6714. ASSERT(blk_sz % sizeof(Unit_t) == 0);
  6715. ASSERT(blk_sz >= allctr->min_block_size);
  6716. tot_blk_sz += blk_sz;
  6717. is_free_blk = (int) IS_FREE_BLK(blk);
  6718. ASSERT(!is_free_blk
  6719. || IS_LAST_BLK(blk)
  6720. || PREV_BLK_SZ(((char *) blk)+blk_sz) == blk_sz);
  6721. if (allctr->check_block)
  6722. (*allctr->check_block)(allctr, blk, (int) is_free_blk);
  6723. if (IS_LAST_BLK(blk)) {
  6724. carrier_end = ((char *) NXT_BLK(blk));
  6725. has_wrapped_around = 1;
  6726. prev_blk = NULL;
  6727. blk = MBC_TO_FIRST_BLK(allctr, crr);
  6728. ASSERT(IS_MBC_FIRST_BLK(allctr,blk));
  6729. }
  6730. else {
  6731. prev_blk = blk;
  6732. blk = NXT_BLK(blk);
  6733. }
  6734. }
  6735. ASSERT((((char *) crr)
  6736. + MBC_HEADER_SIZE(allctr)
  6737. + tot_blk_sz) == carrier_end);
  6738. ASSERT(((char *) crr) + CARRIER_SZ(crr) - sizeof(Unit_t) <= carrier_end
  6739. && carrier_end <= ((char *) crr) + CARRIER_SZ(crr));
  6740. if (allctr->check_mbc)
  6741. (*allctr->check_mbc)(allctr, crr);
  6742. #if HAVE_ERTS_MSEG
  6743. if (IS_MSEG_CARRIER(crr)) {
  6744. ASSERT(CARRIER_SZ(crr) % ERTS_SACRR_UNIT_SZ == 0);
  6745. }
  6746. #endif
  6747. cl = &allctr->mbc_list;
  6748. }
  6749. #ifdef DEBUG
  6750. if (cl->first == crr) {
  6751. ASSERT(!crr->prev);
  6752. }
  6753. else {
  6754. ASSERT(crr->prev);
  6755. ASSERT(crr->prev->next == crr);
  6756. }
  6757. if (cl->last == crr) {
  6758. ASSERT(!crr->next);
  6759. }
  6760. else {
  6761. ASSERT(crr->next);
  6762. ASSERT(crr->next->prev == crr);
  6763. }
  6764. #endif
  6765. }
  6766. #endif /* ERTS_ALLOC_UTIL_HARD_DEBUG */
  6767. #ifdef ERTS_ENABLE_LOCK_COUNT
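/*
 * Lock counting support: installs or uninstalls lcnt lock info for the mutex
 * of every thread-safe alloc_util allocator instance when lock counting is
 * toggled at runtime.
 */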
  6768. static void lcnt_enable_allocator_lock_count(Allctr_t *allocator, int enable) {
  6769. if(!allocator->thread_safe) {
  6770. return;
  6771. }
  6772. if(enable) {
  6773. erts_lcnt_install_new_lock_info(&allocator->mutex.lcnt,
  6774. "alcu_allocator", make_small(allocator->alloc_no),
  6775. ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
  6776. } else {
  6777. erts_lcnt_uninstall(&allocator->mutex.lcnt);
  6778. }
  6779. }
  6780. static void lcnt_update_thread_spec_locks(ErtsAllocatorThrSpec_t *tspec, int enable) {
  6781. if(tspec->enabled) {
  6782. int i;
  6783. for(i = 0; i < tspec->size; i++) {
  6784. lcnt_enable_allocator_lock_count(tspec->allctr[i], enable);
  6785. }
  6786. }
  6787. }
  6788. void erts_lcnt_update_allocator_locks(int enable) {
  6789. int i;
  6790. for(i = ERTS_ALC_A_MIN; i < ERTS_ALC_A_MAX; i++) {
  6791. ErtsAllocatorInfo_t *ai = &erts_allctrs_info[i];
  6792. if(ai->enabled && ai->alloc_util) {
  6793. if(ai->thr_spec) {
  6794. lcnt_update_thread_spec_locks((ErtsAllocatorThrSpec_t*)ai->extra, enable);
  6795. } else {
  6796. lcnt_enable_allocator_lock_count((Allctr_t*)ai->extra, enable);
  6797. }
  6798. }
  6799. }
  6800. }
  6801. #endif /* ERTS_ENABLE_LOCK_COUNT */