/contrib/tcsh/tc.alloc.c

https://bitbucket.org/freebsd/freebsd-head/ · C · 650 lines · 413 code · 72 blank · 165 comment · 78 complexity · f4bdf499cf7dc61acdf009e7e3b8ed28 MD5 · raw file

  1. /* $Header: /p/tcsh/cvsroot/tcsh/tc.alloc.c,v 3.50 2011/12/30 20:55:24 christos Exp $ */
  2. /*
  3. * tc.alloc.c (Caltech) 2/21/82
  4. * Chris Kingsley, kingsley@cit-20.
  5. *
  6. * This is a very fast storage allocator. It allocates blocks of a small
  7. * number of different sizes, and keeps free lists of each size. Blocks that
  8. * don't exactly fit are passed up to the next larger size. In this
  9. * implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long.
  10. * This is designed for use in a program that uses vast quantities of memory,
  11. * but bombs when it runs out.
  12. */
  13. /*-
  14. * Copyright (c) 1980, 1991 The Regents of the University of California.
  15. * All rights reserved.
  16. *
  17. * Redistribution and use in source and binary forms, with or without
  18. * modification, are permitted provided that the following conditions
  19. * are met:
  20. * 1. Redistributions of source code must retain the above copyright
  21. * notice, this list of conditions and the following disclaimer.
  22. * 2. Redistributions in binary form must reproduce the above copyright
  23. * notice, this list of conditions and the following disclaimer in the
  24. * documentation and/or other materials provided with the distribution.
  25. * 3. Neither the name of the University nor the names of its contributors
  26. * may be used to endorse or promote products derived from this software
  27. * without specific prior written permission.
  28. *
  29. * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  30. * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  31. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  32. * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  33. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  34. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  35. * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  36. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  37. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  38. * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  39. * SUCH DAMAGE.
  40. */
  41. #include "sh.h"
  42. #ifdef HAVE_MALLINFO
  43. #include <malloc.h>
  44. #endif
  45. RCSID("$tcsh: tc.alloc.c,v 3.50 2011/12/30 20:55:24 christos Exp $")
  46. #define RCHECK
  47. #define DEBUG
  48. static char *memtop = NULL; /* PWP: top of current memory */
  49. static char *membot = NULL; /* PWP: bottom of allocatable memory */
  50. int dont_free = 0;
  51. #ifdef WINNT_NATIVE
  52. # define malloc fmalloc
  53. # define free ffree
  54. # define calloc fcalloc
  55. # define realloc frealloc
  56. #endif /* WINNT_NATIVE */
  57. #if !defined(DEBUG) || defined(SYSMALLOC)
  58. static void
  59. out_of_memory (void)
  60. {
  61. static const char msg[] = "Out of memory\n";
  62. write(didfds ? 2 : SHDIAG, msg, strlen(msg));
  63. _exit(1);
  64. }
  65. #endif
  66. #ifndef SYSMALLOC
  67. #ifdef SX
  68. extern void* sbrk();
  69. #endif
  70. /*
  71. * Lots of os routines are busted and try to free invalid pointers.
  72. * Although our free routine is smart enough and it will pick bad
  73. * pointers most of the time, in cases where we know we are going to get
  74. * a bad pointer, we'd rather leak.
  75. */
  76. #ifndef NULL
  77. #define NULL 0
  78. #endif
  79. typedef unsigned char U_char; /* we don't really have signed chars */
  80. typedef unsigned int U_int;
  81. typedef unsigned short U_short;
  82. typedef unsigned long U_long;
  83. /*
  84. * The overhead on a block is at least 4 bytes. When free, this space
  85. * contains a pointer to the next free block, and the bottom two bits must
  86. * be zero. When in use, the first byte is set to MAGIC, and the second
  87. * byte is the size index. The remaining bytes are for alignment.
  88. * If range checking is enabled and the size of the block fits
  89. * in two bytes, then the top two bytes hold the size of the requested block
  90. * plus the range checking words, and the header word MINUS ONE.
  91. */
  92. #define MEMALIGN(a) (((a) + ROUNDUP) & ~ROUNDUP)
/* Per-block bookkeeping header; overlays the first bytes of every block. */
union overhead {
    union overhead *ov_next;	/* when free: link in the bucket's free list */
    struct {
	U_char	ovu_magic;	/* magic number (MAGIC while allocated) */
	U_char	ovu_index;	/* bucket # (block size is 1 << (index + 3)) */
#ifdef RCHECK
	U_short	ovu_size;	/* actual block size */
	U_int	ovu_rmagic;	/* range magic number */
#endif
    }       ovu;
#define ov_magic	ovu.ovu_magic
#define ov_index	ovu.ovu_index
#define ov_size		ovu.ovu_size
#define ov_rmagic	ovu.ovu_rmagic
};
  108. #define MAGIC 0xfd /* magic # on accounting info */
  109. #define RMAGIC 0x55555555 /* magic # on range info */
  110. #ifdef RCHECK
  111. #define RSLOP sizeof (U_int)
  112. #else
  113. #define RSLOP 0
  114. #endif
  115. #define ROUNDUP 7
  116. /*
  117. * nextf[i] is the pointer to the next free block of size 2^(i+3). The
  118. * smallest allocatable block is 8 bytes. The overhead information
  119. * precedes the data area returned to the user.
  120. */
  121. #define NBUCKETS ((sizeof(long) << 3) - 3)
  122. static union overhead *nextf[NBUCKETS] IZERO_STRUCT;
  123. /*
  124. * nmalloc[i] is the difference between the number of mallocs and frees
  125. * for a given block size.
  126. */
  127. static U_int nmalloc[NBUCKETS] IZERO_STRUCT;
  128. #ifndef lint
  129. static int findbucket (union overhead *, int);
  130. static void morecore (int);
  131. #endif
  132. #ifdef DEBUG
  133. # define CHECK(a, str, p) \
  134. if (a) { \
  135. xprintf(str, p); \
  136. xprintf(" (memtop = %p membot = %p)\n", memtop, membot); \
  137. abort(); \
  138. }
  139. #else
  140. # define CHECK(a, str, p) \
  141. if (a) { \
  142. xprintf(str, p); \
  143. xprintf(" (memtop = %p membot = %p)\n", memtop, membot); \
  144. return; \
  145. }
  146. #endif
/*
 * Power-of-two bucket allocator.  Rounds the request (plus the overhead
 * header and optional RCHECK slop) up to the enclosing 2^(bucket+3) size,
 * pops a block from that bucket's free list (refilling it via morecore()
 * when empty) and returns storage just past the header.  Aborts (DEBUG)
 * or calls out_of_memory() when the system refuses more memory.
 */
memalign_t
malloc(size_t nbytes)
{
#ifndef lint
    union overhead *p;
    int bucket = 0;
    unsigned shiftr;

    /*
     * Convert amount of memory requested into closest block size stored in
     * hash buckets which satisfies request. Account for space used per block
     * for accounting.
     */
#ifdef SUNOS4
    /*
     * SunOS localtime() overwrites the 9th byte on an 8 byte malloc()....
     * so we get one more...
     * From Michael Schroeder: This is not true. It depends on the
     * timezone string. In Europe it can overwrite the 13th byte on a
     * 12 byte malloc.
     * So we punt and we always allocate an extra byte.
     */
    nbytes++;
#endif
    nbytes = MEMALIGN(MEMALIGN(sizeof(union overhead)) + nbytes + RSLOP);
    shiftr = (nbytes - 1) >> 2;
    /* apart from this loop, this is O(1) */
    while ((shiftr >>= 1) != 0)
	bucket++;
    /*
     * If nothing in hash bucket right now, request more memory from the
     * system.
     */
    if (nextf[bucket] == NULL)
	morecore(bucket);
    if ((p = nextf[bucket]) == NULL) {
	/* NOTE(review): child is bumped before dying — presumably to keep
	 * signal handling from resuming the shell; confirm against sh.c. */
	child++;
#ifndef DEBUG
	out_of_memory();
#else
	showall(NULL, NULL);
	xprintf(CGETS(19, 1, "nbytes=%zu: Out of memory\n"), nbytes);
	abort();
#endif
	/* fool lint */
	return ((memalign_t) 0);
    }
    /* remove from linked list */
    nextf[bucket] = nextf[bucket]->ov_next;
    p->ov_magic = MAGIC;
    p->ov_index = bucket;
    nmalloc[bucket]++;
#ifdef RCHECK
    /*
     * Record allocated size of block and bound space with magic numbers.
     * ov_size is a U_short, so it is only kept for buckets <= 13 (sizes
     * that fit in two bytes); free() applies the same cutoff.
     */
    p->ov_size = (p->ov_index <= 13) ? nbytes - 1 : 0;
    p->ov_rmagic = RMAGIC;
    /* Trailing magic word sits in the RSLOP bytes at the block's end. */
    *((U_int *) (((caddr_t) p) + nbytes - RSLOP)) = RMAGIC;
#endif
    return ((memalign_t) (((caddr_t) p) + MEMALIGN(sizeof(union overhead))));
#else
    if (nbytes)
	return ((memalign_t) 0);
    else
	return ((memalign_t) 0);
#endif /* !lint */
}
  214. #ifndef lint
  215. /*
  216. * Allocate more memory to the indicated bucket.
  217. */
/*
 * Allocate more memory to the indicated bucket.  Extends the break with
 * sbrk(), carves the new arena into 2^(bucket+3)-byte blocks and threads
 * them onto nextf[bucket].  On sbrk() failure it returns with the bucket
 * still empty; malloc() detects and reports that.  Also maintains the
 * membot/memtop bounds used by free()'s sanity checks.
 */
static void
morecore(int bucket)
{
    union overhead *op;
    int rnu;			/* 2^rnu bytes will be requested */
    int nblks;			/* become nblks blocks of the desired size */
    int siz;

    if (nextf[bucket])
	return;
    /*
     * Insure memory is allocated on a page boundary. Should make getpageize
     * call?
     */
    op = (union overhead *) sbrk(0);
    memtop = (char *) op;
    if (membot == NULL)
	membot = memtop;
    /* First advance the break to the next 1k boundary. */
    if ((long) op & 0x3ff) {
	memtop = sbrk((int) (1024 - ((long) op & 0x3ff)));
	memtop += (long) (1024 - ((long) op & 0x3ff));
    }

    /* take 2k unless the block is bigger than that */
    rnu = (bucket <= 8) ? 11 : bucket + 3;
    nblks = 1 << (rnu - (bucket + 3));	/* how many blocks to get */
    memtop = sbrk(1 << rnu);		/* PWP */
    op = (union overhead *) memtop;
    /* no more room! */
    if ((long) op == -1)
	return;
    memtop += (long) (1 << rnu);
    /*
     * Round up to minimum allocation size boundary and deduct from block count
     * to reflect.
     */
    if (((U_long) op) & ROUNDUP) {
	op = (union overhead *) (((U_long) op + (ROUNDUP + 1)) & ~ROUNDUP);
	nblks--;
    }
    /*
     * Add new memory allocated to that on free list for this hash bucket.
     */
    nextf[bucket] = op;
    siz = 1 << (bucket + 3);
    while (--nblks > 0) {
	op->ov_next = (union overhead *) (((caddr_t) op) + siz);
	op = (union overhead *) (((caddr_t) op) + siz);
    }
    op->ov_next = NULL;
}
  267. #endif
/*
 * Return a block to its bucket's free list.  The pointer is validated
 * against the [membot, memtop] arena bounds, the MAGIC header byte, the
 * RCHECK trailing magic word and the bucket index before being linked
 * back; CHECK() aborts (DEBUG builds) or bails out on a bad pointer.
 * NULL and the dont_free escape hatch are silently ignored.
 */
void
free(ptr_t cp)
{
#ifndef lint
    int size;
    union overhead *op;

    /*
     * the don't free flag is there so that we avoid os bugs in routines
     * that free invalid pointers!
     */
    if (cp == NULL || dont_free)
	return;
    CHECK(!memtop || !membot,
	  CGETS(19, 2, "free(%p) called before any allocations."), cp);
    CHECK(cp > (ptr_t) memtop,
	  CGETS(19, 3, "free(%p) above top of memory."), cp);
    CHECK(cp < (ptr_t) membot,
	  CGETS(19, 4, "free(%p) below bottom of memory."), cp);
    /* Step back over the bookkeeping header to reach the overhead union. */
    op = (union overhead *) (((caddr_t) cp) - MEMALIGN(sizeof(union overhead)));
    CHECK(op->ov_magic != MAGIC,
	  CGETS(19, 5, "free(%p) bad block."), cp);

#ifdef RCHECK
    /* ov_size (and thus the trailer check) only tracked for buckets <= 13. */
    if (op->ov_index <= 13)
	CHECK(*(U_int *) ((caddr_t) op + op->ov_size + 1 - RSLOP) != RMAGIC,
	      CGETS(19, 6, "free(%p) bad range check."), cp);
#endif
    CHECK(op->ov_index >= NBUCKETS,
	  CGETS(19, 7, "free(%p) bad block index."), cp);
    size = op->ov_index;
    /* Push onto the head of the bucket's free list. */
    op->ov_next = nextf[size];
    nextf[size] = op;
    nmalloc[size]--;
#else
    if (cp == NULL)
	return;
#endif
}
  305. memalign_t
  306. calloc(size_t i, size_t j)
  307. {
  308. #ifndef lint
  309. char *cp;
  310. i *= j;
  311. cp = xmalloc(i);
  312. memset(cp, 0, i);
  313. return ((memalign_t) cp);
  314. #else
  315. if (i && j)
  316. return ((memalign_t) 0);
  317. else
  318. return ((memalign_t) 0);
  319. #endif
  320. }
  321. /*
  322. * When a program attempts "storage compaction" as mentioned in the
  323. * old malloc man page, it realloc's an already freed block. Usually
  324. * this is the last block it freed; occasionally it might be farther
  325. * back. We have to search all the free lists for the block in order
  326. * to determine its bucket: 1st we make one pass thru the lists
  327. * checking only the first block in each; if that fails we search
  328. * ``realloc_srchlen'' blocks in each list for a match (the variable
  329. * is extern so the caller can modify it). If that fails we just copy
  330. * however many bytes was given to realloc() and hope it's not huge.
  331. */
  332. #ifndef lint
  333. /* 4 should be plenty, -1 =>'s whole list */
  334. static int realloc_srchlen = 4;
  335. #endif /* lint */
/*
 * Resize cp to nbytes.  Tolerates cp having already been free'd (the old
 * "storage compaction" idiom described above): findbucket() recovers the
 * bucket, falling back to the smallest size if not found.  When the new
 * size still fits the block's current bucket the block is reused in
 * place; otherwise a new block is allocated and the smaller of the old
 * and new sizes is copied over.  realloc(NULL, n) degenerates to malloc.
 */
memalign_t
realloc(ptr_t cp, size_t nbytes)
{
#ifndef lint
    U_int onb;
    union overhead *op;
    ptr_t res;
    int i;
    int was_alloced = 0;

    if (cp == NULL)
	return (malloc(nbytes));
    op = (union overhead *) (((caddr_t) cp) - MEMALIGN(sizeof(union overhead)));
    if (op->ov_magic == MAGIC) {
	was_alloced++;
	i = op->ov_index;
    }
    else
	/*
	 * Already free, doing "compaction".
	 *
	 * Search for the old block of memory on the free list. First, check the
	 * most common case (last element free'd), then (this failing) the last
	 * ``realloc_srchlen'' items free'd. If all lookups fail, then assume
	 * the size of the memory block being realloc'd is the smallest
	 * possible.
	 */
	if ((i = findbucket(op, 1)) < 0 &&
	    (i = findbucket(op, realloc_srchlen)) < 0)
	    i = 0;

    onb = MEMALIGN(nbytes + MEMALIGN(sizeof(union overhead)) + RSLOP);

    /* avoid the copy if same size block: new total size must fall within
     * (2^(i+2), 2^(i+3)], i.e. the bucket the block already occupies. */
    if (was_alloced && (onb <= (U_int) (1 << (i + 3))) &&
	(onb > (U_int) (1 << (i + 2)))) {
#ifdef RCHECK
	/* JMR: formerly this wasn't updated ! */
	nbytes = MEMALIGN(MEMALIGN(sizeof(union overhead))+nbytes+RSLOP);
	*((U_int *) (((caddr_t) op) + nbytes - RSLOP)) = RMAGIC;
	op->ov_rmagic = RMAGIC;
	op->ov_size = (op->ov_index <= 13) ? nbytes - 1 : 0;
#endif
	return ((memalign_t) cp);
    }
    if ((res = malloc(nbytes)) == NULL)
	return ((memalign_t) NULL);
    if (cp != res) {		/* common optimization */
	/*
	 * christos: this used to copy nbytes! It should copy the
	 * smaller of the old and new size
	 */
	onb = (1 << (i + 3)) - MEMALIGN(sizeof(union overhead)) - RSLOP;
	(void) memmove(res, cp, onb < nbytes ? onb : nbytes);
    }
    if (was_alloced)
	free(cp);
    return ((memalign_t) res);
#else
    if (cp && nbytes)
	return ((memalign_t) 0);
    else
	return ((memalign_t) 0);
#endif /* !lint */
}
  398. /*
  399. * On linux, _nss_nis_setnetgrent() calls this function to determine
  400. * the usable size of the pointer passed, but this is not a portable
  401. * API, so we cannot use our malloc replacement without providing one.
  402. * Thanks a lot glibc!
  403. */
  404. #ifdef __linux__
  405. #define M_U_S_CONST
  406. #else
  407. #define M_U_S_CONST
  408. #endif
  409. size_t malloc_usable_size(M_U_S_CONST void *);
  410. size_t
  411. malloc_usable_size(M_U_S_CONST void *ptr)
  412. {
  413. const union overhead *op = (const union overhead *)
  414. (((const char *) ptr) - MEMALIGN(sizeof(*op)));
  415. if (op->ov_magic == MAGIC)
  416. return 1 << (op->ov_index + 2);
  417. else
  418. return 0;
  419. }
  420. #ifndef lint
  421. /*
  422. * Search ``srchlen'' elements of each free list for a block whose
  423. * header starts at ``freep''. If srchlen is -1 search the whole list.
  424. * Return bucket number, or -1 if not found.
  425. */
  426. static int
  427. findbucket(union overhead *freep, int srchlen)
  428. {
  429. union overhead *p;
  430. size_t i;
  431. int j;
  432. for (i = 0; i < NBUCKETS; i++) {
  433. j = 0;
  434. for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
  435. if (p == freep)
  436. return (i);
  437. j++;
  438. }
  439. }
  440. return (-1);
  441. }
  442. #endif
  443. #else /* SYSMALLOC */
  444. /**
  445. ** ``Protected versions'' of malloc, realloc, calloc, and free
  446. **
  447. ** On many systems:
  448. **
  449. ** 1. malloc(0) is bad
  450. ** 2. free(0) is bad
  451. ** 3. realloc(0, n) is bad
  452. ** 4. realloc(n, 0) is bad
  453. **
  454. ** Also we call our error routine if we run out of memory.
  455. **/
  456. memalign_t
  457. smalloc(size_t n)
  458. {
  459. ptr_t ptr;
  460. n = n ? n : 1;
  461. #ifdef HAVE_SBRK
  462. if (membot == NULL)
  463. membot = sbrk(0);
  464. #endif /* HAVE_SBRK */
  465. if ((ptr = malloc(n)) == NULL)
  466. out_of_memory();
  467. #ifndef HAVE_SBRK
  468. if (memtop < ((char *) ptr) + n)
  469. memtop = ((char *) ptr) + n;
  470. if (membot == NULL)
  471. membot = ptr;
  472. #endif /* !HAVE_SBRK */
  473. return ((memalign_t) ptr);
  474. }
  475. memalign_t
  476. srealloc(ptr_t p, size_t n)
  477. {
  478. ptr_t ptr;
  479. n = n ? n : 1;
  480. #ifdef HAVE_SBRK
  481. if (membot == NULL)
  482. membot = sbrk(0);
  483. #endif /* HAVE_SBRK */
  484. if ((ptr = (p ? realloc(p, n) : malloc(n))) == NULL)
  485. out_of_memory();
  486. #ifndef HAVE_SBRK
  487. if (memtop < ((char *) ptr) + n)
  488. memtop = ((char *) ptr) + n;
  489. if (membot == NULL)
  490. membot = ptr;
  491. #endif /* !HAVE_SBRK */
  492. return ((memalign_t) ptr);
  493. }
  494. memalign_t
  495. scalloc(size_t s, size_t n)
  496. {
  497. ptr_t ptr;
  498. n *= s;
  499. n = n ? n : 1;
  500. #ifdef HAVE_SBRK
  501. if (membot == NULL)
  502. membot = sbrk(0);
  503. #endif /* HAVE_SBRK */
  504. if ((ptr = malloc(n)) == NULL)
  505. out_of_memory();
  506. memset (ptr, 0, n);
  507. #ifndef HAVE_SBRK
  508. if (memtop < ((char *) ptr) + n)
  509. memtop = ((char *) ptr) + n;
  510. if (membot == NULL)
  511. membot = ptr;
  512. #endif /* !HAVE_SBRK */
  513. return ((memalign_t) ptr);
  514. }
  515. void
  516. sfree(ptr_t p)
  517. {
  518. if (p && !dont_free)
  519. free(p);
  520. }
  521. #endif /* SYSMALLOC */
  522. /*
  523. * mstats - print out statistics about malloc
  524. *
  525. * Prints two lines of numbers, one showing the length of the free list
  526. * for each size category, the second showing the number of mallocs -
  527. * frees for each size category.
  528. */
/*ARGSUSED*/
/*
 * Builtin backend: print malloc statistics.  With the private allocator,
 * walks every bucket's free list for the free/used per-size tables; under
 * SYSMALLOC it falls back to mallinfo() or raw sbrk() bounds.  v and c
 * are the usual builtin-command arguments and are unused.
 */
void
showall(Char **v, struct command *c)
{
#ifndef SYSMALLOC
    size_t i, j;
    union overhead *p;
    int totfree = 0, totused = 0;

    xprintf(CGETS(19, 8, "%s current memory allocation:\nfree:\t"), progname);
    /* Count free blocks per bucket by walking each free list. */
    for (i = 0; i < NBUCKETS; i++) {
	for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
	    continue;
	xprintf(" %4zd", j);
	totfree += j * (1 << (i + 3));
    }
    xprintf("\n%s:\t", CGETS(19, 9, "used"));
    /* nmalloc[i] is mallocs minus frees, i.e. blocks still in use. */
    for (i = 0; i < NBUCKETS; i++) {
	xprintf(" %4d", nmalloc[i]);
	totused += nmalloc[i] * (1 << (i + 3));
    }
    xprintf(CGETS(19, 10, "\n\tTotal in use: %d, total free: %d\n"),
	    totused, totfree);
    xprintf(CGETS(19, 11,
		  "\tAllocated memory from 0x%lx to 0x%lx. Real top at 0x%lx\n"),
	    (unsigned long) membot, (unsigned long) memtop,
	    (unsigned long) sbrk(0));
#else /* SYSMALLOC */
#ifndef HAVE_MALLINFO
#ifdef HAVE_SBRK
    memtop = sbrk(0);
#endif /* HAVE_SBRK */
    xprintf(CGETS(19, 12, "Allocated memory from 0x%lx to 0x%lx (%ld).\n"),
	    (unsigned long) membot, (unsigned long) memtop,
	    (unsigned long) (memtop - membot));
#else /* HAVE_MALLINFO */
    struct mallinfo mi;

    mi = mallinfo();
    xprintf(CGETS(19, 13, "%s current memory allocation:\n"), progname);
    xprintf(CGETS(19, 14, "Total space allocated from system: %d\n"), mi.arena);
    xprintf(CGETS(19, 15, "Number of non-inuse chunks: %d\n"), mi.ordblks);
    xprintf(CGETS(19, 16, "Number of mmapped regions: %d\n"), mi.hblks);
    xprintf(CGETS(19, 17, "Total space in mmapped regions: %d\n"), mi.hblkhd);
    xprintf(CGETS(19, 18, "Total allocated space: %d\n"), mi.uordblks);
    xprintf(CGETS(19, 19, "Total non-inuse space: %d\n"), mi.fordblks);
    xprintf(CGETS(19, 20, "Top-most, releasable space: %d\n"), mi.keepcost);
#endif /* HAVE_MALLINFO */
#endif /* SYSMALLOC */
    USE(c);
    USE(v);
}