
/vendor/gc/allchblk.c

http://github.com/feyeleanor/RubyGoLightly
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1998-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose,  provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

/* #define DEBUG */
#include <stdio.h>
#include "private/gc_priv.h"

GC_bool GC_use_entire_heap = 0;

/*
 * Free heap blocks are kept on one of several free lists,
 * depending on the size of the block.  Each free list is doubly linked.
 * Adjacent free blocks are coalesced.
 */

# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
		/* largest block we will allocate starting on a black   */
		/* listed block.  Must be >= HBLKSIZE.			*/


# define UNIQUE_THRESHOLD 32
	/* Sizes up to this many HBLKs each have their own free list    */
# define HUGE_THRESHOLD 256
	/* Sizes of at least this many heap blocks are mapped to a	*/
	/* single free list.						*/
# define FL_COMPRESSION 8
	/* In between sizes map this many distinct sizes to a single	*/
	/* bin.								*/

# define N_HBLK_FLS (HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
				 + UNIQUE_THRESHOLD

struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };

#ifndef USE_MUNMAP

  word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
	/* Number of free bytes on each list.	*/

  /* Return the largest n such that GC_large_allocd_bytes plus the	*/
  /* number of free bytes on lists n .. N_HBLK_FLS is at least		*/
  /* GC_max_large_allocd_bytes.  If there is no such n, return 0.	*/
# ifdef __GNUC__
  __inline__
# endif
  static int GC_enough_large_bytes_left(void)
  {
    int n;
    word bytes = GC_large_allocd_bytes;

    GC_ASSERT(GC_max_large_allocd_bytes <= GC_heapsize);
    for (n = N_HBLK_FLS; n >= 0; --n) {
	bytes += GC_free_bytes[n];
	if (bytes >= GC_max_large_allocd_bytes) return n;
    }
    return 0;
  }

# define INCR_FREE_BYTES(n, b) GC_free_bytes[n] += (b);

# define FREE_ASSERT(e) GC_ASSERT(e)

#else /* USE_MUNMAP */

# define INCR_FREE_BYTES(n, b)
# define FREE_ASSERT(e)

#endif /* USE_MUNMAP */

/* Map a number of blocks to the appropriate large block free list index. */
int GC_hblk_fl_from_blocks(word blocks_needed)
{
    if (blocks_needed <= UNIQUE_THRESHOLD) return (int)blocks_needed;
    if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
    return (int)(blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
					+ UNIQUE_THRESHOLD;
}
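
/*
 * Illustrative sketch, not part of the collector: the three-zone
 * binning above, restated as a standalone demo.  The *_DEMO names are
 * made up here; they just mirror the thresholds defined above.  Left
 * inactive so the file's behavior is unchanged.
 */
#if 0
#include <stdio.h>

#define UNIQUE_THRESHOLD_DEMO 32
#define HUGE_THRESHOLD_DEMO 256
#define FL_COMPRESSION_DEMO 8
#define N_HBLK_FLS_DEMO \
	((HUGE_THRESHOLD_DEMO - UNIQUE_THRESHOLD_DEMO)/FL_COMPRESSION_DEMO \
	 + UNIQUE_THRESHOLD_DEMO)

static int demo_fl_from_blocks(unsigned long blocks)
{
    if (blocks <= UNIQUE_THRESHOLD_DEMO) return (int)blocks;   /* exact bins */
    if (blocks >= HUGE_THRESHOLD_DEMO) return N_HBLK_FLS_DEMO; /* one huge bin */
    return (int)((blocks - UNIQUE_THRESHOLD_DEMO)/FL_COMPRESSION_DEMO)
	   + UNIQUE_THRESHOLD_DEMO;			       /* 8-wide bins */
}

int main(void)
{
    /* 1..32 map to themselves; 33..255 share bins of width 8; 256 and	*/
    /* up all share the last bin (index 60 with these thresholds).	*/
    printf("%d %d %d %d\n",
	   demo_fl_from_blocks(1),    /* -> 1  */
	   demo_fl_from_blocks(33),   /* -> 32 */
	   demo_fl_from_blocks(40),   /* -> 33 */
	   demo_fl_from_blocks(300)); /* -> 60 */
    return 0;
}
#endif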

# define PHDR(hhdr) HDR(hhdr -> hb_prev)
# define NHDR(hhdr) HDR(hhdr -> hb_next)

# ifdef USE_MUNMAP
#   define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
# else  /* !USE_MUNMAP */
#   define IS_MAPPED(hhdr) 1
# endif /* USE_MUNMAP */

# if !defined(NO_DEBUGGING)
void GC_print_hblkfreelist()
{
    struct hblk * h;
    word total_free = 0;
    hdr * hhdr;
    word sz;
    unsigned i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
#     ifdef USE_MUNMAP
        if (0 != h) GC_printf("Free list %lu:\n",
		              (unsigned long)i);
#     else
        if (0 != h) GC_printf("Free list %lu (Total size %lu):\n",
		              (unsigned long)i,
			      (unsigned long)GC_free_bytes[i]);
#     endif
      while (h != 0) {
        hhdr = HDR(h);
        sz = hhdr -> hb_sz;
    	GC_printf("\t%p size %lu ", h, (unsigned long)sz);
    	total_free += sz;
        if (GC_is_black_listed(h, HBLKSIZE) != 0) {
             GC_printf("start black listed\n");
        } else if (GC_is_black_listed(h, hhdr -> hb_sz) != 0) {
             GC_printf("partially black listed\n");
        } else {
             GC_printf("not black listed\n");
        }
        h = hhdr -> hb_next;
      }
    }
#   ifndef USE_MUNMAP
      if (total_free != GC_large_free_bytes) {
	GC_printf("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
		  (unsigned long) GC_large_free_bytes);
      }
#   endif
    GC_printf("Total of %lu bytes on free list\n", (unsigned long)total_free);
}

/* Return the free list index on which the block described by the header */
/* appears, or -1 if it appears nowhere.				  */
int free_list_index_of(hdr *wanted)
{
    struct hblk * h;
    hdr * hhdr;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        hhdr = HDR(h);
	if (hhdr == wanted) return i;
        h = hhdr -> hb_next;
      }
    }
    return -1;
}

void GC_dump_regions()
{
    unsigned i;
    ptr_t start, end;
    ptr_t p;
    size_t bytes;
    hdr *hhdr;
    for (i = 0; i < GC_n_heap_sects; ++i) {
	start = GC_heap_sects[i].hs_start;
	bytes = GC_heap_sects[i].hs_bytes;
	end = start + bytes;
	/* Merge in contiguous sections.	*/
	  while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
	    ++i;
	    end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
	  }
	GC_printf("***Section from %p to %p\n", start, end);
	for (p = start; p < end;) {
	    hhdr = HDR(p);
	    GC_printf("\t%p ", p);
	    if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
		GC_printf("Missing header!!(%p)\n", (void *)hhdr);
		p += HBLKSIZE;
		continue;
	    }
	    if (HBLK_IS_FREE(hhdr)) {
                int correct_index = GC_hblk_fl_from_blocks(
					divHBLKSZ(hhdr -> hb_sz));
	        int actual_index;

		GC_printf("\tfree block of size 0x%lx bytes",
			  (unsigned long)(hhdr -> hb_sz));
	 	if (IS_MAPPED(hhdr)) {
		    GC_printf("\n");
		} else {
		    GC_printf("(unmapped)\n");
		}
		actual_index = free_list_index_of(hhdr);
		if (-1 == actual_index) {
		    GC_printf("\t\tBlock not on free list %d!!\n",
			      correct_index);
		} else if (correct_index != actual_index) {
		    GC_printf("\t\tBlock on list %d, should be on %d!!\n",
			      actual_index, correct_index);
		}
		p += hhdr -> hb_sz;
	    } else {
		GC_printf("\tused for blocks of size 0x%lx bytes\n",
			  (unsigned long)(hhdr -> hb_sz));
		p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
	    }
	}
    }
}

# endif /* NO_DEBUGGING */

/* Initialize hdr for a block containing the indicated size and	*/
/* kind of objects.							*/
/* Return FALSE on failure.						*/
static GC_bool setup_header(hdr * hhdr, struct hblk *block, size_t byte_sz,
			    int kind, unsigned flags)
{
    word descr;
    size_t granules;

    /* Set size, kind and mark proc fields */
      hhdr -> hb_sz = byte_sz;
      hhdr -> hb_obj_kind = (unsigned char)kind;
      hhdr -> hb_flags = (unsigned char)flags;
      hhdr -> hb_block = block;
      descr = GC_obj_kinds[kind].ok_descriptor;
      if (GC_obj_kinds[kind].ok_relocate_descr) descr += byte_sz;
      hhdr -> hb_descr = descr;

#   ifdef MARK_BIT_PER_OBJ
     /* Set hb_inv_sz as portably as possible.				*/
     /* We set it to the smallest value such that sz * inv_sz > 2**32.	*/
     /* This may be more precision than necessary.			*/
      if (byte_sz > MAXOBJBYTES) {
	 hhdr -> hb_inv_sz = LARGE_INV_SZ;
      } else {
	word inv_sz;

#       if CPP_WORDSZ == 64
          inv_sz = ((word)1 << 32)/byte_sz;
	  if (((inv_sz*byte_sz) >> 32) == 0) ++inv_sz;
#	else  /* 32 bit words */
	  GC_ASSERT(byte_sz >= 4);
	  inv_sz = ((unsigned)1 << 31)/byte_sz;
	  inv_sz *= 2;
	  while (inv_sz*byte_sz > byte_sz) ++inv_sz;
#	endif
	hhdr -> hb_inv_sz = inv_sz;
      }
#   else /* MARK_BIT_PER_GRANULE */
      hhdr -> hb_large_block = (unsigned char)(byte_sz > MAXOBJBYTES);
      granules = BYTES_TO_GRANULES(byte_sz);
      if (EXPECT(!GC_add_map_entry(granules), FALSE)) {
        /* Make it look like a valid block. */
        hhdr -> hb_sz = HBLKSIZE;
        hhdr -> hb_descr = 0;
        hhdr -> hb_large_block = TRUE;
        hhdr -> hb_map = 0;
        return FALSE;
      } else {
        size_t index = (hhdr -> hb_large_block? 0 : granules);
        hhdr -> hb_map = GC_obj_map[index];
      }
#   endif /* MARK_BIT_PER_GRANULE */

    /* Clear mark bits */
      GC_clear_hdr_marks(hhdr);

    hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
    return(TRUE);
}
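
/*
 * Illustrative sketch, not part of the collector: how an inverse size
 * like hb_inv_sz above replaces the division offset/sz with a multiply
 * and a shift.  Assumes 64-bit intermediates are available; the object
 * size and offset range are made-up demo values.  Left inactive.
 */
#if 0
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t sz = 48;	/* demo object size in bytes */
    /* Smallest inv with sz * inv > 2**32, per the comment above. */
    uint64_t inv = (((uint64_t)1 << 32)/sz) + 1;
    uint32_t offset;

    /* For any offset within a block, multiply-and-shift equals divide. */
    for (offset = 0; offset < 4096; ++offset) {
	assert((uint32_t)(((uint64_t)offset * inv) >> 32) == offset/sz);
    }
    return 0;
}
#endif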

#define FL_UNKNOWN -1
/*
 * Remove hhdr from the appropriate free list.
 * We assume it is on the nth free list, or on the size
 * appropriate free list if n is FL_UNKNOWN.
 */
void GC_remove_from_fl(hdr *hhdr, int n)
{
    int index;

    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
#   ifndef USE_MUNMAP
      /* We always need index to maintain free counts.	*/
      if (FL_UNKNOWN == n) {
          index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
      } else {
	  index = n;
      }
#   endif
    if (hhdr -> hb_prev == 0) {
#	ifdef USE_MUNMAP
	  if (FL_UNKNOWN == n) {
            index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
	  } else {
	    index = n;
	  }
#	endif
	GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
	GC_hblkfreelist[index] = hhdr -> hb_next;
    } else {
	hdr *phdr;
	GET_HDR(hhdr -> hb_prev, phdr);
	phdr -> hb_next = hhdr -> hb_next;
    }
    FREE_ASSERT(GC_free_bytes[index] >= hhdr -> hb_sz);
    INCR_FREE_BYTES(index, - (signed_word)(hhdr -> hb_sz));
    if (0 != hhdr -> hb_next) {
	hdr * nhdr;
	GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
	GET_HDR(hhdr -> hb_next, nhdr);
	nhdr -> hb_prev = hhdr -> hb_prev;
    }
}

/*
 * Return a pointer to the free block ending just before h, if any.
 */
struct hblk * GC_free_block_ending_at(struct hblk *h)
{
    struct hblk * p = h - 1;
    hdr * phdr;

    GET_HDR(p, phdr);
    while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
	p = FORWARDED_ADDR(p,phdr);
	phdr = HDR(p);
    }
    if (0 != phdr) {
        if(HBLK_IS_FREE(phdr)) {
	    return p;
	} else {
	    return 0;
	}
    }
    p = GC_prev_block(h - 1);
    if (0 != p) {
      phdr = HDR(p);
      if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
	return p;
      }
    }
    return 0;
}

/*
 * Add hhdr to the appropriate free list.
 * We maintain individual free lists sorted by address.
 */
void GC_add_to_fl(struct hblk *h, hdr *hhdr)
{
    int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
    struct hblk *second = GC_hblkfreelist[index];
    hdr * second_hdr;
#   if defined(GC_ASSERTIONS) && !defined(USE_MUNMAP)
      struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
      hdr * nexthdr = HDR(next);
      struct hblk *prev = GC_free_block_ending_at(h);
      hdr * prevhdr = HDR(prev);
      GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr)
                || (signed_word)GC_heapsize < 0);
		/* In the last case, blocks may be too large to merge. */
      GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr)
      		|| (signed_word)GC_heapsize < 0);
#   endif
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    GC_hblkfreelist[index] = h;
    INCR_FREE_BYTES(index, hhdr -> hb_sz);
    FREE_ASSERT(GC_free_bytes[index] <= GC_large_free_bytes)
    hhdr -> hb_next = second;
    hhdr -> hb_prev = 0;
    if (0 != second) {
      GET_HDR(second, second_hdr);
      second_hdr -> hb_prev = h;
    }
    hhdr -> hb_flags |= FREE_BLK;
}
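
/*
 * Illustrative sketch, not part of the collector: the intrusive
 * doubly-linked head insertion and unlinking that GC_add_to_fl and
 * GC_remove_from_fl perform, restated on a made-up toy node type.
 * Left inactive.
 */
#if 0
struct demo_blk {
    struct demo_blk *prev, *next;
};

static struct demo_blk *demo_fl[4];	/* stand-in for GC_hblkfreelist */

static void demo_push(int index, struct demo_blk *b)
{
    struct demo_blk *second = demo_fl[index];

    b -> prev = 0;			/* new head has no predecessor */
    b -> next = second;
    if (second != 0) second -> prev = b;
    demo_fl[index] = b;
}

static void demo_unlink(int index, struct demo_blk *b)
{
    if (b -> prev == 0) {
	demo_fl[index] = b -> next;	/* b was the head */
    } else {
	b -> prev -> next = b -> next;
    }
    if (b -> next != 0) b -> next -> prev = b -> prev;
}
#endif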

#ifdef USE_MUNMAP

/* Unmap blocks that haven't been recently touched.  This is the only	*/
/* way blocks are ever unmapped.					*/
void GC_unmap_old(void)
{
    struct hblk * h;
    hdr * hhdr;
    word sz;
    unsigned short last_rec, threshold;
    int i;
#   ifndef MUNMAP_THRESHOLD
#     define MUNMAP_THRESHOLD 6
#   endif

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
        hhdr = HDR(h);
	if (!IS_MAPPED(hhdr)) continue;
	threshold = (unsigned short)(GC_gc_no - MUNMAP_THRESHOLD);
	last_rec = hhdr -> hb_last_reclaimed;
	if ((last_rec > GC_gc_no || last_rec < threshold)
	    && threshold < GC_gc_no /* not recently wrapped */) {
          sz = hhdr -> hb_sz;
	  GC_unmap((ptr_t)h, sz);
	  hhdr -> hb_flags |= WAS_UNMAPPED;
    	}
      }
    }
}
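
/*
 * Illustrative sketch, not part of the collector: the wrap-tolerant
 * age test above, checked with concrete numbers.  hb_last_reclaimed
 * and threshold are 16 bits wide while the collection counter keeps
 * growing, so the window test must tolerate 16-bit wraparound.  The
 * numbers are made up.  Left inactive.
 */
#if 0
#include <assert.h>

int main(void)
{
    unsigned long gc_no = 70000;	/* demo collection number */
    unsigned short threshold = (unsigned short)(gc_no - 6);
    unsigned short last_rec = (unsigned short)(gc_no - 100);

    /* Untouched for 100 collections: outside the window, unmap it. */
    assert((last_rec > gc_no || last_rec < threshold) && threshold < gc_no);

    /* Swept 2 collections ago: inside the window, keep it mapped. */
    last_rec = (unsigned short)(gc_no - 2);
    assert(!(last_rec > gc_no || last_rec < threshold));
    return 0;
}
#endif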

/* Merge all unmapped blocks that are adjacent to other free		*/
/* blocks.  This may involve remapping, since all blocks are either	*/
/* fully mapped or fully unmapped.					*/
void GC_merge_unmapped(void)
{
    struct hblk * h, *next;
    hdr * hhdr, *nexthdr;
    word size, nextsize;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
	GET_HDR(h, hhdr);
	size = hhdr->hb_sz;
	next = (struct hblk *)((word)h + size);
	GET_HDR(next, nexthdr);
	/* Coalesce with successor, if possible */
	  if (0 != nexthdr && HBLK_IS_FREE(nexthdr)
	      && (signed_word) (size + (nextsize = nexthdr->hb_sz)) > 0
	         /* no pot. overflow */) {
	    if (IS_MAPPED(hhdr)) {
	      GC_ASSERT(!IS_MAPPED(nexthdr));
	      /* make both consistent, so that we can merge */
	        if (size > nextsize) {
		  GC_remap((ptr_t)next, nextsize);
		} else {
		  GC_unmap((ptr_t)h, size);
		  hhdr -> hb_flags |= WAS_UNMAPPED;
		}
	    } else if (IS_MAPPED(nexthdr)) {
	      GC_ASSERT(!IS_MAPPED(hhdr));
	      if (size > nextsize) {
		GC_unmap((ptr_t)next, nextsize);
	      } else {
		GC_remap((ptr_t)h, size);
		hhdr -> hb_flags &= ~WAS_UNMAPPED;
		hhdr -> hb_last_reclaimed = nexthdr -> hb_last_reclaimed;
	      }
	    } else {
	      /* Unmap any gap in the middle */
		GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz);
	    }
	    /* If they are both unmapped, we merge, but leave unmapped. */
	    GC_remove_from_fl(hhdr, i);
	    GC_remove_from_fl(nexthdr, FL_UNKNOWN);
	    hhdr -> hb_sz += nexthdr -> hb_sz;
	    GC_remove_header(next);
	    GC_add_to_fl(h, hhdr);
	    /* Start over at beginning of list */
	    h = GC_hblkfreelist[i];
	  } else /* not mergeable with successor */ {
	    h = hhdr -> hb_next;
	  }
      } /* while (h != 0) ... */
    } /* for ... */
}

#endif /* USE_MUNMAP */
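
/*
 * Illustrative sketch, not part of the collector: the overflow guard
 * used before coalescing, above and in GC_freehblk below.  Casting the
 * summed sizes to a signed word and requiring a positive result
 * rejects merges whose combined size would wrap into the sign bit.
 * Sizes are made up; assumes the usual two's-complement conversion.
 * Left inactive.
 */
#if 0
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uintptr_t huge = ((uintptr_t)1 << (sizeof(uintptr_t)*8 - 1)) - 4096;
    uintptr_t sz_a = 2*4096, sz_b = 3*4096;

    assert((intptr_t)(sz_a + sz_b) > 0);    /* ordinary merge: allowed   */
    assert(!((intptr_t)(huge + huge) > 0)); /* would wrap: merge refused */
    return 0;
}
#endif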

/*
 * Return a pointer to a block starting at h of length bytes.
 * Memory for the block is mapped.
 * Remove the block from its free list, and return the remainder (if any)
 * to its appropriate free list.
 * May fail by returning 0.
 * The header for the returned block must be set up by the caller.
 * If the return value is not 0, then hhdr is the header for it.
 */
struct hblk * GC_get_first_part(struct hblk *h, hdr *hhdr,
			        size_t bytes, int index)
{
    word total_size = hhdr -> hb_sz;
    struct hblk * rest;
    hdr * rest_hdr;

    GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
    GC_remove_from_fl(hhdr, index);
    if (total_size == bytes) return h;
    rest = (struct hblk *)((word)h + bytes);
    rest_hdr = GC_install_header(rest);
    if (0 == rest_hdr) {
	/* FIXME: This is likely to be very bad news ... */
	WARN("Header allocation failed: Dropping block.\n", 0);
	return(0);
    }
    rest_hdr -> hb_sz = total_size - bytes;
    rest_hdr -> hb_flags = 0;
#   ifdef GC_ASSERTIONS
      /* Mark h not free, to avoid assertion about adjacent free blocks. */
        hhdr -> hb_flags &= ~FREE_BLK;
#   endif
    GC_add_to_fl(rest, rest_hdr);
    return h;
}
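
/*
 * Illustrative sketch, not part of the collector: the prefix split
 * GC_get_first_part performs, reduced to size arithmetic.  The block
 * and request sizes are made-up multiples of a 4 KB block size.
 * Left inactive.
 */
#if 0
#include <assert.h>
#include <stddef.h>

int main(void)
{
    size_t hblksize   = 4096;
    size_t total_size = 16 * hblksize;	    /* free block found on a list */
    size_t bytes      = 5 * hblksize;	    /* caller's request		  */
    size_t rest       = total_size - bytes; /* returned to a free list	  */

    assert(rest == 11 * hblksize);
    assert(rest % hblksize == 0);	/* both pieces stay block-aligned */
    return 0;
}
#endif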

/*
 * H is a free block.  N points at an address inside it.
 * A new header for n has already been set up.  Fix up h's header
 * to reflect the fact that it is being split, move it to the
 * appropriate free list.
 * N replaces h in the original free list.
 *
 * Nhdr is not completely filled in, since it is about to be allocated.
 * It may in fact end up on the wrong free list for its size.
 * (Hence adding it to a free list is silly.  But this path is hopefully
 * rare enough that it doesn't matter.  The code is cleaner this way.)
 */
void GC_split_block(struct hblk *h, hdr *hhdr, struct hblk *n,
		    hdr *nhdr, int index /* Index of free list */)
{
    word total_size = hhdr -> hb_sz;
    word h_size = (word)n - (word)h;
    struct hblk *prev = hhdr -> hb_prev;
    struct hblk *next = hhdr -> hb_next;

    /* Replace h with n on its freelist */
      nhdr -> hb_prev = prev;
      nhdr -> hb_next = next;
      nhdr -> hb_sz = total_size - h_size;
      nhdr -> hb_flags = 0;
      if (0 != prev) {
	HDR(prev) -> hb_next = n;
      } else {
        GC_hblkfreelist[index] = n;
      }
      if (0 != next) {
	HDR(next) -> hb_prev = n;
      }
      INCR_FREE_BYTES(index, -(signed_word)h_size);
      FREE_ASSERT(GC_free_bytes[index] > 0);
#     ifdef GC_ASSERTIONS
	nhdr -> hb_flags &= ~FREE_BLK;
				/* Don't fail test for consecutive	*/
				/* free blocks in GC_add_to_fl.		*/
#     endif
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#   endif
    hhdr -> hb_sz = h_size;
    GC_add_to_fl(h, hhdr);
    nhdr -> hb_flags |= FREE_BLK;
}

struct hblk *
GC_allochblk_nth(size_t sz/* bytes */, int kind, unsigned flags, int n,
		 GC_bool may_split);

/*
 * Allocate (and return pointer to) a heap block
 *   for objects of size sz bytes, searching the nth free list.
 *
 * NOTE: We set obj_map field in header correctly.
 *       Caller is responsible for building an object freelist in block.
 *
 * The client is responsible for clearing the block, if necessary.
 */
struct hblk *
GC_allochblk(size_t sz, int kind, unsigned flags/* IGNORE_OFF_PAGE or 0 */)
{
    word blocks;
    int start_list;
    int i;
    struct hblk *result;
    int split_limit; /* Highest index of free list whose blocks we	*/
    		     /* split.						*/

    GC_ASSERT((sz & (GRANULE_BYTES - 1)) == 0);
    blocks = OBJ_SZ_TO_BLOCKS(sz);
    if ((signed_word)(blocks * HBLKSIZE) < 0) {
      return 0;
    }
    start_list = GC_hblk_fl_from_blocks(blocks);
    /* Try for an exact match first. */
    result = GC_allochblk_nth(sz, kind, flags, start_list, FALSE);
    if (0 != result) return result;
    if (GC_use_entire_heap || GC_dont_gc
	|| USED_HEAP_SIZE < GC_requested_heapsize
        || TRUE_INCREMENTAL || !GC_should_collect()) {
	/* Should use more of the heap, even if it requires splitting. */
	split_limit = N_HBLK_FLS;
    } else {
#     ifdef USE_MUNMAP
	/* avoid splitting, since that might require remapping */
	split_limit = 0;
#     else
	if (GC_finalizer_bytes_freed > (GC_heapsize >> 4))  {
	  /* If we are deallocating lots of memory from		*/
	  /* finalizers, fail and collect sooner rather		*/
	  /* than later.					*/
	  split_limit = 0;
	} else {
	  /* If we have enough large blocks left to cover any	*/
	  /* previous request for large blocks, we go ahead	*/
	  /* and split.  Assuming a steady state, that should	*/
	  /* be safe.  It means that we can use the full	*/
	  /* heap if we allocate only small objects.		*/
          split_limit = GC_enough_large_bytes_left();
	}
#     endif
    }
    if (start_list < UNIQUE_THRESHOLD) {
      /* No reason to try start_list again, since all blocks are exact	*/
      /* matches.							*/
      ++start_list;
    }
    for (i = start_list; i <= split_limit; ++i) {
	struct hblk * result = GC_allochblk_nth(sz, kind, flags, i, TRUE);
	if (0 != result) return result;
    }
    return 0;
}

/*
 * The same, but with search restricted to nth free list.
 * Flags is IGNORE_OFF_PAGE or zero.
 * Unlike the above, sz is in bytes.
 * The may_split flag indicates whether it's OK to split larger blocks.
 */
struct hblk *
GC_allochblk_nth(size_t sz, int kind, unsigned flags, int n, GC_bool may_split)
{
    struct hblk *hbp;
    hdr * hhdr;		/* Header corr. to hbp				*/
    			/* Initialized after loop if hbp != 0.		*/
    			/* Gcc uninitialized use warning is bogus.	*/
    struct hblk *thishbp;
    hdr * thishdr;		/* Header corr. to thishbp */
    signed_word size_needed;    /* number of bytes in requested objects */
    signed_word size_avail;	/* bytes available in this block	*/

    size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);

    /* search for a big enough block in free list */
	hbp = GC_hblkfreelist[n];
	for(; 0 != hbp; hbp = hhdr -> hb_next) {
	    GET_HDR(hbp, hhdr);
	    size_avail = hhdr->hb_sz;
	    if (size_avail < size_needed) continue;
	    if (size_avail != size_needed) {
	      signed_word next_size;

	      if (!may_split) continue;
	      /* If the next heap block is obviously better, go on.	*/
	      /* This prevents us from disassembling a single large block */
	      /* to get tiny blocks.					*/
	      thishbp = hhdr -> hb_next;
	      if (thishbp != 0) {
	  	GET_HDR(thishbp, thishdr);
	        next_size = (signed_word)(thishdr -> hb_sz);
	        if (next_size < size_avail
	            && next_size >= size_needed
	            && !GC_is_black_listed(thishbp, (word)size_needed)) {
	            continue;
	        }
	      }
	    }
	    if ( !IS_UNCOLLECTABLE(kind) &&
	         (kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC)) {
	      struct hblk * lasthbp = hbp;
	      ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
	      signed_word orig_avail = size_avail;
	      signed_word eff_size_needed = ((flags & IGNORE_OFF_PAGE)?
	      					HBLKSIZE
	      					: size_needed);

	      while ((ptr_t)lasthbp <= search_end
	             && (thishbp = GC_is_black_listed(lasthbp,
	             				      (word)eff_size_needed))
		        != 0) {
	        lasthbp = thishbp;
	      }
	      size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
	      thishbp = lasthbp;
	      if (size_avail >= size_needed) {
	        if (thishbp != hbp &&
		    0 != (thishdr = GC_install_header(thishbp))) {
		  /* Make sure it's mapped before we mangle it. */
#		    ifdef USE_MUNMAP
		      if (!IS_MAPPED(hhdr)) {
		        GC_remap((ptr_t)hbp, hhdr -> hb_sz);
		        hhdr -> hb_flags &= ~WAS_UNMAPPED;
		      }
#		    endif
	          /* Split the block at thishbp */
		      GC_split_block(hbp, hhdr, thishbp, thishdr, n);
		  /* Advance to thishbp */
		      hbp = thishbp;
		      hhdr = thishdr;
		      /* We must now allocate thishbp, since it may	*/
		      /* be on the wrong free list.			*/
		}
	      } else if (size_needed > (signed_word)BL_LIMIT
	                 && orig_avail - size_needed
			    > (signed_word)BL_LIMIT) {
	        /* Punt, since anything else risks unreasonable heap growth. */
		if (++GC_large_alloc_warn_suppressed
		    >= GC_large_alloc_warn_interval) {
	          WARN("Repeated allocation of very large block "
		       "(appr. size %ld):\n"
		       "\tMay lead to memory leak and poor performance.\n",
		       size_needed);
		  GC_large_alloc_warn_suppressed = 0;
		}
	        size_avail = orig_avail;
	      } else if (size_avail == 0 && size_needed == HBLKSIZE
			 && IS_MAPPED(hhdr)) {
		if (!GC_find_leak) {
	      	  static unsigned count = 0;

	      	  /* The block is completely blacklisted.  We need	*/
	      	  /* to drop some such blocks, since otherwise we spend */
	      	  /* all our time traversing them if pointerfree	*/
	      	  /* blocks are unpopular.				*/
	          /* A dropped block will be reconsidered at next GC.	*/
	          if ((++count & 3) == 0) {
	            /* Allocate and drop the block in small chunks, to	*/
	            /* maximize the chance that we will recover some	*/
	            /* later.						*/
		      word total_size = hhdr -> hb_sz;
	              struct hblk * limit = hbp + divHBLKSZ(total_size);
	              struct hblk * h;
		      struct hblk * prev = hhdr -> hb_prev;

		      GC_large_free_bytes -= total_size;
		      GC_bytes_dropped += total_size;
		      GC_remove_from_fl(hhdr, n);
	              for (h = hbp; h < limit; h++) {
	                if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
	                  (void) setup_header(
	                	  hhdr, h,
	              		  HBLKSIZE,
	              		  PTRFREE, 0); /* Can't fail */
	              	  if (GC_debugging_started) {
	              	    BZERO(h, HBLKSIZE);
	              	  }
	                }
	              }
	            /* Restore hbp to point at free block */
		      hbp = prev;
		      if (0 == hbp) {
			return GC_allochblk_nth(sz, kind, flags, n, may_split);
		      }
	   	      hhdr = HDR(hbp);
	          }
		}
	      }
	    }
	    if( size_avail >= size_needed ) {
#		ifdef USE_MUNMAP
		  if (!IS_MAPPED(hhdr)) {
		    GC_remap((ptr_t)hbp, hhdr -> hb_sz);
		    hhdr -> hb_flags &= ~WAS_UNMAPPED;
		    /* Note: This may leave adjacent, mapped free blocks. */
		  }
#	        endif
		/* hbp may be on the wrong freelist; the parameter n	*/
		/* is important.					*/
		hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
		break;
	    }
	}

    if (0 == hbp) return 0;

    /* Add it to map of valid blocks */
    	if (!GC_install_counts(hbp, (word)size_needed)) return(0);
    	/* This leaks memory under very rare conditions. */

    /* Set up header */
        if (!setup_header(hhdr, hbp, sz, kind, flags)) {
            GC_remove_counts(hbp, (word)size_needed);
            return(0); /* ditto */
        }

    /* Notify virtual dirty bit implementation that we are about to write.  */
    /* Ensure that pointerfree objects are not protected if it's avoidable. */
    	GC_remove_protection(hbp, divHBLKSZ(size_needed),
			     (hhdr -> hb_descr == 0) /* pointer-free */);

    /* We just successfully allocated a block.  Restart count of	*/
    /* consecutive failures.						*/
    {
	extern unsigned GC_fail_count;

	GC_fail_count = 0;
    }

    GC_large_free_bytes -= size_needed;

    GC_ASSERT(IS_MAPPED(hhdr));
    return( hbp );
}

struct hblk * GC_freehblk_ptr = 0;  /* Search position hint for GC_freehblk */

/*
 * Free a heap block.
 *
 * Coalesce the block with its neighbors if possible.
 *
 * All mark words are assumed to be cleared.
 */
void
GC_freehblk(struct hblk *hbp)
{
struct hblk *next, *prev;
hdr *hhdr, *prevhdr, *nexthdr;
signed_word size;

    GET_HDR(hbp, hhdr);
    size = hhdr->hb_sz;
    size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
    if (size <= 0)
      ABORT("Deallocating excessively large block.  Too large an allocation?");
      /* Probably possible if we try to allocate more than half the address */
      /* space at once.  If we don't catch it here, strange things happen   */
      /* later.								     */
    GC_remove_counts(hbp, (word)size);
    hhdr->hb_sz = size;
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#   endif

    /* Check for duplicate deallocation in the easy case */
      if (HBLK_IS_FREE(hhdr)) {
        GC_printf("Duplicate large block deallocation of %p\n", hbp);
	ABORT("Duplicate large block deallocation");
      }

    GC_ASSERT(IS_MAPPED(hhdr));
    hhdr -> hb_flags |= FREE_BLK;
    next = (struct hblk *)((word)hbp + size);
    GET_HDR(next, nexthdr);
    prev = GC_free_block_ending_at(hbp);
    /* Coalesce with successor, if possible */
      if(0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)
         && (signed_word)(hhdr -> hb_sz + nexthdr -> hb_sz) > 0
	 /* no overflow */) {
	GC_remove_from_fl(nexthdr, FL_UNKNOWN);
	hhdr -> hb_sz += nexthdr -> hb_sz;
	GC_remove_header(next);
      }
    /* Coalesce with predecessor, if possible. */
      if (0 != prev) {
	prevhdr = HDR(prev);
	if (IS_MAPPED(prevhdr)
	    && (signed_word)(hhdr -> hb_sz + prevhdr -> hb_sz) > 0) {
	  GC_remove_from_fl(prevhdr, FL_UNKNOWN);
	  prevhdr -> hb_sz += hhdr -> hb_sz;
#	  ifdef USE_MUNMAP
	    prevhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#	  endif
	  GC_remove_header(hbp);
	  hbp = prev;
	  hhdr = prevhdr;
	}
      }
    /* FIXME: It is not clear we really always want to do these merges	*/
    /* with -DUSE_MUNMAP, since it updates ages and hence prevents	*/
    /* unmapping.							*/

    GC_large_free_bytes += size;
    GC_add_to_fl(hbp, hhdr);
}