
/src/c/gbc.d

https://gitlab.com/jlarocco/ecl
  1. /* -*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*- */
  2. /* vim: set filetype=c tabstop=8 shiftwidth=4 expandtab: */
  3. /*
  4. gbc.c -- Garbage collector.
  5. */
  6. /*
  7. Copyright (c) 1984, Taiichi Yuasa and Masami Hagiya.
  8. Copyright (c) 1990, Giuseppe Attardi and William F. Schelter.
  9. Copyright (c) 2001, Juan Jose Garcia Ripoll.
  10. ECL is free software; you can redistribute it and/or
  11. modify it under the terms of the GNU Library General Public
  12. License as published by the Free Software Foundation; either
  13. version 2 of the License, or (at your option) any later version.
  14. See file '../Copyright' for full details.
  15. */
  16. #ifdef ECL_THREADS
  17. #include <pthread.h>
  18. #endif
  19. #include <stdio.h>
  20. #include <ecl/ecl.h>
  21. #include <ecl/page.h>
  22. #include <ecl/internal.h>
  23. #include <ecl/bytecodes.h>
  24. /******************************* EXPORTS ******************************/
  25. bool GC_enable;
  26. /******************************* ------- ******************************/
  27. /*
  28. mark_table[m]:i represents word w = 128*m + 4*i, where w = addr - heap_start.
  29. Therefore m = w >> 7, i = (w / 4) % 32 = (w >> 2) & 0x1f.
  30. */
  31. static int *mark_table;
  32. #define MTbit(x) ((ptr2int(x) >> 2) & 0x1f)
  33. #define MTword(x) mark_table[((cl_ptr)x - heap_start) >> 7]
  34. #define get_mark_bit(x) (MTword(x) >> MTbit(x) & 1)
  35. #define set_mark_bit(x) (MTword(x) |= (1 << MTbit(x)))
  36. #define clear_mark_bit(x) (MTword(x) &= ~(1 << MTbit(x)))
  37. #define VALID_DATA_ADDRESS(pp) \
  38. (!ECL_IMMEDIATE(pp) && (heap_start <= (cl_ptr)(pp)) && ((cl_ptr)(pp) < heap_end))
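The macros above pack one mark bit per 4-byte heap word, 32 words per int of mark_table. A minimal standalone sketch (not part of gbc.d) of the same arithmetic, using the heap offset for both indices, which matches the macros whenever heap_start is 128-byte aligned; all toy_* names are invented for illustration:

#include <stdint.h>
#include <stdio.h>

static uint32_t toy_mark_table[1024];          /* one bit per 4-byte word */
static char *toy_heap_start;

static void toy_set_mark(const void *addr)
{
        uintptr_t w = (uintptr_t)((const char *)addr - toy_heap_start);
        toy_mark_table[w >> 7] |= 1u << ((w >> 2) & 0x1f);
}

static int toy_get_mark(const void *addr)
{
        uintptr_t w = (uintptr_t)((const char *)addr - toy_heap_start);
        return (toy_mark_table[w >> 7] >> ((w >> 2) & 0x1f)) & 1;
}

int main(void)
{
        static char heap[4096];
        toy_heap_start = heap;
        toy_set_mark(heap + 132);    /* offset 132 is word 33: int 1, bit 1 */
        printf("%d %d\n", toy_get_mark(heap + 132), toy_get_mark(heap + 128));
        return 0;
}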
  39. static bool debug = FALSE;
  40. static int maxpage;
  41. #define GC_ROOT_MAX 200
  42. static cl_object *gc_root[GC_ROOT_MAX];
  43. static int gc_roots;
  44. static bool collect_blocks;
  45. static int gc_time; /* Beppe */
  46. /*
  47. We must register the location, since the value may be reassigned (e.g. malloc_list)
  48. */
  49. static void _mark_object(cl_object x);
  50. static void _mark_contblock(void *p, cl_index s);
  51. static void mark_cl_env(struct cl_env_struct *env);
  52. extern void sigint (void);
  53. void
  54. ecl_register_root(cl_object *p)
  55. {
  56. if (gc_roots >= GC_ROOT_MAX)
  57. ecl_internal_error("too many roots");
  58. gc_root[gc_roots++] = p;
  59. }
  60. cl_object
  61. si_gc(cl_object area)
  62. {
  63. if (!GC_enabled())
  64. ecl_internal_error("GC is not enabled");
  65. if (Null(area))
  66. ecl_gc(t_cons);
  67. else
  68. ecl_gc(t_contiguous);
  69. @(return)
  70. }
  71. /*----------------------------------------------------------------------
  72. * Mark phase
  73. *----------------------------------------------------------------------
  74. */
  75. /* Whenever two arrays are linked together by displacement,
  76. if one is live, the other will be made live */
  77. #define mark_displaced(ar) mark_object(ar)
  78. #define mark_contblock(x,s) {if (collect_blocks) _mark_contblock(x,s); }
  79. #if 1
  80. #define mark_object(x) if ((x != OBJNULL) && !ECL_IMMEDIATE(x)) _mark_object(x)
  81. #define mark_next(a) if ((a != OBJNULL) && !ECL_IMMEDIATE(a)) { x=(a); goto BEGIN; }
  82. #else
  83. #define mark_object(x) _mark_object(x)
  84. #define mark_next(a) x=(a); goto BEGIN
  85. #endif
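mark_object recurses into _mark_object, while mark_next overwrites x and jumps back to BEGIN, so the last child of every object is traversed iteratively. A minimal sketch of that pattern on a hypothetical cons heap (toy_cons and toy_mark are invented names); a list of any length then costs a single C stack frame for its spine:

#include <stddef.h>

struct toy_cons { int marked; struct toy_cons *car, *cdr; };

static void toy_mark(struct toy_cons *x)
{
BEGIN:
        if (x == NULL || x->marked)
                return;
        x->marked = 1;
        if (x->car)
                toy_mark(x->car);     /* like mark_object: real recursion */
        if (x->cdr) {                 /* like mark_next: loop instead of recursing */
                x = x->cdr;
                goto BEGIN;
        }
}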
  86. /* We allocate bitvectors in multiples of sizeof(int).
  87. Assume a char is 8 bits wide. */
  88. #define W_SIZE (8*sizeof(int))
  89. static void
  90. _mark_object(cl_object x)
  91. {
  92. cl_index i, j;
  93. cl_object *p, y;
  94. cl_ptr cp;
  95. BEGIN:
  96. #if 0
  97. /* We cannot get here because mark_object() and mark_next() already check this */
  98. if (ECL_IMMEDIATE(x)) return; /* fixnum, character or locative */
  99. if (x == OBJNULL)
  100. return;
  101. #endif
  102. /* We need this because we sometimes arrive at data structures
  103. * that were created on the C stack (t_frame in gfun.d,
  104. * for instance). */
  105. if (!VALID_DATA_ADDRESS(x))
  106. return;
  107. if (x->d.m) {
  108. if (x->d.m == FREE)
  109. ecl_internal_error("mark_object: pointer to free object.");
  110. else
  111. return;
  112. }
  113. x->d.m = TRUE;
  114. switch (ecl_t_of(x)) {
  115. case t_bignum: {
  116. /* GMP may set num.alloc before actually allocating anything.
  117. With these checks we make sure we do not move anything
  118. we don't have to. Besides, we use big_dim as the size
  119. of the object, because big_size might even be smaller.
  120. */
  121. cl_ptr limbs = (cl_ptr)x->big.big_limbs;
  122. cl_index size = x->big.big_dim * sizeof(mp_limb_t);
  123. if (size) mark_contblock(limbs, size);
  124. break;
  125. }
  126. case t_ratio:
  127. mark_object(x->ratio.num);
  128. mark_next(x->ratio.den);
  129. break;
  130. #ifdef ECL_SSE2
  131. case t_sse_pack:
  132. #endif
  133. case t_singlefloat:
  134. case t_doublefloat:
  135. #ifdef ECL_LONG_FLOAT
  136. case t_longfloat:
  137. #endif
  138. break;
  139. case t_complex:
  140. mark_object(x->complex.imag);
  141. mark_next(x->complex.real);
  142. break;
  143. case t_character:
  144. break;
  145. case t_symbol:
  146. mark_object(x->symbol.hpack);
  147. mark_object(x->symbol.name);
  148. mark_object(x->symbol.plist);
  149. mark_object(x->symbol.gfdef);
  150. mark_next(x->symbol.value);
  151. break;
  152. case t_package:
  153. mark_object(x->pack.name);
  154. mark_object(x->pack.nicknames);
  155. mark_object(x->pack.shadowings);
  156. mark_object(x->pack.uses);
  157. mark_object(x->pack.usedby);
  158. mark_object(x->pack.internal);
  159. mark_next(x->pack.external);
  160. break;
  161. case t_cons:
  162. mark_object(CAR(x));
  163. mark_next(CDR(x));
  164. break;
  165. case t_hashtable:
  166. mark_object(x->hash.rehash_size);
  167. mark_object(x->hash.threshold);
  168. if (x->hash.data == NULL)
  169. break;
  170. for (i = 0, j = x->hash.size; i < j; i++) {
  171. mark_object(x->hash.data[i].key);
  172. mark_object(x->hash.data[i].value);
  173. }
  174. mark_contblock(x->hash.data, j * sizeof(struct ecl_hashtable_entry));
  175. break;
  176. case t_array:
  177. mark_contblock(x->array.dims, sizeof(x->array.dims[0])*x->array.rank);
  178. #ifdef ECL_UNICODE
  179. case t_string:
  180. #endif
  181. case t_vector:
  182. if ((y = x->array.displaced) != ECL_NIL)
  183. mark_displaced(y);
  184. cp = (cl_ptr)x->array.self.t;
  185. if (cp == NULL)
  186. break;
  187. switch ((cl_elttype)x->array.elttype) {
  188. #ifdef ECL_UNICODE
  189. case ecl_aet_ch:
  190. #endif
  191. case ecl_aet_object:
  192. if (x->array.displaced == ECL_NIL || CAR(x->array.displaced) == ECL_NIL) {
  193. i = x->vector.dim;
  194. p = x->array.self.t;
  195. goto MARK_DATA;
  196. }
  197. j = sizeof(cl_object)*x->array.dim;
  198. break;
  199. case ecl_aet_bc:
  200. j = x->array.dim;
  201. break;
  202. case ecl_aet_bit:
  203. j = sizeof(int) * ((x->vector.offset + x->vector.dim + W_SIZE -1)/W_SIZE);
  204. break;
  205. case ecl_aet_fix:
  206. j = x->array.dim * sizeof(cl_fixnum);
  207. break;
  208. case ecl_aet_index:
  209. j = x->array.dim * sizeof(cl_index);
  210. break;
  211. case ecl_aet_sf:
  212. j = x->array.dim * sizeof(float);
  213. break;
  214. case ecl_aet_df:
  215. j = x->array.dim * sizeof(double);
  216. break;
  217. case ecl_aet_b8:
  218. j = x->array.dim * sizeof(uint8_t);
  219. break;
  220. case ecl_aet_i8:
  221. j = x->array.dim * sizeof(int8_t);
  222. break;
  223. default:
  224. ecl_internal_error("Allocation botch: unknown array element type");
  225. }
  226. goto COPY_ARRAY;
  227. case t_base_string:
  228. if ((y = x->base_string.displaced) != ECL_NIL)
  229. mark_displaced(y);
  230. cp = x->base_string.self;
  231. if (cp == NULL)
  232. break;
  233. j = x->base_string.dim+1;
  234. COPY_ARRAY:
  235. mark_contblock(cp, j);
  236. break;
  237. case t_bitvector:
  238. if ((y = x->vector.displaced) != ECL_NIL)
  239. mark_displaced(y);
  240. cp = x->vector.self.bit;
  241. if (cp == NULL)
  242. break;
  243. j= sizeof(int) * ((x->vector.offset + x->vector.dim + W_SIZE -1)/W_SIZE);
  244. goto COPY_ARRAY;
  245. case t_stream:
  246. switch ((enum ecl_smmode)x->stream.mode) {
  247. case ecl_smm_input:
  248. case ecl_smm_output:
  249. case ecl_smm_io:
  250. case ecl_smm_probe:
  251. mark_contblock(x->stream.buffer, BUFSIZ);
  252. mark_object(x->stream.object0);
  253. mark_next(x->stream.object1);
  254. break;
  255. case ecl_smm_synonym:
  256. mark_next(x->stream.object0);
  257. break;
  258. case ecl_smm_broadcast:
  259. case ecl_smm_concatenated:
  260. mark_next(x->stream.object0);
  261. break;
  262. case ecl_smm_two_way:
  263. case ecl_smm_echo:
  264. mark_object(x->stream.object0);
  265. mark_next(x->stream.object1);
  266. break;
  267. case ecl_smm_string_input:
  268. case ecl_smm_string_output:
  269. mark_next(x->stream.object0);
  270. break;
  271. default:
  272. ecl_internal_error("mark stream botch");
  273. }
  274. break;
  275. case t_random:
  276. break;
  277. case t_readtable:
  278. if (x->readtable.table == NULL)
  279. break;
  280. mark_contblock((cl_ptr)(x->readtable.table),
  281. RTABSIZE*sizeof(struct ecl_readtable_entry));
  282. for (i = 0; i < RTABSIZE; i++) {
  283. cl_object *p = x->readtable.table[i].dispatch_table;
  284. mark_object(x->readtable.table[i].macro);
  285. if (p != NULL) {
  286. mark_contblock(p, RTABSIZE*sizeof(cl_object));
  287. for (j = 0; j < RTABSIZE; j++)
  288. mark_object(p[j]);
  289. }
  290. }
  291. break;
  292. case t_pathname:
  293. mark_object(x->pathname.host);
  294. mark_object(x->pathname.device);
  295. mark_object(x->pathname.version);
  296. mark_object(x->pathname.name);
  297. mark_object(x->pathname.type);
  298. mark_next(x->pathname.directory);
  299. break;
  300. case t_bytecodes:
  301. mark_object(x->bytecodes.name);
  302. mark_object(x->bytecodes.lex);
  303. mark_object(x->bytecodes.specials);
  304. mark_object(x->bytecodes.definition);
  305. mark_contblock(x->bytecodes.code, x->bytecodes.code_size * sizeof(cl_opcode));
  306. mark_next(x->bytecodes.data);
  307. break;
  308. case t_bclosure:
  309. mark_object(x->bclosure.code);
  310. mark_next(x->bclosure.lex);
  311. break;
  312. case t_cfun:
  313. case t_cfunfixed:
  314. mark_object(x->cfun.block);
  315. mark_next(x->cfun.name);
  316. break;
  317. case t_cclosure:
  318. mark_object(x->cfun.block);
  319. mark_next(x->cclosure.env);
  320. break;
  321. #ifdef ECL_THREADS
  322. case t_process:
  323. /* Already marked by malloc: x->process.env
  324. */
  325. mark_object(x->process.name);
  326. mark_object(x->process.interrupt);
  327. mark_object(x->process.function);
  328. mark_cl_env(x->process.env);
  329. mark_next(x->process.args);
  330. break;
  331. case t_lock:
  332. mark_object(x->lock.name);
  333. mark_next(x->lock.holder);
  334. break;
  335. case t_condition_variable:
  336. break;
  337. #endif /* THREADS */
  338. #ifdef ECL_SEMAPHORES
  339. case t_semaphore:
  340. break;
  341. #endif
  342. case t_instance:
  343. mark_object(x->instance.clas);
  344. mark_object(x->instance.sig);
  345. p = x->instance.slots;
  346. i = x->instance.length;
  347. goto MARK_DATA;
  348. case t_codeblock:
  349. mark_object(x->cblock.name);
  350. mark_object(x->cblock.next);
  351. mark_object(x->cblock.links);
  352. p = x->cblock.temp_data;
  353. if (p) {
  354. i = x->cblock.temp_data_size;
  355. mark_contblock(p, i * sizeof(cl_object));
  356. while (i-- > 0)
  357. mark_object(p[i]);
  358. }
  359. i = x->cblock.data_size;
  360. p = x->cblock.data;
  361. goto MARK_DATA;
  362. case t_foreign:
  363. if (x->foreign.size)
  364. mark_contblock(x->foreign.data, x->foreign.size);
  365. mark_next(x->foreign.tag);
  366. break;
  367. MARK_DATA:
  368. if (p) {
  369. mark_contblock(p, i * sizeof(cl_object));
  370. while (i-- > 0)
  371. mark_object(p[i]);
  372. }
  373. return;
  374. default:
  375. if (debug)
  376. printf("\ttype = %d\n", ecl_t_of(x));
  377. ecl_internal_error("mark botch");
  378. }
  379. }
  380. static void
  381. mark_stack_conservative(cl_ptr bottom, cl_ptr top)
  382. {
  383. int p, m;
  384. cl_object x;
  385. struct typemanager *tm;
  386. cl_ptr j;
  387. if (debug) { printf("Traversing C stack .."); fflush(stdout); }
  388. /* On machines which align local pointers on multiples of 2 rather
  389. than 4 we need to mark twice:
  390. if (offset) mark_stack_conservative(bottom, ((char *) top) + offset, 0);
  391. */
  392. for (j = bottom ; j < top ; j+=sizeof(cl_ptr)) {
  393. cl_ptr aux = *((cl_ptr*)j);
  394. /* improved Beppe: */
  395. if (VALID_DATA_ADDRESS(aux) && type_map[p = page(aux)] < (char)t_end) {
  396. tm = tm_of((cl_type)type_map[p]);
  397. x = (cl_object)(aux - (aux - pagetochar(p)) % tm->tm_size);
  398. m = x->d.m;
  399. if (m != FREE && m != TRUE) {
  400. if (m) {
  401. fprintf(stderr,
  402. "** bad value %d of d.m in gc page %d skipping mark **",
  403. m, p); fflush(stderr);
  404. } else {
  405. mark_object(x);
  406. }
  407. }
  408. }
  409. }
  410. if (debug) {
  411. printf(". done.\n"); fflush(stdout);
  412. }
  413. }
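mark_stack_conservative treats every word-aligned value in the scanned range as a potential heap pointer, then uses the page tables to find the enclosing cell before marking it. A minimal sketch of the same idea on a hypothetical heap of fixed-size cells (toy_* names are invented; the real code derives cell size and validity from type_map and tm_of):

#include <stddef.h>

#define TOY_CELLS 256
struct toy_cell { int marked; void *payload; };
static struct toy_cell toy_heap[TOY_CELLS];

static void toy_scan_range(char *bottom, char *top)
{
        char *lo = (char *)toy_heap, *hi = (char *)(toy_heap + TOY_CELLS);
        char *j;
        for (j = bottom; j + sizeof(void *) <= top; j += sizeof(void *)) {
                char *aux = *(char **)j;         /* word reinterpreted as a pointer */
                if (aux >= lo && aux < hi) {
                        /* round down to the start of the enclosing cell and pin it */
                        toy_heap[(aux - lo) / sizeof(struct toy_cell)].marked = 1;
                }
        }
}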
  414. static void
  415. mark_cl_env(struct cl_env_struct *env)
  416. {
  417. int i = 0;
  418. cl_object where = 0;
  419. ecl_bds_ptr bdp = 0;
  420. ecl_frame_ptr frp = 0;
  421. ecl_ihs_ptr ihs = 0;
  422. mark_contblock(env, sizeof(*env));
  423. mark_object(env->lex_env);
  424. mark_contblock(env->stack, env->stack_size * sizeof(cl_object));
  425. mark_stack_conservative((cl_ptr)env->stack, (cl_ptr)env->stack_top);
  426. if ((bdp = env->bds_org)) {
  427. mark_contblock(bdp, env->bds_size * sizeof(*bdp));
  428. for (; bdp <= env->bds_top; bdp++) {
  429. mark_object(bdp->symbol);
  430. mark_object(bdp->value);
  431. }
  432. }
  433. mark_object(env->bindings_hash);
  434. if ((frp = env->frs_org)) {
  435. mark_contblock(frp, env->frs_size * sizeof(*frp));
  436. for (; frp <= env->frs_top; frp++) {
  437. mark_object(frp->frs_val);
  438. }
  439. }
  440. for (ihs = env->ihs_top; ihs; ihs = ihs->next) {
  441. mark_object(ihs->function);
  442. mark_object(ihs->lex_env);
  443. }
  444. for (i=0; i<env->nvalues; i++)
  445. mark_object(env->values[i]);
  446. mark_object(env->string_pool);
  447. if (env->c_env) {
  448. mark_object(env->c_env->variables);
  449. mark_object(env->c_env->macros);
  450. mark_object(env->c_env->constants);
  451. }
  452. mark_object(env->fmt_aux_stream);
  453. mark_contblock(env->queue, sizeof(short) * ECL_PPRINT_QUEUE_SIZE);
  454. mark_contblock(env->indent_stack, sizeof(short) * ECL_PPRINT_INDENTATION_STACK_SIZE);
  455. mark_object(env->big_register[0]);
  456. mark_object(env->big_register[1]);
  457. mark_object(env->big_register[2]);
  458. #ifdef ECL_THREADS
  459. mark_object(env->method_hash_clear_list);
  460. #endif
  461. mark_object(env->method_hash);
  462. mark_object(env->method_spec_vector);
  463. #ifdef ECL_THREADS
  464. /* We should mark the stacks of the threads somehow!!! */
  465. #error "The old garbage collector does not support threads"
  466. #else
  467. # ifdef ECL_DOWN_STACK
  468. mark_stack_conservative((cl_ptr)(&where), (cl_ptr)env->cs_org);
  469. # else
  470. mark_stack_conservative((cl_ptr)env->cs_org, (cl_ptr)(&where));
  471. # endif /* ECL_DOWN_STACK */
  472. #endif /* THREADS */
  473. #ifdef ECL_FFICALL
  474. mark_contblock(env->fficall, sizeof(struct ecl_fficall));
  475. mark_object(((struct ecl_fficall*)env->fficall)->cstring);
  476. #endif
  477. }
  478. static void
  479. mark_phase(void)
  480. {
  481. int i;
  482. cl_object s;
  483. /* save registers on the stack */
  484. jmp_buf volatile registers;
  485. ecl_setjmp(registers);
  486. /* mark registered symbols & keywords */
  487. for (i=0; i<cl_num_symbols_in_core; i++) {
  488. s = (cl_object)(cl_symbols + i);
  489. s->symbol.m = FALSE;
  490. }
  491. for (i=0; i<cl_num_symbols_in_core; i++) {
  492. s = (cl_object)(cl_symbols + i);
  493. mark_object(s);
  494. }
  495. /* We mark everything, but we do not want the loaded
  496. * libraries to be marked unless they are referenced somewhere
  497. * else (function definitions, etc.) */
  498. s = cl_core.libraries;
  499. if (s) {
  500. for (i = 0; i < s->vector.fillp; i++) {
  501. cl_object dll = s->vector.self.t[i];
  502. if (dll->cblock.locked) {
  503. mark_object(dll);
  504. }
  505. }
  506. s->vector.elttype = ecl_aet_fix;
  507. mark_object(s);
  508. s->vector.elttype = ecl_aet_object;
  509. }
  510. mark_stack_conservative((cl_ptr)&cl_core, (cl_ptr)(&cl_core + 1));
  511. /* mark roots */
  512. for (i = 0; i < gc_roots; i++)
  513. mark_object(*gc_root[i]);
  514. #ifdef ECL_THREADS
  515. mark_object(cl_core.processes);
  516. #else
  517. mark_cl_env(&cl_env);
  518. #endif
  519. }
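mark_phase starts with ecl_setjmp into a stack-allocated jmp_buf so that callee-saved registers are spilled into memory the conservative stack scan will see. A minimal sketch of that trick, assuming a downward-growing stack (as the ECL_DOWN_STACK branch of mark_cl_env does) and reusing the toy_scan_range sketch above; toy_stack_origin stands in for cs_org and is an invented name:

#include <setjmp.h>

extern void toy_scan_range(char *bottom, char *top);  /* sketch shown earlier */
extern char *toy_stack_origin;                        /* recorded near main() */

static void toy_collect_stack_roots(void)
{
        jmp_buf regs;          /* lives in this stack frame */
        char here;
        setjmp(regs);          /* spill registers into the frame */
        toy_scan_range(&here, toy_stack_origin);
}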
  520. static void
  521. sweep_phase(void)
  522. {
  523. register int i, j, k;
  524. register cl_object x;
  525. register cl_ptr p;
  526. register struct typemanager *tm;
  527. register cl_object f;
  528. ECL_NIL->symbol.m = FALSE;
  529. ECL_T->symbol.m = FALSE;
  530. if (debug)
  531. printf("type map\n");
  532. for (i = 0; i < maxpage; i++) {
  533. if (type_map[i] == (int)t_contiguous) {
  534. if (debug) {
  535. printf("-");
  536. continue;
  537. }
  538. }
  539. if (type_map[i] >= (int)t_end)
  540. continue;
  541. tm = tm_of((cl_type)type_map[i]);
  542. /*
  543. general sweeper
  544. */
  545. if (debug)
  546. printf("%c", tm->tm_name[0]);
  547. p = pagetochar(i);
  548. f = tm->tm_free;
  549. k = 0;
  550. for (j = tm->tm_nppage; j > 0; --j, p += tm->tm_size) {
  551. x = (cl_object)p;
  552. if (x->d.m == FREE)
  553. continue;
  554. else if (x->d.m) {
  555. x->d.m = FALSE;
  556. continue;
  557. }
  558. /* INV: Make sure this is the same as in alloc_2.d */
  559. switch (x->d.t) {
  560. #ifdef ENABLE_DLOPEN
  561. case t_codeblock:
  562. ecl_library_close(x);
  563. break;
  564. #endif
  565. case t_stream:
  566. if (!x->stream.closed)
  567. cl_close(1, x);
  568. break;
  569. #ifdef ECL_THREADS
  570. case t_lock:
  571. #if defined(ECL_MS_WINDOWS_HOST)
  572. CloseHandle(x->lock.mutex);
  573. #else
  574. pthread_mutex_destroy(&x->lock.mutex);
  575. #endif
  576. break;
  577. case t_condition_variable:
  578. #if defined(ECL_MS_WINDOWS_HOST)
  579. CloseHandle(x->condition_variable.cv);
  580. #else
  581. pthread_cond_destroy(&x->condition_variable.cv);
  582. #endif
  583. break;
  584. #endif
  585. #ifdef ECL_SEMAPHORES
  586. case t_semaphore:
  587. #error "Unfinished"
  588. break;
  589. #endif
  590. default:;
  591. }
  592. ((struct freelist *)x)->f_link = f;
  593. x->d.m = FREE;
  594. f = x;
  595. k++;
  596. }
  597. tm->tm_free = f;
  598. tm->tm_nfree += k;
  599. tm->tm_nused -= k;
  600. }
  601. if (debug) {
  602. putchar('\n');
  603. fflush(stdout);
  604. }
  605. }
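The inner loop above rebuilds the per-type free list in place: a dead cell is overlaid with a struct freelist and pushed onto the list, while a live cell just has its mark cleared. A minimal sketch of that threading on a hypothetical page of fixed-size cells (toy_* names are invented; cell_size is assumed to be at least as large as both structs):

struct toy_obj  { int mark; };                /* 1 = live, 0 = reclaimable */
struct toy_free { struct toy_free *next; };

static struct toy_free *toy_sweep_page(char *page, int ncells, int cell_size,
                                       struct toy_free *free_list)
{
        int j;
        for (j = 0; j < ncells; j++, page += cell_size) {
                struct toy_obj *x = (struct toy_obj *)page;
                if (x->mark) {
                        x->mark = 0;                  /* live: clear for the next GC */
                } else {
                        struct toy_free *f = (struct toy_free *)page;
                        f->next = free_list;          /* dead: thread onto the free list */
                        free_list = f;
                }
        }
        return free_list;
}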
  606. static void
  607. contblock_sweep_phase(void)
  608. {
  609. register int i, j;
  610. register cl_ptr s, e, p, q;
  611. register struct contblock *cbp;
  612. cb_pointer = NULL;
  613. ncb = 0;
  614. for (i = 0; i < maxpage;) {
  615. if (type_map[i] != (int)t_contiguous) {
  616. i++;
  617. continue;
  618. }
  619. for (j = i+1;
  620. j < maxpage && type_map[j] == (int)t_contiguous;
  621. j++)
  622. ;
  623. s = pagetochar(i);
  624. e = pagetochar(j);
  625. for (p = s; p < e;) {
  626. if (get_mark_bit((int *)p)) {
  627. p += 4;
  628. continue;
  629. }
  630. q = p + 4;
  631. while (q < e && !get_mark_bit((int *)q))
  632. q += 4;
  633. ecl_dealloc(p);
  634. p = q + 4;
  635. }
  636. i = j + 1;
  637. }
  638. if (debug) {
  639. for (cbp = cb_pointer; cbp != NULL; cbp = cbp->cb_link)
  640. printf("0x%p %d\n", cbp, cbp->cb_size);
  641. fflush(stdout);
  642. }
  643. }
  644. cl_object (*GC_enter_hook)() = NULL;
  645. cl_object (*GC_exit_hook)() = NULL;
  646. void
  647. ecl_gc(cl_type t)
  648. {
  649. const cl_env_ptr env = ecl_process_env();
  650. int i, j;
  651. int tm;
  652. int gc_start = ecl_runtime();
  653. bool interrupts;
  654. if (!GC_enabled())
  655. return;
  656. GC_disable();
  657. CL_NEWENV_BEGIN {
  658. if (SYM_VAL(@'si::*gc-verbose*') != ECL_NIL) {
  659. printf("\n[GC ..");
  660. /* To use this, one would have to add entries in tm_table for reloc and contig.
  661. fprintf(stdout, "\n[GC for %d %s pages ..",
  662. tm_of(t)->tm_npage,
  663. tm_table[(int)t].tm_name + 1); */
  664. fflush(stdout);
  665. }
  666. debug = ecl_symbol_value(@'si::*gc-message*') != ECL_NIL;
  667. if (GC_enter_hook != NULL)
  668. (*GC_enter_hook)();
  669. #ifdef THREADS
  670. #error "We need to stop all other threads"
  671. #endif /* THREADS */
  672. interrupts = env->disable_interrupts;
  673. env->disable_interrupts = 1;
  674. collect_blocks = t > t_end;
  675. if (collect_blocks)
  676. cbgccount++;
  677. else
  678. tm_table[(int)t].tm_gccount++;
  679. if (debug) {
  680. if (collect_blocks)
  681. printf("GC entered for collecting blocks\n");
  682. else
  683. printf("GC entered for collecting %s\n", tm_table[(int)t].tm_name);
  684. fflush(stdout);
  685. }
  686. maxpage = page(heap_end);
  687. if (collect_blocks) {
  688. /*
  689. 1 page = 512 word
  690. 512 bit = 16 word
  691. */
  692. int mark_table_size = maxpage * (LISP_PAGESIZE / 32);
  693. extern void cl_resize_hole(cl_index);
  694. if (holepage < mark_table_size*sizeof(int)/LISP_PAGESIZE + 1)
  695. new_holepage = mark_table_size*sizeof(int)/LISP_PAGESIZE + 1;
  696. if (new_holepage < HOLEPAGE)
  697. new_holepage = HOLEPAGE;
  698. cl_resize_hole(new_holepage);
  699. mark_table = (int*)heap_end;
  700. for (i = 0; i < mark_table_size; i++)
  701. mark_table[i] = 0;
  702. }
  703. if (debug) {
  704. printf("mark phase\n");
  705. fflush(stdout);
  706. tm = ecl_runtime();
  707. }
  708. mark_phase();
  709. if (debug) {
  710. printf("mark ended (%d)\n", ecl_runtime() - tm);
  711. printf("sweep phase\n");
  712. fflush(stdout);
  713. tm = ecl_runtime();
  714. }
  715. sweep_phase();
  716. if (debug) {
  717. printf("sweep ended (%d)\n", ecl_runtime() - tm);
  718. fflush(stdout);
  719. }
  720. if (t == t_contiguous) {
  721. if (debug) {
  722. printf("contblock sweep phase\n");
  723. fflush(stdout);
  724. tm = ecl_runtime();
  725. }
  726. contblock_sweep_phase();
  727. if (debug)
  728. printf("contblock sweep ended (%d)\n", ecl_runtime() - tm);
  729. }
  730. if (debug) {
  731. for (i = 0, j = 0; i < (int)t_end; i++) {
  732. if (tm_table[i].tm_type == (cl_type)i) {
  733. printf("%13s: %8d used %8d free %4d/%d pages\n",
  734. tm_table[i].tm_name,
  735. tm_table[i].tm_nused,
  736. tm_table[i].tm_nfree,
  737. tm_table[i].tm_npage,
  738. tm_table[i].tm_maxpage);
  739. j += tm_table[i].tm_npage;
  740. } else
  741. printf("%13s: linked to %s\n",
  742. tm_table[i].tm_name,
  743. tm_table[(int)tm_table[i].tm_type].tm_name);
  744. }
  745. printf("contblock: %d blocks %d pages\n", ncb, ncbpage);
  746. printf("hole: %d pages\n", holepage);
  747. printf("GC ended\n");
  748. fflush(stdout);
  749. }
  750. env->disable_interrupts = interrupts;
  751. if (GC_exit_hook != NULL)
  752. (*GC_exit_hook)();
  753. } CL_NEWENV_END;
  754. GC_enable();
  755. #ifdef THREADS
  756. #error "We need to activate all other threads again"
  757. #endif /* THREADS */
  758. gc_time += (gc_start = ecl_runtime() - gc_start);
  759. if (SYM_VAL(@'si::*gc-verbose*') != ECL_NIL) {
  760. /* Don't use fprintf since on Linux it calls malloc() */
  761. printf(". finished in %.2f\"]", gc_start/60.0);
  762. fflush(stdout);
  763. }
  764. if (env->interrupt_pending) ecl_check_pending_interrupts();
  765. }
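For reference, how a C caller exercises this collector: ecl_gc(t_cons) reclaims cell pages only, while ecl_gc(t_contiguous) also sweeps contiguous blocks, which is what si_gc arranges for (si::gc t) at the Lisp level. A minimal usage sketch, assuming an embedding program that has already booted ECL and a build that uses this collector rather than the Boehm GC:

#include <ecl/ecl.h>

void toy_force_gc(void)
{
        ecl_gc(t_cons);          /* collect relocatable cell pages only   */
        ecl_gc(t_contiguous);    /* additionally sweep contiguous blocks, */
                                 /* as (si::gc t) requests through si_gc  */
}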
  766. /*
  767. *----------------------------------------------------------------------
  768. *
  769. * mark_contblock --
  770. * sets the mark bit for words from address p to address p+s.
  771. * Both p and p+s are rounded to word boundaries.
  772. *
  773. * Results:
  774. * none.
  775. *
  776. * Side effects:
  777. * mark_table
  778. *
  779. *----------------------------------------------------------------------
  780. */
  781. static void
  782. _mark_contblock(void *x, cl_index s)
  783. {
  784. cl_ptr p = x;
  785. if (p >= heap_start && p < data_end) {
  786. ptrdiff_t pg = page(p);
  787. if ((cl_type)type_map[pg] == t_contiguous) {
  788. cl_ptr q = p + s;
  789. p = int2ptr(ptr2int(p) & ~3);
  790. q = int2ptr(ptr2int(q + 3) & ~3);
  791. for (; p < q; p+= 4)
  792. set_mark_bit(p);
  793. }
  794. }
  795. }
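A worked example of the boundary rounding above: a contiguous block starting at offset 0x1002 with size 5 covers bytes 0x1002..0x1006, so rounding the start down and the end up to 4-byte words marks the two words at 0x1000 and 0x1004 (standalone sketch on plain integers rather than cl_ptr):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uintptr_t p = 0x1002, s = 5;
        uintptr_t q = p + s;                 /* one past the last byte   */
        p = p & ~(uintptr_t)3;               /* round start down to word */
        q = (q + 3) & ~(uintptr_t)3;         /* round end up to word     */
        for (; p < q; p += 4)
                printf("mark word at 0x%lx\n", (unsigned long)p);
        return 0;
}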
  796. /*----------------------------------------------------------------------
  797. * Utilities
  798. *----------------------------------------------------------------------
  799. */
  800. @(defun si::room-report ()
  801. int i;
  802. cl_object *tl;
  803. @
  804. the_env->nvalues = 8;
  805. the_env->values[0] = ecl_make_fixnum(real_maxpage);
  806. the_env->values[1] = ecl_make_fixnum(available_pages());
  807. the_env->values[2] = ecl_make_fixnum(ncbpage);
  808. the_env->values[3] = ecl_make_fixnum(maxcbpage);
  809. the_env->values[4] = ecl_make_fixnum(ncb);
  810. the_env->values[5] = ecl_make_fixnum(cbgccount);
  811. the_env->values[6] = ecl_make_fixnum(holepage);
  812. the_env->values[7] = ECL_NIL;
  813. tl = &the_env->values[7];
  814. for (i = 0; i < (int)t_end; i++) {
  815. if (tm_table[i].tm_type == (cl_type)i) {
  816. tl = &CDR(*tl = CONS(ecl_make_fixnum(tm_table[i].tm_nused), ECL_NIL));
  817. tl = &CDR(*tl = CONS(ecl_make_fixnum(tm_table[i].tm_nfree), ECL_NIL));
  818. tl = &CDR(*tl = CONS(ecl_make_fixnum(tm_table[i].tm_npage), ECL_NIL));
  819. tl = &CDR(*tl = CONS(ecl_make_fixnum(tm_table[i].tm_maxpage), ECL_NIL));
  820. tl = &CDR(*tl = CONS(ecl_make_fixnum(tm_table[i].tm_gccount), ECL_NIL));
  821. } else {
  822. tl = &CDR(*tl = CONS(ECL_NIL, ECL_NIL));
  823. tl = &CDR(*tl = CONS(ecl_make_fixnum(tm_table[i].tm_type), ECL_NIL));
  824. tl = &CDR(*tl = CONS(ECL_NIL, ECL_NIL));
  825. tl = &CDR(*tl = CONS(ECL_NIL, ECL_NIL));
  826. tl = &CDR(*tl = CONS(ECL_NIL, ECL_NIL));
  827. }
  828. }
  829. return the_env->values[0];
  830. @)
  831. @(defun si::reset-gc-count ()
  832. int i;
  833. @
  834. cbgccount = 0;
  835. for (i = 0; i < (int)t_end; i++)
  836. tm_table[i].tm_gccount = 0;
  837. @(return)
  838. @)
  839. @(defun si::gc-time ()
  840. @
  841. @(return ecl_make_fixnum(gc_time))
  842. @)
  843. cl_object
  844. si_get_finalizer(cl_object o)
  845. {
  846. @(return ECL_NIL)
  847. }
  848. cl_object
  849. si_set_finalizer(cl_object o, cl_object finalizer)
  850. {
  851. @(return)
  852. }
  853. void
  854. init_GC(void)
  855. {
  856. GC_enable();
  857. gc_time = 0;
  858. }