
/src/racket/src/thread.c

http://github.com/gmarceau/PLT
/*
  Racket
  Copyright (c) 2004-2011 PLT Scheme Inc.
  Copyright (c) 1995-2001 Matthew Flatt

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Library General Public
  License as published by the Free Software Foundation; either
  version 2 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Library General Public License for more details.

  You should have received a copy of the GNU Library General Public
  License along with this library; if not, write to the Free
  Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301 USA.
*/

/* This file implements Racket threads.

   Usually, Racket threads are implemented by copying the stack.
   The scheme_thread_block() function is called occasionally by the
   evaluator so that the current thread can be swapped out.
   do_swap_thread() performs the actual swap. Threads can also be
   implemented by the OS; the bottom part of this file contains
   OS-specific thread code.

   Much of the work in thread management is knowing when to go to
   sleep, to be nice to the OS outside of Racket. The rest of the
   work is implementing custodians, parameters, and wills. */
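
/* A minimal sketch (disabled; not part of the build) of how client C code
   cooperates with this scheduler. Long-running primitives are expected to
   call scheme_thread_block() periodically so other Racket threads can run;
   a 0.0 argument means "swap if another thread is ready, but don't sleep".
   The function and work step below are hypothetical; only
   scheme_thread_block() and the ran_some flag come from this file. */
#if 0
static void hypothetical_long_running_primitive(void)
{
  int i;
  for (i = 0; i < 1000000; i++) {
    do_some_work(i);               /* hypothetical unit of work */
    if (!(i & 1023)) {
      scheme_thread_block(0.0);    /* let other Racket threads run */
      scheme_current_thread->ran_some = 1;
    }
  }
}
#endif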
/* Some compilers don't like re-def of GC_malloc in schemef.h: */
#ifndef MZ_PRECISE_GC
# define SCHEME_NO_GC_PROTO
#endif

#include "schpriv.h"
#include "schmach.h"
#include "schgc.h"
#ifdef MZ_USE_FUTURES
# include "future.h"
#endif

#ifndef PALMOS_STUFF
# include <time.h>
#endif
#ifdef FILES_HAVE_FDS
# include <sys/types.h>
# include <sys/time.h>
# ifdef SELECT_INCLUDE
#  include <sys/select.h>
# endif
# ifdef USE_BEOS_SOCKET_INCLUDE
#  include <be/net/socket.h>
# endif
#endif
#ifdef USE_WINSOCK_TCP
# ifdef USE_TCP
#  include <winsock.h>
# endif
#endif
#ifdef USE_BEOS_PORT_THREADS
# include <be/net/socket.h>
#endif
#ifdef USE_STACKAVAIL
# include <malloc.h>
#endif
#ifdef UNISTD_INCLUDE
# include <unistd.h>
#endif

#ifndef SIGMZTHREAD
# define SIGMZTHREAD SIGUSR2
#endif

#if defined(WINDOWS_PROCESSES) || defined(WINDOWS_FILE_HANDLES)
# include <windows.h>
THREAD_LOCAL_DECL(extern void *scheme_break_semaphore;)
#endif

#if defined(FILES_HAVE_FDS) \
    || defined(USE_BEOS_PORT_THREADS) \
    || (defined(USE_WINSOCK_TCP) && defined(USE_TCP)) \
    || (defined(WINDOWS_PROCESSES) || defined(WINDOWS_FILE_HANDLES))
# define USING_FDS
# if (!defined(USE_WINSOCK_TCP) || !defined(USE_TCP)) && !defined(FILES_HAVE_FDS)
#  include <sys/types.h>
# endif
#endif

#include "schfd.h"

#define DEFAULT_INIT_STACK_SIZE 1000
#define MAX_INIT_STACK_SIZE 100000

#ifdef SGC_STD_DEBUGGING
# define SENORA_GC_NO_FREE
#endif

/* If a finalization callback invokes Scheme code,
   we can end up with a thread swap in the middle of a thread
   swap (where the outer swap was interrupted by GC). The
   following is a debugging flag to help detect and fix
   such problems. */
#define WATCH_FOR_NESTED_SWAPS 0

#if WATCH_FOR_NESTED_SWAPS
static int swapping = 0;
#endif

extern void scheme_gmp_tls_init(intptr_t *s);
extern void *scheme_gmp_tls_load(intptr_t *s);
extern void scheme_gmp_tls_unload(intptr_t *s, void *p);
extern void scheme_gmp_tls_snapshot(intptr_t *s, intptr_t *save);
extern void scheme_gmp_tls_restore_snapshot(intptr_t *s, void *data, intptr_t *save, int do_free);

static void check_ready_break();

THREAD_LOCAL_DECL(extern int scheme_num_read_syntax_objects);
THREAD_LOCAL_DECL(extern intptr_t scheme_hash_request_count);
THREAD_LOCAL_DECL(extern intptr_t scheme_hash_iteration_count);
THREAD_LOCAL_DECL(extern intptr_t scheme_code_page_total);

#ifdef MZ_USE_JIT
extern int scheme_jit_malloced;
#else
# define scheme_jit_malloced 0
#endif

/*========================================================================*/
/*                  local variables and prototypes                        */
/*========================================================================*/

#define INIT_TB_SIZE 20

#ifndef MZ_THREAD_QUANTUM_USEC
# define MZ_THREAD_QUANTUM_USEC 10000
#endif

THREAD_LOCAL_DECL(static int buffer_init_size);

THREAD_LOCAL_DECL(Scheme_Thread *scheme_current_thread = NULL);
THREAD_LOCAL_DECL(Scheme_Thread *scheme_main_thread = NULL);
THREAD_LOCAL_DECL(Scheme_Thread *scheme_first_thread = NULL);

XFORM_NONGCING Scheme_Thread *scheme_get_current_thread() { return scheme_current_thread; }
XFORM_NONGCING intptr_t scheme_get_multiple_count() { return scheme_current_thread->ku.multiple.count; }
XFORM_NONGCING Scheme_Object **scheme_get_multiple_array() { return scheme_current_thread->ku.multiple.array; }
XFORM_NONGCING void scheme_set_current_thread_ran_some() { scheme_current_thread->ran_some = 1; }

THREAD_LOCAL_DECL(Scheme_Thread_Set *scheme_thread_set_top);

THREAD_LOCAL_DECL(static int num_running_threads); /* not counting original */

#ifdef LINK_EXTENSIONS_BY_TABLE
Scheme_Thread **scheme_current_thread_ptr;
volatile int *scheme_fuel_counter_ptr;
#endif

THREAD_LOCAL_DECL(static int swap_no_setjmp = 0);

THREAD_LOCAL_DECL(static int thread_swap_count);
THREAD_LOCAL_DECL(int scheme_did_gc_count);

SHARED_OK static int init_load_on_demand = 1;

#ifdef RUNSTACK_IS_GLOBAL
THREAD_LOCAL_DECL(Scheme_Object **scheme_current_runstack_start);
THREAD_LOCAL_DECL(Scheme_Object **scheme_current_runstack);
THREAD_LOCAL_DECL(MZ_MARK_STACK_TYPE scheme_current_cont_mark_stack);
THREAD_LOCAL_DECL(MZ_MARK_POS_TYPE scheme_current_cont_mark_pos);
#endif

THREAD_LOCAL_DECL(static Scheme_Custodian *main_custodian);
THREAD_LOCAL_DECL(static Scheme_Custodian *last_custodian);
THREAD_LOCAL_DECL(static Scheme_Hash_Table *limited_custodians = NULL);

READ_ONLY static Scheme_Object *initial_inspector;

#ifndef MZ_PRECISE_GC
static int cust_box_count, cust_box_alloc;
static Scheme_Custodian_Box **cust_boxes;
# ifndef USE_SENORA_GC
extern int GC_is_marked(void *);
# endif
#endif

READ_ONLY Scheme_At_Exit_Proc replacement_at_exit;

ROSYM Scheme_Object *scheme_parameterization_key;
ROSYM Scheme_Object *scheme_exn_handler_key;
ROSYM Scheme_Object *scheme_break_enabled_key;

THREAD_LOCAL_DECL(intptr_t scheme_total_gc_time);
THREAD_LOCAL_DECL(static intptr_t start_this_gc_time);
THREAD_LOCAL_DECL(static intptr_t end_this_gc_time);
static void get_ready_for_GC(void);
static void done_with_GC(void);
#ifdef MZ_PRECISE_GC
static void inform_GC(int master_gc, int major_gc, intptr_t pre_used, intptr_t post_used,
                      intptr_t pre_admin, intptr_t post_admin);
#endif

THREAD_LOCAL_DECL(static volatile short delayed_break_ready);
THREAD_LOCAL_DECL(static Scheme_Thread *main_break_target_thread);

THREAD_LOCAL_DECL(Scheme_Sleep_Proc scheme_place_sleep);
HOOK_SHARED_OK void (*scheme_sleep)(float seconds, void *fds);
HOOK_SHARED_OK void (*scheme_notify_multithread)(int on);
HOOK_SHARED_OK void (*scheme_wakeup_on_input)(void *fds);
HOOK_SHARED_OK int (*scheme_check_for_break)(void);
HOOK_SHARED_OK Scheme_On_Atomic_Timeout_Proc scheme_on_atomic_timeout;
HOOK_SHARED_OK static int atomic_timeout_auto_suspend;
HOOK_SHARED_OK static int atomic_timeout_atomic_level;

THREAD_LOCAL_DECL(struct Scheme_GC_Pre_Post_Callback_Desc *gc_prepost_callback_descs);

ROSYM static Scheme_Object *read_symbol, *write_symbol, *execute_symbol, *delete_symbol, *exists_symbol;
ROSYM static Scheme_Object *client_symbol, *server_symbol;

THREAD_LOCAL_DECL(static int do_atomic = 0);
THREAD_LOCAL_DECL(static int missed_context_switch = 0);
THREAD_LOCAL_DECL(static int have_activity = 0);
THREAD_LOCAL_DECL(int scheme_active_but_sleeping = 0);
THREAD_LOCAL_DECL(static int thread_ended_with_activity);
THREAD_LOCAL_DECL(int scheme_no_stack_overflow);
THREAD_LOCAL_DECL(int all_breaks_disabled = 0);
THREAD_LOCAL_DECL(static int needs_sleep_cancelled);
THREAD_LOCAL_DECL(static double needs_sleep_time_end); /* back-door result */
THREAD_LOCAL_DECL(static int tls_pos = 0);

/* On swap, put target in a static variable, instead of on the stack,
   so that the swapped-out thread is less likely to have a pointer
   to the target thread. */
THREAD_LOCAL_DECL(static Scheme_Thread *swap_target);

THREAD_LOCAL_DECL(static Scheme_Object *scheduled_kills);
THREAD_LOCAL_DECL(static Scheme_Object *the_nested_exn_handler);
THREAD_LOCAL_DECL(static Scheme_Object *cust_closers);
THREAD_LOCAL_DECL(static Scheme_Object *thread_swap_callbacks);
THREAD_LOCAL_DECL(static Scheme_Object *thread_swap_out_callbacks);
THREAD_LOCAL_DECL(static Scheme_Object *recycle_cell);
THREAD_LOCAL_DECL(static Scheme_Object *maybe_recycle_cell);
THREAD_LOCAL_DECL(static int recycle_cc_count);

THREAD_LOCAL_DECL(struct Scheme_Hash_Table *place_local_misc_table);

#if defined(MZ_PRECISE_GC) && defined(MZ_USE_PLACES)
extern intptr_t GC_is_place();
#endif

#ifdef MZ_PRECISE_GC
extern intptr_t GC_get_memory_use(void *c);
#else
extern MZ_DLLIMPORT long GC_get_memory_use();
#endif

typedef struct Thread_Cell {
  Scheme_Object so;
  char inherited, assigned;
  Scheme_Object *def_val;
  /* A thread's thread_cell table maps cells to keys weakly.
     This table maps keys to values weakly. The two weak
     levels ensure that thread cells are properly GCed
     when the value of a thread cell references the thread
     cell. */
  Scheme_Bucket_Table *vals;
} Thread_Cell;
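
/* Why both levels need to be weak: if `vals' mapped keys strongly, then a
   cell whose per-thread value (directly or indirectly) reaches the cell
   itself would keep its own key alive, and neither the cell nor the value
   could ever be collected. */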
#ifdef MZ_PRECISE_GC
/* This is a trick to get the types right. Note that
   the layout of the weak box is defined by the
   GC spec. */
typedef struct {
  short type;
  short hash_key;
  Scheme_Custodian *val;
} Scheme_Custodian_Weak_Box;

# define MALLOC_MREF() (Scheme_Custodian_Reference *)scheme_make_late_weak_box(NULL)
# define CUSTODIAN_FAM(x) ((Scheme_Custodian_Weak_Box *)x)->val
# define xCUSTODIAN_FAM(x) SCHEME_BOX_VAL(x)
#else
# define MALLOC_MREF() MALLOC_ONE_WEAK(Scheme_Custodian_Reference)
# define CUSTODIAN_FAM(x) (*(x))
# define xCUSTODIAN_FAM(x) (*(x))
#endif
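
/* A minimal sketch (disabled) of the idiom used throughout this file: a
   Scheme_Custodian_Reference is a weak cell, so CUSTODIAN_FAM() can yield
   NULL once the referenced custodian has been collected, and every caller
   must check before use (compare remove_managed() below). The function
   name here is hypothetical. */
#if 0
static Scheme_Custodian *hypothetical_deref(Scheme_Custodian_Reference *mr)
{
  Scheme_Custodian *m;
  if (!mr)
    return NULL;
  m = CUSTODIAN_FAM(mr); /* may be NULL if the custodian was GCed */
  return m;
}
#endif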
typedef struct Proc_Global_Rec {
  const char *key;
  void *val;
  struct Proc_Global_Rec *next;
} Proc_Global_Rec;

SHARED_OK static Proc_Global_Rec *process_globals;
#if defined(MZ_USE_MZRT)
static mzrt_mutex *process_global_lock;
#endif

#ifdef MZ_PRECISE_GC
static void register_traversers(void);
#endif

static Scheme_Object *custodian_require_mem(int argc, Scheme_Object *args[]);
static Scheme_Object *custodian_limit_mem(int argc, Scheme_Object *args[]);
static Scheme_Object *custodian_can_mem(int argc, Scheme_Object *args[]);
static Scheme_Object *new_tracking_fun(int argc, Scheme_Object *args[]);
static Scheme_Object *union_tracking_val(int argc, Scheme_Object *args[]);
static Scheme_Object *collect_garbage(int argc, Scheme_Object *args[]);
static Scheme_Object *current_memory_use(int argc, Scheme_Object *args[]);
static Scheme_Object *sch_thread(int argc, Scheme_Object *args[]);
static Scheme_Object *sch_thread_nokill(int argc, Scheme_Object *args[]);
static Scheme_Object *sch_sleep(int argc, Scheme_Object *args[]);
static Scheme_Object *thread_p(int argc, Scheme_Object *args[]);
static Scheme_Object *thread_running_p(int argc, Scheme_Object *args[]);
static Scheme_Object *thread_dead_p(int argc, Scheme_Object *args[]);
static Scheme_Object *thread_wait(int argc, Scheme_Object *args[]);
static Scheme_Object *sch_current(int argc, Scheme_Object *args[]);
static Scheme_Object *kill_thread(int argc, Scheme_Object *args[]);
static Scheme_Object *break_thread(int argc, Scheme_Object *args[]);
static Scheme_Object *thread_suspend(int argc, Scheme_Object *args[]);
static Scheme_Object *thread_resume(int argc, Scheme_Object *args[]);
static Scheme_Object *make_thread_suspend(int argc, Scheme_Object *args[]);
static Scheme_Object *make_thread_resume(int argc, Scheme_Object *args[]);
static Scheme_Object *make_thread_dead(int argc, Scheme_Object *args[]);
static void register_thread_sync();
static Scheme_Object *sch_sync(int argc, Scheme_Object *args[]);
static Scheme_Object *sch_sync_timeout(int argc, Scheme_Object *args[]);
static Scheme_Object *sch_sync_enable_break(int argc, Scheme_Object *args[]);
static Scheme_Object *sch_sync_timeout_enable_break(int argc, Scheme_Object *args[]);
static Scheme_Object *evt_p(int argc, Scheme_Object *args[]);
static Scheme_Object *evts_to_evt(int argc, Scheme_Object *args[]);
static Scheme_Object *make_custodian(int argc, Scheme_Object *argv[]);
static Scheme_Object *make_custodian_from_main(int argc, Scheme_Object *argv[]);
static Scheme_Object *custodian_p(int argc, Scheme_Object *argv[]);
static Scheme_Object *custodian_close_all(int argc, Scheme_Object *argv[]);
static Scheme_Object *custodian_to_list(int argc, Scheme_Object *argv[]);
static Scheme_Object *current_custodian(int argc, Scheme_Object *argv[]);
static Scheme_Object *make_custodian_box(int argc, Scheme_Object *argv[]);
static Scheme_Object *custodian_box_value(int argc, Scheme_Object *argv[]);
static Scheme_Object *custodian_box_p(int argc, Scheme_Object *argv[]);
static Scheme_Object *call_as_nested_thread(int argc, Scheme_Object *argv[]);
static Scheme_Object *current_namespace(int argc, Scheme_Object *args[]);
static Scheme_Object *namespace_p(int argc, Scheme_Object *args[]);
static Scheme_Object *parameter_p(int argc, Scheme_Object *args[]);
static Scheme_Object *parameter_procedure_eq(int argc, Scheme_Object *args[]);
static Scheme_Object *make_parameter(int argc, Scheme_Object *args[]);
static Scheme_Object *make_derived_parameter(int argc, Scheme_Object *args[]);
static Scheme_Object *extend_parameterization(int argc, Scheme_Object *args[]);
static Scheme_Object *parameterization_p(int argc, Scheme_Object *args[]);
static Scheme_Object *reparameterize(int argc, Scheme_Object **argv);
static Scheme_Object *make_thread_cell(int argc, Scheme_Object *args[]);
static Scheme_Object *thread_cell_p(int argc, Scheme_Object *args[]);
static Scheme_Object *thread_cell_get(int argc, Scheme_Object *args[]);
static Scheme_Object *thread_cell_set(int argc, Scheme_Object *args[]);
static Scheme_Object *thread_cell_values(int argc, Scheme_Object *args[]);
static Scheme_Object *make_security_guard(int argc, Scheme_Object *argv[]);
static Scheme_Object *security_guard_p(int argc, Scheme_Object *argv[]);
static Scheme_Object *current_security_guard(int argc, Scheme_Object *argv[]);
static Scheme_Object *make_thread_set(int argc, Scheme_Object *argv[]);
static Scheme_Object *thread_set_p(int argc, Scheme_Object *argv[]);
static Scheme_Object *current_thread_set(int argc, Scheme_Object *argv[]);
static Scheme_Object *current_thread_initial_stack_size(int argc, Scheme_Object *argv[]);
static void adjust_custodian_family(void *pr, void *ignored);
static Scheme_Object *make_will_executor(int argc, Scheme_Object *args[]);
static Scheme_Object *will_executor_p(int argc, Scheme_Object *args[]);
static Scheme_Object *register_will(int argc, Scheme_Object *args[]);
static Scheme_Object *will_executor_try(int argc, Scheme_Object *args[]);
static Scheme_Object *will_executor_go(int argc, Scheme_Object *args[]);
static Scheme_Object *will_executor_sema(Scheme_Object *w, int *repost);
static Scheme_Object *check_break_now(int argc, Scheme_Object *args[]);
static int syncing_ready(Scheme_Object *s, Scheme_Schedule_Info *sinfo);
static void make_initial_config(Scheme_Thread *p);
static int do_kill_thread(Scheme_Thread *p);
static void suspend_thread(Scheme_Thread *p);
static int check_sleep(int need_activity, int sleep_now);
static void remove_thread(Scheme_Thread *r);
static void exit_or_escape(Scheme_Thread *p);
static int resume_suspend_ready(Scheme_Object *o, Scheme_Schedule_Info *sinfo);
static int dead_ready(Scheme_Object *o, Scheme_Schedule_Info *sinfo);
static int cust_box_ready(Scheme_Object *o);
static int can_break_param(Scheme_Thread *p);
static int post_system_idle();
static Scheme_Object *current_stats(int argc, Scheme_Object *args[]);

SHARED_OK static Scheme_Object **config_map;

typedef struct {
  MZTAG_IF_REQUIRED
  short is_derived;
  Scheme_Object *key;
  Scheme_Object *guard;
  Scheme_Object *extract_guard;
  Scheme_Object *defcell;
} ParamData;

enum {
  CONFIG_DIRECT,
  CONFIG_INDIRECT
};

typedef struct Scheme_Thread_Custodian_Hop {
  Scheme_Object so;
  Scheme_Thread *p; /* really an indirection with precise gc */
} Scheme_Thread_Custodian_Hop;
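
/* The hop is the object actually registered with a custodian on a thread's
   behalf: under precise GC its `p' field is weak (see the WEAKIFIED()
   dereferences below), so a custodian reference alone does not keep an
   otherwise-unreachable thread alive. A NULL result from WEAKIFIED() means
   the thread has already been collected. */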
SHARED_OK static Scheme_Custodian_Extractor *extractors;

#define SETJMP(p) scheme_setjmpup(&p->jmpup_buf, p, p->stack_start)
#define LONGJMP(p) scheme_longjmpup(&p->jmpup_buf)
#define RESETJMP(p) scheme_reset_jmpup_buf(&p->jmpup_buf)

#ifdef WIN32_THREADS
/* Only set up for Boehm GC that thinks it's a DLL: */
# define GC_THINKS_ITS_A_DLL_BUT_ISNT
# ifdef GC_THINKS_ITS_A_DLL_BUT_ISNT
extern BOOL WINAPI DllMain(HINSTANCE inst, ULONG reason, LPVOID reserved);
# endif
#endif

#ifndef MZ_PRECISE_GC
# define scheme_thread_hop_type scheme_thread_type
#endif

#ifdef MZ_PRECISE_GC
uintptr_t scheme_get_current_thread_stack_start(void);
#endif

SHARED_OK Scheme_Object *initial_cmdline_vec;

/*========================================================================*/
/*                             initialization                             */
/*========================================================================*/

void scheme_init_thread(Scheme_Env *env)
{
#ifdef MZ_PRECISE_GC
  register_traversers();
#endif

  REGISTER_SO(read_symbol);
  REGISTER_SO(write_symbol);
  REGISTER_SO(execute_symbol);
  REGISTER_SO(delete_symbol);
  REGISTER_SO(exists_symbol);
  REGISTER_SO(client_symbol);
  REGISTER_SO(server_symbol);

  read_symbol = scheme_intern_symbol("read");
  write_symbol = scheme_intern_symbol("write");
  execute_symbol = scheme_intern_symbol("execute");
  delete_symbol = scheme_intern_symbol("delete");
  exists_symbol = scheme_intern_symbol("exists");
  client_symbol = scheme_intern_symbol("client");
  server_symbol = scheme_intern_symbol("server");

  GLOBAL_PRIM_W_ARITY("dump-memory-stats" , scheme_dump_gc_stats, 0, -1, env);
  GLOBAL_PRIM_W_ARITY("vector-set-performance-stats!", current_stats , 1, 2, env);

  GLOBAL_PRIM_W_ARITY("make-empty-namespace", scheme_make_namespace, 0, 0, env);

  GLOBAL_PRIM_W_ARITY("thread" , sch_thread , 1, 1, env);
  GLOBAL_PRIM_W_ARITY("thread/suspend-to-kill", sch_thread_nokill , 1, 1, env);
  GLOBAL_PRIM_W_ARITY("sleep" , sch_sleep , 0, 1, env);
  GLOBAL_FOLDING_PRIM("thread?" , thread_p , 1, 1, 1, env);
  GLOBAL_PRIM_W_ARITY("thread-running?" , thread_running_p , 1, 1, env);
  GLOBAL_PRIM_W_ARITY("thread-dead?" , thread_dead_p , 1, 1, env);
  GLOBAL_PRIM_W_ARITY("thread-wait" , thread_wait , 1, 1, env);
  GLOBAL_PRIM_W_ARITY("current-thread" , sch_current , 0, 0, env);
  GLOBAL_PRIM_W_ARITY("kill-thread" , kill_thread , 1, 1, env);
  GLOBAL_PRIM_W_ARITY("break-thread" , break_thread , 1, 1, env);
  GLOBAL_PRIM_W_ARITY("thread-suspend" , thread_suspend , 1, 1, env);
  GLOBAL_PRIM_W_ARITY("thread-resume" , thread_resume , 1, 2, env);
  GLOBAL_PRIM_W_ARITY("thread-resume-evt" , make_thread_resume , 1, 1, env);
  GLOBAL_PRIM_W_ARITY("thread-suspend-evt" , make_thread_suspend, 1, 1, env);
  GLOBAL_PRIM_W_ARITY("thread-dead-evt" , make_thread_dead , 1, 1, env);

  register_thread_sync();
  scheme_add_evt(scheme_thread_suspend_type, (Scheme_Ready_Fun)resume_suspend_ready, NULL, NULL, 1);
  scheme_add_evt(scheme_thread_resume_type, (Scheme_Ready_Fun)resume_suspend_ready, NULL, NULL, 1);
  scheme_add_evt(scheme_thread_dead_type, (Scheme_Ready_Fun)dead_ready, NULL, NULL, 1);
  scheme_add_evt(scheme_cust_box_type, cust_box_ready, NULL, NULL, 0);

  GLOBAL_PARAMETER("current-custodian" , current_custodian , MZCONFIG_CUSTODIAN, env);
  GLOBAL_PRIM_W_ARITY("make-custodian" , make_custodian , 0, 1, env);
  GLOBAL_FOLDING_PRIM("custodian?" , custodian_p , 1, 1, 1 , env);
  GLOBAL_PRIM_W_ARITY("custodian-shutdown-all", custodian_close_all , 1, 1, env);
  GLOBAL_PRIM_W_ARITY("custodian-managed-list", custodian_to_list , 2, 2, env);
  GLOBAL_PRIM_W_ARITY("make-custodian-box" , make_custodian_box , 2, 2, env);
  GLOBAL_PRIM_W_ARITY("custodian-box-value" , custodian_box_value , 1, 1, env);
  GLOBAL_FOLDING_PRIM("custodian-box?" , custodian_box_p , 1, 1, 1 , env);
  GLOBAL_PRIM_W_ARITY("call-in-nested-thread" , call_as_nested_thread, 1, 2, env);

  GLOBAL_PARAMETER("current-namespace" , current_namespace, MZCONFIG_ENV, env);
  GLOBAL_PRIM_W_ARITY("namespace?" , namespace_p , 1, 1, env);

  GLOBAL_PRIM_W_ARITY("security-guard?" , security_guard_p , 1, 1, env);
  GLOBAL_PRIM_W_ARITY("make-security-guard", make_security_guard, 3, 4, env);
  GLOBAL_PARAMETER("current-security-guard", current_security_guard, MZCONFIG_SECURITY_GUARD, env);

  GLOBAL_PRIM_W_ARITY("thread-group?" , thread_set_p , 1, 1, env);
  GLOBAL_PRIM_W_ARITY("make-thread-group", make_thread_set, 0, 1, env);
  GLOBAL_PARAMETER("current-thread-group", current_thread_set, MZCONFIG_THREAD_SET, env);

  GLOBAL_PRIM_W_ARITY("parameter?" , parameter_p , 1, 1, env);
  GLOBAL_PRIM_W_ARITY("make-parameter" , make_parameter , 1, 2, env);
  GLOBAL_PRIM_W_ARITY("make-derived-parameter", make_derived_parameter, 3, 3, env);
  GLOBAL_PRIM_W_ARITY("parameter-procedure=?" , parameter_procedure_eq, 2, 2, env);
  GLOBAL_PRIM_W_ARITY("parameterization?" , parameterization_p , 1, 1, env);

  GLOBAL_PRIM_W_ARITY("thread-cell?" , thread_cell_p , 1, 1, env);
  GLOBAL_PRIM_W_ARITY("make-thread-cell" , make_thread_cell , 1, 2, env);
  GLOBAL_PRIM_W_ARITY("thread-cell-ref" , thread_cell_get , 1, 1, env);
  GLOBAL_PRIM_W_ARITY("thread-cell-set!" , thread_cell_set , 2, 2, env);
  GLOBAL_PRIM_W_ARITY("current-preserved-thread-cell-values", thread_cell_values, 0, 1, env);

  GLOBAL_PRIM_W_ARITY("make-will-executor", make_will_executor, 0, 0, env);
  GLOBAL_PRIM_W_ARITY("will-executor?" , will_executor_p , 1, 1, env);
  GLOBAL_PRIM_W_ARITY("will-register" , register_will , 3, 3, env);
  GLOBAL_PRIM_W_ARITY("will-try-execute" , will_executor_try , 1, 1, env);
  GLOBAL_PRIM_W_ARITY("will-execute" , will_executor_go , 1, 1, env);

  scheme_add_evt_through_sema(scheme_will_executor_type, will_executor_sema, NULL);

  GLOBAL_PRIM_W_ARITY("collect-garbage" , collect_garbage , 0, 0, env);
  GLOBAL_PRIM_W_ARITY("current-memory-use" , current_memory_use , 0, 1, env);

  GLOBAL_PRIM_W_ARITY("custodian-require-memory" , custodian_require_mem, 3, 3, env);
  GLOBAL_PRIM_W_ARITY("custodian-limit-memory" , custodian_limit_mem , 2, 3, env);
  GLOBAL_PRIM_W_ARITY("custodian-memory-accounting-available?", custodian_can_mem , 0, 0, env);

  GLOBAL_FOLDING_PRIM("evt?" , evt_p , 1, 1 , 1, env);
  GLOBAL_PRIM_W_ARITY2("sync" , sch_sync , 1, -1, 0, -1, env);
  GLOBAL_PRIM_W_ARITY2("sync/timeout" , sch_sync_timeout , 2, -1, 0, -1, env);
  GLOBAL_PRIM_W_ARITY2("sync/enable-break" , sch_sync_enable_break , 1, -1, 0, -1, env);
  GLOBAL_PRIM_W_ARITY2("sync/timeout/enable-break", sch_sync_timeout_enable_break, 2, -1, 0, -1, env);
  GLOBAL_PRIM_W_ARITY("choice-evt" , evts_to_evt , 0, -1, env);

  GLOBAL_PARAMETER("current-thread-initial-stack-size", current_thread_initial_stack_size, MZCONFIG_THREAD_INIT_STACK_SIZE, env);
}

void scheme_init_thread_places(void) {
  buffer_init_size = INIT_TB_SIZE;

  REGISTER_SO(recycle_cell);
  REGISTER_SO(maybe_recycle_cell);
  REGISTER_SO(gc_prepost_callback_descs);
  REGISTER_SO(place_local_misc_table);
}

void scheme_init_memtrace(Scheme_Env *env)
{
  Scheme_Object *v;
  Scheme_Env *newenv;

  v = scheme_intern_symbol("#%memtrace");
  newenv = scheme_primitive_module(v, env);

  v = scheme_make_symbol("memory-trace-continuation-mark");
  scheme_add_global("memory-trace-continuation-mark", v, newenv);
  v = scheme_make_prim_w_arity(new_tracking_fun,
                               "new-memtrace-tracking-function", 1, 1);
  scheme_add_global("new-memtrace-tracking-function", v, newenv);
  v = scheme_make_prim_w_arity(union_tracking_val,
                               "unioned-memtrace-tracking-value", 1, 1);
  scheme_add_global("unioned-memtrace-tracking-value", v, newenv);
  scheme_finish_primitive_module(newenv);
}

void scheme_init_inspector() {
  REGISTER_SO(initial_inspector);
  initial_inspector = scheme_make_initial_inspectors();
  /* Keep the initial inspector in case someone resets Scheme (by
     calling scheme_basic_env() a second time). Using the same
     inspector after a reset lets us use the same initial module
     instances. */
}

Scheme_Object *scheme_get_current_inspector()
  XFORM_SKIP_PROC
{
  Scheme_Config *c;

  if (scheme_defining_primitives)
    return initial_inspector;

  c = scheme_current_config();
  return scheme_get_param(c, MZCONFIG_INSPECTOR);
}

Scheme_Object *scheme_get_initial_inspector(void)
{
  return initial_inspector;
}

void scheme_init_parameterization()
{
  REGISTER_SO(scheme_exn_handler_key);
  REGISTER_SO(scheme_parameterization_key);
  REGISTER_SO(scheme_break_enabled_key);
  scheme_exn_handler_key = scheme_make_symbol("exnh");
  scheme_parameterization_key = scheme_make_symbol("paramz");
  scheme_break_enabled_key = scheme_make_symbol("break-on?");
}

void scheme_init_paramz(Scheme_Env *env)
{
  Scheme_Object *v;
  Scheme_Env *newenv;

  v = scheme_intern_symbol("#%paramz");
  newenv = scheme_primitive_module(v, env);

  scheme_add_global_constant("exception-handler-key", scheme_exn_handler_key , newenv);
  scheme_add_global_constant("parameterization-key" , scheme_parameterization_key, newenv);
  scheme_add_global_constant("break-enabled-key" , scheme_break_enabled_key , newenv);

  GLOBAL_PRIM_W_ARITY("extend-parameterization" , extend_parameterization , 1, -1, newenv);
  GLOBAL_PRIM_W_ARITY("check-for-break" , check_break_now , 0, 0, newenv);
  GLOBAL_PRIM_W_ARITY("reparameterize" , reparameterize , 1, 1, newenv);
  GLOBAL_PRIM_W_ARITY("make-custodian-from-main", make_custodian_from_main, 0, 0, newenv);

  scheme_finish_primitive_module(newenv);
  scheme_protect_primitive_provide(newenv, NULL);
}

static Scheme_Object *collect_garbage(int c, Scheme_Object *p[])
{
  scheme_collect_garbage();

  return scheme_void;
}

static Scheme_Object *current_memory_use(int argc, Scheme_Object *args[])
{
  Scheme_Object *arg = NULL;
  intptr_t retval = 0;

  if (argc) {
    if (SAME_TYPE(SCHEME_TYPE(args[0]), scheme_custodian_type)) {
      arg = args[0];
    } else if (SCHEME_PROCP(args[0])) {
      arg = args[0];
    } else {
      scheme_wrong_type("current-memory-use",
                        "custodian or memory-trace-function",
                        0, argc, args);
    }
  }

#ifdef MZ_PRECISE_GC
  retval = GC_get_memory_use(arg);
#else
  retval = GC_get_memory_use();
#endif

  return scheme_make_integer_value(retval);
}

/*========================================================================*/
/*                              custodians                                */
/*========================================================================*/

static void adjust_limit_table(Scheme_Custodian *c)
{
  /* If a custodian has a limit and any object or children, then it
     must not be collected and merged with its parent. To prevent
     collection, we register the custodian in the `limited_custodians'
     table. */
  if (c->has_limit) {
    if (c->elems || CUSTODIAN_FAM(c->children)) {
      if (!c->recorded) {
        c->recorded = 1;
        if (!limited_custodians)
          limited_custodians = scheme_make_hash_table(SCHEME_hash_ptr);
        scheme_hash_set(limited_custodians, (Scheme_Object *)c, scheme_true);
      }
    } else if (c->recorded) {
      c->recorded = 0;
      if (limited_custodians)
        scheme_hash_set(limited_custodians, (Scheme_Object *)c, NULL);
    }
  }
}

static Scheme_Object *custodian_require_mem(int argc, Scheme_Object *args[])
{
  intptr_t lim;
  Scheme_Custodian *c1, *c2, *cx;

  if (NOT_SAME_TYPE(SCHEME_TYPE(args[0]), scheme_custodian_type)) {
    scheme_wrong_type("custodian-require-memory", "custodian", 0, argc, args);
    return NULL;
  }

  if (SCHEME_INTP(args[1]) && (SCHEME_INT_VAL(args[1]) > 0)) {
    lim = SCHEME_INT_VAL(args[1]);
  } else if (SCHEME_BIGNUMP(args[1]) && SCHEME_BIGPOS(args[1])) {
    lim = 0x3fffffff; /* more memory than we actually have */
  } else {
    scheme_wrong_type("custodian-require-memory", "positive exact integer", 1, argc, args);
    return NULL;
  }

  if (NOT_SAME_TYPE(SCHEME_TYPE(args[2]), scheme_custodian_type)) {
    scheme_wrong_type("custodian-require-memory", "custodian", 2, argc, args);
    return NULL;
  }

  c1 = (Scheme_Custodian *)args[0];
  c2 = (Scheme_Custodian *)args[2];

  /* Check whether c1 is super to c2: */
  if (c1 == c2) {
    cx = NULL;
  } else {
    for (cx = c2; cx && NOT_SAME_OBJ(cx, c1); ) {
      cx = CUSTODIAN_FAM(cx->parent);
    }
  }
  if (!cx) {
    scheme_raise_exn(MZEXN_FAIL_CONTRACT,
                     "custodian-require-memory: second custodian is not a sub-custodian of the first custodian");
  }

#ifdef MZ_PRECISE_GC
  if (GC_set_account_hook(MZACCT_REQUIRE, c1, lim, c2))
    return scheme_void;
#endif

  scheme_raise_exn(MZEXN_FAIL_UNSUPPORTED,
                   "custodian-require-memory: not supported");
  return NULL; /* doesn't get here */
}

static Scheme_Object *custodian_limit_mem(int argc, Scheme_Object *args[])
{
  intptr_t lim;

  if (NOT_SAME_TYPE(SCHEME_TYPE(args[0]), scheme_custodian_type)) {
    scheme_wrong_type("custodian-limit-memory", "custodian", 0, argc, args);
    return NULL;
  }

  if (SCHEME_INTP(args[1]) && (SCHEME_INT_VAL(args[1]) > 0)) {
    lim = SCHEME_INT_VAL(args[1]);
  } else if (SCHEME_BIGNUMP(args[1]) && SCHEME_BIGPOS(args[1])) {
    lim = 0x3fffffff; /* more memory than we actually have */
  } else {
    scheme_wrong_type("custodian-limit-memory", "positive exact integer", 1, argc, args);
    return NULL;
  }

  if (argc > 2) {
    if (NOT_SAME_TYPE(SCHEME_TYPE(args[2]), scheme_custodian_type)) {
      scheme_wrong_type("custodian-require-memory", "custodian", 2, argc, args);
      return NULL;
    }
  }

  ((Scheme_Custodian *)args[0])->has_limit = 1;
  adjust_limit_table((Scheme_Custodian *)args[0]);
  if (argc > 2) {
    ((Scheme_Custodian *)args[2])->has_limit = 1;
    adjust_limit_table((Scheme_Custodian *)args[2]);
  }

#ifdef MZ_PRECISE_GC
  if (GC_set_account_hook(MZACCT_LIMIT, args[0], lim, (argc > 2) ? args[2] : args[0]))
    return scheme_void;
#endif

  scheme_raise_exn(MZEXN_FAIL_UNSUPPORTED,
                   "custodian-limit-memory: not supported");
  return NULL; /* doesn't get here */
}

static Scheme_Object *custodian_can_mem(int argc, Scheme_Object *args[])
{
#ifdef MZ_PRECISE_GC
  return (GC_accouting_enabled() ? scheme_true : scheme_false);
#else
  return scheme_false;
#endif
}

static Scheme_Object *new_tracking_fun(int argc, Scheme_Object *args[])
{
  int retval = 0;

#ifdef MZ_PRECISE_GC
  retval = GC_mtrace_new_id(args[0]);
#endif

  return scheme_make_integer(retval);
}

static Scheme_Object *union_tracking_val(int argc, Scheme_Object *args[])
{
  int retval = 0;

#ifdef MZ_PRECISE_GC
  retval = GC_mtrace_union_current_with(SCHEME_INT_VAL(args[0]));
#endif

  return scheme_make_integer(retval);
}

static void ensure_custodian_space(Scheme_Custodian *m, int k)
{
  int i;

  if (m->count + k >= m->alloc) {
    Scheme_Object ***naya_boxes;
    Scheme_Custodian_Reference **naya_mrefs;
    Scheme_Close_Custodian_Client **naya_closers;
    void **naya_data;

    m->alloc = (m->alloc ? (2 * m->alloc) : 4);
    if (m->alloc < k)
      m->alloc += k;

    naya_boxes = MALLOC_N(Scheme_Object**, m->alloc);
    naya_closers = MALLOC_N(Scheme_Close_Custodian_Client*, m->alloc);
    naya_data = MALLOC_N(void*, m->alloc);
    naya_mrefs = MALLOC_N(Scheme_Custodian_Reference*, m->alloc);

    for (i = m->count; i--; ) {
      naya_boxes[i] = m->boxes[i];
      m->boxes[i] = NULL;
      naya_closers[i] = m->closers[i];
      m->closers[i] = NULL;
      naya_data[i] = m->data[i];
      m->data[i] = NULL;
      naya_mrefs[i] = m->mrefs[i];
      m->mrefs[i] = NULL;
    }

    m->boxes = naya_boxes;
    m->closers = naya_closers;
    m->data = naya_data;
    m->mrefs = naya_mrefs;
  }
}

static void add_managed_box(Scheme_Custodian *m,
                            Scheme_Object **box, Scheme_Custodian_Reference *mref,
                            Scheme_Close_Custodian_Client *f, void *data)
{
  int i;

  for (i = m->count; i--; ) {
    if (!m->boxes[i]) {
      m->boxes[i] = box;
      m->closers[i] = f;
      m->data[i] = data;
      m->mrefs[i] = mref;

      m->elems++;
      adjust_limit_table(m);

      return;
    }
  }

  ensure_custodian_space(m, 1);

  m->boxes[m->count] = box;
  m->closers[m->count] = f;
  m->data[m->count] = data;
  m->mrefs[m->count] = mref;

  m->elems++;
  adjust_limit_table(m);

  m->count++;
}

static void remove_managed(Scheme_Custodian_Reference *mr, Scheme_Object *o,
                           Scheme_Close_Custodian_Client **old_f, void **old_data)
{
  Scheme_Custodian *m;
  int i;

  if (!mr)
    return;
  m = CUSTODIAN_FAM(mr);
  if (!m)
    return;

  for (i = m->count; i--; ) {
    if (m->boxes[i] && SAME_OBJ((xCUSTODIAN_FAM(m->boxes[i])), o)) {
      xCUSTODIAN_FAM(m->boxes[i]) = 0;
      m->boxes[i] = NULL;
      CUSTODIAN_FAM(m->mrefs[i]) = 0;
      m->mrefs[i] = NULL;
      if (old_f)
        *old_f = m->closers[i];
      if (old_data)
        *old_data = m->data[i];
      m->data[i] = NULL;

      --m->elems;
      adjust_limit_table(m);

      break;
    }
  }

  while (m->count && !m->boxes[m->count - 1]) {
    --m->count;
  }
}

static void adjust_custodian_family(void *mgr, void *skip_move)
{
  /* Threads note: because this function is only called as a
     finalization callback, it is automatically synchronized by the GC
     locks. And it is synchronized against all finalizations, so a
     managee can't try to unregister while we're shuffling its
     custodian. */
  Scheme_Custodian *r = (Scheme_Custodian *)mgr, *parent, *m;
  int i;

  parent = CUSTODIAN_FAM(r->parent);

  if (parent) {
    /* Remove from parent's list of children: */
    if (CUSTODIAN_FAM(parent->children) == r) {
      CUSTODIAN_FAM(parent->children) = CUSTODIAN_FAM(r->sibling);
    } else {
      m = CUSTODIAN_FAM(parent->children);
      while (m && CUSTODIAN_FAM(m->sibling) != r) {
        m = CUSTODIAN_FAM(m->sibling);
      }
      if (m)
        CUSTODIAN_FAM(m->sibling) = CUSTODIAN_FAM(r->sibling);
    }

    /* Remove from global list: */
    if (CUSTODIAN_FAM(r->global_next))
      CUSTODIAN_FAM(CUSTODIAN_FAM(r->global_next)->global_prev) = CUSTODIAN_FAM(r->global_prev);
    else
      last_custodian = CUSTODIAN_FAM(r->global_prev);
    CUSTODIAN_FAM(CUSTODIAN_FAM(r->global_prev)->global_next) = CUSTODIAN_FAM(r->global_next);

    /* Add children to parent's list: */
    for (m = CUSTODIAN_FAM(r->children); m; ) {
      Scheme_Custodian *next = CUSTODIAN_FAM(m->sibling);

      CUSTODIAN_FAM(m->parent) = parent;
      CUSTODIAN_FAM(m->sibling) = CUSTODIAN_FAM(parent->children);
      CUSTODIAN_FAM(parent->children) = m;

      m = next;
    }

    adjust_limit_table(parent);

    /* Add remaining managed items to parent: */
    if (!skip_move) {
      for (i = 0; i < r->count; i++) {
        if (r->boxes[i]) {
          CUSTODIAN_FAM(r->mrefs[i]) = parent;
          add_managed_box(parent, r->boxes[i], r->mrefs[i], r->closers[i], r->data[i]);
#ifdef MZ_PRECISE_GC
          {
            Scheme_Object *o;
            o = xCUSTODIAN_FAM(r->boxes[i]);
            if (SAME_TYPE(SCHEME_TYPE(o), scheme_thread_hop_type)) {
              o = WEAKIFIED(((Scheme_Thread_Custodian_Hop *)o)->p);
              if (o)
                GC_register_thread(o, parent);
            }
          }
#endif
        }
      }
    }
  }

  CUSTODIAN_FAM(r->parent) = NULL;
  CUSTODIAN_FAM(r->sibling) = NULL;
  if (!skip_move)
    CUSTODIAN_FAM(r->children) = NULL;
  CUSTODIAN_FAM(r->global_prev) = NULL;
  CUSTODIAN_FAM(r->global_next) = NULL;
}

void insert_custodian(Scheme_Custodian *m, Scheme_Custodian *parent)
{
  /* insert into parent's list: */
  CUSTODIAN_FAM(m->parent) = parent;
  if (parent) {
    CUSTODIAN_FAM(m->sibling) = CUSTODIAN_FAM(parent->children);
    CUSTODIAN_FAM(parent->children) = m;
  } else
    CUSTODIAN_FAM(m->sibling) = NULL;

  /* Insert into global chain. A custodian is always inserted
     directly after its parent, so families stay together, and
     the local list stays in the same order as the sibling list. */
  if (parent) {
    Scheme_Custodian *next;
    next = CUSTODIAN_FAM(parent->global_next);
    CUSTODIAN_FAM(m->global_next) = next;
    CUSTODIAN_FAM(m->global_prev) = parent;
    CUSTODIAN_FAM(parent->global_next) = m;
    if (next)
      CUSTODIAN_FAM(next->global_prev) = m;
    else
      last_custodian = m;
  } else {
    CUSTODIAN_FAM(m->global_next) = NULL;
    CUSTODIAN_FAM(m->global_prev) = NULL;
  }

  if (parent)
    adjust_limit_table(parent);
}

Scheme_Custodian *scheme_make_custodian(Scheme_Custodian *parent)
{
  Scheme_Custodian *m;
  Scheme_Custodian_Reference *mw;

  if (!parent)
    parent = main_custodian; /* still NULL if we're creating main; that's ok */

  m = MALLOC_ONE_TAGGED(Scheme_Custodian);

  m->so.type = scheme_custodian_type;

  m->alloc = m->count = 0;

  mw = MALLOC_MREF();
  m->parent = mw;
  mw = MALLOC_MREF();
  m->children = mw;
  mw = MALLOC_MREF();
  m->sibling = mw;
  mw = MALLOC_MREF();
  m->global_next = mw;
  mw = MALLOC_MREF();
  m->global_prev = mw;

  CUSTODIAN_FAM(m->children) = NULL;

  insert_custodian(m, parent);

  scheme_add_finalizer(m, adjust_custodian_family, NULL);

  return m;
}

static void rebox_willdone_object(void *o, void *mr)
{
  Scheme_Custodian *m = CUSTODIAN_FAM((Scheme_Custodian_Reference *)mr);
  Scheme_Close_Custodian_Client *f;
  void *data;

  /* Still needs management? */
  if (m) {
#ifdef MZ_PRECISE_GC
    Scheme_Object *b;
#else
    Scheme_Object **b;
#endif

    remove_managed(mr, o, &f, &data);

#ifdef MZ_PRECISE_GC
    b = scheme_box(NULL);
#else
    b = MALLOC_ONE(Scheme_Object*); /* not atomic this time */
#endif
    xCUSTODIAN_FAM(b) = o;

    /* Put the custodian back: */
    CUSTODIAN_FAM((Scheme_Custodian_Reference *)mr) = m;

    add_managed_box(m, (Scheme_Object **)b, (Scheme_Custodian_Reference *)mr, f, data);
  }
}

static void managed_object_gone(void *o, void *mr)
{
  Scheme_Custodian *m = CUSTODIAN_FAM((Scheme_Custodian_Reference *)mr);

  /* Still has management? */
  if (m)
    remove_managed(mr, o, NULL, NULL);
}

int scheme_custodian_is_available(Scheme_Custodian *m) XFORM_SKIP_PROC
/* may be called from a future thread */
{
  if (m->shut_down)
    return 0;
  return 1;
}

void scheme_custodian_check_available(Scheme_Custodian *m, const char *who, const char *what)
{
  if (!m)
    m = (Scheme_Custodian *)scheme_get_param(scheme_current_config(), MZCONFIG_CUSTODIAN);

  if (!scheme_custodian_is_available(m))
    scheme_arg_mismatch(who, "the custodian has been shut down: ",
                        (Scheme_Object *)m);
}

Scheme_Custodian_Reference *scheme_add_managed(Scheme_Custodian *m, Scheme_Object *o,
                                               Scheme_Close_Custodian_Client *f, void *data,
                                               int must_close)
{
#ifdef MZ_PRECISE_GC
  Scheme_Object *b;
#else
  Scheme_Object **b;
#endif
  Scheme_Custodian_Reference *mr;

  if (!m)
    m = (Scheme_Custodian *)scheme_get_param(scheme_current_config(), MZCONFIG_CUSTODIAN);

  if (m->shut_down) {
    /* The custodian was shut down in the time that it took
       to allocate o. This situation should be avoided if at
       all possible, but here's the fail-safe. */
    if (f)
      f(o, data);
    return NULL;
  }

#ifdef MZ_PRECISE_GC
  b = scheme_make_late_weak_box(NULL);
#else
  b = MALLOC_ONE_WEAK(Scheme_Object*);
#endif
  xCUSTODIAN_FAM(b) = o;

  mr = MALLOC_MREF();

  CUSTODIAN_FAM(mr) = m;

  /* The atomic link via the box `b' allows the execution of wills for
     o. After this, we should either drop the object or we have to
     hold on to the object strongly (for when custodian-close-all is
     called). */
  if (must_close)
    scheme_add_finalizer(o, rebox_willdone_object, mr);
  else
    scheme_add_finalizer(o, managed_object_gone, mr);

  add_managed_box(m, (Scheme_Object **)b, mr, f, data);

  return mr;
}
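
/* A minimal sketch (disabled) of registering an external resource with the
   current custodian so that a custodian shutdown closes it. The wrapper
   struct, closer, and FILE payload are hypothetical; only
   scheme_add_managed(), scheme_remove_managed(), and the
   Scheme_Close_Custodian_Client calling convention f(o, data) come from
   this file. */
#if 0
typedef struct Hypothetical_Port {
  Scheme_Object so;
  FILE *fp;
  Scheme_Custodian_Reference *mref;
} Hypothetical_Port;

static void close_hypothetical_port(Scheme_Object *o, void *data)
{
  Hypothetical_Port *hp = (Hypothetical_Port *)o;
  if (hp->fp) {
    fclose(hp->fp);
    hp->fp = NULL;
  }
}

static void register_hypothetical_port(Hypothetical_Port *hp)
{
  Scheme_Custodian_Reference *mref;
  /* NULL custodian means "use the current custodian parameter";
     must_close = 1 means the closer must run even at exit: */
  mref = scheme_add_managed(NULL, (Scheme_Object *)hp,
                            close_hypothetical_port, NULL, 1);
  /* Keep mref so the registration can be dropped via
     scheme_remove_managed() when the port is closed normally: */
  hp->mref = mref;
}
#endif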
  968. void scheme_remove_managed(Scheme_Custodian_Reference *mr, Scheme_Object *o)
  969. {
  970. /* Is this a good idea? I'm not sure: */
  971. scheme_subtract_finalizer(o, managed_object_gone, mr);
  972. scheme_subtract_finalizer(o, rebox_willdone_object, mr);
  973. remove_managed(mr, o, NULL, NULL);
  974. }
  975. Scheme_Thread *scheme_do_close_managed(Scheme_Custodian *m, Scheme_Exit_Closer_Func cf)
  976. {
  977. Scheme_Thread *kill_self = NULL;
  978. Scheme_Custodian *c, *start, *next_m;
  979. int i, is_thread;
  980. Scheme_Thread *the_thread;
  981. Scheme_Object *o;
  982. Scheme_Close_Custodian_Client *f;
  983. void *data;
  984. if (!m)
  985. m = main_custodian;
  986. if (m->shut_down)
  987. return NULL;
  988. m->shut_down = 1;
  989. /* Need to kill children first, transitively, so find
  990. last decendent. The family will be the global-list from
  991. m to this last decendent, inclusive. */
  992. for (c = m; CUSTODIAN_FAM(c->children); ) {
  993. for (c = CUSTODIAN_FAM(c->children); CUSTODIAN_FAM(c->sibling); ) {
  994. c = CUSTODIAN_FAM(c->sibling);
  995. }
  996. }
  997. start = m;
  998. m = c;
  999. while (1) {
  1000. /* It matters that this loop starts at the top. See
  1001. the m->count = i assignment below. */
  1002. for (i = m->count; i--; ) {
  1003. if (m->boxes[i]) {
  1004. o = xCUSTODIAN_FAM(m->boxes[i]);
  1005. f = m->closers[i];
  1006. data = m->data[i];
  1007. if (!cf && (SAME_TYPE(SCHEME_TYPE(o), scheme_thread_hop_type))) {
  1008. /* We've added an indirection and made it weak. See mr_hop note above. */
  1009. is_thread = 1;
  1010. the_thread = (Scheme_Thread *)WEAKIFIED(((Scheme_Thread_Custodian_Hop *)o)->p);
  1011. } else {
  1012. is_thread = 0;
  1013. the_thread = NULL;
  1014. }
  1015. xCUSTODIAN_FAM(m->boxes[i]) = NULL;
  1016. CUSTODIAN_FAM(m->mrefs[i]) = NULL;
  1017. /* Set m->count to i in case a GC happens while
  1018. the closer is running. If there's a GC, then
  1019. for_each_managed will be called. */
  1020. m->count = i;
  1021. if (is_thread && !the_thread) {
  1022. /* Thread is already collected, so skip */
  1023. } else if (cf) {
  1024. cf(o, f, data);
  1025. } else {
  1026. if (is_thread) {
  1027. if (the_thread) {
  1028. /* Only kill the thread if it has no other custodians */
  1029. if (SCHEME_NULLP(the_thread->extra_mrefs)) {
  1030. if (do_kill_thread(the_thread))
  1031. kill_self = the_thread;
  1032. } else {
  1033. Scheme_Custodian_Reference *mref;
  1034. mref = m->mrefs[i];
  1035. if (mref == the_thread->mref) {
  1036. /* Designate a new main custodian for the thread */
  1037. mref = (Scheme_Custodian_Reference *)SCHEME_CAR(the_thread->extra_mrefs);
  1038. the_thread->mref = mref;
  1039. the_thread->extra_mrefs = SCHEME_CDR(the_thread->extra_mrefs);
  1040. #ifdef MZ_PRECISE_GC
  1041. GC_register_thread(the_thread, CUSTODIAN_FAM(mref));
  1042. #endif
  1043. } else {
  1044. /* Just remove mref from the list of extras */
  1045. Scheme_Object *l, *prev = NULL;
  1046. for (l = the_thread->extra_mrefs; 1; l = SCHEME_CDR(l)) {
  1047. if (SAME_OBJ(SCHEME_CAR(l), (Scheme_Object *)mref)) {
  1048. if (prev)
  1049. SCHEME_CDR(prev) = SCHEME_CDR(l);
  1050. else
  1051. the_thread->extra_mrefs = SCHEME_CDR(l);
  1052. break;
  1053. }
  1054. prev = l;
  1055. }
  1056. }
  1057. }
  1058. }
  1059. } else {
  1060. f(o, data);
  1061. }
  1062. }
  1063. }
  1064. }
  1065. #ifdef MZ_PRECISE_GC
  1066. {
  1067. Scheme_Object *pr = m->cust_boxes, *wb;
  1068. Scheme_Custodian_Box *cb;
  1069. while (pr) {
  1070. wb = SCHEME_CAR(pr);
  1071. cb = (Scheme_Custodian_Box *)SCHEME_BOX_VAL(wb);
  1072. if (cb) cb->v = NULL;
  1073. pr = SCHEME_CDR(pr);
  1074. }
  1075. m->cust_boxes = NULL;
  1076. }
  1077. #endif
  1078. m->count = 0;
  1079. m->alloc = 0;
  1080. m->elems = 0;
  1081. m->boxes = NULL;
  1082. m->closers = NULL;
  1083. m->data = NULL;
  1084. m->mrefs = NULL;
  1085. m->shut_down = 1;
  1086. if (SAME_OBJ(m, start))
  1087. break;
  1088. next_m = CUSTODIAN_FAM(m->global_prev);
  1089. /* Remove this custodian from its parent */
  1090. adjust_custodian_family(m, m);
  1091. adjust_limit_table(m);
  1092. m = next_m;
  1093. }
  1094. #ifdef MZ_USE_FUTURES
  1095. scheme_future_check_custodians();
  1096. #endif
  1097. return kill_self;
  1098. }
  1099. typedef void (*Scheme_For_Each_Func)(Scheme_Object *);
  1100. static void for_each_managed(Scheme_Type type, Scheme_For_Each_Func cf)
  1101. XFORM_SKIP_PROC
  1102. /* This function must not allocate. */
  1103. {
  1104. Scheme_Custodian *m;
  1105. int i;
  1106. if (SAME_TYPE(type, scheme_thread_type))
  1107. type = scheme_thread_hop_type;
  1108. /* back to front so children are first: */
  1109. m = last_custodian;
  1110. while (m) {
  1111. for (i = m->count; i--; ) {
  1112. if (m->boxes[i]) {
  1113. Scheme_Object *o;
  1114. o = xCUSTODIAN_FAM(m->boxes[i]);
  1115. if (SAME_TYPE(SCHEME_TYPE(o), type)) {
  1116. if (SAME_TYPE(type, scheme_thread_hop_type)) {
  1117. /* We've added an indirection and made it weak. See mr_hop note above. */
  1118. Scheme_Thread *t;
  1119. t = (Scheme_Thread *)WEAKIFIED(((Scheme_Thread_Custodian_Hop *)o)->p);
  1120. if (!t) {
  1121. /* The thread is already collected */
  1122. continue;
  1123. } else if (SAME_OBJ(t->mref, m->mrefs[i]))
  1124. o = (Scheme_Object *)t;
  1125. else {
  1126. /* The main custodian for this thread is someone else */
  1127. continue;
  1128. }
  1129. }
  1130. cf(o);
  1131. }
  1132. }
  1133. }
  1134. m = CUSTODIAN_FAM(m->global_prev);
  1135. }
  1136. }
  1137. static void do_close_managed(Scheme_Custodian *m)
  1138. /* The trick is that we may need to kill the thread
  1139. that is running us. If so, delay it to the very
  1140. end. */
  1141. {
  1142. if (scheme_do_close_managed(m, NULL)) {
  1143. /* Kill/suspend self */
  1144. if (scheme_current_thread->suspend_to_kill)
  1145. suspend_thread(scheme_current_thread);
  1146. else
  1147. scheme_thread_block(0.0);
  1148. }
  1149. }
  1150. void scheme_close_managed(Scheme_Custodian *m)
  1151. {
  1152. do_close_managed(m);
  1153. /* Give killed threads time to die: */
  1154. scheme_thread_block(0);
  1155. scheme_current_thread->ran_some = 1;
  1156. }
  1157. static Scheme_Object *make_custodian(int argc, Scheme_Object *argv[])
  1158. {
  1159. Scheme_Custodian *m;
  1160. if (argc) {
  1161. if (!SCHEME_CUSTODIANP(argv[0]))
  1162. scheme_wrong_type("make-custodian", "custodian", 0, argc, argv);
  1163. m = (Scheme_Custodian *)argv[0];
  1164. } else
  1165. m = (Scheme_Custodian *)scheme_get_param(scheme_current_config(), MZCONFIG_CUSTODIAN);
  1166. if (m->shut_down)
  1167. scheme_arg_mismatch("make-custodian",
  1168. "the custodian has been shut down: ",
  1169. (Scheme_Object *)m);
  1170. return (Scheme_Object *)scheme_make_custodian(m);
  1171. }
  1172. static Scheme_Object *make_custodian_from_main(int argc, Scheme_Object *argv[])
  1173. {
  1174. return (Scheme_Object *)scheme_make_custodian(NULL);
  1175. }
  1176. static Scheme_Object *custodian_p(int argc, Scheme_Object *argv[])
  1177. {
  1178. return SCHEME_CUSTODIANP(argv[0]) ? scheme_true : scheme_false;
  1179. }
  1180. static Scheme_Object *custodian_close_all(int argc, Scheme_Object *argv[])
  1181. {
  1182. if (!SCHEME_CUSTODIANP(argv[0]))
  1183. scheme_wrong_type("custodian-shutdown-all", "custodian", 0, argc, argv);
  1184. scheme_close_managed((Scheme_Custodian *)argv[0]);
  1185. return scheme_void;
  1186. }
  1187. Scheme_Custodian* scheme_custodian_extract_reference(Scheme_Custodian_Reference *mr)
  1188. {
  1189. return CUSTODIAN_FAM(mr);
  1190. }
  1191. int scheme_custodian_is_shut_down(Scheme_Custodian* c)
  1192. {
  1193. return c->shut_down;
  1194. }
  1195. static Scheme_Object *extract_thread(Scheme_Object *o)
  1196. {
  1197. return (Scheme_Object *)WEAKIFIED(((Scheme_Thread_Custodian_Hop *)o)->p);
  1198. }
  1199. void scheme_init_custodian_extractors()
  1200. {
  1201. if (!extractors) {
  1202. int n;
  1203. n = scheme_num_types();
  1204. REGISTER_SO(extractors);
  1205. extractors = MALLOC_N_ATOMIC(Scheme_Custodian_Extractor, n);
  1206. memset(extractors, 0, sizeof(Scheme_Custodian_Extractor) * n);
  1207. extractors[scheme_thread_hop_type] = extract_thread;
  1208. }
  1209. }
  1210. void scheme_add_custodian_extractor(Scheme_Type t, Scheme_Custodian_Extractor e)
  1211. {
  1212. if (t) {
  1213. extractors[t] = e;
  1214. }
  1215. }
  1216. static Scheme_Object *custodian_to_list(int argc, Scheme_Object *argv[])
  1217. {
  1218. Scheme_Custodian *m, *m2, *c;
  1219. Scheme_Object **hold, *o;
  1220. int i, j, cnt, kids;
  1221. Scheme_Type type;
  1222. Scheme_Custodian_Extractor ex;
  1223. if (!SCHEME_CUSTODIANP(argv[0]))
  1224. scheme_wrong_type("custodian-managed-list", "custodian", 0, argc, argv);
  1225. if (!SCHEME_CUSTODIANP(argv[1]))
  1226. scheme_wrong_type("custodian-managed-list", "custodian", 1, argc, argv);
  1227. m = (Scheme_Custodian *)argv[0];
  1228. m2 = (Scheme_Custodian *)argv[1];
  1229. /* Check that the second manages the first: */
  1230. c = CUSTODIAN_FAM(m->parent);
  1231. while (c && NOT_SAME_OBJ(m2, c)) {
  1232. c = CUSTODIAN_FAM(c->parent);
  1233. }
  1234. if (!c) {
  1235. scheme_arg_mismatch("custodian-managed-list",
  1236. "the second custodian does not "
  1237. "manage the first custodian: ",
  1238. argv[0]);
  1239. }
  1240. /* Init extractors: */
  1241. scheme_add_custodian_extractor(0, NULL);
  1242. /* Count children: */
  1243. kids = 0;
  1244. for (c = CUSTODIAN_FAM(m->children); c; c = CUSTODIAN_FAM(c->sibling)) {
  1245. kids++;
  1246. }
  1247. /* Do all allocation first, since custodian links are weak.
  1248. Furthermore, allocation may trigger collection of an otherwise
  1249. unreferenced custodian, folding its items into this one,
  1250. so loop until we've allocated enough. */
  1251. do {
  1252. cnt = m->count;
  1253. hold = MALLOC_N(Scheme_Object *, cnt + kids);
  1254. } while (cnt < m->count);
  1255. /* Put managed items into hold array: */
  1256. for (i = m->count, j = 0; i--; ) {
  1257. if (m->boxes[i]) {
  1258. o = xCUSTODIAN_FAM(m->boxes[i]);
  1259. type = SCHEME_TYPE(o);
  1260. ex = extractors[type];
  1261. if (ex) {
  1262. o = ex(o);
  1263. }
  1264. if (o) {
  1265. hold[j] = o;
  1266. j++;
  1267. }
  1268. }
  1269. }
  1270. /* Add kids: */
  1271. for (c = CUSTODIAN_FAM(m->children); c; c = CUSTODIAN_FAM(c->sibling)) {
  1272. hold[j] = (Scheme_Object *)c;
  1273. j++;
  1274. }
  1275. /* Convert the array to a list: */
  1276. return scheme_build_list(j, hold);
  1277. }
  1278. static Scheme_Object *current_custodian(int argc, Scheme_Object *argv[])
  1279. {
  1280. return scheme_param_config("current-custodian",
  1281. scheme_make_integer(MZCONFIG_CUSTODIAN),
  1282. argc, argv,
  1283. -1, custodian_p, "custodian", 0);
  1284. }
  1285. Scheme_Custodian *scheme_get_current_custodian()
  1286. {
  1287. return (Scheme_Custodian *) current_custodian(0, NULL);
  1288. }
static Scheme_Object *make_custodian_box(int argc, Scheme_Object *argv[])
{
  Scheme_Custodian_Box *cb;

  if (!SCHEME_CUSTODIANP(argv[0]))
    scheme_wrong_type("make-custodian-box", "custodian", 0, argc, argv);

  cb = MALLOC_ONE_TAGGED(Scheme_Custodian_Box);
  cb->so.type = scheme_cust_box_type;
  cb->cust = (Scheme_Custodian *)argv[0];
  cb->v = argv[1];

#ifdef MZ_PRECISE_GC
  /* 3m */
  {
    Scheme_Object *wb, *pr, *prev;
    wb = GC_malloc_weak_box(cb, NULL, 0, 1);
    pr = scheme_make_raw_pair(wb, cb->cust->cust_boxes);
    cb->cust->cust_boxes = pr;
    cb->cust->num_cust_boxes++;

    /* The GC prunes the list of custodian boxes in accounting mode,
       but prune here in case accounting is never triggered. */
    if (cb->cust->num_cust_boxes > 2 * cb->cust->checked_cust_boxes) {
      prev = pr;
      pr = SCHEME_CDR(pr);
      while (pr) {
        wb = SCHEME_CAR(pr);
        /* Test the weak box's content; a cleared box means the
           custodian box was collected, so drop the entry: */
        if (!SCHEME_BOX_VAL(wb)) {
          SCHEME_CDR(prev) = SCHEME_CDR(pr);
          --cb->cust->num_cust_boxes;
        } else {
          prev = pr;
        }
        pr = SCHEME_CDR(pr);
      }
      cb->cust->checked_cust_boxes = cb->cust->num_cust_boxes;
    }
  }
#else
  /* CGC */
  if (cust_box_count >= cust_box_alloc) {
    Scheme_Custodian_Box **cbs;
    if (!cust_box_alloc) {
      cust_box_alloc = 16;
      REGISTER_SO(cust_boxes);
    } else {
      cust_box_alloc = 2 * cust_box_alloc;
    }
    cbs = (Scheme_Custodian_Box **)scheme_malloc_atomic(cust_box_alloc * sizeof(Scheme_Custodian_Box *));
    memcpy(cbs, cust_boxes, cust_box_count * sizeof(Scheme_Custodian_Box *));
    cust_boxes = cbs;
  }
  cust_boxes[cust_box_count++] = cb;
#endif

  return (Scheme_Object *)cb;
}
static Scheme_Object *custodian_box_value(int argc, Scheme_Object *argv[])
{
  Scheme_Custodian_Box *cb;

  if (!SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_cust_box_type))
    scheme_wrong_type("custodian-box-value", "custodian-box", 0, argc, argv);

  cb = (Scheme_Custodian_Box *)argv[0];
  if (cb->cust->shut_down)
    return scheme_false;

  return cb->v;
}

static Scheme_Object *custodian_box_p(int argc, Scheme_Object *argv[])
{
  if (SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_cust_box_type))
    return scheme_true;
  else
    return scheme_false;
}
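
/* Ready predicate for treating a custodian box as a synchronizable
   event: the box becomes ready once its custodian is shut down.
   (Presumably registered with the event machinery elsewhere, in the
   same way register_thread_sync() below registers threads.) */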
static int cust_box_ready(Scheme_Object *o)
{
  return ((Scheme_Custodian_Box *)o)->cust->shut_down;
}
#ifndef MZ_PRECISE_GC
void scheme_clean_cust_box_list(void)
{
  int src = 0, dest = 0;
  Scheme_Custodian_Box *cb;
  void *b;

  while (src < cust_box_count) {
    cb = cust_boxes[src];
    b = GC_base(cb);
    if (b
#ifndef USE_SENORA_GC
        && GC_is_marked(b)
#endif
        ) {
      cust_boxes[dest++] = cb;
      if (cb->v) {
        if (cb->cust->shut_down) {
          cb->v = NULL;
        }
      }
    }
    src++;
  }
  cust_box_count = dest;
}

static void shrink_cust_box_array(void)
{
  /* Call this function periodically to clean up. */
  if (cust_box_alloc > 128 && (cust_box_count * 4 < cust_box_alloc)) {
    Scheme_Custodian_Box **cbs;
    cust_box_alloc = cust_box_count * 2;
    cbs = (Scheme_Custodian_Box **)scheme_malloc_atomic(cust_box_alloc * sizeof(Scheme_Custodian_Box *));
    memcpy(cbs, cust_boxes, cust_box_count * sizeof(Scheme_Custodian_Box *));
    cust_boxes = cbs;
  }
}
#else
# define shrink_cust_box_array() /* empty */
# define clean_cust_box_list() /* empty */
#endif
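
/* Exit closers: embedding code can register functions that are applied
   to managed values when the process is about to exit (for example, to
   flush still-open ports); they run through the custodian-close
   machinery via scheme_do_close_managed() below. */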
void scheme_run_atexit_closers(Scheme_Object *o, Scheme_Close_Custodian_Client *f, void *data)
{
  Scheme_Object *l;

  if (cust_closers) {
    for (l = cust_closers; SCHEME_RPAIRP(l); l = SCHEME_CDR(l)) {
      Scheme_Exit_Closer_Func cf;
      cf = (Scheme_Exit_Closer_Func)SCHEME_CAR(l);
      cf(o, f, data);
    }
  }
}

void scheme_run_atexit_closers_on_all(Scheme_Exit_Closer_Func alt)
{
  mz_jmp_buf newbuf, *savebuf;

  /* scheme_start_atomic(); */
  /* Atomic mode would be needed if this were run to implement
     a custodian shutdown, but an actual custodian shutdown
     will have terminated everything else anyway. For a
     polite exit, other threads can run. */

  savebuf = scheme_current_thread->error_buf;
  scheme_current_thread->error_buf = &newbuf;
  if (!scheme_setjmp(newbuf)) {
    scheme_do_close_managed(NULL, alt ? alt : scheme_run_atexit_closers);
  }
  scheme_current_thread->error_buf = savebuf;
}

void do_run_atexit_closers_on_all()
{
  scheme_run_atexit_closers_on_all(NULL);
}

void scheme_set_atexit(Scheme_At_Exit_Proc p)
{
  replacement_at_exit = p;
}

void scheme_add_atexit_closer(Scheme_Exit_Closer_Func f)
{
  if (!cust_closers) {
    if (replacement_at_exit) {
      replacement_at_exit(do_run_atexit_closers_on_all);
    } else {
#ifdef USE_ON_EXIT_FOR_ATEXIT
      on_exit(do_run_atexit_closers_on_all, NULL);
#else
      atexit(do_run_atexit_closers_on_all);
#endif
    }

    REGISTER_SO(cust_closers);
    cust_closers = scheme_null;
  }

  cust_closers = scheme_make_raw_pair((Scheme_Object *)f, cust_closers);
}
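
/* Custodian shutdowns can be requested asynchronously, notably by a
   memory-accounting GC when a custodian exceeds its resource limit;
   such requests are queued here and drained at the next safe point by
   check_scheduled_kills(). */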
void scheme_schedule_custodian_close(Scheme_Custodian *c)
{
  /* This procedure might be called by a garbage collector to register
     a resource-based kill. */

  if (!scheduled_kills) {
    REGISTER_SO(scheduled_kills);
    scheduled_kills = scheme_null;
  }

  scheduled_kills = scheme_make_pair((Scheme_Object *)c, scheduled_kills);

  /* Drain the fuel counter and trip the JIT stack check so the
     current thread yields to the scheduler promptly: */
  scheme_fuel_counter = 0;
  scheme_jit_stack_boundary = (uintptr_t)-1;
}

static void check_scheduled_kills()
{
  while (scheduled_kills && !SCHEME_NULLP(scheduled_kills)) {
    Scheme_Object *k;
    k = SCHEME_CAR(scheduled_kills);
    scheduled_kills = SCHEME_CDR(scheduled_kills);
    do_close_managed((Scheme_Custodian *)k);
  }
}
static void check_current_custodian_allows(const char *who, Scheme_Thread *p)
{
  Scheme_Object *l;
  Scheme_Custodian_Reference *mref;
  Scheme_Custodian *m, *current;

  /* Check management of the thread: the current custodian must be
     the same as, or an ancestor of, every custodian that manages
     the thread. */
  current = (Scheme_Custodian *)scheme_get_param(scheme_current_config(), MZCONFIG_CUSTODIAN);

  for (l = p->extra_mrefs; !SCHEME_NULLP(l); l = SCHEME_CDR(l)) {
    mref = (Scheme_Custodian_Reference *)SCHEME_CAR(l);
    m = CUSTODIAN_FAM(mref);
    while (NOT_SAME_OBJ(m, current)) {
      m = CUSTODIAN_FAM(m->parent);
      if (!m)
        goto bad;
    }
  }

  mref = p->mref;
  if (!mref)
    return;
  m = CUSTODIAN_FAM(mref);
  if (!m)
    return;

  while (NOT_SAME_OBJ(m, current)) {
    m = CUSTODIAN_FAM(m->parent);
    if (!m)
      goto bad;
  }

  return;

 bad:
  scheme_arg_mismatch(who,
                      "the current custodian does not "
                      "solely manage the specified thread: ",
                      (Scheme_Object *)p);
}
void scheme_free_all(void)
{
  scheme_do_close_managed(NULL, NULL);
  scheme_free_dynamic_extensions();
#ifdef MZ_PRECISE_GC
  GC_free_all();
#endif
}

/*========================================================================*/
/*                              thread sets                               */
/*========================================================================*/
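
/* A thread set is the internal representation of a Racket-level
   "thread group": a node in a tree whose leaves are threads. Each set
   keeps a doubly-linked list of members (`first`/`next`/`prev`) plus a
   `current` pointer; the scheduler walks `current` pointers down the
   tree, which yields roughly round-robin selection within and across
   groups. */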
#define TSET_IL MZ_INLINE

static Scheme_Thread_Set *create_thread_set(Scheme_Thread_Set *parent)
{
  Scheme_Thread_Set *t_set;

  t_set = MALLOC_ONE_TAGGED(Scheme_Thread_Set);
  t_set->so.type = scheme_thread_set_type;

  t_set->parent = parent;

  /* Everything in t_set is zeroed */

  return t_set;
}

static Scheme_Object *make_thread_set(int argc, Scheme_Object *argv[])
{
  Scheme_Thread_Set *parent;

  if (argc) {
    if (!(SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_thread_set_type)))
      scheme_wrong_type("make-thread-group", "thread-group", 0, argc, argv);
    parent = (Scheme_Thread_Set *)argv[0];
  } else
    parent = (Scheme_Thread_Set *)scheme_get_param(scheme_current_config(), MZCONFIG_THREAD_SET);

  return (Scheme_Object *)create_thread_set(parent);
}

static Scheme_Object *thread_set_p(int argc, Scheme_Object *argv[])
{
  return ((SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_thread_set_type))
          ? scheme_true
          : scheme_false);
}

static Scheme_Object *current_thread_set(int argc, Scheme_Object *argv[])
{
  return scheme_param_config("current-thread-group",
                             scheme_make_integer(MZCONFIG_THREAD_SET),
                             argc, argv,
                             -1, thread_set_p, "thread-group", 0);
}

static TSET_IL void set_t_set_next(Scheme_Object *o, Scheme_Object *n)
{
  if (SCHEME_THREADP(o))
    ((Scheme_Thread *)o)->t_set_next = n;
  else
    ((Scheme_Thread_Set *)o)->next = n;
}

static TSET_IL void set_t_set_prev(Scheme_Object *o, Scheme_Object *n)
{
  if (SCHEME_THREADP(o))
    ((Scheme_Thread *)o)->t_set_prev = n;
  else
    ((Scheme_Thread_Set *)o)->prev = n;
}

static TSET_IL Scheme_Object *get_t_set_next(Scheme_Object *o)
{
  if (SCHEME_THREADP(o))
    return ((Scheme_Thread *)o)->t_set_next;
  else
    return ((Scheme_Thread_Set *)o)->next;
}

static TSET_IL Scheme_Object *get_t_set_prev(Scheme_Object *o)
{
  if (SCHEME_THREADP(o))
    return ((Scheme_Thread *)o)->t_set_prev;
  else
    return ((Scheme_Thread_Set *)o)->prev;
}
static void schedule_in_set(Scheme_Object *s, Scheme_Thread_Set *t_set)
{
  num_running_threads += 1;

  while (1) {
    /* Push s onto the front of t_set's member list: */
    set_t_set_next(s, t_set->first);
    if (t_set->first)
      set_t_set_prev(t_set->first, s);
    t_set->first = s;

    /* If the set already had a runnable member, the set itself is
       already scheduled in its parent; otherwise, make s current and
       schedule the set one level up: */
    if (t_set->current)
      break;

    t_set->current = s;
    s = (Scheme_Object *)t_set;
    t_set = t_set->parent;
  }
}

static void unschedule_in_set(Scheme_Object *s, Scheme_Thread_Set *t_set)
{
  Scheme_Object *prev;
  Scheme_Object *next;

  num_running_threads -= 1;

  while (1) {
    /* Unlink s from the set's doubly-linked member list: */
    prev = get_t_set_prev(s);
    next = get_t_set_next(s);

    if (!prev)
      t_set->first = next;
    else
      set_t_set_next(prev, next);
    if (next)
      set_t_set_prev(next, prev);

    set_t_set_prev(s, NULL);
    set_t_set_next(s, NULL);

    if (t_set->current == s) {
      if (next) {
        t_set->current = next;
      } else {
        t_set->current = t_set->first;
      }
    }

    /* If the set still has a runnable member, we're done; otherwise,
       unschedule the now-empty set from its parent, too: */
    if (t_set->current)
      break;

    s = (Scheme_Object *)t_set;
    t_set = t_set->parent;
  }
}
/*========================================================================*/
/*                         thread record creation                         */
/*========================================================================*/

static Scheme_Thread *make_thread(Scheme_Config *config,
                                  Scheme_Thread_Cell_Table *cells,
                                  Scheme_Object *init_break_cell,
                                  Scheme_Custodian *mgr,
                                  void *stack_base)
{
  Scheme_Thread *process;
  int prefix = 0;

  process = MALLOC_ONE_TAGGED(Scheme_Thread);
  process->so.type = scheme_thread_type;

  if (!scheme_main_thread) {
    /* Creating the first thread... */
    REGISTER_SO(scheme_current_thread);
    REGISTER_SO(scheme_main_thread);
    REGISTER_SO(scheme_first_thread);
    REGISTER_SO(thread_swap_callbacks);
    REGISTER_SO(thread_swap_out_callbacks);
    REGISTER_SO(swap_target);

    scheme_current_thread = process;
    scheme_first_thread = scheme_main_thread = process;
    process->prev = NULL;
    process->next = NULL;

    process->suspend_break = 1; /* until start-up finished */

    process->error_buf = NULL;

    thread_swap_callbacks = scheme_null;
    thread_swap_out_callbacks = scheme_null;

    GC_set_collect_start_callback(get_ready_for_GC);
    GC_set_collect_end_callback(done_with_GC);
#ifdef MZ_PRECISE_GC
    GC_set_collect_inform_callback(inform_GC);
#endif

#ifdef LINK_EXTENSIONS_BY_TABLE
    scheme_current_thread_ptr = &scheme_current_thread;
    scheme_fuel_counter_ptr = &scheme_fuel_counter;
#endif

#if defined(MZ_PRECISE_GC)
    GC_set_get_thread_stack_base(scheme_get_current_thread_stack_start);
#endif
    process->stack_start = stack_base;

  } else {
    prefix = 1;
  }

  process->engine_weight = 10000;

  process->cont_mark_pos = (MZ_MARK_POS_TYPE)1;
  process->cont_mark_stack = 0;
  process->cont_mark_stack_segments = NULL;
  process->cont_mark_seg_count = 0;

  if (!config) {
    make_initial_config(process);
    config = process->init_config;
  } else {
    process->init_config = config;
    process->cell_values = cells;
  }

  if (init_break_cell) {
    process->init_break_cell = init_break_cell;
  } else {
    Scheme_Object *v;
    v = scheme_make_thread_cell(scheme_false, 1);
    process->init_break_cell = v;
  }

  if (!mgr)
    mgr = (Scheme_Custodian *)scheme_get_param(config, MZCONFIG_CUSTODIAN);

#ifdef MZ_PRECISE_GC
  GC_register_new_thread(process, mgr);
#endif

  {
    Scheme_Object *t_set;
    t_set = scheme_get_param(config, MZCONFIG_THREAD_SET);
    process->t_set_parent = (Scheme_Thread_Set *)t_set;
  }

  if (SAME_OBJ(process, scheme_first_thread)) {
    REGISTER_SO(scheme_thread_set_top);
    scheme_thread_set_top = process->t_set_parent;
    scheme_thread_set_top->first = (Scheme_Object *)process;
    scheme_thread_set_top->current = (Scheme_Object *)process;
  } else
    schedule_in_set((Scheme_Object *)process, process->t_set_parent);

  scheme_init_jmpup_buf(&process->jmpup_buf);

  process->running = MZTHREAD_RUNNING;

  process->dw = NULL;

  process->block_descriptor = NOT_BLOCKED;
  process->block_check = NULL;
  process->block_needs_wakeup = NULL;
  process->sleep_end = 0;

  process->current_local_env = NULL;

  process->external_break = 0;

  process->ran_some = 1;

  process->list_stack = NULL;

  scheme_gmp_tls_init(process->gmp_tls);

  if (prefix) {
    process->next = scheme_first_thread;
    process->prev = NULL;
    process->next->prev = process;
    scheme_first_thread = process;
  }

  if (!buffer_init_size) /* => before place init */
    buffer_init_size = INIT_TB_SIZE;

  {
    Scheme_Object **tb;
    tb = MALLOC_N(Scheme_Object *, buffer_init_size);
    process->tail_buffer = tb;
  }
  process->tail_buffer_size = buffer_init_size;

  {
    int init_stack_size;
    Scheme_Object *iss;

    iss = scheme_get_thread_param(config, cells, MZCONFIG_THREAD_INIT_STACK_SIZE);
    if (SCHEME_INTP(iss))
      init_stack_size = SCHEME_INT_VAL(iss);
    else if (SCHEME_BIGNUMP(iss))
      init_stack_size = 0x7FFFFFFF;
    else
      init_stack_size = DEFAULT_INIT_STACK_SIZE;

    /* A too-large stack size won't help performance.
       A too-small stack size is unsafe for certain kinds of
       tail calls. */
    if (init_stack_size > MAX_INIT_STACK_SIZE)
      init_stack_size = MAX_INIT_STACK_SIZE;
    if (init_stack_size < SCHEME_TAIL_COPY_THRESHOLD)
      init_stack_size = SCHEME_TAIL_COPY_THRESHOLD;

    process->runstack_size = init_stack_size;
    {
      Scheme_Object **sa;
      sa = scheme_alloc_runstack(init_stack_size);
      process->runstack_start = sa;
    }
    process->runstack = process->runstack_start + init_stack_size;
  }

  process->runstack_saved = NULL;

#ifdef RUNSTACK_IS_GLOBAL
  if (!prefix) {
# ifndef MZ_PRECISE_GC
    /* Precise GC: we intentionally don't register MZ_RUNSTACK. See done_with_GC() */
    REGISTER_SO(MZ_RUNSTACK);
# endif
    REGISTER_SO(MZ_RUNSTACK_START);

    MZ_RUNSTACK = process->runstack;
    MZ_RUNSTACK_START = process->runstack_start;
    MZ_CONT_MARK_STACK = process->cont_mark_stack;
    MZ_CONT_MARK_POS = process->cont_mark_pos;
  }
#endif

  process->on_kill = NULL;

  process->user_tls = NULL;
  process->user_tls_size = 0;

  process->nester = process->nestee = NULL;

  process->mbox_first = NULL;
  process->mbox_last = NULL;
  process->mbox_sema = NULL;

  process->mref = NULL;
  process->extra_mrefs = NULL;

  /* A thread points to a lot of stuff, so it's bad to put a finalization
     on it, which is what registering with a custodian does. Instead, we
     register a weak indirection with the custodian. That way, the thread
     (and anything it points to) can be collected one GC cycle earlier.
     It's possible that the thread will be collected before the indirection
     record, so when we use the indirection (e.g., in custodian traversals),
     we'll need to check for NULL. */
  {
    Scheme_Thread_Custodian_Hop *hop;
    Scheme_Custodian_Reference *mref;
    hop = MALLOC_ONE_WEAK_RT(Scheme_Thread_Custodian_Hop);
    process->mr_hop = hop;
    hop->so.type = scheme_thread_hop_type;
    {
      Scheme_Thread *wp;
      wp = (Scheme_Thread *)WEAKIFY((Scheme_Object *)process);
      hop->p = wp;
    }
    mref = scheme_add_managed(mgr, (Scheme_Object *)hop, NULL, NULL, 0);
    process->mref = mref;
    process->extra_mrefs = scheme_null;

#ifndef MZ_PRECISE_GC
    scheme_weak_reference((void **)(void *)&hop->p);
#endif
  }

  return process;
}
Scheme_Thread *scheme_make_thread(void *stack_base)
{
  /* Makes the initial process. */
  return make_thread(NULL, NULL, NULL, NULL, stack_base);
}

static void scheme_check_tail_buffer_size(Scheme_Thread *p)
{
  if (p->tail_buffer_size < buffer_init_size) {
    Scheme_Object **tb;
    tb = MALLOC_N(Scheme_Object *, buffer_init_size);
    p->tail_buffer = tb;
    p->tail_buffer_size = buffer_init_size;
  }
}

void scheme_set_tail_buffer_size(int s)
{
  if (s > buffer_init_size) {
    Scheme_Thread *p;
    buffer_init_size = s;
    for (p = scheme_first_thread; p; p = p->next) {
      scheme_check_tail_buffer_size(p);
    }
  }
}

int scheme_tls_allocate()
{
  return tls_pos++;
}

void scheme_tls_set(int pos, void *v)
{
  Scheme_Thread *p = scheme_current_thread;

  if (p->user_tls_size <= pos) {
    int oldc = p->user_tls_size;
    void **old_tls = p->user_tls, **va;

    p->user_tls_size = tls_pos;
    va = MALLOC_N(void*, tls_pos);
    p->user_tls = va;
    while (oldc--) {
      p->user_tls[oldc] = old_tls[oldc];
    }
  }

  p->user_tls[pos] = v;
}

void *scheme_tls_get(int pos)
{
  Scheme_Thread *p = scheme_current_thread;

  if (p->user_tls_size <= pos)
    return NULL;
  else
    return p->user_tls[pos];
}
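
/* Under precise GC, a runstack is allocated with a 4-word header: a
   type tag, the block size in words, and the start/end offsets of the
   live region (initially 0 and len). scheme_set_runstack_limits()
   below rewrites those offsets (at indices -2 and -1 relative to the
   returned pointer) so the collector scans only the live portion. */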
Scheme_Object **scheme_alloc_runstack(intptr_t len)
  XFORM_SKIP_PROC
{
#ifdef MZ_PRECISE_GC
  intptr_t sz;
  void **p;
  sz = sizeof(Scheme_Object*) * (len + 4);
  p = (void **)GC_malloc_tagged_allow_interior(sz);
  *(Scheme_Type *)(void *)p = scheme_rt_runstack;
  ((intptr_t *)(void *)p)[1] = gcBYTES_TO_WORDS(sz);
  ((intptr_t *)(void *)p)[2] = 0;
  ((intptr_t *)(void *)p)[3] = len;
  return (Scheme_Object **)(p + 4);
#else
  return (Scheme_Object **)scheme_malloc_allow_interior(sizeof(Scheme_Object*) * len);
#endif
}

void scheme_set_runstack_limits(Scheme_Object **rs, intptr_t len, intptr_t start, intptr_t end)
  XFORM_SKIP_PROC
/* With 3m, we can tell the GC not to scan the unused parts, and we
   can have the fixup function zero out the unused parts; that avoids
   writing and scanning pages that could be skipped for a minor
   GC. For CGC, we have to just clear out the unused part. */
{
#ifdef MZ_PRECISE_GC
  if (((intptr_t *)(void *)rs)[-2] != start)
    ((intptr_t *)(void *)rs)[-2] = start;
  if (((intptr_t *)(void *)rs)[-1] != end)
    ((intptr_t *)(void *)rs)[-1] = end;
#else
  memset(rs, 0, start * sizeof(Scheme_Object *));
  memset(rs + end, 0, (len - end) * sizeof(Scheme_Object *));
#endif
}
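
/* Process-wide globals are keyed by string and shared across places.
   As the code below shows, registering with a non-NULL value sets the
   key only if it is unset and returns any previously registered value;
   passing NULL as the value performs a plain lookup. A hypothetical
   embedding-side use:

     void *prev = scheme_register_process_global("my-ext-inited", (void *)1);
     if (!prev) {
       ... one-time, per-process setup ...
     }
*/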
void *scheme_register_process_global(const char *key, void *val)
{
  void *old_val = NULL;
  char *key2;
  Proc_Global_Rec *pg;
  intptr_t len;

#if defined(MZ_USE_MZRT)
  if (process_global_lock)
    mzrt_mutex_lock(process_global_lock);
#endif

  for (pg = process_globals; pg; pg = pg->next) {
    if (!strcmp(pg->key, key)) {
      old_val = pg->val;
      break;
    }
  }

  if (!old_val && val) {
    len = strlen(key);
    key2 = (char *)malloc(len + 1);
    memcpy(key2, key, len + 1);

    pg = (Proc_Global_Rec *)malloc(sizeof(Proc_Global_Rec));
    pg->key = key2;
    pg->val = val;
    pg->next = process_globals;
    process_globals = pg;
  }

#if defined(MZ_USE_MZRT)
  if (process_global_lock)
    mzrt_mutex_unlock(process_global_lock);
#endif

  return old_val;
}

void scheme_init_process_globals(void)
{
#if defined(MZ_USE_MZRT)
  mzrt_mutex_create(&process_global_lock);
#endif
}

Scheme_Hash_Table *scheme_get_place_table(void)
{
  if (!place_local_misc_table)
    place_local_misc_table = scheme_make_hash_table(SCHEME_hash_ptr);
  return place_local_misc_table;
}
/*========================================================================*/
/*                     thread creation and swapping                       */
/*========================================================================*/

int scheme_in_main_thread(void)
{
  return !scheme_current_thread->next;
}

static void stash_current_marks()
{
  Scheme_Object *m;
  m = scheme_current_continuation_marks(scheme_current_thread->returned_marks);
  scheme_current_thread->returned_marks = m;
  swap_target = scheme_current_thread->return_marks_to;
  scheme_current_thread->return_marks_to = NULL;
}
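
/* do_swap_thread() is the heart of the cooperative scheduler: it saves
   the current thread's context with SETJMP, runs the swap-out
   callbacks, installs `swap_target` as scheme_current_thread, and
   LONGJMPs into the target's saved context. Control re-enters through
   the SETJMP branch when some other thread later swaps this one back
   in. */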
static void do_swap_thread()
{
 start:

  scheme_zero_unneeded_rands(scheme_current_thread);

#if WATCH_FOR_NESTED_SWAPS
  if (swapping)
    printf("death\n");
  swapping = 1;
#endif

  if (!swap_no_setjmp && SETJMP(scheme_current_thread)) {
    /* We're back! */
    /* See also the initial swap-in in start_child() */
    thread_swap_count++;
#ifdef RUNSTACK_IS_GLOBAL
    MZ_RUNSTACK = scheme_current_thread->runstack;
    MZ_RUNSTACK_START = scheme_current_thread->runstack_start;
    MZ_CONT_MARK_STACK = scheme_current_thread->cont_mark_stack;
    MZ_CONT_MARK_POS = scheme_current_thread->cont_mark_pos;
#endif
    RESETJMP(scheme_current_thread);
#if WATCH_FOR_NESTED_SWAPS
    swapping = 0;
#endif
    scheme_gmp_tls_unload(scheme_current_thread->gmp_tls, scheme_current_thread->gmp_tls_data);
    scheme_current_thread->gmp_tls_data = NULL;

    {
      Scheme_Object *l, *o;
      Scheme_Closure_Func f;
      for (l = thread_swap_callbacks; SCHEME_RPAIRP(l); l = SCHEME_CDR(l)) {
        o = SCHEME_CAR(l);
        f = SCHEME_CLOS_FUNC(o);
        o = SCHEME_CLOS_DATA(o);
        f(o);
      }
    }

    if ((scheme_current_thread->runstack_owner
         && ((*scheme_current_thread->runstack_owner) != scheme_current_thread))
        || (scheme_current_thread->cont_mark_stack_owner
            && ((*scheme_current_thread->cont_mark_stack_owner) != scheme_current_thread))) {
      scheme_takeover_stacks(scheme_current_thread);
    }

    {
      intptr_t cpm;
      cpm = scheme_get_process_milliseconds();
      scheme_current_thread->current_start_process_msec = cpm;
    }

    if (scheme_current_thread->return_marks_to) {
      stash_current_marks();
      goto start;
    }
  } else {
    Scheme_Thread *new_thread = swap_target;

    {
      intptr_t cpm;
      cpm = scheme_get_process_milliseconds();
      scheme_current_thread->accum_process_msec += (cpm - scheme_current_thread->current_start_process_msec);
    }

    swap_target = NULL;

    swap_no_setjmp = 0;

    /* We're leaving... */

    {
      Scheme_Object *l, *o;
      Scheme_Closure_Func f;
      for (l = thread_swap_out_callbacks; SCHEME_RPAIRP(l); l = SCHEME_CDR(l)) {
        o = SCHEME_CAR(l);
        f = SCHEME_CLOS_FUNC(o);
        o = SCHEME_CLOS_DATA(o);
        f(o);
      }
    }

    if (scheme_current_thread->init_break_cell) {
      int cb;
      cb = can_break_param(scheme_current_thread);
      scheme_current_thread->can_break_at_swap = cb;
    }

    {
      GC_CAN_IGNORE void *data;
      data = scheme_gmp_tls_load(scheme_current_thread->gmp_tls);
      scheme_current_thread->gmp_tls_data = data;
    }

#ifdef RUNSTACK_IS_GLOBAL
    scheme_current_thread->runstack = MZ_RUNSTACK;
    scheme_current_thread->runstack_start = MZ_RUNSTACK_START;
    scheme_current_thread->cont_mark_stack = MZ_CONT_MARK_STACK;
    scheme_current_thread->cont_mark_pos = MZ_CONT_MARK_POS;
#endif

    scheme_current_thread = new_thread;

    /* Fixup current pointers in thread sets */
    if (!scheme_current_thread->return_marks_to) {
      Scheme_Thread_Set *t_set = new_thread->t_set_parent;
      t_set->current = (Scheme_Object *)new_thread;
      while (t_set->parent) {
        t_set->parent->current = (Scheme_Object *)t_set;
        t_set = t_set->parent;
      }
    }

    LONGJMP(scheme_current_thread);
  }
}

void scheme_swap_thread(Scheme_Thread *new_thread)
{
  swap_target = new_thread;
  new_thread = NULL;
  do_swap_thread();
}
static void select_thread()
{
  Scheme_Thread *new_thread;
  Scheme_Object *o;
  Scheme_Thread_Set *t_set;

  /* Try to pick a next thread to avoid DOS attacks
     through whatever kinds of things call select_thread() */
  o = (Scheme_Object *)scheme_thread_set_top;
  while (!SCHEME_THREADP(o)) {
    t_set = (Scheme_Thread_Set *)o;
    o = get_t_set_next(t_set->current);
    if (!o)
      o = t_set->first;
  }
  /* It's possible that o won't work out. So o is a suggestion for the
     new thread, but the loop below will pick a definitely suitable
     thread. */

  new_thread = (Scheme_Thread *)o;
  do {
    if (!new_thread)
      new_thread = scheme_first_thread;

    /* Can't swap in a thread with a nestee: */
    while (new_thread
           && (new_thread->nestee
               || (new_thread->running & MZTHREAD_SUSPENDED)
               /* USER_SUSPENDED should only happen if new_thread is the main thread
                  or if the thread has MZTHREAD_NEED_SUSPEND_CLEANUP */
               || ((new_thread->running & MZTHREAD_USER_SUSPENDED)
                   && !(new_thread->running & MZTHREAD_NEED_SUSPEND_CLEANUP)))) {
      new_thread = new_thread->next;
    }

    if (!new_thread && !o) {
      /* The main thread must be blocked on a nestee, and everything
         else is suspended. But we have to go somewhere. Weakly
         resume the main thread's innermost nestee. If it's
         suspended by the user, then we've deadlocked. */
      new_thread = scheme_main_thread;
      while (new_thread->nestee) {
        new_thread = new_thread->nestee;
      }
      if ((new_thread->running & MZTHREAD_USER_SUSPENDED)
          && !(new_thread->running & MZTHREAD_NEED_SUSPEND_CLEANUP)) {
        if (post_system_idle()) {
          /* Aha! Someone was waiting for us to do nothing. Try again... */
        } else {
          scheme_console_printf("unbreakable deadlock\n");
          if (scheme_exit)
            scheme_exit(1);
          /* We really have to exit: */
          exit(1);
        }
      } else {
        scheme_weak_resume_thread(new_thread);
      }
      break;
    }

    o = NULL;
  } while (!new_thread);

  swap_target = new_thread;
  new_thread = NULL;
  o = NULL;
  t_set = NULL;

  do_swap_thread();
}
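
/* thread_is_dead() posts a dead thread's "dead" event so waiters wake
   up, then severs its references (configuration, mailbox, stacks) so
   everything the thread reached can be collected; remove_thread()
   additionally unlinks the thread from the scheduler and releases its
   runstack. */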
static void thread_is_dead(Scheme_Thread *r)
{
  if (r->dead_box) {
    Scheme_Object *o;
    o = SCHEME_PTR_VAL(r->dead_box);
    scheme_post_sema_all(o);
  }
  if (r->running_box) {
    SCHEME_PTR_VAL(r->running_box) = NULL;
    r->running_box = NULL;
  }
  r->suspended_box = NULL;
  r->resumed_box = NULL;

  r->list_stack = NULL;

  r->dw = NULL;
  r->init_config = NULL;
  r->cell_values = NULL;
  r->init_break_cell = NULL;
  r->cont_mark_stack_segments = NULL;
  r->overflow = NULL;

  r->blocker = NULL;

  r->transitive_resumes = NULL;

  r->error_buf = NULL;

  r->spare_runstack = NULL;

  r->mbox_first = NULL;
  r->mbox_last = NULL;
  r->mbox_sema = NULL;
}
static void remove_thread(Scheme_Thread *r)
{
  Scheme_Saved_Stack *saved;
  Scheme_Object *l;

  r->running = 0;

  if (r->prev) {
    r->prev->next = r->next;
    r->next->prev = r->prev;
  } else if (r->next) {
    r->next->prev = NULL;
    scheme_first_thread = r->next;
  }
  r->next = r->prev = NULL;

  unschedule_in_set((Scheme_Object *)r, r->t_set_parent);

#ifdef RUNSTACK_IS_GLOBAL
  if (r == scheme_current_thread) {
    r->runstack = MZ_RUNSTACK;
    MZ_RUNSTACK = NULL;
    r->runstack_start = MZ_RUNSTACK_START;
    MZ_RUNSTACK_START = NULL;
    r->cont_mark_stack = MZ_CONT_MARK_STACK;
    r->cont_mark_pos = MZ_CONT_MARK_POS;
  }
#endif

  if (r->runstack_owner) {
    /* Drop ownership, if active, and clear the stack */
    if (r == *(r->runstack_owner)) {
      if (r->runstack_start) {
        scheme_set_runstack_limits(r->runstack_start, r->runstack_size, 0, 0);
        r->runstack_start = NULL;
      }
      for (saved = r->runstack_saved; saved; saved = saved->prev) {
        scheme_set_runstack_limits(saved->runstack_start, saved->runstack_size, 0, 0);
      }
      r->runstack_saved = NULL;
      *(r->runstack_owner) = NULL;
      r->runstack_owner = NULL;
    }
  } else {
    /* Only this thread used the runstack, so clear/free it
       as aggressively as possible */
#if defined(SENORA_GC_NO_FREE) || defined(MZ_PRECISE_GC)
    memset(r->runstack_start, 0, r->runstack_size * sizeof(Scheme_Object*));
#else
    GC_free(r->runstack_start);
#endif
    r->runstack_start = NULL;
    for (saved = r->runstack_saved; saved; saved = saved->prev) {
#if defined(SENORA_GC_NO_FREE) || defined(MZ_PRECISE_GC)
      memset(saved->runstack_start, 0, saved->runstack_size * sizeof(Scheme_Object*));
#else
      GC_free(saved->runstack_start);
#endif
      saved->runstack_start = NULL;
    }
  }

  r->runstack = NULL;
  r->runstack_swapped = NULL;

  if (r->cont_mark_stack_owner
      && ((*r->cont_mark_stack_owner) == r)) {
    *r->cont_mark_stack_owner = NULL;
  }

  r->cont_mark_stack = 0;
  r->cont_mark_stack_owner = NULL;
  r->cont_mark_stack_swapped = NULL;

  r->ku.apply.tail_rator = NULL;
  r->ku.apply.tail_rands = NULL;
  r->tail_buffer = NULL;
  r->ku.multiple.array = NULL;
  r->values_buffer = NULL;

#ifndef SENORA_GC_NO_FREE
  if (r->list_stack)
    GC_free(r->list_stack);
#endif

  thread_is_dead(r);

  /* In case we kill a thread while in a bignum operation: */
  scheme_gmp_tls_restore_snapshot(r->gmp_tls, r->gmp_tls_data,
                                  NULL, ((r == scheme_current_thread) ? 1 : 2));

  if (r == scheme_current_thread) {
    /* We're going to be swapped out immediately. */
    swap_no_setjmp = 1;
  } else
    RESETJMP(r);

  scheme_remove_managed(r->mref, (Scheme_Object *)r->mr_hop);
  for (l = r->extra_mrefs; !SCHEME_NULLP(l); l = SCHEME_CDR(l)) {
    scheme_remove_managed((Scheme_Custodian_Reference *)SCHEME_CAR(l), (Scheme_Object *)r->mr_hop);
  }
  r->extra_mrefs = scheme_null;
}
void scheme_end_current_thread(void)
{
  remove_thread(scheme_current_thread);

  thread_ended_with_activity = 1;

  if (scheme_notify_multithread && !scheme_first_thread->next) {
    scheme_notify_multithread(0);
    have_activity = 0;
  }

  select_thread();
}
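
/* start_child() sets up the child's jmpbuf: on the initial call,
   SETJMP returns 0, so the body is skipped and control returns
   immediately to make_subprocess(). When the scheduler later LONGJMPs
   here, the child runs its thunk and then dismantles itself. */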
static void start_child(Scheme_Thread * volatile child,
                        Scheme_Object * volatile child_eval)
{
  if (SETJMP(child)) {
    /* Initial swap in: */
    Scheme_Object * volatile result = NULL;

    thread_swap_count++;
#ifdef RUNSTACK_IS_GLOBAL
    MZ_RUNSTACK = scheme_current_thread->runstack;
    MZ_RUNSTACK_START = scheme_current_thread->runstack_start;
    MZ_CONT_MARK_STACK = scheme_current_thread->cont_mark_stack;
    MZ_CONT_MARK_POS = scheme_current_thread->cont_mark_pos;
#endif
    scheme_gmp_tls_unload(scheme_current_thread->gmp_tls, scheme_current_thread->gmp_tls_data);
    scheme_current_thread->gmp_tls_data = NULL;

    {
      Scheme_Object *l, *o;
      Scheme_Closure_Func f;
      for (l = thread_swap_callbacks; SCHEME_RPAIRP(l); l = SCHEME_CDR(l)) {
        o = SCHEME_CAR(l);
        f = SCHEME_CLOS_FUNC(o);
        o = SCHEME_CLOS_DATA(o);
        f(o);
      }
    }

    {
      intptr_t cpm;
      cpm = scheme_get_process_milliseconds();
      scheme_current_thread->current_start_process_msec = cpm;
    }

    RESETJMP(child);

#if WATCH_FOR_NESTED_SWAPS
    swapping = 0;
#endif

    if (scheme_current_thread->running & MZTHREAD_KILLED) {
      /* This thread is dead! Give up now. */
      exit_or_escape(scheme_current_thread);
    }

    if (scheme_current_thread->return_marks_to) {
      stash_current_marks();
      do_swap_thread();
    }

    {
      mz_jmp_buf newbuf;
      scheme_current_thread->error_buf = &newbuf;
      if (!scheme_setjmp(newbuf)) {
        /* Run the main thunk: */
        /* (checks for break before doing anything else) */
        result = scheme_apply_thread_thunk(child_eval);
      }
    }

    /* !! At this point, scheme_current_thread can turn out to be a
       different thread, which invoked the original thread's
       continuation. */

    /* If we still have a meta continuation, then it means we
       should be resuming at a prompt, not exiting. */
    while (scheme_current_thread->meta_continuation) {
      Scheme_Thread *p = scheme_current_thread;
      Scheme_Overflow *oflow;

      p->cjs.val = result;

      if (!SAME_OBJ(p->meta_continuation->prompt_tag, scheme_default_prompt_tag)) {
        scheme_signal_error("thread ended with meta continuation that isn't for the default prompt");
      } else {
        Scheme_Meta_Continuation *mc;
        mc = p->meta_continuation;
        oflow = mc->overflow;
        p->meta_continuation = mc->next;
        if (!oflow->eot) {
          p->stack_start = oflow->stack_start;
          p->decompose_mc = mc;
          scheme_longjmpup(&oflow->jmp->cont);
        }
      }
    }

    scheme_end_current_thread();

    /* Shouldn't get here! */
    scheme_signal_error("bad thread switch");
  }
}
static Scheme_Object *make_subprocess(Scheme_Object *child_thunk,
                                      void *child_start,
                                      Scheme_Config *config,
                                      Scheme_Thread_Cell_Table *cells,
                                      Scheme_Object *break_cell,
                                      Scheme_Custodian *mgr,
                                      int normal_kill)
{
  Scheme_Thread *child;
  int turn_on_multi;

  turn_on_multi = !scheme_first_thread->next;

  if (!config)
    config = scheme_current_config();
  if (!cells)
    cells = scheme_inherit_cells(NULL);
  if (!break_cell) {
    break_cell = scheme_current_break_cell();
    if (SAME_OBJ(break_cell, maybe_recycle_cell))
      maybe_recycle_cell = NULL;
  }

  child = make_thread(config, cells, break_cell, mgr, child_start);

  /* Use child_thunk name, if any, for the thread name: */
  {
    Scheme_Object *sym;
    const char *s;
    int len;

    s = scheme_get_proc_name(child_thunk, &len, -1);
    if (s) {
      if (len < 0)
        sym = (Scheme_Object *)s;
      else
        sym = scheme_intern_exact_symbol(s, len);
      child->name = sym;
    }
  }

  {
    Scheme_Object *v;
    v = scheme_thread_cell_get(break_cell, cells);
    child->can_break_at_swap = SCHEME_TRUEP(v);
  }

  if (!normal_kill)
    child->suspend_to_kill = 1;

  child->stack_start = child_start;

  /* Sets the child's jmpbuf for swapping in later: */
  start_child(child, child_thunk);

  if (scheme_notify_multithread && turn_on_multi) {
    scheme_notify_multithread(1);
    have_activity = 1;
  }

  SCHEME_USE_FUEL(1000);

  return (Scheme_Object *)child;
}
Scheme_Object *scheme_thread(Scheme_Object *thunk)
{
  return scheme_thread_w_details(thunk, NULL, NULL, NULL, NULL, 0);
}

static Scheme_Object *sch_thread(int argc, Scheme_Object *args[])
{
  scheme_check_proc_arity("thread", 0, 0, argc, args);
  scheme_custodian_check_available(NULL, "thread", "thread");

  return scheme_thread(args[0]);
}

static Scheme_Object *sch_thread_nokill(int argc, Scheme_Object *args[])
{
  scheme_check_proc_arity("thread/suspend-to-kill", 0, 0, argc, args);
  scheme_custodian_check_available(NULL, "thread/suspend-to-kill", "thread");

  return scheme_thread_w_details(args[0], NULL, NULL, NULL, NULL, 1);
}

static Scheme_Object *sch_current(int argc, Scheme_Object *args[])
{
  return (Scheme_Object *)scheme_current_thread;
}

static Scheme_Object *thread_p(int argc, Scheme_Object *args[])
{
  return SCHEME_THREADP(args[0]) ? scheme_true : scheme_false;
}

static Scheme_Object *thread_running_p(int argc, Scheme_Object *args[])
{
  int running;

  if (!SCHEME_THREADP(args[0]))
    scheme_wrong_type("thread-running?", "thread", 0, argc, args);

  running = ((Scheme_Thread *)args[0])->running;

  return ((MZTHREAD_STILL_RUNNING(running) && !(running & MZTHREAD_USER_SUSPENDED))
          ? scheme_true
          : scheme_false);
}
static Scheme_Object *thread_dead_p(int argc, Scheme_Object *args[])
{
  int running;

  if (!SCHEME_THREADP(args[0]))
    scheme_wrong_type("thread-dead?", "thread", 0, argc, args);

  running = ((Scheme_Thread *)args[0])->running;

  return MZTHREAD_STILL_RUNNING(running) ? scheme_false : scheme_true;
}
static int thread_wait_done(Scheme_Object *p, Scheme_Schedule_Info *sinfo)
{
  int running = ((Scheme_Thread *)p)->running;
  if (MZTHREAD_STILL_RUNNING(running)) {
    /* Replace the direct thread reference with an event, so that
       the blocking thread can be dequeued: */
    Scheme_Object *evt;
    evt = scheme_get_thread_dead((Scheme_Thread *)p);
    scheme_set_sync_target(sinfo, evt, p, NULL, 0, 0, NULL);
    return 0;
  } else
    return 1;
}

static Scheme_Object *thread_wait(int argc, Scheme_Object *args[])
{
  Scheme_Thread *p;

  if (!SCHEME_THREADP(args[0]))
    scheme_wrong_type("thread-wait", "thread", 0, argc, args);

  p = (Scheme_Thread *)args[0];

  if (MZTHREAD_STILL_RUNNING(p->running)) {
    sch_sync(1, args);
  }

  return scheme_void;
}

static void register_thread_sync()
{
  scheme_add_evt(scheme_thread_type,
                 (Scheme_Ready_Fun)thread_wait_done,
                 NULL, NULL, 0);
}

void scheme_add_swap_callback(Scheme_Closure_Func f, Scheme_Object *data)
{
  Scheme_Object *p;
  p = scheme_make_raw_pair((Scheme_Object *)f, data);
  thread_swap_callbacks = scheme_make_pair(p, thread_swap_callbacks);
}

void scheme_add_swap_out_callback(Scheme_Closure_Func f, Scheme_Object *data)
{
  Scheme_Object *p;
  p = scheme_make_raw_pair((Scheme_Object *)f, data);
  thread_swap_out_callbacks = scheme_make_pair(p, thread_swap_out_callbacks);
}
/**************************************************************************/
/* Ensure that a new thread has a reasonable starting stack */

#ifdef DO_STACK_CHECK
# define THREAD_STACK_SPACE (STACK_SAFETY_MARGIN / 2)

void scheme_check_stack_ok(char *s); /* prototype, needed for PalmOS */

void scheme_check_stack_ok(char *s) {
# include "mzstkchk.h"
  {
    s[THREAD_STACK_SPACE] = 1;
  } else {
    s[THREAD_STACK_SPACE] = 0;
  }
}

static int is_stack_too_shallow2(void)
{
  char s[THREAD_STACK_SPACE+1];

  scheme_check_stack_ok(s);
  return s[THREAD_STACK_SPACE];
}

int scheme_is_stack_too_shallow(void)
/* Make sure this function isn't inlined, mainly because
   is_stack_too_shallow2() can get inlined, and it adds a lot
   to the stack. */
{
# include "mzstkchk.h"
  {
    return 1;
  }
  return is_stack_too_shallow2();
}

static Scheme_Object *thread_k(void)
{
  Scheme_Thread *p = scheme_current_thread;
  Scheme_Object *thunk, *result, *break_cell;
  Scheme_Config *config;
  Scheme_Custodian *mgr;
  Scheme_Thread_Cell_Table *cells;
  int suspend_to_kill = p->ku.k.i1;

  thunk = (Scheme_Object *)p->ku.k.p1;
  config = (Scheme_Config *)p->ku.k.p2;
  mgr = (Scheme_Custodian *)p->ku.k.p3;
  cells = (Scheme_Thread_Cell_Table *)SCHEME_CAR((Scheme_Object *)p->ku.k.p4);
  break_cell = SCHEME_CDR((Scheme_Object *)p->ku.k.p4);

  p->ku.k.p1 = NULL;
  p->ku.k.p2 = NULL;
  p->ku.k.p3 = NULL;
  p->ku.k.p4 = NULL;

  result = make_subprocess(thunk, PROMPT_STACK(result),
                           config, cells, break_cell, mgr, !suspend_to_kill);

  /* Don't get rid of `result'; it keeps the
     Precise GC xformer from "optimizing" away
     the __gc_var_stack__ frame. */
  return result;
}

#endif /* DO_STACK_CHECK */
Scheme_Object *scheme_thread_w_details(Scheme_Object *thunk,
                                       Scheme_Config *config,
                                       Scheme_Thread_Cell_Table *cells,
                                       Scheme_Object *break_cell,
                                       Scheme_Custodian *mgr,
                                       int suspend_to_kill)
{
  Scheme_Object *result;
  void *stack_marker;

#ifdef DO_STACK_CHECK
  /* Make sure the thread starts out with a reasonable stack size, so
     it doesn't thrash right away: */
  if (scheme_is_stack_too_shallow()) {
    Scheme_Thread *p = scheme_current_thread;

    p->ku.k.p1 = thunk;
    p->ku.k.p2 = config;
    p->ku.k.p3 = mgr;
    result = scheme_make_pair((Scheme_Object *)cells, break_cell);
    p->ku.k.p4 = result;
    p->ku.k.i1 = suspend_to_kill;

    return scheme_handle_stack_overflow(thread_k);
  }
#endif

  result = make_subprocess(thunk, PROMPT_STACK(stack_marker),
                           config, cells, break_cell, mgr, !suspend_to_kill);

  /* Don't get rid of `result'; it keeps the
     Precise GC xformer from "optimizing" away
     the __gc_var_stack__ frame. */
  return result;
}
/**************************************************************************/
/* Nested threads */
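
/* call-in-nested-thread runs a thunk in a fresh thread that borrows
   the current thread's runstack while the parent blocks; the parent
   resumes when the nested thread returns or escapes. Racket-level
   sketch (illustrative):

     (call-in-nested-thread
      (lambda () (+ 1 2)))   ; => 3, computed in a nested thread
*/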
static Scheme_Object *def_nested_exn_handler(int argc, Scheme_Object *argv[])
{
  if (scheme_current_thread->nester) {
    Scheme_Thread *p = scheme_current_thread;
    p->cjs.jumping_to_continuation = (Scheme_Object *)scheme_current_thread;
    p->cjs.alt_full_continuation = NULL;
    p->cjs.val = argv[0];
    p->cjs.is_kill = 0;
    p->cjs.skip_dws = 0;
    scheme_longjmp(*p->error_buf, 1);
  }

  return scheme_void; /* misuse of exception handler (wrong kind of thread or under prompt) */
}
MZ_DO_NOT_INLINE(Scheme_Object *scheme_call_as_nested_thread(int argc, Scheme_Object *argv[], void *max_bottom));

Scheme_Object *scheme_call_as_nested_thread(int argc, Scheme_Object *argv[], void *max_bottom)
{
  Scheme_Thread *p = scheme_current_thread;
  Scheme_Thread * volatile np;
  Scheme_Custodian *mgr;
  Scheme_Object * volatile v;
  mz_jmp_buf newbuf;
  volatile int failure;

  scheme_check_proc_arity("call-in-nested-thread", 0, 0, argc, argv);
  if (argc > 1) {
    if (SCHEME_CUSTODIANP(argv[1]))
      mgr = (Scheme_Custodian *)argv[1];
    else {
      scheme_wrong_type("call-in-nested-thread", "custodian", 1, argc, argv);
      return NULL;
    }
  } else
    mgr = (Scheme_Custodian *)scheme_get_param(scheme_current_config(), MZCONFIG_CUSTODIAN);

  scheme_custodian_check_available(mgr, "call-in-nested-thread", "thread");

  SCHEME_USE_FUEL(25);

  scheme_wait_until_suspend_ok();

  np = MALLOC_ONE_TAGGED(Scheme_Thread);
  np->so.type = scheme_thread_type;
#ifdef MZ_PRECISE_GC
  GC_register_new_thread(np, mgr);
#endif
  np->running = MZTHREAD_RUNNING;
  np->ran_some = 1;

#ifdef RUNSTACK_IS_GLOBAL
  p->runstack = MZ_RUNSTACK;
  p->runstack_start = MZ_RUNSTACK_START;
  p->cont_mark_stack = MZ_CONT_MARK_STACK;
  p->cont_mark_pos = MZ_CONT_MARK_POS;
#endif

  /* zero out anything we need now, because nestee disables
     GC cleaning for this thread: */
  scheme_prepare_this_thread_for_GC(p);

  if (!p->runstack_owner) {
    Scheme_Thread **owner;
    owner = MALLOC_N(Scheme_Thread *, 1);
    p->runstack_owner = owner;
    *owner = p;
  }

  np->runstack = p->runstack;
  np->runstack_start = p->runstack_start;
  np->runstack_size = p->runstack_size;
  np->runstack_saved = p->runstack_saved;
  np->runstack_owner = p->runstack_owner;
  *np->runstack_owner = np;
  np->stack_start = p->stack_start;
  np->engine_weight = p->engine_weight;
  {
    Scheme_Object **tb;
    tb = MALLOC_N(Scheme_Object *, p->tail_buffer_size);
    np->tail_buffer = tb;
  }
  np->tail_buffer_size = p->tail_buffer_size;

  np->list_stack = p->list_stack;
  np->list_stack_pos = p->list_stack_pos;

  scheme_gmp_tls_init(np->gmp_tls);

  /* np->prev = NULL; - 0ed by allocation */
  np->next = scheme_first_thread;
  scheme_first_thread->prev = np;
  scheme_first_thread = np;

  np->t_set_parent = p->t_set_parent;
  schedule_in_set((Scheme_Object *)np, np->t_set_parent);

  {
    Scheme_Thread_Cell_Table *cells;
    cells = scheme_inherit_cells(p->cell_values);
    np->cell_values = cells;
  }
  {
    Scheme_Config *config;
    config = scheme_current_config();
    np->init_config = config;
  }
  {
    int cb;
    Scheme_Object *bc;
    cb = scheme_can_break(p);
    p->can_break_at_swap = cb;
    bc = scheme_current_break_cell();
    np->init_break_cell = bc;
    if (SAME_OBJ(bc, maybe_recycle_cell))
      maybe_recycle_cell = NULL;
  }
  np->cont_mark_pos = (MZ_MARK_POS_TYPE)1;
  /* others 0ed already by allocation */

  check_ready_break();

  np->nester = p;
  p->nestee = np;
  np->external_break = p->external_break;
  p->external_break = 0;

  {
    Scheme_Thread_Custodian_Hop *hop;
    Scheme_Custodian_Reference *mref;
    hop = MALLOC_ONE_WEAK_RT(Scheme_Thread_Custodian_Hop);
    np->mr_hop = hop;
    hop->so.type = scheme_thread_hop_type;
    {
      Scheme_Thread *wp;
      wp = (Scheme_Thread *)WEAKIFY((Scheme_Object *)np);
      hop->p = wp;
    }
    mref = scheme_add_managed(mgr, (Scheme_Object *)hop, NULL, NULL, 0);
    np->mref = mref;
    np->extra_mrefs = scheme_null;
#ifndef MZ_PRECISE_GC
    scheme_weak_reference((void **)(void *)&hop->p);
#endif
  }

#ifdef RUNSTACK_IS_GLOBAL
  MZ_CONT_MARK_STACK = np->cont_mark_stack;
  MZ_CONT_MARK_POS = np->cont_mark_pos;
#endif

  scheme_current_thread = np;

  if (p != scheme_main_thread)
    scheme_weak_suspend_thread(p);

  if (!the_nested_exn_handler) {
    REGISTER_SO(the_nested_exn_handler);
    the_nested_exn_handler = scheme_make_prim_w_arity(def_nested_exn_handler,
                                                      "nested-thread-exception-handler",
                                                      1, 1);
  }
  scheme_set_cont_mark(scheme_exn_handler_key, the_nested_exn_handler);

  /* Call thunk, catch escape: */
  np->error_buf = &newbuf;
  if (scheme_setjmp(newbuf)) {
    if (!np->cjs.is_kill)
      v = np->cjs.val;
    else
      v = NULL;
    failure = 1;
  } else {
    v = scheme_apply(argv[0], 0, NULL);
    failure = 0;
  }

  scheme_remove_managed(np->mref, (Scheme_Object *)np->mr_hop);
  {
    Scheme_Object *l;
    for (l = np->extra_mrefs; !SCHEME_NULLP(l); l = SCHEME_CDR(l)) {
      scheme_remove_managed((Scheme_Custodian_Reference *)SCHEME_CAR(l),
                            (Scheme_Object *)np->mr_hop);
    }
  }
  np->extra_mrefs = scheme_null;
#ifdef MZ_PRECISE_GC
  WEAKIFIED(np->mr_hop->p) = NULL;
#else
  scheme_unweak_reference((void **)(void *)&np->mr_hop->p);
#endif
  scheme_remove_all_finalization(np->mr_hop);

  if (np->prev)
    np->prev->next = np->next;
  else
    scheme_first_thread = np->next;
  np->next->prev = np->prev;

  np->next = NULL;
  np->prev = NULL;

  unschedule_in_set((Scheme_Object *)np, np->t_set_parent);

  np->running = 0;

  *p->runstack_owner = p;

  p->external_break = np->external_break;
  p->nestee = NULL;
  np->nester = NULL;

  thread_is_dead(np);

  scheme_current_thread = p;

  if (p != scheme_main_thread)
    scheme_weak_resume_thread(p);

#ifdef RUNSTACK_IS_GLOBAL
  MZ_CONT_MARK_STACK = p->cont_mark_stack;
  MZ_CONT_MARK_POS = p->cont_mark_pos;
#endif

  if ((p->running & MZTHREAD_KILLED)
      || (p->running & MZTHREAD_USER_SUSPENDED))
    scheme_thread_block(0.0);

  if (failure) {
    if (!v)
      scheme_raise_exn(MZEXN_FAIL,
                       "call-in-nested-thread: the thread was killed, or it exited via the default error escape handler");
    else
      scheme_raise(v);
  }

  /* May have just moved a break to a breakable thread: */
  /* Check for external break again after swap or sleep */
  scheme_check_break_now();

  return v;
}
static Scheme_Object *call_as_nested_thread(int argc, Scheme_Object *argv[])
{
  Scheme_Object *result;
  result = scheme_call_as_nested_thread(argc, argv, PROMPT_STACK(result));
  return result;
}
/*========================================================================*/
/*                    thread scheduling and termination                   */
/*========================================================================*/

static int check_sleep(int need_activity, int sleep_now)
/* Signals should be suspended */
{
  Scheme_Thread *p, *p2;
  int end_with_act;
#if defined(USING_FDS)
  DECL_FDSET(set, 3);
  fd_set *set1, *set2;
#endif
  void *fds;

  if (scheme_no_stack_overflow)
    return 0;

  /* Is everything blocked? */
  if (!do_atomic) {
    p = scheme_first_thread;
    while (p) {
      if (!p->nestee
          && (p->ran_some || p->block_descriptor == NOT_BLOCKED)
          && (p->next || !(p->running & MZTHREAD_USER_SUSPENDED)))
        break;
      p = p->next;
    }
  } else
    p = NULL;

  p2 = scheme_first_thread;
  while (p2) {
    if (p2->ran_some) {
      scheme_notify_sleep_progress();
      p2->ran_some = 0;
    }
    p2 = p2->next;
  }

  end_with_act = thread_ended_with_activity;
  thread_ended_with_activity = 0;

  if (need_activity
      && !end_with_act
      && (do_atomic
          || (!p && ((!sleep_now && scheme_wakeup_on_input)
                     || (sleep_now && (scheme_sleep || scheme_place_sleep)))))) {
    double max_sleep_time = 0;

    /* Poll from top-level process, and all subprocesses are blocked. */
    /* So, everything is blocked pending external input. */
    /* Build a list of file descriptors that we're waiting on */
    /* and turn off polling. */

    if (have_activity)
      scheme_active_but_sleeping = 1;
    if (have_activity && scheme_notify_multithread)
      scheme_notify_multithread(0);

#if defined(USING_FDS)
    INIT_DECL_FDSET(set, set1, set2);
    set1 = (fd_set *) MZ_GET_FDSET(set, 1);
    set2 = (fd_set *) MZ_GET_FDSET(set, 2);

    fds = (void *)set;
    MZ_FD_ZERO(set);
    MZ_FD_ZERO(set1);
    MZ_FD_ZERO(set2);
#else
    fds = NULL;
#endif

    needs_sleep_cancelled = 0;

    p = scheme_first_thread;
    while (p) {
      int merge_time = 0;
      double p_time;

      if (p->nestee) {
        /* nothing */
      } else if (p->block_descriptor == GENERIC_BLOCKED) {
        needs_sleep_time_end = -1.0;
        if (p->block_needs_wakeup) {
          Scheme_Needs_Wakeup_Fun f = p->block_needs_wakeup;
          f(p->blocker, fds);
        }
        p_time = p->sleep_end;
        merge_time = (p_time > 0.0);
        if (needs_sleep_time_end > 0.0) {
          if (!merge_time || (needs_sleep_time_end < p_time)) {
            p_time = needs_sleep_time_end;
            merge_time = 1;
          }
        }
      } else if (p->block_descriptor == SLEEP_BLOCKED) {
        merge_time = 1;
        p_time = p->sleep_end;
      }

      if (merge_time) {
        double d;
        double t;
        d = (p_time - scheme_get_inexact_milliseconds());
        t = (d / 1000);
        if (t <= 0) {
          t = (float)0.00001;
          needs_sleep_cancelled = 1;
        }
        if (!max_sleep_time || (t < max_sleep_time))
          max_sleep_time = t;
      }
      p = p->next;
    }

    if (needs_sleep_cancelled)
      return 0;

    if (post_system_idle()) {
      return 0;
    }

    if (sleep_now) {
      float mst = (float)max_sleep_time;

      /* Make sure that mst didn't go to infinity: */
      if ((double)mst > (2 * max_sleep_time)) {
        mst = 100000000.0;
      }

      {
        Scheme_Sleep_Proc slp;
        if (scheme_place_sleep)
          slp = scheme_place_sleep;
        else
          slp = scheme_sleep;

        slp(mst, fds);
      }
    } else if (scheme_wakeup_on_input)
      scheme_wakeup_on_input(fds);

    return 1;
  }

  return 0;
}
  2884. void scheme_set_wakeup_time(void *fds, double end_time)
  2885. {
  2886. /* should be called only during a needs_wakeup callback */
  2887. needs_sleep_time_end = end_time;
  2888. }
  2889. void scheme_set_place_sleep(Scheme_Sleep_Proc slp)
  2890. {
  2891. scheme_place_sleep = slp;
  2892. }
  2893. static int post_system_idle()
  2894. {
  2895. return scheme_try_channel_get(scheme_system_idle_channel);
  2896. }
  2897. void scheme_cancel_sleep()
  2898. {
  2899. needs_sleep_cancelled = 1;
  2900. }
  2901. void scheme_check_threads(void)
  2902. /* Signals should be suspended. */
  2903. {
  2904. scheme_current_thread->suspend_break++;
  2905. scheme_thread_block((float)0);
  2906. --scheme_current_thread->suspend_break;
  2907. check_sleep(have_activity, 0);
  2908. }
  2909. void scheme_wake_up(void)
  2910. {
  2911. scheme_active_but_sleeping = 0;
  2912. if (have_activity && scheme_notify_multithread)
  2913. scheme_notify_multithread(1);
  2914. }
  2915. void scheme_out_of_fuel(void)
  2916. {
  2917. if (scheme_defining_primitives) return;
  2918. scheme_thread_block((float)0);
  2919. scheme_current_thread->ran_some = 1;
  2920. }
  2921. static void init_schedule_info(Scheme_Schedule_Info *sinfo, Scheme_Thread *false_pos_ok,
  2922. double sleep_end)
  2923. {
  2924. sinfo->false_positive_ok = false_pos_ok;
  2925. sinfo->potentially_false_positive = 0;
  2926. sinfo->current_syncing = NULL;
  2927. sinfo->spin = 0;
  2928. sinfo->is_poll = 0;
  2929. sinfo->sleep_end = sleep_end;
  2930. }
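/* A Scheme_Schedule_Info carries hints between the scheduler and ready
   callbacks: `false_positive_ok' names a thread that may be woken
   spuriously, `spin' asks for an immediate retry instead of sleeping,
   `is_poll' marks a poll-only check, and `sleep_end' is a wake-up
   deadline in inexact milliseconds (0.0 means no deadline). */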
  2931. Scheme_Object *scheme_current_break_cell()
  2932. {
  2933. return scheme_extract_one_cc_mark(NULL, scheme_break_enabled_key);
  2934. }
  2935. static int can_break_param(Scheme_Thread *p)
  2936. {
  2937. if (p == scheme_current_thread) {
  2938. Scheme_Object *v;
  2939. v = scheme_extract_one_cc_mark(NULL, scheme_break_enabled_key);
  2940. v = scheme_thread_cell_get(v, p->cell_values);
  2941. return SCHEME_TRUEP(v);
  2942. } else
  2943. return p->can_break_at_swap;
  2944. }
  2945. int scheme_can_break(Scheme_Thread *p)
  2946. {
  2947. if (!p->suspend_break && !all_breaks_disabled && !scheme_no_stack_overflow) {
  2948. return can_break_param(p);
  2949. } else
  2950. return 0;
  2951. }
  2952. void scheme_set_can_break(int on)
  2953. {
  2954. Scheme_Object *v;
  2955. v = scheme_extract_one_cc_mark(NULL, scheme_break_enabled_key);
  2956. scheme_thread_cell_set(v, scheme_current_thread->cell_values,
  2957. (on ? scheme_true : scheme_false));
  2958. if (SAME_OBJ(v, maybe_recycle_cell))
  2959. maybe_recycle_cell = NULL;
  2960. }
  2961. void scheme_check_break_now(void)
  2962. {
  2963. Scheme_Thread *p = scheme_current_thread;
  2964. check_ready_break();
  2965. if (p->external_break && scheme_can_break(p)) {
  2966. scheme_thread_block_w_thread(0.0, p);
  2967. p->ran_some = 1;
  2968. }
  2969. }
  2970. static Scheme_Object *check_break_now(int argc, Scheme_Object *args[])
  2971. {
  2972. scheme_check_break_now();
  2973. return scheme_void;
  2974. }
  2975. void scheme_push_break_enable(Scheme_Cont_Frame_Data *cframe, int on, int post_check)
  2976. {
  2977. Scheme_Object *v = NULL;
  2978. if (recycle_cell) {
  2979. if (!SCHEME_TRUEP(((Thread_Cell *)recycle_cell)->def_val) == !on) {
  2980. v = recycle_cell;
  2981. recycle_cell = NULL;
  2982. }
  2983. }
  2984. if (!v)
  2985. v = scheme_make_thread_cell(on ? scheme_true : scheme_false, 1);
  2986. scheme_push_continuation_frame(cframe);
  2987. scheme_set_cont_mark(scheme_break_enabled_key, v);
  2988. if (post_check)
  2989. scheme_check_break_now();
  2990. cframe->cache = v;
  2991. maybe_recycle_cell = v;
  2992. recycle_cc_count = scheme_cont_capture_count;
  2993. }
  2994. void scheme_pop_break_enable(Scheme_Cont_Frame_Data *cframe, int post_check)
  2995. {
  2996. scheme_pop_continuation_frame(cframe);
  2997. if (post_check)
  2998. scheme_check_break_now();
  2999. if (cframe->cache == maybe_recycle_cell) {
  3000. if (recycle_cc_count == scheme_cont_capture_count) {
  3001. recycle_cell = maybe_recycle_cell;
  3002. }
  3003. maybe_recycle_cell = NULL;
  3004. }
  3005. }
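/* The push/pop pair must bracket a dynamic extent, as in this sketch
   (the same pattern appears in scheme_thread_block_enable_break below):

     Scheme_Cont_Frame_Data cframe;
     scheme_push_break_enable(&cframe, 1, 1);
     ... work with breaks enabled ...
     scheme_pop_break_enable(&cframe, 0);
*/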
  3006. static Scheme_Object *raise_user_break(int argc, Scheme_Object ** volatile argv)
  3007. {
  3008. /* The main action here is buried in code to free temporary bignum
  3009. space on escapes. Aside from a thread kill, this is the only
  3010. place where we have to worry about freeing bignum space, because
  3011. kill and escape are the only possible actions within a bignum
3012. calculation. It is possible to have nested bignum calculations,
  3013. though (if the break handler performs bignum arithmetic), so
  3014. that's why we save and restore an old snapshot. */
  3015. mz_jmp_buf *savebuf, newbuf;
  3016. intptr_t save[4];
  3017. savebuf = scheme_current_thread->error_buf;
  3018. scheme_current_thread->error_buf = &newbuf;
  3019. scheme_gmp_tls_snapshot(scheme_current_thread->gmp_tls, save);
  3020. if (!scheme_setjmp(newbuf)) {
  3021. /* >>>> This is the main action <<<< */
  3022. scheme_raise_exn(MZEXN_BREAK, argv[0], "user break");
  3023. /* will definitely escape (or thread will die) */
  3024. } else {
  3025. /* As expected, we're escaping. Unless we're continuing, then
  3026. reset temporary bignum memory. */
  3027. int cont;
  3028. cont = SAME_OBJ((Scheme_Object *)scheme_jumping_to_continuation,
  3029. argv[0]);
  3030. scheme_gmp_tls_restore_snapshot(scheme_current_thread->gmp_tls, NULL, save, !cont);
  3031. scheme_longjmp(*savebuf, 1);
  3032. }
  3033. /* Can't get here */
  3034. return NULL;
  3035. }
  3036. static void raise_break(Scheme_Thread *p)
  3037. {
  3038. int block_descriptor;
  3039. Scheme_Object *blocker; /* semaphore or port */
  3040. Scheme_Ready_Fun block_check;
  3041. Scheme_Needs_Wakeup_Fun block_needs_wakeup;
  3042. Scheme_Object *a[1];
  3043. Scheme_Cont_Frame_Data cframe;
  3044. p->external_break = 0;
  3045. if (p->blocker && (p->block_check == (Scheme_Ready_Fun)syncing_ready)) {
  3046. /* Get out of lines for channels, etc., before calling a break exn handler. */
  3047. scheme_post_syncing_nacks((Syncing *)p->blocker);
  3048. }
  3049. block_descriptor = p->block_descriptor;
  3050. blocker = p->blocker;
  3051. block_check = p->block_check;
  3052. block_needs_wakeup = p->block_needs_wakeup;
  3053. p->block_descriptor = NOT_BLOCKED;
  3054. p->blocker = NULL;
  3055. p->block_check = NULL;
  3056. p->block_needs_wakeup = NULL;
  3057. p->ran_some = 1;
  3058. a[0] = scheme_make_prim((Scheme_Prim *)raise_user_break);
  3059. /* Continuation frame ensures that this doesn't
  3060. look like it's in tail position with respect to
  3061. an existing escape continuation */
  3062. scheme_push_continuation_frame(&cframe);
  3063. scheme_call_ec(1, a);
  3064. scheme_pop_continuation_frame(&cframe);
  3065. /* Continue from break... */
  3066. p->block_descriptor = block_descriptor;
  3067. p->blocker = blocker;
  3068. p->block_check = block_check;
  3069. p->block_needs_wakeup = block_needs_wakeup;
  3070. }
  3071. static void escape_to_kill(Scheme_Thread *p)
  3072. {
  3073. p->cjs.jumping_to_continuation = (Scheme_Object *)p;
  3074. p->cjs.alt_full_continuation = NULL;
  3075. p->cjs.is_kill = 1;
  3076. p->cjs.skip_dws = 0;
  3077. scheme_longjmp(*p->error_buf, 1);
  3078. }
  3079. static void exit_or_escape(Scheme_Thread *p)
  3080. {
  3081. /* Maybe this killed thread is nested: */
  3082. if (p->nester) {
  3083. if (p->running & MZTHREAD_KILLED)
  3084. p->running -= MZTHREAD_KILLED;
  3085. escape_to_kill(p);
  3086. }
  3087. if (SAME_OBJ(p, scheme_main_thread)) {
  3088. /* Hard exit: */
  3089. if (scheme_current_place_id)
  3090. escape_to_kill(p);
  3091. if (scheme_exit)
  3092. scheme_exit(0);
  3093. /* We really have to exit: */
  3094. exit(0);
  3095. }
  3096. remove_thread(p);
  3097. select_thread();
  3098. }
  3099. void scheme_break_main_thread_at(void *p)
  3100. /* This function can be called from an interrupt handler.
  3101. On some platforms, it will even be called from multiple
  3102. OS threads. In the case of multiple threads, there's a
3103. tiny chance that a single Ctrl-C will trigger multiple
  3104. break exceptions. */
  3105. {
  3106. *(volatile short *)p = 1;
  3107. }
  3108. void scheme_break_main_thread()
3109. /* Calling this function from an arbitrary
3110. thread is dangerous when thread locals are enabled. */
  3111. {
  3112. scheme_break_main_thread_at((void *)&delayed_break_ready);
  3113. }
  3114. void *scheme_get_main_thread_break_handle()
  3115. {
  3116. return (void *)&delayed_break_ready;
  3117. }
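/* A hypothetical embedding can capture the handle once and then signal
   breaks from an OS-level interrupt handler (break_handle and
   my_interrupt_handler are illustrative names):

     static void *break_handle;

     ... at startup, on the Racket main thread: ...
     break_handle = scheme_get_main_thread_break_handle();

     static void my_interrupt_handler(int sig)
     {
       scheme_break_main_thread_at(break_handle);
     }
*/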
  3118. void scheme_set_break_main_target(Scheme_Thread *p)
  3119. {
  3120. if (!main_break_target_thread) {
  3121. REGISTER_SO(main_break_target_thread);
  3122. }
  3123. main_break_target_thread = p;
  3124. }
  3125. static void check_ready_break()
  3126. {
  3127. if (delayed_break_ready) {
  3128. if (scheme_main_thread) {
  3129. delayed_break_ready = 0;
  3130. scheme_break_thread(main_break_target_thread);
  3131. }
  3132. }
  3133. }
  3134. void scheme_break_thread(Scheme_Thread *p)
  3135. {
  3136. if (!p) {
  3137. p = scheme_main_thread;
  3138. if (!p)
  3139. return;
  3140. }
  3141. /* Propagate breaks: */
  3142. while (p->nestee) {
  3143. p = p->nestee;
  3144. }
  3145. p->external_break = 1;
  3146. if (p == scheme_current_thread) {
  3147. if (scheme_can_break(p)) {
  3148. scheme_fuel_counter = 0;
  3149. scheme_jit_stack_boundary = (uintptr_t)-1;
  3150. }
  3151. }
  3152. scheme_weak_resume_thread(p);
  3153. # if defined(WINDOWS_PROCESSES) || defined(WINDOWS_FILE_HANDLES)
  3154. if (SAME_OBJ(p, scheme_main_thread))
  3155. ReleaseSemaphore((HANDLE)scheme_break_semaphore, 1, NULL);
  3156. # endif
  3157. }
  3158. static void call_on_atomic_timeout(int must)
  3159. {
  3160. Scheme_Thread *p = scheme_current_thread;
  3161. int running;
  3162. double sleep_end;
  3163. int block_descriptor;
  3164. Scheme_Object *blocker;
  3165. Scheme_Ready_Fun block_check;
  3166. Scheme_Needs_Wakeup_Fun block_needs_wakeup;
  3167. Scheme_Kill_Action_Func private_on_kill;
  3168. void *private_kill_data;
  3169. void **private_kill_next;
  3170. /* Save any state that has to do with the thread blocking or
  3171. sleeping, in case scheme_on_atomic_timeout() runs Racket code. */
  3172. running = p->running;
  3173. sleep_end = p->sleep_end;
  3174. block_descriptor = p->block_descriptor;
  3175. blocker = p->blocker;
  3176. block_check = p->block_check;
  3177. block_needs_wakeup = p->block_needs_wakeup;
  3178. private_on_kill = p->private_on_kill;
  3179. private_kill_data = p->private_kill_data;
  3180. private_kill_next = p->private_kill_next;
  3181. p->running = MZTHREAD_RUNNING;
  3182. p->sleep_end = 0.0;
  3183. p->block_descriptor = 0;
  3184. p->blocker = NULL;
  3185. p->block_check = NULL;
  3186. p->block_needs_wakeup = NULL;
  3187. scheme_on_atomic_timeout(must);
  3188. p->running = running;
  3189. p->sleep_end = sleep_end;
  3190. p->block_descriptor = block_descriptor;
  3191. p->blocker = blocker;
  3192. p->block_check = block_check;
  3193. p->block_needs_wakeup = block_needs_wakeup;
  3194. p->private_on_kill = private_on_kill;
  3195. p->private_kill_data = private_kill_data;
  3196. p->private_kill_next = private_kill_next;
  3197. }
  3198. static void find_next_thread(Scheme_Thread **return_arg) {
  3199. Scheme_Thread *next;
  3200. Scheme_Thread *p = scheme_current_thread;
  3201. Scheme_Object *next_in_set;
  3202. Scheme_Thread_Set *t_set;
  3203. double msecs = 0.0;
  3204. /* Find the next process. Skip processes that are definitely
  3205. blocked. */
  3206. /* Start from the root */
  3207. next_in_set = (Scheme_Object *)scheme_thread_set_top;
  3208. t_set = NULL; /* this will get set at the beginning of the loop */
  3209. /* Each thread may or may not be available. If it's not available,
  3210. we search thread by thread to find something that is available. */
  3211. while (1) {
  3212. /* next_in_set is the thread or set to try... */
  3213. /* While it's a set, go down into the set, choosing the next
  3214. item after the set's current. For each set, remember where we
  3215. started searching for something to run, so we'll know when
  3216. we've tried everything in the set. */
  3217. while (!SCHEME_THREADP(next_in_set)) {
  3218. t_set = (Scheme_Thread_Set *)next_in_set;
  3219. next_in_set = get_t_set_next(t_set->current);
  3220. if (!next_in_set)
  3221. next_in_set = t_set->first;
  3222. t_set->current = next_in_set;
  3223. t_set->search_start = next_in_set;
  3224. }
  3225. /* Now `t_set' is the set we're trying, and `next' will be the
  3226. thread to try: */
  3227. next = (Scheme_Thread *)next_in_set;
  3228. /* If we get back to the current thread, then
  3229. no other thread was ready. */
  3230. if (SAME_PTR(next, p)) {
  3231. next = NULL;
  3232. break;
  3233. }
  3234. /* Check whether `next' is ready... */
  3235. if (next->nestee) {
  3236. /* Blocked on nestee */
  3237. } else if (next->running & MZTHREAD_USER_SUSPENDED) {
  3238. if (next->next || (next->running & MZTHREAD_NEED_SUSPEND_CLEANUP)) {
  3239. /* If a non-main thread is still in the queue,
  3240. it needs to be swapped in so it can clean up
  3241. and suspend itself. */
  3242. break;
  3243. }
  3244. } else if (next->running & MZTHREAD_KILLED) {
  3245. /* This one has been terminated. */
  3246. if ((next->running & MZTHREAD_NEED_KILL_CLEANUP)
  3247. || next->nester
  3248. || !next->next) {
  3249. /* The thread needs to clean up. Swap it in so it can die. */
  3250. break;
  3251. } else
  3252. remove_thread(next);
  3253. break;
  3254. } else if (next->external_break && scheme_can_break(next)) {
  3255. break;
  3256. } else {
  3257. if (next->block_descriptor == GENERIC_BLOCKED) {
  3258. if (next->block_check) {
  3259. Scheme_Ready_Fun_FPC f = (Scheme_Ready_Fun_FPC)next->block_check;
  3260. Scheme_Schedule_Info sinfo;
  3261. init_schedule_info(&sinfo, next, next->sleep_end);
  3262. if (f(next->blocker, &sinfo))
  3263. break;
  3264. next->sleep_end = sinfo.sleep_end;
  3265. msecs = 0.0; /* that could have taken a while */
  3266. }
  3267. } else if (next->block_descriptor == SLEEP_BLOCKED) {
  3268. if (!msecs)
  3269. msecs = scheme_get_inexact_milliseconds();
  3270. if (next->sleep_end <= msecs)
  3271. break;
  3272. } else
  3273. break;
  3274. }
  3275. /* Look for the next thread/set in this set */
  3276. if (next->t_set_next)
  3277. next_in_set = next->t_set_next;
  3278. else
  3279. next_in_set = t_set->first;
  3280. /* If we run out of things to try in this set,
  3281. go up to find the next set. */
  3282. if (SAME_OBJ(next_in_set, t_set->search_start)) {
  3283. /* Loop to go up past exhausted sets, clearing search_start
  3284. from each exhausted set. */
  3285. while (1) {
  3286. t_set->search_start = NULL;
  3287. t_set = t_set->parent;
  3288. if (t_set) {
  3289. next_in_set = get_t_set_next(t_set->current);
  3290. if (!next_in_set)
  3291. next_in_set = t_set->first;
  3292. if (SAME_OBJ(next_in_set, t_set->search_start)) {
  3293. t_set->search_start = NULL;
  3294. /* continue going up */
  3295. } else {
  3296. t_set->current = next_in_set;
  3297. break;
  3298. }
  3299. } else
  3300. break;
  3301. }
  3302. if (!t_set) {
  3303. /* We ran out of things to try. If we
  3304. start again with the top, we should
  3305. land back at p. */
  3306. next = NULL;
  3307. break;
  3308. }
  3309. } else {
  3310. /* Set current... */
  3311. t_set->current = next_in_set;
  3312. }
  3313. /* As we go back to the top of the loop, we'll check whether
  3314. next_in_set is a thread or set, etc. */
  3315. }
  3316. p = NULL;
  3317. next_in_set = NULL;
  3318. t_set = NULL;
  3319. *return_arg = next;
  3320. next = NULL;
  3321. }
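/* Net effect: a round-robin traversal of the thread-set tree. Each set
   resumes from its `current' position and remembers `search_start', so
   scheduling is fair among sibling sets rather than among individual
   threads, and the search terminates after one full cycle. */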
  3322. void scheme_thread_block(float sleep_time)
  3323. /* If we're blocked, `sleep_time' is a max sleep time,
  3324. not a min sleep time. Otherwise, it's a min & max sleep time.
  3325. This proc auto-resets p's blocking info if an escape occurs. */
  3326. {
  3327. double sleep_end;
  3328. Scheme_Thread *next;
  3329. Scheme_Thread *p = scheme_current_thread;
  3330. if (p->return_marks_to) /* just in case we get here */
  3331. return;
  3332. if (p->running & MZTHREAD_KILLED) {
  3333. /* This thread is dead! Give up now. */
  3334. if (!do_atomic)
  3335. exit_or_escape(p);
  3336. }
  3337. if ((p->running & MZTHREAD_USER_SUSPENDED)
  3338. && !(p->running & MZTHREAD_NEED_SUSPEND_CLEANUP)) {
  3339. /* This thread was suspended. */
  3340. scheme_wait_until_suspend_ok();
  3341. if (!p->next) {
  3342. /* Suspending the main thread... */
  3343. select_thread();
  3344. } else
  3345. scheme_weak_suspend_thread(p);
  3346. }
  3347. /* Check scheduled_kills early and often. */
  3348. check_scheduled_kills();
  3349. #if defined(UNIX_PROCESSES) && !defined(MZ_PLACES_WAITPID)
  3350. /* Reap zombie processes: */
  3351. scheme_check_child_done();
  3352. #endif
  3353. shrink_cust_box_array();
  3354. if (scheme_active_but_sleeping)
  3355. scheme_wake_up();
  3356. if (sleep_time > 0) {
  3357. sleep_end = scheme_get_inexact_milliseconds();
  3358. sleep_end += (sleep_time * 1000.0);
  3359. } else
  3360. sleep_end = 0;
  3361. start_sleep_check:
  3362. check_ready_break();
  3363. if (!p->external_break && !p->next && scheme_check_for_break && scheme_check_for_break())
  3364. p->external_break = 1;
  3365. if (p->external_break && !p->suspend_break && scheme_can_break(p)) {
  3366. raise_break(p);
  3367. goto start_sleep_check;
  3368. }
  3369. swap_or_sleep:
  3370. #ifdef USE_OSKIT_CONSOLE
  3371. scheme_check_keyboard_input();
  3372. #endif
  3373. /* Check scheduled_kills early and often. */
  3374. check_scheduled_kills();
  3375. #ifdef MZ_USE_FUTURES
  3376. scheme_check_future_work();
  3377. #endif
  3378. #if defined(MZ_USE_MZRT) && !defined(DONT_USE_FOREIGN)
  3379. scheme_check_foreign_work();
  3380. #endif
  3381. if (!do_atomic && (sleep_end >= 0.0)) {
  3382. find_next_thread(&next);
  3383. } else
  3384. next = NULL;
  3385. if (next) {
  3386. /* Clear out search_start fields */
  3387. Scheme_Thread_Set *t_set;
  3388. t_set = next->t_set_parent;
  3389. while (t_set) {
  3390. t_set->search_start = NULL;
  3391. t_set = t_set->parent;
  3392. }
  3393. t_set = NULL;
  3394. }
  3395. if ((sleep_end > 0.0) && (p->block_descriptor == NOT_BLOCKED)) {
  3396. p->block_descriptor = SLEEP_BLOCKED;
  3397. p->sleep_end = sleep_end;
  3398. } else if ((sleep_end > 0.0) && (p->block_descriptor == GENERIC_BLOCKED)) {
  3399. p->sleep_end = sleep_end;
  3400. }
  3401. if (next && (!next->running || (next->running & MZTHREAD_SUSPENDED))) {
  3402. /* In the process of selecting another thread, it was suspended or
  3403. removed. Very unusual, but possible if a block checker does
  3404. strange things??? */
  3405. next = NULL;
  3406. }
  3407. #if 0
  3408. /* Debugging: next must be in the chain of processes */
  3409. if (next) {
  3410. Scheme_Thread *p = scheme_first_thread;
  3411. while (p != next) {
  3412. p = p->next;
  3413. if (!p) {
  3414. printf("error: tried to switch to bad thread\n");
  3415. exit(1);
  3416. }
  3417. }
  3418. }
  3419. #endif
  3420. /*####################################*/
  3421. /* THREAD CONTEXT SWITCH HAPPENS HERE */
  3422. /*####################################*/
  3423. if (next) {
  3424. /* Swap in `next', but first clear references to other threads. */
  3425. swap_target = next;
  3426. next = NULL;
  3427. do_swap_thread();
  3428. } else if (do_atomic && scheme_on_atomic_timeout
  3429. && (atomic_timeout_auto_suspend < 2)) {
  3430. if (!atomic_timeout_auto_suspend
  3431. || (do_atomic <= atomic_timeout_atomic_level)) {
  3432. if (atomic_timeout_auto_suspend) {
  3433. atomic_timeout_auto_suspend++;
  3434. scheme_fuel_counter = p->engine_weight;
  3435. scheme_jit_stack_boundary = scheme_stack_boundary;
  3436. }
  3437. call_on_atomic_timeout(0);
  3438. if (atomic_timeout_auto_suspend > 1)
  3439. --atomic_timeout_auto_suspend;
  3440. }
  3441. } else {
  3442. /* If all processes are blocked, check for total process sleeping: */
  3443. if (p->block_descriptor != NOT_BLOCKED) {
  3444. check_sleep(1, 1);
  3445. }
  3446. }
  3447. if (p->block_descriptor == SLEEP_BLOCKED) {
  3448. p->block_descriptor = NOT_BLOCKED;
  3449. }
  3450. p->sleep_end = 0.0;
  3451. /* Killed while I was asleep? */
  3452. if (p->running & MZTHREAD_KILLED) {
  3453. /* This thread is dead! Give up now. */
  3454. if (p->running & MZTHREAD_NEED_KILL_CLEANUP) {
  3455. /* The thread needs to clean up. It will block immediately to die. */
  3456. return;
  3457. } else {
  3458. if (!do_atomic)
  3459. exit_or_escape(p);
  3460. }
  3461. }
  3462. /* Suspended while I was asleep? */
  3463. if ((p->running & MZTHREAD_USER_SUSPENDED)
  3464. && !(p->running & MZTHREAD_NEED_SUSPEND_CLEANUP)) {
  3465. scheme_wait_until_suspend_ok();
  3466. if (!p->next)
  3467. scheme_thread_block(0.0); /* main thread handled at top of this function */
  3468. else
  3469. scheme_weak_suspend_thread(p);
  3470. }
  3471. /* Check for external break again after swap or sleep */
  3472. check_ready_break();
  3473. if (p->external_break && !p->suspend_break && scheme_can_break(p)) {
  3474. raise_break(p);
  3475. }
  3476. /* Check for major GC request from master GC */
  3477. #if defined(MZ_PRECISE_GC) && defined(MZ_USE_PLACES)
  3478. if (!do_atomic)
  3479. GC_check_master_gc_request();
  3480. #endif
  3481. #if defined(MZ_USE_PLACES)
  3482. if (!do_atomic)
  3483. scheme_place_check_for_interruption();
  3484. #endif
  3485. if (sleep_end > 0) {
  3486. if (sleep_end > scheme_get_inexact_milliseconds()) {
  3487. /* Still have time to sleep if necessary, but make sure we're
  3488. not ready (because maybe that's why we were swapped back in!) */
  3489. if (p->block_descriptor == GENERIC_BLOCKED) {
  3490. if (p->block_check) {
  3491. Scheme_Ready_Fun_FPC f = (Scheme_Ready_Fun_FPC)p->block_check;
  3492. Scheme_Schedule_Info sinfo;
  3493. init_schedule_info(&sinfo, p, sleep_end);
  3494. if (f(p->blocker, &sinfo)) {
  3495. sleep_end = 0;
  3496. } else {
  3497. sleep_end = sinfo.sleep_end;
  3498. }
  3499. }
  3500. }
  3501. if (sleep_end > 0)
  3502. goto swap_or_sleep;
  3503. }
  3504. }
  3505. if (do_atomic)
  3506. missed_context_switch = 1;
  3507. scheme_fuel_counter = p->engine_weight;
  3508. scheme_jit_stack_boundary = scheme_stack_boundary;
  3509. scheme_kickoff_green_thread_time_slice_timer(MZ_THREAD_QUANTUM_USEC);
  3510. /* Check scheduled_kills early and often. */
  3511. check_scheduled_kills();
  3512. }
  3513. void scheme_making_progress()
  3514. {
  3515. scheme_current_thread->ran_some = 1;
  3516. }
  3517. int scheme_block_until(Scheme_Ready_Fun _f, Scheme_Needs_Wakeup_Fun fdf,
  3518. Scheme_Object *data, float delay)
  3519. {
  3520. int result;
  3521. Scheme_Thread *p = scheme_current_thread;
  3522. Scheme_Ready_Fun_FPC f = (Scheme_Ready_Fun_FPC)_f;
  3523. Scheme_Schedule_Info sinfo;
  3524. double sleep_end;
  3525. if (!delay)
  3526. sleep_end = 0.0;
  3527. else {
  3528. sleep_end = scheme_get_inexact_milliseconds();
  3529. sleep_end += (delay * 1000.0);
  3530. }
  3531. /* We make an sinfo to be polite, but we also assume
  3532. that f will not generate any redirections! */
  3533. init_schedule_info(&sinfo, NULL, sleep_end);
  3534. while (!(result = f((Scheme_Object *)data, &sinfo))) {
  3535. sleep_end = sinfo.sleep_end;
  3536. if (sinfo.spin) {
  3537. init_schedule_info(&sinfo, NULL, 0.0);
  3538. scheme_thread_block(0.0);
  3539. scheme_current_thread->ran_some = 1;
  3540. } else {
  3541. if (sleep_end) {
  3542. delay = (float)(sleep_end - scheme_get_inexact_milliseconds());
  3543. delay /= 1000.0;
  3544. if (delay < 0)
  3545. delay = (float)0.00001;
  3546. } else
  3547. delay = 0.0;
  3548. p->block_descriptor = GENERIC_BLOCKED;
  3549. p->blocker = (Scheme_Object *)data;
  3550. p->block_check = (Scheme_Ready_Fun)f;
  3551. p->block_needs_wakeup = fdf;
  3552. scheme_thread_block(delay);
  3553. p->block_descriptor = NOT_BLOCKED;
  3554. p->blocker = NULL;
  3555. p->block_check = NULL;
  3556. p->block_needs_wakeup = NULL;
  3557. }
  3558. }
  3559. p->ran_some = 1;
  3560. return result;
  3561. }
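/* A minimal ready callback for scheme_block_until (hypothetical sketch;
   My_Blocker and my_ready are illustrative):

     static int my_ready(Scheme_Object *data)
     {
       return ((My_Blocker *)data)->done ? 1 : 0;
     }

   Then scheme_block_until(my_ready, NULL, (Scheme_Object *)b, 0.0)
   blocks just the current Racket thread, not the OS-level process,
   until my_ready returns a true value. */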
  3562. int scheme_block_until_enable_break(Scheme_Ready_Fun _f, Scheme_Needs_Wakeup_Fun fdf,
  3563. Scheme_Object *data, float delay, int enable_break)
  3564. {
  3565. if (enable_break) {
  3566. int v;
  3567. Scheme_Cont_Frame_Data cframe;
  3568. scheme_push_break_enable(&cframe, 1, 1);
  3569. v = scheme_block_until(_f, fdf, data, delay);
  3570. scheme_pop_break_enable(&cframe, 0);
  3571. return v;
  3572. } else
  3573. return scheme_block_until(_f, fdf, data, delay);
  3574. }
  3575. static int ready_unless(Scheme_Object *o)
  3576. {
  3577. Scheme_Object *unless_evt, *data;
  3578. Scheme_Ready_Fun f;
  3579. data = (Scheme_Object *)((void **)o)[0];
  3580. unless_evt = (Scheme_Object *)((void **)o)[1];
  3581. f = (Scheme_Ready_Fun)((void **)o)[2];
  3582. return f(data);
  3583. }
  3584. static void needs_wakeup_unless(Scheme_Object *o, void *fds)
  3585. {
  3586. Scheme_Object *data;
  3587. Scheme_Needs_Wakeup_Fun fdf;
  3588. data = (Scheme_Object *)((void **)o)[0];
  3589. fdf = (Scheme_Needs_Wakeup_Fun)((void **)o)[3];
  3590. fdf(data, fds);
  3591. }
  3592. int scheme_block_until_unless(Scheme_Ready_Fun f, Scheme_Needs_Wakeup_Fun fdf,
  3593. Scheme_Object *data, float delay,
  3594. Scheme_Object *unless,
  3595. int enable_break)
  3596. {
  3597. if (unless) {
  3598. void **a;
  3599. a = MALLOC_N(void *, 4);
  3600. a[0] = data;
  3601. a[1] = unless;
  3602. a[2] = f;
  3603. a[3] = fdf;
  3604. data = (Scheme_Object *) mzALIAS a;
  3605. f = ready_unless;
  3606. if (fdf)
  3607. fdf = needs_wakeup_unless;
  3608. }
  3609. return scheme_block_until_enable_break(f, fdf, data, delay, enable_break);
  3610. }
  3611. void scheme_thread_block_enable_break(float sleep_time, int enable_break)
  3612. {
  3613. if (enable_break) {
  3614. Scheme_Cont_Frame_Data cframe;
  3615. scheme_push_break_enable(&cframe, 1, 1);
  3616. scheme_thread_block(sleep_time);
  3617. scheme_pop_break_enable(&cframe, 0);
  3618. } else
  3619. scheme_thread_block(sleep_time);
  3620. }
  3621. void scheme_start_atomic(void)
  3622. {
  3623. do_atomic++;
  3624. }
  3625. void scheme_start_atomic_no_break(void)
  3626. {
  3627. scheme_start_atomic();
  3628. all_breaks_disabled++;
  3629. }
  3630. void scheme_end_atomic_no_swap(void)
  3631. {
  3632. --do_atomic;
  3633. }
  3634. void scheme_start_in_scheduler(void)
  3635. {
  3636. do_atomic++;
  3637. scheme_no_stack_overflow++;
  3638. }
  3639. void scheme_end_in_scheduler(void)
  3640. {
  3641. --do_atomic;
  3642. --scheme_no_stack_overflow;
  3643. }
  3644. void scheme_end_atomic(void)
  3645. {
  3646. scheme_end_atomic_no_swap();
  3647. if (!do_atomic && missed_context_switch) {
  3648. missed_context_switch = 0;
  3649. scheme_thread_block(0.0);
  3650. scheme_current_thread->ran_some = 1;
  3651. }
  3652. }
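/* Atomic regions nest; each scheme_start_atomic() must be matched by
   one scheme_end_atomic() or scheme_end_atomic_no_swap(), e.g.:

     scheme_start_atomic();
     ... updates that must not be interleaved with other threads ...
     scheme_end_atomic();

   If a context switch was requested while atomic, scheme_end_atomic()
   performs it immediately via scheme_thread_block(0.0). */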
  3653. void scheme_end_atomic_can_break(void)
  3654. {
  3655. --all_breaks_disabled;
  3656. scheme_end_atomic();
  3657. if (!all_breaks_disabled)
  3658. scheme_check_break_now();
  3659. }
  3660. int scheme_wait_until_suspend_ok(void)
  3661. {
  3662. int did = 0;
  3663. if (scheme_on_atomic_timeout) {
  3664. /* new-style atomic timeout */
  3665. if (do_atomic > atomic_timeout_atomic_level) {
  3666. scheme_log_abort("attempted to wait for suspend in nested atomic mode");
  3667. abort();
  3668. }
  3669. }
  3670. while (do_atomic && scheme_on_atomic_timeout) {
  3671. did = 1;
  3672. if (atomic_timeout_auto_suspend)
  3673. atomic_timeout_auto_suspend++;
  3674. call_on_atomic_timeout(1);
  3675. if (atomic_timeout_auto_suspend > 1)
  3676. --atomic_timeout_auto_suspend;
  3677. }
  3678. if (do_atomic) {
  3679. scheme_log_abort("about to suspend in atomic mode");
  3680. abort();
  3681. }
  3682. return did;
  3683. }
  3684. Scheme_On_Atomic_Timeout_Proc scheme_set_on_atomic_timeout(Scheme_On_Atomic_Timeout_Proc p)
  3685. {
  3686. Scheme_On_Atomic_Timeout_Proc old;
  3687. old = scheme_on_atomic_timeout;
  3688. scheme_on_atomic_timeout = p;
  3689. if (p) {
  3690. atomic_timeout_auto_suspend = 1;
  3691. atomic_timeout_atomic_level = do_atomic;
  3692. } else {
  3693. atomic_timeout_auto_suspend = 0;
  3694. }
  3695. return old;
  3696. }
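/* Callers are expected to save the returned proc and restore it when
   done; a sketch (my_timeout_proc is illustrative):

     Scheme_On_Atomic_Timeout_Proc old;
     old = scheme_set_on_atomic_timeout(my_timeout_proc);
     ...
     scheme_set_on_atomic_timeout(old);
*/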
  3697. void scheme_weak_suspend_thread(Scheme_Thread *r)
  3698. {
  3699. if (r->running & MZTHREAD_SUSPENDED)
  3700. return;
  3701. if (r->prev) {
  3702. r->prev->next = r->next;
  3703. r->next->prev = r->prev;
  3704. } else {
  3705. r->next->prev = NULL;
  3706. scheme_first_thread = r->next;
  3707. }
  3708. r->next = r->prev = NULL;
  3709. unschedule_in_set((Scheme_Object *)r, r->t_set_parent);
  3710. r->running |= MZTHREAD_SUSPENDED;
  3711. scheme_prepare_this_thread_for_GC(r);
  3712. if (r == scheme_current_thread) {
  3713. select_thread();
  3714. /* Killed while suspended? */
  3715. if ((r->running & MZTHREAD_KILLED) && !(r->running & MZTHREAD_NEED_KILL_CLEANUP))
  3716. scheme_thread_block(0);
  3717. }
  3718. }
  3719. void scheme_weak_resume_thread(Scheme_Thread *r)
  3720. /* This function can be called from an interrupt handler, but
  3721. only for the main thread, which is never suspended. */
  3722. {
  3723. if (!(r->running & MZTHREAD_USER_SUSPENDED)) {
  3724. if (r->running & MZTHREAD_SUSPENDED) {
  3725. r->running -= MZTHREAD_SUSPENDED;
  3726. r->next = scheme_first_thread;
  3727. r->prev = NULL;
  3728. scheme_first_thread = r;
  3729. r->next->prev = r;
  3730. r->ran_some = 1;
  3731. schedule_in_set((Scheme_Object *)r, r->t_set_parent);
  3732. scheme_check_tail_buffer_size(r);
  3733. }
  3734. }
  3735. }
  3736. void scheme_about_to_move_C_stack(void)
  3737. {
  3738. }
  3739. static Scheme_Object *
  3740. sch_sleep(int argc, Scheme_Object *args[])
  3741. {
  3742. float t;
  3743. if (argc && !SCHEME_REALP(args[0]))
  3744. scheme_wrong_type("sleep", "non-negative real number", 0, argc, args);
  3745. if (argc) {
  3746. t = (float)scheme_real_to_double(args[0]);
  3747. if (t < 0)
  3748. scheme_wrong_type("sleep", "non-negative real number", 0, argc, args);
  3749. } else
  3750. t = 0;
  3751. scheme_thread_block(t);
  3752. scheme_current_thread->ran_some = 1;
  3753. return scheme_void;
  3754. }
  3755. static Scheme_Object *break_thread(int argc, Scheme_Object *args[])
  3756. {
  3757. Scheme_Thread *p;
  3758. if (!SAME_TYPE(SCHEME_TYPE(args[0]), scheme_thread_type))
  3759. scheme_wrong_type("break-thread", "thread", 0, argc, args);
  3760. p = (Scheme_Thread *)args[0];
  3761. scheme_break_thread(p);
  3762. /* In case p == scheme_current_thread */
  3763. if (!scheme_fuel_counter) {
  3764. scheme_thread_block(0.0);
  3765. scheme_current_thread->ran_some = 1;
  3766. }
  3767. return scheme_void;
  3768. }
  3769. static int do_kill_thread(Scheme_Thread *p)
  3770. {
  3771. int kill_self = 0;
  3772. if (!MZTHREAD_STILL_RUNNING(p->running)) {
  3773. return 0;
  3774. }
  3775. if (p->suspend_to_kill) {
  3776. if (p == scheme_current_thread)
  3777. return 1; /* suspend in caller */
  3778. suspend_thread(p);
  3779. return 0;
  3780. }
  3781. if (p->nestee)
  3782. scheme_break_thread(p->nestee);
  3783. while (p->private_on_kill) {
  3784. p->private_on_kill(p->private_kill_data);
  3785. if (p->private_kill_next) {
  3786. p->private_on_kill = (Scheme_Kill_Action_Func)p->private_kill_next[0];
  3787. p->private_kill_data = p->private_kill_next[1];
  3788. p->private_kill_next = (void **)p->private_kill_next[2];
  3789. } else {
  3790. p->private_on_kill = NULL;
  3791. p->private_kill_data = NULL;
  3792. }
  3793. }
  3794. if (p->on_kill)
  3795. p->on_kill(p);
  3796. scheme_remove_managed(p->mref, (Scheme_Object *)p->mr_hop);
  3797. {
  3798. Scheme_Object *l;
  3799. for (l = p->extra_mrefs; !SCHEME_NULLP(l); l = SCHEME_CDR(l)) {
  3800. scheme_remove_managed((Scheme_Custodian_Reference *)SCHEME_CAR(l),
  3801. (Scheme_Object *)p->mr_hop);
  3802. }
  3803. }
  3804. if (p->running) {
  3805. if (p->running & MZTHREAD_USER_SUSPENDED) {
  3806. /* end user suspension, because we need to kill the thread */
  3807. p->running -= MZTHREAD_USER_SUSPENDED;
  3808. }
  3809. p->running |= MZTHREAD_KILLED;
  3810. if ((p->running & MZTHREAD_NEED_KILL_CLEANUP)
  3811. || p->nester)
  3812. scheme_weak_resume_thread(p);
  3813. else if (p != scheme_current_thread) {
  3814. /* Do kill stuff... */
  3815. if (p->next)
  3816. remove_thread(p);
  3817. }
  3818. }
  3819. if (p == scheme_current_thread)
  3820. kill_self = 1;
  3821. return kill_self;
  3822. }
  3823. void scheme_kill_thread(Scheme_Thread *p)
  3824. {
  3825. if (do_kill_thread(p)) {
  3826. /* Suspend/kill self: */
  3827. scheme_wait_until_suspend_ok();
  3828. if (p->suspend_to_kill)
  3829. suspend_thread(p);
  3830. else
  3831. scheme_thread_block(0.0);
  3832. }
  3833. /* Give killed threads time to die: */
  3834. scheme_thread_block(0.0);
  3835. scheme_current_thread->ran_some = 1;
  3836. }
  3837. static Scheme_Object *kill_thread(int argc, Scheme_Object *argv[])
  3838. {
  3839. Scheme_Thread *p = (Scheme_Thread *)argv[0];
  3840. if (!SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_thread_type))
  3841. scheme_wrong_type("kill-thread", "thread", 0, argc, argv);
  3842. if (!MZTHREAD_STILL_RUNNING(p->running))
  3843. return scheme_void;
  3844. check_current_custodian_allows("kill-thread", p);
  3845. scheme_kill_thread(p);
  3846. return scheme_void;
  3847. }
  3848. void scheme_push_kill_action(Scheme_Kill_Action_Func f, void *d)
  3849. {
  3850. Scheme_Thread *p = scheme_current_thread;
  3851. if (p->private_on_kill) {
  3852. /* Pretty unlikely that these get nested. An exception handler
  3853. would have to block on and within operations that need special
  3854. kill handling. But it could happen. */
  3855. void **next;
  3856. next = MALLOC_N(void *, 3);
  3857. next[0] = (void *)p->private_on_kill;
  3858. next[1] = p->private_kill_data;
  3859. next[2] = (void *)p->private_kill_next;
  3860. p->private_kill_next = next;
  3861. }
  3862. p->private_on_kill = f;
  3863. p->private_kill_data = d;
  3864. }
  3865. void scheme_pop_kill_action()
  3866. {
  3867. Scheme_Thread *p = scheme_current_thread;
  3868. if (p->private_kill_next) {
  3869. p->private_on_kill = (Scheme_Kill_Action_Func)p->private_kill_next[0];
  3870. p->private_kill_data = p->private_kill_next[1];
  3871. p->private_kill_next = (void **)p->private_kill_next[2];
  3872. } else {
  3873. p->private_on_kill = NULL;
  3874. p->private_kill_data = NULL;
  3875. }
  3876. }
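/* Typical use brackets a kill-sensitive operation (sketch; my_cleanup
   and data are illustrative):

     scheme_push_kill_action(my_cleanup, data);
     ... operation that a thread kill could interrupt ...
     scheme_pop_kill_action();

   If the thread is killed in between, do_kill_thread() above runs
   my_cleanup(data) before the thread dies. */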
  3877. /*========================================================================*/
  3878. /* suspend/resume and evts */
  3879. /*========================================================================*/
  3880. /* Forward decl: */
  3881. static void transitive_resume(Scheme_Object *resumes);
  3882. static void transitive_promote(Scheme_Thread *p, Scheme_Custodian *c);
  3883. static void promote_thread(Scheme_Thread *p, Scheme_Custodian *to_c);
  3884. static Scheme_Object *thread_suspend(int argc, Scheme_Object *argv[])
  3885. {
  3886. Scheme_Thread *p;
  3887. if (!SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_thread_type))
  3888. scheme_wrong_type("thread-suspend", "thread", 0, argc, argv);
  3889. p = (Scheme_Thread *)argv[0];
  3890. check_current_custodian_allows("thread-suspend", p);
  3891. suspend_thread(p);
  3892. return scheme_void;
  3893. }
  3894. static void suspend_thread(Scheme_Thread *p)
  3895. {
  3896. int running;
  3897. if (!MZTHREAD_STILL_RUNNING(p->running))
  3898. return;
  3899. if (p->running & MZTHREAD_USER_SUSPENDED)
  3900. return;
  3901. /* Get running now, just in case the thread is waiting on its own
  3902. suspend event (in which case posting to the sema will unsuspend
  3903. the thread) */
  3904. running = p->running;
  3905. p->resumed_box = NULL;
  3906. if (p->suspended_box) {
  3907. SCHEME_PTR2_VAL(p->suspended_box) = (Scheme_Object *)p;
  3908. scheme_post_sema_all(SCHEME_PTR1_VAL(p->suspended_box));
  3909. }
  3910. if (p->running_box && (!(p->running & MZTHREAD_SUSPENDED))) {
  3911. /* Make transitive-resume link strong, instead of weak: */
  3912. SCHEME_PTR_VAL(p->running_box) = (Scheme_Object *)p;
  3913. }
  3914. if (SAME_OBJ(p, scheme_main_thread)) {
  3915. /* p is the main thread, which we're not allowed to
  3916. suspend in the normal way. */
  3917. p->running |= MZTHREAD_USER_SUSPENDED;
  3918. scheme_main_was_once_suspended = 1;
  3919. if (p == scheme_current_thread) {
  3920. scheme_thread_block(0.0);
  3921. p->ran_some = 1;
  3922. }
  3923. } else if ((running & (MZTHREAD_NEED_KILL_CLEANUP
  3924. | MZTHREAD_NEED_SUSPEND_CLEANUP))
  3925. && (running & MZTHREAD_SUSPENDED)) {
  3926. /* p probably needs to get out of semaphore-wait lines, etc. */
  3927. scheme_weak_resume_thread(p);
  3928. p->running |= MZTHREAD_USER_SUSPENDED;
  3929. } else {
  3930. if (p == scheme_current_thread) {
  3931. scheme_wait_until_suspend_ok();
  3932. }
  3933. p->running |= MZTHREAD_USER_SUSPENDED;
  3934. scheme_weak_suspend_thread(p); /* ok if p is scheme_current_thread */
  3935. if (p == scheme_current_thread) {
  3936. /* Need to check for breaks */
  3937. scheme_check_break_now();
  3938. }
  3939. }
  3940. }
  3941. static void add_transitive_resume(Scheme_Thread *promote_to, Scheme_Thread *p)
  3942. {
  3943. Scheme_Object *running_box;
  3944. Scheme_Hash_Table *ht;
  3945. if (!p->running_box) {
  3946. Scheme_Object *b, *wb;
  3947. if ((p->running & MZTHREAD_USER_SUSPENDED)
  3948. && !(p->running & MZTHREAD_SUSPENDED))
  3949. wb = (Scheme_Object *)p;
  3950. else
  3951. wb = scheme_make_weak_box((Scheme_Object *)p);
  3952. b = scheme_alloc_small_object();
  3953. b->type = scheme_thread_dead_type;
  3954. SCHEME_PTR_VAL(b) = (Scheme_Object *)wb;
  3955. p->running_box = b;
  3956. }
  3957. running_box = p->running_box;
  3958. if (!promote_to->transitive_resumes) {
  3959. /* Create table */
  3960. ht = scheme_make_hash_table(SCHEME_hash_ptr);
  3961. promote_to->transitive_resumes = (Scheme_Object *)ht;
  3962. } else {
  3963. /* Purge ht entries for threads that are now dead: */
3964. Scheme_Hash_Table *gone = NULL;
  3965. int i;
  3966. ht = (Scheme_Hash_Table *)promote_to->transitive_resumes;
  3967. for (i = ht->size; i--; ) {
  3968. if (ht->vals[i]) {
3969. if (!SCHEME_PTR_VAL(ht->keys[i])
3970. || (SAME_TYPE(SCHEME_TYPE(SCHEME_PTR_VAL(ht->keys[i])), scheme_weak_box_type)
3971. && !SCHEME_WEAK_BOX_VAL(SCHEME_PTR_VAL(ht->keys[i])))) {
  3972. /* This one is dead */
  3973. if (!gone)
  3974. gone = scheme_make_hash_table(SCHEME_hash_ptr);
  3975. scheme_hash_set(gone, ht->keys[i], scheme_true);
  3976. }
  3977. }
  3978. }
  3979. if (gone) {
  3980. /* Remove dead ones: */
  3981. for (i = gone->size; i--; ) {
  3982. if (gone->vals[i]) {
  3983. scheme_hash_set(ht, gone->keys[i], NULL);
  3984. }
  3985. }
  3986. }
  3987. }
  3988. scheme_hash_set(ht, running_box, scheme_true);
  3989. }
  3990. static Scheme_Object *transitive_resume_k(void)
  3991. {
  3992. Scheme_Thread *p = scheme_current_thread;
  3993. Scheme_Object *r = (Scheme_Object *)p->ku.k.p1;
  3994. p->ku.k.p1 = NULL;
  3995. transitive_resume(r);
  3996. return scheme_true;
  3997. }
  3998. static void transitive_resume(Scheme_Object *resumes)
  3999. {
  4000. Scheme_Hash_Table *ht;
  4001. Scheme_Object *a[1];
  4002. int i;
  4003. #ifdef DO_STACK_CHECK
  4004. #include "mzstkchk.h"
  4005. {
  4006. Scheme_Thread *p = scheme_current_thread;
  4007. p->ku.k.p1 = resumes;
  4008. p->suspend_break++;
  4009. scheme_start_atomic();
  4010. scheme_handle_stack_overflow(transitive_resume_k);
  4011. scheme_end_atomic_no_swap();
  4012. --p->suspend_break;
  4013. return;
  4014. }
  4015. #endif
  4016. ht = (Scheme_Hash_Table *)resumes;
  4017. for (i = ht->size; i--; ) {
  4018. if (ht->vals[i]) {
  4019. a[0] = SCHEME_PTR_VAL(ht->keys[i]);
  4020. if (a[0]) {
  4021. if (SAME_TYPE(SCHEME_TYPE(a[0]), scheme_weak_box_type))
  4022. a[0] = SCHEME_WEAK_BOX_VAL(a[0]);
  4023. if (a[0])
  4024. thread_resume(1, a);
  4025. }
  4026. }
  4027. }
  4028. }
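/* The DO_STACK_CHECK prologue above is the usual trampoline pattern:
   when C-stack space runs low, the argument is stashed in the thread
   record and the operation restarts on a fresh stack via
   scheme_handle_stack_overflow(transitive_resume_k). */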
  4029. static Scheme_Object *transitive_promote_k(void)
  4030. {
  4031. Scheme_Thread *p = scheme_current_thread;
  4032. Scheme_Thread *pp = (Scheme_Thread *)p->ku.k.p1;
  4033. Scheme_Custodian *c = (Scheme_Custodian *)p->ku.k.p2;
  4034. p->ku.k.p1 = NULL;
  4035. p->ku.k.p2 = NULL;
  4036. transitive_promote(pp, c);
  4037. return scheme_true;
  4038. }
  4039. static void transitive_promote(Scheme_Thread *p, Scheme_Custodian *c)
  4040. {
  4041. Scheme_Hash_Table *ht;
  4042. Scheme_Object *t;
  4043. int i;
  4044. #ifdef DO_STACK_CHECK
  4045. #include "mzstkchk.h"
  4046. {
  4047. Scheme_Thread *pp = scheme_current_thread;
  4048. pp->ku.k.p1 = p;
  4049. pp->ku.k.p2 = c;
  4050. pp->suspend_break++;
  4051. scheme_start_atomic();
  4052. scheme_handle_stack_overflow(transitive_promote_k);
  4053. scheme_end_atomic_no_swap();
  4054. --pp->suspend_break;
  4055. return;
  4056. }
  4057. #endif
  4058. if (!p->transitive_resumes)
  4059. return;
  4060. ht = (Scheme_Hash_Table *)p->transitive_resumes;
  4061. for (i = ht->size; i--; ) {
  4062. if (ht->vals[i]) {
  4063. t = SCHEME_PTR_VAL(ht->keys[i]);
  4064. if (SAME_TYPE(SCHEME_TYPE(t), scheme_weak_box_type))
  4065. t = SCHEME_WEAK_BOX_VAL(t);
  4066. if (t)
  4067. promote_thread((Scheme_Thread *)t, c);
  4068. }
  4069. }
  4070. }
  4071. static void promote_thread(Scheme_Thread *p, Scheme_Custodian *to_c)
  4072. {
  4073. Scheme_Custodian *c, *cx;
  4074. Scheme_Custodian_Reference *mref;
  4075. Scheme_Object *l;
  4076. /* This function also handles transitive promotion. Every transitive
  4077. target for p always has at least the custodians of p, so if we don't
  4078. add a custodian to p, we don't need to check the rest. */
  4079. if (!p->mref || !CUSTODIAN_FAM(p->mref)) {
  4080. /* The thread has no running custodian, so fall through to
  4081. just use to_c */
  4082. } else {
  4083. c = CUSTODIAN_FAM(p->mref);
  4084. /* Check whether c is an ancestor of to_c (in which case we do nothing) */
  4085. for (cx = to_c; cx && NOT_SAME_OBJ(cx, c); ) {
  4086. cx = CUSTODIAN_FAM(cx->parent);
  4087. }
  4088. if (cx) return;
  4089. /* Check whether any of the extras are super to to_c.
  4090. If so, do nothing. */
  4091. for (l = p->extra_mrefs; !SCHEME_NULLP(l); l = SCHEME_CDR(l)) {
  4092. mref = (Scheme_Custodian_Reference *)SCHEME_CAR(l);
  4093. c = CUSTODIAN_FAM(mref);
  4094. for (cx = to_c; cx && NOT_SAME_OBJ(cx, c); ) {
  4095. cx = CUSTODIAN_FAM(cx->parent);
  4096. }
  4097. if (cx) return;
  4098. }
  4099. /* Check whether to_c is super of c: */
  4100. for (cx = c; cx && NOT_SAME_OBJ(cx, to_c); ) {
  4101. cx = CUSTODIAN_FAM(cx->parent);
  4102. }
  4103. /* If cx, fall through to replace the main custodian with to_c,
  4104. because it's an ancestor of the current one. Otherwise, they're
  4105. unrelated. */
  4106. if (!cx) {
  4107. /* Check whether any of the extras should be replaced by to_c */
  4108. for (l = p->extra_mrefs; !SCHEME_NULLP(l); l = SCHEME_CDR(l)) {
  4109. /* Is to_c super of c? */
  4110. for (cx = c; cx && NOT_SAME_OBJ(cx, to_c); ) {
  4111. cx = CUSTODIAN_FAM(cx->parent);
  4112. }
  4113. if (cx) {
  4114. /* Replace this custodian with to_c */
  4115. mref = (Scheme_Custodian_Reference *)SCHEME_CAR(l);
  4116. scheme_remove_managed(mref, (Scheme_Object *)p->mr_hop);
  4117. mref = scheme_add_managed(to_c, (Scheme_Object *)p->mr_hop, NULL, NULL, 0);
  4118. SCHEME_CAR(l) = (Scheme_Object *)mref;
  4119. /* It's possible that one of the other custodians is also
  4120. junior to to_c. Remove it if we find one. */
  4121. {
  4122. Scheme_Object *prev;
  4123. prev = l;
  4124. for (l = SCHEME_CDR(l); !SCHEME_NULLP(l); l = SCHEME_CDR(l)) {
  4125. mref = (Scheme_Custodian_Reference *)SCHEME_CAR(l);
  4126. c = CUSTODIAN_FAM(mref);
  4127. for (cx = c; cx && NOT_SAME_OBJ(cx, to_c); ) {
  4128. cx = CUSTODIAN_FAM(cx->parent);
  4129. }
  4130. if (cx)
  4131. SCHEME_CDR(prev) = SCHEME_CDR(l);
  4132. }
  4133. }
  4134. transitive_promote(p, to_c);
  4135. return;
  4136. }
  4137. }
4138. /* Otherwise, this custodian is unrelated to the existing ones.
  4139. Add it as an extra custodian. */
  4140. mref = scheme_add_managed(to_c, (Scheme_Object *)p->mr_hop, NULL, NULL, 0);
  4141. l = scheme_make_raw_pair((Scheme_Object *)mref, p->extra_mrefs);
  4142. p->extra_mrefs = l;
  4143. transitive_promote(p, to_c);
  4144. return;
  4145. }
  4146. }
  4147. /* Replace p's main custodian (if any) with to_c */
  4148. scheme_remove_managed(p->mref, (Scheme_Object *)p->mr_hop);
  4149. mref = scheme_add_managed(to_c, (Scheme_Object *)p->mr_hop, NULL, NULL, 0);
  4150. p->mref = mref;
  4151. #ifdef MZ_PRECISE_GC
  4152. GC_register_thread(p, to_c);
  4153. #endif
  4154. transitive_promote(p, to_c);
  4155. }
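/* Summary of the cases above: if an existing custodian of p is already
   an ancestor of to_c, nothing changes; if to_c is an ancestor of an
   existing custodian, that (now junior) custodian is replaced by to_c;
   otherwise to_c is unrelated and is added as an extra custodian.
   Promotion then recurs through p's transitive-resume set. */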
  4156. static Scheme_Object *thread_resume(int argc, Scheme_Object *argv[])
  4157. {
  4158. Scheme_Thread *p, *promote_to = NULL;
  4159. Scheme_Custodian *promote_c = NULL;
  4160. if (!SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_thread_type))
  4161. scheme_wrong_type("thread-resume", "thread", 0, argc, argv);
  4162. p = (Scheme_Thread *)argv[0];
  4163. if (argc > 1) {
  4164. if (SAME_TYPE(SCHEME_TYPE(argv[1]), scheme_thread_type))
  4165. promote_to = (Scheme_Thread *)argv[1];
  4166. else if (SAME_TYPE(SCHEME_TYPE(argv[1]), scheme_custodian_type)) {
  4167. promote_c = (Scheme_Custodian *)argv[1];
  4168. if (promote_c->shut_down)
  4169. promote_c = NULL;
  4170. } else {
  4171. scheme_wrong_type("thread-resume", "thread or custodian", 1, argc, argv);
  4172. return NULL;
  4173. }
  4174. }
  4175. if (!MZTHREAD_STILL_RUNNING(p->running))
  4176. return scheme_void;
  4177. /* Change/add custodians for p from promote_p */
  4178. if (promote_to) {
  4179. Scheme_Object *l;
  4180. Scheme_Custodian_Reference *mref;
  4181. /* If promote_to doesn't have a working custodian, there's
  4182. nothing to donate */
  4183. if (promote_to->mref && CUSTODIAN_FAM(promote_to->mref)) {
  4184. promote_thread(p, CUSTODIAN_FAM(promote_to->mref));
  4185. for (l = p->extra_mrefs; !SCHEME_NULLP(l); l = SCHEME_CDR(l)) {
  4186. mref = (Scheme_Custodian_Reference *)SCHEME_CAR(l);
  4187. promote_thread(p, CUSTODIAN_FAM(mref));
  4188. }
  4189. }
  4190. }
  4191. if (promote_c)
  4192. promote_thread(p, promote_c);
  4193. /* Set up transitive resume for future resumes of promote_to: */
  4194. if (promote_to
  4195. && MZTHREAD_STILL_RUNNING(promote_to->running)
  4196. && !SAME_OBJ(promote_to, p))
  4197. add_transitive_resume(promote_to, p);
  4198. /* Check whether the thread has a non-shut-down custodian */
  4199. {
  4200. Scheme_Custodian *c;
  4201. if (p->mref)
  4202. c = CUSTODIAN_FAM(p->mref);
  4203. else
  4204. c = NULL;
  4205. if (!c || c->shut_down)
  4206. return scheme_void;
  4207. }
  4208. if (p->running & MZTHREAD_USER_SUSPENDED) {
  4209. p->suspended_box = NULL;
  4210. if (p->resumed_box) {
  4211. SCHEME_PTR2_VAL(p->resumed_box) = (Scheme_Object *)p;
  4212. scheme_post_sema_all(SCHEME_PTR1_VAL(p->resumed_box));
  4213. }
  4214. if (p->running_box && !(p->running & MZTHREAD_SUSPENDED)) {
  4215. /* Make transitive-resume weak: */
  4216. Scheme_Object *wb;
  4217. wb = scheme_make_weak_box((Scheme_Object *)p);
  4218. SCHEME_PTR_VAL(p->running_box) = wb;
  4219. }
  4220. p->running -= MZTHREAD_USER_SUSPENDED;
  4221. scheme_weak_resume_thread(p);
  4222. if (p->transitive_resumes)
  4223. transitive_resume(p->transitive_resumes);
  4224. }
  4225. return scheme_void;
  4226. }
  4227. static Scheme_Object *make_thread_suspend(int argc, Scheme_Object *argv[])
  4228. {
  4229. Scheme_Thread *p;
  4230. if (!SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_thread_type))
  4231. scheme_wrong_type("thread-suspend-evt", "thread", 0, argc, argv);
  4232. p = (Scheme_Thread *)argv[0];
  4233. return scheme_get_thread_suspend(p);
  4234. }
  4235. Scheme_Object *scheme_get_thread_suspend(Scheme_Thread *p)
  4236. {
  4237. if (!p->suspended_box) {
  4238. Scheme_Object *b;
  4239. b = scheme_alloc_object();
  4240. b->type = scheme_thread_suspend_type;
  4241. if (MZTHREAD_STILL_RUNNING(p->running) && (p->running & MZTHREAD_USER_SUSPENDED))
  4242. SCHEME_PTR2_VAL(b) = (Scheme_Object *)p;
  4243. else {
  4244. Scheme_Object *sema;
  4245. sema = scheme_make_sema(0);
  4246. SCHEME_PTR1_VAL(b) = sema;
  4247. }
  4248. p->suspended_box = b;
  4249. }
  4250. return p->suspended_box;
  4251. }
  4252. static Scheme_Object *make_thread_resume(int argc, Scheme_Object *argv[])
  4253. {
  4254. Scheme_Thread *p;
  4255. if (!SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_thread_type))
  4256. scheme_wrong_type("thread-resume-evt", "thread", 0, argc, argv);
  4257. p = (Scheme_Thread *)argv[0];
  4258. if (!p->resumed_box) {
  4259. Scheme_Object *b;
  4260. b = scheme_alloc_object();
  4261. b->type = scheme_thread_resume_type;
  4262. if (MZTHREAD_STILL_RUNNING(p->running) && !(p->running & MZTHREAD_USER_SUSPENDED))
  4263. SCHEME_PTR2_VAL(b) = (Scheme_Object *)p;
  4264. else {
  4265. Scheme_Object *sema;
  4266. sema = scheme_make_sema(0);
  4267. SCHEME_PTR1_VAL(b) = sema;
  4268. }
  4269. p->resumed_box = b;
  4270. }
  4271. return p->resumed_box;
  4272. }
  4273. static int resume_suspend_ready(Scheme_Object *o, Scheme_Schedule_Info *sinfo)
  4274. {
  4275. Scheme_Object *t;
  4276. t = SCHEME_PTR2_VAL(o);
  4277. if (t) {
  4278. scheme_set_sync_target(sinfo, o, t, NULL, 0, 0, NULL);
  4279. return 1;
  4280. }
  4281. scheme_set_sync_target(sinfo, SCHEME_PTR1_VAL(o), o, NULL, 0, 1, NULL);
  4282. return 0;
  4283. }
  4284. static Scheme_Object *make_thread_dead(int argc, Scheme_Object *argv[])
  4285. {
  4286. if (!SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_thread_type))
  4287. scheme_wrong_type("thread-dead-evt", "thread", 0, argc, argv);
  4288. return scheme_get_thread_dead((Scheme_Thread *)argv[0]);
  4289. }
  4290. Scheme_Object *scheme_get_thread_dead(Scheme_Thread *p)
  4291. {
  4292. if (!p->dead_box) {
  4293. Scheme_Object *b;
  4294. Scheme_Object *sema;
  4295. b = scheme_alloc_small_object();
  4296. b->type = scheme_thread_dead_type;
  4297. sema = scheme_make_sema(0);
  4298. SCHEME_PTR_VAL(b) = sema;
  4299. if (!MZTHREAD_STILL_RUNNING(p->running))
  4300. scheme_post_sema_all(sema);
  4301. p->dead_box = b;
  4302. }
  4303. return p->dead_box;
  4304. }
  4305. static int dead_ready(Scheme_Object *o, Scheme_Schedule_Info *sinfo)
  4306. {
  4307. scheme_set_sync_target(sinfo, SCHEME_PTR_VAL(o), o, NULL, 0, 1, NULL);
  4308. return 0;
  4309. }
  4310. /*========================================================================*/
  4311. /* syncing */
  4312. /*========================================================================*/
  4313. static void syncing_needs_wakeup(Scheme_Object *s, void *fds);
  4314. typedef struct Evt {
  4315. MZTAG_IF_REQUIRED
  4316. Scheme_Type sync_type;
  4317. Scheme_Ready_Fun_FPC ready;
  4318. Scheme_Needs_Wakeup_Fun needs_wakeup;
  4319. Scheme_Sync_Sema_Fun get_sema;
  4320. Scheme_Sync_Filter_Fun filter;
  4321. int can_redirect;
  4322. } Evt;
  4323. /* PLACE_THREAD_DECL */
  4324. static int evts_array_size;
  4325. static Evt **evts;
  4326. #if defined(MZ_PRECISE_GC) && defined(MZ_USE_PLACES)
  4327. THREAD_LOCAL_DECL(static int place_evts_array_size);
  4328. THREAD_LOCAL_DECL(static Evt **place_evts);
  4329. #endif
  4330. void scheme_add_evt_worker(Evt ***evt_array,
  4331. int *evt_size,
  4332. Scheme_Type type,
  4333. Scheme_Ready_Fun ready,
  4334. Scheme_Needs_Wakeup_Fun wakeup,
  4335. Scheme_Sync_Filter_Fun filter,
  4336. int can_redirect)
  4337. {
  4338. Evt *naya;
  4339. if (*evt_size <= type) {
  4340. Evt **nevts;
  4341. int new_size;
  4342. new_size = type + 1;
  4343. if (new_size < _scheme_last_type_)
  4344. new_size = _scheme_last_type_;
  4345. nevts = MALLOC_N(Evt*, new_size);
  4346. memcpy(nevts, (*evt_array), (*evt_size) * sizeof(Evt*));
  4347. (*evt_array) = nevts;
  4348. (*evt_size) = new_size;
  4349. }
  4350. naya = MALLOC_ONE_RT(Evt);
  4351. #ifdef MZTAG_REQUIRED
  4352. naya->type = scheme_rt_evt;
  4353. #endif
  4354. naya->sync_type = type;
  4355. naya->ready = (Scheme_Ready_Fun_FPC)ready;
  4356. naya->needs_wakeup = wakeup;
  4357. naya->filter = filter;
  4358. naya->can_redirect = can_redirect;
  4359. (*evt_array)[type] = naya;
  4360. }
  4361. void scheme_add_evt(Scheme_Type type,
  4362. Scheme_Ready_Fun ready,
  4363. Scheme_Needs_Wakeup_Fun wakeup,
  4364. Scheme_Sync_Filter_Fun filter,
  4365. int can_redirect)
  4366. {
  4367. #if defined(MZ_PRECISE_GC) && defined(MZ_USE_PLACES)
  4368. if (GC_is_place()) {
  4369. if (!place_evts) {
  4370. REGISTER_SO(place_evts);
  4371. }
  4372. scheme_add_evt_worker(&place_evts, &place_evts_array_size, type, ready, wakeup, filter, can_redirect);
  4373. }
  4374. else {
  4375. #endif
  4376. if (!evts) {
  4377. REGISTER_SO(evts);
  4378. }
  4379. scheme_add_evt_worker(&evts, &evts_array_size, type, ready, wakeup, filter, can_redirect);
  4380. #if defined(MZ_PRECISE_GC) && defined(MZ_USE_PLACES)
  4381. }
  4382. #endif
  4383. }
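/* Registration happens once per type, typically at startup; a sketch
   (scheme_my_evt_type, my_evt_ready, and my_evt_needs_wakeup are
   hypothetical):

     scheme_add_evt(scheme_my_evt_type,
                    (Scheme_Ready_Fun)my_evt_ready,
                    my_evt_needs_wakeup,
                    NULL,  /* no filter: every instance is an evt */
                    0);    /* not a redirecting evt */
*/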
  4384. void scheme_add_evt_through_sema(Scheme_Type type,
  4385. Scheme_Sync_Sema_Fun get_sema,
  4386. Scheme_Sync_Filter_Fun filter)
  4387. {
  4388. scheme_add_evt(type, NULL, NULL, filter, 0);
  4389. evts[type]->get_sema = get_sema;
  4390. }
  4391. static Evt *find_evt(Scheme_Object *o)
  4392. {
  4393. Scheme_Type t;
  4394. Evt *w = NULL;
  4395. t = SCHEME_TYPE(o);
  4396. if (t < evts_array_size)
  4397. w = evts[t];
  4398. #if defined(MZ_PRECISE_GC) && defined(MZ_USE_PLACES)
4399. if (place_evts && (w == NULL) && (t < place_evts_array_size))
4400. w = place_evts[t];
  4401. #endif
  4402. if (w && w->filter) {
  4403. Scheme_Sync_Filter_Fun filter;
  4404. filter = w->filter;
  4405. if (!filter(o))
  4406. return NULL;
  4407. }
  4408. return w;
  4409. }
  4410. int scheme_is_evt(Scheme_Object *o)
  4411. {
  4412. if (SCHEME_EVTSETP(o))
  4413. return 1;
  4414. return !!find_evt(o);
  4415. }
  4416. static Syncing *make_syncing(Evt_Set *evt_set, float timeout, double start_time)
  4417. {
  4418. Syncing *syncing;
  4419. int pos;
  4420. syncing = MALLOC_ONE_RT(Syncing);
  4421. #ifdef MZTAG_REQUIRED
  4422. syncing->type = scheme_rt_syncing;
  4423. #endif
  4424. syncing->set = evt_set;
  4425. syncing->timeout = timeout;
  4426. if (timeout >= 0)
  4427. syncing->sleep_end = start_time + (timeout * 1000);
  4428. else
  4429. syncing->sleep_end = 0.0;
  4430. if (evt_set->argc > 1) {
  4431. Scheme_Config *config;
  4432. Scheme_Object *rand_state;
  4433. config = scheme_current_config();
  4434. rand_state = scheme_get_param(config, MZCONFIG_SCHEDULER_RANDOM_STATE);
  4435. pos = scheme_rand((Scheme_Random_State *)rand_state);
  4436. syncing->start_pos = (pos % evt_set->argc);
  4437. }
  4438. return syncing;
  4439. }
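/* Choosing a random start_pos (instead of always 0) keeps multi-evt
   syncs fair: no event in the set is persistently polled first, so a
   perpetually-ready early event cannot starve the others. */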
  4440. static void *splice_ptr_array(void **a, int al, void **b, int bl, int i)
  4441. {
  4442. void **r;
  4443. int j;
  4444. r = MALLOC_N(void*, al + bl - 1);
  4445. if (a)
  4446. memcpy(r, a, i * sizeof(void*));
  4447. if (b)
  4448. memcpy(r + i, b, bl * sizeof(void*));
  4449. else {
  4450. for (j = 0; j < bl; j++) {
  4451. r[i+j] = a[i];
  4452. }
  4453. }
  4454. if (a)
  4455. memcpy(r + (i + bl), a + (i + 1), (al - i - 1) * sizeof(void*));
  4456. return r;
  4457. }
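/* Worked example: splicing b = [x y z] into a = [1 2 3 4] at i = 1
   yields [1 x y z 3 4]; slot a[i] is replaced by all of b. When b is
   NULL, a[i] is instead replicated bl times in that position. */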
  4458. static void set_sync_target(Syncing *syncing, int i, Scheme_Object *target,
  4459. Scheme_Object *wrap, Scheme_Object *nack,
  4460. int repost, int retry, Scheme_Accept_Sync accept)
  4461. /* Not ready, deferred to target. */
  4462. {
  4463. Evt_Set *evt_set = syncing->set;
  4464. if (wrap) {
  4465. if (!syncing->wrapss) {
  4466. Scheme_Object **wrapss;
  4467. wrapss = MALLOC_N(Scheme_Object*, evt_set->argc);
  4468. syncing->wrapss = wrapss;
  4469. }
  4470. if (!syncing->wrapss[i])
  4471. syncing->wrapss[i] = scheme_null;
  4472. wrap = scheme_make_pair(wrap, syncing->wrapss[i]);
  4473. syncing->wrapss[i] = wrap;
  4474. }
  4475. if (nack) {
  4476. if (!syncing->nackss) {
  4477. Scheme_Object **nackss;
  4478. nackss = MALLOC_N(Scheme_Object*, evt_set->argc);
  4479. syncing->nackss = nackss;
  4480. }
  4481. if (!syncing->nackss[i])
  4482. syncing->nackss[i] = scheme_null;
  4483. nack = scheme_make_pair(nack, syncing->nackss[i]);
  4484. syncing->nackss[i] = nack;
  4485. }
  4486. if (repost) {
  4487. if (!syncing->reposts) {
  4488. char *s;
  4489. s = (char *)scheme_malloc_atomic(evt_set->argc);
  4490. memset(s, 0, evt_set->argc);
  4491. syncing->reposts = s;
  4492. }
  4493. syncing->reposts[i] = 1;
  4494. }
  4495. if (accept) {
  4496. if (!syncing->accepts) {
  4497. Scheme_Accept_Sync *s;
  4498. s = (Scheme_Accept_Sync *)scheme_malloc_atomic(sizeof(Scheme_Accept_Sync) * evt_set->argc);
  4499. memset(s, 0, evt_set->argc * sizeof(Scheme_Accept_Sync));
  4500. syncing->accepts = s;
  4501. }
  4502. syncing->accepts[i] = accept;
  4503. }
  4504. if (SCHEME_EVTSETP(target) && retry) {
  4505. /* Flatten the set into this one */
  4506. Evt_Set *wts = (Evt_Set *)target;
  4507. if (wts->argc == 1) {
  4508. /* 1 thing in set? Flattening is easy! */
  4509. evt_set->argv[i] = wts->argv[0];
  4510. evt_set->ws[i] = wts->ws[0];
  4511. } else {
  4512. /* Inline the set (in place) */
  4513. Scheme_Object **argv;
  4514. Evt **ws;
  4515. argv = (Scheme_Object **)splice_ptr_array((void **)evt_set->argv,
  4516. evt_set->argc,
  4517. (void **)wts->argv,
  4518. wts->argc,
  4519. i);
  4520. ws = (Evt **)splice_ptr_array((void **)evt_set->ws,
  4521. evt_set->argc,
  4522. (void **)wts->ws,
  4523. wts->argc,
  4524. i);
  4525. evt_set->argv = argv;
  4526. evt_set->ws = ws;
  4527. if (syncing->wrapss) {
  4528. argv = (Scheme_Object **)splice_ptr_array((void **)syncing->wrapss,
  4529. evt_set->argc,
  4530. (void **)NULL,
  4531. wts->argc,
  4532. i);
  4533. syncing->wrapss = argv;
  4534. }
  4535. if (syncing->nackss) {
  4536. argv = (Scheme_Object **)splice_ptr_array((void **)syncing->nackss,
  4537. evt_set->argc,
  4538. (void **)NULL,
  4539. wts->argc,
  4540. i);
  4541. syncing->nackss = argv;
  4542. }
  4543. if (syncing->reposts) {
  4544. char *s;
  4545. int len;
  4546. len = evt_set->argc + wts->argc - 1;
  4547. s = (char *)scheme_malloc_atomic(len);
  4548. memset(s, 0, len);
  4549. memcpy(s, syncing->reposts, i);
  4550. memcpy(s + i + wts->argc, syncing->reposts + i + 1, evt_set->argc - i - 1);
  4551. syncing->reposts = s;
  4552. }
  4553. if (syncing->accepts) {
  4554. Scheme_Accept_Sync *s;
  4555. int len;
  4556. len = evt_set->argc + wts->argc - 1;
  4557. s = (Scheme_Accept_Sync *)scheme_malloc_atomic(len * sizeof(Scheme_Accept_Sync));
  4558. memset(s, 0, len * sizeof(Scheme_Accept_Sync));
  4559. memcpy(s, syncing->accepts, i * sizeof(Scheme_Accept_Sync));
  4560. memcpy(s + i + wts->argc, syncing->accepts + i + 1, (evt_set->argc - i - 1) * sizeof(Scheme_Accept_Sync));
  4561. syncing->accepts = s;
  4562. }
  4563. evt_set->argc += (wts->argc - 1);
  4564. /* scheme_channel_syncer_type needs to know its location, which
  4565. might have changed: */
  4566. argv = evt_set->argv;
  4567. for (i = evt_set->argc; i--; ) {
  4568. if (SAME_TYPE(SCHEME_TYPE(argv[i]), scheme_channel_syncer_type)) {
  4569. ((Scheme_Channel_Syncer *)argv[i])->syncing_i = i;
  4570. }
  4571. }
  4572. }
  4573. } else {
  4574. Evt *ww;
  4575. evt_set->argv[i] = target;
  4576. ww = find_evt(target);
  4577. evt_set->ws[i] = ww;
  4578. }
  4579. }
  4580. void scheme_set_sync_target(Scheme_Schedule_Info *sinfo, Scheme_Object *target,
  4581. Scheme_Object *wrap, Scheme_Object *nack,
  4582. int repost, int retry, Scheme_Accept_Sync accept)
  4583. {
  4584. set_sync_target((Syncing *)sinfo->current_syncing, sinfo->w_i,
  4585. target, wrap, nack, repost, retry, accept);
  4586. if (retry) {
  4587. /* Rewind one step to try new ones (or continue
  4588. if the set was empty). */
  4589. sinfo->w_i--;
  4590. }
  4591. }
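/* Scheduler poll for a Syncing record: checks each evt in the set,
   starting from the randomized start position. Redirections recorded
   via set_sync_target() are followed, but after 10 chained
   redirections we give up and report a potentially false positive.
   If every evt is semaphore- or channel-like and there is no timeout,
   block through scheme_wait_semas_chs() so the thread remains GCable. */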
  4592. static int syncing_ready(Scheme_Object *s, Scheme_Schedule_Info *sinfo)
  4593. {
  4594. int i, redirections = 0, all_semas = 1, j, result = 0;
  4595. Evt *w;
  4596. Scheme_Object *o;
  4597. Scheme_Schedule_Info r_sinfo;
  4598. Syncing *syncing = (Syncing *)s;
  4599. Evt_Set *evt_set;
  4600. int is_poll;
  4601. double sleep_end;
  4602. sleep_end = syncing->sleep_end;
  4603. if (syncing->result) {
  4604. result = 1;
  4605. goto set_sleep_end_and_return;
  4606. }
  4607. /* We must handle target redirections in the objects on which we're
  4608. syncing. We never have to redirect the evt_set itself, but
4609. an evt_set can show up as a target, and we inline it in
  4610. that case. */
  4611. evt_set = syncing->set;
  4612. is_poll = (syncing->timeout == 0.0);
  4613. /* Anything ready? */
  4614. for (j = 0; j < evt_set->argc; j++) {
  4615. Scheme_Ready_Fun_FPC ready;
  4616. i = (j + syncing->start_pos) % evt_set->argc;
  4617. o = evt_set->argv[i];
  4618. w = evt_set->ws[i];
  4619. ready = w->ready;
  4620. if (!SCHEME_SEMAP(o)
  4621. && !SCHEME_CHANNELP(o) && !SCHEME_CHANNEL_PUTP(o)
  4622. && !SAME_TYPE(SCHEME_TYPE(o), scheme_channel_syncer_type)
  4623. && !SAME_TYPE(SCHEME_TYPE(o), scheme_never_evt_type))
  4624. all_semas = 0;
  4625. if (ready) {
  4626. int yep;
  4627. init_schedule_info(&r_sinfo, sinfo->false_positive_ok, sleep_end);
  4628. r_sinfo.current_syncing = (Scheme_Object *)syncing;
  4629. r_sinfo.w_i = i;
  4630. r_sinfo.is_poll = is_poll;
  4631. yep = ready(o, &r_sinfo);
  4632. sleep_end = r_sinfo.sleep_end;
  4633. if ((i > r_sinfo.w_i) && sinfo->false_positive_ok) {
  4634. /* There was a redirection. Assert: !yep.
  4635. Give up if we've chained too much. */
  4636. redirections++;
  4637. if (redirections > 10) {
  4638. sinfo->potentially_false_positive = 1;
  4639. result = 1;
  4640. goto set_sleep_end_and_return;
  4641. }
  4642. }
  4643. j += (r_sinfo.w_i - i);
  4644. if (yep) {
  4645. /* If it was a potentially false positive,
  4646. don't set result permanently. Otherwise,
4647. propagate the false-positive indicator. */
  4648. if (!r_sinfo.potentially_false_positive) {
  4649. syncing->result = i + 1;
  4650. if (syncing->disable_break)
  4651. syncing->disable_break->suspend_break++;
  4652. if (syncing->reposts && syncing->reposts[i])
  4653. scheme_post_sema(o);
  4654. if (syncing->accepts && syncing->accepts[i])
  4655. scheme_accept_sync(syncing, i);
  4656. scheme_post_syncing_nacks(syncing);
  4657. result = 1;
  4658. goto set_sleep_end_and_return;
  4659. } else {
  4660. sinfo->potentially_false_positive = 1;
  4661. result = 1;
  4662. goto set_sleep_end_and_return;
  4663. }
  4664. } else if (r_sinfo.spin) {
  4665. sinfo->spin = 1;
  4666. }
  4667. } else if (w->get_sema) {
  4668. int repost = 0;
  4669. Scheme_Sync_Sema_Fun get_sema = w->get_sema;
  4670. Scheme_Object *sema;
  4671. sema = get_sema(o, &repost);
  4672. set_sync_target(syncing, i, sema, o, NULL, repost, 1, NULL);
  4673. j--; /* try again with this sema */
  4674. }
  4675. }
  4676. if (syncing->timeout >= 0.0) {
  4677. if (syncing->sleep_end <= scheme_get_inexact_milliseconds())
  4678. result = 1;
  4679. } else if (all_semas) {
  4680. /* Try to block in a GCable way: */
  4681. if (sinfo->false_positive_ok) {
  4682. /* In scheduler. Swap us in so we can suspend. */
  4683. sinfo->potentially_false_positive = 1;
  4684. result = 1;
  4685. } else {
  4686. /* Not in scheduler --- we're allowed to block via suspend,
  4687. which makes the thread GCable. */
  4688. scheme_wait_semas_chs(syncing->set->argc, syncing->set->argv, 0, syncing);
  4689. /* In case a break appeared after we chose something,
  4690. check for a break, because scheme_wait_semas_chs() won't: */
  4691. scheme_check_break_now();
  4692. result = 1;
  4693. }
  4694. }
  4695. set_sleep_end_and_return:
  4696. syncing->sleep_end = sleep_end;
  4697. if (syncing->sleep_end
  4698. && (!sinfo->sleep_end
  4699. || (sinfo->sleep_end > syncing->sleep_end)))
  4700. sinfo->sleep_end = syncing->sleep_end;
  4701. return result;
  4702. }
  4703. void scheme_accept_sync(Syncing *syncing, int i)
  4704. {
  4705. /* run atomic accept action to revise the wrap */
  4706. Scheme_Accept_Sync accept;
  4707. Scheme_Object *v, *pr;
  4708. accept = syncing->accepts[i];
  4709. syncing->accepts[i] = NULL;
  4710. pr = syncing->wrapss[i];
  4711. v = SCHEME_CAR(pr);
  4712. pr = SCHEME_CDR(pr);
  4713. v = accept(v);
  4714. pr = scheme_make_pair(v, pr);
  4715. syncing->wrapss[i] = pr;
  4716. }
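/* Before the thread sleeps, let each evt in the set register file
   descriptors (or other wakeup triggers) in `fds' so that an external
   event can wake the sleeping process. */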
  4717. static void syncing_needs_wakeup(Scheme_Object *s, void *fds)
  4718. {
  4719. int i;
  4720. Scheme_Object *o;
  4721. Evt *w;
  4722. Evt_Set *evt_set = ((Syncing *)s)->set;
  4723. for (i = 0; i < evt_set->argc; i++) {
  4724. o = evt_set->argv[i];
  4725. w = evt_set->ws[i];
  4726. if (w->needs_wakeup) {
  4727. Scheme_Needs_Wakeup_Fun nw = w->needs_wakeup;
  4728. nw(o, fds);
  4729. }
  4730. }
  4731. }
  4732. static Scheme_Object *evt_p(int argc, Scheme_Object *argv[])
  4733. {
  4734. return (scheme_is_evt(argv[0])
  4735. ? scheme_true
  4736. : scheme_false);
  4737. }
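/* Construct an Evt_Set from argv[delta..argc-1], flattening any
   argument that is itself an evt set and rejecting (with a type
   error) any argument that is not an evt. The parallel `ws' array
   caches the Evt record for each flattened element. */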
  4738. Evt_Set *make_evt_set(const char *name, int argc, Scheme_Object **argv, int delta)
  4739. {
  4740. Evt *w, **iws, **ws;
  4741. Evt_Set *evt_set, *subset;
  4742. Scheme_Object **args;
  4743. int i, j, count = 0, reuse = 1;
  4744. iws = MALLOC_N(Evt*, argc-delta);
  4745. /* Find Evt record for each non-set argument, and compute flattened size. */
  4746. for (i = 0; i < (argc - delta); i++) {
  4747. if (!SCHEME_EVTSETP(argv[i+delta])) {
  4748. w = find_evt(argv[i+delta]);
  4749. if (!w) {
  4750. scheme_wrong_type(name, "evt", i+delta, argc, argv);
  4751. return NULL;
  4752. }
  4753. iws[i] = w;
  4754. count++;
  4755. } else {
  4756. int n;
  4757. n = ((Evt_Set *)argv[i+delta])->argc;
  4758. if (n != 1)
  4759. reuse = 0;
  4760. count += n;
  4761. }
  4762. }
  4763. evt_set = MALLOC_ONE_TAGGED(Evt_Set);
  4764. evt_set->so.type = scheme_evt_set_type;
  4765. evt_set->argc = count;
  4766. if (reuse && (count == (argc - delta)))
  4767. ws = iws;
  4768. else
  4769. ws = MALLOC_N(Evt*, count);
  4770. args = MALLOC_N(Scheme_Object*, count);
  4771. for (i = delta, j = 0; i < argc; i++, j++) {
  4772. if (SCHEME_EVTSETP(argv[i])) {
  4773. int k, n;
  4774. subset = (Evt_Set *)argv[i];
  4775. n = subset->argc;
  4776. for (k = 0; k < n; k++, j++) {
  4777. args[j] = subset->argv[k];
  4778. ws[j] = subset->ws[k];
  4779. }
  4780. --j;
  4781. } else {
  4782. ws[j] = iws[i-delta];
  4783. args[j] = argv[i];
  4784. }
  4785. }
  4786. evt_set->ws = ws;
  4787. evt_set->argv = args;
  4788. return evt_set;
  4789. }
  4790. Scheme_Object *scheme_make_evt_set(int argc, Scheme_Object **argv)
  4791. {
  4792. return (Scheme_Object *)make_evt_set("internal-make-evt-set", argc, argv, 0);
  4793. }
  4794. void scheme_post_syncing_nacks(Syncing *syncing)
  4795. /* Also removes channel-syncers. Can be called multiple times. */
  4796. {
  4797. int i, c;
  4798. Scheme_Object *l;
  4799. if (syncing->set) {
  4800. c = syncing->set->argc;
  4801. for (i = 0; i < c; i++) {
  4802. if (SAME_TYPE(SCHEME_TYPE(syncing->set->argv[i]), scheme_channel_syncer_type))
  4803. scheme_get_outof_line((Scheme_Channel_Syncer *)syncing->set->argv[i]);
  4804. if (syncing->nackss) {
  4805. if ((i + 1) != syncing->result) {
  4806. l = syncing->nackss[i];
  4807. if (l) {
  4808. for (; SCHEME_PAIRP(l); l = SCHEME_CDR(l)) {
  4809. scheme_post_sema_all(SCHEME_CAR(l));
  4810. }
  4811. }
  4812. syncing->nackss[i] = NULL;
  4813. }
  4814. }
  4815. }
  4816. }
  4817. }
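/* The core of `sync', `sync/timeout', and their `/enable-break'
   variants. Fast paths: a lone semaphore argument; an immutable evt
   set with no redirectable evts; or, after flattening, all semaphores
   with no timeout. Otherwise, allocate a Syncing record, block via
   scheme_block_until(), post nacks if nothing was selected, and apply
   any wrap/handle procedures to the chosen evt's result. */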
  4818. static Scheme_Object *do_sync(const char *name, int argc, Scheme_Object *argv[],
  4819. int with_break, int with_timeout, int _tailok)
  4820. {
  4821. volatile int tailok = _tailok;
  4822. Evt_Set * volatile evt_set;
  4823. Syncing * volatile syncing;
  4824. volatile float timeout = -1.0;
  4825. double start_time;
  4826. Scheme_Cont_Frame_Data cframe;
  4827. if (with_timeout) {
  4828. if (!SCHEME_FALSEP(argv[0])) {
  4829. if (SCHEME_REALP(argv[0]))
  4830. timeout = (float)scheme_real_to_double(argv[0]);
  4831. else if (scheme_check_proc_arity(NULL, 0, 0, argc, argv))
  4832. timeout = 0.0;
  4833. if (timeout < 0.0) {
  4834. scheme_wrong_type(name, "non-negative real number", 0, argc, argv);
  4835. return NULL;
  4836. }
  4837. start_time = scheme_get_inexact_milliseconds();
  4838. } else
  4839. start_time = 0;
  4840. } else {
  4841. start_time = 0;
  4842. }
  4843. /* Special case: no timeout, only object is a semaphore */
  4844. if (argc == (with_timeout + 1) && !start_time && SCHEME_SEMAP(argv[with_timeout])) {
  4845. scheme_wait_sema(argv[with_timeout], with_break ? -1 : 0);
  4846. return argv[with_timeout];
  4847. }
  4848. evt_set = NULL;
  4849. /* Special case: only argument is an immutable evt set: */
  4850. if ((argc == (with_timeout + 1)) && SCHEME_EVTSETP(argv[with_timeout])) {
  4851. int i;
  4852. evt_set = (Evt_Set *)argv[with_timeout];
  4853. for (i = evt_set->argc; i--; ) {
  4854. if (evt_set->ws[i]->can_redirect) {
  4855. /* Need to copy this set to handle redirections. */
  4856. evt_set = NULL;
  4857. break;
  4858. }
  4859. }
  4860. }
  4861. if (!evt_set)
  4862. evt_set = make_evt_set(name, argc, argv, with_timeout);
  4863. if (with_break) {
  4864. scheme_push_break_enable(&cframe, 1, 1);
  4865. }
  4866. /* Check for another special case: syncing on a set of semaphores
  4867. without a timeout. Use general code for channels.
  4868. (Note that we check for this case after evt-set flattening.) */
  4869. if (timeout < 0.0) {
  4870. int i;
  4871. for (i = evt_set->argc; i--; ) {
  4872. if (!SCHEME_SEMAP(evt_set->argv[i]))
  4873. break;
  4874. }
  4875. if (i < 0) {
  4876. /* Hit the special case. */
  4877. i = scheme_wait_semas_chs(evt_set->argc, evt_set->argv, 0, NULL);
  4878. if (with_break) {
  4879. scheme_pop_break_enable(&cframe, 1);
  4880. } else {
  4881. /* In case a break appeared after we received a post,
  4882. check for a break, because scheme_wait_semas_chs() won't: */
  4883. scheme_check_break_now();
  4884. }
  4885. if (i)
  4886. return evt_set->argv[i - 1];
  4887. else
  4888. return (tailok ? scheme_false : NULL);
  4889. }
  4890. }
  4891. syncing = make_syncing(evt_set, timeout, start_time);
  4892. if (timeout < 0.0)
  4893. timeout = 0.0; /* means "no timeout" to block_until */
  4894. if (with_break) {
  4895. /* Suspended breaks when something is selected. */
  4896. syncing->disable_break = scheme_current_thread;
  4897. }
  4898. BEGIN_ESCAPEABLE(scheme_post_syncing_nacks, syncing);
  4899. scheme_block_until((Scheme_Ready_Fun)syncing_ready, syncing_needs_wakeup,
  4900. (Scheme_Object *)syncing, timeout);
  4901. END_ESCAPEABLE();
  4902. if (!syncing->result)
  4903. scheme_post_syncing_nacks(syncing);
  4904. if (with_break) {
  4905. scheme_pop_break_enable(&cframe, 0);
  4906. }
  4907. if (with_break) {
  4908. /* Reverse low-level break disable: */
  4909. --syncing->disable_break->suspend_break;
  4910. }
  4911. if (syncing->result) {
  4912. /* Apply wrap functions to the selected evt: */
  4913. Scheme_Object *o, *l, *a, *to_call = NULL, *args[1];
  4914. int to_call_is_handle = 0;
  4915. o = evt_set->argv[syncing->result - 1];
  4916. if (SAME_TYPE(SCHEME_TYPE(o), scheme_channel_syncer_type)) {
  4917. /* This is a put that got changed to a syncer, but not changed back */
  4918. o = ((Scheme_Channel_Syncer *)o)->obj;
  4919. }
  4920. if (syncing->wrapss) {
  4921. l = syncing->wrapss[syncing->result - 1];
  4922. if (l) {
  4923. for (; SCHEME_PAIRP(l); l = SCHEME_CDR(l)) {
  4924. a = SCHEME_CAR(l);
  4925. if (to_call) {
  4926. args[0] = o;
  4927. /* Call wrap proc with breaks disabled */
  4928. scheme_push_break_enable(&cframe, 0, 0);
  4929. o = scheme_apply(to_call, 1, args);
  4930. scheme_pop_break_enable(&cframe, 0);
  4931. to_call = NULL;
  4932. }
  4933. if (SCHEME_BOXP(a) || SCHEME_PROCP(a)) {
  4934. if (SCHEME_BOXP(a)) {
  4935. a = SCHEME_BOX_VAL(a);
  4936. to_call_is_handle = 1;
  4937. }
  4938. to_call = a;
  4939. } else if (SAME_TYPE(scheme_thread_suspend_type, SCHEME_TYPE(a))
  4940. || SAME_TYPE(scheme_thread_resume_type, SCHEME_TYPE(a)))
  4941. o = SCHEME_PTR2_VAL(a);
  4942. else
  4943. o = a;
  4944. }
  4945. if (to_call) {
  4946. args[0] = o;
  4947. /* If to_call is still a wrap-evt (not a handle-evt),
  4948. then set the config one more time: */
  4949. if (!to_call_is_handle) {
  4950. scheme_push_break_enable(&cframe, 0, 0);
  4951. tailok = 0;
  4952. }
  4953. if (tailok) {
  4954. return _scheme_tail_apply(to_call, 1, args);
  4955. } else {
  4956. o = scheme_apply(to_call, 1, args);
  4957. if (!to_call_is_handle)
  4958. scheme_pop_break_enable(&cframe, 1);
  4959. return o;
  4960. }
  4961. }
  4962. }
  4963. }
  4964. return o;
  4965. } else {
  4966. if (with_timeout && SCHEME_PROCP(argv[0])) {
  4967. if (tailok)
  4968. return _scheme_tail_apply(argv[0], 0, NULL);
  4969. else
  4970. return _scheme_apply(argv[0], 0, NULL);
  4971. } else if (tailok)
  4972. return scheme_false;
  4973. else
  4974. return NULL;
  4975. }
  4976. }
  4977. static Scheme_Object *sch_sync(int argc, Scheme_Object *argv[])
  4978. {
  4979. return do_sync("sync", argc, argv, 0, 0, 1);
  4980. }
  4981. static Scheme_Object *sch_sync_timeout(int argc, Scheme_Object *argv[])
  4982. {
  4983. return do_sync("sync/timeout", argc, argv, 0, 1, 1);
  4984. }
  4985. Scheme_Object *scheme_sync(int argc, Scheme_Object *argv[])
  4986. {
  4987. return do_sync("sync", argc, argv, 0, 0, 0);
  4988. }
  4989. Scheme_Object *scheme_sync_timeout(int argc, Scheme_Object *argv[])
  4990. {
  4991. return do_sync("sync/timeout", argc, argv, 0, 1, 0);
  4992. }
  4993. static Scheme_Object *do_scheme_sync_enable_break(const char *who, int with_timeout, int tailok, int argc, Scheme_Object *argv[])
  4994. {
  4995. if (argc == 2 && SCHEME_FALSEP(argv[0]) && SCHEME_SEMAP(argv[1])) {
  4996. scheme_wait_sema(argv[1], -1);
  4997. return scheme_void;
  4998. }
  4999. return do_sync(who, argc, argv, 1, with_timeout, tailok);
  5000. }
  5001. Scheme_Object *scheme_sync_enable_break(int argc, Scheme_Object *argv[])
  5002. {
  5003. return do_scheme_sync_enable_break("sync/enable-break", 0, 0, argc, argv);
  5004. }
  5005. static Scheme_Object *sch_sync_enable_break(int argc, Scheme_Object *argv[])
  5006. {
  5007. return do_scheme_sync_enable_break("sync/enable-break", 0, 1, argc, argv);
  5008. }
  5009. static Scheme_Object *sch_sync_timeout_enable_break(int argc, Scheme_Object *argv[])
  5010. {
  5011. return do_scheme_sync_enable_break("sync/timeout/enable-break", 1, 1, argc, argv);
  5012. }
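/* Racket-level `choice-evt': for example, (choice-evt e1 e2) is an
   evt that becomes ready when either argument does, and syncing on it
   yields the chosen argument's result. */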
  5013. static Scheme_Object *evts_to_evt(int argc, Scheme_Object *argv[])
  5014. {
  5015. return (Scheme_Object *)make_evt_set("choice-evt", argc, argv, 0);
  5016. }
  5017. /*========================================================================*/
  5018. /* thread cells */
  5019. /*========================================================================*/
  5020. #define SCHEME_THREAD_CELLP(x) (SAME_TYPE(SCHEME_TYPE(x), scheme_thread_cell_type))
  5021. Scheme_Object *scheme_make_thread_cell(Scheme_Object *def_val, int inherited)
  5022. {
  5023. Thread_Cell *c;
  5024. c = MALLOC_ONE_TAGGED(Thread_Cell);
  5025. c->so.type = scheme_thread_cell_type;
  5026. c->def_val = def_val;
  5027. c->inherited = !!inherited;
  5028. return (Scheme_Object *)c;
  5029. }
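/* A thread cell's per-thread value lives in the thread's cell_values
   table, keyed by the cell itself and wrapped in an ephemeron so that
   an unreachable cell does not retain its values. Racket-level usage:
   (define c (make-thread-cell 0 #t)) ... (thread-cell-ref c)
   ... (thread-cell-set! c 17). */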
  5030. Scheme_Object *scheme_thread_cell_get(Scheme_Object *cell, Scheme_Thread_Cell_Table *cells)
  5031. {
  5032. Scheme_Object *v;
  5033. if (((Thread_Cell *)cell)->assigned) {
  5034. v = scheme_lookup_in_table(cells, (const char *)cell);
  5035. if (v)
  5036. return scheme_ephemeron_value(v);
  5037. }
  5038. return ((Thread_Cell *)cell)->def_val;
  5039. }
  5040. void scheme_thread_cell_set(Scheme_Object *cell, Scheme_Thread_Cell_Table *cells, Scheme_Object *v)
  5041. {
  5042. if (!((Thread_Cell *)cell)->assigned)
  5043. ((Thread_Cell *)cell)->assigned = 1;
  5044. v = scheme_make_ephemeron(cell, v);
  5045. scheme_add_to_table(cells, (const char *)cell, (void *)v, 0);
  5046. }
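/* Copy into `t' every binding from `cells' whose cell matches the
   given `inherited' flag; this is how a new thread picks up the
   preserved thread cells of its creator. */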
  5047. static Scheme_Thread_Cell_Table *inherit_cells(Scheme_Thread_Cell_Table *cells,
  5048. Scheme_Thread_Cell_Table *t,
  5049. int inherited)
  5050. {
  5051. Scheme_Bucket *bucket;
  5052. Scheme_Object *cell, *v;
  5053. int i;
  5054. if (!cells)
  5055. cells = scheme_current_thread->cell_values;
  5056. if (!t)
  5057. t = scheme_make_bucket_table(20, SCHEME_hash_weak_ptr);
  5058. for (i = cells->size; i--; ) {
  5059. bucket = cells->buckets[i];
  5060. if (bucket && bucket->val && bucket->key) {
  5061. cell = (Scheme_Object *)HT_EXTRACT_WEAK(bucket->key);
  5062. if (cell && (((Thread_Cell *)cell)->inherited == inherited)) {
  5063. v = (Scheme_Object *)bucket->val;
  5064. scheme_add_to_table(t, (char *)cell, v, 0);
  5065. }
  5066. }
  5067. }
  5068. return t;
  5069. }
  5070. Scheme_Thread_Cell_Table *scheme_inherit_cells(Scheme_Thread_Cell_Table *cells)
  5071. {
  5072. return inherit_cells(cells, NULL, 1);
  5073. }
  5074. static Scheme_Object *thread_cell_values(int argc, Scheme_Object *argv[])
  5075. {
  5076. if (argc == 1) {
  5077. Scheme_Thread_Cell_Table *naya;
  5078. if (!SAME_TYPE(scheme_thread_cell_values_type, SCHEME_TYPE(argv[0]))) {
  5079. scheme_wrong_type("current-preserved-thread-cell-values", "thread cell values", 0, argc, argv);
  5080. return NULL;
  5081. }
  5082. naya = inherit_cells(NULL, NULL, 0);
  5083. inherit_cells((Scheme_Thread_Cell_Table *)SCHEME_PTR_VAL(argv[0]), naya, 1);
  5084. scheme_current_thread->cell_values = naya;
  5085. return scheme_void;
  5086. } else {
  5087. Scheme_Object *o, *ht;
  5088. ht = (Scheme_Object *)inherit_cells(NULL, NULL, 1);
  5089. o = scheme_alloc_small_object();
  5090. o->type = scheme_thread_cell_values_type;
  5091. SCHEME_PTR_VAL(o) = ht;
  5092. return o;
  5093. }
  5094. }
  5095. static Scheme_Object *make_thread_cell(int argc, Scheme_Object *argv[])
  5096. {
  5097. return scheme_make_thread_cell(argv[0], (argc > 1) && SCHEME_TRUEP(argv[1]));
  5098. }
  5099. static Scheme_Object *thread_cell_p(int argc, Scheme_Object *argv[])
  5100. {
  5101. return (SCHEME_THREAD_CELLP(argv[0])
  5102. ? scheme_true
  5103. : scheme_false);
  5104. }
  5105. static Scheme_Object *thread_cell_get(int argc, Scheme_Object *argv[])
  5106. {
  5107. if (!SCHEME_THREAD_CELLP(argv[0]))
  5108. scheme_wrong_type("thread-cell-ref", "thread cell", 0, argc, argv);
  5109. return scheme_thread_cell_get(argv[0], scheme_current_thread->cell_values);
  5110. }
  5111. static Scheme_Object *thread_cell_set(int argc, Scheme_Object *argv[])
  5112. {
  5113. if (!SCHEME_THREAD_CELLP(argv[0]))
  5114. scheme_wrong_type("thread-cell-set!", "thread cell", 0, argc, argv);
  5115. scheme_thread_cell_set(argv[0], scheme_current_thread->cell_values, argv[1]);
  5116. return scheme_void;
  5117. }
  5118. /*========================================================================*/
  5119. /* parameters */
  5120. /*========================================================================*/
  5121. SHARED_OK static int max_configs = __MZCONFIG_BUILTIN_COUNT__;
  5122. static Scheme_Object *do_param(void *data, int argc, Scheme_Object *argv[]);
  5123. Scheme_Config *scheme_current_config()
  5124. {
  5125. Scheme_Object *v;
  5126. v = scheme_extract_one_cc_mark(NULL, scheme_parameterization_key);
  5127. if (!SAME_TYPE(scheme_config_type, SCHEME_TYPE(v))) {
  5128. /* Someone has grabbed parameterization-key out of #%paramz
  5129. and misused it.
  5130. Printing an error message requires consulting parameters,
  5131. so just escape. */
  5132. scheme_longjmp(scheme_error_buf, 1);
  5133. }
  5134. return (Scheme_Config *)v;
  5135. }
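/* Functionally extend a parameterization: the config's hash tree maps
   the parameter's key to a fresh thread cell holding `val', so the
   extension is visible only where the new config is installed. */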
  5136. static Scheme_Config *do_extend_config(Scheme_Config *c, Scheme_Object *key, Scheme_Object *val)
  5137. {
  5138. Scheme_Config *naya;
  5139. Scheme_Hash_Tree *ht;
  5140. /* In principle, the key+cell link should be weak, but it's
  5141. difficult to imagine a parameter being GC'ed while an active
  5142. `parameterize' is still on the stack (or, at least, difficult to
  5143. imagine that it matters). */
  5144. naya = MALLOC_ONE_TAGGED(Scheme_Config);
  5145. naya->so.type = scheme_config_type;
  5146. ht = scheme_hash_tree_set(c->ht, key, scheme_make_thread_cell(val, 1));
  5147. naya->ht = ht;
  5148. naya->root = c->root;
  5149. return naya;
  5150. }
  5151. Scheme_Config *scheme_extend_config(Scheme_Config *c, int pos, Scheme_Object *init_val)
  5152. {
  5153. return do_extend_config(c, scheme_make_integer(pos), init_val);
  5154. }
  5155. void scheme_install_config(Scheme_Config *config)
  5156. {
  5157. scheme_set_cont_mark(scheme_parameterization_key, (Scheme_Object *)config);
  5158. }
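/* Find the cell for parameter key `k': first in the config's own hash
   tree (bindings from `parameterize'), then in the root
   parameterization; built-in parameters are looked up by index, user
   parameters through the `extensions' table. The `force_cell'
   argument is unused here; callers fall back to a parameter's default
   cell on a NULL result. */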
  5159. Scheme_Object *find_param_cell(Scheme_Config *c, Scheme_Object *k, int force_cell)
  5160. {
  5161. Scheme_Object *v;
  5162. Scheme_Parameterization *p;
  5163. v = scheme_hash_tree_get(c->ht, k);
  5164. if (v)
  5165. return v;
  5166. p = c->root;
  5167. if (SCHEME_INTP(k))
  5168. return p->prims[SCHEME_INT_VAL(k)];
  5169. else {
  5170. if (p->extensions)
  5171. return scheme_lookup_in_table(p->extensions, (const char *)k);
  5172. else
  5173. return NULL;
  5174. }
  5175. }
  5176. Scheme_Object *scheme_get_thread_param(Scheme_Config *c, Scheme_Thread_Cell_Table *cells, int pos)
  5177. {
  5178. Scheme_Object *cell;
  5179. cell = find_param_cell(c, scheme_make_integer(pos), 0);
  5180. return scheme_thread_cell_get(cell, cells);
  5181. }
  5182. Scheme_Object *scheme_get_param(Scheme_Config *c, int pos)
  5183. {
  5184. return scheme_get_thread_param(c, scheme_current_thread->cell_values, pos);
  5185. }
  5186. void scheme_set_thread_param(Scheme_Config *c, Scheme_Thread_Cell_Table *cells, int pos, Scheme_Object *o)
  5187. {
  5188. scheme_thread_cell_set(find_param_cell(c, scheme_make_integer(pos), 1), cells, o);
  5189. }
  5190. void scheme_set_param(Scheme_Config *c, int pos, Scheme_Object *o)
  5191. {
  5192. scheme_thread_cell_set(find_param_cell(c, scheme_make_integer(pos), 1),
  5193. scheme_current_thread->cell_values, o);
  5194. }
  5195. static Scheme_Parameterization *malloc_paramz()
  5196. {
  5197. return (Scheme_Parameterization *)scheme_malloc_tagged(sizeof(Scheme_Parameterization) +
  5198. (max_configs - 1) * sizeof(Scheme_Object*));
  5199. }
  5200. void scheme_flatten_config(Scheme_Config *orig_c)
  5201. {
  5202. }
  5203. static Scheme_Object *parameterization_p(int argc, Scheme_Object **argv)
  5204. {
  5205. Scheme_Object *v = argv[0];
  5206. return (SCHEME_CONFIGP(v)
  5207. ? scheme_true
  5208. : scheme_false);
  5209. }
  5210. #define SCHEME_PARAMETERP(v) ((SCHEME_PRIMP(v) || SCHEME_CLSD_PRIMP(v)) \
  5211. && ((((Scheme_Primitive_Proc *)v)->pp.flags & SCHEME_PRIM_OTHER_TYPE_MASK) \
  5212. == SCHEME_PRIM_TYPE_PARAMETER))
  5213. static Scheme_Object *extend_parameterization(int argc, Scheme_Object *argv[])
  5214. {
  5215. Scheme_Object *key, *a[2], *param;
  5216. Scheme_Config *c;
  5217. int i;
  5218. c = (Scheme_Config *)argv[0];
  5219. if (argc < 2) {
  5220. scheme_flatten_config(c);
  5221. } else if (SCHEME_CONFIGP(c) && (argc & 1)) {
  5222. for (i = 1; i < argc; i += 2) {
  5223. param = argv[i];
  5224. if (!SCHEME_PARAMETERP(param)
  5225. && !(SCHEME_CHAPERONEP(param) && SCHEME_PARAMETERP(SCHEME_CHAPERONE_VAL(param)))) {
  5226. scheme_wrong_type("parameterize", "parameter", i, argc, argv);
  5227. return NULL;
  5228. }
  5229. key = argv[i + 1];
  5230. if (SCHEME_CHAPERONEP(param)) {
  5231. a[0] = key;
  5232. key = scheme_apply_chaperone(param, 1, a, scheme_void);
  5233. param = SCHEME_CHAPERONE_VAL(param);
  5234. }
  5235. a[0] = key;
  5236. a[1] = scheme_false;
  5237. while (1) {
  5238. if (SCHEME_PRIMP(param)) {
  5239. Scheme_Prim *proc;
  5240. proc = (Scheme_Prim *)((Scheme_Primitive_Proc *)param)->prim_val;
  5241. key = proc(2, a); /* leads to scheme_param_config to set a[1] */
  5242. break;
  5243. } else {
  5244. /* sets a[1] */
  5245. key = do_param(((Scheme_Closed_Primitive_Proc *)param)->data, 2, a);
  5246. if (SCHEME_PARAMETERP(key)) {
  5247. param = key;
  5248. a[0] = a[1];
  5249. } else
  5250. break;
  5251. }
  5252. }
  5253. c = do_extend_config(c, key, a[1]);
  5254. }
  5255. }
  5256. return (Scheme_Object *)c;
  5257. }
  5258. static Scheme_Object *reparameterize(int argc, Scheme_Object **argv)
  5259. {
  5260. /* Clones values of all built-in parameters in a new parameterization.
  5261. This could be implemented in Scheme by enumerating all built-in parameters,
  5262. but it's easier and faster here. We need this for the Planet resolver. */
  5263. Scheme_Config *c, *naya;
  5264. Scheme_Parameterization *pz, *npz;
  5265. Scheme_Object *v;
  5266. Scheme_Hash_Tree *ht;
  5267. int i;
  5268. if (!SCHEME_CONFIGP(argv[0]))
  5269. scheme_wrong_type("reparameterize", "parameterization", 0, argc, argv);
  5270. c = (Scheme_Config *)argv[0];
  5271. scheme_flatten_config(c);
  5272. pz = c->root;
  5273. npz = malloc_paramz();
  5274. memcpy(npz, pz, sizeof(Scheme_Parameterization));
  5275. naya = MALLOC_ONE_TAGGED(Scheme_Config);
  5276. naya->so.type = scheme_config_type;
  5277. ht = scheme_make_hash_tree(0);
  5278. naya->ht = ht;
  5279. naya->root = npz;
  5280. for (i = 0; i < max_configs; i++) {
  5281. v = scheme_thread_cell_get(pz->prims[i], scheme_current_thread->cell_values);
  5282. v = scheme_make_thread_cell(v, 1);
  5283. npz->prims[i] = v;
  5284. }
  5285. return (Scheme_Object *)naya;
  5286. }
  5287. static Scheme_Object *parameter_p(int argc, Scheme_Object **argv)
  5288. {
  5289. Scheme_Object *v = argv[0];
  5290. if (SCHEME_CHAPERONEP(v)) v = SCHEME_CHAPERONE_VAL(v);
  5291. return (SCHEME_PARAMETERP(v)
  5292. ? scheme_true
  5293. : scheme_false);
  5294. }
  5295. static Scheme_Object *do_param(void *_data, int argc, Scheme_Object *argv[])
  5296. {
  5297. Scheme_Object *guard, **argv2, *pos[2];
  5298. ParamData *data = (ParamData *)_data;
  5299. if (argc && argv[0]) {
  5300. guard = data->guard;
  5301. if (guard) {
  5302. Scheme_Object *v;
  5303. v = scheme_apply(guard, 1, argv);
  5304. if (argc == 2) {
  5305. /* Special hook for parameterize: */
  5306. argv[1] = v;
  5307. return data->key;
  5308. }
  5309. argv2 = MALLOC_N(Scheme_Object *, argc);
  5310. memcpy(argv2, argv, argc * sizeof(Scheme_Object *));
  5311. argv2[0] = v;
  5312. } else if (argc == 2) {
  5313. /* Special hook for parameterize: */
  5314. argv[1] = argv[0];
  5315. return data->key;
  5316. } else
  5317. argv2 = argv;
  5318. } else
  5319. argv2 = argv;
  5320. if (data->is_derived) {
  5321. if (!argc) {
  5322. Scheme_Object *v;
  5323. v = _scheme_apply(data->key, argc, argv2);
  5324. pos[0] = v;
  5325. return _scheme_tail_apply(data->extract_guard, 1, pos);
  5326. } else {
  5327. return _scheme_tail_apply(data->key, argc, argv2);
  5328. }
  5329. }
  5330. pos[0] = data->key;
  5331. pos[1] = data->defcell;
  5332. return scheme_param_config("parameter-procedure",
  5333. (Scheme_Object *)(void *)pos,
  5334. argc, argv2,
  5335. -2, NULL, NULL, 0);
  5336. }
  5337. static Scheme_Object *make_parameter(int argc, Scheme_Object **argv)
  5338. {
  5339. Scheme_Object *p, *cell;
  5340. ParamData *data;
  5341. void *k;
  5342. k = scheme_make_pair(scheme_true, scheme_false); /* generates a key */
  5343. if (argc > 1)
  5344. scheme_check_proc_arity("make-parameter", 1, 1, argc, argv);
  5345. data = MALLOC_ONE_RT(ParamData);
  5346. #ifdef MZTAG_REQUIRED
  5347. data->type = scheme_rt_param_data;
  5348. #endif
  5349. data->key = (Scheme_Object *)k;
  5350. cell = scheme_make_thread_cell(argv[0], 1);
  5351. data->defcell = cell;
  5352. data->guard = ((argc > 1) ? argv[1] : NULL);
  5353. p = scheme_make_closed_prim_w_arity(do_param, (void *)data,
  5354. "parameter-procedure", 0, 1);
  5355. ((Scheme_Primitive_Proc *)p)->pp.flags |= SCHEME_PRIM_TYPE_PARAMETER;
  5356. return p;
  5357. }
  5358. static Scheme_Object *make_derived_parameter(int argc, Scheme_Object **argv)
  5359. {
  5360. Scheme_Object *p;
  5361. ParamData *data;
  5362. if (!SCHEME_PARAMETERP(argv[0]))
  5363. scheme_wrong_type("make-derived-parameter", "unchaperoned parameter", 0, argc, argv);
  5364. scheme_check_proc_arity("make-derived-parameter", 1, 1, argc, argv);
  5365. scheme_check_proc_arity("make-derived-parameter", 1, 2, argc, argv);
  5366. data = MALLOC_ONE_RT(ParamData);
  5367. #ifdef MZTAG_REQUIRED
  5368. data->type = scheme_rt_param_data;
  5369. #endif
  5370. data->is_derived = 1;
  5371. data->key = argv[0];
  5372. data->guard = argv[1];
  5373. data->extract_guard = argv[2];
  5374. p = scheme_make_closed_prim_w_arity(do_param, (void *)data,
  5375. "parameter-procedure", 0, 1);
  5376. ((Scheme_Primitive_Proc *)p)->pp.flags |= SCHEME_PRIM_TYPE_PARAMETER;
  5377. return p;
  5378. }
  5379. static Scheme_Object *parameter_procedure_eq(int argc, Scheme_Object **argv)
  5380. {
  5381. Scheme_Object *a, *b;
  5382. a = argv[0];
  5383. b = argv[1];
  5384. if (SCHEME_CHAPERONEP(a)) a = SCHEME_CHAPERONE_VAL(a);
  5385. if (SCHEME_CHAPERONEP(b)) b = SCHEME_CHAPERONE_VAL(b);
  5386. if (!SCHEME_PARAMETERP(a))
  5387. scheme_wrong_type("parameter-procedure=?", "parameter-procedure", 0, argc, argv);
  5388. if (!SCHEME_PARAMETERP(b))
  5389. scheme_wrong_type("parameter-procedure=?", "parameter-procedure", 1, argc, argv);
  5390. return (SAME_OBJ(a, b)
  5391. ? scheme_true
  5392. : scheme_false);
  5393. }
  5394. void scheme_set_command_line_arguments(Scheme_Object *vec)
  5395. {
  5396. if (!initial_cmdline_vec)
  5397. REGISTER_SO(initial_cmdline_vec);
  5398. initial_cmdline_vec = vec;
  5399. }
  5400. int scheme_new_param(void)
  5401. {
  5402. return max_configs++;
  5403. }
  5404. static void init_param(Scheme_Thread_Cell_Table *cells,
  5405. Scheme_Parameterization *params,
  5406. int pos,
  5407. Scheme_Object *v)
  5408. {
  5409. Scheme_Object *cell;
  5410. cell = scheme_make_thread_cell(v, 1);
  5411. params->prims[pos] = cell;
  5412. }
  5413. void scheme_set_root_param(int p, Scheme_Object *v)
  5414. {
  5415. Scheme_Parameterization *paramz;
  5416. paramz = scheme_current_thread->init_config->root;
  5417. ((Thread_Cell *)(paramz->prims[p]))->def_val = v;
  5418. }
  5419. static void make_initial_config(Scheme_Thread *p)
  5420. {
  5421. Scheme_Thread_Cell_Table *cells;
  5422. Scheme_Parameterization *paramz;
  5423. Scheme_Config *config;
  5424. cells = scheme_make_bucket_table(5, SCHEME_hash_weak_ptr);
  5425. p->cell_values = cells;
  5426. paramz = (Scheme_Parameterization *)scheme_malloc_tagged(sizeof(Scheme_Parameterization) +
  5427. (max_configs - 1) * sizeof(Scheme_Object*));
  5428. #ifdef MZTAG_REQUIRED
  5429. paramz->type = scheme_rt_parameterization;
  5430. #endif
  5431. config = MALLOC_ONE_TAGGED(Scheme_Config);
  5432. config->so.type = scheme_config_type;
  5433. config->root = paramz;
  5434. {
  5435. Scheme_Hash_Tree *ht;
  5436. ht = scheme_make_hash_tree(0);
  5437. config->ht = ht;
  5438. }
  5439. p->init_config = config;
  5440. init_param(cells, paramz, MZCONFIG_READTABLE, scheme_make_default_readtable());
  5441. init_param(cells, paramz, MZCONFIG_CAN_READ_GRAPH, scheme_true);
  5442. init_param(cells, paramz, MZCONFIG_CAN_READ_COMPILED, scheme_false);
  5443. init_param(cells, paramz, MZCONFIG_CAN_READ_BOX, scheme_true);
  5444. init_param(cells, paramz, MZCONFIG_CAN_READ_PIPE_QUOTE, scheme_true);
  5445. init_param(cells, paramz, MZCONFIG_CAN_READ_DOT, scheme_true);
  5446. init_param(cells, paramz, MZCONFIG_CAN_READ_INFIX_DOT, scheme_true);
  5447. init_param(cells, paramz, MZCONFIG_CAN_READ_QUASI, scheme_true);
  5448. init_param(cells, paramz, MZCONFIG_READ_DECIMAL_INEXACT, scheme_true);
  5449. init_param(cells, paramz, MZCONFIG_CAN_READ_READER, scheme_false);
  5450. init_param(cells, paramz, MZCONFIG_CAN_READ_LANG, scheme_true);
  5451. init_param(cells, paramz, MZCONFIG_LOAD_DELAY_ENABLED, init_load_on_demand ? scheme_true : scheme_false);
  5452. init_param(cells, paramz, MZCONFIG_DELAY_LOAD_INFO, scheme_false);
  5453. init_param(cells, paramz, MZCONFIG_PRINT_GRAPH, scheme_false);
  5454. init_param(cells, paramz, MZCONFIG_PRINT_STRUCT, scheme_true);
  5455. init_param(cells, paramz, MZCONFIG_PRINT_BOX, scheme_true);
  5456. init_param(cells, paramz, MZCONFIG_PRINT_VEC_SHORTHAND, scheme_false);
  5457. init_param(cells, paramz, MZCONFIG_PRINT_HASH_TABLE, scheme_true);
  5458. init_param(cells, paramz, MZCONFIG_PRINT_UNREADABLE, scheme_true);
  5459. init_param(cells, paramz, MZCONFIG_PRINT_PAIR_CURLY, scheme_false);
  5460. init_param(cells, paramz, MZCONFIG_PRINT_MPAIR_CURLY, scheme_true);
  5461. init_param(cells, paramz, MZCONFIG_PRINT_READER, scheme_false);
  5462. init_param(cells, paramz, MZCONFIG_PRINT_LONG_BOOLEAN, scheme_false);
  5463. init_param(cells, paramz, MZCONFIG_PRINT_AS_QQ, scheme_true);
  5464. init_param(cells, paramz, MZCONFIG_PRINT_SYNTAX_WIDTH, scheme_make_integer(32));
  5465. init_param(cells, paramz, MZCONFIG_HONU_MODE, scheme_false);
  5466. init_param(cells, paramz, MZCONFIG_COMPILE_MODULE_CONSTS, scheme_true);
  5467. init_param(cells, paramz, MZCONFIG_USE_JIT, scheme_startup_use_jit ? scheme_true : scheme_false);
  5468. {
  5469. Scheme_Object *s;
  5470. s = scheme_make_immutable_sized_utf8_string("", 0);
  5471. init_param(cells, paramz, MZCONFIG_LOCALE, s);
  5472. }
  5473. init_param(cells, paramz, MZCONFIG_CASE_SENS, (scheme_case_sensitive ? scheme_true : scheme_false));
  5474. init_param(cells, paramz, MZCONFIG_SQUARE_BRACKETS_ARE_PARENS, (scheme_square_brackets_are_parens
  5475. ? scheme_true : scheme_false));
  5476. init_param(cells, paramz, MZCONFIG_CURLY_BRACES_ARE_PARENS, (scheme_curly_braces_are_parens
  5477. ? scheme_true : scheme_false));
  5478. init_param(cells, paramz, MZCONFIG_ERROR_PRINT_WIDTH, scheme_make_integer(256));
  5479. init_param(cells, paramz, MZCONFIG_ERROR_PRINT_CONTEXT_LENGTH, scheme_make_integer(16));
  5480. init_param(cells, paramz, MZCONFIG_ERROR_PRINT_SRCLOC, scheme_true);
  5481. REGISTER_SO(main_custodian);
  5482. REGISTER_SO(last_custodian);
  5483. REGISTER_SO(limited_custodians);
  5484. main_custodian = scheme_make_custodian(NULL);
  5485. #ifdef MZ_PRECISE_GC
  5486. GC_register_root_custodian(main_custodian);
  5487. #endif
  5488. last_custodian = main_custodian;
  5489. init_param(cells, paramz, MZCONFIG_CUSTODIAN, (Scheme_Object *)main_custodian);
  5490. init_param(cells, paramz, MZCONFIG_ALLOW_SET_UNDEFINED, (scheme_allow_set_undefined
  5491. ? scheme_true
  5492. : scheme_false));
  5493. init_param(cells, paramz, MZCONFIG_COLLECTION_PATHS, scheme_null);
  5494. {
  5495. Scheme_Object *s;
  5496. s = scheme_make_path(scheme_os_getcwd(NULL, 0, NULL, 1));
  5497. s = scheme_path_to_directory_path(s);
  5498. init_param(cells, paramz, MZCONFIG_CURRENT_DIRECTORY, s);
  5499. scheme_set_original_dir(s);
  5500. }
  5501. {
  5502. Scheme_Object *rs;
  5503. rs = scheme_make_random_state(scheme_get_milliseconds());
  5504. init_param(cells, paramz, MZCONFIG_RANDOM_STATE, rs);
  5505. rs = scheme_make_random_state(scheme_get_milliseconds());
  5506. init_param(cells, paramz, MZCONFIG_SCHEDULER_RANDOM_STATE, rs);
  5507. }
  5508. {
  5509. Scheme_Object *eh;
  5510. eh = scheme_make_prim_w_arity2(scheme_default_eval_handler,
  5511. "default-eval-handler",
  5512. 1, 1,
  5513. 0, -1);
  5514. init_param(cells, paramz, MZCONFIG_EVAL_HANDLER, eh);
  5515. }
  5516. {
  5517. Scheme_Object *eh;
  5518. eh = scheme_make_prim_w_arity(scheme_default_compile_handler,
  5519. "default-compile-handler",
  5520. 2, 2);
  5521. init_param(cells, paramz, MZCONFIG_COMPILE_HANDLER, eh);
  5522. }
  5523. {
  5524. Scheme_Object *ph;
  5525. ph = scheme_make_prim_w_arity(scheme_default_print_handler,
  5526. "default-print-handler",
  5527. 1, 1);
  5528. init_param(cells, paramz, MZCONFIG_PRINT_HANDLER, ph);
  5529. ph = scheme_make_prim_w_arity(scheme_default_prompt_read_handler,
  5530. "default-prompt-read-handler",
  5531. 0, 0);
  5532. init_param(cells, paramz, MZCONFIG_PROMPT_READ_HANDLER, ph);
  5533. ph = scheme_make_prim_w_arity(scheme_default_read_input_port_handler,
  5534. "default-get-interaction-input-port",
  5535. 0, 0);
  5536. init_param(cells, paramz, MZCONFIG_READ_INPUT_PORT_HANDLER, ph);
  5537. ph = scheme_make_prim_w_arity(scheme_default_read_handler,
  5538. "default-read-interaction-handler",
  5539. 2, 2);
  5540. init_param(cells, paramz, MZCONFIG_READ_HANDLER, ph);
  5541. }
  5542. init_param(cells, paramz, MZCONFIG_PORT_COUNT_LINES, scheme_false);
  5543. {
  5544. Scheme_Object *lh;
  5545. lh = scheme_make_prim_w_arity2(scheme_default_load_extension,
  5546. "default-load-extension-handler",
  5547. 2, 2,
  5548. 0, -1);
  5549. init_param(cells, paramz, MZCONFIG_LOAD_EXTENSION_HANDLER, lh);
  5550. }
  5551. {
  5552. Scheme_Object *ins = initial_inspector;
  5553. init_param(cells, paramz, MZCONFIG_INSPECTOR, ins);
  5554. init_param(cells, paramz, MZCONFIG_CODE_INSPECTOR, ins);
  5555. }
  5556. {
  5557. Scheme_Object *zlv;
  5558. if (initial_cmdline_vec)
  5559. zlv = initial_cmdline_vec;
  5560. else
  5561. zlv = scheme_make_vector(0, NULL);
  5562. init_param(cells, paramz, MZCONFIG_CMDLINE_ARGS, zlv);
  5563. }
  5564. {
  5565. Scheme_Security_Guard *sg;
  5566. sg = MALLOC_ONE_TAGGED(Scheme_Security_Guard);
  5567. sg->so.type = scheme_security_guard_type;
  5568. init_param(cells, paramz, MZCONFIG_SECURITY_GUARD, (Scheme_Object *)sg);
  5569. }
  5570. {
  5571. Scheme_Thread_Set *t_set;
  5572. t_set = create_thread_set(NULL);
  5573. init_param(cells, paramz, MZCONFIG_THREAD_SET, (Scheme_Object *)t_set);
  5574. }
  5575. init_param(cells, paramz, MZCONFIG_THREAD_INIT_STACK_SIZE, scheme_make_integer(DEFAULT_INIT_STACK_SIZE));
  5576. {
  5577. int i;
  5578. for (i = 0; i < max_configs; i++) {
  5579. if (!paramz->prims[i])
  5580. init_param(cells, paramz, i, scheme_false);
  5581. }
  5582. }
  5583. }
  5584. void scheme_set_startup_load_on_demand(int on)
  5585. {
  5586. init_load_on_demand = on;
  5587. }
  5588. Scheme_Object *scheme_register_parameter(Scheme_Prim *function, char *name, int which)
  5589. {
  5590. Scheme_Object *o;
  5591. if (!config_map) {
  5592. REGISTER_SO(config_map);
  5593. config_map = MALLOC_N(Scheme_Object*, max_configs);
  5594. }
  5595. if (config_map[which])
  5596. return config_map[which];
  5597. o = scheme_make_prim_w_arity(function, name, 0, 1);
  5598. ((Scheme_Primitive_Proc *)o)->pp.flags |= SCHEME_PRIM_TYPE_PARAMETER;
  5599. config_map[which] = o;
  5600. return o;
  5601. }
  5602. typedef Scheme_Object *(*PCheck_Proc)(int, Scheme_Object **, Scheme_Config *);
  5603. Scheme_Object *scheme_param_config(char *name, Scheme_Object *pos,
  5604. int argc, Scheme_Object **argv,
  5605. int arity,
  5606. /* -3 => like -1, plus use check to unmarshall the value
  5607. -2 => user parameter; pos is array [key, defcell]
  5608. -1 => use check; if isboolorfilter, check is a filter
  5609. (and expected is ignored), and if check is NULL,
  5610. parameter is boolean-valued
  5611. 0+ => check argument for this arity */
  5612. Scheme_Object *(*check)(int, Scheme_Object **),
  5613. /* Actually called with (int, S_O **, Scheme_Config *) */
  5614. char *expected,
  5615. int isboolorfilter)
  5616. {
  5617. Scheme_Config *config;
  5618. config = scheme_current_config();
  5619. if (argc == 0) {
  5620. if (arity == -2) {
  5621. Scheme_Object *cell;
  5622. cell = find_param_cell(config, ((Scheme_Object **)pos)[0], 0);
  5623. if (!cell)
  5624. cell = ((Scheme_Object **)pos)[1];
  5625. if (SCHEME_THREAD_CELLP(cell))
  5626. return scheme_thread_cell_get(cell, scheme_current_thread->cell_values);
  5627. else
  5628. return cell; /* it's really the value, instead of a cell */
  5629. } else {
  5630. Scheme_Object *s;
  5631. s = scheme_get_param(config, SCHEME_INT_VAL(pos));
  5632. if (arity == -3) {
  5633. Scheme_Object *a[1];
  5634. PCheck_Proc checkp = (PCheck_Proc)check;
  5635. a[0] = s;
  5636. s = checkp(1, a, config);
  5637. }
  5638. return s;
  5639. }
  5640. } else {
  5641. Scheme_Object *naya = argv[0];
  5642. if (arity != -2) {
  5643. if (arity < 0) {
  5644. if (check) {
  5645. PCheck_Proc checkp = (PCheck_Proc)check;
  5646. Scheme_Object *r;
  5647. r = checkp(1, argv, config);
  5648. if (!isboolorfilter && SCHEME_FALSEP(r))
  5649. r = NULL;
  5650. if (!r) {
  5651. scheme_wrong_type(name, expected, 0, 1, argv);
  5652. return NULL;
  5653. }
  5654. if (isboolorfilter)
  5655. naya = r;
  5656. }
  5657. } else
  5658. scheme_check_proc_arity(name, arity, 0, argc, argv);
  5659. if (isboolorfilter && !check)
  5660. naya = ((SCHEME_TRUEP(naya)) ? scheme_true : scheme_false);
  5661. if (argc == 2) {
  5662. /* Special hook for parameterize: */
  5663. argv[1] = naya;
  5664. return pos;
  5665. } else
  5666. scheme_set_param(config, SCHEME_INT_VAL(pos), naya);
  5667. } else {
  5668. Scheme_Object *cell;
  5669. cell = find_param_cell(config, ((Scheme_Object **)pos)[0], 1);
  5670. if (!cell)
  5671. cell = ((Scheme_Object **)pos)[1];
  5672. scheme_thread_cell_set(cell, scheme_current_thread->cell_values, naya);
  5673. }
  5674. return scheme_void;
  5675. }
  5676. }
  5677. static Scheme_Object *
  5678. exact_positive_integer_p (int argc, Scheme_Object *argv[])
  5679. {
  5680. Scheme_Object *n = argv[0];
  5681. if (SCHEME_INTP(n) && (SCHEME_INT_VAL(n) > 0))
  5682. return scheme_true;
  5683. if (SCHEME_BIGNUMP(n) && SCHEME_BIGPOS(n))
  5684. return scheme_true;
  5685. return scheme_false;
  5686. }
  5687. static Scheme_Object *current_thread_initial_stack_size(int argc, Scheme_Object *argv[])
  5688. {
  5689. return scheme_param_config("current-thread-initial-stack-size",
  5690. scheme_make_integer(MZCONFIG_THREAD_INIT_STACK_SIZE),
  5691. argc, argv,
  5692. -1, exact_positive_integer_p, "exact positive integer", 0);
  5693. }
  5694. /*========================================================================*/
  5695. /* namespaces */
  5696. /*========================================================================*/
  5697. Scheme_Env *scheme_get_env(Scheme_Config *c)
  5698. XFORM_SKIP_PROC
  5699. {
  5700. Scheme_Object *o;
  5701. if (!c)
  5702. c = scheme_current_config();
  5703. o = scheme_get_param(c, MZCONFIG_ENV);
  5704. return (Scheme_Env *)o;
  5705. }
  5706. Scheme_Object *scheme_make_namespace(int argc, Scheme_Object *argv[])
  5707. {
  5708. Scheme_Env *genv, *env;
  5709. intptr_t phase;
  5710. genv = scheme_get_env(NULL);
  5711. env = scheme_make_empty_env();
  5712. for (phase = genv->phase; phase--; ) {
  5713. scheme_prepare_exp_env(env);
  5714. env = env->exp_env;
  5715. }
  5716. return (Scheme_Object *)env;
  5717. }
  5718. static Scheme_Object *namespace_p(int argc, Scheme_Object **argv)
  5719. {
  5720. return ((SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_namespace_type))
  5721. ? scheme_true
  5722. : scheme_false);
  5723. }
  5724. static Scheme_Object *current_namespace(int argc, Scheme_Object *argv[])
  5725. {
  5726. return scheme_param_config("current-namespace",
  5727. scheme_make_integer(MZCONFIG_ENV),
  5728. argc, argv,
  5729. -1, namespace_p, "namespace", 0);
  5730. }
  5731. /*========================================================================*/
  5732. /* security guards */
  5733. /*========================================================================*/
  5734. static Scheme_Object *make_security_guard(int argc, Scheme_Object *argv[])
  5735. {
  5736. Scheme_Security_Guard *sg;
  5737. if (!(SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_security_guard_type)))
  5738. scheme_wrong_type("make-security-guard", "security-guard", 0, argc, argv);
  5739. scheme_check_proc_arity("make-security-guard", 3, 1, argc, argv);
  5740. scheme_check_proc_arity("make-security-guard", 4, 2, argc, argv);
  5741. if (argc > 3)
  5742. scheme_check_proc_arity2("make-security-guard", 3, 3, argc, argv, 1);
  5743. sg = MALLOC_ONE_TAGGED(Scheme_Security_Guard);
  5744. sg->so.type = scheme_security_guard_type;
  5745. sg->parent = (Scheme_Security_Guard *)argv[0];
  5746. sg->file_proc = argv[1];
  5747. sg->network_proc = argv[2];
  5748. if ((argc > 3) && SCHEME_TRUEP(argv[3]))
  5749. sg->link_proc = argv[3];
  5750. return (Scheme_Object *)sg;
  5751. }
  5752. static Scheme_Object *security_guard_p(int argc, Scheme_Object *argv[])
  5753. {
  5754. return ((SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_security_guard_type))
  5755. ? scheme_true
  5756. : scheme_false);
  5757. }
  5758. static Scheme_Object *current_security_guard(int argc, Scheme_Object *argv[])
  5759. {
  5760. return scheme_param_config("current-security-guard",
  5761. scheme_make_integer(MZCONFIG_SECURITY_GUARD),
  5762. argc, argv,
  5763. -1, security_guard_p, "security-guard", 0);
  5764. }
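/* Walk the chain of security guards, applying each guard's file
   procedure to (who, path, mode-list); a guard denies access by
   raising an exception. The loop runs while a parent exists, so the
   root guard (which installs no procedures) is never consulted. */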
  5765. void scheme_security_check_file(const char *who, const char *filename, int guards)
  5766. {
  5767. Scheme_Security_Guard *sg;
  5768. sg = (Scheme_Security_Guard *)scheme_get_param(scheme_current_config(), MZCONFIG_SECURITY_GUARD);
  5769. if (sg->file_proc) {
  5770. Scheme_Object *l = scheme_null, *a[3];
  5771. if (guards & SCHEME_GUARD_FILE_EXISTS)
  5772. l = scheme_make_pair(exists_symbol, l);
  5773. if (guards & SCHEME_GUARD_FILE_DELETE)
  5774. l = scheme_make_pair(delete_symbol, l);
  5775. if (guards & SCHEME_GUARD_FILE_EXECUTE)
  5776. l = scheme_make_pair(execute_symbol, l);
  5777. if (guards & SCHEME_GUARD_FILE_WRITE)
  5778. l = scheme_make_pair(write_symbol, l);
  5779. if (guards & SCHEME_GUARD_FILE_READ)
  5780. l = scheme_make_pair(read_symbol, l);
  5781. a[0] = scheme_intern_symbol(who);
  5782. a[1] = (filename ? scheme_make_sized_path((char *)filename, -1, 1) : scheme_false);
  5783. a[2] = l;
  5784. while (sg->parent) {
  5785. scheme_apply(sg->file_proc, 3, a);
  5786. sg = sg->parent;
  5787. }
  5788. }
  5789. }
  5790. void scheme_security_check_file_link(const char *who, const char *filename, const char *content)
  5791. {
  5792. Scheme_Security_Guard *sg;
  5793. sg = (Scheme_Security_Guard *)scheme_get_param(scheme_current_config(), MZCONFIG_SECURITY_GUARD);
  5794. if (sg->file_proc) {
  5795. Scheme_Object *a[3];
  5796. a[0] = scheme_intern_symbol(who);
  5797. a[1] = scheme_make_sized_path((char *)filename, -1, 1);
  5798. a[2] = scheme_make_sized_path((char *)content, -1, 1);
  5799. while (sg->parent) {
  5800. if (sg->link_proc)
  5801. scheme_apply(sg->link_proc, 3, a);
  5802. else {
  5803. scheme_signal_error("%s: security guard does not allow any link operation; attempted from: %s to: %s",
  5804. who,
  5805. filename,
  5806. content);
  5807. }
  5808. sg = sg->parent;
  5809. }
  5810. }
  5811. }
  5812. void scheme_security_check_network(const char *who, const char *host, int port, int client)
  5813. {
  5814. Scheme_Security_Guard *sg;
  5815. sg = (Scheme_Security_Guard *)scheme_get_param(scheme_current_config(), MZCONFIG_SECURITY_GUARD);
  5816. if (sg->network_proc) {
  5817. Scheme_Object *a[4];
  5818. a[0] = scheme_intern_symbol(who);
  5819. a[1] = (host ? scheme_make_sized_utf8_string((char *)host, -1) : scheme_false);
  5820. a[2] = ((port < 1) ? scheme_false : scheme_make_integer(port));
  5821. a[3] = (client ? client_symbol : server_symbol);
  5822. while (sg->parent) {
  5823. scheme_apply(sg->network_proc, 4, a);
  5824. sg = sg->parent;
  5825. }
  5826. }
  5827. }
  5828. /*========================================================================*/
  5829. /* wills and will executors */
  5830. /*========================================================================*/
  5831. typedef struct ActiveWill {
  5832. MZTAG_IF_REQUIRED
  5833. Scheme_Object *o;
  5834. Scheme_Object *proc;
  5835. struct WillExecutor *w; /* Set to will executor when executed */
  5836. struct ActiveWill *next;
  5837. } ActiveWill;
  5838. typedef struct WillExecutor {
  5839. Scheme_Object so;
  5840. Scheme_Object *sema;
  5841. ActiveWill *first, *last;
  5842. int is_stubborn;
  5843. } WillExecutor;
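/* Finalizer callback: when `o' becomes collectable, queue an
   (object, will-procedure) pair on its executor and post the
   executor's semaphore. For a stubborn executor the data is a strong
   pair; otherwise it is an ephemeron, so a forgotten executor drops
   the will instead of keeping it (and the value) alive. */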
  5844. static void activate_will(void *o, void *data)
  5845. {
  5846. ActiveWill *a;
  5847. WillExecutor *w;
  5848. Scheme_Object *proc;
  5849. if (SCHEME_PAIRP(data)) {
  5850. w = (WillExecutor *)SCHEME_CAR(data);
  5851. proc = SCHEME_CDR(data);
  5852. } else {
  5853. w = (WillExecutor *)scheme_ephemeron_key(data);
  5854. proc = scheme_ephemeron_value(data);
  5855. }
  5856. if (w) {
  5857. a = MALLOC_ONE_RT(ActiveWill);
  5858. #ifdef MZTAG_REQUIRED
  5859. a->type = scheme_rt_will;
  5860. #endif
  5861. a->o = (Scheme_Object *)o;
  5862. a->proc = proc;
  5863. if (w->last)
  5864. w->last->next = a;
  5865. else
  5866. w->first = a;
  5867. w->last = a;
  5868. scheme_post_sema(w->sema);
  5869. }
  5870. }
  5871. static Scheme_Object *do_next_will(WillExecutor *w)
  5872. {
  5873. ActiveWill *a;
  5874. Scheme_Object *o[1];
  5875. a = w->first;
  5876. w->first = a->next;
  5877. if (!w->first)
  5878. w->last = NULL;
  5879. o[0] = a->o;
  5880. a->o = NULL;
  5881. return scheme_apply_multi(a->proc, 1, o);
  5882. }
  5883. static Scheme_Object *make_will_executor(int argc, Scheme_Object **argv)
  5884. {
  5885. WillExecutor *w;
  5886. Scheme_Object *sema;
  5887. w = MALLOC_ONE_TAGGED(WillExecutor);
  5888. sema = scheme_make_sema(0);
  5889. w->so.type = scheme_will_executor_type;
  5890. w->first = NULL;
  5891. w->last = NULL;
  5892. w->sema = sema;
  5893. w->is_stubborn = 0;
  5894. return (Scheme_Object *)w;
  5895. }
  5896. Scheme_Object *scheme_make_stubborn_will_executor()
  5897. {
  5898. WillExecutor *w;
  5899. w = (WillExecutor *)make_will_executor(0, NULL);
  5900. w->is_stubborn = 1;
  5901. return (Scheme_Object *)w;
  5902. }
  5903. static Scheme_Object *will_executor_p(int argc, Scheme_Object **argv)
  5904. {
  5905. return ((SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_will_executor_type))
  5906. ? scheme_true
  5907. : scheme_false);
  5908. }
  5909. static Scheme_Object *register_will(int argc, Scheme_Object **argv)
  5910. {
  5911. Scheme_Object *e;
  5912. if (NOT_SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_will_executor_type))
  5913. scheme_wrong_type("will-register", "will-executor", 0, argc, argv);
  5914. scheme_check_proc_arity("will-register", 1, 2, argc, argv);
  5915. if (((WillExecutor *)argv[0])->is_stubborn) {
  5916. e = scheme_make_pair(argv[0], argv[2]);
  5917. scheme_add_finalizer(argv[1], activate_will, e);
  5918. } else {
  5919. /* If we lose track of the will executor, then drop the finalizer. */
  5920. e = scheme_make_ephemeron(argv[0], argv[2]);
  5921. scheme_add_scheme_finalizer(argv[1], activate_will, e);
  5922. }
  5923. return scheme_void;
  5924. }
  5925. static Scheme_Object *will_executor_try(int argc, Scheme_Object **argv)
  5926. {
  5927. WillExecutor *w;
  5928. if (NOT_SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_will_executor_type))
  5929. scheme_wrong_type("will-try-execute", "will-executor", 0, argc, argv);
  5930. w = (WillExecutor *)argv[0];
  5931. if (scheme_wait_sema(w->sema, 1))
  5932. return do_next_will(w);
  5933. else
  5934. return scheme_false;
  5935. }
  5936. static Scheme_Object *will_executor_go(int argc, Scheme_Object **argv)
  5937. {
  5938. WillExecutor *w;
  5939. if (NOT_SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_will_executor_type))
  5940. scheme_wrong_type("will-execute", "will-executor", 0, argc, argv);
  5941. w = (WillExecutor *)argv[0];
  5942. scheme_wait_sema(w->sema, 0);
  5943. return do_next_will(w);
  5944. }
  5945. static Scheme_Object *will_executor_sema(Scheme_Object *w, int *repost)
  5946. {
  5947. *repost = 1;
  5948. return ((WillExecutor *)w)->sema;
  5949. }
  5950. /*========================================================================*/
  5951. /* GC preparation and timing */
  5952. /*========================================================================*/
  5953. #ifdef MZ_XFORM
  5954. START_XFORM_SKIP;
  5955. #endif
  5956. typedef struct Scheme_GC_Pre_Post_Callback_Desc {
  5957. /* All pointer fields => allocate with GC_malloc() */
  5958. Scheme_Object *boxed_key;
  5959. Scheme_Object *pre_desc;
  5960. Scheme_Object *post_desc;
  5961. struct Scheme_GC_Pre_Post_Callback_Desc *prev;
  5962. struct Scheme_GC_Pre_Post_Callback_Desc *next;
  5963. } Scheme_GC_Pre_Post_Callback_Desc;
  5964. Scheme_Object *scheme_add_gc_callback(Scheme_Object *pre, Scheme_Object *post)
  5965. {
  5966. Scheme_GC_Pre_Post_Callback_Desc *desc;
  5967. Scheme_Object *key, *boxed;
  5968. desc = (Scheme_GC_Pre_Post_Callback_Desc *)GC_malloc(sizeof(Scheme_GC_Pre_Post_Callback_Desc));
  5969. desc->pre_desc = pre;
  5970. desc->post_desc = post;
  5971. key = scheme_make_vector(1, scheme_false);
  5972. boxed = scheme_make_weak_box(key);
  5973. desc->boxed_key = boxed;
  5974. desc->next = gc_prepost_callback_descs;
  5975. gc_prepost_callback_descs = desc;
  5976. return key;
  5977. }
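/* The shape that run_gc_callbacks() (below) expects: `pre` and `post`
   are vectors of actions, and each action is a vector whose slot 0 is a
   protocol symbol, slot 1 a C pointer to the function, and the remaining
   slots its arguments. A minimal sketch of building one action, assuming
   that values made with scheme_make_cptr() satisfy
   scheme_extract_pointer(); `beep` and `hwnd` are hypothetical: */
#if 0
static Scheme_Object *make_gc_beep_acts(void (*beep)(void *data, int on), void *hwnd)
{
  Scheme_Object *act, *acts;
  act = scheme_make_vector(4, scheme_false);
  SCHEME_VEC_ELS(act)[0] = scheme_intern_symbol("osapi_ptr_int->void");
  SCHEME_VEC_ELS(act)[1] = scheme_make_cptr((void *)beep, scheme_false); /* the function */
  SCHEME_VEC_ELS(act)[2] = scheme_make_cptr(hwnd, scheme_false);         /* pointer argument */
  SCHEME_VEC_ELS(act)[3] = scheme_make_integer(1);                       /* int argument */
  acts = scheme_make_vector(1, act);
  return acts; /* pass as `pre` or `post` to scheme_add_gc_callback() */
}
#endif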
  5978. void scheme_remove_gc_callback(Scheme_Object *key)
  5979. {
  5980. Scheme_GC_Pre_Post_Callback_Desc *prev = NULL, *desc;
  5981. desc = gc_prepost_callback_descs;
  5982. while (desc) {
  5983. if (SAME_OBJ(SCHEME_WEAK_BOX_VAL(desc->boxed_key), key)) {
  5984. if (prev)
  5985. prev->next = desc->next;
  5986. else
  5987. gc_prepost_callback_descs = desc->next;
  5988. if (desc->next)
  5989. desc->next->prev = desc->prev;
5990. } else /* advance `prev` only past nodes that remain linked */
5991. prev = desc;
  5992. desc = desc->next;
  5993. }
  5994. }
  5995. #if defined(_MSC_VER)
  5996. # define mzOSAPI WINAPI
  5997. #else
  5998. # define mzOSAPI /* empty */
  5999. #endif
  6000. typedef void (*gccb_Ptr_Ptr_Ptr_Int_to_Void)(void*, void*, void*, int);
  6001. typedef void (*gccb_Ptr_Ptr_Ptr_to_Void)(void*, void*, void*);
  6002. typedef void (*gccb_Ptr_Ptr_Float_to_Void)(void*, void*, float);
  6003. typedef void (*gccb_Ptr_Ptr_Double_to_Void)(void*, void*, double);
  6004. typedef void (*gccb_Ptr_Ptr_Ptr_Nine_Ints)(void*,void*,void*,int,int,int,int,int,int,int,int,int);
  6005. typedef void (mzOSAPI *gccb_OSapi_Ptr_Int_to_Void)(void*, int);
  6006. typedef void (mzOSAPI *gccb_OSapi_Ptr_Ptr_to_Void)(void*, void*);
  6007. typedef void (mzOSAPI *gccb_OSapi_Ptr_Four_Ints_Ptr_Int_Int_Long_to_Void)(void*, int, int, int, int,
  6008. void*, int, int, long);
  6009. #ifdef DONT_USE_FOREIGN
  6010. # define scheme_extract_pointer(x) NULL
  6011. #endif
  6012. static void run_gc_callbacks(int pre)
  6013. XFORM_SKIP_PROC
  6014. {
  6015. Scheme_GC_Pre_Post_Callback_Desc *prev = NULL, *desc;
  6016. Scheme_Object *acts, *act, *protocol;
  6017. int j;
  6018. desc = gc_prepost_callback_descs;
  6019. while (desc) {
  6020. if (!SCHEME_WEAK_BOX_VAL(desc->boxed_key)) {
  6021. if (prev)
  6022. prev->next = desc->next;
  6023. else
  6024. gc_prepost_callback_descs = desc->next;
  6025. if (desc->next)
  6026. desc->next->prev = desc->prev;
  6027. } else {
  6028. if (pre)
  6029. acts = desc->pre_desc;
  6030. else
  6031. acts = desc->post_desc;
  6032. for (j = 0; j < SCHEME_VEC_SIZE(acts); j++) {
  6033. act = SCHEME_VEC_ELS(acts)[j];
  6034. protocol = SCHEME_VEC_ELS(act)[0];
6035. /* The set of supported protocols is arbitrary, based on what we've needed
6036. so far. */
  6037. if (!strcmp(SCHEME_SYM_VAL(protocol), "ptr_ptr_ptr_int->void")) {
  6038. gccb_Ptr_Ptr_Ptr_Int_to_Void proc;
  6039. void *a, *b, *c;
  6040. int i;
  6041. proc = (gccb_Ptr_Ptr_Ptr_Int_to_Void)scheme_extract_pointer(SCHEME_VEC_ELS(act)[1]);
  6042. a = scheme_extract_pointer(SCHEME_VEC_ELS(act)[2]);
  6043. b = scheme_extract_pointer(SCHEME_VEC_ELS(act)[3]);
  6044. c = scheme_extract_pointer(SCHEME_VEC_ELS(act)[4]);
  6045. i = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[5]);
  6046. proc(a, b, c, i);
  6047. } else if (!strcmp(SCHEME_SYM_VAL(protocol), "ptr_ptr_ptr->void")) {
  6048. gccb_Ptr_Ptr_Ptr_to_Void proc;
  6049. void *a, *b, *c;
  6050. proc = (gccb_Ptr_Ptr_Ptr_to_Void)scheme_extract_pointer(SCHEME_VEC_ELS(act)[1]);
  6051. a = scheme_extract_pointer(SCHEME_VEC_ELS(act)[2]);
  6052. b = scheme_extract_pointer(SCHEME_VEC_ELS(act)[3]);
  6053. c = scheme_extract_pointer(SCHEME_VEC_ELS(act)[4]);
  6054. proc(a, b, c);
  6055. } else if (!strcmp(SCHEME_SYM_VAL(protocol), "ptr_ptr_float->void")) {
  6056. gccb_Ptr_Ptr_Float_to_Void proc;
  6057. void *a, *b;
  6058. float f;
  6059. proc = (gccb_Ptr_Ptr_Float_to_Void)scheme_extract_pointer(SCHEME_VEC_ELS(act)[1]);
  6060. a = scheme_extract_pointer(SCHEME_VEC_ELS(act)[2]);
  6061. b = scheme_extract_pointer(SCHEME_VEC_ELS(act)[3]);
  6062. f = SCHEME_DBL_VAL(SCHEME_VEC_ELS(act)[4]);
  6063. proc(a, b, f);
  6064. } else if (!strcmp(SCHEME_SYM_VAL(protocol), "ptr_ptr_double->void")) {
  6065. gccb_Ptr_Ptr_Double_to_Void proc;
  6066. void *a, *b;
  6067. double d;
  6068. proc = (gccb_Ptr_Ptr_Double_to_Void)scheme_extract_pointer(SCHEME_VEC_ELS(act)[1]);
  6069. a = scheme_extract_pointer(SCHEME_VEC_ELS(act)[2]);
  6070. b = scheme_extract_pointer(SCHEME_VEC_ELS(act)[3]);
  6071. d = SCHEME_DBL_VAL(SCHEME_VEC_ELS(act)[4]);
  6072. proc(a, b, d);
  6073. } else if (!strcmp(SCHEME_SYM_VAL(protocol), "ptr_ptr_ptr_int_int_int_int_int_int_int_int_int->void")) {
  6074. gccb_Ptr_Ptr_Ptr_Nine_Ints proc;
  6075. void *a, *b, *c;
  6076. int i1, i2, i3, i4, i5, i6, i7, i8, i9;
  6077. proc = (gccb_Ptr_Ptr_Ptr_Nine_Ints)scheme_extract_pointer(SCHEME_VEC_ELS(act)[1]);
  6078. a = scheme_extract_pointer(SCHEME_VEC_ELS(act)[2]);
  6079. b = scheme_extract_pointer(SCHEME_VEC_ELS(act)[3]);
  6080. c = scheme_extract_pointer(SCHEME_VEC_ELS(act)[4]);
  6081. i1 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[5]);
  6082. i2 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[6]);
  6083. i3 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[7]);
  6084. i4 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[8]);
  6085. i5 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[9]);
  6086. i6 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[10]);
  6087. i7 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[11]);
  6088. i8 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[12]);
  6089. i9 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[13]);
  6090. proc(a, b, c, i1, i2, i3, i4, i5, i6, i7, i8, i9);
  6091. } else if (!strcmp(SCHEME_SYM_VAL(protocol), "osapi_ptr_ptr->void")) {
  6092. gccb_OSapi_Ptr_Ptr_to_Void proc;
  6093. void *a, *b;
  6094. proc = (gccb_OSapi_Ptr_Ptr_to_Void)scheme_extract_pointer(SCHEME_VEC_ELS(act)[1]);
  6095. a = scheme_extract_pointer(SCHEME_VEC_ELS(act)[2]);
  6096. b = scheme_extract_pointer(SCHEME_VEC_ELS(act)[3]);
  6097. proc(a, b);
  6098. } else if (!strcmp(SCHEME_SYM_VAL(protocol), "osapi_ptr_int->void")) {
  6099. gccb_OSapi_Ptr_Int_to_Void proc;
  6100. void *a;
  6101. int i;
  6102. proc = (gccb_OSapi_Ptr_Int_to_Void)scheme_extract_pointer(SCHEME_VEC_ELS(act)[1]);
  6103. a = scheme_extract_pointer(SCHEME_VEC_ELS(act)[2]);
  6104. i = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[3]);
  6105. proc(a, i);
  6106. } else if (!strcmp(SCHEME_SYM_VAL(protocol), "osapi_ptr_int_int_int_int_ptr_int_int_long->void")) {
  6107. gccb_OSapi_Ptr_Four_Ints_Ptr_Int_Int_Long_to_Void proc;
  6108. void *a, *b;
  6109. int i1, i2, i3, i4, i5, i6;
  6110. long l1;
  6111. proc = (gccb_OSapi_Ptr_Four_Ints_Ptr_Int_Int_Long_to_Void)scheme_extract_pointer(SCHEME_VEC_ELS(act)[1]);
  6112. a = scheme_extract_pointer(SCHEME_VEC_ELS(act)[2]);
  6113. i1 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[3]);
  6114. i2 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[4]);
  6115. i3 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[5]);
  6116. i4 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[6]);
  6117. b = scheme_extract_pointer(SCHEME_VEC_ELS(act)[7]);
  6118. i5 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[8]);
  6119. i6 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[9]);
  6120. l1 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[10]);
  6121. proc(a, i1, i2, i3, i4, b, i5, i6, l1);
  6122. }
6123. }
6124. prev = desc; /* advance `prev` only past descriptors that remain linked */
  6125. }
  6126. desc = desc->next;
  6127. }
  6128. }
  6129. void scheme_zero_unneeded_rands(Scheme_Thread *p)
  6130. {
6131. /* Call this procedure before a GC or before copying out a
6132. thread's stack. (Currently a no-op.) */
  6133. }
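/* Zeroing dead stack slots before a GC matters because a stale
   Scheme_Object * left in a no-longer-used runstack or mark-stack slot
   would otherwise keep its referent alive; prepare_thread_for_GC()
   below clears every region that the thread cannot legitimately read
   again. */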
  6134. static void prepare_thread_for_GC(Scheme_Object *t)
  6135. {
  6136. Scheme_Thread *p = (Scheme_Thread *)t;
6137. /* zero unused part of env stack in each thread */
  6138. if (!p->nestee) {
  6139. Scheme_Saved_Stack *saved;
  6140. # define RUNSTACK_TUNE(x) /* x - Used for performance tuning */
  6141. RUNSTACK_TUNE( intptr_t size; );
  6142. if ((!p->runstack_owner
  6143. || (p == *p->runstack_owner))
  6144. && p->runstack_start) {
  6145. intptr_t rs_end;
  6146. Scheme_Object **rs_start;
6147. /* If there's a meta-prompt, everything past its runstack boundary is unused and can be zeroed, too */
  6148. if (p->meta_prompt && (p->meta_prompt->runstack_boundary_start == p->runstack_start)) {
  6149. rs_end = p->meta_prompt->runstack_boundary_offset;
  6150. } else {
  6151. rs_end = p->runstack_size;
  6152. }
  6153. if ((p->runstack_tmp_keep >= p->runstack_start)
  6154. && (p->runstack_tmp_keep < p->runstack))
  6155. rs_start = p->runstack_tmp_keep;
  6156. else
  6157. rs_start = p->runstack;
  6158. scheme_set_runstack_limits(p->runstack_start,
  6159. p->runstack_size,
  6160. rs_start - p->runstack_start,
  6161. rs_end);
  6162. RUNSTACK_TUNE( size = p->runstack_size - (p->runstack - p->runstack_start); );
  6163. for (saved = p->runstack_saved; saved; saved = saved->prev) {
  6164. RUNSTACK_TUNE( size += saved->runstack_size; );
  6165. if (p->meta_prompt && (p->meta_prompt->runstack_boundary_start == saved->runstack_start)) {
  6166. rs_end = p->meta_prompt->runstack_boundary_offset;
  6167. } else {
  6168. rs_end = saved->runstack_size;
  6169. }
  6170. scheme_set_runstack_limits(saved->runstack_start,
  6171. saved->runstack_size,
  6172. saved->runstack_offset,
  6173. rs_end);
  6174. }
  6175. }
  6176. RUNSTACK_TUNE( printf("%ld\n", size); );
  6177. if (p->tail_buffer && (p->tail_buffer != p->runstack_tmp_keep)) {
  6178. int i;
  6179. for (i = 0; i < p->tail_buffer_size; i++) {
  6180. p->tail_buffer[i] = NULL;
  6181. }
  6182. }
  6183. }
  6184. if ((!p->cont_mark_stack_owner
  6185. || (p == *p->cont_mark_stack_owner))
  6186. && p->cont_mark_stack) {
  6187. int segcount, i, segpos;
  6188. /* release unused cont mark stack segments */
  6189. if (p->cont_mark_stack)
  6190. segcount = ((intptr_t)(p->cont_mark_stack - 1) >> SCHEME_LOG_MARK_SEGMENT_SIZE) + 1;
  6191. else
  6192. segcount = 0;
  6193. for (i = segcount; i < p->cont_mark_seg_count; i++) {
  6194. p->cont_mark_stack_segments[i] = NULL;
  6195. }
  6196. if (segcount < p->cont_mark_seg_count)
  6197. p->cont_mark_seg_count = segcount;
  6198. /* zero unused part of last mark stack segment */
  6199. segpos = ((intptr_t)p->cont_mark_stack >> SCHEME_LOG_MARK_SEGMENT_SIZE);
  6200. if (segpos < p->cont_mark_seg_count) {
  6201. Scheme_Cont_Mark *seg = p->cont_mark_stack_segments[segpos];
  6202. int stackpos = ((intptr_t)p->cont_mark_stack & SCHEME_MARK_SEGMENT_MASK);
  6203. if (seg) {
  6204. for (i = stackpos; i < SCHEME_MARK_SEGMENT_SIZE; i++) {
  6205. if (seg[i].key) {
  6206. seg[i].key = NULL;
  6207. seg[i].val = NULL;
  6208. seg[i].cache = NULL;
  6209. } else {
  6210. /* NULL means we already cleared from here on. */
  6211. break;
  6212. }
  6213. }
  6214. }
  6215. }
  6216. {
  6217. MZ_MARK_STACK_TYPE pos;
  6218. /* also zero out slots before the current bottom */
  6219. for (pos = 0; pos < p->cont_mark_stack_bottom; pos++) {
  6220. Scheme_Cont_Mark *seg;
  6221. int stackpos;
  6222. segpos = ((intptr_t)pos >> SCHEME_LOG_MARK_SEGMENT_SIZE);
  6223. seg = p->cont_mark_stack_segments[segpos];
  6224. if (seg) {
  6225. stackpos = ((intptr_t)pos & SCHEME_MARK_SEGMENT_MASK);
  6226. seg[stackpos].key = NULL;
  6227. seg[stackpos].val = NULL;
  6228. seg[stackpos].cache = NULL;
  6229. }
  6230. }
  6231. }
  6232. }
  6233. if (p->values_buffer) {
  6234. if (p->values_buffer_size > 128)
  6235. p->values_buffer = NULL;
  6236. else {
  6237. memset(p->values_buffer, 0, sizeof(Scheme_Object*) * p->values_buffer_size);
  6238. }
  6239. }
  6240. p->spare_runstack = NULL;
6241. /* zero unused part of list stack */
  6242. scheme_clean_list_stack(p);
  6243. }
  6244. void scheme_prepare_this_thread_for_GC(Scheme_Thread *p)
  6245. {
  6246. if (p == scheme_current_thread) {
  6247. #ifdef RUNSTACK_IS_GLOBAL
  6248. scheme_current_thread->runstack = MZ_RUNSTACK;
  6249. scheme_current_thread->runstack_start = MZ_RUNSTACK_START;
  6250. scheme_current_thread->cont_mark_stack = MZ_CONT_MARK_STACK;
  6251. scheme_current_thread->cont_mark_pos = MZ_CONT_MARK_POS;
  6252. #endif
  6253. }
  6254. prepare_thread_for_GC((Scheme_Object *)p);
  6255. }
  6256. static void get_ready_for_GC()
  6257. {
  6258. start_this_gc_time = scheme_get_process_milliseconds();
  6259. #ifdef MZ_USE_FUTURES
  6260. scheme_future_block_until_gc();
  6261. #endif
  6262. run_gc_callbacks(1);
  6263. scheme_zero_unneeded_rands(scheme_current_thread);
  6264. scheme_clear_modidx_cache();
  6265. scheme_clear_shift_cache();
  6266. scheme_clear_prompt_cache();
  6267. scheme_clear_rx_buffers();
  6268. scheme_clear_bignum_cache();
  6269. scheme_clear_delayed_load_cache();
  6270. #ifdef RUNSTACK_IS_GLOBAL
  6271. if (scheme_current_thread->running) {
  6272. scheme_current_thread->runstack = MZ_RUNSTACK;
  6273. scheme_current_thread->runstack_start = MZ_RUNSTACK_START;
  6274. scheme_current_thread->cont_mark_stack = MZ_CONT_MARK_STACK;
  6275. scheme_current_thread->cont_mark_pos = MZ_CONT_MARK_POS;
  6276. }
  6277. #endif
  6278. for_each_managed(scheme_thread_type, prepare_thread_for_GC);
  6279. #ifdef MZ_PRECISE_GC
  6280. scheme_flush_stack_copy_cache();
  6281. #endif
  6282. scheme_fuel_counter = 0;
  6283. scheme_jit_stack_boundary = (uintptr_t)-1;
  6284. #ifdef WINDOWS_PROCESSES
  6285. scheme_suspend_remembered_threads();
  6286. #endif
  6287. #if defined(UNIX_PROCESSES) && !defined(MZ_PLACES_WAITPID)
  6288. scheme_block_child_signals(1);
  6289. #endif
  6290. {
  6291. GC_CAN_IGNORE void *data;
  6292. data = scheme_gmp_tls_load(scheme_current_thread->gmp_tls);
  6293. scheme_current_thread->gmp_tls_data = data;
  6294. }
  6295. scheme_did_gc_count++;
  6296. }
  6297. extern int GC_words_allocd;
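/* done_with_GC() mirrors get_ready_for_GC() in reverse: gmp TLS data is
   restored, remembered processes (Windows) are resumed and child signals
   (Unix) unblocked, GC timing is accumulated, the post callbacks run,
   and futures are allowed to continue. */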
  6298. static void done_with_GC()
  6299. {
  6300. scheme_gmp_tls_unload(scheme_current_thread->gmp_tls, scheme_current_thread->gmp_tls_data);
  6301. scheme_current_thread->gmp_tls_data = NULL;
  6302. #ifdef RUNSTACK_IS_GLOBAL
  6303. # ifdef MZ_PRECISE_GC
  6304. if (scheme_current_thread->running) {
  6305. MZ_RUNSTACK = scheme_current_thread->runstack;
  6306. MZ_RUNSTACK_START = scheme_current_thread->runstack_start;
  6307. }
  6308. # endif
  6309. #endif
  6310. #ifdef WINDOWS_PROCESSES
  6311. scheme_resume_remembered_threads();
  6312. #endif
  6313. #if defined(UNIX_PROCESSES) && !defined(MZ_PLACES_WAITPID)
  6314. scheme_block_child_signals(0);
  6315. #endif
  6316. end_this_gc_time = scheme_get_process_milliseconds();
  6317. scheme_total_gc_time += (end_this_gc_time - start_this_gc_time);
  6318. run_gc_callbacks(0);
  6319. #ifdef MZ_USE_FUTURES
  6320. scheme_future_continue_after_gc();
  6321. #endif
  6322. #ifndef MZ_PRECISE_GC
  6323. {
  6324. Scheme_Logger *logger = scheme_get_main_logger();
  6325. if (logger) {
  6326. char buf[64];
  6327. intptr_t buflen;
  6328. sprintf(buf,
  6329. "GC in %" PRIdPTR " msec",
  6330. end_this_gc_time - start_this_gc_time);
  6331. buflen = strlen(buf);
  6332. scheme_log_message(logger, SCHEME_LOG_DEBUG, buf, buflen, NULL);
  6333. }
  6334. }
  6335. #endif
  6336. }
  6337. #ifdef MZ_PRECISE_GC
  6338. static char *gc_num(char *nums, int v)
  6339. /* format a number with commas */
  6340. {
  6341. int i, j, len, clen, c, d;
  6342. for (i = 0; nums[i] || nums[i+1]; i++) {
  6343. }
  6344. i++;
  6345. v /= 1024; /* bytes => kbytes */
  6346. sprintf(nums+i, "%d", v);
  6347. for (len = 0; nums[i+len]; len++) { }
  6348. clen = len + ((len + ((nums[i] == '-') ? -2 : -1)) / 3);
  6349. c = 0;
  6350. d = (clen - len);
  6351. for (j = i + clen - 1; j > i; j--) {
  6352. if (c == 3) {
  6353. nums[j] = ',';
  6354. d--;
  6355. c = 0;
  6356. } else {
  6357. nums[j] = nums[j - d];
  6358. c++;
  6359. }
  6360. }
  6361. return nums + i;
  6362. }
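/* Worked example: gc_num(nums, 1234567) scales to 1205 (KB), prints
   "1205", then rewrites it in place as "1,205" and returns a pointer to
   it. Each call appends after the previous string's terminator (the
   buffer starts zeroed), which is why the scan at the top looks for two
   consecutive NUL bytes. */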
  6363. static void inform_GC(int master_gc, int major_gc,
  6364. intptr_t pre_used, intptr_t post_used,
  6365. intptr_t pre_admin, intptr_t post_admin)
  6366. {
  6367. Scheme_Logger *logger = scheme_get_main_logger();
  6368. if (logger) {
  6369. /* Don't use scheme_log(), because it wants to allocate a buffer
  6370. based on the max value-print width, and we may not be at a
  6371. point where parameters are available. */
  6372. char buf[128], nums[128];
  6373. intptr_t buflen, delta, admin_delta;
  6374. #ifdef MZ_USE_PLACES
  6375. # define PLACE_ID_FORMAT "%d:"
  6376. #else
  6377. # define PLACE_ID_FORMAT ""
  6378. #endif
  6379. memset(nums, 0, sizeof(nums));
  6380. delta = pre_used - post_used;
  6381. admin_delta = (pre_admin - post_admin) - delta;
  6382. sprintf(buf,
  6383. "GC [" PLACE_ID_FORMAT "%s] at %sK(+%sK)[+%sK];"
  6384. " freed %sK(%s%sK) in %" PRIdPTR " msec",
  6385. #ifdef MZ_USE_PLACES
  6386. scheme_current_place_id,
  6387. #endif
  6388. (master_gc ? "MASTER" : (major_gc ? "MAJOR" : "minor")),
  6389. gc_num(nums, pre_used), gc_num(nums, pre_admin - pre_used),
  6390. gc_num(nums, scheme_code_page_total),
  6391. gc_num(nums, delta), ((admin_delta < 0) ? "" : "+"), gc_num(nums, admin_delta),
  6392. (master_gc ? 0 : (end_this_gc_time - start_this_gc_time)));
  6393. buflen = strlen(buf);
  6394. scheme_log_message(logger, SCHEME_LOG_DEBUG, buf, buflen, NULL);
  6395. }
  6396. }
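/* An illustrative line from the format above (numbers invented; the
   place-id prefix appears only under MZ_USE_PLACES):
     GC [0:MAJOR] at 12,340K(+2,048K)[+512K]; freed 5,120K(+128K) in 32 msec
   i.e., live data, administrative overhead, JIT-generated code pages,
   amount freed, change in administrative overhead, and elapsed process
   time. */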
  6397. #endif
  6398. #ifdef MZ_XFORM
  6399. END_XFORM_SKIP;
  6400. #endif
  6401. /*========================================================================*/
  6402. /* stats */
  6403. /*========================================================================*/
  6404. static void set_perf_vector(Scheme_Object *v, Scheme_Object *ov, int i, Scheme_Object *a)
  6405. {
  6406. if (SAME_OBJ(v, ov))
  6407. SCHEME_VEC_ELS(v)[i] = a;
  6408. else
  6409. scheme_chaperone_vector_set(ov, i, a);
  6410. }
  6411. static Scheme_Object *current_stats(int argc, Scheme_Object *argv[])
  6412. {
  6413. Scheme_Object *v, *ov;
  6414. Scheme_Thread *t = NULL;
  6415. v = argv[0];
  6416. ov = v;
  6417. if (SCHEME_CHAPERONEP(v))
  6418. v = SCHEME_CHAPERONE_VAL(v);
  6419. if (!SCHEME_MUTABLE_VECTORP(v))
  6420. scheme_wrong_type("vector-set-performance-stats!", "mutable vector", 0, argc, argv);
  6421. if (argc > 1) {
  6422. if (!SCHEME_FALSEP(argv[1])) {
  6423. if (!SCHEME_THREADP(argv[1]))
6424. scheme_wrong_type("vector-set-performance-stats!", "thread or #f", 1, argc, argv);
  6425. t = (Scheme_Thread *)argv[1];
  6426. }
  6427. }
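/* Both switches below fall through deliberately: a caller-supplied
   vector of size N receives just the first N statistics, so shorter
   vectors get a prefix of the full set. */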
  6428. if (t) {
  6429. switch (SCHEME_VEC_SIZE(v)) {
  6430. default:
  6431. case 4:
  6432. {
  6433. /* Stack size: */
  6434. intptr_t sz = 0;
  6435. if (MZTHREAD_STILL_RUNNING(t->running)) {
  6436. Scheme_Overflow *overflow;
  6437. Scheme_Saved_Stack *runstack_saved;
  6438. /* C stack */
  6439. if (t == scheme_current_thread) {
  6440. void *stk_start, *stk_end;
  6441. stk_start = t->stack_start;
  6442. stk_end = (void *)&stk_end;
  6443. # ifdef STACK_GROWS_UP
  6444. sz = (intptr_t)stk_end XFORM_OK_MINUS (intptr_t)stk_start;
  6445. # endif
  6446. # ifdef STACK_GROWS_DOWN
  6447. sz = (intptr_t)stk_start XFORM_OK_MINUS (intptr_t)stk_end;
  6448. # endif
  6449. } else {
  6450. if (t->jmpup_buf.stack_copy)
  6451. sz = t->jmpup_buf.stack_size;
  6452. }
  6453. for (overflow = t->overflow; overflow; overflow = overflow->prev) {
  6454. sz += overflow->jmp->cont.stack_size;
  6455. }
  6456. /* Scheme stack */
  6457. {
  6458. int ssz;
  6459. if (t == scheme_current_thread) {
  6460. ssz = (MZ_RUNSTACK_START + t->runstack_size) - MZ_RUNSTACK;
  6461. } else {
  6462. ssz = (t->runstack_start + t->runstack_size) - t->runstack;
  6463. }
  6464. for (runstack_saved = t->runstack_saved; runstack_saved; runstack_saved = runstack_saved->prev) {
  6465. ssz += runstack_saved->runstack_size;
  6466. }
  6467. sz += sizeof(Scheme_Object *) * ssz;
  6468. }
  6469. /* Mark stack */
  6470. if (t == scheme_current_thread) {
  6471. sz += ((intptr_t)scheme_current_cont_mark_pos >> 1) * sizeof(Scheme_Cont_Mark);
  6472. } else {
  6473. sz += ((intptr_t)t->cont_mark_pos >> 1) * sizeof(Scheme_Cont_Mark);
  6474. }
  6475. }
  6476. set_perf_vector(v, ov, 3, scheme_make_integer(sz));
  6477. }
  6478. case 3:
  6479. set_perf_vector(v, ov, 2, (t->block_descriptor
  6480. ? scheme_true
  6481. : ((t->running & MZTHREAD_SUSPENDED)
  6482. ? scheme_true
  6483. : scheme_false)));
  6484. case 2:
  6485. {
  6486. Scheme_Object *dp;
  6487. dp = thread_dead_p(1, (Scheme_Object **) mzALIAS &t);
  6488. set_perf_vector(v, ov, 1, dp);
  6489. }
  6490. case 1:
  6491. {
  6492. Scheme_Object *rp;
  6493. rp = thread_running_p(1, (Scheme_Object **) mzALIAS &t);
  6494. set_perf_vector(v, ov, 0, rp);
  6495. }
  6496. case 0:
  6497. break;
  6498. }
  6499. } else {
  6500. intptr_t cpuend, end, gcend;
  6501. cpuend = scheme_get_process_milliseconds();
  6502. end = scheme_get_milliseconds();
  6503. gcend = scheme_total_gc_time;
  6504. switch (SCHEME_VEC_SIZE(v)) {
  6505. default:
  6506. case 11:
  6507. set_perf_vector(v, ov, 10, scheme_make_integer(scheme_jit_malloced));
  6508. case 10:
  6509. set_perf_vector(v, ov, 9, scheme_make_integer(scheme_hash_iteration_count));
  6510. case 9:
  6511. set_perf_vector(v, ov, 8, scheme_make_integer(scheme_hash_request_count));
  6512. case 8:
  6513. set_perf_vector(v, ov, 7, scheme_make_integer(scheme_num_read_syntax_objects));
  6514. case 7:
  6515. set_perf_vector(v, ov, 6, scheme_make_integer(num_running_threads+1));
  6516. case 6:
  6517. set_perf_vector(v, ov, 5, scheme_make_integer(scheme_overflow_count));
  6518. case 5:
  6519. set_perf_vector(v, ov, 4, scheme_make_integer(thread_swap_count));
  6520. case 4:
  6521. set_perf_vector(v, ov, 3, scheme_make_integer(scheme_did_gc_count));
  6522. case 3:
  6523. set_perf_vector(v, ov, 2, scheme_make_integer(gcend));
  6524. case 2:
  6525. set_perf_vector(v, ov, 1, scheme_make_integer(end));
  6526. case 1:
  6527. set_perf_vector(v, ov, 0, scheme_make_integer(cpuend));
  6528. case 0:
  6529. break;
  6530. }
  6531. }
  6532. return scheme_void;
  6533. }
  6534. /*========================================================================*/
  6535. /* gmp allocation */
  6536. /*========================================================================*/
  6537. /* Allocate atomic, immobile memory for GMP. Although we have set up
  6538. GMP to reliably free anything that it allocates, we allocate via
6539. the GC to get accounting with 3m. The set of allocated blocks is
  6540. stored in a "mem_pool" variable, which is a linked list; GMP
  6541. allocates with a stack discipline, so maintaining the list is easy.
6542. Meanwhile, scheme_gmp_tls_unload, etc., attach the pool to the
  6543. owning thread as needed for GC. */
  6544. void *scheme_malloc_gmp(uintptr_t amt, void **mem_pool)
  6545. {
  6546. void *p, *mp;
  6547. #ifdef MZ_PRECISE_GC
  6548. if (amt < GC_malloc_stays_put_threshold())
  6549. amt = GC_malloc_stays_put_threshold();
  6550. #endif
  6551. p = scheme_malloc_atomic(amt);
  6552. mp = scheme_make_raw_pair(p, *mem_pool);
  6553. *mem_pool = mp;
  6554. return p;
  6555. }
  6556. void scheme_free_gmp(void *p, void **mem_pool)
  6557. {
  6558. if (p != SCHEME_CAR(*mem_pool))
  6559. scheme_log(NULL,
  6560. SCHEME_LOG_FATAL,
  6561. 0,
  6562. "bad GMP memory free");
  6563. *mem_pool = SCHEME_CDR(*mem_pool);
  6564. }
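/* Since GMP allocates and frees with a stack discipline, the block
   being freed is always at the head of the raw-pair list; a minimal
   hypothetical sketch: */
#if 0
{
  void *pool = NULL, *a, *b;
  a = scheme_malloc_gmp(64, &pool);   /* pool = (a) */
  b = scheme_malloc_gmp(128, &pool);  /* pool = (b a) */
  scheme_free_gmp(b, &pool);          /* b is at the head: ok, pool = (a) */
  scheme_free_gmp(a, &pool);          /* a is at the head: ok, pool = () */
}
#endif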
  6565. /*========================================================================*/
  6566. /* precise GC */
  6567. /*========================================================================*/
  6568. Scheme_Jumpup_Buf_Holder *scheme_new_jmpupbuf_holder(void)
  6569. /* Scheme_Jumpup_Buf_Holder exists for precise GC, and for external
  6570. programs that want to store Jumpup_Bufs, because the GC interaction
  6571. is tricky. For example, we use it above for a special trampoline
  6572. implementation. */
  6573. {
  6574. Scheme_Jumpup_Buf_Holder *h;
  6575. h = MALLOC_ONE_RT(Scheme_Jumpup_Buf_Holder);
  6576. #ifdef MZ_PRECISE_GC
  6577. h->type = scheme_rt_buf_holder;
  6578. #endif
  6579. return h;
  6580. }
  6581. #ifdef MZ_PRECISE_GC
  6582. uintptr_t scheme_get_current_thread_stack_start(void)
  6583. {
  6584. Scheme_Thread *p;
  6585. p = scheme_current_thread;
  6586. return (uintptr_t)p->stack_start;
  6587. }
  6588. #endif
  6589. #ifdef MZ_PRECISE_GC
  6590. START_XFORM_SKIP;
  6591. #include "mzmark_thread.inc"
  6592. static void register_traversers(void)
  6593. {
  6594. GC_REG_TRAV(scheme_will_executor_type, mark_will_executor_val);
  6595. GC_REG_TRAV(scheme_custodian_type, mark_custodian_val);
  6596. GC_REG_TRAV(scheme_cust_box_type, mark_custodian_box_val);
  6597. GC_REG_TRAV(scheme_thread_hop_type, mark_thread_hop);
  6598. GC_REG_TRAV(scheme_evt_set_type, mark_evt_set);
  6599. GC_REG_TRAV(scheme_thread_set_type, mark_thread_set);
  6600. GC_REG_TRAV(scheme_config_type, mark_config);
  6601. GC_REG_TRAV(scheme_thread_cell_type, mark_thread_cell);
  6602. GC_REG_TRAV(scheme_rt_param_data, mark_param_data);
  6603. GC_REG_TRAV(scheme_rt_will, mark_will);
  6604. GC_REG_TRAV(scheme_rt_evt, mark_evt);
  6605. GC_REG_TRAV(scheme_rt_syncing, mark_syncing);
  6606. GC_REG_TRAV(scheme_rt_parameterization, mark_parameterization);
  6607. }
  6608. END_XFORM_SKIP;
  6609. #endif