/src/rt/rust_task.h

/**
   The rust task is a cooperatively-scheduled green thread that executes
   Rust code on a segmented stack.

   This class has too many responsibilities:

   * Working with the scheduler loop to signal and respond to state changes,
     and dealing with all the thread synchronization issues involved

   * Managing the dynamically resizing list of Rust stack segments

   * Switching between running Rust code on the Rust segmented stack and
     foreign C code on large stacks owned by the scheduler

   The lifetime of a rust_task object closely mirrors that of a running Rust
   task object, but they are not identical. In particular, the rust_task is an
   atomically reference counted object that might be accessed from arbitrary
   threads at any time. This may keep the task from being destroyed even after
   the task is dead from a Rust task lifecycle perspective.

   FIXME (#2696): The task and the scheduler have an over-complicated,
   undocumented protocol for shutting down the task, hopefully without
   races. It would be easier to reason about if other runtime objects could
   not access the task from arbitrary threads, and didn't need to be
   atomically refcounted.
 */
#ifndef RUST_TASK_H
#define RUST_TASK_H

#include <map>

#include "rust_globals.h"
#include "util/array_list.h"
#include "context.h"
#include "rust_debug.h"
#include "rust_kernel.h"
#include "boxed_region.h"
#include "rust_stack.h"
#include "rust_port_selector.h"
#include "rust_type.h"
#include "rust_sched_loop.h"
// The amount of extra space at the end of each stack segment, available
// to the rt, compiler and dynamic linker for running small functions
// FIXME (#1509): We want this to be 128 but need to slim the red zone calls
// down, disable lazy symbol relocation, and other things we haven't
// discovered yet
#define RZ_LINUX_32 (1024*2)
#define RZ_LINUX_64 (1024*2)
#define RZ_MAC_32   (1024*20)
#define RZ_MAC_64   (1024*20)
#define RZ_WIN_32   (1024*20)
#define RZ_BSD_32   (1024*20)
#define RZ_BSD_64   (1024*20)
#ifdef __linux__
#ifdef __i386__
#define RED_ZONE_SIZE RZ_LINUX_32
#endif
#ifdef __x86_64__
#define RED_ZONE_SIZE RZ_LINUX_64
#endif
#endif
#ifdef __APPLE__
#ifdef __i386__
#define RED_ZONE_SIZE RZ_MAC_32
#endif
#ifdef __x86_64__
#define RED_ZONE_SIZE RZ_MAC_64
#endif
#endif
#ifdef __WIN32__
#ifdef __i386__
#define RED_ZONE_SIZE RZ_WIN_32
#endif
#ifdef __x86_64__
#define RED_ZONE_SIZE RZ_WIN_64
#endif
#endif
#ifdef __FreeBSD__
#ifdef __i386__
#define RED_ZONE_SIZE RZ_BSD_32
#endif
#ifdef __x86_64__
#define RED_ZONE_SIZE RZ_BSD_64
#endif
#endif
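
// Illustrative arithmetic (not part of the original header): with a
// RED_ZONE_SIZE of 2 KB on 64-bit Linux, a stack segment exposes
//
//     stk->end - (uintptr_t)&stk->data[0] - 2048
//
// bytes to Rust code; the remaining 2 KB at the end stay reserved for the
// runtime, compiler glue and dynamic linker. See user_stack_size() below.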

struct rust_box;

struct frame_glue_fns {
    uintptr_t mark_glue_off;
    uintptr_t drop_glue_off;
    uintptr_t reloc_glue_off;
};

// std::lib::task::task_result
typedef unsigned long task_result;
#define tr_success 0
#define tr_failure 1

struct spawn_args;
struct cleanup_args;
struct reset_args;
struct new_stack_args;

// std::lib::task::task_notification
//
// since it's currently a unary tag, we only add the fields.
struct task_notification {
    rust_task_id id;
    task_result result; // task_result
};
extern "C" void
rust_task_fail(rust_task *task,
               char const *expr,
               char const *file,
               size_t line);
struct
rust_task : public kernel_owned<rust_task>
{
    RUST_ATOMIC_REFCOUNT();

    rust_task_id id;

    context ctx;
    stk_seg *stk;
    uintptr_t runtime_sp;      // Runtime sp while task running.
    rust_scheduler *sched;
    rust_sched_loop *sched_loop;

    // Fields known only to the runtime.
    rust_kernel *kernel;
    const char *const name;
    int32_t list_index;
    // Rendezvous pointer for receiving data when blocked on a port. If we're
    // trying to read data and no data is available on any incoming channel,
    // we block on the port and yield control to the scheduler. Since we
    // were not able to read anything, we remember the location where the
    // result should go in the rendezvous_ptr, and let the sender write to
    // that location before waking us up.
    uintptr_t* rendezvous_ptr;
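
    // Illustrative sketch of the rendezvous protocol described above. The
    // variable names are hypothetical; only rendezvous_ptr, block() and
    // wakeup() are real members:
    //
    //     // receiving task, nothing available on the port:
    //     task->rendezvous_ptr = &result_slot;     // where the value belongs
    //     task->block(port_cond, "port receive");  // yield to the scheduler
    //
    //     // sending task, later:
    //     *receiver->rendezvous_ptr = value;       // deliver the data directly
    //     receiver->wakeup(port_cond);             // unblock the receiver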
    memory_region local_region;
    boxed_region boxed;

    // Indicates that fail() has been called and we are cleaning up.
    // We use this to suppress the "killed" flag during calls to yield.
    bool unwinding;

    bool propagate_failure;

    uint32_t cc_counter;

    debug::task_debug_info debug;

    // The amount of stack we're using, excluding red zones
    size_t total_stack_sz;

    // Used by rust task management routines in libcore/task.rs.
    void *task_local_data;
    void (*task_local_data_cleanup)(void *data);
private:

    // Protects state, cond, cond_name
    // Protects the killed flag, disallow_kill flag, reentered_rust_stack
    lock_and_signal lifecycle_lock;
    rust_task_state state;
    rust_cond *cond;
    const char *cond_name;

    bool event_reject;
    rust_cond event_cond;
    void *event;

    // Indicates that the task was killed and needs to unwind
    bool killed;
    // Indicates that we've called back into Rust from C
    bool reentered_rust_stack;
    unsigned long disallow_kill;
    unsigned long disallow_yield;

    // The stack used for running C code, borrowed from the scheduler thread
    stk_seg *c_stack;
    uintptr_t next_c_sp;
    uintptr_t next_rust_sp;

    rust_port_selector port_selector;
    // Called when the atomic refcount reaches zero
    void delete_this();

    void new_stack_fast(size_t requested_sz);
    void new_stack(size_t requested_sz);
    void free_stack(stk_seg *stk);
    size_t get_next_stack_size(size_t min, size_t current, size_t requested);

    void return_c_stack();

    void transition(rust_task_state src, rust_task_state dst,
                    rust_cond *cond, const char* cond_name);
    void transition_inner(rust_task_state src, rust_task_state dst,
                          rust_cond *cond, const char* cond_name);

    bool must_fail_from_being_killed_inner();

    // Called by rust_task_fail to unwind on failure
    void begin_failure(char const *expr,
                       char const *file,
                       size_t line);

    friend void task_start_wrapper(spawn_args *a);
    friend void cleanup_task(cleanup_args *a);
    friend void reset_stack_limit_on_c_stack(reset_args *a);
    friend void new_stack_slow(new_stack_args *a);
    friend void rust_task_fail(rust_task *task,
                               char const *expr,
                               char const *file,
                               size_t line);

    friend class rust_port;
    friend class rust_port_selector;

    bool block_inner(rust_cond *on, const char* name);
    void wakeup_inner(rust_cond *from);
    bool blocked_on(rust_cond *cond);
public:

    // Only a pointer to 'name' is kept, so it must live as long as this task.
    rust_task(rust_sched_loop *sched_loop,
              rust_task_state state,
              const char *name,
              size_t init_stack_sz);

    void start(spawn_fn spawnee_fn,
               rust_opaque_box *env,
               void *args);
    void start();
    void assert_is_running();

    void *malloc(size_t sz, const char *tag, type_desc *td=0);
    void *realloc(void *data, size_t sz);
    void free(void *p);

    void set_state(rust_task_state state,
                   rust_cond *cond, const char* cond_name);

    bool block(rust_cond *on, const char* name);
    void wakeup(rust_cond *from);
    void die();

    // Print a backtrace, if the "bt" logging option is on.
    void backtrace();

    // Yields control to the scheduler. Called from the Rust stack.
    // Returns TRUE if the task was killed and needs to fail (see the usage
    // sketch after this struct).
    MUST_CHECK bool yield();

    // Fail this task (assuming caller-on-stack is different task).
    void kill();
    void kill_inner();

    // Indicates that we've been killed and now is an appropriate
    // time to fail as a result
    bool must_fail_from_being_killed();

    // Fail self, assuming caller-on-stack is this task.
    void fail();
    void fail(char const *expr, char const *file, size_t line);
    // Propagate failure to the entire rust runtime.
    void fail_sched_loop();

    frame_glue_fns *get_frame_glue_fns(uintptr_t fp);

    void *calloc(size_t size, const char *tag);

    // Use this function sparingly. Depending on the ref count is generally
    // not at all safe.
    intptr_t get_ref_count() const { return ref_count; }

    void *next_stack(size_t stk_sz, void *args_addr, size_t args_sz);
    void prev_stack();
    void record_stack_limit();
    void reset_stack_limit();

    bool on_rust_stack();
    void check_stack_canary();
    void delete_all_stacks();

    void call_on_c_stack(void *args, void *fn_ptr);
    void call_on_rust_stack(void *args, void *fn_ptr);
    bool have_c_stack() { return c_stack != NULL; }

    rust_port_selector *get_port_selector() { return &port_selector; }

    rust_task_state get_state() { return state; }
    rust_cond *get_cond() { return cond; }
    const char *get_cond_name() { return cond_name; }

    void clear_event_reject() {
        this->event_reject = false;
    }

    // Returns TRUE if the task was killed and needs to fail.
    MUST_CHECK bool wait_event(void **result);
    void signal_event(void *event);

    void cleanup_after_turn();

    void inhibit_kill();
    void allow_kill();
    void inhibit_yield();
    void allow_yield();
};
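
// Usage sketch (illustrative, not part of the original header): the
// MUST_CHECK results of yield() and wait_event() report that this task was
// killed while parked, in which case the documented response is to fail():
//
//     // on this task's Rust stack
//     if (task->yield()) {
//         task->fail();            // killed while yielded; start unwinding
//     }
//
//     void *event;
//     if (task->wait_event(&event)) {
//         task->fail();
//     }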

// FIXME (#2697): It would be really nice to be able to get rid of this.
inline void *operator new[](size_t size, rust_task *task, const char *tag) {
    return task->malloc(size, tag);
}

template <typename T> struct task_owned {
    inline void *operator new(size_t size, rust_task *task,
                              const char *tag) {
        return task->malloc(size, tag);
    }

    inline void *operator new[](size_t size, rust_task *task,
                                const char *tag) {
        return task->malloc(size, tag);
    }

    inline void *operator new(size_t size, rust_task &task,
                              const char *tag) {
        return task.malloc(size, tag);
    }

    inline void *operator new[](size_t size, rust_task &task,
                                const char *tag) {
        return task.malloc(size, tag);
    }

    void operator delete(void *ptr) {
        ((T *)ptr)->task->free(ptr);
    }
};
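
// Illustrative usage of the allocation operators above. The type `my_obj` is
// hypothetical; any class deriving from task_owned with a `task` field works
// the same way:
//
//     struct my_obj : public task_owned<my_obj> {
//         rust_task *task;
//         my_obj(rust_task *t) : task(t) { }
//     };
//
//     my_obj *obj = new (task, "my_obj") my_obj(task);  // routes to task->malloc()
//     delete obj;                                       // routes to task->free()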

// This stuff is on the stack-switching fast path

// Records the pointer to the end of the Rust stack in a platform-
// specific location in the thread control block
extern "C" CDECL void      record_sp_limit(void *limit);
extern "C" CDECL uintptr_t get_sp_limit();
// Gets a pointer to the vicinity of the current stack pointer
extern "C" uintptr_t       get_sp();

// This is the function that switches between the C and the Rust stack by
// calling another function with a single void* argument while changing the
// stack pointer. It has a funny name because gdb doesn't normally like to
// backtrace through split stacks (thinks it indicates a bug), but has a
// special case to allow functions named __morestack to move the stack pointer
// around.
extern "C" void __morestack(void *args, void *fn_ptr, uintptr_t stack_ptr);

inline static uintptr_t
sanitize_next_sp(uintptr_t next_sp) {

    // Since I'm not precisely sure where the next stack pointer sits in
    // relation to where the context switch actually happened, nor in relation
    // to the amount of stack needed for calling __morestack, I've added some
    // extra bytes here.

    // FIXME (#2698): On the rust stack this potentially puts us quite far
    // into the red zone. Might want to just allocate a new rust stack every
    // time we switch back to rust.
    const uintptr_t padding = 16;

    return align_down(next_sp - padding);
}

inline void
rust_task::call_on_c_stack(void *args, void *fn_ptr) {
    // Too expensive to check
    // assert(on_rust_stack());

    // The shim functions generated by rustc contain the morestack prologue,
    // so we need to let them know they have enough stack.
    record_sp_limit(0);

    uintptr_t prev_rust_sp = next_rust_sp;
    next_rust_sp = get_sp();

    bool borrowed_a_c_stack = false;
    uintptr_t sp;
    if (c_stack == NULL) {
        c_stack = sched_loop->borrow_c_stack();
        next_c_sp = align_down(c_stack->end);
        sp = next_c_sp;
        borrowed_a_c_stack = true;
    } else {
        sp = sanitize_next_sp(next_c_sp);
    }

    __morestack(args, fn_ptr, sp);

    // Note that we may not actually get here if we threw an exception,
    // in which case we will return the c stack when the exception is caught.
    if (borrowed_a_c_stack) {
        return_c_stack();
    }

    next_rust_sp = prev_rust_sp;

    record_stack_limit();
}

inline void
rust_task::call_on_rust_stack(void *args, void *fn_ptr) {
    // Too expensive to check
    // assert(!on_rust_stack());

    // Because of the hack in the other function that disables the stack limit
    // when entering the C stack, here we restore the stack limit again.
    record_stack_limit();

    assert(get_sp_limit() != 0 && "Stack must be configured");
    assert(next_rust_sp);

    bool had_reentered_rust_stack;
    {
        scoped_lock with(lifecycle_lock);
        had_reentered_rust_stack = reentered_rust_stack;
        reentered_rust_stack = true;
    }

    uintptr_t prev_c_sp = next_c_sp;
    next_c_sp = get_sp();

    uintptr_t sp = sanitize_next_sp(next_rust_sp);

    // FIXME (#2047): There are times when this is called and needs
    // to be able to throw, and we don't account for that.
    __morestack(args, fn_ptr, sp);

    next_c_sp = prev_c_sp;

    {
        scoped_lock with(lifecycle_lock);
        reentered_rust_stack = had_reentered_rust_stack;
    }

    record_sp_limit(0);
}

inline void
rust_task::return_c_stack() {
    // Too expensive to check
    // assert(on_rust_stack());
    assert(c_stack != NULL);
    sched_loop->return_c_stack(c_stack);
    c_stack = NULL;
    next_c_sp = 0;
}

// NB: This runs on the Rust stack
inline void *
rust_task::next_stack(size_t stk_sz, void *args_addr, size_t args_sz) {
    new_stack_fast(stk_sz + args_sz);
    assert(stk->end - (uintptr_t)stk->data >= stk_sz + args_sz
           && "Did not receive enough stack");
    uint8_t *new_sp = (uint8_t*)stk->end;
    // Push the function arguments to the new stack
    new_sp = align_down(new_sp - args_sz);

    // I don't know exactly where the region ends that valgrind needs us
    // to mark accessible. On x86_64 these extra bytes aren't needed, but
    // on i386 we get errors without them.
    const int fudge_bytes = 16;
    reuse_valgrind_stack(stk, new_sp - fudge_bytes);

    memcpy(new_sp, args_addr, args_sz);

    record_stack_limit();

    return new_sp;
}

// The amount of stack in a segment available to Rust code
inline size_t
user_stack_size(stk_seg *stk) {
    return (size_t)(stk->end
                    - (uintptr_t)&stk->data[0]
                    - RED_ZONE_SIZE);
}

struct new_stack_args {
    rust_task *task;
    size_t requested_sz;
};

void
new_stack_slow(new_stack_args *args);

// NB: This runs on the Rust stack
// This is the new stack fast path, in which we
// reuse the next cached stack segment
inline void
rust_task::new_stack_fast(size_t requested_sz) {
    // The minimum stack size, in bytes, of a Rust stack, excluding red zone
    size_t min_sz = sched_loop->min_stack_size;

    // Try to reuse an existing stack segment
    if (stk != NULL && stk->next != NULL) {
        size_t next_sz = user_stack_size(stk->next);
        if (min_sz <= next_sz && requested_sz <= next_sz) {
            stk = stk->next;
            return;
        }
    }

    new_stack_args args = {this, requested_sz};
    call_on_c_stack(&args, (void*)new_stack_slow);
}

// NB: This runs on the Rust stack
inline void
rust_task::prev_stack() {
    // We're not going to actually delete anything now because that would
    // require switching to the C stack and be costly. Instead we'll just move
    // up the linked list and clean up later, either in new_stack or after our
    // turn ends on the scheduler.
    stk = stk->prev;
    record_stack_limit();
}

extern "C" CDECL void
record_sp_limit(void *limit);

// The LLVM-generated segmented-stack function prolog compares the amount of
// stack needed for each frame to the end-of-stack pointer stored in the
// TCB. As an optimization, when the frame size is less than 256 bytes, it
// will simply compare %esp to the stack limit instead of subtracting the
// frame size. As a result we need our stack limit to account for those 256
// bytes.
const unsigned LIMIT_OFFSET = 256;

inline void
rust_task::record_stack_limit() {
    assert(stk);
    assert((uintptr_t)stk->end - RED_ZONE_SIZE
           - (uintptr_t)stk->data >= LIMIT_OFFSET
           && "Stack size must be greater than LIMIT_OFFSET");
    record_sp_limit(stk->data + LIMIT_OFFSET + RED_ZONE_SIZE);
}
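
// Worked example (illustrative, not part of the original header): on 64-bit
// Linux, RED_ZONE_SIZE is 2048, so the limit recorded above is
//
//     (uintptr_t)stk->data + LIMIT_OFFSET + RED_ZONE_SIZE
//         = seg_addr + sizeof(stk_seg) + 256 + 2048
//
// (assuming `data` is the trailing array of stk_seg, which the reverse
// calculation in rust_get_current_task() below relies on). Frames under 256
// bytes that compare %esp directly against this limit therefore still stop
// short of the red zone.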

inline rust_task* rust_get_current_task() {
    uintptr_t sp_limit = get_sp_limit();

    // FIXME (#1226) - Because of a hack in upcall_call_shim_on_c_stack this
    // value is sometimes inconveniently set to 0, so we can't use this
    // method of retrieving the task pointer and need to fall back to TLS.
    if (sp_limit == 0)
        return rust_sched_loop::get_task_tls();

    // The stack pointer boundary is stored in a quickly-accessible location
    // in the TCB. From that we can calculate the address of the stack segment
    // structure it belongs to, and in that structure is a pointer to the task
    // that owns it.
    uintptr_t seg_addr =
        sp_limit - RED_ZONE_SIZE - LIMIT_OFFSET - sizeof(stk_seg);
    stk_seg *stk = (stk_seg*) seg_addr;

    // Make sure we've calculated the right address
    ::check_stack_canary(stk);
    assert(stk->task != NULL && "task pointer not in stack structure");
    return stk->task;
}

//
// Local Variables:
// mode: C++
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//

#endif /* RUST_TASK_H */