
/src/rt/rust_task.h

http://github.com/jruderman/rust
/**
   The rust task is a cooperatively-scheduled green thread that executes
   Rust code on a segmented stack.

   This class has too many responsibilities:

   * Working with the scheduler loop to signal and respond to state changes,
   and dealing with all the thread synchronization issues involved

   * Managing the dynamically resizing list of Rust stack segments

   * Switching between running Rust code on the Rust segmented stack and
   foreign C code on large stacks owned by the scheduler

   The lifetime of a rust_task object closely mirrors that of a running Rust
   task object, but they are not identical. In particular, the rust_task is an
   atomically reference counted object that might be accessed from arbitrary
   threads at any time. This may keep the task from being destroyed even after
   the task is dead from a Rust task lifecycle perspective.

   FIXME (#2696): The task and the scheduler have an over-complicated,
   undocumented protocol for shutting down the task, hopefully without
   races. It would be easier to reason about if other runtime objects could
   not access the task from arbitrary threads, and didn't need to be
   atomically refcounted.
 */

#ifndef RUST_TASK_H
#define RUST_TASK_H

#include <map>

#include "rust_globals.h"
#include "util/array_list.h"
#include "context.h"
#include "rust_debug.h"
#include "rust_kernel.h"
#include "boxed_region.h"
#include "rust_stack.h"
#include "rust_port_selector.h"
#include "rust_type.h"
#include "rust_sched_loop.h"

// The amount of extra space at the end of each stack segment, available
// to the rt, compiler and dynamic linker for running small functions
// FIXME (#1509): We want this to be 128 but need to slim the red zone calls
// down, disable lazy symbol relocation, and other things we haven't
// discovered yet
#define RZ_LINUX_32 (1024*2)
#define RZ_LINUX_64 (1024*2)
#define RZ_MAC_32   (1024*20)
#define RZ_MAC_64   (1024*20)
#define RZ_WIN_32   (1024*20)
#define RZ_WIN_64   (1024*20)
#define RZ_BSD_32   (1024*20)
#define RZ_BSD_64   (1024*20)

#ifdef __linux__
#ifdef __i386__
#define RED_ZONE_SIZE RZ_LINUX_32
#endif
#ifdef __x86_64__
#define RED_ZONE_SIZE RZ_LINUX_64
#endif
#endif
#ifdef __APPLE__
#ifdef __i386__
#define RED_ZONE_SIZE RZ_MAC_32
#endif
#ifdef __x86_64__
#define RED_ZONE_SIZE RZ_MAC_64
#endif
#endif
#ifdef __WIN32__
#ifdef __i386__
#define RED_ZONE_SIZE RZ_WIN_32
#endif
#ifdef __x86_64__
#define RED_ZONE_SIZE RZ_WIN_64
#endif
#endif
#ifdef __FreeBSD__
#ifdef __i386__
#define RED_ZONE_SIZE RZ_BSD_32
#endif
#ifdef __x86_64__
#define RED_ZONE_SIZE RZ_BSD_64
#endif
#endif
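
// Illustrative guard (a sketch of something one could add here, not part of
// the platform table above): if a port of the runtime reaches this point on
// a platform or architecture that the #ifdef chain does not handle,
// RED_ZONE_SIZE is silently left undefined and the stack-limit arithmetic
// further down fails far from the real cause. Failing fast makes the
// assumption explicit.
#ifndef RED_ZONE_SIZE
#error "unsupported platform/architecture: RED_ZONE_SIZE is not defined"
#endif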

struct rust_box;

struct frame_glue_fns {
    uintptr_t mark_glue_off;
    uintptr_t drop_glue_off;
    uintptr_t reloc_glue_off;
};

// std::lib::task::task_result
typedef unsigned long task_result;
#define tr_success 0
#define tr_failure 1

struct spawn_args;
struct cleanup_args;
struct reset_args;
struct new_stack_args;

// std::lib::task::task_notification
//
// since it's currently a unary tag, we only add the fields.
struct task_notification {
    rust_task_id id;
    task_result result; // task_result
};

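// Purely illustrative: when a supervised child task exits, the runtime would
// fill in a value along these lines before delivering it to the supervisor
// (the 'child_id' name here is hypothetical, used only for the sketch):
//
//     task_notification note;
//     note.id = child_id;
//     note.result = tr_failure;
//
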
extern "C" void
rust_task_fail(rust_task *task,
               char const *expr,
               char const *file,
               size_t line);

struct
rust_task : public kernel_owned<rust_task>
{
    RUST_ATOMIC_REFCOUNT();

    rust_task_id id;

    context ctx;
    stk_seg *stk;
    uintptr_t runtime_sp;      // Runtime sp while task running.
    rust_scheduler *sched;
    rust_sched_loop *sched_loop;

    // Fields known only to the runtime.
    rust_kernel *kernel;
    const char *const name;
    int32_t list_index;

    // Rendezvous pointer for receiving data when blocked on a port. If we're
    // trying to read data and no data is available on any incoming channel,
    // we block on the port and yield control to the scheduler. Since we were
    // not able to read anything, we remember the location where the result
    // should go in rendezvous_ptr, and let the sender write to that location
    // before waking us up.
    uintptr_t* rendezvous_ptr;

    memory_region local_region;
    boxed_region boxed;

    // Indicates that fail() has been called and we are cleaning up.
    // We use this to suppress the "killed" flag during calls to yield.
    bool unwinding;

    bool propagate_failure;

    uint32_t cc_counter;

    debug::task_debug_info debug;

    // The amount of stack we're using, excluding red zones
    size_t total_stack_sz;

    // Used by rust task management routines in libcore/task.rs.
    void *task_local_data;
    void (*task_local_data_cleanup)(void *data);

private:

    // Protects state, cond and cond_name, as well as the killed flag,
    // the disallow_kill flag and reentered_rust_stack
    lock_and_signal lifecycle_lock;
    rust_task_state state;
    rust_cond *cond;
    const char *cond_name;

    bool event_reject;
    rust_cond event_cond;
    void *event;

    // Indicates that the task was killed and needs to unwind
    bool killed;
    // Indicates that we've called back into Rust from C
    bool reentered_rust_stack;
    unsigned long disallow_kill;
    unsigned long disallow_yield;

    // The stack used for running C code, borrowed from the scheduler thread
    stk_seg *c_stack;
    uintptr_t next_c_sp;
    uintptr_t next_rust_sp;

    rust_port_selector port_selector;

    // Called when the atomic refcount reaches zero
    void delete_this();

    void new_stack_fast(size_t requested_sz);
    void new_stack(size_t requested_sz);
    void free_stack(stk_seg *stk);
    size_t get_next_stack_size(size_t min, size_t current, size_t requested);

    void return_c_stack();

    void transition(rust_task_state src, rust_task_state dst,
                    rust_cond *cond, const char* cond_name);
    void transition_inner(rust_task_state src, rust_task_state dst,
                          rust_cond *cond, const char* cond_name);

    bool must_fail_from_being_killed_inner();
    // Called by rust_task_fail to unwind on failure
    void begin_failure(char const *expr,
                       char const *file,
                       size_t line);

    friend void task_start_wrapper(spawn_args *a);
    friend void cleanup_task(cleanup_args *a);
    friend void reset_stack_limit_on_c_stack(reset_args *a);
    friend void new_stack_slow(new_stack_args *a);
    friend void rust_task_fail(rust_task *task,
                               char const *expr,
                               char const *file,
                               size_t line);

    friend class rust_port;
    friend class rust_port_selector;
    bool block_inner(rust_cond *on, const char* name);
    void wakeup_inner(rust_cond *from);
    bool blocked_on(rust_cond *cond);

public:

    // Only a pointer to 'name' is kept, so it must live as long as this task.
    rust_task(rust_sched_loop *sched_loop,
              rust_task_state state,
              const char *name,
              size_t init_stack_sz);

    void start(spawn_fn spawnee_fn,
               rust_opaque_box *env,
               void *args);
    void start();
    void assert_is_running();

    void *malloc(size_t sz, const char *tag, type_desc *td=0);
    void *realloc(void *data, size_t sz);
    void free(void *p);

    void set_state(rust_task_state state,
                   rust_cond *cond, const char* cond_name);

    bool block(rust_cond *on, const char* name);
    void wakeup(rust_cond *from);
    void die();

    // Print a backtrace, if the "bt" logging option is on.
    void backtrace();

    // Yields control to the scheduler. Called from the Rust stack.
    // Returns TRUE if the task was killed and needs to fail.
    MUST_CHECK bool yield();

    // Fail this task (assuming the caller on the stack is a different task).
    void kill();
    void kill_inner();

    // Indicates that we've been killed and that now is an appropriate
    // time to fail as a result
    bool must_fail_from_being_killed();

    // Fail self, assuming the caller on the stack is this task.
    void fail();
    void fail(char const *expr, char const *file, size_t line);

    // Propagate failure to the entire rust runtime.
    void fail_sched_loop();

    frame_glue_fns *get_frame_glue_fns(uintptr_t fp);

    void *calloc(size_t size, const char *tag);

    // Use this function sparingly. Depending on the ref count is generally
    // not at all safe.
    intptr_t get_ref_count() const { return ref_count; }

    void *next_stack(size_t stk_sz, void *args_addr, size_t args_sz);
    void prev_stack();
    void record_stack_limit();
    void reset_stack_limit();

    bool on_rust_stack();
    void check_stack_canary();
    void delete_all_stacks();

    void call_on_c_stack(void *args, void *fn_ptr);
    void call_on_rust_stack(void *args, void *fn_ptr);
    bool have_c_stack() { return c_stack != NULL; }

    rust_port_selector *get_port_selector() { return &port_selector; }

    rust_task_state get_state() { return state; }
    rust_cond *get_cond() { return cond; }
    const char *get_cond_name() { return cond_name; }

    void clear_event_reject() {
        this->event_reject = false;
    }

    // Returns TRUE if the task was killed and needs to fail.
    MUST_CHECK bool wait_event(void **result);
    void signal_event(void *event);

    void cleanup_after_turn();

    void inhibit_kill();
    void allow_kill();
    void inhibit_yield();
    void allow_yield();
};

// FIXME (#2697): It would be really nice to be able to get rid of this.
inline void *operator new[](size_t size, rust_task *task, const char *tag) {
    return task->malloc(size, tag);
}


template <typename T> struct task_owned {
    inline void *operator new(size_t size, rust_task *task,
                              const char *tag) {
        return task->malloc(size, tag);
    }

    inline void *operator new[](size_t size, rust_task *task,
                                const char *tag) {
        return task->malloc(size, tag);
    }

    inline void *operator new(size_t size, rust_task &task,
                              const char *tag) {
        return task.malloc(size, tag);
    }

    inline void *operator new[](size_t size, rust_task &task,
                                const char *tag) {
        return task.malloc(size, tag);
    }

    void operator delete(void *ptr) {
        ((T *)ptr)->task->free(ptr);
    }
};
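
// Illustrative sketch (not taken from this header): a runtime object that
// derives from task_owned<T> and carries a 'task' back-pointer can be
// allocated out of the task's local region with the placement forms above.
// Note that operator delete above requires the 'task' member, since it
// routes the free back through it. 'my_obj' is a hypothetical name.
//
//     struct my_obj : public task_owned<my_obj> {
//         rust_task *task;              // required by operator delete above
//         my_obj(rust_task *t) : task(t) { }
//     };
//
//     my_obj *obj = new (task, "my_obj") my_obj(task);
//     delete obj;                       // ends up in task->free()
//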

// This stuff is on the stack-switching fast path

// Records the pointer to the end of the Rust stack in a platform-
// specific location in the thread control block
extern "C" CDECL void      record_sp_limit(void *limit);
extern "C" CDECL uintptr_t get_sp_limit();
// Gets a pointer to the vicinity of the current stack pointer
extern "C" uintptr_t       get_sp();

// This is the function that switches between the C and the Rust stack by
// calling another function with a single void* argument while changing the
// stack pointer. It has a funny name because gdb doesn't normally like to
// backtrace through split stacks (thinks it indicates a bug), but has a
// special case to allow functions named __morestack to move the stack pointer
// around.
extern "C" void __morestack(void *args, void *fn_ptr, uintptr_t stack_ptr);

inline static uintptr_t
sanitize_next_sp(uintptr_t next_sp) {

    // Since I'm not precisely sure where the next stack pointer sits in
    // relation to where the context switch actually happened, nor in relation
    // to the amount of stack needed for calling __morestack, I've added some
    // extra bytes here.

    // FIXME (#2698): On the rust stack this potentially puts us quite far
    // into the red zone. Might want to just allocate a new rust stack every
    // time we switch back to rust.
    const uintptr_t padding = 16;

    return align_down(next_sp - padding);
}
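
// Worked example (illustrative; assumes align_down() rounds down to a 16-byte
// boundary, which is not shown in this header): with next_sp == 0x7fff1234,
// subtracting the 16 bytes of padding gives 0x7fff1224, and aligning down
// yields 0x7fff1220, so execution resumes a little below the saved sp.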

inline void
rust_task::call_on_c_stack(void *args, void *fn_ptr) {
    // Too expensive to check
    // assert(on_rust_stack());

    // The shim functions generated by rustc contain the morestack prologue,
    // so we need to let them know they have enough stack.
    record_sp_limit(0);

    uintptr_t prev_rust_sp = next_rust_sp;
    next_rust_sp = get_sp();

    bool borrowed_a_c_stack = false;
    uintptr_t sp;
    if (c_stack == NULL) {
        c_stack = sched_loop->borrow_c_stack();
        next_c_sp = align_down(c_stack->end);
        sp = next_c_sp;
        borrowed_a_c_stack = true;
    } else {
        sp = sanitize_next_sp(next_c_sp);
    }

    __morestack(args, fn_ptr, sp);

    // Note that we may not actually get here if we threw an exception,
    // in which case we will return the c stack when the exception is caught.
    if (borrowed_a_c_stack) {
        return_c_stack();
    }

    next_rust_sp = prev_rust_sp;

    record_stack_limit();
}

inline void
rust_task::call_on_rust_stack(void *args, void *fn_ptr) {
    // Too expensive to check
    // assert(!on_rust_stack());

    // Because of the hack in the other function that disables the stack limit
    // when entering the C stack, here we restore the stack limit again.
    record_stack_limit();

    assert(get_sp_limit() != 0 && "Stack must be configured");
    assert(next_rust_sp);

    bool had_reentered_rust_stack;
    {
        scoped_lock with(lifecycle_lock);
        had_reentered_rust_stack = reentered_rust_stack;
        reentered_rust_stack = true;
    }

    uintptr_t prev_c_sp = next_c_sp;
    next_c_sp = get_sp();

    uintptr_t sp = sanitize_next_sp(next_rust_sp);

    // FIXME (#2047): There are times when this is called and needs
    // to be able to throw, and we don't account for that.
    __morestack(args, fn_ptr, sp);

    next_c_sp = prev_c_sp;
    {
        scoped_lock with(lifecycle_lock);
        reentered_rust_stack = had_reentered_rust_stack;
    }

    record_sp_limit(0);
}

inline void
rust_task::return_c_stack() {
    // Too expensive to check
    // assert(on_rust_stack());
    assert(c_stack != NULL);
    sched_loop->return_c_stack(c_stack);
    c_stack = NULL;
    next_c_sp = 0;
}

// NB: This runs on the Rust stack
inline void *
rust_task::next_stack(size_t stk_sz, void *args_addr, size_t args_sz) {
    new_stack_fast(stk_sz + args_sz);
    assert(stk->end - (uintptr_t)stk->data >= stk_sz + args_sz
      && "Did not receive enough stack");
    uint8_t *new_sp = (uint8_t*)stk->end;
    // Push the function arguments to the new stack
    new_sp = align_down(new_sp - args_sz);

    // I don't know exactly where the region ends that valgrind needs us
    // to mark accessible. On x86_64 these extra bytes aren't needed, but
    // on i386 we get errors without them.
    const int fudge_bytes = 16;
    reuse_valgrind_stack(stk, new_sp - fudge_bytes);

    memcpy(new_sp, args_addr, args_sz);
    record_stack_limit();
    return new_sp;
}

// The amount of stack in a segment available to Rust code
inline size_t
user_stack_size(stk_seg *stk) {
    return (size_t)(stk->end
                    - (uintptr_t)&stk->data[0]
                    - RED_ZONE_SIZE);
}
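
// For instance (illustrative figures only): on 64-bit Linux, a segment whose
// data area spans 32 KiB would report user_stack_size of
// 32768 - RZ_LINUX_64 (2048) == 30720 bytes available to Rust code.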

struct new_stack_args {
    rust_task *task;
    size_t requested_sz;
};

void
new_stack_slow(new_stack_args *args);

// NB: This runs on the Rust stack
// This is the new stack fast path, in which we
// reuse the next cached stack segment
inline void
rust_task::new_stack_fast(size_t requested_sz) {
    // The minimum stack size, in bytes, of a Rust stack, excluding red zone
    size_t min_sz = sched_loop->min_stack_size;

    // Try to reuse an existing stack segment
    if (stk != NULL && stk->next != NULL) {
        size_t next_sz = user_stack_size(stk->next);
        if (min_sz <= next_sz && requested_sz <= next_sz) {
            stk = stk->next;
            return;
        }
    }

    new_stack_args args = {this, requested_sz};
    call_on_c_stack(&args, (void*)new_stack_slow);
}

// NB: This runs on the Rust stack
inline void
rust_task::prev_stack() {
    // We're not going to actually delete anything now because that would
    // require switching to the C stack and be costly. Instead we'll just move
    // up the linked list and clean up later, either in new_stack or after our
    // turn on the scheduler ends.
    stk = stk->prev;
    record_stack_limit();
}

extern "C" CDECL void
record_sp_limit(void *limit);

// The LLVM-generated segmented-stack function prolog compares the amount of
// stack needed for each frame to the end-of-stack pointer stored in the
// TCB. As an optimization, when the frame size is less than 256 bytes, it
// will simply compare %esp to the stack limit instead of subtracting the
// frame size. As a result we need our stack limit to account for those 256
// bytes.
const unsigned LIMIT_OFFSET = 256;

inline void
rust_task::record_stack_limit() {
    assert(stk);
    assert((uintptr_t)stk->end - RED_ZONE_SIZE
      - (uintptr_t)stk->data >= LIMIT_OFFSET
           && "Stack size must be greater than LIMIT_OFFSET");
    record_sp_limit(stk->data + LIMIT_OFFSET + RED_ZONE_SIZE);
}
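
// Worked example (illustrative): with RZ_LINUX_64 (2048 bytes) the limit is
// recorded as stk->data + 2048 + 256. A frame of at most 256 bytes that
// passes the prolog's cheap "%esp vs. limit" check can therefore grow down
// to stk->data + 2048 at worst, i.e. it bottoms out at the top of the red
// zone rather than inside it.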

inline rust_task* rust_get_current_task() {
    uintptr_t sp_limit = get_sp_limit();

    // FIXME (#1226) - Because of a hack in upcall_call_shim_on_c_stack this
    // value is sometimes inconveniently set to 0, so we can't use this
    // method of retrieving the task pointer and need to fall back to TLS.
    if (sp_limit == 0)
        return rust_sched_loop::get_task_tls();

    // The stack pointer boundary is stored in a quickly-accessible location
    // in the TCB. From that we can calculate the address of the stack segment
    // structure it belongs to, and in that structure is a pointer to the task
    // that owns it.
    uintptr_t seg_addr =
        sp_limit - RED_ZONE_SIZE - LIMIT_OFFSET - sizeof(stk_seg);
    stk_seg *stk = (stk_seg*) seg_addr;

    // Make sure we've calculated the right address
    ::check_stack_canary(stk);
    assert(stk->task != NULL && "task pointer not in stack structure");
    return stk->task;
}
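
// The arithmetic above is record_stack_limit() run in reverse (illustrative
// reading; it assumes the 'data' array is the trailing member of stk_seg
// with no padding after the header): the recorded limit is
// data + LIMIT_OFFSET + RED_ZONE_SIZE, so subtracting those two constants
// recovers &stk->data[0], and subtracting sizeof(stk_seg) on top of that
// recovers the start of the stk_seg header, where the owning task lives.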

//
// Local Variables:
// mode: C++
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//

#endif /* RUST_TASK_H */