
/src/rt/rust_task.cpp

http://github.com/jruderman/rust
#ifndef __WIN32__
#include <execinfo.h>
#endif
#include <iostream>
#include <algorithm>

#include "rust_task.h"
#include "rust_cc.h"
#include "rust_env.h"
#include "rust_port.h"

// Tasks
rust_task::rust_task(rust_sched_loop *sched_loop, rust_task_state state,
                     const char *name, size_t init_stack_sz) :
    ref_count(1),
    id(0),
    stk(NULL),
    runtime_sp(0),
    sched(sched_loop->sched),
    sched_loop(sched_loop),
    kernel(sched_loop->kernel),
    name(name),
    list_index(-1),
    rendezvous_ptr(0),
    local_region(&sched_loop->local_region),
    boxed(sched_loop->kernel->env, &local_region),
    unwinding(false),
    cc_counter(0),
    total_stack_sz(0),
    task_local_data(NULL),
    task_local_data_cleanup(NULL),
    state(state),
    cond(NULL),
    cond_name("none"),
    event_reject(false),
    event(NULL),
    killed(false),
    reentered_rust_stack(false),
    disallow_kill(0),
    disallow_yield(0),
    c_stack(NULL),
    next_c_sp(0),
    next_rust_sp(0)
{
    LOGPTR(sched_loop, "new task", (uintptr_t)this);
    DLOG(sched_loop, task, "sizeof(task) = %d (0x%x)",
         sizeof *this, sizeof *this);

    new_stack(init_stack_sz);
}

// NB: This does not always run on the task's scheduler thread
void
rust_task::delete_this()
{
    DLOG(sched_loop, task, "~rust_task %s @0x%" PRIxPTR ", refcnt=%d",
         name, (uintptr_t)this, ref_count);

    /* FIXME (#2677): tighten this up, there are some more
       assertions that hold at task-lifecycle events. */
    assert(ref_count == 0); // ||
    //   (ref_count == 1 && this == sched->root_task));

    sched_loop->release_task(this);
}

// All failure goes through me. Put your breakpoints here!
extern "C" void
rust_task_fail(rust_task *task,
               char const *expr,
               char const *file,
               size_t line) {
    assert(task != NULL);
    task->begin_failure(expr, file, line);
}

struct spawn_args {
    rust_task *task;
    spawn_fn f;
    rust_opaque_box *envptr;
    void *argptr;
};

struct cleanup_args {
    spawn_args *spargs;
    bool threw_exception;
};

void
annihilate_boxes(rust_task *task);

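// Runs on the C stack after the task body has returned or unwound: clears
// task-local storage, annihilates remaining boxes on failure, runs the cycle
// collector, and transitions the task to the dead state.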
void
cleanup_task(cleanup_args *args) {
    spawn_args *a = args->spargs;
    bool threw_exception = args->threw_exception;
    rust_task *task = a->task;

    {
        scoped_lock with(task->lifecycle_lock);
        if (task->killed && !threw_exception) {
            LOG(task, task, "Task killed during termination");
            threw_exception = true;
        }
    }

    // Clean up TLS. This will only be set if TLS was used to begin with.
    // Because this is a crust function, it must be called from the C stack.
    if (task->task_local_data_cleanup != NULL) {
        // This assert should hold but it's not our job to ensure it (and
        // the condition might change). Handled in libcore/task.rs.
        // assert(task->task_local_data != NULL);
        task->task_local_data_cleanup(task->task_local_data);
        task->task_local_data = NULL;
    } else if (threw_exception && task->id == INIT_TASK_ID) {
        // Edge case: If main never spawns any tasks, but fails anyway, TLS
        // won't be around to take down the kernel (task.rs:kill_taskgroup,
        // rust_task_kill_all). Do it here instead.
        // (Note that child tasks cannot init their TLS if they were
        // killed too early, so we need to check main's task id too.)
        task->fail_sched_loop();
        // This must not happen twice.
        static bool main_task_failed_without_spawning = false;
        assert(!main_task_failed_without_spawning);
        main_task_failed_without_spawning = true;
    }

    // FIXME (#2676): For performance we should do the annihilator
    // instead of the cycle collector even under normal termination, but
    // since that would hide memory management errors (like not derefing
    // boxes), it needs to be disableable in debug builds.
    if (threw_exception) {
        // FIXME (#2676): When the annihilator is more powerful and
        // successfully runs resource destructors, etc. we can get rid
        // of this cc
        cc::do_cc(task);
        annihilate_boxes(task);
    }
    cc::do_final_cc(task);

    task->die();

#ifdef __WIN32__
    assert(!threw_exception && "No exception-handling yet on windows builds");
#endif
}

extern "C" CDECL void upcall_exchange_free(void *ptr);

// This runs on the Rust stack
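// Flow: run the task body, catch the unwind exception thrown by fail(),
// free the environment closure, hop to the C stack for cleanup_task, and
// finally swap back to the scheduler context.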
void task_start_wrapper(spawn_args *a)
{
    rust_task *task = a->task;

    bool threw_exception = false;
    try {
        // The first argument is the return pointer; as the task fn
        // must have void return type, we can safely pass 0.
        a->f(0, a->envptr, a->argptr);
    } catch (rust_task *ex) {
        assert(ex == task && "Expected this task to be thrown for unwinding");
        threw_exception = true;

        if (task->c_stack) {
            task->return_c_stack();
        }

        // Since we call glue code below we need to make sure we
        // have the stack limit set up correctly
        task->reset_stack_limit();
    }

    // We should have returned any C stack by now
    assert(task->c_stack == NULL);

    rust_opaque_box* env = a->envptr;
    if(env) {
        // free the environment (which should be a unique closure).
        const type_desc *td = env->td;
        td->drop_glue(NULL, NULL, NULL, box_body(env));
        upcall_exchange_free(env);
    }

    // The cleanup work needs lots of stack
    cleanup_args ca = {a, threw_exception};
    task->call_on_c_stack(&ca, (void*)cleanup_task);

    task->ctx.next->swap(task->ctx);
}

void
rust_task::start(spawn_fn spawnee_fn,
                 rust_opaque_box *envptr,
                 void *argptr)
{
    LOG(this, task, "starting task from fn 0x%" PRIxPTR
        " with env 0x%" PRIxPTR " and arg 0x%" PRIxPTR,
        spawnee_fn, envptr, argptr);

    assert(stk->data != NULL);

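    // The spawn arguments are carved out of the top of the task's new Rust
    // stack; task_start_wrapper reads them from there when the context is
    // first swapped in.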
    char *sp = (char *)stk->end;

    sp -= sizeof(spawn_args);

    spawn_args *a = (spawn_args *)sp;

    a->task = this;
    a->envptr = envptr;
    a->argptr = argptr;
    a->f = spawnee_fn;

    ctx.call((void *)task_start_wrapper, a, sp);

    this->start();
}

void rust_task::start()
{
    transition(task_state_newborn, task_state_running, NULL, "none");
}

bool
rust_task::must_fail_from_being_killed() {
    scoped_lock with(lifecycle_lock);
    return must_fail_from_being_killed_inner();
}

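// A pending kill takes effect only if the kill flag is set, the task has not
// re-entered the Rust stack, and kills are not currently inhibited
// (see inhibit_kill / allow_kill).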
bool
rust_task::must_fail_from_being_killed_inner() {
    lifecycle_lock.must_have_lock();
    return killed && !reentered_rust_stack && disallow_kill == 0;
}

void rust_task_yield_fail(rust_task *task) {
    LOG_ERR(task, task, "task %" PRIxPTR " yielded in an atomic section",
            task);
    task->fail();
}

// Only run this on the rust stack
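// Returns true if the task was killed while it was descheduled; the
// MUST_CHECK result is meant to be acted on by the caller.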
MUST_CHECK bool rust_task::yield() {
    bool killed = false;

    if (disallow_yield > 0) {
        call_on_c_stack(this, (void *)rust_task_yield_fail);
    }

    // This check is largely superfluous; it's the one after the context swap
    // that really matters. This one allows us to assert a useful invariant.
    if (must_fail_from_being_killed()) {
        {
            scoped_lock with(lifecycle_lock);
            assert(!(state == task_state_blocked));
        }
        killed = true;
    }

    // Return to the scheduler.
    ctx.next->swap(ctx);

    if (must_fail_from_being_killed()) {
        killed = true;
    }
    return killed;
}

void
rust_task::kill() {
    scoped_lock with(lifecycle_lock);
    kill_inner();
}

void rust_task::kill_inner() {
    lifecycle_lock.must_have_lock();

    // Multiple kills should be able to safely race, but check anyway.
    if (killed) {
        LOG(this, task, "task %s @0x%" PRIxPTR " already killed", name, this);
        return;
    }

    // Note the distinction here: kill() is for when you're in an upcall
    // from task A and want to force-fail task B; you call B->kill().
    // To fail yourself, call self->fail().
    LOG(this, task, "killing task %s @0x%" PRIxPTR, name, this);
    // When the task next goes to yield or resume it will fail
    killed = true;
    // Unblock the task so it can unwind.

    if (state == task_state_blocked &&
        must_fail_from_being_killed_inner()) {
        wakeup_inner(cond);
    }

    LOG(this, task, "preparing to unwind task: 0x%" PRIxPTR, this);
}

void
rust_task::fail() {
    // See note in ::kill() regarding who should call this.
    fail(NULL, NULL, 0);
}

void
rust_task::fail(char const *expr, char const *file, size_t line) {
    rust_task_fail(this, expr, file, line);
}

// Called only by rust_task_fail
void
rust_task::begin_failure(char const *expr, char const *file, size_t line) {

    if (expr) {
        LOG_ERR(this, task, "task failed at '%s', %s:%" PRIdPTR,
                expr, file, line);
    }

    DLOG(sched_loop, task, "task %s @0x%" PRIxPTR " failing", name, this);
    backtrace();
    unwinding = true;
#ifndef __WIN32__
    throw this;
#else
    die();
    // FIXME (#908): Need unwinding on windows. This will end up aborting
    fail_sched_loop();
#endif
}

void rust_task::fail_sched_loop() {
    sched_loop->fail();
}

frame_glue_fns*
rust_task::get_frame_glue_fns(uintptr_t fp) {
    fp -= sizeof(uintptr_t);
    return *((frame_glue_fns**) fp);
}

void rust_task::assert_is_running()
{
    scoped_lock with(lifecycle_lock);
    assert(state == task_state_running);
}

// FIXME (#2851) Remove this code when rust_port goes away?
bool
rust_task::blocked_on(rust_cond *on)
{
    lifecycle_lock.must_have_lock();
    return cond == on;
}

void *
rust_task::malloc(size_t sz, const char *tag, type_desc *td)
{
    return local_region.malloc(sz, tag);
}

void *
rust_task::realloc(void *data, size_t sz)
{
    return local_region.realloc(data, sz);
}

void
rust_task::free(void *p)
{
    local_region.free(p);
}

void
rust_task::transition(rust_task_state src, rust_task_state dst,
                      rust_cond *cond, const char* cond_name) {
    scoped_lock with(lifecycle_lock);
    transition_inner(src, dst, cond, cond_name);
}

void rust_task::transition_inner(rust_task_state src, rust_task_state dst,
                                  rust_cond *cond, const char* cond_name) {
    lifecycle_lock.must_have_lock();
    sched_loop->transition(this, src, dst, cond, cond_name);
}

void
rust_task::set_state(rust_task_state state,
                     rust_cond *cond, const char* cond_name) {
    lifecycle_lock.must_have_lock();
    this->state = state;
    this->cond = cond;
    this->cond_name = cond_name;
}

bool
rust_task::block(rust_cond *on, const char* name) {
    scoped_lock with(lifecycle_lock);
    return block_inner(on, name);
}

bool
rust_task::block_inner(rust_cond *on, const char* name) {
    if (must_fail_from_being_killed_inner()) {
        // We're already going to die. Don't block. Tell the task to fail
        return false;
    }

    LOG(this, task, "Blocking on 0x%" PRIxPTR ", cond: 0x%" PRIxPTR,
                         (uintptr_t) on, (uintptr_t) cond);
    assert(cond == NULL && "Cannot block an already blocked task.");
    assert(on != NULL && "Cannot block on a NULL object.");

    transition_inner(task_state_running, task_state_blocked, on, name);

    return true;
}

void
rust_task::wakeup(rust_cond *from) {
    scoped_lock with(lifecycle_lock);
    wakeup_inner(from);
}

void
rust_task::wakeup_inner(rust_cond *from) {
    assert(cond != NULL && "Cannot wake up unblocked task.");
    LOG(this, task, "Blocked on 0x%" PRIxPTR " woken up on 0x%" PRIxPTR,
                        (uintptr_t) cond, (uintptr_t) from);
    assert(cond == from && "Cannot wake up blocked task on wrong condition.");

    transition_inner(task_state_blocked, task_state_running, NULL, "none");
}

void
rust_task::die() {
    transition(task_state_running, task_state_dead, NULL, "none");
}

void
rust_task::backtrace() {
    if (!log_rt_backtrace) return;
#ifndef __WIN32__
    void *call_stack[256];
    int nframes = ::backtrace(call_stack, 256);
    backtrace_symbols_fd(call_stack + 1, nframes - 1, 2);
#endif
}

void *
rust_task::calloc(size_t size, const char *tag) {
    return local_region.calloc(size, tag);
}

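// Choose the size of the next stack segment: at least `min` and `requested`,
// otherwise roughly double the current segment, with the doubling capped
// at 1MB.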
size_t
rust_task::get_next_stack_size(size_t min, size_t current, size_t requested) {
    LOG(this, mem, "calculating new stack size for 0x%" PRIxPTR, this);
    LOG(this, mem,
        "min: %" PRIdPTR " current: %" PRIdPTR " requested: %" PRIdPTR,
        min, current, requested);

    // Allocate at least enough to accommodate the next frame
    size_t sz = std::max(min, requested);

    // And double the stack size each allocation
    const size_t max = 1024 * 1024;
    size_t next = std::min(max, current * 2);

    sz = std::max(sz, next);

    LOG(this, mem, "next stack size: %" PRIdPTR, sz);
    assert(requested <= sz);
    return sz;
}

void
rust_task::free_stack(stk_seg *stk) {
    LOGPTR(sched_loop, "freeing stk segment", (uintptr_t)stk);
    total_stack_sz -= user_stack_size(stk);
    destroy_stack(&local_region, stk);
}

void
new_stack_slow(new_stack_args *args) {
    args->task->new_stack(args->requested_sz);
}

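// Grow the task's segmented stack. A cached next segment is reused if it is
// big enough; segments that are too small are freed, and otherwise a fresh
// segment is allocated and linked onto the chain.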
void
rust_task::new_stack(size_t requested_sz) {
    LOG(this, mem, "creating new stack for task %" PRIxPTR, this);
    if (stk) {
        ::check_stack_canary(stk);
    }

    // The minimum stack size, in bytes, of a Rust stack, excluding red zone
    size_t min_sz = sched_loop->min_stack_size;

    // Try to reuse an existing stack segment
    while (stk != NULL && stk->next != NULL) {
        size_t next_sz = user_stack_size(stk->next);
        if (min_sz <= next_sz && requested_sz <= next_sz) {
            LOG(this, mem, "reusing existing stack");
            stk = stk->next;
            return;
        } else {
            LOG(this, mem, "existing stack is not big enough");
            stk_seg *new_next = stk->next->next;
            free_stack(stk->next);
            stk->next = new_next;
            if (new_next) {
                new_next->prev = stk;
            }
        }
    }

    // The size of the current stack segment, excluding red zone
    size_t current_sz = 0;
    if (stk != NULL) {
        current_sz = user_stack_size(stk);
    }
    // The calculated size of the new stack, excluding red zone
    size_t rust_stk_sz = get_next_stack_size(min_sz,
                                             current_sz, requested_sz);

    size_t max_stack = kernel->env->max_stack_size;
    size_t used_stack = total_stack_sz + rust_stk_sz;

    // Don't allow stacks to grow forever. During unwinding we have to permit
    // more stack than normal so that destructors have room to run; the limit
    // is arbitrarily set at 2x the maximum stack size.
    if (!unwinding && used_stack > max_stack) {
        LOG_ERR(this, task, "task %" PRIxPTR " ran out of stack", this);
        fail();
    } else if (unwinding && used_stack > max_stack * 2) {
        LOG_ERR(this, task,
                "task %" PRIxPTR " ran out of stack during unwinding", this);
        fail();
    }

    size_t sz = rust_stk_sz + RED_ZONE_SIZE;
    stk_seg *new_stk = create_stack(&local_region, sz);
    LOGPTR(sched_loop, "new stk", (uintptr_t)new_stk);
    new_stk->task = this;
    new_stk->next = NULL;
    new_stk->prev = stk;
    if (stk) {
        stk->next = new_stk;
    }
    LOGPTR(sched_loop, "stk end", new_stk->end);

    stk = new_stk;
    total_stack_sz += user_stack_size(new_stk);
}

void
rust_task::cleanup_after_turn() {
    // Delete any spare stack segments that were left
    // behind by calls to prev_stack
    assert(stk);
    while (stk->next) {
        stk_seg *new_next = stk->next->next;
        free_stack(stk->next);
        stk->next = new_next;
    }
}

static bool
sp_in_stk_seg(uintptr_t sp, stk_seg *stk) {
    // Not positive these bounds for sp are correct.  I think that the first
    // possible value for esp on a new stack is stk->end, which points to the
    // address before the first value to be pushed onto a new stack. The last
    // possible address we can push data to is stk->data.  Regardless, there's
    // so much slop at either end that we should never hit one of these
    // boundaries.
    return (uintptr_t)stk->data <= sp && sp <= stk->end;
}

/*
Called by landing pads during unwinding to figure out which stack segment we
are currently running on and record the stack limit (which was not restored
when unwinding through __morestack).
 */
void
rust_task::reset_stack_limit() {
    uintptr_t sp = get_sp();
    while (!sp_in_stk_seg(sp, stk)) {
        stk = stk->prev;
        assert(stk != NULL && "Failed to find the current stack");
    }
    record_stack_limit();
}

void
rust_task::check_stack_canary() {
    ::check_stack_canary(stk);
}

void
rust_task::delete_all_stacks() {
    assert(!on_rust_stack());
    // Delete all the stacks. There may be more than one if the task failed
    // and no landing pads stopped to clean up.
    assert(stk->next == NULL);
    while (stk != NULL) {
        stk_seg *prev = stk->prev;
        free_stack(stk);
        stk = prev;
    }
}

/*
Returns true if we're currently running on the Rust stack
 */
bool
rust_task::on_rust_stack() {
    if (stk == NULL) {
        // This only happens during construction
        return false;
    }

    uintptr_t sp = get_sp();
    bool in_first_segment = sp_in_stk_seg(sp, stk);
    if (in_first_segment) {
        return true;
    } else if (stk->prev != NULL) {
        // This happens only when calling the upcall to delete
        // a stack segment
        bool in_second_segment = sp_in_stk_seg(sp, stk->prev);
        return in_second_segment;
    } else {
        return false;
    }
}

void
rust_task::inhibit_kill() {
    scoped_lock with(lifecycle_lock);
    // This might be a good place, though not a mandatory one, to check
    // whether we have to die.
    disallow_kill++;
}

void
rust_task::allow_kill() {
    scoped_lock with(lifecycle_lock);
    assert(disallow_kill > 0 && "Illegal allow_kill(): already killable!");
    disallow_kill--;
}

void rust_task::inhibit_yield() {
    scoped_lock with(lifecycle_lock);
    disallow_yield++;
}

void rust_task::allow_yield() {
    scoped_lock with(lifecycle_lock);
    assert(disallow_yield > 0 && "Illegal allow_yield(): already yieldable!");
    disallow_yield--;
}

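// Block until signal_event() delivers a value, or return immediately if a
// value was already delivered. Returns true if the task was killed while
// waiting; *result receives the event value.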
MUST_CHECK bool rust_task::wait_event(void **result) {
    bool killed = false;
    scoped_lock with(lifecycle_lock);

    if(!event_reject) {
        block_inner(&event_cond, "waiting on event");
        lifecycle_lock.unlock();
        killed = yield();
        lifecycle_lock.lock();
    } else if (must_fail_from_being_killed_inner()) {
        // If the deschedule was rejected, yield won't do our killed check for
        // us. For thoroughness, do it here. FIXME (#524)
        killed = true;
    }

    event_reject = false;
    *result = event;
    return killed;
}

void
rust_task::signal_event(void *event) {
    scoped_lock with(lifecycle_lock);

    this->event = event;
    event_reject = true;
    if(task_state_blocked == state) {
        wakeup_inner(&event_cond);
    }
}

//
// Local Variables:
// mode: C++
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//