/src/rt/rust_task.cpp

http://github.com/jruderman/rust · C++ · 698 lines · 502 code · 107 blank · 89 comment · 80 complexity · cd0fbdf7b906e745b12476886c076654 MD5 · raw file

  1. #ifndef __WIN32__
  2. #include <execinfo.h>
  3. #endif
  4. #include <iostream>
  5. #include <algorithm>
  6. #include "rust_task.h"
  7. #include "rust_cc.h"
  8. #include "rust_env.h"
  9. #include "rust_port.h"
// Tasks

// Construct a task bound to the given scheduler loop. `state` is the
// initial lifecycle state and `init_stack_sz` the size of the first Rust
// stack segment, which is allocated immediately via new_stack().
rust_task::rust_task(rust_sched_loop *sched_loop, rust_task_state state,
                     const char *name, size_t init_stack_sz) :
    ref_count(1),       // caller holds the initial reference
    id(0),
    stk(NULL),          // first stack segment is created below
    runtime_sp(0),
    sched(sched_loop->sched),
    sched_loop(sched_loop),
    kernel(sched_loop->kernel),
    name(name),
    list_index(-1),
    rendezvous_ptr(0),
    local_region(&sched_loop->local_region),
    boxed(sched_loop->kernel->env, &local_region),
    unwinding(false),
    cc_counter(0),
    total_stack_sz(0),
    task_local_data(NULL),
    task_local_data_cleanup(NULL),
    state(state),
    cond(NULL),
    cond_name("none"),
    event_reject(false),
    event(NULL),
    killed(false),
    reentered_rust_stack(false),
    disallow_kill(0),
    disallow_yield(0),
    c_stack(NULL),
    next_c_sp(0),
    next_rust_sp(0)
{
    LOGPTR(sched_loop, "new task", (uintptr_t)this);
    DLOG(sched_loop, task, "sizeof(task) = %d (0x%x)",
         sizeof *this, sizeof *this);

    new_stack(init_stack_sz);
}
// NB: This does not always run on the task's scheduler thread

// Final teardown: asserts the reference count has dropped to zero and
// hands the task back to its scheduler loop for deallocation.
void
rust_task::delete_this()
{
    DLOG(sched_loop, task, "~rust_task %s @0x%" PRIxPTR ", refcnt=%d",
         name, (uintptr_t)this, ref_count);

    /* FIXME (#2677): tighten this up, there are some more
       assertions that hold at task-lifecycle events. */
    assert(ref_count == 0); // ||
    //   (ref_count == 1 && this == sched->root_task));

    sched_loop->release_task(this);
}
// All failure goes through me. Put your breakpoints here!

// C entry point for task failure. `expr`/`file`/`line` describe the
// failing expression and its source location (may be NULL/0 when failing
// without location info, see rust_task::fail()).
extern "C" void
rust_task_fail(rust_task *task,
               char const *expr,
               char const *file,
               size_t line) {
    assert(task != NULL);
    task->begin_failure(expr, file, line);
}
// Arguments handed to a freshly spawned task: the task itself, the Rust
// function to run, its closure environment box, and its argument pointer.
struct spawn_args {
    rust_task *task;
    spawn_fn f;
    rust_opaque_box *envptr;
    void *argptr;
};

// Bundled arguments for cleanup_task, which must be called on the C stack.
struct cleanup_args {
    spawn_args *spargs;
    bool threw_exception;   // did the task body unwind?
};

void
annihilate_boxes(rust_task *task);
// Runs (on the C stack) after the task body has returned or unwound:
// tears down TLS, propagates a main-task failure to the scheduler loop,
// annihilates boxes on failure, runs a final cycle collection, and finally
// moves the task to the dead state.
void
cleanup_task(cleanup_args *args) {
    spawn_args *a = args->spargs;
    bool threw_exception = args->threw_exception;
    rust_task *task = a->task;

    {
        scoped_lock with(task->lifecycle_lock);
        // A kill that arrived during termination is treated as a failure.
        if (task->killed && !threw_exception) {
            LOG(task, task, "Task killed during termination");
            threw_exception = true;
        }
    }

    // Clean up TLS. This will only be set if TLS was used to begin with.
    // Because this is a crust function, it must be called from the C stack.
    if (task->task_local_data_cleanup != NULL) {
        // This assert should hold but it's not our job to ensure it (and
        // the condition might change). Handled in libcore/task.rs.
        // assert(task->task_local_data != NULL);
        task->task_local_data_cleanup(task->task_local_data);
        task->task_local_data = NULL;
    } else if (threw_exception && task->id == INIT_TASK_ID) {
        // Edge case: If main never spawns any tasks, but fails anyway, TLS
        // won't be around to take down the kernel (task.rs:kill_taskgroup,
        // rust_task_kill_all). Do it here instead.
        // (Note that children tasks can not init their TLS if they were
        // killed too early, so we need to check main's task id too.)
        task->fail_sched_loop();
        // This must not happen twice.
        static bool main_task_failed_without_spawning = false;
        assert(!main_task_failed_without_spawning);
        main_task_failed_without_spawning = true;
    }

    // FIXME (#2676): For performance we should do the annihilator
    // instead of the cycle collector even under normal termination, but
    // since that would hide memory management errors (like not derefing
    // boxes), it needs to be disableable in debug builds.
    if (threw_exception) {
        // FIXME (#2676): When the annihilator is more powerful and
        // successfully runs resource destructors, etc. we can get rid
        // of this cc
        cc::do_cc(task);
        annihilate_boxes(task);
    }
    cc::do_final_cc(task);

    task->die();

#ifdef __WIN32__
    assert(!threw_exception && "No exception-handling yet on windows builds");
#endif
}
extern "C" CDECL void upcall_exchange_free(void *ptr);

// This runs on the Rust stack

// Entry point for a spawned task: runs the task function, catches the
// unwinding exception on failure, frees the closure environment, then
// switches to the C stack for cleanup and finally swaps back to the
// scheduler context for the last time.
void task_start_wrapper(spawn_args *a)
{
    rust_task *task = a->task;

    bool threw_exception = false;
    try {
        // The first argument is the return pointer; as the task fn
        // must have void return type, we can safely pass 0.
        a->f(0, a->envptr, a->argptr);
    } catch (rust_task *ex) {
        assert(ex == task && "Expected this task to be thrown for unwinding");
        threw_exception = true;

        if (task->c_stack) {
            task->return_c_stack();
        }

        // Since we call glue code below we need to make sure we
        // have the stack limit set up correctly
        task->reset_stack_limit();
    }

    // We should have returned any C stack by now
    assert(task->c_stack == NULL);

    rust_opaque_box* env = a->envptr;
    if(env) {
        // free the environment (which should be a unique closure).
        const type_desc *td = env->td;
        td->drop_glue(NULL, NULL, NULL, box_body(env));
        upcall_exchange_free(env);
    }

    // The cleanup work needs lots of stack
    cleanup_args ca = {a, threw_exception};
    task->call_on_c_stack(&ca, (void*)cleanup_task);

    // Hand control back to the scheduler; this task never runs again.
    task->ctx.next->swap(task->ctx);
}
  164. void
  165. rust_task::start(spawn_fn spawnee_fn,
  166. rust_opaque_box *envptr,
  167. void *argptr)
  168. {
  169. LOG(this, task, "starting task from fn 0x%" PRIxPTR
  170. " with env 0x%" PRIxPTR " and arg 0x%" PRIxPTR,
  171. spawnee_fn, envptr, argptr);
  172. assert(stk->data != NULL);
  173. char *sp = (char *)stk->end;
  174. sp -= sizeof(spawn_args);
  175. spawn_args *a = (spawn_args *)sp;
  176. a->task = this;
  177. a->envptr = envptr;
  178. a->argptr = argptr;
  179. a->f = spawnee_fn;
  180. ctx.call((void *)task_start_wrapper, a, sp);
  181. this->start();
  182. }
// Move a newborn task into the running state so the scheduler will begin
// executing it.
void rust_task::start()
{
    transition(task_state_newborn, task_state_running, NULL, "none");
}
// True if this task has been killed and must now unwind. Takes the
// lifecycle lock.
bool
rust_task::must_fail_from_being_killed() {
    scoped_lock with(lifecycle_lock);
    return must_fail_from_being_killed_inner();
}

// As above, but the caller must already hold the lifecycle lock. A kill
// only takes effect while the task is not re-entering the Rust stack and
// kills are not inhibited (see inhibit_kill()).
bool
rust_task::must_fail_from_being_killed_inner() {
    lifecycle_lock.must_have_lock();
    return killed && !reentered_rust_stack && disallow_kill == 0;
}
  197. void rust_task_yield_fail(rust_task *task) {
  198. LOG_ERR(task, task, "task %" PRIxPTR " yielded in an atomic section",
  199. task);
  200. task->fail();
  201. }
// Only run this on the rust stack

// Yield control back to the scheduler. Returns true if the task was
// killed and must now unwind — callers MUST check (MUST_CHECK).
MUST_CHECK bool rust_task::yield() {
    bool killed = false;

    // Yielding while yields are inhibited (an atomic section) fails the
    // task; the failure path must run on the C stack.
    if (disallow_yield > 0) {
        call_on_c_stack(this, (void *)rust_task_yield_fail);
    }

    // This check is largely superfluous; it's the one after the context swap
    // that really matters. This one allows us to assert a useful invariant.
    if (must_fail_from_being_killed()) {
        {
            scoped_lock with(lifecycle_lock);
            assert(!(state == task_state_blocked));
        }
        killed = true;
    }

    // Return to the scheduler.
    ctx.next->swap(ctx);

    // We have been rescheduled: a kill may have arrived while descheduled.
    if (must_fail_from_being_killed()) {
        killed = true;
    }

    return killed;
}
  224. void
  225. rust_task::kill() {
  226. scoped_lock with(lifecycle_lock);
  227. kill_inner();
  228. }
  229. void rust_task::kill_inner() {
  230. lifecycle_lock.must_have_lock();
  231. // Multiple kills should be able to safely race, but check anyway.
  232. if (killed) {
  233. LOG(this, task, "task %s @0x%" PRIxPTR " already killed", name, this);
  234. return;
  235. }
  236. // Note the distinction here: kill() is when you're in an upcall
  237. // from task A and want to force-fail task B, you do B->kill().
  238. // If you want to fail yourself you do self->fail().
  239. LOG(this, task, "killing task %s @0x%" PRIxPTR, name, this);
  240. // When the task next goes to yield or resume it will fail
  241. killed = true;
  242. // Unblock the task so it can unwind.
  243. if (state == task_state_blocked &&
  244. must_fail_from_being_killed_inner()) {
  245. wakeup_inner(cond);
  246. }
  247. LOG(this, task, "preparing to unwind task: 0x%" PRIxPTR, this);
  248. }
// Fail the current task with no source-location information.
void
rust_task::fail() {
    // See note in ::kill() regarding who should call this.
    fail(NULL, NULL, 0);
}

// Fail the current task, recording the failing expression and location.
void
rust_task::fail(char const *expr, char const *file, size_t line) {
    rust_task_fail(this, expr, file, line);
}
  258. // Called only by rust_task_fail
  259. void
  260. rust_task::begin_failure(char const *expr, char const *file, size_t line) {
  261. if (expr) {
  262. LOG_ERR(this, task, "task failed at '%s', %s:%" PRIdPTR,
  263. expr, file, line);
  264. }
  265. DLOG(sched_loop, task, "task %s @0x%" PRIxPTR " failing", name, this);
  266. backtrace();
  267. unwinding = true;
  268. #ifndef __WIN32__
  269. throw this;
  270. #else
  271. die();
  272. // FIXME (#908): Need unwinding on windows. This will end up aborting
  273. fail_sched_loop();
  274. #endif
  275. }
// Propagate this task's failure to its scheduler loop.
void rust_task::fail_sched_loop() {
    sched_loop->fail();
}

// Return the frame glue functions recorded just below the given frame
// pointer. NOTE(review): assumes the code generator stores a
// frame_glue_fns pointer one word below fp — verify against codegen.
frame_glue_fns*
rust_task::get_frame_glue_fns(uintptr_t fp) {
    fp -= sizeof(uintptr_t);
    return *((frame_glue_fns**) fp);
}
// Debug helper: assert, under the lifecycle lock, that this task is in
// the running state.
void rust_task::assert_is_running()
{
    scoped_lock with(lifecycle_lock);
    assert(state == task_state_running);
}

// FIXME (#2851) Remove this code when rust_port goes away?

// True if this task is currently blocked on exactly the given condition.
// Caller must already hold the lifecycle lock.
bool
rust_task::blocked_on(rust_cond *on)
{
    lifecycle_lock.must_have_lock();
    return cond == on;
}
// Allocate `sz` bytes from the task-local region. `tag` labels the
// allocation for memory debugging; the type descriptor `td` is accepted
// for interface compatibility but unused here.
void *
rust_task::malloc(size_t sz, const char *tag, type_desc *td)
{
    return local_region.malloc(sz, tag);
}

// Resize a task-local allocation.
void *
rust_task::realloc(void *data, size_t sz)
{
    return local_region.realloc(data, sz);
}

// Release a task-local allocation.
void
rust_task::free(void *p)
{
    local_region.free(p);
}
// Atomically move this task from state `src` to `dst`, recording the
// condition (if any) it blocks on. Takes the lifecycle lock.
void
rust_task::transition(rust_task_state src, rust_task_state dst,
                      rust_cond *cond, const char* cond_name) {
    scoped_lock with(lifecycle_lock);
    transition_inner(src, dst, cond, cond_name);
}

// As transition(), but the caller must already hold the lifecycle lock.
// The scheduler loop performs the actual bookkeeping and calls back into
// set_state().
void rust_task::transition_inner(rust_task_state src, rust_task_state dst,
                                 rust_cond *cond, const char* cond_name) {
    lifecycle_lock.must_have_lock();
    sched_loop->transition(this, src, dst, cond, cond_name);
}

// Record the new state and blocking condition. Lifecycle lock must be
// held; invoked by the scheduler loop during a transition.
void
rust_task::set_state(rust_task_state state,
                     rust_cond *cond, const char* cond_name) {
    lifecycle_lock.must_have_lock();
    this->state = state;
    this->cond = cond;
    this->cond_name = cond_name;
}
// Block this task on condition `on`. Returns false if the task is already
// doomed by a kill and must fail instead of blocking. Takes the lifecycle
// lock.
bool
rust_task::block(rust_cond *on, const char* name) {
    scoped_lock with(lifecycle_lock);
    return block_inner(on, name);
}

// As block(), but the caller must already hold the lifecycle lock.
bool
rust_task::block_inner(rust_cond *on, const char* name) {
    if (must_fail_from_being_killed_inner()) {
        // We're already going to die. Don't block. Tell the task to fail
        return false;
    }

    LOG(this, task, "Blocking on 0x%" PRIxPTR ", cond: 0x%" PRIxPTR,
        (uintptr_t) on, (uintptr_t) cond);
    assert(cond == NULL && "Cannot block an already blocked task.");
    assert(on != NULL && "Cannot block on a NULL object.");

    transition_inner(task_state_running, task_state_blocked, on, name);

    return true;
}
// Wake this task from a blocked state. Takes the lifecycle lock.
void
rust_task::wakeup(rust_cond *from) {
    scoped_lock with(lifecycle_lock);
    wakeup_inner(from);
}

// As wakeup(), but the caller must already hold the lifecycle lock.
// `from` must be the condition the task actually blocked on (asserted).
void
rust_task::wakeup_inner(rust_cond *from) {
    assert(cond != NULL && "Cannot wake up unblocked task.");
    LOG(this, task, "Blocked on 0x%" PRIxPTR " woken up on 0x%" PRIxPTR,
        (uintptr_t) cond, (uintptr_t) from);
    assert(cond == from && "Cannot wake up blocked task on wrong condition.");

    transition_inner(task_state_blocked, task_state_running, NULL, "none");
}

// Mark this task dead; must be in the running state when called.
void
rust_task::die() {
    transition(task_state_running, task_state_dead, NULL, "none");
}
  365. void
  366. rust_task::backtrace() {
  367. if (!log_rt_backtrace) return;
  368. #ifndef __WIN32__
  369. void *call_stack[256];
  370. int nframes = ::backtrace(call_stack, 256);
  371. backtrace_symbols_fd(call_stack + 1, nframes - 1, 2);
  372. #endif
  373. }
// Zero-initialized allocation from the task-local region.
void *
rust_task::calloc(size_t size, const char *tag) {
    return local_region.calloc(size, tag);
}
  378. size_t
  379. rust_task::get_next_stack_size(size_t min, size_t current, size_t requested) {
  380. LOG(this, mem, "calculating new stack size for 0x%" PRIxPTR, this);
  381. LOG(this, mem,
  382. "min: %" PRIdPTR " current: %" PRIdPTR " requested: %" PRIdPTR,
  383. min, current, requested);
  384. // Allocate at least enough to accomodate the next frame
  385. size_t sz = std::max(min, requested);
  386. // And double the stack size each allocation
  387. const size_t max = 1024 * 1024;
  388. size_t next = std::min(max, current * 2);
  389. sz = std::max(sz, next);
  390. LOG(this, mem, "next stack size: %" PRIdPTR, sz);
  391. assert(requested <= sz);
  392. return sz;
  393. }
// Return a stack segment to the region allocator and subtract its usable
// size from the task's running stack total.
void
rust_task::free_stack(stk_seg *stk) {
    LOGPTR(sched_loop, "freeing stk segment", (uintptr_t)stk);
    total_stack_sz -= user_stack_size(stk);
    destroy_stack(&local_region, stk);
}

// Out-of-line helper that grows a task's stack with arguments bundled in
// a struct. NOTE(review): presumably invoked from the __morestack /
// C-stack path — confirm against the stack-growth call sites.
void
new_stack_slow(new_stack_args *args) {
    args->task->new_stack(args->requested_sz);
}
// Make a stack segment of at least `requested_sz` usable bytes current:
// reuse a sufficiently large cached successor segment if one exists,
// otherwise allocate a new one (freeing undersized cached segments along
// the way). Fails the task when the total stack budget is exceeded, with
// 2x headroom allowed during unwinding.
void
rust_task::new_stack(size_t requested_sz) {
    LOG(this, mem, "creating new stack for task %" PRIxPTR, this);
    if (stk) {
        ::check_stack_canary(stk);
    }

    // The minimum stack size, in bytes, of a Rust stack, excluding red zone
    size_t min_sz = sched_loop->min_stack_size;

    // Try to reuse an existing stack segment
    while (stk != NULL && stk->next != NULL) {
        size_t next_sz = user_stack_size(stk->next);
        if (min_sz <= next_sz && requested_sz <= next_sz) {
            LOG(this, mem, "reusing existing stack");
            stk = stk->next;
            return;
        } else {
            LOG(this, mem, "existing stack is not big enough");
            // Unlink and free the undersized segment, keeping the list
            // consistent before the next iteration.
            stk_seg *new_next = stk->next->next;
            free_stack(stk->next);
            stk->next = new_next;
            if (new_next) {
                new_next->prev = stk;
            }
        }
    }

    // The size of the current stack segment, excluding red zone
    size_t current_sz = 0;
    if (stk != NULL) {
        current_sz = user_stack_size(stk);
    }
    // The calculated size of the new stack, excluding red zone
    size_t rust_stk_sz = get_next_stack_size(min_sz,
                                             current_sz, requested_sz);

    size_t max_stack = kernel->env->max_stack_size;
    size_t used_stack = total_stack_sz + rust_stk_sz;

    // Don't allow stacks to grow forever. During unwinding we have to allow
    // for more stack than normal in order to allow destructors room to run,
    // arbitrarily selected as 2x the maximum stack size.
    if (!unwinding && used_stack > max_stack) {
        LOG_ERR(this, task, "task %" PRIxPTR " ran out of stack", this);
        fail();
    } else if (unwinding && used_stack > max_stack * 2) {
        LOG_ERR(this, task,
                "task %" PRIxPTR " ran out of stack during unwinding", this);
        fail();
    }

    // Allocate and link the new segment at the head of the list.
    size_t sz = rust_stk_sz + RED_ZONE_SIZE;
    stk_seg *new_stk = create_stack(&local_region, sz);
    LOGPTR(sched_loop, "new stk", (uintptr_t)new_stk);
    new_stk->task = this;
    new_stk->next = NULL;
    new_stk->prev = stk;
    if (stk) {
        stk->next = new_stk;
    }
    LOGPTR(sched_loop, "stk end", new_stk->end);
    stk = new_stk;
    total_stack_sz += user_stack_size(new_stk);
}
  463. void
  464. rust_task::cleanup_after_turn() {
  465. // Delete any spare stack segments that were left
  466. // behind by calls to prev_stack
  467. assert(stk);
  468. while (stk->next) {
  469. stk_seg *new_next = stk->next->next;
  470. free_stack(stk->next);
  471. stk->next = new_next;
  472. }
  473. }
  474. static bool
  475. sp_in_stk_seg(uintptr_t sp, stk_seg *stk) {
  476. // Not positive these bounds for sp are correct. I think that the first
  477. // possible value for esp on a new stack is stk->end, which points to the
  478. // address before the first value to be pushed onto a new stack. The last
  479. // possible address we can push data to is stk->data. Regardless, there's
  480. // so much slop at either end that we should never hit one of these
  481. // boundaries.
  482. return (uintptr_t)stk->data <= sp && sp <= stk->end;
  483. }
/*
Called by landing pads during unwinding to figure out which stack segment we
are currently running on and record the stack limit (which was not restored
when unwinding through __morestack).
*/
void
rust_task::reset_stack_limit() {
    uintptr_t sp = get_sp();
    // Walk backwards from the newest segment until we find the one that
    // contains the current stack pointer; asserts if none does.
    while (!sp_in_stk_seg(sp, stk)) {
        stk = stk->prev;
        assert(stk != NULL && "Failed to find the current stack");
    }
    record_stack_limit();
}
// Verify the current stack segment's canary (delegates to the global
// helper of the same name).
void
rust_task::check_stack_canary() {
    ::check_stack_canary(stk);
}
// Free every stack segment owned by this task, newest first. Must not be
// called while running on the Rust stack (asserted).
void
rust_task::delete_all_stacks() {
    assert(!on_rust_stack());
    // Delete all the stacks. There may be more than one if the task failed
    // and no landing pads stopped to clean up.
    assert(stk->next == NULL);
    while (stk != NULL) {
        stk_seg *prev = stk->prev;
        free_stack(stk);
        stk = prev;
    }
}
  514. /*
  515. Returns true if we're currently running on the Rust stack
  516. */
  517. bool
  518. rust_task::on_rust_stack() {
  519. if (stk == NULL) {
  520. // This only happens during construction
  521. return false;
  522. }
  523. uintptr_t sp = get_sp();
  524. bool in_first_segment = sp_in_stk_seg(sp, stk);
  525. if (in_first_segment) {
  526. return true;
  527. } else if (stk->prev != NULL) {
  528. // This happens only when calling the upcall to delete
  529. // a stack segment
  530. bool in_second_segment = sp_in_stk_seg(sp, stk->prev);
  531. return in_second_segment;
  532. } else {
  533. return false;
  534. }
  535. }
// Temporarily prevent this task from being killed; see
// must_fail_from_being_killed_inner(). Takes the lifecycle lock.
void
rust_task::inhibit_kill() {
    scoped_lock with(lifecycle_lock);
    // Here might be good, though not mandatory, to check if we have to die.
    disallow_kill++;
}

// Undo one inhibit_kill(); calls must be balanced (asserted).
void
rust_task::allow_kill() {
    scoped_lock with(lifecycle_lock);
    assert(disallow_kill > 0 && "Illegal allow_kill(): already killable!");
    disallow_kill--;
}

// Temporarily forbid yielding (an atomic section); yielding while
// inhibited fails the task (see yield()).
void rust_task::inhibit_yield() {
    scoped_lock with(lifecycle_lock);
    disallow_yield++;
}

// Undo one inhibit_yield(); calls must be balanced (asserted).
void rust_task::allow_yield() {
    scoped_lock with(lifecycle_lock);
    assert(disallow_yield > 0 && "Illegal allow_yield(): already yieldable!");
    disallow_yield--;
}
// Block until signal_event() delivers a payload (or one was already
// delivered, indicated by event_reject). Stores the payload in *result
// and returns true if the task was killed while waiting — callers MUST
// check (MUST_CHECK).
MUST_CHECK bool rust_task::wait_event(void **result) {
    bool killed = false;
    scoped_lock with(lifecycle_lock);

    if(!event_reject) {
        block_inner(&event_cond, "waiting on event");
        // Drop the lock across the yield so signal_event() can run and
        // wake us; reacquire before touching event state again.
        lifecycle_lock.unlock();
        killed = yield();
        lifecycle_lock.lock();
    } else if (must_fail_from_being_killed_inner()) {
        // If the deschedule was rejected, yield won't do our killed check for
        // us. For thoroughness, do it here. FIXME (#524)
        killed = true;
    }

    event_reject = false;
    *result = event;
    return killed;
}
// Deliver an event payload to this task and mark it delivered
// (event_reject) so a racing wait_event() will not block. If the task is
// blocked, wake it (wakeup_inner asserts it is blocked on event_cond).
void
rust_task::signal_event(void *event) {
    scoped_lock with(lifecycle_lock);

    this->event = event;
    event_reject = true;
    if(task_state_blocked == state) {
        wakeup_inner(&event_cond);
    }
}
  583. //
  584. // Local Variables:
  585. // mode: C++
  586. // fill-column: 78;
  587. // indent-tabs-mode: nil
  588. // c-basic-offset: 4
  589. // buffer-file-coding-system: utf-8-unix
  590. // End:
  591. //