/src/rt/rust_kernel.cpp

#include "rust_kernel.h"
#include "rust_port.h"
#include "rust_util.h"
#include "rust_scheduler.h"
#include "rust_sched_launcher.h"
#include <algorithm>

#define KLOG_(...) \
    KLOG(this, kern, __VA_ARGS__)
#define KLOG_ERR_(field, ...) \
    KLOG_LVL(this, field, log_err, __VA_ARGS__)

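// rust_kernel is the per-process core of the runtime. Judging from the
// members initialized below, it owns the kernel memory region, the
// scheduler table (plus the reaper thread that joins exited schedulers),
// the global port table, the id counters for tasks, ports, and
// schedulers, and the process exit status (rval).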
rust_kernel::rust_kernel(rust_env *env) :
    _region(env, true),
    _log(NULL),
    max_task_id(INIT_TASK_ID-1), // sync_add_and_fetch increments first
    max_port_id(1),
    rval(0),
    max_sched_id(1),
    killed(false),
    sched_reaper(this),
    osmain_driver(NULL),
    non_weak_tasks(0),
    global_loop_chan(0),
    global_env_chan(0),
    env(env)
{
    // Create the single-threaded scheduler that will run on the platform's
    // main thread
    rust_manual_sched_launcher_factory launchfac;
    osmain_scheduler = create_scheduler(&launchfac, 1, false);
    osmain_driver = launchfac.get_driver();
    sched_reaper.start();
}

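// Format a message into a fixed-size stack buffer and hand it to the
// logger; vsnprintf truncates anything longer than BUF_BYTES.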
void
rust_kernel::log(uint32_t level, char const *fmt, ...) {
    char buf[BUF_BYTES];
    va_list args;
    va_start(args, fmt);
    vsnprintf(buf, sizeof(buf), fmt, args);
    _log.trace_ln(NULL, level, buf);
    va_end(args);
}

void
rust_kernel::fatal(char const *fmt, ...) {
    char buf[BUF_BYTES];
    va_list args;
    va_start(args, fmt);
    vsnprintf(buf, sizeof(buf), fmt, args);
    _log.trace_ln(NULL, (uint32_t)0, buf);
    va_end(args); // clean up varargs before exiting
    exit(1);
}

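// The kernel allocators are thin wrappers around the kernel's
// memory_region (_region); the tag strings appear to be used for
// allocation tracking and debug logging.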
void *
rust_kernel::malloc(size_t size, const char *tag) {
    return _region.malloc(size, tag);
}

void *
rust_kernel::calloc(size_t size, const char *tag) {
    return _region.calloc(size, tag);
}

void *
rust_kernel::realloc(void *mem, size_t size) {
    return _region.realloc(mem, size);
}

void rust_kernel::free(void *mem) {
    _region.free(mem);
}

rust_sched_id
rust_kernel::create_scheduler(size_t num_threads) {
    rust_thread_sched_launcher_factory launchfac;
    return create_scheduler(&launchfac, num_threads, true);
}

rust_sched_id
rust_kernel::create_scheduler(rust_sched_launcher_factory *launchfac,
                              size_t num_threads, bool allow_exit) {
    rust_sched_id id;
    rust_scheduler *sched;
    {
        scoped_lock with(sched_lock);
        if (sched_table.size() == 1) {
            // The OS main scheduler may not exit while there are other
            // schedulers
            KLOG_("Disallowing osmain scheduler to exit");
            rust_scheduler *sched =
                get_scheduler_by_id_nolock(osmain_scheduler);
            assert(sched != NULL);
            sched->disallow_exit();
        }
        id = max_sched_id++;
        assert(id != INTPTR_MAX && "Hit the maximum scheduler id");
        sched = new (this, "rust_scheduler")
            rust_scheduler(this, num_threads, id, allow_exit, killed,
                           launchfac);
        bool is_new = sched_table
            .insert(std::pair<rust_sched_id,
                              rust_scheduler*>(id, sched)).second;
        assert(is_new && "Reusing a sched id?");
    }
    sched->start_task_threads();
    return id;
}

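// Scheduler id 0 is never assigned (max_sched_id starts at 1), so it
// doubles as a "no scheduler" sentinel; lookups must hold sched_lock.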
rust_scheduler *
rust_kernel::get_scheduler_by_id(rust_sched_id id) {
    scoped_lock with(sched_lock);
    return get_scheduler_by_id_nolock(id);
}

rust_scheduler *
rust_kernel::get_scheduler_by_id_nolock(rust_sched_id id) {
    if (id == 0) {
        return NULL;
    }
    sched_lock.must_have_lock();
    sched_map::iterator iter = sched_table.find(id);
    if (iter != sched_table.end()) {
        return iter->second;
    } else {
        return NULL;
    }
}

void
rust_kernel::release_scheduler_id(rust_sched_id id) {
    scoped_lock with(sched_lock);
    join_list.push_back(id);
    sched_lock.signal();
}

/*
Called by rust_sched_reaper to join every terminating scheduler thread,
so that we can be sure they have completely exited before the process
exits. If we don't join them then we can see valgrind errors due to
un-freed pthread memory.
*/
void
rust_kernel::wait_for_schedulers()
{
    scoped_lock with(sched_lock);
    while (!sched_table.empty()) {
        while (!join_list.empty()) {
            rust_sched_id id = join_list.back();
            KLOG_("Deleting scheduler %" PRIdPTR, id);
            join_list.pop_back();
            sched_map::iterator iter = sched_table.find(id);
            assert(iter != sched_table.end());
            rust_scheduler *sched = iter->second;
            sched_table.erase(iter);
            sched->join_task_threads();
            sched->deref();
            if (sched_table.size() == 1) {
                KLOG_("Allowing osmain scheduler to exit");
                // It's only the osmain scheduler left. Tell it to exit
                rust_scheduler *sched =
                    get_scheduler_by_id_nolock(osmain_scheduler);
                assert(sched != NULL);
                sched->allow_exit();
            }
        }
        if (!sched_table.empty()) {
            sched_lock.wait();
        }
    }
}

/* Called on the main thread to run the osmain scheduler to completion,
   then wait for schedulers to exit */
int
rust_kernel::run() {
    assert(osmain_driver != NULL);
    osmain_driver->start_main_loop();
    sched_reaper.join();
    return rval;
}

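// A minimal sketch of how an embedder might drive the kernel, assuming
// a hypothetical entry point (the real one lives in the runtime's
// startup glue, not in this file):
//
//   rust_env *env = load_env();                 // hypothetical env setup
//   rust_kernel *kernel = new rust_kernel(env);
//   rust_sched_id id = kernel->create_scheduler(num_threads);
//   /* ...spawn the root task on scheduler `id`... */
//   int ret = kernel->run(); // blocks until every scheduler has exited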
void
rust_kernel::fail() {
    // FIXME (#908): On windows we're getting "Application has
    // requested the Runtime to terminate it in an unusual way" when
    // trying to shutdown cleanly.
    set_exit_status(PROC_FAIL_CODE);
#if defined(__WIN32__)
    exit(rval);
#endif
    // I think this only needs to be done by one task ever; as it is,
    // multiple tasks invoking kill_all might get here. Currently libcore
    // ensures only one task will ever invoke it, but this would really be
    // fine either way, so I'm leaving it as it is. -- bblum

    // Copy the list of schedulers so that we don't hold the lock while
    // running kill_all_tasks. Refcount to ensure they stay alive.
    std::vector<rust_scheduler*> scheds;
    {
        scoped_lock with(sched_lock);
        // All schedulers created after this flag is set will be doomed.
        killed = true;
        for (sched_map::iterator iter = sched_table.begin();
             iter != sched_table.end(); iter++) {
            iter->second->ref();
            scheds.push_back(iter->second);
        }
    }
    for (std::vector<rust_scheduler*>::iterator iter = scheds.begin();
         iter != scheds.end(); iter++) {
        (*iter)->kill_all_tasks();
        (*iter)->deref();
    }
}

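// Task ids are allocated by atomic increment, so no lock is needed;
// the constructor seeds max_task_id at INIT_TASK_ID-1 so the first id
// handed out is INIT_TASK_ID.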
rust_task_id
rust_kernel::generate_task_id() {
    rust_task_id id = sync::increment(max_task_id);
    assert(id != INTPTR_MAX && "Hit the maximum task id");
    return id;
}

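// The port table maps process-unique port ids to rust_port objects.
// All table access goes through port_lock; get_port_by_id additionally
// takes out a reference that the caller must drop with deref() (see
// send_to_port below).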
rust_port_id
rust_kernel::register_port(rust_port *port) {
    uintptr_t new_live_ports;
    rust_port_id new_port_id;
    {
        scoped_lock with(port_lock);
        new_port_id = max_port_id++;
        port_table.put(new_port_id, port);
        new_live_ports = port_table.count();
    }
    assert(new_port_id != INTPTR_MAX && "Hit the maximum port id");
    KLOG_("Registered port %" PRIdPTR, new_port_id);
    KLOG_("Total outstanding ports: %" PRIuPTR, new_live_ports);
    return new_port_id;
}

void
rust_kernel::release_port_id(rust_port_id id) {
    KLOG_("Releasing port %" PRIdPTR, id);
    uintptr_t new_live_ports;
    {
        scoped_lock with(port_lock);
        port_table.remove(id);
        new_live_ports = port_table.count();
    }
    KLOG_("Total outstanding ports: %" PRIuPTR, new_live_ports);
}

rust_port *
rust_kernel::get_port_by_id(rust_port_id id) {
    assert(id != 0 && "invalid port id");
    scoped_lock with(port_lock);
    rust_port *port = NULL;
    // get leaves port unchanged if not found.
    port_table.get(id, &port);
    if (port) {
        port->ref();
    }
    return port;
}

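// Windows-only helper: asserts that a Win32 API call succeeded, first
// logging the system's error message for GetLastError().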
#ifdef __WIN32__
void
rust_kernel::win32_require(LPCTSTR fn, BOOL ok) {
    if (!ok) {
        LPTSTR buf;
        DWORD err = GetLastError();
        FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL, err,
                      MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
                      (LPTSTR) &buf, 0, NULL);
        KLOG_ERR_(dom, "%s failed with error %ld: %s", fn, err, buf);
        LocalFree((HLOCAL)buf);
        assert(ok);
    }
}
#endif

void
rust_kernel::set_exit_status(int code) {
    scoped_lock with(rval_lock);
    // If we've already failed then that's the code we're going to use
    if (rval != PROC_FAIL_CODE) {
        rval = code;
    }
}

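// Weak-task accounting: the kernel counts "non-weak" tasks, i.e. tasks
// that keep the runtime alive. A task weakens itself by registering a
// channel; when the non-weak count drops to zero, end_weak_tasks()
// sends a token on each registered channel so the weak tasks know to
// shut down.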
void
rust_kernel::register_task() {
    KLOG_("Registering task");
    uintptr_t new_non_weak_tasks = sync::increment(non_weak_tasks);
    KLOG_("New non-weak tasks %" PRIdPTR, new_non_weak_tasks);
}

void
rust_kernel::unregister_task() {
    KLOG_("Unregistering task");
    uintptr_t new_non_weak_tasks = sync::decrement(non_weak_tasks);
    KLOG_("New non-weak tasks %" PRIdPTR, new_non_weak_tasks);
    if (new_non_weak_tasks == 0) {
        end_weak_tasks();
    }
}

void
rust_kernel::weaken_task(rust_port_id chan) {
    {
        scoped_lock with(weak_task_lock);
        KLOG_("Weakening task with channel %" PRIdPTR, chan);
        weak_task_chans.push_back(chan);
    }
    uintptr_t new_non_weak_tasks = sync::decrement(non_weak_tasks);
    KLOG_("New non-weak tasks %" PRIdPTR, new_non_weak_tasks);
    if (new_non_weak_tasks == 0) {
        end_weak_tasks();
    }
}

void
rust_kernel::unweaken_task(rust_port_id chan) {
    uintptr_t new_non_weak_tasks = sync::increment(non_weak_tasks);
    KLOG_("New non-weak tasks %" PRIdPTR, new_non_weak_tasks);
    {
        scoped_lock with(weak_task_lock);
        KLOG_("Unweakening task with channel %" PRIdPTR, chan);
        std::vector<rust_port_id>::iterator iter =
            std::find(weak_task_chans.begin(), weak_task_chans.end(), chan);
        if (iter != weak_task_chans.end()) {
            weak_task_chans.erase(iter);
        }
    }
}

void
rust_kernel::end_weak_tasks() {
    std::vector<rust_port_id> chancopies;
    {
        scoped_lock with(weak_task_lock);
        chancopies = weak_task_chans;
        weak_task_chans.clear();
    }
    while (!chancopies.empty()) {
        rust_port_id chan = chancopies.back();
        chancopies.pop_back();
        KLOG_("Notifying weak task %" PRIdPTR, chan);
        uintptr_t token = 0;
        send_to_port(chan, &token);
    }
}

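// Deliver a message to a port by id. Returns false if the port has
// already been released; the ref/deref pair keeps the port alive for
// the duration of the send.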
bool
rust_kernel::send_to_port(rust_port_id chan, void *sptr) {
    KLOG_("rust_port_id*_send port: 0x%" PRIxPTR, (uintptr_t) chan);
    rust_port *port = get_port_by_id(chan);
    if (port) {
        port->send(sptr);
        port->deref();
        return true;
    } else {
        KLOG_("didn't get the port");
        return false;
    }
}

//
// Local Variables:
// mode: C++
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//