PageRenderTime 54ms CodeModel.GetById 21ms app.highlight 28ms RepoModel.GetById 2ms app.codeStats 0ms

/src/rt/rust_kernel.cpp

http://github.com/jruderman/rust
C++ | 388 lines | 316 code | 38 blank | 34 comment | 42 complexity | 513ba331a9173f30f6e5221524c80d4e MD5 | raw file
  1
  2
  3#include "rust_kernel.h"
  4#include "rust_port.h"
  5#include "rust_util.h"
  6#include "rust_scheduler.h"
  7#include "rust_sched_launcher.h"
  8#include <algorithm>
  9
// Kernel-domain logging helpers: forward to the KLOG/KLOG_LVL macros with
// `this` (the kernel) as the logging context.
#define KLOG_(...)                              \
    KLOG(this, kern, __VA_ARGS__)
// Error-level variant; `field` selects the log domain (e.g. dom, kern).
#define KLOG_ERR_(field, ...)                   \
    KLOG_LVL(this, field, log_err, __VA_ARGS__)
 14
// Construct the kernel: initialize counters/locks, then create the
// single-threaded "osmain" scheduler that runs on the platform's main
// thread and start the scheduler reaper.
rust_kernel::rust_kernel(rust_env *env) :
    _region(env, true),
    _log(NULL),
    max_task_id(INIT_TASK_ID-1), // sync_add_and_fetch increments first
    max_port_id(1),
    rval(0),
    max_sched_id(1),
    killed(false),
    sched_reaper(this),
    osmain_driver(NULL),
    non_weak_tasks(0),
    global_loop_chan(0),
    global_env_chan(0),
    env(env)

{

    // Create the single threaded scheduler that will run on the platform's
    // main thread
    rust_manual_sched_launcher_factory launchfac;
    // allow_exit=false: the osmain scheduler must outlive all others; it is
    // only allowed to exit once the last other scheduler is reaped.
    osmain_scheduler = create_scheduler(&launchfac, 1, false);
    osmain_driver = launchfac.get_driver();
    sched_reaper.start();
}
 39
// Format a printf-style message into a stack buffer (truncated to
// BUF_BYTES) and emit it through the kernel logger at `level`.
void
rust_kernel::log(uint32_t level, char const *fmt, ...) {
    char buf[BUF_BYTES];
    va_list args;
    va_start(args, fmt);
    vsnprintf(buf, sizeof(buf), fmt, args);
    _log.trace_ln(NULL, level, buf);
    va_end(args);
}
 49
 50void
 51rust_kernel::fatal(char const *fmt, ...) {
 52    char buf[BUF_BYTES];
 53    va_list args;
 54    va_start(args, fmt);
 55    vsnprintf(buf, sizeof(buf), fmt, args);
 56    _log.trace_ln(NULL, (uint32_t)0, buf);
 57    exit(1);
 58    va_end(args);
 59}
 60
 61void *
 62rust_kernel::malloc(size_t size, const char *tag) {
 63    return _region.malloc(size, tag);
 64}
 65
 66void *
 67rust_kernel::calloc(size_t size, const char *tag) {
 68    return _region.calloc(size, tag);
 69}
 70
 71void *
 72rust_kernel::realloc(void *mem, size_t size) {
 73    return _region.realloc(mem, size);
 74}
 75
 76void rust_kernel::free(void *mem) {
 77    _region.free(mem);
 78}
 79
 80rust_sched_id
 81rust_kernel::create_scheduler(size_t num_threads) {
 82    rust_thread_sched_launcher_factory launchfac;
 83    return create_scheduler(&launchfac, num_threads, true);
 84}
 85
 86rust_sched_id
 87rust_kernel::create_scheduler(rust_sched_launcher_factory *launchfac,
 88                              size_t num_threads, bool allow_exit) {
 89    rust_sched_id id;
 90    rust_scheduler *sched;
 91    {
 92        scoped_lock with(sched_lock);
 93
 94        if (sched_table.size() == 1) {
 95            // The OS main scheduler may not exit while there are other
 96            // schedulers
 97            KLOG_("Disallowing osmain scheduler to exit");
 98            rust_scheduler *sched =
 99                get_scheduler_by_id_nolock(osmain_scheduler);
100            assert(sched != NULL);
101            sched->disallow_exit();
102        }
103
104        id = max_sched_id++;
105        assert(id != INTPTR_MAX && "Hit the maximum scheduler id");
106        sched = new (this, "rust_scheduler")
107            rust_scheduler(this, num_threads, id, allow_exit, killed,
108                           launchfac);
109        bool is_new = sched_table
110            .insert(std::pair<rust_sched_id,
111                              rust_scheduler*>(id, sched)).second;
112        assert(is_new && "Reusing a sched id?");
113    }
114    sched->start_task_threads();
115    return id;
116}
117
118rust_scheduler *
119rust_kernel::get_scheduler_by_id(rust_sched_id id) {
120    scoped_lock with(sched_lock);
121    return get_scheduler_by_id_nolock(id);
122}
123
124rust_scheduler *
125rust_kernel::get_scheduler_by_id_nolock(rust_sched_id id) {
126    if (id == 0) {
127        return NULL;
128    }
129    sched_lock.must_have_lock();
130    sched_map::iterator iter = sched_table.find(id);
131    if (iter != sched_table.end()) {
132        return iter->second;
133    } else {
134        return NULL;
135    }
136}
137
138void
139rust_kernel::release_scheduler_id(rust_sched_id id) {
140    scoped_lock with(sched_lock);
141    join_list.push_back(id);
142    sched_lock.signal();
143}
144
/*
Called by rust_sched_reaper to join every terminating scheduler thread,
so that we can be sure they have completely exited before the process exits.
If we don't join them then we can see valgrind errors due to un-freed pthread
memory.
 */
151void
152rust_kernel::wait_for_schedulers()
153{
154    scoped_lock with(sched_lock);
155    while (!sched_table.empty()) {
156        while (!join_list.empty()) {
157            rust_sched_id id = join_list.back();
158            KLOG_("Deleting scheduler %d", id);
159            join_list.pop_back();
160            sched_map::iterator iter = sched_table.find(id);
161            assert(iter != sched_table.end());
162            rust_scheduler *sched = iter->second;
163            sched_table.erase(iter);
164            sched->join_task_threads();
165            sched->deref();
166            if (sched_table.size() == 1) {
167                KLOG_("Allowing osmain scheduler to exit");
168                // It's only the osmain scheduler left. Tell it to exit
169                rust_scheduler *sched =
170                    get_scheduler_by_id_nolock(osmain_scheduler);
171                assert(sched != NULL);
172                sched->allow_exit();
173            }
174        }
175        if (!sched_table.empty()) {
176            sched_lock.wait();
177        }
178    }
179}
180
181/* Called on the main thread to run the osmain scheduler to completion,
182   then wait for schedulers to exit */
183int
184rust_kernel::run() {
185    assert(osmain_driver != NULL);
186    osmain_driver->start_main_loop();
187    sched_reaper.join();
188    return rval;
189}
190
// Fail the process: record the failure exit status, mark the kernel as
// killed, and kill every task on every scheduler. On Windows this exits
// immediately instead (see FIXME below).
void
rust_kernel::fail() {
    // FIXME (#908): On windows we're getting "Application has
    // requested the Runtime to terminate it in an unusual way" when
    // trying to shutdown cleanly.
    set_exit_status(PROC_FAIL_CODE);
#if defined(__WIN32__)
    exit(rval);
#endif
    // I think this only needs to be done by one task ever; as it is,
    // multiple tasks invoking kill_all might get here. Currently libcore
    // ensures only one task will ever invoke it, but this would really be
    // fine either way, so I'm leaving it as it is. -- bblum

    // Copy the list of schedulers so that we don't hold the lock while
    // running kill_all_tasks. Refcount to ensure they stay alive.
    std::vector<rust_scheduler*> scheds;
    {
        scoped_lock with(sched_lock);
        // All schedulers created after this flag is set will be doomed.
        killed = true;
        for (sched_map::iterator iter = sched_table.begin();
             iter != sched_table.end(); iter++) {
            iter->second->ref();
            scheds.push_back(iter->second);
        }
    }

    // Kill outside the lock, then drop the refs taken above.
    for (std::vector<rust_scheduler*>::iterator iter = scheds.begin();
         iter != scheds.end(); iter++) {
        (*iter)->kill_all_tasks();
        (*iter)->deref();
    }
}
225
226rust_task_id
227rust_kernel::generate_task_id() {
228    rust_task_id id = sync::increment(max_task_id);
229    assert(id != INTPTR_MAX && "Hit the maximum task id");
230    return id;
231}
232
233rust_port_id
234rust_kernel::register_port(rust_port *port) {
235    uintptr_t new_live_ports;
236    rust_port_id new_port_id;
237    {
238        scoped_lock with(port_lock);
239        new_port_id = max_port_id++;
240        port_table.put(new_port_id, port);
241        new_live_ports = port_table.count();
242    }
243    assert(new_port_id != INTPTR_MAX && "Hit the maximum port id");
244    KLOG_("Registered port %" PRIdPTR, new_port_id);
245    KLOG_("Total outstanding ports: %d", new_live_ports);
246    return new_port_id;
247}
248
249void
250rust_kernel::release_port_id(rust_port_id id) {
251    KLOG_("Releasing port %" PRIdPTR, id);
252    uintptr_t new_live_ports;
253    {
254        scoped_lock with(port_lock);
255        port_table.remove(id);
256        new_live_ports = port_table.count();
257    }
258    KLOG_("Total outstanding ports: %d", new_live_ports);
259}
260
261rust_port *
262rust_kernel::get_port_by_id(rust_port_id id) {
263    assert(id != 0 && "invalid port id");
264    scoped_lock with(port_lock);
265    rust_port *port = NULL;
266    // get leaves port unchanged if not found.
267    port_table.get(id, &port);
268    if(port) {
269        port->ref();
270    }
271    return port;
272}
273
#ifdef __WIN32__
// Assert that a Win32 call named `fn` succeeded (`ok` nonzero); on
// failure, format and log the system error message for GetLastError()
// before asserting.
void
rust_kernel::win32_require(LPCTSTR fn, BOOL ok) {
    if (!ok) {
        LPTSTR buf;
        DWORD err = GetLastError();
        // FORMAT_MESSAGE_ALLOCATE_BUFFER: the system allocates `buf`;
        // released below with LocalFree.
        FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL, err,
                      MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
                      (LPTSTR) &buf, 0, NULL );
        KLOG_ERR_(dom, "%s failed with error %ld: %s", fn, err, buf);
        LocalFree((HLOCAL)buf);
        assert(ok);
    }
}
#endif
292
293void
294rust_kernel::set_exit_status(int code) {
295    scoped_lock with(rval_lock);
296    // If we've already failed then that's the code we're going to use
297    if (rval != PROC_FAIL_CODE) {
298        rval = code;
299    }
300}
301
302void
303rust_kernel::register_task() {
304    KLOG_("Registering task");
305    uintptr_t new_non_weak_tasks = sync::increment(non_weak_tasks);
306    KLOG_("New non-weak tasks %" PRIdPTR, new_non_weak_tasks);
307}
308
309void
310rust_kernel::unregister_task() {
311    KLOG_("Unregistering task");
312    uintptr_t new_non_weak_tasks = sync::decrement(non_weak_tasks);
313    KLOG_("New non-weak tasks %" PRIdPTR, new_non_weak_tasks);
314    if (new_non_weak_tasks == 0) {
315        end_weak_tasks();
316    }
317}
318
319void
320rust_kernel::weaken_task(rust_port_id chan) {
321    {
322        scoped_lock with(weak_task_lock);
323        KLOG_("Weakening task with channel %" PRIdPTR, chan);
324        weak_task_chans.push_back(chan);
325    }
326    uintptr_t new_non_weak_tasks = sync::decrement(non_weak_tasks);
327    KLOG_("New non-weak tasks %" PRIdPTR, new_non_weak_tasks);
328    if (new_non_weak_tasks == 0) {
329        end_weak_tasks();
330    }
331}
332
333void
334rust_kernel::unweaken_task(rust_port_id chan) {
335    uintptr_t new_non_weak_tasks = sync::increment(non_weak_tasks);
336    KLOG_("New non-weak tasks %" PRIdPTR, new_non_weak_tasks);
337    {
338        scoped_lock with(weak_task_lock);
339        KLOG_("Unweakening task with channel %" PRIdPTR, chan);
340        std::vector<rust_port_id>::iterator iter =
341            std::find(weak_task_chans.begin(), weak_task_chans.end(), chan);
342        if (iter != weak_task_chans.end()) {
343            weak_task_chans.erase(iter);
344        }
345    }
346}
347
348void
349rust_kernel::end_weak_tasks() {
350    std::vector<rust_port_id> chancopies;
351    {
352        scoped_lock with(weak_task_lock);
353        chancopies = weak_task_chans;
354        weak_task_chans.clear();
355    }
356    while (!chancopies.empty()) {
357        rust_port_id chan = chancopies.back();
358        chancopies.pop_back();
359        KLOG_("Notifying weak task " PRIdPTR, chan);
360        uintptr_t token = 0;
361        send_to_port(chan, &token);
362    }
363}
364
365bool
366rust_kernel::send_to_port(rust_port_id chan, void *sptr) {
367    KLOG_("rust_port_id*_send port: 0x%" PRIxPTR, (uintptr_t) chan);
368
369    rust_port *port = get_port_by_id(chan);
370    if(port) {
371        port->send(sptr);
372        port->deref();
373        return true;
374    } else {
375        KLOG_("didn't get the port");
376        return false;
377    }
378}
379
380//
381// Local Variables:
382// mode: C++
383// fill-column: 78;
384// indent-tabs-mode: nil
385// c-basic-offset: 4
386// buffer-file-coding-system: utf-8-unix
387// End:
388//