/mordor/parallel.cpp
Possible License(s): BSD-3-Clause
// Copyright (c) 2009 - Mozy, Inc.

#include "parallel.h"

#include <boost/scoped_ptr.hpp>

#include "assert.h"
#include "atomic.h"
#include "fibersynchronization.h"

namespace Mordor {

static Logger::ptr g_log = Log::lookup("mordor:parallel");

// Runs a single delegate, recording any exception it throws; when the last
// delegate in the batch completes, the calling fiber is rescheduled.
static
void
parallel_do_impl(boost::function<void ()> dg, size_t &completed,
    size_t total, boost::exception_ptr &exception, Scheduler *scheduler,
    Fiber::ptr caller, FiberSemaphore *sem)
{
    if (sem)
        sem->wait();
    try {
        dg();
    } catch (boost::exception &ex) {
        removeTopFrames(ex);
        exception = boost::current_exception();
    } catch (...) {
        exception = boost::current_exception();
    }
    if (sem)
        sem->notify();
    if (atomicIncrement(completed) == total)
        scheduler->schedule(caller);
}

void
parallel_do(const std::vector<boost::function<void ()> > &dgs,
    int parallelism)
{
    size_t completed = 0;
    Scheduler *scheduler = Scheduler::getThis();
    Fiber::ptr caller = Fiber::getThis();
    std::vector<boost::function<void ()> >::const_iterator it;

    if (scheduler == NULL || dgs.size() <= 1) {
        for (it = dgs.begin(); it != dgs.end(); ++it) {
            (*it)();
        }
        return;
    }

    MORDOR_ASSERT(parallelism != 0);
    boost::scoped_ptr<FiberSemaphore> sem;
    if (parallelism != -1)
        sem.reset(new FiberSemaphore(parallelism));

    std::vector<Fiber::ptr> fibers;
    std::vector<boost::exception_ptr> exceptions;
    fibers.reserve(dgs.size());
    exceptions.resize(dgs.size());
    for (size_t i = 0; i < dgs.size(); ++i) {
        Fiber::ptr f(new Fiber(boost::bind(&parallel_do_impl, dgs[i],
            boost::ref(completed), dgs.size(), boost::ref(exceptions[i]),
            scheduler, caller, sem.get())));
        fibers.push_back(f);
        scheduler->schedule(f);
    }

    Scheduler::yieldTo();
    // Pass the first exception along
    // TODO: group exceptions?
    for (std::vector<boost::exception_ptr>::iterator it2 = exceptions.begin();
        it2 != exceptions.end();
        ++it2) {
        if (*it2)
            Mordor::rethrow_exception(*it2);
    }
}

void
parallel_do(const std::vector<boost::function<void ()> > &dgs,
    std::vector<Fiber::ptr> &fibers,
    int parallelism)
{
    MORDOR_ASSERT(fibers.size() >= dgs.size());
    size_t completed = 0;
    Scheduler *scheduler = Scheduler::getThis();
    Fiber::ptr caller = Fiber::getThis();
    std::vector<boost::function<void ()> >::const_iterator it;

    if (scheduler == NULL || dgs.size() <= 1) {
        for (it = dgs.begin(); it != dgs.end(); ++it) {
            (*it)();
        }
        return;
    }

    boost::scoped_ptr<FiberSemaphore> sem;
    MORDOR_ASSERT(parallelism != 0);
    if (parallelism != -1)
        sem.reset(new FiberSemaphore(parallelism));

    std::vector<boost::exception_ptr> exceptions;
    exceptions.resize(dgs.size());
    for (size_t i = 0; i < dgs.size(); ++i) {
        fibers[i]->reset(boost::bind(&parallel_do_impl, dgs[i],
            boost::ref(completed), dgs.size(), boost::ref(exceptions[i]),
            scheduler, caller, sem.get()));
        scheduler->schedule(fibers[i]);
    }
    Scheduler::yieldTo();
    // Make sure all fibers have actually exited, so the caller can't
    // immediately call Fiber::reset while a fiber is still running on a
    // different thread.
    for (size_t i = 0; i < dgs.size(); ++i)
        while (fibers[i]->state() == Fiber::EXEC) Scheduler::yield();

    // Pass the first exception along
    // TODO: group exceptions?
    for (size_t i = 0; i < dgs.size(); ++i) {
        if (exceptions[i])
            Mordor::rethrow_exception(exceptions[i]);
    }
}

namespace Detail {

Logger::ptr getLogger()
{
    return g_log;
}

}}
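
// ---------------------------------------------------------------------------
// Usage sketch (not part of the original file): a minimal example of driving
// parallel_do() from a scheduler-managed thread. It assumes Mordor's
// WorkerPool scheduler with its default behavior of registering the calling
// thread, so Scheduler::getThis() is non-NULL inside main(). The header
// paths, the WorkerPool constructor arguments, and the helper names `work`
// and `tasks` are illustrative assumptions, not taken from this file.

#include <vector>

#include <boost/bind.hpp>
#include <boost/function.hpp>

#include "mordor/parallel.h"
#include "mordor/workerpool.h"

static void work(int id)
{
    // Placeholder body; each call runs on its own fiber.
    (void)id;
}

int main()
{
    // Assumed: a pool with 2 worker threads that also uses the caller thread.
    Mordor::WorkerPool pool(2);

    std::vector<boost::function<void ()> > tasks;
    for (int i = 0; i < 4; ++i)
        tasks.push_back(boost::bind(&work, i));

    // Run every task on its own fiber, at most 2 in flight at a time
    // (pass -1 for unlimited). parallel_do() returns once all tasks have
    // completed and rethrows the first recorded exception, if any, in the
    // caller's context.
    Mordor::parallel_do(tasks, 2);
    return 0;
}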