PageRenderTime 308ms CodeModel.GetById 80ms app.highlight 166ms RepoModel.GetById 55ms app.codeStats 1ms

/mordor/tests/scheduler.cpp

http://github.com/mozy/mordor
C++ | 544 lines | 424 code | 72 blank | 48 comment | 18 complexity | 0ed3efba11d0fc67a5e111e6c1659fd0 MD5 | raw file
  1// Copyright (c) 2009 - Mozy, Inc.
  2
  3#include <boost/bind.hpp>
  4#include <boost/thread/mutex.hpp>
  5
  6#include "mordor/atomic.h"
  7#include "mordor/fiber.h"
  8#include "mordor/iomanager.h"
  9#include "mordor/parallel.h"
 10#include "mordor/sleep.h"
 11#include "mordor/test/test.h"
 12#include "mordor/workerpool.h"
 13#include "mordor/util.h"
 14
 15using namespace Mordor;
 16using namespace Mordor::Test;
 17
 18MORDOR_SUITE_INVARIANT(Scheduler)
 19{
 20    MORDOR_TEST_ASSERT(!Scheduler::getThis());
 21}
 22
 23
 24namespace {
 25    static void doNothing() { }
 26
 27    void throwException() { throw Exception(); }
 28
 29    void runOrException(int &i, int expectedValue, bool throwException)
 30    {
 31        MORDOR_LOG_DEBUG(::Mordor::Log::root()) << "set value: " << expectedValue;
 32        if (throwException)
 33            throw Exception();
 34        else
 35            i = expectedValue;
 36    }
 37
 38}
 39
 40// Stop can be called multiple times without consequence
 41MORDOR_UNITTEST(Scheduler, idempotentStopHijack)
 42{
 43    WorkerPool pool;
 44    pool.stop();
 45    pool.stop();
 46}
 47
 48MORDOR_UNITTEST(Scheduler, idempotentStopHybrid)
 49{
 50    WorkerPool pool(2);
 51    pool.stop();
 52    pool.stop();
 53}
 54
 55MORDOR_UNITTEST(Scheduler, idempotentStopSpawn)
 56{
 57    WorkerPool pool(1, false);
 58    pool.stop();
 59    pool.stop();
 60}
 61
 62// Start can be called multiple times without consequence
 63MORDOR_UNITTEST(Scheduler, idempotentStartHijack)
 64{
 65    WorkerPool pool;
 66    pool.start();
 67    pool.start();
 68}
 69
 70MORDOR_UNITTEST(Scheduler, idempotentStartHybrid)
 71{
 72    WorkerPool pool(2);
 73    pool.start();
 74    pool.start();
 75}
 76
 77MORDOR_UNITTEST(Scheduler, idempotentStartSpawn)
 78{
 79    WorkerPool pool(1, false);
 80    pool.start();
 81    pool.start();
 82}
 83
 84// When hijacking the calling thread, you can stop() from anywhere within
 85// it
 86MORDOR_UNITTEST(Scheduler, stopScheduledHijack)
 87{
 88    WorkerPool pool;
 89    pool.schedule(boost::bind(&Scheduler::stop, &pool));
 90    pool.dispatch();
 91}
 92
 93static void delayStop(Scheduler * s)
 94{
 95    // sleep 10 ms so that main thread can yieldTo() before
 96    // scheduled stop running, otherwise ASSERT will fail since
 97    // m_callingFiber not set in new thread
 98    Mordor::sleep(10);
 99    s->stop();
100}
101
102MORDOR_UNITTEST(Scheduler, stopScheduledHybrid)
103{
104    WorkerPool pool(2);
105    pool.schedule(boost::bind(&delayStop, &pool));
106    pool.yieldTo();
107}
108
109// When hijacking the calling thread, you don't need to explicitly start
110// or stop the scheduler; it starts on the first yieldTo, and stops on
111// destruction
112MORDOR_UNITTEST(Scheduler, hijackBasic)
113{
114    Fiber::ptr doNothingFiber(new Fiber(&doNothing));
115    WorkerPool pool;
116    MORDOR_TEST_ASSERT_EQUAL(Scheduler::getThis(), &pool);
117    pool.schedule(doNothingFiber);
118    MORDOR_TEST_ASSERT_EQUAL(doNothingFiber->state(), Fiber::INIT);
119    pool.dispatch();
120    MORDOR_TEST_ASSERT_EQUAL(doNothingFiber->state(), Fiber::TERM);
121}
122
123// Similar to above, but after the scheduler has stopped, yielding
124// to it again should implicitly restart it
125MORDOR_UNITTEST(Scheduler, hijackMultipleDispatch)
126{
127    Fiber::ptr doNothingFiber(new Fiber(&doNothing));
128    WorkerPool pool;
129    MORDOR_TEST_ASSERT_EQUAL(Scheduler::getThis(), &pool);
130    pool.schedule(doNothingFiber);
131    MORDOR_TEST_ASSERT_EQUAL(doNothingFiber->state(), Fiber::INIT);
132    pool.dispatch();
133    MORDOR_TEST_ASSERT_EQUAL(doNothingFiber->state(), Fiber::TERM);
134    doNothingFiber->reset(&doNothing);
135    pool.schedule(doNothingFiber);
136    MORDOR_TEST_ASSERT_EQUAL(doNothingFiber->state(), Fiber::INIT);
137    pool.dispatch();
138    MORDOR_TEST_ASSERT_EQUAL(doNothingFiber->state(), Fiber::TERM);
139}
140
141// Just calling stop should still clear all pending work
142MORDOR_UNITTEST(Scheduler, hijackStopOnScheduled)
143{
144    Fiber::ptr doNothingFiber(new Fiber(&doNothing));
145    WorkerPool pool;
146    MORDOR_TEST_ASSERT_EQUAL(Scheduler::getThis(), &pool);
147    pool.schedule(doNothingFiber);
148    MORDOR_TEST_ASSERT_EQUAL(doNothingFiber->state(), Fiber::INIT);
149    pool.stop();
150    MORDOR_TEST_ASSERT_EQUAL(doNothingFiber->state(), Fiber::TERM);
151}
152
153// TODO: could improve this test by having two fibers that
154// synchronize and MORDOR_ASSERT( that they are on different threads
155MORDOR_UNITTEST(Scheduler, hybridBasic)
156{
157    Fiber::ptr doNothingFiber(new Fiber(&doNothing));
158    WorkerPool pool(2);
159    MORDOR_TEST_ASSERT_EQUAL(Scheduler::getThis(), &pool);
160    MORDOR_TEST_ASSERT_EQUAL(doNothingFiber->state(), Fiber::INIT);
161    pool.schedule(doNothingFiber);
162    Scheduler::yield();
163    pool.stop();
164    MORDOR_TEST_ASSERT_EQUAL(doNothingFiber->state(), Fiber::TERM);
165}
166
167void
168otherThreadProc(Scheduler *scheduler, bool &done)
169{
170    MORDOR_TEST_ASSERT_EQUAL(Scheduler::getThis(), scheduler);
171    done = true;
172}
173
174MORDOR_UNITTEST(Scheduler, spawnBasic)
175{
176    bool done = false;
177    WorkerPool pool(1, false);
178    Fiber::ptr f(new Fiber(
179        boost::bind(&otherThreadProc, &pool, boost::ref(done))));
180    MORDOR_TEST_ASSERT(!Scheduler::getThis());
181    MORDOR_TEST_ASSERT_EQUAL(f->state(), Fiber::INIT);
182    MORDOR_TEST_ASSERT(!done);
183    pool.schedule(f);
184    volatile bool &doneVolatile = done;
185    while (!doneVolatile);
186    pool.stop();
187    MORDOR_TEST_ASSERT_EQUAL(f->state(), Fiber::TERM);
188}
189
190MORDOR_UNITTEST(Scheduler, switchToStress)
191{
192    WorkerPool poolA(1, true), poolB(1, false);
193
194    // Ensure we return to poolA
195    SchedulerSwitcher switcher;
196    for (int i = 0; i < 1000; ++i) {
197        if (i % 2) {
198            poolA.switchTo();
199            MORDOR_TEST_ASSERT_EQUAL(Scheduler::getThis(), &poolA);
200        } else {
201            poolB.switchTo();
202            MORDOR_TEST_ASSERT_EQUAL(Scheduler::getThis(), &poolB);
203        }
204    }
205}
206
207void
208runInContext(Scheduler &poolA, Scheduler &poolB)
209{
210    SchedulerSwitcher switcher(&poolB);
211    MORDOR_TEST_ASSERT_EQUAL(Scheduler::getThis(), &poolB);
212    MORDOR_TEST_ASSERT_NOT_EQUAL(Scheduler::getThis(), &poolA);
213    MORDOR_THROW_EXCEPTION(OperationAbortedException());
214}
215
216MORDOR_UNITTEST(Scheduler, switcherExceptions)
217{
218    WorkerPool poolA(1, true), poolB(1, false);
219
220    MORDOR_TEST_ASSERT_EQUAL(Scheduler::getThis(), &poolA);
221    MORDOR_TEST_ASSERT_NOT_EQUAL(Scheduler::getThis(), &poolB);
222
223    MORDOR_TEST_ASSERT_EXCEPTION(runInContext(poolA, poolB), OperationAbortedException);
224
225    MORDOR_TEST_ASSERT_EQUAL(Scheduler::getThis(), &poolA);
226    MORDOR_TEST_ASSERT_NOT_EQUAL(Scheduler::getThis(), &poolB);
227}
228
// Bumps the shared counter by one (scheduled by the parallel_do tests).
static void increment(int &total)
{
    total += 1;
}
233
234MORDOR_UNITTEST(Scheduler, parallelDo)
235{
236    WorkerPool pool;
237
238    int total = 0;
239    std::vector<boost::function<void ()> > dgs;
240    dgs.push_back(boost::bind(&increment, boost::ref(total)));
241    dgs.push_back(boost::bind(&increment, boost::ref(total)));
242
243    parallel_do(dgs);
244    MORDOR_TEST_ASSERT_EQUAL(total, 2);
245}
246
247MORDOR_UNITTEST(Scheduler, parallelDoFibersDone)
248{
249    WorkerPool pool(8u);
250
251    int total = 0;
252    std::vector<boost::function<void ()> > dgs;
253    std::vector<Fiber::ptr> fibers;
254    boost::function<void ()> dg = boost::bind(&increment, boost::ref(total));
255    for (int i = 0; i < 8; ++i) {
256        dgs.push_back(dg);
257        fibers.push_back(Fiber::ptr(new Fiber(NULL)));
258    }
259
260    for (int i = 0; i < 5000; ++i) {
261        parallel_do(dgs, fibers);
262        for (size_t j = 0; j < dgs.size(); ++j)
263            // This should not assert about the fiber not being terminated
264            fibers[j]->reset(dg);
265    }
266}
267
268static void exception()
269{
270    MORDOR_THROW_EXCEPTION(OperationAbortedException());
271}
272
273MORDOR_UNITTEST(Scheduler, parallelDoException)
274{
275    WorkerPool pool;
276
277    std::vector<boost::function<void ()> > dgs;
278    dgs.push_back(&exception);
279    dgs.push_back(&exception);
280
281    MORDOR_TEST_ASSERT_EXCEPTION(parallel_do(dgs), OperationAbortedException);
282}
283
284static void checkEqual(int x, int &sequence)
285{
286    MORDOR_TEST_ASSERT_EQUAL(x, sequence);
287    ++sequence;
288}
289
290MORDOR_UNITTEST(Scheduler, parallelForEach)
291{
292    const int values[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
293    WorkerPool pool;
294
295    int sequence = 1;
296    parallel_foreach(&values[0], &values[10], boost::bind(
297        &checkEqual, _1, boost::ref(sequence)), 4);
298    MORDOR_TEST_ASSERT_EQUAL(sequence, 11);
299}
300
301MORDOR_UNITTEST(Scheduler, parallelForEachLessThanParallelism)
302{
303    const int values[] = { 1, 2 };
304    WorkerPool pool;
305
306    int sequence = 1;
307    parallel_foreach(&values[0], &values[2], boost::bind(
308        &checkEqual, _1, boost::ref(sequence)), 4);
309    MORDOR_TEST_ASSERT_EQUAL(sequence, 3);
310}
311
312static void checkEqualStop5(int x, int &sequence, bool expectOrdered)
313{
314    if (expectOrdered)
315        MORDOR_TEST_ASSERT_EQUAL(x, sequence);
316    if (++sequence >= 5)
317        MORDOR_THROW_EXCEPTION(OperationAbortedException());
318}
319
320MORDOR_UNITTEST(Scheduler, parallelForEachStopShort)
321{
322    const int values[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
323    WorkerPool pool;
324
325    int sequence = 1;
326    MORDOR_TEST_ASSERT_EXCEPTION(
327    parallel_foreach(&values[0], &values[10], boost::bind(
328        &checkEqualStop5, _1, boost::ref(sequence), true), 4),
329        OperationAbortedException);
330    // 5 <= sequence < 10 (we told it to stop at five, it's undefined how many
331    // more got executed, because of other threads (on a single thread it's
332    // deterministically 5))
333    MORDOR_TEST_ASSERT_EQUAL(sequence, 5);
334}
335
336MORDOR_UNITTEST(Scheduler, parallelForEachStopShortParallel)
337{
338    const int values[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
339    WorkerPool pool(2);
340
341    int sequence = 1;
342    MORDOR_TEST_ASSERT_EXCEPTION(
343    parallel_foreach(&values[0], &values[10], boost::bind(
344        &checkEqualStop5, _1, boost::ref(sequence), false), 4),
345        OperationAbortedException);
346    // 5 <= sequence < 10 (we told it to stop at five, it's undefined how many
347    // more got executed, because of other threads (on a single thread it's
348    // deterministically 5))
349    MORDOR_TEST_ASSERT_GREATER_THAN_OR_EQUAL(sequence, 5);
350    MORDOR_TEST_ASSERT_LESS_THAN(sequence, 10);
351}
352
353// #ifndef NDEBUG
354// MORDOR_UNITTEST(Scheduler, scheduleForThreadNotOnScheduler)
355// {
356//     Fiber::ptr doNothingFiber(new Fiber(&doNothing));
357//     WorkerPool pool(1, false);
358//     MORDOR_TEST_ASSERT_ASSERTED(pool.schedule(doNothingFiber, gettid()));
359//     pool.stop();
360// }
361// #endif
362
363static void sleepForABit(std::set<tid_t> &threads,
364    boost::mutex &mutex, Fiber::ptr scheduleMe, int *count)
365{
366    {
367        boost::mutex::scoped_lock lock(mutex);
368        threads.insert(gettid());
369    }
370    Mordor::sleep(10000);
371    if (count && atomicDecrement(*count) == 0)
372        Scheduler::getThis()->schedule(scheduleMe);
373}
374
375MORDOR_UNITTEST(Scheduler, spreadTheLoad)
376{
377    std::set<tid_t> threads;
378    {
379        boost::mutex mutex;
380        WorkerPool pool(8);
381        // Wait for the other threads to get to idle first
382        Mordor::sleep(100000);
383        int count = 24;
384        for (size_t i = 0; i < 24; ++i)
385            pool.schedule(boost::bind(&sleepForABit, boost::ref(threads),
386                boost::ref(mutex), Fiber::getThis(), &count));
387        // We have to have one of these fibers reschedule us, because if we
388        // let the pool destruct, it will call stop which will wake up all
389        // the threads
390        Scheduler::yieldTo();
391    }
392    // Make sure we hit every thread
393    MORDOR_TEST_ASSERT_ABOUT_EQUAL(threads.size(), 8u, 2u);
394}
395
396static void fail()
397{
398    MORDOR_NOTREACHED();
399}
400
401static void cancelTheTimer(Timer::ptr timer)
402{
403    // Wait for the other threads to get to idle first
404    Mordor::sleep(100000);
405    timer->cancel();
406}
407
408MORDOR_UNITTEST(Scheduler, stopIdleMultithreaded)
409{
410    IOManager ioManager(4);
411    unsigned long long start = TimerManager::now();
412    Timer::ptr timer = ioManager.registerTimer(10000000ull, &fail);
413    // Wait for the other threads to get to idle first
414    Mordor::sleep(100000);
415    ioManager.schedule(boost::bind(&cancelTheTimer, timer));
416    ioManager.stop();
417    // This should have taken less than a second, since we cancelled the timer
418    MORDOR_TEST_ASSERT_LESS_THAN(TimerManager::now() - start, 1000000ull);
419}
420
421static void startTheFibers(std::set<tid_t> &threads,
422    boost::mutex &mutex)
423{
424    Mordor::sleep(100000);
425    for (size_t i = 0; i < 24; ++i)
426        Scheduler::getThis()->schedule(boost::bind(&sleepForABit,
427            boost::ref(threads), boost::ref(mutex), Fiber::ptr(),
428            (int *)NULL));
429}
430
431MORDOR_UNITTEST(Scheduler, spreadTheLoadWhileStopping)
432{
433    std::set<tid_t> threads;
434    {
435        boost::mutex mutex;
436        WorkerPool pool(8);
437        // Wait for the other threads to get to idle first
438        Mordor::sleep(100000);
439
440        pool.schedule(boost::bind(&startTheFibers, boost::ref(threads),
441            boost::ref(mutex)));
442        pool.stop();
443    }
444    // Make sure we hit every thread
445    MORDOR_TEST_ASSERT_ABOUT_EQUAL(threads.size(), 8u, 2u);
446}
447
448MORDOR_UNITTEST(Scheduler, tolerantException)
449{
450    WorkerPool pool;
451    pool.schedule(throwException);
452    MORDOR_TEST_ASSERT_ANY_EXCEPTION(pool.stop());
453}
454
455MORDOR_UNITTEST(Scheduler, tolerantExceptionInBatch)
456{
457    WorkerPool pool(1, true, 10); // batchSize set to 10
458    std::vector<int> values(3);
459    std::vector<boost::function<void ()> > dgs;
460    dgs.push_back(boost::bind(runOrException, boost::ref(values[0]), 1, false));
461    dgs.push_back(boost::bind(runOrException, boost::ref(values[1]), 2, true));
462    dgs.push_back(boost::bind(runOrException, boost::ref(values[2]), 3, false));
463    pool.schedule(dgs.begin(), dgs.end());
464
465    MORDOR_TEST_ASSERT_EQUAL(values[0], 0);
466    MORDOR_TEST_ASSERT_EQUAL(values[1], 0);
467    MORDOR_TEST_ASSERT_EQUAL(values[2], 0);
468
469    // executing the jobs
470    MORDOR_TEST_ASSERT_ANY_EXCEPTION(pool.stop());
471    pool.stop();
472
473    MORDOR_TEST_ASSERT_EQUAL(values[0], 1);
474    MORDOR_TEST_ASSERT_EQUAL(values[1], 0);
475    // even though the 2nd is exceptioned,
476    // the 3rd one should still have chance to get executed
477    MORDOR_TEST_ASSERT_EQUAL(values[2], 3);
478}
479
480static void doSleeping(boost::mutex &mutex, int &count, int &reference, int &max, IOManager &ioManager)
481{
482    boost::mutex::scoped_lock lock(mutex);
483    ++reference;
484    ++count;
485    if (reference > max)
486        max = reference;
487    lock.unlock();
488    sleep(ioManager, 5000);
489    lock.lock();
490    --reference;
491}
492
493MORDOR_UNITTEST(Scheduler, parallelDoParallelism)
494{
495    IOManager ioManager(6, true);
496    int reference = 0, count = 0, max = 0;
497    boost::mutex mutex;
498    std::vector<boost::function<void ()> > dgs;
499    for (int i=0; i<1000; ++i) {
500        dgs.push_back(boost::bind(&doSleeping,
501                            boost::ref(mutex),
502                            boost::ref(count),
503                            boost::ref(reference),
504                            boost::ref(max),
505                            boost::ref(ioManager)));
506    }
507    // 6 threads in IOManager, but only parallel do with 4.
508    parallel_do(dgs, 4);
509    ioManager.stop();
510    MORDOR_TEST_ASSERT_EQUAL(reference, 0);
511    MORDOR_TEST_ASSERT_EQUAL(count, 1000);
512    MORDOR_TEST_ASSERT_LESS_THAN_OR_EQUAL(max, 4);
513}
514
515#ifndef NDEBUG
516MORDOR_UNITTEST(Scheduler, parallelDoEvilParallelism)
517{
518    WorkerPool pool(2, true);
519    std::vector<boost::function<void ()> > dgs;
520    for (int i=0; i<2; ++i) {
521        dgs.push_back(boost::bind(nop<int>, 1));
522    }
523    // doing something evil, no one can save you
524    MORDOR_TEST_ASSERT_ASSERTED(parallel_do(dgs, 0));
525    pool.stop();
526}
527#endif
528
529namespace {
530    struct DummyClass {
531        ~DummyClass() { Scheduler::yield(); }
532    };
533
534    static void fun(boost::shared_ptr<DummyClass> a) {}
535}
536
537MORDOR_UNITTEST(Scheduler, allowYieldInDestructor)
538{
539    WorkerPool pool(2, true);
540    pool.schedule(boost::bind(fun, boost::shared_ptr<DummyClass>(new DummyClass)));
541    pool.schedule(Fiber::ptr(
542            new Fiber(boost::bind(fun, boost::shared_ptr<DummyClass>(new DummyClass)))));
543    pool.stop();
544}