
/mordor/tests/scheduler.cpp

http://github.com/mozy/mordor
Possible License(s): BSD-3-Clause
// Copyright (c) 2009 - Mozy, Inc.

#include <boost/bind.hpp>
#include <boost/thread/mutex.hpp>

#include "mordor/atomic.h"
#include "mordor/fiber.h"
#include "mordor/iomanager.h"
#include "mordor/parallel.h"
#include "mordor/sleep.h"
#include "mordor/test/test.h"
#include "mordor/workerpool.h"
#include "mordor/util.h"

using namespace Mordor;
using namespace Mordor::Test;

MORDOR_SUITE_INVARIANT(Scheduler)
{
    MORDOR_TEST_ASSERT(!Scheduler::getThis());
}

namespace {

static void doNothing() { }

void throwException() { throw Exception(); }

void runOrException(int &i, int expectedValue, bool throwException)
{
    MORDOR_LOG_DEBUG(::Mordor::Log::root()) << "set value: " << expectedValue;
    if (throwException)
        throw Exception();
    else
        i = expectedValue;
}

}

// Stop can be called multiple times without consequence
MORDOR_UNITTEST(Scheduler, idempotentStopHijack)
{
    WorkerPool pool;
    pool.stop();
    pool.stop();
}

MORDOR_UNITTEST(Scheduler, idempotentStopHybrid)
{
    WorkerPool pool(2);
    pool.stop();
    pool.stop();
}

MORDOR_UNITTEST(Scheduler, idempotentStopSpawn)
{
    WorkerPool pool(1, false);
    pool.stop();
    pool.stop();
}

// Start can be called multiple times without consequence
MORDOR_UNITTEST(Scheduler, idempotentStartHijack)
{
    WorkerPool pool;
    pool.start();
    pool.start();
}

MORDOR_UNITTEST(Scheduler, idempotentStartHybrid)
{
    WorkerPool pool(2);
    pool.start();
    pool.start();
}

MORDOR_UNITTEST(Scheduler, idempotentStartSpawn)
{
    WorkerPool pool(1, false);
    pool.start();
    pool.start();
}

// When hijacking the calling thread, you can stop() from anywhere within
// it
MORDOR_UNITTEST(Scheduler, stopScheduledHijack)
{
    WorkerPool pool;
    pool.schedule(boost::bind(&Scheduler::stop, &pool));
    pool.dispatch();
}

static void delayStop(Scheduler *s)
{
    // sleep 10 ms so that main thread can yieldTo() before
    // scheduled stop running, otherwise ASSERT will fail since
    // m_callingFiber not set in new thread
    Mordor::sleep(10);
    s->stop();
}

MORDOR_UNITTEST(Scheduler, stopScheduledHybrid)
{
    WorkerPool pool(2);
    pool.schedule(boost::bind(&delayStop, &pool));
    pool.yieldTo();
}

// When hijacking the calling thread, you don't need to explicitly start
// or stop the scheduler; it starts on the first yieldTo, and stops on
// destruction
MORDOR_UNITTEST(Scheduler, hijackBasic)
{
    Fiber::ptr doNothingFiber(new Fiber(&doNothing));
    WorkerPool pool;
    MORDOR_TEST_ASSERT_EQUAL(Scheduler::getThis(), &pool);
    pool.schedule(doNothingFiber);
    MORDOR_TEST_ASSERT_EQUAL(doNothingFiber->state(), Fiber::INIT);
    pool.dispatch();
    MORDOR_TEST_ASSERT_EQUAL(doNothingFiber->state(), Fiber::TERM);
}

// Similar to above, but after the scheduler has stopped, yielding
// to it again should implicitly restart it
MORDOR_UNITTEST(Scheduler, hijackMultipleDispatch)
{
    Fiber::ptr doNothingFiber(new Fiber(&doNothing));
    WorkerPool pool;
    MORDOR_TEST_ASSERT_EQUAL(Scheduler::getThis(), &pool);
    pool.schedule(doNothingFiber);
    MORDOR_TEST_ASSERT_EQUAL(doNothingFiber->state(), Fiber::INIT);
    pool.dispatch();
    MORDOR_TEST_ASSERT_EQUAL(doNothingFiber->state(), Fiber::TERM);
    doNothingFiber->reset(&doNothing);
    pool.schedule(doNothingFiber);
    MORDOR_TEST_ASSERT_EQUAL(doNothingFiber->state(), Fiber::INIT);
    pool.dispatch();
    MORDOR_TEST_ASSERT_EQUAL(doNothingFiber->state(), Fiber::TERM);
}

// Just calling stop should still clear all pending work
MORDOR_UNITTEST(Scheduler, hijackStopOnScheduled)
{
    Fiber::ptr doNothingFiber(new Fiber(&doNothing));
    WorkerPool pool;
    MORDOR_TEST_ASSERT_EQUAL(Scheduler::getThis(), &pool);
    pool.schedule(doNothingFiber);
    MORDOR_TEST_ASSERT_EQUAL(doNothingFiber->state(), Fiber::INIT);
    pool.stop();
    MORDOR_TEST_ASSERT_EQUAL(doNothingFiber->state(), Fiber::TERM);
}

// TODO: could improve this test by having two fibers that
// synchronize and MORDOR_ASSERT( that they are on different threads
MORDOR_UNITTEST(Scheduler, hybridBasic)
{
    Fiber::ptr doNothingFiber(new Fiber(&doNothing));
    WorkerPool pool(2);
    MORDOR_TEST_ASSERT_EQUAL(Scheduler::getThis(), &pool);
    MORDOR_TEST_ASSERT_EQUAL(doNothingFiber->state(), Fiber::INIT);
    pool.schedule(doNothingFiber);
    Scheduler::yield();
    pool.stop();
    MORDOR_TEST_ASSERT_EQUAL(doNothingFiber->state(), Fiber::TERM);
}
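
// A non-hijacking pool spawns its own thread: the calling thread never becomes
// a scheduler, and scheduled work should run on the pool's thread instead.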
void
otherThreadProc(Scheduler *scheduler, bool &done)
{
    MORDOR_TEST_ASSERT_EQUAL(Scheduler::getThis(), scheduler);
    done = true;
}

MORDOR_UNITTEST(Scheduler, spawnBasic)
{
    bool done = false;
    WorkerPool pool(1, false);
    Fiber::ptr f(new Fiber(
        boost::bind(&otherThreadProc, &pool, boost::ref(done))));
    MORDOR_TEST_ASSERT(!Scheduler::getThis());
    MORDOR_TEST_ASSERT_EQUAL(f->state(), Fiber::INIT);
    MORDOR_TEST_ASSERT(!done);
    pool.schedule(f);
    volatile bool &doneVolatile = done;
    while (!doneVolatile);
    pool.stop();
    MORDOR_TEST_ASSERT_EQUAL(f->state(), Fiber::TERM);
}
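
// switchTo() moves execution of the current fiber onto the target scheduler's
// thread; bounce back and forth between two pools many times.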
MORDOR_UNITTEST(Scheduler, switchToStress)
{
    WorkerPool poolA(1, true), poolB(1, false);

    // Ensure we return to poolA
    SchedulerSwitcher switcher;
    for (int i = 0; i < 1000; ++i) {
        if (i % 2) {
            poolA.switchTo();
            MORDOR_TEST_ASSERT_EQUAL(Scheduler::getThis(), &poolA);
        } else {
            poolB.switchTo();
            MORDOR_TEST_ASSERT_EQUAL(Scheduler::getThis(), &poolB);
        }
    }
}
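
// SchedulerSwitcher switches to the given scheduler in its constructor and
// switches back in its destructor, even when an exception propagates through it.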
void
runInContext(Scheduler &poolA, Scheduler &poolB)
{
    SchedulerSwitcher switcher(&poolB);
    MORDOR_TEST_ASSERT_EQUAL(Scheduler::getThis(), &poolB);
    MORDOR_TEST_ASSERT_NOT_EQUAL(Scheduler::getThis(), &poolA);
    MORDOR_THROW_EXCEPTION(OperationAbortedException());
}

MORDOR_UNITTEST(Scheduler, switcherExceptions)
{
    WorkerPool poolA(1, true), poolB(1, false);
    MORDOR_TEST_ASSERT_EQUAL(Scheduler::getThis(), &poolA);
    MORDOR_TEST_ASSERT_NOT_EQUAL(Scheduler::getThis(), &poolB);

    MORDOR_TEST_ASSERT_EXCEPTION(runInContext(poolA, poolB), OperationAbortedException);

    MORDOR_TEST_ASSERT_EQUAL(Scheduler::getThis(), &poolA);
    MORDOR_TEST_ASSERT_NOT_EQUAL(Scheduler::getThis(), &poolB);
}
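
// parallel_do runs each functor in the vector on the current scheduler and
// waits for all of them to complete before returning.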
static void increment(int &total)
{
    ++total;
}

MORDOR_UNITTEST(Scheduler, parallelDo)
{
    WorkerPool pool;
    int total = 0;
    std::vector<boost::function<void ()> > dgs;
    dgs.push_back(boost::bind(&increment, boost::ref(total)));
    dgs.push_back(boost::bind(&increment, boost::ref(total)));

    parallel_do(dgs);
    MORDOR_TEST_ASSERT_EQUAL(total, 2);
}
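
// When the caller supplies the fibers, parallel_do must leave each one in the
// TERM state so it can be reset and reused on the next iteration.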
MORDOR_UNITTEST(Scheduler, parallelDoFibersDone)
{
    WorkerPool pool(8u);
    int total = 0;
    std::vector<boost::function<void ()> > dgs;
    std::vector<Fiber::ptr> fibers;
    boost::function<void ()> dg = boost::bind(&increment, boost::ref(total));
    for (int i = 0; i < 8; ++i) {
        dgs.push_back(dg);
        fibers.push_back(Fiber::ptr(new Fiber(NULL)));
    }
    for (int i = 0; i < 5000; ++i) {
        parallel_do(dgs, fibers);
        for (size_t j = 0; j < dgs.size(); ++j)
            // This should not assert about the fiber not being terminated
            fibers[j]->reset(dg);
    }
}
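
// An exception thrown by any of the functors should be rethrown to the caller
// of parallel_do.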
static void exception()
{
    MORDOR_THROW_EXCEPTION(OperationAbortedException());
}

MORDOR_UNITTEST(Scheduler, parallelDoException)
{
    WorkerPool pool;
    std::vector<boost::function<void ()> > dgs;
    dgs.push_back(&exception);
    dgs.push_back(&exception);

    MORDOR_TEST_ASSERT_EXCEPTION(parallel_do(dgs), OperationAbortedException);
}
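
// parallel_foreach on a single-threaded (hijacking) pool visits the elements in
// order, so the sequence counter tracks the values exactly.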
static void checkEqual(int x, int &sequence)
{
    MORDOR_TEST_ASSERT_EQUAL(x, sequence);
    ++sequence;
}

MORDOR_UNITTEST(Scheduler, parallelForEach)
{
    const int values[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
    WorkerPool pool;
    int sequence = 1;
    parallel_foreach(&values[0], &values[10], boost::bind(
        &checkEqual, _1, boost::ref(sequence)), 4);
    MORDOR_TEST_ASSERT_EQUAL(sequence, 11);
}

MORDOR_UNITTEST(Scheduler, parallelForEachLessThanParallelism)
{
    const int values[] = { 1, 2 };
    WorkerPool pool;
    int sequence = 1;
    parallel_foreach(&values[0], &values[2], boost::bind(
        &checkEqual, _1, boost::ref(sequence)), 4);
    MORDOR_TEST_ASSERT_EQUAL(sequence, 3);
}

static void checkEqualStop5(int x, int &sequence, bool expectOrdered)
{
    if (expectOrdered)
        MORDOR_TEST_ASSERT_EQUAL(x, sequence);
    if (++sequence >= 5)
        MORDOR_THROW_EXCEPTION(OperationAbortedException());
}

MORDOR_UNITTEST(Scheduler, parallelForEachStopShort)
{
    const int values[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
    WorkerPool pool;
    int sequence = 1;
    MORDOR_TEST_ASSERT_EXCEPTION(
        parallel_foreach(&values[0], &values[10], boost::bind(
            &checkEqualStop5, _1, boost::ref(sequence), true), 4),
        OperationAbortedException);
    // 5 <= sequence < 10 (we told it to stop at five, it's undefined how many
    // more got executed, because of other threads (on a single thread it's
    // deterministically 5))
    MORDOR_TEST_ASSERT_EQUAL(sequence, 5);
}

MORDOR_UNITTEST(Scheduler, parallelForEachStopShortParallel)
{
    const int values[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
    WorkerPool pool(2);
    int sequence = 1;
    MORDOR_TEST_ASSERT_EXCEPTION(
        parallel_foreach(&values[0], &values[10], boost::bind(
            &checkEqualStop5, _1, boost::ref(sequence), false), 4),
        OperationAbortedException);
    // 5 <= sequence < 10 (we told it to stop at five, it's undefined how many
    // more got executed, because of other threads (on a single thread it's
    // deterministically 5))
    MORDOR_TEST_ASSERT_GREATER_THAN_OR_EQUAL(sequence, 5);
    MORDOR_TEST_ASSERT_LESS_THAN(sequence, 10);
}

// #ifndef NDEBUG
// MORDOR_UNITTEST(Scheduler, scheduleForThreadNotOnScheduler)
// {
//     Fiber::ptr doNothingFiber(new Fiber(&doNothing));
//     WorkerPool pool(1, false);
//     MORDOR_TEST_ASSERT_ASSERTED(pool.schedule(doNothingFiber, gettid()));
//     pool.stop();
// }
// #endif
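
// Each job records the thread it ran on and then blocks that thread briefly;
// with 24 jobs spread over 8 threads, every thread in the pool should end up
// doing some of the work. The last job to finish reschedules the main fiber.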
static void sleepForABit(std::set<tid_t> &threads,
    boost::mutex &mutex, Fiber::ptr scheduleMe, int *count)
{
    {
        boost::mutex::scoped_lock lock(mutex);
        threads.insert(gettid());
    }
    Mordor::sleep(10000);
    if (count && atomicDecrement(*count) == 0)
        Scheduler::getThis()->schedule(scheduleMe);
}

MORDOR_UNITTEST(Scheduler, spreadTheLoad)
{
    std::set<tid_t> threads;
    {
        boost::mutex mutex;
        WorkerPool pool(8);
        // Wait for the other threads to get to idle first
        Mordor::sleep(100000);

        int count = 24;
        for (size_t i = 0; i < 24; ++i)
            pool.schedule(boost::bind(&sleepForABit, boost::ref(threads),
                boost::ref(mutex), Fiber::getThis(), &count));
        // We have to have one of these fibers reschedule us, because if we
        // let the pool destruct, it will call stop which will wake up all
        // the threads
        Scheduler::yieldTo();
    }
    // Make sure we hit every thread
    MORDOR_TEST_ASSERT_ABOUT_EQUAL(threads.size(), 8u, 2u);
}
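
// stop() should return promptly once nothing is left to do: cancelling the only
// (10-second) timer means the idle threads can wake up and exit well before it
// would have fired.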
static void fail()
{
    MORDOR_NOTREACHED();
}

static void cancelTheTimer(Timer::ptr timer)
{
    // Wait for the other threads to get to idle first
    Mordor::sleep(100000);
    timer->cancel();
}

MORDOR_UNITTEST(Scheduler, stopIdleMultithreaded)
{
    IOManager ioManager(4);
    unsigned long long start = TimerManager::now();
    Timer::ptr timer = ioManager.registerTimer(10000000ull, &fail);
    // Wait for the other threads to get to idle first
    Mordor::sleep(100000);
    ioManager.schedule(boost::bind(&cancelTheTimer, timer));
    ioManager.stop();
    // This should have taken less than a second, since we cancelled the timer
    MORDOR_TEST_ASSERT_LESS_THAN(TimerManager::now() - start, 1000000ull);
}
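
// Work that gets scheduled while the pool is already stopping should still be
// distributed across all of the pool's threads.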
static void startTheFibers(std::set<tid_t> &threads,
    boost::mutex &mutex)
{
    Mordor::sleep(100000);
    for (size_t i = 0; i < 24; ++i)
        Scheduler::getThis()->schedule(boost::bind(&sleepForABit,
            boost::ref(threads), boost::ref(mutex), Fiber::ptr(),
            (int *)NULL));
}

MORDOR_UNITTEST(Scheduler, spreadTheLoadWhileStopping)
{
    std::set<tid_t> threads;
    {
        boost::mutex mutex;
        WorkerPool pool(8);
        // Wait for the other threads to get to idle first
        Mordor::sleep(100000);

        pool.schedule(boost::bind(&startTheFibers, boost::ref(threads),
            boost::ref(mutex)));
        pool.stop();
    }
    // Make sure we hit every thread
    MORDOR_TEST_ASSERT_ABOUT_EQUAL(threads.size(), 8u, 2u);
}
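
// An exception escaping a scheduled functor propagates out of the scheduler's
// dispatch loop; here the test expects it to surface from stop().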
MORDOR_UNITTEST(Scheduler, tolerantException)
{
    WorkerPool pool;
    pool.schedule(throwException);
    MORDOR_TEST_ASSERT_ANY_EXCEPTION(pool.stop());
}

MORDOR_UNITTEST(Scheduler, tolerantExceptionInBatch)
{
    WorkerPool pool(1, true, 10); // batchSize set to 10
    std::vector<int> values(3);
    std::vector<boost::function<void ()> > dgs;
    dgs.push_back(boost::bind(runOrException, boost::ref(values[0]), 1, false));
    dgs.push_back(boost::bind(runOrException, boost::ref(values[1]), 2, true));
    dgs.push_back(boost::bind(runOrException, boost::ref(values[2]), 3, false));
    pool.schedule(dgs.begin(), dgs.end());

    MORDOR_TEST_ASSERT_EQUAL(values[0], 0);
    MORDOR_TEST_ASSERT_EQUAL(values[1], 0);
    MORDOR_TEST_ASSERT_EQUAL(values[2], 0);

    // executing the jobs
    MORDOR_TEST_ASSERT_ANY_EXCEPTION(pool.stop());
    pool.stop();

    MORDOR_TEST_ASSERT_EQUAL(values[0], 1);
    MORDOR_TEST_ASSERT_EQUAL(values[1], 0);
    // even though the 2nd is exceptioned,
    // the 3rd one should still have chance to get executed
    MORDOR_TEST_ASSERT_EQUAL(values[2], 3);
}
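
// doSleeping tracks how many invocations are in flight at once so the test can
// verify that parallel_do never exceeds the requested parallelism of 4, even
// though the IOManager has 6 threads.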
static void doSleeping(boost::mutex &mutex, int &count, int &reference, int &max, IOManager &ioManager)
{
    boost::mutex::scoped_lock lock(mutex);
    ++reference;
    ++count;
    if (reference > max)
        max = reference;
    lock.unlock();
    sleep(ioManager, 5000);
    lock.lock();
    --reference;
}

MORDOR_UNITTEST(Scheduler, parallelDoParallelism)
{
    IOManager ioManager(6, true);
    int reference = 0, count = 0, max = 0;
    boost::mutex mutex;
    std::vector<boost::function<void ()> > dgs;
    for (int i = 0; i < 1000; ++i) {
        dgs.push_back(boost::bind(&doSleeping,
            boost::ref(mutex),
            boost::ref(count),
            boost::ref(reference),
            boost::ref(max),
            boost::ref(ioManager)));
    }
    // 6 threads in IOManager, but only parallel do with 4.
    parallel_do(dgs, 4);
    ioManager.stop();
    MORDOR_TEST_ASSERT_EQUAL(reference, 0);
    MORDOR_TEST_ASSERT_EQUAL(count, 1000);
    MORDOR_TEST_ASSERT_LESS_THAN_OR_EQUAL(max, 4);
}

#ifndef NDEBUG
MORDOR_UNITTEST(Scheduler, parallelDoEvilParallelism)
{
    WorkerPool pool(2, true);
    std::vector<boost::function<void ()> > dgs;
    for (int i = 0; i < 2; ++i) {
        dgs.push_back(boost::bind(nop<int>, 1));
    }
    // doing something evil, no one can save you
    MORDOR_TEST_ASSERT_ASSERTED(parallel_do(dgs, 0));
    pool.stop();
}
#endif
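
// The DummyClass destructor yields; it runs when the scheduler releases the
// scheduled functor or fiber, so calling Scheduler::yield() during that cleanup
// must not deadlock or assert.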
namespace {
    struct DummyClass {
        ~DummyClass() { Scheduler::yield(); }
    };

    static void fun(boost::shared_ptr<DummyClass> a) {}
}

MORDOR_UNITTEST(Scheduler, allowYieldInDestructor)
{
    WorkerPool pool(2, true);
    pool.schedule(boost::bind(fun, boost::shared_ptr<DummyClass>(new DummyClass)));
    pool.schedule(Fiber::ptr(
        new Fiber(boost::bind(fun, boost::shared_ptr<DummyClass>(new DummyClass)))));
    pool.stop();
}