/mordor/tests/fibersync.cpp

http://github.com/mozy/mordor

// Copyright (c) 2009 - Mozy, Inc.

#include <boost/bind.hpp>

#include "mordor/atomic.h"
#include "mordor/fiber.h"
#include "mordor/fibersynchronization.h"
#include "mordor/iomanager.h"
#include "mordor/sleep.h"
#include "mordor/test/test.h"
#include "mordor/workerpool.h"

using namespace Mordor;
template<typename M> void test_mutex_basic()
{
    WorkerPool pool;
    M mutex;
    typename M::ScopedLock lock(mutex);
}

MORDOR_UNITTEST(FiberMutex, basic)
{
    test_mutex_basic<FiberMutex>();
}
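// The contention test below takes the mutex on the main fiber, schedules
// three contending fibers, and uses a shared sequence counter to pin down the
// ordering: each fiber bumps the counter when it starts (1..3), the main
// fiber bumps it to 4 before releasing the lock, and each fiber bumps it
// again (5..7, asserted as fiberNo + 3 + 1) once it acquires the mutex in
// FIFO order; the main fiber finally sees 8.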
template<typename M> inline void contentionFiber(int fiberNo, M &mutex, int &sequence)
{
    MORDOR_TEST_ASSERT_EQUAL(++sequence, fiberNo);
    typename M::ScopedLock lock(mutex);
    MORDOR_TEST_ASSERT_EQUAL(++sequence, fiberNo + 3 + 1);
}

template<typename M> void test_mutex_contention()
{
    WorkerPool pool;
    M mutex;
    int sequence = 0;
    Fiber::ptr fiber1(new Fiber(NULL)), fiber2(new Fiber(NULL)),
        fiber3(new Fiber(NULL));
    fiber1->reset(boost::bind(&contentionFiber<M>, 1, boost::ref(mutex),
        boost::ref(sequence)));
    fiber2->reset(boost::bind(&contentionFiber<M>, 2, boost::ref(mutex),
        boost::ref(sequence)));
    fiber3->reset(boost::bind(&contentionFiber<M>, 3, boost::ref(mutex),
        boost::ref(sequence)));

    {
        typename M::ScopedLock lock(mutex);
        pool.schedule(fiber1);
        pool.schedule(fiber2);
        pool.schedule(fiber3);
        pool.dispatch();
        MORDOR_TEST_ASSERT_EQUAL(++sequence, 4);
    }
    pool.dispatch();
    MORDOR_TEST_ASSERT_EQUAL(++sequence, 8);
}

MORDOR_UNITTEST(FiberMutex, contention)
{
    test_mutex_contention<FiberMutex>();
}
#ifndef NDEBUG
MORDOR_UNITTEST(FiberMutex, notRecursive)
{
    WorkerPool pool;
    FiberMutex mutex;

    FiberMutex::ScopedLock lock(mutex);
    MORDOR_TEST_ASSERT_ASSERTED(mutex.lock());
}
#endif
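// unlockIfNotUnique() test: while no other fiber is queued on the mutex the
// call returns false and keeps the lock; once another fiber has been
// scheduled and blocked trying to lock (after the yield), the call returns
// true and releases the lock so the waiter can proceed.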
template<typename M> inline void lockIt(M &mutex)
{
    typename M::ScopedLock lock(mutex);
}

template<typename M> void test_mutex_unlockUnique()
{
    WorkerPool pool;
    M mutex;

    typename M::ScopedLock lock(mutex);
    MORDOR_TEST_ASSERT(!lock.unlockIfNotUnique());
    pool.schedule(boost::bind(&lockIt<M>, boost::ref(mutex)));
    Scheduler::yield();
    MORDOR_TEST_ASSERT(lock.unlockIfNotUnique());
    pool.dispatch();
}

MORDOR_UNITTEST(FiberMutex, unlockUnique)
{
    test_mutex_unlockUnique<FiberMutex>();
}
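// Performance smoke test: schedule `repeatness` fibers that each decrement a
// shared counter, take the mutex, and keep sleeping in 50ms slices until
// every fiber has started, then log how long the whole run took. The fiber
// count is lowered on 32-bit builds because each fiber's stack consumes
// virtual address space.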
template<typename M> inline void lockAndHold(IOManager &ioManager, M &mutex, Atomic<int> &counter)
{
    --counter;
    typename M::ScopedLock lock(mutex);
    while (counter > 0)
        Mordor::sleep(ioManager, 50000); // sleep 50ms
}

template<typename M> void test_mutex_performance()
{
    IOManager ioManager(2, true);
    M mutex;
#ifdef X86_64
#ifndef NDEBUG_PERF
    int repeatness = 10000;
#else
    int repeatness = 50000;
#endif
#else
    // On a 32-bit system a process only has 4GB of virtual address space, and
    // each fiber would take about 1MB of it, so at most ~4096 fibers can be
    // alive simultaneously.
    int repeatness = 1000;
#endif
    Atomic<int> counter = repeatness;
    unsigned long long before = TimerManager::now();
    for (int i = 0; i < repeatness; ++i) {
        ioManager.schedule(boost::bind(lockAndHold<M>,
            boost::ref(ioManager),
            boost::ref(mutex),
            boost::ref(counter)));
    }
    ioManager.stop();
    unsigned long long elapse = TimerManager::now() - before;
    MORDOR_LOG_INFO(Mordor::Log::root()) << "elapse: " << elapse;
}

MORDOR_UNITTEST(FiberMutex, mutexPerformance)
{
    test_mutex_performance<FiberMutex>();
}
MORDOR_UNITTEST(RecursiveFiberMutex, basic)
{
    test_mutex_basic<RecursiveFiberMutex>();
}
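// Unlike FiberMutex (see the notRecursive test above), RecursiveFiberMutex
// allows the same fiber to acquire the lock multiple times via nested
// ScopedLocks.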
MORDOR_UNITTEST(RecursiveFiberMutex, recursive_basic)
{
    WorkerPool pool;
    RecursiveFiberMutex mutex;
    RecursiveFiberMutex::ScopedLock lock0(mutex);
    {
        RecursiveFiberMutex::ScopedLock lock1(mutex);
        {
            RecursiveFiberMutex::ScopedLock lock2(mutex);
        }
    }
}

MORDOR_UNITTEST(RecursiveFiberMutex, contention)
{
    test_mutex_contention<RecursiveFiberMutex>();
}

MORDOR_UNITTEST(RecursiveFiberMutex, mutexPerformance)
{
    test_mutex_performance<RecursiveFiberMutex>();
}

MORDOR_UNITTEST(RecursiveFiberMutex, unlockUnique)
{
    test_mutex_unlockUnique<RecursiveFiberMutex>();
}
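// FiberCondition tests: signal() wakes a single waiter, broadcast() wakes all
// of them. The sequence counter again pins down the interleaving: in the
// broadcast test each waiter records when it starts (2, 4, 6), when it begins
// waiting (3, 5, 7), and when it resumes after the broadcast (9, 10, 11,
// asserted as expected + 8).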
static void signalMe(FiberCondition &condition, int &sequence)
{
    MORDOR_TEST_ASSERT_EQUAL(++sequence, 2);
    condition.signal();
}

MORDOR_UNITTEST(FiberCondition, signal)
{
    int sequence = 0;
    WorkerPool pool;
    FiberMutex mutex;
    FiberCondition condition(mutex);

    FiberMutex::ScopedLock lock(mutex);
    pool.schedule(boost::bind(&signalMe, boost::ref(condition),
        boost::ref(sequence)));
    MORDOR_TEST_ASSERT_EQUAL(++sequence, 1);
    condition.wait();
    MORDOR_TEST_ASSERT_EQUAL(++sequence, 3);
}

static void waitOnMe(FiberCondition &condition, FiberMutex &mutex,
    int &sequence, int expected)
{
    MORDOR_TEST_ASSERT_EQUAL(++sequence, expected * 2);
    FiberMutex::ScopedLock lock(mutex);
    MORDOR_TEST_ASSERT_EQUAL(++sequence, expected * 2 + 1);
    condition.wait();
    MORDOR_TEST_ASSERT_EQUAL(++sequence, expected + 8);
}

MORDOR_UNITTEST(FiberCondition, broadcast)
{
    int sequence = 0;
    WorkerPool pool;
    FiberMutex mutex;
    FiberCondition condition(mutex);

    pool.schedule(boost::bind(&waitOnMe, boost::ref(condition),
        boost::ref(mutex), boost::ref(sequence), 1));
    pool.schedule(boost::bind(&waitOnMe, boost::ref(condition),
        boost::ref(mutex), boost::ref(sequence), 2));
    pool.schedule(boost::bind(&waitOnMe, boost::ref(condition),
        boost::ref(mutex), boost::ref(sequence), 3));
    MORDOR_TEST_ASSERT_EQUAL(++sequence, 1);
    pool.dispatch();
    MORDOR_TEST_ASSERT_EQUAL(++sequence, 8);
    condition.broadcast();
    pool.dispatch();
    MORDOR_TEST_ASSERT_EQUAL(++sequence, 12);
}
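// FiberEvent tests: a default-constructed event is auto-reset (releasing a
// waiter consumes the signal), while FiberEvent(false) is manual-reset and
// stays signalled until it is reset, so repeated wait() calls return
// immediately.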
static void signalMe2(FiberEvent &event, int &sequence)
{
    MORDOR_TEST_ASSERT_EQUAL(++sequence, 2);
    event.set();
}

MORDOR_UNITTEST(FiberEvent, autoResetSetWithoutExistingWaiters)
{
    int sequence = 0;
    WorkerPool pool;
    FiberEvent event;

    pool.schedule(boost::bind(&signalMe2, boost::ref(event),
        boost::ref(sequence)));
    MORDOR_TEST_ASSERT_EQUAL(++sequence, 1);
    event.wait();
    MORDOR_TEST_ASSERT_EQUAL(++sequence, 3);
    event.set();
    // no fiber waiting at this moment, but the event is signaled
    event.wait();
    MORDOR_TEST_ASSERT_EQUAL(++sequence, 4);
}
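// In the next test the auxiliary "timer" event keeps signalMe3 from calling
// event.set() a second time until the main fiber has consumed the first
// signal; the Test::TakesAtLeast guard then verifies that the second wait()
// really blocks for roughly `awhile` instead of sailing through on a stale
// signal.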
static void signalMe3(TimerManager &manager, FiberEvent &event, FiberEvent &timer,
    int &sequence, unsigned long long awhile)
{
    MORDOR_TEST_ASSERT_EQUAL(++sequence, 2);
    event.set();
    timer.wait();
    Mordor::sleep(manager, awhile);
    MORDOR_TEST_ASSERT_EQUAL(++sequence, 4);
    // the other fiber has already been waiting for a while
    event.set();
}

MORDOR_UNITTEST(FiberEvent, autoResetSetWithExistingWaiters)
{
    int sequence = 0;
    IOManager manager; // for a timer-enabled scheduler
    FiberEvent event, timer;
    static const unsigned long long awhile = 50000ULL; // sleep for 50ms

    manager.schedule(boost::bind(&signalMe3, boost::ref(manager),
        boost::ref(event), boost::ref(timer), boost::ref(sequence), awhile));
    MORDOR_TEST_ASSERT_EQUAL(++sequence, 1);
    event.wait();
    MORDOR_TEST_ASSERT_EQUAL(++sequence, 3);
    timer.set();

    Test::TakesAtLeast _(awhile);
    // the first set() call should not leave the event signaled, so this fiber
    // should be blocked until signalMe3() allows it to move on
    event.wait();
    MORDOR_TEST_ASSERT_EQUAL(++sequence, 5);
}
MORDOR_UNITTEST(FiberEvent, manualReset)
{
    int sequence = 0;
    WorkerPool pool;
    FiberEvent event(false);

    pool.schedule(boost::bind(&signalMe2, boost::ref(event),
        boost::ref(sequence)));
    MORDOR_TEST_ASSERT_EQUAL(++sequence, 1);
    event.wait();
    MORDOR_TEST_ASSERT_EQUAL(++sequence, 3);
    // It's manual reset; you can wait as many times as you want until it's
    // reset
    event.wait();
    event.wait();
}

static void waitOnMe2(FiberEvent &event, int &sequence, int expected)
{
    MORDOR_TEST_ASSERT_EQUAL(++sequence, expected + 1);
    event.wait();
    MORDOR_TEST_ASSERT_EQUAL(++sequence, expected + 5);
}

MORDOR_UNITTEST(FiberEvent, manualResetMultiple)
{
    int sequence = 0;
    WorkerPool pool;
    FiberEvent event(false);

    pool.schedule(boost::bind(&waitOnMe2, boost::ref(event),
        boost::ref(sequence), 1));
    pool.schedule(boost::bind(&waitOnMe2, boost::ref(event),
        boost::ref(sequence), 2));
    pool.schedule(boost::bind(&waitOnMe2, boost::ref(event),
        boost::ref(sequence), 3));
    MORDOR_TEST_ASSERT_EQUAL(++sequence, 1);
    pool.dispatch();
    MORDOR_TEST_ASSERT_EQUAL(++sequence, 5);
    event.set();
    pool.dispatch();
    MORDOR_TEST_ASSERT_EQUAL(++sequence, 9);
    // It's manual reset; you can wait as many times as you want until it's
    // reset
    event.wait();
    event.wait();
}
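// EventOwner demonstrates a lifetime hazard: the destructor waits on the
// event and then tears the object down, so if another thread is still inside
// m_event.set() when the waiter wakes up, set() can end up touching a
// destroyed event. setEvent() hits MORDOR_NOTREACHED if it observes the
// destructor already running.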
class EventOwner
{
public:
    EventOwner()
        : m_event(false)
        , m_destroying(false)
    {}
    ~EventOwner()
    {
        // Single-thread case: we can't wake up from yielding in the wait()
        // call until the fiber running the scheduled setEvent() call has
        // finished.
        //
        // Multi-thread case: we can wake up because the event is signalled,
        // but the other thread might still be inside the m_event.set() call,
        // with the m_event mutex still locked. Destroying that mutex while
        // set() is being called can cause a crash.
        m_event.wait();
        m_destroying = true;
        // Note: in a debug build the FiberEvent will get blocked waiting for
        // the lock held in FiberEvent::set, but an NDEBUG build will not.
    }
    void setEvent()
    {
        m_event.set();
        if (m_destroying) {
            MORDOR_NOTREACHED();
        }
    }

    FiberEvent m_event;
    bool m_destroying;
};
MORDOR_UNITTEST(FiberEvent, destroyAfterSet)
{
    // Demonstrates the risk of using a FiberEvent in a multi-threaded
    // environment
#if 0
    {
        // Not safe - owner destruction can start while pool2 is still
        // executing setEvent(). Even though we wait on the event, the
        // destructor is allowed to proceed before setEvent() has finished.
        WorkerPool pool;
        WorkerPool pool2(1, false);
        EventOwner owner;
        pool2.schedule(boost::bind(&EventOwner::setEvent, &owner));
    }
#endif
    {
        // Safe multi-threaded scenario - pool2 is stopped before the event
        // owner is destroyed, which ensures that the scheduled setEvent()
        // call has completed
        WorkerPool pool;
        WorkerPool pool2(1, false);
        EventOwner owner;
        pool2.schedule(boost::bind(&EventOwner::setEvent, &owner));
        pool2.stop();
    }
    {
        // Safe multi-threaded scenario - the variables are declared in the
        // correct order, so pool2 is stopped before the event owner is
        // destroyed
        WorkerPool pool;
        EventOwner owner;
        WorkerPool pool2(1, false);
        pool2.schedule(boost::bind(&EventOwner::setEvent, &owner));
    }
    {
        // Safe single-threaded scenario - the pool stops itself before
        // the owner is destroyed
        WorkerPool pool;
        EventOwner owner;
        pool.schedule(boost::bind(&EventOwner::setEvent, &owner));
        pool.stop();
    }
    {
        // Safe single-threaded scenario - pool destruction automatically
        // blocks until setEvent() is complete, then the owner is destroyed
        EventOwner owner;
        WorkerPool pool;
        pool.schedule(boost::bind(&EventOwner::setEvent, &owner));
    }
    {
        // This is the only case where the event is actually needed and
        // useful, because only one fiber executes at a time on the single
        // thread
        WorkerPool pool;
        EventOwner owner;
        pool.schedule(boost::bind(&EventOwner::setEvent, &owner));
    }
}