PageRenderTime 1275ms CodeModel.GetById 21ms RepoModel.GetById 0ms app.codeStats 0ms

/mordor/iomanager_iocp.cpp

http://github.com/mozy/mordor
C++ | 577 lines | 482 code | 51 blank | 44 comment | 117 complexity | d23f39a1445bbd93c2ac2e62791a4e44 MD5 | raw file
Possible License(s): BSD-3-Clause
  1. // Copyright (c) 2009 - Mozy, Inc.
  2. #include "iomanager_iocp.h"
  3. #include <boost/bind.hpp>
  4. #include "assert.h"
  5. #include "atomic.h"
  6. #include "fiber.h"
  7. #include "runtime_linking.h"
  8. namespace Mordor {
// Loggers for the IO manager proper and for the WaitBlock wait-thread helper.
static Logger::ptr g_log = Log::lookup("mordor:iomanager");
static Logger::ptr g_logWaitBlock = Log::lookup("mordor:iomanager:waitblock");
// Process-wide (static) state used by tickle() to tolerate transient
// PostQueuedCompletionStatus failures; configured via setIOCPErrorTolerance().
boost::mutex IOManager::m_errorMutex;
// Maximum number of tolerated post failures per window (0 = tolerance disabled).
size_t IOManager::m_iocpAllowedErrorCount = 0;
// Length of the rolling error window, in seconds.
size_t IOManager::m_iocpErrorCountWindowInSeconds = 0;
// Failures observed since m_firstErrorTime.
size_t IOManager::m_errorCount = 0;
// Start of the current error window; tickle() stores TimerManager::now()/1000
// here (presumably milliseconds if now() is microseconds — TODO confirm).
unsigned long long IOManager::m_firstErrorTime = 0;
// Zero the entire structure, then mark it as not owned by any thread.
// NOTE(review): memset over `this` assumes AsyncEvent is effectively
// trivially-copyable; if the declaration in iomanager_iocp.h contains
// non-POD members (e.g. Fiber::ptr) this is formally UB and clobbers
// already-constructed members — confirm against the header.
AsyncEvent::AsyncEvent()
{
    memset(this, 0, sizeof(AsyncEvent));
    // emptytid() overwrites the zeroed thread id with the "no thread" sentinel.
    m_thread = emptytid();
}
// A WaitBlock services up to MAXIMUM_WAIT_OBJECTS - 1 registered handles on a
// dedicated thread via WaitForMultipleObjects.  Slot 0 of m_handles is a
// private auto-reset event used to kick the wait loop whenever the handle
// array is reconfigured.
IOManager::WaitBlock::WaitBlock(IOManager &outer)
: m_outer(outer),
  m_inUseCount(0)
{
    // Auto-reset, initially non-signalled "reconfigure requested" event.
    m_handles[0] = CreateEventW(NULL, FALSE, FALSE, NULL);
    MORDOR_LOG_DEBUG(g_logWaitBlock) << this << " CreateEventW(): " << m_handles[0]
        << " (" << lastError() << ")";
    if (!m_handles[0])
        MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("CreateEventW");
    // Auto-reset event the wait thread signals once it has acknowledged a
    // reconfiguration (unregisterEvent() blocks on it).
    m_reconfigured = CreateEventW(NULL, FALSE, FALSE, NULL);
    MORDOR_LOG_DEBUG(g_logWaitBlock) << this << " CreateEventW(): "
        << m_reconfigured << " (" << lastError() << ")";
    if (!m_reconfigured) {
        // Don't leak the first event if the second creation fails.
        CloseHandle(m_handles[0]);
        MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("CreateEventW");
    }
}
// Closes the two private events.  Client-registered handles are not owned by
// the WaitBlock and are left open.
IOManager::WaitBlock::~WaitBlock()
{
    // <= 0 means no user entries remain (-1 once the wait thread has shut down).
    MORDOR_ASSERT(m_inUseCount <= 0);
    BOOL bRet = CloseHandle(m_handles[0]);
    MORDOR_LOG_DEBUG(g_logWaitBlock) << this << " CloseHandle("
        << m_handles[0] << "): " << bRet << " (" << lastError() << ")";
    bRet = CloseHandle(m_reconfigured);
    MORDOR_LOG_DEBUG(g_logWaitBlock) << this << " CloseHandle("
        << m_reconfigured << "): " << bRet << " (" << lastError() << ")";
}
// Try to add (hEvent, dg) to this block's wait set.
// Returns false when the block is full (MAXIMUM_WAIT_OBJECTS - 1 user slots;
// slot 0 is reserved for the internal reconfigure event) or shutting down
// (m_inUseCount == -1), so the caller can move on to another WaitBlock.
// When dg is empty, the calling fiber itself is rescheduled when the event
// fires; otherwise dg is scheduled.
bool
IOManager::WaitBlock::registerEvent(HANDLE hEvent,
    boost::function <void ()> dg,
    bool recurring)
{
    boost::mutex::scoped_lock lock(m_mutex);
    if (m_inUseCount == -1 || m_inUseCount == MAXIMUM_WAIT_OBJECTS - 1)
        return false;
    // Occupy the next free slot; user entries live in [1, m_inUseCount].
    ++m_inUseCount;
    m_handles[m_inUseCount] = hEvent;
    m_schedulers[m_inUseCount] = Scheduler::getThis();
    if (!dg)
        m_fibers[m_inUseCount] = Fiber::getThis();
    m_dgs[m_inUseCount] = dg;
    m_recurring[m_inUseCount] = recurring;
    MORDOR_LOG_DEBUG(g_logWaitBlock) << this << " registerEvent(" << hEvent
        << ", " << dg << ")";
    if (m_inUseCount == 1) {
        // First entry: spin up the dedicated wait thread.
        // NOTE(review): the Thread object is destroyed immediately;
        // presumably Mordor's Thread detaches in its destructor — confirm.
        Thread thread(boost::bind(&WaitBlock::run, this));
    } else {
        // Wake the running wait thread so it re-snapshots the handle array.
        if (!SetEvent(m_handles[0]))
            MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("SetEvent");
    }
    return true;
}
// Alias so removeEntry() can spell an explicit destructor call on
// boost::function<void ()> (a template-id is awkward in a pseudo-destructor).
typedef boost::function<void ()> functor;
// Remove every registration of `handle` from this block; returns how many
// were removed.  If anything was removed, kicks the wait thread and blocks
// (outside the lock) until it acknowledges the new handle array via the
// m_reconfigured handshake, so the caller knows the handle is no longer
// being waited on.
size_t
IOManager::WaitBlock::unregisterEvent(HANDLE handle)
{
    boost::mutex::scoped_lock lock(m_mutex);
    // -1 means the wait thread is (or is about to be) shut down.
    if (m_inUseCount == -1)
        return 0;
    size_t unregistered = 0;
    // User handles occupy slots [1, m_inUseCount]; slot 0 is internal.
    HANDLE *srcHandle = std::find(m_handles + 1, m_handles + m_inUseCount + 1, handle);
    while (srcHandle != m_handles + m_inUseCount + 1) {
        ++unregistered;
        MORDOR_LOG_DEBUG(g_logWaitBlock) << this << " unregisterEvent(" << handle
            << "): " << (srcHandle != m_handles + m_inUseCount + 1);
        int index = (int)(srcHandle - m_handles);
        removeEntry(index);
        // Last entry removed: drop to -1 to tell the wait thread to exit.
        if (--m_inUseCount == 0) {
            --m_inUseCount;
            break;
        }
        // The same handle may have been registered more than once.
        srcHandle = std::find(m_handles + 1, m_handles + m_inUseCount + 1, handle);
    }
    if (unregistered) {
        // Handshake: clear the ack event, wake the wait thread via slot 0,
        // then wait (lock released) for it to acknowledge the reconfiguration.
        if (!ResetEvent(m_reconfigured))
            MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("ResetEvent");
        if (!SetEvent(m_handles[0]))
            MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("SetEvent");
        lock.unlock();
        if (WaitForSingleObject(m_reconfigured, INFINITE) == WAIT_FAILED)
            MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("WaitForSingleObject");
    }
    return unregistered;
}
// Body of the dedicated wait thread owned by this WaitBlock.  Repeatedly
// snapshots the handle array under the lock, waits on the snapshot with
// WaitForMultipleObjects, and dispatches the scheduler/fiber/functor recorded
// for whichever handle fired.  Exits once m_inUseCount drops to -1, then
// unlinks this block from m_outer.m_waitBlocks.
void
IOManager::WaitBlock::run()
{
    DWORD dwRet;
    DWORD count;
    HANDLE handles[MAXIMUM_WAIT_OBJECTS];
    {
        boost::mutex::scoped_lock lock(m_mutex);
        if (m_inUseCount == -1) {
            // The first/final handle was unregistered out from under us
            // before we could even start
            if (!SetEvent(m_reconfigured))
                MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("SetEvent");
        }
        // Snapshot the handles; count becomes 0 when already shut down (-1 + 1),
        // which skips the loop entirely.
        count = m_inUseCount + 1;
        memcpy(handles, m_handles, (count) * sizeof(HANDLE));
    }
    MORDOR_LOG_DEBUG(g_logWaitBlock) << this << " run " << count;
    while (count) {
        dwRet = WaitForMultipleObjects(count, handles, FALSE, INFINITE);
        MORDOR_LOG_LEVEL(g_logWaitBlock, dwRet == WAIT_FAILED ? Log::ERROR : Log::DEBUG)
            << this << " WaitForMultipleObjects(" << count << ", " << handles
            << "): " << dwRet << " (" << lastError() << ")";
        if (dwRet == WAIT_OBJECT_0) {
            // Array just got reconfigured
            boost::mutex::scoped_lock lock(m_mutex);
            // Acknowledge the reconfiguration (unregisterEvent may be blocked
            // waiting on m_reconfigured).
            if (!SetEvent(m_reconfigured))
                MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("SetEvent");
            if (m_inUseCount == -1)
                break;
            count = m_inUseCount + 1;
            memcpy(handles, m_handles, (count) * sizeof(HANDLE));
            MORDOR_LOG_DEBUG(g_logWaitBlock) << this << " reconfigure " << count;
        } else if (dwRet >= WAIT_OBJECT_0 + 1 && dwRet < WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS) {
            // One of the user handles fired.
            boost::mutex::scoped_lock lock(m_mutex);
            if (m_inUseCount == -1) {
                // The final handle was unregistered out from under us
                if (!SetEvent(m_reconfigured))
                    MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("SetEvent");
                break;
            }
            HANDLE handle = handles[dwRet - WAIT_OBJECT_0];
            // Re-find the handle in the live array: slots may have shifted
            // since the snapshot we waited on was taken.
            HANDLE *srcHandle = std::find(m_handles + 1, m_handles + m_inUseCount + 1, handle);
            MORDOR_LOG_DEBUG(g_logWaitBlock) << this << " Event " << handle << " "
                << (srcHandle != m_handles + m_inUseCount + 1);
            if (srcHandle != m_handles + m_inUseCount + 1) {
                int index = (int)(srcHandle - m_handles);
                // Resume the registered fiber, or schedule the functor if given.
                if (!m_dgs[index])
                    m_schedulers[index]->schedule(m_fibers[index]);
                else
                    m_schedulers[index]->schedule(m_dgs[index]);
                if (!m_recurring[index]) {
                    // One-shot registration: drop it and refresh the snapshot.
                    removeEntry(index);
                    if (--m_inUseCount == 0) {
                        --m_inUseCount;
                        // if IOManager (m_outer) is still running in unregisterEvent function,
                        // and waiting for m_reconfigured event,
                        // let unregisterEvent go ahead,
                        // otherwise, lock(m_outer.m_mutex) run into deadlock.
                        if (!SetEvent(m_reconfigured))
                            MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("SetEvent");
                        break;
                    }
                    count = m_inUseCount + 1;
                    memcpy(handles, m_handles, (count) * sizeof(HANDLE));
                }
            }
        } else if (dwRet == WAIT_FAILED) {
            MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("WaitForMultipleObjects");
        } else {
            MORDOR_NOTREACHED();
        }
    }
    MORDOR_LOG_DEBUG(g_logWaitBlock) << this << " done";
    {
        // Keep ourselves alive while the last owning reference is removed
        // from m_outer.m_waitBlocks, then tickle the IO manager so stopping()
        // gets re-evaluated.
        ptr self = shared_from_this();
        boost::mutex::scoped_lock lock(m_outer.m_mutex);
        std::list<WaitBlock::ptr>::iterator it =
            std::find(m_outer.m_waitBlocks.begin(), m_outer.m_waitBlocks.end(),
                shared_from_this());
        MORDOR_ASSERT(it != m_outer.m_waitBlocks.end());
        m_outer.m_waitBlocks.erase(it);
        m_outer.tickle();
    }
}
// Compact all the parallel arrays after removing the entry at `index`,
// shifting the following (m_inUseCount - index) entries down by one.
// Caller holds m_mutex and decrements m_inUseCount afterwards.
// NOTE(review): the memmove over boost::function and Fiber::ptr elements
// relies on those types being safely byte-movable (no self-references) —
// not guaranteed by the standard; confirm before changing compilers/libs.
void
IOManager::WaitBlock::removeEntry(int index)
{
    memmove(&m_handles[index], &m_handles[index + 1], (m_inUseCount - index) * sizeof(HANDLE));
    memmove(&m_schedulers[index], &m_schedulers[index + 1], (m_inUseCount - index) * sizeof(Scheduler *));
    // Manually destruct old object, move others down, and default construct unused one
    m_dgs[index].~functor();
    memmove(&m_dgs[index], &m_dgs[index + 1], (m_inUseCount - index) * sizeof(boost::function<void ()>));
    new(&m_dgs[m_inUseCount]) boost::function<void ()>();
    // Manually destruct old object, move others down, and default construct unused one
    m_fibers[index].~shared_ptr<Fiber>();
    memmove(&m_fibers[index], &m_fibers[index + 1], (m_inUseCount - index) * sizeof(Fiber::ptr));
    new(&m_fibers[m_inUseCount]) Fiber::ptr();
    memmove(&m_recurring[index], &m_recurring[index + 1], (m_inUseCount - index) * sizeof(bool));
}
  205. IOManager::IOManager(size_t threads, bool useCaller, bool autoStart)
  206. : Scheduler(threads, useCaller)
  207. {
  208. m_pendingEventCount = 0;
  209. m_hCompletionPort = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 0);
  210. MORDOR_LOG_LEVEL(g_log, m_hCompletionPort ? Log::VERBOSE : Log::ERROR) << this <<
  211. " CreateIoCompletionPort(): " << m_hCompletionPort << " ("
  212. << (m_hCompletionPort ? ERROR_SUCCESS : lastError()) << ")";
  213. if (!m_hCompletionPort)
  214. MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("CreateIoCompletionPort");
  215. if (autoStart) {
  216. try {
  217. start();
  218. } catch (...) {
  219. CloseHandle(m_hCompletionPort);
  220. throw;
  221. }
  222. }
  223. }
// Stop all scheduler threads before tearing down the completion port they
// are blocked on.
IOManager::~IOManager()
{
    stop();
    CloseHandle(m_hCompletionPort);
}
  229. bool
  230. IOManager::stopping()
  231. {
  232. unsigned long long timeout;
  233. return stopping(timeout);
  234. }
  235. void
  236. IOManager::registerFile(HANDLE handle)
  237. {
  238. // Add the handle to the existing completion port
  239. MORDOR_ASSERT(m_hCompletionPort != INVALID_HANDLE_VALUE);
  240. HANDLE hRet = CreateIoCompletionPort(handle, m_hCompletionPort, 0, 0);
  241. MORDOR_LOG_LEVEL(g_log, hRet ? Log::DEBUG : Log::ERROR) << this <<
  242. " CreateIoCompletionPort(" << handle << ", " << m_hCompletionPort
  243. << "): " << hRet << " (" << (hRet ? ERROR_SUCCESS : lastError()) << ")";
  244. if (hRet != m_hCompletionPort) {
  245. MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("CreateIoCompletionPort");
  246. }
  247. }
// Register a pending overlapped IO operation.  Called on a scheduler fiber
// before the async call is issued; idle() resumes the calling fiber when the
// completion for e->overlapped arrives.  `e` must stay alive until then.
void
IOManager::registerEvent(AsyncEvent *e)
{
    MORDOR_ASSERT(e);
    e->m_scheduler = Scheduler::getThis();
    e->m_thread = gettid();
    e->m_fiber = Fiber::getThis();
    MORDOR_ASSERT(e->m_scheduler);
    MORDOR_ASSERT(e->m_fiber);
    MORDOR_LOG_DEBUG(g_log) << this << " registerEvent(" << &e->overlapped << ")";
    // Debug builds track every pending OVERLAPPED so misuse (e.g. double
    // registration) is caught by assertion.  Note that the braces pair across
    // the preprocessor blocks: the lock stays held around the increment too.
#ifndef NDEBUG
    {
        boost::mutex::scoped_lock lock(m_mutex);
        MORDOR_ASSERT(m_pendingEvents.find(&e->overlapped) == m_pendingEvents.end());
        m_pendingEvents[&e->overlapped] = e;
#endif
        atomicIncrement(m_pendingEventCount);
#ifndef NDEBUG
        MORDOR_ASSERT(m_pendingEvents.size() == m_pendingEventCount);
    }
#endif
}
// Forget a previously registered overlapped operation that will no longer
// complete (e.g. the async call failed synchronously).  Clears the event's
// thread/scheduler/fiber and decrements the pending-event count.
void
IOManager::unregisterEvent(AsyncEvent *e)
{
    MORDOR_ASSERT(e);
    MORDOR_LOG_DEBUG(g_log) << this << " unregisterEvent(" << &e->overlapped << ")";
    e->m_thread = emptytid();
    e->m_scheduler = NULL;
    e->m_fiber.reset();
    // Debug builds mirror the bookkeeping of registerEvent(); the braces pair
    // across the preprocessor blocks so the lock also covers the decrement.
#ifndef NDEBUG
    {
        boost::mutex::scoped_lock lock(m_mutex);
        std::map<OVERLAPPED *, AsyncEvent *>::iterator it =
            m_pendingEvents.find(&e->overlapped);
        MORDOR_ASSERT(it != m_pendingEvents.end());
        m_pendingEvents.erase(it);
#endif
        atomicDecrement(m_pendingEventCount);
#ifndef NDEBUG
        MORDOR_ASSERT(m_pendingEvents.size() == m_pendingEventCount);
    }
#endif
}
  292. void
  293. IOManager::registerEvent(HANDLE handle, boost::function<void ()> dg, bool recurring)
  294. {
  295. MORDOR_LOG_DEBUG(g_log) << this << " registerEvent(" << handle << ", " << dg
  296. << ")";
  297. MORDOR_ASSERT(handle);
  298. MORDOR_ASSERT(handle != INVALID_HANDLE_VALUE);
  299. MORDOR_ASSERT(Scheduler::getThis());
  300. boost::mutex::scoped_lock lock(m_mutex);
  301. for (std::list<WaitBlock::ptr>::iterator it = m_waitBlocks.begin();
  302. it != m_waitBlocks.end();
  303. ++it) {
  304. if ((*it)->registerEvent(handle, dg, recurring))
  305. return;
  306. }
  307. m_waitBlocks.push_back(WaitBlock::ptr(new WaitBlock(*this)));
  308. bool result = m_waitBlocks.back()->registerEvent(handle, dg, recurring);
  309. MORDOR_ASSERT(result);
  310. }
  311. size_t
  312. IOManager::unregisterEvent(HANDLE handle)
  313. {
  314. MORDOR_ASSERT(handle);
  315. boost::mutex::scoped_lock lock(m_mutex);
  316. size_t result = 0;
  317. for (std::list<WaitBlock::ptr>::iterator it = m_waitBlocks.begin();
  318. it != m_waitBlocks.end();
  319. ++it) {
  320. result += (*it)->unregisterEvent(handle);
  321. }
  322. MORDOR_LOG_DEBUG(g_log) << this << " unregisterEvent(" << handle << "): " << result;
  323. return result;
  324. }
// Cancel the outstanding overlapped operation `e` on `hFile`, if any.
// Prefers CancelIoEx (targets a single OVERLAPPED, callable from any thread);
// when that is unavailable (ERROR_CALL_NOT_IMPLEMENTED, pre-Vista) falls back
// to CancelIo, which only works from the issuing thread, marshalling there
// via the event's scheduler when necessary.
void
IOManager::cancelEvent(HANDLE hFile, AsyncEvent *e)
{
    MORDOR_ASSERT(hFile);
    MORDOR_ASSERT(e);
    MORDOR_LOG_DEBUG(g_log) << this << " cancelEvent(" << hFile << ", "
        << &e->overlapped << ")";
    if (!pCancelIoEx(hFile, &e->overlapped)) {
        error_t error = lastError();
        if (error == ERROR_CALL_NOT_IMPLEMENTED) {
            if (e->m_thread == emptytid()) {
                // Nothing to cancel
                return;
            } else if (e->m_thread == gettid()) {
                // Already on the thread that issued the IO.
                if (!CancelIo(hFile))
                    MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("CancelIo");
            } else {
                MORDOR_ASSERT(e->m_scheduler);
                // Have to marshal to the original thread
                SchedulerSwitcher switcher;
                e->m_scheduler->switchTo(e->m_thread);
                if (!CancelIo(hFile))
                    MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("CancelIo");
            }
        } else if (error == ERROR_NOT_FOUND || error == ERROR_FILE_NOT_FOUND) {
            // Nothing to cancel
        }
        else if (error == ERROR_INVALID_HANDLE) {
            // Nothing to do.
            // Not sure why, but the socket is trying to reuse a closed handle.
            // This problem is happening randomly, so this is just a workround for now.
            // JIRA: KALYPSO-2563
            MORDOR_LOG_DEBUG(g_log) << this << " cancelEvent(: " << hFile << ", "
                << &e->overlapped << ") : " << lastError();
        } else {
            MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("CancelIoEx");
        }
    }
}
  364. bool
  365. IOManager::stopping(unsigned long long &nextTimeout)
  366. {
  367. // Check when the next timer is expected to timeout
  368. nextTimeout = nextTimer();
  369. // Even if the scheduler wants to stop we return false
  370. // if there is any pending work
  371. if (nextTimeout == ~0ull && Scheduler::stopping()) {
  372. if (m_pendingEventCount != 0)
  373. return false;
  374. boost::mutex::scoped_lock lock(m_mutex);
  375. return m_waitBlocks.empty();
  376. }
  377. return false;
  378. }
  379. // Each thread of this IO manager runs this method as a fiber and it is active when there is nothing
  380. // to do. It must process incoming Async IO calls, expired timers and any fiber scheduled.
  381. void
  382. IOManager::idle()
  383. {
  384. OVERLAPPED_ENTRY events[64];
  385. ULONG count;
  386. while (true) {
  387. unsigned long long nextTimeout;
  388. if (stopping(nextTimeout))
  389. return;
  390. DWORD timeout = INFINITE;
  391. if (nextTimeout != ~0ull) {
  392. // The maximum time we can wait in GetQueuedCompletionStatusEx is
  393. // up to the point that the next timer will expire
  394. timeout = (DWORD)(nextTimeout / 1000) + 1;
  395. }
  396. count = 0;
  397. BOOL ret = pGetQueuedCompletionStatusEx(m_hCompletionPort,
  398. events,
  399. 64,
  400. &count,
  401. timeout,
  402. FALSE);
  403. error_t error = lastError();
  404. MORDOR_LOG_DEBUG(g_log) << this << " GetQueuedCompletionStatusEx("
  405. << m_hCompletionPort << ", " << timeout << "): " << ret << ", ("
  406. << count << ") (" << (ret ? ERROR_SUCCESS : error) << ")";
  407. if (!ret && error) {
  408. if (error == WAIT_TIMEOUT) {
  409. // No tickles or AsyncIO calls have happened so check for timers
  410. // that need execution
  411. std::vector<boost::function<void ()> > expired = processTimers();
  412. if (!expired.empty()) {
  413. schedule(expired.begin(), expired.end());
  414. expired.clear();
  415. try {
  416. // Let the timers execute
  417. Fiber::yield();
  418. } catch (OperationAbortedException &) {
  419. return;
  420. }
  421. }
  422. continue;
  423. }
  424. MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("GetQueuedCompletionStatusEx");
  425. }
  426. // Schedule any timers that are ready to execute
  427. std::vector<boost::function<void ()> > expired = processTimers();
  428. if (!expired.empty()) {
  429. schedule(expired.begin(), expired.end());
  430. expired.clear();
  431. }
  432. #ifndef NDEBUG
  433. boost::mutex::scoped_lock lock(m_mutex, boost::defer_lock_t());
  434. #endif
  435. int tickles = 0;
  436. for (ULONG i = 0; i < count; ++i) {
  437. if (events[i].lpCompletionKey == ~0) {
  438. MORDOR_LOG_VERBOSE(g_log) << this << " received tickle";
  439. ++tickles;
  440. continue;
  441. }
  442. // An Async IO call has completed, so wake up the fiber
  443. // that called registerEvent()
  444. AsyncEvent *e = (AsyncEvent *)events[i].lpOverlapped;
  445. #ifndef NDEBUG
  446. if (!lock.owns_lock())
  447. lock.lock();
  448. // Verify that the API has been used properly,
  449. // e.g. that registerEvent has been called
  450. std::map<OVERLAPPED *, AsyncEvent *>::iterator it =
  451. m_pendingEvents.find(events[i].lpOverlapped);
  452. MORDOR_ASSERT(it != m_pendingEvents.end());
  453. MORDOR_ASSERT(e == it->second);
  454. m_pendingEvents.erase(it);
  455. #endif
  456. MORDOR_ASSERT(e->m_scheduler);
  457. MORDOR_ASSERT(e->m_fiber);
  458. MORDOR_LOG_TRACE(g_log) << this << " OVERLAPPED_ENTRY {"
  459. << events[i].lpCompletionKey << ", " << events[i].lpOverlapped
  460. << ", " << events[i].Internal << ", "
  461. << events[i].dwNumberOfBytesTransferred << "}";
  462. Scheduler *scheduler = e->m_scheduler;
  463. Fiber::ptr fiber;
  464. fiber.swap(e->m_fiber);
  465. // Clean up the AsyncEvent structure which can
  466. // be reused for the next Async IO call
  467. e->m_thread = emptytid();
  468. e->m_scheduler = NULL;
  469. scheduler->schedule(fiber);
  470. }
  471. if (count != tickles) {
  472. // Subtract the number of completed Async IO calls
  473. atomicAdd(m_pendingEventCount, (size_t)(-(ptrdiff_t)(count - tickles)));
  474. }
  475. #ifndef NDEBUG
  476. if (lock.owns_lock()) {
  477. MORDOR_ASSERT(m_pendingEventCount == m_pendingEvents.size());
  478. lock.unlock();
  479. }
  480. #endif
  481. // Because we recieved either a tickle or a completed Async IO call
  482. // we know that there must be some work lined up for the scheduler
  483. try {
  484. Fiber::yield();
  485. } catch (OperationAbortedException &) {
  486. return;
  487. }
  488. }
  489. }
  490. void IOManager::setIOCPErrorTolerance(size_t count, size_t seconds)
  491. {
  492. boost::mutex::scoped_lock lock(m_errorMutex);
  493. m_iocpAllowedErrorCount = count;
  494. m_iocpErrorCountWindowInSeconds = seconds;
  495. }
// Wake a thread blocked in idle()'s GetQueuedCompletionStatusEx by posting a
// completion packet with the reserved key ~0, which idle() treats as a no-op
// "tickle" rather than real IO.  Post failures are swallowed up to the
// configured tolerance (setIOCPErrorTolerance), then rethrown.
void
IOManager::tickle()
{
    // Send a special message with distinct key ~0. This message does not correspond to
    // any real completed Async IO call, rather it is used to force the idle() method
    // out of a GetQueuedCompletionStatusEx status
    BOOL bRet = PostQueuedCompletionStatus(m_hCompletionPort, 0, ~0, NULL);
    MORDOR_LOG_LEVEL(g_log, bRet ? Log::DEBUG : Log::ERROR) << this
        << " PostQueuedCompletionStatus(" << m_hCompletionPort
        << ", 0, ~0, NULL): " << bRet << " (" << (bRet ? ERROR_SUCCESS : lastError()) << ")";
    if (!bRet) {
        boost::mutex::scoped_lock lock(m_errorMutex);
        if (m_iocpAllowedErrorCount != 0) {
            // currentTime is now()/1000 and a further /1000 yields seconds, so
            // now() is presumably microseconds — TODO confirm.
            unsigned long long currentTime = Mordor::TimerManager::now() / 1000ULL;
            unsigned long long secondsElapsed = (currentTime - m_firstErrorTime) / 1000;
            if (secondsElapsed > m_iocpErrorCountWindowInSeconds) {
                // It's been a while since we started encountering errors;
                // start a fresh tolerance window.
                m_firstErrorTime = currentTime;
                m_errorCount = 0;
            }
            if (++m_errorCount <= m_iocpAllowedErrorCount) {
                // #112528 - Swallow these errors until we exceed the error tolerance
                MORDOR_LOG_INFO(g_logWaitBlock) << this << " Ignoring PostQueuedCompletionStatus failure. Error tolerance = "
                    << m_iocpAllowedErrorCount << " Error count = " << m_errorCount;
                return;
            }
        }
        MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("PostQueuedCompletionStatus");
    }
}
  526. }