PageRenderTime 481ms CodeModel.GetById 81ms app.highlight 358ms RepoModel.GetById 35ms app.codeStats 0ms

/mordor/iomanager_iocp.cpp

http://github.com/mozy/mordor
C++ | 577 lines | 482 code | 51 blank | 44 comment | 117 complexity | d23f39a1445bbd93c2ac2e62791a4e44 MD5 | raw file
  1// Copyright (c) 2009 - Mozy, Inc.
  2
  3#include "iomanager_iocp.h"
  4
  5#include <boost/bind.hpp>
  6
  7#include "assert.h"
  8#include "atomic.h"
  9#include "fiber.h"
 10#include "runtime_linking.h"
 11
 12namespace Mordor {
 13
// Loggers: one for the IOManager proper, one for the WaitBlock waiter threads.
static Logger::ptr g_log = Log::lookup("mordor:iomanager");
static Logger::ptr g_logWaitBlock = Log::lookup("mordor:iomanager:waitblock");

// Process-wide IOCP error-tolerance state, shared by all IOManager instances
// and guarded by m_errorMutex (see setIOCPErrorTolerance() and tickle()).
boost::mutex IOManager::m_errorMutex;
// Max PostQueuedCompletionStatus failures tolerated within the window before
// tickle() rethrows; 0 disables the tolerance machinery entirely.
size_t IOManager::m_iocpAllowedErrorCount = 0;
// Length of the rolling error window, in seconds.
size_t IOManager::m_iocpErrorCountWindowInSeconds = 0;
// Failures observed in the current window.
size_t IOManager::m_errorCount = 0;
// Time of the first error in the current window, in the units used by
// tickle() (TimerManager::now() / 1000).
unsigned long long IOManager::m_firstErrorTime = 0;
 22
AsyncEvent::AsyncEvent()
{
    // Zero the whole structure (including the embedded OVERLAPPED) before
    // first use.  NOTE(review): memset over `this` assumes AsyncEvent is
    // effectively trivially-copyable; m_fiber appears to be a smart pointer
    // (see registerEvent), so this relies on its zero-initialized layout --
    // confirm against the declaration in iomanager_iocp.h.
    memset(this, 0, sizeof(AsyncEvent));
    // No owning thread until IOManager::registerEvent() records one.
    m_thread = emptytid();
}
 28
IOManager::WaitBlock::WaitBlock(IOManager &outer)
: m_outer(outer),
  m_inUseCount(0)
{
    // Slot 0 of m_handles is reserved for an auto-reset "reconfigure" event
    // used to kick run() out of WaitForMultipleObjects whenever the handle
    // array changes; user handles occupy slots 1..MAXIMUM_WAIT_OBJECTS-1.
    m_handles[0] = CreateEventW(NULL, FALSE, FALSE, NULL);
    MORDOR_LOG_DEBUG(g_logWaitBlock) << this << " CreateEventW(): " << m_handles[0]
        << " (" << lastError() << ")";
    if (!m_handles[0])
        MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("CreateEventW");
    // Auto-reset event signaled by run() to acknowledge that it has observed
    // a reconfiguration requested by unregisterEvent().
    m_reconfigured = CreateEventW(NULL, FALSE, FALSE, NULL);
    MORDOR_LOG_DEBUG(g_logWaitBlock) << this << " CreateEventW(): "
        << m_reconfigured << " (" << lastError() << ")";
    if (!m_reconfigured) {
        // Don't leak the first event if creating the second one failed.
        CloseHandle(m_handles[0]);
        MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("CreateEventW");
    }
}
 46
IOManager::WaitBlock::~WaitBlock()
{
    // Every user handle must have been unregistered by now (m_inUseCount
    // drops to -1 when the last one is removed); only the two internal
    // events remain to be closed.
    MORDOR_ASSERT(m_inUseCount <= 0);
    BOOL bRet = CloseHandle(m_handles[0]);
    MORDOR_LOG_DEBUG(g_logWaitBlock) << this << " CloseHandle("
        << m_handles[0] << "): " << bRet << " (" << lastError() << ")";
    bRet = CloseHandle(m_reconfigured);
    MORDOR_LOG_DEBUG(g_logWaitBlock) << this << " CloseHandle("
        << m_reconfigured << "): " << bRet << " (" << lastError() << ")";
}
 57
// Tries to add `hEvent` to this block's wait set.  Returns false if the block
// is full or already shutting down (the caller then tries/creates another
// block).  When `dg` is empty the currently-running fiber is resumed on its
// scheduler when the event fires; otherwise `dg` is scheduled.  `recurring`
// keeps the entry registered after it fires.
bool
IOManager::WaitBlock::registerEvent(HANDLE hEvent,
                                        boost::function <void ()> dg,
                                        bool recurring)
{
    boost::mutex::scoped_lock lock(m_mutex);
    // -1 means the waiter thread is draining/dead; MAXIMUM_WAIT_OBJECTS - 1
    // is the cap because slot 0 is reserved for the reconfigure event.
    if (m_inUseCount == -1 || m_inUseCount == MAXIMUM_WAIT_OBJECTS - 1)
        return false;
    ++m_inUseCount;
    m_handles[m_inUseCount] = hEvent;
    m_schedulers[m_inUseCount] = Scheduler::getThis();
    if (!dg)
        m_fibers[m_inUseCount] = Fiber::getThis();
    m_dgs[m_inUseCount] = dg;
    m_recurring[m_inUseCount] = recurring;
    MORDOR_LOG_DEBUG(g_logWaitBlock) << this << " registerEvent(" << hEvent
        << ", " << dg << ")";
    if (m_inUseCount == 1) {
        // First real handle: spin up the dedicated waiter thread.
        // NOTE(review): the Thread object goes out of scope immediately --
        // presumably Mordor's Thread detaches on destruction; verify.
        Thread thread(boost::bind(&WaitBlock::run, this));
    } else {
        // Waiter thread already running: poke the reconfigure event so it
        // picks up the new handle on its next WaitForMultipleObjects pass.
        if (!SetEvent(m_handles[0]))
            MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("SetEvent");
    }
    return true;
}
 83
// Named alias so removeEntry() can invoke the destructor explicitly
// (m_dgs[index].~functor()).
typedef boost::function<void ()> functor;
// Removes every registration of `handle` from this block and returns how many
// were removed.  If any were removed, blocks until the waiter thread
// acknowledges the new handle array, so the caller may safely close `handle`
// on return.
size_t
IOManager::WaitBlock::unregisterEvent(HANDLE handle)
{
    boost::mutex::scoped_lock lock(m_mutex);
    // -1 means the block is already draining/shut down; nothing to remove.
    if (m_inUseCount == -1)
        return 0;
    size_t unregistered = 0;
    // The same handle may have been registered multiple times; loop over all
    // occurrences (user slots start at index 1).
    HANDLE *srcHandle = std::find(m_handles + 1, m_handles + m_inUseCount + 1, handle);
    while (srcHandle != m_handles + m_inUseCount + 1) {
        ++unregistered;
        MORDOR_LOG_DEBUG(g_logWaitBlock) << this << " unregisterEvent(" << handle
            << "): " << (srcHandle != m_handles + m_inUseCount + 1);
        int index = (int)(srcHandle - m_handles);
        removeEntry(index);

        // Removed the last user entry: drop to -1 to mark the block dead so
        // the waiter thread exits and no further registrations are accepted.
        if (--m_inUseCount == 0) {
            --m_inUseCount;
            break;
        }
        srcHandle = std::find(m_handles + 1, m_handles + m_inUseCount + 1, handle);
    }

    if (unregistered) {
        // Handshake with run(): clear the ack event, wake the waiter so it
        // reloads its handle snapshot, then wait (outside the lock, so run()
        // can acquire it) for the acknowledgement.
        if (!ResetEvent(m_reconfigured))
            MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("ResetEvent");
        if (!SetEvent(m_handles[0]))
            MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("SetEvent");
        lock.unlock();
        if (WaitForSingleObject(m_reconfigured, INFINITE) == WAIT_FAILED)
            MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("WaitForSingleObject");
    }
    return unregistered;
}
118
// Body of the dedicated waiter thread: waits on the block's handle array and
// dispatches each signaled entry to its registered scheduler.  Exits when the
// block drains (m_inUseCount reaches -1), then removes this block from the
// owning IOManager's list.
void
IOManager::WaitBlock::run()
{
    DWORD dwRet;
    DWORD count;
    // Local snapshot of m_handles: WaitForMultipleObjects runs outside the
    // lock, so it must not read m_handles while it is being reconfigured.
    HANDLE handles[MAXIMUM_WAIT_OBJECTS];

    {
        boost::mutex::scoped_lock lock(m_mutex);
        if (m_inUseCount == -1) {
            // The first/final handle was unregistered out from under us
            // before we could even start
            if (!SetEvent(m_reconfigured))
                MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("SetEvent");
        }
        // In the drained case above this makes count == 0, so the loop below
        // is skipped and we fall through to the cleanup at the bottom.
        count = m_inUseCount + 1;
        memcpy(handles, m_handles, (count) * sizeof(HANDLE));
    }

    MORDOR_LOG_DEBUG(g_logWaitBlock) << this << " run " << count;

    while (count) {
        dwRet = WaitForMultipleObjects(count, handles, FALSE, INFINITE);
        MORDOR_LOG_LEVEL(g_logWaitBlock, dwRet == WAIT_FAILED ? Log::ERROR : Log::DEBUG)
            << this << " WaitForMultipleObjects(" << count << ", " << handles
            << "): " << dwRet << " (" << lastError() << ")";
        if (dwRet == WAIT_OBJECT_0) {
            // Array just got reconfigured
            boost::mutex::scoped_lock lock(m_mutex);
            // Acknowledge so a blocked unregisterEvent() can return.
            if (!SetEvent(m_reconfigured))
                MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("SetEvent");
            if (m_inUseCount == -1)
                break;
            count = m_inUseCount + 1;
            memcpy(handles, m_handles, (count) * sizeof(HANDLE));
            MORDOR_LOG_DEBUG(g_logWaitBlock) << this << " reconfigure " << count;
        } else if (dwRet >= WAIT_OBJECT_0 + 1 && dwRet < WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS) {
            boost::mutex::scoped_lock lock(m_mutex);

            if (m_inUseCount == -1) {
                // The final handle was unregistered out from under us
                if (!SetEvent(m_reconfigured))
                    MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("SetEvent");
                break;
            }

            // Re-find the signaled handle in the *current* array; it may have
            // moved (or been unregistered) since our snapshot was taken.
            HANDLE handle = handles[dwRet - WAIT_OBJECT_0];
            HANDLE *srcHandle = std::find(m_handles + 1, m_handles + m_inUseCount + 1, handle);
            MORDOR_LOG_DEBUG(g_logWaitBlock) << this << " Event " << handle << " "
                << (srcHandle != m_handles + m_inUseCount + 1);
            if (srcHandle != m_handles + m_inUseCount + 1) {
                int index = (int)(srcHandle - m_handles);
                // Dispatch: resume the registered fiber, or schedule the
                // registered callback, on the scheduler captured at
                // registration time.
                if (!m_dgs[index])
                    m_schedulers[index]->schedule(m_fibers[index]);
                else
                    m_schedulers[index]->schedule(m_dgs[index]);
                if (!m_recurring[index]) {
                    // One-shot entry: drop it and refresh the snapshot.
                    removeEntry(index);

                    if (--m_inUseCount == 0) {
                        --m_inUseCount;

                        // if IOManager (m_outer) is still running in unregisterEvent function,
                        // and waiting for m_reconfigured event,
                        // let unregisterEvent go ahead,
                        // otherwise, lock(m_outer.m_mutex) run into deadlock.
                        if (!SetEvent(m_reconfigured))
                            MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("SetEvent");
                        break;
                    }
                    count = m_inUseCount + 1;
                    memcpy(handles, m_handles, (count) * sizeof(HANDLE));
                }
            }
        } else if (dwRet == WAIT_FAILED) {
            MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("WaitForMultipleObjects");
        } else {
            MORDOR_NOTREACHED();
        }
    }
    MORDOR_LOG_DEBUG(g_logWaitBlock) << this << " done";
    {
        // Keep ourselves alive while erasing the owner's (possibly last)
        // reference to us, then tickle the IOManager so stopping() gets
        // re-evaluated now that a wait block is gone.
        ptr self = shared_from_this();
        boost::mutex::scoped_lock lock(m_outer.m_mutex);
        std::list<WaitBlock::ptr>::iterator it =
            std::find(m_outer.m_waitBlocks.begin(), m_outer.m_waitBlocks.end(),
                shared_from_this());
        MORDOR_ASSERT(it != m_outer.m_waitBlocks.end());
        m_outer.m_waitBlocks.erase(it);
        m_outer.tickle();
    }
}
211
// Compacts the parallel arrays after the entry at `index` is removed, shifting
// everything above it down one slot.  Caller must hold m_mutex, and
// m_inUseCount must still count the entry being removed.
void
IOManager::WaitBlock::removeEntry(int index)
{
    memmove(&m_handles[index], &m_handles[index + 1], (m_inUseCount - index) * sizeof(HANDLE));
    memmove(&m_schedulers[index], &m_schedulers[index + 1], (m_inUseCount - index) * sizeof(Scheduler *));
    // Manually destruct old object, move others down, and default construct unused one
    // NOTE(review): byte-wise relocation of boost::function/shared_ptr is not
    // sanctioned by the standard, though it works with these implementations.
    m_dgs[index].~functor();
    memmove(&m_dgs[index], &m_dgs[index + 1], (m_inUseCount - index) * sizeof(boost::function<void ()>));
    new(&m_dgs[m_inUseCount]) boost::function<void ()>();
    // Manually destruct old object, move others down, and default construct unused one
    m_fibers[index].~shared_ptr<Fiber>();
    memmove(&m_fibers[index], &m_fibers[index + 1], (m_inUseCount - index) * sizeof(Fiber::ptr));
    new(&m_fibers[m_inUseCount]) Fiber::ptr();
    memmove(&m_recurring[index], &m_recurring[index + 1], (m_inUseCount - index) * sizeof(bool));
}
227
// Creates the I/O completion port that all registered handles post to.
// `threads`/`useCaller` are forwarded to the Scheduler base; if `autoStart`,
// the scheduler threads are started immediately (the port is closed again if
// start() throws, so the half-constructed object does not leak it).
IOManager::IOManager(size_t threads, bool useCaller, bool autoStart)
    : Scheduler(threads, useCaller)
{
    m_pendingEventCount = 0;
    // INVALID_HANDLE_VALUE + NULL existing port creates a new, unassociated
    // completion port.
    m_hCompletionPort = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 0);
    MORDOR_LOG_LEVEL(g_log, m_hCompletionPort ? Log::VERBOSE : Log::ERROR) << this <<
        " CreateIoCompletionPort(): " << m_hCompletionPort << " ("
        << (m_hCompletionPort ? ERROR_SUCCESS : lastError()) << ")";
    if (!m_hCompletionPort)
        MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("CreateIoCompletionPort");
    if (autoStart) {
        try {
            start();
        } catch (...) {
            CloseHandle(m_hCompletionPort);
            throw;
        }
    }
}
247
IOManager::~IOManager()
{
    // Stop the scheduler threads (which block on the completion port in
    // idle()) before tearing the port down.
    stop();
    CloseHandle(m_hCompletionPort);
}
253
254bool
255IOManager::stopping()
256{
257    unsigned long long timeout;
258    return stopping(timeout);
259}
260
// Associates `handle` with this IOManager's completion port so that all of
// its overlapped I/O completions are delivered to idle().  Throws on failure.
void
IOManager::registerFile(HANDLE handle)
{
    // Add the handle to the existing completion port
    MORDOR_ASSERT(m_hCompletionPort != INVALID_HANDLE_VALUE);
    HANDLE hRet = CreateIoCompletionPort(handle, m_hCompletionPort, 0, 0);
    MORDOR_LOG_LEVEL(g_log, hRet ? Log::DEBUG : Log::ERROR) << this <<
        " CreateIoCompletionPort(" << handle << ", " << m_hCompletionPort
        << "): " << hRet << " (" << (hRet ? ERROR_SUCCESS : lastError()) << ")";
    // On success the call returns the existing port; anything else is failure.
    if (hRet != m_hCompletionPort) {
        MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("CreateIoCompletionPort");
    }
}
274
// Must be called immediately before issuing an overlapped I/O call using
// e->overlapped on a handle previously passed to registerFile().  Records the
// calling scheduler/thread/fiber so idle() can resume the fiber when the
// operation completes.
void
IOManager::registerEvent(AsyncEvent *e)
{
    MORDOR_ASSERT(e);
    e->m_scheduler = Scheduler::getThis();
    e->m_thread = gettid();
    e->m_fiber = Fiber::getThis();
    MORDOR_ASSERT(e->m_scheduler);
    MORDOR_ASSERT(e->m_fiber);
    MORDOR_LOG_DEBUG(g_log) << this << " registerEvent(" << &e->overlapped << ")";
#ifndef NDEBUG
    {
        boost::mutex::scoped_lock lock(m_mutex);
        // Debug builds track every outstanding OVERLAPPED to catch API misuse
        // (double registration, count mismatches).
        MORDOR_ASSERT(m_pendingEvents.find(&e->overlapped) == m_pendingEvents.end());
        m_pendingEvents[&e->overlapped] = e;
#endif
        atomicIncrement(m_pendingEventCount);
#ifndef NDEBUG
        MORDOR_ASSERT(m_pendingEvents.size() == m_pendingEventCount);
    }
#endif
}
297
// Undoes registerEvent(AsyncEvent*) when the overlapped operation will never
// complete (e.g. the issuing call failed synchronously), resetting the event
// for reuse and decrementing the pending-event count.
void
IOManager::unregisterEvent(AsyncEvent *e)
{
    MORDOR_ASSERT(e);
    MORDOR_LOG_DEBUG(g_log) << this << " unregisterEvent(" << &e->overlapped << ")";
    e->m_thread = emptytid();
    e->m_scheduler = NULL;
    e->m_fiber.reset();
#ifndef NDEBUG
    {
        boost::mutex::scoped_lock lock(m_mutex);
        // Debug-only bookkeeping mirroring registerEvent(); the event must
        // actually be outstanding.
        std::map<OVERLAPPED *, AsyncEvent *>::iterator it =
            m_pendingEvents.find(&e->overlapped);
        MORDOR_ASSERT(it != m_pendingEvents.end());
        m_pendingEvents.erase(it);
#endif
        atomicDecrement(m_pendingEventCount);
#ifndef NDEBUG
        MORDOR_ASSERT(m_pendingEvents.size() == m_pendingEventCount);
    }
#endif
}
320
321void
322IOManager::registerEvent(HANDLE handle, boost::function<void ()> dg, bool recurring)
323{
324    MORDOR_LOG_DEBUG(g_log) << this << " registerEvent(" << handle << ", " << dg
325        << ")";
326    MORDOR_ASSERT(handle);
327    MORDOR_ASSERT(handle != INVALID_HANDLE_VALUE);
328    MORDOR_ASSERT(Scheduler::getThis());
329
330    boost::mutex::scoped_lock lock(m_mutex);
331    for (std::list<WaitBlock::ptr>::iterator it = m_waitBlocks.begin();
332        it != m_waitBlocks.end();
333        ++it) {
334        if ((*it)->registerEvent(handle, dg, recurring))
335            return;
336    }
337    m_waitBlocks.push_back(WaitBlock::ptr(new WaitBlock(*this)));
338    bool result = m_waitBlocks.back()->registerEvent(handle, dg, recurring);
339    MORDOR_ASSERT(result);
340}
341
342size_t
343IOManager::unregisterEvent(HANDLE handle)
344{
345    MORDOR_ASSERT(handle);
346    boost::mutex::scoped_lock lock(m_mutex);
347    size_t result = 0;
348    for (std::list<WaitBlock::ptr>::iterator it = m_waitBlocks.begin();
349        it != m_waitBlocks.end();
350        ++it) {
351        result += (*it)->unregisterEvent(handle);
352    }
353    MORDOR_LOG_DEBUG(g_log) << this << " unregisterEvent(" << handle << "): " << result;
354    return result;
355}
356
357void
358IOManager::cancelEvent(HANDLE hFile, AsyncEvent *e)
359{
360    MORDOR_ASSERT(hFile);
361    MORDOR_ASSERT(e);
362    MORDOR_LOG_DEBUG(g_log) << this << " cancelEvent(" << hFile << ", "
363        << &e->overlapped << ")";
364
365    if (!pCancelIoEx(hFile, &e->overlapped)) {
366        error_t error = lastError();
367        if (error == ERROR_CALL_NOT_IMPLEMENTED) {
368            if (e->m_thread == emptytid()) {
369                // Nothing to cancel
370                return;
371            } else if (e->m_thread == gettid()) {
372                if (!CancelIo(hFile))
373                    MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("CancelIo");
374            } else {
375                MORDOR_ASSERT(e->m_scheduler);
376                // Have to marshal to the original thread
377                SchedulerSwitcher switcher;
378                e->m_scheduler->switchTo(e->m_thread);
379                if (!CancelIo(hFile))
380                    MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("CancelIo");
381            }
382        } else if (error == ERROR_NOT_FOUND || error == ERROR_FILE_NOT_FOUND) {
383            // Nothing to cancel
384        }
385        else if (error == ERROR_INVALID_HANDLE) {
386            // Nothing to do.
387            // Not sure why, but the socket is trying to reuse a closed handle.
388            // This problem is happening randomly, so this is just a workround for now.
389            // JIRA: KALYPSO-2563
390            MORDOR_LOG_DEBUG(g_log) << this << " cancelEvent(: " << hFile << ", "
391                << &e->overlapped << ") : " << lastError();
392        } else {
393            MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("CancelIoEx");
394        }
395    }
396}
397
398bool
399IOManager::stopping(unsigned long long &nextTimeout)
400{
401    // Check when the next timer is expected to timeout
402    nextTimeout = nextTimer();
403
404    // Even if the scheduler wants to stop we return false
405    // if there is any pending work
406    if (nextTimeout == ~0ull && Scheduler::stopping()) {
407        if (m_pendingEventCount != 0)
408            return false;
409        boost::mutex::scoped_lock lock(m_mutex);
410        return m_waitBlocks.empty();
411    }
412    return false;
413}
414
415
416// Each thread of this IO manager runs this method as a fiber and it is active when there is nothing
417// to do.  It must process incoming Async IO calls, expired timers and any fiber scheduled.
418void
419IOManager::idle()
420{
421    OVERLAPPED_ENTRY events[64];
422    ULONG count;
423    while (true) {
424        unsigned long long nextTimeout;
425        if (stopping(nextTimeout))
426            return;
427        DWORD timeout = INFINITE;
428        if (nextTimeout != ~0ull) {
429            // The maximum time we can wait in GetQueuedCompletionStatusEx is
430            // up to the point that the next timer will expire
431            timeout = (DWORD)(nextTimeout / 1000) + 1;
432        }
433        count = 0;
434        BOOL ret = pGetQueuedCompletionStatusEx(m_hCompletionPort,
435            events,
436            64,
437            &count,
438            timeout,
439            FALSE);
440        error_t error = lastError();
441        MORDOR_LOG_DEBUG(g_log) << this << " GetQueuedCompletionStatusEx("
442            << m_hCompletionPort << ", " << timeout << "): " << ret << ", ("
443            << count << ") (" << (ret ? ERROR_SUCCESS : error) << ")";
444        if (!ret && error) {
445            if (error == WAIT_TIMEOUT) {
446                // No tickles or AsyncIO calls have happened so check for timers
447                // that need execution
448                std::vector<boost::function<void ()> > expired = processTimers();
449                if (!expired.empty()) {
450                    schedule(expired.begin(), expired.end());
451                    expired.clear();
452                    try {
453                        // Let the timers execute
454                        Fiber::yield();
455                    } catch (OperationAbortedException &) {
456                        return;
457                    }
458                }
459                continue;
460            }
461            MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("GetQueuedCompletionStatusEx");
462        }
463
464        // Schedule any timers that are ready to execute
465        std::vector<boost::function<void ()> > expired = processTimers();
466        if (!expired.empty()) {
467            schedule(expired.begin(), expired.end());
468            expired.clear();
469        }
470
471#ifndef NDEBUG
472        boost::mutex::scoped_lock lock(m_mutex, boost::defer_lock_t());
473#endif
474        int tickles = 0;
475        for (ULONG i = 0; i < count; ++i) {
476            if (events[i].lpCompletionKey == ~0) {
477                MORDOR_LOG_VERBOSE(g_log) << this << " received tickle";
478                ++tickles;
479                continue;
480            }
481
482            // An Async IO call has completed, so wake up the fiber
483            // that called registerEvent()
484            AsyncEvent *e = (AsyncEvent *)events[i].lpOverlapped;
485#ifndef NDEBUG
486            if (!lock.owns_lock())
487                lock.lock();
488
489            // Verify that the API has been used properly,
490            // e.g. that registerEvent has been called
491            std::map<OVERLAPPED *, AsyncEvent *>::iterator it =
492                m_pendingEvents.find(events[i].lpOverlapped);
493            MORDOR_ASSERT(it != m_pendingEvents.end());
494            MORDOR_ASSERT(e == it->second);
495            m_pendingEvents.erase(it);
496#endif
497            MORDOR_ASSERT(e->m_scheduler);
498            MORDOR_ASSERT(e->m_fiber);
499
500            MORDOR_LOG_TRACE(g_log) << this << " OVERLAPPED_ENTRY {"
501                << events[i].lpCompletionKey << ", " << events[i].lpOverlapped
502                << ", " << events[i].Internal << ", "
503                << events[i].dwNumberOfBytesTransferred << "}";
504
505            Scheduler *scheduler = e->m_scheduler;
506            Fiber::ptr fiber;
507            fiber.swap(e->m_fiber);
508
509            // Clean up the AsyncEvent structure which can
510            // be reused for the next Async IO call
511            e->m_thread = emptytid();
512            e->m_scheduler = NULL;
513            scheduler->schedule(fiber);
514        }
515        if (count != tickles) {
516            // Subtract the number of completed Async IO calls
517            atomicAdd(m_pendingEventCount, (size_t)(-(ptrdiff_t)(count - tickles)));
518        }
519#ifndef NDEBUG
520        if (lock.owns_lock()) {
521            MORDOR_ASSERT(m_pendingEventCount == m_pendingEvents.size());
522            lock.unlock();
523        }
524#endif
525        // Because we recieved either a tickle or a completed Async IO call
526        // we know that there must be some work lined up for the scheduler
527        try {
528            Fiber::yield();
529        } catch (OperationAbortedException &) {
530            return;
531        }
532    }
533}
534
535void IOManager::setIOCPErrorTolerance(size_t count, size_t seconds)
536{
537    boost::mutex::scoped_lock lock(m_errorMutex);
538    m_iocpAllowedErrorCount = count;
539    m_iocpErrorCountWindowInSeconds = seconds;
540}
541
// Wakes one thread blocked in idle()'s GetQueuedCompletionStatusEx call so it
// re-checks for scheduled work.
void
IOManager::tickle()
{
    // Send a special message with distinct key ~0.  This message does not correspond to
    // any real completed Async IO call, rather it is used to force the idle() method
    // out of a GetQueuedCompletionStatusEx status
    BOOL bRet = PostQueuedCompletionStatus(m_hCompletionPort, 0, ~0, NULL);
    MORDOR_LOG_LEVEL(g_log, bRet ? Log::DEBUG : Log::ERROR) << this
        << " PostQueuedCompletionStatus(" << m_hCompletionPort
        << ", 0, ~0, NULL): " << bRet << " (" << (bRet ? ERROR_SUCCESS : lastError()) << ")";

    if (!bRet) {
        boost::mutex::scoped_lock lock(m_errorMutex);

        if (m_iocpAllowedErrorCount != 0) {
            // Rolling window: if the configured window has elapsed since the
            // first recorded error, restart the window and counter.
            unsigned long long currentTime = Mordor::TimerManager::now() / 1000ULL;
            unsigned long long secondsElapsed = (currentTime - m_firstErrorTime) / 1000;
            if (secondsElapsed > m_iocpErrorCountWindowInSeconds) {
                // It's been a while since we started encountering errors
                m_firstErrorTime = currentTime;
                m_errorCount = 0;
            }

            // #112528 - Swallow these errors until we exceed the error tolerance
            if (++m_errorCount <= m_iocpAllowedErrorCount) {
                MORDOR_LOG_INFO(g_logWaitBlock) << this << "  Ignoring PostQueuedCompletionStatus failure. Error tolerance = "
                    << m_iocpAllowedErrorCount << " Error count = " << m_errorCount;
                return;
            }
        }

        MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("PostQueuedCompletionStatus");
    }
}
576
577}