
/mordor/fiber.cpp

http://github.com/mozy/mordor
Possible License(s): BSD-3-Clause
// Copyright (c) 2009 - Mozy, Inc.

#include "fiber.h"

#include <boost/thread/tss.hpp>

#include "assert.h"
#include "config.h"
#include "exception.h"
#include "statistics.h"
#include "version.h"

#ifdef WINDOWS
#include <windows.h>

#include "runtime_linking.h"
#else
#include <sys/mman.h>
#include <pthread.h>
#include <unistd.h>     // for sysconf()
#endif

namespace Mordor {

static AverageMinMaxStatistic<unsigned int> &g_statAlloc =
    Statistics::registerStatistic("fiber.allocstack",
    AverageMinMaxStatistic<unsigned int>("us"));
static AverageMinMaxStatistic<unsigned int> &g_statFree =
    Statistics::registerStatistic("fiber.freestack",
    AverageMinMaxStatistic<unsigned int>("us"));
static volatile unsigned int g_cntFibers = 0; // Active fibers
static MaxStatistic<unsigned int> &g_statMaxFibers =
    Statistics::registerStatistic("fiber.max", MaxStatistic<unsigned int>());

#ifdef SETJMP_FIBERS
#ifdef OSX
#define setjmp _setjmp
#define longjmp _longjmp
#endif
#endif

static size_t g_pagesize;

namespace {

static struct FiberInitializer {
    FiberInitializer()
    {
#ifdef WINDOWS
        SYSTEM_INFO info;
        GetSystemInfo(&info);
        g_pagesize = info.dwPageSize;
#elif defined(POSIX)
        g_pagesize = sysconf(_SC_PAGESIZE);
#endif
    }
} g_init;

}

static ConfigVar<size_t>::ptr g_defaultStackSize = Config::lookup<size_t>(
    "fiber.defaultstacksize",
#ifdef NATIVE_WINDOWS_FIBERS
    0u,
#else
    1024 * 1024u,
#endif
    "Default stack size for new fibers. This is the virtual size; physical "
    "memory isn't consumed until it is actually referenced.");
// t_fiber is the Fiber currently executing on this thread.
// t_threadFiber is the Fiber that represents the thread's original stack.
// t_threadFiber is a boost::tss, because it supports automatic cleanup when
// the thread exits (and datatypes larger than pointer size), while
// ThreadLocalStorage does not.
// t_fiber is a ThreadLocalStorage, because it's faster than boost::tss.
ThreadLocalStorage<Fiber *> Fiber::t_fiber;
static boost::thread_specific_ptr<Fiber::ptr> t_threadFiber;

static boost::mutex & g_flsMutex()
{
    static boost::mutex mutex;
    return mutex;
}

static std::vector<bool> & g_flsIndices()
{
    static std::vector<bool> indices;
    return indices;
}

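// The default constructor adopts the calling thread itself as a fiber: it is
// only reached through getThis() on a thread that isn't yet running one, so
// it starts in EXEC with no stack of its own (with native Windows fibers,
// the thread is converted in place).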
Fiber::Fiber()
{
    g_statMaxFibers.update(atomicIncrement(g_cntFibers));
    MORDOR_ASSERT(!t_fiber);
    m_state = EXEC;
    m_stack = NULL;
    m_stacksize = 0;
    m_sp = NULL;
    setThis(this);
#ifdef NATIVE_WINDOWS_FIBERS
    if (!pIsThreadAFiber())
        m_stack = ConvertThreadToFiber(NULL);
    m_sp = GetCurrentFiber();
#elif defined(UCONTEXT_FIBERS)
    m_sp = &m_ctx;
#elif defined(SETJMP_FIBERS)
    m_sp = &m_env;
#endif
}

Fiber::Fiber(boost::function<void ()> dg, size_t stacksize)
{
    g_statMaxFibers.update(atomicIncrement(g_cntFibers));
    // Round the requested stack size up to a whole number of pages
    stacksize += g_pagesize - 1;
    stacksize -= stacksize % g_pagesize;
    m_dg = dg;
    m_state = INIT;
    m_stack = NULL;
    m_stacksize = stacksize;
    allocStack();
#ifdef UCONTEXT_FIBERS
    m_sp = &m_ctx;
#elif defined(SETJMP_FIBERS)
    m_sp = &m_env;
#endif
    initStack();
}

Fiber::~Fiber()
{
    atomicDecrement(g_cntFibers);
    if (!m_stack || m_stack == m_sp) {
        // Thread entry fiber
        MORDOR_NOTHROW_ASSERT(!m_dg);
        MORDOR_NOTHROW_ASSERT(m_state == EXEC);
        Fiber *cur = t_fiber.get();
        // We're actually running on the fiber we're about to delete,
        // i.e. the thread is dying, so clean up after ourselves
        if (cur == this) {
            setThis(NULL);
#ifdef NATIVE_WINDOWS_FIBERS
            if (m_stack) {
                MORDOR_NOTHROW_ASSERT(m_stack == m_sp);
                MORDOR_NOTHROW_ASSERT(m_stack == GetCurrentFiber());
                pConvertFiberToThread();
            }
#endif
        }
        // Otherwise, there's no thread left to clean up
    } else {
        // Regular fiber
        MORDOR_NOTHROW_ASSERT(m_state == TERM || m_state == INIT || m_state == EXCEPT);
        freeStack();
    }
}

void
Fiber::reset(boost::function<void ()> dg)
{
    m_exception = boost::exception_ptr();
    MORDOR_ASSERT(m_stack);
    MORDOR_ASSERT(m_state == TERM || m_state == INIT || m_state == EXCEPT);
    m_dg = dg;
    initStack();
    m_state = INIT;
}

Fiber::ptr
Fiber::getThis()
{
    if (t_fiber)
        return t_fiber->shared_from_this();
    Fiber::ptr threadFiber(new Fiber());
    MORDOR_ASSERT(t_fiber.get() == threadFiber.get());
    t_threadFiber.reset(new Fiber::ptr(threadFiber));
    return t_fiber->shared_from_this();
}

void
Fiber::setThis(Fiber *f)
{
    t_fiber = f;
}

void
Fiber::call()
{
    MORDOR_ASSERT(!m_outer);
    ptr cur = getThis();
    MORDOR_ASSERT(m_state == HOLD || m_state == INIT);
    MORDOR_ASSERT(cur);
    MORDOR_ASSERT(cur.get() != this);
    setThis(this);
    m_outer = cur;
    m_state = m_exception ? EXCEPT : EXEC;
    cur->switchContext(this);
    setThis(cur.get());
    MORDOR_ASSERT(cur->m_yielder);
    m_outer.reset();
    if (cur->m_yielder) {
        MORDOR_ASSERT(cur->m_yielder.get() == this);
        Fiber::ptr yielder = cur->m_yielder;
        yielder->m_state = cur->m_yielderNextState;
        cur->m_yielder.reset();
        if (yielder->m_state == EXCEPT && yielder->m_exception)
            Mordor::rethrow_exception(yielder->m_exception);
    }
    MORDOR_ASSERT(cur->m_state == EXEC);
}

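// A minimal usage sketch of the call()/yield() pairing (illustrative only;
// the names below are hypothetical, and it assumes the header gives the
// stacksize argument a default):
//
//     void work()
//     {
//         stepOne();
//         Fiber::yield();  // suspends work(); the first call() returns
//         stepTwo();
//     }
//
//     Fiber::ptr f(new Fiber(&work));
//     f->call();           // runs work() up to the yield(); f is now HOLD
//     f->call();           // resumes after the yield(); f is now TERM
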
void
Fiber::inject(boost::exception_ptr exception)
{
    MORDOR_ASSERT(exception);
    m_exception = exception;
    call();
}

Fiber::ptr
Fiber::yieldTo(bool yieldToCallerOnTerminate)
{
    return yieldTo(yieldToCallerOnTerminate, HOLD);
}

void
Fiber::yield()
{
    ptr cur = getThis();
    MORDOR_ASSERT(cur);
    MORDOR_ASSERT(cur->m_state == EXEC);
    MORDOR_ASSERT(cur->m_outer);
    cur->m_outer->m_yielder = cur;
    cur->m_outer->m_yielderNextState = Fiber::HOLD;
    cur->switchContext(cur->m_outer.get());
    if (cur->m_yielder) {
        cur->m_yielder->m_state = cur->m_yielderNextState;
        cur->m_yielder.reset();
    }
    if (cur->m_state == EXCEPT) {
        MORDOR_ASSERT(cur->m_exception);
        Mordor::rethrow_exception(cur->m_exception);
    }
    MORDOR_ASSERT(cur->m_state == EXEC);
}

Fiber::State
Fiber::state()
{
    return m_state;
}

Fiber::ptr
Fiber::yieldTo(bool yieldToCallerOnTerminate, State targetState)
{
    MORDOR_ASSERT(m_state == HOLD || m_state == INIT);
    MORDOR_ASSERT(targetState == HOLD || targetState == TERM || targetState == EXCEPT);
    ptr cur = getThis();
    MORDOR_ASSERT(cur);
    setThis(this);
    if (yieldToCallerOnTerminate) {
        Fiber::ptr outer = shared_from_this();
        Fiber::ptr previous;
        while (outer) {
            previous = outer;
            outer = outer->m_outer;
        }
        previous->m_terminateOuter = cur;
    }
    m_state = EXEC;
    m_yielder = cur;
    m_yielderNextState = targetState;
    Fiber *curp = cur.get();
    // Relinquish our reference
    cur.reset();
    curp->switchContext(this);
#ifdef NATIVE_WINDOWS_FIBERS
    if (targetState == TERM)
        return Fiber::ptr();
#endif
    MORDOR_ASSERT(targetState != TERM);
    setThis(curp);
    if (curp->m_yielder) {
        Fiber::ptr yielder = curp->m_yielder;
        yielder->m_state = curp->m_yielderNextState;
        curp->m_yielder.reset();
        if (yielder->m_exception)
            Mordor::rethrow_exception(yielder->m_exception);
        return yielder;
    }
    if (curp->m_state == EXCEPT) {
        MORDOR_ASSERT(curp->m_exception);
        Mordor::rethrow_exception(curp->m_exception);
    }
    MORDOR_ASSERT(curp->m_state == EXEC);
    return Fiber::ptr();
}

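// Note the asymmetry: call()/yield() form a strict caller/callee pair (the
// callee always returns to m_outer), whereas yieldTo() switches directly
// between arbitrary fibers without unwinding through a caller.  With
// yieldToCallerOnTerminate, the outermost fiber in the target's chain is
// told (via m_terminateOuter) to switch back to us when the target
// terminates.
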
void
Fiber::entryPoint()
{
    // This function never returns, so take care that smart pointers (or other
    // resources) are properly released.
    ptr cur = getThis();
    MORDOR_ASSERT(cur);
    if (cur->m_yielder) {
        cur->m_yielder->m_state = cur->m_yielderNextState;
        cur->m_yielder.reset();
    }
    MORDOR_ASSERT(cur->m_dg);
    State nextState = TERM;
    try {
        if (cur->m_state == EXCEPT) {
            MORDOR_ASSERT(cur->m_exception);
            Mordor::rethrow_exception(cur->m_exception);
        }
        MORDOR_ASSERT(cur->m_state == EXEC);
        cur->m_dg();
        cur->m_dg = NULL;
    } catch (boost::exception &ex) {
        removeTopFrames(ex);
        cur->m_exception = boost::current_exception();
        nextState = EXCEPT;
    } catch (...) {
        cur->m_exception = boost::current_exception();
        nextState = EXCEPT;
    }
    exitPoint(cur, nextState);
#ifndef NATIVE_WINDOWS_FIBERS
    MORDOR_NOTREACHED();
#endif
}

void
Fiber::exitPoint(Fiber::ptr &cur, State targetState)
{
    // This function never returns, so take care that smart pointers (or other
    // resources) are properly released.
    Fiber::ptr outer;
    Fiber *rawPtr = NULL;
    if (!cur->m_terminateOuter.expired() && !cur->m_outer) {
        outer = cur->m_terminateOuter.lock();
        rawPtr = outer.get();
    } else {
        outer = cur->m_outer;
        rawPtr = cur.get();
    }
    MORDOR_ASSERT(outer);
    MORDOR_ASSERT(rawPtr);
    MORDOR_ASSERT(outer != cur);
    // Have to set this reference before calling yieldTo()
    // so we can reset cur before we call yieldTo()
    // (since it's not ever going to destruct)
    outer->m_yielder = cur;
    outer->m_yielderNextState = targetState;
    MORDOR_ASSERT(!cur.unique());
    cur.reset();
    if (rawPtr == outer.get()) {
        rawPtr = outer.get();
        MORDOR_ASSERT(!outer.unique());
        outer.reset();
        rawPtr->yieldTo(false, targetState);
    } else {
        outer.reset();
        rawPtr->switchContext(rawPtr->m_outer.get());
    }
}

#ifdef NATIVE_WINDOWS_FIBERS
static VOID CALLBACK native_fiber_entryPoint(PVOID lpParameter)
{
    void (*entryPoint)() = (void (*)())lpParameter;
    // A fiber procedure must never return (doing so exits the thread).  The
    // OS fiber is reused after reset(): when it is resumed again,
    // Fiber::entryPoint() eventually returns here, so loop and run it again
    while (true) {
        entryPoint();
    }
}
#endif

void
Fiber::allocStack()
{
    if (m_stacksize == 0)
        m_stacksize = g_defaultStackSize->val();
#ifndef NATIVE_WINDOWS_FIBERS
    TimeStatistic<AverageMinMaxStatistic<unsigned int> > time(g_statAlloc);
#endif
#ifdef NATIVE_WINDOWS_FIBERS
    // Fibers are allocated in initStack
#elif defined(WINDOWS)
    m_stack = VirtualAlloc(NULL, m_stacksize + g_pagesize, MEM_RESERVE, PAGE_NOACCESS);
    if (!m_stack)
        MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("VirtualAlloc");
    VirtualAlloc(m_stack, g_pagesize, MEM_COMMIT, PAGE_READWRITE | PAGE_GUARD);
    // TODO: don't commit until referenced
    VirtualAlloc((char *)m_stack + g_pagesize, m_stacksize, MEM_COMMIT, PAGE_READWRITE);
    m_sp = (char *)m_stack + m_stacksize + g_pagesize;
#elif defined(POSIX)
    m_stack = mmap(NULL, m_stacksize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
    if (m_stack == MAP_FAILED)
        MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("mmap");
#if defined(VALGRIND) && (defined(LINUX) || defined(OSX))
    m_valgrindStackId = VALGRIND_STACK_REGISTER(m_stack, (char *)m_stack + m_stacksize);
#endif
    m_sp = (char *)m_stack + m_stacksize;
#endif
}

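// Layout produced by the non-native Windows branch above (the stack grows
// down from m_sp toward the guard page, so an overflow faults instead of
// silently corrupting adjacent memory):
//
//   m_stack           m_stack + g_pagesize                           m_sp
//   |  guard page     |  committed, read/write (m_stacksize bytes)   |
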
void
Fiber::freeStack()
{
    TimeStatistic<AverageMinMaxStatistic<unsigned int> > time(g_statFree);
#ifdef NATIVE_WINDOWS_FIBERS
    MORDOR_ASSERT(m_stack == &m_sp);
    DeleteFiber(m_sp);
#elif defined(WINDOWS)
    VirtualFree(m_stack, 0, MEM_RELEASE);
#elif defined(POSIX)
#if defined(VALGRIND) && (defined(LINUX) || defined(OSX))
    VALGRIND_STACK_DEREGISTER(m_valgrindStackId);
#endif
    munmap(m_stack, m_stacksize);
#endif
}

void
Fiber::switchContext(Fiber *to)
{
#ifdef NATIVE_WINDOWS_FIBERS
    SwitchToFiber(to->m_sp);
#elif defined(UCONTEXT_FIBERS)
# if defined(CXXABIV1_EXCEPTION)
    this->m_eh.swap(to->m_eh);
# endif
    if (swapcontext((ucontext_t *)(this->m_sp), (ucontext_t *)to->m_sp))
        MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("swapcontext");
#elif defined(SETJMP_FIBERS)
    if (!setjmp(*(jmp_buf *)this->m_sp)) {
# if defined(CXXABIV1_EXCEPTION)
        this->m_eh.swap(to->m_eh);
# endif
        longjmp(*(jmp_buf *)to->m_sp, 1);
    }
#endif
}

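// Three interchangeable backends: SwitchToFiber() with native Windows
// fibers, swapcontext() where ucontext is available, and setjmp()/longjmp()
// with a hand-rigged stack pointer otherwise.  The m_eh swap moves the
// cxxabiv1 exception-handling globals along with their owning fiber,
// presumably so an in-flight exception stays with the fiber that raised it
// rather than leaking across the switch.
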
void
Fiber::initStack()
{
#ifdef NATIVE_WINDOWS_FIBERS
    if (m_stack)
        return;
    TimeStatistic<AverageMinMaxStatistic<unsigned int> > stat(g_statAlloc);
    m_sp = m_stack = pCreateFiberEx(0, m_stacksize, 0, &native_fiber_entryPoint, &Fiber::entryPoint);
    stat.finish();
    if (!m_stack)
        MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("CreateFiber");
    // This is so we can distinguish a created fiber from the "root" fiber
    m_stack = &m_sp;
#elif defined(UCONTEXT_FIBERS)
    if (getcontext(&m_ctx))
        MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("getcontext");
    m_ctx.uc_link = NULL;
    m_ctx.uc_stack.ss_sp = m_stack;
    m_ctx.uc_stack.ss_size = m_stacksize;
#ifdef OSX
    m_ctx.uc_mcsize = sizeof(m_mctx);
    m_ctx.uc_mcontext = (mcontext_t)m_mctx;
#endif
    makecontext(&m_ctx, &Fiber::entryPoint, 0);
#elif defined(SETJMP_FIBERS)
    if (setjmp(m_env)) {
        Fiber::entryPoint();
        MORDOR_NOTREACHED();
    }
#ifdef OSX
#ifdef X86
    m_env[9] = (int)m_stack + m_stacksize; // ESP
#if defined(__GNUC__) && defined(__llvm__)
    // see the `rbp' note below for why ebp is set to esp
    m_env[8] = m_env[9]; // EBP
#else
    m_env[8] = 0xffffffff; // EBP
#endif
#elif defined(X86_64)
    long long *env = (long long *)m_env;
    env[2] = (long long)m_stack + m_stacksize; // RSP
#if defined(__GNUC__) && defined(__llvm__)
    // NOTE: the `rbp' register should be cleared: once setjmp() returns 0 and
    // this initStack() call finishes, its call frame is popped, so when
    // setjmp() returns the second time (via longjmp), the original `rbp'
    // can't be used anymore because its address is no longer valid. However,
    // when compiling with gcc+llvm at -O0, there are still additional
    // assembly instructions that write through rbp, which would cause a
    // segmentation fault if `rbp' were cleared to 0 here. To work around
    // the issue, set `rbp' to the Fiber's own stack pointer; writing junk
    // data to the Fiber's own empty stack doesn't hurt anything.
    // This issue only happens when compiling with gcc + llvm + `-O0'.
    env[1] = env[2]; // RBP
#else
    env[1] = 0x0LL; // RBP
#endif
#elif defined(PPC)
    m_env[0] = (int)m_stack;
#else
#error Architecture not supported
#endif
#elif defined (LINUX)
#ifdef ARM
    int *env = (int *)m_env;
    env[8] = (int)m_stack + m_stacksize;
#else
#error Platform not supported
#endif
#else
#error Platform not supported
#endif
#endif
}

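// The SETJMP_FIBERS branch above pokes the stack and frame pointers straight
// into private jmp_buf slots (e.g. m_env[9]/m_env[8] on 32-bit OS X), so the
// indices are tied to each libc's undocumented jmp_buf layout; any new
// platform needs its own verified offsets, which is what the #error
// fallbacks enforce.
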
#ifdef WINDOWS
static bool g_doesntHaveOSFLS;
#endif

size_t
Fiber::flsAlloc()
{
#ifdef WINDOWS
    while (!g_doesntHaveOSFLS) {
        size_t result = pFlsAlloc(NULL);
        if (result == FLS_OUT_OF_INDEXES && lastError() == ERROR_CALL_NOT_IMPLEMENTED) {
            g_doesntHaveOSFLS = true;
            break;
        }
        if (result == FLS_OUT_OF_INDEXES)
            MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("FlsAlloc");
        return result;
    }
#endif
    boost::mutex::scoped_lock lock(g_flsMutex());
    std::vector<bool>::iterator it = std::find(g_flsIndices().begin(),
        g_flsIndices().end(), false);
    // TODO: we don't clear out values when freeing, so we can't reuse
    // indices; force a new one
    it = g_flsIndices().end();
    if (it == g_flsIndices().end()) {
        g_flsIndices().resize(g_flsIndices().size() + 1);
        g_flsIndices()[g_flsIndices().size() - 1] = true;
        return g_flsIndices().size() - 1;
    } else {
        size_t result = it - g_flsIndices().begin();
        g_flsIndices()[result] = true;
        return result;
    }
}

void
Fiber::flsFree(size_t key)
{
#ifdef WINDOWS
    if (!g_doesntHaveOSFLS) {
        if (!pFlsFree((DWORD)key))
            MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("FlsFree");
        return;
    }
#endif
    boost::mutex::scoped_lock lock(g_flsMutex());
    MORDOR_ASSERT(key < g_flsIndices().size());
    MORDOR_ASSERT(g_flsIndices()[key]);
    if (key + 1 == g_flsIndices().size()) {
        g_flsIndices().resize(key);
    } else {
        // TODO: clear out current values
        g_flsIndices()[key] = false;
    }
}

void
Fiber::flsSet(size_t key, intptr_t value)
{
#ifdef WINDOWS
    if (!g_doesntHaveOSFLS) {
        if (!pFlsSetValue((DWORD)key, (PVOID)value))
            MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("FlsSetValue");
        return;
    }
#endif
    Fiber::ptr self = Fiber::getThis();
    if (self->m_fls.size() <= key)
        self->m_fls.resize(key + 1);
    self->m_fls[key] = value;
}

intptr_t
Fiber::flsGet(size_t key)
{
#ifdef WINDOWS
    if (!g_doesntHaveOSFLS) {
        // Preserve the thread's last-error value across the FLS call
        error_t error = lastError();
        intptr_t result = (intptr_t)pFlsGetValue((DWORD)key);
        lastError(error);
        return result;
    }
#endif
    Fiber::ptr self = Fiber::getThis();
    if (self->m_fls.size() <= key)
        return 0;
    return self->m_fls[key];
}

std::vector<void *>
Fiber::backtrace()
{
    MORDOR_ASSERT(m_state != EXEC);
    std::vector<void *> result;
    if (m_state != HOLD)
        return result;
    // Only implemented on Windows; elsewhere an empty trace is returned
#ifdef WINDOWS
    STACKFRAME64 frame;
    DWORD type;
    CONTEXT *context;
#ifdef _M_IX86
    context = (CONTEXT *)((char *)m_sp + 0x14);
    type = IMAGE_FILE_MACHINE_I386;
    frame.AddrPC.Offset = context->Eip;
    frame.AddrPC.Mode = AddrModeFlat;
    frame.AddrFrame.Offset = context->Ebp;
    frame.AddrFrame.Mode = AddrModeFlat;
    frame.AddrStack.Offset = context->Esp;
    frame.AddrStack.Mode = AddrModeFlat;
    context = NULL;
#elif _M_X64
    context = (CONTEXT *)((char *)m_sp + 0x30);
    CONTEXT dupContext;
    memcpy(&dupContext, context, sizeof(CONTEXT));
    context = &dupContext;
    type = IMAGE_FILE_MACHINE_AMD64;
    frame.AddrPC.Offset = dupContext.Rip;
    frame.AddrPC.Mode = AddrModeFlat;
    frame.AddrFrame.Offset = dupContext.Rsp;
    frame.AddrFrame.Mode = AddrModeFlat;
    frame.AddrStack.Offset = dupContext.Rsp;
    frame.AddrStack.Mode = AddrModeFlat;
#else
#error "Unsupported platform"
#endif
    while (result.size() < 64) {
        if (!StackWalk64(type, GetCurrentProcess(), GetCurrentThread(),
            &frame, context, NULL, &SymFunctionTableAccess64,
            &SymGetModuleBase64, NULL))
            break;
        if (frame.AddrPC.Offset != 0) {
            result.push_back((void *)frame.AddrPC.Offset);
        }
    }
#endif
    return result;
}

std::ostream &operator<<(std::ostream &os, Fiber::State state)
{
    switch (state) {
        case Fiber::INIT:
            return os << "INIT";
        case Fiber::HOLD:
            return os << "HOLD";
        case Fiber::EXEC:
            return os << "EXEC";
        case Fiber::EXCEPT:
            return os << "EXCEPT";
        case Fiber::TERM:
            return os << "TERM";
        default:
            return os << (int)state;
    }
}

}