PageRenderTime 74ms CodeModel.GetById 13ms RepoModel.GetById 0ms app.codeStats 0ms

/pypy/translator/c/src/thread_pthread.c

https://bitbucket.org/SeanTater/pypy-bugfix-st
C | 498 lines | 375 code | 71 blank | 52 comment | 60 complexity | 1aa5a7438b1333f2adc148c4a9321df7 MD5 | raw file
  1. /* Posix threads interface (from CPython) */
  2. #include <unistd.h> /* for the _POSIX_xxx and _POSIX_THREAD_xxx defines */
  3. #include <stdlib.h>
  4. #include <pthread.h>
  5. #include <signal.h>
  6. #include <stdio.h>
  7. #include <errno.h>
  8. #include <assert.h>
/* The following is hopefully equivalent to what CPython does
   (which is trying to compile a snippet of code using it) */
#ifdef PTHREAD_SCOPE_SYSTEM
#  ifndef PTHREAD_SYSTEM_SCHED_SUPPORTED
#    define PTHREAD_SYSTEM_SCHED_SUPPORTED
#  endif
#endif

/* A NULL attribute pointer asks pthreads for default attributes. */
#if !defined(pthread_attr_default)
#  define pthread_attr_default ((pthread_attr_t *)NULL)
#endif
#if !defined(pthread_mutexattr_default)
#  define pthread_mutexattr_default ((pthread_mutexattr_t *)NULL)
#endif
#if !defined(pthread_condattr_default)
#  define pthread_condattr_default ((pthread_condattr_t *)NULL)
#endif

/* Report a failed call: print 'name' via perror() and record the failure
   in the local variable 'error'.  NOTE: relies on the caller declaring
   'int status, error;' with these exact names. */
#define CHECK_STATUS(name)  if (status != 0) { perror(name); error = 1; }

/* The POSIX spec requires that use of pthread_attr_setstacksize
   be conditional on _POSIX_THREAD_ATTR_STACKSIZE being defined. */
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
#  ifndef THREAD_STACK_SIZE
#    define THREAD_STACK_SIZE 0  /* use default stack size */
#  endif
/* for safety, ensure a viable minimum stacksize */
#  define THREAD_STACK_MIN 0x8000  /* 32kB */
#else  /* !_POSIX_THREAD_ATTR_STACKSIZE */
#  ifdef THREAD_STACK_SIZE
#    error "THREAD_STACK_SIZE defined but _POSIX_THREAD_ATTR_STACKSIZE undefined"
#  endif
#endif
  39. /* XXX This implementation is considered (to quote Tim Peters) "inherently
  40. hosed" because:
  41. - It does not guarantee the promise that a non-zero integer is returned.
  42. - The cast to long is inherently unsafe.
  43. - It is not clear that the 'volatile' (for AIX?) and ugly casting in the
  44. latter return statement (for Alpha OSF/1) are any longer necessary.
  45. */
  46. long RPyThreadGetIdent(void)
  47. {
  48. volatile pthread_t threadid;
  49. /* Jump through some hoops for Alpha OSF/1 */
  50. threadid = pthread_self();
  51. #ifdef __CYGWIN__
  52. /* typedef __uint32_t pthread_t; */
  53. return (long) threadid;
  54. #else
  55. if (sizeof(pthread_t) <= sizeof(long))
  56. return (long) threadid;
  57. else
  58. return (long) *(long *) &threadid;
  59. #endif
  60. }
/* Requested thread stack size in bytes; 0 means "use the platform default".
   Set by RPyThreadSetStackSize() and read by RPyThreadStart(). */
static long _pypythread_stacksize = 0;
  62. static void *bootstrap_pthread(void *func)
  63. {
  64. ((void(*)(void))func)();
  65. return NULL;
  66. }
/* Start a detached thread running 'func' and return its identifier,
   or -1 if pthread_create() failed.  Honors the stack size configured
   via RPyThreadSetStackSize() and requests system contention scope
   where supported. */
long RPyThreadStart(void (*func)(void))
{
    pthread_t th;
    int status;
#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
    pthread_attr_t attrs;
#endif
#if defined(THREAD_STACK_SIZE)
    size_t tss;
#endif

#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
    pthread_attr_init(&attrs);
#endif
#ifdef THREAD_STACK_SIZE
    /* a user-requested size (non-zero) overrides the compile-time default */
    tss = (_pypythread_stacksize != 0) ? _pypythread_stacksize
                                       : THREAD_STACK_SIZE;
    if (tss != 0)
        pthread_attr_setstacksize(&attrs, tss);
#endif
#if defined(PTHREAD_SYSTEM_SCHED_SUPPORTED) && !defined(__FreeBSD__)
    pthread_attr_setscope(&attrs, PTHREAD_SCOPE_SYSTEM);
#endif
    status = pthread_create(&th,
#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
                            &attrs,
#else
                            (pthread_attr_t*)NULL,
#endif
                            bootstrap_pthread,
                            (void *)func
                            );
#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
    pthread_attr_destroy(&attrs);
#endif
    if (status != 0)
        return -1;

    pthread_detach(th);

    /* same id-extraction dance as RPyThreadGetIdent() */
#ifdef __CYGWIN__
    /* typedef __uint32_t pthread_t; */
    return (long) th;
#else
    if (sizeof(pthread_t) <= sizeof(long))
        return (long) th;
    else
        return (long) *(long *) &th;
#endif
}
  114. long RPyThreadGetStackSize(void)
  115. {
  116. return _pypythread_stacksize;
  117. }
  118. long RPyThreadSetStackSize(long newsize)
  119. {
  120. #if defined(THREAD_STACK_SIZE)
  121. pthread_attr_t attrs;
  122. size_t tss_min;
  123. int rc;
  124. #endif
  125. if (newsize == 0) { /* set to default */
  126. _pypythread_stacksize = 0;
  127. return 0;
  128. }
  129. #if defined(THREAD_STACK_SIZE)
  130. # if defined(PTHREAD_STACK_MIN)
  131. tss_min = PTHREAD_STACK_MIN > THREAD_STACK_MIN ? PTHREAD_STACK_MIN
  132. : THREAD_STACK_MIN;
  133. # else
  134. tss_min = THREAD_STACK_MIN;
  135. # endif
  136. if (newsize >= tss_min) {
  137. /* validate stack size by setting thread attribute */
  138. if (pthread_attr_init(&attrs) == 0) {
  139. rc = pthread_attr_setstacksize(&attrs, newsize);
  140. pthread_attr_destroy(&attrs);
  141. if (rc == 0) {
  142. _pypythread_stacksize = newsize;
  143. return 0;
  144. }
  145. }
  146. }
  147. return -1;
  148. #else
  149. return -2;
  150. #endif
  151. }
  152. /************************************************************/
  153. #ifdef USE_SEMAPHORES
  154. /************************************************************/
  155. #include <semaphore.h>
/* Called in the child process after a fork().  The semaphore-based
   implementation keeps no global list of locks, so there is nothing to
   rebuild here (contrast with the mutex-based version below). */
void RPyThreadAfterFork(void)
{
}
  159. int RPyThreadLockInit(struct RPyOpaque_ThreadLock *lock)
  160. {
  161. int status, error = 0;
  162. lock->initialized = 0;
  163. status = sem_init(&lock->sem, 0, 1);
  164. CHECK_STATUS("sem_init");
  165. if (error)
  166. return 0;
  167. lock->initialized = 1;
  168. return 1;
  169. }
  170. void RPyOpaqueDealloc_ThreadLock(struct RPyOpaque_ThreadLock *lock)
  171. {
  172. int status, error = 0;
  173. if (lock->initialized) {
  174. status = sem_destroy(&lock->sem);
  175. CHECK_STATUS("sem_destroy");
  176. /* 'error' is ignored;
  177. CHECK_STATUS already printed an error message */
  178. }
  179. }
  180. /*
  181. * As of February 2002, Cygwin thread implementations mistakenly report error
  182. * codes in the return value of the sem_ calls (like the pthread_ functions).
  183. * Correct implementations return -1 and put the code in errno. This supports
  184. * either.
  185. */
  186. static int
  187. rpythread_fix_status(int status)
  188. {
  189. return (status == -1) ? errno : status;
  190. }
  191. int RPyThreadAcquireLock(struct RPyOpaque_ThreadLock *lock, int waitflag)
  192. {
  193. int success;
  194. sem_t *thelock = &lock->sem;
  195. int status, error = 0;
  196. do {
  197. if (waitflag)
  198. status = rpythread_fix_status(sem_wait(thelock));
  199. else
  200. status = rpythread_fix_status(sem_trywait(thelock));
  201. } while (status == EINTR); /* Retry if interrupted by a signal */
  202. if (waitflag) {
  203. CHECK_STATUS("sem_wait");
  204. } else if (status != EAGAIN) {
  205. CHECK_STATUS("sem_trywait");
  206. }
  207. success = (status == 0) ? 1 : 0;
  208. return success;
  209. }
  210. void RPyThreadReleaseLock(struct RPyOpaque_ThreadLock *lock)
  211. {
  212. sem_t *thelock = &lock->sem;
  213. int status, error = 0;
  214. status = sem_post(thelock);
  215. CHECK_STATUS("sem_post");
  216. }
  217. /************************************************************/
  218. #else /* no semaphores */
  219. /************************************************************/
  220. struct RPyOpaque_ThreadLock *alllocks; /* doubly-linked list */
  221. void RPyThreadAfterFork(void)
  222. {
  223. /* Mess. We have no clue about how it works on CPython on OSX,
  224. but the issue is that the state of mutexes is not really
  225. preserved across a fork(). So we need to walk over all lock
  226. objects here, and rebuild their mutex and condition variable.
  227. See e.g. http://hackage.haskell.org/trac/ghc/ticket/1391 for
  228. a similar bug about GHC.
  229. */
  230. struct RPyOpaque_ThreadLock *p = alllocks;
  231. alllocks = NULL;
  232. while (p) {
  233. struct RPyOpaque_ThreadLock *next = p->next;
  234. int was_locked = p->locked;
  235. RPyThreadLockInit(p);
  236. p->locked = was_locked;
  237. p = next;
  238. }
  239. }
  240. int RPyThreadLockInit(struct RPyOpaque_ThreadLock *lock)
  241. {
  242. int status, error = 0;
  243. lock->initialized = 0;
  244. lock->locked = 0;
  245. status = pthread_mutex_init(&lock->mut,
  246. pthread_mutexattr_default);
  247. CHECK_STATUS("pthread_mutex_init");
  248. status = pthread_cond_init(&lock->lock_released,
  249. pthread_condattr_default);
  250. CHECK_STATUS("pthread_cond_init");
  251. if (error)
  252. return 0;
  253. lock->initialized = 1;
  254. /* add 'lock' in the doubly-linked list */
  255. if (alllocks)
  256. alllocks->prev = lock;
  257. lock->next = alllocks;
  258. lock->prev = NULL;
  259. alllocks = lock;
  260. return 1;
  261. }
  262. void RPyOpaqueDealloc_ThreadLock(struct RPyOpaque_ThreadLock *lock)
  263. {
  264. int status, error = 0;
  265. if (lock->initialized) {
  266. /* remove 'lock' from the doubly-linked list */
  267. if (lock->prev)
  268. lock->prev->next = lock->next;
  269. else {
  270. assert(alllocks == lock);
  271. alllocks = lock->next;
  272. }
  273. if (lock->next)
  274. lock->next->prev = lock->prev;
  275. status = pthread_mutex_destroy(&lock->mut);
  276. CHECK_STATUS("pthread_mutex_destroy");
  277. status = pthread_cond_destroy(&lock->lock_released);
  278. CHECK_STATUS("pthread_cond_destroy");
  279. /* 'error' is ignored;
  280. CHECK_STATUS already printed an error message */
  281. }
  282. }
/* Acquire 'lock'.  With waitflag != 0, wait on the condition variable
   until the lock is free; with waitflag == 0, fail immediately if it is
   held.  Returns 1 on success, 0 on failure -- including when any
   pthread call along the way failed. */
int RPyThreadAcquireLock(struct RPyOpaque_ThreadLock *lock, int waitflag)
{
    int success;
    int status, error = 0;   /* 'status'/'error' are required by CHECK_STATUS */
    status = pthread_mutex_lock( &lock->mut );
    CHECK_STATUS("pthread_mutex_lock[1]");
    success = lock->locked == 0;
    if ( !success && waitflag ) {
        /* continue trying until we get the lock */

        /* mut must be locked by me -- part of the condition
         * protocol */
        while ( lock->locked ) {
            status = pthread_cond_wait(&lock->lock_released,
                                       &lock->mut);
            CHECK_STATUS("pthread_cond_wait");
        }
        success = 1;
    }
    if (success) lock->locked = 1;
    status = pthread_mutex_unlock( &lock->mut );
    CHECK_STATUS("pthread_mutex_unlock[1]");
    if (error) success = 0;   /* any pthread failure counts as "not acquired" */
    return success;
}
  307. void RPyThreadReleaseLock(struct RPyOpaque_ThreadLock *lock)
  308. {
  309. int status, error = 0;
  310. status = pthread_mutex_lock( &lock->mut );
  311. CHECK_STATUS("pthread_mutex_lock[3]");
  312. lock->locked = 0;
  313. status = pthread_mutex_unlock( &lock->mut );
  314. CHECK_STATUS("pthread_mutex_unlock[3]");
  315. /* wake up someone (anyone, if any) waiting on the lock */
  316. status = pthread_cond_signal( &lock->lock_released );
  317. CHECK_STATUS("pthread_cond_signal");
  318. }
  319. /************************************************************/
  320. #endif /* no semaphores */
  321. /************************************************************/
  322. /************************************************************/
  323. /* GIL code */
  324. /************************************************************/
/* Select an implementation of atomic_add: prefer the compiler builtin
   __sync_fetch_and_add (LLVM, or gcc >= 4.1), else fall back to inline
   'lock add' assembly on i386/amd64. */
#ifdef __llvm__
#  define HAS_ATOMIC_ADD
#endif

#ifdef __GNUC__
#  if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)
#    define HAS_ATOMIC_ADD
#  endif
#endif

#ifdef HAS_ATOMIC_ADD
#  define atomic_add __sync_fetch_and_add
#else
#  if defined(__amd64__)
#    define atomic_add(ptr, value)  asm volatile ("lock addq %0, %1"       \
                                 : : "ri"(value), "m"(*(ptr)) : "memory")
#  elif defined(__i386__)
#    define atomic_add(ptr, value)  asm volatile ("lock addl %0, %1"       \
                                 : : "ri"(value), "m"(*(ptr)) : "memory")
#  else
#    error "Please use gcc >= 4.1 or write a custom 'asm' for your CPU."
#  endif
#endif

/* Abort the process if a pthread call fails; used for GIL operations,
   where there is no reasonable way to recover from failure. */
#define ASSERT_STATUS(call)                             \
    if (call != 0) {                                    \
        fprintf(stderr, "Fatal error: " #call "\n");    \
        abort();                                        \
    }
/* Debug helper: print 'msg' to stderr in a per-thread ANSI color.
   Compiled out by the '#if 0'; flip it to 1 to trace GIL activity. */
static void _debug_print(const char *msg)
{
#if 0
    int col = (int)pthread_self();
    col = 31 + ((col / 8) % 8);   /* map thread id to ANSI colors 31..38 */
    fprintf(stderr, "\033[%dm%s\033[0m", col, msg);
#endif
}
/* Number of threads currently trying to acquire the GIL, or -1 before
   RPyGilAllocate() has run.  Updated with atomic_add(). */
static volatile long pending_acquires = -1;
/* The GIL itself: holding 'mutex_gil' means holding the GIL.  'cond_gil'
   is signalled when the GIL is released or yielded. */
static pthread_mutex_t mutex_gil;
static pthread_cond_t cond_gil;
/* Debug check (enabled by RPY_ASSERT) that the caller holds the GIL:
   pthread_mutex_trylock() on a mutex this thread already holds must
   fail, and the GIL machinery must have been allocated. */
static void assert_has_the_gil(void)
{
#ifdef RPY_ASSERT
    assert(pthread_mutex_trylock(&mutex_gil) != 0);
    assert(pending_acquires >= 0);
#endif
}
  369. long RPyGilAllocate(void)
  370. {
  371. int status, error = 0;
  372. _debug_print("RPyGilAllocate\n");
  373. pending_acquires = -1;
  374. status = pthread_mutex_init(&mutex_gil,
  375. pthread_mutexattr_default);
  376. CHECK_STATUS("pthread_mutex_init[GIL]");
  377. status = pthread_cond_init(&cond_gil,
  378. pthread_condattr_default);
  379. CHECK_STATUS("pthread_cond_init[GIL]");
  380. if (error == 0) {
  381. pending_acquires = 0;
  382. RPyGilAcquire();
  383. }
  384. return (error == 0);
  385. }
/* Give other threads a chance to grab the GIL.  Returns 1 if the GIL
   was actually yielded and re-acquired, 0 if nobody was waiting.  Must
   be called with the GIL held (once it is allocated). */
long RPyGilYieldThread(void)
{
    /* can be called even before RPyGilAllocate(), but in this case,
       pending_acquires will be -1 */
#ifdef RPY_ASSERT
    if (pending_acquires >= 0)
        assert_has_the_gil();
#endif
    if (pending_acquires <= 0)
        return 0;
    atomic_add(&pending_acquires, 1L);
    _debug_print("{");
    /* wake one waiter, then sleep ourselves; pthread_cond_wait
       atomically releases mutex_gil (the GIL) while we sleep and
       re-acquires it before returning */
    ASSERT_STATUS(pthread_cond_signal(&cond_gil));
    ASSERT_STATUS(pthread_cond_wait(&cond_gil, &mutex_gil));
    _debug_print("}");
    atomic_add(&pending_acquires, -1L);
    assert_has_the_gil();
    return 1;
}
/* Release the GIL (unlock mutex_gil) and wake up one thread waiting to
   acquire it.  Must be called with the GIL held. */
void RPyGilRelease(void)
{
    _debug_print("RPyGilRelease\n");
#ifdef RPY_ASSERT
    assert(pending_acquires >= 0);
#endif
    assert_has_the_gil();
    ASSERT_STATUS(pthread_mutex_unlock(&mutex_gil));
    ASSERT_STATUS(pthread_cond_signal(&cond_gil));
}
/* Acquire the GIL, blocking until it is available.  pending_acquires is
   incremented around the blocking lock so that the GIL-holding thread's
   RPyGilYieldThread() can see that someone is waiting. */
void RPyGilAcquire(void)
{
    _debug_print("about to RPyGilAcquire...\n");
#ifdef RPY_ASSERT
    assert(pending_acquires >= 0);
#endif
    atomic_add(&pending_acquires, 1L);
    ASSERT_STATUS(pthread_mutex_lock(&mutex_gil));
    atomic_add(&pending_acquires, -1L);
    assert_has_the_gil();
    _debug_print("RPyGilAcquire\n");
}