PageRenderTime 53ms CodeModel.GetById 13ms RepoModel.GetById 0ms app.codeStats 0ms

/pypy/translator/c/src/thread_pthread.h

https://bitbucket.org/dac_io/pypy
C Header | 572 lines | 412 code | 91 blank | 69 comment | 61 complexity | abf6753443ac3cc90c0346f8f899860f MD5 | raw file
  1. /* Posix threads interface (from CPython) */
  2. /* XXX needs to detect HAVE_BROKEN_POSIX_SEMAPHORES properly; currently
  3. it is set only if _POSIX_SEMAPHORES == -1. Seems to be only for
  4. SunOS/5.8 and AIX/5.
  5. */
  6. #include <unistd.h> /* for the _POSIX_xxx and _POSIX_THREAD_xxx defines */
  7. #include <stdlib.h>
  8. #include <pthread.h>
  9. #include <signal.h>
  10. #include <stdio.h>
  11. #include <errno.h>
  12. #include <assert.h>
  13. /* The following is hopefully equivalent to what CPython does
  14. (which is trying to compile a snippet of code using it) */
  15. #ifdef PTHREAD_SCOPE_SYSTEM
  16. # ifndef PTHREAD_SYSTEM_SCHED_SUPPORTED
  17. # define PTHREAD_SYSTEM_SCHED_SUPPORTED
  18. # endif
  19. #endif
  20. /* The POSIX spec says that implementations supporting the sem_*
  21. family of functions must indicate this by defining
  22. _POSIX_SEMAPHORES. */
  23. #ifdef _POSIX_SEMAPHORES
  24. /* On FreeBSD 4.x, _POSIX_SEMAPHORES is defined empty, so
  25. we need to add 0 to make it work there as well. */
  26. #if (_POSIX_SEMAPHORES+0) == -1
  27. #define HAVE_BROKEN_POSIX_SEMAPHORES
  28. #else
  29. #include <semaphore.h>
  30. #endif
  31. #endif
  32. #if !defined(pthread_attr_default)
  33. # define pthread_attr_default ((pthread_attr_t *)NULL)
  34. #endif
  35. #if !defined(pthread_mutexattr_default)
  36. # define pthread_mutexattr_default ((pthread_mutexattr_t *)NULL)
  37. #endif
  38. #if !defined(pthread_condattr_default)
  39. # define pthread_condattr_default ((pthread_condattr_t *)NULL)
  40. #endif
  41. /* Whether or not to use semaphores directly rather than emulating them with
  42. * mutexes and condition variables:
  43. */
  44. #if defined(_POSIX_SEMAPHORES) && !defined(HAVE_BROKEN_POSIX_SEMAPHORES)
  45. # define USE_SEMAPHORES
  46. #else
  47. # undef USE_SEMAPHORES
  48. #endif
  49. #define CHECK_STATUS(name) if (status != 0) { perror(name); error = 1; }
/********************* structs ***********/

#ifdef USE_SEMAPHORES

#include <semaphore.h>

/* Lock implemented directly on top of a POSIX semaphore. */
struct RPyOpaque_ThreadLock {
    sem_t sem;
    int initialized;    /* set to 1 after a successful sem_init() */
};

#define RPyOpaque_INITEXPR_ThreadLock { { /* sem */ }, 0 }

#else /* no semaphores */

/* A pthread mutex isn't sufficient to model the Python lock type
   (see explanations in CPython's Python/thread_pthread.h */
struct RPyOpaque_ThreadLock {
    char locked; /* 0=unlocked, 1=locked */
    char initialized;   /* set to 1 once mutex and condvar are set up */
    /* a <cond, mutex> pair to handle an acquire of a locked lock */
    pthread_cond_t lock_released;
    pthread_mutex_t mut;
    /* links into the global 'alllocks' list, walked by
       RPyThreadAfterFork() to rebuild locks in the child process */
    struct RPyOpaque_ThreadLock *prev, *next;
};

/* static initializer: unlocked, uninitialized flag, default mutex/cond;
   'prev'/'next' are implicitly NULL */
#define RPyOpaque_INITEXPR_ThreadLock { \
    0, 0, \
    PTHREAD_COND_INITIALIZER, \
    PTHREAD_MUTEX_INITIALIZER \
}
#endif /* no semaphores */
/* prototypes */
long RPyThreadGetIdent(void);              /* id of the calling thread */
long RPyThreadStart(void (*func)(void));   /* spawn a detached thread */
int RPyThreadLockInit(struct RPyOpaque_ThreadLock *lock);
void RPyOpaqueDealloc_ThreadLock(struct RPyOpaque_ThreadLock *lock);
int RPyThreadAcquireLock(struct RPyOpaque_ThreadLock *lock, int waitflag);
void RPyThreadReleaseLock(struct RPyOpaque_ThreadLock *lock);
long RPyThreadGetStackSize(void);          /* 0 means "system default" */
long RPyThreadSetStackSize(long);
void RPyThreadAfterFork(void);             /* fix up lock state in the child */
/* implementations */
/* Everything below is compiled only in the "main" C file of the
   translated program; other files see just the prototypes above. */
#ifndef PYPY_NOT_MAIN_FILE

/* The POSIX spec requires that use of pthread_attr_setstacksize
   be conditional on _POSIX_THREAD_ATTR_STACKSIZE being defined. */
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
# ifndef THREAD_STACK_SIZE
# define THREAD_STACK_SIZE 0 /* use default stack size */
# endif
/* for safety, ensure a viable minimum stacksize */
# define THREAD_STACK_MIN 0x8000 /* 32kB */
#else /* !_POSIX_THREAD_ATTR_STACKSIZE */
# ifdef THREAD_STACK_SIZE
# error "THREAD_STACK_SIZE defined but _POSIX_THREAD_ATTR_STACKSIZE undefined"
# endif
#endif
  100. /* XXX This implementation is considered (to quote Tim Peters) "inherently
  101. hosed" because:
  102. - It does not guarantee the promise that a non-zero integer is returned.
  103. - The cast to long is inherently unsafe.
  104. - It is not clear that the 'volatile' (for AIX?) and ugly casting in the
  105. latter return statement (for Alpha OSF/1) are any longer necessary.
  106. */
  107. long RPyThreadGetIdent(void)
  108. {
  109. volatile pthread_t threadid;
  110. /* Jump through some hoops for Alpha OSF/1 */
  111. threadid = pthread_self();
  112. if (sizeof(pthread_t) <= sizeof(long))
  113. return (long) threadid;
  114. else
  115. return (long) *(long *) &threadid;
  116. }
  117. static long _pypythread_stacksize = 0;
  118. static void *bootstrap_pthread(void *func)
  119. {
  120. ((void(*)(void))func)();
  121. return NULL;
  122. }
/* Start a new detached thread running 'func' (no arguments, no result).
   Returns the new thread's id squeezed into a long (same caveats as
   RPyThreadGetIdent), or -1 if pthread_create() failed. */
long RPyThreadStart(void (*func)(void))
{
    pthread_t th;
    int status;
#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
    pthread_attr_t attrs;
#endif
#if defined(THREAD_STACK_SIZE)
    size_t tss;
#endif

#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
    pthread_attr_init(&attrs);
#endif
#ifdef THREAD_STACK_SIZE
    /* prefer the size set via RPyThreadSetStackSize(); fall back to the
       compile-time default (0 = leave the system default alone) */
    tss = (_pypythread_stacksize != 0) ? _pypythread_stacksize
                                       : THREAD_STACK_SIZE;
    if (tss != 0)
        pthread_attr_setstacksize(&attrs, tss);
#endif
#if defined(PTHREAD_SYSTEM_SCHED_SUPPORTED) && !defined(__FreeBSD__)
    /* request system-wide (not process-local) scheduling contention */
    pthread_attr_setscope(&attrs, PTHREAD_SCOPE_SYSTEM);
#endif
    status = pthread_create(&th,
#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
                            &attrs,
#else
                            (pthread_attr_t*)NULL,
#endif
                            bootstrap_pthread,
                            (void *)func
                            );
#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
    pthread_attr_destroy(&attrs);
#endif
    if (status != 0)
        return -1;

    /* the thread is never joined, so detach it right away */
    pthread_detach(th);

    if (sizeof(pthread_t) <= sizeof(long))
        return (long) th;
    else
        return (long) *(long *) &th;
}
  165. long RPyThreadGetStackSize(void)
  166. {
  167. return _pypythread_stacksize;
  168. }
  169. long RPyThreadSetStackSize(long newsize)
  170. {
  171. #if defined(THREAD_STACK_SIZE)
  172. pthread_attr_t attrs;
  173. size_t tss_min;
  174. int rc;
  175. #endif
  176. if (newsize == 0) { /* set to default */
  177. _pypythread_stacksize = 0;
  178. return 0;
  179. }
  180. #if defined(THREAD_STACK_SIZE)
  181. # if defined(PTHREAD_STACK_MIN)
  182. tss_min = PTHREAD_STACK_MIN > THREAD_STACK_MIN ? PTHREAD_STACK_MIN
  183. : THREAD_STACK_MIN;
  184. # else
  185. tss_min = THREAD_STACK_MIN;
  186. # endif
  187. if (newsize >= tss_min) {
  188. /* validate stack size by setting thread attribute */
  189. if (pthread_attr_init(&attrs) == 0) {
  190. rc = pthread_attr_setstacksize(&attrs, newsize);
  191. pthread_attr_destroy(&attrs);
  192. if (rc == 0) {
  193. _pypythread_stacksize = newsize;
  194. return 0;
  195. }
  196. }
  197. }
  198. return -1;
  199. #else
  200. return -2;
  201. #endif
  202. }
  203. /************************************************************/
  204. #ifdef USE_SEMAPHORES
  205. /************************************************************/
  206. #include <semaphore.h>
  207. void RPyThreadAfterFork(void)
  208. {
  209. }
  210. int RPyThreadLockInit(struct RPyOpaque_ThreadLock *lock)
  211. {
  212. int status, error = 0;
  213. lock->initialized = 0;
  214. status = sem_init(&lock->sem, 0, 1);
  215. CHECK_STATUS("sem_init");
  216. if (error)
  217. return 0;
  218. lock->initialized = 1;
  219. return 1;
  220. }
  221. void RPyOpaqueDealloc_ThreadLock(struct RPyOpaque_ThreadLock *lock)
  222. {
  223. int status, error = 0;
  224. if (lock->initialized) {
  225. status = sem_destroy(&lock->sem);
  226. CHECK_STATUS("sem_destroy");
  227. /* 'error' is ignored;
  228. CHECK_STATUS already printed an error message */
  229. }
  230. }
  231. /*
  232. * As of February 2002, Cygwin thread implementations mistakenly report error
  233. * codes in the return value of the sem_ calls (like the pthread_ functions).
  234. * Correct implementations return -1 and put the code in errno. This supports
  235. * either.
  236. */
  237. static int
  238. rpythread_fix_status(int status)
  239. {
  240. return (status == -1) ? errno : status;
  241. }
  242. int RPyThreadAcquireLock(struct RPyOpaque_ThreadLock *lock, int waitflag)
  243. {
  244. int success;
  245. sem_t *thelock = &lock->sem;
  246. int status, error = 0;
  247. do {
  248. if (waitflag)
  249. status = rpythread_fix_status(sem_wait(thelock));
  250. else
  251. status = rpythread_fix_status(sem_trywait(thelock));
  252. } while (status == EINTR); /* Retry if interrupted by a signal */
  253. if (waitflag) {
  254. CHECK_STATUS("sem_wait");
  255. } else if (status != EAGAIN) {
  256. CHECK_STATUS("sem_trywait");
  257. }
  258. success = (status == 0) ? 1 : 0;
  259. return success;
  260. }
  261. void RPyThreadReleaseLock(struct RPyOpaque_ThreadLock *lock)
  262. {
  263. sem_t *thelock = &lock->sem;
  264. int status, error = 0;
  265. status = sem_post(thelock);
  266. CHECK_STATUS("sem_post");
  267. }
  268. /************************************************************/
  269. #else /* no semaphores */
  270. /************************************************************/
/* Doubly-linked list of every initialized lock; maintained by
   RPyThreadLockInit()/RPyOpaqueDealloc_ThreadLock() and walked here. */
struct RPyOpaque_ThreadLock *alllocks;

void RPyThreadAfterFork(void)
{
    /* Mess. We have no clue about how it works on CPython on OSX,
       but the issue is that the state of mutexes is not really
       preserved across a fork(). So we need to walk over all lock
       objects here, and rebuild their mutex and condition variable.

       See e.g. http://hackage.haskell.org/trac/ghc/ticket/1391 for
       a similar bug about GHC.
    */
    struct RPyOpaque_ThreadLock *p = alllocks;
    alllocks = NULL;    /* RPyThreadLockInit() below re-links each lock */
    while (p) {
        struct RPyOpaque_ThreadLock *next = p->next;
        int was_locked = p->locked;
        RPyThreadLockInit(p);      /* fresh mutex + condvar */
        p->locked = was_locked;    /* restore the logical lock state */
        p = next;
    }
}
  291. int RPyThreadLockInit(struct RPyOpaque_ThreadLock *lock)
  292. {
  293. int status, error = 0;
  294. lock->initialized = 0;
  295. lock->locked = 0;
  296. status = pthread_mutex_init(&lock->mut,
  297. pthread_mutexattr_default);
  298. CHECK_STATUS("pthread_mutex_init");
  299. status = pthread_cond_init(&lock->lock_released,
  300. pthread_condattr_default);
  301. CHECK_STATUS("pthread_cond_init");
  302. if (error)
  303. return 0;
  304. lock->initialized = 1;
  305. /* add 'lock' in the doubly-linked list */
  306. if (alllocks)
  307. alllocks->prev = lock;
  308. lock->next = alllocks;
  309. lock->prev = NULL;
  310. alllocks = lock;
  311. return 1;
  312. }
  313. void RPyOpaqueDealloc_ThreadLock(struct RPyOpaque_ThreadLock *lock)
  314. {
  315. int status, error = 0;
  316. if (lock->initialized) {
  317. /* remove 'lock' from the doubly-linked list */
  318. if (lock->prev)
  319. lock->prev->next = lock->next;
  320. else {
  321. assert(alllocks == lock);
  322. alllocks = lock->next;
  323. }
  324. if (lock->next)
  325. lock->next->prev = lock->prev;
  326. status = pthread_mutex_destroy(&lock->mut);
  327. CHECK_STATUS("pthread_mutex_destroy");
  328. status = pthread_cond_destroy(&lock->lock_released);
  329. CHECK_STATUS("pthread_cond_destroy");
  330. /* 'error' is ignored;
  331. CHECK_STATUS already printed an error message */
  332. }
  333. }
/* Acquire the lock.  With waitflag==0 this is a non-blocking attempt;
   otherwise we wait on 'lock_released' until the holder lets go.
   Returns 1 on success, 0 on failure. */
int RPyThreadAcquireLock(struct RPyOpaque_ThreadLock *lock, int waitflag)
{
    int success;
    int status, error = 0;

    status = pthread_mutex_lock( &lock->mut );
    CHECK_STATUS("pthread_mutex_lock[1]");
    success = lock->locked == 0;    /* free right now? */

    if ( !success && waitflag ) {
        /* continue trying until we get the lock */

        /* mut must be locked by me -- part of the condition
         * protocol */
        while ( lock->locked ) {
            /* releases mut while waiting, re-acquires it on wakeup;
               the loop guards against spurious wakeups */
            status = pthread_cond_wait(&lock->lock_released,
                                       &lock->mut);
            CHECK_STATUS("pthread_cond_wait");
        }
        success = 1;
    }
    if (success) lock->locked = 1;
    status = pthread_mutex_unlock( &lock->mut );
    CHECK_STATUS("pthread_mutex_unlock[1]");
    if (error) success = 0;
    return success;
}
/* Release the lock: clear the flag under the mutex, then wake up one
   waiter (if any).  Signalling after unlocking is safe here because
   waiters re-check lock->locked in a loop. */
void RPyThreadReleaseLock(struct RPyOpaque_ThreadLock *lock)
{
    int status, error = 0;

    status = pthread_mutex_lock( &lock->mut );
    CHECK_STATUS("pthread_mutex_lock[3]");
    lock->locked = 0;
    status = pthread_mutex_unlock( &lock->mut );
    CHECK_STATUS("pthread_mutex_unlock[3]");

    /* wake up someone (anyone, if any) waiting on the lock */
    status = pthread_cond_signal( &lock->lock_released );
    CHECK_STATUS("pthread_cond_signal");
}
  370. /************************************************************/
  371. #endif /* no semaphores */
  372. /************************************************************/
  373. /* Thread-local storage */
  374. #define RPyThreadTLS pthread_key_t
  375. char *RPyThreadTLS_Create(RPyThreadTLS *result)
  376. {
  377. if (pthread_key_create(result, NULL) != 0)
  378. return "out of thread-local storage keys";
  379. else
  380. return NULL;
  381. }
  382. #define RPyThreadTLS_Get(key) pthread_getspecific(key)
  383. #define RPyThreadTLS_Set(key, value) pthread_setspecific(key, value)
/************************************************************/
/* GIL code */
/************************************************************/

/* atomic_add(ptr, value): atomically add 'value' to '*ptr'.
   Uses the compiler builtin when available, else inline x86 asm. */
#ifdef __llvm__
# define HAS_ATOMIC_ADD
#endif

#ifdef __GNUC__
# if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)
     /* gcc >= 4.1 provides the __sync_* builtins */
# define HAS_ATOMIC_ADD
# endif
#endif

#ifdef HAS_ATOMIC_ADD
# define atomic_add __sync_fetch_and_add
#else
# if defined(__amd64__)
# define atomic_add(ptr, value) asm volatile ("lock addq %0, %1" \
                                 : : "ri"(value), "m"(*(ptr)) : "memory")
# elif defined(__i386__)
# define atomic_add(ptr, value) asm volatile ("lock addl %0, %1" \
                                 : : "ri"(value), "m"(*(ptr)) : "memory")
# else
# error "Please use gcc >= 4.1 or write a custom 'asm' for your CPU."
# endif
#endif

/* Abort the whole process if 'call' does not return 0; used for
   pthread calls whose failure would leave the GIL in a broken state. */
#define ASSERT_STATUS(call) \
    if (call != 0) { \
        fprintf(stderr, "Fatal error: " #call "\n"); \
        abort(); \
    }
/* Debug tracing helper for the GIL code: prints 'msg' to stderr with a
   per-thread ANSI color.  Compiled out by default; change the '#if 0'
   to '#if 1' to enable it. */
static void _debug_print(const char *msg)
{
#if 0
    int col = (int)pthread_self();
    col = 31 + ((col / 8) % 8);   /* pick one of 8 terminal colors */
    fprintf(stderr, "\033[%dm%s\033[0m", col, msg);
#endif
}
/* Number of threads currently trying to (re-)acquire the GIL;
   stays -1 until RPyGilAllocate() is called. */
static volatile long pending_acquires = -1;

/* The GIL itself: the thread holding 'mutex_gil' holds the GIL.
   'cond_gil' is used to hand the GIL over in RPyGilYieldThread(). */
static pthread_mutex_t mutex_gil = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond_gil = PTHREAD_COND_INITIALIZER;

/* Debug-only check (compiled in with RPY_ASSERT) that the calling
   thread currently holds the GIL. */
static void assert_has_the_gil(void)
{
#ifdef RPY_ASSERT
    /* trylock must fail, since we should already hold the mutex */
    assert(pthread_mutex_trylock(&mutex_gil) != 0);
    assert(pending_acquires >= 0);
#endif
}
/* Set up the GIL machinery; the calling thread ends up holding the
   GIL.  Always returns 1. */
long RPyGilAllocate(void)
{
    _debug_print("RPyGilAllocate\n");
    pending_acquires = 0;
    /* grab mutex_gil; trylock suffices since no other thread can be
       contending for it at this point */
    pthread_mutex_trylock(&mutex_gil);
    assert_has_the_gil();
    return 1;
}
/* Give other threads a chance to grab the GIL.  Returns 1 if the GIL
   was actually handed over and re-acquired, 0 if nobody was waiting. */
long RPyGilYieldThread(void)
{
    /* can be called even before RPyGilAllocate(), but in this case,
       pending_acquires will be -1 */
#ifdef RPY_ASSERT
    if (pending_acquires >= 0)
        assert_has_the_gil();
#endif
    if (pending_acquires <= 0)
        return 0;

    /* count ourselves among the pending acquirers while we wait */
    atomic_add(&pending_acquires, 1L);
    _debug_print("{");
    /* wake up another thread blocked on cond_gil, if any... */
    ASSERT_STATUS(pthread_cond_signal(&cond_gil));
    /* ...and atomically release mutex_gil and wait to get it back */
    ASSERT_STATUS(pthread_cond_wait(&cond_gil, &mutex_gil));
    _debug_print("}");
    atomic_add(&pending_acquires, -1L);
    assert_has_the_gil();
    return 1;
}
/* Release the GIL: unlock mutex_gil and wake up one thread waiting on
   cond_gil, if any. */
void RPyGilRelease(void)
{
    _debug_print("RPyGilRelease\n");
#ifdef RPY_ASSERT
    assert(pending_acquires >= 0);
#endif
    assert_has_the_gil();
    ASSERT_STATUS(pthread_mutex_unlock(&mutex_gil));
    ASSERT_STATUS(pthread_cond_signal(&cond_gil));
}
/* (Re-)acquire the GIL, blocking until mutex_gil becomes available.
   'pending_acquires' is bumped while we wait so that the thread
   currently holding the GIL can see that somebody wants it
   (see RPyGilYieldThread). */
void RPyGilAcquire(void)
{
    _debug_print("about to RPyGilAcquire...\n");
#ifdef RPY_ASSERT
    assert(pending_acquires >= 0);
#endif
    atomic_add(&pending_acquires, 1L);
    ASSERT_STATUS(pthread_mutex_lock(&mutex_gil));
    atomic_add(&pending_acquires, -1L);
    assert_has_the_gil();
    _debug_print("RPyGilAcquire\n");
}
  480. #endif /* PYPY_NOT_MAIN_FILE */