
/lua_lanes/src/lanes.c

https://bitbucket.org/xixs/lua
/*
 * LANES.C                              Copyright (c) 2007-08, Asko Kauppi
 *                                      Copyright (C) 2009-14, Benoit Germain
 *
 * Multithreading in Lua.
 *
 * History:
 *      See CHANGES
 *
 * Platforms (tested internally):
 *      OS X (10.5.7 PowerPC/Intel)
 *      Linux x86 (Ubuntu 8.04)
 *      Win32 (Windows XP Home SP2, Visual C++ 2005/2008 Express)
 *
 * Platforms (tested externally):
 *      Win32 (MSYS) by Ross Berteig.
 *
 * Platforms (testers appreciated):
 *      Win64 - should work???
 *      Linux x64 - should work
 *      FreeBSD - should work
 *      QNX - porting shouldn't be hard
 *      Sun Solaris - porting shouldn't be hard
 *
 * References:
 *      "Porting multithreaded applications from Win32 to Mac OS X":
 *      <http://developer.apple.com/macosx/multithreadedprogramming.html>
 *
 *      Pthreads:
 *      <http://vergil.chemistry.gatech.edu/resources/programming/threads.html>
 *
 *      MSDN: <http://msdn2.microsoft.com/en-us/library/ms686679.aspx>
 *
 *      <http://ridiculousfish.com/blog/archives/2007/02/17/barrier>
 *
 * Defines:
 *      -DLINUX_SCHED_RR: all threads are lifted to SCHED_RR category, to
 *      allow negative priorities [-3,-1] to be used. Even without this,
 *      using priorities will require 'sudo' privileges on Linux.
 *
 *      -DUSE_PTHREAD_TIMEDJOIN: use 'pthread_timedjoin_np()' for waiting
 *      for threads with a timeout. This changes the thread cleanup
 *      mechanism slightly (cleans up at the join, not once the thread
 *      has finished). May or may not be a good idea to use it.
 *      Available only in selected operating systems (Linux).
 *
 * Bugs:
 *
 * To-do:
 *
 *      Make waiting threads cancellable.
 *      ...
 */
char const* VERSION = "3.9.4";
/*
===============================================================================

Copyright (C) 2007-10 Asko Kauppi <akauppi@gmail.com>
              2011-14 Benoit Germain <bnt.germain@gmail.com>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

===============================================================================
*/
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>

#include "lua.h"
#include "lauxlib.h"

#include "threading.h"
#include "tools.h"
#include "keeper.h"
#include "lanes.h"

#if !(defined( PLATFORM_XBOX) || defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC))
# include <sys/time.h>
#endif

/* geteuid() */
#ifdef PLATFORM_LINUX
# include <unistd.h>
# include <sys/types.h>
#endif

/* Do you want full call stacks, or just the line where the error happened?
 *
 * TBD: The full stack feature does not seem to work (try 'make error').
 */
#define ERROR_FULL_STACK 1 // must be either 0 or 1 as we do some index arithmetic with it!
/*
 * Lane cancellation request modes
 */
enum e_cancel_request
{
    CANCEL_NONE, // no pending cancel request
    CANCEL_SOFT, // user wants the lane to cancel itself manually on cancel_test()
    CANCEL_HARD  // user wants the lane to be interrupted (meaning code won't return from those functions) from inside linda:send/receive calls
};
// NOTE: values to be changed by either thread, during execution, without
//       locking, are marked "volatile"
//
struct s_lane
{
    THREAD_T thread;
    //
    // M: sub-thread OS thread
    // S: not used

    char const* debug_name;

    lua_State* L;
    struct s_Universe* U;
    //
    // M: prepares the state, and reads results
    // S: while S is running, M must keep out of modifying the state

    volatile enum e_status status;
    //
    // M: sets to PENDING (before launching)
    // S: updates -> RUNNING/WAITING -> DONE/ERROR_ST/CANCELLED

    SIGNAL_T* volatile waiting_on;
    //
    // When status is WAITING, points on the linda's signal the thread waits on, else NULL

    volatile enum e_cancel_request cancel_request;
    //
    // M: sets to FALSE, flags TRUE for cancel request
    // S: reads to see if cancel is requested

#if THREADWAIT_METHOD == THREADWAIT_CONDVAR
    SIGNAL_T done_signal;
    //
    // M: Waited upon at lane ending (if Posix with no PTHREAD_TIMEDJOIN)
    // S: sets the signal once cancellation is noticed (avoids a kill)

    MUTEX_T done_lock;
    //
    // Lock required by 'done_signal' condition variable, protecting
    // lane status changes to DONE/ERROR_ST/CANCELLED.
#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR

    volatile enum {
        NORMAL, // normal master side state
        KILLED  // issued an OS kill
    } mstatus;
    //
    // M: sets to NORMAL, if issued a kill changes to KILLED
    // S: not used

    struct s_lane* volatile selfdestruct_next;
    //
    // M: sets to non-NULL if facing lane handle '__gc' cycle but the lane
    //    is still running
    // S: cleans up after itself if non-NULL at lane exit

#if HAVE_LANE_TRACKING
    struct s_lane* volatile tracking_next;
#endif // HAVE_LANE_TRACKING
    //
    // For tracking only
};
// To allow free-running threads (longer lifespan than the handle's)
// 'struct s_lane' are malloc/free'd and the handle only carries a pointer.
// This is not deep userdata since the handle's not portable among lanes.
//
#define lua_toLane( L, i) (*((struct s_lane**) luaL_checkudata( L, i, "Lane")))

#define CANCEL_TEST_KEY ((void*)get_lane_from_registry) // used as registry key
static inline struct s_lane* get_lane_from_registry( lua_State* L)
{
    struct s_lane* s;
    STACK_GROW( L, 1);
    STACK_CHECK( L);
    lua_pushlightuserdata( L, CANCEL_TEST_KEY);
    lua_rawget( L, LUA_REGISTRYINDEX);
    s = lua_touserdata( L, -1); // lightuserdata (true 's_lane' pointer) / nil
    lua_pop( L, 1);
    STACK_END( L, 0);
    return s;
}
// intern the debug name in the specified lua state so that the pointer remains valid when the lane's state is closed
static void securize_debug_threadname( lua_State* L, struct s_lane* s)
{
    STACK_CHECK( L);
    STACK_GROW( L, 3);
    lua_getuservalue( L, 1);
    lua_newtable( L);
    // Lua 5.1 can't do 's->debug_name = lua_pushstring( L, s->debug_name);'
    lua_pushstring( L, s->debug_name);
    s->debug_name = lua_tostring( L, -1);
    lua_rawset( L, -3);
    lua_pop( L, 1);
    STACK_END( L, 0);
}
/*
 * Check if the thread in question ('L') has been signalled for cancel.
 *
 * Called by cancellation hooks and/or pending Linda operations (because then
 * the check won't affect performance).
 *
 * Returns TRUE if any locks are to be exited, and 'cancel_error()' called,
 * to make execution of the lane end.
 */
static inline enum e_cancel_request cancel_test( lua_State* L)
{
    struct s_lane* const s = get_lane_from_registry( L);
    // 's' is NULL for the original main state (and no-one can cancel that)
    return s ? s->cancel_request : CANCEL_NONE;
}

#define CANCEL_ERROR ((void*)cancel_error) // 'cancel_error' sentinel
static int cancel_error( lua_State* L)
{
    STACK_GROW( L, 1);
    lua_pushlightuserdata( L, CANCEL_ERROR); // special error value
    return lua_error( L); // doesn't return
}

static void cancel_hook( lua_State* L, lua_Debug* ar)
{
    (void)ar;
    if( cancel_test( L) != CANCEL_NONE)
    {
        cancel_error( L);
    }
}
#if ERROR_FULL_STACK
static int lane_error( lua_State* L);
#define STACK_TRACE_KEY ((void*)lane_error) // used as registry key
#endif // ERROR_FULL_STACK

/*
 * registry[FINALIZER_REG_KEY] is either nil (no finalizers) or a table
 * of functions that Lanes will call after the executing 'pcall' has ended.
 *
 * We're NOT using the GC system for finalizers mainly because providing the
 * error (and maybe stack trace) parameters to the finalizer functions would
 * anyway complicate that approach.
 */
#define FINALIZER_REG_KEY ((void*)LG_set_finalizer)

struct s_Linda;

#if 1
# define DEBUG_SIGNAL( msg, signal_ref ) /* */
#else
# define DEBUG_SIGNAL( msg, signal_ref ) \
    { int i; unsigned char *ptr; char buf[999]; \
      sprintf( buf, ">>> " msg ": %p\t", (signal_ref) ); \
      ptr = (unsigned char *)signal_ref; \
      for( i = 0; i < sizeof(*signal_ref); i++ ) { \
          sprintf( strchr( buf, '\0'), "%02x %c ", ptr[i], ptr[i] ); \
      } \
      fprintf( stderr, "%s\n", buf ); \
    }
#endif
/*
 * Push a table stored in registry onto Lua stack.
 *
 * If there is no existing table, create one if 'create' is TRUE.
 *
 * Returns: TRUE if a table was pushed
 *          FALSE if no table found, not created, and nothing pushed
 */
static bool_t push_registry_table( lua_State* L, void* key, bool_t create)
{
    STACK_GROW( L, 3);
    STACK_CHECK( L);
    lua_pushlightuserdata( L, key);           // key
    lua_gettable( L, LUA_REGISTRYINDEX);      // t?
    if( lua_isnil( L, -1))                    // nil?
    {
        lua_pop( L, 1);                       //
        if( !create)
        {
            return FALSE;
        }
        lua_newtable( L);                     // t
        lua_pushlightuserdata( L, key);       // t key
        lua_pushvalue( L, -2);                // t key t
        lua_rawset( L, LUA_REGISTRYINDEX);    // t
    }
    STACK_END( L, 1);
    return TRUE; // table pushed
}
#if HAVE_LANE_TRACKING

// The chain is ended by '(struct s_lane*)(-1)', not NULL:
// 'tracking_first -> ... -> ... -> (-1)'
#define TRACKING_END ((struct s_lane *)(-1))

/*
 * Add the lane to tracking chain; the ones still running at the end of the
 * whole process will be cancelled.
 */
static void tracking_add( struct s_lane* s)
{
    MUTEX_LOCK( &s->U->tracking_cs);
    {
        assert( s->tracking_next == NULL);
        s->tracking_next = s->U->tracking_first;
        s->U->tracking_first = s;
    }
    MUTEX_UNLOCK( &s->U->tracking_cs);
}

/*
 * A free-running lane has ended; remove it from tracking chain
 */
static bool_t tracking_remove( struct s_lane* s)
{
    bool_t found = FALSE;
    MUTEX_LOCK( &s->U->tracking_cs);
    {
        // Make sure (within the MUTEX) that we actually are in the chain
        // still (at process exit they will remove us from chain and then
        // cancel/kill).
        //
        if( s->tracking_next != NULL)
        {
            struct s_lane** ref = (struct s_lane**) &s->U->tracking_first;
            while( *ref != TRACKING_END)
            {
                if( *ref == s)
                {
                    *ref = s->tracking_next;
                    s->tracking_next = NULL;
                    found = TRUE;
                    break;
                }
                ref = (struct s_lane**) &((*ref)->tracking_next);
            }
            assert( found);
        }
    }
    MUTEX_UNLOCK( &s->U->tracking_cs);
    return found;
}

#endif // HAVE_LANE_TRACKING
//---
// low-level cleanup
static void lane_cleanup( struct s_lane* s)
{
    // Clean up after a (finished) thread
    //
#if THREADWAIT_METHOD == THREADWAIT_CONDVAR
    SIGNAL_FREE( &s->done_signal);
    MUTEX_FREE( &s->done_lock);
#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR

#if HAVE_LANE_TRACKING
    if( s->U->tracking_first != NULL)
    {
        // Lane was cleaned up, no need to handle at process termination
        tracking_remove( s);
    }
#endif // HAVE_LANE_TRACKING

    free( s);
}
/*
 * ###############################################################################################
 * ############################################ Linda ############################################
 * ###############################################################################################
 */

/*
 * Actual data is kept within a keeper state, which is hashed by the 's_Linda'
 * pointer (which is the same for all userdatas pointing to it).
 */
struct s_Linda
{
    SIGNAL_T read_happened;
    SIGNAL_T write_happened;
    struct s_Universe* U; // the universe this linda belongs to
    enum e_cancel_request simulate_cancel;
    unsigned long group; // a group to control keeper allocation between lindas
    char name[1];
};
#define LINDA_KEEPER_HASHSEED( linda) (linda->group ? linda->group : (unsigned long)linda)
static void* linda_id( lua_State*, enum eDeepOp);

static inline struct s_Linda* lua_toLinda( lua_State* L, int idx_)
{
    struct s_Linda* linda = (struct s_Linda*) luaG_todeep( L, linda_id, idx_);
    luaL_argcheck( L, linda != NULL, idx_, "expecting a linda object");
    return linda;
}

static void check_key_types( lua_State* L, int start_, int end_)
{
    int i;
    for( i = start_; i <= end_; ++ i)
    {
        int t = lua_type( L, i);
        if( t == LUA_TBOOLEAN || t == LUA_TNUMBER || t == LUA_TSTRING || t == LUA_TLIGHTUSERDATA)
        {
            continue;
        }
        (void) luaL_error( L, "argument #%d: invalid key type (not a boolean, string, number or light userdata)", i);
    }
}
/*
 * bool= linda_send( linda_ud, [timeout_secs=-1,] [linda.null,] key_num|str|bool|lightuserdata, ... )
 *
 * Send one or more values to a Linda. If there is a limit, all values must fit.
 *
 * Returns:  'true' if the value was queued
 *           'false' for timeout (only happens when the queue size is limited)
 *           nil, CANCEL_ERROR if cancelled
 */
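/*
 * Lua-side usage sketch (illustrative only, not part of this file; assumes
 * 'local lanes = require "lanes".configure()' and a linda created as below):
 *
 *   local linda = lanes.linda()
 *   local ok = linda:send( 1.0, "jobs", "task #1") -- 1s timeout, key "jobs"
 *   -- ok is true (queued), false (timeout), or lanes.cancel_error (cancelled)
 */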
LUAG_FUNC( linda_send)
{
    struct s_Linda* linda = lua_toLinda( L, 1);
    bool_t ret;
    enum e_cancel_request cancel = CANCEL_NONE;
    int pushed;
    time_d timeout = -1.0;
    uint_t key_i = 2; // index of first key, if timeout not there
    void* as_nil_sentinel; // if not NULL, send() will silently send a single nil if nothing is provided

    if( lua_type( L, 2) == LUA_TNUMBER) // we don't want to use lua_isnumber() because of autocoercion
    {
        timeout = SIGNAL_TIMEOUT_PREPARE( lua_tonumber( L, 2));
        ++ key_i;
    }
    else if( lua_isnil( L, 2)) // alternate explicit "no timeout" by passing nil before the key
    {
        ++ key_i;
    }

    as_nil_sentinel = lua_touserdata( L, key_i);
    if( as_nil_sentinel == NIL_SENTINEL)
    {
        // the real key to send data to is after the NIL_SENTINEL marker
        ++ key_i;
    }

    // make sure the key is of a valid type
    check_key_types( L, key_i, key_i);

    STACK_GROW( L, 1);

    // make sure there is something to send
    if( (uint_t)lua_gettop( L) == key_i)
    {
        if( as_nil_sentinel == NIL_SENTINEL)
        {
            // send a single nil if nothing is provided
            lua_pushlightuserdata( L, NIL_SENTINEL);
        }
        else
        {
            return luaL_error( L, "no data to send");
        }
    }

    // convert nils to some special non-nil sentinel in sent values
    keeper_toggle_nil_sentinels( L, key_i + 1, eLM_ToKeeper);

    {
        bool_t try_again = TRUE;
        struct s_lane* const s = get_lane_from_registry( L);
        struct s_Keeper* K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda));
        lua_State* KL = K ? K->L : NULL; // need to do this for 'STACK_CHECK'
        if( KL == NULL) return 0;
        STACK_CHECK( KL);
        for( ;;)
        {
            if( s != NULL)
            {
                cancel = s->cancel_request;
            }
            cancel = (cancel != CANCEL_NONE) ? cancel : linda->simulate_cancel;
            // if user wants to cancel, or looped because of a timeout, the call returns without sending anything
            if( !try_again || cancel != CANCEL_NONE)
            {
                pushed = 0;
                break;
            }

            STACK_MID( KL, 0);
            pushed = keeper_call( linda->U, KL, KEEPER_API( send), L, linda, key_i);
            if( pushed < 0)
            {
                ret = FALSE;
                break;
            }
            ASSERT_L( pushed == 1);

            ret = lua_toboolean( L, -1);
            lua_pop( L, 1);

            if( ret)
            {
                // Wake up ALL waiting threads
                SIGNAL_ALL( &linda->write_happened);
                break;
            }

            // instant timeout: bypass the wait below entirely
            if( timeout == 0.0)
            {
                break; /* no wait; instant timeout */
            }

            // storage limit hit, wait until timeout or signalled that we should try again
            {
                enum e_status prev_status = ERROR_ST; // prevent 'might be used uninitialized' warnings
                if( s != NULL)
                {
                    // change status of lane to "waiting"
                    prev_status = s->status; // RUNNING, most likely
                    ASSERT_L( prev_status == RUNNING); // but check, just in case
                    s->status = WAITING;
                    ASSERT_L( s->waiting_on == NULL);
                    s->waiting_on = &linda->read_happened;
                }
                // could not send because no room: wait until some data was read before trying again, or until timeout is reached
                try_again = SIGNAL_WAIT( &linda->read_happened, &K->keeper_cs, timeout);
                if( s != NULL)
                {
                    s->waiting_on = NULL;
                    s->status = prev_status;
                }
            }
        }
        STACK_END( KL, 0);
        keeper_release( K);
    }

    // must trigger error after keeper state has been released
    if( pushed < 0)
    {
        return luaL_error( L, "tried to copy unsupported types");
    }

    switch( cancel)
    {
        case CANCEL_SOFT:
            // if user wants to soft-cancel, the call returns lanes.cancel_error
            lua_pushlightuserdata( L, CANCEL_ERROR);
            return 1;

        case CANCEL_HARD:
            // raise an error interrupting execution only in case of hard cancel
            return cancel_error( L); // raises an error and doesn't return

        default:
            lua_pushboolean( L, ret); // true (success) or false (timeout)
            return 1;
    }
}
/*
 * 2 modes of operation
 *
 * [val, key]= linda_receive( linda_ud, [timeout_secs_num=-1], key_num|str|bool|lightuserdata [, ...] )
 * Consumes a single value from the Linda, in any key.
 * Returns: received value (which is consumed from the slot), and the key which had it
 *
 * [val1, ... valCOUNT]= linda_receive( linda_ud, [timeout_secs_num=-1], linda.batched, key_num|str|bool|lightuserdata, min_COUNT[, max_COUNT])
 * Consumes between min_COUNT and max_COUNT values from the linda, from a single key.
 * Returns the actually consumed values, or nil if there weren't enough values to consume
 */
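/*
 * Lua-side usage sketch (illustrative only; 'linda' as in the send example):
 *
 *   local val, key = linda:receive( 1.0, "jobs") -- single value, 1s timeout
 *   -- batched mode: consume between 2 and 4 values from the single key "jobs"
 *   local res = { linda:receive( linda.batched, "jobs", 2, 4)}
 */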
#define BATCH_SENTINEL "270e6c9d-280f-4983-8fee-a7ecdda01475"
LUAG_FUNC( linda_receive)
{
    struct s_Linda* linda = lua_toLinda( L, 1);
    int pushed, expected_pushed_min, expected_pushed_max;
    enum e_cancel_request cancel = CANCEL_NONE;
    keeper_api_t keeper_receive;
    time_d timeout = -1.0;
    uint_t key_i = 2;

    if( lua_type( L, 2) == LUA_TNUMBER) // we don't want to use lua_isnumber() because of autocoercion
    {
        timeout = SIGNAL_TIMEOUT_PREPARE( lua_tonumber( L, 2));
        ++ key_i;
    }
    else if( lua_isnil( L, 2)) // alternate explicit "no timeout" by passing nil before the key
    {
        ++ key_i;
    }

    // are we in batched mode?
    {
        int is_batched;
        lua_pushliteral( L, BATCH_SENTINEL);
        is_batched = lua_equal( L, key_i, -1);
        lua_pop( L, 1);
        if( is_batched)
        {
            // no need to pass linda.batched in the keeper state
            ++ key_i;
            // make sure the keys are of a valid type
            check_key_types( L, key_i, key_i);
            // receive multiple values from a single slot
            keeper_receive = KEEPER_API( receive_batched);
            // we expect a user-defined number of return values
            expected_pushed_min = (int)luaL_checkinteger( L, key_i + 1);
            expected_pushed_max = (int)luaL_optinteger( L, key_i + 2, expected_pushed_min);
            // don't forget to count the key in addition to the values
            ++ expected_pushed_min;
            ++ expected_pushed_max;
            if( expected_pushed_min > expected_pushed_max)
            {
                return luaL_error( L, "batched min/max error");
            }
        }
        else
        {
            // make sure the keys are of a valid type
            check_key_types( L, key_i, lua_gettop( L));
            // receive a single value, checking multiple slots
            keeper_receive = KEEPER_API( receive);
            // we expect a single (value, key) pair of returned values
            expected_pushed_min = expected_pushed_max = 2;
        }
    }

    {
        bool_t try_again = TRUE;
        struct s_lane* const s = get_lane_from_registry( L);
        struct s_Keeper* K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda));
        if( K == NULL) return 0;
        for( ;;)
        {
            if( s != NULL)
            {
                cancel = s->cancel_request;
            }
            cancel = (cancel != CANCEL_NONE) ? cancel : linda->simulate_cancel;
            // if user wants to cancel, or looped because of a timeout, the call returns without retrieving anything
            if( !try_again || cancel != CANCEL_NONE)
            {
                pushed = 0;
                break;
            }

            // all arguments of receive() but the first are passed to the keeper's receive function
            pushed = keeper_call( linda->U, K->L, keeper_receive, L, linda, key_i);
            if( pushed < 0)
            {
                break;
            }
            if( pushed > 0)
            {
                ASSERT_L( pushed >= expected_pushed_min && pushed <= expected_pushed_max);
                // replace sentinels with real nils
                keeper_toggle_nil_sentinels( L, lua_gettop( L) - pushed, eLM_FromKeeper);
                // To be done from within the 'K' locking area
                //
                SIGNAL_ALL( &linda->read_happened);
                break;
            }

            if( timeout == 0.0)
            {
                break; /* instant timeout */
            }

            // nothing received, wait until timeout or signalled that we should try again
            {
                enum e_status prev_status = ERROR_ST; // prevent 'might be used uninitialized' warnings
                if( s != NULL)
                {
                    // change status of lane to "waiting"
                    prev_status = s->status; // RUNNING, most likely
                    ASSERT_L( prev_status == RUNNING); // but check, just in case
                    s->status = WAITING;
                    ASSERT_L( s->waiting_on == NULL);
                    s->waiting_on = &linda->write_happened;
                }
                // not enough data to read: wake up when data is sent, or when timeout is reached
                try_again = SIGNAL_WAIT( &linda->write_happened, &K->keeper_cs, timeout);
                if( s != NULL)
                {
                    s->waiting_on = NULL;
                    s->status = prev_status;
                }
            }
        }
        keeper_release( K);
    }

    // must trigger error after keeper state has been released
    if( pushed < 0)
    {
        return luaL_error( L, "tried to copy unsupported types");
    }

    switch( cancel)
    {
        case CANCEL_SOFT:
            // if user wants to soft-cancel, the call returns CANCEL_ERROR
            lua_pushlightuserdata( L, CANCEL_ERROR);
            return 1;

        case CANCEL_HARD:
            // raise an error interrupting execution only in case of hard cancel
            return cancel_error( L); // raises an error and doesn't return

        default:
            return pushed;
    }
}
/*
 * [true|lanes.cancel_error] = linda_set( linda_ud, key_num|str|bool|lightuserdata [, value [, ...]])
 *
 * Set one or more values in the Linda.
 * TODO: what do we do if we set to non-nil and limit is 0?
 *
 * Existing slot value is replaced, and possible queued entries removed.
 */
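/*
 * Lua-side usage sketch (illustrative only):
 *
 *   linda:set( "config", { threads = 4}) -- replaces the slot, drops queued entries
 *   linda:set( "config")                 -- no value: clears the slot
 */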
LUAG_FUNC( linda_set)
{
    struct s_Linda* const linda = lua_toLinda( L, 1);
    int pushed;
    bool_t has_value = lua_gettop( L) > 2;

    // make sure the key is of a valid type (throws an error if not the case)
    check_key_types( L, 2, 2);

    {
        struct s_Keeper* K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda));
        if( K == NULL) return 0;
        if( linda->simulate_cancel == CANCEL_NONE)
        {
            if( has_value)
            {
                // convert nils to some special non-nil sentinel in sent values
                keeper_toggle_nil_sentinels( L, 3, eLM_ToKeeper);
            }
            pushed = keeper_call( linda->U, K->L, KEEPER_API( set), L, linda, 2);
            if( pushed >= 0) // no error?
            {
                ASSERT_L( pushed == 0 || pushed == 1);
                if( has_value)
                {
                    // we put some data in the slot, tell readers that they should wake
                    SIGNAL_ALL( &linda->write_happened); // To be done from within the 'K' locking area
                }
                if( pushed == 1)
                {
                    // the key was full, but it is no longer the case, tell writers they should wake
                    ASSERT_L( lua_type( L, -1) == LUA_TBOOLEAN && lua_toboolean( L, -1) == 1);
                    SIGNAL_ALL( &linda->read_happened); // To be done from within the 'K' locking area
                }
            }
        }
        else // linda is cancelled
        {
            // do nothing and return lanes.cancel_error
            lua_pushlightuserdata( L, CANCEL_ERROR);
            pushed = 1;
        }
        keeper_release( K);
    }

    // must trigger any error after keeper state has been released
    return (pushed < 0) ? luaL_error( L, "tried to copy unsupported types") : pushed;
}
/*
 * [val] = linda_count( linda_ud, [key [, ...]])
 *
 * Get a count of the pending elements in the specified keys
 */
LUAG_FUNC( linda_count)
{
    struct s_Linda* linda = lua_toLinda( L, 1);
    int pushed;

    // make sure the keys are of a valid type
    check_key_types( L, 2, lua_gettop( L));

    {
        struct s_Keeper* K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda));
        if( K == NULL) return 0;
        pushed = keeper_call( linda->U, K->L, KEEPER_API( count), L, linda, 2);
        keeper_release( K);
        if( pushed < 0)
        {
            return luaL_error( L, "tried to count an invalid key");
        }
    }
    return pushed;
}
/*
 * [val [, ...]] = linda_get( linda_ud, key_num|str|bool|lightuserdata [, count = 1])
 *
 * Get one or more values from Linda.
 */
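/*
 * Lua-side usage sketch (illustrative only):
 *
 *   local v = linda:get( "config") -- reads without consuming
 *   local n = linda:count( "jobs") -- number of values pending under "jobs"
 */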
LUAG_FUNC( linda_get)
{
    struct s_Linda* const linda = lua_toLinda( L, 1);
    int pushed;
    int count = luaL_optint( L, 3, 1);
    luaL_argcheck( L, count >= 1, 3, "count should be >= 1");
    luaL_argcheck( L, lua_gettop( L) <= 3, 4, "too many arguments");

    // make sure the key is of a valid type (throws an error if not the case)
    check_key_types( L, 2, 2);

    {
        struct s_Keeper* K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda));
        if( K == NULL) return 0;
        if( linda->simulate_cancel == CANCEL_NONE)
        {
            pushed = keeper_call( linda->U, K->L, KEEPER_API( get), L, linda, 2);
            if( pushed > 0)
            {
                keeper_toggle_nil_sentinels( L, lua_gettop( L) - pushed, eLM_FromKeeper);
            }
        }
        else // linda is cancelled
        {
            // do nothing and return lanes.cancel_error
            lua_pushlightuserdata( L, CANCEL_ERROR);
            pushed = 1;
        }
        keeper_release( K);
        // must trigger error after keeper state has been released
        // (an error can be raised if we attempt to read an unregistered function)
        if( pushed < 0)
        {
            return luaL_error( L, "tried to copy unsupported types");
        }
    }
    return pushed;
}
/*
 * [true] = linda_limit( linda_ud, key_num|str|bool|lightuserdata, int)
 *
 * Set a limit on one Linda key.
 * Optionally wakes threads waiting to write on the linda, in case the limit enables them to do so.
 */
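/*
 * Lua-side usage sketch (illustrative only):
 *
 *   linda:limit( "jobs", 10) -- at most 10 pending values under "jobs";
 *                            -- senders then block (or time out) while the slot is full
 */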
LUAG_FUNC( linda_limit)
{
    struct s_Linda* linda = lua_toLinda( L, 1);
    int pushed;

    // make sure we got 3 arguments: the linda, a key and a limit
    luaL_argcheck( L, lua_gettop( L) == 3, 2, "wrong number of arguments");
    // make sure we got a numeric limit
    luaL_checknumber( L, 3);
    // make sure the key is of a valid type
    check_key_types( L, 2, 2);

    {
        struct s_Keeper* K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda));
        if( K == NULL) return 0;
        if( linda->simulate_cancel == CANCEL_NONE)
        {
            pushed = keeper_call( linda->U, K->L, KEEPER_API( limit), L, linda, 2);
            ASSERT_L( pushed == 0 || pushed == 1); // no error, optional boolean value saying if we should wake blocked writer threads
            if( pushed == 1)
            {
                ASSERT_L( lua_type( L, -1) == LUA_TBOOLEAN && lua_toboolean( L, -1) == 1);
                SIGNAL_ALL( &linda->read_happened); // To be done from within the 'K' locking area
            }
        }
        else // linda is cancelled
        {
            // do nothing and return lanes.cancel_error
            lua_pushlightuserdata( L, CANCEL_ERROR);
            pushed = 1;
        }
        keeper_release( K);
    }
    // propagate pushed boolean if any
    return pushed;
}
/*
 * (void) = linda_cancel( linda_ud, "read"|"write"|"both"|"none")
 *
 * Signal linda so that waiting threads wake up as if their own lane was cancelled
 */
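/*
 * Lua-side usage sketch (illustrative only):
 *
 *   linda:cancel( "both") -- wake all blocked readers and writers
 *   linda:cancel( "none") -- reset, so the linda operates normally again
 */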
LUAG_FUNC( linda_cancel)
{
    struct s_Linda* linda = lua_toLinda( L, 1);
    char const* who = luaL_optstring( L, 2, "both");
    struct s_Keeper* K;

    // make sure we got at most 2 arguments: the linda and an optional wake hint
    luaL_argcheck( L, lua_gettop( L) <= 2, 2, "wrong number of arguments");

    // signalling must be done from inside the K locking area
    K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda));
    if( K == NULL) return 0;

    linda->simulate_cancel = CANCEL_SOFT;
    if( strcmp( who, "both") == 0) // tell all blocked readers and writers to wake up
    {
        SIGNAL_ALL( &linda->write_happened);
        SIGNAL_ALL( &linda->read_happened);
    }
    else if( strcmp( who, "none") == 0) // reset flag
    {
        linda->simulate_cancel = CANCEL_NONE;
    }
    else if( strcmp( who, "read") == 0) // tell blocked readers to wake up
    {
        SIGNAL_ALL( &linda->write_happened);
    }
    else if( strcmp( who, "write") == 0) // tell blocked writers to wake up
    {
        SIGNAL_ALL( &linda->read_happened);
    }
    else
    {
        // error ...
        linda = NULL;
    }
    keeper_release( K);

    // ... but we must raise it outside the lock
    if( !linda)
    {
        return luaL_error( L, "unknown wake hint '%s'", who);
    }
    return 0;
}
/*
 * lightuserdata= linda_deep( linda_ud )
 *
 * Return the 'deep' userdata pointer, identifying the Linda.
 *
 * This is needed for using Lindas as key indices (timer system needs it);
 * separately created proxies of the same underlying deep object will have
 * different userdata and won't be known to be essentially the same deep one
 * without this.
 */
LUAG_FUNC( linda_deep)
{
    struct s_Linda* linda = lua_toLinda( L, 1);
    lua_pushlightuserdata( L, linda); // just the address
    return 1;
}
/*
 * string = linda:__tostring( linda_ud)
 *
 * Return the stringification of a linda
 *
 * Useful for concatenation or debugging purposes
 */
static int linda_tostring( lua_State* L, int idx_, bool_t opt_)
{
    struct s_Linda* linda = (struct s_Linda*) luaG_todeep( L, linda_id, idx_);
    if( !opt_)
    {
        luaL_argcheck( L, linda, idx_, "expecting a linda object");
    }
    if( linda != NULL)
    {
        char text[128];
        int len;
        if( linda->name[0])
            len = sprintf( text, "Linda: %.*s", (int)sizeof(text) - 8, linda->name);
        else
            len = sprintf( text, "Linda: %p", linda);
        lua_pushlstring( L, text, len);
        return 1;
    }
    return 0;
}

LUAG_FUNC( linda_tostring)
{
    return linda_tostring( L, 1, FALSE);
}
/*
 * string = linda:__concat( a, b)
 *
 * Return the concatenation of a pair of items, one of them being a linda
 *
 * Useful for concatenation or debugging purposes
 */
LUAG_FUNC( linda_concat)
{                                   // linda1? linda2?
    bool_t atLeastOneLinda = FALSE;
    // Lua semantics enforce that one of the 2 arguments is a Linda, but not necessarily both.
    if( linda_tostring( L, 1, TRUE))
    {
        atLeastOneLinda = TRUE;
        lua_replace( L, 1);
    }
    if( linda_tostring( L, 2, TRUE))
    {
        atLeastOneLinda = TRUE;
        lua_replace( L, 2);
    }
    if( !atLeastOneLinda) // should not be possible
    {
        return luaL_error( L, "internal error: linda_concat called on non-Linda");
    }
    lua_concat( L, 2);
    return 1;
}
/*
 * table = linda:dump()
 * return a table listing all pending data inside the linda
 */
LUAG_FUNC( linda_dump)
{
    struct s_Linda* linda = lua_toLinda( L, 1);
    ASSERT_L( linda->U == get_universe( L));
    return keeper_push_linda_storage( linda->U, L, linda, LINDA_KEEPER_HASHSEED( linda));
}
/*
 * Identity function of a shared userdata object.
 *
 *   lightuserdata= linda_id( "new" [, ...] )
 *   = linda_id( "delete", lightuserdata )
 *
 * Creation and cleanup of actual 'deep' objects. 'luaG_...' will wrap them into
 * regular userdata proxies, per each state using the deep data.
 *
 *   tbl= linda_id( "metatable" )
 *
 * Returns a metatable for the proxy objects ('__gc' method not needed; will
 * be added by 'luaG_...')
 *
 *   string= linda_id( "module")
 *
 * Returns the name of the module that a state should require
 * in order to keep a handle on the shared library that exported the idfunc
 *
 *   = linda_id( str, ... )
 *
 * For any other strings, the ID function must not react at all. This allows
 * future extensions of the system.
 */
static void* linda_id( lua_State* L, enum eDeepOp op_)
{
    switch( op_)
    {
        case eDO_new:
        {
            struct s_Linda* s;
            size_t name_len = 0;
            char const* linda_name = NULL;
            unsigned long linda_group = 0;
            // should have a string and/or a number on the stack as parameters (name and group)
            switch( lua_gettop( L))
            {
                default: // 0
                    break;

                case 1: // 1 parameter, either a name or a group
                    if( lua_type( L, -1) == LUA_TSTRING)
                    {
                        linda_name = lua_tolstring( L, -1, &name_len);
                    }
                    else
                    {
                        linda_group = (unsigned long) lua_tointeger( L, -1);
                    }
                    break;

                case 2: // 2 parameters, a name and group, in that order
                    linda_name = lua_tolstring( L, -2, &name_len);
                    linda_group = lua_tointeger( L, -1);
                    break;
            }

            /* The deep data is allocated separately from the Lua stack; we might no
             * longer be around when the last reference to it is being released.
             * One can use any memory allocation scheme.
             * Just don't use L's allocF, because we don't know which state will get the honor of GCing the linda
             */
            s = (struct s_Linda*) malloc( sizeof(struct s_Linda) + name_len); // terminating 0 is already included
            if( s)
            {
                SIGNAL_INIT( &s->read_happened);
                SIGNAL_INIT( &s->write_happened);
                s->U = get_universe( L);
                s->simulate_cancel = CANCEL_NONE;
                s->group = linda_group << KEEPER_MAGIC_SHIFT;
                s->name[0] = 0;
                memcpy( s->name, linda_name, name_len ? name_len + 1 : 0);
            }
            return s;
        }

        case eDO_delete:
        {
            struct s_Keeper* K;
            struct s_Linda* linda = lua_touserdata( L, 1);
            ASSERT_L( linda);

            /* Clean associated structures in the keeper state.
             */
            K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda));
            if( K && K->L) // can be NULL if this happens during main state shutdown (lanes is GC'ed -> no keepers -> no need to cleanup)
            {
                keeper_call( linda->U, K->L, KEEPER_API( clear), L, linda, 0);
            }
            keeper_release( K);

            /* There aren't any lanes waiting on these lindas, since all proxies
             * have been gc'ed. Right?
             */
            SIGNAL_FREE( &linda->read_happened);
            SIGNAL_FREE( &linda->write_happened);
            free( linda);
            return NULL;
        }

        case eDO_metatable:
        {
            STACK_CHECK( L);
            lua_newtable( L);
            // metatable is its own index
            lua_pushvalue( L, -1);
            lua_setfield( L, -2, "__index");

            // protect metatable from external access
            lua_pushliteral( L, "Linda");
            lua_setfield( L, -2, "__metatable");

            lua_pushcfunction( L, LG_linda_tostring);
            lua_setfield( L, -2, "__tostring");

            // Decoda __towatch support
            lua_pushcfunction( L, LG_linda_dump);
            lua_setfield( L, -2, "__towatch");

            lua_pushcfunction( L, LG_linda_concat);
            lua_setfield( L, -2, "__concat");

            // [-1]: linda metatable
            lua_pushcfunction( L, LG_linda_send);
            lua_setfield( L, -2, "send");

            lua_pushcfunction( L, LG_linda_receive);
            lua_setfield( L, -2, "receive");

            lua_pushcfunction( L, LG_linda_limit);
            lua_setfield( L, -2, "limit");

            lua_pushcfunction( L, LG_linda_set);
            lua_setfield( L, -2, "set");

            lua_pushcfunction( L, LG_linda_count);
            lua_setfield( L, -2, "count");

            lua_pushcfunction( L, LG_linda_get);
            lua_setfield( L, -2, "get");

            lua_pushcfunction( L, LG_linda_cancel);
            lua_setfield( L, -2, "cancel");

            lua_pushcfunction( L, LG_linda_deep);
            lua_setfield( L, -2, "deep");

            lua_pushcfunction( L, LG_linda_dump);
            lua_setfield( L, -2, "dump");

            lua_pushliteral( L, BATCH_SENTINEL);
            lua_setfield( L, -2, "batched");

            lua_pushlightuserdata( L, NIL_SENTINEL);
            lua_setfield( L, -2, "null");

            STACK_END( L, 1);
            return NULL;
        }

        case eDO_module:
        // linda is a special case because we know lanes must be loaded from the main lua state
        // to be able to ever get here, so we know it will remain loaded as long as the main state is around
        // in other words, forever.
        default:
        {
            return NULL;
        }
    }
}
/*
 * ud = lanes.linda( [name [, group]])
 *
 * returns a linda object, or raises an error if creation failed
 */
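/*
 * Lua-side usage sketch (illustrative only):
 *
 *   local l1 = lanes.linda()           -- anonymous
 *   local l2 = lanes.linda( "stats")   -- named, for nicer tostring() output
 *   local l3 = lanes.linda( "jobs", 1) -- named, assigned to keeper group 1
 */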
LUAG_FUNC( linda)
{
    int const top = lua_gettop( L);
    luaL_argcheck( L, top <= 2, top, "too many arguments");
    if( top == 1)
    {
        int const t = lua_type( L, 1);
        luaL_argcheck( L, t == LUA_TSTRING || t == LUA_TNUMBER, 1, "wrong parameter (should be a string or a number)");
    }
    else if( top == 2)
    {
        luaL_checktype( L, 1, LUA_TSTRING);
        luaL_checktype( L, 2, LUA_TNUMBER);
    }
    return luaG_newdeepuserdata( L, linda_id);
}
/*
 * ###############################################################################################
 * ########################################## Finalizer ##########################################
 * ###############################################################################################
 */
//---
// void= finalizer( finalizer_func )
//
// finalizer_func( [err, stack_tbl] )
//
// Add a function that will be called when exiting the lane, either via
// normal return or an error.
//
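// Lua-side usage sketch (illustrative only; runs inside a lane body):
//
//   set_finalizer( function( err, stk)
//       if err then io.stderr:write( "lane failed: ", tostring( err), "\n") end
//       -- release any resources held by the lane here
//   end)
//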
LUAG_FUNC( set_finalizer)
{
    luaL_argcheck( L, lua_isfunction( L, 1), 1, "finalizer should be a function");
    luaL_argcheck( L, lua_gettop( L) == 1, 1, "too many arguments");
    // Get the current finalizer table (if any)
    push_registry_table( L, FINALIZER_REG_KEY, TRUE /*do create if none*/); // finalizer {finalisers}
    STACK_GROW( L, 2);
    lua_pushinteger( L, lua_rawlen( L, -1) + 1);                            // finalizer {finalisers} idx
    lua_pushvalue( L, 1);                                                   // finalizer {finalisers} idx finalizer
    lua_rawset( L, -3);                                                     // finalizer {finalisers}
    lua_pop( L, 2);                                                         //
    return 0;
}
//---
// Run finalizers - if any - with the given parameters
//
// If 'rc' is nonzero, error message and stack index (the latter only when ERROR_FULL_STACK == 1) are available as:
//      [-1]: stack trace (table)
//      [-2]: error message (any type)
//
// Returns:
//      0 if finalizers were run without error (or there were none)
//      LUA_ERRxxx return code if any of the finalizers failed
//
// TBD: should we add stack trace on failing finalizer, wouldn't be hard..
//
static void push_stack_trace( lua_State* L, int rc_, int stk_base_);
static int run_finalizers( lua_State* L, int lua_rc)
{
    int finalizers_index;
    int n;
    int err_handler_index = 0;
    int rc = LUA_OK;                                            // ...
    if( !push_registry_table( L, FINALIZER_REG_KEY, FALSE))    // ... finalizers?
    {
        return 0; // no finalizers
    }

    STACK_GROW( L, 5);

    finalizers_index = lua_gettop( L);

#if ERROR_FULL_STACK
    lua_pushcfunction( L, lane_error);                          // ... finalizers lane_error
    err_handler_index = lua_gettop( L);
#endif // ERROR_FULL_STACK

    for( n = (int) lua_rawlen( L, finalizers_index); n > 0; -- n)
    {
        int args = 0;
        lua_pushinteger( L, n);                                 // ... finalizers lane_error n
        lua_rawget( L, finalizers_index);                       // ... finalizers lane_error finalizer
        ASSERT_L( lua_isfunction( L, -1));
        if( lua_rc != LUA_OK) // we have an error message and an optional stack trace at the bottom of the stack
        {
            ASSERT_L( finalizers_index == 2 || finalizers_index == 3);
            //char const* err_msg = lua_tostring( L, 1);
            lua_pushvalue( L, 1);                               // ... finalizers lane_error finalizer err_msg
            // note we don't always have a stack trace, for example when CANCEL_ERROR, or when we got an error that doesn't call our handler, such as LUA_ERRMEM
            if( finalizers_index == 3)
            {
                lua_pushvalue( L, 2);                           // ... finalizers lane_error finalizer err_msg stack_trace
            }
            args = finalizers_index - 1;
        }

        // if no error from the main body, the finalizer doesn't receive any argument, else it gets the error message and optional stack trace
        rc = lua_pcall( L, args, 0, err_handler_index);         // ... finalizers lane_error err_msg2?
        if( rc != LUA_OK)
        {
            push_stack_trace( L, rc, lua_gettop( L));
            // If one finalizer fails, don't run the others. Return this
            // as the 'real' error, replacing what we could have had (or not)
            // from the actual code.
            break;
        }
        // no error, proceed to next finalizer                  // ... finalizers lane_error
    }

    if( rc != LUA_OK)
    {
        // ERROR_FULL_STACK accounts for the presence of lane_error on the stack
        int nb_err_slots = lua_gettop( L) - finalizers_index - ERROR_FULL_STACK;
        // a finalizer generated an error, this is what we leave of the stack
        for( n = nb_err_slots; n > 0; -- n)
        {
            lua_replace( L, n);
        }
        // leave on the stack only the error and optional stack trace produced by the error in the finalizer
        lua_settop( L, nb_err_slots);
    }
    else // no error from the finalizers, make sure only the original return values from the lane body remain on the stack
    {
        lua_settop( L, finalizers_index - 1);
    }

    return rc;
}
/*
 * ###############################################################################################
 * ########################################### Threads ###########################################
 * ###############################################################################################
 */
//---
// = thread_cancel( lane_ud [,timeout_secs=0.0] [,force_kill_bool=false] )
//
// The originator thread asking us specifically to cancel the other thread.
//
// 'timeout':    <0: wait forever, until the lane is finished
//              0.0: just signal it to cancel, no time waited
//               >0: time to wait for the lane to detect cancellation
//
// 'force_kill': if true, and lane does not detect cancellation within timeout,
//               it is forcefully killed. Using this with 0.0 timeout means just kill
//               (unless the lane is already finished).
//
// Returns: true if the lane was already finished (DONE/ERROR_ST/CANCELLED) or if we
//          managed to cancel it.
//          false if the cancellation timed out, or a kill was needed.
//
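// Lua-side usage sketch (illustrative only; 'h' is a lane handle):
//
//   local done = h:cancel( 1.0)       -- wait up to 1s for the lane to notice
//   local done = h:cancel( 1.0, true) -- same, then force-kill on timeout
//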
typedef enum
{
    CR_Timeout,
    CR_Cancelled,
    CR_Killed
} cancel_result;
static cancel_result thread_cancel( lua_State* L, struct s_lane* s, double secs, bool_t force, double waitkill_timeout_)
{
    cancel_result result;
    // remember that lanes are not transferable: only one thread can cancel a lane, so no multithreading issue here
    // We can read 's->status' without locks, but not wait for it (if Posix no PTHREAD_TIMEDJOIN)
    if( s->mstatus == KILLED)
    {
        result = CR_Killed;
    }
    else if( s->status < DONE)
    {
        // signal the linda to wake up the thread so that it can react to the cancel query
        // let us hope we never land here with a pointer on a linda that has been destroyed...
        if( secs < 0.0)
        {
            s->cancel_request = CANCEL_SOFT; // it's now signaled to stop
            // negative timeout: we don't want to truly abort the lane, we just want it to react to cancel_test() on its own
            if( force) // wake the thread so that execution returns from any pending linda operation if desired
            {
                SIGNAL_T *waiting_on = s->waiting_on;
                if( s->status == WAITING && waiting_on != NULL)
                {
                    SIGNAL_ALL( waiting_on);
                }
            }
            // say we succeeded though
            result = CR_Cancelled;
        }
        else
        {
            s->cancel_request = CANCEL_HARD; // it's now signaled to stop
            {
                SIGNAL_T *waiting_on = s->waiting_on;
                if( s->status == WAITING && waiting_on != NULL)
                {
                    SIGNAL_ALL( waiting_on);
                }
            }

            result = THREAD_WAIT( &s->thread, secs, &s->done_signal, &s->done_lock, &s->status) ? CR_Cancelled : CR_Timeout;

            if( (result == CR_Timeout) && force)
            {
                // Killing is asynchronous; we _will_ wait for it to be done at
                // GC, to make sure the data structure can be released (alternative
                // would be use of "cancellation cleanup handlers" that at least
                // PThread seems to have).
                //
                THREAD_KILL( &s->thread);
#if THREADAPI == THREADAPI_PTHREAD
                // pthread: make sure the thread is really stopped!
                // note that this may block forever if the lane doesn't call a cancellation point and pthread doesn't honor PTHREAD_CANCEL_ASYNCHRONOUS
                result = THREAD_WAIT( &s->thread, waitkill_timeout_, &s->done_signal, &s->done_lock, &s->status);
                if( result == CR_Timeout)
                {
                    return luaL_error( L, "force-killed lane failed to terminate within %f second%s", waitkill_timeout_, waitkill_timeout_ > 1 ? "s" : "");
                }
#endif // THREADAPI == THREADAPI_PTHREAD
                s->mstatus = KILLED; // mark 'gc' to wait for it
                // note that s->status value must remain whatever it was at the time of the kill
                // because we need to know if we can lua_close() the Lua State or not.
                result = CR_Killed;
            }
        }
    }
    else
    {
        // say "ok" by default, including when lane is already done
        result = CR_Cancelled;
    }
    return result;
}
//
// Protects modifying the selfdestruct chain
#define SELFDESTRUCT_END ((struct s_lane*)(-1))
//
// The chain is ended by '(struct s_lane*)(-1)', not NULL:
// 'selfdestruct_first -> ... -> ... -> (-1)'

/*
 * Add the lane to selfdestruct chain; the ones still running at the end of the
 * whole process will be cancelled.
 */
static void selfdestruct_add( struct s_lane* s)
{
    MUTEX_LOCK( &s->U->selfdestruct_cs);
    assert( s->selfdestruct_next == NULL);

    s->selfdestruct_next = s->U->selfdestruct_first;
    s->U->selfdestruct_first = s;
    MUTEX_UNLOCK( &s->U->selfdestruct_cs);
}
/*
 * A free-running lane has ended; remove it from selfdestruct chain
 */
static bool_t selfdestruct_remove( struct s_lane* s)
{
    bool_t found = FALSE;
    MUTEX_LOCK( &s->U->selfdestruct_cs);
    {
        // Make sure (within the MUTEX) that we actually are in the chain
        // still (at process exit they will remove us from chain and then
        // cancel/kill).
        //
        if( s->selfdestruct_next != NULL)
        {
            struct s_lane** ref = (struct s_lane**) &s->U->selfdestruct_first;
            while( *ref != SELFDESTRUCT_END )
            {
                if( *ref == s)
                {
                    *ref = s->selfdestruct_next;
                    s->selfdestruct_next = NULL;
                    // the terminal shutdown should wait until the lane is done with its lua_close()
                    ++ s->U->selfdestructing_count;
                    found = TRUE;
                    break;
                }
                ref = (struct s_lane**) &((*ref)->selfdestruct_next);
            }
            assert( found);
        }
    }
    MUTEX_UNLOCK( &s->U->selfdestruct_cs);
    return found;
}
/*
** mutex-protected allocator for use with Lua states that have non-threadsafe allocators (such as LuaJIT)
*/
struct ProtectedAllocator_s
{
    lua_Alloc allocF;
    void* allocUD;
    MUTEX_T lock;
};

void * protected_lua_Alloc( void *ud, void *ptr, size_t osize, size_t nsize)
{
    void* p;
    struct ProtectedAllocator_s* s = (struct ProtectedAllocator_s*) ud;
    MUTEX_LOCK( &s->lock);
    p = s->allocF( s->allocUD, ptr, osize, nsize);
    MUTEX_UNLOCK( &s->lock);
    return p;
}
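/*
 * Installation sketch (for illustration only; the real wiring lives in the
 * configuration code elsewhere in this file). The wrapper simply interposes
 * on the state's existing allocator and serializes every call on s->lock:
 *
 *   struct ProtectedAllocator_s* s = malloc( sizeof( struct ProtectedAllocator_s));
 *   s->allocF = lua_getallocf( L, &s->allocUD); // remember the original allocator
 *   MUTEX_INIT( &s->lock);
 *   lua_setallocf( L, protected_lua_Alloc, s);
 *
 * selfdestruct_gc() below undoes exactly this, restoring the original
 * allocF/allocUD pair before freeing the wrapper.
 */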
  1393. /*
  1394. * Process end; cancel any still free-running threads
  1395. */
  1396. static int selfdestruct_gc( lua_State* L)
  1397. {
  1398. struct s_Universe* U = (struct s_Universe*) lua_touserdata( L, 1);
  1399. while( U->selfdestruct_first != SELFDESTRUCT_END) // true at most once!
  1400. {
  1401. // Signal _all_ still running threads to exit (including the timer thread)
  1402. //
  1403. MUTEX_LOCK( &U->selfdestruct_cs);
  1404. {
  1405. struct s_lane* s = U->selfdestruct_first;
  1406. while( s != SELFDESTRUCT_END)
  1407. {
  1408. // attempt a regular unforced hard cancel with a small timeout
  1409. bool_t cancelled = THREAD_ISNULL( s->thread) || thread_cancel( L, s, 0.0001, FALSE, 0.0);
  1410. // if we failed, and we know the thread is waiting on a linda
  1411. if( cancelled == FALSE && s->status == WAITING && s->waiting_on != NULL)
  1412. {
  1413. // signal the linda the wake up the thread so that it can react to the cancel query
  1414. // let us hope we never land here with a pointer on a linda that has been destroyed...
  1415. SIGNAL_T *waiting_on = s->waiting_on;
  1416. //s->waiting_on = NULL; // useful, or not?
  1417. SIGNAL_ALL( waiting_on);
  1418. }
  1419. s = s->selfdestruct_next;
  1420. }
  1421. }
  1422. MUTEX_UNLOCK( &U->selfdestruct_cs);
  1423. // When noticing their cancel, the lanes will remove themselves from
  1424. // the selfdestruct chain.
  1425. // TBD: Not sure if Windows (multi core) will require the timed approach,
  1426. // or single Yield. I don't have machine to test that (so leaving
  1427. // for timed approach). -- AKa 25-Oct-2008
  1428. // OS X 10.5 (Intel) needs more to avoid segfaults.
  1429. //
  1430. // "make test" is okay. 100's of "make require" are okay.
  1431. //
  1432. // Tested on MacBook Core Duo 2GHz and 10.5.5:
  1433. // -- AKa 25-Oct-2008
  1434. //
  1435. {
  1436. lua_Number const shutdown_timeout = lua_tonumber( L, lua_upvalueindex( 1));
  1437. double const t_until = now_secs() + shutdown_timeout;
  1438. while( U->selfdestruct_first != SELFDESTRUCT_END)
  1439. {
  1440. YIELD(); // give threads time to act on their cancel
  1441. {
  1442. // count the number of cancelled thread that didn't have the time to act yet
  1443. int n = 0;
  1444. double t_now = 0.0;
  1445. MUTEX_LOCK( &U->selfdestruct_cs);
  1446. {
  1447. struct s_lane* s = U->selfdestruct_first;
  1448. while( s != SELFDESTRUCT_END)
  1449. {
  1450. if( s->cancel_request == CANCEL_HARD)
  1451. ++ n;
  1452. s = s->selfdestruct_next;
  1453. }
  1454. }
  1455. MUTEX_UNLOCK( &U->selfdestruct_cs);
  1456. // if timeout elapsed, or we know all threads have acted, stop waiting
  1457. t_now = now_secs();
  1458. if( n == 0 || (t_now >= t_until))
  1459. {
  1460. DEBUGSPEW_CODE( fprintf( stderr, "%d uncancelled lane(s) remain after waiting %fs at process end.\n", n, shutdown_timeout - (t_until - t_now)));
  1461. break;
  1462. }
  1463. }
  1464. }
  1465. }
  1466. // If some lanes are currently cleaning after themselves, wait until they are done.
  1467. // They are no longer listed in the selfdestruct chain, but they still have to lua_close().
  1468. {
  1469. bool_t again = TRUE;
  1470. do
  1471. {
  1472. MUTEX_LOCK( &U->selfdestruct_cs);
  1473. again = (U->selfdestructing_count > 0) ? TRUE : FALSE;
  1474. MUTEX_UNLOCK( &U->selfdestruct_cs);
  1475. YIELD();
  1476. } while( again);
  1477. }
  1478. //---
  1479. // Kill the still free running threads
  1480. //
  1481. if( U->selfdestruct_first != SELFDESTRUCT_END)
  1482. {
  1483. unsigned int n = 0;
  1484. // first thing we did was to raise the linda signals the threads were waiting on (if any)
  1485. // therefore, any well-behaved thread should be in CANCELLED state
  1486. // these are not running, and the state can be closed
  1487. MUTEX_LOCK( &U->selfdestruct_cs);
  1488. {
  1489. struct s_lane* s = U->selfdestruct_first;
  1490. while( s != SELFDESTRUCT_END)
  1491. {
  1492. struct s_lane* next_s = s->selfdestruct_next;
  1493. s->selfdestruct_next = NULL; // detach from selfdestruct chain
  1494. if( !THREAD_ISNULL( s->thread)) // can be NULL if previous 'soft' termination succeeded
  1495. {
  1496. THREAD_KILL( &s->thread);
  1497. #if THREADAPI == THREADAPI_PTHREAD
  1498. // pthread: make sure the thread is really stopped!
  1499. THREAD_WAIT( &s->thread, -1, &s->done_signal, &s->done_lock, &s->status);
  1500. #endif // THREADAPI == THREADAPI_PTHREAD
  1501. }
  1502. // NO lua_close() in this case because we don't know where execution of the state was interrupted
  1503. lane_cleanup( s);
  1504. s = next_s;
  1505. ++ n;
  1506. }
  1507. U->selfdestruct_first = SELFDESTRUCT_END;
  1508. }
  1509. MUTEX_UNLOCK( &U->selfdestruct_cs);
  1510. DEBUGSPEW_CODE( fprintf( stderr, "Killed %d lane(s) at process end.\n", n));
  1511. }
  1512. }
  1513. // necessary so that calling free_deep_prelude doesn't crash because linda_id expects a linda lightuserdata at absolute slot 1
  1514. lua_settop( L, 0);
  1515. // no need to mutex-protect this as all threads in the universe are gone at that point
  1516. -- U->timer_deep->refcount; // should be 0 now
  1517. free_deep_prelude( L, (DEEP_PRELUDE*) U->timer_deep);
  1518. U->timer_deep = NULL;
  1519. close_keepers( U, L);
  1520. // remove the protected allocator, if any
  1521. {
  1522. void* ud;
  1523. lua_Alloc allocF = lua_getallocf( L, &ud);
  1524. if( allocF == protected_lua_Alloc)
  1525. {
  1526. struct ProtectedAllocator_s* s = (struct ProtectedAllocator_s*) ud;
  1527. lua_setallocf( L, s->allocF, s->allocUD);
  1528. MUTEX_FREE( &s->lock);
  1529. s->allocF( s->allocUD, s, sizeof( struct ProtectedAllocator_s), 0);
  1530. }
  1531. }
  1532. #if HAVE_LANE_TRACKING
  1533. MUTEX_FREE( &U->tracking_cs);
  1534. #endif // HAVE_LANE_TRACKING
  1535. // Linked chains handling
  1536. MUTEX_FREE( &U->selfdestruct_cs);
  1537. MUTEX_FREE( &U->require_cs);
  1538. // Locks for 'tools.c' inc/dec counters
  1539. MUTEX_FREE( &U->deep_lock);
  1540. MUTEX_FREE( &U->mtid_lock);
  1541. return 0;
  1542. }
  1543. //---
  1544. // bool = cancel_test()
  1545. //
  1546. // Available inside the global namespace of lanes
  1547. // returns a boolean saying if a cancel request is pending
  1548. //
  1549. LUAG_FUNC( cancel_test)
  1550. {
  1551. enum e_cancel_request test = cancel_test( L);
  1552. lua_pushboolean( L, test != CANCEL_NONE);
  1553. return 1;
  1554. }
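//
// Usage sketch (illustrative only): inside a lane body, where Lanes injects
// 'cancel_test' as a global, a long-running loop can poll for cancellation:
//
//   while true do
//       do_some_work()
//       if cancel_test() then
//           return "cancelled"
//       end
//   end
//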
  1555. //---
1556. // = set_singlethreaded( [cores_uint=1] )
  1557. //
  1558. // Limits the process to use only 'cores' CPU cores. To be used for performance
  1559. // testing on multicore devices. DEBUGGING ONLY!
  1560. //
  1561. LUAG_FUNC( set_singlethreaded)
  1562. {
  1563. uint_t cores = luaG_optunsigned( L, 1, 1);
  1564. (void) cores; // prevent "unused" warning
  1565. #ifdef PLATFORM_OSX
  1566. #ifdef _UTILBINDTHREADTOCPU
  1567. if( cores > 1)
  1568. {
  1569. return luaL_error( L, "Limiting to N>1 cores not possible");
  1570. }
  1571. // requires 'chudInitialize()'
  1572. utilBindThreadToCPU(0); // # of CPU to run on (we cannot limit to 2..N CPUs?)
  1573. #else
  1574. return luaL_error( L, "Not available: compile with _UTILBINDTHREADTOCPU");
  1575. #endif
  1576. #else
  1577. return luaL_error( L, "not implemented");
  1578. #endif
  1579. return 0;
  1580. }
  1581. /*
  1582. * str= lane_error( error_val|str )
  1583. *
  1584. * Called if there's an error in some lane; add call stack to error message
  1585. * just like 'lua.c' normally does.
  1586. *
  1587. * ".. will be called with the error message and its return value will be the
  1588. * message returned on the stack by lua_pcall."
  1589. *
  1590. * Note: Rather than modifying the error message itself, it would be better
  1591. * to provide the call stack (as string) completely separated. This would
  1592. * work great with non-string error values as well (current system does not).
  1593. * (This is NOT possible with the Lua 5.1 'lua_pcall()'; we could of course
  1594. * implement a Lanes-specific 'pcall' of our own that does this). TBD!!! :)
  1595. * --AKa 22-Jan-2009
  1596. */
  1597. #if ERROR_FULL_STACK
  1598. # define EXTENDED_STACK_TRACE_KEY ((void*)LG_set_error_reporting) // used as registry key
  1599. LUAG_FUNC( set_error_reporting)
  1600. {
  1601. bool_t equal;
  1602. luaL_checktype( L, 1, LUA_TSTRING);
  1603. lua_pushliteral( L, "extended");
  1604. equal = lua_rawequal( L, -1, 1);
  1605. lua_pop( L, 1);
  1606. if( equal)
  1607. {
  1608. goto done;
  1609. }
  1610. lua_pushliteral( L, "basic");
  1611. equal = !lua_rawequal( L, -1, 1);
  1612. lua_pop( L, 1);
  1613. if( equal)
  1614. {
  1615. return luaL_error( L, "unsupported error reporting model");
  1616. }
  1617. done:
  1618. lua_pushlightuserdata( L, EXTENDED_STACK_TRACE_KEY);
  1619. lua_pushboolean( L, equal);
  1620. lua_rawset( L, LUA_REGISTRYINDEX);
  1621. return 0;
  1622. }
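//
// Usage sketch (illustrative only): called from inside a lane body, where
// Lanes injects the global; any string but "extended"/"basic" raises an error:
//
//   set_error_reporting( "extended") -- one table per stack trace entry
//   set_error_reporting( "basic") -- plain "file:line" strings (the default)
//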
  1623. static int lane_error( lua_State* L)
  1624. {
  1625. lua_Debug ar;
  1626. unsigned lev, n;
  1627. bool_t extended;
  1628. // [1]: error message (any type)
  1629. assert( lua_gettop( L) == 1);
  1630. // Don't do stack survey for cancelled lanes.
  1631. //
  1632. if( lua_touserdata( L, 1) == CANCEL_ERROR)
  1633. {
  1634. return 1; // just pass on
  1635. }
  1636. lua_pushlightuserdata( L, EXTENDED_STACK_TRACE_KEY);
  1637. lua_gettable( L, LUA_REGISTRYINDEX);
  1638. extended = lua_toboolean( L, -1);
  1639. lua_pop( L, 1);
  1640. // Place stack trace at 'registry[lane_error]' for the 'lua_pcall()'
  1641. // caller to fetch. This bypasses the Lua 5.1 limitation of only one
  1642. // return value from error handler to 'lua_pcall()' caller.
  1643. // It's adequate to push stack trace as a table. This gives the receiver
  1644. // of the stack best means to format it to their liking. Also, it allows
  1645. // us to add more stack info later, if needed.
  1646. //
  1647. // table of { "sourcefile.lua:<line>", ... }
  1648. //
  1649. STACK_GROW( L, 4);
  1650. lua_newtable( L);
  1651. // Best to start from level 1, but in some cases it might be a C function
  1652. // and we don't get '.currentline' for that. It's okay - just keep level
  1653. // and table index growing separate. --AKa 22-Jan-2009
  1654. //
  1655. lev = 0;
  1656. n = 1;
  1657. while( lua_getstack( L, ++ lev, &ar))
  1658. {
  1659. lua_getinfo( L, extended ? "Sln" : "Sl", &ar);
  1660. if( extended)
  1661. {
  1662. lua_newtable( L);
  1663. lua_pushstring( L, ar.source);
  1664. lua_setfield( L, -2, "source");
  1665. lua_pushinteger( L, ar.currentline);
  1666. lua_setfield( L, -2, "currentline");
  1667. lua_pushstring( L, ar.name);
  1668. lua_setfield( L, -2, "name");
  1669. lua_pushstring( L, ar.namewhat);
  1670. lua_setfield( L, -2, "namewhat");
  1671. lua_pushstring( L, ar.what);
  1672. lua_setfield( L, -2, "what");
  1673. lua_rawseti(L, -2, n ++);
  1674. }
  1675. else if (ar.currentline > 0)
  1676. {
  1677. lua_pushinteger( L, n++ );
  1678. lua_pushfstring( L, "%s:%d", ar.short_src, ar.currentline );
  1679. lua_settable( L, -3 );
  1680. }
  1681. }
  1682. lua_pushlightuserdata( L, STACK_TRACE_KEY);
  1683. lua_insert( L, -2);
  1684. lua_settable( L, LUA_REGISTRYINDEX);
  1685. assert( lua_gettop( L) == 1);
  1686. return 1; // the untouched error value
  1687. }
  1688. #endif // ERROR_FULL_STACK
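//
// Shape of the stack trace table built by lane_error (illustrative):
//
//   basic: { "lanes.lua:42", "test.lua:7", ... }
//   extended: { { source = "@test.lua", currentline = 7, name = "body",
//                 namewhat = "local", what = "Lua" }, ... }
//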
  1689. static void push_stack_trace( lua_State* L, int rc_, int stk_base_)
  1690. {
1691. // the Lua 5.1 error handler is limited to one return value, so it stored the stack trace in the registry instead
  1692. switch( rc_)
  1693. {
  1694. case LUA_OK: // no error, body return values are on the stack
  1695. break;
  1696. case LUA_ERRRUN: // cancellation or a runtime error
  1697. #if ERROR_FULL_STACK // when ERROR_FULL_STACK, we installed a handler
  1698. {
  1699. // fetch the call stack table from the registry where the handler stored it
  1700. STACK_GROW( L, 1);
  1701. lua_pushlightuserdata( L, STACK_TRACE_KEY); // err STACK_TRACE_KEY
  1702. // yields nil if no stack was generated (in case of cancellation for example)
  1703. lua_gettable( L, LUA_REGISTRYINDEX); // err trace|nil
  1704. // For cancellation the error message is CANCEL_ERROR, and a stack trace isn't placed
  1705. // For other errors, the message should be a string, and we should have a stack trace table
  1706. ASSERT_L( (lua_istable( L, 1 + stk_base_) && lua_type( L, stk_base_) == LUA_TSTRING) || (lua_touserdata( L, stk_base_) == CANCEL_ERROR));
  1707. // Just leaving the stack trace table on the stack is enough to get it through to the master.
  1708. break;
  1709. }
  1710. #endif // fall through if not ERROR_FULL_STACK
  1711. case LUA_ERRMEM: // memory allocation error (handler not called)
  1712. case LUA_ERRERR: // error while running the error handler (if any, for example an out-of-memory condition)
  1713. default:
  1714. // we should have a single value which is either a string (the error message) or CANCEL_ERROR
  1715. ASSERT_L( (lua_gettop( L) == stk_base_) && ((lua_type( L, stk_base_) == LUA_TSTRING) || (lua_touserdata( L, stk_base_) == CANCEL_ERROR)));
  1716. break;
  1717. }
  1718. }
  1719. LUAG_FUNC( set_debug_threadname)
  1720. {
  1721. // C s_lane structure is a light userdata upvalue
  1722. struct s_lane* s = lua_touserdata( L, lua_upvalueindex( 1));
  1723. luaL_checktype( L, -1, LUA_TSTRING); // "name"
  1724. // store a hidden reference in the registry to make sure the string is kept around even if a lane decides to manually change the "decoda_name" global...
  1725. lua_pushlightuserdata( L, LG_set_debug_threadname); // "name" lud
  1726. lua_pushvalue( L, -2); // "name" lud "name"
  1727. lua_rawset( L, LUA_REGISTRYINDEX); // "name"
  1728. s->debug_name = lua_tostring( L, -1);
  1729. // keep a direct pointer on the string
  1730. THREAD_SETNAME( s->debug_name);
  1731. // to see VM name in Decoda debugger Virtual Machine window
  1732. lua_setglobal( L, "decoda_name"); //
  1733. return 0;
  1734. }
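//
// Usage sketch (illustrative only): inside a lane body, to name both the OS
// thread and the Decoda VM window in one call:
//
//   set_debug_threadname( "worker #1")
//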
  1735. LUAG_FUNC( get_debug_threadname)
  1736. {
  1737. struct s_lane* const s = lua_toLane( L, 1);
  1738. luaL_argcheck( L, lua_gettop( L) == 1, 2, "too many arguments");
  1739. lua_pushstring( L, s->debug_name);
  1740. return 1;
  1741. }
  1742. LUAG_FUNC( set_thread_priority)
  1743. {
  1744. int const prio = luaL_checkint( L, 1);
  1745. // public Lanes API accepts a generic range -3/+3
  1746. // that will be remapped into the platform-specific scheduler priority scheme
  1747. // On some platforms, -3 is equivalent to -2 and +3 to +2
  1748. if( prio < THREAD_PRIO_MIN || prio > THREAD_PRIO_MAX)
  1749. {
  1750. return luaL_error( L, "priority out of range: %d..+%d (%d)", THREAD_PRIO_MIN, THREAD_PRIO_MAX, prio);
  1751. }
  1752. THREAD_SET_PRIORITY( prio);
  1753. return 0;
  1754. }
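//
// Usage sketch (illustrative only, assuming lanes.lua re-exports the module
// function as lanes.set_thread_priority):
//
//   lanes.set_thread_priority( 2) -- above normal, within the generic -3..+3 range
//   lanes.set_thread_priority( 4) -- raises "priority out of range"
//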
  1755. #if USE_DEBUG_SPEW
  1756. // can't use direct LUA_x errcode indexing because the sequence is not the same between Lua 5.1 and 5.2 :-(
  1757. // LUA_ERRERR doesn't have the same value
  1758. struct errcode_name
  1759. {
  1760. int code;
  1761. char const* name;
  1762. };
  1763. static struct errcode_name s_errcodes[] =
  1764. {
  1765. { LUA_OK, "LUA_OK"},
  1766. { LUA_YIELD, "LUA_YIELD"},
  1767. { LUA_ERRRUN, "LUA_ERRRUN"},
  1768. { LUA_ERRSYNTAX, "LUA_ERRSYNTAX"},
  1769. { LUA_ERRMEM, "LUA_ERRMEM"},
  1770. { LUA_ERRGCMM, "LUA_ERRGCMM"},
  1771. { LUA_ERRERR, "LUA_ERRERR"},
  1772. };
  1773. static char const* get_errcode_name( int _code)
  1774. {
  1775. int i;
1776. for( i = 0; i < (int) (sizeof( s_errcodes) / sizeof( s_errcodes[0])); ++ i)
  1777. {
  1778. if( s_errcodes[i].code == _code)
  1779. {
  1780. return s_errcodes[i].name;
  1781. }
  1782. }
  1783. return "<NULL>";
  1784. }
  1785. #endif // USE_DEBUG_SPEW
  1786. #if THREADWAIT_METHOD == THREADWAIT_CONDVAR // implies THREADAPI == THREADAPI_PTHREAD
  1787. static void thread_cleanup_handler( void* opaque)
  1788. {
  1789. struct s_lane* s= (struct s_lane*) opaque;
  1790. MUTEX_LOCK( &s->done_lock);
  1791. s->status = CANCELLED;
  1792. SIGNAL_ONE( &s->done_signal); // wake up master (while 's->done_lock' is on)
  1793. MUTEX_UNLOCK( &s->done_lock);
  1794. }
  1795. #endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
  1796. static THREAD_RETURN_T THREAD_CALLCONV lane_main( void* vs)
  1797. {
  1798. struct s_lane* s = (struct s_lane*) vs;
  1799. int rc, rc2;
  1800. lua_State* L = s->L;
  1801. // Called with the lane function and arguments on the stack
  1802. int const nargs = lua_gettop( L) - 1;
  1803. DEBUGSPEW_CODE( struct s_Universe* U = get_universe( L));
  1804. #if HAVE_LANE_TRACKING
  1805. if( s->U->tracking_first)
  1806. {
  1807. tracking_add( s);
  1808. }
  1809. #endif // HAVE_LANE_TRACKING
  1810. THREAD_MAKE_ASYNCH_CANCELLABLE();
  1811. THREAD_CLEANUP_PUSH( thread_cleanup_handler, s);
  1812. s->status = RUNNING; // PENDING -> RUNNING
  1813. // Tie "set_finalizer()" to the state
  1814. lua_pushcfunction( L, LG_set_finalizer);
  1815. populate_func_lookup_table( L, -1, "set_finalizer");
  1816. lua_setglobal( L, "set_finalizer");
  1817. // Tie "set_debug_threadname()" to the state
  1818. // But don't register it in the lookup database because of the s_lane pointer upvalue
  1819. lua_pushlightuserdata( L, s);
  1820. lua_pushcclosure( L, LG_set_debug_threadname, 1);
  1821. lua_setglobal( L, "set_debug_threadname" );
  1822. // Tie "cancel_test()" to the state
  1823. lua_pushcfunction( L, LG_cancel_test);
  1824. populate_func_lookup_table( L, -1, "cancel_test");
  1825. lua_setglobal( L, "cancel_test");
  1826. #if ERROR_FULL_STACK
  1827. // Tie "set_error_reporting()" to the state
  1828. lua_pushcfunction( L, LG_set_error_reporting);
  1829. populate_func_lookup_table( L, -1, "set_error_reporting");
  1830. lua_setglobal( L, "set_error_reporting");
  1831. STACK_GROW( L, 1);
  1832. lua_pushcfunction( L, lane_error); // func args handler
  1833. lua_insert( L, 1); // handler func args
  1834. #endif // ERROR_FULL_STACK
  1835. rc = lua_pcall( L, nargs, LUA_MULTRET, ERROR_FULL_STACK); // retvals|err
  1836. #if ERROR_FULL_STACK
  1837. lua_remove( L, 1); // retvals|error
  1838. # endif // ERROR_FULL_STACK
  1839. // in case of error and if it exists, fetch stack trace from registry and push it
  1840. push_stack_trace( L, rc, 1);
  1841. DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "Lane %p body: %s (%s)\n" INDENT_END, L, get_errcode_name( rc), (lua_touserdata( L, 1)==CANCEL_ERROR) ? "cancelled" : lua_typename( L, lua_type( L, 1))));
  1842. //STACK_DUMP(L);
  1843. // Call finalizers, if the script has set them up.
  1844. //
  1845. rc2 = run_finalizers( L, rc);
  1846. DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "Lane %p finalizer: %s\n" INDENT_END, L, get_errcode_name( rc2)));
  1847. if( rc2 != LUA_OK) // Error within a finalizer!
  1848. {
  1849. // the finalizer generated an error, and left its own error message [and stack trace] on the stack
  1850. rc = rc2; // we're overruling the earlier script error or normal return
  1851. }
  1852. s->waiting_on = NULL; // just in case
  1853. if( selfdestruct_remove( s)) // check and remove (under lock!)
  1854. {
  1855. // We're a free-running thread and no-one's there to clean us up.
  1856. //
  1857. lua_close( s->L);
  1858. MUTEX_LOCK( &s->U->selfdestruct_cs);
  1859. // done with lua_close(), terminal shutdown sequence may proceed
  1860. -- s->U->selfdestructing_count;
  1861. MUTEX_UNLOCK( &s->U->selfdestruct_cs);
  1862. lane_cleanup( s); // s is freed at this point
  1863. }
  1864. else
  1865. {
  1866. // leave results (1..top) or error message + stack trace (1..2) on the stack - master will copy them
  1867. enum e_status st = (rc == 0) ? DONE : (lua_touserdata( L, 1) == CANCEL_ERROR) ? CANCELLED : ERROR_ST;
1868. // POSIX without PTHREAD_TIMEDJOIN:
  1869. // 'done_lock' protects the -> DONE|ERROR_ST|CANCELLED state change
  1870. //
  1871. #if THREADWAIT_METHOD == THREADWAIT_CONDVAR
  1872. MUTEX_LOCK( &s->done_lock);
  1873. {
  1874. #endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
  1875. s->status = st;
  1876. #if THREADWAIT_METHOD == THREADWAIT_CONDVAR
  1877. SIGNAL_ONE( &s->done_signal); // wake up master (while 's->done_lock' is on)
  1878. }
  1879. MUTEX_UNLOCK( &s->done_lock);
  1880. #endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
  1881. }
  1882. THREAD_CLEANUP_POP( FALSE);
  1883. return 0; // ignored
  1884. }
  1885. // --- If a client wants to transfer stuff of a given module from the current state to another Lane, the module must be required
1886. // with lanes.require, which calls the regular 'require', then populates the lookup database in the source lane
  1887. // module = lanes.require( "modname")
  1888. // upvalue[1]: _G.require
  1889. LUAG_FUNC( require)
  1890. {
  1891. char const* name = lua_tostring( L, 1);
  1892. int const nargs = lua_gettop( L);
  1893. DEBUGSPEW_CODE( struct s_Universe* U = get_universe( L));
  1894. STACK_CHECK( L);
  1895. DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lanes.require %s BEGIN\n" INDENT_END, name));
  1896. DEBUGSPEW_CODE( ++ U->debugspew_indent_depth);
  1897. lua_pushvalue( L, lua_upvalueindex(1)); // "name" require
  1898. lua_insert( L, 1); // require "name"
  1899. lua_call( L, nargs, 1); // module
  1900. populate_func_lookup_table( L, -1, name);
  1901. DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "lanes.require %s END\n" INDENT_END, name));
  1902. DEBUGSPEW_CODE( -- U->debugspew_indent_depth);
  1903. STACK_END( L, 0);
  1904. return 1;
  1905. }
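//
// Usage sketch (illustrative only): in the master state, so that the module's
// C functions become transferable to lanes created afterwards:
//
//   local lfs = lanes.require "lfs" -- instead of a plain require "lfs"
//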
  1906. LUAG_FUNC( thread_gc);
  1907. #define GCCB_KEY (void*)LG_thread_gc
  1908. //---
  1909. // lane_ud= thread_new( function, [libs_str],
  1910. // [cancelstep_uint=0],
  1911. // [prio_int=0],
  1912. // [globals_tbl],
  1913. // [package_tbl],
  1914. // [required],
  1915. // [gc_cb],
  1916. // [... args ...] )
  1917. //
  1918. // Upvalues: metatable to use for 'lane_ud'
  1919. //
  1920. LUAG_FUNC( thread_new)
  1921. {
  1922. lua_State* L2;
  1923. struct s_lane* s;
  1924. struct s_lane** ud;
  1925. char const* libs = lua_tostring( L, 2);
  1926. uint_t cs = luaG_optunsigned( L, 3, 0);
  1927. int const prio = (int) luaL_optinteger( L, 4, 0);
  1928. uint_t glob = lua_isnoneornil( L, 5) ? 0 : 5;
  1929. uint_t package = lua_isnoneornil( L, 6) ? 0 : 6;
  1930. uint_t required = lua_isnoneornil( L, 7) ? 0 : 7;
  1931. uint_t gc_cb = lua_isnoneornil( L, 8) ? 0 : 8;
  1932. #define FIXED_ARGS 8
  1933. uint_t args = lua_gettop(L) - FIXED_ARGS;
  1934. struct s_Universe* U = get_universe( L);
  1935. // public Lanes API accepts a generic range -3/+3
  1936. // that will be remapped into the platform-specific scheduler priority scheme
  1937. // On some platforms, -3 is equivalent to -2 and +3 to +2
  1938. if( prio < THREAD_PRIO_MIN || prio > THREAD_PRIO_MAX)
  1939. {
  1940. return luaL_error( L, "Priority out of range: %d..+%d (%d)", THREAD_PRIO_MIN, THREAD_PRIO_MAX, prio);
  1941. }
  1942. /* --- Create and prepare the sub state --- */
  1943. DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "thread_new: setup\n" INDENT_END));
  1944. DEBUGSPEW_CODE( ++ U->debugspew_indent_depth);
  1945. // populate with selected libraries at the same time
  1946. L2 = luaG_newstate( U, L, libs);
  1947. STACK_GROW( L, 2);
  1948. STACK_GROW( L2, 3);
  1949. // give a default "Lua" name to the thread to see VM name in Decoda debugger
  1950. lua_pushfstring( L2, "Lane #%p", L2);
  1951. lua_setglobal( L2, "decoda_name");
  1952. ASSERT_L( lua_gettop(L2) == 0);
  1953. DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "thread_new: update 'package'\n" INDENT_END));
  1954. // package
  1955. if( package != 0)
  1956. {
1957. // when copying with mode eLM_LaneBody, should raise an error in case of problem, not leave it on the stack
  1958. (void) luaG_inter_copy_package( U, L, L2, package, eLM_LaneBody);
  1959. }
1960. // modules to require in the target lane *before* the function is transferred!
  1961. STACK_CHECK( L);
  1962. STACK_CHECK( L2);
  1963. if( required != 0)
  1964. {
  1965. int nbRequired = 1;
  1966. DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "thread_new: require 'required' list\n" INDENT_END));
  1967. DEBUGSPEW_CODE( ++ U->debugspew_indent_depth);
  1968. // should not happen, was checked in lanes.lua before calling thread_new()
  1969. if( lua_type( L, required) != LUA_TTABLE)
  1970. {
  1971. return luaL_error( L, "expected required module list as a table, got %s", luaL_typename( L, required));
  1972. }
  1973. lua_pushnil( L);
  1974. while( lua_next( L, required) != 0)
  1975. {
  1976. if( lua_type( L, -1) != LUA_TSTRING || lua_type( L, -2) != LUA_TNUMBER || lua_tonumber( L, -2) != nbRequired)
  1977. {
  1978. return luaL_error( L, "required module list should be a list of strings");
  1979. }
  1980. else
  1981. {
  1982. // require the module in the target state, and populate the lookup table there too
  1983. size_t len;
  1984. char const* name = lua_tolstring( L, -1, &len);
  1985. // require the module in the target lane
  1986. STACK_GROW( L2, 2);
  1987. STACK_CHECK( L2);
  1988. lua_getglobal( L2, "require"); // require()?
  1989. if( lua_isnil( L2, -1))
  1990. {
  1991. lua_pop( L2, 1); //
  1992. luaL_error( L, "cannot pre-require modules without loading 'package' library first");
  1993. }
  1994. else
  1995. {
1996. // if it is "lanes" or "lanes.core", make sure we have copied the initial settings over
  1997. // which might not be the case if the libs list didn't include lanes.core or "*"
1998. if( strncmp( name, "lanes.core", len) == 0) // this works for both "lanes" and "lanes.core" because of len
  1999. {
  2000. luaG_copy_one_time_settings( U, L, L2);
  2001. }
  2002. lua_pushlstring( L2, name, len); // require() name
  2003. if( lua_pcall( L2, 1, 1, 0) != LUA_OK) // ret/errcode
  2004. {
  2005. // propagate error to main state if any
  2006. luaG_inter_move( U, L2, L, 1, eLM_LaneBody); //
  2007. return lua_error( L);
  2008. }
  2009. STACK_MID( L2, 1);
  2010. // after requiring the module, register the functions it exported in our name<->function database
  2011. populate_func_lookup_table( L2, -1, name);
  2012. STACK_MID( L2, 1);
  2013. lua_pop( L2, 1);
  2014. }
  2015. STACK_END( L2, 0);
  2016. }
  2017. lua_pop( L, 1);
  2018. ++ nbRequired;
  2019. }
  2020. DEBUGSPEW_CODE( -- U->debugspew_indent_depth);
  2021. }
  2022. STACK_END( L2, 0);
  2023. STACK_END( L, 0);
  2024. // Appending the specified globals to the global environment
  2025. // *after* stdlibs have been loaded and modules required, in case we transfer references to native functions they exposed...
  2026. //
  2027. if( glob != 0)
  2028. {
  2029. DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "thread_new: transfer globals\n" INDENT_END));
  2030. STACK_CHECK( L);
  2031. STACK_CHECK( L2);
  2032. if( !lua_istable( L, glob))
  2033. {
  2034. return luaL_error( L, "Expected table, got %s", luaL_typename( L, glob));
  2035. }
  2036. DEBUGSPEW_CODE( ++ U->debugspew_indent_depth);
  2037. lua_pushnil( L);
  2038. lua_pushglobaltable( L2); // Lua 5.2 wants us to push the globals table on the stack
  2039. while( lua_next( L, glob))
  2040. {
  2041. luaG_inter_copy( U, L, L2, 2, eLM_LaneBody); // moves the key/value pair to the L2 stack
  2042. // assign it in L2's globals table
  2043. lua_rawset( L2, -3);
  2044. lua_pop( L, 1);
  2045. }
  2046. lua_pop( L2, 1);
  2047. STACK_END( L2, 0);
  2048. STACK_END( L, 0);
  2049. DEBUGSPEW_CODE( -- U->debugspew_indent_depth);
  2050. }
  2051. STACK_CHECK( L);
  2052. STACK_CHECK( L2);
  2053. ASSERT_L( lua_gettop( L2) == 0);
  2054. // Lane main function
  2055. if( lua_type( L, 1) == LUA_TFUNCTION)
  2056. {
  2057. int res;
  2058. DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "thread_new: transfer lane body\n" INDENT_END));
  2059. DEBUGSPEW_CODE( ++ U->debugspew_indent_depth);
  2060. lua_pushvalue( L, 1);
  2061. res = luaG_inter_move( U, L, L2, 1, eLM_LaneBody); // L->L2
  2062. DEBUGSPEW_CODE( -- U->debugspew_indent_depth);
  2063. if( res != 0)
  2064. {
  2065. return luaL_error( L, "tried to copy unsupported types");
  2066. }
  2067. STACK_MID( L, 0);
  2068. }
  2069. else if( lua_type( L, 1) == LUA_TSTRING)
  2070. {
  2071. // compile the string
  2072. if( luaL_loadstring( L2, lua_tostring( L, 1)) != 0)
  2073. {
  2074. return luaL_error( L, "error when parsing lane function code");
  2075. }
  2076. }
  2077. STACK_MID( L2, 1);
  2078. ASSERT_L( lua_isfunction( L2, 1));
  2079. // revive arguments
  2080. //
  2081. if( args > 0)
  2082. {
  2083. int res;
  2084. DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "thread_new: transfer lane arguments\n" INDENT_END));
  2085. DEBUGSPEW_CODE( ++ U->debugspew_indent_depth);
  2086. res = luaG_inter_copy( U, L, L2, args, eLM_LaneBody); // L->L2
  2087. DEBUGSPEW_CODE( -- U->debugspew_indent_depth);
  2088. if( res != 0)
  2089. {
  2090. return luaL_error( L, "tried to copy unsupported types");
  2091. }
  2092. }
  2093. STACK_MID( L, 0);
  2094. STACK_END( L2, 1 + args);
  2095. // 's' is allocated from heap, not Lua, since its life span may surpass
2096. // the handle's (if it becomes a free-running thread)
  2097. //
  2098. ud = lua_newuserdata( L, sizeof( struct s_lane*));
  2099. s = *ud = (struct s_lane*) malloc( sizeof( struct s_lane));
  2100. if( s == NULL)
  2101. {
  2102. return luaL_error( L, "could not create lane: out of memory");
  2103. }
  2104. s->L = L2;
  2105. s->U = U;
  2106. s->status = PENDING;
  2107. s->waiting_on = NULL;
  2108. s->debug_name = "<unnamed>";
  2109. s->cancel_request = CANCEL_NONE;
  2110. #if THREADWAIT_METHOD == THREADWAIT_CONDVAR
  2111. MUTEX_INIT( &s->done_lock);
  2112. SIGNAL_INIT( &s->done_signal);
  2113. #endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR
  2114. s->mstatus = NORMAL;
  2115. s->selfdestruct_next = NULL;
  2116. #if HAVE_LANE_TRACKING
  2117. s->tracking_next = NULL;
  2118. #endif // HAVE_LANE_TRACKING
  2119. // Set metatable for the userdata
  2120. //
  2121. lua_pushvalue( L, lua_upvalueindex( 1));
  2122. lua_setmetatable( L, -2);
  2123. STACK_MID( L, 1);
  2124. // Create uservalue for the userdata
  2125. // (this is where lane body return values will be stored when the handle is indexed by a numeric key)
  2126. lua_newtable( L);
  2127. // Store the gc_cb callback in the uservalue
  2128. if( gc_cb > 0)
  2129. {
  2130. lua_pushlightuserdata( L, GCCB_KEY);
  2131. lua_pushvalue( L, gc_cb);
  2132. lua_rawset( L, -3);
  2133. }
  2134. lua_setuservalue( L, -2);
  2135. // Store 's' in the lane's registry, for 'cancel_test()' (even if 'cs'==0 we still do cancel tests at pending send/receive).
  2136. lua_pushlightuserdata( L2, CANCEL_TEST_KEY);
  2137. lua_pushlightuserdata( L2, s);
  2138. lua_rawset( L2, LUA_REGISTRYINDEX);
  2139. if( cs)
  2140. {
  2141. lua_sethook( L2, cancel_hook, LUA_MASKCOUNT, cs);
  2142. }
  2143. DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "thread_new: launching thread\n" INDENT_END));
  2144. THREAD_CREATE( &s->thread, lane_main, s, prio);
  2145. STACK_END( L, 1);
  2146. DEBUGSPEW_CODE( -- U->debugspew_indent_depth);
  2147. return 1;
  2148. }
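//
// Invocation sketch (illustrative only; in practice lanes.gen() in lanes.lua
// fills the fixed slots before calling thread_new):
//
//   local h = thread_new( body_func, "base,string", 0, 0,
//                         nil, nil, nil, nil, -- globals, package, required, gc_cb
//                         "arg1", "arg2")
//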
  2149. //---
  2150. // = thread_gc( lane_ud )
  2151. //
  2152. // Cleanup for a thread userdata. If the thread is still executing, leave it
  2153. // alive as a free-running thread (will clean up itself).
  2154. //
  2155. // * Why NOT cancel/kill a loose thread:
  2156. //
2157. // At least the timer system uses a free-running thread; such threads should be handy,
2158. // and the issue of cancelling/killing threads at gc is not very nice, either
2159. // (it would easily cause waits at gc cycle, which we don't want).
  2160. //
  2161. LUAG_FUNC( thread_gc)
  2162. {
  2163. bool_t have_gc_cb = FALSE;
  2164. struct s_lane* s = lua_toLane( L, 1); // ud
2165. // is there a gc callback?
  2166. lua_getuservalue( L, 1); // ud uservalue
  2167. lua_pushlightuserdata( L, GCCB_KEY); // ud uservalue __gc
  2168. lua_rawget( L, -2); // ud uservalue gc_cb|nil
  2169. if( !lua_isnil( L, -1))
  2170. {
  2171. lua_remove( L, -2); // ud gc_cb|nil
  2172. lua_pushstring( L, s->debug_name); // ud gc_cb name
  2173. have_gc_cb = TRUE;
  2174. }
  2175. else
  2176. {
  2177. lua_pop( L, 2); // ud
  2178. }
  2179. // We can read 's->status' without locks, but not wait for it
  2180. // test KILLED state first, as it doesn't need to enter the selfdestruct chain
  2181. if( s->mstatus == KILLED)
  2182. {
  2183. // Make sure a kill has proceeded, before cleaning up the data structure.
  2184. //
  2185. // NO lua_close() in this case because we don't know where execution of the state was interrupted
  2186. DEBUGSPEW_CODE( fprintf( stderr, "** Joining with a killed thread (needs testing) **"));
  2187. // make sure the thread is no longer running, just like thread_join()
  2188. if(! THREAD_ISNULL( s->thread))
  2189. {
  2190. THREAD_WAIT( &s->thread, -1, &s->done_signal, &s->done_lock, &s->status);
  2191. }
  2192. if( s->status >= DONE && s->L)
  2193. {
  2194. // we know the thread was killed while the Lua VM was not doing anything: we should be able to close it without crashing
  2195. // now, thread_cancel() will not forcefully kill a lane with s->status >= DONE, so I am not sure it can ever happen
  2196. lua_close( s->L);
  2197. s->L = 0;
  2198. // just in case, but s will be freed soon so...
  2199. s->debug_name = "<gc>";
  2200. }
  2201. DEBUGSPEW_CODE( fprintf( stderr, "** Joined ok **"));
  2202. }
  2203. else if( s->status < DONE)
  2204. {
  2205. // still running: will have to be cleaned up later
  2206. selfdestruct_add( s);
  2207. assert( s->selfdestruct_next);
  2208. if( have_gc_cb)
  2209. {
  2210. lua_pushliteral( L, "selfdestruct"); // ud gc_cb name status
  2211. lua_call( L, 2, 0); // ud
  2212. }
  2213. return 0;
  2214. }
  2215. else if( s->L)
  2216. {
  2217. // no longer accessing the Lua VM: we can close right now
  2218. lua_close( s->L);
  2219. s->L = 0;
  2220. // just in case, but s will be freed soon so...
  2221. s->debug_name = "<gc>";
  2222. }
  2223. // Clean up after a (finished) thread
  2224. lane_cleanup( s);
  2225. // do this after lane cleanup in case the callback triggers an error
  2226. if( have_gc_cb)
  2227. {
  2228. lua_pushliteral( L, "closed"); // ud gc_cb name status
  2229. lua_call( L, 2, 0); // ud
  2230. }
  2231. return 0;
  2232. }
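//
// gc_cb sketch (illustrative only, assuming lanes.gen() forwards a 'gc_cb'
// option into the fixed argument slot of thread_new): the callback receives
// the lane's debug name and either "selfdestruct" or "closed", as pushed above:
//
//   local h = lanes.gen( "*", { gc_cb = function( name, status)
//       print( "lane collected:", name, status)
//   end}, body)()
//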
  2233. // lane_h:cancel( [timeout] [, force [, forcekill_timeout]])
  2234. LUAG_FUNC( thread_cancel)
  2235. {
  2236. struct s_lane* s = lua_toLane( L, 1);
  2237. double secs = 0.0;
  2238. int force_i = 2;
  2239. int forcekill_timeout_i = 3;
  2240. if( lua_isnumber( L, 2))
  2241. {
  2242. secs = lua_tonumber( L, 2);
  2243. if( secs < 0.0 && lua_gettop( L) > 3)
  2244. {
  2245. return luaL_error( L, "can't force_kill a soft cancel");
  2246. }
  2247. // negative timeout and force flag means we want to wake linda-waiting threads
  2248. ++ force_i;
  2249. ++ forcekill_timeout_i;
  2250. }
  2251. else if( lua_isnil( L, 2))
  2252. {
  2253. ++ force_i;
  2254. ++ forcekill_timeout_i;
  2255. }
  2256. {
  2257. bool_t force = lua_toboolean( L, force_i); // FALSE if nothing there
  2258. double forcekill_timeout = luaL_optnumber( L, forcekill_timeout_i, 0.0);
  2259. switch( thread_cancel( L, s, secs, force, forcekill_timeout))
  2260. {
  2261. case CR_Timeout:
  2262. lua_pushboolean( L, 0);
  2263. lua_pushstring( L, "timeout");
  2264. return 2;
  2265. case CR_Cancelled:
  2266. lua_pushboolean( L, 1);
  2267. return 1;
  2268. case CR_Killed:
  2269. lua_pushboolean( L, 0);
  2270. lua_pushstring( L, "killed");
  2271. return 2;
  2272. }
  2273. }
  2274. // should never happen, only here to prevent the compiler from complaining of "not all control paths returning a value"
  2275. return 0;
  2276. }
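//
// Usage sketch (illustrative only), matching the three outcomes above:
//
//   local ok, reason = lane_h:cancel( 0.5) -- hard cancel, wait up to 0.5s
//   -- ok == true -> cancelled; ok == false -> reason is "timeout" or "killed"
//   lane_h:cancel( 0.5, true) -- same, but kill the OS thread if the timeout elapses
//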
  2277. //---
  2278. // str= thread_status( lane )
  2279. //
  2280. // Returns: "pending" not started yet
  2281. // -> "running" started, doing its work..
  2282. // <-> "waiting" blocked in a receive()
  2283. // -> "done" finished, results are there
  2284. // / "error" finished at an error, error value is there
  2285. // / "cancelled" execution cancelled by M (state gone)
  2286. //
  2287. static char const * thread_status_string( struct s_lane* s)
  2288. {
  2289. enum e_status st = s->status; // read just once (volatile)
  2290. char const* str =
  2291. (s->mstatus == KILLED) ? "killed" : // new to v3.3.0!
  2292. (st == PENDING) ? "pending" :
  2293. (st == RUNNING) ? "running" : // like in 'co.status()'
  2294. (st == WAITING) ? "waiting" :
  2295. (st == DONE) ? "done" :
  2296. (st == ERROR_ST) ? "error" :
  2297. (st == CANCELLED) ? "cancelled" : NULL;
  2298. return str;
  2299. }
  2300. static int push_thread_status( lua_State* L, struct s_lane* s)
  2301. {
  2302. char const* const str = thread_status_string( s);
  2303. ASSERT_L( str);
  2304. lua_pushstring( L, str);
  2305. return 1;
  2306. }
  2307. //---
  2308. // [...] | [nil, err_any, stack_tbl]= thread_join( lane_ud [, wait_secs=-1] )
  2309. //
  2310. // timeout: returns nil
  2311. // done: returns return values (0..N)
  2312. // error: returns nil + error value [+ stack table]
  2313. // cancelled: returns nil
  2314. //
  2315. LUAG_FUNC( thread_join)
  2316. {
  2317. struct s_lane* const s = lua_toLane( L, 1);
  2318. double wait_secs = luaL_optnumber( L, 2, -1.0);
  2319. lua_State* L2 = s->L;
  2320. int ret;
  2321. bool_t done = THREAD_ISNULL( s->thread) || THREAD_WAIT( &s->thread, wait_secs, &s->done_signal, &s->done_lock, &s->status);
  2322. if( !done || !L2)
  2323. {
  2324. return 0; // timeout: pushes none, leaves 'L2' alive
  2325. }
  2326. STACK_CHECK( L);
  2327. // Thread is DONE/ERROR_ST/CANCELLED; all ours now
  2328. if( s->mstatus == KILLED) // OS thread was killed if thread_cancel was forced
  2329. {
  2330. // in that case, even if the thread was killed while DONE/ERROR_ST/CANCELLED, ignore regular return values
  2331. STACK_GROW( L, 2);
  2332. lua_pushnil( L);
  2333. lua_pushliteral( L, "killed");
  2334. ret = 2;
  2335. }
  2336. else
  2337. {
  2338. struct s_Universe* U = get_universe( L);
2339. // debug_name is a pointer to a string possibly interned in the lane's state, which no longer exists when the state is closed
  2340. // so store it in the userdata uservalue at a key that can't possibly collide
  2341. securize_debug_threadname( L, s);
  2342. switch( s->status)
  2343. {
  2344. case DONE:
  2345. {
  2346. uint_t n = lua_gettop( L2); // whole L2 stack
  2347. if( (n > 0) && (luaG_inter_move( U, L2, L, n, eLM_LaneBody) != 0))
  2348. {
  2349. return luaL_error( L, "tried to copy unsupported types");
  2350. }
  2351. ret = n;
  2352. }
  2353. break;
  2354. case ERROR_ST:
  2355. {
  2356. int const n = lua_gettop( L2);
  2357. STACK_GROW( L, 3);
  2358. lua_pushnil( L);
  2359. // even when ERROR_FULL_STACK, if the error is not LUA_ERRRUN, the handler wasn't called, and we only have 1 error message on the stack ...
  2360. if( luaG_inter_move( U, L2, L, n, eLM_LaneBody) != 0) // nil "err" [trace]
  2361. {
  2362. return luaL_error( L, "tried to copy unsupported types: %s", lua_tostring( L, -n));
  2363. }
  2364. ret = 1 + n;
  2365. }
  2366. break;
  2367. case CANCELLED:
  2368. ret = 0;
  2369. break;
  2370. default:
  2371. DEBUGSPEW_CODE( fprintf( stderr, "Status: %d\n", s->status));
  2372. ASSERT_L( FALSE);
  2373. ret = 0;
  2374. }
  2375. lua_close( L2);
  2376. }
  2377. s->L = 0;
  2378. STACK_END( L, ret);
  2379. return ret;
  2380. }
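//
// Usage sketch (illustrative only), matching the doc block above:
//
//   local r1, r2 = lane_h:join( 1.0)
//   if r1 == nil and r2 ~= nil then error( r2) end -- lane errored (or was killed)
//   -- otherwise r1, ... are the body's return values (none on timeout/cancellation)
//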
  2381. //---
  2382. // thread_index( ud, key) -> value
  2383. //
  2384. // If key is found in the environment, return it
  2385. // If key is numeric, wait until the thread returns and populate the environment with the return values
  2386. // If the return values signal an error, propagate it
  2387. // If key is "status" return the thread status
  2388. // Else raise an error
  2389. LUAG_FUNC( thread_index)
  2390. {
  2391. int const UD = 1;
  2392. int const KEY = 2;
  2393. int const USR = 3;
  2394. struct s_lane* const s = lua_toLane( L, UD);
  2395. ASSERT_L( lua_gettop( L) == 2);
  2396. STACK_GROW( L, 8); // up to 8 positions are needed in case of error propagation
  2397. // If key is numeric, wait until the thread returns and populate the environment with the return values
  2398. if( lua_type( L, KEY) == LUA_TNUMBER)
  2399. {
  2400. // first, check that we don't already have an environment that holds the requested value
  2401. {
  2402. // If key is found in the uservalue, return it
  2403. lua_getuservalue( L, UD);
  2404. lua_pushvalue( L, KEY);
  2405. lua_rawget( L, USR);
  2406. if( !lua_isnil( L, -1))
  2407. {
  2408. return 1;
  2409. }
  2410. lua_pop( L, 1);
  2411. }
  2412. {
  2413. // check if we already fetched the values from the thread or not
  2414. bool_t fetched;
  2415. lua_Integer key = lua_tointeger( L, KEY);
  2416. lua_pushinteger( L, 0);
  2417. lua_rawget( L, USR);
  2418. fetched = !lua_isnil( L, -1);
  2419. lua_pop( L, 1); // back to our 2 args + uservalue on the stack
  2420. if( !fetched)
  2421. {
  2422. lua_pushinteger( L, 0);
  2423. lua_pushboolean( L, 1);
  2424. lua_rawset( L, USR);
  2425. // wait until thread has completed
  2426. lua_pushcfunction( L, LG_thread_join);
  2427. lua_pushvalue( L, UD);
  2428. lua_call( L, 1, LUA_MULTRET); // all return values are on the stack, at slots 4+
  2429. switch( s->status)
  2430. {
  2431. default:
  2432. if( s->mstatus != KILLED)
  2433. {
  2434. // this is an internal error, we probably never get here
  2435. lua_settop( L, 0);
  2436. lua_pushliteral( L, "Unexpected status: ");
  2437. lua_pushstring( L, thread_status_string( s));
  2438. lua_concat( L, 2);
  2439. lua_error( L);
  2440. break;
  2441. }
  2442. // fall through if we are killed, as we got nil, "killed" on the stack
  2443. case DONE: // got regular return values
  2444. {
  2445. int i, nvalues = lua_gettop( L) - 3;
  2446. for( i = nvalues; i > 0; -- i)
  2447. {
  2448. // pop the last element of the stack, to store it in the uservalue at its proper index
  2449. lua_rawseti( L, USR, i);
  2450. }
  2451. }
  2452. break;
  2453. case ERROR_ST: // got 3 values: nil, errstring, callstack table
  2454. // me[-2] could carry the stack table, but even
  2455. // me[-1] is rather unnecessary (and undocumented);
  2456. // use ':join()' instead. --AKa 22-Jan-2009
  2457. ASSERT_L( lua_isnil( L, 4) && !lua_isnil( L, 5) && lua_istable( L, 6));
  2458. // store errstring at key -1
  2459. lua_pushnumber( L, -1);
  2460. lua_pushvalue( L, 5);
  2461. lua_rawset( L, USR);
  2462. break;
  2463. case CANCELLED:
  2464. // do nothing
  2465. break;
  2466. }
  2467. }
  2468. lua_settop( L, 3); // UD KEY ENV
  2469. if( key != -1)
  2470. {
  2471. lua_pushnumber( L, -1); // UD KEY ENV -1
  2472. lua_rawget( L, USR); // UD KEY ENV "error"
  2473. if( !lua_isnil( L, -1)) // an error was stored
  2474. {
  2475. // Note: Lua 5.1 interpreter is not prepared to show
  2476. // non-string errors, so we use 'tostring()' here
  2477. // to get meaningful output. --AKa 22-Jan-2009
  2478. //
  2479. // Also, the stack dump we get is no good; it only
  2480. // lists our internal Lanes functions. There seems
  2481. // to be no way to switch it off, though.
  2482. //
  2483. // Level 3 should show the line where 'h[x]' was read
  2484. // but this only seems to work for string messages
  2485. // (Lua 5.1.4). No idea, why. --AKa 22-Jan-2009
  2486. lua_getmetatable( L, UD); // UD KEY ENV "error" mt
  2487. lua_getfield( L, -1, "cached_error"); // UD KEY ENV "error" mt error()
  2488. lua_getfield( L, -2, "cached_tostring"); // UD KEY ENV "error" mt error() tostring()
  2489. lua_pushvalue( L, 4); // UD KEY ENV "error" mt error() tostring() "error"
  2490. lua_call( L, 1, 1); // tostring( errstring) -- just in case // UD KEY ENV "error" mt error() "error"
  2491. lua_pushinteger( L, 3); // UD KEY ENV "error" mt error() "error" 3
  2492. lua_call( L, 2, 0); // error( tostring( errstring), 3) // UD KEY ENV "error" mt
  2493. }
  2494. else
  2495. {
  2496. lua_pop( L, 1); // back to our 3 arguments on the stack
  2497. }
  2498. }
  2499. lua_rawgeti( L, USR, (int)key);
  2500. }
  2501. return 1;
  2502. }
  2503. if( lua_type( L, KEY) == LUA_TSTRING)
  2504. {
  2505. char const * const keystr = lua_tostring( L, KEY);
  2506. lua_settop( L, 2); // keep only our original arguments on the stack
  2507. if( strcmp( keystr, "status") == 0)
  2508. {
  2509. return push_thread_status( L, s); // push the string representing the status
  2510. }
  2511. // return UD.metatable[key]
  2512. lua_getmetatable( L, UD); // UD KEY mt
  2513. lua_replace( L, -3); // mt KEY
  2514. lua_rawget( L, -2); // mt value
  2515. // only "cancel" and "join" are registered as functions, any other string will raise an error
  2516. if( lua_iscfunction( L, -1))
  2517. {
  2518. return 1;
  2519. }
  2520. return luaL_error( L, "can't index a lane with '%s'", keystr);
  2521. }
  2522. // unknown key
  2523. lua_getmetatable( L, UD);
  2524. lua_getfield( L, -1, "cached_error");
  2525. lua_pushliteral( L, "Unknown key: ");
  2526. lua_pushvalue( L, KEY);
  2527. lua_concat( L, 2);
  2528. lua_call( L, 1, 0); // error( "Unknown key: " .. key) -> doesn't return
  2529. return 0;
  2530. }
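//
// Indexing sketch (illustrative only), matching the rules above:
//
//   local status = lane_h.status -- "pending", "running", ... "cancelled"
//   local first = lane_h[1] -- joins if needed, then returns the first result
//   -- (indexing re-raises the lane's error, if any, at level 3)
//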
  2531. #if HAVE_LANE_TRACKING
  2532. //---
  2533. // threads() -> {}|nil
  2534. //
  2535. // Return a list of all known lanes
  2536. LUAG_FUNC( threads)
  2537. {
  2538. int const top = lua_gettop( L);
  2539. struct s_Universe* U = get_universe( L);
  2540. // List _all_ still running threads
  2541. //
  2542. MUTEX_LOCK( &U->tracking_cs);
  2543. if( U->tracking_first && U->tracking_first != TRACKING_END)
  2544. {
  2545. struct s_lane* s = U->tracking_first;
  2546. lua_newtable( L); // {}
  2547. while( s != TRACKING_END)
  2548. {
  2549. lua_pushstring( L, s->debug_name); // {} "name"
  2550. push_thread_status( L, s); // {} "name" "status"
  2551. lua_rawset( L, -3); // {}
  2552. s = s->tracking_next;
  2553. }
  2554. }
  2555. MUTEX_UNLOCK( &U->tracking_cs);
  2556. return lua_gettop( L) - top;
  2557. }
  2558. #endif // HAVE_LANE_TRACKING
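//
// Usage sketch (illustrative only; threads() is registered only when the
// 'track_lanes' setting is enabled in lanes.configure()):
//
//   for name, status in pairs( lanes.threads() or {}) do
//       print( name, status)
//   end
//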
  2559. /*
  2560. * ###############################################################################################
  2561. * ######################################## Timer support ########################################
  2562. * ###############################################################################################
  2563. */
  2564. /*
  2565. * secs= now_secs()
  2566. *
  2567. * Returns the current time, as seconds (millisecond resolution).
  2568. */
  2569. LUAG_FUNC( now_secs )
  2570. {
  2571. lua_pushnumber( L, now_secs() );
  2572. return 1;
  2573. }
  2574. /*
  2575. * wakeup_at_secs= wakeup_conv( date_tbl )
  2576. */
  2577. LUAG_FUNC( wakeup_conv )
  2578. {
  2579. int year, month, day, hour, min, sec, isdst;
  2580. struct tm t;
  2581. memset( &t, 0, sizeof( t));
  2582. //
  2583. // .year (four digits)
  2584. // .month (1..12)
  2585. // .day (1..31)
  2586. // .hour (0..23)
  2587. // .min (0..59)
  2588. // .sec (0..61)
  2589. // .yday (day of the year)
  2590. // .isdst (daylight saving on/off)
  2591. STACK_CHECK( L);
  2592. lua_getfield( L, 1, "year" ); year= (int)lua_tointeger(L,-1); lua_pop(L,1);
  2593. lua_getfield( L, 1, "month" ); month= (int)lua_tointeger(L,-1); lua_pop(L,1);
  2594. lua_getfield( L, 1, "day" ); day= (int)lua_tointeger(L,-1); lua_pop(L,1);
  2595. lua_getfield( L, 1, "hour" ); hour= (int)lua_tointeger(L,-1); lua_pop(L,1);
  2596. lua_getfield( L, 1, "min" ); min= (int)lua_tointeger(L,-1); lua_pop(L,1);
  2597. lua_getfield( L, 1, "sec" ); sec= (int)lua_tointeger(L,-1); lua_pop(L,1);
2598. // If the Lua table has '.isdst' we trust that. If it does not, we'll let
  2599. // 'mktime' decide on whether the time is within DST or not (value -1).
  2600. //
  2601. lua_getfield( L, 1, "isdst" );
  2602. isdst= lua_isboolean(L,-1) ? lua_toboolean(L,-1) : -1;
  2603. lua_pop(L,1);
  2604. STACK_END( L, 0);
  2605. t.tm_year= year-1900;
  2606. t.tm_mon= month-1; // 0..11
  2607. t.tm_mday= day; // 1..31
  2608. t.tm_hour= hour; // 0..23
  2609. t.tm_min= min; // 0..59
  2610. t.tm_sec= sec; // 0..60
  2611. t.tm_isdst= isdst; // 0/1/negative
  2612. lua_pushnumber( L, (double) mktime( &t)); // ms=0
  2613. return 1;
  2614. }
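//
// Usage sketch (illustrative only): convert a date table, as returned by
// os.date( "*t"), into an absolute wakeup time for the timer system:
//
//   local secs = wakeup_conv{ year = 2014, month = 12, day = 31,
//                             hour = 23, min = 59, sec = 0}
//   -- '.isdst' omitted: mktime() decides whether DST applies
//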
  2615. /*
  2616. * ###############################################################################################
  2617. * ######################################## Module linkage #######################################
  2618. * ###############################################################################################
  2619. */
  2620. static const struct luaL_Reg lanes_functions [] = {
  2621. {"linda", LG_linda},
  2622. {"now_secs", LG_now_secs},
  2623. {"wakeup_conv", LG_wakeup_conv},
  2624. {"set_thread_priority", LG_set_thread_priority},
  2625. {"nameof", luaG_nameof},
  2626. {"set_singlethreaded", LG_set_singlethreaded},
  2627. {NULL, NULL}
  2628. };
  2629. /*
  2630. * One-time initializations
2631. * settings table is at position 1 on the stack
  2632. * pushes an error string on the stack in case of problem
  2633. */
  2634. static void init_once_LOCKED( void)
  2635. {
  2636. #if (defined PLATFORM_WIN32) || (defined PLATFORM_POCKETPC)
  2637. now_secs(); // initialize 'now_secs()' internal offset
  2638. #endif
  2639. #if (defined PLATFORM_OSX) && (defined _UTILBINDTHREADTOCPU)
  2640. chudInitialize();
  2641. #endif
  2642. //---
  2643. // Linux needs SCHED_RR to change thread priorities, and that is only
  2644. // allowed for sudo'ers. SCHED_OTHER (default) has no priorities.
  2645. // SCHED_OTHER threads are always lower priority than SCHED_RR.
  2646. //
2647. // ^-- those apply to the 2.6 kernel. If (wishful thinking) these
2648. // constraints change in the future, non-sudo priorities can
2649. // be enabled for Linux as well.
  2650. //
  2651. #ifdef PLATFORM_LINUX
2652. sudo = (geteuid() == 0); // are we root?
  2653. // If lower priorities (-2..-1) are wanted, we need to lift the main
  2654. // thread to SCHED_RR and 50 (medium) level. Otherwise, we're always below
  2655. // the launched threads (even -2).
  2656. //
  2657. #ifdef LINUX_SCHED_RR
  2658. if( sudo)
  2659. {
  2660. struct sched_param sp;
  2661. sp.sched_priority = _PRIO_0;
  2662. PT_CALL( pthread_setschedparam( pthread_self(), SCHED_RR, &sp));
  2663. }
  2664. #endif // LINUX_SCHED_RR
  2665. #endif // PLATFORM_LINUX
  2666. }
  2667. static volatile long s_initCount = 0;
  2668. // upvalue 1: module name
  2669. // upvalue 2: module table
  2670. // param 1: settings table
  2671. LUAG_FUNC( configure)
  2672. {
  2673. struct s_Universe* U = get_universe( L);
  2674. char const* name = luaL_checkstring( L, lua_upvalueindex( 1));
  2675. _ASSERT_L( L, lua_type( L, 1) == LUA_TTABLE);
  2676. /*
  2677. ** Making one-time initializations.
  2678. **
  2679. ** When the host application is single-threaded (and all threading happens via Lanes)
  2680. ** there is no problem. But if the host is multithreaded, we need to lock around the
  2681. ** initializations.
  2682. */
  2683. #if THREADAPI == THREADAPI_WINDOWS
  2684. {
  2685. static volatile int /*bool*/ go_ahead; // = 0
  2686. if( InterlockedCompareExchange( &s_initCount, 1, 0) == 0)
  2687. {
  2688. init_once_LOCKED();
  2689. go_ahead = 1; // let others pass
  2690. }
  2691. else
  2692. {
  2693. while( !go_ahead) { Sleep(1); } // changes threads
  2694. }
  2695. }
  2696. #else // THREADAPI == THREADAPI_PTHREAD
  2697. if( s_initCount == 0)
  2698. {
  2699. static pthread_mutex_t my_lock = PTHREAD_MUTEX_INITIALIZER;
  2700. pthread_mutex_lock( &my_lock);
  2701. {
  2702. // Recheck now that we're within the lock
  2703. //
  2704. if( s_initCount == 0)
  2705. {
  2706. init_once_LOCKED();
  2707. s_initCount = 1;
  2708. }
  2709. }
  2710. pthread_mutex_unlock( &my_lock);
  2711. }
  2712. #endif // THREADAPI == THREADAPI_PTHREAD
  2713. STACK_GROW( L, 4);
  2714. STACK_CHECK( L);
  2715. DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() BEGIN\n" INDENT_END, L));
  2716. DEBUGSPEW_CODE( ++ U->debugspew_indent_depth);
  2717. lua_getfield( L, 1, "protect_allocator"); // settings protect_allocator
  2718. if( lua_toboolean( L, -1))
  2719. {
  2720. void* allocUD;
  2721. lua_Alloc allocF = lua_getallocf( L, &allocUD);
  2722. if( allocF != protected_lua_Alloc) // just in case
  2723. {
  2724. struct ProtectedAllocator_s* s = (struct ProtectedAllocator_s*) allocF( allocUD, NULL, 0, sizeof( struct ProtectedAllocator_s));
  2725. s->allocF = allocF;
  2726. s->allocUD = allocUD;
  2727. MUTEX_INIT( &s->lock);
  2728. lua_setallocf( L, protected_lua_Alloc, s);
  2729. }
  2730. }
  2731. lua_pop( L, 1); // settings
  2732. STACK_MID( L, 0);
  2733. // grab or create the universe
  2734. if( U == NULL)
  2735. {
  2736. lua_pushlightuserdata( L, UNIVERSE_REGKEY); // settings UNIVERSE_REGKEY
  2737. U = (struct s_Universe*) lua_newuserdata( L, sizeof( struct s_Universe)); // settings UNIVERSE_REGKEY universe
  2738. memset( U, 0, sizeof( struct s_Universe));
  2739. lua_newtable( L); // settings UNIVERSE_REGKEY universe mt
  2740. lua_getfield( L, 1, "shutdown_timeout"); // settings UNIVERSE_REGKEY universe mt shutdown_timeout
  2741. lua_pushcclosure( L, selfdestruct_gc, 1); // settings UNIVERSE_REGKEY universe mt selfdestruct_gc
  2742. lua_setfield( L, -2, "__gc"); // settings UNIVERSE_REGKEY universe mt
  2743. lua_setmetatable( L, -2); // settings UNIVERSE_REGKEY universe
  2744. lua_rawset( L, LUA_REGISTRYINDEX); // settings
  2745. lua_getfield( L, 1, "verbose_errors"); // settings verbose_errors
  2746. U->verboseErrors = lua_toboolean( L, -1);
  2747. lua_pop( L, 1); // settings
  2748. #if HAVE_LANE_TRACKING
  2749. MUTEX_INIT( &U->tracking_cs);
  2750. lua_getfield( L, 1, "track_lanes"); // settings track_lanes
  2751. U->tracking_first = lua_toboolean( L, -1) ? TRACKING_END : NULL;
  2752. lua_pop( L, 1); // settings
  2753. #endif // HAVE_LANE_TRACKING
  2754. // Linked chains handling
  2755. MUTEX_INIT( &U->selfdestruct_cs);
  2756. MUTEX_RECURSIVE_INIT( &U->require_cs);
  2757. // Locks for 'tools.c' inc/dec counters
  2758. MUTEX_INIT( &U->deep_lock);
  2759. MUTEX_INIT( &U->mtid_lock);
  2760. U->selfdestruct_first = SELFDESTRUCT_END;
  2761. initialize_on_state_create( U, L);
  2762. init_keepers( U, L);
  2763. STACK_MID( L, 0);
  2764. // Initialize 'timer_deep'; a common Linda object shared by all states
  2765. lua_pushcfunction( L, LG_linda); // settings lanes.linda
  2766. lua_pushliteral( L, "lanes-timer"); // settings lanes.linda "lanes-timer"
  2767. lua_call( L, 1, 1); // settings linda
  2768. STACK_MID( L, 1);
2769. // Proxy userdata content is only a 'DEEP_PRELUDE*' pointer
  2770. U->timer_deep = * (DEEP_PRELUDE**) lua_touserdata( L, -1);
  2771. ASSERT_L( U->timer_deep && (U->timer_deep->refcount == 1) && U->timer_deep->deep && U->timer_deep->idfunc == linda_id);
2772. // increment refcount so that this linda remains alive as long as the universe is.
  2773. ++ U->timer_deep->refcount;
  2774. lua_pop( L, 1); // settings
  2775. }
  2776. STACK_MID( L, 0);
  2777. // Serialize calls to 'require' from now on, also in the primary state
  2778. serialize_require( U, L);
  2779. // Retrieve main module interface table
  2780. lua_pushvalue( L, lua_upvalueindex( 2)); // settings M
  2781. // remove configure() (this function) from the module interface
  2782. lua_pushnil( L); // settings M nil
  2783. lua_setfield( L, -2, "configure"); // settings M
  2784. // add functions to the module's table
  2785. luaG_registerlibfuncs( L, lanes_functions);
  2786. #if HAVE_LANE_TRACKING
  2787. // register core.threads() only if settings say it should be available
  2788. if( U->tracking_first != NULL)
  2789. {
  2790. lua_pushcfunction( L, LG_threads); // settings M LG_threads()
  2791. lua_setfield( L, -2, "threads"); // settings M
  2792. }
  2793. #endif // HAVE_LANE_TRACKING
  2794. STACK_MID( L, 1);
  2795. {
  2796. char const* errmsg;
  2797. errmsg = push_deep_proxy( U, L, (DEEP_PRELUDE*) U->timer_deep, eLM_LaneBody); // settings M timer_deep
  2798. if( errmsg != NULL)
  2799. {
2800. return luaL_error( L, "%s", errmsg); // don't pass errmsg as a format string
  2801. }
  2802. lua_setfield( L, -2, "timer_gateway"); // settings M
  2803. }
  2804. STACK_MID( L, 1);
  2805. // prepare the metatable for threads
  2806. // contains keys: { __gc, __index, cached_error, cached_tostring, cancel, join, get_debug_threadname }
  2807. //
  2808. if( luaL_newmetatable( L, "Lane")) // settings M mt
  2809. {
  2810. lua_pushcfunction( L, LG_thread_gc); // settings M mt LG_thread_gc
  2811. lua_setfield( L, -2, "__gc"); // settings M mt
  2812. lua_pushcfunction( L, LG_thread_index); // settings M mt LG_thread_index
  2813. lua_setfield( L, -2, "__index"); // settings M mt
  2814. lua_getglobal( L, "error"); // settings M mt error
  2815. ASSERT_L( lua_isfunction( L, -1));
  2816. lua_setfield( L, -2, "cached_error"); // settings M mt
  2817. lua_getglobal( L, "tostring"); // settings M mt tostring
  2818. ASSERT_L( lua_isfunction( L, -1));
  2819. lua_setfield( L, -2, "cached_tostring"); // settings M mt
  2820. lua_pushcfunction( L, LG_thread_join); // settings M mt LG_thread_join
  2821. lua_setfield( L, -2, "join"); // settings M mt
  2822. lua_pushcfunction( L, LG_get_debug_threadname); // settings M mt LG_get_debug_threadname
  2823. lua_setfield( L, -2, "get_debug_threadname"); // settings M mt
  2824. lua_pushcfunction( L, LG_thread_cancel); // settings M mt LG_thread_cancel
  2825. lua_setfield( L, -2, "cancel"); // settings M mt
  2826. lua_pushliteral( L, "Lane"); // settings M mt "Lane"
  2827. lua_setfield( L, -2, "__metatable"); // settings M mt
  2828. }
  2829. lua_pushcclosure( L, LG_thread_new, 1); // settings M LG_thread_new
  2830. lua_setfield( L, -2, "thread_new"); // settings M
  2831. // we can't register 'lanes.require' normally because we want to create an upvalued closure
  2832. lua_getglobal( L, "require"); // settings M require
  2833. lua_pushcclosure( L, LG_require, 1); // settings M lanes.require
  2834. lua_setfield( L, -2, "require"); // settings M
  2835. lua_pushstring(L, VERSION); // settings M VERSION
  2836. lua_setfield( L, -2, "version"); // settings M
  2837. lua_pushinteger(L, THREAD_PRIO_MAX); // settings M THREAD_PRIO_MAX
  2838. lua_setfield( L, -2, "max_prio"); // settings M
  2839. lua_pushlightuserdata( L, CANCEL_ERROR); // settings M CANCEL_ERROR
  2840. lua_setfield( L, -2, "cancel_error"); // settings M
  2841. // we'll need this every time we transfer some C function from/to this state
  2842. lua_newtable( L);
  2843. lua_setfield( L, LUA_REGISTRYINDEX, LOOKUP_REGKEY);
  2844. // register all native functions found in that module in the transferable functions database
  2845. // we process it before _G because we don't want to find the module when scanning _G (this would generate longer names)
  2846. // for example in package.loaded["lanes.core"].*
  2847. populate_func_lookup_table( L, -1, name);
  2848. // record all existing C/JIT-fast functions
  2849. // Lua 5.2 no longer has LUA_GLOBALSINDEX: we must push globals table on the stack
  2850. lua_pushglobaltable( L); // settings M _G
  2851. populate_func_lookup_table( L, -1, NULL);
  2852. lua_pop( L, 1); // settings M
  2853. // set _R[CONFIG_REGKEY] = settings
  2854. lua_pushvalue( L, -2); // settings M settings
  2855. lua_setfield( L, LUA_REGISTRYINDEX, CONFIG_REGKEY); // settings M
  2856. lua_pop( L, 1); // settings
  2857. STACK_END( L, 0);
  2858. DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "%p: lanes.configure() END\n" INDENT_END, L));
  2859. DEBUGSPEW_CODE( -- U->debugspew_indent_depth);
  2860. // Return the settings table
  2861. return 1;
  2862. }
  2863. #if defined PLATFORM_WIN32 && !defined NDEBUG
  2864. #include <signal.h>
  2865. #include <conio.h>
  2866. void signal_handler( int signal)
  2867. {
  2868. if( signal == SIGABRT)
  2869. {
  2870. _cprintf( "caught abnormal termination!");
  2871. abort();
  2872. }
  2873. }
2874. // helper to get correct callstacks when a Win32 process crashes on 64-bit Windows
2875. // don't forget to toggle Debug/Exceptions/Win32 in Visual Studio too!
  2876. static volatile long s_ecoc_initCount = 0;
  2877. static volatile int s_ecoc_go_ahead = 0;
  2878. static void EnableCrashingOnCrashes( void)
  2879. {
  2880. if( InterlockedCompareExchange( &s_ecoc_initCount, 1, 0) == 0)
  2881. {
  2882. typedef BOOL (WINAPI* tGetPolicy)( LPDWORD lpFlags);
  2883. typedef BOOL (WINAPI* tSetPolicy)( DWORD dwFlags);
  2884. const DWORD EXCEPTION_SWALLOWING = 0x1;
  2885. HMODULE kernel32 = LoadLibraryA("kernel32.dll");
  2886. tGetPolicy pGetPolicy = (tGetPolicy)GetProcAddress(kernel32, "GetProcessUserModeExceptionPolicy");
  2887. tSetPolicy pSetPolicy = (tSetPolicy)GetProcAddress(kernel32, "SetProcessUserModeExceptionPolicy");
  2888. if( pGetPolicy && pSetPolicy)
  2889. {
  2890. DWORD dwFlags;
  2891. if( pGetPolicy( &dwFlags))
  2892. {
  2893. // Turn off the filter
  2894. pSetPolicy( dwFlags & ~EXCEPTION_SWALLOWING);
  2895. }
  2896. }
  2897. //typedef void (* SignalHandlerPointer)( int);
  2898. /*SignalHandlerPointer previousHandler =*/ signal( SIGABRT, signal_handler);
  2899. s_ecoc_go_ahead = 1; // let others pass
  2900. }
  2901. else
  2902. {
  2903. while( !s_ecoc_go_ahead) { Sleep(1); } // changes threads
  2904. }
  2905. }
  2906. #endif // PLATFORM_WIN32
  2907. int LANES_API luaopen_lanes_core( lua_State* L)
  2908. {
  2909. #if defined PLATFORM_WIN32 && !defined NDEBUG
  2910. EnableCrashingOnCrashes();
  2911. #endif // defined PLATFORM_WIN32 && !defined NDEBUG
  2912. STACK_GROW( L, 4);
  2913. STACK_CHECK( L);
  2914. // Create main module interface table
  2915. // we only have 1 closure, which must be called to configure Lanes
  2916. lua_newtable( L); // M
  2917. lua_pushvalue( L, 1); // M "lanes.core"
  2918. lua_pushvalue( L, -2); // M "lanes.core" M
  2919. lua_pushcclosure( L, LG_configure, 2); // M LG_configure()
  2920. lua_getfield( L, LUA_REGISTRYINDEX, CONFIG_REGKEY); // M LG_configure() settings
  2921. if( !lua_isnil( L, -1)) // this is not the first require "lanes.core": call configure() immediately
  2922. {
  2923. lua_pushvalue( L, -1); // M LG_configure() settings settings
  2924. lua_setfield( L, -4, "settings"); // M LG_configure() settings
  2925. lua_call( L, 1, 0); // M
  2926. }
  2927. else
  2928. {
  2929. // will do nothing on first invocation, as we haven't stored settings in the registry yet
  2930. lua_setfield( L, -3, "settings"); // M LG_configure()
  2931. lua_setfield( L, -2, "configure"); // M
  2932. }
  2933. STACK_END( L, 1);
  2934. return 1;
  2935. }
  2936. static int default_luaopen_lanes( lua_State* L)
  2937. {
  2938. int rc = luaL_loadfile( L, "lanes.lua") || lua_pcall( L, 0, 1, 0);
  2939. if( rc != LUA_OK)
  2940. {
  2941. return luaL_error( L, "failed to initialize embedded Lanes");
  2942. }
  2943. return 1;
  2944. }
  2945. // call this instead of luaopen_lanes_core() when embedding Lua and Lanes in a custom application
  2946. void LANES_API luaopen_lanes_embedded( lua_State* L, lua_CFunction _luaopen_lanes)
  2947. {
  2948. STACK_CHECK( L);
  2949. // pre-require lanes.core so that when lanes.lua calls require "lanes.core" it finds it is already loaded
  2950. luaL_requiref( L, "lanes.core", luaopen_lanes_core, 0); // ... lanes.core
  2951. lua_pop( L, 1); // ...
  2952. STACK_MID( L, 0);
  2953. // call user-provided function that runs the chunk "lanes.lua" from wherever they stored it
  2954. luaL_requiref( L, "lanes", _luaopen_lanes ? _luaopen_lanes : default_luaopen_lanes, 0); // ... lanes
  2955. STACK_END( L, 1);
  2956. }
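/*
 * Embedding sketch (illustrative only; 'load_lanes_lua' is a hypothetical
 * host-provided loader that runs the "lanes.lua" chunk shipped with the host):
 *
 *   static int load_lanes_lua( lua_State* L)
 *   {
 *       if( luaL_loadfile( L, "path/to/lanes.lua") || lua_pcall( L, 0, 1, 0))
 *           return lua_error( L);
 *       return 1;
 *   }
 *
 *   lua_State* L = luaL_newstate();
 *   luaL_openlibs( L);
 *   luaopen_lanes_embedded( L, load_lanes_lua); // leaves the "lanes" module on the stack
 *   lua_pop( L, 1);
 */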