/lua_lanes/src/lanes.c
C | 3312 lines | 2156 code | 350 blank | 806 comment | 398 complexity | 7847040702b43b8fab7ca67a5fd28d7f MD5 | raw file
Possible License(s): Zlib, BSD-3-Clause, CC0-1.0, GPL-3.0, GPL-2.0, CPL-1.0, MPL-2.0-no-copyleft-exception, LGPL-2.0, LGPL-2.1, LGPL-3.0, 0BSD, Cube
Large files are truncated, but you can click here to view the full file
- /*
- * LANES.C Copyright (c) 2007-08, Asko Kauppi
- * Copyright (C) 2009-14, Benoit Germain
- *
- * Multithreading in Lua.
- *
- * History:
- * See CHANGES
- *
- * Platforms (tested internally):
- * OS X (10.5.7 PowerPC/Intel)
- * Linux x86 (Ubuntu 8.04)
- * Win32 (Windows XP Home SP2, Visual C++ 2005/2008 Express)
- *
- * Platforms (tested externally):
- * Win32 (MSYS) by Ross Berteig.
- *
- * Platforms (testers appreciated):
- * Win64 - should work???
- * Linux x64 - should work
- * FreeBSD - should work
- * QNX - porting shouldn't be hard
- * Sun Solaris - porting shouldn't be hard
- *
- * References:
- * "Porting multithreaded applications from Win32 to Mac OS X":
- * <http://developer.apple.com/macosx/multithreadedprogramming.html>
- *
- * Pthreads:
- * <http://vergil.chemistry.gatech.edu/resources/programming/threads.html>
- *
- * MSDN: <http://msdn2.microsoft.com/en-us/library/ms686679.aspx>
- *
- * <http://ridiculousfish.com/blog/archives/2007/02/17/barrier>
- *
- * Defines:
- * -DLINUX_SCHED_RR: all threads are lifted to SCHED_RR category, to
- * allow negative priorities [-3,-1] be used. Even without this,
- * using priorities will require 'sudo' privileges on Linux.
- *
- * -DUSE_PTHREAD_TIMEDJOIN: use 'pthread_timedjoin_np()' for waiting
- * for threads with a timeout. This changes the thread cleanup
- * mechanism slightly (cleans up at the join, not once the thread
- * has finished). May or may not be a good idea to use it.
- * Available only in selected operating systems (Linux).
- *
- * Bugs:
- *
- * To-do:
- *
- * Make waiting threads cancellable.
- * ...
- */
// Human-readable module version string (see CHANGES for history)
char const* VERSION = "3.9.4";
- /*
- ===============================================================================
- Copyright (C) 2007-10 Asko Kauppi <akauppi@gmail.com>
- 2011-14 Benoit Germain <bnt.germain@gmail.com>
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
- The above copyright notice and this permission notice shall be included in
- all copies or substantial portions of the Software.
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- THE SOFTWARE.
- ===============================================================================
- */
- #include <string.h>
- #include <stdio.h>
- #include <stdlib.h>
- #include <ctype.h>
- #include "lua.h"
- #include "lauxlib.h"
- #include "threading.h"
- #include "tools.h"
- #include "keeper.h"
- #include "lanes.h"
- #if !(defined( PLATFORM_XBOX) || defined( PLATFORM_WIN32) || defined( PLATFORM_POCKETPC))
- # include <sys/time.h>
- #endif
- /* geteuid() */
- #ifdef PLATFORM_LINUX
- # include <unistd.h>
- # include <sys/types.h>
- #endif
- /* Do you want full call stacks, or just the line where the error happened?
- *
- * TBD: The full stack feature does not seem to work (try 'make error').
- */
- #define ERROR_FULL_STACK 1 // must be either 0 or 1 as we do some index arithmetics with it!
/*
 * Lane cancellation request modes, stored in s_lane::cancel_request and
 * s_Linda::simulate_cancel. Checked by cancel_test() and the linda
 * send/receive loops.
 */
enum e_cancel_request
{
    CANCEL_NONE, // no pending cancel request
    CANCEL_SOFT, // user wants the lane to cancel itself manually on cancel_test()
    CANCEL_HARD  // user wants the lane to be interrupted (meaning code won't return from those functions) from inside linda:send/receive calls
};
- // NOTE: values to be changed by either thread, during execution, without
- // locking, are marked "volatile"
- //
// Per-lane bookkeeping, shared between the master thread (M) and the
// lane's own sub-thread (S). Fields modified by either side during
// execution, without locking, are marked "volatile".
struct s_lane
{
    THREAD_T thread;
    //
    // M: sub-thread OS thread
    // S: not used

    char const* debug_name; // name used in stack traces / tracking; may be re-interned by securize_debug_threadname()

    lua_State* L;           // the lane's own Lua state
    struct s_Universe* U;   // the universe (lanes instance) this lane belongs to
    //
    // M: prepares the state, and reads results
    // S: while S is running, M must keep out of modifying the state

    volatile enum e_status status;
    //
    // M: sets to PENDING (before launching)
    // S: updates -> RUNNING/WAITING -> DONE/ERROR_ST/CANCELLED

    SIGNAL_T* volatile waiting_on;
    //
    // When status is WAITING, points on the linda's signal the thread waits on, else NULL

    volatile enum e_cancel_request cancel_request;
    //
    // M: sets to FALSE, flags TRUE for cancel request
    // S: reads to see if cancel is requested

#if THREADWAIT_METHOD == THREADWAIT_CONDVAR
    SIGNAL_T done_signal;
    //
    // M: Waited upon at lane ending  (if Posix with no PTHREAD_TIMEDJOIN)
    // S: sets the signal once cancellation is noticed (avoids a kill)

    MUTEX_T done_lock;
    //
    // Lock required by 'done_signal' condition variable, protecting
    // lane status changes to DONE/ERROR_ST/CANCELLED.
#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR

    volatile enum {
        NORMAL,         // normal master side state
        KILLED          // issued an OS kill
    } mstatus;
    //
    // M: sets to NORMAL, if issued a kill changes to KILLED
    // S: not used

    struct s_lane* volatile selfdestruct_next;
    //
    // M: sets to non-NULL if facing lane handle '__gc' cycle but the lane
    //    is still running
    // S: cleans up after itself if non-NULL at lane exit

#if HAVE_LANE_TRACKING
    struct s_lane* volatile tracking_next;
#endif // HAVE_LANE_TRACKING
    //
    // For tracking only
};
- // To allow free-running threads (longer lifespan than the handle's)
- // 'struct s_lane' are malloc/free'd and the handle only carries a pointer.
- // This is not deep userdata since the handle's not portable among lanes.
- //
- #define lua_toLane( L, i) (*((struct s_lane**) luaL_checkudata( L, i, "Lane")))
- #define CANCEL_TEST_KEY ((void*)get_lane_from_registry) // used as registry key
- static inline struct s_lane* get_lane_from_registry( lua_State* L)
- {
- struct s_lane* s;
- STACK_GROW( L, 1);
- STACK_CHECK( L);
- lua_pushlightuserdata( L, CANCEL_TEST_KEY);
- lua_rawget( L, LUA_REGISTRYINDEX);
- s = lua_touserdata( L, -1); // lightuserdata (true 's_lane' pointer) / nil
- lua_pop( L, 1);
- STACK_END( L, 0);
- return s;
- }
- // intern the debug name in the specified lua state so that the pointer remains valid when the lane's state is closed
// Intern the lane's debug name inside the lane userdata's uservalue table
// (at stack index 1), so that the string pointer stays valid even after the
// lane's own lua_State is closed. The interned string is stored with a
// freshly created table as key, guaranteeing uniqueness.
static void securize_debug_threadname( lua_State* L, struct s_lane* s)
{
    STACK_CHECK( L);
    STACK_GROW( L, 3);
    lua_getuservalue( L, 1);                   // uv
    lua_newtable( L);                          // uv {}
    // Lua 5.1 can't do 's->debug_name = lua_pushstring( L, s->debug_name);'
    lua_pushstring( L, s->debug_name);         // uv {} name
    s->debug_name = lua_tostring( L, -1);      // repoint at the copy owned by this state's uservalue
    lua_rawset( L, -3);                        // uv  (uv[{}] = name)
    lua_pop( L, 1);                            //
    STACK_END( L, 0);
}
- /*
- * Check if the thread in question ('L') has been signalled for cancel.
- *
- * Called by cancellation hooks and/or pending Linda operations (because then
- * the check won't affect performance).
- *
- * Returns TRUE if any locks are to be exited, and 'cancel_error()' called,
- * to make execution of the lane end.
- */
- static inline enum e_cancel_request cancel_test( lua_State* L)
- {
- struct s_lane* const s = get_lane_from_registry( L);
- // 's' is NULL for the original main state (and no-one can cancel that)
- return s ? s->cancel_request : CANCEL_NONE;
- }
- #define CANCEL_ERROR ((void*)cancel_error) // 'cancel_error' sentinel
- static int cancel_error( lua_State* L)
- {
- STACK_GROW( L, 1);
- lua_pushlightuserdata( L, CANCEL_ERROR); // special error value
- return lua_error( L); // doesn't return
- }
- static void cancel_hook( lua_State* L, lua_Debug* ar)
- {
- (void)ar;
- if( cancel_test( L) != CANCEL_NONE)
- {
- cancel_error( L);
- }
- }
- #if ERROR_FULL_STACK
- static int lane_error( lua_State* L);
- #define STACK_TRACE_KEY ((void*)lane_error) // used as registry key
- #endif // ERROR_FULL_STACK
- /*
- * registry[FINALIZER_REG_KEY] is either nil (no finalizers) or a table
- * of functions that Lanes will call after the executing 'pcall' has ended.
- *
- * We're NOT using the GC system for finalizer mainly because providing the
- * error (and maybe stack trace) parameters to the finalizer functions would
- * anyways complicate that approach.
- */
- #define FINALIZER_REG_KEY ((void*)LG_set_finalizer)
- struct s_Linda;
- #if 1
- # define DEBUG_SIGNAL( msg, signal_ref ) /* */
- #else
- # define DEBUG_SIGNAL( msg, signal_ref ) \
- { int i; unsigned char *ptr; char buf[999]; \
- sprintf( buf, ">>> " msg ": %p\t", (signal_ref) ); \
- ptr= (unsigned char *)signal_ref; \
- for( i=0; i<sizeof(*signal_ref); i++ ) { \
- sprintf( strchr(buf,'\0'), "%02x %c ", ptr[i], ptr[i] ); \
- } \
- fprintf( stderr, "%s\n", buf ); \
- }
- #endif
- /*
- * Push a table stored in registry onto Lua stack.
- *
- * If there is no existing table, create one if 'create' is TRUE.
- *
- * Returns: TRUE if a table was pushed
- * FALSE if no table found, not created, and nothing pushed
- */
- static bool_t push_registry_table( lua_State* L, void* key, bool_t create)
- {
- STACK_GROW( L, 3);
- STACK_CHECK( L);
- lua_pushlightuserdata( L, key); // key
- lua_gettable( L, LUA_REGISTRYINDEX); // t?
- if( lua_isnil( L, -1)) // nil?
- {
- lua_pop( L, 1); //
- if( !create)
- {
- return FALSE;
- }
- lua_newtable(L); // t
- lua_pushlightuserdata( L, key); // t key
- lua_pushvalue( L, -2); // t key t
- lua_rawset( L, LUA_REGISTRYINDEX); // t
- }
- STACK_END( L, 1);
- return TRUE; // table pushed
- }
- #if HAVE_LANE_TRACKING
- // The chain is ended by '(struct s_lane*)(-1)', not NULL:
- // 'tracking_first -> ... -> ... -> (-1)'
- #define TRACKING_END ((struct s_lane *)(-1))
- /*
- * Add the lane to tracking chain; the ones still running at the end of the
- * whole process will be cancelled.
- */
- static void tracking_add( struct s_lane* s)
- {
- MUTEX_LOCK( &s->U->tracking_cs);
- {
- assert( s->tracking_next == NULL);
- s->tracking_next = s->U->tracking_first;
- s->U->tracking_first = s;
- }
- MUTEX_UNLOCK( &s->U->tracking_cs);
- }
- /*
- * A free-running lane has ended; remove it from tracking chain
- */
/*
 * A free-running lane has ended; remove it from the universe's tracking chain.
 * Returns TRUE if the lane was found (and unlinked), FALSE otherwise.
 */
static bool_t tracking_remove( struct s_lane* s)
{
    bool_t found = FALSE;
    MUTEX_LOCK( &s->U->tracking_cs);
    {
        // Make sure (within the MUTEX) that we actually are in the chain
        // still (at process exit they will remove us from chain and then
        // cancel/kill).
        //
        if (s->tracking_next != NULL)
        {
            // walk the chain via a pointer-to-link so unlinking works for the
            // head element too
            struct s_lane** ref = (struct s_lane**) &s->U->tracking_first;
            while( *ref != TRACKING_END)
            {
                if( *ref == s)
                {
                    *ref = s->tracking_next;
                    s->tracking_next = NULL;
                    found = TRUE;
                    break;
                }
                ref = (struct s_lane**) &((*ref)->tracking_next);
            }
            // a lane with a non-NULL tracking_next must be in the chain
            assert( found);
        }
    }
    MUTEX_UNLOCK( &s->U->tracking_cs);
    return found;
}
- #endif // HAVE_LANE_TRACKING
- //---
- // low-level cleanup
//---
// low-level cleanup: release the synchronization primitives owned by a
// (finished) lane, unlink it from the tracking chain if enabled, and free
// the structure itself.
static void lane_cleanup( struct s_lane* s)
{
    // Clean up after a (finished) thread
    //
#if THREADWAIT_METHOD == THREADWAIT_CONDVAR
    SIGNAL_FREE( &s->done_signal);
    MUTEX_FREE( &s->done_lock);
#endif // THREADWAIT_METHOD == THREADWAIT_CONDVAR

#if HAVE_LANE_TRACKING
    if( s->U->tracking_first != NULL)
    {
        // Lane was cleaned up, no need to handle at process termination
        tracking_remove( s);
    }
#endif // HAVE_LANE_TRACKING

    free( s);
}
- /*
- * ###############################################################################################
- * ############################################ Linda ############################################
- * ###############################################################################################
- */
- /*
- * Actual data is kept within a keeper state, which is hashed by the 's_Linda'
- * pointer (which is same to all userdatas pointing to it).
- */
/*
 * Deep userdata shared by all linda proxies. Actual data is kept within a
 * keeper state, which is hashed by the 's_Linda' pointer (which is same to
 * all userdatas pointing to it).
 */
struct s_Linda
{
    SIGNAL_T read_happened;         // signalled when data was consumed (wakes blocked writers)
    SIGNAL_T write_happened;        // signalled when data was stored (wakes blocked readers)
    struct s_Universe* U;           // the universe this linda belongs to
    enum e_cancel_request simulate_cancel; // set by linda_cancel() to wake and cancel waiters
    unsigned long group;            // a group to control keeper allocation between lindas
    char name[1];                   // optional name; actual allocation is extended to fit it (pre-C99 flexible array idiom)
};
- #define LINDA_KEEPER_HASHSEED( linda) (linda->group ? linda->group : (unsigned long)linda)
- static void* linda_id( lua_State*, enum eDeepOp);
- static inline struct s_Linda* lua_toLinda( lua_State* L, int idx_)
- {
- struct s_Linda* linda = (struct s_Linda*) luaG_todeep( L, linda_id, idx_);
- luaL_argcheck( L, linda != NULL, idx_, "expecting a linda object");
- return linda;
- }
- static void check_key_types( lua_State* L, int start_, int end_)
- {
- int i;
- for( i = start_; i <= end_; ++ i)
- {
- int t = lua_type( L, i);
- if( t == LUA_TBOOLEAN || t == LUA_TNUMBER || t == LUA_TSTRING || t == LUA_TLIGHTUSERDATA)
- {
- continue;
- }
- (void) luaL_error( L, "argument #%d: invalid key type (not a boolean, string, number or light userdata)", i);
- }
- }
- /*
- * bool= linda_send( linda_ud, [timeout_secs=-1,] [linda.null,] key_num|str|bool|lightuserdata, ... )
- *
- * Send one or more values to a Linda. If there is a limit, all values must fit.
- *
- * Returns: 'true' if the value was queued
- * 'false' for timeout (only happens when the queue size is limited)
- * nil, CANCEL_ERROR if cancelled
- */
/*
 * bool = linda_send( linda_ud, [timeout_secs=-1,] [linda.null,] key, ... )
 *
 * Send one or more values to a Linda. If there is a limit, all values must fit.
 * Loops on the keeper call until the values are queued, the (optional) timeout
 * expires, or a cancel request is detected.
 *
 * Returns: 'true' if the value was queued
 *          'false' for timeout (only happens when the queue size is limited)
 *          nil, CANCEL_ERROR if soft-cancelled (hard cancel raises instead)
 */
LUAG_FUNC( linda_send)
{
    struct s_Linda* linda = lua_toLinda( L, 1);
    bool_t ret;                          // keeper's answer: values queued or not
    enum e_cancel_request cancel = CANCEL_NONE;
    int pushed;
    time_d timeout = -1.0;               // -1 means "wait forever"
    uint_t key_i = 2;                    // index of first key, if timeout not there
    void* as_nil_sentinel;               // if not NULL, send() will silently send a single nil if nothing is provided
    if( lua_type( L, 2) == LUA_TNUMBER)  // we don't want to use lua_isnumber() because of autocoercion
    {
        timeout = SIGNAL_TIMEOUT_PREPARE( lua_tonumber( L, 2));
        ++ key_i;
    }
    else if( lua_isnil( L, 2))           // alternate explicit "no timeout" by passing nil before the key
    {
        ++ key_i;
    }

    as_nil_sentinel = lua_touserdata( L, key_i);
    if( as_nil_sentinel == NIL_SENTINEL)
    {
        // the real key to send data to is after the NIL_SENTINEL marker
        ++ key_i;
    }

    // make sure the key is of a valid type
    check_key_types( L, key_i, key_i);

    STACK_GROW( L, 1);

    // make sure there is something to send
    if( (uint_t)lua_gettop( L) == key_i)
    {
        if( as_nil_sentinel == NIL_SENTINEL)
        {
            // send a single nil if nothing is provided
            lua_pushlightuserdata( L, NIL_SENTINEL);
        }
        else
        {
            return luaL_error( L, "no data to send");
        }
    }

    // convert nils to some special non-nil sentinel in sent values
    keeper_toggle_nil_sentinels( L, key_i + 1, eLM_ToKeeper);

    {
        bool_t try_again = TRUE;
        struct s_lane* const s = get_lane_from_registry( L);
        struct s_Keeper* K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda));
        lua_State* KL = K ? K->L : NULL; // need to do this for 'STACK_CHECK'
        if( KL == NULL) return 0;        // no keeper: nothing we can do (main state shutdown?)
        STACK_CHECK( KL);
        for( ;;)
        {
            // pick up any pending cancel request: the lane's own, or the linda-wide simulated one
            if( s != NULL)
            {
                cancel = s->cancel_request;
            }
            cancel = (cancel != CANCEL_NONE) ? cancel : linda->simulate_cancel;
            // if user wants to cancel, or looped because of a timeout, the call returns without sending anything
            if( !try_again || cancel != CANCEL_NONE)
            {
                pushed = 0;
                break;
            }

            STACK_MID( KL, 0);
            pushed = keeper_call( linda->U, KL, KEEPER_API( send), L, linda, key_i);
            if( pushed < 0) // keeper failed to copy the values (unsupported type)
            {
                ret = FALSE;
                break;
            }
            ASSERT_L( pushed == 1);

            ret = lua_toboolean( L, -1);
            lua_pop( L, 1);

            if( ret)
            {
                // Wake up ALL waiting threads
                SIGNAL_ALL( &linda->write_happened);
                break;
            }

            // didn't fit; with a zero timeout, don't wait at all
            if( timeout == 0.0)
            {
                break; /* no wait; instant timeout */
            }

            // storage limit hit, wait until timeout or signalled that we should try again
            {
                enum e_status prev_status = ERROR_ST; // prevent 'might be used uninitialized' warnings
                if( s != NULL)
                {
                    // change status of lane to "waiting"
                    prev_status = s->status; // RUNNING, most likely
                    ASSERT_L( prev_status == RUNNING); // but check, just in case
                    s->status = WAITING;
                    ASSERT_L( s->waiting_on == NULL);
                    s->waiting_on = &linda->read_happened;
                }
                // could not send because no room: wait until some data was read before trying again, or until timeout is reached
                try_again = SIGNAL_WAIT( &linda->read_happened, &K->keeper_cs, timeout);
                if( s != NULL)
                {
                    s->waiting_on = NULL;
                    s->status = prev_status;
                }
            }
        }
        STACK_END( KL, 0);
        keeper_release( K);
    }

    // must trigger error after keeper state has been released
    if( pushed < 0)
    {
        return luaL_error( L, "tried to copy unsupported types");
    }

    switch( cancel)
    {
        case CANCEL_SOFT:
        // if user wants to soft-cancel, the call returns lanes.cancel_error
        lua_pushlightuserdata( L, CANCEL_ERROR);
        return 1;

        case CANCEL_HARD:
        // raise an error interrupting execution only in case of hard cancel
        return cancel_error( L); // raises an error and doesn't return

        default:
        lua_pushboolean( L, ret); // true (success) or false (timeout)
        return 1;
    }
}
- /*
- * 2 modes of operation
- * [val, key]= linda_receive( linda_ud, [timeout_secs_num=-1], key_num|str|bool|lightuserdata [, ...] )
- * Consumes a single value from the Linda, in any key.
- * Returns: received value (which is consumed from the slot), and the key which had it
- * [val1, ... valCOUNT]= linda_receive( linda_ud, [timeout_secs_num=-1], linda.batched, key_num|str|bool|lightuserdata, min_COUNT[, max_COUNT])
- * Consumes between min_COUNT and max_COUNT values from the linda, from a single key.
- * returns the actual consumed values, or nil if there weren't enough values to consume
- *
- */
- #define BATCH_SENTINEL "270e6c9d-280f-4983-8fee-a7ecdda01475"
// sentinel value that selects the "batched" receive mode
#define BATCH_SENTINEL "270e6c9d-280f-4983-8fee-a7ecdda01475"
/*
 * Receive values from a Linda; loops on the keeper call until something is
 * available, the (optional) timeout expires, or a cancel request is detected.
 * See the comment block above for the two calling modes (single-value from
 * any key, or batched from a single key).
 */
LUAG_FUNC( linda_receive)
{
    struct s_Linda* linda = lua_toLinda( L, 1);
    int pushed, expected_pushed_min, expected_pushed_max;
    enum e_cancel_request cancel = CANCEL_NONE;
    keeper_api_t keeper_receive; // which keeper entry point: single or batched

    time_d timeout = -1.0;               // -1 means "wait forever"
    uint_t key_i = 2;                    // index of first key, if timeout not there
    if( lua_type( L, 2) == LUA_TNUMBER)  // we don't want to use lua_isnumber() because of autocoercion
    {
        timeout = SIGNAL_TIMEOUT_PREPARE( lua_tonumber( L, 2));
        ++ key_i;
    }
    else if( lua_isnil( L, 2))           // alternate explicit "no timeout" by passing nil before the key
    {
        ++ key_i;
    }

    // are we in batched mode?
    {
        int is_batched;
        lua_pushliteral( L, BATCH_SENTINEL);
        is_batched = lua_equal( L, key_i, -1);
        lua_pop( L, 1);
        if( is_batched)
        {
            // no need to pass linda.batched in the keeper state
            ++ key_i;
            // make sure the keys are of a valid type
            check_key_types( L, key_i, key_i);
            // receive multiple values from a single slot
            keeper_receive = KEEPER_API( receive_batched);
            // we expect a user-defined amount of return value
            expected_pushed_min = (int)luaL_checkinteger( L, key_i + 1);
            expected_pushed_max = (int)luaL_optinteger( L, key_i + 2, expected_pushed_min);
            // don't forget to count the key in addition to the values
            ++ expected_pushed_min;
            ++ expected_pushed_max;
            if( expected_pushed_min > expected_pushed_max)
            {
                return luaL_error( L, "batched min/max error");
            }
        }
        else
        {
            // make sure the keys are of a valid type
            check_key_types( L, key_i, lua_gettop( L));
            // receive a single value, checking multiple slots
            keeper_receive = KEEPER_API( receive);
            // we expect a single (value, key) pair of returned values
            expected_pushed_min = expected_pushed_max = 2;
        }
    }

    {
        bool_t try_again = TRUE;
        struct s_lane* const s = get_lane_from_registry( L);
        struct s_Keeper* K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda));
        if( K == NULL) return 0; // no keeper: nothing we can do (main state shutdown?)
        for( ;;)
        {
            // pick up any pending cancel request: the lane's own, or the linda-wide simulated one
            if( s != NULL)
            {
                cancel = s->cancel_request;
            }
            cancel = (cancel != CANCEL_NONE) ? cancel : linda->simulate_cancel;
            // if user wants to cancel, or looped because of a timeout, the call returns without sending anything
            if( !try_again || cancel != CANCEL_NONE)
            {
                pushed = 0;
                break;
            }

            // all arguments of receive() but the first are passed to the keeper's receive function
            pushed = keeper_call( linda->U, K->L, keeper_receive, L, linda, key_i);
            if( pushed < 0) // keeper failed to copy the values (unsupported type)
            {
                break;
            }
            if( pushed > 0)
            {
                ASSERT_L( pushed >= expected_pushed_min && pushed <= expected_pushed_max);
                // replace sentinels with real nils
                keeper_toggle_nil_sentinels( L, lua_gettop( L) - pushed, eLM_FromKeeper);
                // To be done from within the 'K' locking area
                //
                SIGNAL_ALL( &linda->read_happened);
                break;
            }

            // nothing available; with a zero timeout, don't wait at all
            if( timeout == 0.0)
            {
                break; /* instant timeout */
            }

            // nothing received, wait until timeout or signalled that we should try again
            {
                enum e_status prev_status = ERROR_ST; // prevent 'might be used uninitialized' warnings
                if( s != NULL)
                {
                    // change status of lane to "waiting"
                    prev_status = s->status; // RUNNING, most likely
                    ASSERT_L( prev_status == RUNNING); // but check, just in case
                    s->status = WAITING;
                    ASSERT_L( s->waiting_on == NULL);
                    s->waiting_on = &linda->write_happened;
                }
                // not enough data to read: wakeup when data was sent, or when timeout is reached
                try_again = SIGNAL_WAIT( &linda->write_happened, &K->keeper_cs, timeout);
                if( s != NULL)
                {
                    s->waiting_on = NULL;
                    s->status = prev_status;
                }
            }
        }
        keeper_release( K);
    }

    // must trigger error after keeper state has been released
    if( pushed < 0)
    {
        return luaL_error( L, "tried to copy unsupported types");
    }

    switch( cancel)
    {
        case CANCEL_SOFT:
        // if user wants to soft-cancel, the call returns CANCEL_ERROR
        lua_pushlightuserdata( L, CANCEL_ERROR);
        return 1;

        case CANCEL_HARD:
        // raise an error interrupting execution only in case of hard cancel
        return cancel_error( L); // raises an error and doesn't return

        default:
        return pushed;
    }
}
- /*
- * [true|lanes.cancel_error] = linda_set( linda_ud, key_num|str|bool|lightuserdata [, value [, ...]])
- *
- * Set one or more value to Linda.
- * TODO: what do we do if we set to non-nil and limit is 0?
- *
- * Existing slot value is replaced, and possible queued entries removed.
- */
/*
 * [true|lanes.cancel_error] = linda_set( linda_ud, key [, value [, ...]])
 *
 * Set one or more values to Linda; existing slot contents are replaced and
 * possible queued entries removed. Wakes readers if values were stored, and
 * writers if the slot was previously full.
 * TODO: what do we do if we set to non-nil and limit is 0?
 */
LUAG_FUNC( linda_set)
{
    struct s_Linda* const linda = lua_toLinda( L, 1);
    int pushed;
    bool_t has_value = lua_gettop( L) > 2; // no value at all clears the slot

    // make sure the key is of a valid type (throws an error if not the case)
    check_key_types( L, 2, 2);

    {
        struct s_Keeper* K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda));
        if( K == NULL) return 0; // no keeper: nothing we can do (main state shutdown?)
        if( linda->simulate_cancel == CANCEL_NONE)
        {
            if( has_value)
            {
                // convert nils to some special non-nil sentinel in sent values
                keeper_toggle_nil_sentinels( L, 3, eLM_ToKeeper);
            }
            pushed = keeper_call( linda->U, K->L, KEEPER_API( set), L, linda, 2);
            if( pushed >= 0) // no error?
            {
                ASSERT_L( pushed == 0 || pushed == 1);

                if( has_value)
                {
                    // we put some data in the slot, tell readers that they should wake
                    SIGNAL_ALL( &linda->write_happened); // To be done from within the 'K' locking area
                }
                if( pushed == 1)
                {
                    // the key was full, but it is no longer the case, tell writers they should wake
                    ASSERT_L( lua_type( L, -1) == LUA_TBOOLEAN && lua_toboolean( L, -1) == 1);
                    SIGNAL_ALL( &linda->read_happened); // To be done from within the 'K' locking area
                }
            }
        }
        else // linda is cancelled
        {
            // do nothing and return lanes.cancel_error
            lua_pushlightuserdata( L, CANCEL_ERROR);
            pushed = 1;
        }
        keeper_release( K);
    }

    // must trigger any error after keeper state has been released
    return (pushed < 0) ? luaL_error( L, "tried to copy unsupported types") : pushed;
}
- /*
- * [val] = linda_count( linda_ud, [key [, ...]])
- *
- * Get a count of the pending elements in the specified keys
- */
- LUAG_FUNC( linda_count)
- {
- struct s_Linda* linda = lua_toLinda( L, 1);
- int pushed;
- // make sure the keys are of a valid type
- check_key_types( L, 2, lua_gettop( L));
- {
- struct s_Keeper* K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda));
- if( K == NULL) return 0;
- pushed = keeper_call( linda->U, K->L, KEEPER_API( count), L, linda, 2);
- keeper_release( K);
- if( pushed < 0)
- {
- return luaL_error( L, "tried to count an invalid key");
- }
- }
- return pushed;
- }
- /*
- * [val [, ...]] = linda_get( linda_ud, key_num|str|bool|lightuserdata [, count = 1])
- *
- * Get one or more values from Linda.
- */
/*
 * [val [, ...]] = linda_get( linda_ud, key [, count = 1])
 *
 * Get one or more values from Linda without consuming them.
 * Returns lanes.cancel_error if the linda was cancelled.
 */
LUAG_FUNC( linda_get)
{
    struct s_Linda* const linda = lua_toLinda( L, 1);
    int pushed;
    int count = luaL_optint( L, 3, 1);
    luaL_argcheck( L, count >= 1, 3, "count should be >= 1");
    luaL_argcheck( L, lua_gettop( L) <= 3, 4, "too many arguments");

    // make sure the key is of a valid type (throws an error if not the case)
    check_key_types( L, 2, 2);
    {
        struct s_Keeper* K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda));
        if( K == NULL) return 0; // no keeper: nothing we can do (main state shutdown?)
        if( linda->simulate_cancel == CANCEL_NONE)
        {
            pushed = keeper_call( linda->U, K->L, KEEPER_API( get), L, linda, 2);
            if( pushed > 0)
            {
                // nils were stored as sentinels in the keeper: convert them back
                keeper_toggle_nil_sentinels( L, lua_gettop( L) - pushed, eLM_FromKeeper);
            }
        }
        else // linda is cancelled
        {
            // do nothing and return lanes.cancel_error
            lua_pushlightuserdata( L, CANCEL_ERROR);
            pushed = 1;
        }
        keeper_release( K);
        // must trigger error after keeper state has been released
        // (an error can be raised if we attempt to read an unregistered function)
        if( pushed < 0)
        {
            return luaL_error( L, "tried to copy unsupported types");
        }
    }
    return pushed;
}
- /*
- * [true] = linda_limit( linda_ud, key_num|str|bool|lightuserdata, int)
- *
- * Set limit to 1 Linda keys.
- * Optionally wake threads waiting to write on the linda, in case the limit enables them to do so
- */
/*
 * [true] = linda_limit( linda_ud, key, int)
 *
 * Set limit to 1 Linda key.
 * Optionally wake threads waiting to write on the linda, in case the limit enables them to do so
 * Returns lanes.cancel_error if the linda was cancelled.
 */
LUAG_FUNC( linda_limit)
{
    struct s_Linda* linda = lua_toLinda( L, 1);
    int pushed;

    // make sure we got 3 arguments: the linda, a key and a limit
    luaL_argcheck( L, lua_gettop( L) == 3, 2, "wrong number of arguments");
    // make sure we got a numeric limit
    luaL_checknumber( L, 3);
    // make sure the key is of a valid type
    check_key_types( L, 2, 2);

    {
        struct s_Keeper* K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda));
        if( K == NULL) return 0; // no keeper: nothing we can do (main state shutdown?)
        if( linda->simulate_cancel == CANCEL_NONE)
        {
            pushed = keeper_call( linda->U, K->L, KEEPER_API( limit), L, linda, 2);
            ASSERT_L( pushed == 0 || pushed == 1); // no error, optional boolean value saying if we should wake blocked writer threads
            if( pushed == 1)
            {
                // the new limit unblocked some writers: wake them
                ASSERT_L( lua_type( L, -1) == LUA_TBOOLEAN && lua_toboolean( L, -1) == 1);
                SIGNAL_ALL( &linda->read_happened); // To be done from within the 'K' locking area
            }
        }
        else // linda is cancelled
        {
            // do nothing and return lanes.cancel_error
            lua_pushlightuserdata( L, CANCEL_ERROR);
            pushed = 1;
        }
        keeper_release( K);
    }
    // propagate pushed boolean if any
    return pushed;
}
- /*
- * (void) = linda_cancel( linda_ud, "read"|"write"|"both"|"none")
- *
- * Signal linda so that waiting threads wake up as if their own lane was cancelled
- */
- LUAG_FUNC( linda_cancel)
- {
- struct s_Linda* linda = lua_toLinda( L, 1);
- char const* who = luaL_optstring( L, 2, "both");
- struct s_Keeper* K;
- // make sure we got 3 arguments: the linda, a key and a limit
- luaL_argcheck( L, lua_gettop( L) <= 2, 2, "wrong number of arguments");
- // signalling must be done from inside the K locking area
- K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda));
- if( K == NULL) return 0;
- linda->simulate_cancel = CANCEL_SOFT;
- if( strcmp( who, "both") == 0) // tell everyone writers to wake up
- {
- SIGNAL_ALL( &linda->write_happened);
- SIGNAL_ALL( &linda->read_happened);
- }
- else if( strcmp( who, "none") == 0) // reset flag
- {
- linda->simulate_cancel = CANCEL_NONE;
- }
- else if( strcmp( who, "read") == 0) // tell blocked readers to wake up
- {
- SIGNAL_ALL( &linda->write_happened);
- }
- else if( strcmp( who, "write") == 0) // tell blocked writers to wake up
- {
- SIGNAL_ALL( &linda->read_happened);
- }
- else
- {
- // error ...
- linda = NULL;
- }
- keeper_release( K);
- // ... but we must raise it outside the lock
- if( !linda)
- {
- return luaL_error( L, "unknown wake hint '%s'", who);
- }
- return 0;
- }
- /*
- * lightuserdata= linda_deep( linda_ud )
- *
- * Return the 'deep' userdata pointer, identifying the Linda.
- *
- * This is needed for using Lindas as key indices (timer system needs it);
- * separately created proxies of the same underlying deep object will have
- * different userdata and won't be known to be essentially the same deep one
- * without this.
- */
- LUAG_FUNC( linda_deep)
- {
- struct s_Linda* linda= lua_toLinda( L, 1);
- lua_pushlightuserdata( L, linda); // just the address
- return 1;
- }
- /*
- * string = linda:__tostring( linda_ud)
- *
- * Return the stringification of a linda
- *
- * Useful for concatenation or debugging purposes
- */
- static int linda_tostring( lua_State* L, int idx_, bool_t opt_)
- {
- struct s_Linda* linda = (struct s_Linda*) luaG_todeep( L, linda_id, idx_);
- if( !opt_)
- {
- luaL_argcheck( L, linda, idx_, "expecting a linda object");
- }
- if( linda != NULL)
- {
- char text[128];
- int len;
- if( linda->name[0])
- len = sprintf( text, "Linda: %.*s", (int)sizeof(text) - 8, linda->name);
- else
- len = sprintf( text, "Linda: %p", linda);
- lua_pushlstring( L, text, len);
- return 1;
- }
- return 0;
- }
// string = linda:__tostring() -- metamethod entry point; the argument at
// index 1 must be a linda (opt_ == FALSE raises otherwise)
LUAG_FUNC( linda_tostring)
{
    return linda_tostring( L, 1, FALSE);
}
- /*
- * string = linda:__concat( a, b)
- *
- * Return the concatenation of a pair of items, one of them being a linda
- *
- * Useful for concatenation or debugging purposes
- */
/*
 * string = linda:__concat( a, b)
 *
 * Return the concatenation of a pair of items, one of them being a linda.
 * Each linda operand is replaced in place by its stringification before
 * delegating to lua_concat.
 */
LUAG_FUNC( linda_concat)
{                                                                 // linda1? linda2?
    bool_t atLeastOneLinda = FALSE;
    // Lua semantics enforce that one of the 2 arguments is a Linda, but not necessarily both.
    if( linda_tostring( L, 1, TRUE))
    {
        atLeastOneLinda = TRUE;
        lua_replace( L, 1);                                       // str1 linda2?
    }
    if( linda_tostring( L, 2, TRUE))
    {
        atLeastOneLinda = TRUE;
        lua_replace( L, 2);                                       // linda1?|str1 str2
    }
    if( !atLeastOneLinda) // should not be possible
    {
        return luaL_error( L, "internal error: linda_concat called on non-Linda");
    }
    lua_concat( L, 2);                                            // str
    return 1;
}
- /*
- * table = linda:dump()
- * return a table listing all pending data inside the linda
- */
/*
 * table = linda:dump()
 * return a table listing all pending data inside the linda
 * (delegates to the keeper holding this linda's storage)
 */
LUAG_FUNC( linda_dump)
{
    struct s_Linda* linda = lua_toLinda( L, 1);
    ASSERT_L( linda->U == get_universe( L)); // a linda can only be dumped from its own universe
    return keeper_push_linda_storage( linda->U, L, linda, LINDA_KEEPER_HASHSEED( linda));
}
- /*
- * Identity function of a shared userdata object.
- *
- * lightuserdata= linda_id( "new" [, ...] )
- * = linda_id( "delete", lightuserdata )
- *
- * Creation and cleanup of actual 'deep' objects. 'luaG_...' will wrap them into
- * regular userdata proxies, per each state using the deep data.
- *
- * tbl= linda_id( "metatable" )
- *
- * Returns a metatable for the proxy objects ('__gc' method not needed; will
- * be added by 'luaG_...')
- *
- * string= linda_id( "module")
- *
- * Returns the name of the module that a state should require
- * in order to keep a handle on the shared library that exported the idfunc
- *
- * = linda_id( str, ... )
- *
- * For any other strings, the ID function must not react at all. This allows
- * future extensions of the system.
- */
// Deep-userdata identity function for lindas: handles creation, destruction,
// and metatable generation of the shared 'struct s_Linda' objects.
static void* linda_id( lua_State* L, enum eDeepOp op_)
{
	switch( op_)
	{
		case eDO_new:
		{
			struct s_Linda* s;
			size_t name_len = 0;
			char const* linda_name = NULL;
			unsigned long linda_group = 0;
			// should have a string and/or a number on the stack as parameters (name and group)
			switch( lua_gettop( L))
			{
				default: // 0 parameters: unnamed linda in group 0
				break;

				case 1: // 1 parameter, either a name or a group
				if( lua_type( L, -1) == LUA_TSTRING)
				{
					linda_name = lua_tolstring( L, -1, &name_len);
				}
				else
				{
					linda_group = (unsigned long) lua_tointeger( L, -1);
				}
				break;

				case 2: // 2 parameters, a name and group, in that order
				linda_name = lua_tolstring( L, -2, &name_len);
				linda_group = lua_tointeger( L, -1);
				break;
			}

			/* The deep data is allocated separately of Lua stack; we might no
			 * longer be around when last reference to it is being released.
			 * One can use any memory allocation scheme.
			 * just don't use L's allocF because we don't know which state will get the honor of GCing the linda
			 */
			// 'name_len' extra bytes make room for the name in the trailing buffer
			s = (struct s_Linda*) malloc( sizeof(struct s_Linda) + name_len); // terminating 0 is already included
			if( s)
			{
				SIGNAL_INIT( &s->read_happened);
				SIGNAL_INIT( &s->write_happened);
				s->U = get_universe( L);
				s->simulate_cancel = CANCEL_NONE;
				// group lives in the high bits, leaving the low bits for the keeper hash
				s->group = linda_group << KEEPER_MAGIC_SHIFT;
				s->name[0] = 0;
				// copy name + terminating 0 only if a name was actually provided
				memcpy( s->name, linda_name, name_len ? name_len + 1 : 0);
			}
			// NULL on allocation failure; the deep-userdata machinery deals with it
			return s;
		}

		case eDO_delete:
		{
			struct s_Keeper* K;
			struct s_Linda* linda = lua_touserdata( L, 1);
			ASSERT_L( linda);

			/* Clean associated structures in the keeper state.
			 */
			K = keeper_acquire( linda->U->keepers, LINDA_KEEPER_HASHSEED( linda));
			if( K && K->L) // can be NULL if this happens during main state shutdown (lanes is GC'ed -> no keepers -> no need to cleanup)
			{
				keeper_call( linda->U, K->L, KEEPER_API( clear), L, linda, 0);
			}
			keeper_release( K);

			/* There aren't any lanes waiting on these lindas, since all proxies
			 * have been gc'ed. Right?
			 */
			SIGNAL_FREE( &linda->read_happened);
			SIGNAL_FREE( &linda->write_happened);
			free( linda);
			return NULL;
		}

		case eDO_metatable:
		{
			STACK_CHECK( L);
			lua_newtable( L);
			// metatable is its own index
			lua_pushvalue( L, -1);
			lua_setfield( L, -2, "__index");

			// protect metatable from external access
			lua_pushliteral( L, "Linda");
			lua_setfield( L, -2, "__metatable");

			lua_pushcfunction( L, LG_linda_tostring);
			lua_setfield( L, -2, "__tostring");

			// Decoda __towatch support
			lua_pushcfunction( L, LG_linda_dump);
			lua_setfield( L, -2, "__towatch");

			lua_pushcfunction( L, LG_linda_concat);
			lua_setfield( L, -2, "__concat");

			// [-1]: linda metatable — register the proxy methods
			lua_pushcfunction( L, LG_linda_send);
			lua_setfield( L, -2, "send");

			lua_pushcfunction( L, LG_linda_receive);
			lua_setfield( L, -2, "receive");

			lua_pushcfunction( L, LG_linda_limit);
			lua_setfield( L, -2, "limit");

			lua_pushcfunction( L, LG_linda_set);
			lua_setfield( L, -2, "set");

			lua_pushcfunction( L, LG_linda_count);
			lua_setfield( L, -2, "count");

			lua_pushcfunction( L, LG_linda_get);
			lua_setfield( L, -2, "get");

			lua_pushcfunction( L, LG_linda_cancel);
			lua_setfield( L, -2, "cancel");

			lua_pushcfunction( L, LG_linda_deep);
			lua_setfield( L, -2, "deep");

			lua_pushcfunction( L, LG_linda_dump);
			lua_setfield( L, -2, "dump");

			// sentinel marking batched receive mode
			lua_pushliteral( L, BATCH_SENTINEL);
			lua_setfield(L, -2, "batched");

			// sentinel standing for nil values transferred through the linda
			lua_pushlightuserdata( L, NIL_SENTINEL);
			lua_setfield(L, -2, "null");

			STACK_END( L, 1);
			return NULL;
		}

		case eDO_module:
		// linda is a special case because we know lanes must be loaded from the main lua state
		// to be able to ever get here, so we know it will remain loaded as long as the main state is around
		// in other words, forever.
		default:
		{
			// ID function must stay silent on unknown strings, to allow future extensions
			return NULL;
		}
	}
}
- /*
- * ud = lanes.linda( [name[,group]])
- *
- * returns a linda object, or raises an error if creation failed
- */
- LUAG_FUNC( linda)
- {
- int const top = lua_gettop( L);
- luaL_argcheck( L, top <= 2, top, "too many arguments");
- if( top == 1)
- {
- int const t = lua_type( L, 1);
- luaL_argcheck( L, t == LUA_TSTRING || t == LUA_TNUMBER, 1, "wrong parameter (should be a string or a number)");
- }
- else if( top == 2)
- {
- luaL_checktype( L, 1, LUA_TSTRING);
- luaL_checktype( L, 2, LUA_TNUMBER);
- }
- return luaG_newdeepuserdata( L, linda_id);
- }
- /*
- * ###############################################################################################
- * ########################################## Finalizer ##########################################
- * ###############################################################################################
- */
- //---
- // void= finalizer( finalizer_func )
- //
- // finalizer_func( [err, stack_tbl] )
- //
- // Add a function that will be called when exiting the lane, either via
- // normal return or an error.
- //
- LUAG_FUNC( set_finalizer)
- {
- luaL_argcheck( L, lua_isfunction( L, 1), 1, "finalizer should be a function");
- luaL_argcheck( L, lua_gettop( L) == 1, 1, "too many arguments");
- // Get the current finalizer table (if any)
- push_registry_table( L, FINALIZER_REG_KEY, TRUE /*do create if none*/); // finalizer {finalisers}
- STACK_GROW( L, 2);
- lua_pushinteger( L, lua_rawlen( L, -1) + 1); // finalizer {finalisers} idx
- lua_pushvalue( L, 1); // finalizer {finalisers} idx finalizer
- lua_rawset( L, -3); // finalizer {finalisers}
- lua_pop( L, 2); //
- return 0;
- }
- //---
- // Run finalizers - if any - with the given parameters
- //
- // If 'rc' is nonzero, error message and stack index (the latter only when ERROR_FULL_STACK == 1) are available as:
- // [-1]: stack trace (table)
- // [-2]: error message (any type)
- //
- // Returns:
- // 0 if finalizers were run without error (or there were none)
- // LUA_ERRxxx return code if any of the finalizers failed
- //
- // TBD: should we add stack trace on failing finalizer, wouldn't be hard..
- //
- static void push_stack_trace( lua_State* L, int rc_, int stk_base_);
// Run all registered finalizers, newest first.
// 'lua_rc' is the status of the lane body: when != LUA_OK, the error message
// (stack slot 1) and optional stack trace (slot 2) are forwarded to each finalizer.
// Returns LUA_OK, or the error code of the first failing finalizer (whose error
// and stack trace then replace whatever was on the stack).
static int run_finalizers( lua_State* L, int lua_rc)
{
	int finalizers_index;
	int n;
	int err_handler_index = 0;
	int rc = LUA_OK;                                                                // ...
	if( !push_registry_table( L, FINALIZER_REG_KEY, FALSE))                         // ... finalizers?
	{
		return 0; // no finalizers
	}

	STACK_GROW( L, 5);

	finalizers_index = lua_gettop( L);

#if ERROR_FULL_STACK
	// install our error handler so that failing finalizers get a stack trace too
	lua_pushcfunction( L, lane_error);                                              // ... finalizers lane_error
	err_handler_index = lua_gettop( L);
#endif // ERROR_FULL_STACK

	// iterate in reverse registration order
	for( n = (int) lua_rawlen( L, finalizers_index); n > 0; -- n)
	{
		int args = 0;
		lua_pushinteger( L, n);                                                       // ... finalizers lane_error n
		lua_rawget( L, finalizers_index);                                             // ... finalizers lane_error finalizer
		ASSERT_L( lua_isfunction( L, -1));
		if( lua_rc != LUA_OK) // we have an error message and an optional stack trace at the bottom of the stack
		{
			// the finalizer table sits at slot 2 (error only) or 3 (error + stack trace)
			ASSERT_L( finalizers_index == 2 || finalizers_index == 3);
			//char const* err_msg = lua_tostring( L, 1);
			lua_pushvalue( L, 1);                                                       // ... finalizers lane_error finalizer err_msg
			// note we don't always have a stack trace for example when CANCEL_ERROR, or when we got an error that doesn't call our handler, such as LUA_ERRMEM
			if( finalizers_index == 3)
			{
				lua_pushvalue( L, 2);                                                     // ... finalizers lane_error finalizer err_msg stack_trace
			}
			args = finalizers_index - 1;
		}

		// if no error from the main body, finalizer doesn't receive any argument, else it gets the error message and optional stack trace
		rc = lua_pcall( L, args, 0, err_handler_index);                               // ... finalizers lane_error err_msg2?
		if( rc != LUA_OK)
		{
			push_stack_trace( L, rc, lua_gettop( L));
			// If one finalizer fails, don't run the others. Return this
			// as the 'real' error, replacing what we could have had (or not)
			// from the actual code.
			break;
		}
		// no error, proceed to next finalizer                                        // ... finalizers lane_error
	}

	if( rc != LUA_OK)
	{
		// ERROR_FULL_STACK accounts for the presence of lane_error on the stack
		int nb_err_slots = lua_gettop( L) - finalizers_index - ERROR_FULL_STACK;
		// a finalizer generated an error, this is what we leave of the stack
		for( n = nb_err_slots; n > 0; -- n)
		{
			lua_replace( L, n);
		}
		// leave on the stack only the error and optional stack trace produced by the error in the finalizer
		lua_settop( L, nb_err_slots);
	}
	else // no error from the finalizers, make sure only the original return values from the lane body remain on the stack
	{
		lua_settop( L, finalizers_index - 1);
	}

	return rc;
}
- /*
- * ###############################################################################################
- * ########################################### Threads ###########################################
- * ###############################################################################################
- */
- //---
- // = thread_cancel( lane_ud [,timeout_secs=0.0] [,force_kill_bool=false] )
- //
- // The originator thread asking us specifically to cancel the other thread.
- //
- // 'timeout': <0: wait forever, until the lane is finished
- // 0.0: just signal it to cancel, no time waited
- // >0: time to wait for the lane to detect cancellation
- //
- // 'force_kill': if true, and lane does not detect cancellation within timeout,
- // it is forcefully killed. Using this with 0.0 timeout means just kill
- // (unless the lane is already finished).
- //
- // Returns: true if the lane was already finished (DONE/ERROR_ST/CANCELLED) or if we
- // managed to cancel it.
- // false if the cancellation timed out, or a kill was needed.
- //
// Outcome of thread_cancel()
typedef enum
{
	CR_Timeout,   // the lane did not detect the cancellation request within the allotted time
	CR_Cancelled, // the lane was already done, or stopped within the allotted time
	CR_Killed     // the lane was forcefully killed (now, or by a previous cancellation attempt)
} cancel_result;
// Ask lane 's' to cancel itself.
// secs < 0.0: soft cancel only — set the flag and (if 'force') wake the lane, but don't wait
// secs >= 0.0: hard cancel — set the flag, wake the lane, and wait up to 'secs' for it to stop;
//              if it doesn't and 'force' is set, kill the thread outright.
// 'waitkill_timeout_' is the extra time granted (pthread only) for a killed thread to actually terminate.
static cancel_result thread_cancel( lua_State* L, struct s_lane* s, double secs, bool_t force, double waitkill_timeout_)
{
	cancel_result result;
	// remember that lanes are not transferable: only one thread can cancel a lane, so no multithreading issue here
	// We can read 's->status' without locks, but not wait for it (if Posix no PTHREAD_TIMEDJOIN)
	if( s->mstatus == KILLED)
	{
		// a previous cancellation already killed this lane
		result = CR_Killed;
	}
	else if( s->status < DONE)
	{
		// signal the linda to wake up the thread so that it can react to the cancel query
		// let us hope we never land here with a pointer on a linda that has been destroyed...
		if( secs < 0.0)
		{
			s->cancel_request = CANCEL_SOFT;    // it's now signaled to stop
			// negative timeout: we don't want to truly abort the lane, we just want it to react to cancel_test() on its own
			if( force) // wake the thread so that execution returns from any pending linda operation if desired
			{
				SIGNAL_T *waiting_on = s->waiting_on;
				if( s->status == WAITING && waiting_on != NULL)
				{
					SIGNAL_ALL( waiting_on);
				}
			}
			// say we succeeded though
			result = CR_Cancelled;
		}
		else
		{
			s->cancel_request = CANCEL_HARD;    // it's now signaled to stop
			{
				// wake the lane if it is blocked on a linda signal
				SIGNAL_T *waiting_on = s->waiting_on;
				if( s->status == WAITING && waiting_on != NULL)
				{
					SIGNAL_ALL( waiting_on);
				}
			}

			result = THREAD_WAIT( &s->thread, secs, &s->done_signal, &s->done_lock, &s->status) ? CR_Cancelled : CR_Timeout;

			if( (result == CR_Timeout) && force)
			{
				// Killing is asynchronous; we _will_ wait for it to be done at
				// GC, to make sure the data structure can be released (alternative
				// would be use of "cancellation cleanup handlers" that at least
				// PThread seems to have).
				//
				THREAD_KILL( &s->thread);
#if THREADAPI == THREADAPI_PTHREAD
				// pthread: make sure the thread is really stopped!
				// note that this may block forever if the lane doesn't call a cancellation point and pthread doesn't honor PTHREAD_CANCEL_ASYNCHRONOUS
				result = THREAD_WAIT( &s->thread, waitkill_timeout_, &s->done_signal, &s->done_lock, &s->status);
				if( result == CR_Timeout)
				{
					return luaL_error( L, "force-killed lane failed to terminate within %f second%s", waitkill_timeout_, waitkill_timeout_ > 1 ? "s" : "");
				}
#endif // THREADAPI == THREADAPI_PTHREAD
				s->mstatus = KILLED;     // mark 'gc' to wait for it
				// note that s->status value must remain to whatever it was at the time of the kill
				// because we need to know if we can lua_close() the Lua State or not.
				result = CR_Killed;
			}
		}
	}
	else
	{
		// say "ok" by default, including when lane is already done
		result = CR_Cancelled;
	}
	return result;
}
//
// Terminator of the selfdestruct chain: '(struct s_lane*)(-1)', not NULL, so that
// a NULL 'selfdestruct_next' can mean "not in the chain at all":
// 'selfdestruct_first -> ... -> ... -> (-1)'
// (modifications of the chain itself are protected by U->selfdestruct_cs)
#define SELFDESTRUCT_END ((struct s_lane*)(-1))

/*
 * Add the lane to selfdestruct chain; the ones still running at the end of the
 * whole process will be cancelled.
 */
- static void selfdestruct_add( struct s_lane* s)
- {
- MUTEX_LOCK( &s->U->selfdestruct_cs);
- assert( s->selfdestruct_next == NULL);
- s->selfdestruct_next = s->U->selfdestruct_first;
- s->U->selfdestruct_first= s;
- MUTEX_UNLOCK( &s->U->selfdestruct_cs);
- }
- /*
- * A free-running lane has ended; remove it from selfdestruct chain
- */
- static bool_t selfdestruct_remove( struct s_lane* s)
- {
- bool_t found = FALSE;
- MUTEX_LOCK( &s->U->selfdestruct_cs);
- {
- // Make sure (within the MUTEX) that we actually are in the chain
- // still (at process exit they will remove us from chain and then
- // cancel/kill).
- //
- if( s->selfdestruct_next != NULL)
- {
- struct s_lane** ref = (struct s_lane**) &s->U->selfdestruct_first;
- while( *ref != SELFDESTRUCT_END )
- {
- if( *ref == s)
- {
- *ref = s->selfdestruct_next;
- s->selfdestruct_next = NULL;
- // the terminal shutdown should wait until the lane is done with its lua_close()
- ++ s->U->selfdestructing_count;
- found = TRUE;
- break;
- }
- ref = (struct s_lane**) &((*ref)->selfdestruct_next);
- }
- assert( found);
- }
- }
- MUTEX_UNLOCK( &s->U->selfdestruct_cs);
- return found;
- }
/*
** mutex-protected allocator for use with Lua states that have non-threadsafe allocators (such as LuaJIT)
*/
struct ProtectedAllocator_s
{
	lua_Alloc allocF; // the wrapped (non-threadsafe) allocator function
	void* allocUD;    // the opaque userdata expected by 'allocF'
	MUTEX_T lock;     // serializes every call through 'allocF'
};
- void * protected_lua_Alloc( void *ud, void *ptr, size_t osize, size_t nsize)
- {
- void* p;
- struct ProtectedAllocator_s* s = (struct ProtectedAllocator_s*) ud;
- MUTEX_LOCK( &s->lock);
- p = s->allocF( s->allocUD, ptr, osize, nsize);
- MUTEX_UNLOCK( &s->lock);
- return p;
- }
- /*
- * Process end; cancel any still free-running threads
- */
- static int selfdestruct_gc( lua_State* L)
- {
- struct s_Universe* U = (struct s_Universe*) lua_touserdata( L, 1);
- while( U->selfdestruct_first != SELFDESTRUCT_END) // true at most once!
- {
- // Signal _all_ still running threads to exit (including the timer thread)
- //
- MUTEX_LOCK( &U->selfdestruct_cs);
- {
- struct s_lane* s = U->selfdestruct_first;
- while( s != SELFDESTRUCT_END)
- {
- // attempt a regular unforced hard cancel with a small timeout
- bool_t cancelled = THREAD_ISNULL( s->thread) || thread_cancel( L, s, 0.0001, FALSE, 0.0);
- // if we failed, and we know the thread is waiting on a linda
- if( cancelled == FALSE && s->status == WAITING && s->waiting_on != NULL)
- {
- // signal the linda to wake up the thread so that it can react to the cancel query
- // let us hope we never land here with a pointer on a linda that has been destroyed...
- SIGNAL_T *waiting_on = s->waiting_on;
- //s->waiting_on = NULL; // useful, or not?
- SIGNAL_ALL( waiting_on);
- }
- s = s->selfdestruct_next;
- }
- }
- MUTEX_UNLOCK( &U->selfdestruct_cs);
- // When noticing their cancel, the lanes will remove themselves from
- // the selfdestruct chain.
- // TBD: Not sure if Windows (multi core) will require the timed approach,
- // or single Yield. I don't have machine to test that (so leaving
- // for timed approach). -- AKa 25-Oct-2008
- // OS X 10.5 (Intel) needs more to avoid segfaults.
- //
- // "make test" is okay. 100's of "make require" are okay.
- //
- // Tested on MacBook Core Duo 2GHz and 10.5.5:
- // -- AKa 25-Oct-2008
- //
- {
- lua_Number const shutdown_timeout = lua_tonumber( L, lua_upvalueindex( 1));
- double const t_until = now_secs() + shutdown_timeout;
- while( U->selfdestruct_first != SELFDESTRUCT_END)
- {
- YIELD(); // give threads time to act on their cancel
- {
- // count the number of cancelled thread that didn't have the time to act yet
- int n = 0;
- double t_now = 0.0;
- MUTEX_LOCK( &U->selfdestruct_cs);
- {
- struct s_lane* s = U->selfdestruct_first;
- while( s != SELFDESTRUCT_END)
- {
- if( s->cancel_request == CANCEL_HARD)
- ++ n;
- s = s->selfdestruct_next;
- }
- }
- MUTEX_UNLOCK( &U->selfdestruct_cs);
- // if timeout elapsed, or we know all threads have acted, stop waiting
- t_now = now_secs();
- if( n == 0 || (t_now >= t_until))
- {
- DEBUGSPEW_CODE( fprintf( stderr, "%d uncancelled lane(s) remain after waiting %fs at process end.\n", n, shutdown_timeout - (t_until - t_now)));
- break;
- }
- }
- }
- }
- // If some lanes are currently cleaning after themselves, wait until they are done.
- // They are no longer listed in the selfdestruct chain, but they still have to lua_close().
- {
- bool_t again = TRUE;
- do
- {
- MUTEX_LOCK( &U->selfdestruct_cs);
- again = (U->selfdestructing_count > 0) ? TRUE : FALSE;
- MUTEX_UNLOCK( &U->selfdestruct_cs);
- YIELD();
- } while( again);
- }
- //---
- // Kill the still free running threads
- //
- if( U->selfdestruct_first != SELFDESTRUCT_END)
- {
- unsigned int n = 0;
- // first thing we did was to raise the linda signals the threads were waiting on (if any)
- // therefore, any well-behaved thread should be in CANCELLED state
- // these are not running, and the state can be closed
- MUTEX_LOCK( &U->selfdestruct_cs);
- {
- struct s_lane* s = U->sel…
Large files files are truncated, but you can click here to view the full file