
/dep/acelite/ace/OS_NS_Thread.cpp

https://github.com/chucho/FaceCore
C++ | 1709 lines
  1. // $Id: OS_NS_Thread.cpp 91693 2010-09-09 12:57:54Z johnnyw $
  2. #include "ace/OS_NS_Thread.h"
  3. #if !defined (ACE_HAS_INLINED_OSCALLS)
  4. # include "ace/OS_NS_Thread.inl"
  5. #endif /* ACE_HAS_INLINED_OSCALLS */
  6. #include "ace/OS_NS_stdio.h"
  7. #include "ace/Sched_Params.h"
  8. #include "ace/OS_Memory.h"
  9. #include "ace/OS_Thread_Adapter.h"
  10. #include "ace/Min_Max.h"
  11. #include "ace/Object_Manager_Base.h"
  12. #include "ace/OS_NS_errno.h"
  13. #include "ace/OS_NS_ctype.h"
  14. #include "ace/Log_Msg.h" // for ACE_ASSERT
  15. // This is necessary to work around nasty problems with MVS C++.
  16. #include "ace/Auto_Ptr.h"
  17. #include "ace/Thread_Mutex.h"
  18. #include "ace/Condition_T.h"
  19. #include "ace/Guard_T.h"
  20. extern "C" void
  21. ACE_MUTEX_LOCK_CLEANUP_ADAPTER_NAME (void *args)
  22. {
  23. ACE_VERSIONED_NAMESPACE_NAME::ACE_OS::mutex_lock_cleanup (args);
  24. }
  25. #if !defined(ACE_WIN32) && defined (__IBMCPP__) && (__IBMCPP__ >= 400)
  26. # define ACE_BEGINTHREADEX(STACK, STACKSIZE, ENTRY_POINT, ARGS, FLAGS, THR_ID) \
  27. (*THR_ID = ::_beginthreadex ((void(_Optlink*)(void*))ENTRY_POINT, STACK, STACKSIZE, ARGS), *THR_ID)
  28. #elif defined (ACE_HAS_WINCE)
  29. # define ACE_BEGINTHREADEX(STACK, STACKSIZE, ENTRY_POINT, ARGS, FLAGS, THR_ID) \
  30. CreateThread (0, STACKSIZE, (unsigned long (__stdcall *) (void *)) ENTRY_POINT, ARGS, (FLAGS) & (CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION), (unsigned long *) THR_ID)
  31. #elif defined(ACE_HAS_WTHREADS)
  32. // The Green Hills compiler gets confused when __stdcall is embedded in the
  33. // parameter list, so we define the type ACE_WIN32THRFUNC_T and use it
  34. // instead.
  35. typedef unsigned (__stdcall *ACE_WIN32THRFUNC_T)(void*);
  36. # define ACE_BEGINTHREADEX(STACK, STACKSIZE, ENTRY_POINT, ARGS, FLAGS, THR_ID) \
  37. ::_beginthreadex (STACK, STACKSIZE, (ACE_WIN32THRFUNC_T) ENTRY_POINT, ARGS, FLAGS, (unsigned int *) THR_ID)
  38. #endif /* defined (__IBMCPP__) && (__IBMCPP__ >= 400) */
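// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the ACE_BEGINTHREADEX
// macros above hide the calling-convention cast behind ACE_WIN32THRFUNC_T.
// The standalone Windows program below shows the same idea with the CRT's
// _beginthreadex directly; the names THRFUNC_T and entry are hypothetical.
#include <process.h>    // _beginthreadex
#include <windows.h>    // WaitForSingleObject, CloseHandle
#include <cstdio>

typedef unsigned (__stdcall *THRFUNC_T)(void *); // same trick as ACE_WIN32THRFUNC_T

static unsigned __stdcall entry (void *arg)
{
  std::printf ("worker got %d\n", *static_cast<int *> (arg));
  return 0u;
}

int main ()
{
  int value = 42;
  unsigned tid = 0;
  // The cast to THRFUNC_T keeps __stdcall out of the call site, which is
  // exactly why the typedef exists in the macro above.
  HANDLE h = reinterpret_cast<HANDLE> (
    ::_beginthreadex (0, 0, (THRFUNC_T) entry, &value, 0, &tid));
  if (h == 0)
    return 1;
  ::WaitForSingleObject (h, INFINITE);
  ::CloseHandle (h);
  return 0;
}
// ---------------------------------------------------------------------------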
  39. /*****************************************************************************/
  40. ACE_BEGIN_VERSIONED_NAMESPACE_DECL
  41. void
  42. ACE_Thread_ID::to_string (char *thr_string) const
  43. {
  44. char format[128]; // Converted format string
  45. char *fp = 0; // Current format pointer
  46. fp = format;
  47. *fp++ = '%'; // Copy in the %
  48. #if defined (ACE_WIN32)
  49. ACE_OS::strcpy (fp, "u");
  50. ACE_OS::sprintf (thr_string,
  51. format,
  52. static_cast <unsigned> (this->thread_id_));
  53. #else
  54. # if defined (ACE_MVS) || defined (ACE_TANDEM_T1248_PTHREADS)
  55. // MVS's pthread_t is a struct... yuck. So use the ACE 5.0
  56. // code for it.
  57. ACE_OS::strcpy (fp, "u");
  58. ACE_OS::sprintf (thr_string, format, thread_handle_);
  59. # else
  60. // Yes, this is an ugly C-style cast, but the
  61. // correct C++ cast is different depending on
  62. // whether the t_id is an integral type or a pointer
  63. // type. FreeBSD uses a pointer type, but doesn't
  64. // have a _np function to get an integral type, like
  65. // the OSes above.
  66. ACE_OS::strcpy (fp, "lu");
  67. ACE_OS::sprintf (thr_string,
  68. format,
  69. (unsigned long) thread_handle_);
  70. # endif /* ACE_MVS || ACE_TANDEM_T1248_PTHREADS */
  71. #endif /* ACE_WIN32 */
  72. }
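// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the "%lu plus C-style
// cast" branch of to_string() above, reduced to a standalone POSIX program.
// It assumes pthread_t converts to an integral value on this platform, which
// is not universally true and is exactly why to_string() special-cases
// Windows and MVS.  The helper name thread_id_to_string is hypothetical.
#include <cstddef>
#include <cstdio>
#include <pthread.h>

static void thread_id_to_string (char *buf, std::size_t len)
{
  // Same formatting decision as the generic branch of to_string().
  std::snprintf (buf, len, "%lu", (unsigned long) pthread_self ());
}

int main ()
{
  char buf[32];
  thread_id_to_string (buf, sizeof buf);
  std::printf ("thread id: %s\n", buf);
  return 0;
}
// ---------------------------------------------------------------------------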
  73. /*****************************************************************************/
  74. #if defined (ACE_WIN32) || defined (ACE_HAS_TSS_EMULATION)
  75. #if defined (ACE_HAS_TSS_EMULATION)
  76. u_int ACE_TSS_Emulation::total_keys_ = 0;
  77. ACE_TSS_Keys* ACE_TSS_Emulation::tss_keys_used_ = 0;
  78. ACE_TSS_Emulation::ACE_TSS_DESTRUCTOR
  79. ACE_TSS_Emulation::tss_destructor_[ACE_TSS_Emulation::ACE_TSS_THREAD_KEYS_MAX]
  80. = { 0 };
  81. # if defined (ACE_HAS_THREAD_SPECIFIC_STORAGE)
  82. bool ACE_TSS_Emulation::key_created_ = false;
  83. ACE_OS_thread_key_t ACE_TSS_Emulation::native_tss_key_;
  84. /* static */
  85. # if defined (ACE_HAS_THR_C_FUNC)
  86. extern "C"
  87. void
  88. ACE_TSS_Emulation_cleanup (void *)
  89. {
  90. // This hook really must exist so the ACE_TSS_Emulation code can run its
  91. // TSS cleanup when a thread exits.
  92. }
  93. # else
  94. void
  95. ACE_TSS_Emulation_cleanup (void *)
  96. {
  97. // This hook really must exist so the ACE_TSS_Emulation code can run its
  98. // TSS cleanup when a thread exits.
  99. }
  100. # endif /* ACE_HAS_THR_C_FUNC */
  101. void **
  102. ACE_TSS_Emulation::tss_base (void* ts_storage[], u_int *ts_created)
  103. {
  104. // TSS Singleton implementation.
  105. // Create the one native TSS key, if necessary.
  106. if (!key_created_)
  107. {
  108. // Double-checked lock . . .
  109. ACE_TSS_BASE_GUARD
  110. if (!key_created_)
  111. {
  112. ACE_NO_HEAP_CHECK;
  113. if (ACE_OS::thr_keycreate_native (&native_tss_key_,
  114. &ACE_TSS_Emulation_cleanup) != 0)
  115. {
  116. ACE_ASSERT (0);
  117. return 0; // Major problems, this should *never* happen!
  118. }
  119. key_created_ = true;
  120. }
  121. }
  122. void **old_ts_storage = 0;
  123. // Get the tss_storage from thread-OS specific storage.
  124. if (ACE_OS::thr_getspecific_native (native_tss_key_,
  125. (void **) &old_ts_storage) == -1)
  126. {
  127. ACE_ASSERT (false);
  128. return 0; // This should not happen!
  129. }
  130. // Check to see if this is the first time in for this thread.
  131. // This block can also be entered after a fork () in the child process.
  132. if (old_ts_storage == 0)
  133. {
  134. if (ts_created)
  135. *ts_created = 1u;
  136. // Use the ts_storage passed as argument, if non-zero.  It may have
  137. // been allocated on the caller's stack (this code cannot tell), so
  138. // the cleanup hook must take care not to free it.
  139. // If ts_storage is zero, allocate (and eventually leak) the
  140. // storage array.
  141. if (ts_storage == 0)
  142. {
  143. ACE_NO_HEAP_CHECK;
  144. ACE_NEW_RETURN (ts_storage,
  145. void*[ACE_TSS_THREAD_KEYS_MAX],
  146. 0);
  147. // Zero the entire TSS array. Do it manually instead of
  148. // using memset, for optimum speed. Though, memset may be
  149. // faster :-)
  150. void **tss_base_p = ts_storage;
  151. for (u_int i = 0;
  152. i < ACE_TSS_THREAD_KEYS_MAX;
  153. ++i)
  154. *tss_base_p++ = 0;
  155. }
  156. // Store the pointer in thread-specific storage. It gets
  157. // deleted via the ACE_TSS_Emulation_cleanup function when the
  158. // thread terminates.
  159. if (ACE_OS::thr_setspecific_native (native_tss_key_,
  160. (void *) ts_storage) != 0)
  161. {
  162. ACE_ASSERT (false);
  163. return 0; // This should not happen!
  164. }
  165. }
  166. else
  167. if (ts_created)
  168. *ts_created = 0;
  169. return ts_storage ? ts_storage : old_ts_storage;
  170. }
  171. # endif /* ACE_HAS_THREAD_SPECIFIC_STORAGE */
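// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): tss_base() above uses
// a double-checked lock (key_created_ plus ACE_TSS_BASE_GUARD) to create the
// single native TSS key exactly once.  Below is a modern-C++ rendering of the
// same once-only initialization using std::call_once over POSIX keys; the
// names native_key, key_once and get_native_key are hypothetical.
#include <cstdio>
#include <mutex>
#include <pthread.h>

static pthread_key_t native_key;
static std::once_flag key_once;

static void tss_cleanup (void *)
{
  // Per-thread cleanup hook, like ACE_TSS_Emulation_cleanup above.
}

static pthread_key_t get_native_key ()
{
  // Plays the role of "if (!key_created_) { guard; if (!key_created_) ... }".
  std::call_once (key_once, [] { pthread_key_create (&native_key, &tss_cleanup); });
  return native_key;
}

int main ()
{
  int value = 7;
  pthread_setspecific (get_native_key (), &value);
  std::printf ("%d\n", *static_cast<int *> (pthread_getspecific (get_native_key ())));
  return 0;
}
// ---------------------------------------------------------------------------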
  172. u_int
  173. ACE_TSS_Emulation::total_keys ()
  174. {
  175. ACE_OS_Recursive_Thread_Mutex_Guard (
  176. *static_cast <ACE_recursive_thread_mutex_t *>
  177. (ACE_OS_Object_Manager::preallocated_object[
  178. ACE_OS_Object_Manager::ACE_TSS_KEY_LOCK]));
  179. return total_keys_;
  180. }
  181. int
  182. ACE_TSS_Emulation::next_key (ACE_thread_key_t &key)
  183. {
  184. ACE_OS_Recursive_Thread_Mutex_Guard (
  185. *static_cast <ACE_recursive_thread_mutex_t *>
  186. (ACE_OS_Object_Manager::preallocated_object[
  187. ACE_OS_Object_Manager::ACE_TSS_KEY_LOCK]));
  188. // Initialize the tss_keys_used_ pointer on first use.
  189. if (tss_keys_used_ == 0)
  190. {
  191. ACE_NEW_RETURN (tss_keys_used_, ACE_TSS_Keys, -1);
  192. }
  193. if (total_keys_ < ACE_TSS_THREAD_KEYS_MAX)
  194. {
  195. u_int counter = 0;
  196. // Loop through all possible keys and check whether a key is free
  197. for ( ;counter < ACE_TSS_THREAD_KEYS_MAX; counter++)
  198. {
  199. ACE_thread_key_t localkey;
  200. # if defined (ACE_HAS_NONSCALAR_THREAD_KEY_T)
  201. ACE_OS::memset (&localkey, 0, sizeof (ACE_thread_key_t));
  202. ACE_OS::memcpy (&localkey, &counter, sizeof (u_int));
  203. # else
  204. localkey = counter;
  205. # endif /* ACE_HAS_NONSCALAR_THREAD_KEY_T */
  206. // If the key is not set as used, we can give out this key, if not
  207. // we have to search further
  208. if (tss_keys_used_->is_set(localkey) == 0)
  209. {
  210. tss_keys_used_->test_and_set(localkey);
  211. key = localkey;
  212. break;
  213. }
  214. }
  215. ++total_keys_;
  216. return 0;
  217. }
  218. else
  219. {
  220. key = ACE_OS::NULL_key;
  221. return -1;
  222. }
  223. }
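// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): next_key() above scans
// a bit set for the first unused slot and marks it used.  The same search over
// a std::bitset; MAX_KEYS and next_free_key are hypothetical stand-ins for
// ACE_TSS_THREAD_KEYS_MAX and the ACE_TSS_Keys machinery.
#include <bitset>
#include <cstddef>
#include <cstdio>

static const std::size_t MAX_KEYS = 64;

static int next_free_key (std::bitset<MAX_KEYS> &used)
{
  for (std::size_t i = 0; i < MAX_KEYS; ++i)
    if (!used.test (i))
      {
        used.set (i);                    // test_and_set() in the original
        return static_cast<int> (i);
      }
  return -1;                             // table full: the ACE code returns -1
}

int main ()
{
  std::bitset<MAX_KEYS> used;
  std::printf ("%d %d\n", next_free_key (used), next_free_key (used)); // 0 1
  return 0;
}
// ---------------------------------------------------------------------------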
  224. int
  225. ACE_TSS_Emulation::release_key (ACE_thread_key_t key)
  226. {
  227. ACE_OS_Recursive_Thread_Mutex_Guard (
  228. *static_cast <ACE_recursive_thread_mutex_t *>
  229. (ACE_OS_Object_Manager::preallocated_object[
  230. ACE_OS_Object_Manager::ACE_TSS_KEY_LOCK]));
  231. if (tss_keys_used_ != 0 &&
  232. tss_keys_used_->test_and_clear (key) == 0)
  233. {
  234. --total_keys_;
  235. return 0;
  236. }
  237. return 1;
  238. }
  239. int
  240. ACE_TSS_Emulation::is_key (ACE_thread_key_t key)
  241. {
  242. ACE_OS_Recursive_Thread_Mutex_Guard (
  243. *static_cast <ACE_recursive_thread_mutex_t *>
  244. (ACE_OS_Object_Manager::preallocated_object[
  245. ACE_OS_Object_Manager::ACE_TSS_KEY_LOCK]));
  246. if (tss_keys_used_ != 0 &&
  247. tss_keys_used_->is_set (key) == 1)
  248. {
  249. return 1;
  250. }
  251. return 0;
  252. }
  253. void *
  254. ACE_TSS_Emulation::tss_open (void *ts_storage[ACE_TSS_THREAD_KEYS_MAX])
  255. {
  256. # if defined (ACE_HAS_THREAD_SPECIFIC_STORAGE)
  257. // On VxWorks, in particular, don't check to see if the field
  258. // is 0. It isn't always, specifically, when a program is run
  259. // directly by the shell (without spawning a new task) after
  260. // another program has been run.
  261. u_int ts_created = 0;
  262. tss_base (ts_storage, &ts_created);
  263. if (ts_created)
  264. {
  265. # else /* ! ACE_HAS_THREAD_SPECIFIC_STORAGE */
  266. tss_base () = ts_storage;
  267. # endif
  268. // Zero the entire TSS array. Do it manually instead of using
  269. // memset, for optimum speed. Though, memset may be faster :-)
  270. void **tss_base_p = tss_base ();
  271. for (u_int i = 0; i < ACE_TSS_THREAD_KEYS_MAX; ++i, ++tss_base_p)
  272. {
  273. *tss_base_p = 0;
  274. }
  275. return tss_base ();
  276. # if defined (ACE_HAS_THREAD_SPECIFIC_STORAGE)
  277. }
  278. else
  279. {
  280. return 0;
  281. }
  282. # endif /* ACE_HAS_THREAD_SPECIFIC_STORAGE */
  283. }
  284. void
  285. ACE_TSS_Emulation::tss_close ()
  286. {
  287. #if defined (ACE_HAS_THREAD_SPECIFIC_STORAGE)
  288. ACE_OS::thr_keyfree_native (native_tss_key_);
  289. #endif /* ACE_HAS_THREAD_SPECIFIC_STORAGE */
  290. }
  291. #endif /* ACE_HAS_TSS_EMULATION */
  292. #endif /* WIN32 || ACE_HAS_TSS_EMULATION */
  293. /*****************************************************************************/
  294. #if defined (ACE_WIN32) || defined (ACE_HAS_TSS_EMULATION)
  295. // Moved class ACE_TSS_Ref declaration to OS.h so it can be visible to
  296. // the single file of template instantiations.
  297. ACE_TSS_Ref::ACE_TSS_Ref (ACE_thread_t id)
  298. : tid_(id)
  299. {
  300. ACE_OS_TRACE ("ACE_TSS_Ref::ACE_TSS_Ref");
  301. }
  302. ACE_TSS_Ref::ACE_TSS_Ref (void)
  303. {
  304. ACE_OS_TRACE ("ACE_TSS_Ref::ACE_TSS_Ref");
  305. }
  306. // Check for equality.
  307. bool
  308. ACE_TSS_Ref::operator== (const ACE_TSS_Ref &info) const
  309. {
  310. ACE_OS_TRACE ("ACE_TSS_Ref::operator==");
  311. return this->tid_ == info.tid_;
  312. }
  313. // Check for inequality.
  314. ACE_SPECIAL_INLINE
  315. bool
  316. ACE_TSS_Ref::operator != (const ACE_TSS_Ref &tss_ref) const
  317. {
  318. ACE_OS_TRACE ("ACE_TSS_Ref::operator !=");
  319. return !(*this == tss_ref);
  320. }
  321. // moved class ACE_TSS_Info declaration
  322. // to OS.h so it can be visible to the
  323. // single file of template instantiations
  324. ACE_TSS_Info::ACE_TSS_Info (ACE_thread_key_t key,
  325. ACE_TSS_Info::Destructor dest)
  326. : key_ (key),
  327. destructor_ (dest),
  328. thread_count_ (-1)
  329. {
  330. ACE_OS_TRACE ("ACE_TSS_Info::ACE_TSS_Info");
  331. }
  332. ACE_TSS_Info::ACE_TSS_Info (void)
  333. : key_ (ACE_OS::NULL_key),
  334. destructor_ (0),
  335. thread_count_ (-1)
  336. {
  337. ACE_OS_TRACE ("ACE_TSS_Info::ACE_TSS_Info");
  338. }
  339. # if defined (ACE_HAS_NONSCALAR_THREAD_KEY_T)
  340. static inline bool operator== (const ACE_thread_key_t &lhs,
  341. const ACE_thread_key_t &rhs)
  342. {
  343. return ! ACE_OS::memcmp (&lhs, &rhs, sizeof (ACE_thread_key_t));
  344. }
  345. static inline bool operator!= (const ACE_thread_key_t &lhs,
  346. const ACE_thread_key_t &rhs)
  347. {
  348. return ! (lhs == rhs);
  349. }
  350. # endif /* ACE_HAS_NONSCALAR_THREAD_KEY_T */
  351. // Check for equality.
  352. bool
  353. ACE_TSS_Info::operator== (const ACE_TSS_Info &info) const
  354. {
  355. ACE_OS_TRACE ("ACE_TSS_Info::operator==");
  356. return this->key_ == info.key_;
  357. }
  358. // Check for inequality.
  359. bool
  360. ACE_TSS_Info::operator != (const ACE_TSS_Info &info) const
  361. {
  362. ACE_OS_TRACE ("ACE_TSS_Info::operator !=");
  363. return !(*this == info);
  364. }
  365. void
  366. ACE_TSS_Info::dump (void)
  367. {
  368. # if defined (ACE_HAS_DUMP)
  369. // ACE_OS_TRACE ("ACE_TSS_Info::dump");
  370. # if 0
  371. ACE_DEBUG ((LM_DEBUG, ACE_BEGIN_DUMP, this));
  372. ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("key_ = %u\n"), this->key_));
  373. ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("destructor_ = %u\n"), this->destructor_));
  374. ACE_DEBUG ((LM_DEBUG, ACE_END_DUMP));
  375. # endif /* 0 */
  376. # endif /* ACE_HAS_DUMP */
  377. }
  378. // Moved class ACE_TSS_Keys declaration to OS.h so it can be visible
  379. // to the single file of template instantiations.
  380. ACE_TSS_Keys::ACE_TSS_Keys (void)
  381. {
  382. for (u_int i = 0; i < ACE_WORDS; ++i)
  383. {
  384. key_bit_words_[i] = 0;
  385. }
  386. }
  387. ACE_SPECIAL_INLINE
  388. void
  389. ACE_TSS_Keys::find (const u_int key, u_int &word, u_int &bit)
  390. {
  391. word = key / ACE_BITS_PER_WORD;
  392. bit = key % ACE_BITS_PER_WORD;
  393. }
  394. int
  395. ACE_TSS_Keys::test_and_set (const ACE_thread_key_t key)
  396. {
  397. ACE_KEY_INDEX (key_index, key);
  398. u_int word, bit;
  399. find (key_index, word, bit);
  400. if (ACE_BIT_ENABLED (key_bit_words_[word], 1 << bit))
  401. {
  402. return 1;
  403. }
  404. else
  405. {
  406. ACE_SET_BITS (key_bit_words_[word], 1 << bit);
  407. return 0;
  408. }
  409. }
  410. int
  411. ACE_TSS_Keys::test_and_clear (const ACE_thread_key_t key)
  412. {
  413. ACE_KEY_INDEX (key_index, key);
  414. u_int word, bit;
  415. find (key_index, word, bit);
  416. if (word < ACE_WORDS && ACE_BIT_ENABLED (key_bit_words_[word], 1 << bit))
  417. {
  418. ACE_CLR_BITS (key_bit_words_[word], 1 << bit);
  419. return 0;
  420. }
  421. else
  422. {
  423. return 1;
  424. }
  425. }
  426. int
  427. ACE_TSS_Keys::is_set (const ACE_thread_key_t key) const
  428. {
  429. ACE_KEY_INDEX (key_index, key);
  430. u_int word, bit;
  431. find (key_index, word, bit);
  432. return word < ACE_WORDS ? ACE_BIT_ENABLED (key_bit_words_[word], 1 << bit) : 0;
  433. }
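// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the word/bit arithmetic
// behind ACE_TSS_Keys::find(), test_and_set() and is_set(), spelled out with
// explicit 32-bit words.  BITS_PER_WORD, WORDS and key_is_set are hypothetical
// stand-ins for ACE_BITS_PER_WORD, ACE_WORDS and is_set().
#include <cassert>

enum { BITS_PER_WORD = 32, WORDS = 4 };

static bool key_is_set (const unsigned (&words)[WORDS], unsigned key)
{
  unsigned const word = key / BITS_PER_WORD;   // which word holds the bit
  unsigned const bit  = key % BITS_PER_WORD;   // which bit inside that word
  return word < WORDS && (words[word] & (1u << bit)) != 0;
}

int main ()
{
  unsigned words[WORDS] = { 0, 0, 0, 0 };
  words[1] |= (1u << 3);                       // mark key 35 (1 * 32 + 3) used
  assert (key_is_set (words, 35));
  assert (!key_is_set (words, 36));
  return 0;
}
// ---------------------------------------------------------------------------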
  434. /**
  435. * @class ACE_TSS_Cleanup
  436. * @brief Singleton that helps to manage the lifetime of TSS objects and keys.
  437. */
  438. class ACE_TSS_Cleanup
  439. {
  440. public:
  441. /// Register a newly-allocated key
  442. /// @param key the key to be monitored
  443. /// @param destructor the function to call to delete objects stored via this key
  444. int insert (ACE_thread_key_t key, void (*destructor)(void *));
  445. /// Mark a key as being used by this thread.
  446. void thread_use_key (ACE_thread_key_t key);
  447. /// This thread is no longer using this key
  448. /// call destructor if appropriate
  449. int thread_detach_key (ACE_thread_key_t key);
  450. /// This key is no longer used
  451. /// Release key if use count == 0
  452. /// fail if use_count != 0;
  453. /// @param key the key to be released
  454. int free_key (ACE_thread_key_t key);
  455. /// Cleanup the thread-specific objects. Does _NOT_ exit the thread.
  456. /// For each used key perform the same actions as free_key.
  457. void thread_exit (void);
  458. private:
  459. void dump (void);
  460. /// Release a key used by this thread
  461. /// @param info reference to the info for this key
  462. /// @param destructor out arg to receive destructor function ptr
  463. /// @param tss_obj out arg to receive pointer to deletable object
  464. void thread_release (
  465. ACE_TSS_Info &info,
  466. ACE_TSS_Info::Destructor & destructor,
  467. void *& tss_obj);
  468. /// remove key if it's unused (thread_count == 0)
  469. /// @param info reference to the info for this key
  470. int remove_key (ACE_TSS_Info &info);
  471. /// Find the TSS keys (if any) for this thread.
  472. /// @param thread_keys reference to pointer to be filled in by this function.
  473. /// @return false if keys don't exist.
  474. bool find_tss_keys (ACE_TSS_Keys *& thread_keys) const;
  475. /// Accessor for this thread's ACE_TSS_Keys instance.
  476. /// Creates the keys if necessary.
  477. ACE_TSS_Keys *tss_keys ();
  478. /// Ensure singleton.
  479. ACE_TSS_Cleanup (void);
  480. ~ACE_TSS_Cleanup (void);
  481. /// ACE_TSS_Cleanup access only via TSS_Cleanup_Instance
  482. friend class TSS_Cleanup_Instance;
  483. private:
  484. // Array of <ACE_TSS_Info> objects.
  485. typedef ACE_TSS_Info ACE_TSS_TABLE[ACE_DEFAULT_THREAD_KEYS];
  486. typedef ACE_TSS_Info *ACE_TSS_TABLE_ITERATOR;
  487. /// Table of <ACE_TSS_Info>'s.
  488. ACE_TSS_TABLE table_;
  489. /// Key for the thread-specific ACE_TSS_Keys
  490. /// Used by find_tss_keys() or tss_keys() to find the
  491. /// bit array that records whether each TSS key is in
  492. /// use by this thread.
  493. ACE_thread_key_t in_use_;
  494. };
  495. /*****************************************************************************/
  496. /**
  497. * @class TSS_Cleanup_Instance
  498. * @brief A class to manage an instance pointer to ACE_TSS_Cleanup.
  499. * Note that the double-checked locking pattern doesn't allow
  500. * safe deletion.
  501. * Callers who wish to access the singleton ACE_TSS_Cleanup must
  502. * do so by instantiating a TSS_Cleanup_Instance, calling the valid
  503. * method to be sure the ACE_TSS_Cleanup is available, then using
  504. * the TSS_Cleanup_Instance as a pointer to the instance.
  505. * Construction argument to the TSS_Cleanup_Instance determines how
  506. * it is to be used:
  507. * CREATE means allow this call to create an ACE_TSS_Cleanup if necessary.
  508. * USE means use the existing ACE_TSS_Cleanup, but do not create a new one.
  509. * DESTROY means provide exclusive access to the ACE_TSS_Cleanup, then
  510. * delete it when the TSS_Cleanup_Instance goes out of scope.
  511. */
  512. class TSS_Cleanup_Instance
  513. {
  514. public:
  515. enum Purpose
  516. {
  517. CREATE,
  518. USE,
  519. DESTROY
  520. };
  521. TSS_Cleanup_Instance (Purpose purpose = USE);
  522. ~TSS_Cleanup_Instance();
  523. bool valid();
  524. ACE_TSS_Cleanup * operator ->();
  525. private:
  526. ACE_TSS_Cleanup * operator *();
  527. private:
  528. static unsigned int reference_count_;
  529. static ACE_TSS_Cleanup * instance_;
  530. static ACE_Thread_Mutex* mutex_;
  531. static ACE_Thread_Condition<ACE_Thread_Mutex>* condition_;
  532. private:
  533. ACE_TSS_Cleanup * ptr_;
  534. unsigned short flags_;
  535. enum
  536. {
  537. FLAG_DELETING = 1,
  538. FLAG_VALID_CHECKED = 2
  539. };
  540. };
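// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): how a caller follows
// the CREATE/USE/DESTROY protocol described in the class comment above.  This
// mirrors what ACE_OS::cleanup_tss() does later in this file; the function
// name example_tss_cleanup_usage is hypothetical.
static void example_tss_cleanup_usage (void)
{
  {
    // Pin the singleton for normal use; valid() must be checked first.
    TSS_Cleanup_Instance cleanup;                       // Purpose == USE
    if (cleanup.valid ())
      cleanup->thread_exit ();
  }                                                     // reference released here
  {
    // At shutdown, a DESTROY guard takes exclusive ownership: it waits for
    // the reference count to reach zero, then deletes the singleton when it
    // goes out of scope.
    TSS_Cleanup_Instance destroyer (TSS_Cleanup_Instance::DESTROY);
    if (destroyer.valid ())
      ; // nothing else to do, destruction happens in the destructor
  }
}
// ---------------------------------------------------------------------------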
  541. TSS_Cleanup_Instance::TSS_Cleanup_Instance (Purpose purpose)
  542. : ptr_(0)
  543. , flags_(0)
  544. {
  545. // During static construction or construction of the ACE_Object_Manager,
  546. // there can be only one thread in this constructor at any one time, so
  547. // it's safe to check for a zero mutex_. If it's zero, we create a new
  548. // mutex and condition variable.
  549. if (mutex_ == 0)
  550. {
  551. ACE_NEW (mutex_, ACE_Thread_Mutex ());
  552. ACE_NEW (condition_, ACE_Thread_Condition<ACE_Thread_Mutex> (*mutex_));
  553. }
  554. ACE_GUARD (ACE_Thread_Mutex, m, *mutex_);
  555. if (purpose == CREATE)
  556. {
  557. if (instance_ == 0)
  558. {
  559. instance_ = new ACE_TSS_Cleanup();
  560. }
  561. ptr_ = instance_;
  562. ++reference_count_;
  563. }
  564. else if(purpose == DESTROY)
  565. {
  566. if (instance_ != 0)
  567. {
  568. ptr_ = instance_;
  569. instance_ = 0;
  570. ACE_SET_BITS(flags_, FLAG_DELETING);
  571. while (reference_count_ > 0)
  572. {
  573. condition_->wait();
  574. }
  575. }
  576. }
  577. else // must be normal use
  578. {
  579. ACE_ASSERT(purpose == USE);
  580. if (instance_ != 0)
  581. {
  582. ptr_ = instance_;
  583. ++reference_count_;
  584. }
  585. }
  586. }
  587. TSS_Cleanup_Instance::~TSS_Cleanup_Instance (void)
  588. {
  589. // Variable to hold the mutex_ to delete outside the scope of the
  590. // guard.
  591. ACE_Thread_Mutex *del_mutex = 0;
  592. // scope the guard
  593. {
  594. ACE_GUARD (ACE_Thread_Mutex, guard, *mutex_);
  595. if (ptr_ != 0)
  596. {
  597. if (ACE_BIT_ENABLED (flags_, FLAG_DELETING))
  598. {
  599. ACE_ASSERT(instance_ == 0);
  600. ACE_ASSERT(reference_count_ == 0);
  601. delete ptr_;
  602. del_mutex = mutex_ ;
  603. mutex_ = 0;
  604. }
  605. else
  606. {
  607. ACE_ASSERT (reference_count_ > 0);
  608. --reference_count_;
  609. if (reference_count_ == 0 && instance_ == 0)
  610. condition_->signal ();
  611. }
  612. }
  613. }// end of guard scope
  614. if (del_mutex != 0)
  615. {
  616. delete condition_;
  617. condition_ = 0;
  618. delete del_mutex;
  619. }
  620. }
  621. bool
  622. TSS_Cleanup_Instance::valid()
  623. {
  624. ACE_SET_BITS(flags_, FLAG_VALID_CHECKED);
  625. return (this->instance_ != 0);
  626. }
  627. ACE_TSS_Cleanup *
  628. TSS_Cleanup_Instance::operator *()
  629. {
  630. ACE_ASSERT(ACE_BIT_ENABLED(flags_, FLAG_VALID_CHECKED));
  631. return instance_;
  632. }
  633. ACE_TSS_Cleanup *
  634. TSS_Cleanup_Instance::operator ->()
  635. {
  636. ACE_ASSERT(ACE_BIT_ENABLED(flags_, FLAG_VALID_CHECKED));
  637. return instance_;
  638. }
  639. // = Static object initialization.
  640. unsigned int TSS_Cleanup_Instance::reference_count_ = 0;
  641. ACE_TSS_Cleanup * TSS_Cleanup_Instance::instance_ = 0;
  642. ACE_Thread_Mutex* TSS_Cleanup_Instance::mutex_ = 0;
  643. ACE_Thread_Condition<ACE_Thread_Mutex>* TSS_Cleanup_Instance::condition_ = 0;
  644. ACE_TSS_Cleanup::~ACE_TSS_Cleanup (void)
  645. {
  646. }
  647. void
  648. ACE_TSS_Cleanup::thread_exit (void)
  649. {
  650. ACE_OS_TRACE ("ACE_TSS_Cleanup::thread_exit");
  651. // variables to hold the destructors, keys
  652. // and pointers to the object to be destructed
  653. // the actual destruction is deferred until the guard is released
  654. ACE_TSS_Info::Destructor destructor[ACE_DEFAULT_THREAD_KEYS];
  655. void * tss_obj[ACE_DEFAULT_THREAD_KEYS];
  656. ACE_thread_key_t keys[ACE_DEFAULT_THREAD_KEYS];
  657. // count of items to be destroyed
  658. unsigned int d_count = 0;
  659. // scope the guard
  660. {
  661. ACE_TSS_CLEANUP_GUARD
  662. // if not initialized or already cleaned up
  663. ACE_TSS_Keys *this_thread_keys = 0;
  664. if (! find_tss_keys (this_thread_keys) )
  665. {
  666. return;
  667. }
  668. // Minor hack: Iterating in reverse order means the LOG buffer which is
  669. // accidentally allocated first will be accidentally deallocated (almost)
  670. // last -- in case someone logs something from the other destructors.
  671. // applications should not count on this behavior because platforms which
  672. // do not use ACE_TSS_Cleanup may delete objects in other orders.
  673. unsigned int key_index = ACE_DEFAULT_THREAD_KEYS;
  674. while( key_index > 0)
  675. {
  676. --key_index;
  677. ACE_TSS_Info & info = this->table_[key_index];
  678. // if this key is in use by this thread
  679. if (info.key_in_use () && this_thread_keys->is_set(info.key_))
  680. {
  681. // defer deleting the in-use key until all others have been deleted
  682. if(info.key_ != this->in_use_)
  683. {
  684. destructor[d_count] = 0;
  685. tss_obj[d_count] = 0;
  686. keys[d_count] = 0;
  687. this->thread_release (info, destructor[d_count], tss_obj[d_count]);
  688. if (destructor[d_count] != 0 && tss_obj[d_count] != 0)
  689. {
  690. keys[d_count] = info.key_;
  691. ++d_count;
  692. }
  693. }
  694. }
  695. }
  696. // remove the in_use bit vector last
  697. ACE_KEY_INDEX (use_index, this->in_use_);
  698. ACE_TSS_Info & info = this->table_[use_index];
  699. destructor[d_count] = 0;
  700. tss_obj[d_count] = 0;
  701. keys[d_count] = 0;
  702. this->thread_release (info, destructor[d_count], tss_obj[d_count]);
  703. if (destructor[d_count] != 0 && tss_obj[d_count] != 0)
  704. {
  705. keys[d_count] = info.key_;
  706. ++d_count;
  707. }
  708. } // end of guard scope
  709. for (unsigned int d_index = 0; d_index < d_count; ++d_index)
  710. {
  711. (*destructor[d_index])(tss_obj[d_index]);
  712. #if defined (ACE_HAS_TSS_EMULATION)
  713. ACE_TSS_Emulation::ts_object (keys[d_index]) = 0;
  714. #else // defined (ACE_HAS_TSS_EMULATION)
  715. ACE_OS::thr_setspecific_native (keys[d_index], 0);
  716. #endif // defined (ACE_HAS_TSS_EMULATION)
  717. }
  718. }
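// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): thread_exit() above
// collects destructors and objects while holding ACE_TSS_CLEANUP_GUARD and
// only invokes them after the guard is released, so a destructor that
// re-enters this module cannot deadlock.  The same pattern in isolation;
// table_lock, registered and run_exit_hooks are hypothetical names.
#include <cstddef>
#include <cstdio>
#include <functional>
#include <mutex>
#include <vector>

static std::mutex table_lock;
static std::vector<std::function<void ()> > registered;

static void run_exit_hooks ()
{
  std::vector<std::function<void ()> > to_run;
  {
    std::lock_guard<std::mutex> guard (table_lock);  // ACE_TSS_CLEANUP_GUARD
    to_run.swap (registered);                        // record the work under the lock
  }                                                  // guard released here
  for (std::size_t i = 0; i < to_run.size (); ++i)
    to_run[i] ();                                    // invoke outside the lock
}

int main ()
{
  registered.push_back ([] { std::printf ("destructor ran\n"); });
  run_exit_hooks ();
  return 0;
}
// ---------------------------------------------------------------------------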
  719. extern "C" void
  720. ACE_TSS_Cleanup_keys_destroyer (void *tss_keys)
  721. {
  722. delete static_cast <ACE_TSS_Keys *> (tss_keys);
  723. }
  724. ACE_TSS_Cleanup::ACE_TSS_Cleanup (void)
  725. : in_use_ (ACE_OS::NULL_key)
  726. {
  727. ACE_OS_TRACE ("ACE_TSS_Cleanup::ACE_TSS_Cleanup");
  728. }
  729. int
  730. ACE_TSS_Cleanup::insert (ACE_thread_key_t key,
  731. void (*destructor)(void *))
  732. {
  733. ACE_OS_TRACE ("ACE_TSS_Cleanup::insert");
  734. ACE_TSS_CLEANUP_GUARD
  735. ACE_KEY_INDEX (key_index, key);
  736. ACE_ASSERT (key_index < ACE_DEFAULT_THREAD_KEYS);
  737. if (key_index < ACE_DEFAULT_THREAD_KEYS)
  738. {
  739. ACE_ASSERT (table_[key_index].thread_count_ == -1);
  740. table_[key_index] = ACE_TSS_Info (key, destructor);
  741. table_[key_index].thread_count_ = 0; // inserting it does not use it
  742. // but it does "allocate" it
  743. return 0;
  744. }
  745. else
  746. {
  747. return -1;
  748. }
  749. }
  750. int
  751. ACE_TSS_Cleanup::free_key (ACE_thread_key_t key)
  752. {
  753. ACE_OS_TRACE ("ACE_TSS_Cleanup::free_key");
  754. ACE_TSS_CLEANUP_GUARD
  755. ACE_KEY_INDEX (key_index, key);
  756. if (key_index < ACE_DEFAULT_THREAD_KEYS)
  757. {
  758. return remove_key (this->table_ [key_index]);
  759. }
  760. return -1;
  761. }
  762. int
  763. ACE_TSS_Cleanup::remove_key (ACE_TSS_Info &info)
  764. {
  765. // assume CLEANUP_GUARD is held by caller
  766. ACE_OS_TRACE ("ACE_TSS_Cleanup::remove_key");
  767. #if 0 // This was a good idea, but POSIX says it's legal to delete used keys.
  768. // When this is done, any existing TSS objects controlled by this key are leaked
  769. // There is no "right thing" to do in this case
  770. // only remove it if all threads are done with it
  771. if (info.thread_count_ != 0)
  772. {
  773. return -1;
  774. }
  775. #endif // 0
  776. #if !defined (ACE_HAS_TSS_EMULATION)
  777. ACE_OS_thread_key_t temp_key = info.key_;
  778. ACE_OS::thr_keyfree_native (temp_key);
  779. #endif /* !ACE_HAS_TSS_EMULATION */
  780. if (info.key_ == this->in_use_)
  781. {
  782. this->in_use_ = ACE_OS::NULL_key;
  783. }
  784. info.key_in_use (0);
  785. info.destructor_ = 0;
  786. return 0;
  787. }
  788. int
  789. ACE_TSS_Cleanup::thread_detach_key (ACE_thread_key_t key)
  790. {
  791. // variables to hold the destructor and the object to be destructed
  792. // the actual call is deferred until the guard is released
  793. ACE_TSS_Info::Destructor destructor = 0;
  794. void * tss_obj = 0;
  795. // scope the guard
  796. {
  797. ACE_TSS_CLEANUP_GUARD
  798. ACE_KEY_INDEX (key_index, key);
  799. ACE_ASSERT (key_index < sizeof(this->table_)/sizeof(this->table_[0])
  800. && this->table_[key_index].key_ == key);
  801. ACE_TSS_Info &info = this->table_ [key_index];
  802. // sanity check
  803. if (!info.key_in_use ())
  804. {
  805. return -1;
  806. }
  807. this->thread_release (info, destructor, tss_obj);
  808. } // end of scope for the Guard
  809. // if there's a destructor and an object to be destroyed
  810. if (destructor != 0 && tss_obj != 0)
  811. {
  812. (*destructor) (tss_obj);
  813. }
  814. return 0;
  815. }
  816. void
  817. ACE_TSS_Cleanup::thread_release (
  818. ACE_TSS_Info &info,
  819. ACE_TSS_Info::Destructor & destructor,
  820. void *& tss_obj)
  821. {
  822. // assume guard is held by caller
  823. // Find the TSS keys (if any) for this thread
  824. // do not create them if they don't exist
  825. ACE_TSS_Keys * thread_keys = 0;
  826. if (find_tss_keys (thread_keys))
  827. {
  828. // if this key is in use by this thread
  829. if (thread_keys->test_and_clear(info.key_) == 0)
  830. {
  831. // save destructor & pointer to tss object
  832. // until after the guard is released
  833. destructor = info.destructor_;
  834. ACE_OS::thr_getspecific (info.key_, &tss_obj);
  835. ACE_ASSERT (info.thread_count_ > 0);
  836. --info.thread_count_;
  837. }
  838. }
  839. }
  840. void
  841. ACE_TSS_Cleanup::thread_use_key (ACE_thread_key_t key)
  842. {
  843. // If the key's ACE_TSS_Info in-use bit for this thread is not set,
  844. // set it and increment the key's thread_count_.
  845. if (! tss_keys ()->test_and_set (key))
  846. {
  847. ACE_TSS_CLEANUP_GUARD
  848. // Retrieve the key's ACE_TSS_Info and increment its thread_count_.
  849. ACE_KEY_INDEX (key_index, key);
  850. ACE_TSS_Info &key_info = this->table_ [key_index];
  851. ACE_ASSERT (key_info.key_in_use ());
  852. ++key_info.thread_count_;
  853. }
  854. }
  855. void
  856. ACE_TSS_Cleanup::dump (void)
  857. {
  858. # if defined (ACE_HAS_DUMP)
  859. // Iterate through all the thread-specific items and dump them all.
  860. ACE_TSS_TABLE_ITERATOR key_info = table_;
  861. for (unsigned int i = 0;
  862. i < ACE_DEFAULT_THREAD_KEYS;
  863. ++key_info, ++i)
  864. key_info->dump ();
  865. # endif /* ACE_HAS_DUMP */
  866. }
  867. bool
  868. ACE_TSS_Cleanup::find_tss_keys (ACE_TSS_Keys *& tss_keys) const
  869. {
  870. if (this->in_use_ == ACE_OS::NULL_key)
  871. return false;
  872. if (ACE_OS::thr_getspecific (in_use_,
  873. reinterpret_cast<void **> (&tss_keys)) == -1)
  874. {
  875. ACE_ASSERT (false);
  876. return false; // This should not happen!
  877. }
  878. return tss_keys != 0;
  879. }
  880. ACE_TSS_Keys *
  881. ACE_TSS_Cleanup::tss_keys ()
  882. {
  883. if (this->in_use_ == ACE_OS::NULL_key)
  884. {
  885. ACE_TSS_CLEANUP_GUARD
  886. // Double-check;
  887. if (in_use_ == ACE_OS::NULL_key)
  888. {
  889. // Initialize in_use_ with a new key.
  890. if (ACE_OS::thr_keycreate (&in_use_,
  891. &ACE_TSS_Cleanup_keys_destroyer))
  892. {
  893. ACE_ASSERT (false);
  894. return 0; // Major problems, this should *never* happen!
  895. }
  896. }
  897. }
  898. void *ts_keys = 0;
  899. if (ACE_OS::thr_getspecific (in_use_, &ts_keys) == -1)
  900. {
  901. ACE_ASSERT (false);
  902. return 0; // This should not happen!
  903. }
  904. if (ts_keys == 0)
  905. {
  906. ACE_NEW_RETURN (ts_keys,
  907. ACE_TSS_Keys,
  908. 0);
  909. // Store the dynamically allocated pointer in thread-specific
  910. // storage.
  911. if (ACE_OS::thr_setspecific (in_use_, ts_keys) == -1)
  912. {
  913. ACE_ASSERT (false);
  914. delete reinterpret_cast <ACE_TSS_Keys*> (ts_keys);
  915. return 0; // Major problems, this should *never* happen!
  916. }
  917. }
  918. return reinterpret_cast <ACE_TSS_Keys*>(ts_keys);
  919. }
  920. #endif /* ACE_WIN32 || ACE_HAS_TSS_EMULATION */
  921. /*****************************************************************************/
  922. // = Static initialization.
  923. // This is necessary to deal with POSIX pthreads insanity. This
  924. // guarantees that we've got a "zero'd" thread id even when
  925. // ACE_thread_t, ACE_hthread_t, and ACE_thread_key_t are implemented
  926. // as structures... Under no circumstances should these be given
  927. // initial values.
  928. // Note: these three objects require static construction.
  929. ACE_thread_t ACE_OS::NULL_thread;
  930. ACE_hthread_t ACE_OS::NULL_hthread;
  931. #if defined (ACE_HAS_TSS_EMULATION)
  932. ACE_thread_key_t ACE_OS::NULL_key = static_cast <ACE_thread_key_t> (-1);
  933. #else /* ! ACE_HAS_TSS_EMULATION */
  934. ACE_thread_key_t ACE_OS::NULL_key;
  935. #endif /* ! ACE_HAS_TSS_EMULATION */
  936. /*****************************************************************************/
  937. void
  938. ACE_OS::cleanup_tss (const u_int main_thread)
  939. {
  940. #if defined (ACE_HAS_TSS_EMULATION) || defined (ACE_WIN32)
  941. { // scope the cleanup instance
  942. // Call TSS destructors for current thread.
  943. TSS_Cleanup_Instance cleanup;
  944. if (cleanup.valid ())
  945. {
  946. cleanup->thread_exit ();
  947. }
  948. }
  949. #endif /* ACE_HAS_TSS_EMULATION || ACE_WIN32 */
  950. if (main_thread)
  951. {
  952. #if !defined (ACE_HAS_TSS_EMULATION) && !defined (ACE_HAS_MINIMAL_ACE_OS)
  953. // Just close the ACE_Log_Msg for the current (which should be
  954. // main) thread. We don't have TSS emulation; if there's native
  955. // TSS, it should call its destructors when the main thread
  956. // exits.
  957. ACE_Base_Thread_Adapter::close_log_msg ();
  958. #endif /* ! ACE_HAS_TSS_EMULATION && ! ACE_HAS_MINIMAL_ACE_OS */
  959. #if defined (ACE_WIN32) || defined (ACE_HAS_TSS_EMULATION)
  960. // Finally, free up the ACE_TSS_Cleanup instance. This method gets
  961. // called by the ACE_Object_Manager.
  962. TSS_Cleanup_Instance cleanup(TSS_Cleanup_Instance::DESTROY);
  963. if (cleanup.valid ())
  964. {
  965. ; // the pointer deletes the Cleanup when it goes out of scope
  966. }
  967. #endif /* WIN32 || ACE_HAS_TSS_EMULATION */
  968. #if defined (ACE_HAS_TSS_EMULATION)
  969. ACE_TSS_Emulation::tss_close ();
  970. #endif /* ACE_HAS_TSS_EMULATION */
  971. }
  972. }
  973. /*****************************************************************************/
  974. // CONDITIONS BEGIN
  975. /*****************************************************************************/
  976. #if defined (ACE_LACKS_COND_T)
  977. int
  978. ACE_OS::cond_broadcast (ACE_cond_t *cv)
  979. {
  980. ACE_OS_TRACE ("ACE_OS::cond_broadcast");
  981. # if defined (ACE_HAS_THREADS)
  982. // The <external_mutex> must be locked before this call is made.
  983. // This is needed to ensure that <waiters_> and <was_broadcast_> are
  984. // consistent relative to each other.
  985. if (ACE_OS::thread_mutex_lock (&cv->waiters_lock_) != 0)
  986. {
  987. return -1;
  988. }
  989. bool have_waiters = false;
  990. if (cv->waiters_ > 0)
  991. {
  992. // We are broadcasting, even if there is just one waiter...
  993. // Record the fact that we are broadcasting. This helps the
  994. // cond_wait() method know how to optimize itself. Be sure to
  995. // set this with the <waiters_lock_> held.
  996. cv->was_broadcast_ = 1;
  997. have_waiters = true;
  998. }
  999. if (ACE_OS::thread_mutex_unlock (&cv->waiters_lock_) != 0)
  1000. {
  1001. // This is really bad, we have the lock but can't release it anymore
  1002. return -1;
  1003. }
  1004. int result = 0;
  1005. if (have_waiters)
  1006. {
  1007. // Wake up all the waiters.
  1008. if (ACE_OS::sema_post (&cv->sema_, cv->waiters_) == -1)
  1009. result = -1;
  1010. // Wait for all the awakened threads to acquire their part of
  1011. // the counting semaphore.
  1012. # if defined (ACE_VXWORKS)
  1013. else if (ACE_OS::sema_wait (&cv->waiters_done_) == -1)
  1014. # else
  1015. else if (ACE_OS::event_wait (&cv->waiters_done_) == -1)
  1016. # endif /* ACE_VXWORKS */
  1017. result = -1;
  1018. // This is okay, even without the <waiters_lock_> held because
  1019. // no other waiter threads can wake up to access it.
  1020. cv->was_broadcast_ = 0;
  1021. }
  1022. return result;
  1023. # else
  1024. ACE_UNUSED_ARG (cv);
  1025. ACE_NOTSUP_RETURN (-1);
  1026. # endif /* ACE_HAS_THREADS */
  1027. }
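// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the caller-side rule
// stated above, namely that the <external_mutex> must be held across the
// state change and the broadcast, shown with POSIX primitives since the
// contract is the same for the emulated ACE_cond_t.  The names lock, cond,
// ready and announce_ready are hypothetical.
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int ready = 0;

static void announce_ready (void)
{
  pthread_mutex_lock (&lock);      // hold the external mutex ...
  ready = 1;                       // ... while the shared state changes ...
  pthread_cond_broadcast (&cond);  // ... and while the waiters are woken.
  pthread_mutex_unlock (&lock);
}
// ---------------------------------------------------------------------------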
  1028. int
  1029. ACE_OS::cond_destroy (ACE_cond_t *cv)
  1030. {
  1031. ACE_OS_TRACE ("ACE_OS::cond_destroy");
  1032. # if defined (ACE_HAS_THREADS)
  1033. # if defined (ACE_HAS_WTHREADS)
  1034. ACE_OS::event_destroy (&cv->waiters_done_);
  1035. # elif defined (ACE_VXWORKS)
  1036. ACE_OS::sema_destroy (&cv->waiters_done_);
  1037. # endif /* ACE_VXWORKS */
  1038. int result = 0;
  1039. if (ACE_OS::thread_mutex_destroy (&cv->waiters_lock_) != 0)
  1040. result = -1;
  1041. if (ACE_OS::sema_destroy (&cv->sema_) != 0)
  1042. result = -1;
  1043. return result;
  1044. # else
  1045. ACE_UNUSED_ARG (cv);
  1046. ACE_NOTSUP_RETURN (-1);
  1047. # endif /* ACE_HAS_THREADS */
  1048. }
  1049. int
  1050. ACE_OS::cond_init (ACE_cond_t *cv,
  1051. ACE_condattr_t &attributes,
  1052. const char *name, void *arg)
  1053. {
  1054. return
  1055. ACE_OS::cond_init (cv, static_cast<short> (attributes.type), name, arg);
  1056. }
  1057. # if defined (ACE_HAS_WCHAR)
  1058. int
  1059. ACE_OS::cond_init (ACE_cond_t *cv,
  1060. ACE_condattr_t &attributes,
  1061. const wchar_t *name, void *arg)
  1062. {
  1063. return
  1064. ACE_OS::cond_init (cv, static_cast<short> (attributes.type), name, arg);
  1065. }
  1066. # endif /* ACE_HAS_WCHAR */
  1067. int
  1068. ACE_OS::cond_init (ACE_cond_t *cv, short type, const char *name, void *arg)
  1069. {
  1070. ACE_OS_TRACE ("ACE_OS::cond_init");
  1071. # if defined (ACE_HAS_THREADS)
  1072. cv->waiters_ = 0;
  1073. cv->was_broadcast_ = 0;
  1074. int result = 0;
  1075. if (ACE_OS::sema_init (&cv->sema_, 0, type, name, arg) == -1)
  1076. result = -1;
  1077. else if (ACE_OS::thread_mutex_init (&cv->waiters_lock_) == -1)
  1078. result = -1;
  1079. # if defined (ACE_VXWORKS)
  1080. else if (ACE_OS::sema_init (&cv->waiters_done_, 0, type) == -1)
  1081. # else
  1082. else if (ACE_OS::event_init (&cv->waiters_done_) == -1)
  1083. # endif /* ACE_VXWORKS */
  1084. result = -1;
  1085. return result;
  1086. # else
  1087. ACE_UNUSED_ARG (cv);
  1088. ACE_UNUSED_ARG (type);
  1089. ACE_UNUSED_ARG (name);
  1090. ACE_UNUSED_ARG (arg);
  1091. ACE_NOTSUP_RETURN (-1);
  1092. # endif /* ACE_HAS_THREADS */
  1093. }
  1094. # if defined (ACE_HAS_WCHAR)
  1095. int
  1096. ACE_OS::cond_init (ACE_cond_t *cv, short type, const wchar_t *name, void *arg)
  1097. {
  1098. ACE_OS_TRACE ("ACE_OS::cond_init");
  1099. # if defined (ACE_HAS_THREADS)
  1100. cv->waiters_ = 0;
  1101. cv->was_broadcast_ = 0;
  1102. int result = 0;
  1103. if (ACE_OS::sema_init (&cv->sema_, 0, type, name, arg) == -1)
  1104. result = -1;
  1105. else if (ACE_OS::thread_mutex_init (&cv->waiters_lock_) == -1)
  1106. result = -1;
  1107. # if defined (ACE_VXWORKS)
  1108. else if (ACE_OS::sema_init (&cv->waiters_done_, 0, type) == -1)
  1109. # else
  1110. else if (ACE_OS::event_init (&cv->waiters_done_) == -1)
  1111. # endif /* ACE_VXWORKS */
  1112. result = -1;
  1113. return result;
  1114. # else
  1115. ACE_UNUSED_ARG (cv);
  1116. ACE_UNUSED_ARG (type);
  1117. ACE_UNUSED_ARG (name);
  1118. ACE_UNUSED_ARG (arg);
  1119. ACE_NOTSUP_RETURN (-1);
  1120. # endif /* ACE_HAS_THREADS */
  1121. }
  1122. # endif /* ACE_HAS_WCHAR */
  1123. int
  1124. ACE_OS::cond_signal (ACE_cond_t *cv)
  1125. {
  1126. ACE_OS_TRACE ("ACE_OS::cond_signal");
  1127. # if defined (ACE_HAS_THREADS)
  1128. // If there aren't any waiters, then this is a no-op. Note that
  1129. // this function *must* be called with the <external_mutex> held
  1130. // since otherwise there is a race condition that can lead to the
  1131. // lost wakeup bug... This is needed to ensure that the <waiters_>
  1132. // value is not in an inconsistent internal state while being
  1133. // updated by another thread.
  1134. if (ACE_OS::thread_mutex_lock (&cv->waiters_lock_) != 0)
  1135. return -1;
  1136. bool const have_waiters = cv->waiters_ > 0;
  1137. if (ACE_OS::thread_mutex_unlock (&cv->waiters_lock_) != 0)
  1138. return -1;
  1139. if (have_waiters)
  1140. return ACE_OS::sema_post (&cv->sema_);
  1141. else
  1142. return 0; // No-op
  1143. # else
  1144. ACE_UNUSED_ARG (cv);
  1145. ACE_NOTSUP_RETURN (-1);
  1146. # endif /* ACE_HAS_THREADS */
  1147. }
  1148. int
  1149. ACE_OS::cond_wait (ACE_cond_t *cv,
  1150. ACE_mutex_t *external_mutex)
  1151. {
  1152. ACE_OS_TRACE ("ACE_OS::cond_wait");
  1153. # if defined (ACE_HAS_THREADS)
  1154. // Prevent race conditions on the <waiters_> count.
  1155. if (ACE_OS::thread_mutex_lock (&cv->waiters_lock_) != 0)
  1156. return -1;
  1157. ++cv->waiters_;
  1158. if (ACE_OS::thread_mutex_unlock (&cv->waiters_lock_) != 0)
  1159. return -1;
  1160. int result = 0;
  1161. # if defined (ACE_HAS_SIGNAL_OBJECT_AND_WAIT)
  1162. if (external_mutex->type_ == USYNC_PROCESS)
  1163. {
  1164. // This call will automatically release the mutex and wait on the semaphore.
  1165. ACE_WIN32CALL (ACE_ADAPT_RETVAL (::SignalObjectAndWait (external_mutex->proc_mutex_,
  1166. cv->sema_, INFINITE, FALSE),
  1167. result),
  1168. int, -1, result);
  1169. if (result == -1)
  1170. return result;
  1171. }
  1172. else
  1173. # endif /* ACE_HAS_SIGNAL_OBJECT_AND_WAIT */
  1174. {
  1175. // We keep the lock held just long enough to increment the count of
  1176. // waiters by one. Note that we can't keep it held across the call
  1177. // to ACE_OS::sema_wait() since that will deadlock other calls to
  1178. // ACE_OS::cond_signal().
  1179. if (ACE_OS::mutex_unlock (external_mutex) != 0)
  1180. return -1;
  1181. // Wait to be awakened by a ACE_OS::cond_signal() or
  1182. // ACE_OS::cond_broadcast().
  1183. result = ACE_OS::sema_wait (&cv->sema_);
  1184. }
  1185. // Reacquire lock to avoid race conditions on the <waiters_> count.
  1186. if (ACE_OS::thread_mutex_lock (&cv->waiters_lock_) != 0)
  1187. return -1;
  1188. // We're ready to return, so there's one less waiter.
  1189. --cv->waiters_;
  1190. bool const last_waiter = cv->was_broadcast_ && cv->waiters_ == 0;
  1191. // Release the lock so that other collaborating threads can make
  1192. // progress.
  1193. if (ACE_OS::thread_mutex_unlock (&cv->waiters_lock_) != 0)
  1194. return -1;
  1195. if (result == -1)
  1196. // Bad things happened, so let's just return below.
  1197. /* NOOP */;
  1198. # if defined (ACE_HAS_SIGNAL_OBJECT_AND_WAIT)
  1199. else if (external_mutex->type_ == USYNC_PROCESS)
  1200. {
  1201. if (last_waiter)
  1202. // This call atomically signals the <waiters_done_> event and
  1203. // waits until it can acquire the mutex. This is important to
  1204. // prevent unfairness.
  1205. ACE_WIN32CALL (ACE_ADAPT_RETVAL (::SignalObjectAndWait (cv->waiters_done_,
  1206. external_mutex->proc_mutex_,
  1207. INFINITE, FALSE),
  1208. result),
  1209. int, -1, result);
  1210. else
  1211. // We must always regain the <external_mutex>, even when
  1212. // errors occur because that's the guarantee that we give to
  1213. // our callers.
  1214. if (ACE_OS::mutex_lock (external_mutex) != 0)
  1215. return -1;
  1216. return result;
  1217. /* NOTREACHED */
  1218. }
  1219. # endif /* ACE_HAS_SIGNAL_OBJECT_AND_WAIT */
  1220. // If we're the last waiter thread during this particular broadcast
  1221. // then let all the other threads proceed.
  1222. else if (last_waiter)
  1223. # if defined (ACE_VXWORKS)
  1224. ACE_OS::sema_post (&cv->waiters_done_);
  1225. # else
  1226. ACE_OS::event_signal (&cv->waiters_done_);
  1227. # endif /* ACE_VXWORKS */
  1228. // We must always regain the <external_mutex>, even when errors
  1229. // occur because that's the guarantee that we give to our callers.
  1230. ACE_OS::mutex_lock (external_mutex);
  1231. return result;
  1232. # else
  1233. ACE_UNUSED_ARG (cv);
  1234. ACE_UNUSED_ARG (external_mutex);
  1235. ACE_NOTSUP_RETURN (-1);
  1236. # endif /* ACE_HAS_THREADS */
  1237. }
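// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the waiting side that
// matches the broadcast sketch after cond_broadcast().  The predicate is
// re-tested in a loop because a broadcast wakes every waiter (and spurious
// wakeups are permitted), yet the mutex is re-acquired one thread at a time.
// The names wait_lock, wait_cond, wait_ready and wait_until_ready are
// hypothetical.
#include <pthread.h>

static pthread_mutex_t wait_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wait_cond  = PTHREAD_COND_INITIALIZER;
static int             wait_ready = 0;

static void wait_until_ready (void)
{
  pthread_mutex_lock (&wait_lock);
  while (!wait_ready)                           // always a loop, never a plain if
    pthread_cond_wait (&wait_cond, &wait_lock); // releases and re-acquires the mutex
  pthread_mutex_unlock (&wait_lock);
}
// ---------------------------------------------------------------------------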
  1238. int
  1239. ACE_OS::cond_timedwait (ACE_cond_t *cv,
  1240. ACE_mutex_t *external_mutex,
  1241. ACE_Time_Value *timeout)
  1242. {
  1243. ACE_OS_TRACE ("ACE_OS::cond_timedwait");
  1244. # if defined (ACE_HAS_THREADS)
  1245. // Handle the easy case first.
  1246. if (timeout == 0)
  1247. return ACE_OS::cond_wait (cv, external_mutex);
  1248. # if defined (ACE_HAS_WTHREADS) || defined (ACE_VXWORKS)
  1249. // Prevent race conditions on the <waiters_> count.
  1250. if (ACE_OS::thread_mutex_lock (&cv->waiters_lock_) != 0)
  1251. return -1;
  1252. ++cv->waiters_;
  1253. if (ACE_OS::thread_mutex_unlock (&cv->waiters_lock_) != 0)
  1254. return -1;
  1255. int result = 0;
  1256. ACE_Errno_Guard error (errno, 0);
  1257. int msec_timeout = 0;
  1258. if (timeout != 0 && *timeout != ACE_Time_Value::zero)
  1259. {
  1260. // Note that we must convert between absolute time (which is
  1261. // passed as a parameter) and relative time (which is what
  1262. // WaitForSingleObjects() expects).
  1263. ACE_Time_Value relative_time (*timeout - ACE_OS::gettimeofday ());
  1264. // Watch out for situations where a context switch has caused the
  1265. // current time to be > the timeout.
  1266. if (relative_time > ACE_Time_Value::zero)
  1267. msec_timeout = relative_time.msec ();
  1268. }
  1269. # if defined (ACE_HAS_SIGNAL_OBJECT_AND_WAIT)
  1270. if (external_mutex->type_ == USYNC_PROCESS)
  1271. // This call will automatically release the mutex and wait on the
  1272. // semaphore.
  1273. result = ::SignalObjectAndWait (external_mutex->proc_mutex_,
  1274. cv->sema_,
  1275. msec_timeout,
  1276. FALSE);
  1277. else
  1278. # endif /* ACE_HAS_SIGNAL_OBJECT_AND_WAIT */
  1279. {
  1280. // We keep the lock held just long enough to increment the count
  1281. // of waiters by one. Note that we can't keep it held across
  1282. // the call to WaitForSingleObject since that will deadlock
  1283. // other calls to ACE_OS::cond_signal().
  1284. if (ACE_OS::mutex_unlock (external_mutex) != 0)
  1285. return -1;
  1286. // Wait to be awakened by a ACE_OS::signal() or
  1287. // ACE_OS::broadcast().
  1288. # if defined (ACE_WIN32)
  1289. # if !defined (ACE_USES_WINCE_SEMA_SIMULATION)
  1290. result = ::WaitForSingleObject (cv->sema_, msec_timeout);
  1291. # else /* ACE_USES_WINCE_SEMA_SIMULATION */
  1292. // Can't use Win32 API on our simulated semaphores.
  1293. result = ACE_OS::sema_wait (&cv->sema_,
  1294. timeout);
  1295. # endif /* ACE_USES_WINCE_SEMA_SIMULATION */
  1296. # elif defined (ACE_VXWORKS)
  1297. // Inline the call to ACE_OS::sema_wait () because it takes an
  1298. // ACE_Time_Value argument. Avoid the cost of that conversion . . .
  1299. int const ticks_per_sec = ::sysClkRateGet ();
  1300. int const ticks = msec_timeout * ticks_per_sec / ACE_ONE_SECOND_IN_MSECS;
  1301. result = ::semTake (cv->sema_.sema_, ticks);
  1302. # endif /* ACE_WIN32 || VXWORKS */
  1303. }
  1304. // Reacquire lock to avoid race conditions.
  1305. if (ACE_OS::thread_mutex_lock (&cv->waiters_lock_) != 0)
  1306. return -1;
  1307. --cv->waiters_;
  1308. bool const last_waiter = cv->was_broadcast_ && cv->waiters_ == 0;
  1309. if (ACE_OS::thread_mutex_unlock (&cv->waiters_lock_) != 0)
  1310. return -1;
  1311. # if defined (ACE_WIN32)
  1312. if (result != WAIT_OBJECT_0)
  1313. {
  1314. switch (result)
  1315. {
  1316. case WAIT_TIMEOUT:
  1317. error = ETIME;
  1318. break;
  1319. default:
  1320. error = ::GetLastError ();
  1321. break;
  1322. }
  1323. result = -1;
  1324. }
  1325. # elif defined (ACE_VXWORKS)
  1326. if (result == ERROR)
  1327. {
  1328. switch (errno)
  1329. {
  1330. case S_objLib_OBJ_TIMEOUT:
  1331. error = ETIME;
  1332. break;
  1333. case S_objLib_OBJ_UNAVAILABLE:
  1334. if (msec_timeout == 0)
  1335. error = ETIME;
  1336. break;
  1337. default:
  1338. error = errno;
  1339. break;
  1340. }
  1341. result = -1;
  1342. }
  1343. # endif /* ACE_WIN32 || VXWORKS */
  1344. # if defined (ACE_HAS_SIGNAL_OBJECT_AND_WAIT)
  1345. if (external_mutex->type_ == USYNC_PROCESS)
  1346. {
  1347. if (last_waiter)
  1348. // This call atomically signals the <waiters_done_> event and
  1349. // waits until it can acquire the mutex. This is important to
  1350. // prevent unfairness.
  1351. ACE_WIN32CALL (ACE_ADAPT_RETVAL (::SignalObjectAndWait (cv->waiters_done_,
  1352. external_mutex->proc_mutex_,
  1353. INFINITE, FALSE),
  1354. result),
  1355. int, -1, result);
  1356. else
  1357. {
  1358. // We must always regain the <external_mutex>, even when
  1359. // errors occur because that's the guarantee that we give to
  1360. // our callers.
  1361. if (ACE_OS::mutex_lock (external_mutex) != 0)
  1362. return -1;
  1363. }
  1364. return result;
  1365. /* NOTREACHED */
  1366. }
  1367. # endif /* ACE_HAS_SIGNAL_OBJECT_AND_WAIT */
  1368. // Note that this *must* be an "if" statement rather than an "else
  1369. // if" statement since the caller may have timed out and hence the
  1370. // result would have been -1 above.
  1371. if (last_waiter)
  1372. {
  1373. // Release the signaler/broadcaster if we're the last waiter.
  1374. # if defined (ACE_WIN32)
  1375. if (ACE_OS::event_signal (&cv->waiters_done_) != 0)
  1376. # else
  1377. if (ACE_OS::sema_post (&cv->waiters_done_) != 0)
  1378. # endif /* ACE_WIN32 */
  1379. return -1;
  1380. }
  1381. // We must always regain the <external_mutex>, even when errors
  1382. // occur because that's the guarantee that we give to our callers.
  1383. if (ACE_OS::mutex_lock (external_mutex) != 0)
  1384. return -1;
  1385. return result;
  1386. # endif /* ACE_HAS_WTHREADS || ACE_VXWORKS */
  1387. # else
  1388. ACE_UNUSED_ARG (cv);
  1389. ACE_UNUSED_ARG (external_mutex);
  1390. ACE_UNUSED_ARG (timeout);
  1391. ACE_NOTSUP_RETURN (-1);
  1392. # endif /* ACE_HAS_THREADS */
  1393. }
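// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the absolute-to-
// relative timeout conversion performed above.  The deadline arrives as an
// absolute time, the underlying wait expects a relative millisecond count,
// and a deadline already in the past must clamp to zero instead of going
// negative.  The helper name relative_msec is hypothetical.
#include <chrono>
#include <cstdio>

static long long relative_msec (std::chrono::system_clock::time_point deadline)
{
  std::chrono::system_clock::time_point const now =
    std::chrono::system_clock::now ();
  if (deadline <= now)
    return 0;                                  // already expired: do not block
  return std::chrono::duration_cast<std::chrono::milliseconds> (deadline - now)
           .count ();
}

int main ()
{
  std::printf ("%lld\n",
               relative_msec (std::chrono::system_clock::now ()
                              + std::chrono::milliseconds (250)));
  return 0;
}
// ---------------------------------------------------------------------------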
  1394. #else
  1395. int
  1396. ACE_OS::cond_init (ACE_cond_t *cv, short type, const char *name, void *arg)
  1397. {
  1398. ACE_condattr_t attributes;
  1399. if (ACE_OS::condattr_init (attributes, type) == 0
  1400. && ACE_OS::cond_init (cv, attributes, name, arg) == 0)
  1401. {
  1402. (void) ACE_OS::condattr_destroy (attributes);
  1403. return 0;
  1404. }
  1405. return -1;
  1406. }
  1407. #endif /* ACE_LACKS_COND_T */
  1408. #if defined (ACE_WIN32) && defined (ACE_HAS_WTHREADS)
  1409. int
  1410. ACE_OS::cond_timedwait (ACE_cond_t *cv,
  1411. ACE_thread_mutex_t *external_mutex,
  1412. ACE_Time_Value *timeout)
  1413. {
  1414. ACE_OS_TRACE ("ACE_OS::cond_timedwait");
  1415. # if defined (ACE_HAS_THREADS)
  1416. // Handle the easy case first.
  1417. if (timeout == 0)
  1418. return ACE_OS::cond_wait (cv, external_mutex);
  1419. # if defined (ACE_HAS_WTHREADS_CONDITION_VARIABLE)
  1420. int msec_timeout = 0;
  1421. int result = 0;
  1422. ACE_Time_Value relative_time (*timeout - ACE_OS::gettimeofday ());
  1423. // Watch out for situations where a context switch has caused the
  1424. // current time to be > the timeout.
  1425. if (relative_time > ACE_Time_Value::zero)
  1426. msec_timeout = relative_time.msec ();
  1427. ACE_OSCALL (ACE_ADAPT_RETVAL (::SleepConditionVariableCS (cv, external_mutex, msec_timeout),
  1428. result),
  1429. int, -1, result);
  1430. return result;
  1431. #else
  1432. // Prevent race conditions on the <waiters_> count.
  1433. if (ACE_OS::thread_mutex_lock (&cv->waiters_lock_) != 0)
  1434. return -1;
  1435. ++cv->waiters_;
  1436. if (ACE_OS::thread_mutex_unlock (&cv->waiters_lock_) != 0)
  1437. return -1;
  1438. int result = 0;
  1439. int error = 0;
  1440. int msec_timeout = 0;
  1441. if (timeout != 0 && *timeout != ACE_Time_Value::zero)
  1442. {
  1443. // Note that we must convert between absolute time (which is
  1444. // passed as a parameter) and relative time (which is what
  1445. // WaitForSingleObjects() expects).
  1446. ACE_Time_Value relative_time (*timeout - ACE_OS::gettimeofday ());
  1447. // Watch out for situations where a context switch has caused the
  1448. // current time to be > the timeout.
  1449. if (relative_time > ACE_Time_Value::zero)
  1450. msec_timeout = relative_time.msec ();
  1451. }
  1452. // We keep the lock held just long enough to increment the count of
  1453. // waiters by one. Note that we can't keep it held across the call
  1454. // to WaitForSingleObject since that will deadlock other calls to
  1455. // ACE_OS::cond_signal().
  1456. if (ACE_OS::thread_mutex_unlock (external_mutex) != 0)
  1457. return -1;
  1458. // Wait to be awakened by a ACE_OS::signal() or ACE_OS::broadcast().
  1459. # if defined (ACE_USES_WINCE_SEMA_SIMULATION)
  1460. // Can't use Win32 API on simulated semaphores.
  1461. result = ACE_OS::sema_wait (&cv->sema_,
  1462. timeout);
  1463. if (result == -1 && errno == ETIME)
  1464. result = WAIT_TIMEOUT;
  1465. # else
  1466. result = ::WaitForSingleObject (cv->sema_, msec_timeout);
  1467. # endif /* ACE_USES_WINCE_SEMA_SIMULATION */
  1468. // Reacquire lock to avoid race conditions.
  1469. if (ACE_OS::thread_mutex_lock (&cv->waiters_lock_) != 0)
  1470. return -1;
  1471. --cv->waiters_;
  1472. bool const last_waiter = cv->was_broadcast_ && cv->waiters_ == 0;
  1473. if (ACE_OS::thread_mutex_unlock (&cv->waiters_lock_) != 0)
  1474. return -1;
  1475. if (result != WAIT_OBJECT_0)
  1476. {
  1477. switch (result)
  1478. {
  1479. case WAIT_TIMEOUT:
  1480. error = ETIME;
  1481. break;
  1482. default:
  1483. error = ::GetLastError ();
  1484. break;
  1485. }
  1486. result = -1;
  1487. }
  1488. if (last_waiter)
  1489. {
  1490. // Release the signaler/broadcaster if we're the