/Src/Dependencies/Boost/boost/asio/detail/impl/task_io_service.ipp


//
// detail/impl/task_io_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2011 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

#ifndef BOOST_ASIO_DETAIL_IMPL_TASK_IO_SERVICE_IPP
#define BOOST_ASIO_DETAIL_IMPL_TASK_IO_SERVICE_IPP

#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)

#include <boost/asio/detail/config.hpp>

#if !defined(BOOST_ASIO_HAS_IOCP)

#include <boost/limits.hpp>
#include <boost/asio/detail/call_stack.hpp>
#include <boost/asio/detail/event.hpp>
#include <boost/asio/detail/reactor.hpp>
#include <boost/asio/detail/task_io_service.hpp>

#include <boost/asio/detail/push_options.hpp>
namespace boost {
namespace asio {
namespace detail {

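// Destructor-only helper used when running the reactor task: on block exit
// (normal or via an exception) it re-acquires the lock, marks the task as
// interrupted, and pushes the completed operations and the task_operation_
// marker back onto the operation queue.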
struct task_io_service::task_cleanup
{
  ~task_cleanup()
  {
    // Enqueue the completed operations and reinsert the task at the end of
    // the operation queue.
    lock_->lock();
    task_io_service_->task_interrupted_ = true;
    task_io_service_->op_queue_.push(*ops_);
    task_io_service_->op_queue_.push(&task_io_service_->task_operation_);
  }

  task_io_service* task_io_service_;
  mutex::scoped_lock* lock_;
  op_queue<operation>* ops_;
};

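// Destructor-only helper that calls work_finished() on block exit, so the
// outstanding work count is decremented even if the handler throws.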
struct task_io_service::work_finished_on_block_exit
{
  ~work_finished_on_block_exit()
  {
    task_io_service_->work_finished();
  }

  task_io_service* task_io_service_;
};

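// Per-thread record for a thread that finds the queue empty: threads are
// linked through next into the first_idle_thread_ stack and woken via
// wakeup_event.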
struct task_io_service::idle_thread_info
{
  event wakeup_event;
  idle_thread_info* next;
};

task_io_service::task_io_service(boost::asio::io_service& io_service)
  : boost::asio::detail::service_base<task_io_service>(io_service),
    mutex_(),
    task_(0),
    task_interrupted_(true),
    outstanding_work_(0),
    stopped_(false),
    shutdown_(false),
    first_idle_thread_(0)
{
  BOOST_ASIO_HANDLER_TRACKING_INIT;
}

void task_io_service::init(std::size_t /*concurrency_hint*/)
{
}

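// Discards all pending work: marks the service as shut down, destroys
// (rather than invokes) every queued handler, and forgets the reactor task.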
void task_io_service::shutdown_service()
{
  mutex::scoped_lock lock(mutex_);
  shutdown_ = true;
  lock.unlock();

  // Destroy handler objects.
  while (!op_queue_.empty())
  {
    operation* o = op_queue_.front();
    op_queue_.pop();
    if (o != &task_operation_)
      o->destroy();
  }

  // Reset to initial state.
  task_ = 0;
}

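// Lazily installs the reactor as the 'task': on first use the reactor
// service is obtained, the task_operation_ marker is queued, and one thread
// is woken to start running it.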
void task_io_service::init_task()
{
  mutex::scoped_lock lock(mutex_);
  if (!shutdown_ && !task_)
  {
    task_ = &use_service<reactor>(this->get_io_service());
    op_queue_.push(&task_operation_);
    wake_one_thread_and_unlock(lock);
  }
}

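// run() executes handlers until the service is stopped or has no outstanding
// work, returning the number executed (saturating at the maximum std::size_t
// value); run_one() executes at most one handler.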
std::size_t task_io_service::run(boost::system::error_code& ec)
{
  ec = boost::system::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  call_stack<task_io_service>::context ctx(this);

  idle_thread_info this_idle_thread;
  this_idle_thread.next = 0;

  mutex::scoped_lock lock(mutex_);

  std::size_t n = 0;
  for (; do_one(lock, &this_idle_thread); lock.lock())
    if (n != (std::numeric_limits<std::size_t>::max)())
      ++n;
  return n;
}

std::size_t task_io_service::run_one(boost::system::error_code& ec)
{
  ec = boost::system::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  call_stack<task_io_service>::context ctx(this);

  idle_thread_info this_idle_thread;
  this_idle_thread.next = 0;

  mutex::scoped_lock lock(mutex_);

  return do_one(lock, &this_idle_thread);
}

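// Non-blocking variants: do_one() is called with a null idle_thread_info, so
// a thread that finds no ready handlers returns instead of parking itself.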
std::size_t task_io_service::poll(boost::system::error_code& ec)
{
  if (outstanding_work_ == 0)
  {
    stop();
    ec = boost::system::error_code();
    return 0;
  }

  call_stack<task_io_service>::context ctx(this);

  mutex::scoped_lock lock(mutex_);

  std::size_t n = 0;
  for (; do_one(lock, 0); lock.lock())
    if (n != (std::numeric_limits<std::size_t>::max)())
      ++n;
  return n;
}

std::size_t task_io_service::poll_one(boost::system::error_code& ec)
{
  ec = boost::system::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  call_stack<task_io_service>::context ctx(this);

  mutex::scoped_lock lock(mutex_);

  return do_one(lock, 0);
}

void task_io_service::stop()
{
  mutex::scoped_lock lock(mutex_);
  stop_all_threads(lock);
}

bool task_io_service::stopped() const
{
  mutex::scoped_lock lock(mutex_);
  return stopped_;
}

void task_io_service::reset()
{
  mutex::scoped_lock lock(mutex_);
  stopped_ = false;
}

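// post_immediate_completion() counts the operation as new outstanding work
// before queueing it; the deferred variants only queue the operation(s) and
// wake a thread to run them.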
void task_io_service::post_immediate_completion(task_io_service::operation* op)
{
  work_started();
  post_deferred_completion(op);
}

void task_io_service::post_deferred_completion(task_io_service::operation* op)
{
  mutex::scoped_lock lock(mutex_);
  op_queue_.push(op);
  wake_one_thread_and_unlock(lock);
}

void task_io_service::post_deferred_completions(
    op_queue<task_io_service::operation>& ops)
{
  if (!ops.empty())
  {
    mutex::scoped_lock lock(mutex_);
    op_queue_.push(ops);
    wake_one_thread_and_unlock(lock);
  }
}

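// Abandoned operations are moved onto a local queue and left for that queue
// to clean up when it goes out of scope, rather than being invoked.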
void task_io_service::abandon_operations(
    op_queue<task_io_service::operation>& ops)
{
  op_queue<task_io_service::operation> ops2;
  ops2.push(ops);
}

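// Core dispatch loop shared by run(), run_one(), poll() and poll_one().
// Pops one operation from the queue: the task_operation_ marker causes the
// reactor task to run, any other operation is a ready handler and is
// completed. Returns 1 if a handler was executed, 0 otherwise. A null
// this_idle_thread selects polling behaviour; otherwise the thread parks
// itself on the idle list whenever the queue is empty.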
std::size_t task_io_service::do_one(mutex::scoped_lock& lock,
    task_io_service::idle_thread_info* this_idle_thread)
{
  bool polling = !this_idle_thread;
  bool task_has_run = false;
  while (!stopped_)
  {
    if (!op_queue_.empty())
    {
      // Prepare to execute first handler from queue.
      operation* o = op_queue_.front();
      op_queue_.pop();
      bool more_handlers = (!op_queue_.empty());

      if (o == &task_operation_)
      {
        task_interrupted_ = more_handlers || polling;

        // If the task has already run and we're polling then we're done.
        if (task_has_run && polling)
        {
          task_interrupted_ = true;
          op_queue_.push(&task_operation_);
          return 0;
        }
        task_has_run = true;

        if (!more_handlers || !wake_one_idle_thread_and_unlock(lock))
          lock.unlock();

        op_queue<operation> completed_ops;
        task_cleanup c = { this, &lock, &completed_ops };
        (void)c;

        // Run the task. May throw an exception. Only block if the operation
        // queue is empty and we're not polling, otherwise we want to return
        // as soon as possible.
        task_->run(!more_handlers && !polling, completed_ops);
      }
      else
      {
        if (more_handlers)
          wake_one_thread_and_unlock(lock);
        else
          lock.unlock();

        // Ensure the count of outstanding work is decremented on block exit.
        work_finished_on_block_exit on_exit = { this };
        (void)on_exit;

        // Complete the operation. May throw an exception.
        o->complete(*this); // deletes the operation object

        return 1;
      }
    }
    else if (this_idle_thread)
    {
      // Nothing to run right now, so just wait for work to do.
      this_idle_thread->next = first_idle_thread_;
      first_idle_thread_ = this_idle_thread;
      this_idle_thread->wakeup_event.clear(lock);
      this_idle_thread->wakeup_event.wait(lock);
    }
    else
    {
      return 0;
    }
  }

  return 0;
}

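// Marks the service as stopped, wakes every parked thread, and interrupts
// the reactor task if it has not already been interrupted. The caller must
// hold the lock.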
void task_io_service::stop_all_threads(
    mutex::scoped_lock& lock)
{
  stopped_ = true;

  while (first_idle_thread_)
  {
    idle_thread_info* idle_thread = first_idle_thread_;
    first_idle_thread_ = idle_thread->next;
    idle_thread->next = 0;
    idle_thread->wakeup_event.signal(lock);
  }

  if (!task_interrupted_ && task_)
  {
    task_interrupted_ = true;
    task_->interrupt();
  }
}

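// Pops one thread off the idle list, signals its wakeup event and releases
// the lock; returns false (leaving the lock held) if no thread is parked.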
bool task_io_service::wake_one_idle_thread_and_unlock(
    mutex::scoped_lock& lock)
{
  if (first_idle_thread_)
  {
    idle_thread_info* idle_thread = first_idle_thread_;
    first_idle_thread_ = idle_thread->next;
    idle_thread->next = 0;
    idle_thread->wakeup_event.signal_and_unlock(lock);
    return true;
  }
  return false;
}

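// Prefers waking a parked thread; failing that, interrupts the reactor task
// (if present and not already interrupted) so the thread running it can
// return for the new work, then releases the lock.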
void task_io_service::wake_one_thread_and_unlock(
    mutex::scoped_lock& lock)
{
  if (!wake_one_idle_thread_and_unlock(lock))
  {
    if (!task_interrupted_ && task_)
    {
      task_interrupted_ = true;
      task_->interrupt();
    }
    lock.unlock();
  }
}

} // namespace detail
} // namespace asio
} // namespace boost

#include <boost/asio/detail/pop_options.hpp>

#endif // !defined(BOOST_ASIO_HAS_IOCP)

#endif // BOOST_ASIO_DETAIL_IMPL_TASK_IO_SERVICE_IPP