//
// detail/impl/scheduler.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

#ifndef BOOST_ASIO_DETAIL_IMPL_SCHEDULER_IPP
#define BOOST_ASIO_DETAIL_IMPL_SCHEDULER_IPP

#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)

#include <boost/asio/detail/config.hpp>
#include <boost/asio/detail/concurrency_hint.hpp>
#include <boost/asio/detail/event.hpp>
#include <boost/asio/detail/limits.hpp>
#include <boost/asio/detail/reactor.hpp>
#include <boost/asio/detail/scheduler.hpp>
#include <boost/asio/detail/scheduler_thread_info.hpp>
#include <boost/asio/detail/signal_blocker.hpp>

#include <boost/asio/detail/push_options.hpp>

namespace boost {
namespace asio {
namespace detail {

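// Function object used to start the scheduler's internal thread when the
// scheduler is constructed with own_thread set. It simply runs the scheduler
// until it is stopped or runs out of work.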
class scheduler::thread_function
{
public:
  explicit thread_function(scheduler* s)
    : this_(s)
  {
  }

  void operator()()
  {
    boost::system::error_code ec;
    this_->run(ec);
  }

private:
  scheduler* this_;
};

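// Scope guard placed around task_->run(). Its destructor publishes any
// privately counted work to the shared outstanding_work_ count and, with the
// lock reacquired, enqueues the operations produced by the task and reinserts
// task_operation_ at the back of the queue. Running from a destructor ensures
// this happens even if the task throws.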
struct scheduler::task_cleanup
{
  ~task_cleanup()
  {
    if (this_thread_->private_outstanding_work > 0)
    {
      boost::asio::detail::increment(
          scheduler_->outstanding_work_,
          this_thread_->private_outstanding_work);
    }
    this_thread_->private_outstanding_work = 0;

    // Enqueue the completed operations and reinsert the task at the end of
    // the operation queue.
    lock_->lock();
    scheduler_->task_interrupted_ = true;
    scheduler_->op_queue_.push(this_thread_->private_op_queue);
    scheduler_->op_queue_.push(&scheduler_->task_operation_);
  }

  scheduler* scheduler_;
  mutex::scoped_lock* lock_;
  thread_info* this_thread_;
};

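// Scope guard placed around handler execution. Its destructor reconciles the
// thread-private work count against the one unit of work consumed by the
// completed handler: a surplus is published to the shared count, while a
// deficit means no new work was started and work_finished() is called.
// Operations posted to the private queue while the handler ran are then moved
// to the shared queue under the reacquired lock.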
struct scheduler::work_cleanup
{
  ~work_cleanup()
  {
    if (this_thread_->private_outstanding_work > 1)
    {
      boost::asio::detail::increment(
          scheduler_->outstanding_work_,
          this_thread_->private_outstanding_work - 1);
    }
    else if (this_thread_->private_outstanding_work < 1)
    {
      scheduler_->work_finished();
    }
    this_thread_->private_outstanding_work = 0;

#if defined(BOOST_ASIO_HAS_THREADS)
    if (!this_thread_->private_op_queue.empty())
    {
      lock_->lock();
      scheduler_->op_queue_.push(this_thread_->private_op_queue);
    }
#endif // defined(BOOST_ASIO_HAS_THREADS)
  }

  scheduler* scheduler_;
  mutex::scoped_lock* lock_;
  thread_info* this_thread_;
};

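// Note that one_thread_ is set when the concurrency hint is 1, or when it
// states that locking is not required for the scheduler or the reactor; in
// that case operations may be kept on thread-private queues and completed
// without waking other threads.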
scheduler::scheduler(boost::asio::execution_context& ctx,
    int concurrency_hint, bool own_thread)
  : boost::asio::detail::execution_context_service_base<scheduler>(ctx),
    one_thread_(concurrency_hint == 1
        || !BOOST_ASIO_CONCURRENCY_HINT_IS_LOCKING(
          SCHEDULER, concurrency_hint)
        || !BOOST_ASIO_CONCURRENCY_HINT_IS_LOCKING(
          REACTOR_IO, concurrency_hint)),
    mutex_(BOOST_ASIO_CONCURRENCY_HINT_IS_LOCKING(
          SCHEDULER, concurrency_hint)),
    task_(0),
    task_interrupted_(true),
    outstanding_work_(0),
    stopped_(false),
    shutdown_(false),
    concurrency_hint_(concurrency_hint),
    thread_(0)
{
  BOOST_ASIO_HANDLER_TRACKING_INIT;

  if (own_thread)
  {
    ++outstanding_work_;
    boost::asio::detail::signal_blocker sb;
    thread_ = new boost::asio::detail::thread(thread_function(this));
  }
}

scheduler::~scheduler()
{
  if (thread_)
  {
    mutex::scoped_lock lock(mutex_);
    shutdown_ = true;
    stop_all_threads(lock);
    lock.unlock();
    thread_->join();
    delete thread_;
  }
}

void scheduler::shutdown()
{
  mutex::scoped_lock lock(mutex_);
  shutdown_ = true;
  if (thread_)
    stop_all_threads(lock);
  lock.unlock();

  // Join thread to ensure task operation is returned to queue.
  if (thread_)
  {
    thread_->join();
    delete thread_;
    thread_ = 0;
  }

  // Destroy handler objects.
  while (!op_queue_.empty())
  {
    operation* o = op_queue_.front();
    op_queue_.pop();
    if (o != &task_operation_)
      o->destroy();
  }

  // Reset to initial state.
  task_ = 0;
}

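// Lazily associate the reactor with the scheduler. The task is obtained on
// first use rather than in the constructor, and task_operation_ is queued so
// that an idle thread will start running the reactor.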
void scheduler::init_task()
{
  mutex::scoped_lock lock(mutex_);
  if (!shutdown_ && !task_)
  {
    task_ = &use_service<reactor>(this->context());
    op_queue_.push(&task_operation_);
    wake_one_thread_and_unlock(lock);
  }
}

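// Run the scheduler on the calling thread until it is stopped or runs out of
// work. The thread registers itself on the call stack, so that nested calls
// and posts from handlers can detect it, and keeps a private operation queue
// and work count that the cleanup guards reconcile with the shared state.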
std::size_t scheduler::run(boost::system::error_code& ec)
{
  ec = boost::system::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

  std::size_t n = 0;
  for (; do_run_one(lock, this_thread, ec); lock.lock())
    if (n != (std::numeric_limits<std::size_t>::max)())
      ++n;
  return n;
}

std::size_t scheduler::run_one(boost::system::error_code& ec)
{
  ec = boost::system::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

  return do_run_one(lock, this_thread, ec);
}

std::size_t scheduler::wait_one(long usec, boost::system::error_code& ec)
{
  ec = boost::system::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

  return do_wait_one(lock, this_thread, usec, ec);
}

std::size_t scheduler::poll(boost::system::error_code& ec)
{
  ec = boost::system::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

#if defined(BOOST_ASIO_HAS_THREADS)
  // We want to support nested calls to poll() and poll_one(), so any handlers
  // that are already on a thread-private queue need to be put on to the main
  // queue now.
  if (one_thread_)
    if (thread_info* outer_info = static_cast<thread_info*>(ctx.next_by_key()))
      op_queue_.push(outer_info->private_op_queue);
#endif // defined(BOOST_ASIO_HAS_THREADS)

  std::size_t n = 0;
  for (; do_poll_one(lock, this_thread, ec); lock.lock())
    if (n != (std::numeric_limits<std::size_t>::max)())
      ++n;
  return n;
}

std::size_t scheduler::poll_one(boost::system::error_code& ec)
{
  ec = boost::system::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

#if defined(BOOST_ASIO_HAS_THREADS)
  // We want to support nested calls to poll() and poll_one(), so any handlers
  // that are already on a thread-private queue need to be put on to the main
  // queue now.
  if (one_thread_)
    if (thread_info* outer_info = static_cast<thread_info*>(ctx.next_by_key()))
      op_queue_.push(outer_info->private_op_queue);
#endif // defined(BOOST_ASIO_HAS_THREADS)

  return do_poll_one(lock, this_thread, ec);
}

void scheduler::stop()
{
  mutex::scoped_lock lock(mutex_);
  stop_all_threads(lock);
}

bool scheduler::stopped() const
{
  mutex::scoped_lock lock(mutex_);
  return stopped_;
}

void scheduler::restart()
{
  mutex::scoped_lock lock(mutex_);
  stopped_ = false;
}

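// Used to compensate for the single unit of work that work_cleanup will
// deduct when the current handler completes. Must be called from within a
// handler that is being executed by the scheduler.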
void scheduler::compensating_work_started()
{
  thread_info_base* this_thread = thread_call_stack::contains(this);
  ++static_cast<thread_info*>(this_thread)->private_outstanding_work;
}

bool scheduler::can_dispatch()
{
  return thread_call_stack::contains(this) != 0;
}

void scheduler::capture_current_exception()
{
  if (thread_info_base* this_thread = thread_call_stack::contains(this))
    this_thread->capture_current_exception();
}

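// Post an operation for immediate execution. If only one thread can be
// running the scheduler, or the operation continues the current one, the
// operation goes onto the calling thread's private queue without taking the
// lock; otherwise it is pushed onto the shared queue and one thread is woken
// to run it.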
void scheduler::post_immediate_completion(
    scheduler::operation* op, bool is_continuation)
{
#if defined(BOOST_ASIO_HAS_THREADS)
  if (one_thread_ || is_continuation)
  {
    if (thread_info_base* this_thread = thread_call_stack::contains(this))
    {
      ++static_cast<thread_info*>(this_thread)->private_outstanding_work;
      static_cast<thread_info*>(this_thread)->private_op_queue.push(op);
      return;
    }
  }
#else // defined(BOOST_ASIO_HAS_THREADS)
  (void)is_continuation;
#endif // defined(BOOST_ASIO_HAS_THREADS)

  work_started();
  mutex::scoped_lock lock(mutex_);
  op_queue_.push(op);
  wake_one_thread_and_unlock(lock);
}

void scheduler::post_immediate_completions(std::size_t n,
    op_queue<scheduler::operation>& ops, bool is_continuation)
{
#if defined(BOOST_ASIO_HAS_THREADS)
  if (one_thread_ || is_continuation)
  {
    if (thread_info_base* this_thread = thread_call_stack::contains(this))
    {
      static_cast<thread_info*>(this_thread)->private_outstanding_work
        += static_cast<long>(n);
      static_cast<thread_info*>(this_thread)->private_op_queue.push(ops);
      return;
    }
  }
#else // defined(BOOST_ASIO_HAS_THREADS)
  (void)is_continuation;
#endif // defined(BOOST_ASIO_HAS_THREADS)

  increment(outstanding_work_, static_cast<long>(n));
  mutex::scoped_lock lock(mutex_);
  op_queue_.push(ops);
  wake_one_thread_and_unlock(lock);
}

void scheduler::post_deferred_completion(scheduler::operation* op)
{
#if defined(BOOST_ASIO_HAS_THREADS)
  if (one_thread_)
  {
    if (thread_info_base* this_thread = thread_call_stack::contains(this))
    {
      static_cast<thread_info*>(this_thread)->private_op_queue.push(op);
      return;
    }
  }
#endif // defined(BOOST_ASIO_HAS_THREADS)

  mutex::scoped_lock lock(mutex_);
  op_queue_.push(op);
  wake_one_thread_and_unlock(lock);
}

void scheduler::post_deferred_completions(
    op_queue<scheduler::operation>& ops)
{
  if (!ops.empty())
  {
#if defined(BOOST_ASIO_HAS_THREADS)
    if (one_thread_)
    {
      if (thread_info_base* this_thread = thread_call_stack::contains(this))
      {
        static_cast<thread_info*>(this_thread)->private_op_queue.push(ops);
        return;
      }
    }
#endif // defined(BOOST_ASIO_HAS_THREADS)

    mutex::scoped_lock lock(mutex_);
    op_queue_.push(ops);
    wake_one_thread_and_unlock(lock);
  }
}

void scheduler::do_dispatch(
    scheduler::operation* op)
{
  work_started();
  mutex::scoped_lock lock(mutex_);
  op_queue_.push(op);
  wake_one_thread_and_unlock(lock);
}

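// Take ownership of operations that will never be executed. Pushing them into
// a local queue means they are destroyed, rather than invoked, when that
// queue goes out of scope.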
void scheduler::abandon_operations(
    op_queue<scheduler::operation>& ops)
{
  op_queue<scheduler::operation> ops2;
  ops2.push(ops);
}

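// Dequeue and execute at most one operation, waiting on wakeup_event_ while
// the queue is empty. Dequeuing task_operation_ means it is the reactor's
// turn: the task is run (blocking for I/O only if no handlers are pending)
// and its completions are requeued by task_cleanup before looping again. Any
// other operation is a handler, which is completed outside the lock under the
// protection of work_cleanup, after which 1 is returned.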
std::size_t scheduler::do_run_one(mutex::scoped_lock& lock,
    scheduler::thread_info& this_thread,
    const boost::system::error_code& ec)
{
  while (!stopped_)
  {
    if (!op_queue_.empty())
    {
      // Prepare to execute first handler from queue.
      operation* o = op_queue_.front();
      op_queue_.pop();
      bool more_handlers = (!op_queue_.empty());

      if (o == &task_operation_)
      {
        task_interrupted_ = more_handlers;

        if (more_handlers && !one_thread_)
          wakeup_event_.unlock_and_signal_one(lock);
        else
          lock.unlock();

        task_cleanup on_exit = { this, &lock, &this_thread };
        (void)on_exit;

        // Run the task. May throw an exception. Only block if the operation
        // queue is empty and we're not polling, otherwise we want to return
        // as soon as possible.
        task_->run(more_handlers ? 0 : -1, this_thread.private_op_queue);
      }
      else
      {
        std::size_t task_result = o->task_result_;

        if (more_handlers && !one_thread_)
          wake_one_thread_and_unlock(lock);
        else
          lock.unlock();

        // Ensure the count of outstanding work is decremented on block exit.
        work_cleanup on_exit = { this, &lock, &this_thread };
        (void)on_exit;

        // Complete the operation. May throw an exception. Deletes the object.
        o->complete(this, ec, task_result);
        this_thread.rethrow_pending_exception();

        return 1;
      }
    }
    else
    {
      wakeup_event_.clear(lock);
      wakeup_event_.wait(lock);
    }
  }

  return 0;
}

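// Like do_run_one(), but waits no longer than usec microseconds for an
// operation to become available. A single timed wait is performed on
// wakeup_event_ (usec is then zeroed so we wait at most once), and any
// reactor run is bounded by the same timeout instead of blocking
// indefinitely.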
std::size_t scheduler::do_wait_one(mutex::scoped_lock& lock,
    scheduler::thread_info& this_thread, long usec,
    const boost::system::error_code& ec)
{
  if (stopped_)
    return 0;

  operation* o = op_queue_.front();
  if (o == 0)
  {
    wakeup_event_.clear(lock);
    wakeup_event_.wait_for_usec(lock, usec);
    usec = 0; // Wait at most once.
    o = op_queue_.front();
  }

  if (o == &task_operation_)
  {
    op_queue_.pop();
    bool more_handlers = (!op_queue_.empty());

    task_interrupted_ = more_handlers;

    if (more_handlers && !one_thread_)
      wakeup_event_.unlock_and_signal_one(lock);
    else
      lock.unlock();

    {
      task_cleanup on_exit = { this, &lock, &this_thread };
      (void)on_exit;

      // Run the task. May throw an exception. Only block if the operation
      // queue is empty and we're not polling, otherwise we want to return
      // as soon as possible.
      task_->run(more_handlers ? 0 : usec, this_thread.private_op_queue);
    }

    o = op_queue_.front();
    if (o == &task_operation_)
    {
      if (!one_thread_)
        wakeup_event_.maybe_unlock_and_signal_one(lock);
      return 0;
    }
  }

  if (o == 0)
    return 0;

  op_queue_.pop();
  bool more_handlers = (!op_queue_.empty());

  std::size_t task_result = o->task_result_;

  if (more_handlers && !one_thread_)
    wake_one_thread_and_unlock(lock);
  else
    lock.unlock();

  // Ensure the count of outstanding work is decremented on block exit.
  work_cleanup on_exit = { this, &lock, &this_thread };
  (void)on_exit;

  // Complete the operation. May throw an exception. Deletes the object.
  o->complete(this, ec, task_result);
  this_thread.rethrow_pending_exception();

  return 1;
}

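// Like do_run_one(), but never blocks: if the task sentinel is at the front
// of the queue the reactor is polled with a timeout of zero, and if no
// handler is ready the function returns 0 immediately.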
std::size_t scheduler::do_poll_one(mutex::scoped_lock& lock,
    scheduler::thread_info& this_thread,
    const boost::system::error_code& ec)
{
  if (stopped_)
    return 0;

  operation* o = op_queue_.front();
  if (o == &task_operation_)
  {
    op_queue_.pop();
    lock.unlock();

    {
      task_cleanup c = { this, &lock, &this_thread };
      (void)c;

      // Run the task. May throw an exception. Only block if the operation
      // queue is empty and we're not polling, otherwise we want to return
      // as soon as possible.
      task_->run(0, this_thread.private_op_queue);
    }

    o = op_queue_.front();
    if (o == &task_operation_)
    {
      wakeup_event_.maybe_unlock_and_signal_one(lock);
      return 0;
    }
  }

  if (o == 0)
    return 0;

  op_queue_.pop();
  bool more_handlers = (!op_queue_.empty());

  std::size_t task_result = o->task_result_;

  if (more_handlers && !one_thread_)
    wake_one_thread_and_unlock(lock);
  else
    lock.unlock();

  // Ensure the count of outstanding work is decremented on block exit.
  work_cleanup on_exit = { this, &lock, &this_thread };
  (void)on_exit;

  // Complete the operation. May throw an exception. Deletes the object.
  o->complete(this, ec, task_result);
  this_thread.rethrow_pending_exception();

  return 1;
}

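// Called with the lock held. Marks the scheduler as stopped, wakes all
// threads waiting on wakeup_event_, and interrupts the reactor in case it is
// blocked inside task_->run().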
void scheduler::stop_all_threads(
    mutex::scoped_lock& lock)
{
  stopped_ = true;
  wakeup_event_.signal_all(lock);

  if (!task_interrupted_ && task_)
  {
    task_interrupted_ = true;
    task_->interrupt();
  }
}

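// Called with the lock held; always returns with it released. If no thread is
// waiting on wakeup_event_, the reactor is interrupted instead so that the
// thread running the task will notice the newly queued work.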
void scheduler::wake_one_thread_and_unlock(
    mutex::scoped_lock& lock)
{
  if (!wakeup_event_.maybe_unlock_and_signal_one(lock))
  {
    if (!task_interrupted_ && task_)
    {
      task_interrupted_ = true;
      task_->interrupt();
    }
    lock.unlock();
  }
}

} // namespace detail
} // namespace asio
} // namespace boost

#include <boost/asio/detail/pop_options.hpp>

#endif // BOOST_ASIO_DETAIL_IMPL_SCHEDULER_IPP