//
// detail/impl/scheduler.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2020 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

#ifndef BOOST_ASIO_DETAIL_IMPL_SCHEDULER_IPP
#define BOOST_ASIO_DETAIL_IMPL_SCHEDULER_IPP

#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)

#include <boost/asio/detail/config.hpp>
#include <boost/asio/detail/concurrency_hint.hpp>
#include <boost/asio/detail/event.hpp>
#include <boost/asio/detail/limits.hpp>
#include <boost/asio/detail/reactor.hpp>
#include <boost/asio/detail/scheduler.hpp>
#include <boost/asio/detail/scheduler_thread_info.hpp>
#include <boost/asio/detail/signal_blocker.hpp>

#include <boost/asio/detail/push_options.hpp>

namespace boost {
namespace asio {
namespace detail {

// Function object run by the thread that the scheduler optionally owns
// (see the constructor's own_thread parameter). It simply calls run()
// with a locally scoped error code.
class scheduler::thread_function
{
public:
  explicit thread_function(scheduler* s)
    : this_(s)
  {
  }

  void operator()()
  {
    boost::system::error_code ec;
    this_->run(ec);
  }

private:
  scheduler* this_;
};

// Scope guard used around task_->run(). On destruction (including due to
// an exception from the task) it publishes the thread-private outstanding
// work count to the scheduler's shared counter, then re-acquires the lock
// and moves privately queued operations back onto the shared queue.
struct scheduler::task_cleanup
{
  ~task_cleanup()
  {
    if (this_thread_->private_outstanding_work > 0)
    {
      boost::asio::detail::increment(
          scheduler_->outstanding_work_,
          this_thread_->private_outstanding_work);
    }
    this_thread_->private_outstanding_work = 0;

    // Enqueue the completed operations and reinsert the task at the end of
    // the operation queue.
    lock_->lock();
    scheduler_->task_interrupted_ = true;
    scheduler_->op_queue_.push(this_thread_->private_op_queue);
    scheduler_->op_queue_.push(&scheduler_->task_operation_);
  }

  scheduler* scheduler_;
  mutex::scoped_lock* lock_;
  thread_info* this_thread_;
};

// Scope guard used around handler completion. On destruction it reconciles
// the thread-private outstanding work count with the scheduler's shared
// counter: one unit is implicitly consumed by the handler that just ran,
// so > 1 publishes the surplus and < 1 signals that work has finished.
struct scheduler::work_cleanup
{
  ~work_cleanup()
  {
    if (this_thread_->private_outstanding_work > 1)
    {
      boost::asio::detail::increment(
          scheduler_->outstanding_work_,
          this_thread_->private_outstanding_work - 1);
    }
    else if (this_thread_->private_outstanding_work < 1)
    {
      scheduler_->work_finished();
    }
    this_thread_->private_outstanding_work = 0;

#if defined(BOOST_ASIO_HAS_THREADS)
    // Return any privately queued operations to the shared queue so other
    // threads can see them.
    if (!this_thread_->private_op_queue.empty())
    {
      lock_->lock();
      scheduler_->op_queue_.push(this_thread_->private_op_queue);
    }
#endif // defined(BOOST_ASIO_HAS_THREADS)
  }

  scheduler* scheduler_;
  mutex::scoped_lock* lock_;
  thread_info* this_thread_;
};

// Construct the scheduler. The concurrency hint selects lock-free
// single-thread optimisations (one_thread_) and whether the internal mutex
// actually locks. If own_thread is true, a background thread is started
// that calls run(), and one unit of work is counted to keep it alive.
scheduler::scheduler(boost::asio::execution_context& ctx,
    int concurrency_hint, bool own_thread)
  : boost::asio::detail::execution_context_service_base<scheduler>(ctx),
    one_thread_(concurrency_hint == 1
        || !BOOST_ASIO_CONCURRENCY_HINT_IS_LOCKING(
          SCHEDULER, concurrency_hint)
        || !BOOST_ASIO_CONCURRENCY_HINT_IS_LOCKING(
          REACTOR_IO, concurrency_hint)),
    mutex_(BOOST_ASIO_CONCURRENCY_HINT_IS_LOCKING(
          SCHEDULER, concurrency_hint)),
    task_(0),
    task_interrupted_(true),
    outstanding_work_(0),
    stopped_(false),
    shutdown_(false),
    concurrency_hint_(concurrency_hint),
    thread_(0)
{
  BOOST_ASIO_HANDLER_TRACKING_INIT;

  if (own_thread)
  {
    ++outstanding_work_;
    // Block signals in the spawned thread so they are delivered to the
    // application's own threads instead.
    boost::asio::detail::signal_blocker sb;
    thread_ = new boost::asio::detail::thread(thread_function(this));
  }
}

scheduler::~scheduler()
{
  // Join and reclaim the internally owned thread, if any.
  if (thread_)
  {
    thread_->join();
    delete thread_;
  }
}

// Service shutdown: stop all threads, join the owned thread, and destroy
// (without invoking) any handlers still queued.
void scheduler::shutdown()
{
  mutex::scoped_lock lock(mutex_);
  shutdown_ = true;
  if (thread_)
    stop_all_threads(lock);
  lock.unlock();

  // Join thread to ensure task operation is returned to queue.
  if (thread_)
  {
    thread_->join();
    delete thread_;
    thread_ = 0;
  }

  // Destroy handler objects.
  while (!op_queue_.empty())
  {
    operation* o = op_queue_.front();
    op_queue_.pop();
    if (o != &task_operation_)
      o->destroy();
  }

  // Reset to initial state.
  task_ = 0;
}

// Lazily install the reactor as the scheduler's task and enqueue the
// sentinel task_operation_ so a worker thread will run it.
void scheduler::init_task()
{
  mutex::scoped_lock lock(mutex_);
  if (!shutdown_ && !task_)
  {
    task_ = &use_service<reactor>(this->context());
    op_queue_.push(&task_operation_);
    wake_one_thread_and_unlock(lock);
  }
}

// Run the event loop on the calling thread until stopped or out of work.
// Returns the number of handlers executed (saturating at size_t max).
std::size_t scheduler::run(boost::system::error_code& ec)
{
  ec = boost::system::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

  std::size_t n = 0;
  // do_run_one returns with the lock released; re-acquire each iteration.
  for (; do_run_one(lock, this_thread, ec); lock.lock())
    if (n != (std::numeric_limits<std::size_t>::max)())
      ++n;
  return n;
}

// Run at most one handler. Returns the number executed (0 or 1).
std::size_t scheduler::run_one(boost::system::error_code& ec)
{
  ec = boost::system::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

  return do_run_one(lock, this_thread, ec);
}

// Run at most one handler, waiting no more than usec microseconds for one
// to become ready. Returns the number executed (0 or 1).
std::size_t scheduler::wait_one(long usec, boost::system::error_code& ec)
{
  ec = boost::system::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

  return do_wait_one(lock, this_thread, usec, ec);
}

// Run all ready handlers without blocking. Returns the number executed.
std::size_t scheduler::poll(boost::system::error_code& ec)
{
  ec = boost::system::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

#if defined(BOOST_ASIO_HAS_THREADS)
  // We want to support nested calls to poll() and poll_one(), so any handlers
  // that are already on a thread-private queue need to be put on to the main
  // queue now.
  if (one_thread_)
    if (thread_info* outer_info = static_cast<thread_info*>(ctx.next_by_key()))
      op_queue_.push(outer_info->private_op_queue);
#endif // defined(BOOST_ASIO_HAS_THREADS)

  std::size_t n = 0;
  // do_poll_one returns with the lock released; re-acquire each iteration.
  // The count saturates rather than wrapping.
  for (; do_poll_one(lock, this_thread, ec); lock.lock())
    if (n != (std::numeric_limits<std::size_t>::max)())
      ++n;
  return n;
}

// Run at most one ready handler without blocking. Returns 0 or 1.
std::size_t scheduler::poll_one(boost::system::error_code& ec)
{
  ec = boost::system::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

#if defined(BOOST_ASIO_HAS_THREADS)
  // We want to support nested calls to poll() and poll_one(), so any handlers
  // that are already on a thread-private queue need to be put on to the main
  // queue now.
  if (one_thread_)
    if (thread_info* outer_info = static_cast<thread_info*>(ctx.next_by_key()))
      op_queue_.push(outer_info->private_op_queue);
#endif // defined(BOOST_ASIO_HAS_THREADS)

  return do_poll_one(lock, this_thread, ec);
}

// Stop the scheduler and wake every thread blocked in run()/wait.
void scheduler::stop()
{
  mutex::scoped_lock lock(mutex_);
  stop_all_threads(lock);
}

bool scheduler::stopped() const
{
  mutex::scoped_lock lock(mutex_);
  return stopped_;
}

// Clear the stopped flag so run()/poll() calls may proceed again.
void scheduler::restart()
{
  mutex::scoped_lock lock(mutex_);
  stopped_ = false;
}

// Bump the calling scheduler thread's private outstanding work count.
// NOTE(review): assumes the caller is running inside this scheduler, i.e.
// thread_call_stack::contains(this) is non-null — confirm at call sites.
void scheduler::compensating_work_started()
{
  thread_info_base* this_thread = thread_call_stack::contains(this);
  ++static_cast<thread_info*>(this_thread)->private_outstanding_work;
}

// Enqueue op as new work. If the caller is a scheduler thread and either
// we're effectively single-threaded or op is a continuation, use the
// thread-private queue (and private work count) to avoid taking the lock.
void scheduler::post_immediate_completion(
    scheduler::operation* op, bool is_continuation)
{
#if defined(BOOST_ASIO_HAS_THREADS)
  if (one_thread_ || is_continuation)
  {
    if (thread_info_base* this_thread = thread_call_stack::contains(this))
    {
      ++static_cast<thread_info*>(this_thread)->private_outstanding_work;
      static_cast<thread_info*>(this_thread)->private_op_queue.push(op);
      return;
    }
  }
#else // defined(BOOST_ASIO_HAS_THREADS)
  (void)is_continuation;
#endif // defined(BOOST_ASIO_HAS_THREADS)

  work_started();
  mutex::scoped_lock lock(mutex_);
  op_queue_.push(op);
  wake_one_thread_and_unlock(lock);
}

// Enqueue op whose work was already counted (no work_started() here).
void scheduler::post_deferred_completion(scheduler::operation* op)
{
#if defined(BOOST_ASIO_HAS_THREADS)
  if (one_thread_)
  {
    if (thread_info_base* this_thread = thread_call_stack::contains(this))
    {
      static_cast<thread_info*>(this_thread)->private_op_queue.push(op);
      return;
    }
  }
#endif // defined(BOOST_ASIO_HAS_THREADS)

  mutex::scoped_lock lock(mutex_);
  op_queue_.push(op);
  wake_one_thread_and_unlock(lock);
}

// Enqueue a whole batch of already-counted operations at once.
void scheduler::post_deferred_completions(
    op_queue<scheduler::operation>& ops)
{
  if (!ops.empty())
  {
#if defined(BOOST_ASIO_HAS_THREADS)
    if (one_thread_)
    {
      if (thread_info_base* this_thread = thread_call_stack::contains(this))
      {
        static_cast<thread_info*>(this_thread)->private_op_queue.push(ops);
        return;
      }
    }
#endif // defined(BOOST_ASIO_HAS_THREADS)

    mutex::scoped_lock lock(mutex_);
    op_queue_.push(ops);
    wake_one_thread_and_unlock(lock);
  }
}

// Enqueue op for execution, counting it as new work first.
void scheduler::do_dispatch(
    scheduler::operation* op)
{
  work_started();
  mutex::scoped_lock lock(mutex_);
  op_queue_.push(op);
  wake_one_thread_and_unlock(lock);
}

// Take ownership of abandoned operations: they are moved onto a local
// queue and reclaimed when it goes out of scope (op_queue's destructor,
// defined elsewhere, disposes of any remaining operations).
void scheduler::abandon_operations(
    op_queue<scheduler::operation>& ops)
{
  op_queue<scheduler::operation> ops2;
  ops2.push(ops);
}

// Core loop step: run the reactor task or one handler. Called with the
// lock held; always returns with the lock released. Returns 1 if a
// handler was executed, 0 if the scheduler stopped.
std::size_t scheduler::do_run_one(mutex::scoped_lock& lock,
    scheduler::thread_info& this_thread,
    const boost::system::error_code& ec)
{
  while (!stopped_)
  {
    if (!op_queue_.empty())
    {
      // Prepare to execute first handler from queue.
      operation* o = op_queue_.front();
      op_queue_.pop();
      bool more_handlers = (!op_queue_.empty());

      if (o == &task_operation_)
      {
        task_interrupted_ = more_handlers;

        // If more handlers remain, hand them to another thread as we go
        // off to run the task; otherwise just release the lock.
        if (more_handlers && !one_thread_)
          wakeup_event_.unlock_and_signal_one(lock);
        else
          lock.unlock();

        task_cleanup on_exit = { this, &lock, &this_thread };
        (void)on_exit;

        // Run the task. May throw an exception. Only block if the operation
        // queue is empty and we're not polling, otherwise we want to return
        // as soon as possible.
        task_->run(more_handlers ? 0 : -1, this_thread.private_op_queue);
      }
      else
      {
        std::size_t task_result = o->task_result_;

        if (more_handlers && !one_thread_)
          wake_one_thread_and_unlock(lock);
        else
          lock.unlock();

        // Ensure the count of outstanding work is decremented on block exit.
        work_cleanup on_exit = { this, &lock, &this_thread };
        (void)on_exit;

        // Complete the operation. May throw an exception. Deletes the object.
        o->complete(this, ec, task_result);

        return 1;
      }
    }
    else
    {
      // Nothing queued: sleep until another thread signals new work.
      wakeup_event_.clear(lock);
      wakeup_event_.wait(lock);
    }
  }

  return 0;
}

// Like do_run_one, but waits at most usec microseconds for a handler to
// become ready. Called with the lock held; returns with it released.
std::size_t scheduler::do_wait_one(mutex::scoped_lock& lock,
    scheduler::thread_info& this_thread, long usec,
    const boost::system::error_code& ec)
{
  if (stopped_)
    return 0;

  operation* o = op_queue_.front();
  if (o == 0)
  {
    wakeup_event_.clear(lock);
    wakeup_event_.wait_for_usec(lock, usec);
    usec = 0; // Wait at most once.
    o = op_queue_.front();
  }

  if (o == &task_operation_)
  {
    op_queue_.pop();
    bool more_handlers = (!op_queue_.empty());

    task_interrupted_ = more_handlers;

    if (more_handlers && !one_thread_)
      wakeup_event_.unlock_and_signal_one(lock);
    else
      lock.unlock();

    {
      task_cleanup on_exit = { this, &lock, &this_thread };
      (void)on_exit;

      // Run the task. May throw an exception. Only block if the operation
      // queue is empty and we're not polling, otherwise we want to return
      // as soon as possible.
      task_->run(more_handlers ? 0 : usec, this_thread.private_op_queue);
    }

    o = op_queue_.front();
    if (o == &task_operation_)
    {
      // The task produced no handlers; let another thread take over.
      if (!one_thread_)
        wakeup_event_.maybe_unlock_and_signal_one(lock);
      return 0;
    }
  }

  if (o == 0)
    return 0;

  op_queue_.pop();
  bool more_handlers = (!op_queue_.empty());

  std::size_t task_result = o->task_result_;

  if (more_handlers && !one_thread_)
    wake_one_thread_and_unlock(lock);
  else
    lock.unlock();

  // Ensure the count of outstanding work is decremented on block exit.
  work_cleanup on_exit = { this, &lock, &this_thread };
  (void)on_exit;

  // Complete the operation. May throw an exception. Deletes the object.
  o->complete(this, ec, task_result);

  return 1;
}

// Non-blocking variant: run the task (with a zero timeout) and/or one
// ready handler. Called with the lock held; returns with it released.
// Returns 1 if a handler was executed, 0 otherwise.
std::size_t scheduler::do_poll_one(mutex::scoped_lock& lock,
    scheduler::thread_info& this_thread,
    const boost::system::error_code& ec)
{
  if (stopped_)
    return 0;

  operation* o = op_queue_.front();
  if (o == &task_operation_)
  {
    op_queue_.pop();
    lock.unlock();

    {
      task_cleanup c = { this, &lock, &this_thread };
      (void)c;

      // Run the task. May throw an exception. Only block if the operation
      // queue is empty and we're not polling, otherwise we want to return
      // as soon as possible.
      task_->run(0, this_thread.private_op_queue);
    }

    o = op_queue_.front();
    if (o == &task_operation_)
    {
      wakeup_event_.maybe_unlock_and_signal_one(lock);
      return 0;
    }
  }

  if (o == 0)
    return 0;

  op_queue_.pop();
  bool more_handlers = (!op_queue_.empty());

  std::size_t task_result = o->task_result_;

  if (more_handlers && !one_thread_)
    wake_one_thread_and_unlock(lock);
  else
    lock.unlock();

  // Ensure the count of outstanding work is decremented on block exit.
  work_cleanup on_exit = { this, &lock, &this_thread };
  (void)on_exit;

  // Complete the operation. May throw an exception. Deletes the object.
  o->complete(this, ec, task_result);

  return 1;
}

// Mark the scheduler stopped, wake all waiting threads, and interrupt the
// reactor task if it hasn't been interrupted already. Lock must be held.
void scheduler::stop_all_threads(
    mutex::scoped_lock& lock)
{
  stopped_ = true;
  wakeup_event_.signal_all(lock);

  if (!task_interrupted_ && task_)
  {
    task_interrupted_ = true;
    task_->interrupt();
  }
}

// Wake exactly one idle thread, or failing that interrupt the reactor task
// so its thread returns to the handler queue. Always releases the lock.
void scheduler::wake_one_thread_and_unlock(
    mutex::scoped_lock& lock)
{
  if (!wakeup_event_.maybe_unlock_and_signal_one(lock))
  {
    if (!task_interrupted_ && task_)
    {
      task_interrupted_ = true;
      task_->interrupt();
    }
    lock.unlock();
  }
}

} // namespace detail
} // namespace asio
} // namespace boost

#include <boost/asio/detail/pop_options.hpp>

#endif // BOOST_ASIO_DETAIL_IMPL_SCHEDULER_IPP