// Copyright (c) 2015-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "scheduler.h"

#include "random.h"
#include "reverselock.h"

#include <assert.h>
#include <boost/bind.hpp>
#include <utility>

CScheduler::CScheduler() : nThreadsServicingQueue(0), stopRequested(false), stopWhenEmpty(false)
{
}

CScheduler::~CScheduler()
{
    assert(nThreadsServicingQueue == 0);
}

#if BOOST_VERSION < 105000
static boost::system_time toPosixTime(const boost::chrono::system_clock::time_point& t)
{
    // Creating the posix_time using from_time_t loses sub-second precision. So rather than exporting the time_point to time_t,
    // start with a posix_time at the epoch (0) and add the milliseconds that have passed since then.
    return boost::posix_time::from_time_t(0) + boost::posix_time::milliseconds(boost::chrono::duration_cast<boost::chrono::milliseconds>(t.time_since_epoch()).count());
}
#endif
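
// Worked example for toPosixTime() above (illustrative): for a time_point that
// is 1234567ms past the epoch, exporting through time_t first would keep only
// 1234s, while epoch-plus-milliseconds preserves the full 1234.567s.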

void CScheduler::serviceQueue()
{
    boost::unique_lock<boost::mutex> lock(newTaskMutex);
    ++nThreadsServicingQueue;

    // newTaskMutex is locked throughout this loop EXCEPT
    // when the thread is waiting or when the user's function
    // is called.
    while (!shouldStop()) {
        try {
            if (!shouldStop() && taskQueue.empty()) {
                reverse_lock<boost::unique_lock<boost::mutex> > rlock(lock);
                // Use this chance to get a tiny bit more entropy
                RandAddSeedSleep();
            }
            while (!shouldStop() && taskQueue.empty()) {
                // Wait until there is something to do.
                newTaskScheduled.wait(lock);
            }

            // Wait until either there is a new task, or until
            // the time of the first item on the queue:

// wait_until needs boost 1.50 or later; older versions have timed_wait:
#if BOOST_VERSION < 105000
            while (!shouldStop() && !taskQueue.empty() &&
                   newTaskScheduled.timed_wait(lock, toPosixTime(taskQueue.begin()->first))) {
                // Keep waiting until timeout
            }
#else
            // Some boost versions have a conflicting overload of wait_until that returns void.
            // Explicitly use a template here to avoid hitting that overload.
            while (!shouldStop() && !taskQueue.empty()) {
                boost::chrono::system_clock::time_point timeToWaitFor = taskQueue.begin()->first;
                if (newTaskScheduled.wait_until<>(lock, timeToWaitFor) == boost::cv_status::timeout)
                    break; // Exit loop after timeout; it means we reached the time of the event
            }
#endif
            // If there are multiple threads, the queue can empty while we're waiting (another
            // thread may service the task we were waiting on).
            if (shouldStop() || taskQueue.empty())
                continue;

            Function f = taskQueue.begin()->second;
            taskQueue.erase(taskQueue.begin());

            {
                // Unlock before calling f, so it can reschedule itself or another task
                // without deadlocking:
                reverse_lock<boost::unique_lock<boost::mutex> > rlock(lock);
                f();
            }
        } catch (...) {
            // Keep the servicing-thread count accurate even if a task throws.
            --nThreadsServicingQueue;
            throw;
        }
    }
    --nThreadsServicingQueue;
    newTaskScheduled.notify_one();
}
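
// Example usage (an illustrative sketch, not part of the upstream file): one or
// more threads run serviceQueue() as their thread body while other code
// schedules work. stop(true) lets the service thread drain the queue before
// exiting; stop(false) makes it return as soon as possible. "doSomething" is a
// placeholder for any void() callable.
//
//     CScheduler scheduler;
//     boost::thread service(boost::bind(&CScheduler::serviceQueue, &scheduler));
//     scheduler.scheduleFromNow(doSomething, 1000); // run roughly one second from now
//     scheduler.stop(true);                         // drain queued tasks, then stop
//     service.join();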

void CScheduler::stop(bool drain)
{
    {
        boost::unique_lock<boost::mutex> lock(newTaskMutex);
        if (drain)
            stopWhenEmpty = true;
        else
            stopRequested = true;
    }
    newTaskScheduled.notify_all();
}

void CScheduler::schedule(CScheduler::Function f, boost::chrono::system_clock::time_point t)
{
    {
        boost::unique_lock<boost::mutex> lock(newTaskMutex);
        taskQueue.insert(std::make_pair(t, f));
    }
    newTaskScheduled.notify_one();
}

void CScheduler::scheduleFromNow(CScheduler::Function f, int64_t deltaMilliSeconds)
{
    schedule(f, boost::chrono::system_clock::now() + boost::chrono::milliseconds(deltaMilliSeconds));
}

static void Repeat(CScheduler* s, CScheduler::Function f, int64_t deltaMilliSeconds)
{
    f();
    s->scheduleFromNow(boost::bind(&Repeat, s, f, deltaMilliSeconds), deltaMilliSeconds);
}

void CScheduler::scheduleEvery(CScheduler::Function f, int64_t deltaMilliSeconds)
{
    scheduleFromNow(boost::bind(&Repeat, this, f, deltaMilliSeconds), deltaMilliSeconds);
}

size_t CScheduler::getQueueInfo(boost::chrono::system_clock::time_point &first,
                                boost::chrono::system_clock::time_point &last) const
{
    boost::unique_lock<boost::mutex> lock(newTaskMutex);
    size_t result = taskQueue.size();
    if (!taskQueue.empty()) {
        first = taskQueue.begin()->first;
        last = taskQueue.rbegin()->first;
    }
    return result;
}

bool CScheduler::AreThreadsServicingQueue() const {
    return nThreadsServicingQueue;
}

void SingleThreadedSchedulerClient::MaybeScheduleProcessQueue() {
    {
        LOCK(m_cs_callbacks_pending);
        // Try to avoid scheduling too many copies here, but if we accidentally
        // have two ProcessQueue calls scheduled at once, it's not a big deal.
        if (m_are_callbacks_running) return;
        if (m_callbacks_pending.empty()) return;
    }
    m_pscheduler->schedule(std::bind(&SingleThreadedSchedulerClient::ProcessQueue, this));
}

void SingleThreadedSchedulerClient::ProcessQueue() {
    std::function<void (void)> callback;
    {
        LOCK(m_cs_callbacks_pending);
        if (m_are_callbacks_running) return;
        if (m_callbacks_pending.empty()) return;
        m_are_callbacks_running = true;

        callback = std::move(m_callbacks_pending.front());
        m_callbacks_pending.pop_front();
    }

    // RAII the setting of m_are_callbacks_running and the call to MaybeScheduleProcessQueue
    // to ensure both happen safely even if callback() throws.
    struct RAIICallbacksRunning {
        SingleThreadedSchedulerClient* instance;
        RAIICallbacksRunning(SingleThreadedSchedulerClient* _instance) : instance(_instance) {}
        ~RAIICallbacksRunning() {
            {
                LOCK(instance->m_cs_callbacks_pending);
                instance->m_are_callbacks_running = false;
            }
            instance->MaybeScheduleProcessQueue();
        }
    } raiicallbacksrunning(this);

    callback();
}

void SingleThreadedSchedulerClient::AddToProcessQueue(std::function<void (void)> func) {
    assert(m_pscheduler);

    {
        LOCK(m_cs_callbacks_pending);
        m_callbacks_pending.emplace_back(std::move(func));
    }
    MaybeScheduleProcessQueue();
}
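
// Example (an illustrative sketch): callbacks added here run one at a time and
// in the order they were added, on the CScheduler passed to the constructor,
// even if that scheduler is serviced by multiple threads.
//
//     SingleThreadedSchedulerClient client(&scheduler);
//     client.AddToProcessQueue(firstCallback);  // placeholders for void() callables
//     client.AddToProcessQueue(secondCallback); // runs only after firstCallback returns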

void SingleThreadedSchedulerClient::EmptyQueue() {
    assert(!m_pscheduler->AreThreadsServicingQueue());
    bool should_continue = true;
    while (should_continue) {
        ProcessQueue();
        LOCK(m_cs_callbacks_pending);
        should_continue = !m_callbacks_pending.empty();
    }
}
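
// Example (an illustrative sketch, continuing the ones above): stop and join
// the scheduler's service threads first, then EmptyQueue() drains whatever
// callbacks are still pending; the assert above enforces that ordering.
//
//     scheduler.stop();
//     service.join();       // "service" is the thread running serviceQueue()
//     client.EmptyQueue();  // flush remaining callbacks on the calling thread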