Fix some spelling errors found by Lintian. Patch from Alessandro Ghedini <ghedo...
[valgrind.git] / drd / tests / tsan_thread_wrappers_pthread.h
blobf15e6ad63105829addcfce69c4b7fa29c0bf4a58
1 /*
2 This file is part of Valgrind, a dynamic binary instrumentation
3 framework.
5 Copyright (C) 2008-2008 Google Inc
6 opensource@google.com
8 This program is free software; you can redistribute it and/or
9 modify it under the terms of the GNU General Public License as
10 published by the Free Software Foundation; either version 2 of the
11 License, or (at your option) any later version.
13 This program is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, see <http://www.gnu.org/licenses/>.
21 The GNU General Public License is contained in the file COPYING.
24 // Author: Konstantin Serebryany <opensource@google.com>
26 // Here we define a few simple classes that wrap pthread primitives.
28 // We need this to create unit tests for helgrind (or similar tool)
29 // that will work with different threading frameworks.
31 // If one needs to test helgrind's support for another threading library,
32 // he/she can create a copy of this file and replace pthread_ calls
33 // with appropriate calls to his/her library.
35 // Note, that some of the methods defined here are annotated with
36 // ANNOTATE_* macros defined in dynamic_annotations.h.
38 // DISCLAIMER: the classes defined in this header file
39 // are NOT intended for general use -- only for unit tests.
42 #ifndef THREAD_WRAPPERS_PTHREAD_H
43 #define THREAD_WRAPPERS_PTHREAD_H
#include <pthread.h>
#include <semaphore.h>
#include <unistd.h>
#include <stdio.h>
#include <stdint.h>  // int64_t
#include <limits.h>  // INT_MAX

#include <queue>
#include <vector>    // std::vector (used by ThreadPool)
52 #ifdef VGO_darwin
53 #include <libkern/OSAtomic.h>
54 #define NO_BARRIER
55 #define NO_TLS
56 #endif
58 #include <string>
59 using namespace std;
61 #include <sys/time.h>
62 #include <time.h>
64 #include "../../drd/drd.h"
65 #define ANNOTATE_NO_OP(arg) do { } while(0)
66 #define ANNOTATE_EXPECT_RACE(addr, descr) \
67 ANNOTATE_BENIGN_RACE_SIZED(addr, 4, "expected race")
68 static inline bool RunningOnValgrind() { return RUNNING_ON_VALGRIND; }
70 #include <assert.h>
71 #ifdef NDEBUG
72 # error "Pleeease, do not define NDEBUG"
73 #endif
74 #define CHECK assert
/// Set this to true if malloc() uses a mutex on your platform, as this may
/// introduce a happens-before arc for a pure happens-before race detector.
const bool kMallocUsesMutex = false;
/// Current wall-clock time in milliseconds since the Unix epoch.
static inline int64_t GetCurrentTimeMillis() {
  struct timeval now;
  gettimeofday(&now, NULL);
  // Widen tv_sec to 64 bits *before* multiplying: on ILP32 platforms
  // `now.tv_sec * 1000` is computed in (32-bit) time_t and overflows.
  return (int64_t)now.tv_sec * 1000 + now.tv_usec / 1000;
}
/// Copy *tv into *ts, adding `offset_milli` milliseconds.
static inline void timeval2timespec(struct timeval *const tv,
                                    struct timespec *ts,
                                    int64_t offset_milli) {
  const int64_t kNsecPerSec  = 1000000000LL;
  const int64_t kNsecPerMsec = 1000000LL;
  const int64_t kNsecPerUsec = 1000LL;
  // Total nanoseconds of the deadline = now + offset.
  const int64_t deadline_nsec =
      (int64_t)tv->tv_sec * kNsecPerSec +
      (int64_t)tv->tv_usec * kNsecPerUsec +
      offset_milli * kNsecPerMsec;
  ts->tv_sec  = deadline_nsec / kNsecPerSec;
  ts->tv_nsec = deadline_nsec % kNsecPerSec;
}
102 class CondVar;
104 #ifndef NO_SPINLOCK
105 /// helgrind does not (yet) support spin locks, so we annotate them.
107 #ifndef VGO_darwin
108 class SpinLock {
109 public:
110 SpinLock() {
111 CHECK(0 == pthread_spin_init(&mu_, 0));
112 ANNOTATE_RWLOCK_CREATE((void*)&mu_);
114 ~SpinLock() {
115 ANNOTATE_RWLOCK_DESTROY((void*)&mu_);
116 CHECK(0 == pthread_spin_destroy(&mu_));
118 void Lock() {
119 CHECK(0 == pthread_spin_lock(&mu_));
120 ANNOTATE_RWLOCK_ACQUIRED((void*)&mu_, 1);
122 void Unlock() {
123 ANNOTATE_RWLOCK_RELEASED((void*)&mu_, 1);
124 CHECK(0 == pthread_spin_unlock(&mu_));
126 private:
127 pthread_spinlock_t mu_;
130 #else
132 class SpinLock {
133 public:
134 // Mac OS X version.
135 SpinLock() : mu_(OS_SPINLOCK_INIT) {
136 ANNOTATE_RWLOCK_CREATE((void*)&mu_);
138 ~SpinLock() {
139 ANNOTATE_RWLOCK_DESTROY((void*)&mu_);
141 void Lock() {
142 OSSpinLockLock(&mu_);
143 ANNOTATE_RWLOCK_ACQUIRED((void*)&mu_, 1);
145 void Unlock() {
146 ANNOTATE_RWLOCK_RELEASED((void*)&mu_, 1);
147 OSSpinLockUnlock(&mu_);
149 private:
150 OSSpinLock mu_;
152 #endif // VGO_darwin
154 #endif // NO_SPINLOCK
/// Just a boolean condition. Used by Mutex::LockWhen and similar.
///
/// Holds either a one-argument predicate (func1_ + arg_) or a
/// zero-argument predicate (func0_); exactly one of the two is set.
template <typename T>
class Condition {
 public:
  typedef bool (*func_t)(void*);

  Condition(bool (*func)(T*), T* arg)
    : func0_(NULL), func1_(func), arg_(arg) {}

  // BUG FIX: the original left func1_ uninitialized here, so Eval()
  // read an indeterminate pointer (undefined behavior) and could call
  // through garbage instead of func0_.
  Condition(bool (*func)())
    : func0_(func), func1_(NULL), arg_(NULL) {}

  bool Eval() const { return func1_ ? func1_(arg_) : func0_(); }

 private:
  bool (*func0_)();
  bool (*func1_)(T*);
  T *arg_;
};
177 /// Wrapper for pthread_mutex_t.
179 /// pthread_mutex_t is *not* a reader-writer lock,
180 /// so the methods like ReaderLock() aren't really reader locks.
181 /// We can not use pthread_rwlock_t because it
182 /// does not work with pthread_cond_t.
184 /// TODO: We still need to test reader locks with this class.
185 /// Implement a mode where pthread_rwlock_t will be used
186 /// instead of pthread_mutex_t (only when not used with CondVar or LockWhen).
188 class Mutex {
189 friend class CondVar;
190 public:
191 Mutex() {
192 CHECK(0 == pthread_mutex_init(&mu_, NULL));
193 CHECK(0 == pthread_cond_init(&cv_, NULL));
194 signal_at_unlock_ = true; // Always signal at Unlock to make
195 // Mutex more friendly to hybrid detectors.
197 ~Mutex() {
198 CHECK(0 == pthread_cond_destroy(&cv_));
199 CHECK(0 == pthread_mutex_destroy(&mu_));
201 void Lock() { CHECK(0 == pthread_mutex_lock(&mu_));}
202 bool TryLock() { return (0 == pthread_mutex_trylock(&mu_));}
203 void Unlock() {
204 if (signal_at_unlock_) {
205 CHECK(0 == pthread_cond_signal(&cv_));
207 CHECK(0 == pthread_mutex_unlock(&mu_));
209 void ReaderLock() { Lock(); }
210 bool ReaderTryLock() { return TryLock();}
211 void ReaderUnlock() { Unlock(); }
213 template <typename T>
214 void LockWhen(const Condition<T>& cond) { Lock(); WaitLoop(cond); }
215 template <typename T>
216 void ReaderLockWhen(const Condition<T>& cond) { Lock(); WaitLoop(cond); }
217 template <typename T>
218 void Await(const Condition<T>& cond) { WaitLoop(cond); }
220 template <typename T>
221 bool ReaderLockWhenWithTimeout(const Condition<T>& cond, int millis)
222 { Lock(); return WaitLoopWithTimeout(cond, millis); }
223 template <typename T>
224 bool LockWhenWithTimeout(const Condition<T>& cond, int millis)
225 { Lock(); return WaitLoopWithTimeout(cond, millis); }
226 template <typename T>
227 bool AwaitWithTimeout(const Condition<T>& cond, int millis)
228 { return WaitLoopWithTimeout(cond, millis); }
230 private:
232 template <typename T>
233 void WaitLoop(const Condition<T>& cond) {
234 signal_at_unlock_ = true;
235 while(cond.Eval() == false) {
236 pthread_cond_wait(&cv_, &mu_);
238 ANNOTATE_CONDVAR_LOCK_WAIT(&cv_, &mu_);
241 template <typename T>
242 bool WaitLoopWithTimeout(const Condition<T>& cond, int millis) {
243 struct timeval now;
244 struct timespec timeout;
245 int retcode = 0;
246 gettimeofday(&now, NULL);
247 timeval2timespec(&now, &timeout, millis);
249 signal_at_unlock_ = true;
250 while (cond.Eval() == false && retcode == 0) {
251 retcode = pthread_cond_timedwait(&cv_, &mu_, &timeout);
253 if(retcode == 0) {
254 ANNOTATE_CONDVAR_LOCK_WAIT(&cv_, &mu_);
256 return cond.Eval();
259 // A hack. cv_ should be the first data member so that
260 // ANNOTATE_CONDVAR_WAIT(&MU, &MU) and ANNOTATE_CONDVAR_SIGNAL(&MU) works.
261 // (See also racecheck_unittest.cc)
262 pthread_cond_t cv_;
263 pthread_mutex_t mu_;
264 bool signal_at_unlock_; // Set to true if Wait was called.
268 class MutexLock { // Scoped Mutex Locker/Unlocker
269 public:
270 MutexLock(Mutex *mu)
271 : mu_(mu) {
272 mu_->Lock();
274 ~MutexLock() {
275 mu_->Unlock();
277 private:
278 Mutex *mu_;
282 /// Wrapper for pthread_cond_t.
283 class CondVar {
284 public:
285 CondVar() { CHECK(0 == pthread_cond_init(&cv_, NULL)); }
286 ~CondVar() { CHECK(0 == pthread_cond_destroy(&cv_)); }
287 void Wait(Mutex *mu) { CHECK(0 == pthread_cond_wait(&cv_, &mu->mu_)); }
288 bool WaitWithTimeout(Mutex *mu, int millis) {
289 struct timeval now;
290 struct timespec timeout;
291 gettimeofday(&now, NULL);
292 timeval2timespec(&now, &timeout, millis);
293 return 0 != pthread_cond_timedwait(&cv_, &mu->mu_, &timeout);
295 void Signal() { CHECK(0 == pthread_cond_signal(&cv_)); }
296 void SignalAll() { CHECK(0 == pthread_cond_broadcast(&cv_)); }
297 private:
298 pthread_cond_t cv_;
302 // pthreads do not allow to use condvar with rwlock so we can't make
303 // ReaderLock method of Mutex to be the real rw-lock.
304 // So, we need a special lock class to test reader locks.
305 #define NEEDS_SEPERATE_RW_LOCK
306 class RWLock {
307 public:
308 RWLock() { CHECK(0 == pthread_rwlock_init(&mu_, NULL)); }
309 ~RWLock() { CHECK(0 == pthread_rwlock_destroy(&mu_)); }
310 void Lock() { CHECK(0 == pthread_rwlock_wrlock(&mu_)); }
311 void ReaderLock() { CHECK(0 == pthread_rwlock_rdlock(&mu_)); }
312 void Unlock() { CHECK(0 == pthread_rwlock_unlock(&mu_)); }
313 void ReaderUnlock() { CHECK(0 == pthread_rwlock_unlock(&mu_)); }
314 private:
315 pthread_cond_t dummy; // Damn, this requires some redesign...
316 pthread_rwlock_t mu_;
319 class ReaderLockScoped { // Scoped RWLock Locker/Unlocker
320 public:
321 ReaderLockScoped(RWLock *mu)
322 : mu_(mu) {
323 mu_->ReaderLock();
325 ~ReaderLockScoped() {
326 mu_->ReaderUnlock();
328 private:
329 RWLock *mu_;
332 class WriterLockScoped { // Scoped RWLock Locker/Unlocker
333 public:
334 WriterLockScoped(RWLock *mu)
335 : mu_(mu) {
336 mu_->Lock();
338 ~WriterLockScoped() {
339 mu_->Unlock();
341 private:
342 RWLock *mu_;
348 /// Wrapper for pthread_create()/pthread_join().
349 class MyThread {
350 public:
351 MyThread(void* (*worker)(void *), void *arg = NULL, const char *name = NULL)
352 :wpvpv_(worker), wvv_(), wvpv_(), arg_(arg), name_(name) {}
353 MyThread(void (*worker)(void), void *arg = NULL, const char *name = NULL)
354 :wpvpv_(), wvv_(worker), wvpv_(), arg_(arg), name_(name) {}
355 MyThread(void (*worker)(void *), void *arg = NULL, const char *name = NULL)
356 :wpvpv_(), wvv_(), wvpv_(worker), arg_(arg), name_(name) {}
358 void Start() { CHECK(0 == pthread_create(&t_, NULL, ThreadBody, this));}
359 void Join() { CHECK(0 == pthread_join(t_, NULL));}
360 pthread_t tid() const { return t_; }
361 private:
362 static void *ThreadBody(void *arg) {
363 MyThread *my_thread = reinterpret_cast<MyThread*>(arg);
364 if (my_thread->name_) {
365 ANNOTATE_THREAD_NAME(my_thread->name_);
367 if (my_thread->wpvpv_)
368 return my_thread->wpvpv_(my_thread->arg_);
369 if (my_thread->wvpv_)
370 my_thread->wvpv_(my_thread->arg_);
371 if (my_thread->wvv_)
372 my_thread->wvv_();
373 return NULL;
375 pthread_t t_;
376 void *(*wpvpv_)(void*);
377 void (*wvv_)(void);
378 void (*wvpv_)(void*);
379 void *arg_;
380 const char *name_;
384 /// Just a message queue.
385 class ProducerConsumerQueue {
386 public:
387 ProducerConsumerQueue(int unused) {
388 //ANNOTATE_PCQ_CREATE(this);
390 ~ProducerConsumerQueue() {
391 CHECK(q_.empty());
392 //ANNOTATE_PCQ_DESTROY(this);
395 // Put.
396 void Put(void *item) {
397 mu_.Lock();
398 q_.push(item);
399 ANNOTATE_CONDVAR_SIGNAL(&mu_); // LockWhen in Get()
400 //ANNOTATE_PCQ_PUT(this);
401 mu_.Unlock();
404 // Get.
405 // Blocks if the queue is empty.
406 void *Get() {
407 mu_.LockWhen(Condition<typeof(q_)>(IsQueueNotEmpty, &q_));
408 void * item = NULL;
409 bool ok = TryGetInternal(&item);
410 CHECK(ok);
411 mu_.Unlock();
412 return item;
415 // If queue is not empty,
416 // remove an element from queue, put it into *res and return true.
417 // Otherwise return false.
418 bool TryGet(void **res) {
419 mu_.Lock();
420 bool ok = TryGetInternal(res);
421 mu_.Unlock();
422 return ok;
425 private:
426 Mutex mu_;
427 std::queue<void*> q_; // protected by mu_
429 // Requires mu_
430 bool TryGetInternal(void ** item_ptr) {
431 if (q_.empty())
432 return false;
433 *item_ptr = q_.front();
434 q_.pop();
435 //ANNOTATE_PCQ_GET(this);
436 return true;
439 static bool IsQueueNotEmpty(std::queue<void*> * queue) {
440 return !queue->empty();
446 /// Function pointer with zero, one or two parameters.
447 struct Closure {
448 typedef void (*F0)();
449 typedef void (*F1)(void *arg1);
450 typedef void (*F2)(void *arg1, void *arg2);
451 int n_params;
452 void *f;
453 void *param1;
454 void *param2;
456 void Execute() {
457 if (n_params == 0) {
458 (F0(f))();
459 } else if (n_params == 1) {
460 (F1(f))(param1);
461 } else {
462 CHECK(n_params == 2);
463 (F2(f))(param1, param2);
465 delete this;
469 Closure *NewCallback(void (*f)()) {
470 Closure *res = new Closure;
471 res->n_params = 0;
472 res->f = (void*)(f);
473 res->param1 = NULL;
474 res->param2 = NULL;
475 return res;
478 template <class P1>
479 Closure *NewCallback(void (*f)(P1), P1 p1) {
480 CHECK(sizeof(P1) <= sizeof(void*));
481 Closure *res = new Closure;
482 res->n_params = 1;
483 res->f = (void*)(f);
484 res->param1 = (void*)p1;
485 res->param2 = NULL;
486 return res;
489 template <class T, class P1, class P2>
490 Closure *NewCallback(void (*f)(P1, P2), P1 p1, P2 p2) {
491 CHECK(sizeof(P1) <= sizeof(void*));
492 Closure *res = new Closure;
493 res->n_params = 2;
494 res->f = (void*)(f);
495 res->param1 = (void*)p1;
496 res->param2 = (void*)p2;
497 return res;
500 /*! A thread pool that uses ProducerConsumerQueue.
501 Usage:
503 ThreadPool pool(n_workers);
504 pool.StartWorkers();
505 pool.Add(NewCallback(func_with_no_args));
506 pool.Add(NewCallback(func_with_one_arg, arg));
507 pool.Add(NewCallback(func_with_two_args, arg1, arg2));
508 ... // more calls to pool.Add()
510 // the ~ThreadPool() is called: we wait workers to finish
511 // and then join all threads in the pool.
514 class ThreadPool {
515 public:
516 //! Create n_threads threads, but do not start.
517 explicit ThreadPool(int n_threads)
518 : queue_(INT_MAX) {
519 for (int i = 0; i < n_threads; i++) {
520 MyThread *thread = new MyThread(&ThreadPool::Worker, this);
521 workers_.push_back(thread);
525 //! Start all threads.
526 void StartWorkers() {
527 for (size_t i = 0; i < workers_.size(); i++) {
528 workers_[i]->Start();
532 //! Add a closure.
533 void Add(Closure *closure) {
534 queue_.Put(closure);
537 int num_threads() { return workers_.size();}
539 //! Wait workers to finish, then join all threads.
540 ~ThreadPool() {
541 for (size_t i = 0; i < workers_.size(); i++) {
542 Add(NULL);
544 for (size_t i = 0; i < workers_.size(); i++) {
545 workers_[i]->Join();
546 delete workers_[i];
549 private:
550 std::vector<MyThread*> workers_;
551 ProducerConsumerQueue queue_;
553 static void *Worker(void *p) {
554 ThreadPool *pool = reinterpret_cast<ThreadPool*>(p);
555 while (true) {
556 Closure *closure = reinterpret_cast<Closure*>(pool->queue_.Get());
557 if(closure == NULL) {
558 return NULL;
560 closure->Execute();
565 #ifndef NO_BARRIER
566 /// Wrapper for pthread_barrier_t.
567 class Barrier{
568 public:
569 explicit Barrier(int n_threads) {CHECK(0 == pthread_barrier_init(&b_, 0, n_threads));}
570 ~Barrier() {CHECK(0 == pthread_barrier_destroy(&b_));}
571 void Block() {
572 // helgrind 3.3.0 does not have an interceptor for barrier.
573 // but our current local version does.
574 // ANNOTATE_CONDVAR_SIGNAL(this);
575 pthread_barrier_wait(&b_);
576 // ANNOTATE_CONDVAR_WAIT(this, this);
578 private:
579 pthread_barrier_t b_;
582 #endif // NO_BARRIER
584 class BlockingCounter {
585 public:
586 explicit BlockingCounter(int initial_count) :
587 count_(initial_count) {}
588 bool DecrementCount() {
589 MutexLock lock(&mu_);
590 count_--;
591 return count_ == 0;
593 void Wait() {
594 mu_.LockWhen(Condition<int>(&IsZero, &count_));
595 mu_.Unlock();
597 private:
598 static bool IsZero(int *arg) { return *arg == 0; }
599 Mutex mu_;
600 int count_;
603 int AtomicIncrement(volatile int *value, int increment);
605 #ifndef VGO_darwin
// Atomically add `increment` to *value; returns the resulting value.
// Uses the GCC/Clang __sync builtin (full memory barrier).
inline int AtomicIncrement(volatile int *value, int increment) {
  return __sync_add_and_fetch(value, increment);
}
610 #else
611 // Mac OS X version.
612 inline int AtomicIncrement(volatile int *value, int increment) {
613 return OSAtomicAdd32(increment, value);
616 // TODO(timurrrr) this is a hack
617 #define memalign(A,B) malloc(B)
619 // TODO(timurrrr) this is a hack
620 int posix_memalign(void **out, size_t al, size_t size) {
621 *out = memalign(al, size);
622 return (*out == 0);
624 #endif // VGO_darwin
626 #endif // THREAD_WRAPPERS_PTHREAD_H
627 // vim:shiftwidth=2:softtabstop=2:expandtab:foldmethod=marker