//===-- tsan_dense_alloc.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// A DenseSlabAlloc is a freelist-based allocator of fixed-size objects.
// DenseSlabAllocCache is a thread-local cache for DenseSlabAlloc.
// The only difference with traditional slab allocators is that DenseSlabAlloc
// allocates/frees indices of objects and provides a functionality to map
// the index onto the real pointer. The index is u32, that is, 2 times smaller
// than uptr (hence the Dense prefix).
//===----------------------------------------------------------------------===//
18 #ifndef TSAN_DENSE_ALLOC_H
19 #define TSAN_DENSE_ALLOC_H
21 #include "sanitizer_common/sanitizer_common.h"
22 #include "tsan_defs.h"
23 #include "tsan_mutex.h"
27 class DenseSlabAllocCache
{
28 static const uptr kSize
= 128;
32 template<typename T
, uptr kL1Size
, uptr kL2Size
> friend class DenseSlabAlloc
;
35 template<typename T
, uptr kL1Size
, uptr kL2Size
>
36 class DenseSlabAlloc
{
38 typedef DenseSlabAllocCache Cache
;
39 typedef typename
Cache::IndexT IndexT
;
41 explicit DenseSlabAlloc(const char *name
) {
42 // Check that kL1Size and kL2Size are sane.
43 CHECK_EQ(kL1Size
& (kL1Size
- 1), 0);
44 CHECK_EQ(kL2Size
& (kL2Size
- 1), 0);
45 CHECK_GE(1ull << (sizeof(IndexT
) * 8), kL1Size
* kL2Size
);
46 // Check that it makes sense to use the dense alloc.
47 CHECK_GE(sizeof(T
), sizeof(IndexT
));
48 internal_memset(map_
, 0, sizeof(map_
));
55 for (uptr i
= 0; i
< kL1Size
; i
++) {
57 UnmapOrDie(map_
[i
], kL2Size
* sizeof(T
));
61 IndexT
Alloc(Cache
*c
) {
64 return c
->cache
[--c
->pos
];
67 void Free(Cache
*c
, IndexT idx
) {
69 if (c
->pos
== Cache::kSize
)
71 c
->cache
[c
->pos
++] = idx
;
76 DCHECK_LE(idx
, kL1Size
* kL2Size
);
77 return &map_
[idx
/ kL2Size
][idx
% kL2Size
];
80 void FlushCache(Cache
*c
) {
81 SpinMutexLock
lock(&mtx_
);
83 IndexT idx
= c
->cache
[--c
->pos
];
84 *(IndexT
*)Map(idx
) = freelist_
;
89 void InitCache(Cache
*c
) {
91 internal_memset(c
->cache
, 0, sizeof(c
->cache
));
101 void Refill(Cache
*c
) {
102 SpinMutexLock
lock(&mtx_
);
103 if (freelist_
== 0) {
104 if (fillpos_
== kL1Size
) {
105 Printf("ThreadSanitizer: %s overflow (%zu*%zu). Dying.\n",
106 name_
, kL1Size
, kL2Size
);
109 VPrintf(2, "ThreadSanitizer: growing %s: %zu out of %zu*%zu\n",
110 name_
, fillpos_
, kL1Size
, kL2Size
);
111 T
*batch
= (T
*)MmapOrDie(kL2Size
* sizeof(T
), name_
);
112 // Reserve 0 as invalid index.
113 IndexT start
= fillpos_
== 0 ? 1 : 0;
114 for (IndexT i
= start
; i
< kL2Size
; i
++) {
116 *(IndexT
*)(batch
+ i
) = i
+ 1 + fillpos_
* kL2Size
;
118 *(IndexT
*)(batch
+ kL2Size
- 1) = 0;
119 freelist_
= fillpos_
* kL2Size
+ start
;
120 map_
[fillpos_
++] = batch
;
122 for (uptr i
= 0; i
< Cache::kSize
/ 2 && freelist_
!= 0; i
++) {
123 IndexT idx
= freelist_
;
124 c
->cache
[c
->pos
++] = idx
;
125 freelist_
= *(IndexT
*)Map(idx
);
129 void Drain(Cache
*c
) {
130 SpinMutexLock
lock(&mtx_
);
131 for (uptr i
= 0; i
< Cache::kSize
/ 2; i
++) {
132 IndexT idx
= c
->cache
[--c
->pos
];
133 *(IndexT
*)Map(idx
) = freelist_
;
139 } // namespace __tsan
141 #endif // TSAN_DENSE_ALLOC_H