* gcc-interface/trans.c (Subprogram_Body_to_gnu): Initialize locus.
[official-gcc.git] / libsanitizer / sanitizer_common / sanitizer_allocator_bytemap.h
blob 5e768ce9ef976a39709bff9ead4e52f7ea314aad
//===-- sanitizer_allocator_bytemap.h ---------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif
15 // Maps integers in rage [0, kSize) to u8 values.
16 template<u64 kSize>
17 class FlatByteMap {
18 public:
19 void TestOnlyInit() {
20 internal_memset(map_, 0, sizeof(map_));
23 void set(uptr idx, u8 val) {
24 CHECK_LT(idx, kSize);
25 CHECK_EQ(0U, map_[idx]);
26 map_[idx] = val;
28 u8 operator[] (uptr idx) {
29 CHECK_LT(idx, kSize);
30 // FIXME: CHECK may be too expensive here.
31 return map_[idx];
33 private:
34 u8 map_[kSize];
37 // TwoLevelByteMap maps integers in range [0, kSize1*kSize2) to u8 values.
38 // It is implemented as a two-dimensional array: array of kSize1 pointers
39 // to kSize2-byte arrays. The secondary arrays are mmaped on demand.
40 // Each value is initially zero and can be set to something else only once.
41 // Setting and getting values from multiple threads is safe w/o extra locking.
42 template <u64 kSize1, u64 kSize2, class MapUnmapCallback = NoOpMapUnmapCallback>
43 class TwoLevelByteMap {
44 public:
45 void TestOnlyInit() {
46 internal_memset(map1_, 0, sizeof(map1_));
47 mu_.Init();
50 void TestOnlyUnmap() {
51 for (uptr i = 0; i < kSize1; i++) {
52 u8 *p = Get(i);
53 if (!p) continue;
54 MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), kSize2);
55 UnmapOrDie(p, kSize2);
59 uptr size() const { return kSize1 * kSize2; }
60 uptr size1() const { return kSize1; }
61 uptr size2() const { return kSize2; }
63 void set(uptr idx, u8 val) {
64 CHECK_LT(idx, kSize1 * kSize2);
65 u8 *map2 = GetOrCreate(idx / kSize2);
66 CHECK_EQ(0U, map2[idx % kSize2]);
67 map2[idx % kSize2] = val;
70 u8 operator[] (uptr idx) const {
71 CHECK_LT(idx, kSize1 * kSize2);
72 u8 *map2 = Get(idx / kSize2);
73 if (!map2) return 0;
74 return map2[idx % kSize2];
77 private:
78 u8 *Get(uptr idx) const {
79 CHECK_LT(idx, kSize1);
80 return reinterpret_cast<u8 *>(
81 atomic_load(&map1_[idx], memory_order_acquire));
84 u8 *GetOrCreate(uptr idx) {
85 u8 *res = Get(idx);
86 if (!res) {
87 SpinMutexLock l(&mu_);
88 if (!(res = Get(idx))) {
89 res = (u8*)MmapOrDie(kSize2, "TwoLevelByteMap");
90 MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2);
91 atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
92 memory_order_release);
95 return res;
98 atomic_uintptr_t map1_[kSize1];
99 StaticSpinMutex mu_;