mono/utils/mono-memory-model.h
/**
 * \file
 * Mapping of the arch memory model.
 *
 * Author:
 *	Rodrigo Kumpera (kumpera@gmail.com)
 *
 * (C) 2011 Xamarin, Inc
 */
#ifndef _MONO_UTILS_MONO_MEMMODEL_H_
#define _MONO_UTILS_MONO_MEMMODEL_H_

#include <config.h>
#include <mono/utils/mono-membar.h>
/*
In order to allow for fast concurrent code, we must use fencing to properly order
memory accesses - especially on architectures with weaker memory models such as ARM or PPC.

On the other hand, we can't use ARM's weak model on targets such as x86 that have
a stronger model requiring much, much less fencing.

The idea of exposing each arch's memory model is to avoid fencing whenever possible,
while at the same time making all required ordering explicit.

There are four kinds of barriers: LoadLoad, LoadStore, StoreLoad and StoreStore.
Each arch must define which ones need fencing.

We assume 3 kinds of barriers are available: load, store and memory (load+store).

TODO: Add support for weaker forms of CAS such as those present on ARM.
TODO: Replace all explicit uses of memory barriers with macros from this section. This will make lazy init code read more clearly.
TODO: If we find places where a data dependency could replace barriers, add macros here to help with it.
TODO: Some architectures with strong consistency, such as x86, support weaker accesses. We might need to expose more kinds of barriers once we exploit this.
*/
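/*
Illustrative sketch (not part of the original header): one way these fences are
typically paired in lazy-init style publication code, as mentioned in the TODOs
above. The names lazy_value, lazy_inited and compute are hypothetical.

	static int lazy_value;
	static int lazy_inited;

	static void
	writer (void)
	{
		lazy_value = compute ();
		STORE_STORE_FENCE;      // order the data store before the flag store
		lazy_inited = 1;
	}

	static int
	reader (void)
	{
		if (lazy_inited) {
			LOAD_LOAD_FENCE;   // order the flag load before the data load
			return lazy_value;
		}
		return -1;
	}
*/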
/*
 * Keep in sync with the enum in mini/mini-llvm-cpp.h.
 */
enum {
	MONO_MEMORY_BARRIER_NONE = 0,
	MONO_MEMORY_BARRIER_ACQ = 1,
	MONO_MEMORY_BARRIER_REL = 2,
	MONO_MEMORY_BARRIER_SEQ = 3,
};
#define MEMORY_BARRIER mono_memory_barrier ()
#define LOAD_BARRIER mono_memory_read_barrier ()
#define STORE_BARRIER mono_memory_write_barrier ()
#if defined(__i386__) || defined(__x86_64__)
/*
Both x86 and amd64 follow the SPO memory model:
- Loads are not reordered with other loads
- Stores are not reordered with other stores
- Stores are not reordered with earlier loads
*/

/* Neither sfence nor mfence provides the required semantics here */
#define STORE_LOAD_FENCE MEMORY_BARRIER

#define LOAD_RELEASE_FENCE MEMORY_BARRIER
#define STORE_ACQUIRE_FENCE MEMORY_BARRIER
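/*
Illustrative sketch (not part of the original header): the classic store-buffering
pattern shows why StoreLoad is the one ordering x86 does not give for free, and why
the fence above must be a full barrier. The names x, y, r1, r2, thread1 and thread2
are hypothetical.

	int x = 0, y = 0, r1, r2;

	void thread1 (void) { x = 1; STORE_LOAD_FENCE; r1 = y; }
	void thread2 (void) { y = 1; STORE_LOAD_FENCE; r2 = x; }

	// Without the fences, r1 == 0 && r2 == 0 is a possible outcome on x86,
	// because each store can still sit in the local store buffer when the
	// other thread's load executes. sfence/lfence alone do not forbid this;
	// only a full barrier does, hence STORE_LOAD_FENCE maps to MEMORY_BARRIER.
*/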
#elif defined(__arm__)
/*
The ARM memory model is as weak as it can get; the only ordering guarantee is
between data-dependent accesses.
LoadStore fences are much better handled using a data dependency such as:
load x; if (x == x) store y;

This trick can be applied to other fences such as LoadLoad, but it requires some assembly:

LDR R0, [R1]
AND R0, R0, #0
LDR R3, [R4, R0]
*/
#define STORE_STORE_FENCE STORE_BARRIER
#define LOAD_LOAD_FENCE LOAD_BARRIER
#define STORE_LOAD_FENCE MEMORY_BARRIER
#define STORE_ACQUIRE_FENCE MEMORY_BARRIER
#define STORE_RELEASE_FENCE MEMORY_BARRIER
#define LOAD_ACQUIRE_FENCE MEMORY_BARRIER
#define LOAD_RELEASE_FENCE MEMORY_BARRIER
#elif defined(__s390x__)

#define STORE_STORE_FENCE do {} while (0)
#define LOAD_LOAD_FENCE do {} while (0)
#define STORE_LOAD_FENCE do {} while (0)
#define LOAD_STORE_FENCE do {} while (0)
#define STORE_RELEASE_FENCE do {} while (0)
#else

/* Default implementation with the weakest possible memory model */
#define STORE_STORE_FENCE STORE_BARRIER
#define LOAD_LOAD_FENCE LOAD_BARRIER
#define STORE_LOAD_FENCE MEMORY_BARRIER
#define LOAD_STORE_FENCE MEMORY_BARRIER
#define STORE_ACQUIRE_FENCE MEMORY_BARRIER
#define STORE_RELEASE_FENCE MEMORY_BARRIER
#define LOAD_ACQUIRE_FENCE MEMORY_BARRIER
#define LOAD_RELEASE_FENCE MEMORY_BARRIER

#endif
#ifndef STORE_STORE_FENCE
#define STORE_STORE_FENCE
#endif

#ifndef LOAD_LOAD_FENCE
#define LOAD_LOAD_FENCE
#endif

#ifndef STORE_LOAD_FENCE
#define STORE_LOAD_FENCE
#endif

#ifndef LOAD_STORE_FENCE
#define LOAD_STORE_FENCE
#endif

#ifndef STORE_RELEASE_FENCE
#define STORE_RELEASE_FENCE
#endif

#ifndef LOAD_RELEASE_FENCE
#define LOAD_RELEASE_FENCE
#endif

#ifndef STORE_ACQUIRE_FENCE
#define STORE_ACQUIRE_FENCE
#endif

#ifndef LOAD_ACQUIRE_FENCE
#define LOAD_ACQUIRE_FENCE
#endif
/* Makes sure all previous stores are visible before this one */
#define mono_atomic_store_seq(target,value) do { \
	STORE_STORE_FENCE; \
	*(target) = (value); \
} while (0)
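/*
Illustrative sketch (not part of the original header): mono_atomic_store_seq is
intended for publishing a pointer or flag after the data it guards has been
written. The names item, item_table and idx are hypothetical.

	item->a = 1;
	item->b = 2;
	// The StoreStore fence inside the macro keeps the field stores visible
	// before the pointer itself becomes visible to other threads.
	mono_atomic_store_seq (&item_table [idx], item);
*/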
/*
Acquire/release semantics macros.
*/
#define mono_atomic_store_release(target,value) do { \
	STORE_RELEASE_FENCE; \
	*(target) = (value); \
} while (0)

#define mono_atomic_load_release(_type,target) ({ \
	_type __tmp; \
	LOAD_RELEASE_FENCE; \
	__tmp = *(target); \
	__tmp; })

#define mono_atomic_load_acquire(var,_type,target) do { \
	_type __tmp = *(target); \
	LOAD_ACQUIRE_FENCE; \
	(var) = __tmp; \
} while (0)
#define mono_atomic_store_acquire(target,value) do { \
	*(target) = (value); \
	STORE_ACQUIRE_FENCE; \
} while (0)
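/*
Illustrative sketch (not part of the original header): the intended pairing is a
release store on the producer side and an acquire load on the consumer side. The
names shared_data, shared_ready, ready and local are hypothetical.

	// Producer
	shared_data = 42;
	mono_atomic_store_release (&shared_ready, 1);

	// Consumer
	int ready, local = 0;
	mono_atomic_load_acquire (ready, int, &shared_ready);
	if (ready)
		local = shared_data;   // ordered after the flag load by the acquire fence
*/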
#endif /* _MONO_UTILS_MONO_MEMMODEL_H_ */