/*
 * Commit: Extract the code to emit a memory barrier. Add a memory_barrier_kind
 * field to MonoInst.
 * Path: mono/utils/mono-memory-model.h
 * Blob: 8bedf0681398a774825eee2b747ceb4f08c736fa
 */
1 /*
2 * mono-memory-model.h: Mapping of the arch memory model.
4 * Author:
5 * Rodrigo Kumpera (kumpera@gmail.com)
7 * (C) 2011 Xamarin, Inc
8 */
10 #ifndef _MONO_UTILS_MONO_MEMMODEL_H_
11 #define _MONO_UTILS_MONO_MEMMODEL_H_
13 #include <config.h>
14 #include <mono/utils/mono-membar.h>
/*
In order to allow for fast concurrent code, we must use fencing to properly order
memory access - especially on archs with weaker memory models such as ARM or PPC.

On the other hand, we can't use arm's weak model on targets such as x86 that have
a stronger model that requires much much less fencing.

The idea of exposing each arch memory model is to avoid fencing whenever possible
but at the same time make all required ordering explicit.

There are four kinds of barriers, LoadLoad, LoadStore, StoreLoad and StoreStore.
Each arch must define which ones need fencing.

We assume 3 kinds of barriers are available: load, store and memory (load+store).

TODO: Add support for weaker forms of CAS such as present on ARM.
TODO: replace all explicit uses of memory barriers with macros from this section. This will make a nicer read of lazy init code.
TODO: if we find places where a data dependency could replace barriers, add macros here to help with it.
TODO: some archs with strong consistency, such as x86, support weaker access. We might need to expose more kinds of barriers once we exploit this.
*/

/* Map the generic barrier names onto the primitives provided by mono-membar.h.
 * NOTE: the original scrape read "STORE_BARRIEE" here, but every use site below
 * spells "STORE_BARRIER" — the misspelled name was never referenced. */
#define MEMORY_BARRIER mono_memory_barrier
#define LOAD_BARRIER mono_memory_read_barrier
#define STORE_BARRIER mono_memory_write_barrier
/* Symbolic names for the kinds of barrier an arch may need to emit
 * (e.g. stored in a memory_barrier_kind field on an instruction). */
enum {
	StoreStoreBarrier,
	LoadLoadBarrier,
	StoreLoadBarrier,
	LoadStoreBarrier,
	FullBarrier
};
#if defined(__i386__) || defined(__x86_64__)
/*
Both x86 and amd64 follow the SPO memory model:
-Loads are not reordered with other loads
-Stores are not reordered with other stores
-Stores are not reordered with earlier loads

Only StoreLoad needs an explicit fence on these archs; the other three
orderings are guaranteed by the hardware, so their fences stay undefined
here and default to no-ops below.
*/

/* Neither sfence nor mfence provide the required semantics here. */
#define STORE_LOAD_FENCE MEMORY_BARRIER

#elif defined(__arm__)
/*
ARM memory model is as weak as it can get. The only guarantee are data dependent
accesses.
LoadStore fences are much better handled using a data dependency such as:
load x; if (x == x) store y;

This trick can be applied to other fences such as LoadLoad, but requires some assembly:

LDR R0, [R1]
AND R0, R0, #0
LDR R3, [R4, R0]
*/
#define STORE_STORE_FENCE STORE_BARRIER
#define LOAD_LOAD_FENCE LOAD_BARRIER
#define STORE_LOAD_FENCE MEMORY_BARRIER
#define LOAD_STORE_FENCE MEMORY_BARRIER

#else

/* Default implementation with the weakest possible memory model: fence everything. */
#define STORE_STORE_FENCE STORE_BARRIER
#define LOAD_LOAD_FENCE LOAD_BARRIER
#define STORE_LOAD_FENCE MEMORY_BARRIER
#define LOAD_STORE_FENCE MEMORY_BARRIER

#endif
/* Any fence the arch above did not ask for defaults to a no-op. */
#ifndef STORE_STORE_FENCE
#define STORE_STORE_FENCE
#endif

#ifndef LOAD_LOAD_FENCE
#define LOAD_LOAD_FENCE
#endif

#ifndef STORE_LOAD_FENCE
#define STORE_LOAD_FENCE
#endif

#ifndef LOAD_STORE_FENCE
#define LOAD_STORE_FENCE
#endif
/*
Acquire/release semantics macros.

Acquire/release models what most code needs, which is to do load/store pairing of barriers
from multiple threads.
Release semantics make sure all previous stores have completed before the next memory access.
Acquire semantics make sure all following loads won't execute before the previous one.

This is a slightly harmless variation on ECMA's that further constrains ordering among
different kinds of access.
*/

/* Publish a value: fence all earlier stores, then perform the store. */
#define mono_atomic_store_release(target,value)	do {	\
	STORE_STORE_FENCE;	\
	*(target) = (value);	\
} while (0)

/* Consume a value: perform the load, then fence before any later loads.
 * Uses GNU statement-expression and typeof extensions (GCC/Clang). */
#define mono_atomic_load_acquire(target) ({	\
	typeof (*target) __tmp = *target;	\
	LOAD_LOAD_FENCE;	\
	__tmp; })
126 #endif /* _MONO_UTILS_MONO_MEMMODEL_H_ */