/*
 * SYS/MPLOCK2.H
 *
 * Implement the MP lock.  Note that debug operations are the default:
 * get_mplock(), try_mplock(), and cpu_try_mplock() are macros that
 * record the caller's file and line via the *_debug() variants.
 */

#ifndef _SYS_MPLOCK2_H_
#define _SYS_MPLOCK2_H_

#ifndef _MACHINE_ATOMIC_H_
#include <machine/atomic.h>
#endif
#ifndef _SYS_THREAD_H_
#include <sys/thread.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif

#ifdef SMP

#define get_mplock()		get_mplock_debug(__FILE__, __LINE__)
#define try_mplock()		try_mplock_debug(__FILE__, __LINE__)
#define cpu_try_mplock()	cpu_try_mplock_debug(__FILE__, __LINE__)

void _get_mplock_contested(const char *file, int line);
void _try_mplock_contested(const char *file, int line);
void _cpu_try_mplock_contested(const char *file, int line);
void _rel_mplock_contested(void);
void cpu_get_initial_mplock(void);
void cpu_mplock_contested(void);
void yield_mplock(struct thread *td);

extern int mp_lock;
extern int mp_lock_contention_mask;
extern const char *mp_lock_holder_file;
extern int mp_lock_holder_line;
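
/*
 * Lock state encoding: mp_lock holds the gd_cpuid of the owning cpu,
 * or -1 when the lock is free.  mp_lock_contention_mask is a mask of
 * the cpus currently waiting for the lock (see the set/clr helpers
 * below).
 */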

/*
 * Acquire the MP lock, blocking until we get it.
 *
 * In order to acquire the MP lock we must first pre-dispose td_mpcount
 * for the acquisition and then get the actual lock.
 *
 * The contested function is called only if we do not have or are unable
 * to acquire the actual lock.  It will not return until the lock has
 * been acquired.
 */
static __inline
void
get_mplock_debug(const char *file, int line)
{
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;

	++td->td_mpcount;
	if (mp_lock != gd->gd_cpuid) {
		if (atomic_cmpset_int(&mp_lock, -1, gd->gd_cpuid) == 0)
			_get_mplock_contested(file, line);
#ifdef INVARIANTS
		mp_lock_holder_file = file;
		mp_lock_holder_line = line;
#endif
	}
}
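
/*
 * Usage sketch (illustrative, not part of this header): the lock is
 * recursive through td_mpcount, so callers simply bracket the code
 * that needs the MP lock:
 *
 *	get_mplock();
 *	... code requiring the MP lock; may itself call get_mplock() ...
 *	rel_mplock();
 */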

/*
 * Release the MP lock
 *
 * In order to release the MP lock we must first pre-dispose td_mpcount
 * for the release and then, if it is 0, release the actual lock.
 *
 * The contested function is called only if we are unable to release the
 * actual lock.  This can occur if we raced an interrupt after decrementing
 * td_mpcount to 0 and the interrupt acquired and released the lock.
 *
 * The function also catches the td_mpcount underflow case because the
 * lock will be in a released state and thus fail the subsequent release.
 */
static __inline
void
rel_mplock(void)
{
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;
	int n;

	n = --td->td_mpcount;
	if (n <= 0 && atomic_cmpset_int(&mp_lock, gd->gd_cpuid, -1) == 0)
		_rel_mplock_contested();
}
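
/*
 * Assumed interleaving of the race described above, for illustration
 * only (same-cpu interrupt lands between the decrement and the cmpset):
 *
 *	thread:	n = --td->td_mpcount;		(n == 0, mp_lock still ours)
 *	intr:	get_mplock();			(sees mp_lock == gd_cpuid)
 *	intr:	rel_mplock();			(cmpset releases lock to -1)
 *	thread:	atomic_cmpset_int() fails	(mp_lock is already -1)
 *	thread:	_rel_mplock_contested();	(sorts out the final state)
 */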

/*
 * Attempt to acquire the MP lock, returning 0 on failure and 1 on success.
 *
 * The contested function is called on failure and typically serves simply
 * to log the attempt (if debugging enabled).
 */
static __inline
int
try_mplock_debug(const char *file, int line)
{
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;

	++td->td_mpcount;
	if (mp_lock != gd->gd_cpuid &&
	    atomic_cmpset_int(&mp_lock, -1, gd->gd_cpuid) == 0) {
		_try_mplock_contested(file, line);
		return(0);
	}
#ifdef INVARIANTS
	mp_lock_holder_file = file;
	mp_lock_holder_line = line;
#endif
	return(1);
}
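
/*
 * Usage sketch (illustrative, not part of this header): the usual
 * try-lock pattern; only the success path pairs with rel_mplock():
 *
 *	if (try_mplock()) {
 *		... work requiring the MP lock ...
 *		rel_mplock();
 *	} else {
 *		... defer or fall back ...
 *	}
 */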

/*
 * Low level acquisition of the MP lock ignoring curthread->td_mpcount.
 *
 * This version of try_mplock() is used when the caller has already
 * predisposed td->td_mpcount.
 *
 * Returns non-zero on success, 0 on failure.
 *
 * WARNING: Must be called from within a critical section if td_mpcount is
 *	    zero, otherwise an interrupt race can cause the lock to be lost.
 */
static __inline
int
cpu_try_mplock_debug(const char *file, int line)
{
	globaldata_t gd = mycpu;

	if (mp_lock != gd->gd_cpuid &&
	    atomic_cmpset_int(&mp_lock, -1, gd->gd_cpuid) == 0) {
		_cpu_try_mplock_contested(file, line);
		return(0);
	}
#ifdef INVARIANTS
	mp_lock_holder_file = file;
	mp_lock_holder_line = line;
#endif
	return(1);
}
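
/*
 * Usage sketch (assumed caller, for illustration only): a low level
 * caller with td_mpcount zero must hold a critical section across the
 * acquire/release pair to close the interrupt race noted above:
 *
 *	crit_enter();
 *	if (cpu_try_mplock()) {
 *		... short MP-only operation ...
 *		cpu_rel_mplock();
 *	}
 *	crit_exit();
 */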

/*
 * A cpu wanted the MP lock but could not get it.  This function is also
 * called directly from the LWKT scheduler.
 *
 * Reentrant, may be called even if the cpu is already contending for the
 * MP lock.
 */
static __inline
void
set_mplock_contention_mask(globaldata_t gd)
{
	atomic_set_int(&mp_lock_contention_mask, gd->gd_cpumask);
}

/*
 * A cpu is no longer contending for the MP lock after previously contending
 * for it.
 *
 * Reentrant, may be called even if the cpu was not previously contending
 * for the MP lock.
 */
static __inline
void
clr_mplock_contention_mask(globaldata_t gd)
{
	atomic_clear_int(&mp_lock_contention_mask, gd->gd_cpumask);
}
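
/*
 * Usage sketch (assumed, for illustration only): a cpu that fails to
 * acquire the lock advertises itself in the contention mask before
 * waiting and clears itself once the lock is finally obtained:
 *
 *	set_mplock_contention_mask(gd);
 *	while (cpu_try_mplock() == 0)
 *		cpu_pause();
 *	clr_mplock_contention_mask(gd);
 */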

static __inline
int
owner_mplock(void)
{
	return (mp_lock);
}

/*
 * Low level release of the MP lock ignoring curthread->td_mpcount.
 *
 * WARNING: Caller must be in a critical section, otherwise the
 *	    mp_lock can be lost from an interrupt race and we would
 *	    end up clearing someone else's lock.
 */
static __inline void
cpu_rel_mplock(void)
{
	mp_lock = -1;
}

#define MP_LOCK_HELD()						\
	(mp_lock == mycpu->gd_cpuid)
#define ASSERT_MP_LOCK_HELD(td)					\
	KASSERT(MP_LOCK_HELD(), ("MP_LOCK_HELD: Not held thread %p", td))
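
/*
 * Usage sketch (hypothetical function, for illustration only): code
 * that requires its caller to already hold the MP lock asserts it on
 * entry:
 *
 *	static void
 *	example_mp_only(void)
 *	{
 *		ASSERT_MP_LOCK_HELD(curthread);
 *		... manipulate MP-protected state ...
 *	}
 */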

#else

/*
 * UNI-PROCESSOR BUILD - Degenerate case macros
 */
#define get_mplock()
#define rel_mplock()
#define try_mplock()		1
#define owner_mplock()		0
#define MP_LOCK_HELD()		(!0)
#define ASSERT_MP_LOCK_HELD(td)

#endif	/* SMP */

#endif	/* !_SYS_MPLOCK2_H_ */