fs/dlm/ast.c
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2010 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/
#include "dlm_internal.h"
#include "lock.h"
#include "user.h"
#include "ast.h"
#define WAKE_ASTS  0

static uint64_t ast_seq_count;
static struct list_head ast_queue;
static spinlock_t ast_queue_lock;
static struct task_struct *astd_task;
static unsigned long astd_wakeflags;
static struct mutex astd_running;
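
/* Casts (completion callbacks) and basts (blocking callbacks) are queued
   on ast_queue and delivered to lock owners by the dlm_astd kernel thread
   defined later in this file. */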

static void dlm_dump_lkb_callbacks(struct dlm_lkb *lkb)
{
        int i;

        log_print("last_bast %x %llu flags %x mode %d sb %d %x",
                  lkb->lkb_id,
                  (unsigned long long)lkb->lkb_last_bast.seq,
                  lkb->lkb_last_bast.flags,
                  lkb->lkb_last_bast.mode,
                  lkb->lkb_last_bast.sb_status,
                  lkb->lkb_last_bast.sb_flags);

        log_print("last_cast %x %llu flags %x mode %d sb %d %x",
                  lkb->lkb_id,
                  (unsigned long long)lkb->lkb_last_cast.seq,
                  lkb->lkb_last_cast.flags,
                  lkb->lkb_last_cast.mode,
                  lkb->lkb_last_cast.sb_status,
                  lkb->lkb_last_cast.sb_flags);

        for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
                log_print("cb %x %llu flags %x mode %d sb %d %x",
                          lkb->lkb_id,
                          (unsigned long long)lkb->lkb_callbacks[i].seq,
                          lkb->lkb_callbacks[i].flags,
                          lkb->lkb_callbacks[i].mode,
                          lkb->lkb_callbacks[i].sb_status,
                          lkb->lkb_callbacks[i].sb_flags);
        }
}

void dlm_del_ast(struct dlm_lkb *lkb)
{
        spin_lock(&ast_queue_lock);
        if (!list_empty(&lkb->lkb_astqueue))
                list_del_init(&lkb->lkb_astqueue);
        spin_unlock(&ast_queue_lock);
}
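
/* Store a new callback in the first free slot of lkb->lkb_callbacks[].
   Redundant basts (same or more restrictive mode than the bast queued
   immediately before them) are dropped here; further suppression happens
   when callbacks are removed for delivery. */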

int dlm_add_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
                         int status, uint32_t sbflags, uint64_t seq)
{
        struct dlm_ls *ls = lkb->lkb_resource->res_ls;
        uint64_t prev_seq;
        int prev_mode;
        int i;

        for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
                if (lkb->lkb_callbacks[i].seq)
                        continue;

                /*
                 * Suppress some redundant basts here, do more on removal.
                 * Don't even add a bast if the callback just before it
                 * is a bast for the same mode or a more restrictive mode.
                 * (the additional > PR check is needed for PR/CW inversion)
                 */

                if ((i > 0) && (flags & DLM_CB_BAST) &&
                    (lkb->lkb_callbacks[i-1].flags & DLM_CB_BAST)) {

                        prev_seq = lkb->lkb_callbacks[i-1].seq;
                        prev_mode = lkb->lkb_callbacks[i-1].mode;

                        if ((prev_mode == mode) ||
                            (prev_mode > mode && prev_mode > DLM_LOCK_PR)) {

                                log_debug(ls, "skip %x add bast %llu mode %d "
                                          "for bast %llu mode %d",
                                          lkb->lkb_id,
                                          (unsigned long long)seq,
                                          mode,
                                          (unsigned long long)prev_seq,
                                          prev_mode);
                                return 0;
                        }
                }

                lkb->lkb_callbacks[i].seq = seq;
                lkb->lkb_callbacks[i].flags = flags;
                lkb->lkb_callbacks[i].mode = mode;
                lkb->lkb_callbacks[i].sb_status = status;
                lkb->lkb_callbacks[i].sb_flags = (sbflags & 0x000000FF);
                break;
        }

        if (i == DLM_CALLBACKS_SIZE) {
                log_error(ls, "no callbacks %x %llu flags %x mode %d sb %d %x",
                          lkb->lkb_id, (unsigned long long)seq,
                          flags, mode, status, sbflags);
                dlm_dump_lkb_callbacks(lkb);
                return -1;
        }

        return 0;
}
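
/* Remove the oldest pending callback (lkb_callbacks[0]) into *cb, shift the
   remaining entries down, and return the count of callbacks still queued in
   *resid.  A bast whose mode is compatible with the most recently delivered
   cast is flagged DLM_CB_SKIP instead of being delivered. */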

int dlm_rem_lkb_callback(struct dlm_ls *ls, struct dlm_lkb *lkb,
                         struct dlm_callback *cb, int *resid)
{
        int i;

        *resid = 0;

        if (!lkb->lkb_callbacks[0].seq)
                return -ENOENT;

        /* oldest undelivered cb is callbacks[0] */

        memcpy(cb, &lkb->lkb_callbacks[0], sizeof(struct dlm_callback));
        memset(&lkb->lkb_callbacks[0], 0, sizeof(struct dlm_callback));

        /* shift others down */

        for (i = 1; i < DLM_CALLBACKS_SIZE; i++) {
                if (!lkb->lkb_callbacks[i].seq)
                        break;
                memcpy(&lkb->lkb_callbacks[i-1], &lkb->lkb_callbacks[i],
                       sizeof(struct dlm_callback));
                memset(&lkb->lkb_callbacks[i], 0, sizeof(struct dlm_callback));
                (*resid)++;
        }

        /* if cb is a bast, it should be skipped if the blocking mode is
           compatible with the last granted mode */

        if ((cb->flags & DLM_CB_BAST) && lkb->lkb_last_cast.seq) {
                if (dlm_modes_compat(cb->mode, lkb->lkb_last_cast.mode)) {
                        cb->flags |= DLM_CB_SKIP;

                        log_debug(ls, "skip %x bast %llu mode %d "
                                  "for cast %llu mode %d",
                                  lkb->lkb_id,
                                  (unsigned long long)cb->seq,
                                  cb->mode,
                                  (unsigned long long)lkb->lkb_last_cast.seq,
                                  lkb->lkb_last_cast.mode);
                        return 0;
                }
        }

        if (cb->flags & DLM_CB_CAST) {
                memcpy(&lkb->lkb_last_cast, cb, sizeof(struct dlm_callback));
                lkb->lkb_last_cast_time = ktime_get();
        }

        if (cb->flags & DLM_CB_BAST) {
                memcpy(&lkb->lkb_last_bast, cb, sizeof(struct dlm_callback));
                lkb->lkb_last_bast_time = ktime_get();
        }

        return 0;
}
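
/* Queue a new cast or bast for an lkb.  User-space locks are handed to
   dlm_user_add_ast(); kernel locks have the callback added to lkb_callbacks
   and the lkb placed on ast_queue (taking a reference) for dlm_astd to
   deliver. */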

void dlm_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
                 uint32_t sbflags)
{
        uint64_t seq;
        int rv;

        spin_lock(&ast_queue_lock);

        seq = ++ast_seq_count;

        if (lkb->lkb_flags & DLM_IFL_USER) {
                spin_unlock(&ast_queue_lock);
                dlm_user_add_ast(lkb, flags, mode, status, sbflags, seq);
                return;
        }

        rv = dlm_add_lkb_callback(lkb, flags, mode, status, sbflags, seq);
        if (rv < 0) {
                spin_unlock(&ast_queue_lock);
                return;
        }

        if (list_empty(&lkb->lkb_astqueue)) {
                kref_get(&lkb->lkb_ref);
                list_add_tail(&lkb->lkb_astqueue, &ast_queue);
        }
        spin_unlock(&ast_queue_lock);

        set_bit(WAKE_ASTS, &astd_wakeflags);
        wake_up_process(astd_task);
}
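
/* Deliver queued callbacks.  Each lkb is unlinked and its lkb_callbacks[]
   drained into a local array while ast_queue_lock is held, so an empty
   lkb_astqueue always implies empty lkb_callbacks; the cast/bast functions
   themselves are then invoked without the spinlock held. */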

static void process_asts(void)
{
        struct dlm_ls *ls = NULL;
        struct dlm_rsb *r = NULL;
        struct dlm_lkb *lkb;
        void (*castfn) (void *astparam);
        void (*bastfn) (void *astparam, int mode);
        struct dlm_callback callbacks[DLM_CALLBACKS_SIZE];
        int i, rv, resid;

repeat:
        spin_lock(&ast_queue_lock);
        list_for_each_entry(lkb, &ast_queue, lkb_astqueue) {
                r = lkb->lkb_resource;
                ls = r->res_ls;

                if (dlm_locking_stopped(ls))
                        continue;

                /* we remove from astqueue list and remove everything in
                   lkb_callbacks before releasing the spinlock so empty
                   lkb_astqueue is always consistent with empty lkb_callbacks */

                list_del_init(&lkb->lkb_astqueue);

                castfn = lkb->lkb_astfn;
                bastfn = lkb->lkb_bastfn;

                memset(&callbacks, 0, sizeof(callbacks));

                for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
                        rv = dlm_rem_lkb_callback(ls, lkb, &callbacks[i], &resid);
                        if (rv < 0)
                                break;
                }
                spin_unlock(&ast_queue_lock);

                if (resid) {
                        /* shouldn't happen, for loop should have removed all */
                        log_error(ls, "callback resid %d lkb %x",
                                  resid, lkb->lkb_id);
                }

                for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
                        if (!callbacks[i].seq)
                                break;
                        if (callbacks[i].flags & DLM_CB_SKIP) {
                                continue;
                        } else if (callbacks[i].flags & DLM_CB_BAST) {
                                bastfn(lkb->lkb_astparam, callbacks[i].mode);
                        } else if (callbacks[i].flags & DLM_CB_CAST) {
                                lkb->lkb_lksb->sb_status = callbacks[i].sb_status;
                                lkb->lkb_lksb->sb_flags = callbacks[i].sb_flags;
                                castfn(lkb->lkb_astparam);
                        }
                }

                /* removes ref for ast_queue, may cause lkb to be freed */
                dlm_put_lkb(lkb);

                cond_resched();
                goto repeat;
        }
        spin_unlock(&ast_queue_lock);
}

static inline int no_asts(void)
{
        int ret;

        spin_lock(&ast_queue_lock);
        ret = list_empty(&ast_queue);
        spin_unlock(&ast_queue_lock);
        return ret;
}
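
/* Main loop of the dlm_astd kernel thread: sleep until WAKE_ASTS is set,
   then deliver queued callbacks.  The astd_running mutex lets
   dlm_astd_suspend()/dlm_astd_resume() block delivery temporarily; they are
   used elsewhere in the dlm code, presumably around recovery. */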

static int dlm_astd(void *data)
{
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (!test_bit(WAKE_ASTS, &astd_wakeflags))
                        schedule();
                set_current_state(TASK_RUNNING);

                mutex_lock(&astd_running);
                if (test_and_clear_bit(WAKE_ASTS, &astd_wakeflags))
                        process_asts();
                mutex_unlock(&astd_running);
        }
        return 0;
}
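
/* Wake the astd thread if any callbacks remain queued.  Callers outside
   this file are assumed to use this to retry delivery that was deferred,
   e.g. for lkbs skipped while dlm_locking_stopped() was true. */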

void dlm_astd_wake(void)
{
        if (!no_asts()) {
                set_bit(WAKE_ASTS, &astd_wakeflags);
                wake_up_process(astd_task);
        }
}

int dlm_astd_start(void)
{
        struct task_struct *p;
        int error = 0;

        INIT_LIST_HEAD(&ast_queue);
        spin_lock_init(&ast_queue_lock);
        mutex_init(&astd_running);

        p = kthread_run(dlm_astd, NULL, "dlm_astd");
        if (IS_ERR(p))
                error = PTR_ERR(p);
        else
                astd_task = p;
        return error;
}
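
/* Suspend/resume simply hold and release the astd_running mutex, which
   dlm_astd() takes around each process_asts() pass, so no callbacks are
   delivered while suspended. */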

void dlm_astd_stop(void)
{
        kthread_stop(astd_task);
}

void dlm_astd_suspend(void)
{
        mutex_lock(&astd_running);
}

void dlm_astd_resume(void)
{
        mutex_unlock(&astd_running);
}