/* FS-Cache worker operation management routines
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/operations.txt
 */

#define FSCACHE_DEBUG_LEVEL OPERATION
#include <linux/module.h>
#include "internal.h"

atomic_t fscache_op_debug_id;
EXPORT_SYMBOL(fscache_op_debug_id);

/**
 * fscache_enqueue_operation - Enqueue an operation for processing
 * @op: The operation to enqueue
 *
 * Enqueue an operation for processing by the FS-Cache thread pool.
 *
 * This will get its own ref on the object.
 */
void fscache_enqueue_operation(struct fscache_operation *op)
{
	_enter("{OBJ%x OP%x,%u}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERT(op->processor != NULL);
	ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	if (list_empty(&op->pend_link)) {
		switch (op->flags & FSCACHE_OP_TYPE) {
		case FSCACHE_OP_FAST:
			_debug("queue fast");
			atomic_inc(&op->usage);
			if (!schedule_work(&op->fast_work))
				fscache_put_operation(op);
			break;
		case FSCACHE_OP_SLOW:
			_debug("queue slow");
			slow_work_enqueue(&op->slow_work);
			break;
		case FSCACHE_OP_MYTHREAD:
			_debug("queue for caller's attention");
			break;
		default:
			printk(KERN_ERR "FS-Cache: Unexpected op type %lx\n",
			       op->flags);
			BUG();
			break;
		}
		fscache_stat(&fscache_n_op_enqueue);
	}
}
EXPORT_SYMBOL(fscache_enqueue_operation);
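
/*
 * Illustrative sketch only, not part of the original file: a cache backend
 * typically gives an op a processor and a dispatch type before submission,
 * after which fscache_submit_op()/fscache_run_op() feed it to the function
 * above.  my_processor is a hypothetical callback:
 *
 *	op->processor = my_processor;		// runs in pool-thread context
 *	op->flags |= FSCACHE_OP_SLOW;		// or _FAST or _MYTHREAD
 *	slow_work_init(&op->slow_work, &fscache_op_slow_work_ops);
 *	fscache_submit_op(object, op);		// may enqueue immediately
 */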

/*
 * start an op running
 */
static void fscache_run_op(struct fscache_object *object,
			   struct fscache_operation *op)
{
	object->n_in_progress++;
	if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
		wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
	if (op->processor)
		fscache_enqueue_operation(op);
	fscache_stat(&fscache_n_op_run);
}

/*
 * submit an exclusive operation for an object
 * - other ops are excluded from running simultaneously with this one
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_exclusive_op(struct fscache_object *object,
				struct fscache_operation *op)
{
	int ret;

	_enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);

	ret = -ENOBUFS;
	if (fscache_object_is_active(object)) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */

		if (object->n_ops > 1) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_in_progress, ==, 0);
			fscache_run_op(object, op);
		}

		/* need to issue a new write op after this */
		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
		ret = 0;
	} else if (object->state == FSCACHE_OBJECT_CREATING) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else {
		/* not allowed to submit ops in any other state */
		BUG();
	}

	spin_unlock(&object->lock);
	return ret;
}
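
/*
 * A hedged usage sketch (the error handling shown is an assumption, not
 * quoted from a real caller): the exclusive bit is set before submission so
 * that fscache_start_operations() holds everything back behind this op:
 *
 *	set_bit(FSCACHE_OP_EXCLUSIVE, &op->flags);
 *	if (fscache_submit_exclusive_op(object, op) < 0)
 *		fscache_put_operation(op);	// object was in a bad state
 *
 * Because n_exclusive is raised immediately, reads and writes submitted
 * after this point pend on ->pending_ops until the exclusive op completes.
 */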

/*
 * report an unexpected submission
 */
static void fscache_report_unexpected_submission(struct fscache_object *object,
						 struct fscache_operation *op,
						 unsigned long ostate)
{
	static bool once_only;
	struct fscache_operation *p;
	unsigned n;

	if (once_only)
		return;
	once_only = true;

	kdebug("unexpected submission OP%x [OBJ%x %s]",
	       op->debug_id, object->debug_id,
	       fscache_object_states[object->state]);
	kdebug("objstate=%s [%s]",
	       fscache_object_states[object->state],
	       fscache_object_states[ostate]);
	kdebug("objflags=%lx", object->flags);
	kdebug("objevent=%lx [%lx]", object->events, object->event_mask);
	kdebug("ops=%u inp=%u exc=%u",
	       object->n_ops, object->n_in_progress, object->n_exclusive);

	if (!list_empty(&object->pending_ops)) {
		n = 0;
		list_for_each_entry(p, &object->pending_ops, pend_link) {
			ASSERTCMP(p->object, ==, object);
			kdebug("%p %p", p->processor, p->release);
			n++;
		}

		kdebug("n=%u", n);
	}

	dump_stack();
}

/*
 * submit an operation for an object
 * - ops may only be submitted while the object is in the following states:
 *   - during object creation (write ops may be submitted)
 *   - whilst the object is active
 *   - after an I/O error incurred in one of the two above states (op rejected)
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_op(struct fscache_object *object,
		      struct fscache_operation *op)
{
	unsigned long ostate;
	int ret;

	_enter("{OBJ%x OP%x},{%u}",
	       object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);

	ostate = object->state;
	smp_rmb();

	if (fscache_object_is_active(object)) {
		op->object = object;
		object->n_ops++;

		if (object->n_exclusive > 0) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_exclusive, ==, 0);
			fscache_run_op(object, op);
		}
		ret = 0;
	} else if (object->state == FSCACHE_OBJECT_CREATING) {
		op->object = object;
		object->n_ops++;
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
		fscache_report_unexpected_submission(object, op, ostate);
		ASSERT(!fscache_object_is_active(object));
		ret = -ENOBUFS;
	} else {
		ret = -ENOBUFS;
	}

	spin_unlock(&object->lock);
	return ret;
}
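
/*
 * Illustrative caller pattern (assumed, not quoted from a real caller): a
 * submitter that must wait for the op to start can sleep on the
 * FSCACHE_OP_WAITING bit, which fscache_run_op() and
 * fscache_start_operations() clear and wake.  my_wait_bit_fn is a
 * hypothetical bit-wait action that just schedules:
 *
 *	set_bit(FSCACHE_OP_WAITING, &op->flags);
 *	if (fscache_submit_op(object, op) == 0)
 *		wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
 *			    my_wait_bit_fn, TASK_UNINTERRUPTIBLE);
 */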

/*
 * queue an object for withdrawal on error, aborting all following
 * asynchronous operations
 */
void fscache_abort_object(struct fscache_object *object)
{
	_enter("{OBJ%x}", object->debug_id);

	fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
}

/*
 * jump start the operation processing on an object
 * - caller must hold object->lock
 */
void fscache_start_operations(struct fscache_object *object)
{
	struct fscache_operation *op;
	bool stop = false;

	while (!list_empty(&object->pending_ops) && !stop) {
		op = list_entry(object->pending_ops.next,
				struct fscache_operation, pend_link);

		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
			if (object->n_in_progress > 0)
				break;
			stop = true;
		}

		list_del_init(&op->pend_link);
		object->n_in_progress++;

		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		if (op->processor)
			fscache_enqueue_operation(op);

		/* the pending queue was holding a ref on the object */
		fscache_put_operation(op);
	}

	ASSERTCMP(object->n_in_progress, <=, object->n_ops);

	_debug("woke %d ops on OBJ%x",
	       object->n_in_progress, object->debug_id);
}

/*
 * release an operation
 * - queues pending ops if this is the last in-progress op
 */
void fscache_put_operation(struct fscache_operation *op)
{
	struct fscache_object *object;
	struct fscache_cache *cache;

	_enter("{OBJ%x OP%x,%d}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(atomic_read(&op->usage), >, 0);

	if (!atomic_dec_and_test(&op->usage))
		return;

	_debug("PUT OP");
	if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
		BUG();

	fscache_stat(&fscache_n_op_release);

	if (op->release) {
		op->release(op);
		op->release = NULL;
	}

	object = op->object;

	/* now... we may get called with the object spinlock held, so we
	 * complete the cleanup here only if we can immediately acquire the
	 * lock, and defer it otherwise */
	if (!spin_trylock(&object->lock)) {
		_debug("defer put");
		fscache_stat(&fscache_n_op_deferred_release);

		cache = object->cache;
		spin_lock(&cache->op_gc_list_lock);
		list_add_tail(&op->pend_link, &cache->op_gc_list);
		spin_unlock(&cache->op_gc_list_lock);
		schedule_work(&cache->op_gc);
		_leave(" [defer]");
		return;
	}

	if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
		ASSERTCMP(object->n_exclusive, >, 0);
		object->n_exclusive--;
	}

	ASSERTCMP(object->n_in_progress, >, 0);
	object->n_in_progress--;
	if (object->n_in_progress == 0)
		fscache_start_operations(object);

	ASSERTCMP(object->n_ops, >, 0);
	object->n_ops--;
	if (object->n_ops == 0)
		fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

	spin_unlock(&object->lock);

	kfree(op);
	_leave(" [done]");
}
EXPORT_SYMBOL(fscache_put_operation);
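
/*
 * Sketch of the reference discipline implied above (the initialisation line
 * is an assumption about how ops are set up elsewhere): each holder of the
 * op owns one count in op->usage, and the final put is what frees the op:
 *
 *	atomic_set(&op->usage, 1);	// submitter's initial ref
 *	fscache_submit_op(object, op);	// pending list takes its own ref
 *	...
 *	fscache_put_operation(op);	// submitter drops its ref
 *
 * If the final put happens whilst the object lock cannot be taken, cleanup
 * is deferred to fscache_operation_gc() below.
 */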

/*
 * garbage collect operations that have had their release deferred
 */
void fscache_operation_gc(struct work_struct *work)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	struct fscache_cache *cache =
		container_of(work, struct fscache_cache, op_gc);
	int count = 0;

	_enter("");

	do {
		spin_lock(&cache->op_gc_list_lock);
		if (list_empty(&cache->op_gc_list)) {
			spin_unlock(&cache->op_gc_list_lock);
			break;
		}

		op = list_entry(cache->op_gc_list.next,
				struct fscache_operation, pend_link);
		list_del(&op->pend_link);
		spin_unlock(&cache->op_gc_list_lock);

		object = op->object;

		_debug("GC DEFERRED REL OBJ%x OP%x",
		       object->debug_id, op->debug_id);
		fscache_stat(&fscache_n_op_gc);

		ASSERTCMP(atomic_read(&op->usage), ==, 0);

		spin_lock(&object->lock);
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
			ASSERTCMP(object->n_exclusive, >, 0);
			object->n_exclusive--;
		}

		ASSERTCMP(object->n_in_progress, >, 0);
		object->n_in_progress--;
		if (object->n_in_progress == 0)
			fscache_start_operations(object);

		ASSERTCMP(object->n_ops, >, 0);
		object->n_ops--;
		if (object->n_ops == 0)
			fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

		spin_unlock(&object->lock);
		kfree(op);

	} while (count++ < 20);

	if (!list_empty(&cache->op_gc_list))
		schedule_work(&cache->op_gc);

	_leave("");
}

/*
 * allow the slow work item processor to get a ref on an operation
 */
static int fscache_op_get_ref(struct slow_work *work)
{
	struct fscache_operation *op =
		container_of(work, struct fscache_operation, slow_work);

	atomic_inc(&op->usage);
	return 0;
}

/*
 * allow the slow work item processor to discard a ref on an operation
 */
static void fscache_op_put_ref(struct slow_work *work)
{
	struct fscache_operation *op =
		container_of(work, struct fscache_operation, slow_work);

	fscache_put_operation(op);
}

/*
 * execute an operation using the slow thread pool to provide processing context
 * - the caller holds a ref to this object, so we don't need to hold one
 */
static void fscache_op_execute(struct slow_work *work)
{
	struct fscache_operation *op =
		container_of(work, struct fscache_operation, slow_work);
	unsigned long start;

	_enter("{OBJ%x OP%x,%d}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERT(op->processor != NULL);
	start = jiffies;
	op->processor(op);
	fscache_hist(fscache_ops_histogram, start);

	_leave("");
}

const struct slow_work_ops fscache_op_slow_work_ops = {
	.get_ref	= fscache_op_get_ref,
	.put_ref	= fscache_op_put_ref,
	.execute	= fscache_op_execute,
};
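
/*
 * How the slow-work core is expected to use this table (a sketch under
 * 2.6.30-era slow-work assumptions, not a quote from that code):
 *
 *	ops->get_ref(work);	// pin the op while it sits in the queue
 *	ops->execute(work);	// runs op->processor() via fscache_op_execute()
 *	ops->put_ref(work);	// drops the ref; may free the op via
 *				// fscache_put_operation()
 */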