/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);

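/*
 * Illustrative usage sketch (added, not part of the original file): a holder
 * that stashes @ioc beyond the scope where it was obtained pairs
 * get_io_context() with put_io_context() (defined below), e.g.
 *
 *	get_io_context(ioc);
 *	owner->ioc = ioc;
 *	...
 *	put_io_context(owner->ioc);
 *
 * "owner" here is a hypothetical structure holding the reference.
 */
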
static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/* Exit an icq.  Called with both ioc and q locked. */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->ops.elevator_exit_icq_fn)
		et->ops.elevator_exit_icq_fn(icq);

	icq->flags |= ICQ_EXITED;
}

/* Release an icq.  Called with both ioc and q locked. */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);
	lockdep_assert_held(q->queue_lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock.  If it's not pointing to @icq now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_dereference_raw(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	unsigned long flags;

	/*
	 * Exiting icq may call into put_io_context() through elevator
	 * which will trigger lockdep warning.  The ioc's are guaranteed to
	 * be different, use a different locking subclass here.  Use
	 * irqsave variant as there's no spin_lock_irq_nested().
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
		}
	}

	spin_unlock_irqrestore(&ioc->lock, flags);

	kmem_cache_free(iocontext_cachep, ioc);
}

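/*
 * Clarifying note (added): elsewhere in this file the locking order is
 * q->queue_lock first, then ioc->lock (see ioc_create_icq() and
 * ioc_clear_queue()).  The release path above walks from the ioc side,
 * i.e. in the reverse order, which is why it may only spin_trylock() the
 * queue_lock and must drop ioc->lock and retry on failure instead of
 * blocking; blocking there would risk an ABBA deadlock.
 */
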
/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			schedule_work(&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;
	struct io_cq *icq;
	struct hlist_node *n;
	unsigned long flags;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	if (!atomic_dec_and_test(&ioc->nr_tasks)) {
		put_io_context(ioc);
		return;
	}

	/*
	 * Need ioc lock to walk icq_list and q lock to exit icq.  Perform
	 * reverse double locking.  Read comment in ioc_release_fn() for
	 * explanation on the nested locking annotation.
	 */
retry:
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;
		if (spin_trylock(icq->q->queue_lock)) {
			ioc_exit_icq(icq);
			spin_unlock(icq->q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			goto retry;
		}
	}
	spin_unlock_irqrestore(&ioc->lock, flags);

	put_io_context(ioc);
}

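/*
 * Note (added): exit_io_context() is expected to be called from the task
 * exit path (e.g. do_exit()), after which the exiting task must not issue
 * further IO; see the comment in create_io_context_slowpath() about
 * allowing ioc creation for an exiting %current.
 */
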
/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.  Must be called with @q locked.
 */
void ioc_clear_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	while (!list_empty(&q->icq_list)) {
		struct io_cq *icq = list_entry(q->icq_list.next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock(&ioc->lock);
		ioc_destroy_icq(icq);
		spin_unlock(&ioc->lock);
	}
}

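/*
 * Clarifying note (added): unlike ioc_release_fn(), this path can take
 * ioc->lock unconditionally because the caller already holds
 * q->queue_lock, matching the q-then-ioc locking order used in
 * ioc_create_icq(), so no trylock dance is needed here.
 */
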
void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
				int node)
{
	struct io_context *ioc;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install.  ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting.  Note
	 * that we need to allow ioc creation on exiting %current as exit
	 * path may issue IOs from e.g. exit_files().  The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);
	task_unlock(task);
}

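/*
 * For reference (added; a rough sketch of the fast-path wrapper declared in
 * block/blk.h in this era, whose exact body may differ): callers below use
 * create_io_context(), which only falls into this slowpath when @task has
 * no io_context yet, along the lines of:
 *
 *	static inline struct io_context *create_io_context(
 *			struct task_struct *task, gfp_t gfp_mask, int node)
 *	{
 *		if (unlikely(!task->io_context))
 *			create_io_context_slowpath(task, gfp_mask, node);
 *		return task->io_context;
 *	}
 */
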
/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (create_io_context(task, gfp_flags, node));

	return NULL;
}
EXPORT_SYMBOL(get_task_io_context);

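/*
 * Illustrative usage sketch (added, not from the original file): a caller
 * that wants another task's io_context might do:
 *
 *	struct io_context *ioc;
 *
 *	ioc = get_task_io_context(some_task, GFP_KERNEL, NUMA_NO_NODE);
 *	if (ioc) {
 *		... use ioc ...
 *		put_io_context(ioc);
 *	}
 *
 * "some_task" is a hypothetical task_struct pointer held by the caller.
 */
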
/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU.  All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find a icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);

/**
 * ioc_create_icq - create and link io_cq
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure io_cq linking %current->io_context and @q exists.  If either
 * io_context and/or icq don't exist, they will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_context *ioc;
	struct io_cq *icq;

	/* allocate stuff */
	ioc = create_io_context(current, gfp_mask, q->node);
	if (!ioc)
		return NULL;

	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.elevator_init_icq_fn)
			et->ops.elevator_init_icq_fn(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(q->queue_lock);
	radix_tree_preload_end();
	return icq;
}

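/*
 * Illustrative usage sketch (added, not from the original file): an IO
 * scheduler typically tries a lookup first and only falls back to
 * creation on a miss, roughly:
 *
 *	struct io_cq *icq = NULL;
 *
 *	spin_lock_irq(q->queue_lock);
 *	if (current->io_context)
 *		icq = ioc_lookup_icq(current->io_context, q);
 *	spin_unlock_irq(q->queue_lock);
 *	if (!icq)
 *		icq = ioc_create_icq(q, gfp_mask);
 *
 * The real call sites live in the elevator/block core code and may differ
 * in detail.
 */
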
void ioc_set_icq_flags(struct io_context *ioc, unsigned int flags)
{
	struct io_cq *icq;
	struct hlist_node *n;

	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
		icq->flags |= flags;
}

/**
 * ioc_ioprio_changed - notify ioprio change
 * @ioc: io_context of interest
 * @ioprio: new ioprio
 *
 * @ioc's ioprio has changed to @ioprio.  Set %ICQ_IOPRIO_CHANGED for all
 * icq's.  iosched is responsible for checking the bit and applying it on
 * request issue path.
 */
void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);
	ioc->ioprio = ioprio;
	ioc_set_icq_flags(ioc, ICQ_IOPRIO_CHANGED);
	spin_unlock_irqrestore(&ioc->lock, flags);
}

/**
 * ioc_cgroup_changed - notify cgroup change
 * @ioc: io_context of interest
 *
 * @ioc's cgroup has changed.  Set %ICQ_CGROUP_CHANGED for all icq's.
 * iosched is responsible for checking the bit and applying it on request
 * issue path.
 */
void ioc_cgroup_changed(struct io_context *ioc)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);
	ioc_set_icq_flags(ioc, ICQ_CGROUP_CHANGED);
	spin_unlock_irqrestore(&ioc->lock, flags);
}
EXPORT_SYMBOL(ioc_cgroup_changed);

/**
 * icq_get_changed - fetch and clear icq changed mask
 * @icq: icq of interest
 *
 * Fetch and clear ICQ_*_CHANGED bits from @icq.  Grabs and releases
 * @icq->ioc->lock.
 */
unsigned icq_get_changed(struct io_cq *icq)
{
	unsigned int changed = 0;
	unsigned long flags;

	if (unlikely(icq->flags & ICQ_CHANGED_MASK)) {
		spin_lock_irqsave(&icq->ioc->lock, flags);
		changed = icq->flags & ICQ_CHANGED_MASK;
		icq->flags &= ~ICQ_CHANGED_MASK;
		spin_unlock_irqrestore(&icq->ioc->lock, flags);
	}
	return changed;
}
EXPORT_SYMBOL(icq_get_changed);

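/*
 * Illustrative usage sketch (added, not from the original file): on the
 * request issue path an IO scheduler consumes the changed bits set by the
 * notifiers above, roughly:
 *
 *	unsigned int changed = icq_get_changed(icq);
 *
 *	if (changed & ICQ_IOPRIO_CHANGED)
 *		... re-read icq->ioc->ioprio and reclassify the queue ...
 *	if (changed & ICQ_CGROUP_CHANGED)
 *		... drop cached per-cgroup state for this icq ...
 */
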
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);