Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
[wrt350n-kernel.git] / block / blk-ioc.c
blob 775c5edf57ee2099aa355213a8b06ef992522466
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

static void cfq_dtor(struct io_context *ioc)
{
	if (!hlist_empty(&ioc->cic_list)) {
		struct cfq_io_context *cic;

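		/*
		 * We don't have a specific key to look up with, so just take
		 * the first entry on the list; the cfq exit path walks the
		 * whole list, so any member will do.
		 */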
		cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
								cic_list);
		cic->dtor(ioc);
	}
}

/*
 * IO Context helper functions. put_io_context() returns 1 if there are no
 * more users of this io context, 0 otherwise.
 */
int put_io_context(struct io_context *ioc)
{
	if (ioc == NULL)
		return 1;

	BUG_ON(atomic_read(&ioc->refcount) == 0);

	if (atomic_dec_and_test(&ioc->refcount)) {
		rcu_read_lock();
		if (ioc->aic && ioc->aic->dtor)
			ioc->aic->dtor(ioc->aic);
		rcu_read_unlock();
		cfq_dtor(ioc);

		kmem_cache_free(iocontext_cachep, ioc);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(put_io_context);

static void cfq_exit(struct io_context *ioc)
{
	rcu_read_lock();

	if (!hlist_empty(&ioc->cic_list)) {
		struct cfq_io_context *cic;

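		/* See the comment in cfq_dtor(): any list member will do */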
		cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
								cic_list);
		cic->exit(ioc);
	}
	rcu_read_unlock();
}

/* Called by the exiting task */
void exit_io_context(void)
{
	struct io_context *ioc;

	task_lock(current);
	ioc = current->io_context;
	current->io_context = NULL;
	task_unlock(current);

	if (atomic_dec_and_test(&ioc->nr_tasks)) {
		if (ioc->aic && ioc->aic->exit)
			ioc->aic->exit(ioc->aic);
		cfq_exit(ioc);
	}

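	/* Drop the reference the exiting task held on its io context */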
	put_io_context(ioc);
}

struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ret;

	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
	if (ret) {
		atomic_set(&ret->refcount, 1);
		atomic_set(&ret->nr_tasks, 1);
		spin_lock_init(&ret->lock);
		ret->ioprio_changed = 0;
		ret->ioprio = 0;
		ret->last_waited = jiffies; /* doesn't matter... */
		ret->nr_batch_requests = 0; /* because this is 0 */
		ret->aic = NULL;
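		/*
		 * The radix tree is seeded with GFP_ATOMIC | __GFP_HIGH,
		 * presumably because insertions happen in atomic context,
		 * where the allocator must not sleep.
		 */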
		INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
		INIT_HLIST_HEAD(&ret->cic_list);
		ret->ioc_data = NULL;
	}

	return ret;
}

/*
 * If the current task has no IO context then create one and initialise it.
 * Otherwise, return its existing IO context.
 *
 * The returned IO context doesn't have a specifically elevated refcount,
 * but since the current task itself holds a reference, the context can be
 * used in general code, so long as it is only used from the current task's
 * context.
 */
struct io_context *current_io_context(gfp_t gfp_flags, int node)
{
	struct task_struct *tsk = current;
	struct io_context *ret;

	ret = tsk->io_context;
	if (likely(ret))
		return ret;

	ret = alloc_io_context(gfp_flags, node);
	if (ret) {
		/* make sure set_task_ioprio() sees the settings above */
		smp_wmb();
		tsk->io_context = ret;
	}

	return ret;
}

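/*
 * Example (a sketch, not code from this file): a caller running in the
 * submitting task's context can do
 *
 *	struct io_context *ioc = current_io_context(GFP_ATOMIC, node);
 *
 * and use ioc without a matching put_io_context(), since no extra
 * reference is taken; use get_io_context() instead if the pointer may
 * outlive the current task.
 */
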
/*
 * If the current task has no IO context then create one and initialise it.
 * If it does have a context, take a ref on it.
 *
 * This is always called in the context of the task which submitted the I/O.
 */
struct io_context *get_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ret = NULL;

	/*
	 * Check for unlikely race with exiting task. ioc ref count is
	 * zero when ioc is being detached.
	 */
	do {
		ret = current_io_context(gfp_flags, node);
		if (unlikely(!ret))
			break;
	} while (!atomic_inc_not_zero(&ret->refcount));

	return ret;
}
EXPORT_SYMBOL(get_io_context);

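/*
 * Example (a sketch, not code from this file): submit paths that cache the
 * pointer pair this with put_io_context():
 *
 *	struct io_context *ioc = get_io_context(GFP_ATOMIC, node);
 *	if (ioc) {
 *		...
 *		put_io_context(ioc);
 *	}
 */

/*
 * Point *pdst at the context in *psrc, taking a new reference on it and
 * dropping the reference *pdst previously held.
 */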
void copy_io_context(struct io_context **pdst, struct io_context **psrc)
{
	struct io_context *src = *psrc;
	struct io_context *dst = *pdst;

	if (src) {
		BUG_ON(atomic_read(&src->refcount) == 0);
		atomic_inc(&src->refcount);
		put_io_context(dst);
		*pdst = src;
	}
}
EXPORT_SYMBOL(copy_io_context);

static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);