/*
 * iobuf.c
 *
 * Keep track of the general-purpose IO-buffer structures used to track
 * abstract kernel-space io buffers.
 *
 */

#include <linux/iobuf.h>
#include <linux/malloc.h>
#include <linux/slab.h>

static kmem_cache_t *kiobuf_cachep;

/*
 * The default IO completion routine for kiobufs: just wake up
 * the kiobuf, nothing more.
 */

void simple_wakeup_kiobuf(struct kiobuf *kiobuf)
{
        wake_up(&kiobuf->wait_queue);
}

void __init kiobuf_init(void)
{
        kiobuf_cachep = kmem_cache_create("kiobuf",
                                          sizeof(struct kiobuf),
                                          0,
                                          SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (!kiobuf_cachep)
                panic("Cannot create kernel iobuf cache\n");
}

int alloc_kiovec(int nr, struct kiobuf **bufp)
{
        int i;
        struct kiobuf *iobuf;

        for (i = 0; i < nr; i++) {
                iobuf = kmem_cache_alloc(kiobuf_cachep, SLAB_KERNEL);
                if (!iobuf) {
                        free_kiovec(i, bufp);
                        return -ENOMEM;
                }

                memset(iobuf, 0, sizeof(*iobuf));
                init_waitqueue_head(&iobuf->wait_queue);
                iobuf->end_io = simple_wakeup_kiobuf;
                iobuf->array_len = KIO_STATIC_PAGES;
                iobuf->pagelist = iobuf->page_array;
                iobuf->maplist = iobuf->map_array;
                *bufp++ = iobuf;
        }

        return 0;
}

void free_kiovec(int nr, struct kiobuf **bufp)
{
        int i;
        struct kiobuf *iobuf;

        for (i = 0; i < nr; i++) {
                iobuf = bufp[i];
                if (iobuf->array_len > KIO_STATIC_PAGES) {
                        kfree(iobuf->pagelist);
                        kfree(iobuf->maplist);
                }
                kmem_cache_free(kiobuf_cachep, bufp[i]);
        }
}

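/*
 * Usage sketch (editorial addition, not part of the original file):
 * callers allocate kiobufs in batches with alloc_kiovec() and must
 * release them with a matching free_kiovec().  The helper below is a
 * hypothetical example, assuming only the interfaces defined in this
 * file and <linux/iobuf.h>.
 */
#if 0
static int example_kiovec_roundtrip(void)
{
        struct kiobuf *iobuf[2];
        int err;

        /* Allocate two kiobufs; on failure nothing is left allocated. */
        err = alloc_kiovec(2, iobuf);
        if (err)
                return err;

        /* ... map pages and perform IO against iobuf[0], iobuf[1] ... */

        /* Release both kiobufs and any expanded page/map arrays. */
        free_kiovec(2, iobuf);
        return 0;
}
#endif
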
int expand_kiobuf(struct kiobuf *iobuf, int wanted)
{
        unsigned long *pagelist;
        struct page **maplist;

        if (iobuf->array_len >= wanted)
                return 0;

        pagelist = (unsigned long *)
                kmalloc(wanted * sizeof(unsigned long), GFP_KERNEL);
        if (!pagelist)
                return -ENOMEM;

        maplist = (struct page **)
                kmalloc(wanted * sizeof(struct page *), GFP_KERNEL);
        if (!maplist) {
                kfree(pagelist);
                return -ENOMEM;
        }

        /* Did it grow while we waited? */
        if (iobuf->array_len >= wanted) {
                kfree(pagelist);
                kfree(maplist);
                return 0;
        }

        /* Copy only the entries the old arrays actually hold. */
        memcpy(pagelist, iobuf->pagelist, iobuf->array_len * sizeof(unsigned long));
        memcpy(maplist,  iobuf->maplist,  iobuf->array_len * sizeof(struct page *));

        if (iobuf->array_len > KIO_STATIC_PAGES) {
                kfree(iobuf->pagelist);
                kfree(iobuf->maplist);
        }

        iobuf->pagelist = pagelist;
        iobuf->maplist = maplist;
        iobuf->array_len = wanted;
        return 0;
}

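/*
 * Usage sketch (editorial addition): a mapping routine is expected to
 * call expand_kiobuf() before filling iobuf->maplist/pagelist.  The
 * hypothetical helper below only illustrates that ordering; the name
 * and the nr_pages parameter are assumptions, not kernel interfaces.
 */
#if 0
static int example_reserve_pages(struct kiobuf *iobuf, int nr_pages)
{
        int err;

        /* Grow pagelist/maplist so they can hold nr_pages entries. */
        err = expand_kiobuf(iobuf, nr_pages);
        if (err)
                return err;

        /* The caller may now fill up to nr_pages entries in iobuf->maplist. */
        return 0;
}
#endif
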
void kiobuf_wait_for_io(struct kiobuf *kiobuf)
{
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);

        add_wait_queue(&kiobuf->wait_queue, &wait);
repeat:
        tsk->state = TASK_UNINTERRUPTIBLE;
        run_task_queue(&tq_disk);
        if (atomic_read(&kiobuf->io_count) != 0) {
                schedule();
                goto repeat;
        }
        tsk->state = TASK_RUNNING;
        remove_wait_queue(&kiobuf->wait_queue, &wait);
}

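/*
 * Completion sketch (editorial addition): the submitter is expected to
 * raise kiobuf->io_count once per outstanding request, and the completion
 * path to drop it and invoke iobuf->end_io, which defaults to
 * simple_wakeup_kiobuf() above so that kiobuf_wait_for_io() can sleep
 * until the count reaches zero.  The hypothetical helpers below only
 * illustrate that counting protocol, not the real block-layer hooks.
 */
#if 0
static void example_submit_one(struct kiobuf *iobuf)
{
        /* Account for one more in-flight request before starting it. */
        atomic_inc(&iobuf->io_count);
        /* ... hand the request to the block layer here ... */
}

static void example_complete_one(struct kiobuf *iobuf)
{
        /* Drop the in-flight count; notify sleepers when it hits zero. */
        if (atomic_dec_and_test(&iobuf->io_count))
                iobuf->end_io(iobuf);
}
#endif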