Fix reported Samba bug.
[linux-2.6/linux-mips.git] / fs / iobuf.c
blob 4be2258702d36611da3ffed34d29b3763a8d4daa
/*
 * iobuf.c
 *
 * Keep track of the general-purpose IO-buffer structures used to track
 * abstract kernel-space io buffers.
 *
 */

#include <linux/iobuf.h>
#include <linux/malloc.h>
#include <linux/slab.h>

static kmem_cache_t *kiobuf_cachep;

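/*
 * Complete one unit of I/O against a kiobuf: record any error and, once
 * the last outstanding request has finished, call the completion
 * callback and wake any waiters.
 */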
void end_kio_request(struct kiobuf *kiobuf, int uptodate)
{
        if ((!uptodate) && !kiobuf->errno)
                kiobuf->errno = -EIO;

        if (atomic_dec_and_test(&kiobuf->io_count)) {
                if (kiobuf->end_io)
                        kiobuf->end_io(kiobuf);
                wake_up(&kiobuf->wait_queue);
        }
}

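/*
 * Create the slab cache from which kiobuf structures are allocated.
 * Called once during boot.
 */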
void __init kiobuf_setup(void)
{
        kiobuf_cachep = kmem_cache_create("kiobuf",
                                          sizeof(struct kiobuf),
                                          0,
                                          SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (!kiobuf_cachep)
                panic("Cannot create kernel iobuf cache\n");
}

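/*
 * Reset a kiobuf to a clean state and point its page list at the
 * embedded static array.
 */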
void kiobuf_init(struct kiobuf *iobuf)
{
        memset(iobuf, 0, sizeof(*iobuf));
        init_waitqueue_head(&iobuf->wait_queue);
        iobuf->array_len = KIO_STATIC_PAGES;
        iobuf->maplist   = iobuf->map_array;
}

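/*
 * Allocate and initialise nr kiobufs, storing them in bufp.  On failure,
 * any kiobufs already allocated are freed again.
 */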
int alloc_kiovec(int nr, struct kiobuf **bufp)
{
        int i;
        struct kiobuf *iobuf;

        for (i = 0; i < nr; i++) {
                iobuf = kmem_cache_alloc(kiobuf_cachep, SLAB_KERNEL);
                if (!iobuf) {
                        free_kiovec(i, bufp);
                        return -ENOMEM;
                }
                kiobuf_init(iobuf);
                *bufp++ = iobuf;
        }

        return 0;
}

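/*
 * Release nr kiobufs: unlock any still-locked pages, free an expanded
 * page list, and return each structure to the slab cache.
 */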
void free_kiovec(int nr, struct kiobuf **bufp)
{
        int i;
        struct kiobuf *iobuf;

        for (i = 0; i < nr; i++) {
                iobuf = bufp[i];
                if (iobuf->locked)
                        unlock_kiovec(1, &iobuf);
                if (iobuf->array_len > KIO_STATIC_PAGES)
                        kfree(iobuf->maplist);
                kmem_cache_free(kiobuf_cachep, bufp[i]);
        }
}

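/*
 * Grow a kiobuf's page list to hold at least wanted entries.  The
 * allocation may sleep, so the length is re-checked before committing.
 */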
int expand_kiobuf(struct kiobuf *iobuf, int wanted)
{
        struct page **maplist;

        if (iobuf->array_len >= wanted)
                return 0;

        maplist = (struct page **)
                kmalloc(wanted * sizeof(struct page **), GFP_KERNEL);
        if (!maplist)
                return -ENOMEM;

        /* Did it grow while we waited? */
        if (iobuf->array_len >= wanted) {
                kfree(maplist);
                return 0;
        }

        memcpy(maplist, iobuf->maplist, iobuf->array_len * sizeof(struct page **));

        if (iobuf->array_len > KIO_STATIC_PAGES)
                kfree(iobuf->maplist);

        iobuf->maplist   = maplist;
        iobuf->array_len = wanted;
        return 0;
}

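/*
 * Sleep uninterruptibly until all I/O outstanding against the kiobuf
 * has completed, kicking the disk task queue while waiting.
 */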
void kiobuf_wait_for_io(struct kiobuf *kiobuf)
{
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);

        if (atomic_read(&kiobuf->io_count) == 0)
                return;

        add_wait_queue(&kiobuf->wait_queue, &wait);
repeat:
        run_task_queue(&tq_disk);
        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        if (atomic_read(&kiobuf->io_count) != 0) {
                schedule();
                goto repeat;
        }
        tsk->state = TASK_RUNNING;
        remove_wait_queue(&kiobuf->wait_queue, &wait);
}