/* bounce buffer handling for block devices
 *
 * - Split from highmem.c
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/blktrace_api.h>
#include <asm/tlbflush.h>
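
/*
 * Bounce pages come from dedicated mempools rather than the page
 * allocator directly, so bouncing can make forward progress even
 * under memory pressure (a failed allocation here could otherwise
 * stall the very writeback I/O that would free memory).
 */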
#define POOL_SIZE	64
#define ISA_POOL_SIZE	16

static mempool_t *page_pool, *isa_page_pool;
#ifdef CONFIG_HIGHMEM
static __init int init_emergency_pool(void)
{
	struct sysinfo i;

	si_meminfo(&i);
	si_swapinfo(&i);

	if (!i.totalhigh)
		return 0;

	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
	BUG_ON(!page_pool);
	printk("highmem bounce pool size: %d pages\n", POOL_SIZE);

	return 0;
}

__initcall(init_emergency_pool);
/*
 * highmem version, map in to vec
 */
static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
	unsigned long flags;
	unsigned char *vto;

	/*
	 * the KM_BOUNCE_READ kmap slot may also be used from interrupt
	 * context, so the atomic kmap must be done with irqs disabled
	 */
	local_irq_save(flags);
	vto = kmap_atomic(to->bv_page, KM_BOUNCE_READ);
	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
	kunmap_atomic(vto, KM_BOUNCE_READ);
	local_irq_restore(flags);
}
#else /* CONFIG_HIGHMEM */

#define bounce_copy_vec(to, vfrom)	\
	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)

#endif /* CONFIG_HIGHMEM */
/*
 * allocate pages in the DMA region for the ISA pool
 */
static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}
/*
 * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA
 * as the max address, so check if the pool has already been created.
 */
int init_emergency_isa_pool(void)
{
	if (isa_page_pool)
		return 0;

	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
				       mempool_free_pages, (void *) 0);
	BUG_ON(!isa_page_pool);

	printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
	return 0;
}
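
/*
 * Note: drivers normally reach init_emergency_isa_pool() indirectly,
 * via blk_queue_bounce_limit(q, BLK_BOUNCE_ISA), rather than by
 * calling it themselves.
 */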
/*
 * Simple bounce buffer support for highmem pages. Depending on the
 * queue gfp mask set, *to may or may not be a highmem page. kmap it
 * always, it will do the Right Thing
 */
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
	unsigned char *vfrom;
	struct bio_vec *tovec, *fromvec;
	int i;

	__bio_for_each_segment(tovec, to, i, 0) {
		fromvec = from->bi_io_vec + i;

		/*
		 * not bounced
		 */
		if (tovec->bv_page == fromvec->bv_page)
			continue;

		/*
		 * fromvec->bv_offset and fromvec->bv_len might have been
		 * modified by the block layer, so use the original copy,
		 * bounce_copy_vec already uses tovec->bv_len
		 */
		vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;

		flush_dcache_page(tovec->bv_page);
		bounce_copy_vec(tovec, vfrom);
	}
}
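
/*
 * Common completion for bounced bios: propagate status to the
 * original bio, return every bounce page to its mempool, then
 * complete the original bio and drop our reference on the clone.
 */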
static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
{
	struct bio *bio_orig = bio->bi_private;
	struct bio_vec *bvec, *org_vec;
	int i;

	if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
		set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags);

	/*
	 * free up bounce indirect pages used
	 */
	__bio_for_each_segment(bvec, bio, i, 0) {
		org_vec = bio_orig->bi_io_vec + i;
		if (bvec->bv_page == org_vec->bv_page)
			continue;

		dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
		mempool_free(bvec->bv_page, pool);
	}

	bio_endio(bio_orig, bio_orig->bi_size, err);
	bio_put(bio);
}
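
/*
 * In this bio completion convention, ->bi_end_io may be invoked for
 * partial completions; bi_size only reaches zero once the whole bio
 * is done.  Returning 1 means "not finished yet", so each wrapper
 * below waits for bi_size == 0 before tearing down the bounce.
 */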
static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
{
	if (bio->bi_size)
		return 1;

	bounce_end_io(bio, page_pool, err);
	return 0;
}

static int bounce_end_io_write_isa(struct bio *bio, unsigned int bytes_done, int err)
{
	if (bio->bi_size)
		return 1;

	bounce_end_io(bio, isa_page_pool, err);
	return 0;
}

static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
{
	struct bio *bio_orig = bio->bi_private;

	/*
	 * a read bounce only has data worth copying back if the
	 * transfer actually succeeded
	 */
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		copy_to_high_bio_irq(bio_orig, bio);

	bounce_end_io(bio, pool, err);
}

static int bounce_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
{
	if (bio->bi_size)
		return 1;

	__bounce_end_io_read(bio, page_pool, err);
	return 0;
}

static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int err)
{
	if (bio->bi_size)
		return 1;

	__bounce_end_io_read(bio, isa_page_pool, err);
	return 0;
}
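
/*
 * The slow path: walk the original bio.  If any segment sits above
 * q->bounce_pfn, clone the bio, back the offending segments with
 * pages from the given mempool (copying the data now for writes),
 * and redirect completion through the bounce_end_io_* handlers so
 * reads get copied back and the pages returned on completion.
 */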
static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
			       mempool_t *pool)
{
	struct page *page;
	struct bio *bio = NULL;
	int i, rw = bio_data_dir(*bio_orig);
	struct bio_vec *to, *from;

	bio_for_each_segment(from, *bio_orig, i) {
		page = from->bv_page;

		/*
		 * is destination page below bounce pfn?
		 */
		if (page_to_pfn(page) < q->bounce_pfn)
			continue;

		/*
		 * irk, page is too high -- bounce it
		 */
		if (!bio)
			bio = bio_alloc(GFP_NOIO, (*bio_orig)->bi_vcnt);

		to = bio->bi_io_vec + i;

		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
		to->bv_len = from->bv_len;
		to->bv_offset = from->bv_offset;
		inc_zone_page_state(to->bv_page, NR_BOUNCE);

		if (rw == WRITE) {
			char *vto, *vfrom;

			flush_dcache_page(from->bv_page);
			vto = page_address(to->bv_page) + to->bv_offset;
			vfrom = kmap(from->bv_page) + from->bv_offset;
			memcpy(vto, vfrom, to->bv_len);
			kunmap(from->bv_page);
		}
	}

	/*
	 * no pages bounced
	 */
	if (!bio)
		return;

	/*
	 * at least one page was bounced, fill in possible non-highmem
	 * pages
	 */
	__bio_for_each_segment(from, *bio_orig, i, 0) {
		to = bio_iovec_idx(bio, i);
		if (!to->bv_page) {
			to->bv_page = from->bv_page;
			to->bv_len = from->bv_len;
			to->bv_offset = from->bv_offset;
		}
	}

	bio->bi_bdev = (*bio_orig)->bi_bdev;
	bio->bi_flags |= (1 << BIO_BOUNCED);
	bio->bi_sector = (*bio_orig)->bi_sector;
	bio->bi_rw = (*bio_orig)->bi_rw;

	bio->bi_vcnt = (*bio_orig)->bi_vcnt;
	bio->bi_idx = (*bio_orig)->bi_idx;
	bio->bi_size = (*bio_orig)->bi_size;

	if (pool == page_pool) {
		bio->bi_end_io = bounce_end_io_write;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read;
	} else {
		bio->bi_end_io = bounce_end_io_write_isa;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read_isa;
	}

	bio->bi_private = *bio_orig;
	*bio_orig = bio;
}
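
/*
 * Entry point, called by the generic block layer for each bio it is
 * about to queue (e.g. from __make_request()).  Cheap checks first;
 * only bios that actually need bouncing take the slow path.
 */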
void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
{
	mempool_t *pool;

	/*
	 * for non-isa bounce case, just check if the bounce pfn is equal
	 * to or bigger than the highest pfn in the system -- in that case,
	 * don't waste time iterating over bio segments
	 */
	if (!(q->bounce_gfp & GFP_DMA)) {
		if (q->bounce_pfn >= blk_max_pfn)
			return;
		BUG_ON(!page_pool);
		pool = page_pool;
	} else {
		BUG_ON(!isa_page_pool);
		pool = isa_page_pool;
	}

	blk_add_trace_bio(q, *bio_orig, BLK_TA_BOUNCE);

	/*
	 * slow path
	 */
	__blk_queue_bounce(q, bio_orig, pool);
}

EXPORT_SYMBOL(blk_queue_bounce);
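
/*
 * Usage sketch (hypothetical driver, illustrative only): a device
 * that can only DMA below 16MB opts in to bouncing by lowering its
 * queue's bounce limit; the block layer then routes every queued bio
 * through blk_queue_bounce() above.  mydev, mydev_request and
 * mydev_init_queue are made-up names, not part of this file.
 *
 *	static int mydev_init_queue(struct mydev *dev)
 *	{
 *		dev->queue = blk_init_queue(mydev_request, &dev->lock);
 *		if (!dev->queue)
 *			return -ENOMEM;
 *
 *		24-bit ISA DMA: bounce anything above 16MB:
 *		blk_queue_bounce_limit(dev->queue, BLK_BOUNCE_ISA);
 *		return 0;
 *	}
 */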