/* bounce buffer handling for block devices
 *
 * - Split from highmem.c
 */
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <asm/tlbflush.h>

#include <trace/events/block.h>
#define POOL_SIZE	64
#define ISA_POOL_SIZE	16
static mempool_t *page_pool, *isa_page_pool;
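/*
 * page_pool bounces highmem pages the device cannot address;
 * isa_page_pool serves devices restricted to ISA DMA (the first
 * 16MB of physical memory on x86). Both are emergency reserves so
 * block I/O can make forward progress under memory pressure.
 */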
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL)
static __init int init_emergency_pool(void)
{
#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
	if (max_pfn <= max_low_pfn)
		return 0;
#endif

	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
	BUG_ON(!page_pool);
	printk("bounce pool size: %d pages\n", POOL_SIZE);

	return 0;
}

__initcall(init_emergency_pool);
#endif
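/*
 * Note: mempool_alloc() only dips into this reserve once the page
 * allocator fails, and with __GFP_WAIT it sleeps until an element is
 * freed back rather than failing -- that guarantee is what keeps the
 * writeback path moving when memory is tight.
 */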
#ifdef CONFIG_HIGHMEM
/*
 * highmem version, map in to vec
 */
static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
	unsigned long flags;
	unsigned char *vto;

	local_irq_save(flags);
	vto = kmap_atomic(to->bv_page);
	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
	kunmap_atomic(vto);
	local_irq_restore(flags);
}
#else /* CONFIG_HIGHMEM */

#define bounce_copy_vec(to, vfrom)	\
	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)

#endif /* CONFIG_HIGHMEM */
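/*
 * Without CONFIG_HIGHMEM every page has a permanent kernel mapping,
 * so page_address() is always valid and no kmap/irq games are needed.
 */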
/*
 * allocate pages in the DMA region for the ISA pool
 */
static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}
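/*
 * GFP_DMA steers the allocation into ZONE_DMA, which on x86 is the
 * ISA-addressable first 16MB of physical memory.
 */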
/*
 * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA
 * as the max address, so check if the pool has already been created.
 */
int init_emergency_isa_pool(void)
{
	if (isa_page_pool)
		return 0;

	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
				       mempool_free_pages, (void *) 0);
	BUG_ON(!isa_page_pool);

	printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
	return 0;
}
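/*
 * Sketch of how this is reached (caller code, not part of this file):
 * a driver that can only DMA below the ISA limit declares that at
 * queue setup time, e.g.
 *
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
 *
 * which sets GFP_DMA in q->bounce_gfp and calls
 * init_emergency_isa_pool().
 */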
/*
 * Simple bounce buffer support for highmem pages. Depending on the
 * queue gfp mask set, *to may or may not be a highmem page. kmap it
 * always, it will do the Right Thing
 */
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
	unsigned char *vfrom;
	struct bio_vec *tovec, *fromvec;
	int i;

	__bio_for_each_segment(tovec, to, i, 0) {
		fromvec = from->bi_io_vec + i;

		/*
		 * not bounced
		 */
		if (tovec->bv_page == fromvec->bv_page)
			continue;

		/*
		 * fromvec->bv_offset and fromvec->bv_len might have been
		 * modified by the block layer, so use the original copy,
		 * bounce_copy_vec already uses tovec->bv_len
		 */
		vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;

		bounce_copy_vec(tovec, vfrom);
		flush_dcache_page(tovec->bv_page);
	}
}
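/*
 * This is called from the bounce bio's completion handler, which can
 * run in interrupt context -- hence bounce_copy_vec() above uses
 * kmap_atomic() under local_irq_save() instead of the sleeping kmap().
 */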
static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
{
	struct bio *bio_orig = bio->bi_private;
	struct bio_vec *bvec, *org_vec;
	int i;

	if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
		set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags);

	/*
	 * free up bounce indirect pages used
	 */
	__bio_for_each_segment(bvec, bio, i, 0) {
		org_vec = bio_orig->bi_io_vec + i;
		if (bvec->bv_page == org_vec->bv_page)
			continue;

		dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
		mempool_free(bvec->bv_page, pool);
	}

	bio_endio(bio_orig, err);
	bio_put(bio);
}
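/*
 * Only pages that were actually substituted go back to the mempool;
 * bio_put() then drops the reference on the bounce clone allocated
 * in __blk_queue_bounce().
 */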
static void bounce_end_io_write(struct bio *bio, int err)
{
	bounce_end_io(bio, page_pool, err);
}

static void bounce_end_io_write_isa(struct bio *bio, int err)
{
	bounce_end_io(bio, isa_page_pool, err);
}
static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
{
	struct bio *bio_orig = bio->bi_private;

	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		copy_to_high_bio_irq(bio_orig, bio);

	bounce_end_io(bio, pool, err);
}

static void bounce_end_io_read(struct bio *bio, int err)
{
	__bounce_end_io_read(bio, page_pool, err);
}

static void bounce_end_io_read_isa(struct bio *bio, int err)
{
	__bounce_end_io_read(bio, isa_page_pool, err);
}
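/*
 * Reads differ from writes only in that data must be copied back from
 * the bounce pages to the original (possibly highmem) pages, and only
 * when the transfer actually succeeded (BIO_UPTODATE set).
 */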
#ifdef CONFIG_NEED_BOUNCE_POOL
static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
{
	struct page *page;
	struct backing_dev_info *bdi;
	struct address_space *mapping;
	struct bio_vec *from;
	int i;

	if (bio_data_dir(bio) != WRITE)
		return 0;

	if (!bdi_cap_stable_pages_required(&q->backing_dev_info))
		return 0;

	/*
	 * Based on the first page that has a valid mapping, decide whether or
	 * not we have to employ bounce buffering to guarantee stable pages.
	 */
	bio_for_each_segment(from, bio, i) {
		page = from->bv_page;
		mapping = page_mapping(page);
		if (!mapping)
			continue;
		bdi = mapping->backing_dev_info;
		return mapping->host->i_sb->s_flags & MS_SNAP_STABLE;
	}

	return 0;
}
#else
static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
{
	return 0;
}
#endif /* CONFIG_NEED_BOUNCE_POOL */
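/*
 * MS_SNAP_STABLE is set by filesystems that prefer having dirty page
 * contents snapshotted into bounce pages over making writers block
 * until writeback completes, so data under I/O cannot change in flight.
 */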
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
			       mempool_t *pool, int force)
{
	struct page *page;
	struct bio *bio = NULL;
	int i, rw = bio_data_dir(*bio_orig);
	struct bio_vec *to, *from;

	bio_for_each_segment(from, *bio_orig, i) {
		page = from->bv_page;

		/*
		 * is destination page below bounce pfn?
		 */
		if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
			continue;

		/*
		 * irk, bounce it
		 */
		if (!bio) {
			unsigned int cnt = (*bio_orig)->bi_vcnt;

			bio = bio_alloc(GFP_NOIO, cnt);
			memset(bio->bi_io_vec, 0, cnt * sizeof(struct bio_vec));
		}

		to = bio->bi_io_vec + i;

		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
		to->bv_len = from->bv_len;
		to->bv_offset = from->bv_offset;
		inc_zone_page_state(to->bv_page, NR_BOUNCE);

		if (rw == WRITE) {
			char *vto, *vfrom;

			flush_dcache_page(from->bv_page);
			vto = page_address(to->bv_page) + to->bv_offset;
			vfrom = kmap(from->bv_page) + from->bv_offset;
			memcpy(vto, vfrom, to->bv_len);
			kunmap(from->bv_page);
		}
	}

	/*
	 * no pages bounced
	 */
	if (!bio)
		return;

	trace_block_bio_bounce(q, *bio_orig);
	/*
	 * at least one page was bounced, fill in possible non-highmem
	 * pages
	 */
	__bio_for_each_segment(from, *bio_orig, i, 0) {
		to = bio_iovec_idx(bio, i);
		if (!to->bv_page) {
			to->bv_page = from->bv_page;
			to->bv_len = from->bv_len;
			to->bv_offset = from->bv_offset;
		}
	}
	bio->bi_bdev = (*bio_orig)->bi_bdev;
	bio->bi_flags |= (1 << BIO_BOUNCED);
	bio->bi_sector = (*bio_orig)->bi_sector;
	bio->bi_rw = (*bio_orig)->bi_rw;

	bio->bi_vcnt = (*bio_orig)->bi_vcnt;
	bio->bi_idx = (*bio_orig)->bi_idx;
	bio->bi_size = (*bio_orig)->bi_size;

	if (pool == page_pool) {
		bio->bi_end_io = bounce_end_io_write;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read;
	} else {
		bio->bi_end_io = bounce_end_io_write_isa;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read_isa;
	}

	bio->bi_private = *bio_orig;
	*bio_orig = bio;
}
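/*
 * __blk_queue_bounce() leaves *bio_orig pointing at the bounce clone;
 * the original bio is stashed in bi_private and is completed from
 * bounce_end_io() once the clone finishes.
 */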
void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
	int must_bounce;
	mempool_t *pool;

	/*
	 * Data-less bio, nothing to bounce
	 */
	if (!bio_has_data(*bio_orig))
		return;

	must_bounce = must_snapshot_stable_pages(q, *bio_orig);

	/*
	 * for non-isa bounce case, just check if the bounce pfn is equal
	 * to or bigger than the highest pfn in the system -- in that case,
	 * don't waste time iterating over bio segments
	 */
	if (!(q->bounce_gfp & GFP_DMA)) {
		if (queue_bounce_pfn(q) >= blk_max_pfn && !must_bounce)
			return;
		pool = page_pool;
	} else {
		BUG_ON(!isa_page_pool);
		pool = isa_page_pool;
	}

	/*
	 * slow path
	 */
	__blk_queue_bounce(q, bio_orig, pool, must_bounce);
}

EXPORT_SYMBOL(blk_queue_bounce);
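/*
 * Typical call site (a sketch, not from this file): the block layer
 * bounces a bio just before turning it into a request:
 *
 *	blk_queue_bounce(q, &bio);
 *
 * after which bio points at the bounce clone if anything was bounced,
 * which is why callers must pass the address of their bio pointer.
 */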