/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
 *
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/blktrace_api.h>
#include <scsi/sg.h>            /* for struct sg_iovec */
static struct kmem_cache *bio_slab __read_mostly;

mempool_t *bio_split_pool __read_mostly;
/*
 * if you change this list, also change bvec_alloc or things will
 * break badly! cannot be bigger than what you can fit into an
 * unsigned short
 */
#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
        BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set *fs_bio_set;
unsigned int bvec_nr_vecs(unsigned short idx)
{
        return bvec_slabs[idx].nr_vecs;
}
struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx,
                              struct bio_set *bs)
{
        struct bio_vec *bvl;

        /*
         * see comment near bvec_array define!
         */
        switch (nr) {
                case   1: *idx = 0; break;
                case   2 ...   4: *idx = 1; break;
                case   5 ...  16: *idx = 2; break;
                case  17 ...  64: *idx = 3; break;
                case  65 ... 128: *idx = 4; break;
                case 129 ... BIO_MAX_PAGES: *idx = 5; break;
                default:
                        return NULL;
        }

        /*
         * idx now points to the pool we want to allocate from
         */
        bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask);
        if (bvl)
                memset(bvl, 0, bvec_nr_vecs(*idx) * sizeof(struct bio_vec));

        return bvl;
}
void bio_free(struct bio *bio, struct bio_set *bio_set)
{
        const int pool_idx = BIO_POOL_IDX(bio);

        BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);

        mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);

        if (bio_integrity(bio))
                bio_integrity_free(bio, bio_set);

        mempool_free(bio, bio_set->bio_pool);
}
/*
 * default destructor for a bio allocated with bio_alloc_bioset()
 */
static void bio_fs_destructor(struct bio *bio)
{
        bio_free(bio, fs_bio_set);
}
void bio_init(struct bio *bio)
{
        memset(bio, 0, sizeof(*bio));
        bio->bi_flags = 1 << BIO_UPTODATE;
        atomic_set(&bio->bi_cnt, 1);
}
/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask: the GFP_ mask given to the slab allocator
 * @nr_iovecs: number of iovecs to pre-allocate
 * @bs: the bio_set to allocate from
 *
 * bio_alloc_bioset will first try its own mempool to satisfy the allocation.
 * If %__GFP_WAIT is set then we will block on the internal pool waiting
 * for a &struct bio to become free.
 *
 * allocate bio and iovecs from the memory pools specified by the
 * bio_set structure.
 **/
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
        struct bio *bio = mempool_alloc(bs->bio_pool, gfp_mask);

        if (likely(bio)) {
                struct bio_vec *bvl = NULL;

                bio_init(bio);
                if (likely(nr_iovecs)) {
                        unsigned long uninitialized_var(idx);

                        bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
                        if (unlikely(!bvl)) {
                                mempool_free(bio, bs->bio_pool);
                                bio = NULL;
                                goto out;
                        }
                        bio->bi_flags |= idx << BIO_POOL_OFFSET;
                        bio->bi_max_vecs = bvec_nr_vecs(idx);
                }
                bio->bi_io_vec = bvl;
        }
out:
        return bio;
}
struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
{
        struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);

        if (bio)
                bio->bi_destructor = bio_fs_destructor;

        return bio;
}
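/*
 * Minimal usage sketch (illustrative only, not part of the original file):
 * allocate a bio with bio_alloc(), point it at a device and sector, attach a
 * completion callback and submit it.  The names "example_end_io" and
 * "example_submit_read", and the page/bdev arguments, are hypothetical and
 * supplied here purely to show the calling convention.
 */
static void example_end_io(struct bio *bio, int error)
{
        /* the I/O is done; drop the reference bio_alloc() gave us */
        bio_put(bio);
}

static int example_submit_read(struct block_device *bdev, struct page *page,
                               sector_t sector)
{
        struct bio *bio = bio_alloc(GFP_NOIO, 1);

        if (!bio)
                return -ENOMEM;

        bio->bi_bdev = bdev;
        bio->bi_sector = sector;
        bio->bi_end_io = example_end_io;
        bio_add_page(bio, page, PAGE_SIZE, 0);  /* one page always fits an empty bio */
        submit_bio(READ, bio);
        return 0;
}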
void zero_fill_bio(struct bio *bio)
{
        unsigned long flags;
        struct bio_vec *bv;
        int i;

        bio_for_each_segment(bv, bio, i) {
                char *data = bvec_kmap_irq(bv, &flags);
                memset(data, 0, bv->bv_len);
                flush_dcache_page(bv->bv_page);
                bvec_kunmap_irq(data, &flags);
        }
}
EXPORT_SYMBOL(zero_fill_bio);
/**
 * bio_put - release a reference to a bio
 * @bio: bio to release reference to
 *
 * Put a reference to a &struct bio, either one you have gotten with
 * bio_alloc or bio_get. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
        BIO_BUG_ON(!atomic_read(&bio->bi_cnt));

        if (atomic_dec_and_test(&bio->bi_cnt)) {
                bio->bi_next = NULL;
                bio->bi_destructor(bio);
        }
}
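/*
 * Reference-counting sketch (illustrative, assumed names): a caller that
 * still needs to look at the bio after its end_io has run takes an extra
 * reference with bio_get() before submission and drops it with bio_put()
 * when it is finished.
 */
static void example_submit_and_inspect(struct bio *bio, int rw)
{
        bio_get(bio);           /* keep the bio alive past its end_io */
        submit_bio(rw, bio);
        /* ... wait for completion, inspect bio->bi_flags, etc ... */
        bio_put(bio);           /* release our extra reference */
}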
inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
        if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
                blk_recount_segments(q, bio);

        return bio->bi_phys_segments;
}
inline int bio_hw_segments(struct request_queue *q, struct bio *bio)
{
        if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
                blk_recount_segments(q, bio);

        return bio->bi_hw_segments;
}
/**
 * __bio_clone - clone a bio
 * @bio: destination bio
 * @bio_src: bio to clone
 *
 * Clone a &bio. Caller will own the returned bio, but not
 * the actual data it points to. Reference count of returned
 * bio will be one.
 */
void __bio_clone(struct bio *bio, struct bio *bio_src)
{
        memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
                bio_src->bi_max_vecs * sizeof(struct bio_vec));

        /*
         * most users will be overriding ->bi_bdev with a new target,
         * so we don't set nor calculate new physical/hw segment counts here
         */
        bio->bi_sector = bio_src->bi_sector;
        bio->bi_bdev = bio_src->bi_bdev;
        bio->bi_flags |= 1 << BIO_CLONED;
        bio->bi_rw = bio_src->bi_rw;
        bio->bi_vcnt = bio_src->bi_vcnt;
        bio->bi_size = bio_src->bi_size;
        bio->bi_idx = bio_src->bi_idx;
}
/**
 * bio_clone - clone a bio
 * @bio: bio to clone
 * @gfp_mask: allocation priority
 *
 * Like __bio_clone, only also allocates the returned bio
 */
struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
        struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set);

        if (!b)
                return NULL;

        b->bi_destructor = bio_fs_destructor;
        __bio_clone(b, bio);

        if (bio_integrity(bio)) {
                int ret;

                ret = bio_integrity_clone(b, bio, fs_bio_set);

                if (ret < 0)
                        return NULL;
        }

        return b;
}
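/*
 * Cloning sketch (illustrative, assumed names): a stacking driver typically
 * clones a bio, redirects the clone to its backing device and keeps the
 * original until the clone completes.  "example_clone_end_io",
 * "example_remap" and "target_bdev" are hypothetical.
 */
static void example_clone_end_io(struct bio *clone, int error)
{
        struct bio *orig = clone->bi_private;

        bio_put(clone);
        bio_endio(orig, error); /* propagate completion to the original */
}

static int example_remap(struct bio *orig, struct block_device *target_bdev,
                         sector_t offset)
{
        struct bio *clone = bio_clone(orig, GFP_NOIO);

        if (!clone)
                return -ENOMEM;

        clone->bi_bdev = target_bdev;
        clone->bi_sector += offset;
        clone->bi_private = orig;
        clone->bi_end_io = example_clone_end_io;
        generic_make_request(clone);
        return 0;
}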
/**
 * bio_get_nr_vecs - return approx number of vecs
 * @bdev: I/O target
 *
 * Return the approximate number of pages we can send to this target.
 * There's no guarantee that you will be able to fit this number of pages
 * into a bio, it does not account for dynamic restrictions that vary
 * on offset.
 */
int bio_get_nr_vecs(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);
        int nr_pages;

        nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (nr_pages > q->max_phys_segments)
                nr_pages = q->max_phys_segments;
        if (nr_pages > q->max_hw_segments)
                nr_pages = q->max_hw_segments;

        return nr_pages;
}
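/*
 * Sizing sketch (illustrative, assumed names): use bio_get_nr_vecs() to pick
 * a reasonable iovec count for bio_alloc() when building a multi-page bio
 * aimed at "bdev".
 */
static struct bio *example_alloc_for_bdev(struct block_device *bdev,
                                          int nr_pages_wanted)
{
        int nr_vecs = bio_get_nr_vecs(bdev);

        /* never ask for more vecs than the queue is likely to accept */
        if (nr_pages_wanted < nr_vecs)
                nr_vecs = nr_pages_wanted;

        return bio_alloc(GFP_KERNEL, nr_vecs);
}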
static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
                          *page, unsigned int len, unsigned int offset,
                          unsigned short max_sectors)
{
        int retried_segments = 0;
        struct bio_vec *bvec;

        /*
         * cloned bio must not modify vec list
         */
        if (unlikely(bio_flagged(bio, BIO_CLONED)))
                return 0;

        if (((bio->bi_size + len) >> 9) > max_sectors)
                return 0;

        /*
         * For filesystems with a blocksize smaller than the pagesize
         * we will often be called with the same page as last time and
         * a consecutive offset.  Optimize this special case.
         */
        if (bio->bi_vcnt > 0) {
                struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];

                if (page == prev->bv_page &&
                    offset == prev->bv_offset + prev->bv_len) {
                        prev->bv_len += len;

                        if (q->merge_bvec_fn) {
                                struct bvec_merge_data bvm = {
                                        .bi_bdev = bio->bi_bdev,
                                        .bi_sector = bio->bi_sector,
                                        .bi_size = bio->bi_size,
                                };

                                if (q->merge_bvec_fn(q, &bvm, prev) < len) {
                                        prev->bv_len -= len;
                                        return 0;
                                }
                        }

                        goto done;
                }
        }

        if (bio->bi_vcnt >= bio->bi_max_vecs)
                return 0;

        /*
         * we might lose a segment or two here, but rather that than
         * make this too complex.
         */
        while (bio->bi_phys_segments >= q->max_phys_segments
               || bio->bi_hw_segments >= q->max_hw_segments
               || BIOVEC_VIRT_OVERSIZE(bio->bi_size)) {

                if (retried_segments)
                        return 0;

                retried_segments = 1;
                blk_recount_segments(q, bio);
        }

        /*
         * setup the new entry, we might clear it again later if we
         * cannot add the page
         */
        bvec = &bio->bi_io_vec[bio->bi_vcnt];
        bvec->bv_page = page;
        bvec->bv_len = len;
        bvec->bv_offset = offset;

        /*
         * if queue has other restrictions (eg varying max sector size
         * depending on offset), it can specify a merge_bvec_fn in the
         * queue to get further control
         */
        if (q->merge_bvec_fn) {
                struct bvec_merge_data bvm = {
                        .bi_bdev = bio->bi_bdev,
                        .bi_sector = bio->bi_sector,
                        .bi_size = bio->bi_size,
                };

                /*
                 * merge_bvec_fn() returns number of bytes it can accept
                 */
                if (q->merge_bvec_fn(q, &bvm, bvec) < len) {
                        bvec->bv_page = NULL;
                        bvec->bv_len = 0;
                        bvec->bv_offset = 0;
                        return 0;
                }
        }

        /* If we may be able to merge these biovecs, force a recount */
        if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec) ||
            BIOVEC_VIRT_MERGEABLE(bvec-1, bvec)))
                bio->bi_flags &= ~(1 << BIO_SEG_VALID);

        bio->bi_vcnt++;
        bio->bi_phys_segments++;
        bio->bi_hw_segments++;
done:
        bio->bi_size += len;
        return len;
}
/**
 * bio_add_pc_page - attempt to add page to bio
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This can fail for a
 * number of reasons, such as the bio being full or target block
 * device limitations. The target block device must allow bios
 * smaller than PAGE_SIZE, so it is always possible to add a single
 * page to an empty bio. This should only be used by REQ_PC bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
                    unsigned int len, unsigned int offset)
{
        return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
}
/**
 * bio_add_page - attempt to add page to bio
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This can fail for a
 * number of reasons, such as the bio being full or target block
 * device limitations. The target block device must allow bios
 * smaller than PAGE_SIZE, so it is always possible to add a single
 * page to an empty bio.
 */
int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
                 unsigned int offset)
{
        struct request_queue *q = bdev_get_queue(bio->bi_bdev);
        return __bio_add_page(q, bio, page, len, offset, q->max_sectors);
}
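/*
 * Build-a-bio sketch (illustrative, assumed names): add pages one at a time
 * and stop as soon as bio_add_page() accepts less than a full page, then hand
 * back whatever fit.  "pages" and "nr" are assumed to come from the caller.
 */
static struct bio *example_fill_bio(struct block_device *bdev, sector_t sector,
                                    struct page **pages, int nr)
{
        struct bio *bio = bio_alloc(GFP_KERNEL, nr);
        int i;

        if (!bio)
                return NULL;

        bio->bi_bdev = bdev;
        bio->bi_sector = sector;

        for (i = 0; i < nr; i++) {
                /* a short return means the bio (or the queue) is full */
                if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) < PAGE_SIZE)
                        break;
        }

        return bio;
}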
struct bio_map_data {
        struct bio_vec *iovecs;
        int nr_sgvecs;
        struct sg_iovec *sgvecs;
};

static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
                             struct sg_iovec *iov, int iov_count)
{
        memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
        memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
        bmd->nr_sgvecs = iov_count;
        bio->bi_private = bmd;
}
static void bio_free_map_data(struct bio_map_data *bmd)
{
        kfree(bmd->iovecs);
        kfree(bmd->sgvecs);
        kfree(bmd);
}
static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
                                               gfp_t gfp_mask)
{
        struct bio_map_data *bmd = kmalloc(sizeof(*bmd), gfp_mask);

        if (!bmd)
                return NULL;

        bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, gfp_mask);
        if (!bmd->iovecs) {
                kfree(bmd);
                return NULL;
        }

        bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, gfp_mask);
        if (bmd->sgvecs)
                return bmd;

        kfree(bmd->iovecs);
        kfree(bmd);
        return NULL;
}
static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
                          struct sg_iovec *iov, int iov_count, int uncopy)
{
        int ret = 0, i;
        struct bio_vec *bvec;
        int iov_idx = 0;
        unsigned int iov_off = 0;
        int read = bio_data_dir(bio) == READ;

        __bio_for_each_segment(bvec, bio, i, 0) {
                char *bv_addr = page_address(bvec->bv_page);
                unsigned int bv_len = iovecs[i].bv_len;

                while (bv_len && iov_idx < iov_count) {
                        unsigned int bytes;
                        char *iov_addr;

                        bytes = min_t(unsigned int,
                                      iov[iov_idx].iov_len - iov_off, bv_len);
                        iov_addr = iov[iov_idx].iov_base + iov_off;

                        if (!ret) {
                                if (!read && !uncopy)
                                        ret = copy_from_user(bv_addr, iov_addr,
                                                             bytes);
                                if (read && uncopy)
                                        ret = copy_to_user(iov_addr, bv_addr,
                                                           bytes);

                                if (ret)
                                        ret = -EFAULT;
                        }

                        bv_len -= bytes;
                        bv_addr += bytes;
                        iov_addr += bytes;
                        iov_off += bytes;

                        if (iov[iov_idx].iov_len == iov_off) {
                                iov_idx++;
                                iov_off = 0;
                        }
                }

                if (uncopy)
                        __free_page(bvec->bv_page);
        }

        return ret;
}
/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user() and write back data
 * to user space in case of a read.
 */
int bio_uncopy_user(struct bio *bio)
{
        struct bio_map_data *bmd = bio->bi_private;
        int ret;

        ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs, bmd->nr_sgvecs, 1);

        bio_free_map_data(bmd);
        bio_put(bio);
        return ret;
}
/**
 * bio_copy_user_iov - copy user data to bio
 * @q: destination block queue
 * @iov: the iovec.
 * @iov_count: number of elements in the iovec
 * @write_to_vm: bool indicating writing to pages or not
 *
 * Prepares and returns a bio for indirect user io, bouncing data
 * to/from kernel pages as necessary. Must be paired with a
 * call to bio_uncopy_user() on io completion.
 */
struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
                              int iov_count, int write_to_vm)
{
        struct bio_map_data *bmd;
        struct bio_vec *bvec;
        struct page *page;
        struct bio *bio;
        int i, ret;
        int nr_pages = 0;
        unsigned int len = 0;

        for (i = 0; i < iov_count; i++) {
                unsigned long uaddr;
                unsigned long end;
                unsigned long start;

                uaddr = (unsigned long)iov[i].iov_base;
                end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
                start = uaddr >> PAGE_SHIFT;

                nr_pages += end - start;
                len += iov[i].iov_len;
        }

        bmd = bio_alloc_map_data(nr_pages, iov_count, GFP_KERNEL);
        if (!bmd)
                return ERR_PTR(-ENOMEM);

        ret = -ENOMEM;
        bio = bio_alloc(GFP_KERNEL, nr_pages);
        if (!bio)
                goto out_bmd;

        bio->bi_rw |= (!write_to_vm << BIO_RW);

        ret = 0;
        while (len) {
                unsigned int bytes = PAGE_SIZE;

                if (bytes > len)
                        bytes = len;

                page = alloc_page(q->bounce_gfp | GFP_KERNEL);
                if (!page) {
                        ret = -ENOMEM;
                        break;
                }

                if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
                        break;

                len -= bytes;
        }

        if (ret)
                goto cleanup;

        if (!write_to_vm) {
                ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0);
                if (ret)
                        goto cleanup;
        }

        bio_set_map_data(bmd, bio, iov, iov_count);
        return bio;
cleanup:
        bio_for_each_segment(bvec, bio, i)
                __free_page(bvec->bv_page);

        bio_put(bio);
out_bmd:
        bio_free_map_data(bmd);
        return ERR_PTR(ret);
}
/**
 * bio_copy_user - copy user data to bio
 * @q: destination block queue
 * @uaddr: start of user address
 * @len: length in bytes
 * @write_to_vm: bool indicating writing to pages or not
 *
 * Prepares and returns a bio for indirect user io, bouncing data
 * to/from kernel pages as necessary. Must be paired with a
 * call to bio_uncopy_user() on io completion.
 */
struct bio *bio_copy_user(struct request_queue *q, unsigned long uaddr,
                          unsigned int len, int write_to_vm)
{
        struct sg_iovec iov;

        iov.iov_base = (void __user *)uaddr;
        iov.iov_len = len;

        return bio_copy_user_iov(q, &iov, 1, write_to_vm);
}
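/*
 * Bounce-copy sketch (illustrative, assumed names): stage a user buffer
 * through kernel pages with bio_copy_user() and finish with
 * bio_uncopy_user(), which also copies the data back to user space for reads.
 * "q", "uaddr", "len" and "reading" are assumed to come from the caller,
 * e.g. an SG_IO style ioctl path.
 */
static int example_copy_user_io(struct request_queue *q, unsigned long uaddr,
                                unsigned int len, int reading)
{
        struct bio *bio = bio_copy_user(q, uaddr, len, reading);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        /* ... attach the bio to a request and wait for it to complete ... */

        return bio_uncopy_user(bio);
}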
static struct bio *__bio_map_user_iov(struct request_queue *q,
                                      struct block_device *bdev,
                                      struct sg_iovec *iov, int iov_count,
                                      int write_to_vm)
{
        int i, j;
        int nr_pages = 0;
        struct page **pages;
        struct bio *bio;
        int cur_page = 0;
        int ret, offset;

        for (i = 0; i < iov_count; i++) {
                unsigned long uaddr = (unsigned long)iov[i].iov_base;
                unsigned long len = iov[i].iov_len;
                unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
                unsigned long start = uaddr >> PAGE_SHIFT;

                nr_pages += end - start;
                /*
                 * buffer must be aligned to at least hardsector size for now
                 */
                if (uaddr & queue_dma_alignment(q))
                        return ERR_PTR(-EINVAL);
        }

        if (!nr_pages)
                return ERR_PTR(-EINVAL);

        bio = bio_alloc(GFP_KERNEL, nr_pages);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        ret = -ENOMEM;
        pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                goto out;

        for (i = 0; i < iov_count; i++) {
                unsigned long uaddr = (unsigned long)iov[i].iov_base;
                unsigned long len = iov[i].iov_len;
                unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
                unsigned long start = uaddr >> PAGE_SHIFT;
                const int local_nr_pages = end - start;
                const int page_limit = cur_page + local_nr_pages;

                ret = get_user_pages_fast(uaddr, local_nr_pages,
                                          write_to_vm, &pages[cur_page]);
                if (ret < local_nr_pages) {
                        ret = -EFAULT;
                        goto out_unmap;
                }

                offset = uaddr & ~PAGE_MASK;
                for (j = cur_page; j < page_limit; j++) {
                        unsigned int bytes = PAGE_SIZE - offset;

                        if (len <= 0)
                                break;

                        if (bytes > len)
                                bytes = len;

                        if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
                            bytes)
                                break;

                        len -= bytes;
                        offset = 0;
                }

                cur_page = j;
                /*
                 * release the pages we didn't map into the bio, if any
                 */
                while (j < page_limit)
                        page_cache_release(pages[j++]);
        }

        kfree(pages);

        /*
         * set data direction, and check if mapped pages need bouncing
         */
        if (!write_to_vm)
                bio->bi_rw |= (1 << BIO_RW);

        bio->bi_bdev = bdev;
        bio->bi_flags |= (1 << BIO_USER_MAPPED);
        return bio;

 out_unmap:
        for (i = 0; i < nr_pages; i++) {
                if (!pages[i])
                        break;
                page_cache_release(pages[i]);
        }
 out:
        kfree(pages);
        bio_put(bio);
        return ERR_PTR(ret);
}
/**
 * bio_map_user - map user address into bio
 * @q: the struct request_queue for the bio
 * @bdev: destination block device
 * @uaddr: start of user address
 * @len: length in bytes
 * @write_to_vm: bool indicating writing to pages or not
 *
 * Map the user space address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
                         unsigned long uaddr, unsigned int len, int write_to_vm)
{
        struct sg_iovec iov;

        iov.iov_base = (void __user *)uaddr;
        iov.iov_len = len;

        return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm);
}
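/*
 * Zero-copy mapping sketch (illustrative, assumed names): pin a user buffer
 * with bio_map_user() instead of bouncing it, and release the pinned pages
 * with bio_unmap_user() after the request has completed.
 */
static int example_map_user_io(struct request_queue *q,
                               struct block_device *bdev,
                               unsigned long uaddr, unsigned int len)
{
        /* write_to_vm == 1: the device will fill the user pages (a read) */
        struct bio *bio = bio_map_user(q, bdev, uaddr, len, 1);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        /* ... issue the bio and wait for completion ... */

        bio_unmap_user(bio);
        return 0;
}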
/**
 * bio_map_user_iov - map user sg_iovec table into bio
 * @q: the struct request_queue for the bio
 * @bdev: destination block device
 * @iov: the iovec.
 * @iov_count: number of elements in the iovec
 * @write_to_vm: bool indicating writing to pages or not
 *
 * Map the user space address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
                             struct sg_iovec *iov, int iov_count,
                             int write_to_vm)
{
        struct bio *bio;

        bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
        if (IS_ERR(bio))
                return bio;

        /*
         * subtle -- if __bio_map_user() ended up bouncing a bio,
         * it would normally disappear when its bi_end_io is run.
         * however, we need it for the unmap, so grab an extra
         * reference to it
         */
        bio_get(bio);

        return bio;
}
static void __bio_unmap_user(struct bio *bio)
{
        struct bio_vec *bvec;
        int i;

        /*
         * make sure we dirty pages we wrote to
         */
        __bio_for_each_segment(bvec, bio, i, 0) {
                if (bio_data_dir(bio) == READ)
                        set_page_dirty_lock(bvec->bv_page);

                page_cache_release(bvec->bv_page);
        }

        bio_put(bio);
}
/**
 * bio_unmap_user - unmap a bio
 * @bio: the bio being unmapped
 *
 * Unmap a bio previously mapped by bio_map_user(). Must be called with
 * a process context.
 *
 * bio_unmap_user() may sleep.
 */
void bio_unmap_user(struct bio *bio)
{
        __bio_unmap_user(bio);
        bio_put(bio);
}
static void bio_map_kern_endio(struct bio *bio, int err)
{
        bio_put(bio);
}
static struct bio *__bio_map_kern(struct request_queue *q, void *data,
                                  unsigned int len, gfp_t gfp_mask)
{
        unsigned long kaddr = (unsigned long)data;
        unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned long start = kaddr >> PAGE_SHIFT;
        const int nr_pages = end - start;
        int offset, i;
        struct bio *bio;

        bio = bio_alloc(gfp_mask, nr_pages);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        offset = offset_in_page(kaddr);
        for (i = 0; i < nr_pages; i++) {
                unsigned int bytes = PAGE_SIZE - offset;

                if (len <= 0)
                        break;

                if (bytes > len)
                        bytes = len;

                if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
                                    offset) < bytes)
                        break;

                data += bytes;
                len -= bytes;
                offset = 0;
        }

        bio->bi_end_io = bio_map_kern_endio;
        return bio;
}
/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
                         gfp_t gfp_mask)
{
        struct bio *bio;

        bio = __bio_map_kern(q, data, len, gfp_mask);
        if (IS_ERR(bio))
                return bio;

        if (bio->bi_size == len)
                return bio;

        /*
         * Don't support partial mappings.
         */
        bio_put(bio);
        return ERR_PTR(-EINVAL);
}
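/*
 * Kernel-buffer sketch (illustrative, assumed names): wrap a kmalloc'ed
 * buffer in a bio with bio_map_kern().  Partial mappings are refused, so the
 * caller only has to check for an error pointer.
 */
static struct bio *example_map_kernel_buf(struct request_queue *q,
                                          void *buf, unsigned int len)
{
        struct bio *bio = bio_map_kern(q, buf, len, GFP_KERNEL);

        if (IS_ERR(bio))
                return NULL;

        bio->bi_rw |= (1 << BIO_RW);    /* mark it as a write, for example */
        return bio;
}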
static void bio_copy_kern_endio(struct bio *bio, int err)
{
        struct bio_vec *bvec;
        const int read = bio_data_dir(bio) == READ;
        struct bio_map_data *bmd = bio->bi_private;
        int i;
        char *p = bmd->sgvecs[0].iov_base;

        __bio_for_each_segment(bvec, bio, i, 0) {
                char *addr = page_address(bvec->bv_page);
                int len = bmd->iovecs[i].bv_len;

                if (read && !err)
                        memcpy(p, addr, len);

                __free_page(bvec->bv_page);
                p += len;
        }

        bio_free_map_data(bmd);
        bio_put(bio);
}
/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * copy the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
                          gfp_t gfp_mask, int reading)
{
        unsigned long kaddr = (unsigned long)data;
        unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned long start = kaddr >> PAGE_SHIFT;
        const int nr_pages = end - start;
        struct bio *bio;
        struct bio_vec *bvec;
        struct bio_map_data *bmd;
        int i, ret;
        struct sg_iovec iov;

        iov.iov_base = data;
        iov.iov_len = len;

        bmd = bio_alloc_map_data(nr_pages, 1, gfp_mask);
        if (!bmd)
                return ERR_PTR(-ENOMEM);

        ret = -ENOMEM;
        bio = bio_alloc(gfp_mask, nr_pages);
        if (!bio)
                goto out_bmd;

        while (len) {
                struct page *page;
                unsigned int bytes = PAGE_SIZE;

                if (bytes > len)
                        bytes = len;

                page = alloc_page(q->bounce_gfp | gfp_mask);
                if (!page) {
                        ret = -ENOMEM;
                        goto cleanup;
                }

                if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
                        ret = -EINVAL;
                        goto cleanup;
                }

                len -= bytes;
        }

        if (!reading) {
                void *p = data;

                bio_for_each_segment(bvec, bio, i) {
                        char *addr = page_address(bvec->bv_page);

                        memcpy(addr, p, bvec->bv_len);
                        p += bvec->bv_len;
                }
        }

        bio->bi_private = bmd;
        bio->bi_end_io = bio_copy_kern_endio;

        bio_set_map_data(bmd, bio, &iov, 1);
        return bio;
cleanup:
        bio_for_each_segment(bvec, bio, i)
                __free_page(bvec->bv_page);

        bio_put(bio);
out_bmd:
        bio_free_map_data(bmd);

        return ERR_PTR(ret);
}
/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run set_page_dirty() from interrupt context
 * because the required locks are not interrupt-safe.  So what we can do is to
 * mark the pages dirty _before_ performing IO.  And in interrupt context,
 * check that the pages are still dirty.  If so, fine.  If not, redirty them
 * in process context.
 *
 * We special-case compound pages here: normally this means reads into hugetlb
 * pages.  The logic in here doesn't really work right for compound pages
 * because the VM does not uniformly chase down the head page in all cases.
 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
 * handle them at all.  So we skip compound pages here at an early stage.
 *
 * Note that this code is very hard to test under normal circumstances because
 * direct-io pins the pages with get_user_pages().  This makes
 * is_page_cache_freeable return false, and the VM will not clean the pages.
 * But other code (eg, pdflush) could clean the pages if they are mapped
 * pagecache.
 *
 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
 * deferred bio dirtying paths.
 */
/*
 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
 */
void bio_set_pages_dirty(struct bio *bio)
{
        struct bio_vec *bvec = bio->bi_io_vec;
        int i;

        for (i = 0; i < bio->bi_vcnt; i++) {
                struct page *page = bvec[i].bv_page;

                if (page && !PageCompound(page))
                        set_page_dirty_lock(page);
        }
}
static void bio_release_pages(struct bio *bio)
{
        struct bio_vec *bvec = bio->bi_io_vec;
        int i;

        for (i = 0; i < bio->bi_vcnt; i++) {
                struct page *page = bvec[i].bv_page;

                if (page)
                        put_page(page);
        }
}
/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
 * If they are, then fine.  If, however, some pages are clean then they must
 * have been written out during the direct-IO read.  So we take another ref on
 * the BIO and the offending pages and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
 * here on.  It will run one page_cache_release() against each page and will
 * run one bio_put() against the BIO.
 */

static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;
/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
        unsigned long flags;
        struct bio *bio;

        spin_lock_irqsave(&bio_dirty_lock, flags);
        bio = bio_dirty_list;
        bio_dirty_list = NULL;
        spin_unlock_irqrestore(&bio_dirty_lock, flags);

        while (bio) {
                struct bio *next = bio->bi_private;

                bio_set_pages_dirty(bio);
                bio_release_pages(bio);
                bio_put(bio);
                bio = next;
        }
}
void bio_check_pages_dirty(struct bio *bio)
{
        struct bio_vec *bvec = bio->bi_io_vec;
        int nr_clean_pages = 0;
        int i;

        for (i = 0; i < bio->bi_vcnt; i++) {
                struct page *page = bvec[i].bv_page;

                if (PageDirty(page) || PageCompound(page)) {
                        page_cache_release(page);
                        bvec[i].bv_page = NULL;
                } else {
                        nr_clean_pages++;
                }
        }

        if (nr_clean_pages) {
                unsigned long flags;

                spin_lock_irqsave(&bio_dirty_lock, flags);
                bio->bi_private = bio_dirty_list;
                bio_dirty_list = bio;
                spin_unlock_irqrestore(&bio_dirty_lock, flags);
                schedule_work(&bio_dirty_work);
        } else {
                bio_put(bio);
        }
}
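/*
 * Direct-IO usage sketch (illustrative, assumed names): the submitter dirties
 * the user pages up front with bio_set_pages_dirty(); the completion handler
 * then lets bio_check_pages_dirty() re-dirty anything the VM cleaned in the
 * meantime and drop the page and bio references.
 */
static void example_dio_end_io(struct bio *bio, int error)
{
        bio_check_pages_dirty(bio);     /* takes ownership of pages and bio */
}

static void example_dio_submit_read(struct bio *bio)
{
        bio->bi_end_io = example_dio_end_io;
        bio_set_pages_dirty(bio);
        submit_bio(READ, bio);
}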
/**
 * bio_endio - end I/O on a bio
 * @bio: bio
 * @error: error, if any
 *
 * bio_endio() will end I/O on the whole bio. bio_endio() is the
 * preferred way to end I/O on a bio, it takes care of clearing
 * BIO_UPTODATE on error. @error is 0 on success, and one of the
 * established -Exxxx (-EIO, for instance) error values in case
 * something went wrong. No one should call bi_end_io() directly on a
 * bio unless they own it and thus know that it has an end_io
 * function.
 **/
void bio_endio(struct bio *bio, int error)
{
        if (error)
                clear_bit(BIO_UPTODATE, &bio->bi_flags);
        else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                error = -EIO;

        if (bio->bi_end_io)
                bio->bi_end_io(bio, error);
}
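/*
 * Completion sketch (illustrative, assumed names): the driver that owns the
 * request calls bio_endio() exactly once per bio; the bio's owner can then
 * check BIO_UPTODATE in its bi_end_io callback rather than re-deriving
 * success from @error alone.
 */
static void example_owner_end_io(struct bio *bio, int error)
{
        if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                printk(KERN_ERR "example: I/O error %d\n", error);

        bio_put(bio);
}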
void bio_pair_release(struct bio_pair *bp)
{
        if (atomic_dec_and_test(&bp->cnt)) {
                struct bio *master = bp->bio1.bi_private;

                bio_endio(master, bp->error);
                mempool_free(bp, bp->bio2.bi_private);
        }
}
static void bio_pair_end_1(struct bio *bi, int err)
{
        struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);

        if (err)
                bp->error = err;

        bio_pair_release(bp);
}
static void bio_pair_end_2(struct bio *bi, int err)
{
        struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);

        if (err)
                bp->error = err;

        bio_pair_release(bp);
}
/*
 * split a bio - only worry about a bio with a single page
 * in its iovec
 */
struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
{
        struct bio_pair *bp = mempool_alloc(pool, GFP_NOIO);

        if (!bp)
                return bp;

        blk_add_trace_pdu_int(bdev_get_queue(bi->bi_bdev), BLK_TA_SPLIT, bi,
                                bi->bi_sector + first_sectors);

        BUG_ON(bi->bi_vcnt != 1);
        BUG_ON(bi->bi_idx != 0);
        atomic_set(&bp->cnt, 3);
        bp->error = 0;
        bp->bio1 = *bi;
        bp->bio2 = *bi;
        bp->bio2.bi_sector += first_sectors;
        bp->bio2.bi_size -= first_sectors << 9;
        bp->bio1.bi_size = first_sectors << 9;

        bp->bv1 = bi->bi_io_vec[0];
        bp->bv2 = bi->bi_io_vec[0];
        bp->bv2.bv_offset += first_sectors << 9;
        bp->bv2.bv_len -= first_sectors << 9;
        bp->bv1.bv_len = first_sectors << 9;

        bp->bio1.bi_io_vec = &bp->bv1;
        bp->bio2.bi_io_vec = &bp->bv2;

        bp->bio1.bi_max_vecs = 1;
        bp->bio2.bi_max_vecs = 1;

        bp->bio1.bi_end_io = bio_pair_end_1;
        bp->bio2.bi_end_io = bio_pair_end_2;

        bp->bio1.bi_private = bi;
        bp->bio2.bi_private = pool;

        if (bio_integrity(bi))
                bio_integrity_split(bi, bp, first_sectors);

        return bp;
}
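/*
 * Split sketch (illustrative, assumed names): a striping driver that finds a
 * bio crossing a chunk boundary can split it with bio_split() and submit both
 * halves; the bio_pair machinery completes the original bio once both halves
 * have finished.  "boundary_sectors" is an assumed caller-supplied value.
 */
static int example_submit_split(struct bio *bio, int boundary_sectors)
{
        struct bio_pair *bp = bio_split(bio, bio_split_pool, boundary_sectors);

        if (!bp)
                return -ENOMEM;

        generic_make_request(&bp->bio1);
        generic_make_request(&bp->bio2);
        bio_pair_release(bp);   /* drop the reference bio_split() gave us */
        return 0;
}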
/*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */
static int biovec_create_pools(struct bio_set *bs, int pool_entries)
{
        int i;

        for (i = 0; i < BIOVEC_NR_POOLS; i++) {
                struct biovec_slab *bp = bvec_slabs + i;
                mempool_t **bvp = bs->bvec_pools + i;

                *bvp = mempool_create_slab_pool(pool_entries, bp->slab);
                if (!*bvp)
                        return -ENOMEM;
        }
        return 0;
}
static void biovec_free_pools(struct bio_set *bs)
{
        int i;

        for (i = 0; i < BIOVEC_NR_POOLS; i++) {
                mempool_t *bvp = bs->bvec_pools[i];

                if (bvp)
                        mempool_destroy(bvp);
        }
}
void bioset_free(struct bio_set *bs)
{
        if (bs->bio_pool)
                mempool_destroy(bs->bio_pool);

        bioset_integrity_free(bs);
        biovec_free_pools(bs);

        kfree(bs);
}
struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size)
{
        struct bio_set *bs = kzalloc(sizeof(*bs), GFP_KERNEL);

        if (!bs)
                return NULL;

        bs->bio_pool = mempool_create_slab_pool(bio_pool_size, bio_slab);
        if (!bs->bio_pool)
                goto bad;

        if (bioset_integrity_create(bs, bio_pool_size))
                goto bad;

        if (!biovec_create_pools(bs, bvec_pool_size))
                return bs;

bad:
        bioset_free(bs);
        return NULL;
}
static void __init biovec_init_slabs(void)
{
        int i;

        for (i = 0; i < BIOVEC_NR_POOLS; i++) {
                int size;
                struct biovec_slab *bvs = bvec_slabs + i;

                size = bvs->nr_vecs * sizeof(struct bio_vec);
                bvs->slab = kmem_cache_create(bvs->name, size, 0,
                                SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
        }
}
static int __init init_bio(void)
{
        bio_slab = KMEM_CACHE(bio, SLAB_HWCACHE_ALIGN|SLAB_PANIC);

        bio_integrity_init_slab();
        biovec_init_slabs();

        fs_bio_set = bioset_create(BIO_POOL_SIZE, 2);
        if (!fs_bio_set)
                panic("bio: can't allocate bios\n");

        bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
                                                     sizeof(struct bio_pair));
        if (!bio_split_pool)
                panic("bio: can't create split pool\n");

        return 0;
}

subsys_initcall(init_bio);
EXPORT_SYMBOL(bio_alloc);
EXPORT_SYMBOL(bio_put);
EXPORT_SYMBOL(bio_free);
EXPORT_SYMBOL(bio_endio);
EXPORT_SYMBOL(bio_init);
EXPORT_SYMBOL(__bio_clone);
EXPORT_SYMBOL(bio_clone);
EXPORT_SYMBOL(bio_phys_segments);
EXPORT_SYMBOL(bio_hw_segments);
EXPORT_SYMBOL(bio_add_page);
EXPORT_SYMBOL(bio_add_pc_page);
EXPORT_SYMBOL(bio_get_nr_vecs);
EXPORT_SYMBOL(bio_map_user);
EXPORT_SYMBOL(bio_unmap_user);
EXPORT_SYMBOL(bio_map_kern);
EXPORT_SYMBOL(bio_copy_kern);
EXPORT_SYMBOL(bio_pair_release);
EXPORT_SYMBOL(bio_split);
EXPORT_SYMBOL(bio_split_pool);
EXPORT_SYMBOL(bio_copy_user);
EXPORT_SYMBOL(bio_uncopy_user);
EXPORT_SYMBOL(bioset_create);
EXPORT_SYMBOL(bioset_free);
EXPORT_SYMBOL(bio_alloc_bioset);