/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include <trace/events/block.h>
/*
 * Test patch to inline a certain number of bi_io_vec's inside the bio
 * itself, to shrink a bio data allocation from two mempool calls to one.
 */
#define BIO_INLINE_VECS		4

static mempool_t *bio_split_pool __read_mostly;
/*
 * If you change this list, also change bvec_alloc or things will
 * break badly! Cannot be bigger than what you can fit into an
 * unsigned short.
 */
#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
        BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
#undef BV
/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set *fs_bio_set;
/*
 * Our slab pool management
 */
struct bio_slab {
        struct kmem_cache *slab;
        unsigned int slab_ref;
        unsigned int slab_size;
        char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static struct bio_slab *bio_slabs;
static unsigned int bio_slab_nr, bio_slab_max;
static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
{
        unsigned int sz = sizeof(struct bio) + extra_size;
        struct kmem_cache *slab = NULL;
        struct bio_slab *bslab;
        unsigned int i, entry = -1;

        mutex_lock(&bio_slab_lock);

        i = 0;
        while (i < bio_slab_nr) {
                struct bio_slab *bslab = &bio_slabs[i];

                if (!bslab->slab && entry == -1)
                        entry = i;
                else if (bslab->slab_size == sz) {
                        slab = bslab->slab;
                        bslab->slab_ref++;
                        break;
                }
                i++;
        }

        if (slab)
                goto out_unlock;

        if (bio_slab_nr == bio_slab_max && entry == -1) {
                bio_slab_max <<= 1;
                bio_slabs = krealloc(bio_slabs,
                                     bio_slab_max * sizeof(struct bio_slab),
                                     GFP_KERNEL);
                if (!bio_slabs)
                        goto out_unlock;
        }
        if (entry == -1)
                entry = bio_slab_nr++;

        bslab = &bio_slabs[entry];

        snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
        slab = kmem_cache_create(bslab->name, sz, 0, SLAB_HWCACHE_ALIGN, NULL);
        if (!slab)
                goto out_unlock;

        printk("bio: create slab <%s> at %d\n", bslab->name, entry);
        bslab->slab = slab;
        bslab->slab_ref = 1;
        bslab->slab_size = sz;
out_unlock:
        mutex_unlock(&bio_slab_lock);
        return slab;
}
static void bio_put_slab(struct bio_set *bs)
{
        struct bio_slab *bslab = NULL;
        unsigned int i;

        mutex_lock(&bio_slab_lock);

        for (i = 0; i < bio_slab_nr; i++) {
                if (bs->bio_slab == bio_slabs[i].slab) {
                        bslab = &bio_slabs[i];
                        break;
                }
        }

        if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
                goto out;

        WARN_ON(!bslab->slab_ref);
        if (--bslab->slab_ref)
                goto out;

        kmem_cache_destroy(bslab->slab);
        bslab->slab = NULL;

out:
        mutex_unlock(&bio_slab_lock);
}
unsigned int bvec_nr_vecs(unsigned short idx)
{
        return bvec_slabs[idx].nr_vecs;
}
void bvec_free_bs(struct bio_set *bs, struct bio_vec *bv, unsigned int idx)
{
        BIO_BUG_ON(idx >= BIOVEC_NR_POOLS);

        if (idx == BIOVEC_MAX_IDX)
                mempool_free(bv, bs->bvec_pool);
        else {
                struct biovec_slab *bvs = bvec_slabs + idx;

                kmem_cache_free(bvs->slab, bv);
        }
}
struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx,
                              struct bio_set *bs)
{
        struct bio_vec *bvl;

        /*
         * see comment near bvec_array define!
         */
        switch (nr) {
        case 1:
                *idx = 0;
                break;
        case 2 ... 4:
                *idx = 1;
                break;
        case 5 ... 16:
                *idx = 2;
                break;
        case 17 ... 64:
                *idx = 3;
                break;
        case 65 ... 128:
                *idx = 4;
                break;
        case 129 ... BIO_MAX_PAGES:
                *idx = 5;
                break;
        default:
                return NULL;
        }

        /*
         * idx now points to the pool we want to allocate from. only the
         * 1-vec entry pool is mempool backed.
         */
        if (*idx == BIOVEC_MAX_IDX) {
fallback:
                bvl = mempool_alloc(bs->bvec_pool, gfp_mask);
        } else {
                struct biovec_slab *bvs = bvec_slabs + *idx;
                gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO);

                /*
                 * Make this allocation restricted and don't dump info on
                 * allocation failures, since we'll fall back to the mempool
                 * in case of failure.
                 */
                __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

                /*
                 * Try a slab allocation. If this fails and __GFP_WAIT
                 * is set, retry with the 1-entry mempool.
                 */
                bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
                if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) {
                        *idx = BIOVEC_MAX_IDX;
                        goto fallback;
                }
        }

        return bvl;
}
void bio_free(struct bio *bio, struct bio_set *bs)
{
        void *p;

        if (bio_has_allocated_vec(bio))
                bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));

        if (bio_integrity(bio))
                bio_integrity_free(bio, bs);

        /*
         * If we have front padding, adjust the bio pointer before freeing
         */
        p = bio;
        if (bs->front_pad)
                p -= bs->front_pad;

        mempool_free(p, bs->bio_pool);
}
EXPORT_SYMBOL(bio_free);
void bio_init(struct bio *bio)
{
        memset(bio, 0, sizeof(*bio));
        bio->bi_flags = 1 << BIO_UPTODATE;
        bio->bi_comp_cpu = -1;
        atomic_set(&bio->bi_cnt, 1);
}
EXPORT_SYMBOL(bio_init);
/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask: the GFP_ mask given to the slab allocator
 * @nr_iovecs: number of iovecs to pre-allocate
 * @bs: the bio_set to allocate from. If %NULL, just use kmalloc
 *
 * bio_alloc_bioset will first try its own mempool to satisfy the allocation.
 * If %__GFP_WAIT is set then we will block on the internal pool waiting
 * for a &struct bio to become free. If a %NULL @bs is passed in, we will
 * fall back to just using @kmalloc to allocate the required memory.
 *
 * Note that the caller must set ->bi_destructor on successful return
 * of a bio, to do the appropriate freeing of the bio once the reference
 * count drops to zero.
 **/
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
        unsigned long idx = BIO_POOL_NONE;
        struct bio_vec *bvl = NULL;
        struct bio *bio;
        void *p;

        p = mempool_alloc(bs->bio_pool, gfp_mask);
        if (unlikely(!p))
                return NULL;
        bio = p + bs->front_pad;

        bio_init(bio);

        if (unlikely(!nr_iovecs))
                goto out_set;

        if (nr_iovecs <= BIO_INLINE_VECS) {
                bvl = bio->bi_inline_vecs;
                nr_iovecs = BIO_INLINE_VECS;
        } else {
                bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
                if (unlikely(!bvl))
                        goto err_free;

                nr_iovecs = bvec_nr_vecs(idx);
        }
out_set:
        bio->bi_flags |= idx << BIO_POOL_OFFSET;
        bio->bi_max_vecs = nr_iovecs;
        bio->bi_io_vec = bvl;
        return bio;

err_free:
        mempool_free(p, bs->bio_pool);
        return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);
static void bio_fs_destructor(struct bio *bio)
{
        bio_free(bio, fs_bio_set);
}
/**
 * bio_alloc - allocate a new bio, memory pool backed
 * @gfp_mask: allocation mask to use
 * @nr_iovecs: number of iovecs
 *
 * bio_alloc will allocate a bio and associated bio_vec array that can hold
 * at least @nr_iovecs entries. Allocations will be done from the
 * fs_bio_set. Also see @bio_alloc_bioset and @bio_kmalloc.
 *
 * If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
 * a bio. This is due to the mempool guarantees. To make this work, callers
 * must never allocate more than 1 bio at a time from this pool. Callers
 * that need to allocate more than 1 bio must always submit the previously
 * allocated bio for IO before attempting to allocate a new one. Failure to
 * do so can cause livelocks under memory pressure.
 *
 * RETURNS:
 * Pointer to new bio on success, NULL on failure.
 **/
struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
{
        struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);

        if (bio)
                bio->bi_destructor = bio_fs_destructor;

        return bio;
}
EXPORT_SYMBOL(bio_alloc);
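
/*
 * Illustrative sketch (not part of the original file, hence guarded out):
 * a minimal caller of the pool-backed allocator above.  It takes a
 * one-segment bio from fs_bio_set via bio_alloc(), points it at a block
 * device and submits it.  The names example_read_page() and
 * example_end_io() are hypothetical; a real caller would pick a meaningful
 * sector and completion policy.
 */
#if 0
static void example_end_io(struct bio *bio, int error)
{
        /* the page belongs to the caller; just drop our bio reference */
        bio_put(bio);
}

static int example_read_page(struct block_device *bdev, sector_t sector,
                             struct page *page)
{
        /* GFP_NOIO includes __GFP_WAIT, so this allocation will not fail */
        struct bio *bio = bio_alloc(GFP_NOIO, 1);

        bio->bi_bdev = bdev;
        bio->bi_sector = sector;
        bio->bi_end_io = example_end_io;

        if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
                bio_put(bio);
                return -EIO;
        }

        submit_bio(READ, bio);
        return 0;
}
#endif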
static void bio_kmalloc_destructor(struct bio *bio)
{
        if (bio_integrity(bio))
                bio_integrity_free(bio, fs_bio_set);
        kfree(bio);
}
/**
 * bio_kmalloc - allocate a bio for I/O using kmalloc()
 * @gfp_mask: the GFP_ mask given to the slab allocator
 * @nr_iovecs: number of iovecs to pre-allocate
 *
 * Allocate a new bio with @nr_iovecs bvecs. If @gfp_mask contains
 * %__GFP_WAIT, the allocation is guaranteed to succeed.
 **/
struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
{
        struct bio *bio;

        bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec),
                      gfp_mask);
        if (unlikely(!bio))
                return NULL;

        bio_init(bio);
        bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET;
        bio->bi_max_vecs = nr_iovecs;
        bio->bi_io_vec = bio->bi_inline_vecs;
        bio->bi_destructor = bio_kmalloc_destructor;

        return bio;
}
EXPORT_SYMBOL(bio_kmalloc);
void zero_fill_bio(struct bio *bio)
{
        unsigned long flags;
        struct bio_vec *bv;
        int i;

        bio_for_each_segment(bv, bio, i) {
                char *data = bvec_kmap_irq(bv, &flags);
                memset(data, 0, bv->bv_len);
                flush_dcache_page(bv->bv_page);
                bvec_kunmap_irq(data, &flags);
        }
}
EXPORT_SYMBOL(zero_fill_bio);
/**
 * bio_put - release a reference to a bio
 * @bio: bio to release reference to
 *
 * Put a reference to a &struct bio, either one you have gotten with
 * bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
        BIO_BUG_ON(!atomic_read(&bio->bi_cnt));

        /*
         * last put frees it
         */
        if (atomic_dec_and_test(&bio->bi_cnt)) {
                bio->bi_next = NULL;
                bio->bi_destructor(bio);
        }
}
EXPORT_SYMBOL(bio_put);
inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
        if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
                blk_recount_segments(q, bio);

        return bio->bi_phys_segments;
}
EXPORT_SYMBOL(bio_phys_segments);
/**
 * __bio_clone - clone a bio
 * @bio: destination bio
 * @bio_src: bio to clone
 *
 * Clone a &bio. Caller will own the returned bio, but not
 * the actual data it points to. Reference count of returned
 * bio will be one.
 **/
void __bio_clone(struct bio *bio, struct bio *bio_src)
{
        memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
               bio_src->bi_max_vecs * sizeof(struct bio_vec));

        /*
         * most users will be overriding ->bi_bdev with a new target,
         * so we don't set nor calculate new physical/hw segment counts here
         */
        bio->bi_sector = bio_src->bi_sector;
        bio->bi_bdev = bio_src->bi_bdev;
        bio->bi_flags |= 1 << BIO_CLONED;
        bio->bi_rw = bio_src->bi_rw;
        bio->bi_vcnt = bio_src->bi_vcnt;
        bio->bi_size = bio_src->bi_size;
        bio->bi_idx = bio_src->bi_idx;
}
EXPORT_SYMBOL(__bio_clone);
/**
 * bio_clone - clone a bio
 * @bio: bio to clone
 * @gfp_mask: allocation priority
 *
 * Like __bio_clone, only also allocates the returned bio
 **/
struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
        struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set);

        if (!b)
                return NULL;

        b->bi_destructor = bio_fs_destructor;
        __bio_clone(b, bio);

        if (bio_integrity(bio)) {
                int ret;

                ret = bio_integrity_clone(b, bio, gfp_mask, fs_bio_set);
                if (ret < 0) {
                        bio_put(b);
                        return NULL;
                }
        }

        return b;
}
EXPORT_SYMBOL(bio_clone);
/**
 * bio_get_nr_vecs - return approx number of vecs
 * @bdev: I/O target
 *
 * Return the approximate number of pages we can send to this target.
 * There's no guarantee that you will be able to fit this number of pages
 * into a bio, it does not account for dynamic restrictions that vary
 * on the fly.
 **/
int bio_get_nr_vecs(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);
        int nr_pages;

        nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (nr_pages > queue_max_phys_segments(q))
                nr_pages = queue_max_phys_segments(q);
        if (nr_pages > queue_max_hw_segments(q))
                nr_pages = queue_max_hw_segments(q);

        return nr_pages;
}
EXPORT_SYMBOL(bio_get_nr_vecs);
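
/*
 * Illustrative sketch (not part of the original file, hence guarded out):
 * using the estimate above to size a bio before filling it page by page.
 * bio_add_page() may still refuse a page earlier than the estimate
 * suggests, so its return value is what actually matters.
 * example_alloc_for_target() is a hypothetical helper name.
 */
#if 0
static struct bio *example_alloc_for_target(struct block_device *bdev,
                                            int npages)
{
        int nr_vecs = bio_get_nr_vecs(bdev);

        if (npages < nr_vecs)
                nr_vecs = npages;

        return bio_alloc(GFP_KERNEL, nr_vecs);
}
#endif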
static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
                          *page, unsigned int len, unsigned int offset,
                          unsigned short max_sectors)
{
        int retried_segments = 0;
        struct bio_vec *bvec;

        /*
         * cloned bio must not modify vec list
         */
        if (unlikely(bio_flagged(bio, BIO_CLONED)))
                return 0;

        if (((bio->bi_size + len) >> 9) > max_sectors)
                return 0;

        /*
         * For filesystems with a blocksize smaller than the pagesize
         * we will often be called with the same page as last time and
         * a consecutive offset.  Optimize this special case.
         */
        if (bio->bi_vcnt > 0) {
                struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];

                if (page == prev->bv_page &&
                    offset == prev->bv_offset + prev->bv_len) {
                        prev->bv_len += len;

                        if (q->merge_bvec_fn) {
                                struct bvec_merge_data bvm = {
                                        .bi_bdev = bio->bi_bdev,
                                        .bi_sector = bio->bi_sector,
                                        .bi_size = bio->bi_size,
                                };

                                if (q->merge_bvec_fn(q, &bvm, prev) < len) {
                                        prev->bv_len -= len;
                                        return 0;
                                }
                        }

                        goto done;
                }
        }

        if (bio->bi_vcnt >= bio->bi_max_vecs)
                return 0;

        /*
         * we might lose a segment or two here, but rather that than
         * make this too complex.
         */
        while (bio->bi_phys_segments >= queue_max_phys_segments(q)
               || bio->bi_phys_segments >= queue_max_hw_segments(q)) {
                if (retried_segments)
                        return 0;

                retried_segments = 1;
                blk_recount_segments(q, bio);
        }

        /*
         * setup the new entry, we might clear it again later if we
         * cannot add the page
         */
        bvec = &bio->bi_io_vec[bio->bi_vcnt];
        bvec->bv_page = page;
        bvec->bv_len = len;
        bvec->bv_offset = offset;

        /*
         * if queue has other restrictions (eg varying max sector size
         * depending on offset), it can specify a merge_bvec_fn in the
         * queue to get further control
         */
        if (q->merge_bvec_fn) {
                struct bvec_merge_data bvm = {
                        .bi_bdev = bio->bi_bdev,
                        .bi_sector = bio->bi_sector,
                        .bi_size = bio->bi_size,
                };

                /*
                 * merge_bvec_fn() returns number of bytes it can accept
                 * at this offset
                 */
                if (q->merge_bvec_fn(q, &bvm, bvec) < len) {
                        bvec->bv_page = NULL;
                        bvec->bv_len = 0;
                        bvec->bv_offset = 0;
                        return 0;
                }
        }

        /* If we may be able to merge these biovecs, force a recount */
        if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
                bio->bi_flags &= ~(1 << BIO_SEG_VALID);

        bio->bi_vcnt++;
        bio->bi_phys_segments++;
done:
        bio->bi_size += len;
        return len;
}
/**
 * bio_add_pc_page - attempt to add page to bio
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This can fail for a
 * number of reasons, such as the bio being full or target block
 * device limitations. The target block device must allow bio's
 * smaller than PAGE_SIZE, so it is always possible to add a single
 * page to an empty bio. This should only be used by REQ_PC bios.
 **/
int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
                    unsigned int len, unsigned int offset)
{
        return __bio_add_page(q, bio, page, len, offset,
                              queue_max_hw_sectors(q));
}
EXPORT_SYMBOL(bio_add_pc_page);
/**
 * bio_add_page - attempt to add page to bio
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This can fail for a
 * number of reasons, such as the bio being full or target block
 * device limitations. The target block device must allow bio's
 * smaller than PAGE_SIZE, so it is always possible to add a single
 * page to an empty bio.
 **/
int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
                 unsigned int offset)
{
        struct request_queue *q = bdev_get_queue(bio->bi_bdev);

        return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q));
}
EXPORT_SYMBOL(bio_add_page);
struct bio_map_data {
        struct bio_vec *iovecs;
        struct sg_iovec *sgvecs;
        int nr_sgvecs;
        int is_our_pages;
};
static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
                             struct sg_iovec *iov, int iov_count,
                             int is_our_pages)
{
        memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
        memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
        bmd->nr_sgvecs = iov_count;
        bmd->is_our_pages = is_our_pages;
        bio->bi_private = bmd;
}

static void bio_free_map_data(struct bio_map_data *bmd)
{
        kfree(bmd->iovecs);
        kfree(bmd->sgvecs);
        kfree(bmd);
}
static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
                                               gfp_t gfp_mask)
{
        struct bio_map_data *bmd = kmalloc(sizeof(*bmd), gfp_mask);

        if (!bmd)
                return NULL;

        bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, gfp_mask);
        if (!bmd->iovecs) {
                kfree(bmd);
                return NULL;
        }

        bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, gfp_mask);
        if (bmd->sgvecs)
                return bmd;

        kfree(bmd->iovecs);
        kfree(bmd);
        return NULL;
}
static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
                          struct sg_iovec *iov, int iov_count,
                          int to_user, int from_user, int do_free_page)
{
        int ret = 0, i;
        struct bio_vec *bvec;
        int iov_idx = 0;
        unsigned int iov_off = 0;

        __bio_for_each_segment(bvec, bio, i, 0) {
                char *bv_addr = page_address(bvec->bv_page);
                unsigned int bv_len = iovecs[i].bv_len;

                while (bv_len && iov_idx < iov_count) {
                        unsigned int bytes;
                        char __user *iov_addr;

                        bytes = min_t(unsigned int,
                                      iov[iov_idx].iov_len - iov_off, bv_len);
                        iov_addr = iov[iov_idx].iov_base + iov_off;

                        if (!ret) {
                                if (to_user)
                                        ret = copy_to_user(iov_addr, bv_addr,
                                                           bytes);

                                if (from_user)
                                        ret = copy_from_user(bv_addr, iov_addr,
                                                             bytes);

                                if (ret)
                                        ret = -EFAULT;
                        }

                        bv_len -= bytes;
                        bv_addr += bytes;
                        iov_addr += bytes;
                        iov_off += bytes;

                        if (iov[iov_idx].iov_len == iov_off) {
                                iov_idx++;
                                iov_off = 0;
                        }
                }

                if (do_free_page)
                        __free_page(bvec->bv_page);
        }

        return ret;
}
/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user() and write back data
 * to user space in case of a read.
 **/
int bio_uncopy_user(struct bio *bio)
{
        struct bio_map_data *bmd = bio->bi_private;
        int ret = 0;

        if (!bio_flagged(bio, BIO_NULL_MAPPED))
                ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
                                     bmd->nr_sgvecs, bio_data_dir(bio) == READ,
                                     0, bmd->is_our_pages);
        bio_free_map_data(bmd);
        bio_put(bio);
        return ret;
}
EXPORT_SYMBOL(bio_uncopy_user);
/**
 * bio_copy_user_iov - copy user data to bio
 * @q: destination block queue
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iov: the iovec
 * @iov_count: number of elements in the iovec
 * @write_to_vm: bool indicating writing to pages or not
 * @gfp_mask: memory allocation flags
 *
 * Prepares and returns a bio for indirect user io, bouncing data
 * to/from kernel pages as necessary. Must be paired with a call to
 * bio_uncopy_user() on io completion.
 **/
struct bio *bio_copy_user_iov(struct request_queue *q,
                              struct rq_map_data *map_data,
                              struct sg_iovec *iov, int iov_count,
                              int write_to_vm, gfp_t gfp_mask)
{
        struct bio_map_data *bmd;
        struct bio_vec *bvec;
        struct page *page;
        struct bio *bio;
        int i, ret;
        int nr_pages = 0;
        unsigned int len = 0;
        unsigned int offset = map_data ? map_data->offset & ~PAGE_MASK : 0;

        for (i = 0; i < iov_count; i++) {
                unsigned long uaddr;
                unsigned long end;
                unsigned long start;

                uaddr = (unsigned long)iov[i].iov_base;
                end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
                start = uaddr >> PAGE_SHIFT;

                nr_pages += end - start;
                len += iov[i].iov_len;
        }

        if (offset)
                nr_pages++;

        bmd = bio_alloc_map_data(nr_pages, iov_count, gfp_mask);
        if (!bmd)
                return ERR_PTR(-ENOMEM);

        ret = -ENOMEM;
        bio = bio_kmalloc(gfp_mask, nr_pages);
        if (!bio)
                goto out_bmd;

        bio->bi_rw |= (!write_to_vm << BIO_RW);

        ret = 0;

        if (map_data) {
                nr_pages = 1 << map_data->page_order;
                i = map_data->offset / PAGE_SIZE;
        }
        while (len) {
                unsigned int bytes = PAGE_SIZE;

                bytes -= offset;

                if (bytes > len)
                        bytes = len;

                if (map_data) {
                        if (i == map_data->nr_entries * nr_pages) {
                                ret = -ENOMEM;
                                break;
                        }

                        page = map_data->pages[i / nr_pages];
                        page += (i % nr_pages);

                        i++;
                } else {
                        page = alloc_page(q->bounce_gfp | gfp_mask);
                        if (!page) {
                                ret = -ENOMEM;
                                break;
                        }
                }

                if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
                        break;

                len -= bytes;
                offset = 0;
        }

        if (ret)
                goto cleanup;

        /*
         * success
         */
        if ((!write_to_vm && (!map_data || !map_data->null_mapped)) ||
            (map_data && map_data->from_user)) {
                ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 1, 0);
                if (ret)
                        goto cleanup;
        }

        bio_set_map_data(bmd, bio, iov, iov_count, map_data ? 0 : 1);
        return bio;
cleanup:
        if (!map_data)
                bio_for_each_segment(bvec, bio, i)
                        __free_page(bvec->bv_page);

        bio_put(bio);
out_bmd:
        bio_free_map_data(bmd);
        return ERR_PTR(ret);
}
/**
 * bio_copy_user - copy user data to bio
 * @q: destination block queue
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @uaddr: start of user address
 * @len: length in bytes
 * @write_to_vm: bool indicating writing to pages or not
 * @gfp_mask: memory allocation flags
 *
 * Prepares and returns a bio for indirect user io, bouncing data
 * to/from kernel pages as necessary. Must be paired with a call to
 * bio_uncopy_user() on io completion.
 **/
struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data,
                          unsigned long uaddr, unsigned int len,
                          int write_to_vm, gfp_t gfp_mask)
{
        struct sg_iovec iov;

        iov.iov_base = (void __user *)uaddr;
        iov.iov_len = len;

        return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask);
}
EXPORT_SYMBOL(bio_copy_user);
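
/*
 * Illustrative sketch (not part of the original file, hence guarded out):
 * the pairing that the kernel-doc above requires.  A driver bounces a user
 * buffer with bio_copy_user(), submits the bio, and calls bio_uncopy_user()
 * once the I/O has completed.  example_bounce_user() and the elided
 * submission/wait step are hypothetical.
 */
#if 0
static int example_bounce_user(struct request_queue *q,
                               unsigned long uaddr, unsigned int len)
{
        struct bio *bio;

        bio = bio_copy_user(q, NULL, uaddr, len, 1 /* write_to_vm */, GFP_KERNEL);
        if (IS_ERR(bio))
                return PTR_ERR(bio);

        /* ... submit the bio and wait for its completion here ... */

        return bio_uncopy_user(bio);    /* copies back for reads, frees pages */
}
#endif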
static struct bio *__bio_map_user_iov(struct request_queue *q,
                                      struct block_device *bdev,
                                      struct sg_iovec *iov, int iov_count,
                                      int write_to_vm, gfp_t gfp_mask)
{
        int i, j;
        int nr_pages = 0;
        struct page **pages;
        struct bio *bio;
        int cur_page = 0;
        int ret, offset;

        for (i = 0; i < iov_count; i++) {
                unsigned long uaddr = (unsigned long)iov[i].iov_base;
                unsigned long len = iov[i].iov_len;
                unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
                unsigned long start = uaddr >> PAGE_SHIFT;

                nr_pages += end - start;
                /*
                 * buffer must be aligned to at least hardsector size for now
                 */
                if (uaddr & queue_dma_alignment(q))
                        return ERR_PTR(-EINVAL);
        }

        if (!nr_pages)
                return ERR_PTR(-EINVAL);

        bio = bio_kmalloc(gfp_mask, nr_pages);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        ret = -ENOMEM;
        pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
        if (!pages)
                goto out;

        for (i = 0; i < iov_count; i++) {
                unsigned long uaddr = (unsigned long)iov[i].iov_base;
                unsigned long len = iov[i].iov_len;
                unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
                unsigned long start = uaddr >> PAGE_SHIFT;
                const int local_nr_pages = end - start;
                const int page_limit = cur_page + local_nr_pages;

                ret = get_user_pages_fast(uaddr, local_nr_pages,
                                          write_to_vm, &pages[cur_page]);
                if (ret < local_nr_pages) {
                        ret = -EFAULT;
                        goto out_unmap;
                }

                offset = uaddr & ~PAGE_MASK;
                for (j = cur_page; j < page_limit; j++) {
                        unsigned int bytes = PAGE_SIZE - offset;

                        if (len <= 0)
                                break;

                        if (bytes > len)
                                bytes = len;

                        if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
                            bytes)
                                break;

                        len -= bytes;
                        offset = 0;
                }

                cur_page = j;
                /*
                 * release the pages we didn't map into the bio, if any
                 */
                while (j < page_limit)
                        page_cache_release(pages[j++]);
        }

        kfree(pages);

        /*
         * set data direction, and check if mapped pages need bouncing
         */
        if (!write_to_vm)
                bio->bi_rw |= (1 << BIO_RW);

        bio->bi_bdev = bdev;
        bio->bi_flags |= (1 << BIO_USER_MAPPED);
        return bio;

out_unmap:
        for (i = 0; i < nr_pages; i++) {
                if (!pages[i])
                        break;
                page_cache_release(pages[i]);
        }
out:
        kfree(pages);
        bio_put(bio);
        return ERR_PTR(ret);
}
/**
 * bio_map_user - map user address into bio
 * @q: the struct request_queue for the bio
 * @bdev: destination block device
 * @uaddr: start of user address
 * @len: length in bytes
 * @write_to_vm: bool indicating writing to pages or not
 * @gfp_mask: memory allocation flags
 *
 * Map the user space address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 **/
struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
                         unsigned long uaddr, unsigned int len, int write_to_vm,
                         gfp_t gfp_mask)
{
        struct sg_iovec iov;

        iov.iov_base = (void __user *)uaddr;
        iov.iov_len = len;

        return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask);
}
EXPORT_SYMBOL(bio_map_user);
/**
 * bio_map_user_iov - map user sg_iovec table into bio
 * @q: the struct request_queue for the bio
 * @bdev: destination block device
 * @iov: the iovec
 * @iov_count: number of elements in the iovec
 * @write_to_vm: bool indicating writing to pages or not
 * @gfp_mask: memory allocation flags
 *
 * Map the user space address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 **/
struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
                             struct sg_iovec *iov, int iov_count,
                             int write_to_vm, gfp_t gfp_mask)
{
        struct bio *bio;

        bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm,
                                 gfp_mask);
        if (IS_ERR(bio))
                return bio;

        /*
         * subtle -- if __bio_map_user() ended up bouncing a bio,
         * it would normally disappear when its bi_end_io is run.
         * however, we need it for the unmap, so grab an extra
         * reference to it
         */
        bio_get(bio);

        return bio;
}
static void __bio_unmap_user(struct bio *bio)
{
        struct bio_vec *bvec;
        int i;

        /*
         * make sure we dirty pages we wrote to
         */
        __bio_for_each_segment(bvec, bio, i, 0) {
                if (bio_data_dir(bio) == READ)
                        set_page_dirty_lock(bvec->bv_page);

                page_cache_release(bvec->bv_page);
        }

        bio_put(bio);
}
/**
 * bio_unmap_user - unmap a bio
 * @bio: the bio being unmapped
 *
 * Unmap a bio previously mapped by bio_map_user(). Must be called from
 * process context.
 *
 * bio_unmap_user() may sleep.
 **/
void bio_unmap_user(struct bio *bio)
{
        __bio_unmap_user(bio);
        bio_put(bio);
}
EXPORT_SYMBOL(bio_unmap_user);
static void bio_map_kern_endio(struct bio *bio, int err)
{
        bio_put(bio);
}
static struct bio *__bio_map_kern(struct request_queue *q, void *data,
                                  unsigned int len, gfp_t gfp_mask)
{
        unsigned long kaddr = (unsigned long)data;
        unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned long start = kaddr >> PAGE_SHIFT;
        const int nr_pages = end - start;
        int offset, i;
        struct bio *bio;

        bio = bio_kmalloc(gfp_mask, nr_pages);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        offset = offset_in_page(kaddr);
        for (i = 0; i < nr_pages; i++) {
                unsigned int bytes = PAGE_SIZE - offset;

                if (len <= 0)
                        break;

                if (bytes > len)
                        bytes = len;

                if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
                                    offset) < bytes)
                        break;

                data += bytes;
                len -= bytes;
                offset = 0;
        }

        bio->bi_end_io = bio_map_kern_endio;
        return bio;
}
/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 **/
struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
                         gfp_t gfp_mask)
{
        struct bio *bio;

        bio = __bio_map_kern(q, data, len, gfp_mask);
        if (IS_ERR(bio))
                return bio;

        if (bio->bi_size == len)
                return bio;

        /*
         * Don't support partial mappings.
         */
        bio_put(bio);
        return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL(bio_map_kern);
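
/*
 * Illustrative sketch (not part of the original file, hence guarded out):
 * mapping a kernel buffer for I/O with bio_map_kern().  The buffer must
 * remain allocated until the bio completes; example_submit_buffer() and
 * the choice of sector are hypothetical.
 */
#if 0
static int example_submit_buffer(struct request_queue *q,
                                 struct block_device *bdev,
                                 void *buf, unsigned int len, sector_t sector)
{
        struct bio *bio = bio_map_kern(q, buf, len, GFP_KERNEL);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        bio->bi_bdev = bdev;
        bio->bi_sector = sector;

        /* bio_map_kern() already installed bio_map_kern_endio */
        submit_bio(WRITE, bio);
        return 0;
}
#endif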
static void bio_copy_kern_endio(struct bio *bio, int err)
{
        struct bio_vec *bvec;
        const int read = bio_data_dir(bio) == READ;
        struct bio_map_data *bmd = bio->bi_private;
        int i;
        char *p = bmd->sgvecs[0].iov_base;

        __bio_for_each_segment(bvec, bio, i, 0) {
                char *addr = page_address(bvec->bv_page);
                int len = bmd->iovecs[i].bv_len;

                if (read)
                        memcpy(p, addr, len);

                __free_page(bvec->bv_page);
                p += len;
        }

        bio_free_map_data(bmd);
        bio_put(bio);
}
/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * Copy the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 **/
struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
                          gfp_t gfp_mask, int reading)
{
        struct bio *bio;
        struct bio_vec *bvec;
        int i;

        bio = bio_copy_user(q, NULL, (unsigned long)data, len, 1, gfp_mask);
        if (IS_ERR(bio))
                return bio;

        if (!reading) {
                void *p = data;

                bio_for_each_segment(bvec, bio, i) {
                        char *addr = page_address(bvec->bv_page);

                        memcpy(addr, p, bvec->bv_len);
                        p += bvec->bv_len;
                }
        }

        bio->bi_end_io = bio_copy_kern_endio;

        return bio;
}
EXPORT_SYMBOL(bio_copy_kern);
/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run set_page_dirty() from interrupt context
 * because the required locks are not interrupt-safe.  So what we can do is to
 * mark the pages dirty _before_ performing IO.  And in interrupt context,
 * check that the pages are still dirty.  If so, fine.  If not, redirty them
 * in process context.
 *
 * We special-case compound pages here: normally this means reads into hugetlb
 * pages.  The logic in here doesn't really work right for compound pages
 * because the VM does not uniformly chase down the head page in all cases.
 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
 * handle them at all.  So we skip compound pages here at an early stage.
 *
 * Note that this code is very hard to test under normal circumstances because
 * direct-io pins the pages with get_user_pages().  This makes
 * is_page_cache_freeable return false, and the VM will not clean the pages.
 * But other code (eg, pdflush) could clean the pages if they are mapped
 * pagecache pages.
 *
 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
 * deferred bio dirtying paths.
 */
/*
 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
 */
void bio_set_pages_dirty(struct bio *bio)
{
        struct bio_vec *bvec = bio->bi_io_vec;
        int i;

        for (i = 0; i < bio->bi_vcnt; i++) {
                struct page *page = bvec[i].bv_page;

                if (page && !PageCompound(page))
                        set_page_dirty_lock(page);
        }
}
static void bio_release_pages(struct bio *bio)
{
        struct bio_vec *bvec = bio->bi_io_vec;
        int i;

        for (i = 0; i < bio->bi_vcnt; i++) {
                struct page *page = bvec[i].bv_page;

                if (page)
                        put_page(page);
        }
}
/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
 * If they are, then fine.  If, however, some pages are clean then they must
 * have been written out during the direct-IO read.  So we take another ref on
 * the BIO and the offending pages and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
 * here on.  It will run one page_cache_release() against each page and will
 * run one bio_put() against the BIO.
 */

static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;
/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
        unsigned long flags;
        struct bio *bio;

        spin_lock_irqsave(&bio_dirty_lock, flags);
        bio = bio_dirty_list;
        bio_dirty_list = NULL;
        spin_unlock_irqrestore(&bio_dirty_lock, flags);

        while (bio) {
                struct bio *next = bio->bi_private;

                bio_set_pages_dirty(bio);
                bio_release_pages(bio);
                bio_put(bio);
                bio = next;
        }
}
void bio_check_pages_dirty(struct bio *bio)
{
        struct bio_vec *bvec = bio->bi_io_vec;
        int nr_clean_pages = 0;
        int i;

        for (i = 0; i < bio->bi_vcnt; i++) {
                struct page *page = bvec[i].bv_page;

                if (PageDirty(page) || PageCompound(page)) {
                        page_cache_release(page);
                        bvec[i].bv_page = NULL;
                } else {
                        nr_clean_pages++;
                }
        }

        if (nr_clean_pages) {
                unsigned long flags;

                spin_lock_irqsave(&bio_dirty_lock, flags);
                bio->bi_private = bio_dirty_list;
                bio_dirty_list = bio;
                spin_unlock_irqrestore(&bio_dirty_lock, flags);
                schedule_work(&bio_dirty_work);
        } else {
                bio_release_pages(bio);
                bio_put(bio);
        }
}
/**
 * bio_endio - end I/O on a bio
 * @bio: bio
 * @error: error, if any
 *
 * bio_endio() will end I/O on the whole bio. bio_endio() is the
 * preferred way to end I/O on a bio, it takes care of clearing
 * BIO_UPTODATE on error. @error is 0 on success, and one of the
 * established -Exxxx (-EIO, for instance) error values in case
 * something went wrong. No one should call bi_end_io() directly on a
 * bio unless they own it and thus know that it has an end_io
 * function.
 **/
void bio_endio(struct bio *bio, int error)
{
        if (error)
                clear_bit(BIO_UPTODATE, &bio->bi_flags);
        else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                error = -EIO;

        if (bio->bi_end_io)
                bio->bi_end_io(bio, error);
}
EXPORT_SYMBOL(bio_endio);
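
/*
 * Illustrative sketch (not part of the original file, hence guarded out):
 * a typical ->bi_end_io hook of the kind bio_endio() invokes above.  The
 * owner inspects @error (and BIO_UPTODATE), signals whoever is waiting,
 * and drops its reference.  struct example_ctx and example_end_io() are
 * hypothetical names; <linux/completion.h> is assumed to be available.
 */
#if 0
struct example_ctx {
        struct completion done;
        int error;
};

static void example_end_io(struct bio *bio, int error)
{
        struct example_ctx *ctx = bio->bi_private;

        if (!error && !test_bit(BIO_UPTODATE, &bio->bi_flags))
                error = -EIO;

        ctx->error = error;
        complete(&ctx->done);
        bio_put(bio);
}
#endif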
void bio_pair_release(struct bio_pair *bp)
{
        if (atomic_dec_and_test(&bp->cnt)) {
                struct bio *master = bp->bio1.bi_private;

                bio_endio(master, bp->error);
                mempool_free(bp, bp->bio2.bi_private);
        }
}
EXPORT_SYMBOL(bio_pair_release);
static void bio_pair_end_1(struct bio *bi, int err)
{
        struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);

        if (err)
                bp->error = err;

        bio_pair_release(bp);
}

static void bio_pair_end_2(struct bio *bi, int err)
{
        struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);

        if (err)
                bp->error = err;

        bio_pair_release(bp);
}
/*
 * split a bio - only worry about a bio with a single page in its iovec
 */
struct bio_pair *bio_split(struct bio *bi, int first_sectors)
{
        struct bio_pair *bp = mempool_alloc(bio_split_pool, GFP_NOIO);

        if (!bp)
                return bp;

        trace_block_split(bdev_get_queue(bi->bi_bdev), bi,
                          bi->bi_sector + first_sectors);

        BUG_ON(bi->bi_vcnt != 1);
        BUG_ON(bi->bi_idx != 0);
        atomic_set(&bp->cnt, 3);
        bp->error = 0;
        bp->bio1 = *bi;
        bp->bio2 = *bi;
        bp->bio2.bi_sector += first_sectors;
        bp->bio2.bi_size -= first_sectors << 9;
        bp->bio1.bi_size = first_sectors << 9;

        bp->bv1 = bi->bi_io_vec[0];
        bp->bv2 = bi->bi_io_vec[0];
        bp->bv2.bv_offset += first_sectors << 9;
        bp->bv2.bv_len -= first_sectors << 9;
        bp->bv1.bv_len = first_sectors << 9;

        bp->bio1.bi_io_vec = &bp->bv1;
        bp->bio2.bi_io_vec = &bp->bv2;

        bp->bio1.bi_max_vecs = 1;
        bp->bio2.bi_max_vecs = 1;

        bp->bio1.bi_end_io = bio_pair_end_1;
        bp->bio2.bi_end_io = bio_pair_end_2;

        bp->bio1.bi_private = bi;
        bp->bio2.bi_private = bio_split_pool;

        if (bio_integrity(bi))
                bio_integrity_split(bi, bp, first_sectors);

        return bp;
}
EXPORT_SYMBOL(bio_split);
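
/*
 * Illustrative sketch (not part of the original file, hence guarded out):
 * how a stacking driver typically consumes bio_split() above.  The split
 * only supports single-page bios; both halves are resubmitted and the pair
 * releases itself as each half completes.  example_submit_split() and the
 * target devices are hypothetical, and a real driver would also remap
 * bi_sector for each half.
 */
#if 0
static void example_submit_split(struct bio *bio, int first_sectors,
                                 struct block_device *bdev1,
                                 struct block_device *bdev2)
{
        struct bio_pair *bp = bio_split(bio, first_sectors);

        bp->bio1.bi_bdev = bdev1;
        bp->bio2.bi_bdev = bdev2;

        generic_make_request(&bp->bio1);
        generic_make_request(&bp->bio2);

        bio_pair_release(bp);   /* drop the reference bio_split() gave us */
}
#endif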
/**
 * bio_sector_offset - Find hardware sector offset in bio
 * @bio: bio to inspect
 * @index: bio_vec index
 * @offset: offset in bv_page
 *
 * Return the number of hardware sectors between beginning of bio
 * and an end point indicated by a bio_vec index and an offset
 * within that vector's page.
 **/
sector_t bio_sector_offset(struct bio *bio, unsigned short index,
                           unsigned int offset)
{
        unsigned int sector_sz;
        struct bio_vec *bv;
        sector_t sectors;
        int i;

        sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue);
        sectors = 0;

        if (index >= bio->bi_idx)
                index = bio->bi_vcnt - 1;

        __bio_for_each_segment(bv, bio, i, 0) {
                if (i == index) {
                        if (offset > bv->bv_offset)
                                sectors += (offset - bv->bv_offset) / sector_sz;
                        break;
                }

                sectors += bv->bv_len / sector_sz;
        }

        return sectors;
}
EXPORT_SYMBOL(bio_sector_offset);
/*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */
static int biovec_create_pools(struct bio_set *bs, int pool_entries)
{
        struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX;

        bs->bvec_pool = mempool_create_slab_pool(pool_entries, bp->slab);
        if (!bs->bvec_pool)
                return -ENOMEM;

        return 0;
}

static void biovec_free_pools(struct bio_set *bs)
{
        mempool_destroy(bs->bvec_pool);
}
void bioset_free(struct bio_set *bs)
{
        if (bs->bio_pool)
                mempool_destroy(bs->bio_pool);

        bioset_integrity_free(bs);
        biovec_free_pools(bs);
        bio_put_slab(bs);

        kfree(bs);
}
EXPORT_SYMBOL(bioset_free);
/**
 * bioset_create - Create a bio_set
 * @pool_size: Number of bio and bio_vecs to cache in the mempool
 * @front_pad: Number of bytes to allocate in front of the returned bio
 *
 * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
 * to ask for a number of bytes to be allocated in front of the bio.
 * Front pad allocation is useful for embedding the bio inside
 * another structure, to avoid allocating extra data to go with the bio.
 * Note that the bio must be embedded at the END of that structure always,
 * or things will break badly.
 **/
struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
{
        unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
        struct bio_set *bs;

        bs = kzalloc(sizeof(*bs), GFP_KERNEL);
        if (!bs)
                return NULL;

        bs->front_pad = front_pad;

        bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
        if (!bs->bio_slab) {
                kfree(bs);
                return NULL;
        }

        bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
        if (!bs->bio_pool)
                goto bad;

        if (bioset_integrity_create(bs, pool_size))
                goto bad;

        if (!biovec_create_pools(bs, pool_size))
                return bs;

bad:
        bioset_free(bs);
        return NULL;
}
EXPORT_SYMBOL(bioset_create);
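
/*
 * Illustrative sketch (not part of the original file, hence guarded out):
 * the front_pad embedding pattern described in the kernel-doc above.  A
 * private structure is laid out with the bio as its last member, the
 * bio_set is created with a matching front_pad, and container_of() recovers
 * the wrapper from the bio returned by bio_alloc_bioset().  struct
 * example_io, example_bio_set and the helpers are hypothetical names.
 */
#if 0
struct example_io {
        void *private_state;
        struct bio bio;         /* must be the last member */
};

static struct bio_set *example_bio_set;

static void example_bio_destructor(struct bio *bio)
{
        bio_free(bio, example_bio_set);
}

static int example_bioset_init(void)
{
        example_bio_set = bioset_create(64, offsetof(struct example_io, bio));
        return example_bio_set ? 0 : -ENOMEM;
}

static struct example_io *example_io_alloc(void)
{
        struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, example_bio_set);

        if (!bio)
                return NULL;

        /* per bio_alloc_bioset(), the caller supplies the destructor */
        bio->bi_destructor = example_bio_destructor;
        return container_of(bio, struct example_io, bio);
}
#endif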
static void __init biovec_init_slabs(void)
{
        int i;

        for (i = 0; i < BIOVEC_NR_POOLS; i++) {
                int size;
                struct biovec_slab *bvs = bvec_slabs + i;

#ifndef CONFIG_BLK_DEV_INTEGRITY
                if (bvs->nr_vecs <= BIO_INLINE_VECS) {
                        bvs->slab = NULL;
                        continue;
                }
#endif

                size = bvs->nr_vecs * sizeof(struct bio_vec);
                bvs->slab = kmem_cache_create(bvs->name, size, 0,
                                              SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
        }
}
static int __init init_bio(void)
{
        bio_slab_max = 2;
        bio_slab_nr = 0;
        bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
        if (!bio_slabs)
                panic("bio: can't allocate bios\n");

        bio_integrity_init();
        biovec_init_slabs();

        fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
        if (!fs_bio_set)
                panic("bio: can't allocate bios\n");

        bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
                                                     sizeof(struct bio_pair));
        if (!bio_split_pool)
                panic("bio: can't create split pool\n");

        return 0;
}

subsys_initcall(init_bio);