/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#define BIO_POOL_SIZE 256

static mempool_t *bio_pool;
static kmem_cache_t *bio_slab;

#define BIOVEC_NR_POOLS 6

/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 8
mempool_t *bio_split_pool;
struct biovec_pool {
	int nr_vecs;
	char *name;
	kmem_cache_t *slab;
	mempool_t *pool;
};

/*
 * if you change this list, also change bvec_alloc or things will
 * break badly! cannot be bigger than what you can fit into an
 * unsigned short
 */
#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
static struct biovec_pool bvec_array[BIOVEC_NR_POOLS] = {
	BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
static inline struct bio_vec *bvec_alloc(int gfp_mask, int nr, unsigned long *idx)
{
	struct biovec_pool *bp;
	struct bio_vec *bvl;

	/*
	 * see comment near bvec_array define!
	 */
	switch (nr) {
		case   1        : *idx = 0; break;
		case   2 ...   4: *idx = 1; break;
		case   5 ...  16: *idx = 2; break;
		case  17 ...  64: *idx = 3; break;
		case  65 ... 128: *idx = 4; break;
		case 129 ... BIO_MAX_PAGES: *idx = 5; break;
		default:
			return NULL;
	}

	/*
	 * idx now points to the pool we want to allocate from
	 */
	bp = bvec_array + *idx;

	bvl = mempool_alloc(bp->pool, gfp_mask);
	if (bvl)
		memset(bvl, 0, bp->nr_vecs * sizeof(struct bio_vec));

	return bvl;
}
/*
 * default destructor for a bio allocated with bio_alloc()
 */
void bio_destructor(struct bio *bio)
{
	const int pool_idx = BIO_POOL_IDX(bio);
	struct biovec_pool *bp = bvec_array + pool_idx;

	BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);

	/*
	 * cloned bio doesn't own the veclist
	 */
	if (!bio_flagged(bio, BIO_CLONED))
		mempool_free(bio->bi_io_vec, bp->pool);

	mempool_free(bio, bio_pool);
}
inline void bio_init(struct bio *bio)
{
	bio->bi_next = NULL;
	bio->bi_flags = 1 << BIO_UPTODATE;
	bio->bi_rw = 0;
	bio->bi_vcnt = 0;
	bio->bi_idx = 0;
	bio->bi_phys_segments = 0;
	bio->bi_hw_segments = 0;
	bio->bi_hw_front_size = 0;
	bio->bi_hw_back_size = 0;
	bio->bi_size = 0;
	bio->bi_max_vecs = 0;
	bio->bi_end_io = NULL;
	atomic_set(&bio->bi_cnt, 1);
	bio->bi_private = NULL;
}
/**
 * bio_alloc - allocate a bio for I/O
 * @gfp_mask:   the GFP_ mask given to the slab allocator
 * @nr_iovecs:  number of iovecs to pre-allocate
 *
 * Description:
 *   bio_alloc will first try its own mempool to satisfy the allocation.
 *   If %__GFP_WAIT is set then we will block on the internal pool waiting
 *   for a &struct bio to become free.
 **/
struct bio *bio_alloc(int gfp_mask, int nr_iovecs)
{
	struct bio *bio = mempool_alloc(bio_pool, gfp_mask);

	if (likely(bio)) {
		struct bio_vec *bvl = NULL;

		bio_init(bio);
		if (likely(nr_iovecs)) {
			unsigned long idx;

			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx);
			if (unlikely(!bvl)) {
				mempool_free(bio, bio_pool);
				return NULL;
			}
			bio->bi_flags |= idx << BIO_POOL_OFFSET;
			bio->bi_max_vecs = bvec_array[idx].nr_vecs;
		}
		bio->bi_io_vec = bvl;
		bio->bi_destructor = bio_destructor;
	}

	return bio;
}
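
/*
 * Example usage (editor's sketch, not part of the original file; the
 * function name is hypothetical). GFP_NOIO is typical below the
 * filesystem layer; with __GFP_WAIT set, mempool_alloc blocks on the
 * internal pool rather than failing.
 */
static void example_alloc_one_vec(void)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	if (bio) {
		/* bio is initialized and owns one bio_vec from biovec-1 */
		bio_put(bio);	/* last put frees via bio->bi_destructor */
	}
}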
/**
 * bio_put - release a reference to a bio
 * @bio:   bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc or bio_get. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
	BIO_BUG_ON(!atomic_read(&bio->bi_cnt));

	/*
	 * last put frees it
	 */
	if (atomic_dec_and_test(&bio->bi_cnt)) {
		bio->bi_next = NULL;
		bio->bi_destructor(bio);
	}
}
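
/*
 * Editor's sketch (not in the original file; the function name is
 * hypothetical): a caller that must touch a bio after submission takes
 * its own reference first, since the last put may otherwise happen from
 * the completion path.
 */
static void example_get_then_submit(struct bio *bio)
{
	bio_get(bio);		/* completion may drop what would be the last ref */
	submit_bio(bio_data_dir(bio), bio);
	/* ... the bio cannot be freed under us here ... */
	bio_put(bio);		/* release our reference */
}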
inline int bio_phys_segments(request_queue_t *q, struct bio *bio)
{
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);

	return bio->bi_phys_segments;
}
inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
{
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);

	return bio->bi_hw_segments;
}
/**
 *	__bio_clone	-	clone a bio
 *	@bio: destination bio
 *	@bio_src: bio to clone
 *
 *	Clone a &bio. Caller will own the returned bio, but not
 *	the actual data it points to. Reference count of returned
 *	bio will be one.
 */
inline void __bio_clone(struct bio *bio, struct bio *bio_src)
{
	bio->bi_io_vec = bio_src->bi_io_vec;

	bio->bi_sector = bio_src->bi_sector;
	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_flags |= 1 << BIO_CLONED;
	bio->bi_rw = bio_src->bi_rw;

	/*
	 * notes -- maybe just leave bi_idx alone. assume identical mapping
	 * for the clone
	 */
	bio->bi_vcnt = bio_src->bi_vcnt;
	bio->bi_idx = bio_src->bi_idx;
	if (bio_flagged(bio, BIO_SEG_VALID)) {
		bio->bi_phys_segments = bio_src->bi_phys_segments;
		bio->bi_hw_segments = bio_src->bi_hw_segments;
		bio->bi_flags |= (1 << BIO_SEG_VALID);
	}
	bio->bi_size = bio_src->bi_size;

	/*
	 * cloned bio does not own the bio_vec, so users cannot fiddle with
	 * it. clear bi_max_vecs and clear the BIO_POOL_BITS to make this
	 * apparent
	 */
	bio->bi_max_vecs = 0;
	bio->bi_flags &= (BIO_POOL_MASK - 1);
}
/**
 *	bio_clone	-	clone a bio
 *	@bio: bio to clone
 *	@gfp_mask: allocation priority
 *
 *	Like __bio_clone, only also allocates the returned bio
 */
struct bio *bio_clone(struct bio *bio, int gfp_mask)
{
	struct bio *b = bio_alloc(gfp_mask, 0);

	if (b)
		__bio_clone(b, bio);

	return b;
}
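
/*
 * Editor's sketch (not in the original file; the function name is
 * hypothetical): a stacking driver in the md/dm style clones an incoming
 * bio and redirects the clone. The end_io wiring needed to complete the
 * original bio from the clone is elided here.
 */
static struct bio *example_redirect(struct bio *bio, struct block_device *bdev,
				    sector_t sector)
{
	struct bio *clone = bio_clone(bio, GFP_NOIO);

	if (clone) {
		/* clone shares bi_io_vec with bio and has BIO_CLONED set */
		clone->bi_bdev = bdev;
		clone->bi_sector = sector;
	}
	return clone;
}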
/**
 *	bio_get_nr_vecs		- return approx number of vecs
 *	@bdev:  I/O target
 *
 *	Return the approximate number of pages we can send to this target.
 *	There's no guarantee that you will be able to fit this number of pages
 *	into a bio, it does not account for dynamic restrictions that vary
 *	on offset of first page, queue restrictions, etc.
 */
int bio_get_nr_vecs(struct block_device *bdev)
{
	request_queue_t *q = bdev_get_queue(bdev);
	int nr_pages;

	nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (nr_pages > q->max_phys_segments)
		nr_pages = q->max_phys_segments;
	if (nr_pages > q->max_hw_segments)
		nr_pages = q->max_hw_segments;

	return nr_pages;
}
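
/*
 * Editor's sketch (not in the original file; names are hypothetical):
 * bio_get_nr_vecs() gives an upper bound for sizing the allocation;
 * bio_add_page() still decides, page by page, what actually fits.
 */
static struct bio *example_sized_alloc(struct block_device *bdev, int want_pages)
{
	int nr = bio_get_nr_vecs(bdev);

	if (nr > want_pages)
		nr = want_pages;

	return bio_alloc(GFP_KERNEL, nr);
}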
static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
			  *page, unsigned int len, unsigned int offset)
{
	int retried_segments = 0;
	struct bio_vec *bvec;

	/*
	 * cloned bio must not modify vec list
	 */
	if (unlikely(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return 0;

	if (((bio->bi_size + len) >> 9) > q->max_sectors)
		return 0;

	/*
	 * we might lose a segment or two here, but rather that than
	 * make this too complex.
	 */
	while (bio->bi_phys_segments >= q->max_phys_segments
	       || bio->bi_hw_segments >= q->max_hw_segments
	       || BIOVEC_VIRT_OVERSIZE(bio->bi_size)) {
		if (retried_segments)
			return 0;

		retried_segments = 1;
		blk_recount_segments(q, bio);
	}

	/*
	 * setup the new entry, we might clear it again later if we
	 * cannot add the page
	 */
	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;

	/*
	 * if queue has other restrictions (eg varying max sector size
	 * depending on offset), it can specify a merge_bvec_fn in the
	 * queue to get further control
	 */
	if (q->merge_bvec_fn) {
		/*
		 * merge_bvec_fn() returns number of bytes it can accept
		 * at this offset
		 */
		if (q->merge_bvec_fn(q, bio, bvec) < len) {
			bvec->bv_page = NULL;
			bvec->bv_len = 0;
			bvec->bv_offset = 0;
			return 0;
		}
	}

	/* If we may be able to merge these biovecs, force a recount */
	if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec) ||
	    BIOVEC_VIRT_MERGEABLE(bvec-1, bvec)))
		bio->bi_flags &= ~(1 << BIO_SEG_VALID);

	bio->bi_vcnt++;
	bio->bi_phys_segments++;
	bio->bi_hw_segments++;
	bio->bi_size += len;
	return len;
}
/**
 *	bio_add_page	-	attempt to add page to bio
 *	@bio: destination bio
 *	@page: page to add
 *	@len: vec entry length
 *	@offset: vec entry offset
 *
 *	Attempt to add a page to the bio_vec maplist. This can fail for a
 *	number of reasons, such as the bio being full or target block
 *	device limitations. The target block device must allow bios
 *	smaller than PAGE_SIZE, so it is always possible to add a single
 *	page to an empty bio.
 */
int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
		 unsigned int offset)
{
	return __bio_add_page(bdev_get_queue(bio->bi_bdev), bio, page,
			      len, offset);
}
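
/*
 * Editor's sketch (not in the original file; 'pages', 'nr_pages' and the
 * function name are hypothetical): a typical caller adds pages until
 * bio_add_page() stops accepting them, then submits whatever fit.
 */
static struct bio *example_build_bio(struct block_device *bdev, sector_t sector,
				     struct page **pages, int nr_pages)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, nr_pages);
	int i;

	if (!bio)
		return NULL;

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;

	for (i = 0; i < nr_pages; i++)
		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) < PAGE_SIZE)
			break;	/* bio full or queue limit hit */

	return bio;
}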
struct bio_map_data {
	struct bio_vec *iovecs;
	void __user *userptr;
};

static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio)
{
	memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
	bio->bi_private = bmd;
}

static void bio_free_map_data(struct bio_map_data *bmd)
{
	kfree(bmd->iovecs);
	kfree(bmd);
}

static struct bio_map_data *bio_alloc_map_data(int nr_segs)
{
	struct bio_map_data *bmd = kmalloc(sizeof(*bmd), GFP_KERNEL);

	if (!bmd)
		return NULL;

	bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, GFP_KERNEL);
	if (bmd->iovecs)
		return bmd;

	kfree(bmd);
	return NULL;
}
/**
 *	bio_uncopy_user	-	finish previously mapped bio
 *	@bio: bio being terminated
 *
 *	Free pages allocated from bio_copy_user() and write back data
 *	to user space in case of a read.
 */
int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	const int read = bio_data_dir(bio) == READ;
	struct bio_vec *bvec;
	int i, ret = 0;

	__bio_for_each_segment(bvec, bio, i, 0) {
		char *addr = page_address(bvec->bv_page);
		unsigned int len = bmd->iovecs[i].bv_len;

		if (read && !ret && copy_to_user(bmd->userptr, addr, len))
			ret = -EFAULT;

		__free_page(bvec->bv_page);
		bmd->userptr += len;
	}
	bio_free_map_data(bmd);
	bio_put(bio);
	return ret;
}
/**
 *	bio_copy_user	-	copy user data to bio
 *	@q: destination block queue
 *	@uaddr: start of user address
 *	@len: length in bytes
 *	@write_to_vm: bool indicating writing to pages or not
 *
 *	Prepares and returns a bio for indirect user io, bouncing data
 *	to/from kernel pages as necessary. Must be paired with a call to
 *	bio_uncopy_user() on io completion.
 */
struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
			  unsigned int len, int write_to_vm)
{
	unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = uaddr >> PAGE_SHIFT;
	struct bio_map_data *bmd;
	struct bio_vec *bvec;
	struct page *page;
	struct bio *bio;
	int i, ret = 0;

	bmd = bio_alloc_map_data(end - start);
	if (!bmd)
		return ERR_PTR(-ENOMEM);

	bmd->userptr = (void __user *) uaddr;

	bio = bio_alloc(GFP_KERNEL, end - start);
	if (!bio) {
		bio_free_map_data(bmd);
		return ERR_PTR(-ENOMEM);
	}

	bio->bi_rw |= (!write_to_vm << BIO_RW);

	while (len) {
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(q->bounce_gfp | GFP_KERNEL);
		if (!page) {
			ret = -ENOMEM;
			goto cleanup;
		}

		if (__bio_add_page(q, bio, page, bytes, 0) < bytes) {
			__free_page(page);
			ret = -EINVAL;
			goto cleanup;
		}

		len -= bytes;
	}

	if (!write_to_vm) {
		char __user *p = (char __user *) uaddr;

		/*
		 * for a write, copy in data to kernel pages
		 */
		ret = -EFAULT;
		bio_for_each_segment(bvec, bio, i) {
			char *addr = page_address(bvec->bv_page);

			if (copy_from_user(addr, p, bvec->bv_len))
				goto cleanup;
			p += bvec->bv_len;
		}
	}

	bio_set_map_data(bmd, bio);
	return bio;
cleanup:
	bio_for_each_segment(bvec, bio, i)
		__free_page(bvec->bv_page);

	bio_put(bio);
	bio_free_map_data(bmd);
	return ERR_PTR(ret);
}
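
/*
 * Editor's sketch (not in the original file; the function name is
 * hypothetical): the bounce-buffer path. bio_copy_user() allocates kernel
 * pages and copies user data in for a write; bio_uncopy_user() copies
 * back out for a read and frees the pages, so the two must be paired.
 */
static int example_bounced_io(request_queue_t *q, unsigned long uaddr,
			      unsigned int len, int reading)
{
	struct bio *bio = bio_copy_user(q, uaddr, len, reading);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* ... submit bio and wait for completion here ... */

	return bio_uncopy_user(bio);
}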
static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
				  unsigned long uaddr, unsigned int len,
				  int write_to_vm)
{
	unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = uaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	struct page **pages;
	struct bio *bio;
	int ret, offset, i;

	/*
	 * transfer and buffer must be aligned to at least hardsector
	 * size for now, in the future we can relax this restriction
	 */
	if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
		return ERR_PTR(-EINVAL);

	bio = bio_alloc(GFP_KERNEL, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	ret = -ENOMEM;
	pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto out;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, uaddr, nr_pages,
			     write_to_vm, 0, pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (ret < nr_pages) {
		for (i = 0; i < ret; i++)
			page_cache_release(pages[i]);
		ret = -EFAULT;
		goto out;
	}

	bio->bi_bdev = bdev;

	offset = uaddr & ~PAGE_MASK;
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (__bio_add_page(q, bio, pages[i], bytes, offset) < bytes)
			break;

		len -= bytes;
		offset = 0;
	}

	/*
	 * release the pages we didn't map into the bio, if any
	 */
	while (i < nr_pages)
		page_cache_release(pages[i++]);

	kfree(pages);

	/*
	 * set data direction, and check if mapped pages need bouncing
	 */
	if (!write_to_vm)
		bio->bi_rw |= (1 << BIO_RW);

	bio->bi_flags |= (1 << BIO_USER_MAPPED);
	return bio;
out:
	kfree(pages);
	bio_put(bio);
	return ERR_PTR(ret);
}
/**
 *	bio_map_user	-	map user address into bio
 *	@q: the request queue for the bio
 *	@bdev: destination block device
 *	@uaddr: start of user address
 *	@len: length in bytes
 *	@write_to_vm: bool indicating writing to pages or not
 *
 *	Map the user space address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
			 unsigned long uaddr, unsigned int len, int write_to_vm)
{
	struct bio *bio;

	bio = __bio_map_user(q, bdev, uaddr, len, write_to_vm);
	if (IS_ERR(bio))
		return bio;

	/*
	 * subtle -- if __bio_map_user() ended up bouncing a bio,
	 * it would normally disappear when its bi_end_io is run.
	 * however, we need it for the unmap, so grab an extra
	 * reference to it
	 */
	bio_get(bio);

	if (bio->bi_size == len)
		return bio;

	/*
	 * don't support partial mappings
	 */
	bio_endio(bio, bio->bi_size, 0);
	bio_unmap_user(bio);
	return ERR_PTR(-EINVAL);
}
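
/*
 * Editor's sketch (not in the original file; the function name is
 * hypothetical): zero-copy user io. The bio returned by bio_map_user()
 * carries an extra reference, so the caller unmaps (and thereby releases
 * the pinned pages) once the io is done.
 */
static int example_mapped_io(request_queue_t *q, struct block_device *bdev,
			     unsigned long uaddr, unsigned int len, int reading)
{
	struct bio *bio = bio_map_user(q, bdev, uaddr, len, reading);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* ... submit bio and wait for completion here ... */

	bio_unmap_user(bio);
	return 0;
}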
static void __bio_unmap_user(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	/*
	 * make sure we dirty pages we wrote to
	 */
	__bio_for_each_segment(bvec, bio, i, 0) {
		if (bio_data_dir(bio) == READ)
			set_page_dirty_lock(bvec->bv_page);

		page_cache_release(bvec->bv_page);
	}

	bio_put(bio);
}
/**
 *	bio_unmap_user	-	unmap a bio
 *	@bio:		the bio being unmapped
 *
 *	Unmap a bio previously mapped by bio_map_user(). Must be called from
 *	process context.
 *
 *	bio_unmap_user() may sleep.
 */
void bio_unmap_user(struct bio *bio)
{
	__bio_unmap_user(bio);
	bio_put(bio);
}
/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run set_page_dirty() from interrupt context
 * because the required locks are not interrupt-safe. So what we can do is to
 * mark the pages dirty _before_ performing IO. And in interrupt context,
 * check that the pages are still dirty. If so, fine. If not, redirty them
 * in process context.
 *
 * We special-case compound pages here: normally this means reads into hugetlb
 * pages. The logic in here doesn't really work right for compound pages
 * because the VM does not uniformly chase down the head page in all cases.
 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
 * handle them at all. So we skip compound pages here at an early stage.
 *
 * Note that this code is very hard to test under normal circumstances because
 * direct-io pins the pages with get_user_pages(). This makes
 * is_page_cache_freeable return false, and the VM will not clean the pages.
 * But other code (eg, pdflush) could clean the pages if they are mapped
 * pagecache.
 *
 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
 * deferred bio dirtying paths.
 */
/*
 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
 */
void bio_set_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	int i;

	for (i = 0; i < bio->bi_vcnt; i++) {
		struct page *page = bvec[i].bv_page;

		if (page && !PageCompound(page))
			set_page_dirty_lock(page);
	}
}
static void bio_release_pages(struct bio *bio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	int i;

	for (i = 0; i < bio->bi_vcnt; i++) {
		struct page *page = bvec[i].bv_page;

		if (page)
			put_page(page);
	}
}
/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
 * If they are, then fine. If, however, some pages are clean then they must
 * have been written out during the direct-IO read. So we take another ref on
 * the BIO and the offending pages and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
 * here on. It will run one page_cache_release() against each page and will
 * run one bio_put() against the BIO.
 */

static void bio_dirty_fn(void *data);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL);
static spinlock_t bio_dirty_lock = SPIN_LOCK_UNLOCKED;
static struct bio *bio_dirty_list;

/*
 * This runs in process context
 */
static void bio_dirty_fn(void *data)
{
	unsigned long flags;
	struct bio *bio;

	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);

	while (bio) {
		struct bio *next = bio->bi_private;

		bio_set_pages_dirty(bio);
		bio_release_pages(bio);
		bio_put(bio);
		bio = next;
	}
}
void bio_check_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	int nr_clean_pages = 0;
	int i;

	for (i = 0; i < bio->bi_vcnt; i++) {
		struct page *page = bvec[i].bv_page;

		if (PageDirty(page) || PageCompound(page)) {
			page_cache_release(page);
			bvec[i].bv_page = NULL;
		} else {
			nr_clean_pages++;
		}
	}

	if (nr_clean_pages) {
		unsigned long flags;

		spin_lock_irqsave(&bio_dirty_lock, flags);
		bio->bi_private = bio_dirty_list;
		bio_dirty_list = bio;
		spin_unlock_irqrestore(&bio_dirty_lock, flags);
		schedule_work(&bio_dirty_work);
	} else {
		bio_put(bio);
	}
}
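
/*
 * Editor's sketch (not in the original file; the function name and end_io
 * wiring are hypothetical): the deferred-dirtying pattern described above,
 * as a direct-IO read path would apply it. Pages are dirtied in process
 * context before submission; the completion side calls
 * bio_check_pages_dirty(), which re-dirties via the workqueue if needed
 * and consumes the bio.
 */
static void example_dio_read_submit(struct bio *bio)
{
	bio_set_pages_dirty(bio);	/* dirty in process context, pre-IO */
	submit_bio(READ, bio);
	/* the bi_end_io handler is assumed to run bio_check_pages_dirty(bio) */
}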
/**
 * bio_endio - end I/O on a bio
 * @bio:	bio
 * @bytes_done:	number of bytes completed
 * @error:	error, if any
 *
 * Description:
 *   bio_endio() will end I/O on @bytes_done number of bytes. This may be
 *   just a partial part of the bio, or it may be the whole bio. bio_endio()
 *   is the preferred way to end I/O on a bio, it takes care of decrementing
 *   bi_size and clearing BIO_UPTODATE on error. @error is 0 on success, and
 *   one of the established -Exxxx (-EIO, for instance) error values in
 *   case something went wrong. No one should call bi_end_io() directly on
 *   a bio unless they own it and thus know that it has an end_io function.
 **/
void bio_endio(struct bio *bio, unsigned int bytes_done, int error)
{
	if (error)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);

	if (unlikely(bytes_done > bio->bi_size)) {
		printk("%s: want %u bytes done, only %u left\n", __FUNCTION__,
						bytes_done, bio->bi_size);
		bytes_done = bio->bi_size;
	}

	bio->bi_size -= bytes_done;
	bio->bi_sector += (bytes_done >> 9);

	if (bio->bi_end_io)
		bio->bi_end_io(bio, bytes_done, error);
}
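
/*
 * Editor's sketch (not in the original file; the function name is
 * hypothetical): a minimal bi_end_io. Since bio_endio() may report partial
 * progress, a handler returns 1 until bi_size reaches zero and only then
 * does its real completion work (here, assuming bi_private points to a
 * struct completion the submitter is waiting on).
 */
static int example_end_io(struct bio *bio, unsigned int bytes_done, int err)
{
	if (bio->bi_size)
		return 1;	/* partial completion; wait for the rest */

	complete((struct completion *) bio->bi_private);
	return 0;
}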
void bio_pair_release(struct bio_pair *bp)
{
	if (atomic_dec_and_test(&bp->cnt)) {
		struct bio *master = bp->bio1.bi_private;

		bio_endio(master, master->bi_size, bp->error);
		mempool_free(bp, bp->bio2.bi_private);
	}
}

static int bio_pair_end_1(struct bio * bi, unsigned int done, int err)
{
	struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);

	if (err)
		bp->error = err;

	if (bi->bi_size)
		return 1;

	bio_pair_release(bp);
	return 0;
}

static int bio_pair_end_2(struct bio * bi, unsigned int done, int err)
{
	struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);

	if (err)
		bp->error = err;

	if (bi->bi_size)
		return 1;

	bio_pair_release(bp);
	return 0;
}
/*
 * split a bio - only worry about a bio with a single page
 * in its iovec
 */
struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
{
	struct bio_pair *bp = mempool_alloc(pool, GFP_NOIO);

	if (!bp)
		return bp;

	BUG_ON(bi->bi_vcnt != 1);
	BUG_ON(bi->bi_idx != 0);
	atomic_set(&bp->cnt, 3);
	bp->error = 0;
	bp->bio1 = *bi;
	bp->bio2 = *bi;
	bp->bio2.bi_sector += first_sectors;
	bp->bio2.bi_size -= first_sectors << 9;
	bp->bio1.bi_size = first_sectors << 9;

	bp->bv1 = bi->bi_io_vec[0];
	bp->bv2 = bi->bi_io_vec[0];
	bp->bv2.bv_offset += first_sectors << 9;
	bp->bv2.bv_len -= first_sectors << 9;
	bp->bv1.bv_len = first_sectors << 9;

	bp->bio1.bi_io_vec = &bp->bv1;
	bp->bio2.bi_io_vec = &bp->bv2;

	bp->bio1.bi_end_io = bio_pair_end_1;
	bp->bio2.bi_end_io = bio_pair_end_2;

	bp->bio1.bi_private = bi;
	bp->bio2.bi_private = pool;

	return bp;
}
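
/*
 * Editor's sketch (not in the original file; the function name is
 * hypothetical): how a driver straddling a device boundary (raid0-style)
 * uses bio_split(). The pair's count of 3 covers both halves' end_ios
 * plus this caller, so the master bio only completes after
 * bio_pair_release() has run three times.
 */
static void example_split_submit(struct bio *bio, sector_t boundary)
{
	struct bio_pair *bp = bio_split(bio, bio_split_pool,
					boundary - bio->bi_sector);

	if (bp) {
		submit_bio(bio_data_dir(bio), &bp->bio1);
		submit_bio(bio_data_dir(bio), &bp->bio2);
		bio_pair_release(bp);	/* drop the caller's reference */
	}
}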
static void *bio_pair_alloc(int gfp_flags, void *data)
{
	return kmalloc(sizeof(struct bio_pair), gfp_flags);
}

static void bio_pair_free(void *bp, void *data)
{
	kfree(bp);
}
static void __init biovec_init_pools(void)
{
	int i, size, megabytes, pool_entries = BIO_POOL_SIZE;
	int scale = BIOVEC_NR_POOLS;

	megabytes = nr_free_pages() >> (20 - PAGE_SHIFT);

	/*
	 * find out where to start scaling
	 */
	if (megabytes <= 16)
		scale = 0;
	else if (megabytes <= 32)
		scale = 1;
	else if (megabytes <= 64)
		scale = 2;
	else if (megabytes <= 96)
		scale = 3;
	else if (megabytes <= 128)
		scale = 4;

	/*
	 * scale number of entries
	 */
	pool_entries = megabytes * 2;
	if (pool_entries > 256)
		pool_entries = 256;

	for (i = 0; i < BIOVEC_NR_POOLS; i++) {
		struct biovec_pool *bp = bvec_array + i;

		size = bp->nr_vecs * sizeof(struct bio_vec);

		bp->slab = kmem_cache_create(bp->name, size, 0,
				SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

		if (i >= scale)
			pool_entries >>= 1;

		bp->pool = mempool_create(pool_entries, mempool_alloc_slab,
					mempool_free_slab, bp->slab);
		if (!bp->pool)
			panic("biovec: can't init mempool\n");
	}
}
static int __init init_bio(void)
{
	bio_slab = kmem_cache_create("bio", sizeof(struct bio), 0,
				SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	bio_pool = mempool_create(BIO_POOL_SIZE, mempool_alloc_slab,
				mempool_free_slab, bio_slab);
	if (!bio_pool)
		panic("bio: can't create mempool\n");

	biovec_init_pools();

	bio_split_pool = mempool_create(BIO_SPLIT_ENTRIES,
				bio_pair_alloc, bio_pair_free, NULL);
	if (!bio_split_pool)
		panic("bio: can't create split pool\n");

	return 0;
}

subsys_initcall(init_bio);
EXPORT_SYMBOL(bio_alloc);
EXPORT_SYMBOL(bio_put);
EXPORT_SYMBOL(bio_endio);
EXPORT_SYMBOL(bio_init);
EXPORT_SYMBOL(__bio_clone);
EXPORT_SYMBOL(bio_clone);
EXPORT_SYMBOL(bio_phys_segments);
EXPORT_SYMBOL(bio_hw_segments);
EXPORT_SYMBOL(bio_add_page);
EXPORT_SYMBOL(bio_get_nr_vecs);
EXPORT_SYMBOL(bio_map_user);
EXPORT_SYMBOL(bio_unmap_user);
EXPORT_SYMBOL(bio_pair_release);
EXPORT_SYMBOL(bio_split);
EXPORT_SYMBOL(bio_split_pool);
EXPORT_SYMBOL(bio_copy_user);
EXPORT_SYMBOL(bio_uncopy_user);