/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                      struct bio *bio)
{
        if (!rq->bio)
                blk_rq_bio_prep(q, rq, bio);
        else if (!ll_back_merge_fn(q, rq, bio))
                return -EINVAL;
        else {
                rq->biotail->bi_next = bio;
                rq->biotail = bio;

                rq->data_len += bio->bi_size;
        }

        return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

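/*
 * Illustrative sketch only, not part of this file's API: how a caller
 * might add one extra, already initialized bio to a partially built
 * request with blk_rq_append_bio().  The function name is made up for
 * illustration.
 */
static int __maybe_unused blk_append_one_bio_example(struct request_queue *q,
                                                     struct request *rq,
                                                     struct bio *bio)
{
        int ret;

        /*
         * blk_rq_append_bio() either makes this the request's first bio
         * or back-merges it; on success rq->data_len grows by bio->bi_size.
         */
        ret = blk_rq_append_bio(q, rq, bio);
        if (ret)
                return ret;     /* bio would exceed the queue's merge limits */

        return 0;
}
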
static int __blk_rq_unmap_user(struct bio *bio)
{
        int ret = 0;

        if (bio) {
                if (bio_flagged(bio, BIO_USER_MAPPED))
                        bio_unmap_user(bio);
                else
                        ret = bio_uncopy_user(bio);
        }

        return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
                             void __user *ubuf, unsigned int len)
{
        unsigned long uaddr;
        unsigned int alignment;
        struct bio *bio, *orig_bio;
        int reading, ret;

        reading = rq_data_dir(rq) == READ;

        /*
         * If the alignment requirement is satisfied, map in user pages for
         * direct dma; otherwise, set up kernel bounce buffers.
         */
        uaddr = (unsigned long) ubuf;
        alignment = queue_dma_alignment(q) | q->dma_pad_mask;
        if (!(uaddr & alignment) && !(len & alignment))
                bio = bio_map_user(q, NULL, uaddr, len, reading);
        else
                bio = bio_copy_user(q, uaddr, len, reading);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        orig_bio = bio;
        blk_queue_bounce(q, &bio);

        /*
         * We link the bounce buffer in and could have to traverse it
         * later, so we have to get a ref to prevent it from being freed.
         */
        bio_get(bio);

        ret = blk_rq_append_bio(q, rq, bio);
        if (!ret)
                return bio->bi_size;

        /* if it was bounced we must call the end io function */
        bio_endio(bio, 0);
        __blk_rq_unmap_user(orig_bio);
        bio_put(bio);
        return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request structure to fill
 * @ubuf:	the user buffer
 * @len:	length of user data
 *
 * Description:
 *    Data will be mapped directly for zero-copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
                    void __user *ubuf, unsigned long len)
{
        unsigned long bytes_read = 0;
        struct bio *bio = NULL;
        int ret;

        if (len > (q->max_hw_sectors << 9))
                return -EINVAL;
        if (!len || !ubuf)
                return -EINVAL;

        while (bytes_read != len) {
                unsigned long map_len, end, start;

                map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
                end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
                                                                >> PAGE_SHIFT;
                start = (unsigned long)ubuf >> PAGE_SHIFT;

                /*
                 * A bad offset could cause us to require BIO_MAX_PAGES + 1
                 * pages. If this happens, we just lower the requested
                 * mapping length by a page so that it fits.
                 */
                if (end - start > BIO_MAX_PAGES)
                        map_len -= PAGE_SIZE;

                ret = __blk_rq_map_user(q, rq, ubuf, map_len);
                if (ret < 0)
                        goto unmap_rq;
                if (!bio)
                        bio = rq->bio;
                bytes_read += ret;
                ubuf += ret;
        }

        /*
         * __blk_rq_map_user() copies the buffers if the starting address
         * or length isn't aligned to dma_pad_mask. As the copied buffer
         * is always page aligned, we know that there's enough room for
         * padding. Extend the last bio and update rq->data_len accordingly.
         *
         * On unmap, bio_uncopy_user() will use the unmodified bio_map_data
         * pointed to by bio->bi_private.
         */
        if (len & q->dma_pad_mask) {
                unsigned int pad_len = (q->dma_pad_mask & ~len) + 1;
                struct bio *tail = rq->biotail;

                tail->bi_io_vec[tail->bi_vcnt - 1].bv_len += pad_len;
                tail->bi_size += pad_len;

                rq->extra_len += pad_len;
        }

        rq->buffer = rq->data = NULL;
        return 0;
unmap_rq:
        blk_rq_unmap_user(bio);
        rq->bio = NULL;
        return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);

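/*
 * Illustrative sketch only, not part of this file's API: a typical
 * REQ_BLOCK_PC pattern around blk_rq_map_user().  The function name and
 * the disk argument are placeholders, and command setup (rq->cmd[],
 * rq->cmd_len, rq->timeout, sense data) is omitted for brevity.
 */
static int __maybe_unused blk_map_user_example(struct request_queue *q,
                                               struct gendisk *disk,
                                               void __user *ubuf,
                                               unsigned long len)
{
        struct request *rq;
        struct bio *bio;
        int ret;

        rq = blk_get_request(q, READ, GFP_KERNEL);
        if (!rq)
                return -ENOMEM;
        rq->cmd_type = REQ_TYPE_BLOCK_PC;

        ret = blk_rq_map_user(q, rq, ubuf, len);
        if (ret) {
                blk_put_request(rq);
                return ret;
        }

        /*
         * Remember the original bio: completion may advance rq->bio, and
         * blk_rq_unmap_user() must be handed the head of the mapped list.
         */
        bio = rq->bio;
        ret = blk_execute_rq(q, disk, rq, 0);

        if (blk_rq_unmap_user(bio))
                ret = ret ? ret : -EFAULT;
        blk_put_request(rq);
        return ret;
}
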
/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 *
 * Description:
 *    Data will be mapped directly for zero-copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                        struct sg_iovec *iov, int iov_count, unsigned int len)
{
        struct bio *bio;

        if (!iov || iov_count <= 0)
                return -EINVAL;

        /*
         * We don't allow misaligned data like bio_map_user() does. If the
         * user is using sg, they're expected to know the alignment
         * constraints and respect them accordingly.
         */
        bio = bio_map_user_iov(q, NULL, iov, iov_count,
                               rq_data_dir(rq) == READ);
        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (bio->bi_size != len) {
                bio_endio(bio, 0);
                bio_unmap_user(bio);
                return -EINVAL;
        }

        bio_get(bio);
        blk_rq_bio_prep(q, rq, bio);
        rq->buffer = rq->data = NULL;
        return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

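/*
 * Illustrative sketch only, not part of this file's API: using
 * blk_rq_map_user_iov() with an already copied-in sg_iovec.  Unlike
 * blk_rq_map_user(), misaligned segments are rejected rather than
 * bounced, so the iovec is assumed to respect the queue's dma alignment.
 * The function name is a placeholder and rq->rq_disk is assumed to be set.
 */
static int __maybe_unused blk_map_user_iov_example(struct request_queue *q,
                                                   struct request *rq,
                                                   struct sg_iovec *iov,
                                                   int iov_count,
                                                   unsigned int len)
{
        struct bio *bio;
        int ret;

        ret = blk_rq_map_user_iov(q, rq, iov, iov_count, len);
        if (ret)
                return ret;

        bio = rq->bio;                  /* keep the head for unmapping */
        ret = blk_execute_rq(q, rq->rq_disk, rq, 0);

        if (blk_rq_unmap_user(bio))
                ret = ret ? ret : -EFAULT;
        return ret;
}
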
/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
        struct bio *mapped_bio;
        int ret = 0, ret2;

        while (bio) {
                mapped_bio = bio;
                if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
                        mapped_bio = bio->bi_private;

                ret2 = __blk_rq_unmap_user(mapped_bio);
                if (ret2 && !ret)
                        ret = ret2;

                mapped_bio = bio;
                bio = bio->bi_next;
                bio_put(mapped_bio);
        }

        return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                    unsigned int len, gfp_t gfp_mask)
{
        struct bio *bio;

        if (len > (q->max_hw_sectors << 9))
                return -EINVAL;
        if (!len || !kbuf)
                return -EINVAL;

        bio = bio_map_kern(q, kbuf, len, gfp_mask);
        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (rq_data_dir(rq) == WRITE)
                bio->bi_rw |= (1 << BIO_RW);

        blk_rq_bio_prep(q, rq, bio);
        blk_queue_bounce(q, &rq->bio);
        rq->buffer = rq->data = NULL;
        return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);

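/*
 * Illustrative sketch only, not part of this file's API: mapping a kernel
 * buffer with blk_rq_map_kern() and running the request synchronously.
 * The function name and disk argument are placeholders; command setup is
 * omitted.  Note there is no unmap step: the bio built by bio_map_kern()
 * is released when the request completes.
 */
static int __maybe_unused blk_map_kern_example(struct request_queue *q,
                                               struct gendisk *disk,
                                               void *kbuf, unsigned int len)
{
        struct request *rq;
        int ret;

        rq = blk_get_request(q, READ, GFP_KERNEL);
        if (!rq)
                return -ENOMEM;
        rq->cmd_type = REQ_TYPE_BLOCK_PC;

        ret = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
        if (!ret)
                ret = blk_execute_rq(q, disk, rq, 0);

        blk_put_request(rq);
        return ret;
}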