/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->data_len += bio->bi_size;
	}
	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     void __user *ubuf, unsigned int len)
{
	unsigned long uaddr;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * if alignment requirement is satisfied, map in user pages for
	 * direct dma. else, set up kernel bounce buffers
	 */
	uaddr = (unsigned long) ubuf;
	if (!(uaddr & queue_dma_alignment(q)) &&
	    !(len & queue_dma_alignment(q)))
		bio = bio_map_user(q, NULL, uaddr, len, reading);
	else
		bio = bio_copy_user(q, uaddr, len, reading);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request structure to fill
 * @ubuf:	the user buffer
 * @len:	length of user data
 *
 * Description:
 *    Data will be mapped directly for zero copy io, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of io, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    void __user *ubuf, unsigned long len)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !ubuf)
		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens we just lower the requested
		 * mapping len by a page so that we can fit
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;
	}

	rq->buffer = rq->data = NULL;
	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);

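/*
 * Illustrative sketch only, not part of this file's API: a hypothetical
 * helper (example_passthrough, with made-up parameters) showing how a
 * driver might use blk_rq_map_user() for a REQ_BLOCK_PC request. The
 * original rq->bio is saved right after mapping so it can be handed back
 * to blk_rq_unmap_user(), as the description above requires. cdb_len is
 * assumed to fit in rq->cmd.
 */
static int example_passthrough(struct request_queue *q, struct gendisk *disk,
			       unsigned char *cdb, unsigned int cdb_len,
			       void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	memcpy(rq->cmd, cdb, cdb_len);
	rq->cmd_len = cdb_len;
	rq->timeout = 60 * HZ;

	ret = blk_rq_map_user(q, rq, ubuf, len);
	if (ret) {
		blk_put_request(rq);
		return ret;
	}
	bio = rq->bio;			/* remember the original bio */

	blk_execute_rq(q, disk, rq, 0);	/* submit and wait for completion */

	ret = blk_rq_unmap_user(bio);	/* must still be in process context */
	if (!ret && rq->errors)
		ret = -EIO;
	blk_put_request(rq);
	return ret;
}
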
/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 *
 * Description:
 *    Data will be mapped directly for zero copy io, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of io, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct sg_iovec *iov, int iov_count, unsigned int len)
{
	struct bio *bio;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	/* we don't allow misaligned data like bio_map_user() does. If the
	 * user is using sg, they're expected to know the alignment constraints
	 * and respect them accordingly */
	bio = bio_map_user_iov(q, NULL, iov, iov_count,
			       rq_data_dir(rq) == READ);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		bio_endio(bio, 0);
		bio_unmap_user(bio);
		return -EINVAL;
	}

	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

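/*
 * Illustrative sketch only: a hypothetical wrapper (example_map_iov) for a
 * caller that has already copied in and validated an sg_iovec array, e.g.
 * from an SG_IO-style ioctl. Unlike blk_rq_map_user(), this path has no
 * bounce-copy fallback, so each segment must already satisfy the queue's
 * DMA alignment.
 */
static int example_map_iov(struct request_queue *q, struct request *rq,
			   struct sg_iovec *iov, int iov_count,
			   unsigned int total_len)
{
	int ret;

	ret = blk_rq_map_user_iov(q, rq, iov, iov_count, total_len);
	if (ret)
		return ret;	/* e.g. -EINVAL if the iovec lengths don't sum to total_len */

	/*
	 * rq->bio now holds the mapped data; hand the original bio to
	 * blk_rq_unmap_user() once the request has completed.
	 */
	return 0;
}
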
/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the io completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	bio = bio_map_kern(q, kbuf, len, gfp_mask);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);

	blk_rq_bio_prep(q, rq, bio);
	blk_queue_bounce(q, &rq->bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);

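/*
 * Illustrative sketch only: a hypothetical helper (example_map_kern) that
 * issues a passthrough request against a kernel-space buffer. With
 * blk_rq_map_kern() there is no matching unmap call, since the buffer was
 * never mapped from user space; the command bytes would be filled in by
 * the real caller.
 */
static int example_map_kern(struct request_queue *q, struct gendisk *disk,
			    void *buffer, unsigned int buflen)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->timeout = 60 * HZ;
	/* rq->cmd[], rq->cmd_len etc. would be set up here */

	ret = blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO);
	if (ret) {
		blk_put_request(rq);
		return ret;
	}

	blk_execute_rq(q, disk, rq, 0);	/* submit and wait for completion */
	ret = rq->errors ? -EIO : 0;

	blk_put_request(rq);
	return ret;
}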