/*
 * Copyright (C) 2002, Linus Torvalds.
 *
 * 04Jul2002	akpm@zip.com.au
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/wait.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/rwsem.h>
#include <asm/atomic.h>
/*
 * How many user pages to map in one call to get_user_pages().  This determines
 * the size of a structure on the stack.
 */
#define DIO_PAGES	64
/*
 * This code generally works in units of "dio_blocks".  A dio_block is
 * somewhere between the hard sector size and the filesystem block size.  It
 * is determined on a per-invocation basis.  When talking to the filesystem
 * we need to convert dio_blocks to fs_blocks by scaling the dio_block quantity
 * down by dio->blkfactor.  Similarly, fs-blocksize quantities are converted
 * to dio_block quantities by shifting left by blkfactor.  (Illustrative
 * helpers for these conversions follow the struct dio definition below.)
 *
 * If blkfactor is zero then the user's request was aligned to the filesystem's
 * blocksize.
 */
struct dio {
	/* BIO submission state */
	struct bio *bio;		/* bio under assembly */
	struct inode *inode;
	int rw;
	unsigned blkbits;		/* doesn't change */
	unsigned blkfactor;		/* When we're using an alignment which
					   is finer than the filesystem's soft
					   blocksize, this specifies how much
					   finer.  blkfactor=2 means 1/4-block
					   alignment.  Does not change */
	unsigned start_zero_done;	/* flag: sub-blocksize zeroing has
					   been performed at the start of a
					   fs block */
	int pages_in_io;		/* approximate total IO pages */
	sector_t block_in_file;		/* Current offset into the underlying
					   file in dio_block units. */
	unsigned blocks_available;	/* At block_in_file.  changes */
	sector_t final_block_in_request;/* doesn't change */
	unsigned first_block_in_page;	/* doesn't change.  Used only once */
	int boundary;			/* prev block is at a boundary */
	int reap_counter;		/* rate limit reaping */
	get_blocks_t *get_blocks;	/* block mapping function */
	sector_t final_block_in_bio;	/* current final block in bio + 1 */
	sector_t next_block_for_io;	/* next block to be put under IO,
					   in dio_blocks units */
	struct buffer_head map_bh;	/* last get_blocks() result */

	/*
	 * Deferred addition of a page to the dio.  These variables are
	 * private to dio_send_cur_page(), submit_page_section() and
	 * dio_bio_add_page().
	 */
	struct page *cur_page;		/* The page */
	unsigned cur_page_offset;	/* Offset into it, in bytes */
	unsigned cur_page_len;		/* Nr of bytes at cur_page_offset */
	sector_t cur_page_block;	/* Where it starts */

	/*
	 * Page fetching state.  These variables belong to dio_refill_pages().
	 */
	int curr_page;			/* changes */
	int total_pages;		/* doesn't change */
	unsigned long curr_user_address;/* changes */

	/*
	 * Page queue.  These variables belong to dio_refill_pages() and
	 * dio_get_page().
	 */
	struct page *pages[DIO_PAGES];	/* page buffer */
	unsigned head;			/* next page to process */
	unsigned tail;			/* last valid page + 1 */
	int page_errors;		/* errno from get_user_pages() */

	/* BIO completion state */
	atomic_t bio_count;		/* nr bios in flight */
	spinlock_t bio_list_lock;	/* protects bio_list */
	struct bio *bio_list;		/* singly linked via bi_private */
	struct task_struct *waiter;	/* waiting task (NULL if none) */
};
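
/*
 * Illustrative sketch, not part of the original file: the dio_block/fs_block
 * conversions described in the comment above struct dio, written out as
 * helpers.  The names fs_block_of() and dio_block_of() are hypothetical.
 * Example: with blkbits = 9 (512-byte dio_blocks) and i_blkbits = 12
 * (4096-byte fs blocks), blkfactor = 3, so dio_block 21 lies in fs_block
 * 21 >> 3 == 2, and fs_block 2 begins at dio_block 2 << 3 == 16.
 */
static inline sector_t fs_block_of(struct dio *dio, sector_t dio_block)
{
	return dio_block >> dio->blkfactor;	/* scale down to fs blocks */
}

static inline sector_t dio_block_of(struct dio *dio, sector_t fs_block)
{
	return fs_block << dio->blkfactor;	/* scale back up to dio_blocks */
}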
/*
 * How many pages are in the queue?
 */
static inline unsigned dio_pages_present(struct dio *dio)
{
	return dio->tail - dio->head;
}
/*
 * Go grab and pin some userspace pages.  Typically we'll get 64 at a time.
 */
static int dio_refill_pages(struct dio *dio)
{
	int ret;
	int nr_pages;

	nr_pages = min(dio->total_pages - dio->curr_page, DIO_PAGES);
	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(
		current,			/* Task for fault accounting */
		current->mm,			/* whose pages? */
		dio->curr_user_address,		/* Where from? */
		nr_pages,			/* How many pages? */
		dio->rw == READ,		/* Write to memory? */
		0,				/* force */
		&dio->pages[0],			/* Put results here */
		NULL);				/* vmas: not needed */
	up_read(&current->mm->mmap_sem);

	if (ret < 0 && dio->blocks_available && (dio->rw == WRITE)) {
		/*
		 * A memory fault, but the filesystem has some outstanding
		 * mapped blocks.  We need to use those blocks up to avoid
		 * leaking stale data in the file.
		 */
		if (dio->page_errors == 0)
			dio->page_errors = ret;
		dio->pages[0] = ZERO_PAGE(dio->curr_user_address);
		dio->head = 0;
		dio->tail = 1;
		ret = 0;
		goto out;
	}

	if (ret >= 0) {
		dio->curr_user_address += ret * PAGE_SIZE;
		dio->curr_page += ret;
		dio->head = 0;
		dio->tail = ret;
		ret = 0;
	}
out:
	return ret;
}
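
/*
 * Worked example, illustration only: with 4096-byte pages, a 1 MiB request
 * covers 256 user pages and is pinned in ceil(256 / DIO_PAGES) == 4 calls
 * to dio_refill_pages(), each mapping up to DIO_PAGES (64) pages, rather
 * than in 256 individual get_user_pages() calls.
 */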
/*
 * Get another userspace page.  Returns an ERR_PTR on error.  Pages are
 * buffered inside the dio so that we can call get_user_pages() against a
 * decent number of pages, less frequently.  To provide nicer use of the
 * L1 cache.
 */
static struct page *dio_get_page(struct dio *dio)
{
	if (dio_pages_present(dio) == 0) {
		int ret;

		ret = dio_refill_pages(dio);
		if (ret)
			return ERR_PTR(ret);
		BUG_ON(dio_pages_present(dio) == 0);
	}
	return dio->pages[dio->head++];
}
/*
 * The BIO completion handler simply queues the BIO up for the process-context
 * handler.
 *
 * During I/O bi_private points at the dio.  After I/O, bi_private is used to
 * implement a singly-linked list of completed BIOs, at dio->bio_list.
 */
static int dio_bio_end_io(struct bio *bio, unsigned int bytes_done, int error)
{
	struct dio *dio = bio->bi_private;
	unsigned long flags;

	if (bio->bi_size)
		return 1;

	spin_lock_irqsave(&dio->bio_list_lock, flags);
	bio->bi_private = dio->bio_list;
	dio->bio_list = bio;
	if (dio->waiter)
		wake_up_process(dio->waiter);
	spin_unlock_irqrestore(&dio->bio_list_lock, flags);
	return 0;
}
static int
dio_bio_alloc(struct dio *dio, struct block_device *bdev,
		sector_t first_sector, int nr_vecs)
{
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, nr_vecs);
	if (bio == NULL)
		return -ENOMEM;

	bio->bi_bdev = bdev;
	bio->bi_sector = first_sector;
	bio->bi_end_io = dio_bio_end_io;

	dio->bio = bio;
	return 0;
}
static void dio_bio_submit(struct dio *dio)
{
	struct bio *bio = dio->bio;

	bio->bi_private = dio;
	atomic_inc(&dio->bio_count);
	submit_bio(dio->rw, bio);

	dio->bio = NULL;
	dio->boundary = 0;
}
/*
 * Release any resources in case of a failure
 */
static void dio_cleanup(struct dio *dio)
{
	while (dio_pages_present(dio))
		page_cache_release(dio_get_page(dio));
}
/*
 * Wait for the next BIO to complete.  Remove it and return it.
 */
static struct bio *dio_await_one(struct dio *dio)
{
	unsigned long flags;
	struct bio *bio;

	spin_lock_irqsave(&dio->bio_list_lock, flags);
	while (dio->bio_list == NULL) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (dio->bio_list == NULL) {
			dio->waiter = current;
			spin_unlock_irqrestore(&dio->bio_list_lock, flags);
			blk_run_queues();
			io_schedule();
			spin_lock_irqsave(&dio->bio_list_lock, flags);
			dio->waiter = NULL;
		}
		set_current_state(TASK_RUNNING);
	}
	bio = dio->bio_list;
	dio->bio_list = bio->bi_private;
	spin_unlock_irqrestore(&dio->bio_list_lock, flags);
	return bio;
}
/*
 * Process one completed BIO.  No locks are held.
 */
static int dio_bio_complete(struct dio *dio, struct bio *bio)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec;
	int page_no;

	for (page_no = 0; page_no < bio->bi_vcnt; page_no++) {
		struct page *page = bvec[page_no].bv_page;

		if (dio->rw == READ)
			set_page_dirty(page);
		page_cache_release(page);
	}
	atomic_dec(&dio->bio_count);
	bio_put(bio);
	return uptodate ? 0 : -EIO;
}
/*
 * Wait on and process all in-flight BIOs.
 */
static int dio_await_completion(struct dio *dio)
{
	int ret = 0;

	while (atomic_read(&dio->bio_count)) {
		struct bio *bio = dio_await_one(dio);
		int ret2;

		ret2 = dio_bio_complete(dio, bio);
		if (ret == 0)
			ret = ret2;
	}
	return ret;
}
/*
 * A really large O_DIRECT read or write can generate a lot of BIOs.  So
 * to keep the memory consumption sane we periodically reap any completed BIOs
 * during the BIO generation phase.
 *
 * This also helps to limit the peak amount of pinned userspace memory.
 */
static int dio_bio_reap(struct dio *dio)
{
	int ret = 0;

	if (dio->reap_counter++ >= 64) {
		while (dio->bio_list) {
			unsigned long flags;
			struct bio *bio;

			spin_lock_irqsave(&dio->bio_list_lock, flags);
			bio = dio->bio_list;
			dio->bio_list = bio->bi_private;
			spin_unlock_irqrestore(&dio->bio_list_lock, flags);
			ret = dio_bio_complete(dio, bio);
		}
		dio->reap_counter = 0;
	}
	return ret;
}
/*
 * Call into the fs to map some more disk blocks.  We record the current number
 * of available blocks at dio->blocks_available.  These are in units of the
 * fs blocksize, (1 << inode->i_blkbits).
 *
 * The fs is allowed to map lots of blocks at once.  If it wants to do that,
 * it uses the passed inode-relative block number as the file offset, as usual.
 *
 * get_blocks() is passed the number of i_blkbits-sized blocks which direct_io
 * has remaining to do.  The fs should not map more than this number of blocks.
 *
 * If the fs has mapped a lot of blocks, it should populate bh->b_size to
 * indicate how much contiguous disk space has been made available at
 * bh->b_blocknr.
 *
 * If *any* of the mapped blocks are new, then the fs must set buffer_new().
 * This isn't very efficient...
 *
 * In the case of filesystem holes: the fs may return an arbitrarily-large
 * hole by returning an appropriate value in b_size and by clearing
 * buffer_mapped().  However the direct-io code will only process holes one
 * block at a time - it will repeatedly call get_blocks() as it walks the hole.
 */
static int get_more_blocks(struct dio *dio)
{
	int ret;
	struct buffer_head *map_bh = &dio->map_bh;
	sector_t fs_startblk;	/* Into file, in filesystem-sized blocks */
	unsigned long fs_count;	/* Number of filesystem-sized blocks */
	unsigned long dio_count;/* Number of dio_block-sized blocks */
	unsigned long blkmask;

	/*
	 * If there was a memory error and we've overwritten all the
	 * mapped blocks then we can now return that memory error
	 */
	ret = dio->page_errors;
	if (ret)
		return ret;

	BUG_ON(dio->block_in_file >= dio->final_block_in_request);
	fs_startblk = dio->block_in_file >> dio->blkfactor;
	dio_count = dio->final_block_in_request - dio->block_in_file;
	fs_count = dio_count >> dio->blkfactor;
	blkmask = (1 << dio->blkfactor) - 1;
	if (dio_count & blkmask)
		fs_count++;		/* round a partial fs block up */

	ret = (*dio->get_blocks)(dio->inode, fs_startblk, fs_count,
			map_bh, dio->rw == WRITE);
	return ret;
}
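
/*
 * Illustrative sketch, not part of the original file: a minimal get_blocks_t
 * obeying the contract described above, for a hypothetical filesystem whose
 * file blocks happen to map 1:1 onto disk blocks.  It maps everything the
 * caller asked for and reports the mapped size in b_size; a hole would
 * instead be described by leaving the buffer unmapped and setting b_size.
 * A blockdev-style setup would report b_size of PAGE_SIZE or more here to
 * permit fine-grained but large IOs (see do_direct_IO() below).
 */
static int example_get_blocks(struct inode *inode, sector_t iblock,
		unsigned long max_blocks, struct buffer_head *bh_result,
		int create)
{
	bh_result->b_bdev = inode->i_sb->s_bdev;
	bh_result->b_blocknr = iblock;	/* 1:1 mapping, for illustration */
	bh_result->b_size = max_blocks << inode->i_blkbits;
	set_buffer_mapped(bh_result);	/* set buffer_new() only when allocating */
	return 0;
}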
/*
 * There is no bio.  Make one now.
 */
static int dio_new_bio(struct dio *dio, sector_t blkno)
{
	sector_t sector;
	int ret, nr_pages;

	ret = dio_bio_reap(dio);
	if (ret)
		goto out;
	sector = blkno << (dio->blkbits - 9);
	nr_pages = min(dio->pages_in_io, bio_get_nr_vecs(dio->map_bh.b_bdev));
	BUG_ON(nr_pages <= 0);
	ret = dio_bio_alloc(dio, dio->map_bh.b_bdev, sector, nr_pages);
out:
	return ret;
}
/*
 * Attempt to put the current chunk of 'cur_page' into the current BIO.  If
 * that was successful then update final_block_in_bio and take a ref against
 * the just-added page.
 */
static int dio_bio_add_page(struct dio *dio)
{
	int ret;

	ret = bio_add_page(dio->bio, dio->cur_page,
			dio->cur_page_len, dio->cur_page_offset);
	if (ret == dio->cur_page_len) {
		dio->pages_in_io--;
		page_cache_get(dio->cur_page);
		dio->final_block_in_bio = dio->cur_page_block +
			(dio->cur_page_len >> dio->blkbits);
		ret = 0;
	} else {
		ret = 1;
	}
	return ret;
}
/*
 * Put cur_page under IO.  The section of cur_page which is described by
 * cur_page_offset,cur_page_len is put into a BIO.  The section of cur_page
 * starts on-disk at cur_page_block.
 *
 * We take a ref against the page here (on behalf of its presence in the bio).
 *
 * The caller of this function is responsible for removing cur_page from the
 * dio, and for dropping the refcount which came from that presence.
 */
static int dio_send_cur_page(struct dio *dio)
{
	int ret = 0;

	if (dio->bio) {
		/*
		 * See whether this new request is contiguous with the old
		 */
		if (dio->final_block_in_bio != dio->cur_page_block)
			dio_bio_submit(dio);
		/*
		 * Submit now if the underlying fs is about to perform a
		 * metadata read
		 */
		if (dio->boundary)
			dio_bio_submit(dio);
	}

	if (dio->bio == NULL) {
		ret = dio_new_bio(dio, dio->cur_page_block);
		if (ret)
			goto out;
	}

	if (dio_bio_add_page(dio) != 0) {
		dio_bio_submit(dio);
		ret = dio_new_bio(dio, dio->cur_page_block);
		if (ret == 0) {
			ret = dio_bio_add_page(dio);
			BUG_ON(ret != 0);
		}
	}
out:
	return ret;
}
/*
 * An autonomous function to put a chunk of a page under deferred IO.
 *
 * The caller doesn't actually know (or care) whether this piece of page is in
 * a BIO, or is under IO or whatever.  We just take care of all possible
 * situations here.  The separation between the logic of do_direct_IO() and
 * that of submit_page_section() is important for clarity.  Please don't break.
 *
 * The chunk of page starts on-disk at blocknr.
 *
 * We perform deferred IO, by recording the last-submitted page inside our
 * private part of the dio structure.  If possible, we just expand the IO
 * across that page here.
 *
 * If that doesn't work out then we put the old page into the bio and add this
 * page to the dio instead.
 */
static int
submit_page_section(struct dio *dio, struct page *page,
		unsigned offset, unsigned len, sector_t blocknr)
{
	int ret = 0;

	/*
	 * Can we just grow the current page's presence in the dio?
	 */
	if ((dio->cur_page == page) &&
	    (dio->cur_page_offset + dio->cur_page_len == offset) &&
	    (dio->cur_page_block +
			(dio->cur_page_len >> dio->blkbits) == blocknr)) {
		dio->cur_page_len += len;

		/*
		 * If dio->boundary then we want to schedule the IO now to
		 * avoid metadata seeks.
		 */
		if (dio->boundary) {
			ret = dio_send_cur_page(dio);
			page_cache_release(dio->cur_page);
			dio->cur_page = NULL;
		}
		goto out;
	}

	/*
	 * If there's a deferred page already there then send it.
	 */
	if (dio->cur_page) {
		ret = dio_send_cur_page(dio);
		page_cache_release(dio->cur_page);
		dio->cur_page = NULL;
		if (ret)
			goto out;
	}

	page_cache_get(page);		/* It is in dio */
	dio->cur_page = page;
	dio->cur_page_offset = offset;
	dio->cur_page_len = len;
	dio->cur_page_block = blocknr;
out:
	return ret;
}
/*
 * Clean any dirty buffers in the blockdev mapping which alias newly-created
 * file blocks.  Only called for S_ISREG files - blockdevs do not set
 * buffer_new
 */
static void clean_blockdev_aliases(struct dio *dio)
{
	unsigned i;

	for (i = 0; i < dio->blocks_available; i++) {
		unmap_underlying_metadata(dio->map_bh.b_bdev,
					dio->map_bh.b_blocknr + i);
	}
}
/*
 * If we are not writing the entire block and get_block() allocated
 * the block for us, we need to fill-in the unused portion of the
 * block with zeros.  This happens only if user-buffer, fileoffset or
 * io length is not filesystem block-size multiple.
 *
 * `end' is zero if we're doing the start of the IO, 1 at the end of the
 * IO.
 */
static void dio_zero_block(struct dio *dio, int end)
{
	unsigned dio_blocks_per_fs_block;
	unsigned this_chunk_blocks;	/* In dio_blocks */
	unsigned this_chunk_bytes;
	struct page *page;

	dio->start_zero_done = 1;
	if (!dio->blkfactor || !buffer_new(&dio->map_bh))
		return;

	dio_blocks_per_fs_block = 1 << dio->blkfactor;
	this_chunk_blocks = dio->block_in_file & (dio_blocks_per_fs_block - 1);

	if (!this_chunk_blocks)
		return;

	/*
	 * We need to zero out part of an fs block.  It is either at the
	 * beginning or the end of the fs block.
	 */
	if (end)
		this_chunk_blocks = dio_blocks_per_fs_block - this_chunk_blocks;

	this_chunk_bytes = this_chunk_blocks << dio->blkbits;

	page = ZERO_PAGE(dio->curr_user_address);
	if (submit_page_section(dio, page, 0, this_chunk_bytes,
				dio->next_block_for_io))
		return;

	dio->next_block_for_io += this_chunk_blocks;
}
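
/*
 * Worked example for dio_zero_block(), illustration only (the numbers are
 * not from the original file).  Take blkbits = 9 and a 4096-byte fs
 * blocksize, so blkfactor = 3 and dio_blocks_per_fs_block = 8.  A write
 * into a newly-allocated fs block which starts at block_in_file = 21 begins
 * 21 & 7 == 5 dio_blocks into that block, so the start-of-IO call (end == 0)
 * zeroes those first 5 dio_blocks (5 << 9 == 2560 bytes).  An IO which ends
 * 5 dio_blocks into a new fs block has its remaining 8 - 5 == 3 dio_blocks
 * (1536 bytes) zeroed by the end-of-IO call (end == 1).
 */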
/*
 * Walk the user pages, and the file, mapping blocks to disk and generating
 * a sequence of (page,offset,len,block) mappings.  These mappings are injected
 * into submit_page_section(), which takes care of the next stage of submission.
 *
 * Direct IO against a blockdev is different from a file.  Because we can
 * happily perform page-sized but 512-byte aligned IOs.  It is important that
 * blockdev IO be able to have fine alignment and large sizes.
 *
 * So what we do is to permit the ->get_blocks function to populate bh.b_size
 * with the size of IO which is permitted at this offset and this i_blkbits.
 *
 * For best results, the blockdev should be set up with 512-byte i_blkbits and
 * it should set b_size to PAGE_SIZE or more inside get_blocks().  This gives
 * fine alignment but still allows this function to work in PAGE_SIZE units.
 */
static int do_direct_IO(struct dio *dio)
{
	const unsigned blkbits = dio->blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	struct page *page;
	unsigned block_in_page;
	struct buffer_head *map_bh = &dio->map_bh;
	int ret = 0;

	/* The I/O can start at any block offset within the first page */
	block_in_page = dio->first_block_in_page;

	while (dio->block_in_file < dio->final_block_in_request) {
		page = dio_get_page(dio);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}

		while (block_in_page < blocks_per_page) {
			unsigned offset_in_page = block_in_page << blkbits;
			unsigned this_chunk_bytes;	/* # of bytes mapped */
			unsigned this_chunk_blocks;	/* # of blocks */
			unsigned u;

			if (dio->blocks_available == 0) {
				/*
				 * Need to go and map some more disk
				 */
				unsigned long blkmask;
				unsigned long dio_remainder;

				ret = get_more_blocks(dio);
				if (ret) {
					page_cache_release(page);
					goto out;
				}
				if (!buffer_mapped(map_bh))
					goto do_holes;

				dio->blocks_available =
						map_bh->b_size >> dio->blkbits;
				dio->next_block_for_io =
					map_bh->b_blocknr << dio->blkfactor;
				if (buffer_new(map_bh))
					clean_blockdev_aliases(dio);

				if (!dio->blkfactor)
					goto do_holes;

				blkmask = (1 << dio->blkfactor) - 1;
				dio_remainder = (dio->block_in_file & blkmask);

				/*
				 * If we are at the start of IO and that IO
				 * starts partway into a fs-block,
				 * dio_remainder will be non-zero.  If the IO
				 * is a read then we can simply advance the IO
				 * cursor to the first block which is to be
				 * read.  But if the IO is a write and the
				 * block was newly allocated we cannot do that;
				 * the start of the fs block must be zeroed out
				 * on-disk
				 */
				if (!buffer_new(map_bh))
					dio->next_block_for_io += dio_remainder;
				dio->blocks_available -= dio_remainder;
			}
do_holes:
			if (!buffer_mapped(map_bh)) {
				char *kaddr = kmap_atomic(page, KM_USER0);
				memset(kaddr + (block_in_page << blkbits),
						0, 1 << blkbits);
				flush_dcache_page(page);
				kunmap_atomic(kaddr, KM_USER0);
				dio->block_in_file++;
				block_in_page++;
				goto next_block;
			}

			/*
			 * If we're performing IO which has an alignment which
			 * is finer than the underlying fs, go check to see if
			 * we must zero out the start of this block.
			 */
			if (unlikely(dio->blkfactor && !dio->start_zero_done))
				dio_zero_block(dio, 0);

			/*
			 * Work out, in this_chunk_blocks, how much disk we
			 * can add to this page
			 */
			this_chunk_blocks = dio->blocks_available;
			u = (PAGE_SIZE - offset_in_page) >> blkbits;
			if (this_chunk_blocks > u)
				this_chunk_blocks = u;
			u = dio->final_block_in_request - dio->block_in_file;
			if (this_chunk_blocks > u)
				this_chunk_blocks = u;
			this_chunk_bytes = this_chunk_blocks << blkbits;
			BUG_ON(this_chunk_bytes == 0);

			dio->boundary = buffer_boundary(map_bh);
			ret = submit_page_section(dio, page, offset_in_page,
				this_chunk_bytes, dio->next_block_for_io);
			if (ret) {
				page_cache_release(page);
				goto out;
			}
			dio->next_block_for_io += this_chunk_blocks;

			dio->block_in_file += this_chunk_blocks;
			block_in_page += this_chunk_blocks;
			dio->blocks_available -= this_chunk_blocks;
next_block:
			if (dio->block_in_file > dio->final_block_in_request)
				BUG();
			if (dio->block_in_file == dio->final_block_in_request)
				break;
		}

		/* Drop the ref which was taken in get_user_pages() */
		page_cache_release(page);
		block_in_page = 0;
	}
out:
	return ret;
}
static ssize_t
direct_io_worker(int rw, struct inode *inode, const struct iovec *iov,
	loff_t offset, unsigned long nr_segs, unsigned blkbits,
	get_blocks_t get_blocks)
{
	unsigned long user_addr;
	int seg, ret2, ret = 0;
	struct dio dio;
	size_t bytes, tot_bytes = 0;

	dio.bio = NULL;
	dio.inode = inode;
	dio.rw = rw;
	dio.blkbits = blkbits;
	dio.blkfactor = inode->i_blkbits - blkbits;
	dio.start_zero_done = 0;
	dio.block_in_file = offset >> blkbits;
	dio.blocks_available = 0;

	dio.boundary = 0;
	dio.reap_counter = 0;
	dio.get_blocks = get_blocks;
	dio.final_block_in_bio = -1;
	dio.next_block_for_io = -1;

	dio.cur_page = NULL;
	dio.page_errors = 0;

	/* BIO completion state */
	atomic_set(&dio.bio_count, 0);
	spin_lock_init(&dio.bio_list_lock);
	dio.bio_list = NULL;
	dio.waiter = NULL;

	dio.pages_in_io = 0;
	for (seg = 0; seg < nr_segs; seg++)
		dio.pages_in_io += (iov[seg].iov_len >> blkbits) + 2;

	for (seg = 0; seg < nr_segs; seg++) {
		user_addr = (unsigned long)iov[seg].iov_base;
		bytes = iov[seg].iov_len;

		/* Index into the first page of the first block */
		dio.first_block_in_page = (user_addr & (PAGE_SIZE - 1)) >> blkbits;
		dio.final_block_in_request = dio.block_in_file + (bytes >> blkbits);

		/* Page fetching state */
		dio.head = 0;
		dio.tail = 0;
		dio.curr_page = 0;

		dio.total_pages = 0;
		if (user_addr & (PAGE_SIZE-1)) {
			dio.total_pages++;
			bytes -= PAGE_SIZE - (user_addr & (PAGE_SIZE - 1));
		}
		dio.total_pages += (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
		dio.curr_user_address = user_addr;

		ret = do_direct_IO(&dio);
		if (ret) {
			dio_cleanup(&dio);
			break;
		}

		tot_bytes += iov[seg].iov_len - ((dio.final_block_in_request -
					dio.block_in_file) << blkbits);

	} /* end iovec loop */

	/*
	 * There may be some unwritten disk at the end of a part-written
	 * fs-block-sized block.  Go zero that now.
	 */
	dio_zero_block(&dio, 1);

	if (dio.cur_page) {
		ret2 = dio_send_cur_page(&dio);
		page_cache_release(dio.cur_page);
		if (ret == 0)
			ret = ret2;
	}
	if (dio.bio)
		dio_bio_submit(&dio);
	ret2 = dio_await_completion(&dio);
	if (ret == 0)
		ret = ret2;
	if (ret == 0)
		ret = dio.page_errors;
	if (ret == 0)
		ret = tot_bytes;
	return ret;
}
/*
 * This is a library function for use by filesystem drivers.
 */
ssize_t
generic_direct_IO(int rw, struct inode *inode, struct block_device *bdev,
	const struct iovec *iov, loff_t offset, unsigned long nr_segs,
	get_blocks_t get_blocks)
{
	int seg;
	size_t size;
	unsigned long addr;
	unsigned blkbits = inode->i_blkbits;
	unsigned bdev_blkbits = 0;
	unsigned blocksize_mask = (1 << blkbits) - 1;
	ssize_t retval = -EINVAL;

	if (bdev)
		bdev_blkbits = blksize_bits(bdev_hardsect_size(bdev));

	if (offset & blocksize_mask) {
		if (bdev)
			blkbits = bdev_blkbits;
		blocksize_mask = (1 << blkbits) - 1;
		if (offset & blocksize_mask)
			goto out;
	}

	/* Check the memory alignment.  Blocks cannot straddle pages */
	for (seg = 0; seg < nr_segs; seg++) {
		addr = (unsigned long)iov[seg].iov_base;
		size = iov[seg].iov_len;
		if ((addr & blocksize_mask) || (size & blocksize_mask)) {
			if (bdev)
				blkbits = bdev_blkbits;
			blocksize_mask = (1 << blkbits) - 1;
			if ((addr & blocksize_mask) || (size & blocksize_mask))
				goto out;
		}
	}

	retval = direct_io_worker(rw, inode, iov, offset,
				nr_segs, blkbits, get_blocks);
out:
	return retval;
}
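
/*
 * Illustrative sketch, not part of the original file: how a filesystem
 * driver would typically use the library function above.  example_direct_IO()
 * and example_get_blocks() (sketched after get_more_blocks() above) are
 * hypothetical; a real filesystem supplies its own get_blocks and wires a
 * function like this into its address_space_operations, so that
 * generic_file_direct_IO() below can reach it via a_ops->direct_IO.
 */
static ssize_t
example_direct_IO(int rw, struct file *file, const struct iovec *iov,
	loff_t offset, unsigned long nr_segs)
{
	struct inode *inode = file->f_dentry->d_inode;

	return generic_direct_IO(rw, inode, inode->i_sb->s_bdev,
				iov, offset, nr_segs, example_get_blocks);
}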
ssize_t
generic_file_direct_IO(int rw, struct file *file, const struct iovec *iov,
	loff_t offset, unsigned long nr_segs)
{
	struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
	ssize_t retval;

	if (mapping->nrpages) {
		retval = filemap_fdatawrite(mapping);
		if (retval == 0)
			retval = filemap_fdatawait(mapping);
		if (retval)
			goto out;
	}

	retval = mapping->a_ops->direct_IO(rw, file, iov, offset, nr_segs);
	if (rw == WRITE && mapping->nrpages)
		invalidate_inode_pages2(mapping);
out:
	return retval;
}