/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

static int dax_is_pmd_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
	return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_HZP;
}

static int dax_is_empty_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_EMPTY;
}

/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
		pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);

	key->mapping = mapping;
	key->entry_start = index;

	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

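/*
 * Illustrative note (not in the original source): for a PMD entry covering
 * page offsets 0x200-0x3ff, every index in that range is rounded down to
 * 0x200 above, so all waiters on that huge entry hash to the same wait_table
 * bucket.  Unrelated entries that happen to collide in the 4096-slot table
 * merely share a waitqueue head; wake_exceptional_entry_func() below filters
 * wakeups by the (mapping, entry_start) key.
 */
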
static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
		unsigned int mode, int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * Check whether the given slot is locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Mark the given slot as unlocked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Look up an entry in the radix tree, wait for it to become unlocked if it
 * is an exceptional entry, and return it.  The caller must call
 * put_unlocked_mapping_entry() if it decided not to lock the entry or
 * put_locked_mapping_entry() if it locked the entry and now wants to
 * unlock it.
 *
 * The function must be called with mapping->tree_lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
		pgoff_t index, void ***slotp)
{
	void *entry, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
					    &slot);
		if (!entry || !radix_tree_exceptional_entry(entry) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return entry;
		}

		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mapping->tree_lock);
		schedule();
		finish_wait(wq, &ewait.wait);
		spin_lock_irq(&mapping->tree_lock);
	}
}

static void dax_unlock_mapping_entry(struct address_space *mapping,
				     pgoff_t index)
{
	void *entry, **slot;

	spin_lock_irq(&mapping->tree_lock);
	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
			 !slot_locked(mapping, slot))) {
		spin_unlock_irq(&mapping->tree_lock);
		return;
	}
	unlock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static void put_locked_mapping_entry(struct address_space *mapping,
				     pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry)) {
		unlock_page(entry);
		put_page(entry);
	} else {
		dax_unlock_mapping_entry(mapping, index);
	}
}

/*
 * Called when we are done with the radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry))
		return;

	/* We have to wake up next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

/*
 * Find the radix tree entry at the given index.  If it points to a page,
 * return with the page locked.  If it points to an exceptional entry, return
 * with the radix tree entry locked.  If the radix tree doesn't contain the
 * given index, create an empty exceptional entry for the index and return
 * with it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return an error.  This error will
 * happen if there are any 4k entries (either zero pages or DAX entries)
 * within the 2MiB range that we are requesting.
 *
 * We always favor 4k entries over 2MiB entries.  There isn't a flow where we
 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 * insertion will fail if it finds any 4k entries already in the tree, and a
 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
 * well as 2MiB empty entries.
 *
 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 * real storage backing them.  We will leave these real 2MiB DAX entries in
 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags.  For
 * persistent memory the benefit is doubtful.  We can add that later if we can
 * see a performance influence.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
		unsigned long size_flag)
{
	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
	void *entry, **slot;

restart:
	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);

	if (entry) {
		if (size_flag & RADIX_DAX_PMD) {
			if (!radix_tree_exceptional_entry(entry) ||
			    dax_is_pte_entry(entry)) {
				put_unlocked_mapping_entry(mapping, index,
						entry);
				entry = ERR_PTR(-EEXIST);
				goto out_unlock;
			}
		} else { /* trying to grab a PTE entry */
			if (radix_tree_exceptional_entry(entry) &&
			    dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	/* No entry for given index? Make sure radix tree is big enough. */
	if (!entry || pmd_downgrade) {
		int err;

		if (pmd_downgrade) {
			/*
			 * Make sure 'entry' remains valid while we drop
			 * mapping->tree_lock.
			 */
			entry = lock_slot(mapping, slot);
		}

		spin_unlock_irq(&mapping->tree_lock);
		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (pmd_downgrade && dax_is_zero_entry(entry))
			unmap_mapping_range(mapping,
				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);

		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err) {
			if (pmd_downgrade)
				put_locked_mapping_entry(mapping, index, entry);
			return ERR_PTR(err);
		}
		spin_lock_irq(&mapping->tree_lock);

		if (!entry) {
			/*
			 * We needed to drop the page_tree lock while calling
			 * radix_tree_preload() and we didn't have an entry to
			 * lock.  See if another thread inserted an entry at
			 * our index during this time.
			 */
			entry = __radix_tree_lookup(&mapping->page_tree, index,
					NULL, &slot);
			if (entry) {
				radix_tree_preload_end();
				spin_unlock_irq(&mapping->tree_lock);
				goto restart;
			}
		}

		if (pmd_downgrade) {
			radix_tree_delete(&mapping->page_tree, index);
			mapping->nrexceptional--;
			dax_wake_mapping_entry_waiter(mapping, index, entry,
					true);
		}

		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

		err = __radix_tree_insert(&mapping->page_tree, index,
				dax_radix_order(entry), entry);
		radix_tree_preload_end();
		if (err) {
			spin_unlock_irq(&mapping->tree_lock);
			/*
			 * Our insertion of a DAX entry failed, most likely
			 * because we were inserting a PMD entry and it
			 * collided with a PTE sized entry at a different
			 * index in the PMD range.  We haven't inserted
			 * anything into the radix tree and have no waiters to
			 * wake.
			 */
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		spin_unlock_irq(&mapping->tree_lock);
		return entry;
	}
	/* Normal page in radix tree? */
	if (!radix_tree_exceptional_entry(entry)) {
		struct page *page = entry;

		get_page(page);
		spin_unlock_irq(&mapping->tree_lock);
		lock_page(page);
		/* Page got truncated? Retry... */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			put_page(page);
			goto restart;
		}
		return page;
	}
	entry = lock_slot(mapping, slot);
 out_unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}

/*
 * We do not necessarily hold the mapping->tree_lock when we call this
 * function so it is possible that 'entry' is no longer a valid item in the
 * radix tree.  This is okay because all we really need to do is to find the
 * correct waitqueue where tasks might be waiting for that old 'entry' and
 * wake them.
 */
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(mapping, index, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under mapping->tree_lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

static int __dax_invalidate_mapping_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	int ret = 0;
	void *entry;
	struct radix_tree_root *page_tree = &mapping->page_tree;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || !radix_tree_exceptional_entry(entry))
		goto out;
	if (!trunc &&
	    (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
	     radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
		goto out;
	radix_tree_delete(page_tree, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_mapping_entry(mapping, index, entry);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Delete the exceptional DAX entry at @index from @mapping.  Wait for the
 * radix tree entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_mapping_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * radix tree (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen an exceptional entry for this index, we better find
	 * it at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate exceptional DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_mapping_entry(mapping, index, false);
}

/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, void **entry,
			 struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	struct page *page;
	int ret;

	/* Hole page already exists? Return it... */
	if (!radix_tree_exceptional_entry(*entry)) {
		page = *entry;
		goto finish_fault;
	}

	/* This will replace locked radix tree entry with a hole page */
	page = find_or_create_page(mapping, vmf->pgoff,
				   vmf->gfp_mask | __GFP_ZERO);
	if (!page) {
		ret = VM_FAULT_OOM;
		goto out;
	}

finish_fault:
	vmf->page = page;
	ret = finish_fault(vmf);
	vmf->page = NULL;
	*entry = page;
	if (!ret) {
		/* Grab reference for PTE that is now referencing the page */
		get_page(page);
		ret = VM_FAULT_NOPAGE;
	}
out:
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, struct page *to,
		unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	pfn_t pfn;
	long rc;
	int id;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, sector_t sector,
				      unsigned long flags)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	int error = 0;
	bool hole_fill = false;
	void *new_entry;
	pgoff_t index = vmf->pgoff;

	if (vmf->flags & FAULT_FLAG_WRITE)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	/* Replacing hole page with block mapping? */
	if (!radix_tree_exceptional_entry(entry)) {
		hole_fill = true;
		/*
		 * Unmap the page now before we remove it from page cache below.
		 * The page is locked so it cannot be faulted in again.
		 */
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
				    PAGE_SIZE, 0);
		error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
		if (error)
			return ERR_PTR(error);
	} else if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_HZP)) {
		/* replacing huge zero page with PMD block mapping */
		unmap_mapping_range(mapping,
			(vmf->pgoff << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
	}

	spin_lock_irq(&mapping->tree_lock);
	new_entry = dax_radix_locked_entry(sector, flags);

	if (hole_fill) {
		__delete_from_page_cache(entry, NULL);
		/* Drop pagecache reference */
		put_page(entry);
		error = __radix_tree_insert(page_tree, index,
				dax_radix_order(new_entry), new_entry);
		if (error) {
			new_entry = ERR_PTR(error);
			goto unlock;
		}
		mapping->nrexceptional++;
	} else if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the radix tree if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the tree, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		struct radix_tree_node *node;
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(page_tree, index, &node, &slot);
		WARN_ON_ONCE(ret != entry);
		__radix_tree_replace(page_tree, node, slot,
				     new_entry, NULL, NULL);
	}
	if (vmf->flags & FAULT_FLAG_WRITE)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	if (hole_fill) {
		radix_tree_preload_end();
		/*
		 * We don't need hole page anymore, it has been replaced with
		 * locked radix tree entry now.
		 */
		if (mapping->a_ops->freepage)
			mapping->a_ops->freepage(entry);
		unlock_page(entry);
		put_page(entry);
	}
	return new_entry;
}

static inline unsigned long
pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_mapping_entry_mkclean(struct address_space *mapping,
				      pgoff_t index, unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;
	bool changed;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		unsigned long address;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);
		changed = false;
		if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl))
			continue;

		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
			changed = true;
unlock_pmd:
			spin_unlock(ptl);
#endif
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
			changed = true;
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		if (changed)
			mmu_notifier_invalidate_page(vma->vm_mm, address);
	}
	i_mmap_unlock_read(mapping);
}

static int dax_writeback_one(struct block_device *bdev,
		struct dax_device *dax_dev, struct address_space *mapping,
		pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	void *entry2, **slot, *kaddr;
	long ret = 0, id;
	sector_t sector;
	pgoff_t pgoff;
	size_t size;
	pfn_t pfn;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong if we try to write it back.
	 */
	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
		return -EIO;

	spin_lock_irq(&mapping->tree_lock);
	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Entry got punched out / reallocated? */
	if (!entry2 || !radix_tree_exceptional_entry(entry2))
		goto put_unlocked;
	/*
	 * Entry got reallocated elsewhere? No need to writeback. We have to
	 * compare sectors as we must not bail out due to difference in lockbit
	 * or entry type.
	 */
	if (dax_radix_sector(entry2) != dax_radix_sector(entry))
		goto put_unlocked;
	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
				dax_is_zero_entry(entry))) {
		ret = -EIO;
		goto put_unlocked;
	}

	/* Another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto put_unlocked;
	/* Lock the entry to serialize with page faults */
	entry = lock_slot(mapping, slot);
	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under tree_lock and once they do that they will
	 * see the entry locked and wait for it to unlock.
	 */
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the sector we pull from
	 * 'entry'.  This allows us to flush for PMD_SIZE and not have to
	 * worry about partial PMD writebacks.
	 */
	sector = dax_radix_sector(entry);
	size = PAGE_SIZE << dax_radix_order(entry);

	id = dax_read_lock();
	ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (ret)
		goto dax_unlock;

	/*
	 * dax_direct_access() may sleep, so cannot hold tree_lock over
	 * its invocation.
	 */
	ret = dax_direct_access(dax_dev, pgoff, size / PAGE_SIZE, &kaddr, &pfn);
	if (ret < 0)
		goto dax_unlock;

	if (WARN_ON_ONCE(ret < size / PAGE_SIZE)) {
		ret = -EIO;
		goto dax_unlock;
	}

	dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
	dax_flush(dax_dev, pgoff, kaddr, size);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
	spin_unlock_irq(&mapping->tree_lock);
	trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
 dax_unlock:
	dax_read_unlock(id);
	put_locked_mapping_entry(mapping, index, entry);
	return ret;

 put_unlocked:
	put_unlocked_mapping_entry(mapping, index, entry2);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct dax_device *dax_dev;
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev)
		return -EIO;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;

	trace_dax_writeback_range(inode, start_index, end_index);

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec, 0);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, dax_dev, mapping,
					indices[i], pvec.pages[i]);
			if (ret < 0) {
				mapping_set_error(mapping, ret);
				goto out;
			}
		}
		start_index = indices[pvec.nr - 1] + 1;
	}
out:
	put_dax(dax_dev);
	trace_dax_writeback_range_done(inode, start_index, end_index);
	return (ret < 0 ? ret : 0);
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

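/*
 * Illustrative usage sketch (not part of the original fs/dax.c): a
 * filesystem's ->writepages() would call the helper above for DAX inodes
 * roughly as below; "example_dax_writepages" is a made-up name.
 *
 *	static int example_dax_writepages(struct address_space *mapping,
 *					  struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping,
 *				mapping->host->i_sb->s_bdev, wbc);
 *	}
 */
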
static int dax_insert_mapping(struct address_space *mapping,
		struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, void **entryp,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = vmf->address;
	void *entry = *entryp;
	void *ret, *kaddr;
	pgoff_t pgoff;
	int id, rc;
	pfn_t pfn;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	dax_read_unlock(id);

	ret = dax_insert_mapping_entry(mapping, vmf, entry, sector, 0);
	if (IS_ERR(ret))
		return PTR_ERR(ret);
	*entryp = ret;

	trace_dax_insert_mapping(mapping->host, vmf, ret);
	return vm_insert_mixed(vma, vaddr, pfn);
}

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	void *entry, **slot;
	pgoff_t index = vmf->pgoff;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);
	if (!entry || !radix_tree_exceptional_entry(entry)) {
		if (entry)
			put_unlocked_mapping_entry(mapping, index, entry);
		spin_unlock_irq(&mapping->tree_lock);
		trace_dax_pfn_mkwrite_no_entry(inode, vmf, VM_FAULT_NOPAGE);
		return VM_FAULT_NOPAGE;
	}
	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
	entry = lock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	/*
	 * If we race with somebody updating the PTE and finish_mkwrite_fault()
	 * fails, we don't care. We need to return VM_FAULT_NOPAGE and retry
	 * the fault in either case.
	 */
	finish_mkwrite_fault(vmf);
	put_locked_mapping_entry(mapping, index, entry);
	trace_dax_pfn_mkwrite(inode, vmf, VM_FAULT_NOPAGE);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);

static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int size)
{
	if (dax_range_is_aligned(bdev, offset, size)) {
		sector_t start_sector = sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				size >> 9, GFP_NOFS, 0);
	} else {
		pgoff_t pgoff;
		long rc, id;
		void *kaddr;
		pfn_t pfn;

		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
		if (rc)
			return rc;

		id = dax_read_lock();
		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr,
				&pfn);
		if (rc < 0) {
			dax_read_unlock(id);
			return rc;
		}
		memset(kaddr + offset, 0, size);
		dax_flush(dax_dev, pgoff, kaddr + offset, size);
		dax_read_unlock(id);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

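/*
 * Illustrative note (not in the original source): with a 512-byte logical
 * block size, zeroing offset 0 / size 4096 is block-aligned and is handed to
 * blkdev_issue_zeroout() as 8 sectors, while offset 100 / size 200 is not
 * aligned and takes the dax_direct_access() + memset() + dax_flush() path.
 */
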
static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
}

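/*
 * Illustrative note (not in the original source): iomap->blkno is in 512-byte
 * units, so for iomap->offset = 0x200000, iomap->blkno = 8192 and
 * pos = 0x201234, the page-aligned delta is 0x1000 bytes and the result is
 * 8192 + (0x1000 >> 9) = 8200.
 */
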
static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct block_device *bdev = iomap->bdev;
	struct dax_device *dax_dev = iomap->dax_dev;
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if (iomap->flags & IOMAP_F_NEW) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		const sector_t sector = dax_iomap_sector(iomap, pos);
		ssize_t map_len;
		pgoff_t pgoff;
		void *kaddr;
		pfn_t pfn;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
		if (ret)
			break;

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				&kaddr, &pfn);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		if (iov_iter_rw(iter) == WRITE)
			map_len = dax_copy_from_iter(dax_dev, pgoff, kaddr,
					map_len, iter);
		else
			map_len = copy_to_iter(kaddr, map_len, iter);
		if (map_len <= 0) {
			ret = map_len ? map_len : -EFAULT;
			break;
		}

		pos += map_len;
		length -= map_len;
		done += map_len;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_exclusive(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);

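/*
 * Illustrative usage sketch (not part of the original fs/dax.c): a
 * filesystem typically builds its DAX ->read_iter() on dax_iomap_rw(),
 * holding the inode lock shared for reads.  "example_iomap_ops" and
 * example_dax_read_iter() are placeholder names for the filesystem's own
 * iomap_ops and handler.
 *
 *	static ssize_t example_dax_read_iter(struct kiocb *iocb,
 *					     struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &example_iomap_ops);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */
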
static int dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

static int dax_iomap_pte_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	sector_t sector;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	int vmf_ret = 0;
	void *entry;

	trace_dax_pte_fault(inode, vmf, vmf_ret);
	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (pos >= i_size_read(inode)) {
		vmf_ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
	if (IS_ERR(entry)) {
		vmf_ret = dax_fault_return(PTR_ERR(entry));
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		vmf_ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	/*
	 * Note that we don't bother to use iomap_apply here: DAX requires
	 * the filesystem block size to be equal to the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (error) {
		vmf_ret = dax_fault_return(error);
		goto unlock_entry;
	}
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;	/* fs corruption? */
		goto error_finish_iomap;
	}

	sector = dax_iomap_sector(&iomap, pos);

	if (vmf->cow_page) {
		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
					sector, PAGE_SIZE, vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_finish_iomap;

		__SetPageUptodate(vmf->cow_page);
		vmf_ret = finish_fault(vmf);
		if (!vmf_ret)
			vmf_ret = VM_FAULT_DONE_COW;
		goto finish_iomap;
	}

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_insert_mapping(mapping, iomap.bdev, iomap.dax_dev,
				sector, PAGE_SIZE, &entry, vmf->vma, vmf);
		/* -EBUSY is fine, somebody else faulted on the same PTE */
		if (error == -EBUSY)
			error = 0;
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
			vmf_ret = dax_load_hole(mapping, &entry, vmf);
			goto finish_iomap;
		}
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

 error_finish_iomap:
	vmf_ret = dax_fault_return(error) | major;
 finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (vmf_ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PTE we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it anyway.
		 */
		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
	}
 unlock_entry:
	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
 out:
	trace_dax_pte_fault_done(inode, vmf, vmf_ret);
	return vmf_ret;
}

#ifdef CONFIG_FS_DAX_PMD
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below functions.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

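/*
 * Illustrative note (not in the original source): with 4k pages and 2MiB
 * PMDs, PG_PMD_COLOUR is 0x1ff.  A page offset is PMD-aligned when
 * (pgoff & PG_PMD_COLOUR) == 0, and (pgoff | PG_PMD_COLOUR) is the offset of
 * the last 4k page in that PMD, which is how dax_iomap_pmd_fault() checks
 * that a whole PMD fits below the end of the file.
 */
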
static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
		loff_t pos, void **entryp)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	const sector_t sector = dax_iomap_sector(iomap, pos);
	struct dax_device *dax_dev = iomap->dax_dev;
	struct block_device *bdev = iomap->bdev;
	struct inode *inode = mapping->host;
	const size_t size = PMD_SIZE;
	void *ret = NULL, *kaddr;
	long length = 0;
	pgoff_t pgoff;
	pfn_t pfn;
	int id;

	if (bdev_dax_pgoff(bdev, sector, size, &pgoff) != 0)
		goto fallback;

	id = dax_read_lock();
	length = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (length < 0)
		goto unlock_fallback;
	length = PFN_PHYS(length);

	if (length < size)
		goto unlock_fallback;
	if (pfn_t_to_pfn(pfn) & PG_PMD_COLOUR)
		goto unlock_fallback;
	if (!pfn_t_devmap(pfn))
		goto unlock_fallback;
	dax_read_unlock(id);

	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, sector,
			RADIX_DAX_PMD);
	if (IS_ERR(ret))
		goto fallback;
	*entryp = ret;

	trace_dax_pmd_insert_mapping(inode, vmf, length, pfn, ret);
	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
			pfn, vmf->flags & FAULT_FLAG_WRITE);

unlock_fallback:
	dax_read_unlock(id);
fallback:
	trace_dax_pmd_insert_mapping_fallback(inode, vmf, length, pfn, ret);
	return VM_FAULT_FALLBACK;
}

static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
		void **entryp)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct inode *inode = mapping->host;
	struct page *zero_page;
	void *ret = NULL;
	spinlock_t *ptl;
	pmd_t pmd_entry;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, 0,
			RADIX_DAX_PMD | RADIX_DAX_HZP);
	if (IS_ERR(ret))
		goto fallback;
	*entryp = ret;

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
	return VM_FAULT_NOPAGE;

fallback:
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
	return VM_FAULT_FALLBACK;
}

static int dax_iomap_pmd_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	int result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff, pgoff;
	void *entry;
	loff_t pos;
	int error;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	pgoff = linear_page_index(vma, pmd_addr);
	max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (pgoff > max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
	if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
	 * PMD or a HZP entry.  If it can't (because a 4k page is already in
	 * the tree, for instance), it will return -EEXIST and we just fall
	 * back to 4k entries.
	 */
	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
	if (IS_ERR(entry))
		goto fallback;

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
	    !pmd_devmap(*vmf->pmd)) {
		result = 0;
		goto unlock_entry;
	}

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)pgoff << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto unlock_entry;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	switch (iomap.type) {
	case IOMAP_MAPPED:
		result = dax_pmd_insert_mapping(vmf, &iomap, pos, &entry);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			break;
		result = dax_pmd_load_hole(vmf, &iomap, &entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

 finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PMD we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it anyway.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
 unlock_entry:
	put_locked_mapping_entry(mapping, pgoff, entry);
 fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}

#else
static int dax_iomap_pmd_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @ops: iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for page fault to proceed
 * successfully.
 */
int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);
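
/*
 * Illustrative usage sketch (not part of the original fs/dax.c): filesystems
 * wire this helper into their vm_operations_struct, usually wrapped in the
 * locking that serializes faults against truncate.  The example_* names and
 * example_iomap_ops are placeholders.
 *
 *	static int example_dax_fault(struct vm_fault *vmf)
 *	{
 *		return dax_iomap_fault(vmf, PE_SIZE_PTE, &example_iomap_ops);
 *	}
 *
 *	static int example_dax_huge_fault(struct vm_fault *vmf,
 *					  enum page_entry_size pe_size)
 *	{
 *		return dax_iomap_fault(vmf, pe_size, &example_iomap_ops);
 *	}
 */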