Skip to content

Commit f449b93

Browse files
jankaradjbw
authored and committed
dax: Finish fault completely when loading holes
The only case when we do not finish the page fault completely is when we are loading hole pages into a radix tree. Avoid this special case and finish the fault in that case as well inside the DAX fault handler. It will allow us for easier iomap handling. Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com> Signed-off-by: Jan Kara <jack@suse.cz> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
1 parent e3fce68 commit f449b93

File tree

1 file changed

+18
-9
lines changed

1 file changed

+18
-9
lines changed

fs/dax.c

Lines changed: 18 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -539,24 +539,34 @@ int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
539539
* otherwise it will simply fall out of the page cache under memory
540540
* pressure without ever having been dirtied.
541541
*/
542-
static int dax_load_hole(struct address_space *mapping, void *entry,
542+
static int dax_load_hole(struct address_space *mapping, void **entry,
543543
struct vm_fault *vmf)
544544
{
545545
struct page *page;
546+
int ret;
546547

547548
/* Hole page already exists? Return it... */
548-
if (!radix_tree_exceptional_entry(entry)) {
549-
vmf->page = entry;
550-
return VM_FAULT_LOCKED;
549+
if (!radix_tree_exceptional_entry(*entry)) {
550+
page = *entry;
551+
goto out;
551552
}
552553

553554
/* This will replace locked radix tree entry with a hole page */
554555
page = find_or_create_page(mapping, vmf->pgoff,
555556
vmf->gfp_mask | __GFP_ZERO);
556557
if (!page)
557558
return VM_FAULT_OOM;
559+
out:
558560
vmf->page = page;
559-
return VM_FAULT_LOCKED;
561+
ret = finish_fault(vmf);
562+
vmf->page = NULL;
563+
*entry = page;
564+
if (!ret) {
565+
/* Grab reference for PTE that is now referencing the page */
566+
get_page(page);
567+
return VM_FAULT_NOPAGE;
568+
}
569+
return ret;
560570
}
561571

562572
static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
@@ -1163,8 +1173,8 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
11631173
case IOMAP_UNWRITTEN:
11641174
case IOMAP_HOLE:
11651175
if (!(vmf->flags & FAULT_FLAG_WRITE)) {
1166-
vmf_ret = dax_load_hole(mapping, entry, vmf);
1167-
break;
1176+
vmf_ret = dax_load_hole(mapping, &entry, vmf);
1177+
goto finish_iomap;
11681178
}
11691179
/*FALLTHRU*/
11701180
default:
@@ -1185,8 +1195,7 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
11851195
}
11861196
}
11871197
unlock_entry:
1188-
if (vmf_ret != VM_FAULT_LOCKED || error)
1189-
put_locked_mapping_entry(mapping, vmf->pgoff, entry);
1198+
put_locked_mapping_entry(mapping, vmf->pgoff, entry);
11901199
out:
11911200
if (error == -ENOMEM)
11921201
return VM_FAULT_OOM | major;

0 commit comments

Comments
 (0)