
Commit f5b7b74

jankara authored and djbw committed
dax: Allow tuning whether dax_insert_mapping_entry() dirties entry
Currently we dirty the radix tree entry whenever dax_insert_mapping_entry() gets called for a write fault. With synchronous page faults we would like to insert a clean radix tree entry and dirty it only once we call fdatasync() and update page tables, to save some unnecessary cache flushing. Add a 'dirty' argument to dax_insert_mapping_entry() for that.

Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
1 parent 9a0dd42 commit f5b7b74
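
As context for the diff below, here is a minimal sketch of the new calling convention, written as a standalone userspace C model rather than kernel code (struct toy_entry and toy_insert_mapping_entry() are invented stand-ins for the radix tree entry and dax_insert_mapping_entry()). The point of the change is that the caller, rather than vmf->flags & FAULT_FLAG_WRITE, now decides whether the freshly inserted entry is tagged dirty:

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for a radix tree entry and its dirty tag. */
struct toy_entry {
	bool dirty;
};

/*
 * Models the new contract: the caller passes 'dirty' explicitly
 * instead of the function inferring it from FAULT_FLAG_WRITE.
 */
static void toy_insert_mapping_entry(struct toy_entry *entry, bool dirty)
{
	entry->dirty = dirty;	/* models the PAGECACHE_TAG_DIRTY tagging */
	printf("entry inserted, dirty=%d\n", entry->dirty);
}

int main(void)
{
	struct toy_entry pte_entry, hole_entry;
	bool write = true;

	/* The PTE/PMD fault paths in this commit pass 'write', so
	 * behaviour is unchanged for ordinary faults. */
	toy_insert_mapping_entry(&pte_entry, write);

	/* The zero-page/hole paths pass false; holes are loaded only
	 * for read faults, so false also preserves behaviour there. */
	toy_insert_mapping_entry(&hole_entry, false);

	/*
	 * The motivation (not code in this diff): a synchronous page
	 * fault would also pass false, inserting a clean entry, and
	 * only fdatasync() would set the dirty tag when it updates
	 * the page tables.
	 */
	return 0;
}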

File tree

1 file changed: +7 -7 lines changed

fs/dax.c

Lines changed: 7 additions & 7 deletions
@@ -526,13 +526,13 @@ static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
 static void *dax_insert_mapping_entry(struct address_space *mapping,
                                       struct vm_fault *vmf,
                                       void *entry, sector_t sector,
-                                      unsigned long flags)
+                                      unsigned long flags, bool dirty)
 {
        struct radix_tree_root *page_tree = &mapping->page_tree;
        void *new_entry;
        pgoff_t index = vmf->pgoff;
 
-       if (vmf->flags & FAULT_FLAG_WRITE)
+       if (dirty)
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 
        if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
@@ -569,7 +569,7 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
                entry = new_entry;
        }
 
-       if (vmf->flags & FAULT_FLAG_WRITE)
+       if (dirty)
                radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
 
        spin_unlock_irq(&mapping->tree_lock);
@@ -881,7 +881,7 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
        }
 
        entry2 = dax_insert_mapping_entry(mapping, vmf, entry, 0,
-                       RADIX_DAX_ZERO_PAGE);
+                       RADIX_DAX_ZERO_PAGE, false);
        if (IS_ERR(entry2)) {
                ret = VM_FAULT_SIGBUS;
                goto out;
@@ -1182,7 +1182,7 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 
                entry = dax_insert_mapping_entry(mapping, vmf, entry,
                                                 dax_iomap_sector(&iomap, pos),
-                                                0);
+                                                0, write);
                if (IS_ERR(entry)) {
                        error = PTR_ERR(entry);
                        goto error_finish_iomap;
@@ -1258,7 +1258,7 @@ static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
                goto fallback;
 
        ret = dax_insert_mapping_entry(mapping, vmf, entry, 0,
-                       RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE);
+                       RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false);
        if (IS_ERR(ret))
                goto fallback;
 
@@ -1379,7 +1379,7 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 
                entry = dax_insert_mapping_entry(mapping, vmf, entry,
                                                 dax_iomap_sector(&iomap, pos),
-                                                RADIX_DAX_PMD);
+                                                RADIX_DAX_PMD, write);
                if (IS_ERR(entry))
                        goto finish_iomap;
 