Commit 2a1180f

josefbacik authored and torvalds committed
filemap: pass vm_fault to the mmap ra helpers
All of the arguments to these functions come from the vmf. Cut down on
the amount of arguments passed by simply passing in the vmf to these
two helpers.

Link: http://lkml.kernel.org/r/20181211173801.29535-3-josef@toxicpanda.com
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent ebc551f commit 2a1180f
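
In short, each helper now recovers its former parameters from the vmf it is
handed. Condensed from the hunks below (the readahead logic itself is
unchanged and elided here):

static void do_sync_mmap_readahead(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;		/* was the 'file' argument */
	struct file_ra_state *ra = &file->f_ra;		/* was the 'ra' argument */
	struct address_space *mapping = file->f_mapping;
	pgoff_t offset = vmf->pgoff;			/* was the 'offset' argument */

	/* ... sync readahead logic, unchanged ... */
}

static void do_async_mmap_readahead(struct vm_fault *vmf,
				    struct page *page)
{
	/* same derivations as above; 'page' is still passed explicitly
	 * because the caller has just looked it up in the page cache */

	/* ... async readahead logic, unchanged ... */
}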

File tree

1 file changed: +14 −14 lines


mm/filemap.c

Lines changed: 14 additions & 14 deletions
@@ -2420,20 +2420,20 @@ static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
  * Synchronous readahead happens when we don't even find
  * a page in the page cache at all.
  */
-static void do_sync_mmap_readahead(struct vm_area_struct *vma,
-				   struct file_ra_state *ra,
-				   struct file *file,
-				   pgoff_t offset)
+static void do_sync_mmap_readahead(struct vm_fault *vmf)
 {
+	struct file *file = vmf->vma->vm_file;
+	struct file_ra_state *ra = &file->f_ra;
 	struct address_space *mapping = file->f_mapping;
+	pgoff_t offset = vmf->pgoff;
 
 	/* If we don't want any read-ahead, don't bother */
-	if (vma->vm_flags & VM_RAND_READ)
+	if (vmf->vma->vm_flags & VM_RAND_READ)
 		return;
 	if (!ra->ra_pages)
 		return;
 
-	if (vma->vm_flags & VM_SEQ_READ) {
+	if (vmf->vma->vm_flags & VM_SEQ_READ) {
 		page_cache_sync_readahead(mapping, ra, file, offset,
 					  ra->ra_pages);
 		return;
@@ -2463,16 +2463,16 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
  * Asynchronous readahead happens when we find the page and PG_readahead,
  * so we want to possibly extend the readahead further..
  */
-static void do_async_mmap_readahead(struct vm_area_struct *vma,
-				    struct file_ra_state *ra,
-				    struct file *file,
-				    struct page *page,
-				    pgoff_t offset)
+static void do_async_mmap_readahead(struct vm_fault *vmf,
+				    struct page *page)
 {
+	struct file *file = vmf->vma->vm_file;
+	struct file_ra_state *ra = &file->f_ra;
 	struct address_space *mapping = file->f_mapping;
+	pgoff_t offset = vmf->pgoff;
 
 	/* If we don't want any read-ahead, don't bother */
-	if (vma->vm_flags & VM_RAND_READ)
+	if (vmf->vma->vm_flags & VM_RAND_READ)
 		return;
 	if (ra->mmap_miss > 0)
 		ra->mmap_miss--;
@@ -2531,10 +2531,10 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
 		 * We found the page, so try async readahead before
 		 * waiting for the lock.
 		 */
-		do_async_mmap_readahead(vmf->vma, ra, file, page, offset);
+		do_async_mmap_readahead(vmf, page);
 	} else if (!page) {
 		/* No page in the page cache at all */
-		do_sync_mmap_readahead(vmf->vma, ra, file, offset);
+		do_sync_mmap_readahead(vmf);
 		count_vm_event(PGMAJFAULT);
 		count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
 		ret = VM_FAULT_MAJOR;
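
As a design note, this is the classic "introduce parameter object" refactoring:
callers that already hold a vm_fault stop exploding it into individual
arguments, and each helper derives its own locals. A minimal standalone sketch
of the same pattern, with entirely hypothetical names (a userspace analogy,
not kernel code):

#include <stdio.h>

/* Hypothetical context struct, playing the role of struct vm_fault. */
struct fault_ctx {
	const char *path;	/* stands in for vmf->vma->vm_file */
	unsigned long pgoff;	/* stands in for vmf->pgoff */
};

/* Before: every value the helper needs is a separate parameter. */
static void readahead_old(const char *path, unsigned long pgoff)
{
	printf("readahead %s at page %lu\n", path, pgoff);
}

/* After: pass the context once; the helper picks out what it needs. */
static void readahead_new(const struct fault_ctx *ctx)
{
	const char *path = ctx->path;
	unsigned long pgoff = ctx->pgoff;

	printf("readahead %s at page %lu\n", path, pgoff);
}

int main(void)
{
	struct fault_ctx ctx = { .path = "file.bin", .pgoff = 42 };

	readahead_old(ctx.path, ctx.pgoff);	/* old calling convention */
	readahead_new(&ctx);			/* new calling convention */
	return 0;
}

The trade-off is mild: the helpers now depend on the whole struct rather than
a narrow parameter list, but any later change to what they need touches one
signature instead of every call site.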
