Skip to content

Commit e30331f

Browse files
Ross Zwisler
authored and torvalds committed
dax: relocate some dax functions
dax_load_hole() will soon need to call dax_insert_mapping_entry(), so it needs to be moved lower in dax.c so the definition exists. dax_wake_mapping_entry_waiter() will soon be removed from dax.h and be made static to dax.c, so we need to move its definition above all its callers. Link: http://lkml.kernel.org/r/20170724170616.25810-3-ross.zwisler@linux.intel.com Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com> Reviewed-by: Jan Kara <jack@suse.cz> Cc: "Darrick J. Wong" <darrick.wong@oracle.com> Cc: "Theodore Ts'o" <tytso@mit.edu> Cc: Alexander Viro <viro@zeniv.linux.org.uk> Cc: Andreas Dilger <adilger.kernel@dilger.ca> Cc: Christoph Hellwig <hch@lst.de> Cc: Dan Williams <dan.j.williams@intel.com> Cc: Dave Chinner <david@fromorbit.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: Jonathan Corbet <corbet@lwn.net> Cc: Matthew Wilcox <mawilcox@microsoft.com> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent b2770da commit e30331f

File tree

1 file changed

+69
-69
lines changed

1 file changed

+69
-69
lines changed

fs/dax.c

Lines changed: 69 additions & 69 deletions
Original file line numberDiff line numberDiff line change
@@ -120,6 +120,31 @@ static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mo
120120
return autoremove_wake_function(wait, mode, sync, NULL);
121121
}
122122

123+
/*
124+
* We do not necessarily hold the mapping->tree_lock when we call this
125+
* function so it is possible that 'entry' is no longer a valid item in the
126+
* radix tree. This is okay because all we really need to do is to find the
127+
* correct waitqueue where tasks might be waiting for that old 'entry' and
128+
* wake them.
129+
*/
130+
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
131+
pgoff_t index, void *entry, bool wake_all)
132+
{
133+
struct exceptional_entry_key key;
134+
wait_queue_head_t *wq;
135+
136+
wq = dax_entry_waitqueue(mapping, index, entry, &key);
137+
138+
/*
139+
* Checking for locked entry and prepare_to_wait_exclusive() happens
140+
* under mapping->tree_lock, ditto for entry handling in our callers.
141+
* So at this point all tasks that could have seen our entry locked
142+
* must be in the waitqueue and the following check will see them.
143+
*/
144+
if (waitqueue_active(wq))
145+
__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
146+
}
147+
123148
/*
124149
* Check whether the given slot is locked. The function must be called with
125150
* mapping->tree_lock held
@@ -392,31 +417,6 @@ static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
392417
return entry;
393418
}
394419

395-
/*
396-
* We do not necessarily hold the mapping->tree_lock when we call this
397-
* function so it is possible that 'entry' is no longer a valid item in the
398-
* radix tree. This is okay because all we really need to do is to find the
399-
* correct waitqueue where tasks might be waiting for that old 'entry' and
400-
* wake them.
401-
*/
402-
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
403-
pgoff_t index, void *entry, bool wake_all)
404-
{
405-
struct exceptional_entry_key key;
406-
wait_queue_head_t *wq;
407-
408-
wq = dax_entry_waitqueue(mapping, index, entry, &key);
409-
410-
/*
411-
* Checking for locked entry and prepare_to_wait_exclusive() happens
412-
* under mapping->tree_lock, ditto for entry handling in our callers.
413-
* So at this point all tasks that could have seen our entry locked
414-
* must be in the waitqueue and the following check will see them.
415-
*/
416-
if (waitqueue_active(wq))
417-
__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
418-
}
419-
420420
static int __dax_invalidate_mapping_entry(struct address_space *mapping,
421421
pgoff_t index, bool trunc)
422422
{
@@ -468,50 +468,6 @@ int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
468468
return __dax_invalidate_mapping_entry(mapping, index, false);
469469
}
470470

471-
/*
472-
* The user has performed a load from a hole in the file. Allocating
473-
* a new page in the file would cause excessive storage usage for
474-
* workloads with sparse files. We allocate a page cache page instead.
475-
* We'll kick it out of the page cache if it's ever written to,
476-
* otherwise it will simply fall out of the page cache under memory
477-
* pressure without ever having been dirtied.
478-
*/
479-
static int dax_load_hole(struct address_space *mapping, void **entry,
480-
struct vm_fault *vmf)
481-
{
482-
struct inode *inode = mapping->host;
483-
struct page *page;
484-
int ret;
485-
486-
/* Hole page already exists? Return it... */
487-
if (!radix_tree_exceptional_entry(*entry)) {
488-
page = *entry;
489-
goto finish_fault;
490-
}
491-
492-
/* This will replace locked radix tree entry with a hole page */
493-
page = find_or_create_page(mapping, vmf->pgoff,
494-
vmf->gfp_mask | __GFP_ZERO);
495-
if (!page) {
496-
ret = VM_FAULT_OOM;
497-
goto out;
498-
}
499-
500-
finish_fault:
501-
vmf->page = page;
502-
ret = finish_fault(vmf);
503-
vmf->page = NULL;
504-
*entry = page;
505-
if (!ret) {
506-
/* Grab reference for PTE that is now referencing the page */
507-
get_page(page);
508-
ret = VM_FAULT_NOPAGE;
509-
}
510-
out:
511-
trace_dax_load_hole(inode, vmf, ret);
512-
return ret;
513-
}
514-
515471
static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
516472
sector_t sector, size_t size, struct page *to,
517473
unsigned long vaddr)
@@ -941,6 +897,50 @@ int dax_pfn_mkwrite(struct vm_fault *vmf)
941897
}
942898
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
943899

900+
/*
901+
* The user has performed a load from a hole in the file. Allocating
902+
* a new page in the file would cause excessive storage usage for
903+
* workloads with sparse files. We allocate a page cache page instead.
904+
* We'll kick it out of the page cache if it's ever written to,
905+
* otherwise it will simply fall out of the page cache under memory
906+
* pressure without ever having been dirtied.
907+
*/
908+
static int dax_load_hole(struct address_space *mapping, void **entry,
909+
struct vm_fault *vmf)
910+
{
911+
struct inode *inode = mapping->host;
912+
struct page *page;
913+
int ret;
914+
915+
/* Hole page already exists? Return it... */
916+
if (!radix_tree_exceptional_entry(*entry)) {
917+
page = *entry;
918+
goto finish_fault;
919+
}
920+
921+
/* This will replace locked radix tree entry with a hole page */
922+
page = find_or_create_page(mapping, vmf->pgoff,
923+
vmf->gfp_mask | __GFP_ZERO);
924+
if (!page) {
925+
ret = VM_FAULT_OOM;
926+
goto out;
927+
}
928+
929+
finish_fault:
930+
vmf->page = page;
931+
ret = finish_fault(vmf);
932+
vmf->page = NULL;
933+
*entry = page;
934+
if (!ret) {
935+
/* Grab reference for PTE that is now referencing the page */
936+
get_page(page);
937+
ret = VM_FAULT_NOPAGE;
938+
}
939+
out:
940+
trace_dax_load_hole(inode, vmf, ret);
941+
return ret;
942+
}
943+
944944
static bool dax_range_is_aligned(struct block_device *bdev,
945945
unsigned int offset, unsigned int length)
946946
{

0 commit comments

Comments (0)