
Commit 2d6d7f9

hnaz authored and torvalds committed
mm: protect set_page_dirty() from ongoing truncation
Tejun, while reviewing the code, spotted the following race condition
between the dirtying and truncation of a page:

__set_page_dirty_nobuffers()       __delete_from_page_cache()
  if (TestSetPageDirty(page))
                                     page->mapping = NULL
                                     if (PageDirty())
                                       dec_zone_page_state(page, NR_FILE_DIRTY);
                                       dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
  if (page->mapping)
    account_page_dirtied(page)
      __inc_zone_page_state(page, NR_FILE_DIRTY);
      __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);

which results in an imbalance of NR_FILE_DIRTY and BDI_RECLAIMABLE.

Dirtiers usually lock out truncation, either by holding the page lock
directly, or in case of zap_pte_range(), by pinning the mapcount with
the page table lock held.  The notable exception to this rule, though,
is do_wp_page(), for which this race exists.  However, do_wp_page()
already waits for a locked page to unlock before setting the dirty bit,
in order to prevent a race where clear_page_dirty() misses the page bit
in the presence of dirty ptes.  Upgrade that wait to a fully locked
set_page_dirty() to also cover the situation explained above.

Afterwards, the code in set_page_dirty() dealing with a truncation race
is no longer needed.  Remove it.

Reported-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 7a3ef20 commit 2d6d7f9
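
Not part of the commit, but as a reading aid: a minimal sketch of the locking rule the fix relies on. The page is dirtied while its page lock is held, so truncation, which removes pages from the page cache only with the page lock held, cannot clear page->mapping in between; writeback throttling happens afterwards, outside the lock. The calls are the ones used by the new do_wp_page() code in the mm/memory.c hunk below; the helper name dirty_page_vs_truncate() is invented for this illustration.

/*
 * Illustration only: dirty a pagecache page without racing against
 * truncation.  Holding the page lock across set_page_dirty() keeps
 * page->mapping stable, because __delete_from_page_cache() runs only
 * on a locked page.  Mirrors the new do_wp_page() code below.
 */
static void dirty_page_vs_truncate(struct page *page)
{
	struct address_space *mapping;
	int dirtied;

	lock_page(page);
	dirtied = set_page_dirty(page);
	mapping = page->mapping;	/* stable under the page lock */
	unlock_page(page);

	/* some drivers dirty pages without setting page->mapping */
	if (dirtied && mapping)
		balance_dirty_pages_ratelimited(mapping);
}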

3 files changed: 29 additions & 42 deletions

include/linux/writeback.h

Lines changed: 0 additions & 1 deletion
@@ -177,7 +177,6 @@ int write_cache_pages(struct address_space *mapping,
 		      struct writeback_control *wbc, writepage_t writepage,
 		      void *data);
 int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
-void set_page_dirty_balance(struct page *page);
 void writeback_set_ratelimit(void);
 void tag_pages_for_writeback(struct address_space *mapping,
 			     pgoff_t start, pgoff_t end);

mm/memory.c

Lines changed: 17 additions & 10 deletions
@@ -2137,17 +2137,24 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!dirty_page)
 		return ret;
 
-	/*
-	 * Yes, Virginia, this is actually required to prevent a race
-	 * with clear_page_dirty_for_io() from clearing the page dirty
-	 * bit after it clear all dirty ptes, but before a racing
-	 * do_wp_page installs a dirty pte.
-	 *
-	 * do_shared_fault is protected similarly.
-	 */
 	if (!page_mkwrite) {
-		wait_on_page_locked(dirty_page);
-		set_page_dirty_balance(dirty_page);
+		struct address_space *mapping;
+		int dirtied;
+
+		lock_page(dirty_page);
+		dirtied = set_page_dirty(dirty_page);
+		VM_BUG_ON_PAGE(PageAnon(dirty_page), dirty_page);
+		mapping = dirty_page->mapping;
+		unlock_page(dirty_page);
+
+		if (dirtied && mapping) {
+			/*
+			 * Some device drivers do not set page.mapping
+			 * but still dirty their pages
+			 */
+			balance_dirty_pages_ratelimited(mapping);
+		}
+
 		/* file_update_time outside page_lock */
 		if (vma->vm_file)
 			file_update_time(vma->vm_file);

mm/page-writeback.c

Lines changed: 12 additions & 31 deletions
@@ -1541,16 +1541,6 @@ static void balance_dirty_pages(struct address_space *mapping,
 		bdi_start_background_writeback(bdi);
 }
 
-void set_page_dirty_balance(struct page *page)
-{
-	if (set_page_dirty(page)) {
-		struct address_space *mapping = page_mapping(page);
-
-		if (mapping)
-			balance_dirty_pages_ratelimited(mapping);
-	}
-}
-
 static DEFINE_PER_CPU(int, bdp_ratelimits);
 
 /*
@@ -2123,32 +2113,25 @@ EXPORT_SYMBOL(account_page_dirtied);
  * page dirty in that case, but not all the buffers. This is a "bottom-up"
  * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
  *
- * Most callers have locked the page, which pins the address_space in memory.
- * But zap_pte_range() does not lock the page, however in that case the
- * mapping is pinned by the vma's ->vm_file reference.
- *
- * We take care to handle the case where the page was truncated from the
- * mapping by re-checking page_mapping() inside tree_lock.
+ * The caller must ensure this doesn't race with truncation. Most will simply
+ * hold the page lock, but e.g. zap_pte_range() calls with the page mapped and
+ * the pte lock held, which also locks out truncation.
  */
 int __set_page_dirty_nobuffers(struct page *page)
 {
 	if (!TestSetPageDirty(page)) {
 		struct address_space *mapping = page_mapping(page);
-		struct address_space *mapping2;
 		unsigned long flags;
 
 		if (!mapping)
 			return 1;
 
 		spin_lock_irqsave(&mapping->tree_lock, flags);
-		mapping2 = page_mapping(page);
-		if (mapping2) { /* Race with truncate? */
-			BUG_ON(mapping2 != mapping);
-			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
-			account_page_dirtied(page, mapping);
-			radix_tree_tag_set(&mapping->page_tree,
-				page_index(page), PAGECACHE_TAG_DIRTY);
-		}
+		BUG_ON(page_mapping(page) != mapping);
+		WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
+		account_page_dirtied(page, mapping);
+		radix_tree_tag_set(&mapping->page_tree, page_index(page),
+				   PAGECACHE_TAG_DIRTY);
 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
 		if (mapping->host) {
 			/* !PageAnon && !swapper_space */
@@ -2305,12 +2288,10 @@ int clear_page_dirty_for_io(struct page *page)
 		/*
 		 * We carefully synchronise fault handlers against
 		 * installing a dirty pte and marking the page dirty
-		 * at this point. We do this by having them hold the
-		 * page lock at some point after installing their
-		 * pte, but before marking the page dirty.
-		 * Pages are always locked coming in here, so we get
-		 * the desired exclusion. See mm/memory.c:do_wp_page()
-		 * for more comments.
+		 * at this point.  We do this by having them hold the
+		 * page lock while dirtying the page, and pages are
+		 * always locked coming in here, so we get the desired
+		 * exclusion.
 		 */
 		if (TestClearPageDirty(page)) {
 			dec_zone_page_state(page, NR_FILE_DIRTY);
