Skip to content

Commit ed4a108

Browse files
committed
Merge branch 'akpm' (patches from Andrew Morton)
Merge fixes from Andrew Morton: "10 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  - mm: hugetlb: fix copy_hugetlb_page_range()
  - simple_xattr: permit 0-size extended attributes
  - mm/fs: fix pessimization in hole-punching pagecache
  - shmem: fix splicing from a hole while it's punched
  - shmem: fix faulting into a hole, not taking i_mutex
  - mm: do not call do_fault_around for non-linear fault
  - sh: also try passing -m4-nofpu for SH2A builds
  - zram: avoid lockdep splat by revalidate_disk
  - mm/rmap.c: fix pgoff calculation to handle hugepage correctly
  - coredump: fix the setting of PF_DUMPCORE
2 parents 15ba223 + 0253d63 commit ed4a108

File tree

11 files changed

+117
-55
lines changed

11 files changed

+117
-55
lines changed

arch/sh/Makefile

Lines changed: 2 additions & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -32,7 +32,8 @@ endif
3232

3333
cflags-$(CONFIG_CPU_SH2) := $(call cc-option,-m2,)
3434
cflags-$(CONFIG_CPU_SH2A) += $(call cc-option,-m2a,) \
35-
$(call cc-option,-m2a-nofpu,)
35+
$(call cc-option,-m2a-nofpu,) \
36+
$(call cc-option,-m4-nofpu,)
3637
cflags-$(CONFIG_CPU_SH3) := $(call cc-option,-m3,)
3738
cflags-$(CONFIG_CPU_SH4) := $(call cc-option,-m4,) \
3839
$(call cc-option,-mno-implicit-fp,-m4-nofpu)

drivers/block/zram/zram_drv.c

Lines changed: 18 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -622,11 +622,18 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
622622
memset(&zram->stats, 0, sizeof(zram->stats));
623623

624624
zram->disksize = 0;
625-
if (reset_capacity) {
625+
if (reset_capacity)
626626
set_capacity(zram->disk, 0);
627-
revalidate_disk(zram->disk);
628-
}
627+
629628
up_write(&zram->init_lock);
629+
630+
/*
631+
* Revalidate disk out of the init_lock to avoid lockdep splat.
632+
* It's okay because disk's capacity is protected by init_lock
633+
* so that revalidate_disk always sees up-to-date capacity.
634+
*/
635+
if (reset_capacity)
636+
revalidate_disk(zram->disk);
630637
}
631638

632639
static ssize_t disksize_store(struct device *dev,
@@ -666,8 +673,15 @@ static ssize_t disksize_store(struct device *dev,
666673
zram->comp = comp;
667674
zram->disksize = disksize;
668675
set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
669-
revalidate_disk(zram->disk);
670676
up_write(&zram->init_lock);
677+
678+
/*
679+
* Revalidate disk out of the init_lock to avoid lockdep splat.
680+
* It's okay because disk's capacity is protected by init_lock
681+
* so that revalidate_disk always sees up-to-date capacity.
682+
*/
683+
revalidate_disk(zram->disk);
684+
671685
return len;
672686

673687
out_destroy_comp:

fs/coredump.c

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -306,7 +306,7 @@ static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
306306
if (unlikely(nr < 0))
307307
return nr;
308308

309-
tsk->flags = PF_DUMPCORE;
309+
tsk->flags |= PF_DUMPCORE;
310310
if (atomic_read(&mm->mm_users) == nr + 1)
311311
goto done;
312312
/*

fs/xattr.c

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -843,7 +843,7 @@ struct simple_xattr *simple_xattr_alloc(const void *value, size_t size)
843843

844844
/* wrap around? */
845845
len = sizeof(*new_xattr) + size;
846-
if (len <= sizeof(*new_xattr))
846+
if (len < sizeof(*new_xattr))
847847
return NULL;
848848

849849
new_xattr = kmalloc(len, GFP_KERNEL);

include/linux/pagemap.h

Lines changed: 12 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -398,6 +398,18 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
398398
return read_cache_page(mapping, index, filler, data);
399399
}
400400

401+
/*
402+
* Get the offset in PAGE_SIZE.
403+
* (TODO: hugepage should have ->index in PAGE_SIZE)
404+
*/
405+
static inline pgoff_t page_to_pgoff(struct page *page)
406+
{
407+
if (unlikely(PageHeadHuge(page)))
408+
return page->index << compound_order(page);
409+
else
410+
return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
411+
}
412+
401413
/*
402414
* Return byte-offset into filesystem object for page.
403415
*/

mm/hugetlb.c

Lines changed: 1 addition & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -2604,6 +2604,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
26042604
} else {
26052605
if (cow)
26062606
huge_ptep_set_wrprotect(src, addr, src_pte);
2607+
entry = huge_ptep_get(src_pte);
26072608
ptepage = pte_page(entry);
26082609
get_page(ptepage);
26092610
page_dup_rmap(ptepage);

mm/memory-failure.c

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -435,7 +435,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
435435
if (av == NULL) /* Not actually mapped anymore */
436436
return;
437437

438-
pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
438+
pgoff = page_to_pgoff(page);
439439
read_lock(&tasklist_lock);
440440
for_each_process (tsk) {
441441
struct anon_vma_chain *vmac;
@@ -469,7 +469,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
469469
mutex_lock(&mapping->i_mmap_mutex);
470470
read_lock(&tasklist_lock);
471471
for_each_process(tsk) {
472-
pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
472+
pgoff_t pgoff = page_to_pgoff(page);
473473
struct task_struct *t = task_early_kill(tsk, force_early);
474474

475475
if (!t)

mm/memory.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2882,7 +2882,8 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
28822882
* if page by the offset is not ready to be mapped (cold cache or
28832883
* something).
28842884
*/
2885-
if (vma->vm_ops->map_pages && fault_around_pages() > 1) {
2885+
if (vma->vm_ops->map_pages && !(flags & FAULT_FLAG_NONLINEAR) &&
2886+
fault_around_pages() > 1) {
28862887
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
28872888
do_fault_around(vma, address, pte, pgoff, flags);
28882889
if (!pte_same(*pte, orig_pte))

mm/rmap.c

Lines changed: 3 additions & 7 deletions
Original file line number · Diff line number · Diff line change
@@ -517,11 +517,7 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
517517
static inline unsigned long
518518
__vma_address(struct page *page, struct vm_area_struct *vma)
519519
{
520-
pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
521-
522-
if (unlikely(is_vm_hugetlb_page(vma)))
523-
pgoff = page->index << huge_page_order(page_hstate(page));
524-
520+
pgoff_t pgoff = page_to_pgoff(page);
525521
return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
526522
}
527523

@@ -1639,7 +1635,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
16391635
static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
16401636
{
16411637
struct anon_vma *anon_vma;
1642-
pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
1638+
pgoff_t pgoff = page_to_pgoff(page);
16431639
struct anon_vma_chain *avc;
16441640
int ret = SWAP_AGAIN;
16451641

@@ -1680,7 +1676,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
16801676
static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
16811677
{
16821678
struct address_space *mapping = page->mapping;
1683-
pgoff_t pgoff = page->index << compound_order(page);
1679+
pgoff_t pgoff = page_to_pgoff(page);
16841680
struct vm_area_struct *vma;
16851681
int ret = SWAP_AGAIN;
16861682

mm/shmem.c

Lines changed: 67 additions & 35 deletions
Original file line number · Diff line number · Diff line change
@@ -85,7 +85,7 @@ static struct vfsmount *shm_mnt;
8585
* a time): we would prefer not to enlarge the shmem inode just for that.
8686
*/
8787
struct shmem_falloc {
88-
int mode; /* FALLOC_FL mode currently operating */
88+
wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
8989
pgoff_t start; /* start of range currently being fallocated */
9090
pgoff_t next; /* the next page offset to be fallocated */
9191
pgoff_t nr_falloced; /* how many new pages have been fallocated */
@@ -468,23 +468,20 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
468468
return;
469469

470470
index = start;
471-
for ( ; ; ) {
471+
while (index < end) {
472472
cond_resched();
473473

474474
pvec.nr = find_get_entries(mapping, index,
475475
min(end - index, (pgoff_t)PAGEVEC_SIZE),
476476
pvec.pages, indices);
477477
if (!pvec.nr) {
478-
if (index == start || unfalloc)
478+
/* If all gone or hole-punch or unfalloc, we're done */
479+
if (index == start || end != -1)
479480
break;
481+
/* But if truncating, restart to make sure all gone */
480482
index = start;
481483
continue;
482484
}
483-
if ((index == start || unfalloc) && indices[0] >= end) {
484-
pagevec_remove_exceptionals(&pvec);
485-
pagevec_release(&pvec);
486-
break;
487-
}
488485
mem_cgroup_uncharge_start();
489486
for (i = 0; i < pagevec_count(&pvec); i++) {
490487
struct page *page = pvec.pages[i];
@@ -496,8 +493,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
496493
if (radix_tree_exceptional_entry(page)) {
497494
if (unfalloc)
498495
continue;
499-
nr_swaps_freed += !shmem_free_swap(mapping,
500-
index, page);
496+
if (shmem_free_swap(mapping, index, page)) {
497+
/* Swap was replaced by page: retry */
498+
index--;
499+
break;
500+
}
501+
nr_swaps_freed++;
501502
continue;
502503
}
503504

@@ -506,6 +507,11 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
506507
if (page->mapping == mapping) {
507508
VM_BUG_ON_PAGE(PageWriteback(page), page);
508509
truncate_inode_page(mapping, page);
510+
} else {
511+
/* Page was replaced by swap: retry */
512+
unlock_page(page);
513+
index--;
514+
break;
509515
}
510516
}
511517
unlock_page(page);
@@ -760,7 +766,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
760766
spin_lock(&inode->i_lock);
761767
shmem_falloc = inode->i_private;
762768
if (shmem_falloc &&
763-
!shmem_falloc->mode &&
769+
!shmem_falloc->waitq &&
764770
index >= shmem_falloc->start &&
765771
index < shmem_falloc->next)
766772
shmem_falloc->nr_unswapped++;
@@ -1248,38 +1254,58 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
12481254
* Trinity finds that probing a hole which tmpfs is punching can
12491255
* prevent the hole-punch from ever completing: which in turn
12501256
* locks writers out with its hold on i_mutex. So refrain from
1251-
* faulting pages into the hole while it's being punched, and
1252-
* wait on i_mutex to be released if vmf->flags permits.
1257+
* faulting pages into the hole while it's being punched. Although
1258+
* shmem_undo_range() does remove the additions, it may be unable to
1259+
* keep up, as each new page needs its own unmap_mapping_range() call,
1260+
* and the i_mmap tree grows ever slower to scan if new vmas are added.
1261+
*
1262+
* It does not matter if we sometimes reach this check just before the
1263+
* hole-punch begins, so that one fault then races with the punch:
1264+
* we just need to make racing faults a rare case.
1265+
*
1266+
* The implementation below would be much simpler if we just used a
1267+
* standard mutex or completion: but we cannot take i_mutex in fault,
1268+
* and bloating every shmem inode for this unlikely case would be sad.
12531269
*/
12541270
if (unlikely(inode->i_private)) {
12551271
struct shmem_falloc *shmem_falloc;
12561272

12571273
spin_lock(&inode->i_lock);
12581274
shmem_falloc = inode->i_private;
1259-
if (!shmem_falloc ||
1260-
shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE ||
1261-
vmf->pgoff < shmem_falloc->start ||
1262-
vmf->pgoff >= shmem_falloc->next)
1263-
shmem_falloc = NULL;
1264-
spin_unlock(&inode->i_lock);
1265-
/*
1266-
* i_lock has protected us from taking shmem_falloc seriously
1267-
* once return from shmem_fallocate() went back up that stack.
1268-
* i_lock does not serialize with i_mutex at all, but it does
1269-
* not matter if sometimes we wait unnecessarily, or sometimes
1270-
* miss out on waiting: we just need to make those cases rare.
1271-
*/
1272-
if (shmem_falloc) {
1275+
if (shmem_falloc &&
1276+
shmem_falloc->waitq &&
1277+
vmf->pgoff >= shmem_falloc->start &&
1278+
vmf->pgoff < shmem_falloc->next) {
1279+
wait_queue_head_t *shmem_falloc_waitq;
1280+
DEFINE_WAIT(shmem_fault_wait);
1281+
1282+
ret = VM_FAULT_NOPAGE;
12731283
if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
12741284
!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
1285+
/* It's polite to up mmap_sem if we can */
12751286
up_read(&vma->vm_mm->mmap_sem);
1276-
mutex_lock(&inode->i_mutex);
1277-
mutex_unlock(&inode->i_mutex);
1278-
return VM_FAULT_RETRY;
1287+
ret = VM_FAULT_RETRY;
12791288
}
1280-
/* cond_resched? Leave that to GUP or return to user */
1281-
return VM_FAULT_NOPAGE;
1289+
1290+
shmem_falloc_waitq = shmem_falloc->waitq;
1291+
prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
1292+
TASK_UNINTERRUPTIBLE);
1293+
spin_unlock(&inode->i_lock);
1294+
schedule();
1295+
1296+
/*
1297+
* shmem_falloc_waitq points into the shmem_fallocate()
1298+
* stack of the hole-punching task: shmem_falloc_waitq
1299+
* is usually invalid by the time we reach here, but
1300+
* finish_wait() does not dereference it in that case;
1301+
* though i_lock needed lest racing with wake_up_all().
1302+
*/
1303+
spin_lock(&inode->i_lock);
1304+
finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
1305+
spin_unlock(&inode->i_lock);
1306+
return ret;
12821307
}
1308+
spin_unlock(&inode->i_lock);
12831309
}
12841310

12851311
error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
@@ -1774,13 +1800,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
17741800

17751801
mutex_lock(&inode->i_mutex);
17761802

1777-
shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;
1778-
17791803
if (mode & FALLOC_FL_PUNCH_HOLE) {
17801804
struct address_space *mapping = file->f_mapping;
17811805
loff_t unmap_start = round_up(offset, PAGE_SIZE);
17821806
loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
1807+
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
17831808

1809+
shmem_falloc.waitq = &shmem_falloc_waitq;
17841810
shmem_falloc.start = unmap_start >> PAGE_SHIFT;
17851811
shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
17861812
spin_lock(&inode->i_lock);
@@ -1792,8 +1818,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
17921818
1 + unmap_end - unmap_start, 0);
17931819
shmem_truncate_range(inode, offset, offset + len - 1);
17941820
/* No need to unmap again: hole-punching leaves COWed pages */
1821+
1822+
spin_lock(&inode->i_lock);
1823+
inode->i_private = NULL;
1824+
wake_up_all(&shmem_falloc_waitq);
1825+
spin_unlock(&inode->i_lock);
17951826
error = 0;
1796-
goto undone;
1827+
goto out;
17971828
}
17981829

17991830
/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
@@ -1809,6 +1840,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
18091840
goto out;
18101841
}
18111842

1843+
shmem_falloc.waitq = NULL;
18121844
shmem_falloc.start = start;
18131845
shmem_falloc.next = start;
18141846
shmem_falloc.nr_falloced = 0;

mm/truncate.c

Lines changed: 8 additions & 3 deletions
Original file line number · Diff line number · Diff line change
@@ -355,14 +355,16 @@ void truncate_inode_pages_range(struct address_space *mapping,
355355
for ( ; ; ) {
356356
cond_resched();
357357
if (!pagevec_lookup_entries(&pvec, mapping, index,
358-
min(end - index, (pgoff_t)PAGEVEC_SIZE),
359-
indices)) {
358+
min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
359+
/* If all gone from start onwards, we're done */
360360
if (index == start)
361361
break;
362+
/* Otherwise restart to make sure all gone */
362363
index = start;
363364
continue;
364365
}
365366
if (index == start && indices[0] >= end) {
367+
/* All gone out of hole to be punched, we're done */
366368
pagevec_remove_exceptionals(&pvec);
367369
pagevec_release(&pvec);
368370
break;
@@ -373,8 +375,11 @@ void truncate_inode_pages_range(struct address_space *mapping,
373375

374376
/* We rely upon deletion not changing page->index */
375377
index = indices[i];
376-
if (index >= end)
378+
if (index >= end) {
379+
/* Restart punch to make sure all gone */
380+
index = start - 1;
377381
break;
382+
}
378383

379384
if (radix_tree_exceptional_entry(page)) {
380385
clear_exceptional_entry(mapping, index, page);

0 commit comments

Comments
 (0)