Commit 380173f

Merge branch 'akpm' (patches from Andrew)
Merge fixes from Andrew Morton:
 "13 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  dma-mapping: avoid oops when parameter cpu_addr is null
  mm/hugetlb: use EOPNOTSUPP in hugetlb sysctl handlers
  memremap: check pfn validity before passing to pfn_to_page()
  mm, thp: fix migration of PTE-mapped transparent huge pages
  dax: check return value of dax_radix_entry()
  ocfs2: fix return value from ocfs2_page_mkwrite()
  arm64: kasan: clear stale stack poison
  sched/kasan: remove stale KASAN poison after hotplug
  kasan: add functions to clear stack poison
  mm: fix mixed zone detection in devm_memremap_pages
  list: kill list_force_poison()
  mm: __delete_from_page_cache show Bad page if mapped
  mm/hugetlb: hugetlb_no_page: rate-limit warning message
2 parents 2f0d94e + d6b7eae commit 380173f

13 files changed, 88 insertions(+), 37 deletions(-)

arch/arm64/kernel/sleep.S

Lines changed: 4 additions & 0 deletions

@@ -145,6 +145,10 @@ ENTRY(cpu_resume_mmu)
 ENDPROC(cpu_resume_mmu)
 	.popsection
 cpu_resume_after_mmu:
+#ifdef CONFIG_KASAN
+	mov	x0, sp
+	bl	kasan_unpoison_remaining_stack
+#endif
 	mov	x0, #0			// return zero on success
 	ldp	x19, x20, [sp, #16]
 	ldp	x21, x22, [sp, #32]

fs/dax.c

Lines changed: 8 additions & 1 deletion

@@ -1056,6 +1056,7 @@ EXPORT_SYMBOL_GPL(dax_pmd_fault);
 int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct file *file = vma->vm_file;
+	int error;
 
 	/*
 	 * We pass NO_SECTOR to dax_radix_entry() because we expect that a
@@ -1065,7 +1066,13 @@ int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 * saves us from having to make a call to get_block() here to look
 	 * up the sector.
 	 */
-	dax_radix_entry(file->f_mapping, vmf->pgoff, NO_SECTOR, false, true);
+	error = dax_radix_entry(file->f_mapping, vmf->pgoff, NO_SECTOR, false,
+			true);
+
+	if (error == -ENOMEM)
+		return VM_FAULT_OOM;
+	if (error)
+		return VM_FAULT_SIGBUS;
 	return VM_FAULT_NOPAGE;
 }
 EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);

fs/ocfs2/mmap.c

Lines changed: 4 additions & 0 deletions

@@ -147,6 +147,10 @@ static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	ret = ocfs2_inode_lock(inode, &di_bh, 1);
 	if (ret < 0) {
 		mlog_errno(ret);
+		if (ret == -ENOMEM)
+			ret = VM_FAULT_OOM;
+		else
+			ret = VM_FAULT_SIGBUS;
 		goto out;
 	}
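The dax and ocfs2 fixes above address the same bug class: a ->pfn_mkwrite()/->page_mkwrite() handler must return a VM_FAULT_* code, so a negative errno leaking out of an internal helper has to be translated before returning. Below is a minimal user-space sketch of that translation; the fault-code macros are defined locally as stand-ins for the kernel's, purely for illustration.

#include <errno.h>
#include <stdio.h>

/* Stand-ins for the kernel's fault codes, defined locally for this sketch. */
#define VM_FAULT_OOM     0x0001
#define VM_FAULT_SIGBUS  0x0002
#define VM_FAULT_NOPAGE  0x0100

/* Translate a negative errno from an internal helper into a fault code,
 * mirroring the shape of the dax_pfn_mkwrite()/ocfs2_page_mkwrite() fixes. */
static int errno_to_fault(int error)
{
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	if (error < 0)
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}

int main(void)
{
	printf("-ENOMEM -> %#x\n", errno_to_fault(-ENOMEM));
	printf("-EIO    -> %#x\n", errno_to_fault(-EIO));
	printf("0       -> %#x\n", errno_to_fault(0));
	return 0;
}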

include/linux/dma-mapping.h

Lines changed: 1 addition & 1 deletion

@@ -386,7 +386,7 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
 	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
 		return;
 
-	if (!ops->free)
+	if (!ops->free || !cpu_addr)
 		return;
 
 	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);

include/linux/kasan.h

Lines changed: 5 additions & 1 deletion

@@ -1,6 +1,7 @@
 #ifndef _LINUX_KASAN_H
 #define _LINUX_KASAN_H
 
+#include <linux/sched.h>
 #include <linux/types.h>
 
 struct kmem_cache;
@@ -13,7 +14,6 @@ struct vm_struct;
 
 #include <asm/kasan.h>
 #include <asm/pgtable.h>
-#include <linux/sched.h>
 
 extern unsigned char kasan_zero_page[PAGE_SIZE];
 extern pte_t kasan_zero_pte[PTRS_PER_PTE];
@@ -43,6 +43,8 @@ static inline void kasan_disable_current(void)
 
 void kasan_unpoison_shadow(const void *address, size_t size);
 
+void kasan_unpoison_task_stack(struct task_struct *task);
+
 void kasan_alloc_pages(struct page *page, unsigned int order);
 void kasan_free_pages(struct page *page, unsigned int order);
 
@@ -66,6 +68,8 @@ void kasan_free_shadow(const struct vm_struct *vm);
 
 static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
 
+static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
+
 static inline void kasan_enable_current(void) {}
 static inline void kasan_disable_current(void) {}

include/linux/list.h

Lines changed: 0 additions & 11 deletions

@@ -113,17 +113,6 @@ extern void __list_del_entry(struct list_head *entry);
 extern void list_del(struct list_head *entry);
 #endif
 
-#ifdef CONFIG_DEBUG_LIST
-/*
- * See devm_memremap_pages() which wants DEBUG_LIST=y to assert if one
- * of the pages it allocates is ever passed to list_add()
- */
-extern void list_force_poison(struct list_head *entry);
-#else
-/* fallback to the less strict LIST_POISON* definitions */
-#define list_force_poison list_del
-#endif
-
 /**
  * list_replace - replace old entry by new one
  * @old : the element to be replaced

kernel/memremap.c

Lines changed: 15 additions & 9 deletions

@@ -29,10 +29,10 @@ __weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
 
 static void *try_ram_remap(resource_size_t offset, size_t size)
 {
-	struct page *page = pfn_to_page(offset >> PAGE_SHIFT);
+	unsigned long pfn = PHYS_PFN(offset);
 
 	/* In the simple case just return the existing linear address */
-	if (!PageHighMem(page))
+	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)))
 		return __va(offset);
 	return NULL; /* fallback to ioremap_cache */
 }
@@ -270,13 +270,16 @@ struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
 void *devm_memremap_pages(struct device *dev, struct resource *res,
 		struct percpu_ref *ref, struct vmem_altmap *altmap)
 {
-	int is_ram = region_intersects(res->start, resource_size(res),
-			"System RAM");
 	resource_size_t key, align_start, align_size, align_end;
 	struct dev_pagemap *pgmap;
 	struct page_map *page_map;
+	int error, nid, is_ram;
 	unsigned long pfn;
-	int error, nid;
+
+	align_start = res->start & ~(SECTION_SIZE - 1);
+	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
+		- align_start;
+	is_ram = region_intersects(align_start, align_size, "System RAM");
 
 	if (is_ram == REGION_MIXED) {
 		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
@@ -314,8 +317,6 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 
 	mutex_lock(&pgmap_lock);
 	error = 0;
-	align_start = res->start & ~(SECTION_SIZE - 1);
-	align_size = ALIGN(resource_size(res), SECTION_SIZE);
 	align_end = align_start + align_size - 1;
 	for (key = align_start; key <= align_end; key += SECTION_SIZE) {
 		struct dev_pagemap *dup;
@@ -351,8 +352,13 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 	for_each_device_pfn(pfn, page_map) {
 		struct page *page = pfn_to_page(pfn);
 
-		/* ZONE_DEVICE pages must never appear on a slab lru */
-		list_force_poison(&page->lru);
+		/*
+		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
+		 * pointer. It is a bug if a ZONE_DEVICE page is ever
+		 * freed or placed on a driver-private list. Seed the
+		 * storage with LIST_POISON* values.
+		 */
+		list_del(&page->lru);
 		page->pgmap = pgmap;
 	}
 	devres_add(dev, page_map);
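The key change in devm_memremap_pages() is that the "System RAM" intersection check now runs over the section-aligned range rather than the raw resource: ZONE_DEVICE memory is hotplugged in SECTION_SIZE units, so RAM anywhere in those sections makes the region mixed. Below is a small user-space sketch of the alignment arithmetic; the section size and example addresses are illustrative, not taken from any particular machine.

#include <stdio.h>
#include <stdint.h>

/* Illustrative section size; commonly 128 MiB on x86_64. */
#define SECTION_SIZE (128ULL << 20)
#define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* A hypothetical device resource that neither starts nor ends on a
	 * section boundary. */
	uint64_t start = 0x100200000ULL;	/* 4 GiB + 2 MiB */
	uint64_t size  = 0x040000000ULL;	/* 1 GiB */

	uint64_t align_start = start & ~(SECTION_SIZE - 1);
	uint64_t align_size  = ALIGN(start + size, SECTION_SIZE) - align_start;

	/* The RAM-intersection check must cover whole sections, otherwise RAM
	 * sharing a section with the device range would go undetected. */
	printf("resource: [%#llx, %#llx)\n",
	       (unsigned long long)start, (unsigned long long)(start + size));
	printf("aligned:  [%#llx, %#llx)\n",
	       (unsigned long long)align_start,
	       (unsigned long long)(align_start + align_size));
	return 0;
}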

kernel/sched/core.c

Lines changed: 3 additions & 0 deletions

@@ -26,6 +26,7 @@
  *              Thomas Gleixner, Mike Kravetz
  */
 
+#include <linux/kasan.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/nmi.h>
@@ -5096,6 +5097,8 @@ void init_idle(struct task_struct *idle, int cpu)
 	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();
 
+	kasan_unpoison_task_stack(idle);
+
 #ifdef CONFIG_SMP
 	/*
 	 * Its possible that init_idle() gets called multiple times on a task,

lib/list_debug.c

Lines changed: 0 additions & 9 deletions

@@ -12,13 +12,6 @@
 #include <linux/kernel.h>
 #include <linux/rculist.h>
 
-static struct list_head force_poison;
-void list_force_poison(struct list_head *entry)
-{
-	entry->next = &force_poison;
-	entry->prev = &force_poison;
-}
-
 /*
  * Insert a new entry between two known consecutive entries.
  *
@@ -30,8 +23,6 @@ void __list_add(struct list_head *new,
 	      struct list_head *prev,
 	      struct list_head *next)
 {
-	WARN(new->next == &force_poison || new->prev == &force_poison,
-		"list_add attempted on force-poisoned entry\n");
 	WARN(next->prev != prev,
 		"list_add corruption. next->prev should be "
 		"prev (%p), but was %p. (next=%p).\n",

mm/filemap.c

Lines changed: 24 additions & 1 deletion

@@ -195,6 +195,30 @@ void __delete_from_page_cache(struct page *page, void *shadow,
 	else
 		cleancache_invalidate_page(mapping, page);
 
+	VM_BUG_ON_PAGE(page_mapped(page), page);
+	if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) {
+		int mapcount;
+
+		pr_alert("BUG: Bad page cache in process %s pfn:%05lx\n",
+			 current->comm, page_to_pfn(page));
+		dump_page(page, "still mapped when deleted");
+		dump_stack();
+		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
+
+		mapcount = page_mapcount(page);
+		if (mapping_exiting(mapping) &&
+		    page_count(page) >= mapcount + 2) {
+			/*
+			 * All vmas have already been torn down, so it's
+			 * a good bet that actually the page is unmapped,
+			 * and we'd prefer not to leak it: if we're wrong,
+			 * some other bad page check should catch it later.
+			 */
+			page_mapcount_reset(page);
+			atomic_sub(mapcount, &page->_count);
+		}
+	}
+
 	page_cache_tree_delete(mapping, page, shadow);
 
 	page->mapping = NULL;
@@ -205,7 +229,6 @@ void __delete_from_page_cache(struct page *page, void *shadow,
 	__dec_zone_page_state(page, NR_FILE_PAGES);
 	if (PageSwapBacked(page))
 		__dec_zone_page_state(page, NR_SHMEM);
-	VM_BUG_ON_PAGE(page_mapped(page), page);
 
 	/*
 	 * At this point page must be either written or cleaned by truncate.
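The new block in __delete_from_page_cache() only fires on non-DEBUG_VM kernels: it reports the still-mapped page, and, when the address space is being torn down and the refcount has room for the stale map references (page_count >= mapcount + 2, presumably covering the page-cache reference plus the caller's), it resets the mapcount and drops those references rather than leak the page. A rough model of that accounting check with plain integers follows; the numbers and the "+2" interpretation are illustrative, not a definitive reading of the kernel's reference counting.

#include <stdbool.h>
#include <stdio.h>

/* Decide whether stale map references can safely be reclaimed: the check
 * requires at least two references beyond the stale maps (assumed here to
 * be the page-cache reference and the caller's) before subtracting. */
static bool can_fixup(bool mapping_exiting, int page_count, int mapcount)
{
	return mapping_exiting && page_count >= mapcount + 2;
}

int main(void)
{
	/* Example: refcount 4, stale mapcount 2, mapping is exiting. */
	int page_count = 4, mapcount = 2;

	if (can_fixup(true, page_count, mapcount)) {
		page_count -= mapcount;	/* mirrors atomic_sub(mapcount, &page->_count) */
		mapcount = 0;		/* mirrors page_mapcount_reset(page) */
	}
	printf("page_count=%d mapcount=%d\n", page_count, mapcount);
	return 0;
}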

mm/hugetlb.c

Lines changed: 3 additions & 3 deletions

@@ -2751,7 +2751,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
 	int ret;
 
 	if (!hugepages_supported())
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	table->data = &tmp;
 	table->maxlen = sizeof(unsigned long);
@@ -2792,7 +2792,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
 	int ret;
 
 	if (!hugepages_supported())
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	tmp = h->nr_overcommit_huge_pages;
 
@@ -3502,7 +3502,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * COW. Warn that such a situation has occurred as it may not be obvious
 	 */
 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
-		pr_warning("PID %d killed due to inadequate hugepage pool\n",
+		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
			   current->pid);
		return ret;
	}
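The errno change matters because ENOTSUPP is a kernel-internal value with no definition in userspace <errno.h>, so a hugetlb sysctl access would surface as an unknown error, whereas EOPNOTSUPP maps to the familiar "Operation not supported". A quick user-space check is sketched below; ENOTSUPP is defined by hand since userspace headers do not provide it.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Not exported to userspace; value copied from the kernel's
 * include/linux/errno.h for illustration only. */
#define ENOTSUPP 524

int main(void)
{
	printf("ENOTSUPP   (%d): %s\n", ENOTSUPP, strerror(ENOTSUPP));
	printf("EOPNOTSUPP (%d): %s\n", EOPNOTSUPP, strerror(EOPNOTSUPP));
	return 0;
}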

mm/kasan/kasan.c

Lines changed: 20 additions & 0 deletions

@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/kmemleak.h>
+#include <linux/linkage.h>
 #include <linux/memblock.h>
 #include <linux/memory.h>
 #include <linux/mm.h>
@@ -60,6 +61,25 @@ void kasan_unpoison_shadow(const void *address, size_t size)
 	}
 }
 
+static void __kasan_unpoison_stack(struct task_struct *task, void *sp)
+{
+	void *base = task_stack_page(task);
+	size_t size = sp - base;
+
+	kasan_unpoison_shadow(base, size);
+}
+
+/* Unpoison the entire stack for a task. */
+void kasan_unpoison_task_stack(struct task_struct *task)
+{
+	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
+}
+
+/* Unpoison the stack for the current task beyond a watermark sp value. */
+asmlinkage void kasan_unpoison_remaining_stack(void *sp)
+{
+	__kasan_unpoison_stack(current, sp);
+}
 
 /*
  * All functions below always inlined so compiler could
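The two entry points differ only in the watermark passed to __kasan_unpoison_stack(): kasan_unpoison_task_stack() clears shadow for the whole stack allocation (used by init_idle() after hotplug), while kasan_unpoison_remaining_stack() clears from the stack base up to the given sp (used from the arm64 resume path above). Below is a toy model of that arithmetic using a byte array as the "shadow"; the 1:1 shadow mapping and sizes are simplifications of real KASAN, which uses one shadow byte per 8 bytes.

#include <stdio.h>
#include <string.h>

#define THREAD_SIZE 64			/* toy stack size, not the kernel's */

static char stack[THREAD_SIZE];
static unsigned char shadow[THREAD_SIZE];	/* 1:1 toy shadow; nonzero == poisoned */

/* Clear poison from the stack base up to (but not including) sp. */
static void unpoison_stack(void *sp)
{
	size_t size = (char *)sp - stack;

	memset(shadow, 0, size);
}

int main(void)
{
	memset(shadow, 0xf1, sizeof(shadow));	/* everything starts poisoned */

	unpoison_stack(stack + 40);		/* watermark at offset 40 */
	printf("shadow[39]=%#x shadow[40]=%#x\n",	/* cleared vs still poisoned */
	       shadow[39], shadow[40]);

	unpoison_stack(stack + THREAD_SIZE);	/* whole-stack variant */
	printf("shadow[63]=%#x\n", shadow[63]);
	return 0;
}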

mm/mempolicy.c

Lines changed: 1 addition & 1 deletion

@@ -532,7 +532,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 		nid = page_to_nid(page);
 		if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
 			continue;
-		if (PageTail(page) && PageAnon(page)) {
+		if (PageTransCompound(page) && PageAnon(page)) {
 			get_page(page);
 			pte_unmap_unlock(pte, ptl);
 			lock_page(page);
