Skip to content

Commit 75f296d

Browse files
Levin, Alexander (Sasha Levin) authored and torvalds committed
kmemcheck: stop using GFP_NOTRACK and SLAB_NOTRACK
Convert all allocations that used a NOTRACK flag to stop using it. Link: http://lkml.kernel.org/r/20171007030159.22241-3-alexander.levin@verizon.com Signed-off-by: Sasha Levin <alexander.levin@verizon.com> Cc: Alexander Potapenko <glider@google.com> Cc: Eric W. Biederman <ebiederm@xmission.com> Cc: Michal Hocko <mhocko@kernel.org> Cc: Pekka Enberg <penberg@kernel.org> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Tim Hansen <devtimhansen@gmail.com> Cc: Vegard Nossum <vegardno@ifi.uio.no> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 4950276 commit 75f296d

File tree

23 files changed

+36
-48
lines changed

23 files changed

+36
-48
lines changed

arch/arm/include/asm/pgalloc.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5757
extern pgd_t *pgd_alloc(struct mm_struct *mm);
5858
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
5959

60-
#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
60+
#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
6161

6262
static inline void clean_pte_table(pte_t *pte)
6363
{

arch/arm64/include/asm/pgalloc.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@
2626

2727
#define check_pgt_cache() do { } while (0)
2828

29-
#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
29+
#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
3030
#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
3131

3232
#if CONFIG_PGTABLE_LEVELS > 2

arch/powerpc/include/asm/pgalloc.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp)
1818
}
1919
#endif /* MODULE */
2020

21-
#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
21+
#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
2222

2323
#ifdef CONFIG_PPC_BOOK3S
2424
#include <asm/book3s/pgalloc.h>

arch/sh/kernel/dwarf.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1172,11 +1172,11 @@ static int __init dwarf_unwinder_init(void)
11721172

11731173
dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
11741174
sizeof(struct dwarf_frame), 0,
1175-
SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
1175+
SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL);
11761176

11771177
dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
11781178
sizeof(struct dwarf_reg), 0,
1179-
SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
1179+
SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL);
11801180

11811181
dwarf_frame_pool = mempool_create_slab_pool(DWARF_FRAME_MIN_REQ,
11821182
dwarf_frame_cachep);

arch/sh/kernel/process.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ void arch_task_cache_init(void)
5959

6060
task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
6161
__alignof__(union thread_xstate),
62-
SLAB_PANIC | SLAB_NOTRACK, NULL);
62+
SLAB_PANIC, NULL);
6363
}
6464

6565
#ifdef CONFIG_SH_FPU_EMU

arch/sparc/mm/init_64.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2927,7 +2927,7 @@ void __flush_tlb_all(void)
29272927
pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
29282928
unsigned long address)
29292929
{
2930-
struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
2930+
struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
29312931
pte_t *pte = NULL;
29322932

29332933
if (page)
@@ -2939,7 +2939,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
29392939
pgtable_t pte_alloc_one(struct mm_struct *mm,
29402940
unsigned long address)
29412941
{
2942-
struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
2942+
struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
29432943
if (!page)
29442944
return NULL;
29452945
if (!pgtable_page_ctor(page)) {

arch/unicore32/include/asm/pgalloc.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
2828
#define pgd_alloc(mm) get_pgd_slow(mm)
2929
#define pgd_free(mm, pgd) free_pgd_slow(mm, pgd)
3030

31-
#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
31+
#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
3232

3333
/*
3434
* Allocate one PTE table.

arch/x86/kernel/espfix_64.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,7 @@
5757
# error "Need more virtual address space for the ESPFIX hack"
5858
#endif
5959

60-
#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
60+
#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
6161

6262
/* This contains the *bottom* address of the espfix stack */
6363
DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);

arch/x86/mm/init.c

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -92,8 +92,7 @@ __ref void *alloc_low_pages(unsigned int num)
9292
unsigned int order;
9393

9494
order = get_order((unsigned long)num << PAGE_SHIFT);
95-
return (void *)__get_free_pages(GFP_ATOMIC | __GFP_NOTRACK |
96-
__GFP_ZERO, order);
95+
return (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
9796
}
9897

9998
if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {

arch/x86/mm/init_64.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -184,7 +184,7 @@ static __ref void *spp_getpage(void)
184184
void *ptr;
185185

186186
if (after_bootmem)
187-
ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
187+
ptr = (void *) get_zeroed_page(GFP_ATOMIC);
188188
else
189189
ptr = alloc_bootmem_pages(PAGE_SIZE);
190190

arch/x86/mm/pageattr.c

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -753,7 +753,7 @@ static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
753753

754754
if (!debug_pagealloc_enabled())
755755
spin_unlock(&cpa_lock);
756-
base = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);
756+
base = alloc_pages(GFP_KERNEL, 0);
757757
if (!debug_pagealloc_enabled())
758758
spin_lock(&cpa_lock);
759759
if (!base)
@@ -904,7 +904,7 @@ static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
904904

905905
static int alloc_pte_page(pmd_t *pmd)
906906
{
907-
pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
907+
pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
908908
if (!pte)
909909
return -1;
910910

@@ -914,7 +914,7 @@ static int alloc_pte_page(pmd_t *pmd)
914914

915915
static int alloc_pmd_page(pud_t *pud)
916916
{
917-
pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
917+
pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
918918
if (!pmd)
919919
return -1;
920920

@@ -1120,7 +1120,7 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
11201120
pgd_entry = cpa->pgd + pgd_index(addr);
11211121

11221122
if (pgd_none(*pgd_entry)) {
1123-
p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
1123+
p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
11241124
if (!p4d)
11251125
return -1;
11261126

@@ -1132,7 +1132,7 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
11321132
*/
11331133
p4d = p4d_offset(pgd_entry, addr);
11341134
if (p4d_none(*p4d)) {
1135-
pud = (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
1135+
pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
11361136
if (!pud)
11371137
return -1;
11381138

arch/x86/mm/pgtable.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
#include <asm/fixmap.h>
88
#include <asm/mtrr.h>
99

10-
#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | __GFP_ZERO)
10+
#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)
1111

1212
#ifdef CONFIG_HIGHPTE
1313
#define PGALLOC_USER_GFP __GFP_HIGHMEM

arch/x86/platform/efi/efi_64.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -207,7 +207,7 @@ int __init efi_alloc_page_tables(void)
207207
if (efi_enabled(EFI_OLD_MEMMAP))
208208
return 0;
209209

210-
gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO;
210+
gfp_mask = GFP_KERNEL | __GFP_ZERO;
211211
efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
212212
if (!efi_pgd)
213213
return -ENOMEM;

crypto/xor.c

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -122,12 +122,7 @@ calibrate_xor_blocks(void)
122122
goto out;
123123
}
124124

125-
/*
126-
* Note: Since the memory is not actually used for _anything_ but to
127-
* test the XOR speed, we don't really want kmemcheck to warn about
128-
* reading uninitialized bytes here.
129-
*/
130-
b1 = (void *) __get_free_pages(GFP_KERNEL | __GFP_NOTRACK, 2);
125+
b1 = (void *) __get_free_pages(GFP_KERNEL, 2);
131126
if (!b1) {
132127
printk(KERN_WARNING "xor: Yikes! No memory available.\n");
133128
return -ENOMEM;

include/linux/thread_info.h

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -44,10 +44,9 @@ enum {
4444
#endif
4545

4646
#if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK)
47-
# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \
48-
__GFP_ZERO)
47+
# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)
4948
#else
50-
# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK)
49+
# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT)
5150
#endif
5251

5352
/*

init/do_mounts.c

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -380,8 +380,7 @@ static int __init do_mount_root(char *name, char *fs, int flags, void *data)
380380

381381
void __init mount_block_root(char *name, int flags)
382382
{
383-
struct page *page = alloc_page(GFP_KERNEL |
384-
__GFP_NOTRACK_FALSE_POSITIVE);
383+
struct page *page = alloc_page(GFP_KERNEL);
385384
char *fs_names = page_address(page);
386385
char *p;
387386
#ifdef CONFIG_BLOCK

kernel/fork.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -469,7 +469,7 @@ void __init fork_init(void)
469469
/* create a slab on which task_structs can be allocated */
470470
task_struct_cachep = kmem_cache_create("task_struct",
471471
arch_task_struct_size, align,
472-
SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT, NULL);
472+
SLAB_PANIC|SLAB_ACCOUNT, NULL);
473473
#endif
474474

475475
/* do the arch specific task caches init */
@@ -2205,18 +2205,18 @@ void __init proc_caches_init(void)
22052205
sighand_cachep = kmem_cache_create("sighand_cache",
22062206
sizeof(struct sighand_struct), 0,
22072207
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
2208-
SLAB_NOTRACK|SLAB_ACCOUNT, sighand_ctor);
2208+
SLAB_ACCOUNT, sighand_ctor);
22092209
signal_cachep = kmem_cache_create("signal_cache",
22102210
sizeof(struct signal_struct), 0,
2211-
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
2211+
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
22122212
NULL);
22132213
files_cachep = kmem_cache_create("files_cache",
22142214
sizeof(struct files_struct), 0,
2215-
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
2215+
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
22162216
NULL);
22172217
fs_cachep = kmem_cache_create("fs_cache",
22182218
sizeof(struct fs_struct), 0,
2219-
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
2219+
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
22202220
NULL);
22212221
/*
22222222
* FIXME! The "sizeof(struct mm_struct)" currently includes the
@@ -2227,7 +2227,7 @@ void __init proc_caches_init(void)
22272227
*/
22282228
mm_cachep = kmem_cache_create("mm_struct",
22292229
sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
2230-
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
2230+
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
22312231
NULL);
22322232
vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
22332233
mmap_init();

kernel/signal.c

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1036,8 +1036,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
10361036
else
10371037
override_rlimit = 0;
10381038

1039-
q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
1040-
override_rlimit);
1039+
q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
10411040
if (q) {
10421041
list_add_tail(&q->list, &pending->list);
10431042
switch ((unsigned long) info) {

mm/kmemcheck.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
1818
* With kmemcheck enabled, we need to allocate a memory area for the
1919
* shadow bits as well.
2020
*/
21-
shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
21+
shadow = alloc_pages_node(node, flags, order);
2222
if (!shadow) {
2323
if (printk_ratelimit())
2424
pr_err("kmemcheck: failed to allocate shadow bitmap\n");

mm/slab.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1410,7 +1410,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
14101410

14111411
flags |= cachep->allocflags;
14121412

1413-
page = __alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
1413+
page = __alloc_pages_node(nodeid, flags, cachep->gfporder);
14141414
if (!page) {
14151415
slab_out_of_memory(cachep, flags, nodeid);
14161416
return NULL;

mm/slab.h

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -141,10 +141,10 @@ static inline slab_flags_t kmem_cache_flags(unsigned long object_size,
141141
#if defined(CONFIG_SLAB)
142142
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
143143
SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
144-
SLAB_NOTRACK | SLAB_ACCOUNT)
144+
SLAB_ACCOUNT)
145145
#elif defined(CONFIG_SLUB)
146146
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
147-
SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
147+
SLAB_TEMPORARY | SLAB_ACCOUNT)
148148
#else
149149
#define SLAB_CACHE_FLAGS (0)
150150
#endif
@@ -163,7 +163,6 @@ static inline slab_flags_t kmem_cache_flags(unsigned long object_size,
163163
SLAB_NOLEAKTRACE | \
164164
SLAB_RECLAIM_ACCOUNT | \
165165
SLAB_TEMPORARY | \
166-
SLAB_NOTRACK | \
167166
SLAB_ACCOUNT)
168167

169168
int __kmem_cache_shutdown(struct kmem_cache *);

mm/slab_common.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
4444
SLAB_FAILSLAB | SLAB_KASAN)
4545

4646
#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
47-
SLAB_NOTRACK | SLAB_ACCOUNT)
47+
SLAB_ACCOUNT)
4848

4949
/*
5050
* Merge control. If this is set then no merging of slab caches will occur.

mm/slub.c

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1436,8 +1436,6 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
14361436
struct page *page;
14371437
int order = oo_order(oo);
14381438

1439-
flags |= __GFP_NOTRACK;
1440-
14411439
if (node == NUMA_NO_NODE)
14421440
page = alloc_pages(flags, order);
14431441
else
@@ -3774,7 +3772,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
37743772
struct page *page;
37753773
void *ptr = NULL;
37763774

3777-
flags |= __GFP_COMP | __GFP_NOTRACK;
3775+
flags |= __GFP_COMP;
37783776
page = alloc_pages_node(node, flags, get_order(size));
37793777
if (page)
37803778
ptr = page_address(page);

0 commit comments

Comments (0)