
Commit 617aebe

Merge tag 'usercopy-v4.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
Pull hardened usercopy whitelisting from Kees Cook:

 "Currently, hardened usercopy performs dynamic bounds checking on slab cache objects. This is good, but still leaves a lot of kernel memory available to be copied to/from userspace in the face of bugs. To further restrict what memory is available for copying, this creates a way to whitelist specific areas of a given slab cache object for copying to/from userspace, allowing much finer granularity of access control. Slab caches that are never exposed to userspace can declare no whitelist for their objects, thereby keeping them unavailable to userspace via dynamic copy operations. (Note, an implicit form of whitelisting is the use of constant sizes in usercopy operations and get_user()/put_user(); these bypass all hardened usercopy checks since these sizes cannot change at runtime.)

  This new check is WARN-by-default, so any mistakes can be found over the next several releases without breaking anyone's system.

  The series has roughly the following sections:

   - remove %p and improve reporting with offset
   - prepare infrastructure and whitelist kmalloc
   - update VFS subsystem with whitelists
   - update SCSI subsystem with whitelists
   - update network subsystem with whitelists
   - update process memory with whitelists
   - update per-architecture thread_struct with whitelists
   - update KVM with whitelists and fix ioctl bug
   - mark all other allocations as not whitelisted
   - update lkdtm for more sensible test overage"

* tag 'usercopy-v4.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux: (38 commits)
  lkdtm: Update usercopy tests for whitelisting
  usercopy: Restrict non-usercopy caches to size 0
  kvm: x86: fix KVM_XEN_HVM_CONFIG ioctl
  kvm: whitelist struct kvm_vcpu_arch
  arm: Implement thread_struct whitelist for hardened usercopy
  arm64: Implement thread_struct whitelist for hardened usercopy
  x86: Implement thread_struct whitelist for hardened usercopy
  fork: Provide usercopy whitelisting for task_struct
  fork: Define usercopy region in thread_stack slab caches
  fork: Define usercopy region in mm_struct slab caches
  net: Restrict unwhitelisted proto caches to size 0
  sctp: Copy struct sctp_sock.autoclose to userspace using put_user()
  sctp: Define usercopy region in SCTP proto slab cache
  caif: Define usercopy region in caif proto slab cache
  ip: Define usercopy region in IP proto slab cache
  net: Define usercopy region in struct proto slab cache
  scsi: Define usercopy region in scsi_sense_cache slab cache
  cifs: Define usercopy region in cifs_request slab cache
  vxfs: Define usercopy region in vxfs_inode slab cache
  ufs: Define usercopy region in ufs_inode_cache slab cache
  ...
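For readers new to the API this series introduces: the central primitive is kmem_cache_create_usercopy(), which takes a useroffset/usersize pair describing the only byte range of each object that copy_to_user()/copy_from_user() may touch. The sketch below is purely illustrative; struct session, its fields, and session_cache_init() are made-up names for this example, not code from the series.

	struct session {
		u32 id;			/* kernel-internal */
		char name[64];		/* the only part userspace should see */
		void *private_data;	/* kernel-internal */
	};

	static struct kmem_cache *session_cache;

	static int __init session_cache_init(void)
	{
		/* Whitelist only 'name' for hardened usercopy checks. */
		session_cache = kmem_cache_create_usercopy("session_cache",
					sizeof(struct session),
					0,			/* align */
					SLAB_HWCACHE_ALIGN,	/* flags */
					offsetof(struct session, name),     /* useroffset */
					sizeof_field(struct session, name), /* usersize */
					NULL);			/* ctor */
		return session_cache ? 0 : -ENOMEM;
	}

A copy_to_user() or copy_from_user() whose kernel pointer falls outside 'name' then trips the (WARN-by-default) hardened usercopy check; copies that stay inside the window pass as before.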
2 parents 0771ad4 + e47e311 commit 617aebe

45 files changed: +515 / -215 lines changed


arch/Kconfig

Lines changed: 11 additions & 0 deletions
@@ -245,6 +245,17 @@ config ARCH_TASK_STRUCT_ON_STACK
 config ARCH_TASK_STRUCT_ALLOCATOR
 	bool
 
+config HAVE_ARCH_THREAD_STRUCT_WHITELIST
+	bool
+	depends on !ARCH_TASK_STRUCT_ALLOCATOR
+	help
+	  An architecture should select this to provide hardened usercopy
+	  knowledge about what region of the thread_struct should be
+	  whitelisted for copying to userspace. Normally this is only the
+	  FPU registers. Specifically, arch_thread_struct_whitelist()
+	  should be implemented. Without this, the entire thread_struct
+	  field in task_struct will be left whitelisted.
+
 # Select if arch has its private alloc_thread_stack() function
 config ARCH_THREAD_STACK_ALLOCATOR
 	bool
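For context on how this Kconfig hook is consumed: the task_struct slab cache setup in kernel/fork.c (from the "fork: Provide usercopy whitelisting for task_struct" commit in this series, not shown in this excerpt) translates the thread_struct-relative window reported by arch_thread_struct_whitelist() into a task_struct-relative one. A paraphrased sketch of that translation, not the verbatim diff:

	static void task_struct_whitelist(unsigned long *offset, unsigned long *size)
	{
		/* Ask the architecture which part of thread_struct is user-copied. */
		arch_thread_struct_whitelist(offset, size);

		/*
		 * An empty whitelist stays empty; otherwise shift the window by
		 * the position of thread_struct inside task_struct.
		 */
		if (*size == 0)
			*offset = 0;
		else
			*offset += offsetof(struct task_struct, thread);
	}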

arch/arm/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -51,6 +51,7 @@ config ARM
 	select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
 	select HAVE_ARCH_MMAP_RND_BITS if MMU
 	select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
+	select HAVE_ARCH_THREAD_STRUCT_WHITELIST
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARM_SMCCC if CPU_V7
 	select HAVE_EBPF_JIT if !CPU_ENDIAN_BE32

arch/arm/include/asm/processor.h

Lines changed: 10 additions & 0 deletions
@@ -45,6 +45,16 @@ struct thread_struct {
 	struct debug_info	debug;
 };
 
+/*
+ * Everything usercopied to/from thread_struct is statically-sized, so
+ * no hardened usercopy whitelist is needed.
+ */
+static inline void arch_thread_struct_whitelist(unsigned long *offset,
+						unsigned long *size)
+{
+	*offset = *size = 0;
+}
+
 #define INIT_THREAD  { }
 
 #define start_thread(regs,pc,sp)	\

arch/arm64/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -91,6 +91,7 @@ config ARM64
 	select HAVE_ARCH_MMAP_RND_BITS
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
 	select HAVE_ARCH_SECCOMP_FILTER
+	select HAVE_ARCH_THREAD_STRUCT_WHITELIST
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	select HAVE_ARCH_VMAP_STACK

arch/arm64/include/asm/processor.h

Lines changed: 10 additions & 0 deletions
@@ -113,6 +113,16 @@ struct thread_struct {
 	struct debug_info	debug;		/* debugging */
 };
 
+/*
+ * Everything usercopied to/from thread_struct is statically-sized, so
+ * no hardened usercopy whitelist is needed.
+ */
+static inline void arch_thread_struct_whitelist(unsigned long *offset,
+						unsigned long *size)
+{
+	*offset = *size = 0;
+}
+
 #ifdef CONFIG_COMPAT
 #define task_user_tls(t)	\
 ({	\

arch/x86/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -116,6 +116,7 @@ config X86
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS	if MMU && COMPAT
 	select HAVE_ARCH_COMPAT_MMAP_BASES	if MMU && COMPAT
 	select HAVE_ARCH_SECCOMP_FILTER
+	select HAVE_ARCH_THREAD_STRUCT_WHITELIST
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64

arch/x86/include/asm/processor.h

Lines changed: 8 additions & 0 deletions
@@ -507,6 +507,14 @@ struct thread_struct {
 	 */
 };
 
+/* Whitelist the FPU state from the task_struct for hardened usercopy. */
+static inline void arch_thread_struct_whitelist(unsigned long *offset,
+						unsigned long *size)
+{
+	*offset = offsetof(struct thread_struct, fpu.state);
+	*size = fpu_kernel_xstate_size;
+}
+
 /*
  * Thread-synchronous status.
 *
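On x86 the whitelisted window is the in-task FPU register save area (thread.fpu.state); its size is the boot-time-computed fpu_kernel_xstate_size rather than a compile-time constant, since the xstate layout depends on which CPU features are present. A usercopy into or out of any other part of task_struct will now be reported by the WARN-by-default check described in the pull request above.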

arch/x86/kvm/x86.c

Lines changed: 4 additions & 3 deletions
@@ -4237,13 +4237,14 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		mutex_unlock(&kvm->lock);
 		break;
 	case KVM_XEN_HVM_CONFIG: {
+		struct kvm_xen_hvm_config xhc;
 		r = -EFAULT;
-		if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
-				   sizeof(struct kvm_xen_hvm_config)))
+		if (copy_from_user(&xhc, argp, sizeof(xhc)))
 			goto out;
 		r = -EINVAL;
-		if (kvm->arch.xen_hvm_config.flags)
+		if (xhc.flags)
 			goto out;
+		memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc));
 		r = 0;
 		break;
 	}
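Besides introducing the on-stack struct, this hunk fixes a real bug in the KVM_XEN_HVM_CONFIG ioctl (the "kvm: x86: fix KVM_XEN_HVM_CONFIG ioctl" commit listed above): the old code copied the userspace argument directly into kvm->arch.xen_hvm_config and only then validated it, so a rejected request (non-zero flags) still left the unvalidated data in place. Copying into the local xhc first and committing it with memcpy() only after the checks pass closes that window, and it also keeps the copy_from_user() destination off the kvm slab object entirely.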

drivers/misc/lkdtm.h

Lines changed: 2 additions & 2 deletions
@@ -76,8 +76,8 @@ void __init lkdtm_usercopy_init(void);
 void __exit lkdtm_usercopy_exit(void);
 void lkdtm_USERCOPY_HEAP_SIZE_TO(void);
 void lkdtm_USERCOPY_HEAP_SIZE_FROM(void);
-void lkdtm_USERCOPY_HEAP_FLAG_TO(void);
-void lkdtm_USERCOPY_HEAP_FLAG_FROM(void);
+void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void);
+void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void);
 void lkdtm_USERCOPY_STACK_FRAME_TO(void);
 void lkdtm_USERCOPY_STACK_FRAME_FROM(void);
 void lkdtm_USERCOPY_STACK_BEYOND(void);

drivers/misc/lkdtm_core.c

Lines changed: 2 additions & 2 deletions
@@ -177,8 +177,8 @@ static const struct crashtype crashtypes[] = {
 	CRASHTYPE(ATOMIC_TIMING),
 	CRASHTYPE(USERCOPY_HEAP_SIZE_TO),
 	CRASHTYPE(USERCOPY_HEAP_SIZE_FROM),
-	CRASHTYPE(USERCOPY_HEAP_FLAG_TO),
-	CRASHTYPE(USERCOPY_HEAP_FLAG_FROM),
+	CRASHTYPE(USERCOPY_HEAP_WHITELIST_TO),
+	CRASHTYPE(USERCOPY_HEAP_WHITELIST_FROM),
 	CRASHTYPE(USERCOPY_STACK_FRAME_TO),
 	CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
 	CRASHTYPE(USERCOPY_STACK_BEYOND),

drivers/misc/lkdtm_usercopy.c

Lines changed: 58 additions & 43 deletions
@@ -20,7 +20,7 @@
  */
 static volatile size_t unconst = 0;
 static volatile size_t cache_size = 1024;
-static struct kmem_cache *bad_cache;
+static struct kmem_cache *whitelist_cache;
 
 static const unsigned char test_text[] = "This is a test.\n";
 
@@ -115,10 +115,16 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
 	vm_munmap(user_addr, PAGE_SIZE);
 }
 
+/*
+ * This checks for whole-object size validation with hardened usercopy,
+ * with or without usercopy whitelisting.
+ */
 static void do_usercopy_heap_size(bool to_user)
 {
 	unsigned long user_addr;
 	unsigned char *one, *two;
+	void __user *test_user_addr;
+	void *test_kern_addr;
 	size_t size = unconst + 1024;
 
 	one = kmalloc(size, GFP_KERNEL);
@@ -139,27 +145,30 @@ static void do_usercopy_heap_size(bool to_user)
 	memset(one, 'A', size);
 	memset(two, 'B', size);
 
+	test_user_addr = (void __user *)(user_addr + 16);
+	test_kern_addr = one + 16;
+
 	if (to_user) {
 		pr_info("attempting good copy_to_user of correct size\n");
-		if (copy_to_user((void __user *)user_addr, one, size)) {
+		if (copy_to_user(test_user_addr, test_kern_addr, size / 2)) {
 			pr_warn("copy_to_user failed unexpectedly?!\n");
 			goto free_user;
 		}
 
 		pr_info("attempting bad copy_to_user of too large size\n");
-		if (copy_to_user((void __user *)user_addr, one, 2 * size)) {
+		if (copy_to_user(test_user_addr, test_kern_addr, size)) {
 			pr_warn("copy_to_user failed, but lacked Oops\n");
 			goto free_user;
 		}
 	} else {
 		pr_info("attempting good copy_from_user of correct size\n");
-		if (copy_from_user(one, (void __user *)user_addr, size)) {
+		if (copy_from_user(test_kern_addr, test_user_addr, size / 2)) {
 			pr_warn("copy_from_user failed unexpectedly?!\n");
 			goto free_user;
 		}
 
 		pr_info("attempting bad copy_from_user of too large size\n");
-		if (copy_from_user(one, (void __user *)user_addr, 2 * size)) {
+		if (copy_from_user(test_kern_addr, test_user_addr, size)) {
 			pr_warn("copy_from_user failed, but lacked Oops\n");
 			goto free_user;
 		}
@@ -172,77 +181,79 @@ static void do_usercopy_heap_size(bool to_user)
 	kfree(two);
 }
 
-static void do_usercopy_heap_flag(bool to_user)
+/*
+ * This checks for the specific whitelist window within an object. If this
+ * test passes, then do_usercopy_heap_size() tests will pass too.
+ */
+static void do_usercopy_heap_whitelist(bool to_user)
 {
-	unsigned long user_addr;
-	unsigned char *good_buf = NULL;
-	unsigned char *bad_buf = NULL;
+	unsigned long user_alloc;
+	unsigned char *buf = NULL;
+	unsigned char __user *user_addr;
+	size_t offset, size;
 
 	/* Make sure cache was prepared. */
-	if (!bad_cache) {
+	if (!whitelist_cache) {
 		pr_warn("Failed to allocate kernel cache\n");
 		return;
 	}
 
 	/*
-	 * Allocate one buffer from each cache (kmalloc will have the
-	 * SLAB_USERCOPY flag already, but "bad_cache" won't).
+	 * Allocate a buffer with a whitelisted window in the buffer.
 	 */
-	good_buf = kmalloc(cache_size, GFP_KERNEL);
-	bad_buf = kmem_cache_alloc(bad_cache, GFP_KERNEL);
-	if (!good_buf || !bad_buf) {
-		pr_warn("Failed to allocate buffers from caches\n");
+	buf = kmem_cache_alloc(whitelist_cache, GFP_KERNEL);
+	if (!buf) {
+		pr_warn("Failed to allocate buffer from whitelist cache\n");
 		goto free_alloc;
 	}
 
 	/* Allocate user memory we'll poke at. */
-	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+	user_alloc = vm_mmap(NULL, 0, PAGE_SIZE,
 			    PROT_READ | PROT_WRITE | PROT_EXEC,
 			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
-	if (user_addr >= TASK_SIZE) {
+	if (user_alloc >= TASK_SIZE) {
 		pr_warn("Failed to allocate user memory\n");
 		goto free_alloc;
 	}
+	user_addr = (void __user *)user_alloc;
 
-	memset(good_buf, 'A', cache_size);
-	memset(bad_buf, 'B', cache_size);
+	memset(buf, 'B', cache_size);
+
+	/* Whitelisted window in buffer, from kmem_cache_create_usercopy. */
+	offset = (cache_size / 4) + unconst;
+	size = (cache_size / 16) + unconst;
 
 	if (to_user) {
-		pr_info("attempting good copy_to_user with SLAB_USERCOPY\n");
-		if (copy_to_user((void __user *)user_addr, good_buf,
-				 cache_size)) {
+		pr_info("attempting good copy_to_user inside whitelist\n");
+		if (copy_to_user(user_addr, buf + offset, size)) {
 			pr_warn("copy_to_user failed unexpectedly?!\n");
 			goto free_user;
 		}
 
-		pr_info("attempting bad copy_to_user w/o SLAB_USERCOPY\n");
-		if (copy_to_user((void __user *)user_addr, bad_buf,
-				 cache_size)) {
+		pr_info("attempting bad copy_to_user outside whitelist\n");
+		if (copy_to_user(user_addr, buf + offset - 1, size)) {
 			pr_warn("copy_to_user failed, but lacked Oops\n");
 			goto free_user;
 		}
 	} else {
-		pr_info("attempting good copy_from_user with SLAB_USERCOPY\n");
-		if (copy_from_user(good_buf, (void __user *)user_addr,
-				   cache_size)) {
+		pr_info("attempting good copy_from_user inside whitelist\n");
+		if (copy_from_user(buf + offset, user_addr, size)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
 			goto free_user;
 		}
 
-		pr_info("attempting bad copy_from_user w/o SLAB_USERCOPY\n");
-		if (copy_from_user(bad_buf, (void __user *)user_addr,
-				   cache_size)) {
+		pr_info("attempting bad copy_from_user outside whitelist\n");
+		if (copy_from_user(buf + offset - 1, user_addr, size)) {
 			pr_warn("copy_from_user failed, but lacked Oops\n");
 			goto free_user;
 		}
 	}
 
 free_user:
-	vm_munmap(user_addr, PAGE_SIZE);
+	vm_munmap(user_alloc, PAGE_SIZE);
 free_alloc:
-	if (bad_buf)
-		kmem_cache_free(bad_cache, bad_buf);
-	kfree(good_buf);
+	if (buf)
+		kmem_cache_free(whitelist_cache, buf);
 }
 
 /* Callable tests. */
@@ -256,14 +267,14 @@ void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
 	do_usercopy_heap_size(false);
 }
 
-void lkdtm_USERCOPY_HEAP_FLAG_TO(void)
+void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void)
 {
-	do_usercopy_heap_flag(true);
+	do_usercopy_heap_whitelist(true);
 }
 
-void lkdtm_USERCOPY_HEAP_FLAG_FROM(void)
+void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void)
 {
-	do_usercopy_heap_flag(false);
+	do_usercopy_heap_whitelist(false);
 }
 
 void lkdtm_USERCOPY_STACK_FRAME_TO(void)
@@ -314,11 +325,15 @@ void lkdtm_USERCOPY_KERNEL(void)
 void __init lkdtm_usercopy_init(void)
 {
 	/* Prepare cache that lacks SLAB_USERCOPY flag. */
-	bad_cache = kmem_cache_create("lkdtm-no-usercopy", cache_size, 0,
-				      0, NULL);
+	whitelist_cache =
+		kmem_cache_create_usercopy("lkdtm-usercopy", cache_size,
+					   0, 0,
+					   cache_size / 4,
+					   cache_size / 16,
+					   NULL);
 }
 
 void __exit lkdtm_usercopy_exit(void)
 {
-	kmem_cache_destroy(bad_cache);
+	kmem_cache_destroy(whitelist_cache);
 }
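With the default cache_size of 1024, the cache created in lkdtm_usercopy_init() whitelists bytes 256-319 of each object (useroffset = cache_size / 4 = 256, usersize = cache_size / 16 = 64). The "good" tests copy exactly that window; the "bad" tests start one byte earlier, at offset 255, so the copy crosses the whitelist boundary and should trigger the hardened usercopy report. (The unchanged context comment about SLAB_USERCOPY is now stale, since the new cache is defined by its whitelist window rather than by that flag.) As with the other lkdtm crash types, these are exercised by writing the test name, e.g. USERCOPY_HEAP_WHITELIST_TO, to lkdtm's provoke-crash debugfs interface.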

drivers/scsi/scsi_lib.c

Lines changed: 5 additions & 4 deletions
@@ -79,14 +79,15 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
 	if (shost->unchecked_isa_dma) {
 		scsi_sense_isadma_cache =
 			kmem_cache_create("scsi_sense_cache(DMA)",
-			SCSI_SENSE_BUFFERSIZE, 0,
-			SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
+				SCSI_SENSE_BUFFERSIZE, 0,
+				SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
 		if (!scsi_sense_isadma_cache)
 			ret = -ENOMEM;
 	} else {
 		scsi_sense_cache =
-			kmem_cache_create("scsi_sense_cache",
-			SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
+			kmem_cache_create_usercopy("scsi_sense_cache",
+				SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN,
+				0, SCSI_SENSE_BUFFERSIZE, NULL);
 		if (!scsi_sense_cache)
 			ret = -ENOMEM;
 	}
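This is the whole-object pattern: sense buffers are copied to userspace in their entirety, so the whitelist covers the full object (useroffset 0, usersize SCSI_SENSE_BUFFERSIZE). The befs conversion below shows the narrower pattern, whitelisting only the embedded symlink buffer by bounding the window with offsetof() and sizeof_field() on i_data.symlink while leaving the rest of the inode off-limits to dynamic usercopy.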

fs/befs/linuxvfs.c

Lines changed: 9 additions & 5 deletions
@@ -444,11 +444,15 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
 static int __init
 befs_init_inodecache(void)
 {
-	befs_inode_cachep = kmem_cache_create("befs_inode_cache",
-					      sizeof (struct befs_inode_info),
-					      0, (SLAB_RECLAIM_ACCOUNT|
-						SLAB_MEM_SPREAD|SLAB_ACCOUNT),
-					      init_once);
+	befs_inode_cachep = kmem_cache_create_usercopy("befs_inode_cache",
+				sizeof(struct befs_inode_info), 0,
+				(SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
+					SLAB_ACCOUNT),
+				offsetof(struct befs_inode_info,
+					i_data.symlink),
+				sizeof_field(struct befs_inode_info,
+					i_data.symlink),
+				init_once);
 	if (befs_inode_cachep == NULL)
 		return -ENOMEM;
 