Skip to content

Commit 5905429

Browse files
committed
fork: Provide usercopy whitelisting for task_struct
While the blocked and saved_sigmask fields of task_struct are copied to userspace (via sigmask_to_save() and setup_rt_frame()), they are always copied with a static length (i.e. sizeof(sigset_t)). The only portion of task_struct that is potentially dynamically sized and may be copied to userspace is the architecture-specific thread_struct at the end of task_struct.

Cache object allocation, kernel/fork.c:
  alloc_task_struct_node(...): return kmem_cache_alloc_node(task_struct_cachep, ...);
  dup_task_struct(...): ... tsk = alloc_task_struct_node(node);
  copy_process(...): ... dup_task_struct(...)
  _do_fork(...): ... copy_process(...)

Example usage trace:
  arch/x86/kernel/fpu/signal.c:
    __fpu__restore_sig(...): ... struct task_struct *tsk = current; struct fpu *fpu = &tsk->thread.fpu; ... __copy_from_user(&fpu->state.xsave, ..., state_size);
    fpu__restore_sig(...): ... return __fpu__restore_sig(...);
  arch/x86/kernel/signal.c:
    restore_sigcontext(...): ... fpu__restore_sig(...)

This introduces arch_thread_struct_whitelist() to let an architecture declare specifically where the whitelist should be within thread_struct. If undefined, the entire thread_struct field is left whitelisted.

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Laura Abbott <labbott@redhat.com>
Cc: "Mickaël Salaün" <mic@digikod.net>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Kees Cook <keescook@chromium.org>
Acked-by: Rik van Riel <riel@redhat.com>
1 parent f9d2994 commit 5905429

File tree

3 files changed

+45
-2
lines changed

3 files changed

+45
-2
lines changed

arch/Kconfig

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -242,6 +242,17 @@ config ARCH_INIT_TASK
242242
config ARCH_TASK_STRUCT_ALLOCATOR
243243
bool
244244

245+
config HAVE_ARCH_THREAD_STRUCT_WHITELIST
	bool
	depends on !ARCH_TASK_STRUCT_ALLOCATOR
	help
	  An architecture should select this to provide hardened usercopy
	  knowledge about what region of the thread_struct should be
	  whitelisted for copying to userspace. Normally this is only the
	  FPU registers. Specifically, arch_thread_struct_whitelist()
	  should be implemented. Without this, the entire thread_struct
	  field in task_struct will be left whitelisted.
255+
245256
# Select if arch has its private alloc_thread_stack() function
246257
config ARCH_THREAD_STACK_ALLOCATOR
247258
bool

include/linux/sched/task.h

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -104,6 +104,20 @@ extern int arch_task_struct_size __read_mostly;
104104
# define arch_task_struct_size (sizeof(struct task_struct))
105105
#endif
106106

107+
#ifndef CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST
108+
/*
109+
* If an architecture has not declared a thread_struct whitelist we
110+
* must assume something there may need to be copied to userspace.
111+
*/
112+
static inline void arch_thread_struct_whitelist(unsigned long *offset,
113+
unsigned long *size)
114+
{
115+
*offset = 0;
116+
/* Handle dynamically sized thread_struct. */
117+
*size = arch_task_struct_size - offsetof(struct task_struct, thread);
118+
}
119+
#endif
120+
107121
#ifdef CONFIG_VMAP_STACK
108122
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
109123
{

kernel/fork.c

Lines changed: 20 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -458,6 +458,21 @@ static void set_max_threads(unsigned int max_threads_suggested)
458458
int arch_task_struct_size __read_mostly;
459459
#endif
460460

461+
static void task_struct_whitelist(unsigned long *offset, unsigned long *size)
462+
{
463+
/* Fetch thread_struct whitelist for the architecture. */
464+
arch_thread_struct_whitelist(offset, size);
465+
466+
/*
467+
* Handle zero-sized whitelist or empty thread_struct, otherwise
468+
* adjust offset to position of thread_struct in task_struct.
469+
*/
470+
if (unlikely(*size == 0))
471+
*offset = 0;
472+
else
473+
*offset += offsetof(struct task_struct, thread);
474+
}
475+
461476
void __init fork_init(void)
462477
{
463478
int i;
@@ -466,11 +481,14 @@ void __init fork_init(void)
466481
#define ARCH_MIN_TASKALIGN 0
467482
#endif
468483
int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN);
484+
unsigned long useroffset, usersize;
469485

470486
/* create a slab on which task_structs can be allocated */
471-
task_struct_cachep = kmem_cache_create("task_struct",
487+
task_struct_whitelist(&useroffset, &usersize);
488+
task_struct_cachep = kmem_cache_create_usercopy("task_struct",
472489
arch_task_struct_size, align,
473-
SLAB_PANIC|SLAB_ACCOUNT, NULL);
490+
SLAB_PANIC|SLAB_ACCOUNT,
491+
useroffset, usersize, NULL);
474492
#endif
475493

476494
/* do the arch specific task caches init */

0 commit comments

Comments
 (0)