
Commit e47e311

lkdtm: Update usercopy tests for whitelisting
This updates the USERCOPY_HEAP_FLAG_* tests to USERCOPY_HEAP_WHITELIST_*, since the final form of usercopy whitelisting ended up using an offset/size window instead of the earlier proposed allocation flags.

Signed-off-by: Kees Cook <keescook@chromium.org>
1 parent 6d07d1c
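For context, the offset/size window mentioned in the commit message is the one declared through kmem_cache_create_usercopy(): with hardened usercopy enabled, only the declared byte range of each slab object may be copied to or from user space. The sketch below is illustrative and not part of this commit; the struct, cache, and function names (example_obj, example_cache, example_cache_init, EXAMPLE_DATA_LEN) are made up, while kmem_cache_create_usercopy() is the real API the updated test uses.

#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/errno.h>

#define EXAMPLE_DATA_LEN 64

/* Illustrative object: only "data" should ever be copied to/from user space. */
struct example_obj {
        unsigned long internal_state;   /* never exposed to user space */
        char data[EXAMPLE_DATA_LEN];    /* the whitelisted usercopy window */
};

static struct kmem_cache *example_cache;

static int __init example_cache_init(void)
{
        /* Arguments 5 and 6 declare the whitelist window: offset, then size. */
        example_cache = kmem_cache_create_usercopy("example-cache",
                                        sizeof(struct example_obj), 0, 0,
                                        offsetof(struct example_obj, data),
                                        EXAMPLE_DATA_LEN, NULL);
        return example_cache ? 0 : -ENOMEM;
}

With a cache declared like this, copy_to_user()/copy_from_user() against obj->data stays inside the window, while a copy that reaches internal_state should be rejected under CONFIG_HARDENED_USERCOPY.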

3 files changed, 53 insertions(+), 43 deletions(-)

drivers/misc/lkdtm.h

Lines changed: 2 additions & 2 deletions
@@ -76,8 +76,8 @@ void __init lkdtm_usercopy_init(void);
 void __exit lkdtm_usercopy_exit(void);
 void lkdtm_USERCOPY_HEAP_SIZE_TO(void);
 void lkdtm_USERCOPY_HEAP_SIZE_FROM(void);
-void lkdtm_USERCOPY_HEAP_FLAG_TO(void);
-void lkdtm_USERCOPY_HEAP_FLAG_FROM(void);
+void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void);
+void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void);
 void lkdtm_USERCOPY_STACK_FRAME_TO(void);
 void lkdtm_USERCOPY_STACK_FRAME_FROM(void);
 void lkdtm_USERCOPY_STACK_BEYOND(void);

drivers/misc/lkdtm_core.c

Lines changed: 2 additions & 2 deletions
@@ -177,8 +177,8 @@ static const struct crashtype crashtypes[] = {
        CRASHTYPE(ATOMIC_TIMING),
        CRASHTYPE(USERCOPY_HEAP_SIZE_TO),
        CRASHTYPE(USERCOPY_HEAP_SIZE_FROM),
-       CRASHTYPE(USERCOPY_HEAP_FLAG_TO),
-       CRASHTYPE(USERCOPY_HEAP_FLAG_FROM),
+       CRASHTYPE(USERCOPY_HEAP_WHITELIST_TO),
+       CRASHTYPE(USERCOPY_HEAP_WHITELIST_FROM),
        CRASHTYPE(USERCOPY_STACK_FRAME_TO),
        CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
        CRASHTYPE(USERCOPY_STACK_BEYOND),

drivers/misc/lkdtm_usercopy.c

Lines changed: 49 additions & 39 deletions
@@ -20,7 +20,7 @@
  */
 static volatile size_t unconst = 0;
 static volatile size_t cache_size = 1024;
-static struct kmem_cache *bad_cache;
+static struct kmem_cache *whitelist_cache;
 
 static const unsigned char test_text[] = "This is a test.\n";
 
@@ -115,6 +115,10 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
        vm_munmap(user_addr, PAGE_SIZE);
 }
 
+/*
+ * This checks for whole-object size validation with hardened usercopy,
+ * with or without usercopy whitelisting.
+ */
 static void do_usercopy_heap_size(bool to_user)
 {
        unsigned long user_addr;
@@ -177,77 +181,79 @@ static void do_usercopy_heap_size(bool to_user)
        kfree(two);
 }
 
-static void do_usercopy_heap_flag(bool to_user)
+/*
+ * This checks for the specific whitelist window within an object. If this
+ * test passes, then do_usercopy_heap_size() tests will pass too.
+ */
+static void do_usercopy_heap_whitelist(bool to_user)
 {
-       unsigned long user_addr;
-       unsigned char *good_buf = NULL;
-       unsigned char *bad_buf = NULL;
+       unsigned long user_alloc;
+       unsigned char *buf = NULL;
+       unsigned char __user *user_addr;
+       size_t offset, size;
 
        /* Make sure cache was prepared. */
-       if (!bad_cache) {
+       if (!whitelist_cache) {
                pr_warn("Failed to allocate kernel cache\n");
                return;
        }
 
        /*
-        * Allocate one buffer from each cache (kmalloc will have the
-        * SLAB_USERCOPY flag already, but "bad_cache" won't).
+        * Allocate a buffer with a whitelisted window in the buffer.
         */
-       good_buf = kmalloc(cache_size, GFP_KERNEL);
-       bad_buf = kmem_cache_alloc(bad_cache, GFP_KERNEL);
-       if (!good_buf || !bad_buf) {
-               pr_warn("Failed to allocate buffers from caches\n");
+       buf = kmem_cache_alloc(whitelist_cache, GFP_KERNEL);
+       if (!buf) {
+               pr_warn("Failed to allocate buffer from whitelist cache\n");
                goto free_alloc;
        }
 
        /* Allocate user memory we'll poke at. */
-       user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+       user_alloc = vm_mmap(NULL, 0, PAGE_SIZE,
                            PROT_READ | PROT_WRITE | PROT_EXEC,
                            MAP_ANONYMOUS | MAP_PRIVATE, 0);
-       if (user_addr >= TASK_SIZE) {
+       if (user_alloc >= TASK_SIZE) {
                pr_warn("Failed to allocate user memory\n");
                goto free_alloc;
        }
+       user_addr = (void __user *)user_alloc;
 
-       memset(good_buf, 'A', cache_size);
-       memset(bad_buf, 'B', cache_size);
+       memset(buf, 'B', cache_size);
+
+       /* Whitelisted window in buffer, from kmem_cache_create_usercopy. */
+       offset = (cache_size / 4) + unconst;
+       size = (cache_size / 16) + unconst;
 
        if (to_user) {
-               pr_info("attempting good copy_to_user with SLAB_USERCOPY\n");
-               if (copy_to_user((void __user *)user_addr, good_buf,
-                                cache_size)) {
+               pr_info("attempting good copy_to_user inside whitelist\n");
+               if (copy_to_user(user_addr, buf + offset, size)) {
                        pr_warn("copy_to_user failed unexpectedly?!\n");
                        goto free_user;
                }
 
-               pr_info("attempting bad copy_to_user w/o SLAB_USERCOPY\n");
-               if (copy_to_user((void __user *)user_addr, bad_buf,
-                                cache_size)) {
+               pr_info("attempting bad copy_to_user outside whitelist\n");
+               if (copy_to_user(user_addr, buf + offset - 1, size)) {
                        pr_warn("copy_to_user failed, but lacked Oops\n");
                        goto free_user;
                }
        } else {
-               pr_info("attempting good copy_from_user with SLAB_USERCOPY\n");
-               if (copy_from_user(good_buf, (void __user *)user_addr,
-                                  cache_size)) {
+               pr_info("attempting good copy_from_user inside whitelist\n");
+               if (copy_from_user(buf + offset, user_addr, size)) {
                        pr_warn("copy_from_user failed unexpectedly?!\n");
                        goto free_user;
                }
 
-               pr_info("attempting bad copy_from_user w/o SLAB_USERCOPY\n");
-               if (copy_from_user(bad_buf, (void __user *)user_addr,
-                                  cache_size)) {
+               pr_info("attempting bad copy_from_user outside whitelist\n");
+               if (copy_from_user(buf + offset - 1, user_addr, size)) {
                        pr_warn("copy_from_user failed, but lacked Oops\n");
                        goto free_user;
                }
        }
 
 free_user:
-       vm_munmap(user_addr, PAGE_SIZE);
+       vm_munmap(user_alloc, PAGE_SIZE);
 free_alloc:
-       if (bad_buf)
-               kmem_cache_free(bad_cache, bad_buf);
-       kfree(good_buf);
+       if (buf)
+               kmem_cache_free(whitelist_cache, buf);
 }
 
 /* Callable tests. */
@@ -261,14 +267,14 @@ void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
        do_usercopy_heap_size(false);
 }
 
-void lkdtm_USERCOPY_HEAP_FLAG_TO(void)
+void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void)
 {
-       do_usercopy_heap_flag(true);
+       do_usercopy_heap_whitelist(true);
 }
 
-void lkdtm_USERCOPY_HEAP_FLAG_FROM(void)
+void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void)
 {
-       do_usercopy_heap_flag(false);
+       do_usercopy_heap_whitelist(false);
 }
 
 void lkdtm_USERCOPY_STACK_FRAME_TO(void)
@@ -319,11 +325,15 @@ void lkdtm_USERCOPY_KERNEL(void)
 void __init lkdtm_usercopy_init(void)
 {
        /* Prepare cache that lacks SLAB_USERCOPY flag. */
-       bad_cache = kmem_cache_create("lkdtm-no-usercopy", cache_size, 0,
-                                     0, NULL);
+       whitelist_cache =
+               kmem_cache_create_usercopy("lkdtm-usercopy", cache_size,
+                                          0, 0,
+                                          cache_size / 4,
+                                          cache_size / 16,
+                                          NULL);
 }
 
 void __exit lkdtm_usercopy_exit(void)
 {
-       kmem_cache_destroy(bad_cache);
+       kmem_cache_destroy(whitelist_cache);
 }
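For reference (not part of the diff), this is roughly the bounds check that hardened usercopy applies to a whitelisted cache; the real logic lives in mm/usercopy.c and the slab allocators, and the helper name below is purely illustrative:

#include <linux/types.h>

/*
 * Illustrative only: a copy of "copy_len" bytes starting at "obj_offset"
 * within an object is allowed only if it falls entirely inside the
 * whitelist window [useroffset, useroffset + usersize).
 */
static inline bool example_in_whitelist(unsigned long obj_offset,
                                        unsigned long copy_len,
                                        unsigned long useroffset,
                                        unsigned long usersize)
{
        return obj_offset >= useroffset &&
               copy_len <= usersize &&
               obj_offset - useroffset <= usersize - copy_len;
}

With the default cache_size of 1024, the cache created in lkdtm_usercopy_init() whitelists a 64-byte window at offset 256, i.e. bytes [256, 320) of each object. The "good" copies in do_usercopy_heap_whitelist() stay inside that window, while the "bad" copies start at offset 255, one byte before it, so a kernel built with CONFIG_HARDENED_USERCOPY should reject them with an Oops. As with the other lkdtm crash types, the new tests are normally triggered by writing their names (USERCOPY_HEAP_WHITELIST_TO / USERCOPY_HEAP_WHITELIST_FROM) to lkdtm's DIRECT interface under debugfs, /sys/kernel/debug/provoke-crash/DIRECT.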

0 commit comments
