  */
 static volatile size_t unconst = 0;
 static volatile size_t cache_size = 1024;
-static struct kmem_cache *bad_cache;
+static struct kmem_cache *whitelist_cache;
 
 static const unsigned char test_text[] = "This is a test.\n";
 
@@ -115,6 +115,10 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
 	vm_munmap(user_addr, PAGE_SIZE);
 }
 
+/*
+ * This checks for whole-object size validation with hardened usercopy,
+ * with or without usercopy whitelisting.
+ */
 static void do_usercopy_heap_size(bool to_user)
 {
 	unsigned long user_addr;
@@ -177,77 +181,79 @@ static void do_usercopy_heap_size(bool to_user)
177
181
kfree (two );
178
182
}
179
183
180
- static void do_usercopy_heap_flag (bool to_user )
184
+ /*
185
+ * This checks for the specific whitelist window within an object. If this
186
+ * test passes, then do_usercopy_heap_size() tests will pass too.
187
+ */
188
+ static void do_usercopy_heap_whitelist (bool to_user )
181
189
{
182
- unsigned long user_addr ;
183
- unsigned char * good_buf = NULL ;
184
- unsigned char * bad_buf = NULL ;
190
+ unsigned long user_alloc ;
191
+ unsigned char * buf = NULL ;
192
+ unsigned char __user * user_addr ;
193
+ size_t offset , size ;
185
194
186
195
/* Make sure cache was prepared. */
187
- if (!bad_cache ) {
196
+ if (!whitelist_cache ) {
188
197
pr_warn ("Failed to allocate kernel cache\n" );
189
198
return ;
190
199
}
191
200
192
201
/*
193
- * Allocate one buffer from each cache (kmalloc will have the
194
- * SLAB_USERCOPY flag already, but "bad_cache" won't).
202
+ * Allocate a buffer with a whitelisted window in the buffer.
195
203
*/
196
- good_buf = kmalloc (cache_size , GFP_KERNEL );
197
- bad_buf = kmem_cache_alloc (bad_cache , GFP_KERNEL );
198
- if (!good_buf || !bad_buf ) {
199
- pr_warn ("Failed to allocate buffers from caches\n" );
204
+ buf = kmem_cache_alloc (whitelist_cache , GFP_KERNEL );
205
+ if (!buf ) {
206
+ pr_warn ("Failed to allocate buffer from whitelist cache\n" );
200
207
goto free_alloc ;
201
208
}
202
209
203
210
/* Allocate user memory we'll poke at. */
204
- user_addr = vm_mmap (NULL , 0 , PAGE_SIZE ,
211
+ user_alloc = vm_mmap (NULL , 0 , PAGE_SIZE ,
205
212
PROT_READ | PROT_WRITE | PROT_EXEC ,
206
213
MAP_ANONYMOUS | MAP_PRIVATE , 0 );
207
- if (user_addr >= TASK_SIZE ) {
214
+ if (user_alloc >= TASK_SIZE ) {
208
215
pr_warn ("Failed to allocate user memory\n" );
209
216
goto free_alloc ;
210
217
}
218
+ user_addr = (void __user * )user_alloc ;
211
219
212
- memset (good_buf , 'A' , cache_size );
213
- memset (bad_buf , 'B' , cache_size );
220
+ memset (buf , 'B' , cache_size );
221
+
222
+ /* Whitelisted window in buffer, from kmem_cache_create_usercopy. */
223
+ offset = (cache_size / 4 ) + unconst ;
224
+ size = (cache_size / 16 ) + unconst ;
214
225
215
226
if (to_user ) {
216
- pr_info ("attempting good copy_to_user with SLAB_USERCOPY\n" );
217
- if (copy_to_user ((void __user * )user_addr , good_buf ,
218
- cache_size )) {
227
+ pr_info ("attempting good copy_to_user inside whitelist\n" );
228
+ if (copy_to_user (user_addr , buf + offset , size )) {
219
229
pr_warn ("copy_to_user failed unexpectedly?!\n" );
220
230
goto free_user ;
221
231
}
222
232
223
- pr_info ("attempting bad copy_to_user w/o SLAB_USERCOPY\n" );
224
- if (copy_to_user ((void __user * )user_addr , bad_buf ,
225
- cache_size )) {
233
+ pr_info ("attempting bad copy_to_user outside whitelist\n" );
234
+ if (copy_to_user (user_addr , buf + offset - 1 , size )) {
226
235
pr_warn ("copy_to_user failed, but lacked Oops\n" );
227
236
goto free_user ;
228
237
}
229
238
} else {
230
- pr_info ("attempting good copy_from_user with SLAB_USERCOPY\n" );
231
- if (copy_from_user (good_buf , (void __user * )user_addr ,
232
- cache_size )) {
239
+ pr_info ("attempting good copy_from_user inside whitelist\n" );
240
+ if (copy_from_user (buf + offset , user_addr , size )) {
233
241
pr_warn ("copy_from_user failed unexpectedly?!\n" );
234
242
goto free_user ;
235
243
}
236
244
237
- pr_info ("attempting bad copy_from_user w/o SLAB_USERCOPY\n" );
238
- if (copy_from_user (bad_buf , (void __user * )user_addr ,
239
- cache_size )) {
245
+ pr_info ("attempting bad copy_from_user outside whitelist\n" );
246
+ if (copy_from_user (buf + offset - 1 , user_addr , size )) {
240
247
pr_warn ("copy_from_user failed, but lacked Oops\n" );
241
248
goto free_user ;
242
249
}
243
250
}
244
251
245
252
free_user :
246
- vm_munmap (user_addr , PAGE_SIZE );
253
+ vm_munmap (user_alloc , PAGE_SIZE );
247
254
free_alloc :
248
- if (bad_buf )
249
- kmem_cache_free (bad_cache , bad_buf );
250
- kfree (good_buf );
255
+ if (buf )
256
+ kmem_cache_free (whitelist_cache , buf );
251
257
}
252
258
253
259
/* Callable tests. */
@@ -261,14 +267,14 @@ void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
 	do_usercopy_heap_size(false);
 }
 
-void lkdtm_USERCOPY_HEAP_FLAG_TO(void)
+void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void)
 {
-	do_usercopy_heap_flag(true);
+	do_usercopy_heap_whitelist(true);
 }
 
-void lkdtm_USERCOPY_HEAP_FLAG_FROM(void)
+void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void)
 {
-	do_usercopy_heap_flag(false);
+	do_usercopy_heap_whitelist(false);
 }
 
 void lkdtm_USERCOPY_STACK_FRAME_TO(void)
@@ -319,11 +325,15 @@ void lkdtm_USERCOPY_KERNEL(void)
 void __init lkdtm_usercopy_init(void)
 {
-	/* Prepare cache that lacks SLAB_USERCOPY flag. */
-	bad_cache = kmem_cache_create("lkdtm-no-usercopy", cache_size, 0,
-				      0, NULL);
+	/* Prepare cache with a usercopy whitelist window. */
+	whitelist_cache =
+		kmem_cache_create_usercopy("lkdtm-usercopy", cache_size,
+					   0, 0,
+					   cache_size / 4,
+					   cache_size / 16,
+					   NULL);
 }
 
 void __exit lkdtm_usercopy_exit(void)
 {
-	kmem_cache_destroy(bad_cache);
+	kmem_cache_destroy(whitelist_cache);
 }
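
For readers skimming the init hunk: kmem_cache_create_usercopy() is kmem_cache_create() plus a per-cache whitelist, described by a useroffset/usersize pair, so the positional arguments above read: object size cache_size, align 0, flags 0, useroffset cache_size / 4, usersize cache_size / 16, no constructor. A sketch of the declaration from <linux/slab.h> (the exact integer types are approximate here; they have varied across kernel versions):

struct kmem_cache *
kmem_cache_create_usercopy(const char *name, size_t size, size_t align,
			   slab_flags_t flags,
			   size_t useroffset,	/* start of whitelist window */
			   size_t usersize,	/* size of whitelist window */
			   void (*ctor)(void *));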
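The offset/size math in do_usercopy_heap_whitelist() works because hardened usercopy rejects any copy that strays outside [useroffset, useroffset + usersize) within an object; the "bad" copies shift the window start back by a single byte. Below is a minimal userspace model of that bounds check (cache_whitelist and whitelist_check are illustrative names, not kernel API; the real per-object check lives in the slab allocator's __check_heap_object()):

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for the whitelist recorded at cache creation. */
struct cache_whitelist {
	size_t useroffset;	/* start of window within each object */
	size_t usersize;	/* length of window */
};

/*
 * Return 0 if copying n bytes starting at byte offset off within an
 * object stays inside the whitelisted window, -1 otherwise. Ordered to
 * avoid arithmetic overflow, in the same spirit as the kernel checks.
 */
static int whitelist_check(const struct cache_whitelist *wl,
			   size_t off, size_t n)
{
	if (off < wl->useroffset)
		return -1;	/* starts before the window */
	if (n > wl->usersize)
		return -1;	/* longer than the whole window */
	if (off - wl->useroffset > wl->usersize - n)
		return -1;	/* runs off the end of the window */
	return 0;
}

int main(void)
{
	/* Mirror the test cache: window is [1024/4, 1024/4 + 1024/16). */
	struct cache_whitelist wl = { 1024 / 4, 1024 / 16 };

	/* The "good" copies above: exactly the whitelisted window. */
	printf("good copy: %d\n", whitelist_check(&wl, 1024 / 4, 1024 / 16));

	/* The "bad" copies: window start shifted back by one byte. */
	printf("bad copy:  %d\n", whitelist_check(&wl, 1024 / 4 - 1, 1024 / 16));
	return 0;
}

Both requests are usersize bytes long; the one-byte shift is the only difference, and it is enough to land outside the window, which is exactly the Oops the "bad" test cases expect.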