  * are set to NOT_INIT to indicate that they are no longer readable.
  */
 
+#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
+
 /* single container for all structs
  * one verifier_env per bpf_check() call
  */
 struct verifier_env {
+	struct bpf_prog *prog;		/* eBPF program being verified */
+	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
+	u32 used_map_cnt;		/* number of used maps */
 };
 
 /* verbose verifier prints what it's seeing
@@ -300,6 +305,115 @@ static void print_bpf_insn(struct bpf_insn *insn)
 	}
 }
 
+/* return the map pointer stored inside BPF_LD_IMM64 instruction */
+static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
+{
+	u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;
+
+	return (struct bpf_map *) (unsigned long) imm64;
+}
+
+/* look for pseudo eBPF instructions that access map FDs and
+ * replace them with actual map pointers
+ */
+static int replace_map_fd_with_map_ptr(struct verifier_env *env)
+{
+	struct bpf_insn *insn = env->prog->insnsi;
+	int insn_cnt = env->prog->len;
+	int i, j;
+
+	for (i = 0; i < insn_cnt; i++, insn++) {
+		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
+			struct bpf_map *map;
+			struct fd f;
+
+			if (i == insn_cnt - 1 || insn[1].code != 0 ||
+			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
+			    insn[1].off != 0) {
+				verbose("invalid bpf_ld_imm64 insn\n");
+				return -EINVAL;
+			}
+
+			if (insn->src_reg == 0)
+				/* valid generic load 64-bit imm */
+				goto next_insn;
+
+			if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
+				verbose("unrecognized bpf_ld_imm64 insn\n");
+				return -EINVAL;
+			}
+
+			f = fdget(insn->imm);
+
+			map = bpf_map_get(f);
+			if (IS_ERR(map)) {
+				verbose("fd %d is not pointing to valid bpf_map\n",
+					insn->imm);
+				fdput(f);
+				return PTR_ERR(map);
+			}
+
+			/* store map pointer inside BPF_LD_IMM64 instruction */
+			insn[0].imm = (u32) (unsigned long) map;
+			insn[1].imm = ((u64) (unsigned long) map) >> 32;
+
+			/* check whether we recorded this map already */
+			for (j = 0; j < env->used_map_cnt; j++)
+				if (env->used_maps[j] == map) {
+					fdput(f);
+					goto next_insn;
+				}
+
+			if (env->used_map_cnt >= MAX_USED_MAPS) {
+				fdput(f);
+				return -E2BIG;
+			}
+
+			/* remember this map */
+			env->used_maps[env->used_map_cnt++] = map;
+
+			/* hold the map. If the program is rejected by verifier,
+			 * the map will be released by release_maps() or it
+			 * will be used by the valid program until it's unloaded
+			 * and all maps are released in free_bpf_prog_info()
+			 */
+			atomic_inc(&map->refcnt);
+
+			fdput(f);
+next_insn:
+			insn++;
+			i++;
+		}
+	}
+
+	/* now all pseudo BPF_LD_IMM64 instructions load valid
+	 * 'struct bpf_map *' into a register instead of user map_fd.
+	 * These pointers will be used later by verifier to validate map access.
+	 */
+	return 0;
+}
+
+/* drop refcnt of maps used by the rejected program */
+static void release_maps(struct verifier_env *env)
+{
+	int i;
+
+	for (i = 0; i < env->used_map_cnt; i++)
+		bpf_map_put(env->used_maps[i]);
+}
+
+/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
+static void convert_pseudo_ld_imm64(struct verifier_env *env)
+{
+	struct bpf_insn *insn = env->prog->insnsi;
+	int insn_cnt = env->prog->len;
+	int i;
+
+	for (i = 0; i < insn_cnt; i++, insn++)
+		if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
+			insn->src_reg = 0;
+}
+
 int bpf_check(struct bpf_prog *prog, union bpf_attr *attr)
 {
 	char __user *log_ubuf = NULL;
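For reference, the loader-side counterpart of replace_map_fd_with_map_ptr() is a two-slot BPF_LD_IMM64 instruction whose src_reg carries BPF_PSEUDO_MAP_FD and whose first imm carries the map file descriptor; every other field of the second slot must be zero, which is exactly what the sanity check above enforces. A minimal sketch of emitting such a pair from userspace (the helper name is illustrative and not part of this patch; struct bpf_insn and BPF_PSEUDO_MAP_FD come from the eBPF uapi headers):

	/* sketch: encode "dst_reg = map_ptr(map_fd)" as the pseudo insn pair
	 * that the verifier above rewrites into a real 'struct bpf_map *'
	 */
	static void emit_ld_map_fd(struct bpf_insn *insn, __u8 dst_reg, int map_fd)
	{
		insn[0].code    = BPF_LD | BPF_DW | BPF_IMM;
		insn[0].dst_reg = dst_reg;
		insn[0].src_reg = BPF_PSEUDO_MAP_FD;	/* mark imm as a map fd */
		insn[0].off     = 0;
		insn[0].imm     = map_fd;		/* fd goes in the low imm */

		insn[1].code    = 0;			/* second slot must be all zeroes */
		insn[1].dst_reg = 0;
		insn[1].src_reg = 0;
		insn[1].off     = 0;
		insn[1].imm     = 0;			/* high 32 bits of the 64-bit imm */
	}

After the kernel rewrite, insn[0].imm/insn[1].imm hold the low and high halves of the map pointer, and ld_imm64_to_map_ptr() reassembles them.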
@@ -316,6 +430,8 @@ int bpf_check(struct bpf_prog *prog, union bpf_attr *attr)
 	if (!env)
 		return -ENOMEM;
 
+	env->prog = prog;
+
 	/* grab the mutex to protect few globals used by verifier */
 	mutex_lock(&bpf_verifier_lock);
 
@@ -342,8 +458,14 @@ int bpf_check(struct bpf_prog *prog, union bpf_attr *attr)
 		log_level = 0;
 	}
 
+	ret = replace_map_fd_with_map_ptr(env);
+	if (ret < 0)
+		goto skip_full_check;
+
 	/* ret = do_check(env); */
 
+skip_full_check:
+
 	if (log_level && log_len >= log_size - 1) {
 		BUG_ON(log_len >= log_size);
 		/* verifier log exceeded user supplied buffer */
@@ -357,11 +479,36 @@ int bpf_check(struct bpf_prog *prog, union bpf_attr *attr)
 		goto free_log_buf;
 	}
 
+	if (ret == 0 && env->used_map_cnt) {
+		/* if program passed verifier, update used_maps in bpf_prog_info */
+		prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
+						     sizeof(env->used_maps[0]),
+						     GFP_KERNEL);
+
+		if (!prog->aux->used_maps) {
+			ret = -ENOMEM;
+			goto free_log_buf;
+		}
+
+		memcpy(prog->aux->used_maps, env->used_maps,
+		       sizeof(env->used_maps[0]) * env->used_map_cnt);
+		prog->aux->used_map_cnt = env->used_map_cnt;
+
+		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
+		 * bpf_ld_imm64 instructions
+		 */
+		convert_pseudo_ld_imm64(env);
+	}
 
 free_log_buf:
 	if (log_level)
 		vfree(log_buf);
 free_env:
+	if (!prog->aux->used_maps)
+		/* if we didn't copy map pointers into bpf_prog_info, release
+		 * them now. Otherwise free_bpf_prog_info() will release them.
+		 */
+		release_maps(env);
 	kfree(env);
 	mutex_unlock(&bpf_verifier_lock);
 	return ret;
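The refcount comments above hand final cleanup of an accepted program's maps to free_bpf_prog_info(), which is outside this diff. Going only by those comments, its map-related part would plausibly mirror release_maps() plus freeing the array that bpf_check() copied into prog->aux; a hypothetical sketch, not the patch's actual code:

	/* hypothetical sketch: drop the references taken in
	 * replace_map_fd_with_map_ptr() when the program itself is unloaded,
	 * then free the used_maps array allocated in bpf_check()
	 */
	static void free_bpf_prog_info(struct bpf_prog_aux *aux)
	{
		int i;

		for (i = 0; i < aux->used_map_cnt; i++)
			bpf_map_put(aux->used_maps[i]);

		kfree(aux->used_maps);
	}

Whether the real helper takes prog or prog->aux is not determined by this diff; the point is only that the references taken here outlive the verifier whenever the program is accepted, and are dropped immediately (via release_maps()) when it is rejected.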