@@ -313,6 +313,191 @@ static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
 	return (struct bpf_map *) (unsigned long) imm64;
 }
 
+/* non-recursive DFS pseudo code
+ * 1  procedure DFS-iterative(G,v):
+ * 2      label v as discovered
+ * 3      let S be a stack
+ * 4      S.push(v)
+ * 5      while S is not empty
+ * 6            t <- S.pop()
+ * 7            if t is what we're looking for:
+ * 8                return t
+ * 9            for all edges e in G.adjacentEdges(t) do
+ * 10               if edge e is already labelled
+ * 11                   continue with the next edge
+ * 12               w <- G.adjacentVertex(t,e)
+ * 13               if vertex w is not discovered and not explored
+ * 14                   label e as tree-edge
+ * 15                   label w as discovered
+ * 16                   S.push(w)
+ * 17                   continue at 5
+ * 18               else if vertex w is discovered
+ * 19                   label e as back-edge
+ * 20               else
+ * 21                   // vertex w is explored
+ * 22                   label e as forward- or cross-edge
+ * 23           label t as explored
+ * 24           S.pop()
+ *
+ * convention:
+ * 0x10 - discovered
+ * 0x11 - discovered and fall-through edge labelled
+ * 0x12 - discovered and fall-through and branch edges labelled
+ * 0x20 - explored
+ */
+
+enum {
+	DISCOVERED = 0x10,
+	EXPLORED = 0x20,
+	FALLTHROUGH = 1,
+	BRANCH = 2,
+};
+
+static int *insn_stack;	/* stack of insns to process */
+static int cur_stack;	/* current stack index */
+static int *insn_state;
+
+/* t, w, e - match pseudo-code above:
+ * t - index of current instruction
+ * w - next instruction
+ * e - edge
+ */
+static int push_insn(int t, int w, int e, struct verifier_env *env)
+{
+	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
+		return 0;
+
+	if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
+		return 0;
+
+	if (w < 0 || w >= env->prog->len) {
+		verbose("jump out of range from insn %d to %d\n", t, w);
+		return -EINVAL;
+	}
+
+	if (insn_state[w] == 0) {
+		/* tree-edge */
+		insn_state[t] = DISCOVERED | e;
+		insn_state[w] = DISCOVERED;
+		if (cur_stack >= env->prog->len)
+			return -E2BIG;
+		insn_stack[cur_stack++] = w;
+		return 1;
+	} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
+		verbose("back-edge from insn %d to %d\n", t, w);
+		return -EINVAL;
+	} else if (insn_state[w] == EXPLORED) {
+		/* forward- or cross-edge */
+		insn_state[t] = DISCOVERED | e;
+	} else {
+		verbose("insn state internal bug\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+/* non-recursive depth-first-search to detect loops in BPF program
+ * loop == back-edge in directed graph
+ */
+static int check_cfg(struct verifier_env *env)
+{
+	struct bpf_insn *insns = env->prog->insnsi;
+	int insn_cnt = env->prog->len;
+	int ret = 0;
+	int i, t;
+
+	insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
+	if (!insn_state)
+		return -ENOMEM;
+
+	insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
+	if (!insn_stack) {
+		kfree(insn_state);
+		return -ENOMEM;
+	}
+
+	insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
+	insn_stack[0] = 0; /* 0 is the first instruction */
+	cur_stack = 1;
+
+peek_stack:
+	if (cur_stack == 0)
+		goto check_state;
+	t = insn_stack[cur_stack - 1];
+
+	if (BPF_CLASS(insns[t].code) == BPF_JMP) {
+		u8 opcode = BPF_OP(insns[t].code);
+
+		if (opcode == BPF_EXIT) {
+			goto mark_explored;
+		} else if (opcode == BPF_CALL) {
+			ret = push_insn(t, t + 1, FALLTHROUGH, env);
+			if (ret == 1)
+				goto peek_stack;
+			else if (ret < 0)
+				goto err_free;
+		} else if (opcode == BPF_JA) {
+			if (BPF_SRC(insns[t].code) != BPF_K) {
+				ret = -EINVAL;
+				goto err_free;
+			}
+			/* unconditional jump with single edge */
+			ret = push_insn(t, t + insns[t].off + 1,
+					FALLTHROUGH, env);
+			if (ret == 1)
+				goto peek_stack;
+			else if (ret < 0)
+				goto err_free;
+		} else {
+			/* conditional jump with two edges */
+			ret = push_insn(t, t + 1, FALLTHROUGH, env);
+			if (ret == 1)
+				goto peek_stack;
+			else if (ret < 0)
+				goto err_free;
+
+			ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
+			if (ret == 1)
+				goto peek_stack;
+			else if (ret < 0)
+				goto err_free;
+		}
+	} else {
+		/* all other non-branch instructions with single
+		 * fall-through edge
+		 */
+		ret = push_insn(t, t + 1, FALLTHROUGH, env);
+		if (ret == 1)
+			goto peek_stack;
+		else if (ret < 0)
+			goto err_free;
+	}
+
+mark_explored:
+	insn_state[t] = EXPLORED;
+	if (cur_stack-- <= 0) {
+		verbose("pop stack internal bug\n");
+		ret = -EFAULT;
+		goto err_free;
+	}
+	goto peek_stack;
+
+check_state:
+	for (i = 0; i < insn_cnt; i++) {
+		if (insn_state[i] != EXPLORED) {
+			verbose("unreachable insn %d\n", i);
+			ret = -EINVAL;
+			goto err_free;
+		}
+	}
+	ret = 0; /* cfg looks good */
+
+err_free:
+	kfree(insn_state);
+	kfree(insn_stack);
+	return ret;
+}
+
 /* look for pseudo eBPF instructions that access map FDs and
  * replace them with actual map pointers
  */
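
The walk above encodes everything it needs in one int per instruction: insn_state[] holds the convention from the comment (0x10 discovered, 0x11/0x12 with one or both out-edges labelled, 0x20 explored), and push_insn() returns 0 when the edge is already labelled or leads to an explored vertex, 1 when a new vertex was pushed and the scan should restart from the top of the stack, and a negative errno for an out-of-range jump or a back-edge. To see that state machine in isolation, here is a minimal userspace sketch; it is not part of the patch, struct toy_insn and prog[] are invented stand-ins for struct bpf_insn, and the push_insn() below is a condensed rewrite that keeps only the state transitions (the kernel version additionally guards against impossible states and stack overflow).

#include <stdio.h>

enum { DISCOVERED = 0x10, EXPLORED = 0x20, FALLTHROUGH = 1, BRANCH = 2 };

#define LEN 3

struct toy_insn { int is_exit, is_cond, off; };

/* insn 0 either branches to insn 2 or falls through to insn 1;
 * insn 1 falls through to insn 2; insn 2 exits
 */
static struct toy_insn prog[LEN] = {
	{ .is_cond = 1, .off = 1 },
	{ 0 },
	{ .is_exit = 1 },
};

static int insn_state[LEN], insn_stack[LEN], cur_stack;

/* condensed push_insn(): same state encoding and return convention
 * as the kernel code above
 */
static int push_insn(int t, int w, int e)
{
	if (insn_state[t] >= (DISCOVERED | e))
		return 0;		/* edge e of t already labelled */
	if (w < 0 || w >= LEN)
		return -1;		/* jump out of range */
	if (insn_state[w] == 0) {	/* tree-edge: descend into w */
		insn_state[t] = DISCOVERED | e;
		insn_state[w] = DISCOVERED;
		insn_stack[cur_stack++] = w;
		return 1;
	}
	if ((insn_state[w] & 0xF0) == DISCOVERED)
		return -1;		/* w still on the stack: back-edge */
	insn_state[t] = DISCOVERED | e;	/* w explored: forward/cross-edge */
	return 0;
}

int main(void)
{
	int t, i, ret = 0;

	insn_state[0] = DISCOVERED;	/* mark 1st insn as discovered */
	insn_stack[0] = 0;
	cur_stack = 1;

	while (cur_stack > 0) {
		t = insn_stack[cur_stack - 1];
		if (prog[t].is_exit)
			ret = 0;	/* no outgoing edges */
		else if (prog[t].is_cond) {
			ret = push_insn(t, t + 1, FALLTHROUGH);
			if (ret == 0)
				ret = push_insn(t, t + prog[t].off + 1, BRANCH);
		} else
			ret = push_insn(t, t + 1, FALLTHROUGH);

		if (ret < 0) {
			printf("back-edge or bad jump at insn %d\n", t);
			return 1;
		}
		if (ret == 1)
			continue;	/* new vertex pushed: keep descending */
		insn_state[t] = EXPLORED;	/* all edges labelled: pop t */
		cur_stack--;
	}

	for (i = 0; i < LEN; i++)
		printf("insn %d: state 0x%02x\n", i, insn_state[i]);
	return 0;
}

Run as-is, every instruction ends in state 0x20. Flipping insn 1 to { .is_cond = 1, .off = -2 } makes it jump back to insn 0, which is still on the stack (state 0x11), and the walk reports a back-edge exactly where the kernel's push_insn() would. Keeping t on the stack until both of its edges are labelled is what lets a single int per instruction stand in for an explicit edge list.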
@@ -462,6 +647,10 @@ int bpf_check(struct bpf_prog *prog, union bpf_attr *attr)
 	if (ret < 0)
 		goto skip_full_check;
 
+	ret = check_cfg(env);
+	if (ret < 0)
+		goto skip_full_check;
+
 	/* ret = do_check(env); */
 
 skip_full_check:
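
From the user's side, wiring check_cfg() into bpf_check() turns two classes of programs into load-time failures before any per-instruction checking runs: programs whose control flow contains a loop, and programs with unreachable instructions. A sketch of both, assuming the insn-construction macros from include/linux/filter.h; the programs themselves are invented examples, not from the patch.

/* Rejected with "back-edge from insn 2 to 1": the conditional jump
 * targets an instruction that is still on the DFS stack.
 */
struct bpf_insn looping_prog[] = {
	BPF_MOV64_IMM(BPF_REG_0, 0),		 /* 0: r0 = 0             */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),	 /* 1: r0 += 1            */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 10, -2), /* 2: if r0 != 10 goto 1 */
	BPF_EXIT_INSN(),			 /* 3: exit               */
};

/* Rejected with "unreachable insn 1": the DFS never discovers
 * anything past the first exit, so the check_state pass fails.
 */
struct bpf_insn unreachable_prog[] = {
	BPF_EXIT_INSN(),			 /* 0: exit               */
	BPF_MOV64_IMM(BPF_REG_0, 0),		 /* 1: never reached      */
	BPF_EXIT_INSN(),			 /* 2: exit               */
};

Both fail bpf_check() with -EINVAL, so the (still stubbed-out) do_check() pass never spends verifier state on them.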