
Commit bab89ad

Merge branch 'bpf-improve-verifier-state-analysis'
Alexei Starovoitov says:

====================
v1->v2: with the optimization suggested by Jakub, the patch 4 safety check became cheap enough.

Several improvements to verifier state logic.
Patch 1 - trivial optimization
Patch 3 - significant optimization for stack state equivalence
Patch 4 - safety check for liveness and prep for future state merging
====================

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
2 parents: eb415c9 + 9242b5f

3 files changed: +154 -9 lines changed


include/linux/bpf_verifier.h

Lines changed: 1 addition & 0 deletions
@@ -38,6 +38,7 @@ enum bpf_reg_liveness {
         REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
         REG_LIVE_READ, /* reg was read, so we're sensitive to initial value */
         REG_LIVE_WRITTEN, /* reg was written first, screening off later reads */
+        REG_LIVE_DONE = 4, /* liveness won't be updating this register anymore */
 };
 
 struct bpf_reg_state {
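
Editor's note (not part of the patch): because REG_LIVE_NONE is 0 and the enumerators that follow take the implicit values 1 and 2, the explicit "= 4" puts REG_LIVE_DONE on its own bit, so it can be OR-ed into the same live field as the READ/WRITTEN flags and tested independently. A minimal userspace sketch of that flag arithmetic:

#include <stdio.h>

/* standalone copy of the enum, for illustration only */
enum bpf_reg_liveness {
        REG_LIVE_NONE = 0,      /* no liveness bit set */
        REG_LIVE_READ,          /* implicitly 1 (bit 0) */
        REG_LIVE_WRITTEN,       /* implicitly 2 (bit 1) */
        REG_LIVE_DONE = 4,      /* explicitly bit 2, added by this series */
};

int main(void)
{
        /* a register that was read and later frozen by clean_live_states() */
        int live = REG_LIVE_READ | REG_LIVE_DONE;

        printf("read=%d written=%d done=%d\n",
               !!(live & REG_LIVE_READ),
               !!(live & REG_LIVE_WRITTEN),
               !!(live & REG_LIVE_DONE));      /* read=1 written=0 done=1 */
        return 0;
}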

kernel/bpf/verifier.c

Lines changed: 117 additions & 8 deletions
@@ -397,12 +397,14 @@ static char slot_type_char[] = {
 static void print_liveness(struct bpf_verifier_env *env,
                            enum bpf_reg_liveness live)
 {
-        if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN))
+        if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
                 verbose(env, "_");
         if (live & REG_LIVE_READ)
                 verbose(env, "r");
         if (live & REG_LIVE_WRITTEN)
                 verbose(env, "w");
+        if (live & REG_LIVE_DONE)
+                verbose(env, "D");
 }
 
 static struct bpf_func_state *func(struct bpf_verifier_env *env,
@@ -1132,6 +1134,12 @@ static int mark_reg_read(struct bpf_verifier_env *env,
                 /* if read wasn't screened by an earlier write ... */
                 if (writes && state->live & REG_LIVE_WRITTEN)
                         break;
+                if (parent->live & REG_LIVE_DONE) {
+                        verbose(env, "verifier BUG type %s var_off %lld off %d\n",
+                                reg_type_str[parent->type],
+                                parent->var_off.value, parent->off);
+                        return -EFAULT;
+                }
                 /* ... then we depend on parent's value */
                 parent->live |= REG_LIVE_READ;
                 state = parent;
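
Editor's note (not part of the patch): this is the "safety check for liveness" from the cover letter. Once clean_live_states() has frozen a state with REG_LIVE_DONE, nothing should ever propagate liveness into it again, so finding a DONE parent while walking the parentage chain means the verifier itself has a bug, and the walk aborts with -EFAULT. A toy userspace model of that walk (editor's sketch, not the kernel's data structures):

#include <stdio.h>

#define REG_LIVE_READ           1
#define REG_LIVE_WRITTEN        2
#define REG_LIVE_DONE           4

/* toy stand-in for one register's liveness node in the parentage chain */
struct reg_node {
        int live;
        struct reg_node *parent;
};

/* simplified analogue of mark_reg_read(): propagate READ up the chain,
 * refusing to touch an ancestor already frozen with REG_LIVE_DONE
 */
static int mark_read(struct reg_node *state)
{
        struct reg_node *parent = state->parent;

        while (parent) {
                if (state->live & REG_LIVE_WRITTEN)
                        break;          /* read screened off by an earlier write */
                if (parent->live & REG_LIVE_DONE) {
                        fprintf(stderr, "bug: liveness update on a DONE state\n");
                        return -1;      /* the kernel returns -EFAULT here */
                }
                parent->live |= REG_LIVE_READ;
                state = parent;
                parent = state->parent;
        }
        return 0;
}

int main(void)
{
        struct reg_node grandparent = { 0, NULL };
        struct reg_node parent = { 0, &grandparent };
        struct reg_node child = { 0, &parent };

        printf("%d\n", mark_read(&child));      /* 0: READ reaches both ancestors */
        grandparent.live |= REG_LIVE_DONE;      /* simulate clean_live_states() */
        printf("%d\n", mark_read(&child));      /* -1: frozen ancestor detected */
        return 0;
}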
@@ -5078,6 +5086,102 @@ static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
         return false;
 }
 
+static void clean_func_state(struct bpf_verifier_env *env,
+                             struct bpf_func_state *st)
+{
+        enum bpf_reg_liveness live;
+        int i, j;
+
+        for (i = 0; i < BPF_REG_FP; i++) {
+                live = st->regs[i].live;
+                /* liveness must not touch this register anymore */
+                st->regs[i].live |= REG_LIVE_DONE;
+                if (!(live & REG_LIVE_READ))
+                        /* since the register is unused, clear its state
+                         * to make further comparison simpler
+                         */
+                        __mark_reg_not_init(&st->regs[i]);
+        }
+
+        for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
+                live = st->stack[i].spilled_ptr.live;
+                /* liveness must not touch this stack slot anymore */
+                st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
+                if (!(live & REG_LIVE_READ)) {
+                        __mark_reg_not_init(&st->stack[i].spilled_ptr);
+                        for (j = 0; j < BPF_REG_SIZE; j++)
+                                st->stack[i].slot_type[j] = STACK_INVALID;
+                }
+        }
+}
+
+static void clean_verifier_state(struct bpf_verifier_env *env,
+                                 struct bpf_verifier_state *st)
+{
+        int i;
+
+        if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
+                /* all regs in this state in all frames were already marked */
+                return;
+
+        for (i = 0; i <= st->curframe; i++)
+                clean_func_state(env, st->frame[i]);
+}
+
+/* the parentage chains form a tree.
+ * the verifier states are added to state lists at given insn and
+ * pushed into state stack for future exploration.
+ * when the verifier reaches bpf_exit insn some of the verifier states
+ * stored in the state lists have their final liveness state already,
+ * but a lot of states will get revised from liveness point of view when
+ * the verifier explores other branches.
+ * Example:
+ * 1: r0 = 1
+ * 2: if r1 == 100 goto pc+1
+ * 3: r0 = 2
+ * 4: exit
+ * when the verifier reaches exit insn the register r0 in the state list of
+ * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
+ * of insn 2 and goes exploring further. At the insn 4 it will walk the
+ * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
+ *
+ * Since the verifier pushes the branch states as it sees them while exploring
+ * the program the condition of walking the branch instruction for the second
+ * time means that all states below this branch were already explored and
+ * their final liveness marks are already propagated.
+ * Hence when the verifier completes the search of state list in is_state_visited()
+ * we can call this clean_live_states() function to mark all liveness states
+ * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
+ * will not be used.
+ * This function also clears the registers and stack for states that !READ
+ * to simplify state merging.
+ *
+ * Important note here that walking the same branch instruction in the callee
+ * doesn't mean that the states are DONE. The verifier has to compare
+ * the callsites.
+ */
+static void clean_live_states(struct bpf_verifier_env *env, int insn,
+                              struct bpf_verifier_state *cur)
+{
+        struct bpf_verifier_state_list *sl;
+        int i;
+
+        sl = env->explored_states[insn];
+        if (!sl)
+                return;
+
+        while (sl != STATE_LIST_MARK) {
+                if (sl->state.curframe != cur->curframe)
+                        goto next;
+                for (i = 0; i <= cur->curframe; i++)
+                        if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
+                                goto next;
+                clean_verifier_state(env, &sl->state);
+next:
+                sl = sl->next;
+        }
+}
+
 /* Returns true if (rold safe implies rcur safe) */
 static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
                     struct idpair *idmap)
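
Editor's note (not part of the patch): besides freezing liveness, clean_func_state() wipes every register and stack slot that was never read, so two explored states that differ only in values nobody looked at compare as equal later on (and can eventually be merged). A toy model of that wipe-if-unread step (editor's sketch with a hypothetical struct, not the kernel's bpf_reg_state):

#include <stdio.h>

#define REG_LIVE_READ   1
#define REG_LIVE_DONE   4

/* hypothetical stand-in for the interesting parts of a register state */
struct toy_reg {
        int live;
        long long value;        /* stands in for the rest of bpf_reg_state */
};

/* analogue of clean_func_state() for one frame of toy registers:
 * freeze liveness and clear anything that was never read, so later
 * state comparisons only look at registers that actually mattered
 */
static void clean_frame(struct toy_reg *regs, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                int live = regs[i].live;

                regs[i].live |= REG_LIVE_DONE;
                if (!(live & REG_LIVE_READ))
                        regs[i].value = 0;      /* plays the role of __mark_reg_not_init() */
        }
}

int main(void)
{
        struct toy_reg regs[2] = {
                { REG_LIVE_READ, 100 },         /* was read: its value must be kept */
                { 0, 42 },                      /* never read: safe to clear */
        };

        clean_frame(regs, 2);
        printf("r0: live=%d value=%lld\n", regs[0].live, regs[0].value);   /* 5, 100 */
        printf("r1: live=%d value=%lld\n", regs[1].live, regs[1].value);   /* 4, 0 */
        return 0;
}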
@@ -5191,25 +5295,28 @@ static bool stacksafe(struct bpf_func_state *old,
 {
         int i, spi;
 
-        /* if explored stack has more populated slots than current stack
-         * such stacks are not equivalent
-         */
-        if (old->allocated_stack > cur->allocated_stack)
-                return false;
-
         /* walk slots of the explored stack and ignore any additional
          * slots in the current stack, since explored(safe) state
          * didn't use them
          */
         for (i = 0; i < old->allocated_stack; i++) {
                 spi = i / BPF_REG_SIZE;
 
-                if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ))
+                if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
+                        i += BPF_REG_SIZE - 1;
                         /* explored state didn't use this */
                         continue;
+                }
 
                 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
                         continue;
+
+                /* explored stack has more populated slots than current stack
+                 * and these slots were used
+                 */
+                if (i >= cur->allocated_stack)
+                        return false;
+
                 /* if old state was safe with misc data in the stack
                  * it will be safe with zero-initialized stack.
                  * The opposite is not true
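
Editor's note (not part of the patch): this hunk is most likely the "significant optimization for stack state equivalence" from the cover letter. The blanket old->allocated_stack > cur->allocated_stack rejection moves into the loop, where it only fires for slots that were actually read, and when a whole 8-byte spill slot was never read the index jumps ahead by BPF_REG_SIZE - 1 so the remaining byte positions of that slot are not examined at all. A standalone sketch of that skip pattern (editor's illustration with a made-up slot_was_read() predicate):

#include <stdbool.h>
#include <stdio.h>

#define BPF_REG_SIZE 8

/* hypothetical predicate: did the explored state ever read this spill slot? */
static bool slot_was_read(int spi)
{
        return spi == 1;        /* pretend only slot 1 was read */
}

int main(void)
{
        int allocated_stack = 3 * BPF_REG_SIZE; /* three 8-byte slots */
        int i, spi, compared = 0;

        for (i = 0; i < allocated_stack; i++) {
                spi = i / BPF_REG_SIZE;

                if (!slot_was_read(spi)) {
                        /* skip the rest of this slot in one step instead of
                         * re-testing each of its 8 byte positions
                         */
                        i += BPF_REG_SIZE - 1;
                        continue;
                }
                compared++;     /* stands in for the per-byte slot_type checks */
        }

        /* the two unread slots cost one iteration each, the read slot costs 8 */
        printf("byte comparisons: %d\n", compared);     /* prints 8 */
        return 0;
}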
@@ -5393,6 +5500,8 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
                  */
                 return 0;
 
+        clean_live_states(env, insn_idx, cur);
+
         while (sl != STATE_LIST_MARK) {
                 if (states_equal(env, &sl->state, cur)) {
                         /* reached equivalent register/stack state,

tools/testing/selftests/bpf/test_verifier.c

Lines changed: 36 additions & 1 deletion
@@ -76,7 +76,7 @@ struct bpf_test {
         int fixup_percpu_cgroup_storage[MAX_FIXUPS];
         const char *errstr;
         const char *errstr_unpriv;
-        uint32_t retval, retval_unpriv;
+        uint32_t retval, retval_unpriv, insn_processed;
         enum {
                 UNDEF,
                 ACCEPT,
@@ -13647,6 +13647,28 @@ static struct bpf_test tests[] = {
                 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                 .result = ACCEPT,
         },
+        {
+                "allocated_stack",
+                .insns = {
+                        BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+                        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+                        BPF_ALU64_REG(BPF_MOV, BPF_REG_7, BPF_REG_0),
+                        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+                        BPF_MOV64_IMM(BPF_REG_0, 0),
+                        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+                        BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, -8),
+                        BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_7, -9),
+                        BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_10, -9),
+                        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
+                        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
+                        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
+                        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
+                        BPF_EXIT_INSN(),
+                },
+                .result = ACCEPT,
+                .result_unpriv = ACCEPT,
+                .insn_processed = 15,
+        },
         {
                 "reference tracking in call: free reference in subprog and outside",
                 .insns = {
@@ -14444,6 +14466,19 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
                 }
         }
 
+        if (test->insn_processed) {
+                uint32_t insn_processed;
+                char *proc;
+
+                proc = strstr(bpf_vlog, "processed ");
+                insn_processed = atoi(proc + 10);
+                if (test->insn_processed != insn_processed) {
+                        printf("FAIL\nUnexpected insn_processed %u vs %u\n",
+                               insn_processed, test->insn_processed);
+                        goto fail_log;
+                }
+        }
+
         if (fd_prog >= 0) {
                 __u8 tmp[TEST_DATA_LEN << 2];
                 __u32 size_tmp = sizeof(tmp);
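
Editor's note (not part of the patch): the new check keys off the instruction-count summary the verifier appends to its log. The test locates the substring "processed " and parses the number right after it (the "+ 10" is strlen("processed ")). A small standalone sketch of the same parsing against an example log tail (the wording after the count is illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        /* example tail of a verifier log; only "processed <N>" matters here */
        const char bpf_vlog[] = "...\nprocessed 15 insns ...\n";
        const char *proc = strstr(bpf_vlog, "processed ");
        unsigned int insn_processed;

        if (!proc)
                return 1;       /* summary line not found */
        insn_processed = atoi(proc + strlen("processed "));     /* == proc + 10 */
        printf("insn_processed = %u\n", insn_processed);        /* prints 15 */
        return 0;
}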
