@@ -397,12 +397,14 @@ static char slot_type_char[] = {
 static void print_liveness(struct bpf_verifier_env *env,
			    enum bpf_reg_liveness live)
 {
-	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN))
+	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
 		verbose(env, "_");
 	if (live & REG_LIVE_READ)
 		verbose(env, "r");
 	if (live & REG_LIVE_WRITTEN)
 		verbose(env, "w");
+	if (live & REG_LIVE_DONE)
+		verbose(env, "D");
 }
 
 static struct bpf_func_state *func(struct bpf_verifier_env *env,
@@ -1132,6 +1134,12 @@ static int mark_reg_read(struct bpf_verifier_env *env,
 		/* if read wasn't screened by an earlier write ... */
 		if (writes && state->live & REG_LIVE_WRITTEN)
 			break;
+		if (parent->live & REG_LIVE_DONE) {
+			verbose(env, "verifier BUG type %s var_off %lld off %d\n",
+				reg_type_str[parent->type],
+				parent->var_off.value, parent->off);
+			return -EFAULT;
+		}
 		/* ... then we depend on parent's value */
 		parent->live |= REG_LIVE_READ;
 		state = parent;
@@ -5078,6 +5086,102 @@ static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
 	return false;
 }
 
+static void clean_func_state(struct bpf_verifier_env *env,
+			     struct bpf_func_state *st)
+{
+	enum bpf_reg_liveness live;
+	int i, j;
+
+	for (i = 0; i < BPF_REG_FP; i++) {
+		live = st->regs[i].live;
+		/* liveness must not touch this register anymore */
+		st->regs[i].live |= REG_LIVE_DONE;
+		if (!(live & REG_LIVE_READ))
+			/* since the register is unused, clear its state
+			 * to make further comparison simpler
+			 */
+			__mark_reg_not_init(&st->regs[i]);
+	}
+
+	for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
+		live = st->stack[i].spilled_ptr.live;
+		/* liveness must not touch this stack slot anymore */
+		st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
+		if (!(live & REG_LIVE_READ)) {
+			__mark_reg_not_init(&st->stack[i].spilled_ptr);
+			for (j = 0; j < BPF_REG_SIZE; j++)
+				st->stack[i].slot_type[j] = STACK_INVALID;
+		}
+	}
+}
+
+static void clean_verifier_state(struct bpf_verifier_env *env,
+				 struct bpf_verifier_state *st)
+{
+	int i;
+
+	if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
+		/* all regs in this state in all frames were already marked */
+		return;
+
+	for (i = 0; i <= st->curframe; i++)
+		clean_func_state(env, st->frame[i]);
+}
+
+/* the parentage chains form a tree.
+ * the verifier states are added to state lists at given insn and
+ * pushed into state stack for future exploration.
+ * when the verifier reaches bpf_exit insn some of the verifier states
+ * stored in the state lists have their final liveness state already,
+ * but a lot of states will get revised from liveness point of view when
+ * the verifier explores other branches.
+ * Example:
+ * 1: r0 = 1
+ * 2: if r1 == 100 goto pc+1
+ * 3: r0 = 2
+ * 4: exit
+ * when the verifier reaches exit insn the register r0 in the state list of
+ * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
+ * of insn 2 and goes exploring further. At the insn 4 it will walk the
+ * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
+ *
+ * Since the verifier pushes the branch states as it sees them while exploring
+ * the program, walking the branch instruction for the second time means that
+ * all states below this branch were already explored and their final
+ * liveness marks are already propagated.
+ * Hence when the verifier completes the search of the state list in
+ * is_state_visited() we can call this clean_live_states() function to mark all
+ * liveness states as REG_LIVE_DONE to indicate that 'parent' pointers of
+ * 'struct bpf_reg_state' will not be used.
+ * This function also clears the registers and stack for states that !READ
+ * to simplify state merging.
+ *
+ * An important note here is that walking the same branch instruction in the
+ * callee doesn't mean that the states are DONE. The verifier has to compare
+ * the callsites.
+ */
+static void clean_live_states(struct bpf_verifier_env *env, int insn,
+			      struct bpf_verifier_state *cur)
+{
+	struct bpf_verifier_state_list *sl;
+	int i;
+
+	sl = env->explored_states[insn];
+	if (!sl)
+		return;
+
+	while (sl != STATE_LIST_MARK) {
+		if (sl->state.curframe != cur->curframe)
+			goto next;
+		for (i = 0; i <= cur->curframe; i++)
+			if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
+				goto next;
+		clean_verifier_state(env, &sl->state);
+next:
+		sl = sl->next;
+	}
+}
+
 /* Returns true if (rold safe implies rcur safe) */
 static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
		     struct idpair *idmap)
@@ -5191,25 +5295,28 @@ static bool stacksafe(struct bpf_func_state *old,
 {
 	int i, spi;
 
-	/* if explored stack has more populated slots than current stack
-	 * such stacks are not equivalent
-	 */
-	if (old->allocated_stack > cur->allocated_stack)
-		return false;
-
 	/* walk slots of the explored stack and ignore any additional
 	 * slots in the current stack, since explored(safe) state
 	 * didn't use them
 	 */
 	for (i = 0; i < old->allocated_stack; i++) {
 		spi = i / BPF_REG_SIZE;
 
-		if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ))
+		if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
+			i += BPF_REG_SIZE - 1;
 			/* explored state didn't use this */
 			continue;
+		}
 
 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
 			continue;
+
+		/* explored stack has more populated slots than current stack
+		 * and these slots were used
+		 */
+		if (i >= cur->allocated_stack)
+			return false;
+
 		/* if old state was safe with misc data in the stack
 		 * it will be safe with zero-initialized stack.
 		 * The opposite is not true
@@ -5393,6 +5500,8 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 		 */
 		return 0;
 
+	clean_live_states(env, insn_idx, cur);
+
 	while (sl != STATE_LIST_MARK) {
 		if (states_equal(env, &sl->state, cur)) {
 			/* reached equivalent register/stack state,
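
A rough, userspace-only sketch of the idea behind the new REG_LIVE_DONE self-check, not taken from the kernel: the struct reg_state, the LIVE_* flags, and the mark_read() helper below are invented for illustration. It mirrors the -EFAULT check added to mark_reg_read() above: once a state's liveness has been declared final, any later attempt to propagate a read mark into it is a bug in the caller.

#include <stdio.h>

enum liveness {
	LIVE_NONE    = 0,
	LIVE_READ    = 1,	/* value was read by a child state */
	LIVE_WRITTEN = 2,	/* value was overwritten before any read */
	LIVE_DONE    = 4,	/* liveness is final and must not change */
};

struct reg_state {
	int live;
	struct reg_state *parent;
};

/* Propagate a read mark up the parentage chain, stopping at a write,
 * the way the verifier's mark_reg_read() does.  A DONE ancestor means
 * someone is touching liveness after it was declared final: flag a bug.
 */
static int mark_read(struct reg_state *state)
{
	struct reg_state *parent = state->parent;

	while (parent) {
		if (state->live & LIVE_WRITTEN)
			break;
		if (parent->live & LIVE_DONE) {
			fprintf(stderr, "bug: liveness touched after DONE\n");
			return -1;
		}
		parent->live |= LIVE_READ;
		state = parent;
		parent = state->parent;
	}
	return 0;
}

int main(void)
{
	struct reg_state grandparent = { .live = LIVE_NONE, .parent = NULL };
	struct reg_state parent      = { .live = LIVE_NONE, .parent = &grandparent };
	struct reg_state child       = { .live = LIVE_NONE, .parent = &parent };

	mark_read(&child);		/* marks both ancestors as LIVE_READ */
	grandparent.live |= LIVE_DONE;	/* exploration of that state finished */
	return mark_read(&child) ? 1 : 0;	/* second walk trips the self-check */
}

In this toy model the second mark_read() call reaches the DONE ancestor and reports the problem, much as the patched verifier prints "verifier BUG ..." and fails with -EFAULT instead of silently revising liveness that clean_live_states() already froze.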