@@ -721,6 +721,8 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
 	dst_state->speculative = src->speculative;
 	dst_state->curframe = src->curframe;
 	dst_state->active_spin_lock = src->active_spin_lock;
+	dst_state->branches = src->branches;
+	dst_state->parent = src->parent;
 	for (i = 0; i <= src->curframe; i++) {
 		dst = dst_state->frame[i];
 		if (!dst) {
@@ -736,6 +738,23 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
 	return 0;
 }
 
+static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
+{
+	while (st) {
+		u32 br = --st->branches;
+
+		/* WARN_ON(br > 1) technically makes sense here,
+		 * but see comment in push_stack(), hence:
+		 */
+		WARN_ONCE((int)br < 0,
+			  "BUG update_branch_counts:branches_to_explore=%d\n",
+			  br);
+		if (br)
+			break;
+		st = st->parent;
+	}
+}
+
 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
 		     int *insn_idx)
 {
@@ -789,6 +808,18 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
 			env->stack_size);
 		goto err;
 	}
+	if (elem->st.parent) {
+		++elem->st.parent->branches;
+		/* WARN_ON(branches > 2) technically makes sense here,
+		 * but
+		 * 1. speculative states will bump 'branches' for non-branch
+		 *    instructions
+		 * 2. is_state_visited() heuristics may decide not to create
+		 *    a new state for a sequence of branches and all such current
+		 *    and cloned states will be pointing to a single parent state
+		 *    which might have large 'branches' count.
+		 */
+	}
 	return &elem->st;
 err:
 	free_verifier_state(env->cur_state, true);
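
An aside for intuition (an illustrative sketch, not part of the kernel code): the two halves of the accounting pair up — push_stack() above takes a reference on the parent whenever a path forks, and update_branch_counts() releases references up the parent chain whenever a path terminates. A toy model of just this mechanism, assuming only the branches/parent fields the patch adds:

    struct toy_state {
            struct toy_state *parent;
            unsigned int branches;  /* paths below this state still live */
    };

    /* mirrors push_stack(): forking a path keeps the parent's count up */
    void toy_fork(struct toy_state *parent, struct toy_state *child)
    {
            child->parent = parent;
            child->branches = 1;    /* the new path itself */
            if (parent)
                    parent->branches++;
    }

    /* mirrors update_branch_counts(): a finished path releases ancestors
     * until it reaches one that still has another live branch
     */
    void toy_path_done(struct toy_state *st)
    {
            while (st && --st->branches == 0)
                    st = st->parent;
    }

A state therefore reaches branches == 0 only after every path forked beneath it has hit bpf_exit or been pruned — exactly the condition is_state_visited() relies on further down.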
@@ -5682,7 +5713,8 @@ static void init_explored_state(struct bpf_verifier_env *env, int idx)
  * w - next instruction
  * e - edge
  */
-static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
+static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
+		     bool loop_ok)
 {
 	int *insn_stack = env->cfg.insn_stack;
 	int *insn_state = env->cfg.insn_state;
@@ -5712,6 +5744,8 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
 		insn_stack[env->cfg.cur_stack++] = w;
 		return 1;
 	} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
+		if (loop_ok && env->allow_ptr_leaks)
+			return 0;
 		verbose_linfo(env, t, "%d: ", t);
 		verbose_linfo(env, w, "%d: ", w);
 		verbose(env, "back-edge from insn %d to %d\n", t, w);
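
For a concrete picture, the back-edge this now tolerates for privileged (allow_ptr_leaks) loaders is the one a bounded loop creates — an illustrative instruction trace, not part of the diff:

    0: r1 = 0
    1: r1 += 1
    2: if r1 < 1000000 goto pc-2   /* back-edge: 2 + (-2) + 1 = insn 1 */
    3: exit

The DFS reaches insn 1 a second time via the conditional jump at insn 2 while insn 1 is still DISCOVERED; with loop_ok true and a privileged loader, push_insn() now returns 0 instead of failing with "back-edge", deferring termination checking to the state exploration in is_state_visited().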
@@ -5763,7 +5797,7 @@ static int check_cfg(struct bpf_verifier_env *env)
 		if (opcode == BPF_EXIT) {
 			goto mark_explored;
 		} else if (opcode == BPF_CALL) {
-			ret = push_insn(t, t + 1, FALLTHROUGH, env);
+			ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
 			if (ret == 1)
 				goto peek_stack;
 			else if (ret < 0)
@@ -5772,7 +5806,8 @@ static int check_cfg(struct bpf_verifier_env *env)
 			init_explored_state(env, t + 1);
 			if (insns[t].src_reg == BPF_PSEUDO_CALL) {
 				init_explored_state(env, t);
-				ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env);
+				ret = push_insn(t, t + insns[t].imm + 1, BRANCH,
+						env, false);
 				if (ret == 1)
 					goto peek_stack;
 				else if (ret < 0)
@@ -5785,7 +5820,7 @@ static int check_cfg(struct bpf_verifier_env *env)
 			}
 			/* unconditional jump with single edge */
 			ret = push_insn(t, t + insns[t].off + 1,
-					FALLTHROUGH, env);
+					FALLTHROUGH, env, true);
 			if (ret == 1)
 				goto peek_stack;
 			else if (ret < 0)
@@ -5798,13 +5833,13 @@ static int check_cfg(struct bpf_verifier_env *env)
 		} else {
 			/* conditional jump with two edges */
 			init_explored_state(env, t);
-			ret = push_insn(t, t + 1, FALLTHROUGH, env);
+			ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
 			if (ret == 1)
 				goto peek_stack;
 			else if (ret < 0)
 				goto err_free;
 
-			ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
+			ret = push_insn(t, t + insns[t].off + 1, BRANCH, env, true);
 			if (ret == 1)
 				goto peek_stack;
 			else if (ret < 0)
@@ -5814,7 +5849,7 @@ static int check_cfg(struct bpf_verifier_env *env)
 		/* all other non-branch instructions with single
 		 * fall-through edge
 		 */
-		ret = push_insn(t, t + 1, FALLTHROUGH, env);
+		ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
 		if (ret == 1)
 			goto peek_stack;
 		else if (ret < 0)
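
Taken together, the check_cfg() changes pass loop_ok = true only on the edges a loop may legitimately follow — unconditional and conditional jumps — while call edges (both the helper-call fall-through and BPF_PSEUDO_CALL targets) and plain fall-through edges keep loop_ok = false, so recursion via bpf2bpf calls in particular is still reported as a back-edge even for privileged loaders.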
@@ -6247,6 +6282,8 @@ static void clean_live_states(struct bpf_verifier_env *env, int insn,
 
 	sl = *explored_state(env, insn);
 	while (sl) {
+		if (sl->state.branches)
+			goto next;
 		if (sl->state.insn_idx != insn ||
 		    sl->state.curframe != cur->curframe)
 			goto next;
@@ -6611,19 +6648,51 @@ static int propagate_liveness(struct bpf_verifier_env *env,
 	return 0;
 }
 
+static bool states_maybe_looping(struct bpf_verifier_state *old,
+				 struct bpf_verifier_state *cur)
+{
+	struct bpf_func_state *fold, *fcur;
+	int i, fr = cur->curframe;
+
+	if (old->curframe != fr)
+		return false;
+
+	fold = old->frame[fr];
+	fcur = cur->frame[fr];
+	for (i = 0; i < MAX_BPF_REG; i++)
+		if (memcmp(&fold->regs[i], &fcur->regs[i],
+			   offsetof(struct bpf_reg_state, parent)))
+			return false;
+	return true;
+}
+
+
 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 {
 	struct bpf_verifier_state_list *new_sl;
 	struct bpf_verifier_state_list *sl, **pprev;
 	struct bpf_verifier_state *cur = env->cur_state, *new;
 	int i, j, err, states_cnt = 0;
+	bool add_new_state = false;
 
 	if (!env->insn_aux_data[insn_idx].prune_point)
 		/* this 'insn_idx' instruction wasn't marked, so we will not
 		 * be doing state search here
 		 */
 		return 0;
 
+	/* bpf progs typically have a pruning point every 4 instructions
+	 * http://vger.kernel.org/bpfconf2019.html#session-1
+	 * Do not add a new state for future pruning if the verifier hasn't seen
+	 * at least 2 jumps and at least 8 instructions.
+	 * This heuristic helps decrease the 'total_states' and 'peak_states' metrics.
+	 * In tests that amounts to up to a 50% reduction in total verifier
+	 * memory consumption and a 20% verifier time speedup.
+	 */
+	if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
+	    env->insn_processed - env->prev_insn_processed >= 8)
+		add_new_state = true;
+
 	pprev = explored_state(env, insn_idx);
 	sl = *pprev;
 
@@ -6633,6 +6702,30 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 		states_cnt++;
 		if (sl->state.insn_idx != insn_idx)
 			goto next;
+		if (sl->state.branches) {
+			if (states_maybe_looping(&sl->state, cur) &&
+			    states_equal(env, &sl->state, cur)) {
+				verbose_linfo(env, insn_idx, "; ");
+				verbose(env, "infinite loop detected at insn %d\n", insn_idx);
+				return -EINVAL;
+			}
+			/* if the verifier is processing a loop, avoid adding new state
+			 * too often, since different loop iterations have distinct
+			 * states and may not help future pruning.
+			 * This threshold shouldn't be too low to make sure that
+			 * a loop with a large bound will be rejected quickly.
+			 * The most abusive loop will be:
+			 * r1 += 1
+			 * if r1 < 1000000 goto pc-2
+			 * 1M insn_processed limit / 100 == 10k peak states.
+			 * This threshold shouldn't be too high either, since states
+			 * at the end of the loop are likely to be useful in pruning.
+			 */
+			if (env->jmps_processed - env->prev_jmps_processed < 20 &&
+			    env->insn_processed - env->prev_insn_processed < 100)
+				add_new_state = false;
+			goto miss;
+		}
 		if (states_equal(env, &sl->state, cur)) {
 			sl->hit_cnt++;
 			/* reached equivalent register/stack state,
@@ -6650,7 +6743,15 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 			return err;
 		return 1;
 	}
-	sl->miss_cnt++;
+miss:
+	/* when a new state is not going to be added, do not increase the miss
+	 * count. Otherwise several loop iterations will remove the state
+	 * recorded earlier. The goal of these heuristics is to have
+	 * states from some iterations of the loop (some in the beginning
+	 * and some at the end) to help pruning.
+	 */
+	if (add_new_state)
+		sl->miss_cnt++;
 	/* heuristic to determine whether this state is beneficial
 	 * to keep checking from state equivalence point of view.
 	 * Higher numbers increase max_states_per_insn and verification time,
@@ -6662,6 +6763,11 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 		 */
 		*pprev = sl->next;
 		if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
+			u32 br = sl->state.branches;
+
+			WARN_ONCE(br,
+				  "BUG live_done but branches_to_explore %d\n",
+				  br);
 			free_verifier_state(&sl->state, false);
 			kfree(sl);
 			env->peak_states--;
@@ -6687,18 +6793,25 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 	if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
 		return 0;
 
-	/* there were no equivalent states, remember current one.
-	 * technically the current state is not proven to be safe yet,
+	if (!add_new_state)
+		return 0;
+
+	/* There were no equivalent states, remember the current one.
+	 * Technically the current state is not proven to be safe yet,
 	 * but it will either reach outer most bpf_exit (which means it's safe)
-	 * or it will be rejected. Since there are no loops, we won't be
+	 * or it will be rejected. When there are no loops the verifier won't be
 	 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
-	 * again on the way to bpf_exit
+	 * again on the way to bpf_exit.
+	 * When looping the sl->state.branches will be > 0 and this state
+	 * will not be considered for equivalence until branches == 0.
 	 */
 	new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
 	if (!new_sl)
 		return -ENOMEM;
 	env->total_states++;
 	env->peak_states++;
+	env->prev_jmps_processed = env->jmps_processed;
+	env->prev_insn_processed = env->insn_processed;
 
 	/* add new state to the head of linked list */
 	new = &new_sl->state;
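
Note that prev_jmps_processed and prev_insn_processed are snapshotted only when a state is actually recorded, so both windows above (the >= 2 jumps / >= 8 insns gate and the < 20 jumps / < 100 insns loop dampener) measure distance since the last kept state, not since the last pruning point visited.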
@@ -6709,6 +6822,9 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 		return err;
 	}
 	new->insn_idx = insn_idx;
+	WARN_ONCE(new->branches != 1,
+		  "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);
+	cur->parent = new;
 	new_sl->next = *explored_state(env, insn_idx);
 	*explored_state(env, insn_idx) = new_sl;
 	/* connect new state to parentage chain. Current frame needs all
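
The WARN_ONCE documents the invariant that a state fresh out of copy_verifier_state() carries exactly the one live path that created it; re-pointing cur->parent at the recorded snapshot then makes the in-flight state, and everything later forked from it, count against the snapshot's branches — which is what eventually lets update_branch_counts() drop it to zero and re-enable the state for equivalence checks.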
@@ -6795,6 +6911,7 @@ static int do_check(struct bpf_verifier_env *env)
 		return -ENOMEM;
 	state->curframe = 0;
 	state->speculative = false;
+	state->branches = 1;
 	state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
 	if (!state->frame[0]) {
 		kfree(state);
@@ -7001,6 +7118,7 @@ static int do_check(struct bpf_verifier_env *env)
 		} else if (class == BPF_JMP || class == BPF_JMP32) {
 			u8 opcode = BPF_OP(insn->code);
 
+			env->jmps_processed++;
 			if (opcode == BPF_CALL) {
 				if (BPF_SRC(insn->code) != BPF_K ||
 				    insn->off != 0 ||
@@ -7086,6 +7204,7 @@ static int do_check(struct bpf_verifier_env *env)
 			if (err)
 				return err;
 process_bpf_exit:
+			update_branch_counts(env, env->cur_state);
 			err = pop_stack(env, &env->prev_insn_idx,
 					&env->insn_idx);
 			if (err < 0) {
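
End to end, the shape of program this change targets is a natural bounded loop, verified by walking iterations with the state machinery above instead of demanding #pragma unroll. An illustrative example (hypothetical program, compiled with clang -target bpf; under this patch acceptance of the back-edge still requires a privileged loader):

    #include <linux/bpf.h>

    int prog(struct __sk_buff *skb)
    {
            int i, sum = 0;

            /* the verifier follows the back-edge, recording states per the
             * heuristics above, until i's bounds prove termination
             */
            for (i = 0; i < 64; i++)
                    sum += i;
            return sum;
    }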