@@ -217,30 +217,40 @@ int bpf_prog_calc_tag(struct bpf_prog *fp)
217217 return 0 ;
218218}
219219
220- static bool bpf_is_jmp_and_has_target (const struct bpf_insn * insn )
221- {
222- return BPF_CLASS (insn -> code ) == BPF_JMP &&
223- /* Call and Exit are both special jumps with no
224- * target inside the BPF instruction image.
225- */
226- BPF_OP (insn -> code ) != BPF_CALL &&
227- BPF_OP (insn -> code ) != BPF_EXIT ;
228- }
229-
230220static void bpf_adj_branches (struct bpf_prog * prog , u32 pos , u32 delta )
231221{
232222 struct bpf_insn * insn = prog -> insnsi ;
233223 u32 i , insn_cnt = prog -> len ;
224+ bool pseudo_call ;
225+ u8 code ;
226+ int off ;
234227
235228 for (i = 0 ; i < insn_cnt ; i ++ , insn ++ ) {
236- if (!bpf_is_jmp_and_has_target (insn ))
229+ code = insn -> code ;
230+ if (BPF_CLASS (code ) != BPF_JMP )
237231 continue ;
232+ if (BPF_OP (code ) == BPF_EXIT )
233+ continue ;
234+ if (BPF_OP (code ) == BPF_CALL ) {
235+ if (insn -> src_reg == BPF_PSEUDO_CALL )
236+ pseudo_call = true;
237+ else
238+ continue ;
239+ } else {
240+ pseudo_call = false;
241+ }
242+ off = pseudo_call ? insn -> imm : insn -> off ;
238243
239244 /* Adjust offset of jmps if we cross boundaries. */
240- if (i < pos && i + insn -> off + 1 > pos )
241- insn -> off += delta ;
242- else if (i > pos + delta && i + insn -> off + 1 <= pos + delta )
243- insn -> off -= delta ;
245+ if (i < pos && i + off + 1 > pos )
246+ off += delta ;
247+ else if (i > pos + delta && i + off + 1 <= pos + delta )
248+ off -= delta ;
249+
250+ if (pseudo_call )
251+ insn -> imm = off ;
252+ else
253+ insn -> off = off ;
244254 }
245255}
246256
@@ -774,8 +784,7 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
774784 *
775785 * Decode and execute eBPF instructions.
776786 */
777- static unsigned int ___bpf_prog_run (u64 * regs , const struct bpf_insn * insn ,
778- u64 * stack )
787+ static u64 ___bpf_prog_run (u64 * regs , const struct bpf_insn * insn , u64 * stack )
779788{
780789 u64 tmp ;
781790 static const void * jumptable [256 ] = {
@@ -835,6 +844,7 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
835844 [BPF_ALU64 | BPF_NEG ] = & & ALU64_NEG ,
836845 /* Call instruction */
837846 [BPF_JMP | BPF_CALL ] = & & JMP_CALL ,
847+ [BPF_JMP | BPF_CALL_ARGS ] = & & JMP_CALL_ARGS ,
838848 [BPF_JMP | BPF_TAIL_CALL ] = & & JMP_TAIL_CALL ,
839849 /* Jumps */
840850 [BPF_JMP | BPF_JA ] = & & JMP_JA ,
@@ -1025,6 +1035,13 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
10251035 BPF_R4 , BPF_R5 );
10261036 CONT ;
10271037
1038+ JMP_CALL_ARGS :
1039+ BPF_R0 = (__bpf_call_base_args + insn -> imm )(BPF_R1 , BPF_R2 ,
1040+ BPF_R3 , BPF_R4 ,
1041+ BPF_R5 ,
1042+ insn + insn -> off + 1 );
1043+ CONT ;
1044+
10281045 JMP_TAIL_CALL : {
10291046 struct bpf_map * map = (struct bpf_map * ) (unsigned long ) BPF_R2 ;
10301047 struct bpf_array * array = container_of (map , struct bpf_array , map );
@@ -1297,6 +1314,23 @@ static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn
12971314 return ___bpf_prog_run(regs, insn, stack); \
12981315}
12991316
/* Generate per-stack-size interpreter entry points that take the five
 * BPF argument registers directly, used for internal BPF-to-BPF calls:
 * the callee's frame is a fresh on-stack array sized by @stack_size, and
 * execution resumes in ___bpf_prog_run() at the callee's first insn.
 */
#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
				      const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_REG]; \
\
	/* Frame pointer sits just past the callee's stack region. */ \
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	BPF_R1 = r1; \
	BPF_R2 = r2; \
	BPF_R3 = r3; \
	BPF_R4 = r4; \
	BPF_R5 = r5; \
	return ___bpf_prog_run(regs, insn, stack); \
}
1333+
13001334#define EVAL1 (FN , X ) FN(X)
13011335#define EVAL2 (FN , X , Y ...) FN(X) EVAL1(FN, Y)
13021336#define EVAL3 (FN , X , Y ...) FN(X) EVAL2(FN, Y)
@@ -1308,6 +1342,10 @@ EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
13081342EVAL6 (DEFINE_BPF_PROG_RUN , 224 , 256 , 288 , 320 , 352 , 384 );
13091343EVAL4 (DEFINE_BPF_PROG_RUN , 416 , 448 , 480 , 512 );
13101344
1345+ EVAL6 (DEFINE_BPF_PROG_RUN_ARGS , 32 , 64 , 96 , 128 , 160 , 192 );
1346+ EVAL6 (DEFINE_BPF_PROG_RUN_ARGS , 224 , 256 , 288 , 320 , 352 , 384 );
1347+ EVAL4 (DEFINE_BPF_PROG_RUN_ARGS , 416 , 448 , 480 , 512 );
1348+
13111349#define PROG_NAME_LIST (stack_size ) PROG_NAME(stack_size),
13121350
13131351static unsigned int (* interpreters [])(const void * ctx ,
@@ -1316,6 +1354,24 @@ EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
13161354EVAL6 (PROG_NAME_LIST , 224 , 256 , 288 , 320 , 352 , 384 )
13171355EVAL4 (PROG_NAME_LIST , 416 , 448 , 480 , 512 )
13181356};
1357+ #undef PROG_NAME_LIST
1358+ #define PROG_NAME_LIST (stack_size ) PROG_NAME_ARGS(stack_size),
1359+ static u64 (* interpreters_args [])(u64 r1 , u64 r2 , u64 r3 , u64 r4 , u64 r5 ,
1360+ const struct bpf_insn * insn ) = {
1361+ EVAL6 (PROG_NAME_LIST , 32 , 64 , 96 , 128 , 160 , 192 )
1362+ EVAL6 (PROG_NAME_LIST , 224 , 256 , 288 , 320 , 352 , 384 )
1363+ EVAL4 (PROG_NAME_LIST , 416 , 448 , 480 , 512 )
1364+ };
1365+ #undef PROG_NAME_LIST
1366+
1367+ void bpf_patch_call_args (struct bpf_insn * insn , u32 stack_depth )
1368+ {
1369+ stack_depth = max_t (u32 , stack_depth , 1 );
1370+ insn -> off = (s16 ) insn -> imm ;
1371+ insn -> imm = interpreters_args [(round_up (stack_depth , 32 ) / 32 ) - 1 ] -
1372+ __bpf_call_base_args ;
1373+ insn -> code = BPF_JMP | BPF_CALL_ARGS ;
1374+ }
13191375
13201376bool bpf_prog_array_compatible (struct bpf_array * array ,
13211377 const struct bpf_prog * fp )
0 commit comments