Commit e5bdd6a

eddyz87 authored and Alexei Starovoitov committed
selftests/bpf: validate jit behaviour for tail calls
A program calling sub-program which does a tail call.
The idea is to verify instructions generated by jit for tail calls:
- in program and sub-program prologues;
- for subprogram call instruction;
- for tail call itself.

Signed-off-by: Eduard Zingerman <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
1 parent 7d743e4 commit e5bdd6a

2 files changed: +107, -0 lines changed
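
The comments in the new test file below describe the counter scheme the JIT is expected to emit: the entry program materializes a tail call counter on its own stack, and every callee receives a pointer to it (tail_call_cnt_ptr) in rax, spilled at rbp[-16]. As a rough orientation before reading the __jited assertions, that scheme can be modeled in plain C as in the sketch below. This is an illustration only, not the JIT's actual code; the names TAIL_CALL_LIMIT, struct tail_call_frame, prologue() and do_tail_call() are invented here, with the limit mirroring the 0x21 (33) immediate in the asserted cmpq instructions.

/* Sketch of the tail-call counter handling asserted in the test below
 * (illustration only). rax arrives either as a raw counter (entry
 * program) or as a pointer to the counter (tail-called program); either
 * way rbp[-16] ends up holding tail_call_cnt_ptr.
 */
#define TAIL_CALL_LIMIT 33			/* 0x21 in the jited code */

struct tail_call_frame {
	unsigned long cnt_slot;			/* rbp[-8]  */
	unsigned long *cnt_ptr;			/* rbp[-16] */
};

void prologue(struct tail_call_frame *f, unsigned long rax)
{
	if (rax <= TAIL_CALL_LIMIT) {			/* cmpq $0x21, %rax; ja L0 */
		f->cnt_slot = rax;			/* pushq %rax */
		f->cnt_ptr = &f->cnt_slot;		/* movq %rsp, %rax; L1: pushq %rax */
	} else {
		f->cnt_slot = rax;			/* L0: pushq %rax */
		f->cnt_ptr = (unsigned long *)rax;	/* L1: pushq %rax */
	}
}

int do_tail_call(struct tail_call_frame *f)
{
	if (*f->cnt_ptr >= TAIL_CALL_LIMIT)	/* cmpq $0x21, (%rax); jae L0 */
		return 0;			/* limit reached: fall through */
	*f->cnt_ptr += 1;			/* addq $0x1, (%rax) */
	return 1;				/* jmp to the tail call target */
}

The __jited blocks in the new file check exactly these pieces: the prologues emitted for main() and sub(), the subprogram call, and the bpf_tail_call sequence inside sub().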

tools/testing/selftests/bpf/prog_tests/verifier.c

Lines changed: 2 additions & 0 deletions
@@ -75,6 +75,7 @@
 #include "verifier_stack_ptr.skel.h"
 #include "verifier_subprog_precision.skel.h"
 #include "verifier_subreg.skel.h"
+#include "verifier_tailcall_jit.skel.h"
 #include "verifier_typedef.skel.h"
 #include "verifier_uninit.skel.h"
 #include "verifier_unpriv.skel.h"
@@ -198,6 +199,7 @@ void test_verifier_spin_lock(void) { RUN(verifier_spin_lock); }
 void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); }
 void test_verifier_subprog_precision(void) { RUN(verifier_subprog_precision); }
 void test_verifier_subreg(void) { RUN(verifier_subreg); }
+void test_verifier_tailcall_jit(void) { RUN(verifier_tailcall_jit); }
 void test_verifier_typedef(void) { RUN(verifier_typedef); }
 void test_verifier_uninit(void) { RUN(verifier_uninit); }
 void test_verifier_unpriv(void) { RUN(verifier_unpriv); }
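
The two added lines above are the standard wiring for a new verifier skeleton: include the generated .skel.h and register a test_verifier_tailcall_jit() wrapper around RUN(). With the usual selftests runner, the case should then be selectable by name, e.g. ./test_progs -t verifier_tailcall_jit.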
tools/testing/selftests/bpf/progs/verifier_tailcall_jit.c

Lines changed: 105 additions & 0 deletions
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+int main(void);
+
+struct {
+	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+	__uint(max_entries, 1);
+	__uint(key_size, sizeof(__u32));
+	__array(values, void (void));
+} jmp_table SEC(".maps") = {
+	.values = {
+		[0] = (void *) &main,
+	},
+};
+
+__noinline __auxiliary
+static __naked int sub(void)
+{
+	asm volatile (
+	"r2 = %[jmp_table] ll;"
+	"r3 = 0;"
+	"call 12;"
+	"exit;"
+	:
+	: __imm_addr(jmp_table)
+	: __clobber_all);
+}
+
+__success
+__arch_x86_64
+/* program entry for main(), regular function prologue */
+__jited(" endbr64")
+__jited(" nopl (%rax,%rax)")
+__jited(" xorq %rax, %rax")
+__jited(" pushq %rbp")
+__jited(" movq %rsp, %rbp")
+/* tail call prologue for program:
+ * - establish memory location for tail call counter at &rbp[-8];
+ * - spill tail_call_cnt_ptr at &rbp[-16];
+ * - expect tail call counter to be passed in rax;
+ * - for entry program rax is a raw counter, value < 33;
+ * - for tail called program rax is tail_call_cnt_ptr (value > 33).
+ */
+__jited(" endbr64")
+__jited(" cmpq $0x21, %rax")
+__jited(" ja L0")
+__jited(" pushq %rax")
+__jited(" movq %rsp, %rax")
+__jited(" jmp L1")
+__jited("L0: pushq %rax")		/* rbp[-8]  = rax */
+__jited("L1: pushq %rax")		/* rbp[-16] = rax */
+/* on subprogram call restore rax to be tail_call_cnt_ptr from rbp[-16]
+ * (cause original rax might be clobbered by this point)
+ */
+__jited(" movq -0x10(%rbp), %rax")
+__jited(" callq 0x{{.*}}")		/* call to sub() */
+__jited(" xorl %eax, %eax")
+__jited(" leave")
+__jited(" retq")
+__jited("...")
+/* subprogram entry for sub(), regular function prologue */
+__jited(" endbr64")
+__jited(" nopl (%rax,%rax)")
+__jited(" nopl (%rax)")
+__jited(" pushq %rbp")
+__jited(" movq %rsp, %rbp")
+/* tail call prologue for subprogram address of tail call counter
+ * stored at rbp[-16].
+ */
+__jited(" endbr64")
+__jited(" pushq %rax")			/* rbp[-8]  = rax */
+__jited(" pushq %rax")			/* rbp[-16] = rax */
+__jited(" movabsq ${{.*}}, %rsi")	/* r2 = &jmp_table */
+__jited(" xorl %edx, %edx")		/* r3 = 0 */
+/* bpf_tail_call implementation:
+ * - load tail_call_cnt_ptr from rbp[-16];
+ * - if *tail_call_cnt_ptr < 33, increment it and jump to target;
+ * - otherwise do nothing.
+ */
+__jited(" movq -0x10(%rbp), %rax")
+__jited(" cmpq $0x21, (%rax)")
+__jited(" jae L0")
+__jited(" nopl (%rax,%rax)")
+__jited(" addq $0x1, (%rax)")		/* *tail_call_cnt_ptr += 1 */
+__jited(" popq %rax")
+__jited(" popq %rax")
+__jited(" jmp {{.*}}")			/* jump to tail call tgt */
+__jited("L0: leave")
+__jited(" retq")
+SEC("tc")
+__naked int main(void)
+{
+	asm volatile (
+	"call %[sub];"
+	"r0 = 0;"
+	"exit;"
+	:
+	: __imm(sub)
+	: __clobber_all);
+}
+
+char __license[] SEC("license") = "GPL";
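
For readers less used to the naked-asm style: the body of sub() is essentially an open-coded bpf_tail_call() invocation ("call 12;" calls BPF helper number 12, BPF_FUNC_tail_call, with r2 = &jmp_table and r3 = index 0, which is why the jited assertions expect %rsi and %edx to be loaded). A hypothetical non-naked form, assuming the usual bpf_helpers.h definitions and a tc-style context, is sketched below; the test keeps sub() naked so the generated instruction sequence stays fully under its control.

/* Hypothetical non-naked equivalent of sub(); illustration only, not part
 * of this commit. bpf_tail_call() either jumps to jmp_table[0], replacing
 * the current program, or falls through once the tail call limit is hit.
 */
static __noinline int sub_open_coded(struct __sk_buff *ctx)
{
	bpf_tail_call(ctx, &jmp_table, 0);
	return 0;	/* reached only if the tail call did not happen */
}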
