 *
 *		[	prev sp		] <-------------
 *		[   tail_call_info	] 8		|
-*		[ nv gpr save area	] 6*8 + (12*8)	|
+*		[ nv gpr save area	] (6 * 8)	|
+*		[ addl. nv gpr save area] (12 * 8)	| <--- exception boundary/callback program
 *		[    local_tmp_var	] 24		|
 * fp (r31) -->	[   ebpf stack space	] upto 512	|
 *		[     frame header	] 32/112	|
 * sp (r1) --->	[    stack pointer	] --------------
 *
-* Additional (12*8) in 'nv gpr save area' only in case of
-* exception boundary.
+* Additional (12 * 8) in 'nv gpr save area' only in case of
+* exception boundary/callback.
 */


 /* BPF non-volatile registers save area size */

 * for additional non volatile registers(r14-r25) to be saved
 * at exception boundary
 */
-#define BPF_PPC_EXC_STACK_SAVE (12*8)
+#define BPF_PPC_EXC_STACK_SAVE (12 * 8)


 /* stack frame excluding BPF stack, ensure this is quadword aligned */
 #define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + \
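
Note: the BPF_PPC_STACKFRAME definition is cut off at the end of this hunk, and BPF_PPC_EXC_STACKFRAME, which the prologue hunk further down relies on, is defined outside this excerpt. As a reading aid, here is a minimal sketch of how the sizes from the layout diagram above plausibly compose; the BPF_PPC_TAILCALL_SAVE, BPF_PPC_STACK_SAVE and BPF_PPC_STACK_LOCALS names are illustrative guesses, not taken from the patch.

	/* Sketch only, not part of the patch. */
	#define BPF_PPC_TAILCALL_SAVE	8		/* tail_call_info */
	#define BPF_PPC_STACK_SAVE	(6 * 8)		/* nv gpr save area */
	#define BPF_PPC_STACK_LOCALS	24		/* local_tmp_var */

	/* regular frame: header + locals + nv gpr saves + tail call info */
	#define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_LOCALS + \
					 BPF_PPC_STACK_SAVE + BPF_PPC_TAILCALL_SAVE)

	/* exception boundary/callback frame: add the r14-r25 save area */
	#define BPF_PPC_EXC_STACKFRAME	(BPF_PPC_STACKFRAME + BPF_PPC_EXC_STACK_SAVE)
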
@@ -128,12 +129,13 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
 *		[	...		]		|
 * sp (r1) --->	[    stack pointer	] --------------
 *		[   tail_call_info	] 8
-*		[ nv gpr save area	] 6*8 + (12*8)
+*		[ nv gpr save area	] (6 * 8)
+*		[ addl. nv gpr save area] (12 * 8) <--- exception boundary/callback program
 *		[    local_tmp_var	] 24
 *		[   unused red zone	] 224
 *
-* Additional (12*8) in 'nv gpr save area' only in case of
-* exception boundary.
+* Additional (12 * 8) in 'nv gpr save area' only in case of
+* exception boundary/callback.
 */
 static int bpf_jit_stack_local(struct codegen_context *ctx)
 {
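
The body of bpf_jit_stack_local() is elided here; as a sketch of what the red-zone diagram above implies, local_tmp_var would sit at a negative offset from sp whenever no stack frame is set up (illustrative arithmetic only, not code from the patch):

	/* Sketch only: offset of local_tmp_var below sp per the diagram above;
	 * the (12 * 8) term applies only to exception boundary/callback programs.
	 */
	int local_tmp_off = -(8          /* tail_call_info */
			      + (6 * 8)  /* nv gpr save area */
			      + (12 * 8) /* addl. nv gpr save area, exception only */
			      + 24);     /* local_tmp_var itself */
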
@@ -240,10 +242,6 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)

 	if (bpf_has_stack_frame(ctx) && !ctx->exception_cb) {
 		/*
-		 * exception_cb uses boundary frame after stack walk.
-		 * It can simply use redzone, this optimization reduces
-		 * stack walk loop by one level.
-		 *
 		 * We need a stack frame, but we don't necessarily need to
 		 * save/restore LR unless we call other functions
 		 */
@@ -287,6 +285,22 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 		 * program(main prog) as third arg
 		 */
 		EMIT(PPC_RAW_MR(_R1, _R5));
+		/*
+		 * The exception callback reuses the exception boundary's stack
+		 * frame, but the BPF stack depths of the two programs don't have
+		 * to be the same. If the depths differ, adjust the frame size to
+		 * the exception callback's BPF stack depth. The non-volatile
+		 * register save area remains unchanged; those registers are
+		 * restored in the exception callback's epilogue.
+		 */
+		EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R5, 0));
+		EMIT(PPC_RAW_SUB(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_1), _R1));
+		EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2),
+				  -BPF_PPC_EXC_STACKFRAME));
+		EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_2), ctx->stack_size));
+		PPC_BCC_CONST_SHORT(COND_EQ, 12);
+		EMIT(PPC_RAW_MR(_R1, bpf_to_ppc(TMP_REG_1)));
+		EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_EXC_STACKFRAME + ctx->stack_size)));
 	}

 	/*
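For readers not fluent in PPC_RAW_* macros, here is the runtime effect of the sequence emitted in the last hunk, as hedged C pseudocode. It assumes PPC_BCC_CONST_SHORT(COND_EQ, 12) branches over exactly the final two instructions; the prev_sp, boundary_depth and stack_size names are illustrative.

	/*
	 * Sketch only: C pseudocode for the emitted prologue fix-up. r5 carries
	 * the exception boundary program's stack pointer (third argument), and
	 * stack_size is ctx->stack_size of the exception callback being JITed.
	 */
	u64 r1 = r5;               /* PPC_RAW_MR: adopt the boundary's frame */
	u64 prev_sp = *(u64 *)r1;  /* PPC_RAW_LD: back chain -> caller's sp */
	u64 boundary_depth = (prev_sp - r1)            /* PPC_RAW_SUB: frame size */
			     - BPF_PPC_EXC_STACKFRAME; /* PPC_RAW_ADDI: minus fixed part */

	if (boundary_depth != stack_size) { /* PPC_RAW_CMPLDI; beq skips fix-up */
		r1 = prev_sp;               /* PPC_RAW_MR: unwind one frame */
		r1 -= BPF_PPC_EXC_STACKFRAME + stack_size;
		*(u64 *)r1 = prev_sp;       /* PPC_RAW_STDU: allocate resized frame,
					       store back chain at the new sp */
	}

Per the layout diagram, tail_call_info and the nv gpr save areas sit directly below prev sp, so resizing only the BPF stack portion of the frame leaves the saved registers at their original offsets from the frame top, which is why the callback's epilogue can restore them unchanged.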