Skip to content

Commit 54ac9ff

Browse files
ardbiesheuvel authored and ctmarinas committed
arm64: Use static call trampolines when kCFI is enabled
Implement arm64 support for the 'unoptimized' static call variety, which routes all calls through a trampoline that performs a tail call to the chosen function, and wire it up for use when kCFI is enabled. This works around an issue with kCFI and generic static calls, where the prototypes of default handlers such as __static_call_nop() and __static_call_ret0() don't match the expected prototype of the call site, resulting in kCFI false positives [0]. Since static call targets may be located in modules loaded out of direct branching range, this needs an ADRP/LDR pair to load the branch target into R16 and a branch-to-register (BR) instruction to perform an indirect call. Unlike on x86, there is no pressing need on arm64 to avoid indirect calls at all cost, but hiding it from the compiler as is done here does have some benefits: - the literal is located in .rodata, which gives us the same robustness advantage that code patching does; - no D-cache pollution from fetching hash values from .text sections. From an execution speed PoV, this is unlikely to make any difference at all. Cc: Sami Tolvanen <[email protected]> Cc: Sean Christopherson <[email protected]> Cc: Kees Cook <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Will McVicker <[email protected]> Reported-by: Carlos Llamas <[email protected]> Closes: https://lore.kernel.org/all/[email protected]/ [0] Signed-off-by: Ard Biesheuvel <[email protected]> Signed-off-by: Will Deacon <[email protected]> Signed-off-by: Catalin Marinas <[email protected]>
1 parent 8c6e9b6 commit 54ac9ff

5 files changed

Lines changed: 57 additions & 0 deletions

File tree

arch/arm64/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -252,6 +252,7 @@ config ARM64
252252
select HAVE_RSEQ
253253
select HAVE_RUST if RUSTC_SUPPORTS_ARM64
254254
select HAVE_STACKPROTECTOR
255+
select HAVE_STATIC_CALL if CFI
255256
select HAVE_SYSCALL_TRACEPOINTS
256257
select HAVE_KPROBES
257258
select HAVE_KRETPROBES
Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,31 @@
1+
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_STATIC_CALL_H
#define _ASM_STATIC_CALL_H

/*
 * Emit an 'unoptimized' static call trampoline named @name that tail-calls
 * through a 64-bit function-pointer literal placed in .rodata:
 *
 *   ADRP/LDR load the literal (labelled 1:) into x16, and BR x16 performs
 *   the tail call.  The literal is retargeted at runtime by
 *   arch_static_call_transform() rather than patching the code itself.
 *
 * Keeping the pointer in .rodata (8-byte aligned, so a single plain store
 * can update it) rather than embedding the target in .text avoids D-cache
 * pollution of the instruction stream and keeps the trampoline code
 * read-only.  x16 (IP0) is a branch-target scratch register per the AAPCS64,
 * and "hint 34" is BTI C encoded so that pre-BTI assemblers still accept it.
 */
#define __ARCH_DEFINE_STATIC_CALL_TRAMP(name, target)			    \
	asm("	.pushsection	.static_call.text, \"ax\"		\n" \
	    "	.align		4					\n" \
	    "	.globl		" name "				\n" \
	    "" name ":							\n" \
	    "	hint 	34	/* BTI C */				\n" \
	    "	adrp	x16, 1f						\n" \
	    "	ldr	x16, [x16, :lo12:1f]				\n" \
	    "	br	x16						\n" \
	    "	.type		" name ", %function			\n" \
	    "	.size		" name ", . - " name "			\n" \
	    "	.popsection						\n" \
	    "	.pushsection	.rodata, \"a\"				\n" \
	    "	.align		3					\n" \
	    "1:	.quad		" target "				\n" \
	    "	.popsection						\n")

#define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func)			\
	__ARCH_DEFINE_STATIC_CALL_TRAMP(STATIC_CALL_TRAMP_STR(name), #func)

/*
 * Both the NULL and RET0 variants point at __static_call_return0: using the
 * generic __static_call_nop() here would trip kCFI, as its prototype does
 * not match the call site's expected one.  This mirrors the !func fallback
 * in arch_static_call_transform().
 */
#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)			\
	ARCH_DEFINE_STATIC_CALL_TRAMP(name, __static_call_return0)

#define ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)			\
	ARCH_DEFINE_STATIC_CALL_TRAMP(name, __static_call_return0)

#endif /* _ASM_STATIC_CALL_H */

arch/arm64/kernel/Makefile

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,7 @@ obj-$(CONFIG_MODULES) += module.o module-plts.o
4646
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
4747
obj-$(CONFIG_HARDLOCKUP_DETECTOR_PERF) += watchdog_hld.o
4848
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
49+
obj-$(CONFIG_HAVE_STATIC_CALL) += static_call.o
4950
obj-$(CONFIG_CPU_PM) += sleep.o suspend.o
5051
obj-$(CONFIG_KGDB) += kgdb.o
5152
obj-$(CONFIG_EFI) += efi.o efi-rt-wrapper.o

arch/arm64/kernel/static_call.c

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
// SPDX-License-Identifier: GPL-2.0
#include <linux/static_call.h>
#include <linux/memory.h>
#include <asm/text-patching.h>

/*
 * Retarget a static call trampoline emitted by
 * __ARCH_DEFINE_STATIC_CALL_TRAMP().
 *
 * @site:  inline call site (unused on arm64 — only the trampoline form is
 *         supported)
 * @tramp: address of the trampoline (BTI C; ADRP x16, 1f; LDR; BR x16)
 * @func:  new branch target, or NULL to select the default handler
 * @tail:  tail-call flag (unused on arm64)
 *
 * Rather than rewriting instructions, the trampoline branches through a
 * 64-bit literal in .rodata; we recover that literal's address from the
 * trampoline's own ADRP/LDR pair and overwrite the pointer it holds.
 */
void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
{
	u32 adrp_insn = le32_to_cpup(tramp + 4);	/* adrp x16, 1f */
	u32 ldr_insn = le32_to_cpup(tramp + 8);		/* ldr x16, [x16, :lo12:1f] */
	u64 lit_addr;

	/* NULL targets collapse onto __static_call_return0 to keep kCFI happy */
	if (!func)
		func = __static_call_return0;

	/*
	 * Reconstruct the literal's address exactly as the CPU would:
	 * the 4 KiB page the ADRP (at tramp + 4) resolves to, plus the LDR's
	 * unsigned 12-bit immediate, which is scaled by the 8-byte access size.
	 */
	lit_addr = ALIGN_DOWN((u64)tramp + 4, SZ_4K) +
		   aarch64_insn_adrp_get_offset(adrp_insn) +
		   8 * aarch64_insn_decode_immediate(AARCH64_INSN_IMM_12,
						     ldr_insn);

	WARN_ON_ONCE(aarch64_insn_write_literal_u64((void *)lit_addr,
						    (u64)func));
}
EXPORT_SYMBOL_GPL(arch_static_call_transform);

arch/arm64/kernel/vmlinux.lds.S

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -191,6 +191,7 @@ SECTIONS
191191
LOCK_TEXT
192192
KPROBES_TEXT
193193
HYPERVISOR_TEXT
194+
STATIC_CALL_TEXT
194195
*(.gnu.warning)
195196
}
196197

0 commit comments

Comments
 (0)