
Commit 858fbd7

Merge branch 'for-next/c1-pro-erratum-4193714' into for-next/core

* for-next/c1-pro-erratum-4193714:
  : Work around C1-Pro erratum 4193714 (CVE-2026-0995)
  arm64: errata: Work around early SME DVMSync acknowledgement
  arm64: cputype: Add C1-Pro definitions
  arm64: tlb: Pass the corresponding mm to __tlbi_sync_s1ish()
  arm64: tlb: Introduce __tlbi_sync_s1ish_{kernel,batch}() for TLB maintenance

2 parents 818f644 + 0baba94; commit 858fbd7

13 files changed: 284 additions & 10 deletions


Documentation/arch/arm64/silicon-errata.rst (2 additions & 0 deletions)

@@ -202,6 +202,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-V3AE   | #3312417        | ARM64_ERRATUM_3194386       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | C1-Pro          | #4193714        | ARM64_ERRATUM_4193714       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | MMU-500         | #841119,826419  | ARM_SMMU_MMU_500_CPRE_ERRATA|
 |                |                 | #562869,1047329 |                             |
 +----------------+-----------------+-----------------+-----------------------------+

arch/arm64/Kconfig (12 additions & 0 deletions)

@@ -1149,6 +1149,18 @@ config ARM64_ERRATUM_4311569
 
           If unsure, say Y.
 
+config ARM64_ERRATUM_4193714
+        bool "C1-Pro: 4193714: SME DVMSync early acknowledgement"
+        depends on ARM64_SME
+        default y
+        help
+          Enable workaround for C1-Pro acknowledging the DVMSync before
+          the SME memory accesses are complete. This will cause TLB
+          maintenance for processes using SME to also issue an IPI to
+          the affected CPUs.
+
+          If unsure, say Y.
+
 config CAVIUM_ERRATUM_22375
         bool "Cavium erratum 22375, 24313"
         default y
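
The help text above gives the shape of the workaround: demote broadcast TLB maintenance to an IPI for the affected CPUs. The function that performs the IPI, sme_do_dvmsync(), is only declared in the hunks below, so the following is a hypothetical sketch of that side (an assumption on our part, not the commit's actual implementation):

    #include <linux/smp.h>

    /* Hypothetical sketch: the real sme_do_dvmsync() is outside these hunks. */
    static void sme_dvmsync_ipi(void *unused)
    {
            /*
             * Nothing to do here: taking the IPI is itself the
             * synchronisation point, forcing completion of any in-flight
             * SME memory accesses that the early DVMSync acknowledgement
             * failed to wait for.
             */
    }

    void sme_do_dvmsync(const struct cpumask *mask)
    {
            /* Interrupt every CPU in the mask and wait for all handlers. */
            on_each_cpu_mask(mask, sme_dvmsync_ipi, NULL, true);
    }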

arch/arm64/include/asm/cpucaps.h (2 additions & 0 deletions)

@@ -64,6 +64,8 @@ cpucap_is_possible(const unsigned int cap)
                 return IS_ENABLED(CONFIG_ARM64_WORKAROUND_REPEAT_TLBI);
         case ARM64_WORKAROUND_SPECULATIVE_SSBS:
                 return IS_ENABLED(CONFIG_ARM64_ERRATUM_3194386);
+        case ARM64_WORKAROUND_4193714:
+                return IS_ENABLED(CONFIG_ARM64_ERRATUM_4193714);
         case ARM64_MPAM:
                 /*
                  * KVM MPAM support doesn't rely on the host kernel supporting MPAM.
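
The cpucap_is_possible() switch is what lets the new capability check vanish at compile time when the option is disabled. A condensed illustration of the caller-side pattern (not from this commit; do_dvmsync_workaround() is a hypothetical callee):

    /* Illustrative only: callers test the cap with a patched branch. */
    if (alternative_has_cap_unlikely(ARM64_WORKAROUND_4193714)) {
            /*
             * With CONFIG_ARM64_ERRATUM_4193714=n the check constant-folds
             * to false and this block is dropped entirely; otherwise the
             * branch is patched at boot according to whether the erratum
             * was detected.
             */
            do_dvmsync_workaround();        /* hypothetical */
    }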

arch/arm64/include/asm/cputype.h (2 additions & 0 deletions)

@@ -98,6 +98,7 @@
 #define ARM_CPU_PART_CORTEX_A725        0xD87
 #define ARM_CPU_PART_CORTEX_A720AE      0xD89
 #define ARM_CPU_PART_NEOVERSE_N3        0xD8E
+#define ARM_CPU_PART_C1_PRO             0xD8B
 
 #define APM_CPU_PART_XGENE              0x000
 #define APM_CPU_VAR_POTENZA             0x00
@@ -189,6 +190,7 @@
 #define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725)
 #define MIDR_CORTEX_A720AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720AE)
 #define MIDR_NEOVERSE_N3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N3)
+#define MIDR_C1_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_C1_PRO)
 #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
 #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
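
With the two definitions in place, matching the running CPU follows the usual MIDR pattern; for instance (an illustrative check, not code from this commit):

    /* Illustrative only: does the current CPU report the new part number? */
    static bool cpu_is_c1_pro(void)
    {
            return (read_cpuid_id() & MIDR_CPU_MODEL_MASK) == MIDR_C1_PRO;
    }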

arch/arm64/include/asm/fpsimd.h (21 additions & 0 deletions)

@@ -428,6 +428,24 @@ static inline size_t sme_state_size(struct task_struct const *task)
         return __sme_state_size(task_get_sme_vl(task));
 }
 
+void sme_enable_dvmsync(void);
+void sme_set_active(void);
+void sme_clear_active(void);
+
+static inline void sme_enter_from_user_mode(void)
+{
+        if (alternative_has_cap_unlikely(ARM64_WORKAROUND_4193714) &&
+            test_thread_flag(TIF_SME))
+                sme_clear_active();
+}
+
+static inline void sme_exit_to_user_mode(void)
+{
+        if (alternative_has_cap_unlikely(ARM64_WORKAROUND_4193714) &&
+            test_thread_flag(TIF_SME))
+                sme_set_active();
+}
+
 #else
 
 static inline void sme_user_disable(void) { BUILD_BUG(); }
@@ -456,6 +474,9 @@ static inline size_t sme_state_size(struct task_struct const *task)
         return 0;
 }
 
+static inline void sme_enter_from_user_mode(void) { }
+static inline void sme_exit_to_user_mode(void) { }
+
 #endif /* ! CONFIG_ARM64_SME */
 
 /* For use by EFI runtime services calls only */
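
Note that sme_enable_dvmsync(), sme_set_active() and sme_clear_active() are only declared here; their definitions fall outside the hunks shown. Assuming they maintain a mask of CPUs currently running SME-capable userspace (an assumption on our part), a minimal sketch could look like:

    /* Hypothetical sketch: the real definitions are outside these hunks. */
    static struct cpumask sme_active_cpus;

    void sme_set_active(void)
    {
            /* About to return to userspace with SME enabled on this CPU. */
            cpumask_set_cpu(smp_processor_id(), &sme_active_cpus);
    }

    void sme_clear_active(void)
    {
            /* Back in the kernel: stop treating this CPU as an SME user. */
            cpumask_clear_cpu(smp_processor_id(), &sme_active_cpus);
    }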

arch/arm64/include/asm/tlbbatch.h (8 additions & 2 deletions)

@@ -2,11 +2,17 @@
 #ifndef _ARCH_ARM64_TLBBATCH_H
 #define _ARCH_ARM64_TLBBATCH_H
 
+#include <linux/cpumask.h>
+
 struct arch_tlbflush_unmap_batch {
+#ifdef CONFIG_ARM64_ERRATUM_4193714
         /*
-         * For arm64, HW can do tlb shootdown, so we don't
-         * need to record cpumask for sending IPI
+         * Track CPUs that need SME DVMSync on completion of this batch.
+         * Otherwise, the arm64 HW can do tlb shootdown, so we don't need to
+         * record cpumask for sending IPI
          */
+        cpumask_var_t cpumask;
+#endif
 };
 
 #endif /* _ARCH_ARM64_TLBBATCH_H */
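
A note on the type choice: cpumask_var_t is a real pointer when CONFIG_CPUMASK_OFFSTACK=y, so the new field starts out zeroed and must be allocated before use. That is why the tlbflush.h hunks below guard it with cpumask_available() and allocate lazily. The idiom in isolation (illustrative only, not from this commit):

    /* Illustrative only: lazy allocation of a zero-initialised cpumask_var_t. */
    static cpumask_var_t lazy_mask;     /* zeroed, like the new struct field */

    static void track_cpu(int cpu)
    {
            if (!cpumask_available(lazy_mask) &&
                !zalloc_cpumask_var(&lazy_mask, GFP_ATOMIC))
                    return;             /* allocation failed: caller must fall back */

            cpumask_set_cpu(cpu, lazy_mask);
    }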

arch/arm64/include/asm/tlbflush.h (87 additions & 7 deletions)

@@ -80,6 +80,71 @@ static inline unsigned long get_trans_granule(void)
         }
 }
 
+#ifdef CONFIG_ARM64_ERRATUM_4193714
+
+void sme_do_dvmsync(const struct cpumask *mask);
+
+static inline void sme_dvmsync(struct mm_struct *mm)
+{
+        if (!alternative_has_cap_unlikely(ARM64_WORKAROUND_4193714))
+                return;
+
+        sme_do_dvmsync(mm_cpumask(mm));
+}
+
+static inline void sme_dvmsync_add_pending(struct arch_tlbflush_unmap_batch *batch,
+                                           struct mm_struct *mm)
+{
+        if (!alternative_has_cap_unlikely(ARM64_WORKAROUND_4193714))
+                return;
+
+        /*
+         * Order the mm_cpumask() read after the hardware DVMSync.
+         */
+        dsb(ish);
+        if (cpumask_empty(mm_cpumask(mm)))
+                return;
+
+        /*
+         * Allocate the batch cpumask on first use. Fall back to an immediate
+         * IPI for this mm in case of failure.
+         */
+        if (!cpumask_available(batch->cpumask) &&
+            !zalloc_cpumask_var(&batch->cpumask, GFP_ATOMIC)) {
+                sme_do_dvmsync(mm_cpumask(mm));
+                return;
+        }
+
+        cpumask_or(batch->cpumask, batch->cpumask, mm_cpumask(mm));
+}
+
+static inline void sme_dvmsync_batch(struct arch_tlbflush_unmap_batch *batch)
+{
+        if (!alternative_has_cap_unlikely(ARM64_WORKAROUND_4193714))
+                return;
+
+        if (!cpumask_available(batch->cpumask))
+                return;
+
+        sme_do_dvmsync(batch->cpumask);
+        cpumask_clear(batch->cpumask);
+}
+
+#else
+
+static inline void sme_dvmsync(struct mm_struct *mm)
+{
+}
+static inline void sme_dvmsync_add_pending(struct arch_tlbflush_unmap_batch *batch,
+                                           struct mm_struct *mm)
+{
+}
+static inline void sme_dvmsync_batch(struct arch_tlbflush_unmap_batch *batch)
+{
+}
+
+#endif /* CONFIG_ARM64_ERRATUM_4193714 */
+
 /*
  * Level-based TLBI operations.
  *
@@ -213,7 +278,21 @@ do { \
  * Complete broadcast TLB maintenance issued by the host which invalidates
  * stage 1 information in the host's own translation regime.
  */
-static inline void __tlbi_sync_s1ish(void)
+static inline void __tlbi_sync_s1ish(struct mm_struct *mm)
+{
+        dsb(ish);
+        __repeat_tlbi_sync(vale1is, 0);
+        sme_dvmsync(mm);
+}
+
+static inline void __tlbi_sync_s1ish_batch(struct arch_tlbflush_unmap_batch *batch)
+{
+        dsb(ish);
+        __repeat_tlbi_sync(vale1is, 0);
+        sme_dvmsync_batch(batch);
+}
+
+static inline void __tlbi_sync_s1ish_kernel(void)
 {
         dsb(ish);
         __repeat_tlbi_sync(vale1is, 0);
@@ -322,7 +401,7 @@ static inline void flush_tlb_all(void)
 {
         dsb(ishst);
         __tlbi(vmalle1is);
-        __tlbi_sync_s1ish();
+        __tlbi_sync_s1ish_kernel();
         isb();
 }
 
@@ -334,7 +413,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
         asid = __TLBI_VADDR(0, ASID(mm));
         __tlbi(aside1is, asid);
         __tlbi_user(aside1is, asid);
-        __tlbi_sync_s1ish();
+        __tlbi_sync_s1ish(mm);
         mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
 }
 
@@ -355,7 +434,7 @@ static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
  */
 static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 {
-        __tlbi_sync_s1ish();
+        __tlbi_sync_s1ish_batch(batch);
 }
 
 /*
@@ -557,7 +636,7 @@ static __always_inline void __do_flush_tlb_range(struct vm_area_struct *vma,
 
         if (!(flags & TLBF_NOSYNC)) {
                 if (!(flags & TLBF_NOBROADCAST))
-                        __tlbi_sync_s1ish();
+                        __tlbi_sync_s1ish(mm);
                 else
                         dsb(nsh);
         }
@@ -618,7 +697,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
         dsb(ishst);
         __flush_s1_tlb_range_op(vaale1is, start, pages, stride, 0,
                                 TLBI_TTL_UNKNOWN);
-        __tlbi_sync_s1ish();
+        __tlbi_sync_s1ish_kernel();
         isb();
 }
 
@@ -632,7 +711,7 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
 
         dsb(ishst);
         __tlbi(vaae1is, addr);
-        __tlbi_sync_s1ish();
+        __tlbi_sync_s1ish_kernel();
         isb();
 }
 
@@ -643,6 +722,7 @@ static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *b
 
         __flush_tlb_range(&vma, start, end, PAGE_SIZE, 3,
                           TLBF_NOWALKCACHE | TLBF_NOSYNC);
+        sme_dvmsync_add_pending(batch, mm);
 }
 
 static inline bool __pte_flags_need_flush(ptdesc_t oldval, ptdesc_t newval)
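
Taken together, the deferred-flush path now works in two phases. A condensed, illustrative caller's view (with mm, start and end standing in for whatever the generic reclaim code passes):

    struct arch_tlbflush_unmap_batch batch = { };

    /*
     * Per unmapped range: broadcast TLBI without the final sync, and
     * record mm_cpumask(mm) into batch.cpumask if the erratum applies.
     */
    arch_tlbbatch_add_pending(&batch, mm, start, end);

    /*
     * Once the batch completes: a single DSB + __repeat_tlbi_sync(),
     * then one round of DVMSync IPIs covering every recorded CPU.
     */
    arch_tlbbatch_flush(&batch);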

arch/arm64/kernel/cpu_errata.c (30 additions & 0 deletions)

@@ -11,6 +11,7 @@
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/cpufeature.h>
+#include <asm/fpsimd.h>
 #include <asm/kvm_asm.h>
 #include <asm/smp_plat.h>
 
@@ -575,6 +576,23 @@ static const struct midr_range erratum_spec_ssbs_list[] = {
 };
 #endif
 
+#ifdef CONFIG_ARM64_ERRATUM_4193714
+static bool has_sme_dvmsync_erratum(const struct arm64_cpu_capabilities *entry,
+                                    int scope)
+{
+        if (!id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1)))
+                return false;
+
+        return is_affected_midr_range(entry, scope);
+}
+
+static void cpu_enable_sme_dvmsync(const struct arm64_cpu_capabilities *__unused)
+{
+        if (this_cpu_has_cap(ARM64_WORKAROUND_4193714))
+                sme_enable_dvmsync();
+}
+#endif
+
 #ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
 static const struct midr_range erratum_ac03_cpu_38_list[] = {
         MIDR_ALL_VERSIONS(MIDR_AMPERE1),
@@ -901,6 +919,18 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                 .matches = need_arm_si_l1_workaround_4311569,
         },
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_4193714
+        {
+                .desc = "C1-Pro SME DVMSync early acknowledgement",
+                .capability = ARM64_WORKAROUND_4193714,
+                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+                .matches = has_sme_dvmsync_erratum,
+                .cpu_enable = cpu_enable_sme_dvmsync,
+                /* C1-Pro r0p0 - r1p2 (the latter only when REVIDR_EL1[0]==0) */
+                .midr_range = MIDR_RANGE(MIDR_C1_PRO, 0, 0, 1, 2),
+                MIDR_FIXED(MIDR_CPU_VAR_REV(1, 2), BIT(0)),
+        },
+#endif
 #ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
         {
                 .desc = "ARM errata 2966298, 3117295",
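
The MIDR_FIXED() entry encodes the escape hatch named in the comment: the affected range runs from r0p0 to r1p2, but an r1p2 part reporting REVIDR_EL1[0]==1 carries the fix and is excluded from the match. Checking that bit directly would look like this (illustrative only, not code from this commit):

    /* Illustrative only: r1p2 parts flag the fix in REVIDR_EL1 bit 0. */
    u64 revidr = read_cpuid(REVIDR_EL1);
    bool erratum_fixed = revidr & BIT(0);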

arch/arm64/kernel/entry-common.c (3 additions & 0 deletions)

@@ -21,6 +21,7 @@
 #include <asm/daifflags.h>
 #include <asm/esr.h>
 #include <asm/exception.h>
+#include <asm/fpsimd.h>
 #include <asm/irq_regs.h>
 #include <asm/kprobes.h>
 #include <asm/mmu.h>
@@ -70,6 +71,7 @@ static __always_inline void arm64_enter_from_user_mode(struct pt_regs *regs)
 {
         enter_from_user_mode(regs);
         mte_disable_tco_entry(current);
+        sme_enter_from_user_mode();
 }
 
 /*
@@ -83,6 +85,7 @@ static __always_inline void arm64_exit_to_user_mode(struct pt_regs *regs)
         local_irq_disable();
         exit_to_user_mode_prepare_legacy(regs);
         local_daif_mask();
+        sme_exit_to_user_mode();
         mte_check_tfsr_exit();
         exit_to_user_mode();
 }
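
Together with the fpsimd.h helpers above, these two hook sites bracket the user-mode window on each CPU. In summary (behaviour inferred from the hook names; the set/clear implementations are outside these hunks):

    /*
     * kernel entry: sme_enter_from_user_mode() -> sme_clear_active()
     *               (this CPU stops being a DVMSync IPI target)
     * kernel exit:  sme_exit_to_user_mode()    -> sme_set_active()
     *               (userspace may issue SME accesses again, so the
     *                CPU must be IPI'd to confirm DVMSync completion)
     */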
