@@ -80,6 +80,71 @@ static inline unsigned long get_trans_granule(void)
8080 }
8181}
8282
#ifdef CONFIG_ARM64_ERRATUM_4193714

/* Implemented out of line; sends a DVM Sync to every CPU in @mask. */
void sme_do_dvmsync(const struct cpumask *mask);

/*
 * Complete any outstanding broadcast TLB maintenance for @mm on parts
 * affected by erratum 4193714. No-op unless the workaround capability
 * is enabled on this system.
 */
static inline void sme_dvmsync(struct mm_struct *mm)
{
	if (!alternative_has_cap_unlikely(ARM64_WORKAROUND_4193714))
		return;

	sme_do_dvmsync(mm_cpumask(mm));
}

/*
 * Record the CPUs of @mm in @batch so a single deferred DVM Sync can
 * cover them later. Falls back to an immediate sync when no batch
 * cpumask can be allocated.
 */
static inline void sme_dvmsync_add_pending(struct arch_tlbflush_unmap_batch *batch,
					   struct mm_struct *mm)
{
	if (!alternative_has_cap_unlikely(ARM64_WORKAROUND_4193714))
		return;

	/*
	 * Order the mm_cpumask() read after the hardware DVMSync.
	 */
	dsb(ish);
	if (cpumask_empty(mm_cpumask(mm)))
		return;

	/*
	 * Allocate the batch cpumask on first use. Fall back to an immediate
	 * IPI for this mm in case of failure.
	 */
	if (!cpumask_available(batch->cpumask) &&
	    !zalloc_cpumask_var(&batch->cpumask, GFP_ATOMIC)) {
		sme_do_dvmsync(mm_cpumask(mm));
		return;
	}

	cpumask_or(batch->cpumask, batch->cpumask, mm_cpumask(mm));
}

/*
 * Issue the DVM Sync for every CPU accumulated in @batch and reset the
 * mask for reuse. No-op if the workaround is disabled or nothing was
 * ever batched (cpumask never allocated).
 */
static inline void sme_dvmsync_batch(struct arch_tlbflush_unmap_batch *batch)
{
	if (!alternative_has_cap_unlikely(ARM64_WORKAROUND_4193714))
		return;

	if (!cpumask_available(batch->cpumask))
		return;

	sme_do_dvmsync(batch->cpumask);
	cpumask_clear(batch->cpumask);
}

#else

/* Stubs: erratum 4193714 workaround compiled out. */
static inline void sme_dvmsync(struct mm_struct *mm)
{
}
static inline void sme_dvmsync_add_pending(struct arch_tlbflush_unmap_batch *batch,
					   struct mm_struct *mm)
{
}
static inline void sme_dvmsync_batch(struct arch_tlbflush_unmap_batch *batch)
{
}

#endif /* CONFIG_ARM64_ERRATUM_4193714 */
147+
83148/*
84149 * Level-based TLBI operations.
85150 *
@@ -213,7 +278,21 @@ do { \
213278 * Complete broadcast TLB maintenance issued by the host which invalidates
214279 * stage 1 information in the host's own translation regime.
215280 */
216- static inline void __tlbi_sync_s1ish (void )
281+ static inline void __tlbi_sync_s1ish (struct mm_struct * mm )
282+ {
283+ dsb (ish );
284+ __repeat_tlbi_sync (vale1is , 0 );
285+ sme_dvmsync (mm );
286+ }
287+
288+ static inline void __tlbi_sync_s1ish_batch (struct arch_tlbflush_unmap_batch * batch )
289+ {
290+ dsb (ish );
291+ __repeat_tlbi_sync (vale1is , 0 );
292+ sme_dvmsync_batch (batch );
293+ }
294+
295+ static inline void __tlbi_sync_s1ish_kernel (void )
217296{
218297 dsb (ish );
219298 __repeat_tlbi_sync (vale1is , 0 );
@@ -322,7 +401,7 @@ static inline void flush_tlb_all(void)
322401{
323402 dsb (ishst );
324403 __tlbi (vmalle1is );
325- __tlbi_sync_s1ish ();
404+ __tlbi_sync_s1ish_kernel ();
326405 isb ();
327406}
328407
@@ -334,7 +413,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
334413 asid = __TLBI_VADDR (0 , ASID (mm ));
335414 __tlbi (aside1is , asid );
336415 __tlbi_user (aside1is , asid );
337- __tlbi_sync_s1ish ();
416+ __tlbi_sync_s1ish (mm );
338417 mmu_notifier_arch_invalidate_secondary_tlbs (mm , 0 , -1UL );
339418}
340419
@@ -355,7 +434,7 @@ static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
355434 */
static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
	/* Complete all TLBIs deferred into @batch, plus any pending DVM Syncs. */
	__tlbi_sync_s1ish_batch(batch);
}
360439
361440/*
@@ -557,7 +636,7 @@ static __always_inline void __do_flush_tlb_range(struct vm_area_struct *vma,
557636
558637 if (!(flags & TLBF_NOSYNC )) {
559638 if (!(flags & TLBF_NOBROADCAST ))
560- __tlbi_sync_s1ish ();
639+ __tlbi_sync_s1ish (mm );
561640 else
562641 dsb (nsh );
563642 }
@@ -618,7 +697,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
618697 dsb (ishst );
619698 __flush_s1_tlb_range_op (vaale1is , start , pages , stride , 0 ,
620699 TLBI_TTL_UNKNOWN );
621- __tlbi_sync_s1ish ();
700+ __tlbi_sync_s1ish_kernel ();
622701 isb ();
623702}
624703
@@ -632,7 +711,7 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
632711
633712 dsb (ishst );
634713 __tlbi (vaae1is , addr );
635- __tlbi_sync_s1ish ();
714+ __tlbi_sync_s1ish_kernel ();
636715 isb ();
637716}
638717
@@ -643,6 +722,7 @@ static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *b
643722
644723 __flush_tlb_range (& vma , start , end , PAGE_SIZE , 3 ,
645724 TLBF_NOWALKCACHE | TLBF_NOSYNC );
725+ sme_dvmsync_add_pending (batch , mm );
646726}
647727
648728static inline bool __pte_flags_need_flush (ptdesc_t oldval , ptdesc_t newval )
0 commit comments