55#include <linux/sched.h>
66#include <asm/sbi.h>
77#include <asm/mmu_context.h>
8- #include <asm/tlbflush.h>
8+
/*
 * Flush all TLB entries belonging to @asid on the local hart.
 *
 * SFENCE.VMA with rs1 = x0 (all virtual addresses) and rs2 = @asid
 * invalidates every address-translation cache entry tagged with that
 * ASID, per the RISC-V privileged specification.  The "memory" clobber
 * prevents the compiler from reordering page-table updates across the
 * fence.
 */
static inline void local_flush_tlb_all_asid(unsigned long asid)
{
	__asm__ __volatile__ ("sfence.vma x0, %0"
			:
			: "r" (asid)
			: "memory");
}
16+
/*
 * Flush the TLB entry for a single virtual address @addr in address
 * space @asid on the local hart.
 *
 * SFENCE.VMA with rs1 = @addr and rs2 = @asid invalidates only the
 * translation(s) covering that address for that ASID, per the RISC-V
 * privileged specification.  The "memory" clobber keeps page-table
 * stores ordered before the fence.
 */
static inline void local_flush_tlb_page_asid(unsigned long addr,
		unsigned long asid)
{
	__asm__ __volatile__ ("sfence.vma %0, %1"
			:
			: "r" (addr), "r" (asid)
			: "memory");
}
925
1026void flush_tlb_all (void )
1127{
@@ -15,7 +31,6 @@ void flush_tlb_all(void)
1531static void __sbi_tlb_flush_range (struct mm_struct * mm , unsigned long start ,
1632 unsigned long size , unsigned long stride )
1733{
18- struct cpumask * pmask = & mm -> context .tlb_stale_mask ;
1934 struct cpumask * cmask = mm_cpumask (mm );
2035 unsigned int cpuid ;
2136 bool broadcast ;
@@ -29,15 +44,6 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
2944 if (static_branch_unlikely (& use_asid_allocator )) {
3045 unsigned long asid = atomic_long_read (& mm -> context .id );
3146
32- /*
33- * TLB will be immediately flushed on harts concurrently
34- * executing this MM context. TLB flush on other harts
35- * is deferred until this MM context migrates there.
36- */
37- cpumask_setall (pmask );
38- cpumask_clear_cpu (cpuid , pmask );
39- cpumask_andnot (pmask , pmask , cmask );
40-
4147 if (broadcast ) {
4248 sbi_remote_sfence_vma_asid (cmask , start , size , asid );
4349 } else if (size <= stride ) {
0 commit comments