@@ -269,10 +269,7 @@ static inline void __tlbi_sync_s1ish_hyp(void)
  *	unmapping pages from vmalloc/io space.
  *
  *	flush_tlb_page(vma, addr)
- *		Invalidate a single user mapping for address 'addr' in the
- *		address space corresponding to 'vma->mm'. Note that this
- *		operation only invalidates a single, last-level page-table
- *		entry and therefore does not affect any walk-caches.
+ *		Equivalent to __flush_tlb_page(..., flags=TLBF_NONE)
  *
  *
  * Next, we have some undocumented invalidation routines that you probably
@@ -300,13 +297,14 @@ static inline void __tlbi_sync_s1ish_hyp(void)
  *		TLBF_NOSYNC (don't issue trailing dsb) and TLBF_NOBROADCAST
  *		(only perform the invalidation for the local cpu).
  *
- *	local_flush_tlb_page(vma, addr)
- *		Local variant of flush_tlb_page(). Stale TLB entries may
- *		remain in remote CPUs.
- *
- *	local_flush_tlb_page_nonotify(vma, addr)
- *		Same as local_flush_tlb_page() except MMU notifier will not be
- *		called.
+ *	__flush_tlb_page(vma, addr, flags)
+ *		Invalidate a single user mapping for address 'addr' in the
+ *		address space corresponding to 'vma->mm'. Note that this
+ *		operation only invalidates a single, last-level page-table entry
+ *		and therefore does not affect any walk-caches. flags may contain
+ *		any combination of TLBF_NONOTIFY (don't call mmu notifiers),
+ *		TLBF_NOSYNC (don't issue trailing dsb) and TLBF_NOBROADCAST
+ *		(only perform the invalidation for the local cpu).
  *
  * Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented
  * on top of these routines, since that is our interface to the mmu_gather
@@ -340,51 +338,6 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
 }
 
-static inline void __local_flush_tlb_page_nonotify_nosync(struct mm_struct *mm,
-							  unsigned long uaddr)
-{
-	dsb(nshst);
-	__tlbi_level_asid(vale1, uaddr, TLBI_TTL_UNKNOWN, ASID(mm));
-}
-
-static inline void local_flush_tlb_page_nonotify(struct vm_area_struct *vma,
-						 unsigned long uaddr)
-{
-	__local_flush_tlb_page_nonotify_nosync(vma->vm_mm, uaddr);
-	dsb(nsh);
-}
-
-static inline void local_flush_tlb_page(struct vm_area_struct *vma,
-					unsigned long uaddr)
-{
-	__local_flush_tlb_page_nonotify_nosync(vma->vm_mm, uaddr);
-	mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, uaddr & PAGE_MASK,
-						    (uaddr & PAGE_MASK) + PAGE_SIZE);
-	dsb(nsh);
-}
-
-static inline void __flush_tlb_page_nosync(struct mm_struct *mm,
-					   unsigned long uaddr)
-{
-	dsb(ishst);
-	__tlbi_level_asid(vale1is, uaddr, TLBI_TTL_UNKNOWN, ASID(mm));
-	mmu_notifier_arch_invalidate_secondary_tlbs(mm, uaddr & PAGE_MASK,
-						    (uaddr & PAGE_MASK) + PAGE_SIZE);
-}
-
-static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
-					 unsigned long uaddr)
-{
-	return __flush_tlb_page_nosync(vma->vm_mm, uaddr);
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				  unsigned long uaddr)
-{
-	flush_tlb_page_nosync(vma, uaddr);
-	__tlbi_sync_s1ish();
-}
-
 static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
 {
 	return true;
@@ -632,6 +585,22 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 	__flush_tlb_range(vma, start, end, PAGE_SIZE, TLBI_TTL_UNKNOWN, TLBF_NONE);
 }
 
+static inline void __flush_tlb_page(struct vm_area_struct *vma,
+				    unsigned long uaddr, tlbf_t flags)
+{
+	unsigned long start = round_down(uaddr, PAGE_SIZE);
+	unsigned long end = start + PAGE_SIZE;
+
+	__do_flush_tlb_range(vma, start, end, PAGE_SIZE, TLBI_TTL_UNKNOWN,
+			     TLBF_NOWALKCACHE | flags);
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+				  unsigned long uaddr)
+{
+	__flush_tlb_page(vma, uaddr, TLBF_NONE);
+}
+
 static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	const unsigned long stride = PAGE_SIZE;
0 commit comments