@@ -993,11 +993,11 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
 		return 0;
 	}
 
-	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
 	if (ptep && pte_present(*ptep))
 		kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
				 kvm->arch.lpid);
-	return 0;
+	return 0;
 }
 
 /* Called with kvm->mmu_lock held */
@@ -1013,7 +1013,7 @@ int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
 	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
 		return ref;
 
-	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
 	if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
 		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
					      gpa, shift);
@@ -1040,7 +1040,7 @@ int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
 	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
 		return ref;
 
-	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
 	if (ptep && pte_present(*ptep) && pte_young(*ptep))
 		ref = 1;
 	return ref;
@@ -1052,20 +1052,43 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm,
 {
 	unsigned long gfn = memslot->base_gfn + pagenum;
 	unsigned long gpa = gfn << PAGE_SHIFT;
-	pte_t *ptep;
+	pte_t *ptep, pte;
 	unsigned int shift;
 	int ret = 0;
 	unsigned long old, *rmapp;
 
 	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
 		return ret;
 
-	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
-	if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
-		ret = 1;
-		if (shift)
-			ret = 1 << (shift - PAGE_SHIFT);
+	/*
+	 * For performance reasons we don't hold kvm->mmu_lock while walking the
+	 * partition scoped table.
+	 */
+	ptep = find_kvm_secondary_pte_unlocked(kvm, gpa, &shift);
+	if (!ptep)
+		return 0;
+
+	pte = READ_ONCE(*ptep);
+	if (pte_present(pte) && pte_dirty(pte)) {
 		spin_lock(&kvm->mmu_lock);
+		/*
+		 * Recheck the pte again
+		 */
+		if (pte_val(pte) != pte_val(*ptep)) {
+			/*
+			 * We have KVM_MEM_LOG_DIRTY_PAGES enabled. Hence we can
+			 * only find PAGE_SIZE pte entries here. We can continue
+			 * to use the pte addr returned by above page table
+			 * walk.
+			 */
+			if (!pte_present(*ptep) || !pte_dirty(*ptep)) {
+				spin_unlock(&kvm->mmu_lock);
+				return 0;
+			}
+		}
+
+		ret = 1;
+		VM_BUG_ON(shift);
 		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
					      gpa, shift);
 		kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid);
@@ -1121,7 +1144,7 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
 	gpa = memslot->base_gfn << PAGE_SHIFT;
 	spin_lock(&kvm->mmu_lock);
 	for (n = memslot->npages; n; --n) {
-		ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+		ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
 		if (ptep && pte_present(*ptep))
			kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
					 kvm->arch.lpid);
0 commit comments