@@ -52,7 +52,7 @@ bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
 }
 
 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
-			  gfn_t start, gfn_t end);
+			  gfn_t start, gfn_t end, bool can_yield);
 
 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
 {
@@ -65,7 +65,7 @@ void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
 
 	list_del(&root->link);
 
-	zap_gfn_range(kvm, root, 0, max_gfn);
+	zap_gfn_range(kvm, root, 0, max_gfn, false);
 
 	free_page((unsigned long)root->spt);
 	kmem_cache_free(mmu_page_header_cache, root);
@@ -303,9 +303,14 @@ static bool tdp_mmu_iter_flush_cond_resched(struct kvm *kvm, struct tdp_iter *it
  * non-root pages mapping GFNs strictly within that range. Returns true if
  * SPTEs have been cleared and a TLB flush is needed before releasing the
  * MMU lock.
+ * If can_yield is true, will release the MMU lock and reschedule if the
+ * scheduler needs the CPU or there is contention on the MMU lock. If this
+ * function cannot yield, it will not release the MMU lock or reschedule and
+ * the caller must ensure it does not supply too large a GFN range, or the
+ * operation can cause a soft lockup.
  */
 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
-			  gfn_t start, gfn_t end)
+			  gfn_t start, gfn_t end, bool can_yield)
 {
 	struct tdp_iter iter;
 	bool flush_needed = false;
@@ -326,7 +331,10 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 
 		tdp_mmu_set_spte(kvm, &iter, 0);
 
-		flush_needed = tdp_mmu_iter_flush_cond_resched(kvm, &iter);
+		if (can_yield)
+			flush_needed = tdp_mmu_iter_flush_cond_resched(kvm, &iter);
+		else
+			flush_needed = true;
 	}
 	return flush_needed;
 }
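
(Aside, not part of the commit.) To make the new can_yield contract concrete, here is a minimal, self-contained C sketch of the pattern the hunk above introduces: a range walk that either defers to a cond-resched helper or simply records that a TLB flush is still owed. The helper names (need_resched_or_lock_contended, yield_and_flush_if_needed, zap_range_sketch) are hypothetical stand-ins for illustration, not KVM APIs.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for "the scheduler wants the CPU or someone is
 * waiting on the MMU lock". Always false in this toy example. */
static bool need_resched_or_lock_contended(void)
{
	return false;
}

/* Sketch of a cond-resched helper: if a yield is warranted, the real code
 * flushes pending TLB entries, drops the lock, reschedules, and retakes the
 * lock, so no flush is pending afterwards. Otherwise the pending flush
 * remains the caller's responsibility. */
static bool yield_and_flush_if_needed(bool flush_pending)
{
	if (need_resched_or_lock_contended())
		return false;
	return flush_pending;
}

/*
 * Walk a range of entries, clearing each one. With can_yield == true the
 * walk may pause at every step; with can_yield == false it must run to
 * completion, so the caller has to keep the range small. Returns true if a
 * TLB flush is still owed when the walk finishes.
 */
static bool zap_range_sketch(unsigned long start, unsigned long end,
			     bool can_yield)
{
	bool flush_needed = false;
	unsigned long i;

	for (i = start; i < end; i++) {
		/* clear_entry(i); -- omitted in this sketch */

		if (can_yield)
			flush_needed = yield_and_flush_if_needed(true);
		else
			flush_needed = true;
	}
	return flush_needed;
}

int main(void)
{
	printf("flush needed (can yield):    %d\n", zap_range_sketch(0, 16, true));
	printf("flush needed (cannot yield): %d\n", zap_range_sketch(0, 16, false));
	return 0;
}

The point of the flag shows up in the else branch: a caller that forbids yielding gets an uninterrupted walk but always owes a flush at the end, which is why such callers must not supply an overly large range.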
@@ -349,7 +357,7 @@ bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
 	 */
 	kvm_mmu_get_root(kvm, root);
 
-	flush |= zap_gfn_range(kvm, root, start, end);
+	flush |= zap_gfn_range(kvm, root, start, end, true);
 
 	kvm_mmu_put_root(kvm, root);
 }
@@ -496,3 +504,65 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 
 	return ret;
 }
+
+static int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, unsigned long start,
+		unsigned long end, unsigned long data,
+		int (*handler)(struct kvm *kvm, struct kvm_memory_slot *slot,
+			       struct kvm_mmu_page *root, gfn_t start,
+			       gfn_t end, unsigned long data))
+{
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *memslot;
+	struct kvm_mmu_page *root;
+	int ret = 0;
+	int as_id;
+
+	for_each_tdp_mmu_root(kvm, root) {
+		/*
+		 * Take a reference on the root so that it cannot be freed if
+		 * this thread releases the MMU lock and yields in this loop.
+		 */
+		kvm_mmu_get_root(kvm, root);
+
+		as_id = kvm_mmu_page_as_id(root);
+		slots = __kvm_memslots(kvm, as_id);
+		kvm_for_each_memslot(memslot, slots) {
+			unsigned long hva_start, hva_end;
+			gfn_t gfn_start, gfn_end;
+
+			hva_start = max(start, memslot->userspace_addr);
+			hva_end = min(end, memslot->userspace_addr +
+				      (memslot->npages << PAGE_SHIFT));
+			if (hva_start >= hva_end)
+				continue;
+			/*
+			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
+			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
+			 */
+			gfn_start = hva_to_gfn_memslot(hva_start, memslot);
+			gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
+
+			ret |= handler(kvm, memslot, root, gfn_start,
+				       gfn_end, data);
+		}
+
+		kvm_mmu_put_root(kvm, root);
+	}
+
+	return ret;
+}
+
+static int zap_gfn_range_hva_wrapper(struct kvm *kvm,
+				     struct kvm_memory_slot *slot,
+				     struct kvm_mmu_page *root, gfn_t start,
+				     gfn_t end, unsigned long unused)
+{
+	return zap_gfn_range(kvm, root, start, end, false);
+}
+
+int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
+			      unsigned long end)
+{
+	return kvm_tdp_mmu_handle_hva_range(kvm, start, end, 0,
+					    zap_gfn_range_hva_wrapper);
+}
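
(Aside, not part of the commit.) The hva-to-gfn clamping in kvm_tdp_mmu_handle_hva_range can be worked through with a toy example. The sketch below uses a simplified stand-in for struct kvm_memory_slot and assumes hva_to_gfn_memslot() amounts to base_gfn plus the page offset into the slot, which matches its definition in kvm_host.h; the struct, values, and names here are made up for illustration.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Simplified stand-in for struct kvm_memory_slot: only the fields the
 * hva-range walk above needs. */
struct toy_memslot {
	unsigned long userspace_addr;	/* hva where the slot starts */
	unsigned long npages;		/* slot size in pages */
	unsigned long base_gfn;		/* gfn backing userspace_addr */
};

/* Assumed behavior of hva_to_gfn_memslot(): offset within the slot, in
 * pages, added to the slot's base gfn. */
static unsigned long hva_to_gfn(unsigned long hva, const struct toy_memslot *slot)
{
	return slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
}

int main(void)
{
	struct toy_memslot slot = {
		.userspace_addr	= 0x700000000000UL,
		.npages		= 512,		/* 2 MiB slot */
		.base_gfn	= 0x1000,
	};
	/* An hva range that only partially covers pages 5..9 of the slot. */
	unsigned long start = slot.userspace_addr + 5 * PAGE_SIZE + 123;
	unsigned long end   = slot.userspace_addr + 9 * PAGE_SIZE + 1;

	/* Clamp the hva range to the slot, exactly as the handler does. */
	unsigned long slot_end  = slot.userspace_addr + (slot.npages << PAGE_SHIFT);
	unsigned long hva_start = start > slot.userspace_addr ? start : slot.userspace_addr;
	unsigned long hva_end   = end < slot_end ? end : slot_end;

	if (hva_start >= hva_end) {
		printf("no overlap with this slot\n");
		return 0;
	}

	/* gfn_end is exclusive: adding PAGE_SIZE - 1 before converting makes
	 * sure a page the hva range only partially touches is still zapped. */
	unsigned long gfn_start = hva_to_gfn(hva_start, &slot);
	unsigned long gfn_end   = hva_to_gfn(hva_end + PAGE_SIZE - 1, &slot);

	/* Expect [0x1005, 0x100a): gfns 0x1005..0x1009 intersect the range. */
	printf("gfn range: [0x%lx, 0x%lx)\n", gfn_start, gfn_end);
	return 0;
}

The rounding on hva_end is the detail worth noticing: hva_end is exclusive, so converting it directly would drop a page that the range only partially covers, while adding PAGE_SIZE - 1 first yields an exclusive gfn_end that still includes that last page.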