#undef pr_fmt
/*
 * Prefix all pr_*() output from this file with the module name.
 * NOTE: there must be no space between the macro name and '(' — with a
 * space, pr_fmt becomes an object-like macro and every pr_*() call breaks.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2626
27- #define pr_tdx_error (__fn , __err ) \
28- pr_err_ratelimited("SEAMCALL %s failed: 0x%llx\n", #__fn, __err)
/*
 * Report a fatal SEAMCALL error and (optionally) bug the VM.
 *
 * @__err:  SEAMCALL error code; any non-zero value is treated as failure.
 * @__f:    SEAMCALL name as a string literal (callers pass #__fn).
 * @__kvm:  the affected VM, or NULL for errors not tied to a VM.
 * @__fmt/@__args: extra printf-style context (register values etc.).
 *
 * Evaluates to true (wrapped in unlikely()) if __err was non-zero, so
 * callers can write: if (TDX_BUG_ON(...)) return -EIO;
 *
 * The WARN/print is suppressed when the VM is already bugged, to avoid
 * spamming the log once a VM has hit a fatal error.
 */
#define __TDX_BUG_ON(__err, __f, __kvm, __fmt, __args...)			\
({										\
	struct kvm *_kvm = (__kvm);						\
	bool __ret = !!(__err);							\
										\
	if (WARN_ON_ONCE(__ret && (!_kvm || !_kvm->vm_bugged))) {		\
		if (_kvm)							\
			kvm_vm_bugged(_kvm);					\
		pr_err_ratelimited("SEAMCALL " __f " failed: 0x%llx" __fmt "\n",\
				   __err, __args);				\
	}									\
	unlikely(__ret);							\
})
2940
30- #define __pr_tdx_error_N ( __fn_str , __err , __fmt , ...) \
31- pr_err_ratelimited("SEAMCALL " __fn_str " failed: 0x%llx, " __fmt, __err, __VA_ARGS__ )
/*
 * TDX_BUG_ON() - __TDX_BUG_ON() with no extra register context.
 * The "%s", "" pair supplies a harmless format/argument so that the
 * variadic __args list in __TDX_BUG_ON() is never empty (avoids a
 * trailing-comma problem in the pr_err_ratelimited() expansion).
 */
#define TDX_BUG_ON(__err, __fn, __kvm)					\
	__TDX_BUG_ON(__err, #__fn, __kvm, "%s", "")
3243
33- #define pr_tdx_error_1 ( __fn , __err , __rcx ) \
34- __pr_tdx_error_N( #__fn, __err , "rcx 0x%llx\n ", __rcx)
/* TDX_BUG_ON() variant that also logs RCX from the failed SEAMCALL. */
#define TDX_BUG_ON_1(__err, __fn, __rcx, __kvm)				\
	__TDX_BUG_ON(__err, #__fn, __kvm, ", rcx 0x%llx", __rcx)
3546
36- #define pr_tdx_error_2 (__fn , __err , __rcx , __rdx ) \
37- __pr_tdx_error_N(#__fn, __err, "rcx 0x%llx, rdx 0x%llx\n", __rcx, __rdx)
/* TDX_BUG_ON() variant that also logs RCX and RDX from the failed SEAMCALL. */
#define TDX_BUG_ON_2(__err, __fn, __rcx, __rdx, __kvm)			\
	__TDX_BUG_ON(__err, #__fn, __kvm, ", rcx 0x%llx, rdx 0x%llx",	\
		     __rcx, __rdx)
49+
/* TDX_BUG_ON() variant that also logs RCX, RDX and R8 from the failed SEAMCALL. */
#define TDX_BUG_ON_3(__err, __fn, __rcx, __rdx, __r8, __kvm)		\
	__TDX_BUG_ON(__err, #__fn, __kvm,				\
		     ", rcx 0x%llx, rdx 0x%llx, r8 0x%llx",		\
		     __rcx, __rdx, __r8)
3852
39- #define pr_tdx_error_3 (__fn , __err , __rcx , __rdx , __r8 ) \
40- __pr_tdx_error_N(#__fn, __err, "rcx 0x%llx, rdx 0x%llx, r8 0x%llx\n", __rcx, __rdx, __r8)
4153
4254bool enable_tdx __ro_after_init ;
4355module_param_named (tdx , enable_tdx , bool , 0444 );
@@ -313,10 +325,9 @@ static int __tdx_reclaim_page(struct page *page)
313325 * before the HKID is released and control pages have also been
314326 * released at this point, so there is no possibility of contention.
315327 */
316- if (WARN_ON_ONCE (err )) {
317- pr_tdx_error_3 (TDH_PHYMEM_PAGE_RECLAIM , err , rcx , rdx , r8 );
328+ if (TDX_BUG_ON_3 (err , TDH_PHYMEM_PAGE_RECLAIM , rcx , rdx , r8 , NULL ))
318329 return - EIO ;
319- }
330+
320331 return 0 ;
321332}
322333
@@ -404,8 +415,8 @@ static void tdx_flush_vp_on_cpu(struct kvm_vcpu *vcpu)
404415 return ;
405416
406417 smp_call_function_single (cpu , tdx_flush_vp , & arg , 1 );
407- if ( KVM_BUG_ON ( arg . err , vcpu -> kvm ))
408- pr_tdx_error ( TDH_VP_FLUSH , arg .err );
418+
419+ TDX_BUG_ON ( arg .err , TDH_VP_FLUSH , vcpu -> kvm );
409420}
410421
411422void tdx_disable_virtualization_cpu (void )
@@ -464,8 +475,7 @@ static void smp_func_do_phymem_cache_wb(void *unused)
464475 }
465476
466477out :
467- if (WARN_ON_ONCE (err ))
468- pr_tdx_error (TDH_PHYMEM_CACHE_WB , err );
478+ TDX_BUG_ON (err , TDH_PHYMEM_CACHE_WB , NULL );
469479}
470480
471481void tdx_mmu_release_hkid (struct kvm * kvm )
@@ -504,8 +514,7 @@ void tdx_mmu_release_hkid(struct kvm *kvm)
504514 err = tdh_mng_vpflushdone (& kvm_tdx -> td );
505515 if (err == TDX_FLUSHVP_NOT_DONE )
506516 goto out ;
507- if (KVM_BUG_ON (err , kvm )) {
508- pr_tdx_error (TDH_MNG_VPFLUSHDONE , err );
517+ if (TDX_BUG_ON (err , TDH_MNG_VPFLUSHDONE , kvm )) {
509518 pr_err ("tdh_mng_vpflushdone() failed. HKID %d is leaked.\n" ,
510519 kvm_tdx -> hkid );
511520 goto out ;
@@ -528,8 +537,7 @@ void tdx_mmu_release_hkid(struct kvm *kvm)
528537 * tdh_mng_key_freeid() will fail.
529538 */
530539 err = tdh_mng_key_freeid (& kvm_tdx -> td );
531- if (KVM_BUG_ON (err , kvm )) {
532- pr_tdx_error (TDH_MNG_KEY_FREEID , err );
540+ if (TDX_BUG_ON (err , TDH_MNG_KEY_FREEID , kvm )) {
533541 pr_err ("tdh_mng_key_freeid() failed. HKID %d is leaked.\n" ,
534542 kvm_tdx -> hkid );
535543 } else {
@@ -580,10 +588,9 @@ static void tdx_reclaim_td_control_pages(struct kvm *kvm)
580588 * when it is reclaiming TDCS).
581589 */
582590 err = tdh_phymem_page_wbinvd_tdr (& kvm_tdx -> td );
583- if (KVM_BUG_ON (err , kvm )) {
584- pr_tdx_error (TDH_PHYMEM_PAGE_WBINVD , err );
591+ if (TDX_BUG_ON (err , TDH_PHYMEM_PAGE_WBINVD , kvm ))
585592 return ;
586- }
593+
587594 tdx_quirk_reset_page (kvm_tdx -> td .tdr_page );
588595
589596 __free_page (kvm_tdx -> td .tdr_page );
@@ -606,11 +613,8 @@ static int tdx_do_tdh_mng_key_config(void *param)
606613
607614 /* TDX_RND_NO_ENTROPY related retries are handled by sc_retry() */
608615 err = tdh_mng_key_config (& kvm_tdx -> td );
609-
610- if (KVM_BUG_ON (err , & kvm_tdx -> kvm )) {
611- pr_tdx_error (TDH_MNG_KEY_CONFIG , err );
616+ if (TDX_BUG_ON (err , TDH_MNG_KEY_CONFIG , & kvm_tdx -> kvm ))
612617 return - EIO ;
613- }
614618
615619 return 0 ;
616620}
@@ -1601,10 +1605,8 @@ static int tdx_mem_page_add(struct kvm *kvm, gfn_t gfn, enum pg_level level,
16011605 if (unlikely (tdx_operand_busy (err )))
16021606 return - EBUSY ;
16031607
1604- if (KVM_BUG_ON (err , kvm )) {
1605- pr_tdx_error_2 (TDH_MEM_PAGE_ADD , err , entry , level_state );
1608+ if (TDX_BUG_ON_2 (err , TDH_MEM_PAGE_ADD , entry , level_state , kvm ))
16061609 return - EIO ;
1607- }
16081610
16091611 return 0 ;
16101612}
@@ -1623,10 +1625,8 @@ static int tdx_mem_page_aug(struct kvm *kvm, gfn_t gfn,
16231625 if (unlikely (tdx_operand_busy (err )))
16241626 return - EBUSY ;
16251627
1626- if (KVM_BUG_ON (err , kvm )) {
1627- pr_tdx_error_2 (TDH_MEM_PAGE_AUG , err , entry , level_state );
1628+ if (TDX_BUG_ON_2 (err , TDH_MEM_PAGE_AUG , entry , level_state , kvm ))
16281629 return - EIO ;
1629- }
16301630
16311631 return 0 ;
16321632}
@@ -1675,10 +1675,8 @@ static int tdx_sept_link_private_spt(struct kvm *kvm, gfn_t gfn,
16751675 if (unlikely (tdx_operand_busy (err )))
16761676 return - EBUSY ;
16771677
1678- if (KVM_BUG_ON (err , kvm )) {
1679- pr_tdx_error_2 (TDH_MEM_SEPT_ADD , err , entry , level_state );
1678+ if (TDX_BUG_ON_2 (err , TDH_MEM_SEPT_ADD , entry , level_state , kvm ))
16801679 return - EIO ;
1681- }
16821680
16831681 return 0 ;
16841682}
@@ -1726,8 +1724,7 @@ static void tdx_track(struct kvm *kvm)
17261724 tdx_no_vcpus_enter_stop (kvm );
17271725 }
17281726
1729- if (KVM_BUG_ON (err , kvm ))
1730- pr_tdx_error (TDH_MEM_TRACK , err );
1727+ TDX_BUG_ON (err , TDH_MEM_TRACK , kvm );
17311728
17321729 kvm_make_all_cpus_request (kvm , KVM_REQ_OUTSIDE_GUEST_MODE );
17331730}
@@ -1784,10 +1781,8 @@ static void tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
17841781 tdx_no_vcpus_enter_stop (kvm );
17851782 }
17861783
1787- if (KVM_BUG_ON (err , kvm )) {
1788- pr_tdx_error_2 (TDH_MEM_RANGE_BLOCK , err , entry , level_state );
1784+ if (TDX_BUG_ON_2 (err , TDH_MEM_RANGE_BLOCK , entry , level_state , kvm ))
17891785 return ;
1790- }
17911786
17921787 /*
17931788 * TDX requires TLB tracking before dropping private page. Do
@@ -1814,16 +1809,12 @@ static void tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
18141809 tdx_no_vcpus_enter_stop (kvm );
18151810 }
18161811
1817- if (KVM_BUG_ON (err , kvm )) {
1818- pr_tdx_error_2 (TDH_MEM_PAGE_REMOVE , err , entry , level_state );
1812+ if (TDX_BUG_ON_2 (err , TDH_MEM_PAGE_REMOVE , entry , level_state , kvm ))
18191813 return ;
1820- }
18211814
18221815 err = tdh_phymem_page_wbinvd_hkid ((u16 )kvm_tdx -> hkid , page );
1823- if (KVM_BUG_ON (err , kvm )) {
1824- pr_tdx_error (TDH_PHYMEM_PAGE_WBINVD , err );
1816+ if (TDX_BUG_ON (err , TDH_PHYMEM_PAGE_WBINVD , kvm ))
18251817 return ;
1826- }
18271818
18281819 tdx_quirk_reset_page (page );
18291820}
@@ -2463,8 +2454,7 @@ static int __tdx_td_init(struct kvm *kvm, struct td_params *td_params,
24632454 goto free_packages ;
24642455 }
24652456
2466- if (WARN_ON_ONCE (err )) {
2467- pr_tdx_error (TDH_MNG_CREATE , err );
2457+ if (TDX_BUG_ON (err , TDH_MNG_CREATE , kvm )) {
24682458 ret = - EIO ;
24692459 goto free_packages ;
24702460 }
@@ -2505,8 +2495,7 @@ static int __tdx_td_init(struct kvm *kvm, struct td_params *td_params,
25052495 ret = - EAGAIN ;
25062496 goto teardown ;
25072497 }
2508- if (WARN_ON_ONCE (err )) {
2509- pr_tdx_error (TDH_MNG_ADDCX , err );
2498+ if (TDX_BUG_ON (err , TDH_MNG_ADDCX , kvm )) {
25102499 ret = - EIO ;
25112500 goto teardown ;
25122501 }
@@ -2523,8 +2512,7 @@ static int __tdx_td_init(struct kvm *kvm, struct td_params *td_params,
25232512 * seamcall_err = err ;
25242513 ret = - EINVAL ;
25252514 goto teardown ;
2526- } else if (WARN_ON_ONCE (err )) {
2527- pr_tdx_error_1 (TDH_MNG_INIT , err , rcx );
2515+ } else if (TDX_BUG_ON_1 (err , TDH_MNG_INIT , rcx , kvm )) {
25282516 ret = - EIO ;
25292517 goto teardown ;
25302518 }
@@ -2802,10 +2790,8 @@ static int tdx_td_finalize(struct kvm *kvm, struct kvm_tdx_cmd *cmd)
28022790 cmd -> hw_error = tdh_mr_finalize (& kvm_tdx -> td );
28032791 if (tdx_operand_busy (cmd -> hw_error ))
28042792 return - EBUSY ;
2805- if (KVM_BUG_ON (cmd -> hw_error , kvm )) {
2806- pr_tdx_error (TDH_MR_FINALIZE , cmd -> hw_error );
2793+ if (TDX_BUG_ON (cmd -> hw_error , TDH_MR_FINALIZE , kvm ))
28072794 return - EIO ;
2808- }
28092795
28102796 kvm_tdx -> state = TD_STATE_RUNNABLE ;
28112797 /* TD_STATE_RUNNABLE must be set before 'pre_fault_allowed' */
@@ -2892,16 +2878,14 @@ static int tdx_td_vcpu_init(struct kvm_vcpu *vcpu, u64 vcpu_rcx)
28922878 }
28932879
28942880 err = tdh_vp_create (& kvm_tdx -> td , & tdx -> vp );
2895- if (KVM_BUG_ON (err , vcpu -> kvm )) {
2881+ if (TDX_BUG_ON (err , TDH_VP_CREATE , vcpu -> kvm )) {
28962882 ret = - EIO ;
2897- pr_tdx_error (TDH_VP_CREATE , err );
28982883 goto free_tdcx ;
28992884 }
29002885
29012886 for (i = 0 ; i < kvm_tdx -> td .tdcx_nr_pages ; i ++ ) {
29022887 err = tdh_vp_addcx (& tdx -> vp , tdx -> vp .tdcx_pages [i ]);
2903- if (KVM_BUG_ON (err , vcpu -> kvm )) {
2904- pr_tdx_error (TDH_VP_ADDCX , err );
2888+ if (TDX_BUG_ON (err , TDH_VP_ADDCX , vcpu -> kvm )) {
29052889 /*
29062890 * Pages already added are reclaimed by the vcpu_free
29072891 * method, but the rest are freed here.
@@ -2915,10 +2899,8 @@ static int tdx_td_vcpu_init(struct kvm_vcpu *vcpu, u64 vcpu_rcx)
29152899 }
29162900
29172901 err = tdh_vp_init (& tdx -> vp , vcpu_rcx , vcpu -> vcpu_id );
2918- if (KVM_BUG_ON (err , vcpu -> kvm )) {
2919- pr_tdx_error (TDH_VP_INIT , err );
2902+ if (TDX_BUG_ON (err , TDH_VP_INIT , vcpu -> kvm ))
29202903 return - EIO ;
2921- }
29222904
29232905 vcpu -> arch .mp_state = KVM_MP_STATE_RUNNABLE ;
29242906
0 commit comments