@@ -914,18 +914,71 @@ void profiling_ipi_handler(__unused void *data)
  */
 void profiling_vmenter_handler(__unused struct vcpu *vcpu)
 {
-	/* to be implemented */
+	if (((get_cpu_var(profiling_info.sep_state).pmu_state == PMU_RUNNING) &&
+			((sep_collection_switch &
+				(1UL << (uint64_t)VM_SWITCH_TRACING)) > 0UL)) ||
+		((get_cpu_var(profiling_info.soc_state) == SW_RUNNING) &&
+			((socwatch_collection_switch &
+				(1UL << (uint64_t)SOCWATCH_VM_SWITCH_TRACING)) > 0UL))) {
+
+		get_cpu_var(profiling_info.vm_info).vmenter_tsc = rdtsc();
+	}
 }
 
 /*
  * Save the VCPU info on vmexit
  */
-void profiling_vmexit_handler(__unused struct vcpu *vcpu, __unused uint64_t exit_reason)
+void profiling_vmexit_handler(struct vcpu *vcpu, uint64_t exit_reason)
 {
-	if (exit_reason == VMX_EXIT_REASON_EXTERNAL_INTERRUPT) {
-		/* to be implemented */
-	} else {
-		/* to be implemented */
+	per_cpu(profiling_info.sep_state, vcpu->pcpu_id).total_vmexit_count++;
+
+	if ((get_cpu_var(profiling_info.sep_state).pmu_state == PMU_RUNNING) ||
+		(get_cpu_var(profiling_info.soc_state) == SW_RUNNING)) {
+
+		get_cpu_var(profiling_info.vm_info).vmexit_tsc = rdtsc();
+		get_cpu_var(profiling_info.vm_info).vmexit_reason = exit_reason;
+		if (exit_reason == VMX_EXIT_REASON_EXTERNAL_INTERRUPT) {
+			get_cpu_var(profiling_info.vm_info).external_vector
+				= (int32_t)(exec_vmread(VMX_EXIT_INT_INFO) & 0xFFUL);
+		} else {
+			get_cpu_var(profiling_info.vm_info).external_vector = -1;
+		}
+		get_cpu_var(profiling_info.vm_info).guest_rip
+			= vcpu_get_rip(vcpu);
+
+		get_cpu_var(profiling_info.vm_info).guest_rflags
+			= vcpu_get_rflags(vcpu);
+
+		get_cpu_var(profiling_info.vm_info).guest_cs
+			= exec_vmread64(VMX_GUEST_CS_SEL);
+
+		get_cpu_var(profiling_info.vm_info).guest_vm_id = (int32_t)vcpu->vm->vm_id;
+
+		/* Generate vmswitch sample */
+		if (((sep_collection_switch &
+				(1UL << (uint64_t)VM_SWITCH_TRACING)) > 0UL) ||
+			((socwatch_collection_switch &
+				(1UL << (uint64_t)SOCWATCH_VM_SWITCH_TRACING)) > 0UL)) {
+			get_cpu_var(profiling_info.vm_switch_trace).os_id
+				= (int32_t)vcpu->vm->vm_id;
+			get_cpu_var(profiling_info.vm_switch_trace).vm_enter_tsc
+				= get_cpu_var(profiling_info.vm_info).vmenter_tsc;
+			get_cpu_var(profiling_info.vm_switch_trace).vm_exit_tsc
+				= get_cpu_var(profiling_info.vm_info).vmexit_tsc;
+			get_cpu_var(profiling_info.vm_switch_trace).vm_exit_reason
+				= exit_reason;
+
+			if ((sep_collection_switch &
+				(1UL << (uint64_t)VM_SWITCH_TRACING)) > 0UL) {
+				(void)profiling_generate_data(COLLECT_PROFILE_DATA,
+					VM_SWITCH_TRACING);
+			}
+			if ((socwatch_collection_switch &
+				(1UL << (uint64_t)SOCWATCH_VM_SWITCH_TRACING)) > 0UL) {
+				(void)profiling_generate_data(COLLECT_POWER_DATA,
+					SOCWATCH_VM_SWITCH_TRACING);
+			}
+		}
 	}
 }
 
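Note on the gating used above: both handlers only record TSC/vm-switch samples when the corresponding bit is set in a collection-switch mask, via tests of the form `(sep_collection_switch & (1UL << (uint64_t)VM_SWITCH_TRACING)) > 0UL`. The standalone C sketch below illustrates only that bit-test pattern; the enum value, global name, and helper function are placeholders for illustration, not definitions taken from the ACRN sources.

/* Minimal standalone sketch of the collection-switch bit test.
 * VM_SWITCH_TRACING's bit position and the global's linkage are assumptions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum collection_feature { VM_SWITCH_TRACING = 0 };	/* assumed bit position */

static uint64_t sep_collection_switch;			/* stand-in for the hypervisor global */

static bool vm_switch_tracing_on(void)
{
	/* Same test shape as in the vmenter/vmexit handlers above */
	return (sep_collection_switch &
		(1UL << (uint64_t)VM_SWITCH_TRACING)) > 0UL;
}

int main(void)
{
	sep_collection_switch |= 1UL << (uint64_t)VM_SWITCH_TRACING;	/* enable tracing */
	printf("vm-switch tracing enabled: %d\n", vm_switch_tracing_on());
	return 0;
}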