 #define MAJOR_VERSION 1
 #define MINOR_VERSION 0
 
-
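+/* Number of MSR_CORE_LASTBRANCH_n_FROM_IP/TO_IP pairs read per LBR sample */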
+#define LBR_NUM_REGISTERS 32U
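+/*
+ * IA32_PERF_GLOBAL_STATUS bits acknowledged through
+ * IA32_PERF_GLOBAL_OVF_CTRL: PMC0-3 overflow (bits 0-3), fixed-function
+ * counter 0-2 overflow (bits 32-34), and status bits 58-59.
+ */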
+#define PERF_OVF_BIT_MASK 0xC0000070000000FULL
 #define LVT_PERFCTR_BIT_UNMASK 0xFFFEFFFFU
 #define LVT_PERFCTR_BIT_MASK 0x10000U
 #define VALID_DEBUGCTL_BIT_MASK 0x1801U
@@ -24,6 +25,8 @@ static bool in_pmu_profiling;
 
 static uint32_t profiling_pmi_irq = IRQ_INVALID;
 
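+/*
+ * Per-IRQ descriptors; the PMI handler below pulls the interrupted
+ * context (ctx_rip/ctx_rflags/ctx_cs) from here when a sample is
+ * attributed to the hypervisor rather than a guest.
+ */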
+extern struct irq_desc irq_desc_array[NR_IRQS];
+
 static void profiling_initialize_vmsw(void)
 {
 	dev_dbg(ACRN_DBG_PROFILING, "%s: entering cpu%d",
@@ -324,9 +327,151 @@ static void profiling_handle_msrops(void)
 /*
  * Interrupt handler for performance monitoring interrupts
  */
-static void profiling_pmi_handler(__unused unsigned int irq, __unused void *data)
+static void profiling_pmi_handler(unsigned int irq, __unused void *data)
 {
-	/* to be implemented */
+	uint64_t perf_ovf_status;
+	uint32_t lvt_perf_ctr;
+	uint32_t i;
+	uint32_t group_id;
+	struct profiling_msr_op *msrop = NULL;
+	struct pmu_sample *psample = &(get_cpu_var(profiling_info.pmu_sample));
+	struct sep_state *ss = &(get_cpu_var(profiling_info.sep_state));
+
+	if ((ss == NULL) || (psample == NULL)) {
+		dev_dbg(ACRN_ERR_PROFILING, "%s: exiting cpu%d",
+			__func__, get_cpu_id());
+		return;
+	}
+	/* Stop all the counters first */
+	msr_write(MSR_IA32_PERF_GLOBAL_CTRL, 0x0U);
+
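+	/*
+	 * Apply the PMI-entry MSR writes for the current group; each MSR
+	 * list is terminated by an entry with msr_id == (uint32_t)-1.
+	 */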
+	group_id = ss->current_pmi_group_id;
+	for (i = 0U; i < MAX_MSR_LIST_NUM; i++) {
+		msrop = &(ss->pmi_entry_msr_list[group_id][i]);
+		if (msrop != NULL) {
+			if (msrop->msr_id == (uint32_t)-1) {
+				break;
+			}
+			if (msrop->msr_op_type == (uint8_t)MSR_OP_WRITE) {
+				msr_write(msrop->msr_id, msrop->value);
+			}
+		}
+	}
+
+	ss->total_pmi_count++;
+	perf_ovf_status = msr_read(MSR_IA32_PERF_GLOBAL_STATUS);
+	lvt_perf_ctr = (uint32_t)msr_read(MSR_IA32_EXT_APIC_LVT_PMI);
+
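+	/* No overflow recorded: nothing to sample; go re-arm (or mask) the PMI */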
+	if (perf_ovf_status == 0U) {
+		goto reconfig;
+	}
+
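+	/* Count PMIs that arrive with no PMC0-3 overflow and bit 59 clear */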
+	if ((perf_ovf_status & 0x80000000000000FULL) == 0U) {
+		ss->nofrozen_pmi++;
+	}
+
+	(void)memset(psample, 0U, sizeof(struct pmu_sample));
+
+	/* Attribute PMI to guest context */
+	if ((get_cpu_var(profiling_info.vm_info).vmexit_reason
+			== VMX_EXIT_REASON_EXTERNAL_INTERRUPT) &&
+			((uint64_t)get_cpu_var(profiling_info.vm_info).external_vector
+			== VECTOR_PMI)) {
+		psample->csample.os_id
+			= (uint32_t)get_cpu_var(profiling_info.vm_info).guest_vm_id;
+		(void)memset(psample->csample.task, 0U, 16);
+		psample->csample.cpu_id = get_cpu_id();
+		psample->csample.process_id = 0U;
+		psample->csample.task_id = 0U;
+		psample->csample.overflow_status = perf_ovf_status;
+		psample->csample.rip = get_cpu_var(profiling_info.vm_info).guest_rip;
+		psample->csample.rflags
+			= (uint32_t)get_cpu_var(profiling_info.vm_info).guest_rflags;
+		psample->csample.cs
+			= (uint32_t)get_cpu_var(profiling_info.vm_info).guest_cs;
+		get_cpu_var(profiling_info.vm_info).vmexit_reason = 0U;
+		get_cpu_var(profiling_info.vm_info).external_vector = -1;
+	/* Attribute PMI to hypervisor context */
+	} else {
+		psample->csample.os_id = 0xFFFFFFFFU;
+		(void)memcpy_s(psample->csample.task, 16, "VMM\0", 4);
+		psample->csample.cpu_id = get_cpu_id();
+		psample->csample.process_id = 0U;
+		psample->csample.task_id = 0U;
+		psample->csample.overflow_status = perf_ovf_status;
+		psample->csample.rip = irq_desc_array[irq].ctx_rip;
+		psample->csample.rflags
+			= (uint32_t)irq_desc_array[irq].ctx_rflags;
+		psample->csample.cs = (uint32_t)irq_desc_array[irq].ctx_cs;
+	}
+
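+	/* If LBR sampling is enabled, also capture the last-branch stack */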
+	if ((sep_collection_switch &
+			(1UL << (uint64_t)LBR_PMU_SAMPLING)) > 0UL) {
+		psample->lsample.lbr_tos = msr_read(MSR_CORE_LASTBRANCH_TOS);
+		for (i = 0U; i < LBR_NUM_REGISTERS; i++) {
+			psample->lsample.lbr_from_ip[i]
+				= msr_read(MSR_CORE_LASTBRANCH_0_FROM_IP + i);
+			psample->lsample.lbr_to_ip[i]
+				= msr_read(MSR_CORE_LASTBRANCH_0_TO_IP + i);
+		}
+		/* Generate core pmu sample and lbr data */
+		(void)profiling_generate_data(COLLECT_PROFILE_DATA, LBR_PMU_SAMPLING);
+	} else {
+		/* Generate core pmu sample only */
+		(void)profiling_generate_data(COLLECT_PROFILE_DATA, CORE_PMU_SAMPLING);
+	}
+
+	/* Clear PERF_GLOBAL_OVF_STATUS bits */
+	msr_write(MSR_IA32_PERF_GLOBAL_OVF_CTRL,
+		perf_ovf_status & PERF_OVF_BIT_MASK);
+
+	ss->valid_pmi_count++;
+
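+	/*
+	 * Replay the PMI-exit MSR list: control registers (except
+	 * IA32_PERF_GLOBAL_CTRL) are rewritten unconditionally; data
+	 * registers are reloaded only when the corresponding overflow
+	 * bit (msrop->param) was set.
+	 */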
+	group_id = ss->current_pmi_group_id;
+	for (i = 0U; i < MAX_MSR_LIST_NUM; i++) {
+		msrop = &(ss->pmi_exit_msr_list[group_id][i]);
+		if (msrop != NULL) {
+			if (msrop->msr_id == (uint32_t)-1) {
+				break;
+			}
+			if (msrop->msr_op_type == (uint8_t)MSR_OP_WRITE) {
+				if (msrop->reg_type != (uint8_t)PMU_MSR_DATA) {
+					if (msrop->msr_id != MSR_IA32_PERF_GLOBAL_CTRL) {
+						msr_write(msrop->msr_id, msrop->value);
+					}
+				} else {
+					if (((perf_ovf_status >> msrop->param) & 0x1U) > 0U) {
+						msr_write(msrop->msr_id, msrop->value);
+					}
+				}
+			}
+		}
+	}
+
+reconfig:
+
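+	/*
+	 * If profiling is still running, unmask the PMI in the local
+	 * vector table and replay the start list to restart the counters;
+	 * otherwise mask the PMI.
+	 */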
+	if (ss->pmu_state == PMU_RUNNING) {
+		/* Unmask the interrupt */
+		lvt_perf_ctr &= LVT_PERFCTR_BIT_UNMASK;
+		msr_write(MSR_IA32_EXT_APIC_LVT_PMI, lvt_perf_ctr);
+		group_id = ss->current_pmi_group_id;
+		for (i = 0U; i < MAX_MSR_LIST_NUM; i++) {
+			msrop = &(ss->pmi_start_msr_list[group_id][i]);
+			if (msrop != NULL) {
+				if (msrop->msr_id == (uint32_t)-1) {
+					break;
+				}
+				if (msrop->msr_op_type == (uint8_t)MSR_OP_WRITE) {
+					msr_write(msrop->msr_id, msrop->value);
+				}
+			}
+		}
+	} else {
+		/* Mask the interrupt */
+		lvt_perf_ctr |= LVT_PERFCTR_BIT_MASK;
+		msr_write(MSR_IA32_EXT_APIC_LVT_PMI, lvt_perf_ctr);
+	}
 }
 
 /*