#include <hypervisor.h>

#define ACRN_DBG_PROFILING 5U
+ #define ACRN_ERR_PROFILING 3U

#define MAJOR_VERSION 1
#define MINOR_VERSION 0
@@ -20,15 +21,57 @@ static uint32_t profiling_pmi_irq = IRQ_INVALID;

static void profiling_initialize_vmsw(void)
{
-	/* to be implemented */
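+	/* No VM-switch MSR programming is done here yet; only trace
+	 * entry and exit on the current CPU.
+	 */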
+	dev_dbg(ACRN_DBG_PROFILING, "%s: entering cpu%d",
+		__func__, get_cpu_id());
+
+	dev_dbg(ACRN_DBG_PROFILING, "%s: exiting cpu%d",
+		__func__, get_cpu_id());
}

/*
 * Configure the PMUs for sep/socwatch profiling.
+ * Initial write of PMU registers.
+ * Walk through the entries and write the value of each register accordingly.
+ * Note: current_group is always set to 0; only one group is supported.
 */
static void profiling_initialize_pmi(void)
{
-	/* to be implemented */
+	uint32_t i, group_id;
+	struct profiling_msr_op *msrop = NULL;
+	struct sep_state *ss = &get_cpu_var(profiling_info.sep_state);
+
+	dev_dbg(ACRN_DBG_PROFILING, "%s: entering cpu%d",
+		__func__, get_cpu_id());
+
+	if (ss == NULL) {
+		dev_dbg(ACRN_ERR_PROFILING, "%s: exiting cpu%d",
+			__func__, get_cpu_id());
+		return;
+	}
+
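+	/* Walk group 0's initial MSR list: an msr_id of (uint32_t)-1
+	 * terminates the list, the DEBUGCTL value is remembered in
+	 * guest_debugctl_value, and MSR_OP_WRITE entries are written
+	 * to the hardware.
+	 */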
+	group_id = ss->current_pmi_group_id = 0U;
+	for (i = 0U; i < MAX_MSR_LIST_NUM; i++) {
+		msrop = &(ss->pmi_initial_msr_list[group_id][i]);
+		if (msrop != NULL) {
+			if (msrop->msr_id == (uint32_t)-1) {
+				break;
+			}
+			if (msrop->msr_id == MSR_IA32_DEBUGCTL) {
+				ss->guest_debugctl_value = msrop->value;
+			}
+			if (msrop->msr_op_type == (uint8_t)MSR_OP_WRITE) {
+				msr_write(msrop->msr_id, msrop->value);
+				dev_dbg(ACRN_DBG_PROFILING,
+					"%s: MSRWRITE cpu%d, msr_id=0x%x, msr_val=0x%llx",
+					__func__, get_cpu_id(), msrop->msr_id, msrop->value);
+			}
+		}
+	}
+
+	ss->pmu_state = PMU_SETUP;
+
+	dev_dbg(ACRN_DBG_PROFILING, "%s: exiting cpu%d",
+		__func__, get_cpu_id());
}

/*
@@ -225,23 +268,142 @@ int32_t profiling_set_control(__unused struct vm *vm, __unused uint64_t addr)
/*
 * Configure PMI on all cpus
 */
- int32_t profiling_configure_pmi(__unused struct vm *vm, __unused uint64_t addr)
+ int32_t profiling_configure_pmi(struct vm *vm, uint64_t addr)
{
-	/* to be implemented
-	 * call to smp_call_function profiling_ipi_handler
-	 */
+	uint16_t i;
+	struct profiling_pmi_config pmi_config;
+
+	(void)memset((void *)&pmi_config, 0U, sizeof(pmi_config));
+
+	dev_dbg(ACRN_DBG_PROFILING, "%s: entering", __func__);
+
+	if (copy_from_gpa(vm, &pmi_config, addr, sizeof(pmi_config)) != 0) {
+		pr_err("%s: Unable to copy addr from vm\n", __func__);
+		return -EINVAL;
+	}
+
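+	/* Refuse the request unless every CPU's PMU is in the
+	 * PMU_INITIALIZED or PMU_SETUP state.
+	 */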
+	for (i = 0U; i < phys_cpu_num; i++) {
+		if (!((per_cpu(profiling_info.sep_state, i).pmu_state ==
+				PMU_INITIALIZED) ||
+			(per_cpu(profiling_info.sep_state, i).pmu_state ==
+				PMU_SETUP))) {
+			pr_err("%s: invalid pmu_state %u on cpu%d",
+				__func__, per_cpu(profiling_info.sep_state, i).pmu_state, i);
+			return -EINVAL;
+		}
+	}
+
+	if (pmi_config.num_groups == 0U ||
+			pmi_config.num_groups > MAX_GROUP_NUM) {
+		pr_err("%s: invalid num_groups %u",
+			__func__, pmi_config.num_groups);
+		return -EINVAL;
+	}
+
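+	/* Stage the initial/start/stop/entry/exit MSR lists for each CPU;
+	 * the IPI broadcast below applies them via profiling_ipi_handler.
+	 */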
+	for (i = 0U; i < phys_cpu_num; i++) {
+		per_cpu(profiling_info.ipi_cmd, i) = IPI_PMU_CONFIG;
+		per_cpu(profiling_info.sep_state, i).num_pmi_groups
+			= pmi_config.num_groups;
+
+		(void)memcpy_s((void *)per_cpu(profiling_info.sep_state, i).pmi_initial_msr_list,
+			sizeof(struct profiling_msr_op) * MAX_MSR_LIST_NUM * MAX_GROUP_NUM,
+			(void *)pmi_config.initial_list,
+			sizeof(struct profiling_msr_op) * MAX_MSR_LIST_NUM * MAX_GROUP_NUM);
+
+		(void)memcpy_s((void *)per_cpu(profiling_info.sep_state, i).pmi_start_msr_list,
+			sizeof(struct profiling_msr_op) * MAX_MSR_LIST_NUM * MAX_GROUP_NUM,
+			(void *)pmi_config.start_list,
+			sizeof(struct profiling_msr_op) * MAX_MSR_LIST_NUM * MAX_GROUP_NUM);
+
+		(void)memcpy_s((void *)per_cpu(profiling_info.sep_state, i).pmi_stop_msr_list,
+			sizeof(struct profiling_msr_op) * MAX_MSR_LIST_NUM * MAX_GROUP_NUM,
+			(void *)pmi_config.stop_list,
+			sizeof(struct profiling_msr_op) * MAX_MSR_LIST_NUM * MAX_GROUP_NUM);
+
+		(void)memcpy_s((void *)per_cpu(profiling_info.sep_state, i).pmi_entry_msr_list,
+			sizeof(struct profiling_msr_op) * MAX_MSR_LIST_NUM * MAX_GROUP_NUM,
+			(void *)pmi_config.entry_list,
+			sizeof(struct profiling_msr_op) * MAX_MSR_LIST_NUM * MAX_GROUP_NUM);
+
+		(void)memcpy_s((void *)per_cpu(profiling_info.sep_state, i).pmi_exit_msr_list,
+			sizeof(struct profiling_msr_op) * MAX_MSR_LIST_NUM * MAX_GROUP_NUM,
+			(void *)pmi_config.exit_list,
+			sizeof(struct profiling_msr_op) * MAX_MSR_LIST_NUM * MAX_GROUP_NUM);
+	}
+
+	smp_call_function(pcpu_active_bitmap, profiling_ipi_handler, NULL);
+
+	if (copy_to_gpa(vm, &pmi_config, addr, sizeof(pmi_config)) != 0) {
+		pr_err("%s: Unable to copy addr to vm\n", __func__);
+		return -EINVAL;
+	}
+
+	dev_dbg(ACRN_DBG_PROFILING, "%s: exiting", __func__);
	return 0;
}

/*
 * Configure for VM-switch data on all cpus
 */
- int32_t profiling_configure_vmsw(__unused struct vm *vm, __unused uint64_t addr)
+ int32_t profiling_configure_vmsw(struct vm *vm, uint64_t addr)
{
-	/* to be implemented
-	 * call to smp_call_function profiling_ipi_handler
-	 */
-	return 0;
+	uint16_t i;
+	int32_t ret = 0;
+	struct profiling_vmsw_config vmsw_config;
+
+	(void)memset((void *)&vmsw_config, 0U, sizeof(vmsw_config));
+
+	dev_dbg(ACRN_DBG_PROFILING, "%s: entering", __func__);
+
+	if (copy_from_gpa(vm, &vmsw_config, addr, sizeof(vmsw_config)) != 0) {
+		pr_err("%s: Unable to copy addr from vm\n", __func__);
+		return -EINVAL;
+	}
+
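+	/* Only the profiling collector carries VM-switch MSR lists;
+	 * the power collector needs no per-CPU setup here.
+	 */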
+	switch (vmsw_config.collector_id) {
+	case COLLECT_PROFILE_DATA:
+		for (i = 0U; i < phys_cpu_num; i++) {
+			per_cpu(profiling_info.ipi_cmd, i) = IPI_VMSW_CONFIG;
+
+			(void)memcpy_s(
+				(void *)per_cpu(profiling_info.sep_state, i).vmsw_initial_msr_list,
+				sizeof(struct profiling_msr_op) * MAX_MSR_LIST_NUM,
+				(void *)vmsw_config.initial_list,
+				sizeof(struct profiling_msr_op) * MAX_MSR_LIST_NUM);
+
+			(void)memcpy_s(
+				(void *)per_cpu(profiling_info.sep_state, i).vmsw_entry_msr_list,
+				sizeof(struct profiling_msr_op) * MAX_MSR_LIST_NUM,
+				(void *)vmsw_config.entry_list,
+				sizeof(struct profiling_msr_op) * MAX_MSR_LIST_NUM);
+
+			(void)memcpy_s(
+				(void *)per_cpu(profiling_info.sep_state, i).vmsw_exit_msr_list,
+				sizeof(struct profiling_msr_op) * MAX_MSR_LIST_NUM,
+				(void *)vmsw_config.exit_list,
+				sizeof(struct profiling_msr_op) * MAX_MSR_LIST_NUM);
+		}
+
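+		/* Kick all active CPUs so profiling_ipi_handler picks up
+		 * the IPI_VMSW_CONFIG command staged above.
+		 */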
+		smp_call_function(pcpu_active_bitmap, profiling_ipi_handler, NULL);
+
+		break;
+	case COLLECT_POWER_DATA:
+		break;
+	default:
+		pr_err("%s: unknown collector %d",
+			__func__, vmsw_config.collector_id);
+		ret = -EINVAL;
+		break;
+	}
+
+	if (copy_to_gpa(vm, &vmsw_config, addr, sizeof(vmsw_config)) != 0) {
+		pr_err("%s: Unable to copy addr to vm\n", __func__);
+		return -EINVAL;
+	}
+
+	dev_dbg(ACRN_DBG_PROFILING, "%s: exiting", __func__);
+
+	return ret;
}

/*
@@ -349,4 +511,4 @@ void profiling_setup(void)
	dev_dbg(ACRN_DBG_PROFILING, "%s: exiting", __func__);
}

- #endif
+ #endif