Commit 898b9c8
mchinth authored and wenlingz committed
HV: Added support to configure PMI and VM switch info

This patch provides the interface to configure (set up before the actual collection) PMI and VM-switch tracing information.

profiling_config_pmi: receives the information required to configure PMI from the guest, populates it into the per_cpu region, and SMP-calls profiling_initialize_pmi.

profiling_initialize_pmi: configures the PMUs for sep/socwatch profiling; performs the initial write of the PMU registers, walking through the entries and writing the value of each register accordingly.

profiling_config_vmsw: receives the information required to configure VM switch from the guest and configures VM-switch data collection on all cpus.

profiling_initialize_vmsw: initializes VM-switch tracing.

Tracked-On: #1409
Acked-by: Eddie Dong <eddie.dong@intel.com>
Signed-off-by: Manisha <manisha.chinthapally@intel.com>
1 parent df54909 commit 898b9c8
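Note: both configure/initialize paths in the diff below rely on one convention: an MSR-op list is terminated by an entry whose msr_id is (uint32_t)-1, and only entries whose msr_op_type is MSR_OP_WRITE cause a WRMSR. A minimal self-contained sketch of that walk, with msr_write stubbed out since real MSR access only works inside the hypervisor:

#include <stdint.h>
#include <stdio.h>

#define MAX_MSR_LIST_NUM 15U
#define MSR_OP_WRITE 2U  /* matches enum MSR_CMD_TYPE in profiling_internal.h */

struct profiling_msr_op {
    uint64_t value;    /* value to write */
    uint32_t msr_id;   /* (uint32_t)-1 marks the end of the list */
    uint16_t param;
    uint8_t msr_op_type;
    uint8_t reg_type;
};

/* stub: the hypervisor issues a real WRMSR here */
static void msr_write_stub(uint32_t msr_id, uint64_t value)
{
    printf("WRMSR 0x%x <- 0x%llx\n", msr_id, (unsigned long long)value);
}

/* walk the list the way profiling_initialize_pmi does: stop at the
 * sentinel, apply only MSR_OP_WRITE entries */
static void walk_msr_list(const struct profiling_msr_op *list)
{
    uint32_t i;

    for (i = 0U; i < MAX_MSR_LIST_NUM; i++) {
        if (list[i].msr_id == (uint32_t)-1) {
            break;
        }
        if (list[i].msr_op_type == (uint8_t)MSR_OP_WRITE) {
            msr_write_stub(list[i].msr_id, list[i].value);
        }
    }
}

int main(void)
{
    struct profiling_msr_op list[MAX_MSR_LIST_NUM] = {
        { 0x1ULL, 0x38fU, 0U, MSR_OP_WRITE, 0U }, /* one write op */
        { 0ULL, (uint32_t)-1, 0U, 0U, 0U },       /* terminator */
    };

    walk_msr_list(list);
    return 0;
}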

2 files changed: +250 −13

hypervisor/debug/profiling.c (174 additions, 12 deletions)
@@ -8,6 +8,7 @@
 #include <hypervisor.h>
 
 #define ACRN_DBG_PROFILING 5U
+#define ACRN_ERR_PROFILING 3U
 
 #define MAJOR_VERSION 1
 #define MINOR_VERSION 0
@@ -20,15 +21,57 @@ static uint32_t profiling_pmi_irq = IRQ_INVALID;
 
 static void profiling_initialize_vmsw(void)
 {
-    /* to be implemented */
+    dev_dbg(ACRN_DBG_PROFILING, "%s: entering cpu%d",
+        __func__, get_cpu_id());
+
+    dev_dbg(ACRN_DBG_PROFILING, "%s: exiting cpu%d",
+        __func__, get_cpu_id());
 }
 
 /*
  * Configure the PMU's for sep/socwatch profiling.
+ * Initial write of PMU registers.
+ * Walk through the entries and write the value of the register accordingly.
+ * Note: current_group is always set to 0, only 1 group is supported.
  */
 static void profiling_initialize_pmi(void)
 {
-    /* to be implemented */
+    uint32_t i, group_id;
+    struct profiling_msr_op *msrop = NULL;
+    struct sep_state *ss = &get_cpu_var(profiling_info.sep_state);
+
+    dev_dbg(ACRN_DBG_PROFILING, "%s: entering cpu%d",
+        __func__, get_cpu_id());
+
+    if (ss == NULL) {
+        dev_dbg(ACRN_ERR_PROFILING, "%s: exiting cpu%d",
+            __func__, get_cpu_id());
+        return;
+    }
+
+    group_id = ss->current_pmi_group_id = 0U;
+    for (i = 0U; i < MAX_MSR_LIST_NUM; i++) {
+        msrop = &(ss->pmi_initial_msr_list[group_id][i]);
+        if (msrop != NULL) {
+            if (msrop->msr_id == (uint32_t)-1) {
+                break;
+            }
+            if (msrop->msr_id == MSR_IA32_DEBUGCTL) {
+                ss->guest_debugctl_value = msrop->value;
+            }
+            if (msrop->msr_op_type == (uint8_t)MSR_OP_WRITE) {
+                msr_write(msrop->msr_id, msrop->value);
+                dev_dbg(ACRN_DBG_PROFILING,
+                    "%s: MSRWRITE cpu%d, msr_id=0x%x, msr_val=0x%llx",
+                    __func__, get_cpu_id(), msrop->msr_id, msrop->value);
+            }
+        }
+    }
+
+    ss->pmu_state = PMU_SETUP;
+
+    dev_dbg(ACRN_DBG_PROFILING, "%s: exiting cpu%d",
+        __func__, get_cpu_id());
 }
 
 /*
@@ -225,23 +268,142 @@ int32_t profiling_set_control(__unused struct vm *vm, __unused uint64_t addr)
 /*
  * Configure PMI on all cpus
  */
-int32_t profiling_configure_pmi(__unused struct vm *vm, __unused uint64_t addr)
+int32_t profiling_configure_pmi(struct vm *vm, uint64_t addr)
 {
-    /* to be implemented
-     * call to smp_call_function profiling_ipi_handler
-     */
+    uint16_t i;
+    struct profiling_pmi_config pmi_config;
+
+    (void)memset((void *)&pmi_config, 0U, sizeof(pmi_config));
+
+    dev_dbg(ACRN_DBG_PROFILING, "%s: entering", __func__);
+
+    if (copy_from_gpa(vm, &pmi_config, addr, sizeof(pmi_config)) != 0) {
+        pr_err("%s: Unable to copy addr from vm\n", __func__);
+        return -EINVAL;
+    }
+
+    for (i = 0U; i < phys_cpu_num; i++) {
+        if (!((per_cpu(profiling_info.sep_state, i).pmu_state ==
+                PMU_INITIALIZED) ||
+            (per_cpu(profiling_info.sep_state, i).pmu_state ==
+                PMU_SETUP))) {
+            pr_err("%s: invalid pmu_state %u on cpu%d",
+                __func__, per_cpu(profiling_info.sep_state, i).pmu_state, i);
+            return -EINVAL;
+        }
+    }
+
+    if (pmi_config.num_groups == 0U ||
+            pmi_config.num_groups > MAX_GROUP_NUM) {
+        pr_err("%s: invalid num_groups %u",
+            __func__, pmi_config.num_groups);
+        return -EINVAL;
+    }
+
+    for (i = 0U; i < phys_cpu_num; i++) {
+        per_cpu(profiling_info.ipi_cmd, i) = IPI_PMU_CONFIG;
+        per_cpu(profiling_info.sep_state, i).num_pmi_groups
+            = pmi_config.num_groups;
+
+        (void)memcpy_s((void *)per_cpu(profiling_info.sep_state, i).pmi_initial_msr_list,
+            sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM,
+            (void *)pmi_config.initial_list,
+            sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM);
+
+        (void)memcpy_s((void *)per_cpu(profiling_info.sep_state, i).pmi_start_msr_list,
+            sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM,
+            (void *)pmi_config.start_list,
+            sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM);
+
+        (void)memcpy_s((void *)per_cpu(profiling_info.sep_state, i).pmi_stop_msr_list,
+            sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM,
+            (void *)pmi_config.stop_list,
+            sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM);
+
+        (void)memcpy_s((void *)per_cpu(profiling_info.sep_state, i).pmi_entry_msr_list,
+            sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM,
+            (void *)pmi_config.entry_list,
+            sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM);
+
+        (void)memcpy_s((void *)per_cpu(profiling_info.sep_state, i).pmi_exit_msr_list,
+            sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM,
+            (void *)pmi_config.exit_list,
+            sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM);
+    }
+
+    smp_call_function(pcpu_active_bitmap, profiling_ipi_handler, NULL);
+
+    if (copy_to_gpa(vm, &pmi_config, addr, sizeof(pmi_config)) != 0) {
+        pr_err("%s: Unable to copy addr to vm\n", __func__);
+        return -EINVAL;
+    }
+
+    dev_dbg(ACRN_DBG_PROFILING, "%s: exiting", __func__);
     return 0;
 }
 
 /*
  * Configure for VM-switch data on all cpus
  */
-int32_t profiling_configure_vmsw(__unused struct vm *vm, __unused uint64_t addr)
+int32_t profiling_configure_vmsw(struct vm *vm, uint64_t addr)
 {
-    /* to be implemented
-     * call to smp_call_function profiling_ipi_handler
-     */
-    return 0;
+    uint16_t i;
+    int32_t ret = 0;
+    struct profiling_vmsw_config vmsw_config;
+
+    (void)memset((void *)&vmsw_config, 0U, sizeof(vmsw_config));
+
+    dev_dbg(ACRN_DBG_PROFILING, "%s: entering", __func__);
+
+    if (copy_from_gpa(vm, &vmsw_config, addr, sizeof(vmsw_config)) != 0) {
+        pr_err("%s: Unable to copy addr from vm\n", __func__);
+        return -EINVAL;
+    }
+
+    switch (vmsw_config.collector_id) {
+    case COLLECT_PROFILE_DATA:
+        for (i = 0U; i < phys_cpu_num; i++) {
+            per_cpu(profiling_info.ipi_cmd, i) = IPI_VMSW_CONFIG;
+
+            (void)memcpy_s(
+                (void *)per_cpu(profiling_info.sep_state, i).vmsw_initial_msr_list,
+                sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM,
+                (void *)vmsw_config.initial_list,
+                sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM);
+
+            (void)memcpy_s(
+                (void *)per_cpu(profiling_info.sep_state, i).vmsw_entry_msr_list,
+                sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM,
+                (void *)vmsw_config.entry_list,
+                sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM);
+
+            (void)memcpy_s(
+                (void *)per_cpu(profiling_info.sep_state, i).vmsw_exit_msr_list,
+                sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM,
+                (void *)vmsw_config.exit_list,
+                sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM);
+        }
+
+        smp_call_function(pcpu_active_bitmap, profiling_ipi_handler, NULL);
+
+        break;
+    case COLLECT_POWER_DATA:
+        break;
+    default:
+        pr_err("%s: unknown collector %d",
+            __func__, vmsw_config.collector_id);
+        ret = -EINVAL;
+        break;
+    }
+
+    if (copy_to_gpa(vm, &vmsw_config, addr, sizeof(vmsw_config)) != 0) {
+        pr_err("%s: Unable to copy addr to vm\n", __func__);
+        return -EINVAL;
+    }
+
+    dev_dbg(ACRN_DBG_PROFILING, "%s: exiting", __func__);
+
+    return ret;
 }
 
 /*
@@ -349,4 +511,4 @@ void profiling_setup(void)
     dev_dbg(ACRN_DBG_PROFILING, "%s: exiting", __func__);
 }
 
-#endif
+#endif
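A note on the pmu_state gate in profiling_configure_pmi above: reconfiguration is only accepted when every CPU is in PMU_INITIALIZED or PMU_SETUP, i.e. before collection has started. A standalone sketch of that predicate (pmu_state_allows_config is a hypothetical helper name; the enum is copied from the profiling_internal.h diff below):

#include <stdbool.h>

typedef enum SEP_PMU_STATE {
    PMU_INITIALIZED = 0,
    PMU_SETUP,
    PMU_RUNNING,
    PMU_UNINITIALIZED,
    PMU_UNKNOWN
} sep_pmu_state;

/* hypothetical helper: reconfiguring the PMU is only legal before
 * collection starts, never while it is RUNNING or after teardown */
static bool pmu_state_allows_config(sep_pmu_state state)
{
    return (state == PMU_INITIALIZED) || (state == PMU_SETUP);
}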

hypervisor/include/debug/profiling_internal.h (76 additions, 1 deletion)
@@ -11,10 +11,19 @@
 
 #define MAX_NR_VCPUS 8
 #define MAX_NR_VMS 6
+#define MAX_MSR_LIST_NUM 15U
+#define MAX_GROUP_NUM 1U
 
 #define COLLECT_PROFILE_DATA 0
 #define COLLECT_POWER_DATA 1
 
+enum MSR_CMD_TYPE {
+    MSR_OP_NONE = 0,
+    MSR_OP_READ,
+    MSR_OP_WRITE,
+    MSR_OP_READ_CLEAR
+};
+
 typedef enum IPI_COMMANDS {
     IPI_MSR_OP = 0,
     IPI_PMU_CONFIG,
@@ -24,6 +33,14 @@ typedef enum IPI_COMMANDS {
     IPI_UNKNOWN,
 } ipi_commands;
 
+typedef enum SEP_PMU_STATE {
+    PMU_INITIALIZED = 0,
+    PMU_SETUP,
+    PMU_RUNNING,
+    PMU_UNINITIALIZED,
+    PMU_UNKNOWN
+} sep_pmu_state;
+
 typedef enum PROFILING_SEP_FEATURE {
     CORE_PMU_SAMPLING = 0,
     CORE_PMU_COUNTING,
@@ -75,12 +92,70 @@ struct profiling_vm_info_list {
     struct profiling_vm_info vm_list[MAX_NR_VMS];
 };
 
+struct profiling_msr_op {
+    /* value to write or location to write into */
+    uint64_t value;
+    /* MSR address to read/write; last entry will have value of -1 */
+    uint32_t msr_id;
+    /* parameter; usage depends on operation */
+    uint16_t param;
+    uint8_t msr_op_type;
+    uint8_t reg_type;
+};
+
+struct profiling_pmi_config {
+    uint32_t num_groups;
+    uint32_t trigger_count;
+    struct profiling_msr_op initial_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM];
+    struct profiling_msr_op start_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM];
+    struct profiling_msr_op stop_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM];
+    struct profiling_msr_op entry_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM];
+    struct profiling_msr_op exit_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM];
+};
+
+struct profiling_vmsw_config {
+    int32_t collector_id;
+    struct profiling_msr_op initial_list[MAX_MSR_LIST_NUM];
+    struct profiling_msr_op entry_list[MAX_MSR_LIST_NUM];
+    struct profiling_msr_op exit_list[MAX_MSR_LIST_NUM];
+};
+
+struct sep_state {
+    sep_pmu_state pmu_state;
+
+    uint32_t current_pmi_group_id;
+    uint32_t num_pmi_groups;
+
+    struct profiling_msr_op
+        pmi_initial_msr_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM];
+    struct profiling_msr_op
+        pmi_start_msr_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM];
+    struct profiling_msr_op
+        pmi_stop_msr_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM];
+    struct profiling_msr_op
+        pmi_entry_msr_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM];
+    struct profiling_msr_op
+        pmi_exit_msr_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM];
+
+    uint32_t current_vmsw_group_id;
+    uint32_t num_msw_groups;
+    struct profiling_msr_op
+        vmsw_initial_msr_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM];
+    struct profiling_msr_op
+        vmsw_entry_msr_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM];
+    struct profiling_msr_op
+        vmsw_exit_msr_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM];
+
+    uint64_t guest_debugctl_value;
+} __aligned(8);
+
 /*
  * Wrapper containing SEP sampling/profiling related data structures
  */
 struct profiling_info_wrapper {
+    struct sep_state sep_state;
     ipi_commands ipi_cmd;
-};
+} __aligned(8);
 
 int32_t profiling_get_version_info(struct vm *vm, uint64_t addr);
 int32_t profiling_get_pcpu_id(struct vm *vm, uint64_t addr);
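A hedged example of how a guest-side caller might populate struct profiling_pmi_config before handing its guest-physical address to profiling_configure_pmi. The MSR index 0x38F (IA32_PERF_GLOBAL_CTRL) and the trigger_count value are illustrative only, and the hypercall plumbing that delivers the address is omitted:

#include <string.h>

/* assumes struct profiling_msr_op, struct profiling_pmi_config,
 * MSR_OP_WRITE and the MAX_* constants from the header above */
static void fill_example_pmi_config(struct profiling_pmi_config *cfg)
{
    (void)memset(cfg, 0, sizeof(*cfg));
    cfg->num_groups = 1U;         /* only MAX_GROUP_NUM == 1 is supported */
    cfg->trigger_count = 100000U; /* illustrative sample-after value */

    /* group 0: one write op, then the mandatory sentinel entry */
    cfg->initial_list[0][0].msr_id = 0x38fU; /* IA32_PERF_GLOBAL_CTRL, illustrative */
    cfg->initial_list[0][0].value = 0x1ULL;
    cfg->initial_list[0][0].msr_op_type = (uint8_t)MSR_OP_WRITE;
    cfg->initial_list[0][1].msr_id = (uint32_t)-1;

    /* the remaining lists carry no ops: terminate them immediately */
    cfg->start_list[0][0].msr_id = (uint32_t)-1;
    cfg->stop_list[0][0].msr_id = (uint32_t)-1;
    cfg->entry_list[0][0].msr_id = (uint32_t)-1;
    cfg->exit_list[0][0].msr_id = (uint32_t)-1;
}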
