Skip to content

Commit 682824d

Browse files
mingqiangchi and wenlingz
authored and committed
hv: Change phys_cpu_num to static
-- Change phys_cpu_num to static
-- Add get_pcpu_nums() and is_pcpu_active() APIs
-- Replace phys_cpu_num with get_pcpu_nums() everywhere except cpu.c

Tracked-On: #1842
Signed-off-by: Mingqiang Chi <mingqiang.chi@intel.com>
Acked-by: Anthony Xu <anthony.xu@intel.com>
1 parent 59e2de4 commit 682824d

File tree

16 files changed

+57
-35
lines changed

16 files changed

+57
-35
lines changed

hypervisor/arch/x86/cpu.c

Lines changed: 12 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414
#include <security.h>
1515

1616
struct per_cpu_region per_cpu_data[CONFIG_MAX_PCPU_NUM] __aligned(PAGE_SIZE);
17-
uint16_t phys_cpu_num = 0U;
17+
static uint16_t phys_cpu_num = 0U;
1818
static uint64_t pcpu_sync = 0UL;
1919
static uint16_t up_count = 0U;
2020
static uint64_t startup_paddr = 0UL;
@@ -68,6 +68,15 @@ static void cpu_set_current_state(uint16_t pcpu_id, enum pcpu_boot_state state)
6868
per_cpu(boot_state, pcpu_id) = state;
6969
}
7070

71+
uint16_t get_pcpu_nums(void)
72+
{
73+
return phys_cpu_num;
74+
}
75+
76+
bool is_pcpu_active(uint16_t pcpu_id)
77+
{
78+
return bitmap_test(pcpu_id, &pcpu_active_bitmap);
79+
}
7180
void init_cpu_pre(uint16_t pcpu_id_args)
7281
{
7382
uint16_t pcpu_id = pcpu_id_args;
@@ -219,7 +228,7 @@ static void start_cpu(uint16_t pcpu_id)
219228
* configured time-out has expired
220229
*/
221230
timeout = (uint32_t)CONFIG_CPU_UP_TIMEOUT * 1000U;
222-
while ((bitmap_test(pcpu_id, &pcpu_active_bitmap) == false) && (timeout != 0U)) {
231+
while (!is_pcpu_active(pcpu_id) && (timeout != 0U)) {
223232
/* Delay 10us */
224233
udelay(10U);
225234

@@ -228,7 +237,7 @@ static void start_cpu(uint16_t pcpu_id)
228237
}
229238

230239
/* Check to see if expected CPU is actually up */
231-
if (bitmap_test(pcpu_id, &pcpu_active_bitmap) == false) {
240+
if (!is_pcpu_active(pcpu_id)) {
232241
/* Print error */
233242
pr_fatal("Secondary CPUs failed to come up");
234243

hypervisor/arch/x86/cpu_caps.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -405,8 +405,8 @@ int32_t detect_hardware_support(void)
405405
} else if (is_vmx_disabled()) {
406406
pr_fatal("%s, VMX can not be enabled\n", __func__);
407407
ret = -ENODEV;
408-
} else if (phys_cpu_num > CONFIG_MAX_PCPU_NUM) {
409-
pr_fatal("%s, pcpu number(%d) is out of range\n", __func__, phys_cpu_num);
408+
} else if (get_pcpu_nums() > CONFIG_MAX_PCPU_NUM) {
409+
pr_fatal("%s, pcpu number(%d) is out of range\n", __func__, get_pcpu_nums());
410410
ret = -ENODEV;
411411
} else {
412412
ret = check_vmx_mmu_cap();

hypervisor/arch/x86/guest/vlapic.c

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -125,7 +125,7 @@ static uint16_t vm_apicid2vcpu_id(struct acrn_vm *vm, uint8_t lapicid)
125125

126126
pr_err("%s: bad lapicid %hhu", __func__, lapicid);
127127

128-
return phys_cpu_num;
128+
return get_pcpu_nums();
129129
}
130130

131131
static uint64_t
@@ -1713,13 +1713,11 @@ vlapic_reset(struct acrn_vlapic *vlapic)
17131713

17141714
/**
17151715
* @pre vlapic->vm != NULL
1716+
* @pre vlapic->vcpu->vcpu_id < CONFIG_MAX_VCPUS_PER_VM
17161717
*/
17171718
void
17181719
vlapic_init(struct acrn_vlapic *vlapic)
17191720
{
1720-
ASSERT(vlapic->vcpu->vcpu_id < phys_cpu_num,
1721-
"%s: vcpu_id is not initialized", __func__);
1722-
17231721
vlapic_init_timer(vlapic);
17241722

17251723
vlapic_reset(vlapic);

hypervisor/arch/x86/guest/vm.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -398,7 +398,7 @@ static int32_t prepare_vm0(void)
398398
struct vm_description vm0_desc;
399399

400400
(void)memset((void *)&vm0_desc, 0U, sizeof(vm0_desc));
401-
vm0_desc.vm_hw_num_cores = phys_cpu_num;
401+
vm0_desc.vm_hw_num_cores = get_pcpu_nums();
402402

403403
err = create_vm(&vm0_desc, &vm);
404404

hypervisor/arch/x86/lapic.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -256,7 +256,7 @@ void send_dest_ipi_mask(uint32_t dest_mask, uint32_t vector)
256256

257257
while (pcpu_id != INVALID_BIT_INDEX) {
258258
bitmap32_clear_nolock(pcpu_id, &mask);
259-
if (bitmap_test(pcpu_id, &pcpu_active_bitmap)) {
259+
if (is_pcpu_active(pcpu_id)) {
260260
icr.value_32.hi_32 = per_cpu(lapic_id, pcpu_id);
261261
msr_write(MSR_IA32_EXT_APIC_ICR, icr.value);
262262
} else {
@@ -270,7 +270,7 @@ void send_single_ipi(uint16_t pcpu_id, uint32_t vector)
270270
{
271271
union apic_icr icr;
272272

273-
if (bitmap_test(pcpu_id, &pcpu_active_bitmap)) {
273+
if (is_pcpu_active(pcpu_id)) {
274274
/* Set the destination field to the target processor. */
275275
icr.value_32.hi_32 = per_cpu(lapic_id, pcpu_id);
276276

hypervisor/arch/x86/notify.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@ void smp_call_function(uint64_t mask, smp_call_func_t func, void *data)
3939
pcpu_id = ffs64(mask);
4040
while (pcpu_id != INVALID_BIT_INDEX) {
4141
bitmap_clear_nolock(pcpu_id, &mask);
42-
if (bitmap_test(pcpu_id, &pcpu_active_bitmap)) {
42+
if (is_pcpu_active(pcpu_id)) {
4343
smp_call = &per_cpu(smp_call_info, pcpu_id);
4444
smp_call->func = func;
4545
smp_call->data = data;

hypervisor/common/schedule.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,8 +14,9 @@ void init_scheduler(void)
1414
{
1515
struct sched_context *ctx;
1616
uint32_t i;
17+
uint16_t pcpu_nums = get_pcpu_nums();
1718

18-
for (i = 0U; i < phys_cpu_num; i++) {
19+
for (i = 0U; i < pcpu_nums; i++) {
1920
ctx = &per_cpu(sched_ctx, i);
2021

2122
spinlock_init(&ctx->runqueue_lock);
@@ -42,8 +43,9 @@ uint16_t allocate_pcpu(void)
4243
{
4344
uint16_t i;
4445
uint16_t ret = INVALID_CPU_ID;
46+
uint16_t pcpu_nums = get_pcpu_nums();
4547

46-
for (i = 0U; i < phys_cpu_num; i++) {
48+
for (i = 0U; i < pcpu_nums; i++) {
4749
if (bitmap_test_and_set_lock(i, &pcpu_used_bitmap) == 0) {
4850
ret = i;
4951
break;

hypervisor/debug/hypercall.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -141,7 +141,7 @@ static int32_t hcall_get_hw_info(struct acrn_vm *vm, uint64_t param)
141141

142142
(void)memset((void *)&hw_info, 0U, sizeof(hw_info));
143143

144-
hw_info.cpu_num = phys_cpu_num;
144+
hw_info.cpu_num = get_pcpu_nums();
145145
ret = copy_to_gpa(vm, &hw_info, param, sizeof(hw_info));
146146
if (ret != 0) {
147147
pr_err("%s: Unable to copy param to vm", __func__);

hypervisor/debug/npk_log.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -66,6 +66,7 @@ static inline int32_t npk_write(const char *value, void *addr, size_t sz)
6666
void npk_log_setup(struct hv_npk_log_param *param)
6767
{
6868
uint16_t i;
69+
uint16_t pcpu_nums;
6970

7071
pr_info("HV_NPK_LOG: cmd %d param 0x%llx\n", param->cmd,
7172
param->mmio_addr);
@@ -90,7 +91,8 @@ void npk_log_setup(struct hv_npk_log_param *param)
9091
}
9192
if ((base != 0UL) && (param->cmd == HV_NPK_LOG_CMD_ENABLE)) {
9293
if (!npk_log_enabled) {
93-
for (i = 0U; i < phys_cpu_num; i++) {
94+
pcpu_nums = get_pcpu_nums();
95+
for (i = 0U; i < pcpu_nums; i++) {
9496
per_cpu(npk_log_ref, i) = 0U;
9597
}
9698
}

hypervisor/debug/profiling.c

Lines changed: 18 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -718,22 +718,23 @@ static void profiling_pmi_handler(uint32_t irq, __unused void *data)
718718
static void profiling_start_pmu(void)
719719
{
720720
uint16_t i;
721+
uint16_t pcpu_nums = get_pcpu_nums();
721722

722723
dev_dbg(ACRN_DBG_PROFILING, "%s: entering", __func__);
723724

724725
if (in_pmu_profiling) {
725726
return;
726727
}
727728

728-
for (i = 0U; i < phys_cpu_num; i++) {
729+
for (i = 0U; i < pcpu_nums; i++) {
729730
if (per_cpu(profiling_info.sep_state, i).pmu_state != PMU_SETUP) {
730731
pr_err("%s: invalid pmu_state %u on cpu%d",
731732
__func__, get_cpu_var(profiling_info.sep_state).pmu_state, i);
732733
return;
733734
}
734735
}
735736

736-
for (i = 0U; i < phys_cpu_num; i++) {
737+
for (i = 0U; i < pcpu_nums; i++) {
737738
per_cpu(profiling_info.ipi_cmd, i) = IPI_PMU_START;
738739
per_cpu(profiling_info.sep_state, i).samples_logged = 0U;
739740
per_cpu(profiling_info.sep_state, i).samples_dropped = 0U;
@@ -759,11 +760,12 @@ static void profiling_start_pmu(void)
759760
static void profiling_stop_pmu(void)
760761
{
761762
uint16_t i;
763+
uint16_t pcpu_nums = get_pcpu_nums();
762764

763765
dev_dbg(ACRN_DBG_PROFILING, "%s: entering", __func__);
764766

765767
if (in_pmu_profiling) {
766-
for (i = 0U; i < phys_cpu_num; i++) {
768+
for (i = 0U; i < pcpu_nums; i++) {
767769
per_cpu(profiling_info.ipi_cmd, i) = IPI_PMU_STOP;
768770
if (per_cpu(profiling_info.sep_state, i).pmu_state == PMU_RUNNING) {
769771
per_cpu(profiling_info.sep_state, i).pmu_state = PMU_SETUP;
@@ -812,7 +814,8 @@ static void profiling_stop_pmu(void)
812814
int32_t profiling_msr_ops_all_cpus(struct acrn_vm *vm, uint64_t addr)
813815
{
814816
uint16_t i;
815-
struct profiling_msr_ops_list msr_list[phys_cpu_num];
817+
uint16_t pcpu_nums = get_pcpu_nums();
818+
struct profiling_msr_ops_list msr_list[pcpu_nums];
816819

817820
(void)memset((void *)&msr_list, 0U, sizeof(msr_list));
818821

@@ -823,7 +826,7 @@ int32_t profiling_msr_ops_all_cpus(struct acrn_vm *vm, uint64_t addr)
823826
return -EINVAL;
824827
}
825828

826-
for (i = 0U; i < phys_cpu_num; i++) {
829+
for (i = 0U; i < pcpu_nums; i++) {
827830
per_cpu(profiling_info.ipi_cmd, i) = IPI_MSR_OP;
828831
per_cpu(profiling_info.msr_node, i) = &(msr_list[i]);
829832
}
@@ -849,6 +852,7 @@ int32_t profiling_vm_list_info(struct acrn_vm *vm, uint64_t addr)
849852
int32_t vm_idx;
850853
uint16_t i, j;
851854
struct profiling_vm_info_list vm_info_list;
855+
uint16_t pcpu_nums = get_pcpu_nums();
852856

853857
(void)memset((void *)&vm_info_list, 0U, sizeof(vm_info_list));
854858

@@ -862,7 +866,7 @@ int32_t profiling_vm_list_info(struct acrn_vm *vm, uint64_t addr)
862866
vm_idx = 0;
863867
vm_info_list.vm_list[vm_idx].vm_id_num = -1;
864868
(void)memcpy_s((void *)vm_info_list.vm_list[vm_idx].vm_name, 4U, "VMM\0", 4U);
865-
for (i = 0U; i < phys_cpu_num; i++) {
869+
for (i = 0U; i < pcpu_nums; i++) {
866870
vm_info_list.vm_list[vm_idx].cpu_map[i].vcpu_id = (int32_t)i;
867871
vm_info_list.vm_list[vm_idx].cpu_map[i].pcpu_id = (int32_t)i;
868872
vm_info_list.vm_list[vm_idx].cpu_map[i].apic_id
@@ -985,7 +989,7 @@ int32_t profiling_set_control(struct acrn_vm *vm, uint64_t addr)
985989
uint64_t old_switch;
986990
uint64_t new_switch;
987991
uint16_t i;
988-
992+
uint16_t pcpu_nums = get_pcpu_nums();
989993
struct profiling_control prof_control;
990994

991995
(void)memset((void *)&prof_control, 0U, sizeof(prof_control));
@@ -1062,15 +1066,15 @@ int32_t profiling_set_control(struct acrn_vm *vm, uint64_t addr)
10621066
}
10631067
}
10641068
}
1065-
for (i = 0U; i < phys_cpu_num; i++) {
1069+
for (i = 0U; i < pcpu_nums ; i++) {
10661070
per_cpu(profiling_info.soc_state, i)
10671071
= SW_RUNNING;
10681072
}
10691073
} else { /* stop socwatch collection */
10701074
dev_dbg(ACRN_DBG_PROFILING,
10711075
"%s: socwatch stop collection invoked or collection switch not set!",
10721076
__func__);
1073-
for (i = 0U; i < phys_cpu_num; i++) {
1077+
for (i = 0U; i < pcpu_nums ; i++) {
10741078
per_cpu(profiling_info.soc_state, i)
10751079
= SW_STOPPED;
10761080
}
@@ -1099,6 +1103,7 @@ int32_t profiling_configure_pmi(struct acrn_vm *vm, uint64_t addr)
10991103
{
11001104
uint16_t i;
11011105
struct profiling_pmi_config pmi_config;
1106+
uint16_t pcpu_nums = get_pcpu_nums();
11021107

11031108
(void)memset((void *)&pmi_config, 0U, sizeof(pmi_config));
11041109

@@ -1109,7 +1114,7 @@ int32_t profiling_configure_pmi(struct acrn_vm *vm, uint64_t addr)
11091114
return -EINVAL;
11101115
}
11111116

1112-
for (i = 0U; i < phys_cpu_num; i++) {
1117+
for (i = 0U; i < pcpu_nums; i++) {
11131118
if (!((per_cpu(profiling_info.sep_state, i).pmu_state ==
11141119
PMU_INITIALIZED) ||
11151120
(per_cpu(profiling_info.sep_state, i).pmu_state ==
@@ -1127,7 +1132,7 @@ int32_t profiling_configure_pmi(struct acrn_vm *vm, uint64_t addr)
11271132
return -EINVAL;
11281133
}
11291134

1130-
for (i = 0U; i < phys_cpu_num; i++) {
1135+
for (i = 0U; i < pcpu_nums; i++) {
11311136
per_cpu(profiling_info.ipi_cmd, i) = IPI_PMU_CONFIG;
11321137
per_cpu(profiling_info.sep_state, i).num_pmi_groups
11331138
= pmi_config.num_groups;
@@ -1177,6 +1182,7 @@ int32_t profiling_configure_vmsw(struct acrn_vm *vm, uint64_t addr)
11771182
uint16_t i;
11781183
int32_t ret = 0;
11791184
struct profiling_vmsw_config vmsw_config;
1185+
uint16_t pcpu_nums = get_pcpu_nums();
11801186

11811187
(void)memset((void *)&vmsw_config, 0U, sizeof(vmsw_config));
11821188

@@ -1189,7 +1195,7 @@ int32_t profiling_configure_vmsw(struct acrn_vm *vm, uint64_t addr)
11891195

11901196
switch (vmsw_config.collector_id) {
11911197
case COLLECT_PROFILE_DATA:
1192-
for (i = 0U; i < phys_cpu_num; i++) {
1198+
for (i = 0U; i < pcpu_nums; i++) {
11931199
per_cpu(profiling_info.ipi_cmd, i) = IPI_VMSW_CONFIG;
11941200

11951201
(void)memcpy_s(

0 commit comments

Comments (0)