Skip to content

Commit 4c1cb60

Browse files
fyin1lijinxia
authored and committed
hv: Remove the up_count_spinlock and use atomic for up_count
It's possible that the up_count_spinlock is not released while the system enters S3. The case is like the following: BSP AP stop_cpus cpu_dead cpu_set_current_state spinlock_obtain up_count-- wait_for up_count == 1 enter S3 spinlock_release Especially considering that the real spinlock release could be delayed by the cache. Actually, most of the content protected by up_count_spinlock is per-CPU data and could be changed without the lock. The only thing left is up_count. This patchset removes the up_count_spinlock and uses the atomic API for changing up_count. Tracked-On: #1691 Signed-off-by: Yin Fengwei <fengwei.yin@intel.com> Reviewed-by: Eddie Dong <eddie.dong@intel.com> Acked-by: Anthony Xu <anthony.xu@intel.com>
1 parent b747206 commit 4c1cb60

File tree

2 files changed

+9
-16
lines changed

2 files changed

+9
-16
lines changed

hypervisor/arch/x86/cpu.c

Lines changed: 7 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -14,15 +14,10 @@ spinlock_t trampoline_spinlock = {
1414
.tail = 0U
1515
};
1616

17-
static spinlock_t up_count_spinlock = {
18-
.head = 0U,
19-
.tail = 0U
20-
};
21-
2217
struct per_cpu_region per_cpu_data[CONFIG_MAX_PCPU_NUM] __aligned(CPU_PAGE_SIZE);
2318
uint16_t phys_cpu_num = 0U;
2419
static uint64_t pcpu_sync = 0UL;
25-
static volatile uint16_t up_count = 0U;
20+
static uint16_t up_count = 0U;
2621
static uint64_t startup_paddr = 0UL;
2722

2823
/* physical cpu active bitmap, support up to 64 cpus */
@@ -309,26 +304,22 @@ static void init_percpu_lapic_id(void)
309304

310305
static void cpu_set_current_state(uint16_t pcpu_id, enum pcpu_boot_state state)
311306
{
312-
spinlock_obtain(&up_count_spinlock);
313-
314307
/* Check if state is initializing */
315308
if (state == PCPU_STATE_INITIALIZING) {
316309
/* Increment CPU up count */
317-
up_count++;
310+
atomic_inc16(&up_count);
318311

319312
/* Save this CPU's logical ID to the TSC AUX MSR */
320313
set_current_cpu_id(pcpu_id);
321314
}
322315

323316
/* If cpu is dead, decrement CPU up count */
324317
if (state == PCPU_STATE_DEAD) {
325-
up_count--;
318+
atomic_dec16(&up_count);
326319
}
327320

328321
/* Set state for the specified CPU */
329322
per_cpu(boot_state, pcpu_id) = state;
330-
331-
spinlock_release(&up_count_spinlock);
332323
}
333324

334325
#ifdef STACK_PROTECTOR
@@ -645,7 +636,7 @@ void start_cpus(void)
645636
* configured time-out has expired
646637
*/
647638
timeout = CONFIG_CPU_UP_TIMEOUT * 1000U;
648-
while ((up_count != expected_up) && (timeout != 0U)) {
639+
while ((atomic_load16(&up_count) != expected_up) && (timeout != 0U)) {
649640
/* Delay 10us */
650641
udelay(10U);
651642

@@ -654,7 +645,7 @@ void start_cpus(void)
654645
}
655646

656647
/* Check to see if all expected CPUs are actually up */
657-
if (up_count != expected_up) {
648+
if (atomic_load16(&up_count) != expected_up) {
658649
/* Print error */
659650
pr_fatal("Secondary CPUs failed to come up");
660651

@@ -682,15 +673,15 @@ void stop_cpus(void)
682673
}
683674

684675
expected_up = 1U;
685-
while ((up_count != expected_up) && (timeout != 0U)) {
676+
while ((atomic_load16(&up_count) != expected_up) && (timeout != 0U)) {
686677
/* Delay 10us */
687678
udelay(10U);
688679

689680
/* Decrement timeout value */
690681
timeout -= 10U;
691682
}
692683

693-
if (up_count != expected_up) {
684+
if (atomic_load16(&up_count) != expected_up) {
694685
pr_fatal("Can't make all APs offline");
695686

696687
/* if partial APs is down, it's not easy to recover

hypervisor/include/lib/atomic.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -41,6 +41,7 @@ static inline type name(const volatile type *ptr) \
4141
: "cc", "memory"); \
4242
return ret; \
4343
}
44+
build_atomic_load(atomic_load16, "w", uint16_t)
4445
build_atomic_load(atomic_load32, "l", uint32_t)
4546
build_atomic_load(atomic_load64, "q", uint64_t)
4647

@@ -63,6 +64,7 @@ static inline void name(type *ptr) \
6364
: "=m" (*ptr) \
6465
: "m" (*ptr)); \
6566
}
67+
build_atomic_inc(atomic_inc16, "w", uint16_t)
6668
build_atomic_inc(atomic_inc32, "l", uint32_t)
6769
build_atomic_inc(atomic_inc64, "q", uint64_t)
6870

0 commit comments

Comments
 (0)