Commit 4b03c97

JasonChenCJ authored and lijinxia committed
add smp_call_function support
Make use of the VCPU_NOTIFY vector to add smp_call_function support: add a per_cpu field smp_call_info, make smp_call_function non-reentrant, and have the calling CPU return only after all target CPUs have completed the call.

v4:
- remove the global lock
- use the wait_sync_change function to serialize the call sequence

v3:
- remove the per_cpu lock in smp_call_info
- use a global lock to serialize smp_call_function calls
- use pcpu_sync_sleep to wait for IPI completion

v2:
- if an old smp call is still pending when a new one arrives, ignore the new one instead of overwriting it

Signed-off-by: Jason Chen CJ <jason.cj.chen@intel.com>
1 parent 8ef0721 commit 4b03c97

File tree

3 files changed (+48, -2 lines)

hypervisor/arch/x86/notify.c

Lines changed: 40 additions & 2 deletions
@@ -8,15 +8,53 @@
 
 static uint32_t notification_irq = IRQ_INVALID;
 
+static volatile uint64_t smp_call_mask = 0UL;
+
 /* run in interrupt context */
 static int kick_notification(__unused uint32_t irq, __unused void *data)
 {
-        /* Notification vector does not require handling here, it's just used
-         * to kick taget cpu out of non-root mode.
+        /* Notification vector is used to kick taget cpu out of non-root mode.
+         * And it also serves for smp call.
          */
+        uint16_t pcpu_id = get_cpu_id();
+
+        if (bitmap_test(pcpu_id, &smp_call_mask)) {
+                struct smp_call_info_data *smp_call =
+                        &per_cpu(smp_call_info, pcpu_id);
+
+                if (smp_call->func)
+                        smp_call->func(smp_call->data);
+                bitmap_clear_nolock(pcpu_id, &smp_call_mask);
+        }
+
         return 0;
 }
 
+void smp_call_function(uint64_t mask, smp_call_func_t func, void *data)
+{
+        uint16_t pcpu_id;
+        struct smp_call_info_data *smp_call;
+
+        /* wait for previous smp call complete, which may run on other cpus */
+        while (atomic_cmpxchg64(&smp_call_mask, 0UL, mask & INVALID_BIT_INDEX));
+        while ((pcpu_id = ffs64(mask)) != INVALID_BIT_INDEX) {
+                bitmap_clear_nolock(pcpu_id, &mask);
+                if (bitmap_test(pcpu_id, &pcpu_active_bitmap)) {
+                        smp_call = &per_cpu(smp_call_info, pcpu_id);
+                        smp_call->func = func;
+                        smp_call->data = data;
+                } else {
+                        /* pcpu is not in active, print error */
+                        pr_err("pcpu_id %d not in active!", pcpu_id);
+                        bitmap_clear_nolock(pcpu_id, &smp_call_mask);
+                }
+        }
+        send_dest_ipi(smp_call_mask, VECTOR_NOTIFY_VCPU,
+                        INTR_LAPIC_ICR_LOGICAL);
+        /* wait for current smp call complete */
+        wait_sync_change(&smp_call_mask, 0UL);
+}
+
 static int request_notification_irq(irq_action_t func, void *data,
                 const char *name)
 {
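The commit message notes that smp_call_function is not re-entrant and that the calling CPU returns only after every target CPU has run the callback. A minimal caller-side sketch of that contract, using only symbols visible in this diff (smp_call_function, get_cpu_id, bitmap_clear_nolock, pcpu_active_bitmap); flush_local_tlb, flush_tlb_cb and flush_remote_tlbs are hypothetical names introduced for illustration:

/* Hypothetical usage sketch, not part of this commit. */
static void flush_tlb_cb(void *data)
{
        /* Runs on each target CPU in the notification interrupt context. */
        uint64_t gva = *(uint64_t *)data;

        flush_local_tlb(gva);   /* hypothetical per-CPU work */
}

static void flush_remote_tlbs(uint64_t gva)
{
        /* Target every active physical CPU except the calling one. */
        uint64_t mask = pcpu_active_bitmap;

        bitmap_clear_nolock(get_cpu_id(), &mask);

        /* Blocks until every targeted CPU has executed flush_tlb_cb(),
         * so passing the address of a local variable is safe here.
         */
        smp_call_function(mask, flush_tlb_cb, &gva);
}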

hypervisor/include/arch/x86/irq.h

Lines changed: 7 additions & 0 deletions
@@ -47,6 +47,13 @@ struct intr_excp_ctx {
         uint64_t ss;
 };
 
+typedef void (*smp_call_func_t)(void *data);
+struct smp_call_info_data {
+        smp_call_func_t func;
+        void *data;
+};
+
+void smp_call_function(uint64_t mask, smp_call_func_t func, void *data);
 int handle_level_interrupt_common(struct irq_desc *desc,
                 __unused void *handler_data);
 int common_handler_edge(struct irq_desc *desc, __unused void *handler_data);
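One consequence of this layout is that every target CPU receives the same func/data pair through its per_cpu smp_call_info, so the payload must be read-only or partitioned per CPU. A small sketch of the latter; NUM_SLOTS and read_tsc() are assumptions made only for this example:

/* Hypothetical example, not part of this commit. */
#define NUM_SLOTS       8U      /* assumed upper bound on physical CPU count */

struct tsc_snapshot {
        uint64_t tsc[NUM_SLOTS];        /* one slot per physical CPU */
};

static void snapshot_tsc_cb(void *data)
{
        struct tsc_snapshot *snap = data;

        /* Each CPU writes only its own slot, so no locking is needed. */
        snap->tsc[get_cpu_id()] = read_tsc();   /* read_tsc() is hypothetical */
}

/* Caller side: smp_call_function(mask, snapshot_tsc_cb, &snapshot); */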

hypervisor/include/arch/x86/per_cpu.h

Lines changed: 1 addition & 0 deletions
@@ -46,6 +46,7 @@ struct per_cpu_region {
         uint8_t stack[CONFIG_STACK_SIZE] __aligned(16);
         char logbuf[LOG_MESSAGE_MAX_SIZE];
         uint8_t lapic_id;
+        struct smp_call_info_data smp_call_info;
 } __aligned(CPU_PAGE_SIZE); //per_cpu_region size aligned with CPU_PAGE_SIZE
 
 extern struct per_cpu_region *per_cpu_data_base_ptr;
