Commit 8a8b1a4

mgcaojren1 authored and committed
remove timer spinlock
The timer list is operated on per-CPU, and no interrupt service routine operates on it either, so the spinlock is unnecessary.

Signed-off-by: Minggui Cao <minggui.cao@intel.com>
Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>
Reviewed-by: Jason Chen CJ <jason.cj.chen@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
1 parent 3648a0c commit 8a8b1a4
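
The reasoning is the standard per-CPU locking argument: a structure that is touched only by code running on its owning CPU, and never from an interrupt handler, cannot be accessed concurrently, so a spinlock adds cost without adding safety. Below is a minimal sketch of that pattern; the names (cpu_timer_state, local_timer_state, MAX_CPUS) are illustrative and not from the ACRN tree.

#include <stdint.h>

#define MAX_CPUS 8	/* illustrative; real code sizes this per platform */

struct list_head { struct list_head *prev, *next; };

/* Each CPU owns exactly one element of the array below. Only code running
 * on that CPU -- and never an interrupt service routine -- reads or writes
 * its element, so no lock is required around any access. */
struct cpu_timer_state {
	struct list_head timer_list;	/* active timers, this CPU only */
	uint64_t free_bitmap;		/* free slots in the local pool */
};

static struct cpu_timer_state timer_state[MAX_CPUS];

static struct cpu_timer_state *local_timer_state(int cpu_id)
{
	/* Lock-free by construction: cpu_id is the caller's own CPU, so no
	 * other context can race on timer_state[cpu_id]. */
	return &timer_state[cpu_id];
}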


hypervisor/arch/x86/timer.c

Lines changed: 0 additions & 36 deletions
@@ -65,7 +65,6 @@ struct per_cpu_timers {
 	struct timer *timers_pool; /* it's timers pool for allocation */
 	uint64_t free_bitmap;
 	struct list_head timer_list; /* it's for runtime active timer list */
-	spinlock_t lock;
 	int cpu_id;
 	struct timer_statistics stat;
 };
@@ -85,13 +84,9 @@ static struct timer *alloc_timer(int cpu_id)
 	struct per_cpu_timers *cpu_timer;
 	struct timer *timer;
 
-	spinlock_rflags;
-
 	cpu_timer = &per_cpu(cpu_timers, cpu_id);
-	spinlock_irqsave_obtain(&cpu_timer->lock);
 	idx = bitmap_ffs(&cpu_timer->free_bitmap);
 	if (idx < 0) {
-		spinlock_irqrestore_release(&cpu_timer->lock);
 		return NULL;
 	}
 
@@ -102,7 +97,6 @@ static struct timer *alloc_timer(int cpu_id)
 	/* assign unique handle and never duplicate */
 	timer = cpu_timer->timers_pool + idx;
 	timer->handle = cpu_timer->stat.total_added_cnt;
-	spinlock_irqrestore_release(&cpu_timer->lock);
 
 	ASSERT((cpu_timer->timers_pool[cpu_id].cpu_id == cpu_id),
 		"timer cpu_id did not match");
@@ -113,16 +107,12 @@ static void release_timer(struct timer *timer)
 {
 	struct per_cpu_timers *cpu_timer;
 
-	spinlock_rflags;
-
 	cpu_timer = &per_cpu(cpu_timers, timer->cpu_id);
 	timer->priv_data = 0;
 	timer->func = NULL;
 	timer->deadline = 0;
-	spinlock_irqsave_obtain(&cpu_timer->lock);
 	bitmap_set(timer->id, &cpu_timer->free_bitmap);
 	cpu_timer->stat.pending_cnt--;
-	spinlock_irqrestore_release(&cpu_timer->lock);
 }
 
 static int get_target_cpu(void)
@@ -137,17 +127,13 @@ find_expired_timer(struct per_cpu_timers *cpu_timer, uint64_t tsc_now)
 	struct timer *timer;
 	struct list_head *pos;
 
-	spinlock_rflags;
-
-	spinlock_irqsave_obtain(&cpu_timer->lock);
 	list_for_each(pos, &cpu_timer->timer_list) {
 		timer = list_entry(pos, struct timer, node);
 		if (timer->deadline <= tsc_now)
 			goto UNLOCK;
 	}
 	timer = NULL;
 UNLOCK:
-	spinlock_irqrestore_release(&cpu_timer->lock);
 	return timer;
 }
 
@@ -191,12 +177,9 @@ _search_timer_by_handle(struct per_cpu_timers *cpu_timer, long handle)
 static void
 run_timer(struct per_cpu_timers *cpu_timer, struct timer *timer)
 {
-	spinlock_rflags;
 
 	/* remove from list first */
-	spinlock_irqsave_obtain(&cpu_timer->lock);
 	list_del(&timer->node);
-	spinlock_irqrestore_release(&cpu_timer->lock);
 
 	/* deadline = 0 means stop timer, we should skip */
 	if (timer->func && timer->deadline != 0UL)
@@ -223,15 +206,11 @@ static inline void schedule_next_timer(int cpu)
 	struct timer *timer;
 	struct per_cpu_timers *cpu_timer = &per_cpu(cpu_timers, cpu);
 
-	spinlock_rflags;
-
-	spinlock_irqsave_obtain(&cpu_timer->lock);
 	timer = _search_nearest_timer(cpu_timer);
 	if (timer) {
 		/* it is okay to program a expired time */
 		msr_write(MSR_IA32_TSC_DEADLINE, timer->deadline);
 	}
-	spinlock_irqrestore_release(&cpu_timer->lock);
 }
 
 int request_timer_irq(int cpu, dev_handler_t func, void *data, const char *name)
@@ -280,7 +259,6 @@ static void init_timer_pool(void)
 	cpu_timer->free_bitmap = (1UL<<MAX_TIMER_ACTIONS)-1;
 
 	INIT_LIST_HEAD(&cpu_timer->timer_list);
-	spinlock_init(&cpu_timer->lock);
 	for (j = 0; j < MAX_TIMER_ACTIONS; j++) {
 		timers_pool[j].id = j;
 		timers_pool[j].cpu_id = i;
@@ -371,8 +349,6 @@ long add_timer(timer_handle_t func, uint64_t data, uint64_t deadline)
 	struct per_cpu_timers *cpu_timer;
 	int cpu_id = get_target_cpu();
 
-	spinlock_rflags;
-
 	if (deadline == 0 || func == NULL)
 		return -1;
 
@@ -389,12 +365,10 @@ long add_timer(timer_handle_t func, uint64_t data, uint64_t deadline)
 	cpu_timer = &per_cpu(cpu_timers, timer->cpu_id);
 
 	/* We need irqsave here even softirq enabled to protect timer_list */
-	spinlock_irqsave_obtain(&cpu_timer->lock);
 	list_add_tail(&timer->node, &cpu_timer->timer_list);
 	cpu_timer->stat.last.added_id = timer->id;
 	cpu_timer->stat.last.added_time = rdtsc();
 	cpu_timer->stat.last.added_deadline = timer->deadline;
-	spinlock_irqrestore_release(&cpu_timer->lock);
 	TRACE_4I(TRACE_TIMER_ACTION_ADDED, timer->id, timer->deadline,
 		timer->deadline >> 32, cpu_timer->stat.total_added_cnt);
 
@@ -413,14 +387,12 @@ update_timer(long handle, timer_handle_t func, uint64_t data,
 	struct per_cpu_timers *cpu_timer;
 	int cpu_id = get_target_cpu();
 
-	spinlock_rflags;
 	bool ret = false;
 
 	if (deadline == 0)
 		return -1;
 
 	cpu_timer = &per_cpu(cpu_timers, cpu_id);
-	spinlock_irqsave_obtain(&cpu_timer->lock);
 	timer = _search_timer_by_handle(cpu_timer, handle);
 	if (timer) {
 		/* update deadline and re-sort */
@@ -432,7 +404,6 @@ update_timer(long handle, timer_handle_t func, uint64_t data,
 			cpu_timer->stat.total_added_cnt);
 		ret = true;
 	}
-	spinlock_irqrestore_release(&cpu_timer->lock);
 
 	if (ret)
 		schedule_next_timer(cpu_id);
@@ -451,11 +422,9 @@ bool cancel_timer(long handle, int cpu_id)
 	struct timer *timer;
 	struct per_cpu_timers *cpu_timer;
 
-	spinlock_rflags;
 	bool ret = false;
 
 	cpu_timer = &per_cpu(cpu_timers, cpu_id);
-	spinlock_irqsave_obtain(&cpu_timer->lock);
 	timer = _search_timer_by_handle(cpu_timer, handle);
 	if (timer) {
 		/* NOTE: we can not directly release timer here.
@@ -466,7 +435,6 @@ bool cancel_timer(long handle, int cpu_id)
 		timer->func = NULL;
 		ret = true;
 	}
-	spinlock_irqrestore_release(&cpu_timer->lock);
 	return ret;
 }
 
@@ -477,8 +445,6 @@ void dump_timer_pool_info(int cpu_id)
 	struct list_head *pos;
 	int cn = 0;
 
-	spinlock_rflags;
-
 	if (cpu_id >= phy_cpu_num)
 		return;
 
@@ -500,13 +466,11 @@ void dump_timer_pool_info(int cpu_id)
 		cpu_timer->stat.last.added_time,
 		cpu_timer->stat.last.added_deadline);
 
-	spinlock_irqsave_obtain(&cpu_timer->lock);
 	list_for_each(pos, &cpu_timer->timer_list) {
 		cn++;
 		pr_info("-->pending: %d trigger: 0x%llx", cn,
 			list_entry(pos, struct timer, node)->deadline);
 	}
-	spinlock_irqrestore_release(&cpu_timer->lock);
 }
 
 void check_tsc(void)