@@ -65,7 +65,6 @@ struct per_cpu_timers {
 	struct timer *timers_pool;	/* it's timers pool for allocation */
 	uint64_t free_bitmap;
 	struct list_head timer_list;	/* it's for runtime active timer list */
-	spinlock_t lock;
 	int cpu_id;
 	struct timer_statistics stat;
 };
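For reference, the structure after this hunk, reassembled from the context lines above (the premise of the whole patch, as the removals below suggest, is that each per_cpu_timers instance is only ever touched from its owning CPU, so the per-instance lock is unnecessary):

struct per_cpu_timers {
	struct timer *timers_pool;	/* it's timers pool for allocation */
	uint64_t free_bitmap;
	struct list_head timer_list;	/* it's for runtime active timer list */
	int cpu_id;
	struct timer_statistics stat;
};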
@@ -85,13 +84,9 @@ static struct timer *alloc_timer(int cpu_id)
 	struct per_cpu_timers *cpu_timer;
 	struct timer *timer;
 
-	spinlock_rflags;
-
 	cpu_timer = &per_cpu(cpu_timers, cpu_id);
-	spinlock_irqsave_obtain(&cpu_timer->lock);
 	idx = bitmap_ffs(&cpu_timer->free_bitmap);
 	if (idx < 0) {
-		spinlock_irqrestore_release(&cpu_timer->lock);
 		return NULL;
 	}
 
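This hunk and the next show the slot allocator: free_bitmap starts with one set bit per pool slot (see init_timer_pool further down), bitmap_ffs picks the lowest free slot, and a negative result means the pool is exhausted; the bit itself is presumably cleared in the lines elided between the two hunks. A self-contained sketch of the same scheme, with illustrative names rather than ACRN's:

#include <stdint.h>
#include <stdio.h>

#define POOL_SLOTS 32	/* illustrative; stands in for MAX_TIMER_ACTIONS */

/* Lowest set bit, or -1 if the map is empty (bit i set = slot i free). */
static int ffs64(uint64_t map)
{
	return map ? __builtin_ctzll(map) : -1;
}

static int slot_alloc(uint64_t *free_bitmap)
{
	int idx = ffs64(*free_bitmap);

	if (idx < 0)
		return -1;			/* pool exhausted */
	*free_bitmap &= ~(1ULL << idx);		/* mark slot busy */
	return idx;
}

static void slot_free(uint64_t *free_bitmap, int idx)
{
	*free_bitmap |= 1ULL << idx;		/* return slot to the pool */
}

int main(void)
{
	uint64_t free_bitmap = (1ULL << POOL_SLOTS) - 1;	/* all free */
	int a = slot_alloc(&free_bitmap);
	int b = slot_alloc(&free_bitmap);

	slot_free(&free_bitmap, a);
	printf("a=%d b=%d next=%d\n", a, b, slot_alloc(&free_bitmap));
	return 0;
}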
@@ -102,7 +97,6 @@ static struct timer *alloc_timer(int cpu_id)
 	/* assign unique handle and never duplicate */
 	timer = cpu_timer->timers_pool + idx;
 	timer->handle = cpu_timer->stat.total_added_cnt;
-	spinlock_irqrestore_release(&cpu_timer->lock);
 
 	ASSERT((cpu_timer->timers_pool[cpu_id].cpu_id == cpu_id),
 		"timer cpu_id did not match");
@@ -113,16 +107,12 @@ static void release_timer(struct timer *timer)
 {
 	struct per_cpu_timers *cpu_timer;
 
-	spinlock_rflags;
-
 	cpu_timer = &per_cpu(cpu_timers, timer->cpu_id);
 	timer->priv_data = 0;
 	timer->func = NULL;
 	timer->deadline = 0;
-	spinlock_irqsave_obtain(&cpu_timer->lock);
 	bitmap_set(timer->id, &cpu_timer->free_bitmap);
 	cpu_timer->stat.pending_cnt--;
-	spinlock_irqrestore_release(&cpu_timer->lock);
 }
 
 static int get_target_cpu(void)
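release_timer is fully visible in this hunk; reassembled from the context lines, the post-patch function is a plain same-CPU teardown, with bitmap_set undoing the bitmap_ffs allocation above:

static void release_timer(struct timer *timer)
{
	struct per_cpu_timers *cpu_timer;

	cpu_timer = &per_cpu(cpu_timers, timer->cpu_id);
	timer->priv_data = 0;
	timer->func = NULL;
	timer->deadline = 0;
	bitmap_set(timer->id, &cpu_timer->free_bitmap);
	cpu_timer->stat.pending_cnt--;
}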
@@ -137,17 +127,13 @@ find_expired_timer(struct per_cpu_timers *cpu_timer, uint64_t tsc_now)
 	struct timer *timer;
 	struct list_head *pos;
 
-	spinlock_rflags;
-
-	spinlock_irqsave_obtain(&cpu_timer->lock);
 	list_for_each(pos, &cpu_timer->timer_list) {
 		timer = list_entry(pos, struct timer, node);
 		if (timer->deadline <= tsc_now)
 			goto UNLOCK;
 	}
 	timer = NULL;
 UNLOCK:
-	spinlock_irqrestore_release(&cpu_timer->lock);
 	return timer;
 }
 
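With the locks gone, the UNLOCK label in find_expired_timer no longer pairs with a release and survives only as a jump target. An equivalent shape without it, assuming the same list_for_each/list_entry helpers the file already uses, would be:

static struct timer *
find_expired_timer(struct per_cpu_timers *cpu_timer, uint64_t tsc_now)
{
	struct timer *timer;
	struct list_head *pos;

	list_for_each(pos, &cpu_timer->timer_list) {
		timer = list_entry(pos, struct timer, node);
		if (timer->deadline <= tsc_now)
			return timer;	/* first expired entry */
	}
	return NULL;			/* nothing due yet */
}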
@@ -191,12 +177,9 @@ _search_timer_by_handle(struct per_cpu_timers *cpu_timer, long handle)
 static void
 run_timer(struct per_cpu_timers *cpu_timer, struct timer *timer)
 {
-	spinlock_rflags;
 
 	/* remove from list first */
-	spinlock_irqsave_obtain(&cpu_timer->lock);
 	list_del(&timer->node);
-	spinlock_irqrestore_release(&cpu_timer->lock);
 
 	/* deadline = 0 means stop timer, we should skip */
 	if (timer->func && timer->deadline != 0UL)
@@ -223,15 +206,11 @@ static inline void schedule_next_timer(int cpu)
 	struct timer *timer;
 	struct per_cpu_timers *cpu_timer = &per_cpu(cpu_timers, cpu);
 
-	spinlock_rflags;
-
-	spinlock_irqsave_obtain(&cpu_timer->lock);
 	timer = _search_nearest_timer(cpu_timer);
 	if (timer) {
 		/* it is okay to program a expired time */
 		msr_write(MSR_IA32_TSC_DEADLINE, timer->deadline);
 	}
-	spinlock_irqrestore_release(&cpu_timer->lock);
 }
 
 int request_timer_irq(int cpu, dev_handler_t func, void *data, const char *name)
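schedule_next_timer arms the hardware by writing the nearest deadline to the local APIC's TSC-deadline MSR; as the source comment notes, a value already in the past simply fires immediately. A hedged, self-contained sketch of what msr_write presumably boils down to (0x6E0 is the architectural IA32_TSC_DEADLINE MSR; this assumes the APIC timer is already in TSC-deadline mode, and it is ring-0 only since WRMSR faults elsewhere):

#include <stdint.h>

#define MSR_IA32_TSC_DEADLINE	0x6E0U	/* architectural IA32_TSC_DEADLINE */

/* WRMSR: EDX:EAX -> MSR[ECX]. */
static inline void wrmsr64(uint32_t msr, uint64_t val)
{
	__asm__ __volatile__("wrmsr"
			     :
			     : "c"(msr), "a"((uint32_t)val),
			       "d"((uint32_t)(val >> 32)));
}

/* RDTSC: EDX:EAX <- current time-stamp counter. */
static inline uint64_t rdtsc64(void)
{
	uint32_t lo, hi;

	__asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
	return ((uint64_t)hi << 32) | lo;
}

/* Arm a one-shot interrupt 'delta' TSC ticks from now; a deadline
 * already in the past fires immediately, which is why programming
 * an expired time is harmless. */
static inline void arm_tsc_deadline(uint64_t delta)
{
	wrmsr64(MSR_IA32_TSC_DEADLINE, rdtsc64() + delta);
}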
@@ -280,7 +259,6 @@ static void init_timer_pool(void)
 		cpu_timer->free_bitmap = (1UL << MAX_TIMER_ACTIONS) - 1;
 
 		INIT_LIST_HEAD(&cpu_timer->timer_list);
-		spinlock_init(&cpu_timer->lock);
 		for (j = 0; j < MAX_TIMER_ACTIONS; j++) {
 			timers_pool[j].id = j;
 			timers_pool[j].cpu_id = i;
@@ -371,8 +349,6 @@ long add_timer(timer_handle_t func, uint64_t data, uint64_t deadline)
 	struct per_cpu_timers *cpu_timer;
 	int cpu_id = get_target_cpu();
 
-	spinlock_rflags;
-
 	if (deadline == 0 || func == NULL)
 		return -1;
 
@@ -389,12 +365,10 @@ long add_timer(timer_handle_t func, uint64_t data, uint64_t deadline)
 	cpu_timer = &per_cpu(cpu_timers, timer->cpu_id);
 
 	/* We need irqsave here even softirq enabled to protect timer_list */
-	spinlock_irqsave_obtain(&cpu_timer->lock);
 	list_add_tail(&timer->node, &cpu_timer->timer_list);
 	cpu_timer->stat.last.added_id = timer->id;
 	cpu_timer->stat.last.added_time = rdtsc();
 	cpu_timer->stat.last.added_deadline = timer->deadline;
-	spinlock_irqrestore_release(&cpu_timer->lock);
 	TRACE_4I(TRACE_TIMER_ACTION_ADDED, timer->id, timer->deadline,
 		timer->deadline >> 32, cpu_timer->stat.total_added_cnt);
 
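Note that the surviving context comment still says irqsave is needed to protect timer_list even though the acquisition below it is deleted; after this patch the list is presumably protected by only ever being touched from its owning CPU. For orientation, a hypothetical caller of the add_timer signature shown in the hunk header (my_handler, the data value, and the tick delay are all made up, and timer_handle_t's exact signature is not visible in this diff):

/* Hypothetical usage sketch, not ACRN code. */
static int my_handler(uint64_t data);

static void arm_oneshot_example(void)
{
	/* fire roughly one million TSC ticks from now */
	long handle = add_timer(my_handler, 0UL, rdtsc() + 1000000UL);

	if (handle < 0) {
		/* rejected: a deadline of 0 or a NULL handler returns -1 */
	}
}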
@@ -413,14 +387,12 @@ update_timer(long handle, timer_handle_t func, uint64_t data,
 	struct per_cpu_timers *cpu_timer;
 	int cpu_id = get_target_cpu();
 
-	spinlock_rflags;
 	bool ret = false;
 
 	if (deadline == 0)
 		return -1;
 
 	cpu_timer = &per_cpu(cpu_timers, cpu_id);
-	spinlock_irqsave_obtain(&cpu_timer->lock);
 	timer = _search_timer_by_handle(cpu_timer, handle);
 	if (timer) {
 		/* update deadline and re-sort */
@@ -432,7 +404,6 @@ update_timer(long handle, timer_handle_t func, uint64_t data,
 			cpu_timer->stat.total_added_cnt);
 		ret = true;
 	}
-	spinlock_irqrestore_release(&cpu_timer->lock);
 
 	if (ret)
 		schedule_next_timer(cpu_id);
@@ -451,11 +422,9 @@ bool cancel_timer(long handle, int cpu_id)
 	struct timer *timer;
 	struct per_cpu_timers *cpu_timer;
 
-	spinlock_rflags;
 	bool ret = false;
 
 	cpu_timer = &per_cpu(cpu_timers, cpu_id);
-	spinlock_irqsave_obtain(&cpu_timer->lock);
 	timer = _search_timer_by_handle(cpu_timer, handle);
 	if (timer) {
 		/* NOTE: we can not directly release timer here.
@@ -466,7 +435,6 @@ bool cancel_timer(long handle, int cpu_id)
 		timer->func = NULL;
 		ret = true;
 	}
-	spinlock_irqrestore_release(&cpu_timer->lock);
 	return ret;
 }
 
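The truncated NOTE in the previous hunk explains why cancel_timer only marks the timer dead instead of releasing it: the slot is reclaimed later on the timer's own CPU, and run_timer above skips entries whose func is NULL or whose deadline is 0. A minimal sketch of that deferred-cancellation pattern, with illustrative names rather than ACRN's:

#include <stdint.h>
#include <stddef.h>

/* Illustrative entry type, not ACRN's struct timer. */
struct entry {
	int (*func)(uint64_t data);
	uint64_t deadline;
	uint64_t data;
};

/* Cancellation just marks the entry dead; the slot itself is
 * reclaimed later on the owning CPU (mirroring release_timer). */
static void cancel_entry(struct entry *e)
{
	e->func = NULL;
	e->deadline = 0;
}

/* Dispatch skips dead entries, exactly as run_timer does with its
 * "deadline = 0 means stop timer" check. */
static void dispatch_entry(struct entry *e)
{
	if (e->func != NULL && e->deadline != 0UL)
		(void)e->func(e->data);
}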
@@ -477,8 +445,6 @@ void dump_timer_pool_info(int cpu_id)
 	struct list_head *pos;
 	int cn = 0;
 
-	spinlock_rflags;
-
 	if (cpu_id >= phy_cpu_num)
 		return;
 
@@ -500,13 +466,11 @@ void dump_timer_pool_info(int cpu_id)
 		cpu_timer->stat.last.added_time,
 		cpu_timer->stat.last.added_deadline);
 
-	spinlock_irqsave_obtain(&cpu_timer->lock);
 	list_for_each(pos, &cpu_timer->timer_list) {
 		cn++;
 		pr_info("-->pending: %d trigger: 0x%llx", cn,
 			list_entry(pos, struct timer, node)->deadline);
 	}
-	spinlock_irqrestore_release(&cpu_timer->lock);
 }
 
 void check_tsc(void)