@@ -15,39 +15,12 @@ static uint32_t vector_to_irq[NR_MAX_VECTOR + 1];
 
 spurious_handler_t spurious_handler;
 
-static inline void handle_irq(struct irq_desc *desc);
-
 #define NR_STATIC_MAPPINGS (2U)
 static uint32_t irq_static_mappings[NR_STATIC_MAPPINGS][2] = {
         {TIMER_IRQ, VECTOR_TIMER},
         {NOTIFY_IRQ, VECTOR_NOTIFY_VCPU},
 };
 
-static void init_irq_desc(void)
-{
-        uint32_t i;
-
-        for (i = 0U; i < NR_IRQS; i++) {
-                irq_desc_array[i].irq = i;
-                irq_desc_array[i].vector = VECTOR_INVALID;
-                spinlock_init(&irq_desc_array[i].lock);
-        }
-
-        for (i = 0U; i <= NR_MAX_VECTOR; i++) {
-                vector_to_irq[i] = IRQ_INVALID;
-        }
-
-        /* init fixed mapping for specific irq and vector */
-        for (i = 0U; i < NR_STATIC_MAPPINGS; i++) {
-                uint32_t irq = irq_static_mappings[i][0];
-                uint32_t vr = irq_static_mappings[i][1];
-
-                irq_desc_array[irq].vector = vr;
-                irq_desc_array[irq].used = IRQ_ASSIGNED;
-                vector_to_irq[vr] = irq;
-        }
-}
-
 /*
  * alloc a free irq if req_irq is IRQ_INVALID, or else set assigned
  * return: irq num on success, IRQ_INVALID on failure
@@ -184,12 +157,6 @@ void free_irq_vector(uint32_t irq)
         spinlock_irqrestore_release(&irq_alloc_spinlock, rflags);
 }
 
-static void disable_pic_irq(void)
-{
-        pio_write8(0xffU, 0xA1U);
-        pio_write8(0xffU, 0x21U);
-}
-
 /*
  * There are four cases as to irq/vector allocation:
  * case 1: req_irq = IRQ_INVALID
@@ -256,44 +223,55 @@ int32_t request_irq(uint32_t req_irq,
         return (int32_t)irq;
 }
 
-uint32_t irq_to_vector(uint32_t irq)
+void free_irq(uint32_t irq)
 {
-        if (irq < NR_IRQS) {
-                return irq_desc_array[irq].vector;
-        } else {
-                return VECTOR_INVALID;
-        }
-}
+        uint64_t rflags;
+        struct irq_desc *desc;
 
-void init_default_irqs(uint16_t cpu_id)
-{
-        if (cpu_id != BOOT_CPU_ID) {
+        if (irq >= NR_IRQS) {
                 return;
         }
 
-        init_irq_desc();
+        desc = &irq_desc_array[irq];
+        dev_dbg(ACRN_DBG_IRQ, "[%s] irq%d vr:0x%x",
+                __func__, irq, irq_to_vector(irq));
 
-        /* we use ioapic only, disable legacy PIC */
-        disable_pic_irq();
-        setup_ioapic_irq();
-        init_softirq();
+        free_irq_vector(irq);
+        free_irq_num(irq);
+
+        spinlock_irqsave_obtain(&desc->lock, &rflags);
+        desc->action = NULL;
+        desc->priv_data = NULL;
+        desc->flags = IRQF_NONE;
+        spinlock_irqrestore_release(&desc->lock, rflags);
 }
 
-void dispatch_exception(struct intr_excp_ctx *ctx)
+void set_irq_trigger_mode(uint32_t irq, bool is_level_trigger)
 {
-        uint16_t pcpu_id = get_cpu_id();
-
-        /* Obtain lock to ensure exception dump doesn't get corrupted */
-        spinlock_obtain(&exception_spinlock);
+        uint64_t rflags;
+        struct irq_desc *desc;
 
-        /* Dump exception context */
-        dump_exception(ctx, pcpu_id);
+        if (irq >= NR_IRQS) {
+                return;
+        }
 
-        /* Release lock to let other CPUs handle exception */
-        spinlock_release(&exception_spinlock);
+        desc = &irq_desc_array[irq];
+        spinlock_irqsave_obtain(&desc->lock, &rflags);
+        if (is_level_trigger == true) {
+                desc->flags |= IRQF_LEVEL;
+        } else {
+                desc->flags &= ~IRQF_LEVEL;
+        }
+        spinlock_irqrestore_release(&desc->lock, rflags);
+}
 
-        /* Halt the CPU */
-        cpu_dead(pcpu_id);
+uint32_t irq_to_vector(uint32_t irq)
+{
+        if (irq < NR_IRQS) {
+                return irq_desc_array[irq].vector;
+        } else {
+                return VECTOR_INVALID;
+        }
 }
 
 static void handle_spurious_interrupt(uint32_t vector)
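
The new free_irq()/set_irq_trigger_mode() pair completes the public allocation API. As a minimal, hypothetical usage sketch: the full request_irq() signature is cut off in this diff, so the four-argument form (requested irq, action callback, private data, flags) is assumed from the call sites in this file, and my_dev_isr()/my_dev_example() are illustrative names only:

static void my_dev_isr(uint32_t irq, void *priv_data)
{
        /* hypothetical: quiesce the device here */
}

static void my_dev_example(void)
{
        /* IRQ_INVALID asks the allocator to pick any free irq */
        int32_t irq = request_irq(IRQ_INVALID, my_dev_isr, NULL, IRQF_NONE);

        if (irq < 0) {
                return; /* allocation failed */
        }

        /* level-triggered GSIs get masked/unmasked around the action */
        set_irq_trigger_mode((uint32_t)irq, true);

        /* ... on teardown, release the vector, irq num and action: */
        free_irq((uint32_t)irq);
}
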
@@ -309,6 +287,41 @@ static void handle_spurious_interrupt(uint32_t vector)
         }
 }
 
+static inline bool irq_need_mask(struct irq_desc *desc)
+{
+        /* level triggered gsi should be masked */
+        return (((desc->flags & IRQF_LEVEL) != 0U)
+                && irq_is_gsi(desc->irq));
+}
+
+static inline bool irq_need_unmask(struct irq_desc *desc)
+{
+        /* level triggered gsi for non-ptdev should be unmasked */
+        return (((desc->flags & IRQF_LEVEL) != 0U)
+                && ((desc->flags & IRQF_PT) == 0U)
+                && irq_is_gsi(desc->irq));
+}
+
+static inline void handle_irq(struct irq_desc *desc)
+{
+        irq_action_t action = desc->action;
+
+        if (irq_need_mask(desc)) {
+                GSI_MASK_IRQ(desc->irq);
+        }
+
+        /* Send EOI to LAPIC/IOAPIC IRR */
+        send_lapic_eoi();
+
+        if (action != NULL) {
+                action(desc->irq, desc->priv_data);
+        }
+
+        if (irq_need_unmask(desc)) {
+                GSI_UNMASK_IRQ(desc->irq);
+        }
+}
+
 /* do_IRQ() */
 void dispatch_interrupt(struct intr_excp_ctx *ctx)
 {
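
The ordering inside the relocated handle_irq() is deliberate: a level-triggered GSI is masked before the EOI so a still-asserted line cannot immediately re-fire, and it is unmasked only after the action has run (passthrough lines, IRQF_PT, stay masked for the guest's EOI path to handle). A sketch of the callback contract this implies, inferred from the call action(desc->irq, desc->priv_data); the real irq_action_t typedef lives in the irq headers, not in this diff:

/* assumed shape of irq_action_t, per the call site above */
typedef void (*irq_action_sketch_t)(uint32_t irq, void *priv_data);

/* hypothetical action: for a level-triggered GSI it runs with the
 * line still masked, so it must clear the interrupt condition
 * before handle_irq() unmasks the GSI */
static void sample_action(uint32_t irq, void *priv_data)
{
        volatile uint32_t *dev_status = (volatile uint32_t *)priv_data;

        *dev_status = 0U; /* hypothetical status-clear write */
}
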
@@ -339,6 +352,23 @@ void dispatch_interrupt(struct intr_excp_ctx *ctx)
         return;
 }
 
+void dispatch_exception(struct intr_excp_ctx *ctx)
+{
+        uint16_t pcpu_id = get_cpu_id();
+
+        /* Obtain lock to ensure exception dump doesn't get corrupted */
+        spinlock_obtain(&exception_spinlock);
+
+        /* Dump exception context */
+        dump_exception(ctx, pcpu_id);
+
+        /* Release lock to let other CPUs handle exception */
+        spinlock_release(&exception_spinlock);
+
+        /* Halt the CPU */
+        cpu_dead(pcpu_id);
+}
+
 #ifdef CONFIG_PARTITION_MODE
 void partition_mode_dispatch_interrupt(struct intr_excp_ctx *ctx)
 {
@@ -361,83 +391,6 @@ void partition_mode_dispatch_interrupt(struct intr_excp_ctx *ctx)
 }
 #endif
 
-static inline bool irq_need_mask(struct irq_desc *desc)
-{
-        /* level triggered gsi should be masked */
-        return (((desc->flags & IRQF_LEVEL) != 0U)
-                && irq_is_gsi(desc->irq));
-}
-
-static inline bool irq_need_unmask(struct irq_desc *desc)
-{
-        /* level triggered gsi for non-ptdev should be unmasked */
-        return (((desc->flags & IRQF_LEVEL) != 0U)
-                && ((desc->flags & IRQF_PT) == 0U)
-                && irq_is_gsi(desc->irq));
-}
-
-static inline void handle_irq(struct irq_desc *desc)
-{
-        irq_action_t action = desc->action;
-
-        if (irq_need_mask(desc)) {
-                GSI_MASK_IRQ(desc->irq);
-        }
-
-        /* Send EOI to LAPIC/IOAPIC IRR */
-        send_lapic_eoi();
-
-        if (action != NULL) {
-                action(desc->irq, desc->priv_data);
-        }
-
-        if (irq_need_unmask(desc)) {
-                GSI_UNMASK_IRQ(desc->irq);
-        }
-}
-
-void set_irq_trigger_mode(uint32_t irq, bool is_level_trigger)
-{
-        uint64_t rflags;
-        struct irq_desc *desc;
-
-        if (irq >= NR_IRQS) {
-                return;
-        }
-
-        desc = &irq_desc_array[irq];
-        spinlock_irqsave_obtain(&desc->lock, &rflags);
-        if (is_level_trigger == true) {
-                desc->flags |= IRQF_LEVEL;
-        } else {
-                desc->flags &= ~IRQF_LEVEL;
-        }
-        spinlock_irqrestore_release(&desc->lock, rflags);
-}
-
-void free_irq(uint32_t irq)
-{
-        uint64_t rflags;
-        struct irq_desc *desc;
-
-        if (irq >= NR_IRQS) {
-                return;
-        }
-
-        desc = &irq_desc_array[irq];
-        dev_dbg(ACRN_DBG_IRQ, "[%s] irq%d vr:0x%x",
-                __func__, irq, irq_to_vector(irq));
-
-        free_irq_vector(irq);
-        free_irq_num(irq);
-
-        spinlock_irqsave_obtain(&desc->lock, &rflags);
-        desc->action = NULL;
-        desc->priv_data = NULL;
-        desc->flags = IRQF_NONE;
-        spinlock_irqrestore_release(&desc->lock, rflags);
-}
-
 #ifdef HV_DEBUG
 void get_cpu_interrupt_info(char *str_arg, int str_max)
 {
@@ -476,6 +429,51 @@ void get_cpu_interrupt_info(char *str_arg, int str_max)
 }
 #endif /* HV_DEBUG */
 
+static void init_irq_descs(void)
+{
+        uint32_t i;
+
+        for (i = 0U; i < NR_IRQS; i++) {
+                irq_desc_array[i].irq = i;
+                irq_desc_array[i].vector = VECTOR_INVALID;
+                spinlock_init(&irq_desc_array[i].lock);
+        }
+
+        for (i = 0U; i <= NR_MAX_VECTOR; i++) {
+                vector_to_irq[i] = IRQ_INVALID;
+        }
+
+        /* init fixed mapping for specific irq and vector */
+        for (i = 0U; i < NR_STATIC_MAPPINGS; i++) {
+                uint32_t irq = irq_static_mappings[i][0];
+                uint32_t vr = irq_static_mappings[i][1];
+
+                irq_desc_array[irq].vector = vr;
+                irq_desc_array[irq].used = IRQ_ASSIGNED;
+                vector_to_irq[vr] = irq;
+        }
+}
+
+static void disable_pic_irqs(void)
+{
+        pio_write8(0xffU, 0xA1U);
+        pio_write8(0xffU, 0x21U);
+}
+
+void init_default_irqs(uint16_t cpu_id)
+{
+        if (cpu_id != BOOT_CPU_ID) {
+                return;
+        }
+
+        init_irq_descs();
+
+        /* we use ioapic only, disable legacy PIC */
+        disable_pic_irqs();
+        setup_ioapic_irqs();
+        init_softirq();
+}
+
 void interrupt_init(uint16_t pcpu_id)
 {
         struct host_idt_descriptor *idtd = &HOST_IDTR;
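
After this reshuffle, init_default_irqs() on the boot CPU runs init_irq_descs() before the legacy PIC is masked and the IOAPIC is set up, so the static TIMER_IRQ/NOTIFY_IRQ bindings are in place first. A hedged sanity-check sketch; check_static_mappings() is hypothetical, and the ASSERT macro's message form is assumed:

static void check_static_mappings(void)
{
        /* init_irq_descs() pre-bound these via irq_static_mappings[] */
        ASSERT(irq_to_vector(TIMER_IRQ) == VECTOR_TIMER,
                "timer static mapping lost");
        ASSERT(irq_to_vector(NOTIFY_IRQ) == VECTOR_NOTIFY_VCPU,
                "notify static mapping lost");
}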