@@ -16,7 +16,7 @@
  *    (typically dom0).
  * 2. VIRQs, typically used for timers.  These are per-cpu events.
  * 3. IPIs.
- * 4. Hardware interrupts. Not supported at present.
+ * 4. PIRQs - Hardware interrupts.
  *
  * Jeremy Fitzhardinge <[email protected]>, XenSource Inc, 2007
  */
@@ -46,6 +46,9 @@
 #include <xen/interface/hvm/hvm_op.h>
 #include <xen/interface/hvm/params.h>
 
+/* Leave low irqs free for identity mapping */
+#define LEGACY_IRQS	16
+
 /*
  * This lock protects updates to the following mapping and reference-count
  * arrays. The lock does not need to be acquired to read the mapping tables.
@@ -89,10 +92,12 @@ struct irq_info
 		enum ipi_vector ipi;
 		struct {
 			unsigned short gsi;
-			unsigned short vector;
+			unsigned char vector;
+			unsigned char flags;
 		} pirq;
 	} u;
 };
+#define PIRQ_NEEDS_EOI	(1 << 0)
 
 static struct irq_info irq_info[NR_IRQS];
 
@@ -113,6 +118,7 @@ static inline unsigned long *cpu_evtchn_mask(int cpu)
 
 static struct irq_chip xen_dynamic_chip;
 static struct irq_chip xen_percpu_chip;
+static struct irq_chip xen_pirq_chip;
 
 /* Constructor for packed IRQ information. */
 static struct irq_info mk_unbound_info(void)
@@ -225,6 +231,15 @@ static unsigned int cpu_from_evtchn(unsigned int evtchn)
 	return ret;
 }
 
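+/* Does this pirq require an explicit PHYSDEVOP_eoi after servicing? */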
+static bool pirq_needs_eoi(unsigned irq)
+{
+	struct irq_info *info = info_for_irq(irq);
+
+	BUG_ON(info->type != IRQT_PIRQ);
+
+	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
+}
+
 static inline unsigned long active_evtchns(unsigned int cpu,
 					   struct shared_info *sh,
 					   unsigned int idx)
@@ -365,6 +380,210 @@ static int find_unbound_irq(void)
 	return irq;
 }
 
+static bool identity_mapped_irq(unsigned irq)
+{
+	/* only identity map legacy irqs */
+	return irq < LEGACY_IRQS;
+}
+
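+/* Issue a PHYSDEVOP_eoi for pirqs the hypervisor flagged as needing one. */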
+static void pirq_unmask_notify(int irq)
+{
+	struct physdev_eoi eoi = { .irq = irq };
+
+	if (unlikely(pirq_needs_eoi(irq))) {
+		int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
+		WARN_ON(rc);
+	}
+}
+
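+/* Query Xen for the pirq's status and cache the needs-EOI bit in its flags. */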
+static void pirq_query_unmask(int irq)
+{
+	struct physdev_irq_status_query irq_status;
+	struct irq_info *info = info_for_irq(irq);
+
+	BUG_ON(info->type != IRQT_PIRQ);
+
+	irq_status.irq = irq;
+	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
+		irq_status.flags = 0;
+
+	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
+	if (irq_status.flags & XENIRQSTAT_needs_eoi)
+		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
+}
+
+static bool probing_irq(int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	return desc && desc->action == NULL;
+}
+
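+/*
+ * Bind the pirq to an event channel (unless one is already bound) and
+ * unmask it.  Called via the irq_chip when the irq is set up or re-enabled.
+ */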
+static unsigned int startup_pirq(unsigned int irq)
+{
+	struct evtchn_bind_pirq bind_pirq;
+	struct irq_info *info = info_for_irq(irq);
+	int evtchn = evtchn_from_irq(irq);
+
+	BUG_ON(info->type != IRQT_PIRQ);
+
+	if (VALID_EVTCHN(evtchn))
+		goto out;
+
+	bind_pirq.pirq = irq;
+	/* NB. We are happy to share unless we are probing. */
+	bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
+	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
+		if (!probing_irq(irq))
+			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
+			       irq);
+		return 0;
+	}
+	evtchn = bind_pirq.port;
+
+	pirq_query_unmask(irq);
+
+	evtchn_to_irq[evtchn] = irq;
+	bind_evtchn_to_cpu(evtchn, 0);
+	info->evtchn = evtchn;
+
+out:
+	unmask_evtchn(evtchn);
+	pirq_unmask_notify(irq);
+
+	return 0;
+}
+
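+/* Mask and close the event channel backing this pirq and drop the mapping. */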
+static void shutdown_pirq(unsigned int irq)
+{
+	struct evtchn_close close;
+	struct irq_info *info = info_for_irq(irq);
+	int evtchn = evtchn_from_irq(irq);
+
+	BUG_ON(info->type != IRQT_PIRQ);
+
+	if (!VALID_EVTCHN(evtchn))
+		return;
+
+	mask_evtchn(evtchn);
+
+	close.port = evtchn;
+	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
+		BUG();
+
+	bind_evtchn_to_cpu(evtchn, 0);
+	evtchn_to_irq[evtchn] = -1;
+	info->evtchn = 0;
+}
+
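+/*
+ * enable_pirq() simply re-runs startup; disable_pirq() is a no-op because
+ * the event channel is masked in ack_pirq() and unmasked in end_pirq().
+ */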
+static void enable_pirq(unsigned int irq)
+{
+	startup_pirq(irq);
+}
+
+static void disable_pirq(unsigned int irq)
+{
+}
+
+static void ack_pirq(unsigned int irq)
+{
+	int evtchn = evtchn_from_irq(irq);
+
+	move_native_irq(irq);
+
+	if (VALID_EVTCHN(evtchn)) {
+		mask_evtchn(evtchn);
+		clear_evtchn(evtchn);
+	}
+}
+
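+/*
+ * End of interrupt: if the irq was disabled while pending, tear it down;
+ * otherwise unmask the event channel and notify the hypervisor.
+ */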
+static void end_pirq(unsigned int irq)
+{
+	int evtchn = evtchn_from_irq(irq);
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (WARN_ON(!desc))
+		return;
+
+	if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) ==
+	    (IRQ_DISABLED|IRQ_PENDING)) {
+		shutdown_pirq(irq);
+	} else if (VALID_EVTCHN(evtchn)) {
+		unmask_evtchn(evtchn);
+		pirq_unmask_notify(irq);
+	}
+}
+
+static int find_irq_by_gsi(unsigned gsi)
+{
+	int irq;
+
+	for (irq = 0; irq < NR_IRQS; irq++) {
+		struct irq_info *info = info_for_irq(irq);
+
+		if (info == NULL || info->type != IRQT_PIRQ)
+			continue;
+
+		if (gsi_from_irq(irq) == gsi)
+			return irq;
+	}
+
+	return -1;
+}
+
+/*
+ * Allocate a physical irq, along with a vector.  We don't assign an
+ * event channel until the irq is actually started up.  Return an
+ * existing irq if we've already got one for the gsi.
+ */
+int xen_allocate_pirq(unsigned gsi)
+{
+	int irq;
+	struct physdev_irq irq_op;
+
+	spin_lock(&irq_mapping_update_lock);
+
+	irq = find_irq_by_gsi(gsi);
+	if (irq != -1) {
+		printk(KERN_INFO "xen_allocate_pirq: returning irq %d for gsi %u\n",
+		       irq, gsi);
+		goto out;	/* XXX need refcount? */
+	}
+
+	if (identity_mapped_irq(gsi)) {
+		irq = gsi;
+		dynamic_irq_init(irq);
+	} else
+		irq = find_unbound_irq();
+
+	set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
+				      handle_level_irq, "pirq");
+
+	irq_op.irq = irq;
+	if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
+		dynamic_irq_cleanup(irq);
+		irq = -ENOSPC;
+		goto out;
+	}
+
+	irq_info[irq] = mk_pirq_info(0, gsi, irq_op.vector);
+
+out:
+	spin_unlock(&irq_mapping_update_lock);
+
+	return irq;
+}
+
+int xen_vector_from_irq(unsigned irq)
+{
+	return vector_from_irq(irq);
+}
+
+int xen_gsi_from_irq(unsigned irq)
+{
+	return gsi_from_irq(irq);
+}
+
 int bind_evtchn_to_irq(unsigned int evtchn)
 {
 	int irq;
@@ -964,6 +1183,26 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
 	.retrigger	= retrigger_dynirq,
 };
 
+static struct irq_chip xen_pirq_chip __read_mostly = {
+	.name		= "xen-pirq",
+
+	.startup	= startup_pirq,
+	.shutdown	= shutdown_pirq,
+
+	.enable		= enable_pirq,
+	.unmask		= enable_pirq,
+
+	.disable	= disable_pirq,
+	.mask		= disable_pirq,
+
+	.ack		= ack_pirq,
+	.end		= end_pirq,
+
+	.set_affinity	= set_affinity_irq,
+
+	.retrigger	= retrigger_dynirq,
+};
+
 static struct irq_chip xen_percpu_chip __read_mostly = {
 	.name		= "xen-percpu",
 
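For context, here is a minimal caller-side sketch of how this interface is
meant to be driven. It is not part of the commit, and the function, handler
and device names are hypothetical: arch code asks Xen for a pirq with
xen_allocate_pirq() and then registers a handler as usual; request_irq()
ends up invoking xen_pirq_chip's startup_pirq(), which binds the pirq to an
event channel and unmasks it.

/* Hypothetical caller: map a GSI to a Xen pirq and install a handler. */
static int example_setup_gsi(unsigned gsi, irq_handler_t handler, void *dev)
{
	int irq = xen_allocate_pirq(gsi);	/* irq number, or -ENOSPC */

	if (irq < 0)
		return irq;

	/* request_irq() enables the line, which triggers startup_pirq(). */
	return request_irq(irq, handler, IRQF_SHARED, "example-device", dev);
}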