 #define ARM_SPI_BIND_CPU_ID 0
 #endif

-#ifndef RT_USING_SMP
+#if !defined(RT_USING_SMP) && !defined(RT_USING_AMP)
 #define RT_CPUS_NR 1
-extern int rt_hw_cpu_id(void);
 #else
 extern rt_uint64_t rt_cpu_mpidr_early[];
 #endif /* RT_USING_SMP */
@@ -114,6 +113,7 @@ static unsigned int _gic_max_irq;
 /* Macro to access the Generic Interrupt Controller Distributor (GICD) */
 #define GIC_DIST_CTRL(hw_base)              HWREG32((hw_base) + 0x000U)
 #define GIC_DIST_TYPE(hw_base)              HWREG32((hw_base) + 0x004U)
+#define GIC_DIST_IIDR(hw_base)              HWREG32((hw_base) + 0x008U)
 #define GIC_DIST_IGROUP(hw_base, n)         HWREG32((hw_base) + 0x080U + ((n) / 32U) * 4U)
 #define GIC_DIST_ENABLE_SET(hw_base, n)     HWREG32((hw_base) + 0x100U + ((n) / 32U) * 4U)
 #define GIC_DIST_ENABLE_CLEAR(hw_base, n)   HWREG32((hw_base) + 0x180U + ((n) / 32U) * 4U)
@@ -335,6 +335,26 @@ void arm_gic_clear_active(rt_uint64_t index, int irq)
     GIC_DIST_ACTIVE_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
 }

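+/*
+ * Route a SPI (INTID >= 32) to a single PE through GICD_IROUTER<n>.
+ * Only the Aff3.Aff2.Aff1.Aff0 fields of the affinity value are kept
+ * (mask 0xff00ffffff), which also leaves IRM = 0 (route to that PE only).
+ */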
+void arm_gic_set_router_cpu(rt_uint64_t index, int irq, rt_uint64_t aff)
+{
+    RT_ASSERT(index < ARM_GIC_MAX_NR);
+
+    irq = irq - _gic_table[index].offset;
+    RT_ASSERT(irq >= 32);
+
+    GIC_DIST_IROUTER(_gic_table[index].dist_hw_base, irq) = aff & 0xff00ffffffULL;
+}
+
+rt_uint64_t arm_gic_get_router_cpu(rt_uint64_t index, int irq)
+{
+    RT_ASSERT(index < ARM_GIC_MAX_NR);
+
+    irq = irq - _gic_table[index].offset;
+    RT_ASSERT(irq >= 32);
+
+    return GIC_DIST_IROUTER(_gic_table[index].dist_hw_base, irq);
+}
+

 /* Set up the cpu mask for the specific interrupt */
 void arm_gic_set_cpu(rt_uint64_t index, int irq, unsigned int cpumask)
 {
@@ -478,76 +498,118 @@ rt_uint64_t arm_gic_get_irq_status(rt_uint64_t index, int irq)
     return ((active << 1) | pending);
 }

-#ifdef RT_USING_SMP
-void arm_gic_send_affinity_sgi(rt_uint64_t index, int irq, rt_uint32_t cpu_masks[], rt_uint64_t routing_mode)
+#if defined(RT_USING_SMP) || defined(RT_USING_AMP)
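+/*
+ * One ICC_SGI1R_EL1 write can only target up to 16 PEs that share the same
+ * Aff3.Aff2.Aff1 (and range selector), so SGI targets are grouped by affinity:
+ * each entry caches the pre-computed ICC_SGI1R_EL1 affinity fields, the mask
+ * of logical CPU indexes in that group, and the TargetList for the next send.
+ */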
+struct gicv3_sgi_aff
 {
-    const int cpu_mask_cpu_max_nr = sizeof(cpu_masks[0]) * 8;
-    rt_uint64_t int_id = (irq & 0xf) << 24;
-    rt_uint64_t irm = routing_mode << 40;   /* Interrupt Routing Mode */
+    rt_uint64_t aff;
+    rt_uint32_t cpu_mask[(RT_CPUS_NR + 31) >> 5];
+    rt_uint16_t target_list;
+};

-    if (routing_mode == GICV3_ROUTED_TO_SPEC)
+static struct gicv3_sgi_aff sgi_aff_table[RT_CPUS_NR];
+static rt_uint64_t sgi_aff_table_num;
+static void sgi_aff_add_table(rt_uint64_t aff, rt_uint64_t cpu_index)
+{
+    rt_uint64_t i;
+
+    for (i = 0; i < sgi_aff_table_num; i++)
     {
-        int cpu_id, cpu_mask_bit, i, cpu_masks_nr = RT_CPUS_NR / cpu_mask_cpu_max_nr;
-        rt_uint16_t target_list;
-        rt_uint64_t rs = 0;                 /* Range Selector */
-        rt_uint64_t affinity_val, next_affinity_val;
+        if (sgi_aff_table[i].aff == aff)
+        {
+            sgi_aff_table[i].cpu_mask[cpu_index >> 5] |= (1 << (cpu_index & 0x1F));
+            return;
+        }
+    }
+
+    sgi_aff_table[sgi_aff_table_num].aff = aff;
+    sgi_aff_table[sgi_aff_table_num].cpu_mask[cpu_index >> 5] |= (1 << (cpu_index & 0x1F));
+    sgi_aff_table_num++;
+}
+
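+/*
+ * Build the affinity table once: pack each CPU's MPIDR fields into the
+ * ICC_SGI1R_EL1 layout (Aff1 -> [23:16], Aff2 -> [39:32], Aff3 -> [55:48],
+ * RS -> [47:44]) and group CPUs sharing the same value. Returns the number
+ * of 32-bit words expected in a cpu_masks[] argument.
+ */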
+static rt_uint64_t gicv3_sgi_init(void)
+{
+    rt_uint64_t i, icc_sgi1r_value;

-        if (cpu_masks_nr * cpu_mask_cpu_max_nr != RT_CPUS_NR)
+    for (i = 0; i < RT_CPUS_NR; i++)
+    {
+        icc_sgi1r_value = (rt_uint64_t)((rt_cpu_mpidr_early[i] >> 8) & 0xFF) << 16;
+        icc_sgi1r_value |= (rt_uint64_t)((rt_cpu_mpidr_early[i] >> 16) & 0xFF) << 32;
+        icc_sgi1r_value |= (rt_uint64_t)((rt_cpu_mpidr_early[i] >> 32) & 0xFF) << 48;
+        icc_sgi1r_value |= (rt_uint64_t)((rt_cpu_mpidr_early[i] >> 4) & 0xF) << 44;
+        sgi_aff_add_table(icc_sgi1r_value, i);
+    }
+
+    return (RT_CPUS_NR + 31) >> 5;
+}
+
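+/* Issue one ICC_SGI1R_EL1 write per affinity group with a pending TargetList,
+ * then clear the list for the next call. */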
+rt_inline void gicv3_sgi_send(rt_uint64_t int_id)
+{
+    rt_uint64_t i;
+    for (i = 0; i < sgi_aff_table_num; i++)
+    {
+        if (sgi_aff_table[i].target_list)
         {
-            ++cpu_masks_nr;
+            __DSB();
+            /* Interrupts routed to the PEs specified by Aff3.Aff2.Aff1.<target list>. */
+            SET_GICV3_REG(ICC_SGI1R_EL1, sgi_aff_table[i].aff | int_id | sgi_aff_table[i].target_list);
+            __ISB();
+            sgi_aff_table[i].target_list = 0;
         }
+    }
+}
+
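+/* For one 32-bit word of the caller's cpu_masks[] (word index "array"), walk
+ * the set bits and accumulate each selected CPU's Aff0 (taken from its MPIDR)
+ * into the TargetList of the affinity group covering this word. */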
+rt_inline void gicv3_sgi_target_list_set(rt_uint64_t array, rt_uint32_t cpu_mask)
+{
+    rt_uint64_t i, value;

-        for (i = cpu_id = 0; i < cpu_masks_nr;)
+    for (i = 0; i < sgi_aff_table_num; i++)
+    {
+        if (sgi_aff_table[i].cpu_mask[array] & cpu_mask)
         {
-            /* No cpu in this mask */
-            if (cpu_masks[i] == 0)
+            while (cpu_mask)
             {
-                ++i;
-                cpu_id += cpu_mask_cpu_max_nr;
-                continue;
+                value = __builtin_ctzl(cpu_mask);
+                cpu_mask &= ~(1 << value);
+                sgi_aff_table[i].target_list |= 1 << (rt_cpu_mpidr_early[(array << 5) | value] & 0xF);
             }
+        }
+    }
+}
+
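+/*
+ * Send an SGI to a specific set of CPUs: lazily build the affinity table on
+ * first use, translate the cpu_masks[] words into per-group TargetLists, then
+ * emit one ICC_SGI1R_EL1 write per group.
+ */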
+void arm_gic_send_affinity_sgi(rt_uint64_t index, int irq, rt_uint32_t cpu_masks[], rt_uint64_t routing_mode)
+{
+    rt_uint64_t i;
+    rt_uint64_t int_id = (irq & 0xf) << 24;
+    static rt_uint64_t masks_nrs = 0;

-            /* Get last cpu affinity value */
-            affinity_val = rt_cpu_mpidr_early[cpu_id] & 0xff00ffff00ULL;
+    if (routing_mode == GICV3_ROUTED_TO_SPEC)
+    {
+        if (!masks_nrs)
+        {
+            masks_nrs = gicv3_sgi_init();
+        }

-            /* Read 16 cpus information */
-            for (cpu_mask_bit = 0; cpu_mask_bit < 16; ++cpu_mask_bit, ++cpu_id)
+        for (i = 0; i < masks_nrs; i++)
+        {
+            if (cpu_masks[i] == 0)
             {
-                /* MPIDR_EL1: aff3[39:32], aff2[23:16], aff1[15:8] */
-                next_affinity_val = rt_cpu_mpidr_early[cpu_id] & 0xff00ffff00ULL;
-
-                /* Affinity value is different, read end */
-                if (affinity_val != next_affinity_val)
-                {
-                    break;
-                }
+                continue;
             }

-            /* Get all valid cpu mask */
-            target_list = (0xffff >> (16 - cpu_mask_bit)) & cpu_masks[i];
-            /* Clear read mask */
-            cpu_masks[i] >>= cpu_mask_bit;
-            /* ICC_SGI1R_EL1: aff3[55:48], aff2[39:32], aff1[23:16] */
-            affinity_val <<= 8;
-
-            __DSB();
-            /* Interrupts routed to the PEs specified by Aff3.Aff2.Aff1.<target list>. */
-            SET_GICV3_REG(ICC_SGI1R_EL1, affinity_val | (rs << 44) | irm | int_id | target_list);
-            __ISB();
-
-            /* Check if reset the range selector */
-            rs = affinity_val != next_affinity_val ? 0 : rs + 1;
+            gicv3_sgi_target_list_set(i, cpu_masks[i]);
         }
+
+        gicv3_sgi_send(int_id);
     }
     else
     {
         __DSB();
         /* Interrupts routed to all PEs in the system, excluding "self". */
-        SET_GICV3_REG(ICC_SGI1R_EL1, irm | int_id);
+        SET_GICV3_REG(ICC_SGI1R_EL1, (0x10000000000ULL) | int_id);
         __ISB();
     }
 }
-#endif /* RT_USING_SMP */
+#endif /* defined(RT_USING_SMP) || defined(RT_USING_AMP) */

 rt_uint64_t arm_gic_get_high_pending_irq(rt_uint64_t index)
 {
@@ -636,6 +698,8 @@ int arm_gic_dist_init(rt_uint64_t index, rt_uint64_t dist_base, int irq_start)
     unsigned int gic_type;
     rt_uint64_t main_cpu_affinity_val;

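+    /* Depending on the configuration (e.g. RT_AMP_SLAVE) the code below may be
+     * compiled out, leaving these locals unused; silence the warnings. */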
+    RT_UNUSED(i);
+    RT_UNUSED(main_cpu_affinity_val);
     RT_ASSERT(index < ARM_GIC_MAX_NR);

     _gic_table[index].dist_hw_base = dist_base;
@@ -660,6 +724,8 @@ int arm_gic_dist_init(rt_uint64_t index, rt_uint64_t dist_base, int irq_start)
         _gic_max_irq = ARM_GIC_NR_IRQS;
     }

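+/* On an AMP slave the distributor is assumed to be owned and initialized by
+ * the master image, so the GICD reset and configuration below are skipped. */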
+#ifndef RT_AMP_SLAVE
+
     GIC_DIST_CTRL(dist_base) = 0;
     /* Wait for register write pending */
     arm_gicv3_wait_rwp(0, 32);
@@ -724,6 +790,7 @@ int arm_gic_dist_init(rt_uint64_t index, rt_uint64_t dist_base, int irq_start)
      */
     GIC_DIST_CTRL(dist_base) = GICD_CTLR_ARE_NS | GICD_CTLR_ENGRP1NS;

+#endif /* RT_AMP_SLAVE */
     return 0;
 }

@@ -811,13 +878,19 @@ int arm_gic_cpu_init(rt_uint64_t index, rt_uint64_t cpu_base)
 void arm_gic_dump_type(rt_uint64_t index)
 {
     unsigned int gic_type;
+    unsigned int gic_version;
+    unsigned int gic_rp;

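+    /* GICD_IIDR: ProductID in [31:24], Variant ("r") in [19:16], Revision ("p") in [15:12] */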
+    gic_version = (GIC_DIST_IIDR(_gic_table[index].dist_hw_base) >> 24) & 0xfUL;
+    gic_rp = (GIC_DIST_IIDR(_gic_table[index].dist_hw_base) >> 12) & 0xffUL;
     gic_type = GIC_DIST_TYPE(_gic_table[index].dist_hw_base);
-    rt_kprintf("GICv%d on %p, max IRQs: %d, %s security extension(%08x)\n",
-               (GIC_DIST_ICPIDR2(_gic_table[index].dist_hw_base) >> 4) & 0xf,
+    rt_kprintf("GICv3-%d r%dp%d on %p, max IRQs: %d, %s security extension(%08x)\n",
+               (gic_version == 0) ? 500 : (gic_version == 2) ? 600 : 0,
+               (gic_rp >> 4) & 0xF,
+               gic_rp & 0xF,
                _gic_table[index].dist_hw_base,
                _gic_max_irq,
-               gic_type & (1 << 10) ? "has" : "no",
+               gic_type & (1U << 10U) ? "has" : "no",
                gic_type);
 }

@@ -850,10 +923,36 @@ void arm_gic_dump(rt_uint64_t index)
     rt_kprintf("\b\b\n");
 }

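+/* Debug helper: print the GICD_IROUTER affinity of every SPI. */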
+static void arm_gic_bind_dump(void)
+{
+#ifdef BSP_USING_GICV3
+    int i;
+    for (i = 32; i < _gic_max_irq; i++)
+    {
+        rt_kprintf("irq(%d) -> 0x%X\n", i, arm_gic_get_router_cpu(0, i));
+    }
+#endif /* BSP_USING_GICV3 */
+}
+
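+/* Debug helper: print this CPU's redistributor SGI enable/pending/active state. */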
+static void arm_gic_sgi_dump(rt_uint64_t index)
+{
+    rt_int32_t cpu_id = rt_hw_cpu_id();
+
+    rt_kprintf("redist_hw_base = 0x%X\n", _gic_table[index].redist_hw_base[cpu_id]);
+    rt_kprintf("--- sgi mask ---\n");
+    rt_kprintf("0x%08x\n", GIC_RDISTSGI_ISENABLER0(_gic_table[index].redist_hw_base[cpu_id]));
+    rt_kprintf("--- sgi pending ---\n");
+    rt_kprintf("0x%08x\n", GIC_RDISTSGI_ISPENDR0(_gic_table[index].redist_hw_base[cpu_id]));
+    rt_kprintf("--- sgi active ---\n");
+    rt_kprintf("0x%08x\n", GIC_RDISTSGI_ISACTIVER0(_gic_table[index].redist_hw_base[cpu_id]));
+}
+
 long gic_dump(void)
 {
     arm_gic_dump_type(0);
     arm_gic_dump(0);
+    arm_gic_bind_dump();
+    arm_gic_sgi_dump(0);

     return 0;
 }