@@ -586,3 +586,106 @@ static struct irq_chip its_irq_chip = {
586
586
.irq_eoi = its_eoi_irq ,
587
587
.irq_set_affinity = its_set_affinity ,
588
588
};
589
/*
 * How we allocate LPIs:
 *
 * The GIC has id_bits bits for interrupt identifiers. From there, we
 * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as
 * we allocate LPIs by chunks of 32, we can shift the whole thing by 5
 * bits to the right.
 *
 * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
 */
#define IRQS_PER_CHUNK_SHIFT	5
#define IRQS_PER_CHUNK		(1 << IRQS_PER_CHUNK_SHIFT)

/* One bit per chunk of 32 LPIs: set when the chunk is allocated */
static unsigned long *lpi_bitmap;
/* Number of chunks tracked by lpi_bitmap */
static u32 lpi_chunks;
/* Protects lpi_bitmap and lpi_chunks */
static DEFINE_SPINLOCK(lpi_lock);
606
+
607
+ static int its_lpi_to_chunk (int lpi )
608
+ {
609
+ return (lpi - 8192 ) >> IRQS_PER_CHUNK_SHIFT ;
610
+ }
611
+
612
+ static int its_chunk_to_lpi (int chunk )
613
+ {
614
+ return (chunk << IRQS_PER_CHUNK_SHIFT ) + 8192 ;
615
+ }
616
+
617
+ static int its_lpi_init (u32 id_bits )
618
+ {
619
+ lpi_chunks = its_lpi_to_chunk (1UL << id_bits );
620
+
621
+ lpi_bitmap = kzalloc (BITS_TO_LONGS (lpi_chunks ) * sizeof (long ),
622
+ GFP_KERNEL );
623
+ if (!lpi_bitmap ) {
624
+ lpi_chunks = 0 ;
625
+ return - ENOMEM ;
626
+ }
627
+
628
+ pr_info ("ITS: Allocated %d chunks for LPIs\n" , (int )lpi_chunks );
629
+ return 0 ;
630
+ }
631
+
632
+ static unsigned long * its_lpi_alloc_chunks (int nr_irqs , int * base , int * nr_ids )
633
+ {
634
+ unsigned long * bitmap = NULL ;
635
+ int chunk_id ;
636
+ int nr_chunks ;
637
+ int i ;
638
+
639
+ nr_chunks = DIV_ROUND_UP (nr_irqs , IRQS_PER_CHUNK );
640
+
641
+ spin_lock (& lpi_lock );
642
+
643
+ do {
644
+ chunk_id = bitmap_find_next_zero_area (lpi_bitmap , lpi_chunks ,
645
+ 0 , nr_chunks , 0 );
646
+ if (chunk_id < lpi_chunks )
647
+ break ;
648
+
649
+ nr_chunks -- ;
650
+ } while (nr_chunks > 0 );
651
+
652
+ if (!nr_chunks )
653
+ goto out ;
654
+
655
+ bitmap = kzalloc (BITS_TO_LONGS (nr_chunks * IRQS_PER_CHUNK ) * sizeof (long ),
656
+ GFP_ATOMIC );
657
+ if (!bitmap )
658
+ goto out ;
659
+
660
+ for (i = 0 ; i < nr_chunks ; i ++ )
661
+ set_bit (chunk_id + i , lpi_bitmap );
662
+
663
+ * base = its_chunk_to_lpi (chunk_id );
664
+ * nr_ids = nr_chunks * IRQS_PER_CHUNK ;
665
+
666
+ out :
667
+ spin_unlock (& lpi_lock );
668
+
669
+ return bitmap ;
670
+ }
671
+
672
+ static void its_lpi_free (unsigned long * bitmap , int base , int nr_ids )
673
+ {
674
+ int lpi ;
675
+
676
+ spin_lock (& lpi_lock );
677
+
678
+ for (lpi = base ; lpi < (base + nr_ids ); lpi += IRQS_PER_CHUNK ) {
679
+ int chunk = its_lpi_to_chunk (lpi );
680
+ BUG_ON (chunk > lpi_chunks );
681
+ if (test_bit (chunk , lpi_bitmap )) {
682
+ clear_bit (chunk , lpi_bitmap );
683
+ } else {
684
+ pr_err ("Bad LPI chunk %d\n" , chunk );
685
+ }
686
+ }
687
+
688
+ spin_unlock (& lpi_lock );
689
+
690
+ kfree (bitmap );
691
+ }
0 commit comments