10
10
#include <drm/drm_managed.h>
11
11
12
12
#include "display/xe_display.h"
13
+ #include "regs/xe_guc_regs.h"
13
14
#include "regs/xe_irq_regs.h"
14
15
#include "xe_device.h"
15
16
#include "xe_drv.h"
29
30
/*
 * Each interrupt unit's IIR (Interrupt Identity) and IER (Interrupt Enable)
 * registers sit at fixed offsets from the unit's interrupt register base.
 * These must be function-like macros: no space before the parameter list,
 * and the argument parenthesized so expressions can be passed as @offset.
 */
#define IIR(offset)				XE_REG((offset) + 0x8)
#define IER(offset)				XE_REG((offset) + 0xc)
31
32
33
/* MSI-X helpers; definitions live at the bottom of this file. */
static int xe_irq_msix_init(struct xe_device *xe);
static void xe_irq_msix_free(struct xe_device *xe);
static int xe_irq_msix_request_irqs(struct xe_device *xe);
static void xe_irq_msix_synchronize_irq(struct xe_device *xe);
32
38
static void assert_iir_is_zero (struct xe_mmio * mmio , struct xe_reg reg )
33
39
{
34
40
u32 val = xe_mmio_read32 (mmio , reg );
@@ -572,6 +578,11 @@ static void xe_irq_reset(struct xe_device *xe)
572
578
if (IS_SRIOV_VF (xe ))
573
579
return vf_irq_reset (xe );
574
580
581
+ if (xe_device_uses_memirq (xe )) {
582
+ for_each_tile (tile , xe , id )
583
+ xe_memirq_reset (& tile -> memirq );
584
+ }
585
+
575
586
for_each_tile (tile , xe , id ) {
576
587
if (GRAPHICS_VERx100 (xe ) >= 1210 )
577
588
dg1_irq_reset (tile );
@@ -614,6 +625,14 @@ static void xe_irq_postinstall(struct xe_device *xe)
614
625
if (IS_SRIOV_VF (xe ))
615
626
return vf_irq_postinstall (xe );
616
627
628
+ if (xe_device_uses_memirq (xe )) {
629
+ struct xe_tile * tile ;
630
+ unsigned int id ;
631
+
632
+ for_each_tile (tile , xe , id )
633
+ xe_memirq_postinstall (& tile -> memirq );
634
+ }
635
+
617
636
xe_display_irq_postinstall (xe , xe_root_mmio_gt (xe ));
618
637
619
638
/*
@@ -656,60 +675,83 @@ static irq_handler_t xe_irq_handler(struct xe_device *xe)
656
675
return xelp_irq_handler ;
657
676
}
658
677
659
- static void irq_uninstall (void * arg )
678
+ static int xe_irq_msi_request_irqs (struct xe_device * xe )
679
+ {
680
+ struct pci_dev * pdev = to_pci_dev (xe -> drm .dev );
681
+ irq_handler_t irq_handler ;
682
+ int irq , err ;
683
+
684
+ irq_handler = xe_irq_handler (xe );
685
+ if (!irq_handler ) {
686
+ drm_err (& xe -> drm , "No supported interrupt handler" );
687
+ return - EINVAL ;
688
+ }
689
+
690
+ irq = pci_irq_vector (pdev , 0 );
691
+ err = request_irq (irq , irq_handler , IRQF_SHARED , DRIVER_NAME , xe );
692
+ if (err < 0 ) {
693
+ drm_err (& xe -> drm , "Failed to request MSI IRQ %d\n" , err );
694
+ return err ;
695
+ }
696
+
697
+ return 0 ;
698
+ }
699
+
700
+ static void xe_irq_msi_free (struct xe_device * xe )
660
701
{
661
- struct xe_device * xe = arg ;
662
702
struct pci_dev * pdev = to_pci_dev (xe -> drm .dev );
663
703
int irq ;
664
704
705
+ irq = pci_irq_vector (pdev , 0 );
706
+ free_irq (irq , xe );
707
+ }
708
+
709
+ static void irq_uninstall (void * arg )
710
+ {
711
+ struct xe_device * xe = arg ;
712
+
665
713
if (!atomic_xchg (& xe -> irq .enabled , 0 ))
666
714
return ;
667
715
668
716
xe_irq_reset (xe );
669
717
670
- irq = pci_irq_vector (pdev , 0 );
671
- free_irq (irq , xe );
718
+ if (xe_device_has_msix (xe ))
719
+ xe_irq_msix_free (xe );
720
+ else
721
+ xe_irq_msi_free (xe );
722
+ }
723
+
724
+ int xe_irq_init (struct xe_device * xe )
725
+ {
726
+ spin_lock_init (& xe -> irq .lock );
727
+
728
+ return xe_irq_msix_init (xe );
672
729
}
673
730
674
731
int xe_irq_install (struct xe_device * xe )
675
732
{
676
733
struct pci_dev * pdev = to_pci_dev (xe -> drm .dev );
677
- unsigned int irq_flags = PCI_IRQ_MSIX ;
678
- irq_handler_t irq_handler ;
679
- int err , irq , nvec ;
680
-
681
- irq_handler = xe_irq_handler (xe );
682
- if (!irq_handler ) {
683
- drm_err (& xe -> drm , "No supported interrupt handler" );
684
- return - EINVAL ;
685
- }
734
+ unsigned int irq_flags = PCI_IRQ_MSI ;
735
+ int nvec = 1 ;
736
+ int err ;
686
737
687
738
xe_irq_reset (xe );
688
739
689
- nvec = pci_msix_vec_count (pdev );
690
- if (nvec <= 0 ) {
691
- if (nvec == - EINVAL ) {
692
- /* MSIX capability is not supported in the device, using MSI */
693
- irq_flags = PCI_IRQ_MSI ;
694
- nvec = 1 ;
695
- } else {
696
- drm_err (& xe -> drm , "MSIX: Failed getting count\n" );
697
- return nvec ;
698
- }
740
+ if (xe_device_has_msix (xe )) {
741
+ nvec = xe -> irq .msix .nvec ;
742
+ irq_flags = PCI_IRQ_MSIX ;
699
743
}
700
744
701
745
err = pci_alloc_irq_vectors (pdev , nvec , nvec , irq_flags );
702
746
if (err < 0 ) {
703
- drm_err (& xe -> drm , "MSI/MSIX: Failed to enable support %d\n" , err );
747
+ drm_err (& xe -> drm , "Failed to allocate IRQ vectors: %d\n" , err );
704
748
return err ;
705
749
}
706
750
707
- irq = pci_irq_vector (pdev , 0 );
708
- err = request_irq (irq , irq_handler , IRQF_SHARED , DRIVER_NAME , xe );
709
- if (err < 0 ) {
710
- drm_err (& xe -> drm , "Failed to request MSI/MSIX IRQ %d\n" , err );
751
+ err = xe_device_has_msix (xe ) ? xe_irq_msix_request_irqs (xe ) :
752
+ xe_irq_msi_request_irqs (xe );
753
+ if (err )
711
754
return err ;
712
- }
713
755
714
756
atomic_set (& xe -> irq .enabled , 1 );
715
757
@@ -722,18 +764,28 @@ int xe_irq_install(struct xe_device *xe)
722
764
return 0 ;
723
765
724
766
free_irq_handler :
725
- free_irq (irq , xe );
767
+ if (xe_device_has_msix (xe ))
768
+ xe_irq_msix_free (xe );
769
+ else
770
+ xe_irq_msi_free (xe );
726
771
727
772
return err ;
728
773
}
729
774
730
- void xe_irq_suspend (struct xe_device * xe )
775
+ static void xe_irq_msi_synchronize_irq (struct xe_device * xe )
731
776
{
732
- int irq = to_pci_dev (xe -> drm .dev )-> irq ;
777
+ synchronize_irq (to_pci_dev (xe -> drm .dev )-> irq );
778
+ }
733
779
780
+ void xe_irq_suspend (struct xe_device * xe )
781
+ {
734
782
atomic_set (& xe -> irq .enabled , 0 ); /* no new irqs */
735
783
736
- synchronize_irq (irq ); /* flush irqs */
784
+ /* flush irqs */
785
+ if (xe_device_has_msix (xe ))
786
+ xe_irq_msix_synchronize_irq (xe );
787
+ else
788
+ xe_irq_msi_synchronize_irq (xe );
737
789
xe_irq_reset (xe ); /* turn irqs off */
738
790
}
739
791
@@ -754,3 +806,142 @@ void xe_irq_resume(struct xe_device *xe)
754
806
for_each_gt (gt , xe , id )
755
807
xe_irq_enable_hwe (gt );
756
808
}
809
+
810
+ /* MSI-X related definitions and functions below. */
811
+
812
+ enum xe_irq_msix_static {
813
+ GUC2HOST_MSIX = 0 ,
814
+ DEFAULT_MSIX = XE_IRQ_DEFAULT_MSIX ,
815
+ /* Must be last */
816
+ NUM_OF_STATIC_MSIX ,
817
+ };
818
+
819
+ static int xe_irq_msix_init (struct xe_device * xe )
820
+ {
821
+ struct pci_dev * pdev = to_pci_dev (xe -> drm .dev );
822
+ int nvec = pci_msix_vec_count (pdev );
823
+
824
+ if (nvec == - EINVAL )
825
+ return 0 ; /* MSI */
826
+
827
+ if (nvec < 0 ) {
828
+ drm_err (& xe -> drm , "Failed getting MSI-X vectors count: %d\n" , nvec );
829
+ return nvec ;
830
+ }
831
+
832
+ xe -> irq .msix .nvec = nvec ;
833
+ return 0 ;
834
+ }
835
+
836
+ static irqreturn_t guc2host_irq_handler (int irq , void * arg )
837
+ {
838
+ struct xe_device * xe = arg ;
839
+ struct xe_tile * tile ;
840
+ u8 id ;
841
+
842
+ if (!atomic_read (& xe -> irq .enabled ))
843
+ return IRQ_NONE ;
844
+
845
+ for_each_tile (tile , xe , id )
846
+ xe_guc_irq_handler (& tile -> primary_gt -> uc .guc ,
847
+ GUC_INTR_GUC2HOST );
848
+
849
+ return IRQ_HANDLED ;
850
+ }
851
+
852
+ static irqreturn_t xe_irq_msix_default_hwe_handler (int irq , void * arg )
853
+ {
854
+ unsigned int tile_id , gt_id ;
855
+ struct xe_device * xe = arg ;
856
+ struct xe_memirq * memirq ;
857
+ struct xe_hw_engine * hwe ;
858
+ enum xe_hw_engine_id id ;
859
+ struct xe_tile * tile ;
860
+ struct xe_gt * gt ;
861
+
862
+ if (!atomic_read (& xe -> irq .enabled ))
863
+ return IRQ_NONE ;
864
+
865
+ for_each_tile (tile , xe , tile_id ) {
866
+ memirq = & tile -> memirq ;
867
+ if (!memirq -> bo )
868
+ continue ;
869
+
870
+ for_each_gt (gt , xe , gt_id ) {
871
+ if (gt -> tile != tile )
872
+ continue ;
873
+
874
+ for_each_hw_engine (hwe , gt , id )
875
+ xe_memirq_hwe_handler (memirq , hwe );
876
+ }
877
+ }
878
+
879
+ return IRQ_HANDLED ;
880
+ }
881
+
882
+ static int xe_irq_msix_request_irq (struct xe_device * xe , irq_handler_t handler ,
883
+ const char * name , u16 msix )
884
+ {
885
+ struct pci_dev * pdev = to_pci_dev (xe -> drm .dev );
886
+ int ret , irq ;
887
+
888
+ irq = pci_irq_vector (pdev , msix );
889
+ if (irq < 0 )
890
+ return irq ;
891
+
892
+ ret = request_irq (irq , handler , IRQF_SHARED , name , xe );
893
+ if (ret < 0 )
894
+ return ret ;
895
+
896
+ return 0 ;
897
+ }
898
+
899
+ static void xe_irq_msix_free_irq (struct xe_device * xe , u16 msix )
900
+ {
901
+ struct pci_dev * pdev = to_pci_dev (xe -> drm .dev );
902
+ int irq ;
903
+
904
+ irq = pci_irq_vector (pdev , msix );
905
+ if (irq < 0 ) {
906
+ drm_err (& xe -> drm , "MSI-X %u can't be released, there is no matching IRQ\n" , msix );
907
+ return ;
908
+ }
909
+
910
+ free_irq (irq , xe );
911
+ }
912
+
913
+ static int xe_irq_msix_request_irqs (struct xe_device * xe )
914
+ {
915
+ int err ;
916
+
917
+ err = xe_irq_msix_request_irq (xe , guc2host_irq_handler ,
918
+ DRIVER_NAME "-guc2host" , GUC2HOST_MSIX );
919
+ if (err ) {
920
+ drm_err (& xe -> drm , "Failed to request MSI-X IRQ %d: %d\n" , GUC2HOST_MSIX , err );
921
+ return err ;
922
+ }
923
+
924
+ err = xe_irq_msix_request_irq (xe , xe_irq_msix_default_hwe_handler ,
925
+ DRIVER_NAME "-default-msix" , DEFAULT_MSIX );
926
+ if (err ) {
927
+ drm_err (& xe -> drm , "Failed to request MSI-X IRQ %d: %d\n" , DEFAULT_MSIX , err );
928
+ xe_irq_msix_free_irq (xe , GUC2HOST_MSIX );
929
+ return err ;
930
+ }
931
+
932
+ return 0 ;
933
+ }
934
+
935
+ static void xe_irq_msix_free (struct xe_device * xe )
936
+ {
937
+ xe_irq_msix_free_irq (xe , GUC2HOST_MSIX );
938
+ xe_irq_msix_free_irq (xe , DEFAULT_MSIX );
939
+ }
940
+
941
+ static void xe_irq_msix_synchronize_irq (struct xe_device * xe )
942
+ {
943
+ struct pci_dev * pdev = to_pci_dev (xe -> drm .dev );
944
+
945
+ synchronize_irq (pci_irq_vector (pdev , GUC2HOST_MSIX ));
946
+ synchronize_irq (pci_irq_vector (pdev , DEFAULT_MSIX ));
947
+ }