@@ -2,6 +2,11 @@
 
 static struct intel_uncore_type *empty_uncore[] = { NULL, };
 static struct intel_uncore_type **msr_uncores = empty_uncore;
+static struct intel_uncore_type **pci_uncores = empty_uncore;
+/* pci bus to socket mapping */
+static int pcibus_to_physid[256] = { [0 ... 255] = -1, };
+
+static DEFINE_RAW_SPINLOCK(uncore_box_lock);
 
 /* mask of cpus that collect uncore events */
 static cpumask_t uncore_cpu_mask;
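The new pcibus_to_physid table maps a PCI bus number to the physical package (socket) behind it; every entry starts at -1, and uncore_pci_add() below rejects devices on unmapped buses. This hunk only declares the table; filling it in is left to model-specific setup code. A minimal sketch of how such setup might look, assuming a hypothetical per-socket uncore device and an assumed config register holding the node id (the device id 0x3ce0 and offset 0x40 are illustrative assumptions, not part of this patch):

static int __init uncore_pci2phy_map_init(void)
{
        struct pci_dev *dev = NULL;
        u32 nodeid;

        /* One such device is assumed to exist per socket; the low
         * bits of the assumed register hold the node id. */
        while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3ce0, dev))) {
                pci_read_config_dword(dev, 0x40, &nodeid);
                pcibus_to_physid[dev->bus->number] = nodeid & 0x7;
        }
        return 0;
}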
@@ -205,13 +210,13 @@ static void uncore_assign_hw_event(struct intel_uncore_box *box,
 	hwc->last_tag = ++box->tags[idx];
 
 	if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
-		hwc->event_base = uncore_msr_fixed_ctr(box);
-		hwc->config_base = uncore_msr_fixed_ctl(box);
+		hwc->event_base = uncore_fixed_ctr(box);
+		hwc->config_base = uncore_fixed_ctl(box);
 		return;
 	}
 
-	hwc->config_base = uncore_msr_event_ctl(box, hwc->idx);
-	hwc->event_base = uncore_msr_perf_ctr(box, hwc->idx);
+	hwc->config_base = uncore_event_ctl(box, hwc->idx);
+	hwc->event_base = uncore_perf_ctr(box, hwc->idx);
 }
 
 static void uncore_perf_event_update(struct intel_uncore_box *box,
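Dropping the msr_ prefix from these accessors reflects that a box may now sit behind either MSRs or PCI config space. The generic helpers live in the header rather than in this hunk; presumably they dispatch on whether the box carries a PCI device, along these lines (a sketch under that assumption, not the verbatim header change):

static inline unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
{
        if (box->pci_dev)
                return uncore_pci_event_ctl(box, idx); /* config-space offset */
        return uncore_msr_event_ctl(box, idx);         /* MSR address */
}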
@@ -305,6 +310,22 @@ struct intel_uncore_box *uncore_alloc_box(int cpu)
 static struct intel_uncore_box *
 uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
 {
+	struct intel_uncore_box *box;
+
+	box = *per_cpu_ptr(pmu->box, cpu);
+	if (box)
+		return box;
+
+	raw_spin_lock(&uncore_box_lock);
+	list_for_each_entry(box, &pmu->box_list, list) {
+		if (box->phys_id == topology_physical_package_id(cpu)) {
+			atomic_inc(&box->refcnt);
+			*per_cpu_ptr(pmu->box, cpu) = box;
+			break;
+		}
+	}
+	raw_spin_unlock(&uncore_box_lock);
+
 	return *per_cpu_ptr(pmu->box, cpu);
 }
 
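uncore_pmu_to_box() now resolves a box lazily: the per-cpu pointer is a cache, and on a miss the pmu's box list is searched by physical package id under uncore_box_lock, taking a reference before caching. A hedged sketch of how the event-init path is expected to consume this (the helper name and exact checks here are assumptions for illustration):

/* Illustrative only: route an event to the cpu managing the
 * package's box. */
static int uncore_event_bind_box(struct perf_event *event,
                                 struct intel_uncore_pmu *pmu)
{
        struct intel_uncore_box *box;

        box = uncore_pmu_to_box(pmu, event->cpu);
        if (!box || box->cpu < 0)
                return -EINVAL; /* no box on this package yet */
        event->cpu = box->cpu;  /* hand the event to the managing cpu */
        return 0;
}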
@@ -706,6 +727,13 @@ static void __init uncore_type_exit(struct intel_uncore_type *type)
 	type->attr_groups[1] = NULL;
 }
 
+static void uncore_types_exit(struct intel_uncore_type **types)
+{
+	int i;
+	for (i = 0; types[i]; i++)
+		uncore_type_exit(types[i]);
+}
+
 static int __init uncore_type_init(struct intel_uncore_type *type)
 {
 	struct intel_uncore_pmu *pmus;
@@ -725,6 +753,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type)
 		pmus[i].func_id = -1;
 		pmus[i].pmu_idx = i;
 		pmus[i].type = type;
+		INIT_LIST_HEAD(&pmus[i].box_list);
 		pmus[i].box = alloc_percpu(struct intel_uncore_box *);
 		if (!pmus[i].box)
 			goto fail;
@@ -773,6 +802,127 @@ static int __init uncore_types_init(struct intel_uncore_type **types)
 	return ret;
 }
 
+static struct pci_driver *uncore_pci_driver;
+static bool pcidrv_registered;
+
+/*
+ * add a pci uncore device
+ */
+static int __devinit uncore_pci_add(struct intel_uncore_type *type,
+				    struct pci_dev *pdev)
+{
+	struct intel_uncore_pmu *pmu;
+	struct intel_uncore_box *box;
+	int i, phys_id;
+
+	phys_id = pcibus_to_physid[pdev->bus->number];
+	if (phys_id < 0)
+		return -ENODEV;
+
+	box = uncore_alloc_box(0);
+	if (!box)
+		return -ENOMEM;
+
+	/*
+	 * for performance monitoring unit with multiple boxes,
+	 * each box has a different function id.
+	 */
+	for (i = 0; i < type->num_boxes; i++) {
+		pmu = &type->pmus[i];
+		if (pmu->func_id == pdev->devfn)
+			break;
+		if (pmu->func_id < 0) {
+			pmu->func_id = pdev->devfn;
+			break;
+		}
+		pmu = NULL;
+	}
+
+	if (!pmu) {
+		kfree(box);
+		return -EINVAL;
+	}
+
+	box->phys_id = phys_id;
+	box->pci_dev = pdev;
+	box->pmu = pmu;
+	uncore_box_init(box);
+	pci_set_drvdata(pdev, box);
+
+	raw_spin_lock(&uncore_box_lock);
+	list_add_tail(&box->list, &pmu->box_list);
+	raw_spin_unlock(&uncore_box_lock);
+
+	return 0;
+}
+
+static void __devexit uncore_pci_remove(struct pci_dev *pdev)
+{
+	struct intel_uncore_box *box = pci_get_drvdata(pdev);
+	struct intel_uncore_pmu *pmu = box->pmu;
+	int cpu, phys_id = pcibus_to_physid[pdev->bus->number];
+
+	if (WARN_ON_ONCE(phys_id != box->phys_id))
+		return;
+
+	raw_spin_lock(&uncore_box_lock);
+	list_del(&box->list);
+	raw_spin_unlock(&uncore_box_lock);
+
+	for_each_possible_cpu(cpu) {
+		if (*per_cpu_ptr(pmu->box, cpu) == box) {
+			*per_cpu_ptr(pmu->box, cpu) = NULL;
+			atomic_dec(&box->refcnt);
+		}
+	}
+
+	WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
+	kfree(box);
+}
+
+static int __devinit uncore_pci_probe(struct pci_dev *pdev,
+				      const struct pci_device_id *id)
+{
+	struct intel_uncore_type *type;
+
+	type = (struct intel_uncore_type *)id->driver_data;
+	return uncore_pci_add(type, pdev);
+}
+
+static int __init uncore_pci_init(void)
+{
+	int ret;
+
+	switch (boot_cpu_data.x86_model) {
+	default:
+		return 0;
+	}
+
+	ret = uncore_types_init(pci_uncores);
+	if (ret)
+		return ret;
+
+	uncore_pci_driver->probe = uncore_pci_probe;
+	uncore_pci_driver->remove = uncore_pci_remove;
+
+	ret = pci_register_driver(uncore_pci_driver);
+	if (ret == 0)
+		pcidrv_registered = true;
+	else
+		uncore_types_exit(pci_uncores);
+
+	return ret;
+}
+
+static void __init uncore_pci_exit(void)
+{
+	if (pcidrv_registered) {
+		pcidrv_registered = false;
+		pci_unregister_driver(uncore_pci_driver);
+		uncore_types_exit(pci_uncores);
+	}
+}
+
 static void __cpuinit uncore_cpu_dying(int cpu)
 {
 	struct intel_uncore_type *type;
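Note that uncore_pci_init()'s switch has only a default: return 0 arm, and uncore_pci_driver starts out NULL: this patch is pure infrastructure, and a follow-up change is expected to wire in model-specific tables before any PCI registration happens. A sketch of what such wiring might look like (the device ids, names, model number, and type definition below are all assumptions for illustration):

static struct intel_uncore_type snbep_uncore_ha; /* assumed box type */

static struct intel_uncore_type *snbep_pci_uncores[] = {
        &snbep_uncore_ha,
        NULL,
};

static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
        { /* assumed Home Agent device id */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c46),
                .driver_data = (unsigned long)&snbep_uncore_ha,
        },
        { /* end marker */ },
};

static struct pci_driver snbep_uncore_pci_driver = {
        .name     = "snbep_uncore",
        .id_table = snbep_uncore_pci_ids,
};

/* ...and in uncore_pci_init():
 *      case 45: // assumed Sandy Bridge-EP model number
 *              pci_uncores = snbep_pci_uncores;
 *              uncore_pci_driver = &snbep_uncore_pci_driver;
 *              break;
 */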
@@ -921,6 +1071,7 @@ static void __cpuinit uncore_event_exit_cpu(int cpu)
 	cpumask_set_cpu(target, &uncore_cpu_mask);
 
 	uncore_change_context(msr_uncores, cpu, target);
+	uncore_change_context(pci_uncores, cpu, target);
 }
 
 static void __cpuinit uncore_event_init_cpu(int cpu)
@@ -936,6 +1087,7 @@ static void __cpuinit uncore_event_init_cpu(int cpu)
 	cpumask_set_cpu(cpu, &uncore_cpu_mask);
 
 	uncore_change_context(msr_uncores, -1, cpu);
+	uncore_change_context(pci_uncores, -1, cpu);
 }
 
 static int __cpuinit uncore_cpu_notifier(struct notifier_block *self,
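Both hot-plug paths now walk pci_uncores as well as msr_uncores. uncore_change_context() itself needs no change for this to work; it exists earlier in the file and is not shown in this diff, so the paraphrase below is a sketch of its contract, not its verbatim body:

/* Sketch: re-bind each type's per-package box from the outgoing
 * cpu to its replacement (or bind it for the first time when
 * old_cpu is -1). */
static void uncore_change_context(struct intel_uncore_type **uncores,
                                  int old_cpu, int new_cpu)
{
        struct intel_uncore_type *type;
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box;
        int i, j;

        for (i = 0; uncores[i]; i++) {
                type = uncores[i];
                for (j = 0; j < type->num_boxes; j++) {
                        pmu = &type->pmus[j];
                        if (old_cpu < 0)
                                box = uncore_pmu_to_box(pmu, new_cpu);
                        else
                                box = uncore_pmu_to_box(pmu, old_cpu);
                        if (!box)
                                continue;
                        /* the box follows whichever cpu collects
                         * events for this package */
                        box->cpu = new_cpu;
                }
        }
}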
@@ -1051,6 +1203,14 @@ static int __init uncore_pmus_register(void)
 		}
 	}
 
+	for (i = 0; pci_uncores[i]; i++) {
+		type = pci_uncores[i];
+		for (j = 0; j < type->num_boxes; j++) {
+			pmu = &type->pmus[j];
+			uncore_pmu_register(pmu);
+		}
+	}
+
 	return 0;
 }
 
@@ -1061,9 +1221,14 @@ static int __init intel_uncore_init(void)
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
 		return -ENODEV;
 
-	ret = uncore_cpu_init();
+	ret = uncore_pci_init();
 	if (ret)
 		goto fail;
+	ret = uncore_cpu_init();
+	if (ret) {
+		uncore_pci_exit();
+		goto fail;
+	}
 
 	uncore_pmus_register();
 	return 0;