@@ -24,7 +24,11 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
 
 static u32 kvm_pmu_event_mask(struct kvm *kvm)
 {
-	switch (kvm->arch.pmuver) {
+	unsigned int pmuver;
+
+	pmuver = kvm->arch.arm_pmu->pmuver;
+
+	switch (pmuver) {
 	case ID_AA64DFR0_PMUVER_8_0:
 		return GENMASK(9, 0);
 	case ID_AA64DFR0_PMUVER_8_1:
@@ -33,7 +37,7 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
 	case ID_AA64DFR0_PMUVER_8_7:
 		return GENMASK(15, 0);
 	default:		/* Shouldn't be here, just for sanity */
-		WARN_ONCE(1, "Unknown PMU version %d\n", kvm->arch.pmuver);
+		WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
 		return 0;
 	}
 }
@@ -600,6 +604,7 @@ static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
  */
 static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 {
+	struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	struct kvm_pmc *pmc;
 	struct perf_event *event;
@@ -636,7 +641,7 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 		return;
 
 	memset(&attr, 0, sizeof(struct perf_event_attr));
-	attr.type = PERF_TYPE_RAW;
+	attr.type = arm_pmu->pmu.type;
 	attr.size = sizeof(attr);
 	attr.pinned = 1;
 	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
@@ -750,12 +755,11 @@ void kvm_host_pmu_init(struct arm_pmu *pmu)
 		static_branch_enable(&kvm_arm_pmu_available);
 }
 
-static int kvm_pmu_probe_pmuver(void)
+static struct arm_pmu *kvm_pmu_probe_armpmu(void)
 {
 	struct perf_event_attr attr = { };
 	struct perf_event *event;
-	struct arm_pmu *pmu;
-	int pmuver = ID_AA64DFR0_PMUVER_IMP_DEF;
+	struct arm_pmu *pmu = NULL;
 
 	/*
 	 * Create a dummy event that only counts user cycles. As we'll never
@@ -780,19 +784,20 @@ static int kvm_pmu_probe_pmuver(void)
 	if (IS_ERR(event)) {
 		pr_err_once("kvm: pmu event creation failed %ld\n",
 			    PTR_ERR(event));
-		return ID_AA64DFR0_PMUVER_IMP_DEF;
+		return NULL;
 	}
 
 	if (event->pmu) {
 		pmu = to_arm_pmu(event->pmu);
-		if (pmu->pmuver)
-			pmuver = pmu->pmuver;
+		if (pmu->pmuver == 0 ||
+		    pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF)
+			pmu = NULL;
 	}
 
 	perf_event_disable(event);
 	perf_event_release_kernel(event);
 
-	return pmuver;
+	return pmu;
 }
 
 u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
@@ -810,7 +815,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 		 * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
 		 * as RAZ
 		 */
-		if (vcpu->kvm->arch.pmuver >= ID_AA64DFR0_PMUVER_8_4)
+		if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_4)
 			val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
 		base = 32;
 	}
@@ -932,11 +937,16 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 	if (vcpu->arch.pmu.created)
 		return -EBUSY;
 
-	if (!vcpu->kvm->arch.pmuver)
-		vcpu->kvm->arch.pmuver = kvm_pmu_probe_pmuver();
-
-	if (vcpu->kvm->arch.pmuver == ID_AA64DFR0_PMUVER_IMP_DEF)
-		return -ENODEV;
+	mutex_lock(&kvm->lock);
+	if (!kvm->arch.arm_pmu) {
+		/* No PMU set, get the default one */
+		kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
+		if (!kvm->arch.arm_pmu) {
+			mutex_unlock(&kvm->lock);
+			return -ENODEV;
+		}
+	}
+	mutex_unlock(&kvm->lock);
 
 	switch (attr->attr) {
 	case KVM_ARM_VCPU_PMU_V3_IRQ: {
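
For reference, a minimal userspace sketch of how the kvm_arm_pmu_v3_set_attr() path touched by the last hunk is reached: the vPMU is configured with KVM_SET_DEVICE_ATTR on the vCPU fd using the KVM_ARM_VCPU_PMU_V3_CTRL group. This is illustrative only and not part of the patch; it assumes a vCPU created with the KVM_ARM_VCPU_PMU_V3 feature, an in-kernel GICv3, and a purely example PPI number of 23.

/*
 * Illustrative sketch, not part of the patch. Assumes vcpu_fd was created
 * with the KVM_ARM_VCPU_PMU_V3 feature and the VM has an in-kernel GICv3.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int enable_vpmu(int vcpu_fd)
{
	int irq = 23;			/* PMU overflow PPI; example value only */
	struct kvm_device_attr attr = {
		.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr	= KVM_ARM_VCPU_PMU_V3_IRQ,
		.addr	= (__u64)(unsigned long)&irq,
	};

	/* Route the PMU overflow interrupt to the chosen PPI. */
	if (ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr))
		return -1;

	/*
	 * Finalize the vPMU; with this patch, -ENODEV here means
	 * kvm_pmu_probe_armpmu() could not find a usable host PMU.
	 */
	attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
	attr.addr = 0;
	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}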