rdtsc_timing.patch (forked from SamuelTulach/BetterTiming)
diff --color -ruN ./linux-5.8.1/arch/x86/kvm/svm/svm.c ./linux-5.8.1_patched/arch/x86/kvm/svm/svm.c
--- ./linux-5.8.1/arch/x86/kvm/svm/svm.c 2020-08-11 09:48:12.000000000 -0400
+++ ./linux-5.8.1_patched/arch/x86/kvm/svm/svm.c 2020-08-17 01:43:49.932519654 -0400
@@ -1026,6 +1026,7 @@
set_intercept(svm, INTERCEPT_XSETBV);
set_intercept(svm, INTERCEPT_RDPRU);
set_intercept(svm, INTERCEPT_RSM);
+ set_intercept(svm, INTERCEPT_RDTSC); /* exit to the host on every guest RDTSC */
if (!kvm_mwait_in_guest(svm->vcpu.kvm)) {
set_intercept(svm, INTERCEPT_MONITOR);
@@ -2156,6 +2157,7 @@
static int cpuid_interception(struct vcpu_svm *svm)
{
+ svm->vcpu.run->exit_reason = 123; /* mark CPUID exits so the vcpu_enter_guest() wrapper accounts their duration */
return kvm_emulate_cpuid(&svm->vcpu);
}
@@ -2717,6 +2719,26 @@
return nop_interception(svm);
}
+static int handle_rdtsc_interception(struct vcpu_svm *svm)
+{
+ u64 difference;
+ u64 final_time;
+ u64 data;
+
+ difference = rdtsc() - svm->vcpu.last_exit_start; /* cycles since this trip into the guest began */
+ final_time = svm->vcpu.total_exit_time + difference; /* plus exit time already accounted */
+
+ data = rdtsc() - final_time; /* report a TSC with the accumulated exit time subtracted */
+
+ svm->vcpu.run->exit_reason = 123; /* mark this exit for accounting as well */
+ svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & -1u; /* low 32 bits of the result (EAX) */
+ svm->vcpu.arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u; /* high 32 bits of the result (EDX) */
+
+ skip_emulated_instruction(&svm->vcpu);
+
+ return 1;
+}
+
static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_READ_CR0] = cr_interception,
[SVM_EXIT_READ_CR3] = cr_interception,
@@ -2783,6 +2805,7 @@
[SVM_EXIT_RSM] = rsm_interception,
[SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception,
[SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception,
+ [SVM_EXIT_RDTSC] = handle_rdtsc_interception,
};
static void dump_vmcb(struct kvm_vcpu *vcpu)
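The svm.c hunks above do three things: they enable the SVM RDTSC intercept so every guest RDTSC exits to the host, they tag CPUID exits with the marker exit reason 123, and they add handle_rdtsc_interception(), which answers the guest with the host TSC minus the exit time accumulated so far, split across RAX and RDX exactly as the RDTSC instruction reports the counter in EDX:EAX. The standalone userspace sketch below is not part of the patch; it assumes GCC or Clang on x86-64 and uses the __rdtsc() intrinsic to show that same split and its recombination:

#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>

int main(void)
{
	/* Read the time stamp counter, as the guest's RDTSC would. */
	uint64_t tsc = __rdtsc();

	/* Split into the halves the handler writes into the vCPU registers. */
	uint32_t eax = (uint32_t)(tsc & 0xffffffffu); /* low 32 bits  -> RAX */
	uint32_t edx = (uint32_t)(tsc >> 32);         /* high 32 bits -> RDX */

	/* Recombining the halves must give back the original value. */
	uint64_t recombined = ((uint64_t)edx << 32) | eax;

	printf("tsc=%llu eax=%u edx=%u recombined=%llu\n",
	       (unsigned long long)tsc, eax, edx,
	       (unsigned long long)recombined);
	return recombined == tsc ? 0 : 1;
}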
diff --color -ruN ./linux-5.8.1/arch/x86/kvm/x86.c ./linux-5.8.1_patched/arch/x86/kvm/x86.c
--- ./linux-5.8.1/arch/x86/kvm/x86.c 2020-08-11 09:48:12.000000000 -0400
+++ ./linux-5.8.1_patched/arch/x86/kvm/x86.c 2020-08-17 01:46:13.402520753 -0400
@@ -8311,7 +8311,7 @@
* exiting to the userspace. Otherwise, the value will be returned to the
* userspace.
*/
-static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+static int vcpu_enter_guest_real(struct kvm_vcpu *vcpu)
{
int r;
bool req_int_win =
@@ -8609,6 +8609,24 @@
return r;
}
+static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+{
+ int result;
+ u64 difference;
+
+ vcpu->last_exit_start = rdtsc(); /* timestamp the start of this trip into the guest */
+
+ result = vcpu_enter_guest_real(vcpu);
+
+ if (vcpu->run->exit_reason == 123) /* exit was marked by the CPUID/RDTSC interceptions */
+ {
+ difference = rdtsc() - vcpu->last_exit_start;
+ vcpu->total_exit_time += difference; /* accumulate cycles the guest could otherwise observe */
+ }
+
+ return result;
+}
+
static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
{
if (!kvm_arch_vcpu_runnable(vcpu) &&
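The x86.c change renames the original vcpu_enter_guest() to vcpu_enter_guest_real() and wraps it: the wrapper records the TSC before each trip into the guest and, when the trip ends with the marker exit reason 123, adds the elapsed cycles to total_exit_time. handle_rdtsc_interception() then subtracts that running total, so the guest sees a TSC that appears not to have stalled during those exits. The sketch below is a userspace simulation of that accounting, not kernel code; struct fake_vcpu, the helper names, and the busy loop are illustrative stand-ins assuming GCC or Clang on x86-64:

#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>

/* Illustrative stand-in for the two fields the patch adds to struct kvm_vcpu. */
struct fake_vcpu {
	uint64_t last_exit_start;
	uint64_t total_exit_time;
};

/* Mirrors the accounting in the vcpu_enter_guest() wrapper. */
static void account_marked_exit(struct fake_vcpu *v)
{
	v->total_exit_time += __rdtsc() - v->last_exit_start;
}

/* Mirrors the arithmetic in handle_rdtsc_interception(). */
static uint64_t guest_visible_tsc(const struct fake_vcpu *v)
{
	uint64_t difference = __rdtsc() - v->last_exit_start;
	uint64_t final_time = v->total_exit_time + difference;

	return __rdtsc() - final_time;
}

int main(void)
{
	struct fake_vcpu v = { 0, 0 };

	/* A "CPUID exit": record the start, burn cycles in the host, account it. */
	v.last_exit_start = __rdtsc();
	for (volatile int i = 0; i < 10000000; i++)
		;
	account_marked_exit(&v);

	/* A later "RDTSC exit": the reported TSC hides the cycles burned above. */
	v.last_exit_start = __rdtsc();
	printf("host TSC:  %llu\n", (unsigned long long)__rdtsc());
	printf("guest TSC: %llu\n", (unsigned long long)guest_visible_tsc(&v));
	return 0;
}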
diff --color -ruN ./linux-5.8.1/include/linux/kvm_host.h ./linux-5.8.1_patched/include/linux/kvm_host.h
--- ./linux-5.8.1/include/linux/kvm_host.h 2020-08-11 09:48:12.000000000 -0400
+++ ./linux-5.8.1_patched/include/linux/kvm_host.h 2020-08-17 01:49:16.262522154 -0400
@@ -319,6 +319,9 @@
bool preempted;
bool ready;
struct kvm_vcpu_arch arch;
+
+ u64 last_exit_start; /* host TSC at the start of the current trip into the guest */
+ u64 total_exit_time; /* accumulated cycles spent in exits marked with reason 123 */
};
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
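For context, the kind of timing check this patch is built to defeat measures, from inside the guest, how long a forced VM exit takes: RDTSC, then CPUID (which always exits to the hypervisor), then RDTSC again. On bare metal the delta is typically a few hundred cycles; under an unpatched hypervisor it is usually an order of magnitude larger. The sketch below is a generic example of such a check, not code from this repository; the iteration count and any threshold you compare the average against are illustrative assumptions:

#include <stdint.h>
#include <stdio.h>
#include <cpuid.h>
#include <x86intrin.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	uint64_t total = 0;
	const int rounds = 1000; /* illustrative sample size */

	for (int i = 0; i < rounds; i++) {
		uint64_t start = __rdtsc();
		__get_cpuid(0, &eax, &ebx, &ecx, &edx); /* forces a VM exit under a hypervisor */
		total += __rdtsc() - start;
	}

	/* A large average (e.g. thousands of cycles) suggests unhidden VM-exit overhead. */
	printf("average rdtsc/cpuid/rdtsc delta: %llu cycles\n",
	       (unsigned long long)(total / rounds));
	return 0;
}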