Skip to content

Commit bf33f52

Browse files
sean-jc authored and gregkh committed
KVM: x86: Convert vcpu_run()'s immediate exit param into a generic bitmap
commit 2478b1b upstream. Convert kvm_x86_ops.vcpu_run()'s "force_immediate_exit" boolean parameter into a generic bitmap so that similar "take action" information can be passed to vendor code without creating a pile of boolean parameters. This will allow dropping kvm_x86_ops.set_dr6() in favor of a new flag, and will also allow for adding similar functionality for re-loading debugctl in the active VMCS. Opportunistically massage the TDX WARN and comment to prepare for adding more run_flags, all of which are expected to be mutually exclusive with TDX, i.e. should be WARNed on. No functional change intended. Cc: [email protected] Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Sean Christopherson <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent 4639f1d commit bf33f52

7 files changed

Lines changed: 31 additions & 21 deletions

File tree

arch/x86/include/asm/kvm_host.h

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1680,6 +1680,10 @@ static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical)
16801680
return dest_mode_logical ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL;
16811681
}
16821682

1683+
enum kvm_x86_run_flags {
1684+
KVM_RUN_FORCE_IMMEDIATE_EXIT = BIT(0),
1685+
};
1686+
16831687
struct kvm_x86_ops {
16841688
const char *name;
16851689

@@ -1761,7 +1765,7 @@ struct kvm_x86_ops {
17611765

17621766
int (*vcpu_pre_run)(struct kvm_vcpu *vcpu);
17631767
enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *vcpu,
1764-
bool force_immediate_exit);
1768+
u64 run_flags);
17651769
int (*handle_exit)(struct kvm_vcpu *vcpu,
17661770
enum exit_fastpath_completion exit_fastpath);
17671771
int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);

arch/x86/kvm/svm/svm.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4389,9 +4389,9 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
43894389
guest_state_exit_irqoff();
43904390
}
43914391

4392-
static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
4393-
bool force_immediate_exit)
4392+
static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
43944393
{
4394+
bool force_immediate_exit = run_flags & KVM_RUN_FORCE_IMMEDIATE_EXIT;
43954395
struct vcpu_svm *svm = to_svm(vcpu);
43964396
bool spec_ctrl_intercepted = msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL);
43974397

arch/x86/kvm/vmx/main.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -175,12 +175,12 @@ static int vt_vcpu_pre_run(struct kvm_vcpu *vcpu)
175175
return vmx_vcpu_pre_run(vcpu);
176176
}
177177

178-
static fastpath_t vt_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
178+
static fastpath_t vt_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
179179
{
180180
if (is_td_vcpu(vcpu))
181-
return tdx_vcpu_run(vcpu, force_immediate_exit);
181+
return tdx_vcpu_run(vcpu, run_flags);
182182

183-
return vmx_vcpu_run(vcpu, force_immediate_exit);
183+
return vmx_vcpu_run(vcpu, run_flags);
184184
}
185185

186186
static int vt_handle_exit(struct kvm_vcpu *vcpu,

arch/x86/kvm/vmx/tdx.c

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1025,20 +1025,20 @@ static void tdx_load_host_xsave_state(struct kvm_vcpu *vcpu)
10251025
DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI | \
10261026
DEBUGCTLMSR_FREEZE_IN_SMM)
10271027

1028-
fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
1028+
fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
10291029
{
10301030
struct vcpu_tdx *tdx = to_tdx(vcpu);
10311031
struct vcpu_vt *vt = to_vt(vcpu);
10321032

10331033
/*
1034-
* force_immediate_exit requires vCPU entering for events injection with
1035-
* an immediately exit followed. But The TDX module doesn't guarantee
1036-
* entry, it's already possible for KVM to _think_ it completely entry
1037-
* to the guest without actually having done so.
1038-
* Since KVM never needs to force an immediate exit for TDX, and can't
1039-
* do direct injection, just warn on force_immediate_exit.
1034+
* WARN if KVM wants to force an immediate exit, as the TDX module does
1035+
* not guarantee entry into the guest, i.e. it's possible for KVM to
1036+
* _think_ it completed entry to the guest and forced an immediate exit
1037+
* without actually having done so. Luckily, KVM never needs to force
1038+
* an immediate exit for TDX (KVM can't do direct event injection), so
1039+
* just WARN and continue on.
10401040
*/
1041-
WARN_ON_ONCE(force_immediate_exit);
1041+
WARN_ON_ONCE(run_flags);
10421042

10431043
/*
10441044
* Wait until retry of SEPT-zap-related SEAMCALL completes before
@@ -1048,7 +1048,7 @@ fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
10481048
if (unlikely(READ_ONCE(to_kvm_tdx(vcpu->kvm)->wait_for_sept_zap)))
10491049
return EXIT_FASTPATH_EXIT_HANDLED;
10501050

1051-
trace_kvm_entry(vcpu, force_immediate_exit);
1051+
trace_kvm_entry(vcpu, run_flags & KVM_RUN_FORCE_IMMEDIATE_EXIT);
10521052

10531053
if (pi_test_on(&vt->pi_desc)) {
10541054
apic->send_IPI_self(POSTED_INTR_VECTOR);

arch/x86/kvm/vmx/vmx.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7323,8 +7323,9 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
73237323
guest_state_exit_irqoff();
73247324
}
73257325

7326-
fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
7326+
fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
73277327
{
7328+
bool force_immediate_exit = run_flags & KVM_RUN_FORCE_IMMEDIATE_EXIT;
73287329
struct vcpu_vmx *vmx = to_vmx(vcpu);
73297330
unsigned long cr3, cr4;
73307331

arch/x86/kvm/vmx/x86_ops.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ void vmx_vm_destroy(struct kvm *kvm);
2121
int vmx_vcpu_precreate(struct kvm *kvm);
2222
int vmx_vcpu_create(struct kvm_vcpu *vcpu);
2323
int vmx_vcpu_pre_run(struct kvm_vcpu *vcpu);
24-
fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit);
24+
fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags);
2525
void vmx_vcpu_free(struct kvm_vcpu *vcpu);
2626
void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
2727
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
@@ -133,7 +133,7 @@ void tdx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
133133
void tdx_vcpu_free(struct kvm_vcpu *vcpu);
134134
void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
135135
int tdx_vcpu_pre_run(struct kvm_vcpu *vcpu);
136-
fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit);
136+
fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags);
137137
void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
138138
void tdx_vcpu_put(struct kvm_vcpu *vcpu);
139139
bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu);

arch/x86/kvm/x86.c

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -10785,6 +10785,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
1078510785
dm_request_for_irq_injection(vcpu) &&
1078610786
kvm_cpu_accept_dm_intr(vcpu);
1078710787
fastpath_t exit_fastpath;
10788+
u64 run_flags;
1078810789

1078910790
bool req_immediate_exit = false;
1079010791

@@ -11029,8 +11030,11 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
1102911030
goto cancel_injection;
1103011031
}
1103111032

11032-
if (req_immediate_exit)
11033+
run_flags = 0;
11034+
if (req_immediate_exit) {
11035+
run_flags |= KVM_RUN_FORCE_IMMEDIATE_EXIT;
1103311036
kvm_make_request(KVM_REQ_EVENT, vcpu);
11037+
}
1103411038

1103511039
fpregs_assert_state_consistent();
1103611040
if (test_thread_flag(TIF_NEED_FPU_LOAD))
@@ -11067,8 +11071,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
1106711071
WARN_ON_ONCE((kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu)) &&
1106811072
(kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED));
1106911073

11070-
exit_fastpath = kvm_x86_call(vcpu_run)(vcpu,
11071-
req_immediate_exit);
11074+
exit_fastpath = kvm_x86_call(vcpu_run)(vcpu, run_flags);
1107211075
if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
1107311076
break;
1107411077

@@ -11080,6 +11083,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
1108011083
break;
1108111084
}
1108211085

11086+
run_flags = 0;
11087+
1108311088
/* Note, VM-Exits that go down the "slow" path are accounted below. */
1108411089
++vcpu->stat.exits;
1108511090
}

0 commit comments

Comments
 (0)