Skip to content

Commit 8b59d02

Browse files
yang-weijiang authored and sean-jc committed
KVM: VMX: Emulate read and write to CET MSRs
Add emulation interface for CET MSR access. The emulation code is split into common part and vendor specific part. The former does common checks for MSRs, e.g., accessibility, data validity etc., then passes operation to either XSAVE-managed MSRs via the helpers or CET VMCS fields. SSP can only be read via RDSSP. Writing even requires destructive and potentially faulting operations such as SAVEPREVSSP/RSTORSSP or SETSSBSY/CLRSSBSY. Let the host use a pseudo-MSR that is just a wrapper for the GUEST_SSP field of the VMCS. Suggested-by: Sean Christopherson <[email protected]> Signed-off-by: Yang Weijiang <[email protected]> Tested-by: Mathias Krause <[email protected]> Tested-by: John Allen <[email protected]> Tested-by: Rick Edgecombe <[email protected]> Signed-off-by: Chao Gao <[email protected]> [sean: drop call to kvm_set_xstate_msr() for S_CET, consolidate code] Reviewed-by: Binbin Wu <[email protected]> Reviewed-by: Xiaoyao Li <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Sean Christopherson <[email protected]>
1 parent 9d6812d commit 8b59d02

3 files changed

Lines changed: 103 additions & 2 deletions

File tree

arch/x86/kvm/vmx/vmx.c

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2093,6 +2093,15 @@ int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
20932093
else
20942094
msr_info->data = vmx->pt_desc.guest.addr_a[index / 2];
20952095
break;
2096+
case MSR_IA32_S_CET:
2097+
msr_info->data = vmcs_readl(GUEST_S_CET);
2098+
break;
2099+
case MSR_KVM_INTERNAL_GUEST_SSP:
2100+
msr_info->data = vmcs_readl(GUEST_SSP);
2101+
break;
2102+
case MSR_IA32_INT_SSP_TAB:
2103+
msr_info->data = vmcs_readl(GUEST_INTR_SSP_TABLE);
2104+
break;
20962105
case MSR_IA32_DEBUGCTLMSR:
20972106
msr_info->data = vmx_guest_debugctl_read();
20982107
break;
@@ -2411,6 +2420,15 @@ int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
24112420
else
24122421
vmx->pt_desc.guest.addr_a[index / 2] = data;
24132422
break;
2423+
case MSR_IA32_S_CET:
2424+
vmcs_writel(GUEST_S_CET, data);
2425+
break;
2426+
case MSR_KVM_INTERNAL_GUEST_SSP:
2427+
vmcs_writel(GUEST_SSP, data);
2428+
break;
2429+
case MSR_IA32_INT_SSP_TAB:
2430+
vmcs_writel(GUEST_INTR_SSP_TABLE, data);
2431+
break;
24142432
case MSR_IA32_PERF_CAPABILITIES:
24152433
if (data & PERF_CAP_LBR_FMT) {
24162434
if ((data & PERF_CAP_LBR_FMT) !=

arch/x86/kvm/x86.c

Lines changed: 62 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1890,6 +1890,44 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
18901890

18911891
data = (u32)data;
18921892
break;
1893+
case MSR_IA32_U_CET:
1894+
case MSR_IA32_S_CET:
1895+
if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK) &&
1896+
!guest_cpu_cap_has(vcpu, X86_FEATURE_IBT))
1897+
return KVM_MSR_RET_UNSUPPORTED;
1898+
if (!kvm_is_valid_u_s_cet(vcpu, data))
1899+
return 1;
1900+
break;
1901+
case MSR_KVM_INTERNAL_GUEST_SSP:
1902+
if (!host_initiated)
1903+
return 1;
1904+
fallthrough;
1905+
/*
1906+
* Note that the MSR emulation here is flawed when a vCPU
1907+
* doesn't support the Intel 64 architecture. The expected
1908+
* architectural behavior in this case is that the upper 32
1909+
* bits do not exist and should always read '0'. However,
1910+
* because the actual hardware on which the virtual CPU is
1911+
* running does support Intel 64, XRSTORS/XSAVES in the
1912+
* guest could observe behavior that violates the
1913+
* architecture. Intercepting XRSTORS/XSAVES for this
1914+
* special case isn't deemed worthwhile.
1915+
*/
1916+
case MSR_IA32_PL0_SSP ... MSR_IA32_INT_SSP_TAB:
1917+
if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK))
1918+
return KVM_MSR_RET_UNSUPPORTED;
1919+
/*
1920+
* MSR_IA32_INT_SSP_TAB is not present on processors that do
1921+
* not support Intel 64 architecture.
1922+
*/
1923+
if (index == MSR_IA32_INT_SSP_TAB && !guest_cpu_cap_has(vcpu, X86_FEATURE_LM))
1924+
return KVM_MSR_RET_UNSUPPORTED;
1925+
if (is_noncanonical_msr_address(data, vcpu))
1926+
return 1;
1927+
/* All SSP MSRs except MSR_IA32_INT_SSP_TAB must be 4-byte aligned */
1928+
if (index != MSR_IA32_INT_SSP_TAB && !IS_ALIGNED(data, 4))
1929+
return 1;
1930+
break;
18931931
}
18941932

18951933
msr.data = data;
@@ -1934,6 +1972,20 @@ static int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
19341972
!guest_cpu_cap_has(vcpu, X86_FEATURE_RDPID))
19351973
return 1;
19361974
break;
1975+
case MSR_IA32_U_CET:
1976+
case MSR_IA32_S_CET:
1977+
if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK) &&
1978+
!guest_cpu_cap_has(vcpu, X86_FEATURE_IBT))
1979+
return KVM_MSR_RET_UNSUPPORTED;
1980+
break;
1981+
case MSR_KVM_INTERNAL_GUEST_SSP:
1982+
if (!host_initiated)
1983+
return 1;
1984+
fallthrough;
1985+
case MSR_IA32_PL0_SSP ... MSR_IA32_INT_SSP_TAB:
1986+
if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK))
1987+
return KVM_MSR_RET_UNSUPPORTED;
1988+
break;
19371989
}
19381990

19391991
msr.index = index;
@@ -3865,12 +3917,12 @@ static __always_inline void kvm_access_xstate_msr(struct kvm_vcpu *vcpu,
38653917
kvm_fpu_put();
38663918
}
38673919

3868-
static __maybe_unused void kvm_set_xstate_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3920+
static void kvm_set_xstate_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
38693921
{
38703922
kvm_access_xstate_msr(vcpu, msr_info, MSR_TYPE_W);
38713923
}
38723924

3873-
static __maybe_unused void kvm_get_xstate_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3925+
static void kvm_get_xstate_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
38743926
{
38753927
kvm_access_xstate_msr(vcpu, msr_info, MSR_TYPE_R);
38763928
}
@@ -4256,6 +4308,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
42564308
vcpu->arch.guest_fpu.xfd_err = data;
42574309
break;
42584310
#endif
4311+
case MSR_IA32_U_CET:
4312+
case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
4313+
kvm_set_xstate_msr(vcpu, msr_info);
4314+
break;
42594315
default:
42604316
if (kvm_pmu_is_valid_msr(vcpu, msr))
42614317
return kvm_pmu_set_msr(vcpu, msr_info);
@@ -4605,6 +4661,10 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
46054661
msr_info->data = vcpu->arch.guest_fpu.xfd_err;
46064662
break;
46074663
#endif
4664+
case MSR_IA32_U_CET:
4665+
case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
4666+
kvm_get_xstate_msr(vcpu, msr_info);
4667+
break;
46084668
default:
46094669
if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
46104670
return kvm_pmu_get_msr(vcpu, msr_info);

arch/x86/kvm/x86.h

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -710,4 +710,27 @@ int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, int cpl,
710710

711711
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
712712

713+
#define CET_US_RESERVED_BITS GENMASK(9, 6)
714+
#define CET_US_SHSTK_MASK_BITS GENMASK(1, 0)
715+
#define CET_US_IBT_MASK_BITS (GENMASK_ULL(5, 2) | GENMASK_ULL(63, 10))
716+
#define CET_US_LEGACY_BITMAP_BASE(data) ((data) >> 12)
717+
718+
static inline bool kvm_is_valid_u_s_cet(struct kvm_vcpu *vcpu, u64 data)
719+
{
720+
if (data & CET_US_RESERVED_BITS)
721+
return false;
722+
if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK) &&
723+
(data & CET_US_SHSTK_MASK_BITS))
724+
return false;
725+
if (!guest_cpu_cap_has(vcpu, X86_FEATURE_IBT) &&
726+
(data & CET_US_IBT_MASK_BITS))
727+
return false;
728+
if (!IS_ALIGNED(CET_US_LEGACY_BITMAP_BASE(data), 4))
729+
return false;
730+
/* IBT can be suppressed iff the TRACKER isn't WAIT_ENDBR. */
731+
if ((data & CET_SUPPRESS) && (data & CET_WAIT_ENDBR))
732+
return false;
733+
734+
return true;
735+
}
713736
#endif

0 commit comments

Comments
 (0)