Skip to content

Commit 7f830e1

Browse files
tlendacky authored and bp3tk0v committed
x86/sev: Guard sev_evict_cache() with CONFIG_AMD_MEM_ENCRYPT
The sev_evict_cache() is guest-related code and should be guarded by CONFIG_AMD_MEM_ENCRYPT, not CONFIG_KVM_AMD_SEV. CONFIG_AMD_MEM_ENCRYPT=y is required for a guest to run properly as an SEV-SNP guest, but a guest kernel built with CONFIG_KVM_AMD_SEV=n would get the stub function of sev_evict_cache() instead of the version that performs the actual eviction. Move the function declarations under the appropriate #ifdef. Fixes: 7b306df ("x86/sev: Evict cache lines during SNP memory validation") Signed-off-by: Tom Lendacky <[email protected]> Signed-off-by: Borislav Petkov (AMD) <[email protected]> Cc: [email protected] # 6.16.x Link: https://lore.kernel.org/r/70e38f2c4a549063de54052c9f64929705313526.1757708959.git.thomas.lendacky@amd.com
1 parent f83ec76 commit 7f830e1

1 file changed

Lines changed: 19 additions & 19 deletions

File tree

  • arch/x86/include/asm

arch/x86/include/asm/sev.h

Lines changed: 19 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -562,6 +562,24 @@ enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
562562

563563
extern struct ghcb *boot_ghcb;
564564

565+
static inline void sev_evict_cache(void *va, int npages)
566+
{
567+
volatile u8 val __always_unused;
568+
u8 *bytes = va;
569+
int page_idx;
570+
571+
/*
572+
* For SEV guests, a read from the first/last cache-lines of a 4K page
573+
* using the guest key is sufficient to cause a flush of all cache-lines
574+
* associated with that 4K page without incurring all the overhead of a
575+
* full CLFLUSH sequence.
576+
*/
577+
for (page_idx = 0; page_idx < npages; page_idx++) {
578+
val = bytes[page_idx * PAGE_SIZE];
579+
val = bytes[page_idx * PAGE_SIZE + PAGE_SIZE - 1];
580+
}
581+
}
582+
565583
#else /* !CONFIG_AMD_MEM_ENCRYPT */
566584

567585
#define snp_vmpl 0
@@ -605,6 +623,7 @@ static inline int snp_send_guest_request(struct snp_msg_desc *mdesc,
605623
static inline int snp_svsm_vtpm_send_command(u8 *buffer) { return -ENODEV; }
606624
static inline void __init snp_secure_tsc_prepare(void) { }
607625
static inline void __init snp_secure_tsc_init(void) { }
626+
static inline void sev_evict_cache(void *va, int npages) {}
608627

609628
#endif /* CONFIG_AMD_MEM_ENCRYPT */
610629

@@ -619,24 +638,6 @@ int rmp_make_shared(u64 pfn, enum pg_level level);
619638
void snp_leak_pages(u64 pfn, unsigned int npages);
620639
void kdump_sev_callback(void);
621640
void snp_fixup_e820_tables(void);
622-
623-
static inline void sev_evict_cache(void *va, int npages)
624-
{
625-
volatile u8 val __always_unused;
626-
u8 *bytes = va;
627-
int page_idx;
628-
629-
/*
630-
* For SEV guests, a read from the first/last cache-lines of a 4K page
631-
* using the guest key is sufficient to cause a flush of all cache-lines
632-
* associated with that 4K page without incurring all the overhead of a
633-
* full CLFLUSH sequence.
634-
*/
635-
for (page_idx = 0; page_idx < npages; page_idx++) {
636-
val = bytes[page_idx * PAGE_SIZE];
637-
val = bytes[page_idx * PAGE_SIZE + PAGE_SIZE - 1];
638-
}
639-
}
640641
#else
641642
static inline bool snp_probe_rmptable_info(void) { return false; }
642643
static inline int snp_rmptable_init(void) { return -ENOSYS; }
@@ -652,7 +653,6 @@ static inline int rmp_make_shared(u64 pfn, enum pg_level level) { return -ENODEV
652653
static inline void snp_leak_pages(u64 pfn, unsigned int npages) {}
653654
static inline void kdump_sev_callback(void) { }
654655
static inline void snp_fixup_e820_tables(void) {}
655-
static inline void sev_evict_cache(void *va, int npages) {}
656656
#endif
657657

658658
#endif

0 commit comments

Comments
 (0)