[PATCH 17/22] KVM: x86/mmu: introduce cpu_role bit for availability of PFEC.I/D

From: Paolo Bonzini

Date: Fri Mar 20 2026 - 20:14:03 EST


While GMET looks a lot like SMEP, it has several annoying differences.
The main one is that the availability of the I/D bit in the page fault
error code still depends on the host CR4.SMEP and EFER.NXE bits. If the
base.cr4_smep bit of the cpu_role is (ab)used to enable GMET, the host
CR4.SMEP value has to be tracked somewhere else; so merge the host
CR4.SMEP with EFER.NXE into a new cpu_role bit that tells paging_tmpl.h
whether to set the I/D bit at all.

Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
---
arch/x86/include/asm/kvm_host.h | 7 +++++++
arch/x86/kvm/mmu/mmu.c | 8 ++++++++
arch/x86/kvm/mmu/paging_tmpl.h | 2 +-
3 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 0d6d20ab48dd..3162414186f0 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -403,6 +403,13 @@ union kvm_mmu_extended_role {
unsigned int cr4_smap:1;
unsigned int cr4_la57:1;
unsigned int efer_lma:1;
+
+ /*
+ * True if either CR4.SMEP or EFER.NXE is set. For AMD NPT
+ * this is the "real" host CR4.SMEP whereas cr4_smep is
+ * actually GMET.
+ */
+ unsigned int has_pferr_fetch:1;
};
};

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 647dffb69d85..1788620e6dfc 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -234,6 +234,11 @@ BUILD_MMU_ROLE_ACCESSOR(ext, cr4, la57);
BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);
BUILD_MMU_ROLE_ACCESSOR(ext, efer, lma);

+static inline bool has_pferr_fetch(struct kvm_mmu *mmu)
+{
+ return mmu->cpu_role.ext.has_pferr_fetch;
+}
+
static inline bool is_cr0_pg(struct kvm_mmu *mmu)
{
return mmu->cpu_role.base.level > 0;
@@ -5672,6 +5677,8 @@ static union kvm_cpu_role kvm_calc_cpu_role(struct kvm_vcpu *vcpu,
role.ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
role.ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
role.ext.efer_lma = ____is_efer_lma(regs);
+
+ role.ext.has_pferr_fetch = role.base.efer_nx | role.base.cr4_smep;
return role;
}

@@ -5825,6 +5832,7 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,

/* NPT requires CR0.PG=1. */
WARN_ON_ONCE(cpu_role.base.direct || !cpu_role.base.guest_mode);
+ cpu_role.base.cr4_smep = false;

root_role = cpu_role.base;
root_role.level = kvm_mmu_get_tdp_level(vcpu);
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index d50085308506..bc6b0a1a1c8a 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -486,7 +486,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,

error:
errcode |= write_fault | user_fault;
- if (fetch_fault && (is_efer_nx(mmu) || is_cr4_smep(mmu)))
+ if (fetch_fault && has_pferr_fetch(mmu))
errcode |= PFERR_FETCH_MASK;

walker->fault.vector = PF_VECTOR;
--
2.52.0