[RFC, PATCH 09/12] KVM: TDX: Preallocate PAMT pages to be used in page fault path

From: Kirill A. Shutemov
Date: Fri May 02 2025 - 09:10:55 EST


Preallocate pages to be used in the link_external_spt() and
set_external_spte() paths.

In the worst-case scenario, handling a page fault might require
tdx_nr_pamt_pages() pages for each page table level.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
---
arch/x86/include/asm/kvm_host.h | 2 ++
arch/x86/kvm/mmu/mmu.c | 10 ++++++++++
2 files changed, 12 insertions(+)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 91958c55f918..a5661499a176 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -849,6 +849,8 @@ struct kvm_vcpu_arch {
*/
struct kvm_mmu_memory_cache mmu_external_spt_cache;

+ struct kvm_mmu_memory_cache pamt_page_cache;
+
/*
* QEMU userspace and the guest each have their own FPU state.
* In vcpu_run, we switch between the user and guest FPU contexts.
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index a284dce227a0..7bfa0dc50440 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -616,6 +616,15 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
if (r)
return r;
}
+
+ if (vcpu->kvm->arch.vm_type == KVM_X86_TDX_VM) {
+ int nr = tdx_nr_pamt_pages(tdx_get_sysinfo());
+ r = kvm_mmu_topup_memory_cache(&vcpu->arch.pamt_page_cache,
+ nr * PT64_ROOT_MAX_LEVEL);
+ if (r)
+ return r;
+ }
+
return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
PT64_ROOT_MAX_LEVEL);
}
@@ -626,6 +635,7 @@ static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadowed_info_cache);
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_external_spt_cache);
+ kvm_mmu_free_memory_cache(&vcpu->arch.pamt_page_cache);
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
}

--
2.47.2