mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-03-16 04:04:06 +00:00
KVM: x86/mmu: Move lpage_disallowed_link further "down" in kvm_mmu_page
Move "lpage_disallowed_link" out of the first 64 bytes, i.e. out of the first cache line, of kvm_mmu_page so that "spt" and to a lesser extent "gfns" land in the first cache line. "lpage_disallowed_link" is accessed relatively infrequently compared to "spt", which is accessed any time KVM is walking and/or manipulating the shadow page tables. No functional change intended. Signed-off-by: Sean Christopherson <seanjc@google.com> Message-Id: <20210901221023.1303578-4-seanjc@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
ca41c34cab
commit
1148bfc47b
1 changed file with 5 additions and 1 deletion
|
@@ -31,9 +31,12 @@ extern bool dbg;
|
|||
#define IS_VALID_PAE_ROOT(x) (!!(x))
|
||||
|
||||
struct kvm_mmu_page {
|
||||
/*
|
||||
* Note, "link" through "spt" fit in a single 64 byte cache line on
|
||||
* 64-bit kernels, keep it that way unless there's a reason not to.
|
||||
*/
|
||||
struct list_head link;
|
||||
struct hlist_node hash_link;
|
||||
struct list_head lpage_disallowed_link;
|
||||
|
||||
bool tdp_mmu_page;
|
||||
bool unsync;
|
||||
|
@@ -59,6 +62,7 @@ struct kvm_mmu_page {
|
|||
struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
|
||||
DECLARE_BITMAP(unsync_child_bitmap, 512);
|
||||
|
||||
struct list_head lpage_disallowed_link;
|
||||
#ifdef CONFIG_X86_32
|
||||
/*
|
||||
* Used out of the mmu-lock to avoid reading spte values while an
|
||||
|
|
Loading…
Add table
Reference in a new issue