mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-06-06 14:48:06 +00:00
ARM: KVM: Unmap IPA on memslot delete/move
Currently, when a KVM memory region is deleted or moved by a KVM_SET_USER_MEMORY_REGION ioctl, the corresponding intermediate physical address (IPA) range is not unmapped. This patch corrects this by unmapping the region's IPA range in kvm_arch_commit_memory_region using unmap_stage2_range. Signed-off-by: Eric Auger <eric.auger@linaro.org> Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
This commit is contained in:
parent
4f853a714b
commit
df6ce24f2e
2 changed files with 46 additions and 37 deletions
|
@ -155,16 +155,6 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
|
||||||
return VM_FAULT_SIGBUS;
|
return VM_FAULT_SIGBUS;
|
||||||
}
|
}
|
||||||
|
|
||||||
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
|
|
||||||
struct kvm_memory_slot *dont)
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
|
|
||||||
unsigned long npages)
|
|
||||||
{
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* kvm_arch_destroy_vm - destroy the VM data structure
|
* kvm_arch_destroy_vm - destroy the VM data structure
|
||||||
|
@ -225,33 +215,6 @@ long kvm_arch_dev_ioctl(struct file *filp,
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
void kvm_arch_memslots_updated(struct kvm *kvm)
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
int kvm_arch_prepare_memory_region(struct kvm *kvm,
|
|
||||||
struct kvm_memory_slot *memslot,
|
|
||||||
struct kvm_userspace_memory_region *mem,
|
|
||||||
enum kvm_mr_change change)
|
|
||||||
{
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
void kvm_arch_commit_memory_region(struct kvm *kvm,
|
|
||||||
struct kvm_userspace_memory_region *mem,
|
|
||||||
const struct kvm_memory_slot *old,
|
|
||||||
enum kvm_mr_change change)
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
void kvm_arch_flush_shadow_all(struct kvm *kvm)
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
|
|
||||||
struct kvm_memory_slot *slot)
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
|
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
|
||||||
{
|
{
|
||||||
|
|
|
@ -1111,3 +1111,49 @@ out:
|
||||||
free_hyp_pgds();
|
free_hyp_pgds();
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void kvm_arch_commit_memory_region(struct kvm *kvm,
|
||||||
|
struct kvm_userspace_memory_region *mem,
|
||||||
|
const struct kvm_memory_slot *old,
|
||||||
|
enum kvm_mr_change change)
|
||||||
|
{
|
||||||
|
gpa_t gpa = old->base_gfn << PAGE_SHIFT;
|
||||||
|
phys_addr_t size = old->npages << PAGE_SHIFT;
|
||||||
|
if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
|
||||||
|
spin_lock(&kvm->mmu_lock);
|
||||||
|
unmap_stage2_range(kvm, gpa, size);
|
||||||
|
spin_unlock(&kvm->mmu_lock);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
int kvm_arch_prepare_memory_region(struct kvm *kvm,
|
||||||
|
struct kvm_memory_slot *memslot,
|
||||||
|
struct kvm_userspace_memory_region *mem,
|
||||||
|
enum kvm_mr_change change)
|
||||||
|
{
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
|
||||||
|
struct kvm_memory_slot *dont)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
|
||||||
|
unsigned long npages)
|
||||||
|
{
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
void kvm_arch_memslots_updated(struct kvm *kvm)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
void kvm_arch_flush_shadow_all(struct kvm *kvm)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
|
||||||
|
struct kvm_memory_slot *slot)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
Loading…
Add table
Reference in a new issue