KVM: convert slots_lock to a mutex
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent f656ce0185
commit 79fac95ecf

11 changed files with 39 additions and 39 deletions
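Every hunk below applies the same mechanical substitution: slots_lock changes from a struct rw_semaphore to a struct mutex in struct kvm, the init_rwsem() call in kvm_create_vm() becomes mutex_init(), each write-side down_write()/up_write() pair becomes mutex_lock()/mutex_unlock(), and the "caller must have write lock" comments are reworded to "caller must hold slots_lock". A minimal before/after sketch of that pattern follows; the struct and functions are illustrative placeholders, not code from the patch:

#include <linux/mutex.h>
#include <linux/rwsem.h>

/* Hypothetical container standing in for struct kvm. */
struct demo_vm {
        struct rw_semaphore old_slots_lock;     /* before: struct rw_semaphore slots_lock; */
        struct mutex        new_slots_lock;     /* after:  struct mutex slots_lock;        */
};

static void demo_vm_init(struct demo_vm *vm)
{
        init_rwsem(&vm->old_slots_lock);        /* before */
        mutex_init(&vm->new_slots_lock);        /* after  */
}

static void demo_vm_update_slots(struct demo_vm *vm)
{
        down_write(&vm->old_slots_lock);        /* before: writer-side rwsem acquire */
        /* ... modify slot state ... */
        up_write(&vm->old_slots_lock);

        mutex_lock(&vm->new_slots_lock);        /* after: plain mutex acquire */
        /* ... modify slot state ... */
        mutex_unlock(&vm->new_slots_lock);
}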
@@ -1834,7 +1834,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
         struct kvm_memory_slot *memslot;
         int is_dirty = 0;
 
-        down_write(&kvm->slots_lock);
+        mutex_lock(&kvm->slots_lock);
         spin_lock(&kvm->arch.dirty_log_lock);
 
         r = kvm_ia64_sync_dirty_log(kvm, log);
@@ -1854,7 +1854,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
         }
         r = 0;
 out:
-        up_write(&kvm->slots_lock);
+        mutex_unlock(&kvm->slots_lock);
         spin_unlock(&kvm->arch.dirty_log_lock);
         return r;
 }

@@ -857,7 +857,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
         int is_dirty = 0;
         int r, n;
 
-        down_write(&kvm->slots_lock);
+        mutex_lock(&kvm->slots_lock);
 
         r = kvm_get_dirty_log(kvm, log, &is_dirty);
         if (r)
@@ -879,7 +879,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 
         r = 0;
 out:
-        up_write(&kvm->slots_lock);
+        mutex_unlock(&kvm->slots_lock);
         return r;
 }

@@ -605,7 +605,7 @@ static const struct kvm_io_device_ops speaker_dev_ops = {
         .write = speaker_ioport_write,
 };
 
-/* Caller must have writers lock on slots_lock */
+/* Caller must hold slots_lock */
 struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
 {
         struct kvm_pit *pit;

@@ -533,9 +533,9 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm)
          * Initialize PIO device
          */
         kvm_iodevice_init(&s->dev, &picdev_ops);
-        down_write(&kvm->slots_lock);
+        mutex_lock(&kvm->slots_lock);
         ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, &s->dev);
-        up_write(&kvm->slots_lock);
+        mutex_unlock(&kvm->slots_lock);
         if (ret < 0) {
                 kfree(s);
                 return NULL;

@@ -2223,7 +2223,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
         struct kvm_userspace_memory_region kvm_userspace_mem;
         int r = 0;
 
-        down_write(&kvm->slots_lock);
+        mutex_lock(&kvm->slots_lock);
         if (kvm->arch.apic_access_page)
                 goto out;
         kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
@@ -2236,7 +2236,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
 
         kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
 out:
-        up_write(&kvm->slots_lock);
+        mutex_unlock(&kvm->slots_lock);
         return r;
 }
 
@@ -2245,7 +2245,7 @@ static int alloc_identity_pagetable(struct kvm *kvm)
         struct kvm_userspace_memory_region kvm_userspace_mem;
         int r = 0;
 
-        down_write(&kvm->slots_lock);
+        mutex_lock(&kvm->slots_lock);
         if (kvm->arch.ept_identity_pagetable)
                 goto out;
         kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
@@ -2260,7 +2260,7 @@ static int alloc_identity_pagetable(struct kvm *kvm)
         kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
                         kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
 out:
-        up_write(&kvm->slots_lock);
+        mutex_unlock(&kvm->slots_lock);
         return r;
 }

@@ -2208,14 +2208,14 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
         if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
                 return -EINVAL;
 
-        down_write(&kvm->slots_lock);
+        mutex_lock(&kvm->slots_lock);
         spin_lock(&kvm->mmu_lock);
 
         kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
         kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
 
         spin_unlock(&kvm->mmu_lock);
-        up_write(&kvm->slots_lock);
+        mutex_unlock(&kvm->slots_lock);
         return 0;
 }
 
@@ -2292,7 +2292,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
         if (!aliases)
                 goto out;
 
-        down_write(&kvm->slots_lock);
+        mutex_lock(&kvm->slots_lock);
 
         /* invalidate any gfn reference in case of deletion/shrinking */
         memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
@@ -2328,7 +2328,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
         r = 0;
 
 out_unlock:
-        up_write(&kvm->slots_lock);
+        mutex_unlock(&kvm->slots_lock);
 out:
         return r;
 }
@@ -2462,7 +2462,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
         unsigned long is_dirty = 0;
         unsigned long *dirty_bitmap = NULL;
 
-        down_write(&kvm->slots_lock);
+        mutex_lock(&kvm->slots_lock);
 
         r = -EINVAL;
         if (log->slot >= KVM_MEMORY_SLOTS)
@@ -2512,7 +2512,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 out_free:
         vfree(dirty_bitmap);
 out:
-        up_write(&kvm->slots_lock);
+        mutex_unlock(&kvm->slots_lock);
         return r;
 }
 
@@ -2625,7 +2625,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
                                 sizeof(struct kvm_pit_config)))
                         goto out;
         create_pit:
-                down_write(&kvm->slots_lock);
+                mutex_lock(&kvm->slots_lock);
                 r = -EEXIST;
                 if (kvm->arch.vpit)
                         goto create_pit_unlock;
@@ -2634,7 +2634,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
                 if (kvm->arch.vpit)
                         r = 0;
         create_pit_unlock:
-                up_write(&kvm->slots_lock);
+                mutex_unlock(&kvm->slots_lock);
                 break;
         case KVM_IRQ_LINE_STATUS:
         case KVM_IRQ_LINE: {

@@ -161,7 +161,7 @@ struct kvm_memslots {
 struct kvm {
         spinlock_t mmu_lock;
         spinlock_t requests_lock;
-        struct rw_semaphore slots_lock;
+        struct mutex slots_lock;
         struct mm_struct *mm; /* userspace tied to this vm */
         struct kvm_memslots *memslots;
         struct srcu_struct srcu;

@@ -110,9 +110,9 @@ int kvm_coalesced_mmio_init(struct kvm *kvm)
         dev->kvm = kvm;
         kvm->coalesced_mmio_dev = dev;
 
-        down_write(&kvm->slots_lock);
+        mutex_lock(&kvm->slots_lock);
         ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
-        up_write(&kvm->slots_lock);
+        mutex_unlock(&kvm->slots_lock);
         if (ret < 0)
                 goto out_free_dev;
 
@@ -140,16 +140,16 @@ int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
         if (dev == NULL)
                 return -EINVAL;
 
-        down_write(&kvm->slots_lock);
+        mutex_lock(&kvm->slots_lock);
         if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
-                up_write(&kvm->slots_lock);
+                mutex_unlock(&kvm->slots_lock);
                 return -ENOBUFS;
         }
 
         dev->zone[dev->nb_zones] = *zone;
         dev->nb_zones++;
 
-        up_write(&kvm->slots_lock);
+        mutex_unlock(&kvm->slots_lock);
         return 0;
 }
 
@@ -163,7 +163,7 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
         if (dev == NULL)
                 return -EINVAL;
 
-        down_write(&kvm->slots_lock);
+        mutex_lock(&kvm->slots_lock);
 
         i = dev->nb_zones;
         while(i) {
@@ -181,7 +181,7 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
                 i--;
         }
 
-        up_write(&kvm->slots_lock);
+        mutex_unlock(&kvm->slots_lock);
 
         return 0;
 }

@@ -508,7 +508,7 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
         else
                 p->wildcard = true;
 
-        down_write(&kvm->slots_lock);
+        mutex_lock(&kvm->slots_lock);
 
         /* Verify that there isnt a match already */
         if (ioeventfd_check_collision(kvm, p)) {
@@ -524,12 +524,12 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 
         list_add_tail(&p->list, &kvm->ioeventfds);
 
-        up_write(&kvm->slots_lock);
+        mutex_unlock(&kvm->slots_lock);
 
         return 0;
 
 unlock_fail:
-        up_write(&kvm->slots_lock);
+        mutex_unlock(&kvm->slots_lock);
 
 fail:
         kfree(p);
@@ -551,7 +551,7 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
         if (IS_ERR(eventfd))
                 return PTR_ERR(eventfd);
 
-        down_write(&kvm->slots_lock);
+        mutex_lock(&kvm->slots_lock);
 
         list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
                 bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);
@@ -571,7 +571,7 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
                         break;
         }
 
-        up_write(&kvm->slots_lock);
+        mutex_unlock(&kvm->slots_lock);
 
         eventfd_ctx_put(eventfd);
 
@@ -372,9 +372,9 @@ int kvm_ioapic_init(struct kvm *kvm)
         kvm_ioapic_reset(ioapic);
         kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
         ioapic->kvm = kvm;
-        down_write(&kvm->slots_lock);
+        mutex_lock(&kvm->slots_lock);
         ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
-        up_write(&kvm->slots_lock);
+        mutex_unlock(&kvm->slots_lock);
         if (ret < 0)
                 kfree(ioapic);
 
@@ -429,7 +429,7 @@ static struct kvm *kvm_create_vm(void)
         kvm_eventfd_init(kvm);
         mutex_init(&kvm->lock);
         mutex_init(&kvm->irq_lock);
-        init_rwsem(&kvm->slots_lock);
+        mutex_init(&kvm->slots_lock);
         atomic_set(&kvm->users_count, 1);
         spin_lock(&kvm_lock);
         list_add(&kvm->vm_list, &vm_list);
@@ -763,9 +763,9 @@ int kvm_set_memory_region(struct kvm *kvm,
 {
         int r;
 
-        down_write(&kvm->slots_lock);
+        mutex_lock(&kvm->slots_lock);
         r = __kvm_set_memory_region(kvm, mem, user_alloc);
-        up_write(&kvm->slots_lock);
+        mutex_unlock(&kvm->slots_lock);
         return r;
 }
 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
@@ -1997,7 +1997,7 @@ int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
         return -EOPNOTSUPP;
 }
 
-/* Caller must have write lock on slots_lock. */
+/* Caller must hold slots_lock. */
 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                             struct kvm_io_device *dev)
 {
@@ -2019,7 +2019,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
         return 0;
 }
 
-/* Caller must have write lock on slots_lock. */
+/* Caller must hold slots_lock. */
 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                               struct kvm_io_device *dev)
 {