diff --git a/Makefile b/Makefile
index 3712b4deafbed..d503d041b5267 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
-SUBLEVEL = 197
+SUBLEVEL = 198
EXTRAVERSION =
NAME = Petit Gorille

diff --git a/block/blk-core.c b/block/blk-core.c
index 4e04c79aa2c27..2407c898ba7d8 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -852,6 +852,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)

q->backing_dev_info->ra_pages =
(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
+ q->backing_dev_info->io_pages =
+ (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
q->backing_dev_info->name = "block";
q->node = node_id;
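
A note on the blk-core hunk: ra_pages is the default readahead window, while io_pages is the upper clamp applied when the readahead code sizes a request; queues allocated through blk_alloc_queue_node() previously left io_pages at zero, which can disable readahead entirely on devices served by this path. As a quick sanity check of the expression (a sketch, assuming the usual build values; both are config and arch dependent):

        /* VM_MAX_READAHEAD is 128 (KB) in this era's include/linux/mm.h,
         * and PAGE_SIZE is 4096 on most configurations. */
        unsigned long pages = (128 * 1024) / 4096;      /* = 32 pages = 128 KiB */
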
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 7de38ae5c18f2..a03239ba1a323 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -6378,14 +6378,14 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
}
}

- bnxt_enable_napi(bp);
-
rc = bnxt_init_nic(bp, irq_re_init);
if (rc) {
netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
- goto open_err;
+ goto open_err_irq;
}

+ bnxt_enable_napi(bp);
+
if (link_re_init) {
mutex_lock(&bp->link_lock);
rc = bnxt_update_phy_setting(bp);
@@ -6410,9 +6410,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
bnxt_vf_reps_open(bp);
return 0;

-open_err:
- bnxt_disable_napi(bp);
-
open_err_irq:
bnxt_del_napi(bp);

diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index b91f92e4e5f22..915ac75b55fc7 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -625,6 +625,10 @@ static const struct usb_device_id products[] = {
USB_DEVICE(0x0a46, 0x1269), /* DM9621A USB to Fast Ethernet Adapter */
.driver_info = (unsigned long)&dm9601_info,
},
+ {
+ USB_DEVICE(0x0586, 0x3427), /* ZyXEL Keenetic Plus DSL xDSL modem */
+ .driver_info = (unsigned long)&dm9601_info,
+ },
{}, // END
};

diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 550ab7707b57f..794dc90aa5c95 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -29,6 +29,7 @@
#include <linux/vfio.h>
#include <linux/vgaarb.h>
#include <linux/nospec.h>
+#include <linux/sched/mm.h>

#include "vfio_pci_private.h"

@@ -181,6 +182,7 @@ no_mmap:

static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
static void vfio_pci_disable(struct vfio_pci_device *vdev);
+static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data);

/*
* INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
@@ -644,6 +646,12 @@ int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
return 0;
}

+struct vfio_devices {
+ struct vfio_device **devices;
+ int cur_index;
+ int max_index;
+};
+
static long vfio_pci_ioctl(void *device_data,
unsigned int cmd, unsigned long arg)
{
@@ -717,7 +725,7 @@ static long vfio_pci_ioctl(void *device_data,
{
void __iomem *io;
size_t size;
- u16 orig_cmd;
+ u16 cmd;

info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.flags = 0;
@@ -737,10 +745,7 @@ static long vfio_pci_ioctl(void *device_data,
* Is it really there? Enable memory decode for
* implicit access in pci_map_rom().
*/
- pci_read_config_word(pdev, PCI_COMMAND, &orig_cmd);
- pci_write_config_word(pdev, PCI_COMMAND,
- orig_cmd | PCI_COMMAND_MEMORY);
-
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
io = pci_map_rom(pdev, &size);
if (io) {
info.flags = VFIO_REGION_INFO_FLAG_READ;
@@ -748,8 +753,8 @@ static long vfio_pci_ioctl(void *device_data,
} else {
info.size = 0;
}
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);

- pci_write_config_word(pdev, PCI_COMMAND, orig_cmd);
break;
}
case VFIO_PCI_VGA_REGION_INDEX:
@@ -885,8 +890,16 @@ static long vfio_pci_ioctl(void *device_data,
return ret;

} else if (cmd == VFIO_DEVICE_RESET) {
- return vdev->reset_works ?
- pci_try_reset_function(vdev->pdev) : -EINVAL;
+ int ret;
+
+ if (!vdev->reset_works)
+ return -EINVAL;
+
+ vfio_pci_zap_and_down_write_memory_lock(vdev);
+ ret = pci_try_reset_function(vdev->pdev);
+ up_write(&vdev->memory_lock);
+
+ return ret;

} else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
struct vfio_pci_hot_reset_info hdr;
@@ -966,8 +979,9 @@ reset_info_exit:
int32_t *group_fds;
struct vfio_pci_group_entry *groups;
struct vfio_pci_group_info info;
+ struct vfio_devices devs = { .cur_index = 0 };
bool slot = false;
- int i, count = 0, ret = 0;
+ int i, group_idx, mem_idx = 0, count = 0, ret = 0;

minsz = offsetofend(struct vfio_pci_hot_reset, count);

@@ -1019,9 +1033,9 @@ reset_info_exit:
* user interface and store the group and iommu ID. This
* ensures the group is held across the reset.
*/
- for (i = 0; i < hdr.count; i++) {
+ for (group_idx = 0; group_idx < hdr.count; group_idx++) {
struct vfio_group *group;
- struct fd f = fdget(group_fds[i]);
+ struct fd f = fdget(group_fds[group_idx]);
if (!f.file) {
ret = -EBADF;
break;
@@ -1034,8 +1048,9 @@ reset_info_exit:
break;
}

- groups[i].group = group;
- groups[i].id = vfio_external_user_iommu_id(group);
+ groups[group_idx].group = group;
+ groups[group_idx].id =
+ vfio_external_user_iommu_id(group);
}

kfree(group_fds);
@@ -1054,14 +1069,65 @@ reset_info_exit:
ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
vfio_pci_validate_devs,
&info, slot);
- if (!ret)
- /* User has access, do the reset */
- ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
- pci_try_reset_bus(vdev->pdev->bus);
+
+ if (ret)
+ goto hot_reset_release;
+
+ devs.max_index = count;
+ devs.devices = kcalloc(count, sizeof(struct vfio_device *),
+ GFP_KERNEL);
+ if (!devs.devices) {
+ ret = -ENOMEM;
+ goto hot_reset_release;
+ }
+
+ /*
+ * We need to get memory_lock for each device, but devices
+ * can share mmap_sem, therefore we need to zap and hold
+ * the vma_lock for each device, and only then get each
+ * memory_lock.
+ */
+ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
+ vfio_pci_try_zap_and_vma_lock_cb,
+ &devs, slot);
+ if (ret)
+ goto hot_reset_release;
+
+ for (; mem_idx < devs.cur_index; mem_idx++) {
+ struct vfio_pci_device *tmp;
+
+ tmp = vfio_device_data(devs.devices[mem_idx]);
+
+ ret = down_write_trylock(&tmp->memory_lock);
+ if (!ret) {
+ ret = -EBUSY;
+ goto hot_reset_release;
+ }
+ mutex_unlock(&tmp->vma_lock);
+ }
+
+ /* User has access, do the reset */
+ ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
+ pci_try_reset_bus(vdev->pdev->bus);

hot_reset_release:
- for (i--; i >= 0; i--)
- vfio_group_put_external_user(groups[i].group);
+ for (i = 0; i < devs.cur_index; i++) {
+ struct vfio_device *device;
+ struct vfio_pci_device *tmp;
+
+ device = devs.devices[i];
+ tmp = vfio_device_data(device);
+
+ if (i < mem_idx)
+ up_write(&tmp->memory_lock);
+ else
+ mutex_unlock(&tmp->vma_lock);
+ vfio_device_put(device);
+ }
+ kfree(devs.devices);
+
+ for (group_idx--; group_idx >= 0; group_idx--)
+ vfio_group_put_external_user(groups[group_idx].group);

kfree(groups);
return ret;
@@ -1120,6 +1186,202 @@ static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
}

+/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */
+static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try)
+{
+ struct vfio_pci_mmap_vma *mmap_vma, *tmp;
+
+ /*
+ * Lock ordering:
+ * vma_lock is nested under mmap_sem for vm_ops callback paths.
+ * The memory_lock semaphore is used by both code paths calling
+ * into this function to zap vmas and the vm_ops.fault callback
+ * to protect the memory enable state of the device.
+ *
+ * When zapping vmas we need to maintain the mmap_sem => vma_lock
+ * ordering, which requires using vma_lock to walk vma_list to
+ * acquire an mm, then dropping vma_lock to get the mmap_sem and
+ * reacquiring vma_lock. This logic is derived from similar
+ * requirements in uverbs_user_mmap_disassociate().
+ *
+ * mmap_sem must always be the top-level lock when it is taken.
+ * Therefore we can only hold the memory_lock write lock when
+ * vma_list is empty, as we'd need to take mmap_sem to clear
+ * entries. vma_list can only be guaranteed empty when holding
+ * vma_lock, thus memory_lock is nested under vma_lock.
+ *
+ * This enables the vm_ops.fault callback to acquire vma_lock,
+ * followed by memory_lock read lock, while already holding
+ * mmap_sem without risk of deadlock.
+ */
+ while (1) {
+ struct mm_struct *mm = NULL;
+
+ if (try) {
+ if (!mutex_trylock(&vdev->vma_lock))
+ return 0;
+ } else {
+ mutex_lock(&vdev->vma_lock);
+ }
+ while (!list_empty(&vdev->vma_list)) {
+ mmap_vma = list_first_entry(&vdev->vma_list,
+ struct vfio_pci_mmap_vma,
+ vma_next);
+ mm = mmap_vma->vma->vm_mm;
+ if (mmget_not_zero(mm))
+ break;
+
+ list_del(&mmap_vma->vma_next);
+ kfree(mmap_vma);
+ mm = NULL;
+ }
+ if (!mm)
+ return 1;
+ mutex_unlock(&vdev->vma_lock);
+
+ if (try) {
+ if (!down_read_trylock(&mm->mmap_sem)) {
+ mmput(mm);
+ return 0;
+ }
+ } else {
+ down_read(&mm->mmap_sem);
+ }
+ if (mmget_still_valid(mm)) {
+ if (try) {
+ if (!mutex_trylock(&vdev->vma_lock)) {
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+ return 0;
+ }
+ } else {
+ mutex_lock(&vdev->vma_lock);
+ }
+ list_for_each_entry_safe(mmap_vma, tmp,
+ &vdev->vma_list, vma_next) {
+ struct vm_area_struct *vma = mmap_vma->vma;
+
+ if (vma->vm_mm != mm)
+ continue;
+
+ list_del(&mmap_vma->vma_next);
+ kfree(mmap_vma);
+
+ zap_vma_ptes(vma, vma->vm_start,
+ vma->vm_end - vma->vm_start);
+ }
+ mutex_unlock(&vdev->vma_lock);
+ }
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+ }
+}
+
+void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device *vdev)
+{
+ vfio_pci_zap_and_vma_lock(vdev, false);
+ down_write(&vdev->memory_lock);
+ mutex_unlock(&vdev->vma_lock);
+}
+
+u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev)
+{
+ u16 cmd;
+
+ down_write(&vdev->memory_lock);
+ pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd);
+ if (!(cmd & PCI_COMMAND_MEMORY))
+ pci_write_config_word(vdev->pdev, PCI_COMMAND,
+ cmd | PCI_COMMAND_MEMORY);
+
+ return cmd;
+}
+
+void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, u16 cmd)
+{
+ pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd);
+ up_write(&vdev->memory_lock);
+}
+
+/* Caller holds vma_lock */
+static int __vfio_pci_add_vma(struct vfio_pci_device *vdev,
+ struct vm_area_struct *vma)
+{
+ struct vfio_pci_mmap_vma *mmap_vma;
+
+ mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL);
+ if (!mmap_vma)
+ return -ENOMEM;
+
+ mmap_vma->vma = vma;
+ list_add(&mmap_vma->vma_next, &vdev->vma_list);
+
+ return 0;
+}
+
+/*
+ * Zap mmaps on open so that we can fault them in on access and therefore
+ * our vma_list only tracks mappings accessed since last zap.
+ */
+static void vfio_pci_mmap_open(struct vm_area_struct *vma)
+{
+ zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+}
+
+static void vfio_pci_mmap_close(struct vm_area_struct *vma)
+{
+ struct vfio_pci_device *vdev = vma->vm_private_data;
+ struct vfio_pci_mmap_vma *mmap_vma;
+
+ mutex_lock(&vdev->vma_lock);
+ list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
+ if (mmap_vma->vma == vma) {
+ list_del(&mmap_vma->vma_next);
+ kfree(mmap_vma);
+ break;
+ }
+ }
+ mutex_unlock(&vdev->vma_lock);
+}
+
+static int vfio_pci_mmap_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct vfio_pci_device *vdev = vma->vm_private_data;
+ int ret = VM_FAULT_NOPAGE;
+
+ mutex_lock(&vdev->vma_lock);
+ down_read(&vdev->memory_lock);
+
+ if (!__vfio_pci_memory_enabled(vdev)) {
+ ret = VM_FAULT_SIGBUS;
+ mutex_unlock(&vdev->vma_lock);
+ goto up_out;
+ }
+
+ if (__vfio_pci_add_vma(vdev, vma)) {
+ ret = VM_FAULT_OOM;
+ mutex_unlock(&vdev->vma_lock);
+ goto up_out;
+ }
+
+ mutex_unlock(&vdev->vma_lock);
+
+ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot))
+ ret = VM_FAULT_SIGBUS;
+
+up_out:
+ up_read(&vdev->memory_lock);
+ return ret;
+}
+
+static const struct vm_operations_struct vfio_pci_mmap_ops = {
+ .open = vfio_pci_mmap_open,
+ .close = vfio_pci_mmap_close,
+ .fault = vfio_pci_mmap_fault,
+};
+
static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
{
struct vfio_pci_device *vdev = device_data;
@@ -1185,8 +1447,14 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;

- return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- req_len, vma->vm_page_prot);
+ /*
+ * See remap_pfn_range(), called from vfio_pci_fault() but we can't
+ * change vm_flags within the fault handler. Set them now.
+ */
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_ops = &vfio_pci_mmap_ops;
+
+ return 0;
}

static void vfio_pci_request(void *device_data, unsigned int count)
@@ -1243,6 +1511,9 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
vdev->irq_type = VFIO_PCI_NUM_IRQS;
mutex_init(&vdev->igate);
spin_lock_init(&vdev->irqlock);
+ mutex_init(&vdev->vma_lock);
+ INIT_LIST_HEAD(&vdev->vma_list);
+ init_rwsem(&vdev->memory_lock);

ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
if (ret) {
@@ -1337,12 +1608,6 @@ static struct pci_driver vfio_pci_driver = {
.err_handler = &vfio_err_handlers,
};

-struct vfio_devices {
- struct vfio_device **devices;
- int cur_index;
- int max_index;
-};
-
static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
{
struct vfio_devices *devs = data;
@@ -1364,6 +1629,39 @@ static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
return 0;
}

+static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data)
+{
+ struct vfio_devices *devs = data;
+ struct vfio_device *device;
+ struct vfio_pci_device *vdev;
+
+ if (devs->cur_index == devs->max_index)
+ return -ENOSPC;
+
+ device = vfio_device_get_from_dev(&pdev->dev);
+ if (!device)
+ return -EINVAL;
+
+ if (pci_dev_driver(pdev) != &vfio_pci_driver) {
+ vfio_device_put(device);
+ return -EBUSY;
+ }
+
+ vdev = vfio_device_data(device);
+
+ /*
+ * Locking multiple devices is prone to deadlock, runaway and
+ * unwind if we hit contention.
+ */
+ if (!vfio_pci_zap_and_vma_lock(vdev, true)) {
+ vfio_device_put(device);
+ return -EBUSY;
+ }
+
+ devs->devices[devs->cur_index++] = device;
+ return 0;
+}
+
/*
* Attempt to do a bus/slot reset if there are devices affected by a reset for
* this device that are needs_reset and all of the affected devices are unused
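
The vfio_pci.c changes introduce a per-device rw_semaphore, memory_lock, serializing the memory-decode state: paths that can legitimately turn decode off (PCI_COMMAND writes, FLR, bus/slot reset) take it for write after zapping any userspace mmaps, while paths that merely touch MMIO take it for read, or use the lock-and-enable helper pair when they must force decode on. A minimal sketch of that pairing, modeled on the ROM-probe hunk above (example_rom_probe() is an illustrative name, not part of the patch):

        static int example_rom_probe(struct vfio_pci_device *vdev)
        {
                void __iomem *io;
                size_t size;
                u16 cmd;

                /* takes memory_lock for write, forces PCI_COMMAND_MEMORY on,
                 * and returns the user's original command register value */
                cmd = vfio_pci_memory_lock_and_enable(vdev);

                io = pci_map_rom(vdev->pdev, &size);    /* decode guaranteed on */
                if (io)
                        pci_unmap_rom(vdev->pdev, io);

                /* restores the user's command value and drops the lock */
                vfio_pci_memory_unlock_and_restore(vdev, cmd);

                return io ? 0 : -ENOMEM;
        }
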
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 36bc8f104e42e..a1a26465d224c 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -398,6 +398,20 @@ static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write)
*(__le32 *)(&p->write[off]) = cpu_to_le32(write);
}

+/* Caller should hold memory_lock semaphore */
+bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
+
+ /*
+ * SR-IOV VF memory enable is handled by the MSE bit in the
+ * PF SR-IOV capability, there's therefore no need to trigger
+ * faults based on the virtual value.
+ */
+ return pdev->is_virtfn || (cmd & PCI_COMMAND_MEMORY);
+}
+
/*
* Restore the *real* BARs after we detect a FLR or backdoor reset.
* (backdoor = some device specific technique that we didn't catch)
@@ -558,13 +572,18 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,

new_cmd = le32_to_cpu(val);

+ phys_io = !!(phys_cmd & PCI_COMMAND_IO);
+ virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
+ new_io = !!(new_cmd & PCI_COMMAND_IO);
+
phys_mem = !!(phys_cmd & PCI_COMMAND_MEMORY);
virt_mem = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_MEMORY);
new_mem = !!(new_cmd & PCI_COMMAND_MEMORY);

- phys_io = !!(phys_cmd & PCI_COMMAND_IO);
- virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
- new_io = !!(new_cmd & PCI_COMMAND_IO);
+ if (!new_mem)
+ vfio_pci_zap_and_down_write_memory_lock(vdev);
+ else
+ down_write(&vdev->memory_lock);

/*
* If the user is writing mem/io enable (new_mem/io) and we
@@ -581,8 +600,11 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
}

count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
- if (count < 0)
+ if (count < 0) {
+ if (offset == PCI_COMMAND)
+ up_write(&vdev->memory_lock);
return count;
+ }

/*
* Save current memory/io enable bits in vconfig to allow for
@@ -593,6 +615,8 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,

*virt_cmd &= cpu_to_le16(~mask);
*virt_cmd |= cpu_to_le16(new_cmd & mask);
+
+ up_write(&vdev->memory_lock);
}

/* Emulate INTx disable */
@@ -830,8 +854,11 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
pos - offset + PCI_EXP_DEVCAP,
&cap);

- if (!ret && (cap & PCI_EXP_DEVCAP_FLR))
+ if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) {
+ vfio_pci_zap_and_down_write_memory_lock(vdev);
pci_try_reset_function(vdev->pdev);
+ up_write(&vdev->memory_lock);
+ }
}

/*
@@ -909,8 +936,11 @@ static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos,
pos - offset + PCI_AF_CAP,
&cap);

- if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP))
+ if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) {
+ vfio_pci_zap_and_down_write_memory_lock(vdev);
pci_try_reset_function(vdev->pdev);
+ up_write(&vdev->memory_lock);
+ }
}

return count;
@@ -1708,6 +1738,15 @@ int vfio_config_init(struct vfio_pci_device *vdev)
vconfig[PCI_INTERRUPT_PIN]);

vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */
+
+ /*
+ * VFs do no implement the memory enable bit of the COMMAND
+ * register therefore we'll not have it set in our initial
+ * copy of config space after pci_enable_device(). For
+ * consistency with PFs, set the virtual enable bit here.
+ */
+ *(__le16 *)&vconfig[PCI_COMMAND] |=
+ cpu_to_le16(PCI_COMMAND_MEMORY);
}

if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx)
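
For reference, the PCI_COMMAND bits that vfio_basic_config_write() now orders against memory_lock (values from include/uapi/linux/pci_regs.h):

        /* PCI_COMMAND_IO     = 0x1   I/O space decode enable
         * PCI_COMMAND_MEMORY = 0x2   memory space decode enable
         *
         * Clearing the memory bit stops the device's BARs from decoding,
         * which is why the write path zaps user mappings before a write
         * that leaves new_mem equal to 0 is allowed through. */
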
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 94594dc63c417..bdfdd506bc588 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -252,6 +252,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
struct pci_dev *pdev = vdev->pdev;
unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
int ret;
+ u16 cmd;

if (!is_irq_none(vdev))
return -EINVAL;
@@ -261,13 +262,16 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
return -ENOMEM;

/* return the number of supported vectors if we can't get all: */
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
if (ret < nvec) {
if (ret > 0)
pci_free_irq_vectors(pdev);
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
kfree(vdev->ctx);
return ret;
}
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);

vdev->num_ctx = nvec;
vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
@@ -290,6 +294,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
struct pci_dev *pdev = vdev->pdev;
struct eventfd_ctx *trigger;
int irq, ret;
+ u16 cmd;

if (vector < 0 || vector >= vdev->num_ctx)
return -EINVAL;
@@ -298,7 +303,11 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,

if (vdev->ctx[vector].trigger) {
irq_bypass_unregister_producer(&vdev->ctx[vector].producer);
+
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
free_irq(irq, vdev->ctx[vector].trigger);
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
+
kfree(vdev->ctx[vector].name);
eventfd_ctx_put(vdev->ctx[vector].trigger);
vdev->ctx[vector].trigger = NULL;
@@ -326,6 +335,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
* such a reset it would be unsuccessful. To avoid this, restore the
* cached value of the message prior to enabling.
*/
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
if (msix) {
struct msi_msg msg;

@@ -335,6 +345,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,

ret = request_irq(irq, vfio_msihandler, 0,
vdev->ctx[vector].name, trigger);
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
if (ret) {
kfree(vdev->ctx[vector].name);
eventfd_ctx_put(trigger);
@@ -379,6 +390,7 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
{
struct pci_dev *pdev = vdev->pdev;
int i;
+ u16 cmd;

for (i = 0; i < vdev->num_ctx; i++) {
vfio_virqfd_disable(&vdev->ctx[i].unmask);
@@ -387,7 +399,9 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)

vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);

+ cmd = vfio_pci_memory_lock_and_enable(vdev);
pci_free_irq_vectors(pdev);
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);

/*
* Both disable paths above use pci_intx_for_msi() to clear DisINTx
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index f561ac1c78a0d..f896cebb5c2c2 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -63,6 +63,11 @@ struct vfio_pci_dummy_resource {
struct list_head res_next;
};

+struct vfio_pci_mmap_vma {
+ struct vm_area_struct *vma;
+ struct list_head vma_next;
+};
+
struct vfio_pci_device {
struct pci_dev *pdev;
void __iomem *barmap[PCI_STD_RESOURCE_END + 1];
@@ -95,6 +100,9 @@ struct vfio_pci_device {
struct eventfd_ctx *err_trigger;
struct eventfd_ctx *req_trigger;
struct list_head dummy_resources_list;
+ struct mutex vma_lock;
+ struct list_head vma_list;
+ struct rw_semaphore memory_lock;
};

#define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
@@ -130,6 +138,14 @@ extern int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
unsigned int type, unsigned int subtype,
const struct vfio_pci_regops *ops,
size_t size, u32 flags, void *data);
+
+extern bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev);
+extern void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device
+ *vdev);
+extern u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev);
+extern void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev,
+ u16 cmd);
+
#ifdef CONFIG_VFIO_PCI_IGD
extern int vfio_pci_igd_init(struct vfio_pci_device *vdev);
#else
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index 357243d76f108..6445461a56013 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -122,6 +122,7 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
size_t x_start = 0, x_end = 0;
resource_size_t end;
void __iomem *io;
+ struct resource *res = &vdev->pdev->resource[bar];
ssize_t done;

if (pci_resource_start(pdev, bar))
@@ -137,6 +138,14 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,

count = min(count, (size_t)(end - pos));

+ if (res->flags & IORESOURCE_MEM) {
+ down_read(&vdev->memory_lock);
+ if (!__vfio_pci_memory_enabled(vdev)) {
+ up_read(&vdev->memory_lock);
+ return -EIO;
+ }
+ }
+
if (bar == PCI_ROM_RESOURCE) {
/*
* The ROM can fill less space than the BAR, so we start the
@@ -144,20 +153,21 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
* filling large ROM BARs much faster.
*/
io = pci_map_rom(pdev, &x_start);
- if (!io)
- return -ENOMEM;
+ if (!io) {
+ done = -ENOMEM;
+ goto out;
+ }
x_end = end;
} else if (!vdev->barmap[bar]) {
- int ret;
-
- ret = pci_request_selected_regions(pdev, 1 << bar, "vfio");
- if (ret)
- return ret;
+ done = pci_request_selected_regions(pdev, 1 << bar, "vfio");
+ if (done)
+ goto out;

io = pci_iomap(pdev, bar, 0);
if (!io) {
pci_release_selected_regions(pdev, 1 << bar);
- return -ENOMEM;
+ done = -ENOMEM;
+ goto out;
}

vdev->barmap[bar] = io;
@@ -176,6 +186,9 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,

if (bar == PCI_ROM_RESOURCE)
pci_unmap_rom(pdev, io);
+out:
+ if (res->flags & IORESOURCE_MEM)
+ up_read(&vdev->memory_lock);

return done;
}
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index f22425501bc16..9c8ed9d7f9aa5 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -336,6 +336,32 @@ static int put_pfn(unsigned long pfn, int prot)
return 0;
}

+static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
+ unsigned long vaddr, unsigned long *pfn,
+ bool write_fault)
+{
+ int ret;
+
+ ret = follow_pfn(vma, vaddr, pfn);
+ if (ret) {
+ bool unlocked = false;
+
+ ret = fixup_user_fault(NULL, mm, vaddr,
+ FAULT_FLAG_REMOTE |
+ (write_fault ? FAULT_FLAG_WRITE : 0),
+ &unlocked);
+ if (unlocked)
+ return -EAGAIN;
+
+ if (ret)
+ return ret;
+
+ ret = follow_pfn(vma, vaddr, pfn);
+ }
+
+ return ret;
+}
+
static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
int prot, unsigned long *pfn)
{
@@ -375,12 +401,16 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,

down_read(&mm->mmap_sem);

+retry:
vma = find_vma_intersection(mm, vaddr, vaddr + 1);

if (vma && vma->vm_flags & VM_PFNMAP) {
- if (!follow_pfn(vma, vaddr, pfn) &&
- is_invalid_reserved_pfn(*pfn))
- ret = 0;
+ ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE);
+ if (ret == -EAGAIN)
+ goto retry;
+
+ if (!ret && !is_invalid_reserved_pfn(*pfn))
+ ret = -EFAULT;
}

up_read(&mm->mmap_sem);
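
The type1 change covers VM_PFNMAP vmas that have never been faulted in: follow_pfn() can only translate an address whose PTE is already present, so the old code simply failed the pin. This pairs with the vfio-pci change above, which stops pre-populating PTEs at mmap() time in favor of a fault handler. A condensed view of the new helper's contract (a sketch, not additional code from the patch):

        /* follow_fault_pfn() returns:
         *   0        *pfn is valid
         *   -EAGAIN  fixup_user_fault() dropped and re-took mmap_sem, so a
         *            previously looked-up vma may be stale; redo the lookup
         *   other    hard failure
         */
        ret = follow_fault_pfn(vma, mm, vaddr, &pfn, prot & IOMMU_WRITE);
        if (ret == -EAGAIN)
                goto retry;     /* re-run find_vma_intersection() */
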
diff --git a/net/core/dev.c b/net/core/dev.c
index 7ec549e481e39..d101f95a47fbc 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5532,12 +5532,13 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
pr_err_once("netif_napi_add() called with weight %d on device %s\n",
weight, dev->name);
napi->weight = weight;
- list_add(&napi->dev_list, &dev->napi_list);
napi->dev = dev;
#ifdef CONFIG_NETPOLL
napi->poll_owner = -1;
#endif
set_bit(NAPI_STATE_SCHED, &napi->state);
+ set_bit(NAPI_STATE_NPSVC, &napi->state);
+ list_add_rcu(&napi->dev_list, &dev->napi_list);
napi_hash_add(napi);
}
EXPORT_SYMBOL(netif_napi_add);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index abab3753a9e0e..0b247f7cd5833 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -179,7 +179,7 @@ static void poll_napi(struct net_device *dev)
struct napi_struct *napi;
int cpu = smp_processor_id();

- list_for_each_entry(napi, &dev->napi_list, dev_list) {
+ list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
poll_one_napi(napi);
smp_store_release(&napi->poll_owner, -1);
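
The net/core/dev.c and net/core/netpoll.c hunks are two halves of one fix: netif_napi_add() now finishes initializing the napi_struct before publishing it (with NAPI_STATE_NPSVC pre-set so netpoll skips the instance until napi_enable() clears the bit), and publication happens via list_add_rcu() so that poll_napi()'s new RCU iteration never observes a half-built entry. A generic sketch of that publish/read idiom (struct item, head and process() are illustrative names, not from this patch; netpoll's actual read side relies on its own atomic calling context rather than this explicit rcu_read_lock()):

        struct item {
                struct list_head node;
                int value;
        };
        static LIST_HEAD(head);

        /* writer: initialize the whole object first, then publish */
        struct item *it = kmalloc(sizeof(*it), GFP_KERNEL);
        if (!it)
                return -ENOMEM;
        it->value = 42;
        list_add_rcu(&it->node, &head);

        /* reader: may run concurrently with the writer */
        rcu_read_lock();
        list_for_each_entry_rcu(it, &head, node)
                process(it);
        rcu_read_unlock();
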
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index 41d0e95d171e1..b1a1718495f34 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -99,6 +99,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry)
kfree(netlbl_domhsh_addr6_entry(iter6));
}
#endif /* IPv6 */
+ kfree(ptr->def.addrsel);
}
kfree(ptr->domain);
kfree(ptr);
@@ -550,6 +551,8 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
goto add_return;
}
#endif /* IPv6 */
+ /* cleanup the new entry since we've moved everything over */
+ netlbl_domhsh_free_entry(&entry->rcu);
} else
ret_val = -EINVAL;

@@ -593,6 +596,12 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
{
int ret_val = 0;
struct audit_buffer *audit_buf;
+ struct netlbl_af4list *iter4;
+ struct netlbl_domaddr4_map *map4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct netlbl_af6list *iter6;
+ struct netlbl_domaddr6_map *map6;
+#endif /* IPv6 */

if (entry == NULL)
return -ENOENT;
@@ -610,6 +619,9 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
ret_val = -ENOENT;
spin_unlock(&netlbl_domhsh_lock);

+ if (ret_val)
+ return ret_val;
+
audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_DEL, audit_info);
if (audit_buf != NULL) {
audit_log_format(audit_buf,
@@ -619,40 +631,29 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
audit_log_end(audit_buf);
}

- if (ret_val == 0) {
- struct netlbl_af4list *iter4;
- struct netlbl_domaddr4_map *map4;
-#if IS_ENABLED(CONFIG_IPV6)
- struct netlbl_af6list *iter6;
- struct netlbl_domaddr6_map *map6;
-#endif /* IPv6 */
-
- switch (entry->def.type) {
- case NETLBL_NLTYPE_ADDRSELECT:
- netlbl_af4list_foreach_rcu(iter4,
- &entry->def.addrsel->list4) {
- map4 = netlbl_domhsh_addr4_entry(iter4);
- cipso_v4_doi_putdef(map4->def.cipso);
- }
+ switch (entry->def.type) {
+ case NETLBL_NLTYPE_ADDRSELECT:
+ netlbl_af4list_foreach_rcu(iter4, &entry->def.addrsel->list4) {
+ map4 = netlbl_domhsh_addr4_entry(iter4);
+ cipso_v4_doi_putdef(map4->def.cipso);
+ }
#if IS_ENABLED(CONFIG_IPV6)
- netlbl_af6list_foreach_rcu(iter6,
- &entry->def.addrsel->list6) {
- map6 = netlbl_domhsh_addr6_entry(iter6);
- calipso_doi_putdef(map6->def.calipso);
- }
+ netlbl_af6list_foreach_rcu(iter6, &entry->def.addrsel->list6) {
+ map6 = netlbl_domhsh_addr6_entry(iter6);
+ calipso_doi_putdef(map6->def.calipso);
+ }
#endif /* IPv6 */
- break;
- case NETLBL_NLTYPE_CIPSOV4:
- cipso_v4_doi_putdef(entry->def.cipso);
- break;
+ break;
+ case NETLBL_NLTYPE_CIPSOV4:
+ cipso_v4_doi_putdef(entry->def.cipso);
+ break;
#if IS_ENABLED(CONFIG_IPV6)
- case NETLBL_NLTYPE_CALIPSO:
- calipso_doi_putdef(entry->def.calipso);
- break;
+ case NETLBL_NLTYPE_CALIPSO:
+ calipso_doi_putdef(entry->def.calipso);
+ break;
#endif /* IPv6 */
- }
- call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
}
+ call_rcu(&entry->rcu, netlbl_domhsh_free_entry);

return ret_val;
}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 442780515760e..1f154276a681f 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -7086,8 +7086,6 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)

pr_debug("%s: begins, snum:%d\n", __func__, snum);

- local_bh_disable();
-
if (snum == 0) {
/* Search for an available port. */
int low, high, remaining, index;
@@ -7106,20 +7104,21 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
continue;
index = sctp_phashfn(sock_net(sk), rover);
head = &sctp_port_hashtable[index];
- spin_lock(&head->lock);
+ spin_lock_bh(&head->lock);
sctp_for_each_hentry(pp, &head->chain)
if ((pp->port == rover) &&
net_eq(sock_net(sk), pp->net))
goto next;
break;
next:
- spin_unlock(&head->lock);
+ spin_unlock_bh(&head->lock);
+ cond_resched();
} while (--remaining > 0);

/* Exhausted local port range during search? */
ret = 1;
if (remaining <= 0)
- goto fail;
+ return ret;

/* OK, here is the one we will use. HEAD (the port
* hash table list entry) is non-NULL and we hold it's
@@ -7134,7 +7133,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
* port iterator, pp being NULL.
*/
head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
- spin_lock(&head->lock);
+ spin_lock_bh(&head->lock);
sctp_for_each_hentry(pp, &head->chain) {
if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
goto pp_found;
@@ -7218,10 +7217,7 @@ success:
ret = 0;

fail_unlock:
- spin_unlock(&head->lock);
-
-fail:
- local_bh_enable();
+ spin_unlock_bh(&head->lock);
return ret;
}

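The sctp conversion is equivalent per bucket, since spin_lock_bh() is local_bh_disable() followed by spin_lock(); what changes is scope. Bottom halves used to stay disabled across the entire port search, which on a heavily populated hash table could stall softirq processing long enough to trigger watchdog complaints; now they are disabled only while a bucket lock is actually held, which is also what makes the added cond_resched() legal. In outline:

        /* before: one long BH-off region spanning the whole search */
        local_bh_disable();
        spin_lock(&head->lock);
        /* ... scan every candidate port ... */
        spin_unlock(&head->lock);
        local_bh_enable();

        /* after: BHs are off only per bucket, so the loop may reschedule */
        spin_lock_bh(&head->lock);
        /* ... scan one bucket ... */
        spin_unlock_bh(&head->lock);
        cond_resched();
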
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index d9ec6335c7dcf..cbec2242f79ab 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2126,18 +2126,21 @@ static int tipc_shutdown(struct socket *sock, int how)
lock_sock(sk);

__tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
- sk->sk_shutdown = SEND_SHUTDOWN;
+ if (tipc_sk_type_connectionless(sk))
+ sk->sk_shutdown = SHUTDOWN_MASK;
+ else
+ sk->sk_shutdown = SEND_SHUTDOWN;

if (sk->sk_state == TIPC_DISCONNECTING) {
/* Discard any unreceived messages */
__skb_queue_purge(&sk->sk_receive_queue);

- /* Wake up anyone sleeping in poll */
- sk->sk_state_change(sk);
res = 0;
} else {
res = -ENOTCONN;
}
+ /* Wake up anyone sleeping in poll. */
+ sk->sk_state_change(sk);

release_sock(sk);
return res;
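
Two things happen in the tipc hunk: connectionless sockets, which have no shutdown handshake to wait out, now mark both directions closed at once, and the poll wake-up moves out of the state check so sleepers are woken on every shutdown() rather than only when the socket is already TIPC_DISCONNECTING. The shutdown constants involved (from include/net/sock.h):

        /* RCV_SHUTDOWN  = 1
         * SEND_SHUTDOWN = 2
         * SHUTDOWN_MASK = 3   (RCV_SHUTDOWN | SEND_SHUTDOWN)
         */
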
diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c
index d3fdc463a884e..1e61cdce28952 100644
--- a/sound/firewire/tascam/tascam.c
+++ b/sound/firewire/tascam/tascam.c
@@ -225,11 +225,39 @@ static void snd_tscm_remove(struct fw_unit *unit)
}

static const struct ieee1394_device_id snd_tscm_id_table[] = {
+ // Tascam, FW-1884.
{
.match_flags = IEEE1394_MATCH_VENDOR_ID |
- IEEE1394_MATCH_SPECIFIER_ID,
+ IEEE1394_MATCH_SPECIFIER_ID |
+ IEEE1394_MATCH_VERSION,
.vendor_id = 0x00022e,
.specifier_id = 0x00022e,
+ .version = 0x800000,
+ },
+ // Tascam, FE-8 (.version = 0x800001)
+ // This kernel module doesn't support FE-8 because the most of features
+ // can be implemented in userspace without any specific support of this
+ // module.
+ //
+ // .version = 0x800002 is unknown.
+ //
+ // Tascam, FW-1082.
+ {
+ .match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_SPECIFIER_ID |
+ IEEE1394_MATCH_VERSION,
+ .vendor_id = 0x00022e,
+ .specifier_id = 0x00022e,
+ .version = 0x800003,
+ },
+ // Tascam, FW-1804.
+ {
+ .match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_SPECIFIER_ID |
+ IEEE1394_MATCH_VERSION,
+ .vendor_id = 0x00022e,
+ .specifier_id = 0x00022e,
+ .version = 0x800004,
},
/* FE-08 requires reverse-engineering because it just has faders. */
{}