Mirror of https://github.com/Fishwaldo/Star64_linux.git (synced 2025-03-16 04:04:06 +00:00)
Networking fixes for 5.15-rc8/final, including fixes from WiFi (mac80211) and BPF.

Merge tag 'net-5.15-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Including fixes from WiFi (mac80211), and BPF.

  Current release - regressions:

   - skb_expand_head: adjust skb->truesize to fix socket memory accounting

   - mptcp: fix corrupt receiver key in MPC + data + checksum

  Previous releases - regressions:

   - multicast: calculate csum of looped-back and forwarded packets

   - cgroup: fix memory leak caused by missing cgroup_bpf_offline

   - cfg80211: fix management registrations locking, prevent list corruption

   - cfg80211: correct false positive in bridge/4addr mode check

   - tcp_bpf: fix race in the tcp_bpf_send_verdict resulting in reusing previous verdict

  Previous releases - always broken:

   - sctp: enhancements for the verification tag, prevent attackers from killing SCTP sessions

   - tipc: fix size validations for the MSG_CRYPTO type

   - mac80211: mesh: fix HE operation element length check, prevent out of bound access

   - tls: fix sign of socket errors, prevent positive error codes being reported from read()/write()

   - cfg80211: scan: extend RCU protection in cfg80211_add_nontrans_list()

   - implement ->sock_is_readable() for UDP and AF_UNIX, fix poll() for sockets in a BPF sockmap

   - bpf: fix potential race in tail call compatibility check resulting in two operations which would make the map incompatible succeeding

   - bpf: prevent increasing bpf_jit_limit above max

   - bpf: fix error usage of map_fd and fdget() in generic batch update

   - phy: ethtool: lock the phy for consistency of results

   - prevent infinite while loop in skb_tx_hash() when Tx races with driver reconfiguring the queue <> traffic class mapping

   - usbnet: fixes for bad HW conjured by syzbot

   - xen: stop tx queues during live migration, prevent UAF

   - net-sysfs: initialize uid and gid before calling net_ns_get_ownership

   - mlxsw: prevent Rx stalls under memory pressure"

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

* tag 'net-5.15-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (67 commits)
  Revert "net: hns3: fix pause config problem after autoneg disabled"
  mptcp: fix corrupt receiver key in MPC + data + checksum
  riscv, bpf: Fix potential NULL dereference
  octeontx2-af: Fix possible null pointer dereference.
  octeontx2-af: Display all enabled PF VF rsrc_alloc entries.
  octeontx2-af: Check whether ipolicers exists
  net: ethernet: microchip: lan743x: Fix skb allocation failure
  net/tls: Fix flipped sign in async_wait.err assignment
  net/tls: Fix flipped sign in tls_err_abort() calls
  net/smc: Correct spelling mistake to TCPF_SYN_RECV
  net/smc: Fix smc_link->llc_testlink_time overflow
  nfp: bpf: relax prog rejection for mtu check through max_pkt_offset
  vmxnet3: do not stop tx queues after netif_device_detach()
  r8169: Add device 10ec:8162 to driver r8169
  ptp: Document the PTP_CLK_MAGIC ioctl number
  usbnet: fix error return code in usbnet_probe()
  net: hns3: adjust string spaces of some parameters of tx bd info in debugfs
  net: hns3: expand buffer len for some debugfs command
  net: hns3: add more string spaces for dumping packets number of queue info in debugfs
  net: hns3: fix data endian problem of some functions of debugfs
  ...
This commit is contained in:

commit 411a44c24a (71 changed files with 747 additions and 462 deletions)
@@ -104,6 +104,7 @@ Code  Seq#    Include File                  Comments
 '8'   all    SNP8023 advanced NIC card
                                            <mailto:mcr@solidum.com>
 ';'   64-7F  linux/vfio.h
+'='   00-3f  uapi/linux/ptp_clock.h        <mailto:richardcochran@gmail.com>
 '@'   00-0F  linux/radeonfb.h              conflict!
 '@'   00-0F  drivers/video/aty/aty128fb.c  conflict!
 'A'   00-1F  linux/apm_bios.h              conflict!
@@ -11291,7 +11291,6 @@ F: Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst
 F:    drivers/net/ethernet/marvell/octeontx2/af/

 MARVELL PRESTERA ETHERNET SWITCH DRIVER
-M:    Vadym Kochan <vkochan@marvell.com>
 M:    Taras Chornyi <tchornyi@marvell.com>
 S:    Supported
 W:    https://github.com/Marvell-switching/switchdev-prestera
@@ -1136,6 +1136,11 @@ out:
     return prog;
 }

+u64 bpf_jit_alloc_exec_limit(void)
+{
+    return BPF_JIT_REGION_SIZE;
+}
+
 void *bpf_jit_alloc_exec(unsigned long size)
 {
     return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
@@ -125,7 +125,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)

     if (i == NR_JIT_ITERATIONS) {
         pr_err("bpf-jit: image did not converge in <%d passes!\n", i);
-        bpf_jit_binary_free(jit_data->header);
+        if (jit_data->header)
+            bpf_jit_binary_free(jit_data->header);
         prog = orig_prog;
         goto out_offset;
     }

@@ -166,6 +167,11 @@ out:
     return prog;
 }

+u64 bpf_jit_alloc_exec_limit(void)
+{
+    return BPF_JIT_REGION_SIZE;
+}
+
 void *bpf_jit_alloc_exec(unsigned long size)
 {
     return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
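The two JIT hunks above pair with the kernel/bpf/core.c change further down: each arch reports its executable-region size, the default chargeable JIT budget is a quarter of it, and bpf_jit_limit_max records the ceiling that a later sysctl write may not exceed. A small stand-alone C sketch of that arithmetic; the constants are illustrative stand-ins, not the kernel's values:

#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define BPF_JIT_REGION_SIZE (128ULL << 20)  /* pretend 128 MiB exec region */

/* round x up to a multiple of a, like the kernel's round_up() */
static unsigned long long round_up_to(unsigned long long x,
                                      unsigned long long a)
{
    return (x + a - 1) / a * a;
}

int main(void)
{
    unsigned long long limit_max = BPF_JIT_REGION_SIZE;           /* new cap */
    unsigned long long limit = round_up_to(limit_max >> 2, PAGE_SIZE);

    printf("max: %llu MiB, default: %llu MiB\n",
           limit_max >> 20, limit >> 20);
    return 0;  /* a sysctl write above limit_max would now be rejected */
}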
@@ -137,7 +137,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
         .name = "uc",
         .cmd = HNAE3_DBG_CMD_MAC_UC,
         .dentry = HNS3_DBG_DENTRY_MAC,
-        .buf_len = HNS3_DBG_READ_LEN,
+        .buf_len = HNS3_DBG_READ_LEN_128KB,
         .init = hns3_dbg_common_file_init,
     },
     {

@@ -256,7 +256,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
         .name = "tqp",
         .cmd = HNAE3_DBG_CMD_REG_TQP,
         .dentry = HNS3_DBG_DENTRY_REG,
-        .buf_len = HNS3_DBG_READ_LEN,
+        .buf_len = HNS3_DBG_READ_LEN_128KB,
         .init = hns3_dbg_common_file_init,
     },
     {

@@ -298,7 +298,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
         .name = "fd_tcam",
         .cmd = HNAE3_DBG_CMD_FD_TCAM,
         .dentry = HNS3_DBG_DENTRY_FD,
-        .buf_len = HNS3_DBG_READ_LEN,
+        .buf_len = HNS3_DBG_READ_LEN_1MB,
         .init = hns3_dbg_common_file_init,
     },
     {

@@ -462,7 +462,7 @@ static const struct hns3_dbg_item rx_queue_info_items[] = {
     { "TAIL", 2 },
     { "HEAD", 2 },
     { "FBDNUM", 2 },
-    { "PKTNUM", 2 },
+    { "PKTNUM", 5 },
     { "COPYBREAK", 2 },
-    { "RING_EN", 2 },
+    { "RX_RING_EN", 2 },

@@ -565,7 +565,7 @@ static const struct hns3_dbg_item tx_queue_info_items[] = {
     { "HEAD", 2 },
     { "FBDNUM", 2 },
     { "OFFSET", 2 },
-    { "PKTNUM", 2 },
+    { "PKTNUM", 5 },
-    { "RING_EN", 2 },
+    { "TX_RING_EN", 2 },
     { "BASE_ADDR", 10 },

@@ -790,13 +790,13 @@ static int hns3_dbg_rx_bd_info(struct hns3_dbg_data *d, char *buf, int len)
 }

 static const struct hns3_dbg_item tx_bd_info_items[] = {
-    { "BD_IDX", 5 },
-    { "ADDRESS", 2 },
+    { "BD_IDX", 2 },
+    { "ADDRESS", 13 },
     { "VLAN_TAG", 2 },
     { "SIZE", 2 },
     { "T_CS_VLAN_TSO", 2 },
     { "OT_VLAN_TAG", 3 },
-    { "TV", 2 },
+    { "TV", 5 },
     { "OLT_VLAN_LEN", 2 },
     { "PAYLEN_OL4CS", 2 },
     { "BD_FE_SC_VLD", 2 },
@@ -391,7 +391,7 @@ static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len)
 static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
                                    int *pos)
 {
-    struct hclge_dbg_bitmap_cmd *bitmap;
+    struct hclge_dbg_bitmap_cmd req;
     struct hclge_desc desc;
     u16 qset_id, qset_num;
     int ret;

@@ -408,12 +408,12 @@ static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
         if (ret)
             return ret;

-        bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
+        req.bitmap = (u8)le32_to_cpu(desc.data[1]);

         *pos += scnprintf(buf + *pos, len - *pos,
                           "%04u %#x %#x %#x %#x\n",
-                          qset_id, bitmap->bit0, bitmap->bit1,
-                          bitmap->bit2, bitmap->bit3);
+                          qset_id, req.bit0, req.bit1, req.bit2,
+                          req.bit3);
     }

     return 0;

@@ -422,7 +422,7 @@ static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
 static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
                                   int *pos)
 {
-    struct hclge_dbg_bitmap_cmd *bitmap;
+    struct hclge_dbg_bitmap_cmd req;
     struct hclge_desc desc;
     u8 pri_id, pri_num;
     int ret;

@@ -439,12 +439,11 @@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
         if (ret)
             return ret;

-        bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
+        req.bitmap = (u8)le32_to_cpu(desc.data[1]);

         *pos += scnprintf(buf + *pos, len - *pos,
                           "%03u %#x %#x %#x\n",
-                          pri_id, bitmap->bit0, bitmap->bit1,
-                          bitmap->bit2);
+                          pri_id, req.bit0, req.bit1, req.bit2);
     }

     return 0;

@@ -453,7 +452,7 @@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
 static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
                                  int *pos)
 {
-    struct hclge_dbg_bitmap_cmd *bitmap;
+    struct hclge_dbg_bitmap_cmd req;
     struct hclge_desc desc;
     u8 pg_id;
     int ret;

@@ -466,12 +465,11 @@ static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
         if (ret)
             return ret;

-        bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
+        req.bitmap = (u8)le32_to_cpu(desc.data[1]);

         *pos += scnprintf(buf + *pos, len - *pos,
                           "%03u %#x %#x %#x\n",
-                          pg_id, bitmap->bit0, bitmap->bit1,
-                          bitmap->bit2);
+                          pg_id, req.bit0, req.bit1, req.bit2);
     }

     return 0;

@@ -511,7 +509,7 @@ static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len,
 static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
                                    int *pos)
 {
-    struct hclge_dbg_bitmap_cmd *bitmap;
+    struct hclge_dbg_bitmap_cmd req;
     struct hclge_desc desc;
     u8 port_id = 0;
     int ret;

@@ -521,12 +519,12 @@ static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
     if (ret)
         return ret;

-    bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
+    req.bitmap = (u8)le32_to_cpu(desc.data[1]);

     *pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
-                      bitmap->bit0);
+                      req.bit0);
     *pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
-                      bitmap->bit1);
+                      req.bit1);

     return 0;
 }
@@ -2847,33 +2847,29 @@ static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
 {
     if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
         !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
-        mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
-                            hclge_wq, &hdev->service_task, 0);
+        mod_delayed_work(hclge_wq, &hdev->service_task, 0);
 }

 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
 {
     if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
         test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
         !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
-        mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
-                            hclge_wq, &hdev->service_task, 0);
+        mod_delayed_work(hclge_wq, &hdev->service_task, 0);
 }

 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
 {
     if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
         !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
-        mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
-                            hclge_wq, &hdev->service_task, 0);
+        mod_delayed_work(hclge_wq, &hdev->service_task, 0);
 }

 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
 {
     if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
         !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
-        mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
-                            hclge_wq, &hdev->service_task,
-                            delay_time);
+        mod_delayed_work(hclge_wq, &hdev->service_task, delay_time);
 }

 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)

@@ -3491,33 +3487,14 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev)
     hdev->num_msi_used += 1;
 }

-static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
-                                      const cpumask_t *mask)
-{
-    struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
-                                          affinity_notify);
-
-    cpumask_copy(&hdev->affinity_mask, mask);
-}
-
-static void hclge_irq_affinity_release(struct kref *ref)
-{
-}
-
 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
 {
     irq_set_affinity_hint(hdev->misc_vector.vector_irq,
                           &hdev->affinity_mask);
-
-    hdev->affinity_notify.notify = hclge_irq_affinity_notify;
-    hdev->affinity_notify.release = hclge_irq_affinity_release;
-    irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
-                              &hdev->affinity_notify);
 }

 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
 {
-    irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
     irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
 }

@@ -13052,7 +13029,7 @@ static int hclge_init(void)
 {
     pr_info("%s is initializing\n", HCLGE_NAME);

-    hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
+    hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
     if (!hclge_wq) {
         pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
         return -ENOMEM;
@@ -944,7 +944,6 @@ struct hclge_dev {

     /* affinity mask and notify for misc interrupt */
     cpumask_t affinity_mask;
-    struct irq_affinity_notify affinity_notify;
     struct hclge_ptp *ptp;
     struct devlink *devlink;
 };
@@ -2232,6 +2232,7 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
 {
     if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
+        test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) &&
         !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
                           &hdev->state))
         mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);

@@ -3449,6 +3450,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)

     hclgevf_init_rxd_adv_layout(hdev);

+    set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state);
+
     hdev->last_reset_time = jiffies;
     dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
              HCLGEVF_DRIVER_NAME);

@@ -3899,7 +3902,7 @@ static int hclgevf_init(void)
 {
     pr_info("%s is initializing\n", HCLGEVF_NAME);

-    hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME);
+    hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME);
     if (!hclgevf_wq) {
         pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
         return -ENOMEM;
@@ -146,6 +146,7 @@ enum hclgevf_states {
     HCLGEVF_STATE_REMOVING,
     HCLGEVF_STATE_NIC_REGISTERED,
     HCLGEVF_STATE_ROCE_REGISTERED,
+    HCLGEVF_STATE_SERVICE_INITED,
     /* task states */
     HCLGEVF_STATE_RST_SERVICE_SCHED,
     HCLGEVF_STATE_RST_HANDLING,
@@ -100,9 +100,9 @@ static void ice_display_lag_info(struct ice_lag *lag)
  */
 static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
 {
-    struct net_device *event_netdev, *netdev_tmp;
     struct netdev_notifier_bonding_info *info;
     struct netdev_bonding_info *bonding_info;
+    struct net_device *event_netdev;
     const char *lag_netdev_name;

     event_netdev = netdev_notifier_info_to_dev(ptr);

@@ -123,19 +123,6 @@ static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
         goto lag_out;
     }

-    rcu_read_lock();
-    for_each_netdev_in_bond_rcu(lag->upper_netdev, netdev_tmp) {
-        if (!netif_is_ice(netdev_tmp))
-            continue;
-
-        if (netdev_tmp && netdev_tmp != lag->netdev &&
-            lag->peer_netdev != netdev_tmp) {
-            dev_hold(netdev_tmp);
-            lag->peer_netdev = netdev_tmp;
-        }
-    }
-    rcu_read_unlock();
-
     if (bonding_info->slave.state)
         ice_lag_set_backup(lag);
     else

@@ -319,6 +306,9 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
     case NETDEV_BONDING_INFO:
         ice_lag_info_event(lag, ptr);
         break;
+    case NETDEV_UNREGISTER:
+        ice_lag_unlink(lag, ptr);
+        break;
     default:
         break;
     }
@@ -1571,6 +1571,9 @@ err_kworker:
  */
 void ice_ptp_release(struct ice_pf *pf)
 {
+    if (!test_bit(ICE_FLAG_PTP, pf->flags))
+        return;
+
     /* Disable timestamping for both Tx and Rx */
     ice_ptp_cfg_timestamp(pf, false);
@@ -226,18 +226,85 @@ static const struct file_operations rvu_dbg_##name##_fops = { \

 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);

+static void get_lf_str_list(struct rvu_block block, int pcifunc,
+                            char *lfs)
+{
+    int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;
+
+    for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
+        if (lf >= block.lf.max)
+            break;
+
+        if (block.fn_map[lf] != pcifunc)
+            continue;
+
+        if (lf == prev_lf + 1) {
+            prev_lf = lf;
+            seq = 1;
+            continue;
+        }
+
+        if (seq)
+            len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
+        else
+            len += (len ? sprintf(lfs + len, ",%d", lf) :
+                          sprintf(lfs + len, "%d", lf));
+
+        prev_lf = lf;
+        seq = 0;
+    }
+
+    if (seq)
+        len += sprintf(lfs + len, "-%d", prev_lf);
+
+    lfs[len] = '\0';
+}
+
+static int get_max_column_width(struct rvu *rvu)
+{
+    int index, pf, vf, lf_str_size = 12, buf_size = 256;
+    struct rvu_block block;
+    u16 pcifunc;
+    char *buf;
+
+    buf = kzalloc(buf_size, GFP_KERNEL);
+    if (!buf)
+        return -ENOMEM;
+
+    for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+        for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
+            pcifunc = pf << 10 | vf;
+            if (!pcifunc)
+                continue;
+
+            for (index = 0; index < BLK_COUNT; index++) {
+                block = rvu->hw->block[index];
+                if (!strlen(block.name))
+                    continue;
+
+                get_lf_str_list(block, pcifunc, buf);
+                if (lf_str_size <= strlen(buf))
+                    lf_str_size = strlen(buf) + 1;
+            }
+        }
+    }
+
+    kfree(buf);
+    return lf_str_size;
+}
+
 /* Dumps current provisioning status of all RVU block LFs */
 static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
                                           char __user *buffer,
                                           size_t count, loff_t *ppos)
 {
-    int index, off = 0, flag = 0, go_back = 0, len = 0;
+    int index, off = 0, flag = 0, len = 0, i = 0;
     struct rvu *rvu = filp->private_data;
-    int lf, pf, vf, pcifunc;
+    int bytes_not_copied = 0;
     struct rvu_block block;
-    int bytes_not_copied;
-    int lf_str_size = 12;
+    int pf, vf, pcifunc;
     int buf_size = 2048;
+    int lf_str_size;
     char *lfs;
     char *buf;

@@ -249,6 +316,9 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
     if (!buf)
         return -ENOSPC;

+    /* Get the maximum width of a column */
+    lf_str_size = get_max_column_width(rvu);
+
     lfs = kzalloc(lf_str_size, GFP_KERNEL);
     if (!lfs) {
         kfree(buf);

@@ -262,65 +332,69 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
                          "%-*s", lf_str_size,
                          rvu->hw->block[index].name);
     }

     off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+    bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
+    if (bytes_not_copied)
+        goto out;
+
+    i++;
+    *ppos += off;
     for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
         for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
+            off = 0;
+            flag = 0;
             pcifunc = pf << 10 | vf;
             if (!pcifunc)
                 continue;

             if (vf) {
                 sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
-                go_back = scnprintf(&buf[off],
-                                    buf_size - 1 - off,
-                                    "%-*s", lf_str_size, lfs);
+                off = scnprintf(&buf[off],
+                                buf_size - 1 - off,
+                                "%-*s", lf_str_size, lfs);
             } else {
                 sprintf(lfs, "PF%d", pf);
-                go_back = scnprintf(&buf[off],
-                                    buf_size - 1 - off,
-                                    "%-*s", lf_str_size, lfs);
+                off = scnprintf(&buf[off],
+                                buf_size - 1 - off,
+                                "%-*s", lf_str_size, lfs);
             }

-            off += go_back;
-            for (index = 0; index < BLKTYPE_MAX; index++) {
+            for (index = 0; index < BLK_COUNT; index++) {
                 block = rvu->hw->block[index];
                 if (!strlen(block.name))
                     continue;
-                len = 0;
-                lfs[len] = '\0';
-                for (lf = 0; lf < block.lf.max; lf++) {
-                    if (block.fn_map[lf] != pcifunc)
-                        continue;
+                get_lf_str_list(block, pcifunc, lfs);
+                if (strlen(lfs))
                     flag = 1;
-                    len += sprintf(&lfs[len], "%d,", lf);
-                }
-
-                if (flag)
-                    len--;
-                lfs[len] = '\0';
                 off += scnprintf(&buf[off], buf_size - 1 - off,
                                  "%-*s", lf_str_size, lfs);
-                if (!strlen(lfs))
-                    go_back += lf_str_size;
             }
-            if (!flag)
-                off -= go_back;
-            else
-                flag = 0;
-            off--;
-            off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+            if (flag) {
+                off += scnprintf(&buf[off],
+                                 buf_size - 1 - off, "\n");
+                bytes_not_copied = copy_to_user(buffer +
+                                                (i * off),
+                                                buf, off);
+                if (bytes_not_copied)
+                    goto out;
+
+                i++;
+                *ppos += off;
+            }
         }
     }

-    bytes_not_copied = copy_to_user(buffer, buf, off);
+out:
     kfree(lfs);
     kfree(buf);

     if (bytes_not_copied)
         return -EFAULT;

-    *ppos = off;
-    return off;
+    return *ppos;
 }

 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);

@@ -504,7 +578,7 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
     if (cmd_buf)
         ret = -EINVAL;

-    if (!strncmp(subtoken, "help", 4) || ret < 0) {
+    if (ret < 0 || !strncmp(subtoken, "help", 4)) {
         dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
         goto qsize_write_done;
     }

@@ -1719,6 +1793,10 @@ static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
     u16 pcifunc;
     char *str;

+    /* Ingress policers do not exist on all platforms */
+    if (!nix_hw->ipolicer)
+        return 0;
+
     for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
         if (layer == BAND_PROF_INVAL_LAYER)
             continue;

@@ -1768,6 +1846,10 @@ static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
     int layer;
     char *str;

+    /* Ingress policers do not exist on all platforms */
+    if (!nix_hw->ipolicer)
+        return 0;
+
     seq_puts(m, "\nBandwidth profile resource free count\n");
     seq_puts(m, "=====================================\n");
     for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
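The get_lf_str_list() helper added above compresses consecutive LF numbers into dash-separated ranges before printing. A userspace rendition of the same idea, with a hypothetical helper name and simplified formatting:

#include <stdio.h>

/* Collapse runs of consecutive values into "first-last" spans,
 * e.g. {0,1,2,5,7,8} prints as "0-2,5,7-8". */
static void format_ranges(const int *v, int n, char *out)
{
    int len = 0;

    for (int i = 0; i < n; i++) {
        int j = i;

        while (j + 1 < n && v[j + 1] == v[j] + 1)   /* extend the run */
            j++;
        len += sprintf(out + len, len ? ",%d" : "%d", v[i]);
        if (j > i)
            len += sprintf(out + len, "-%d", v[j]); /* close the span */
        i = j;
    }
}

int main(void)
{
    int lfs[] = { 0, 1, 2, 5, 7, 8 };
    char buf[64];

    format_ranges(lfs, 6, buf);
    printf("%s\n", buf);    /* prints: 0-2,5,7-8 */
    return 0;
}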
@@ -2507,6 +2507,9 @@ static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
         return;

     nix_hw = get_nix_hw(rvu->hw, blkaddr);
+    if (!nix_hw)
+        return;
+
     vlan = &nix_hw->txvlan;

     mutex_lock(&vlan->rsrc_lock);
@@ -353,13 +353,10 @@ static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
     struct sk_buff *skb;
     int err;

-    elem_info->u.rdq.skb = NULL;
     skb = netdev_alloc_skb_ip_align(NULL, buf_len);
     if (!skb)
         return -ENOMEM;

-    /* Assume that wqe was previously zeroed. */
-
     err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
                                  buf_len, DMA_FROM_DEVICE);
     if (err)

@@ -597,21 +594,26 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
     struct pci_dev *pdev = mlxsw_pci->pdev;
     struct mlxsw_pci_queue_elem_info *elem_info;
     struct mlxsw_rx_info rx_info = {};
-    char *wqe;
+    char wqe[MLXSW_PCI_WQE_SIZE];
     struct sk_buff *skb;
     u16 byte_count;
     int err;

     elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
-    skb = elem_info->u.sdq.skb;
-    if (!skb)
-        return;
-    wqe = elem_info->elem;
-    mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+    skb = elem_info->u.rdq.skb;
+    memcpy(wqe, elem_info->elem, MLXSW_PCI_WQE_SIZE);

     if (q->consumer_counter++ != consumer_counter_limit)
         dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");

+    err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
+    if (err) {
+        dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
+        goto out;
+    }
+
+    mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+
     if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
         rx_info.is_lag = true;
         rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);

@@ -647,10 +649,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
     skb_put(skb, byte_count);
     mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);

-    memset(wqe, 0, q->elem_size);
-    err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
-    if (err)
-        dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
+out:
     /* Everything is set up, ring doorbell to pass elem to HW */
     q->producer_counter++;
     mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
@@ -1743,6 +1743,16 @@ static int lan743x_tx_ring_init(struct lan743x_tx *tx)
         ret = -EINVAL;
         goto cleanup;
     }
+    if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
+                                  DMA_BIT_MASK(64))) {
+        if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
+                                      DMA_BIT_MASK(32))) {
+            dev_warn(&tx->adapter->pdev->dev,
+                     "lan743x_: No suitable DMA available\n");
+            ret = -ENOMEM;
+            goto cleanup;
+        }
+    }
     ring_allocation_size = ALIGN(tx->ring_size *
                                  sizeof(struct lan743x_tx_descriptor),
                                  PAGE_SIZE);

@@ -1934,7 +1944,8 @@ static void lan743x_rx_update_tail(struct lan743x_rx *rx, int index)
                          index);
 }

-static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index)
+static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
+                                        gfp_t gfp)
 {
     struct net_device *netdev = rx->adapter->netdev;
     struct device *dev = &rx->adapter->pdev->dev;

@@ -1948,7 +1959,7 @@ static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index)

     descriptor = &rx->ring_cpu_ptr[index];
     buffer_info = &rx->buffer_info[index];
-    skb = __netdev_alloc_skb(netdev, buffer_length, GFP_ATOMIC | GFP_DMA);
+    skb = __netdev_alloc_skb(netdev, buffer_length, gfp);
     if (!skb)
         return -ENOMEM;
     dma_ptr = dma_map_single(dev, skb->data, buffer_length, DMA_FROM_DEVICE);

@@ -2110,7 +2121,8 @@ static int lan743x_rx_process_buffer(struct lan743x_rx *rx)

     /* save existing skb, allocate new skb and map to dma */
     skb = buffer_info->skb;
-    if (lan743x_rx_init_ring_element(rx, rx->last_head)) {
+    if (lan743x_rx_init_ring_element(rx, rx->last_head,
+                                     GFP_ATOMIC | GFP_DMA)) {
         /* failed to allocate next skb.
          * Memory is very low.
          * Drop this packet and reuse buffer.

@@ -2276,6 +2288,16 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)
         ret = -EINVAL;
         goto cleanup;
     }
+    if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
+                                  DMA_BIT_MASK(64))) {
+        if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
+                                      DMA_BIT_MASK(32))) {
+            dev_warn(&rx->adapter->pdev->dev,
+                     "lan743x_: No suitable DMA available\n");
+            ret = -ENOMEM;
+            goto cleanup;
+        }
+    }
     ring_allocation_size = ALIGN(rx->ring_size *
                                  sizeof(struct lan743x_rx_descriptor),
                                  PAGE_SIZE);

@@ -2315,13 +2337,16 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)

     rx->last_head = 0;
     for (index = 0; index < rx->ring_size; index++) {
-        ret = lan743x_rx_init_ring_element(rx, index);
+        ret = lan743x_rx_init_ring_element(rx, index, GFP_KERNEL);
         if (ret)
             goto cleanup;
     }
     return 0;

 cleanup:
+    netif_warn(rx->adapter, ifup, rx->adapter->netdev,
+               "Error allocating memory for LAN743x\n");
+
     lan743x_rx_ring_cleanup(rx);
     return ret;
 }

@@ -3019,6 +3044,8 @@ static int lan743x_pm_resume(struct device *dev)
     if (ret) {
         netif_err(adapter, probe, adapter->netdev,
                   "lan743x_hardware_init returned %d\n", ret);
+        lan743x_pci_cleanup(adapter);
+        return ret;
     }

     /* open netdev when netdev is at running state while resume.
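The lan743x hunks replace a hard-coded GFP_ATOMIC | GFP_DMA with a caller-supplied gfp_t, so ring setup in process context can sleep and reclaim while the NAPI path cannot. A toy sketch of that split; the gfp_t values and function names below are stand-ins, not the kernel's definitions:

#include <stdio.h>

typedef unsigned int gfp_t;
#define GFP_KERNEL 0x01u    /* may sleep, can reclaim memory */
#define GFP_ATOMIC 0x02u    /* must not sleep */

/* Allocate one receive buffer with the caller-chosen allocation mode. */
static int rx_init_ring_element(int index, gfp_t gfp)
{
    printf("element %d allocated with %s\n", index,
           gfp == GFP_KERNEL ? "GFP_KERNEL" : "GFP_ATOMIC");
    return 0;
}

static void ring_init(void)     /* process context: sleeping is fine */
{
    for (int i = 0; i < 4; i++)
        rx_init_ring_element(i, GFP_KERNEL);
}

static void napi_poll(void)     /* softirq context: must not sleep */
{
    rx_init_ring_element(0, GFP_ATOMIC);
}

int main(void)
{
    ring_init();
    napi_poll();
    return 0;
}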
@@ -182,15 +182,21 @@ static int
 nfp_bpf_check_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
 {
     struct nfp_net *nn = netdev_priv(netdev);
-    unsigned int max_mtu;
+    struct nfp_bpf_vnic *bv;
+    struct bpf_prog *prog;

     if (~nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
         return 0;

-    max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
-    if (new_mtu > max_mtu) {
-        nn_info(nn, "BPF offload active, MTU over %u not supported\n",
-                max_mtu);
+    if (nn->xdp_hw.prog) {
+        prog = nn->xdp_hw.prog;
+    } else {
+        bv = nn->app_priv;
+        prog = bv->tc_prog;
+    }
+
+    if (nfp_bpf_offload_check_mtu(nn, prog, new_mtu)) {
+        nn_info(nn, "BPF offload active, potential packet access beyond hardware packet boundary");
         return -EBUSY;
     }
     return 0;
@@ -560,6 +560,8 @@ bool nfp_is_subprog_start(struct nfp_insn_meta *meta);
 void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog);
 int nfp_bpf_jit(struct nfp_prog *prog);
 bool nfp_bpf_supported_opcode(u8 code);
+bool nfp_bpf_offload_check_mtu(struct nfp_net *nn, struct bpf_prog *prog,
+                               unsigned int mtu);

 int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
                     int prev_insn_idx);
@@ -481,19 +481,28 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
     return 0;
 }

+bool nfp_bpf_offload_check_mtu(struct nfp_net *nn, struct bpf_prog *prog,
+                               unsigned int mtu)
+{
+    unsigned int fw_mtu, pkt_off;
+
+    fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
+    pkt_off = min(prog->aux->max_pkt_offset, mtu);
+
+    return fw_mtu < pkt_off;
+}
+
 static int
 nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
                  struct netlink_ext_ack *extack)
 {
     struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
-    unsigned int fw_mtu, pkt_off, max_stack, max_prog_len;
+    unsigned int max_stack, max_prog_len;
     dma_addr_t dma_addr;
     void *img;
     int err;

-    fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
-    pkt_off = min(prog->aux->max_pkt_offset, nn->dp.netdev->mtu);
-    if (fw_mtu < pkt_off) {
+    if (nfp_bpf_offload_check_mtu(nn, prog, nn->dp.netdev->mtu)) {
         NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with potential packet access beyond HW packet split boundary");
         return -EOPNOTSUPP;
     }
@@ -1015,9 +1015,6 @@ static int lpc_eth_close(struct net_device *ndev)
     napi_disable(&pldat->napi);
     netif_stop_queue(ndev);

-    if (ndev->phydev)
-        phy_stop(ndev->phydev);
-
     spin_lock_irqsave(&pldat->lock, flags);
     __lpc_eth_reset(pldat);
     netif_carrier_off(ndev);

@@ -1025,6 +1022,8 @@ static int lpc_eth_close(struct net_device *ndev)
     writel(0, LPC_ENET_MAC2(pldat->net_base));
     spin_unlock_irqrestore(&pldat->lock, flags);

+    if (ndev->phydev)
+        phy_stop(ndev->phydev);
     clk_disable_unprepare(pldat->clk);

     return 0;
@@ -157,6 +157,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
     { PCI_VDEVICE(REALTEK, 0x8129) },
     { PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_NO_GBIT },
     { PCI_VDEVICE(REALTEK, 0x8161) },
+    { PCI_VDEVICE(REALTEK, 0x8162) },
     { PCI_VDEVICE(REALTEK, 0x8167) },
     { PCI_VDEVICE(REALTEK, 0x8168) },
     { PCI_VDEVICE(NCUBE, 0x8168) },
@@ -243,62 +243,10 @@ static void phy_sanitize_settings(struct phy_device *phydev)
     }
 }

-int phy_ethtool_ksettings_set(struct phy_device *phydev,
-                              const struct ethtool_link_ksettings *cmd)
-{
-    __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
-    u8 autoneg = cmd->base.autoneg;
-    u8 duplex = cmd->base.duplex;
-    u32 speed = cmd->base.speed;
-
-    if (cmd->base.phy_address != phydev->mdio.addr)
-        return -EINVAL;
-
-    linkmode_copy(advertising, cmd->link_modes.advertising);
-
-    /* We make sure that we don't pass unsupported values in to the PHY */
-    linkmode_and(advertising, advertising, phydev->supported);
-
-    /* Verify the settings we care about. */
-    if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
-        return -EINVAL;
-
-    if (autoneg == AUTONEG_ENABLE && linkmode_empty(advertising))
-        return -EINVAL;
-
-    if (autoneg == AUTONEG_DISABLE &&
-        ((speed != SPEED_1000 &&
-          speed != SPEED_100 &&
-          speed != SPEED_10) ||
-         (duplex != DUPLEX_HALF &&
-          duplex != DUPLEX_FULL)))
-        return -EINVAL;
-
-    phydev->autoneg = autoneg;
-
-    if (autoneg == AUTONEG_DISABLE) {
-        phydev->speed = speed;
-        phydev->duplex = duplex;
-    }
-
-    linkmode_copy(phydev->advertising, advertising);
-
-    linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
-                     phydev->advertising, autoneg == AUTONEG_ENABLE);
-
-    phydev->master_slave_set = cmd->base.master_slave_cfg;
-    phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
-
-    /* Restart the PHY */
-    phy_start_aneg(phydev);
-
-    return 0;
-}
-EXPORT_SYMBOL(phy_ethtool_ksettings_set);
-
 void phy_ethtool_ksettings_get(struct phy_device *phydev,
                                struct ethtool_link_ksettings *cmd)
 {
+    mutex_lock(&phydev->lock);
     linkmode_copy(cmd->link_modes.supported, phydev->supported);
     linkmode_copy(cmd->link_modes.advertising, phydev->advertising);
     linkmode_copy(cmd->link_modes.lp_advertising, phydev->lp_advertising);

@@ -317,6 +265,7 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev,
     cmd->base.autoneg = phydev->autoneg;
     cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl;
     cmd->base.eth_tp_mdix = phydev->mdix;
+    mutex_unlock(&phydev->lock);
 }
 EXPORT_SYMBOL(phy_ethtool_ksettings_get);

@@ -750,6 +699,37 @@ static int phy_check_link_status(struct phy_device *phydev)
     return 0;
 }

+/**
+ * _phy_start_aneg - start auto-negotiation for this PHY device
+ * @phydev: the phy_device struct
+ *
+ * Description: Sanitizes the settings (if we're not autonegotiating
+ *   them), and then calls the driver's config_aneg function.
+ *   If the PHYCONTROL Layer is operating, we change the state to
+ *   reflect the beginning of Auto-negotiation or forcing.
+ */
+static int _phy_start_aneg(struct phy_device *phydev)
+{
+    int err;
+
+    lockdep_assert_held(&phydev->lock);
+
+    if (!phydev->drv)
+        return -EIO;
+
+    if (AUTONEG_DISABLE == phydev->autoneg)
+        phy_sanitize_settings(phydev);
+
+    err = phy_config_aneg(phydev);
+    if (err < 0)
+        return err;
+
+    if (phy_is_started(phydev))
+        err = phy_check_link_status(phydev);
+
+    return err;
+}
+
 /**
  * phy_start_aneg - start auto-negotiation for this PHY device
  * @phydev: the phy_device struct

@@ -763,21 +743,8 @@ int phy_start_aneg(struct phy_device *phydev)
 {
     int err;

-    if (!phydev->drv)
-        return -EIO;
-
     mutex_lock(&phydev->lock);
-
-    if (AUTONEG_DISABLE == phydev->autoneg)
-        phy_sanitize_settings(phydev);
-
-    err = phy_config_aneg(phydev);
-    if (err < 0)
-        goto out_unlock;
-
-    if (phy_is_started(phydev))
-        err = phy_check_link_status(phydev);
-out_unlock:
+    err = _phy_start_aneg(phydev);
     mutex_unlock(&phydev->lock);

     return err;

@@ -800,6 +767,61 @@ static int phy_poll_aneg_done(struct phy_device *phydev)
     return ret < 0 ? ret : 0;
 }

+int phy_ethtool_ksettings_set(struct phy_device *phydev,
+                              const struct ethtool_link_ksettings *cmd)
+{
+    __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+    u8 autoneg = cmd->base.autoneg;
+    u8 duplex = cmd->base.duplex;
+    u32 speed = cmd->base.speed;
+
+    if (cmd->base.phy_address != phydev->mdio.addr)
+        return -EINVAL;
+
+    linkmode_copy(advertising, cmd->link_modes.advertising);
+
+    /* We make sure that we don't pass unsupported values in to the PHY */
+    linkmode_and(advertising, advertising, phydev->supported);
+
+    /* Verify the settings we care about. */
+    if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
+        return -EINVAL;
+
+    if (autoneg == AUTONEG_ENABLE && linkmode_empty(advertising))
+        return -EINVAL;
+
+    if (autoneg == AUTONEG_DISABLE &&
+        ((speed != SPEED_1000 &&
+          speed != SPEED_100 &&
+          speed != SPEED_10) ||
+         (duplex != DUPLEX_HALF &&
+          duplex != DUPLEX_FULL)))
+        return -EINVAL;
+
+    mutex_lock(&phydev->lock);
+    phydev->autoneg = autoneg;
+
+    if (autoneg == AUTONEG_DISABLE) {
+        phydev->speed = speed;
+        phydev->duplex = duplex;
+    }
+
+    linkmode_copy(phydev->advertising, advertising);
+
+    linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+                     phydev->advertising, autoneg == AUTONEG_ENABLE);
+
+    phydev->master_slave_set = cmd->base.master_slave_cfg;
+    phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
+
+    /* Restart the PHY */
+    _phy_start_aneg(phydev);
+
+    mutex_unlock(&phydev->lock);
+    return 0;
+}
+EXPORT_SYMBOL(phy_ethtool_ksettings_set);
+
 /**
  * phy_speed_down - set speed to lowest speed supported by both link partners
  * @phydev: the phy_device struct
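The phy hunks split phy_start_aneg() into a locked public wrapper and a bare _phy_start_aneg() helper, so phy_ethtool_ksettings_set() can apply speed/duplex and restart autonegotiation under a single lock hold instead of racing. A minimal pthread model of the pattern, with hypothetical names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int speed, duplex;

static int _start_aneg(void)        /* caller must already hold lock */
{
    printf("aneg with speed=%d duplex=%d\n", speed, duplex);
    return 0;
}

static int start_aneg(void)         /* public entry: takes the lock */
{
    pthread_mutex_lock(&lock);
    int err = _start_aneg();
    pthread_mutex_unlock(&lock);
    return err;
}

int main(void)
{
    /* settings update and restart are now one atomic critical section */
    pthread_mutex_lock(&lock);
    speed = 100;
    duplex = 1;
    _start_aneg();
    pthread_mutex_unlock(&lock);

    return start_aneg();            /* independent callers still lock */
}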
@@ -4122,6 +4122,12 @@ static int lan78xx_probe(struct usb_interface *intf,

     dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);

+    /* Reject broken descriptors. */
+    if (dev->maxpacket == 0) {
+        ret = -ENODEV;
+        goto out4;
+    }
+
     /* driver requires remote-wakeup capability during autosuspend. */
     intf->needs_remote_wakeup = 1;
@@ -1790,6 +1790,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
     dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
     if (dev->maxpacket == 0) {
         /* that is a broken device */
+        status = -ENODEV;
         goto out4;
     }
@@ -3833,7 +3833,6 @@ vmxnet3_suspend(struct device *device)
     vmxnet3_free_intr_resources(adapter);

     netif_device_detach(netdev);
-    netif_tx_stop_all_queues(netdev);

     /* Create wake-up filters. */
     pmConf = adapter->pm_conf;
@@ -1730,6 +1730,10 @@ static int netfront_resume(struct xenbus_device *dev)

     dev_dbg(&dev->dev, "%s\n", dev->nodename);

+    netif_tx_lock_bh(info->netdev);
+    netif_device_detach(info->netdev);
+    netif_tx_unlock_bh(info->netdev);
+
     xennet_disconnect_backend(info);
     return 0;
 }

@@ -2349,6 +2353,10 @@ static int xennet_connect(struct net_device *dev)
      * domain a kick because we've probably just requeued some
      * packets.
      */
+    netif_tx_lock_bh(np->netdev);
+    netif_device_attach(np->netdev);
+    netif_tx_unlock_bh(np->netdev);
+
     netif_carrier_on(np->netdev);
     for (j = 0; j < num_queues; ++j) {
         queue = &np->queues[j];
@@ -1006,11 +1006,11 @@ static u64 port100_get_command_type_mask(struct port100 *dev)

     skb = port100_alloc_skb(dev, 0);
     if (!skb)
-        return -ENOMEM;
+        return 0;

     resp = port100_send_cmd_sync(dev, PORT100_CMD_GET_COMMAND_TYPE, skb);
     if (IS_ERR(resp))
-        return PTR_ERR(resp);
+        return 0;

     if (resp->len < 8)
         mask = 0;
@@ -929,8 +929,11 @@ struct bpf_array_aux {
      * stored in the map to make sure that all callers and callees have
      * the same prog type and JITed flag.
      */
-    enum bpf_prog_type type;
-    bool jited;
+    struct {
+        spinlock_t lock;
+        enum bpf_prog_type type;
+        bool jited;
+    } owner;
     /* Programs with direct jumps into programs part of this array. */
    struct list_head poke_progs;
    struct bpf_map *map;
@@ -101,14 +101,14 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_trace_map_ops)
 #endif
 BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
 BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
-#ifdef CONFIG_NET
-BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
-BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, dev_map_hash_ops)
-BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops)
 #ifdef CONFIG_BPF_LSM
 BPF_MAP_TYPE(BPF_MAP_TYPE_INODE_STORAGE, inode_storage_map_ops)
 #endif
 BPF_MAP_TYPE(BPF_MAP_TYPE_TASK_STORAGE, task_storage_map_ops)
+#ifdef CONFIG_NET
+BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, dev_map_hash_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops)
 BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops)
 #if defined(CONFIG_XDP_SOCKETS)
 BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops)
@@ -1051,6 +1051,7 @@ extern int bpf_jit_enable;
 extern int bpf_jit_harden;
 extern int bpf_jit_kallsyms;
 extern long bpf_jit_limit;
+extern long bpf_jit_limit_max;

 typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
@@ -128,6 +128,7 @@ int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
                              struct sk_msg *msg, u32 bytes);
 int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
                    int len, int flags);
+bool sk_msg_is_readable(struct sock *sk);

 static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
 {
@@ -5376,7 +5376,6 @@ static inline void wiphy_unlock(struct wiphy *wiphy)
  *    netdev and may otherwise be used by driver read-only, will be update
  *    by cfg80211 on change_interface
  * @mgmt_registrations: list of registrations for management frames
- * @mgmt_registrations_lock: lock for the list
  * @mgmt_registrations_need_update: mgmt registrations were updated,
  *    need to propagate the update to the driver
  * @mtx: mutex used to lock data in this struct, may be used by drivers

@@ -5423,7 +5422,6 @@ struct wireless_dev {
     u32 identifier;

     struct list_head mgmt_registrations;
-    spinlock_t mgmt_registrations_lock;
     u8 mgmt_registrations_need_update:1;

     struct mutex mtx;
@@ -69,6 +69,10 @@ struct mptcp_out_options {
     struct {
         u64 sndr_key;
         u64 rcvr_key;
+        u64 data_seq;
+        u32 subflow_seq;
+        u16 data_len;
+        __sum16 csum;
     };
     struct {
         struct mptcp_addr_info addr;
@@ -1208,7 +1208,7 @@ struct proto {
 #endif

     bool (*stream_memory_free)(const struct sock *sk, int wake);
-    bool (*stream_memory_read)(const struct sock *sk);
+    bool (*sock_is_readable)(struct sock *sk);
     /* Memory pressure */
     void (*enter_memory_pressure)(struct sock *sk);
     void (*leave_memory_pressure)(struct sock *sk);

@@ -2820,4 +2820,10 @@ void sock_set_sndtimeo(struct sock *sk, s64 secs);

 int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len);

+static inline bool sk_is_readable(struct sock *sk)
+{
+    if (sk->sk_prot->sock_is_readable)
+        return sk->sk_prot->sock_is_readable(sk);
+    return false;
+}
 #endif /* _SOCK_H */
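The sk_is_readable() helper added above dispatches through an optional per-protocol hook with a safe default when no hook is set, which is what lets UDP and AF_UNIX report sockmap-redirected data to poll(). A compact userspace model of that shape, with simplified types:

#include <stdbool.h>
#include <stdio.h>

struct proto_ops {
    bool (*sock_is_readable)(void *sk);    /* optional hook */
};

struct sock {
    struct proto_ops *ops;
};

static bool sk_is_readable(struct sock *sk)
{
    if (sk->ops->sock_is_readable)
        return sk->ops->sock_is_readable(sk);
    return false;               /* no hook installed: not readable */
}

static bool always_ready(void *sk) { (void)sk; return true; }

int main(void)
{
    struct proto_ops plain = { 0 }, mapped = { always_ready };
    struct sock s1 = { &plain }, s2 = { &mapped };

    printf("%d %d\n", sk_is_readable(&s1), sk_is_readable(&s2));
    return 0;
}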
@@ -358,6 +358,7 @@ int tls_sk_query(struct sock *sk, int optname, char __user *optval,
                  int __user *optlen);
 int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
                   unsigned int optlen);
+void tls_err_abort(struct sock *sk, int err);

 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
 void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);

@@ -375,7 +376,7 @@ void tls_sw_release_resources_rx(struct sock *sk);
 void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
 int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                    int nonblock, int flags, int *addr_len);
-bool tls_sw_stream_read(const struct sock *sk);
+bool tls_sw_sock_is_readable(struct sock *sk);
 ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
                            struct pipe_inode_info *pipe,
                            size_t len, unsigned int flags);

@@ -466,12 +467,6 @@ static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
 #endif
 }

-static inline void tls_err_abort(struct sock *sk, int err)
-{
-    sk->sk_err = err;
-    sk_error_report(sk);
-}
-
 static inline bool tls_bigint_increment(unsigned char *seq, int len)
 {
     int i;

@@ -512,7 +507,7 @@ static inline void tls_advance_record_sn(struct sock *sk,
                                          struct cipher_context *ctx)
 {
     if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
-        tls_err_abort(sk, EBADMSG);
+        tls_err_abort(sk, -EBADMSG);

     if (prot->version != TLS_1_3_VERSION &&
         prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
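The tls changes restore the kernel's errno sign convention: callers now pass negative errnos (-EBADMSG), sk_err stores the positive value, and the syscall path hands userspace the negated value, so read()/write() can no longer return a positive error. A tiny demo of the convention with toy names:

#include <errno.h>
#include <stdio.h>

static int sk_err;      /* stand-in for sk->sk_err: positive errno */

static void toy_err_abort(int err)  /* models the fixed tls_err_abort() */
{
    if (err >= 0)
        fprintf(stderr, "bug: callers must pass a negative errno\n");
    sk_err = -err;                  /* store as positive */
}

int main(void)
{
    toy_err_abort(-EBADMSG);
    printf("sk_err = %d, read() would return %d\n", sk_err, -sk_err);
    return 0;
}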
@@ -494,8 +494,9 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
      * CHECKSUM_NONE in __udp_gso_segment. UDP GRO indeed builds partial
      * packets in udp_gro_complete_segment. As does UDP GSO, verified by
      * udp_send_skb. But when those packets are looped in dev_loopback_xmit
-     * their ip_summed is set to CHECKSUM_UNNECESSARY. Reset in this
-     * specific case, where PARTIAL is both correct and required.
+     * their ip_summed CHECKSUM_NONE is changed to CHECKSUM_UNNECESSARY.
+     * Reset in this specific case, where PARTIAL is both correct and
+     * required.
      */
     if (skb->pkt_type == PACKET_LOOPBACK)
         skb->ip_summed = CHECKSUM_PARTIAL;
@@ -1072,6 +1072,7 @@ static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
     INIT_WORK(&aux->work, prog_array_map_clear_deferred);
     INIT_LIST_HEAD(&aux->poke_progs);
     mutex_init(&aux->poke_mutex);
+    spin_lock_init(&aux->owner.lock);

     map = array_map_alloc(attr);
     if (IS_ERR(map)) {
@@ -524,6 +524,7 @@ int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
 int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
 int bpf_jit_harden __read_mostly;
 long bpf_jit_limit __read_mostly;
+long bpf_jit_limit_max __read_mostly;

 static void
 bpf_prog_ksym_set_addr(struct bpf_prog *prog)

@@ -817,7 +818,8 @@ u64 __weak bpf_jit_alloc_exec_limit(void)
 static int __init bpf_jit_charge_init(void)
 {
     /* Only used as heuristic here to derive limit. */
-    bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
+    bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
+    bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2,
                                         PAGE_SIZE), LONG_MAX);
     return 0;
 }

@@ -1821,20 +1823,26 @@ static unsigned int __bpf_prog_ret0_warn(const void *ctx,
 bool bpf_prog_array_compatible(struct bpf_array *array,
                                const struct bpf_prog *fp)
 {
+    bool ret;
+
     if (fp->kprobe_override)
         return false;

-    if (!array->aux->type) {
+    spin_lock(&array->aux->owner.lock);
+
+    if (!array->aux->owner.type) {
         /* There's no owner yet where we could check for
          * compatibility.
          */
-        array->aux->type  = fp->type;
-        array->aux->jited = fp->jited;
-        return true;
+        array->aux->owner.type  = fp->type;
+        array->aux->owner.jited = fp->jited;
+        ret = true;
+    } else {
+        ret = array->aux->owner.type  == fp->type &&
+              array->aux->owner.jited == fp->jited;
     }
-
-    return array->aux->type  == fp->type &&
-           array->aux->jited == fp->jited;
+    spin_unlock(&array->aux->owner.lock);
+    return ret;
 }

 static int bpf_check_tail_call(const struct bpf_prog *fp)
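The owner struct and spinlock above make the tail-call "check for an owner, else claim ownership" step atomic, closing the race where two incompatible programs could both succeed. A userspace model of the fixed logic using a pthread mutex; illustrative only, not the kernel code:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* First user claims the (type, jited) pair; later users must match. */
struct owner {
    pthread_mutex_t lock;
    int type;               /* 0 = unclaimed */
    bool jited;
};

static bool owner_compatible(struct owner *o, int type, bool jited)
{
    bool ret;

    pthread_mutex_lock(&o->lock);   /* check-and-claim is now atomic */
    if (!o->type) {
        o->type = type;
        o->jited = jited;
        ret = true;
    } else {
        ret = o->type == type && o->jited == jited;
    }
    pthread_mutex_unlock(&o->lock);
    return ret;
}

int main(void)
{
    struct owner o = { PTHREAD_MUTEX_INITIALIZER, 0, false };

    printf("%d\n", owner_compatible(&o, 1, true));  /* 1: claims ownership */
    printf("%d\n", owner_compatible(&o, 2, true));  /* 0: type mismatch */
    return 0;
}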
@@ -543,8 +543,10 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)

     if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
         array = container_of(map, struct bpf_array, map);
-        type  = array->aux->type;
-        jited = array->aux->jited;
+        spin_lock(&array->aux->owner.lock);
+        type  = array->aux->owner.type;
+        jited = array->aux->owner.jited;
+        spin_unlock(&array->aux->owner.lock);
     }

     seq_printf(m,

@@ -1337,12 +1339,11 @@ int generic_map_update_batch(struct bpf_map *map,
     void __user *values = u64_to_user_ptr(attr->batch.values);
     void __user *keys = u64_to_user_ptr(attr->batch.keys);
     u32 value_size, cp, max_count;
-    int ufd = attr->map_fd;
+    int ufd = attr->batch.map_fd;
     void *key, *value;
     struct fd f;
     int err = 0;

-    f = fdget(ufd);
     if (attr->batch.elem_flags & ~BPF_F_LOCK)
         return -EINVAL;

@@ -1367,6 +1368,7 @@ int generic_map_update_batch(struct bpf_map *map,
         return -ENOMEM;
     }

+    f = fdget(ufd); /* bpf_map_do_batch() guarantees ufd is valid */
     for (cp = 0; cp < max_count; cp++) {
         err = -EFAULT;
         if (copy_from_user(key, keys + cp * map->key_size,

@@ -1386,6 +1388,7 @@ int generic_map_update_batch(struct bpf_map *map,

     kvfree(value);
     kvfree(key);
+    fdput(f);
     return err;
 }
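generic_map_update_batch() now reads the fd from the right attribute field, validates before taking the reference, and adds the missing fdput(). A userspace sketch of the acquire-late/release-on-every-path discipline, using open()/close() as stand-ins for fdget()/fdput(); the function and path are hypothetical:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int process_batch(const char *path, int flags)
{
    int fd;

    if (flags != 0)             /* validate before acquiring anything */
        return -1;

    fd = open(path, O_RDONLY);  /* acquire the reference last */
    if (fd < 0)
        return -1;

    /* ... per-element work would go here ... */

    close(fd);                  /* and release it on every exit path */
    return 0;
}

int main(void)
{
    printf("%d\n", process_batch("/etc/hostname", 0));
    return 0;
}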
@@ -2187,8 +2187,10 @@ static void cgroup_kill_sb(struct super_block *sb)
      * And don't kill the default root.
      */
     if (list_empty(&root->cgrp.self.children) && root != &cgrp_dfl_root &&
-        !percpu_ref_is_dying(&root->cgrp.self.refcnt))
+        !percpu_ref_is_dying(&root->cgrp.self.refcnt)) {
+        cgroup_bpf_offline(&root->cgrp);
         percpu_ref_kill(&root->cgrp.self.refcnt);
+    }
     cgroup_put(&root->cgrp);
     kernfs_kill_sb(sb);
 }
@@ -1560,11 +1560,15 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
         return 0;

     bat_priv->bla.claim_hash = batadv_hash_new(128);
-    bat_priv->bla.backbone_hash = batadv_hash_new(32);
-
-    if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
+    if (!bat_priv->bla.claim_hash)
         return -ENOMEM;

+    bat_priv->bla.backbone_hash = batadv_hash_new(32);
+    if (!bat_priv->bla.backbone_hash) {
+        batadv_hash_destroy(bat_priv->bla.claim_hash);
+        return -ENOMEM;
+    }
+
     batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
                                &batadv_claim_hash_lock_class_key);
     batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
@ -190,29 +190,41 @@ int batadv_mesh_init(struct net_device *soft_iface)
|
|||
|
||||
bat_priv->gw.generation = 0;
|
||||
|
||||
ret = batadv_v_mesh_init(bat_priv);
|
||||
if (ret < 0)
|
||||
goto err;
|
||||
|
||||
ret = batadv_originator_init(bat_priv);
|
||||
if (ret < 0)
|
||||
goto err;
|
||||
if (ret < 0) {
|
||||
atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
|
||||
goto err_orig;
|
||||
}
|
||||
|
||||
ret = batadv_tt_init(bat_priv);
|
||||
if (ret < 0)
|
||||
goto err;
|
||||
if (ret < 0) {
|
||||
atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
|
||||
goto err_tt;
|
||||
}
|
||||
|
||||
ret = batadv_v_mesh_init(bat_priv);
|
||||
if (ret < 0) {
|
||||
atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
|
||||
goto err_v;
|
||||
}
|
||||
|
||||
ret = batadv_bla_init(bat_priv);
|
||||
if (ret < 0)
|
||||
goto err;
|
||||
if (ret < 0) {
|
||||
atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
|
||||
goto err_bla;
|
||||
}
|
||||
|
||||
ret = batadv_dat_init(bat_priv);
|
||||
if (ret < 0)
|
||||
goto err;
|
||||
if (ret < 0) {
|
||||
atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
|
||||
goto err_dat;
|
||||
}
|
||||
|
||||
ret = batadv_nc_mesh_init(bat_priv);
|
||||
if (ret < 0)
|
||||
goto err;
|
||||
if (ret < 0) {
|
||||
atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
|
||||
goto err_nc;
|
||||
}
|
||||
|
||||
batadv_gw_init(bat_priv);
|
||||
batadv_mcast_init(bat_priv);
|
||||
|
@ -222,8 +234,20 @@ int batadv_mesh_init(struct net_device *soft_iface)
|
|||
|
||||
return 0;
|
||||
|
||||
err:
|
||||
batadv_mesh_free(soft_iface);
|
||||
err_nc:
|
||||
batadv_dat_free(bat_priv);
|
||||
err_dat:
|
||||
batadv_bla_free(bat_priv);
|
||||
err_bla:
|
||||
batadv_v_mesh_free(bat_priv);
|
||||
err_v:
|
||||
batadv_tt_free(bat_priv);
|
||||
err_tt:
|
||||
batadv_originator_free(bat_priv);
|
||||
err_orig:
|
||||
batadv_purge_outstanding_packets(bat_priv, NULL);
|
||||
atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
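The pattern adopted above is the usual kernel unwind ladder: each init step gets a label that frees only what was initialized before it, so a mid-sequence failure no longer runs the full teardown on half-initialized state. A standalone sketch of the idiom (the init/free pairs are stand-ins, not batman-adv API):

#include <stdio.h>
#include <stdlib.h>

static int init_a(void) { puts("init a"); return 0; }
static void free_a(void) { puts("free a"); }
static int init_b(void) { puts("init b"); return 0; }
static void free_b(void) { puts("free b"); }
static int init_c(void) { puts("init c"); return -1; } /* simulate failure */

static int setup(void)
{
	int ret;

	ret = init_a();
	if (ret)
		goto err_a;
	ret = init_b();
	if (ret)
		goto err_b;
	ret = init_c();
	if (ret)
		goto err_c;
	return 0;

	/* Unwind in reverse order; each label releases exactly the
	 * resources acquired before the failing step.
	 */
err_c:
	free_b();
err_b:
	free_a();
err_a:
	return ret;
}

int main(void)
{
	return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
}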
net/batman-adv/network-coding.c:
@@ -152,8 +152,10 @@ int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
 				   &batadv_nc_coding_hash_lock_class_key);
 
 	bat_priv->nc.decoding_hash = batadv_hash_new(128);
-	if (!bat_priv->nc.decoding_hash)
+	if (!bat_priv->nc.decoding_hash) {
+		batadv_hash_destroy(bat_priv->nc.coding_hash);
 		goto err;
+	}
 
 	batadv_hash_set_lock_class(bat_priv->nc.decoding_hash,
 				   &batadv_nc_decoding_hash_lock_class_key);
net/batman-adv/translation-table.c:
@@ -4162,8 +4162,10 @@ int batadv_tt_init(struct batadv_priv *bat_priv)
 		return ret;
 
 	ret = batadv_tt_global_init(bat_priv);
-	if (ret < 0)
+	if (ret < 0) {
+		batadv_tt_local_table_free(bat_priv);
 		return ret;
+	}
 
 	batadv_tvlv_handler_register(bat_priv, batadv_tt_tvlv_ogm_handler_v1,
 				     batadv_tt_tvlv_unicast_handler_v1,
net/core/dev.c:
@@ -3163,6 +3163,12 @@ static u16 skb_tx_hash(const struct net_device *dev,
 
 		qoffset = sb_dev->tc_to_txq[tc].offset;
 		qcount = sb_dev->tc_to_txq[tc].count;
+		if (unlikely(!qcount)) {
+			net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n",
+					     sb_dev->name, qoffset, tc);
+			qoffset = 0;
+			qcount = dev->real_num_tx_queues;
+		}
 	}
 
 	if (skb_rx_queue_recorded(skb)) {
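Without this guard, a zero qcount from a racing tc-to-txq reconfiguration makes the later `while (hash >= qcount) hash -= qcount;` reduction in skb_tx_hash() spin forever. A standalone sketch of the queue-selection arithmetic (simplified, with illustrative values):

#include <stdint.h>
#include <stdio.h>

/* Simplified from skb_tx_hash(): map a recorded rx queue into the
 * [qoffset, qoffset + qcount) tx range. With qcount == 0 the while
 * loop below would never terminate, hence the kernel's new fallback.
 */
static uint16_t pick_txq(uint16_t rx_queue, uint16_t qoffset,
			 uint16_t qcount, uint16_t real_num_tx_queues)
{
	uint16_t hash = rx_queue;

	if (qcount == 0) {	/* the fix: fall back to the full range */
		qoffset = 0;
		qcount = real_num_tx_queues;
	}

	if (hash >= qoffset)
		hash -= qoffset;
	while (hash >= qcount)
		hash -= qcount;
	return hash + qoffset;
}

int main(void)
{
	/* Illustrative: 8 real queues, a tc slice that was just resized away */
	printf("txq = %u\n", pick_txq(5, 4, 0, 8));
	return 0;
}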
@@ -3906,7 +3912,8 @@ int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
 	skb_reset_mac_header(skb);
 	__skb_pull(skb, skb_network_offset(skb));
 	skb->pkt_type = PACKET_LOOPBACK;
-	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	if (skb->ip_summed == CHECKSUM_NONE)
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	WARN_ON(!skb_dst(skb));
 	skb_dst_force(skb);
 	netif_rx_ni(skb);
net/core/net-sysfs.c:
@@ -1973,9 +1973,9 @@ int netdev_register_kobject(struct net_device *ndev)
 int netdev_change_owner(struct net_device *ndev, const struct net *net_old,
 			const struct net *net_new)
 {
+	kuid_t old_uid = GLOBAL_ROOT_UID, new_uid = GLOBAL_ROOT_UID;
+	kgid_t old_gid = GLOBAL_ROOT_GID, new_gid = GLOBAL_ROOT_GID;
 	struct device *dev = &ndev->dev;
-	kuid_t old_uid, new_uid;
-	kgid_t old_gid, new_gid;
 	int error;
 
 	net_ns_get_ownership(net_old, &old_uid, &old_gid);
net/core/skbuff.c:
@@ -80,6 +80,7 @@
 #include <linux/indirect_call_wrapper.h>
 
 #include "datagram.h"
+#include "sock_destructor.h"
 
 struct kmem_cache *skbuff_head_cache __ro_after_init;
 static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
@@ -1804,30 +1805,39 @@ EXPORT_SYMBOL(skb_realloc_headroom);
 struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom)
 {
 	int delta = headroom - skb_headroom(skb);
+	int osize = skb_end_offset(skb);
+	struct sock *sk = skb->sk;
 
 	if (WARN_ONCE(delta <= 0,
 		      "%s is expecting an increase in the headroom", __func__))
 		return skb;
 
-	/* pskb_expand_head() might crash, if skb is shared */
-	if (skb_shared(skb)) {
+	delta = SKB_DATA_ALIGN(delta);
+	/* pskb_expand_head() might crash, if skb is shared. */
+	if (skb_shared(skb) || !is_skb_wmem(skb)) {
 		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 
-		if (likely(nskb)) {
-			if (skb->sk)
-				skb_set_owner_w(nskb, skb->sk);
-			consume_skb(skb);
-		} else {
-			kfree_skb(skb);
-		}
+		if (unlikely(!nskb))
+			goto fail;
+
+		if (sk)
+			skb_set_owner_w(nskb, sk);
+		consume_skb(skb);
 		skb = nskb;
 	}
-	if (skb &&
-	    pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
-		kfree_skb(skb);
-		skb = NULL;
-	}
+	if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC))
+		goto fail;
+
+	if (sk && is_skb_wmem(skb)) {
+		delta = skb_end_offset(skb) - osize;
+		refcount_add(delta, &sk->sk_wmem_alloc);
+		skb->truesize += delta;
+	}
 	return skb;
+
+fail:
+	kfree_skb(skb);
+	return NULL;
 }
 EXPORT_SYMBOL(skb_expand_head);
net/core/skmsg.c:
@@ -474,6 +474,20 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
 }
 EXPORT_SYMBOL_GPL(sk_msg_recvmsg);
 
+bool sk_msg_is_readable(struct sock *sk)
+{
+	struct sk_psock *psock;
+	bool empty = true;
+
+	rcu_read_lock();
+	psock = sk_psock(sk);
+	if (likely(psock))
+		empty = list_empty(&psock->ingress_msg);
+	rcu_read_unlock();
+	return !empty;
+}
+EXPORT_SYMBOL_GPL(sk_msg_is_readable);
+
 static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
 						  struct sk_buff *skb)
 {
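With ->sock_is_readable() wired up, poll()/epoll on a sockmap-redirected socket finally reports data queued on the psock ingress list. A minimal consumer loop (plain poll(2), nothing sockmap-specific; fd setup omitted):

#include <poll.h>
#include <stdio.h>
#include <unistd.h>

/* Wait up to timeout_ms for readability, then drain one chunk.
 * Before this fix, data sitting only on the BPF psock ingress queue
 * left POLLIN unset for UDP and AF_UNIX sockets in a sockmap.
 */
static ssize_t read_one(int fd, void *buf, size_t len, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	int n = poll(&pfd, 1, timeout_ms);

	if (n <= 0)
		return n;	/* 0 = timeout, -1 = error */
	return read(fd, buf, len);
}

int main(void)
{
	char buf[256];
	ssize_t n = read_one(0, buf, sizeof(buf), 1000); /* stdin as demo fd */

	if (n > 0)
		printf("got %zd bytes\n", n);
	return 0;
}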
net/core/sock_destructor.h (new file, 12 lines):
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _NET_CORE_SOCK_DESTRUCTOR_H
+#define _NET_CORE_SOCK_DESTRUCTOR_H
+#include <net/tcp.h>
+
+static inline bool is_skb_wmem(const struct sk_buff *skb)
+{
+	return skb->destructor == sock_wfree ||
+	       skb->destructor == __sock_wfree ||
+	       (IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree);
+}
+#endif
net/core/sysctl_net_core.c:
@@ -419,7 +419,7 @@ static struct ctl_table net_core_table[] = {
 		.mode = 0600,
 		.proc_handler = proc_dolongvec_minmax_bpf_restricted,
 		.extra1 = &long_one,
-		.extra2 = &long_max,
+		.extra2 = &bpf_jit_limit_max,
 	},
 #endif
 	{
net/ipv4/tcp.c:
@@ -486,10 +486,7 @@ static bool tcp_stream_is_readable(struct sock *sk, int target)
 {
 	if (tcp_epollin_ready(sk, target))
 		return true;
-
-	if (sk->sk_prot->stream_memory_read)
-		return sk->sk_prot->stream_memory_read(sk);
-	return false;
+	return sk_is_readable(sk);
 }
 
 /*
net/ipv4/tcp_bpf.c:
@@ -150,19 +150,6 @@ int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg,
 EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir);
 
 #ifdef CONFIG_BPF_SYSCALL
-static bool tcp_bpf_stream_read(const struct sock *sk)
-{
-	struct sk_psock *psock;
-	bool empty = true;
-
-	rcu_read_lock();
-	psock = sk_psock(sk);
-	if (likely(psock))
-		empty = list_empty(&psock->ingress_msg);
-	rcu_read_unlock();
-	return !empty;
-}
-
 static int tcp_msg_wait_data(struct sock *sk, struct sk_psock *psock,
 			     long timeo)
 {
@@ -232,6 +219,7 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
 	bool cork = false, enospc = sk_msg_full(msg);
 	struct sock *sk_redir;
 	u32 tosend, delta = 0;
+	u32 eval = __SK_NONE;
 	int ret;
 
 more_data:
@@ -275,13 +263,24 @@ more_data:
 	case __SK_REDIRECT:
 		sk_redir = psock->sk_redir;
 		sk_msg_apply_bytes(psock, tosend);
+		if (!psock->apply_bytes) {
+			/* Clean up before releasing the sock lock. */
+			eval = psock->eval;
+			psock->eval = __SK_NONE;
+			psock->sk_redir = NULL;
+		}
 		if (psock->cork) {
 			cork = true;
 			psock->cork = NULL;
 		}
 		sk_msg_return(sk, msg, tosend);
 		release_sock(sk);
 
 		ret = tcp_bpf_sendmsg_redir(sk_redir, msg, tosend, flags);
 
+		if (eval == __SK_REDIRECT)
+			sock_put(sk_redir);
+
 		lock_sock(sk);
 		if (unlikely(ret < 0)) {
 			int free = sk_msg_free_nocharge(sk, msg);
@@ -479,7 +478,7 @@ static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS],
 	prot[TCP_BPF_BASE].unhash = sock_map_unhash;
 	prot[TCP_BPF_BASE].close = sock_map_close;
 	prot[TCP_BPF_BASE].recvmsg = tcp_bpf_recvmsg;
-	prot[TCP_BPF_BASE].stream_memory_read = tcp_bpf_stream_read;
+	prot[TCP_BPF_BASE].sock_is_readable = sk_msg_is_readable;
 
 	prot[TCP_BPF_TX] = prot[TCP_BPF_BASE];
 	prot[TCP_BPF_TX].sendmsg = tcp_bpf_sendmsg;
net/ipv4/udp.c:
@@ -2867,6 +2867,9 @@ __poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	    !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
 		mask &= ~(EPOLLIN | EPOLLRDNORM);
 
+	/* psock ingress_msg queue should not contain any bad checksum frames */
+	if (sk_is_readable(sk))
+		mask |= EPOLLIN | EPOLLRDNORM;
 	return mask;
 }
net/ipv4/udp_bpf.c:
@@ -114,6 +114,7 @@ static void udp_bpf_rebuild_protos(struct proto *prot, const struct proto *base)
 	*prot = *base;
 	prot->close = sock_map_close;
 	prot->recvmsg = udp_bpf_recvmsg;
+	prot->sock_is_readable = sk_msg_is_readable;
 }
 
 static void udp_bpf_check_v6_needs_rebuild(struct proto *ops)
net/mac80211/mesh.c:
@@ -672,7 +672,7 @@ ieee80211_mesh_update_bss_params(struct ieee80211_sub_if_data *sdata,
 				 u8 *ie, u8 ie_len)
 {
 	struct ieee80211_supported_band *sband;
-	const u8 *cap;
+	const struct element *cap;
 	const struct ieee80211_he_operation *he_oper = NULL;
 
 	sband = ieee80211_get_sband(sdata);
@@ -687,9 +687,10 @@ ieee80211_mesh_update_bss_params(struct ieee80211_sub_if_data *sdata,
 
 	sdata->vif.bss_conf.he_support = true;
 
-	cap = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_OPERATION, ie, ie_len);
-	if (cap && cap[1] >= ieee80211_he_oper_size(&cap[3]))
-		he_oper = (void *)(cap + 3);
+	cap = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie, ie_len);
+	if (cap && cap->datalen >= 1 + sizeof(*he_oper) &&
+	    cap->datalen >= 1 + ieee80211_he_oper_size(cap->data + 1))
+		he_oper = (void *)(cap->data + 1);
 
 	if (he_oper)
 		sdata->vif.bss_conf.he_oper.params =
net/mptcp/options.c:
@@ -485,11 +485,11 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
 		mpext = mptcp_get_ext(skb);
 		data_len = mpext ? mpext->data_len : 0;
 
-		/* we will check ext_copy.data_len in mptcp_write_options() to
+		/* we will check ops->data_len in mptcp_write_options() to
 		 * discriminate between TCPOLEN_MPTCP_MPC_ACK_DATA and
 		 * TCPOLEN_MPTCP_MPC_ACK
 		 */
-		opts->ext_copy.data_len = data_len;
+		opts->data_len = data_len;
 		opts->suboptions = OPTION_MPTCP_MPC_ACK;
 		opts->sndr_key = subflow->local_key;
 		opts->rcvr_key = subflow->remote_key;
@@ -505,9 +505,9 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
 		len = TCPOLEN_MPTCP_MPC_ACK_DATA;
 		if (opts->csum_reqd) {
 			/* we need to propagate more info to csum the pseudo hdr */
-			opts->ext_copy.data_seq = mpext->data_seq;
-			opts->ext_copy.subflow_seq = mpext->subflow_seq;
-			opts->ext_copy.csum = mpext->csum;
+			opts->data_seq = mpext->data_seq;
+			opts->subflow_seq = mpext->subflow_seq;
+			opts->csum = mpext->csum;
 			len += TCPOLEN_MPTCP_DSS_CHECKSUM;
 		}
 		*size = ALIGN(len, 4);
@@ -1227,7 +1227,7 @@ static void mptcp_set_rwin(const struct tcp_sock *tp)
 		WRITE_ONCE(msk->rcv_wnd_sent, ack_seq);
 }
 
-static u16 mptcp_make_csum(const struct mptcp_ext *mpext)
+static u16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __sum16 sum)
 {
 	struct csum_pseudo_header header;
 	__wsum csum;
@@ -1237,15 +1237,21 @@ static u16 mptcp_make_csum(const struct mptcp_ext *mpext)
 	 * always the 64-bit value, irrespective of what length is used in the
 	 * DSS option itself.
 	 */
-	header.data_seq = cpu_to_be64(mpext->data_seq);
-	header.subflow_seq = htonl(mpext->subflow_seq);
-	header.data_len = htons(mpext->data_len);
+	header.data_seq = cpu_to_be64(data_seq);
+	header.subflow_seq = htonl(subflow_seq);
+	header.data_len = htons(data_len);
 	header.csum = 0;
 
-	csum = csum_partial(&header, sizeof(header), ~csum_unfold(mpext->csum));
+	csum = csum_partial(&header, sizeof(header), ~csum_unfold(sum));
 	return (__force u16)csum_fold(csum);
 }
 
+static u16 mptcp_make_csum(const struct mptcp_ext *mpext)
+{
+	return __mptcp_make_csum(mpext->data_seq, mpext->subflow_seq, mpext->data_len,
+				 mpext->csum);
+}
+
 void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
 			 struct mptcp_out_options *opts)
 {
@@ -1337,7 +1343,7 @@ void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
 		len = TCPOLEN_MPTCP_MPC_SYN;
 	} else if (OPTION_MPTCP_MPC_SYNACK & opts->suboptions) {
 		len = TCPOLEN_MPTCP_MPC_SYNACK;
-	} else if (opts->ext_copy.data_len) {
+	} else if (opts->data_len) {
 		len = TCPOLEN_MPTCP_MPC_ACK_DATA;
 		if (opts->csum_reqd)
 			len += TCPOLEN_MPTCP_DSS_CHECKSUM;
@@ -1366,14 +1372,17 @@ void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
 
 		put_unaligned_be64(opts->rcvr_key, ptr);
 		ptr += 2;
-		if (!opts->ext_copy.data_len)
+		if (!opts->data_len)
 			goto mp_capable_done;
 
 		if (opts->csum_reqd) {
-			put_unaligned_be32(opts->ext_copy.data_len << 16 |
-					   mptcp_make_csum(&opts->ext_copy), ptr);
+			put_unaligned_be32(opts->data_len << 16 |
+					   __mptcp_make_csum(opts->data_seq,
+							     opts->subflow_seq,
+							     opts->data_len,
+							     opts->csum), ptr);
 		} else {
-			put_unaligned_be32(opts->ext_copy.data_len << 16 |
+			put_unaligned_be32(opts->data_len << 16 |
 					   TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
 		}
 		ptr += 1;
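The refactor lets the MPC+data+checksum path feed option-local fields into the checksum instead of the never-populated ext_copy. The pseudo-header fold itself is plain one's-complement (RFC 1071) arithmetic; a simplified standalone sketch (struct layout mirrors the kernel's csum_pseudo_header; the helper names and seed handling are local approximations, not the kernel's csum_partial/csum_fold):

#include <arpa/inet.h>
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors mptcp's csum_pseudo_header: 64-bit data seq, subflow seq,
 * data length, and a zeroed checksum slot (16 bytes, no padding).
 */
struct dss_pseudo_header {
	uint64_t data_seq;
	uint32_t subflow_seq;
	uint16_t data_len;
	uint16_t csum;
};

/* One's-complement sum over a byte buffer, folded to 16 bits. */
static uint16_t csum_fold_buf(const void *buf, size_t len, uint32_t sum)
{
	const uint16_t *p = buf;

	while (len > 1) {
		sum += *p++;
		len -= 2;
	}
	if (len)
		sum += *(const uint8_t *)p;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

static uint16_t make_dss_csum(uint64_t data_seq, uint32_t subflow_seq,
			      uint16_t data_len, uint16_t data_csum)
{
	struct dss_pseudo_header hdr = {
		.data_seq = htobe64(data_seq),
		.subflow_seq = htonl(subflow_seq),
		.data_len = htons(data_len),
		.csum = 0,
	};

	/* Seed with the (unfolded) checksum over the payload itself. */
	return csum_fold_buf(&hdr, sizeof(hdr), (uint16_t)~data_csum);
}

int main(void)
{
	printf("csum = 0x%04x\n", make_dss_csum(1, 1, 100, 0xabcd));
	return 0;
}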
net/sctp/sm_statefuns.c:
@@ -156,6 +156,12 @@ static enum sctp_disposition __sctp_sf_do_9_1_abort(
 					void *arg,
 					struct sctp_cmd_seq *commands);
 
+static enum sctp_disposition
+__sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep,
+			   const struct sctp_association *asoc,
+			   const union sctp_subtype type, void *arg,
+			   struct sctp_cmd_seq *commands);
+
 /* Small helper function that checks if the chunk length
  * is of the appropriate length.  The 'required_length' argument
  * is set to be the size of a specific chunk we are testing.
@@ -337,6 +343,14 @@ enum sctp_disposition sctp_sf_do_5_1B_init(struct net *net,
 	if (!chunk->singleton)
 		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
+	/* Make sure that the INIT chunk has a valid length.
+	 * Normally, this would cause an ABORT with a Protocol Violation
+	 * error, but since we don't have an association, we'll
+	 * just discard the packet.
+	 */
+	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
 	/* If the packet is an OOTB packet which is temporarily on the
 	 * control endpoint, respond with an ABORT.
 	 */
@@ -351,14 +365,6 @@ enum sctp_disposition sctp_sf_do_5_1B_init(struct net *net,
 	if (chunk->sctp_hdr->vtag != 0)
 		return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
 
-	/* Make sure that the INIT chunk has a valid length.
-	 * Normally, this would cause an ABORT with a Protocol Violation
-	 * error, but since we don't have an association, we'll
-	 * just discard the packet.
-	 */
-	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
-		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
-
 	/* If the INIT is coming toward a closing socket, we'll send back
 	 * and ABORT.  Essentially, this catches the race of INIT being
 	 * backloged to the socket at the same time as the user issues close().
@@ -704,6 +710,9 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net,
 	struct sock *sk;
 	int error = 0;
 
+	if (asoc && !sctp_vtag_verify(chunk, asoc))
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
 	/* If the packet is an OOTB packet which is temporarily on the
 	 * control endpoint, respond with an ABORT.
 	 */
@@ -718,7 +727,8 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net,
 	 * in sctp_unpack_cookie().
 	 */
 	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
-		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+						  commands);
 
 	/* If the endpoint is not listening or if the number of associations
 	 * on the TCP-style socket exceed the max backlog, respond with an
@@ -1524,20 +1534,16 @@ static enum sctp_disposition sctp_sf_do_unexpected_init(
 	if (!chunk->singleton)
 		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
+	/* Make sure that the INIT chunk has a valid length. */
+	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
 	/* 3.1 A packet containing an INIT chunk MUST have a zero Verification
 	 * Tag.
 	 */
 	if (chunk->sctp_hdr->vtag != 0)
 		return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
 
-	/* Make sure that the INIT chunk has a valid length.
-	 * In this case, we generate a protocol violation since we have
-	 * an association established.
-	 */
-	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
-		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
-						  commands);
-
 	if (SCTP_INPUT_CB(chunk->skb)->encap_port != chunk->transport->encap_port)
 		return sctp_sf_new_encap_port(net, ep, asoc, type, arg, commands);
 
@@ -1882,9 +1888,9 @@ static enum sctp_disposition sctp_sf_do_dupcook_a(
 	 * its peer.
 	 */
 	if (sctp_state(asoc, SHUTDOWN_ACK_SENT)) {
-		disposition = sctp_sf_do_9_2_reshutack(net, ep, asoc,
-				SCTP_ST_CHUNK(chunk->chunk_hdr->type),
-				chunk, commands);
+		disposition = __sctp_sf_do_9_2_reshutack(net, ep, asoc,
+				SCTP_ST_CHUNK(chunk->chunk_hdr->type),
+				chunk, commands);
 		if (SCTP_DISPOSITION_NOMEM == disposition)
 			goto nomem;
 
@@ -2202,9 +2208,11 @@ enum sctp_disposition sctp_sf_do_5_2_4_dupcook(
 	 * enough for the chunk header.  Cookie length verification is
 	 * done later.
 	 */
-	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
-		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
-						  commands);
+	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr))) {
+		if (!sctp_vtag_verify(chunk, asoc))
+			asoc = NULL;
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands);
+	}
 
 	/* "Decode" the chunk.  We have no optional parameters so we
 	 * are in good shape.
@@ -2341,7 +2349,7 @@ enum sctp_disposition sctp_sf_shutdown_pending_abort(
 	 */
 	if (SCTP_ADDR_DEL ==
 		    sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
-		return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	if (!sctp_err_chunk_valid(chunk))
 		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
@@ -2387,7 +2395,7 @@ enum sctp_disposition sctp_sf_shutdown_sent_abort(
 	 */
 	if (SCTP_ADDR_DEL ==
 		    sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
-		return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	if (!sctp_err_chunk_valid(chunk))
 		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
@@ -2657,7 +2665,7 @@ enum sctp_disposition sctp_sf_do_9_1_abort(
 	 */
 	if (SCTP_ADDR_DEL ==
 		    sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
-		return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	if (!sctp_err_chunk_valid(chunk))
 		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
@@ -2970,13 +2978,11 @@ enum sctp_disposition sctp_sf_do_9_2_shut_ctsn(
  * that belong to this association, it should discard the INIT chunk and
  * retransmit the SHUTDOWN ACK chunk.
  */
-enum sctp_disposition sctp_sf_do_9_2_reshutack(
-					struct net *net,
-					const struct sctp_endpoint *ep,
-					const struct sctp_association *asoc,
-					const union sctp_subtype type,
-					void *arg,
-					struct sctp_cmd_seq *commands)
+static enum sctp_disposition
+__sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep,
+			   const struct sctp_association *asoc,
+			   const union sctp_subtype type, void *arg,
+			   struct sctp_cmd_seq *commands)
 {
 	struct sctp_chunk *chunk = arg;
 	struct sctp_chunk *reply;
@@ -3010,6 +3016,26 @@ nomem:
 	return SCTP_DISPOSITION_NOMEM;
 }
 
+enum sctp_disposition
+sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep,
+			 const struct sctp_association *asoc,
+			 const union sctp_subtype type, void *arg,
+			 struct sctp_cmd_seq *commands)
+{
+	struct sctp_chunk *chunk = arg;
+
+	if (!chunk->singleton)
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+	if (chunk->sctp_hdr->vtag != 0)
+		return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
+
+	return __sctp_sf_do_9_2_reshutack(net, ep, asoc, type, arg, commands);
+}
+
 /*
  * sctp_sf_do_ecn_cwr
  *
@@ -3662,6 +3688,9 @@ enum sctp_disposition sctp_sf_ootb(struct net *net,
 
 	SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
 
+	if (asoc && !sctp_vtag_verify(chunk, asoc))
+		asoc = NULL;
+
 	ch = (struct sctp_chunkhdr *)chunk->chunk_hdr;
 	do {
 		/* Report violation if the chunk is less then minimal */
@@ -3777,12 +3806,6 @@ static enum sctp_disposition sctp_sf_shut_8_4_5(
 
 	SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 
-	/* If the chunk length is invalid, we don't want to process
-	 * the reset of the packet.
-	 */
-	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
-		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
-
 	/* We need to discard the rest of the packet to prevent
 	 * potential boomming attacks from additional bundled chunks.
 	 * This is documented in SCTP Threats ID.
@@ -3810,6 +3833,9 @@ enum sctp_disposition sctp_sf_do_8_5_1_E_sa(struct net *net,
 {
 	struct sctp_chunk *chunk = arg;
 
+	if (!sctp_vtag_verify(chunk, asoc))
+		asoc = NULL;
+
 	/* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
 	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
 		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
@@ -3845,6 +3871,11 @@ enum sctp_disposition sctp_sf_do_asconf(struct net *net,
 		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 	}
 
+	/* Make sure that the ASCONF ADDIP chunk has a valid length. */
+	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_addip_chunk)))
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+						  commands);
+
 	/* ADD-IP: Section 4.1.1
 	 * This chunk MUST be sent in an authenticated way by using
 	 * the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk
@@ -3853,13 +3884,7 @@ enum sctp_disposition sctp_sf_do_asconf(struct net *net,
 	 */
 	if (!asoc->peer.asconf_capable ||
 	    (!net->sctp.addip_noauth && !chunk->auth))
-		return sctp_sf_discard_chunk(net, ep, asoc, type, arg,
-					     commands);
-
-	/* Make sure that the ASCONF ADDIP chunk has a valid length. */
-	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_addip_chunk)))
-		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
-						  commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	hdr = (struct sctp_addiphdr *)chunk->skb->data;
 	serial = ntohl(hdr->serial);
@@ -3988,6 +4013,12 @@ enum sctp_disposition sctp_sf_do_asconf_ack(struct net *net,
 		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 	}
 
+	/* Make sure that the ADDIP chunk has a valid length. */
+	if (!sctp_chunk_length_valid(asconf_ack,
+				     sizeof(struct sctp_addip_chunk)))
+		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+						  commands);
+
 	/* ADD-IP, Section 4.1.2:
 	 * This chunk MUST be sent in an authenticated way by using
 	 * the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk
@@ -3996,14 +4027,7 @@ enum sctp_disposition sctp_sf_do_asconf_ack(struct net *net,
 	 */
 	if (!asoc->peer.asconf_capable ||
 	    (!net->sctp.addip_noauth && !asconf_ack->auth))
-		return sctp_sf_discard_chunk(net, ep, asoc, type, arg,
-					     commands);
-
-	/* Make sure that the ADDIP chunk has a valid length. */
-	if (!sctp_chunk_length_valid(asconf_ack,
-				     sizeof(struct sctp_addip_chunk)))
-		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
-						  commands);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
 	addip_hdr = (struct sctp_addiphdr *)asconf_ack->skb->data;
 	rcvd_serial = ntohl(addip_hdr->serial);
@@ -4575,6 +4599,9 @@ enum sctp_disposition sctp_sf_discard_chunk(struct net *net,
 {
 	struct sctp_chunk *chunk = arg;
 
+	if (asoc && !sctp_vtag_verify(chunk, asoc))
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
 	/* Make sure that the chunk has a valid length.
 	 * Since we don't know the chunk type, we use a general
 	 * chunkhdr structure to make a comparison.
@@ -4642,6 +4669,9 @@ enum sctp_disposition sctp_sf_violation(struct net *net,
 {
 	struct sctp_chunk *chunk = arg;
 
+	if (!sctp_vtag_verify(chunk, asoc))
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
 	/* Make sure that the chunk has a valid length. */
 	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
 		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
@@ -6348,6 +6378,7 @@ static struct sctp_packet *sctp_ootb_pkt_new(
 	 * yet.
 	 */
 	switch (chunk->chunk_hdr->type) {
+	case SCTP_CID_INIT:
 	case SCTP_CID_INIT_ACK:
 	{
 		struct sctp_initack_chunk *initack;
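Most of the hunks above add the same guard: verify the packet's verification tag against the association before acting on ABORT-class chunks, so an off-path attacker who cannot guess the vtag can no longer tear down a session. A standalone sketch of the check (field names simplified; the kernel's sctp_vtag_verify compares against the association's own announced tag):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sctp_packet_hdr { uint32_t vtag; };
struct sctp_assoc      { uint32_t my_vtag; };

/* Simplified analogue of sctp_vtag_verify(): a chunk is only
 * acceptable if it carries the tag this endpoint announced.
 */
static bool vtag_verify(const struct sctp_packet_hdr *hdr,
			const struct sctp_assoc *asoc)
{
	return hdr->vtag == asoc->my_vtag;
}

int main(void)
{
	struct sctp_assoc asoc = { .my_vtag = 0xdeadbeef };
	struct sctp_packet_hdr spoofed = { .vtag = 0x12345678 };

	/* A blind ABORT carrying a guessed tag is now discarded. */
	printf("%s\n", vtag_verify(&spoofed, &asoc) ? "accept" : "discard");
	return 0;
}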
net/smc/af_smc.c:
@@ -1057,7 +1057,7 @@ static void smc_connect_work(struct work_struct *work)
 	if (smc->clcsock->sk->sk_err) {
 		smc->sk.sk_err = smc->clcsock->sk->sk_err;
 	} else if ((1 << smc->clcsock->sk->sk_state) &
-					(TCPF_SYN_SENT | TCP_SYN_RECV)) {
+					(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
 		rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo);
 		if ((rc == -EPIPE) &&
 		    ((1 << smc->clcsock->sk->sk_state) &
net/smc/smc_llc.c:
@@ -1822,7 +1822,7 @@ void smc_llc_link_active(struct smc_link *link)
 			    link->smcibdev->ibdev->name, link->ibport);
 	link->state = SMC_LNK_ACTIVE;
 	if (link->lgr->llc_testlink_time) {
-		link->llc_testlink_time = link->lgr->llc_testlink_time * HZ;
+		link->llc_testlink_time = link->lgr->llc_testlink_time;
 		schedule_delayed_work(&link->llc_testlink_wrk,
 				      link->llc_testlink_time);
 	}
net/tipc/crypto.c:
@@ -2285,43 +2285,53 @@ static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr)
 	u16 key_gen = msg_key_gen(hdr);
 	u16 size = msg_data_sz(hdr);
 	u8 *data = msg_data(hdr);
+	unsigned int keylen;
+
+	/* Verify whether the size can exist in the packet */
+	if (unlikely(size < sizeof(struct tipc_aead_key) + TIPC_AEAD_KEYLEN_MIN)) {
+		pr_debug("%s: message data size is too small\n", rx->name);
+		goto exit;
+	}
+
+	keylen = ntohl(*((__be32 *)(data + TIPC_AEAD_ALG_NAME)));
+
+	/* Verify the supplied size values */
+	if (unlikely(size != keylen + sizeof(struct tipc_aead_key) ||
+		     keylen > TIPC_AEAD_KEY_SIZE_MAX)) {
+		pr_debug("%s: invalid MSG_CRYPTO key size\n", rx->name);
+		goto exit;
+	}
 
 	spin_lock(&rx->lock);
 	if (unlikely(rx->skey || (key_gen == rx->key_gen && rx->key.keys))) {
 		pr_err("%s: key existed <%p>, gen %d vs %d\n", rx->name,
 		       rx->skey, key_gen, rx->key_gen);
-		goto exit;
+		goto exit_unlock;
 	}
 
 	/* Allocate memory for the key */
 	skey = kmalloc(size, GFP_ATOMIC);
 	if (unlikely(!skey)) {
 		pr_err("%s: unable to allocate memory for skey\n", rx->name);
-		goto exit;
+		goto exit_unlock;
 	}
 
 	/* Copy key from msg data */
-	skey->keylen = ntohl(*((__be32 *)(data + TIPC_AEAD_ALG_NAME)));
+	skey->keylen = keylen;
 	memcpy(skey->alg_name, data, TIPC_AEAD_ALG_NAME);
 	memcpy(skey->key, data + TIPC_AEAD_ALG_NAME + sizeof(__be32),
 	       skey->keylen);
 
-	/* Sanity check */
-	if (unlikely(size != tipc_aead_key_size(skey))) {
-		kfree(skey);
-		skey = NULL;
-		goto exit;
-	}
-
 	rx->key_gen = key_gen;
 	rx->skey_mode = msg_key_mode(hdr);
 	rx->skey = skey;
 	rx->nokey = 0;
 	mb(); /* for nokey flag */
 
-exit:
+exit_unlock:
 	spin_unlock(&rx->lock);
 
+exit:
 	/* Schedule the key attaching on this crypto */
 	if (likely(skey && queue_delayed_work(tx->wq, &rx->work, 0)))
 		return true;
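The point of the reordered checks is that both length fields are now validated against the actual message size before any allocation or copy driven by attacker-controlled data. A standalone sketch of the same invariant (struct layout abbreviated from tipc_aead_key; the constants are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ALG_NAME_LEN	32	/* stands in for TIPC_AEAD_ALG_NAME */
#define KEYLEN_MIN	20	/* illustrative minimum */
#define KEYLEN_MAX	36	/* illustrative maximum */

/* Abbreviated layout of the on-the-wire key blob: algorithm name,
 * 4-byte key length, then the key bytes.
 */
struct aead_key {
	char	 alg_name[ALG_NAME_LEN];
	uint32_t keylen;
	char	 key[];
};

static bool key_size_valid(uint32_t msg_size, uint32_t keylen)
{
	/* header must fit, and keylen must exactly account for the rest */
	if (msg_size < sizeof(struct aead_key) + KEYLEN_MIN)
		return false;
	if (keylen > KEYLEN_MAX)
		return false;
	return msg_size == sizeof(struct aead_key) + keylen;
}

int main(void)
{
	/* A keylen that overstates the payload is rejected up front,
	 * instead of after a kmalloc()+memcpy() sized from wire data.
	 */
	printf("%d\n", key_size_valid(sizeof(struct aead_key) + 16, 1 << 30));
	return 0;
}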
net/tls/tls_main.c:
@@ -681,12 +681,12 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
 
 	prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
 	prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
-	prot[TLS_BASE][TLS_SW].stream_memory_read = tls_sw_stream_read;
+	prot[TLS_BASE][TLS_SW].sock_is_readable = tls_sw_sock_is_readable;
 	prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;
 
 	prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
 	prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg;
-	prot[TLS_SW][TLS_SW].stream_memory_read = tls_sw_stream_read;
+	prot[TLS_SW][TLS_SW].sock_is_readable = tls_sw_sock_is_readable;
 	prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;
 
 #ifdef CONFIG_TLS_DEVICE
net/tls/tls_sw.c:
@@ -35,6 +35,7 @@
  * SOFTWARE.
  */
 
+#include <linux/bug.h>
 #include <linux/sched/signal.h>
 #include <linux/module.h>
 #include <linux/splice.h>
@@ -43,6 +44,14 @@
 #include <net/strparser.h>
 #include <net/tls.h>
 
+noinline void tls_err_abort(struct sock *sk, int err)
+{
+	WARN_ON_ONCE(err >= 0);
+	/* sk->sk_err should contain a positive error code. */
+	sk->sk_err = -err;
+	sk_error_report(sk);
+}
+
 static int __skb_nsg(struct sk_buff *skb, int offset, int len,
 		     unsigned int recursion_level)
 {
@@ -419,7 +428,7 @@ int tls_tx_records(struct sock *sk, int flags)
 
 tx_err:
 	if (rc < 0 && rc != -EAGAIN)
-		tls_err_abort(sk, EBADMSG);
+		tls_err_abort(sk, -EBADMSG);
 
 	return rc;
 }
@@ -450,7 +459,7 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
 
 		/* If err is already set on socket, return the same code */
 		if (sk->sk_err) {
-			ctx->async_wait.err = sk->sk_err;
+			ctx->async_wait.err = -sk->sk_err;
 		} else {
 			ctx->async_wait.err = err;
 			tls_err_abort(sk, err);
@@ -763,7 +772,7 @@ static int tls_push_record(struct sock *sk, int flags,
 			       msg_pl->sg.size + prot->tail_size, i);
 	if (rc < 0) {
 		if (rc != -EINPROGRESS) {
-			tls_err_abort(sk, EBADMSG);
+			tls_err_abort(sk, -EBADMSG);
 			if (split) {
 				tls_ctx->pending_open_record_frags = true;
 				tls_merge_open_record(sk, rec, tmp, orig_end);
@@ -1827,7 +1836,7 @@ int tls_sw_recvmsg(struct sock *sk,
 			err = decrypt_skb_update(sk, skb, &msg->msg_iter,
 						 &chunk, &zc, async_capable);
 			if (err < 0 && err != -EINPROGRESS) {
-				tls_err_abort(sk, EBADMSG);
+				tls_err_abort(sk, -EBADMSG);
 				goto recv_end;
 			}
 
@@ -2007,7 +2016,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
 		}
 
 		if (err < 0) {
-			tls_err_abort(sk, EBADMSG);
+			tls_err_abort(sk, -EBADMSG);
 			goto splice_read_end;
 		}
 		ctx->decrypted = 1;
@@ -2026,7 +2035,7 @@ splice_read_end:
 	return copied ? : err;
 }
 
-bool tls_sw_stream_read(const struct sock *sk)
+bool tls_sw_sock_is_readable(struct sock *sk)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
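The sign convention matters at the syscall boundary: sk_err is stored positive and surfaces as a positive errno, while read()/write() themselves return -1. Before the fix, a kTLS socket could hand a nonsensical positive "byte count" back to userspace on decryption failure. A minimal consumer check (plain sockets API; the fd is assumed to be a kTLS socket):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Read from a kTLS fd and report decryption failures. With the sign
 * fixed, a bad record surfaces as n == -1 with errno == EBADMSG.
 */
static ssize_t tls_read_checked(int fd, void *buf, size_t len)
{
	ssize_t n = read(fd, buf, len);

	if (n < 0 && errno == EBADMSG)
		fprintf(stderr, "TLS record failed to decrypt: %s\n",
			strerror(errno));
	return n;
}

int main(void)
{
	char buf[4096];

	/* stdin stands in for the TLS socket in this sketch */
	return tls_read_checked(0, buf, sizeof(buf)) < 0 ? 1 : 0;
}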
net/unix/af_unix.c:
@@ -3052,6 +3052,8 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
 	/* readable? */
 	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
 		mask |= EPOLLIN | EPOLLRDNORM;
+	if (sk_is_readable(sk))
+		mask |= EPOLLIN | EPOLLRDNORM;
 
 	/* Connection-based need to check for termination and startup */
 	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
@@ -3091,6 +3093,8 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
 	/* readable? */
 	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
 		mask |= EPOLLIN | EPOLLRDNORM;
+	if (sk_is_readable(sk))
+		mask |= EPOLLIN | EPOLLRDNORM;
 
 	/* Connection-based need to check for termination and startup */
 	if (sk->sk_type == SOCK_SEQPACKET) {
net/unix/unix_bpf.c:
@@ -102,6 +102,7 @@ static void unix_dgram_bpf_rebuild_protos(struct proto *prot, const struct proto
 	*prot = *base;
 	prot->close = sock_map_close;
 	prot->recvmsg = unix_bpf_recvmsg;
+	prot->sock_is_readable = sk_msg_is_readable;
 }
 
 static void unix_stream_bpf_rebuild_protos(struct proto *prot,
@@ -110,6 +111,7 @@ static void unix_stream_bpf_rebuild_protos(struct proto *prot,
 	*prot = *base;
 	prot->close = sock_map_close;
 	prot->recvmsg = unix_bpf_recvmsg;
+	prot->sock_is_readable = sk_msg_is_readable;
 	prot->unhash = sock_map_unhash;
 }
net/wireless/core.c:
@@ -524,6 +524,7 @@ use_default_name:
 	INIT_WORK(&rdev->propagate_cac_done_wk, cfg80211_propagate_cac_done_wk);
 	INIT_WORK(&rdev->mgmt_registrations_update_wk,
 		  cfg80211_mgmt_registrations_update_wk);
+	spin_lock_init(&rdev->mgmt_registrations_lock);
 
 #ifdef CONFIG_CFG80211_DEFAULT_PS
 	rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@ -1279,7 +1280,6 @@ void cfg80211_init_wdev(struct wireless_dev *wdev)
 	INIT_LIST_HEAD(&wdev->event_list);
 	spin_lock_init(&wdev->event_lock);
 	INIT_LIST_HEAD(&wdev->mgmt_registrations);
-	spin_lock_init(&wdev->mgmt_registrations_lock);
 	INIT_LIST_HEAD(&wdev->pmsr_list);
 	spin_lock_init(&wdev->pmsr_lock);
 	INIT_WORK(&wdev->pmsr_free_wk, cfg80211_pmsr_free_wk);
net/wireless/core.h:
@@ -100,6 +100,8 @@ struct cfg80211_registered_device {
 	struct work_struct propagate_cac_done_wk;
 
 	struct work_struct mgmt_registrations_update_wk;
+	/* lock for all wdev lists */
+	spinlock_t mgmt_registrations_lock;
 
 	/* must be last because of the way we do wiphy_priv(),
 	 * and it should at least be aligned to NETDEV_ALIGN */
net/wireless/mlme.c:
@@ -452,9 +452,9 @@ static void cfg80211_mgmt_registrations_update(struct wireless_dev *wdev)
 
 	lockdep_assert_held(&rdev->wiphy.mtx);
 
-	spin_lock_bh(&wdev->mgmt_registrations_lock);
+	spin_lock_bh(&rdev->mgmt_registrations_lock);
 	if (!wdev->mgmt_registrations_need_update) {
-		spin_unlock_bh(&wdev->mgmt_registrations_lock);
+		spin_unlock_bh(&rdev->mgmt_registrations_lock);
 		return;
 	}
 
@@ -479,7 +479,7 @@ static void cfg80211_mgmt_registrations_update(struct wireless_dev *wdev)
 	rcu_read_unlock();
 
 	wdev->mgmt_registrations_need_update = 0;
-	spin_unlock_bh(&wdev->mgmt_registrations_lock);
+	spin_unlock_bh(&rdev->mgmt_registrations_lock);
 
 	rdev_update_mgmt_frame_registrations(rdev, wdev, &upd);
 }
@@ -503,6 +503,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
 				int match_len, bool multicast_rx,
 				struct netlink_ext_ack *extack)
 {
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	struct cfg80211_mgmt_registration *reg, *nreg;
 	int err = 0;
 	u16 mgmt_type;
@@ -548,7 +549,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
 	if (!nreg)
 		return -ENOMEM;
 
-	spin_lock_bh(&wdev->mgmt_registrations_lock);
+	spin_lock_bh(&rdev->mgmt_registrations_lock);
 
 	list_for_each_entry(reg, &wdev->mgmt_registrations, list) {
 		int mlen = min(match_len, reg->match_len);
@@ -583,7 +584,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
 		list_add(&nreg->list, &wdev->mgmt_registrations);
 	}
 	wdev->mgmt_registrations_need_update = 1;
-	spin_unlock_bh(&wdev->mgmt_registrations_lock);
+	spin_unlock_bh(&rdev->mgmt_registrations_lock);
 
 	cfg80211_mgmt_registrations_update(wdev);
 
@@ -591,7 +592,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
 
 out:
 	kfree(nreg);
-	spin_unlock_bh(&wdev->mgmt_registrations_lock);
+	spin_unlock_bh(&rdev->mgmt_registrations_lock);
 
 	return err;
 }
@@ -602,7 +603,7 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid)
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 	struct cfg80211_mgmt_registration *reg, *tmp;
 
-	spin_lock_bh(&wdev->mgmt_registrations_lock);
+	spin_lock_bh(&rdev->mgmt_registrations_lock);
 
 	list_for_each_entry_safe(reg, tmp, &wdev->mgmt_registrations, list) {
 		if (reg->nlportid != nlportid)
@@ -615,7 +616,7 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid)
 		schedule_work(&rdev->mgmt_registrations_update_wk);
 	}
 
-	spin_unlock_bh(&wdev->mgmt_registrations_lock);
+	spin_unlock_bh(&rdev->mgmt_registrations_lock);
 
 	if (nlportid && rdev->crit_proto_nlportid == nlportid) {
 		rdev->crit_proto_nlportid = 0;
@@ -628,15 +629,16 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid)
 
 void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev)
 {
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	struct cfg80211_mgmt_registration *reg, *tmp;
 
-	spin_lock_bh(&wdev->mgmt_registrations_lock);
+	spin_lock_bh(&rdev->mgmt_registrations_lock);
 	list_for_each_entry_safe(reg, tmp, &wdev->mgmt_registrations, list) {
 		list_del(&reg->list);
 		kfree(reg);
 	}
 	wdev->mgmt_registrations_need_update = 1;
-	spin_unlock_bh(&wdev->mgmt_registrations_lock);
+	spin_unlock_bh(&rdev->mgmt_registrations_lock);
 
 	cfg80211_mgmt_registrations_update(wdev);
 }
@@ -784,7 +786,7 @@ bool cfg80211_rx_mgmt_khz(struct wireless_dev *wdev, int freq, int sig_dbm,
 	data = buf + ieee80211_hdrlen(mgmt->frame_control);
 	data_len = len - ieee80211_hdrlen(mgmt->frame_control);
 
-	spin_lock_bh(&wdev->mgmt_registrations_lock);
+	spin_lock_bh(&rdev->mgmt_registrations_lock);
 
 	list_for_each_entry(reg, &wdev->mgmt_registrations, list) {
 		if (reg->frame_type != ftype)
@@ -808,7 +810,7 @@ bool cfg80211_rx_mgmt_khz(struct wireless_dev *wdev, int freq, int sig_dbm,
 		break;
 	}
 
-	spin_unlock_bh(&wdev->mgmt_registrations_lock);
+	spin_unlock_bh(&rdev->mgmt_registrations_lock);
 
 	trace_cfg80211_return_bool(result);
 	return result;
net/wireless/scan.c:
@@ -418,14 +418,17 @@ cfg80211_add_nontrans_list(struct cfg80211_bss *trans_bss,
 	}
 	ssid_len = ssid[1];
 	ssid = ssid + 2;
-	rcu_read_unlock();
 
 	/* check if nontrans_bss is in the list */
 	list_for_each_entry(bss, &trans_bss->nontrans_list, nontrans_list) {
-		if (is_bss(bss, nontrans_bss->bssid, ssid, ssid_len))
+		if (is_bss(bss, nontrans_bss->bssid, ssid, ssid_len)) {
+			rcu_read_unlock();
 			return 0;
+		}
 	}
 
+	rcu_read_unlock();
+
 	/* add to the list */
 	list_add_tail(&nontrans_bss->nontrans_list, &trans_bss->nontrans_list);
 	return 0;
net/wireless/util.c:
@@ -1028,14 +1028,14 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
 	    !(rdev->wiphy.interface_modes & (1 << ntype)))
 		return -EOPNOTSUPP;
 
-	/* if it's part of a bridge, reject changing type to station/ibss */
-	if (netif_is_bridge_port(dev) &&
-	    (ntype == NL80211_IFTYPE_ADHOC ||
-	     ntype == NL80211_IFTYPE_STATION ||
-	     ntype == NL80211_IFTYPE_P2P_CLIENT))
-		return -EBUSY;
-
 	if (ntype != otype) {
+		/* if it's part of a bridge, reject changing type to station/ibss */
+		if (netif_is_bridge_port(dev) &&
+		    (ntype == NL80211_IFTYPE_ADHOC ||
+		     ntype == NL80211_IFTYPE_STATION ||
+		     ntype == NL80211_IFTYPE_P2P_CLIENT))
+			return -EBUSY;
+
 		dev->ieee80211_ptr->use_4addr = false;
 		dev->ieee80211_ptr->mesh_id_up_len = 0;
 		wdev_lock(dev->ieee80211_ptr);
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c:
@@ -949,7 +949,6 @@ static void redir_to_connected(int family, int sotype, int sock_mapfd,
 	int err, n;
 	u32 key;
 	char b;
-	int retries = 100;
 
 	zero_verdict_count(verd_mapfd);
 
@@ -1002,17 +1001,11 @@ static void redir_to_connected(int family, int sotype, int sock_mapfd,
 		goto close_peer1;
 	if (pass != 1)
 		FAIL("%s: want pass count 1, have %d", log_prefix, pass);
-again:
-	n = read(c0, &b, 1);
-	if (n < 0) {
-		if (errno == EAGAIN && retries--) {
-			usleep(1000);
-			goto again;
-		}
-		FAIL_ERRNO("%s: read", log_prefix);
-	}
+	n = recv_timeout(c0, &b, 1, 0, IO_TIMEOUT_SEC);
+	if (n < 0)
+		FAIL_ERRNO("%s: recv_timeout", log_prefix);
 	if (n == 0)
-		FAIL("%s: incomplete read", log_prefix);
+		FAIL("%s: incomplete recv", log_prefix);
 
 close_peer1:
 	xclose(p1);
@@ -1571,7 +1564,6 @@ static void unix_redir_to_connected(int sotype, int sock_mapfd,
 	const char *log_prefix = redir_mode_str(mode);
 	int c0, c1, p0, p1;
 	unsigned int pass;
-	int retries = 100;
 	int err, n;
 	int sfd[2];
 	u32 key;
@@ -1606,17 +1598,11 @@ static void unix_redir_to_connected(int sotype, int sock_mapfd,
 	if (pass != 1)
 		FAIL("%s: want pass count 1, have %d", log_prefix, pass);
 
-again:
-	n = read(mode == REDIR_INGRESS ? p0 : c0, &b, 1);
-	if (n < 0) {
-		if (errno == EAGAIN && retries--) {
-			usleep(1000);
-			goto again;
-		}
-		FAIL_ERRNO("%s: read", log_prefix);
-	}
+	n = recv_timeout(mode == REDIR_INGRESS ? p0 : c0, &b, 1, 0, IO_TIMEOUT_SEC);
+	if (n < 0)
+		FAIL_ERRNO("%s: recv_timeout", log_prefix);
 	if (n == 0)
-		FAIL("%s: incomplete read", log_prefix);
+		FAIL("%s: incomplete recv", log_prefix);
 
 close:
 	xclose(c1);
@@ -1748,7 +1734,6 @@ static void udp_redir_to_connected(int family, int sock_mapfd, int verd_mapfd,
 	const char *log_prefix = redir_mode_str(mode);
 	int c0, c1, p0, p1;
 	unsigned int pass;
-	int retries = 100;
 	int err, n;
 	u32 key;
 	char b;
@@ -1781,17 +1766,11 @@ static void udp_redir_to_connected(int family, int sock_mapfd, int verd_mapfd,
 	if (pass != 1)
 		FAIL("%s: want pass count 1, have %d", log_prefix, pass);
 
-again:
-	n = read(mode == REDIR_INGRESS ? p0 : c0, &b, 1);
-	if (n < 0) {
-		if (errno == EAGAIN && retries--) {
-			usleep(1000);
-			goto again;
-		}
-		FAIL_ERRNO("%s: read", log_prefix);
-	}
+	n = recv_timeout(mode == REDIR_INGRESS ? p0 : c0, &b, 1, 0, IO_TIMEOUT_SEC);
+	if (n < 0)
+		FAIL_ERRNO("%s: recv_timeout", log_prefix);
 	if (n == 0)
-		FAIL("%s: incomplete read", log_prefix);
+		FAIL("%s: incomplete recv", log_prefix);
 
 close_cli1:
 	xclose(c1);
@@ -1841,7 +1820,6 @@ static void inet_unix_redir_to_connected(int family, int type, int sock_mapfd,
 	const char *log_prefix = redir_mode_str(mode);
 	int c0, c1, p0, p1;
 	unsigned int pass;
-	int retries = 100;
 	int err, n;
 	int sfd[2];
 	u32 key;
@@ -1876,17 +1854,11 @@ static void inet_unix_redir_to_connected(int family, int type, int sock_mapfd,
 	if (pass != 1)
 		FAIL("%s: want pass count 1, have %d", log_prefix, pass);
 
-again:
-	n = read(mode == REDIR_INGRESS ? p0 : c0, &b, 1);
-	if (n < 0) {
-		if (errno == EAGAIN && retries--) {
-			usleep(1000);
-			goto again;
-		}
-		FAIL_ERRNO("%s: read", log_prefix);
-	}
+	n = recv_timeout(mode == REDIR_INGRESS ? p0 : c0, &b, 1, 0, IO_TIMEOUT_SEC);
+	if (n < 0)
+		FAIL_ERRNO("%s: recv_timeout", log_prefix);
 	if (n == 0)
-		FAIL("%s: incomplete read", log_prefix);
+		FAIL("%s: incomplete recv", log_prefix);
 
 close_cli1:
 	xclose(c1);
@@ -1932,7 +1904,6 @@ static void unix_inet_redir_to_connected(int family, int type, int sock_mapfd,
 	int sfd[2];
 	u32 key;
 	char b;
-	int retries = 100;
 
 	zero_verdict_count(verd_mapfd);
 
@@ -1963,17 +1934,11 @@ static void unix_inet_redir_to_connected(int family, int type, int sock_mapfd,
 	if (pass != 1)
 		FAIL("%s: want pass count 1, have %d", log_prefix, pass);
 
-again:
-	n = read(mode == REDIR_INGRESS ? p0 : c0, &b, 1);
-	if (n < 0) {
-		if (errno == EAGAIN && retries--) {
-			usleep(1000);
-			goto again;
-		}
-		FAIL_ERRNO("%s: read", log_prefix);
-	}
+	n = recv_timeout(mode == REDIR_INGRESS ? p0 : c0, &b, 1, 0, IO_TIMEOUT_SEC);
+	if (n < 0)
+		FAIL_ERRNO("%s: recv_timeout", log_prefix);
 	if (n == 0)
-		FAIL("%s: incomplete read", log_prefix);
+		FAIL("%s: incomplete recv", log_prefix);
 
 close:
 	xclose(c1);
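recv_timeout() replaces the hand-rolled EAGAIN/usleep retry loops: now that poll() works on these sockets, the tests can simply wait for readability. A sketch of what such a helper can look like (the selftests' actual implementation lives in their shared code; this poll(2)-based version and its ETIME choice are illustrative):

#include <errno.h>
#include <poll.h>
#include <sys/socket.h>

/* Wait up to timeout_sec for data, then recv() once. Returns the
 * recv() result, or -1 with errno set on timeout/error.
 */
ssize_t recv_timeout(int fd, void *buf, size_t len, int flags,
		     int timeout_sec)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	int r = poll(&pfd, 1, timeout_sec * 1000);

	if (r < 0)
		return -1;
	if (r == 0) {
		errno = ETIME;
		return -1;
	}
	return recv(fd, buf, len, flags);
}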
tools/testing/selftests/net/fcnal-test.sh:
@@ -445,10 +445,13 @@ cleanup()
 		ip -netns ${NSA} link set dev ${NSA_DEV} down
 		ip -netns ${NSA} link del dev ${NSA_DEV}
 
+		ip netns pids ${NSA} | xargs kill 2>/dev/null
 		ip netns del ${NSA}
 	fi
 
+	ip netns pids ${NSB} | xargs kill 2>/dev/null
 	ip netns del ${NSB}
+	ip netns pids ${NSC} | xargs kill 2>/dev/null
 	ip netns del ${NSC} >/dev/null 2>&1
 }