Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Handle notifier registration failures properly in tun/tap driver, from
    Tonghao Zhang.

 2) Fix bpf verifier handling of subtraction bounds and add a testcase
    for this, from Edward Cree.
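
    The underlying arithmetic is ordinary interval subtraction; a minimal
    sketch of the sound bounds (not the verifier's actual data structures):

        /* Interval arithmetic for BPF_SUB: with x in [x_min, x_max] and
         * y in [y_min, y_max], the sound result bounds pair the minuend's
         * minimum with the subtrahend's maximum:
         *
         *     x - y  is in  [x_min - y_max, x_max - y_min]
         *
         * Pairing min with min (or max with max) under-approximates the
         * range and can let an out-of-bounds offset pass verification.
         */
        static void sub_bounds(s64 x_min, s64 x_max, s64 y_min, s64 y_max,
                               s64 *res_min, s64 *res_max)
        {
                *res_min = x_min - y_max;
                *res_max = x_max - y_min;
        }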

 3) Increase reset timeout in ftgmac100 driver, from Ben Herrenschmidt.

 4) Fix use after free in prb_retire_rx_blk_timer_expired() in AF_PACKET,
    from Cong Wang.
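
    The general shape of a timer use-after-free, sketched on a hypothetical
    object (not the actual AF_PACKET structures): the handler dereferences
    an object that can be freed while the timer is still pending or running.

        struct rx_ring {
                struct timer_list retire_timer; /* handler touches this object */
                /* ... block-retire state ... */
        };

        static void ring_teardown(struct rx_ring *ring)
        {
                /* del_timer() can return while the handler is still running
                 * on another CPU; del_timer_sync() waits for it to finish,
                 * so the kfree() below cannot race with the handler.
                 */
                del_timer_sync(&ring->retire_timer);
                kfree(ring);
        }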

 5) Fix SELinux regression due to recent UDP optimizations, from Paolo
    Abeni.

 6) We accidentally increment IPSTATS_MIB_FRAGFAILS in the ipv6 code
    paths, fix from Stefano Brivio.

 7) Fix some mem leaks in dccp, from Xin Long.

 8) Adjust MDIO_BUS kconfig deps to avoid build errors, from Arnd
    Bergmann.

 9) Mac address length check and buffer size fixes from Cong Wang.
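
    Background for this one: sa_data in struct sockaddr is only 14 bytes,
    which is too small for some link layers (InfiniBand hardware addresses
    are 20 bytes), and callers did not always validate dev->addr_len against
    the buffer they copied into. A sketch of the safe pattern, mirroring the
    team driver hunk further down (the helper name and the explicit length
    guard are illustrative):

        static int set_port_mac(struct net_device *port_dev,
                                const unsigned char *dev_addr)
        {
                struct sockaddr_storage addr;   /* large enough for any addr_len */

                if (port_dev->addr_len > sizeof(addr.__data))
                        return -EINVAL;
                memcpy(addr.__data, dev_addr, port_dev->addr_len);
                addr.ss_family = port_dev->type;
                return dev_set_mac_address(port_dev, (struct sockaddr *)&addr);
        }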

10) Don't leak sockets in ipv6 udp early demux, from Paolo Abeni.

11) Fix return value when copy_from_user() fails in
    bpf_prog_get_info_by_fd(), from Daniel Borkmann.
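
    The pattern behind this fix is generic: copy_from_user() returns the
    number of bytes left uncopied, not an errno, so its result must never be
    passed through as a return value, and a short copy must not fall through
    as success. A minimal sketch, with info and useraddr as stand-ins:

        if (copy_from_user(&info, useraddr, sizeof(info)))
                return -EFAULT; /* map any short copy to -EFAULT */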

12) Handle PHY_HALTED properly in phy library state machine, from
    Florian Fainelli.

13) Fix OOPS in fib_sync_down_dev(), from Ido Schimmel.

14) Fix truesize calculation in virtio_net which led to performance
    regressions, from Michael S Tsirkin.
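
    On item 14: virtio_net encodes each mergeable buffer's true size in its
    per-buffer context pointer, and the bug was recording that length before
    any leftover tail space was folded into the buffer. The corrected
    ordering (visible in the virtio_net hunk further down) is, in outline:

        len += hole;                      /* absorb the unusable tail space */
        alloc_frag->offset += hole;
        sg_init_one(rq->sg, buf, len);
        ctx = (void *)(unsigned long)len; /* truesize now includes the hole */
        err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);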

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (76 commits)
  samples/bpf: fix bpf tunnel cleanup
  udp6: fix jumbogram reception
  ppp: Fix a scheduling-while-atomic bug in del_chan
  Revert "net: bcmgenet: Remove init parameter from bcmgenet_mii_config"
  virtio_net: fix truesize for mergeable buffers
  mv643xx_eth: fix of_irq_to_resource() error check
  MAINTAINERS: Add more files to the PHY LIBRARY section
  ipv4: fib: Fix NULL pointer deref during fib_sync_down_dev()
  net: phy: Correctly process PHY_HALTED in phy_stop_machine()
  sunhme: fix up GREG_STAT and GREG_IMASK register offsets
  bpf: fix bpf_prog_get_info_by_fd to dump correct xlated_prog_len
  tcp: avoid bogus gcc-7 array-bounds warning
  net: tc35815: fix spelling mistake: "Intterrupt" -> "Interrupt"
  bpf: don't indicate success when copy_from_user fails
  udp6: fix socket leak on early demux
  net: thunderx: Fix BGX transmit stall due to underflow
  Revert "vhost: cache used event for better performance"
  team: use a larger struct for mac address
  net: check dev->addr_len for dev_set_mac_address()
  phy: bcm-ns-usb3: fix MDIO_BUS dependency
  ...
Merged by Linus Torvalds on 2017-07-31 22:36:42 -07:00 as commit bc78d646e7.
80 changed files with 639 additions and 306 deletions.

MAINTAINERS
@@ -5090,12 +5090,20 @@ M:	Andrew Lunn <andrew@lunn.ch>
 M:	Florian Fainelli <f.fainelli@gmail.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
-F:	include/linux/phy.h
-F:	include/linux/phy_fixed.h
-F:	drivers/net/phy/
+F:	Documentation/ABI/testing/sysfs-bus-mdio
+F:	Documentation/devicetree/bindings/net/mdio*
 F:	Documentation/networking/phy.txt
+F:	drivers/net/phy/
 F:	drivers/of/of_mdio.c
 F:	drivers/of/of_net.c
+F:	include/linux/*mdio*.h
+F:	include/linux/of_net.h
+F:	include/linux/phy.h
+F:	include/linux/phy_fixed.h
+F:	include/linux/platform_data/mdio-gpio.h
+F:	include/trace/events/mdio.h
+F:	include/uapi/linux/mdio.h
+F:	include/uapi/linux/mii.h
 
 EXT2 FILE SYSTEM
 M:	Jan Kara <jack@suse.com>

drivers/net/bonding/bond_main.c
@@ -2050,6 +2050,7 @@ static int bond_miimon_inspect(struct bonding *bond)
 				continue;
 
 			bond_propose_link_state(slave, BOND_LINK_FAIL);
+			commit++;
 			slave->delay = bond->params.downdelay;
 			if (slave->delay) {
 				netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n",
@@ -2088,6 +2089,7 @@ static int bond_miimon_inspect(struct bonding *bond)
 				continue;
 
 			bond_propose_link_state(slave, BOND_LINK_BACK);
+			commit++;
 			slave->delay = bond->params.updelay;
 
 			if (slave->delay) {

drivers/net/ethernet/aurora/nb8800.c
@@ -609,7 +609,7 @@ static void nb8800_mac_config(struct net_device *dev)
 		mac_mode |= HALF_DUPLEX;
 
 	if (gigabit) {
-		if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII)
+		if (phy_interface_is_rgmii(dev->phydev))
 			mac_mode |= RGMII_MODE;
 
 		mac_mode |= GMAC_MODE;
@@ -1268,11 +1268,10 @@ static int nb8800_tangox_init(struct net_device *dev)
 		break;
 
 	case PHY_INTERFACE_MODE_RGMII:
-		pad_mode = PAD_MODE_RGMII;
-		break;
-
-	case PHY_INTERFACE_MODE_RGMII_TXID:
-		pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY;
+	case PHY_INTERFACE_MODE_RGMII_ID:
+	case PHY_INTERFACE_MODE_RGMII_RXID:
+	case PHY_INTERFACE_MODE_RGMII_TXID:
+		pad_mode = PAD_MODE_RGMII;
 		break;
 
 	default:

drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -3669,7 +3669,7 @@ static int bcmgenet_resume(struct device *d)
 
 	phy_init_hw(priv->phydev);
 	/* Speed settings must be restored */
-	bcmgenet_mii_config(priv->dev);
+	bcmgenet_mii_config(priv->dev, false);
 
 	/* disable ethernet MAC while updating its registers */
 	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);

drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -698,7 +698,7 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
 
 /* MDIO routines */
 int bcmgenet_mii_init(struct net_device *dev);
-int bcmgenet_mii_config(struct net_device *dev);
+int bcmgenet_mii_config(struct net_device *dev, bool init);
 int bcmgenet_mii_probe(struct net_device *dev);
 void bcmgenet_mii_exit(struct net_device *dev);
 void bcmgenet_mii_reset(struct net_device *dev);

drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -238,7 +238,7 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
 					  bcmgenet_fixed_phy_link_update);
 }
 
-int bcmgenet_mii_config(struct net_device *dev)
+int bcmgenet_mii_config(struct net_device *dev, bool init)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	struct phy_device *phydev = priv->phydev;
@@ -327,7 +327,8 @@ int bcmgenet_mii_config(struct net_device *dev)
 		bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
 	}
 
-	dev_info_once(kdev, "configuring instance for %s\n", phy_name);
+	if (init)
+		dev_info(kdev, "configuring instance for %s\n", phy_name);
 
 	return 0;
 }
@@ -375,7 +376,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
 	 * PHY speed which is needed for bcmgenet_mii_config() to configure
 	 * things appropriately.
 	 */
-	ret = bcmgenet_mii_config(dev);
+	ret = bcmgenet_mii_config(dev, true);
 	if (ret) {
 		phy_disconnect(priv->phydev);
 		return ret;

drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -292,11 +292,30 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac)
 	u64 cmr_cfg;
 	u64 port_cfg = 0;
 	u64 misc_ctl = 0;
+	bool tx_en, rx_en;
 
 	cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
-	cmr_cfg &= ~CMR_EN;
+	tx_en = cmr_cfg & CMR_PKT_TX_EN;
+	rx_en = cmr_cfg & CMR_PKT_RX_EN;
+	cmr_cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
 	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
 
+	/* Wait for BGX RX to be idle */
+	if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG,
+			 GMI_PORT_CFG_RX_IDLE, false)) {
+		dev_err(&bgx->pdev->dev, "BGX%d LMAC%d GMI RX not idle\n",
+			bgx->bgx_id, lmac->lmacid);
+		return;
+	}
+
+	/* Wait for BGX TX to be idle */
+	if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG,
+			 GMI_PORT_CFG_TX_IDLE, false)) {
+		dev_err(&bgx->pdev->dev, "BGX%d LMAC%d GMI TX not idle\n",
+			bgx->bgx_id, lmac->lmacid);
+		return;
+	}
+
 	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
 	misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);
 
@@ -347,10 +366,8 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac)
 	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
 	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);
 
-	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
-
-	/* Re-enable lmac */
-	cmr_cfg |= CMR_EN;
+	/* Restore CMR config settings */
+	cmr_cfg |= (rx_en ? CMR_PKT_RX_EN : 0) | (tx_en ? CMR_PKT_TX_EN : 0);
 	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
 
 	if (bgx->is_rgx && (cmr_cfg & (CMR_PKT_RX_EN | CMR_PKT_TX_EN)))

drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -170,6 +170,8 @@
 #define  GMI_PORT_CFG_DUPLEX		BIT_ULL(2)
 #define  GMI_PORT_CFG_SLOT_TIME		BIT_ULL(3)
 #define  GMI_PORT_CFG_SPEED_MSB		BIT_ULL(8)
+#define  GMI_PORT_CFG_RX_IDLE		BIT_ULL(12)
+#define  GMI_PORT_CFG_TX_IDLE		BIT_ULL(13)
 #define BGX_GMP_GMI_RXX_JABBER		0x38038
 #define BGX_GMP_GMI_TXX_THRESH		0x38210
 #define BGX_GMP_GMI_TXX_APPEND		0x38218

drivers/net/ethernet/faraday/ftgmac100.c
@@ -125,7 +125,7 @@ static int ftgmac100_reset_mac(struct ftgmac100 *priv, u32 maccr)
 	iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
 	iowrite32(maccr | FTGMAC100_MACCR_SW_RST,
 		  priv->base + FTGMAC100_OFFSET_MACCR);
-	for (i = 0; i < 50; i++) {
+	for (i = 0; i < 200; i++) {
 		unsigned int maccr;
 
 		maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
@@ -392,7 +392,7 @@ static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry,
 	struct net_device *netdev = priv->netdev;
 	struct sk_buff *skb;
 	dma_addr_t map;
-	int err;
+	int err = 0;
 
 	skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE);
 	if (unlikely(!skb)) {
@@ -428,7 +428,7 @@ static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry,
 	else
 		rxdes->rxdes0 = 0;
 
-	return 0;
+	return err;
 }
 
 static unsigned int ftgmac100_next_rx_pointer(struct ftgmac100 *priv,
@@ -1682,6 +1682,7 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
 	priv->mii_bus->name = "ftgmac100_mdio";
 	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
 		 pdev->name, pdev->id);
+	priv->mii_bus->parent = priv->dev;
 	priv->mii_bus->priv = priv->netdev;
 	priv->mii_bus->read = ftgmac100_mdiobus_read;
 	priv->mii_bus->write = ftgmac100_mdiobus_write;

drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2734,7 +2734,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
 
 	ppd.shared = pdev;
 	memset(&res, 0, sizeof(res));
-	if (!of_irq_to_resource(pnp, 0, &res)) {
+	if (of_irq_to_resource(pnp, 0, &res) <= 0) {
 		dev_err(&pdev->dev, "missing interrupt on %s\n", pnp->name);
 		return -EINVAL;
 	}

drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -22,6 +22,7 @@
 #include <linux/if_vlan.h>
 #include <linux/reset.h>
 #include <linux/tcp.h>
+#include <linux/interrupt.h>
 
 #include "mtk_eth_soc.h"
 
@@ -947,6 +948,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 		      RX_DMA_FPORT_MASK;
 		mac--;
 
+		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
+			     !eth->netdev[mac]))
+			goto release_desc;
+
 		netdev = eth->netdev[mac];
 
 		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))

drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -786,6 +786,10 @@ static void cb_timeout_handler(struct work_struct *work)
 	mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
 }
 
+static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
+static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
+			      struct mlx5_cmd_msg *msg);
+
 static void cmd_work_handler(struct work_struct *work)
 {
 	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
@@ -796,17 +800,28 @@ static void cmd_work_handler(struct work_struct *work)
 	struct semaphore *sem;
 	unsigned long flags;
 	bool poll_cmd = ent->polling;
+	int alloc_ret;
 
 	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
 	down(sem);
 	if (!ent->page_queue) {
-		ent->idx = alloc_ent(cmd);
-		if (ent->idx < 0) {
+		alloc_ret = alloc_ent(cmd);
+		if (alloc_ret < 0) {
 			mlx5_core_err(dev, "failed to allocate command entry\n");
+			if (ent->callback) {
+				ent->callback(-EAGAIN, ent->context);
+				mlx5_free_cmd_msg(dev, ent->out);
+				free_msg(dev, ent->in);
+				free_cmd(ent);
+			} else {
+				ent->ret = -EAGAIN;
+				complete(&ent->done);
+			}
 			up(sem);
 			return;
 		}
+		ent->idx = alloc_ret;
 	} else {
 		ent->idx = cmd->max_reg_cmds;
 		spin_lock_irqsave(&cmd->alloc_lock, flags);
@@ -967,7 +982,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
 
 	err = wait_func(dev, ent);
 	if (err == -ETIMEDOUT)
-		goto out_free;
+		goto out;
 
 	ds = ent->ts2 - ent->ts1;
 	op = MLX5_GET(mbox_in, in->first.data, opcode);
@@ -1430,6 +1445,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
 					mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
 						      ent->idx);
 					free_ent(cmd, ent->idx);
+					free_cmd(ent);
 				}
 				continue;
 			}
@@ -1488,6 +1504,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
 				free_msg(dev, ent->in);
 
 				err = err ? err : ent->status;
-				free_cmd(ent);
+				if (!forced)
+					free_cmd(ent);
 				callback(err, context);
 			} else {

drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -266,6 +266,14 @@ struct mlx5e_dcbx {
 };
 #endif
 
+#define MAX_PIN_NUM	8
+struct mlx5e_pps {
+	u8                         pin_caps[MAX_PIN_NUM];
+	struct work_struct         out_work;
+	u64                        start[MAX_PIN_NUM];
+	u8                         enabled;
+};
+
 struct mlx5e_tstamp {
 	rwlock_t                   lock;
 	struct cyclecounter        cycles;
@@ -277,7 +285,7 @@ struct mlx5e_tstamp {
 	struct mlx5_core_dev      *mdev;
 	struct ptp_clock          *ptp;
 	struct ptp_clock_info      ptp_info;
-	u8                        *pps_pin_caps;
+	struct mlx5e_pps           pps_info;
 };
 
 enum {

drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -53,6 +53,15 @@ enum {
 	MLX5E_EVENT_MODE_ONCE_TILL_ARM	= 0x2,
 };
 
+enum {
+	MLX5E_MTPPS_FS_ENABLE			= BIT(0x0),
+	MLX5E_MTPPS_FS_PATTERN			= BIT(0x2),
+	MLX5E_MTPPS_FS_PIN_MODE			= BIT(0x3),
+	MLX5E_MTPPS_FS_TIME_STAMP		= BIT(0x4),
+	MLX5E_MTPPS_FS_OUT_PULSE_DURATION	= BIT(0x5),
+	MLX5E_MTPPS_FS_ENH_OUT_PER_ADJ		= BIT(0x7),
+};
+
 void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp,
 			struct skb_shared_hwtstamps *hwts)
 {
@@ -73,17 +82,46 @@ static u64 mlx5e_read_internal_timer(const struct cyclecounter *cc)
 	return mlx5_read_internal_timer(tstamp->mdev) & cc->mask;
 }
 
+static void mlx5e_pps_out(struct work_struct *work)
+{
+	struct mlx5e_pps *pps_info = container_of(work, struct mlx5e_pps,
+						  out_work);
+	struct mlx5e_tstamp *tstamp = container_of(pps_info, struct mlx5e_tstamp,
+						   pps_info);
+	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+	unsigned long flags;
+	int i;
+
+	for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
+		u64 tstart;
+
+		write_lock_irqsave(&tstamp->lock, flags);
+		tstart = tstamp->pps_info.start[i];
+		tstamp->pps_info.start[i] = 0;
+		write_unlock_irqrestore(&tstamp->lock, flags);
+		if (!tstart)
+			continue;
+
+		MLX5_SET(mtpps_reg, in, pin, i);
+		MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
+		MLX5_SET(mtpps_reg, in, field_select, MLX5E_MTPPS_FS_TIME_STAMP);
+		mlx5_set_mtpps(tstamp->mdev, in, sizeof(in));
+	}
+}
+
 static void mlx5e_timestamp_overflow(struct work_struct *work)
 {
 	struct delayed_work *dwork = to_delayed_work(work);
 	struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
 						   overflow_work);
+	struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp);
 	unsigned long flags;
 
 	write_lock_irqsave(&tstamp->lock, flags);
 	timecounter_read(&tstamp->clock);
 	write_unlock_irqrestore(&tstamp->lock, flags);
-	schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period);
+	queue_delayed_work(priv->wq, &tstamp->overflow_work,
+			   msecs_to_jiffies(tstamp->overflow_period * 1000));
 }
 
 int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
@@ -213,18 +251,6 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
 	int neg_adj = 0;
 	struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
 						   ptp_info);
-	struct mlx5e_priv *priv =
-		container_of(tstamp, struct mlx5e_priv, tstamp);
-
-	if (MLX5_CAP_GEN(priv->mdev, pps_modify)) {
-		u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
-
-		/* For future use need to add a loop for finding all 1PPS out pins */
-		MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT);
-		MLX5_SET(mtpps_reg, in, out_periodic_adjustment, delta & 0xFFFF);
-
-		mlx5_set_mtpps(priv->mdev, in, sizeof(in));
-	}
 
 	if (delta < 0) {
 		neg_adj = 1;
@@ -253,12 +279,13 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
 	struct mlx5e_priv *priv =
 		container_of(tstamp, struct mlx5e_priv, tstamp);
 	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+	u32 field_select = 0;
+	u8 pin_mode = 0;
 	u8 pattern = 0;
 	int pin = -1;
 	int err = 0;
 
-	if (!MLX5_CAP_GEN(priv->mdev, pps) ||
-	    !MLX5_CAP_GEN(priv->mdev, pps_modify))
+	if (!MLX5_PPS_CAP(priv->mdev))
 		return -EOPNOTSUPP;
 
 	if (rq->extts.index >= tstamp->ptp_info.n_pins)
@@ -268,15 +295,21 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
 		pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index);
 		if (pin < 0)
 			return -EBUSY;
+		pin_mode = MLX5E_PIN_MODE_IN;
+		pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
+		field_select = MLX5E_MTPPS_FS_PIN_MODE |
+			       MLX5E_MTPPS_FS_PATTERN |
+			       MLX5E_MTPPS_FS_ENABLE;
+	} else {
+		pin = rq->extts.index;
+		field_select = MLX5E_MTPPS_FS_ENABLE;
 	}
 
-	if (rq->extts.flags & PTP_FALLING_EDGE)
-		pattern = 1;
-
 	MLX5_SET(mtpps_reg, in, pin, pin);
-	MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_IN);
+	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
 	MLX5_SET(mtpps_reg, in, pattern, pattern);
 	MLX5_SET(mtpps_reg, in, enable, on);
+	MLX5_SET(mtpps_reg, in, field_select, field_select);
 
 	err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
 	if (err)
@@ -295,14 +328,18 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
 	struct mlx5e_priv *priv =
 		container_of(tstamp, struct mlx5e_priv, tstamp);
 	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
-	u64 nsec_now, nsec_delta, time_stamp;
+	u64 nsec_now, nsec_delta, time_stamp = 0;
 	u64 cycles_now, cycles_delta;
 	struct timespec64 ts;
 	unsigned long flags;
+	u32 field_select = 0;
+	u8 pin_mode = 0;
+	u8 pattern = 0;
 	int pin = -1;
+	int err = 0;
 	s64 ns;
 
-	if (!MLX5_CAP_GEN(priv->mdev, pps_modify))
+	if (!MLX5_PPS_CAP(priv->mdev))
 		return -EOPNOTSUPP;
 
 	if (rq->perout.index >= tstamp->ptp_info.n_pins)
@@ -313,14 +350,16 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
 				   rq->perout.index);
 		if (pin < 0)
 			return -EBUSY;
-	}
 
-	ts.tv_sec = rq->perout.period.sec;
-	ts.tv_nsec = rq->perout.period.nsec;
-	ns = timespec64_to_ns(&ts);
+		pin_mode = MLX5E_PIN_MODE_OUT;
+		pattern = MLX5E_OUT_PATTERN_PERIODIC;
+		ts.tv_sec = rq->perout.period.sec;
+		ts.tv_nsec = rq->perout.period.nsec;
+		ns = timespec64_to_ns(&ts);
 
-	if (on)
 		if ((ns >> 1) != 500000000LL)
 			return -EINVAL;
 
 	ts.tv_sec = rq->perout.start.sec;
 	ts.tv_nsec = rq->perout.start.nsec;
 	ns = timespec64_to_ns(&ts);
@@ -332,13 +371,39 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
 				 tstamp->cycles.mult);
 	write_unlock_irqrestore(&tstamp->lock, flags);
 	time_stamp = cycles_now + cycles_delta;
+	field_select = MLX5E_MTPPS_FS_PIN_MODE |
+		       MLX5E_MTPPS_FS_PATTERN |
+		       MLX5E_MTPPS_FS_ENABLE |
+		       MLX5E_MTPPS_FS_TIME_STAMP;
+	} else {
+		pin = rq->perout.index;
+		field_select = MLX5E_MTPPS_FS_ENABLE;
+	}
+
 	MLX5_SET(mtpps_reg, in, pin, pin);
-	MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT);
-	MLX5_SET(mtpps_reg, in, pattern, MLX5E_OUT_PATTERN_PERIODIC);
+	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
+	MLX5_SET(mtpps_reg, in, pattern, pattern);
 	MLX5_SET(mtpps_reg, in, enable, on);
 	MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
+	MLX5_SET(mtpps_reg, in, field_select, field_select);
 
-	return mlx5_set_mtpps(priv->mdev, in, sizeof(in));
+	err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
+	if (err)
+		return err;
+
+	return mlx5_set_mtppse(priv->mdev, pin, 0,
+			       MLX5E_EVENT_MODE_REPETETIVE & on);
+}
+
+static int mlx5e_pps_configure(struct ptp_clock_info *ptp,
+			       struct ptp_clock_request *rq,
+			       int on)
+{
+	struct mlx5e_tstamp *tstamp =
+		container_of(ptp, struct mlx5e_tstamp, ptp_info);
+
+	tstamp->pps_info.enabled = !!on;
+	return 0;
 }
 
 static int mlx5e_ptp_enable(struct ptp_clock_info *ptp,
@@ -350,6 +415,8 @@ static int mlx5e_ptp_enable(struct ptp_clock_info *ptp,
 		return mlx5e_extts_configure(ptp, rq, on);
 	case PTP_CLK_REQ_PEROUT:
 		return mlx5e_perout_configure(ptp, rq, on);
+	case PTP_CLK_REQ_PPS:
+		return mlx5e_pps_configure(ptp, rq, on);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -395,6 +462,7 @@ static int mlx5e_init_pin_config(struct mlx5e_tstamp *tstamp)
 		return -ENOMEM;
 	tstamp->ptp_info.enable = mlx5e_ptp_enable;
 	tstamp->ptp_info.verify = mlx5e_ptp_verify;
+	tstamp->ptp_info.pps = 1;
 
 	for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
 		snprintf(tstamp->ptp_info.pin_config[i].name,
@@ -422,22 +490,56 @@ static void mlx5e_get_pps_caps(struct mlx5e_priv *priv,
 	tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
 					      cap_max_num_of_pps_out_pins);
 
-	tstamp->pps_pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
-	tstamp->pps_pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
-	tstamp->pps_pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
-	tstamp->pps_pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
-	tstamp->pps_pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
-	tstamp->pps_pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
-	tstamp->pps_pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
-	tstamp->pps_pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
+	tstamp->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
+	tstamp->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
+	tstamp->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
+	tstamp->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
+	tstamp->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
+	tstamp->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
+	tstamp->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
+	tstamp->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
 }
 
 void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
 			     struct ptp_clock_event *event)
 {
+	struct net_device *netdev = priv->netdev;
 	struct mlx5e_tstamp *tstamp = &priv->tstamp;
+	struct timespec64 ts;
+	u64 nsec_now, nsec_delta;
+	u64 cycles_now, cycles_delta;
+	int pin = event->index;
+	s64 ns;
+	unsigned long flags;
 
-	ptp_clock_event(tstamp->ptp, event);
+	switch (tstamp->ptp_info.pin_config[pin].func) {
+	case PTP_PF_EXTTS:
+		if (tstamp->pps_info.enabled) {
+			event->type = PTP_CLOCK_PPSUSR;
+			event->pps_times.ts_real = ns_to_timespec64(event->timestamp);
+		} else {
+			event->type = PTP_CLOCK_EXTTS;
+		}
+		ptp_clock_event(tstamp->ptp, event);
+		break;
+	case PTP_PF_PEROUT:
+		mlx5e_ptp_gettime(&tstamp->ptp_info, &ts);
+		cycles_now = mlx5_read_internal_timer(tstamp->mdev);
+		ts.tv_sec += 1;
+		ts.tv_nsec = 0;
+		ns = timespec64_to_ns(&ts);
+		write_lock_irqsave(&tstamp->lock, flags);
+		nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
+		nsec_delta = ns - nsec_now;
+		cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
+					 tstamp->cycles.mult);
+		tstamp->pps_info.start[pin] = cycles_now + cycles_delta;
+		queue_work(priv->wq, &tstamp->pps_info.out_work);
+		write_unlock_irqrestore(&tstamp->lock, flags);
+		break;
+	default:
+		netdev_err(netdev, "%s: Unhandled event\n", __func__);
+	}
 }
 
 void mlx5e_timestamp_init(struct mlx5e_priv *priv)
@@ -473,9 +575,10 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
 	do_div(ns, NSEC_PER_SEC / 2 / HZ);
 	tstamp->overflow_period = ns;
 
+	INIT_WORK(&tstamp->pps_info.out_work, mlx5e_pps_out);
 	INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
 	if (tstamp->overflow_period)
-		schedule_delayed_work(&tstamp->overflow_work, 0);
+		queue_delayed_work(priv->wq, &tstamp->overflow_work, 0);
 	else
 		mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n");
 
@@ -484,16 +587,10 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
 	snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp");
 
 	/* Initialize 1PPS data structures */
-#define MAX_PIN_NUM	8
-	tstamp->pps_pin_caps = kzalloc(sizeof(u8) * MAX_PIN_NUM, GFP_KERNEL);
-	if (tstamp->pps_pin_caps) {
-		if (MLX5_CAP_GEN(priv->mdev, pps))
-			mlx5e_get_pps_caps(priv, tstamp);
-		if (tstamp->ptp_info.n_pins)
-			mlx5e_init_pin_config(tstamp);
-	} else {
-		mlx5_core_warn(priv->mdev, "1PPS initialization failed\n");
-	}
+	if (MLX5_PPS_CAP(priv->mdev))
+		mlx5e_get_pps_caps(priv, tstamp);
+	if (tstamp->ptp_info.n_pins)
+		mlx5e_init_pin_config(tstamp);
 
 	tstamp->ptp = ptp_clock_register(&tstamp->ptp_info,
 					 &priv->mdev->pdev->dev);
@@ -516,8 +613,7 @@ void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv)
 		priv->tstamp.ptp = NULL;
 	}
 
-	kfree(tstamp->pps_pin_caps);
-	kfree(tstamp->ptp_info.pin_config);
-
+	cancel_work_sync(&tstamp->pps_info.out_work);
 	cancel_delayed_work_sync(&tstamp->overflow_work);
+	kfree(tstamp->ptp_info.pin_config);
 }

drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -276,7 +276,7 @@ static void add_rule_to_list(struct mlx5e_priv *priv,
 
 static bool outer_header_zero(u32 *match_criteria)
 {
-	int size = MLX5_ST_SZ_BYTES(fte_match_param);
+	int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers);
 	char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
 					     outer_headers);
 
@@ -320,7 +320,7 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
 
 	spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
 	flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
-	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, 1);
+	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, dst ? 1 : 0);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
 		netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",

drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -377,7 +377,6 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
 		break;
 	case MLX5_DEV_EVENT_PPS:
 		eqe = (struct mlx5_eqe *)param;
-		ptp_event.type = PTP_CLOCK_EXTTS;
 		ptp_event.index = eqe->data.pps.pin;
 		ptp_event.timestamp =
 			timecounter_cyc2time(&priv->tstamp.clock,

drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -698,7 +698,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
 	else
 		mlx5_core_dbg(dev, "port_module_event is not set\n");
 
-	if (MLX5_CAP_GEN(dev, pps))
+	if (MLX5_PPS_CAP(dev))
 		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);
 
 	if (MLX5_CAP_GEN(dev, fpga))

drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1668,7 +1668,8 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
 	int i;
 
 	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
-	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH ||
+	    esw->mode == SRIOV_NONE)
 		return;
 
 	esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",

drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -178,8 +178,6 @@ out:
 
 static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
 {
-	mlx5_fs_remove_rx_underlay_qpn(mdev, qp->qpn);
-
 	mlx5_core_destroy_qp(mdev, qp);
 }
 
@@ -194,8 +192,6 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
 		return err;
 	}
 
-	mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
-
 	err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]);
 	if (err) {
 		mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
@@ -253,6 +249,7 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
 
 static int mlx5i_init_rx(struct mlx5e_priv *priv)
 {
+	struct mlx5i_priv *ipriv = priv->ppriv;
 	int err;
 
 	err = mlx5e_create_indirect_rqt(priv);
@@ -271,12 +268,18 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
 	if (err)
 		goto err_destroy_indirect_tirs;
 
-	err = mlx5i_create_flow_steering(priv);
+	err = mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
 	if (err)
 		goto err_destroy_direct_tirs;
 
+	err = mlx5i_create_flow_steering(priv);
+	if (err)
+		goto err_remove_rx_underlay_qpn;
+
 	return 0;
 
+err_remove_rx_underlay_qpn:
+	mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
 err_destroy_direct_tirs:
 	mlx5e_destroy_direct_tirs(priv);
 err_destroy_indirect_tirs:
@@ -290,6 +293,9 @@ err_destroy_indirect_rqts:
 
 static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
 {
+	struct mlx5i_priv *ipriv = priv->ppriv;
+
+	mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
 	mlx5i_destroy_flow_steering(priv);
 	mlx5e_destroy_direct_tirs(priv);
 	mlx5e_destroy_indirect_tirs(priv);

drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -162,22 +162,17 @@ static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev)
 static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
 					   u8 *port1, u8 *port2)
 {
-	if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
-		if (tracker->netdev_state[0].tx_enabled) {
-			*port1 = 1;
-			*port2 = 1;
-		} else {
-			*port1 = 2;
-			*port2 = 2;
-		}
-	} else {
-		*port1 = 1;
-		*port2 = 2;
-		if (!tracker->netdev_state[0].link_up)
-			*port1 = 2;
-		else if (!tracker->netdev_state[1].link_up)
-			*port2 = 1;
+	*port1 = 1;
+	*port2 = 2;
+	if (!tracker->netdev_state[0].tx_enabled ||
+	    !tracker->netdev_state[0].link_up) {
+		*port1 = 2;
+		return;
 	}
+
+	if (!tracker->netdev_state[1].tx_enabled ||
+	    !tracker->netdev_state[1].link_up)
+		*port2 = 1;
 }
 
 static void mlx5_activate_lag(struct mlx5_lag *ldev,

drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -154,6 +154,11 @@ int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
 int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
 int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
 
+#define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) &&		\
+			    MLX5_CAP_GEN((mdev), pps_modify) &&		\
+			    MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) &&	\
+			    MLX5_CAP_MCAM_FEATURE((mdev), mtpps_enh_out_per_adj))
+
 int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw);
 
 void mlx5e_init(void);

drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -88,7 +88,11 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
 	int vf;
 
 	if (!sriov->enabled_vfs)
+#ifdef CONFIG_MLX5_CORE_EN
+		goto disable_sriov_resources;
+#else
 		return;
+#endif
 
 	for (vf = 0; vf < sriov->num_vfs; vf++) {
 		if (!sriov->vfs_ctx[vf].enabled)
@@ -103,6 +107,7 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
 	}
 
 #ifdef CONFIG_MLX5_CORE_EN
+disable_sriov_resources:
 	mlx5_eswitch_disable_sriov(dev->priv.eswitch);
 #endif

drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1512,6 +1512,10 @@ mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
 				     struct mlxsw_sp_fib_entry *fib_entry);
 
+static bool
+mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
+				 const struct mlxsw_sp_fib_entry *fib_entry);
+
 static int
 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
 				    struct mlxsw_sp_nexthop_group *nh_grp)
@@ -1520,6 +1524,9 @@ mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
 	int err;
 
 	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
+		if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
+						      fib_entry))
+			continue;
 		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
 		if (err)
 			return err;

drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -205,7 +205,7 @@ static void dwmac1000_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
 {
 	int i;
 
-	for (i = 0; i < 23; i++)
+	for (i = 0; i < NUM_DWMAC1000_DMA_REGS; i++)
 		if ((i < 12) || (i > 17))
 			reg_space[DMA_BUS_MODE / 4 + i] =
 				readl(ioaddr + DMA_BUS_MODE + i * 4);

drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -70,7 +70,7 @@ static void dwmac100_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
 {
 	int i;
 
-	for (i = 0; i < 9; i++)
+	for (i = 0; i < NUM_DWMAC100_DMA_REGS; i++)
 		reg_space[DMA_BUS_MODE / 4 + i] =
 			readl(ioaddr + DMA_BUS_MODE + i * 4);

drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -136,6 +136,9 @@
 #define DMA_STATUS_TI	0x00000001	/* Transmit Interrupt */
 #define DMA_CONTROL_FTF		0x00100000	/* Flush transmit FIFO */
 
+#define NUM_DWMAC100_DMA_REGS	9
+#define NUM_DWMAC1000_DMA_REGS	23
+
 void dwmac_enable_dma_transmission(void __iomem *ioaddr);
 void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan);
 void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan);

drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -33,6 +33,8 @@
 #define MAC100_ETHTOOL_NAME	"st_mac100"
 #define GMAC_ETHTOOL_NAME	"st_gmac"
 
+#define ETHTOOL_DMA_OFFSET	55
+
 struct stmmac_stats {
 	char stat_string[ETH_GSTRING_LEN];
 	int sizeof_stat;
@@ -442,6 +444,9 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
 
 	priv->hw->mac->dump_regs(priv->hw, reg_space);
 	priv->hw->dma->dump_regs(priv->ioaddr, reg_space);
+	/* Copy DMA registers to where ethtool expects them */
+	memcpy(&reg_space[ETHTOOL_DMA_OFFSET], &reg_space[DMA_BUS_MODE / 4],
+	       NUM_DWMAC1000_DMA_REGS * 4);
 }
 
 static void

drivers/net/ethernet/sun/sunhme.h
@@ -13,9 +13,9 @@
 /* Happy Meal global registers. */
 #define GREG_SWRESET	0x000UL	/* Software Reset  */
 #define GREG_CFG	0x004UL	/* Config Register */
-#define GREG_STAT	0x108UL	/* Status          */
-#define GREG_IMASK	0x10cUL	/* Interrupt Mask  */
-#define GREG_REG_SIZE	0x110UL
+#define GREG_STAT	0x100UL	/* Status          */
+#define GREG_IMASK	0x104UL	/* Interrupt Mask  */
+#define GREG_REG_SIZE	0x108UL
 
 /* Global reset register. */
 #define GREG_RESET_ETX	0x01

drivers/net/ethernet/toshiba/tc35815.c
@@ -1338,7 +1338,7 @@ static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
 static void tc35815_fatal_error_interrupt(struct net_device *dev, u32 status)
 {
 	static int count;
-	printk(KERN_WARNING "%s: Fatal Error Intterrupt (%#x):",
+	printk(KERN_WARNING "%s: Fatal Error Interrupt (%#x):",
 	       dev->name, status);
 	if (status & Int_IntPCI)
 		printk(" IntPCI");

drivers/net/hyperv/netvsc_drv.c
@@ -315,14 +315,34 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
 	return slots_used;
 }
 
-/* Estimate number of page buffers neede to transmit
- * Need at most 2 for RNDIS header plus skb body and fragments.
- */
-static unsigned int netvsc_get_slots(const struct sk_buff *skb)
+static int count_skb_frag_slots(struct sk_buff *skb)
 {
-	return PFN_UP(offset_in_page(skb->data) + skb_headlen(skb))
-		+ skb_shinfo(skb)->nr_frags
-		+ 2;
+	int i, frags = skb_shinfo(skb)->nr_frags;
+	int pages = 0;
+
+	for (i = 0; i < frags; i++) {
+		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+		unsigned long size = skb_frag_size(frag);
+		unsigned long offset = frag->page_offset;
+
+		/* Skip unused frames from start of page */
+		offset &= ~PAGE_MASK;
+		pages += PFN_UP(offset + size);
+	}
+	return pages;
+}
+
+static int netvsc_get_slots(struct sk_buff *skb)
+{
+	char *data = skb->data;
+	unsigned int offset = offset_in_page(data);
+	unsigned int len = skb_headlen(skb);
+	int slots;
+	int frag_slots;
+
+	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
+	frag_slots = count_skb_frag_slots(skb);
+	return slots + frag_slots;
 }
 
 static u32 net_checksum_info(struct sk_buff *skb)
@@ -360,18 +380,21 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 	struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
 	struct hv_page_buffer *pb = page_buf;
 
-	/* We can only transmit MAX_PAGE_BUFFER_COUNT number
+	/* We will atmost need two pages to describe the rndis
+	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
 	 * of pages in a single packet. If skb is scattered around
 	 * more pages we try linearizing it.
 	 */
-	num_data_pgs = netvsc_get_slots(skb);
+
+	num_data_pgs = netvsc_get_slots(skb) + 2;
+
 	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
 		++net_device_ctx->eth_stats.tx_scattered;
 
 		if (skb_linearize(skb))
 			goto no_memory;
 
-		num_data_pgs = netvsc_get_slots(skb);
+		num_data_pgs = netvsc_get_slots(skb) + 2;
 		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
 			++net_device_ctx->eth_stats.tx_too_big;
 			goto drop;

drivers/net/irda/mcs7780.c
@@ -141,10 +141,20 @@ static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val)
 static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val)
 {
 	struct usb_device *dev = mcs->usbdev;
-	int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
-				  MCS_RD_RTYPE, 0, reg, val, 2,
-				  msecs_to_jiffies(MCS_CTRL_TIMEOUT));
+	void *dmabuf;
+	int ret;
+
+	dmabuf = kmalloc(sizeof(__u16), GFP_KERNEL);
+	if (!dmabuf)
+		return -ENOMEM;
+
+	ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
+			      MCS_RD_RTYPE, 0, reg, dmabuf, 2,
+			      msecs_to_jiffies(MCS_CTRL_TIMEOUT));
+
+	memcpy(val, dmabuf, sizeof(__u16));
+	kfree(dmabuf);
 
 	return ret;
 }

drivers/net/phy/Kconfig
@@ -7,7 +7,16 @@ menuconfig MDIO_DEVICE
 	help
 	   MDIO devices and driver infrastructure code.
 
-if MDIO_DEVICE
+config MDIO_BUS
+	tristate
+	default m if PHYLIB=m
+	default MDIO_DEVICE
+	help
+	  This internal symbol is used for link time dependencies and it
+	  reflects whether the mdio_bus/mdio_device code is built as a
+	  loadable module or built-in.
+
+if MDIO_BUS
 
 config MDIO_BCM_IPROC
 	tristate "Broadcom iProc MDIO bus controller"
@@ -28,7 +37,6 @@ config MDIO_BCM_UNIMAC
 
 config MDIO_BITBANG
 	tristate "Bitbanged MDIO buses"
-	depends on !(MDIO_DEVICE=y && PHYLIB=m)
 	help
 	  This module implements the MDIO bus protocol in software,
 	  for use by low level drivers that export the ability to
@@ -127,7 +135,6 @@ config MDIO_THUNDER
 	tristate "ThunderX SOCs MDIO buses"
 	depends on 64BIT
 	depends on PCI
-	depends on !(MDIO_DEVICE=y && PHYLIB=m)
 	select MDIO_CAVIUM
 	help
 	  This driver supports the MDIO interfaces found on Cavium

drivers/net/phy/phy.c
@@ -749,6 +749,9 @@ void phy_stop_machine(struct phy_device *phydev)
 	if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
 		phydev->state = PHY_UP;
 	mutex_unlock(&phydev->lock);
+
+	/* Now we can run the state machine synchronously */
+	phy_state_machine(&phydev->state_queue.work);
 }
 
 /**

drivers/net/ppp/pptp.c
@@ -131,7 +131,6 @@ static void del_chan(struct pppox_sock *sock)
 	clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
 	RCU_INIT_POINTER(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
 	spin_unlock(&chan_lock);
-	synchronize_rcu();
 }
 
 static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
@@ -520,6 +519,7 @@ static int pptp_release(struct socket *sock)
 
 	po = pppox_sk(sk);
 	del_chan(po);
+	synchronize_rcu();
 
 	pppox_unbind_sock(sk);
 	sk->sk_state = PPPOX_DEAD;

drivers/net/team/team.c
@@ -60,11 +60,11 @@ static struct team_port *team_port_get_rtnl(const struct net_device *dev)
 static int __set_port_dev_addr(struct net_device *port_dev,
 			       const unsigned char *dev_addr)
 {
-	struct sockaddr addr;
+	struct sockaddr_storage addr;
 
-	memcpy(addr.sa_data, dev_addr, port_dev->addr_len);
-	addr.sa_family = port_dev->type;
-	return dev_set_mac_address(port_dev, &addr);
+	memcpy(addr.__data, dev_addr, port_dev->addr_len);
+	addr.ss_family = port_dev->type;
+	return dev_set_mac_address(port_dev, (struct sockaddr *)&addr);
 }
 
 static int team_port_set_orig_dev_addr(struct team_port *port)

drivers/net/tun.c
@@ -2598,8 +2598,16 @@ static int __init tun_init(void)
 		goto err_misc;
 	}
 
-	register_netdevice_notifier(&tun_notifier_block);
+	ret = register_netdevice_notifier(&tun_notifier_block);
+	if (ret) {
+		pr_err("Can't register netdevice notifier\n");
+		goto err_notifier;
+	}
+
 	return 0;
+
+err_notifier:
+	misc_deregister(&tun_miscdev);
 err_misc:
 	rtnl_link_unregister(&tun_link_ops);
 err_linkops:

drivers/net/virtio_net.c
@@ -889,21 +889,20 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
 
 	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
 	buf += headroom; /* advance address leaving hole at front of pkt */
-	ctx = (void *)(unsigned long)len;
 	get_page(alloc_frag->page);
 	alloc_frag->offset += len + headroom;
 	hole = alloc_frag->size - alloc_frag->offset;
 	if (hole < len + headroom) {
 		/* To avoid internal fragmentation, if there is very likely not
 		 * enough space for another buffer, add the remaining space to
-		 * the current buffer. This extra space is not included in
-		 * the truesize stored in ctx.
+		 * the current buffer.
 		 */
 		len += hole;
 		alloc_frag->offset += hole;
 	}
 
 	sg_init_one(rq->sg, buf, len);
+	ctx = (void *)(unsigned long)len;
 	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
 	if (err < 0)
 		put_page(virt_to_head_page(buf));

drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -214,7 +214,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
 
 	/* Make sure there's enough writeable headroom */
 	if (skb_headroom(skb) < drvr->hdrlen || skb_header_cloned(skb)) {
-		head_delta = drvr->hdrlen - skb_headroom(skb);
+		head_delta = max_t(int, drvr->hdrlen - skb_headroom(skb), 0);
 
 		brcmf_dbg(INFO, "%s: insufficient headroom (%d)\n",
 			  brcmf_ifname(ifp), head_delta);

drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -2053,12 +2053,13 @@ static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt)
 				atomic_inc(&stats->pktcow_failed);
 				return -ENOMEM;
 			}
+			head_pad = 0;
 		}
 		skb_push(pkt, head_pad);
 		dat_buf = (u8 *)(pkt->data);
 	}
 	memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
-	return 0;
+	return head_pad;
 }
 
 /**
@@ -4174,11 +4175,6 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
 		goto fail;
 	}
 
-	/* allocate scatter-gather table. sg support
-	 * will be disabled upon allocation failure.
-	 */
-	brcmf_sdiod_sgtable_alloc(bus->sdiodev);
-
 	/* Query the F2 block size, set roundup accordingly */
 	bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
 	bus->roundup = min(max_roundup, bus->blocksize);

drivers/net/wireless/intel/iwlwifi/dvm/tx.c
@@ -1189,11 +1189,11 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
 					next_reclaimed;
 			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
 					   next_reclaimed);
+			iwlagn_check_ratid_empty(priv, sta_id, tid);
 		}
 
 		iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
 
-		iwlagn_check_ratid_empty(priv, sta_id, tid);
-
 		freed = 0;
 
 		/* process frames */

drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
@@ -55,8 +55,8 @@ static inline bool iwl_trace_data(struct sk_buff *skb)
 	/* also account for the RFC 1042 header, of course */
 	offs += 6;
 
-	return skb->len > offs + 2 &&
-	       *(__be16 *)(skb->data + offs) == cpu_to_be16(ETH_P_PAE);
+	return skb->len <= offs + 2 ||
+	       *(__be16 *)(skb->data + offs) != cpu_to_be16(ETH_P_PAE);
 }
 
 static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,
static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans, static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,

View file

@ -1084,7 +1084,13 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex); lockdep_assert_held(&mvm->mutex);
if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) {
/*
* Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART
* so later code will - from now on - see that we're doing it.
*/
set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
/* Clean up some internal and mac80211 state on restart */ /* Clean up some internal and mac80211 state on restart */
iwl_mvm_restart_cleanup(mvm); iwl_mvm_restart_cleanup(mvm);
} else { } else {

drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1090,6 +1090,7 @@ struct iwl_mvm {
 * @IWL_MVM_STATUS_HW_RFKILL: HW RF-kill is asserted
 * @IWL_MVM_STATUS_HW_CTKILL: CT-kill is active
 * @IWL_MVM_STATUS_ROC_RUNNING: remain-on-channel is running
+ * @IWL_MVM_STATUS_HW_RESTART_REQUESTED: HW restart was requested
 * @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active
 * @IWL_MVM_STATUS_IN_D0I3: NIC is in D0i3
 * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
@@ -1101,6 +1102,7 @@ enum iwl_mvm_status {
 	IWL_MVM_STATUS_HW_RFKILL,
 	IWL_MVM_STATUS_HW_CTKILL,
 	IWL_MVM_STATUS_ROC_RUNNING,
+	IWL_MVM_STATUS_HW_RESTART_REQUESTED,
 	IWL_MVM_STATUS_IN_HW_RESTART,
 	IWL_MVM_STATUS_IN_D0I3,
 	IWL_MVM_STATUS_ROC_AUX_RUNNING,

drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1236,8 +1236,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
 	if (!mvm->fw_restart && fw_error) {
 		iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert,
 					    NULL);
-	} else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART,
-				    &mvm->status)) {
+	} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
 		struct iwl_mvm_reprobe *reprobe;
 
 		IWL_ERR(mvm,
@@ -1268,6 +1267,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
 		if (fw_error && mvm->fw_restart > 0)
 			mvm->fw_restart--;
+		set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
 		ieee80211_restart_hw(mvm->hw);
 	}
 }

drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -277,6 +277,18 @@ static void iwl_mvm_rx_agg_session_expired(unsigned long data)
 
 	/* Timer expired */
 	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
+
+	/*
+	 * sta should be valid unless the following happens:
+	 * The firmware asserts which triggers a reconfig flow, but
+	 * the reconfig fails before we set the pointer to sta into
+	 * the fw_id_to_mac_id pointer table. Mac80211 can't stop
+	 * A-MDPU and hence the timer continues to run. Then, the
+	 * timer expires and sta is NULL.
+	 */
+	if (!sta)
+		goto unlock;
+
 	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
 	ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
 					  sta->addr, ba_data->tid);
@@ -2015,7 +2027,8 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 					  IWL_MAX_TID_COUNT,
 					  wdg_timeout);
 
-	if (vif->type == NL80211_IFTYPE_AP)
+	if (vif->type == NL80211_IFTYPE_AP ||
+	    vif->type == NL80211_IFTYPE_ADHOC)
 		mvm->probe_queue = queue;
 	else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
 		mvm->p2p_dev_queue = queue;


@@ -3150,7 +3150,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	init_waitqueue_head(&trans_pcie->d0i3_waitq);
 
 	if (trans_pcie->msix_enabled) {
-		if (iwl_pcie_init_msix_handler(pdev, trans_pcie))
+		ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
+		if (ret)
 			goto out_no_pci;
 	} else {
 		ret = iwl_pcie_alloc_ict(trans);


@@ -298,6 +298,9 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
 	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
 		struct iwl_txq *txq = trans_pcie->txq[i];
 
+		if (!test_bit(i, trans_pcie->queue_used))
+			continue;
+
 		spin_lock_bh(&txq->lock);
 		if (txq->need_update) {
 			iwl_pcie_txq_inc_wr_ptr(trans, txq);


@@ -846,9 +846,6 @@ static bool _rtl8723be_init_mac(struct ieee80211_hw *hw)
 		return false;
 	}
 
-	if (rtlpriv->cfg->ops->get_btc_status())
-		rtlpriv->btcoexist.btc_ops->btc_power_on_setting(rtlpriv);
-
 	bytetmp = rtl_read_byte(rtlpriv, REG_MULTI_FUNC_CTRL);
 	rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL, bytetmp | BIT(3));


@@ -2547,7 +2547,6 @@ struct bt_coexist_info {
 struct rtl_btc_ops {
 	void (*btc_init_variables) (struct rtl_priv *rtlpriv);
 	void (*btc_init_hal_vars) (struct rtl_priv *rtlpriv);
-	void (*btc_power_on_setting)(struct rtl_priv *rtlpriv);
 	void (*btc_init_hw_config) (struct rtl_priv *rtlpriv);
 	void (*btc_ips_notify) (struct rtl_priv *rtlpriv, u8 type);
 	void (*btc_lps_notify)(struct rtl_priv *rtlpriv, u8 type);


@@ -30,8 +30,8 @@ config PHY_BCM_NS_USB3
 	tristate "Broadcom Northstar USB 3.0 PHY Driver"
 	depends on ARCH_BCM_IPROC || COMPILE_TEST
 	depends on HAS_IOMEM && OF
+	depends on MDIO_BUS
 	select GENERIC_PHY
-	select MDIO_DEVICE
 	help
 	  Enable this to support Broadcom USB 3.0 PHY connected to the USB
 	  controller on Northstar family.


@@ -308,7 +308,6 @@ static void vhost_vq_reset(struct vhost_dev *dev,
 	vq->avail = NULL;
 	vq->used = NULL;
 	vq->last_avail_idx = 0;
-	vq->last_used_event = 0;
 	vq->avail_idx = 0;
 	vq->last_used_idx = 0;
 	vq->signalled_used = 0;
@@ -1402,7 +1401,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
 			r = -EINVAL;
 			break;
 		}
-		vq->last_avail_idx = vq->last_used_event = s.num;
+		vq->last_avail_idx = s.num;
 		/* Forget the cached index value. */
 		vq->avail_idx = vq->last_avail_idx;
 		break;
@@ -2241,6 +2240,10 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	__u16 old, new;
 	__virtio16 event;
 	bool v;
+	/* Flush out used index updates. This is paired
+	 * with the barrier that the Guest executes when enabling
+	 * interrupts. */
+	smp_mb();
 
 	if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
 	    unlikely(vq->avail_idx == vq->last_avail_idx))
@@ -2248,10 +2251,6 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
 		__virtio16 flags;
-		/* Flush out used index updates. This is paired
-		 * with the barrier that the Guest executes when enabling
-		 * interrupts. */
-		smp_mb();
 		if (vhost_get_avail(vq, flags, &vq->avail->flags)) {
 			vq_err(vq, "Failed to get flags");
 			return true;
@@ -2266,26 +2265,11 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	if (unlikely(!v))
 		return true;
 
-	/* We're sure if the following conditions are met, there's no
-	 * need to notify guest:
-	 * 1) cached used event is ahead of new
-	 * 2) old to new updating does not cross cached used event. */
-	if (vring_need_event(vq->last_used_event, new + vq->num, new) &&
-	    !vring_need_event(vq->last_used_event, new, old))
-		return false;
-
-	/* Flush out used index updates. This is paired
-	 * with the barrier that the Guest executes when enabling
-	 * interrupts. */
-	smp_mb();
-
 	if (vhost_get_avail(vq, event, vhost_used_event(vq))) {
 		vq_err(vq, "Failed to get used event idx");
 		return true;
 	}
-	vq->last_used_event = vhost16_to_cpu(vq, event);
-	return vring_need_event(vq->last_used_event, new, old);
+	return vring_need_event(vhost16_to_cpu(vq, event), new, old);
 }
 
 /* This actually signals the guest, using eventfd. */
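The revert above drops the cached last_used_event heuristic and goes back to re-reading the guest-visible used_event after an smp_mb() before every notification decision. For reference, vring_need_event() is plain unsigned 16-bit window arithmetic (this matches the definition in the virtio ring spec); a stand-alone sketch:

#include <stdio.h>

/* Notify iff the used index moved from old_idx to new_idx across
 * event_idx, with all arithmetic taken mod 2^16. */
static int vring_need_event(unsigned short event_idx,
			    unsigned short new_idx,
			    unsigned short old_idx)
{
	return (unsigned short)(new_idx - event_idx - 1) <
	       (unsigned short)(new_idx - old_idx);
}

int main(void)
{
	/* guest asked to be woken once entry 5 has been used */
	printf("%d\n", vring_need_event(5, 6, 5));	/* 1: crossed it */
	printf("%d\n", vring_need_event(9, 6, 5));	/* 0: not yet */
	return 0;
}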


@@ -115,9 +115,6 @@ struct vhost_virtqueue {
 	/* Last index we used. */
 	u16 last_used_idx;
 
-	/* Last used evet we've seen */
-	u16 last_used_event;
-
 	/* Used flags */
 	u16 used_flags;


@@ -128,6 +128,7 @@ struct inet6_skb_parm {
 #define IP6SKB_FRAGMENTED	16
 #define IP6SKB_HOPBYHOP		32
 #define IP6SKB_L3SLAVE		64
+#define IP6SKB_JUMBOGRAM	128
 };
 
 #if defined(CONFIG_NET_L3_MASTER_DEV)
@@ -152,6 +153,11 @@ static inline int inet6_iif(const struct sk_buff *skb)
 	return l3_slave ? skb->skb_iif : IP6CB(skb)->iif;
 }
 
+static inline bool inet6_is_jumbogram(const struct sk_buff *skb)
+{
+	return !!(IP6CB(skb)->flags & IP6SKB_JUMBOGRAM);
+}
+
 /* can not be used in TCP layer after tcp_v6_fill_cb */
 static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb)
 {


@@ -7749,8 +7749,10 @@ struct mlx5_ifc_pcam_reg_bits {
 };
 
 struct mlx5_ifc_mcam_enhanced_features_bits {
-	u8         reserved_at_0[0x7f];
+	u8         reserved_at_0[0x7d];
 
+	u8         mtpps_enh_out_per_adj[0x1];
+	u8         mtpps_fs[0x1];
 	u8         pcie_performance_group[0x1];
 };
 
@@ -8159,7 +8161,8 @@ struct mlx5_ifc_mtpps_reg_bits {
 	u8         reserved_at_78[0x4];
 	u8         cap_pin_4_mode[0x4];
 
-	u8         reserved_at_80[0x80];
+	u8         field_select[0x20];
+	u8         reserved_at_a0[0x60];
 
 	u8         enable[0x1];
 	u8         reserved_at_101[0xb];
@@ -8174,8 +8177,9 @@ struct mlx5_ifc_mtpps_reg_bits {
 	u8         out_pulse_duration[0x10];
 	u8         out_periodic_adjustment[0x10];
+	u8         enhanced_out_periodic_adjustment[0x20];
 
-	u8         reserved_at_1a0[0x60];
+	u8         reserved_at_1c0[0x20];
 };
 
 struct mlx5_ifc_mtppse_reg_bits {


@@ -830,7 +830,7 @@ static inline int phy_read_status(struct phy_device *phydev)
 	dev_err(&_phydev->mdio.dev, format, ##args)
 
 #define phydev_dbg(_phydev, format, args...)	\
-	dev_dbg(&_phydev->mdio.dev, format, ##args);
+	dev_dbg(&_phydev->mdio.dev, format, ##args)
 
 static inline const char *phydev_name(const struct phy_device *phydev)
 {


@@ -469,7 +469,7 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
 
 #define _sctp_walk_params(pos, chunk, end, member)\
 for (pos.v = chunk->member;\
-     (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <\
+     (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <=\
      (void *)chunk + end) &&\
      pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
      ntohs(pos.p->length) >= sizeof(struct sctp_paramhdr);\
@@ -481,7 +481,7 @@ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
 
 #define _sctp_walk_errors(err, chunk_hdr, end)\
 for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
	    sizeof(struct sctp_chunkhdr));\
-     ((void *)err + offsetof(sctp_errhdr_t, length) + sizeof(err->length) <\
+     ((void *)err + offsetof(sctp_errhdr_t, length) + sizeof(err->length) <=\
      (void *)chunk_hdr + end) &&\
      (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
      ntohs(err->length) >= sizeof(sctp_errhdr_t); \
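The two '<' to '<=' flips are the entire fix: a parameter or error header that ends exactly at the chunk boundary is valid, and the strict comparison made the walkers drop the last entry. The boundary arithmetic, reduced to a stand-alone check (hypothetical sizes, plain C):

#include <stddef.h>
#include <stdio.h>

struct paramhdr {
	unsigned short type;
	unsigned short length;
};

/* the header fits iff its length field ends at or before 'end';
 * with '<' a header occupying the final bytes is wrongly rejected */
static int header_fits(const char *pos, const char *end)
{
	return pos + offsetof(struct paramhdr, length) +
	       sizeof(unsigned short) <= end;
}

int main(void)
{
	char chunk[8];

	/* a 4-byte header in the final 4 bytes must be accepted */
	printf("%d\n", header_fits(chunk + 4, chunk + 8));	/* prints 1 */
	return 0;
}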


@@ -260,6 +260,7 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
 }
 
 void udp_v4_early_demux(struct sk_buff *skb);
+void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
 int udp_get_port(struct sock *sk, unsigned short snum,
 		 int (*saddr_cmp)(const struct sock *,
 				  const struct sock *));
@@ -305,33 +306,44 @@ struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
 /* UDP uses skb->dev_scratch to cache as much information as possible and avoid
  * possibly multiple cache miss on dequeue()
  */
-#if BITS_PER_LONG == 64
-
-/* truesize, len and the bit needed to compute skb_csum_unnecessary will be on
- * cold cache lines at recvmsg time.
- * skb->len can be stored on 16 bits since the udp header has been already
- * validated and pulled.
- */
 struct udp_dev_scratch {
-	u32 truesize;
+	/* skb->truesize and the stateless bit are embedded in a single field;
+	 * do not use a bitfield since the compiler emits better/smaller code
+	 * this way
+	 */
+	u32 _tsize_state;
+
+#if BITS_PER_LONG == 64
+	/* len and the bit needed to compute skb_csum_unnecessary
+	 * will be on cold cache lines at recvmsg time.
+	 * skb->len can be stored on 16 bits since the udp header has been
+	 * already validated and pulled.
+	 */
 	u16 len;
 	bool is_linear;
 	bool csum_unnecessary;
+#endif
 };
 
+static inline struct udp_dev_scratch *udp_skb_scratch(struct sk_buff *skb)
+{
+	return (struct udp_dev_scratch *)&skb->dev_scratch;
+}
+
+#if BITS_PER_LONG == 64
 static inline unsigned int udp_skb_len(struct sk_buff *skb)
 {
-	return ((struct udp_dev_scratch *)&skb->dev_scratch)->len;
+	return udp_skb_scratch(skb)->len;
 }
 
 static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
 {
-	return ((struct udp_dev_scratch *)&skb->dev_scratch)->csum_unnecessary;
+	return udp_skb_scratch(skb)->csum_unnecessary;
}
 
 static inline bool udp_skb_is_linear(struct sk_buff *skb)
 {
-	return ((struct udp_dev_scratch *)&skb->dev_scratch)->is_linear;
+	return udp_skb_scratch(skb)->is_linear;
 }
 
 #else
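The reworked struct packs skb->truesize and a stateless bit into one u32 so the scratch area still fits in skb->dev_scratch (an unsigned long) on 32-bit and 64-bit alike; udp_set_dev_scratch() enforces that with a BUILD_BUG_ON. A compile-time sketch of the same constraint (C11 static_assert; UINTPTR_MAX stands in for BITS_PER_LONG, assuming an LP64-style target):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct udp_dev_scratch_demo {
	uint32_t _tsize_state;		/* truesize + UDP_SKB_IS_STATELESS bit */
#if UINTPTR_MAX == UINT64_MAX		/* stand-in for BITS_PER_LONG == 64 */
	uint16_t len;
	bool is_linear;
	bool csum_unnecessary;
#endif
};

int main(void)
{
	/* mirrors BUILD_BUG_ON(sizeof(...) > sizeof(long)) in udp.c */
	static_assert(sizeof(struct udp_dev_scratch_demo) <= sizeof(long),
		      "scratch must fit in skb->dev_scratch");
	return 0;
}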


@@ -1289,7 +1289,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 	info_len = min_t(u32, sizeof(info), info_len);
 
 	if (copy_from_user(&info, uinfo, info_len))
-		return err;
+		return -EFAULT;
 
 	info.type = prog->type;
 	info.id = prog->aux->id;
@@ -1312,7 +1312,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 	}
 
 	ulen = info.xlated_prog_len;
-	info.xlated_prog_len = bpf_prog_size(prog->len);
+	info.xlated_prog_len = bpf_prog_insn_size(prog);
 	if (info.xlated_prog_len && ulen) {
 		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
 		ulen = min_t(u32, info.xlated_prog_len, ulen);
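The first hunk matters because copy_from_user() returns the number of bytes it failed to copy rather than an errno, and err happened to be 0 at that point, so a faulting copy was reported as success. The conventional shape, sketched with a stub in place of the real helper:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* stand-in for copy_from_user(): returns bytes left uncopied */
static unsigned long copy_stub(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

static int get_info(void *dst, const void *usrc, unsigned long len)
{
	if (copy_stub(dst, usrc, len))
		return -EFAULT;		/* never the raw byte count */
	return 0;
}

int main(void)
{
	char in[4] = "abc", out[4];

	printf("%d\n", get_info(out, in, sizeof(in)));	/* prints 0 */
	return 0;
}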


@@ -1865,10 +1865,12 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 	 * do our normal operations to the register, we need to set the values
 	 * to the min/max since they are undefined.
 	 */
-	if (min_val == BPF_REGISTER_MIN_RANGE)
-		dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
-	if (max_val == BPF_REGISTER_MAX_RANGE)
-		dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
+	if (opcode != BPF_SUB) {
+		if (min_val == BPF_REGISTER_MIN_RANGE)
+			dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
+		if (max_val == BPF_REGISTER_MAX_RANGE)
+			dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
+	}
 
 	switch (opcode) {
 	case BPF_ADD:
@@ -1879,10 +1881,17 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 		dst_reg->min_align = min(src_align, dst_align);
 		break;
 	case BPF_SUB:
+		/* If one of our values was at the end of our ranges, then the
+		 * _opposite_ value in the dst_reg goes to the end of our range.
+		 */
+		if (min_val == BPF_REGISTER_MIN_RANGE)
+			dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
+		if (max_val == BPF_REGISTER_MAX_RANGE)
+			dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
 		if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
-			dst_reg->min_value -= min_val;
+			dst_reg->min_value -= max_val;
 		if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
-			dst_reg->max_value -= max_val;
+			dst_reg->max_value -= min_val;
 		dst_reg->min_align = min(src_align, dst_align);
 		break;
 	case BPF_MUL:
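The interval arithmetic behind the BPF_SUB change: for x in [xmin, xmax] and y in [ymin, ymax], x - y lies in [xmin - ymax, xmax - ymin], so each result bound comes from the opposite bound of the subtrahend; that is also why the generic "saturate unknown min/max" shortcut above must now skip subtraction. A stand-alone check:

#include <stdio.h>

struct range {
	long long min, max;
};

static struct range range_sub(struct range a, struct range b)
{
	/* opposite bounds of b feed each side of the difference */
	return (struct range){ a.min - b.max, a.max - b.min };
}

int main(void)
{
	struct range a = { 0, 0xff }, b = { 0, 0xff };
	struct range d = range_sub(a, b);

	/* prints "-255 255": the difference can go negative, which is
	 * what the "subtraction bounds (map value)" test below exploits */
	printf("%lld %lld\n", d.min, d.max);
	return 0;
}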


@@ -56,8 +56,13 @@ static bool enomem_retry = false;
 module_param(enomem_retry, bool, 0);
 MODULE_PARM_DESC(enomem_retry, "Retry insert even if -ENOMEM was returned (default: off)");
 
+struct test_obj_val {
+	int	id;
+	int	tid;
+};
+
 struct test_obj {
-	int			value;
+	struct test_obj_val	value;
 	struct rhash_head	node;
 };
 
@@ -72,7 +77,7 @@ static struct test_obj array[MAX_ENTRIES];
 static struct rhashtable_params test_rht_params = {
 	.head_offset = offsetof(struct test_obj, node),
 	.key_offset = offsetof(struct test_obj, value),
-	.key_len = sizeof(int),
+	.key_len = sizeof(struct test_obj_val),
 	.hashfn = jhash,
 	.nulls_base = (3U << RHT_BASE_SHIFT),
 };
@@ -109,24 +114,26 @@ static int __init test_rht_lookup(struct rhashtable *ht)
 	for (i = 0; i < entries * 2; i++) {
 		struct test_obj *obj;
 		bool expected = !(i % 2);
-		u32 key = i;
+		struct test_obj_val key = {
+			.id = i,
+		};
 
-		if (array[i / 2].value == TEST_INSERT_FAIL)
+		if (array[i / 2].value.id == TEST_INSERT_FAIL)
 			expected = false;
 
 		obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
 		if (expected && !obj) {
-			pr_warn("Test failed: Could not find key %u\n", key);
+			pr_warn("Test failed: Could not find key %u\n", key.id);
 			return -ENOENT;
 		} else if (!expected && obj) {
 			pr_warn("Test failed: Unexpected entry found for key %u\n",
-				key);
+				key.id);
 			return -EEXIST;
 		} else if (expected && obj) {
-			if (obj->value != i) {
+			if (obj->value.id != i) {
 				pr_warn("Test failed: Lookup value mismatch %u!=%u\n",
-					obj->value, i);
+					obj->value.id, i);
 				return -EINVAL;
 			}
 		}
@@ -195,7 +202,7 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
 	for (i = 0; i < entries; i++) {
 		struct test_obj *obj = &array[i];
 
-		obj->value = i * 2;
+		obj->value.id = i * 2;
 		err = insert_retry(ht, &obj->node, test_rht_params);
 		if (err > 0)
 			insert_retries += err;
@@ -216,9 +223,11 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
 	pr_info("  Deleting %d keys\n", entries);
 	for (i = 0; i < entries; i++) {
-		u32 key = i * 2;
+		struct test_obj_val key = {
+			.id = i * 2,
+		};
 
-		if (array[i].value != TEST_INSERT_FAIL) {
+		if (array[i].value.id != TEST_INSERT_FAIL) {
 			obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
 			BUG_ON(!obj);
 
@@ -242,18 +251,21 @@ static int thread_lookup_test(struct thread_data *tdata)
 	for (i = 0; i < entries; i++) {
 		struct test_obj *obj;
-		int key = (tdata->id << 16) | i;
+		struct test_obj_val key = {
+			.id = i,
+			.tid = tdata->id,
+		};
 
 		obj = rhashtable_lookup_fast(&ht, &key, test_rht_params);
-		if (obj && (tdata->objs[i].value == TEST_INSERT_FAIL)) {
-			pr_err("  found unexpected object %d\n", key);
+		if (obj && (tdata->objs[i].value.id == TEST_INSERT_FAIL)) {
+			pr_err("  found unexpected object %d-%d\n", key.tid, key.id);
 			err++;
-		} else if (!obj && (tdata->objs[i].value != TEST_INSERT_FAIL)) {
-			pr_err("  object %d not found!\n", key);
+		} else if (!obj && (tdata->objs[i].value.id != TEST_INSERT_FAIL)) {
+			pr_err("  object %d-%d not found!\n", key.tid, key.id);
 			err++;
-		} else if (obj && (obj->value != key)) {
-			pr_err("  wrong object returned (got %d, expected %d)\n",
-				obj->value, key);
+		} else if (obj && memcmp(&obj->value, &key, sizeof(key))) {
+			pr_err("  wrong object returned (got %d-%d, expected %d-%d)\n",
+				obj->value.tid, obj->value.id, key.tid, key.id);
 			err++;
 		}
 
@@ -272,7 +284,8 @@ static int threadfunc(void *data)
 		pr_err("  thread[%d]: down_interruptible failed\n", tdata->id);
 
 	for (i = 0; i < entries; i++) {
-		tdata->objs[i].value = (tdata->id << 16) | i;
+		tdata->objs[i].value.id = i;
+		tdata->objs[i].value.tid = tdata->id;
 		err = insert_retry(&ht, &tdata->objs[i].node, test_rht_params);
 		if (err > 0) {
 			insert_retries += err;
@@ -295,7 +308,7 @@ static int threadfunc(void *data)
 	for (step = 10; step > 0; step--) {
 		for (i = 0; i < entries; i += step) {
-			if (tdata->objs[i].value == TEST_INSERT_FAIL)
+			if (tdata->objs[i].value.id == TEST_INSERT_FAIL)
 				continue;
 			err = rhashtable_remove_fast(&ht, &tdata->objs[i].node,
 						     test_rht_params);
@@ -304,7 +317,7 @@ static int threadfunc(void *data)
 					tdata->id);
 				goto out;
 			}
-			tdata->objs[i].value = TEST_INSERT_FAIL;
+			tdata->objs[i].value.id = TEST_INSERT_FAIL;
 			cond_resched();
 		}


@@ -263,6 +263,8 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
 		return dev_set_mtu(dev, ifr->ifr_mtu);
 
 	case SIOCSIFHWADDR:
+		if (dev->addr_len > sizeof(struct sockaddr))
+			return -EINVAL;
 		return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
 
 	case SIOCSIFHWBROADCAST:
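Context for the added check: ifr_hwaddr is a struct sockaddr, whose sa_data holds just 14 bytes (sizeof(struct sockaddr) == 16), while dev->addr_len may describe a longer hardware address, so dev_set_mac_address() could read past the user-supplied buffer. The sizes involved, in a few illustrative lines:

#include <stdio.h>
#include <sys/socket.h>

static int mac_len_ok(unsigned int addr_len)
{
	/* same bound the hunk above applies before dev_set_mac_address() */
	return addr_len <= sizeof(struct sockaddr);
}

int main(void)
{
	printf("sizeof(struct sockaddr) = %zu\n", sizeof(struct sockaddr));
	printf("ethernet (6): %d, infiniband-sized (20): %d\n",
	       mac_len_ok(6), mac_len_ok(20));	/* 1, 0 */
	return 0;
}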


@@ -666,7 +666,7 @@ int netpoll_setup(struct netpoll *np)
 	int err;
 
 	rtnl_lock();
-	if (np->dev_name) {
+	if (np->dev_name[0]) {
 		struct net *net = current->nsproxy->net_ns;
 		ndev = __dev_get_by_name(net, np->dev_name);
 	}


@@ -1471,9 +1471,12 @@ int dccp_feat_init(struct sock *sk)
 	 * singleton values (which always leads to failure).
 	 * These settings can still (later) be overridden via sockopts.
 	 */
-	if (ccid_get_builtin_ccids(&tx.val, &tx.len) ||
-	    ccid_get_builtin_ccids(&rx.val, &rx.len))
+	if (ccid_get_builtin_ccids(&tx.val, &tx.len))
 		return -ENOBUFS;
+	if (ccid_get_builtin_ccids(&rx.val, &rx.len)) {
+		kfree(tx.val);
+		return -ENOBUFS;
+	}
 
 	if (!dccp_feat_prefer(sysctl_dccp_tx_ccid, tx.val, tx.len) ||
 	    !dccp_feat_prefer(sysctl_dccp_rx_ccid, rx.val, rx.len))
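The leak fixed here is the usual two-allocation unwind problem: ccid_get_builtin_ccids() allocates into tx.val and rx.val, and the old combined condition returned without freeing tx.val when only the second call failed. The shape of the fix as a minimal sketch (hypothetical helper names):

#include <stdlib.h>

/* stand-in for ccid_get_builtin_ccids(): allocates *val on success */
static int get_ccids(char **val, int n)
{
	*val = malloc(n);
	return *val ? 0 : -1;
}

static int feat_init(char **tx, char **rx)
{
	if (get_ccids(tx, 4))
		return -1;
	if (get_ccids(rx, 4)) {
		free(*tx);	/* the previously missing kfree(tx.val) */
		*tx = NULL;
		return -1;
	}
	return 0;
}

int main(void)
{
	char *tx, *rx;

	if (!feat_init(&tx, &rx)) {
		free(tx);
		free(rx);
	}
	return 0;
}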


@@ -631,6 +631,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 		goto drop_and_free;
 
 	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
+	reqsk_put(req);
 	return 0;
 
 drop_and_free:


@@ -380,6 +380,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 		goto drop_and_free;
 
 	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
+	reqsk_put(req);
 	return 0;
 
 drop_and_free:


@@ -509,21 +509,22 @@ static int dsa_cpu_parse(struct dsa_port *port, u32 index,
 		dst->cpu_dp->netdev = ethernet_dev;
 	}
 
-	tag_protocol = ds->ops->get_tag_protocol(ds);
-	dst->tag_ops = dsa_resolve_tag_protocol(tag_protocol);
-	if (IS_ERR(dst->tag_ops)) {
-		dev_warn(ds->dev, "No tagger for this switch\n");
-		return PTR_ERR(dst->tag_ops);
-	}
-
-	dst->rcv = dst->tag_ops->rcv;
-
 	/* Initialize cpu_port_mask now for drv->setup()
 	 * to have access to a correct value, just like what
 	 * net/dsa/dsa.c::dsa_switch_setup_one does.
 	 */
 	ds->cpu_port_mask |= BIT(index);
 
+	tag_protocol = ds->ops->get_tag_protocol(ds);
+	dst->tag_ops = dsa_resolve_tag_protocol(tag_protocol);
+	if (IS_ERR(dst->tag_ops)) {
+		dev_warn(ds->dev, "No tagger for this switch\n");
+		ds->cpu_port_mask &= ~BIT(index);
+		return PTR_ERR(dst->tag_ops);
+	}
+
+	dst->rcv = dst->tag_ops->rcv;
+
 	return 0;
 }


@@ -1452,7 +1452,7 @@ static int call_fib_nh_notifiers(struct fib_nh *fib_nh,
 		return call_fib_notifiers(dev_net(fib_nh->nh_dev), event_type,
 					  &info.info);
 	case FIB_EVENT_NH_DEL:
-		if ((IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
+		if ((in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
 		     fib_nh->nh_flags & RTNH_F_LINKDOWN) ||
 		    (fib_nh->nh_flags & RTNH_F_DEAD))
 			return call_fib_notifiers(dev_net(fib_nh->nh_dev),


@@ -2202,9 +2202,10 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
 static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
 {
 	const u32 now = tcp_jiffies32;
+	enum tcp_chrono old = tp->chrono_type;
 
-	if (tp->chrono_type > TCP_CHRONO_UNSPEC)
-		tp->chrono_stat[tp->chrono_type - 1] += now - tp->chrono_start;
+	if (old > TCP_CHRONO_UNSPEC)
+		tp->chrono_stat[old - 1] += now - tp->chrono_start;
 	tp->chrono_start = now;
 	tp->chrono_type = new;
 }


@@ -1163,34 +1163,32 @@ out:
 	return ret;
 }
 
-#if BITS_PER_LONG == 64
+#define UDP_SKB_IS_STATELESS 0x80000000
+
 static void udp_set_dev_scratch(struct sk_buff *skb)
 {
-	struct udp_dev_scratch *scratch;
+	struct udp_dev_scratch *scratch = udp_skb_scratch(skb);
 
 	BUILD_BUG_ON(sizeof(struct udp_dev_scratch) > sizeof(long));
-	scratch = (struct udp_dev_scratch *)&skb->dev_scratch;
-	scratch->truesize = skb->truesize;
+	scratch->_tsize_state = skb->truesize;
+#if BITS_PER_LONG == 64
 	scratch->len = skb->len;
 	scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
 	scratch->is_linear = !skb_is_nonlinear(skb);
-}
-
-static int udp_skb_truesize(struct sk_buff *skb)
-{
-	return ((struct udp_dev_scratch *)&skb->dev_scratch)->truesize;
-}
-#else
-static void udp_set_dev_scratch(struct sk_buff *skb)
-{
-	skb->dev_scratch = skb->truesize;
-}
-
-static int udp_skb_truesize(struct sk_buff *skb)
-{
-	return skb->dev_scratch;
-}
 #endif
+	if (likely(!skb->_skb_refdst))
+		scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
+}
+
+static int udp_skb_truesize(struct sk_buff *skb)
+{
+	return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS;
+}
+
+static bool udp_skb_has_head_state(struct sk_buff *skb)
+{
+	return !(udp_skb_scratch(skb)->_tsize_state & UDP_SKB_IS_STATELESS);
+}
 
 /* fully reclaim rmem/fwd memory allocated for skb */
 static void udp_rmem_release(struct sock *sk, int size, int partial,
@@ -1388,10 +1386,10 @@ void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
 		unlock_sock_fast(sk, slow);
 	}
 
-	/* we cleared the head states previously only if the skb lacks any IP
-	 * options, see __udp_queue_rcv_skb().
+	/* In the more common cases we cleared the head states previously,
+	 * see __udp_queue_rcv_skb().
 	 */
-	if (unlikely(IPCB(skb)->opt.optlen > 0))
+	if (unlikely(udp_skb_has_head_state(skb)))
 		skb_release_head_state(skb);
 	consume_stateless_skb(skb);
 }
@@ -1784,11 +1782,11 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		sk_mark_napi_id_once(sk, skb);
 	}
 
-	/* At recvmsg() time we need skb->dst to process IP options-related
-	 * cmsg, elsewhere can we clear all pending head states while they are
-	 * hot in the cache
+	/* At recvmsg() time we may access skb->dst or skb->sp depending on
+	 * the IP options and the cmsg flags, elsewhere can we clear all
+	 * pending head states while they are hot in the cache
 	 */
-	if (likely(IPCB(skb)->opt.optlen == 0))
+	if (likely(IPCB(skb)->opt.optlen == 0 && !skb_sec_path(skb)))
 		skb_release_head_state(skb);
 
 	rc = __udp_enqueue_schedule_skb(sk, skb);
@@ -1930,7 +1928,7 @@ drop:
 /* For TCP sockets, sk_rx_dst is protected by socket lock
  * For UDP, we use xchg() to guard against concurrent changes.
  */
-static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
+void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
 {
 	struct dst_entry *old;
 
@@ -1939,6 +1937,7 @@ static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
 		dst_release(old);
 	}
 }
+EXPORT_SYMBOL(udp_sk_rx_dst_set);
 
 /*
 *	Multicasts and broadcasts go to each listener.


@@ -756,6 +756,7 @@ static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
 	if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
 		goto drop;
 
+	IP6CB(skb)->flags |= IP6SKB_JUMBOGRAM;
 	return true;
 
 drop:


@@ -673,8 +673,6 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 		*prevhdr = NEXTHDR_FRAGMENT;
 		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
 		if (!tmp_hdr) {
-			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
-				      IPSTATS_MIB_FRAGFAILS);
 			err = -ENOMEM;
 			goto fail;
 		}
@@ -789,8 +787,6 @@ slow_path:
 		frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
 				 hroom + troom, GFP_ATOMIC);
 		if (!frag) {
-			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
-				      IPSTATS_MIB_FRAGFAILS);
 			err = -ENOMEM;
 			goto fail;
 		}


@@ -291,11 +291,7 @@ static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
 					  struct udp_table *udptable)
 {
 	const struct ipv6hdr *iph = ipv6_hdr(skb);
-	struct sock *sk;
 
-	sk = skb_steal_sock(skb);
-	if (unlikely(sk))
-		return sk;
 	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
 				 &iph->daddr, dport, inet6_iif(skb),
 				 udptable, skb);
@@ -332,6 +328,15 @@ struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be
 EXPORT_SYMBOL_GPL(udp6_lib_lookup);
 #endif
 
+/* do not use the scratch area len for jumbogram: their length execeeds the
+ * scratch area space; note that the IP6CB flags is still in the first
+ * cacheline, so checking for jumbograms is cheap
+ */
+static int udp6_skb_len(struct sk_buff *skb)
+{
+	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
+}
+
 /*
  *	This should be easy, if there is something there we
  *	return it, otherwise we block.
@@ -362,7 +367,7 @@ try_again:
 	if (!skb)
 		return err;
 
-	ulen = udp_skb_len(skb);
+	ulen = udp6_skb_len(skb);
 	copied = len;
 	if (copied > ulen - off)
 		copied = ulen - off;
@@ -804,6 +809,24 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 	if (udp6_csum_init(skb, uh, proto))
 		goto csum_error;
 
+	/* Check if the socket is already available, e.g. due to early demux */
+	sk = skb_steal_sock(skb);
+	if (sk) {
+		struct dst_entry *dst = skb_dst(skb);
+		int ret;
+
+		if (unlikely(sk->sk_rx_dst != dst))
+			udp_sk_rx_dst_set(sk, dst);
+
+		ret = udpv6_queue_rcv_skb(sk, skb);
+		sock_put(sk);
+
+		/* a return value > 0 means to resubmit the input */
+		if (ret > 0)
+			return ret;
+		return 0;
+	}
+
 	/*
 	 *	Multicast receive code
 	 */
@@ -812,11 +835,6 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 						saddr, daddr, udptable, proto);
 
 	/* Unicast */
-
-	/*
-	 * check socket cache ... must talk to Alan about his plans
-	 * for sock caches... i'll skip this for now.
-	 */
 	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
 	if (sk) {
 		int ret;
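The relocated lookup fixes a socket reference leak: skb_steal_sock() returns a socket still holding the reference that early demux took, and the old placement inside __udp6_lib_lookup_skb() never dropped it. A toy refcount model of the corrected flow (stubs only, not the real API):

#include <stdio.h>

struct sock {
	int refcnt;
};

/* early demux stashed the socket with one reference held */
static struct sock *skb_steal_sock_stub(struct sock *stashed)
{
	return stashed;
}

static void sock_put_stub(struct sock *sk)
{
	if (--sk->refcnt == 0)
		printf("socket released\n");
}

int main(void)
{
	struct sock sk = { .refcnt = 1 };
	struct sock *s = skb_steal_sock_stub(&sk);

	/* ... queue the packet to the socket here ... */
	sock_put_stub(s);	/* balances the early-demux reference */
	return 0;
}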


@@ -1310,8 +1310,8 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
 
 	nla_for_each_nested(a, attr, rem) {
 		int type = nla_type(a);
-		int maxlen = ovs_ct_attr_lens[type].maxlen;
-		int minlen = ovs_ct_attr_lens[type].minlen;
+		int maxlen;
+		int minlen;
 
 		if (type > OVS_CT_ATTR_MAX) {
 			OVS_NLERR(log,
@@ -1319,6 +1319,9 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
 				  type, OVS_CT_ATTR_MAX);
 			return -EINVAL;
 		}
+
+		maxlen = ovs_ct_attr_lens[type].maxlen;
+		minlen = ovs_ct_attr_lens[type].minlen;
 		if (nla_len(a) < minlen || nla_len(a) > maxlen) {
 			OVS_NLERR(log,
 				  "Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)",


@@ -4329,7 +4329,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 		register_prot_hook(sk);
 	}
 	spin_unlock(&po->bind_lock);
-	if (closing && (po->tp_version > TPACKET_V2)) {
+	if (pg_vec && (po->tp_version > TPACKET_V2)) {
 		/* Because we don't support block-based V3 on tx-ring */
 		if (!tx_ring)
 			prb_shutdown_retire_blk_timer(po, rb_queue);


@@ -1916,7 +1916,7 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
 	if (copy_from_user(&msg, umsg, sizeof(*umsg)))
 		return -EFAULT;
 
-	kmsg->msg_control = msg.msg_control;
+	kmsg->msg_control = (void __force *)msg.msg_control;
 	kmsg->msg_controllen = msg.msg_controllen;
 	kmsg->msg_flags = msg.msg_flags;
 
@@ -1935,7 +1935,8 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
 	if (msg.msg_name && kmsg->msg_namelen) {
 		if (!save_addr) {
-			err = move_addr_to_kernel(msg.msg_name, kmsg->msg_namelen,
+			err = move_addr_to_kernel(msg.msg_name,
+						  kmsg->msg_namelen,
 						  kmsg->msg_name);
 			if (err < 0)
 				return err;


@@ -147,9 +147,9 @@ int _geneve_set_tunnel(struct __sk_buff *skb)
 	__builtin_memset(&gopt, 0x0, sizeof(gopt));
 	gopt.opt_class = 0x102; /* Open Virtual Networking (OVN) */
 	gopt.type = 0x08;
-	gopt.r1 = 1;
+	gopt.r1 = 0;
 	gopt.r2 = 0;
-	gopt.r3 = 1;
+	gopt.r3 = 0;
 	gopt.length = 2; /* 4-byte multiple */
 	*(int *) &gopt.opt_data = 0xdeadbeef;


@@ -149,6 +149,7 @@ function cleanup {
 	ip link del veth1
 	ip link del ipip11
 	ip link del gretap11
+	ip link del vxlan11
 	ip link del geneve11
 	pkill tcpdump
 	pkill cat


@@ -314,7 +314,6 @@ int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len)
 	int err;
 
 	bzero(&attr, sizeof(attr));
-	bzero(info, *info_len);
 	attr.info.bpf_fd = prog_fd;
 	attr.info.info_len = *info_len;
 	attr.info.info = ptr_to_u64(info);


@@ -340,6 +340,7 @@ static void test_bpf_obj_id(void)
 
 		/* Check getting prog info */
 		info_len = sizeof(struct bpf_prog_info) * 2;
+		bzero(&prog_infos[i], info_len);
 		prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
 		prog_infos[i].jited_prog_len = sizeof(jited_insns);
 		prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
@@ -369,6 +370,7 @@ static void test_bpf_obj_id(void)
 
 		/* Check getting map info */
 		info_len = sizeof(struct bpf_map_info) * 2;
+		bzero(&map_infos[i], info_len);
 		err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
 					     &info_len);
 		if (CHECK(err ||
@@ -394,7 +396,7 @@ static void test_bpf_obj_id(void)
 	nr_id_found = 0;
 	next_id = 0;
 	while (!bpf_prog_get_next_id(next_id, &next_id)) {
-		struct bpf_prog_info prog_info;
+		struct bpf_prog_info prog_info = {};
 		int prog_fd;
 
 		info_len = sizeof(prog_info);
@@ -418,6 +420,8 @@ static void test_bpf_obj_id(void)
 		nr_id_found++;
 
 		err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
+		prog_infos[i].jited_prog_insns = 0;
+		prog_infos[i].xlated_prog_insns = 0;
 		CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
 		      memcmp(&prog_info, &prog_infos[i], info_len),
 		      "get-prog-info(next_id->fd)",
@@ -436,7 +440,7 @@ static void test_bpf_obj_id(void)
 	nr_id_found = 0;
 	next_id = 0;
 	while (!bpf_map_get_next_id(next_id, &next_id)) {
-		struct bpf_map_info map_info;
+		struct bpf_map_info map_info = {};
 		int map_fd;
 
 		info_len = sizeof(map_info);


@@ -5980,6 +5980,34 @@ static struct bpf_test tests[] = {
 		.result = REJECT,
 		.result_unpriv = REJECT,
 	},
+	{
+		"subtraction bounds (map value)",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
+			BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
+			BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
+			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr_unpriv = "R0 pointer arithmetic prohibited",
+		.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
+		.result = REJECT,
+		.result_unpriv = REJECT,
+	},
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)