Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  wimax/i2400m: fix missing endian correction read in fw loader
  net8139: fix a race at the end of NAPI
  pktgen: Fix accuracy of inter-packet delay.
  pkt_sched: gen_estimator: add a new lock
  net: deliver skbs on inactive slaves to exact matches
  ipv6: fix ICMP6_MIB_OUTERRORS
  r8169: fix mdio_read and update mdio_write according to hw specs
  gianfar: Revive the driver for eTSEC devices (disable timestamping)
  caif: fix a couple range checks
  phylib: Add support for the LXT973 phy.
  net: Print num_rx_queues imbalance warning only when there are allocated queues
commit 4cea8706c3
14 changed files with 103 additions and 28 deletions
drivers/net/8139cp.c
@@ -598,8 +598,8 @@ rx_next:
 		goto rx_status_loop;
 
 	spin_lock_irqsave(&cp->lock, flags);
-	cpw16_f(IntrMask, cp_intr_mask);
 	__napi_complete(napi);
+	cpw16_f(IntrMask, cp_intr_mask);
 	spin_unlock_irqrestore(&cp->lock, flags);
 }
 
drivers/net/8139too.c
@@ -2089,8 +2089,8 @@ static int rtl8139_poll(struct napi_struct *napi, int budget)
 		 * again when we think we are done.
 		 */
 		spin_lock_irqsave(&tp->lock, flags);
-		RTL_W16_F(IntrMask, rtl8139_intr_mask);
 		__napi_complete(napi);
+		RTL_W16_F(IntrMask, rtl8139_intr_mask);
 		spin_unlock_irqrestore(&tp->lock, flags);
 	}
 	spin_unlock(&tp->rx_lock);
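
Why the swap in both 8139 hunks matters: with the interrupt mask restored before __napi_complete(), an IRQ can fire in that window, fail to reschedule NAPI because NAPI_STATE_SCHED is still set, and the completion then clears the flag with the event unhandled, stranding the RX queue. Completing first means any interrupt arriving afterwards can schedule a fresh poll. A minimal sketch of the pattern, assuming a hypothetical driver (my_dev, my_rx() and my_unmask_irqs() are stand-ins, not the 8139 code):

	static int my_poll(struct napi_struct *napi, int budget)
	{
		struct my_dev *mp = container_of(napi, struct my_dev, napi);
		int work_done = my_rx(mp, budget);	/* hypothetical RX loop */

		if (work_done < budget) {
			unsigned long flags;

			spin_lock_irqsave(&mp->lock, flags);
			__napi_complete(napi);	/* clear NAPI_STATE_SCHED first */
			my_unmask_irqs(mp);	/* only then re-enable the IRQ */
			spin_unlock_irqrestore(&mp->lock, flags);
		}
		return work_done;
	}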
drivers/net/gianfar.c
@@ -747,8 +747,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
 			FSL_GIANFAR_DEV_HAS_CSUM |
 			FSL_GIANFAR_DEV_HAS_VLAN |
 			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
-			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
-			FSL_GIANFAR_DEV_HAS_TIMER;
+			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;
 
 	ctype = of_get_property(np, "phy-connection-type", NULL);
 
drivers/net/phy/lxt.c
@@ -53,6 +53,9 @@
 
 #define MII_LXT971_ISR		19  /* Interrupt Status Register */
 
+/* register definitions for the 973 */
+#define MII_LXT973_PCR		16 /* Port Configuration Register */
+#define PCR_FIBER_SELECT	1
 
 MODULE_DESCRIPTION("Intel LXT PHY driver");
 MODULE_AUTHOR("Andy Fleming");
@@ -119,6 +122,33 @@ static int lxt971_config_intr(struct phy_device *phydev)
 	return err;
 }
 
+static int lxt973_probe(struct phy_device *phydev)
+{
+	int val = phy_read(phydev, MII_LXT973_PCR);
+
+	if (val & PCR_FIBER_SELECT) {
+		/*
+		 * If fiber is selected, then the only correct setting
+		 * is 100Mbps, full duplex, and auto negotiation off.
+		 */
+		val = phy_read(phydev, MII_BMCR);
+		val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
+		val &= ~BMCR_ANENABLE;
+		phy_write(phydev, MII_BMCR, val);
+		/* Remember that the port is in fiber mode. */
+		phydev->priv = lxt973_probe;
+	} else {
+		phydev->priv = NULL;
+	}
+	return 0;
+}
+
+static int lxt973_config_aneg(struct phy_device *phydev)
+{
+	/* Do nothing if port is in fiber mode. */
+	return phydev->priv ? 0 : genphy_config_aneg(phydev);
+}
+
 static struct phy_driver lxt970_driver = {
 	.phy_id		= 0x78100000,
 	.name		= "LXT970",
@@ -146,6 +176,18 @@ static struct phy_driver lxt971_driver = {
 	.driver		= { .owner = THIS_MODULE,},
 };
 
+static struct phy_driver lxt973_driver = {
+	.phy_id		= 0x00137a10,
+	.name		= "LXT973",
+	.phy_id_mask	= 0xfffffff0,
+	.features	= PHY_BASIC_FEATURES,
+	.flags		= 0,
+	.probe		= lxt973_probe,
+	.config_aneg	= lxt973_config_aneg,
+	.read_status	= genphy_read_status,
+	.driver		= { .owner = THIS_MODULE,},
+};
+
 static int __init lxt_init(void)
 {
 	int ret;
@@ -157,8 +199,14 @@ static int __init lxt_init(void)
 	ret = phy_driver_register(&lxt971_driver);
 	if (ret)
 		goto err2;
+
+	ret = phy_driver_register(&lxt973_driver);
+	if (ret)
+		goto err3;
 	return 0;
 
+ err3:
+	phy_driver_unregister(&lxt971_driver);
  err2:
 	phy_driver_unregister(&lxt970_driver);
  err1:
@@ -169,6 +217,7 @@ static void __exit lxt_exit(void)
 {
 	phy_driver_unregister(&lxt970_driver);
 	phy_driver_unregister(&lxt971_driver);
+	phy_driver_unregister(&lxt973_driver);
 }
 
 module_init(lxt_init);
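
A side note on the probe hunk: phy_device::priv is reused as a plain boolean marker for "port is in fiber mode", with the probe function's own address serving as a convenient non-NULL token, and lxt973_config_aneg() tests it to keep autonegotiation untouched. A reduced sketch of the idiom, with a hypothetical struct standing in for phylib's:

	struct demo_phy {
		void *priv;			/* stand-in for phy_device::priv */
	};

	static int demo_genphy_config_aneg(struct demo_phy *phy)
	{
		return 0;			/* stand-in for genphy_config_aneg() */
	}

	static int demo_probe(struct demo_phy *phy, int fiber_selected)
	{
		/* Any stable non-NULL pointer works as the marker. */
		phy->priv = fiber_selected ? (void *)demo_probe : NULL;
		return 0;
	}

	static int demo_config_aneg(struct demo_phy *phy)
	{
		if (phy->priv)
			return 0;		/* fiber: settings were forced at probe */
		return demo_genphy_config_aneg(phy);
	}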
drivers/net/r8169.c
@@ -560,10 +560,10 @@ static void mdio_write(void __iomem *ioaddr, int reg_addr, int value)
 		udelay(25);
 	}
 	/*
-	 * Some configurations require a small delay even after the write
-	 * completed indication or the next write might fail.
+	 * According to hardware specs a 20us delay is required after write
+	 * complete indication, but before sending next command.
 	 */
-	udelay(25);
+	udelay(20);
 }
 
 static int mdio_read(void __iomem *ioaddr, int reg_addr)
@@ -583,6 +583,12 @@ static int mdio_read(void __iomem *ioaddr, int reg_addr)
 		}
 		udelay(25);
 	}
+	/*
+	 * According to hardware specs a 20us delay is required after read
+	 * complete indication, but before sending next command.
+	 */
+	udelay(20);
+
 	return value;
 }
 
drivers/net/wimax/i2400m/fw.c
@@ -1192,7 +1192,7 @@ int i2400m_fw_hdr_check(struct i2400m *i2400m,
 	unsigned module_type, header_len, major_version, minor_version,
 		module_id, module_vendor, date, size;
 
-	module_type = bcf_hdr->module_type;
+	module_type = le32_to_cpu(bcf_hdr->module_type);
 	header_len = sizeof(u32) * le32_to_cpu(bcf_hdr->header_len);
 	major_version = (le32_to_cpu(bcf_hdr->header_version) & 0xffff0000)
 		>> 16;
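
The fix is purely an endianness one: the firmware header travels little-endian, and module_type was the only field read without conversion, so the check passed on x86 but compared byte-swapped values on big-endian hosts. le32_to_cpu() is a no-op on little-endian machines and a byte swap elsewhere. A standalone userspace sketch of the distinction (demo helper, not the kernel's le32_to_cpu()):

	#include <stdint.h>
	#include <stdio.h>

	/* Assemble a little-endian 32-bit wire field byte by byte;
	 * correct on hosts of either endianness. */
	static uint32_t read_le32(const uint8_t *p)
	{
		return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
		       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
	}

	int main(void)
	{
		uint8_t wire[4] = { 0x06, 0x00, 0x00, 0x00 };	/* 6, little-endian */

		printf("module_type = %u\n", read_le32(wire));	/* 6 everywhere */
		return 0;
	}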
include/linux/skbuff.h
@@ -380,7 +380,10 @@ struct sk_buff {
 	kmemcheck_bitfield_begin(flags2);
 	__u16			queue_mapping:16;
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
-	__u8			ndisc_nodetype:2;
+	__u8			ndisc_nodetype:2,
+				deliver_no_wcard:1;
+#else
+	__u8			deliver_no_wcard:1;
 #endif
 	kmemcheck_bitfield_end(flags2);
 
net/8021q/vlan_core.c
@@ -12,7 +12,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 		return NET_RX_DROP;
 
 	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
-		goto drop;
+		skb->deliver_no_wcard = 1;
 
 	skb->skb_iif = skb->dev->ifindex;
 	__vlan_hwaccel_put_tag(skb, vlan_tci);
@@ -84,7 +84,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
 	struct sk_buff *p;
 
 	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
-		goto drop;
+		skb->deliver_no_wcard = 1;
 
 	skb->skb_iif = skb->dev->ifindex;
 	__vlan_hwaccel_put_tag(skb, vlan_tci);
net/caif/cfrfml.c
@@ -83,7 +83,7 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
 	if (!cfsrvl_ready(service, &ret))
 		return ret;
 
-	if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+	if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
 		pr_err("CAIF: %s():Packet too large - size=%d\n",
 			__func__, cfpkt_getlen(pkt));
 		return -EOVERFLOW;
net/caif/cfveil.c
@@ -84,7 +84,7 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
 		return ret;
 	caif_assert(layr->dn != NULL);
 	caif_assert(layr->dn->transmit != NULL);
-	if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+	if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
 		pr_warning("CAIF: %s(): Packet too large - size=%d\n",
 			__func__, cfpkt_getlen(pkt));
 		return -EOVERFLOW;
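
Both CAIF hunks fix the same precedence slip: '!' binds tighter than '>', so '!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE' parses as '(!len) > MAX', a 0-or-1 value compared against the limit, which is false for any positive limit. The oversize check therefore never fired. A standalone demo (illustrative limit, not the real CAIF constant):

	#include <stdio.h>

	#define MAX_PAYLOAD 4096

	int main(void)
	{
		int len = 100000;	/* clearly oversized */

		printf("buggy check fires: %d\n", !len > MAX_PAYLOAD);	/* 0 */
		printf("fixed check fires: %d\n", len > MAX_PAYLOAD);	/* 1 */
		return 0;
	}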
net/core/dev.c
@@ -2253,11 +2253,9 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	if (skb_rx_queue_recorded(skb)) {
 		u16 index = skb_get_rx_queue(skb);
 		if (unlikely(index >= dev->num_rx_queues)) {
-			if (net_ratelimit()) {
-				pr_warning("%s received packet on queue "
-					"%u, but number of RX queues is %u\n",
-					dev->name, index, dev->num_rx_queues);
-			}
+			WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
+				"on queue %u, but number of RX queues is %u\n",
+				dev->name, index, dev->num_rx_queues);
 			goto done;
 		}
 		rxqueue = dev->_rx + index;
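
The hunk above changes the policy as well as the helper: the warning now fires at most once, and only when the device actually allocated more than one RX queue, since a recorded queue index on a single-queue device carries no information. A rough userspace approximation of the WARN_ONCE() behaviour relied on here (GCC statement expression, simplified; the kernel macro additionally dumps a stack trace):

	#include <stdio.h>

	#define WARN_ONCE(cond, ...) ({			\
		static int warned;			\
		int c = !!(cond);			\
		if (c && !warned) {			\
			warned = 1;			\
			fprintf(stderr, __VA_ARGS__);	\
		}					\
		c;					\
	})

	int main(void)
	{
		for (int i = 0; i < 3; i++)
			WARN_ONCE(i >= 0, "printed exactly once\n");
		return 0;
	}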
net/core/dev.c
@@ -2812,13 +2810,24 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	if (!skb->skb_iif)
 		skb->skb_iif = skb->dev->ifindex;
 
+	/*
+	 * bonding note: skbs received on inactive slaves should only
+	 * be delivered to pkt handlers that are exact matches.  Also
+	 * the deliver_no_wcard flag will be set.  If packet handlers
+	 * are sensitive to duplicate packets these skbs will need to
+	 * be dropped at the handler.  The vlan accel path may have
+	 * already set the deliver_no_wcard flag.
+	 */
 	null_or_orig = NULL;
 	orig_dev = skb->dev;
 	master = ACCESS_ONCE(orig_dev->master);
-	if (master) {
-		if (skb_bond_should_drop(skb, master))
+	if (skb->deliver_no_wcard)
+		null_or_orig = orig_dev;
+	else if (master) {
+		if (skb_bond_should_drop(skb, master)) {
+			skb->deliver_no_wcard = 1;
 			null_or_orig = orig_dev; /* deliver only exact match */
-		else
+		} else
 			skb->dev = master;
 	}
 
net/core/gen_estimator.c
@@ -107,6 +107,7 @@ static DEFINE_RWLOCK(est_lock);
 
 /* Protects against soft lockup during large deletion */
 static struct rb_root est_root = RB_ROOT;
+static DEFINE_SPINLOCK(est_tree_lock);
 
 static void est_timer(unsigned long arg)
 {
@@ -201,7 +202,6 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats
  *
  * Returns 0 on success or a negative error code.
  *
- * NOTE: Called under rtnl_mutex
  */
 int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 		      struct gnet_stats_rate_est *rate_est,
@@ -232,6 +232,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 	est->last_packets = bstats->packets;
 	est->avpps = rate_est->pps<<10;
 
+	spin_lock(&est_tree_lock);
 	if (!elist[idx].timer.function) {
 		INIT_LIST_HEAD(&elist[idx].list);
 		setup_timer(&elist[idx].timer, est_timer, idx);
@@ -242,6 +243,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 
 	list_add_rcu(&est->list, &elist[idx].list);
 	gen_add_node(est);
+	spin_unlock(&est_tree_lock);
 
 	return 0;
 }
@@ -261,13 +263,13 @@ static void __gen_kill_estimator(struct rcu_head *head)
  *
  * Removes the rate estimator specified by &bstats and &rate_est.
  *
- * NOTE: Called under rtnl_mutex
  */
 void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 			struct gnet_stats_rate_est *rate_est)
 {
 	struct gen_estimator *e;
 
+	spin_lock(&est_tree_lock);
 	while ((e = gen_find_node(bstats, rate_est))) {
 		rb_erase(&e->node, &est_root);
 
@@ -278,6 +280,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 		list_del_rcu(&e->list);
 		call_rcu(&e->e_rcu, __gen_kill_estimator);
 	}
+	spin_unlock(&est_tree_lock);
 }
 EXPORT_SYMBOL(gen_kill_estimator);
 
@@ -312,8 +315,14 @@ EXPORT_SYMBOL(gen_replace_estimator);
 bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
 			  const struct gnet_stats_rate_est *rate_est)
 {
+	bool res;
+
 	ASSERT_RTNL();
 
-	return gen_find_node(bstats, rate_est) != NULL;
+	spin_lock(&est_tree_lock);
+	res = gen_find_node(bstats, rate_est) != NULL;
+	spin_unlock(&est_tree_lock);
+
+	return res;
 }
 EXPORT_SYMBOL(gen_estimator_active);
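
All seven gen_estimator hunks implement one idea: est_root, the rbtree of live estimators, gains its own spinlock so that lookups and removals no longer lean on every caller holding rtnl_mutex (hence the deleted "Called under rtnl_mutex" notes). Every traversal or mutation of the tree is now bracketed by est_tree_lock. A minimal sketch of the resulting discipline, using a reduced hypothetical node type rather than struct gen_estimator:

	#include <linux/rbtree.h>
	#include <linux/spinlock.h>

	struct demo_node {
		struct rb_node node;
		unsigned long key;
	};

	static struct rb_root demo_root = RB_ROOT;
	static DEFINE_SPINLOCK(demo_tree_lock);

	/* Lookup under the lock, mirroring gen_estimator_active(). */
	static bool demo_active(unsigned long key)
	{
		struct rb_node *p;
		bool res = false;

		spin_lock(&demo_tree_lock);
		for (p = demo_root.rb_node; p; ) {
			struct demo_node *e = rb_entry(p, struct demo_node, node);

			if (key < e->key) {
				p = p->rb_left;
			} else if (key > e->key) {
				p = p->rb_right;
			} else {
				res = true;
				break;
			}
		}
		spin_unlock(&demo_tree_lock);
		return res;
	}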
net/core/pktgen.c
@@ -2170,7 +2170,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 	end_time = ktime_now();
 
 	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
-	pkt_dev->next_tx = ktime_add_ns(end_time, pkt_dev->delay);
+	pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
 }
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
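
The one-word change is the whole accuracy fix. spin() returns at end_time, which is always slightly later than the requested spin_until; basing next_tx on end_time fed that overshoot back into every subsequent gap, so the achieved inter-packet delay drifted above the configured one. Basing it on spin_until keeps the deadlines absolute and lets the overshoot cancel out. A userspace sketch with illustrative numbers:

	#include <stdio.h>

	int main(void)
	{
		const long delay = 1000;	/* configured gap, ns */
		const long oversleep = 50;	/* wake-up latency per wait, ns */
		long next_rel = 0, next_abs = 0;

		for (int i = 0; i < 10; i++) {
			long wake = next_rel + oversleep;	/* actual wake-up */

			next_rel = wake + delay;	/* old: wake-up time + delay */
			next_abs = next_abs + delay;	/* new: deadline + delay */
		}
		printf("10th deadline, relative base: %ld ns\n", next_rel);	/* 10500 */
		printf("10th deadline, absolute base: %ld ns\n", next_abs);	/* 10000 */
		return 0;
	}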
net/ipv6/icmp.c
@@ -483,7 +483,7 @@ route_done:
 			      np->tclass, NULL, &fl, (struct rt6_info*)dst,
 			      MSG_DONTWAIT, np->dontfrag);
 	if (err) {
-		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
 		ip6_flush_pending_frames(sk);
 		goto out_put;
 	}
@@ -565,7 +565,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
 				np->dontfrag);
 
 	if (err) {
-		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
 		ip6_flush_pending_frames(sk);
 		goto out_put;
 	}