mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-06-25 08:02:56 +00:00
Merge branch 'bnx2x'
Michal Schmidt says: ==================== bnx2x: minor cleanups related to TPA bits I noticed some simplification possibilities while looking into the bug fixed by "bnx2x: really disable TPA if 'disable_tpa' is set". ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit
ad93e1d7b9
5 changed files with 33 additions and 53 deletions
|
@ -521,6 +521,7 @@ struct bnx2x_fp_txdata {
|
||||||
};
|
};
|
||||||
|
|
||||||
enum bnx2x_tpa_mode_t {
|
enum bnx2x_tpa_mode_t {
|
||||||
|
TPA_MODE_DISABLED,
|
||||||
TPA_MODE_LRO,
|
TPA_MODE_LRO,
|
||||||
TPA_MODE_GRO
|
TPA_MODE_GRO
|
||||||
};
|
};
|
||||||
|
@ -589,7 +590,6 @@ struct bnx2x_fastpath {
|
||||||
|
|
||||||
/* TPA related */
|
/* TPA related */
|
||||||
struct bnx2x_agg_info *tpa_info;
|
struct bnx2x_agg_info *tpa_info;
|
||||||
u8 disable_tpa;
|
|
||||||
#ifdef BNX2X_STOP_ON_ERROR
|
#ifdef BNX2X_STOP_ON_ERROR
|
||||||
u64 tpa_queue_used;
|
u64 tpa_queue_used;
|
||||||
#endif
|
#endif
|
||||||
|
@ -1545,9 +1545,7 @@ struct bnx2x {
|
||||||
#define USING_MSIX_FLAG (1 << 5)
|
#define USING_MSIX_FLAG (1 << 5)
|
||||||
#define USING_MSI_FLAG (1 << 6)
|
#define USING_MSI_FLAG (1 << 6)
|
||||||
#define DISABLE_MSI_FLAG (1 << 7)
|
#define DISABLE_MSI_FLAG (1 << 7)
|
||||||
#define TPA_ENABLE_FLAG (1 << 8)
|
|
||||||
#define NO_MCP_FLAG (1 << 9)
|
#define NO_MCP_FLAG (1 << 9)
|
||||||
#define GRO_ENABLE_FLAG (1 << 10)
|
|
||||||
#define MF_FUNC_DIS (1 << 11)
|
#define MF_FUNC_DIS (1 << 11)
|
||||||
#define OWN_CNIC_IRQ (1 << 12)
|
#define OWN_CNIC_IRQ (1 << 12)
|
||||||
#define NO_ISCSI_OOO_FLAG (1 << 13)
|
#define NO_ISCSI_OOO_FLAG (1 << 13)
|
||||||
|
|
|
@ -947,10 +947,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
|
||||||
u16 frag_size, pages;
|
u16 frag_size, pages;
|
||||||
#ifdef BNX2X_STOP_ON_ERROR
|
#ifdef BNX2X_STOP_ON_ERROR
|
||||||
/* sanity check */
|
/* sanity check */
|
||||||
if (fp->disable_tpa &&
|
if (fp->mode == TPA_MODE_DISABLED &&
|
||||||
(CQE_TYPE_START(cqe_fp_type) ||
|
(CQE_TYPE_START(cqe_fp_type) ||
|
||||||
CQE_TYPE_STOP(cqe_fp_type)))
|
CQE_TYPE_STOP(cqe_fp_type)))
|
||||||
BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
|
BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
|
||||||
CQE_TYPE(cqe_fp_type));
|
CQE_TYPE(cqe_fp_type));
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
@ -1396,7 +1396,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
|
||||||
DP(NETIF_MSG_IFUP,
|
DP(NETIF_MSG_IFUP,
|
||||||
"mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
|
"mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
|
||||||
|
|
||||||
if (!fp->disable_tpa) {
|
if (fp->mode != TPA_MODE_DISABLED) {
|
||||||
/* Fill the per-aggregation pool */
|
/* Fill the per-aggregation pool */
|
||||||
for (i = 0; i < MAX_AGG_QS(bp); i++) {
|
for (i = 0; i < MAX_AGG_QS(bp); i++) {
|
||||||
struct bnx2x_agg_info *tpa_info =
|
struct bnx2x_agg_info *tpa_info =
|
||||||
|
@ -1410,7 +1410,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
|
||||||
BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
|
BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
|
||||||
j);
|
j);
|
||||||
bnx2x_free_tpa_pool(bp, fp, i);
|
bnx2x_free_tpa_pool(bp, fp, i);
|
||||||
fp->disable_tpa = 1;
|
fp->mode = TPA_MODE_DISABLED;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
dma_unmap_addr_set(first_buf, mapping, 0);
|
dma_unmap_addr_set(first_buf, mapping, 0);
|
||||||
|
@ -1438,7 +1438,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
|
||||||
ring_prod);
|
ring_prod);
|
||||||
bnx2x_free_tpa_pool(bp, fp,
|
bnx2x_free_tpa_pool(bp, fp,
|
||||||
MAX_AGG_QS(bp));
|
MAX_AGG_QS(bp));
|
||||||
fp->disable_tpa = 1;
|
fp->mode = TPA_MODE_DISABLED;
|
||||||
ring_prod = 0;
|
ring_prod = 0;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -1560,7 +1560,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
|
||||||
|
|
||||||
bnx2x_free_rx_bds(fp);
|
bnx2x_free_rx_bds(fp);
|
||||||
|
|
||||||
if (!fp->disable_tpa)
|
if (fp->mode != TPA_MODE_DISABLED)
|
||||||
bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
|
bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -2477,19 +2477,19 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
|
||||||
/* set the tpa flag for each queue. The tpa flag determines the queue
|
/* set the tpa flag for each queue. The tpa flag determines the queue
|
||||||
* minimal size so it must be set prior to queue memory allocation
|
* minimal size so it must be set prior to queue memory allocation
|
||||||
*/
|
*/
|
||||||
fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
|
if (bp->dev->features & NETIF_F_LRO)
|
||||||
(bp->flags & GRO_ENABLE_FLAG &&
|
|
||||||
bnx2x_mtu_allows_gro(bp->dev->mtu)));
|
|
||||||
if (bp->flags & TPA_ENABLE_FLAG)
|
|
||||||
fp->mode = TPA_MODE_LRO;
|
fp->mode = TPA_MODE_LRO;
|
||||||
else if (bp->flags & GRO_ENABLE_FLAG)
|
else if (bp->dev->features & NETIF_F_GRO &&
|
||||||
|
bnx2x_mtu_allows_gro(bp->dev->mtu))
|
||||||
fp->mode = TPA_MODE_GRO;
|
fp->mode = TPA_MODE_GRO;
|
||||||
|
else
|
||||||
|
fp->mode = TPA_MODE_DISABLED;
|
||||||
|
|
||||||
/* We don't want TPA if it's disabled in bp
|
/* We don't want TPA if it's disabled in bp
|
||||||
* or if this is an FCoE L2 ring.
|
* or if this is an FCoE L2 ring.
|
||||||
*/
|
*/
|
||||||
if (bp->disable_tpa || IS_FCOE_FP(fp))
|
if (bp->disable_tpa || IS_FCOE_FP(fp))
|
||||||
fp->disable_tpa = 1;
|
fp->mode = TPA_MODE_DISABLED;
|
||||||
}
|
}
|
||||||
|
|
||||||
int bnx2x_load_cnic(struct bnx2x *bp)
|
int bnx2x_load_cnic(struct bnx2x *bp)
|
||||||
|
@ -2610,7 +2610,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
|
||||||
/*
|
/*
|
||||||
* Zero fastpath structures preserving invariants like napi, which are
|
* Zero fastpath structures preserving invariants like napi, which are
|
||||||
* allocated only once, fp index, max_cos, bp pointer.
|
* allocated only once, fp index, max_cos, bp pointer.
|
||||||
* Also set fp->disable_tpa and txdata_ptr.
|
* Also set fp->mode and txdata_ptr.
|
||||||
*/
|
*/
|
||||||
DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
|
DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
|
||||||
for_each_queue(bp, i)
|
for_each_queue(bp, i)
|
||||||
|
@ -3249,7 +3249,7 @@ int bnx2x_low_latency_recv(struct napi_struct *napi)
|
||||||
|
|
||||||
if ((bp->state == BNX2X_STATE_CLOSED) ||
|
if ((bp->state == BNX2X_STATE_CLOSED) ||
|
||||||
(bp->state == BNX2X_STATE_ERROR) ||
|
(bp->state == BNX2X_STATE_ERROR) ||
|
||||||
(bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
|
(bp->dev->features & (NETIF_F_LRO | NETIF_F_GRO)))
|
||||||
return LL_FLUSH_FAILED;
|
return LL_FLUSH_FAILED;
|
||||||
|
|
||||||
if (!bnx2x_fp_lock_poll(fp))
|
if (!bnx2x_fp_lock_poll(fp))
|
||||||
|
@ -4545,7 +4545,7 @@ alloc_mem_err:
|
||||||
* In these cases we disable the queue
|
* In these cases we disable the queue
|
||||||
* Min size is different for OOO, TPA and non-TPA queues
|
* Min size is different for OOO, TPA and non-TPA queues
|
||||||
*/
|
*/
|
||||||
if (ring_size < (fp->disable_tpa ?
|
if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
|
||||||
MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
|
MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
|
||||||
/* release memory allocated for this queue */
|
/* release memory allocated for this queue */
|
||||||
bnx2x_free_fp_mem_at(bp, index);
|
bnx2x_free_fp_mem_at(bp, index);
|
||||||
|
@ -4834,29 +4834,15 @@ netdev_features_t bnx2x_fix_features(struct net_device *dev,
|
||||||
features &= ~NETIF_F_GRO;
|
features &= ~NETIF_F_GRO;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Note: do not disable SW GRO in kernel when HW GRO is off */
|
|
||||||
if (bp->disable_tpa)
|
|
||||||
features &= ~NETIF_F_LRO;
|
|
||||||
|
|
||||||
return features;
|
return features;
|
||||||
}
|
}
|
||||||
|
|
||||||
int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
|
int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
|
||||||
{
|
{
|
||||||
struct bnx2x *bp = netdev_priv(dev);
|
struct bnx2x *bp = netdev_priv(dev);
|
||||||
u32 flags = bp->flags;
|
netdev_features_t changes = features ^ dev->features;
|
||||||
u32 changes;
|
|
||||||
bool bnx2x_reload = false;
|
bool bnx2x_reload = false;
|
||||||
|
int rc;
|
||||||
if (features & NETIF_F_LRO)
|
|
||||||
flags |= TPA_ENABLE_FLAG;
|
|
||||||
else
|
|
||||||
flags &= ~TPA_ENABLE_FLAG;
|
|
||||||
|
|
||||||
if (features & NETIF_F_GRO)
|
|
||||||
flags |= GRO_ENABLE_FLAG;
|
|
||||||
else
|
|
||||||
flags &= ~GRO_ENABLE_FLAG;
|
|
||||||
|
|
||||||
/* VFs or non SRIOV PFs should be able to change loopback feature */
|
/* VFs or non SRIOV PFs should be able to change loopback feature */
|
||||||
if (!pci_num_vf(bp->pdev)) {
|
if (!pci_num_vf(bp->pdev)) {
|
||||||
|
@ -4873,24 +4859,23 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
changes = flags ^ bp->flags;
|
|
||||||
|
|
||||||
/* if GRO is changed while LRO is enabled, don't force a reload */
|
/* if GRO is changed while LRO is enabled, don't force a reload */
|
||||||
if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
|
if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
|
||||||
changes &= ~GRO_ENABLE_FLAG;
|
changes &= ~NETIF_F_GRO;
|
||||||
|
|
||||||
/* if GRO is changed while HW TPA is off, don't force a reload */
|
/* if GRO is changed while HW TPA is off, don't force a reload */
|
||||||
if ((changes & GRO_ENABLE_FLAG) && bp->disable_tpa)
|
if ((changes & NETIF_F_GRO) && bp->disable_tpa)
|
||||||
changes &= ~GRO_ENABLE_FLAG;
|
changes &= ~NETIF_F_GRO;
|
||||||
|
|
||||||
if (changes)
|
if (changes)
|
||||||
bnx2x_reload = true;
|
bnx2x_reload = true;
|
||||||
|
|
||||||
bp->flags = flags;
|
|
||||||
|
|
||||||
if (bnx2x_reload) {
|
if (bnx2x_reload) {
|
||||||
if (bp->recovery_state == BNX2X_RECOVERY_DONE)
|
if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
|
||||||
return bnx2x_reload_if_running(dev);
|
dev->features = features;
|
||||||
|
rc = bnx2x_reload_if_running(dev);
|
||||||
|
return rc ? rc : 1;
|
||||||
|
}
|
||||||
/* else: bnx2x_nic_load() will be called at end of recovery */
|
/* else: bnx2x_nic_load() will be called at end of recovery */
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -969,7 +969,7 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
if (fp->disable_tpa)
|
if (fp->mode == TPA_MODE_DISABLED)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
for (i = 0; i < last; i++)
|
for (i = 0; i < last; i++)
|
||||||
|
|
|
@ -3128,7 +3128,7 @@ static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
|
||||||
__set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
|
__set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!fp->disable_tpa) {
|
if (fp->mode != TPA_MODE_DISABLED) {
|
||||||
__set_bit(BNX2X_Q_FLG_TPA, &flags);
|
__set_bit(BNX2X_Q_FLG_TPA, &flags);
|
||||||
__set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
|
__set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
|
||||||
if (fp->mode == TPA_MODE_GRO)
|
if (fp->mode == TPA_MODE_GRO)
|
||||||
|
@ -3176,7 +3176,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
|
||||||
u16 sge_sz = 0;
|
u16 sge_sz = 0;
|
||||||
u16 tpa_agg_size = 0;
|
u16 tpa_agg_size = 0;
|
||||||
|
|
||||||
if (!fp->disable_tpa) {
|
if (fp->mode != TPA_MODE_DISABLED) {
|
||||||
pause->sge_th_lo = SGE_TH_LO(bp);
|
pause->sge_th_lo = SGE_TH_LO(bp);
|
||||||
pause->sge_th_hi = SGE_TH_HI(bp);
|
pause->sge_th_hi = SGE_TH_HI(bp);
|
||||||
|
|
||||||
|
@ -3304,7 +3304,7 @@ static void bnx2x_pf_init(struct bnx2x *bp)
|
||||||
/* This flag is relevant for E1x only.
|
/* This flag is relevant for E1x only.
|
||||||
* E2 doesn't have a TPA configuration in a function level.
|
* E2 doesn't have a TPA configuration in a function level.
|
||||||
*/
|
*/
|
||||||
flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
|
flags |= (bp->dev->features & NETIF_F_LRO) ? FUNC_FLG_TPA : 0;
|
||||||
|
|
||||||
func_init.func_flgs = flags;
|
func_init.func_flgs = flags;
|
||||||
func_init.pf_id = BP_FUNC(bp);
|
func_init.pf_id = BP_FUNC(bp);
|
||||||
|
@ -12107,11 +12107,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
|
||||||
|
|
||||||
/* Set TPA flags */
|
/* Set TPA flags */
|
||||||
if (bp->disable_tpa) {
|
if (bp->disable_tpa) {
|
||||||
bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
|
bp->dev->hw_features &= ~NETIF_F_LRO;
|
||||||
bp->dev->features &= ~NETIF_F_LRO;
|
bp->dev->features &= ~NETIF_F_LRO;
|
||||||
} else {
|
|
||||||
bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
|
|
||||||
bp->dev->features |= NETIF_F_LRO;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (CHIP_IS_E1(bp))
|
if (CHIP_IS_E1(bp))
|
||||||
|
|
|
@ -594,7 +594,7 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
|
||||||
bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));
|
bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));
|
||||||
|
|
||||||
/* select tpa mode to request */
|
/* select tpa mode to request */
|
||||||
if (!fp->disable_tpa) {
|
if (fp->mode != TPA_MODE_DISABLED) {
|
||||||
flags |= VFPF_QUEUE_FLG_TPA;
|
flags |= VFPF_QUEUE_FLG_TPA;
|
||||||
flags |= VFPF_QUEUE_FLG_TPA_IPV6;
|
flags |= VFPF_QUEUE_FLG_TPA_IPV6;
|
||||||
if (fp->mode == TPA_MODE_GRO)
|
if (fp->mode == TPA_MODE_GRO)
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue