Mirror of https://github.com/Fishwaldo/Star64_linux.git (synced 2025-06-28 17:41:50 +00:00)
ixgbe: Fix race condition where RX buffer could become corrupted.
There was a race condition in the reset path where the RX buffer could become corrupted during Fdir configuration. This is due to a HW bug. The fix right now is to lock the buffer while we do the Fdir configuration. Since we were using a similar workaround for another bug, I moved the existing code to a function and reused it. The HW team also recommended that the IXGBE_MAX_SECRX_POLL value be changed from 30 to 40. The erratum for this bug will be published in the next release of the 82599 Spec Update.

Signed-off-by: Atita Shirwaikar <atita.shirwaikar@intel.com>
Acked-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Tested-by: Stephen Ko <stephen.s.ko@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 3ed69d7e31
commit d2f5e7f3af
6 changed files with 84 additions and 24 deletions
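In outline, the change brackets Flow Director (Fdir) programming with the new Rx-buffer disable/enable hooks so the Rx security block is quiesced while the Fdir registers are written. Below is a condensed sketch of that ordering using identifiers from the diff; the wrapper name and the reduced control flow are illustrative only, since the real ixgbe_configure() also gates the calls on hw->mac.type and handles the perfect-filter case.

    /* Illustrative sketch only -- not the full ixgbe_configure() from the diff below. */
    static void ixgbe_configure_fdir_sketch(struct ixgbe_adapter *adapter)
    {
            struct ixgbe_hw *hw = &adapter->hw;

            /* Quiesce the Rx security block so Fdir writes cannot corrupt the RX buffer */
            hw->mac.ops.disable_rx_buff(hw);

            if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
                    ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);

            /* Re-open the Rx data path once Fdir configuration is complete */
            hw->mac.ops.enable_rx_buff(hw);
    }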
@@ -1907,38 +1907,17 @@ out:
  **/
 static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
 {
-#define IXGBE_MAX_SECRX_POLL 30
-        int i;
-        int secrxreg;
-
         /*
          * Workaround for 82599 silicon errata when enabling the Rx datapath.
          * If traffic is incoming before we enable the Rx unit, it could hang
          * the Rx DMA unit.  Therefore, make sure the security engine is
          * completely disabled prior to enabling the Rx unit.
          */
-        secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
-        secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
-        IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
-        for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
-                secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
-                if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
-                        break;
-                else
-                        /* Use interrupt-safe sleep just in case */
-                        udelay(10);
-        }
-
-        /* For informational purposes only */
-        if (i >= IXGBE_MAX_SECRX_POLL)
-                hw_dbg(hw, "Rx unit being enabled before security "
-                       "path fully disabled. Continuing with init.\n");
+        hw->mac.ops.disable_rx_buff(hw);
 
         IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
-        secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
-        secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
-        IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
-        IXGBE_WRITE_FLUSH(hw);
+
+        hw->mac.ops.enable_rx_buff(hw);
 
         return 0;
 }
@@ -2103,6 +2082,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
         .get_media_type = &ixgbe_get_media_type_82599,
         .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82599,
         .enable_rx_dma = &ixgbe_enable_rx_dma_82599,
+        .disable_rx_buff = &ixgbe_disable_rx_buff_generic,
+        .enable_rx_buff = &ixgbe_enable_rx_buff_generic,
         .get_mac_addr = &ixgbe_get_mac_addr_generic,
         .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
         .get_device_caps = &ixgbe_get_device_caps_generic,
@@ -2577,6 +2577,58 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
         ixgbe_release_eeprom_semaphore(hw);
 }
 
+/**
+ *  ixgbe_disable_rx_buff_generic - Stops the receive data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Stops the receive data path and waits for the HW to internally
+ *  empty the Rx security block.
+ **/
+s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
+{
+#define IXGBE_MAX_SECRX_POLL 40
+        int i;
+        int secrxreg;
+
+        secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+        secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
+        IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
+        for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
+                secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
+                if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
+                        break;
+                else
+                        /* Use interrupt-safe sleep just in case */
+                        udelay(10);
+        }
+
+        /* For informational purposes only */
+        if (i >= IXGBE_MAX_SECRX_POLL)
+                hw_dbg(hw, "Rx unit being enabled before security "
+                       "path fully disabled. Continuing with init.\n");
+
+        return 0;
+
+}
+
+/**
+ *  ixgbe_enable_rx_buff - Enables the receive data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Enables the receive data path
+ **/
+s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
+{
+        int secrxreg;
+
+        secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+        secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
+        IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
+        IXGBE_WRITE_FLUSH(hw);
+
+        return 0;
+}
+
 /**
  *  ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
  *  @hw: pointer to hardware structure
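For reference on the new poll limit: with IXGBE_MAX_SECRX_POLL at 40 and a 10 µs udelay per iteration, ixgbe_disable_rx_buff_generic() waits at most 40 × 10 µs = 400 µs for IXGBE_SECRXSTAT_SECRX_RDY (up from roughly 300 µs at the old limit of 30) before emitting the debug message and continuing.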
@@ -74,6 +74,8 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
                                       struct net_device *netdev);
 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
+s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
+s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num);
 s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw);
@@ -3661,6 +3661,8 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
 
 static void ixgbe_configure(struct ixgbe_adapter *adapter)
 {
+        struct ixgbe_hw *hw = &adapter->hw;
+
         ixgbe_configure_pb(adapter);
 #ifdef CONFIG_IXGBE_DCB
         ixgbe_configure_dcb(adapter);
@@ -3674,6 +3676,16 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
         ixgbe_configure_fcoe(adapter);
 
 #endif /* IXGBE_FCOE */
+
+        switch (hw->mac.type) {
+        case ixgbe_mac_82599EB:
+        case ixgbe_mac_X540:
+                hw->mac.ops.disable_rx_buff(hw);
+                break;
+        default:
+                break;
+        }
+
         if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
                 ixgbe_init_fdir_signature_82599(&adapter->hw,
                                                 adapter->fdir_pballoc);
@@ -3683,6 +3695,15 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
                 ixgbe_fdir_filter_restore(adapter);
         }
+
+        switch (hw->mac.type) {
+        case ixgbe_mac_82599EB:
+        case ixgbe_mac_X540:
+                hw->mac.ops.enable_rx_buff(hw);
+                break;
+        default:
+                break;
+        }
 
         ixgbe_configure_virtualization(adapter);
 
         ixgbe_configure_tx(adapter);
@@ -2728,6 +2728,8 @@ struct ixgbe_mac_operations {
         s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
         s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
         s32 (*setup_sfp)(struct ixgbe_hw *);
+        s32 (*disable_rx_buff)(struct ixgbe_hw *);
+        s32 (*enable_rx_buff)(struct ixgbe_hw *);
         s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
         s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
         void (*release_swfw_sync)(struct ixgbe_hw *, u16);
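Both MAC-ops tables that support Flow Director wire the new hooks to the generic implementations: mac_ops_82599 above and mac_ops_X540 below. This is also why ixgbe_configure() guards the calls with a switch on hw->mac.type; on MAC types whose ops tables do not populate these pointers, the hooks are never invoked.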
@@ -847,6 +847,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
         .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
         .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540,
         .release_swfw_sync = &ixgbe_release_swfw_sync_X540,
+        .disable_rx_buff = &ixgbe_disable_rx_buff_generic,
+        .enable_rx_buff = &ixgbe_enable_rx_buff_generic,
 };
 
 static struct ixgbe_eeprom_operations eeprom_ops_X540 = {