Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue
Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2017-10-10

This series contains updates to i40e only.

Stefano Brivio fixes the grammar in a function header comment.

Alex fixes a memory leak where we were not correctly placing the pages
from buffers that had been used to return a filter programming status
back on the ring.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 3668bb8da1
2 changed files with 37 additions and 28 deletions
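For context on the i40e_txrx.c hunks below: the driver walks its rx rings with a "use the current slot, advance the index, wrap to zero" idiom. A minimal, self-contained sketch of that idiom (a hypothetical helper for illustration, not code from this patch):

    #include <stdint.h>

    /* Advance a ring index and wrap it back to zero at the ring size.
     * This mirrors the "(idx < count) ? idx : 0" pattern the patch uses
     * for both next_to_clean and next_to_alloc.
     */
    static inline uint32_t ring_advance(uint32_t idx, uint32_t count)
    {
            idx++;
            return (idx < count) ? idx : 0;
    }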
drivers/net/ethernet/intel/i40e/i40e_nvm.c

@@ -298,7 +298,7 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
 }
 
 /**
- * __i40e_read_nvm_word - Reads nvm word, assumes called does the locking
+ * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
  * @hw: pointer to the HW structure
  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
  * @data: word read from the Shadow RAM
drivers/net/ethernet/intel/i40e/i40e_txrx.c

@@ -1037,6 +1037,32 @@ reset_latency:
 	return false;
 }
 
+/**
+ * i40e_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
+			       struct i40e_rx_buffer *old_buff)
+{
+	struct i40e_rx_buffer *new_buff;
+	u16 nta = rx_ring->next_to_alloc;
+
+	new_buff = &rx_ring->rx_bi[nta];
+
+	/* update, and store next to alloc */
+	nta++;
+	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+	/* transfer page from old buffer to new buffer */
+	new_buff->dma = old_buff->dma;
+	new_buff->page = old_buff->page;
+	new_buff->page_offset = old_buff->page_offset;
+	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
+}
+
 /**
  * i40e_rx_is_programming_status - check for programming status descriptor
  * @qw: qword representing status_error_len in CPU ordering
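Note: a C function must be defined (or declared) before it is called, so this hunk moves i40e_reuse_rx_page above i40e_clean_programming_status, which calls it in the next hunk; the final hunk removes the original copy of the function further down the file. The function body itself is unchanged.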
@@ -1071,15 +1097,24 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
 					  union i40e_rx_desc *rx_desc,
 					  u64 qw)
 {
-	u32 ntc = rx_ring->next_to_clean + 1;
+	struct i40e_rx_buffer *rx_buffer;
+	u32 ntc = rx_ring->next_to_clean;
 	u8 id;
 
 	/* fetch, update, and store next to clean */
+	rx_buffer = &rx_ring->rx_bi[ntc++];
 	ntc = (ntc < rx_ring->count) ? ntc : 0;
 	rx_ring->next_to_clean = ntc;
 
 	prefetch(I40E_RX_DESC(rx_ring, ntc));
 
+	/* place unused page back on the ring */
+	i40e_reuse_rx_page(rx_ring, rx_buffer);
+	rx_ring->rx_stats.page_reuse_count++;
+
+	/* clear contents of buffer_info */
+	rx_buffer->page = NULL;
+
 	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
 	     I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
 
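This is the memory-leak fix from the commit message: previously the function only advanced next_to_clean past the descriptor, so the page belonging to the buffer that carried the filter programming status was never placed back on the ring and its reference was lost. Now the buffer at the old next_to_clean is fetched, its page is recycled via i40e_reuse_rx_page() (counted in page_reuse_count), and rx_buffer->page is cleared so the normal cleanup path does not touch the donated page again.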
@@ -1638,32 +1673,6 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
 	return false;
 }
 
-/**
- * i40e_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: rx descriptor ring to store buffers on
- * @old_buff: donor buffer to have page reused
- *
- * Synchronizes page for reuse by the adapter
- **/
-static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
-			       struct i40e_rx_buffer *old_buff)
-{
-	struct i40e_rx_buffer *new_buff;
-	u16 nta = rx_ring->next_to_alloc;
-
-	new_buff = &rx_ring->rx_bi[nta];
-
-	/* update, and store next to alloc */
-	nta++;
-	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
-	/* transfer page from old buffer to new buffer */
-	new_buff->dma = old_buff->dma;
-	new_buff->page = old_buff->page;
-	new_buff->page_offset = old_buff->page_offset;
-	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
-}
-
 /**
  * i40e_page_is_reusable - check if any reuse is possible
  * @page: page struct to check