i40e/i40evf: Use length to determine if descriptor is done
This change makes it so that we use the length of the packet instead of the
DD status bit to determine if a new descriptor is ready to be processed.
The obvious advantage is that it cuts down on reads, since we don't really
even need the DD bit if the size going from zero to a non-zero value is
enough to tell us that the packet has been completed.

Change-ID: Iebdf9cdb36c454ef092df27199b92ad09c374231
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 3a104f8df2
commit d57c0e08c7

2 changed files with 24 additions and 24 deletions
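Before the diff, a minimal sketch of the pattern the patch adopts. The
struct and helper below are simplified, hypothetical stand-ins for the
driver's real descriptor layout and fetch path (the mask/shift values
follow the driver's i40e_type.h); this is an illustration of the
technique, not the driver's actual code:

#include <linux/types.h>
#include <asm/byteorder.h>

#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT	38
#define I40E_RXD_QW1_LENGTH_PBUF_MASK	(0x3FFFULL << \
					 I40E_RXD_QW1_LENGTH_PBUF_SHIFT)

/* Simplified stand-in for the write-back half of union i40e_rx_desc. */
struct rx_desc_wb {
	__le64 qword0;			/* packet/header buffer addresses */
	__le64 status_error_len;	/* qword1: status, errors, length */
};

/* Returns the packet buffer length, or 0 if the descriptor has not yet
 * been written back.  Unused descriptors always read back as zero: the
 * cleanup path clears qword1, and the overlapping hdr_addr stays zero
 * because packet split is unused.  So a zero-to-non-zero length is as
 * reliable a "done" signal as the DD bit, with no separate bit test.
 */
static unsigned int rx_desc_size(const struct rx_desc_wb *rx_desc)
{
	u64 qword = le64_to_cpu(rx_desc->status_error_len);

	return (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
	       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
}

/* Caller-side pattern, as in i40e_clean_rx_irq() below:
 *
 *	size = rx_desc_size(rx_desc);
 *	if (!size)
 *		break;
 *	dma_rmb();	// order the size read before other field reads
 *	skb = i40e_fetch_rx_buffer(rx_ring, rx_desc, skb, size);
 */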
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1757,6 +1757,7 @@ add_tail_frag:
  * i40e_fetch_rx_buffer - Allocate skb and populate it
  * @rx_ring: rx descriptor ring to transact packets on
  * @rx_desc: descriptor containing info written by hardware
+ * @size: size of buffer to add to skb
  *
  * This function allocates an skb on the fly, and populates it with the page
  * data from the current receive descriptor, taking care to set up the skb
@@ -1766,13 +1767,9 @@ add_tail_frag:
 static inline
 struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
 				     union i40e_rx_desc *rx_desc,
-				     struct sk_buff *skb)
+				     struct sk_buff *skb,
+				     unsigned int size)
 {
-	u64 local_status_error_len =
-		le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-	unsigned int size =
-		(local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
-		I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
 	struct i40e_rx_buffer *rx_buffer;
 	struct page *page;
 
@@ -1890,6 +1887,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 
 	while (likely(total_rx_packets < budget)) {
 		union i40e_rx_desc *rx_desc;
+		unsigned int size;
 		u16 vlan_tag;
 		u8 rx_ptype;
 		u64 qword;
@@ -1906,19 +1904,21 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 		/* status_error_len will always be zero for unused descriptors
 		 * because it's cleared in cleanup, and overlaps with hdr_addr
 		 * which is always zero because packet split isn't used, if the
-		 * hardware wrote DD then it will be non-zero
+		 * hardware wrote DD then the length will be non-zero
 		 */
-		if (!i40e_test_staterr(rx_desc,
-				       BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
+		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+		if (!size)
 			break;
 
 		/* This memory barrier is needed to keep us from reading
-		 * any other fields out of the rx_desc until we know the
-		 * DD bit is set.
+		 * any other fields out of the rx_desc until we have
+		 * verified the descriptor has been written back.
 		 */
 		dma_rmb();
 
-		skb = i40e_fetch_rx_buffer(rx_ring, rx_desc, skb);
+		skb = i40e_fetch_rx_buffer(rx_ring, rx_desc, skb, size);
 		if (!skb)
 			break;
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_txrx.c b/drivers/net/ethernet/intel/i40evf/i40evf_txrx.c
--- a/drivers/net/ethernet/intel/i40evf/i40evf_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_txrx.c
@@ -1116,6 +1116,7 @@ add_tail_frag:
  * i40evf_fetch_rx_buffer - Allocate skb and populate it
  * @rx_ring: rx descriptor ring to transact packets on
  * @rx_desc: descriptor containing info written by hardware
+ * @size: size of buffer to add to skb
  *
  * This function allocates an skb on the fly, and populates it with the page
  * data from the current receive descriptor, taking care to set up the skb
@@ -1125,13 +1126,9 @@ add_tail_frag:
 static inline
 struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,
 				       union i40e_rx_desc *rx_desc,
-				       struct sk_buff *skb)
+				       struct sk_buff *skb,
+				       unsigned int size)
 {
-	u64 local_status_error_len =
-		le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-	unsigned int size =
-		(local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
-		I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
 	struct i40e_rx_buffer *rx_buffer;
 	struct page *page;
 
@@ -1244,6 +1241,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 
 	while (likely(total_rx_packets < budget)) {
 		union i40e_rx_desc *rx_desc;
+		unsigned int size;
 		u16 vlan_tag;
 		u8 rx_ptype;
 		u64 qword;
@@ -1260,19 +1258,21 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 		/* status_error_len will always be zero for unused descriptors
 		 * because it's cleared in cleanup, and overlaps with hdr_addr
 		 * which is always zero because packet split isn't used, if the
-		 * hardware wrote DD then it will be non-zero
+		 * hardware wrote DD then the length will be non-zero
 		 */
-		if (!i40e_test_staterr(rx_desc,
-				       BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
+		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+		if (!size)
 			break;
 
 		/* This memory barrier is needed to keep us from reading
-		 * any other fields out of the rx_desc until we know the
-		 * DD bit is set.
+		 * any other fields out of the rx_desc until we have
+		 * verified the descriptor has been written back.
 		 */
 		dma_rmb();
 
-		skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc, skb);
+		skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc, skb, size);
 		if (!skb)
 			break;
 
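For contrast, the check this patch removes boiled down to a masked test on
the very same qword that holds the length, so the length-based test costs
no extra descriptor load. A sketch against the simplified layout above
(rx_desc_dd_set is a hypothetical name; the real driver used the
i40e_test_staterr() helper):

#define I40E_RX_DESC_STATUS_DD_SHIFT	0

/* Old-style "descriptor done" test: loads the same qword that holds
 * the length, but only yields a boolean, so the fetch path had to
 * parse the length out of the descriptor a second time.
 */
static bool rx_desc_dd_set(const struct rx_desc_wb *rx_desc)
{
	u64 qword = le64_to_cpu(rx_desc->status_error_len);

	return qword & BIT(I40E_RX_DESC_STATUS_DD_SHIFT);
}

Passing size down into the fetch function is what lets the patch delete
the duplicate length extraction that previously sat at the top of
i40e_fetch_rx_buffer()/i40evf_fetch_rx_buffer().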