mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-06-19 21:21:09 +00:00
via-rhine: allocate and map receive buffer in a single transaction
It's used to initialize the receive ring but it will actually shine when the receive poll code is reworked. Signed-off-by: Francois Romieu <romieu@fr.zoreil.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
e45af49795
commit
a21bb8bae1
1 changed files with 43 additions and 14 deletions
|
@@ -1213,12 +1213,47 @@ static void free_ring(struct net_device* dev)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void alloc_rbufs(struct net_device *dev)
|
struct rhine_skb_dma {
|
||||||
|
struct sk_buff *skb;
|
||||||
|
dma_addr_t dma;
|
||||||
|
};
|
||||||
|
|
||||||
|
static inline int rhine_skb_dma_init(struct net_device *dev,
|
||||||
|
struct rhine_skb_dma *sd)
|
||||||
{
|
{
|
||||||
struct rhine_private *rp = netdev_priv(dev);
|
struct rhine_private *rp = netdev_priv(dev);
|
||||||
struct device *hwdev = dev->dev.parent;
|
struct device *hwdev = dev->dev.parent;
|
||||||
|
const int size = rp->rx_buf_sz;
|
||||||
|
|
||||||
|
sd->skb = netdev_alloc_skb(dev, size);
|
||||||
|
if (!sd->skb)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
|
sd->dma = dma_map_single(hwdev, sd->skb->data, size, DMA_FROM_DEVICE);
|
||||||
|
if (unlikely(dma_mapping_error(hwdev, sd->dma))) {
|
||||||
|
netif_err(rp, drv, dev, "Rx DMA mapping failure\n");
|
||||||
|
dev_kfree_skb_any(sd->skb);
|
||||||
|
return -EIO;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void rhine_skb_dma_nic_store(struct rhine_private *rp,
|
||||||
|
struct rhine_skb_dma *sd, int entry)
|
||||||
|
{
|
||||||
|
rp->rx_skbuff_dma[entry] = sd->dma;
|
||||||
|
rp->rx_skbuff[entry] = sd->skb;
|
||||||
|
|
||||||
|
rp->rx_ring[entry].addr = cpu_to_le32(sd->dma);
|
||||||
|
dma_wmb();
|
||||||
|
}
|
||||||
|
|
||||||
|
static void alloc_rbufs(struct net_device *dev)
|
||||||
|
{
|
||||||
|
struct rhine_private *rp = netdev_priv(dev);
|
||||||
dma_addr_t next;
|
dma_addr_t next;
|
||||||
int i;
|
int rc, i;
|
||||||
|
|
||||||
rp->dirty_rx = rp->cur_rx = 0;
|
rp->dirty_rx = rp->cur_rx = 0;
|
||||||
|
|
||||||
|
@@ -1239,20 +1274,14 @@ static void alloc_rbufs(struct net_device *dev)
|
||||||
|
|
||||||
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
|
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
|
||||||
for (i = 0; i < RX_RING_SIZE; i++) {
|
for (i = 0; i < RX_RING_SIZE; i++) {
|
||||||
struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
|
struct rhine_skb_dma sd;
|
||||||
rp->rx_skbuff[i] = skb;
|
|
||||||
if (skb == NULL)
|
rc = rhine_skb_dma_init(dev, &sd);
|
||||||
|
if (rc < 0)
|
||||||
break;
|
break;
|
||||||
|
|
||||||
rp->rx_skbuff_dma[i] =
|
rhine_skb_dma_nic_store(rp, &sd, i);
|
||||||
dma_map_single(hwdev, skb->data, rp->rx_buf_sz,
|
|
||||||
DMA_FROM_DEVICE);
|
|
||||||
if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[i])) {
|
|
||||||
rp->rx_skbuff_dma[i] = 0;
|
|
||||||
dev_kfree_skb(skb);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
|
|
||||||
rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
|
rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
|
||||||
}
|
}
|
||||||
rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
|
rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue