net: fec_mxc: allow use with cache enabled
Ensure that transmit and receive buffers are cache-line aligned.
Invalidate cache for each packet as received, update receive buffer
descriptors one cache line at a time, flush cache before transmitting.

Original patch by Marek:
http://lists.denx.de/pipermail/u-boot/2012-February/117695.html

Signed-off-by: Eric Nelson <eric.nelson at boundarydevices.com>
Acked-by: Marek Vasut <marex@denx.de>
Tested-by: Marek Vasut <marex@denx.de>
parent 2c4b3c19a1
commit 5c1ad3e6f8

2 changed files with 191 additions and 103 deletions
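The whole patch is an application of the standard rule for cache-coherent DMA: clean (flush) the data cache before the device reads memory the CPU wrote, and invalidate the data cache before the CPU reads memory the device wrote, always on whole cache lines. A minimal sketch of that rule using U-Boot's cache API; the helper names and parameters are illustrative, not part of the driver:

	/* Illustrative helpers only -- the driver open-codes this pattern. */
	static void dma_prepare_to_device(void *buf, int len)
	{
		uint32_t addr = (uint32_t)buf;

		/* Write dirty cache lines back so DMA reads current data */
		flush_dcache_range(addr, addr + roundup(len, ARCH_DMA_MINALIGN));
	}

	static void dma_complete_from_device(void *buf, int len)
	{
		uint32_t addr = (uint32_t)buf;

		/* Drop stale cache lines so the CPU sees what DMA wrote */
		invalidate_dcache_range(addr, addr + roundup(len, ARCH_DMA_MINALIGN));
	}

Both ranges must start and end on ARCH_DMA_MINALIGN boundaries, which is why every buffer below is both aligned and padded out with roundup().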
drivers/net/fec_mxc.c

@@ -50,6 +50,18 @@ DECLARE_GLOBAL_DATA_PTR;
 #define CONFIG_FEC_MXC_SWAP_PACKET
 #endif
 
+#define RXDESC_PER_CACHELINE (ARCH_DMA_MINALIGN/sizeof(struct fec_bd))
+
+/* Check various alignment issues at compile time */
+#if ((ARCH_DMA_MINALIGN < 16) || (ARCH_DMA_MINALIGN % 16 != 0))
+#error "ARCH_DMA_MINALIGN must be multiple of 16!"
+#endif
+
+#if ((PKTALIGN < ARCH_DMA_MINALIGN) || \
+	(PKTALIGN % ARCH_DMA_MINALIGN != 0))
+#error "PKTALIGN must be multiple of ARCH_DMA_MINALIGN!"
+#endif
+
 #undef	DEBUG
 
 struct nbuf {
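RXDESC_PER_CACHELINE is what later lets fec_recv() recycle descriptors a whole cache line at a time. The arithmetic, assuming the canonical 8-byte fec_bd layout from fec_mxc.h and a 32-byte cache line as an example (the real ARCH_DMA_MINALIGN is architecture-specific):

	/* Layout per fec_mxc.h; a 32-byte line size is assumed here. */
	struct fec_bd {
		uint16_t data_length;	/* payload length in bytes */
		uint16_t status;	/* ownership and status flags */
		uint32_t data_pointer;	/* DMA address of the data buffer */
	};				/* sizeof(struct fec_bd) == 8 */

	/* RXDESC_PER_CACHELINE = 32 / 8 = 4 descriptors per cache line */

The compile-time #error checks guard the same assumptions: descriptor rings and packet buffers can only be maintained safely if ARCH_DMA_MINALIGN and PKTALIGN are compatible multiples.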
@@ -259,43 +271,52 @@ static int fec_tx_task_disable(struct fec_priv *fec)
  * Initialize receive task's buffer descriptors
  * @param[in] fec all we know about the device yet
  * @param[in] count receive buffer count to be allocated
- * @param[in] size size of each receive buffer
+ * @param[in] dsize desired size of each receive buffer
  * @return 0 on success
  *
  * For this task we need additional memory for the data buffers. And each
- * data buffer requires some alignment. They must be aligned to a specific
- * boundary each (DB_DATA_ALIGNMENT).
+ * data buffer requires some alignment. They must be aligned to a specific
+ * boundary each.
  */
-static int fec_rbd_init(struct fec_priv *fec, int count, int size)
+static int fec_rbd_init(struct fec_priv *fec, int count, int dsize)
 {
-	int ix;
-	uint32_t p = 0;
+	uint32_t size;
+	int i;
 
-	/* reserve data memory and consider alignment */
-	if (fec->rdb_ptr == NULL)
-		fec->rdb_ptr = malloc(size * count + DB_DATA_ALIGNMENT);
-	p = (uint32_t)fec->rdb_ptr;
-	if (!p) {
-		puts("fec_mxc: not enough malloc memory\n");
-		return -ENOMEM;
-	}
-	memset((void *)p, 0, size * count + DB_DATA_ALIGNMENT);
-	p += DB_DATA_ALIGNMENT-1;
-	p &= ~(DB_DATA_ALIGNMENT-1);
-
-	for (ix = 0; ix < count; ix++) {
-		writel(p, &fec->rbd_base[ix].data_pointer);
-		p += size;
-		writew(FEC_RBD_EMPTY, &fec->rbd_base[ix].status);
-		writew(0, &fec->rbd_base[ix].data_length);
-	}
 	/*
-	 * mark the last RBD to close the ring
+	 * Allocate memory for the buffers. This allocation respects the
+	 * alignment
 	 */
-	writew(FEC_RBD_WRAP | FEC_RBD_EMPTY, &fec->rbd_base[ix - 1].status);
+	size = roundup(dsize, ARCH_DMA_MINALIGN);
+	for (i = 0; i < count; i++) {
+		uint32_t data_ptr = readl(&fec->rbd_base[i].data_pointer);
+		if (data_ptr == 0) {
+			uint8_t *data = memalign(ARCH_DMA_MINALIGN,
+						 size);
+			if (!data) {
+				printf("%s: error allocating rxbuf %d\n",
+				       __func__, i);
+				goto err;
+			}
+			writel((uint32_t)data, &fec->rbd_base[i].data_pointer);
+		} /* needs allocation */
+		writew(FEC_RBD_EMPTY, &fec->rbd_base[i].status);
+		writew(0, &fec->rbd_base[i].data_length);
+	}
+
+	/* Mark the last RBD to close the ring. */
+	writew(FEC_RBD_WRAP | FEC_RBD_EMPTY, &fec->rbd_base[i - 1].status);
 	fec->rbd_index = 0;
 
 	return 0;
+
+err:
+	for (; i >= 0; i--) {
+		uint32_t data_ptr = readl(&fec->rbd_base[i].data_pointer);
+		free((void *)data_ptr);
+	}
+
+	return -ENOMEM;
 }
 
 /**
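fec_rbd_init() now allocates each receive buffer separately with memalign(), so every buffer begins on a cache-line boundary and, thanks to roundup(), also ends on one; no buffer ever shares a cache line with foreign data. A stand-alone sketch of the idiom (the helper and its name are illustrative only):

	#include <malloc.h>	/* memalign(), free() */

	/* Allocate one RX buffer that owns all of its cache lines. */
	static uint8_t *alloc_dma_buffer(int dsize)
	{
		uint32_t size = roundup(dsize, ARCH_DMA_MINALIGN);

		/* aligned start + padded length = safe to invalidate */
		return memalign(ARCH_DMA_MINALIGN, size);	/* NULL on failure */
	}

On failure the function unwinds through the err: loop above, freeing every buffer it managed to attach to a descriptor.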
@@ -312,9 +333,13 @@ static int fec_rbd_init(struct fec_priv *fec, int count, int size)
  */
 static void fec_tbd_init(struct fec_priv *fec)
 {
+	unsigned addr = (unsigned)fec->tbd_base;
+	unsigned size = roundup(2 * sizeof(struct fec_bd),
+			ARCH_DMA_MINALIGN);
 	writew(0x0000, &fec->tbd_base[0].status);
 	writew(FEC_TBD_WRAP, &fec->tbd_base[1].status);
 	fec->tbd_index = 0;
+	flush_dcache_range(addr, addr+size);
 }
 
 /**
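Both transmit descriptors fit inside a single cache line (the compile-time check above guarantees ARCH_DMA_MINALIGN >= 16), so the one flush at the end of fec_tbd_init() covers the whole TX ring. Example arithmetic, again assuming a 32-byte line:

	/* 2 descriptors * 8 bytes = 16, rounded up to one 32-byte line */
	unsigned size = roundup(2 * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
	/* size == 32: flush_dcache_range(addr, addr + size) writes back
	 * both TBDs with a single line operation */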
@@ -324,16 +349,10 @@ static void fec_tbd_init(struct fec_priv *fec)
  */
 static void fec_rbd_clean(int last, struct fec_bd *pRbd)
 {
-	/*
-	 * Reset buffer descriptor as empty
-	 */
+	unsigned short flags = FEC_RBD_EMPTY;
 	if (last)
-		writew(FEC_RBD_WRAP | FEC_RBD_EMPTY, &pRbd->status);
-	else
-		writew(FEC_RBD_EMPTY, &pRbd->status);
-	/*
-	 * no data in it
-	 */
+		flags |= FEC_RBD_WRAP;
+	writew(flags, &pRbd->status);
 	writew(0, &pRbd->data_length);
 }
 
@@ -387,12 +406,25 @@ static int fec_open(struct eth_device *edev)
 {
 	struct fec_priv *fec = (struct fec_priv *)edev->priv;
 	int speed;
+	uint32_t addr, size;
+	int i;
 
 	debug("fec_open: fec_open(dev)\n");
 	/* full-duplex, heartbeat disabled */
 	writel(1 << 2, &fec->eth->x_cntrl);
 	fec->rbd_index = 0;
 
+	/* Invalidate all descriptors */
+	for (i = 0; i < FEC_RBD_NUM - 1; i++)
+		fec_rbd_clean(0, &fec->rbd_base[i]);
+	fec_rbd_clean(1, &fec->rbd_base[i]);
+
+	/* Flush the descriptors into RAM */
+	size = roundup(FEC_RBD_NUM * sizeof(struct fec_bd),
+			ARCH_DMA_MINALIGN);
+	addr = (uint32_t)fec->rbd_base;
+	flush_dcache_range(addr, addr + size);
+
 #ifdef FEC_QUIRK_ENET_MAC
 	/* Enable ENET HW endian SWAP */
 	writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_DBSWAP,
@@ -478,38 +510,55 @@ static int fec_open(struct eth_device *edev)
 
 static int fec_init(struct eth_device *dev, bd_t* bd)
 {
-	uint32_t base;
 	struct fec_priv *fec = (struct fec_priv *)dev->priv;
 	uint32_t mib_ptr = (uint32_t)&fec->eth->rmon_t_drop;
 	uint32_t rcntrl;
-	int i;
+	uint32_t size;
+	int i, ret;
 
 	/* Initialize MAC address */
 	fec_set_hwaddr(dev);
 
 	/*
-	 * reserve memory for both buffer descriptor chains at once
-	 * Datasheet forces the startaddress of each chain is 16 byte
-	 * aligned
+	 * Allocate transmit descriptors, there are two in total. This
+	 * allocation respects cache alignment.
 	 */
-	if (fec->base_ptr == NULL)
-		fec->base_ptr = malloc((2 + FEC_RBD_NUM) *
-				sizeof(struct fec_bd) + DB_ALIGNMENT);
-	base = (uint32_t)fec->base_ptr;
-	if (!base) {
-		puts("fec_mxc: not enough malloc memory\n");
-		return -ENOMEM;
-	}
-	memset((void *)base, 0, (2 + FEC_RBD_NUM) *
-			sizeof(struct fec_bd) + DB_ALIGNMENT);
-	base += (DB_ALIGNMENT-1);
-	base &= ~(DB_ALIGNMENT-1);
-
-	fec->rbd_base = (struct fec_bd *)base;
-
-	base += FEC_RBD_NUM * sizeof(struct fec_bd);
-
-	fec->tbd_base = (struct fec_bd *)base;
+	if (!fec->tbd_base) {
+		size = roundup(2 * sizeof(struct fec_bd),
+				ARCH_DMA_MINALIGN);
+		fec->tbd_base = memalign(ARCH_DMA_MINALIGN, size);
+		if (!fec->tbd_base) {
+			ret = -ENOMEM;
+			goto err1;
+		}
+		memset(fec->tbd_base, 0, size);
+		fec_tbd_init(fec);
+		flush_dcache_range((unsigned)fec->tbd_base, size);
+	}
+
+	/*
+	 * Allocate receive descriptors. This allocation respects cache
+	 * alignment.
+	 */
+	if (!fec->rbd_base) {
+		size = roundup(FEC_RBD_NUM * sizeof(struct fec_bd),
+				ARCH_DMA_MINALIGN);
+		fec->rbd_base = memalign(ARCH_DMA_MINALIGN, size);
+		if (!fec->rbd_base) {
+			ret = -ENOMEM;
+			goto err2;
+		}
+		memset(fec->rbd_base, 0, size);
+		/*
+		 * Initialize RxBD ring
+		 */
+		if (fec_rbd_init(fec, FEC_RBD_NUM, FEC_MAX_PKT_SIZE) < 0) {
+			ret = -ENOMEM;
+			goto err3;
+		}
+		flush_dcache_range((unsigned)fec->rbd_base,
+				   (unsigned)fec->rbd_base + size);
+	}
 
 	/*
 	 * Set interrupt mask register
@@ -566,23 +615,19 @@ static int fec_init(struct eth_device *dev, bd_t* bd)
 	writel((uint32_t)fec->tbd_base, &fec->eth->etdsr);
 	writel((uint32_t)fec->rbd_base, &fec->eth->erdsr);
 
-	/*
-	 * Initialize RxBD/TxBD rings
-	 */
-	if (fec_rbd_init(fec, FEC_RBD_NUM, FEC_MAX_PKT_SIZE) < 0) {
-		free(fec->base_ptr);
-		fec->base_ptr = NULL;
-		return -ENOMEM;
-	}
-	fec_tbd_init(fec);
-
-
 #ifndef CONFIG_PHYLIB
 	if (fec->xcv_type != SEVENWIRE)
 		miiphy_restart_aneg(dev);
 #endif
 	fec_open(dev);
 	return 0;
+
+err3:
+	free(fec->rbd_base);
+err2:
+	free(fec->tbd_base);
+err1:
+	return ret;
 }
 
 /**
@@ -631,9 +676,11 @@ static void fec_halt(struct eth_device *dev)
  * @param[in] length Data count in bytes
  * @return 0 on success
  */
-static int fec_send(struct eth_device *dev, volatile void* packet, int length)
+static int fec_send(struct eth_device *dev, volatile void *packet, int length)
 {
 	unsigned int status;
+	uint32_t size;
+	uint32_t addr;
 
 	/*
 	 * This routine transmits one frame. This routine only accepts
@@ -650,15 +697,21 @@ static int fec_send(struct eth_device *dev, volatile void* packet, int length)
 	}
 
 	/*
-	 * Setup the transmit buffer
-	 * Note: We are always using the first buffer for transmission,
-	 * the second will be empty and only used to stop the DMA engine
+	 * Setup the transmit buffer. We are always using the first buffer for
+	 * transmission, the second will be empty and only used to stop the DMA
+	 * engine. We also flush the packet to RAM here to avoid cache trouble.
 	 */
 #ifdef CONFIG_FEC_MXC_SWAP_PACKET
 	swap_packet((uint32_t *)packet, length);
 #endif
 
+	addr = (uint32_t)packet;
+	size = roundup(length, ARCH_DMA_MINALIGN);
+	flush_dcache_range(addr, addr + size);
+
 	writew(length, &fec->tbd_base[fec->tbd_index].data_length);
-	writel((uint32_t)packet, &fec->tbd_base[fec->tbd_index].data_pointer);
+	writel(addr, &fec->tbd_base[fec->tbd_index].data_pointer);
 
 	/*
 	 * update BD's status now
 	 * This block:
@@ -671,17 +724,31 @@ static int fec_send(struct eth_device *dev, volatile void* packet, int length)
 	status |= FEC_TBD_LAST | FEC_TBD_TC | FEC_TBD_READY;
 	writew(status, &fec->tbd_base[fec->tbd_index].status);
 
+	/*
+	 * Flush data cache. This code flushes both TX descriptors to RAM.
+	 * After this code, the descriptors will be safely in RAM and we
+	 * can start DMA.
+	 */
+	size = roundup(2 * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
+	addr = (uint32_t)fec->tbd_base;
+	flush_dcache_range(addr, addr + size);
+
 	/*
 	 * Enable SmartDMA transmit task
 	 */
 	fec_tx_task_enable(fec);
 
 	/*
-	 * wait until frame is sent .
+	 * Wait until frame is sent. On each turn of the wait cycle, we must
+	 * invalidate data cache to see what's really in RAM. Also, we need
+	 * barrier here.
 	 */
+	invalidate_dcache_range(addr, addr + size);
 	while (readw(&fec->tbd_base[fec->tbd_index].status) & FEC_TBD_READY) {
 		udelay(1);
+		invalidate_dcache_range(addr, addr + size);
 	}
 	debug("fec_send: status 0x%x index %d\n",
 	      readw(&fec->tbd_base[fec->tbd_index].status),
 	      fec->tbd_index);
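The transmit path is now: flush the packet, flush the descriptors, start DMA, then poll with an invalidate on every turn so the READY bit is re-read from RAM instead of from a stale cache line. Condensed from the two hunks above (not a drop-in function; error handling as in the driver):

	addr = (uint32_t)fec->tbd_base;
	size = roundup(2 * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);

	flush_dcache_range(addr, addr + size);	/* descriptors -> RAM */
	fec_tx_task_enable(fec);		/* DMA may now run */

	invalidate_dcache_range(addr, addr + size);
	while (readw(&fec->tbd_base[fec->tbd_index].status) & FEC_TBD_READY) {
		udelay(1);
		/* drop the cached line; DMA may have cleared READY in RAM */
		invalidate_dcache_range(addr, addr + size);
	}

Without the invalidate inside the loop the CPU could spin forever on a cached copy of the status word.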
@@ -707,6 +774,8 @@ static int fec_recv(struct eth_device *dev)
 	int frame_length, len = 0;
 	struct nbuf *frame;
 	uint16_t bd_status;
+	uint32_t addr, size;
+	int i;
 	uchar buff[FEC_MAX_PKT_SIZE];
 
 	/*
@@ -737,8 +806,23 @@ static int fec_recv(struct eth_device *dev)
 	}
 
 	/*
-	 * ensure reading the right buffer status
+	 * Read the buffer status. Before the status can be read, the data cache
+	 * must be invalidated, because the data in RAM might have been changed
+	 * by DMA. The descriptors are properly aligned to cachelines so there's
+	 * no need to worry they'd overlap.
+	 *
+	 * WARNING: By invalidating the descriptor here, we also invalidate
+	 * the descriptors surrounding this one. Therefore we can NOT change the
+	 * contents of this descriptor nor the surrounding ones. The problem is
+	 * that in order to mark the descriptor as processed, we need to change
+	 * the descriptor. The solution is to mark the whole cache line when all
+	 * descriptors in the cache line are processed.
 	 */
+	addr = (uint32_t)rbd;
+	addr &= ~(ARCH_DMA_MINALIGN - 1);
+	size = roundup(sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
+	invalidate_dcache_range(addr, addr + size);
+
 	bd_status = readw(&rbd->status);
 	debug("fec_recv: status 0x%x\n", bd_status);
 
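The invalidate must start on a cache-line boundary because invalidate_dcache_range() operates on whole lines. A worked example with 32-byte lines (addresses illustrative):

	uint32_t addr = 0x80000008;		/* &rbd, mid-line */
	addr &= ~(ARCH_DMA_MINALIGN - 1);	/* -> 0x80000000  */
	uint32_t size = roundup(sizeof(struct fec_bd),
				ARCH_DMA_MINALIGN);	/* 8 -> 32 bytes */
	invalidate_dcache_range(addr, addr + size);	/* one full line */

This is also why the WARNING above forbids writing to any descriptor in the invalidated line: a later write-back of that line would overwrite whatever DMA put in the neighbouring descriptors.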
@@ -750,6 +834,13 @@ static int fec_recv(struct eth_device *dev)
 	 */
 	frame = (struct nbuf *)readl(&rbd->data_pointer);
 	frame_length = readw(&rbd->data_length) - 4;
+	/*
+	 * Invalidate data cache over the buffer
+	 */
+	addr = (uint32_t)frame;
+	size = roundup(frame_length, ARCH_DMA_MINALIGN);
+	invalidate_dcache_range(addr, addr + size);
+
 	/*
 	 * Fill the buffer and pass it to upper layers
 	 */
@@ -765,11 +856,25 @@ static int fec_recv(struct eth_device *dev)
 			(ulong)rbd->data_pointer,
 			bd_status);
 	}
 
 	/*
-	 * free the current buffer, restart the engine
-	 * and move forward to the next buffer
+	 * Free the current buffer, restart the engine and move forward
+	 * to the next buffer. Here we check if the whole cacheline of
+	 * descriptors was already processed and if so, we mark it free
+	 * as whole.
 	 */
-	fec_rbd_clean(fec->rbd_index == (FEC_RBD_NUM - 1) ? 1 : 0, rbd);
+	size = RXDESC_PER_CACHELINE - 1;
+	if ((fec->rbd_index & size) == size) {
+		i = fec->rbd_index - size;
+		addr = (uint32_t)&fec->rbd_base[i];
+		for (; i <= fec->rbd_index ; i++) {
+			fec_rbd_clean(i == (FEC_RBD_NUM - 1),
+				      &fec->rbd_base[i]);
+		}
+		flush_dcache_range(addr,
+				   addr + ARCH_DMA_MINALIGN);
+	}
+
 	fec_rx_task_enable(fec);
 	fec->rbd_index = (fec->rbd_index + 1) % FEC_RBD_NUM;
 }
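With RXDESC_PER_CACHELINE = 4 (the 32-byte-line example from earlier), the mask test fires on rbd_index 3, 7, 11, and so on; only then is the whole line of four descriptors rewritten and flushed in one go. A short trace under that assumption:

	uint32_t mask = RXDESC_PER_CACHELINE - 1;	/* 0x3 */

	/* rbd_index 0, 1, 2: (index & mask) != mask -> BD left as-is  */
	/* rbd_index 3: (3 & 3) == 3 -> clean BDs 0..3, flush the line */
	/* rbd_index 7: (7 & 3) == 3 -> clean BDs 4..7, flush the line */

Descriptors are handed back to the controller slightly later than before, but never from a partially-owned cache line.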
drivers/net/fec_mxc.h

@@ -233,22 +233,6 @@ struct ethernet_regs {
 #define MIIGSK_ENR_EN			(1 << 1)
 #endif
 
-/**
- * @brief Descriptor buffer alignment
- *
- * i.MX27 requires a 16 byte alignment (but for the first element only)
- */
-#define DB_ALIGNMENT		16
-
-/**
- * @brief Data buffer alignment
- *
- * i.MX27 requires a four byte alignment for transmit and 16 bits
- * alignment for receive so take 16
- * Note: Valid for member data_pointer in struct buffer_descriptor
- */
-#define DB_DATA_ALIGNMENT	16
-
 /**
  * @brief Receive & Transmit Buffer Descriptor definitions
  *
@@ -282,8 +266,7 @@ struct fec_priv {
 	struct fec_bd *tbd_base;	/* TBD ring */
 	int tbd_index;			/* next transmit BD to write */
 	bd_t *bd;
-	void *rdb_ptr;
-	void *base_ptr;
+	uint8_t *tdb_ptr;
 	int dev_id;
 	int phy_id;
 	struct mii_dev *bus;