net: Remove redundant NAPI functions

Following the removal of the unused struct net_device * parameter from
the NAPI functions named *netif_rx_* in commit 908a7a1, they are
exactly equivalent to the corresponding *napi_* functions and are
therefore redundant.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Ben Hutchings 2009-01-19 16:43:59 -08:00 committed by David S. Miller
parent 627af770c6
commit 288379f050
66 changed files with 227 additions and 277 deletions
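
Most of the 66 changed files are drivers whose call sites simply switch from the removed wrappers to the matching *napi_* functions, since both take the same struct napi_struct * argument. A rough sketch of the RX interrupt path of a converted driver follows; the foo_* names, the private structure and the IRQ-masking helper are hypothetical illustrations, not part of this commit:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Hypothetical driver private data; only the embedded napi_struct matters here. */
struct foo_priv {
	struct napi_struct napi;
};

/* Hypothetical helper: mask the device's RX interrupt source,
 * e.g. by writing an interrupt-mask register.
 */
static void foo_mask_rx_irq(struct foo_priv *priv)
{
}

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct foo_priv *priv = dev_id;

	/* Before this commit: netif_rx_schedule_prep() / __netif_rx_schedule() */
	if (napi_schedule_prep(&priv->napi)) {
		foo_mask_rx_irq(priv);
		__napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}

A driver that does not need to touch hardware between the two steps can equally call napi_schedule(&priv->napi), the direct replacement for the one-shot netif_rx_schedule() wrapper.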


@@ -1574,56 +1574,6 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
	return (1 << debug_value) - 1;
}

/* Test if receive needs to be scheduled but only if up */
static inline int netif_rx_schedule_prep(struct napi_struct *napi)
{
	return napi_schedule_prep(napi);
}

/* Add interface to tail of rx poll list. This assumes that _prep has
 * already been called and returned 1.
 */
static inline void __netif_rx_schedule(struct napi_struct *napi)
{
	__napi_schedule(napi);
}

/* Try to reschedule poll. Called by irq handler. */
static inline void netif_rx_schedule(struct napi_struct *napi)
{
	if (netif_rx_schedule_prep(napi))
		__netif_rx_schedule(napi);
}

/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */
static inline int netif_rx_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__netif_rx_schedule(napi);
		return 1;
	}
	return 0;
}

/* same as netif_rx_complete, except that local_irq_save(flags)
 * has already been issued
 */
static inline void __netif_rx_complete(struct napi_struct *napi)
{
	__napi_complete(napi);
}

/* Remove interface from poll list: it must be in the poll list
 * on current cpu. This primitive is called by dev->poll(), when
 * it completes the work. The device cannot be out of poll list at this
 * moment, it is BUG().
 */
static inline void netif_rx_complete(struct napi_struct *napi)
{
	napi_complete(napi);
}

static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
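
The completion side of the rename follows the same pattern: where a driver's poll routine used to call netif_rx_complete() (or __netif_rx_complete() with local interrupts already disabled), it now calls napi_complete() or __napi_complete() directly. A minimal sketch of such a poll routine, reusing the hypothetical foo_priv above; foo_rx() and foo_unmask_rx_irq() are likewise hypothetical stand-ins, not part of this commit:

/* Hypothetical stand-ins for the driver's RX processing and IRQ unmasking. */
static int foo_rx(struct foo_priv *priv, int budget);
static void foo_unmask_rx_irq(struct foo_priv *priv);

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	int work_done = foo_rx(priv, budget);

	if (work_done < budget) {
		/* Was: netif_rx_complete(napi); behaviour is identical. */
		napi_complete(napi);
		foo_unmask_rx_irq(priv);
	}

	return work_done;
}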