net_dma: simple removal
Per commit "7787380336
net_dma: mark broken" net_dma is no longer used
and there is no plan to fix it.
This is the mechanical removal of bits in CONFIG_NET_DMA ifdef guards.
Reverting the remainder of the net_dma-induced changes is deferred to
subsequent patches.
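For a sense of what "mechanical" means here, the sketch below condenses the
pattern being excised into a tiny self-contained C program: a compile-time
guarded offload attempt sitting in front of the ordinary copy path, with a
copied_early flag deciding whether the fallback runs. The names
(try_offload_copy, CONFIG_NET_DMA_LIKE_OFFLOAD) are illustrative stand-ins,
not the kernel's; the real sites appear in the tcp_input.c hunks below.

    /* Illustrative sketch only -- not kernel code. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* #define CONFIG_NET_DMA_LIKE_OFFLOAD 1 */

    static bool try_offload_copy(char *dst, const char *src, size_t len)
    {
    #ifdef CONFIG_NET_DMA_LIKE_OFFLOAD
            memcpy(dst, src, len);  /* pretend an async engine took the copy */
            return true;            /* becomes the caller's copied_early */
    #else
            (void)dst; (void)src; (void)len;
            return false;           /* no engine: fall back to the CPU copy */
    #endif
    }

    int main(void)
    {
            char dst[16];
            const char *src = "payload";
            bool copied_early = try_offload_copy(dst, src, strlen(src) + 1);

            if (!copied_early)      /* the fallback path the removal keeps */
                    memcpy(dst, src, strlen(src) + 1);
            printf("%s (offloaded: %d)\n", dst, copied_early);
            return 0;
    }

With the guard gone, the flag and the dual path collapse to the plain copy
branch, which is exactly the shape of the hunks that follow.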
Marked for stable due to Roman's report of a memory leak in
dma_pin_iovec_pages():
https://lkml.org/lkml/2014/9/3/177
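The lkml report concerns dma_pin_iovec_pages(); one plausible shape for that
class of leak (an error path that returns with user pages still pinned) is
sketched below. This is purely illustrative: pin_user_buf(), unpin_user_buf()
and do_async_copy() are hypothetical stand-ins, not the kernel's API.

    /* Hypothetical stand-ins, not dma_pin_iovec_pages() itself. */
    #include <stdio.h>
    #include <stdlib.h>

    static void *pin_user_buf(size_t len) { return malloc(len); } /* "pin" */
    static void unpin_user_buf(void *buf) { free(buf); }          /* "unpin" */
    static int do_async_copy(void *buf)   { (void)buf; return -1; } /* fails */

    static int pinned_copy(size_t len)
    {
            void *buf = pin_user_buf(len);
            if (!buf)
                    return -1;
            if (do_async_copy(buf) < 0)
                    return -1;      /* leak: error path skips the unpin */
            unpin_user_buf(buf);
            return 0;
    }

    int main(void)
    {
            if (pinned_copy(4096) < 0)
                    fprintf(stderr, "copy failed (pinned buffer leaked)\n");
            return 0;
    }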
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Vinod Koul <vinod.koul@intel.com>
Cc: David Whipple <whipple@securedatainnovations.ch>
Cc: Alexander Duyck <alexander.h.duyck@intel.com>
Cc: <stable@vger.kernel.org>
Reported-by: Roman Gushchin <klamm@yandex-team.ru>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
commit 7bced39751 (parent 08223d80df)
28 changed files with 35 additions and 894 deletions
net/ipv4/tcp_input.c

@@ -73,7 +73,6 @@
 #include <net/inet_common.h>
 #include <linux/ipsec.h>
 #include <asm/unaligned.h>
-#include <net/netdma.h>
 
 int sysctl_tcp_timestamps __read_mostly = 1;
 int sysctl_tcp_window_scaling __read_mostly = 1;
@@ -4970,53 +4969,6 @@ static inline bool tcp_checksum_complete_user(struct sock *sk,
 	       __tcp_checksum_complete_user(sk, skb);
 }
 
-#ifdef CONFIG_NET_DMA
-static bool tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
-				   int hlen)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-	int chunk = skb->len - hlen;
-	int dma_cookie;
-	bool copied_early = false;
-
-	if (tp->ucopy.wakeup)
-		return false;
-
-	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-		tp->ucopy.dma_chan = net_dma_find_channel();
-
-	if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
-
-		dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan,
-							 skb, hlen,
-							 tp->ucopy.iov, chunk,
-							 tp->ucopy.pinned_list);
-
-		if (dma_cookie < 0)
-			goto out;
-
-		tp->ucopy.dma_cookie = dma_cookie;
-		copied_early = true;
-
-		tp->ucopy.len -= chunk;
-		tp->copied_seq += chunk;
-		tcp_rcv_space_adjust(sk);
-
-		if ((tp->ucopy.len == 0) ||
-		    (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) ||
-		    (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) {
-			tp->ucopy.wakeup = 1;
-			sk->sk_data_ready(sk, 0);
-		}
-	} else if (chunk > 0) {
-		tp->ucopy.wakeup = 1;
-		sk->sk_data_ready(sk, 0);
-	}
-out:
-	return copied_early;
-}
-#endif /* CONFIG_NET_DMA */
-
 /* Does PAWS and seqno based validation of an incoming segment, flags will
  * play significant role here.
  */
@@ -5201,14 +5153,6 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
 			if (tp->copied_seq == tp->rcv_nxt &&
 			    len - tcp_header_len <= tp->ucopy.len) {
-#ifdef CONFIG_NET_DMA
-				if (tp->ucopy.task == current &&
-				    sock_owned_by_user(sk) &&
-				    tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
-					copied_early = 1;
-					eaten = 1;
-				}
-#endif
 				if (tp->ucopy.task == current &&
-				    sock_owned_by_user(sk) && !copied_early) {
+				    sock_owned_by_user(sk)) {
 					__set_current_state(TASK_RUNNING);
@@ -5274,11 +5218,6 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
-			if (!copied_early || tp->rcv_nxt != tp->rcv_wup)
+			if (tp->rcv_nxt != tp->rcv_wup)
 				__tcp_ack_snd_check(sk, 0);
 no_ack:
-#ifdef CONFIG_NET_DMA
-			if (copied_early)
-				__skb_queue_tail(&sk->sk_async_wait_queue, skb);
-			else
-#endif
 			if (eaten)
 				kfree_skb_partial(skb, fragstolen);
 			sk->sk_data_ready(sk, 0);