mirror of
https://github.com/Fishwaldo/linux-bl808.git
synced 2025-04-23 22:53:56 +00:00
xen/netfront: handle compound page fragments on transmit
An SKB paged fragment can consist of a compound page with order > 0.
However the netchannel protocol deals only in PAGE_SIZE frames.
Handle this in xennet_make_frags by iterating over the frames which
make up the page.
This is the netfront equivalent to 6a8ed462f1
for netback.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Cc: netdev@vger.kernel.org
Cc: xen-devel@lists.xen.org
Cc: Eric Dumazet <edumazet@google.com>
Cc: Konrad Rzeszutek Wilk <konrad@kernel.org>
Cc: ANNIE LI <annie.li@oracle.com>
Cc: Sander Eikelenboom <linux@eikelenboom.it>
Cc: Stefan Bader <stefan.bader@canonical.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
f30a944392
commit
f36c374782
1 changed file with 77 additions and 21 deletions
|
@ -452,29 +452,85 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
|
||||||
/* Grant backend access to each skb fragment page. */
|
/* Grant backend access to each skb fragment page. */
|
||||||
for (i = 0; i < frags; i++) {
|
for (i = 0; i < frags; i++) {
|
||||||
skb_frag_t *frag = skb_shinfo(skb)->frags + i;
|
skb_frag_t *frag = skb_shinfo(skb)->frags + i;
|
||||||
|
struct page *page = skb_frag_page(frag);
|
||||||
|
|
||||||
tx->flags |= XEN_NETTXF_more_data;
|
len = skb_frag_size(frag);
|
||||||
|
offset = frag->page_offset;
|
||||||
|
|
||||||
id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
|
/* Data must not cross a page boundary. */
|
||||||
np->tx_skbs[id].skb = skb_get(skb);
|
BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
|
||||||
tx = RING_GET_REQUEST(&np->tx, prod++);
|
|
||||||
tx->id = id;
|
|
||||||
ref = gnttab_claim_grant_reference(&np->gref_tx_head);
|
|
||||||
BUG_ON((signed short)ref < 0);
|
|
||||||
|
|
||||||
mfn = pfn_to_mfn(page_to_pfn(skb_frag_page(frag)));
|
/* Skip unused frames from start of page */
|
||||||
gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
|
page += offset >> PAGE_SHIFT;
|
||||||
mfn, GNTMAP_readonly);
|
offset &= ~PAGE_MASK;
|
||||||
|
|
||||||
tx->gref = np->grant_tx_ref[id] = ref;
|
while (len > 0) {
|
||||||
tx->offset = frag->page_offset;
|
unsigned long bytes;
|
||||||
tx->size = skb_frag_size(frag);
|
|
||||||
tx->flags = 0;
|
BUG_ON(offset >= PAGE_SIZE);
|
||||||
|
|
||||||
|
bytes = PAGE_SIZE - offset;
|
||||||
|
if (bytes > len)
|
||||||
|
bytes = len;
|
||||||
|
|
||||||
|
tx->flags |= XEN_NETTXF_more_data;
|
||||||
|
|
||||||
|
id = get_id_from_freelist(&np->tx_skb_freelist,
|
||||||
|
np->tx_skbs);
|
||||||
|
np->tx_skbs[id].skb = skb_get(skb);
|
||||||
|
tx = RING_GET_REQUEST(&np->tx, prod++);
|
||||||
|
tx->id = id;
|
||||||
|
ref = gnttab_claim_grant_reference(&np->gref_tx_head);
|
||||||
|
BUG_ON((signed short)ref < 0);
|
||||||
|
|
||||||
|
mfn = pfn_to_mfn(page_to_pfn(page));
|
||||||
|
gnttab_grant_foreign_access_ref(ref,
|
||||||
|
np->xbdev->otherend_id,
|
||||||
|
mfn, GNTMAP_readonly);
|
||||||
|
|
||||||
|
tx->gref = np->grant_tx_ref[id] = ref;
|
||||||
|
tx->offset = offset;
|
||||||
|
tx->size = bytes;
|
||||||
|
tx->flags = 0;
|
||||||
|
|
||||||
|
offset += bytes;
|
||||||
|
len -= bytes;
|
||||||
|
|
||||||
|
/* Next frame */
|
||||||
|
if (offset == PAGE_SIZE && len) {
|
||||||
|
BUG_ON(!PageCompound(page));
|
||||||
|
page++;
|
||||||
|
offset = 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
np->tx.req_prod_pvt = prod;
|
np->tx.req_prod_pvt = prod;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Count how many ring slots are required to send the frags of this
|
||||||
|
* skb. Each frag might be a compound page.
|
||||||
|
*/
|
||||||
|
static int xennet_count_skb_frag_slots(struct sk_buff *skb)
|
||||||
|
{
|
||||||
|
int i, frags = skb_shinfo(skb)->nr_frags;
|
||||||
|
int pages = 0;
|
||||||
|
|
||||||
|
for (i = 0; i < frags; i++) {
|
||||||
|
skb_frag_t *frag = skb_shinfo(skb)->frags + i;
|
||||||
|
unsigned long size = skb_frag_size(frag);
|
||||||
|
unsigned long offset = frag->page_offset;
|
||||||
|
|
||||||
|
/* Skip unused frames from start of page */
|
||||||
|
offset &= ~PAGE_MASK;
|
||||||
|
|
||||||
|
pages += PFN_UP(offset + size);
|
||||||
|
}
|
||||||
|
|
||||||
|
return pages;
|
||||||
|
}
|
||||||
|
|
||||||
static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||||
{
|
{
|
||||||
unsigned short id;
|
unsigned short id;
|
||||||
|
@ -487,23 +543,23 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||||
grant_ref_t ref;
|
grant_ref_t ref;
|
||||||
unsigned long mfn;
|
unsigned long mfn;
|
||||||
int notify;
|
int notify;
|
||||||
int frags = skb_shinfo(skb)->nr_frags;
|
int slots;
|
||||||
unsigned int offset = offset_in_page(data);
|
unsigned int offset = offset_in_page(data);
|
||||||
unsigned int len = skb_headlen(skb);
|
unsigned int len = skb_headlen(skb);
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
|
slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
|
||||||
if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
|
xennet_count_skb_frag_slots(skb);
|
||||||
printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
|
if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
|
||||||
frags);
|
net_alert_ratelimited(
|
||||||
dump_stack();
|
"xennet: skb rides the rocket: %d slots\n", slots);
|
||||||
goto drop;
|
goto drop;
|
||||||
}
|
}
|
||||||
|
|
||||||
spin_lock_irqsave(&np->tx_lock, flags);
|
spin_lock_irqsave(&np->tx_lock, flags);
|
||||||
|
|
||||||
if (unlikely(!netif_carrier_ok(dev) ||
|
if (unlikely(!netif_carrier_ok(dev) ||
|
||||||
(frags > 1 && !xennet_can_sg(dev)) ||
|
(slots > 1 && !xennet_can_sg(dev)) ||
|
||||||
netif_needs_gso(skb, netif_skb_features(skb)))) {
|
netif_needs_gso(skb, netif_skb_features(skb)))) {
|
||||||
spin_unlock_irqrestore(&np->tx_lock, flags);
|
spin_unlock_irqrestore(&np->tx_lock, flags);
|
||||||
goto drop;
|
goto drop;
|
||||||
|
|
Loading…
Add table
Reference in a new issue