Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
	net/ipv6/ip6_tunnel.c
	net/ipv6/ip6_vti.c

ipv6 tunnel statistics bug fixes conflicting with the consolidation into
generic sw per-cpu net stats.
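For context, the consolidation referred to here moves the tunnel drivers onto the kernel's generic software per-CPU counters (struct pcpu_sw_netstats guarded by u64_stats_sync). Below is a minimal sketch of that update pattern as it looked in this era; the function name is hypothetical and this is not the code from the conflicting commits.

#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

/* Illustrative sketch only: account one received packet in the generic
 * per-CPU software stats attached to a tunnel net_device.
 */
static void example_tunnel_count_rx(struct net_device *dev,
                                    struct sk_buff *skb)
{
        struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

        u64_stats_update_begin(&tstats->syncp);
        tstats->rx_packets++;
        tstats->rx_bytes += skb->len;
        u64_stats_update_end(&tstats->syncp);
}

On the read side, ndo_get_stats64() implementations sum these per-CPU counters under u64_stats_fetch_begin()/u64_stats_fetch_retry().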

qlogic conflict between queue counting bug fix and the addition
of multiple MAC address support.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 56a4342dfe
Author: David S. Miller <davem@davemloft.net>
Date:   2014-01-06 17:37:45 -05:00

347 changed files with 3697 additions and 1791 deletions

@@ -101,6 +101,13 @@ struct xenvif_rx_meta {
 
 #define MAX_PENDING_REQS 256
 
+/* It's possible for an skb to have a maximal number of frags
+ * but still be less than MAX_BUFFER_OFFSET in size. Thus the
+ * worst-case number of copy operations is MAX_SKB_FRAGS per
+ * ring slot.
+ */
+#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
+
 struct xenvif {
         /* Unique identifier for this interface. */
         domid_t domid;
@@ -141,13 +148,13 @@ struct xenvif {
          */
         bool rx_event;
 
-        /* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
-         * head/fragment page uses 2 copy operations because it
-         * straddles two buffers in the frontend.
-         */
-        struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
-        struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
+        /* This array is allocated seperately as it is large */
+        struct gnttab_copy *grant_copy_op;
 
+        /* We create one meta structure per ring request we consume, so
+         * the maximum number is the same as the ring size.
+         */
+        struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
 
         u8 fe_dev_addr[6];
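
The hunks above (apparently xen-netback's common.h) change the worst-case sizing of the receive-side grant-copy array: instead of assuming at most two copy operations per ring slot, the bound becomes MAX_SKB_FRAGS copy operations per slot, and the now much larger array is allocated separately instead of being embedded in struct xenvif. A rough worked example of that arithmetic, assuming the usual 4 KiB-page values; the constants here are stand-ins, not taken from this diff.

#include <stdio.h>

/* Stand-in values, assuming 4 KiB pages. */
#define MAX_SKB_FRAGS          17    /* 65536 / 4096 + 1 */
#define XEN_NETIF_RX_RING_SIZE 256

int main(void)
{
        unsigned int old_ops = 2 * XEN_NETIF_RX_RING_SIZE;             /* old inline array */
        unsigned int new_ops = MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE; /* MAX_GRANT_COPY_OPS */

        /* 512 vs. 4352 entries: this growth is why grant_copy_op becomes
         * a separately allocated pointer rather than an array in the struct.
         */
        printf("old worst case: %u copy ops\n", old_ops);
        printf("new worst case: %u copy ops (%.1fx larger)\n",
               new_ops, (double)new_ops / old_ops);
        return 0;
}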