Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next

Pull networking merge from David Miller:
 "1) Move ixgbe driver over to purely page based buffering on receive.
     From Alexander Duyck.

  2) Add receive packet steering support to e1000e, from Bruce Allan.

  3) Convert TCP MD5 support over to RCU, from Eric Dumazet.

  4) Reduce cpu usage in handling out-of-order TCP packets on modern
     systems, also from Eric Dumazet.

  5) Support the IP{,V6}_UNICAST_IF socket options, making the Wine
     folks happy, from Erich Hoover (sketched briefly after this list).

  6) Support VLAN trunking from guests in hyperv driver, from Haiyang
     Zhang.

  7) Support byte-queue-limits in r8169, from Igor Maravic.

  8) Outline code intended for IP_RECVTOS in IP_PKTOPTIONS existed but
     was never properly implemented; Jiri Benc fixed that.

  9) 64-bit statistics support in r8169 and 8139too, from Junchang Wang.

  10) Support kernel side dump filtering by ctmark in netfilter
      ctnetlink, from Pablo Neira Ayuso.

  11) Support byte-queue-limits in gianfar driver, from Paul Gortmaker.

  12) Add new peek socket options to assist with socket migration, from
      Pavel Emelyanov (sketched briefly after this list).

  13) Add sch_plug packet scheduler whose queue is controlled by
      userland daemons using explicit freeze and release commands.  From
      Shriram Rajagopalan.

  14) Fix FCOE checksum offload handling on transmit, from Yi Zou."
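
Two of the items above are small enough APIs to sketch here.  For item 5,
the snippet below is a minimal, hedged example (not taken from the merged
code) of steering unicast traffic out of a chosen interface with
IP_UNICAST_IF; the interface name "eth0" is a placeholder, and the
network-byte-order encoding of the ifindex is my reading of the option
(it mirrors the Windows option Wine emulates), so verify against ip(7).

    /* Minimal sketch (not from this merge): pin unicast traffic of an
     * otherwise unbound UDP socket to one interface via IP_UNICAST_IF.
     * The interface name and byte-order detail are assumptions. */
    #include <stdio.h>
    #include <stdint.h>
    #include <net/if.h>
    #include <arpa/inet.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    #ifndef IP_UNICAST_IF
    #define IP_UNICAST_IF 50        /* value from the Linux uapi headers */
    #endif

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        /* the kernel appears to expect the ifindex in network byte order */
        uint32_t idx = htonl(if_nametoindex("eth0"));

        if (setsockopt(fd, IPPROTO_IP, IP_UNICAST_IF, &idx, sizeof(idx)) < 0)
            perror("setsockopt(IP_UNICAST_IF)");
        /* subsequent sendto() calls are routed via the chosen interface */
        return 0;
    }

For item 12, the new "peek" support is presumably the SO_PEEK_OFF socket
option; the sketch below (again not from the merged code) shows the
assumed semantics on an AF_UNIX socketpair: once a non-negative offset is
set, successive MSG_PEEK reads advance a per-socket peek offset instead
of re-reading from the head of the queue.

    /* Minimal sketch (not from this merge): walk queued data with
     * MSG_PEEK without consuming it, assuming SO_PEEK_OFF semantics. */
    #include <stdio.h>
    #include <sys/socket.h>

    #ifndef SO_PEEK_OFF
    #define SO_PEEK_OFF 42          /* value from the Linux uapi headers */
    #endif

    int main(void)
    {
        int sv[2];
        char buf[9] = { 0 };
        int off = 0;

        socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
        send(sv[0], "abcdefgh", 8, 0);

        /* a non-negative offset enables peek-with-offset on the receiver */
        setsockopt(sv[1], SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));

        recv(sv[1], buf, 4, MSG_PEEK);  /* peeks "abcd", offset advances to 4 */
        recv(sv[1], buf, 4, MSG_PEEK);  /* peeks "efgh", offset advances to 8 */
        recv(sv[1], buf, 4, 0);         /* a plain read still returns "abcd" */
        printf("%.4s\n", buf);          /* prints "abcd" */
        return 0;
    }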

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1846 commits)
  Fix pppol2tp getsockname()
  Remove printk from rds_sendmsg
  ipv6: fix incorrent ipv6 ipsec packet fragment
  cpsw: Hook up default ndo_change_mtu.
  net: qmi_wwan: fix build error due to cdc-wdm dependecy
  netdev: driver: ethernet: Add TI CPSW driver
  netdev: driver: ethernet: add cpsw address lookup engine support
  phy: add am79c874 PHY support
  mlx4_core: fix race on comm channel
  bonding: send igmp report for its master
  fs_enet: Add MPC5125 FEC support and PHY interface selection
  net: bpf_jit: fix BPF_S_LDX_B_MSH compilation
  net: update the usage of CHECKSUM_UNNECESSARY
  fcoe: use CHECKSUM_UNNECESSARY instead of CHECKSUM_PARTIAL on tx
  net: do not do gso for CHECKSUM_UNNECESSARY in netif_needs_gso
  ixgbe: Fix issues with SR-IOV loopback when flow control is disabled
  net/hyperv: Fix the code handling tx busy
  ixgbe: fix namespace issues when FCoE/DCB is not enabled
  rtlwifi: Remove unused ETH_ADDR_LEN defines
  igbvf: Use ETH_ALEN
  ...

Fix up fairly trivial conflicts in drivers/isdn/gigaset/interface.c and
drivers/net/usb/{Kconfig,qmi_wwan.c} as per David.

commit 3b59bf0816
Linus Torvalds, 2012-03-20 21:04:47 -07:00
1687 files changed, 123578 insertions(+), 96539 deletions(-)

@@ -336,7 +336,7 @@ static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
 }
-static struct mesh_path *path_lookup(struct mesh_table *tbl, u8 *dst,
+static struct mesh_path *mpath_lookup(struct mesh_table *tbl, u8 *dst,
 					struct ieee80211_sub_if_data *sdata)
 {
 	struct mesh_path *mpath;
@@ -348,7 +348,7 @@ static struct mesh_path *path_lookup(struct mesh_table *tbl, u8 *dst,
 	hlist_for_each_entry_rcu(node, n, bucket, list) {
 		mpath = node->mpath;
 		if (mpath->sdata == sdata &&
-		    memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
+		    compare_ether_addr(dst, mpath->dst) == 0) {
 			if (MPATH_EXPIRED(mpath)) {
 				spin_lock_bh(&mpath->state_lock);
 				mpath->flags &= ~MESH_PATH_ACTIVE;
@@ -371,12 +371,12 @@ static struct mesh_path *path_lookup(struct mesh_table *tbl, u8 *dst,
  */
 struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
 {
-	return path_lookup(rcu_dereference(mesh_paths), dst, sdata);
+	return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
 }
 struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
 {
-	return path_lookup(rcu_dereference(mpp_paths), dst, sdata);
+	return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
 }
@@ -517,7 +517,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 	int err = 0;
 	u32 hash_idx;
-	if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
+	if (compare_ether_addr(dst, sdata->vif.addr) == 0)
 		/* never add ourselves as neighbours */
 		return -ENOTSUPP;
@@ -553,12 +553,13 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 	hash_idx = mesh_table_hash(dst, sdata, tbl);
 	bucket = &tbl->hash_buckets[hash_idx];
-	spin_lock_bh(&tbl->hashwlock[hash_idx]);
+	spin_lock(&tbl->hashwlock[hash_idx]);
 	err = -EEXIST;
 	hlist_for_each_entry(node, n, bucket, list) {
 		mpath = node->mpath;
-		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
+		if (mpath->sdata == sdata &&
+		    compare_ether_addr(dst, mpath->dst) == 0)
 			goto err_exists;
 	}
@@ -569,7 +570,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 	mesh_paths_generation++;
-	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
+	spin_unlock(&tbl->hashwlock[hash_idx]);
 	read_unlock_bh(&pathtbl_resize_lock);
 	if (grow) {
 		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
@@ -578,7 +579,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 	return 0;
 err_exists:
-	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
+	spin_unlock(&tbl->hashwlock[hash_idx]);
 	read_unlock_bh(&pathtbl_resize_lock);
 	kfree(new_node);
 err_node_alloc:
@@ -649,7 +650,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
 	int err = 0;
 	u32 hash_idx;
-	if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
+	if (compare_ether_addr(dst, sdata->vif.addr) == 0)
 		/* never add ourselves as neighbours */
 		return -ENOTSUPP;
@@ -681,12 +682,13 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
 	hash_idx = mesh_table_hash(dst, sdata, tbl);
 	bucket = &tbl->hash_buckets[hash_idx];
-	spin_lock_bh(&tbl->hashwlock[hash_idx]);
+	spin_lock(&tbl->hashwlock[hash_idx]);
 	err = -EEXIST;
 	hlist_for_each_entry(node, n, bucket, list) {
 		mpath = node->mpath;
-		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
+		if (mpath->sdata == sdata &&
+		    compare_ether_addr(dst, mpath->dst) == 0)
 			goto err_exists;
 	}
@@ -695,7 +697,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
 	    tbl->mean_chain_len * (tbl->hash_mask + 1))
 		grow = 1;
-	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
+	spin_unlock(&tbl->hashwlock[hash_idx]);
 	read_unlock_bh(&pathtbl_resize_lock);
 	if (grow) {
 		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
@@ -704,7 +706,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
 	return 0;
 err_exists:
-	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
+	spin_unlock(&tbl->hashwlock[hash_idx]);
 	read_unlock_bh(&pathtbl_resize_lock);
 	kfree(new_node);
 err_node_alloc:
@@ -803,9 +805,9 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
 	for_each_mesh_entry(tbl, p, node, i) {
 		mpath = node->mpath;
 		if (rcu_dereference(mpath->next_hop) == sta) {
-			spin_lock_bh(&tbl->hashwlock[i]);
+			spin_lock(&tbl->hashwlock[i]);
 			__mesh_path_del(tbl, node);
-			spin_unlock_bh(&tbl->hashwlock[i]);
+			spin_unlock(&tbl->hashwlock[i]);
 		}
 	}
 	read_unlock_bh(&pathtbl_resize_lock);
@@ -876,11 +878,11 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
 	hash_idx = mesh_table_hash(addr, sdata, tbl);
 	bucket = &tbl->hash_buckets[hash_idx];
-	spin_lock_bh(&tbl->hashwlock[hash_idx]);
+	spin_lock(&tbl->hashwlock[hash_idx]);
 	hlist_for_each_entry(node, n, bucket, list) {
 		mpath = node->mpath;
 		if (mpath->sdata == sdata &&
-		    memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
+		    compare_ether_addr(addr, mpath->dst) == 0) {
 			__mesh_path_del(tbl, node);
 			goto enddel;
 		}
@@ -889,7 +891,7 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
 	err = -ENXIO;
 enddel:
 	mesh_paths_generation++;
-	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
+	spin_unlock(&tbl->hashwlock[hash_idx]);
 	read_unlock_bh(&pathtbl_resize_lock);
 	return err;
 }
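
A note on the two recurring patterns in the hunks above:
compare_ether_addr(), declared in <linux/etherdevice.h>, returns zero when
two 6-byte Ethernet addresses are equal, so "compare_ether_addr(a, b) == 0"
is a drop-in replacement for "memcmp(a, b, ETH_ALEN) == 0"; and the
spin_lock_bh() -> spin_lock() changes on the per-bucket hashwlock appear to
rely on the enclosing read_lock_bh(&pathtbl_resize_lock) already having
disabled bottom halves.  The userspace snippet below only illustrates the
address-comparison equivalence; the helper is a stand-in, not the kernel's
implementation.

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    #define ETH_ALEN 6

    /* stand-in for the kernel helper: returns 0 iff the two 6-byte
     * Ethernet addresses are equal, non-zero otherwise */
    static unsigned compare_ether_addr(const uint8_t *a, const uint8_t *b)
    {
        return memcmp(a, b, ETH_ALEN) != 0;
    }

    int main(void)
    {
        uint8_t x[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        uint8_t y[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

        /* the tests used before and after the conversion agree */
        assert((memcmp(x, y, ETH_ALEN) == 0) ==
               (compare_ether_addr(x, y) == 0));
        return 0;
    }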