mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-06-06 06:37:59 +00:00
mac80211: mesh: embed gates hlist head directly
Since we have converted the mesh path tables to rhashtable, we are no longer swapping out the entire mesh_pathtbl pointer with RCU. As a result, we no longer need indirection to the hlist head for the gates list and can simply embed it, saving a pair of pointer-sized allocations. Signed-off-by: Bob Copeland <me@bobcopeland.com> Signed-off-by: Johannes Berg <johannes.berg@intel.com>
This commit is contained in:
parent
47a0489ce1
commit
18b27ff7d2
2 changed files with 5 additions and 15 deletions
|
@@ -134,7 +134,7 @@ struct mesh_path {
|
||||||
*/
|
*/
|
||||||
struct mesh_table {
|
struct mesh_table {
|
||||||
atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
|
atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
|
||||||
struct hlist_head *known_gates;
|
struct hlist_head known_gates;
|
||||||
spinlock_t gates_lock;
|
spinlock_t gates_lock;
|
||||||
|
|
||||||
struct rhashtable rhead;
|
struct rhashtable rhead;
|
||||||
|
|
|
@@ -58,12 +58,7 @@ static struct mesh_table *mesh_table_alloc(void)
|
||||||
if (!newtbl)
|
if (!newtbl)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
newtbl->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
|
INIT_HLIST_HEAD(&newtbl->known_gates);
|
||||||
if (!newtbl->known_gates) {
|
|
||||||
kfree(newtbl);
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
INIT_HLIST_HEAD(newtbl->known_gates);
|
|
||||||
atomic_set(&newtbl->entries, 0);
|
atomic_set(&newtbl->entries, 0);
|
||||||
spin_lock_init(&newtbl->gates_lock);
|
spin_lock_init(&newtbl->gates_lock);
|
||||||
|
|
||||||
|
@@ -341,7 +336,7 @@ int mesh_path_add_gate(struct mesh_path *mpath)
|
||||||
mpath->sdata->u.mesh.num_gates++;
|
mpath->sdata->u.mesh.num_gates++;
|
||||||
|
|
||||||
spin_lock(&tbl->gates_lock);
|
spin_lock(&tbl->gates_lock);
|
||||||
hlist_add_head_rcu(&mpath->gate_list, tbl->known_gates);
|
hlist_add_head_rcu(&mpath->gate_list, &tbl->known_gates);
|
||||||
spin_unlock(&tbl->gates_lock);
|
spin_unlock(&tbl->gates_lock);
|
||||||
|
|
||||||
spin_unlock_bh(&mpath->state_lock);
|
spin_unlock_bh(&mpath->state_lock);
|
||||||
|
@@ -759,16 +754,11 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
|
||||||
struct mesh_path *from_mpath = mpath;
|
struct mesh_path *from_mpath = mpath;
|
||||||
struct mesh_path *gate;
|
struct mesh_path *gate;
|
||||||
bool copy = false;
|
bool copy = false;
|
||||||
struct hlist_head *known_gates;
|
|
||||||
|
|
||||||
tbl = sdata->u.mesh.mesh_paths;
|
tbl = sdata->u.mesh.mesh_paths;
|
||||||
known_gates = tbl->known_gates;
|
|
||||||
|
|
||||||
if (!known_gates)
|
|
||||||
return -EHOSTUNREACH;
|
|
||||||
|
|
||||||
rcu_read_lock();
|
rcu_read_lock();
|
||||||
hlist_for_each_entry_rcu(gate, known_gates, gate_list) {
|
hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
|
||||||
if (gate->flags & MESH_PATH_ACTIVE) {
|
if (gate->flags & MESH_PATH_ACTIVE) {
|
||||||
mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst);
|
mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst);
|
||||||
mesh_path_move_to_queue(gate, from_mpath, copy);
|
mesh_path_move_to_queue(gate, from_mpath, copy);
|
||||||
|
@@ -781,7 +771,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
hlist_for_each_entry_rcu(gate, known_gates, gate_list) {
|
hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
|
||||||
mpath_dbg(sdata, "Sending to %pM\n", gate->dst);
|
mpath_dbg(sdata, "Sending to %pM\n", gate->dst);
|
||||||
mesh_path_tx_pending(gate);
|
mesh_path_tx_pending(gate);
|
||||||
}
|
}
|
||||||
|
|
Loading…
Add table
Reference in a new issue