mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-06-19 13:11:14 +00:00
SUNRPC: More optimisations of svc_xprt_enqueue()
Just move the transport locking out of the spin lock protected area altogether. Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com> Signed-off-by: J. Bruce Fields <bfields@redhat.com>
This commit is contained in:
parent
a4aa8054a6
commit
0c0746d03e
1 changed file with 7 additions and 14 deletions
|
@@ -346,18 +346,6 @@ static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
|
||||||
if (!svc_xprt_has_something_to_do(xprt))
|
if (!svc_xprt_has_something_to_do(xprt))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
cpu = get_cpu();
|
|
||||||
pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
|
|
||||||
spin_lock_bh(&pool->sp_lock);
|
|
||||||
|
|
||||||
if (!list_empty(&pool->sp_threads) &&
|
|
||||||
!list_empty(&pool->sp_sockets))
|
|
||||||
printk(KERN_ERR
|
|
||||||
"svc_xprt_enqueue: "
|
|
||||||
"threads and transports both waiting??\n");
|
|
||||||
|
|
||||||
pool->sp_stats.packets++;
|
|
||||||
|
|
||||||
/* Mark transport as busy. It will remain in this state until
|
/* Mark transport as busy. It will remain in this state until
|
||||||
* the provider calls svc_xprt_received. We update XPT_BUSY
|
* the provider calls svc_xprt_received. We update XPT_BUSY
|
||||||
* atomically because it also guards against trying to enqueue
|
* atomically because it also guards against trying to enqueue
|
||||||
|
@@ -366,9 +354,15 @@ static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
|
||||||
if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
|
if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
|
||||||
/* Don't enqueue transport while already enqueued */
|
/* Don't enqueue transport while already enqueued */
|
||||||
dprintk("svc: transport %p busy, not enqueued\n", xprt);
|
dprintk("svc: transport %p busy, not enqueued\n", xprt);
|
||||||
goto out_unlock;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
cpu = get_cpu();
|
||||||
|
pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
|
||||||
|
spin_lock_bh(&pool->sp_lock);
|
||||||
|
|
||||||
|
pool->sp_stats.packets++;
|
||||||
|
|
||||||
if (!list_empty(&pool->sp_threads)) {
|
if (!list_empty(&pool->sp_threads)) {
|
||||||
rqstp = list_entry(pool->sp_threads.next,
|
rqstp = list_entry(pool->sp_threads.next,
|
||||||
struct svc_rqst,
|
struct svc_rqst,
|
||||||
|
@@ -395,7 +389,6 @@ static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
|
||||||
pool->sp_stats.sockets_queued++;
|
pool->sp_stats.sockets_queued++;
|
||||||
}
|
}
|
||||||
|
|
||||||
out_unlock:
|
|
||||||
spin_unlock_bh(&pool->sp_lock);
|
spin_unlock_bh(&pool->sp_lock);
|
||||||
put_cpu();
|
put_cpu();
|
||||||
}
|
}
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue