svcrdma: Improve allocation of struct svc_rdma_op_ctxt
When the maximum payload size of NFS READ and WRITE was increased by
commit cc9a903d91 ("svcrdma: Change maximum server payload back to
RPCSVC_MAXPAYLOAD"), the size of struct svc_rdma_op_ctxt increased to
over 6KB (on x86_64). That makes allocating one of these from a
kmem_cache more likely to fail in situations when system memory is
exhausted.

Since I'm about to add a caller where this allocation must always
work _and_ it cannot sleep, pre-allocate ctxts for each connection.

Another motivation for this change is that NFSv4.x servers are
required by specification not to drop NFS requests. Pre-allocating
memory resources reduces the likelihood of a drop.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Acked-by: Bruce Fields <bfields@fieldses.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
commit cc886c9ff1
parent ced4ac0c4f

2 changed files with 94 additions and 14 deletions
@@ -69,6 +69,7 @@ extern atomic_t rdma_stat_sq_prod;
  * completes.
  */
 struct svc_rdma_op_ctxt {
+	struct list_head free;
 	struct svc_rdma_op_ctxt *read_hdr;
 	struct svc_rdma_fastreg_mr *frmr;
 	int hdr_count;
@@ -141,7 +142,10 @@ struct svcxprt_rdma {
 	struct ib_pd *sc_pd;
 
 	atomic_t sc_dma_used;
-	atomic_t sc_ctxt_used;
+	spinlock_t sc_ctxt_lock;
+	struct list_head sc_ctxts;
+	int sc_ctxt_used;
+
 	struct list_head sc_rq_dto_q;
 	spinlock_t sc_rq_dto_lock;
 	struct ib_qp *sc_qp;
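The hunks above only declare the new per-connection fields: a free list
anchored at sc_ctxts, a lock to protect it, and a usage counter. As a
rough sketch of the approach the commit message describes, assuming the
pool is filled once when the transport is set up, and noting that the
helper names (prealloc_ctxts, get_ctxt, put_ctxt) and the pool-sizing
parameter are placeholders rather than the kernel's actual interfaces
(the real helpers presumably live in the second changed file, which is
not shown on this page):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sunrpc/svc_rdma.h>

/*
 * Illustrative sketch only, not the patch itself.  Fill the
 * per-connection pool once, in a context that is allowed to sleep.
 * On failure the caller is assumed to tear the list down.
 */
static bool prealloc_ctxts(struct svcxprt_rdma *xprt, unsigned int count)
{
	unsigned int i;

	spin_lock_init(&xprt->sc_ctxt_lock);
	INIT_LIST_HEAD(&xprt->sc_ctxts);

	for (i = 0; i < count; i++) {
		struct svc_rdma_op_ctxt *ctxt;

		ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
		if (!ctxt)
			return false;
		list_add(&ctxt->free, &xprt->sc_ctxts);
	}
	return true;
}

/* Take a ctxt off the free list; never sleeps and never calls the allocator. */
static struct svc_rdma_op_ctxt *get_ctxt(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt = NULL;

	spin_lock(&xprt->sc_ctxt_lock);
	if (!list_empty(&xprt->sc_ctxts)) {
		ctxt = list_first_entry(&xprt->sc_ctxts,
					struct svc_rdma_op_ctxt, free);
		list_del(&ctxt->free);
		xprt->sc_ctxt_used++;
	}
	spin_unlock(&xprt->sc_ctxt_lock);
	return ctxt;
}

/* Return a ctxt to the free list for reuse by the same connection. */
static void put_ctxt(struct svcxprt_rdma *xprt, struct svc_rdma_op_ctxt *ctxt)
{
	spin_lock(&xprt->sc_ctxt_lock);
	xprt->sc_ctxt_used--;
	list_add(&ctxt->free, &xprt->sc_ctxts);
	spin_unlock(&xprt->sc_ctxt_lock);
}

Because every ctxt is carved out up front with GFP_KERNEL, the get path
holds only a spinlock and never touches the memory allocator, which is
what allows the new caller mentioned in the commit message to run in a
context that cannot sleep.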