Mirror of https://github.com/Fishwaldo/Star64_linux.git (synced 2025-06-27 00:51:35 +00:00)
RPC: Clean up RPC task structure
Shrink the RPC task structure. Instead of storing separate pointers for task->tk_exit and task->tk_release, put them in a structure. Also pass the user data pointer as a parameter instead of passing it via task->tk_calldata. This enables us to nest callbacks.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
parent abbcf28f23
commit 963d8fe533
18 changed files with 241 additions and 185 deletions
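For orientation, the sketch below (not part of the patch; the request struct, function names, and the kmalloc/kfree-based lifetime handling are made up for illustration) shows the calling convention this series converts the callers to: the completion and release callbacks live in a const struct rpc_call_ops, and the private data pointer is handed to rpc_call_async() and passed back to each callback, instead of being stashed in task->tk_calldata alongside per-task tk_exit/tk_release pointers.

/* Hypothetical caller illustrating the new convention (not from the patch). */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>

struct my_req {				/* made-up private data */
	int result;
};

/* Runs when the RPC call completes; calldata is the pointer given below. */
static void my_call_done(struct rpc_task *task, void *calldata)
{
	struct my_req *req = calldata;	/* was: task->tk_calldata */

	req->result = task->tk_status;
}

/* Runs when the task is torn down; replaces task->tk_release. */
static void my_call_release(void *calldata)
{
	kfree(calldata);
}

/* Replaces the per-task tk_exit/tk_release pointers. */
static const struct rpc_call_ops my_call_ops = {
	.rpc_call_done	= my_call_done,
	.rpc_release	= my_call_release,
};

static int my_async_call(struct rpc_clnt *clnt, struct rpc_message *msg)
{
	struct my_req *req = kmalloc(sizeof(*req), GFP_KERNEL);

	if (req == NULL)
		return -ENOMEM;
	/* ops table and user data now travel together as two arguments */
	return rpc_call_async(clnt, msg, RPC_TASK_ASYNC, &my_call_ops, req);
}

Because the data pointer is no longer a field on the task, a wrapper can interpose its own rpc_call_ops and forward to an inner set with different calldata, which is the nesting the changelog refers to.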
@@ -26,11 +26,12 @@
 static int	nlmclnt_test(struct nlm_rqst *, struct file_lock *);
 static int	nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
 static int	nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
-static void	nlmclnt_unlock_callback(struct rpc_task *);
-static void	nlmclnt_cancel_callback(struct rpc_task *);
 static int	nlm_stat_to_errno(u32 stat);
 static void	nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);

+static const struct rpc_call_ops nlmclnt_unlock_ops;
+static const struct rpc_call_ops nlmclnt_cancel_ops;
+
 /*
  * Cookie counter for NLM requests
  */
@@ -399,8 +400,7 @@ in_grace_period:
 /*
  * Generic NLM call, async version.
  */
-int
-nlmsvc_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
+int nlmsvc_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
 {
	struct nlm_host *host = req->a_host;
	struct rpc_clnt *clnt;
@@ -419,13 +419,12 @@ nlmsvc_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
	msg.rpc_proc = &clnt->cl_procinfo[proc];

	/* bootstrap and kick off the async RPC call */
-	status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, callback, req);
+	status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, tk_ops, req);

	return status;
 }

-static int
-nlmclnt_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
+static int nlmclnt_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
 {
	struct nlm_host *host = req->a_host;
	struct rpc_clnt *clnt;
@@ -448,7 +447,7 @@ nlmclnt_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
	/* Increment host refcount */
	nlm_get_host(host);
	/* bootstrap and kick off the async RPC call */
-	status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, callback, req);
+	status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, tk_ops, req);
	if (status < 0)
		nlm_release_host(host);
	return status;
@@ -664,7 +663,7 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)

	if (req->a_flags & RPC_TASK_ASYNC) {
		status = nlmclnt_async_call(req, NLMPROC_UNLOCK,
-					nlmclnt_unlock_callback);
+					&nlmclnt_unlock_ops);
		/* Hrmf... Do the unlock early since locks_remove_posix()
		 * really expects us to free the lock synchronously */
		do_vfs_lock(fl);
@@ -692,10 +691,9 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
		return -ENOLCK;
 }

-static void
-nlmclnt_unlock_callback(struct rpc_task *task)
+static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
 {
-	struct nlm_rqst *req = (struct nlm_rqst *) task->tk_calldata;
+	struct nlm_rqst *req = data;
	int status = req->a_res.status;

	if (RPC_ASSASSINATED(task))
@@ -722,6 +720,10 @@ die:
	rpc_restart_call(task);
 }

+static const struct rpc_call_ops nlmclnt_unlock_ops = {
+	.rpc_call_done = nlmclnt_unlock_callback,
+};
+
 /*
  * Cancel a blocked lock request.
  * We always use an async RPC call for this in order not to hang a
@@ -750,8 +752,7 @@ nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)

	nlmclnt_setlockargs(req, fl);

-	status = nlmclnt_async_call(req, NLMPROC_CANCEL,
-					nlmclnt_cancel_callback);
+	status = nlmclnt_async_call(req, NLMPROC_CANCEL, &nlmclnt_cancel_ops);
	if (status < 0) {
		nlmclnt_release_lockargs(req);
		kfree(req);
@@ -765,10 +766,9 @@ nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)
	return status;
 }

-static void
-nlmclnt_cancel_callback(struct rpc_task *task)
+static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
 {
-	struct nlm_rqst *req = (struct nlm_rqst *) task->tk_calldata;
+	struct nlm_rqst *req = data;

	if (RPC_ASSASSINATED(task))
		goto die;
@@ -807,6 +807,10 @@ retry_cancel:
	rpc_delay(task, 30 * HZ);
 }

+static const struct rpc_call_ops nlmclnt_cancel_ops = {
+	.rpc_call_done = nlmclnt_cancel_callback,
+};
+
 /*
  * Convert an NLM status code to a generic kernel errno
  */

@@ -22,7 +22,8 @@
 #define NLMDBG_FACILITY		NLMDBG_CLIENT

 static u32	nlm4svc_callback(struct svc_rqst *, u32, struct nlm_res *);
-static void	nlm4svc_callback_exit(struct rpc_task *);
+
+static const struct rpc_call_ops nlm4svc_callback_ops;

 /*
  * Obtain client and file from arguments
@@ -470,7 +471,6 @@ nlm4svc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res *argp,
 }


-
 /*
  * This is the generic lockd callback for async RPC calls
  */
@@ -494,7 +494,7 @@ nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp)
	call->a_host = host;
	memcpy(&call->a_args, resp, sizeof(*resp));

-	if (nlmsvc_async_call(call, proc, nlm4svc_callback_exit) < 0)
+	if (nlmsvc_async_call(call, proc, &nlm4svc_callback_ops) < 0)
		goto error;

	return rpc_success;
@@ -504,10 +504,9 @@ nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp)
	return rpc_system_err;
 }

-static void
-nlm4svc_callback_exit(struct rpc_task *task)
+static void nlm4svc_callback_exit(struct rpc_task *task, void *data)
 {
-	struct nlm_rqst	*call = (struct nlm_rqst *) task->tk_calldata;
+	struct nlm_rqst	*call = data;

	if (task->tk_status < 0) {
		dprintk("lockd: %4d callback failed (errno = %d)\n",
@@ -517,6 +516,10 @@ nlm4svc_callback_exit(struct rpc_task *task)
	kfree(call);
 }

+static const struct rpc_call_ops nlm4svc_callback_ops = {
+	.rpc_call_done = nlm4svc_callback_exit,
+};
+
 /*
  * NLM Server procedures.
  */

@@ -41,7 +41,8 @@

 static void	nlmsvc_insert_block(struct nlm_block *block, unsigned long);
 static int	nlmsvc_remove_block(struct nlm_block *block);
-static void	nlmsvc_grant_callback(struct rpc_task *task);
+
+static const struct rpc_call_ops nlmsvc_grant_ops;

 /*
  * The list of blocked locks to retry
@@ -562,7 +563,7 @@ callback:
	/* Call the client */
	nlm_get_host(block->b_call.a_host);
	if (nlmsvc_async_call(&block->b_call, NLMPROC_GRANTED_MSG,
-						nlmsvc_grant_callback) < 0)
+						&nlmsvc_grant_ops) < 0)
		nlm_release_host(block->b_call.a_host);
	up(&file->f_sema);
 }
@@ -575,10 +576,9 @@ callback:
  * chain once more in order to have it removed by lockd itself (which can
  * then sleep on the file semaphore without disrupting e.g. the nfs client).
  */
-static void
-nlmsvc_grant_callback(struct rpc_task *task)
+static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
 {
-	struct nlm_rqst *call = (struct nlm_rqst *) task->tk_calldata;
+	struct nlm_rqst *call = data;
	struct nlm_block *block;
	unsigned long timeout;
	struct sockaddr_in *peer_addr = RPC_PEERADDR(task->tk_client);
@@ -614,6 +614,10 @@ nlmsvc_grant_callback(struct rpc_task *task)
	nlm_release_host(call->a_host);
 }

+static const struct rpc_call_ops nlmsvc_grant_ops = {
+	.rpc_call_done = nlmsvc_grant_callback,
+};
+
 /*
  * We received a GRANT_RES callback. Try to find the corresponding
  * block.

@@ -23,7 +23,8 @@
 #define NLMDBG_FACILITY		NLMDBG_CLIENT

 static u32	nlmsvc_callback(struct svc_rqst *, u32, struct nlm_res *);
-static void	nlmsvc_callback_exit(struct rpc_task *);
+
+static const struct rpc_call_ops nlmsvc_callback_ops;

 #ifdef CONFIG_LOCKD_V4
 static u32
@@ -518,7 +519,7 @@ nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp)
	call->a_host = host;
	memcpy(&call->a_args, resp, sizeof(*resp));

-	if (nlmsvc_async_call(call, proc, nlmsvc_callback_exit) < 0)
+	if (nlmsvc_async_call(call, proc, &nlmsvc_callback_ops) < 0)
		goto error;

	return rpc_success;
@@ -528,10 +529,9 @@ nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp)
	return rpc_system_err;
 }

-static void
-nlmsvc_callback_exit(struct rpc_task *task)
+static void nlmsvc_callback_exit(struct rpc_task *task, void *data)
 {
-	struct nlm_rqst	*call = (struct nlm_rqst *) task->tk_calldata;
+	struct nlm_rqst	*call = data;

	if (task->tk_status < 0) {
		dprintk("lockd: %4d callback failed (errno = %d)\n",
@@ -541,6 +541,10 @@ nlmsvc_callback_exit(struct rpc_task *task)
	kfree(call);
 }

+static const struct rpc_call_ops nlmsvc_callback_ops = {
+	.rpc_call_done = nlmsvc_callback_exit,
+};
+
 /*
  * NLM Server procedures.
  */

@@ -269,7 +269,6 @@ static void nfs_direct_read_schedule(struct nfs_direct_req *dreq,

		data->task.tk_cookie = (unsigned long) inode;
-		data->task.tk_calldata = data;
-		data->task.tk_release = nfs_readdata_release;
		data->complete = nfs_direct_read_result;

		lock_kernel();

@@ -732,19 +732,23 @@ nfs3_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,

 extern u32 *nfs3_decode_dirent(u32 *, struct nfs_entry *, int);

-static void
-nfs3_read_done(struct rpc_task *task)
+static void nfs3_read_done(struct rpc_task *task, void *calldata)
 {
-	struct nfs_read_data *data = (struct nfs_read_data *) task->tk_calldata;
+	struct nfs_read_data *data = calldata;

	if (nfs3_async_handle_jukebox(task))
		return;
	/* Call back common NFS readpage processing */
	if (task->tk_status >= 0)
		nfs_refresh_inode(data->inode, &data->fattr);
-	nfs_readpage_result(task);
+	nfs_readpage_result(task, calldata);
 }

+static const struct rpc_call_ops nfs3_read_ops = {
+	.rpc_call_done = nfs3_read_done,
+	.rpc_release = nfs_readdata_release,
+};
+
 static void
 nfs3_proc_read_setup(struct nfs_read_data *data)
 {
@@ -762,23 +766,26 @@ nfs3_proc_read_setup(struct nfs_read_data *data)
	flags = RPC_TASK_ASYNC | (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);

	/* Finalize the task. */
-	rpc_init_task(task, NFS_CLIENT(inode), nfs3_read_done, flags);
+	rpc_init_task(task, NFS_CLIENT(inode), flags, &nfs3_read_ops, data);
	rpc_call_setup(task, &msg, 0);
 }

-static void
-nfs3_write_done(struct rpc_task *task)
+static void nfs3_write_done(struct rpc_task *task, void *calldata)
 {
-	struct nfs_write_data *data;
+	struct nfs_write_data *data = calldata;

	if (nfs3_async_handle_jukebox(task))
		return;
-	data = (struct nfs_write_data *)task->tk_calldata;
	if (task->tk_status >= 0)
		nfs_post_op_update_inode(data->inode, data->res.fattr);
-	nfs_writeback_done(task);
+	nfs_writeback_done(task, calldata);
 }

+static const struct rpc_call_ops nfs3_write_ops = {
+	.rpc_call_done = nfs3_write_done,
+	.rpc_release = nfs_writedata_release,
+};
+
 static void
 nfs3_proc_write_setup(struct nfs_write_data *data, int how)
 {
@@ -806,23 +813,26 @@ nfs3_proc_write_setup(struct nfs_write_data *data, int how)
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;

	/* Finalize the task. */
-	rpc_init_task(task, NFS_CLIENT(inode), nfs3_write_done, flags);
+	rpc_init_task(task, NFS_CLIENT(inode), flags, &nfs3_write_ops, data);
	rpc_call_setup(task, &msg, 0);
 }

-static void
-nfs3_commit_done(struct rpc_task *task)
+static void nfs3_commit_done(struct rpc_task *task, void *calldata)
 {
-	struct nfs_write_data *data;
+	struct nfs_write_data *data = calldata;

	if (nfs3_async_handle_jukebox(task))
		return;
-	data = (struct nfs_write_data *)task->tk_calldata;
	if (task->tk_status >= 0)
		nfs_post_op_update_inode(data->inode, data->res.fattr);
-	nfs_commit_done(task);
+	nfs_commit_done(task, calldata);
 }

+static const struct rpc_call_ops nfs3_commit_ops = {
+	.rpc_call_done = nfs3_commit_done,
+	.rpc_release = nfs_commit_release,
+};
+
 static void
 nfs3_proc_commit_setup(struct nfs_write_data *data, int how)
 {
@@ -840,7 +850,7 @@ nfs3_proc_commit_setup(struct nfs_write_data *data, int how)
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;

	/* Finalize the task. */
-	rpc_init_task(task, NFS_CLIENT(inode), nfs3_commit_done, flags);
+	rpc_init_task(task, NFS_CLIENT(inode), flags, &nfs3_commit_ops, data);
	rpc_call_setup(task, &msg, 0);
 }

@@ -196,14 +196,12 @@ static void update_changeattr(struct inode *inode, struct nfs4_change_info *cinf

 /* Helper for asynchronous RPC calls */
 static int nfs4_call_async(struct rpc_clnt *clnt, rpc_action tk_begin,
-		rpc_action tk_exit, void *calldata)
+		const struct rpc_call_ops *tk_ops, void *calldata)
 {
	struct rpc_task *task;

-	if (!(task = rpc_new_task(clnt, tk_exit, RPC_TASK_ASYNC)))
+	if (!(task = rpc_new_task(clnt, RPC_TASK_ASYNC, tk_ops, calldata)))
		return -ENOMEM;
-
-	task->tk_calldata = calldata;
	task->tk_action = tk_begin;
	rpc_execute(task);
	return 0;
@@ -867,10 +865,10 @@ struct nfs4_closedata {
	struct nfs_fattr fattr;
 };

-static void nfs4_free_closedata(struct nfs4_closedata *calldata)
+static void nfs4_free_closedata(void *data)
 {
-	struct nfs4_state *state = calldata->state;
-	struct nfs4_state_owner *sp = state->owner;
+	struct nfs4_closedata *calldata = data;
+	struct nfs4_state_owner *sp = calldata->state->owner;

	nfs4_put_open_state(calldata->state);
	nfs_free_seqid(calldata->arg.seqid);
@@ -878,9 +876,9 @@ static void nfs4_free_closedata(struct nfs4_closedata *calldata)
	kfree(calldata);
 }

-static void nfs4_close_done(struct rpc_task *task)
+static void nfs4_close_done(struct rpc_task *task, void *data)
 {
-	struct nfs4_closedata *calldata = (struct nfs4_closedata *)task->tk_calldata;
+	struct nfs4_closedata *calldata = data;
	struct nfs4_state *state = calldata->state;
	struct nfs_server *server = NFS_SERVER(calldata->inode);

@@ -904,7 +902,6 @@ static void nfs4_close_done(struct rpc_task *task)
		}
	}
	nfs_refresh_inode(calldata->inode, calldata->res.fattr);
-	nfs4_free_closedata(calldata);
 }

 static void nfs4_close_begin(struct rpc_task *task)
@@ -918,10 +915,8 @@ static void nfs4_close_begin(struct rpc_task *task)
		.rpc_cred = state->owner->so_cred,
	};
	int mode = 0, old_mode;
-	int status;

-	status = nfs_wait_on_sequence(calldata->arg.seqid, task);
-	if (status != 0)
+	if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
		return;
	/* Recalculate the new open mode in case someone reopened the file
	 * while we were waiting in line to be scheduled.
@@ -937,9 +932,8 @@ static void nfs4_close_begin(struct rpc_task *task)
	spin_unlock(&calldata->inode->i_lock);
	spin_unlock(&state->owner->so_lock);
	if (mode == old_mode || test_bit(NFS_DELEGATED_STATE, &state->flags)) {
-		nfs4_free_closedata(calldata);
-		task->tk_exit = NULL;
-		rpc_exit(task, 0);
+		/* Note: exit _without_ calling nfs4_close_done */
+		task->tk_action = NULL;
		return;
	}
	nfs_fattr_init(calldata->res.fattr);
@@ -949,6 +943,11 @@ static void nfs4_close_begin(struct rpc_task *task)
	rpc_call_setup(task, &msg, 0);
 }

+static const struct rpc_call_ops nfs4_close_ops = {
+	.rpc_call_done = nfs4_close_done,
+	.rpc_release = nfs4_free_closedata,
+};
+
 /*
  * It is possible for data to be read/written from a mem-mapped file
  * after the sys_close call (which hits the vfs layer as a flush).
@@ -982,7 +981,7 @@ int nfs4_do_close(struct inode *inode, struct nfs4_state *state)
	calldata->res.server = server;

	status = nfs4_call_async(server->client, nfs4_close_begin,
-			nfs4_close_done, calldata);
+			&nfs4_close_ops, calldata);
	if (status == 0)
		goto out;

@@ -2125,10 +2124,9 @@ static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
	return err;
 }

-static void
-nfs4_read_done(struct rpc_task *task)
+static void nfs4_read_done(struct rpc_task *task, void *calldata)
 {
-	struct nfs_read_data *data = (struct nfs_read_data *) task->tk_calldata;
+	struct nfs_read_data *data = calldata;
	struct inode *inode = data->inode;

	if (nfs4_async_handle_error(task, NFS_SERVER(inode)) == -EAGAIN) {
@@ -2138,9 +2136,14 @@ nfs4_read_done(struct rpc_task *task)
	if (task->tk_status > 0)
		renew_lease(NFS_SERVER(inode), data->timestamp);
	/* Call back common NFS readpage processing */
-	nfs_readpage_result(task);
+	nfs_readpage_result(task, calldata);
 }

+static const struct rpc_call_ops nfs4_read_ops = {
+	.rpc_call_done = nfs4_read_done,
+	.rpc_release = nfs_readdata_release,
+};
+
 static void
 nfs4_proc_read_setup(struct nfs_read_data *data)
 {
@@ -2160,14 +2163,13 @@ nfs4_proc_read_setup(struct nfs_read_data *data)
	flags = RPC_TASK_ASYNC | (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);

	/* Finalize the task. */
-	rpc_init_task(task, NFS_CLIENT(inode), nfs4_read_done, flags);
+	rpc_init_task(task, NFS_CLIENT(inode), flags, &nfs4_read_ops, data);
	rpc_call_setup(task, &msg, 0);
 }

-static void
-nfs4_write_done(struct rpc_task *task)
+static void nfs4_write_done(struct rpc_task *task, void *calldata)
 {
-	struct nfs_write_data *data = (struct nfs_write_data *) task->tk_calldata;
+	struct nfs_write_data *data = calldata;
	struct inode *inode = data->inode;

	if (nfs4_async_handle_error(task, NFS_SERVER(inode)) == -EAGAIN) {
@@ -2179,9 +2181,14 @@ nfs4_write_done(struct rpc_task *task)
		nfs_post_op_update_inode(inode, data->res.fattr);
	}
	/* Call back common NFS writeback processing */
-	nfs_writeback_done(task);
+	nfs_writeback_done(task, calldata);
 }

+static const struct rpc_call_ops nfs4_write_ops = {
+	.rpc_call_done = nfs4_write_done,
+	.rpc_release = nfs_writedata_release,
+};
+
 static void
 nfs4_proc_write_setup(struct nfs_write_data *data, int how)
 {
@@ -2214,14 +2221,13 @@ nfs4_proc_write_setup(struct nfs_write_data *data, int how)
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;

	/* Finalize the task. */
-	rpc_init_task(task, NFS_CLIENT(inode), nfs4_write_done, flags);
+	rpc_init_task(task, NFS_CLIENT(inode), flags, &nfs4_write_ops, data);
	rpc_call_setup(task, &msg, 0);
 }

-static void
-nfs4_commit_done(struct rpc_task *task)
+static void nfs4_commit_done(struct rpc_task *task, void *calldata)
 {
-	struct nfs_write_data *data = (struct nfs_write_data *) task->tk_calldata;
+	struct nfs_write_data *data = calldata;
	struct inode *inode = data->inode;

	if (nfs4_async_handle_error(task, NFS_SERVER(inode)) == -EAGAIN) {
@@ -2231,9 +2237,14 @@ nfs4_commit_done(struct rpc_task *task)
	if (task->tk_status >= 0)
		nfs_post_op_update_inode(inode, data->res.fattr);
	/* Call back common NFS writeback processing */
-	nfs_commit_done(task);
+	nfs_commit_done(task, calldata);
 }

+static const struct rpc_call_ops nfs4_commit_ops = {
+	.rpc_call_done = nfs4_commit_done,
+	.rpc_release = nfs_commit_release,
+};
+
 static void
 nfs4_proc_commit_setup(struct nfs_write_data *data, int how)
 {
@@ -2255,7 +2266,7 @@ nfs4_proc_commit_setup(struct nfs_write_data *data, int how)
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;

	/* Finalize the task. */
-	rpc_init_task(task, NFS_CLIENT(inode), nfs4_commit_done, flags);
+	rpc_init_task(task, NFS_CLIENT(inode), flags, &nfs4_commit_ops, data);
	rpc_call_setup(task, &msg, 0);
 }

@@ -2263,11 +2274,10 @@ nfs4_proc_commit_setup(struct nfs_write_data *data, int how)
  * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
  * standalone procedure for queueing an asynchronous RENEW.
  */
-static void
-renew_done(struct rpc_task *task)
+static void nfs4_renew_done(struct rpc_task *task, void *data)
 {
	struct nfs4_client *clp = (struct nfs4_client *)task->tk_msg.rpc_argp;
-	unsigned long timestamp = (unsigned long)task->tk_calldata;
+	unsigned long timestamp = (unsigned long)data;

	if (task->tk_status < 0) {
		switch (task->tk_status) {
@@ -2284,6 +2294,10 @@ renew_done(struct rpc_task *task)
	spin_unlock(&clp->cl_lock);
 }

+static const struct rpc_call_ops nfs4_renew_ops = {
+	.rpc_call_done = nfs4_renew_done,
+};
+
 int
 nfs4_proc_async_renew(struct nfs4_client *clp)
 {
@@ -2294,7 +2308,7 @@ nfs4_proc_async_renew(struct nfs4_client *clp)
	};

	return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
-			renew_done, (void *)jiffies);
+			&nfs4_renew_ops, (void *)jiffies);
 }

 int
@@ -2866,15 +2880,16 @@ static void nfs4_locku_release_calldata(struct nfs4_unlockdata *calldata)
	}
 }

-static void nfs4_locku_complete(struct nfs4_unlockdata *calldata)
+static void nfs4_locku_complete(void *data)
 {
+	struct nfs4_unlockdata *calldata = data;
	complete(&calldata->completion);
	nfs4_locku_release_calldata(calldata);
 }

-static void nfs4_locku_done(struct rpc_task *task)
+static void nfs4_locku_done(struct rpc_task *task, void *data)
 {
-	struct nfs4_unlockdata *calldata = (struct nfs4_unlockdata *)task->tk_calldata;
+	struct nfs4_unlockdata *calldata = data;

	nfs_increment_lock_seqid(task->tk_status, calldata->luargs.seqid);
	switch (task->tk_status) {
@@ -2890,10 +2905,8 @@ static void nfs4_locku_done(struct rpc_task *task)
	default:
		if (nfs4_async_handle_error(task, calldata->res.server) == -EAGAIN) {
			rpc_restart_call(task);
			return;
		}
	}
-	nfs4_locku_complete(calldata);
 }

 static void nfs4_locku_begin(struct rpc_task *task)
@@ -2911,14 +2924,18 @@ static void nfs4_locku_begin(struct rpc_task *task)
	if (status != 0)
		return;
	if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) {
-		nfs4_locku_complete(calldata);
-		task->tk_exit = NULL;
-		rpc_exit(task, 0);
+		/* Note: exit _without_ running nfs4_locku_done */
+		task->tk_action = NULL;
		return;
	}
	rpc_call_setup(task, &msg, 0);
 }

+static const struct rpc_call_ops nfs4_locku_ops = {
+	.rpc_call_done = nfs4_locku_done,
+	.rpc_release = nfs4_locku_complete,
+};
+
 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
 {
	struct nfs4_unlockdata *calldata;
@@ -2963,7 +2980,7 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
	init_completion(&calldata->completion);

	status = nfs4_call_async(NFS_SERVER(inode)->client, nfs4_locku_begin,
-			nfs4_locku_done, calldata);
+			&nfs4_locku_ops, calldata);
	if (status == 0)
		wait_for_completion_interruptible(&calldata->completion);
	do_vfs_lock(request->fl_file, request);

@@ -547,10 +547,9 @@ nfs_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,

 extern u32 * nfs_decode_dirent(u32 *, struct nfs_entry *, int);

-static void
-nfs_read_done(struct rpc_task *task)
+static void nfs_read_done(struct rpc_task *task, void *calldata)
 {
-	struct nfs_read_data *data = (struct nfs_read_data *) task->tk_calldata;
+	struct nfs_read_data *data = calldata;

	if (task->tk_status >= 0) {
		nfs_refresh_inode(data->inode, data->res.fattr);
@@ -560,9 +559,14 @@ nfs_read_done(struct rpc_task *task)
		if (data->args.offset + data->args.count >= data->res.fattr->size)
			data->res.eof = 1;
	}
-	nfs_readpage_result(task);
+	nfs_readpage_result(task, calldata);
 }

+static const struct rpc_call_ops nfs_read_ops = {
+	.rpc_call_done = nfs_read_done,
+	.rpc_release = nfs_readdata_release,
+};
+
 static void
 nfs_proc_read_setup(struct nfs_read_data *data)
 {
@@ -580,20 +584,24 @@ nfs_proc_read_setup(struct nfs_read_data *data)
	flags = RPC_TASK_ASYNC | (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);

	/* Finalize the task. */
-	rpc_init_task(task, NFS_CLIENT(inode), nfs_read_done, flags);
+	rpc_init_task(task, NFS_CLIENT(inode), flags, &nfs_read_ops, data);
	rpc_call_setup(task, &msg, 0);
 }

-static void
-nfs_write_done(struct rpc_task *task)
+static void nfs_write_done(struct rpc_task *task, void *calldata)
 {
-	struct nfs_write_data *data = (struct nfs_write_data *) task->tk_calldata;
+	struct nfs_write_data *data = calldata;

	if (task->tk_status >= 0)
		nfs_post_op_update_inode(data->inode, data->res.fattr);
-	nfs_writeback_done(task);
+	nfs_writeback_done(task, calldata);
 }

+static const struct rpc_call_ops nfs_write_ops = {
+	.rpc_call_done = nfs_write_done,
+	.rpc_release = nfs_writedata_release,
+};
+
 static void
 nfs_proc_write_setup(struct nfs_write_data *data, int how)
 {
@@ -614,7 +622,7 @@ nfs_proc_write_setup(struct nfs_write_data *data, int how)
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;

	/* Finalize the task. */
-	rpc_init_task(task, NFS_CLIENT(inode), nfs_write_done, flags);
+	rpc_init_task(task, NFS_CLIENT(inode), flags, &nfs_write_ops, data);
	rpc_call_setup(task, &msg, 0);
 }

@@ -42,9 +42,8 @@ mempool_t *nfs_rdata_mempool;

 #define MIN_POOL_READ	(32)

-void nfs_readdata_release(struct rpc_task *task)
+void nfs_readdata_release(void *data)
 {
-	struct nfs_read_data *data = (struct nfs_read_data *)task->tk_calldata;
	nfs_readdata_free(data);
 }

@@ -220,9 +219,6 @@ static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
	NFS_PROTO(inode)->read_setup(data);

	data->task.tk_cookie = (unsigned long)inode;
-	data->task.tk_calldata = data;
-	/* Release requests */
-	data->task.tk_release = nfs_readdata_release;

	dprintk("NFS: %4d initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
		data->task.tk_pid,
@@ -452,9 +448,9 @@ static void nfs_readpage_result_full(struct nfs_read_data *data, int status)
  * This is the callback from RPC telling us whether a reply was
  * received or some error occurred (timeout or socket shutdown).
  */
-void nfs_readpage_result(struct rpc_task *task)
+void nfs_readpage_result(struct rpc_task *task, void *calldata)
 {
-	struct nfs_read_data *data = (struct nfs_read_data *)task->tk_calldata;
+	struct nfs_read_data *data = calldata;
	struct nfs_readargs *argp = &data->args;
	struct nfs_readres *resp = &data->res;
	int status = task->tk_status;

@@ -116,10 +116,9 @@ nfs_async_unlink_init(struct rpc_task *task)
  *
  * Do the directory attribute update.
  */
-static void
-nfs_async_unlink_done(struct rpc_task *task)
+static void nfs_async_unlink_done(struct rpc_task *task, void *calldata)
 {
-	struct nfs_unlinkdata *data = (struct nfs_unlinkdata *)task->tk_calldata;
+	struct nfs_unlinkdata *data = calldata;
	struct dentry *dir = data->dir;
	struct inode *dir_i;

@@ -141,13 +140,17 @@ nfs_async_unlink_done(struct rpc_task *task)
  * We need to call nfs_put_unlinkdata as a 'tk_release' task since the
  * rpc_task would be freed too.
  */
-static void
-nfs_async_unlink_release(struct rpc_task *task)
+static void nfs_async_unlink_release(void *calldata)
 {
-	struct nfs_unlinkdata *data = (struct nfs_unlinkdata *)task->tk_calldata;
+	struct nfs_unlinkdata *data = calldata;
	nfs_put_unlinkdata(data);
 }

+static const struct rpc_call_ops nfs_unlink_ops = {
+	.rpc_call_done = nfs_async_unlink_done,
+	.rpc_release = nfs_async_unlink_release,
+};
+
 /**
  * nfs_async_unlink - asynchronous unlinking of a file
  * @dentry: dentry to unlink
@@ -179,10 +182,8 @@ nfs_async_unlink(struct dentry *dentry)
	data->count = 1;

	task = &data->task;
-	rpc_init_task(task, clnt, nfs_async_unlink_done , RPC_TASK_ASYNC);
-	task->tk_calldata = data;
+	rpc_init_task(task, clnt, RPC_TASK_ASYNC, &nfs_unlink_ops, data);
	task->tk_action = nfs_async_unlink_init;
-	task->tk_release = nfs_async_unlink_release;

	spin_lock(&dentry->d_lock);
	dentry->d_flags |= DCACHE_NFSFS_RENAMED;

@@ -104,9 +104,8 @@ static inline void nfs_commit_free(struct nfs_write_data *p)
	mempool_free(p, nfs_commit_mempool);
 }

-static void nfs_writedata_release(struct rpc_task *task)
+void nfs_writedata_release(void *wdata)
 {
-	struct nfs_write_data *wdata = (struct nfs_write_data *)task->tk_calldata;
	nfs_writedata_free(wdata);
 }

@@ -871,9 +870,6 @@ static void nfs_write_rpcsetup(struct nfs_page *req,

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;
-	data->task.tk_calldata = data;
-	/* Release requests */
-	data->task.tk_release = nfs_writedata_release;

	dprintk("NFS: %4d initiated write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
		data->task.tk_pid,
@@ -1131,9 +1127,9 @@ static void nfs_writeback_done_full(struct nfs_write_data *data, int status)
 /*
  * This function is called when the WRITE call is complete.
  */
-void nfs_writeback_done(struct rpc_task *task)
+void nfs_writeback_done(struct rpc_task *task, void *calldata)
 {
-	struct nfs_write_data *data = (struct nfs_write_data *) task->tk_calldata;
+	struct nfs_write_data *data = calldata;
	struct nfs_writeargs *argp = &data->args;
	struct nfs_writeres *resp = &data->res;

@@ -1200,9 +1196,8 @@ void nfs_writeback_done(struct rpc_task *task)


 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-static void nfs_commit_release(struct rpc_task *task)
+void nfs_commit_release(void *wdata)
 {
-	struct nfs_write_data *wdata = (struct nfs_write_data *)task->tk_calldata;
	nfs_commit_free(wdata);
 }

@@ -1238,9 +1233,6 @@ static void nfs_commit_rpcsetup(struct list_head *head,

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;
-	data->task.tk_calldata = data;
-	/* Release requests */
-	data->task.tk_release = nfs_commit_release;

	dprintk("NFS: %4d initiated commit call\n", data->task.tk_pid);
 }
@@ -1277,10 +1269,9 @@ nfs_commit_list(struct list_head *head, int how)
 /*
  * COMMIT call returned
  */
-void
-nfs_commit_done(struct rpc_task *task)
+void nfs_commit_done(struct rpc_task *task, void *calldata)
 {
-	struct nfs_write_data *data = (struct nfs_write_data *)task->tk_calldata;
+	struct nfs_write_data *data = calldata;
	struct nfs_page *req;
	int res = 0;

@@ -53,7 +53,7 @@
 #define NFSPROC4_CB_COMPOUND 1

 /* declarations */
-static void nfs4_cb_null(struct rpc_task *task);
+static const struct rpc_call_ops nfs4_cb_null_ops;

 /* Index of predefined Linux callback client operations */

@@ -447,7 +447,7 @@ nfsd4_probe_callback(struct nfs4_client *clp)
	msg.rpc_cred = nfsd4_lookupcred(clp,0);
	if (IS_ERR(msg.rpc_cred))
		goto out_rpciod;
-	status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, nfs4_cb_null, NULL);
+	status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, &nfs4_cb_null_ops, NULL);
	put_rpccred(msg.rpc_cred);

	if (status != 0) {
@@ -469,7 +469,7 @@ out_err:
 }

 static void
-nfs4_cb_null(struct rpc_task *task)
+nfs4_cb_null(struct rpc_task *task, void *dummy)
 {
	struct nfs4_client *clp = (struct nfs4_client *)task->tk_msg.rpc_argp;
	struct nfs4_callback *cb = &clp->cl_callback;
@@ -488,6 +488,10 @@ out:
	put_nfs4_client(clp);
 }

+static const struct rpc_call_ops nfs4_cb_null_ops = {
+	.rpc_call_done = nfs4_cb_null,
+};
+
 /*
  * called with dp->dl_count inc'ed.
  * nfs4_lock_state() may or may not have been called.