Mirror of https://github.com/Fishwaldo/Star64_linux.git (synced 2025-07-09 08:02:05 +00:00)
Merge branch 'flexfiles'
* flexfiles:
  pNFS/flexfiles: Ensure we record layoutstats even if RPC is terminated early
  pNFS: Add flag to track if we've called nfs4_ff_layout_stat_io_start_read/write
  pNFS/flexfiles: Fix a statistics gathering imbalance
  pNFS/flexfiles: Don't mark the entire layout as failed, when returning it
  pNFS/flexfiles: Don't prevent flexfiles client from retrying LAYOUTGET
  pnfs/flexfiles: count io stat in rpc_count_stats callback
  pnfs/flexfiles: do not mark delay-like status as DS failure
  NFS41: map NFS4ERR_LAYOUTUNAVAILABLE to ENODATA
  nfs: only remove page from mapping if launder_page fails
  nfs: handle request add failure properly
  nfs: centralize pgio error cleanup
  nfs: clean up rest of reqs when failing to add one
  NFS41: pop some layoutget errors to application
  pNFS/flexfiles: Support server-supplied layoutstats sampling period
commit 58baac0ac7
16 changed files with 356 additions and 177 deletions
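Several of the patches above share one pattern: instead of silently falling back when page I/O setup fails, the error is recorded in the pageio descriptor's pg_error field and propagated to the caller, which then cleans up any queued requests. The sketch below is a minimal, self-contained C model of that propagation pattern; the struct and function names are simplified stand-ins for illustration, not the kernel API.

#include <stdio.h>
#include <errno.h>

/* Simplified stand-in for the pageio descriptor: only the error field matters here. */
struct pgio_desc {
	int pg_error;   /* 0, or a negative errno recorded during setup */
	int queued;     /* number of requests accepted so far (illustrative) */
};

/* Model of a pg_init-style hook that may fail (e.g. no layout segment could
 * be obtained); it records the error instead of swallowing it. */
static void pgio_init(struct pgio_desc *desc, int simulate_error)
{
	if (simulate_error)
		desc->pg_error = -ENOMEM;
}

/* Model of an add-request call: returns 0 on failure and leaves the reason
 * in desc->pg_error so the caller can clean up and report it. */
static int pgio_add_request(struct pgio_desc *desc, int simulate_error)
{
	pgio_init(desc, simulate_error);
	if (desc->pg_error < 0) {
		/* the caller is expected to release any queued requests here */
		return 0;
	}
	desc->queued++;
	return 1;
}

int main(void)
{
	struct pgio_desc desc = { 0, 0 };

	if (!pgio_add_request(&desc, 1))
		/* propagate the recorded error instead of a generic failure */
		fprintf(stderr, "add_request failed: %d\n", desc.pg_error);
	return 0;
}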
@@ -670,6 +670,10 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 
 	req = nfs_list_entry(reqs.next);
 	nfs_direct_setup_mirroring(dreq, &desc, req);
+	if (desc.pg_error < 0) {
+		list_splice_init(&reqs, &failed);
+		goto out_failed;
+	}
 
 	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
 		if (!nfs_pageio_add_request(&desc, req)) {
@@ -677,13 +681,17 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 			nfs_list_add_request(req, &failed);
 			spin_lock(cinfo.lock);
 			dreq->flags = 0;
-			dreq->error = -EIO;
+			if (desc.pg_error < 0)
+				dreq->error = desc.pg_error;
+			else
+				dreq->error = -EIO;
 			spin_unlock(cinfo.lock);
 		}
 		nfs_release_request(req);
 	}
 	nfs_pageio_complete(&desc);
 
+out_failed:
 	while (!list_empty(&failed)) {
 		req = nfs_list_entry(failed.next);
 		nfs_list_remove_request(req);
@@ -900,6 +908,11 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
 			}
 
 			nfs_direct_setup_mirroring(dreq, &desc, req);
+			if (desc.pg_error < 0) {
+				nfs_free_request(req);
+				result = desc.pg_error;
+				break;
+			}
 
 			nfs_lock_request(req);
 			req->wb_index = pos >> PAGE_SHIFT;
@@ -545,7 +545,7 @@ static int nfs_launder_page(struct page *page)
 		inode->i_ino, (long long)page_offset(page));
 
 	nfs_fscache_wait_on_page_write(nfsi, page);
-	return nfs_wb_page(inode, page);
+	return nfs_wb_launder_page(inode, page);
 }
 
 static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file,
@@ -202,6 +202,7 @@ static int filelayout_async_handle_error(struct rpc_task *task,
 			task->tk_status);
 		nfs4_mark_deviceid_unavailable(devid);
 		pnfs_error_mark_layout_for_return(inode, lseg);
+		pnfs_set_lo_fail(lseg);
 		rpc_wake_up(&tbl->slot_tbl_waitq);
 		/* fall through */
 	default:
@@ -883,13 +884,19 @@ static void
 filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio,
 			struct nfs_page *req)
 {
-	if (!pgio->pg_lseg)
+	if (!pgio->pg_lseg) {
 		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
 					   req->wb_context,
 					   0,
 					   NFS4_MAX_UINT64,
 					   IOMODE_READ,
 					   GFP_KERNEL);
+		if (IS_ERR(pgio->pg_lseg)) {
+			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
+			pgio->pg_lseg = NULL;
+			return;
+		}
+	}
 	/* If no lseg, fall back to read through mds */
 	if (pgio->pg_lseg == NULL)
 		nfs_pageio_reset_read_mds(pgio);
@@ -902,13 +909,20 @@ filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
 	struct nfs_commit_info cinfo;
 	int status;
 
-	if (!pgio->pg_lseg)
+	if (!pgio->pg_lseg) {
 		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
 					   req->wb_context,
 					   0,
 					   NFS4_MAX_UINT64,
 					   IOMODE_RW,
 					   GFP_NOFS);
+		if (IS_ERR(pgio->pg_lseg)) {
+			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
+			pgio->pg_lseg = NULL;
+			return;
+		}
+	}
 
 	/* If no lseg, fall back to write through mds */
 	if (pgio->pg_lseg == NULL)
 		goto out_mds;
@@ -505,9 +505,17 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
 	}
 
 	p = xdr_inline_decode(&stream, 4);
-	if (p)
-		fls->flags = be32_to_cpup(p);
+	if (!p)
+		goto out_sort_mirrors;
+	fls->flags = be32_to_cpup(p);
+
+	p = xdr_inline_decode(&stream, 4);
+	if (!p)
+		goto out_sort_mirrors;
+	for (i=0; i < fls->mirror_array_cnt; i++)
+		fls->mirror_array[i]->report_interval = be32_to_cpup(p);
 
+out_sort_mirrors:
 	ff_layout_sort_mirrors(fls);
 	rc = ff_layout_check_layout(lgr);
 	if (rc)
@@ -603,7 +611,9 @@ nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
 		mirror->start_time = now;
 	if (ktime_equal(mirror->last_report_time, notime))
 		mirror->last_report_time = now;
-	if (layoutstats_timer != 0)
+	if (mirror->report_interval != 0)
+		report_interval = (s64)mirror->report_interval * 1000LL;
+	else if (layoutstats_timer != 0)
 		report_interval = (s64)layoutstats_timer * 1000LL;
 	if (ktime_to_ms(ktime_sub(now, mirror->last_report_time)) >=
 			report_interval) {
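The two hunks above decode a per-mirror report_interval from the layout and let it take precedence over the module-wide layoutstats_timer when deciding how often to send layoutstats. A small standalone C sketch of that selection logic follows; the fallback default value and the test harness are assumptions for illustration, only the precedence order comes from the diff.

#include <stdio.h>
#include <stdint.h>

/* Pick the layoutstats reporting interval in milliseconds.
 * Precedence (as in the hunk above): server-supplied per-mirror interval,
 * then the module-wide timer, then a compiled-in default (assumed here). */
static int64_t report_interval_ms(uint32_t mirror_interval_s,
				  int layoutstats_timer_s,
				  int64_t default_ms)
{
	int64_t interval = default_ms;

	if (mirror_interval_s != 0)
		interval = (int64_t)mirror_interval_s * 1000LL;
	else if (layoutstats_timer_s != 0)
		interval = (int64_t)layoutstats_timer_s * 1000LL;
	return interval;
}

int main(void)
{
	/* the 60000 ms default is an assumption for the demo, not taken from the diff */
	printf("%lld\n", (long long)report_interval_ms(30, 0, 60000)); /* 30000 */
	printf("%lld\n", (long long)report_interval_ms(0, 90, 60000)); /* 90000 */
	printf("%lld\n", (long long)report_interval_ms(0, 0, 60000));  /* 60000 */
	return 0;
}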
@@ -785,13 +795,19 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
 	int ds_idx;
 
 	/* Use full layout for now */
-	if (!pgio->pg_lseg)
+	if (!pgio->pg_lseg) {
 		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
 					   req->wb_context,
 					   0,
 					   NFS4_MAX_UINT64,
 					   IOMODE_READ,
 					   GFP_KERNEL);
+		if (IS_ERR(pgio->pg_lseg)) {
+			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
+			pgio->pg_lseg = NULL;
+			return;
+		}
+	}
 	/* If no lseg, fall back to read through mds */
 	if (pgio->pg_lseg == NULL)
 		goto out_mds;
@@ -825,13 +841,19 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
 	int i;
 	int status;
 
-	if (!pgio->pg_lseg)
+	if (!pgio->pg_lseg) {
 		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
 					   req->wb_context,
 					   0,
 					   NFS4_MAX_UINT64,
 					   IOMODE_RW,
 					   GFP_NOFS);
+		if (IS_ERR(pgio->pg_lseg)) {
+			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
+			pgio->pg_lseg = NULL;
+			return;
+		}
+	}
 	/* If no lseg, fall back to write through mds */
 	if (pgio->pg_lseg == NULL)
 		goto out_mds;
@@ -867,18 +889,25 @@ static unsigned int
 ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
 				    struct nfs_page *req)
 {
-	if (!pgio->pg_lseg)
+	if (!pgio->pg_lseg) {
 		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
 					   req->wb_context,
 					   0,
 					   NFS4_MAX_UINT64,
 					   IOMODE_RW,
 					   GFP_NOFS);
+		if (IS_ERR(pgio->pg_lseg)) {
+			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
+			pgio->pg_lseg = NULL;
+			goto out;
+		}
+	}
 	if (pgio->pg_lseg)
 		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
 
 	/* no lseg means that pnfs is not in use, so no mirroring here */
 	nfs_pageio_reset_write_mds(pgio);
+out:
 	return 1;
 }
@@ -1101,7 +1130,7 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
 	return -NFS4ERR_RESET_TO_PNFS;
 out_retry:
 	task->tk_status = 0;
-	rpc_restart_call(task);
+	rpc_restart_call_prepare(task);
 	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
 	return -EAGAIN;
 }
@@ -1159,6 +1188,14 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
 		}
 	}
 
+	switch (status) {
+	case NFS4ERR_DELAY:
+	case NFS4ERR_GRACE:
+		return;
+	default:
+		break;
+	}
+
 	mirror = FF_LAYOUT_COMP(lseg, idx);
 	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
 			       mirror, offset, length, status, opnum,
@@ -1242,14 +1279,31 @@ ff_layout_reset_to_mds(struct pnfs_layout_segment *lseg, int idx)
 	return ff_layout_test_devid_unavailable(node);
 }
 
-static int ff_layout_read_prepare_common(struct rpc_task *task,
+static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
 		struct nfs_pgio_header *hdr)
 {
+	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
+		return;
 	nfs4_ff_layout_stat_io_start_read(hdr->inode,
 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
 			hdr->args.count,
 			task->tk_start);
+}
+
+static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
+		struct nfs_pgio_header *hdr)
+{
+	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
+		return;
+	nfs4_ff_layout_stat_io_end_read(task,
+			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
+			hdr->args.count,
+			hdr->res.count);
+}
+
+static int ff_layout_read_prepare_common(struct rpc_task *task,
+		struct nfs_pgio_header *hdr)
+{
 	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
 		rpc_exit(task, -EIO);
 		return -EIO;
@@ -1265,6 +1319,7 @@ static int ff_layout_read_prepare_common(struct rpc_task *task,
 	}
 	hdr->pgio_done_cb = ff_layout_read_done_cb;
 
+	ff_layout_read_record_layoutstats_start(task, hdr);
 	return 0;
 }
 
@@ -1323,10 +1378,6 @@ static void ff_layout_read_call_done(struct rpc_task *task, void *data)
 
 	dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
 
-	nfs4_ff_layout_stat_io_end_read(task,
-			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
-			hdr->args.count, hdr->res.count);
-
 	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
 	    task->tk_status == 0) {
 		nfs4_sequence_done(task, &hdr->res.seq_res);
@@ -1341,10 +1392,20 @@ static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
 {
 	struct nfs_pgio_header *hdr = data;
 
+	ff_layout_read_record_layoutstats_done(task, hdr);
 	rpc_count_iostats_metrics(task,
 	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
 }
 
+static void ff_layout_read_release(void *data)
+{
+	struct nfs_pgio_header *hdr = data;
+
+	ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
+	pnfs_generic_rw_release(data);
+}
+
+
 static int ff_layout_write_done_cb(struct rpc_task *task,
 				struct nfs_pgio_header *hdr)
 {
@@ -1362,15 +1423,12 @@ static int ff_layout_write_done_cb(struct rpc_task *task,
 
 	switch (err) {
 	case -NFS4ERR_RESET_TO_PNFS:
-		pnfs_set_retry_layoutget(hdr->lseg->pls_layout);
 		ff_layout_reset_write(hdr, true);
 		return task->tk_status;
 	case -NFS4ERR_RESET_TO_MDS:
-		pnfs_clear_retry_layoutget(hdr->lseg->pls_layout);
 		ff_layout_reset_write(hdr, false);
 		return task->tk_status;
 	case -EAGAIN:
-		rpc_restart_call_prepare(task);
 		return -EAGAIN;
 	}
 
@@ -1402,11 +1460,9 @@ static int ff_layout_commit_done_cb(struct rpc_task *task,
 
 	switch (err) {
 	case -NFS4ERR_RESET_TO_PNFS:
-		pnfs_set_retry_layoutget(data->lseg->pls_layout);
 		pnfs_generic_prepare_to_resend_writes(data);
 		return -EAGAIN;
 	case -NFS4ERR_RESET_TO_MDS:
-		pnfs_clear_retry_layoutget(data->lseg->pls_layout);
 		pnfs_generic_prepare_to_resend_writes(data);
 		return -EAGAIN;
 	case -EAGAIN:
@@ -1421,14 +1477,31 @@ static int ff_layout_commit_done_cb(struct rpc_task *task,
 	return 0;
 }
 
-static int ff_layout_write_prepare_common(struct rpc_task *task,
+static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
 		struct nfs_pgio_header *hdr)
 {
+	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
+		return;
 	nfs4_ff_layout_stat_io_start_write(hdr->inode,
 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
 			hdr->args.count,
 			task->tk_start);
+}
+
+static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
+		struct nfs_pgio_header *hdr)
+{
+	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
+		return;
+	nfs4_ff_layout_stat_io_end_write(task,
+			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
+			hdr->args.count, hdr->res.count,
+			hdr->res.verf->committed);
+}
+
+static int ff_layout_write_prepare_common(struct rpc_task *task,
+		struct nfs_pgio_header *hdr)
+{
 	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
 		rpc_exit(task, -EIO);
 		return -EIO;
@@ -1445,6 +1518,7 @@ static int ff_layout_write_prepare_common(struct rpc_task *task,
 		return -EAGAIN;
 	}
 
+	ff_layout_write_record_layoutstats_start(task, hdr);
 	return 0;
 }
 
@@ -1480,11 +1554,6 @@ static void ff_layout_write_call_done(struct rpc_task *task, void *data)
 {
 	struct nfs_pgio_header *hdr = data;
 
-	nfs4_ff_layout_stat_io_end_write(task,
-			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
-			hdr->args.count, hdr->res.count,
-			hdr->res.verf->committed);
-
 	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
 	    task->tk_status == 0) {
 		nfs4_sequence_done(task, &hdr->res.seq_res);
@@ -1499,16 +1568,51 @@ static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
 {
 	struct nfs_pgio_header *hdr = data;
 
+	ff_layout_write_record_layoutstats_done(task, hdr);
 	rpc_count_iostats_metrics(task,
 	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
 }
 
+static void ff_layout_write_release(void *data)
+{
+	struct nfs_pgio_header *hdr = data;
+
+	ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
+	pnfs_generic_rw_release(data);
+}
+
+static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
+		struct nfs_commit_data *cdata)
+{
+	if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
+		return;
+	nfs4_ff_layout_stat_io_start_write(cdata->inode,
+			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
+			0, task->tk_start);
+}
+
+static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
+		struct nfs_commit_data *cdata)
+{
+	struct nfs_page *req;
+	__u64 count = 0;
+
+	if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
+		return;
+
+	if (task->tk_status == 0) {
+		list_for_each_entry(req, &cdata->pages, wb_list)
+			count += req->wb_bytes;
+	}
+	nfs4_ff_layout_stat_io_end_write(task,
+			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
+			count, count, NFS_FILE_SYNC);
+}
+
 static void ff_layout_commit_prepare_common(struct rpc_task *task,
 		struct nfs_commit_data *cdata)
 {
-	nfs4_ff_layout_stat_io_start_write(cdata->inode,
-			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
-			0, task->tk_start);
+	ff_layout_commit_record_layoutstats_start(task, cdata);
 }
 
 static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
@@ -1531,19 +1635,6 @@ static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
 
 static void ff_layout_commit_done(struct rpc_task *task, void *data)
 {
-	struct nfs_commit_data *cdata = data;
-	struct nfs_page *req;
-	__u64 count = 0;
-
-	if (task->tk_status == 0) {
-		list_for_each_entry(req, &cdata->pages, wb_list)
-			count += req->wb_bytes;
-	}
-
-	nfs4_ff_layout_stat_io_end_write(task,
-		FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
-		count, count, NFS_FILE_SYNC);
-
 	pnfs_generic_write_commit_done(task, data);
 }
 
@@ -1551,50 +1642,59 @@ static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
 {
 	struct nfs_commit_data *cdata = data;
 
+	ff_layout_commit_record_layoutstats_done(task, cdata);
 	rpc_count_iostats_metrics(task,
 	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
 }
 
+static void ff_layout_commit_release(void *data)
+{
+	struct nfs_commit_data *cdata = data;
+
+	ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
+	pnfs_generic_commit_release(data);
+}
+
 static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
 	.rpc_call_prepare = ff_layout_read_prepare_v3,
 	.rpc_call_done = ff_layout_read_call_done,
 	.rpc_count_stats = ff_layout_read_count_stats,
-	.rpc_release = pnfs_generic_rw_release,
+	.rpc_release = ff_layout_read_release,
 };
 
 static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
 	.rpc_call_prepare = ff_layout_read_prepare_v4,
 	.rpc_call_done = ff_layout_read_call_done,
 	.rpc_count_stats = ff_layout_read_count_stats,
-	.rpc_release = pnfs_generic_rw_release,
+	.rpc_release = ff_layout_read_release,
};
 
 static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
 	.rpc_call_prepare = ff_layout_write_prepare_v3,
 	.rpc_call_done = ff_layout_write_call_done,
 	.rpc_count_stats = ff_layout_write_count_stats,
-	.rpc_release = pnfs_generic_rw_release,
+	.rpc_release = ff_layout_write_release,
 };
 
 static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
 	.rpc_call_prepare = ff_layout_write_prepare_v4,
 	.rpc_call_done = ff_layout_write_call_done,
 	.rpc_count_stats = ff_layout_write_count_stats,
-	.rpc_release = pnfs_generic_rw_release,
+	.rpc_release = ff_layout_write_release,
 };
 
 static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
 	.rpc_call_prepare = ff_layout_commit_prepare_v3,
 	.rpc_call_done = ff_layout_commit_done,
 	.rpc_count_stats = ff_layout_commit_count_stats,
-	.rpc_release = pnfs_generic_commit_release,
+	.rpc_release = ff_layout_commit_release,
 };
 
 static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
 	.rpc_call_prepare = ff_layout_commit_prepare_v4,
 	.rpc_call_done = ff_layout_commit_done,
 	.rpc_count_stats = ff_layout_commit_count_stats,
-	.rpc_release = pnfs_generic_commit_release,
+	.rpc_release = ff_layout_commit_release,
 };
 
 static enum pnfs_try_status
@@ -85,6 +85,7 @@ struct nfs4_ff_layout_mirror {
 	struct nfs4_ff_layoutstat write_stat;
 	ktime_t start_time;
 	ktime_t last_report_time;
+	u32 report_interval;
 };
 
 struct nfs4_ff_layout_segment {
@@ -429,22 +429,14 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
 					   mirror, lseg->pls_range.offset,
 					   lseg->pls_range.length, NFS4ERR_NXIO,
 					   OP_ILLEGAL, GFP_NOIO);
-		if (fail_return) {
-			pnfs_error_mark_layout_for_return(ino, lseg);
-			if (ff_layout_has_available_ds(lseg))
-				pnfs_set_retry_layoutget(lseg->pls_layout);
-			else
-				pnfs_clear_retry_layoutget(lseg->pls_layout);
-
-		} else {
+		if (!fail_return) {
 			if (ff_layout_has_available_ds(lseg))
 				set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
 					&lseg->pls_layout->plh_flags);
-			else {
+			else
 				pnfs_error_mark_layout_for_return(ino, lseg);
-				pnfs_clear_retry_layoutget(lseg->pls_layout);
-			}
-		}
+		} else
+			pnfs_error_mark_layout_for_return(ino, lseg);
 	}
 out_update_creds:
 	if (ff_layout_update_mirror_cred(mirror, ds))
@@ -912,6 +912,12 @@ void nfs_file_clear_open_context(struct file *filp)
 	if (ctx) {
 		struct inode *inode = d_inode(ctx->dentry);
 
+		/*
+		 * We fatal error on write before. Try to writeback
+		 * every page again.
+		 */
+		if (ctx->error < 0)
+			invalidate_inode_pages2(inode->i_mapping);
 		filp->private_data = NULL;
 		spin_lock(&inode->i_lock);
 		list_move_tail(&ctx->list, &NFS_I(inode)->open_files);
@@ -711,3 +711,17 @@ static inline u32 nfs_stateid_hash(nfs4_stateid *stateid)
 	return 0;
 }
 #endif
+
+static inline bool nfs_error_is_fatal(int err)
+{
+	switch (err) {
+	case -ERESTARTSYS:
+	case -EIO:
+	case -ENOSPC:
+	case -EROFS:
+	case -E2BIG:
+		return true;
+	default:
+		return false;
+	}
+}
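Later hunks in this series use the nfs_error_is_fatal() helper added above to decide whether a failed write request should have its error recorded (and, in the launder case, have its page dropped) or simply be redirtied and retried. The sketch below condenses that decision into standalone C; the record/drop/redirty stubs are illustrative placeholders, not kernel interfaces.

#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

/* Same classification as the nfs_error_is_fatal() helper in the hunk above. */
static bool error_is_fatal(int err)
{
	switch (err) {
	case -ERESTARTSYS:
	case -EIO:
	case -ENOSPC:
	case -EROFS:
	case -E2BIG:
		return true;
	default:
		return false;
	}
}

/* Placeholder stubs for illustration only. */
static void record_write_error(int err) { printf("record error %d\n", err); }
static void drop_page(void)             { printf("drop page\n"); }
static void redirty_page(void)          { printf("redirty page\n"); }

/* Mirrors the control flow of the write-path hunk later in this diff:
 * fatal errors are recorded; in the launder case the page is removed,
 * otherwise the page is redirtied and retried later. */
static void handle_add_failure(int err, bool launder)
{
	if (error_is_fatal(err)) {
		record_write_error(err);
		if (launder) {
			drop_page();
			return;
		}
	}
	redirty_page();
}

int main(void)
{
	handle_add_failure(-EIO, true);     /* record + drop */
	handle_add_failure(-EAGAIN, false); /* redirty */
	return 0;
}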
@@ -7811,6 +7811,15 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
 	switch (task->tk_status) {
 	case 0:
 		goto out;
+
+	/*
+	 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs
+	 * on the file. set tk_status to -ENODATA to tell upper layer to
+	 * retry go inband.
+	 */
+	case -NFS4ERR_LAYOUTUNAVAILABLE:
+		task->tk_status = -ENODATA;
+		goto out;
 	/*
 	 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of
 	 * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3).
@@ -664,22 +664,11 @@ EXPORT_SYMBOL_GPL(nfs_initiate_pgio);
  * @desc: IO descriptor
  * @hdr: pageio header
  */
-static int nfs_pgio_error(struct nfs_pageio_descriptor *desc,
-			  struct nfs_pgio_header *hdr)
+static void nfs_pgio_error(struct nfs_pgio_header *hdr)
 {
-	struct nfs_pgio_mirror *mirror;
-	u32 midx;
-
 	set_bit(NFS_IOHDR_REDO, &hdr->flags);
 	nfs_pgio_data_destroy(hdr);
 	hdr->completion_ops->completion(hdr);
-	/* TODO: Make sure it's right to clean up all mirrors here
-	 * and not just hdr->pgio_mirror_idx */
-	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
-		mirror = &desc->pg_mirrors[midx];
-		desc->pg_completion_ops->error_cleanup(&mirror->pg_list);
-	}
-	return -ENOMEM;
 }
 
 /**
@@ -800,8 +789,11 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
 	unsigned int pagecount, pageused;
 
 	pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count);
-	if (!nfs_pgarray_set(&hdr->page_array, pagecount))
-		return nfs_pgio_error(desc, hdr);
+	if (!nfs_pgarray_set(&hdr->page_array, pagecount)) {
+		nfs_pgio_error(hdr);
+		desc->pg_error = -ENOMEM;
+		return desc->pg_error;
+	}
 
 	nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
 	pages = hdr->page_array.pagevec;
@@ -819,8 +811,11 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
 			*pages++ = last_page = req->wb_page;
 		}
 	}
-	if (WARN_ON_ONCE(pageused != pagecount))
-		return nfs_pgio_error(desc, hdr);
+	if (WARN_ON_ONCE(pageused != pagecount)) {
+		nfs_pgio_error(hdr);
+		desc->pg_error = -EINVAL;
+		return desc->pg_error;
+	}
 
 	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
 	    (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
@@ -843,10 +838,8 @@ static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
 
 	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
 	if (!hdr) {
-		/* TODO: make sure this is right with mirroring - or
-		 * should it back out all mirrors? */
-		desc->pg_completion_ops->error_cleanup(&mirror->pg_list);
-		return -ENOMEM;
+		desc->pg_error = -ENOMEM;
+		return desc->pg_error;
 	}
 	nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
 	ret = nfs_generic_pgio(desc, hdr);
@@ -874,6 +867,9 @@ static int nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
 
 	mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
 
+	if (pgio->pg_error < 0)
+		return pgio->pg_error;
+
 	if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX)
 		return -EINVAL;
 
@@ -982,6 +978,8 @@ static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
 	} else {
 		if (desc->pg_ops->pg_init)
 			desc->pg_ops->pg_init(desc, req);
+		if (desc->pg_error < 0)
+			return 0;
 		mirror->pg_base = req->wb_pgbase;
 	}
 	if (!nfs_can_coalesce_requests(prev, req, desc))
@@ -1147,6 +1145,8 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
 	bytes = req->wb_bytes;
 
 	nfs_pageio_setup_mirroring(desc, req);
+	if (desc->pg_error < 0)
+		goto out_failed;
 
 	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
 		if (midx) {
@@ -1163,7 +1163,8 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
 
 		if (IS_ERR(dupreq)) {
 			nfs_page_group_unlock(req);
-			return 0;
+			desc->pg_error = PTR_ERR(dupreq);
+			goto out_failed;
 		}
 
 		nfs_lock_request(dupreq);
@@ -1176,10 +1177,32 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
 		if (nfs_pgio_has_mirroring(desc))
 			desc->pg_mirror_idx = midx;
 		if (!nfs_pageio_add_request_mirror(desc, dupreq))
-			return 0;
+			goto out_failed;
 	}
 
 	return 1;
+
+out_failed:
+	/*
+	 * We might have failed before sending any reqs over wire.
+	 * Clean up rest of the reqs in mirror pg_list.
+	 */
+	if (desc->pg_error) {
+		struct nfs_pgio_mirror *mirror;
+		void (*func)(struct list_head *);
+
+		/* remember fatal errors */
+		if (nfs_error_is_fatal(desc->pg_error))
+			mapping_set_error(desc->pg_inode->i_mapping,
+					  desc->pg_error);
+
+		func = desc->pg_completion_ops->error_cleanup;
+		for (midx = 0; midx < desc->pg_mirror_count; midx++) {
+			mirror = &desc->pg_mirrors[midx];
+			func(&mirror->pg_list);
+		}
+	}
+	return 0;
 }
 
 /*
@@ -1232,7 +1255,7 @@ int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
 	nfs_pageio_complete(desc);
 	if (!list_empty(&failed)) {
 		list_move(&failed, &hdr->pages);
-		return -EIO;
+		return desc->pg_error < 0 ? desc->pg_error : -EIO;
 	}
 	return 0;
 }
@@ -618,7 +618,6 @@ pnfs_destroy_layout(struct nfs_inode *nfsi)
 		pnfs_get_layout_hdr(lo);
 		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
 		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
-		pnfs_clear_retry_layoutget(lo);
 		spin_unlock(&nfsi->vfs_inode.i_lock);
 		pnfs_free_lseg_list(&tmp_list);
 		pnfs_put_layout_hdr(lo);
@@ -904,17 +903,9 @@ send_layoutget(struct pnfs_layout_hdr *lo,
 		lseg = nfs4_proc_layoutget(lgp, gfp_flags);
 	} while (lseg == ERR_PTR(-EAGAIN));
 
-	if (IS_ERR(lseg)) {
-		switch (PTR_ERR(lseg)) {
-		case -ENOMEM:
-		case -ERESTARTSYS:
-			break;
-		default:
-			/* remember that LAYOUTGET failed and suspend trying */
-			pnfs_layout_io_set_failed(lo, range->iomode);
-		}
-		return NULL;
-	} else
+	if (IS_ERR(lseg) && !nfs_error_is_fatal(PTR_ERR(lseg)))
+		lseg = NULL;
+	else
 		pnfs_layout_clear_fail_bit(lo,
 				pnfs_iomode_to_fail_bit(range->iomode));
 
@@ -1102,7 +1093,6 @@ bool pnfs_roc(struct inode *ino)
 			&lo->plh_flags))
 		layoutreturn = pnfs_prepare_layoutreturn(lo);
 
-	pnfs_clear_retry_layoutget(lo);
 	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
 		/* If we are sending layoutreturn, invalidate all valid lsegs */
 		if (layoutreturn || test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
@@ -1465,25 +1455,15 @@ static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
 	return ret;
 }
 
-/* stop waiting if someone clears NFS_LAYOUT_RETRY_LAYOUTGET bit. */
-static int pnfs_layoutget_retry_bit_wait(struct wait_bit_key *key, int mode)
-{
-	if (!test_bit(NFS_LAYOUT_RETRY_LAYOUTGET, key->flags))
-		return 1;
-	return nfs_wait_bit_killable(key, mode);
-}
-
 static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
 {
-	if (!pnfs_should_retry_layoutget(lo))
-		return false;
 	/*
 	 * send layoutcommit as it can hold up layoutreturn due to lseg
 	 * reference
 	 */
 	pnfs_layoutcommit_inode(lo->plh_inode, false);
 	return !wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
-				   pnfs_layoutget_retry_bit_wait,
+				   nfs_wait_bit_killable,
 				   TASK_UNINTERRUPTIBLE);
 }
 
@@ -1558,8 +1538,7 @@ lookup_again:
 	}
 
 	/* if LAYOUTGET already failed once we don't try again */
-	if (pnfs_layout_io_test_failed(lo, iomode) &&
-	    !pnfs_should_retry_layoutget(lo)) {
+	if (pnfs_layout_io_test_failed(lo, iomode)) {
 		trace_pnfs_update_layout(ino, pos, count, iomode, lo,
 				 PNFS_UPDATE_LAYOUT_IO_TEST_FAIL);
 		goto out_unlock;
@@ -1636,7 +1615,6 @@ lookup_again:
 		arg.length = PAGE_CACHE_ALIGN(arg.length);
 
 	lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
-	pnfs_clear_retry_layoutget(lo);
 	atomic_dec(&lo->plh_outstanding);
 	trace_pnfs_update_layout(ino, pos, count, iomode, lo,
 				 PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET);
@@ -1649,7 +1627,7 @@ out:
 			"(%s, offset: %llu, length: %llu)\n",
 			__func__, ino->i_sb->s_id,
 			(unsigned long long)NFS_FILEID(ino),
-			lseg == NULL ? "not found" : "found",
+			IS_ERR_OR_NULL(lseg) ? "not found" : "found",
 			iomode==IOMODE_RW ? "read/write" : "read-only",
 			(unsigned long long)pos,
 			(unsigned long long)count);
@@ -1785,7 +1763,6 @@ void pnfs_error_mark_layout_for_return(struct inode *inode,
 		struct pnfs_layout_segment *lseg)
 {
 	struct pnfs_layout_hdr *lo = NFS_I(inode)->layout;
-	int iomode = pnfs_iomode_to_fail_bit(lseg->pls_range.iomode);
 	struct pnfs_layout_range range = {
 		.iomode = lseg->pls_range.iomode,
 		.offset = 0,
@@ -1794,8 +1771,6 @@ void pnfs_error_mark_layout_for_return(struct inode *inode,
 	LIST_HEAD(free_me);
 
 	spin_lock(&inode->i_lock);
-	/* set failure bit so that pnfs path will be retried later */
-	pnfs_layout_set_fail_bit(lo, iomode);
 	if (lo->plh_return_iomode == 0)
 		lo->plh_return_iomode = range.iomode;
 	else if (lo->plh_return_iomode != range.iomode)
@@ -1828,6 +1803,11 @@ pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *r
 						   rd_size,
 						   IOMODE_READ,
 						   GFP_KERNEL);
+		if (IS_ERR(pgio->pg_lseg)) {
+			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
+			pgio->pg_lseg = NULL;
+			return;
+		}
 	}
 	/* If no lseg, fall back to read through mds */
 	if (pgio->pg_lseg == NULL)
@@ -1840,13 +1820,19 @@ void
 pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
 			   struct nfs_page *req, u64 wb_size)
 {
-	if (pgio->pg_lseg == NULL)
+	if (pgio->pg_lseg == NULL) {
 		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
 						   req->wb_context,
 						   req_offset(req),
 						   wb_size,
 						   IOMODE_RW,
 						   GFP_NOFS);
+		if (IS_ERR(pgio->pg_lseg)) {
+			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
+			pgio->pg_lseg = NULL;
+			return;
+		}
+	}
 	/* If no lseg, fall back to write through mds */
 	if (pgio->pg_lseg == NULL)
 		nfs_pageio_reset_write_mds(pgio);
@@ -2014,15 +2000,13 @@ static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
 int
 pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
 {
-	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
-
 	struct nfs_pgio_header *hdr;
 	int ret;
 
 	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
 	if (!hdr) {
-		desc->pg_completion_ops->error_cleanup(&mirror->pg_list);
-		return -ENOMEM;
+		desc->pg_error = -ENOMEM;
+		return desc->pg_error;
 	}
 	nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
 
@@ -2145,15 +2129,13 @@ static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
 int
 pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
 {
-	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
-
 	struct nfs_pgio_header *hdr;
 	int ret;
 
 	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
 	if (!hdr) {
-		desc->pg_completion_ops->error_cleanup(&mirror->pg_list);
-		return -ENOMEM;
+		desc->pg_error = -ENOMEM;
+		return desc->pg_error;
 	}
 	nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
 	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
@@ -98,7 +98,6 @@ enum {
 	NFS_LAYOUT_RETURN_BEFORE_CLOSE, /* Return this layout before close */
 	NFS_LAYOUT_INVALID_STID,	/* layout stateid id is invalid */
 	NFS_LAYOUT_FIRST_LAYOUTGET,	/* Serialize first layoutget */
-	NFS_LAYOUT_RETRY_LAYOUTGET,	/* Retry layoutget */
 };
 
 enum layoutdriver_policy_flags {
@@ -379,26 +378,6 @@ nfs4_get_deviceid(struct nfs4_deviceid_node *d)
 	return d;
 }
 
-static inline void pnfs_set_retry_layoutget(struct pnfs_layout_hdr *lo)
-{
-	if (!test_and_set_bit(NFS_LAYOUT_RETRY_LAYOUTGET, &lo->plh_flags))
-		atomic_inc(&lo->plh_refcount);
-}
-
-static inline void pnfs_clear_retry_layoutget(struct pnfs_layout_hdr *lo)
-{
-	if (test_and_clear_bit(NFS_LAYOUT_RETRY_LAYOUTGET, &lo->plh_flags)) {
-		atomic_dec(&lo->plh_refcount);
-		/* wake up waiters for LAYOUTRETURN as that is not needed */
-		wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
-	}
-}
-
-static inline bool pnfs_should_retry_layoutget(struct pnfs_layout_hdr *lo)
-{
-	return test_bit(NFS_LAYOUT_RETRY_LAYOUTGET, &lo->plh_flags);
-}
-
 static inline struct pnfs_layout_segment *
 pnfs_get_lseg(struct pnfs_layout_segment *lseg)
 {
@@ -85,6 +85,23 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
 }
 EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
 
+static void nfs_readpage_release(struct nfs_page *req)
+{
+	struct inode *inode = d_inode(req->wb_context->dentry);
+
+	dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
+		(unsigned long long)NFS_FILEID(inode), req->wb_bytes,
+		(long long)req_offset(req));
+
+	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
+		if (PageUptodate(req->wb_page))
+			nfs_readpage_to_fscache(inode, req->wb_page, 0);
+
+		unlock_page(req->wb_page);
+	}
+	nfs_release_request(req);
+}
+
 int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
 		       struct page *page)
 {
@@ -106,7 +123,10 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
 
 	nfs_pageio_init_read(&pgio, inode, false,
 			     &nfs_async_read_completion_ops);
-	nfs_pageio_add_request(&pgio, new);
+	if (!nfs_pageio_add_request(&pgio, new)) {
+		nfs_list_remove_request(new);
+		nfs_readpage_release(new);
+	}
 	nfs_pageio_complete(&pgio);
 
 	/* It doesn't make sense to do mirrored reads! */
@@ -115,24 +135,7 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
 	pgm = &pgio.pg_mirrors[0];
 	NFS_I(inode)->read_io += pgm->pg_bytes_written;
 
-	return 0;
-}
-
-static void nfs_readpage_release(struct nfs_page *req)
-{
-	struct inode *inode = d_inode(req->wb_context->dentry);
-
-	dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
-		(unsigned long long)NFS_FILEID(inode), req->wb_bytes,
-		(long long)req_offset(req));
-
-	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
-		if (PageUptodate(req->wb_page))
-			nfs_readpage_to_fscache(inode, req->wb_page, 0);
-
-		unlock_page(req->wb_page);
-	}
-	nfs_release_request(req);
+	return pgio.pg_error < 0 ? pgio.pg_error : 0;
 }
 
 static void nfs_page_group_set_uptodate(struct nfs_page *req)
@@ -361,6 +364,8 @@ readpage_async_filler(void *data, struct page *page)
 	if (len < PAGE_CACHE_SIZE)
 		zero_user_segment(page, len, PAGE_CACHE_SIZE);
 	if (!nfs_pageio_add_request(desc->pgio, new)) {
+		nfs_list_remove_request(new);
+		nfs_readpage_release(new);
 		error = desc->pgio->pg_error;
 		goto out_unlock;
 	}
@@ -545,12 +545,22 @@ try_again:
 	return head;
 }
 
+static void nfs_write_error_remove_page(struct nfs_page *req)
+{
+	nfs_unlock_request(req);
+	nfs_end_page_writeback(req);
+	nfs_release_request(req);
+	generic_error_remove_page(page_file_mapping(req->wb_page),
+				  req->wb_page);
+}
+
 /*
  * Find an associated nfs write request, and prepare to flush it out
  * May return an error if the user signalled nfs_wait_on_request().
  */
 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
-				struct page *page, bool nonblock)
+				struct page *page, bool nonblock,
+				bool launder)
 {
 	struct nfs_page *req;
 	int ret = 0;
@@ -567,8 +577,21 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 
 	ret = 0;
 	if (!nfs_pageio_add_request(pgio, req)) {
-		nfs_redirty_request(req);
 		ret = pgio->pg_error;
+		/*
+		 * Remove the problematic req upon fatal errors
+		 * in launder case, while other dirty pages can
+		 * still be around until they get flushed.
+		 */
+		if (nfs_error_is_fatal(ret)) {
+			nfs_context_set_write_error(req->wb_context, ret);
+			if (launder) {
+				nfs_write_error_remove_page(req);
+				goto out;
+			}
+		}
+		nfs_redirty_request(req);
+		ret = -EAGAIN;
 	} else
 		nfs_add_stats(page_file_mapping(page)->host,
 				NFSIOS_WRITEPAGES, 1);
@@ -576,12 +599,14 @@ out:
 	return ret;
 }
 
-static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
+static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
+			    struct nfs_pageio_descriptor *pgio, bool launder)
 {
 	int ret;
 
 	nfs_pageio_cond_complete(pgio, page_file_index(page));
-	ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
+	ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE,
+				   launder);
 	if (ret == -EAGAIN) {
 		redirty_page_for_writepage(wbc, page);
 		ret = 0;
@@ -592,7 +617,9 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, st
 /*
  * Write an mmapped page to the server.
  */
-static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
+static int nfs_writepage_locked(struct page *page,
+				struct writeback_control *wbc,
+				bool launder)
 {
 	struct nfs_pageio_descriptor pgio;
 	struct inode *inode = page_file_mapping(page)->host;
@@ -601,7 +628,7 @@ static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc
 	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
 	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc),
 				false, &nfs_async_write_completion_ops);
-	err = nfs_do_writepage(page, wbc, &pgio);
+	err = nfs_do_writepage(page, wbc, &pgio, launder);
 	nfs_pageio_complete(&pgio);
 	if (err < 0)
 		return err;
@@ -614,7 +641,7 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc)
 {
 	int ret;
 
-	ret = nfs_writepage_locked(page, wbc);
+	ret = nfs_writepage_locked(page, wbc, false);
 	unlock_page(page);
 	return ret;
 }
@@ -623,7 +650,7 @@ static int nfs_writepages_callback(struct page *page, struct writeback_control *
 {
 	int ret;
 
-	ret = nfs_do_writepage(page, wbc, data);
+	ret = nfs_do_writepage(page, wbc, data, false);
 	unlock_page(page);
 	return ret;
 }
@@ -1911,7 +1938,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
 /*
  * Write back all requests on one page - we do this before reading it.
  */
-int nfs_wb_page(struct inode *inode, struct page *page)
+int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder)
 {
 	loff_t range_start = page_file_offset(page);
 	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
@@ -1928,7 +1955,7 @@ int nfs_wb_page(struct inode *inode, struct page *page)
 	for (;;) {
 		wait_on_page_writeback(page);
 		if (clear_page_dirty_for_io(page)) {
-			ret = nfs_writepage_locked(page, &wbc);
+			ret = nfs_writepage_locked(page, &wbc, launder);
 			if (ret < 0)
 				goto out_error;
 			continue;
@@ -517,12 +517,24 @@ extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned
  */
 extern int nfs_sync_inode(struct inode *inode);
 extern int nfs_wb_all(struct inode *inode);
-extern int nfs_wb_page(struct inode *inode, struct page* page);
+extern int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder);
 extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
 extern int nfs_commit_inode(struct inode *, int);
 extern struct nfs_commit_data *nfs_commitdata_alloc(void);
 extern void nfs_commit_free(struct nfs_commit_data *data);
+
+static inline int
+nfs_wb_launder_page(struct inode *inode, struct page *page)
+{
+	return nfs_wb_single_page(inode, page, true);
+}
+
+static inline int
+nfs_wb_page(struct inode *inode, struct page *page)
+{
+	return nfs_wb_single_page(inode, page, false);
+}
+
 static inline int
 nfs_have_writebacks(struct inode *inode)
 {
@@ -1375,6 +1375,7 @@ enum {
 	NFS_IOHDR_ERROR = 0,
 	NFS_IOHDR_EOF,
 	NFS_IOHDR_REDO,
+	NFS_IOHDR_STAT,
 };
 
 struct nfs_pgio_header {
@@ -1454,6 +1455,7 @@ struct nfs_commit_data {
 	const struct rpc_call_ops *mds_ops;
 	const struct nfs_commit_completion_ops *completion_ops;
 	int (*commit_done_cb) (struct rpc_task *task, struct nfs_commit_data *data);
+	unsigned long flags;
 };
 
 struct nfs_pgio_completion_ops {