dax: Finish fault completely when loading holes
The only case when we do not finish the page fault completely is when we are loading hole pages into a radix tree. Avoid this special case and finish the fault in that case as well inside the DAX fault handler. This will allow for easier iomap handling.

Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent e3fce68cdb
commit f449b936f1
1 changed file with 18 additions and 9 deletions
fs/dax.c | 27 ++++++++++++++++++++---------
fs/dax.c

@@ -539,15 +539,16 @@ int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
  * otherwise it will simply fall out of the page cache under memory
  * pressure without ever having been dirtied.
  */
-static int dax_load_hole(struct address_space *mapping, void *entry,
+static int dax_load_hole(struct address_space *mapping, void **entry,
 			 struct vm_fault *vmf)
 {
 	struct page *page;
+	int ret;
 
 	/* Hole page already exists? Return it... */
-	if (!radix_tree_exceptional_entry(entry)) {
-		vmf->page = entry;
-		return VM_FAULT_LOCKED;
+	if (!radix_tree_exceptional_entry(*entry)) {
+		page = *entry;
+		goto out;
 	}
 
 	/* This will replace locked radix tree entry with a hole page */
@@ -555,8 +556,17 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
 				   vmf->gfp_mask | __GFP_ZERO);
 	if (!page)
 		return VM_FAULT_OOM;
+ out:
 	vmf->page = page;
-	return VM_FAULT_LOCKED;
+	ret = finish_fault(vmf);
+	vmf->page = NULL;
+	*entry = page;
+	if (!ret) {
+		/* Grab reference for PTE that is now referencing the page */
+		get_page(page);
+		return VM_FAULT_NOPAGE;
+	}
+	return ret;
 }
 
 static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
@@ -1163,8 +1173,8 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 	case IOMAP_UNWRITTEN:
 	case IOMAP_HOLE:
 		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
-			vmf_ret = dax_load_hole(mapping, entry, vmf);
-			break;
+			vmf_ret = dax_load_hole(mapping, &entry, vmf);
+			goto finish_iomap;
 		}
 		/*FALLTHRU*/
 	default:
@@ -1185,8 +1195,7 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		}
 	}
  unlock_entry:
-	if (vmf_ret != VM_FAULT_LOCKED || error)
-		put_locked_mapping_entry(mapping, vmf->pgoff, entry);
+	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
  out:
 	if (error == -ENOMEM)
 		return VM_FAULT_OOM | major;
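For context, a minimal user-space sketch of the control flow this patch gives dax_load_hole(). Everything here (the struct definitions, the finish_fault() and get_page() stubs, the VM_FAULT_NOPAGE value, the _model suffix) is a simplified stand-in invented for illustration; only the order of operations follows the hunks above.

#include <stdio.h>

#define VM_FAULT_NOPAGE 0x0100		/* stand-in value, not the kernel's */

struct page { int refcount; };
struct vm_fault { struct page *page; };

/* Stand-in for finish_fault(): pretend the PTE was installed successfully. */
static int finish_fault(struct vm_fault *vmf)
{
	(void)vmf;
	return 0;
}

static void get_page(struct page *p)
{
	p->refcount++;
}

/*
 * Model of the reworked tail of dax_load_hole(): complete the fault here
 * (via finish_fault()), take the reference that the new PTE holds, and
 * report VM_FAULT_NOPAGE instead of handing a locked page back to the
 * caller with VM_FAULT_LOCKED.
 */
static int dax_load_hole_model(struct page *hole_page, struct vm_fault *vmf,
			       struct page **entry)
{
	int ret;

	vmf->page = hole_page;
	ret = finish_fault(vmf);
	vmf->page = NULL;
	*entry = hole_page;
	if (!ret) {
		/* Grab reference for the PTE that now references the page */
		get_page(hole_page);
		return VM_FAULT_NOPAGE;
	}
	return ret;
}

int main(void)
{
	struct page hole = { .refcount = 1 };
	struct vm_fault vmf = { .page = NULL };
	struct page *entry = NULL;
	int ret = dax_load_hole_model(&hole, &vmf, &entry);

	printf("ret=0x%x, refcount=%d (extra reference held by the PTE)\n",
	       ret, hole.refcount);
	return 0;
}

With the fault completed inside the helper, dax_iomap_fault() no longer needs the VM_FAULT_LOCKED special case: the hole path jumps straight to finish_iomap and the locked mapping entry is dropped unconditionally, which is what the last two hunks do.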