dax,iomap: Add helper dax_iomap_zero() to zero a range
Add a helper dax_iomap_zero() to zero a range. This patch basically merges __dax_zero_page_range() and iomap_dax_zero().

Suggested-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20200228163456.1587-7-vgoyal@redhat.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 0a23f9ffa5
commit 4f3b4f161d
3 changed files with 12 additions and 30 deletions
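Note: with this change, a filesystem that zeroes a sub-page range on a DAX inode reaches dax_iomap_zero() through the generic iomap_zero_range() path; the iomap now carries the bdev, dax_dev and sector information that __dax_zero_page_range() used to take as separate arguments. The sketch below only illustrates that call path; example_zero_partial_block() and example_iomap_ops are hypothetical names, not part of this patch.

/*
 * Sketch only: example_zero_partial_block() and example_iomap_ops are
 * made-up names; iomap_zero_range() is the existing generic entry point.
 */
#include <linux/fs.h>
#include <linux/iomap.h>

extern const struct iomap_ops example_iomap_ops;	/* assumed to be provided by the fs */

/*
 * Zero a partial block, e.g. while truncating. For an IS_DAX() inode,
 * iomap_zero_range_actor() now calls dax_iomap_zero(pos, offset, bytes,
 * iomap) directly instead of going through iomap_dax_zero() and
 * __dax_zero_page_range().
 */
static int example_zero_partial_block(struct inode *inode, loff_t pos,
				      loff_t len, bool *did_zero)
{
	return iomap_zero_range(inode, pos, len, did_zero, &example_iomap_ops);
}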
fs/dax.c (16 changed lines)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1038,10 +1038,10 @@ static vm_fault_t dax_load_hole(struct xa_state *xas,
 	return ret;
 }
 
-int __dax_zero_page_range(struct block_device *bdev,
-		struct dax_device *dax_dev, sector_t sector,
-		unsigned int offset, unsigned int size)
+int dax_iomap_zero(loff_t pos, unsigned offset, unsigned size,
+		   struct iomap *iomap)
 {
+	sector_t sector = iomap_sector(iomap, pos & PAGE_MASK);
 	pgoff_t pgoff;
 	long rc, id;
 	void *kaddr;
@@ -1052,16 +1052,17 @@ int __dax_zero_page_range(struct block_device *bdev,
 			IS_ALIGNED(size, PAGE_SIZE))
 		page_aligned = true;
 
-	rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
+	rc = bdev_dax_pgoff(iomap->bdev, sector, PAGE_SIZE, &pgoff);
 	if (rc)
 		return rc;
 
 	id = dax_read_lock();
 
 	if (page_aligned)
-		rc = dax_zero_page_range(dax_dev, pgoff, size >> PAGE_SHIFT);
+		rc = dax_zero_page_range(iomap->dax_dev, pgoff,
+					 size >> PAGE_SHIFT);
 	else
-		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
+		rc = dax_direct_access(iomap->dax_dev, pgoff, 1, &kaddr, NULL);
 	if (rc < 0) {
 		dax_read_unlock(id);
 		return rc;
@@ -1069,12 +1070,11 @@ int __dax_zero_page_range(struct block_device *bdev,
 
 	if (!page_aligned) {
 		memset(kaddr + offset, 0, size);
-		dax_flush(dax_dev, kaddr + offset, size);
+		dax_flush(iomap->dax_dev, kaddr + offset, size);
 	}
 	dax_read_unlock(id);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(__dax_zero_page_range);
 
 static loff_t
 dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -974,13 +974,6 @@ static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
 	return iomap_write_end(inode, pos, bytes, bytes, page, iomap, srcmap);
 }
 
-static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
-		struct iomap *iomap)
-{
-	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
-			iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
-}
-
 static loff_t
 iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
 		void *data, struct iomap *iomap, struct iomap *srcmap)
@@ -1000,7 +993,7 @@ iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
 	bytes = min_t(loff_t, PAGE_SIZE - offset, count);
 
 	if (IS_DAX(inode))
-		status = iomap_dax_zero(pos, offset, bytes, iomap);
+		status = dax_iomap_zero(pos, offset, bytes, iomap);
 	else
 		status = iomap_zero(inode, pos, offset, bytes, iomap,
 				srcmap);
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -13,6 +13,7 @@
 typedef unsigned long dax_entry_t;
 
 struct iomap_ops;
+struct iomap;
 struct dax_device;
 struct dax_operations {
 	/*
@@ -214,20 +215,8 @@ vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
 				      pgoff_t index);
-
-#ifdef CONFIG_FS_DAX
-int __dax_zero_page_range(struct block_device *bdev,
-		struct dax_device *dax_dev, sector_t sector,
-		unsigned int offset, unsigned int length);
-#else
-static inline int __dax_zero_page_range(struct block_device *bdev,
-		struct dax_device *dax_dev, sector_t sector,
-		unsigned int offset, unsigned int length)
-{
-	return -ENXIO;
-}
-#endif
+int dax_iomap_zero(loff_t pos, unsigned offset, unsigned size,
+			struct iomap *iomap);
 
 static inline bool dax_mapping(struct address_space *mapping)
 {
 	return mapping->host && IS_DAX(mapping->host);