dax: Rename some functions
Remove mentions of 'radix' and 'radix tree'. Simplify some names by
dropping the word 'mapping'.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
This commit is contained in:
parent 5ec2d99de7
commit a77d19f46a
1 changed file with 42 additions and 44 deletions
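For reference, the function renames this patch applies, collected from the hunks below:

    dax_radix_pfn()                  -> dax_to_pfn()
    dax_radix_locked_entry()         -> dax_make_locked()
    dax_radix_order()                -> dax_entry_order()
    dax_radix_end_pfn()              -> dax_end_pfn()
    __dax_invalidate_mapping_entry() -> __dax_invalidate_entry()
    dax_insert_mapping_entry()       -> dax_insert_entry()
    dax_mapping_entry_mkclean()      -> dax_entry_mkclean()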
fs/dax.c
@@ -74,18 +74,18 @@ fs_initcall(init_dax_wait_table);
 #define DAX_ZERO_PAGE (1UL << 2)
 #define DAX_EMPTY     (1UL << 3)
 
-static unsigned long dax_radix_pfn(void *entry)
+static unsigned long dax_to_pfn(void *entry)
 {
         return xa_to_value(entry) >> DAX_SHIFT;
 }
 
-static void *dax_radix_locked_entry(unsigned long pfn, unsigned long flags)
+static void *dax_make_locked(unsigned long pfn, unsigned long flags)
 {
         return xa_mk_value(flags | ((unsigned long)pfn << DAX_SHIFT) |
                         DAX_LOCKED);
 }
 
-static unsigned int dax_radix_order(void *entry)
+static unsigned int dax_entry_order(void *entry)
 {
         if (xa_to_value(entry) & DAX_PMD)
                 return PMD_SHIFT - PAGE_SHIFT;
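This first hunk is the core of the rename: the helpers that pack a pfn plus flag bits into an XArray value entry. Below is a minimal userspace sketch of that round trip. The DAX_LOCKED/DAX_PMD bit positions and the DAX_SHIFT value are assumptions (only DAX_ZERO_PAGE and DAX_EMPTY appear in the hunk), and xa_mk_value()/xa_to_value() are stand-ins that mimic the XArray's tagged-value encoding:

    #include <assert.h>
    #include <stdio.h>

    /* Assumed flag layout; only DAX_ZERO_PAGE and DAX_EMPTY are visible above. */
    #define DAX_LOCKED    (1UL << 0)
    #define DAX_PMD       (1UL << 1)
    #define DAX_ZERO_PAGE (1UL << 2)
    #define DAX_EMPTY     (1UL << 3)
    #define DAX_SHIFT     4

    /* Stand-ins for the XArray helpers: a value entry is (v << 1) | 1. */
    static void *xa_mk_value(unsigned long v)
    {
        return (void *)((v << 1) | 1);
    }

    static unsigned long xa_to_value(void *entry)
    {
        return (unsigned long)entry >> 1;
    }

    /* The renamed helpers, bodies as in the hunk above. */
    static unsigned long dax_to_pfn(void *entry)
    {
        return xa_to_value(entry) >> DAX_SHIFT;
    }

    static void *dax_make_locked(unsigned long pfn, unsigned long flags)
    {
        return xa_mk_value(flags | (pfn << DAX_SHIFT) | DAX_LOCKED);
    }

    int main(void)
    {
        void *entry = dax_make_locked(0x1234, DAX_PMD);

        assert(dax_to_pfn(entry) == 0x1234);     /* pfn survives the round trip */
        assert(xa_to_value(entry) & DAX_PMD);    /* flag bit survives */
        assert(xa_to_value(entry) & DAX_LOCKED); /* lock bit is set */
        printf("entry=%p pfn=%#lx\n", entry, dax_to_pfn(entry));
        return 0;
    }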
@@ -113,7 +113,7 @@ static int dax_is_empty_entry(void *entry)
 }
 
 /*
- * DAX radix tree locking
+ * DAX page cache entry locking
  */
 struct exceptional_entry_key {
         struct address_space *mapping;
@@ -217,7 +217,7 @@ static inline void *unlock_slot(struct address_space *mapping, void **slot)
 }
 
 /*
- * Lookup entry in radix tree, wait for it to become unlocked if it is
+ * Lookup entry in page cache, wait for it to become unlocked if it is
  * a DAX entry and return it. The caller must call
  * put_unlocked_mapping_entry() when he decided not to lock the entry or
  * put_locked_mapping_entry() when he locked the entry and now wants to
@@ -299,7 +299,7 @@ static void put_locked_mapping_entry(struct address_space *mapping,
 }
 
 /*
- * Called when we are done with radix tree entry we looked up via
+ * Called when we are done with page cache entry we looked up via
  * get_unlocked_mapping_entry() and which we didn't lock in the end.
  */
 static void put_unlocked_mapping_entry(struct address_space *mapping,
@@ -308,7 +308,7 @@ static void put_unlocked_mapping_entry(struct address_space *mapping,
         if (!entry)
                 return;
 
-        /* We have to wake up next waiter for the radix tree entry lock */
+        /* We have to wake up next waiter for the page cache entry lock */
         dax_wake_mapping_entry_waiter(mapping, index, entry, false);
 }
 
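These three comment-only hunks document the locking protocol that the surrounding helpers implement: look an entry up with get_unlocked_mapping_entry(), then either release it with put_unlocked_mapping_entry() if you decide not to lock it, or lock it and finish with put_locked_mapping_entry(); per the hunk above, the unlocked release path wakes the next waiter via dax_wake_mapping_entry_waiter().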
@@ -324,9 +324,9 @@ static unsigned long dax_entry_size(void *entry)
                 return PAGE_SIZE;
 }
 
-static unsigned long dax_radix_end_pfn(void *entry)
+static unsigned long dax_end_pfn(void *entry)
 {
-        return dax_radix_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
+        return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
 }
 
 /*
@@ -334,8 +334,8 @@ static unsigned long dax_radix_end_pfn(void *entry)
  * 'empty' and 'zero' entries.
  */
 #define for_each_mapped_pfn(entry, pfn) \
-        for (pfn = dax_radix_pfn(entry); \
-                        pfn < dax_radix_end_pfn(entry); pfn++)
+        for (pfn = dax_to_pfn(entry); \
+                        pfn < dax_end_pfn(entry); pfn++)
 
 /*
  * TODO: for reflink+dax we need a way to associate a single page with
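dax_end_pfn() and for_each_mapped_pfn() in the two hunks above walk every pfn an entry covers. A self-contained sketch of that iteration, with assumed constants (4 KiB pages, 2 MiB PMD entries) and a simplified dax_entry_size() that ignores the zero/empty cases the comment mentions:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PMD_SIZE  (512 * PAGE_SIZE) /* 2 MiB, x86-64 style (assumed) */
    #define DAX_PMD   (1UL << 1)        /* assumed bit, as in the earlier sketch */
    #define DAX_SHIFT 4

    static void *xa_mk_value(unsigned long v) { return (void *)((v << 1) | 1); }
    static unsigned long xa_to_value(void *entry) { return (unsigned long)entry >> 1; }

    static unsigned long dax_to_pfn(void *entry)
    {
        return xa_to_value(entry) >> DAX_SHIFT;
    }

    /* Simplified: the real dax_entry_size() also handles empty/zero entries. */
    static unsigned long dax_entry_size(void *entry)
    {
        return (xa_to_value(entry) & DAX_PMD) ? PMD_SIZE : PAGE_SIZE;
    }

    static unsigned long dax_end_pfn(void *entry)
    {
        return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
    }

    #define for_each_mapped_pfn(entry, pfn) \
        for (pfn = dax_to_pfn(entry); pfn < dax_end_pfn(entry); pfn++)

    int main(void)
    {
        void *pte = xa_mk_value(0x100UL << DAX_SHIFT);
        void *pmd = xa_mk_value((0x200UL << DAX_SHIFT) | DAX_PMD);
        unsigned long pfn, n;

        n = 0;
        for_each_mapped_pfn(pte, pfn)
                n++;
        printf("PTE entry maps %lu pfn\n", n);  /* 1 */

        n = 0;
        for_each_mapped_pfn(pmd, pfn)
                n++;
        printf("PMD entry maps %lu pfns\n", n); /* 512 */
        return 0;
    }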
@@ -471,9 +471,9 @@ void dax_unlock_mapping_entry(struct page *page)
 }
 
 /*
- * Find radix tree entry at given index. If it is a DAX entry, return it
- * with the radix tree entry locked. If the radix tree doesn't contain the
- * given index, create an empty entry for the index and return with it locked.
+ * Find page cache entry at given index. If it is a DAX entry, return it
+ * with the entry locked. If the page cache doesn't contain an entry at
+ * that index, add a locked empty entry.
  *
  * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
  * either return that locked entry or will return an error. This error will
@@ -582,10 +582,10 @@ restart:
                                         true);
                 }
 
-                entry = dax_radix_locked_entry(0, size_flag | DAX_EMPTY);
+                entry = dax_make_locked(0, size_flag | DAX_EMPTY);
 
                 err = __radix_tree_insert(&mapping->i_pages, index,
-                                dax_radix_order(entry), entry);
+                                dax_entry_order(entry), entry);
                 radix_tree_preload_end();
                 if (err) {
                         xa_unlock_irq(&mapping->i_pages);
@@ -701,7 +701,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
 }
 EXPORT_SYMBOL_GPL(dax_layout_busy_page);
 
-static int __dax_invalidate_mapping_entry(struct address_space *mapping,
+static int __dax_invalidate_entry(struct address_space *mapping,
                                           pgoff_t index, bool trunc)
 {
         int ret = 0;
@@ -731,12 +731,12 @@ out:
  */
 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
 {
-        int ret = __dax_invalidate_mapping_entry(mapping, index, true);
+        int ret = __dax_invalidate_entry(mapping, index, true);
 
         /*
          * This gets called from truncate / punch_hole path. As such, the caller
          * must hold locks protecting against concurrent modifications of the
-         * radix tree (usually fs-private i_mmap_sem for writing). Since the
+         * page cache (usually fs-private i_mmap_sem for writing). Since the
          * caller has seen a DAX entry for this index, we better find it
          * at that index as well...
          */
@@ -750,7 +750,7 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
                                       pgoff_t index)
 {
-        return __dax_invalidate_mapping_entry(mapping, index, false);
+        return __dax_invalidate_entry(mapping, index, false);
 }
 
 static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
@@ -786,10 +786,9 @@ static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
  * already in the tree, we will skip the insertion and just dirty the PMD as
  * appropriate.
  */
-static void *dax_insert_mapping_entry(struct address_space *mapping,
+static void *dax_insert_entry(struct address_space *mapping,
                                       struct vm_fault *vmf,
-                                      void *entry, pfn_t pfn_t,
-                                      unsigned long flags, bool dirty)
+                                      void *entry, pfn_t pfn_t, unsigned long flags, bool dirty)
 {
         struct radix_tree_root *pages = &mapping->i_pages;
         unsigned long pfn = pfn_t_to_pfn(pfn_t);
@@ -809,7 +808,7 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
         }
 
         xa_lock_irq(pages);
-        new_entry = dax_radix_locked_entry(pfn, flags);
+        new_entry = dax_make_locked(pfn, flags);
         if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
                 dax_disassociate_entry(entry, mapping, false);
                 dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
@@ -817,9 +816,9 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
 
         if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
                 /*
-                 * Only swap our new entry into the radix tree if the current
+                 * Only swap our new entry into the page cache if the current
                  * entry is a zero page or an empty entry. If a normal PTE or
-                 * PMD entry is already in the tree, we leave it alone. This
+                 * PMD entry is already in the cache, we leave it alone. This
                  * means that if we are trying to insert a PTE and the
                  * existing entry is a PMD, we will just leave the PMD in the
                  * tree and dirty it if necessary.
@@ -842,8 +841,8 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
         return entry;
 }
 
-static inline unsigned long
-pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
+static inline
+unsigned long pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
 {
         unsigned long address;
 
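pgoff_address()'s body is not part of this diff; for context, this is the usual linear pgoff-to-virtual-address calculation, sketched here with plain types (the two-field struct layout and PAGE_SHIFT are assumptions for the sketch):

    #include <assert.h>

    #define PAGE_SHIFT 12 /* assumed, x86-64 */

    struct vm_area_struct {
        unsigned long vm_start; /* VMA start address */
        unsigned long vm_pgoff; /* file offset of vm_start, in pages */
    };

    /* Sketch of pgoff_address(): map a file page offset to the user
     * virtual address inside a linear VMA. */
    static unsigned long pgoff_address(unsigned long pgoff, struct vm_area_struct *vma)
    {
        return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
    }

    int main(void)
    {
        struct vm_area_struct vma = { .vm_start = 0x700000000000UL, .vm_pgoff = 16 };

        /* Page 18 of the file sits two pages past vm_start. */
        assert(pgoff_address(18, &vma) == 0x700000000000UL + 2 * 4096);
        return 0;
    }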
@@ -853,8 +852,8 @@ pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
 }
 
 /* Walk all mappings of a given index of a file and writeprotect them */
-static void dax_mapping_entry_mkclean(struct address_space *mapping,
-                                      pgoff_t index, unsigned long pfn)
+static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
+                              unsigned long pfn)
 {
         struct vm_area_struct *vma;
         pte_t pte, *ptep = NULL;
@@ -950,7 +949,7 @@ static int dax_writeback_one(struct dax_device *dax_dev,
          * compare pfns as we must not bail out due to difference in lockbit
          * or entry type.
          */
-        if (dax_radix_pfn(entry2) != dax_radix_pfn(entry))
+        if (dax_to_pfn(entry2) != dax_to_pfn(entry))
                 goto put_unlocked;
         if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
                                 dax_is_zero_entry(entry))) {
@@ -980,10 +979,10 @@ static int dax_writeback_one(struct dax_device *dax_dev,
          * This allows us to flush for PMD_SIZE and not have to worry about
          * partial PMD writebacks.
          */
-        pfn = dax_radix_pfn(entry);
-        size = PAGE_SIZE << dax_radix_order(entry);
+        pfn = dax_to_pfn(entry);
+        size = PAGE_SIZE << dax_entry_order(entry);
 
-        dax_mapping_entry_mkclean(mapping, index, pfn);
+        dax_entry_mkclean(mapping, index, pfn);
         dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
         /*
          * After we have flushed the cache, we can clear the dirty tag. There
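The flush size in dax_writeback_one() falls straight out of the renamed dax_entry_order(): PAGE_SIZE << 0 for a PTE entry, PAGE_SIZE << (PMD_SHIFT - PAGE_SHIFT) for a PMD entry. With assumed x86-64 shifts of 12 and 21, that is 4 KiB versus 4096 << 9 = 2 MiB, one whole huge page:

    #include <stdio.h>

    #define PAGE_SHIFT 12          /* assumed, x86-64 */
    #define PMD_SHIFT  21          /* assumed, x86-64 */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define DAX_PMD    (1UL << 1)  /* assumed bit, as in the earlier sketches */

    /* dax_entry_order() on a raw value, per the first hunk: PMD entries have
     * order PMD_SHIFT - PAGE_SHIFT (9 here); the PTE case is cut off in the
     * hunk and assumed to be 0. */
    static unsigned int dax_entry_order(unsigned long value)
    {
        return (value & DAX_PMD) ? PMD_SHIFT - PAGE_SHIFT : 0;
    }

    int main(void)
    {
        printf("PTE flush: %lu bytes\n", PAGE_SIZE << dax_entry_order(0));
        printf("PMD flush: %lu bytes\n", PAGE_SIZE << dax_entry_order(DAX_PMD));
        return 0;
    }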
@@ -1120,7 +1119,7 @@ static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry,
         pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
         vm_fault_t ret;
 
-        dax_insert_mapping_entry(mapping, vmf, entry, pfn,
+        dax_insert_entry(mapping, vmf, entry, pfn,
                         DAX_ZERO_PAGE, false);
 
         ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
@@ -1431,7 +1430,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
                 if (error < 0)
                         goto error_finish_iomap;
 
-                entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
+                entry = dax_insert_entry(mapping, vmf, entry, pfn,
                                                  0, write && !sync);
 
                 /*
@@ -1511,7 +1510,7 @@ static vm_fault_t dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
                 goto fallback;
 
         pfn = page_to_pfn_t(zero_page);
-        ret = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
+        ret = dax_insert_entry(mapping, vmf, entry, pfn,
                         DAX_PMD | DAX_ZERO_PAGE, false);
 
         ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
@@ -1564,7 +1563,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
          * Make sure that the faulting address's PMD offset (color) matches
          * the PMD offset from the start of the file. This is necessary so
          * that a PMD range in the page table overlaps exactly with a PMD
-         * range in the radix tree.
+         * range in the page cache.
          */
         if ((vmf->pgoff & PG_PMD_COLOUR) !=
             ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
@@ -1632,7 +1631,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
                 if (error < 0)
                         goto finish_iomap;
 
-                entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
+                entry = dax_insert_entry(mapping, vmf, entry, pfn,
                                                 DAX_PMD, write && !sync);
 
                 /*
@@ -1725,15 +1724,14 @@ vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
 }
 EXPORT_SYMBOL_GPL(dax_iomap_fault);
 
-/**
+/*
  * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
  * @vmf: The description of the fault
  * @pe_size: Size of entry to be inserted
  * @pfn: PFN to insert
  *
- * This function inserts writeable PTE or PMD entry into page tables for mmaped
- * DAX file. It takes care of marking corresponding radix tree entry as dirty
- * as well.
+ * This function inserts a writeable PTE or PMD entry into the page tables
+ * for an mmaped DAX file. It also marks the page cache entry as dirty.
  */
 static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf,
                                 enum page_entry_size pe_size,