erofs: adapt managed inode operations into folios
This patch gets rid of erofs_try_to_free_cached_page() and folds it
into .release_folio(). It also moves managed inode operations into
zdata.c, which simplifies the code a bit. No logic changes.

Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Reviewed-by: Yue Hu <huyue2@coolpad.com>
Link: https://lore.kernel.org/r/20230526201459.128169-5-hsiangkao@linux.alibaba.com
parent 967c28b23f
commit 7b4e372c36

3 changed files with 53 additions and 71 deletions
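For background, the whole change rides on the folio private-data API: each
managed-cache folio carries a pointer to its owning pcluster as folio private
data, and ->release_folio must detach that pointer before the VM may free the
folio. A minimal sketch of the pattern follows, with hypothetical demo_*
names; it is illustrative only, not code from this commit:

/*
 * Illustrative only: the ->release_folio contract this patch adopts.
 * The VM calls it to reclaim a page-cache folio that still has private
 * data attached; returning true means the data was detached and the
 * folio may be freed, returning false keeps the folio in the cache.
 */
#include <linux/pagemap.h>

struct demo_owner;					/* stands in for z_erofs_pcluster */
static bool demo_owner_busy(struct demo_owner *owner);	/* hypothetical helper */

static bool demo_release_folio(struct folio *folio, gfp_t gfp)
{
	struct demo_owner *owner = folio_get_private(folio);

	if (!folio_test_private(folio))
		return true;		/* nothing attached, safe to free */
	if (demo_owner_busy(owner))
		return false;		/* still in use, keep the folio */
	folio_detach_private(folio);	/* clear PG_private, drop the ref
					 * folio_attach_private() once took */
	return true;
}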
fs/erofs/internal.h

@@ -500,7 +500,6 @@ int __init z_erofs_init_zip_subsystem(void);
 void z_erofs_exit_zip_subsystem(void);
 int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
				       struct erofs_workgroup *egrp);
-int erofs_try_to_free_cached_page(struct page *page);
 int z_erofs_load_lz4_config(struct super_block *sb,
			    struct erofs_super_block *dsb,
			    struct z_erofs_lz4_cfgs *lz4, int len);
@@ -511,6 +510,7 @@ void erofs_put_pcpubuf(void *ptr);
 int erofs_pcpubuf_growsize(unsigned int nrpages);
 void __init erofs_pcpubuf_init(void);
 void erofs_pcpubuf_exit(void);
+int erofs_init_managed_cache(struct super_block *sb);
 #else
 static inline void erofs_shrinker_register(struct super_block *sb) {}
 static inline void erofs_shrinker_unregister(struct super_block *sb) {}
@@ -530,6 +530,7 @@ static inline int z_erofs_load_lz4_config(struct super_block *sb,
 }
 static inline void erofs_pcpubuf_init(void) {}
 static inline void erofs_pcpubuf_exit(void) {}
+static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; }
 #endif	/* !CONFIG_EROFS_FS_ZIP */
 
 #ifdef CONFIG_EROFS_FS_ZIP_LZMA
fs/erofs/super.c

@@ -599,68 +599,6 @@ static int erofs_fc_parse_param(struct fs_context *fc,
 	return 0;
 }
 
-#ifdef CONFIG_EROFS_FS_ZIP
-static const struct address_space_operations managed_cache_aops;
-
-static bool erofs_managed_cache_release_folio(struct folio *folio, gfp_t gfp)
-{
-	bool ret = true;
-	struct address_space *const mapping = folio->mapping;
-
-	DBG_BUGON(!folio_test_locked(folio));
-	DBG_BUGON(mapping->a_ops != &managed_cache_aops);
-
-	if (folio_test_private(folio))
-		ret = erofs_try_to_free_cached_page(&folio->page);
-
-	return ret;
-}
-
-/*
- * It will be called only on inode eviction. In case that there are still some
- * decompression requests in progress, wait with rescheduling for a bit here.
- * We could introduce an extra locking instead but it seems unnecessary.
- */
-static void erofs_managed_cache_invalidate_folio(struct folio *folio,
-						 size_t offset, size_t length)
-{
-	const size_t stop = length + offset;
-
-	DBG_BUGON(!folio_test_locked(folio));
-
-	/* Check for potential overflow in debug mode */
-	DBG_BUGON(stop > folio_size(folio) || stop < length);
-
-	if (offset == 0 && stop == folio_size(folio))
-		while (!erofs_managed_cache_release_folio(folio, GFP_NOFS))
-			cond_resched();
-}
-
-static const struct address_space_operations managed_cache_aops = {
-	.release_folio = erofs_managed_cache_release_folio,
-	.invalidate_folio = erofs_managed_cache_invalidate_folio,
-};
-
-static int erofs_init_managed_cache(struct super_block *sb)
-{
-	struct erofs_sb_info *const sbi = EROFS_SB(sb);
-	struct inode *const inode = new_inode(sb);
-
-	if (!inode)
-		return -ENOMEM;
-
-	set_nlink(inode, 1);
-	inode->i_size = OFFSET_MAX;
-
-	inode->i_mapping->a_ops = &managed_cache_aops;
-	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
-	sbi->managed_cache = inode;
-	return 0;
-}
-#else
-static int erofs_init_managed_cache(struct super_block *sb) { return 0; }
-#endif
-
 static struct inode *erofs_nfs_get_inode(struct super_block *sb,
					 u64 ino, u32 generation)
 {
fs/erofs/zdata.c

@@ -665,29 +665,72 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
 	return 0;
 }
 
-int erofs_try_to_free_cached_page(struct page *page)
+static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
 {
-	struct z_erofs_pcluster *const pcl = (void *)page_private(page);
-	int ret, i;
+	struct z_erofs_pcluster *pcl = folio_get_private(folio);
+	bool ret;
+	int i;
+
+	if (!folio_test_private(folio))
+		return true;
 
 	if (!erofs_workgroup_try_to_freeze(&pcl->obj, 1))
-		return 0;
+		return false;
 
-	ret = 0;
+	ret = false;
 	DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
 	for (i = 0; i < pcl->pclusterpages; ++i) {
-		if (pcl->compressed_bvecs[i].page == page) {
+		if (pcl->compressed_bvecs[i].page == &folio->page) {
 			WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
-			ret = 1;
+			ret = true;
 			break;
 		}
 	}
 	erofs_workgroup_unfreeze(&pcl->obj, 1);
+
 	if (ret)
-		detach_page_private(page);
+		folio_detach_private(folio);
 	return ret;
 }
 
+/*
+ * It will be called only on inode eviction. In case that there are still some
+ * decompression requests in progress, wait with rescheduling for a bit here.
+ * An extra lock could be introduced instead but it seems unnecessary.
+ */
+static void z_erofs_cache_invalidate_folio(struct folio *folio,
+					   size_t offset, size_t length)
+{
+	const size_t stop = length + offset;
+
+	/* Check for potential overflow in debug mode */
+	DBG_BUGON(stop > folio_size(folio) || stop < length);
+
+	if (offset == 0 && stop == folio_size(folio))
+		while (!z_erofs_cache_release_folio(folio, GFP_NOFS))
+			cond_resched();
+}
+
+static const struct address_space_operations z_erofs_cache_aops = {
+	.release_folio = z_erofs_cache_release_folio,
+	.invalidate_folio = z_erofs_cache_invalidate_folio,
+};
+
+int erofs_init_managed_cache(struct super_block *sb)
+{
+	struct inode *const inode = new_inode(sb);
+
+	if (!inode)
+		return -ENOMEM;
+
+	set_nlink(inode, 1);
+	inode->i_size = OFFSET_MAX;
+	inode->i_mapping->a_ops = &z_erofs_cache_aops;
+	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
+	EROFS_SB(sb)->managed_cache = inode;
+	return 0;
+}
+
 static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe,
				   struct z_erofs_bvec *bvec)
 {
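The compressed_bvecs[] scan in z_erofs_cache_release_folio() is only safe
because the pcluster's reference count is frozen first. Conceptually, the
freeze is a compare-and-swap of the count against a sentinel, roughly as in
the sketch below (a paraphrase with hypothetical demo_* names and an
illustrative sentinel value, not a quote of the erofs_workgroup helpers):

#include <linux/atomic.h>
#include <linux/limits.h>

#define DEMO_LOCKED_MAGIC	INT_MIN	/* illustrative sentinel value */

/* Succeeds only if the count is exactly @expect, locking out new users. */
static bool demo_try_to_freeze(atomic_t *refcnt, int expect)
{
	return atomic_cmpxchg(refcnt, expect, DEMO_LOCKED_MAGIC) == expect;
}

static void demo_unfreeze(atomic_t *refcnt, int orig)
{
	atomic_set(refcnt, orig);	/* publish the original count again */
}

With a scheme like this, .release_folio can detach the folio only while it
holds the pcluster frozen; if anything else still references the pcluster,
it returns false and the eviction path in z_erofs_cache_invalidate_folio()
simply retries with cond_resched().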