ext4: New inode allocation for FLEX_BG meta-data groups.
This patch mostly controls the way inodes are allocated in order to make ialloc aware of flex_bg block group grouping. It achieves this by bypassing the Orlov allocator when block group meta-data are packed together through mke2fs. Since the impact on the block allocator is minimal, this patch should have little or no effect on other block allocation algorithms.

By controlling the inode allocation, it can basically control where the initial search for new blocks begins and thus indirectly manipulate the block allocator. This allocator favors data and meta-data locality, so the disk will gradually be filled from block group zero upward. This helps improve performance by reducing seek time.

Since the group of inode tables within one flex_bg is treated as one giant inode table, uninitialized block groups do not need to partially initialize as many inode tables as with Orlov, which helps fsck times as filesystem usage goes up.

Signed-off-by: Jose R. Santos <jrs@us.ibm.com>
Signed-off-by: Valerie Clement <valerie.clement@bull.net>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
parent 736603ab29
commit 772cb7c83b
6 changed files with 209 additions and 1 deletion
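For orientation before the diff excerpt below: a flex group is simply a power-of-two run of consecutive block groups, and the patch keeps aggregate free-block and free-inode counts per flex group so the allocator can reason about a region of the disk rather than a single block group. The following is a minimal, self-contained sketch of that mapping and aggregation, not kernel code; the names struct flex_group_info, flex_group_of() and build_flex_counters() are invented for illustration, while the patch itself uses ext4_flex_group() and sbi->s_flex_groups[], as visible in the hunks below.

/*
 * Minimal sketch (not kernel code): how block groups map onto flex groups
 * and how per-flex-group free counts are aggregated.  All names here are
 * hypothetical; the real patch uses ext4_flex_group() and
 * sbi->s_flex_groups[].
 */
#include <stdlib.h>

struct flex_group_info {
	unsigned long free_blocks;	/* sum over all member block groups */
	unsigned long free_inodes;
};

/* A flex group is 2^log_groups_per_flex consecutive block groups. */
static unsigned long flex_group_of(unsigned long block_group,
				   unsigned int log_groups_per_flex)
{
	return block_group >> log_groups_per_flex;
}

/* Fold per-block-group free counts into per-flex-group totals. */
static struct flex_group_info *
build_flex_counters(const unsigned long *bg_free_blocks,
		    const unsigned long *bg_free_inodes,
		    unsigned long nr_block_groups,
		    unsigned int log_groups_per_flex)
{
	unsigned long groups_per_flex = 1UL << log_groups_per_flex;
	unsigned long nr_flex = (nr_block_groups + groups_per_flex - 1) /
				groups_per_flex;
	struct flex_group_info *fg = calloc(nr_flex, sizeof(*fg));
	unsigned long bg;

	if (!fg)
		return NULL;
	for (bg = 0; bg < nr_block_groups; bg++) {
		unsigned long f = flex_group_of(bg, log_groups_per_flex);

		fg[f].free_blocks += bg_free_blocks[bg];
		fg[f].free_inodes += bg_free_inodes[bg];
	}
	return fg;
}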
@@ -809,6 +809,13 @@ do_more:
 	spin_unlock(sb_bgl_lock(sbi, block_group));
 	percpu_counter_add(&sbi->s_freeblocks_counter, count);
 
+	if (sbi->s_log_groups_per_flex) {
+		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
+		spin_lock(sb_bgl_lock(sbi, flex_group));
+		sbi->s_flex_groups[flex_group].free_blocks += count;
+		spin_unlock(sb_bgl_lock(sbi, flex_group));
+	}
+
 	/* We dirtied the bitmap block */
 	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
 	err = ext4_journal_dirty_metadata(handle, bitmap_bh);
@@ -1883,6 +1890,13 @@ allocated:
 	spin_unlock(sb_bgl_lock(sbi, group_no));
 	percpu_counter_sub(&sbi->s_freeblocks_counter, num);
 
+	if (sbi->s_log_groups_per_flex) {
+		ext4_group_t flex_group = ext4_flex_group(sbi, group_no);
+		spin_lock(sb_bgl_lock(sbi, flex_group));
+		sbi->s_flex_groups[flex_group].free_blocks -= num;
+		spin_unlock(sb_bgl_lock(sbi, flex_group));
+	}
+
 	BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
 	err = ext4_journal_dirty_metadata(handle, gdp_bh);
 	if (!fatal)
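The two hunks above only maintain the per-flex-group free_blocks counter on every block free and allocation; the payoff is in the ialloc changes of this commit (not shown on this page), where new inodes are placed by flex group rather than by individual block group. As a rough, hedged illustration of that idea, here is a simplified group picker; pick_flex_group() and its thresholds are invented for this sketch and are deliberately simpler than the flex-aware allocator the patch adds to fs/ext4/ialloc.c.

/*
 * Hedged sketch: consume the per-flex-group counters maintained above.
 * Stay in the parent directory's flex group while it still has room,
 * otherwise fall back to the emptiest flex group that has a free inode.
 */
struct flex_counts {
	unsigned long free_blocks;
	unsigned long free_inodes;
};

static long pick_flex_group(const struct flex_counts *fg,
			    unsigned long nr_flex_groups,
			    unsigned long parent_flex,
			    unsigned long blocks_per_flex)
{
	unsigned long i;
	long best = -1;

	/* Prefer locality: keep new inodes near the parent directory. */
	if (fg[parent_flex].free_inodes &&
	    fg[parent_flex].free_blocks > blocks_per_flex / 2)
		return (long)parent_flex;

	/*
	 * Otherwise pick the flex group with the most free blocks that
	 * still has at least one free inode.
	 */
	for (i = 0; i < nr_flex_groups; i++) {
		if (!fg[i].free_inodes)
			continue;
		if (best < 0 || fg[i].free_blocks > fg[best].free_blocks)
			best = (long)i;
	}
	return best;	/* -1 means no flex group has a free inode */
}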