Btrfs: avoid taking the chunk_mutex in do_chunk_alloc
Every time we try to allocate disk space we check whether we can pre-emptively allocate a chunk, but in the common case we don't allocate anything, so there is no sense in taking the chunk_mutex at all. So instead, if we are allocating a chunk, mark it in the space_info so we don't get two people trying to allocate at the same time. Thanks,

Signed-off-by: Josef Bacik <josef@redhat.com>
Reviewed-by: Liu Bo <liubo2009@cn.fujitsu.com>
This commit is contained in:
parent 0d399205ed
commit 6d74119f1a
2 changed files with 28 additions and 6 deletions
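As a rough illustration of the locking pattern the patch introduces (not the kernel code itself), the following is a minimal userspace analogue built on pthreads. Every name in it (space_lock, chunk_mutex, chunk_alloc, full, chunks, should_alloc_chunk, do_chunk_alloc, worker) is an illustrative stand-in for the corresponding btrfs structures, and the "allocation" is just a counter increment.

/*
 * Userspace sketch of the pattern this patch adds to do_chunk_alloc():
 * the common "nothing to allocate" path only takes a cheap spinlock,
 * and a chunk_alloc flag ensures that when two threads do want to
 * allocate, the second one merely waits on the mutex and then rechecks
 * instead of allocating a duplicate chunk.  Illustrative names only.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t space_lock;          /* stands in for space_info->lock */
static pthread_mutex_t chunk_mutex = PTHREAD_MUTEX_INITIALIZER;
static int chunk_alloc;                        /* "an allocation is in flight" */
static int full;                               /* "no more chunks can be added" */
static int chunks;                             /* pretend allocated-chunk count */

static int should_alloc_chunk(void)
{
        return chunks < 1;                     /* toy policy: allocate exactly once */
}

static int do_chunk_alloc(void)
{
        int wait_for_alloc = 0;

again:
        pthread_spin_lock(&space_lock);
        if (full || !should_alloc_chunk()) {
                /* common case: return without ever touching chunk_mutex */
                pthread_spin_unlock(&space_lock);
                return 0;
        } else if (chunk_alloc) {
                wait_for_alloc = 1;            /* someone else is already allocating */
        } else {
                chunk_alloc = 1;               /* we are the allocator */
        }
        pthread_spin_unlock(&space_lock);

        pthread_mutex_lock(&chunk_mutex);
        if (wait_for_alloc) {
                /* the other allocator holds the mutex for the whole allocation,
                 * so once we own it they are done; drop it and recheck */
                pthread_mutex_unlock(&chunk_mutex);
                wait_for_alloc = 0;
                goto again;
        }

        chunks++;                              /* the actual allocation work */

        pthread_spin_lock(&space_lock);
        chunk_alloc = 0;
        pthread_spin_unlock(&space_lock);
        pthread_mutex_unlock(&chunk_mutex);
        return 1;
}

static void *worker(void *arg)
{
        printf("thread %ld: do_chunk_alloc() = %d\n", (long)arg, do_chunk_alloc());
        return NULL;
}

int main(void)
{
        pthread_t t[4];
        long i;

        pthread_spin_init(&space_lock, PTHREAD_PROCESS_PRIVATE);
        for (i = 0; i < 4; i++)
                pthread_create(&t[i], NULL, worker, (void *)i);
        for (i = 0; i < 4; i++)
                pthread_join(t[i], NULL);
        printf("chunks allocated: %d\n", chunks);
        return 0;
}

In this sketch, as in the patch below, exactly one thread performs the allocation while racing threads serialize on the mutex only when an allocation is actually in flight and then loop back to recheck; the common path costs only the spinlock.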
fs/btrfs/ctree.h

@@ -740,8 +740,10 @@ struct btrfs_space_info {
 	 */
 	unsigned long reservation_progress;
 
-	int full;		/* indicates that we cannot allocate any more
+	int full:1;		/* indicates that we cannot allocate any more
 				   chunks for this space */
+	int chunk_alloc:1;	/* set if we are allocating a chunk */
+
 	int force_alloc;	/* set if we need to force a chunk alloc for
 				   this space */
 

fs/btrfs/extent-tree.c

@@ -3039,6 +3039,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
 	found->bytes_may_use = 0;
 	found->full = 0;
 	found->force_alloc = CHUNK_ALLOC_NO_FORCE;
+	found->chunk_alloc = 0;
 	*space_info = found;
 	list_add_rcu(&found->list, &info->space_info);
 	atomic_set(&found->caching_threads, 0);

@@ -3318,10 +3319,9 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 {
 	struct btrfs_space_info *space_info;
 	struct btrfs_fs_info *fs_info = extent_root->fs_info;
+	int wait_for_alloc = 0;
 	int ret = 0;
 
-	mutex_lock(&fs_info->chunk_mutex);
-
 	flags = btrfs_reduce_alloc_profile(extent_root, flags);
 
 	space_info = __find_space_info(extent_root->fs_info, flags);

@@ -3332,21 +3332,40 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 	}
 	BUG_ON(!space_info);
 
+again:
 	spin_lock(&space_info->lock);
 	if (space_info->force_alloc)
 		force = space_info->force_alloc;
 	if (space_info->full) {
 		spin_unlock(&space_info->lock);
-		goto out;
+		return 0;
 	}
 
 	if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
 		spin_unlock(&space_info->lock);
-		goto out;
+		return 0;
+	} else if (space_info->chunk_alloc) {
+		wait_for_alloc = 1;
+	} else {
+		space_info->chunk_alloc = 1;
 	}
 
 	spin_unlock(&space_info->lock);
 
+	mutex_lock(&fs_info->chunk_mutex);
+
+	/*
+	 * The chunk_mutex is held throughout the entirety of a chunk
+	 * allocation, so once we've acquired the chunk_mutex we know that the
+	 * other guy is done and we need to recheck and see if we should
+	 * allocate.
+	 */
+	if (wait_for_alloc) {
+		mutex_unlock(&fs_info->chunk_mutex);
+		wait_for_alloc = 0;
+		goto again;
+	}
+
 	/*
 	 * If we have mixed data/metadata chunks we want to make sure we keep
 	 * allocating mixed chunks instead of individual chunks.

@@ -3372,9 +3391,10 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 		space_info->full = 1;
 	else
 		ret = 1;
+
 	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
+	space_info->chunk_alloc = 0;
 	spin_unlock(&space_info->lock);
-out:
 	mutex_unlock(&extent_root->fs_info->chunk_mutex);
 	return ret;
 }