mirror of
https://github.com/Fishwaldo/linux-bl808.git
synced 2025-06-17 20:25:19 +00:00
btrfs: assert that extent buffers are write locked instead of only locked
We currently use lockdep_assert_held() at btrfs_assert_tree_locked(), and that checks that we hold a lock either in read mode or write mode. However in all contexts we use btrfs_assert_tree_locked(), we actually want to check if we are holding a write lock on the extent buffer's rw semaphore - it would be a bug if in any of those contexts we were holding a read lock instead. So change btrfs_assert_tree_locked() to use lockdep_assert_held_write() instead and, to make it more explicit, rename btrfs_assert_tree_locked() to btrfs_assert_tree_write_locked(), so that it's clear we want to check we are holding a write lock. For now there are no contexts where we want to assert that we must have a read lock, but in case that is needed in the future, we can add a new helper function that just calls out lockdep_assert_held_read(). Signed-off-by: Filipe Manana <fdmanana@suse.com> Reviewed-by: David Sterba <dsterba@suse.com> Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
parent
8ef9dc0f14
commit
49d0c6424c
5 changed files with 15 additions and 14 deletions
|
@@ -1036,7 +1036,7 @@ static int btree_set_page_dirty(struct page *page)
|
|||
BUG_ON(!eb);
|
||||
BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
|
||||
BUG_ON(!atomic_read(&eb->refs));
|
||||
btrfs_assert_tree_locked(eb);
|
||||
btrfs_assert_tree_write_locked(eb);
|
||||
return __set_page_dirty_nobuffers(page);
|
||||
}
|
||||
ASSERT(PagePrivate(page) && page->private);
|
||||
|
@@ -1061,7 +1061,7 @@ static int btree_set_page_dirty(struct page *page)
|
|||
ASSERT(eb);
|
||||
ASSERT(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
|
||||
ASSERT(atomic_read(&eb->refs));
|
||||
btrfs_assert_tree_locked(eb);
|
||||
btrfs_assert_tree_write_locked(eb);
|
||||
free_extent_buffer(eb);
|
||||
|
||||
cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits);
|
||||
|
@@ -1125,7 +1125,7 @@ void btrfs_clean_tree_block(struct extent_buffer *buf)
|
|||
struct btrfs_fs_info *fs_info = buf->fs_info;
|
||||
if (btrfs_header_generation(buf) ==
|
||||
fs_info->running_transaction->transid) {
|
||||
btrfs_assert_tree_locked(buf);
|
||||
btrfs_assert_tree_write_locked(buf);
|
||||
|
||||
if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
|
||||
percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
|
||||
|
@@ -4481,7 +4481,7 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
|
|||
if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
|
||||
return;
|
||||
#endif
|
||||
btrfs_assert_tree_locked(buf);
|
||||
btrfs_assert_tree_write_locked(buf);
|
||||
if (transid != fs_info->generation)
|
||||
WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
|
||||
buf->start, transid, fs_info->generation);
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue