mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-06-23 07:01:23 +00:00
enforce ->sync_fs is only called for rw superblock
Make sure a superblock really is writeable by checking MS_RDONLY under s_umount. sync_filesystems needed some re-arrangement for that, but all but one sync_filesystem caller had the correct locking already so that we could add that check there. cachefiles grew s_umount locking. I've also added a WARN_ON to sync_filesystem to assert this for future callers. Signed-off-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
This commit is contained in:
parent
e500475338
commit
5af7926ff3
5 changed files with 27 additions and 25 deletions
|
@ -394,9 +394,6 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
|
||||||
struct btrfs_root *root = btrfs_sb(sb);
|
struct btrfs_root *root = btrfs_sb(sb);
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (sb->s_flags & MS_RDONLY)
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
if (!wait) {
|
if (!wait) {
|
||||||
filemap_flush(root->fs_info->btree_inode->i_mapping);
|
filemap_flush(root->fs_info->btree_inode->i_mapping);
|
||||||
return 0;
|
return 0;
|
||||||
|
|
|
@ -354,7 +354,9 @@ static void cachefiles_sync_cache(struct fscache_cache *_cache)
|
||||||
/* make sure all pages pinned by operations on behalf of the netfs are
|
/* make sure all pages pinned by operations on behalf of the netfs are
|
||||||
* written to disc */
|
* written to disc */
|
||||||
cachefiles_begin_secure(cache, &saved_cred);
|
cachefiles_begin_secure(cache, &saved_cred);
|
||||||
|
down_read(&cache->mnt->mnt_sb->s_umount);
|
||||||
ret = sync_filesystem(cache->mnt->mnt_sb);
|
ret = sync_filesystem(cache->mnt->mnt_sb);
|
||||||
|
up_read(&cache->mnt->mnt_sb->s_umount);
|
||||||
cachefiles_end_secure(cache, saved_cred);
|
cachefiles_end_secure(cache, saved_cred);
|
||||||
|
|
||||||
if (ret == -EIO)
|
if (ret == -EIO)
|
||||||
|
|
|
@ -64,8 +64,8 @@ static int reiserfs_statfs(struct dentry *dentry, struct kstatfs *buf);
|
||||||
|
|
||||||
static int reiserfs_sync_fs(struct super_block *s, int wait)
|
static int reiserfs_sync_fs(struct super_block *s, int wait)
|
||||||
{
|
{
|
||||||
if (!(s->s_flags & MS_RDONLY)) {
|
|
||||||
struct reiserfs_transaction_handle th;
|
struct reiserfs_transaction_handle th;
|
||||||
|
|
||||||
reiserfs_write_lock(s);
|
reiserfs_write_lock(s);
|
||||||
if (!journal_begin(&th, s, 1))
|
if (!journal_begin(&th, s, 1))
|
||||||
if (!journal_end_sync(&th, s, 1))
|
if (!journal_end_sync(&th, s, 1))
|
||||||
|
@ -73,9 +73,6 @@ static int reiserfs_sync_fs(struct super_block *s, int wait)
|
||||||
s->s_dirt = 0; /* Even if it's not true.
|
s->s_dirt = 0; /* Even if it's not true.
|
||||||
* We'll loop forever in sync_supers otherwise */
|
* We'll loop forever in sync_supers otherwise */
|
||||||
reiserfs_write_unlock(s);
|
reiserfs_write_unlock(s);
|
||||||
} else {
|
|
||||||
s->s_dirt = 0;
|
|
||||||
}
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
23
fs/sync.c
23
fs/sync.c
|
@ -51,6 +51,18 @@ int sync_filesystem(struct super_block *sb)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* We need to be protected against the filesystem going from
|
||||||
|
* r/o to r/w or vice versa.
|
||||||
|
*/
|
||||||
|
WARN_ON(!rwsem_is_locked(&sb->s_umount));
|
||||||
|
|
||||||
|
/*
|
||||||
|
* No point in syncing out anything if the filesystem is read-only.
|
||||||
|
*/
|
||||||
|
if (sb->s_flags & MS_RDONLY)
|
||||||
|
return 0;
|
||||||
|
|
||||||
ret = __sync_filesystem(sb, 0);
|
ret = __sync_filesystem(sb, 0);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
@ -79,25 +91,22 @@ static void sync_filesystems(int wait)
|
||||||
|
|
||||||
mutex_lock(&mutex); /* Could be down_interruptible */
|
mutex_lock(&mutex); /* Could be down_interruptible */
|
||||||
spin_lock(&sb_lock);
|
spin_lock(&sb_lock);
|
||||||
list_for_each_entry(sb, &super_blocks, s_list) {
|
list_for_each_entry(sb, &super_blocks, s_list)
|
||||||
if (sb->s_flags & MS_RDONLY)
|
|
||||||
continue;
|
|
||||||
sb->s_need_sync = 1;
|
sb->s_need_sync = 1;
|
||||||
}
|
|
||||||
|
|
||||||
restart:
|
restart:
|
||||||
list_for_each_entry(sb, &super_blocks, s_list) {
|
list_for_each_entry(sb, &super_blocks, s_list) {
|
||||||
if (!sb->s_need_sync)
|
if (!sb->s_need_sync)
|
||||||
continue;
|
continue;
|
||||||
sb->s_need_sync = 0;
|
sb->s_need_sync = 0;
|
||||||
if (sb->s_flags & MS_RDONLY)
|
|
||||||
continue; /* hm. Was remounted r/o meanwhile */
|
|
||||||
sb->s_count++;
|
sb->s_count++;
|
||||||
spin_unlock(&sb_lock);
|
spin_unlock(&sb_lock);
|
||||||
|
|
||||||
down_read(&sb->s_umount);
|
down_read(&sb->s_umount);
|
||||||
if (sb->s_root)
|
if (!(sb->s_flags & MS_RDONLY) && sb->s_root)
|
||||||
__sync_filesystem(sb, wait);
|
__sync_filesystem(sb, wait);
|
||||||
up_read(&sb->s_umount);
|
up_read(&sb->s_umount);
|
||||||
|
|
||||||
/* restart only when sb is no longer on the list */
|
/* restart only when sb is no longer on the list */
|
||||||
spin_lock(&sb_lock);
|
spin_lock(&sb_lock);
|
||||||
if (__put_super_and_need_restart(sb))
|
if (__put_super_and_need_restart(sb))
|
||||||
|
|
|
@ -447,9 +447,6 @@ static int ubifs_sync_fs(struct super_block *sb, int wait)
|
||||||
if (!wait)
|
if (!wait)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
if (sb->s_flags & MS_RDONLY)
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* VFS calls '->sync_fs()' before synchronizing all dirty inodes and
|
* VFS calls '->sync_fs()' before synchronizing all dirty inodes and
|
||||||
* pages, so synchronize them first, then commit the journal. Strictly
|
* pages, so synchronize them first, then commit the journal. Strictly
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue