ntfs: use zero_user_page

Use zero_user_page() instead of open-coding it.

[akpm@linux-foundation.org: kmap-type fixes]
Signed-off-by: Nate Diller <nate.diller@gmail.com>
Acked-by: Anton Altaparmakov <aia21@cantab.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Nate Diller 2007-05-12 10:36:54 -07:00 committed by Linus Torvalds
parent 6d690dcac9
commit e3bf460f3e
2 changed files with 26 additions and 69 deletions

View file

@@ -606,11 +606,8 @@ do_next_page:
ntfs_submit_bh_for_read(bh);
*wait_bh++ = bh;
} else {
u8 *kaddr = kmap_atomic(page, KM_USER0);
memset(kaddr + bh_offset(bh), 0,
blocksize);
kunmap_atomic(kaddr, KM_USER0);
flush_dcache_page(page);
zero_user_page(page, bh_offset(bh),
blocksize, KM_USER0);
set_buffer_uptodate(bh);
}
}
@@ -685,12 +682,9 @@ map_buffer_cached:
ntfs_submit_bh_for_read(bh);
*wait_bh++ = bh;
} else {
u8 *kaddr = kmap_atomic(page,
KM_USER0);
memset(kaddr + bh_offset(bh),
0, blocksize);
kunmap_atomic(kaddr, KM_USER0);
flush_dcache_page(page);
zero_user_page(page,
bh_offset(bh),
blocksize, KM_USER0);
set_buffer_uptodate(bh);
}
}
@@ -708,11 +702,8 @@ map_buffer_cached:
*/
if (bh_end <= pos || bh_pos >= end) {
if (!buffer_uptodate(bh)) {
u8 *kaddr = kmap_atomic(page, KM_USER0);
memset(kaddr + bh_offset(bh), 0,
blocksize);
kunmap_atomic(kaddr, KM_USER0);
flush_dcache_page(page);
zero_user_page(page, bh_offset(bh),
blocksize, KM_USER0);
set_buffer_uptodate(bh);
}
mark_buffer_dirty(bh);
@@ -751,10 +742,8 @@ map_buffer_cached:
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
} else if (!buffer_uptodate(bh)) {
u8 *kaddr = kmap_atomic(page, KM_USER0);
memset(kaddr + bh_offset(bh), 0, blocksize);
kunmap_atomic(kaddr, KM_USER0);
flush_dcache_page(page);
zero_user_page(page, bh_offset(bh), blocksize,
KM_USER0);
set_buffer_uptodate(bh);
}
continue;
@@ -878,11 +867,8 @@ rl_not_mapped_enoent:
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
} else if (!buffer_uptodate(bh)) {
u8 *kaddr = kmap_atomic(page, KM_USER0);
memset(kaddr + bh_offset(bh), 0,
blocksize);
kunmap_atomic(kaddr, KM_USER0);
flush_dcache_page(page);
zero_user_page(page, bh_offset(bh),
blocksize, KM_USER0);
set_buffer_uptodate(bh);
}
continue;
@@ -1137,16 +1123,12 @@ rl_not_mapped_enoent:
* to zero the overflowing region.
*/
if (unlikely(bh_pos + blocksize > initialized_size)) {
u8 *kaddr;
int ofs = 0;
if (likely(bh_pos < initialized_size))
ofs = initialized_size - bh_pos;
kaddr = kmap_atomic(page, KM_USER0);
memset(kaddr + bh_offset(bh) + ofs, 0,
blocksize - ofs);
kunmap_atomic(kaddr, KM_USER0);
flush_dcache_page(page);
zero_user_page(page, bh_offset(bh) + ofs,
blocksize - ofs, KM_USER0);
}
} else /* if (unlikely(!buffer_uptodate(bh))) */
err = -EIO;
@@ -1286,11 +1268,8 @@ rl_not_mapped_enoent:
if (PageUptodate(page))
set_buffer_uptodate(bh);
else {
u8 *kaddr = kmap_atomic(page, KM_USER0);
memset(kaddr + bh_offset(bh), 0,
blocksize);
kunmap_atomic(kaddr, KM_USER0);
flush_dcache_page(page);
zero_user_page(page, bh_offset(bh),
blocksize, KM_USER0);
set_buffer_uptodate(bh);
}
}
@@ -1350,9 +1329,7 @@ err_out:
len = PAGE_CACHE_SIZE;
if (len > bytes)
len = bytes;
kaddr = kmap_atomic(*pages, KM_USER0);
memset(kaddr, 0, len);
kunmap_atomic(kaddr, KM_USER0);
zero_user_page(*pages, 0, len, KM_USER0);
}
goto out;
}
@@ -1473,9 +1450,7 @@ err_out:
len = PAGE_CACHE_SIZE;
if (len > bytes)
len = bytes;
kaddr = kmap_atomic(*pages, KM_USER0);
memset(kaddr, 0, len);
kunmap_atomic(kaddr, KM_USER0);
zero_user_page(*pages, 0, len, KM_USER0);
}
goto out;
}