[PATCH] read_mapping_page for address space
Add read_mapping_page(), which is used for callers that pass mapping->a_ops->readpage as the filler for read_cache_page(). This removes some duplication from filesystem code.

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
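The whole patch is mechanical: the new helper (added to include/linux/pagemap.h in the second-to-last hunk) supplies mapping->a_ops->readpage as the filler itself, so every call site collapses to one line. A minimal sketch of the pattern, taken from the hunks below:

	/* The helper as introduced in include/linux/pagemap.h: */
	static inline struct page *read_mapping_page(struct address_space *mapping,
						     unsigned long index, void *data)
	{
		filler_t *filler = (filler_t *)mapping->a_ops->readpage;
		return read_cache_page(mapping, index, filler, data);
	}

	/* Before: each caller cast and passed the filler by hand. */
	page = read_cache_page(mapping, n,
			       (filler_t *)mapping->a_ops->readpage, NULL);

	/* After: no cast and one argument fewer at the call site. */
	page = read_mapping_page(mapping, n, NULL);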
parent c330dda908
commit 090d2b185d

22 changed files with 38 additions and 56 deletions
@@ -185,9 +185,7 @@ static struct page *afs_dir_get_page(struct inode *dir, unsigned long index)
 
 	_enter("{%lu},%lu", dir->i_ino, index);
 
-	page = read_cache_page(dir->i_mapping,index,
-			       (filler_t *) dir->i_mapping->a_ops->readpage,
-			       NULL);
+	page = read_mapping_page(dir->i_mapping, index, NULL);
 	if (!IS_ERR(page)) {
 		wait_on_page_locked(page);
 		kmap(page);
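A pattern worth noting, unchanged by this patch: read_cache_page(), and therefore read_mapping_page(), can return a page whose read is still in flight, which is why the callers in the hunks below keep waiting on the page and, in some cases, checking the uptodate flag themselves. The recurring shape is roughly:

	page = read_mapping_page(mapping, index, NULL);
	if (!IS_ERR(page)) {
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			/* treat as an I/O error */;
	}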
@@ -63,7 +63,6 @@ unsigned long afs_mntpt_expiry_timeout = 20;
 int afs_mntpt_check_symlink(struct afs_vnode *vnode)
 {
 	struct page *page;
-	filler_t *filler;
 	size_t size;
 	char *buf;
 	int ret;
@@ -71,10 +70,7 @@ int afs_mntpt_check_symlink(struct afs_vnode *vnode)
 	_enter("{%u,%u}", vnode->fid.vnode, vnode->fid.unique);
 
 	/* read the contents of the symlink into the pagecache */
-	filler = (filler_t *) AFS_VNODE_TO_I(vnode)->i_mapping->a_ops->readpage;
-
-	page = read_cache_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0,
-			       filler, NULL);
+	page = read_mapping_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0, NULL);
 	if (IS_ERR(page)) {
 		ret = PTR_ERR(page);
 		goto out;
@@ -160,7 +156,6 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
 	struct page *page = NULL;
 	size_t size;
 	char *buf, *devname = NULL, *options = NULL;
-	filler_t *filler;
 	int ret;
 
 	kenter("{%s}", mntpt->d_name.name);
@@ -182,9 +177,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
 		goto error;
 
 	/* read the contents of the AFS special symlink */
-	filler = (filler_t *)mntpt->d_inode->i_mapping->a_ops->readpage;
-
-	page = read_cache_page(mntpt->d_inode->i_mapping, 0, filler, NULL);
+	page = read_mapping_page(mntpt->d_inode->i_mapping, 0, NULL);
 	if (IS_ERR(page)) {
 		ret = PTR_ERR(page);
 		goto error;
@@ -181,9 +181,7 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
 		struct page *page = NULL;
 
 		if (blocknr + i < devsize) {
-			page = read_cache_page(mapping, blocknr + i,
-				(filler_t *)mapping->a_ops->readpage,
-				NULL);
+			page = read_mapping_page(mapping, blocknr + i, NULL);
 			/* synchronous error? */
 			if (IS_ERR(page))
 				page = NULL;
@@ -159,8 +159,7 @@ fail:
 static struct page * ext2_get_page(struct inode *dir, unsigned long n)
 {
 	struct address_space *mapping = dir->i_mapping;
-	struct page *page = read_cache_page(mapping, n,
-				(filler_t*)mapping->a_ops->readpage, NULL);
+	struct page *page = read_mapping_page(mapping, n, NULL);
 	if (!IS_ERR(page)) {
 		wait_on_page_locked(page);
 		kmap(page);
@@ -71,8 +71,7 @@ vxfs_get_page(struct address_space *mapping, u_long n)
 {
 	struct page * pp;
 
-	pp = read_cache_page(mapping, n,
-			(filler_t*)mapping->a_ops->readpage, NULL);
+	pp = read_mapping_page(mapping, n, NULL);
 
 	if (!IS_ERR(pp)) {
 		wait_on_page_locked(pp);
@@ -280,7 +280,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
 	block = off >> PAGE_CACHE_SHIFT;
 	node->page_offset = off & ~PAGE_CACHE_MASK;
 	for (i = 0; i < tree->pages_per_bnode; i++) {
-		page = read_cache_page(mapping, block++, (filler_t *)mapping->a_ops->readpage, NULL);
+		page = read_mapping_page(mapping, block++, NULL);
 		if (IS_ERR(page))
 			goto fail;
 		if (PageError(page)) {
@@ -59,7 +59,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
 	unlock_new_inode(tree->inode);
 
 	mapping = tree->inode->i_mapping;
-	page = read_cache_page(mapping, 0, (filler_t *)mapping->a_ops->readpage, NULL);
+	page = read_mapping_page(mapping, 0, NULL);
 	if (IS_ERR(page))
 		goto free_tree;
 
@@ -31,8 +31,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *ma
 	dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
 	mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
 	mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
-	page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
-			       (filler_t *)mapping->a_ops->readpage, NULL);
+	page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
 	pptr = kmap(page);
 	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
 	i = offset % 32;
@@ -72,8 +71,8 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *ma
 		offset += PAGE_CACHE_BITS;
 		if (offset >= size)
 			break;
-		page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
-				       (filler_t *)mapping->a_ops->readpage, NULL);
+		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
+					 NULL);
 		curr = pptr = kmap(page);
 		if ((size ^ offset) / PAGE_CACHE_BITS)
 			end = pptr + PAGE_CACHE_BITS / 32;
@@ -119,8 +118,8 @@ found:
 	set_page_dirty(page);
 	kunmap(page);
 	offset += PAGE_CACHE_BITS;
-	page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
-			       (filler_t *)mapping->a_ops->readpage, NULL);
+	page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
+				 NULL);
 	pptr = kmap(page);
 	curr = pptr;
 	end = pptr + PAGE_CACHE_BITS / 32;
@@ -167,7 +166,7 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
 	mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
 	mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
 	pnr = offset / PAGE_CACHE_BITS;
-	page = read_cache_page(mapping, pnr, (filler_t *)mapping->a_ops->readpage, NULL);
+	page = read_mapping_page(mapping, pnr, NULL);
 	pptr = kmap(page);
 	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
 	end = pptr + PAGE_CACHE_BITS / 32;
@@ -199,7 +198,7 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
 			break;
 		set_page_dirty(page);
 		kunmap(page);
-		page = read_cache_page(mapping, ++pnr, (filler_t *)mapping->a_ops->readpage, NULL);
+		page = read_mapping_page(mapping, ++pnr, NULL);
 		pptr = kmap(page);
 		curr = pptr;
 		end = pptr + PAGE_CACHE_BITS / 32;
@@ -440,7 +440,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
 	block = off >> PAGE_CACHE_SHIFT;
 	node->page_offset = off & ~PAGE_CACHE_MASK;
 	for (i = 0; i < tree->pages_per_bnode; block++, i++) {
-		page = read_cache_page(mapping, block, (filler_t *)mapping->a_ops->readpage, NULL);
+		page = read_mapping_page(mapping, block, NULL);
 		if (IS_ERR(page))
 			goto fail;
 		if (PageError(page)) {
@@ -38,7 +38,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
 		goto free_tree;
 
 	mapping = tree->inode->i_mapping;
-	page = read_cache_page(mapping, 0, (filler_t *)mapping->a_ops->readpage, NULL);
+	page = read_mapping_page(mapping, 0, NULL);
 	if (IS_ERR(page))
 		goto free_tree;
 
@@ -632,10 +632,9 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
 		}
 		SetPageUptodate(page);
 	} else {
-		page = read_cache_page(mapping, page_index,
-				       (filler_t *)mapping->a_ops->readpage, NULL);
+		page = read_mapping_page(mapping, page_index, NULL);
 		if (IS_ERR(page) || !PageUptodate(page)) {
-			jfs_err("read_cache_page failed!");
+			jfs_err("read_mapping_page failed!");
 			return NULL;
 		}
 		lock_page(page);
@@ -60,8 +60,7 @@ static int dir_commit_chunk(struct page *page, unsigned from, unsigned to)
 static struct page * dir_get_page(struct inode *dir, unsigned long n)
 {
 	struct address_space *mapping = dir->i_mapping;
-	struct page *page = read_cache_page(mapping, n,
-				(filler_t*)mapping->a_ops->readpage, NULL);
+	struct page *page = read_mapping_page(mapping, n, NULL);
 	if (!IS_ERR(page)) {
 		wait_on_page_locked(page);
 		kmap(page);
@@ -2577,8 +2577,7 @@ static char *page_getlink(struct dentry * dentry, struct page **ppage)
 {
 	struct page * page;
 	struct address_space *mapping = dentry->d_inode->i_mapping;
-	page = read_cache_page(mapping, 0, (filler_t *)mapping->a_ops->readpage,
-				NULL);
+	page = read_mapping_page(mapping, 0, NULL);
 	if (IS_ERR(page))
 		goto sync_fail;
 	wait_on_page_locked(page);
@@ -86,8 +86,7 @@ static inline void ntfs_unmap_page(struct page *page)
 static inline struct page *ntfs_map_page(struct address_space *mapping,
 		unsigned long index)
 {
-	struct page *page = read_cache_page(mapping, index,
-			(filler_t*)mapping->a_ops->readpage, NULL);
+	struct page *page = read_mapping_page(mapping, index, NULL);
 
 	if (!IS_ERR(page)) {
 		wait_on_page_locked(page);
@@ -2529,8 +2529,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
 	end >>= PAGE_CACHE_SHIFT;
 	/* If there is a first partial page, need to do it the slow way. */
 	if (start_ofs) {
-		page = read_cache_page(mapping, idx,
-				(filler_t*)mapping->a_ops->readpage, NULL);
+		page = read_mapping_page(mapping, idx, NULL);
 		if (IS_ERR(page)) {
 			ntfs_error(vol->sb, "Failed to read first partial "
 					"page (sync error, index 0x%lx).", idx);
@@ -2600,8 +2599,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
 	}
 	/* If there is a last partial page, need to do it the slow way. */
 	if (end_ofs) {
-		page = read_cache_page(mapping, idx,
-				(filler_t*)mapping->a_ops->readpage, NULL);
+		page = read_mapping_page(mapping, idx, NULL);
 		if (IS_ERR(page)) {
 			ntfs_error(vol->sb, "Failed to read last partial page "
 					"(sync error, index 0x%lx).", idx);
@@ -231,8 +231,7 @@ do_non_resident_extend:
 		 * Read the page. If the page is not present, this will zero
 		 * the uninitialized regions for us.
 		 */
-		page = read_cache_page(mapping, index,
-				(filler_t*)mapping->a_ops->readpage, NULL);
+		page = read_mapping_page(mapping, index, NULL);
 		if (IS_ERR(page)) {
 			err = PTR_ERR(page);
 			goto init_err_out;
@@ -64,8 +64,7 @@ static char *ocfs2_page_getlink(struct dentry * dentry,
 {
 	struct page * page;
 	struct address_space *mapping = dentry->d_inode->i_mapping;
-	page = read_cache_page(mapping, 0,
-			(filler_t *)mapping->a_ops->readpage, NULL);
+	page = read_mapping_page(mapping, 0, NULL);
 	if (IS_ERR(page))
 		goto sync_fail;
 	wait_on_page_locked(page);
@@ -499,8 +499,8 @@ unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
 	struct address_space *mapping = bdev->bd_inode->i_mapping;
 	struct page *page;
 
-	page = read_cache_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
-			(filler_t *)mapping->a_ops->readpage, NULL);
+	page = read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
+			NULL);
 	if (!IS_ERR(page)) {
 		wait_on_page_locked(page);
 		if (!PageUptodate(page))
@@ -452,8 +452,7 @@ static struct page *reiserfs_get_page(struct inode *dir, unsigned long n)
 	/* We can deadlock if we try to free dentries,
 	   and an unlink/rmdir has just occured - GFP_NOFS avoids this */
 	mapping_set_gfp_mask(mapping, GFP_NOFS);
-	page = read_cache_page(mapping, n,
-			       (filler_t *) mapping->a_ops->readpage, NULL);
+	page = read_mapping_page(mapping, n, NULL);
 	if (!IS_ERR(page)) {
 		wait_on_page_locked(page);
 		kmap(page);
@@ -53,8 +53,7 @@ static int dir_commit_chunk(struct page *page, unsigned from, unsigned to)
 static struct page * dir_get_page(struct inode *dir, unsigned long n)
 {
 	struct address_space *mapping = dir->i_mapping;
-	struct page *page = read_cache_page(mapping, n,
-				(filler_t*)mapping->a_ops->readpage, NULL);
+	struct page *page = read_mapping_page(mapping, n, NULL);
 	if (!IS_ERR(page)) {
 		wait_on_page_locked(page);
 		kmap(page);
@@ -99,6 +99,13 @@ extern struct page * read_cache_page(struct address_space *mapping,
 extern int read_cache_pages(struct address_space *mapping,
 			struct list_head *pages, filler_t *filler, void *data);
 
+static inline struct page *read_mapping_page(struct address_space *mapping,
+					     unsigned long index, void *data)
+{
+	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
+	return read_cache_page(mapping, index, filler, data);
+}
+
 int add_to_page_cache(struct page *page, struct address_space *mapping,
 				unsigned long index, gfp_t gfp_mask);
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
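One detail worth noting in the helper above: the data argument is forwarded straight through to the filler, and read_cache_page() hands it to readpage() as its first parameter (normally the struct file *). Most call sites in this patch pass NULL, but sys_swapon() in the final hunk passes swap_file so the filesystem's readpage() still sees the open swap file.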
@@ -1477,8 +1477,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
 		error = -EINVAL;
 		goto bad_swap;
 	}
-	page = read_cache_page(mapping, 0,
-			(filler_t *)mapping->a_ops->readpage, swap_file);
+	page = read_mapping_page(mapping, 0, swap_file);
 	if (IS_ERR(page)) {
 		error = PTR_ERR(page);
 		goto bad_swap;