Mirror of https://github.com/Fishwaldo/Star64_linux.git, synced 2025-06-26 16:41:25 +00:00
Merge branch 'splice' of git://brick.kernel.dk/data/git/linux-2.6-block
* 'splice' of git://brick.kernel.dk/data/git/linux-2.6-block:
  [PATCH] splice: make the read-side do batched page lookups
  [PATCH] Add find_get_pages_contig(): contiguous variant of find_get_pages()
  [PATCH] splice: switch to using page_cache_readahead()
commit 7c1c3eb855
3 changed files with 110 additions and 41 deletions
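Taken together, the three patches rework the splice read side: __generic_file_splice_read() now looks up the whole requested range in one go with the new find_get_pages_contig(), allocates pages only for the holes left in that run, and calls page_cache_readahead() instead of do_page_cache_readahead(). The following is a minimal, self-contained userspace sketch of that lookup strategy, not kernel code: cache[], find_contig() and the fill loop are invented stand-ins for the page-cache radix tree, find_get_pages_contig() and the hole-filling loop in the fs/splice.c hunks further down.

/* Illustrative userspace model only; not kernel code. */
#include <stdio.h>
#include <stdlib.h>

#define NR_SLOTS 8

static int *cache[NR_SLOTS];            /* NULL means "no page at this index" */

/* Copy out a contiguous run of present entries, stopping at the first hole. */
static unsigned int find_contig(unsigned int index, unsigned int nr, int **out)
{
        unsigned int i;

        for (i = 0; i < nr && index + i < NR_SLOTS; i++) {
                if (!cache[index + i])
                        break;          /* first hole ends the run */
                out[i] = cache[index + i];
        }
        return i;
}

int main(void)
{
        int *batch[4];
        unsigned int found, i, index = 2, nr = 4;

        /* Populate indexes 2 and 3 only; 4 and 5 start out as holes. */
        cache[2] = calloc(1, sizeof(int));
        cache[3] = calloc(1, sizeof(int));

        found = find_contig(index, nr, batch);
        printf("batched lookup found %u of %u pages\n", found, nr);

        /* Allocate the rest, the way the reworked read side fills in holes. */
        for (i = found; i < nr; i++) {
                cache[index + i] = calloc(1, sizeof(int));
                batch[i] = cache[index + i];
        }
        printf("after hole filling the map holds %u of %u pages\n", i, nr);
        return 0;
}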
117  fs/splice.c
@@ -279,7 +279,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
         pgoff_t index, end_index;
         loff_t isize;
         size_t total_len;
-        int error;
+        int error, page_nr;
         struct splice_pipe_desc spd = {
                 .pages = pages,
                 .partial = partial,
@@ -299,15 +299,64 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
          * read-ahead if this is a non-zero offset (we are likely doing small
          * chunk splice and the page is already there) for a single page.
          */
-        if (!loff || spd.nr_pages > 1)
-                do_page_cache_readahead(mapping, in, index, spd.nr_pages);
+        if (!loff || nr_pages > 1)
+                page_cache_readahead(mapping, &in->f_ra, in, index, nr_pages);
 
         /*
          * Now fill in the holes:
          */
         error = 0;
         total_len = 0;
-        for (spd.nr_pages = 0; spd.nr_pages < nr_pages; spd.nr_pages++, index++) {
+
+        /*
+         * Lookup the (hopefully) full range of pages we need.
+         */
+        spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, pages);
+
+        /*
+         * If find_get_pages_contig() returned fewer pages than we needed,
+         * allocate the rest.
+         */
+        index += spd.nr_pages;
+        while (spd.nr_pages < nr_pages) {
+                /*
+                 * Page could be there, find_get_pages_contig() breaks on
+                 * the first hole.
+                 */
+                page = find_get_page(mapping, index);
+                if (!page) {
+                        /*
+                         * page didn't exist, allocate one.
+                         */
+                        page = page_cache_alloc_cold(mapping);
+                        if (!page)
+                                break;
+
+                        error = add_to_page_cache_lru(page, mapping, index,
+                                        mapping_gfp_mask(mapping));
+                        if (unlikely(error)) {
+                                page_cache_release(page);
+                                break;
+                        }
+                        /*
+                         * add_to_page_cache() locks the page, unlock it
+                         * to avoid convoluting the logic below even more.
+                         */
+                        unlock_page(page);
+                }
+
+                pages[spd.nr_pages++] = page;
+                index++;
+        }
+
+        /*
+         * Now loop over the map and see if we need to start IO on any
+         * pages, fill in the partial map, etc.
+         */
+        index = *ppos >> PAGE_CACHE_SHIFT;
+        nr_pages = spd.nr_pages;
+        spd.nr_pages = 0;
+        for (page_nr = 0; page_nr < nr_pages; page_nr++) {
                 unsigned int this_len;
 
                 if (!len)
@@ -317,28 +366,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
                  * this_len is the max we'll use from this page
                  */
                 this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
-find_page:
-                /*
-                 * lookup the page for this index
-                 */
-                page = find_get_page(mapping, index);
-                if (!page) {
-                        /*
-                         * page didn't exist, allocate one
-                         */
-                        page = page_cache_alloc_cold(mapping);
-                        if (!page)
-                                break;
-
-                        error = add_to_page_cache_lru(page, mapping, index,
-                                        mapping_gfp_mask(mapping));
-                        if (unlikely(error)) {
-                                page_cache_release(page);
-                                break;
-                        }
-
-                        goto readpage;
-                }
+                page = pages[page_nr];
 
                 /*
                  * If the page isn't uptodate, we may need to start io on it
@@ -360,7 +388,6 @@ find_page:
                          */
                         if (!page->mapping) {
                                 unlock_page(page);
-                                page_cache_release(page);
                                 break;
                         }
                         /*
@@ -371,16 +398,20 @@ find_page:
                                 goto fill_it;
                         }
 
-readpage:
                         /*
                          * need to read in the page
                          */
                         error = mapping->a_ops->readpage(in, page);
 
                         if (unlikely(error)) {
-                                page_cache_release(page);
+                                /*
+                                 * We really should re-lookup the page here,
+                                 * but it complicates things a lot. Instead
+                                 * lets just do what we already stored, and
+                                 * we'll get it the next time we are called.
+                                 */
                                 if (error == AOP_TRUNCATED_PAGE)
-                                        goto find_page;
+                                        error = 0;
                                 break;
                         }
 
@@ -389,10 +420,8 @@ readpage:
                          */
                         isize = i_size_read(mapping->host);
                         end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
-                        if (unlikely(!isize || index > end_index)) {
-                                page_cache_release(page);
+                        if (unlikely(!isize || index > end_index))
                                 break;
-                        }
 
                         /*
                          * if this is the last page, see if we need to shrink
@ -400,27 +429,33 @@ readpage:
|
||||||
*/
|
*/
|
||||||
if (end_index == index) {
|
if (end_index == index) {
|
||||||
loff = PAGE_CACHE_SIZE - (isize & ~PAGE_CACHE_MASK);
|
loff = PAGE_CACHE_SIZE - (isize & ~PAGE_CACHE_MASK);
|
||||||
if (total_len + loff > isize) {
|
if (total_len + loff > isize)
|
||||||
page_cache_release(page);
|
|
||||||
break;
|
break;
|
||||||
}
|
|
||||||
/*
|
/*
|
||||||
* force quit after adding this page
|
* force quit after adding this page
|
||||||
*/
|
*/
|
||||||
nr_pages = spd.nr_pages;
|
len = this_len;
|
||||||
this_len = min(this_len, loff);
|
this_len = min(this_len, loff);
|
||||||
loff = 0;
|
loff = 0;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
fill_it:
|
fill_it:
|
||||||
pages[spd.nr_pages] = page;
|
partial[page_nr].offset = loff;
|
||||||
partial[spd.nr_pages].offset = loff;
|
partial[page_nr].len = this_len;
|
||||||
partial[spd.nr_pages].len = this_len;
|
|
||||||
len -= this_len;
|
len -= this_len;
|
||||||
total_len += this_len;
|
total_len += this_len;
|
||||||
loff = 0;
|
loff = 0;
|
||||||
|
spd.nr_pages++;
|
||||||
|
index++;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Release any pages at the end, if we quit early. 'i' is how far
|
||||||
|
* we got, 'nr_pages' is how many pages are in the map.
|
||||||
|
*/
|
||||||
|
while (page_nr < nr_pages)
|
||||||
|
page_cache_release(pages[page_nr++]);
|
||||||
|
|
||||||
if (spd.nr_pages)
|
if (spd.nr_pages)
|
||||||
return splice_to_pipe(pipe, &spd);
|
return splice_to_pipe(pipe, &spd);
|
||||||
|
|
||||||
|
|
|
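One point worth noting about the diff above: every page in the map is pinned before the IO loop runs (find_get_pages_contig() takes a page_cache_get() reference on each page it returns, and the hole-filling path holds a reference from find_get_page() or from the fresh allocation), so when the loop breaks out early the leftover references have to be dropped, which is what the new "Release any pages at the end" loop does. Below is a minimal userspace sketch of that discipline, not kernel code; the refcount[] array and get_ref()/put_ref() helpers are invented stand-ins for struct page reference counts and page_cache_get()/page_cache_release().

/* Illustrative userspace model only; not kernel code. */
#include <assert.h>
#include <stdio.h>

#define NR_PAGES 4

static int refcount[NR_PAGES];

static void get_ref(int i) { refcount[i]++; }   /* models page_cache_get() */
static void put_ref(int i) { refcount[i]--; }   /* models page_cache_release() */

int main(void)
{
        int page_nr, nr_pages = NR_PAGES;

        /* The batched lookup pinned every page in the map. */
        for (page_nr = 0; page_nr < nr_pages; page_nr++)
                get_ref(page_nr);

        /* Pretend the fill loop stopped after handing two pages to the pipe. */
        page_nr = 2;

        /* Drop the references that were never consumed. */
        while (page_nr < nr_pages)
                put_ref(page_nr++);

        for (page_nr = 0; page_nr < nr_pages; page_nr++)
                printf("page %d: refcount %d\n", page_nr, refcount[page_nr]);
        assert(refcount[2] == 0 && refcount[3] == 0);
        return 0;
}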
2  include/linux/pagemap.h
@@ -78,6 +78,8 @@ extern struct page * find_or_create_page(struct address_space *mapping,
                                 unsigned long index, gfp_t gfp_mask);
 unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
                         unsigned int nr_pages, struct page **pages);
+unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
+                               unsigned int nr_pages, struct page **pages);
 unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
                         int tag, unsigned int nr_pages, struct page **pages);
 
32  mm/filemap.c
@@ -697,6 +697,38 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
         return ret;
 }
 
+/**
+ * find_get_pages_contig - gang contiguous pagecache lookup
+ * @mapping:   The address_space to search
+ * @index:     The starting page index
+ * @nr_pages:  The maximum number of pages
+ * @pages:     Where the resulting pages are placed
+ *
+ * find_get_pages_contig() works exactly like find_get_pages(), except
+ * that the returned number of pages are guaranteed to be contiguous.
+ *
+ * find_get_pages_contig() returns the number of pages which were found.
+ */
+unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
+                               unsigned int nr_pages, struct page **pages)
+{
+        unsigned int i;
+        unsigned int ret;
+
+        read_lock_irq(&mapping->tree_lock);
+        ret = radix_tree_gang_lookup(&mapping->page_tree,
+                                (void **)pages, index, nr_pages);
+        for (i = 0; i < ret; i++) {
+                if (pages[i]->mapping == NULL || pages[i]->index != index)
+                        break;
+
+                page_cache_get(pages[i]);
+                index++;
+        }
+        read_unlock_irq(&mapping->tree_lock);
+        return i;
+}
+
 /*
  * Like find_get_pages, except we only return pages which are tagged with
  * `tag'. We update *index to index the next page for the traversal.
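The kernel-doc added above captures the whole difference between the two lookups: find_get_pages() gathers up to nr_pages pages wherever it finds them, while find_get_pages_contig() stops at the first hole (or at a page whose index no longer matches) so the caller gets one unbroken run. The sketch below contrasts the two behaviours in plain userspace C, not kernel code; cache[] and the two lookup helpers are invented stand-ins for the radix tree and the two kernel functions.

/* Illustrative userspace model only; not kernel code. */
#include <stdio.h>

#define NR_SLOTS 8

static const char *cache[NR_SLOTS] = {
        "A", "B", NULL, "D", "E", NULL, "G", "H",
};

/* find_get_pages()-like: collect up to nr present entries, skipping holes. */
static unsigned int gang_lookup(unsigned int start, unsigned int nr,
                                const char **out)
{
        unsigned int i, found = 0;

        for (i = start; i < NR_SLOTS && found < nr; i++)
                if (cache[i])
                        out[found++] = cache[i];
        return found;
}

/* find_get_pages_contig()-like: stop at the first hole. */
static unsigned int gang_lookup_contig(unsigned int start, unsigned int nr,
                                       const char **out)
{
        unsigned int i;

        for (i = 0; i < nr && start + i < NR_SLOTS; i++) {
                if (!cache[start + i])
                        break;
                out[i] = cache[start + i];
        }
        return i;
}

int main(void)
{
        const char *out[NR_SLOTS];
        unsigned int n, i;

        n = gang_lookup(0, 4, out);
        printf("find_get_pages-style lookup:        %u pages:", n);
        for (i = 0; i < n; i++)
                printf(" %s", out[i]);
        printf("\n");

        n = gang_lookup_contig(0, 4, out);
        printf("find_get_pages_contig-style lookup: %u pages:", n);
        for (i = 0; i < n; i++)
                printf(" %s", out[i]);
        printf("\n");
        return 0;
}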