Mirror of https://github.com/Fishwaldo/Star64_linux.git, synced 2025-06-28 09:31:14 +00:00
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "5 patches.

  Subsystems affected by this patch series: coda, overlayfs, and mm
  (pagecache and memcg)"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  tools/cgroup/slabinfo.py: updated to work on current kernel
  mm/filemap: fix mapping_seek_hole_data on THP & 32-bit
  mm/filemap: fix find_lock_entries hang on 32-bit THP
  ovl: fix reference counting in ovl_mmap error path
  coda: fix reference counting in coda_file_mmap error path
commit e77a830c82
4 changed files with 27 additions and 29 deletions

fs/coda/file.c

@@ -175,10 +175,10 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
         ret = call_mmap(vma->vm_file, vma);
 
         if (ret) {
-                /* if call_mmap fails, our caller will put coda_file so we
-                 * should drop the reference to the host_file that we got.
+                /* if call_mmap fails, our caller will put host_file so we
+                 * should drop the reference to the coda_file that we got.
                  */
-                fput(host_file);
+                fput(coda_file);
                 kfree(cvm_ops);
         } else {
                 /* here we add redirects for the open/close vm_operations */
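
As the corrected comment says, when call_mmap() fails the caller puts host_file: it drops whatever is in vma->vm_file, which coda_file_mmap() has already pointed at the host file. The reference left for the local cleanup is therefore the one on coda_file. Below is a toy userspace model of the two cleanups (plain C, not kernel code; the counters and values are illustrative assumptions, not the real reference bookkeeping):

#include <stdio.h>

/* one reference on each file is outstanding when call_mmap() fails */
static int coda_file_refs = 1;
static int host_file_refs = 1;

static void fput_coda_file(void) { coda_file_refs--; }
static void fput_host_file(void) { host_file_refs--; }

int main(void)
{
        int fixed = 1;                  /* 0 models the old cleanup, 1 the fixed one */

        /* local error-path cleanup inside coda_file_mmap() */
        if (fixed)
                fput_coda_file();       /* fput(coda_file) */
        else
                fput_host_file();       /* fput(host_file), the old code */

        /* the caller then puts vma->vm_file, which coda switched to host_file */
        fput_host_file();

        printf("coda_file refs: %d (want 0)\n", coda_file_refs);
        printf("host_file refs: %d (want 0)\n", host_file_refs);
        return 0;
}

With fixed set to 0 the model ends with coda_file leaked and host_file dropped twice, which is the bug the hunk above removes.
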
fs/overlayfs/file.c

@@ -430,20 +430,11 @@ static int ovl_mmap(struct file *file, struct vm_area_struct *vma)
         if (WARN_ON(file != vma->vm_file))
                 return -EIO;
 
-        vma->vm_file = get_file(realfile);
+        vma_set_file(vma, realfile);
 
         old_cred = ovl_override_creds(file_inode(file)->i_sb);
         ret = call_mmap(vma->vm_file, vma);
         revert_creds(old_cred);
-
-        if (ret) {
-                /* Drop reference count from new vm_file value */
-                fput(realfile);
-        } else {
-                /* Drop reference count from previous vm_file value */
-                fput(file);
-        }
-
         ovl_file_accessed(file);
 
         return ret;
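
vma_set_file() keeps the vm_file reference counting balanced in one step, which is what makes the manual fput() branches above removable: installing realfile drops the old reference on file, and when call_mmap() fails the generic mmap error path is expected to drop the vma->vm_file reference itself. A rough sketch of the helper, modelled on mm/util.c (for illustration only, not the authoritative implementation):

#include <linux/fs.h>
#include <linux/mm.h>

/* sketch of vma_set_file(): install the new backing file, keeping refcounts balanced */
static void vma_set_file_sketch(struct vm_area_struct *vma, struct file *file)
{
        get_file(file);                 /* take a reference on the new file */
        swap(vma->vm_file, file);       /* install it; 'file' now holds the old vm_file */
        fput(file);                     /* drop the reference on the old vm_file */
}
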
mm/filemap.c

@@ -1969,8 +1969,14 @@ unlock:
 put:
                 put_page(page);
 next:
-                if (!xa_is_value(page) && PageTransHuge(page))
-                        xas_set(&xas, page->index + thp_nr_pages(page));
+                if (!xa_is_value(page) && PageTransHuge(page)) {
+                        unsigned int nr_pages = thp_nr_pages(page);
+
+                        /* Final THP may cross MAX_LFS_FILESIZE on 32-bit */
+                        xas_set(&xas, page->index + nr_pages);
+                        if (xas.xa_index < nr_pages)
+                                break;
+                }
         }
         rcu_read_unlock();
 
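The extra check exists because pgoff_t is only 32 bits wide on 32-bit kernels: for the final THP of a file sized near MAX_LFS_FILESIZE, page->index + nr_pages can wrap around to a small index, sending the xarray walk back towards the start and hanging find_lock_entries(). A standalone illustration of the arithmetic the check catches (ordinary userspace C; the 32-bit index type and example values are assumptions for the demo):

#include <stdio.h>

int main(void)
{
        typedef unsigned int pgoff32_t;         /* stand-in for a 32-bit pgoff_t */
        pgoff32_t index = 0xffffff00u;          /* final THP of a file near MAX_LFS_FILESIZE */
        unsigned int nr_pages = 512;            /* thp_nr_pages() for a 2MB THP with 4K pages */
        pgoff32_t next = index + nr_pages;      /* 0x1_0000_0100 wraps to 0x100 */

        printf("next index = 0x%x\n", next);
        if (next < nr_pages)
                printf("wrapped around -> stop the walk instead of looping\n");
        return 0;
}
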
@@ -2672,7 +2678,7 @@ loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
                 loff_t end, int whence)
 {
         XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT);
-        pgoff_t max = (end - 1) / PAGE_SIZE;
+        pgoff_t max = (end - 1) >> PAGE_SHIFT;
         bool seek_data = (whence == SEEK_DATA);
         struct page *page;
 
@@ -2681,7 +2687,8 @@ loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
 
         rcu_read_lock();
         while ((page = find_get_entry(&xas, max, XA_PRESENT))) {
-                loff_t pos = xas.xa_index * PAGE_SIZE;
+                loff_t pos = (u64)xas.xa_index << PAGE_SHIFT;
+                unsigned int seek_size;
 
                 if (start < pos) {
                         if (!seek_data)
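
The (u64) cast matters because xas.xa_index is an unsigned long: on 32-bit the old expression was evaluated in 32 bits and only then widened to loff_t, so byte offsets at or above 4GB were silently truncated. A standalone illustration of the difference (plain userspace C; the example index is arbitrary):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const unsigned int page_shift = 12;     /* 4K pages */
        uint32_t xa_index = 0x00200000;         /* page index of byte offset 8GB */

        int64_t without_cast = (int64_t)(xa_index << page_shift);           /* wraps in 32 bits first */
        int64_t with_cast    = (int64_t)((uint64_t)xa_index << page_shift);

        printf("without cast: 0x%llx\n", (unsigned long long)without_cast); /* 0x0 */
        printf("with cast:    0x%llx\n", (unsigned long long)with_cast);    /* 0x200000000 */
        return 0;
}
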
@@ -2689,25 +2696,25 @@ loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
                         start = pos;
                 }
 
-                pos += seek_page_size(&xas, page);
+                seek_size = seek_page_size(&xas, page);
+                pos = round_up(pos + 1, seek_size);
                 start = page_seek_hole_data(&xas, mapping, page, start, pos,
                                 seek_data);
                 if (start < pos)
                         goto unlock;
+                if (start >= end)
+                        break;
+                if (seek_size > PAGE_SIZE)
+                        xas_set(&xas, pos >> PAGE_SHIFT);
                 if (!xa_is_value(page))
                         put_page(page);
         }
-        rcu_read_unlock();
-
         if (seek_data)
-                return -ENXIO;
-        goto out;
-
+                start = -ENXIO;
 unlock:
         rcu_read_unlock();
-        if (!xa_is_value(page))
+        if (page && !xa_is_value(page))
                 put_page(page);
-out:
         if (start > end)
                 return end;
         return start;
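
In the new code, pos = round_up(pos + 1, seek_size) advances pos to the end of the page or THP the walk is currently in, and for a multi-page entry xas_set() then repositions the walk past the THP's tail pages; setting start = -ENXIO instead of returning early lets the single unlock path handle rcu_read_unlock() and the page reference. A small standalone illustration of the round_up() advance (plain userspace C, using the kernel's power-of-two round_up definition; the offsets are made-up examples):

#include <stdio.h>

/* kernel-style round_up() for power-of-two alignment */
#define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
        unsigned long long pos = 0x201000;              /* inside the 2MB THP at 0x200000 */
        unsigned long long seek_size = 0x200000;        /* seek_page_size() for that THP */

        pos = round_up(pos + 1, seek_size);
        printf("pos advanced to 0x%llx\n", pos);        /* 0x400000: first byte after the THP */
        return 0;
}
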
tools/cgroup/slabinfo.py

@@ -128,9 +128,9 @@ def detect_kernel_config():
 
     cfg['nr_nodes'] = prog['nr_online_nodes'].value_()
 
-    if prog.type('struct kmem_cache').members[1][1] == 'flags':
+    if prog.type('struct kmem_cache').members[1].name == 'flags':
         cfg['allocator'] = 'SLUB'
-    elif prog.type('struct kmem_cache').members[1][1] == 'batchcount':
+    elif prog.type('struct kmem_cache').members[1].name == 'batchcount':
         cfg['allocator'] = 'SLAB'
     else:
         err('Can\'t determine the slab allocator')
@@ -193,7 +193,7 @@ def main():
     # look over all slab pages, belonging to non-root memcgs
     # and look for objects belonging to the given memory cgroup
     for page in for_each_slab_page(prog):
-        objcg_vec_raw = page.obj_cgroups.value_()
+        objcg_vec_raw = page.memcg_data.value_()
         if objcg_vec_raw == 0:
             continue
         cache = page.slab_cache
@@ -202,7 +202,7 @@ def main():
         addr = cache.value_()
         caches[addr] = cache
         # clear the lowest bit to get the true obj_cgroups
-        objcg_vec = Object(prog, page.obj_cgroups.type_,
+        objcg_vec = Object(prog, 'struct obj_cgroup **',
                            value=objcg_vec_raw & ~1)
 
         if addr not in stats:
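
The slabinfo.py changes track two interface moves: drgn now describes structure members as objects with a .name attribute rather than plain tuples, and the kernel folded the per-page obj_cgroups pointer into page->memcg_data, with the low bit tagging the value as an object-cgroup vector (which is why the script still clears bit 0 before treating it as a struct obj_cgroup ** pointer). A small C sketch of that pointer-tagging convention (illustrative only; the flag name and helper are simplified assumptions, not the kernel's exact definitions):

#include <stdio.h>
#include <stdint.h>

struct obj_cgroup;                              /* opaque for this sketch */

#define OBJCGS_LOW_BIT 0x1UL                    /* illustrative tag bit, not the kernel's name */

static struct obj_cgroup **objcg_vec_from_memcg_data(uintptr_t memcg_data)
{
        if (!(memcg_data & OBJCGS_LOW_BIT))
                return NULL;                    /* low bit clear: not an obj_cgroup vector */
        return (struct obj_cgroup **)(memcg_data & ~OBJCGS_LOW_BIT);
}

int main(void)
{
        uintptr_t memcg_data = 0x12345001UL;    /* made-up tagged value */

        printf("vector pointer: %p\n", (void *)objcg_vec_from_memcg_data(memcg_data));
        return 0;
}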