Merge branch 'hwpoison' of git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-mce-2.6

* 'hwpoison' of git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-mce-2.6: (21 commits)
  HWPOISON: Enable error_remove_page on btrfs
  HWPOISON: Add simple debugfs interface to inject hwpoison on arbitary PFNs
  HWPOISON: Add madvise() based injector for hardware poisoned pages v4
  HWPOISON: Enable error_remove_page for NFS
  HWPOISON: Enable .remove_error_page for migration aware file systems
  HWPOISON: The high level memory error handler in the VM v7
  HWPOISON: Add PR_MCE_KILL prctl to control early kill behaviour per process
  HWPOISON: shmem: call set_page_dirty() with locked page
  HWPOISON: Define a new error_remove_page address space op for async truncation
  HWPOISON: Add invalidate_inode_page
  HWPOISON: Refactor truncate to allow direct truncating of page v2
  HWPOISON: check and isolate corrupted free pages v2
  HWPOISON: Handle hardware poisoned pages in try_to_unmap
  HWPOISON: Use bitmask/action code for try_to_unmap behaviour
  HWPOISON: x86: Add VM_FAULT_HWPOISON handling to x86 page fault handler v2
  HWPOISON: Add poison check to page fault handling
  HWPOISON: Add basic support for poisoned pages in fault handler v3
  HWPOISON: Add new SIGBUS error codes for hardware poison signals
  HWPOISON: Add support for poison swap entries v2
  HWPOISON: Export some rmap vma locking to outside world
  ...
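Every mm/rmap.c hunk below flows from one commit in this list, "HWPOISON: Use bitmask/action code for try_to_unmap behaviour": the old boolean migration/unlock parameters are folded into a single enum ttu_flags word carrying one action code plus independent modifier bits. As a reading aid, here is a sketch of the flag layout the hunks imply; the authoritative definition lives in include/linux/rmap.h and the exact values may differ:

	enum ttu_flags {
		TTU_UNMAP = 0,			/* plain unmap (pageout path) */
		TTU_MIGRATION = 1,		/* unmap to install migration entries */
		TTU_MUNLOCK = 2,		/* munlock-only pass */
		TTU_ACTION_MASK = 0xff,		/* low byte selects the action */

		TTU_IGNORE_MLOCK = (1 << 8),	/* unmap even inside VM_LOCKED vmas */
		TTU_IGNORE_ACCESS = (1 << 9),	/* don't bail out on a young pte */
		TTU_IGNORE_HWPOISON = (1 << 10),/* unmap a poisoned page normally */
	};

	#define TTU_ACTION(x)	((x) & TTU_ACTION_MASK)

This is why the hunks can test the action with TTU_ACTION(flags) == TTU_MIGRATION while the modifiers are tested with a plain bitmask.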
This commit is contained in:

commit db16826367

40 changed files with 1331 additions and 68 deletions. Only the mm/rmap.c
hunks (60 lines changed) are reproduced below.

--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -36,6 +36,11 @@
  *    mapping->tree_lock (widely used, in set_page_dirty,
  *              in arch-dependent flush_dcache_mmap_lock,
  *              within inode_lock in __sync_single_inode)
+ *
+ * (code doesn't rely on that order so it could be switched around)
+ * ->tasklist_lock
+ *   anon_vma->lock      (memory_failure, collect_procs_anon)
+ *     pte map lock
  */
 
 #include <linux/mm.h>
@@ -191,7 +196,7 @@ void __init anon_vma_init(void)
  * Getting a lock on a stable anon_vma from a page off the LRU is
  * tricky: page_lock_anon_vma rely on RCU to guard against the races.
  */
-static struct anon_vma *page_lock_anon_vma(struct page *page)
+struct anon_vma *page_lock_anon_vma(struct page *page)
 {
 	struct anon_vma *anon_vma;
 	unsigned long anon_mapping;
@@ -211,7 +216,7 @@ out:
 	return NULL;
 }
 
-static void page_unlock_anon_vma(struct anon_vma *anon_vma)
+void page_unlock_anon_vma(struct anon_vma *anon_vma)
 {
 	spin_unlock(&anon_vma->lock);
 	rcu_read_unlock();
@@ -311,7 +316,7 @@ pte_t *page_check_address(struct page *page, struct mm_struct *mm,
  * if the page is not mapped into the page tables of this VMA. Only
  * valid for normal file or anonymous VMAs.
  */
-static int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
+int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 {
 	unsigned long address;
 	pte_t *pte;
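The three hunks above implement "HWPOISON: Export some rmap vma locking to outside world": page_lock_anon_vma(), page_unlock_anon_vma() and page_mapped_in_vma() lose their static so the new memory-failure code (collect_procs_anon, already named in the updated lock-ordering comment) can find every process mapping a poisoned page. A hedged sketch of that usage pattern; the loop body is illustrative, not quoted from the series:

	struct anon_vma *av = page_lock_anon_vma(page);	/* rcu_read_lock + anon_vma->lock */
	if (av) {
		struct vm_area_struct *vma;

		list_for_each_entry(vma, &av->head, anon_vma_node) {
			if (page_mapped_in_vma(page, vma)) {
				/* e.g. remember vma->vm_mm's owner for a SIGBUS */
			}
		}
		page_unlock_anon_vma(av);	/* anon_vma unlock + rcu_read_unlock */
	}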
@@ -756,7 +761,7 @@ void page_remove_rmap(struct page *page)
  * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
  */
 static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
-				int migration)
+				enum ttu_flags flags)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long address;
@@ -778,11 +783,13 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	 * If it's recently referenced (perhaps page_referenced
 	 * skipped over this mm) then we should reactivate it.
 	 */
-	if (!migration) {
+	if (!(flags & TTU_IGNORE_MLOCK)) {
 		if (vma->vm_flags & VM_LOCKED) {
 			ret = SWAP_MLOCK;
 			goto out_unmap;
 		}
+	}
+	if (!(flags & TTU_IGNORE_ACCESS)) {
 		if (ptep_clear_flush_young_notify(vma, address, pte)) {
 			ret = SWAP_FAIL;
 			goto out_unmap;
@@ -800,7 +807,14 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	/* Update high watermark before we lower rss */
 	update_hiwater_rss(mm);
 
-	if (PageAnon(page)) {
+	if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
+		if (PageAnon(page))
+			dec_mm_counter(mm, anon_rss);
+		else
+			dec_mm_counter(mm, file_rss);
+		set_pte_at(mm, address, pte,
+				swp_entry_to_pte(make_hwpoison_entry(page)));
+	} else if (PageAnon(page)) {
 		swp_entry_t entry = { .val = page_private(page) };
 
 		if (PageSwapCache(page)) {
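This is the core HWPOISON change to try_to_unmap_one(): the pte of a poisoned page is replaced with a poison swap entry rather than a real swap or migration entry, so any later access to that address faults into the error path instead of touching the bad frame (see "HWPOISON: Add support for poison swap entries v2" in the list above). A sketch of what make_hwpoison_entry() amounts to; the real helper lives in include/linux/swapops.h and its exact form may differ:

	static inline swp_entry_t make_hwpoison_entry(struct page *page)
	{
		BUG_ON(!PageLocked(page));
		/* a software-defined swap type tags the entry as poison */
		return swp_entry(SWP_HWPOISON, page_to_pfn(page));
	}

Note the TTU_IGNORE_HWPOISON modifier gives callers a way to skip this branch and unmap the page the normal way.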
@@ -822,12 +836,12 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			 * pte. do_swap_page() will wait until the migration
 			 * pte is removed and then restart fault handling.
 			 */
-			BUG_ON(!migration);
+			BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
 			entry = make_migration_entry(page, pte_write(pteval));
 		}
 		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
 		BUG_ON(pte_file(*pte));
-	} else if (PAGE_MIGRATION && migration) {
+	} else if (PAGE_MIGRATION && (TTU_ACTION(flags) == TTU_MIGRATION)) {
 		/* Establish migration entry for a file page */
 		swp_entry_t entry;
 		entry = make_migration_entry(page, pte_write(pteval));
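For comparison, the migration path (unchanged in substance here, only re-keyed to TTU_ACTION) encodes the same kind of non-present entry; a sketch of make_migration_entry() as it stood in swapops.h of this era, exact form may differ:

	static inline swp_entry_t make_migration_entry(struct page *page, int write)
	{
		BUG_ON(!PageLocked(page));
		return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
				page_to_pfn(page));
	}

The hwpoison entry above deliberately reuses this mechanism: both park a non-present pte that identifies the page by pfn.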
@@ -996,12 +1010,13 @@ static int try_to_mlock_page(struct page *page, struct vm_area_struct *vma)
  * vm_flags for that VMA. That should be OK, because that vma shouldn't be
  * 'LOCKED.
  */
-static int try_to_unmap_anon(struct page *page, int unlock, int migration)
+static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
 {
 	struct anon_vma *anon_vma;
 	struct vm_area_struct *vma;
 	unsigned int mlocked = 0;
 	int ret = SWAP_AGAIN;
+	int unlock = TTU_ACTION(flags) == TTU_MUNLOCK;
 
 	if (MLOCK_PAGES && unlikely(unlock))
 		ret = SWAP_SUCCESS;	/* default for try_to_munlock() */
@@ -1017,7 +1032,7 @@ static int try_to_unmap_anon(struct page *page, int unlock, int migration)
 				continue;  /* must visit all unlocked vmas */
 			ret = SWAP_MLOCK;  /* saw at least one mlocked vma */
 		} else {
-			ret = try_to_unmap_one(page, vma, migration);
+			ret = try_to_unmap_one(page, vma, flags);
 			if (ret == SWAP_FAIL || !page_mapped(page))
 				break;
 		}
@@ -1041,8 +1056,7 @@ static int try_to_unmap_anon(struct page *page, int unlock, int migration)
 /**
  * try_to_unmap_file - unmap/unlock file page using the object-based rmap method
  * @page: the page to unmap/unlock
- * @unlock: request for unlock rather than unmap [unlikely]
- * @migration: unmapping for migration - ignored if @unlock
+ * @flags: action and flags
  *
  * Find all the mappings of a page using the mapping pointer and the vma chains
  * contained in the address_space struct it points to.
@@ -1054,7 +1068,7 @@ static int try_to_unmap_anon(struct page *page, int unlock, int migration)
  * vm_flags for that VMA. That should be OK, because that vma shouldn't be
  * 'LOCKED.
  */
-static int try_to_unmap_file(struct page *page, int unlock, int migration)
+static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 {
 	struct address_space *mapping = page->mapping;
 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -1066,6 +1080,7 @@ static int try_to_unmap_file(struct page *page, int unlock, int migration)
 	unsigned long max_nl_size = 0;
 	unsigned int mapcount;
 	unsigned int mlocked = 0;
+	int unlock = TTU_ACTION(flags) == TTU_MUNLOCK;
 
 	if (MLOCK_PAGES && unlikely(unlock))
 		ret = SWAP_SUCCESS;	/* default for try_to_munlock() */
@@ -1078,7 +1093,7 @@ static int try_to_unmap_file(struct page *page, int unlock, int migration)
 				continue;	/* must visit all vmas */
 			ret = SWAP_MLOCK;
 		} else {
-			ret = try_to_unmap_one(page, vma, migration);
+			ret = try_to_unmap_one(page, vma, flags);
 			if (ret == SWAP_FAIL || !page_mapped(page))
 				goto out;
 		}
@@ -1103,7 +1118,8 @@ static int try_to_unmap_file(struct page *page, int unlock, int migration)
 			ret = SWAP_MLOCK;	/* leave mlocked == 0 */
 			goto out;		/* no need to look further */
 		}
-		if (!MLOCK_PAGES && !migration && (vma->vm_flags & VM_LOCKED))
+		if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
+		    (vma->vm_flags & VM_LOCKED))
 			continue;
 		cursor = (unsigned long) vma->vm_private_data;
 		if (cursor > max_nl_cursor)
@@ -1137,7 +1153,7 @@ static int try_to_unmap_file(struct page *page, int unlock, int migration)
 	do {
 		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-			if (!MLOCK_PAGES && !migration &&
+			if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
 			    (vma->vm_flags & VM_LOCKED))
 				continue;
 			cursor = (unsigned long) vma->vm_private_data;
@@ -1177,7 +1193,7 @@ out:
 /**
  * try_to_unmap - try to remove all page table mappings to a page
  * @page: the page to get unmapped
- * @migration: migration flag
+ * @flags: action and flags
  *
  * Tries to remove all the page table entries which are mapping this
  * page, used in the pageout path. Caller must hold the page lock.
@@ -1188,16 +1204,16 @@ out:
  * SWAP_FAIL	- the page is unswappable
  * SWAP_MLOCK	- page is mlocked.
  */
-int try_to_unmap(struct page *page, int migration)
+int try_to_unmap(struct page *page, enum ttu_flags flags)
 {
 	int ret;
 
 	BUG_ON(!PageLocked(page));
 
 	if (PageAnon(page))
-		ret = try_to_unmap_anon(page, 0, migration);
+		ret = try_to_unmap_anon(page, flags);
 	else
-		ret = try_to_unmap_file(page, 0, migration);
+		ret = try_to_unmap_file(page, flags);
 	if (ret != SWAP_MLOCK && !page_mapped(page))
 		ret = SWAP_SUCCESS;
 	return ret;
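For callers of try_to_unmap() the conversion is mechanical; an illustrative before/after (the real call-site updates in mm/vmscan.c, mm/migrate.c and friends are among the other 39 files of this commit):

	/* before */
	ret = try_to_unmap(page, 0);			/* pageout */
	ret = try_to_unmap(page, 1);			/* migration */

	/* after */
	ret = try_to_unmap(page, TTU_UNMAP);		/* pageout */
	ret = try_to_unmap(page, TTU_MIGRATION);	/* migration */

Because the action is a code rather than a bool, adding a new mode (TTU_MUNLOCK below, hwpoison handling above) no longer means adding another parameter to every caller.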
@@ -1222,8 +1238,8 @@ int try_to_munlock(struct page *page)
 	VM_BUG_ON(!PageLocked(page) || PageLRU(page));
 
 	if (PageAnon(page))
-		return try_to_unmap_anon(page, 1, 0);
+		return try_to_unmap_anon(page, TTU_MUNLOCK);
 	else
-		return try_to_unmap_file(page, 1, 0);
+		return try_to_unmap_file(page, TTU_MUNLOCK);
 }
 