mm: convert a few VM_BUG_ON callers to VM_BUG_ON_VMA
Trivially convert a few VM_BUG_ON calls to VM_BUG_ON_VMA to extract more
information when they trigger.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Michel Lespinasse <walken@google.com>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent fa3759ccd5
commit 81d1b09c6b

9 changed files with 24 additions and 23 deletions
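What the conversion buys: VM_BUG_ON(cond) only reports that a condition failed,
while VM_BUG_ON_VMA(cond, vma) also dumps the offending vm_area_struct before
bugging out, so the report carries the VMA's range and flags. The snippet below
is only a minimal userspace sketch of that pattern, with a trimmed-down struct,
a made-up dump_vma() helper, and abort() standing in for BUG(); it is not the
kernel's mmdebug.h implementation, just an illustration of why the extra
argument matters when one of the converted assertions fires.

/* Hypothetical userspace sketch of the VM_BUG_ON_VMA() pattern.
 * The struct, the dump helper, and the abort path are stand-ins,
 * not the kernel's actual definitions.
 */
#include <stdio.h>
#include <stdlib.h>

struct vm_area_struct {                 /* trimmed-down stand-in */
        unsigned long vm_start;
        unsigned long vm_end;
        unsigned long vm_flags;
};

static void dump_vma(const struct vm_area_struct *vma)
{
        fprintf(stderr, "vma %p start %lx end %lx flags %lx\n",
                (void *)vma, vma->vm_start, vma->vm_end, vma->vm_flags);
}

/* Old style: reports only that the condition failed. */
#define VM_BUG_ON(cond)                                         \
        do {                                                    \
                if (cond) {                                     \
                        fprintf(stderr, "BUG: %s\n", #cond);    \
                        abort();                                \
                }                                               \
        } while (0)

/* New style: dump the VMA first, then bug out. */
#define VM_BUG_ON_VMA(cond, vma)                                \
        do {                                                    \
                if (cond) {                                     \
                        dump_vma(vma);                          \
                        VM_BUG_ON(cond);                        \
                }                                               \
        } while (0)

int main(void)
{
        struct vm_area_struct vma = { 0x1000, 0x2000, 0x75 };
        unsigned long address = 0x3000; /* deliberately outside the VMA */

        /* Mirrors the check in vma_address(): address must lie inside the VMA. */
        VM_BUG_ON_VMA(address < vma.vm_start || address >= vma.vm_end, &vma);
        return 0;
}

Run against the deliberately bad address above, the sketch prints the VMA's
start, end, and flags before aborting, which is exactly the extra context the
converted call sites gain.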
@@ -527,7 +527,7 @@ vma_address(struct page *page, struct vm_area_struct *vma)
         unsigned long address = __vma_address(page, vma);
 
         /* page should be within @vma mapping range */
-        VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+        VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
 
         return address;
 }
@@ -897,7 +897,7 @@ void page_move_anon_rmap(struct page *page,
         struct anon_vma *anon_vma = vma->anon_vma;
 
         VM_BUG_ON_PAGE(!PageLocked(page), page);
-        VM_BUG_ON(!anon_vma);
+        VM_BUG_ON_VMA(!anon_vma, vma);
         VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);
 
         anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
@@ -1024,7 +1024,7 @@ void do_page_add_anon_rmap(struct page *page,
 void page_add_new_anon_rmap(struct page *page,
         struct vm_area_struct *vma, unsigned long address)
 {
-        VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+        VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
         SetPageSwapBacked(page);
         atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
         if (PageTransHuge(page))
@@ -1670,7 +1670,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
          * structure at mapping cannot be freed and reused yet,
          * so we can safely take mapping->i_mmap_mutex.
          */
-        VM_BUG_ON(!PageLocked(page));
+        VM_BUG_ON_PAGE(!PageLocked(page), page);
 
         if (!mapping)
                 return ret;