mm: coding-style fixes
Fix whitespace issues, extraneous braces.

Link: http://lkml.kernel.org/r/1485992240-10986-5-git-send-email-me@tobin.cc
Signed-off-by: Tobin C Harding <me@tobin.cc>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
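For reference, the kernel style rules being applied in this patch (per Documentation/process/coding-style.rst) are: a space on each side of the ternary `?` and `:`, the `*` of a pointer declaration attached to the name rather than the type, and no braces around a single-statement branch. A minimal standalone sketch, with a made-up helper pick() that exists purely to illustrate the three rules (it is not from the patch):

#include <stdio.h>

/*
 * Hypothetical example, not from the patch:
 *   - "cond ? a : b" rather than "cond? a: b"
 *   - "int *p" rather than "int * p"
 *   - no braces around a single-statement branch
 */
static int pick(int cond, int *val, int fallback)
{
	if (!val)		/* single statement: no braces */
		return fallback;

	return cond ? *val : fallback;	/* spaces around ? and : */
}

int main(void)
{
	int x = 42;

	printf("%d %d\n", pick(1, &x, 0), pick(0, NULL, 7));
	return 0;
}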
This commit is contained in:
parent 7f2b6ce8e3
commit 166f61b943

1 changed file with 29 additions and 31 deletions
mm/memory.c | 60 +++++++++++++++++++++++++++++-------------------------------
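Issues of exactly these classes are what the kernel's own style checker reports. A plausible way to surface them (scripts/checkpatch.pl and its --file/--types options are real; the particular type list here is only an illustration) would be: scripts/checkpatch.pl --file --types SPACING,POINTER_LOCATION,BRACES mm/memory.c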
diff --git a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -30,7 +30,7 @@
 
 /*
  * 05.04.94  -  Multi-page memory management added for v1.1.
- *              Idea by Alex Bligh (alex@cconcepts.co.uk)
+ *		Idea by Alex Bligh (alex@cconcepts.co.uk)
  *
  * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
  *		(Gerhard.Wichert@pdb.siemens.de)
@@ -82,9 +82,9 @@
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 /* use the per-pgdat data instead for discontigmem - mbligh */
 unsigned long max_mapnr;
-struct page *mem_map;
-
 EXPORT_SYMBOL(max_mapnr);
+
+struct page *mem_map;
 EXPORT_SYMBOL(mem_map);
 #endif
 
@@ -95,8 +95,7 @@ EXPORT_SYMBOL(mem_map);
  * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
  * and ZONE_HIGHMEM.
  */
-void * high_memory;
-
+void *high_memory;
 EXPORT_SYMBOL(high_memory);
 
 /*
@@ -120,10 +119,10 @@ static int __init disable_randmaps(char *s)
 __setup("norandmaps", disable_randmaps);
 
 unsigned long zero_pfn __read_mostly;
-unsigned long highest_memmap_pfn __read_mostly;
-
 EXPORT_SYMBOL(zero_pfn);
 
+unsigned long highest_memmap_pfn __read_mostly;
+
 /*
  * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
  */
@@ -556,7 +555,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
 
 		if (is_vm_hugetlb_page(vma)) {
 			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
-				floor, next? next->vm_start: ceiling);
+				floor, next ? next->vm_start : ceiling);
 		} else {
 			/*
 			 * Optimization: gather nearby vmas into one call down
@@ -569,7 +568,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
 				unlink_file_vma(vma);
 			}
 			free_pgd_range(tlb, addr, vma->vm_end,
-				floor, next? next->vm_start: ceiling);
+				floor, next ? next->vm_start : ceiling);
 		}
 		vma = next;
 	}
@@ -1141,9 +1140,8 @@ again:
 	arch_enter_lazy_mmu_mode();
 	do {
 		pte_t ptent = *pte;
-		if (pte_none(ptent)) {
+		if (pte_none(ptent))
 			continue;
-		}
 
 		if (pte_present(ptent)) {
 			struct page *page;
@@ -1463,10 +1461,10 @@ EXPORT_SYMBOL_GPL(zap_vma_ptes);
 pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
 			spinlock_t **ptl)
 {
-	pgd_t * pgd = pgd_offset(mm, addr);
-	pud_t * pud = pud_alloc(mm, pgd, addr);
+	pgd_t *pgd = pgd_offset(mm, addr);
+	pud_t *pud = pud_alloc(mm, pgd, addr);
 	if (pud) {
-		pmd_t * pmd = pmd_alloc(mm, pud, addr);
+		pmd_t *pmd = pmd_alloc(mm, pud, addr);
 		if (pmd) {
 			VM_BUG_ON(pmd_trans_huge(*pmd));
 			return pte_alloc_map_lock(mm, pmd, addr, ptl);
@@ -2525,7 +2523,7 @@ void unmap_mapping_range(struct address_space *mapping,
 		hlen = ULONG_MAX - hba + 1;
 	}
 
-	details.check_mapping = even_cows? NULL: mapping;
+	details.check_mapping = even_cows ? NULL : mapping;
 	details.first_index = hba;
 	details.last_index = hba + hlen - 1;
 	if (details.last_index < details.first_index)
@@ -3407,14 +3405,14 @@ static int do_numa_page(struct vm_fault *vmf)
 	int flags = 0;
 
 	/*
-	* The "pte" at this point cannot be used safely without
-	* validation through pte_unmap_same(). It's of NUMA type but
-	* the pfn may be screwed if the read is non atomic.
-	*
-	* We can safely just do a "set_pte_at()", because the old
-	* page table entry is not accessible, so there would be no
-	* concurrent hardware modifications to the PTE.
-	*/
+	 * The "pte" at this point cannot be used safely without
+	 * validation through pte_unmap_same(). It's of NUMA type but
+	 * the pfn may be screwed if the read is non atomic.
+	 *
+	 * We can safely just do a "set_pte_at()", because the old
+	 * page table entry is not accessible, so there would be no
+	 * concurrent hardware modifications to the PTE.
+	 */
 	vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
 	spin_lock(vmf->ptl);
 	if (unlikely(!pte_same(*vmf->pte, pte))) {
@@ -3750,14 +3748,14 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 
 	if (flags & FAULT_FLAG_USER) {
 		mem_cgroup_oom_disable();
-                /*
-                 * The task may have entered a memcg OOM situation but
-                 * if the allocation error was handled gracefully (no
-                 * VM_FAULT_OOM), there is no need to kill anything.
-                 * Just clean up the OOM state peacefully.
-                 */
+		/*
+		 * The task may have entered a memcg OOM situation but
+		 * if the allocation error was handled gracefully (no
+		 * VM_FAULT_OOM), there is no need to kill anything.
+		 * Just clean up the OOM state peacefully.
+		 */
 		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
 			mem_cgroup_oom_synchronize(false);
 	}
 
 	/*