mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-04-14 02:14:35 +00:00
Concentrate code to modify totalram_pages into the mm core, so the arch memory initialized code doesn't need to take care of it. With these changes applied, only following functions from mm core modify global variable totalram_pages: free_bootmem_late(), free_all_bootmem(), free_all_bootmem_node(), adjust_managed_page_count(). With this patch applied, it will be much more easier for us to keep totalram_pages and zone->managed_pages in consistence. Signed-off-by: Jiang Liu <jiang.liu@huawei.com> Acked-by: David Howells <dhowells@redhat.com> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: "Michael S. Tsirkin" <mst@redhat.com> Cc: <sworddragon2@aol.com> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Chris Metcalf <cmetcalf@tilera.com> Cc: Geert Uytterhoeven <geert@linux-m68k.org> Cc: Ingo Molnar <mingo@redhat.com> Cc: Jeremy Fitzhardinge <jeremy@goop.org> Cc: Jianguo Wu <wujianguo@huawei.com> Cc: Joonsoo Kim <js1304@gmail.com> Cc: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> Cc: Marek Szyprowski <m.szyprowski@samsung.com> Cc: Mel Gorman <mel@csn.ul.ie> Cc: Michel Lespinasse <walken@google.com> Cc: Minchan Kim <minchan@kernel.org> Cc: Rik van Riel <riel@redhat.com> Cc: Rusty Russell <rusty@rustcorp.com.au> Cc: Tang Chen <tangchen@cn.fujitsu.com> Cc: Tejun Heo <tj@kernel.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Wen Congyang <wency@cn.fujitsu.com> Cc: Will Deacon <will.deacon@arm.com> Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com> Cc: Yinghai Lu <yinghai@kernel.org> Cc: Russell King <rmk@arm.linux.org.uk> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
196 lines
5 KiB
C
196 lines
5 KiB
C
/*
 *  linux/arch/m32r/mm/init.c
 *
 *  Copyright (c) 2001, 2002  Hitoshi Yamamoto
 *
 *  Some code taken from sh version.
 *    Copyright (C) 1999  Niibe Yutaka
 *  Based on linux/arch/i386/mm/init.c:
 *    Copyright (C) 1995  Linus Torvalds
 */
|
|
|
|
#include <linux/init.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/pagemap.h>
|
|
#include <linux/bootmem.h>
|
|
#include <linux/swap.h>
|
|
#include <linux/highmem.h>
|
|
#include <linux/bitops.h>
|
|
#include <linux/nodemask.h>
|
|
#include <linux/pfn.h>
|
|
#include <linux/gfp.h>
|
|
#include <asm/types.h>
|
|
#include <asm/processor.h>
|
|
#include <asm/page.h>
|
|
#include <asm/pgtable.h>
|
|
#include <asm/pgalloc.h>
|
|
#include <asm/mmu_context.h>
|
|
#include <asm/setup.h>
|
|
#include <asm/tlb.h>
|
|
#include <asm/sections.h>
|
|
|
|
/* Kernel page directory; hardware walks this for the kernel mapping. */
pgd_t swapper_pg_dir[1024];

/*
 * Cache of MMU context last used.
 */
#ifndef CONFIG_SMP
unsigned long mmu_context_cache_dat;
#else
unsigned long mmu_context_cache_dat[NR_CPUS];
#endif

/* Number of pages inside memory holes, as reported by zone_sizes_init(). */
static unsigned long hole_pages;

/*
 * function prototype
 */
void __init paging_init(void);
void __init mem_init(void);
void free_initmem(void);
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long, unsigned long);
#endif

/* It'd be good if these lines were in the standard header file. */
/* First and one-past-last low-memory PFN of a node's bootmem range. */
#define START_PFN(nid)		(NODE_DATA(nid)->bdata->node_min_pfn)
#define MAX_LOW_PFN(nid)	(NODE_DATA(nid)->bdata->node_low_pfn)
|
|
|
|
#ifndef CONFIG_DISCONTIGMEM
|
|
/*
 * zone_sizes_init() - compute per-zone page counts for node 0 and hand
 * them to free_area_init_node().
 *
 * Returns the number of pages lost to memory holes; this flat (non-
 * DISCONTIGMEM) variant always reports 0.
 */
unsigned long __init zone_sizes_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = {0, };
	unsigned long max_dma;
	unsigned long low;
	unsigned long start_pfn;

#ifdef CONFIG_MMU
	start_pfn = START_PFN(0);
	/* Highest PFN reachable by DMA devices. */
	max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	low = MAX_LOW_PFN(0);

	if (low < max_dma){
		/* All low memory is DMA-able: no ZONE_NORMAL needed. */
		zones_size[ZONE_DMA] = low - start_pfn;
		zones_size[ZONE_NORMAL] = 0;
	} else {
		/*
		 * NOTE(review): ZONE_DMA is sized as the whole low range
		 * (low - start_pfn) rather than max_dma - start_pfn, so the
		 * two zone sizes appear to overlap; this matches the
		 * historical m32r code — confirm before changing.
		 */
		zones_size[ZONE_DMA] = low - start_pfn;
		zones_size[ZONE_NORMAL] = low - max_dma;
	}
#else
	/* No MMU: everything lives in ZONE_NORMAL. */
	zones_size[ZONE_DMA] = 0 >> PAGE_SHIFT;
	zones_size[ZONE_NORMAL] = __MEMORY_SIZE >> PAGE_SHIFT;
	start_pfn = __MEMORY_START >> PAGE_SHIFT;
#endif /* CONFIG_MMU */

	free_area_init_node(0, zones_size, start_pfn, 0);

	/* No holes in the flat-memory layout. */
	return 0;
}
|
|
#else /* CONFIG_DISCONTIGMEM */
|
|
extern unsigned long zone_sizes_init(void);
|
|
#endif /* CONFIG_DISCONTIGMEM */
|
|
|
|
/*======================================================================*
|
|
* paging_init() : sets up the page tables
|
|
*======================================================================*/
|
|
/*
 * paging_init() - initialize the kernel page directory and the zone
 * free lists, recording the hole-page count for mem_init().
 */
void __init paging_init(void)
{
#ifdef CONFIG_MMU
	int i;
	pgd_t *pg_dir;

	/* We don't need kernel mapping as hardware support that. */
	pg_dir = swapper_pg_dir;

	/*
	 * Clear the user portion of the pgd.  NOTE(review): the "* 2"
	 * factor presumably covers both halves of the user range on this
	 * architecture — confirm against USER_PTRS_PER_PGD's definition.
	 */
	for (i = 0 ; i < USER_PTRS_PER_PGD * 2 ; i++)
		pgd_val(pg_dir[i]) = 0;
#endif /* CONFIG_MMU */

	hole_pages = zone_sizes_init();
}
|
|
|
|
/*
 * reservedpages_count() - count pages marked PG_reserved.
 *
 * Walks every online node's low-memory page range under that node's
 * pgdat resize lock and tallies the reserved pages found.
 */
int __init reservedpages_count(void)
{
	int total = 0;
	int node;

	for_each_online_node(node) {
		unsigned long flags;
		unsigned long idx;
		unsigned long npages;

		pgdat_resize_lock(NODE_DATA(node), &flags);
		npages = MAX_LOW_PFN(node) - START_PFN(node);
		for (idx = 0; idx < npages; idx++) {
			if (PageReserved(nid_page_nr(node, idx)))
				total++;
		}
		pgdat_resize_unlock(NODE_DATA(node), &flags);
	}

	return total;
}
|
|
|
|
/*======================================================================*
|
|
* mem_init() :
|
|
* orig : arch/sh/mm/init.c
|
|
*======================================================================*/
|
|
/*
 * mem_init() - finish memory initialization: account physical pages,
 * set high_memory, release bootmem to the buddy allocator, and print
 * the boot-time memory summary.
 */
void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int nid;
#ifndef CONFIG_MMU
	extern unsigned long memory_end;
#endif

	/* Total physical pages = sum of each node's low range, minus holes. */
	num_physpages = 0;
	for_each_online_node(nid)
		num_physpages += MAX_LOW_PFN(nid) - START_PFN(nid) + 1;

	num_physpages -= hole_pages;

#ifndef CONFIG_DISCONTIGMEM
	max_mapnr = num_physpages;
#endif /* CONFIG_DISCONTIGMEM */

	/* Upper bound of directly-mapped memory. */
#ifdef CONFIG_MMU
	high_memory = (void *)__va(PFN_PHYS(MAX_LOW_PFN(0)));
#else
	high_memory = (void *)(memory_end & PAGE_MASK);
#endif /* CONFIG_MMU */

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/* this will put all low memory onto the freelists */
	for_each_online_node(nid)
		free_all_bootmem_node(NODE_DATA(nid));

	/* Hole pages were counted as reserved above; exclude them. */
	reservedpages = reservedpages_count() - hole_pages;

	/* Section sizes from the linker-provided symbols. */
	codesize = (unsigned long) &_etext - (unsigned long)&_text;
	datasize = (unsigned long) &_edata - (unsigned long)&_etext;
	initsize = (unsigned long) &__init_end - (unsigned long)&__init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
		"%dk reserved, %dk data, %dk init)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);
}
|
|
|
|
/*======================================================================*
|
|
* free_initmem() :
|
|
* orig : arch/sh/mm/init.c
|
|
*======================================================================*/
|
|
/*
 * free_initmem() - release the __init text/data region back to the
 * page allocator via the generic helper; the -1 argument requests no
 * poison pattern be written to the freed pages.
 */
void free_initmem(void)
{
	free_initmem_default(-1);
}
|
|
|
|
#ifdef CONFIG_BLK_DEV_INITRD
|
|
/*======================================================================*
|
|
* free_initrd_mem() :
|
|
* orig : arch/sh/mm/init.c
|
|
*======================================================================*/
|
|
/*
 * free_initrd_mem() - return the initrd's [start, end) range to the
 * page allocator once the initrd is no longer needed.  -1 means no
 * poison pattern; "initrd" labels the freed region in the boot log.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
|
|
#endif
|