#include <linux/module.h>
#include <linux/pci.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#ifdef CONFIG_DISCONTIGMEM
static struct resource data_resource = {
	.name	= "Kernel data",
static struct resource code_resource = {
	.name	= "Kernel code",
static struct resource pdcdata_resource = {
	.name	= "PDC data (Page Zero)",
#define MAX_MEM (~0UL)
#define MAX_MEM (3584U*1024U*1024U)
static unsigned long mem_limit __read_mostly = MAX_MEM;
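/*
 * Parse the "mem=" kernel command-line option; if the requested amount is
 * below the current mem_limit, lower mem_limit so that setup_bootmem() will
 * truncate the physical memory ranges accordingly.
 */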
static void __init mem_limit_func(void)
		if (memcmp(cp, "mem=", 4) == 0) {
			while (*cp != ' ' && *cp)
	if (limit < mem_limit)
#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
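/*
 * setup_bootmem(): sort the firmware-reported physical memory ranges,
 * (without CONFIG_DISCONTIGMEM) drop ranges beyond a large gap, apply
 * mem_limit, register "System RAM" resources, initialise the bootmem
 * allocator(s) and reserve the kernel image, the bootmaps, page zero /
 * PDC console area and any initrd.
 */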
static void __init setup_bootmem(void)
	unsigned long bootmap_size;
	unsigned long mem_max;
	unsigned long bootmap_pages;
	unsigned long bootmap_start_pfn;
	unsigned long bootmap_pfn;
#ifndef CONFIG_DISCONTIGMEM
	int i, sysram_resource_count;
		for (j = i; j > 0; j--) {
#ifndef CONFIG_DISCONTIGMEM
	for (i = 1; i < npmem_ranges; i++) {
			printk("Large gap in memory detected (%ld pages). "
			       "Consider turning on CONFIG_DISCONTIGMEM\n",
	if (npmem_ranges > 1) {
		for (i = 0; i < npmem_ranges; i++) {
				i, start, start + (size - 1), size >> 20);
	sysram_resource_count = npmem_ranges;
	for (i = 0; i < sysram_resource_count; i++) {
		res->name = "System RAM";
	for (i = 0; i < npmem_ranges; i++) {
		if ((mem_max + rsize) > mem_limit) {
			if (mem_max == mem_limit)
				npmem_ranges = i + 1;
#ifndef CONFIG_DISCONTIGMEM
		unsigned long end_pfn;
		unsigned long hole_pages;
		for (i = 1; i < npmem_ranges; i++) {
				pmem_holes[npmem_holes].start_pfn = end_pfn;
				pmem_holes[npmem_holes++].pages = hole_pages;
				end_pfn += hole_pages;
	for (i = 0; i < npmem_ranges; i++)
#ifdef CONFIG_DISCONTIGMEM
	memset(pfnnid_map, 0xff, sizeof(pfnnid_map));
	for (i = 0; i < npmem_ranges; i++) {
	bootmap_pfn = bootmap_start_pfn;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start_pfn;
		unsigned long npages;
					(start_pfn + npages) );
		if ((start_pfn + npages) > max_pfn)
	BUG_ON((bootmap_pfn - bootmap_start_pfn) != bootmap_pages);
#define PDC_CONSOLE_IO_IODC_SIZE 32768
			(unsigned long)(PAGE0->mem_free +
			((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT),
#ifndef CONFIG_DISCONTIGMEM
	for (i = 0; i < npmem_holes; i++) {
#ifdef CONFIG_BLK_DEV_INITRD
		unsigned long initrd_reserve;
	for (i = 0; i < sysram_resource_count; i++) {
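/*
 * map_pages(): walk the pgd/pmd/pte levels and install translations so that
 * the physical range [start_paddr, start_paddr + size) appears at
 * start_vaddr with the requested protection.  With 4 kB pages, addresses
 * inside the kernel read-only region (ro_start..ro_end) are given a
 * read-only PTE instead, except for the fault vector and the gateway page.
 */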
static void __init map_pages(unsigned long start_vaddr,
			     unsigned long start_paddr, unsigned long size,
	unsigned long end_paddr;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long ro_start;
	unsigned long ro_end;
	unsigned long fv_addr;
	unsigned long gw_addr;
	extern void * const linux_gateway_page;
	ro_start = __pa((unsigned long)_text);
	ro_end   = __pa((unsigned long)&data_start);
	gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
	end_paddr = start_paddr + size;
#if PTRS_PER_PMD == 1
	address = start_paddr;
	while (address < end_paddr) {
#if PTRS_PER_PMD == 1
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
			pg_table = (pte_t *) __va(pg_table) + start_pte;
			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
				if (address >= ro_start && address < ro_end
						&& address != fv_addr
						&& address != gw_addr)
				if (address >= end_paddr) {
			if (address >= end_paddr)
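/*
 * free_initmem(): the two back-to-back map_pages() calls below remap the
 * init section; in the full source the first call (with an executable
 * protection) pre-primes the TLB so that map_pages(), which itself lives in
 * init text, keeps running while the second call drops the mapping back to
 * plain PAGE_KERNEL before the pages are poisoned and freed.
 */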
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
	memset((void *)init_begin, 0x00, init_end - init_begin);
	for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) {
		(init_end - init_begin) >> 10);
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
	printk (KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - __start_rodata) >> 10);
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#define MAP_START (KERNEL_MAP_START)
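/*
 * SET_MAP_OFFSET() rounds an address up to the next VM_MAP_OFFSET (32 kB)
 * boundary, leaving a small guard hole between adjacent kernel mapping
 * areas (top of physical memory, DMA mapping area, vmalloc area).
 */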
#define VM_MAP_OFFSET (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
				     & ~(VM_MAP_OFFSET-1)))
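/*
 * mem_init(): release bootmem to the buddy allocator, count reserved pages
 * (walking the page frames up to max_pfn), and print the "Memory: ..."
 * banner plus, under CONFIG_DEBUG_KERNEL, the virtual kernel memory layout.
 */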
	int codesize, reservedpages, datasize, initsize;
#ifndef CONFIG_DISCONTIGMEM
#ifdef CONFIG_DISCONTIGMEM
	for (pfn = 0; pfn < max_pfn; pfn++) {
	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
#ifdef CONFIG_DEBUG_KERNEL
	printk("virtual kernel memory layout:\n"
	       "    vmalloc : 0x%p - 0x%p   (%4ld MB)\n"
	       "    memory  : 0x%p - 0x%p   (%4ld MB)\n"
	       "      .init : 0x%p - 0x%p   (%4ld kB)\n"
	       "      .data : 0x%p - 0x%p   (%4ld kB)\n"
	       "      .text : 0x%p - 0x%p   (%4ld kB)\n",
	       ((unsigned long)_etext - (unsigned long)_text) >> 10);
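/*
 * The memory-statistics dump below walks mem_map (or, with
 * CONFIG_DISCONTIGMEM, each node's page array under pgdat_resize_lock)
 * counting reserved, swap-cache, free and shared pages, and then prints
 * each node's zonelists.
 */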
#ifndef CONFIG_DISCONTIGMEM
		else if (PageSwapCache(mem_map+i))
		else if (!page_count(&mem_map[i]))
			shared += page_count(&mem_map[i]) - 1;
			else if (PageSwapCache(p))
			else if (!page_count(p))
				shared += page_count(p) - 1;
		pgdat_resize_unlock(NODE_DATA(i), &flags);
#ifdef CONFIG_DISCONTIGMEM
		for (i = 0; i < npmem_ranges; i++) {
			zl = node_zonelist(i, 0);
			for (j = 0; j < MAX_NR_ZONES; j++) {
				printk("Zone list for zone %d on node %d: ", j, i);
					printk("[%d/%s] ", zone_to_nid(zone),
		unsigned long start_paddr;
		unsigned long end_paddr;
		map_pages((unsigned long)__va(start_paddr), start_paddr,
			  size, PAGE_KERNEL, 0);
#ifdef CONFIG_BLK_DEV_INITRD
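/*
 * gateway_init(): map the Linux gateway page (the page user space branches
 * to for system calls) at linux_gateway_page_addr; the size and protection
 * arguments of the map_pages() call are in the elided lines.  The fragment
 * that follows belongs to map_hpux_gateway_page(), which hand-builds the
 * pmd/pte for the HP-UX gateway page under CONFIG_HPUX.
 */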
static void __init gateway_init(void)
	unsigned long linux_gateway_page_addr;
	extern void * const linux_gateway_page;
	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long hpux_gw_page_addr;
	extern void * const hpux_gateway_page;
#if PTRS_PER_PMD == 1
	start_pmd = ((hpux_gw_page_addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
	start_pte = ((hpux_gw_page_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
	address = __pa(&hpux_gateway_page);
#if PTRS_PER_PMD == 1
	pg_table = (pte_t *) __va(pg_table) + start_pte;
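/*
 * For each physical memory range, fill in zones_size[] and hand it to the
 * zone initialisation code; with CONFIG_DISCONTIGMEM the inner loop also
 * records in pfnnid_map which node owns each PFNNID-sized slice of PFNs.
 */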
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long zones_size[MAX_NR_ZONES] = { 0, };
#ifdef CONFIG_DISCONTIGMEM
			for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
#define NR_SPACE_IDS 262144
#define NR_SPACE_IDS 32768
#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))
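/*
 * PA-RISC space ID allocator: IDs are handed out from the space_id[]
 * bitmap.  Freed IDs are parked in the dirty_space_id[] bitmap and only
 * returned to the free pool by recycle_sids() once the TLB has been purged
 * of entries tagged with them; once dirty_space_ids exceeds
 * RECYCLE_THRESHOLD, flush_tlb_all() also recycles them.
 */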
static unsigned long space_id_index;
static unsigned long dirty_space_ids = 0;
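/*
 * alloc_sid(): under sid_lock, pick the next free space ID starting from
 * space_id_index.  If the free pool is empty but dirty IDs exist, the lock
 * is dropped and (in the elided lines) the TLB is flushed so the dirty IDs
 * are recycled before retrying.
 */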
	spin_lock(&sid_lock);
	if (free_space_ids == 0) {
		if (dirty_space_ids != 0) {
			spin_unlock(&sid_lock);
			spin_lock(&sid_lock);
		BUG_ON(free_space_ids == 0);
	space_id_index = index;
	spin_unlock(&sid_lock);
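/*
 * free_sid(): mark the released space ID in the dirty bitmap rather than
 * freeing it directly; it only becomes allocatable again after the next
 * recycle, i.e. after the TLB has been flushed.
 */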
	unsigned long *dirty_space_offset;
	spin_lock(&sid_lock);
	BUG_ON(*dirty_space_offset & (1L << index));
	*dirty_space_offset |= (1L << index);
	spin_unlock(&sid_lock);
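/*
 * get_dirty_sids(): hand the caller the current count of dirty space IDs
 * and a copy of the dirty bitmap, then clear both so a new set can
 * accumulate while the TLB flush is in progress.
 */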
static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
	*ndirtyptr = dirty_space_ids;
	if (dirty_space_ids != 0) {
			dirty_array[i] = dirty_space_id[i];
			dirty_space_id[i] = 0;
		dirty_space_ids = 0;
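/*
 * recycle_sids() (SMP variant): XOR the previously captured dirty bitmap
 * back into space_id[], clearing those bits and returning the IDs to the
 * free pool.
 */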
static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
			space_id[i] ^= dirty_array[i];
		free_space_ids += ndirty;
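/*
 * Uniprocessor variant of recycle_sids(): no separate snapshot is needed,
 * so the dirty bitmap is folded straight back into space_id[] and cleared
 * under sid_lock.
 */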
static void recycle_sids(void)
	if (dirty_space_ids != 0) {
			space_id[i] ^= dirty_space_id[i];
			dirty_space_id[i] = 0;
		free_space_ids += dirty_space_ids;
		dirty_space_ids = 0;
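/*
 * flush_tlb_all(): on SMP, once enough dirty space IDs have accumulated it
 * snapshots them with get_dirty_sids(), flushes the TLB on every CPU (in
 * the elided lines) and then calls recycle_sids() to return them to the
 * free pool; recycle_ndirty and recycle_inuse carry that state across the
 * unlocked flush.  The uniprocessor version simply flushes and recycles
 * under sid_lock.
 */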
static unsigned long recycle_ndirty;
static unsigned int recycle_inuse;
	spin_lock(&sid_lock);
		get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
	spin_unlock(&sid_lock);
		spin_lock(&sid_lock);
		recycle_sids(recycle_ndirty, recycle_dirty_array);
		spin_unlock(&sid_lock);
	spin_lock(&sid_lock);
	spin_unlock(&sid_lock);
#ifdef CONFIG_BLK_DEV_INITRD
	printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);