16 #include <linux/module.h>
17 #include <linux/signal.h>
18 #include <linux/sched.h>
19 #include <linux/kernel.h>
20 #include <linux/errno.h>
21 #include <linux/string.h>
22 #include <linux/types.h>
23 #include <linux/ptrace.h>
24 #include <linux/mman.h>
32 #include <linux/poison.h>
34 #include <linux/slab.h>
39 #include <asm/mmu_context.h>
40 #include <asm/processor.h>
41 #include <asm/pgtable.h>
42 #include <asm/pgalloc.h>
44 #include <asm/fixmap.h>
46 #include <asm/tlbflush.h>
47 #include <asm/sections.h>
48 #include <asm/setup.h>
/*
 * Zero out a top-level (pgd) page-table entry by storing an empty
 * hypervisor PTE.  Parameter renamed from the misleading "pmdptr":
 * this macro operates on a pgd slot, not a pmd.
 */
#define clear_pgd(pgdptr) (*(pgdptr) = hv_pte(0))
83 static void init_prealloc_ptes(
int node,
int pages)
95 int node = pfn_to_nid(pfn);
98 BUG_ON(pfn >= num_l2_ptes[node]);
99 return &l2_ptes[
node][pfn];
107 static int initial_heap_home(
void)
109 #if CHIP_HAS_CBOX_HOME_MAP()
111 return PAGE_HOME_HASH;
127 *(
pte_t *)pmd = pteval;
134 static inline pmd_t *alloc_pmd(
void)
139 static inline void assign_pmd(
pud_t *pud,
pmd_t *pmd)
150 assign_pte(pmd, pte);
158 assign_pmd(pud, alloc_pmd());
185 for (vaddr = start; vaddr <
end; vaddr +=
PMD_SIZE) {
186 pmd_t *pmd = get_pmd(pgd, vaddr);
188 assign_pte(pmd, alloc_pte());
193 #if CHIP_HAS_CBOX_HOME_MAP()
218 #if CHIP_HAS_CBOX_HOME_MAP()
239 #if CHIP_HAS_CBOX_HOME_MAP()
242 return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
246 if (address < (
ulong) _sinittext - CODE_DELTA)
253 if ((address >= (
ulong) __start_rodata &&
254 address < (
ulong) __end_rodata) ||
260 #if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
262 if (address == (
ulong) atomic_locks)
263 return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
280 if (address >= (
ulong) _end || address < (
ulong) _einitdata)
281 return construct_pgprot(PAGE_KERNEL, initial_heap_home());
283 #if CHIP_HAS_CBOX_HOME_MAP()
286 return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
294 if (address >= (
ulong)__w1data_begin && address < (
ulong)__w1data_end)
295 return construct_pgprot(PAGE_KERNEL, initial_heap_home());
306 cpu = cpumask_first(&kdata_mask);
308 if (page >= (
ulong)&init_thread_union &&
311 if (page == (
ulong)empty_zero_page)
314 #if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
315 if (page == (
ulong)atomic_locks)
319 cpu = cpumask_next(cpu, &kdata_mask);
321 cpu = cpumask_first(&kdata_mask);
323 return construct_pgprot(PAGE_KERNEL, cpu);
349 if (
strncmp(str,
"nocache", 7) == 0) {
351 pr_info(
"ktext: disabling local caching of kernel text\n");
362 if (
strcmp(str,
"huge") == 0)
363 pr_info(
"ktext: using one huge locally cached page\n");
366 else if (
strcmp(str,
"local") == 0) {
369 pr_info(
"ktext: using small pages with local caching\n");
373 else if (
strcmp(str,
"all") == 0) {
376 pr_info(
"ktext: using maximal caching neighborhood\n");
381 else if (cpulist_parse(str, &ktext_mask) == 0) {
383 cpulist_scnprintf(buf,
sizeof(buf), &ktext_mask);
384 if (cpumask_weight(&ktext_mask) > 1) {
386 pr_info(
"ktext: using caching neighborhood %s "
387 "with small pages\n", buf);
389 pr_info(
"ktext: caching on cpu %s with one huge page\n",
406 prot = hv_pte_set_nc(prot);
407 #if CHIP_HAS_NC_AND_NOALLOC_BITS()
409 prot = hv_pte_set_no_alloc_l2(prot);
432 static void __init kernel_physical_mapping_init(
pgd_t *pgd_base)
434 unsigned long long irqmask;
443 #if CHIP_HAS_CBOX_HOME_MAP()
444 if (ktext_arg_seen && ktext_hash) {
445 pr_warning(
"warning: \"ktext\" boot argument ignored"
446 " if \"kcache_hash\" sets up text hash-for-home\n");
450 if (kdata_arg_seen && kdata_hash) {
451 pr_warning(
"warning: \"kdata\" boot argument ignored"
452 " if \"kcache_hash\" sets up data hash-for-home\n");
455 if (kdata_huge && !hash_default) {
456 pr_warning(
"warning: disabling \"kdata=huge\"; requires"
457 " kcache_hash=all or =allbutstack\n");
468 cpumask_copy(&kstripe_mask, cpu_possible_mask);
470 kdata_mask = kstripe_mask;
474 #ifdef CONFIG_HIGHMEM
475 unsigned long end_pfn = node_lowmem_end_pfn[
i];
479 unsigned long end_huge_pfn = 0;
488 init_prealloc_ptes(i, end_pfn - pfn);
491 while (pfn < end_pfn) {
493 pmd = get_pmd(pgtables, address);
495 if (pfn < end_huge_pfn) {
496 pgprot_t prot = init_pgprot(address);
500 pte[pte_ofs] =
pfn_pte(pfn, prot);
504 " page at %#lx\n", address);
506 pfn++, pte_ofs++, address +=
PAGE_SIZE) {
507 pgprot_t prot = init_pgprot(address);
508 pte[pte_ofs] =
pfn_pte(pfn, prot);
510 assign_pte(pmd, pte);
520 cpumask_copy(&ktext_mask, cpu_possible_mask);
521 else if (ktext_nondataplane)
522 ktext_mask = kstripe_mask;
523 else if (!cpumask_empty(&ktext_mask)) {
526 cpumask_andnot(&
bad, &ktext_mask, cpu_possible_mask);
527 cpumask_and(&ktext_mask, &ktext_mask, cpu_possible_mask);
528 if (!cpumask_empty(&
bad)) {
530 cpulist_scnprintf(buf,
sizeof(buf), &
bad);
531 pr_info(
"ktext: not using unavailable cpus %s\n", buf);
533 if (cpumask_empty(&ktext_mask)) {
534 pr_warning(
"ktext: no valid cpus; caching on %d.\n",
536 cpumask_copy(&ktext_mask,
542 pmd = get_pmd(pgtables, address);
552 prot = hv_pte_set_mode(prot,
555 prot = hv_pte_set_mode(prot,
558 prot = hv_pte_set_mode(prot,
560 cpu = cpumask_first(&ktext_mask);
562 prot = ktext_set_nocache(prot);
565 BUG_ON(address != (
unsigned long)_stext);
567 for (; address < (
unsigned long)_einittext;
572 assign_pte(pmd++, pte);
577 cpu = cpumask_next(cpu, &ktext_mask);
579 cpu = cpumask_first(&ktext_mask);
581 pte[pte_ofs] =
pfn_pte(pfn, prot);
584 assign_pte(pmd, pte);
588 #if CHIP_HAS_CBOX_HOME_MAP()
590 pteval = hv_pte_set_mode(pteval,
592 pteval = ktext_set_nocache(pteval);
595 if (cpumask_weight(&ktext_mask) == 1) {
597 cpumask_first(&ktext_mask));
598 pteval = hv_pte_set_mode(pteval,
600 pteval = ktext_set_nocache(pteval);
601 }
else if (ktext_nocache)
602 pteval = hv_pte_set_mode(pteval,
605 pteval = hv_pte_set_mode(pteval,
607 for (; address < (
unsigned long)_einittext;
627 init_pgprot((
unsigned long)pgtables),
634 memcpy(pgd_base, pgtables,
sizeof(pgtables));
647 __insn_finv(&swapper_pgprot);
665 return pagenr < kaddr_to_pfn(
_end) &&
667 pagenr < kaddr_to_pfn(_einitdata)) &&
672 #ifdef CONFIG_HIGHMEM
673 static void __init permanent_kmaps_init(
pgd_t *pgd_base)
694 static void __init init_free_pfn_range(
unsigned long start,
unsigned long end)
699 for (pfn = start; pfn <
end; ) {
708 while (pfn + count > end) {
712 for (p = page, i = 0; i <
count; ++
i, ++
p) {
713 __ClearPageReserved(p);
721 init_page_count(page);
723 totalram_pages +=
count;
730 static void __init set_non_bootmem_pages_init(
void)
736 #ifdef CONFIG_HIGHMEM
745 #ifdef CONFIG_HIGHMEM
746 if (idx == ZONE_HIGHMEM)
751 if (start < percpu_pfn && end > percpu_pfn)
755 if (start <= pci_reserve_start_pfn &&
756 end > pci_reserve_start_pfn) {
757 if (end > pci_reserve_end_pfn)
758 init_free_pfn_range(pci_reserve_end_pfn, end);
759 end = pci_reserve_start_pfn;
762 init_free_pfn_range(start, end);
778 kernel_physical_mapping_init(pgd_base);
787 #ifdef CONFIG_HIGHMEM
788 permanent_kmaps_init(pgd_base);
801 assign_pmd(pud, alloc_pmd());
815 static void __init set_max_mapnr_init(
void)
817 #ifdef CONFIG_FLATMEM
824 int codesize, datasize, initsize;
830 #ifdef CONFIG_FLATMEM
834 #ifdef CONFIG_HIGHMEM
837 pr_err(
"fixmap and kmap areas overlap"
838 " - this will crash\n");
839 pr_err(
"pkstart: %lxh pkend: %lxh fixstart %lxh\n",
846 set_max_mapnr_init();
853 set_non_bootmem_pages_init();
859 initsize += (
unsigned long)&_einitdata - (
unsigned long)&
_sinitdata;
861 pr_info(
"Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n",
862 (
unsigned long) nr_free_pages() << (
PAGE_SHIFT-10),
873 #ifdef CONFIG_HIGHMEM
879 #ifdef CONFIG_HUGEVMAP
886 for (i = MAX_NUMNODES-1; i >= 0; --
i) {
889 unsigned long start = (
unsigned long)
891 unsigned long end = start +
899 for (i = MAX_NUMNODES-1; i >= 0; --
i) {
900 if ((
unsigned long)vbase_map[i] != -1
UL) {
902 i, (
unsigned long) (vbase_map[i]),
903 (
unsigned long) (last-1));
923 #ifndef CONFIG_NEED_MULTIPLE_NODES
928 unsigned long start_pfn = start >>
PAGE_SHIFT;
931 return __add_pages(zone, start_pfn, nr_pages);
946 panic(
"pgtable_cache_init(): Cannot create pgd cache");
949 #if !CHIP_HAS_COHERENT_LOCAL_CACHE()
955 static void mark_w1data_ro(
void)
958 unsigned long addr = (
unsigned long)__w1data_begin;
960 for (; addr <= (
unsigned long)__w1data_end - 1; addr +=
PAGE_SIZE) {
961 unsigned long pfn = kaddr_to_pfn((
void *)addr);
969 #ifdef CONFIG_DEBUG_PAGEALLOC
976 static int __init set_initfree(
char *str)
981 pr_info(
"initfree: %s free init pages\n",
982 initfree ?
"will" :
"won't");
986 __setup(
"initfree=", set_initfree);
990 unsigned long addr = (
unsigned long) begin;
992 if (kdata_huge && !initfree) {
994 " incompatible with kdata=huge\n");
1006 int pfn = kaddr_to_pfn((
void *)addr);
1019 __ClearPageReserved(page);
1020 init_page_count(page);
1030 pr_info(
"Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
1050 (
unsigned long)_einitdata);
1060 #if !CHIP_HAS_COHERENT_LOCAL_CACHE()