#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/poison.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/hypervisor.h>

#ifndef CONFIG_DEBUG_PAGEALLOC

static unsigned long cpu_pgsz_mask;
static int cmp_p64(const void *a, const void *b)
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
		prom_printf("Couldn't get %s property from /memory.\n",
	for (i = 0; i < ents; i++) {
			size -= new_base - base;
			if ((long) size < 0L)
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
#ifdef CONFIG_DEBUG_DCFLUSH
#ifdef CONFIG_DEBUG_DCFLUSH
#ifdef DCACHE_ALIASING_POSSIBLE
			     page_mapping(page) != NULL));
	if (page_mapping(page) != NULL &&
#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	32UL
#define PG_dcache_cpu_mask	\
	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
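/* The D-cache dirty state of a page is tracked directly in page->flags:
 * PG_dcache_dirty marks the page dirty, and the ID of the CPU whose
 * D-cache holds the dirty lines lives in the bits above
 * PG_dcache_cpu_shift.  This lets the eventual flush be targeted at a
 * single CPU instead of being broadcast.
 */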
static inline void set_dcache_dirty(struct page *page, int this_cpu)
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

			     "and %%g7, %1, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "casx [%2], %%g7, %%g1\n\t"
			     "bne,pn %%xcc, 1b\n\t"
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "srlx %%g7, %4, %%g1\n\t"
			     "and %%g1, %3, %%g1\n\t"
			     "bne,pn %%icc, 2f\n\t"
			     " andn %%g7, %1, %%g1\n\t"
			     "casx [%2], %%g7, %%g1\n\t"
			     "bne,pn %%xcc, 1b\n\t"
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
	unsigned long tsb_addr = (unsigned long) ent;

		tsb_addr = __pa(tsb_addr);
	unsigned long pg_flags;

	pg_flags = page->flags;
			clear_dcache_dirty_cpu(page, cpu);
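/* Insert the mapping for @address into the TSB selected by @tsb_index;
 * the virtual address is hashed down to a TSB slot via @tsb_hash_shift.
 */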
static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
				    unsigned long tsb_hash_shift, unsigned long address,
	tsb += ((address >> tsb_hash_shift) &
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, tte);

	unsigned long tsb_index, tsb_hash_shift, flags;
	unsigned long pfn = pte_pfn(pte);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
		tsb_index = MM_TSB_HUGE;
	__update_mmu_tsb_insert(mm, tsb_index, tsb_hash_shift,

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		if (dirty_cpu == this_cpu)
		set_dcache_dirty(page, this_cpu);
	paddr = kaddr & mask;
static const char *pgsz_strings[] = {
	"8K", "64K", "512K", "4MB", "32MB",
	"256MB", "2GB", "16GB",

		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) {
		if (cpu_pgsz_mask & (1UL << i)) {
				   printed ? "," : "", pgsz_strings[i]);
#ifdef CONFIG_DEBUG_DCFLUSH

static inline int in_obp_range(unsigned long vaddr)

static int cmp_ptrans(const void *a, const void *b)
	if (x->virt > y->virt)
	if (x->virt < y->virt)

static void __init read_obp_translations(void)
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_printf("prom_mappings: Size %d is too big.\n", n);
		prom_printf("prom_mappings: Couldn't get property.\n");

	for (i = 0; i < ents; i++) {
		for (; i < ents; i++) {
	for (i = 0; i < (last - first); i++) {
	for (; i < ents; i++) {
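/* On sun4v, ask the hypervisor to pin a translation permanently into
 * the TLB; used for the locked kernel image mappings below.
 */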
static void __init hypervisor_tlb_lock(unsigned long vaddr,
		prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: "
			    "errors with %lx\n", vaddr, 0, pte, mmu, ret);

static unsigned long kern_large_tte(unsigned long paddr);

static void __init remap_kernel(void)
	unsigned long phys_page, tte_vaddr, tte_data;
	int i, tlb_ent = sparc64_highest_locked_tlbent();

	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;
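	/* On sun4v the kernel image mappings are pinned one 4MB page at
	 * a time, in both the D-MMU and the I-MMU.
	 */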
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
static void __init inherit_prom_mappings(void)
	printk("Remapping the kernel... ");

	__asm__ __volatile__("flushw");
	for (va = start; va < end; va += 32) {
		spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
	for (va = start; va < end; va += 32)
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
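/* Context allocation state: a bitmap of CTX_BMAP_SLOTS longs tracks
 * which of the MAX_CTX_NR hardware contexts are in use within the
 * current generation of tlb_context_cache.
 */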
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;

	if (new_ctx >= ctx) {
	tlb_context_cache = new_ctx;

static int numa_enabled = 1;
static int numa_debug;

static int __init early_numa(char *p)
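/* numadbg() below only prints when the "numa=debug" early parameter
 * has set numa_debug.
 */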
#define numadbg(f, a...) \
do {	if (numa_debug) \
		printk(KERN_INFO f, ## a); \

static void __init find_ramdisk(unsigned long phys_base)
#ifdef CONFIG_BLK_DEV_INITRD
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		ramdisk_image = sparc_ramdisk_image;
			ramdisk_image = sparc_ramdisk_image64;
		ramdisk_image += phys_base;

		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
			ramdisk_image, sparc_ramdisk_size);

		initrd_end = ramdisk_image + sparc_ramdisk_size;
static int num_node_masks;

#ifdef CONFIG_NEED_MULTIPLE_NODES

struct mdesc_mblock {

static struct mdesc_mblock *mblocks;
static int num_mblocks;

static unsigned long ra_to_pa(unsigned long addr)
	for (i = 0; i < num_mblocks; i++) {
		struct mdesc_mblock *m = &mblocks[i];

		if (addr >= m->base &&
		    addr < (m->base + m->size)) {
static int find_node(unsigned long addr)
	addr = ra_to_pa(addr);
	for (i = 0; i < num_node_masks; i++) {
		if ((addr & p->mask) == p->val)

	*nid = find_node(start);
	while (start < end) {
		int n = find_node(start);
static void __init allocate_node_data(int nid)
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

static void init_node_masks_nonnuma(void)
	numadbg("Initializing tables for non-numa.\n");

	node_masks[0].mask = node_masks[0].val = 0;
		numa_cpu_lookup_table[i] = 0;

	cpumask_setall(&numa_cpumask_lookup_table[0]);
#ifdef CONFIG_NEED_MULTIPLE_NODES

struct mdesc_mlgroup {

static struct mdesc_mlgroup *mlgroups;
static int num_mlgroups;

		if (val && *val == cfg_handle)
	u64 arc, candidate, best_latency = ~(u64)0;
		if (strcmp(name, "pio-latency-group"))
		if (*val < best_latency) {

	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);

	cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;
	if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
static void __init add_node_ranges(void)
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		unsigned long size = reg->size;

		while (start < end) {
			unsigned long this_end;

			this_end = memblock_nid_range(start, end, &nid);

			numadbg("Setting memblock NUMA node nid[%d] "
				"start[%lx] end[%lx]\n",
				nid, start, this_end);

			memblock_set_node(start, this_end - start, nid);
	unsigned long paddr;

	mlgroups = __va(paddr);
	num_mlgroups = count;
		struct mdesc_mlgroup *m = &mlgroups[count++];

		numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
			"match[%llx] mask[%llx]\n",
			count - 1, m->node, m->latency, m->match, m->mask);

	unsigned long paddr;

	mblocks = __va(paddr);
	num_mblocks = count;
		struct mdesc_mblock *m = &mblocks[count++];
						 "address-congruence-offset", NULL);

		numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
			count - 1, m->base, m->size, m->offset);
	cpumask_clear(mask);
		if (*id < nr_cpu_ids)
			cpumask_set_cpu(*id, mask);

static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
	for (i = 0; i < num_mlgroups; i++) {
		struct mdesc_mlgroup *m = &mlgroups[i];
		if (m->node == node)

	struct mdesc_mlgroup *candidate = NULL;
	u64 arc, best_latency = ~(u64)0;
		struct mdesc_mlgroup *m = find_mlgroup(target);
		if (m->latency < best_latency) {
			best_latency = m->latency;
	if (num_node_masks != index) {
		       "index[%d] != num_node_masks[%d]\n",
		       index, num_node_masks);

	n = &node_masks[num_node_masks++];

	n->mask = candidate->mask;
	n->val = candidate->match;

	numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n",
		index, n->mask, n->val, candidate->latency);

	numa_parse_mdesc_group_cpus(md, grp, &mask);
		numa_cpu_lookup_table[cpu] = index;
	cpumask_copy(&numa_cpumask_lookup_table[index], &mask);

	return numa_attach_mlgroup(md, grp, index);
static int __init numa_parse_mdesc(void)
	err = grab_mblocks(md);
	err = grab_mlgroups(md);
		err = numa_parse_mdesc_group(md, node, count);
	for (i = 0; i < num_node_masks; i++) {
		allocate_node_data(i);
static int __init numa_parse_jbus(void)
		numa_cpu_lookup_table[cpu] = index;
		cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
		node_masks[index].val = cpu << 36UL;

	num_node_masks = index;

	for (index = 0; index < num_node_masks; index++) {
		allocate_node_data(index);
static int __init numa_parse_sun4u(void)
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		return numa_parse_jbus();

static int __init bootmem_init_numa(void)
	numadbg("bootmem_init_numa()\n");
		err = numa_parse_mdesc();
		err = numa_parse_sun4u();
static int bootmem_init_numa(void)

static void __init bootmem_init_nonnuma(void)
	numadbg("bootmem_init_nonnuma()\n");
		top_of_ram, total_ram);
		(top_of_ram - total_ram) >> 20);

	init_node_masks_nonnuma();

	allocate_node_data(0);

	unsigned long end_pfn;

	if (bootmem_init_numa() < 0)
		bootmem_init_nonnuma();

	memblock_dump_all();

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
#ifdef CONFIG_DEBUG_PAGEALLOC
static unsigned long __ref kernel_map_range(unsigned long pstart,
	unsigned long alloc_bytes = 0UL;
		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",

	while (vstart < vend) {
		unsigned long this_end, paddr = __pa(vstart);
		if (this_end > vend)
		while (vstart < this_end) {

extern unsigned int kvmap_linear_patch[1];

static void __init kpte_set_val(unsigned long index, unsigned long val)

static const unsigned long kpte_shift_min = 28;
static const unsigned long kpte_shift_max = 34;
static const unsigned long kpte_shift_incr = 3;
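/* kpte_shift_min/max/incr describe the page sizes the linear mapping
 * may use: 1UL << 28 (256MB), 1UL << 31 (2GB) and 1UL << 34 (16GB).
 * kpte_mark_using_shift() records, for each 256MB slot of a range, the
 * code ((shift - kpte_shift_min) / kpte_shift_incr) + 1 naming the
 * largest page size that can cover that slot.
 */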
static unsigned long kpte_mark_using_shift(unsigned long start,
					   unsigned long end, unsigned long shift)
	unsigned long size = (1UL << shift);
	unsigned long mask = (size - 1UL);
	unsigned long remains = end - start;

	if (remains < size || (start & mask))
	val = ((shift - kpte_shift_min) / kpte_shift_incr) + 1;
	if (shift != kpte_shift_max)
		unsigned long index = start >> kpte_shift_min;

		kpte_set_val(index, val);

		start += 1UL << kpte_shift_min;
		remains -= 1UL << kpte_shift_min;
static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
	unsigned long smallest_size, smallest_mask;

	smallest_size = (1UL << kpte_shift_min);
	smallest_mask = (smallest_size - 1UL);

	while (start < end) {
		unsigned long orig_start = start;

		for (s = kpte_shift_max; s >= kpte_shift_min; s -= kpte_shift_incr) {
			start = kpte_mark_using_shift(start, end, s);

			if (start != orig_start)

		if (start == orig_start)
			start = (start + smallest_size) & ~smallest_mask;
static void __init init_kpte_bitmap(void)
	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mark_kpte_bitmap(phys_start, phys_end);

static void __init kernel_physical_mapping_init(void)
#ifdef CONFIG_DEBUG_PAGEALLOC
	unsigned long i, mem_alloced = 0UL;

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mem_alloced += kernel_map_range(phys_start, phys_end,

	printk("Allocated %ld bytes for kernel page tables.\n",

	kvmap_linear_patch[0] = 0x01000000;
	flushi(&kvmap_linear_patch[0]);
#ifdef CONFIG_DEBUG_PAGEALLOC
	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

	kernel_map_range(phys_start, phys_end,
			 (enable ? PAGE_KERNEL : __pgprot(0)));

	for (i = 0; i < pavail_ents; i++) {
			return pavail[i].phys_addr;
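/* Once the kernel TSBs can be referenced by physical address, patch the
 * TSB accesses in the trap handlers so they use physical (quad-load)
 * addressing instead of virtual.
 */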
static void __init tsb_phys_patch(void)
	pquad = &__tsb_ldquad_phys_patch;
	while (pquad < &__tsb_ldquad_phys_patch_end) {
		unsigned long addr = pquad->addr;

		__asm__ __volatile__("flush %0"

	p = &__tsb_phys_patch;
	while (p < &__tsb_phys_patch_end) {
		unsigned long addr = p->addr;

		*(unsigned int *) addr = p->insn;
		__asm__ __volatile__("flush %0"
#ifndef CONFIG_DEBUG_PAGEALLOC
#define NUM_KTSB_DESCR	2
#define NUM_KTSB_DESCR	1
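/* patch_one_ktsb_phys() rewrites a sethi/or instruction pair in the TSB
 * miss handlers: the sethi at ia[0] receives the high 22 bits of the
 * TSB's physical address (pa >> 10), and the or at ia[1] the low 10
 * bits; each word is I-cache flushed after being written.
 */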
static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
	while (start < end) {
		unsigned int *ia = (unsigned int *)(unsigned long)*start;

		ia[0] = (ia[0] & ~0x3fffff) | (pa >> 10);
		__asm__ __volatile__("flush %0" : : "r" (ia));

		ia[1] = (ia[1] & ~0x3ff) | (pa & 0x3ff);
		__asm__ __volatile__("flush %0" : : "r" (ia + 1));
static void ktsb_phys_patch(void)
	extern unsigned int __swapper_tsb_phys_patch;
	extern unsigned int __swapper_tsb_phys_patch_end;
	unsigned long ktsb_pa;

	patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
			    &__swapper_tsb_phys_patch_end, ktsb_pa);
#ifndef CONFIG_DEBUG_PAGEALLOC
	extern unsigned int __swapper_4m_tsb_phys_patch;
	extern unsigned int __swapper_4m_tsb_phys_patch_end;

	patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
			    &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
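/* On sun4v the kernel TSBs are described to the hypervisor through
 * descriptor entries: descriptor 0 covers the base kernel TSB and,
 * unless CONFIG_DEBUG_PAGEALLOC, descriptor 1 covers the 4MB TSB.
 */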
static void __init sun4v_ktsb_init(void)
	unsigned long ktsb_pa;
	case 4 * 1024 * 1024:
	ktsb_descr[0].assoc = 1;
	ktsb_descr[0].ctx_idx = 0;
	ktsb_descr[0].tsb_base = ktsb_pa;
	ktsb_descr[0].resv = 0;

#ifndef CONFIG_DEBUG_PAGEALLOC
	ktsb_descr[1].assoc = 1;
	ktsb_descr[1].ctx_idx = 0;
	ktsb_descr[1].tsb_base = ktsb_pa;
	ktsb_descr[1].resv = 0;

	unsigned long pa, ret;
		       "errors with %lx\n", pa, ret);
static void __init sun4u_linear_pte_xor_finalize(void)
#ifndef CONFIG_DEBUG_PAGEALLOC

static void __init sun4v_linear_pte_xor_finalize(void)
#ifndef CONFIG_DEBUG_PAGEALLOC
		0xfffff80000000000UL;
		0xfffff80000000000UL;
		0xfffff80000000000UL;

static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);
	unsigned long end_pfn, shift, phys_base;
	unsigned long real_end, i;

	BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
#ifndef CONFIG_DEBUG_PAGEALLOC
		sun4v_pgprot_init();
		sun4u_pgprot_init();

	read_obp_translations();
	read_obp_memory("reg", &pall[0], &pall_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);
	phys_base = 0xffffffffffffffffUL;
	for (i = 0; i < pavail_ents; i++) {

	find_ramdisk(phys_base);

	memblock_dump_all();

	printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
	       num_kernel_image_mappings);

	inherit_prom_mappings();
		sun4v_linear_pte_xor_finalize();
		unsigned long impl, ver;

		__asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
		impl = ((ver >> 32) & 0xffff);

		sun4u_linear_pte_xor_finalize();
#ifndef CONFIG_DEBUG_PAGEALLOC
	kernel_physical_mapping_init();
		unsigned long max_zone_pfns[MAX_NR_ZONES];

		memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

		free_area_init_nodes(max_zone_pfns);

	printk("Booting Linux...\n");

	for (i = 0; i < pavail_ents; i++) {
		start = pavail[i].phys_addr;
		end = start + pavail[i].reg_size;

		if (paddr >= start && paddr < end)
#ifdef CONFIG_BLK_DEV_INITRD

static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap)
	read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);

	for (i = 0; i < pavail_ents; i++) {
		unsigned long old_start, old_end;

		old_start = pavail[i].phys_addr;
		old_end = old_start + pavail[i].reg_size;
		while (old_start < old_end) {
			for (n = 0; n < pavail_rescan_ents; n++) {
				unsigned long new_start, new_end;

				new_start = pavail_rescan[n].phys_addr;
				new_end = new_start +
					pavail_rescan[n].reg_size;

				if (new_start <= old_start &&
					set_bit(old_start >> 22, bitmap);

	prom_printf("mem_init: OLD start[%lx] size[%lx]\n",
	prom_printf("mem_init: NEW start[%lx] size[%lx]\n",
	prom_printf("mem_init: Cannot continue, aborting.\n");
static void __init patch_tlb_miss_handler_bitmap(void)
	extern unsigned int valid_addr_bitmap_insn[];
	extern unsigned int valid_addr_bitmap_patch[];

	valid_addr_bitmap_insn[1] = valid_addr_bitmap_patch[1];
	valid_addr_bitmap_insn[0] = valid_addr_bitmap_patch[0];
	flushi(&valid_addr_bitmap_insn[0]);
	unsigned long codepages, datapages, initpages;
	unsigned long addr, last;

	while (addr < last) {

	patch_tlb_miss_handler_bitmap();

#ifdef CONFIG_NEED_MULTIPLE_NODES
	totalram_pages -= 1;
		prom_printf("paging_init: Cannot alloc zero page.\n");

	printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
	unsigned long addr, initend;

	for (; addr < initend; addr += PAGE_SIZE) {
			ClearPageReserved(p);

#ifdef CONFIG_BLK_DEV_INITRD
		printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
			ClearPageReserved(p);
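/* PTE bits come in two encodings: the _4U variants for the classic
 * sun4u MMU and the _4V variants for the sun4v hypervisor MMU; the
 * composite masks below are therefore defined for both.
 */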
#define _PAGE_CACHE_4U	(_PAGE_CP_4U | _PAGE_CV_4U)
#define _PAGE_CACHE_4V	(_PAGE_CP_4V | _PAGE_CV_4V)
#define __DIRTY_BITS_4U	 (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
#define __DIRTY_BITS_4V	 (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)

#ifdef CONFIG_SPARSEMEM_VMEMMAP
unsigned long vmemmap_table[VMEMMAP_SIZE];
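/* Each slot of vmemmap_table holds one TTE mapping a VMEMMAP_CHUNK-sized
 * piece of the virtual memmap; the miss handler indexes it with
 * (addr >> VMEMMAP_CHUNK_SHIFT), as the population loop below does.
 */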
	unsigned long vstart = (unsigned long) start;
	unsigned long vend = (unsigned long) (start + nr);
	unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
	unsigned long end = VMEMMAP_ALIGN(phys_end);
	unsigned long pte_base;

	for (; addr < end; addr += VMEMMAP_CHUNK) {
		unsigned long *vmem_pp =
			vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT);

			*vmem_pp = pte_base | __pa(block);

			if (addr_end != addr || node_start != node) {
					addr_start, addr_end-1, node_start);

			addr_end = addr + VMEMMAP_CHUNK;
void __meminit vmemmap_populate_print_last(void)
			addr_start, addr_end-1, node_start);

static void prot_init_common(unsigned long page_none,
			     unsigned long page_shared,
			     unsigned long page_copy,
			     unsigned long page_readonly,
			     unsigned long page_exec_bit)
static void __init sun4u_pgprot_init(void)
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;
#ifdef CONFIG_DEBUG_PAGEALLOC
		0xfffff80000000000UL;

	for (i = 1; i < 4; i++)
		kern_linear_pte_xor[i] = kern_linear_pte_xor[0];

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
static void __init sun4v_pgprot_init(void)
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;
#ifdef CONFIG_DEBUG_PAGEALLOC
		0xfffff80000000000UL;

	for (i = 1; i < 4; i++)
		kern_linear_pte_xor[i] = kern_linear_pte_xor[0];

	prot_init_common(page_none, page_shared, page_copy, page_readonly,

	case 4 * 1024 * 1024:
	case 4 * 1024 * 1024:
static unsigned long kern_large_tte(unsigned long paddr)

	__asm__ __volatile__("flushw\n\t"
			     "rdpr %%pstate, %0\n\t"
			     "wrpr %0, %1, %%pstate"

	for (i = 0; i < 64; i++) {
		__asm__ __volatile__("stxa %0, [%1] %2\n\t"
				     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
		if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
			spitfire_put_dtlb_data(i, 0x0UL);
		__asm__ __volatile__("stxa %0, [%1] %2\n\t"
				     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
		if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
			spitfire_put_itlb_data(i, 0x0UL);

	cheetah_flush_dtlb_all();
	cheetah_flush_itlb_all();
	__asm__ __volatile__("wrpr %0, 0, %%pstate"

static struct page *__alloc_for_cache(struct mm_struct *mm)
				unsigned long address)
	pte = get_from_cache(mm);
		page = __alloc_for_cache(mm);
				unsigned long address)
	pte = get_from_cache(mm);
		page = __alloc_for_cache(mm);
		pgtable_page_ctor(page);
	if (put_page_testzero(page))
	if (put_page_testzero(page)) {
		pgtable_page_dtor(page);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		pmd_val(pmd) |= PMD_HUGE_PRESENT;
		pmd_val(pmd) |= PMD_HUGE_WRITE;
		pmd_val(pmd) |= PMD_HUGE_EXEC;
			pmd_val(pmd) |= PMD_HUGE_ACCESSED;
			pmd_val(pmd) |= PMD_HUGE_DIRTY;
		pmd_val(pmd) |= PMD_HUGE_WRITE;
		pmd_val(pmd) |= PMD_HUGE_EXEC;
			pmd_val(pmd) |= PMD_HUGE_ACCESSED;
			pmd_val(pmd) |= PMD_HUGE_DIRTY;

	pmd = pmd_set_protbits(pmd, pgprot, false);
	pmd_val(pmd) &= ~(PMD_HUGE_PRESENT |
	pmd = pmd_set_protbits(pmd, newprot, true);
	unsigned long pte = 0;
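	/* Translate the software PMD_HUGE_* bits into the real PTE bits;
	 * the two chains of tests below appear to handle the sun4u and
	 * sun4v encodings respectively.
	 */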
		if (pmd_val(entry) & PMD_HUGE_PRESENT)
		if (pmd_val(entry) & PMD_HUGE_PRESENT)
		if (pmd_val(entry) & PMD_HUGE_EXEC)
		if (pmd_val(entry) & PMD_HUGE_WRITE)
		if (pmd_val(entry) & PMD_HUGE_ACCESSED)
		if (pmd_val(entry) & PMD_HUGE_DIRTY)
		if (pmd_val(entry) & PMD_HUGE_PRESENT)
		if (pmd_val(entry) & PMD_HUGE_EXEC)
		if (pmd_val(entry) & PMD_HUGE_WRITE)
		if (pmd_val(entry) & PMD_HUGE_ACCESSED)
		if (pmd_val(entry) & PMD_HUGE_DIRTY)

	pte = (pmd_val(entry) & ~PMD_HUGE_PROTBITS);

	prot = pmd_pgprot(entry);

	__update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT,
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
static void context_reload(void *__data)

void hugetlb_setup(struct mm_struct *mm)
	tsb_context_switch(mm);

	spin_lock(&ctx_alloc_lock);
	spin_unlock(&ctx_alloc_lock);