#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#ifdef CONFIG_X86_PAT
static inline void pat_disable(const char *reason)
{
	pat_enabled = 0;
	printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);
#else
static inline void pat_disable(const char *reason)
{
}
#endif
static int __init pat_debug_setup(char *str)
{
	pat_debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);
#define PAT(x, y)	((u64)PAT_ ## y << ((x)*8))
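/*
 * PAT(x, y) shifts the encoding for memory type PAT_y into byte x of the
 * 64-bit IA32_PAT MSR image, one byte per PAT entry. pat_init() composes
 * the whole register this way:
 *
 *	pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
 *	      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
 */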
	bool boot_cpu = !boot_pat_state;	/* first CPU through pat_init() */

	if (!cpu_has_pat) {
		if (!boot_pat_state) {
			pat_disable("PAT not supported by CPU.");
			return;
		}
		/* The boot CPU already switched to PAT; that cannot be undone. */
		printk(KERN_ERR "PAT enabled, but not supported by secondary CPU\n");
		BUG();
	}
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
struct pagerange_state {
	unsigned long	cur_pfn;
	int		ram;
	int		not_ram;
};

static int pagerange_is_ram_callback(unsigned long initial_pfn,
				     unsigned long total_nr_pages, void *arg)
{
	struct pagerange_state *state = arg;

	state->not_ram	|= initial_pfn > state->cur_pfn;
	state->ram	|= total_nr_pages > 0;
	state->cur_pfn	 = initial_pfn + total_nr_pages;

	return state->ram && state->not_ram;
}
static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
{
	int ret = 0;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct pagerange_state state = {start_pfn, 0, 0};

	/* The legacy ISA region is always tracked as non-RAM. */
	if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
		start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;

	if (start_pfn < end_pfn) {
		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
				&state, pagerange_is_ram_callback);
	}

	return (ret > 0) ? -1 : (state.ram ? 1 : 0);
}
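/*
 * Note the tri-state result: 1 if the range is entirely RAM, 0 if it
 * contains no RAM, and -1 for a mix (walk_system_ram_range() stops early
 * with a positive value once the callback has seen both).
 */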
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
				  unsigned long *new_type)
{
	struct page *page;
	u64 pfn;

	/* First pass: fail if any page already has a conflicting memtype. */
	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		unsigned long type;

		page = pfn_to_page(pfn);
		type = get_page_memtype(page);
		if (type != -1) {
			printk(KERN_INFO "reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%lx, req 0x%lx\n",
			       start, end - 1, type, req_type);
			if (new_type)
				*new_type = type;
			return -EBUSY;
		}
	}

	if (new_type)
		*new_type = req_type;

	/* Second pass: no conflicts, so commit the new type to every page. */
	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, req_type);
	}
	return 0;
}
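/*
 * get_page_memtype()/set_page_memtype() keep the memtype in page flags, so
 * RAM ranges never touch the global rbtree; -1 means "no memtype set",
 * i.e. the default write-back state.
 */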
static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, -1);
	}
	return 0;
}
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
		    unsigned long *new_type)
{
	struct memtype *new;
	unsigned long actual_type;
	int is_range_ram;
	int err = 0;

	actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);

	if (new_type)
		*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {
		err = reserve_ram_pages_type(start, end, req_type, new_type);
		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	new = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start = start;
	new->end   = end;
	new->type  = actual_type;

	spin_lock(&memtype_lock);

	err = rbt_memtype_check_insert(new, new_type);
	if (err) {
		printk(KERN_INFO "reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
		       start, end - 1,
		       cattr_name(new->type), cattr_name(req_type));
		kfree(new);
		spin_unlock(&memtype_lock);
		return err;
	}

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
		start, end - 1, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}
int free_memtype(u64 start, u64 end)
{
	struct memtype *entry;
	int is_range_ram;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {
		return free_ram_pages_type(start, end);
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	spin_lock(&memtype_lock);
	entry = rbt_memtype_erase(start, end);
	spin_unlock(&memtype_lock);

	if (!entry) {
		printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
		       current->comm, current->pid, start, end - 1);
		return -EINVAL;
	}

	kfree(entry);

	dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);

	return 0;
}
static unsigned long lookup_memtype(u64 paddr)
{
	unsigned long rettype = _PAGE_CACHE_WB;
	struct memtype *entry;

	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
		struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);

		rettype = get_page_memtype(page);
		/* -1 means the RAM page is unreserved, i.e. default WB. */
		if (rettype == -1)
			rettype = _PAGE_CACHE_WB;
		return rettype;
	}

	spin_lock(&memtype_lock);

	entry = rbt_memtype_lookup(paddr);
	if (entry != NULL)
		rettype = entry->type;
	else
		rettype = _PAGE_CACHE_UC_MINUS;

	spin_unlock(&memtype_lock);
	return rettype;
}
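/*
 * lookup_memtype() is only meaningful with PAT enabled: RAM pages answer
 * from their page flags (unreserved pages default to write-back), while
 * everything else falls back to the rbtree, or to UC- when untracked.
 */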
int io_reserve_memtype(resource_size_t start, resource_size_t end,
		       unsigned long *type)
{
	resource_size_t size = end - start;
	unsigned long req_type = *type;
	unsigned long new_type;
	int ret;

	ret = reserve_memtype(start, end, req_type, &new_type);
	if (ret)
		goto out_err;

	if (!is_new_memtype_allowed(start, size, req_type, new_type))
		goto out_free;
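/*
 * The remainder of io_reserve_memtype() (elided here) syncs the kernel
 * identity mapping via kernel_map_sync_memtype() and unwinds with
 * free_memtype() at the out_free/out_err labels on failure.
 */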
#ifdef CONFIG_STRICT_DEVMEM
/* With STRICT_DEVMEM the check is done in drivers/char/mem.c instead. */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* Needed to avoid cache aliasing through /dev/mem when PAT is enabled. */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
			       current->comm, from, to - 1);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				 unsigned long size, pgprot_t *vma_prot)
{
	unsigned long flags = _PAGE_CACHE_WB;

	if (!range_is_allowed(pfn, size))
		return 0;
572 "for [mem %#010Lx-%#010Lx]\n",
575 base, (
unsigned long long)(base + size-1));
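/*
 * This error path belongs to kernel_map_sync_memtype(), which keeps the
 * cache attributes of the kernel identity mapping in sync (via
 * ioremap_change_attr()) whenever a tracked range also lies inside the
 * direct map.
 */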
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
			     int strict_prot)
{
	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
	unsigned long flags = want_flags;
	int is_ram;
	int ret;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * RAM pages are tracked with page flags and never refcounted here;
	 * only warn if the existing type differs from the request.
	 */
	if (is_ram) {
		if (!pat_enabled)
			return 0;

		flags = lookup_memtype(paddr);
		if (want_flags != flags) {
			printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
			       current->comm, current->pid,
			       cattr_name(want_flags),
			       (unsigned long long)paddr,
			       (unsigned long long)(paddr + size - 1),
			       cattr_name(flags));
			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
					      (~_PAGE_CACHE_MASK)) | flags);
		}
		return 0;
	}
	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
	if (ret)
		return ret;

	if (flags != want_flags) {
		if (strict_prot ||
		    !is_new_memtype_allowed(paddr, size, want_flags, flags)) {
			free_memtype(paddr, paddr + size);
			printk(KERN_ERR "%s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n",
			       current->comm, current->pid,
			       cattr_name(want_flags),
			       (unsigned long long)paddr,
			       (unsigned long long)(paddr + size - 1),
			       cattr_name(flags));
			return -EINVAL;
		}
		/* Non-strict callers accept the substitute type. */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) | flags);
	}
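/*
 * strict_prot separates the two callers: track_pfn_copy() passes 1 because
 * a fork must reproduce the exact protection, while track_pfn_remap()
 * passes 0 and may accept a compatible substitute type.
 */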
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
}
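/*
 * Only non-RAM ranges carry an rbtree reservation; reserve_pfn_range()
 * never takes a reference for RAM pages, so there is nothing to undo for
 * them here.
 */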
	if (vma->vm_flags & VM_PAT) {
		/* Re-reserve the whole chunk the parent vma covers. */
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}
int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
		    unsigned long pfn, unsigned long addr, unsigned long size)
{
	resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
	unsigned long flags;
	int ret;

	/* Whole-VMA remaps take the full reservation path. */
	if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) {
		ret = reserve_pfn_range(paddr, size, prot, 0);
		if (!ret)
			vma->vm_flags |= VM_PAT;
		return ret;
	}

	/* A partial remap only requires one uniform memtype across the range. */
	flags = lookup_memtype(paddr);
	while (size > PAGE_SIZE) {
		size -= PAGE_SIZE;
		paddr += PAGE_SIZE;
		if (flags != lookup_memtype(paddr))
			return -EINVAL;
	}
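/*
 * Once the loop above confirms a uniform type, track_pfn_remap() folds it
 * into *prot with __pgprot(), mirroring what reserve_pfn_range() does for
 * whole-VMA mappings.
 */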
	/* An all-zero pfn/size means "untrack the whole VMA". */
	if (!paddr && !size) {
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return;
		}
		size = vma->vm_end - vma->vm_start;
	}
	free_pfn_range(paddr, size);
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)
static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *print_entry;
	int ret;

	print_entry = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	ret = rbt_memtype_copy_nth_element(print_entry, pos);
	spin_unlock(&memtype_lock);

	if (ret) {
		kfree(print_entry);
		return NULL;
	}
	return print_entry;
}
static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_printf(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}
static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return memtype_get_idx(*pos);
}
static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}
static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
		   print_entry->start, print_entry->end);
	kfree(print_entry);

	return 0;
}
static const struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};
static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}
static const struct file_operations memtype_fops = {
	.open    = memtype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
static int __init pat_memtype_list_init(void)
{
	if (pat_enabled) {
		debugfs_create_file("pat_memtype_list", S_IRUSR,
				    arch_debugfs_dir, NULL, &memtype_fops);
	}
	return 0;
}

late_initcall(pat_memtype_list_init);
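/*
 * With CONFIG_DEBUG_FS, the tracked memtypes become readable at
 * /sys/kernel/debug/x86/pat_memtype_list (arch_debugfs_dir is the x86
 * debugfs directory), one "type @ start-end" line per entry.
 */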