14 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/bitops.h>
26 #include <asm/cacheflush.h>
27 #include <asm/sizes.h>
28 #include <asm/uaccess.h>
29 #include <asm/pgtable.h>
32 #include <asm/mmu_context.h>
/*
 * Non-zero when PMB-backed I/O mappings are enabled.  Set to 1 by the
 * early_pmb() boot-parameter handler and consulted as a guard before
 * attempting to satisfy remap requests with PMB entries.
 */
70 static unsigned int pmb_iomapping_enabled;
79 return mk_pmb_entry(entry) |
PMB_ADDR;
84 return mk_pmb_entry(entry) |
PMB_DATA;
101 unsigned long flags = 0;
103 #if defined(CONFIG_CACHE_OFF)
105 #elif defined(CONFIG_CACHE_WRITETHROUGH)
107 #elif defined(CONFIG_CACHE_WRITEBACK)
117 static inline unsigned long pgprot_to_pmb_flags(
pgprot_t prot)
119 unsigned long pmb_flags = 0;
144 for (i = 0; i <
ARRAY_SIZE(pmb_entry_list); i++) {
151 pmbe = &pmb_entry_list[
i];
156 if ((vaddr < pmbe->
vpn) || (vaddr >= (pmbe->
vpn + pmbe->
size)))
158 if ((phys < pmbe->
ppn) || (phys >= (pmbe->
ppn + pmbe->
size)))
164 if (size <= pmbe->size) {
175 for (iter = pmbe->
link; iter; iter = iter->
link)
191 static bool pmb_size_valid(
unsigned long size)
196 if (pmb_sizes[i].size == size)
202 static inline bool pmb_addr_valid(
unsigned long addr,
unsigned long size)
204 return (addr >=
P1SEG && (addr + size - 1) <
P3SEG);
207 static inline bool pmb_prot_valid(
pgprot_t prot)
212 static int pmb_size_to_flags(
unsigned long size)
217 if (pmb_sizes[i].size == size)
218 return pmb_sizes[
i].flag;
223 static int pmb_alloc_entry(
void)
236 static struct pmb_entry *pmb_alloc(
unsigned long vpn,
unsigned long ppn,
237 unsigned long flags,
int entry)
240 unsigned long irqflags;
247 pos = pmb_alloc_entry();
263 pmbe = &pmb_entry_list[
pos];
281 static void pmb_free(
struct pmb_entry *pmbe)
292 static void __set_pmb_entry(
struct pmb_entry *pmbe)
296 addr = mk_pmb_addr(pmbe->
entry);
297 data = mk_pmb_data(pmbe->
entry);
308 static void __clear_pmb_entry(
struct pmb_entry *pmbe)
311 unsigned long addr_val, data_val;
313 addr = mk_pmb_addr(pmbe->
entry);
314 data = mk_pmb_data(pmbe->
entry);
320 writel_uncached(addr_val & ~
PMB_V, addr);
321 writel_uncached(data_val & ~
PMB_V, data);
325 static void set_pmb_entry(
struct pmb_entry *pmbe)
330 __set_pmb_entry(pmbe);
339 unsigned long orig_addr, orig_size;
340 unsigned long flags, pmb_flags;
345 if (!pmb_addr_valid(vaddr, size))
347 if (pmb_mapping_exists(vaddr, phys, size))
355 pmb_flags = pgprot_to_pmb_flags(prot);
359 for (i = mapped = 0; i <
ARRAY_SIZE(pmb_sizes); i++) {
360 if (size < pmb_sizes[i].size)
363 pmbe = pmb_alloc(vaddr, phys, pmb_flags |
366 pmb_unmap_entry(pmbp, mapped);
367 return PTR_ERR(pmbe);
372 pmbe->
size = pmb_sizes[
i].size;
374 __set_pmb_entry(pmbe);
420 if (!pmb_iomapping_enabled)
428 if (!pmb_prot_valid(prot))
432 if (size >= pmb_sizes[i].size)
435 last_addr = phys +
size;
436 align_mask = ~(pmb_sizes[
i].size - 1);
437 offset = phys & ~align_mask;
439 aligned =
ALIGN(last_addr, pmb_sizes[i].size) -
phys;
458 return (
void __iomem *)(offset + (
char *)vaddr);
464 unsigned long vaddr = (
unsigned long __force)addr;
469 for (i = 0; i <
ARRAY_SIZE(pmb_entry_list); i++) {
471 pmbe = &pmb_entry_list[
i];
472 if (pmbe->
vpn == vaddr) {
504 __clear_pmb_entry(pmbe);
508 pmbe = pmblink->
link;
511 }
while (pmbe && --depth);
514 static void pmb_unmap_entry(
struct pmb_entry *pmbe,
int depth)
522 __pmb_unmap_entry(pmbe, depth);
526 static void __init pmb_notify(
void)
530 pr_info(
"PMB: boot mappings:\n");
534 for (i = 0; i <
ARRAY_SIZE(pmb_entry_list); i++) {
540 pmbe = &pmb_entry_list[
i];
542 pr_info(
" 0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
555 static void __init pmb_synchronize(
void)
578 unsigned long addr_val, data_val;
580 unsigned long irqflags;
584 addr = mk_pmb_addr(i);
585 data = mk_pmb_data(i);
593 if (!(data_val &
PMB_V) || !(addr_val & PMB_V))
602 if (!pmb_ppn_in_range(ppn)) {
606 writel_uncached(addr_val & ~PMB_V, addr);
607 writel_uncached(data_val & ~PMB_V, data);
614 if (data_val &
PMB_C) {
616 data_val |= pmb_cache_flags();
618 writel_uncached(data_val, data);
624 pmbe = pmb_alloc(vpn, ppn, flags, i);
633 if (pmb_sizes[j].
flag == size)
634 pmbe->
size = pmb_sizes[
j].size;
644 if (pmb_can_merge(pmbp, pmbe))
657 unsigned long span, newsize;
659 int i = 1, depth = 0;
661 span = newsize = head->
size;
667 if (pmb_size_valid(span)) {
683 if (!depth || !pmb_size_valid(newsize))
686 head->
flags &= ~PMB_SZ_MASK;
687 head->
flags |= pmb_size_to_flags(newsize);
689 head->
size = newsize;
691 __pmb_unmap_entry(head->
link, depth);
692 __set_pmb_entry(head);
695 static void __init pmb_coalesce(
void)
702 for (i = 0; i <
ARRAY_SIZE(pmb_entry_list); i++) {
708 pmbe = &pmb_entry_list[
i];
729 #ifdef CONFIG_UNCACHED_MAPPING
730 static void __init pmb_resize(
void)
743 for (i = 0; i <
ARRAY_SIZE(pmb_entry_list); i++) {
750 pmbe = &pmb_entry_list[
i];
761 pmbe->
flags &= ~PMB_SZ_MASK;
762 pmbe->
flags |= pmb_size_to_flags(pmbe->
size);
766 __set_pmb_entry(pmbe);
775 static int __init early_pmb(
char *
p)
781 pmb_iomapping_enabled = 1;
795 #ifdef CONFIG_UNCACHED_MAPPING
815 static int pmb_seq_show(
struct seq_file *
file,
void *iter)
819 seq_printf(file,
"V: Valid, C: Cacheable, WT: Write-Through\n"
820 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
838 seq_printf(file,
"%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
839 i, ((addr & PMB_V) && (data & PMB_V)) ?
'V' :
' ',
840 (addr >> 24) & 0xff, (data >> 24) & 0xff,
841 sz_str, (data & PMB_C) ?
'C' :
' ',
842 (data &
PMB_WT) ?
"WT" :
"CB",
843 (data &
PMB_UB) ?
"UB" :
" B");
849 static int pmb_debugfs_open(
struct inode *
inode,
struct file *file)
856 .open = pmb_debugfs_open,
862 static int __init pmb_debugfs_init(
void)
876 static void pmb_syscore_resume(
void)
883 for (i = 0; i <
ARRAY_SIZE(pmb_entry_list); i++) {
885 pmbe = &pmb_entry_list[
i];
894 .
resume = pmb_syscore_resume,
897 static int __init pmb_sysdev_init(
void)