13 #include <linux/module.h>
14 #include <linux/tty.h>
17 #ifdef CONFIG_MTD_UCLINUX
20 #include <linux/cramfs_fs.h>
25 #include <asm/cacheflush.h>
29 #include <asm/div64.h>
52 #ifdef CONFIG_MTD_UCLINUX
/* Boot-time memory map: maximum number of entries in bfin_memmap.map,
 * and the entry type codes used by sanitize_memmap()/print_memory_map().
 * (Modeled after the x86 e820 map — TODO confirm against full file.)
 */
64 #define BFIN_MEMMAP_MAX 128
65 #define BFIN_MEMMAP_RAM 1
66 #define BFIN_MEMMAP_RESERVED 2
67 static struct bfin_memmap {
/* Forward declaration: handler for the "clkin_hz=" boot argument,
 * invoked from parse_cmdline_early(). Defined later in this file.
 */
88 static int early_init_clkin_hz(
char *
buf);
90 #if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
91 void __init generate_cplb_tables(
void)
104 #ifdef CONFIG_BFIN_ICACHE
108 #ifdef CONFIG_BFIN_DCACHE
120 #ifdef CONFIG_BFIN_ICACHE
123 # ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
128 " in instruction cache\n");
131 # ifdef CONFIG_BFIN_L2_ICACHEABLE
136 " in instruction cache\n");
142 #ifdef CONFIG_BFIN_DCACHE
145 #
if defined CONFIG_BFIN_EXTMEM_WRITEBACK
146 " cacheable (write-back)"
147 # elif defined CONFIG_BFIN_EXTMEM_WRITETHROUGH
148 " cacheable (write-through)"
155 #
if defined CONFIG_BFIN_L2_WRITEBACK
156 " cacheable (write-back)"
157 # elif defined CONFIG_BFIN_L2_WRITETHROUGH
158 " cacheable (write-through)"
178 #if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
179 generate_cplb_tables();
223 #if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
224 blackfin_iflush_l1_entry[0] = (
unsigned long)blackfin_icache_flush_range_l1;
235 unsigned long text_l1_len = (
unsigned long)_text_l1_len;
236 unsigned long data_l1_len = (
unsigned long)_data_l1_len;
237 unsigned long data_b_l1_len = (
unsigned long)_data_b_l1_len;
258 #ifdef CONFIG_ICACHE_FLUSH_L1
259 blackfin_iflush_l1_entry[1] = (
unsigned long)blackfin_icache_flush_range_l1 -
265 #ifdef CONFIG_ROMKERNEL
266 void __init bfin_relocate_xip_data(
void)
271 memcpy(_sinitdata, _init_data_lma, (
unsigned long)_init_data_len);
276 static void __init add_memory_region(
unsigned long long start,
281 i = bfin_memmap.nr_map;
288 bfin_memmap.map[
i].addr =
start;
289 bfin_memmap.map[
i].size =
size;
290 bfin_memmap.map[
i].type =
type;
291 bfin_memmap.nr_map++;
297 static int __init sanitize_memmap(
struct bfin_memmap_entry *
map,
int *pnr_map)
300 unsigned long current_type, last_type;
301 unsigned long long last_addr;
302 int chgidx, still_changing;
305 int old_nr, new_nr, chg_nr;
350 for (i = 0; i < old_nr; i++)
351 if (map[i].
addr + map[i].size < map[i].
addr)
355 for (i = 0; i < 2*old_nr; i++)
356 change_point[i] = &change_point_list[i];
361 for (i = 0; i < old_nr; i++) {
362 if (map[i].size != 0) {
363 change_point[chgidx]->addr = map[
i].addr;
364 change_point[chgidx++]->pentry = &map[
i];
365 change_point[chgidx]->addr = map[
i].addr + map[
i].size;
366 change_point[chgidx++]->pentry = &map[
i];
373 while (still_changing) {
375 for (i = 1; i < chg_nr; i++) {
378 if ((change_point[i]->addr < change_point[i-1]->addr) ||
379 ((change_point[i]->addr == change_point[i-1]->addr) &&
380 (change_point[i]->addr == change_point[i]->
pentry->addr) &&
381 (change_point[i-1]->addr != change_point[i-1]->pentry->addr))
383 change_tmp = change_point[
i];
384 change_point[
i] = change_point[i-1];
385 change_point[i-1] = change_tmp;
397 for (chgidx = 0; chgidx < chg_nr; chgidx++) {
399 if (change_point[chgidx]->addr == change_point[chgidx]->
pentry->addr) {
401 overlap_list[overlap_entries++] = change_point[chgidx]->
pentry;
404 for (i = 0; i < overlap_entries; i++) {
405 if (overlap_list[i] == change_point[chgidx]->
pentry)
406 overlap_list[
i] = overlap_list[overlap_entries-1];
413 for (i = 0; i < overlap_entries; i++)
414 if (overlap_list[i]->type > current_type)
415 current_type = overlap_list[
i]->type;
417 if (current_type != last_type) {
418 if (last_type != 0) {
419 new_map[new_entry].size =
420 change_point[chgidx]->addr - last_addr;
422 if (new_map[new_entry].size != 0)
426 if (current_type != 0) {
427 new_map[new_entry].addr = change_point[chgidx]->addr;
428 new_map[new_entry].type = current_type;
429 last_addr = change_point[chgidx]->addr;
431 last_type = current_type;
437 memcpy(map, new_map, new_nr*
sizeof(
struct bfin_memmap_entry));
443 static void __init print_memory_map(
char *who)
447 for (i = 0; i < bfin_memmap.nr_map; i++) {
449 bfin_memmap.map[i].addr,
450 bfin_memmap.map[i].addr + bfin_memmap.map[i].size);
451 switch (bfin_memmap.map[i].type) {
465 static __init int parse_memmap(
char *
arg)
467 unsigned long long start_at,
mem_size;
476 }
else if (*arg ==
'$') {
494 static __init void parse_cmdline_early(
char *cmdline_p)
496 char c =
' ', *to = cmdline_p;
500 if (!
memcmp(to,
"mem=", 4)) {
506 }
else if (!
memcmp(to,
"max_mem=", 8)) {
520 }
else if (!
memcmp(to,
"clkin_hz=", 9)) {
522 early_init_clkin_hz(to);
523 #ifdef CONFIG_EARLY_PRINTK
524 }
else if (!
memcmp(to,
"earlyprintk=", 12)) {
528 }
else if (!
memcmp(to,
"memmap=", 7)) {
552 static __init void memory_setup(
void)
554 #ifdef CONFIG_MTD_UCLINUX
555 unsigned long mtd_phys = 0;
557 unsigned long max_mem;
564 panic(
"DMA region exceeds memory limit: %lu.",
569 #if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
574 # if (defined(CONFIG_DEBUG_HUNT_FOR_ZERO))
575 if (max_mem >= 56 * 1024 * 1024)
576 max_mem = 56 * 1024 * 1024;
578 if (max_mem >= 60 * 1024 * 1024)
579 max_mem = 60 * 1024 * 1024;
591 #if defined(CONFIG_MTD_UCLINUX)
598 # if defined(CONFIG_EXT2_FS) || defined(CONFIG_EXT3_FS)
601 PAGE_ALIGN(*((
unsigned long *)(mtd_phys + 0x404)) << 10);
604 # if defined(CONFIG_CRAMFS)
609 # if defined(CONFIG_ROMFS_FS)
611 && ((
unsigned long *)mtd_phys)[1] ==
ROMSB_WORD1) {
617 pr_info(
"Limiting kernel memory to %liMB due to anomaly 05000263\n",
629 if (mtd_size == 0 ||
memory_end <= mtd_size) {
630 pr_emerg(
"Could not find valid ram mtd attached.\n");
637 pr_info(
"Found mtd parition at 0x%p, (len=0x%lx), moving to 0x%p\n",
638 _end, mtd_size, (
void *)memory_mtd_start);
648 pr_info(
"Limiting kernel memory to %liMB due to anomaly 05000263\n",
654 #if defined(CONFIG_ROMFS_ON_MTD) && defined(CONFIG_MTD_ROM)
672 " fixedcode = 0x%p-0x%p\n"
673 " text = 0x%p-0x%p\n"
674 " rodata = 0x%p-0x%p\n"
676 " data = 0x%p-0x%p\n"
677 " stack = 0x%p-0x%p\n"
678 " init = 0x%p-0x%p\n"
679 " available = 0x%p-0x%p\n"
680 #ifdef CONFIG_MTD_UCLINUX
681 " rootfs = 0x%p-0x%p\n"
683 #
if DMA_UNCACHED_REGION > 0
684 " DMA Zone = 0x%p-0x%p\n"
688 __start_rodata, __end_rodata,
689 __bss_start, __bss_stop,
691 (
void *)&init_thread_union,
693 __init_begin, __init_end,
695 #ifdef CONFIG_MTD_UCLINUX
696 , (
void *)memory_mtd_start, (
void *)(memory_mtd_start + mtd_size)
698 #
if DMA_UNCACHED_REGION > 0
714 for (i = 0; i < bfin_memmap.nr_map; i++) {
719 start =
PFN_UP(bfin_memmap.map[i].addr);
720 end =
PFN_DOWN(bfin_memmap.map[i].addr +
721 bfin_memmap.map[i].size);
731 static __init void setup_bootmem_allocator(
void)
735 unsigned long start_pfn, end_pfn;
736 unsigned long curr_pfn, last_pfn,
size;
742 sanitize_memmap(bfin_memmap.map, &bfin_memmap.nr_map);
743 print_memory_map(
"boot memmap");
753 if (min_low_pfn < memory_start >> PAGE_SHIFT)
767 for (i = 0; i < bfin_memmap.nr_map; i++) {
776 curr_pfn =
PFN_UP(bfin_memmap.map[i].addr);
777 if (curr_pfn >= end_pfn)
782 last_pfn =
PFN_DOWN(bfin_memmap.map[i].addr +
783 bfin_memmap.map[i].size);
785 if (last_pfn > end_pfn)
792 if (last_pfn <= curr_pfn)
795 size = last_pfn - curr_pfn;
805 #define EBSZ_TO_MEG(ebsz) \
808 switch (ebsz & 0xf) { \
809 case 0x1: meg = 16; break; \
810 case 0x3: meg = 32; break; \
811 case 0x5: meg = 64; break; \
812 case 0x7: meg = 128; break; \
813 case 0x9: meg = 256; break; \
814 case 0xb: meg = 512; break; \
818 static inline int __init get_mem_size(
void)
820 #if defined(EBIU_SDBCTL)
821 # if defined(BF561_FAMILY)
832 #elif defined(EBIU_DDRCTL1)
835 switch (ddrctl & 0xc0000) {
849 switch (ddrctl & 0x30000) {
857 if ((ddrctl & 0xc000) == 0x4000)
860 #elif defined(CONFIG_BF60x)
863 switch (ddrctl & 0xf00) {
912 unsigned long sclk, cclk;
920 if (
unlikely(CPUID != bfin_cpuid()))
921 printk(
KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
922 CPU, bfin_cpuid(), bfin_revid());
924 #ifdef CONFIG_DUMMY_CONSOLE
928 #if defined(CONFIG_CMDLINE_BOOL)
938 memset(&bfin_memmap, 0,
sizeof(bfin_memmap));
951 _ramend = get_mem_size() * 1024 * 1024;
963 #ifdef CONFIG_EBIU_MBSCTLVAL
969 #ifdef CONFIG_BFIN_HYSTERESIS_CONTROL
974 ~HYST_NONEGPIO_MASK) | HYST_NONEGPIO);
981 panic(
"ANOMALY 05000273 or 05000274: CCLK must be >= 2*SCLK");
992 (mmr & 0x1) ?
"active" :
"off",
993 (mmr & 0x2) ?
"en" :
"dis");
999 #if defined(CONFIG_BF53x) || defined(CONFIG_BF561) || \
1000 defined(CONFIG_BF538) || defined(CONFIG_BF539)
1007 #ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
1010 #ifdef CONFIG_DEBUG_DOUBLEFAULT_RESET
1020 #ifdef CONFIG_DEBUG_DOUBLEFAULT
1037 printk(
KERN_INFO "Blackfin support (C) 2004-2010 Analog Devices, Inc.\n");
1038 if (bfin_compiled_revid() == 0xffff)
1039 printk(
KERN_INFO "Compiled for ADSP-%s Rev any, running on 0.%d\n",
CPU, bfin_revid());
1040 else if (bfin_compiled_revid() == -1)
1045 if (
likely(CPUID == bfin_cpuid())) {
1046 if (bfin_revid() != bfin_compiled_revid()) {
1047 if (bfin_compiled_revid() == -1)
1048 printk(
KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n",
1050 else if (bfin_compiled_revid() != 0xffff) {
1051 printk(
KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n",
1052 bfin_compiled_revid(), bfin_revid());
1053 if (bfin_compiled_revid() > bfin_revid())
1054 panic(
"Error: you are missing anomaly workarounds for this rev");
1057 if (bfin_revid() < CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX)
1058 printk(
KERN_ERR "Warning: Unsupported Chip Revision ADSP-%s Rev 0.%d detected\n",
1062 printk(
KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n");
1065 printk(
KERN_INFO "Processor Speed: %lu MHz core clock, %lu MHz SCLk, %lu MHz SCLK0, %lu MHz SCLK1 and %lu MHz DCLK\n",
1066 cclk / 1000000, bfin_get_clk(
"SYSCLK") / 1000000, get_sclk0() / 1000000, get_sclk1() / 1000000, get_dclk() / 1000000);
1068 printk(
KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n",
1069 cclk / 1000000, sclk / 1000000);
1072 setup_bootmem_allocator();
1080 BUG_ON((
char *)&sigreturn_stub - (
char *)&fixed_code_start
1082 BUG_ON((
char *)&atomic_xchg32 - (
char *)&fixed_code_start
1084 BUG_ON((
char *)&atomic_cas32 - (
char *)&fixed_code_start
1086 BUG_ON((
char *)&atomic_add32 - (
char *)&fixed_code_start
1088 BUG_ON((
char *)&atomic_sub32 - (
char *)&fixed_code_start
1090 BUG_ON((
char *)&atomic_ior32 - (
char *)&fixed_code_start
1092 BUG_ON((
char *)&atomic_and32 - (
char *)&fixed_code_start
1094 BUG_ON((
char *)&atomic_xor32 - (
char *)&fixed_code_start
1096 BUG_ON((
char *)&safe_user_instruction - (
char *)&fixed_code_start
1106 static int __init topology_init(
void)
1120 static u_long cached_clkin_hz = CONFIG_CLKIN_HZ;
1121 #ifndef CONFIG_BF60x
1122 static u_long get_clkin_hz(
void)
1124 return cached_clkin_hz;
1127 static int __init early_init_clkin_hz(
char *
buf)
1130 #ifdef BFIN_KERNEL_CLOCK
1131 if (cached_clkin_hz != CONFIG_CLKIN_HZ)
1132 panic(
"cannot change clkin_hz when reprogramming clocks");
1138 #ifndef CONFIG_BF60x
1140 static u_long get_vco(
void)
1142 static u_long cached_vco;
1152 msel = (pll_ctl >> 9) & 0x3F;
1156 cached_vco = get_clkin_hz();
1157 cached_vco >>= (1 & pll_ctl);
1167 return bfin_get_clk(
"CCLK");
1169 static u_long cached_cclk_pll_div, cached_cclk;
1173 return get_clkin_hz();
1176 if (ssel == cached_cclk_pll_div)
1179 cached_cclk_pll_div = ssel;
1181 csel = ((ssel >> 4) & 0x03);
1183 if (ssel && ssel < (1 << csel))
1184 cached_cclk = get_vco() / ssel;
1186 cached_cclk = get_vco() >> csel;
1196 return bfin_get_clk(
"SCLK0");
1203 return bfin_get_clk(
"SCLK1");
1210 return bfin_get_clk(
"DCLK");
1221 static u_long cached_sclk;
1231 return get_clkin_hz();
1239 cached_sclk = get_vco() / ssel;
1264 static int show_cpuinfo(
struct seq_file *
m,
void *
v)
1268 int cpu_num = *(
unsigned int *)v;
1276 revid = bfin_revid();
1283 vendor =
"Analog Devices";
1290 seq_printf(m,
"processor\t: %d\n" "vendor_id\t: %s\n", cpu_num, vendor);
1292 if (CPUID == bfin_cpuid())
1293 seq_printf(m,
"cpu family\t: 0x%04x\n", CPUID);
1295 seq_printf(m,
"cpu family\t: Compiled for:0x%04x, running on:0x%04x\n",
1296 CPUID, bfin_cpuid());
1298 seq_printf(m,
"model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n"
1300 cpu, cclk/1000000, sclk/1000000,
1308 if (bfin_revid() != bfin_compiled_revid()) {
1309 if (bfin_compiled_revid() == -1)
1311 else if (bfin_compiled_revid() == 0xffff)
1314 seq_printf(m,
"(Compiled for Rev %d)", bfin_compiled_revid());
1317 seq_printf(m,
"\ncpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
1318 cclk/1000000, cclk%1000000,
1319 sclk/1000000, sclk%1000000);
1321 "Calibration\t: %lu loops\n",
1329 cache =
"dbank-A/B\t: cache/sram";
1334 cache =
"dbank-A/B\t: cache/cache";
1339 cache =
"dbank-A/B\t: sram/sram";
1357 seq_printf(m,
"cache size\t: %d KB(L1 icache) "
1358 "%d KB(L1 dcache) %d KB(L2 cache)\n",
1359 icache_size, dcache_size, 0);
1362 #
if defined(CONFIG_BFIN_EXTMEM_ICACHEABLE)
1367 " in instruction cache\n");
1369 #
if defined(CONFIG_BFIN_EXTMEM_WRITEBACK)
1370 "cacheable (write-back)"
1371 #elif defined(CONFIG_BFIN_EXTMEM_WRITETHROUGH)
1372 "cacheable (write-through)"
1376 " in data cache\n");
1379 seq_printf(m,
"icache setup\t: %d Sub-banks/%d Ways, %d Lines/Way\n",
1385 "dcache setup\t: %d Super-banks/%d Sub-banks/%d Ways, %d Lines/Way\n",
1388 #ifdef __ARCH_SYNC_CORE_DCACHE
1389 seq_printf(m,
"dcache flushes\t: %lu\n", dcache_invld_count[cpu_num]);
1391 #ifdef __ARCH_SYNC_CORE_ICACHE
1392 seq_printf(m,
"icache flushes\t: %lu\n", icache_invld_count[cpu_num]);
1403 #
if defined(CONFIG_BFIN_L2_ICACHEABLE)
1408 " in instruction cache\n");
1410 #
if defined(CONFIG_BFIN_L2_WRITEBACK)
1411 "cacheable (write-back)"
1412 #elif defined(CONFIG_BFIN_L2_WRITETHROUGH)
1413 "cacheable (write-through)"
1417 " in data cache\n");
1420 seq_printf(m,
"board memory\t: %ld kB (0x%08lx -> 0x%08lx)\n",
1422 seq_printf(m,
"kernel memory\t: %d kB (0x%08lx -> 0x%08lx)\n",
1432 *pos = cpumask_first(cpu_online_mask);
1439 static void *c_next(
struct seq_file *m,
void *v, loff_t *pos)
1441 *pos = cpumask_next(*pos, cpu_online_mask);
1446 static void c_stop(
struct seq_file *m,
void *v)
1454 .show = show_cpuinfo,