16 #include <linux/compiler.h>
17 #include <linux/kernel.h>
18 #include <linux/errno.h>
19 #include <linux/string.h>
21 #include <linux/sched.h>
24 #include <asm/tlbflush.h>
25 #include <asm/unaligned.h>
/*
 * Instruction-field decode helpers for the UniCore32 alignment trap
 * handler.  Each macro extracts one field from the 32-bit faulting
 * instruction word `i`.
 *
 * All arguments are fully parenthesised: the previous forms such as
 * `(i & 0xe0000120)` misexpanded for compound arguments, e.g.
 * CODING_BITS(x | y) became `x | (y & 0xe0000120)`.
 */
#define CODING_BITS(i)	((i) & 0xe0000120)

#define LDST_P_BIT(i)	((i) & (1 << 28))	/* Preindex		*/
#define LDST_U_BIT(i)	((i) & (1 << 27))	/* Add offset		*/
#define LDST_W_BIT(i)	((i) & (1 << 25))	/* Writeback		*/
#define LDST_L_BIT(i)	((i) & (1 << 24))	/* Load			*/

/* True when the P and U bits (28 and 27) are equal. */
#define LDST_P_EQ_U(i)	((((i) ^ ((i) >> 1)) & (1 << 27)) == 0)

#define LDSTH_I_BIT(i)	((i) & (1 << 26))	/* half-word immediate	*/
#define LDM_S_BIT(i)	((i) & (1 << 26))	/* ldm/stm s-bit	*/
#define LDM_H_BIT(i)	((i) & (1 << 6))	/* ldm/stm high-reg bank */

#define RN_BITS(i)	(((i) >> 19) & 31)	/* Rn			*/
#define RD_BITS(i)	(((i) >> 14) & 31)	/* Rd			*/
#define RM_BITS(i)	((i) & 31)		/* Rm			*/

/* Register list: bits [18:9] packed down next to bits [5:0]. */
#define REGMASK_BITS(i)	((((i) & 0x7fe00) >> 3) | ((i) & 0x3f))
#define OFFSET_BITS(i)	((i) & 0x03fff)		/* immediate offset	*/

#define SHIFT_BITS(i)	(((i) >> 9) & 0x1f)	/* shift amount		*/
#define SHIFT_TYPE(i)	((i) & 0xc0)		/* shift kind		*/
#define SHIFT_LSL	0x00
#define SHIFT_LSR	0x40
#define SHIFT_ASR	0x80
#define SHIFT_RORRRX	0xc0
/*
 * get8_unaligned_check(val, addr, err): load one byte into val from
 * [addr] with post-increment ("ldb.u %1, [%2], #1"), advancing addr by
 * one.  The .fixup/__ex_table sections attach an exception-table entry
 * to the load; presumably the (elided) fixup code sets err on a fault.
 * NOTE(review): the __asm__ statement itself and several template
 * lines are missing from this excerpt -- verify against the full file.
 */
68 #define get8_unaligned_check(val, addr, err) \
70 "1: ldb.u %1, [%2], #1\n" \
72 " .pushsection .fixup,\"ax\"\n" \
77 " .pushsection __ex_table,\"a\"\n" \
81 : "=r" (err), "=&r" (val), "=r" (addr) \
82 : "0" (err), "2" (addr))
/*
 * get8t_unaligned_check(val, addr, err): as get8_unaligned_check(),
 * but by kernel convention the "t" suffix marks the user-space
 * ("translated") access variant.  The lines visible here are identical
 * to the non-"t" macro; any difference presumably lies in the elided
 * asm lines -- TODO confirm against the full source.
 */
84 #define get8t_unaligned_check(val, addr, err) \
86 "1: ldb.u %1, [%2], #1\n" \
88 " .pushsection .fixup,\"ax\"\n" \
93 " .pushsection __ex_table,\"a\"\n" \
97 : "=r" (err), "=&r" (val), "=r" (addr) \
98 : "0" (err), "2" (addr))
/*
 * get16_unaligned_check(val, addr): read a 16-bit value from a
 * possibly-unaligned address as two byte loads.  The (elided) closing
 * lines presumably combine the bytes (val |= v << 8) and branch to the
 * fault path when err is set -- verify against the full file.
 */
100 #define get16_unaligned_check(val, addr) \
102 unsigned int err = 0, v, a = addr; \
103 get8_unaligned_check(val, a, err); \
104 get8_unaligned_check(v, a, err); \
/*
 * put16_unaligned_check(val, addr): store a 16-bit value to a
 * possibly-unaligned address one byte at a time: store the low byte,
 * shift the value right by 8 ("mov %1, %1 >> #8"), store the next
 * byte -- i.e. lowest byte at the lowest address.  Both stores carry
 * .fixup/__ex_table entries; the elided fixup presumably sets err.
 * NOTE(review): the __asm__ statement and several lines are missing
 * from this excerpt.
 */
110 #define put16_unaligned_check(val, addr) \
112 unsigned int err = 0, v = val, a = addr; \
114 "1: stb.u %1, [%2], #1\n" \
115 " mov %1, %1 >> #8\n" \
116 "2: stb.u %1, [%2]\n" \
118 " .pushsection .fixup,\"ax\"\n" \
123 " .pushsection __ex_table,\"a\"\n" \
128 : "=r" (err), "=&r" (v), "=&r" (a) \
129 : "0" (err), "1" (v), "2" (a)); \
/*
 * __put32_unaligned_check(ins, val, addr): store a 32-bit value to a
 * possibly-unaligned address as four byte stores (labels 1:..4:),
 * shifting the value right by 8 between each, so the lowest byte
 * lands at the lowest address.  `ins` parameterises the store
 * mnemonic used by the wrappers below.  Each store has a
 * .fixup/__ex_table entry; the elided fixup presumably sets err.
 * NOTE(review): the __asm__ statement and several lines are missing
 * from this excerpt.
 */
134 #define __put32_unaligned_check(ins, val, addr) \
136 unsigned int err = 0, v = val, a = addr; \
138 "1: "ins" %1, [%2], #1\n" \
139 " mov %1, %1 >> #8\n" \
140 "2: "ins" %1, [%2], #1\n" \
141 " mov %1, %1 >> #8\n" \
142 "3: "ins" %1, [%2], #1\n" \
143 " mov %1, %1 >> #8\n" \
144 "4: "ins" %1, [%2]\n" \
146 " .pushsection .fixup,\"ax\"\n" \
151 " .pushsection __ex_table,\"a\"\n" \
158 : "=r" (err), "=&r" (v), "=&r" (a) \
159 : "0" (err), "1" (v), "2" (a)); \
/*
 * get32_unaligned_check(val, addr): read a 32-bit value from a
 * possibly-unaligned address as four byte loads.  The elided lines
 * presumably merge each byte into val (val |= v << 8/16/24) and jump
 * to a fault path when err is set -- verify against the full file.
 */
164 #define get32_unaligned_check(val, addr) \
166 unsigned int err = 0, v, a = addr; \
167 get8_unaligned_check(val, a, err); \
168 get8_unaligned_check(v, a, err); \
170 get8_unaligned_check(v, a, err); \
172 get8_unaligned_check(v, a, err); \
/*
 * Store a 32-bit value to an arbitrarily-aligned address as four
 * single-byte "stb.u" writes with exception-table protection.
 */
#define put32_unaligned_check(v, a)	\
	__put32_unaligned_check("stb.u", v, a)
/*
 * get32t_unaligned_check(val, addr): user-access ("t") variant of
 * get32_unaligned_check(), built from four get8t_unaligned_check()
 * byte loads.  The elided lines presumably merge the bytes into val
 * and check err -- verify against the full file.
 */
181 #define get32t_unaligned_check(val, addr) \
183 unsigned int v, a = addr; \
/*
 * User-access ("t") variant of put32_unaligned_check().  The visible
 * store mnemonic is the same "stb.u" as the kernel variant.
 */
#define put32t_unaligned_check(value, ptr)	\
	__put32_unaligned_check("stb.u", value, ptr)
/*
 * do_alignment_finish_ldst() fragment: applies base-register
 * writeback after a single load/store has been emulated.  The visible
 * statement negates the offset (offset.un = -offset.un), presumably
 * under an elided !LDST_U_BIT(instr) test (subtract-offset forms).
 * NOTE(review): the signature tail, the `offset` parameter type and
 * most of the body are missing from this excerpt.
 */
199 do_alignment_finish_ldst(
unsigned long addr,
unsigned long instr,
203 offset.
un = -offset.
un;
/*
 * do_alignment_ldrhstrh() fragment: emulates a misaligned halfword
 * load/store.  The visible test matches one specific encoding
 * ((instr & 0x4b003fe0) == 0x40000120) and the visible assignment
 * sign-extends the 16-bit value to long -- presumably the signed
 * halfword load path.  NOTE(review): most of the body is missing
 * from this excerpt.
 */
213 do_alignment_ldrhstrh(
unsigned long addr,
unsigned long instr,
219 if ((instr & 0x4b003fe0) == 0x40000120)
228 val = (
signed long)((
signed short)
val);
/*
 * do_alignment_ldrstr() fragment: emulates a misaligned word
 * load/store.  Only the extraction of the destination register
 * (rd = RD_BITS(instr)) is visible here.  NOTE(review): the rest of
 * the body is missing from this excerpt.
 */
248 do_alignment_ldrstr(
unsigned long addr,
unsigned long instr,
251 unsigned int rd =
RD_BITS(instr);
/*
 * do_alignment_ldmstm() fragment: emulates misaligned ldm/stm
 * (multiple-register transfer).  NOTE(review): large parts of the
 * body are missing from this excerpt; comments are hedged.
 */
287 do_alignment_ldmstm(
unsigned long addr,
unsigned long instr,
290 unsigned int rd, rn, pc_correction, reg_correction, nr_regs, regbits;
291 unsigned long eaddr, newaddr;
/* Effective address starts from the base register Rn. */
302 newaddr = eaddr = regs->
uregs[rn];
319 "addr = %08lx, eaddr = %08lx\n",
/*
 * reg_correction selects the register bank: +0x10 maps onto
 * uregs[16..31], 0x00 onto uregs[0..15] -- presumably driven by
 * LDM_H_BIT(instr); confirm against the full file.
 */
325 reg_correction = 0x10;
327 reg_correction = 0x00;
/* Walk the register mask, transferring uregs[rd + reg_correction]
 * to/from successive words at eaddr. */
330 regbits >>= 1, rd += 1)
334 uregs[rd + reg_correction], eaddr);
337 uregs[rd + reg_correction], eaddr);
/* Writeback the updated base address. */
342 regs->
uregs[rn] = newaddr;
/* On a fault, undo the PC advance so the instruction can be retried
 * or reported at the right address. */
346 regs->UCreg_pc -= pc_correction;
/* ldm with the s-bit set is explicitly not emulated. */
350 printk(
KERN_ERR "Alignment trap: not handling ldm with s-bit set\n");
/*
 * do_alignment() fragment: the main alignment-fault dispatcher.
 * NOTE(review): the function signature and most of the body are
 * missing from this excerpt; all comments below are hedged.
 */
358 unsigned long instr, instrptr;
/* Per-encoding emulation routine, presumably selected by switching
 * on CODING_BITS(instr). */
359 int (*handler) (
unsigned long addr,
unsigned long instr,
/* Fetch the faulting instruction word (instrptr elided above). */
365 instr = *(
unsigned long *)instrptr;
/* Halfword immediate offset: bits [13:9] shifted down by 4, merged
 * with bits [4:0]. */
378 offset.un = (instr & 0x3e00) >> 4 | (instr & 31);
381 handler = do_alignment_ldrhstrh;
/* Part of the register-shift offset computation; the surrounding
 * shift-type handling is elided. */
417 offset.un << (32 - shiftval);
/* Dispatch to the selected emulation routine. */
433 type =
handler(addr, instr, regs);
/* Apply base-register writeback for the single load/store forms. */
439 do_alignment_finish_ldst(addr, instr, regs,
offset);
/*
 * Unhandled so far: with F64 FPU support, recognise a coprocessor
 * load/store (top three instruction bits == 110) and emulate the
 * data transfer via MTF/MFF moves between general register `val`
 * and FPU register Fn.
 */
458 #ifdef CONFIG_UNICORE_FPU_F64
460 #define CODING_COLS 0xc0000000
461 #define COLS_OFFSET_BITS(i) (i & 0x1FF)
462 #define COLS_L_BITS(i) (i & (1<<24))
463 #define COLS_FN_BITS(i) ((i>>14) & 31)
464 if ((instr & 0xe0000000) == CODING_COLS) {
465 unsigned int fn = COLS_FN_BITS(instr);
466 unsigned long val = 0;
467 if (COLS_L_BITS(instr)) {
/* L-bit set: switch on fn (cases 0..31), transferring Fn via the
 * MTF instruction -- case body lines elided. */
470 #define ASM_MTF(n) case n: \
471 __asm__ __volatile__("MTF %0, F" __stringify(n) \
474 ASM_MTF(0); ASM_MTF(1); ASM_MTF(2); ASM_MTF(3);
475 ASM_MTF(4); ASM_MTF(5); ASM_MTF(6); ASM_MTF(7);
476 ASM_MTF(8); ASM_MTF(9); ASM_MTF(10); ASM_MTF(11);
477 ASM_MTF(12); ASM_MTF(13); ASM_MTF(14); ASM_MTF(15);
478 ASM_MTF(16); ASM_MTF(17); ASM_MTF(18); ASM_MTF(19);
479 ASM_MTF(20); ASM_MTF(21); ASM_MTF(22); ASM_MTF(23);
480 ASM_MTF(24); ASM_MTF(25); ASM_MTF(26); ASM_MTF(27);
481 ASM_MTF(28); ASM_MTF(29); ASM_MTF(30); ASM_MTF(31);
/* L-bit clear: same per-register switch using MFF (opposite
 * transfer direction). */
486 #define ASM_MFF(n) case n: \
487 __asm__ __volatile__("MFF %0, F" __stringify(n) \
490 ASM_MFF(0); ASM_MFF(1); ASM_MFF(2); ASM_MFF(3);
491 ASM_MFF(4); ASM_MFF(5); ASM_MFF(6); ASM_MFF(7);
492 ASM_MFF(8); ASM_MFF(9); ASM_MFF(10); ASM_MFF(11);
493 ASM_MFF(12); ASM_MFF(13); ASM_MFF(14); ASM_MFF(15);
494 ASM_MFF(16); ASM_MFF(17); ASM_MFF(18); ASM_MFF(19);
495 ASM_MFF(20); ASM_MFF(21); ASM_MFF(22); ASM_MFF(23);
496 ASM_MFF(24); ASM_MFF(25); ASM_MFF(26); ASM_MFF(27);
497 ASM_MFF(28); ASM_MFF(29); ASM_MFF(30); ASM_MFF(31);
/* Still unrecognised: log the instruction word and its address
 * (printk call opening elided). */
508 "%08lx at [<%08lx>]\n", instr, instrptr);
/*
 * alignment_init(): boot-time registration of the alignment-fault
 * handler; the visible string suggests it installs a hook described
 * as "alignment exception" -- presumably via hook_fault_code(), but
 * the call itself is elided from this excerpt.
 */
517 static int __init alignment_init(
void)
520 "alignment exception");