#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/sched.h>

#include <asm/unaligned.h>
#define CODING_BITS(i)  (i & 0x0e000000)

#define LDST_I_BIT(i)   (i & (1 << 26))         /* Immediate constant */
#define LDST_P_BIT(i)   (i & (1 << 24))         /* Preindex */
#define LDST_U_BIT(i)   (i & (1 << 23))         /* Add offset */
#define LDST_W_BIT(i)   (i & (1 << 21))         /* Writeback */
#define LDST_L_BIT(i)   (i & (1 << 20))         /* Load */

#define LDST_P_EQ_U(i)  ((((i) ^ ((i) >> 1)) & (1 << 23)) == 0)

#define LDSTHD_I_BIT(i) (i & (1 << 22))         /* double/half-word immediate */
#define LDM_S_BIT(i)    (i & (1 << 22))         /* write CPSR from SPSR */

#define RN_BITS(i)      ((i >> 16) & 15)        /* Rn */
#define RD_BITS(i)      ((i >> 12) & 15)        /* Rd */
#define RM_BITS(i)      (i & 15)                /* Rm */

#define REGMASK_BITS(i) (i & 0xffff)
#define OFFSET_BITS(i)  (i & 0x0fff)
#define IS_SHIFT(i)     (i & 0x0ff0)
#define SHIFT_BITS(i)   ((i >> 7) & 0x1f)
#define SHIFT_TYPE(i)   (i & 0x60)
#define SHIFT_LSL       0x00
#define SHIFT_LSR       0x20
#define SHIFT_ASR       0x40
#define SHIFT_RORRRX    0x60

#define BAD_INSTR       0xdeadc0de
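/*
 * A 32-bit Thumb-2 instruction is recognised by its leading halfword:
 * bits [15:13] must be 0b111 and bits [12:11] must be non-zero.
 */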
#define IS_T32(hi16) \
        (((hi16) & 0xe000) == 0xe000 && ((hi16) & 0x1800))
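/*
 * Fixup statistics, reported through the proc interface below when
 * CONFIG_PROC_FS is enabled.
 */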
static unsigned long ai_user;
static unsigned long ai_sys;
static unsigned long ai_skipped;
static unsigned long ai_half;
static unsigned long ai_word;
static unsigned long ai_dword;
static unsigned long ai_multi;
static int ai_usermode;
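/*
 * User-mode handling policy bits: warn about the fault, fix up the
 * access in software, and/or signal the offending task.  ai_usermode
 * is also used as an index into usermode_action[] below.
 */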
#define UM_WARN         (1 << 0)
#define UM_FIXUP        (1 << 1)
#define UM_SIGNAL       (1 << 2)
static bool cpu_is_v6_unaligned(void)
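/*
 * When cpu_is_v6_unaligned() is true, simply ignoring a user alignment
 * fault is unsafe: the faulting instruction would be retried and fault
 * again without making progress.  safe_usermode() therefore upgrades
 * such a request to include fixup mode, warning if asked to.
 */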
static int safe_usermode(int new_usermode, bool warn)

                printk(KERN_WARNING "alignment: ignoring faults is unsafe on this CPU. Defaulting to fixup mode.\n");
#ifdef CONFIG_PROC_FS
static const char *usermode_action[] = {
static int alignment_proc_show(struct seq_file *m, void *v)

        seq_printf(m, "User faults:\t%i (%s)\n", ai_usermode,
                        usermode_action[ai_usermode]);
                if (mode >= '0' && mode <= '5')
                        ai_usermode = safe_usermode(mode - '0', true);
        .open   = alignment_proc_open,
        .write  = alignment_proc_write,
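/*
 * Byte-assembly helpers for the unaligned access fixups below: the
 * rotate forms present the most significant byte first (big-endian),
 * while the "lsr #8" form steps through the value least significant
 * byte first (little-endian).
 */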
#define FIRST_BYTE_16   "mov %1, %1, ror #8\n"
#define FIRST_BYTE_32   "mov %1, %1, ror #24\n"
#define NEXT_BYTE       "ror #24"

#define FIRST_BYTE_16
#define FIRST_BYTE_32
#define NEXT_BYTE       "lsr #8"
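/*
 * Load one byte from a possibly faulting address.  The ARM() variant
 * post-increments the pointer in the load itself; the THUMB() variant
 * adds 1 separately.  The .fixup/__ex_table entries make a faulting
 * access set "err" instead of oopsing, so callers can bail out cleanly.
 */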
#define __get8_unaligned_check(ins,val,addr,err) \
        ARM( "1: "ins" %1, [%2], #1\n" ) \
        THUMB( "1: "ins" %1, [%2]\n" ) \
        THUMB( " add %2, %2, #1\n" ) \
        " .pushsection .fixup,\"ax\"\n" \
        " .pushsection __ex_table,\"a\"\n" \
        : "=r" (err), "=&r" (val), "=r" (addr) \
        : "0" (err), "2" (addr))
#define __get16_unaligned_check(ins,val,addr) \
        unsigned int err = 0, v, a = addr; \
        __get8_unaligned_check(ins,v,a,err); \
        val = v << ((BE) ? 8 : 0); \
        __get8_unaligned_check(ins,v,a,err); \
        val |= v << ((BE) ? 0 : 8); \
#define get16_unaligned_check(val,addr) \
        __get16_unaligned_check("ldrb",val,addr)

#define get16t_unaligned_check(val,addr) \
        __get16_unaligned_check("ldrbt",val,addr)
#define __get32_unaligned_check(ins,val,addr) \
        unsigned int err = 0, v, a = addr; \
        __get8_unaligned_check(ins,v,a,err); \
        val = v << ((BE) ? 24 : 0); \
        __get8_unaligned_check(ins,v,a,err); \
        val |= v << ((BE) ? 16 : 8); \
        __get8_unaligned_check(ins,v,a,err); \
        val |= v << ((BE) ? 8 : 16); \
        __get8_unaligned_check(ins,v,a,err); \
        val |= v << ((BE) ? 0 : 24); \
#define get32_unaligned_check(val,addr) \
        __get32_unaligned_check("ldrb",val,addr)

#define get32t_unaligned_check(val,addr) \
        __get32_unaligned_check("ldrbt",val,addr)
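/*
 * The put macros mirror the get macros: the value is stored one byte
 * at a time, rotating or shifting it by NEXT_BYTE between stores so
 * each byte lands in memory order.  The ...t variants use the
 * unprivileged ldrbt/strbt forms so accesses made on behalf of user
 * space are checked against user permissions.
 */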
#define __put16_unaligned_check(ins,val,addr) \
        unsigned int err = 0, v = val, a = addr; \
        __asm__( FIRST_BYTE_16 \
        ARM( "1: "ins" %1, [%2], #1\n" ) \
        THUMB( "1: "ins" %1, [%2]\n" ) \
        THUMB( " add %2, %2, #1\n" ) \
        " mov %1, %1, "NEXT_BYTE"\n" \
        "2: "ins" %1, [%2]\n" \
        " .pushsection .fixup,\"ax\"\n" \
        " .pushsection __ex_table,\"a\"\n" \
        : "=r" (err), "=&r" (v), "=&r" (a) \
        : "0" (err), "1" (v), "2" (a)); \
#define put16_unaligned_check(val,addr) \
        __put16_unaligned_check("strb",val,addr)

#define put16t_unaligned_check(val,addr) \
        __put16_unaligned_check("strbt",val,addr)
#define __put32_unaligned_check(ins,val,addr) \
        unsigned int err = 0, v = val, a = addr; \
        __asm__( FIRST_BYTE_32 \
        ARM( "1: "ins" %1, [%2], #1\n" ) \
        THUMB( "1: "ins" %1, [%2]\n" ) \
        THUMB( " add %2, %2, #1\n" ) \
        " mov %1, %1, "NEXT_BYTE"\n" \
        ARM( "2: "ins" %1, [%2], #1\n" ) \
        THUMB( "2: "ins" %1, [%2]\n" ) \
        THUMB( " add %2, %2, #1\n" ) \
        " mov %1, %1, "NEXT_BYTE"\n" \
        ARM( "3: "ins" %1, [%2], #1\n" ) \
        THUMB( "3: "ins" %1, [%2]\n" ) \
        THUMB( " add %2, %2, #1\n" ) \
        " mov %1, %1, "NEXT_BYTE"\n" \
        "4: "ins" %1, [%2]\n" \
        " .pushsection .fixup,\"ax\"\n" \
        " .pushsection __ex_table,\"a\"\n" \
        : "=r" (err), "=&r" (v), "=&r" (a) \
        : "0" (err), "1" (v), "2" (a)); \
#define put32_unaligned_check(val,addr) \
        __put32_unaligned_check("strb", val, addr)

#define put32t_unaligned_check(val,addr) \
        __put32_unaligned_check("strbt", val, addr)
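/*
 * Address writeback: when the U (add) bit is clear the offset is
 * subtracted from the base register, hence the negation below.
 */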
        offset.un = -offset.un;
                        val = (signed long)((signed short)val);  /* sign-extend the loaded halfword */
                        val = (signed long)((signed short)val);  /* sign-extend the loaded halfword */
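/*
 * LDRD/STRD transfer a register pair.  In the ARM encoding the second
 * register is implicitly rd + 1 (so rd must be even and not r14),
 * while the 32-bit Thumb-2 encoding names it explicitly in bits [11:8].
 */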
do_alignment_ldrdstrd(unsigned long addr, unsigned long instr,
                      struct pt_regs *regs)

        unsigned int rd = RD_BITS(instr);
        if ((instr & 0xfe000000) == 0xe8000000) {
                /* 32-bit Thumb-2 LDRD/STRD: the second register is in bits [11:8] */
                rd2 = (instr >> 8) & 0xf;
        } else if (((rd & 1) == 1) || (rd == 14))
                goto bad;
        else
                load = ((instr & 0xf0) == 0xd0);
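/* Fix up a misaligned word LDR/STR. */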
do_alignment_ldrstr(unsigned long addr, unsigned long instr, struct pt_regs *regs)

        unsigned int rd = RD_BITS(instr);
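/*
 * Emulate a misaligned LDM/STM by transferring each register in the
 * mask one word at a time.  regs->ARM_pc is temporarily biased by
 * "correction" while the transfer runs and is wound back on both the
 * success and fault paths.
 */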
do_alignment_ldmstm(unsigned long addr, unsigned long instr, struct pt_regs *regs)

        unsigned int rd, rn, correction, nr_regs, regbits;
        unsigned long eaddr, newaddr;

        regs->ARM_pc += correction;

        newaddr = eaddr = regs->uregs[rn];
#if !(defined CONFIG_CPU_ARM922T) && !(defined CONFIG_CPU_ARM920T)
                        "addr = %08lx, eaddr = %08lx\n",

             regbits >>= 1, rd += 1)

             regbits >>= 1, rd += 1)

                regs->uregs[rn] = newaddr;

                regs->ARM_pc -= correction;

        regs->ARM_pc -= correction;
        printk(KERN_ERR "Alignment trap: not handling ldm with s-bit set\n");
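/*
 * Convert a 16-bit Thumb load/store instruction into the equivalent
 * 32-bit ARM encoding so the ARM handlers above can be reused.  "L" is
 * the load/store bit (bit 11 of the Thumb opcode); opcodes that are
 * not handled here come back as BAD_INSTR.
 */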
thumb2arm(u16 tinstr)

        u32 L = (tinstr & (1<<11)) >> 11;

        switch ((tinstr & 0xf800) >> 11) {
                        ((tinstr & (1<<12)) << (22-12)) |       /* B (byte) bit */
                        ((tinstr & (7<<0)) << (12-0)) |         /* Rd */
                        ((tinstr & (7<<3)) << (16-3)) |         /* Rn */
                        ((tinstr & (31<<6)) >>                  /* immed_5, scaled by 4 for word accesses */
                                (6 - ((tinstr & (1<<12)) ? 0 : 2)));
                        ((tinstr & (7<<0)) << (12-0)) |         /* Rd */
                        ((tinstr & (7<<3)) << (16-3)) |         /* Rn */
                        ((tinstr & (7<<6)) >> (6-1)) |          /* immed_5[2:0] */
                        ((tinstr & (3<<9)) >> (9-8));           /* immed_5[4:3] */
                static const u32 subset[8] = {

                return subset[(tinstr & (7<<9)) >> 9] |
                        ((tinstr & (7<<0)) << (12-0)) |         /* Rd */
                        ((tinstr & (7<<3)) << (16-3)) |         /* Rn */
                        ((tinstr & (7<<6)) >> (6-0));           /* Rm */
                        ((tinstr & (7<<8)) << (12-8)) |         /* Rd */
                        ((tinstr & 255) << (2-0));              /* immed_8, scaled by 4 */

                        ((tinstr & (7<<8)) << (12-8)) |         /* Rd */
                        ((tinstr & 255) << 2);                  /* immed_8, scaled by 4 */
                u32 Rn = (tinstr & (7<<8)) >> 8;
                u32 W = ((L<<Rn) & (tinstr&255)) ? 0 : 1<<21;   /* no writeback if Rn is also loaded */

                return 0xe8800000 | W | (L<<20) | (Rn<<16) |
                        (tinstr & 255);
        if ((tinstr & (3 << 9)) == 0x0400) {
                static const u32 subset[4] = {

                return subset[(L<<1) | ((tinstr & (1<<8)) >> 8)] |
                        (tinstr & 255);         /* register_list */
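/*
 * Select a handler for a 32-bit Thumb-2 load/store-multiple or
 * load/store-double instruction.  Single-register PUSH/POP encodings
 * are rewritten in *pinstr as an equivalent register-mask form, and
 * the LDRD/STRD immediate forms fill in *poffset from the scaled
 * 8-bit immediate.
 */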
do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
                            union offset_union *poffset)

        unsigned long instr = *pinstr;
        u16 tinst1 = (instr >> 16) & 0xffff;
        u16 tinst2 = instr & 0xffff;

        switch (tinst1 & 0xffe0) {
                return do_alignment_ldmstm;

                if (RN_BITS(instr) == 13 && (tinst2 & 0x09ff) == 0x0904) {
                        const u32 subset[2] = {

                        *pinstr = subset[L] | (1<<RD_BITS(instr));
                        return do_alignment_ldmstm;

                poffset->un = (tinst2 & 0xff) << 2;

                return do_alignment_ldrdstrd;
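/*
 * Entry point for alignment aborts: fetch the faulting instruction,
 * normalise Thumb encodings, pick one of the handlers above, advance
 * the PC past the instruction and update the fixup statistics.  User
 * faults are then treated according to ai_usermode.
 */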
do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)

        unsigned long instr = 0, instrptr;

                        instr = (tinstr << 16) | tinst2;

                        instr = thumb2arm(tinstr);

        regs->ARM_pc += isize;
                offset.un = (instr & 0xf00) >> 4 | (instr & 15);

                if ((instr & 0x000000f0) == 0x000000b0 ||
                    (instr & 0x001000f0) == 0x001000f0)
                        handler = do_alignment_ldrhstrh;        /* halfword transfers */
                else if ((instr & 0x001000f0) == 0x000000d0 ||
                         (instr & 0x001000f0) == 0x000000f0)
                        handler = do_alignment_ldrdstrd;        /* LDRD/STRD */
                else if ((instr & 0x01f00ff0) == 0x01000090)    /* SWP */

                                        offset.un << (32 - shiftval);
                handler = do_alignment_t32_to_handler(&instr, regs, &offset);

        type = handler(addr, instr, regs);

                regs->ARM_pc -= isize;

                do_alignment_finish_ldst(addr, instr, regs, offset);
        printk(KERN_ERR "Alignment trap: not handling swp instruction\n");

                "%0*lx at [<%08lx>]\n",
                isize == 2 ? tinstr : instr, instrptr);
                printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*lx "
                       "Address=0x%08lx FSR 0x%03x\n", current->comm,
                       task_pid_nr(current), instrptr,
                       isize == 2 ? tinstr : instr,
        si.si_addr = (void __user *)addr;

        set_cr(cr_no_alignment);
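/*
 * Set up the proc interface (when CONFIG_PROC_FS is enabled), switch
 * CPUs that handle most unaligned accesses natively to the relaxed
 * alignment setting, and register do_alignment() as the handler for
 * alignment exceptions.
 */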
static int __init alignment_init(void)

#ifdef CONFIG_PROC_FS
                          &alignment_proc_fops);

        if (cpu_is_v6_unaligned()) {
                set_cr(cr_alignment);
                ai_usermode = safe_usermode(ai_usermode, false);

                        "alignment exception");

                        "alignment exception");