#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/perf_event.h>
#include <asm/cacheflush.h>
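/* Decode whether the trapping format-3 memory instruction is a load or a
 * store: bit 21 (bit 2 of the op3 field in bits 19-24) is set for stores,
 * and the low nibble of op3 is then checked for the swap-style opcodes.
 */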
static inline enum direction decode_direction(unsigned int insn)
{
        unsigned long tmp = (insn >> 21) & 1;

        if (!tmp)
                return load;

        switch ((insn >> 19) & 0xf) {
        /* ... */
        }
}
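/* Work out the access size in bytes from op3: values 11 (ldx) and 14 (stx)
 * are 8-byte accesses; byte accesses can never trap as unaligned, so
 * anything unexpected ends up at the "Impossible unaligned trap" printk.
 */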
static inline int decode_access_size(struct pt_regs *regs, unsigned int insn)
{
        unsigned int tmp;

        tmp = ((insn >> 19) & 0xf);
        if (tmp == 11 || tmp == 14)     /* ldx/stx */
                return 8;
        /* ... */
        printk("Impossible unaligned trap. insn=%08x\n", insn);
        /* ... */
}
static inline int decode_asi(unsigned int insn, struct pt_regs *regs)
{
        if (insn & 0x800000) {
                if (insn & 0x2000)
                        return (unsigned char)(regs->tstate >> 24);     /* %asi */
                else
                        return (unsigned char)(insn >> 5);              /* imm */
        } else
                return ASI_P;
}
static inline int decode_signedness(unsigned int insn)
{
        return (insn & 0x400000);
}
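/* Registers %l0-%l7/%i0-%i7 (register numbers 16 and up) live in the current
 * register window, so the windows must be flushed to the stack before those
 * registers can be read or written through memory.
 */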
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
                                       unsigned int rd, int from_kernel)
{
        if (rs2 >= 16 || rs1 >= 16 || rd >= 16) {
                if (from_kernel != 0)
                        __asm__ __volatile__("flushw");
                else
                        flushw_user();
        }
}
static inline long sign_extend_imm13(long imm)
{
        return imm << 51 >> 51;
}
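/* Read an integer register of the trapping instruction.  Globals and outs
 * come straight from pt_regs; locals and ins are read from the register
 * window saved on the stack (directly for kernel traps, via get_user() for
 * user traps, honouring the 32-bit stack layout where needed).
 */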
static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
        unsigned long value, fp;

        if (reg < 16)
                return (!reg ? 0 : regs->u_regs[reg]);

        fp = regs->u_regs[UREG_FP];

        if (regs->tstate & TSTATE_PRIV) {
                struct reg_window *win;
                win = (struct reg_window *)(fp + STACK_BIAS);
                value = win->locals[reg - 16];
        } else if (!test_thread_64bit_stack(fp)) {
                /* ... 32-bit user stack: read through a reg_window32 ... */
        } else {
                struct reg_window __user *win;
                win = (struct reg_window __user *)(fp + STACK_BIAS);
                get_user(value, &win->locals[reg - 16]);
        }
        return value;
}
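/* Like fetch_reg(), but return the address of the register slot so that the
 * emulation code can store a result back into it.
 */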
static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
        unsigned long fp;

        if (reg < 16)
                return &regs->u_regs[reg];

        fp = regs->u_regs[UREG_FP];

        if (regs->tstate & TSTATE_PRIV) {
                struct reg_window *win;
                win = (struct reg_window *)(fp + STACK_BIAS);
                return &win->locals[reg - 16];
        } else if (!test_thread_64bit_stack(fp)) {
                struct reg_window32 *win32;
                win32 = (struct reg_window32 *)((unsigned long)((u32)fp));
                return (unsigned long *)&win32->locals[reg - 16];
        } else {
                struct reg_window *win;
                win = (struct reg_window *)(fp + STACK_BIAS);
                return &win->locals[reg - 16];
        }
}
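/* Effective address of the trapping access: rs1 plus either the
 * sign-extended 13-bit immediate (when the i bit, 0x2000, is set) or rs2.
 */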
static unsigned long compute_effective_address(struct pt_regs *regs,
                                               unsigned int insn, unsigned int rd)
{
        unsigned int rs1 = (insn >> 14) & 0x1f;
        unsigned int rs2 = insn & 0x1f;
        int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;

        if (insn & 0x2000) {
                maybe_flush_windows(rs1, 0, rd, from_kernel);
                return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
        } else {
                maybe_flush_windows(rs1, rs2, rd, from_kernel);
                return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
        }
}
extern int __do_int_store(unsigned long *dst_addr, int size,
                          unsigned long src_val, int asi);
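/* Gather the source value for an emulated integer store.  A store-double
 * (decoded as size 16) is rebuilt from the even/odd register pair and
 * written as one 8-byte access; if the ASI was normalised away from a
 * little-endian variant the value is byte-swapped first.
 */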
static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
                               struct pt_regs *regs, int asi, int orig_asi)
{
        unsigned long zero = 0;
        unsigned long *src_val_p = &zero;
        unsigned long src_val;

        if (size == 16) {
                size = 8;
                zero = (((long)(reg_num ?
                        (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) |
                        (unsigned)fetch_reg(reg_num + 1, regs);
        } else if (reg_num) {
                src_val_p = fetch_reg_addr(reg_num, regs);
        }
        src_val = *src_val_p;
        if (unlikely(asi != orig_asi)) {
                switch (size) {
                case 2:
                        src_val = swab16(src_val);
                        break;
                case 4:
                        src_val = swab32(src_val);
                        break;
                case 8:
                        src_val = swab64(src_val);
                        break;
                default:
                        BUG();
                }
        }
        return __do_int_store(dst_addr, size, src_val, asi);
}
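/* Retire the emulated instruction: step the trap PC queue (TPC <- TNPC,
 * TNPC advances by 4) and truncate both for 32-bit tasks.
 */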
static inline void advance(struct pt_regs *regs)
{
        regs->tpc = regs->tnpc;
        regs->tnpc += 4;
        if (test_thread_flag(TIF_32BIT)) {
                regs->tpc &= 0xffffffff;
                regs->tnpc &= 0xffffffff;
        }
}
static inline int floating_point_load_or_store_p(unsigned int insn)
{
        return (insn >> 24) & 1;
}
static inline int ok_for_kernel(unsigned int insn)
{
        return !floating_point_load_or_store_p(insn);
}
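/* A kernel-mode unaligned access could not be emulated.  If the faulting
 * instruction has an exception table entry, branch to its fixup handler;
 * otherwise report it like any other bad kernel dereference.
 */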
static void kernel_mna_trap_fault(int fixup_tstate_asi)
{
        struct pt_regs *regs = current_thread_info()->kern_una_regs;
        unsigned int insn = current_thread_info()->kern_una_insn;
        const struct exception_table_entry *entry;

        entry = search_exception_tables(regs->tpc);
        if (!entry) {
                unsigned long address;

                address = compute_effective_address(regs, insn,
                                                    ((insn >> 25) & 0x1f));
                if (address < PAGE_SIZE)
                        printk(KERN_ALERT "Unable to handle kernel NULL "
                               "pointer dereference in mna handler");
                else
                        printk(KERN_ALERT "Unable to handle kernel paging "
                               "request in mna handler");
                /* ... */
                printk(KERN_ALERT "current->{active_,}mm->pgd = %016lx\n",
                       (current->mm ? (unsigned long) current->mm->pgd :
                                      (unsigned long) current->active_mm->pgd));
                die_if_kernel("Oops", regs);
        }

        regs->tpc = entry->fixup;
        regs->tnpc = regs->tpc + 4;

        if (fixup_tstate_asi) {
                /* ... */
        }
}
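/* Log a kernel unaligned access that is about to be emulated. */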
static void log_unaligned(struct pt_regs *regs)
{
        /* ... */
        printk("Kernel unaligned access at TPC[%lx] %pS\n",
               regs->tpc, (void *) regs->tpc);
}
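/* Main fixup path for unaligned kernel loads and stores: decode the access,
 * compute its effective address and emulate it with do_int_load() or
 * do_int_store().  FPU and atomic forms are not emulated for the kernel.
 */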
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
        enum direction dir = decode_direction(insn);
        int size = decode_access_size(regs, insn);
        int orig_asi, asi;
        /* ... */
        orig_asi = asi = decode_asi(insn, regs);
        if (asi == ASI_AIUS) {
                kernel_mna_trap_fault(0);
                return;
        }

        log_unaligned(regs);

        if (!ok_for_kernel(insn) || dir == both) {
                printk("Unsupported unaligned load/store trap for kernel "
                       "at <%016lx>.\n", regs->tpc);
                unaligned_panic("Kernel does fpu/atomic "
                                "unaligned load/store.", regs);

                kernel_mna_trap_fault(0);
        } else {
                unsigned long addr, *reg_addr;
                int err;

                addr = compute_effective_address(regs, insn,
                                                 ((insn >> 25) & 0x1f));
                /* ... */
                switch (dir) {
                case load:
                        reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs);
                        err = do_int_load(reg_addr, size,
                                          (unsigned long *) addr,
                                          decode_signedness(insn), asi);
                        /* ... */
                        break;

                case store:
                        err = do_int_store(((insn>>25)&0x1f), size,
                                           (unsigned long *) addr, regs,
                                           asi, orig_asi);
                        break;

                default:
                        panic("Impossible kernel unaligned trap.");
                }
                if (unlikely(err))
                        kernel_mna_trap_fault(1);
                else
                        advance(regs);
        }
}
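/* Emulate the popc (population count) instruction: the operand is either
 * the sign-extended immediate or the rs2 register, and the resulting bit
 * count is written back to rd.
 */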
int handle_popc(u32 insn, struct pt_regs *regs)
{
        int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
        int ret, rd = ((insn >> 25) & 0x1f);
        u64 value;
        /* ... */
        if (insn & 0x2000) {
                maybe_flush_windows(0, 0, rd, from_kernel);
                value = sign_extend_imm13(insn);
        } else {
                maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
                value = fetch_reg(insn & 0x1f, regs);
        }
        ret = hweight64(value);
        /* ... */
        if (!test_thread_64bit_stack(fp)) {
                /* ... */
        } else {
                struct reg_window __user *win;
                win = (struct reg_window __user *)(fp + STACK_BIAS);
                put_user(ret, &win->locals[rd - 16]);
        }
        advance(regs);
        return 1;
}
extern void sun4v_data_access_exception(struct pt_regs *regs,
                                        unsigned long addr,
                                        unsigned long type_ctx);
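/* Emulate quad/unaligned floating-point loads and stores.  Bit 0x200000
 * selects the store (STQ) path; otherwise LDF/LDDF/LDQF data is fetched in
 * 32-bit pieces and byte-swapped for little-endian ASIs before being placed
 * into the FPU state.
 */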
int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{
        unsigned long addr = compute_effective_address(regs, insn, 0);
        int freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
        struct fpustate *f = FPUSTATE;
        int asi = decode_asi(insn, regs);
        /* ... */
        save_and_clear_fpu();
        /* ... */
        if (insn & 0x200000) {
                /* STQ */
                u64 first = 0, second = 0;
                /* ... */
                first = *(u64 *)&f->regs[freg];
                /* ... */
                {
                        /* Need to convert endians */
                        u64 tmp = __swab64p(&first);

                        first = __swab64p(&second);
                        second = tmp;
                }
                /* ... */
        } else {
                /* LDF, LDDF, LDQF */
                int size, i;
                int err;
                /* ... */
                switch (insn & 0x180000) {
                case 0x000000: size = 1; break;
                case 0x100000: size = 4; break;
                default: size = 2; break;
                }
                /* ... */
                for (i = 0; i < size; i++)
                        data[i] = 0;
                /* ... */
                for (i = 1; i < size; i++)
                        err |= __get_user(data[i], (u32 __user *)(addr + 4*i));
                /* ... */
                if (err && !(asi & 0x2 /* NF */)) {
                        /* ... */
                }
                /* ... */
        }
        advance(regs);
        return 1;
}
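/* A non-faulting load (ASI_*NF) took a fault: the architected behaviour is
 * to return zero, so the destination register (and the second register of
 * an ldd pair) is simply cleared.
 */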
void handle_ld_nf(u32 insn, struct pt_regs *regs)
{
        int rd = ((insn >> 25) & 0x1f);
        int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
        unsigned long *reg;
        /* ... */
        maybe_flush_windows(0, 0, rd, from_kernel);
        reg = fetch_reg_addr(rd, regs);
        if (from_kernel || rd < 16) {
                reg[0] = 0;
                if ((insn & 0x780000) == 0x180000)
                        reg[1] = 0;
        } else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) {
                put_user(0, (int __user *) reg);
                if ((insn & 0x780000) == 0x180000)
                        put_user(0, ((int __user *) reg) + 1);
        } else {
                put_user(0, (unsigned long __user *) reg);
                if ((insn & 0x780000) == 0x180000)
                        put_user(0, (unsigned long __user *) reg + 1);
        }
        advance(regs);
}
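/* LDDF alignment trap: load the two aligned 32-bit halves by hand,
 * reassemble them (byte-swapping for little-endian ASIs) and deposit the
 * result in the FPU register file.
 */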
void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
        unsigned long pc = regs->tpc;
        unsigned long tstate = regs->tstate;
        /* ... */
        if (test_thread_flag(TIF_32BIT))
                pc = (u32)pc;
        if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
                int asi = decode_asi(insn, regs);
                /* ... */
                save_and_clear_fpu();
                freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
                value = (((u64)first) << 32) | second;
                if (asi & 0x8) /* Little */
                        value = __swab64p(&value);
                /* ... */
        }
        /* ... */
}
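/* STDF alignment trap: fetch the FPU register value and store it as two
 * aligned 32-bit halves, byte-swapping for little-endian ASIs.
 */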
void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
        unsigned long pc = regs->tpc;
        unsigned long tstate = regs->tstate;
        /* ... */
        if (test_thread_flag(TIF_32BIT))
                pc = (u32)pc;
        if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
                int asi = decode_asi(insn, regs);
                freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
                /* ... */
                save_and_clear_fpu();
                /* ... */
                value = *(u64 *)&f->regs[freg];
                switch (asi) {
                /* ... */
                case ASI_PL:
                case ASI_SL:
                        value = __swab64p(&value);
                        break;
                /* ... */
                }
        }
        /* ... */
}