17 #include <linux/kernel.h>
18 #include <linux/sched.h>
22 #include <linux/errno.h>
23 #include <linux/ptrace.h>
25 #include <linux/string.h>
26 #include <linux/signal.h>
28 #include <asm/cacheflush.h>
30 #include <asm/uaccess.h>
31 #include <asm/pgtable.h>
32 #include <asm/processor.h>
33 #include <asm/mmu_context.h>
/*
 * NOTE(review): extraction-garbled fragment.  Original source line numbers
 * ("38", "52") are fused into the text and both function bodies are missing.
 * Line 38 appears to be the return type of get_stack_long() and line 52 the
 * signature of put_stack_long(task, offset, data) -- presumably accessors for
 * the traced task's saved register frame (see get_stack_long/put_stack_long
 * uses below).  Verify against the upstream file before compiling.
 */
38 static inline unsigned long int
52 put_stack_long(
struct task_struct *task,
int offset,
unsigned long data)
/*
 * Register-number -> stack-frame-offset table; indexed as
 * reg_offset[regno] and passed to get_stack_long() below.
 * NOTE(review): initializer lines are missing from this extraction.
 */
62 static int reg_offset[] = {
/*
 * ptrace_read_user(tsk, off, data): read one word of the traced task's
 * user area (the PTRACE_PEEKUSR path).  NOTE(review): extraction-garbled
 * fragment -- original line numbers are fused into the text and interior
 * lines (declarations, switch labels, returns) are missing.  Do not
 * compile as-is; verify against the upstream file.
 */
71 static int ptrace_read_user(
struct task_struct *tsk,
unsigned long off,
72 unsigned long __user *data)
/* reject unaligned offsets and anything past the user area */
79 if ((off & 3) || off >
sizeof(
struct user) - 3)
/* condition bit: bit 8 of the saved PSW */
92 psw = get_stack_long(tsk,
PT_PSW);
93 tmp = ((psw >> 8) & 1);
97 unsigned long psw, bbpsw;
98 psw = get_stack_long(tsk,
PT_PSW);
99 bbpsw = get_stack_long(tsk,
PT_BBPSW);
/* pack a PSW byte and a backup-PSW byte into one word */
100 tmp = ((psw >> 8) & 0xff) | ((bbpsw & 0xff) << 8);
104 tmp = get_stack_long(tsk,
PT_BPC);
/* general registers: word offsets below sizeof(struct pt_regs)/4 */
110 if (off < (
sizeof(
struct pt_regs) >> 2))
111 tmp = get_stack_long(tsk, off);
/* NOTE(review): the ">> 2" placement below looks garbled; the original
 * was presumably ((long)&dummy->fpu >> 2) -- confirm upstream. */
113 else if (off >= (
long)(&dummy->
fpu >> 2) &&
116 if (off == (
long)(&dummy->
fpu.fpscr >> 2))
121 tmp = ((
long *)(&tsk->
thread.fpu >> 2))
122 [off - (
long)&dummy->
fpu];
123 }
else if (off == (
long)(&dummy->
u_fpvalid >> 2))
/*
 * ptrace_write_user(tsk, off, data): write one word of the traced task's
 * user area (the PTRACE_POKEUSR path); mirrors ptrace_read_user() above.
 * NOTE(review): extraction-garbled fragment -- line numbers fused into the
 * text, interior lines missing.  Verify against the upstream file.
 */
133 static int ptrace_write_user(
struct task_struct *tsk,
unsigned long off,
/* same alignment / bounds check as the read path */
141 if ((off & 3) || off >
sizeof(
struct user) - 3)
156 psw = get_stack_long(tsk,
PT_PSW);
/* replace only bit 8 (the condition bit) of the saved PSW */
157 psw = (psw & ~0x100) | ((data & 1) << 8);
158 ret = put_stack_long(tsk,
PT_PSW, psw);
/* general registers: word offsets below sizeof(struct pt_regs)/4 */
166 if (off < (
sizeof(
struct pt_regs) >> 2))
167 ret = put_stack_long(tsk, off, data);
/* NOTE(review): ">> 2" placement again looks garbled here, as in
 * ptrace_read_user() -- confirm upstream. */
169 else if (off >= (
long)(&dummy->
fpu >> 2) &&
172 ((
long *)&tsk->
thread.fpu)
173 [off - (
long)&dummy->
fpu] = data;
175 }
else if (off == (
long)(&dummy->
u_fpvalid >> 2)) {
/*
 * NOTE(review): only the signatures survive in this extraction; both
 * bodies are missing.  Presumably bulk copy of the whole register set
 * to/from the __user buffer (PTRACE_GETREGS / PTRACE_SETREGS) -- verify
 * against the upstream file.
 */
189 static int ptrace_getregs(
struct task_struct *tsk,
void __user *uregs)
199 static int ptrace_setregs(
struct task_struct *tsk,
void __user *uregs)
/*
 * NOTE(review): body fragment only -- the enclosing function header is
 * missing, but callers below name it check_condition_bit(child).
 * Returns bit 8 (the condition bit) of the child's saved PSW.
 */
218 return (
int)((get_stack_long(child,
PT_PSW) >> 8) & 1);
/*
 * check_condition_src(op, regno1, regno2, child): evaluate a compare-and-
 * branch condition against the child's saved registers (fetched via
 * reg_offset[] + get_stack_long()).  NOTE(review): the switch/case labels
 * selecting among these returns are missing from this extraction; only
 * the signed-compare tails survive.  Verify against the upstream file.
 */
222 check_condition_src(
unsigned long op,
unsigned long regno1,
227 reg2 = get_stack_long(child, reg_offset[regno2]);
231 reg1 = get_stack_long(child, reg_offset[regno1]);
234 reg1 = get_stack_long(child, reg_offset[regno1]);
/* signed tests on reg2: <0, >=0, <=0, >0 (e.g. BLTZ/BGEZ/BLEZ/BGTZ) */
241 return (
int)reg2 < 0;
243 return (
int)reg2 >= 0;
245 return (
int)reg2 <= 0;
247 return (
int)reg2 > 0;
/*
 * compute_next_pc_for_16bit_insn(insn, pc, next_pc, child): decode a
 * 16-bit M32R instruction and store the address of the next instruction
 * to execute in *next_pc (branch target, trap vector, or fall-through).
 * NOTE(review): extraction-garbled fragment -- interior lines are missing
 * and two hex literals are split across lines ("0 / xc" should read 0xc).
 * Verify against the upstream file.
 */
255 compute_next_pc_for_16bit_insn(
unsigned long insn,
unsigned long pc,
256 unsigned long *next_pc,
259 unsigned long op, op2, op3;
264 if (insn & 0x00008000)
/* split the halfword into opcode fields */
271 op = (insn >> 12) & 0xf;
272 op2 = (insn >> 8) & 0xf;
273 op3 = (insn >> 4) & 0xf;
/* conditional branch not taken / taken: 8-bit displacement,
 * sign-extended and word-scaled by (<<24 then >>22) */
279 if (!check_condition_bit(child)) {
280 disp = (
long)(insn << 24) >> 22;
281 *next_pc = (pc & ~0x3) + disp;
287 if (check_condition_bit(child)) {
288 disp = (
long)(insn << 24) >> 22;
289 *next_pc = (pc & ~0x3) + disp;
295 disp = (
long)(insn << 24) >> 22;
296 *next_pc = (pc & ~0x3) + disp;
300 }
else if (op == 0x1) {
/* trap: next PC is the vector slot in the exception vector base */
309 unsigned long trapno;
316 *next_pc = evb + (trapno << 2);
319 }
else if (op3 == 0xd) {
320 *next_pc = get_stack_long(child,
PT_BPC);
/* NOTE(review): "0" / "xc" below is one split literal, 0xc */
325 if (op3 == 0
xc && check_condition_bit(child)) {
327 *next_pc = get_stack_long(child,
333 if (op3 == 0
xc && !check_condition_bit(child)) {
335 *next_pc = get_stack_long(child,
344 *next_pc = get_stack_long(child,
/*
 * compute_next_pc_for_32bit_insn(insn, pc, next_pc, child): decode a
 * 32-bit M32R instruction and store the following PC in *next_pc.
 * NOTE(review): extraction-garbled fragment -- interior lines missing and
 * one hex literal split across lines ("0 / xb" should read 0xb).  Verify
 * against the upstream file.
 */
358 compute_next_pc_for_32bit_insn(
unsigned long insn,
unsigned long pc,
359 unsigned long *next_pc,
365 unsigned long regno1, regno2;
367 op = (insn >> 28) & 0xf;
369 op2 = (insn >> 24) & 0xf;
/* conditional branch: 24-bit displacement, sign-extended and
 * word-scaled by (<<8 then >>6) */
373 if (!check_condition_bit(child)) {
374 disp = (
long)(insn << 8) >> 6;
375 *next_pc = (pc & ~0x3) + disp;
381 if (check_condition_bit(child)) {
382 disp = (
long)(insn << 8) >> 6;
383 *next_pc = (pc & ~0x3) + disp;
389 disp = (
long)(insn << 8) >> 6;
390 *next_pc = (pc & ~0x3) + disp;
393 }
/* NOTE(review): "0" / "xb" below is one split literal, 0xb */
else if (op == 0
xb) {
394 op2 = (insn >> 20) & 0xf;
/* compare-and-branch: condition on two register operands,
 * 16-bit displacement scaled by (<<16 then >>14) */
404 regno1 = ((insn >> 24) & 0xf);
405 regno2 = ((insn >> 16) & 0xf);
406 if (check_condition_src(op2, regno1, regno2, child)) {
407 disp = (
long)(insn << 16) >> 14;
408 *next_pc = (pc & ~0x3) + disp;
/*
 * Compute the address of the instruction that will execute after `insn`
 * (currently at `pc`) in the traced task `child`, storing it in *next_pc.
 * On M32R a set bit 31 marks a 32-bit instruction; anything else is a
 * 16-bit instruction, and each width has its own decoder.
 *
 * NOTE(review): reconstructed from an extraction-mangled fragment (fused
 * line numbers, missing `else`/brace lines); the two-way dispatch is the
 * only logic visible.  Confirm against the upstream file.
 */
static void
compute_next_pc(unsigned long insn, unsigned long pc,
	unsigned long *next_pc, struct task_struct *child)
{
	if (insn & 0x80000000)
		compute_next_pc_for_32bit_insn(insn, pc, next_pc, child);
	else
		compute_next_pc_for_16bit_insn(insn, pc, next_pc, child);
}
/*
 * register_debug_trap(child, next_pc, next_insn, code): record a software
 * breakpoint at next_pc and build in *code the instruction word with a
 * trap halfword (0x10f1 -- presumably the m32r debug-trap opcode; confirm
 * against the ISA manual) patched into the slot next_pc occupies.
 * NOTE(review): extraction-garbled fragment -- interior lines missing
 * (including the overflow check that triggers the printk).  Verify
 * against the upstream file.
 */
428 register_debug_trap(
struct task_struct *child,
unsigned long next_pc,
429 unsigned long next_insn,
unsigned long *
code)
/* traps are planted on word-aligned addresses */
432 unsigned long addr = next_pc & ~3;
435 printk(
"kernel BUG at %s %d: p->nr_trap = %d\n",
436 __FILE__, __LINE__, p->
nr_trap);
/* patch the low or high halfword depending on where next_pc sits */
443 *code = (next_insn & 0xffff0000) | 0x10f1;
446 if ((next_insn & 0x80000000) || (next_insn & 0x8000)) {
450 *code = (next_insn & 0xffff) | 0x10f10000;
/*
 * unregister_debug_trap(child, addr, ...): linear-search the child's
 * recorded trap addresses (p->addr[0..nr_trap)) for `addr` and remove it.
 * NOTE(review): extraction-garbled fragment -- the removal/compaction
 * body and return are missing.  Verify against the upstream file.
 */
458 unregister_debug_trap(
struct task_struct *child,
unsigned long addr,
465 for (i = 0; i < p->
nr_trap; i++) {
466 if (p->
addr[i] == addr)
/*
 * unregister_all_debug_traps(child): walk every recorded trap
 * (p->nr_trap entries) and remove each one.  NOTE(review): loop body is
 * missing from this extraction.
 */
490 unregister_all_debug_traps(
struct task_struct *child)
495 for (i = 0; i < p->
nr_trap; i++)
/*
 * NOTE(review): fragment of an inline-asm cache-invalidate sequence,
 * compiled only for the M32700 / OPSP chips.  Per the asm comments it
 * turns the cache off, invalidates, polls for completion, then turns it
 * back on; clobbers r0, r1 and memory.  Surrounding asm lines (the mvfc/
 * seth address setup and the #else/#endif arms) are missing from this
 * extraction -- verify against the upstream file.
 */
503 #if defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_OPSP)
513 "stb r1, @r0 ; cache off \n\t"
517 "stb r1, @r0 ; cache invalidate \n\t"
520 "ldb r1, @r0 ; invalidate check \n\t"
525 "stb r1, @r0 ; cache on \n\t"
526 : : :
"r0",
"r1",
"memory"
/*
 * embed_debug_trap(child, next_pc): fetch the instruction word at
 * next_pc (the "!= sizeof(next_insn)" fragment is presumably the result
 * check of an access_process_vm()-style read -- confirm upstream), then
 * register a debug trap there and write back the patched word built by
 * register_debug_trap().  NOTE(review): extraction-garbled fragment with
 * interior lines missing.
 */
535 embed_debug_trap(
struct task_struct *child,
unsigned long next_pc)
537 unsigned long next_insn,
code;
/* traps live on word-aligned addresses */
538 unsigned long addr = next_pc & ~3;
541 !=
sizeof(next_insn)) {
546 if (register_debug_trap(child, next_pc, next_insn, &code)) {
/*
 * NOTE(review): fragment of the trap-withdrawal path run when the child
 * hits a planted trap: the faulting word address is derived from the
 * saved bpc (minus the 2-byte trap halfword, word-aligned), then the
 * trap at that address is looked up and removed for `current`.
 * Enclosing function header and restore-original-insn body are missing.
 */
562 addr = (regs->
bpc - 2) & ~3;
564 if (unregister_debug_trap(
current, addr, &code)) {
/*
 * NOTE(review): fragments of the single-step arm/disarm paths.  Lines
 * 584-600 read the child's saved bpc, compute the next PC for the
 * current instruction, and plant a debug trap there (skipping kernel
 * addresses, bit 31 set); line 608 appears to belong to a separate
 * disable path that clears all planted traps.  These are likely two
 * different functions (enable/disable single step) collapsed together by
 * the extraction -- verify against the upstream file.
 */
584 unsigned long next_pc;
590 pc = get_stack_long(child,
PT_BPC);
596 compute_next_pc(insn, pc, &next_pc, child);
597 if (next_pc & 0x80000000)
600 if (embed_debug_trap(child, next_pc))
608 unregister_all_debug_traps(child);
/*
 * NOTE(review): fragment of the arch ptrace request dispatcher.  The
 * visible calls map to the user-area peek/poke and regset bulk paths
 * (presumably the PTRACE_PEEKUSR / PTRACE_POKEUSR / PTRACE_GETREGS /
 * PTRACE_SETREGS case arms -- the switch itself is missing from this
 * extraction).  `data` doubles as a user pointer for the read/regset
 * requests, hence the datap cast.
 */
624 unsigned long addr,
unsigned long data)
627 unsigned long __user *datap = (
unsigned long __user *) data;
642 ret = ptrace_read_user(child, addr, datap);
659 ret = ptrace_write_user(child, addr, data);
663 ret = ptrace_getregs(child, datap);
667 ret = ptrace_setregs(child, datap);