#include <linux/slab.h>
#include <linux/mman.h>
#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/unaligned.h>

#define signExtend17(val) sign_extend((val), 17)
#define TILE_X1_MASK (0xffffffffULL << 31)
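/*
 * "unaligned_printk=" boot argument: apparently the boot-time counterpart of
 * /proc/sys/tile/unaligned_fixup/printk, controlling whether each fixed-up
 * unaligned access is reported via printk.
 */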
static int __init setup_unaligned_printk(char *str)

        pr_info("Printk for each unaligned data access is %s\n",

__setup("unaligned_printk=", setup_unaligned_printk);
        result |= create_BrOff_X1(offset);

                create_Dest_X1(dest) |
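/*
 * The following fragment ORs the source, destination, and 8-bit immediate
 * fields into the X1 slot; it appears to be the body of the addi_X1() helper
 * used further down to rewrite postincrement memory ops.
 */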
        n |= (create_SrcA_X1(src) |
              create_Dest_X1(dest) |
              create_Imm8_X1(imm) |
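/*
 * Fragments of rewrite_load_store_unaligned() (called from single_step_once()
 * below): the tail of its parameter list, its locals, and the code that
 * emulates an unaligned load or store before handing back a rewritten bundle.
 */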
        int size, int sign_ext)

        int val_reg, addr_reg, err, val;
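/*
 * Decode the address and value registers.  Y2-encoded memory ops use the
 * SrcA/SrcBDest fields of the Y2 slot; X1 loads take the value register from
 * Dest and X1 stores from SrcB.  (The if/else selecting between these three
 * cases is elided in this excerpt.)
 */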
                addr_reg = get_SrcA_Y2(bundle);
                val_reg = get_SrcBDest_Y2(bundle);

                addr_reg = get_SrcA_X1(bundle);
                val_reg = get_Dest_X1(bundle);

                addr_reg = get_SrcA_X1(bundle);
                val_reg = get_SrcB_X1(bundle);
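/* Nothing to fix up if the access turns out to be aligned. */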
        if (((unsigned long)addr % size) == 0)

                        (unsigned long)addr, SIGBUS);
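/*
 * 16-bit accesses go through a short temporary; loads sign-extend the value
 * when the instruction calls for it.
 */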
                unsigned short val_16;
                val = sign_ext ? ((short)val_16) : val_16;

                unsigned short val_16;
        pr_info("Process %d/%s: PC %#lx: Fixup of unaligned %s at %#lx.\n",

                (unsigned long)addr);
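/*
 * One-time explanation of the /proc/sys/tile/unaligned_fixup controls;
 * P() is presumably a local shorthand for pr_info() defined in elided code.
 */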
P("Unaligned fixups in the kernel will slow your application considerably.\n");
P("To find them, write a \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n");
P("which requests the kernel show all unaligned fixups, or write a \"0\"\n");
P("to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n");
P("access will become a SIGBUS you can debug. No further warnings will be\n");
P("shown so as to avoid additional slowdown, but you can track the number\n");
P("of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n");
P("Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n");
        if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
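/*
 * Rewrite the bundle so the (already emulated) access is not redone when the
 * bundle is stepped.  A Y2-encoded memory op has its opcode and value-register
 * fields cleared and TREG_ZERO substituted; the elided else-branches below
 * reduce postincrement forms to an addi that only updates the address
 * register, and strip the opcode fields from the remaining X1 memory ops.
 */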
                bundle &= ~(create_SrcBDest_Y2(-1) |
                            create_Opcode_Y2(-1));
                bundle |= (create_SrcBDest_Y2(TREG_ZERO) |

                bundle = addi_X1(bundle, addr_reg, addr_reg,
                                 get_Imm8_X1(bundle));

                bundle = addi_X1(bundle, addr_reg, addr_reg,
                                 get_Dest_Imm8_X1(bundle));

                bundle &= ~(create_Opcode_X1(-1) |
                            create_UnShOpcodeExtension_X1(-1) |
                            create_UnOpcodeExtension_X1(-1));
                           create_UnShOpcodeExtension_X1(
                           create_UnOpcodeExtension_X1(
        int size = 0, sign_ext = 0;
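/*
 * Template bundles kept in .rodata.single_step.  They are patched and copied
 * into the single-step buffer below: an ill bundle to trap back into the
 * kernel, an addli/auli pair to materialize a 32-bit value in a register,
 * and a jump.
 */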
" .pushsection .rodata.single_step\n"
" .globl __single_step_ill_insn\n"
"__single_step_ill_insn:\n"
" .globl __single_step_addli_insn\n"
"__single_step_addli_insn:\n"
" { nop; addli r0, zero, 0 }\n"
" .globl __single_step_auli_insn\n"
"__single_step_auli_insn:\n"
" { nop; auli r0, r0, 0 }\n"
" .globl __single_step_j_insn\n"
"__single_step_j_insn:\n"
        pr_err("Out of kernel memory trying to single-step\n");

        if (IS_ERR((void __force *)buffer)) {
                pr_err("Out of kernel pages trying to single-step\n");
        BUG_ON(get_Opcode_X1(__single_step_addli_insn) !=
        BUG_ON(get_Opcode_X1(__single_step_auli_insn) !=
        BUG_ON(get_Dest_X1(__single_step_addli_insn) != 0);
        BUG_ON(get_JOffLong_X1(__single_step_j_insn) != 0);
        pr_err("Couldn't read instruction at %p trying to step\n", pc);
        if (!(bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK)) {
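/*
 * X-encoded (non-Y) bundle: the X1 opcode determines whether this is a
 * branch, jump, or memory operation that needs special handling below.
 */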
                int opcode = get_Opcode_X1(bundle);

                bundle = set_BrOff_X1(bundle, 2);
                        (unsigned long) (pc + get_JOffLong_X1(bundle));

                        (unsigned long) (pc + get_JOffLong_X1(bundle));
                bundle = nop_X1(bundle);
                switch (get_RRROpcodeExtension_X1(bundle)) {

                                regs->regs[get_SrcA_X1(bundle)];

                                regs->regs[get_SrcA_X1(bundle)];
                        bundle = nop_X1(bundle);

                        target_reg = get_Dest_X1(bundle);
                if (get_UnShOpcodeExtension_X1(bundle) ==

                        switch (get_UnOpcodeExtension_X1(bundle)) {
                                unsigned long ex0_0 = __insn_mfspr(
                                unsigned long ex0_1 = __insn_mfspr(

                                bundle = nop_X1(bundle);
                switch (get_ImmOpcodeExtension_X1(bundle)) {
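/*
 * Pick a scratch register this bundle does not already use: mask out the X0
 * operand registers and the link target, then take the lowest register left.
 */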
                u32 mask = (u32) ~((1ULL << get_Dest_X0(bundle)) |
                                   (1ULL << get_SrcA_X0(bundle)) |
                                   (1ULL << get_SrcB_X0(bundle)) |
                                   (1ULL << target_reg));
                temp_reg = __builtin_ctz(mask);

                bundle = move_X1(bundle, target_reg, temp_reg);
                int opcode = get_Opcode_Y2(bundle);
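/*
 * For load/store bundles, rewrite_load_store_unaligned() emulates the access
 * if it is unaligned and returns a rewritten bundle that is safe to place in
 * the step buffer.
 */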
        bundle = rewrite_load_store_unaligned(state, bundle, regs,
                        mem_op, size, sign_ext);
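/*
 * Genuine single-step: follow the copied bundle with two ill bundles so that
 * falling through it traps straight back into the kernel.
 */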
        if (is_single_step) {
                err |= __put_user(__single_step_ill_insn, buffer++);
                err |= __put_user(__single_step_ill_insn, buffer++);
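/*
 * Otherwise, patch the addli/auli templates with the low 16 bits and the
 * ha16-adjusted high half of a 32-bit value, then append a jump whose offset
 * (delta) apparently leads back to the interrupted instruction stream.
 */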
                bundle = __single_step_addli_insn;

                bundle = __single_step_auli_insn;

                bundle |= create_Imm16_X1(ha16);

                bundle = __single_step_j_insn;
                bundle |= create_JOffLong_X1(delta);
        pr_err("Fault when writing to single-step buffer\n");

                             (unsigned long)buffer);
#include <linux/ptrace.h>
#include <arch/spr_def.h>
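/*
 * From here on, the TILE-Gx side: instead of rewriting bundles into a step
 * buffer, it drives the hardware single-step SPRs
 * (SPR_SINGLE_STEP_CONTROL_K / SPR_SINGLE_STEP_EN_K_K).
 */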
void gx_singlestep_handle(struct pt_regs *regs, int fault_num)

        unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

        if (is_single_step == 0) {
                __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 0);

        } else if ((*ss_pc != regs->pc) ||

                __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
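/*
 * Arming single-step: update the control SPR and set the enable bit for the
 * user privilege level (1 << USER_PL).
 */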
        unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

        __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
        __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);