Linux Kernel 3.7.1
single_step.c
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * A code-rewriter that enables instruction single-stepping.
 * Derived from iLib's single-stepping code.
 */

#ifndef __tilegx__ /* Hardware support for single step unavailable. */

/* These functions are only used on the TILE platform */
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <linux/mman.h>
#include <linux/types.h>
#include <linux/err.h>
#include <asm/cacheflush.h>
#include <asm/unaligned.h>
#include <arch/abi.h>
#include <arch/opcode.h>

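/*
 * Helpers for editing tilepro bundles: branch offsets in the X1 slot
 * are 17-bit signed values, and TILE_X1_MASK is assumed here to cover
 * the bit range of the X1 instruction slot within the 64-bit bundle,
 * so clearing it blanks that slot without disturbing the X0 slot.
 */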
#define signExtend17(val) sign_extend((val), 17)
#define TILE_X1_MASK (0xffffffffULL << 31)

static int unaligned_printk;

static int __init setup_unaligned_printk(char *str)
{
        long val;
        if (strict_strtol(str, 0, &val) != 0)
                return 0;
        unaligned_printk = val;
        pr_info("Printk for each unaligned data access is %s\n",
                unaligned_printk ? "enabled" : "disabled");
        return 1;
}
__setup("unaligned_printk=", setup_unaligned_printk);
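/*
 * Usage sketch: booting with "unaligned_printk=1" on the kernel command
 * line turns on a log line for every unaligned access that is fixed up.
 */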

unsigned int unaligned_fixup_count;

enum mem_op {
        MEMOP_NONE,
        MEMOP_LOAD,
        MEMOP_STORE,
        MEMOP_LOAD_POSTINCR,
        MEMOP_STORE_POSTINCR
};
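/*
 * How the rewriter classifies the memory operation in a bundle; the
 * POSTINCR variants also advance the address register, a side effect
 * the rewrite must preserve (see rewrite_load_store_unaligned() below).
 */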

static inline tile_bundle_bits set_BrOff_X1(tile_bundle_bits n, s32 offset)
{
        tile_bundle_bits result;

        /* mask out the old offset */
        tile_bundle_bits mask = create_BrOff_X1(-1);
        result = n & (~mask);

        /* or in the new offset */
        result |= create_BrOff_X1(offset);

        return result;
}

static inline tile_bundle_bits move_X1(tile_bundle_bits n, int dest, int src)
{
        tile_bundle_bits result;
        tile_bundle_bits op;

        result = n & (~TILE_X1_MASK);

        op = create_Opcode_X1(SPECIAL_0_OPCODE_X1) |
                create_RRROpcodeExtension_X1(OR_SPECIAL_0_OPCODE_X1) |
                create_Dest_X1(dest) |
                create_SrcB_X1(TREG_ZERO) |
                create_SrcA_X1(src);

        result |= op;
        return result;
}

static inline tile_bundle_bits nop_X1(tile_bundle_bits n)
{
        return move_X1(n, TREG_ZERO, TREG_ZERO);
}
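/*
 * Note that move_X1() encodes "move" as "or dest, src, zero", so moving
 * zero to zero as above yields a harmless X1 no-op while leaving the
 * rest of the bundle intact.
 */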

static inline tile_bundle_bits addi_X1(
        tile_bundle_bits n, int dest, int src, int imm)
{
        n &= ~TILE_X1_MASK;

        n |= (create_SrcA_X1(src) |
              create_Dest_X1(dest) |
              create_Imm8_X1(imm) |
              create_S_X1(0) |
              create_Opcode_X1(IMM_0_OPCODE_X1) |
              create_ImmOpcodeExtension_X1(ADDI_IMM_0_OPCODE_X1));

        return n;
}
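/*
 * addi_X1() is used below to rewrite postincrement loads and stores:
 * once the memory access itself has been emulated, only the
 * address-register update ("addi addr, addr, imm") still needs to run.
 */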

static tile_bundle_bits rewrite_load_store_unaligned(
        struct single_step_state *state,
        tile_bundle_bits bundle,
        struct pt_regs *regs,
        enum mem_op mem_op,
        int size, int sign_ext)
{
        unsigned char __user *addr;
        int val_reg, addr_reg, err, val;

        /* Get address and value registers */
        if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
                addr_reg = get_SrcA_Y2(bundle);
                val_reg = get_SrcBDest_Y2(bundle);
        } else if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
                addr_reg = get_SrcA_X1(bundle);
                val_reg = get_Dest_X1(bundle);
        } else {
                addr_reg = get_SrcA_X1(bundle);
                val_reg = get_SrcB_X1(bundle);
        }

        /*
         * If registers are not GPRs, don't try to handle it.
         *
         * FIXME: we could handle non-GPR loads by getting the real value
         * from memory, writing it to the single step buffer, using a
         * temp_reg to hold a pointer to that memory, then executing that
         * instruction and resetting temp_reg. For non-GPR stores, it's a
         * little trickier; we could use the single step buffer for that
         * too, but we'd have to add some more state bits so that we could
         * call back in here to copy that value to the real target. For
         * now, we just handle the simple case.
         */
        if ((val_reg >= PTREGS_NR_GPRS &&
             (val_reg != TREG_ZERO ||
              mem_op == MEMOP_LOAD ||
              mem_op == MEMOP_LOAD_POSTINCR)) ||
            addr_reg >= PTREGS_NR_GPRS)
                return bundle;

        /* If it's aligned, don't handle it specially */
        addr = (void __user *)regs->regs[addr_reg];
        if (((unsigned long)addr % size) == 0)
                return bundle;

        /*
         * Return SIGBUS with the unaligned address, if requested.
         * Note that we return SIGBUS even for completely invalid addresses
         * as long as they are in fact unaligned; this matches what the
         * tilepro hardware would be doing, if it could provide us with the
         * actual bad address in an SPR, which it doesn't.
         */
        if (unaligned_fixup == 0) {
                siginfo_t info = {
                        .si_signo = SIGBUS,
                        .si_code = BUS_ADRALN,
                        .si_addr = addr
                };
                trace_unhandled_signal("unaligned trap", regs,
                                       (unsigned long)addr, SIGBUS);
                force_sig_info(info.si_signo, &info, current);
                return (tilepro_bundle_bits) 0;
        }

        /* Handle unaligned load/store */
        if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
                unsigned short val_16;
                switch (size) {
                case 2:
                        err = copy_from_user(&val_16, addr, sizeof(val_16));
                        val = sign_ext ? ((short)val_16) : val_16;
                        break;
                case 4:
                        err = copy_from_user(&val, addr, sizeof(val));
                        break;
                default:
                        BUG();
                }
                if (err == 0) {
                        state->update_reg = val_reg;
                        state->update_value = val;
                        state->update = 1;
                }
        } else {
                unsigned short val_16;
                val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg];
                switch (size) {
                case 2:
                        val_16 = val;
                        err = copy_to_user(addr, &val_16, sizeof(val_16));
                        break;
                case 4:
                        err = copy_to_user(addr, &val, sizeof(val));
                        break;
                default:
                        BUG();
                }
        }

        if (err) {
                siginfo_t info = {
                        .si_signo = SIGSEGV,
                        .si_code = SEGV_MAPERR,
                        .si_addr = addr
                };
                trace_unhandled_signal("segfault", regs,
                                       (unsigned long)addr, SIGSEGV);
                force_sig_info(info.si_signo, &info, current);
                return (tile_bundle_bits) 0;
        }

        if (unaligned_printk || unaligned_fixup_count == 0) {
                pr_info("Process %d/%s: PC %#lx: Fixup of"
                        " unaligned %s at %#lx.\n",
                        current->pid, current->comm, regs->pc,
                        (mem_op == MEMOP_LOAD ||
                         mem_op == MEMOP_LOAD_POSTINCR) ?
                        "load" : "store",
                        (unsigned long)addr);
                if (!unaligned_printk) {
#define P pr_info
P("\n");
P("Unaligned fixups in the kernel will slow your application considerably.\n");
P("To find them, write a \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n");
P("which requests the kernel show all unaligned fixups, or write a \"0\"\n");
P("to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n");
P("access will become a SIGBUS you can debug. No further warnings will be\n");
P("shown so as to avoid additional slowdown, but you can track the number\n");
P("of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n");
P("Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n");
P("\n");
#undef P
                }
        }
        ++unaligned_fixup_count;

        if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
                /* Convert the Y2 instruction to a prefetch. */
                bundle &= ~(create_SrcBDest_Y2(-1) |
                            create_Opcode_Y2(-1));
                bundle |= (create_SrcBDest_Y2(TREG_ZERO) |
                           create_Opcode_Y2(LW_OPCODE_Y2));
        /* Replace the load postincr with an addi */
        } else if (mem_op == MEMOP_LOAD_POSTINCR) {
                bundle = addi_X1(bundle, addr_reg, addr_reg,
                                 get_Imm8_X1(bundle));
        /* Replace the store postincr with an addi */
        } else if (mem_op == MEMOP_STORE_POSTINCR) {
                bundle = addi_X1(bundle, addr_reg, addr_reg,
                                 get_Dest_Imm8_X1(bundle));
        } else {
                /* Convert the X1 instruction to a nop. */
                bundle &= ~(create_Opcode_X1(-1) |
                            create_UnShOpcodeExtension_X1(-1) |
                            create_UnOpcodeExtension_X1(-1));
                bundle |= (create_Opcode_X1(SHUN_0_OPCODE_X1) |
                           create_UnShOpcodeExtension_X1(
                                   UN_0_SHUN_0_OPCODE_X1) |
                           create_UnOpcodeExtension_X1(
                                   NOP_UN_0_SHUN_0_OPCODE_X1));
        }

        return bundle;
}

/*
 * Called after execve() has started the new image. This allows us
 * to reset the info state. Note that the mmap'ed memory, if there
 * was any, has already been unmapped by the exec.
 */
void single_step_execve(void)
{
        struct thread_info *ti = current_thread_info();
        kfree(ti->step_state);
        ti->step_state = NULL;
}
void single_step_once(struct pt_regs *regs)
{
        extern tile_bundle_bits __single_step_ill_insn;
        extern tile_bundle_bits __single_step_j_insn;
        extern tile_bundle_bits __single_step_addli_insn;
        extern tile_bundle_bits __single_step_auli_insn;
        struct thread_info *info = (void *)current_thread_info();
        struct single_step_state *state = info->step_state;
        int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
        tile_bundle_bits __user *buffer, *pc;
        tile_bundle_bits bundle;
        int temp_reg;
        int target_reg = TREG_LR;
        int err;
        enum mem_op mem_op = MEMOP_NONE;
        int size = 0, sign_ext = 0;     /* happy compiler */

        asm(
" .pushsection .rodata.single_step\n"
" .align 8\n"
" .globl __single_step_ill_insn\n"
"__single_step_ill_insn:\n"
" ill\n"
" .globl __single_step_addli_insn\n"
"__single_step_addli_insn:\n"
" { nop; addli r0, zero, 0 }\n"
" .globl __single_step_auli_insn\n"
"__single_step_auli_insn:\n"
" { nop; auli r0, r0, 0 }\n"
" .globl __single_step_j_insn\n"
"__single_step_j_insn:\n"
" j .\n"
" .popsection\n"
        );
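
        /*
         * The assembly above plants template bundles in .rodata; the
         * code below patches their destination, source, and immediate
         * fields before copying them into the per-thread buffer.
         */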

        /*
         * Enable interrupts here to allow touching userspace and the like.
         * The callers expect this: do_trap() already has interrupts
         * enabled, and do_work_pending() handles functions that enable
         * interrupts internally.
         */
        local_irq_enable();

        if (state == NULL) {
                /* allocate a page of writable, executable memory */
                state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL);
                if (state == NULL) {
                        pr_err("Out of kernel memory trying to single-step\n");
                        return;
                }

                /* allocate a cache line of writable, executable memory */
                buffer = (void __user *) vm_mmap(NULL, 0, 64,
                                                 PROT_EXEC | PROT_READ | PROT_WRITE,
                                                 MAP_PRIVATE | MAP_ANONYMOUS,
                                                 0);

                if (IS_ERR((void __force *)buffer)) {
                        kfree(state);
                        pr_err("Out of kernel pages trying to single-step\n");
                        return;
                }

                state->buffer = buffer;
                state->is_enabled = 0;

                info->step_state = state;

                /* Validate our stored instruction patterns */
                BUG_ON(get_Opcode_X1(__single_step_addli_insn) !=
                       ADDLI_OPCODE_X1);
                BUG_ON(get_Opcode_X1(__single_step_auli_insn) !=
                       AULI_OPCODE_X1);
                BUG_ON(get_SrcA_X1(__single_step_addli_insn) != TREG_ZERO);
                BUG_ON(get_Dest_X1(__single_step_addli_insn) != 0);
                BUG_ON(get_JOffLong_X1(__single_step_j_insn) != 0);
        }

        /*
         * If we are returning from a syscall, we still haven't hit the
         * "ill" for the swint1 instruction. So back the PC up to be
         * pointing at the swint1, but we'll actually return directly
         * back to the "ill" so we come back in via SIGILL as if we
         * had "executed" the swint1 without ever being in kernel space.
         */
        if (regs->faultnum == INT_SWINT_1)
                regs->pc -= 8;

        pc = (tile_bundle_bits __user *)(regs->pc);
        if (get_user(bundle, pc) != 0) {
                pr_err("Couldn't read instruction at %p trying to step\n", pc);
                return;
        }

        /* We'll follow the instruction with 2 ill op bundles */
        state->orig_pc = (unsigned long)pc;
        state->next_pc = (unsigned long)(pc + 1);
        state->branch_next_pc = 0;
        state->update = 0;

        if (!(bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK)) {
                /* two wide, check for control flow */
                int opcode = get_Opcode_X1(bundle);

                switch (opcode) {
                /* branches */
                case BRANCH_OPCODE_X1:
                {
                        s32 offset = signExtend17(get_BrOff_X1(bundle));

                        /*
                         * For branches, we use a rewriting trick to let the
                         * hardware evaluate whether the branch is taken or
                         * untaken. We record the target offset and then
                         * rewrite the branch instruction to target 1 insn
                         * ahead if the branch is taken. We then follow the
                         * rewritten branch with two bundles, each containing
                         * an "ill" instruction. The supervisor examines the
                         * pc after the single step code is executed, and if
                         * the pc is the first ill instruction, then the
                         * branch (if any) was not taken. If the pc is the
                         * second ill instruction, then the branch was
                         * taken. The new pc is computed for these cases, and
                         * inserted into the registers for the thread. If
                         * the pc is the start of the single step code, then
                         * an exception or interrupt was taken before the
                         * code started processing, and the same "original"
                         * pc is restored. This change, different from the
                         * original implementation, has the advantage of
                         * executing a single user instruction.
                         */
                        state->branch_next_pc = (unsigned long)(pc + offset);

                        /* rewrite branch offset to go forward one bundle */
                        bundle = set_BrOff_X1(bundle, 2);
                }
                break;

                /* jumps */
                case JALB_OPCODE_X1:
                case JALF_OPCODE_X1:
                        state->update = 1;
                        state->next_pc =
                                (unsigned long) (pc + get_JOffLong_X1(bundle));
                        break;

                case JB_OPCODE_X1:
                case JF_OPCODE_X1:
                        state->next_pc =
                                (unsigned long) (pc + get_JOffLong_X1(bundle));
                        bundle = nop_X1(bundle);
                        break;

                case SPECIAL_0_OPCODE_X1:
                        switch (get_RRROpcodeExtension_X1(bundle)) {
                        /* jump-register */
                        case JALRP_SPECIAL_0_OPCODE_X1:
                        case JALR_SPECIAL_0_OPCODE_X1:
                                state->update = 1;
                                state->next_pc =
                                        regs->regs[get_SrcA_X1(bundle)];
                                break;

                        case JRP_SPECIAL_0_OPCODE_X1:
                        case JR_SPECIAL_0_OPCODE_X1:
                                state->next_pc =
                                        regs->regs[get_SrcA_X1(bundle)];
                                bundle = nop_X1(bundle);
                                break;

                        case LNK_SPECIAL_0_OPCODE_X1:
                                state->update = 1;
                                target_reg = get_Dest_X1(bundle);
                                break;

                        /* stores */
                        case SH_SPECIAL_0_OPCODE_X1:
                                mem_op = MEMOP_STORE;
                                size = 2;
                                break;

                        case SW_SPECIAL_0_OPCODE_X1:
                                mem_op = MEMOP_STORE;
                                size = 4;
                                break;
                        }
                        break;

                /* loads and iret */
                case SHUN_0_OPCODE_X1:
                        if (get_UnShOpcodeExtension_X1(bundle) ==
                            UN_0_SHUN_0_OPCODE_X1) {
                                switch (get_UnOpcodeExtension_X1(bundle)) {
                                case LH_UN_0_SHUN_0_OPCODE_X1:
                                        mem_op = MEMOP_LOAD;
                                        size = 2;
                                        sign_ext = 1;
                                        break;

                                case LH_U_UN_0_SHUN_0_OPCODE_X1:
                                        mem_op = MEMOP_LOAD;
                                        size = 2;
                                        sign_ext = 0;
                                        break;

                                case LW_UN_0_SHUN_0_OPCODE_X1:
                                        mem_op = MEMOP_LOAD;
                                        size = 4;
                                        break;

                                case IRET_UN_0_SHUN_0_OPCODE_X1:
                                {
                                        unsigned long ex0_0 = __insn_mfspr(
                                                SPR_EX_CONTEXT_0_0);
                                        unsigned long ex0_1 = __insn_mfspr(
                                                SPR_EX_CONTEXT_0_1);
                                        /*
                                         * Special-case it if we're iret'ing
                                         * to PL0 again. Otherwise just let
                                         * it run and it will generate SIGILL.
                                         */
                                        if (EX1_PL(ex0_1) == USER_PL) {
                                                state->next_pc = ex0_0;
                                                regs->ex1 = ex0_1;
                                                bundle = nop_X1(bundle);
                                        }
                                }
                                }
                        }
                        break;

#if CHIP_HAS_WH64()
                /* postincrement operations */
                case IMM_0_OPCODE_X1:
                        switch (get_ImmOpcodeExtension_X1(bundle)) {
                        case LWADD_IMM_0_OPCODE_X1:
                                mem_op = MEMOP_LOAD_POSTINCR;
                                size = 4;
                                break;

                        case LHADD_IMM_0_OPCODE_X1:
                                mem_op = MEMOP_LOAD_POSTINCR;
                                size = 2;
                                sign_ext = 1;
                                break;

                        case LHADD_U_IMM_0_OPCODE_X1:
                                mem_op = MEMOP_LOAD_POSTINCR;
                                size = 2;
                                sign_ext = 0;
                                break;

                        case SWADD_IMM_0_OPCODE_X1:
                                mem_op = MEMOP_STORE_POSTINCR;
                                size = 4;
                                break;

                        case SHADD_IMM_0_OPCODE_X1:
                                mem_op = MEMOP_STORE_POSTINCR;
                                size = 2;
                                break;

                        default:
                                break;
                        }
                        break;
#endif /* CHIP_HAS_WH64() */
                }

                if (state->update) {
                        /*
                         * Get an available register. We start with a
                         * bitmask with 1's for available registers.
                         * We truncate to the low 32 registers since
                         * we are guaranteed to have set bits in the
                         * low 32 bits, then use ctz to pick the first.
                         */
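                        /*
                         * The chosen temp_reg is preloaded below with the
                         * fall-through address (pc + 1) and the X1 slot is
                         * rewritten to "move target_reg, temp_reg", so an
                         * instruction like jalr still writes the proper
                         * link value while the jump itself is emulated via
                         * state->next_pc.
                         */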
                        u32 mask = (u32) ~((1ULL << get_Dest_X0(bundle)) |
                                           (1ULL << get_SrcA_X0(bundle)) |
                                           (1ULL << get_SrcB_X0(bundle)) |
                                           (1ULL << target_reg));
                        temp_reg = __builtin_ctz(mask);
                        state->update_reg = temp_reg;
                        state->update_value = regs->regs[temp_reg];
                        regs->regs[temp_reg] = (unsigned long) (pc + 1);
                        regs->flags |= PT_FLAGS_RESTORE_REGS;
                        bundle = move_X1(bundle, target_reg, temp_reg);
                }
        } else {
                int opcode = get_Opcode_Y2(bundle);

                switch (opcode) {
                /* loads */
                case LH_OPCODE_Y2:
                        mem_op = MEMOP_LOAD;
                        size = 2;
                        sign_ext = 1;
                        break;

                case LH_U_OPCODE_Y2:
                        mem_op = MEMOP_LOAD;
                        size = 2;
                        sign_ext = 0;
                        break;

                case LW_OPCODE_Y2:
                        mem_op = MEMOP_LOAD;
                        size = 4;
                        break;

                /* stores */
                case SH_OPCODE_Y2:
                        mem_op = MEMOP_STORE;
                        size = 2;
                        break;

                case SW_OPCODE_Y2:
                        mem_op = MEMOP_STORE;
                        size = 4;
                        break;
                }
        }
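
        /*
         * A Y-mode bundle keeps its memory operation in the Y2 slot;
         * branches and jumps appear only in X-mode bundles, which is why
         * only loads and stores needed classifying in the path above.
         */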

        /*
         * Check if we need to rewrite an unaligned load/store.
         * Returning zero is a special value meaning we need to SIGSEGV.
         */
        if (mem_op != MEMOP_NONE && unaligned_fixup >= 0) {
                bundle = rewrite_load_store_unaligned(state, bundle, regs,
                                                      mem_op, size, sign_ext);
                if (bundle == 0)
                        return;
        }

        /* write the bundle to our execution area */
        buffer = state->buffer;
        err = __put_user(bundle, buffer++);
642 
643  /*
644  * If we're really single-stepping, we take an INT_ILL after.
645  * If we're just handling an unaligned access, we can just
646  * jump directly back to where we were in user code.
647  */
648  if (is_single_step) {
649  err |= __put_user(__single_step_ill_insn, buffer++);
650  err |= __put_user(__single_step_ill_insn, buffer++);
651  } else {
652  long delta;
653 
654  if (state->update) {
655  /* We have some state to update; do it inline */
656  int ha16;
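                        /*
                         * Rebuild the 32-bit update_value in two bundles:
                         * addli supplies the sign-extended low 16 bits and
                         * auli then adds a high half pre-adjusted by
                         * 0x8000, so the pair sums exactly to the value.
                         */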
                        bundle = __single_step_addli_insn;
                        bundle |= create_Dest_X1(state->update_reg);
                        bundle |= create_Imm16_X1(state->update_value);
                        err |= __put_user(bundle, buffer++);
                        bundle = __single_step_auli_insn;
                        bundle |= create_Dest_X1(state->update_reg);
                        bundle |= create_SrcA_X1(state->update_reg);
                        ha16 = (state->update_value + 0x8000) >> 16;
                        bundle |= create_Imm16_X1(ha16);
                        err |= __put_user(bundle, buffer++);
                        state->update = 0;
                }

                /* End with a jump back to the next instruction */
                delta = ((regs->pc + TILE_BUNDLE_SIZE_IN_BYTES) -
                         (unsigned long)buffer) >>
                        TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES;
                bundle = __single_step_j_insn;
                bundle |= create_JOffLong_X1(delta);
                err |= __put_user(bundle, buffer++);
        }

        if (err) {
                pr_err("Fault when writing to single-step buffer\n");
                return;
        }

        /*
         * Flush the buffer.
         * We do a local flush only, since this is a thread-specific buffer.
         */
        __flush_icache_range((unsigned long)state->buffer,
                             (unsigned long)buffer);

        /* Indicate enabled */
        state->is_enabled = is_single_step;
        regs->pc = (unsigned long)state->buffer;

        /* Fault immediately if we are coming back from a syscall. */
        if (regs->faultnum == INT_SWINT_1)
                regs->pc += 8;
}

#else
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <arch/spr_def.h>

static DEFINE_PER_CPU(unsigned long, ss_saved_pc);

/*
 * Called directly on the occasion of an interrupt.
 *
 * If the process doesn't have single step set, then we use this as an
 * opportunity to turn single step off.
 *
 * It has been mentioned that we could conditionally turn off single stepping
 * on each entry into the kernel and rely on single_step_once to turn it
 * on for the processes that matter (as we already do), but this
 * implementation is somewhat more efficient in that we muck with registers
 * once on a bum interrupt rather than on every entry into the kernel.
 *
 * If SINGLE_STEP_CONTROL_K has CANCELED set, then an interrupt occurred,
 * so we have to run through this process again before we can say that an
 * instruction has executed.
 *
 * swint will set CANCELED, but it's a legitimate instruction. Fortunately
 * it changes the PC. If it hasn't changed, then we know that the interrupt
 * wasn't generated by swint and we'll need to run this process again before
 * we can say an instruction has executed.
 *
 * If either CANCELED == 0 or the PC's changed, we send out SIGTRAPs and get
 * on with our lives.
 */

void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
{
        unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
        struct thread_info *info = (void *)current_thread_info();
        int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
        unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

        if (is_single_step == 0) {
                __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 0);

        } else if ((*ss_pc != regs->pc) ||
                   (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) {

                ptrace_notify(SIGTRAP);
                control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
                control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
                __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
        }
}

/*
 * Called from need_singlestep. Set up the control registers and the enable
 * register, then return back.
 */
void single_step_once(struct pt_regs *regs)
{
        unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
        unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

        *ss_pc = regs->pc;
        control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
        control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
        __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
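        /*
         * Presumably (1 << USER_PL) arms single stepping for user
         * privilege level only, so the kernel itself is never stepped.
         */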
        __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
}

void single_step_execve(void)
{
        /* Nothing */
}

#endif /* !__tilegx__ */