16 #include <asm/asm-offsets.h>
17 #include <asm/cacheflush.h>
/*
 * Instruction count used as the forward-branch distance in INSN_B_1F
 * below.  With KBUILD_MCOUNT_RA_ADDRESS on 32-bit an extra instruction
 * stores the ra address, hence 5 instead of 4.
 * NOTE(review): the #else / #endif between these two #defines appear to
 * have been elided from this extract — confirm against the full file.
 */
22 #if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
23 #define MCOUNT_OFFSET_INSNS 5
25 #define MCOUNT_OFFSET_INSNS 4
/*
 * in_kernel_space() - report whether @ip lies inside the core kernel
 * text segment [_stext, _etext], as opposed to module space.
 * NOTE(review): the braces and return statements of this function are
 * not visible in this extract; only the range test survives.
 */
34 static inline int in_kernel_space(
unsigned long ip)
36 if (ip >= (
unsigned long)
_stext &&
37 ip <= (
unsigned long)
_etext)
42 #ifdef CONFIG_DYNAMIC_FTRACE
/* MIPS "jal" (jump-and-link) opcode: saves the return address in ra. */
44 #define JAL 0x0c000000
/* Low 26 bits of a J-type instruction hold the word-aligned target. */
45 #define ADDR_MASK 0x03ffffff
/* J/JAL can only reach within the current 256MB (2^28-byte) segment. */
46 #define JUMP_RANGE_MASK ((1UL << 28) - 1)
/* "nop" encoding (all-zero instruction word). */
48 #define INSN_NOP 0x00000000
/* Build a "jal addr": target address shifted right 2, masked to 26 bits. */
49 #define INSN_JAL(addr) \
50 ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
/*
 * ftrace_dyn_arch_init_insns() - pre-assemble, via the uasm
 * micro-assembler, the template instruction words used later when
 * patching mcount call sites at runtime:
 *   insn_lui_v1_hi16_mcount    - lui of _mcount's high half into v1
 *   insn_jal_ftrace_caller     - jal into ftrace_caller
 *   insn_j_ftrace_graph_caller - j to ftrace_graph_caller
 * NOTE(review): the local declarations and at least one uasm call for
 * the lui template are elided from this extract.
 */
56 static inline void ftrace_dyn_arch_init_insns(
void)
/* Assemble the "lui v1, hi16(_mcount)" template into its buffer. */
63 buf = (
u32 *)&insn_lui_v1_hi16_mcount;
/* FTRACE_ADDR + 8: presumably skips ftrace_caller's first two
 * instructions — TODO confirm against mcount.S. */
67 buf = (
u32 *)&insn_jal_ftrace_caller;
68 uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);
70 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Assemble "j ftrace_graph_caller", masked to in-segment jump range. */
72 buf = (
u32 *)&insn_j_ftrace_graph_caller;
73 uasm_i_j(&buf, (
unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
/*
 * ftrace_modify_code() - overwrite the instruction word at @ip with
 * @new_code using the fault-safe store helper.
 * NOTE(review): the declaration of `faulted`, the fault check, the
 * icache flush and the return value are not visible in this extract.
 */
77 static int ftrace_modify_code(
unsigned long ip,
unsigned int new_code)
82 safe_store_code(new_code, ip, faulted);
/*
 * "b 1f" encoding: 0x10000000 is beq zero,zero (unconditional branch);
 * the immediate field gets MCOUNT_OFFSET_INSNS, i.e. branch forward
 * over the remaining mcount prologue instructions.
 */
121 #define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
/*
 * Fragment of the make-nop path (function header elided — presumably
 * ftrace_make_nop): disable tracing at @rec->ip.  In kernel text a
 * plain nop suffices; in module space a forward branch is used instead
 * so the extra mcount-prologue instructions are skipped.
 */
124 struct dyn_ftrace *rec,
unsigned long addr)
127 unsigned long ip = rec->ip;
133 new = in_kernel_space(ip) ?
INSN_NOP : INSN_B_1F;
135 return ftrace_modify_code(ip,
new);
/*
 * Fragment of the make-call path (header elided — presumably
 * ftrace_make_call): enable tracing at @rec->ip by installing the
 * pre-assembled template — a direct "jal ftrace_caller" in kernel
 * text, or the lui-based sequence opener for module space.
 */
141 unsigned long ip = rec->ip;
143 new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
144 insn_lui_v1_hi16_mcount;
146 return ftrace_modify_code(ip,
new);
/* Address of the patchable ftrace_call site inside ftrace_caller. */
149 #define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))
/*
 * Fragment (header elided — presumably ftrace_update_ftrace_func):
 * repoint the ftrace_call site at the new tracer @func by patching in
 * a freshly encoded "jal func".
 */
155 new = INSN_JAL((
unsigned long)func);
157 return ftrace_modify_code(FTRACE_CALL_IP,
new);
/*
 * Fragment (header elided — presumably ftrace_dyn_arch_init): build
 * the instruction templates once at init, then zero *data, which the
 * ftrace core reads as "architecture init succeeded".
 */
163 ftrace_dyn_arch_init_insns();
169 *(
unsigned long *)data = 0;
175 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
177 #ifdef CONFIG_DYNAMIC_FTRACE
/* Patchable call site inside ftrace_caller for the graph tracer. */
179 extern void ftrace_graph_call(
void);
180 #define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))
/*
 * Turn the graph tracer on: patch the ftrace_graph_call site to the
 * pre-assembled "j ftrace_graph_caller".
 * NOTE(review): function braces are elided from this extract.
 */
182 int ftrace_enable_ftrace_graph_caller(
void)
184 return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
185 insn_j_ftrace_graph_caller);
/*
 * Turn the graph tracer off: restore a nop at the same site.
 * NOTE(review): function braces are elided from this extract.
 */
188 int ftrace_disable_ftrace_graph_caller(
void)
190 return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
INSN_NOP);
195 #ifndef KBUILD_MCOUNT_RA_ADDRESS
/* Opcode+register bits of "sw ra, offset(sp)" (save return address). */
197 #define S_RA_SP (0xafbf << 16)
/* Opcode+base bits of "sw $16..$31, offset(sp)" (any s-reg/ra save). */
198 #define S_R_SP (0xafb0 << 16)
/* Low 16 bits of a sw instruction: the signed stack offset. */
199 #define OFFSET_MASK 0xffff
/*
 * ftrace_get_parent_ra_addr() - without KBUILD_MCOUNT_RA_ADDRESS the
 * caller's ra slot is unknown, so scan the code before the mcount call
 * site for the "sw ra, offset(sp)" store and derive the stack address
 * holding the parent's return address.  Returns @parent_ra_addr
 * unchanged when no suitable store is found.
 * NOTE(review): the function body's braces, locals, do-loop header and
 * several statements are elided from this extract.
 */
201 unsigned long ftrace_get_parent_ra_addr(
unsigned long self_ra,
unsigned long
202 old_parent_ra,
unsigned long parent_ra_addr,
unsigned long fp)
/* Start scanning 16 bytes back in kernel text, 24 in module space
 * (module call sequence is longer) — presumably matches the mcount
 * prologue sizes; TODO confirm against mcount.S. */
213 ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);
/* Fault-safe instruction fetch; `faulted` check elided here. */
221 safe_load_code(code, ip, faulted);
/* Not a store to the stack frame at all: give up, keep caller's hint. */
230 if ((code & S_R_SP) != S_R_SP)
231 return parent_ra_addr;
/* Loop until the specific "sw ra, offset(sp)" instruction is found. */
235 }
while ((code & S_RA_SP) != S_RA_SP);
/* Read the candidate slot and verify it really holds the parent ra. */
240 safe_load_stack(tmp, sp, faulted);
244 if (tmp == old_parent_ra)
/*
 * prepare_ftrace_return() - function-graph tracer entry hook: save the
 * real parent return address and replace it on the stack with the
 * return hooker (presumably &return_to_handler — initializer elided),
 * so the traced function "returns" into the tracer first.  On any
 * failure path the original parent ra is restored.
 * NOTE(review): large parts of this function (parameter list tail,
 * locals, fault checks, the ftrace_push_return_trace call and braces)
 * are elided from this extract.
 */
255 void prepare_ftrace_return(
unsigned long *parent_ra_addr,
unsigned long self_ra,
258 unsigned long old_parent_ra;
260 unsigned long return_hooker = (
unsigned long)
/* Fault-safe read of the currently saved parent return address. */
285 safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
288 #ifndef KBUILD_MCOUNT_RA_ADDRESS
/* Locate the true stack slot of the parent ra by scanning the code. */
289 parent_ra_addr = (
unsigned long *)ftrace_get_parent_ra_addr(self_ra,
290 old_parent_ra, (
unsigned long)parent_ra_addr,
fp);
/* Scan failed (sentinel 0): bail out without hooking this return. */
295 if (parent_ra_addr == 0)
/* Install the hooker in place of the parent return address. */
299 safe_store_stack(return_hooker, parent_ra_addr, faulted);
/* Fault while storing: undo and leave the call site untouched. */
305 *parent_ra_addr = old_parent_ra;
/* Graph-entry callback declined this function: undo the hook. */
319 if (!ftrace_graph_entry(&
trace)) {
321 *parent_ra_addr = old_parent_ra;