 #include <asm/cacheflush.h>
 #include <asm/uasm.h>
 
-/*
- * If the Instruction Pointer is in module space (0xc0000000), return true;
- * otherwise, it is in kernel space (0x80000000), return false.
- *
- * FIXME: This will not work when the kernel space and module space are the
- * same. If they are the same, we need to modify scripts/recordmcount.pl,
- * ftrace_make_nop/call() and the other related parts to ensure the
- * enabling/disabling of the calling site to _mcount is right for both kernel
- * and module.
- */
-
-static inline int in_module(unsigned long ip)
-{
-	return ip & 0x40000000;
-}
+#include <asm-generic/sections.h>
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 #define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
 #define ADDR_MASK 0x03ffffff	/* op_code|addr : 31...26|25 ....0 */
 
-#define INSN_B_1F_4 0x10000004	/* b 1f; offset = 4 */
-#define INSN_B_1F_5 0x10000005	/* b 1f; offset = 5 */
 #define INSN_NOP 0x00000000	/* nop */
 #define INSN_JAL(addr)	\
 	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
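For reference, a minimal user-space sketch (not part of the patch) of how the INSN_JAL() encoding above works out: a MIPS jal carries the word-aligned target address in its low 26 bits. The address 0x80100000 is made up for the example.

#include <stdio.h>

#define JAL		0x0c000000
#define ADDR_MASK	0x03ffffff
#define INSN_JAL(addr)	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))

int main(void)
{
	/* 0x80100000 >> 2 = 0x20040000, masked to 0x00040000, OR'd with JAL */
	printf("0x%08x\n", INSN_JAL(0x80100000UL));	/* prints 0x0c040000 */
	return 0;
}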
@@ -69,6 +53,20 @@ static inline void ftrace_dyn_arch_init_insns(void)
 #endif
 }
 
+/*
+ * Check if the address is in kernel space
+ *
+ * Cloned from core_kernel_text() in kernel/extable.c, but without the
+ * init_kernel_text() check: ftrace does not trace functions in init sections.
+ */
+static inline int in_kernel_space(unsigned long ip)
+{
+	if (ip >= (unsigned long)_stext &&
+	    ip <= (unsigned long)_etext)
+		return 1;
+	return 0;
+}
+
 static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
 {
 	int faulted;
@@ -84,46 +82,54 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
 	return 0;
 }
 
+/*
+ * The details about the calling site of mcount on MIPS
+ *
+ * 1. For kernel:
+ *
+ * move at, ra
+ * jal _mcount --> nop
+ *
+ * 2. For modules:
+ *
+ * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
+ *
+ * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000005)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * move $12, ra_address
+ * jalr v1
+ *  sub sp, sp, 8
+ *                                  1: offset = 5 instructions
+ * 2.2 For the other situations
+ *
+ * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000004)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * jalr v1
+ *  nop | move $12, ra_address | sub sp, sp, 8
+ *                                  1: offset = 4 instructions
+ */
+
+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+#define MCOUNT_OFFSET_INSNS 5
+#else
+#define MCOUNT_OFFSET_INSNS 4
+#endif
+#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
+
 int ftrace_make_nop(struct module *mod,
 		    struct dyn_ftrace *rec, unsigned long addr)
 {
 	unsigned int new;
 	unsigned long ip = rec->ip;
 
 	/*
-	 * We have compiled module with -mlong-calls, but compiled the kernel
-	 * without it, we need to cope with them respectively.
+	 * If ip is in kernel space, no long call is needed; otherwise a
+	 * long call is.
 	 */
-	if (in_module(ip)) {
-#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
-		/*
-		 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000005)
-		 * addiu v1, v1, low_16bit_of_mcount
-		 * move at, ra
-		 * move $12, ra_address
-		 * jalr v1
-		 *  sub sp, sp, 8
-		 *                                  1: offset = 5 instructions
-		 */
-		new = INSN_B_1F_5;
-#else
-		/*
-		 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000004)
-		 * addiu v1, v1, low_16bit_of_mcount
-		 * move at, ra
-		 * jalr v1
-		 *  nop | move $12, ra_address | sub sp, sp, 8
-		 *                                  1: offset = 4 instructions
-		 */
-		new = INSN_B_1F_4;
-#endif
-	} else {
-		/*
-		 * move at, ra
-		 * jal _mcount --> nop
-		 */
-		new = INSN_NOP;
-	}
+	new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
+
 	return ftrace_modify_code(ip, new);
 }
 
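As a quick sanity check (not part of the patch), the consolidated INSN_B_1F above reproduces the two constants it replaces: a MIPS "b 1f" is 0x10000000 with the instruction offset in the low 16 bits. The parameterized macro below exists only for this illustration.

#include <assert.h>

#define INSN_B_1F(offset)	(0x10000000 | (offset))	/* illustrative variant */

int main(void)
{
	assert(INSN_B_1F(4) == 0x10000004);	/* old INSN_B_1F_4 (MCOUNT_OFFSET_INSNS = 4) */
	assert(INSN_B_1F(5) == 0x10000005);	/* old INSN_B_1F_5 (MCOUNT_OFFSET_INSNS = 5) */
	return 0;
}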
@@ -132,8 +138,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	unsigned int new;
 	unsigned long ip = rec->ip;
 
-	/* ip, module: 0xc0000000, kernel: 0x80000000 */
-	new = in_module(ip) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller;
+	new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
+		insn_lui_v1_hi16_mcount;
 
 	return ftrace_modify_code(ip, new);
 }
@@ -190,29 +196,25 @@ int ftrace_disable_ftrace_graph_caller(void)
 #define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
 #define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */
 
-unsigned long ftrace_get_parent_addr(unsigned long self_addr,
-				     unsigned long parent,
-				     unsigned long parent_addr,
-				     unsigned long fp)
+unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
+	old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
 {
-	unsigned long sp, ip, ra;
+	unsigned long sp, ip, tmp;
 	unsigned int code;
 	int faulted;
 
 	/*
-	 * For module, move the ip from calling site of mcount to the
-	 * instruction "lui v1, hi_16bit_of_mcount"(offset is 20), but for
-	 * kernel, move to the instruction "move ra, at"(offset is 12)
+	 * For modules, move ip from the return address back past the
+	 * "lui v1, hi_16bit_of_mcount" instruction (offset 24); for the
+	 * kernel, back past "move at, ra" (offset 16).
 	 */
-	ip = self_addr - (in_module(self_addr) ? 20 : 12);
+	ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);
 
 	/*
 	 * search the text until finding the non-store instruction or "s{d,w}
 	 * ra, offset(sp)" instruction
 	 */
 	do {
-		ip -= 4;
-
 		/* get the code at "ip": code = *(unsigned int *)ip; */
 		safe_load_code(code, ip, faulted);
 
@@ -224,18 +226,20 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
 		 * store the ra on the stack
 		 */
 		if ((code & S_R_SP) != S_R_SP)
-			return parent_addr;
+			return parent_ra_addr;
 
-	} while (((code & S_RA_SP) != S_RA_SP));
+		/* Move to the next instruction */
+		ip -= 4;
+	} while ((code & S_RA_SP) != S_RA_SP);
 
 	sp = fp + (code & OFFSET_MASK);
 
-	/* ra = *(unsigned long *)sp; */
-	safe_load_stack(ra, sp, faulted);
+	/* tmp = *(unsigned long *)sp; */
+	safe_load_stack(tmp, sp, faulted);
 	if (unlikely(faulted))
 		return 0;
 
-	if (ra == parent)
+	if (tmp == old_parent_ra)
 		return sp;
 	return 0;
 }
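A small user-space sketch (not part of the patch) of the instruction matching done in the loop above; 0xafbf0024 is a made-up word for "sw ra, 36(sp)", and the u suffixes are added only so the masks stay unsigned in a plain C build.

#include <stdio.h>

#define S_RA_SP		(0xafbfu << 16)	/* s{d,w} ra, offset(sp) */
#define S_R_SP		(0xafb0u << 16)	/* s{d,w} R, offset(sp) */
#define OFFSET_MASK	0xffffu

int main(void)
{
	unsigned int code = 0xafbf0024;	/* sw ra, 36(sp) */

	printf("store to sp?  %d\n", (code & S_R_SP) == S_R_SP);	/* 1 */
	printf("stores ra?    %d\n", (code & S_RA_SP) == S_RA_SP);	/* 1 */
	printf("stack offset: %u\n", code & OFFSET_MASK);		/* 36 */
	return 0;
}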
@@ -246,21 +250,21 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
  */
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
 			   unsigned long fp)
 {
-	unsigned long old;
+	unsigned long old_parent_ra;
 	struct ftrace_graph_ent trace;
 	unsigned long return_hooker = (unsigned long)
 	    &return_to_handler;
-	int faulted;
+	int faulted, insns;
 
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;
 
 	/*
-	 * "parent" is the stack address saved the return address of the caller
-	 * of _mcount.
266+ * "parent_ra_addr " is the stack address saved the return address of
267+ * the caller of _mcount.
 	 *
 	 * if the gcc < 4.5, a leaf function does not save the return address
 	 * in the stack address, so, we "emulate" one in _mcount's stack space,
@@ -275,37 +279,44 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 	 * do it in ftrace_graph_caller of mcount.S.
 	 */
 
-	/* old = *parent; */
-	safe_load_stack(old, parent, faulted);
+	/* old_parent_ra = *parent_ra_addr; */
+	safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
 	if (unlikely(faulted))
 		goto out;
 #ifndef KBUILD_MCOUNT_RA_ADDRESS
-	parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old,
-			(unsigned long)parent, fp);
+	parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
+			old_parent_ra, (unsigned long)parent_ra_addr, fp);
 	/*
 	 * If fails when getting the stack address of the non-leaf function's
 	 * ra, stop function graph tracer and return
 	 */
-	if (parent == 0)
+	if (parent_ra_addr == 0)
 		goto out;
 #endif
-	/* *parent = return_hooker; */
-	safe_store_stack(return_hooker, parent, faulted);
+	/* *parent_ra_addr = return_hooker; */
+	safe_store_stack(return_hooker, parent_ra_addr, faulted);
 	if (unlikely(faulted))
 		goto out;
 
-	if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) ==
-	    -EBUSY) {
-		*parent = old;
+	if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
+	    == -EBUSY) {
+		*parent_ra_addr = old_parent_ra;
 		return;
 	}
 
-	trace.func = self_addr;
+	/*
+	 * Get the recorded ip of the current mcount calling site in the
+	 * __mcount_loc section, which will be used to filter the function
+	 * entries configured through the tracing/set_graph_function interface.
+	 */
+
+	insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
+	trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
 
 	/* Only trace if the calling function expects to */
 	if (!ftrace_graph_entry(&trace)) {
 		current->curr_ret_stack--;
-		*parent = old;
+		*parent_ra_addr = old_parent_ra;
 	}
 	return;
 out:
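Finally, a sketch (not part of the patch) of the trace.func arithmetic in prepare_ftrace_return() above, assuming MCOUNT_INSN_SIZE is 4 bytes and MCOUNT_OFFSET_INSNS is 4 (the non KBUILD_MCOUNT_RA_ADDRESS/CONFIG_32BIT case); the return-address values are invented for the example.

#include <stdio.h>

#define MCOUNT_INSN_SIZE	4	/* one MIPS instruction */
#define MCOUNT_OFFSET_INSNS	4

/* Walk back from _mcount's return address to the recorded call site */
static unsigned long mcount_call_site(unsigned long self_ra, int in_kernel)
{
	/* kernel: jal _mcount + delay slot; module: lui/addiu/move/jalr + delay slot */
	int insns = in_kernel ? 2 : MCOUNT_OFFSET_INSNS + 1;

	return self_ra - MCOUNT_INSN_SIZE * insns;
}

int main(void)
{
	/* kernel: ra is 8 bytes past the jal, so this recovers the jal itself */
	printf("0x%lx\n", mcount_call_site(0x8010000cUL, 1));	/* 0x80100004 */
	/* module: ra is 20 bytes past the lui, so this recovers the lui */
	printf("0x%lx\n", mcount_call_site(0xc0100014UL, 0));	/* 0xc0100000 */
	return 0;
}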