@@ -158,12 +158,13 @@ void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
 void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
 {
 	/* addis r4,0,(insn)@h */
-	*addr++ = PPC_INST_ADDIS | ___PPC_RT(4) |
-		  ((val >> 16) & 0xffff);
+	patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(4) |
+			  ((val >> 16) & 0xffff));
+	addr++;
 
 	/* ori r4,r4,(insn)@l */
-	*addr = PPC_INST_ORI | ___PPC_RA(4) | ___PPC_RS(4) |
-		(val & 0xffff);
+	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(4) |
+			  ___PPC_RS(4) | (val & 0xffff));
 }
 
 /*
@@ -173,32 +174,37 @@ void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
 void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr)
 {
 	/* lis r3,(op)@highest */
-	*addr++ = PPC_INST_ADDIS | ___PPC_RT(3) |
-		  ((val >> 48) & 0xffff);
+	patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(3) |
+			  ((val >> 48) & 0xffff));
+	addr++;
 
 	/* ori r3,r3,(op)@higher */
-	*addr++ = PPC_INST_ORI | ___PPC_RA(3) | ___PPC_RS(3) |
-		  ((val >> 32) & 0xffff);
+	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
+			  ___PPC_RS(3) | ((val >> 32) & 0xffff));
+	addr++;
 
 	/* rldicr r3,r3,32,31 */
-	*addr++ = PPC_INST_RLDICR | ___PPC_RA(3) | ___PPC_RS(3) |
-		  __PPC_SH64(32) | __PPC_ME64(31);
+	patch_instruction(addr, PPC_INST_RLDICR | ___PPC_RA(3) |
+			  ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31));
+	addr++;
 
 	/* oris r3,r3,(op)@h */
-	*addr++ = PPC_INST_ORIS | ___PPC_RA(3) | ___PPC_RS(3) |
-		  ((val >> 16) & 0xffff);
+	patch_instruction(addr, PPC_INST_ORIS | ___PPC_RA(3) |
+			  ___PPC_RS(3) | ((val >> 16) & 0xffff));
+	addr++;
 
 	/* ori r3,r3,(op)@l */
-	*addr = PPC_INST_ORI | ___PPC_RA(3) | ___PPC_RS(3) |
-		(val & 0xffff);
+	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
+			  ___PPC_RS(3) | (val & 0xffff));
 }
 
 int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
 {
 	kprobe_opcode_t *buff, branch_op_callback, branch_emulate_step;
 	kprobe_opcode_t *op_callback_addr, *emulate_step_addr;
 	long b_offset;
-	unsigned long nip;
+	unsigned long nip, size;
+	int rc, i;
 
 	kprobe_ppc_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
 
@@ -231,8 +237,14 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
 		goto error;
 
 	/* Setup template */
-	memcpy(buff, optprobe_template_entry,
-			TMPL_END_IDX * sizeof(kprobe_opcode_t));
+	/* We can optimize this via patch_instruction_window later */
+	size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
+	pr_devel("Copying template to %p, size %lu\n", buff, size);
+	for (i = 0; i < size; i++) {
+		rc = patch_instruction(buff + i, *(optprobe_template_entry + i));
+		if (rc < 0)
+			goto error;
+	}
 
 	/*
 	 * Fixup the template with instructions to:
@@ -261,8 +273,8 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
 	if (!branch_op_callback || !branch_emulate_step)
 		goto error;
 
-	buff[TMPL_CALL_HDLR_IDX] = branch_op_callback;
-	buff[TMPL_EMULATE_IDX] = branch_emulate_step;
+	patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
+	patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step);
 
 	/*
 	 * 3. load instruction to be emulated into relevant register, and
@@ -272,8 +284,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
 	/*
 	 * 4. branch back from trampoline
 	 */
-	buff[TMPL_RET_IDX] = create_branch((unsigned int *)buff + TMPL_RET_IDX,
-				(unsigned long)nip, 0);
+	patch_branch(buff + TMPL_RET_IDX, (unsigned long)nip, 0);
 
 	flush_icache_range((unsigned long)buff,
 			   (unsigned long)(&buff[TMPL_END_IDX]));