/*
 *  Kernel Probes (KProbes)
 *  arch/x86_64/kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <kenistoj@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> adapted for x86_64
 * 2005-Mar	Roland McGrath <roland@redhat.com>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
 *		Added function return probes functionality
 */
#include <linux/config.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/moduleloader.h>

#include <asm/pgtable.h>
#include <asm/kdebug.h>
static DECLARE_MUTEX(kprobe_mutex);

/* kprobe_status settings */
#define KPROBE_HIT_ACTIVE	0x00000001
#define KPROBE_HIT_SS		0x00000002

static struct kprobe *current_kprobe;
static unsigned long kprobe_status, kprobe_old_rflags, kprobe_saved_rflags;
static struct pt_regs jprobe_saved_regs;
static long *jprobe_saved_rsp;
static kprobe_opcode_t *get_insn_slot(void);
static void free_insn_slot(kprobe_opcode_t *slot);
void jprobe_return_end(void);

/* copy of the kernel stack at the probe fire time */
static kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
/*
 * Returns non-zero if the opcode modifies the interrupt flag.
 */
static inline int is_IF_modifier(kprobe_opcode_t *insn)
{
	switch (*insn) {
	case 0xfa:	/* cli */
	case 0xfb:	/* sti */
	case 0xcf:	/* iret/iretd */
	case 0x9d:	/* popf/popfd */
		return 1;
	}

	if (*insn >= 0x40 && *insn <= 0x4f && *++insn == 0xcf)
		return 1;	/* iret/iretd with a REX prefix */
	return 0;
}
int arch_prepare_kprobe(struct kprobe *p)
{
	/* insn: must be on special executable page on x86_64. */
	down(&kprobe_mutex);
	p->ainsn.insn = get_insn_slot();
	up(&kprobe_mutex);
	if (!p->ainsn.insn)
		return -ENOMEM;
	return 0;
}
/*
 * Determine if the instruction uses the %rip-relative addressing mode.
 * If it does, return the address of the 32-bit displacement word.
 * If not, return null.
 */
static inline s32 *is_riprel(u8 *insn)
{
#define W(row,b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,ba,bb,bc,bd,be,bf)		      \
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 64))
	static const u64 onebyte_has_modrm[256 / 64] = {
		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
		/*      -------------------------------         */
		W(0x00, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0)| /* 00 */
		W(0x10, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0)| /* 10 */
		W(0x20, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0)| /* 20 */
		W(0x30, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0), /* 30 */
		W(0x40, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 40 */
		W(0x50, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 50 */
		W(0x60, 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0)| /* 60 */
		W(0x70, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 70 */
		W(0x80, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 80 */
		W(0x90, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 90 */
		W(0xa0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* a0 */
		W(0xb0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* b0 */
		W(0xc0, 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0)| /* c0 */
		W(0xd0, 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1)| /* d0 */
		W(0xe0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* e0 */
		W(0xf0, 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1)  /* f0 */
		/*      -------------------------------         */
		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
	};
	static const u64 twobyte_has_modrm[256 / 64] = {
		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
		/*      -------------------------------         */
		W(0x00, 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1)| /* 0f */
		W(0x10, 1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0)| /* 1f */
		W(0x20, 1,1,1,1,1,0,1,0,1,1,1,1,1,1,1,1)| /* 2f */
		W(0x30, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 3f */
		W(0x40, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 4f */
		W(0x50, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 5f */
		W(0x60, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 6f */
		W(0x70, 1,1,1,1,1,1,1,0,0,0,0,0,1,1,1,1), /* 7f */
		W(0x80, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 8f */
		W(0x90, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 9f */
		W(0xa0, 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1)| /* af */
		W(0xb0, 1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1), /* bf */
		W(0xc0, 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0)| /* cf */
		W(0xd0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* df */
		W(0xe0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* ef */
		W(0xf0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0)  /* ff */
		/*      -------------------------------         */
		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
	};
#undef W
	int need_modrm;

	/* Skip legacy instruction prefixes.  */
	while (1) {
		switch (*insn) {
		case 0x66: case 0x67: case 0x2e: case 0x3e:
		case 0x26: case 0x64: case 0x65: case 0x36:
		case 0xf0: case 0xf2: case 0xf3:
			++insn;
			continue;
		}
		break;
	}

	/* Skip REX instruction prefix.  */
	if ((*insn & 0xf0) == 0x40)
		++insn;

	if (*insn == 0x0f) {	/* Two-byte opcode.  */
		++insn;
		need_modrm = test_bit(*insn, twobyte_has_modrm);
	} else {		/* One-byte opcode.  */
		need_modrm = test_bit(*insn, onebyte_has_modrm);
	}

	if (need_modrm) {
		u8 modrm = *++insn;
		if ((modrm & 0xc7) == 0x05) { /* %rip+disp32 addressing mode */
			/* Displacement follows ModRM byte.  */
			return (s32 *) ++insn;
		}
	}

	/* No %rip-relative addressing mode here.  */
	return NULL;
}
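
/*
 * Example (illustrative values, not from the original source): the
 * %rip-relative load "mov 0x12345678(%rip),%rax" is encoded as
 *
 *	48 8b 05 78 56 34 12
 *	REX.W, opcode, ModRM, disp32
 *
 * is_riprel() skips the REX prefix 0x48, finds opcode 0x8b marked in
 * onebyte_has_modrm, and sees ModRM 0x05 (mod=00, r/m=101), i.e.
 * (modrm & 0xc7) == 0x05, so it returns a pointer to the disp32 bytes
 * 78 56 34 12 (little-endian 0x12345678).
 */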
void arch_copy_kprobe(struct kprobe *p)
{
	s32 *ripdisp;

	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE);
	ripdisp = is_riprel(p->ainsn.insn);
	if (ripdisp) {
		/*
		 * The copied instruction uses the %rip-relative
		 * addressing mode.  Adjust the displacement for the
		 * difference between the original location of this
		 * instruction and the location of the copy that will
		 * actually be run.  The tricky bit here is making sure
		 * that the sign extension happens correctly in this
		 * calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the
		 * %rip value and yield the same 64-bit result that the
		 * sign-extension of the original signed 32-bit
		 * displacement would have given.
		 */
		s64 disp = (u8 *) p->addr + *ripdisp - (u8 *) p->ainsn.insn;
		BUG_ON((s64) (s32) disp != disp); /* Sanity check.  */
		*ripdisp = disp;
	}
}
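
/*
 * Worked example of the fixup above (addresses are made up): if the
 * probed instruction at p->addr = 0xffffffff80100000 has
 * *ripdisp = 0x1000, and its copy runs at
 * p->ainsn.insn = 0xffffffff80200000, then
 *
 *	disp = 0xffffffff80100000 + 0x1000 - 0xffffffff80200000
 *	     = -0xff000			(0xfff01000 as an s32)
 *
 * so the copy references exactly the bytes the original referenced.
 * The BUG_ON fires only if the copy landed 2GB or more away from the
 * original instruction, where no s32 displacement could reach.
 */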
void arch_remove_kprobe(struct kprobe *p)
{
	down(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn);
	up(&kprobe_mutex);
}
static inline void disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
{
	*p->addr = p->opcode;
	regs->rip = (unsigned long)p->addr;
}
static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	regs->eflags |= TF_MASK;
	regs->eflags &= ~IF_MASK;

	/* Single-step inline if the instruction is an int3. */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->rip = (unsigned long)p->addr;
	else
		regs->rip = (unsigned long)p->ainsn.insn;
}
struct task_struct *arch_get_kprobe_task(void *ptr)
{
	return ((struct thread_info *) (((unsigned long) ptr) &
					(~(THREAD_SIZE - 1))))->task;
}
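
/*
 * Example of the masking above (assuming the usual 8KB x86_64 kernel
 * stacks, THREAD_SIZE = 0x2000): a stack address such as
 * 0xffff810012345e78 & ~0x1fff = 0xffff810012344000, the base of the
 * stack, where the thread_info (and thus the owning task_struct
 * pointer) lives.
 */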
void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
{
	unsigned long *sara = (unsigned long *)regs->rsp;
	struct kretprobe_instance *ri;
	static void *orig_ret_addr;

	/*
	 * Save the return address when the return probe hits
	 * the first time, and use it to populate the (kretprobe
	 * instance)->ret_addr for subsequent return probes at
	 * the same address, since the stack slot will already hold
	 * the kretprobe_trampoline address by then.
	 */
	if (((void *) *sara) != kretprobe_trampoline)
		orig_ret_addr = (void *) *sara;

	if ((ri = get_free_rp_inst(rp)) != NULL) {
		ri->rp = rp;
		ri->stack_addr = sara;
		ri->ret_addr = orig_ret_addr;
		add_rp_inst(ri);
		/* Replace the return addr with trampoline addr. */
		*sara = (unsigned long) &kretprobe_trampoline;
	} else {
		rp->nmissed++;
	}
}
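
/*
 * Illustrative stack picture (addresses are made up).  On entry to a
 * retprobed function, *sara holds the caller's return address:
 *
 *	before:	*sara == 0xffffffff80123456	(return into the caller)
 *	after:	*sara == &kretprobe_trampoline
 *
 * so the function's "ret" lands in the trampoline, where
 * trampoline_probe_handler() runs the user handler and
 * trampoline_post_handler() restores the saved ret_addr into regs->rip.
 */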
void arch_kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;

	while ((ri = get_rp_inst_tsk(tk)) != NULL) {
		*((unsigned long *)(ri->stack_addr)) =
					(unsigned long) ri->ret_addr;
		recycle_rp_inst(ri);
	}
}
/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr = (kprobe_opcode_t *)(regs->rip - sizeof(kprobe_opcode_t));

	/* We're in an interrupt, but this is clear and BUG()-safe. */
	preempt_disable();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		/* We *are* holding lock here, so this is safe.
		   Disarm the probe we just hit, and ignore it. */
		p = get_kprobe(addr);
		if (p) {
			if (kprobe_status == KPROBE_HIT_SS) {
				regs->eflags &= ~TF_MASK;
				regs->eflags |= kprobe_saved_rflags;
				unlock_kprobes();
				goto no_kprobe;
			}
			disarm_kprobe(p, regs);
			ret = 1;
		} else {
			p = current_kprobe;
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		/* If it's not ours, can't be delete race, (we hold lock). */
		goto no_kprobe;
	}

	lock_kprobes();
	p = get_kprobe(addr);
	if (!p) {
		unlock_kprobes();
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	kprobe_status = KPROBE_HIT_ACTIVE;
	current_kprobe = p;
	kprobe_saved_rflags = kprobe_old_rflags
	    = (regs->eflags & (TF_MASK | IF_MASK));
	if (is_IF_modifier(p->ainsn.insn))
		kprobe_saved_rflags &= ~IF_MASK;

	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
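
/*
 * Minimal usage sketch for the path above (illustrative only; the
 * probed symbol and handler names are hypothetical, and a real client
 * would live in its own module):
 *
 *	static int my_pre(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		printk("probe at %p hit, rip=%lx\n", kp->addr, regs->rip);
 *		return 0;	(0: let kprobe_handler set up single-step)
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.pre_handler = my_pre,
 *	};
 *
 *	my_kp.addr = (kprobe_opcode_t *) my_probed_function;
 *	register_kprobe(&my_kp);
 *
 * A pre_handler returning nonzero tells kprobe_handler() that it has
 * already adjusted regs itself, so the single-step setup is skipped.
 */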
/*
 * For function-return probes, init_kprobes() establishes a probepoint
 * here.  When a retprobed function returns, this probe is hit and
 * trampoline_probe_handler() runs, calling the kretprobe's handler.
 */
void kretprobe_trampoline_holder(void)
{
	asm volatile (	".global kretprobe_trampoline\n"
			"kretprobe_trampoline:\n"
			"	nop\n");
}
/*
 * Called when we hit the probe point at kretprobe_trampoline.
 */
int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct kretprobe_instance *ri;
	struct hlist_head *head;
	struct hlist_node *node;
	unsigned long *sara = (unsigned long *)regs->rsp - 1;

	tsk = arch_get_kprobe_task(sara);
	head = kretprobe_inst_table_head(tsk);

	hlist_for_each_entry(ri, node, head, hlist) {
		if (ri->stack_addr == sara && ri->rp) {
			if (ri->rp->handler)
				ri->rp->handler(ri, regs);
		}
	}
	return 0;
}
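
/*
 * Usage sketch for return probes (illustrative; the probed symbol and
 * handler name are hypothetical):
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		printk("returning to %p, rax=%lx\n",
 *		       ri->ret_addr, regs->rax);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.handler = my_ret_handler,
 *		.maxactive = 4,		(concurrent instances to preallocate)
 *	};
 *
 *	my_rp.kp.addr = (kprobe_opcode_t *) my_probed_function;
 *	register_kretprobe(&my_rp);
 *
 * When my_probed_function returns, the trampoline probe fires and the
 * loop above matches this task's kretprobe_instance by its saved stack
 * address, then calls my_ret_handler with the return state in regs.
 */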
void trampoline_post_handler(struct kprobe *p, struct pt_regs *regs,
			     unsigned long flags)
{
	struct kretprobe_instance *ri;
	/* RA already popped */
	unsigned long *sara = ((unsigned long *)regs->rsp) - 1;

	while ((ri = get_rp_inst(sara))) {
		regs->rip = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri);
	}
	regs->eflags &= ~TF_MASK;
}
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new rip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed eflags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 */
static void resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long *tos = (unsigned long *)regs->rsp;
	unsigned long next_rip = 0;
	unsigned long copy_rip = (unsigned long)p->ainsn.insn;
	unsigned long orig_rip = (unsigned long)p->addr;
	kprobe_opcode_t *insn = p->ainsn.insn;

	/* Skip the REX prefix. */
	if (*insn >= 0x40 && *insn <= 0x4f)
		insn++;

	switch (*insn) {
	case 0x9c:	/* pushfl */
		*tos &= ~(TF_MASK | IF_MASK);
		*tos |= kprobe_old_rflags;
		break;
	case 0xc3:	/* ret/lret */
	case 0xcb:
	case 0xc2:
	case 0xca:
		regs->eflags &= ~TF_MASK;
		/* rip is already adjusted, no more changes required */
		return;
	case 0xe8:	/* call relative - Fix return addr */
		*tos = orig_rip + (*tos - copy_rip);
		break;
	case 0xff:
		/* Examine the ModRM reg field to classify the 0xff form. */
		if ((insn[1] & 0x30) == 0x10) {
			/* call absolute, indirect */
			/* Fix return addr; rip is correct. */
			next_rip = regs->rip;
			*tos = orig_rip + (*tos - copy_rip);
		} else if (((insn[1] & 0x31) == 0x20) ||	/* jmp near, absolute indirect */
			   ((insn[1] & 0x31) == 0x21)) {	/* jmp far, absolute indirect */
			/* rip is correct. */
			next_rip = regs->rip;
		}
		break;
	case 0xea:	/* jmp absolute -- rip is correct */
		next_rip = regs->rip;
		break;
	default:
		break;
	}

	regs->eflags &= ~TF_MASK;
	if (next_rip) {
		regs->rip = next_rip;
	} else {
		regs->rip = orig_rip + (regs->rip - copy_rip);
	}
}
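
/*
 * Worked example for the "call relative" fixup above (addresses are
 * made up): a 5-byte "call rel32" at orig_rip = 0xffffffff80100000 is
 * copied to copy_rip = 0xffffffff80200000.  Single-stepping the copy
 * pushes copy_rip + 5 = 0xffffffff80200005 as the return address, so
 *
 *	*tos = orig_rip + (*tos - copy_rip)
 *	     = 0xffffffff80100000 + 5 = 0xffffffff80100005
 *
 * which is the byte after the original call, exactly where the probed
 * callee must return.
 */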
/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.  And we hold the kprobe lock.
 */
int post_kprobe_handler(struct pt_regs *regs)
{
	if (!kprobe_running())
		return 0;

	if (current_kprobe->post_handler)
		current_kprobe->post_handler(current_kprobe, regs, 0);

	if (current_kprobe->post_handler != trampoline_post_handler)
		resume_execution(current_kprobe, regs);
	regs->eflags |= kprobe_saved_rflags;

	unlock_kprobes();
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, eflags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->eflags & TF_MASK)
		return 0;

	return 1;
}
/* Interrupts disabled, kprobe_lock held. */
int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	if (current_kprobe->fault_handler
	    && current_kprobe->fault_handler(current_kprobe, regs, trapnr))
		return 1;

	if (kprobe_status & KPROBE_HIT_SS) {
		resume_execution(current_kprobe, regs);
		regs->eflags |= kprobe_old_rflags;

		unlock_kprobes();
		preempt_enable_no_resched();
	}
	return 0;
}
/*
 * Wrapper routine for handling exceptions.
 */
int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
			     void *data)
{
	struct die_args *args = (struct die_args *)data;

	switch (val) {
	case DIE_INT3:
		if (kprobe_handler(args->regs))
			return NOTIFY_STOP;
		break;
	case DIE_DEBUG:
		if (post_kprobe_handler(args->regs))
			return NOTIFY_STOP;
		break;
	case DIE_GPF:
		if (kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			return NOTIFY_STOP;
		break;
	case DIE_PAGE_FAULT:
		if (kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			return NOTIFY_STOP;
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}
int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;

	jprobe_saved_regs = *regs;
	jprobe_saved_rsp = (long *) regs->rsp;
	addr = (unsigned long)jprobe_saved_rsp;
	/*
	 * As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization.  So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(jprobes_stack, (kprobe_opcode_t *) addr, MIN_STACK_SIZE(addr));
	regs->eflags &= ~IF_MASK;
	regs->rip = (unsigned long)(jp->entry);
	return 1;
}
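
/*
 * Jprobe usage sketch (illustrative; the probed function and its
 * mirrored signature are hypothetical):
 *
 *	static long my_mirror(unsigned long arg0, unsigned long arg1)
 *	{
 *		printk("called with %lx, %lx\n", arg0, arg1);
 *		jprobe_return();	(mandatory: never return normally)
 *		return 0;		(unreachable)
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry = (kprobe_opcode_t *) my_mirror,
 *	};
 *
 *	my_jp.kp.addr = (kprobe_opcode_t *) my_probed_function;
 *	register_jprobe(&my_jp);
 *
 * setjmp_pre_handler() above redirects rip to my_mirror, which runs
 * with the original registers and stack and so sees the real
 * arguments; the breakpoint in jprobe_return() then lets
 * longjmp_break_handler() restore the saved regs and stack bytes.
 */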
void jprobe_return(void)
{
	preempt_enable_no_resched();
	asm volatile ("	xchg	%%rbx,%%rsp	\n"
		      "	int3			\n"
		      "	.globl jprobe_return_end \n"
		      "	jprobe_return_end:	\n"
		      "	nop			\n"
		      ::"b" (jprobe_saved_rsp):"memory");
}
int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	u8 *addr = (u8 *) (regs->rip - 1);
	unsigned long stack_addr = (unsigned long)jprobe_saved_rsp;
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
		if ((long *)regs->rsp != jprobe_saved_rsp) {
			struct pt_regs *saved_regs =
			    container_of(jprobe_saved_rsp, struct pt_regs, rsp);
			printk("current rsp %p does not match saved rsp %p\n",
			       (long *)regs->rsp, jprobe_saved_rsp);
			printk("Saved registers for jprobe %p\n", jp);
			show_registers(saved_regs);
			printk("Current registers\n");
			show_registers(regs);
			BUG();
		}
		*regs = jprobe_saved_regs;
		memcpy((kprobe_opcode_t *) stack_addr, jprobes_stack,
		       MIN_STACK_SIZE(stack_addr));
		return 1;
	}
	return 0;
}
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped.  By default on x86_64, pages we get from kmalloc or
 * vmalloc are not executable.  Single-stepping an instruction on such
 * a page yields an oops.  So instead of storing the instruction copies
 * in their respective kprobe objects, we allocate a page, map it
 * executable, and store all the instruction copies there.  (We can
 * allocate additional pages if somebody inserts a huge number of
 * probes.)  Each page can hold up to INSNS_PER_PAGE instruction slots,
 * each of which is MAX_INSN_SIZE*sizeof(kprobe_opcode_t) bytes.
 */
#define INSNS_PER_PAGE (PAGE_SIZE/(MAX_INSN_SIZE*sizeof(kprobe_opcode_t)))
struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
};

static struct hlist_head kprobe_insn_pages;
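
/*
 * Worked slot arithmetic (assuming the usual x86_64 values
 * PAGE_SIZE = 4096, MAX_INSN_SIZE = 15, sizeof(kprobe_opcode_t) = 1):
 * INSNS_PER_PAGE = 4096 / (15 * 1) = 273, so one executable page holds
 * up to 273 instruction copies, and slot i starts at
 * insns + i * MAX_INSN_SIZE.
 */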
/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
static kprobe_opcode_t *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (!kip->slot_used[i]) {
					kip->slot_used[i] = 1;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* All out of space.  Need to allocate a new page.  Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * For the %rip-relative displacement fixups to be doable, we
	 * need our instruction copy to be within +/- 2GB of any data it
	 * might access via %rip.  That is, within 2GB of where the
	 * kernel image and loaded module images reside.  So we allocate
	 * a page in the module loading area.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, 0, INSNS_PER_PAGE);
	kip->slot_used[0] = 1;
	kip->nused = 1;
	return kip->insns;
}
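
/*
 * Round-trip example (illustrative, with MAX_INSN_SIZE = 15): the
 * third allocation from a fresh page returns insns + 2 * 15.
 * free_insn_slot() below recovers the index from that address as
 * (slot - insns) / 15 = 2 and clears slot_used[2], making the slot
 * reusable.
 */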
/**
 * free_insn_slot() - Free instruction slot obtained from get_insn_slot().
 */
static void free_insn_slot(kprobe_opcode_t *slot)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->insns <= slot
		    && slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			kip->slot_used[i] = 0;
			kip->nused--;
			if (kip->nused == 0) {
				/*
				 * Page is no longer in use.  Free it unless
				 * it's the last one.  We keep the last one
				 * so as not to have to set it up again the
				 * next time somebody inserts a probe.
				 */
				hlist_del(&kip->hlist);
				if (hlist_empty(&kprobe_insn_pages)) {
					INIT_HLIST_NODE(&kip->hlist);
					hlist_add_head(&kip->hlist,
						       &kprobe_insn_pages);
				} else {
					module_free(NULL, kip->insns);
					kfree(kip);
				}
			}
			return;
		}
	}
}