/* Copyright (C) 2007-2009  Ksplice, Inc.
 * Authors: Jeff Arnold, Anders Kaseorg, Tim Abbott
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#if defined(_ASM_X86_PROCESSOR_H) || \
    defined(__ASM_X86_PROCESSOR_H)	/* New unified x86 */
#define KSPLICE_IP(x) ((x)->thread.ip)
#define KSPLICE_SP(x) ((x)->thread.sp)
#elif defined(CONFIG_X86_64)	/* Old x86 64-bit */
/* The IP is on the stack, so we don't need to check it separately.
 * Instead, we need to prevent Ksplice from patching thread_return. */
extern const char thread_return[];
EXTRACT_SYMBOL(thread_return);
#define KSPLICE_IP(x) ((unsigned long)thread_return)
#define KSPLICE_SP(x) ((x)->thread.rsp)
#else /* Old x86 32-bit */
#define KSPLICE_IP(x) ((x)->thread.eip)
#define KSPLICE_SP(x) ((x)->thread.esp)
#endif /* __ASM_X86_PROCESSOR_H */

#ifndef CONFIG_FUNCTION_DATA_SECTIONS
#include "udis86.h"
#ifdef CONFIG_FTRACE
#include <asm/ftrace.h>
#include <linux/ftrace.h>

extern ftrace_func_t ftrace_trace_function;
EXTRACT_SYMBOL(ftrace_trace_function);
#endif /* CONFIG_FTRACE */

#define N_BITS(n) ((n) < sizeof(long) * 8 ? ~(~0L << (n)) : ~0L)
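/* For example, N_BITS(8) == 0xff and N_BITS(16) == 0xffff; the conditional
 * keeps N_BITS(BITS_PER_LONG) well defined, since shifting a long by its
 * full width is undefined behavior in C. */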

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
/* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
#if defined(_I386_BUG_H) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) || \
			     defined(CONFIG_DEBUG_BUGVERBOSE)) && \
    !defined(do_each_thread_ve)	/* OpenVZ */
/* 38326f786cf4529a86b1ccde3aa17f4fa7e8472a was after 2.6.10 */
/* ud2 means BUG().  On old i386 kernels, it is followed
   by 2 bytes and then a 4-byte relocation; and is not
   disassembler-friendly. */
struct bug_frame {
	unsigned char ud2[2];
	unsigned short line;
	char *filename;
} __attribute__((packed));
#define KSPLICE_USE_BUG_FRAME
#elif defined(__ASM_X8664_BUG_H)
#define KSPLICE_USE_BUG_FRAME
#endif
#endif /* LINUX_VERSION_CODE */
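
/* On those kernels, BUG() expands to roughly:
 *	ud2			2 bytes: 0f 0b
 *	.word __LINE__		2 bytes
 *	.long __FILE__		4 bytes (relocated pointer)
 * which is the layout struct bug_frame describes. */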

static abort_t compare_instructions(struct ksplice_mod_change *change,
				    struct ksplice_section *sect,
				    const struct ksplice_reloc **fingerp,
				    const unsigned char *run_start,
				    const unsigned char *run,
				    const unsigned char *pre, struct ud *run_ud,
				    struct ud *pre_ud, enum run_pre_mode mode);
static abort_t compare_operands(struct ksplice_mod_change *change,
				struct ksplice_section *sect,
				const struct ksplice_reloc **fingerp,
				const unsigned char *run_start,
				const unsigned char *run,
				const unsigned char *pre, struct ud *run_ud,
				struct ud *pre_ud, int opnum,
				enum run_pre_mode mode);
static uint8_t ud_operand_len(struct ud_operand *operand);
static uint8_t ud_prefix_len(struct ud *ud);
static long ud_operand_lval(struct ud_operand *operand);
static int next_run_byte(struct ud *ud);
static bool is_nop(struct ud *ud, const unsigned char *addr);
static bool is_unconditional_jump(struct ud *ud);
static bool is_mcount_call(struct ud *ud, const unsigned char *addr);
static void initialize_ksplice_ud(struct ud *ud);

static abort_t arch_run_pre_cmp(struct ksplice_mod_change *change,
				struct ksplice_section *sect,
				unsigned long run_addr,
				struct list_head *safety_records,
				enum run_pre_mode mode)
{
	abort_t ret;
	const unsigned char *run, *pre, *run_start, *pre_start, *safety_start;
	/* struct ud is big so we avoid putting it on the stack.  This
	 * is safe because we are holding module_mutex. */
	static struct ud pre_ud, run_ud;
	const unsigned char **match_map;
	const struct ksplice_reloc *finger;
	unsigned long pre_offset, run_offset;
	bool run_unconditional = false;
	bool pre_nop = true, run_nop = true;

	pre_start = (const unsigned char *)sect->address;
	run_start = (const unsigned char *)run_addr;

	finger = init_reloc_search(change, sect);

	run = run_start;
	pre = pre_start;

	initialize_ksplice_ud(&pre_ud);
	ud_set_input_buffer(&pre_ud, (unsigned char *)pre, sect->size);

	initialize_ksplice_ud(&run_ud);
	ud_set_input_hook(&run_ud, next_run_byte);
	ud_set_user_opaque_data(&run_ud, (unsigned char *)run_addr);
	safety_start = run_start;

	match_map = vmalloc(sizeof(*match_map) * sect->size);
	if (match_map == NULL)
		return OUT_OF_MEMORY;
	memset(match_map, 0, sizeof(*match_map) * sect->size);
	match_map[0] = run_start;
	sect->match_map = match_map;
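	/* match_map[i] records where in the run code the pre instruction at
	 * offset i was matched; jump targets recorded here are checked for
	 * consistency as matching proceeds.  match_map[0] == run_start
	 * because the first pre instruction must correspond to the start of
	 * the run code. */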
	sect->unmatched = 1;

	while (1) {
		if (pre_nop && ud_disassemble(&pre_ud) == 0) {
			/* Ran out of pre bytes to match; we're done! */
			unsigned long safety_offset = run - safety_start;
			if (sect->unmatched != 0) {
				if (mode == RUN_PRE_DEBUG)
					ksdebug(change, "%d unmatched jumps\n",
						sect->unmatched);
				ret = NO_MATCH;
				goto out;
			}
			ret = create_safety_record(change, sect, safety_records,
						   (unsigned long)safety_start,
						   safety_offset);
			goto out;
		}

		if (run_nop && ud_disassemble(&run_ud) == 0) {
			ret = NO_MATCH;
			goto out;
		}

		pre_nop = is_nop(&pre_ud, pre) || is_mcount_call(&pre_ud, pre);
		run_nop = is_nop(&run_ud, run) || is_mcount_call(&run_ud, run);
		if (pre_nop && !run_nop) {
			if (mode == RUN_PRE_DEBUG) {
				ksdebug(change, "| nop: ");
				print_bytes(change, run, 0, pre,
					    ud_insn_len(&pre_ud));
			}
			pre += ud_insn_len(&pre_ud);
			continue;
		}
		if (run_nop && !pre_nop) {
			if (mode == RUN_PRE_DEBUG) {
				ksdebug(change, "| nop: ");
				print_bytes(change, run, ud_insn_len(&run_ud),
					    pre, 0);
			}
			run += ud_insn_len(&run_ud);
			continue;
		}
		if (run_nop && pre_nop) {
			ret = compare_instructions(change, sect, &finger,
						   run_start, run, pre, &run_ud,
						   &pre_ud, RUN_PRE_SILENT);
			if (ret == NO_MATCH) {
				if (mode == RUN_PRE_DEBUG) {
					ksdebug(change, "| nop: ");
					print_bytes(change, run,
						    ud_insn_len(&run_ud), pre,
						    ud_insn_len(&pre_ud));
				}
				run += ud_insn_len(&run_ud);
				pre += ud_insn_len(&pre_ud);
				continue;
			} else if (ret != NO_MATCH && ret != OK) {
				goto out;
			}
		}

		pre_offset = pre - pre_start;

		if (match_map[pre_offset] == NULL) {
			match_map[pre_offset] = run;
		} else if (match_map[pre_offset] == run) {
			sect->unmatched--;
		} else {
			/* There is a discontinuity in the match map.
			   Check that the last instruction was an
			   unconditional change of control */
			if (!run_unconditional) {
				ksdebug(change, "<--[No unconditional change "
					"of control at control transfer point "
					"%lx]\n", pre_offset);
				ret = NO_MATCH;
				goto out;
			}

			if (mode == RUN_PRE_DEBUG)
				ksdebug(change, " [Moving run pointer for %lx "
					"from %lx to %lx]\n", pre_offset,
					(unsigned long)(run - run_start),
					(unsigned long)(match_map[pre_offset] -
							run_start));

			/* Create a safety_record for the block just matched */
			ret = create_safety_record(change, sect, safety_records,
						   (unsigned long)safety_start,
						   run - safety_start);
			if (ret != OK)
				goto out;

			/* We re-initialize the run ud structure because
			   it may have cached upcoming bytes */
			run = match_map[pre_offset];
			initialize_ksplice_ud(&run_ud);
			ud_set_input_hook(&run_ud, next_run_byte);
			ud_set_user_opaque_data(&run_ud, (unsigned char *)run);
			safety_start = run;
			if (ud_disassemble(&run_ud) == 0) {
				ret = NO_MATCH;
				goto out;
			}
		}

		run_offset = run - run_start;
		run_unconditional = is_unconditional_jump(&run_ud);
		run_nop = true;
		pre_nop = true;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) && \
    defined(KSPLICE_USE_BUG_FRAME)
		/* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
		if (run_ud.mnemonic == pre_ud.mnemonic &&
		    run_ud.mnemonic == UD_Iud2) {
			const struct bug_frame
			    *pre_bug = (const struct bug_frame *)pre,
			    *run_bug = (const struct bug_frame *)run;
			const struct ksplice_reloc *r;
			ret = lookup_reloc(change, &finger,
					   (unsigned long)&pre_bug->filename,
					   &r);
			if (ret == NO_MATCH) {
				if (mode == RUN_PRE_INITIAL)
					ksdebug(change, "Unrecognized ud2\n");
				goto out;
			}
			if (ret != OK)
				goto out;
			ret = handle_reloc(change, sect, r,
					   (unsigned long)&run_bug->filename,
					   mode);
			if (ret != OK)
				goto out;
			/* If there's a relocation, then it's a BUG? */
			if (mode == RUN_PRE_DEBUG) {
				ksdebug(change, "[BUG?: ");
				print_bytes(change,
					    run + sizeof(run_bug->ud2),
					    sizeof(*run_bug) - sizeof(run_bug->ud2),
					    pre + sizeof(pre_bug->ud2),
					    sizeof(*pre_bug) - sizeof(pre_bug->ud2));
				ksdebug(change, "] ");
			}
			pre += sizeof(*pre_bug);
			run += sizeof(*run_bug);
			ud_input_skip(&run_ud,
				      sizeof(*run_bug) - sizeof(run_bug->ud2));
			ud_input_skip(&pre_ud,
				      sizeof(*pre_bug) - sizeof(pre_bug->ud2));
			continue;
		}
#endif /* LINUX_VERSION_CODE && KSPLICE_USE_BUG_FRAME */

#ifdef CONFIG_XEN
		if (run_ud.mnemonic == pre_ud.mnemonic &&
		    run_ud.mnemonic == UD_Iud2) {
			unsigned char bytes[3];
			unsigned char prefix[3] = { 0x78, 0x65, 0x6e };
			if (probe_kernel_read(bytes, (void *)run + 2, 3) !=
			    -EFAULT && pre - pre_start < sect->size &&
			    memcmp(bytes, prefix, 3) == 0 &&
			    memcmp(pre + 2, prefix, 3) == 0) {
				/* Exception for XEN_EMULATE_PREFIX */
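				/* (The prefix bytes 0x78 0x65 0x6e are ASCII
				 * "xen"; the full emulate prefix is ud2a
				 * followed by .ascii "xen", i.e.
				 * 0f 0b 78 65 6e, five bytes in all.) */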
				run += ud_insn_len(&run_ud) + 3;
				pre += ud_insn_len(&pre_ud) + 3;
				ud_input_skip(&run_ud, 3);
				ud_input_skip(&pre_ud, 3);
				continue;
			}
		}
#endif /* CONFIG_XEN */

		ret = compare_instructions(change, sect, &finger, run_start,
					   run, pre, &run_ud, &pre_ud, mode);
		if (ret != OK)
			goto out;
		run += ud_insn_len(&run_ud);
		pre += ud_insn_len(&pre_ud);
	}
out:
	if (ret != OK || mode != RUN_PRE_FINAL) {
		vfree(match_map);
		sect->match_map = NULL;
	}
	return ret;
}

static abort_t compare_instructions(struct ksplice_mod_change *change,
				    struct ksplice_section *sect,
				    const struct ksplice_reloc **fingerp,
				    const unsigned char *run_start,
				    const unsigned char *run,
				    const unsigned char *pre, struct ud *run_ud,
				    struct ud *pre_ud, enum run_pre_mode mode)
{
	abort_t ret;
	int i;
	bool found_bug_entry = false;
	const unsigned char *pre_start = (const unsigned char *)sect->address;
	unsigned long pre_offset = pre - pre_start;
	const struct ksplice_reloc *r;

	if (mode == RUN_PRE_DEBUG) {
		ksdebug(change, "| ");
		print_bytes(change, run, ud_insn_len(run_ud), pre,
			    ud_insn_len(pre_ud));
	}

	if (run_ud->mnemonic != pre_ud->mnemonic) {
		if (mode == RUN_PRE_DEBUG)
			ksdebug(change, "mnemonic mismatch: %s %s\n",
				ud_lookup_mnemonic(run_ud->mnemonic),
				ud_lookup_mnemonic(pre_ud->mnemonic));
		return NO_MATCH;
	}

	if (run_ud->mnemonic == UD_Iinvalid) {
		ksdebug(change, "Unrecognized opcode at %s+%lx\n",
			sect->symbol->label, pre_offset);
		return UNEXPECTED;
	}

	while (1) {
		ret = lookup_reloc(change, fingerp, (unsigned long)pre, &r);
		if (ret == NO_MATCH)
			break;
		else if (ret != OK)
			return ret;
		else if (r->howto->size != 0)
			break;

		if (r->howto->type == KSPLICE_HOWTO_BUG)
			found_bug_entry = true;

		if (mode == RUN_PRE_DEBUG) {
			if (r->howto->type == KSPLICE_HOWTO_EXTABLE)
				ksdebug(change, "[ex] ");
			if (r->howto->type == KSPLICE_HOWTO_BUG)
				ksdebug(change, "[bug] ");
			if (r->howto->type == KSPLICE_HOWTO_SYMBOL)
				ksdebug(change, "[sym] ");
		}
		ret = handle_reloc(change, sect, r, (unsigned long)run, mode);
		if (ret != OK)
			return ret;
		(*fingerp)++;
	}

#if defined(CONFIG_X86_64) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
/* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
#else /* !CONFIG_X86_64 || LINUX_VERSION_CODE >= */
#ifndef do_each_thread_ve	/* OpenVZ */
	if (run_ud->mnemonic == UD_Iud2 && !found_bug_entry) {
		if (strcmp(change->target_name, "kvm_intel") == 0 ||
		    strcmp(change->target_name, "kvm_amd") == 0) {
			/* KVM has ud2a bugs without a bug table entry. */
			if (mode == RUN_PRE_DEBUG)
				ksdebug(change, "[kvm ud2]");
		} else {
			ksdebug(change, "Unexpected ud2\n");
			return NO_MATCH;
		}
	}
#endif /* do_each_thread_ve */
#endif /* CONFIG_X86_64 && LINUX_VERSION_CODE */

	for (i = 0; i < ARRAY_SIZE(run_ud->operand); i++) {
		ret = compare_operands(change, sect, fingerp, run_start, run,
				       pre, run_ud, pre_ud, i, mode);
		if (ret != OK)
			return ret;
	}
	return OK;
}

static abort_t compare_operands(struct ksplice_mod_change *change,
				struct ksplice_section *sect,
				const struct ksplice_reloc **fingerp,
				const unsigned char *run_start,
				const unsigned char *run,
				const unsigned char *pre, struct ud *run_ud,
				struct ud *pre_ud, int opnum,
				enum run_pre_mode mode)
{
	abort_t ret;
	int i;
	const unsigned char *pre_start = (const unsigned char *)sect->address;
	unsigned long pre_offset = pre - pre_start;
	unsigned long run_offset = run - run_start;
	struct ud_operand *run_op = &run_ud->operand[opnum];
	struct ud_operand *pre_op = &pre_ud->operand[opnum];
	uint8_t run_off = ud_prefix_len(run_ud);
	uint8_t pre_off = ud_prefix_len(pre_ud);
	const unsigned char **match_map = sect->match_map;
	const struct ksplice_reloc *r;
	for (i = 0; i < opnum; i++) {
		run_off += ud_operand_len(&run_ud->operand[i]);
		pre_off += ud_operand_len(&pre_ud->operand[i]);
	}

	if (run_op->type != pre_op->type) {
		if (mode == RUN_PRE_DEBUG)
			ksdebug(change, "type mismatch: %d %d\n", run_op->type,
				pre_op->type);
		return NO_MATCH;
	}
	if (run_op->base != pre_op->base) {
		if (mode == RUN_PRE_DEBUG)
			ksdebug(change, "base mismatch: %d %d\n", run_op->base,
				pre_op->base);
		return NO_MATCH;
	}
	if (run_op->index != pre_op->index) {
		if (mode == RUN_PRE_DEBUG)
			ksdebug(change, "index mismatch: %d %d\n",
				run_op->index, pre_op->index);
		return NO_MATCH;
	}
	if (run_op->type == UD_OP_PTR &&
	    run_op->lval.ptr.seg != pre_op->lval.ptr.seg) {
		if (mode == RUN_PRE_DEBUG)
			ksdebug(change, "segment mismatch: %d %d\n",
				run_op->lval.ptr.seg, pre_op->lval.ptr.seg);
		return NO_MATCH;
	}
	if (ud_operand_len(run_op) == 0 && ud_operand_len(pre_op) == 0)
		return OK;

	ret = lookup_reloc(change, fingerp, (unsigned long)(pre + pre_off), &r);
	if (ret == OK) {
		struct ksplice_reloc run_reloc = *r;
		struct ksplice_reloc_howto run_howto = *r->howto;
		unsigned int run_reloc_len = ud_operand_len(run_op);
		unsigned int pre_reloc_len = ud_operand_len(pre_op);

		if (run_op->type == UD_OP_PTR) {
			/* Adjust for reloc length != operand length for
			   instructions that take a segment:offset operand */
			run_reloc_len -= 2;
			pre_reloc_len -= 2;
		}

		run_reloc.howto = &run_howto;
		if (r->howto->size != pre_reloc_len) {
			ksdebug(change, "ksplice_h: run-pre: reloc size %d "
				"differs from disassembled size %d\n",
				r->howto->size, pre_reloc_len);
			return NO_MATCH;
		}
		if (r->howto->size != run_reloc_len &&
		    (r->howto->dst_mask != N_BITS(r->howto->size * 8) ||
		     r->howto->rightshift != 0)) {
			/* Reloc types unsupported with differing reloc sizes */
			ksdebug(change, "ksplice_h: reloc: invalid flags for a "
				"relocation with size changed\n");
			ksdebug(change, "%ld %u\n", r->howto->dst_mask,
				r->howto->rightshift);
			return UNEXPECTED;
		}
		/* adjust for differing relocation size */
		run_howto.size = run_reloc_len;
		if (r->howto->size != run_howto.size)
			run_howto.dst_mask = N_BITS(run_howto.size * 8);
		run_reloc.insn_addend += pre_reloc_len - run_reloc_len;
		ret = handle_reloc(change, sect, &run_reloc,
				   (unsigned long)(run + run_off), mode);
		if (ret != OK) {
			if (mode == RUN_PRE_DEBUG)
				ksdebug(change, "Matching failure at offset "
					"%lx\n", pre_offset);
			return ret;
		}
		/* This operand is a successfully processed relocation */
		return OK;
	} else if (ret != NO_MATCH) {
		return ret;
	}
	if (pre_op->type == UD_OP_JIMM) {
		/* Immediate jump without a relocation */
		const unsigned char *pre_target = pre + ud_insn_len(pre_ud) +
		    ud_operand_lval(pre_op);
		const unsigned char *run_target = run + ud_insn_len(run_ud) +
		    ud_operand_lval(run_op);
		if (pre_target >= pre_start &&
		    pre_target < pre_start + sect->size) {
			/* Jump within the current function.
			   Check it's to a corresponding place */
			unsigned long new_pre_offset = pre_target - pre_start;
			unsigned long new_run_offset = run_target - run_start;
			if (mode == RUN_PRE_DEBUG)
				ksdebug(change, "[Jumps: pre=%lx run=%lx "
					"pret=%lx runt=%lx] ", pre_offset,
					run_offset, new_pre_offset,
					new_run_offset);
			if (match_map[pre_target - pre_start] != NULL &&
			    match_map[pre_target - pre_start] != run_target) {
				ksdebug(change, "<--[Jumps to nonmatching "
					"locations]\n");
				return NO_MATCH;
			} else if (match_map[pre_target - pre_start] == NULL) {
				match_map[pre_target - pre_start] = run_target;
				sect->unmatched++;
			}
			return OK;
		} else if (pre_target == run_target) {
			/* Paravirt-inserted pcrel jump; OK! */
			return OK;
		} else {
			if (mode == RUN_PRE_DEBUG) {
				ksdebug(change, "<--Different operands!\n");
				ksdebug(change, "%lx %lx %lx %lx %x %lx %lx "
					"%lx\n", (unsigned long)pre_start,
					(unsigned long)pre_target,
					(unsigned long)pre_start + sect->size,
					(unsigned long)pre, ud_insn_len(pre_ud),
					sect->size, ud_operand_lval(pre_op),
					(unsigned long)run_target);
			}
			return NO_MATCH;
		}
	} else if (ud_operand_len(pre_op) == ud_operand_len(run_op) &&
		   memcmp(pre + pre_off, run + run_off,
			  ud_operand_len(run_op)) == 0) {
		return OK;
	} else {
		if (mode == RUN_PRE_DEBUG)
			ksdebug(change, "<--Different operands!\n");
		return NO_MATCH;
	}
}

static void initialize_ksplice_ud(struct ud *ud)
{
	ud_init(ud);
	ud_set_mode(ud, BITS_PER_LONG);
	ud_set_syntax(ud, NULL);
	ud_set_pc(ud, 0);
	ud_set_vendor(ud, UD_VENDOR_ANY);
}

#ifdef CONFIG_FTRACE
static bool is_mcount_call(struct ud *ud, const unsigned char *addr)
{
	const void *target =
	    addr + ud_insn_len(ud) + ud_operand_lval(&ud->operand[0]);
	if (ud->mnemonic == UD_Icall &&
	    (target == mcount || target == ftrace_trace_function))
		return true;
	return false;
}
#else /* !CONFIG_FTRACE */
static bool is_mcount_call(struct ud *ud, const unsigned char *addr)
{
	return false;
}
#endif /* CONFIG_FTRACE */

static bool is_nop(struct ud *ud, const unsigned char *addr)
{
	switch (ud->mnemonic) {
	case UD_Inop:
		return true;
	case UD_Imov:
	case UD_Ixchg:
		return ud->dis_mode == 32 &&
		    ud->operand[0].type == UD_OP_REG &&
		    ud->operand[1].type == UD_OP_REG &&
		    ud->operand[2].type == UD_NONE &&
		    ud->operand[0].base == ud->operand[1].base;
	case UD_Ilea:
		return ud->dis_mode == 32 &&
		    ud->operand[0].type == UD_OP_REG &&
		    ud->operand[1].type == UD_OP_MEM &&
		    ((ud->operand[1].base == ud->operand[0].base &&
		      ud->operand[1].index == UD_NONE) ||
		     (ud->operand[1].base == UD_NONE &&
		      ud->operand[1].index == ud->operand[0].base &&
		      ud->operand[1].scale == 0)) &&
		    ud_operand_lval(&ud->operand[1]) == 0 &&
		    ud->operand[2].type == UD_NONE;
	case UD_Ijmp:
		/* jmp +N followed by N 0x90s is a NOP */
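		/* e.g. "eb 01 90" (jmp .+1; nop): padding sequences of this
		 * shape are emitted by some compilers and assemblers for
		 * alignment. */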
		if (ud->operand[0].type == UD_OP_JIMM &&
		    ud->operand[1].type == UD_NONE &&
		    ud->operand[2].type == UD_NONE &&
		    ud_operand_len(&ud->operand[0]) == 1) {
			/* struct ud is big so we avoid putting it on the stack.
			 * This is safe because we are holding module_mutex. */
			static struct ud temp_ud;
			int len = ud_operand_lval(&ud->operand[0]);
			int i;

			if (len < 0 || len > 13)
				return false;

			initialize_ksplice_ud(&temp_ud);
			ud_set_input_hook(&temp_ud, next_run_byte);
			ud_set_user_opaque_data(&temp_ud,
						(unsigned char *)addr +
						ud_insn_len(ud));

			for (i = 0; i < len; i++) {
				if (ud_disassemble(&temp_ud) == 0)
					return false;
				if (temp_ud.mnemonic != UD_Inop)
					return false;
			}
			return true;
		}
		return false;
	default:
		return false;
	}
}

static bool is_unconditional_jump(struct ud *ud)
{
	switch (ud->mnemonic) {
	case UD_Ijmp:
	case UD_Iret:
	case UD_Iretf:
	case UD_Iiretw:
	case UD_Iiretd:
	case UD_Iiretq:
	case UD_Isysexit:
	case UD_Isysret:
	case UD_Isyscall:
	case UD_Isysenter:
		return true;
	default:
		return false;
	}
}

static uint8_t ud_operand_len(struct ud_operand *operand)
{
	if (operand->type == UD_OP_MEM)
		return operand->offset / 8;
	if (operand->type == UD_OP_REG)
		return 0;
	return operand->size / 8;
}

static uint8_t ud_prefix_len(struct ud *ud)
{
	int len = ud_insn_len(ud);
	int i;
	for (i = 0; i < ARRAY_SIZE(ud->operand); i++)
		len -= ud_operand_len(&ud->operand[i]);
	return len;
}
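
/* For example, "e9 44 33 22 11" (jmp rel32) is five bytes with a single
 * 4-byte immediate operand, so ud_operand_len() returns 4 and
 * ud_prefix_len() returns 1, the length of the prefixes and opcode. */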

static long ud_operand_lval(struct ud_operand *operand)
{
	switch (operand->type == UD_OP_MEM ? operand->offset : operand->size) {
	case 8:
		return operand->lval.sbyte;
	case 16:
		return operand->lval.sword;
	case 32:
		return operand->lval.sdword;
	case 64:
		return operand->lval.sqword;
	default:
		return 0;
	}
}

static int next_run_byte(struct ud *ud)
{
	unsigned char byte;
	if (probe_kernel_read(&byte, ud_get_user_opaque_data(ud), 1) == -EFAULT)
		return UD_EOI;
	ud_set_user_opaque_data(ud, ud_get_user_opaque_data(ud) + 1);
	return byte;
}
#endif /* !CONFIG_FUNCTION_DATA_SECTIONS */

static struct ksplice_symbol trampoline_symbol = {
	.name = NULL,
	.label = "<trampoline>",
};

static const struct ksplice_reloc_howto trampoline_howto = {
	.type = KSPLICE_HOWTO_RELOC,
	.pcrel = 1,
	.size = 4,
	.dst_mask = 0xffffffffL,
	.rightshift = 0,
};

static const struct ksplice_reloc trampoline_reloc = {
	.symbol = &trampoline_symbol,
	.howto = &trampoline_howto,
};

static abort_t trampoline_target(struct ksplice_mod_change *change,
				 unsigned long addr, unsigned long *new_addr)
{
	abort_t ret;
	unsigned char byte;

	if (probe_kernel_read(&byte, (void *)addr, sizeof(byte)) == -EFAULT)
		return NO_MATCH;

	if (byte != 0xe9)
		return NO_MATCH;

	ret = read_reloc_value(change, &trampoline_reloc, addr + 1, new_addr);
	if (ret != OK)
		return ret;

	*new_addr += addr + 1;
	return OK;
}

static abort_t prepare_trampoline(struct ksplice_mod_change *change,
				  struct ksplice_patch *p)
{
	p->size = 5;
	((unsigned char *)p->contents)[0] = 0xe9;
	return write_reloc_value(change, &trampoline_reloc,
				 (unsigned long)p->contents + 1,
				 p->repladdr - (p->oldaddr + 1));
}

static abort_t handle_paravirt(struct ksplice_mod_change *change,
			       unsigned long pre_addr, unsigned long run_addr,
			       int *matched)
{
	unsigned char run[5], pre[5];
	*matched = 0;

	if (probe_kernel_read(&run, (void *)run_addr, sizeof(run)) == -EFAULT ||
	    probe_kernel_read(&pre, (void *)pre_addr, sizeof(pre)) == -EFAULT)
		return OK;

	if ((run[0] == 0xe8 && pre[0] == 0xe8) ||
	    (run[0] == 0xe9 && pre[0] == 0xe9))
		if (run_addr + 1 + *(int32_t *)&run[1] ==
		    pre_addr + 1 + *(int32_t *)&pre[1])
			*matched = 5;
	return OK;
}
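
/* 0xe8 and 0xe9 are the call rel32 and jmp rel32 opcodes inserted by paravirt
 * patching; two such instructions are treated as matching when they transfer
 * control to the same absolute target, even though their displacement bytes
 * differ because the instructions sit at different addresses. */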

static bool valid_stack_ptr(const struct thread_info *tinfo, const void *p)
{
	return p > (const void *)tinfo
	    && p <= (const void *)tinfo + THREAD_SIZE - sizeof(long);
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static bool virtual_address_mapped(unsigned long addr)
{
	pgd_t *pgd;
#ifdef pud_page
	pud_t *pud;
#endif /* pud_page */
	pmd_t *pmd;
	pte_t *pte;

#ifdef KSPLICE_STANDALONE
	if (!bootstrapped)
		return true;
#endif /* KSPLICE_STANDALONE */

	pgd = pgd_offset_k(addr);
	if (!pgd_present(*pgd))
		return false;

#ifdef pud_page
	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return false;

	pmd = pmd_offset(pud, addr);
#else /* !pud_page */
	pmd = pmd_offset(pgd, addr);
#endif /* pud_page */

	if (!pmd_present(*pmd))
		return false;

	if (pmd_large(*pmd))
		return true;

	pte = pte_offset_kernel(pmd, addr);
	if (!pte_present(*pte))
		return false;

	return true;
}
#endif /* LINUX_VERSION_CODE */
829 #endif /* LINUX_VERSION_CODE */