/* Copyright (C) 2007-2009  Ksplice, Inc.
 * Authors: Jeff Arnold, Anders Kaseorg, Tim Abbott
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
#ifdef CONFIG_X86_64
extern const char thread_return[];
EXTRACT_SYMBOL(thread_return);
#define KSPLICE_IP(x) ((unsigned long)thread_return)
#else /* !CONFIG_X86_64 */
#define KSPLICE_IP(x) ((x)->thread.ip)
#endif /* CONFIG_X86_64 */
#define KSPLICE_SP(x) ((x)->thread.sp)
#else /* LINUX_VERSION_CODE < */
/* faca62273b602ab482fb7d3d940dbf41ef08b00e was after 2.6.24 */
#ifdef CONFIG_X86_64
extern const char thread_return[];
EXTRACT_SYMBOL(thread_return);
#define KSPLICE_IP(x) ((unsigned long)thread_return)
#define KSPLICE_SP(x) ((x)->thread.rsp)
#else /* !CONFIG_X86_64 */
#define KSPLICE_IP(x) ((x)->thread.eip)
#define KSPLICE_SP(x) ((x)->thread.esp)
#endif /* CONFIG_X86_64 */
#endif /* LINUX_VERSION_CODE */
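
/* KSPLICE_IP and KSPLICE_SP extract the saved instruction and stack
 * pointers of a task that is not running.  On x86_64, a sleeping task
 * always resumes at thread_return in schedule(), so KSPLICE_IP is that
 * constant address; on 32-bit kernels the resume address is saved in the
 * thread struct.  The field names moved across kernel versions, hence
 * the version checks above. */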
#ifndef CONFIG_FUNCTION_DATA_SECTIONS
#include "udis86.h"
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) && defined(CONFIG_FTRACE)
/* 606576ce816603d9fe1fb453a88bc6eea16ca709 was after 2.6.27 */
#define CONFIG_FUNCTION_TRACER 1
#endif /* LINUX_VERSION_CODE && CONFIG_FTRACE */
#ifdef CONFIG_FUNCTION_TRACER
#include <asm/ftrace.h>
#include <linux/ftrace.h>

extern ftrace_func_t ftrace_trace_function;
EXTRACT_SYMBOL(ftrace_trace_function);
#endif /* CONFIG_FUNCTION_TRACER */
#define N_BITS(n) ((n) < sizeof(long) * 8 ? ~(~0L << (n)) : ~0L)
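
/* N_BITS(n) is a mask of the low n bits: N_BITS(8) == 0xff and
 * N_BITS(32) == 0xffffffff.  The conditional avoids shifting ~0L by the
 * full width of long, which would be undefined behavior. */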
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
/* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
#if defined(_I386_BUG_H) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) || \
			     defined(CONFIG_DEBUG_BUGVERBOSE)) && \
    !defined(do_each_thread_ve)		/* OpenVZ */
/* 38326f786cf4529a86b1ccde3aa17f4fa7e8472a was after 2.6.10 */
/* ud2 means BUG().  On old i386 kernels, it is followed
   by 2 bytes and then a 4-byte relocation; and is not
   disassembler-friendly. */
struct bug_frame {
	unsigned char ud2[2];
	unsigned short line;
	char *filename;
} __attribute__((packed));
#define KSPLICE_USE_BUG_FRAME
#elif defined(__ASM_X8664_BUG_H)
#define KSPLICE_USE_BUG_FRAME
#endif /* _I386_BUG_H */
#endif /* LINUX_VERSION_CODE */
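
/* Run-pre comparison: disassemble the section as compiled ("pre") and as
 * it exists in the running kernel ("run") one instruction at a time,
 * accepting benign differences such as nop padding, mcount call sites,
 * and relocated operands, and rejecting anything else. */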
static abort_t compare_instructions(struct ksplice_mod_change *change,
				    struct ksplice_section *sect,
				    const struct ksplice_reloc **fingerp,
				    const unsigned char *run_start,
				    const unsigned char *run,
				    const unsigned char *pre, struct ud *run_ud,
				    struct ud *pre_ud, enum run_pre_mode mode);
static abort_t compare_operands(struct ksplice_mod_change *change,
				struct ksplice_section *sect,
				const struct ksplice_reloc **fingerp,
				const unsigned char *run_start,
				const unsigned char *run,
				const unsigned char *pre, struct ud *run_ud,
				struct ud *pre_ud, int opnum,
				enum run_pre_mode mode);
static uint8_t ud_operand_len(struct ud_operand *operand);
static uint8_t ud_prefix_len(struct ud *ud);
static long ud_operand_lval(struct ud_operand *operand);
static int next_run_byte(struct ud *ud);
static bool is_nop(struct ud *ud, const unsigned char *addr);
static bool is_unconditional_jump(struct ud *ud);
static bool is_mcount_call(struct ud *ud, const unsigned char *addr);
static void initialize_ksplice_ud(struct ud *ud);
static abort_t arch_run_pre_cmp(struct ksplice_mod_change *change,
				struct ksplice_section *sect,
				unsigned long run_addr,
				struct list_head *safety_records,
				enum run_pre_mode mode)
{
	abort_t ret;
	const unsigned char *run, *pre, *run_start, *pre_start, *safety_start;
	/* struct ud is big so we avoid putting it on the stack.  This
	 * is safe because we are holding module_mutex. */
	static struct ud pre_ud, run_ud;
	const unsigned char **match_map;
	const struct ksplice_reloc *finger;
	unsigned long pre_offset, run_offset;
	bool run_unconditional = false;
	bool pre_nop = true, run_nop = true;
	sect->unmatched = 0;

	pre_start = (const unsigned char *)sect->address;
	run_start = (const unsigned char *)run_addr;

	finger = init_reloc_search(change, sect);

	run = run_start;
	pre = pre_start;

	initialize_ksplice_ud(&pre_ud);
	ud_set_input_buffer(&pre_ud, (unsigned char *)pre, sect->size);

	initialize_ksplice_ud(&run_ud);
	ud_set_input_hook(&run_ud, next_run_byte);
	ud_set_user_opaque_data(&run_ud, (unsigned char *)run_addr);
	safety_start = run_start;

	match_map = vmalloc(sizeof(*match_map) * sect->size);
	if (match_map == NULL)
		return OUT_OF_MEMORY;
	memset(match_map, 0, sizeof(*match_map) * sect->size);
	match_map[0] = run_start;
	sect->match_map = match_map;
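
	/* match_map[pre_offset] records the run address that matched the
	 * instruction at pre_offset; jump targets within the section are
	 * later checked to land at corresponding places on both sides. */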

	while (1) {
		if (pre_nop && ud_disassemble(&pre_ud) == 0) {
			/* Ran out of pre bytes to match; we're done! */
			unsigned long safety_offset = run - safety_start;
			if (sect->unmatched != 0) {
				if (mode == RUN_PRE_DEBUG)
					ksdebug(change, "%d unmatched jumps\n",
						sect->unmatched);
				ret = NO_MATCH;
				goto out;
			}
			ret = create_safety_record(change, sect, safety_records,
						   (unsigned long)safety_start,
						   safety_offset);
			goto out;
		}

		if (run_nop && ud_disassemble(&run_ud) == 0) {
			ret = NO_MATCH;
			goto out;
		}
		pre_nop = is_nop(&pre_ud, pre) || is_mcount_call(&pre_ud, pre);
		run_nop = is_nop(&run_ud, run) || is_mcount_call(&run_ud, run);
		if (pre_nop && !run_nop) {
			if (mode == RUN_PRE_DEBUG) {
				ksdebug(change, "| nop: ");
				print_bytes(change, run, 0, pre,
					    ud_insn_len(&pre_ud));
			}
			pre += ud_insn_len(&pre_ud);
			continue;
		}
		if (run_nop && !pre_nop) {
			if (mode == RUN_PRE_DEBUG) {
				ksdebug(change, "| nop: ");
				print_bytes(change, run, ud_insn_len(&run_ud),
					    pre, 0);
			}
			run += ud_insn_len(&run_ud);
			continue;
		}
		if (run_nop && pre_nop) {
			ret = compare_instructions(change, sect, &finger,
						   run_start, run, pre, &run_ud,
						   &pre_ud, RUN_PRE_SILENT);
			if (ret == OK) {
				if (mode == RUN_PRE_DEBUG) {
					ksdebug(change, "| nop: ");
					print_bytes(change, run,
						    ud_insn_len(&run_ud), pre,
						    ud_insn_len(&pre_ud));
				}
				run += ud_insn_len(&run_ud);
				pre += ud_insn_len(&pre_ud);
				continue;
			} else if (ret != NO_MATCH && ret != OK) {
				goto out;
			}
		}
		pre_offset = pre - pre_start;

		if (match_map[pre_offset] == NULL) {
			match_map[pre_offset] = run;
		} else if (match_map[pre_offset] == run) {
			sect->unmatched--;
		} else {
			/* There is a discontinuity in the match map.
			   Check that the last instruction was an
			   unconditional change of control */
			if (!run_unconditional) {
				ksdebug(change, "<--[No unconditional change "
					"of control at control transfer point "
					"%lx]\n", pre_offset);
				ret = NO_MATCH;
				goto out;
			}

			if (mode == RUN_PRE_DEBUG)
				ksdebug(change, " [Moving run pointer for %lx "
					"from %lx to %lx]\n", pre_offset,
					(unsigned long)(run - run_start),
					(unsigned long)(match_map[pre_offset] -
							run_start));

			/* Create a safety_record for the block just matched */
			ret = create_safety_record(change, sect, safety_records,
						   (unsigned long)safety_start,
						   run - safety_start);
			if (ret != OK)
				goto out;

			/* We re-initialize the run ud structure because
			   it may have cached upcoming bytes */
			run = match_map[pre_offset];
			initialize_ksplice_ud(&run_ud);
			ud_set_input_hook(&run_ud, next_run_byte);
			ud_set_user_opaque_data(&run_ud, (unsigned char *)run);
			safety_start = run;

			if (ud_disassemble(&run_ud) == 0) {
				ret = NO_MATCH;
				goto out;
			}
			sect->unmatched--;
		}
		run_offset = run - run_start;
		run_unconditional = is_unconditional_jump(&run_ud);
		run_nop = true;
		pre_nop = true;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) && \
    defined(KSPLICE_USE_BUG_FRAME)
		/* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
		if (run_ud.mnemonic == pre_ud.mnemonic &&
		    run_ud.mnemonic == UD_Iud2) {
			const struct bug_frame
			    *pre_bug = (const struct bug_frame *)pre,
			    *run_bug = (const struct bug_frame *)run;
			const struct ksplice_reloc *r;
			ret = lookup_reloc(change, &finger,
					   (unsigned long)&pre_bug->filename,
					   &r);
			if (ret == NO_MATCH) {
				if (mode == RUN_PRE_INITIAL)
					ksdebug(change, "Unrecognized ud2\n");
				goto out;
			}
			if (ret != OK)
				goto out;
			ret = handle_reloc(change, sect, r,
					   (unsigned long)&run_bug->filename,
					   mode);
			if (ret != OK)
				goto out;
			/* If there's a relocation, then it's a BUG? */
			if (mode == RUN_PRE_DEBUG) {
				ksdebug(change, "[BUG?: ");
				print_bytes(change,
					    run + sizeof(run_bug->ud2),
					    sizeof(*run_bug) -
					    sizeof(run_bug->ud2),
					    pre + sizeof(pre_bug->ud2),
					    sizeof(*pre_bug) -
					    sizeof(pre_bug->ud2));
				ksdebug(change, "] ");
			}
			pre += sizeof(*pre_bug);
			run += sizeof(*run_bug);
			ud_input_skip(&run_ud,
				      sizeof(*run_bug) - sizeof(run_bug->ud2));
			ud_input_skip(&pre_ud,
				      sizeof(*pre_bug) - sizeof(pre_bug->ud2));
			continue;
		}
#endif /* LINUX_VERSION_CODE && KSPLICE_USE_BUG_FRAME */
#ifdef CONFIG_XEN
		if (run_ud.mnemonic == pre_ud.mnemonic &&
		    run_ud.mnemonic == UD_Iud2) {
			unsigned char bytes[3];
			unsigned char prefix[3] = { 0x78, 0x65, 0x6e };
			if (probe_kernel_read(bytes, (void *)run + 2, 3) !=
			    -EFAULT && pre - pre_start < sect->size &&
			    memcmp(bytes, prefix, 3) == 0 &&
			    memcmp(pre + 2, prefix, 3) == 0) {
				/* Exception for XEN_EMULATE_PREFIX */
				run += 3;
				pre += 3;
				ud_input_skip(&run_ud, 3);
				ud_input_skip(&pre_ud, 3);
			}
		}
#endif /* CONFIG_XEN */
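
		/* The three bytes tested in the CONFIG_XEN block above are
		 * ASCII "xen": XEN_EMULATE_PREFIX marks an instruction for
		 * emulation by the hypervisor as "ud2a; .ascii 'xen'", so
		 * both sides skip the prefix and compare the instruction
		 * that follows. */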
		ret = compare_instructions(change, sect, &finger, run_start,
					   run, pre, &run_ud, &pre_ud, mode);
		if (ret != OK)
			goto out;
		run += ud_insn_len(&run_ud);
		pre += ud_insn_len(&pre_ud);
	}
out:
	if (ret != OK || mode != RUN_PRE_FINAL) {
		vfree(match_map);
		sect->match_map = NULL;
	}
	return ret;
}
static abort_t compare_instructions(struct ksplice_mod_change *change,
				    struct ksplice_section *sect,
				    const struct ksplice_reloc **fingerp,
				    const unsigned char *run_start,
				    const unsigned char *run,
				    const unsigned char *pre, struct ud *run_ud,
				    struct ud *pre_ud, enum run_pre_mode mode)
{
	abort_t ret;
	int i;
	bool found_bug_entry = false;
	const unsigned char *pre_start = (const unsigned char *)sect->address;
	unsigned long pre_offset = pre - pre_start;
	const struct ksplice_reloc *r;
	if (mode == RUN_PRE_DEBUG) {
		ksdebug(change, "| ");
		print_bytes(change, run, ud_insn_len(run_ud), pre,
			    ud_insn_len(pre_ud));
	}

	if (run_ud->mnemonic != pre_ud->mnemonic) {
		if (mode == RUN_PRE_DEBUG)
			ksdebug(change, "mnemonic mismatch: %s %s\n",
				ud_lookup_mnemonic(run_ud->mnemonic),
				ud_lookup_mnemonic(pre_ud->mnemonic));
		return NO_MATCH;
	}

	if (run_ud->mnemonic == UD_Iinvalid) {
		ksdebug(change, "Unrecognized opcode at %s+%lx\n",
			sect->symbol->label, pre_offset);
		return UNEXPECTED;
	}

	ret = lookup_reloc(change, fingerp, (unsigned long)pre, &r);
	if (ret == NO_MATCH)
		;	/* no relocation attached to this instruction */
	else if (ret != OK)
		return ret;
	else if (r->howto->size != 0)
		;	/* operand-sized; compare_operands will check it */
	else {
		if (r->howto->type == KSPLICE_HOWTO_BUG)
			found_bug_entry = true;

		if (mode == RUN_PRE_DEBUG) {
			if (r->howto->type == KSPLICE_HOWTO_EXTABLE)
				ksdebug(change, "[ex] ");
			if (r->howto->type == KSPLICE_HOWTO_BUG)
				ksdebug(change, "[bug] ");
			if (r->howto->type == KSPLICE_HOWTO_SYMBOL)
				ksdebug(change, "[sym] ");
		}
		ret = handle_reloc(change, sect, r, (unsigned long)run, mode);
		if (ret != OK)
			return ret;
	}
#if defined(CONFIG_X86_64) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
/* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
#else /* !CONFIG_X86_64 || LINUX_VERSION_CODE >= */
#ifndef do_each_thread_ve		/* OpenVZ */
	if (run_ud->mnemonic == UD_Iud2 && !found_bug_entry) {
		if (strcmp(change->target_name, "kvm_intel") == 0 ||
		    strcmp(change->target_name, "kvm_amd") == 0) {
			/* KVM has ud2a bugs without a bug table entry. */
			if (mode == RUN_PRE_DEBUG)
				ksdebug(change, "[kvm ud2]");
		} else {
			ksdebug(change, "Unexpected ud2\n");
			return NO_MATCH;
		}
	}
#endif /* do_each_thread_ve */
#endif /* CONFIG_X86_64 && LINUX_VERSION_CODE */
	for (i = 0; i < ARRAY_SIZE(run_ud->operand); i++) {
		ret = compare_operands(change, sect, fingerp, run_start, run,
				       pre, run_ud, pre_ud, i, mode);
		if (ret != OK)
			return ret;
	}
	return OK;
}
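
/* udis86 decodes up to three operands per instruction; compare_operands
 * checks the opnum'th operand of the run and pre instructions against
 * each other. */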
static abort_t compare_operands(struct ksplice_mod_change *change,
				struct ksplice_section *sect,
				const struct ksplice_reloc **fingerp,
				const unsigned char *run_start,
				const unsigned char *run,
				const unsigned char *pre, struct ud *run_ud,
				struct ud *pre_ud, int opnum,
				enum run_pre_mode mode)
{
	abort_t ret;
	int i;
	const unsigned char *pre_start = (const unsigned char *)sect->address;
	unsigned long pre_offset = pre - pre_start;
	unsigned long run_offset = run - run_start;
	struct ud_operand *run_op = &run_ud->operand[opnum];
	struct ud_operand *pre_op = &pre_ud->operand[opnum];
	uint8_t run_off = ud_prefix_len(run_ud);
	uint8_t pre_off = ud_prefix_len(pre_ud);
	const unsigned char **match_map = sect->match_map;
	const struct ksplice_reloc *r;
	for (i = 0; i < opnum; i++) {
		run_off += ud_operand_len(&run_ud->operand[i]);
		pre_off += ud_operand_len(&pre_ud->operand[i]);
	}

	if (run_op->type != pre_op->type) {
		if (mode == RUN_PRE_DEBUG)
			ksdebug(change, "type mismatch: %d %d\n", run_op->type,
				pre_op->type);
		return NO_MATCH;
	}
	if (run_op->base != pre_op->base) {
		if (mode == RUN_PRE_DEBUG)
			ksdebug(change, "base mismatch: %d %d\n", run_op->base,
				pre_op->base);
		return NO_MATCH;
	}
	if (run_op->index != pre_op->index) {
		if (mode == RUN_PRE_DEBUG)
			ksdebug(change, "index mismatch: %d %d\n",
				run_op->index, pre_op->index);
		return NO_MATCH;
	}
	if (run_op->type == UD_OP_PTR &&
	    run_op->lval.ptr.seg != pre_op->lval.ptr.seg) {
		if (mode == RUN_PRE_DEBUG)
			ksdebug(change, "segment mismatch: %d %d\n",
				run_op->lval.ptr.seg, pre_op->lval.ptr.seg);
		return NO_MATCH;
	}
	if (ud_operand_len(run_op) == 0 && ud_operand_len(pre_op) == 0)
		return OK;
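
	/* From here on the operands carry immediate or displacement bytes:
	 * they must match via a known relocation, byte for byte, or as
	 * jump targets that correspond under match_map. */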
	ret = lookup_reloc(change, fingerp, (unsigned long)(pre + pre_off), &r);
	if (ret == OK) {
		struct ksplice_reloc run_reloc = *r;
		struct ksplice_reloc_howto run_howto = *r->howto;
		unsigned int run_reloc_len = ud_operand_len(run_op);
		unsigned int pre_reloc_len = ud_operand_len(pre_op);

		if (run_op->type == UD_OP_PTR) {
			/* Adjust for reloc length != operand length for
			   instructions that take a segment:offset operand */
			run_reloc_len -= 2;
			pre_reloc_len -= 2;
		}

		run_reloc.howto = &run_howto;
		if (r->howto->size != pre_reloc_len) {
			ksdebug(change, "ksplice_h: run-pre: reloc size %d "
				"differs from disassembled size %d\n",
				r->howto->size, pre_reloc_len);
			return NO_MATCH;
		}
		if (r->howto->size != run_reloc_len &&
		    (r->howto->dst_mask != N_BITS(r->howto->size * 8) ||
		     r->howto->rightshift != 0)) {
			/* Reloc types unsupported with differing reloc sizes */
			ksdebug(change, "ksplice_h: reloc: invalid flags for a "
				"relocation with size changed\n");
			ksdebug(change, "%ld %u\n", r->howto->dst_mask,
				r->howto->rightshift);
			return UNEXPECTED;
		}
		/* adjust for differing relocation size */
		run_howto.size = run_reloc_len;
		if (r->howto->size != run_howto.size)
			run_howto.dst_mask = N_BITS(run_howto.size * 8);
		run_reloc.insn_addend += pre_reloc_len - run_reloc_len;
		ret = handle_reloc(change, sect, &run_reloc,
				   (unsigned long)(run + run_off), mode);
		if (ret != OK) {
			if (mode == RUN_PRE_DEBUG)
				ksdebug(change, "Matching failure at offset "
					"%lx\n", pre_offset);
			return ret;
		}
		/* This operand is a successfully processed relocation */
		return OK;
	} else if (ret != NO_MATCH) {
		return ret;
	}
	if (pre_op->type == UD_OP_JIMM) {
		/* Immediate jump without a relocation */
		const unsigned char *pre_target = pre + ud_insn_len(pre_ud) +
		    ud_operand_lval(pre_op);
		const unsigned char *run_target = run + ud_insn_len(run_ud) +
		    ud_operand_lval(run_op);
		if (pre_target >= pre_start &&
		    pre_target < pre_start + sect->size) {
			/* Jump within the current function.
			   Check it's to a corresponding place */
			unsigned long new_pre_offset = pre_target - pre_start;
			unsigned long new_run_offset = run_target - run_start;
			if (mode == RUN_PRE_DEBUG)
				ksdebug(change, "[Jumps: pre=%lx run=%lx "
					"pret=%lx runt=%lx] ", pre_offset,
					run_offset, new_pre_offset,
					new_run_offset);
			if (match_map[pre_target - pre_start] != NULL &&
			    match_map[pre_target - pre_start] != run_target) {
				ksdebug(change, "<--[Jumps to nonmatching "
					"locations]\n");
				return NO_MATCH;
			} else if (match_map[pre_target - pre_start] == NULL) {
				match_map[pre_target - pre_start] = run_target;
				sect->unmatched++;
			}
			return OK;
		} else if (pre_target == run_target) {
			/* Paravirt-inserted pcrel jump; OK! */
			return OK;
		} else {
			if (mode == RUN_PRE_DEBUG) {
				ksdebug(change, "<--Different operands!\n");
				ksdebug(change, "%lx %lx %lx %lx %x %lx %lx "
					"%lx\n", (unsigned long)pre_start,
					(unsigned long)pre_target,
					(unsigned long)pre_start + sect->size,
					(unsigned long)pre, ud_insn_len(pre_ud),
					sect->size, ud_operand_lval(pre_op),
					(unsigned long)run_target);
			}
			return NO_MATCH;
		}
	} else if (ud_operand_len(pre_op) == ud_operand_len(run_op) &&
		   memcmp(pre + pre_off, run + run_off,
			  ud_operand_len(run_op)) == 0) {
		return OK;
	} else {
		if (mode == RUN_PRE_DEBUG)
			ksdebug(change, "<--Different operands!\n");
		return NO_MATCH;
	}
}
static void initialize_ksplice_ud(struct ud *ud)
{
	ud_init(ud);
	ud_set_mode(ud, BITS_PER_LONG);
	ud_set_syntax(ud, NULL);
	ud_set_pc(ud, 0);
	ud_set_vendor(ud, UD_VENDOR_ANY);
}
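
/* The disassembler runs in the kernel's own bitness; no output syntax is
 * set because only structural decoding is needed, and no vendor is
 * assumed. */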
#ifdef CONFIG_FUNCTION_TRACER
static bool is_mcount_call(struct ud *ud, const unsigned char *addr)
{
	const void *target =
	    addr + ud_insn_len(ud) + ud_operand_lval(&ud->operand[0]);
	if (ud->mnemonic == UD_Icall &&
	    (target == mcount || target == ftrace_trace_function))
		return true;
	return false;
}
#else /* !CONFIG_FUNCTION_TRACER */
static bool is_mcount_call(struct ud *ud, const unsigned char *addr)
{
	return false;
}
#endif /* CONFIG_FUNCTION_TRACER */
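
/* With -pg, gcc inserts a call to mcount at each function entry, and
 * ftrace may redirect that call to the active tracer at run time.
 * Treating both forms as nops lets run-pre matching succeed whether or
 * not tracing is enabled. */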
static bool is_nop(struct ud *ud, const unsigned char *addr)
{
	switch (ud->mnemonic) {
	case UD_Inop:
		return true;
	case UD_Imov:
	case UD_Ixchg:
		return ud->dis_mode == 32 &&
		    ud->operand[0].type == UD_OP_REG &&
		    ud->operand[1].type == UD_OP_REG &&
		    ud->operand[2].type == UD_NONE &&
		    ud->operand[0].base == ud->operand[1].base;
	case UD_Ilea:
		return ud->dis_mode == 32 &&
		    ud->operand[0].type == UD_OP_REG &&
		    ud->operand[1].type == UD_OP_MEM &&
		    ((ud->operand[1].base == ud->operand[0].base &&
		      ud->operand[1].index == UD_NONE) ||
		     (ud->operand[1].base == UD_NONE &&
		      ud->operand[1].index == ud->operand[0].base &&
		      ud->operand[1].scale == 0)) &&
		    ud_operand_lval(&ud->operand[1]) == 0 &&
		    ud->operand[2].type == UD_NONE;
	case UD_Ijmp:
		/* jmp +N followed by N 0x90s is a NOP */
		if (ud->operand[0].type == UD_OP_JIMM &&
		    ud->operand[1].type == UD_NONE &&
		    ud->operand[2].type == UD_NONE &&
		    ud_operand_len(&ud->operand[0]) == 1) {
			/* struct ud is big so we avoid putting it on the
			 * stack.  This is safe because we are holding
			 * module_mutex. */
			static struct ud temp_ud;
			int len = ud_operand_lval(&ud->operand[0]);
			int i;

			if (len < 0 || len > 13)
				return false;

			initialize_ksplice_ud(&temp_ud);
			ud_set_input_hook(&temp_ud, next_run_byte);
			ud_set_user_opaque_data(&temp_ud,
						(unsigned char *)addr +
						ud_insn_len(ud));

			for (i = 0; i < len; i++) {
				if (ud_disassemble(&temp_ud) == 0)
					return false;
				if (temp_ud.mnemonic != UD_Inop)
					return false;
			}
			return true;
		}
		return false;
	default:
		return false;
	}
}
static bool is_unconditional_jump(struct ud *ud)
{
	switch (ud->mnemonic) {
	case UD_Ijmp:
	case UD_Iret:
	case UD_Iretf:
	case UD_Iiretw:
	case UD_Iiretd:
	case UD_Iiretq:
	case UD_Isysexit:
	case UD_Isysret:
		return true;
	default:
		return false;
	}
}
static uint8_t ud_operand_len(struct ud_operand *operand)
{
	if (operand->type == UD_OP_MEM)
		return operand->offset / 8;
	if (operand->type == UD_OP_REG)
		return 0;
	return operand->size / 8;
}
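
/* ud_operand_len counts only the immediate or displacement bytes that an
 * operand contributes to the encoding; register operands are encoded in
 * the opcode and modrm bytes, so they contribute none. */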
static uint8_t ud_prefix_len(struct ud *ud)
{
	int i;
	int len = ud_insn_len(ud);
	for (i = 0; i < ARRAY_SIZE(ud->operand); i++)
		len -= ud_operand_len(&ud->operand[i]);
	return len;
}
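
/* Everything that is not operand bytes (prefixes, opcode, modrm, sib) is
 * treated as the instruction's "prefix" when computing operand offsets. */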
static long ud_operand_lval(struct ud_operand *operand)
{
	switch (operand->type == UD_OP_MEM ? operand->offset : operand->size) {
	case 8:
		return operand->lval.sbyte;
	case 16:
		return operand->lval.sword;
	case 32:
		return operand->lval.sdword;
	case 64:
		return operand->lval.sqword;
	default:
		return 0;
	}
}
static int next_run_byte(struct ud *ud)
{
	unsigned char byte;
	if (probe_kernel_read(&byte, ud_get_user_opaque_data(ud), 1) == -EFAULT)
		return UD_EOI;
	ud_set_user_opaque_data(ud, ud_get_user_opaque_data(ud) + 1);
	return byte;
}
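
/* Feeding the disassembler one probe_kernel_read byte at a time means
 * that running off the end of mapped memory shows up as end of input
 * rather than as a kernel fault. */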
#endif /* !CONFIG_FUNCTION_DATA_SECTIONS */
static struct ksplice_symbol trampoline_symbol = {
	.name = NULL,
	.label = "<trampoline>",
};

static const struct ksplice_reloc_howto trampoline_howto = {
	.type = KSPLICE_HOWTO_RELOC,
	.pcrel = 1,
	.size = 4,
	.dst_mask = 0xffffffffL,
	.signed_addend = 1,
};

static const struct ksplice_reloc trampoline_reloc = {
	.symbol = &trampoline_symbol,
	.insn_addend = -4,
	.howto = &trampoline_howto,
};
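
/* The trampoline is a 5-byte x86 jmp rel32: an 0xe9 opcode followed by a
 * 32-bit displacement, described by the pc-relative 4-byte howto above. */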
static abort_t trampoline_target(struct ksplice_mod_change *change,
				 unsigned long addr, unsigned long *new_addr)
{
	abort_t ret;
	unsigned char byte;

	if (probe_kernel_read(&byte, (void *)addr, sizeof(byte)) == -EFAULT)
		return NO_MATCH;

	if (byte != 0xe9)
		return NO_MATCH;

	ret = read_reloc_value(change, &trampoline_reloc, addr + 1, new_addr);
	if (ret != OK)
		return ret;

	*new_addr += addr + 1;
	return OK;
}
static abort_t prepare_trampoline(struct ksplice_mod_change *change,
				  struct ksplice_patch *p)
{
	((unsigned char *)p->contents)[0] = 0xe9;
	return write_reloc_value(change, &trampoline_reloc,
				 (unsigned long)p->contents + 1,
				 p->repladdr - (p->oldaddr + 1));
}
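
/* For example, patching oldaddr to jump to repladdr writes 0xe9 at
 * oldaddr and a displacement at oldaddr + 1 chosen so that execution
 * continues at repladdr; a jmp rel32 displacement is relative to the
 * address of the following instruction, oldaddr + 5. */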
static abort_t handle_paravirt(struct ksplice_mod_change *change,
			       unsigned long pre_addr, unsigned long run_addr,
			       int *matched)
{
	unsigned char run[5], pre[5];
	*matched = 0;

	if (probe_kernel_read(&run, (void *)run_addr, sizeof(run)) == -EFAULT ||
	    probe_kernel_read(&pre, (void *)pre_addr, sizeof(pre)) == -EFAULT)
		return OK;

	if ((run[0] == 0xe8 && pre[0] == 0xe8) ||
	    (run[0] == 0xe9 && pre[0] == 0xe9))
		if (run_addr + 1 + *(int32_t *)&run[1] ==
		    pre_addr + 1 + *(int32_t *)&pre[1])
			*matched = 5;
	return OK;
}
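
/* Paravirt patching rewrites call sites at boot, so the run bytes need
 * not equal the compiled bytes.  Two call (0xe8) or jmp (0xe9)
 * instructions still match if their targets agree; comparing
 * addr + 1 + rel32 on each side is equivalent to comparing the real
 * targets, which differ from it by the same 4 bytes. */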
static bool valid_stack_ptr(const struct thread_info *tinfo, const void *p)
{
	return p > (const void *)tinfo
	    && p <= (const void *)tinfo + THREAD_SIZE - sizeof(long);
}
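
/* valid_stack_ptr above accepts only addresses strictly inside the
 * task's stack region (the THREAD_SIZE bytes starting at its
 * thread_info), with room left to read a saved long. */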
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
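/* Walk pgd -> pud -> pmd -> pte by hand to decide whether addr is backed
 * by a present page, so callers on old kernels can check that an address
 * is readable before touching it. */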
static bool virtual_address_mapped(unsigned long addr)
{
	pgd_t *pgd;
#ifdef pud_page
	pud_t *pud;
#endif /* pud_page */
	pmd_t *pmd;
	pte_t *pte;

#ifdef KSPLICE_STANDALONE
	if (!bootstrapped)
		return true;
#endif /* KSPLICE_STANDALONE */

	pgd = pgd_offset_k(addr);
	if (!pgd_present(*pgd))
		return false;

#ifdef pud_page
	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return false;

	pmd = pmd_offset(pud, addr);
#else /* !pud_page */
	pmd = pmd_offset(pgd, addr);
#endif /* pud_page */

	if (!pmd_present(*pmd))
		return false;

	if (pmd_large(*pmd))
		return true;

	pte = pte_offset_kernel(pmd, addr);
	if (!pte_present(*pte))
		return false;

	return true;
}
#endif /* LINUX_VERSION_CODE */