[ksplice.git] / kmodsrc / x86 / ksplice-arch.c
/* Copyright (C) 2007-2009 Ksplice, Inc.
 * Authors: Jeff Arnold, Anders Kaseorg, Tim Abbott
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)

#ifdef CONFIG_X86_64
extern const char thread_return[];
EXTRACT_SYMBOL(thread_return);
#define KSPLICE_IP(x) ((unsigned long)thread_return)
#else /* !CONFIG_X86_64 */
#define KSPLICE_IP(x) ((x)->thread.ip)
#endif /* CONFIG_X86_64 */
#define KSPLICE_SP(x) ((x)->thread.sp)

#else /* LINUX_VERSION_CODE < */
/* faca62273b602ab482fb7d3d940dbf41ef08b00e was after 2.6.24 */

#ifdef CONFIG_X86_64
extern const char thread_return[];
EXTRACT_SYMBOL(thread_return);
#define KSPLICE_IP(x) ((unsigned long)thread_return)
#define KSPLICE_SP(x) ((x)->thread.rsp)
#else /* !CONFIG_X86_64 */
#define KSPLICE_IP(x) ((x)->thread.eip)
#define KSPLICE_SP(x) ((x)->thread.esp)
#endif /* CONFIG_X86_64 */

#endif /* LINUX_VERSION_CODE */

#ifndef CONFIG_FUNCTION_DATA_SECTIONS
#include "udis86.h"
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) && defined(CONFIG_FTRACE)
/* 606576ce816603d9fe1fb453a88bc6eea16ca709 was after 2.6.27 */
#define CONFIG_FUNCTION_TRACER 1
#endif /* LINUX_VERSION_CODE && CONFIG_FTRACE */
#ifdef CONFIG_FUNCTION_TRACER
#include <asm/ftrace.h>
#include <linux/ftrace.h>

extern ftrace_func_t ftrace_trace_function;
EXTRACT_SYMBOL(ftrace_trace_function);
#endif /* CONFIG_FUNCTION_TRACER */

#define N_BITS(n) ((n) < sizeof(long) * 8 ? ~(~0L << (n)) : ~0L)
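
/*
 * Added worked example: N_BITS(n) builds a mask of the low n bits while
 * avoiding the undefined behavior of shifting a long by its full width.
 * On a 64-bit kernel, N_BITS(8) == 0xffL and N_BITS(32) == 0xffffffffL,
 * but N_BITS(64) takes the ~0L branch rather than evaluating
 * ~(~0L << 64).
 */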
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
/* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
#if defined(_I386_BUG_H) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) || \
			     defined(CONFIG_DEBUG_BUGVERBOSE)) && \
    !defined(do_each_thread_ve)	/* OpenVZ */
/* 38326f786cf4529a86b1ccde3aa17f4fa7e8472a was after 2.6.10 */
/* ud2 means BUG().  On old i386 kernels, it is followed
   by 2 bytes and then a 4-byte relocation; and is not
   disassembler-friendly. */
struct bug_frame {
	unsigned char ud2[2];
	unsigned short line;
	char *filename;
} __attribute__((packed));
#define KSPLICE_USE_BUG_FRAME
#elif defined(__ASM_X8664_BUG_H)
#define KSPLICE_USE_BUG_FRAME
#endif
#endif /* LINUX_VERSION_CODE */
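
/*
 * Added illustration (layout implied by the packed struct above): on the
 * affected i386 kernels a BUG() site occupies 8 bytes,
 *
 *   0f 0b  ll ll  ff ff ff ff
 *   ud2    line   filename pointer (subject to relocation)
 *
 * so the matcher must step over sizeof(struct bug_frame) bytes instead
 * of letting the disassembler misparse the line/filename bytes as code.
 */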
static abort_t compare_instructions(struct ksplice_mod_change *change,
				    struct ksplice_section *sect,
				    const struct ksplice_reloc **fingerp,
				    const unsigned char *run_start,
				    const unsigned char *run,
				    const unsigned char *pre, struct ud *run_ud,
				    struct ud *pre_ud, enum run_pre_mode mode);
static abort_t compare_operands(struct ksplice_mod_change *change,
				struct ksplice_section *sect,
				const struct ksplice_reloc **fingerp,
				const unsigned char *run_start,
				const unsigned char *run,
				const unsigned char *pre, struct ud *run_ud,
				struct ud *pre_ud, int opnum,
				enum run_pre_mode mode);
static uint8_t ud_operand_len(struct ud_operand *operand);
static uint8_t ud_prefix_len(struct ud *ud);
static long ud_operand_lval(struct ud_operand *operand);
static int next_run_byte(struct ud *ud);
static bool is_nop(struct ud *ud, const unsigned char *addr);
static bool is_unconditional_jump(struct ud *ud);
static bool is_mcount_call(struct ud *ud, const unsigned char *addr);
static void initialize_ksplice_ud(struct ud *ud);
static abort_t arch_run_pre_cmp(struct ksplice_mod_change *change,
				struct ksplice_section *sect,
				unsigned long run_addr,
				struct list_head *safety_records,
				enum run_pre_mode mode)
{
	abort_t ret;
	const unsigned char *run, *pre, *run_start, *pre_start, *safety_start;
	/* struct ud is big so we avoid putting it on the stack.  This
	 * is safe because we are holding module_mutex. */
	static struct ud pre_ud, run_ud;
	const unsigned char **match_map;
	const struct ksplice_reloc *finger;
	unsigned long pre_offset, run_offset;
	bool run_unconditional = false;
	bool pre_nop = true, run_nop = true;

	if (sect->size == 0)
		return NO_MATCH;

	pre_start = (const unsigned char *)sect->address;
	run_start = (const unsigned char *)run_addr;

	finger = init_reloc_search(change, sect);

	run = run_start;
	pre = pre_start;

	initialize_ksplice_ud(&pre_ud);
	ud_set_input_buffer(&pre_ud, (unsigned char *)pre, sect->size);

	initialize_ksplice_ud(&run_ud);
	ud_set_input_hook(&run_ud, next_run_byte);
	ud_set_user_opaque_data(&run_ud, (unsigned char *)run_addr);
	safety_start = run_start;

	match_map = vmalloc(sizeof(*match_map) * sect->size);
	if (match_map == NULL)
		return OUT_OF_MEMORY;
	memset(match_map, 0, sizeof(*match_map) * sect->size);
	match_map[0] = run_start;
	sect->match_map = match_map;
	sect->unmatched = 1;
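
	/*
	 * Added clarification: match_map[pre_offset] records which run
	 * address each pre offset was matched against.  Entry 0 is seeded
	 * with run_start, and sect->unmatched counts entries that have
	 * been created (here, or later by a forward jump target) but not
	 * yet reached by the main loop; the section only matches if this
	 * count drops back to zero by the time the pre bytes run out.
	 */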
	while (1) {
		if (pre_nop && ud_disassemble(&pre_ud) == 0) {
			/* Ran out of pre bytes to match; we're done! */
			unsigned long safety_offset = run - safety_start;
			if (sect->unmatched != 0) {
				if (mode == RUN_PRE_DEBUG)
					ksdebug(change, "%d unmatched jumps\n",
						sect->unmatched);
				ret = NO_MATCH;
				goto out;
			}
			ret = create_safety_record(change, sect, safety_records,
						   (unsigned long)safety_start,
						   safety_offset);
			goto out;
		}
		if (run_nop && ud_disassemble(&run_ud) == 0) {
			ret = NO_MATCH;
			goto out;
		}
		pre_nop = is_nop(&pre_ud, pre) || is_mcount_call(&pre_ud, pre);
		run_nop = is_nop(&run_ud, run) || is_mcount_call(&run_ud, run);
		if (pre_nop && !run_nop) {
			if (mode == RUN_PRE_DEBUG) {
				ksdebug(change, "| nop: ");
				print_bytes(change, run, 0, pre,
					    ud_insn_len(&pre_ud));
			}
			pre += ud_insn_len(&pre_ud);
			continue;
		}
		if (run_nop && !pre_nop) {
			if (mode == RUN_PRE_DEBUG) {
				ksdebug(change, "| nop: ");
				print_bytes(change, run, ud_insn_len(&run_ud),
					    pre, 0);
			}
			run += ud_insn_len(&run_ud);
			continue;
		}
		if (run_nop && pre_nop) {
			ret = compare_instructions(change, sect, &finger,
						   run_start, run, pre, &run_ud,
						   &pre_ud, RUN_PRE_SILENT);
			if (ret == NO_MATCH) {
				if (mode == RUN_PRE_DEBUG) {
					ksdebug(change, "| nop: ");
					print_bytes(change, run,
						    ud_insn_len(&run_ud), pre,
						    ud_insn_len(&pre_ud));
				}
				run += ud_insn_len(&run_ud);
				pre += ud_insn_len(&pre_ud);
				continue;
			} else if (ret != NO_MATCH && ret != OK) {
				goto out;
			}
		}
		pre_offset = pre - pre_start;

		if (match_map[pre_offset] == NULL) {
			match_map[pre_offset] = run;
		} else if (match_map[pre_offset] == run) {
			sect->unmatched--;
		} else {
			/* There is a discontinuity in the match map.
			   Check that the last instruction was an
			   unconditional change of control */
			if (!run_unconditional) {
				ksdebug(change, "<--[No unconditional change "
					"of control at control transfer point "
					"%lx]\n", pre_offset);
				ret = NO_MATCH;
				goto out;
			}

			if (mode == RUN_PRE_DEBUG)
				ksdebug(change, " [Moving run pointer for %lx "
					"from %lx to %lx]\n", pre_offset,
					(unsigned long)(run - run_start),
					(unsigned long)(match_map[pre_offset] -
							run_start));

			/* Create a safety_record for the block just matched */
			ret = create_safety_record(change, sect, safety_records,
						   (unsigned long)safety_start,
						   run - safety_start);
			if (ret != OK)
				goto out;

			/* We re-initialize the run ud structure because
			   it may have cached upcoming bytes */
			run = match_map[pre_offset];
			initialize_ksplice_ud(&run_ud);
			ud_set_input_hook(&run_ud, next_run_byte);
			ud_set_user_opaque_data(&run_ud, (unsigned char *)run);
			safety_start = run;
			if (ud_disassemble(&run_ud) == 0) {
				ret = NO_MATCH;
				goto out;
			}
			sect->unmatched--;
		}
		run_offset = run - run_start;
		run_unconditional = is_unconditional_jump(&run_ud);
		run_nop = true;
		pre_nop = true;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) && \
    defined(KSPLICE_USE_BUG_FRAME)
/* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
		if (run_ud.mnemonic == pre_ud.mnemonic &&
		    run_ud.mnemonic == UD_Iud2) {
			const struct bug_frame
			    *pre_bug = (const struct bug_frame *)pre,
			    *run_bug = (const struct bug_frame *)run;
			const struct ksplice_reloc *r;
			ret = lookup_reloc(change, &finger,
					   (unsigned long)&pre_bug->filename,
					   &r);
			if (ret == NO_MATCH) {
				if (mode == RUN_PRE_INITIAL)
					ksdebug(change, "Unrecognized ud2\n");
				goto out;
			}
			if (ret != OK)
				goto out;
			ret = handle_reloc(change, sect, r,
					   (unsigned long)&run_bug->filename,
					   mode);
			if (ret != OK)
				goto out;
			/* If there's a relocation, then it's a BUG? */
			if (mode == RUN_PRE_DEBUG) {
				ksdebug(change, "[BUG?: ");
				print_bytes(change,
					    run + sizeof(run_bug->ud2),
					    sizeof(*run_bug),
					    pre + sizeof(pre_bug->ud2),
					    sizeof(*pre_bug));
				ksdebug(change, "] ");
			}
			pre += sizeof(*pre_bug);
			run += sizeof(*run_bug);
			ud_input_skip(&run_ud,
				      sizeof(*run_bug) - sizeof(run_bug->ud2));
			ud_input_skip(&pre_ud,
				      sizeof(*pre_bug) - sizeof(pre_bug->ud2));
			continue;
		}
#endif /* LINUX_VERSION_CODE && KSPLICE_USE_BUG_FRAME */
#ifdef CONFIG_XEN
		if (run_ud.mnemonic == pre_ud.mnemonic &&
		    run_ud.mnemonic == UD_Iud2) {
			unsigned char bytes[3];
			unsigned char prefix[3] = { 0x78, 0x65, 0x6e };
			if (probe_kernel_read(bytes, (void *)run + 2, 3) !=
			    -EFAULT && pre - pre_start < sect->size &&
			    memcmp(bytes, prefix, 3) == 0 &&
			    memcmp(pre + 2, prefix, 3) == 0) {
				/* Exception for XEN_EMULATE_PREFIX */
				run += 5;
				pre += 5;
				ud_input_skip(&run_ud, 3);
				ud_input_skip(&pre_ud, 3);
				continue;
			}
		}
#endif /* CONFIG_XEN */
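
		/*
		 * Added note: the prefix bytes { 0x78, 0x65, 0x6e } checked
		 * above are ASCII "xen".  Xen's XEN_EMULATE_PREFIX is the
		 * 5-byte marker "ud2; .ascii 'xen'" (0f 0b 78 65 6e) placed
		 * before an instruction the hypervisor should emulate, so
		 * when both the run and pre code carry the marker, all 5
		 * bytes are skipped on each side and matching continues.
		 */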
		ret = compare_instructions(change, sect, &finger, run_start,
					   run, pre, &run_ud, &pre_ud, mode);
		if (ret != OK)
			goto out;
		run += ud_insn_len(&run_ud);
		pre += ud_insn_len(&pre_ud);
	}
out:
	if (ret != OK || mode != RUN_PRE_FINAL) {
		vfree(match_map);
		sect->match_map = NULL;
	}
	return ret;
}
static abort_t compare_instructions(struct ksplice_mod_change *change,
				    struct ksplice_section *sect,
				    const struct ksplice_reloc **fingerp,
				    const unsigned char *run_start,
				    const unsigned char *run,
				    const unsigned char *pre, struct ud *run_ud,
				    struct ud *pre_ud, enum run_pre_mode mode)
{
	abort_t ret;
	int i;
	bool found_bug_entry = false;
	const unsigned char *pre_start = (const unsigned char *)sect->address;
	unsigned long pre_offset = pre - pre_start;
	const struct ksplice_reloc *r;

	if (mode == RUN_PRE_DEBUG) {
		ksdebug(change, "| ");
		print_bytes(change, run, ud_insn_len(run_ud), pre,
			    ud_insn_len(pre_ud));
	}

	if (run_ud->mnemonic != pre_ud->mnemonic) {
		if (mode == RUN_PRE_DEBUG)
			ksdebug(change, "mnemonic mismatch: %s %s\n",
				ud_lookup_mnemonic(run_ud->mnemonic),
				ud_lookup_mnemonic(pre_ud->mnemonic));
		return NO_MATCH;
	}

	if (run_ud->mnemonic == UD_Iinvalid) {
		ksdebug(change, "Unrecognized opcode at %s+%lx\n",
			sect->symbol->label, pre_offset);
		return UNEXPECTED;
	}

	while (1) {
		ret = lookup_reloc(change, fingerp, (unsigned long)pre, &r);
		if (ret == NO_MATCH)
			break;
		else if (ret != OK)
			return ret;
		else if (r->howto->size != 0)
			break;

		if (r->howto->type == KSPLICE_HOWTO_BUG)
			found_bug_entry = true;

		if (mode == RUN_PRE_DEBUG) {
			if (r->howto->type == KSPLICE_HOWTO_EXTABLE)
				ksdebug(change, "[ex] ");
			if (r->howto->type == KSPLICE_HOWTO_BUG)
				ksdebug(change, "[bug] ");
			if (r->howto->type == KSPLICE_HOWTO_SYMBOL)
				ksdebug(change, "[sym] ");
		}
		ret = handle_reloc(change, sect, r, (unsigned long)run, mode);
		if (ret != OK)
			return ret;
		(*fingerp)++;
	}

#if defined(CONFIG_X86_64) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
/* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
#else /* !CONFIG_X86_64 || LINUX_VERSION_CODE >= */
#ifndef do_each_thread_ve	/* OpenVZ */
	if (run_ud->mnemonic == UD_Iud2 && !found_bug_entry) {
		if (strcmp(change->target_name, "kvm_intel") == 0 ||
		    strcmp(change->target_name, "kvm_amd") == 0) {
			/* KVM has ud2a bugs without a bug table entry. */
			if (mode == RUN_PRE_DEBUG)
				ksdebug(change, "[kvm ud2]");
		} else {
			ksdebug(change, "Unexpected ud2\n");
			return NO_MATCH;
		}
	}
#endif /* do_each_thread_ve */
#endif /* CONFIG_X86_64 && LINUX_VERSION_CODE */

	for (i = 0; i < ARRAY_SIZE(run_ud->operand); i++) {
		ret = compare_operands(change, sect, fingerp, run_start, run,
				       pre, run_ud, pre_ud, i, mode);
		if (ret != OK)
			return ret;
	}
	return OK;
}
static abort_t compare_operands(struct ksplice_mod_change *change,
				struct ksplice_section *sect,
				const struct ksplice_reloc **fingerp,
				const unsigned char *run_start,
				const unsigned char *run,
				const unsigned char *pre, struct ud *run_ud,
				struct ud *pre_ud, int opnum,
				enum run_pre_mode mode)
{
	abort_t ret;
	int i;
	const unsigned char *pre_start = (const unsigned char *)sect->address;
	unsigned long pre_offset = pre - pre_start;
	unsigned long run_offset = run - run_start;
	struct ud_operand *run_op = &run_ud->operand[opnum];
	struct ud_operand *pre_op = &pre_ud->operand[opnum];
	uint8_t run_off = ud_prefix_len(run_ud);
	uint8_t pre_off = ud_prefix_len(pre_ud);
	const unsigned char **match_map = sect->match_map;
	const struct ksplice_reloc *r;
	for (i = 0; i < opnum; i++) {
		run_off += ud_operand_len(&run_ud->operand[i]);
		pre_off += ud_operand_len(&pre_ud->operand[i]);
	}

	if (run_op->type != pre_op->type) {
		if (mode == RUN_PRE_DEBUG)
			ksdebug(change, "type mismatch: %d %d\n", run_op->type,
				pre_op->type);
		return NO_MATCH;
	}
	if (run_op->base != pre_op->base) {
		if (mode == RUN_PRE_DEBUG)
			ksdebug(change, "base mismatch: %d %d\n", run_op->base,
				pre_op->base);
		return NO_MATCH;
	}
	if (run_op->index != pre_op->index) {
		if (mode == RUN_PRE_DEBUG)
			ksdebug(change, "index mismatch: %d %d\n",
				run_op->index, pre_op->index);
		return NO_MATCH;
	}
	if (run_op->type == UD_OP_PTR &&
	    run_op->lval.ptr.seg != pre_op->lval.ptr.seg) {
		if (mode == RUN_PRE_DEBUG)
			ksdebug(change, "segment mismatch: %d %d\n",
				run_op->lval.ptr.seg, pre_op->lval.ptr.seg);
		return NO_MATCH;
	}
	if (ud_operand_len(run_op) == 0 && ud_operand_len(pre_op) == 0)
		return OK;
	ret = lookup_reloc(change, fingerp, (unsigned long)(pre + pre_off), &r);
	if (ret == OK) {
		struct ksplice_reloc run_reloc = *r;
		struct ksplice_reloc_howto run_howto = *r->howto;
		unsigned int run_reloc_len = ud_operand_len(run_op);
		unsigned int pre_reloc_len = ud_operand_len(pre_op);

		if (run_op->type == UD_OP_PTR) {
			/* Adjust for reloc length != operand length for
			   instructions that take a segment:offset operand */
			run_reloc_len -= 2;
			pre_reloc_len -= 2;
		}

		run_reloc.howto = &run_howto;
		if (r->howto->size != pre_reloc_len) {
			ksdebug(change, "ksplice_h: run-pre: reloc size %d "
				"differs from disassembled size %d\n",
				r->howto->size, pre_reloc_len);
			return NO_MATCH;
		}
		if (r->howto->size != run_reloc_len &&
		    (r->howto->dst_mask != N_BITS(r->howto->size * 8) ||
		     r->howto->rightshift != 0)) {
			/* Reloc types unsupported with differing reloc sizes */
			ksdebug(change, "ksplice_h: reloc: invalid flags for a "
				"relocation with size changed\n");
			ksdebug(change, "%ld %u\n", r->howto->dst_mask,
				r->howto->rightshift);
			return UNEXPECTED;
		}
		/* adjust for differing relocation size */
		run_howto.size = run_reloc_len;
		if (r->howto->size != run_howto.size)
			run_howto.dst_mask = N_BITS(run_howto.size * 8);
		run_reloc.insn_addend += pre_reloc_len - run_reloc_len;
		ret = handle_reloc(change, sect, &run_reloc,
				   (unsigned long)(run + run_off), mode);
		if (ret != OK) {
			if (mode == RUN_PRE_DEBUG)
				ksdebug(change, "Matching failure at offset "
					"%lx\n", pre_offset);
			return ret;
		}
		/* This operand is a successfully processed relocation */
		return OK;
	} else if (ret != NO_MATCH) {
		return ret;
	}
	if (pre_op->type == UD_OP_JIMM) {
		/* Immediate jump without a relocation */
		const unsigned char *pre_target = pre + ud_insn_len(pre_ud) +
		    ud_operand_lval(pre_op);
		const unsigned char *run_target = run + ud_insn_len(run_ud) +
		    ud_operand_lval(run_op);
		if (pre_target >= pre_start &&
		    pre_target < pre_start + sect->size) {
			/* Jump within the current function.
			   Check it's to a corresponding place */
			unsigned long new_pre_offset = pre_target - pre_start;
			unsigned long new_run_offset = run_target - run_start;
			if (mode == RUN_PRE_DEBUG)
				ksdebug(change, "[Jumps: pre=%lx run=%lx "
					"pret=%lx runt=%lx] ", pre_offset,
					run_offset, new_pre_offset,
					new_run_offset);
			if (match_map[pre_target - pre_start] != NULL &&
			    match_map[pre_target - pre_start] != run_target) {
				ksdebug(change, "<--[Jumps to nonmatching "
					"locations]\n");
				return NO_MATCH;
			} else if (match_map[pre_target - pre_start] == NULL) {
				match_map[pre_target - pre_start] = run_target;
				sect->unmatched++;
			}
			return OK;
		} else if (pre_target == run_target) {
			/* Paravirt-inserted pcrel jump; OK! */
			return OK;
		} else {
			if (mode == RUN_PRE_DEBUG) {
				ksdebug(change, "<--Different operands!\n");
				ksdebug(change, "%lx %lx %lx %lx %x %lx %lx "
					"%lx\n", (unsigned long)pre_start,
					(unsigned long)pre_target,
					(unsigned long)pre_start + sect->size,
					(unsigned long)pre, ud_insn_len(pre_ud),
					sect->size, ud_operand_lval(pre_op),
					(unsigned long)run_target);
			}
			return NO_MATCH;
		}
	} else if (ud_operand_len(pre_op) == ud_operand_len(run_op) &&
		   memcmp(pre + pre_off, run + run_off,
			  ud_operand_len(run_op)) == 0) {
		return OK;
	} else {
		if (mode == RUN_PRE_DEBUG)
			ksdebug(change, "<--Different operands!\n");
		return NO_MATCH;
	}
}
static void initialize_ksplice_ud(struct ud *ud)
{
	ud_init(ud);
	ud_set_mode(ud, BITS_PER_LONG);
	ud_set_syntax(ud, NULL);
	ud_set_pc(ud, 0);
	ud_set_vendor(ud, UD_VENDOR_ANY);
}
#ifdef CONFIG_FUNCTION_TRACER
static bool is_mcount_call(struct ud *ud, const unsigned char *addr)
{
	const void *target =
	    addr + ud_insn_len(ud) + ud_operand_lval(&ud->operand[0]);
	if (ud->mnemonic == UD_Icall &&
	    (target == mcount || target == ftrace_trace_function))
		return true;
	return false;
}
#else /* !CONFIG_FUNCTION_TRACER */
static bool is_mcount_call(struct ud *ud, const unsigned char *addr)
{
	return false;
}
#endif /* CONFIG_FUNCTION_TRACER */
static bool is_nop(struct ud *ud, const unsigned char *addr)
{
	switch (ud->mnemonic) {
	case UD_Inop:
		return true;
	case UD_Imov:
	case UD_Ixchg:
		return ud->dis_mode == 32 &&
		    ud->operand[0].type == UD_OP_REG &&
		    ud->operand[1].type == UD_OP_REG &&
		    ud->operand[2].type == UD_NONE &&
		    ud->operand[0].base == ud->operand[1].base;
	case UD_Ilea:
		return ud->dis_mode == 32 &&
		    ud->operand[0].type == UD_OP_REG &&
		    ud->operand[1].type == UD_OP_MEM &&
		    ((ud->operand[1].base == ud->operand[0].base &&
		      ud->operand[1].index == UD_NONE) ||
		     (ud->operand[1].base == UD_NONE &&
		      ud->operand[1].index == ud->operand[0].base &&
		      ud->operand[1].scale == 0)) &&
		    ud_operand_lval(&ud->operand[1]) == 0 &&
		    ud->operand[2].type == UD_NONE;
	case UD_Ijmp:
		/* jmp +N followed by N 0x90s is a NOP */
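		/* Added example: "eb 01 90" (jmp .+1; nop) and
		   "eb 02 90 90" both satisfy this test, since the jump
		   merely hops over the padding 0x90 bytes. */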
		if (ud->operand[0].type == UD_OP_JIMM &&
		    ud->operand[1].type == UD_NONE &&
		    ud->operand[2].type == UD_NONE &&
		    ud_operand_len(&ud->operand[0]) == 1) {
			/* struct ud is big so we avoid putting it on the stack.
			 * This is safe because we are holding module_mutex. */
			static struct ud temp_ud;
			int len = ud_operand_lval(&ud->operand[0]);
			int i;

			if (len < 0 || len > 13)
				return false;

			initialize_ksplice_ud(&temp_ud);
			ud_set_input_hook(&temp_ud, next_run_byte);
			ud_set_user_opaque_data(&temp_ud,
						(unsigned char *)addr +
						ud_insn_len(ud));

			for (i = 0; i < len; i++) {
				if (ud_disassemble(&temp_ud) == 0)
					return false;
				if (temp_ud.mnemonic != UD_Inop)
					return false;
			}
			return true;
		}
	default:
		return false;
	}
}
static bool is_unconditional_jump(struct ud *ud)
{
	switch (ud->mnemonic) {
	case UD_Ijmp:
	case UD_Iret:
	case UD_Iretf:
	case UD_Iiretw:
	case UD_Iiretd:
	case UD_Iiretq:
	case UD_Isysexit:
	case UD_Isysret:
	case UD_Isyscall:
	case UD_Isysenter:
		return true;
	default:
		return false;
	}
}
static uint8_t ud_operand_len(struct ud_operand *operand)
{
	if (operand->type == UD_OP_MEM)
		return operand->offset / 8;
	if (operand->type == UD_OP_REG)
		return 0;
	return operand->size / 8;
}
static uint8_t ud_prefix_len(struct ud *ud)
{
	int len = ud_insn_len(ud);
	int i;
	for (i = 0; i < ARRAY_SIZE(ud->operand); i++)
		len -= ud_operand_len(&ud->operand[i]);
	return len;
}
static long ud_operand_lval(struct ud_operand *operand)
{
	switch (operand->type == UD_OP_MEM ? operand->offset : operand->size) {
	case 8:
		return operand->lval.sbyte;
	case 16:
		return operand->lval.sword;
	case 32:
		return operand->lval.sdword;
	case 64:
		return operand->lval.sqword;
	default:
		return 0;
	}
}
static int next_run_byte(struct ud *ud)
{
	unsigned char byte;
	if (probe_kernel_read(&byte, ud_get_user_opaque_data(ud), 1) == -EFAULT)
		return UD_EOI;
	ud_set_user_opaque_data(ud, ud_get_user_opaque_data(ud) + 1);
	return byte;
}
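
/*
 * Added design note: feeding udis86 through an input hook instead of a
 * flat buffer means every byte of the running kernel is fetched with
 * probe_kernel_read(), so disassembling an unmapped or truncated code
 * region ends the stream with UD_EOI rather than faulting.
 */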
#endif /* !CONFIG_FUNCTION_DATA_SECTIONS */
static struct ksplice_symbol trampoline_symbol = {
	.name = NULL,
	.label = "<trampoline>",
};

static const struct ksplice_reloc_howto trampoline_howto = {
	.type = KSPLICE_HOWTO_RELOC,
	.pcrel = 1,
	.size = 4,
	.dst_mask = 0xffffffffL,
	.rightshift = 0,
	.signed_addend = 1,
};

static const struct ksplice_reloc trampoline_reloc = {
	.symbol = &trampoline_symbol,
	.insn_addend = -4,
	.target_addend = 0,
	.howto = &trampoline_howto,
};
static abort_t trampoline_target(struct ksplice_mod_change *change,
				 unsigned long addr, unsigned long *new_addr)
{
	abort_t ret;
	unsigned char byte;

	if (probe_kernel_read(&byte, (void *)addr, sizeof(byte)) == -EFAULT)
		return NO_MATCH;

	if (byte != 0xe9)
		return NO_MATCH;

	ret = read_reloc_value(change, &trampoline_reloc, addr + 1, new_addr);
	if (ret != OK)
		return ret;

	*new_addr += addr + 1;
	return OK;
}
static abort_t prepare_trampoline(struct ksplice_mod_change *change,
				  struct ksplice_patch *p)
{
	p->size = 5;
	((unsigned char *)p->contents)[0] = 0xe9;
	return write_reloc_value(change, &trampoline_reloc,
				 (unsigned long)p->contents + 1,
				 p->repladdr - (p->oldaddr + 1));
}
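
/*
 * Added worked example (addresses made up): with oldaddr at 0xc0100000
 * and repladdr at 0xc8765430, the 5-byte trampoline written into
 * p->contents is
 *
 *   e9 2b 54 66 08	jmp 0xc8765430
 *
 * because a jmp rel32 is taken relative to the end of the instruction:
 * 0xc8765430 - (0xc0100000 + 5) = 0x0866542b.  The howto's insn_addend
 * of -4 appears to account for that base lying 4 bytes past the rel32
 * operand at oldaddr + 1.
 */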
static abort_t handle_paravirt(struct ksplice_mod_change *change,
			       unsigned long pre_addr, unsigned long run_addr,
			       int *matched)
{
	unsigned char run[5], pre[5];
	*matched = 0;

	if (probe_kernel_read(&run, (void *)run_addr, sizeof(run)) == -EFAULT ||
	    probe_kernel_read(&pre, (void *)pre_addr, sizeof(pre)) == -EFAULT)
		return OK;

	if ((run[0] == 0xe8 && pre[0] == 0xe8) ||
	    (run[0] == 0xe9 && pre[0] == 0xe9))
		if (run_addr + 1 + *(int32_t *)&run[1] ==
		    pre_addr + 1 + *(int32_t *)&pre[1])
			*matched = 5;
	return OK;
}
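
/*
 * Added worked example (addresses hypothetical): paravirt patching
 * rewrites call/jmp sites in place, so the raw bytes can differ between
 * run and pre even when both reach the same function:
 *
 *   pre:  e8 10 00 00 00	call pre_addr + 5 + 0x10
 *   run:  e8 30 00 00 00	call run_addr + 5 + 0x30
 *
 * These match iff both pc-relative operands resolve to the same absolute
 * target.  The code compares addr + 1 + rel32 on each side rather than
 * addr + 5 + rel32, which is equivalent because the extra 4 cancels.
 */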
static bool valid_stack_ptr(const struct thread_info *tinfo, const void *p)
{
	return p > (const void *)tinfo
	    && p <= (const void *)tinfo + THREAD_SIZE - sizeof(long);
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static bool virtual_address_mapped(unsigned long addr)
{
	pgd_t *pgd;
#ifdef pud_page
	pud_t *pud;
#endif /* pud_page */
	pmd_t *pmd;
	pte_t *pte;

#ifdef KSPLICE_STANDALONE
	if (!bootstrapped)
		return true;
#endif /* KSPLICE_STANDALONE */

	pgd = pgd_offset_k(addr);
	if (!pgd_present(*pgd))
		return false;

#ifdef pud_page
	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return false;

	pmd = pmd_offset(pud, addr);
#else /* pud_page */
	pmd = pmd_offset(pgd, addr);
#endif /* pud_page */

	if (!pmd_present(*pmd))
		return false;

	if (pmd_large(*pmd))
		return true;

	pte = pte_offset_kernel(pmd, addr);
	if (!pte_present(*pte))
		return false;

	return true;
}
#endif /* LINUX_VERSION_CODE */