Only allow paravirt-inserted jumps that go outside the current section.
kmodsrc/x86/ksplice-arch.c
/* Copyright (C) 2007-2009 Ksplice, Inc.
 * Authors: Jeff Arnold, Anders Kaseorg, Tim Abbott
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#if defined(_ASM_X86_PROCESSOR_H) || \
    defined(__ASM_X86_PROCESSOR_H)	/* New unified x86 */
#define KSPLICE_IP(x) ((x)->thread.ip)
#define KSPLICE_SP(x) ((x)->thread.sp)
#elif defined(CONFIG_X86_64)	/* Old x86 64-bit */
/* The IP is on the stack, so we don't need to check it separately.
 * Instead, we need to prevent Ksplice from patching thread_return.
 */
extern const char thread_return[];
EXTRACT_SYMBOL(thread_return);
#define KSPLICE_IP(x) ((unsigned long)thread_return)
#define KSPLICE_SP(x) ((x)->thread.rsp)
#else /* Old x86 32-bit */
#define KSPLICE_IP(x) ((x)->thread.eip)
#define KSPLICE_SP(x) ((x)->thread.esp)
#endif /* __ASM_X86_PROCESSOR_H */
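
/* KSPLICE_IP/KSPLICE_SP read a sleeping task's saved instruction and
 * stack pointers; the safety check uses them to verify that no thread
 * is stopped inside code about to be patched.  The field names moved
 * as the kernel unified the 32- and 64-bit processor headers, hence
 * the version-specific definitions above. */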

#ifndef CONFIG_FUNCTION_DATA_SECTIONS
#include "udis86.h"
#ifdef CONFIG_FTRACE
#include <asm/ftrace.h>
#include <linux/ftrace.h>

extern ftrace_func_t ftrace_trace_function;
EXTRACT_SYMBOL(ftrace_trace_function);
#endif /* CONFIG_FTRACE */

#define N_BITS(n) ((n) < sizeof(long) * 8 ? ~(~0L << (n)) : ~0L)
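/* N_BITS(n) is a mask of the low n bits, e.g. N_BITS(8) == 0xff and
 * N_BITS(32) == 0xffffffff.  The conditional avoids shifting by the
 * full width of long, which is undefined behavior in C. */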

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
/* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
#if defined(_I386_BUG_H) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) || \
			     defined(CONFIG_DEBUG_BUGVERBOSE)) && \
    !defined(do_each_thread_ve)	/* OpenVZ */
/* 38326f786cf4529a86b1ccde3aa17f4fa7e8472a was after 2.6.10 */
/* ud2 means BUG().  On old i386 kernels, it is followed by 2 bytes
   and then a 4-byte relocation, and is not disassembler-friendly. */
struct bug_frame {
	unsigned char ud2[2];
	unsigned short line;
	char *filename;
} __attribute__((packed));
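/* So the full encoding is: 0f 0b (ud2), the 2-byte source line number,
 * then a 4-byte pointer to the filename string -- 8 bytes in all. */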
#define KSPLICE_USE_BUG_FRAME
#elif defined(__ASM_X8664_BUG_H)
#define KSPLICE_USE_BUG_FRAME
#endif
#endif /* LINUX_VERSION_CODE */

static abort_t compare_instructions(struct ksplice_mod_change *change,
				    struct ksplice_section *sect,
				    const struct ksplice_reloc **fingerp,
				    const unsigned char *run_start,
				    const unsigned char *run,
				    const unsigned char *pre, struct ud *run_ud,
				    struct ud *pre_ud, enum run_pre_mode mode);
static abort_t compare_operands(struct ksplice_mod_change *change,
				struct ksplice_section *sect,
				const struct ksplice_reloc **fingerp,
				const unsigned char *run_start,
				const unsigned char *run,
				const unsigned char *pre, struct ud *run_ud,
				struct ud *pre_ud, int opnum,
				enum run_pre_mode mode);
static uint8_t ud_operand_len(struct ud_operand *operand);
static uint8_t ud_prefix_len(struct ud *ud);
static long ud_operand_lval(struct ud_operand *operand);
static int next_run_byte(struct ud *ud);
static bool is_nop(struct ud *ud, const unsigned char *addr);
static bool is_unconditional_jump(struct ud *ud);
static bool is_mcount_call(struct ud *ud, const unsigned char *addr);
static void initialize_ksplice_ud(struct ud *ud);
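
/* arch_run_pre_cmp disassembles the "pre" code (from the compiled
 * update) and the "run" code (live in kernel memory) in lockstep and
 * compares them instruction by instruction, tolerating differences
 * that don't change behavior: nop padding, mcount call sites, applied
 * relocations, and jumps whose targets correspond.  On success it
 * emits safety records covering the matched run-address ranges. */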
static abort_t arch_run_pre_cmp(struct ksplice_mod_change *change,
				struct ksplice_section *sect,
				unsigned long run_addr,
				struct list_head *safety_records,
				enum run_pre_mode mode)
{
	abort_t ret;
	const unsigned char *run, *pre, *run_start, *pre_start, *safety_start;
	/* struct ud is big so we avoid putting it on the stack.  This
	 * is safe because we are holding module_mutex. */
	static struct ud pre_ud, run_ud;
	const unsigned char **match_map;
	const struct ksplice_reloc *finger;
	unsigned long pre_offset, run_offset;
	bool run_unconditional = false;
	bool pre_nop = true, run_nop = true;

	if (sect->size == 0)
		return NO_MATCH;

	pre_start = (const unsigned char *)sect->address;
	run_start = (const unsigned char *)run_addr;

	finger = init_reloc_search(change, sect);

	run = run_start;
	pre = pre_start;

	initialize_ksplice_ud(&pre_ud);
	ud_set_input_buffer(&pre_ud, (unsigned char *)pre, sect->size);

	initialize_ksplice_ud(&run_ud);
	ud_set_input_hook(&run_ud, next_run_byte);
	ud_set_user_opaque_data(&run_ud, (unsigned char *)run_addr);
	safety_start = run_start;

	match_map = vmalloc(sizeof(*match_map) * sect->size);
	if (match_map == NULL)
		return OUT_OF_MEMORY;
	memset(match_map, 0, sizeof(*match_map) * sect->size);
	match_map[0] = run_start;
	sect->match_map = match_map;
	sect->unmatched = 1;
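
	/* match_map[pre_offset] records the run address that the pre
	 * byte at pre_offset must correspond to; entries are seeded by
	 * jump targets as they are discovered.  sect->unmatched counts
	 * targets recorded but not yet reached -- the section entry
	 * point at offset 0 is the first such target, hence 1. */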
	while (1) {
		if (pre_nop && ud_disassemble(&pre_ud) == 0) {
			/* Ran out of pre bytes to match; we're done! */
			unsigned long safety_offset = run - safety_start;
			if (sect->unmatched != 0) {
				if (mode == RUN_PRE_DEBUG)
					ksdebug(change, "%d unmatched jumps\n",
						sect->unmatched);
				ret = NO_MATCH;
				goto out;
			}
			ret = create_safety_record(change, sect, safety_records,
						   (unsigned long)safety_start,
						   safety_offset);
			goto out;
		}
		if (run_nop && ud_disassemble(&run_ud) == 0) {
			ret = NO_MATCH;
			goto out;
		}
		pre_nop = is_nop(&pre_ud, pre) || is_mcount_call(&pre_ud, pre);
		run_nop = is_nop(&run_ud, run) || is_mcount_call(&run_ud, run);
		if (pre_nop && !run_nop) {
			if (mode == RUN_PRE_DEBUG) {
				ksdebug(change, "| nop: ");
				print_bytes(change, run, 0, pre,
					    ud_insn_len(&pre_ud));
			}
			pre += ud_insn_len(&pre_ud);
			continue;
		}
		if (run_nop && !pre_nop) {
			if (mode == RUN_PRE_DEBUG) {
				ksdebug(change, "| nop: ");
				print_bytes(change, run, ud_insn_len(&run_ud),
					    pre, 0);
			}
			run += ud_insn_len(&run_ud);
			continue;
		}
		if (run_nop && pre_nop) {
			ret = compare_instructions(change, sect, &finger,
						   run_start, run, pre, &run_ud,
						   &pre_ud, RUN_PRE_SILENT);
			if (ret == NO_MATCH) {
				if (mode == RUN_PRE_DEBUG) {
					ksdebug(change, "| nop: ");
					print_bytes(change, run,
						    ud_insn_len(&run_ud), pre,
						    ud_insn_len(&pre_ud));
				}
				run += ud_insn_len(&run_ud);
				pre += ud_insn_len(&pre_ud);
				continue;
			} else if (ret != OK) {
				goto out;
			}
		}
		pre_offset = pre - pre_start;

		if (match_map[pre_offset] == NULL) {
			match_map[pre_offset] = run;
		} else if (match_map[pre_offset] == run) {
			sect->unmatched--;
		} else {
			/* There is a discontinuity in the match map.
			   Check that the last instruction was an
			   unconditional change of control */
			if (!run_unconditional) {
				ksdebug(change, "<--[No unconditional change "
					"of control at control transfer point "
					"%lx]\n", pre_offset);
				ret = NO_MATCH;
				goto out;
			}

			if (mode == RUN_PRE_DEBUG)
				ksdebug(change, " [Moving run pointer for %lx "
					"from %lx to %lx]\n", pre_offset,
					(unsigned long)(run - run_start),
					(unsigned long)(match_map[pre_offset] -
							run_start));

			/* Create a safety_record for the block just matched */
			ret = create_safety_record(change, sect, safety_records,
						   (unsigned long)safety_start,
						   run - safety_start);
			if (ret != OK)
				goto out;

			/* We re-initialize the run ud structure because
			   it may have cached upcoming bytes */
			run = match_map[pre_offset];
			initialize_ksplice_ud(&run_ud);
			ud_set_input_hook(&run_ud, next_run_byte);
			ud_set_user_opaque_data(&run_ud, (unsigned char *)run);
			safety_start = run;
			if (ud_disassemble(&run_ud) == 0) {
				ret = NO_MATCH;
				goto out;
			}

			sect->unmatched--;
		}
		run_offset = run - run_start;
		run_unconditional = is_unconditional_jump(&run_ud);
		run_nop = true;
		pre_nop = true;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) && \
    defined(KSPLICE_USE_BUG_FRAME)
/* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
		if (run_ud.mnemonic == pre_ud.mnemonic &&
		    run_ud.mnemonic == UD_Iud2) {
			const struct bug_frame
			    *pre_bug = (const struct bug_frame *)pre,
			    *run_bug = (const struct bug_frame *)run;
			const struct ksplice_reloc *r;
			ret = lookup_reloc(change, &finger,
					   (unsigned long)&pre_bug->filename,
					   &r);
			if (ret == NO_MATCH) {
				if (mode == RUN_PRE_INITIAL)
					ksdebug(change, "Unrecognized ud2\n");
				goto out;
			}
			if (ret != OK)
				goto out;
			ret = handle_reloc(change, sect, r,
					   (unsigned long)&run_bug->filename,
					   mode);
			if (ret != OK)
				goto out;
			/* If there's a relocation, then it's a BUG? */
			if (mode == RUN_PRE_DEBUG) {
				ksdebug(change, "[BUG?: ");
				print_bytes(change,
					    run + sizeof(run_bug->ud2),
					    sizeof(*run_bug),
					    pre + sizeof(pre_bug->ud2),
					    sizeof(*pre_bug));
				ksdebug(change, "] ");
			}
			pre += sizeof(*pre_bug);
			run += sizeof(*run_bug);
			ud_input_skip(&run_ud,
				      sizeof(*run_bug) - sizeof(run_bug->ud2));
			ud_input_skip(&pre_ud,
				      sizeof(*pre_bug) - sizeof(pre_bug->ud2));
			continue;
		}
#endif /* LINUX_VERSION_CODE && KSPLICE_USE_BUG_FRAME */

#ifdef CONFIG_XEN
		if (run_ud.mnemonic == pre_ud.mnemonic &&
		    run_ud.mnemonic == UD_Iud2) {
			unsigned char bytes[3];
			unsigned char prefix[3] = { 0x78, 0x65, 0x6e };
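			/* 0x78 0x65 0x6e is ASCII "xen": when ud2 is
			 * followed by these bytes it is Xen's
			 * emulated-instruction marker, not a BUG(). */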
			if (probe_kernel_read(bytes, (void *)run + 2, 3) !=
			    -EFAULT && pre - pre_start < sect->size &&
			    memcmp(bytes, prefix, 3) == 0 &&
			    memcmp(pre + 2, prefix, 3) == 0) {
				/* Exception for XEN_EMULATE_PREFIX */
				run += 5;
				pre += 5;
				ud_input_skip(&run_ud, 3);
				ud_input_skip(&pre_ud, 3);
				continue;
			}
		}
#endif /* CONFIG_XEN */

		ret = compare_instructions(change, sect, &finger, run_start,
					   run, pre, &run_ud, &pre_ud, mode);
		if (ret != OK)
			goto out;
		run += ud_insn_len(&run_ud);
		pre += ud_insn_len(&pre_ud);
	}
out:
	if (ret != OK || mode != RUN_PRE_FINAL) {
		vfree(match_map);
		sect->match_map = NULL;
	}
	return ret;
}

static abort_t compare_instructions(struct ksplice_mod_change *change,
				    struct ksplice_section *sect,
				    const struct ksplice_reloc **fingerp,
				    const unsigned char *run_start,
				    const unsigned char *run,
				    const unsigned char *pre, struct ud *run_ud,
				    struct ud *pre_ud, enum run_pre_mode mode)
{
	abort_t ret;
	int i;
	bool found_bug_entry = false;
	const unsigned char *pre_start = (const unsigned char *)sect->address;
	unsigned long pre_offset = pre - pre_start;
	const struct ksplice_reloc *r;

	if (mode == RUN_PRE_DEBUG) {
		ksdebug(change, "| ");
		print_bytes(change, run, ud_insn_len(run_ud), pre,
			    ud_insn_len(pre_ud));
	}

	if (run_ud->mnemonic != pre_ud->mnemonic) {
		if (mode == RUN_PRE_DEBUG)
			ksdebug(change, "mnemonic mismatch: %s %s\n",
				ud_lookup_mnemonic(run_ud->mnemonic),
				ud_lookup_mnemonic(pre_ud->mnemonic));
		return NO_MATCH;
	}

	if (run_ud->mnemonic == UD_Iinvalid) {
		ksdebug(change, "Unrecognized opcode at %s+%lx\n",
			sect->symbol->label, pre_offset);
		return UNEXPECTED;
	}
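
	/* Size-zero reloc entries are positional markers (bug table,
	 * exception table, and symbol entries) rather than operand
	 * relocations; consume every marker that points at this
	 * instruction before comparing its operands. */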
	while (1) {
		ret = lookup_reloc(change, fingerp, (unsigned long)pre, &r);
		if (ret == NO_MATCH)
			break;
		else if (ret != OK)
			return ret;
		else if (r->howto->size != 0)
			break;

		if (r->howto->type == KSPLICE_HOWTO_BUG)
			found_bug_entry = true;

		if (mode == RUN_PRE_DEBUG) {
			if (r->howto->type == KSPLICE_HOWTO_EXTABLE)
				ksdebug(change, "[ex] ");
			if (r->howto->type == KSPLICE_HOWTO_BUG)
				ksdebug(change, "[bug] ");
			if (r->howto->type == KSPLICE_HOWTO_SYMBOL)
				ksdebug(change, "[sym] ");
		}
		ret = handle_reloc(change, sect, r, (unsigned long)run, mode);
		if (ret != OK)
			return ret;
		(*fingerp)++;
	}

#if defined(CONFIG_X86_64) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
/* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
#else /* !CONFIG_X86_64 || LINUX_VERSION_CODE >= */
#ifndef do_each_thread_ve	/* OpenVZ */
	if (run_ud->mnemonic == UD_Iud2 && !found_bug_entry) {
		if (strcmp(change->target_name, "kvm_intel") == 0 ||
		    strcmp(change->target_name, "kvm_amd") == 0) {
			/* KVM has ud2a bugs without a bug table entry. */
			if (mode == RUN_PRE_DEBUG)
				ksdebug(change, "[kvm ud2]");
		} else {
			ksdebug(change, "Unexpected ud2\n");
			return NO_MATCH;
		}
	}
#endif /* do_each_thread_ve */
#endif /* CONFIG_X86_64 && LINUX_VERSION_CODE */

	for (i = 0; i < ARRAY_SIZE(run_ud->operand); i++) {
		ret = compare_operands(change, sect, fingerp, run_start, run,
				       pre, run_ud, pre_ud, i, mode);
		if (ret != OK)
			return ret;
	}
	return OK;
}
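
/* compare_operands accepts operands that differ only in ways Ksplice
 * can account for: bytes covered by a known relocation, or pc-relative
 * jump immediates.  In-section jump targets must land at corresponding
 * pre/run offsets (enforced through match_map); identical absolute
 * targets are accepted only when they fall outside the section, which
 * is the paravirt-inserted jump case. */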
static abort_t compare_operands(struct ksplice_mod_change *change,
				struct ksplice_section *sect,
				const struct ksplice_reloc **fingerp,
				const unsigned char *run_start,
				const unsigned char *run,
				const unsigned char *pre, struct ud *run_ud,
				struct ud *pre_ud, int opnum,
				enum run_pre_mode mode)
{
	abort_t ret;
	int i;
	const unsigned char *pre_start = (const unsigned char *)sect->address;
	unsigned long pre_offset = pre - pre_start;
	unsigned long run_offset = run - run_start;
	struct ud_operand *run_op = &run_ud->operand[opnum];
	struct ud_operand *pre_op = &pre_ud->operand[opnum];
	uint8_t run_off = ud_prefix_len(run_ud);
	uint8_t pre_off = ud_prefix_len(pre_ud);
	const unsigned char **match_map = sect->match_map;
	const struct ksplice_reloc *r;
	for (i = 0; i < opnum; i++) {
		run_off += ud_operand_len(&run_ud->operand[i]);
		pre_off += ud_operand_len(&pre_ud->operand[i]);
	}

	if (run_op->type != pre_op->type) {
		if (mode == RUN_PRE_DEBUG)
			ksdebug(change, "type mismatch: %d %d\n", run_op->type,
				pre_op->type);
		return NO_MATCH;
	}
	if (run_op->base != pre_op->base) {
		if (mode == RUN_PRE_DEBUG)
			ksdebug(change, "base mismatch: %d %d\n", run_op->base,
				pre_op->base);
		return NO_MATCH;
	}
	if (run_op->index != pre_op->index) {
		if (mode == RUN_PRE_DEBUG)
			ksdebug(change, "index mismatch: %d %d\n",
				run_op->index, pre_op->index);
		return NO_MATCH;
	}
	if (run_op->type == UD_OP_PTR &&
	    run_op->lval.ptr.seg != pre_op->lval.ptr.seg) {
		if (mode == RUN_PRE_DEBUG)
			ksdebug(change, "segment mismatch: %d %d\n",
				run_op->lval.ptr.seg, pre_op->lval.ptr.seg);
		return NO_MATCH;
	}
	if (ud_operand_len(run_op) == 0 && ud_operand_len(pre_op) == 0)
		return OK;

	ret = lookup_reloc(change, fingerp, (unsigned long)(pre + pre_off), &r);
	if (ret == OK) {
		struct ksplice_reloc run_reloc = *r;
		struct ksplice_reloc_howto run_howto = *r->howto;
		unsigned int run_reloc_len = ud_operand_len(run_op);
		unsigned int pre_reloc_len = ud_operand_len(pre_op);

		if (run_op->type == UD_OP_PTR) {
			/* Adjust for reloc length != operand length for
			   instructions that take a segment:offset operand */
			run_reloc_len -= 2;
			pre_reloc_len -= 2;
		}

		run_reloc.howto = &run_howto;
		if (r->howto->size != pre_reloc_len) {
			ksdebug(change, "ksplice_h: run-pre: reloc size %d "
				"differs from disassembled size %d\n",
				r->howto->size, pre_reloc_len);
			return NO_MATCH;
		}
		if (r->howto->size != run_reloc_len &&
		    (r->howto->dst_mask != N_BITS(r->howto->size * 8) ||
		     r->howto->rightshift != 0)) {
			/* Reloc types unsupported with differing reloc sizes */
			ksdebug(change, "ksplice_h: reloc: invalid flags for a "
				"relocation with size changed\n");
			ksdebug(change, "%ld %u\n", r->howto->dst_mask,
				r->howto->rightshift);
			return UNEXPECTED;
		}
		/* Adjust for the differing relocation size */
		run_howto.size = run_reloc_len;
		if (r->howto->size != run_howto.size)
			run_howto.dst_mask = N_BITS(run_howto.size * 8);
		run_reloc.insn_addend += pre_reloc_len - run_reloc_len;
		ret = handle_reloc(change, sect, &run_reloc,
				   (unsigned long)(run + run_off), mode);
		if (ret != OK) {
			if (mode == RUN_PRE_DEBUG)
				ksdebug(change, "Matching failure at offset "
					"%lx\n", pre_offset);
			return ret;
		}
		/* This operand is a successfully processed relocation */
		return OK;
	} else if (ret != NO_MATCH) {
		return ret;
	}
	if (pre_op->type == UD_OP_JIMM) {
		/* Immediate jump without a relocation */
		const unsigned char *pre_target = pre + ud_insn_len(pre_ud) +
		    ud_operand_lval(pre_op);
		const unsigned char *run_target = run + ud_insn_len(run_ud) +
		    ud_operand_lval(run_op);
		if (pre_target >= pre_start &&
		    pre_target < pre_start + sect->size) {
			/* Jump within the current function.
			   Check it's to a corresponding place */
			unsigned long new_pre_offset = pre_target - pre_start;
			unsigned long new_run_offset = run_target - run_start;
			if (mode == RUN_PRE_DEBUG)
				ksdebug(change, "[Jumps: pre=%lx run=%lx "
					"pret=%lx runt=%lx] ", pre_offset,
					run_offset, new_pre_offset,
					new_run_offset);
			if (match_map[pre_target - pre_start] != NULL &&
			    match_map[pre_target - pre_start] != run_target) {
				ksdebug(change, "<--[Jumps to nonmatching "
					"locations]\n");
				return NO_MATCH;
			} else if (match_map[pre_target - pre_start] == NULL) {
				match_map[pre_target - pre_start] = run_target;
				sect->unmatched++;
			}
			return OK;
		} else if (pre_target == run_target) {
			/* Paravirt-inserted pcrel jump; OK! */
			return OK;
		} else {
			if (mode == RUN_PRE_DEBUG) {
				ksdebug(change, "<--Different operands!\n");
				ksdebug(change, "%lx %lx %lx %lx %x %lx %lx "
					"%lx\n", (unsigned long)pre_start,
					(unsigned long)pre_target,
					(unsigned long)pre_start + sect->size,
					(unsigned long)pre, ud_insn_len(pre_ud),
					sect->size, ud_operand_lval(pre_op),
					(unsigned long)run_target);
			}
			return NO_MATCH;
		}
	} else if (ud_operand_len(pre_op) == ud_operand_len(run_op) &&
		   memcmp(pre + pre_off, run + run_off,
			  ud_operand_len(run_op)) == 0) {
		return OK;
	} else {
		if (mode == RUN_PRE_DEBUG)
			ksdebug(change, "<--Different operands!\n");
		return NO_MATCH;
	}
}
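
/* Common udis86 setup: disassemble at the kernel's word size and skip
 * syntax translation (ud_set_syntax(ud, NULL)), since we only need the
 * decoded instruction structure, not assembly text. */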
static void initialize_ksplice_ud(struct ud *ud)
{
	ud_init(ud);
	ud_set_mode(ud, BITS_PER_LONG);
	ud_set_syntax(ud, NULL);
	ud_set_pc(ud, 0);
	ud_set_vendor(ud, UD_VENDOR_ANY);
}
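
/* With CONFIG_FTRACE, functions begin with a call to mcount (or, once
 * tracing is active, to ftrace_trace_function), and dynamic ftrace may
 * rewrite that site at runtime.  Treating such calls like nops keeps
 * matching independent of the current tracing state. */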
#ifdef CONFIG_FTRACE
static bool is_mcount_call(struct ud *ud, const unsigned char *addr)
{
	const void *target =
	    addr + ud_insn_len(ud) + ud_operand_lval(&ud->operand[0]);
	if (ud->mnemonic == UD_Icall &&
	    (target == mcount || target == ftrace_trace_function))
		return true;
	return false;
}
#else /* !CONFIG_FTRACE */
static bool is_mcount_call(struct ud *ud, const unsigned char *addr)
{
	return false;
}
#endif /* CONFIG_FTRACE */
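
/* Besides the one-byte 0x90, 32-bit kernels pad with multi-byte nop
 * idioms such as movl %esi, %esi; xchgl %eax, %eax; leal 0(%esi), %esi;
 * and "jmp +N" over N 0x90 bytes, so all of those are recognized here. */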
static bool is_nop(struct ud *ud, const unsigned char *addr)
{
	switch (ud->mnemonic) {
	case UD_Inop:
		return true;
	case UD_Imov:
	case UD_Ixchg:
		return ud->dis_mode == 32 &&
		    ud->operand[0].type == UD_OP_REG &&
		    ud->operand[1].type == UD_OP_REG &&
		    ud->operand[2].type == UD_NONE &&
		    ud->operand[0].base == ud->operand[1].base;
	case UD_Ilea:
		return ud->dis_mode == 32 &&
		    ud->operand[0].type == UD_OP_REG &&
		    ud->operand[1].type == UD_OP_MEM &&
		    ((ud->operand[1].base == ud->operand[0].base &&
		      ud->operand[1].index == UD_NONE) ||
		     (ud->operand[1].base == UD_NONE &&
		      ud->operand[1].index == ud->operand[0].base &&
		      ud->operand[1].scale == 0)) &&
		    ud_operand_lval(&ud->operand[1]) == 0 &&
		    ud->operand[2].type == UD_NONE;
	case UD_Ijmp:
		/* jmp +N followed by N 0x90s is a NOP */
		if (ud->operand[0].type == UD_OP_JIMM &&
		    ud->operand[1].type == UD_NONE &&
		    ud->operand[2].type == UD_NONE &&
		    ud_operand_len(&ud->operand[0]) == 1) {
			/* struct ud is big so we avoid putting it on the stack.
			 * This is safe because we are holding module_mutex. */
			static struct ud temp_ud;
			int len = ud_operand_lval(&ud->operand[0]);
			int i;

			if (len < 0 || len > 13)
				return false;

			initialize_ksplice_ud(&temp_ud);
			ud_set_input_hook(&temp_ud, next_run_byte);
			ud_set_user_opaque_data(&temp_ud,
						(unsigned char *)addr +
						ud_insn_len(ud));

			for (i = 0; i < len; i++) {
				if (ud_disassemble(&temp_ud) == 0)
					return false;
				if (temp_ud.mnemonic != UD_Inop)
					return false;
			}
			return true;
		}
	default:
		return false;
	}
}

static bool is_unconditional_jump(struct ud *ud)
{
	switch (ud->mnemonic) {
	case UD_Ijmp:
	case UD_Iret:
	case UD_Iretf:
	case UD_Iiretw:
	case UD_Iiretd:
	case UD_Iiretq:
	case UD_Isysexit:
	case UD_Isysret:
	case UD_Isyscall:
	case UD_Isysenter:
		return true;
	default:
		return false;
	}
}
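
/* Number of displacement/immediate bytes the operand contributes to
 * the instruction encoding: udis86 reports a memory operand's
 * displacement width in ->offset (in bits) and other operands' widths
 * in ->size; register operands add no bytes. */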
static uint8_t ud_operand_len(struct ud_operand *operand)
{
	if (operand->type == UD_OP_MEM)
		return operand->offset / 8;
	if (operand->type == UD_OP_REG)
		return 0;
	return operand->size / 8;
}
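
/* Bytes preceding the displacement/immediate fields: the instruction
 * length minus every operand's trailing bytes, i.e. the prefixes,
 * opcode, ModRM, and SIB bytes. */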
static uint8_t ud_prefix_len(struct ud *ud)
{
	int len = ud_insn_len(ud);
	int i;
	for (i = 0; i < ARRAY_SIZE(ud->operand); i++)
		len -= ud_operand_len(&ud->operand[i]);
	return len;
}

static long ud_operand_lval(struct ud_operand *operand)
{
	switch (operand->type == UD_OP_MEM ? operand->offset : operand->size) {
	case 8:
		return operand->lval.sbyte;
	case 16:
		return operand->lval.sword;
	case 32:
		return operand->lval.sdword;
	case 64:
		return operand->lval.sqword;
	default:
		return 0;
	}
}

static int next_run_byte(struct ud *ud)
{
	unsigned char byte;
	if (probe_kernel_read(&byte, ud_get_user_opaque_data(ud), 1) == -EFAULT)
		return UD_EOI;
	ud_set_user_opaque_data(ud, ud_get_user_opaque_data(ud) + 1);
	return byte;
}
#endif /* !CONFIG_FUNCTION_DATA_SECTIONS */

static struct ksplice_symbol trampoline_symbol = {
	.name = NULL,
	.label = "<trampoline>",
};

static const struct ksplice_reloc_howto trampoline_howto = {
	.type = KSPLICE_HOWTO_RELOC,
	.pcrel = 1,
	.size = 4,
	.dst_mask = 0xffffffffL,
	.rightshift = 0,
	.signed_addend = 1,
};

static const struct ksplice_reloc trampoline_reloc = {
	.symbol = &trampoline_symbol,
	.insn_addend = -4,
	.target_addend = 0,
	.howto = &trampoline_howto,
};
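
/* A trampoline is the 5-byte near jump e9 <rel32>, with rel32 measured
 * from the end of the instruction: rel32 = target - (addr + 5).  The
 * reloc above describes the 4-byte pc-relative field at addr + 1, and
 * insn_addend = -4 expresses that the displacement is relative to 4
 * bytes past that field.  For example, a jump at 0xc0100000 targeting
 * 0xc0200000 encodes rel32 = 0x000ffffb: e9 fb ff 0f 00. */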
static abort_t trampoline_target(struct ksplice_mod_change *change,
				 unsigned long addr, unsigned long *new_addr)
{
	abort_t ret;
	unsigned char byte;

	if (probe_kernel_read(&byte, (void *)addr, sizeof(byte)) == -EFAULT)
		return NO_MATCH;

	if (byte != 0xe9)
		return NO_MATCH;

	ret = read_reloc_value(change, &trampoline_reloc, addr + 1, new_addr);
	if (ret != OK)
		return ret;

	*new_addr += addr + 1;
	return OK;
}

static abort_t prepare_trampoline(struct ksplice_mod_change *change,
				  struct ksplice_patch *p)
{
	p->size = 5;
	((unsigned char *)p->contents)[0] = 0xe9;
	return write_reloc_value(change, &trampoline_reloc,
				 (unsigned long)p->contents + 1,
				 p->repladdr - (p->oldaddr + 1));
}
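
/* At boot, paravirt patching may rewrite a pv-ops call site in place
 * into a direct call (e8) or jump (e9).  Two sites are considered to
 * match when both are the same kind of direct transfer to the same
 * target; *matched then reports the 5 patched bytes, else 0.
 * (Comparing addr + 1 + rel32 instead of the true target addr + 5 +
 * rel32 is fine: both sides are off by the same 4 bytes.) */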
static abort_t handle_paravirt(struct ksplice_mod_change *change,
			       unsigned long pre_addr, unsigned long run_addr,
			       int *matched)
{
	unsigned char run[5], pre[5];
	*matched = 0;

	if (probe_kernel_read(&run, (void *)run_addr, sizeof(run)) == -EFAULT ||
	    probe_kernel_read(&pre, (void *)pre_addr, sizeof(pre)) == -EFAULT)
		return OK;

	if ((run[0] == 0xe8 && pre[0] == 0xe8) ||
	    (run[0] == 0xe9 && pre[0] == 0xe9))
		if (run_addr + 1 + *(int32_t *)&run[1] ==
		    pre_addr + 1 + *(int32_t *)&pre[1])
			*matched = 5;
	return OK;
}

static bool valid_stack_ptr(const struct thread_info *tinfo, const void *p)
{
	return p > (const void *)tinfo
	    && p <= (const void *)tinfo + THREAD_SIZE - sizeof(long);
}
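
/* Walk the kernel page tables (pgd, optionally pud, pmd, pte) to check
 * whether a virtual address is currently mapped before reading through
 * it; pmd_large() covers large-page mappings, which have no pte level. */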
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static bool virtual_address_mapped(unsigned long addr)
{
	pgd_t *pgd;
#ifdef pud_page
	pud_t *pud;
#endif /* pud_page */
	pmd_t *pmd;
	pte_t *pte;

#ifdef KSPLICE_STANDALONE
	if (!bootstrapped)
		return true;
#endif /* KSPLICE_STANDALONE */

	pgd = pgd_offset_k(addr);
	if (!pgd_present(*pgd))
		return false;

#ifdef pud_page
	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return false;

	pmd = pmd_offset(pud, addr);
#else /* pud_page */
	pmd = pmd_offset(pgd, addr);
#endif /* pud_page */

	if (!pmd_present(*pmd))
		return false;

	if (pmd_large(*pmd))
		return true;

	pte = pte_offset_kernel(pmd, addr);
	if (!pte_present(*pte))
		return false;

	return true;
}
#endif /* LINUX_VERSION_CODE */