/* [ksplice.git] kmodsrc/ksplice.c
 * blob 8508eb5d21d9f5c60ab096eebf8635b560d1e08b
 * Commit subject: Move the definition of ksplice_kcalloc above its use.
 */
/* Copyright (C) 2008 Jeffrey Brian Arnold <jbarnold@mit.edu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
17 #include <linux/module.h>
18 #include <linux/errno.h>
19 #include <linux/kallsyms.h>
20 #include <linux/kthread.h>
21 #include <linux/proc_fs.h>
22 #include <linux/sched.h>
23 #include <linux/stop_machine.h>
24 #include <linux/time.h>
25 #ifdef KSPLICE_STANDALONE
26 /* linux/uaccess.h doesn't exist in kernels before 2.6.18 */
27 #include <linux/version.h>
28 #include <asm/uaccess.h>
29 #include "ksplice.h"
30 #include "ksplice-run-pre.h"
31 #else
32 #include <linux/uaccess.h>
33 #include "ksplice.h"
34 #include <asm/ksplice-run-pre.h>
35 #endif
37 #ifdef KSPLICE_STANDALONE
39 /* Old kernels do not have kcalloc */
40 #define kcalloc ksplice_kcalloc
41 static inline void *ksplice_kcalloc(size_t n, size_t size,
42 typeof(GFP_KERNEL) flags)
44 char *mem;
45 if (n != 0 && size > ULONG_MAX / n)
46 return NULL;
47 mem = kmalloc(n * size, flags);
48 if (mem)
49 memset(mem, 0, n * size);
50 return mem;
/* Old kernels use semaphore instead of mutex
   97d1f15b7ef52c1e9c28dc48b454024bb53a5fd2 was after 2.6.16 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
#define mutex semaphore
#define mutex_lock down
#define mutex_unlock up
#endif

#ifndef task_thread_info
#define task_thread_info(task) (task)->thread_info
#endif /* task_thread_info */

#ifdef CONFIG_X86
#ifdef __ASM_X86_PROCESSOR_H /* New unified x86 */
#define KSPLICE_IP(x) ((x)->thread.ip)
#define KSPLICE_SP(x) ((x)->thread.sp)
#elif defined CONFIG_X86_64 /* Old x86 64-bit */
/* The IP is on the stack, so we don't need to check it separately.
 * Instead, we need to prevent Ksplice from patching thread_return.
 */
extern const char thread_return[];
#define KSPLICE_IP(x) thread_return
#define KSPLICE_SP(x) ((x)->thread.rsp)
#else /* Old x86 32-bit */
#define KSPLICE_IP(x) ((x)->thread.eip)
#define KSPLICE_SP(x) ((x)->thread.esp)
#endif /* __ASM_X86_PROCESSOR_H */
#endif /* CONFIG_X86 */

/* Set once the init relocations have been processed (standalone mode). */
static int bootstrapped = 0;

#ifdef CONFIG_KALLSYMS
extern unsigned long kallsyms_addresses[], kallsyms_num_syms;
extern u8 kallsyms_names[];
#endif /* CONFIG_KALLSYMS */

/* defined by ksplice-create */
extern struct ksplice_reloc ksplice_init_relocs;

/* Obtained via System.map */
extern struct list_head modules;
extern struct mutex module_mutex;

#else /* KSPLICE_STANDALONE */
#define KSPLICE_IP(x) ((x)->thread.ip)
#define KSPLICE_SP(x) ((x)->thread.sp)
#endif /* KSPLICE_STANDALONE */
101 static int debug;
102 module_param(debug, int, 0600);
104 void cleanup_ksplice_module(struct module_pack *pack)
106 remove_proc_entry(pack->name, &proc_root);
109 int activate_primary(struct module_pack *pack)
111 int i, ret;
112 struct proc_dir_entry *proc_entry;
114 if (process_ksplice_relocs(pack, pack->primary_relocs, 0) != 0)
115 return -1;
117 if (resolve_patch_symbols(pack) != 0)
118 return -1;
120 #ifdef CONFIG_MODULE_UNLOAD
121 if (add_patch_dependencies(pack) != 0)
122 return -1;
123 #endif
125 proc_entry = create_proc_entry(pack->name, 0644, NULL);
126 if (proc_entry == NULL) {
127 print_abort("primary module: could not create proc entry");
128 return -1;
131 proc_entry->read_proc = procfile_read;
132 proc_entry->write_proc = procfile_write;
133 proc_entry->data = pack;
134 proc_entry->owner = pack->primary;
135 proc_entry->mode = S_IFREG | S_IRUSR | S_IWUSR;
136 proc_entry->uid = 0;
137 proc_entry->gid = 0;
138 proc_entry->size = 0;
140 for (i = 0; i < 5; i++) {
141 bust_spinlocks(1);
142 ret = stop_machine_run(__apply_patches, pack, NR_CPUS);
143 bust_spinlocks(0);
144 if (ret != -EAGAIN)
145 break;
146 set_current_state(TASK_INTERRUPTIBLE);
147 schedule_timeout(msecs_to_jiffies(1000));
149 if (pack->state != KSPLICE_APPLIED) {
150 remove_proc_entry(pack->name, &proc_root);
151 if (ret == -EAGAIN)
152 print_abort("stack check: to-be-replaced code is busy");
153 return -1;
156 printk(KERN_INFO "ksplice: Update %s applied successfully\n",
157 pack->name);
158 return 0;
161 int resolve_patch_symbols(struct module_pack *pack)
163 struct ksplice_patch *p;
164 int ret;
165 LIST_HEAD(vals);
167 for (p = pack->patches; p->oldstr; p++) {
168 p->saved = kmalloc(5, GFP_KERNEL);
169 if (p->saved == NULL) {
170 print_abort("out of memory");
171 return -ENOMEM;
174 ret = compute_address(pack, p->oldstr, &vals, 0);
175 if (ret < 0)
176 return ret;
178 if (!singular(&vals)) {
179 release_vals(&vals);
180 failed_to_find(p->oldstr);
181 return -1;
183 p->oldaddr =
184 list_entry(vals.next, struct candidate_val, list)->val;
185 release_vals(&vals);
188 return 0;
/* /proc read handler: the ksplice control file exposes nothing to read,
 * so always report zero bytes. */
int procfile_read(char *buffer, char **buffer_location,
		  off_t offset, int buffer_length, int *eof, void *data)
{
	return 0;
}
197 int procfile_write(struct file *file, const char *buffer, unsigned long count,
198 void *data)
200 int i, ret;
201 struct module_pack *pack = data;
202 printk(KERN_INFO "ksplice: Preparing to reverse %s\n", pack->name);
204 if (pack->state != KSPLICE_APPLIED)
205 return count;
207 for (i = 0; i < 5; i++) {
208 bust_spinlocks(1);
209 ret = stop_machine_run(__reverse_patches, pack, NR_CPUS);
210 bust_spinlocks(0);
211 if (ret != -EAGAIN)
212 break;
213 set_current_state(TASK_INTERRUPTIBLE);
214 schedule_timeout(msecs_to_jiffies(1000));
216 if (ret == -EAGAIN)
217 print_abort("stack check: to-be-reversed code is busy");
218 else if (ret == 0)
219 printk(KERN_INFO "ksplice: Update %s reversed successfully\n",
220 pack->name);
221 else if (ret == -EBUSY)
222 printk(KERN_ERR "ksplice: Update module %s is in use by "
223 "another module\n", pack->name);
225 return count;
228 int __apply_patches(void *packptr)
230 struct module_pack *pack = packptr;
231 struct ksplice_patch *p;
232 struct safety_record *rec;
233 mm_segment_t old_fs;
235 list_for_each_entry(rec, pack->safety_records, list) {
236 for (p = pack->patches; p->oldstr; p++) {
237 if (p->oldaddr == rec->addr)
238 rec->care = 1;
242 if (check_each_task(pack) < 0)
243 return -EAGAIN;
245 if (!try_module_get(pack->primary))
246 return -ENODEV;
248 pack->state = KSPLICE_APPLIED;
250 old_fs = get_fs();
251 set_fs(KERNEL_DS);
252 for (p = pack->patches; p->oldstr; p++) {
253 memcpy((void *)p->saved, (void *)p->oldaddr, 5);
254 *((u8 *) p->oldaddr) = 0xE9;
255 *((u32 *) (p->oldaddr + 1)) = p->repladdr - (p->oldaddr + 5);
256 flush_icache_range((unsigned long)p->oldaddr,
257 (unsigned long)(p->oldaddr + 5));
259 set_fs(old_fs);
260 return 0;
263 int __reverse_patches(void *packptr)
265 struct module_pack *pack = packptr;
266 struct ksplice_patch *p;
267 mm_segment_t old_fs;
269 if (pack->state != KSPLICE_APPLIED)
270 return 0;
272 #ifdef CONFIG_MODULE_UNLOAD
273 if (module_refcount(pack->primary) != 2)
274 return -EBUSY;
275 #endif
277 if (check_each_task(pack) < 0)
278 return -EAGAIN;
280 clear_list(pack->safety_records, struct safety_record, list);
281 pack->state = KSPLICE_REVERSED;
282 module_put(pack->primary);
284 old_fs = get_fs();
285 set_fs(KERNEL_DS);
286 for (p = pack->patches; p->oldstr; p++) {
287 memcpy((void *)p->oldaddr, (void *)p->saved, 5);
288 kfree(p->saved);
289 flush_icache_range((unsigned long)p->oldaddr,
290 (unsigned long)(p->oldaddr + 5));
292 set_fs(old_fs);
293 return 0;
296 int check_each_task(struct module_pack *pack)
298 struct task_struct *g, *p;
299 int status = 0;
300 read_lock(&tasklist_lock);
301 do_each_thread(g, p) {
302 /* do_each_thread is a double loop! */
303 if (check_task(pack, p) < 0) {
304 if (debug == 1) {
305 debug = 2;
306 check_task(pack, p);
307 debug = 1;
309 status = -EAGAIN;
312 while_each_thread(g, p);
313 read_unlock(&tasklist_lock);
314 return status;
317 int check_task(struct module_pack *pack, struct task_struct *t)
319 int status, ret;
321 ksplice_debug(2, KERN_DEBUG "ksplice: stack check: pid %d (%s) eip "
322 "%08lx ", t->pid, t->comm, KSPLICE_IP(t));
323 status = check_address_for_conflict(pack, KSPLICE_IP(t));
324 ksplice_debug(2, ": ");
326 if (t == current) {
327 ret = check_stack(pack, task_thread_info(t),
328 (long *)__builtin_frame_address(0));
329 if (status == 0)
330 status = ret;
331 } else if (!task_curr(t)) {
332 ret = check_stack(pack, task_thread_info(t),
333 (long *)KSPLICE_SP(t));
334 if (status == 0)
335 status = ret;
336 } else if (strcmp(t->comm, "kstopmachine") != 0) {
337 ksplice_debug(2, "unexpected running task!");
338 status = -ENODEV;
340 ksplice_debug(2, "\n");
341 return status;
344 /* Modified version of Linux's print_context_stack */
345 int check_stack(struct module_pack *pack, struct thread_info *tinfo,
346 long *stack)
348 int status = 0;
349 long addr;
351 while (valid_stack_ptr(tinfo, stack)) {
352 addr = *stack++;
353 if (__kernel_text_address(addr)) {
354 ksplice_debug(2, "%08lx ", addr);
355 if (check_address_for_conflict(pack, addr) < 0)
356 status = -EAGAIN;
359 return status;
362 int check_address_for_conflict(struct module_pack *pack, long addr)
364 struct ksplice_size *s = pack->primary_sizes;
365 struct safety_record *rec;
367 /* It is safe for addr to point to the beginning of a patched
368 function, because that location will be overwritten with a
369 trampoline. */
370 list_for_each_entry(rec, pack->safety_records, list) {
371 if (rec->care == 1 && addr > rec->addr
372 && addr < rec->addr + rec->size) {
373 ksplice_debug(2, "[<-- CONFLICT] ");
374 return -EAGAIN;
377 for (; s->name != NULL; s++) {
378 if (addr >= s->thismod_addr
379 && addr < s->thismod_addr + s->size) {
380 ksplice_debug(2, "[<-- CONFLICT] ");
381 return -EAGAIN;
384 return 0;
387 /* Modified version of Linux's valid_stack_ptr */
388 int valid_stack_ptr(struct thread_info *tinfo, void *p)
390 return p > (void *)tinfo
391 && p <= (void *)tinfo + THREAD_SIZE - sizeof(long);
394 int init_ksplice_module(struct module_pack *pack)
396 int ret = 0;
397 #ifdef KSPLICE_STANDALONE
398 if (process_ksplice_relocs(pack, &ksplice_init_relocs, 1) != 0)
399 return -1;
400 bootstrapped = 1;
401 #endif
403 printk(KERN_INFO "ksplice_h: Preparing and checking %s\n", pack->name);
405 if (activate_helper(pack) != 0 || activate_primary(pack) != 0)
406 ret = -1;
408 clear_list(pack->reloc_namevals, struct reloc_nameval, list);
409 clear_list(pack->reloc_addrmaps, struct reloc_addrmap, list);
410 if (pack->state == KSPLICE_PREPARING)
411 clear_list(pack->safety_records, struct safety_record, list);
413 return ret;
416 int activate_helper(struct module_pack *pack)
418 struct ksplice_size *s;
419 int i, record_count = 0, ret;
420 char *finished;
421 int numfinished, oldfinished = 0;
422 int restart_count = 0;
424 if (process_ksplice_relocs(pack, pack->helper_relocs, 1) != 0)
425 return -1;
427 for (s = pack->helper_sizes; s->name != NULL; s++)
428 record_count++;
430 finished = kcalloc(record_count, 1, GFP_KERNEL);
431 if (finished == NULL) {
432 print_abort("out of memory");
433 return -ENOMEM;
436 start:
437 for (s = pack->helper_sizes, i = 0; s->name != NULL; s++, i++) {
438 if (s->size == 0)
439 finished[i] = 1;
440 if (finished[i])
441 continue;
443 ret = search_for_match(pack, s);
444 if (ret < 0) {
445 kfree(finished);
446 return ret;
447 } else if (ret > 0) {
448 finished[i] = 1;
452 numfinished = 0;
453 for (i = 0; i < record_count; i++) {
454 if (finished[i])
455 numfinished++;
457 if (numfinished == record_count) {
458 kfree(finished);
459 return 0;
462 if (oldfinished == numfinished) {
463 for (s = pack->helper_sizes, i = 0; s->name != NULL; s++, i++) {
464 if (finished[i] == 0)
465 ksplice_debug(2, KERN_DEBUG "ksplice: run-pre: "
466 "could not match section %s\n",
467 s->name);
469 print_abort("run-pre: could not match some sections");
470 kfree(finished);
471 return -1;
473 oldfinished = numfinished;
475 if (restart_count < 20) {
476 restart_count++;
477 goto start;
479 print_abort("run-pre: restart limit exceeded");
480 kfree(finished);
481 return -1;
484 int search_for_match(struct module_pack *pack, struct ksplice_size *s)
486 int i, ret;
487 long run_addr;
488 LIST_HEAD(vals);
489 struct candidate_val *v;
491 for (i = 0; i < s->num_sym_addrs; i++) {
492 ret = add_candidate_val(&vals, s->sym_addrs[i]);
493 if (ret < 0)
494 return ret;
497 ret = compute_address(pack, s->name, &vals, 1);
498 if (ret < 0)
499 return ret;
501 ksplice_debug(3, KERN_DEBUG "ksplice_h: run-pre: starting sect search "
502 "for %s\n", s->name);
504 list_for_each_entry(v, &vals, list) {
505 run_addr = v->val;
507 yield();
508 ret = try_addr(pack, s, run_addr, s->thismod_addr);
509 if (ret != 0) {
510 /* we've encountered a match (> 0) or an error (< 0) */
511 release_vals(&vals);
512 return ret;
515 release_vals(&vals);
517 #ifdef KSPLICE_STANDALONE
518 ret = brute_search_all(pack, s);
519 #endif
520 return ret;
523 int try_addr(struct module_pack *pack, struct ksplice_size *s, long run_addr,
524 long pre_addr)
526 struct safety_record *tmp;
527 struct reloc_nameval *nv;
529 if (run_pre_cmp(pack, run_addr, pre_addr, s->size, 0) != 0) {
530 set_temp_myst_relocs(pack, NOVAL);
531 ksplice_debug(1, KERN_DEBUG "ksplice_h: run-pre: sect %s does "
532 "not match ", s->name);
533 ksplice_debug(1, "(r_a=%08lx p_a=%08lx s=%ld)\n",
534 run_addr, pre_addr, s->size);
535 ksplice_debug(1, "ksplice_h: run-pre: ");
536 if (debug >= 1)
537 run_pre_cmp(pack, run_addr, pre_addr, s->size, 1);
538 ksplice_debug(1, "\n");
539 } else {
540 set_temp_myst_relocs(pack, VAL);
542 ksplice_debug(3, KERN_DEBUG "ksplice_h: run-pre: found sect "
543 "%s=%08lx\n", s->name, run_addr);
545 tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
546 if (tmp == NULL) {
547 print_abort("out of memory");
548 return -ENOMEM;
550 tmp->addr = run_addr;
551 tmp->size = s->size;
552 tmp->care = 0;
553 list_add(&tmp->list, pack->safety_records);
555 nv = find_nameval(pack, s->name, 1);
556 if (nv == NULL)
557 return -ENOMEM;
558 nv->val = run_addr;
559 nv->status = VAL;
561 return 1;
563 return 0;
566 int handle_myst_reloc(long pre_addr, int *pre_o, long run_addr,
567 int *run_o, struct reloc_addrmap *map, int rerun)
569 int expected;
570 int offset = (int)(pre_addr + *pre_o - map->addr);
571 long run_reloc = 0;
572 long run_reloc_addr;
573 run_reloc_addr = run_addr + *run_o - offset;
574 if (map->size == 4)
575 run_reloc = *(int *)run_reloc_addr;
576 else if (map->size == 8)
577 run_reloc = *(long long *)run_reloc_addr;
578 else
579 BUG();
581 if (debug >= 3 && !rerun) {
582 printk(KERN_DEBUG "ksplice_h: run-pre: reloc at r_a=%08lx "
583 "p_o=%08x: ", run_addr, *pre_o);
584 printk("%s=%08lx (A=%08lx *r=%08lx)\n",
585 map->nameval->name, map->nameval->val,
586 map->addend, run_reloc);
589 if (!starts_with(map->nameval->name, ".rodata.str")) {
590 expected = run_reloc - map->addend;
591 if ((int)run_reloc == 0x77777777)
592 return 1;
593 if (map->pcrel)
594 expected += run_reloc_addr;
595 if (map->nameval->status == NOVAL) {
596 map->nameval->val = expected;
597 map->nameval->status = TEMP;
598 } else if (map->nameval->val != expected) {
599 if (rerun)
600 return 1;
601 printk(KERN_DEBUG "ksplice_h: pre-run reloc: Expected "
602 "%s=%08x!\n", map->nameval->name, expected);
603 return 1;
607 *pre_o += map->size - offset;
608 *run_o += map->size - offset;
609 return 0;
612 int process_ksplice_relocs(struct module_pack *pack,
613 struct ksplice_reloc *relocs, int pre)
615 struct ksplice_reloc *r;
616 for (r = relocs; r->sym_name != NULL; r++) {
617 if (process_reloc(pack, r, pre) != 0)
618 return -1;
620 return 0;
623 int process_reloc(struct module_pack *pack, struct ksplice_reloc *r, int pre)
625 int i, ret;
626 long off, sym_addr;
627 struct reloc_addrmap *map;
628 const long blank_addr = r->blank_sect_addr + r->blank_offset;
629 LIST_HEAD(vals);
630 #ifdef KSPLICE_STANDALONE
631 /* run_pre_reloc: will this reloc be used for run-pre matching? */
632 const int run_pre_reloc = pre && bootstrapped;
633 #ifndef CONFIG_KALLSYMS
635 if (bootstrapped)
636 goto skip_using_system_map;
637 #endif /* CONFIG_KALLSYMS */
638 #endif /* KSPLICE_STANDALONE */
640 /* Some Fedora kernel releases have System.map files whose symbol
641 * addresses disagree with the running kernel by a constant address
642 * offset because of the CONFIG_PHYSICAL_START and CONFIG_PHYSICAL_ALIGN
643 * values used to compile these kernels. This constant address offset
644 * is always a multiple of 0x100000.
646 * If we observe an offset that is NOT a multiple of 0x100000, then the
647 * user provided us with an incorrect System.map file, and we should
648 * abort.
649 * If we observe an offset that is a multiple of 0x100000, then we can
650 * adjust the System.map address values accordingly and proceed.
652 off = (long)printk - pack->map_printk;
653 if (off & 0xfffff) {
654 print_abort("System.map does not match kernel");
655 return -1;
657 for (i = 0; i < r->num_sym_addrs; i++) {
658 ret = add_candidate_val(&vals, r->sym_addrs[i] + off);
659 if (ret < 0)
660 return ret;
662 #ifndef CONFIG_KALLSYMS
663 skip_using_system_map:
664 #endif
666 if ((r->size == 4 && *(int *)blank_addr != 0x77777777)
667 || (r->size == 8 &&
668 *(long long *)blank_addr != 0x7777777777777777ll)) {
669 ksplice_debug(4, KERN_DEBUG "ksplice%s: reloc: skipped %s:%08lx"
670 " (altinstr)\n", (pre ? "_h" : ""),
671 r->sym_name, r->blank_offset);
672 release_vals(&vals);
673 return 0;
676 ret = compute_address(pack, r->sym_name, &vals, pre);
677 if (ret < 0)
678 return ret;
679 if (!singular(&vals)) {
680 release_vals(&vals);
681 #ifdef KSPLICE_STANDALONE
682 if (!run_pre_reloc) {
683 #else
684 if (!pre) {
685 #endif
686 failed_to_find(r->sym_name);
687 return -1;
690 ksplice_debug(4, KERN_DEBUG "ksplice: reloc: deferred %s:%08lx "
691 "to run-pre\n", r->sym_name, r->blank_offset);
693 map = kmalloc(sizeof(*map), GFP_KERNEL);
694 if (map == NULL) {
695 print_abort("out of memory");
696 return -ENOMEM;
698 map->addr = blank_addr;
699 map->nameval = find_nameval(pack, r->sym_name, 1);
700 if (map->nameval == NULL)
701 return -ENOMEM;
702 map->addend = r->addend;
703 map->pcrel = r->pcrel;
704 map->size = r->size;
705 list_add(&map->list, pack->reloc_addrmaps);
706 return 0;
708 sym_addr = list_entry(vals.next, struct candidate_val, list)->val;
709 release_vals(&vals);
711 #ifdef CONFIG_MODULE_UNLOAD
712 if (!pre) {
713 ret = add_dependency_on_address(pack, sym_addr);
714 if (ret < 0)
715 return ret;
717 #endif
719 #ifdef KSPLICE_STANDALONE
720 if (r->pcrel && run_pre_reloc) {
721 #else
722 if (r->pcrel && pre) {
723 #endif
724 map = kmalloc(sizeof(*map), GFP_KERNEL);
725 if (map == NULL) {
726 print_abort("out of memory");
727 return -ENOMEM;
729 map->addr = blank_addr;
730 map->nameval = find_nameval(pack, "ksplice_zero", 1);
731 if (map->nameval == NULL)
732 return -ENOMEM;
733 map->nameval->val = 0;
734 map->nameval->status = VAL;
735 map->addend = sym_addr + r->addend;
736 map->size = r->size;
737 map->pcrel = r->pcrel;
738 list_add(&map->list, pack->reloc_addrmaps);
740 } else {
741 long val;
742 if (r->pcrel)
743 val = sym_addr + r->addend - blank_addr;
744 else
745 val = sym_addr + r->addend;
746 if (r->size == 4)
747 *(int *)blank_addr = val;
748 else if (r->size == 8)
749 *(long long *)blank_addr = val;
750 else
751 BUG();
754 ksplice_debug(4, KERN_DEBUG "ksplice%s: reloc: %s:%08lx ",
755 (pre ? "_h" : ""), r->sym_name, r->blank_offset);
756 ksplice_debug(4, "(S=%08lx A=%08lx ", sym_addr, r->addend);
757 if (r->size == 4)
758 ksplice_debug(4, "aft=%08x)\n", *(int *)blank_addr);
759 else if (r->size == 8)
760 ksplice_debug(4, "aft=%016llx)\n", *(long long *)blank_addr);
761 else
762 BUG();
763 return 0;
766 #ifdef CONFIG_MODULE_UNLOAD
767 int add_dependency_on_address(struct module_pack *pack, long addr)
769 struct module *m;
770 int ret = 0;
771 mutex_lock(&module_mutex);
772 m = module_text_address(addr);
773 if (m == NULL || starts_with(m->name, pack->name) ||
774 ends_with(m->name, "_helper"))
775 ret = 0;
776 else if (use_module(pack->primary, m) != 1)
777 ret = -EBUSY;
778 mutex_unlock(&module_mutex);
779 return ret;
782 int add_patch_dependencies(struct module_pack *pack)
784 int ret;
785 struct ksplice_patch *p;
786 for (p = pack->patches; p->oldstr; p++) {
787 ret = add_dependency_on_address(pack, p->oldaddr);
788 if (ret < 0)
789 return ret;
791 return 0;
794 #ifdef KSPLICE_STANDALONE
795 /* Essentially, code from module.c; we use directly use_module and module_text_address */
796 struct module *module_text_address(unsigned long addr)
798 struct module *m;
799 list_for_each_entry(m, &modules, list) {
800 if ((addr >= (unsigned long)m->module_core &&
801 addr < (unsigned long)m->module_core + m->core_size) ||
802 (addr >= (unsigned long)m->module_init &&
803 addr < (unsigned long)m->module_init + m->init_size))
804 return m;
806 return NULL;
/* One edge in the module dependency graph: an entry on
 * b->modules_which_use_me recording that module_which_uses depends on b. */
struct module_use {
	struct list_head list;
	struct module *module_which_uses;
};
814 /* I'm not yet certain whether we need the strong form of this. */
815 static inline int strong_try_module_get(struct module *mod)
817 if (mod && mod->state != MODULE_STATE_LIVE)
818 return -EBUSY;
819 if (try_module_get(mod))
820 return 0;
821 return -ENOENT;
824 /* Does a already use b? */
825 static int already_uses(struct module *a, struct module *b)
827 struct module_use *use;
828 list_for_each_entry(use, &b->modules_which_use_me, list) {
829 if (use->module_which_uses == a)
830 return 1;
832 return 0;
835 /* Make it so module a uses b. Must be holding module_mutex */
836 int use_module(struct module *a, struct module *b)
838 struct module_use *use;
839 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
840 /* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
841 int no_warn;
842 #endif
843 if (b == NULL || already_uses(a, b))
844 return 1;
846 if (strong_try_module_get(b) < 0)
847 return 0;
849 ksplice_debug(4, "Allocating new usage for %s.\n", a->name);
850 use = kmalloc(sizeof(*use), GFP_ATOMIC);
851 if (!use) {
852 printk("%s: out of memory adding dependencies\n", a->name);
853 module_put(b);
854 return 0;
856 use->module_which_uses = a;
857 list_add(&use->list, &b->modules_which_use_me);
858 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
859 /* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
860 no_warn = sysfs_create_link(b->holders_dir, &a->mkobj.kobj, a->name);
861 #endif
862 return 1;
864 #endif /* KSPLICE_STANDALONE */
865 #endif /* CONFIG_MODULE_UNLOAD */
867 int compute_address(struct module_pack *pack, char *sym_name,
868 struct list_head *vals, int pre)
870 int i, ret;
871 const char *prefix[] = { ".text.", ".bss.", ".data.", NULL };
872 #ifdef KSPLICE_STANDALONE
873 if (!bootstrapped)
874 return 0;
875 #endif
877 if (!pre) {
878 struct reloc_nameval *nv = find_nameval(pack, sym_name, 0);
879 if (nv != NULL && nv->status != NOVAL) {
880 release_vals(vals);
881 ret = add_candidate_val(vals, nv->val);
882 if (ret < 0)
883 return ret;
884 ksplice_debug(1, KERN_DEBUG "ksplice: using detected "
885 "sym %s=%08lx\n", sym_name, nv->val);
886 return 0;
890 if (starts_with(sym_name, ".rodata"))
891 return 0;
893 #ifdef CONFIG_KALLSYMS
894 ret = kernel_lookup(sym_name, vals);
895 if (ret < 0)
896 return ret;
897 ret = other_module_lookup(sym_name, vals, pack->name);
898 if (ret < 0)
899 return ret;
900 #endif
902 for (i = 0; prefix[i] != NULL; i++) {
903 if (starts_with(sym_name, prefix[i])) {
904 ret = compute_address(pack, sym_name +
905 strlen(prefix[i]), vals, pre);
906 if (ret < 0)
907 return ret;
910 return 0;
#ifdef CONFIG_KALLSYMS
/* Search every loaded module (except our own update modules and any
 * *_helper) for symbols matching name_wlabel, accumulating addresses
 * into vals. */
int other_module_lookup(const char *name_wlabel, struct list_head *vals,
			const char *ksplice_name)
{
	struct accumulate_struct acc = { dup_wolabel(name_wlabel), vals };
	struct module *m;
	int ret = 0;

	if (acc.desired_name == NULL)
		return -ENOMEM;

	mutex_lock(&module_mutex);
	list_for_each_entry(m, &modules, list) {
		if (starts_with(m->name, ksplice_name) ||
		    ends_with(m->name, "_helper"))
			continue;
		ret = module_on_each_symbol(m, accumulate_matching_names,
					    &acc);
		if (ret < 0)
			break;
	}
	mutex_unlock(&module_mutex);

	kfree(acc.desired_name);
	return ret;
}
#endif /* CONFIG_KALLSYMS */
941 int accumulate_matching_names(void *data, const char *sym_name, long sym_val)
943 int ret = 0;
944 struct accumulate_struct *acc = data;
946 if (strncmp(sym_name, acc->desired_name, strlen(acc->desired_name)) !=
948 return 0;
950 sym_name = dup_wolabel(sym_name);
951 if (sym_name == NULL)
952 return -ENOMEM;
953 /* TODO: possibly remove "&& sym_val != 0" */
954 if (strcmp(sym_name, acc->desired_name) == 0 && sym_val != 0)
955 ret = add_candidate_val(acc->vals, sym_val);
956 kfree(sym_name);
957 return ret;
960 #ifdef KSPLICE_STANDALONE
961 int brute_search_all(struct module_pack *pack, struct ksplice_size *s)
963 struct module *m;
964 int ret = 0;
965 int saved_debug;
966 char *where = NULL;
968 ksplice_debug(2, KERN_DEBUG "ksplice: brute_search: searching for %s\n",
969 s->name);
970 saved_debug = debug;
971 debug = 0;
973 mutex_lock(&module_mutex);
974 list_for_each_entry(m, &modules, list) {
975 if (starts_with(m->name, pack->name) ||
976 ends_with(m->name, "_helper"))
977 continue;
978 if (brute_search(pack, s, m->module_core, m->core_size) == 0 ||
979 brute_search(pack, s, m->module_init, m->init_size) == 0) {
980 ret = 1;
981 where = m->name;
982 break;
985 mutex_unlock(&module_mutex);
987 if (ret == 0) {
988 if (brute_search(pack, s, (void *)init_mm.start_code,
989 init_mm.end_code - init_mm.start_code) == 0) {
990 ret = 1;
991 where = "vmlinux";
995 if (ret == 1 && saved_debug >= 2)
996 printk(KERN_DEBUG "ksplice: brute_search: found %s in %s\n",
997 s->name, where);
999 debug = saved_debug;
1000 return ret;
1003 #ifdef CONFIG_KALLSYMS
1004 /* Modified version of Linux's kallsyms_lookup_name */
1005 int kernel_lookup(const char *name_wlabel, struct list_head *vals)
1007 int ret;
1008 char namebuf[KSYM_NAME_LEN + 1];
1009 unsigned long i;
1010 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
1011 unsigned long off;
1012 #endif /* LINUX_VERSION_CODE */
1014 const char *name = dup_wolabel(name_wlabel);
1015 if (name == NULL)
1016 return -ENOMEM;
1018 /* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
1019 * 2.6.10 was the first release after this commit
1021 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
1022 for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
1023 off = ksplice_kallsyms_expand_symbol(off, namebuf);
1025 if (strcmp(namebuf, name) == 0) {
1026 ret = add_candidate_val(vals, kallsyms_addresses[i]);
1027 if (ret < 0)
1028 return ret;
1031 #else /* LINUX_VERSION_CODE */
1032 char *knames;
1034 for (i = 0, knames = kallsyms_names; i < kallsyms_num_syms; i++) {
1035 unsigned prefix = *knames++;
1037 strlcpy(namebuf + prefix, knames, KSYM_NAME_LEN - prefix);
1039 if (strcmp(namebuf, name) == 0) {
1040 ret = add_candidate_val(vals, kallsyms_addresses[i]);
1041 if (ret < 0)
1042 return ret;
1045 knames += strlen(knames) + 1;
1047 #endif /* LINUX_VERSION_CODE */
1049 kfree(name);
1050 return 0;
1053 /* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
1054 * 2.6.10 was the first release after this commit
1056 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
1057 extern u8 kallsyms_token_table[];
1058 extern u16 kallsyms_token_index[];
1059 /* Modified version of Linux's kallsyms_expand_symbol */
1060 long ksplice_kallsyms_expand_symbol(unsigned long off, char *result)
1062 long len, skipped_first = 0;
1063 const u8 *tptr, *data;
1065 data = &kallsyms_names[off];
1066 len = *data;
1067 data++;
1069 off += len + 1;
1071 while (len) {
1072 tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
1073 data++;
1074 len--;
1076 while (*tptr) {
1077 if (skipped_first) {
1078 *result = *tptr;
1079 result++;
1080 } else
1081 skipped_first = 1;
1082 tptr++;
1086 *result = '\0';
1088 return off;
1090 #endif /* LINUX_VERSION_CODE */
1092 int module_on_each_symbol(struct module *mod,
1093 int (*fn) (void *, const char *, long), void *data)
1095 unsigned int i;
1096 int ret;
1098 /* TODO: possibly remove this if statement */
1099 if (strlen(mod->name) <= 1)
1100 return 0;
1102 for (i = 0; i < mod->num_symtab; i++) {
1103 if ((ret =
1104 fn(data, mod->strtab + mod->symtab[i].st_name,
1105 mod->symtab[i].st_value) != 0))
1106 return ret;
1108 return 0;
1110 #endif /* CONFIG_KALLSYMS */
#else /* KSPLICE_STANDALONE */
/* In-tree build: expose the update entry points to generated modules. */
EXPORT_SYMBOL_GPL(init_ksplice_module);
EXPORT_SYMBOL_GPL(cleanup_ksplice_module);
/* Module init: nothing to do until an update module calls in. */
int init_module(void)
{
	return 0;
}
1120 void cleanup_module(void)
1124 int kernel_lookup(const char *name_wlabel, struct list_head *vals)
1126 int ret;
1127 struct accumulate_struct acc = { dup_wolabel(name_wlabel), vals };
1128 if (acc.desired_name == NULL)
1129 return -ENOMEM;
1130 ret = kallsyms_on_each_symbol(accumulate_matching_names, &acc);
1131 if (ret < 0)
1132 return ret;
1133 kfree(acc.desired_name);
1134 return 0;
1136 #endif /* KSPLICE_STANDALONE */
1138 int add_candidate_val(struct list_head *vals, long val)
1140 struct candidate_val *tmp, *new;
1142 list_for_each_entry(tmp, vals, list) {
1143 if (tmp->val == val)
1144 return 0;
1146 new = kmalloc(sizeof(*new), GFP_KERNEL);
1147 if (new == NULL) {
1148 print_abort("out of memory");
1149 return -ENOMEM;
1151 new->val = val;
1152 list_add(&new->list, vals);
1153 return 0;
1156 void release_vals(struct list_head *vals)
1158 clear_list(vals, struct candidate_val, list);
1161 struct reloc_nameval *find_nameval(struct module_pack *pack, char *name,
1162 int create)
1164 struct reloc_nameval *nv, *new;
1165 char *newname;
1166 list_for_each_entry(nv, pack->reloc_namevals, list) {
1167 newname = nv->name;
1168 if (starts_with(newname, ".text."))
1169 newname += 6;
1170 if (strcmp(newname, name) == 0)
1171 return nv;
1173 if (!create)
1174 return NULL;
1176 new = kmalloc(sizeof(*new), GFP_KERNEL);
1177 if (new == NULL) {
1178 print_abort("out of memory");
1179 return NULL;
1181 new->name = name;
1182 new->val = 0;
1183 new->status = NOVAL;
1184 list_add(&new->list, pack->reloc_namevals);
1185 return new;
1188 struct reloc_addrmap *find_addrmap(struct module_pack *pack, long addr)
1190 struct reloc_addrmap *map;
1191 list_for_each_entry(map, pack->reloc_addrmaps, list) {
1192 if (addr >= map->addr && addr < map->addr + map->size)
1193 return map;
1195 return NULL;
1198 void set_temp_myst_relocs(struct module_pack *pack, int status_val)
1200 struct reloc_nameval *nv;
1201 list_for_each_entry(nv, pack->reloc_namevals, list) {
1202 if (nv->status == TEMP)
1203 nv->status = status_val;
/* Nonzero iff str begins with prefix (an empty prefix always matches). */
int starts_with(const char *str, const char *prefix)
{
	size_t n = strlen(prefix);
	return strncmp(str, prefix, n) == 0;
}
/* Nonzero iff str ends with suffix (an empty suffix always matches). */
int ends_with(const char *str, const char *suffix)
{
	size_t len = strlen(str);
	size_t slen = strlen(suffix);
	return len >= slen && strcmp(str + (len - slen), suffix) == 0;
}
1218 int label_offset(const char *sym_name)
1220 int i;
1221 for (i = 0;
1222 sym_name[i] != 0 && sym_name[i + 1] != 0 && sym_name[i + 2] != 0
1223 && sym_name[i + 3] != 0; i++) {
1224 if (sym_name[i] == '_' && sym_name[i + 1] == '_'
1225 && sym_name[i + 2] == '_' && sym_name[i + 3] == '_')
1226 return i + 4;
1228 return -1;
1231 const char *dup_wolabel(const char *sym_name)
1233 int offset, entire_strlen, label_strlen, new_strlen;
1234 char *newstr;
1236 offset = label_offset(sym_name);
1237 if (offset == -1)
1238 label_strlen = 0;
1239 else
1240 label_strlen = strlen(&sym_name[offset]) + strlen("____");
1242 entire_strlen = strlen(sym_name);
1243 new_strlen = entire_strlen - label_strlen;
1244 newstr = kmalloc(new_strlen + 1, GFP_KERNEL);
1245 if (newstr == NULL) {
1246 print_abort("out of memory");
1247 return NULL;
1249 memcpy(newstr, sym_name, new_strlen);
1250 newstr[new_strlen] = 0;
1251 return newstr;
/* Module metadata consumed by modinfo and the module loader. */
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jeffrey Brian Arnold <jbarnold@mit.edu>");