/* Copyright (C) 2008 Jeffrey Brian Arnold <jbarnold@mit.edu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
17 #include <linux/module.h>
18 #include <linux/errno.h>
19 #include <linux/kallsyms.h>
20 #include <linux/kthread.h>
21 #include <linux/proc_fs.h>
22 #include <linux/sched.h>
23 #include <linux/stop_machine.h>
24 #include <linux/time.h>
25 #ifdef KSPLICE_STANDALONE
26 /* linux/uaccess.h doesn't exist in kernels before 2.6.18 */
27 #include <linux/version.h>
28 #include <asm/uaccess.h>
30 #include "ksplice-run-pre.h"
32 #include <linux/uaccess.h>
34 #include <asm/ksplice-run-pre.h>
37 #ifdef KSPLICE_STANDALONE
39 /* Old kernels do not have kcalloc */
40 #define kcalloc ksplice_kcalloc
41 static inline void *ksplice_kcalloc(size_t n
, size_t size
,
42 typeof(GFP_KERNEL
) flags
)
45 if (n
!= 0 && size
> ULONG_MAX
/ n
)
47 mem
= kmalloc(n
* size
, flags
);
49 memset(mem
, 0, n
* size
);
53 /* Old kernels use semaphore instead of mutex
54 97d1f15b7ef52c1e9c28dc48b454024bb53a5fd2 was after 2.6.16 */
55 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
56 #define mutex semaphore
57 #define mutex_lock down
58 #define mutex_unlock up
61 #ifndef task_thread_info
62 #define task_thread_info(task) (task)->thread_info
63 #endif /* task_thread_info */
66 #ifdef __ASM_X86_PROCESSOR_H /* New unified x86 */
67 #define KSPLICE_IP(x) ((x)->thread.ip)
68 #define KSPLICE_SP(x) ((x)->thread.sp)
69 #elif defined CONFIG_X86_64 /* Old x86 64-bit */
70 /* The IP is on the stack, so we don't need to check it separately.
71 * Instead, we need to prevent Ksplice from patching thread_return.
73 extern const char thread_return
[];
74 #define KSPLICE_IP(x) thread_return
75 #define KSPLICE_SP(x) ((x)->thread.rsp)
76 #else /* Old x86 32-bit */
77 #define KSPLICE_IP(x) ((x)->thread.eip)
78 #define KSPLICE_SP(x) ((x)->thread.esp)
79 #endif /* __ASM_X86_PROCESSOR_H */
80 #endif /* CONFIG_X86 */
82 static int bootstrapped
= 0;
84 #ifdef CONFIG_KALLSYMS
85 extern unsigned long kallsyms_addresses
[], kallsyms_num_syms
;
86 extern u8 kallsyms_names
[];
87 #endif /* CONFIG_KALLSYMS */
89 /* defined by ksplice-create */
90 extern struct ksplice_reloc ksplice_init_relocs
;
92 /* Obtained via System.map */
93 extern struct list_head modules
;
94 extern struct mutex module_mutex
;
96 #else /* KSPLICE_STANDALONE */
97 #define KSPLICE_IP(x) ((x)->thread.ip)
98 #define KSPLICE_SP(x) ((x)->thread.sp)
99 #endif /* KSPLICE_STANDALONE */
102 module_param(debug
, int, 0600);
104 void cleanup_ksplice_module(struct module_pack
*pack
)
106 remove_proc_entry(pack
->name
, &proc_root
);
109 int activate_primary(struct module_pack
*pack
)
112 struct proc_dir_entry
*proc_entry
;
114 if (process_ksplice_relocs(pack
, pack
->primary_relocs
, 0) != 0)
117 if (resolve_patch_symbols(pack
) != 0)
120 #ifdef CONFIG_MODULE_UNLOAD
121 if (add_patch_dependencies(pack
) != 0)
125 proc_entry
= create_proc_entry(pack
->name
, 0644, NULL
);
126 if (proc_entry
== NULL
) {
127 print_abort("primary module: could not create proc entry");
131 proc_entry
->read_proc
= procfile_read
;
132 proc_entry
->write_proc
= procfile_write
;
133 proc_entry
->data
= pack
;
134 proc_entry
->owner
= pack
->primary
;
135 proc_entry
->mode
= S_IFREG
| S_IRUSR
| S_IWUSR
;
138 proc_entry
->size
= 0;
140 for (i
= 0; i
< 5; i
++) {
142 ret
= stop_machine_run(__apply_patches
, pack
, NR_CPUS
);
146 set_current_state(TASK_INTERRUPTIBLE
);
147 schedule_timeout(msecs_to_jiffies(1000));
149 if (pack
->state
!= KSPLICE_APPLIED
) {
150 remove_proc_entry(pack
->name
, &proc_root
);
152 print_abort("stack check: to-be-replaced code is busy");
156 printk(KERN_INFO
"ksplice: Update %s applied successfully\n",
161 int resolve_patch_symbols(struct module_pack
*pack
)
163 struct ksplice_patch
*p
;
167 for (p
= pack
->patches
; p
->oldstr
; p
++) {
168 p
->saved
= kmalloc(5, GFP_KERNEL
);
169 if (p
->saved
== NULL
) {
170 print_abort("out of memory");
174 ret
= compute_address(pack
, p
->oldstr
, &vals
, 0);
178 if (!singular(&vals
)) {
180 failed_to_find(p
->oldstr
);
184 list_entry(vals
.next
, struct candidate_val
, list
)->val
;
191 int procfile_read(char *buffer
, char **buffer_location
,
192 off_t offset
, int buffer_length
, int *eof
, void *data
)
197 int procfile_write(struct file
*file
, const char *buffer
, unsigned long count
,
201 struct module_pack
*pack
= data
;
202 printk(KERN_INFO
"ksplice: Preparing to reverse %s\n", pack
->name
);
204 if (pack
->state
!= KSPLICE_APPLIED
)
207 for (i
= 0; i
< 5; i
++) {
209 ret
= stop_machine_run(__reverse_patches
, pack
, NR_CPUS
);
213 set_current_state(TASK_INTERRUPTIBLE
);
214 schedule_timeout(msecs_to_jiffies(1000));
217 print_abort("stack check: to-be-reversed code is busy");
219 printk(KERN_INFO
"ksplice: Update %s reversed successfully\n",
221 else if (ret
== -EBUSY
)
222 printk(KERN_ERR
"ksplice: Update module %s is in use by "
223 "another module\n", pack
->name
);
228 int __apply_patches(void *packptr
)
230 struct module_pack
*pack
= packptr
;
231 struct ksplice_patch
*p
;
232 struct safety_record
*rec
;
235 list_for_each_entry(rec
, pack
->safety_records
, list
) {
236 for (p
= pack
->patches
; p
->oldstr
; p
++) {
237 if (p
->oldaddr
== rec
->addr
)
242 if (check_each_task(pack
) < 0)
245 if (!try_module_get(pack
->primary
))
248 pack
->state
= KSPLICE_APPLIED
;
252 for (p
= pack
->patches
; p
->oldstr
; p
++) {
253 memcpy((void *)p
->saved
, (void *)p
->oldaddr
, 5);
254 *((u8
*) p
->oldaddr
) = 0xE9;
255 *((u32
*) (p
->oldaddr
+ 1)) = p
->repladdr
- (p
->oldaddr
+ 5);
256 flush_icache_range((unsigned long)p
->oldaddr
,
257 (unsigned long)(p
->oldaddr
+ 5));
263 int __reverse_patches(void *packptr
)
265 struct module_pack
*pack
= packptr
;
266 struct ksplice_patch
*p
;
269 if (pack
->state
!= KSPLICE_APPLIED
)
272 #ifdef CONFIG_MODULE_UNLOAD
273 if (module_refcount(pack
->primary
) != 2)
277 if (check_each_task(pack
) < 0)
280 clear_list(pack
->safety_records
, struct safety_record
, list
);
281 pack
->state
= KSPLICE_REVERSED
;
282 module_put(pack
->primary
);
286 for (p
= pack
->patches
; p
->oldstr
; p
++) {
287 memcpy((void *)p
->oldaddr
, (void *)p
->saved
, 5);
289 flush_icache_range((unsigned long)p
->oldaddr
,
290 (unsigned long)(p
->oldaddr
+ 5));
296 int check_each_task(struct module_pack
*pack
)
298 struct task_struct
*g
, *p
;
300 read_lock(&tasklist_lock
);
301 do_each_thread(g
, p
) {
302 /* do_each_thread is a double loop! */
303 if (check_task(pack
, p
) < 0) {
312 while_each_thread(g
, p
);
313 read_unlock(&tasklist_lock
);
317 int check_task(struct module_pack
*pack
, struct task_struct
*t
)
321 ksplice_debug(2, KERN_DEBUG
"ksplice: stack check: pid %d (%s) eip "
322 "%08lx ", t
->pid
, t
->comm
, KSPLICE_IP(t
));
323 status
= check_address_for_conflict(pack
, KSPLICE_IP(t
));
324 ksplice_debug(2, ": ");
327 ret
= check_stack(pack
, task_thread_info(t
),
328 (long *)__builtin_frame_address(0));
331 } else if (!task_curr(t
)) {
332 ret
= check_stack(pack
, task_thread_info(t
),
333 (long *)KSPLICE_SP(t
));
336 } else if (strcmp(t
->comm
, "kstopmachine") != 0) {
337 ksplice_debug(2, "unexpected running task!");
340 ksplice_debug(2, "\n");
344 /* Modified version of Linux's print_context_stack */
345 int check_stack(struct module_pack
*pack
, struct thread_info
*tinfo
,
351 while (valid_stack_ptr(tinfo
, stack
)) {
353 if (__kernel_text_address(addr
)) {
354 ksplice_debug(2, "%08lx ", addr
);
355 if (check_address_for_conflict(pack
, addr
) < 0)
362 int check_address_for_conflict(struct module_pack
*pack
, long addr
)
364 struct ksplice_size
*s
= pack
->primary_sizes
;
365 struct safety_record
*rec
;
367 /* It is safe for addr to point to the beginning of a patched
368 function, because that location will be overwritten with a
370 list_for_each_entry(rec
, pack
->safety_records
, list
) {
371 if (rec
->care
== 1 && addr
> rec
->addr
372 && addr
< rec
->addr
+ rec
->size
) {
373 ksplice_debug(2, "[<-- CONFLICT] ");
377 for (; s
->name
!= NULL
; s
++) {
378 if (addr
>= s
->thismod_addr
379 && addr
< s
->thismod_addr
+ s
->size
) {
380 ksplice_debug(2, "[<-- CONFLICT] ");
387 /* Modified version of Linux's valid_stack_ptr */
388 int valid_stack_ptr(struct thread_info
*tinfo
, void *p
)
390 return p
> (void *)tinfo
391 && p
<= (void *)tinfo
+ THREAD_SIZE
- sizeof(long);
394 int init_ksplice_module(struct module_pack
*pack
)
397 #ifdef KSPLICE_STANDALONE
398 if (process_ksplice_relocs(pack
, &ksplice_init_relocs
, 1) != 0)
403 printk(KERN_INFO
"ksplice_h: Preparing and checking %s\n", pack
->name
);
405 if (activate_helper(pack
) != 0 || activate_primary(pack
) != 0)
408 clear_list(pack
->reloc_namevals
, struct reloc_nameval
, list
);
409 clear_list(pack
->reloc_addrmaps
, struct reloc_addrmap
, list
);
410 if (pack
->state
== KSPLICE_PREPARING
)
411 clear_list(pack
->safety_records
, struct safety_record
, list
);
416 int activate_helper(struct module_pack
*pack
)
418 struct ksplice_size
*s
;
419 int i
, record_count
= 0, ret
;
421 int numfinished
, oldfinished
= 0;
422 int restart_count
= 0;
424 if (process_ksplice_relocs(pack
, pack
->helper_relocs
, 1) != 0)
427 for (s
= pack
->helper_sizes
; s
->name
!= NULL
; s
++)
430 finished
= kcalloc(record_count
, 1, GFP_KERNEL
);
431 if (finished
== NULL
) {
432 print_abort("out of memory");
437 for (s
= pack
->helper_sizes
, i
= 0; s
->name
!= NULL
; s
++, i
++) {
443 ret
= search_for_match(pack
, s
);
447 } else if (ret
> 0) {
453 for (i
= 0; i
< record_count
; i
++) {
457 if (numfinished
== record_count
) {
462 if (oldfinished
== numfinished
) {
463 for (s
= pack
->helper_sizes
, i
= 0; s
->name
!= NULL
; s
++, i
++) {
464 if (finished
[i
] == 0)
465 ksplice_debug(2, KERN_DEBUG
"ksplice: run-pre: "
466 "could not match section %s\n",
469 print_abort("run-pre: could not match some sections");
473 oldfinished
= numfinished
;
475 if (restart_count
< 20) {
479 print_abort("run-pre: restart limit exceeded");
484 int search_for_match(struct module_pack
*pack
, struct ksplice_size
*s
)
489 struct candidate_val
*v
;
491 for (i
= 0; i
< s
->num_sym_addrs
; i
++) {
492 ret
= add_candidate_val(&vals
, s
->sym_addrs
[i
]);
497 ret
= compute_address(pack
, s
->name
, &vals
, 1);
501 ksplice_debug(3, KERN_DEBUG
"ksplice_h: run-pre: starting sect search "
502 "for %s\n", s
->name
);
504 list_for_each_entry(v
, &vals
, list
) {
508 ret
= try_addr(pack
, s
, run_addr
, s
->thismod_addr
);
510 /* we've encountered a match (> 0) or an error (< 0) */
517 #ifdef KSPLICE_STANDALONE
518 ret
= brute_search_all(pack
, s
);
523 int try_addr(struct module_pack
*pack
, struct ksplice_size
*s
, long run_addr
,
526 struct safety_record
*tmp
;
527 struct reloc_nameval
*nv
;
529 if (run_pre_cmp(pack
, run_addr
, pre_addr
, s
->size
, 0) != 0) {
530 set_temp_myst_relocs(pack
, NOVAL
);
531 ksplice_debug(1, KERN_DEBUG
"ksplice_h: run-pre: sect %s does "
532 "not match ", s
->name
);
533 ksplice_debug(1, "(r_a=%08lx p_a=%08lx s=%ld)\n",
534 run_addr
, pre_addr
, s
->size
);
535 ksplice_debug(1, "ksplice_h: run-pre: ");
537 run_pre_cmp(pack
, run_addr
, pre_addr
, s
->size
, 1);
538 ksplice_debug(1, "\n");
540 set_temp_myst_relocs(pack
, VAL
);
542 ksplice_debug(3, KERN_DEBUG
"ksplice_h: run-pre: found sect "
543 "%s=%08lx\n", s
->name
, run_addr
);
545 tmp
= kmalloc(sizeof(*tmp
), GFP_KERNEL
);
547 print_abort("out of memory");
550 tmp
->addr
= run_addr
;
553 list_add(&tmp
->list
, pack
->safety_records
);
555 nv
= find_nameval(pack
, s
->name
, 1);
566 int handle_myst_reloc(long pre_addr
, int *pre_o
, long run_addr
,
567 int *run_o
, struct reloc_addrmap
*map
, int rerun
)
570 int offset
= (int)(pre_addr
+ *pre_o
- map
->addr
);
573 run_reloc_addr
= run_addr
+ *run_o
- offset
;
575 run_reloc
= *(int *)run_reloc_addr
;
576 else if (map
->size
== 8)
577 run_reloc
= *(long long *)run_reloc_addr
;
581 if (debug
>= 3 && !rerun
) {
582 printk(KERN_DEBUG
"ksplice_h: run-pre: reloc at r_a=%08lx "
583 "p_o=%08x: ", run_addr
, *pre_o
);
584 printk("%s=%08lx (A=%08lx *r=%08lx)\n",
585 map
->nameval
->name
, map
->nameval
->val
,
586 map
->addend
, run_reloc
);
589 if (!starts_with(map
->nameval
->name
, ".rodata.str")) {
590 expected
= run_reloc
- map
->addend
;
591 if ((int)run_reloc
== 0x77777777)
594 expected
+= run_reloc_addr
;
595 if (map
->nameval
->status
== NOVAL
) {
596 map
->nameval
->val
= expected
;
597 map
->nameval
->status
= TEMP
;
598 } else if (map
->nameval
->val
!= expected
) {
601 printk(KERN_DEBUG
"ksplice_h: pre-run reloc: Expected "
602 "%s=%08x!\n", map
->nameval
->name
, expected
);
607 *pre_o
+= map
->size
- offset
;
608 *run_o
+= map
->size
- offset
;
612 int process_ksplice_relocs(struct module_pack
*pack
,
613 struct ksplice_reloc
*relocs
, int pre
)
615 struct ksplice_reloc
*r
;
616 for (r
= relocs
; r
->sym_name
!= NULL
; r
++) {
617 if (process_reloc(pack
, r
, pre
) != 0)
623 int process_reloc(struct module_pack
*pack
, struct ksplice_reloc
*r
, int pre
)
627 struct reloc_addrmap
*map
;
628 const long blank_addr
= r
->blank_sect_addr
+ r
->blank_offset
;
630 #ifdef KSPLICE_STANDALONE
631 /* run_pre_reloc: will this reloc be used for run-pre matching? */
632 const int run_pre_reloc
= pre
&& bootstrapped
;
633 #ifndef CONFIG_KALLSYMS
636 goto skip_using_system_map
;
637 #endif /* CONFIG_KALLSYMS */
638 #endif /* KSPLICE_STANDALONE */
640 /* Some Fedora kernel releases have System.map files whose symbol
641 * addresses disagree with the running kernel by a constant address
642 * offset because of the CONFIG_PHYSICAL_START and CONFIG_PHYSICAL_ALIGN
643 * values used to compile these kernels. This constant address offset
644 * is always a multiple of 0x100000.
646 * If we observe an offset that is NOT a multiple of 0x100000, then the
647 * user provided us with an incorrect System.map file, and we should
649 * If we observe an offset that is a multiple of 0x100000, then we can
650 * adjust the System.map address values accordingly and proceed.
652 off
= (long)printk
- pack
->map_printk
;
654 print_abort("System.map does not match kernel");
657 for (i
= 0; i
< r
->num_sym_addrs
; i
++) {
658 ret
= add_candidate_val(&vals
, r
->sym_addrs
[i
] + off
);
662 #ifndef CONFIG_KALLSYMS
663 skip_using_system_map
:
666 if ((r
->size
== 4 && *(int *)blank_addr
!= 0x77777777)
668 *(long long *)blank_addr
!= 0x7777777777777777ll
)) {
669 ksplice_debug(4, KERN_DEBUG
"ksplice%s: reloc: skipped %s:%08lx"
670 " (altinstr)\n", (pre
? "_h" : ""),
671 r
->sym_name
, r
->blank_offset
);
676 ret
= compute_address(pack
, r
->sym_name
, &vals
, pre
);
679 if (!singular(&vals
)) {
681 #ifdef KSPLICE_STANDALONE
682 if (!run_pre_reloc
) {
686 failed_to_find(r
->sym_name
);
690 ksplice_debug(4, KERN_DEBUG
"ksplice: reloc: deferred %s:%08lx "
691 "to run-pre\n", r
->sym_name
, r
->blank_offset
);
693 map
= kmalloc(sizeof(*map
), GFP_KERNEL
);
695 print_abort("out of memory");
698 map
->addr
= blank_addr
;
699 map
->nameval
= find_nameval(pack
, r
->sym_name
, 1);
700 if (map
->nameval
== NULL
)
702 map
->addend
= r
->addend
;
703 map
->pcrel
= r
->pcrel
;
705 list_add(&map
->list
, pack
->reloc_addrmaps
);
708 sym_addr
= list_entry(vals
.next
, struct candidate_val
, list
)->val
;
711 #ifdef CONFIG_MODULE_UNLOAD
713 ret
= add_dependency_on_address(pack
, sym_addr
);
719 #ifdef KSPLICE_STANDALONE
720 if (r
->pcrel
&& run_pre_reloc
) {
722 if (r
->pcrel
&& pre
) {
724 map
= kmalloc(sizeof(*map
), GFP_KERNEL
);
726 print_abort("out of memory");
729 map
->addr
= blank_addr
;
730 map
->nameval
= find_nameval(pack
, "ksplice_zero", 1);
731 if (map
->nameval
== NULL
)
733 map
->nameval
->val
= 0;
734 map
->nameval
->status
= VAL
;
735 map
->addend
= sym_addr
+ r
->addend
;
737 map
->pcrel
= r
->pcrel
;
738 list_add(&map
->list
, pack
->reloc_addrmaps
);
743 val
= sym_addr
+ r
->addend
- blank_addr
;
745 val
= sym_addr
+ r
->addend
;
747 *(int *)blank_addr
= val
;
748 else if (r
->size
== 8)
749 *(long long *)blank_addr
= val
;
754 ksplice_debug(4, KERN_DEBUG
"ksplice%s: reloc: %s:%08lx ",
755 (pre
? "_h" : ""), r
->sym_name
, r
->blank_offset
);
756 ksplice_debug(4, "(S=%08lx A=%08lx ", sym_addr
, r
->addend
);
758 ksplice_debug(4, "aft=%08x)\n", *(int *)blank_addr
);
759 else if (r
->size
== 8)
760 ksplice_debug(4, "aft=%016llx)\n", *(long long *)blank_addr
);
766 #ifdef CONFIG_MODULE_UNLOAD
767 int add_dependency_on_address(struct module_pack
*pack
, long addr
)
771 mutex_lock(&module_mutex
);
772 m
= module_text_address(addr
);
773 if (m
== NULL
|| starts_with(m
->name
, pack
->name
) ||
774 ends_with(m
->name
, "_helper"))
776 else if (use_module(pack
->primary
, m
) != 1)
778 mutex_unlock(&module_mutex
);
782 int add_patch_dependencies(struct module_pack
*pack
)
785 struct ksplice_patch
*p
;
786 for (p
= pack
->patches
; p
->oldstr
; p
++) {
787 ret
= add_dependency_on_address(pack
, p
->oldaddr
);
794 #ifdef KSPLICE_STANDALONE
795 /* Essentially, code from module.c; we use directly use_module and module_text_address */
796 struct module
*module_text_address(unsigned long addr
)
799 list_for_each_entry(m
, &modules
, list
) {
800 if ((addr
>= (unsigned long)m
->module_core
&&
801 addr
< (unsigned long)m
->module_core
+ m
->core_size
) ||
802 (addr
>= (unsigned long)m
->module_init
&&
803 addr
< (unsigned long)m
->module_init
+ m
->init_size
))
810 struct list_head list
;
811 struct module
*module_which_uses
;
814 /* I'm not yet certain whether we need the strong form of this. */
815 static inline int strong_try_module_get(struct module
*mod
)
817 if (mod
&& mod
->state
!= MODULE_STATE_LIVE
)
819 if (try_module_get(mod
))
824 /* Does a already use b? */
825 static int already_uses(struct module
*a
, struct module
*b
)
827 struct module_use
*use
;
828 list_for_each_entry(use
, &b
->modules_which_use_me
, list
) {
829 if (use
->module_which_uses
== a
)
835 /* Make it so module a uses b. Must be holding module_mutex */
836 int use_module(struct module
*a
, struct module
*b
)
838 struct module_use
*use
;
839 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
840 /* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
843 if (b
== NULL
|| already_uses(a
, b
))
846 if (strong_try_module_get(b
) < 0)
849 ksplice_debug(4, "Allocating new usage for %s.\n", a
->name
);
850 use
= kmalloc(sizeof(*use
), GFP_ATOMIC
);
852 printk("%s: out of memory adding dependencies\n", a
->name
);
856 use
->module_which_uses
= a
;
857 list_add(&use
->list
, &b
->modules_which_use_me
);
858 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
859 /* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
860 no_warn
= sysfs_create_link(b
->holders_dir
, &a
->mkobj
.kobj
, a
->name
);
864 #endif /* KSPLICE_STANDALONE */
865 #endif /* CONFIG_MODULE_UNLOAD */
867 int compute_address(struct module_pack
*pack
, char *sym_name
,
868 struct list_head
*vals
, int pre
)
871 const char *prefix
[] = { ".text.", ".bss.", ".data.", NULL
};
872 #ifdef KSPLICE_STANDALONE
878 struct reloc_nameval
*nv
= find_nameval(pack
, sym_name
, 0);
879 if (nv
!= NULL
&& nv
->status
!= NOVAL
) {
881 ret
= add_candidate_val(vals
, nv
->val
);
884 ksplice_debug(1, KERN_DEBUG
"ksplice: using detected "
885 "sym %s=%08lx\n", sym_name
, nv
->val
);
890 if (starts_with(sym_name
, ".rodata"))
893 #ifdef CONFIG_KALLSYMS
894 ret
= kernel_lookup(sym_name
, vals
);
897 ret
= other_module_lookup(sym_name
, vals
, pack
->name
);
902 for (i
= 0; prefix
[i
] != NULL
; i
++) {
903 if (starts_with(sym_name
, prefix
[i
])) {
904 ret
= compute_address(pack
, sym_name
+
905 strlen(prefix
[i
]), vals
, pre
);
913 #ifdef CONFIG_KALLSYMS
914 int other_module_lookup(const char *name_wlabel
, struct list_head
*vals
,
915 const char *ksplice_name
)
918 struct accumulate_struct acc
= { dup_wolabel(name_wlabel
), vals
};
921 if (acc
.desired_name
== NULL
)
923 mutex_lock(&module_mutex
);
924 list_for_each_entry(m
, &modules
, list
) {
925 if (!starts_with(m
->name
, ksplice_name
)
926 && !ends_with(m
->name
, "_helper")) {
927 ret
= module_on_each_symbol(m
,
928 accumulate_matching_names
,
934 mutex_unlock(&module_mutex
);
936 kfree(acc
.desired_name
);
939 #endif /* CONFIG_KALLSYMS */
941 int accumulate_matching_names(void *data
, const char *sym_name
, long sym_val
)
944 struct accumulate_struct
*acc
= data
;
946 if (strncmp(sym_name
, acc
->desired_name
, strlen(acc
->desired_name
)) !=
950 sym_name
= dup_wolabel(sym_name
);
951 if (sym_name
== NULL
)
953 /* TODO: possibly remove "&& sym_val != 0" */
954 if (strcmp(sym_name
, acc
->desired_name
) == 0 && sym_val
!= 0)
955 ret
= add_candidate_val(acc
->vals
, sym_val
);
960 #ifdef KSPLICE_STANDALONE
961 int brute_search_all(struct module_pack
*pack
, struct ksplice_size
*s
)
968 ksplice_debug(2, KERN_DEBUG
"ksplice: brute_search: searching for %s\n",
973 mutex_lock(&module_mutex
);
974 list_for_each_entry(m
, &modules
, list
) {
975 if (starts_with(m
->name
, pack
->name
) ||
976 ends_with(m
->name
, "_helper"))
978 if (brute_search(pack
, s
, m
->module_core
, m
->core_size
) == 0 ||
979 brute_search(pack
, s
, m
->module_init
, m
->init_size
) == 0) {
985 mutex_unlock(&module_mutex
);
988 if (brute_search(pack
, s
, (void *)init_mm
.start_code
,
989 init_mm
.end_code
- init_mm
.start_code
) == 0) {
995 if (ret
== 1 && saved_debug
>= 2)
996 printk(KERN_DEBUG
"ksplice: brute_search: found %s in %s\n",
1003 #ifdef CONFIG_KALLSYMS
1004 /* Modified version of Linux's kallsyms_lookup_name */
1005 int kernel_lookup(const char *name_wlabel
, struct list_head
*vals
)
1008 char namebuf
[KSYM_NAME_LEN
+ 1];
1010 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
1012 #endif /* LINUX_VERSION_CODE */
1014 const char *name
= dup_wolabel(name_wlabel
);
1018 /* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
1019 * 2.6.10 was the first release after this commit
1021 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
1022 for (i
= 0, off
= 0; i
< kallsyms_num_syms
; i
++) {
1023 off
= ksplice_kallsyms_expand_symbol(off
, namebuf
);
1025 if (strcmp(namebuf
, name
) == 0) {
1026 ret
= add_candidate_val(vals
, kallsyms_addresses
[i
]);
1031 #else /* LINUX_VERSION_CODE */
1034 for (i
= 0, knames
= kallsyms_names
; i
< kallsyms_num_syms
; i
++) {
1035 unsigned prefix
= *knames
++;
1037 strlcpy(namebuf
+ prefix
, knames
, KSYM_NAME_LEN
- prefix
);
1039 if (strcmp(namebuf
, name
) == 0) {
1040 ret
= add_candidate_val(vals
, kallsyms_addresses
[i
]);
1045 knames
+= strlen(knames
) + 1;
1047 #endif /* LINUX_VERSION_CODE */
1053 /* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
1054 * 2.6.10 was the first release after this commit
1056 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
1057 extern u8 kallsyms_token_table
[];
1058 extern u16 kallsyms_token_index
[];
1059 /* Modified version of Linux's kallsyms_expand_symbol */
1060 long ksplice_kallsyms_expand_symbol(unsigned long off
, char *result
)
1062 long len
, skipped_first
= 0;
1063 const u8
*tptr
, *data
;
1065 data
= &kallsyms_names
[off
];
1072 tptr
= &kallsyms_token_table
[kallsyms_token_index
[*data
]];
1077 if (skipped_first
) {
1090 #endif /* LINUX_VERSION_CODE */
1092 int module_on_each_symbol(struct module
*mod
,
1093 int (*fn
) (void *, const char *, long), void *data
)
1098 /* TODO: possibly remove this if statement */
1099 if (strlen(mod
->name
) <= 1)
1102 for (i
= 0; i
< mod
->num_symtab
; i
++) {
1104 fn(data
, mod
->strtab
+ mod
->symtab
[i
].st_name
,
1105 mod
->symtab
[i
].st_value
) != 0))
1110 #endif /* CONFIG_KALLSYMS */
1111 #else /* KSPLICE_STANDALONE */
1112 EXPORT_SYMBOL_GPL(init_ksplice_module
);
1113 EXPORT_SYMBOL_GPL(cleanup_ksplice_module
);
1115 int init_module(void)
1120 void cleanup_module(void)
1124 int kernel_lookup(const char *name_wlabel
, struct list_head
*vals
)
1127 struct accumulate_struct acc
= { dup_wolabel(name_wlabel
), vals
};
1128 if (acc
.desired_name
== NULL
)
1130 ret
= kallsyms_on_each_symbol(accumulate_matching_names
, &acc
);
1133 kfree(acc
.desired_name
);
1136 #endif /* KSPLICE_STANDALONE */
1138 int add_candidate_val(struct list_head
*vals
, long val
)
1140 struct candidate_val
*tmp
, *new;
1142 list_for_each_entry(tmp
, vals
, list
) {
1143 if (tmp
->val
== val
)
1146 new = kmalloc(sizeof(*new), GFP_KERNEL
);
1148 print_abort("out of memory");
1152 list_add(&new->list
, vals
);
1156 void release_vals(struct list_head
*vals
)
1158 clear_list(vals
, struct candidate_val
, list
);
1161 struct reloc_nameval
*find_nameval(struct module_pack
*pack
, char *name
,
1164 struct reloc_nameval
*nv
, *new;
1166 list_for_each_entry(nv
, pack
->reloc_namevals
, list
) {
1168 if (starts_with(newname
, ".text."))
1170 if (strcmp(newname
, name
) == 0)
1176 new = kmalloc(sizeof(*new), GFP_KERNEL
);
1178 print_abort("out of memory");
1183 new->status
= NOVAL
;
1184 list_add(&new->list
, pack
->reloc_namevals
);
1188 struct reloc_addrmap
*find_addrmap(struct module_pack
*pack
, long addr
)
1190 struct reloc_addrmap
*map
;
1191 list_for_each_entry(map
, pack
->reloc_addrmaps
, list
) {
1192 if (addr
>= map
->addr
&& addr
< map
->addr
+ map
->size
)
1198 void set_temp_myst_relocs(struct module_pack
*pack
, int status_val
)
1200 struct reloc_nameval
*nv
;
1201 list_for_each_entry(nv
, pack
->reloc_namevals
, list
) {
1202 if (nv
->status
== TEMP
)
1203 nv
->status
= status_val
;
1207 int starts_with(const char *str
, const char *prefix
)
1209 return strncmp(str
, prefix
, strlen(prefix
)) == 0;
1212 int ends_with(const char *str
, const char *suffix
)
1214 return strlen(str
) >= strlen(suffix
) &&
1215 strcmp(&str
[strlen(str
) - strlen(suffix
)], suffix
) == 0;
1218 int label_offset(const char *sym_name
)
1222 sym_name
[i
] != 0 && sym_name
[i
+ 1] != 0 && sym_name
[i
+ 2] != 0
1223 && sym_name
[i
+ 3] != 0; i
++) {
1224 if (sym_name
[i
] == '_' && sym_name
[i
+ 1] == '_'
1225 && sym_name
[i
+ 2] == '_' && sym_name
[i
+ 3] == '_')
1231 const char *dup_wolabel(const char *sym_name
)
1233 int offset
, entire_strlen
, label_strlen
, new_strlen
;
1236 offset
= label_offset(sym_name
);
1240 label_strlen
= strlen(&sym_name
[offset
]) + strlen("____");
1242 entire_strlen
= strlen(sym_name
);
1243 new_strlen
= entire_strlen
- label_strlen
;
1244 newstr
= kmalloc(new_strlen
+ 1, GFP_KERNEL
);
1245 if (newstr
== NULL
) {
1246 print_abort("out of memory");
1249 memcpy(newstr
, sym_name
, new_strlen
);
1250 newstr
[new_strlen
] = 0;
1254 MODULE_LICENSE("GPL v2");
1255 MODULE_AUTHOR("Jeffrey Brian Arnold <jbarnold@mit.edu>");