/*  Copyright (C) 2008  Jeffrey Brian Arnold <jbarnold@mit.edu>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License, version 2.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
 *  02110-1301, USA.
 */
#include <linux/module.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/errno.h>
#include <linux/kallsyms.h>
#include <linux/kobject.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/sysfs.h>
#include <linux/time.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
#include <linux/uaccess.h>
#else /* LINUX_VERSION_CODE < */
/* linux/uaccess.h doesn't exist in kernels before 2.6.18 */
#include <asm/uaccess.h>
#endif /* LINUX_VERSION_CODE */
#ifdef KSPLICE_NEED_PARAINSTRUCTIONS
#include <asm/alternative.h>
#endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
#ifdef KSPLICE_STANDALONE
#include "ksplice.h"
#else /* !KSPLICE_STANDALONE */
#include <linux/ksplice.h>
#endif /* KSPLICE_STANDALONE */

#if BITS_PER_LONG == 32
#define ADDR "08lx"
#elif BITS_PER_LONG == 64
#define ADDR "016lx"
#endif /* BITS_PER_LONG */
enum ksplice_stage_enum {
	PREPARING, APPLIED, REVERSED
};

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
/* 5d7b32de9935c65ca8285ac6ec2382afdbb5d479 was after 2.6.8 */
#define __bitwise__
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
/* af4ca457eaf2d6682059c18463eb106e2ce58198 was after 2.6.14 */
#define __bitwise__ __bitwise
#endif /* LINUX_VERSION_CODE */

typedef int __bitwise__ abort_t;
#define OK ((__force abort_t) 0)
#define NO_MATCH ((__force abort_t) 1)
#define BAD_SYSTEM_MAP ((__force abort_t) 2)
#define CODE_BUSY ((__force abort_t) 3)
#define MODULE_BUSY ((__force abort_t) 4)
#define OUT_OF_MEMORY ((__force abort_t) 5)
#define FAILED_TO_FIND ((__force abort_t) 6)
#define ALREADY_REVERSED ((__force abort_t) 7)
#define MISSING_EXPORT ((__force abort_t) 8)
#define UNEXPECTED ((__force abort_t) 9)
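
/* abort_t values double as internal status codes and as the abort causes
 * reported to userspace; each code above corresponds one-to-one to a string
 * printed by abort_cause_show() further down in this file. */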
struct update_bundle {
	const char *name;
	const char *kid;
	enum ksplice_stage_enum stage;
	abort_t abort_cause;
	int debug;
#ifdef CONFIG_DEBUG_FS
	struct debugfs_blob_wrapper debug_blob;
	struct dentry *debugfs_dentry;
#endif /* CONFIG_DEBUG_FS */
	struct list_head packs;
	struct list_head conflicts;
	struct list_head list;
	struct kobject kobj;
};

struct conflict {
	const char *process_name;
	pid_t pid;
	struct list_head stack;
	struct list_head list;
};
struct ksplice_frame {
	unsigned long addr;
	int has_conflict;
	const char *symbol_name;
	struct list_head list;
};

#if defined(CONFIG_DEBUG_FS) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
/* Old kernels don't have debugfs_create_blob */
struct debugfs_blob_wrapper {
	void *data;
	unsigned long size;
};
#endif /* CONFIG_DEBUG_FS && LINUX_VERSION_CODE */
struct reloc_nameval {
	struct list_head list;
	const char *name;
	unsigned long val;
	enum { NOVAL, TEMP, VAL } status;
};

struct reloc_addrmap {
	struct list_head list;
	unsigned long addr;
	struct reloc_nameval *nameval;
	int pcrel;
	long addend;
	int size;
	long dst_mask;
};
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
static inline int virtual_address_mapped(unsigned long addr)
{
	unsigned int level;
	pte_t *pte = lookup_address(addr, &level);
	return pte == NULL ? 0 : pte_present(*pte);
}
#else /* LINUX_VERSION_CODE < */
/* f0646e43acb18f0e00b00085dc88bc3f403e7930 was after 2.6.24 */
static inline int virtual_address_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
#ifdef pud_page
	pud_t *pud;
#endif /* pud_page */
	pmd_t *pmd;
	pte_t *pte;

	if (!pgd_present(*pgd))
		return 0;

#ifdef pud_page
	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
#else /* !pud_page */
	pmd = pmd_offset(pgd, addr);
#endif /* pud_page */

	if (!pmd_present(*pmd))
		return 0;

	pte = pte_offset_kernel(pmd, addr);
	if (!pte_present(*pte))
		return 0;

	return 1;
}
#endif /* LINUX_VERSION_CODE */
static struct reloc_nameval *find_nameval(struct module_pack *pack,
					  const char *name, int create);
static struct reloc_addrmap *find_addrmap(struct module_pack *pack,
					  unsigned long addr);
static abort_t handle_myst_reloc(struct module_pack *pack,
				 unsigned long pre_addr, unsigned long run_addr,
				 int rerun, int *matched);
struct safety_record {
	struct list_head list;
	const char *name;
	unsigned long addr;
	unsigned long size;
	int care;
};

struct candidate_val {
	struct list_head list;
	unsigned long val;
};
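
/* singular(list) is true when the list contains exactly one entry: the list
 * is non-empty and its first element's next pointer leads straight back to
 * the head.  It is used below to check that a symbol lookup produced a
 * unique candidate address. */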
#define singular(list) (!list_empty(list) && (list)->next->next == (list))
#ifdef CONFIG_DEBUG_FS
static abort_t init_debug_buf(struct update_bundle *bundle);
static void clear_debug_buf(struct update_bundle *bundle);
static int __attribute__((format(printf, 2, 3)))
__ksdebug(struct update_bundle *bundle, const char *fmt, ...);
#else /* !CONFIG_DEBUG_FS */
static inline abort_t init_debug_buf(struct update_bundle *bundle)
{
	return OK;
}
static inline void clear_debug_buf(struct update_bundle *bundle)
{
}
#define __ksdebug(bundle, fmt, ...) printk(fmt, ## __VA_ARGS__)
#endif /* CONFIG_DEBUG_FS */
#define _ksdebug(bundle, level, fmt, ...)			\
	do {							\
		if ((bundle)->debug >= (level))			\
			__ksdebug(bundle, fmt, ## __VA_ARGS__);	\
	} while (0)
#define ksdebug(pack, level, fmt, ...) \
	do { _ksdebug((pack)->bundle, level, fmt, ## __VA_ARGS__); } while (0)
#define failed_to_find(pack, sym_name) \
	ksdebug(pack, 0, KERN_ERR "ksplice: Failed to find symbol %s at " \
		"%s:%d\n", sym_name, __FILE__, __LINE__)
static inline void print_abort(struct module_pack *pack, const char *str)
{
	ksdebug(pack, 0, KERN_ERR "ksplice: Aborted. (%s)\n", str);
}

static LIST_HEAD(update_bundles);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
/* Old kernels do not have kcalloc
 * e629946abd0bb8266e9c3d0fd1bff2ef8dec5443 was after 2.6.8
 */
static inline void *kcalloc(size_t n, size_t size, typeof(GFP_KERNEL) flags)
{
	void *mem;
	if (n != 0 && size > ULONG_MAX / n)
		return NULL;
	mem = kmalloc(n * size, flags);
	if (mem != NULL)
		memset(mem, 0, n * size);
	return mem;
}
#endif /* LINUX_VERSION_CODE */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
/* Old kernels do not have kstrdup
 * 543537bd922692bc978e2e356fcd8bfc9c2ee7d5 was 2.6.13-rc4
 */
static char *kstrdup(const char *s, typeof(GFP_KERNEL) gfp)
{
	size_t len;
	char *buf;

	if (s == NULL)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc(len, gfp);
	if (buf != NULL)
		memcpy(buf, s, len);
	return buf;
}
#endif /* LINUX_VERSION_CODE */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
/* Old kernels use semaphore instead of mutex
 * 97d1f15b7ef52c1e9c28dc48b454024bb53a5fd2 was after 2.6.16
 */
#define mutex semaphore
#define mutex_lock down
#define mutex_unlock up
#endif /* LINUX_VERSION_CODE */

#ifndef task_thread_info
#define task_thread_info(task) (task)->thread_info
#endif /* !task_thread_info */
#ifdef CONFIG_X86
#ifdef __ASM_X86_PROCESSOR_H /* New unified x86 */
#define KSPLICE_IP(x) ((x)->thread.ip)
#define KSPLICE_SP(x) ((x)->thread.sp)
#elif defined(CONFIG_X86_64) /* Old x86 64-bit */
/* The IP is on the stack, so we don't need to check it separately.
 * Instead, we need to prevent Ksplice from patching thread_return.
 */
extern const char thread_return[];
#define KSPLICE_IP(x) ((unsigned long)thread_return)
#define KSPLICE_SP(x) ((x)->thread.rsp)
#else /* Old x86 32-bit */
#define KSPLICE_IP(x) ((x)->thread.eip)
#define KSPLICE_SP(x) ((x)->thread.esp)
#endif /* __ASM_X86_PROCESSOR_H */
#endif /* CONFIG_X86 */
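
/* KSPLICE_IP and KSPLICE_SP extract a non-running task's saved instruction
 * and stack pointers, which the stack-check code below scans for addresses
 * inside code that an update wants to replace. */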
#ifdef KSPLICE_STANDALONE

static int bootstrapped = 0;

#ifdef CONFIG_KALLSYMS
extern unsigned long kallsyms_addresses[], kallsyms_num_syms;
extern u8 kallsyms_names[];
#endif /* CONFIG_KALLSYMS */

/* defined by ksplice-create */
extern const struct ksplice_reloc ksplice_init_relocs[],
    ksplice_init_relocs_end[];

/* Obtained via System.map */
extern struct list_head modules;
extern struct mutex module_mutex;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
/* f71d20e961474dde77e6558396efb93d6ac80a4b was after 2.6.17 */
#define KSPLICE_KSYMTAB_UNUSED_SUPPORT 1
#endif /* LINUX_VERSION_CODE */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
/* 9f28bb7e1d0188a993403ab39b774785892805e1 was after 2.6.16 */
#define KSPLICE_KSYMTAB_FUTURE_SUPPORT 1
#endif /* LINUX_VERSION_CODE */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
extern const unsigned long __start___kcrctab[];
extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const unsigned long __start___kcrctab_gpl[];
#ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
extern const struct kernel_symbol __start___ksymtab_unused[];
extern const struct kernel_symbol __stop___ksymtab_unused[];
extern const unsigned long __start___kcrctab_unused[];
extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
extern const unsigned long __start___kcrctab_unused_gpl[];
#endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
#ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
extern const struct kernel_symbol __start___ksymtab_gpl_future[];
extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
extern const unsigned long __start___kcrctab_gpl_future[];
#endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */

#endif /* KSPLICE_STANDALONE */
static abort_t process_ksplice_relocs(struct module_pack *pack,
				      const struct ksplice_reloc *relocs,
				      const struct ksplice_reloc *relocs_end,
				      int pre);
static abort_t process_reloc(struct module_pack *pack,
			     const struct ksplice_reloc *r, int pre);
static abort_t compute_address(struct module_pack *pack, const char *sym_name,
			       struct list_head *vals, int pre);

struct accumulate_struct {
	const char *desired_name;
	struct list_head *vals;
};

#ifdef CONFIG_KALLSYMS
static int accumulate_matching_names(void *data, const char *sym_name,
				     unsigned long sym_val);
static abort_t kernel_lookup(const char *name, struct list_head *vals);
static abort_t other_module_lookup(const char *name, struct list_head *vals,
				   const char *ksplice_name);
#ifdef KSPLICE_STANDALONE
static int module_on_each_symbol(const struct module *mod,
				 int (*fn)(void *, const char *, unsigned long),
				 void *data);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
static unsigned long ksplice_kallsyms_expand_symbol(unsigned long off,
						    char *result);
#endif /* LINUX_VERSION_CODE */
#endif /* KSPLICE_STANDALONE */
#endif /* CONFIG_KALLSYMS */
static int label_offset(const char *sym_name);
static char *dup_wolabel(const char *sym_name);
static abort_t exported_symbol_lookup(const char *name, struct list_head *vals);

#ifdef KSPLICE_STANDALONE
static abort_t brute_search_all(struct module_pack *pack,
				const struct ksplice_size *s);
#endif /* KSPLICE_STANDALONE */

static abort_t add_candidate_val(struct list_head *vals, unsigned long val);
static void release_vals(struct list_head *vals);
static void set_temp_myst_relocs(struct module_pack *pack, int status_val);
static int contains_canary(struct module_pack *pack, unsigned long blank_addr,
			   int size, long dst_mask);
static int starts_with(const char *str, const char *prefix);
static int ends_with(const char *str, const char *suffix);
#define clear_list(head, type, member)				\
	do {							\
		struct list_head *_pos, *_n;			\
		list_for_each_safe(_pos, _n, head) {		\
			list_del(_pos);				\
			kfree(list_entry(_pos, type, member));	\
		}						\
	} while (0)
static abort_t activate_primary(struct module_pack *pack);
static abort_t resolve_patch_symbols(struct module_pack *pack);
static abort_t process_exports(struct module_pack *pack);
static int __apply_patches(void *bundle);
static int __reverse_patches(void *bundle);
static abort_t check_each_task(struct update_bundle *bundle);
static abort_t check_task(struct update_bundle *bundle,
			  const struct task_struct *t, int save_conflicts);
static abort_t check_stack(struct update_bundle *bundle, struct conflict *conf,
			   const struct thread_info *tinfo,
			   const unsigned long *stack);
static abort_t check_address_for_conflict(struct update_bundle *bundle,
					  struct conflict *conf,
					  unsigned long addr);
static int valid_stack_ptr(const struct thread_info *tinfo, const void *p);
static int is_stop_machine(const struct task_struct *t);
static void cleanup_conflicts(struct update_bundle *bundle);
static void print_conflicts(struct update_bundle *bundle);
#ifdef KSPLICE_STANDALONE
static const struct kernel_symbol *__find_symbol(const char *name,
						 struct module **owner,
						 const unsigned long **crc,
						 const char **export_type,
						 _Bool gplok, _Bool warn);
#endif /* KSPLICE_STANDALONE */
static void insert_trampoline(struct ksplice_patch *p);
static void remove_trampoline(const struct ksplice_patch *p);
static void free_trampolines(struct update_bundle *bundle);
static abort_t prepare_trampolines(struct update_bundle *bundle);
/* Architecture-specific functions defined in ksplice-run-pre.h */
static abort_t create_trampoline(struct ksplice_patch *p);
static unsigned long follow_trampolines(struct module_pack *pack,
					unsigned long addr);
static abort_t add_dependency_on_address(struct module_pack *pack,
					 unsigned long addr);
static abort_t add_patch_dependencies(struct module_pack *pack);
#ifdef KSPLICE_STANDALONE
static int use_module(struct module *a, struct module *b);
#endif /* KSPLICE_STANDALONE */

static abort_t activate_helper(struct module_pack *pack);
static abort_t search_for_match(struct module_pack *pack,
				const struct ksplice_size *s);
static abort_t try_addr(struct module_pack *pack, const struct ksplice_size *s,
			unsigned long run_addr, unsigned long pre_addr);
static abort_t rodata_run_pre_cmp(struct module_pack *pack,
				  unsigned long run_addr,
				  unsigned long pre_addr, unsigned int size,
				  int rerun);
static abort_t reverse_patches(struct update_bundle *bundle);
static abort_t apply_patches(struct update_bundle *bundle);
static abort_t apply_update(struct update_bundle *bundle);
static int register_ksplice_module(struct module_pack *pack);
static void unregister_ksplice_module(struct module_pack *pack);
static struct update_bundle *init_ksplice_bundle(const char *kid);
static void cleanup_ksplice_bundle(struct update_bundle *bundle);
static void add_to_bundle(struct module_pack *pack,
			  struct update_bundle *bundle);
static int ksplice_sysfs_init(struct update_bundle *bundle);

#ifdef KSPLICE_STANDALONE
#include "ksplice-run-pre.h"
#else /* !KSPLICE_STANDALONE */
#include <asm/ksplice-run-pre.h>
#endif /* KSPLICE_STANDALONE */
#ifndef KSPLICE_STANDALONE
static struct kobject *ksplice_kobj;
#endif /* !KSPLICE_STANDALONE */
struct ksplice_attribute {
	struct attribute attr;
	ssize_t (*show)(struct update_bundle *bundle, char *buf);
	ssize_t (*store)(struct update_bundle *bundle, const char *buf,
			 size_t len);
};

static ssize_t ksplice_attr_show(struct kobject *kobj, struct attribute *attr,
				 char *buf)
{
	struct ksplice_attribute *attribute =
	    container_of(attr, struct ksplice_attribute, attr);
	struct update_bundle *bundle =
	    container_of(kobj, struct update_bundle, kobj);
	if (attribute->show == NULL)
		return -EIO;
	return attribute->show(bundle, buf);
}

static ssize_t ksplice_attr_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t len)
{
	struct ksplice_attribute *attribute =
	    container_of(attr, struct ksplice_attribute, attr);
	struct update_bundle *bundle =
	    container_of(kobj, struct update_bundle, kobj);
	if (attribute->store == NULL)
		return -EIO;
	return attribute->store(bundle, buf, len);
}

static struct sysfs_ops ksplice_sysfs_ops = {
	.show = ksplice_attr_show,
	.store = ksplice_attr_store,
};

static void ksplice_release(struct kobject *kobj)
{
	struct update_bundle *bundle;
	bundle = container_of(kobj, struct update_bundle, kobj);
	cleanup_ksplice_bundle(bundle);
}
static ssize_t stage_show(struct update_bundle *bundle, char *buf)
{
	switch (bundle->stage) {
	case PREPARING:
		return snprintf(buf, PAGE_SIZE, "preparing\n");
	case APPLIED:
		return snprintf(buf, PAGE_SIZE, "applied\n");
	case REVERSED:
		return snprintf(buf, PAGE_SIZE, "reversed\n");
	}
	return 0;
}

static ssize_t abort_cause_show(struct update_bundle *bundle, char *buf)
{
	switch (bundle->abort_cause) {
	case OK:
		return snprintf(buf, PAGE_SIZE, "ok\n");
	case NO_MATCH:
		return snprintf(buf, PAGE_SIZE, "no_match\n");
	case BAD_SYSTEM_MAP:
		return snprintf(buf, PAGE_SIZE, "bad_system_map\n");
	case CODE_BUSY:
		return snprintf(buf, PAGE_SIZE, "code_busy\n");
	case MODULE_BUSY:
		return snprintf(buf, PAGE_SIZE, "module_busy\n");
	case OUT_OF_MEMORY:
		return snprintf(buf, PAGE_SIZE, "out_of_memory\n");
	case FAILED_TO_FIND:
		return snprintf(buf, PAGE_SIZE, "failed_to_find\n");
	case ALREADY_REVERSED:
		return snprintf(buf, PAGE_SIZE, "already_reversed\n");
	case MISSING_EXPORT:
		return snprintf(buf, PAGE_SIZE, "missing_export\n");
	case UNEXPECTED:
		return snprintf(buf, PAGE_SIZE, "unexpected\n");
	}
	return 0;
}

static ssize_t conflict_show(struct update_bundle *bundle, char *buf)
{
	const struct conflict *conf;
	const struct ksplice_frame *frame;
	int used = 0;
	list_for_each_entry(conf, &bundle->conflicts, list) {
		used += snprintf(buf + used, PAGE_SIZE - used, "%s %d",
				 conf->process_name, conf->pid);
		list_for_each_entry(frame, &conf->stack, list) {
			if (!frame->has_conflict)
				continue;
			used += snprintf(buf + used, PAGE_SIZE - used, " %s",
					 frame->symbol_name);
		}
		used += snprintf(buf + used, PAGE_SIZE - used, "\n");
	}
	return used;
}

static ssize_t stage_store(struct update_bundle *bundle,
			   const char *buf, size_t len)
{
	if (strncmp(buf, "applied\n", len) == 0 && bundle->stage == PREPARING)
		bundle->abort_cause = apply_update(bundle);
	else if (strncmp(buf, "reversed\n", len) == 0 &&
		 bundle->stage == APPLIED)
		bundle->abort_cause = reverse_patches(bundle);
	return len;
}

static ssize_t debug_show(struct update_bundle *bundle, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", bundle->debug);
}

static ssize_t debug_store(struct update_bundle *bundle, const char *buf,
			   size_t len)
{
	char *tmp;
	int d = simple_strtoul(buf, &tmp, 10);
	if (*buf && (*tmp == '\0' || *tmp == '\n')) {
		bundle->debug = d;
		return len;
	}
	return -EINVAL;
}
static struct ksplice_attribute stage_attribute =
	__ATTR(stage, 0600, stage_show, stage_store);
static struct ksplice_attribute abort_cause_attribute =
	__ATTR(abort_cause, 0400, abort_cause_show, NULL);
static struct ksplice_attribute debug_attribute =
	__ATTR(debug, 0600, debug_show, debug_store);
static struct ksplice_attribute conflict_attribute =
	__ATTR(conflicts, 0400, conflict_show, NULL);

static struct attribute *ksplice_attrs[] = {
	&stage_attribute.attr,
	&abort_cause_attribute.attr,
	&debug_attribute.attr,
	&conflict_attribute.attr,
	NULL,
};

static struct kobj_type ksplice_ktype = {
	.sysfs_ops = &ksplice_sysfs_ops,
	.release = ksplice_release,
	.default_attrs = ksplice_attrs,
};
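
/* The attributes above form the userspace control interface for an update.
 * Roughly, and depending on where the bundle's kobject gets registered in
 * ksplice_sysfs_init() below, usage looks like:
 *
 *   echo applied  > .../ksplice/stage        (apply the update)
 *   echo reversed > .../ksplice/stage        (reverse it)
 *   cat .../ksplice/abort_cause              (why the last attempt failed)
 *   cat .../ksplice/conflicts                (tasks that blocked it)
 *   echo 4 > .../ksplice/debug               (most verbose logging)
 *
 * These paths are illustrative only; the exact sysfs location differs
 * between the standalone and in-kernel builds. */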
void cleanup_ksplice_module(struct module_pack *pack)
{
	if (pack->bundle == NULL)
		return;
	if (pack->bundle->stage != APPLIED) {
		struct ksplice_patch *p;
		for (p = pack->patches; p < pack->patches_end; p++)
			kfree(p->saved);
		unregister_ksplice_module(pack);
	}
}
EXPORT_SYMBOL_GPL(cleanup_ksplice_module);
static abort_t activate_primary(struct module_pack *pack)
{
	const struct ksplice_patch *p;
	struct safety_record *rec;
	abort_t ret;
	ret = process_ksplice_relocs(pack, pack->primary_relocs,
				     pack->primary_relocs_end, 0);
	if (ret != OK)
		return ret;

	ret = resolve_patch_symbols(pack);
	if (ret != OK)
		return ret;

	ret = process_exports(pack);
	if (ret != OK)
		return ret;

	ret = add_patch_dependencies(pack);
	if (ret != OK)
		return ret;

	list_for_each_entry(rec, &pack->safety_records, list) {
		for (p = pack->patches; p < pack->patches_end; p++) {
			if (p->oldaddr == rec->addr)
				rec->care = 1;
		}
	}
	return OK;
}
static abort_t resolve_patch_symbols(struct module_pack *pack)
{
	struct ksplice_patch *p;
	abort_t ret;
	LIST_HEAD(vals);

	for (p = pack->patches; p < pack->patches_end; p++) {
		ret = compute_address(pack, p->oldstr, &vals, 0);
		if (ret != OK)
			return ret;

		if (!singular(&vals)) {
			release_vals(&vals);
			failed_to_find(pack, p->oldstr);
			return FAILED_TO_FIND;
		}
		p->oldaddr =
		    list_entry(vals.next, struct candidate_val, list)->val;
		release_vals(&vals);
	}
	return OK;
}
static abort_t process_exports(struct module_pack *pack)
{
	struct ksplice_export *export;
	struct module *m;
	const struct kernel_symbol *sym;
	const char *export_type;

	for (export = pack->exports; export < pack->exports_end; export++) {
		sym = __find_symbol(export->name, &m, NULL, &export_type, 1, 0);
		if (sym == NULL) {
			ksdebug(pack, 0, "Could not find kernel_symbol struct "
				"for %s (%s)\n", export->name, export->type);
			return MISSING_EXPORT;
		}
		if (strcmp(export_type, export->type) != 0) {
			ksdebug(pack, 0, "Nonmatching export type for %s "
				"(%s/%s)\n", export->name, export->type,
				export_type);
			return MISSING_EXPORT;
		}
		/* Cast away const since we are planning to mutate the
		 * kernel_symbol structure. */
		export->sym = (struct kernel_symbol *)sym;
		export->saved_name = export->sym->name;
		if (m != pack->primary && use_module(pack->primary, m) != 1)
			return UNEXPECTED;
	}
	return OK;
}
static abort_t prepare_trampolines(struct update_bundle *bundle)
{
	struct module_pack *pack;
	struct ksplice_patch *p;
	abort_t ret;

	list_for_each_entry(pack, &bundle->packs, list) {
		for (p = pack->patches; p < pack->patches_end; p++) {
			ret = create_trampoline(p);
			if (ret != OK) {
				free_trampolines(bundle);
				return ret;
			}
			p->saved = kmalloc(p->size, GFP_KERNEL);
			if (p->saved == NULL) {
				free_trampolines(bundle);
				return OUT_OF_MEMORY;
			}
		}
	}
	return OK;
}
static void free_trampolines(struct update_bundle *bundle)
{
	struct module_pack *pack;
	struct ksplice_patch *p;

	list_for_each_entry(pack, &bundle->packs, list) {
		for (p = pack->patches; p < pack->patches_end; p++) {
			kfree(p->trampoline);
			p->trampoline = NULL;
		}
	}
}
static void insert_trampoline(struct ksplice_patch *p)
{
	mm_segment_t old_fs = get_fs();
	set_fs(KERNEL_DS);
	memcpy((void *)p->saved, (void *)p->oldaddr, p->size);
	memcpy((void *)p->oldaddr, (void *)p->trampoline, p->size);
	flush_icache_range(p->oldaddr, p->oldaddr + p->size);
	set_fs(old_fs);
}

static void remove_trampoline(const struct ksplice_patch *p)
{
	mm_segment_t old_fs = get_fs();
	set_fs(KERNEL_DS);
	memcpy((void *)p->oldaddr, (void *)p->saved, p->size);
	flush_icache_range(p->oldaddr, p->oldaddr + p->size);
	set_fs(old_fs);
}
static abort_t apply_patches(struct update_bundle *bundle)
{
	int i;
	abort_t ret;

	ret = prepare_trampolines(bundle);
	if (ret != OK)
		return ret;

	for (i = 0; i < 5; i++) {
		cleanup_conflicts(bundle);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
		ret = (__force abort_t)stop_machine(__apply_patches, bundle,
						    NULL);
#else /* LINUX_VERSION_CODE < */
/* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
		ret = (__force abort_t)stop_machine_run(__apply_patches, bundle,
							NR_CPUS);
#endif /* LINUX_VERSION_CODE */
		if (ret != CODE_BUSY)
			break;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(1000));
	}
	free_trampolines(bundle);

	if (ret == OK) {
		struct module_pack *pack;
		const struct ksplice_size *s;
		struct safety_record *rec;
		list_for_each_entry(pack, &bundle->packs, list) {
			for (s = pack->primary_sizes;
			     s < pack->primary_sizes_end; s++) {
				rec = kmalloc(sizeof(*rec), GFP_KERNEL);
				if (rec == NULL)
					return OUT_OF_MEMORY;
				rec->addr = s->thismod_addr;
				rec->size = s->size;
				rec->care = 1;
				rec->name = s->name;
				list_add(&rec->list, &pack->safety_records);
			}
		}
		_ksdebug(bundle, 0, KERN_INFO "ksplice: Update %s applied "
			 "successfully\n", bundle->kid);
	} else if (ret == CODE_BUSY) {
		print_conflicts(bundle);
		_ksdebug(bundle, 0, KERN_ERR "ksplice: Aborted %s.  stack "
			 "check: to-be-replaced code is busy\n", bundle->kid);
	} else if (ret == ALREADY_REVERSED) {
		_ksdebug(bundle, 0, KERN_ERR "ksplice: Aborted %s.  Ksplice "
			 "update %s is already reversed.\n", bundle->kid,
			 bundle->kid);
	}
	return ret;
}
static abort_t reverse_patches(struct update_bundle *bundle)
{
	int i;
	abort_t ret;
	struct module_pack *pack;

	clear_debug_buf(bundle);
	ret = init_debug_buf(bundle);
	if (ret != OK)
		return ret;

	_ksdebug(bundle, 0, KERN_INFO "ksplice: Preparing to reverse %s\n",
		 bundle->kid);

	for (i = 0; i < 5; i++) {
		cleanup_conflicts(bundle);
		clear_list(&bundle->conflicts, struct conflict, list);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
		ret = (__force abort_t)stop_machine(__reverse_patches, bundle,
						    NULL);
#else /* LINUX_VERSION_CODE < */
/* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
		ret = (__force abort_t)stop_machine_run(__reverse_patches,
							bundle, NR_CPUS);
#endif /* LINUX_VERSION_CODE */
		if (ret != CODE_BUSY)
			break;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(1000));
	}
	if (ret == OK) {
		list_for_each_entry(pack, &bundle->packs, list)
			clear_list(&pack->safety_records, struct safety_record,
				   list);
		_ksdebug(bundle, 0, KERN_INFO "ksplice: Update %s reversed"
			 " successfully\n", bundle->kid);
	} else if (ret == CODE_BUSY) {
		print_conflicts(bundle);
		_ksdebug(bundle, 0, KERN_ERR "ksplice: Aborted %s.  stack "
			 "check: to-be-reversed code is busy\n", bundle->kid);
	} else if (ret == MODULE_BUSY) {
		_ksdebug(bundle, 0, KERN_ERR "ksplice: Update %s is"
			 " in use by another module\n", bundle->kid);
	}
	return ret;
}
static int __apply_patches(void *bundleptr)
{
	struct update_bundle *bundle = bundleptr;
	struct module_pack *pack;
	struct ksplice_patch *p;
	struct ksplice_export *export;
	abort_t ret;

	if (bundle->stage == APPLIED)
		return (__force int)OK;

	if (bundle->stage != PREPARING)
		return (__force int)UNEXPECTED;

	ret = check_each_task(bundle);
	if (ret != OK)
		return (__force int)ret;

	list_for_each_entry(pack, &bundle->packs, list) {
		if (try_module_get(pack->primary) != 1) {
			struct module_pack *pack1;
			list_for_each_entry(pack1, &bundle->packs, list) {
				if (pack1 == pack)
					break;
				module_put(pack1->primary);
			}
			return (__force int)UNEXPECTED;
		}
	}

	bundle->stage = APPLIED;

	list_for_each_entry(pack, &bundle->packs, list) {
		for (export = pack->exports; export < pack->exports_end;
		     export++)
			export->sym->name = export->new_name;
	}

	list_for_each_entry(pack, &bundle->packs, list) {
		for (p = pack->patches; p < pack->patches_end; p++)
			insert_trampoline(p);
	}
	return (__force int)OK;
}
static int __reverse_patches(void *bundleptr)
{
	struct update_bundle *bundle = bundleptr;
	struct module_pack *pack;
	const struct ksplice_patch *p;
	struct ksplice_export *export;
	abort_t ret;

	if (bundle->stage != APPLIED)
		return (__force int)OK;

#ifdef CONFIG_MODULE_UNLOAD
	/* primary's refcount isn't changed by accessing ksplice.ko's sysfs */
	list_for_each_entry(pack, &bundle->packs, list) {
		if (module_refcount(pack->primary) != 1)
			return (__force int)MODULE_BUSY;
	}
#endif /* CONFIG_MODULE_UNLOAD */

	ret = check_each_task(bundle);
	if (ret != OK)
		return (__force int)ret;

	bundle->stage = REVERSED;

	list_for_each_entry(pack, &bundle->packs, list)
		module_put(pack->primary);

	list_for_each_entry(pack, &bundle->packs, list) {
		for (export = pack->exports; export < pack->exports_end;
		     export++)
			export->sym->name = export->saved_name;
	}

	list_for_each_entry(pack, &bundle->packs, list) {
		for (p = pack->patches; p < pack->patches_end; p++)
			remove_trampoline(p);
	}
	return (__force int)OK;
}
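
/* The stack check: __apply_patches and __reverse_patches run inside
 * stop_machine, so every other CPU is quiescent.  check_each_task walks
 * every task and scans its saved instruction pointer and kernel stack for
 * addresses that fall inside code the update is about to replace (or inside
 * replacement code, when reversing).  If any are found, the switch aborts
 * with CODE_BUSY and the callers above retry a few times. */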
static abort_t check_each_task(struct update_bundle *bundle)
{
	const struct task_struct *g, *p;
	abort_t ret = OK;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
/* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
	read_lock(&tasklist_lock);
#endif /* LINUX_VERSION_CODE */
	do_each_thread(g, p) {
		/* do_each_thread is a double loop! */
		if (check_task(bundle, p, 0) != OK)
			ret = check_task(bundle, p, 1);
	} while_each_thread(g, p);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
/* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
	read_unlock(&tasklist_lock);
#endif /* LINUX_VERSION_CODE */
	return ret;
}
static abort_t check_task(struct update_bundle *bundle,
			  const struct task_struct *t, int save_conflicts)
{
	abort_t status, ret;
	struct conflict *conf = NULL;

	if (save_conflicts == 1) {
		conf = kmalloc(sizeof(*conf), GFP_ATOMIC);
		if (conf == NULL)
			return OUT_OF_MEMORY;
		conf->process_name = kstrdup(t->comm, GFP_ATOMIC);
		if (conf->process_name == NULL) {
			kfree(conf);
			return OUT_OF_MEMORY;
		}
		conf->pid = t->pid;
		INIT_LIST_HEAD(&conf->stack);
		list_add(&conf->list, &bundle->conflicts);
	}

	status = check_address_for_conflict(bundle, conf, KSPLICE_IP(t));
	if (t == current) {
		ret = check_stack(bundle, conf, task_thread_info(t),
				  (unsigned long *)__builtin_frame_address(0));
		if (status == OK)
			status = ret;
	} else if (!task_curr(t)) {
		ret = check_stack(bundle, conf, task_thread_info(t),
				  (unsigned long *)KSPLICE_SP(t));
		if (status == OK)
			status = ret;
	} else if (!is_stop_machine(t)) {
		status = UNEXPECTED;
	}
	return status;
}
1040 static abort_t
check_stack(struct update_bundle
*bundle
, struct conflict
*conf
,
1041 const struct thread_info
*tinfo
,
1042 const unsigned long *stack
)
1044 abort_t status
= OK
, ret
;
1047 while (valid_stack_ptr(tinfo
, stack
)) {
1049 ret
= check_address_for_conflict(bundle
, conf
, addr
);
static abort_t check_address_for_conflict(struct update_bundle *bundle,
					  struct conflict *conf,
					  unsigned long addr)
{
	const struct safety_record *rec;
	struct module_pack *pack;
	struct ksplice_frame *frame = NULL;

	if (conf != NULL) {
		frame = kmalloc(sizeof(*frame), GFP_ATOMIC);
		if (frame == NULL)
			return OUT_OF_MEMORY;
		frame->addr = addr;
		frame->has_conflict = 0;
		frame->symbol_name = NULL;
		list_add(&frame->list, &conf->stack);
	}

	list_for_each_entry(pack, &bundle->packs, list) {
		list_for_each_entry(rec, &pack->safety_records, list) {
			if (rec->care == 1 && addr >= rec->addr
			    && addr < rec->addr + rec->size) {
				if (frame != NULL) {
					frame->symbol_name = rec->name;
					frame->has_conflict = 1;
				}
				return CODE_BUSY;
			}
		}
	}
	return OK;
}
/* Modified version of Linux's valid_stack_ptr */
static int valid_stack_ptr(const struct thread_info *tinfo, const void *p)
{
	return p > (const void *)tinfo
	    && p <= (const void *)tinfo + THREAD_SIZE - sizeof(long);
}
static int is_stop_machine(const struct task_struct *t)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
	const char *num;
	if (!starts_with(t->comm, "kstop"))
		return 0;
	num = t->comm + strlen("kstop");
	return num[strspn(num, "0123456789")] == '\0';
#else /* LINUX_VERSION_CODE < */
	return strcmp(t->comm, "kstopmachine") == 0;
#endif /* LINUX_VERSION_CODE */
}
static void cleanup_conflicts(struct update_bundle *bundle)
{
	struct conflict *conf;
	list_for_each_entry(conf, &bundle->conflicts, list) {
		clear_list(&conf->stack, struct ksplice_frame, list);
		kfree(conf->process_name);
	}
	clear_list(&bundle->conflicts, struct conflict, list);
}
static void print_conflicts(struct update_bundle *bundle)
{
	const struct conflict *conf;
	const struct ksplice_frame *frame;
	list_for_each_entry(conf, &bundle->conflicts, list) {
		_ksdebug(bundle, 2, KERN_DEBUG "ksplice: stack check: pid %d "
			 "(%s):", conf->pid, conf->process_name);
		list_for_each_entry(frame, &conf->stack, list) {
			_ksdebug(bundle, 2, " %" ADDR, frame->addr);
			if (frame->has_conflict)
				_ksdebug(bundle, 2, " [<-CONFLICT]");
		}
		_ksdebug(bundle, 2, "\n");
	}
}
static int register_ksplice_module(struct module_pack *pack)
{
	struct update_bundle *bundle;
	struct module *m;
	int ret = 0;

	INIT_LIST_HEAD(&pack->reloc_addrmaps);
	INIT_LIST_HEAD(&pack->reloc_namevals);
	INIT_LIST_HEAD(&pack->safety_records);

	mutex_lock(&module_mutex);
	pack->target = NULL;
	if (pack->target_name != NULL) {
		list_for_each_entry(m, &modules, list) {
			if (strcmp(pack->target_name, m->name) == 0)
				pack->target = m;
		}
		if (pack->target == NULL || !module_is_live(pack->target)) {
			ret = -ENODEV;
			goto out;
		}
	}
	list_for_each_entry(bundle, &update_bundles, list) {
		if (strcmp(pack->kid, bundle->kid) == 0) {
			if (bundle->stage != PREPARING) {
				ret = -EPERM;
				goto out;
			}
			add_to_bundle(pack, bundle);
			goto out;
		}
	}
	bundle = init_ksplice_bundle(pack->kid);
	if (bundle == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	ret = ksplice_sysfs_init(bundle);
	if (ret != 0) {
		cleanup_ksplice_bundle(bundle);
		goto out;
	}
	add_to_bundle(pack, bundle);
out:
	mutex_unlock(&module_mutex);
	return ret;
}
static void unregister_ksplice_module(struct module_pack *pack)
{
	if (pack->bundle == NULL)
		return;
	if (pack->bundle->stage != APPLIED) {
		mutex_lock(&module_mutex);
		list_del(&pack->list);
		mutex_unlock(&module_mutex);
		if (list_empty(&pack->bundle->packs))
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
			kobject_put(&pack->bundle->kobj);
#else /* LINUX_VERSION_CODE < */
/* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
			kobject_unregister(&pack->bundle->kobj);
#endif /* LINUX_VERSION_CODE */
		pack->bundle = NULL;
	}
}
static void add_to_bundle(struct module_pack *pack,
			  struct update_bundle *bundle)
{
	pack->bundle = bundle;
	list_add(&pack->list, &bundle->packs);
}
static void cleanup_ksplice_bundle(struct update_bundle *bundle)
{
	mutex_lock(&module_mutex);
	list_del(&bundle->list);
	mutex_unlock(&module_mutex);
	cleanup_conflicts(bundle);
	clear_debug_buf(bundle);
	kfree(bundle->name);
	kfree(bundle->kid);
	kfree(bundle);
}
static struct update_bundle *init_ksplice_bundle(const char *kid)
{
	struct update_bundle *bundle;
	const char *str = "ksplice_";
	char *buf;
	bundle = kcalloc(1, sizeof(struct update_bundle), GFP_KERNEL);
	if (bundle == NULL)
		return NULL;
	buf = kmalloc(strlen(kid) + strlen(str) + 1, GFP_KERNEL);
	if (buf == NULL) {
		kfree(bundle);
		return NULL;
	}
	snprintf(buf, strlen(kid) + strlen(str) + 1, "%s%s", str, kid);
	bundle->name = buf;
	bundle->kid = kstrdup(kid, GFP_KERNEL);
	if (bundle->kid == NULL) {
		kfree(bundle->name);
		kfree(bundle);
		return NULL;
	}
	INIT_LIST_HEAD(&bundle->packs);
	if (init_debug_buf(bundle) != OK) {
		kfree(bundle->kid);
		kfree(bundle->name);
		kfree(bundle);
		return NULL;
	}
	list_add(&bundle->list, &update_bundles);
	bundle->stage = PREPARING;
	bundle->abort_cause = OK;
	INIT_LIST_HEAD(&bundle->conflicts);
	return bundle;
}
static int ksplice_sysfs_init(struct update_bundle *bundle)
{
	int ret = 0;
	memset(&bundle->kobj, 0, sizeof(bundle->kobj));
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
/* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
#ifndef KSPLICE_STANDALONE
	ret = kobject_init_and_add(&bundle->kobj, &ksplice_ktype,
				   ksplice_kobj, "%s", bundle->kid);
#else /* KSPLICE_STANDALONE */
	ret = kobject_init_and_add(&bundle->kobj, &ksplice_ktype,
				   &THIS_MODULE->mkobj.kobj, "ksplice");
#endif /* KSPLICE_STANDALONE */
#else /* LINUX_VERSION_CODE < */
	ret = kobject_set_name(&bundle->kobj, "%s", "ksplice");
	if (ret != 0)
		return ret;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
/* b86ab02803095190d6b72bcc18dcf620bf378df9 was after 2.6.10 */
	bundle->kobj.parent = &THIS_MODULE->mkobj.kobj;
#else /* LINUX_VERSION_CODE < */
	bundle->kobj.parent = &THIS_MODULE->mkobj->kobj;
#endif /* LINUX_VERSION_CODE */
	bundle->kobj.ktype = &ksplice_ktype;
	ret = kobject_register(&bundle->kobj);
#endif /* LINUX_VERSION_CODE */
	if (ret != 0)
		return ret;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
/* 312c004d36ce6c739512bac83b452f4c20ab1f62 was after 2.6.14 */
	kobject_uevent(&bundle->kobj, KOBJ_ADD);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
/* 12025235884570ba7f02a6f427f973ac6be7ec54 was after 2.6.9 */
	kobject_uevent(&bundle->kobj, KOBJ_ADD, NULL);
#endif /* LINUX_VERSION_CODE */
	return 0;
}
int init_ksplice_module(struct module_pack *pack)
{
#ifdef KSPLICE_STANDALONE
	if (bootstrapped == 0)
		return -1;
#endif /* KSPLICE_STANDALONE */
	return register_ksplice_module(pack);
}
EXPORT_SYMBOL(init_ksplice_module);
static abort_t apply_update(struct update_bundle *bundle)
{
	struct module_pack *pack;
	abort_t ret;

	mutex_lock(&module_mutex);
#ifdef KSPLICE_NEED_PARAINSTRUCTIONS
	list_for_each_entry(pack, &bundle->packs, list) {
		if (pack->target == NULL) {
			apply_paravirt(pack->primary_parainstructions,
				       pack->primary_parainstructions_end);
			apply_paravirt(pack->helper_parainstructions,
				       pack->helper_parainstructions_end);
		}
	}
#endif /* KSPLICE_NEED_PARAINSTRUCTIONS */

	list_for_each_entry(pack, &bundle->packs, list) {
		ksdebug(pack, 0, KERN_INFO "ksplice_h: Preparing and checking "
			"%s\n", pack->name);
		ret = activate_helper(pack);
		if (ret != OK)
			goto out;

		ret = activate_primary(pack);
		if (ret != OK)
			goto out;
	}
	ret = apply_patches(bundle);
out:
	list_for_each_entry(pack, &bundle->packs, list) {
		clear_list(&pack->reloc_namevals, struct reloc_nameval, list);
		clear_list(&pack->reloc_addrmaps, struct reloc_addrmap, list);
		if (bundle->stage == PREPARING)
			clear_list(&pack->safety_records, struct safety_record,
				   list);
	}
	mutex_unlock(&module_mutex);
	return ret;
}
static abort_t activate_helper(struct module_pack *pack)
{
	const struct ksplice_size *s;
	abort_t ret;
	int i;
	int record_count = pack->helper_sizes_end - pack->helper_sizes;
	char *finished;
	int numfinished, oldfinished = 0;
	int restart_count = 0;

	ret = process_ksplice_relocs(pack, pack->helper_relocs,
				     pack->helper_relocs_end, 1);
	if (ret != OK)
		return ret;

	finished = kcalloc(record_count, sizeof(char), GFP_KERNEL);
	if (finished == NULL)
		return OUT_OF_MEMORY;

start:
	for (s = pack->helper_sizes; s < pack->helper_sizes_end; s++) {
		i = s - pack->helper_sizes;
		if (finished[i])
			continue;
		ret = search_for_match(pack, s);
		if (ret == OK) {
			finished[i] = 1;
		} else if (ret != NO_MATCH) {
			kfree(finished);
			return ret;
		}
	}

	numfinished = 0;
	for (i = 0; i < record_count; i++) {
		if (finished[i])
			numfinished++;
	}
	if (numfinished == record_count) {
		kfree(finished);
		return OK;
	}

	if (oldfinished == numfinished) {
		for (s = pack->helper_sizes; s < pack->helper_sizes_end; s++) {
			i = s - pack->helper_sizes;
			if (finished[i] == 0)
				ksdebug(pack, 2, KERN_DEBUG "ksplice: run-pre: "
					"could not match section %s\n",
					s->name);
		}
		print_abort(pack, "run-pre: could not match some sections");
		kfree(finished);
		return NO_MATCH;
	}
	oldfinished = numfinished;

	if (restart_count < 20) {
		restart_count++;
		goto start;
	}
	print_abort(pack, "run-pre: restart limit exceeded");
	kfree(finished);
	return NO_MATCH;
}
static abort_t search_for_match(struct module_pack *pack,
				const struct ksplice_size *s)
{
	int i;
	abort_t ret;
	unsigned long run_addr;
	LIST_HEAD(vals);
	struct candidate_val *v;

	for (i = 0; i < s->num_sym_addrs; i++) {
		ret = add_candidate_val(&vals, s->sym_addrs[i]);
		if (ret != OK)
			return ret;
	}

	ret = compute_address(pack, s->name, &vals, 1);
	if (ret != OK)
		return ret;

	ksdebug(pack, 3, KERN_DEBUG "ksplice_h: run-pre: starting sect search "
		"for %s\n", s->name);

	list_for_each_entry(v, &vals, list) {
		run_addr = v->val;
		ret = try_addr(pack, s, run_addr, s->thismod_addr);
		if (ret != NO_MATCH) {
			release_vals(&vals);
			return ret;
		}
	}
	release_vals(&vals);

#ifdef KSPLICE_STANDALONE
	ret = brute_search_all(pack, s);
	if (ret != NO_MATCH)
		return ret;
#endif /* KSPLICE_STANDALONE */
	return NO_MATCH;
}
static abort_t rodata_run_pre_cmp(struct module_pack *pack,
				  unsigned long run_addr,
				  unsigned long pre_addr, unsigned int size,
				  int rerun)
{
	int matched;
	abort_t ret;
	unsigned int off;
	const unsigned char *pre = (const unsigned char *)pre_addr;
	const unsigned char *run = (const unsigned char *)run_addr;

	if (rerun)
		print_bytes(pack, run, size, pre, size);
	for (off = 0; off < size; off++) {
		if (!virtual_address_mapped((unsigned long)run + off)) {
			if (!rerun)
				ksdebug(pack, 3, "rodata unmapped after "
					"%u/%u bytes\n", off, size);
			return NO_MATCH;
		}
		ret = handle_myst_reloc(pack, pre_addr + off, run_addr + off,
					rerun, &matched);
		if (ret == NO_MATCH) {
			if (!rerun)
				ksdebug(pack, 3, "reloc in rodata section does "
					"not match after %u/%u bytes\n", off,
					size);
			return ret;
		} else if (ret != OK) {
			return ret;
		} else if (matched != 0) {
			off += matched - 1;
		} else if (run[off] != pre[off]) {
			if (!rerun)
				ksdebug(pack, 3, "rodata does not match after "
					"%u/%u bytes\n", off, size);
			return NO_MATCH;
		}
	}
	return OK;
}
*module_data_address(unsigned long addr
)
1499 list_for_each_entry(mod
, &modules
, list
) {
1500 if (addr
>= (unsigned long)mod
->module_core
+
1501 mod
->core_text_size
&&
1502 addr
< (unsigned long)mod
->module_core
+ mod
->core_size
)
static abort_t try_addr(struct module_pack *pack, const struct ksplice_size *s,
			unsigned long run_addr, unsigned long pre_addr)
{
	struct safety_record *rec;
	struct reloc_nameval *nv;
	abort_t ret;
	const struct module *run_module;

	if ((s->flags & KSPLICE_SIZE_RODATA) != 0)
		run_module = module_data_address(run_addr);
	else
		run_module = module_text_address(run_addr);
	if (run_module != pack->target) {
		ksdebug(pack, 1, KERN_DEBUG "ksplice_h: run-pre: ignoring "
			"address %" ADDR " in other module %s for sect %s\n",
			run_addr,
			run_module == NULL ? "vmlinux" : run_module->name,
			s->name);
		return NO_MATCH;
	}

	if ((s->flags & KSPLICE_SIZE_RODATA) != 0)
		ret = rodata_run_pre_cmp(pack, run_addr, pre_addr, s->size, 0);
	else
		ret = run_pre_cmp(pack, run_addr, pre_addr, s->size, 0);
	if (ret == NO_MATCH) {
		set_temp_myst_relocs(pack, NOVAL);
		ksdebug(pack, 1, KERN_DEBUG "ksplice_h: run-pre: %s sect %s "
			"does not match ",
			(s->flags & KSPLICE_SIZE_RODATA) != 0 ? "data" : "text",
			s->name);
		ksdebug(pack, 1, "(r_a=%" ADDR " p_a=%" ADDR " s=%ld)\n",
			run_addr, pre_addr, s->size);
		ksdebug(pack, 1, KERN_DEBUG "ksplice_h: run-pre: ");
		if (pack->bundle->debug >= 1) {
			if ((s->flags & KSPLICE_SIZE_RODATA) != 0)
				ret = rodata_run_pre_cmp(pack, run_addr,
							 pre_addr, s->size, 1);
			else
				ret = run_pre_cmp(pack, run_addr, pre_addr,
						  s->size, 1);
			set_temp_myst_relocs(pack, NOVAL);
		}
		ksdebug(pack, 1, "\n");
		return ret;
	} else if (ret != OK) {
		return ret;
	}

	set_temp_myst_relocs(pack, VAL);
	ksdebug(pack, 3, KERN_DEBUG "ksplice_h: run-pre: found sect %s=%" ADDR
		"\n", s->name, run_addr);

	rec = kmalloc(sizeof(*rec), GFP_KERNEL);
	if (rec == NULL)
		return OUT_OF_MEMORY;
	/* It is safe for addr to point to the beginning of a patched function,
	   because that location will be overwritten with a trampoline. */
	if ((s->flags & KSPLICE_SIZE_DELETED) == 0 &&
	    (s->flags & KSPLICE_SIZE_RODATA) == 0) {
		rec->addr = run_addr + 1;
		rec->size = s->size - 1;
		rec->care = 0;	/* May be changed later by ksplice_patches */
	} else {
		rec->addr = run_addr;
		rec->size = s->size;
		rec->care = 1;
	}
	rec->name = s->name;
	list_add(&rec->list, &pack->safety_records);

	nv = find_nameval(pack, s->name, 1);
	if (nv == NULL)
		return OUT_OF_MEMORY;
	nv->val = run_addr;
	nv->status = VAL;

	return OK;
}
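
/* handle_myst_reloc() resolves a "mystery" relocation during run-pre
 * comparison: a reloc whose target symbol could not be uniquely determined
 * earlier was recorded in pack->reloc_addrmaps, and here the value observed
 * in the running kernel is used either to learn the symbol's address
 * (status TEMP) or to verify it against a previously learned value. */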
static abort_t handle_myst_reloc(struct module_pack *pack,
				 unsigned long pre_addr, unsigned long run_addr,
				 int rerun, int *matched)
{
	unsigned long run_reloc_addr;
	long run_reloc_val, expected;
	int offset;
	struct reloc_addrmap *map = find_addrmap(pack, pre_addr);
	if (map == NULL) {
		*matched = 0;
		return OK;
	}

	offset = (int)(pre_addr - map->addr);
	run_reloc_addr = run_addr - offset;
	switch (map->size) {
	case 1:
		run_reloc_val =
		    *(int8_t *)run_reloc_addr & (int8_t)map->dst_mask;
		break;
	case 2:
		run_reloc_val =
		    *(int16_t *)run_reloc_addr & (int16_t)map->dst_mask;
		break;
	case 4:
		run_reloc_val =
		    *(int32_t *)run_reloc_addr & (int32_t)map->dst_mask;
		break;
	case 8:
		run_reloc_val = *(int64_t *)run_reloc_addr & map->dst_mask;
		break;
	default:
		print_abort(pack, "Invalid relocation size");
		return UNEXPECTED;
	}

	if (!rerun) {
		ksdebug(pack, 3, KERN_DEBUG "ksplice_h: run-pre: reloc at r_a=%"
			ADDR " p_a=%" ADDR ": ", run_addr, pre_addr);
		ksdebug(pack, 3, "%s=%" ADDR " (A=%" ADDR " *r=%" ADDR ")\n",
			map->nameval->name, map->nameval->val, map->addend,
			run_reloc_val);
	}

	if (!starts_with(map->nameval->name, ".rodata.str")) {
		if (contains_canary(pack, run_reloc_addr, map->size,
				    map->dst_mask) != 0)
			return UNEXPECTED;

		expected = run_reloc_val - map->addend;
		if (map->pcrel)
			expected += run_reloc_addr;
		if (map->nameval->status == NOVAL) {
			map->nameval->val = expected;
			map->nameval->status = TEMP;
		} else if (map->nameval->val != expected) {
			if (rerun)
				return NO_MATCH;
			ksdebug(pack, 0, KERN_DEBUG "ksplice_h: run-pre reloc: "
				"Nameval address %" ADDR " does not match "
				"expected %" ADDR " for %s!\n",
				map->nameval->val, expected,
				map->nameval->name);
			return NO_MATCH;
		}
	}
	*matched = map->size - offset;
	return OK;
}
static abort_t process_ksplice_relocs(struct module_pack *pack,
				      const struct ksplice_reloc *relocs,
				      const struct ksplice_reloc *relocs_end,
				      int pre)
{
	const struct ksplice_reloc *r;
	for (r = relocs; r < relocs_end; r++) {
		abort_t ret = process_reloc(pack, r, pre);
		if (ret != OK)
			return ret;
	}
	return OK;
}
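
/* In the relocation entries produced at build time, each unresolved symbol
 * reference is left holding a recognizable "canary" placeholder value.
 * contains_canary() checks whether that placeholder is still present: if it
 * is not, the relocation was already resolved (e.g. as part of an
 * altinstruction) and is skipped below; if it is, process_reloc() must fill
 * in the real address. */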
static abort_t process_reloc(struct module_pack *pack,
			     const struct ksplice_reloc *r, int pre)
{
	int i, ret;
	abort_t ret1;
	long off, val;
	unsigned long sym_addr;
	struct reloc_addrmap *map;
	LIST_HEAD(vals);

#ifdef KSPLICE_STANDALONE
	/* run_pre_reloc: will this reloc be used for run-pre matching? */
	const int run_pre_reloc = pre && bootstrapped;
#endif /* KSPLICE_STANDALONE */

#ifndef CONFIG_KALLSYMS
#ifdef KSPLICE_STANDALONE
	if (!bootstrapped)
		goto skip_using_system_map;
#else /* !KSPLICE_STANDALONE */
	goto skip_using_system_map;
#endif /* KSPLICE_STANDALONE */
#endif /* !CONFIG_KALLSYMS */

	/* Some Fedora kernel releases have System.map files whose symbol
	 * addresses disagree with the running kernel by a constant address
	 * offset because of the CONFIG_PHYSICAL_START and CONFIG_PHYSICAL_ALIGN
	 * values used to compile these kernels.  This constant address offset
	 * is always a multiple of 0x100000.
	 *
	 * If we observe an offset that is NOT a multiple of 0x100000, then the
	 * user provided us with an incorrect System.map file, and we should
	 * abort.
	 * If we observe an offset that is a multiple of 0x100000, then we can
	 * adjust the System.map address values accordingly and proceed.
	 */
	off = (unsigned long)printk - pack->map_printk;
	if (off & 0xfffff) {
		print_abort(pack, "System.map does not match kernel");
		return BAD_SYSTEM_MAP;
	}
	for (i = 0; i < r->num_sym_addrs; i++) {
		ret1 = add_candidate_val(&vals, r->sym_addrs[i] + off);
		if (ret1 != OK)
			return ret1;
	}
#ifndef CONFIG_KALLSYMS
skip_using_system_map:
#endif /* !CONFIG_KALLSYMS */

	ret = contains_canary(pack, r->blank_addr, r->size, r->dst_mask);
	if (ret < 0) {
		release_vals(&vals);
		return UNEXPECTED;
	}
	if (ret == 0) {
		ksdebug(pack, 4, KERN_DEBUG "ksplice%s: reloc: skipped %s:%"
			ADDR " (altinstr)\n", (pre ? "_h" : ""), r->sym_name,
			r->blank_offset);
		release_vals(&vals);
		return OK;
	}

	ret1 = compute_address(pack, r->sym_name, &vals, pre);
	if (ret1 != OK)
		return ret1;
	if (!singular(&vals)) {
		release_vals(&vals);
#ifdef KSPLICE_STANDALONE
		if (!run_pre_reloc) {
#else /* !KSPLICE_STANDALONE */
		if (!pre) {
#endif /* KSPLICE_STANDALONE */
			failed_to_find(pack, r->sym_name);
			return FAILED_TO_FIND;
		}

		ksdebug(pack, 4, KERN_DEBUG "ksplice: reloc: deferred %s:%" ADDR
			" to run-pre\n", r->sym_name, r->blank_offset);

		map = kmalloc(sizeof(*map), GFP_KERNEL);
		if (map == NULL)
			return OUT_OF_MEMORY;
		map->addr = r->blank_addr;
		map->nameval = find_nameval(pack, r->sym_name, 1);
		if (map->nameval == NULL) {
			kfree(map);
			return OUT_OF_MEMORY;
		}
		map->pcrel = r->pcrel;
		map->addend = r->addend;
		map->size = r->size;
		map->dst_mask = r->dst_mask;
		list_add(&map->list, &pack->reloc_addrmaps);
		return OK;
	}

	sym_addr = list_entry(vals.next, struct candidate_val, list)->val;
	release_vals(&vals);

	ret1 = add_dependency_on_address(pack, sym_addr);
	if (ret1 != OK)
		return ret1;

#ifdef KSPLICE_STANDALONE
	if (r->pcrel && run_pre_reloc) {
#else /* !KSPLICE_STANDALONE */
	if (r->pcrel && pre) {
#endif /* KSPLICE_STANDALONE */
		map = kmalloc(sizeof(*map), GFP_KERNEL);
		if (map == NULL)
			return OUT_OF_MEMORY;
		map->addr = r->blank_addr;
		map->nameval = find_nameval(pack, "ksplice_zero", 1);
		if (map->nameval == NULL) {
			kfree(map);
			return OUT_OF_MEMORY;
		}
		map->nameval->val = 0;
		map->nameval->status = VAL;
		map->pcrel = r->pcrel;
		map->addend = sym_addr + r->addend;
		map->size = r->size;
		map->dst_mask = r->dst_mask;
		list_add(&map->list, &pack->reloc_addrmaps);
	}

	if (r->pcrel)
		val = sym_addr + r->addend - r->blank_addr;
	else
		val = sym_addr + r->addend;

	switch (r->size) {
	case 1:
		*(int8_t *)r->blank_addr =
		    (*(int8_t *)r->blank_addr & ~(int8_t)r->dst_mask)
		    | ((val >> r->rightshift) & (int8_t)r->dst_mask);
		break;
	case 2:
		*(int16_t *)r->blank_addr =
		    (*(int16_t *)r->blank_addr & ~(int16_t)r->dst_mask)
		    | ((val >> r->rightshift) & (int16_t)r->dst_mask);
		break;
	case 4:
		*(int32_t *)r->blank_addr =
		    (*(int32_t *)r->blank_addr & ~(int32_t)r->dst_mask)
		    | ((val >> r->rightshift) & (int32_t)r->dst_mask);
		break;
	case 8:
		*(int64_t *)r->blank_addr =
		    (*(int64_t *)r->blank_addr & ~r->dst_mask) |
		    ((val >> r->rightshift) & r->dst_mask);
		break;
	default:
		print_abort(pack, "Invalid relocation size");
		return UNEXPECTED;
	}

	ksdebug(pack, 4, KERN_DEBUG "ksplice%s: reloc: %s:%" ADDR " ",
		(pre ? "_h" : ""), r->sym_name, r->blank_offset);
	ksdebug(pack, 4, "(S=%" ADDR " A=%" ADDR " ", sym_addr, r->addend);
	switch (r->size) {
	case 1:
		ksdebug(pack, 4, "aft=%02x)\n", *(int8_t *)r->blank_addr);
		break;
	case 2:
		ksdebug(pack, 4, "aft=%04x)\n", *(int16_t *)r->blank_addr);
		break;
	case 4:
		ksdebug(pack, 4, "aft=%08x)\n", *(int32_t *)r->blank_addr);
		break;
	case 8:
		ksdebug(pack, 4, "aft=%016llx)\n", *(int64_t *)r->blank_addr);
		break;
	default:
		print_abort(pack, "Invalid relocation size");
		return UNEXPECTED;
	}
	return OK;
}
static abort_t add_dependency_on_address(struct module_pack *pack,
					 unsigned long addr)
{
	struct module *m = module_text_address(follow_trampolines(pack, addr));
	if (m == NULL || starts_with(m->name, pack->name) ||
	    ends_with(m->name, "_helper"))
		return OK;
	if (use_module(pack->primary, m) != 1)
		return MODULE_BUSY;
	return OK;
}
static abort_t add_patch_dependencies(struct module_pack *pack)
{
	abort_t ret;
	const struct ksplice_patch *p;
	for (p = pack->patches; p < pack->patches_end; p++) {
		ret = add_dependency_on_address(pack, p->oldaddr);
		if (ret != OK)
			return ret;
	}
	return OK;
}
#ifdef KSPLICE_STANDALONE
#ifdef CONFIG_MODULE_UNLOAD
struct module_use {
	struct list_head list;
	struct module *module_which_uses;
};

/* I'm not yet certain whether we need the strong form of this. */
static inline int strong_try_module_get(struct module *mod)
{
	if (mod && mod->state != MODULE_STATE_LIVE)
		return -EBUSY;
	if (try_module_get(mod))
		return 0;
	return -ENOENT;
}

/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
	struct module_use *use;
	list_for_each_entry(use, &b->modules_which_use_me, list) {
		if (use->module_which_uses == a)
			return 1;
	}
	return 0;
}

/* Make it so module a uses b.  Must be holding module_mutex */
static int use_module(struct module *a, struct module *b)
{
	struct module_use *use;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
/* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
	int no_warn;
#endif /* LINUX_VERSION_CODE */
	if (b == NULL || already_uses(a, b))
		return 1;

	if (strong_try_module_get(b) < 0)
		return 0;

	use = kmalloc(sizeof(*use), GFP_ATOMIC);
	if (use == NULL)
		return 0;
	use->module_which_uses = a;
	list_add(&use->list, &b->modules_which_use_me);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
/* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
	no_warn = sysfs_create_link(b->holders_dir, &a->mkobj.kobj, a->name);
#endif /* LINUX_VERSION_CODE */
	return 1;
}
#else /* CONFIG_MODULE_UNLOAD */
static int use_module(struct module *a, struct module *b)
{
	return 1;
}
#endif /* CONFIG_MODULE_UNLOAD */
#endif /* KSPLICE_STANDALONE */
static abort_t compute_address(struct module_pack *pack, const char *sym_name,
			       struct list_head *vals, int pre)
{
	int i;
	abort_t ret;
	char *name;
	const char *prefix[] = { ".text.", ".bss.", ".data.", NULL };

#ifdef KSPLICE_STANDALONE
	if (!bootstrapped)
		return OK;
#endif /* KSPLICE_STANDALONE */

	if (!pre) {
		struct reloc_nameval *nv = find_nameval(pack, sym_name, 0);
		if (nv != NULL && nv->status != NOVAL) {
			ret = add_candidate_val(vals, nv->val);
			if (ret != OK)
				return ret;
			ksdebug(pack, 1, KERN_DEBUG "ksplice: using detected "
				"sym %s=%" ADDR "\n", sym_name, nv->val);
			return OK;
		}
	}

	if (starts_with(sym_name, ".rodata"))
		return OK;

	name = dup_wolabel(sym_name);
	ret = exported_symbol_lookup(name, vals);
#ifdef CONFIG_KALLSYMS
	if (ret == OK)
		ret = kernel_lookup(name, vals);
	if (ret == OK)
		ret = other_module_lookup(name, vals, pack->name);
#endif /* CONFIG_KALLSYMS */
	kfree(name);
	if (ret != OK)
		return ret;

	for (i = 0; prefix[i] != NULL; i++) {
		if (starts_with(sym_name, prefix[i])) {
			ret = compute_address(pack,
					      sym_name + strlen(prefix[i]),
					      vals, pre);
			if (ret != OK)
				return ret;
		}
	}
	return OK;
}
static abort_t exported_symbol_lookup(const char *name, struct list_head *vals)
{
	const struct kernel_symbol *sym;
	sym = __find_symbol(name, NULL, NULL, NULL, 1, 0);
	if (sym == NULL)
		return OK;
	return add_candidate_val(vals, sym->value);
}
#ifdef KSPLICE_STANDALONE
/* lookup symbol in given range of kernel_symbols */
static const struct kernel_symbol *lookup_symbol(const char *name,
						 const struct kernel_symbol *start,
						 const struct kernel_symbol *stop)
{
	const struct kernel_symbol *ks = start;
	for (; ks < stop; ks++)
		if (strcmp(ks->name, name) == 0)
			return ks;
	return NULL;
}

struct symsearch {
	const struct kernel_symbol *start, *stop;
	const unsigned long *crcs;
	const char *export_type;
};

#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif
/* Modified version of search_symarrays from kernel/module.c */
static const struct kernel_symbol *search_symarrays(const struct symsearch *arr,
						    unsigned int num,
						    const char *name,
						    const char **export_type,
						    _Bool gplok,
						    _Bool warn,
						    const unsigned long **crc)
{
	unsigned int i;
	const struct kernel_symbol *ks;

	for (i = 0; i < num; i++) {
		ks = lookup_symbol(name, arr[i].start, arr[i].stop);
		if (ks == NULL)
			continue;
		if (crc != NULL)
			*crc = symversion(arr[i].crcs, ks - arr[i].start);
		if (export_type != NULL)
			*export_type = arr[i].export_type;
		return ks;
	}
	return NULL;
}
/* Modified version of kernel/module.c's find_symbol */
static const struct kernel_symbol *__find_symbol(const char *name,
						 struct module **owner,
						 const unsigned long **crc,
						 const char **export_type,
						 _Bool gplok, _Bool warn)
{
	struct module *mod;
	const struct kernel_symbol *ks;

	const struct symsearch arr[] = {
		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab, "" },
		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
		  __start___kcrctab_gpl, "_gpl" },
#ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
		{ __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
		  __start___kcrctab_gpl_future, "_gpl_future" },
#endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
#ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
		{ __start___ksymtab_unused, __stop___ksymtab_unused,
		  __start___kcrctab_unused, "_unused" },
		{ __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
		  __start___kcrctab_unused_gpl, "_unused_gpl" },
#endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
	};

	/* Core kernel first. */
	ks = search_symarrays(arr, ARRAY_SIZE(arr), name, export_type, gplok,
			      warn, crc);
	if (ks != NULL) {
		if (owner != NULL)
			*owner = NULL;
		return ks;
	}

	/* Now try modules. */
	list_for_each_entry(mod, &modules, list) {
		struct symsearch arr[] = {
			{ mod->syms, mod->syms + mod->num_syms, mod->crcs, "" },
			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
			  mod->gpl_crcs, "_gpl" },
#ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
			{ mod->gpl_future_syms,
			  mod->gpl_future_syms + mod->num_gpl_future_syms,
			  mod->gpl_future_crcs, "_gpl_future" },
#endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
#ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
			{ mod->unused_syms,
			  mod->unused_syms + mod->num_unused_syms,
			  mod->unused_crcs, "_unused" },
			{ mod->unused_gpl_syms,
			  mod->unused_gpl_syms + mod->num_unused_gpl_syms,
			  mod->unused_gpl_crcs, "_unused_gpl" },
#endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
		};
		ks = search_symarrays(arr, ARRAY_SIZE(arr),
				      name, export_type, gplok, warn, crc);
		if (ks != NULL) {
			if (owner != NULL)
				*owner = mod;
			return ks;
		}
	}
	return NULL;
}
#endif /* KSPLICE_STANDALONE */
#ifdef CONFIG_KALLSYMS
static abort_t other_module_lookup(const char *name, struct list_head *vals,
				   const char *ksplice_name)
{
	abort_t ret = OK;
	struct accumulate_struct acc = { name, vals };
	const struct module *m;

	list_for_each_entry(m, &modules, list) {
		if (starts_with(m->name, ksplice_name) ||
		    ends_with(m->name, "_helper"))
			continue;
		ret = (__force abort_t)
		    module_on_each_symbol(m, accumulate_matching_names, &acc);
		if (ret != OK)
			break;
	}
	return ret;
}
static int accumulate_matching_names(void *data, const char *sym_name,
				     unsigned long sym_val)
{
	abort_t ret = OK;
	struct accumulate_struct *acc = data;

	if (strcmp(sym_name, acc->desired_name) == 0)
		ret = add_candidate_val(acc->vals, sym_val);
	return (__force int)ret;
}
#endif /* CONFIG_KALLSYMS */
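
/* When no symbol information narrows down a helper section's run-time
 * address, the standalone module falls back to a byte-by-byte scan of every
 * loaded module and of the core kernel text, calling try_addr() at each
 * position whose first byte matches the expected code.  Debug output is
 * temporarily suppressed during the scan to keep the log usable. */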
#ifdef KSPLICE_STANDALONE
static abort_t brute_search(struct module_pack *pack,
			    const struct ksplice_size *s,
			    const void *start, unsigned long len)
{
	unsigned long addr;
	char run, pre;
	abort_t ret;

	for (addr = (unsigned long)start; addr < (unsigned long)start + len;
	     addr++) {
		if (addr % 100000 == 0)
			yield();

		if (!virtual_address_mapped(addr))
			continue;

		run = *(const unsigned char *)(addr);
		pre = *(const unsigned char *)(s->thismod_addr);

		if (run != pre)
			continue;

		ret = try_addr(pack, s, addr, s->thismod_addr);
		if (ret != NO_MATCH)
			return ret;
	}

	return NO_MATCH;
}

static abort_t brute_search_all(struct module_pack *pack,
				const struct ksplice_size *s)
{
	struct module *m;
	abort_t ret = NO_MATCH;
	int saved_debug;
	const char *where = NULL;

	ksdebug(pack, 2, KERN_DEBUG "ksplice: brute_search: searching for %s\n",
		s->name);
	saved_debug = pack->bundle->debug;
	pack->bundle->debug = 0;

	list_for_each_entry(m, &modules, list) {
		if (starts_with(m->name, pack->name) ||
		    ends_with(m->name, "_helper"))
			continue;
		if (brute_search(pack, s, m->module_core, m->core_size) == OK ||
		    brute_search(pack, s, m->module_init, m->init_size) == OK) {
			ret = OK;
			where = m->name;
			break;
		}
	}

	if (ret == NO_MATCH) {
		if (brute_search(pack, s, (const void *)init_mm.start_code,
				 init_mm.end_code - init_mm.start_code) == OK) {
			ret = OK;
			where = "vmlinux";
		}
	}

	pack->bundle->debug = saved_debug;
	if (ret == OK)
		ksdebug(pack, 2, KERN_DEBUG "ksplice: brute_search: found %s "
			"in %s\n", s->name, where);

	return ret;
}
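
/*
 * brute_search() compares the first byte at each candidate address with the
 * first pre-patch byte before handing the address to try_addr(), and yields
 * every 100000 addresses so a long scan does not hog the CPU.
 * brute_search_all() temporarily sets the bundle's debug level to 0 so that
 * the many failed try_addr() attempts do not flood the debug buffer.
 */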

#ifdef CONFIG_KALLSYMS
/* Modified version of Linux's kallsyms_lookup_name */
static abort_t kernel_lookup(const char *name, struct list_head *vals)
{
	abort_t ret;
	char namebuf[KSYM_NAME_LEN + 1];
	unsigned long i;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
	unsigned long off;
#endif /* LINUX_VERSION_CODE */

/* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
 * 2.6.10 was the first release after this commit
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
	for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
		off = ksplice_kallsyms_expand_symbol(off, namebuf);

		if (strcmp(namebuf, name) == 0) {
			ret = add_candidate_val(vals, kallsyms_addresses[i]);
			if (ret != OK)
				return ret;
		}
	}
#else /* LINUX_VERSION_CODE < */
	char *knames;

	for (i = 0, knames = kallsyms_names; i < kallsyms_num_syms; i++) {
		unsigned prefix = *knames++;

		strlcpy(namebuf + prefix, knames, KSYM_NAME_LEN - prefix);

		if (strcmp(namebuf, name) == 0) {
			ret = add_candidate_val(vals, kallsyms_addresses[i]);
			if (ret != OK)
				return ret;
		}

		knames += strlen(knames) + 1;
	}
#endif /* LINUX_VERSION_CODE */

	return OK;
}
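
/*
 * Note: on 2.6.10 and later the kallsyms name table is token-compressed, so
 * each entry has to be expanded via ksplice_kallsyms_expand_symbol() below;
 * older kernels store prefix-compressed strings, where each entry begins
 * with the number of leading bytes it shares with the previous name.
 */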

/* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
 * 2.6.10 was the first release after this commit
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
extern u8 kallsyms_token_table[];
extern u16 kallsyms_token_index[];
/* Modified version of Linux's kallsyms_expand_symbol */
static unsigned long ksplice_kallsyms_expand_symbol(unsigned long off,
						    char *result)
{
	long len, skipped_first = 0;
	const u8 *tptr, *data;

	/* Get the compressed symbol length from the first symbol byte */
	data = &kallsyms_names[off];
	len = *data;
	data++;

	/* Update the offset to return the offset for the next symbol */
	off += len + 1;

	/* Expand each compressed byte via the token table */
	while (len) {
		tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
		data++;
		len--;

		while (*tptr) {
			if (skipped_first) {
				*result = *tptr;
				result++;
			} else
				skipped_first = 1;
			tptr++;
		}
	}

	*result = '\0';

	return off;
}
#endif /* LINUX_VERSION_CODE */

static int module_on_each_symbol(const struct module *mod,
				 int (*fn)(void *, const char *, unsigned long),
				 void *data)
{
	unsigned int i;
	int ret;

	for (i = 0; i < mod->num_symtab; i++) {
		if ((ret = fn(data, mod->strtab + mod->symtab[i].st_name,
			      mod->symtab[i].st_value)) != 0)
			return ret;
	}
	return 0;
}
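
/*
 * module_on_each_symbol() walks a module's full symbol table (not just its
 * exports), calling fn for every entry and stopping at the first nonzero
 * return value; other_module_lookup() above pairs it with
 * accumulate_matching_names() to collect candidate addresses.
 */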

#endif /* CONFIG_KALLSYMS */
#else /* !KSPLICE_STANDALONE */

static abort_t kernel_lookup(const char *name, struct list_head *vals)
{
	struct accumulate_struct acc = { name, vals };
	return (__force abort_t)
	    kallsyms_on_each_symbol(accumulate_matching_names, &acc);
}
#endif /* KSPLICE_STANDALONE */

static abort_t add_candidate_val(struct list_head *vals, unsigned long val)
{
	struct candidate_val *tmp, *new;

	list_for_each_entry(tmp, vals, list) {
		if (tmp->val == val)
			return OK;
	}
	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (new == NULL)
		return OUT_OF_MEMORY;
	new->val = val;
	list_add(&new->list, vals);
	return OK;
}

static void release_vals(struct list_head *vals)
{
	clear_list(vals, struct candidate_val, list);
}

static struct reloc_nameval *find_nameval(struct module_pack *pack,
					  const char *name, int create)
{
	struct reloc_nameval *nv, *new;
	const char *newname;
	if (starts_with(name, ".text."))
		name += strlen(".text.");
	list_for_each_entry(nv, &pack->reloc_namevals, list) {
		newname = nv->name;
		if (starts_with(newname, ".text."))
			newname += strlen(".text.");
		if (strcmp(newname, name) == 0)
			return nv;
	}
	if (!create)
		return NULL;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (new == NULL)
		return NULL;

	new->name = name;
	new->status = NOVAL;
	list_add(&new->list, &pack->reloc_namevals);
	return new;
}

static struct reloc_addrmap *find_addrmap(struct module_pack *pack,
					  unsigned long addr)
{
	struct reloc_addrmap *map;
	list_for_each_entry(map, &pack->reloc_addrmaps, list) {
		if (addr >= map->addr && addr < map->addr + map->size)
			return map;
	}
	return NULL;
}

static void set_temp_myst_relocs(struct module_pack *pack, int status_val)
{
	struct reloc_nameval *nv;
	list_for_each_entry(nv, &pack->reloc_namevals, list) {
		if (nv->status == TEMP)
			nv->status = status_val;
	}
}
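
/*
 * Only entries still marked TEMP are affected; callers presumably pass VAL
 * to commit tentative resolutions or NOVAL to discard them, since those are
 * the other two states in the nameval status enum.
 */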

static int contains_canary(struct module_pack *pack, unsigned long blank_addr,
			   int size, long dst_mask)
{
	switch (size) {
	case 1:
		return (*(int8_t *)blank_addr & (int8_t)dst_mask) ==
		    (0x77 & dst_mask);
	case 2:
		return (*(int16_t *)blank_addr & (int16_t)dst_mask) ==
		    (0x7777 & dst_mask);
	case 4:
		return (*(int32_t *)blank_addr & (int32_t)dst_mask) ==
		    (0x77777777 & dst_mask);
	case 8:
		return (*(int64_t *)blank_addr & dst_mask) ==
		    (0x7777777777777777ll & dst_mask);
	default:
		print_abort(pack, "Invalid relocation size");
		return -1;
	}
}

static int starts_with(const char *str, const char *prefix)
{
	return strncmp(str, prefix, strlen(prefix)) == 0;
}

static int ends_with(const char *str, const char *suffix)
{
	return strlen(str) >= strlen(suffix) &&
	    strcmp(&str[strlen(str) - strlen(suffix)], suffix) == 0;
}

static int label_offset(const char *sym_name)
{
	int i;
	for (i = 0;
	     sym_name[i] != 0 && sym_name[i + 1] != 0 && sym_name[i + 2] != 0
	     && sym_name[i + 3] != 0; i++) {
		if (sym_name[i] == '_' && sym_name[i + 1] == '_'
		    && sym_name[i + 2] == '_' && sym_name[i + 3] == '_')
			return i + 4;
	}
	return -1;
}

static char *dup_wolabel(const char *sym_name)
{
	int offset, entire_strlen, label_strlen, new_strlen;
	char *newstr;

	offset = label_offset(sym_name);
	if (offset == -1)
		label_strlen = 0;
	else
		label_strlen = strlen(&sym_name[offset]) + strlen("____");

	entire_strlen = strlen(sym_name);
	new_strlen = entire_strlen - label_strlen;
	newstr = kmalloc(new_strlen + 1, GFP_KERNEL);
	if (newstr == NULL)
		return NULL;
	memcpy(newstr, sym_name, new_strlen);
	newstr[new_strlen] = 0;
	return newstr;
}
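
/*
 * Ksplice labels symbols by appending "____<label>" to the base name.
 * label_offset() returns the index just past the first "____" (or -1 if the
 * name carries no label), and dup_wolabel() returns a kmalloc'd copy with
 * the label stripped, e.g. dup_wolabel("schedule____1") yields "schedule".
 * The caller is responsible for kfree()ing the result.
 */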

#ifdef CONFIG_DEBUG_FS
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
/* Old kernels don't have debugfs_create_blob */
static ssize_t read_file_blob(struct file *file, char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct debugfs_blob_wrapper *blob = file->private_data;
	return simple_read_from_buffer(user_buf, count, ppos, blob->data,
				       blob->size);
}

static int blob_open(struct inode *inode, struct file *file)
{
	if (inode->i_private)
		file->private_data = inode->i_private;
	return 0;
}

static struct file_operations fops_blob = {
	.read = read_file_blob,
	.open = blob_open,
};

static struct dentry *debugfs_create_blob(const char *name, mode_t mode,
					  struct dentry *parent,
					  struct debugfs_blob_wrapper *blob)
{
	return debugfs_create_file(name, mode, parent, blob, &fops_blob);
}
#endif /* LINUX_VERSION_CODE */

static void clear_debug_buf(struct update_bundle *bundle)
{
	if (bundle->debugfs_dentry == NULL)
		return;
	debugfs_remove(bundle->debugfs_dentry);
	bundle->debugfs_dentry = NULL;
	bundle->debug_blob.size = 0;
	vfree(bundle->debug_blob.data);
	bundle->debug_blob.data = NULL;
}

static abort_t init_debug_buf(struct update_bundle *bundle)
{
	bundle->debug_blob.size = 0;
	bundle->debug_blob.data = NULL;
	bundle->debugfs_dentry =
	    debugfs_create_blob(bundle->name, S_IFREG | S_IRUSR, NULL,
				&bundle->debug_blob);
	if (bundle->debugfs_dentry == NULL)
		return OUT_OF_MEMORY;
	return OK;
}

static int __ksdebug(struct update_bundle *bundle, const char *fmt, ...)
{
	va_list args;
	unsigned long size, old_size, new_size;

	if ((bundle->debug_blob.data == NULL ||
	     ((char *)bundle->debug_blob.data)[bundle->debug_blob.size - 1] ==
	     '\n') && strlen(fmt) >= 3 && fmt[0] == '<' && fmt[1] >= '0' &&
	    fmt[1] <= '7' && fmt[2] == '>')
		fmt += 3;

	/* size includes the trailing '\0' */
	va_start(args, fmt);
	size = 1 + vsnprintf(bundle->debug_blob.data, 0, fmt, args);
	va_end(args);
	old_size = bundle->debug_blob.size == 0 ? 0 :
	    max(PAGE_SIZE, roundup_pow_of_two(bundle->debug_blob.size));
	new_size = bundle->debug_blob.size + size == 0 ? 0 :
	    max(PAGE_SIZE, roundup_pow_of_two(bundle->debug_blob.size + size));
	if (new_size > old_size) {
		char *buf = vmalloc(new_size);
		if (buf == NULL)
			return -ENOMEM;
		memcpy(buf, bundle->debug_blob.data, bundle->debug_blob.size);
		vfree(bundle->debug_blob.data);
		bundle->debug_blob.data = buf;
	}
	va_start(args, fmt);
	bundle->debug_blob.size += vsnprintf(bundle->debug_blob.data +
					     bundle->debug_blob.size,
					     size, fmt, args);
	va_end(args);
	return 0;
}
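
/*
 * The debug blob grows geometrically: whenever the buffered text would
 * outgrow the current allocation, the buffer is reallocated to
 * max(PAGE_SIZE, roundup_pow_of_two(needed)) and the old contents are copied
 * over.  Printk-style "<N>" level prefixes are stripped when a message
 * starts a new line, since the blob is read through debugfs rather than the
 * kernel log.
 */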
#endif /* CONFIG_DEBUG_FS */

#ifdef KSPLICE_STANDALONE
static int debug;
module_param(debug, int, 0600);
MODULE_PARM_DESC(debug, "Debug level");

static struct module_pack ksplice_pack = {
	.name = "ksplice_" STR(KSPLICE_KID),
	.kid = "init_" STR(KSPLICE_KID),
	.target_name = NULL,
	.target = NULL,
	.map_printk = MAP_PRINTK,
	.primary = THIS_MODULE,
	.reloc_addrmaps = LIST_HEAD_INIT(ksplice_pack.reloc_addrmaps),
	.reloc_namevals = LIST_HEAD_INIT(ksplice_pack.reloc_namevals),
};
#endif /* KSPLICE_STANDALONE */

static int init_ksplice(void)
{
#ifdef KSPLICE_STANDALONE
	struct module_pack *pack = &ksplice_pack;
	pack->bundle = init_ksplice_bundle(pack->kid);
	if (pack->bundle == NULL)
		return -ENOMEM;
	add_to_bundle(pack, pack->bundle);
	pack->bundle->debug = debug;
	pack->bundle->abort_cause =
	    process_ksplice_relocs(pack, ksplice_init_relocs,
				   ksplice_init_relocs_end, 1);
	if (pack->bundle->abort_cause == OK)
		bootstrapped = 1;
#else /* !KSPLICE_STANDALONE */
	ksplice_kobj = kobject_create_and_add("ksplice", kernel_kobj);
	if (ksplice_kobj == NULL)
		return -ENOMEM;
#endif /* KSPLICE_STANDALONE */
	return 0;
}

static void cleanup_ksplice(void)
{
#ifdef KSPLICE_STANDALONE
	cleanup_ksplice_bundle(ksplice_pack.bundle);
#else /* !KSPLICE_STANDALONE */
	kobject_put(ksplice_kobj);
#endif /* KSPLICE_STANDALONE */
}

module_init(init_ksplice);
module_exit(cleanup_ksplice);

MODULE_AUTHOR("Jeffrey Brian Arnold <jbarnold@mit.edu>");
MODULE_DESCRIPTION("Ksplice rebootless update system");
#ifdef KSPLICE_VERSION
MODULE_VERSION(KSPLICE_VERSION);
#endif /* KSPLICE_VERSION */
MODULE_LICENSE("GPL v2");