1 /* Copyright (C) 2007-2008 Ksplice, Inc.
2 * Authors: Jeff Arnold, Anders Kaseorg, Tim Abbott
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
18 #include <linux/module.h>
19 #include <linux/version.h>
20 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
21 #include <linux/bug.h>
22 #else /* LINUX_VERSION_CODE */
23 /* 7664c5a1da4711bb6383117f51b94c8dc8f3f1cd was after 2.6.19 */
24 #endif /* LINUX_VERSION_CODE */
25 #include <linux/ctype.h>
26 #if defined CONFIG_DEBUG_FS || LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
27 #include <linux/debugfs.h>
28 #else /* CONFIG_DEBUG_FS */
29 /* a7a76cefc4b12bb6508afa4c77f11c2752cc365d was after 2.6.11 */
30 #endif /* CONFIG_DEBUG_FS */
31 #include <linux/errno.h>
32 #include <linux/kallsyms.h>
33 #include <linux/kobject.h>
34 #include <linux/kthread.h>
35 #include <linux/pagemap.h>
36 #include <linux/sched.h>
37 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
38 #include <linux/sort.h>
39 #else /* LINUX_VERSION_CODE < */
40 /* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
41 #endif /* LINUX_VERSION_CODE */
42 #include <linux/stop_machine.h>
43 #include <linux/sysfs.h>
44 #include <linux/time.h>
45 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
46 #include <linux/uaccess.h>
47 #else /* LINUX_VERSION_CODE < */
48 /* linux/uaccess.h doesn't exist in kernels before 2.6.18 */
49 #include <asm/uaccess.h>
50 #endif /* LINUX_VERSION_CODE */
51 #include <linux/vmalloc.h>
52 #ifdef KSPLICE_STANDALONE
54 #else /* !KSPLICE_STANDALONE */
55 #include <linux/ksplice.h>
56 #endif /* KSPLICE_STANDALONE */
57 #ifdef KSPLICE_NEED_PARAINSTRUCTIONS
58 #include <asm/alternative.h>
59 #endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
61 #if defined(KSPLICE_STANDALONE) && \
62 !defined(CONFIG_KSPLICE) && !defined(CONFIG_KSPLICE_MODULE)
63 #define KSPLICE_NO_KERNEL_SUPPORT 1
64 #endif /* KSPLICE_STANDALONE && !CONFIG_KSPLICE && !CONFIG_KSPLICE_MODULE */
66 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
67 /* 6e21828743247270d09a86756a0c11702500dbfb was after 2.6.18 */
71 #endif /* LINUX_VERSION_CODE */
74 STAGE_PREPARING
, /* the update is not yet applied */
75 STAGE_APPLIED
, /* the update is applied */
76 STAGE_REVERSED
, /* the update has been applied and reversed */
79 /* parameter to modify run-pre matching */
81 RUN_PRE_INITIAL
, /* dry run (only change temp_labelvals) */
82 RUN_PRE_DEBUG
, /* dry run with byte-by-byte debugging */
83 RUN_PRE_FINAL
, /* finalizes the matching */
84 #ifdef KSPLICE_STANDALONE
86 #endif /* KSPLICE_STANDALONE */
89 enum { NOVAL
, TEMP
, VAL
};
91 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
92 /* 5d7b32de9935c65ca8285ac6ec2382afdbb5d479 was after 2.6.8 */
94 #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
95 /* af4ca457eaf2d6682059c18463eb106e2ce58198 was after 2.6.14 */
96 #define __bitwise__ __bitwise
99 typedef int __bitwise__ abort_t
;
/* abort_t status codes: values of the __bitwise__ abort_t type returned by
 * Ksplice's internal operations.  OK means success; the others name the
 * specific failure cause.  The __force casts are required because abort_t
 * is a sparse "__bitwise__" type, so mixing it with plain integers would
 * otherwise be flagged by static checkers. */
101 #define OK ((__force abort_t) 0)
102 #define NO_MATCH ((__force abort_t) 1)
103 #define CODE_BUSY ((__force abort_t) 2)
104 #define MODULE_BUSY ((__force abort_t) 3)
105 #define OUT_OF_MEMORY ((__force abort_t) 4)
106 #define FAILED_TO_FIND ((__force abort_t) 5)
107 #define ALREADY_REVERSED ((__force abort_t) 6)
108 #define MISSING_EXPORT ((__force abort_t) 7)
109 #define UNEXPECTED_RUNNING_TASK ((__force abort_t) 8)
110 #define UNEXPECTED ((__force abort_t) 9)
111 #define TARGET_NOT_LOADED ((__force abort_t) 10)
112 #define CALL_FAILED ((__force abort_t) 11)
113 #ifdef KSPLICE_STANDALONE
/* Standalone builds resolve some symbols via System.map (see the externs
 * declared under "Obtained via System.map" below); this code reports a
 * problem with that map-derived information. */
114 #define BAD_SYSTEM_MAP ((__force abort_t) 12)
115 #endif /* KSPLICE_STANDALONE */
124 #ifdef CONFIG_DEBUG_FS
125 struct debugfs_blob_wrapper debug_blob
;
126 struct dentry
*debugfs_dentry
;
127 #else /* !CONFIG_DEBUG_FS */
128 bool debug_continue_line
;
129 #endif /* CONFIG_DEBUG_FS */
130 bool partial
; /* is it OK if some target mods aren't loaded */
131 struct list_head packs
; /* packs for loaded target mods */
132 struct list_head unused_packs
; /* packs for non-loaded target mods */
133 struct list_head conflicts
;
134 struct list_head list
;
137 /* a process conflicting with an update */
139 const char *process_name
;
141 struct list_head stack
;
142 struct list_head list
;
145 /* an address on the stack of a conflict */
146 struct conflict_addr
{
147 unsigned long addr
; /* the address on the stack */
148 bool has_conflict
; /* does this address in particular conflict? */
149 const char *label
; /* the label of the conflicting safety_record */
150 struct list_head list
;
153 #if defined(CONFIG_DEBUG_FS) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
154 /* Old kernels don't have debugfs_create_blob */
155 struct debugfs_blob_wrapper
{
159 #endif /* CONFIG_DEBUG_FS && LINUX_VERSION_CODE */
161 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
162 /* 930631edd4b1fe2781d9fe90edbe35d89dfc94cc was after 2.6.18 */
163 #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
167 struct list_head list
;
168 struct ksplice_symbol
*symbol
;
169 struct list_head
*saved_vals
;
172 /* region to be checked for conflicts in the stack check */
173 struct safety_record
{
174 struct list_head list
;
176 unsigned long addr
; /* the address to be checked for conflicts
177 * (e.g. an obsolete function's starting addr)
179 unsigned long size
; /* the size of the region to be checked */
182 /* possible value for a symbol */
183 struct candidate_val
{
184 struct list_head list
;
188 /* private struct used by init_symbol_array */
189 struct ksplice_lookup
{
191 struct ksplice_pack
*pack
;
192 struct ksplice_symbol
**arr
;
198 #ifdef KSPLICE_NO_KERNEL_SUPPORT
200 const struct kernel_symbol
*start
, *stop
;
201 const unsigned long *crcs
;
209 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
211 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
212 /* c33fa9f5609e918824446ef9a75319d4a802f1f4 was after 2.6.25 */
214 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
215 /* 2fff0a48416af891dce38fd425246e337831e0bb was after 2.6.19 */
216 static bool virtual_address_mapped(unsigned long addr
)
219 return probe_kernel_address(addr
, retval
) != -EFAULT
;
221 #else /* LINUX_VERSION_CODE < */
222 static bool virtual_address_mapped(unsigned long addr
);
223 #endif /* LINUX_VERSION_CODE */
225 static long probe_kernel_read(void *dst
, void *src
, size_t size
)
229 if (!virtual_address_mapped((unsigned long)src
) ||
230 !virtual_address_mapped((unsigned long)src
+ size
- 1))
233 memcpy(dst
, src
, size
);
236 #endif /* LINUX_VERSION_CODE */
238 static LIST_HEAD(updates
);
239 #ifdef KSPLICE_STANDALONE
240 #if defined(CONFIG_KSPLICE) || defined(CONFIG_KSPLICE_MODULE)
241 extern struct list_head ksplice_module_list
;
242 #else /* !CONFIG_KSPLICE */
243 LIST_HEAD(ksplice_module_list
);
244 #endif /* CONFIG_KSPLICE */
245 #else /* !KSPLICE_STANDALONE */
246 LIST_HEAD(ksplice_module_list
);
247 EXPORT_SYMBOL_GPL(ksplice_module_list
);
248 static struct kobject
*ksplice_kobj
;
249 #endif /* KSPLICE_STANDALONE */
251 static struct kobj_type ksplice_ktype
;
253 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
254 /* Old kernels do not have kcalloc
255 * e629946abd0bb8266e9c3d0fd1bff2ef8dec5443 was after 2.6.8
257 static void *kcalloc(size_t n
, size_t size
, typeof(GFP_KERNEL
) flags
)
260 if (n
!= 0 && size
> ULONG_MAX
/ n
)
262 mem
= kmalloc(n
* size
, flags
);
264 memset(mem
, 0, n
* size
);
267 #endif /* LINUX_VERSION_CODE */
269 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
270 /* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
271 static void u32_swap(void *a
, void *b
, int size
)
274 *(u32
*)a
= *(u32
*)b
;
278 static void generic_swap(void *a
, void *b
, int size
)
284 *(char *)a
++ = *(char *)b
;
286 } while (--size
> 0);
290 * sort - sort an array of elements
291 * @base: pointer to data to sort
292 * @num: number of elements
293 * @size: size of each element
294 * @cmp: pointer to comparison function
295 * @swap: pointer to swap function or NULL
297 * This function does a heapsort on the given array. You may provide a
298 * swap function optimized to your element type.
300 * Sorting time is O(n log n) both on average and worst-case. While
301 * qsort is about 20% faster on average, it suffers from exploitable
302 * O(n*n) worst-case behavior and extra memory requirements that make
303 * it less suitable for kernel use.
306 void sort(void *base
, size_t num
, size_t size
,
307 int (*cmp
)(const void *, const void *),
308 void (*swap
)(void *, void *, int size
))
310 /* pre-scale counters for performance */
311 int i
= (num
/ 2 - 1) * size
, n
= num
* size
, c
, r
;
314 swap
= (size
== 4 ? u32_swap
: generic_swap
);
317 for (; i
>= 0; i
-= size
) {
318 for (r
= i
; r
* 2 + size
< n
; r
= c
) {
320 if (c
< n
- size
&& cmp(base
+ c
, base
+ c
+ size
) < 0)
322 if (cmp(base
+ r
, base
+ c
) >= 0)
324 swap(base
+ r
, base
+ c
, size
);
329 for (i
= n
- size
; i
> 0; i
-= size
) {
330 swap(base
, base
+ i
, size
);
331 for (r
= 0; r
* 2 + size
< i
; r
= c
) {
333 if (c
< i
- size
&& cmp(base
+ c
, base
+ c
+ size
) < 0)
335 if (cmp(base
+ r
, base
+ c
) >= 0)
337 swap(base
+ r
, base
+ c
, size
);
341 #endif /* LINUX_VERSION_CODE < */
343 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
344 /* Old kernels do not have kstrdup
345 * 543537bd922692bc978e2e356fcd8bfc9c2ee7d5 was 2.6.13-rc4
347 static char *kstrdup(const char *s
, typeof(GFP_KERNEL
) gfp
)
356 buf
= kmalloc(len
, gfp
);
361 #endif /* LINUX_VERSION_CODE */
363 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
364 /* Old kernels use semaphore instead of mutex
365 * 97d1f15b7ef52c1e9c28dc48b454024bb53a5fd2 was after 2.6.16
367 #define mutex semaphore
368 #define mutex_lock down
369 #define mutex_unlock up
370 #endif /* LINUX_VERSION_CODE */
372 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
373 /* 11443ec7d9286dd25663516436a14edfb5f43857 was after 2.6.21 */
374 static char * __attribute_used__
375 kvasprintf(typeof(GFP_KERNEL
) gfp
, const char *fmt
, va_list ap
)
382 len
= vsnprintf(dummy
, 0, fmt
, aq
);
385 p
= kmalloc(len
+ 1, gfp
);
389 vsnprintf(p
, len
+ 1, fmt
, ap
);
395 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
396 /* e905914f96e11862b130dd229f73045dad9a34e8 was after 2.6.17 */
397 static char * __attribute__((format (printf
, 2, 3)))
398 kasprintf(typeof(GFP_KERNEL
) gfp
, const char *fmt
, ...)
404 p
= kvasprintf(gfp
, fmt
, ap
);
411 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
412 /* 06b2a76d25d3cfbd14680021c1d356c91be6904e was after 2.6.24 */
413 static int strict_strtoul(const char *cp
, unsigned int base
, unsigned long *res
)
424 val
= simple_strtoul(cp
, &tail
, base
);
425 if ((*tail
== '\0') ||
426 ((len
== (size_t)(tail
- cp
) + 1) && (*tail
== '\n'))) {
435 #ifndef task_thread_info
436 #define task_thread_info(task) (task)->thread_info
437 #endif /* !task_thread_info */
439 #ifdef KSPLICE_STANDALONE
441 static bool bootstrapped
= false;
443 #ifdef CONFIG_KALLSYMS
444 extern unsigned long kallsyms_addresses
[], kallsyms_num_syms
;
445 extern u8 kallsyms_names
[];
446 #endif /* CONFIG_KALLSYMS */
448 /* defined by ksplice-create */
449 extern const struct ksplice_reloc ksplice_init_relocs
[],
450 ksplice_init_relocs_end
[];
452 /* Obtained via System.map */
453 extern struct list_head modules
;
454 extern struct mutex module_mutex
;
455 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) && defined(CONFIG_UNUSED_SYMBOLS)
456 /* f71d20e961474dde77e6558396efb93d6ac80a4b was after 2.6.17 */
457 #define KSPLICE_KSYMTAB_UNUSED_SUPPORT 1
458 #endif /* LINUX_VERSION_CODE */
459 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
460 /* 9f28bb7e1d0188a993403ab39b774785892805e1 was after 2.6.16 */
461 #define KSPLICE_KSYMTAB_FUTURE_SUPPORT 1
462 #endif /* LINUX_VERSION_CODE */
463 extern const struct kernel_symbol __start___ksymtab
[];
464 extern const struct kernel_symbol __stop___ksymtab
[];
465 extern const unsigned long __start___kcrctab
[];
466 extern const struct kernel_symbol __start___ksymtab_gpl
[];
467 extern const struct kernel_symbol __stop___ksymtab_gpl
[];
468 extern const unsigned long __start___kcrctab_gpl
[];
469 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
470 extern const struct kernel_symbol __start___ksymtab_unused
[];
471 extern const struct kernel_symbol __stop___ksymtab_unused
[];
472 extern const unsigned long __start___kcrctab_unused
[];
473 extern const struct kernel_symbol __start___ksymtab_unused_gpl
[];
474 extern const struct kernel_symbol __stop___ksymtab_unused_gpl
[];
475 extern const unsigned long __start___kcrctab_unused_gpl
[];
476 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
477 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
478 extern const struct kernel_symbol __start___ksymtab_gpl_future
[];
479 extern const struct kernel_symbol __stop___ksymtab_gpl_future
[];
480 extern const unsigned long __start___kcrctab_gpl_future
[];
481 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
483 #endif /* KSPLICE_STANDALONE */
485 static struct update
*init_ksplice_update(const char *kid
);
486 static void cleanup_ksplice_update(struct update
*update
);
487 static void maybe_cleanup_ksplice_update(struct update
*update
);
488 static void add_to_update(struct ksplice_pack
*pack
, struct update
*update
);
489 static int ksplice_sysfs_init(struct update
*update
);
491 /* Preparing the relocations and patches for application */
492 static abort_t
apply_update(struct update
*update
);
493 static abort_t
prepare_pack(struct ksplice_pack
*pack
);
494 static abort_t
finalize_pack(struct ksplice_pack
*pack
);
495 static abort_t
finalize_patches(struct ksplice_pack
*pack
);
496 static abort_t
add_dependency_on_address(struct ksplice_pack
*pack
,
498 static abort_t
map_trampoline_pages(struct update
*update
);
499 static void unmap_trampoline_pages(struct update
*update
);
500 static void *map_writable(void *addr
, size_t len
);
501 static abort_t
apply_relocs(struct ksplice_pack
*pack
,
502 const struct ksplice_reloc
*relocs
,
503 const struct ksplice_reloc
*relocs_end
);
504 static abort_t
apply_reloc(struct ksplice_pack
*pack
,
505 const struct ksplice_reloc
*r
);
506 static abort_t
apply_howto_reloc(struct ksplice_pack
*pack
,
507 const struct ksplice_reloc
*r
);
508 static abort_t
apply_howto_date(struct ksplice_pack
*pack
,
509 const struct ksplice_reloc
*r
);
510 static abort_t
read_reloc_value(struct ksplice_pack
*pack
,
511 const struct ksplice_reloc
*r
,
512 unsigned long addr
, unsigned long *valp
);
513 static abort_t
write_reloc_value(struct ksplice_pack
*pack
,
514 const struct ksplice_reloc
*r
,
515 unsigned long addr
, unsigned long sym_addr
);
516 static void __attribute__((noreturn
)) ksplice_deleted(void);
518 /* run-pre matching */
519 static abort_t
match_pack_sections(struct ksplice_pack
*pack
,
520 bool consider_data_sections
);
521 static abort_t
find_section(struct ksplice_pack
*pack
,
522 struct ksplice_section
*sect
);
523 static abort_t
try_addr(struct ksplice_pack
*pack
,
524 struct ksplice_section
*sect
,
525 unsigned long run_addr
,
526 struct list_head
*safety_records
,
527 enum run_pre_mode mode
);
528 static abort_t
run_pre_cmp(struct ksplice_pack
*pack
,
529 const struct ksplice_section
*sect
,
530 unsigned long run_addr
,
531 struct list_head
*safety_records
,
532 enum run_pre_mode mode
);
533 #ifndef CONFIG_FUNCTION_DATA_SECTIONS
534 /* defined in arch/ARCH/kernel/ksplice-arch.c */
535 static abort_t
arch_run_pre_cmp(struct ksplice_pack
*pack
,
536 struct ksplice_section
*sect
,
537 unsigned long run_addr
,
538 struct list_head
*safety_records
,
539 enum run_pre_mode mode
);
540 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
541 static void print_bytes(struct ksplice_pack
*pack
,
542 const unsigned char *run
, int runc
,
543 const unsigned char *pre
, int prec
);
544 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
545 static abort_t
brute_search(struct ksplice_pack
*pack
,
546 struct ksplice_section
*sect
,
547 const void *start
, unsigned long len
,
548 struct list_head
*vals
);
549 static abort_t
brute_search_all(struct ksplice_pack
*pack
,
550 struct ksplice_section
*sect
,
551 struct list_head
*vals
);
552 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
553 static const struct ksplice_reloc
*
554 init_reloc_search(struct ksplice_pack
*pack
,
555 const struct ksplice_section
*sect
);
556 static const struct ksplice_reloc
*find_reloc(const struct ksplice_reloc
*start
,
557 const struct ksplice_reloc
*end
,
558 unsigned long address
,
560 static abort_t
lookup_reloc(struct ksplice_pack
*pack
,
561 const struct ksplice_reloc
**fingerp
,
563 const struct ksplice_reloc
**relocp
);
564 static abort_t
handle_reloc(struct ksplice_pack
*pack
,
565 const struct ksplice_section
*sect
,
566 const struct ksplice_reloc
*r
,
567 unsigned long run_addr
, enum run_pre_mode mode
);
568 static abort_t
handle_howto_date(struct ksplice_pack
*pack
,
569 const struct ksplice_section
*sect
,
570 const struct ksplice_reloc
*r
,
571 unsigned long run_addr
,
572 enum run_pre_mode mode
);
573 static abort_t
handle_howto_reloc(struct ksplice_pack
*pack
,
574 const struct ksplice_section
*sect
,
575 const struct ksplice_reloc
*r
,
576 unsigned long run_addr
,
577 enum run_pre_mode mode
);
578 static struct ksplice_section
*symbol_section(struct ksplice_pack
*pack
,
579 const struct ksplice_symbol
*sym
);
580 static int compare_section_labels(const void *va
, const void *vb
);
581 static int symbol_section_bsearch_compare(const void *a
, const void *b
);
582 static const struct ksplice_reloc
*patch_reloc(struct ksplice_pack
*pack
,
583 const struct ksplice_patch
*p
);
585 /* Computing possible addresses for symbols */
586 static abort_t
lookup_symbol(struct ksplice_pack
*pack
,
587 const struct ksplice_symbol
*ksym
,
588 struct list_head
*vals
);
589 static void cleanup_symbol_arrays(struct ksplice_pack
*pack
);
590 static abort_t
init_symbol_arrays(struct ksplice_pack
*pack
);
591 static abort_t
init_symbol_array(struct ksplice_pack
*pack
,
592 struct ksplice_symbol
*start
,
593 struct ksplice_symbol
*end
);
594 static abort_t
uniquify_symbols(struct ksplice_pack
*pack
);
595 static abort_t
add_matching_values(struct ksplice_lookup
*lookup
,
596 const char *sym_name
, unsigned long sym_val
);
597 static bool add_export_values(const struct symsearch
*syms
,
598 struct module
*owner
,
599 unsigned int symnum
, void *data
);
600 static int symbolp_bsearch_compare(const void *key
, const void *elt
);
601 static int compare_symbolp_names(const void *a
, const void *b
);
602 static int compare_symbolp_labels(const void *a
, const void *b
);
603 #ifdef CONFIG_KALLSYMS
604 static int add_kallsyms_values(void *data
, const char *name
,
605 struct module
*owner
, unsigned long val
);
606 #endif /* CONFIG_KALLSYMS */
607 #ifdef KSPLICE_STANDALONE
609 add_system_map_candidates(struct ksplice_pack
*pack
,
610 const struct ksplice_system_map
*start
,
611 const struct ksplice_system_map
*end
,
612 const char *label
, struct list_head
*vals
);
613 static int compare_system_map(const void *a
, const void *b
);
614 static int system_map_bsearch_compare(const void *key
, const void *elt
);
615 #endif /* KSPLICE_STANDALONE */
616 static abort_t
new_export_lookup(struct ksplice_pack
*ipack
, const char *name
,
617 struct list_head
*vals
);
619 /* Atomic update trampoline insertion and removal */
620 static abort_t
apply_patches(struct update
*update
);
621 static abort_t
reverse_patches(struct update
*update
);
622 static int __apply_patches(void *update
);
623 static int __reverse_patches(void *update
);
624 static abort_t
check_each_task(struct update
*update
);
625 static abort_t
check_task(struct update
*update
,
626 const struct task_struct
*t
, bool rerun
);
627 static abort_t
check_stack(struct update
*update
, struct conflict
*conf
,
628 const struct thread_info
*tinfo
,
629 const unsigned long *stack
);
630 static abort_t
check_address(struct update
*update
,
631 struct conflict
*conf
, unsigned long addr
);
632 static abort_t
check_record(struct conflict_addr
*ca
,
633 const struct safety_record
*rec
,
635 static bool is_stop_machine(const struct task_struct
*t
);
636 static void cleanup_conflicts(struct update
*update
);
637 static void print_conflicts(struct update
*update
);
638 static void insert_trampoline(struct ksplice_patch
*p
);
639 static abort_t
verify_trampoline(struct ksplice_pack
*pack
,
640 const struct ksplice_patch
*p
);
641 static void remove_trampoline(const struct ksplice_patch
*p
);
643 static abort_t
create_labelval(struct ksplice_pack
*pack
,
644 struct ksplice_symbol
*ksym
,
645 unsigned long val
, int status
);
646 static abort_t
create_safety_record(struct ksplice_pack
*pack
,
647 const struct ksplice_section
*sect
,
648 struct list_head
*record_list
,
649 unsigned long run_addr
,
650 unsigned long run_size
);
651 static abort_t
add_candidate_val(struct ksplice_pack
*pack
,
652 struct list_head
*vals
, unsigned long val
);
653 static void release_vals(struct list_head
*vals
);
654 static void set_temp_labelvals(struct ksplice_pack
*pack
, int status_val
);
656 static int contains_canary(struct ksplice_pack
*pack
, unsigned long blank_addr
,
657 const struct ksplice_reloc_howto
*howto
);
658 static unsigned long follow_trampolines(struct ksplice_pack
*pack
,
660 static bool patches_module(const struct module
*a
, const struct module
*b
);
661 static bool starts_with(const char *str
, const char *prefix
);
662 static bool singular(struct list_head
*list
);
663 static void *bsearch(const void *key
, const void *base
, size_t n
,
664 size_t size
, int (*cmp
)(const void *key
, const void *elt
));
665 static int compare_relocs(const void *a
, const void *b
);
666 static int reloc_bsearch_compare(const void *key
, const void *elt
);
669 static abort_t
init_debug_buf(struct update
*update
);
670 static void clear_debug_buf(struct update
*update
);
671 static int __attribute__((format(printf
, 2, 3)))
672 _ksdebug(struct update
*update
, const char *fmt
, ...);
673 #define ksdebug(pack, fmt, ...) \
674 _ksdebug(pack->update, fmt, ## __VA_ARGS__)
676 #ifdef KSPLICE_NO_KERNEL_SUPPORT
677 /* Functions defined here that will be exported in later kernels */
678 #ifdef CONFIG_KALLSYMS
679 static int kallsyms_on_each_symbol(int (*fn
)(void *, const char *,
680 struct module
*, unsigned long),
682 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
683 static unsigned int kallsyms_expand_symbol(unsigned int off
, char *result
);
684 #endif /* LINUX_VERSION_CODE */
685 static int module_kallsyms_on_each_symbol(int (*fn
)(void *, const char *,
689 #endif /* CONFIG_KALLSYMS */
690 static struct module
*find_module(const char *name
);
691 static int use_module(struct module
*a
, struct module
*b
);
692 static const struct kernel_symbol
*find_symbol(const char *name
,
693 struct module
**owner
,
694 const unsigned long **crc
,
695 bool gplok
, bool warn
);
696 static bool each_symbol(bool (*fn
)(const struct symsearch
*arr
,
697 struct module
*owner
,
698 unsigned int symnum
, void *data
),
700 static struct module
*__module_data_address(unsigned long addr
);
701 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
703 /* Architecture-specific functions defined in arch/ARCH/kernel/ksplice-arch.c */
705 /* Prepare a trampoline for the given patch */
706 static abort_t
prepare_trampoline(struct ksplice_pack
*pack
,
707 struct ksplice_patch
*p
);
708 /* What address does the trampoline at addr jump to? */
709 static abort_t
trampoline_target(struct ksplice_pack
*pack
, unsigned long addr
,
710 unsigned long *new_addr
);
711 /* Hook to handle pc-relative jumps inserted by parainstructions */
712 static abort_t
handle_paravirt(struct ksplice_pack
*pack
, unsigned long pre
,
713 unsigned long run
, int *matched
);
714 /* Called for relocations of type KSPLICE_HOWTO_BUG */
715 static abort_t
handle_bug(struct ksplice_pack
*pack
,
716 const struct ksplice_reloc
*r
,
717 unsigned long run_addr
);
718 /* Called for relocations of type KSPLICE_HOWTO_EXTABLE */
719 static abort_t
handle_extable(struct ksplice_pack
*pack
,
720 const struct ksplice_reloc
*r
,
721 unsigned long run_addr
);
722 /* Is address p on the stack of the given thread? */
723 static bool valid_stack_ptr(const struct thread_info
*tinfo
, const void *p
);
725 #ifndef KSPLICE_STANDALONE
726 #include "ksplice-arch.c"
727 #elif defined CONFIG_X86
728 #include "x86/ksplice-arch.c"
729 #elif defined CONFIG_ARM
730 #include "arm/ksplice-arch.c"
731 #endif /* KSPLICE_STANDALONE */
733 #define clear_list(head, type, member) \
735 struct list_head *_pos, *_n; \
736 list_for_each_safe(_pos, _n, head) { \
738 kfree(list_entry(_pos, type, member)); \
743 * init_ksplice_pack() - Initializes a ksplice pack
744 * @pack: The pack to be initialized. All of the public fields of the
745 * pack and its associated data structures should be populated
746 * before this function is called. The values of the private
747 * fields will be ignored.
749 int init_ksplice_pack(struct ksplice_pack
*pack
)
751 struct update
*update
;
752 struct ksplice_patch
*p
;
753 struct ksplice_section
*s
;
756 #ifdef KSPLICE_STANDALONE
759 #endif /* KSPLICE_STANDALONE */
761 INIT_LIST_HEAD(&pack
->temp_labelvals
);
762 INIT_LIST_HEAD(&pack
->safety_records
);
764 sort(pack
->helper_relocs
,
765 pack
->helper_relocs_end
- pack
->helper_relocs
,
766 sizeof(*pack
->helper_relocs
), compare_relocs
, NULL
);
767 sort(pack
->primary_relocs
,
768 pack
->primary_relocs_end
- pack
->primary_relocs
,
769 sizeof(*pack
->primary_relocs
), compare_relocs
, NULL
);
770 sort(pack
->helper_sections
,
771 pack
->helper_sections_end
- pack
->helper_sections
,
772 sizeof(*pack
->helper_sections
), compare_section_labels
, NULL
);
773 #ifdef KSPLICE_STANDALONE
774 sort(pack
->primary_system_map
,
775 pack
->primary_system_map_end
- pack
->primary_system_map
,
776 sizeof(*pack
->primary_system_map
), compare_system_map
, NULL
);
777 sort(pack
->helper_system_map
,
778 pack
->helper_system_map_end
- pack
->helper_system_map
,
779 sizeof(*pack
->helper_system_map
), compare_system_map
, NULL
);
780 #endif /* KSPLICE_STANDALONE */
782 for (p
= pack
->patches
; p
< pack
->patches_end
; p
++)
784 for (s
= pack
->helper_sections
; s
< pack
->helper_sections_end
; s
++)
786 for (p
= pack
->patches
; p
< pack
->patches_end
; p
++) {
787 const struct ksplice_reloc
*r
= patch_reloc(pack
, p
);
790 if (p
->type
== KSPLICE_PATCH_DATA
) {
791 s
= symbol_section(pack
, r
->symbol
);
794 /* Ksplice creates KSPLICE_PATCH_DATA patches in order
795 * to modify rodata sections that have been explicitly
796 * marked for patching using the ksplice-patch.h macro
797 * ksplice_assume_rodata. Here we modify the section
798 * flags appropriately.
800 if (s
->flags
& KSPLICE_SECTION_DATA
)
801 s
->flags
= (s
->flags
& ~KSPLICE_SECTION_DATA
) |
802 KSPLICE_SECTION_RODATA
;
806 mutex_lock(&module_mutex
);
807 list_for_each_entry(update
, &updates
, list
) {
808 if (strcmp(pack
->kid
, update
->kid
) == 0) {
809 if (update
->stage
!= STAGE_PREPARING
) {
813 add_to_update(pack
, update
);
818 update
= init_ksplice_update(pack
->kid
);
819 if (update
== NULL
) {
823 ret
= ksplice_sysfs_init(update
);
825 cleanup_ksplice_update(update
);
828 add_to_update(pack
, update
);
830 mutex_unlock(&module_mutex
);
833 EXPORT_SYMBOL_GPL(init_ksplice_pack
);
836 * cleanup_ksplice_pack() - Cleans up a pack
837 * @pack: The pack to be cleaned up
839 void cleanup_ksplice_pack(struct ksplice_pack
*pack
)
841 if (pack
->update
== NULL
)
844 mutex_lock(&module_mutex
);
845 if (pack
->update
->stage
== STAGE_APPLIED
) {
846 /* If the pack wasn't actually applied (because we
847 * only applied this update to loaded modules and this
848 * target was not loaded), then unregister the pack
849 * from the list of unused packs.
851 struct ksplice_pack
*p
;
854 list_for_each_entry(p
, &pack
->update
->unused_packs
, list
) {
859 list_del(&pack
->list
);
860 mutex_unlock(&module_mutex
);
863 list_del(&pack
->list
);
864 if (pack
->update
->stage
== STAGE_PREPARING
)
865 maybe_cleanup_ksplice_update(pack
->update
);
867 mutex_unlock(&module_mutex
);
869 EXPORT_SYMBOL_GPL(cleanup_ksplice_pack
);
871 static struct update
*init_ksplice_update(const char *kid
)
873 struct update
*update
;
874 update
= kcalloc(1, sizeof(struct update
), GFP_KERNEL
);
877 update
->name
= kasprintf(GFP_KERNEL
, "ksplice_%s", kid
);
878 if (update
->name
== NULL
) {
882 update
->kid
= kstrdup(kid
, GFP_KERNEL
);
883 if (update
->kid
== NULL
) {
888 if (try_module_get(THIS_MODULE
) != 1) {
894 INIT_LIST_HEAD(&update
->packs
);
895 INIT_LIST_HEAD(&update
->unused_packs
);
896 if (init_debug_buf(update
) != OK
) {
897 module_put(THIS_MODULE
);
903 list_add(&update
->list
, &updates
);
904 update
->stage
= STAGE_PREPARING
;
905 update
->abort_cause
= OK
;
907 INIT_LIST_HEAD(&update
->conflicts
);
911 static void cleanup_ksplice_update(struct update
*update
)
913 list_del(&update
->list
);
914 cleanup_conflicts(update
);
915 clear_debug_buf(update
);
919 module_put(THIS_MODULE
);
922 /* Clean up the update if it no longer has any packs */
923 static void maybe_cleanup_ksplice_update(struct update
*update
)
925 if (list_empty(&update
->packs
) && list_empty(&update
->unused_packs
))
926 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
927 kobject_put(&update
->kobj
);
928 #else /* LINUX_VERSION_CODE < */
929 /* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
930 kobject_unregister(&update
->kobj
);
931 #endif /* LINUX_VERSION_CODE */
934 static void add_to_update(struct ksplice_pack
*pack
, struct update
*update
)
936 pack
->update
= update
;
937 list_add(&pack
->list
, &update
->unused_packs
);
938 pack
->module_list_entry
.primary
= pack
->primary
;
941 static int ksplice_sysfs_init(struct update
*update
)
944 memset(&update
->kobj
, 0, sizeof(update
->kobj
));
945 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
946 #ifndef KSPLICE_STANDALONE
947 ret
= kobject_init_and_add(&update
->kobj
, &ksplice_ktype
,
948 ksplice_kobj
, "%s", update
->kid
);
949 #else /* KSPLICE_STANDALONE */
950 /* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
951 ret
= kobject_init_and_add(&update
->kobj
, &ksplice_ktype
,
952 &THIS_MODULE
->mkobj
.kobj
, "ksplice");
953 #endif /* KSPLICE_STANDALONE */
954 #else /* LINUX_VERSION_CODE < */
955 ret
= kobject_set_name(&update
->kobj
, "%s", "ksplice");
958 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
959 update
->kobj
.parent
= &THIS_MODULE
->mkobj
.kobj
;
960 #else /* LINUX_VERSION_CODE < */
961 /* b86ab02803095190d6b72bcc18dcf620bf378df9 was after 2.6.10 */
962 update
->kobj
.parent
= &THIS_MODULE
->mkobj
->kobj
;
963 #endif /* LINUX_VERSION_CODE */
964 update
->kobj
.ktype
= &ksplice_ktype
;
965 ret
= kobject_register(&update
->kobj
);
966 #endif /* LINUX_VERSION_CODE */
969 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
970 kobject_uevent(&update
->kobj
, KOBJ_ADD
);
971 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
972 /* 312c004d36ce6c739512bac83b452f4c20ab1f62 was after 2.6.14 */
973 /* 12025235884570ba7f02a6f427f973ac6be7ec54 was after 2.6.9 */
974 kobject_uevent(&update
->kobj
, KOBJ_ADD
, NULL
);
975 #endif /* LINUX_VERSION_CODE */
979 static abort_t
apply_update(struct update
*update
)
981 struct ksplice_pack
*pack
, *n
;
985 list_for_each_entry_safe(pack
, n
, &update
->unused_packs
, list
) {
986 if (strcmp(pack
->target_name
, "vmlinux") == 0) {
988 } else if (pack
->target
== NULL
) {
989 pack
->target
= find_module(pack
->target_name
);
990 if (pack
->target
== NULL
||
991 !module_is_live(pack
->target
)) {
992 if (update
->partial
) {
995 ret
= TARGET_NOT_LOADED
;
999 retval
= use_module(pack
->primary
, pack
->target
);
1005 list_del(&pack
->list
);
1006 list_add_tail(&pack
->list
, &update
->packs
);
1007 pack
->module_list_entry
.target
= pack
->target
;
1009 #ifdef KSPLICE_NEED_PARAINSTRUCTIONS
1010 if (pack
->target
== NULL
) {
1011 apply_paravirt(pack
->primary_parainstructions
,
1012 pack
->primary_parainstructions_end
);
1013 apply_paravirt(pack
->helper_parainstructions
,
1014 pack
->helper_parainstructions_end
);
1016 #endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
1019 list_for_each_entry(pack
, &update
->packs
, list
) {
1020 const struct ksplice_section
*sect
;
1021 for (sect
= pack
->primary_sections
;
1022 sect
< pack
->primary_sections_end
; sect
++) {
1023 struct safety_record
*rec
= kmalloc(sizeof(*rec
),
1026 ret
= OUT_OF_MEMORY
;
1029 rec
->addr
= sect
->address
;
1030 rec
->size
= sect
->size
;
1031 rec
->label
= sect
->symbol
->label
;
1032 list_add(&rec
->list
, &pack
->safety_records
);
1036 list_for_each_entry(pack
, &update
->packs
, list
) {
1037 ret
= init_symbol_arrays(pack
);
1039 cleanup_symbol_arrays(pack
);
1042 ret
= prepare_pack(pack
);
1043 cleanup_symbol_arrays(pack
);
1047 ret
= apply_patches(update
);
1049 list_for_each_entry(pack
, &update
->packs
, list
) {
1050 struct ksplice_section
*s
;
1051 if (update
->stage
== STAGE_PREPARING
)
1052 clear_list(&pack
->safety_records
, struct safety_record
,
1054 for (s
= pack
->helper_sections
; s
< pack
->helper_sections_end
;
1056 if (s
->match_map
!= NULL
) {
1057 vfree(s
->match_map
);
1058 s
->match_map
= NULL
;
1065 static int compare_symbolp_names(const void *a
, const void *b
)
1067 const struct ksplice_symbol
*const *sympa
= a
, *const *sympb
= b
;
1068 if ((*sympa
)->name
== NULL
&& (*sympb
)->name
== NULL
)
1070 if ((*sympa
)->name
== NULL
)
1072 if ((*sympb
)->name
== NULL
)
1074 return strcmp((*sympa
)->name
, (*sympb
)->name
);
1077 static int compare_symbolp_labels(const void *a
, const void *b
)
1079 const struct ksplice_symbol
*const *sympa
= a
, *const *sympb
= b
;
1080 return strcmp((*sympa
)->label
, (*sympb
)->label
);
1083 static int symbolp_bsearch_compare(const void *key
, const void *elt
)
1085 const char *name
= key
;
1086 const struct ksplice_symbol
*const *symp
= elt
;
1087 const struct ksplice_symbol
*sym
= *symp
;
1088 if (sym
->name
== NULL
)
1090 return strcmp(name
, sym
->name
);
1093 static abort_t
add_matching_values(struct ksplice_lookup
*lookup
,
1094 const char *sym_name
, unsigned long sym_val
)
1096 struct ksplice_symbol
**symp
;
1099 symp
= bsearch(sym_name
, lookup
->arr
, lookup
->size
,
1100 sizeof(*lookup
->arr
), symbolp_bsearch_compare
);
1104 while (symp
> lookup
->arr
&&
1105 symbolp_bsearch_compare(sym_name
, symp
- 1) == 0)
1108 for (; symp
< lookup
->arr
+ lookup
->size
; symp
++) {
1109 struct ksplice_symbol
*sym
= *symp
;
1110 if (sym
->name
== NULL
|| strcmp(sym_name
, sym
->name
) != 0)
1112 ret
= add_candidate_val(lookup
->pack
, sym
->vals
, sym_val
);
1119 #ifdef CONFIG_KALLSYMS
1120 static int add_kallsyms_values(void *data
, const char *name
,
1121 struct module
*owner
, unsigned long val
)
1123 struct ksplice_lookup
*lookup
= data
;
1124 if (owner
== lookup
->pack
->primary
||
1125 !patches_module(owner
, lookup
->pack
->target
))
1126 return (__force
int)OK
;
1127 return (__force
int)add_matching_values(lookup
, name
, val
);
1129 #endif /* CONFIG_KALLSYMS */
1131 static bool add_export_values(const struct symsearch
*syms
,
1132 struct module
*owner
,
1133 unsigned int symnum
, void *data
)
1135 struct ksplice_lookup
*lookup
= data
;
1138 ret
= add_matching_values(lookup
, syms
->start
[symnum
].name
,
1139 syms
->start
[symnum
].value
);
1147 static void cleanup_symbol_arrays(struct ksplice_pack
*pack
)
1149 struct ksplice_symbol
*sym
;
1150 for (sym
= pack
->primary_symbols
; sym
< pack
->primary_symbols_end
;
1152 if (sym
->vals
!= NULL
) {
1153 clear_list(sym
->vals
, struct candidate_val
, list
);
1158 for (sym
= pack
->helper_symbols
; sym
< pack
->helper_symbols_end
; sym
++) {
1159 if (sym
->vals
!= NULL
) {
1160 clear_list(sym
->vals
, struct candidate_val
, list
);
1168 * The primary and helper modules each have their own independent
1169 * ksplice_symbol structures. uniquify_symbols unifies these separate
1170 * pieces of kernel symbol information by replacing all references to
1171 * the helper copy of symbols with references to the primary copy.
1173 static abort_t
uniquify_symbols(struct ksplice_pack
*pack
)
1175 struct ksplice_reloc
*r
;
1176 struct ksplice_section
*s
;
1177 struct ksplice_symbol
*sym
, **sym_arr
, **symp
;
1178 size_t size
= pack
->primary_symbols_end
- pack
->primary_symbols
;
1183 sym_arr
= vmalloc(sizeof(*sym_arr
) * size
);
1184 if (sym_arr
== NULL
)
1185 return OUT_OF_MEMORY
;
1187 for (symp
= sym_arr
, sym
= pack
->primary_symbols
;
1188 symp
< sym_arr
+ size
&& sym
< pack
->primary_symbols_end
;
1192 sort(sym_arr
, size
, sizeof(*sym_arr
), compare_symbolp_labels
, NULL
);
1194 for (r
= pack
->helper_relocs
; r
< pack
->helper_relocs_end
; r
++) {
1195 symp
= bsearch(&r
->symbol
, sym_arr
, size
, sizeof(*sym_arr
),
1196 compare_symbolp_labels
);
1198 if ((*symp
)->name
== NULL
)
1199 (*symp
)->name
= r
->symbol
->name
;
1204 for (s
= pack
->helper_sections
; s
< pack
->helper_sections_end
; s
++) {
1205 symp
= bsearch(&s
->symbol
, sym_arr
, size
, sizeof(*sym_arr
),
1206 compare_symbolp_labels
);
1208 if ((*symp
)->name
== NULL
)
1209 (*symp
)->name
= s
->symbol
->name
;
1219 * Initialize the ksplice_symbol structures in the given array using
1220 * the kallsyms and exported symbol tables.
1222 static abort_t
init_symbol_array(struct ksplice_pack
*pack
,
1223 struct ksplice_symbol
*start
,
1224 struct ksplice_symbol
*end
)
1226 struct ksplice_symbol
*sym
, **sym_arr
, **symp
;
1227 struct ksplice_lookup lookup
;
1228 size_t size
= end
- start
;
1234 for (sym
= start
; sym
< end
; sym
++) {
1235 if (starts_with(sym
->label
, "__ksymtab")) {
1236 const struct kernel_symbol
*ksym
;
1237 const char *colon
= strchr(sym
->label
, ':');
1238 const char *name
= colon
+ 1;
1241 ksym
= find_symbol(name
, NULL
, NULL
, true, false);
1243 ksdebug(pack
, "Could not find kernel_symbol "
1244 "structure for %s\n", name
);
1247 sym
->value
= (unsigned long)ksym
;
1252 sym
->vals
= kmalloc(sizeof(*sym
->vals
), GFP_KERNEL
);
1253 if (sym
->vals
== NULL
)
1254 return OUT_OF_MEMORY
;
1255 INIT_LIST_HEAD(sym
->vals
);
1259 sym_arr
= vmalloc(sizeof(*sym_arr
) * size
);
1260 if (sym_arr
== NULL
)
1261 return OUT_OF_MEMORY
;
1263 for (symp
= sym_arr
, sym
= start
; symp
< sym_arr
+ size
&& sym
< end
;
1267 sort(sym_arr
, size
, sizeof(*sym_arr
), compare_symbolp_names
, NULL
);
1270 lookup
.arr
= sym_arr
;
1274 each_symbol(add_export_values
, &lookup
);
1276 #ifdef CONFIG_KALLSYMS
1278 ret
= (__force abort_t
)
1279 kallsyms_on_each_symbol(add_kallsyms_values
, &lookup
);
1280 #endif /* CONFIG_KALLSYMS */
1285 /* Prepare the pack's ksplice_symbol structures for run-pre matching */
1286 static abort_t
init_symbol_arrays(struct ksplice_pack
*pack
)
1290 ret
= uniquify_symbols(pack
);
1294 ret
= init_symbol_array(pack
, pack
->helper_symbols
,
1295 pack
->helper_symbols_end
);
1299 ret
= init_symbol_array(pack
, pack
->primary_symbols
,
1300 pack
->primary_symbols_end
);
1307 static abort_t
prepare_pack(struct ksplice_pack
*pack
)
1311 ksdebug(pack
, "Preparing and checking %s\n", pack
->name
);
1312 ret
= match_pack_sections(pack
, false);
1313 if (ret
== NO_MATCH
) {
1314 /* It is possible that by using relocations from .data sections
1315 * we can successfully run-pre match the rest of the sections.
1316 * To avoid using any symbols obtained from .data sections
1317 * (which may be unreliable) in the post code, we first prepare
1318 * the post code and then try to run-pre match the remaining
1319 * sections with the help of .data sections.
1321 ksdebug(pack
, "Continuing without some sections; we might "
1322 "find them later.\n");
1323 ret
= finalize_pack(pack
);
1325 ksdebug(pack
, "Aborted. Unable to continue without "
1326 "the unmatched sections.\n");
1330 ksdebug(pack
, "run-pre: Considering .data sections to find the "
1331 "unmatched sections\n");
1332 ret
= match_pack_sections(pack
, true);
1336 ksdebug(pack
, "run-pre: Found all previously unmatched "
1339 } else if (ret
!= OK
) {
1343 return finalize_pack(pack
);
1347 * Finish preparing the pack for insertion into the kernel.
1348 * Afterwards, the replacement code should be ready to run and the
1349 * ksplice_patches should all be ready for trampoline insertion.
1351 static abort_t
finalize_pack(struct ksplice_pack
*pack
)
1354 ret
= apply_relocs(pack
, pack
->primary_relocs
,
1355 pack
->primary_relocs_end
);
1359 ret
= finalize_patches(pack
);
1366 static abort_t
finalize_patches(struct ksplice_pack
*pack
)
1368 struct ksplice_patch
*p
;
1369 struct safety_record
*rec
;
1372 for (p
= pack
->patches
; p
< pack
->patches_end
; p
++) {
1374 list_for_each_entry(rec
, &pack
->safety_records
, list
) {
1375 if (rec
->addr
<= p
->oldaddr
&&
1376 p
->oldaddr
< rec
->addr
+ rec
->size
) {
1381 if (!found
&& p
->type
!= KSPLICE_PATCH_EXPORT
) {
1382 const struct ksplice_reloc
*r
= patch_reloc(pack
, p
);
1384 ksdebug(pack
, "A patch with no ksplice_reloc at"
1385 " its oldaddr has no safety record\n");
1388 ksdebug(pack
, "No safety record for patch with oldaddr "
1389 "%s+%lx\n", r
->symbol
->label
, r
->target_addend
);
1393 if (p
->type
== KSPLICE_PATCH_TEXT
) {
1394 ret
= prepare_trampoline(pack
, p
);
1399 if (found
&& rec
->addr
+ rec
->size
< p
->oldaddr
+ p
->size
) {
1400 ksdebug(pack
, "Safety record %s is too short for "
1401 "patch\n", rec
->label
);
1405 if (p
->type
== KSPLICE_PATCH_TEXT
) {
1406 if (p
->repladdr
== 0)
1407 p
->repladdr
= (unsigned long)ksplice_deleted
;
1413 static abort_t
map_trampoline_pages(struct update
*update
)
1415 struct ksplice_pack
*pack
;
1416 list_for_each_entry(pack
, &update
->packs
, list
) {
1417 struct ksplice_patch
*p
;
1418 for (p
= pack
->patches
; p
< pack
->patches_end
; p
++) {
1419 p
->vaddr
= map_writable((void *)p
->oldaddr
, p
->size
);
1420 if (p
->vaddr
== NULL
) {
1421 ksdebug(pack
, "Unable to map oldaddr read/write"
1423 unmap_trampoline_pages(update
);
1431 static void unmap_trampoline_pages(struct update
*update
)
1433 struct ksplice_pack
*pack
;
1434 list_for_each_entry(pack
, &update
->packs
, list
) {
1435 struct ksplice_patch
*p
;
1436 for (p
= pack
->patches
; p
< pack
->patches_end
; p
++) {
1437 vunmap((void *)((unsigned long)p
->vaddr
& PAGE_MASK
));
1444 * map_writable creates a shadow page mapping of the range
1445 * [addr, addr + len) so that we can write to code mapped read-only.
1447 * It is similar to a generalized version of x86's text_poke. But
1448 * because one cannot use vmalloc/vfree() inside stop_machine, we use
1449 * map_writable to map the pages before stop_machine, then use the
1450 * mapping inside stop_machine, and unmap the pages afterwards.
1452 static void *map_writable(void *addr
, size_t len
)
1455 int nr_pages
= DIV_ROUND_UP(offset_in_page(addr
) + len
, PAGE_SIZE
);
1456 struct page
**pages
= kmalloc(nr_pages
* sizeof(*pages
), GFP_KERNEL
);
1457 void *page_addr
= (void *)((unsigned long)addr
& PAGE_MASK
);
1463 for (i
= 0; i
< nr_pages
; i
++) {
1464 if (__module_text_address((unsigned long)page_addr
) == NULL
&&
1465 __module_data_address((unsigned long)page_addr
) == NULL
) {
1466 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) || !defined(CONFIG_X86_64)
1467 pages
[i
] = virt_to_page(page_addr
);
1468 #else /* LINUX_VERSION_CODE < && CONFIG_X86_64 */
1469 /* e3ebadd95cb621e2c7436f3d3646447ac9d5c16d was after 2.6.21 */
1471 pfn_to_page(__pa_symbol(page_addr
) >> PAGE_SHIFT
);
1472 #endif /* LINUX_VERSION_CODE || !CONFIG_X86_64 */
1473 WARN_ON(!PageReserved(pages
[i
]));
1475 pages
[i
] = vmalloc_to_page(addr
);
1477 if (pages
[i
] == NULL
) {
1481 page_addr
+= PAGE_SIZE
;
1483 vaddr
= vmap(pages
, nr_pages
, VM_MAP
, PAGE_KERNEL
);
1487 return vaddr
+ offset_in_page(addr
);
1491 * Ksplice adds a dependency on any symbol address used to resolve relocations
1492 * in the primary module.
1494 * Be careful to follow_trampolines so that we always depend on the
1495 * latest version of the target function, since that's the code that
1496 * will run if we call addr.
1498 static abort_t
add_dependency_on_address(struct ksplice_pack
*pack
,
1501 struct ksplice_pack
*p
;
1503 __module_text_address(follow_trampolines(pack
, addr
));
1506 list_for_each_entry(p
, &pack
->update
->packs
, list
) {
1507 if (m
== p
->primary
)
1510 if (use_module(pack
->primary
, m
) != 1)
1515 static abort_t
apply_relocs(struct ksplice_pack
*pack
,
1516 const struct ksplice_reloc
*relocs
,
1517 const struct ksplice_reloc
*relocs_end
)
1519 const struct ksplice_reloc
*r
;
1520 for (r
= relocs
; r
< relocs_end
; r
++) {
1521 abort_t ret
= apply_reloc(pack
, r
);
1528 static abort_t
apply_reloc(struct ksplice_pack
*pack
,
1529 const struct ksplice_reloc
*r
)
1531 switch (r
->howto
->type
) {
1532 case KSPLICE_HOWTO_RELOC
:
1533 case KSPLICE_HOWTO_RELOC_PATCH
:
1534 return apply_howto_reloc(pack
, r
);
1535 case KSPLICE_HOWTO_DATE
:
1536 case KSPLICE_HOWTO_TIME
:
1537 return apply_howto_date(pack
, r
);
1539 ksdebug(pack
, "Unexpected howto type %d\n", r
->howto
->type
);
1545 * Applies a relocation. Aborts if the symbol referenced in it has
1546 * not been uniquely resolved.
1548 static abort_t
apply_howto_reloc(struct ksplice_pack
*pack
,
1549 const struct ksplice_reloc
*r
)
1553 unsigned long sym_addr
;
1556 canary_ret
= contains_canary(pack
, r
->blank_addr
, r
->howto
);
1559 if (canary_ret
== 0) {
1560 ksdebug(pack
, "reloc: skipped %lx to %s+%lx (altinstr)\n",
1561 r
->blank_addr
, r
->symbol
->label
, r
->target_addend
);
1565 #ifdef KSPLICE_STANDALONE
1566 if (!bootstrapped
) {
1567 ret
= add_system_map_candidates(pack
,
1568 pack
->primary_system_map
,
1569 pack
->primary_system_map_end
,
1570 r
->symbol
->label
, &vals
);
1572 release_vals(&vals
);
1576 #endif /* KSPLICE_STANDALONE */
1577 ret
= lookup_symbol(pack
, r
->symbol
, &vals
);
1579 release_vals(&vals
);
1583 * Relocations for the oldaddr fields of patches must have
1584 * been resolved via run-pre matching.
1586 if (!singular(&vals
) || (r
->symbol
->vals
!= NULL
&&
1587 r
->howto
->type
== KSPLICE_HOWTO_RELOC_PATCH
)) {
1588 release_vals(&vals
);
1589 ksdebug(pack
, "Failed to find %s for reloc\n",
1591 return FAILED_TO_FIND
;
1593 sym_addr
= list_entry(vals
.next
, struct candidate_val
, list
)->val
;
1594 release_vals(&vals
);
1596 ret
= write_reloc_value(pack
, r
, r
->blank_addr
,
1597 r
->howto
->pcrel
? sym_addr
- r
->blank_addr
:
1602 ksdebug(pack
, "reloc: %lx to %s+%lx (S=%lx ", r
->blank_addr
,
1603 r
->symbol
->label
, r
->target_addend
, sym_addr
);
1604 switch (r
->howto
->size
) {
1606 ksdebug(pack
, "aft=%02x)\n", *(uint8_t *)r
->blank_addr
);
1609 ksdebug(pack
, "aft=%04x)\n", *(uint16_t *)r
->blank_addr
);
1612 ksdebug(pack
, "aft=%08x)\n", *(uint32_t *)r
->blank_addr
);
1614 #if BITS_PER_LONG >= 64
1616 ksdebug(pack
, "aft=%016llx)\n", *(uint64_t *)r
->blank_addr
);
1618 #endif /* BITS_PER_LONG */
1620 ksdebug(pack
, "Aborted. Invalid relocation size.\n");
1623 #ifdef KSPLICE_STANDALONE
1626 #endif /* KSPLICE_STANDALONE */
1629 * Create labelvals so that we can verify our choices in the
1630 * second round of run-pre matching that considers data sections.
1632 ret
= create_labelval(pack
, r
->symbol
, sym_addr
, VAL
);
1636 return add_dependency_on_address(pack
, sym_addr
);
1640 * Date relocations are created wherever __DATE__ or __TIME__ is used
1641 * in the kernel; we resolve them by simply copying in the date/time
1642 * obtained from run-pre matching the relevant compilation unit.
1644 static abort_t
apply_howto_date(struct ksplice_pack
*pack
,
1645 const struct ksplice_reloc
*r
)
1647 if (r
->symbol
->vals
!= NULL
) {
1648 ksdebug(pack
, "Failed to find %s for date\n", r
->symbol
->label
);
1649 return FAILED_TO_FIND
;
1651 memcpy((unsigned char *)r
->blank_addr
,
1652 (const unsigned char *)r
->symbol
->value
, r
->howto
->size
);
1657 * Given a relocation and its run address, compute the address of the
1658 * symbol the relocation referenced, and store it in *valp.
1660 static abort_t
read_reloc_value(struct ksplice_pack
*pack
,
1661 const struct ksplice_reloc
*r
,
1662 unsigned long addr
, unsigned long *valp
)
1664 unsigned char bytes
[sizeof(long)];
1666 const struct ksplice_reloc_howto
*howto
= r
->howto
;
1668 if (howto
->size
<= 0 || howto
->size
> sizeof(long)) {
1669 ksdebug(pack
, "Aborted. Invalid relocation size.\n");
1673 if (probe_kernel_read(bytes
, (void *)addr
, howto
->size
) == -EFAULT
)
1676 switch (howto
->size
) {
1678 val
= *(uint8_t *)bytes
;
1681 val
= *(uint16_t *)bytes
;
1684 val
= *(uint32_t *)bytes
;
1686 #if BITS_PER_LONG >= 64
1688 val
= *(uint64_t *)bytes
;
1690 #endif /* BITS_PER_LONG */
1692 ksdebug(pack
, "Aborted. Invalid relocation size.\n");
1696 val
&= howto
->dst_mask
;
1697 if (howto
->signed_addend
)
1698 val
|= -(val
& (howto
->dst_mask
& ~(howto
->dst_mask
>> 1)));
1699 val
<<= howto
->rightshift
;
1700 val
-= r
->insn_addend
+ r
->target_addend
;
1706 * Given a relocation, the address of its storage unit, and the
1707 * address of the symbol the relocation references, write the
1708 * relocation's final value into the storage unit.
1710 static abort_t
write_reloc_value(struct ksplice_pack
*pack
,
1711 const struct ksplice_reloc
*r
,
1712 unsigned long addr
, unsigned long sym_addr
)
1714 unsigned long val
= sym_addr
+ r
->target_addend
+ r
->insn_addend
;
1715 const struct ksplice_reloc_howto
*howto
= r
->howto
;
1716 val
>>= howto
->rightshift
;
1717 switch (howto
->size
) {
1719 *(uint8_t *)addr
= (*(uint8_t *)addr
& ~howto
->dst_mask
) |
1720 (val
& howto
->dst_mask
);
1723 *(uint16_t *)addr
= (*(uint16_t *)addr
& ~howto
->dst_mask
) |
1724 (val
& howto
->dst_mask
);
1727 *(uint32_t *)addr
= (*(uint32_t *)addr
& ~howto
->dst_mask
) |
1728 (val
& howto
->dst_mask
);
1730 #if BITS_PER_LONG >= 64
1732 *(uint64_t *)addr
= (*(uint64_t *)addr
& ~howto
->dst_mask
) |
1733 (val
& howto
->dst_mask
);
1735 #endif /* BITS_PER_LONG */
1737 ksdebug(pack
, "Aborted. Invalid relocation size.\n");
1741 if (read_reloc_value(pack
, r
, addr
, &val
) != OK
|| val
!= sym_addr
) {
1742 ksdebug(pack
, "Aborted. Relocation overflow.\n");
1749 /* Replacement address used for functions deleted by the patch */
1750 static void __attribute__((noreturn
)) ksplice_deleted(void)
1752 printk(KERN_CRIT
"Called a kernel function deleted by Ksplice!\n");
1754 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
1755 /* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
1760 /* Floodfill to run-pre match the sections within a pack. */
1761 static abort_t
match_pack_sections(struct ksplice_pack
*pack
,
1762 bool consider_data_sections
)
1764 struct ksplice_section
*sect
;
1769 for (sect
= pack
->helper_sections
; sect
< pack
->helper_sections_end
;
1771 if ((sect
->flags
& KSPLICE_SECTION_DATA
) == 0 &&
1772 (sect
->flags
& KSPLICE_SECTION_STRING
) == 0 &&
1773 (sect
->flags
& KSPLICE_SECTION_MATCHED
) == 0)
1777 while (remaining
> 0) {
1779 for (sect
= pack
->helper_sections
;
1780 sect
< pack
->helper_sections_end
; sect
++) {
1781 if ((sect
->flags
& KSPLICE_SECTION_MATCHED
) != 0)
1783 if ((!consider_data_sections
&&
1784 (sect
->flags
& KSPLICE_SECTION_DATA
) != 0) ||
1785 (sect
->flags
& KSPLICE_SECTION_STRING
) != 0)
1787 ret
= find_section(pack
, sect
);
1789 sect
->flags
|= KSPLICE_SECTION_MATCHED
;
1790 if ((sect
->flags
& KSPLICE_SECTION_DATA
) == 0)
1793 } else if (ret
!= NO_MATCH
) {
1801 for (sect
= pack
->helper_sections
;
1802 sect
< pack
->helper_sections_end
; sect
++) {
1803 if ((sect
->flags
& KSPLICE_SECTION_MATCHED
) != 0 ||
1804 (sect
->flags
& KSPLICE_SECTION_STRING
) != 0)
1806 ksdebug(pack
, "run-pre: could not match %s "
1808 (sect
->flags
& KSPLICE_SECTION_DATA
) != 0 ?
1810 (sect
->flags
& KSPLICE_SECTION_RODATA
) != 0 ?
1811 "rodata" : "text", sect
->symbol
->label
);
1813 ksdebug(pack
, "Aborted. run-pre: could not match some "
1821 * Search for the section in the running kernel. Returns OK if and
1822 * only if it finds precisely one address in the kernel matching the
1825 static abort_t
find_section(struct ksplice_pack
*pack
,
1826 struct ksplice_section
*sect
)
1830 unsigned long run_addr
;
1832 struct candidate_val
*v
, *n
;
1834 #ifdef KSPLICE_STANDALONE
1835 ret
= add_system_map_candidates(pack
, pack
->helper_system_map
,
1836 pack
->helper_system_map_end
,
1837 sect
->symbol
->label
, &vals
);
1839 release_vals(&vals
);
1842 #endif /* KSPLICE_STANDALONE */
1843 ret
= lookup_symbol(pack
, sect
->symbol
, &vals
);
1845 release_vals(&vals
);
1849 ksdebug(pack
, "run-pre: starting sect search for %s\n",
1850 sect
->symbol
->label
);
1852 list_for_each_entry_safe(v
, n
, &vals
, list
) {
1856 ret
= try_addr(pack
, sect
, run_addr
, NULL
, RUN_PRE_INITIAL
);
1857 if (ret
== NO_MATCH
) {
1860 } else if (ret
!= OK
) {
1861 release_vals(&vals
);
1866 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
1867 if (list_empty(&vals
) && (sect
->flags
& KSPLICE_SECTION_DATA
) == 0) {
1868 ret
= brute_search_all(pack
, sect
, &vals
);
1870 release_vals(&vals
);
1874 * Make sure run-pre matching output is displayed if
1875 * brute_search succeeds.
1877 if (singular(&vals
)) {
1878 run_addr
= list_entry(vals
.next
, struct candidate_val
,
1880 ret
= try_addr(pack
, sect
, run_addr
, NULL
,
1883 ksdebug(pack
, "run-pre: Debug run failed for "
1884 "sect %s:\n", sect
->symbol
->label
);
1885 release_vals(&vals
);
1890 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
1892 if (singular(&vals
)) {
1893 LIST_HEAD(safety_records
);
1894 run_addr
= list_entry(vals
.next
, struct candidate_val
,
1896 ret
= try_addr(pack
, sect
, run_addr
, &safety_records
,
1898 release_vals(&vals
);
1900 clear_list(&safety_records
, struct safety_record
, list
);
1901 ksdebug(pack
, "run-pre: Final run failed for sect "
1902 "%s:\n", sect
->symbol
->label
);
1904 list_splice(&safety_records
, &pack
->safety_records
);
1907 } else if (!list_empty(&vals
)) {
1908 struct candidate_val
*val
;
1909 ksdebug(pack
, "run-pre: multiple candidates for sect %s:\n",
1910 sect
->symbol
->label
);
1912 list_for_each_entry(val
, &vals
, list
) {
1914 ksdebug(pack
, "%lx\n", val
->val
);
1916 ksdebug(pack
, "...\n");
1920 release_vals(&vals
);
1923 release_vals(&vals
);
1928 * try_addr is the interface to run-pre matching. Its primary
1929 * purpose is to manage debugging information for run-pre matching;
1930 * all the hard work is in run_pre_cmp.
1932 static abort_t
try_addr(struct ksplice_pack
*pack
,
1933 struct ksplice_section
*sect
,
1934 unsigned long run_addr
,
1935 struct list_head
*safety_records
,
1936 enum run_pre_mode mode
)
1939 const struct module
*run_module
;
1941 if ((sect
->flags
& KSPLICE_SECTION_RODATA
) != 0 ||
1942 (sect
->flags
& KSPLICE_SECTION_DATA
) != 0)
1943 run_module
= __module_data_address(run_addr
);
1945 run_module
= __module_text_address(run_addr
);
1946 if (run_module
== pack
->primary
) {
1947 ksdebug(pack
, "run-pre: unexpected address %lx in primary "
1948 "module %s for sect %s\n", run_addr
, run_module
->name
,
1949 sect
->symbol
->label
);
1952 if (!patches_module(run_module
, pack
->target
)) {
1953 ksdebug(pack
, "run-pre: ignoring address %lx in other module "
1954 "%s for sect %s\n", run_addr
, run_module
== NULL
?
1955 "vmlinux" : run_module
->name
, sect
->symbol
->label
);
1959 ret
= create_labelval(pack
, sect
->symbol
, run_addr
, TEMP
);
1963 #ifdef CONFIG_FUNCTION_DATA_SECTIONS
1964 ret
= run_pre_cmp(pack
, sect
, run_addr
, safety_records
, mode
);
1965 #else /* !CONFIG_FUNCTION_DATA_SECTIONS */
1966 if ((sect
->flags
& KSPLICE_SECTION_TEXT
) != 0)
1967 ret
= arch_run_pre_cmp(pack
, sect
, run_addr
, safety_records
,
1970 ret
= run_pre_cmp(pack
, sect
, run_addr
, safety_records
, mode
);
1971 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
1972 if (ret
== NO_MATCH
&& mode
!= RUN_PRE_FINAL
) {
1973 set_temp_labelvals(pack
, NOVAL
);
1974 ksdebug(pack
, "run-pre: %s sect %s does not match (r_a=%lx "
1976 (sect
->flags
& KSPLICE_SECTION_RODATA
) != 0 ? "rodata" :
1977 (sect
->flags
& KSPLICE_SECTION_DATA
) != 0 ? "data" :
1978 "text", sect
->symbol
->label
, run_addr
, sect
->address
,
1980 ksdebug(pack
, "run-pre: ");
1981 if (pack
->update
->debug
>= 1) {
1982 #ifdef CONFIG_FUNCTION_DATA_SECTIONS
1983 ret
= run_pre_cmp(pack
, sect
, run_addr
, safety_records
,
1985 #else /* !CONFIG_FUNCTION_DATA_SECTIONS */
1986 if ((sect
->flags
& KSPLICE_SECTION_TEXT
) != 0)
1987 ret
= arch_run_pre_cmp(pack
, sect
, run_addr
,
1991 ret
= run_pre_cmp(pack
, sect
, run_addr
,
1994 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
1995 set_temp_labelvals(pack
, NOVAL
);
1997 ksdebug(pack
, "\n");
1999 } else if (ret
!= OK
) {
2000 set_temp_labelvals(pack
, NOVAL
);
2004 if (mode
!= RUN_PRE_FINAL
) {
2005 set_temp_labelvals(pack
, NOVAL
);
2006 ksdebug(pack
, "run-pre: candidate for sect %s=%lx\n",
2007 sect
->symbol
->label
, run_addr
);
2011 set_temp_labelvals(pack
, VAL
);
2012 ksdebug(pack
, "run-pre: found sect %s=%lx\n", sect
->symbol
->label
,
2018 * run_pre_cmp is the primary run-pre matching function; it determines
2019 * whether the given ksplice_section matches the code or data in the
2020 * running kernel starting at run_addr.
2022 * If run_pre_mode is RUN_PRE_FINAL, a safety record for the matched
2023 * section is created.
2025 * The run_pre_mode is also used to determine what debugging
2026 * information to display.
2028 static abort_t
run_pre_cmp(struct ksplice_pack
*pack
,
2029 const struct ksplice_section
*sect
,
2030 unsigned long run_addr
,
2031 struct list_head
*safety_records
,
2032 enum run_pre_mode mode
)
2036 const struct ksplice_reloc
*r
, *finger
;
2037 const unsigned char *pre
, *run
, *pre_start
, *run_start
;
2038 unsigned char runval
;
2040 pre_start
= (const unsigned char *)sect
->address
;
2041 run_start
= (const unsigned char *)run_addr
;
2043 finger
= init_reloc_search(pack
, sect
);
2047 while (pre
< pre_start
+ sect
->size
) {
2048 unsigned long offset
= pre
- pre_start
;
2049 ret
= lookup_reloc(pack
, &finger
, (unsigned long)pre
, &r
);
2051 ret
= handle_reloc(pack
, sect
, r
, (unsigned long)run
,
2054 if (mode
== RUN_PRE_INITIAL
)
2055 ksdebug(pack
, "reloc in sect does not "
2056 "match after %lx/%lx bytes\n",
2057 offset
, sect
->size
);
2060 if (mode
== RUN_PRE_DEBUG
)
2061 print_bytes(pack
, run
, r
->howto
->size
, pre
,
2063 pre
+= r
->howto
->size
;
2064 run
+= r
->howto
->size
;
2067 } else if (ret
!= NO_MATCH
) {
2071 if ((sect
->flags
& KSPLICE_SECTION_TEXT
) != 0) {
2072 ret
= handle_paravirt(pack
, (unsigned long)pre
,
2073 (unsigned long)run
, &matched
);
2077 if (mode
== RUN_PRE_DEBUG
)
2078 print_bytes(pack
, run
, matched
, pre
,
2086 if (probe_kernel_read(&runval
, (void *)run
, 1) == -EFAULT
) {
2087 if (mode
== RUN_PRE_INITIAL
)
2088 ksdebug(pack
, "sect unmapped after %lx/%lx "
2089 "bytes\n", offset
, sect
->size
);
2093 if (runval
!= *pre
&&
2094 (sect
->flags
& KSPLICE_SECTION_DATA
) == 0) {
2095 if (mode
== RUN_PRE_INITIAL
)
2096 ksdebug(pack
, "sect does not match after "
2097 "%lx/%lx bytes\n", offset
, sect
->size
);
2098 if (mode
== RUN_PRE_DEBUG
) {
2099 print_bytes(pack
, run
, 1, pre
, 1);
2100 ksdebug(pack
, "[p_o=%lx] ! ", offset
);
2101 print_bytes(pack
, run
+ 1, 2, pre
+ 1, 2);
2105 if (mode
== RUN_PRE_DEBUG
)
2106 print_bytes(pack
, run
, 1, pre
, 1);
2110 return create_safety_record(pack
, sect
, safety_records
, run_addr
,
/*
 * Debugging helper: print the "run" bytes from the running kernel next
 * to the "pre" bytes from the pack.  Bytes that match are printed once;
 * bytes that differ are printed as run/pre pairs; trailing bytes present
 * on only one side are printed with the other side left blank.
 */
static void print_bytes(struct ksplice_pack *pack,
			const unsigned char *run, int runc,
			const unsigned char *pre, int prec)
{
	int o;
	int matched = min(runc, prec);
	for (o = 0; o < matched; o++) {
		if (run[o] == pre[o])
			ksdebug(pack, "%02x ", run[o]);
		else
			ksdebug(pack, "%02x/%02x ", run[o], pre[o]);
	}
	for (o = matched; o < runc; o++)
		ksdebug(pack, "%02x/ ", run[o]);
	for (o = matched; o < prec; o++)
		ksdebug(pack, "/%02x ", pre[o]);
}
2132 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
2133 static abort_t
brute_search(struct ksplice_pack
*pack
,
2134 struct ksplice_section
*sect
,
2135 const void *start
, unsigned long len
,
2136 struct list_head
*vals
)
2142 for (addr
= (unsigned long)start
; addr
< (unsigned long)start
+ len
;
2144 if (addr
% 100000 == 0)
2147 if (probe_kernel_read(&run
, (void *)addr
, 1) == -EFAULT
)
2150 pre
= *(const unsigned char *)(sect
->address
);
2155 ret
= try_addr(pack
, sect
, addr
, NULL
, RUN_PRE_INITIAL
);
2157 ret
= add_candidate_val(pack
, vals
, addr
);
2160 } else if (ret
!= NO_MATCH
) {
2168 static abort_t
brute_search_all(struct ksplice_pack
*pack
,
2169 struct ksplice_section
*sect
,
2170 struct list_head
*vals
)
2176 ksdebug(pack
, "brute_search: searching for %s\n", sect
->symbol
->label
);
2177 saved_debug
= pack
->update
->debug
;
2178 pack
->update
->debug
= 0;
2180 list_for_each_entry(m
, &modules
, list
) {
2181 if (!patches_module(m
, pack
->target
) || m
== pack
->primary
)
2183 ret
= brute_search(pack
, sect
, m
->module_core
, m
->core_size
,
2187 ret
= brute_search(pack
, sect
, m
->module_init
, m
->init_size
,
2193 ret
= brute_search(pack
, sect
, (const void *)init_mm
.start_code
,
2194 init_mm
.end_code
- init_mm
.start_code
, vals
);
2197 pack
->update
->debug
= saved_debug
;
2200 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
2203 unsigned long address
;
2207 static int reloc_bsearch_compare(const void *key
, const void *elt
)
2209 const struct range
*range
= key
;
2210 const struct ksplice_reloc
*r
= elt
;
2211 if (range
->address
+ range
->size
<= r
->blank_addr
)
2213 if (range
->address
> r
->blank_addr
)
2218 static const struct ksplice_reloc
*find_reloc(const struct ksplice_reloc
*start
,
2219 const struct ksplice_reloc
*end
,
2220 unsigned long address
,
2223 const struct ksplice_reloc
*r
;
2224 struct range range
= { address
, size
};
2225 r
= bsearch((void *)&range
, start
, end
- start
, sizeof(*r
),
2226 reloc_bsearch_compare
);
2229 while (r
> start
&& (r
- 1)->blank_addr
>= address
)
2234 static const struct ksplice_reloc
*
2235 init_reloc_search(struct ksplice_pack
*pack
, const struct ksplice_section
*sect
)
2237 const struct ksplice_reloc
*r
;
2238 r
= find_reloc(pack
->helper_relocs
, pack
->helper_relocs_end
,
2239 sect
->address
, sect
->size
);
2241 return pack
->helper_relocs_end
;
2246 * lookup_reloc implements an amortized O(1) lookup for the next
2247 * helper relocation. It must be called with a strictly increasing
2248 * sequence of addresses.
2250 * The fingerp is private data for lookup_reloc, and needs to have
2251 * been initialized as a pointer to the result of find_reloc (or
2252 * init_reloc_search).
2254 static abort_t
lookup_reloc(struct ksplice_pack
*pack
,
2255 const struct ksplice_reloc
**fingerp
,
2257 const struct ksplice_reloc
**relocp
)
2259 const struct ksplice_reloc
*r
= *fingerp
;
2262 while (r
< pack
->helper_relocs_end
&&
2263 addr
>= r
->blank_addr
+ r
->howto
->size
&&
2264 !(addr
== r
->blank_addr
&& r
->howto
->size
== 0))
2267 if (r
== pack
->helper_relocs_end
)
2269 if (addr
< r
->blank_addr
)
2272 if (r
->howto
->type
!= KSPLICE_HOWTO_RELOC
)
2275 canary_ret
= contains_canary(pack
, r
->blank_addr
, r
->howto
);
2278 if (canary_ret
== 0) {
2279 ksdebug(pack
, "run-pre: reloc skipped at p_a=%lx to %s+%lx "
2280 "(altinstr)\n", r
->blank_addr
, r
->symbol
->label
,
2284 if (addr
!= r
->blank_addr
) {
2285 ksdebug(pack
, "Invalid nonzero relocation offset\n");
2291 static abort_t
handle_reloc(struct ksplice_pack
*pack
,
2292 const struct ksplice_section
*sect
,
2293 const struct ksplice_reloc
*r
,
2294 unsigned long run_addr
, enum run_pre_mode mode
)
2296 switch (r
->howto
->type
) {
2297 case KSPLICE_HOWTO_RELOC
:
2298 return handle_howto_reloc(pack
, sect
, r
, run_addr
, mode
);
2299 case KSPLICE_HOWTO_DATE
:
2300 case KSPLICE_HOWTO_TIME
:
2301 return handle_howto_date(pack
, sect
, r
, run_addr
, mode
);
2302 case KSPLICE_HOWTO_BUG
:
2303 return handle_bug(pack
, r
, run_addr
);
2304 case KSPLICE_HOWTO_EXTABLE
:
2305 return handle_extable(pack
, r
, run_addr
);
2307 ksdebug(pack
, "Unexpected howto type %d\n", r
->howto
->type
);
2313 * For date/time relocations, we check that the sequence of bytes
2314 * matches the format of a date or time.
2316 static abort_t
handle_howto_date(struct ksplice_pack
*pack
,
2317 const struct ksplice_section
*sect
,
2318 const struct ksplice_reloc
*r
,
2319 unsigned long run_addr
, enum run_pre_mode mode
)
2322 char *buf
= kmalloc(r
->howto
->size
, GFP_KERNEL
);
2325 return OUT_OF_MEMORY
;
2326 if (probe_kernel_read(buf
, (void *)run_addr
, r
->howto
->size
) == -EFAULT
) {
2331 switch (r
->howto
->type
) {
2332 case KSPLICE_HOWTO_TIME
:
2333 if (isdigit(buf
[0]) && isdigit(buf
[1]) && buf
[2] == ':' &&
2334 isdigit(buf
[3]) && isdigit(buf
[4]) && buf
[5] == ':' &&
2335 isdigit(buf
[6]) && isdigit(buf
[7]))
2340 case KSPLICE_HOWTO_DATE
:
2341 if (isalpha(buf
[0]) && isalpha(buf
[1]) && isalpha(buf
[2]) &&
2342 buf
[3] == ' ' && (buf
[4] == ' ' || isdigit(buf
[4])) &&
2343 isdigit(buf
[5]) && buf
[6] == ' ' && isdigit(buf
[7]) &&
2344 isdigit(buf
[8]) && isdigit(buf
[9]) && isdigit(buf
[10]))
2352 if (ret
== NO_MATCH
&& mode
== RUN_PRE_INITIAL
)
2353 ksdebug(pack
, "%s string: \"%.*s\" does not match format\n",
2354 r
->howto
->type
== KSPLICE_HOWTO_DATE
? "date" : "time",
2355 r
->howto
->size
, buf
);
2359 ret
= create_labelval(pack
, r
->symbol
, run_addr
, TEMP
);
2366 * Extract the value of a symbol used in a relocation in the pre code
2367 * during run-pre matching, giving an error if it conflicts with a
2368 * previously found value of that symbol
2370 static abort_t
handle_howto_reloc(struct ksplice_pack
*pack
,
2371 const struct ksplice_section
*sect
,
2372 const struct ksplice_reloc
*r
,
2373 unsigned long run_addr
,
2374 enum run_pre_mode mode
)
2376 struct ksplice_section
*sym_sect
= symbol_section(pack
, r
->symbol
);
2377 unsigned long offset
= r
->target_addend
;
2381 ret
= read_reloc_value(pack
, r
, run_addr
, &val
);
2384 if (r
->howto
->pcrel
)
2387 #ifdef KSPLICE_STANDALONE
2388 /* The match_map is only used in KSPLICE_STANDALONE */
2389 if (sym_sect
== NULL
|| sym_sect
->match_map
== NULL
|| offset
== 0) {
2391 } else if (offset
< 0 || offset
>= sym_sect
->size
) {
2392 ksdebug(pack
, "Out of range relocation: %s+%lx -> %s+%lx",
2393 sect
->symbol
->label
, r
->blank_addr
- sect
->address
,
2394 r
->symbol
->label
, offset
);
2396 } else if (sect
== sym_sect
&& sect
->match_map
[offset
] == NULL
) {
2397 sym_sect
->match_map
[offset
] =
2398 (const unsigned char *)r
->symbol
->value
+ offset
;
2399 } else if (sect
== sym_sect
&& (unsigned long)sect
->match_map
[offset
] ==
2400 r
->symbol
->value
+ offset
) {
2402 } else if (sect
== sym_sect
) {
2403 ksdebug(pack
, "Relocations to nonmatching locations within "
2404 "section %s: %lx does not match %lx\n",
2405 sect
->symbol
->label
, offset
,
2406 (unsigned long)sect
->match_map
[offset
] -
2409 } else if ((sym_sect
->flags
& KSPLICE_SECTION_MATCHED
) == 0) {
2410 if (mode
== RUN_PRE_INITIAL
)
2411 ksdebug(pack
, "Delaying matching of %s due to reloc "
2412 "from to unmatching section: %s+%lx\n",
2413 sect
->symbol
->label
, r
->symbol
->label
, offset
);
2415 } else if (sym_sect
->match_map
[offset
] == NULL
) {
2416 if (mode
== RUN_PRE_INITIAL
)
2417 ksdebug(pack
, "Relocation not to instruction boundary: "
2418 "%s+%lx -> %s+%lx", sect
->symbol
->label
,
2419 r
->blank_addr
- sect
->address
, r
->symbol
->label
,
2422 } else if ((unsigned long)sym_sect
->match_map
[offset
] !=
2423 r
->symbol
->value
+ offset
) {
2424 if (mode
== RUN_PRE_INITIAL
)
2425 ksdebug(pack
, "Match map shift %s+%lx: %lx != %lx\n",
2426 r
->symbol
->label
, offset
,
2427 r
->symbol
->value
+ offset
,
2428 (unsigned long)sym_sect
->match_map
[offset
]);
2429 val
+= r
->symbol
->value
+ offset
-
2430 (unsigned long)sym_sect
->match_map
[offset
];
2432 #endif /* KSPLICE_STANDALONE */
2434 if (mode
== RUN_PRE_INITIAL
)
2435 ksdebug(pack
, "run-pre: reloc at r_a=%lx p_a=%lx to %s+%lx: "
2436 "found %s = %lx\n", run_addr
, r
->blank_addr
,
2437 r
->symbol
->label
, offset
, r
->symbol
->label
, val
);
2439 if (contains_canary(pack
, run_addr
, r
->howto
) != 0) {
2440 ksdebug(pack
, "Aborted. Unexpected canary in run code at %lx"
2445 if ((sect
->flags
& KSPLICE_SECTION_DATA
) != 0 &&
2446 sect
->symbol
== r
->symbol
)
2448 ret
= create_labelval(pack
, r
->symbol
, val
, TEMP
);
2449 if (ret
== NO_MATCH
&& mode
== RUN_PRE_INITIAL
)
2450 ksdebug(pack
, "run-pre: reloc at r_a=%lx p_a=%lx: labelval %s "
2451 "= %lx does not match expected %lx\n", run_addr
,
2452 r
->blank_addr
, r
->symbol
->label
, r
->symbol
->value
, val
);
2456 if (sym_sect
!= NULL
&& (sym_sect
->flags
& KSPLICE_SECTION_MATCHED
) == 0
2457 && (sym_sect
->flags
& KSPLICE_SECTION_STRING
) != 0) {
2458 if (mode
== RUN_PRE_INITIAL
)
2459 ksdebug(pack
, "Recursively comparing string section "
2460 "%s\n", sym_sect
->symbol
->label
);
2461 else if (mode
== RUN_PRE_DEBUG
)
2462 ksdebug(pack
, "[str start] ");
2463 ret
= run_pre_cmp(pack
, sym_sect
, val
, NULL
, mode
);
2464 if (mode
== RUN_PRE_DEBUG
)
2465 ksdebug(pack
, "[str end] ");
2466 if (ret
== OK
&& mode
== RUN_PRE_INITIAL
)
2467 ksdebug(pack
, "Successfully matched string section %s"
2468 "\n", sym_sect
->symbol
->label
);
2469 else if (mode
== RUN_PRE_INITIAL
)
2470 ksdebug(pack
, "Failed to match string section %s\n",
2471 sym_sect
->symbol
->label
);
2476 static int symbol_section_bsearch_compare(const void *a
, const void *b
)
2478 const struct ksplice_symbol
*sym
= a
;
2479 const struct ksplice_section
*sect
= b
;
2480 return strcmp(sym
->label
, sect
->symbol
->label
);
2483 static int compare_section_labels(const void *va
, const void *vb
)
2485 const struct ksplice_section
*a
= va
, *b
= vb
;
2486 return strcmp(a
->symbol
->label
, b
->symbol
->label
);
2489 static struct ksplice_section
*symbol_section(struct ksplice_pack
*pack
,
2490 const struct ksplice_symbol
*sym
)
2492 return bsearch(sym
, pack
->helper_sections
, pack
->helper_sections_end
-
2493 pack
->helper_sections
, sizeof(struct ksplice_section
),
2494 symbol_section_bsearch_compare
);
2497 /* Find the relocation for the oldaddr of a ksplice_patch */
2498 static const struct ksplice_reloc
*patch_reloc(struct ksplice_pack
*pack
,
2499 const struct ksplice_patch
*p
)
2501 unsigned long addr
= (unsigned long)&p
->oldaddr
;
2502 const struct ksplice_reloc
*r
=
2503 find_reloc(pack
->primary_relocs
, pack
->primary_relocs_end
, addr
,
2505 if (r
== NULL
|| r
->blank_addr
< addr
||
2506 r
->blank_addr
>= addr
+ sizeof(addr
))
2512 * Populates vals with the possible values for ksym from the various
2513 * sources Ksplice uses to resolve symbols
2515 static abort_t
lookup_symbol(struct ksplice_pack
*pack
,
2516 const struct ksplice_symbol
*ksym
,
2517 struct list_head
*vals
)
2521 #ifdef KSPLICE_STANDALONE
2524 #endif /* KSPLICE_STANDALONE */
2526 if (ksym
->vals
== NULL
) {
2528 ksdebug(pack
, "using detected sym %s=%lx\n", ksym
->label
,
2530 return add_candidate_val(pack
, vals
, ksym
->value
);
2533 #ifdef CONFIG_MODULE_UNLOAD
2534 if (strcmp(ksym
->label
, "cleanup_module") == 0 && pack
->target
!= NULL
2535 && pack
->target
->exit
!= NULL
) {
2536 ret
= add_candidate_val(pack
, vals
,
2537 (unsigned long)pack
->target
->exit
);
2543 if (ksym
->name
!= NULL
) {
2544 struct candidate_val
*val
;
2545 list_for_each_entry(val
, ksym
->vals
, list
) {
2546 ret
= add_candidate_val(pack
, vals
, val
->val
);
2551 ret
= new_export_lookup(pack
, ksym
->name
, vals
);
2559 #ifdef KSPLICE_STANDALONE
2561 add_system_map_candidates(struct ksplice_pack
*pack
,
2562 const struct ksplice_system_map
*start
,
2563 const struct ksplice_system_map
*end
,
2564 const char *label
, struct list_head
*vals
)
2569 const struct ksplice_system_map
*smap
;
2571 /* Some Fedora kernel releases have System.map files whose symbol
2572 * addresses disagree with the running kernel by a constant address
2573 * offset because of the CONFIG_PHYSICAL_START and CONFIG_PHYSICAL_ALIGN
2574 * values used to compile these kernels. This constant address offset
2575 * is always a multiple of 0x100000.
2577 * If we observe an offset that is NOT a multiple of 0x100000, then the
2578 * user provided us with an incorrect System.map file, and we should
2580 * If we observe an offset that is a multiple of 0x100000, then we can
2581 * adjust the System.map address values accordingly and proceed.
2583 off
= (unsigned long)printk
- pack
->map_printk
;
2584 if (off
& 0xfffff) {
2585 ksdebug(pack
, "Aborted. System.map does not match kernel.\n");
2586 return BAD_SYSTEM_MAP
;
2589 smap
= bsearch(label
, start
, end
- start
, sizeof(*smap
),
2590 system_map_bsearch_compare
);
2594 for (i
= 0; i
< smap
->nr_candidates
; i
++) {
2595 ret
= add_candidate_val(pack
, vals
, smap
->candidates
[i
] + off
);
2602 static int system_map_bsearch_compare(const void *key
, const void *elt
)
2604 const struct ksplice_system_map
*map
= elt
;
2605 const char *label
= key
;
2606 return strcmp(label
, map
->label
);
2608 #endif /* !KSPLICE_STANDALONE */
2611 * An update could one module to export a symbol and at the same time
2612 * change another module to use that symbol. This violates the normal
2613 * situation where the packs can be handled independently.
2615 * new_export_lookup obtains symbol values from the changes to the
2616 * exported symbol table made by other packs.
2618 static abort_t
new_export_lookup(struct ksplice_pack
*ipack
, const char *name
,
2619 struct list_head
*vals
)
2621 struct ksplice_pack
*pack
;
2622 struct ksplice_patch
*p
;
2623 list_for_each_entry(pack
, &ipack
->update
->packs
, list
) {
2624 for (p
= pack
->patches
; p
< pack
->patches_end
; p
++) {
2625 const struct kernel_symbol
*sym
;
2626 const struct ksplice_reloc
*r
;
2627 if (p
->type
!= KSPLICE_PATCH_EXPORT
||
2628 strcmp(name
, *(const char **)p
->contents
) != 0)
2631 /* Check that the p->oldaddr reloc has been resolved. */
2632 r
= patch_reloc(pack
, p
);
2634 contains_canary(pack
, r
->blank_addr
, r
->howto
) != 0)
2636 sym
= (const struct kernel_symbol
*)r
->symbol
->value
;
2639 * Check that the sym->value reloc has been resolved,
2640 * if there is a Ksplice relocation there.
2642 r
= find_reloc(pack
->primary_relocs
,
2643 pack
->primary_relocs_end
,
2644 (unsigned long)&sym
->value
,
2645 sizeof(&sym
->value
));
2647 r
->blank_addr
== (unsigned long)&sym
->value
&&
2648 contains_canary(pack
, r
->blank_addr
, r
->howto
) != 0)
2650 return add_candidate_val(ipack
, vals
, sym
->value
);
2657 * When apply_patches is called, the update should be fully prepared.
2658 * apply_patches will try to actually insert trampolines for the
2661 static abort_t
apply_patches(struct update
*update
)
2665 struct ksplice_pack
*pack
;
2667 ret
= map_trampoline_pages(update
);
2671 list_for_each_entry(pack
, &update
->packs
, list
) {
2672 const typeof(int (*)(void)) *f
;
2673 for (f
= pack
->pre_apply
; f
< pack
->pre_apply_end
; f
++) {
2681 for (i
= 0; i
< 5; i
++) {
2682 cleanup_conflicts(update
);
2683 #ifdef KSPLICE_STANDALONE
2685 #endif /* KSPLICE_STANDALONE */
2686 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
2687 ret
= (__force abort_t
)stop_machine(__apply_patches
, update
,
2689 #else /* LINUX_VERSION_CODE < */
2690 /* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
2691 ret
= (__force abort_t
)stop_machine_run(__apply_patches
, update
,
2693 #endif /* LINUX_VERSION_CODE */
2694 #ifdef KSPLICE_STANDALONE
2696 #endif /* KSPLICE_STANDALONE */
2697 if (ret
!= CODE_BUSY
)
2699 set_current_state(TASK_INTERRUPTIBLE
);
2700 schedule_timeout(msecs_to_jiffies(1000));
2703 unmap_trampoline_pages(update
);
2705 if (ret
== CODE_BUSY
) {
2706 print_conflicts(update
);
2707 _ksdebug(update
, "Aborted %s. stack check: to-be-replaced "
2708 "code is busy.\n", update
->kid
);
2709 } else if (ret
== ALREADY_REVERSED
) {
2710 _ksdebug(update
, "Aborted %s. Ksplice update %s is already "
2711 "reversed.\n", update
->kid
, update
->kid
);
2715 list_for_each_entry(pack
, &update
->packs
, list
) {
2716 const typeof(void (*)(void)) *f
;
2717 for (f
= pack
->fail_apply
; f
< pack
->fail_apply_end
;
2725 list_for_each_entry(pack
, &update
->packs
, list
) {
2726 const typeof(void (*)(void)) *f
;
2727 for (f
= pack
->post_apply
; f
< pack
->post_apply_end
; f
++)
2731 _ksdebug(update
, "Atomic patch insertion for %s complete\n",
2736 static abort_t
reverse_patches(struct update
*update
)
2740 struct ksplice_pack
*pack
;
2742 clear_debug_buf(update
);
2743 ret
= init_debug_buf(update
);
2747 _ksdebug(update
, "Preparing to reverse %s\n", update
->kid
);
2749 ret
= map_trampoline_pages(update
);
2753 list_for_each_entry(pack
, &update
->packs
, list
) {
2754 const typeof(int (*)(void)) *f
;
2755 for (f
= pack
->pre_reverse
; f
< pack
->pre_reverse_end
; f
++) {
2763 for (i
= 0; i
< 5; i
++) {
2764 cleanup_conflicts(update
);
2765 clear_list(&update
->conflicts
, struct conflict
, list
);
2766 #ifdef KSPLICE_STANDALONE
2768 #endif /* KSPLICE_STANDALONE */
2769 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
2770 ret
= (__force abort_t
)stop_machine(__reverse_patches
, update
,
2772 #else /* LINUX_VERSION_CODE < */
2773 /* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
2774 ret
= (__force abort_t
)stop_machine_run(__reverse_patches
,
2776 #endif /* LINUX_VERSION_CODE */
2777 #ifdef KSPLICE_STANDALONE
2779 #endif /* KSPLICE_STANDALONE */
2780 if (ret
!= CODE_BUSY
)
2782 set_current_state(TASK_INTERRUPTIBLE
);
2783 schedule_timeout(msecs_to_jiffies(1000));
2786 unmap_trampoline_pages(update
);
2788 if (ret
== CODE_BUSY
) {
2789 print_conflicts(update
);
2790 _ksdebug(update
, "Aborted %s. stack check: to-be-reversed "
2791 "code is busy.\n", update
->kid
);
2792 } else if (ret
== MODULE_BUSY
) {
2793 _ksdebug(update
, "Update %s is in use by another module\n",
2798 list_for_each_entry(pack
, &update
->packs
, list
) {
2799 const typeof(void (*)(void)) *f
;
2800 for (f
= pack
->fail_reverse
; f
< pack
->fail_reverse_end
;
2808 list_for_each_entry(pack
, &update
->packs
, list
) {
2809 const typeof(void (*)(void)) *f
;
2810 for (f
= pack
->post_reverse
; f
< pack
->post_reverse_end
; f
++)
2814 list_for_each_entry(pack
, &update
->packs
, list
)
2815 clear_list(&pack
->safety_records
, struct safety_record
, list
);
2817 _ksdebug(update
, "Atomic patch removal for %s complete\n", update
->kid
);
2821 /* Atomically insert the update; run from within stop_machine */
2822 static int __apply_patches(void *updateptr
)
2824 struct update
*update
= updateptr
;
2825 struct ksplice_pack
*pack
;
2826 struct ksplice_patch
*p
;
2829 if (update
->stage
== STAGE_APPLIED
)
2830 return (__force
int)OK
;
2832 if (update
->stage
!= STAGE_PREPARING
)
2833 return (__force
int)UNEXPECTED
;
2835 ret
= check_each_task(update
);
2837 return (__force
int)ret
;
2839 list_for_each_entry(pack
, &update
->packs
, list
) {
2840 if (try_module_get(pack
->primary
) != 1) {
2841 struct ksplice_pack
*pack1
;
2842 list_for_each_entry(pack1
, &update
->packs
, list
) {
2845 module_put(pack1
->primary
);
2847 module_put(THIS_MODULE
);
2848 return (__force
int)UNEXPECTED
;
2852 list_for_each_entry(pack
, &update
->packs
, list
) {
2853 const typeof(int (*)(void)) *f
;
2854 for (f
= pack
->check_apply
; f
< pack
->check_apply_end
; f
++)
2856 return (__force
int)CALL_FAILED
;
2859 /* Commit point: the update application will succeed. */
2861 update
->stage
= STAGE_APPLIED
;
2862 #ifdef TAINT_KSPLICE
2863 add_taint(TAINT_KSPLICE
);
2866 list_for_each_entry(pack
, &update
->packs
, list
)
2867 list_add(&pack
->module_list_entry
.list
, &ksplice_module_list
);
2869 list_for_each_entry(pack
, &update
->packs
, list
) {
2870 for (p
= pack
->patches
; p
< pack
->patches_end
; p
++)
2871 insert_trampoline(p
);
2874 list_for_each_entry(pack
, &update
->packs
, list
) {
2875 const typeof(void (*)(void)) *f
;
2876 for (f
= pack
->apply
; f
< pack
->apply_end
; f
++)
2880 return (__force
int)OK
;
2883 /* Atomically remove the update; run from within stop_machine */
2884 static int __reverse_patches(void *updateptr
)
2886 struct update
*update
= updateptr
;
2887 struct ksplice_pack
*pack
;
2888 const struct ksplice_patch
*p
;
2891 if (update
->stage
!= STAGE_APPLIED
)
2892 return (__force
int)OK
;
2894 #ifdef CONFIG_MODULE_UNLOAD
2895 list_for_each_entry(pack
, &update
->packs
, list
) {
2896 if (module_refcount(pack
->primary
) != 1)
2897 return (__force
int)MODULE_BUSY
;
2899 #endif /* CONFIG_MODULE_UNLOAD */
2901 ret
= check_each_task(update
);
2903 return (__force
int)ret
;
2905 list_for_each_entry(pack
, &update
->packs
, list
) {
2906 for (p
= pack
->patches
; p
< pack
->patches_end
; p
++) {
2907 ret
= verify_trampoline(pack
, p
);
2909 return (__force
int)ret
;
2913 list_for_each_entry(pack
, &update
->packs
, list
) {
2914 const typeof(int (*)(void)) *f
;
2915 for (f
= pack
->check_reverse
; f
< pack
->check_reverse_end
; f
++)
2917 return (__force
int)CALL_FAILED
;
2920 /* Commit point: the update reversal will succeed. */
2922 update
->stage
= STAGE_REVERSED
;
2924 list_for_each_entry(pack
, &update
->packs
, list
)
2925 module_put(pack
->primary
);
2927 list_for_each_entry(pack
, &update
->packs
, list
)
2928 list_del(&pack
->module_list_entry
.list
);
2930 list_for_each_entry(pack
, &update
->packs
, list
) {
2931 const typeof(void (*)(void)) *f
;
2932 for (f
= pack
->reverse
; f
< pack
->reverse_end
; f
++)
2936 list_for_each_entry(pack
, &update
->packs
, list
) {
2937 for (p
= pack
->patches
; p
< pack
->patches_end
; p
++)
2938 remove_trampoline(p
);
2941 return (__force
int)OK
;
2945 * Check whether any thread's instruction pointer or any address of
2946 * its stack is contained in one of the safety_records associated with
2949 * check_each_task must be called from inside stop_machine, because it
2950 * does not take tasklist_lock (which cannot be held by anyone else
2951 * during stop_machine).
2953 static abort_t
check_each_task(struct update
*update
)
2955 const struct task_struct
*g
, *p
;
2956 abort_t status
= OK
, ret
;
2957 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
2958 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
2959 read_lock(&tasklist_lock
);
2960 #endif /* LINUX_VERSION_CODE */
2961 do_each_thread(g
, p
) {
2962 /* do_each_thread is a double loop! */
2963 ret
= check_task(update
, p
, false);
2965 check_task(update
, p
, true);
2968 if (ret
!= OK
&& ret
!= CODE_BUSY
)
2969 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
2970 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
2972 #else /* LINUX_VERSION_CODE < */
2974 #endif /* LINUX_VERSION_CODE */
2975 } while_each_thread(g
, p
);
2976 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
2977 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
2979 read_unlock(&tasklist_lock
);
2980 #endif /* LINUX_VERSION_CODE */
2984 static abort_t
check_task(struct update
*update
,
2985 const struct task_struct
*t
, bool rerun
)
2987 abort_t status
, ret
;
2988 struct conflict
*conf
= NULL
;
2991 conf
= kmalloc(sizeof(*conf
), GFP_ATOMIC
);
2993 return OUT_OF_MEMORY
;
2994 conf
->process_name
= kstrdup(t
->comm
, GFP_ATOMIC
);
2995 if (conf
->process_name
== NULL
) {
2997 return OUT_OF_MEMORY
;
3000 INIT_LIST_HEAD(&conf
->stack
);
3001 list_add(&conf
->list
, &update
->conflicts
);
3004 status
= check_address(update
, conf
, KSPLICE_IP(t
));
3006 ret
= check_stack(update
, conf
, task_thread_info(t
),
3007 (unsigned long *)__builtin_frame_address(0));
3010 } else if (!task_curr(t
)) {
3011 ret
= check_stack(update
, conf
, task_thread_info(t
),
3012 (unsigned long *)KSPLICE_SP(t
));
3015 } else if (!is_stop_machine(t
)) {
3016 status
= UNEXPECTED_RUNNING_TASK
;
3021 static abort_t
check_stack(struct update
*update
, struct conflict
*conf
,
3022 const struct thread_info
*tinfo
,
3023 const unsigned long *stack
)
3025 abort_t status
= OK
, ret
;
3028 while (valid_stack_ptr(tinfo
, stack
)) {
3030 ret
= check_address(update
, conf
, addr
);
3037 static abort_t
check_address(struct update
*update
,
3038 struct conflict
*conf
, unsigned long addr
)
3040 abort_t status
= OK
, ret
;
3041 const struct safety_record
*rec
;
3042 struct ksplice_pack
*pack
;
3043 struct conflict_addr
*ca
= NULL
;
3046 ca
= kmalloc(sizeof(*ca
), GFP_ATOMIC
);
3048 return OUT_OF_MEMORY
;
3050 ca
->has_conflict
= false;
3052 list_add(&ca
->list
, &conf
->stack
);
3055 list_for_each_entry(pack
, &update
->packs
, list
) {
3056 unsigned long tramp_addr
= follow_trampolines(pack
, addr
);
3057 list_for_each_entry(rec
, &pack
->safety_records
, list
) {
3058 ret
= check_record(ca
, rec
, tramp_addr
);
3066 static abort_t
check_record(struct conflict_addr
*ca
,
3067 const struct safety_record
*rec
, unsigned long addr
)
3069 if (addr
>= rec
->addr
&& addr
< rec
->addr
+ rec
->size
) {
3071 ca
->label
= rec
->label
;
3072 ca
->has_conflict
= true;
3079 /* Is the task one of the stop_machine tasks? */
3080 static bool is_stop_machine(const struct task_struct
*t
)
3082 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
3083 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
3084 const char *kstop_prefix
= "kstop/";
3085 #else /* LINUX_VERSION_CODE < */
3086 /* c9583e55fa2b08a230c549bd1e3c0bde6c50d9cc was after 2.6.27 */
3087 const char *kstop_prefix
= "kstop";
3088 #endif /* LINUX_VERSION_CODE */
3090 if (!starts_with(t
->comm
, kstop_prefix
))
3092 num
= t
->comm
+ strlen(kstop_prefix
);
3093 return num
[strspn(num
, "0123456789")] == '\0';
3094 #else /* LINUX_VERSION_CODE < */
3095 /* ffdb5976c47609c862917d4c186ecbb5706d2dda was after 2.6.26 */
3096 return strcmp(t
->comm
, "kstopmachine") == 0;
3097 #endif /* LINUX_VERSION_CODE */
3100 static void cleanup_conflicts(struct update
*update
)
3102 struct conflict
*conf
;
3103 list_for_each_entry(conf
, &update
->conflicts
, list
) {
3104 clear_list(&conf
->stack
, struct conflict_addr
, list
);
3105 kfree(conf
->process_name
);
3107 clear_list(&update
->conflicts
, struct conflict
, list
);
3110 static void print_conflicts(struct update
*update
)
3112 const struct conflict
*conf
;
3113 const struct conflict_addr
*ca
;
3114 list_for_each_entry(conf
, &update
->conflicts
, list
) {
3115 _ksdebug(update
, "stack check: pid %d (%s):", conf
->pid
,
3116 conf
->process_name
);
3117 list_for_each_entry(ca
, &conf
->stack
, list
) {
3118 _ksdebug(update
, " %lx", ca
->addr
);
3119 if (ca
->has_conflict
)
3120 _ksdebug(update
, " [<-CONFLICT]");
3122 _ksdebug(update
, "\n");
3126 static void insert_trampoline(struct ksplice_patch
*p
)
3128 mm_segment_t old_fs
= get_fs();
3130 memcpy(p
->saved
, p
->vaddr
, p
->size
);
3131 memcpy(p
->vaddr
, p
->contents
, p
->size
);
3132 flush_icache_range(p
->oldaddr
, p
->oldaddr
+ p
->size
);
3136 static abort_t
verify_trampoline(struct ksplice_pack
*pack
,
3137 const struct ksplice_patch
*p
)
3139 if (memcmp(p
->vaddr
, p
->contents
, p
->size
) != 0) {
3140 ksdebug(pack
, "Aborted. Trampoline at %lx has been "
3141 "overwritten.\n", p
->oldaddr
);
3147 static void remove_trampoline(const struct ksplice_patch
*p
)
3149 mm_segment_t old_fs
= get_fs();
3151 memcpy(p
->vaddr
, p
->saved
, p
->size
);
3152 flush_icache_range(p
->oldaddr
, p
->oldaddr
+ p
->size
);
3156 /* Returns NO_MATCH if there's already a labelval with a different value */
3157 static abort_t
create_labelval(struct ksplice_pack
*pack
,
3158 struct ksplice_symbol
*ksym
,
3159 unsigned long val
, int status
)
3161 val
= follow_trampolines(pack
, val
);
3162 if (ksym
->vals
== NULL
)
3163 return ksym
->value
== val
? OK
: NO_MATCH
;
3166 if (status
== TEMP
) {
3167 struct labelval
*lv
= kmalloc(sizeof(*lv
), GFP_KERNEL
);
3169 return OUT_OF_MEMORY
;
3171 lv
->saved_vals
= ksym
->vals
;
3172 list_add(&lv
->list
, &pack
->temp_labelvals
);
3179 * Creates a new safety_record for a helper section based on its
3180 * ksplice_section and run-pre matching information.
3182 static abort_t
create_safety_record(struct ksplice_pack
*pack
,
3183 const struct ksplice_section
*sect
,
3184 struct list_head
*record_list
,
3185 unsigned long run_addr
,
3186 unsigned long run_size
)
3188 struct safety_record
*rec
;
3189 struct ksplice_patch
*p
;
3191 if (record_list
== NULL
)
3194 for (p
= pack
->patches
; p
< pack
->patches_end
; p
++) {
3195 const struct ksplice_reloc
*r
= patch_reloc(pack
, p
);
3196 if (strcmp(sect
->symbol
->label
, r
->symbol
->label
) == 0)
3199 if (p
>= pack
->patches_end
)
3202 rec
= kmalloc(sizeof(*rec
), GFP_KERNEL
);
3204 return OUT_OF_MEMORY
;
3206 * The helper might be unloaded when checking reversing
3207 * patches, so we need to kstrdup the label here.
3209 rec
->label
= kstrdup(sect
->symbol
->label
, GFP_KERNEL
);
3210 if (rec
->label
== NULL
) {
3212 return OUT_OF_MEMORY
;
3214 rec
->addr
= run_addr
;
3215 rec
->size
= run_size
;
3217 list_add(&rec
->list
, record_list
);
3221 static abort_t
add_candidate_val(struct ksplice_pack
*pack
,
3222 struct list_head
*vals
, unsigned long val
)
3224 struct candidate_val
*tmp
, *new;
3227 * Careful: follow trampolines before comparing values so that we do
3228 * not mistake the obsolete function for another copy of the function.
3230 val
= follow_trampolines(pack
, val
);
3232 list_for_each_entry(tmp
, vals
, list
) {
3233 if (tmp
->val
== val
)
3236 new = kmalloc(sizeof(*new), GFP_KERNEL
);
3238 return OUT_OF_MEMORY
;
3240 list_add(&new->list
, vals
);
3244 static void release_vals(struct list_head
*vals
)
3246 clear_list(vals
, struct candidate_val
, list
);
3250 * The temp_labelvals list is used to cache those temporary labelvals
3251 * that have been created to cross-check the symbol values obtained
3252 * from different relocations within a single section being matched.
3254 * If status is VAL, commit the temp_labelvals as final values.
3256 * If status is NOVAL, restore the list of possible values to the
3257 * ksplice_symbol, so that it no longer has a known value.
3259 static void set_temp_labelvals(struct ksplice_pack
*pack
, int status
)
3261 struct labelval
*lv
, *n
;
3262 list_for_each_entry_safe(lv
, n
, &pack
->temp_labelvals
, list
) {
3263 if (status
== NOVAL
) {
3264 lv
->symbol
->vals
= lv
->saved_vals
;
3266 release_vals(lv
->saved_vals
);
3267 kfree(lv
->saved_vals
);
3269 list_del(&lv
->list
);
3274 /* Is there a Ksplice canary with given howto at blank_addr? */
3275 static int contains_canary(struct ksplice_pack
*pack
, unsigned long blank_addr
,
3276 const struct ksplice_reloc_howto
*howto
)
3278 switch (howto
->size
) {
3280 return (*(uint8_t *)blank_addr
& howto
->dst_mask
) ==
3281 (KSPLICE_CANARY
& howto
->dst_mask
);
3283 return (*(uint16_t *)blank_addr
& howto
->dst_mask
) ==
3284 (KSPLICE_CANARY
& howto
->dst_mask
);
3286 return (*(uint32_t *)blank_addr
& howto
->dst_mask
) ==
3287 (KSPLICE_CANARY
& howto
->dst_mask
);
3288 #if BITS_PER_LONG >= 64
3290 return (*(uint64_t *)blank_addr
& howto
->dst_mask
) ==
3291 (KSPLICE_CANARY
& howto
->dst_mask
);
3292 #endif /* BITS_PER_LONG */
3294 ksdebug(pack
, "Aborted. Invalid relocation size.\n");
3300 * Compute the address of the code you would actually run if you were
3301 * to call the function at addr (i.e., follow the sequence of jumps
3304 static unsigned long follow_trampolines(struct ksplice_pack
*pack
,
3307 unsigned long new_addr
;
3311 #ifdef KSPLICE_STANDALONE
3314 #endif /* KSPLICE_STANDALONE */
3315 if (!__kernel_text_address(addr
) ||
3316 trampoline_target(pack
, addr
, &new_addr
) != OK
)
3318 m
= __module_text_address(new_addr
);
3319 if (m
== NULL
|| m
== pack
->target
||
3320 !starts_with(m
->name
, "ksplice"))
3326 /* Does module a patch module b? */
3327 static bool patches_module(const struct module
*a
, const struct module
*b
)
3329 #ifdef KSPLICE_NO_KERNEL_SUPPORT
3333 if (a
== NULL
|| !starts_with(a
->name
, "ksplice_"))
3335 name
= a
->name
+ strlen("ksplice_");
3336 name
+= strcspn(name
, "_");
3340 return strcmp(name
, b
== NULL
? "vmlinux" : b
->name
) == 0;
3341 #else /* !KSPLICE_NO_KERNEL_SUPPORT */
3342 struct ksplice_module_list_entry
*entry
;
3345 list_for_each_entry(entry
, &ksplice_module_list
, list
) {
3346 if (entry
->target
== b
&& entry
->primary
== a
)
3350 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
/* Does str begin with prefix? (An empty prefix matches any string.) */
static bool starts_with(const char *str, const char *prefix)
{
	size_t prefix_len = strlen(prefix);
	return strncmp(str, prefix, prefix_len) == 0;
}
3358 static bool singular(struct list_head
*list
)
3360 return !list_empty(list
) && list
->next
->next
== list
;
3363 static void *bsearch(const void *key
, const void *base
, size_t n
,
3364 size_t size
, int (*cmp
)(const void *key
, const void *elt
))
3366 int start
= 0, end
= n
- 1, mid
, result
;
3369 while (start
<= end
) {
3370 mid
= (start
+ end
) / 2;
3371 result
= cmp(key
, base
+ mid
* size
);
3374 else if (result
> 0)
3377 return (void *)base
+ mid
* size
;
3382 static int compare_relocs(const void *a
, const void *b
)
3384 const struct ksplice_reloc
*ra
= a
, *rb
= b
;
3385 if (ra
->blank_addr
> rb
->blank_addr
)
3387 else if (ra
->blank_addr
< rb
->blank_addr
)
3390 return ra
->howto
->size
- rb
->howto
->size
;
3393 #ifdef KSPLICE_STANDALONE
3394 static int compare_system_map(const void *a
, const void *b
)
3396 const struct ksplice_system_map
*sa
= a
, *sb
= b
;
3397 return strcmp(sa
->label
, sb
->label
);
3399 #endif /* KSPLICE_STANDALONE */
3401 #ifdef CONFIG_DEBUG_FS
3402 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
3403 /* Old kernels don't have debugfs_create_blob */
3404 static ssize_t
read_file_blob(struct file
*file
, char __user
*user_buf
,
3405 size_t count
, loff_t
*ppos
)
3407 struct debugfs_blob_wrapper
*blob
= file
->private_data
;
3408 return simple_read_from_buffer(user_buf
, count
, ppos
, blob
->data
,
3412 static int blob_open(struct inode
*inode
, struct file
*file
)
3414 if (inode
->i_private
)
3415 file
->private_data
= inode
->i_private
;
3419 static struct file_operations fops_blob
= {
3420 .read
= read_file_blob
,
3424 static struct dentry
*debugfs_create_blob(const char *name
, mode_t mode
,
3425 struct dentry
*parent
,
3426 struct debugfs_blob_wrapper
*blob
)
3428 return debugfs_create_file(name
, mode
, parent
, blob
, &fops_blob
);
3430 #endif /* LINUX_VERSION_CODE */
3432 static abort_t
init_debug_buf(struct update
*update
)
3434 update
->debug_blob
.size
= 0;
3435 update
->debug_blob
.data
= NULL
;
3436 update
->debugfs_dentry
=
3437 debugfs_create_blob(update
->name
, S_IFREG
| S_IRUSR
, NULL
,
3438 &update
->debug_blob
);
3439 if (update
->debugfs_dentry
== NULL
)
3440 return OUT_OF_MEMORY
;
3444 static void clear_debug_buf(struct update
*update
)
3446 if (update
->debugfs_dentry
== NULL
)
3448 debugfs_remove(update
->debugfs_dentry
);
3449 update
->debugfs_dentry
= NULL
;
3450 update
->debug_blob
.size
= 0;
3451 vfree(update
->debug_blob
.data
);
3452 update
->debug_blob
.data
= NULL
;
3455 static int _ksdebug(struct update
*update
, const char *fmt
, ...)
3458 unsigned long size
, old_size
, new_size
;
3460 if (update
->debug
== 0)
3463 /* size includes the trailing '\0' */
3464 va_start(args
, fmt
);
3465 size
= 1 + vsnprintf(update
->debug_blob
.data
, 0, fmt
, args
);
3467 old_size
= update
->debug_blob
.size
== 0 ? 0 :
3468 max(PAGE_SIZE
, roundup_pow_of_two(update
->debug_blob
.size
));
3469 new_size
= update
->debug_blob
.size
+ size
== 0 ? 0 :
3470 max(PAGE_SIZE
, roundup_pow_of_two(update
->debug_blob
.size
+ size
));
3471 if (new_size
> old_size
) {
3472 char *buf
= vmalloc(new_size
);
3475 memcpy(buf
, update
->debug_blob
.data
, update
->debug_blob
.size
);
3476 vfree(update
->debug_blob
.data
);
3477 update
->debug_blob
.data
= buf
;
3479 va_start(args
, fmt
);
3480 update
->debug_blob
.size
+= vsnprintf(update
->debug_blob
.data
+
3481 update
->debug_blob
.size
,
3486 #else /* CONFIG_DEBUG_FS */
3487 static abort_t
init_debug_buf(struct update
*update
)
/* No debugfs: nothing to tear down. */
static void clear_debug_buf(struct update *update)
{
	return;
}
3497 static int _ksdebug(struct update
*update
, const char *fmt
, ...)
3501 if (update
->debug
== 0)
3504 if (!update
->debug_continue_line
)
3505 printk(KERN_DEBUG
"ksplice: ");
3507 va_start(args
, fmt
);
3508 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
3510 #else /* LINUX_VERSION_CODE < */
3511 /* 683b229286b429244f35726b3c18caec429233bd was after 2.6.8 */
3513 char *buf
= kvasprintf(GFP_KERNEL
, fmt
, args
);
3517 #endif /* LINUX_VERSION_CODE */
3520 update
->debug_continue_line
=
3521 fmt
[0] == '\0' || fmt
[strlen(fmt
) - 1] != '\n';
3524 #endif /* CONFIG_DEBUG_FS */
3526 #ifdef KSPLICE_NO_KERNEL_SUPPORT
3527 #ifdef CONFIG_KALLSYMS
3528 static int kallsyms_on_each_symbol(int (*fn
)(void *, const char *,
3529 struct module
*, unsigned long),
3532 char namebuf
[KSYM_NAME_LEN
];
3534 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3536 #endif /* LINUX_VERSION_CODE */
3539 /* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
3540 * 2.6.10 was the first release after this commit
3542 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3543 for (i
= 0, off
= 0; i
< kallsyms_num_syms
; i
++) {
3544 off
= kallsyms_expand_symbol(off
, namebuf
);
3545 ret
= fn(data
, namebuf
, NULL
, kallsyms_addresses
[i
]);
3549 #else /* LINUX_VERSION_CODE < */
3552 for (i
= 0, knames
= kallsyms_names
; i
< kallsyms_num_syms
; i
++) {
3553 unsigned prefix
= *knames
++;
3555 strlcpy(namebuf
+ prefix
, knames
, KSYM_NAME_LEN
- prefix
);
3557 ret
= fn(data
, namebuf
, NULL
, kallsyms_addresses
[i
]);
3561 knames
+= strlen(knames
) + 1;
3563 #endif /* LINUX_VERSION_CODE */
3564 return module_kallsyms_on_each_symbol(fn
, data
);
3567 /* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
3568 * 2.6.10 was the first release after this commit
3570 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3571 extern u8 kallsyms_token_table
[];
3572 extern u16 kallsyms_token_index
[];
3574 static unsigned int kallsyms_expand_symbol(unsigned int off
, char *result
)
3576 long len
, skipped_first
= 0;
3577 const u8
*tptr
, *data
;
3579 data
= &kallsyms_names
[off
];
3586 tptr
= &kallsyms_token_table
[kallsyms_token_index
[*data
]];
3591 if (skipped_first
) {
3604 #endif /* LINUX_VERSION_CODE */
3606 static int module_kallsyms_on_each_symbol(int (*fn
)(void *, const char *,
3615 list_for_each_entry(mod
, &modules
, list
) {
3616 for (i
= 0; i
< mod
->num_symtab
; i
++) {
3617 ret
= fn(data
, mod
->strtab
+ mod
->symtab
[i
].st_name
,
3618 mod
, mod
->symtab
[i
].st_value
);
3625 #endif /* CONFIG_KALLSYMS */
3627 static struct module
*find_module(const char *name
)
3631 list_for_each_entry(mod
, &modules
, list
) {
3632 if (strcmp(mod
->name
, name
) == 0)
3638 #ifdef CONFIG_MODULE_UNLOAD
3640 struct list_head list
;
3641 struct module
*module_which_uses
;
3644 /* I'm not yet certain whether we need the strong form of this. */
3645 static inline int strong_try_module_get(struct module
*mod
)
3647 if (mod
&& mod
->state
!= MODULE_STATE_LIVE
)
3649 if (try_module_get(mod
))
3654 /* Does a already use b? */
3655 static int already_uses(struct module
*a
, struct module
*b
)
3657 struct module_use
*use
;
3658 list_for_each_entry(use
, &b
->modules_which_use_me
, list
) {
3659 if (use
->module_which_uses
== a
)
3665 /* Make it so module a uses b. Must be holding module_mutex */
3666 static int use_module(struct module
*a
, struct module
*b
)
3668 struct module_use
*use
;
3669 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
3670 /* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
3672 #endif /* LINUX_VERSION_CODE */
3673 if (b
== NULL
|| already_uses(a
, b
))
3676 if (strong_try_module_get(b
) < 0)
3679 use
= kmalloc(sizeof(*use
), GFP_ATOMIC
);
3684 use
->module_which_uses
= a
;
3685 list_add(&use
->list
, &b
->modules_which_use_me
);
3686 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
3687 /* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
3688 no_warn
= sysfs_create_link(b
->holders_dir
, &a
->mkobj
.kobj
, a
->name
);
3689 #endif /* LINUX_VERSION_CODE */
3692 #else /* CONFIG_MODULE_UNLOAD */
/* Without CONFIG_MODULE_UNLOAD there is no usage bookkeeping to do. */
static int use_module(struct module *a, struct module *b)
{
	return 1;
}
3697 #endif /* CONFIG_MODULE_UNLOAD */
#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else /* CONFIG_MODVERSIONS */
/* Fully parenthesize the macro arguments (the original left `base`
 * unparenthesized in the NULL comparison, risking precedence surprises
 * if a non-trivial expression is passed). */
#define symversion(base, idx) (((base) != NULL) ? ((base) + (idx)) : NULL)
#endif /* CONFIG_MODVERSIONS */
3705 static bool each_symbol_in_section(const struct symsearch
*arr
,
3706 unsigned int arrsize
,
3707 struct module
*owner
,
3708 bool (*fn
)(const struct symsearch
*syms
,
3709 struct module
*owner
,
3710 unsigned int symnum
, void *data
),
3715 for (j
= 0; j
< arrsize
; j
++) {
3716 for (i
= 0; i
< arr
[j
].stop
- arr
[j
].start
; i
++)
3717 if (fn(&arr
[j
], owner
, i
, data
))
3724 /* Returns true as soon as fn returns true, otherwise false. */
3725 static bool each_symbol(bool (*fn
)(const struct symsearch
*arr
,
3726 struct module
*owner
,
3727 unsigned int symnum
, void *data
),
3731 const struct symsearch arr
[] = {
3732 { __start___ksymtab
, __stop___ksymtab
, __start___kcrctab
,
3733 NOT_GPL_ONLY
, false },
3734 { __start___ksymtab_gpl
, __stop___ksymtab_gpl
,
3735 __start___kcrctab_gpl
,
3737 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
3738 { __start___ksymtab_gpl_future
, __stop___ksymtab_gpl_future
,
3739 __start___kcrctab_gpl_future
,
3740 WILL_BE_GPL_ONLY
, false },
3741 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
3742 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
3743 { __start___ksymtab_unused
, __stop___ksymtab_unused
,
3744 __start___kcrctab_unused
,
3745 NOT_GPL_ONLY
, true },
3746 { __start___ksymtab_unused_gpl
, __stop___ksymtab_unused_gpl
,
3747 __start___kcrctab_unused_gpl
,
3749 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
3752 if (each_symbol_in_section(arr
, ARRAY_SIZE(arr
), NULL
, fn
, data
))
3755 list_for_each_entry(mod
, &modules
, list
) {
3756 struct symsearch module_arr
[] = {
3757 { mod
->syms
, mod
->syms
+ mod
->num_syms
, mod
->crcs
,
3758 NOT_GPL_ONLY
, false },
3759 { mod
->gpl_syms
, mod
->gpl_syms
+ mod
->num_gpl_syms
,
3762 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
3763 { mod
->gpl_future_syms
,
3764 mod
->gpl_future_syms
+ mod
->num_gpl_future_syms
,
3765 mod
->gpl_future_crcs
,
3766 WILL_BE_GPL_ONLY
, false },
3767 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
3768 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
3770 mod
->unused_syms
+ mod
->num_unused_syms
,
3772 NOT_GPL_ONLY
, true },
3773 { mod
->unused_gpl_syms
,
3774 mod
->unused_gpl_syms
+ mod
->num_unused_gpl_syms
,
3775 mod
->unused_gpl_crcs
,
3777 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
3780 if (each_symbol_in_section(module_arr
, ARRAY_SIZE(module_arr
),
3787 struct find_symbol_arg
{
3794 struct module
*owner
;
3795 const unsigned long *crc
;
3796 const struct kernel_symbol
*sym
;
3799 static bool find_symbol_in_section(const struct symsearch
*syms
,
3800 struct module
*owner
,
3801 unsigned int symnum
, void *data
)
3803 struct find_symbol_arg
*fsa
= data
;
3805 if (strcmp(syms
->start
[symnum
].name
, fsa
->name
) != 0)
3809 if (syms
->licence
== GPL_ONLY
)
3811 if (syms
->licence
== WILL_BE_GPL_ONLY
&& fsa
->warn
) {
3812 printk(KERN_WARNING
"Symbol %s is being used "
3813 "by a non-GPL module, which will not "
3814 "be allowed in the future\n", fsa
->name
);
3815 printk(KERN_WARNING
"Please see the file "
3816 "Documentation/feature-removal-schedule.txt "
3817 "in the kernel source tree for more details.\n");
3821 #ifdef CONFIG_UNUSED_SYMBOLS
3822 if (syms
->unused
&& fsa
->warn
) {
3823 printk(KERN_WARNING
"Symbol %s is marked as UNUSED, "
3824 "however this module is using it.\n", fsa
->name
);
3826 "This symbol will go away in the future.\n");
3828 "Please evalute if this is the right api to use and if "
3829 "it really is, submit a report the linux kernel "
3830 "mailinglist together with submitting your code for "
3836 fsa
->crc
= symversion(syms
->crcs
, symnum
);
3837 fsa
->sym
= &syms
->start
[symnum
];
3841 /* Find a symbol and return it, along with, (optional) crc and
3842 * (optional) module which owns it */
3843 static const struct kernel_symbol
*find_symbol(const char *name
,
3844 struct module
**owner
,
3845 const unsigned long **crc
,
3846 bool gplok
, bool warn
)
3848 struct find_symbol_arg fsa
;
3854 if (each_symbol(find_symbol_in_section
, &fsa
)) {
3865 static struct module
*__module_data_address(unsigned long addr
)
3869 list_for_each_entry(mod
, &modules
, list
) {
3870 if (addr
>= (unsigned long)mod
->module_core
+
3871 mod
->core_text_size
&&
3872 addr
< (unsigned long)mod
->module_core
+ mod
->core_size
)
3877 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
3879 struct ksplice_attribute
{
3880 struct attribute attr
;
3881 ssize_t (*show
)(struct update
*update
, char *buf
);
3882 ssize_t (*store
)(struct update
*update
, const char *buf
, size_t len
);
3885 static ssize_t
ksplice_attr_show(struct kobject
*kobj
, struct attribute
*attr
,
3888 struct ksplice_attribute
*attribute
=
3889 container_of(attr
, struct ksplice_attribute
, attr
);
3890 struct update
*update
= container_of(kobj
, struct update
, kobj
);
3891 if (attribute
->show
== NULL
)
3893 return attribute
->show(update
, buf
);
3896 static ssize_t
ksplice_attr_store(struct kobject
*kobj
, struct attribute
*attr
,
3897 const char *buf
, size_t len
)
3899 struct ksplice_attribute
*attribute
=
3900 container_of(attr
, struct ksplice_attribute
, attr
);
3901 struct update
*update
= container_of(kobj
, struct update
, kobj
);
3902 if (attribute
->store
== NULL
)
3904 return attribute
->store(update
, buf
, len
);
3907 static struct sysfs_ops ksplice_sysfs_ops
= {
3908 .show
= ksplice_attr_show
,
3909 .store
= ksplice_attr_store
,
3912 static void ksplice_release(struct kobject
*kobj
)
3914 struct update
*update
;
3915 update
= container_of(kobj
, struct update
, kobj
);
3916 cleanup_ksplice_update(update
);
3919 static ssize_t
stage_show(struct update
*update
, char *buf
)
3921 switch (update
->stage
) {
3922 case STAGE_PREPARING
:
3923 return snprintf(buf
, PAGE_SIZE
, "preparing\n");
3925 return snprintf(buf
, PAGE_SIZE
, "applied\n");
3926 case STAGE_REVERSED
:
3927 return snprintf(buf
, PAGE_SIZE
, "reversed\n");
3932 static ssize_t
abort_cause_show(struct update
*update
, char *buf
)
3934 switch (update
->abort_cause
) {
3936 return snprintf(buf
, PAGE_SIZE
, "ok\n");
3938 return snprintf(buf
, PAGE_SIZE
, "no_match\n");
3939 #ifdef KSPLICE_STANDALONE
3940 case BAD_SYSTEM_MAP
:
3941 return snprintf(buf
, PAGE_SIZE
, "bad_system_map\n");
3942 #endif /* KSPLICE_STANDALONE */
3944 return snprintf(buf
, PAGE_SIZE
, "code_busy\n");
3946 return snprintf(buf
, PAGE_SIZE
, "module_busy\n");
3948 return snprintf(buf
, PAGE_SIZE
, "out_of_memory\n");
3949 case FAILED_TO_FIND
:
3950 return snprintf(buf
, PAGE_SIZE
, "failed_to_find\n");
3951 case ALREADY_REVERSED
:
3952 return snprintf(buf
, PAGE_SIZE
, "already_reversed\n");
3953 case MISSING_EXPORT
:
3954 return snprintf(buf
, PAGE_SIZE
, "missing_export\n");
3955 case UNEXPECTED_RUNNING_TASK
:
3956 return snprintf(buf
, PAGE_SIZE
, "unexpected_running_task\n");
3957 case TARGET_NOT_LOADED
:
3958 return snprintf(buf
, PAGE_SIZE
, "target_not_loaded\n");
3960 return snprintf(buf
, PAGE_SIZE
, "call_failed\n");
3962 return snprintf(buf
, PAGE_SIZE
, "unexpected\n");
3964 return snprintf(buf
, PAGE_SIZE
, "unknown\n");
3969 static ssize_t
conflict_show(struct update
*update
, char *buf
)
3971 const struct conflict
*conf
;
3972 const struct conflict_addr
*ca
;
3974 mutex_lock(&module_mutex
);
3975 list_for_each_entry(conf
, &update
->conflicts
, list
) {
3976 used
+= snprintf(buf
+ used
, PAGE_SIZE
- used
, "%s %d",
3977 conf
->process_name
, conf
->pid
);
3978 list_for_each_entry(ca
, &conf
->stack
, list
) {
3979 if (!ca
->has_conflict
)
3981 used
+= snprintf(buf
+ used
, PAGE_SIZE
- used
, " %s",
3984 used
+= snprintf(buf
+ used
, PAGE_SIZE
- used
, "\n");
3986 mutex_unlock(&module_mutex
);
3990 /* Used to pass maybe_cleanup_ksplice_update to kthread_run */
3991 static int maybe_cleanup_ksplice_update_wrapper(void *updateptr
)
3993 struct update
*update
= updateptr
;
3994 mutex_lock(&module_mutex
);
3995 maybe_cleanup_ksplice_update(update
);
3996 mutex_unlock(&module_mutex
);
4000 static ssize_t
stage_store(struct update
*update
, const char *buf
, size_t len
)
4002 enum stage old_stage
;
4003 mutex_lock(&module_mutex
);
4004 old_stage
= update
->stage
;
4005 if ((strncmp(buf
, "applied", len
) == 0 ||
4006 strncmp(buf
, "applied\n", len
) == 0) &&
4007 update
->stage
== STAGE_PREPARING
)
4008 update
->abort_cause
= apply_update(update
);
4009 else if ((strncmp(buf
, "reversed", len
) == 0 ||
4010 strncmp(buf
, "reversed\n", len
) == 0) &&
4011 update
->stage
== STAGE_APPLIED
)
4012 update
->abort_cause
= reverse_patches(update
);
4013 else if ((strncmp(buf
, "cleanup", len
) == 0 ||
4014 strncmp(buf
, "cleanup\n", len
) == 0) &&
4015 update
->stage
== STAGE_REVERSED
)
4016 kthread_run(maybe_cleanup_ksplice_update_wrapper
, update
,
4017 "ksplice_cleanup_%s", update
->kid
);
4019 if (old_stage
!= STAGE_REVERSED
&& update
->abort_cause
== OK
)
4020 printk(KERN_INFO
"ksplice: Update %s %s successfully\n",
4022 update
->stage
== STAGE_APPLIED
? "applied" : "reversed");
4023 mutex_unlock(&module_mutex
);
4027 static ssize_t
debug_show(struct update
*update
, char *buf
)
4029 return snprintf(buf
, PAGE_SIZE
, "%d\n", update
->debug
);
4032 static ssize_t
debug_store(struct update
*update
, const char *buf
, size_t len
)
4035 int ret
= strict_strtoul(buf
, 10, &l
);
4042 static ssize_t
partial_show(struct update
*update
, char *buf
)
4044 return snprintf(buf
, PAGE_SIZE
, "%d\n", update
->partial
);
4047 static ssize_t
partial_store(struct update
*update
, const char *buf
, size_t len
)
4050 int ret
= strict_strtoul(buf
, 10, &l
);
4053 update
->partial
= l
;
4057 static struct ksplice_attribute stage_attribute
=
4058 __ATTR(stage
, 0600, stage_show
, stage_store
);
4059 static struct ksplice_attribute abort_cause_attribute
=
4060 __ATTR(abort_cause
, 0400, abort_cause_show
, NULL
);
4061 static struct ksplice_attribute debug_attribute
=
4062 __ATTR(debug
, 0600, debug_show
, debug_store
);
4063 static struct ksplice_attribute partial_attribute
=
4064 __ATTR(partial
, 0600, partial_show
, partial_store
);
4065 static struct ksplice_attribute conflict_attribute
=
4066 __ATTR(conflicts
, 0400, conflict_show
, NULL
);
4068 static struct attribute
*ksplice_attrs
[] = {
4069 &stage_attribute
.attr
,
4070 &abort_cause_attribute
.attr
,
4071 &debug_attribute
.attr
,
4072 &partial_attribute
.attr
,
4073 &conflict_attribute
.attr
,
4077 static struct kobj_type ksplice_ktype
= {
4078 .sysfs_ops
= &ksplice_sysfs_ops
,
4079 .release
= ksplice_release
,
4080 .default_attrs
= ksplice_attrs
,
4083 #ifdef KSPLICE_STANDALONE
4085 module_param(debug
, int, 0600);
4086 MODULE_PARM_DESC(debug
, "Debug level");
4088 extern struct ksplice_system_map ksplice_system_map
[], ksplice_system_map_end
[];
4090 static struct ksplice_pack bootstrap_pack
= {
4091 .name
= "ksplice_" __stringify(KSPLICE_KID
),
4092 .kid
= "init_" __stringify(KSPLICE_KID
),
4093 .target_name
= NULL
,
4095 .map_printk
= MAP_PRINTK
,
4096 .primary
= THIS_MODULE
,
4097 .primary_system_map
= ksplice_system_map
,
4098 .primary_system_map_end
= ksplice_system_map_end
,
4100 #endif /* KSPLICE_STANDALONE */
4102 static int init_ksplice(void)
4104 #ifdef KSPLICE_STANDALONE
4105 struct ksplice_pack
*pack
= &bootstrap_pack
;
4106 pack
->update
= init_ksplice_update(pack
->kid
);
4107 sort(pack
->primary_system_map
,
4108 pack
->primary_system_map_end
- pack
->primary_system_map
,
4109 sizeof(struct ksplice_system_map
), compare_system_map
, NULL
);
4110 if (pack
->update
== NULL
)
4112 add_to_update(pack
, pack
->update
);
4113 pack
->update
->debug
= debug
;
4114 pack
->update
->abort_cause
=
4115 apply_relocs(pack
, ksplice_init_relocs
, ksplice_init_relocs_end
);
4116 if (pack
->update
->abort_cause
== OK
)
4117 bootstrapped
= true;
4118 cleanup_ksplice_update(bootstrap_pack
.update
);
4119 #else /* !KSPLICE_STANDALONE */
4120 ksplice_kobj
= kobject_create_and_add("ksplice", kernel_kobj
);
4121 if (ksplice_kobj
== NULL
)
4123 #endif /* KSPLICE_STANDALONE */
4127 static void cleanup_ksplice(void)
4129 #ifndef KSPLICE_STANDALONE
4130 kobject_put(ksplice_kobj
);
4131 #endif /* KSPLICE_STANDALONE */
4134 module_init(init_ksplice
);
4135 module_exit(cleanup_ksplice
);
4137 MODULE_AUTHOR("Ksplice, Inc.");
4138 MODULE_DESCRIPTION("Ksplice rebootless update system");
4139 #ifdef KSPLICE_VERSION
4140 MODULE_VERSION(KSPLICE_VERSION
);
4142 MODULE_LICENSE("GPL v2");