1 /* Copyright (C) 2007-2009 Ksplice, Inc.
2 * Authors: Jeff Arnold, Anders Kaseorg, Tim Abbott
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
18 #include <linux/module.h>
19 #include <linux/version.h>
20 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
21 #include <linux/bug.h>
22 #else /* LINUX_VERSION_CODE */
23 /* 7664c5a1da4711bb6383117f51b94c8dc8f3f1cd was after 2.6.19 */
24 #endif /* LINUX_VERSION_CODE */
25 #include <linux/ctype.h>
26 #if defined CONFIG_DEBUG_FS || LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
27 #include <linux/debugfs.h>
28 #else /* CONFIG_DEBUG_FS */
29 /* a7a76cefc4b12bb6508afa4c77f11c2752cc365d was after 2.6.11 */
30 #endif /* CONFIG_DEBUG_FS */
31 #include <linux/errno.h>
32 #include <linux/kallsyms.h>
33 #include <linux/kobject.h>
34 #include <linux/kthread.h>
35 #include <linux/pagemap.h>
36 #include <linux/sched.h>
37 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
38 #include <linux/sort.h>
39 #else /* LINUX_VERSION_CODE < */
40 /* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
41 #endif /* LINUX_VERSION_CODE */
42 #include <linux/stop_machine.h>
43 #include <linux/sysfs.h>
44 #include <linux/time.h>
45 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
46 #include <linux/uaccess.h>
47 #else /* LINUX_VERSION_CODE < */
48 /* linux/uaccess.h doesn't exist in kernels before 2.6.18 */
49 #include <asm/uaccess.h>
50 #endif /* LINUX_VERSION_CODE */
51 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
52 #include <linux/utsrelease.h>
53 #else /* LINUX_VERSION_CODE < */
54 /* 63104eec234bdecb55fd9c15467ae00d0a3f42ac was after 2.6.17 */
55 #endif /* LINUX_VERSION_CODE */
56 #include <linux/vmalloc.h>
57 #ifdef KSPLICE_STANDALONE
59 #else /* !KSPLICE_STANDALONE */
60 #include <linux/ksplice.h>
61 #endif /* KSPLICE_STANDALONE */
62 #ifdef KSPLICE_NEED_PARAINSTRUCTIONS
63 #include <asm/alternative.h>
64 #endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
66 #if defined(KSPLICE_STANDALONE) && \
67 !defined(CONFIG_KSPLICE) && !defined(CONFIG_KSPLICE_MODULE)
68 #define KSPLICE_NO_KERNEL_SUPPORT 1
69 #endif /* KSPLICE_STANDALONE && !CONFIG_KSPLICE && !CONFIG_KSPLICE_MODULE */
72 STAGE_PREPARING
, /* the update is not yet applied */
73 STAGE_APPLIED
, /* the update is applied */
74 STAGE_REVERSED
, /* the update has been applied and reversed */
77 /* parameter to modify run-pre matching */
79 RUN_PRE_INITIAL
, /* dry run (only change temp_labelvals) */
80 RUN_PRE_DEBUG
, /* dry run with byte-by-byte debugging */
81 RUN_PRE_FINAL
, /* finalizes the matching */
82 #ifndef CONFIG_FUNCTION_DATA_SECTIONS
84 #endif /* !CONFIG_FUNCTION_DATA_SECTIONS */
87 enum { NOVAL
, TEMP
, VAL
};
89 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
90 /* 5d7b32de9935c65ca8285ac6ec2382afdbb5d479 was after 2.6.8 */
92 #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
93 /* af4ca457eaf2d6682059c18463eb106e2ce58198 was after 2.6.14 */
94 #define __bitwise__ __bitwise
97 typedef int __bitwise__ abort_t
;
99 #define OK ((__force abort_t) 0)
100 #define NO_MATCH ((__force abort_t) 1)
101 #define CODE_BUSY ((__force abort_t) 2)
102 #define MODULE_BUSY ((__force abort_t) 3)
103 #define OUT_OF_MEMORY ((__force abort_t) 4)
104 #define FAILED_TO_FIND ((__force abort_t) 5)
105 #define ALREADY_REVERSED ((__force abort_t) 6)
106 #define MISSING_EXPORT ((__force abort_t) 7)
107 #define UNEXPECTED_RUNNING_TASK ((__force abort_t) 8)
108 #define UNEXPECTED ((__force abort_t) 9)
109 #define TARGET_NOT_LOADED ((__force abort_t) 10)
110 #define CALL_FAILED ((__force abort_t) 11)
111 #define COLD_UPDATE_LOADED ((__force abort_t) 12)
112 #ifdef KSPLICE_STANDALONE
113 #define BAD_SYSTEM_MAP ((__force abort_t) 13)
114 #endif /* KSPLICE_STANDALONE */
123 #ifdef CONFIG_DEBUG_FS
124 struct debugfs_blob_wrapper debug_blob
;
125 struct dentry
*debugfs_dentry
;
126 #else /* !CONFIG_DEBUG_FS */
127 bool debug_continue_line
;
128 #endif /* CONFIG_DEBUG_FS */
129 bool partial
; /* is it OK if some target mods aren't loaded */
130 struct list_head changes
, /* changes for loaded target mods */
131 unused_changes
; /* changes for non-loaded target mods */
132 struct list_head conflicts
;
133 struct list_head list
;
134 struct list_head ksplice_module_list
;
137 /* a process conflicting with an update */
139 const char *process_name
;
141 struct list_head stack
;
142 struct list_head list
;
145 /* an address on the stack of a conflict */
146 struct conflict_addr
{
147 unsigned long addr
; /* the address on the stack */
148 bool has_conflict
; /* does this address in particular conflict? */
149 const char *label
; /* the label of the conflicting safety_record */
150 struct list_head list
;
153 #if defined(CONFIG_DEBUG_FS) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
154 /* Old kernels don't have debugfs_create_blob */
155 struct debugfs_blob_wrapper
{
159 #endif /* CONFIG_DEBUG_FS && LINUX_VERSION_CODE */
161 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
162 /* 930631edd4b1fe2781d9fe90edbe35d89dfc94cc was after 2.6.18 */
163 #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
167 struct list_head list
;
168 struct ksplice_symbol
*symbol
;
169 struct list_head
*saved_vals
;
172 /* region to be checked for conflicts in the stack check */
173 struct safety_record
{
174 struct list_head list
;
176 unsigned long addr
; /* the address to be checked for conflicts
177 * (e.g. an obsolete function's starting addr)
179 unsigned long size
; /* the size of the region to be checked */
182 /* possible value for a symbol */
183 struct candidate_val
{
184 struct list_head list
;
188 /* private struct used by init_symbol_array */
189 struct ksplice_lookup
{
191 struct ksplice_mod_change
*change
;
192 struct ksplice_symbol
**arr
;
198 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
199 /* c6b37801911d7f4663c99cad8aa230bc934cea82 was after 2.6.29 */
201 const struct kernel_symbol
*start
, *stop
;
202 const unsigned long *crcs
;
210 #endif /* LINUX_VERSION_CODE */
/* Compat shims for kernels before 2.6.26, which lack probe_kernel_read().
 * NOTE(review): the extraction that produced this file dropped interior
 * lines (the embedded numbering jumps, e.g. 217 -> 220), so the function
 * braces and some statements are missing from this view.  Code is left
 * byte-identical. */
212 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
213 /* c33fa9f5609e918824446ef9a75319d4a802f1f4 was after 2.6.25 */
215 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
216 /* 2fff0a48416af891dce38fd425246e337831e0bb was after 2.6.19 */
/* Return true when a read of the given kernel virtual address would not
 * fault, by probing it with probe_kernel_address(). */
217 static bool virtual_address_mapped(unsigned long addr
)
220 return probe_kernel_address(addr
, retval
) != -EFAULT
;
222 #else /* LINUX_VERSION_CODE < */
/* Pre-2.6.20: prototype only here — presumably defined elsewhere
 * (arch-specific); TODO confirm against the full source. */
223 static bool virtual_address_mapped(unsigned long addr
);
224 #endif /* LINUX_VERSION_CODE */
/* Copy [src, src + size) into dst, after first checking that both the
 * first and the last byte of the source range are mapped. */
226 static long probe_kernel_read(void *dst
, void *src
, size_t size
)
230 if (!virtual_address_mapped((unsigned long)src
) ||
231 !virtual_address_mapped((unsigned long)src
+ size
- 1))
234 memcpy(dst
, src
, size
);
237 #endif /* LINUX_VERSION_CODE */
239 static LIST_HEAD(updates
);
240 #ifdef KSPLICE_STANDALONE
241 #if defined(CONFIG_KSPLICE) || defined(CONFIG_KSPLICE_MODULE)
242 extern struct list_head ksplice_modules
;
243 #else /* !CONFIG_KSPLICE */
244 LIST_HEAD(ksplice_modules
);
245 #endif /* CONFIG_KSPLICE */
246 #else /* !KSPLICE_STANDALONE */
247 LIST_HEAD(ksplice_modules
);
248 EXPORT_SYMBOL_GPL(ksplice_modules
);
249 static struct kobject
*ksplice_kobj
;
250 #endif /* KSPLICE_STANDALONE */
252 static struct kobj_type update_ktype
;
/* Fallback kcalloc() (zeroed array allocation) for kernels before 2.6.9.
 * NOTE(review): interior lines were dropped by extraction (numbering jumps
 * 258 -> 261, 261 -> 263), so the overflow bail-out and return statements
 * are missing from this view.  Code is left byte-identical. */
254 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
255 /* Old kernels do not have kcalloc
256 * e629946abd0bb8266e9c3d0fd1bff2ef8dec5443 was after 2.6.8
258 static void *kcalloc(size_t n
, size_t size
, typeof(GFP_KERNEL
) flags
)
/* Reject requests where n * size would overflow before multiplying. */
261 if (n
!= 0 && size
> ULONG_MAX
/ n
)
263 mem
= kmalloc(n
* size
, flags
);
/* Zero-fill to match real kcalloc() semantics. */
265 memset(mem
, 0, n
* size
);
268 #endif /* LINUX_VERSION_CODE */
/* Compat block for kernels before 2.6.12, which lack sort(); the two swap
 * helpers and sort() below mirror the kernel's lib/sort.c.
 * NOTE(review): extraction dropped interior lines, so the helper bodies
 * (temporary variable, closing statements) are partial here. */
270 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
271 /* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
/* Fast-path swap for 4-byte elements (chosen by sort() when size == 4). */
272 static void u32_swap(void *a
, void *b
, int size
)
275 *(u32
*)a
= *(u32
*)b
;
/* Byte-by-byte swap for elements of arbitrary size; used by sort() when
 * the 4-byte fast path does not apply.
 * NOTE(review): extraction dropped interior lines of the do/while body. */
279 static void generic_swap(void *a
, void *b
, int size
)
285 *(char *)a
++ = *(char *)b
;
287 } while (--size
> 0);
291 * sort - sort an array of elements
292 * @base: pointer to data to sort
293 * @num: number of elements
294 * @size: size of each element
295 * @cmp: pointer to comparison function
296 * @swap: pointer to swap function or NULL
298 * This function does a heapsort on the given array. You may provide a
299 * swap function optimized to your element type.
301 * Sorting time is O(n log n) both on average and worst-case. While
302 * qsort is about 20% faster on average, it suffers from exploitable
303 * O(n*n) worst-case behavior and extra memory requirements that make
304 * it less suitable for kernel use.
/* NOTE(review): this block mirrors the kernel's lib/sort.c heapsort; the
 * extraction dropped interior lines (e.g. the `c = r * 2 + size` child
 * index updates and several closing braces), so the loop bodies are
 * partial here.  Code is left byte-identical. */
307 void sort(void *base
, size_t num
, size_t size
,
308 int (*cmp
)(const void *, const void *),
309 void (*swap
)(void *, void *, int size
))
311 /* pre-scale counters for performance */
312 int i
= (num
/ 2 - 1) * size
, n
= num
* size
, c
, r
;
/* Pick the 4-byte fast-path swap when possible, else byte-wise swap. */
315 swap
= (size
== 4 ? u32_swap
: generic_swap
);
/* Phase 1: heapify — sift each non-leaf element down into place. */
318 for (; i
>= 0; i
-= size
) {
319 for (r
= i
; r
* 2 + size
< n
; r
= c
) {
321 if (c
< n
- size
&& cmp(base
+ c
, base
+ c
+ size
) < 0)
323 if (cmp(base
+ r
, base
+ c
) >= 0)
325 swap(base
+ r
, base
+ c
, size
);
/* Phase 2: repeatedly move the heap maximum to the end and re-sift. */
330 for (i
= n
- size
; i
> 0; i
-= size
) {
331 swap(base
, base
+ i
, size
);
332 for (r
= 0; r
* 2 + size
< i
; r
= c
) {
334 if (c
< i
- size
&& cmp(base
+ c
, base
+ c
+ size
) < 0)
336 if (cmp(base
+ r
, base
+ c
) >= 0)
338 swap(base
+ r
, base
+ c
, size
);
342 #endif /* LINUX_VERSION_CODE < */
/* Fallback kstrdup() for kernels before 2.6.13; the #define renames it so
 * it cannot clash with a kernel-provided symbol.
 * NOTE(review): extraction dropped interior lines (numbering jumps
 * 349 -> 358 -> 363): the strlen, NULL check, memcpy and return are
 * missing from this view.  Code is left byte-identical. */
344 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
345 /* Old kernels do not have kstrdup
346 * 543537bd922692bc978e2e356fcd8bfc9c2ee7d5 was after 2.6.12
348 #define kstrdup ksplice_kstrdup
349 static char *kstrdup(const char *s
, typeof(GFP_KERNEL
) gfp
)
/* Allocate a buffer for the copy of s (len computed on a dropped line). */
358 buf
= kmalloc(len
, gfp
);
363 #endif /* LINUX_VERSION_CODE */
365 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
366 /* Old kernels use semaphore instead of mutex
367 * 97d1f15b7ef52c1e9c28dc48b454024bb53a5fd2 was after 2.6.16
369 #define mutex semaphore
370 #define mutex_lock down
371 #define mutex_unlock up
372 #endif /* LINUX_VERSION_CODE */
374 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
375 /* 11443ec7d9286dd25663516436a14edfb5f43857 was after 2.6.21 */
376 static char * __attribute_used__
377 kvasprintf(typeof(GFP_KERNEL
) gfp
, const char *fmt
, va_list ap
)
384 len
= vsnprintf(dummy
, 0, fmt
, aq
);
387 p
= kmalloc(len
+ 1, gfp
);
391 vsnprintf(p
, len
+ 1, fmt
, ap
);
/* Fallback kasprintf() for kernels before 2.6.18: varargs wrapper around
 * kvasprintf().  The format attribute lets GCC type-check callers' format
 * strings against their arguments.
 * NOTE(review): the va_start/va_end and return were dropped by extraction. */
397 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
398 /* e905914f96e11862b130dd229f73045dad9a34e8 was after 2.6.17 */
399 static char * __attribute__((format (printf
, 2, 3)))
400 kasprintf(typeof(GFP_KERNEL
) gfp
, const char *fmt
, ...)
406 p
= kvasprintf(gfp
, fmt
, ap
);
/* Fallback strict_strtoul() for kernels before 2.6.25: parse an unsigned
 * long and accept only a fully-consumed string, optionally ending in a
 * single trailing newline.
 * NOTE(review): extraction dropped interior lines (numbering jump
 * 415 -> 426): the len/tail declarations, *res assignment and returns are
 * missing from this view.  Code is left byte-identical. */
413 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
414 /* 06b2a76d25d3cfbd14680021c1d356c91be6904e was after 2.6.24 */
415 static int strict_strtoul(const char *cp
, unsigned int base
, unsigned long *res
)
426 val
= simple_strtoul(cp
, &tail
, base
);
/* Accept either exact consumption of cp, or everything but a final '\n'. */
427 if ((*tail
== '\0') ||
428 ((len
== (size_t)(tail
- cp
) + 1) && (*tail
== '\n'))) {
437 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
438 /* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
439 /* Assume cpus == NULL. */
440 #define stop_machine(fn, data, cpus) stop_machine_run(fn, data, NR_CPUS);
441 #endif /* LINUX_VERSION_CODE */
443 #ifndef task_thread_info
444 #define task_thread_info(task) (task)->thread_info
445 #endif /* !task_thread_info */
447 #ifdef KSPLICE_STANDALONE
449 #ifdef do_each_thread_ve /* OpenVZ kernels define this */
450 #define do_each_thread do_each_thread_all
451 #define while_each_thread while_each_thread_all
454 static bool bootstrapped
= false;
456 /* defined by ksplice-create */
457 extern const struct ksplice_reloc ksplice_init_relocs
[],
458 ksplice_init_relocs_end
[];
460 #endif /* KSPLICE_STANDALONE */
462 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
463 /* c6b37801911d7f4663c99cad8aa230bc934cea82 was after 2.6.29 */
464 extern struct list_head modules
;
465 extern struct mutex module_mutex
;
466 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) && defined(CONFIG_UNUSED_SYMBOLS)
467 /* f71d20e961474dde77e6558396efb93d6ac80a4b was after 2.6.17 */
468 #define KSPLICE_KSYMTAB_UNUSED_SUPPORT 1
469 #endif /* LINUX_VERSION_CODE */
470 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
471 /* 9f28bb7e1d0188a993403ab39b774785892805e1 was after 2.6.16 */
472 #define KSPLICE_KSYMTAB_FUTURE_SUPPORT 1
473 #endif /* LINUX_VERSION_CODE */
474 extern const struct kernel_symbol __start___ksymtab
[];
475 extern const struct kernel_symbol __stop___ksymtab
[];
476 extern const unsigned long __start___kcrctab
[];
477 extern const struct kernel_symbol __start___ksymtab_gpl
[];
478 extern const struct kernel_symbol __stop___ksymtab_gpl
[];
479 extern const unsigned long __start___kcrctab_gpl
[];
480 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
481 extern const struct kernel_symbol __start___ksymtab_unused
[];
482 extern const struct kernel_symbol __stop___ksymtab_unused
[];
483 extern const unsigned long __start___kcrctab_unused
[];
484 extern const struct kernel_symbol __start___ksymtab_unused_gpl
[];
485 extern const struct kernel_symbol __stop___ksymtab_unused_gpl
[];
486 extern const unsigned long __start___kcrctab_unused_gpl
[];
487 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
488 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
489 extern const struct kernel_symbol __start___ksymtab_gpl_future
[];
490 extern const struct kernel_symbol __stop___ksymtab_gpl_future
[];
491 extern const unsigned long __start___kcrctab_gpl_future
[];
492 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
493 #endif /* LINUX_VERSION_CODE */
495 static struct update
*init_ksplice_update(const char *kid
);
496 static void cleanup_ksplice_update(struct update
*update
);
497 static void maybe_cleanup_ksplice_update(struct update
*update
);
498 static void add_to_update(struct ksplice_mod_change
*change
,
499 struct update
*update
);
500 static int ksplice_sysfs_init(struct update
*update
);
502 /* Preparing the relocations and patches for application */
503 static abort_t
apply_update(struct update
*update
);
504 static abort_t
reverse_update(struct update
*update
);
505 static abort_t
prepare_change(struct ksplice_mod_change
*change
);
506 static abort_t
finalize_change(struct ksplice_mod_change
*change
);
507 static abort_t
finalize_patches(struct ksplice_mod_change
*change
);
508 static abort_t
add_dependency_on_address(struct ksplice_mod_change
*change
,
510 static abort_t
map_trampoline_pages(struct update
*update
);
511 static void unmap_trampoline_pages(struct update
*update
);
512 static void *map_writable(void *addr
, size_t len
);
513 static abort_t
apply_relocs(struct ksplice_mod_change
*change
,
514 const struct ksplice_reloc
*relocs
,
515 const struct ksplice_reloc
*relocs_end
);
516 static abort_t
apply_reloc(struct ksplice_mod_change
*change
,
517 const struct ksplice_reloc
*r
);
518 static abort_t
apply_howto_reloc(struct ksplice_mod_change
*change
,
519 const struct ksplice_reloc
*r
);
520 static abort_t
apply_howto_date(struct ksplice_mod_change
*change
,
521 const struct ksplice_reloc
*r
);
522 static abort_t
read_reloc_value(struct ksplice_mod_change
*change
,
523 const struct ksplice_reloc
*r
,
524 unsigned long addr
, unsigned long *valp
);
525 static abort_t
write_reloc_value(struct ksplice_mod_change
*change
,
526 const struct ksplice_reloc
*r
,
527 unsigned long addr
, unsigned long sym_addr
);
528 static abort_t
create_module_list_entry(struct ksplice_mod_change
*change
,
530 static void cleanup_module_list_entries(struct update
*update
);
531 static void __attribute__((noreturn
)) ksplice_deleted(void);
533 /* run-pre matching */
534 static abort_t
match_change_sections(struct ksplice_mod_change
*change
,
535 bool consider_data_sections
);
536 static abort_t
find_section(struct ksplice_mod_change
*change
,
537 struct ksplice_section
*sect
);
538 static abort_t
try_addr(struct ksplice_mod_change
*change
,
539 struct ksplice_section
*sect
,
540 unsigned long run_addr
,
541 struct list_head
*safety_records
,
542 enum run_pre_mode mode
);
543 static abort_t
run_pre_cmp(struct ksplice_mod_change
*change
,
544 const struct ksplice_section
*sect
,
545 unsigned long run_addr
,
546 struct list_head
*safety_records
,
547 enum run_pre_mode mode
);
548 #ifndef CONFIG_FUNCTION_DATA_SECTIONS
549 /* defined in arch/ARCH/kernel/ksplice-arch.c */
550 static abort_t
arch_run_pre_cmp(struct ksplice_mod_change
*change
,
551 struct ksplice_section
*sect
,
552 unsigned long run_addr
,
553 struct list_head
*safety_records
,
554 enum run_pre_mode mode
);
555 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
556 static void print_bytes(struct ksplice_mod_change
*change
,
557 const unsigned char *run
, int runc
,
558 const unsigned char *pre
, int prec
);
559 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
560 static abort_t
brute_search(struct ksplice_mod_change
*change
,
561 struct ksplice_section
*sect
,
562 const void *start
, unsigned long len
,
563 struct list_head
*vals
);
564 static abort_t
brute_search_all(struct ksplice_mod_change
*change
,
565 struct ksplice_section
*sect
,
566 struct list_head
*vals
);
567 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
568 static const struct ksplice_reloc
*
569 init_reloc_search(struct ksplice_mod_change
*change
,
570 const struct ksplice_section
*sect
);
571 static const struct ksplice_reloc
*find_reloc(const struct ksplice_reloc
*start
,
572 const struct ksplice_reloc
*end
,
573 unsigned long address
,
575 static abort_t
lookup_reloc(struct ksplice_mod_change
*change
,
576 const struct ksplice_reloc
**fingerp
,
578 const struct ksplice_reloc
**relocp
);
579 static abort_t
handle_reloc(struct ksplice_mod_change
*change
,
580 const struct ksplice_section
*sect
,
581 const struct ksplice_reloc
*r
,
582 unsigned long run_addr
, enum run_pre_mode mode
);
583 static abort_t
handle_howto_date(struct ksplice_mod_change
*change
,
584 const struct ksplice_section
*sect
,
585 const struct ksplice_reloc
*r
,
586 unsigned long run_addr
,
587 enum run_pre_mode mode
);
588 static abort_t
handle_howto_reloc(struct ksplice_mod_change
*change
,
589 const struct ksplice_section
*sect
,
590 const struct ksplice_reloc
*r
,
591 unsigned long run_addr
,
592 enum run_pre_mode mode
);
593 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
595 static abort_t
handle_bug(struct ksplice_mod_change
*change
,
596 const struct ksplice_reloc
*r
,
597 unsigned long run_addr
);
598 #endif /* CONFIG_BUG */
599 #else /* LINUX_VERSION_CODE < */
600 /* 7664c5a1da4711bb6383117f51b94c8dc8f3f1cd was after 2.6.19 */
601 #endif /* LINUX_VERSION_CODE */
602 static abort_t
handle_extable(struct ksplice_mod_change
*change
,
603 const struct ksplice_reloc
*r
,
604 unsigned long run_addr
);
605 static struct ksplice_section
*symbol_section(struct ksplice_mod_change
*change
,
606 const struct ksplice_symbol
*sym
);
607 static int compare_section_labels(const void *va
, const void *vb
);
608 static int symbol_section_bsearch_compare(const void *a
, const void *b
);
609 static const struct ksplice_reloc
*
610 patch_reloc(struct ksplice_mod_change
*change
,
611 const struct ksplice_patch
*p
);
613 /* Computing possible addresses for symbols */
614 static abort_t
lookup_symbol(struct ksplice_mod_change
*change
,
615 const struct ksplice_symbol
*ksym
,
616 struct list_head
*vals
);
617 static void cleanup_symbol_arrays(struct ksplice_mod_change
*change
);
618 static abort_t
init_symbol_arrays(struct ksplice_mod_change
*change
);
619 static abort_t
init_symbol_array(struct ksplice_mod_change
*change
,
620 struct ksplice_symbol
*start
,
621 struct ksplice_symbol
*end
);
622 static abort_t
uniquify_symbols(struct ksplice_mod_change
*change
);
623 static abort_t
add_matching_values(struct ksplice_lookup
*lookup
,
624 const char *sym_name
, unsigned long sym_val
);
625 static bool add_export_values(const struct symsearch
*syms
,
626 struct module
*owner
,
627 unsigned int symnum
, void *data
);
628 static int symbolp_bsearch_compare(const void *key
, const void *elt
);
629 static int compare_symbolp_names(const void *a
, const void *b
);
630 static int compare_symbolp_labels(const void *a
, const void *b
);
631 #ifdef CONFIG_KALLSYMS
632 static int add_kallsyms_values(void *data
, const char *name
,
633 struct module
*owner
, unsigned long val
);
634 #endif /* CONFIG_KALLSYMS */
635 #ifdef KSPLICE_STANDALONE
637 add_system_map_candidates(struct ksplice_mod_change
*change
,
638 const struct ksplice_system_map
*start
,
639 const struct ksplice_system_map
*end
,
640 const char *label
, struct list_head
*vals
);
641 static int compare_system_map(const void *a
, const void *b
);
642 static int system_map_bsearch_compare(const void *key
, const void *elt
);
643 #endif /* KSPLICE_STANDALONE */
644 static abort_t
new_export_lookup(struct ksplice_mod_change
*ichange
,
645 const char *name
, struct list_head
*vals
);
647 /* Atomic update trampoline insertion and removal */
648 static abort_t
patch_action(struct update
*update
, enum ksplice_action action
);
649 static int __apply_patches(void *update
);
650 static int __reverse_patches(void *update
);
651 static abort_t
check_each_task(struct update
*update
);
652 static abort_t
check_task(struct update
*update
,
653 const struct task_struct
*t
, bool rerun
);
654 static abort_t
check_stack(struct update
*update
, struct conflict
*conf
,
655 const struct thread_info
*tinfo
,
656 const unsigned long *stack
);
657 static abort_t
check_address(struct update
*update
,
658 struct conflict
*conf
, unsigned long addr
);
659 static abort_t
check_record(struct conflict_addr
*ca
,
660 const struct safety_record
*rec
,
662 static bool is_stop_machine(const struct task_struct
*t
);
663 static void cleanup_conflicts(struct update
*update
);
664 static void print_conflicts(struct update
*update
);
665 static void insert_trampoline(struct ksplice_patch
*p
);
666 static abort_t
verify_trampoline(struct ksplice_mod_change
*change
,
667 const struct ksplice_patch
*p
);
668 static void remove_trampoline(const struct ksplice_patch
*p
);
670 static abort_t
create_labelval(struct ksplice_mod_change
*change
,
671 struct ksplice_symbol
*ksym
,
672 unsigned long val
, int status
);
673 static abort_t
create_safety_record(struct ksplice_mod_change
*change
,
674 const struct ksplice_section
*sect
,
675 struct list_head
*record_list
,
676 unsigned long run_addr
,
677 unsigned long run_size
);
678 static abort_t
add_candidate_val(struct ksplice_mod_change
*change
,
679 struct list_head
*vals
, unsigned long val
);
680 static void release_vals(struct list_head
*vals
);
681 static void set_temp_labelvals(struct ksplice_mod_change
*change
, int status
);
683 static int contains_canary(struct ksplice_mod_change
*change
,
684 unsigned long blank_addr
,
685 const struct ksplice_reloc_howto
*howto
);
686 static unsigned long follow_trampolines(struct ksplice_mod_change
*change
,
688 static bool patches_module(const struct module
*a
, const struct module
*b
);
689 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
690 /* 66f92cf9d415e96a5bdd6c64de8dd8418595d2fc was after 2.6.29 */
691 static bool strstarts(const char *str
, const char *prefix
);
692 #endif /* LINUX_VERSION_CODE */
693 static bool singular(struct list_head
*list
);
694 static void *bsearch(const void *key
, const void *base
, size_t n
,
695 size_t size
, int (*cmp
)(const void *key
, const void *elt
));
696 static int compare_relocs(const void *a
, const void *b
);
697 static int reloc_bsearch_compare(const void *key
, const void *elt
);
700 static abort_t
init_debug_buf(struct update
*update
);
701 static void clear_debug_buf(struct update
*update
);
702 static int __attribute__((format(printf
, 2, 3)))
703 _ksdebug(struct update
*update
, const char *fmt
, ...);
704 #define ksdebug(change, fmt, ...) \
705 _ksdebug(change->update, fmt, ## __VA_ARGS__)
707 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) && defined(CONFIG_KALLSYMS)
708 /* 75a66614db21007bcc8c37f9c5d5b922981387b9 was after 2.6.29 */
709 static int kallsyms_on_each_symbol(int (*fn
)(void *, const char *,
710 struct module
*, unsigned long),
712 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
713 static unsigned int kallsyms_expand_symbol(unsigned int off
, char *result
);
714 #endif /* LINUX_VERSION_CODE */
715 static int module_kallsyms_on_each_symbol(int (*fn
)(void *, const char *,
719 #endif /* LINUX_VERSION_CODE && CONFIG_KALLSYMS */
721 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
722 /* c6b37801911d7f4663c99cad8aa230bc934cea82 was after 2.6.29 */
723 static struct module
*find_module(const char *name
);
724 static int use_module(struct module
*a
, struct module
*b
);
725 static const struct kernel_symbol
*find_symbol(const char *name
,
726 struct module
**owner
,
727 const unsigned long **crc
,
728 bool gplok
, bool warn
);
729 static bool each_symbol(bool (*fn
)(const struct symsearch
*arr
,
730 struct module
*owner
,
731 unsigned int symnum
, void *data
),
733 static struct module
*__module_address(unsigned long addr
);
734 #endif /* LINUX_VERSION_CODE */
736 /* Architecture-specific functions defined in arch/ARCH/kernel/ksplice-arch.c */
738 /* Prepare a trampoline for the given patch */
739 static abort_t
prepare_trampoline(struct ksplice_mod_change
*change
,
740 struct ksplice_patch
*p
);
741 /* What address does the trampoline at addr jump to? */
742 static abort_t
trampoline_target(struct ksplice_mod_change
*change
,
743 unsigned long addr
, unsigned long *new_addr
);
744 /* Hook to handle pc-relative jumps inserted by parainstructions */
745 static abort_t
handle_paravirt(struct ksplice_mod_change
*change
,
746 unsigned long pre
, unsigned long run
,
748 /* Is address p on the stack of the given thread? */
749 static bool valid_stack_ptr(const struct thread_info
*tinfo
, const void *p
);
751 #ifndef KSPLICE_STANDALONE
752 #include "ksplice-arch.c"
753 #elif defined CONFIG_X86
754 #include "x86/ksplice-arch.c"
755 #elif defined CONFIG_ARM
756 #include "arm/ksplice-arch.c"
757 #endif /* KSPLICE_STANDALONE */
759 #define clear_list(head, type, member) \
761 struct list_head *_pos, *_n; \
762 list_for_each_safe(_pos, _n, head) { \
764 kfree(list_entry(_pos, type, member)); \
769 * init_ksplice_mod_change() - Initializes a ksplice change
770 * @change: The change to be initialized. All of the public fields of the
771 * change and its associated data structures should be populated
772 * before this function is called. The values of the private
773 * fields will be ignored.
/* NOTE(review): extraction dropped many interior lines of this function
 * (braces, error branches, the final return); code is left byte-identical
 * and only annotated. */
775 int init_ksplice_mod_change(struct ksplice_mod_change
*change
)
777 struct update
*update
;
778 struct ksplice_patch
*p
;
779 struct ksplice_section
*s
;
782 #ifdef KSPLICE_STANDALONE
785 #endif /* KSPLICE_STANDALONE */
787 INIT_LIST_HEAD(&change
->temp_labelvals
);
788 INIT_LIST_HEAD(&change
->safety_records
);
/* Sort the reloc, section and (standalone) System.map tables so the
 * bsearch-based lookups elsewhere in this file can run against them. */
790 sort(change
->old_code
.relocs
,
791 change
->old_code
.relocs_end
- change
->old_code
.relocs
,
792 sizeof(*change
->old_code
.relocs
), compare_relocs
, NULL
);
793 sort(change
->new_code
.relocs
,
794 change
->new_code
.relocs_end
- change
->new_code
.relocs
,
795 sizeof(*change
->new_code
.relocs
), compare_relocs
, NULL
);
796 sort(change
->old_code
.sections
,
797 change
->old_code
.sections_end
- change
->old_code
.sections
,
798 sizeof(*change
->old_code
.sections
), compare_section_labels
, NULL
);
799 #ifdef KSPLICE_STANDALONE
800 sort(change
->new_code
.system_map
,
801 change
->new_code
.system_map_end
- change
->new_code
.system_map
,
802 sizeof(*change
->new_code
.system_map
), compare_system_map
, NULL
);
803 sort(change
->old_code
.system_map
,
804 change
->old_code
.system_map_end
- change
->old_code
.system_map
,
805 sizeof(*change
->old_code
.system_map
), compare_system_map
, NULL
);
806 #endif /* KSPLICE_STANDALONE */
808 for (p
= change
->patches
; p
< change
->patches_end
; p
++)
810 for (s
= change
->old_code
.sections
; s
< change
->old_code
.sections_end
;
/* For each data patch, locate its target section and retag it. */
813 for (p
= change
->patches
; p
< change
->patches_end
; p
++) {
814 const struct ksplice_reloc
*r
= patch_reloc(change
, p
);
817 if (p
->type
== KSPLICE_PATCH_DATA
) {
818 s
= symbol_section(change
, r
->symbol
);
821 /* Ksplice creates KSPLICE_PATCH_DATA patches in order
822 * to modify rodata sections that have been explicitly
823 * marked for patching using the ksplice-patch.h macro
824 * ksplice_assume_rodata. Here we modify the section
825 * flags appropriately.
827 if (s
->flags
& KSPLICE_SECTION_DATA
)
828 s
->flags
= (s
->flags
& ~KSPLICE_SECTION_DATA
) |
829 KSPLICE_SECTION_RODATA
;
/* Attach this change to the update with a matching kid, or create a
 * fresh update; all list manipulation happens under module_mutex. */
833 mutex_lock(&module_mutex
);
834 list_for_each_entry(update
, &updates
, list
) {
835 if (strcmp(change
->kid
, update
->kid
) == 0) {
836 if (update
->stage
!= STAGE_PREPARING
) {
840 add_to_update(change
, update
);
845 update
= init_ksplice_update(change
->kid
);
846 if (update
== NULL
) {
850 ret
= ksplice_sysfs_init(update
);
/* sysfs registration failed: tear the new update back down. */
852 cleanup_ksplice_update(update
);
855 add_to_update(change
, update
);
857 mutex_unlock(&module_mutex
);
860 EXPORT_SYMBOL_GPL(init_ksplice_mod_change
);
863 * cleanup_ksplice_mod_change() - Cleans up a change if appropriate
864 * @change: The change to be cleaned up
866 * cleanup_ksplice_mod_change is currently called twice for each
867 * Ksplice update; once when the old_code module is unloaded, and once
868 * when the new_code module is unloaded. The extra call is used to
869 * avoid leaks if you unload the old_code without applying the update.
/* NOTE(review): extraction dropped interior lines here (early returns,
 * loop body, braces); code is left byte-identical and only annotated. */
871 void cleanup_ksplice_mod_change(struct ksplice_mod_change
*change
)
/* Already detached from its update — nothing to do. */
873 if (change
->update
== NULL
)
876 mutex_lock(&module_mutex
);
877 if (change
->update
->stage
== STAGE_APPLIED
) {
878 /* If the change wasn't actually applied (because we
879 * only applied this update to loaded modules and this
880 * target was not loaded), then unregister the change
881 * from the list of unused changes.
883 struct ksplice_mod_change
*c
;
886 list_for_each_entry(c
, &change
->update
->unused_changes
, list
) {
891 list_del(&change
->list
);
892 mutex_unlock(&module_mutex
);
/* Not applied: unlink the change and, if the update is still being
 * prepared, let it be torn down when its change lists empty out. */
895 list_del(&change
->list
);
896 if (change
->update
->stage
== STAGE_PREPARING
)
897 maybe_cleanup_ksplice_update(change
->update
);
898 change
->update
= NULL
;
899 mutex_unlock(&module_mutex
);
901 EXPORT_SYMBOL_GPL(cleanup_ksplice_mod_change
);
/* Allocate and initialize a new struct update for the given kid
 * (ksplice update identifier), register it on the global `updates`
 * list, and pin this module while the update exists.
 * NOTE(review): extraction dropped the error-path bodies (the kfree /
 * return NULL statements inside the `{` branches) and the final return;
 * code is left byte-identical and only annotated. */
903 static struct update
*init_ksplice_update(const char *kid
)
905 struct update
*update
;
906 update
= kcalloc(1, sizeof(struct update
), GFP_KERNEL
);
909 update
->name
= kasprintf(GFP_KERNEL
, "ksplice_%s", kid
);
910 if (update
->name
== NULL
) {
914 update
->kid
= kstrdup(kid
, GFP_KERNEL
);
915 if (update
->kid
== NULL
) {
/* Hold a reference on this module so it cannot unload mid-update. */
920 if (try_module_get(THIS_MODULE
) != 1) {
926 INIT_LIST_HEAD(&update
->changes
);
927 INIT_LIST_HEAD(&update
->unused_changes
);
928 INIT_LIST_HEAD(&update
->ksplice_module_list
);
929 if (init_debug_buf(update
) != OK
) {
930 module_put(THIS_MODULE
);
936 list_add(&update
->list
, &updates
);
937 update
->stage
= STAGE_PREPARING
;
938 update
->abort_cause
= OK
;
940 INIT_LIST_HEAD(&update
->conflicts
);
/* Tear down an update: unlink it from the global `updates` list, release
 * its conflict records, debug buffer and module-list entries, and drop
 * the module reference taken in init_ksplice_update().
 * NOTE(review): lines 945 and 950-952 (braces and kfree of name/kid/update,
 * presumably) were dropped by extraction — TODO confirm. */
944 static void cleanup_ksplice_update(struct update
*update
)
946 list_del(&update
->list
);
947 cleanup_conflicts(update
);
948 clear_debug_buf(update
);
949 cleanup_module_list_entries(update
);
953 module_put(THIS_MODULE
);
956 /* Clean up the update if it no longer has any changes */
/* If both change lists are empty, drop the update's sysfs kobject
 * reference (API differs across kernel versions). */
957 static void maybe_cleanup_ksplice_update(struct update
*update
)
959 if (list_empty(&update
->changes
) && list_empty(&update
->unused_changes
))
960 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
961 kobject_put(&update
->kobj
);
962 #else /* LINUX_VERSION_CODE < */
963 /* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
964 kobject_unregister(&update
->kobj
);
965 #endif /* LINUX_VERSION_CODE */
968 static void add_to_update(struct ksplice_mod_change
*change
,
969 struct update
*update
)
971 change
->update
= update
;
972 list_add(&change
->list
, &update
->unused_changes
);
975 static int ksplice_sysfs_init(struct update
*update
)
978 memset(&update
->kobj
, 0, sizeof(update
->kobj
));
979 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
980 #ifndef KSPLICE_STANDALONE
981 ret
= kobject_init_and_add(&update
->kobj
, &update_ktype
,
982 ksplice_kobj
, "%s", update
->kid
);
983 #else /* KSPLICE_STANDALONE */
984 ret
= kobject_init_and_add(&update
->kobj
, &update_ktype
,
985 &THIS_MODULE
->mkobj
.kobj
, "ksplice");
986 #endif /* KSPLICE_STANDALONE */
987 #else /* LINUX_VERSION_CODE < */
988 /* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
989 ret
= kobject_set_name(&update
->kobj
, "%s", "ksplice");
992 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
993 update
->kobj
.parent
= &THIS_MODULE
->mkobj
.kobj
;
994 #else /* LINUX_VERSION_CODE < */
995 /* b86ab02803095190d6b72bcc18dcf620bf378df9 was after 2.6.10 */
996 update
->kobj
.parent
= &THIS_MODULE
->mkobj
->kobj
;
997 #endif /* LINUX_VERSION_CODE */
998 update
->kobj
.ktype
= &update_ktype
;
999 ret
= kobject_register(&update
->kobj
);
1000 #endif /* LINUX_VERSION_CODE */
1003 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
1004 kobject_uevent(&update
->kobj
, KOBJ_ADD
);
1005 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
1006 /* 312c004d36ce6c739512bac83b452f4c20ab1f62 was after 2.6.14 */
1007 /* 12025235884570ba7f02a6f427f973ac6be7ec54 was after 2.6.9 */
1008 kobject_uevent(&update
->kobj
, KOBJ_ADD
, NULL
);
1009 #endif /* LINUX_VERSION_CODE */
1013 static abort_t
apply_update(struct update
*update
)
1015 struct ksplice_mod_change
*change
, *n
;
1019 list_for_each_entry(change
, &update
->changes
, list
) {
1020 ret
= create_module_list_entry(change
, true);
1025 list_for_each_entry_safe(change
, n
, &update
->unused_changes
, list
) {
1026 if (strcmp(change
->target_name
, "vmlinux") == 0) {
1027 change
->target
= NULL
;
1028 } else if (change
->target
== NULL
) {
1029 change
->target
= find_module(change
->target_name
);
1030 if (change
->target
== NULL
||
1031 !module_is_live(change
->target
)) {
1032 if (!update
->partial
) {
1033 ret
= TARGET_NOT_LOADED
;
1036 ret
= create_module_list_entry(change
, false);
1041 retval
= use_module(change
->new_code_mod
,
1048 ret
= create_module_list_entry(change
, true);
1051 list_del(&change
->list
);
1052 list_add_tail(&change
->list
, &update
->changes
);
1054 #ifdef KSPLICE_NEED_PARAINSTRUCTIONS
1055 if (change
->target
== NULL
) {
1056 apply_paravirt(change
->new_code
.parainstructions
,
1057 change
->new_code
.parainstructions_end
);
1058 apply_paravirt(change
->old_code
.parainstructions
,
1059 change
->old_code
.parainstructions_end
);
1061 #endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
1064 list_for_each_entry(change
, &update
->changes
, list
) {
1065 const struct ksplice_section
*sect
;
1066 for (sect
= change
->new_code
.sections
;
1067 sect
< change
->new_code
.sections_end
; sect
++) {
1068 struct safety_record
*rec
= kmalloc(sizeof(*rec
),
1071 ret
= OUT_OF_MEMORY
;
1074 rec
->addr
= sect
->address
;
1075 rec
->size
= sect
->size
;
1076 rec
->label
= sect
->symbol
->label
;
1077 list_add(&rec
->list
, &change
->safety_records
);
1081 list_for_each_entry(change
, &update
->changes
, list
) {
1082 ret
= init_symbol_arrays(change
);
1084 cleanup_symbol_arrays(change
);
1087 ret
= prepare_change(change
);
1088 cleanup_symbol_arrays(change
);
1092 ret
= patch_action(update
, KS_APPLY
);
1094 list_for_each_entry(change
, &update
->changes
, list
) {
1095 struct ksplice_section
*s
;
1096 if (update
->stage
== STAGE_PREPARING
)
1097 clear_list(&change
->safety_records
,
1098 struct safety_record
, list
);
1099 for (s
= change
->old_code
.sections
;
1100 s
< change
->old_code
.sections_end
; s
++) {
1101 if (s
->match_map
!= NULL
) {
1102 vfree(s
->match_map
);
1103 s
->match_map
= NULL
;
1107 if (update
->stage
== STAGE_PREPARING
)
1108 cleanup_module_list_entries(update
);
1111 printk(KERN_INFO
"ksplice: Update %s applied successfully\n",
1116 static abort_t
reverse_update(struct update
*update
)
1119 struct ksplice_mod_change
*change
;
1121 clear_debug_buf(update
);
1122 ret
= init_debug_buf(update
);
1126 _ksdebug(update
, "Preparing to reverse %s\n", update
->kid
);
1128 ret
= patch_action(update
, KS_REVERSE
);
1132 list_for_each_entry(change
, &update
->changes
, list
)
1133 clear_list(&change
->safety_records
, struct safety_record
, list
);
1135 printk(KERN_INFO
"ksplice: Update %s reversed successfully\n",
1140 static int compare_symbolp_names(const void *a
, const void *b
)
1142 const struct ksplice_symbol
*const *sympa
= a
, *const *sympb
= b
;
1143 if ((*sympa
)->name
== NULL
&& (*sympb
)->name
== NULL
)
1145 if ((*sympa
)->name
== NULL
)
1147 if ((*sympb
)->name
== NULL
)
1149 return strcmp((*sympa
)->name
, (*sympb
)->name
);
1152 static int compare_symbolp_labels(const void *a
, const void *b
)
1154 const struct ksplice_symbol
*const *sympa
= a
, *const *sympb
= b
;
1155 return strcmp((*sympa
)->label
, (*sympb
)->label
);
1158 static int symbolp_bsearch_compare(const void *key
, const void *elt
)
1160 const char *name
= key
;
1161 const struct ksplice_symbol
*const *symp
= elt
;
1162 const struct ksplice_symbol
*sym
= *symp
;
1163 if (sym
->name
== NULL
)
1165 return strcmp(name
, sym
->name
);
1168 static abort_t
add_matching_values(struct ksplice_lookup
*lookup
,
1169 const char *sym_name
, unsigned long sym_val
)
1171 struct ksplice_symbol
**symp
;
1174 symp
= bsearch(sym_name
, lookup
->arr
, lookup
->size
,
1175 sizeof(*lookup
->arr
), symbolp_bsearch_compare
);
1179 while (symp
> lookup
->arr
&&
1180 symbolp_bsearch_compare(sym_name
, symp
- 1) == 0)
1183 for (; symp
< lookup
->arr
+ lookup
->size
; symp
++) {
1184 struct ksplice_symbol
*sym
= *symp
;
1185 if (sym
->name
== NULL
|| strcmp(sym_name
, sym
->name
) != 0)
1187 ret
= add_candidate_val(lookup
->change
,
1188 sym
->candidate_vals
, sym_val
);
#ifdef CONFIG_KALLSYMS
/*
 * kallsyms_on_each_symbol callback: feed every kallsyms entry that
 * belongs to the change's target (and is not our own new_code module)
 * into add_matching_values.  The int/abort_t __force casts bridge the
 * callback's int return type.
 */
static int add_kallsyms_values(void *data, const char *name,
			       struct module *owner, unsigned long val)
{
	struct ksplice_lookup *lookup = data;
	if (owner == lookup->change->new_code_mod ||
	    !patches_module(owner, lookup->change->target))
		return (__force int)OK;
	return (__force int)add_matching_values(lookup, name, val);
}
#endif /* CONFIG_KALLSYMS */
1207 static bool add_export_values(const struct symsearch
*syms
,
1208 struct module
*owner
,
1209 unsigned int symnum
, void *data
)
1211 struct ksplice_lookup
*lookup
= data
;
1214 ret
= add_matching_values(lookup
, syms
->start
[symnum
].name
,
1215 syms
->start
[symnum
].value
);
1223 static void cleanup_symbol_arrays(struct ksplice_mod_change
*change
)
1225 struct ksplice_symbol
*sym
;
1226 for (sym
= change
->new_code
.symbols
; sym
< change
->new_code
.symbols_end
;
1228 if (sym
->candidate_vals
!= NULL
) {
1229 clear_list(sym
->candidate_vals
, struct candidate_val
,
1231 kfree(sym
->candidate_vals
);
1232 sym
->candidate_vals
= NULL
;
1235 for (sym
= change
->old_code
.symbols
; sym
< change
->old_code
.symbols_end
;
1237 if (sym
->candidate_vals
!= NULL
) {
1238 clear_list(sym
->candidate_vals
, struct candidate_val
,
1240 kfree(sym
->candidate_vals
);
1241 sym
->candidate_vals
= NULL
;
1247 * The new_code and old_code modules each have their own independent
1248 * ksplice_symbol structures. uniquify_symbols unifies these separate
1249 * pieces of kernel symbol information by replacing all references to
1250 * the old_code copy of symbols with references to the new_code copy.
1252 static abort_t
uniquify_symbols(struct ksplice_mod_change
*change
)
1254 struct ksplice_reloc
*r
;
1255 struct ksplice_section
*s
;
1256 struct ksplice_symbol
*sym
, **sym_arr
, **symp
;
1257 size_t size
= change
->new_code
.symbols_end
- change
->new_code
.symbols
;
1262 sym_arr
= vmalloc(sizeof(*sym_arr
) * size
);
1263 if (sym_arr
== NULL
)
1264 return OUT_OF_MEMORY
;
1266 for (symp
= sym_arr
, sym
= change
->new_code
.symbols
;
1267 symp
< sym_arr
+ size
&& sym
< change
->new_code
.symbols_end
;
1271 sort(sym_arr
, size
, sizeof(*sym_arr
), compare_symbolp_labels
, NULL
);
1273 for (r
= change
->old_code
.relocs
; r
< change
->old_code
.relocs_end
;
1275 symp
= bsearch(&r
->symbol
, sym_arr
, size
, sizeof(*sym_arr
),
1276 compare_symbolp_labels
);
1278 if ((*symp
)->name
== NULL
)
1279 (*symp
)->name
= r
->symbol
->name
;
1284 for (s
= change
->old_code
.sections
; s
< change
->old_code
.sections_end
;
1286 symp
= bsearch(&s
->symbol
, sym_arr
, size
, sizeof(*sym_arr
),
1287 compare_symbolp_labels
);
1289 if ((*symp
)->name
== NULL
)
1290 (*symp
)->name
= s
->symbol
->name
;
1300 * Initialize the ksplice_symbol structures in the given array using
1301 * the kallsyms and exported symbol tables.
1303 static abort_t
init_symbol_array(struct ksplice_mod_change
*change
,
1304 struct ksplice_symbol
*start
,
1305 struct ksplice_symbol
*end
)
1307 struct ksplice_symbol
*sym
, **sym_arr
, **symp
;
1308 struct ksplice_lookup lookup
;
1309 size_t size
= end
- start
;
1315 for (sym
= start
; sym
< end
; sym
++) {
1316 if (strstarts(sym
->label
, "__ksymtab")) {
1317 const struct kernel_symbol
*ksym
;
1318 const char *colon
= strchr(sym
->label
, ':');
1319 const char *name
= colon
+ 1;
1322 ksym
= find_symbol(name
, NULL
, NULL
, true, false);
1324 ksdebug(change
, "Could not find kernel_symbol "
1325 "structure for %s\n", name
);
1328 sym
->value
= (unsigned long)ksym
;
1329 sym
->candidate_vals
= NULL
;
1333 sym
->candidate_vals
= kmalloc(sizeof(*sym
->candidate_vals
),
1335 if (sym
->candidate_vals
== NULL
)
1336 return OUT_OF_MEMORY
;
1337 INIT_LIST_HEAD(sym
->candidate_vals
);
1341 sym_arr
= vmalloc(sizeof(*sym_arr
) * size
);
1342 if (sym_arr
== NULL
)
1343 return OUT_OF_MEMORY
;
1345 for (symp
= sym_arr
, sym
= start
; symp
< sym_arr
+ size
&& sym
< end
;
1349 sort(sym_arr
, size
, sizeof(*sym_arr
), compare_symbolp_names
, NULL
);
1351 lookup
.change
= change
;
1352 lookup
.arr
= sym_arr
;
1356 each_symbol(add_export_values
, &lookup
);
1358 #ifdef CONFIG_KALLSYMS
1360 ret
= (__force abort_t
)
1361 kallsyms_on_each_symbol(add_kallsyms_values
, &lookup
);
1362 #endif /* CONFIG_KALLSYMS */
1367 /* Prepare the change's ksplice_symbol structures for run-pre matching */
1368 static abort_t
init_symbol_arrays(struct ksplice_mod_change
*change
)
1372 ret
= uniquify_symbols(change
);
1376 ret
= init_symbol_array(change
, change
->old_code
.symbols
,
1377 change
->old_code
.symbols_end
);
1381 ret
= init_symbol_array(change
, change
->new_code
.symbols
,
1382 change
->new_code
.symbols_end
);
1389 static abort_t
prepare_change(struct ksplice_mod_change
*change
)
1393 ksdebug(change
, "Preparing and checking %s\n", change
->name
);
1394 ret
= match_change_sections(change
, false);
1395 if (ret
== NO_MATCH
) {
1396 /* It is possible that by using relocations from .data sections
1397 * we can successfully run-pre match the rest of the sections.
1398 * To avoid using any symbols obtained from .data sections
1399 * (which may be unreliable) in the post code, we first prepare
1400 * the post code and then try to run-pre match the remaining
1401 * sections with the help of .data sections.
1403 ksdebug(change
, "Continuing without some sections; we might "
1404 "find them later.\n");
1405 ret
= finalize_change(change
);
1407 ksdebug(change
, "Aborted. Unable to continue without "
1408 "the unmatched sections.\n");
1412 ksdebug(change
, "run-pre: Considering .data sections to find "
1413 "the unmatched sections\n");
1414 ret
= match_change_sections(change
, true);
1418 ksdebug(change
, "run-pre: Found all previously unmatched "
1421 } else if (ret
!= OK
) {
1425 return finalize_change(change
);
1429 * Finish preparing the change for insertion into the kernel.
1430 * Afterwards, the replacement code should be ready to run and the
1431 * ksplice_patches should all be ready for trampoline insertion.
1433 static abort_t
finalize_change(struct ksplice_mod_change
*change
)
1436 ret
= apply_relocs(change
, change
->new_code
.relocs
,
1437 change
->new_code
.relocs_end
);
1441 ret
= finalize_patches(change
);
1448 static abort_t
finalize_patches(struct ksplice_mod_change
*change
)
1450 struct ksplice_patch
*p
;
1451 struct safety_record
*rec
;
1454 for (p
= change
->patches
; p
< change
->patches_end
; p
++) {
1456 list_for_each_entry(rec
, &change
->safety_records
, list
) {
1457 if (rec
->addr
<= p
->oldaddr
&&
1458 p
->oldaddr
< rec
->addr
+ rec
->size
) {
1463 if (!found
&& p
->type
!= KSPLICE_PATCH_EXPORT
) {
1464 const struct ksplice_reloc
*r
= patch_reloc(change
, p
);
1466 ksdebug(change
, "A patch with no reloc at its "
1467 "oldaddr has no safety record\n");
1470 ksdebug(change
, "No safety record for patch with "
1471 "oldaddr %s+%lx\n", r
->symbol
->label
,
1476 if (p
->type
== KSPLICE_PATCH_TEXT
) {
1477 ret
= prepare_trampoline(change
, p
);
1482 if (found
&& rec
->addr
+ rec
->size
< p
->oldaddr
+ p
->size
) {
1483 ksdebug(change
, "Safety record %s is too short for "
1484 "patch\n", rec
->label
);
1488 if (p
->type
== KSPLICE_PATCH_TEXT
) {
1489 if (p
->repladdr
== 0)
1490 p
->repladdr
= (unsigned long)ksplice_deleted
;
1496 /* noinline to prevent garbage on the stack from confusing check_stack */
1497 static noinline abort_t
map_trampoline_pages(struct update
*update
)
1499 struct ksplice_mod_change
*change
;
1500 list_for_each_entry(change
, &update
->changes
, list
) {
1501 struct ksplice_patch
*p
;
1502 for (p
= change
->patches
; p
< change
->patches_end
; p
++) {
1503 p
->vaddr
= map_writable((void *)p
->oldaddr
, p
->size
);
1504 if (p
->vaddr
== NULL
) {
1506 "Unable to map oldaddr read/write\n");
1507 unmap_trampoline_pages(update
);
1515 static void unmap_trampoline_pages(struct update
*update
)
1517 struct ksplice_mod_change
*change
;
1518 list_for_each_entry(change
, &update
->changes
, list
) {
1519 struct ksplice_patch
*p
;
1520 for (p
= change
->patches
; p
< change
->patches_end
; p
++) {
1521 vunmap((void *)((unsigned long)p
->vaddr
& PAGE_MASK
));
1528 * map_writable creates a shadow page mapping of the range
1529 * [addr, addr + len) so that we can write to code mapped read-only.
1531 * It is similar to a generalized version of x86's text_poke. But
1532 * because one cannot use vmalloc/vfree() inside stop_machine, we use
1533 * map_writable to map the pages before stop_machine, then use the
1534 * mapping inside stop_machine, and unmap the pages afterwards.
1536 static void *map_writable(void *addr
, size_t len
)
1539 int nr_pages
= DIV_ROUND_UP(offset_in_page(addr
) + len
, PAGE_SIZE
);
1540 struct page
**pages
= kmalloc(nr_pages
* sizeof(*pages
), GFP_KERNEL
);
1541 void *page_addr
= (void *)((unsigned long)addr
& PAGE_MASK
);
1547 for (i
= 0; i
< nr_pages
; i
++) {
1548 if (__module_address((unsigned long)page_addr
) == NULL
) {
1549 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) || !defined(CONFIG_X86_64)
1550 pages
[i
] = virt_to_page(page_addr
);
1551 #else /* LINUX_VERSION_CODE < && CONFIG_X86_64 */
1552 /* e3ebadd95cb621e2c7436f3d3646447ac9d5c16d was after 2.6.21 */
1554 pfn_to_page(__pa_symbol(page_addr
) >> PAGE_SHIFT
);
1555 #endif /* LINUX_VERSION_CODE || !CONFIG_X86_64 */
1556 WARN_ON(!PageReserved(pages
[i
]));
1558 pages
[i
] = vmalloc_to_page(addr
);
1560 if (pages
[i
] == NULL
) {
1564 page_addr
+= PAGE_SIZE
;
1566 vaddr
= vmap(pages
, nr_pages
, VM_MAP
, PAGE_KERNEL
);
1570 return vaddr
+ offset_in_page(addr
);
1574 * Ksplice adds a dependency on any symbol address used to resolve
1575 * relocations in the new_code module.
1577 * Be careful to follow_trampolines so that we always depend on the
1578 * latest version of the target function, since that's the code that
1579 * will run if we call addr.
1581 static abort_t
add_dependency_on_address(struct ksplice_mod_change
*change
,
1584 struct ksplice_mod_change
*c
;
1586 __module_text_address(follow_trampolines(change
, addr
));
1589 list_for_each_entry(c
, &change
->update
->changes
, list
) {
1590 if (m
== c
->new_code_mod
)
1593 if (use_module(change
->new_code_mod
, m
) != 1)
1598 static abort_t
apply_relocs(struct ksplice_mod_change
*change
,
1599 const struct ksplice_reloc
*relocs
,
1600 const struct ksplice_reloc
*relocs_end
)
1602 const struct ksplice_reloc
*r
;
1603 for (r
= relocs
; r
< relocs_end
; r
++) {
1604 abort_t ret
= apply_reloc(change
, r
);
1611 static abort_t
apply_reloc(struct ksplice_mod_change
*change
,
1612 const struct ksplice_reloc
*r
)
1614 switch (r
->howto
->type
) {
1615 case KSPLICE_HOWTO_RELOC
:
1616 case KSPLICE_HOWTO_RELOC_PATCH
:
1617 return apply_howto_reloc(change
, r
);
1618 case KSPLICE_HOWTO_DATE
:
1619 case KSPLICE_HOWTO_TIME
:
1620 return apply_howto_date(change
, r
);
1622 ksdebug(change
, "Unexpected howto type %d\n", r
->howto
->type
);
1628 * Applies a relocation. Aborts if the symbol referenced in it has
1629 * not been uniquely resolved.
1631 static abort_t
apply_howto_reloc(struct ksplice_mod_change
*change
,
1632 const struct ksplice_reloc
*r
)
1636 unsigned long sym_addr
;
1639 canary_ret
= contains_canary(change
, r
->blank_addr
, r
->howto
);
1642 if (canary_ret
== 0) {
1643 ksdebug(change
, "reloc: skipped %lx to %s+%lx (altinstr)\n",
1644 r
->blank_addr
, r
->symbol
->label
, r
->target_addend
);
1648 #ifdef KSPLICE_STANDALONE
1649 if (!bootstrapped
) {
1650 ret
= add_system_map_candidates(change
,
1651 change
->new_code
.system_map
,
1652 change
->new_code
.system_map_end
,
1653 r
->symbol
->label
, &vals
);
1655 release_vals(&vals
);
1659 #endif /* KSPLICE_STANDALONE */
1660 ret
= lookup_symbol(change
, r
->symbol
, &vals
);
1662 release_vals(&vals
);
1666 * Relocations for the oldaddr fields of patches must have
1667 * been resolved via run-pre matching.
1669 if (!singular(&vals
) || (r
->symbol
->candidate_vals
!= NULL
&&
1670 r
->howto
->type
== KSPLICE_HOWTO_RELOC_PATCH
)) {
1671 release_vals(&vals
);
1672 ksdebug(change
, "Failed to find %s for reloc\n",
1674 return FAILED_TO_FIND
;
1676 sym_addr
= list_entry(vals
.next
, struct candidate_val
, list
)->val
;
1677 release_vals(&vals
);
1679 ret
= write_reloc_value(change
, r
, r
->blank_addr
,
1680 r
->howto
->pcrel
? sym_addr
- r
->blank_addr
:
1685 ksdebug(change
, "reloc: %lx to %s+%lx (S=%lx ", r
->blank_addr
,
1686 r
->symbol
->label
, r
->target_addend
, sym_addr
);
1687 switch (r
->howto
->size
) {
1689 ksdebug(change
, "aft=%02x)\n", *(uint8_t *)r
->blank_addr
);
1692 ksdebug(change
, "aft=%04x)\n", *(uint16_t *)r
->blank_addr
);
1695 ksdebug(change
, "aft=%08x)\n", *(uint32_t *)r
->blank_addr
);
1697 #if BITS_PER_LONG >= 64
1699 ksdebug(change
, "aft=%016llx)\n", *(uint64_t *)r
->blank_addr
);
1701 #endif /* BITS_PER_LONG */
1703 ksdebug(change
, "Aborted. Invalid relocation size.\n");
1706 #ifdef KSPLICE_STANDALONE
1709 #endif /* KSPLICE_STANDALONE */
1712 * Create labelvals so that we can verify our choices in the
1713 * second round of run-pre matching that considers data sections.
1715 ret
= create_labelval(change
, r
->symbol
, sym_addr
, VAL
);
1719 return add_dependency_on_address(change
, sym_addr
);
1723 * Date relocations are created wherever __DATE__ or __TIME__ is used
1724 * in the kernel; we resolve them by simply copying in the date/time
1725 * obtained from run-pre matching the relevant compilation unit.
1727 static abort_t
apply_howto_date(struct ksplice_mod_change
*change
,
1728 const struct ksplice_reloc
*r
)
1730 if (r
->symbol
->candidate_vals
!= NULL
) {
1731 ksdebug(change
, "Failed to find %s for date\n",
1733 return FAILED_TO_FIND
;
1735 memcpy((unsigned char *)r
->blank_addr
,
1736 (const unsigned char *)r
->symbol
->value
, r
->howto
->size
);
1741 * Given a relocation and its run address, compute the address of the
1742 * symbol the relocation referenced, and store it in *valp.
1744 static abort_t
read_reloc_value(struct ksplice_mod_change
*change
,
1745 const struct ksplice_reloc
*r
,
1746 unsigned long addr
, unsigned long *valp
)
1748 unsigned char bytes
[sizeof(long)];
1750 const struct ksplice_reloc_howto
*howto
= r
->howto
;
1752 if (howto
->size
<= 0 || howto
->size
> sizeof(long)) {
1753 ksdebug(change
, "Aborted. Invalid relocation size.\n");
1757 if (probe_kernel_read(bytes
, (void *)addr
, howto
->size
) == -EFAULT
)
1760 switch (howto
->size
) {
1762 val
= *(uint8_t *)bytes
;
1765 val
= *(uint16_t *)bytes
;
1768 val
= *(uint32_t *)bytes
;
1770 #if BITS_PER_LONG >= 64
1772 val
= *(uint64_t *)bytes
;
1774 #endif /* BITS_PER_LONG */
1776 ksdebug(change
, "Aborted. Invalid relocation size.\n");
1780 val
&= howto
->dst_mask
;
1781 if (howto
->signed_addend
)
1782 val
|= -(val
& (howto
->dst_mask
& ~(howto
->dst_mask
>> 1)));
1783 val
<<= howto
->rightshift
;
1784 val
-= r
->insn_addend
+ r
->target_addend
;
1790 * Given a relocation, the address of its storage unit, and the
1791 * address of the symbol the relocation references, write the
1792 * relocation's final value into the storage unit.
1794 static abort_t
write_reloc_value(struct ksplice_mod_change
*change
,
1795 const struct ksplice_reloc
*r
,
1796 unsigned long addr
, unsigned long sym_addr
)
1798 unsigned long val
= sym_addr
+ r
->target_addend
+ r
->insn_addend
;
1799 const struct ksplice_reloc_howto
*howto
= r
->howto
;
1800 val
>>= howto
->rightshift
;
1801 switch (howto
->size
) {
1803 *(uint8_t *)addr
= (*(uint8_t *)addr
& ~howto
->dst_mask
) |
1804 (val
& howto
->dst_mask
);
1807 *(uint16_t *)addr
= (*(uint16_t *)addr
& ~howto
->dst_mask
) |
1808 (val
& howto
->dst_mask
);
1811 *(uint32_t *)addr
= (*(uint32_t *)addr
& ~howto
->dst_mask
) |
1812 (val
& howto
->dst_mask
);
1814 #if BITS_PER_LONG >= 64
1816 *(uint64_t *)addr
= (*(uint64_t *)addr
& ~howto
->dst_mask
) |
1817 (val
& howto
->dst_mask
);
1819 #endif /* BITS_PER_LONG */
1821 ksdebug(change
, "Aborted. Invalid relocation size.\n");
1825 if (read_reloc_value(change
, r
, addr
, &val
) != OK
|| val
!= sym_addr
) {
1826 ksdebug(change
, "Aborted. Relocation overflow.\n");
1833 static abort_t
create_module_list_entry(struct ksplice_mod_change
*change
,
1836 struct ksplice_module_list_entry
*entry
=
1837 kmalloc(sizeof(*entry
), GFP_KERNEL
);
1839 return OUT_OF_MEMORY
;
1840 entry
->new_code_mod_name
=
1841 kstrdup(change
->new_code_mod
->name
, GFP_KERNEL
);
1842 if (entry
->new_code_mod_name
== NULL
) {
1844 return OUT_OF_MEMORY
;
1846 entry
->target_mod_name
= kstrdup(change
->target_name
, GFP_KERNEL
);
1847 if (entry
->target_mod_name
== NULL
) {
1848 kfree(entry
->new_code_mod_name
);
1850 return OUT_OF_MEMORY
;
1852 /* The update's kid is guaranteed to outlast the module_list_entry */
1853 entry
->kid
= change
->update
->kid
;
1854 entry
->applied
= to_be_applied
;
1855 list_add(&entry
->update_list
, &change
->update
->ksplice_module_list
);
1859 static void cleanup_module_list_entries(struct update
*update
)
1861 struct ksplice_module_list_entry
*entry
;
1862 list_for_each_entry(entry
, &update
->ksplice_module_list
, update_list
) {
1863 kfree(entry
->target_mod_name
);
1864 kfree(entry
->new_code_mod_name
);
1866 clear_list(&update
->ksplice_module_list
,
1867 struct ksplice_module_list_entry
, update_list
);
1870 /* Replacement address used for functions deleted by the patch */
1871 static void __attribute__((noreturn
)) ksplice_deleted(void)
1873 printk(KERN_CRIT
"Called a kernel function deleted by Ksplice!\n");
1875 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
1876 /* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
1881 /* Floodfill to run-pre match the sections within a change. */
1882 static abort_t
match_change_sections(struct ksplice_mod_change
*change
,
1883 bool consider_data_sections
)
1885 struct ksplice_section
*sect
;
1890 for (sect
= change
->old_code
.sections
;
1891 sect
< change
->old_code
.sections_end
; sect
++) {
1892 if ((sect
->flags
& KSPLICE_SECTION_DATA
) == 0 &&
1893 (sect
->flags
& KSPLICE_SECTION_STRING
) == 0 &&
1894 (sect
->flags
& KSPLICE_SECTION_MATCHED
) == 0)
1898 while (remaining
> 0) {
1900 for (sect
= change
->old_code
.sections
;
1901 sect
< change
->old_code
.sections_end
; sect
++) {
1902 if ((sect
->flags
& KSPLICE_SECTION_MATCHED
) != 0)
1904 if ((!consider_data_sections
&&
1905 (sect
->flags
& KSPLICE_SECTION_DATA
) != 0) ||
1906 (sect
->flags
& KSPLICE_SECTION_STRING
) != 0)
1908 ret
= find_section(change
, sect
);
1910 sect
->flags
|= KSPLICE_SECTION_MATCHED
;
1911 if ((sect
->flags
& KSPLICE_SECTION_DATA
) == 0)
1914 } else if (ret
!= NO_MATCH
) {
1922 for (sect
= change
->old_code
.sections
;
1923 sect
< change
->old_code
.sections_end
; sect
++) {
1924 if ((sect
->flags
& KSPLICE_SECTION_MATCHED
) != 0 ||
1925 (sect
->flags
& KSPLICE_SECTION_STRING
) != 0)
1927 ksdebug(change
, "run-pre: could not match %s "
1929 (sect
->flags
& KSPLICE_SECTION_DATA
) != 0 ?
1931 (sect
->flags
& KSPLICE_SECTION_RODATA
) != 0 ?
1932 "rodata" : "text", sect
->symbol
->label
);
1934 ksdebug(change
, "Aborted. run-pre: could not match some "
1942 * Search for the section in the running kernel. Returns OK if and
1943 * only if it finds precisely one address in the kernel matching the
1946 static abort_t
find_section(struct ksplice_mod_change
*change
,
1947 struct ksplice_section
*sect
)
1951 unsigned long run_addr
;
1953 struct candidate_val
*v
, *n
;
1955 #ifdef KSPLICE_STANDALONE
1956 ret
= add_system_map_candidates(change
, change
->old_code
.system_map
,
1957 change
->old_code
.system_map_end
,
1958 sect
->symbol
->label
, &vals
);
1960 release_vals(&vals
);
1963 #endif /* KSPLICE_STANDALONE */
1964 ret
= lookup_symbol(change
, sect
->symbol
, &vals
);
1966 release_vals(&vals
);
1970 ksdebug(change
, "run-pre: starting sect search for %s\n",
1971 sect
->symbol
->label
);
1973 list_for_each_entry_safe(v
, n
, &vals
, list
) {
1977 ret
= try_addr(change
, sect
, run_addr
, NULL
, RUN_PRE_INITIAL
);
1978 if (ret
== NO_MATCH
) {
1981 } else if (ret
!= OK
) {
1982 release_vals(&vals
);
1987 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
1988 if (list_empty(&vals
) && (sect
->flags
& KSPLICE_SECTION_DATA
) == 0) {
1989 ret
= brute_search_all(change
, sect
, &vals
);
1991 release_vals(&vals
);
1995 * Make sure run-pre matching output is displayed if
1996 * brute_search succeeds.
1998 if (singular(&vals
)) {
1999 run_addr
= list_entry(vals
.next
, struct candidate_val
,
2001 ret
= try_addr(change
, sect
, run_addr
, NULL
,
2004 ksdebug(change
, "run-pre: Debug run failed for "
2005 "sect %s:\n", sect
->symbol
->label
);
2006 release_vals(&vals
);
2011 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
2013 if (singular(&vals
)) {
2014 LIST_HEAD(safety_records
);
2015 run_addr
= list_entry(vals
.next
, struct candidate_val
,
2017 ret
= try_addr(change
, sect
, run_addr
, &safety_records
,
2019 release_vals(&vals
);
2021 clear_list(&safety_records
, struct safety_record
, list
);
2022 ksdebug(change
, "run-pre: Final run failed for sect "
2023 "%s:\n", sect
->symbol
->label
);
2025 list_splice(&safety_records
, &change
->safety_records
);
2028 } else if (!list_empty(&vals
)) {
2029 struct candidate_val
*val
;
2030 ksdebug(change
, "run-pre: multiple candidates for sect %s:\n",
2031 sect
->symbol
->label
);
2033 list_for_each_entry(val
, &vals
, list
) {
2035 ksdebug(change
, "%lx\n", val
->val
);
2037 ksdebug(change
, "...\n");
2041 release_vals(&vals
);
2044 release_vals(&vals
);
2049 * try_addr is the the interface to run-pre matching. Its primary
2050 * purpose is to manage debugging information for run-pre matching;
2051 * all the hard work is in run_pre_cmp.
2053 static abort_t
try_addr(struct ksplice_mod_change
*change
,
2054 struct ksplice_section
*sect
,
2055 unsigned long run_addr
,
2056 struct list_head
*safety_records
,
2057 enum run_pre_mode mode
)
2060 const struct module
*run_module
= __module_address(run_addr
);
2062 if (run_module
== change
->new_code_mod
) {
2063 ksdebug(change
, "run-pre: unexpected address %lx in new_code "
2064 "module %s for sect %s\n", run_addr
, run_module
->name
,
2065 sect
->symbol
->label
);
2068 if (!patches_module(run_module
, change
->target
)) {
2069 ksdebug(change
, "run-pre: ignoring address %lx in other module "
2070 "%s for sect %s\n", run_addr
, run_module
== NULL
?
2071 "vmlinux" : run_module
->name
, sect
->symbol
->label
);
2075 ret
= create_labelval(change
, sect
->symbol
, run_addr
, TEMP
);
2079 #ifdef CONFIG_FUNCTION_DATA_SECTIONS
2080 ret
= run_pre_cmp(change
, sect
, run_addr
, safety_records
, mode
);
2081 #else /* !CONFIG_FUNCTION_DATA_SECTIONS */
2082 if ((sect
->flags
& KSPLICE_SECTION_TEXT
) != 0)
2083 ret
= arch_run_pre_cmp(change
, sect
, run_addr
, safety_records
,
2086 ret
= run_pre_cmp(change
, sect
, run_addr
, safety_records
, mode
);
2087 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
2088 if (ret
== NO_MATCH
&& mode
!= RUN_PRE_FINAL
) {
2089 set_temp_labelvals(change
, NOVAL
);
2090 ksdebug(change
, "run-pre: %s sect %s does not match (r_a=%lx "
2092 (sect
->flags
& KSPLICE_SECTION_RODATA
) != 0 ? "rodata" :
2093 (sect
->flags
& KSPLICE_SECTION_DATA
) != 0 ? "data" :
2094 "text", sect
->symbol
->label
, run_addr
, sect
->address
,
2096 ksdebug(change
, "run-pre: ");
2097 if (change
->update
->debug
>= 1) {
2098 #ifdef CONFIG_FUNCTION_DATA_SECTIONS
2099 ret
= run_pre_cmp(change
, sect
, run_addr
,
2100 safety_records
, RUN_PRE_DEBUG
);
2101 #else /* !CONFIG_FUNCTION_DATA_SECTIONS */
2102 if ((sect
->flags
& KSPLICE_SECTION_TEXT
) != 0)
2103 ret
= arch_run_pre_cmp(change
, sect
, run_addr
,
2107 ret
= run_pre_cmp(change
, sect
, run_addr
,
2110 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
2111 set_temp_labelvals(change
, NOVAL
);
2113 ksdebug(change
, "\n");
2115 } else if (ret
!= OK
) {
2116 set_temp_labelvals(change
, NOVAL
);
2120 if (mode
!= RUN_PRE_FINAL
) {
2121 set_temp_labelvals(change
, NOVAL
);
2122 ksdebug(change
, "run-pre: candidate for sect %s=%lx\n",
2123 sect
->symbol
->label
, run_addr
);
2127 set_temp_labelvals(change
, VAL
);
2128 ksdebug(change
, "run-pre: found sect %s=%lx\n", sect
->symbol
->label
,
2134 * run_pre_cmp is the primary run-pre matching function; it determines
2135 * whether the given ksplice_section matches the code or data in the
2136 * running kernel starting at run_addr.
2138 * If run_pre_mode is RUN_PRE_FINAL, a safety record for the matched
2139 * section is created.
2141 * The run_pre_mode is also used to determine what debugging
2142 * information to display.
2144 static abort_t
run_pre_cmp(struct ksplice_mod_change
*change
,
2145 const struct ksplice_section
*sect
,
2146 unsigned long run_addr
,
2147 struct list_head
*safety_records
,
2148 enum run_pre_mode mode
)
2152 const struct ksplice_reloc
*r
, *finger
;
2153 const unsigned char *pre
, *run
, *pre_start
, *run_start
;
2154 unsigned char runval
;
2156 pre_start
= (const unsigned char *)sect
->address
;
2157 run_start
= (const unsigned char *)run_addr
;
2159 finger
= init_reloc_search(change
, sect
);
2163 while (pre
< pre_start
+ sect
->size
) {
2164 unsigned long offset
= pre
- pre_start
;
2165 ret
= lookup_reloc(change
, &finger
, (unsigned long)pre
, &r
);
2167 ret
= handle_reloc(change
, sect
, r
, (unsigned long)run
,
2170 if (mode
== RUN_PRE_INITIAL
)
2171 ksdebug(change
, "reloc in sect does "
2172 "not match after %lx/%lx "
2173 "bytes\n", offset
, sect
->size
);
2176 if (mode
== RUN_PRE_DEBUG
)
2177 print_bytes(change
, run
, r
->howto
->size
, pre
,
2179 pre
+= r
->howto
->size
;
2180 run
+= r
->howto
->size
;
2183 } else if (ret
!= NO_MATCH
) {
2187 if ((sect
->flags
& KSPLICE_SECTION_TEXT
) != 0) {
2188 ret
= handle_paravirt(change
, (unsigned long)pre
,
2189 (unsigned long)run
, &matched
);
2193 if (mode
== RUN_PRE_DEBUG
)
2194 print_bytes(change
, run
, matched
, pre
,
2202 if (probe_kernel_read(&runval
, (void *)run
, 1) == -EFAULT
) {
2203 if (mode
== RUN_PRE_INITIAL
)
2204 ksdebug(change
, "sect unmapped after %lx/%lx "
2205 "bytes\n", offset
, sect
->size
);
2209 if (runval
!= *pre
&&
2210 (sect
->flags
& KSPLICE_SECTION_DATA
) == 0) {
2211 if (mode
== RUN_PRE_INITIAL
)
2212 ksdebug(change
, "sect does not match after "
2213 "%lx/%lx bytes\n", offset
, sect
->size
);
2214 if (mode
== RUN_PRE_DEBUG
) {
2215 print_bytes(change
, run
, 1, pre
, 1);
2216 ksdebug(change
, "[p_o=%lx] ! ", offset
);
2217 print_bytes(change
, run
+ 1, 2, pre
+ 1, 2);
2221 if (mode
== RUN_PRE_DEBUG
)
2222 print_bytes(change
, run
, 1, pre
, 1);
2226 return create_safety_record(change
, sect
, safety_records
, run_addr
,
2230 static void print_bytes(struct ksplice_mod_change
*change
,
2231 const unsigned char *run
, int runc
,
2232 const unsigned char *pre
, int prec
)
2235 int matched
= min(runc
, prec
);
2236 for (o
= 0; o
< matched
; o
++) {
2237 if (run
[o
] == pre
[o
])
2238 ksdebug(change
, "%02x ", run
[o
]);
2240 ksdebug(change
, "%02x/%02x ", run
[o
], pre
[o
]);
2242 for (o
= matched
; o
< runc
; o
++)
2243 ksdebug(change
, "%02x/ ", run
[o
]);
2244 for (o
= matched
; o
< prec
; o
++)
2245 ksdebug(change
, "/%02x ", pre
[o
]);
2248 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
2249 static abort_t
brute_search(struct ksplice_mod_change
*change
,
2250 struct ksplice_section
*sect
,
2251 const void *start
, unsigned long len
,
2252 struct list_head
*vals
)
2258 for (addr
= (unsigned long)start
; addr
< (unsigned long)start
+ len
;
2260 if (addr
% 100000 == 0)
2263 if (probe_kernel_read(&run
, (void *)addr
, 1) == -EFAULT
)
2266 pre
= *(const unsigned char *)(sect
->address
);
2271 ret
= try_addr(change
, sect
, addr
, NULL
, RUN_PRE_INITIAL
);
2273 ret
= add_candidate_val(change
, vals
, addr
);
2276 } else if (ret
!= NO_MATCH
) {
2284 extern struct list_head modules
;
2286 static abort_t
brute_search_all(struct ksplice_mod_change
*change
,
2287 struct ksplice_section
*sect
,
2288 struct list_head
*vals
)
2294 ksdebug(change
, "brute_search: searching for %s\n",
2295 sect
->symbol
->label
);
2296 saved_debug
= change
->update
->debug
;
2297 change
->update
->debug
= 0;
2299 list_for_each_entry(m
, &modules
, list
) {
2300 if (!patches_module(m
, change
->target
) ||
2301 m
== change
->new_code_mod
)
2303 ret
= brute_search(change
, sect
, m
->module_core
, m
->core_size
,
2307 ret
= brute_search(change
, sect
, m
->module_init
, m
->init_size
,
2313 ret
= brute_search(change
, sect
, (const void *)init_mm
.start_code
,
2314 init_mm
.end_code
- init_mm
.start_code
, vals
);
2317 change
->update
->debug
= saved_debug
;
2320 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
2323 unsigned long address
;
2327 static int reloc_bsearch_compare(const void *key
, const void *elt
)
2329 const struct range
*range
= key
;
2330 const struct ksplice_reloc
*r
= elt
;
2331 if (range
->address
+ range
->size
<= r
->blank_addr
)
2333 if (range
->address
> r
->blank_addr
)
2338 static const struct ksplice_reloc
*find_reloc(const struct ksplice_reloc
*start
,
2339 const struct ksplice_reloc
*end
,
2340 unsigned long address
,
2343 const struct ksplice_reloc
*r
;
2344 struct range range
= { address
, size
};
2345 r
= bsearch((void *)&range
, start
, end
- start
, sizeof(*r
),
2346 reloc_bsearch_compare
);
2349 while (r
> start
&& (r
- 1)->blank_addr
>= address
)
2354 static const struct ksplice_reloc
*
2355 init_reloc_search(struct ksplice_mod_change
*change
,
2356 const struct ksplice_section
*sect
)
2358 const struct ksplice_reloc
*r
;
2359 r
= find_reloc(change
->old_code
.relocs
, change
->old_code
.relocs_end
,
2360 sect
->address
, sect
->size
);
2362 return change
->old_code
.relocs_end
;
2367 * lookup_reloc implements an amortized O(1) lookup for the next
2368 * old_code relocation. It must be called with a strictly increasing
2369 * sequence of addresses.
2371 * The fingerp is private data for lookup_reloc, and needs to have
2372 * been initialized as a pointer to the result of find_reloc (or
2373 * init_reloc_search).
2375 static abort_t
lookup_reloc(struct ksplice_mod_change
*change
,
2376 const struct ksplice_reloc
**fingerp
,
2378 const struct ksplice_reloc
**relocp
)
2380 const struct ksplice_reloc
*r
= *fingerp
;
2383 while (r
< change
->old_code
.relocs_end
&&
2384 addr
>= r
->blank_addr
+ r
->howto
->size
&&
2385 !(addr
== r
->blank_addr
&& r
->howto
->size
== 0))
2388 if (r
== change
->old_code
.relocs_end
)
2390 if (addr
< r
->blank_addr
)
2393 if (r
->howto
->type
!= KSPLICE_HOWTO_RELOC
)
2396 canary_ret
= contains_canary(change
, r
->blank_addr
, r
->howto
);
2399 if (canary_ret
== 0) {
2400 ksdebug(change
, "run-pre: reloc skipped at p_a=%lx to %s+%lx "
2401 "(altinstr)\n", r
->blank_addr
, r
->symbol
->label
,
2405 if (addr
!= r
->blank_addr
) {
2406 ksdebug(change
, "Invalid nonzero relocation offset\n");
2412 static abort_t
handle_reloc(struct ksplice_mod_change
*change
,
2413 const struct ksplice_section
*sect
,
2414 const struct ksplice_reloc
*r
,
2415 unsigned long run_addr
, enum run_pre_mode mode
)
2417 switch (r
->howto
->type
) {
2418 case KSPLICE_HOWTO_RELOC
:
2419 return handle_howto_reloc(change
, sect
, r
, run_addr
, mode
);
2420 case KSPLICE_HOWTO_DATE
:
2421 case KSPLICE_HOWTO_TIME
:
2422 return handle_howto_date(change
, sect
, r
, run_addr
, mode
);
2423 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
2425 case KSPLICE_HOWTO_BUG
:
2426 return handle_bug(change
, r
, run_addr
);
2427 #endif /* CONFIG_BUG */
2428 #else /* LINUX_VERSION_CODE < */
2429 /* 7664c5a1da4711bb6383117f51b94c8dc8f3f1cd was after 2.6.19 */
2430 #endif /* LINUX_VERSION_CODE */
2431 case KSPLICE_HOWTO_EXTABLE
:
2432 return handle_extable(change
, r
, run_addr
);
2434 ksdebug(change
, "Unexpected howto type %d\n", r
->howto
->type
);
2440 * For date/time relocations, we check that the sequence of bytes
2441 * matches the format of a date or time.
2443 static abort_t
handle_howto_date(struct ksplice_mod_change
*change
,
2444 const struct ksplice_section
*sect
,
2445 const struct ksplice_reloc
*r
,
2446 unsigned long run_addr
, enum run_pre_mode mode
)
2449 char *buf
= kmalloc(r
->howto
->size
, GFP_KERNEL
);
2452 return OUT_OF_MEMORY
;
2453 if (probe_kernel_read(buf
, (void *)run_addr
, r
->howto
->size
) == -EFAULT
) {
2458 switch (r
->howto
->type
) {
2459 case KSPLICE_HOWTO_TIME
:
2460 if (isdigit(buf
[0]) && isdigit(buf
[1]) && buf
[2] == ':' &&
2461 isdigit(buf
[3]) && isdigit(buf
[4]) && buf
[5] == ':' &&
2462 isdigit(buf
[6]) && isdigit(buf
[7]))
2467 case KSPLICE_HOWTO_DATE
:
2468 if (isalpha(buf
[0]) && isalpha(buf
[1]) && isalpha(buf
[2]) &&
2469 buf
[3] == ' ' && (buf
[4] == ' ' || isdigit(buf
[4])) &&
2470 isdigit(buf
[5]) && buf
[6] == ' ' && isdigit(buf
[7]) &&
2471 isdigit(buf
[8]) && isdigit(buf
[9]) && isdigit(buf
[10]))
2479 if (ret
== NO_MATCH
&& mode
== RUN_PRE_INITIAL
)
2480 ksdebug(change
, "%s string: \"%.*s\" does not match format\n",
2481 r
->howto
->type
== KSPLICE_HOWTO_DATE
? "date" : "time",
2482 r
->howto
->size
, buf
);
2486 ret
= create_labelval(change
, r
->symbol
, run_addr
, TEMP
);
2493 * Extract the value of a symbol used in a relocation in the pre code
2494 * during run-pre matching, giving an error if it conflicts with a
2495 * previously found value of that symbol
2497 static abort_t
handle_howto_reloc(struct ksplice_mod_change
*change
,
2498 const struct ksplice_section
*sect
,
2499 const struct ksplice_reloc
*r
,
2500 unsigned long run_addr
,
2501 enum run_pre_mode mode
)
2503 struct ksplice_section
*sym_sect
= symbol_section(change
, r
->symbol
);
2504 unsigned long offset
= r
->target_addend
;
2508 ret
= read_reloc_value(change
, r
, run_addr
, &val
);
2511 if (r
->howto
->pcrel
)
2514 #ifndef CONFIG_FUNCTION_DATA_SECTIONS
2515 if (sym_sect
== NULL
|| sym_sect
->match_map
== NULL
|| offset
== 0) {
2517 } else if (offset
< 0 || offset
>= sym_sect
->size
) {
2518 ksdebug(change
, "Out of range relocation: %s+%lx -> %s+%lx",
2519 sect
->symbol
->label
, r
->blank_addr
- sect
->address
,
2520 r
->symbol
->label
, offset
);
2522 } else if (sect
== sym_sect
&& sect
->match_map
[offset
] == NULL
) {
2523 sym_sect
->match_map
[offset
] =
2524 (const unsigned char *)r
->symbol
->value
+ offset
;
2525 } else if (sect
== sym_sect
&& (unsigned long)sect
->match_map
[offset
] ==
2526 r
->symbol
->value
+ offset
) {
2528 } else if (sect
== sym_sect
) {
2529 ksdebug(change
, "Relocations to nonmatching locations within "
2530 "section %s: %lx does not match %lx\n",
2531 sect
->symbol
->label
, offset
,
2532 (unsigned long)sect
->match_map
[offset
] -
2535 } else if ((sym_sect
->flags
& KSPLICE_SECTION_MATCHED
) == 0) {
2536 if (mode
== RUN_PRE_INITIAL
)
2537 ksdebug(change
, "Delaying matching of %s due to reloc "
2538 "from to unmatching section: %s+%lx\n",
2539 sect
->symbol
->label
, r
->symbol
->label
, offset
);
2541 } else if (sym_sect
->match_map
[offset
] == NULL
) {
2542 if (mode
== RUN_PRE_INITIAL
)
2543 ksdebug(change
, "Relocation not to instruction "
2544 "boundary: %s+%lx -> %s+%lx",
2545 sect
->symbol
->label
, r
->blank_addr
-
2546 sect
->address
, r
->symbol
->label
, offset
);
2548 } else if ((unsigned long)sym_sect
->match_map
[offset
] !=
2549 r
->symbol
->value
+ offset
) {
2550 if (mode
== RUN_PRE_INITIAL
)
2551 ksdebug(change
, "Match map shift %s+%lx: %lx != %lx\n",
2552 r
->symbol
->label
, offset
,
2553 r
->symbol
->value
+ offset
,
2554 (unsigned long)sym_sect
->match_map
[offset
]);
2555 val
+= r
->symbol
->value
+ offset
-
2556 (unsigned long)sym_sect
->match_map
[offset
];
2558 #endif /* !CONFIG_FUNCTION_DATA_SECTIONS */
2560 if (mode
== RUN_PRE_INITIAL
)
2561 ksdebug(change
, "run-pre: reloc at r_a=%lx p_a=%lx to %s+%lx: "
2562 "found %s = %lx\n", run_addr
, r
->blank_addr
,
2563 r
->symbol
->label
, offset
, r
->symbol
->label
, val
);
2565 if (contains_canary(change
, run_addr
, r
->howto
) != 0) {
2566 ksdebug(change
, "Aborted. Unexpected canary in run code at %lx"
2571 if ((sect
->flags
& KSPLICE_SECTION_DATA
) != 0 &&
2572 sect
->symbol
== r
->symbol
)
2574 ret
= create_labelval(change
, r
->symbol
, val
, TEMP
);
2575 if (ret
== NO_MATCH
&& mode
== RUN_PRE_INITIAL
)
2576 ksdebug(change
, "run-pre: reloc at r_a=%lx p_a=%lx: labelval "
2577 "%s = %lx does not match expected %lx\n", run_addr
,
2578 r
->blank_addr
, r
->symbol
->label
, r
->symbol
->value
, val
);
2582 if (sym_sect
!= NULL
&& (sym_sect
->flags
& KSPLICE_SECTION_MATCHED
) == 0
2583 && (sym_sect
->flags
& KSPLICE_SECTION_STRING
) != 0) {
2584 if (mode
== RUN_PRE_INITIAL
)
2585 ksdebug(change
, "Recursively comparing string section "
2586 "%s\n", sym_sect
->symbol
->label
);
2587 else if (mode
== RUN_PRE_DEBUG
)
2588 ksdebug(change
, "[str start] ");
2589 ret
= run_pre_cmp(change
, sym_sect
, val
, NULL
, mode
);
2590 if (mode
== RUN_PRE_DEBUG
)
2591 ksdebug(change
, "[str end] ");
2592 if (ret
== OK
&& mode
== RUN_PRE_INITIAL
)
2593 ksdebug(change
, "Successfully matched string section %s"
2594 "\n", sym_sect
->symbol
->label
);
2595 else if (mode
== RUN_PRE_INITIAL
)
2596 ksdebug(change
, "Failed to match string section %s\n",
2597 sym_sect
->symbol
->label
);
2602 #ifdef CONFIG_GENERIC_BUG
2603 static abort_t
handle_bug(struct ksplice_mod_change
*change
,
2604 const struct ksplice_reloc
*r
, unsigned long run_addr
)
2606 const struct bug_entry
*run_bug
= find_bug(run_addr
);
2607 struct ksplice_section
*bug_sect
= symbol_section(change
, r
->symbol
);
2608 if (run_bug
== NULL
)
2610 if (bug_sect
== NULL
)
2612 return create_labelval(change
, bug_sect
->symbol
, (unsigned long)run_bug
,
2615 #endif /* CONFIG_GENERIC_BUG */
2617 static abort_t
handle_extable(struct ksplice_mod_change
*change
,
2618 const struct ksplice_reloc
*r
,
2619 unsigned long run_addr
)
2621 const struct exception_table_entry
*run_ent
=
2622 search_exception_tables(run_addr
);
2623 struct ksplice_section
*ex_sect
= symbol_section(change
, r
->symbol
);
2624 if (run_ent
== NULL
)
2626 if (ex_sect
== NULL
)
2628 return create_labelval(change
, ex_sect
->symbol
, (unsigned long)run_ent
,
2632 static int symbol_section_bsearch_compare(const void *a
, const void *b
)
2634 const struct ksplice_symbol
*sym
= a
;
2635 const struct ksplice_section
*sect
= b
;
2636 return strcmp(sym
->label
, sect
->symbol
->label
);
2639 static int compare_section_labels(const void *va
, const void *vb
)
2641 const struct ksplice_section
*a
= va
, *b
= vb
;
2642 return strcmp(a
->symbol
->label
, b
->symbol
->label
);
2645 static struct ksplice_section
*symbol_section(struct ksplice_mod_change
*change
,
2646 const struct ksplice_symbol
*sym
)
2648 return bsearch(sym
, change
->old_code
.sections
,
2649 change
->old_code
.sections_end
-
2650 change
->old_code
.sections
,
2651 sizeof(struct ksplice_section
),
2652 symbol_section_bsearch_compare
);
2655 /* Find the relocation for the oldaddr of a ksplice_patch */
2656 static const struct ksplice_reloc
*
2657 patch_reloc(struct ksplice_mod_change
*change
,
2658 const struct ksplice_patch
*p
)
2660 unsigned long addr
= (unsigned long)&p
->oldaddr
;
2661 const struct ksplice_reloc
*r
=
2662 find_reloc(change
->new_code
.relocs
, change
->new_code
.relocs_end
,
2663 addr
, sizeof(addr
));
2664 if (r
== NULL
|| r
->blank_addr
< addr
||
2665 r
->blank_addr
>= addr
+ sizeof(addr
))
2671 * Populates vals with the possible values for ksym from the various
2672 * sources Ksplice uses to resolve symbols
2674 static abort_t
lookup_symbol(struct ksplice_mod_change
*change
,
2675 const struct ksplice_symbol
*ksym
,
2676 struct list_head
*vals
)
2680 #ifdef KSPLICE_STANDALONE
2683 #endif /* KSPLICE_STANDALONE */
2685 if (ksym
->candidate_vals
== NULL
) {
2687 ksdebug(change
, "using detected sym %s=%lx\n", ksym
->label
,
2689 return add_candidate_val(change
, vals
, ksym
->value
);
2692 #ifdef CONFIG_MODULE_UNLOAD
2693 if (strcmp(ksym
->label
, "cleanup_module") == 0 && change
->target
!= NULL
2694 && change
->target
->exit
!= NULL
) {
2695 ret
= add_candidate_val(change
, vals
,
2696 (unsigned long)change
->target
->exit
);
2702 if (ksym
->name
!= NULL
) {
2703 struct candidate_val
*val
;
2704 list_for_each_entry(val
, ksym
->candidate_vals
, list
) {
2705 ret
= add_candidate_val(change
, vals
, val
->val
);
2710 ret
= new_export_lookup(change
, ksym
->name
, vals
);
2718 #ifdef KSPLICE_STANDALONE
2720 add_system_map_candidates(struct ksplice_mod_change
*change
,
2721 const struct ksplice_system_map
*start
,
2722 const struct ksplice_system_map
*end
,
2723 const char *label
, struct list_head
*vals
)
2728 const struct ksplice_system_map
*smap
;
2730 /* Some Fedora kernel releases have System.map files whose symbol
2731 * addresses disagree with the running kernel by a constant address
2732 * offset because of the CONFIG_PHYSICAL_START and CONFIG_PHYSICAL_ALIGN
2733 * values used to compile these kernels. This constant address offset
2734 * is always a multiple of 0x100000.
2736 * If we observe an offset that is NOT a multiple of 0x100000, then the
2737 * user provided us with an incorrect System.map file, and we should
2739 * If we observe an offset that is a multiple of 0x100000, then we can
2740 * adjust the System.map address values accordingly and proceed.
2742 off
= (unsigned long)printk
- change
->map_printk
;
2743 if (off
& 0xfffff) {
2745 "Aborted. System.map does not match kernel.\n");
2746 return BAD_SYSTEM_MAP
;
2749 smap
= bsearch(label
, start
, end
- start
, sizeof(*smap
),
2750 system_map_bsearch_compare
);
2754 for (i
= 0; i
< smap
->nr_candidates
; i
++) {
2755 ret
= add_candidate_val(change
, vals
,
2756 smap
->candidates
[i
] + off
);
2763 static int system_map_bsearch_compare(const void *key
, const void *elt
)
2765 const struct ksplice_system_map
*map
= elt
;
2766 const char *label
= key
;
2767 return strcmp(label
, map
->label
);
2769 #endif /* !KSPLICE_STANDALONE */
2772 * An update could one module to export a symbol and at the same time
2773 * change another module to use that symbol. This violates the normal
2774 * situation where the changes can be handled independently.
2776 * new_export_lookup obtains symbol values from the changes to the
2777 * exported symbol table made by other changes.
2779 static abort_t
new_export_lookup(struct ksplice_mod_change
*ichange
,
2780 const char *name
, struct list_head
*vals
)
2782 struct ksplice_mod_change
*change
;
2783 struct ksplice_patch
*p
;
2784 list_for_each_entry(change
, &ichange
->update
->changes
, list
) {
2785 for (p
= change
->patches
; p
< change
->patches_end
; p
++) {
2786 const struct kernel_symbol
*sym
;
2787 const struct ksplice_reloc
*r
;
2788 if (p
->type
!= KSPLICE_PATCH_EXPORT
||
2789 strcmp(name
, *(const char **)p
->contents
) != 0)
2792 /* Check that the p->oldaddr reloc has been resolved. */
2793 r
= patch_reloc(change
, p
);
2795 contains_canary(change
, r
->blank_addr
,
2798 sym
= (const struct kernel_symbol
*)r
->symbol
->value
;
2801 * Check that the sym->value reloc has been resolved,
2802 * if there is a Ksplice relocation there.
2804 r
= find_reloc(change
->new_code
.relocs
,
2805 change
->new_code
.relocs_end
,
2806 (unsigned long)&sym
->value
,
2807 sizeof(&sym
->value
));
2809 r
->blank_addr
== (unsigned long)&sym
->value
&&
2810 contains_canary(change
, r
->blank_addr
,
2813 return add_candidate_val(ichange
, vals
, sym
->value
);
2820 * When patch_action is called, the update should be fully prepared.
2821 * patch_action will try to actually insert or remove trampolines for
2824 static abort_t
patch_action(struct update
*update
, enum ksplice_action action
)
2826 static int (*const __patch_actions
[KS_ACTIONS
])(void *) = {
2827 [KS_APPLY
] = __apply_patches
,
2828 [KS_REVERSE
] = __reverse_patches
,
2832 struct ksplice_mod_change
*change
;
2834 ret
= map_trampoline_pages(update
);
2838 list_for_each_entry(change
, &update
->changes
, list
) {
2839 const typeof(int (*)(void)) *f
;
2840 for (f
= change
->hooks
[action
].pre
;
2841 f
< change
->hooks
[action
].pre_end
; f
++) {
2849 for (i
= 0; i
< 5; i
++) {
2850 cleanup_conflicts(update
);
2851 #ifdef KSPLICE_STANDALONE
2853 #endif /* KSPLICE_STANDALONE */
2854 ret
= (__force abort_t
)stop_machine(__patch_actions
[action
],
2856 #ifdef KSPLICE_STANDALONE
2858 #endif /* KSPLICE_STANDALONE */
2859 if (ret
!= CODE_BUSY
)
2861 set_current_state(TASK_INTERRUPTIBLE
);
2862 schedule_timeout(msecs_to_jiffies(1000));
2865 unmap_trampoline_pages(update
);
2867 if (ret
== CODE_BUSY
) {
2868 print_conflicts(update
);
2869 _ksdebug(update
, "Aborted %s. stack check: to-be-%s "
2870 "code is busy.\n", update
->kid
,
2871 action
== KS_APPLY
? "replaced" : "reversed");
2872 } else if (ret
== ALREADY_REVERSED
) {
2873 _ksdebug(update
, "Aborted %s. Ksplice update %s is already "
2874 "reversed.\n", update
->kid
, update
->kid
);
2875 } else if (ret
== MODULE_BUSY
) {
2876 _ksdebug(update
, "Update %s is in use by another module\n",
2881 list_for_each_entry(change
, &update
->changes
, list
) {
2882 const typeof(void (*)(void)) *f
;
2883 for (f
= change
->hooks
[action
].fail
;
2884 f
< change
->hooks
[action
].fail_end
; f
++)
2891 list_for_each_entry(change
, &update
->changes
, list
) {
2892 const typeof(void (*)(void)) *f
;
2893 for (f
= change
->hooks
[action
].post
;
2894 f
< change
->hooks
[action
].post_end
; f
++)
2898 _ksdebug(update
, "Atomic patch %s for %s complete\n",
2899 action
== KS_APPLY
? "insertion" : "removal", update
->kid
);
2903 /* Atomically insert the update; run from within stop_machine */
2904 static int __apply_patches(void *updateptr
)
2906 struct update
*update
= updateptr
;
2907 struct ksplice_mod_change
*change
;
2908 struct ksplice_module_list_entry
*entry
;
2909 struct ksplice_patch
*p
;
2912 if (update
->stage
== STAGE_APPLIED
)
2913 return (__force
int)OK
;
2915 if (update
->stage
!= STAGE_PREPARING
)
2916 return (__force
int)UNEXPECTED
;
2918 ret
= check_each_task(update
);
2920 return (__force
int)ret
;
2922 list_for_each_entry(change
, &update
->changes
, list
) {
2923 if (try_module_get(change
->new_code_mod
) != 1) {
2924 struct ksplice_mod_change
*change1
;
2925 list_for_each_entry(change1
, &update
->changes
, list
) {
2926 if (change1
== change
)
2928 module_put(change1
->new_code_mod
);
2930 module_put(THIS_MODULE
);
2931 return (__force
int)UNEXPECTED
;
2935 list_for_each_entry(change
, &update
->changes
, list
) {
2936 const typeof(int (*)(void)) *f
;
2937 for (f
= change
->hooks
[KS_APPLY
].check
;
2938 f
< change
->hooks
[KS_APPLY
].check_end
; f
++) {
2940 return (__force
int)CALL_FAILED
;
2944 /* Commit point: the update application will succeed. */
2946 update
->stage
= STAGE_APPLIED
;
2947 #ifdef TAINT_KSPLICE
2948 add_taint(TAINT_KSPLICE
);
2951 list_for_each_entry(entry
, &update
->ksplice_module_list
, update_list
)
2952 list_add(&entry
->list
, &ksplice_modules
);
2954 list_for_each_entry(change
, &update
->changes
, list
) {
2955 for (p
= change
->patches
; p
< change
->patches_end
; p
++)
2956 insert_trampoline(p
);
2959 list_for_each_entry(change
, &update
->changes
, list
) {
2960 const typeof(void (*)(void)) *f
;
2961 for (f
= change
->hooks
[KS_APPLY
].intra
;
2962 f
< change
->hooks
[KS_APPLY
].intra_end
; f
++)
2966 return (__force
int)OK
;
2969 /* Atomically remove the update; run from within stop_machine */
2970 static int __reverse_patches(void *updateptr
)
2972 struct update
*update
= updateptr
;
2973 struct ksplice_mod_change
*change
;
2974 struct ksplice_module_list_entry
*entry
;
2975 const struct ksplice_patch
*p
;
2978 if (update
->stage
!= STAGE_APPLIED
)
2979 return (__force
int)OK
;
2981 #ifdef CONFIG_MODULE_UNLOAD
2982 list_for_each_entry(change
, &update
->changes
, list
) {
2983 if (module_refcount(change
->new_code_mod
) != 1)
2984 return (__force
int)MODULE_BUSY
;
2986 #endif /* CONFIG_MODULE_UNLOAD */
2988 list_for_each_entry(entry
, &update
->ksplice_module_list
, update_list
) {
2989 if (!entry
->applied
&&
2990 find_module(entry
->target_mod_name
) != NULL
)
2991 return COLD_UPDATE_LOADED
;
2994 ret
= check_each_task(update
);
2996 return (__force
int)ret
;
2998 list_for_each_entry(change
, &update
->changes
, list
) {
2999 for (p
= change
->patches
; p
< change
->patches_end
; p
++) {
3000 ret
= verify_trampoline(change
, p
);
3002 return (__force
int)ret
;
3006 list_for_each_entry(change
, &update
->changes
, list
) {
3007 const typeof(int (*)(void)) *f
;
3008 for (f
= change
->hooks
[KS_REVERSE
].check
;
3009 f
< change
->hooks
[KS_REVERSE
].check_end
; f
++) {
3011 return (__force
int)CALL_FAILED
;
3015 /* Commit point: the update reversal will succeed. */
3017 update
->stage
= STAGE_REVERSED
;
3019 list_for_each_entry(change
, &update
->changes
, list
)
3020 module_put(change
->new_code_mod
);
3022 list_for_each_entry(entry
, &update
->ksplice_module_list
, update_list
)
3023 list_del(&entry
->list
);
3025 list_for_each_entry(change
, &update
->changes
, list
) {
3026 const typeof(void (*)(void)) *f
;
3027 for (f
= change
->hooks
[KS_REVERSE
].intra
;
3028 f
< change
->hooks
[KS_REVERSE
].intra_end
; f
++)
3032 list_for_each_entry(change
, &update
->changes
, list
) {
3033 for (p
= change
->patches
; p
< change
->patches_end
; p
++)
3034 remove_trampoline(p
);
3037 return (__force
int)OK
;
3041 * Check whether any thread's instruction pointer or any address of
3042 * its stack is contained in one of the safety_records associated with
3045 * check_each_task must be called from inside stop_machine, because it
3046 * does not take tasklist_lock (which cannot be held by anyone else
3047 * during stop_machine).
3049 static abort_t
check_each_task(struct update
*update
)
3051 const struct task_struct
*g
, *p
;
3052 abort_t status
= OK
, ret
;
3053 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
3054 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
3055 read_lock(&tasklist_lock
);
3056 #endif /* LINUX_VERSION_CODE */
3057 do_each_thread(g
, p
) {
3058 /* do_each_thread is a double loop! */
3059 ret
= check_task(update
, p
, false);
3061 check_task(update
, p
, true);
3064 if (ret
!= OK
&& ret
!= CODE_BUSY
)
3065 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
3066 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
3068 #else /* LINUX_VERSION_CODE < */
3070 #endif /* LINUX_VERSION_CODE */
3071 } while_each_thread(g
, p
);
3072 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
3073 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
3075 read_unlock(&tasklist_lock
);
3076 #endif /* LINUX_VERSION_CODE */
3080 static abort_t
check_task(struct update
*update
,
3081 const struct task_struct
*t
, bool rerun
)
3083 abort_t status
, ret
;
3084 struct conflict
*conf
= NULL
;
3087 conf
= kmalloc(sizeof(*conf
), GFP_ATOMIC
);
3089 return OUT_OF_MEMORY
;
3090 conf
->process_name
= kstrdup(t
->comm
, GFP_ATOMIC
);
3091 if (conf
->process_name
== NULL
) {
3093 return OUT_OF_MEMORY
;
3096 INIT_LIST_HEAD(&conf
->stack
);
3097 list_add(&conf
->list
, &update
->conflicts
);
3100 status
= check_address(update
, conf
, KSPLICE_IP(t
));
3102 ret
= check_stack(update
, conf
, task_thread_info(t
),
3103 (unsigned long *)__builtin_frame_address(0));
3106 } else if (!task_curr(t
)) {
3107 ret
= check_stack(update
, conf
, task_thread_info(t
),
3108 (unsigned long *)KSPLICE_SP(t
));
3111 } else if (!is_stop_machine(t
)) {
3112 status
= UNEXPECTED_RUNNING_TASK
;
3117 static abort_t
check_stack(struct update
*update
, struct conflict
*conf
,
3118 const struct thread_info
*tinfo
,
3119 const unsigned long *stack
)
3121 abort_t status
= OK
, ret
;
3124 while (valid_stack_ptr(tinfo
, stack
)) {
3126 ret
= check_address(update
, conf
, addr
);
3133 static abort_t
check_address(struct update
*update
,
3134 struct conflict
*conf
, unsigned long addr
)
3136 abort_t status
= OK
, ret
;
3137 const struct safety_record
*rec
;
3138 struct ksplice_mod_change
*change
;
3139 struct conflict_addr
*ca
= NULL
;
3142 ca
= kmalloc(sizeof(*ca
), GFP_ATOMIC
);
3144 return OUT_OF_MEMORY
;
3146 ca
->has_conflict
= false;
3148 list_add(&ca
->list
, &conf
->stack
);
3151 list_for_each_entry(change
, &update
->changes
, list
) {
3152 unsigned long tramp_addr
= follow_trampolines(change
, addr
);
3153 list_for_each_entry(rec
, &change
->safety_records
, list
) {
3154 ret
= check_record(ca
, rec
, tramp_addr
);
3162 static abort_t
check_record(struct conflict_addr
*ca
,
3163 const struct safety_record
*rec
, unsigned long addr
)
3165 if (addr
>= rec
->addr
&& addr
< rec
->addr
+ rec
->size
) {
3167 ca
->label
= rec
->label
;
3168 ca
->has_conflict
= true;
3175 /* Is the task one of the stop_machine tasks? */
3176 static bool is_stop_machine(const struct task_struct
*t
)
3178 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
3179 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
3180 const char *kstop_prefix
= "kstop/";
3181 #else /* LINUX_VERSION_CODE < */
3182 /* c9583e55fa2b08a230c549bd1e3c0bde6c50d9cc was after 2.6.27 */
3183 const char *kstop_prefix
= "kstop";
3184 #endif /* LINUX_VERSION_CODE */
3186 if (!strstarts(t
->comm
, kstop_prefix
))
3188 num
= t
->comm
+ strlen(kstop_prefix
);
3189 return num
[strspn(num
, "0123456789")] == '\0';
3190 #else /* LINUX_VERSION_CODE < */
3191 /* ffdb5976c47609c862917d4c186ecbb5706d2dda was after 2.6.26 */
3192 return strcmp(t
->comm
, "kstopmachine") == 0;
3193 #endif /* LINUX_VERSION_CODE */
3196 static void cleanup_conflicts(struct update
*update
)
3198 struct conflict
*conf
;
3199 list_for_each_entry(conf
, &update
->conflicts
, list
) {
3200 clear_list(&conf
->stack
, struct conflict_addr
, list
);
3201 kfree(conf
->process_name
);
3203 clear_list(&update
->conflicts
, struct conflict
, list
);
3206 static void print_conflicts(struct update
*update
)
3208 const struct conflict
*conf
;
3209 const struct conflict_addr
*ca
;
3210 list_for_each_entry(conf
, &update
->conflicts
, list
) {
3211 _ksdebug(update
, "stack check: pid %d (%s):", conf
->pid
,
3212 conf
->process_name
);
3213 list_for_each_entry(ca
, &conf
->stack
, list
) {
3214 _ksdebug(update
, " %lx", ca
->addr
);
3215 if (ca
->has_conflict
)
3216 _ksdebug(update
, " [<-CONFLICT]");
3218 _ksdebug(update
, "\n");
3222 static void insert_trampoline(struct ksplice_patch
*p
)
3224 mm_segment_t old_fs
= get_fs();
3226 memcpy(p
->saved
, p
->vaddr
, p
->size
);
3227 memcpy(p
->vaddr
, p
->contents
, p
->size
);
3228 flush_icache_range(p
->oldaddr
, p
->oldaddr
+ p
->size
);
3232 static abort_t
verify_trampoline(struct ksplice_mod_change
*change
,
3233 const struct ksplice_patch
*p
)
3235 if (memcmp(p
->vaddr
, p
->contents
, p
->size
) != 0) {
3236 ksdebug(change
, "Aborted. Trampoline at %lx has been "
3237 "overwritten.\n", p
->oldaddr
);
3243 static void remove_trampoline(const struct ksplice_patch
*p
)
3245 mm_segment_t old_fs
= get_fs();
3247 memcpy(p
->vaddr
, p
->saved
, p
->size
);
3248 flush_icache_range(p
->oldaddr
, p
->oldaddr
+ p
->size
);
3252 /* Returns NO_MATCH if there's already a labelval with a different value */
3253 static abort_t
create_labelval(struct ksplice_mod_change
*change
,
3254 struct ksplice_symbol
*ksym
,
3255 unsigned long val
, int status
)
3257 val
= follow_trampolines(change
, val
);
3258 if (ksym
->candidate_vals
== NULL
)
3259 return ksym
->value
== val
? OK
: NO_MATCH
;
3262 if (status
== TEMP
) {
3263 struct labelval
*lv
= kmalloc(sizeof(*lv
), GFP_KERNEL
);
3265 return OUT_OF_MEMORY
;
3267 lv
->saved_vals
= ksym
->candidate_vals
;
3268 list_add(&lv
->list
, &change
->temp_labelvals
);
3270 ksym
->candidate_vals
= NULL
;
3275 * Creates a new safety_record for a old_code section based on its
3276 * ksplice_section and run-pre matching information.
3278 static abort_t
create_safety_record(struct ksplice_mod_change
*change
,
3279 const struct ksplice_section
*sect
,
3280 struct list_head
*record_list
,
3281 unsigned long run_addr
,
3282 unsigned long run_size
)
3284 struct safety_record
*rec
;
3285 struct ksplice_patch
*p
;
3287 if (record_list
== NULL
)
3290 for (p
= change
->patches
; p
< change
->patches_end
; p
++) {
3291 const struct ksplice_reloc
*r
= patch_reloc(change
, p
);
3292 if (strcmp(sect
->symbol
->label
, r
->symbol
->label
) == 0)
3295 if (p
>= change
->patches_end
)
3298 rec
= kmalloc(sizeof(*rec
), GFP_KERNEL
);
3300 return OUT_OF_MEMORY
;
3302 * The old_code might be unloaded when checking reversing
3303 * patches, so we need to kstrdup the label here.
3305 rec
->label
= kstrdup(sect
->symbol
->label
, GFP_KERNEL
);
3306 if (rec
->label
== NULL
) {
3308 return OUT_OF_MEMORY
;
3310 rec
->addr
= run_addr
;
3311 rec
->size
= run_size
;
3313 list_add(&rec
->list
, record_list
);
3317 static abort_t
add_candidate_val(struct ksplice_mod_change
*change
,
3318 struct list_head
*vals
, unsigned long val
)
3320 struct candidate_val
*tmp
, *new;
3323 * Careful: follow trampolines before comparing values so that we do
3324 * not mistake the obsolete function for another copy of the function.
3326 val
= follow_trampolines(change
, val
);
3328 list_for_each_entry(tmp
, vals
, list
) {
3329 if (tmp
->val
== val
)
3332 new = kmalloc(sizeof(*new), GFP_KERNEL
);
3334 return OUT_OF_MEMORY
;
3336 list_add(&new->list
, vals
);
3340 static void release_vals(struct list_head
*vals
)
3342 clear_list(vals
, struct candidate_val
, list
);
3346 * The temp_labelvals list is used to cache those temporary labelvals
3347 * that have been created to cross-check the symbol values obtained
3348 * from different relocations within a single section being matched.
3350 * If status is VAL, commit the temp_labelvals as final values.
3352 * If status is NOVAL, restore the list of possible values to the
3353 * ksplice_symbol, so that it no longer has a known value.
3355 static void set_temp_labelvals(struct ksplice_mod_change
*change
, int status
)
3357 struct labelval
*lv
, *n
;
3358 list_for_each_entry_safe(lv
, n
, &change
->temp_labelvals
, list
) {
3359 if (status
== NOVAL
) {
3360 lv
->symbol
->candidate_vals
= lv
->saved_vals
;
3362 release_vals(lv
->saved_vals
);
3363 kfree(lv
->saved_vals
);
3365 list_del(&lv
->list
);
3370 /* Is there a Ksplice canary with given howto at blank_addr? */
3371 static int contains_canary(struct ksplice_mod_change
*change
,
3372 unsigned long blank_addr
,
3373 const struct ksplice_reloc_howto
*howto
)
3375 switch (howto
->size
) {
3377 return (*(uint8_t *)blank_addr
& howto
->dst_mask
) ==
3378 (KSPLICE_CANARY
& howto
->dst_mask
);
3380 return (*(uint16_t *)blank_addr
& howto
->dst_mask
) ==
3381 (KSPLICE_CANARY
& howto
->dst_mask
);
3383 return (*(uint32_t *)blank_addr
& howto
->dst_mask
) ==
3384 (KSPLICE_CANARY
& howto
->dst_mask
);
3385 #if BITS_PER_LONG >= 64
3387 return (*(uint64_t *)blank_addr
& howto
->dst_mask
) ==
3388 (KSPLICE_CANARY
& howto
->dst_mask
);
3389 #endif /* BITS_PER_LONG */
3391 ksdebug(change
, "Aborted. Invalid relocation size.\n");
3397 * Compute the address of the code you would actually run if you were
3398 * to call the function at addr (i.e., follow the sequence of jumps
3401 static unsigned long follow_trampolines(struct ksplice_mod_change
*change
,
3404 unsigned long new_addr
;
3408 #ifdef KSPLICE_STANDALONE
3411 #endif /* KSPLICE_STANDALONE */
3412 if (!__kernel_text_address(addr
) ||
3413 trampoline_target(change
, addr
, &new_addr
) != OK
)
3415 m
= __module_text_address(new_addr
);
3416 if (m
== NULL
|| m
== change
->target
||
3417 !strstarts(m
->name
, "ksplice"))
3423 /* Does module a patch module b? */
3424 static bool patches_module(const struct module
*a
, const struct module
*b
)
3426 #ifdef KSPLICE_NO_KERNEL_SUPPORT
3428 const char *modname
= b
== NULL
? "vmlinux" : b
->name
;
3431 if (a
== NULL
|| !strstarts(a
->name
, "ksplice_"))
3433 name
= a
->name
+ strlen("ksplice_");
3434 name
+= strcspn(name
, "_");
3438 return strstarts(name
, modname
) &&
3439 strcmp(name
+ strlen(modname
), "_new") == 0;
3440 #else /* !KSPLICE_NO_KERNEL_SUPPORT */
3441 struct ksplice_module_list_entry
*entry
;
3444 list_for_each_entry(entry
, &ksplice_modules
, list
) {
3445 if (strcmp(entry
->target_mod_name
, b
->name
) == 0 &&
3446 strcmp(entry
->new_code_mod_name
, a
->name
) == 0)
3450 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
3453 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
3454 /* 66f92cf9d415e96a5bdd6c64de8dd8418595d2fc was after 2.6.29 */
3455 static bool strstarts(const char *str
, const char *prefix
)
3457 return strncmp(str
, prefix
, strlen(prefix
)) == 0;
3459 #endif /* LINUX_VERSION_CODE */
3461 static bool singular(struct list_head
*list
)
3463 return !list_empty(list
) && list
->next
->next
== list
;
3466 static void *bsearch(const void *key
, const void *base
, size_t n
,
3467 size_t size
, int (*cmp
)(const void *key
, const void *elt
))
3469 int start
= 0, end
= n
- 1, mid
, result
;
3472 while (start
<= end
) {
3473 mid
= (start
+ end
) / 2;
3474 result
= cmp(key
, base
+ mid
* size
);
3477 else if (result
> 0)
3480 return (void *)base
+ mid
* size
;
3485 static int compare_relocs(const void *a
, const void *b
)
3487 const struct ksplice_reloc
*ra
= a
, *rb
= b
;
3488 if (ra
->blank_addr
> rb
->blank_addr
)
3490 else if (ra
->blank_addr
< rb
->blank_addr
)
3493 return ra
->howto
->size
- rb
->howto
->size
;
3496 #ifdef KSPLICE_STANDALONE
3497 static int compare_system_map(const void *a
, const void *b
)
3499 const struct ksplice_system_map
*sa
= a
, *sb
= b
;
3500 return strcmp(sa
->label
, sb
->label
);
3502 #endif /* KSPLICE_STANDALONE */
3504 #ifdef CONFIG_DEBUG_FS
3505 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
3506 /* Old kernels don't have debugfs_create_blob */
3507 static ssize_t
read_file_blob(struct file
*file
, char __user
*user_buf
,
3508 size_t count
, loff_t
*ppos
)
3510 struct debugfs_blob_wrapper
*blob
= file
->private_data
;
3511 return simple_read_from_buffer(user_buf
, count
, ppos
, blob
->data
,
3515 static int blob_open(struct inode
*inode
, struct file
*file
)
3517 if (inode
->i_private
)
3518 file
->private_data
= inode
->i_private
;
3522 static struct file_operations fops_blob
= {
3523 .read
= read_file_blob
,
3527 static struct dentry
*debugfs_create_blob(const char *name
, mode_t mode
,
3528 struct dentry
*parent
,
3529 struct debugfs_blob_wrapper
*blob
)
3531 return debugfs_create_file(name
, mode
, parent
, blob
, &fops_blob
);
3533 #endif /* LINUX_VERSION_CODE */
3535 static abort_t
init_debug_buf(struct update
*update
)
3537 update
->debug_blob
.size
= 0;
3538 update
->debug_blob
.data
= NULL
;
3539 update
->debugfs_dentry
=
3540 debugfs_create_blob(update
->name
, S_IFREG
| S_IRUSR
, NULL
,
3541 &update
->debug_blob
);
3542 if (update
->debugfs_dentry
== NULL
)
3543 return OUT_OF_MEMORY
;
3547 static void clear_debug_buf(struct update
*update
)
3549 if (update
->debugfs_dentry
== NULL
)
3551 debugfs_remove(update
->debugfs_dentry
);
3552 update
->debugfs_dentry
= NULL
;
3553 update
->debug_blob
.size
= 0;
3554 vfree(update
->debug_blob
.data
);
3555 update
->debug_blob
.data
= NULL
;
3558 static int _ksdebug(struct update
*update
, const char *fmt
, ...)
3561 unsigned long size
, old_size
, new_size
;
3563 if (update
->debug
== 0)
3566 /* size includes the trailing '\0' */
3567 va_start(args
, fmt
);
3568 size
= 1 + vsnprintf(update
->debug_blob
.data
, 0, fmt
, args
);
3570 old_size
= update
->debug_blob
.size
== 0 ? 0 :
3571 max(PAGE_SIZE
, roundup_pow_of_two(update
->debug_blob
.size
));
3572 new_size
= update
->debug_blob
.size
+ size
== 0 ? 0 :
3573 max(PAGE_SIZE
, roundup_pow_of_two(update
->debug_blob
.size
+ size
));
3574 if (new_size
> old_size
) {
3575 char *buf
= vmalloc(new_size
);
3578 memcpy(buf
, update
->debug_blob
.data
, update
->debug_blob
.size
);
3579 vfree(update
->debug_blob
.data
);
3580 update
->debug_blob
.data
= buf
;
3582 va_start(args
, fmt
);
3583 update
->debug_blob
.size
+= vsnprintf(update
->debug_blob
.data
+
3584 update
->debug_blob
.size
,
3589 #else /* CONFIG_DEBUG_FS */
3590 static abort_t
init_debug_buf(struct update
*update
)
3595 static void clear_debug_buf(struct update
*update
)
3600 static int _ksdebug(struct update
*update
, const char *fmt
, ...)
3604 if (update
->debug
== 0)
3607 if (!update
->debug_continue_line
)
3608 printk(KERN_DEBUG
"ksplice: ");
3610 va_start(args
, fmt
);
3611 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
3613 #else /* LINUX_VERSION_CODE < */
3614 /* 683b229286b429244f35726b3c18caec429233bd was after 2.6.8 */
3616 char *buf
= kvasprintf(GFP_KERNEL
, fmt
, args
);
3620 #endif /* LINUX_VERSION_CODE */
3623 update
->debug_continue_line
=
3624 fmt
[0] == '\0' || fmt
[strlen(fmt
) - 1] != '\n';
3627 #endif /* CONFIG_DEBUG_FS */
3629 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) && defined(CONFIG_KALLSYMS)
3630 /* 75a66614db21007bcc8c37f9c5d5b922981387b9 was after 2.6.29 */
3631 extern unsigned long kallsyms_addresses
[];
3632 extern unsigned long kallsyms_num_syms
;
3633 extern u8 kallsyms_names
[];
3635 static int kallsyms_on_each_symbol(int (*fn
)(void *, const char *,
3636 struct module
*, unsigned long),
3639 char namebuf
[KSYM_NAME_LEN
];
3641 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3643 #endif /* LINUX_VERSION_CODE */
3646 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3647 for (i
= 0, off
= 0; i
< kallsyms_num_syms
; i
++) {
3648 off
= kallsyms_expand_symbol(off
, namebuf
);
3649 ret
= fn(data
, namebuf
, NULL
, kallsyms_addresses
[i
]);
3653 #else /* LINUX_VERSION_CODE < */
3654 /* 5648d78927ca65e74aadc88a2b1d6431e55e78ec was after 2.6.9 */
3657 for (i
= 0, knames
= kallsyms_names
; i
< kallsyms_num_syms
; i
++) {
3658 unsigned prefix
= *knames
++;
3660 strlcpy(namebuf
+ prefix
, knames
, KSYM_NAME_LEN
- prefix
);
3662 ret
= fn(data
, namebuf
, NULL
, kallsyms_addresses
[i
]);
3666 knames
+= strlen(knames
) + 1;
3668 #endif /* LINUX_VERSION_CODE */
3669 return module_kallsyms_on_each_symbol(fn
, data
);
3672 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3673 extern u8 kallsyms_token_table
[];
3674 extern u16 kallsyms_token_index
[];
3676 static unsigned int kallsyms_expand_symbol(unsigned int off
, char *result
)
3678 long len
, skipped_first
= 0;
3679 const u8
*tptr
, *data
;
3681 data
= &kallsyms_names
[off
];
3688 tptr
= &kallsyms_token_table
[kallsyms_token_index
[*data
]];
3693 if (skipped_first
) {
3706 #else /* LINUX_VERSION_CODE < */
3707 /* 5648d78927ca65e74aadc88a2b1d6431e55e78ec was after 2.6.9 */
3708 #endif /* LINUX_VERSION_CODE */
3710 static int module_kallsyms_on_each_symbol(int (*fn
)(void *, const char *,
3719 list_for_each_entry(mod
, &modules
, list
) {
3720 for (i
= 0; i
< mod
->num_symtab
; i
++) {
3721 ret
= fn(data
, mod
->strtab
+ mod
->symtab
[i
].st_name
,
3722 mod
, mod
->symtab
[i
].st_value
);
3729 #endif /* LINUX_VERSION_CODE && CONFIG_KALLSYMS */
3731 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
3732 /* c6b37801911d7f4663c99cad8aa230bc934cea82 was after 2.6.29 */
3733 static struct module
*find_module(const char *name
)
3737 list_for_each_entry(mod
, &modules
, list
) {
3738 if (strcmp(mod
->name
, name
) == 0)
3744 #ifdef CONFIG_MODULE_UNLOAD
3746 struct list_head list
;
3747 struct module
*module_which_uses
;
3750 /* I'm not yet certain whether we need the strong form of this. */
3751 static inline int strong_try_module_get(struct module
*mod
)
3753 if (mod
&& mod
->state
!= MODULE_STATE_LIVE
)
3755 if (try_module_get(mod
))
3760 /* Does a already use b? */
3761 static int already_uses(struct module
*a
, struct module
*b
)
3763 struct module_use
*use
;
3764 list_for_each_entry(use
, &b
->modules_which_use_me
, list
) {
3765 if (use
->module_which_uses
== a
)
3771 /* Make it so module a uses b. Must be holding module_mutex */
3772 static int use_module(struct module
*a
, struct module
*b
)
3774 struct module_use
*use
;
3775 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
3776 /* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
3778 #endif /* LINUX_VERSION_CODE */
3779 if (b
== NULL
|| already_uses(a
, b
))
3782 if (strong_try_module_get(b
) < 0)
3785 use
= kmalloc(sizeof(*use
), GFP_ATOMIC
);
3790 use
->module_which_uses
= a
;
3791 list_add(&use
->list
, &b
->modules_which_use_me
);
3792 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
3793 /* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
3794 no_warn
= sysfs_create_link(b
->holders_dir
, &a
->mkobj
.kobj
, a
->name
);
3795 #endif /* LINUX_VERSION_CODE */
3798 #else /* CONFIG_MODULE_UNLOAD */
3799 static int use_module(struct module
*a
, struct module
*b
)
3803 #endif /* CONFIG_MODULE_UNLOAD */
3805 #ifndef CONFIG_MODVERSIONS
3806 #define symversion(base, idx) NULL
3808 #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
3811 static bool each_symbol_in_section(const struct symsearch
*arr
,
3812 unsigned int arrsize
,
3813 struct module
*owner
,
3814 bool (*fn
)(const struct symsearch
*syms
,
3815 struct module
*owner
,
3816 unsigned int symnum
, void *data
),
3821 for (j
= 0; j
< arrsize
; j
++) {
3822 for (i
= 0; i
< arr
[j
].stop
- arr
[j
].start
; i
++)
3823 if (fn(&arr
[j
], owner
, i
, data
))
3830 /* Returns true as soon as fn returns true, otherwise false. */
3831 static bool each_symbol(bool (*fn
)(const struct symsearch
*arr
,
3832 struct module
*owner
,
3833 unsigned int symnum
, void *data
),
3837 const struct symsearch arr
[] = {
3838 { __start___ksymtab
, __stop___ksymtab
, __start___kcrctab
,
3839 NOT_GPL_ONLY
, false },
3840 { __start___ksymtab_gpl
, __stop___ksymtab_gpl
,
3841 __start___kcrctab_gpl
,
3843 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
3844 { __start___ksymtab_gpl_future
, __stop___ksymtab_gpl_future
,
3845 __start___kcrctab_gpl_future
,
3846 WILL_BE_GPL_ONLY
, false },
3847 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
3848 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
3849 { __start___ksymtab_unused
, __stop___ksymtab_unused
,
3850 __start___kcrctab_unused
,
3851 NOT_GPL_ONLY
, true },
3852 { __start___ksymtab_unused_gpl
, __stop___ksymtab_unused_gpl
,
3853 __start___kcrctab_unused_gpl
,
3855 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
3858 if (each_symbol_in_section(arr
, ARRAY_SIZE(arr
), NULL
, fn
, data
))
3861 list_for_each_entry(mod
, &modules
, list
) {
3862 struct symsearch module_arr
[] = {
3863 { mod
->syms
, mod
->syms
+ mod
->num_syms
, mod
->crcs
,
3864 NOT_GPL_ONLY
, false },
3865 { mod
->gpl_syms
, mod
->gpl_syms
+ mod
->num_gpl_syms
,
3868 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
3869 { mod
->gpl_future_syms
,
3870 mod
->gpl_future_syms
+ mod
->num_gpl_future_syms
,
3871 mod
->gpl_future_crcs
,
3872 WILL_BE_GPL_ONLY
, false },
3873 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
3874 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
3876 mod
->unused_syms
+ mod
->num_unused_syms
,
3878 NOT_GPL_ONLY
, true },
3879 { mod
->unused_gpl_syms
,
3880 mod
->unused_gpl_syms
+ mod
->num_unused_gpl_syms
,
3881 mod
->unused_gpl_crcs
,
3883 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
3886 if (each_symbol_in_section(module_arr
, ARRAY_SIZE(module_arr
),
3893 struct find_symbol_arg
{
3900 struct module
*owner
;
3901 const unsigned long *crc
;
3902 const struct kernel_symbol
*sym
;
3905 static bool find_symbol_in_section(const struct symsearch
*syms
,
3906 struct module
*owner
,
3907 unsigned int symnum
, void *data
)
3909 struct find_symbol_arg
*fsa
= data
;
3911 if (strcmp(syms
->start
[symnum
].name
, fsa
->name
) != 0)
3915 if (syms
->licence
== GPL_ONLY
)
3917 if (syms
->licence
== WILL_BE_GPL_ONLY
&& fsa
->warn
) {
3918 printk(KERN_WARNING
"Symbol %s is being used "
3919 "by a non-GPL module, which will not "
3920 "be allowed in the future\n", fsa
->name
);
3921 printk(KERN_WARNING
"Please see the file "
3922 "Documentation/feature-removal-schedule.txt "
3923 "in the kernel source tree for more details.\n");
3927 #ifdef CONFIG_UNUSED_SYMBOLS
3928 if (syms
->unused
&& fsa
->warn
) {
3929 printk(KERN_WARNING
"Symbol %s is marked as UNUSED, "
3930 "however this module is using it.\n", fsa
->name
);
3932 "This symbol will go away in the future.\n");
3934 "Please evalute if this is the right api to use and if "
3935 "it really is, submit a report the linux kernel "
3936 "mailinglist together with submitting your code for "
3942 fsa
->crc
= symversion(syms
->crcs
, symnum
);
3943 fsa
->sym
= &syms
->start
[symnum
];
3947 /* Find a symbol and return it, along with, (optional) crc and
3948 * (optional) module which owns it */
3949 static const struct kernel_symbol
*find_symbol(const char *name
,
3950 struct module
**owner
,
3951 const unsigned long **crc
,
3952 bool gplok
, bool warn
)
3954 struct find_symbol_arg fsa
;
3960 if (each_symbol(find_symbol_in_section
, &fsa
)) {
3971 static inline int within_module_core(unsigned long addr
, struct module
*mod
)
3973 return (unsigned long)mod
->module_core
<= addr
&&
3974 addr
< (unsigned long)mod
->module_core
+ mod
->core_size
;
3977 static inline int within_module_init(unsigned long addr
, struct module
*mod
)
3979 return (unsigned long)mod
->module_init
<= addr
&&
3980 addr
< (unsigned long)mod
->module_init
+ mod
->init_size
;
3983 static struct module
*__module_address(unsigned long addr
)
3987 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
3988 list_for_each_entry_rcu(mod
, &modules
, list
)
3990 /* d72b37513cdfbd3f53f3d485a8c403cc96d2c95f was after 2.6.27 */
3991 list_for_each_entry(mod
, &modules
, list
)
3993 if (within_module_core(addr
, mod
) ||
3994 within_module_init(addr
, mod
))
3998 #endif /* LINUX_VERSION_CODE */
4000 struct update_attribute
{
4001 struct attribute attr
;
4002 ssize_t (*show
)(struct update
*update
, char *buf
);
4003 ssize_t (*store
)(struct update
*update
, const char *buf
, size_t len
);
4006 static ssize_t
update_attr_show(struct kobject
*kobj
, struct attribute
*attr
,
4009 struct update_attribute
*attribute
=
4010 container_of(attr
, struct update_attribute
, attr
);
4011 struct update
*update
= container_of(kobj
, struct update
, kobj
);
4012 if (attribute
->show
== NULL
)
4014 return attribute
->show(update
, buf
);
4017 static ssize_t
update_attr_store(struct kobject
*kobj
, struct attribute
*attr
,
4018 const char *buf
, size_t len
)
4020 struct update_attribute
*attribute
=
4021 container_of(attr
, struct update_attribute
, attr
);
4022 struct update
*update
= container_of(kobj
, struct update
, kobj
);
4023 if (attribute
->store
== NULL
)
4025 return attribute
->store(update
, buf
, len
);
4028 static struct sysfs_ops update_sysfs_ops
= {
4029 .show
= update_attr_show
,
4030 .store
= update_attr_store
,
4033 static void update_release(struct kobject
*kobj
)
4035 struct update
*update
;
4036 update
= container_of(kobj
, struct update
, kobj
);
4037 cleanup_ksplice_update(update
);
4040 static ssize_t
stage_show(struct update
*update
, char *buf
)
4042 switch (update
->stage
) {
4043 case STAGE_PREPARING
:
4044 return snprintf(buf
, PAGE_SIZE
, "preparing\n");
4046 return snprintf(buf
, PAGE_SIZE
, "applied\n");
4047 case STAGE_REVERSED
:
4048 return snprintf(buf
, PAGE_SIZE
, "reversed\n");
4053 static ssize_t
abort_cause_show(struct update
*update
, char *buf
)
4055 switch (update
->abort_cause
) {
4057 return snprintf(buf
, PAGE_SIZE
, "ok\n");
4059 return snprintf(buf
, PAGE_SIZE
, "no_match\n");
4060 #ifdef KSPLICE_STANDALONE
4061 case BAD_SYSTEM_MAP
:
4062 return snprintf(buf
, PAGE_SIZE
, "bad_system_map\n");
4063 #endif /* KSPLICE_STANDALONE */
4065 return snprintf(buf
, PAGE_SIZE
, "code_busy\n");
4067 return snprintf(buf
, PAGE_SIZE
, "module_busy\n");
4069 return snprintf(buf
, PAGE_SIZE
, "out_of_memory\n");
4070 case FAILED_TO_FIND
:
4071 return snprintf(buf
, PAGE_SIZE
, "failed_to_find\n");
4072 case ALREADY_REVERSED
:
4073 return snprintf(buf
, PAGE_SIZE
, "already_reversed\n");
4074 case MISSING_EXPORT
:
4075 return snprintf(buf
, PAGE_SIZE
, "missing_export\n");
4076 case UNEXPECTED_RUNNING_TASK
:
4077 return snprintf(buf
, PAGE_SIZE
, "unexpected_running_task\n");
4078 case TARGET_NOT_LOADED
:
4079 return snprintf(buf
, PAGE_SIZE
, "target_not_loaded\n");
4081 return snprintf(buf
, PAGE_SIZE
, "call_failed\n");
4082 case COLD_UPDATE_LOADED
:
4083 return snprintf(buf
, PAGE_SIZE
, "cold_update_loaded\n");
4085 return snprintf(buf
, PAGE_SIZE
, "unexpected\n");
4087 return snprintf(buf
, PAGE_SIZE
, "unknown\n");
4092 static ssize_t
conflict_show(struct update
*update
, char *buf
)
4094 const struct conflict
*conf
;
4095 const struct conflict_addr
*ca
;
4097 mutex_lock(&module_mutex
);
4098 list_for_each_entry(conf
, &update
->conflicts
, list
) {
4099 used
+= snprintf(buf
+ used
, PAGE_SIZE
- used
, "%s %d",
4100 conf
->process_name
, conf
->pid
);
4101 list_for_each_entry(ca
, &conf
->stack
, list
) {
4102 if (!ca
->has_conflict
)
4104 used
+= snprintf(buf
+ used
, PAGE_SIZE
- used
, " %s",
4107 used
+= snprintf(buf
+ used
, PAGE_SIZE
- used
, "\n");
4109 mutex_unlock(&module_mutex
);
4113 /* Used to pass maybe_cleanup_ksplice_update to kthread_run */
4114 static int maybe_cleanup_ksplice_update_wrapper(void *updateptr
)
4116 struct update
*update
= updateptr
;
4117 mutex_lock(&module_mutex
);
4118 maybe_cleanup_ksplice_update(update
);
4119 mutex_unlock(&module_mutex
);
4123 static ssize_t
stage_store(struct update
*update
, const char *buf
, size_t len
)
4125 enum stage old_stage
;
4126 mutex_lock(&module_mutex
);
4127 old_stage
= update
->stage
;
4128 if ((strncmp(buf
, "applied", len
) == 0 ||
4129 strncmp(buf
, "applied\n", len
) == 0) &&
4130 update
->stage
== STAGE_PREPARING
)
4131 update
->abort_cause
= apply_update(update
);
4132 else if ((strncmp(buf
, "reversed", len
) == 0 ||
4133 strncmp(buf
, "reversed\n", len
) == 0) &&
4134 update
->stage
== STAGE_APPLIED
)
4135 update
->abort_cause
= reverse_update(update
);
4136 else if ((strncmp(buf
, "cleanup", len
) == 0 ||
4137 strncmp(buf
, "cleanup\n", len
) == 0) &&
4138 update
->stage
== STAGE_REVERSED
)
4139 kthread_run(maybe_cleanup_ksplice_update_wrapper
, update
,
4140 "ksplice_cleanup_%s", update
->kid
);
4142 mutex_unlock(&module_mutex
);
4146 static ssize_t
debug_show(struct update
*update
, char *buf
)
4148 return snprintf(buf
, PAGE_SIZE
, "%d\n", update
->debug
);
4151 static ssize_t
debug_store(struct update
*update
, const char *buf
, size_t len
)
4154 int ret
= strict_strtoul(buf
, 10, &l
);
4161 static ssize_t
partial_show(struct update
*update
, char *buf
)
4163 return snprintf(buf
, PAGE_SIZE
, "%d\n", update
->partial
);
4166 static ssize_t
partial_store(struct update
*update
, const char *buf
, size_t len
)
4169 int ret
= strict_strtoul(buf
, 10, &l
);
4172 update
->partial
= l
;
4176 static struct update_attribute stage_attribute
=
4177 __ATTR(stage
, 0600, stage_show
, stage_store
);
4178 static struct update_attribute abort_cause_attribute
=
4179 __ATTR(abort_cause
, 0400, abort_cause_show
, NULL
);
4180 static struct update_attribute debug_attribute
=
4181 __ATTR(debug
, 0600, debug_show
, debug_store
);
4182 static struct update_attribute partial_attribute
=
4183 __ATTR(partial
, 0600, partial_show
, partial_store
);
4184 static struct update_attribute conflict_attribute
=
4185 __ATTR(conflicts
, 0400, conflict_show
, NULL
);
4187 static struct attribute
*update_attrs
[] = {
4188 &stage_attribute
.attr
,
4189 &abort_cause_attribute
.attr
,
4190 &debug_attribute
.attr
,
4191 &partial_attribute
.attr
,
4192 &conflict_attribute
.attr
,
4196 static struct kobj_type update_ktype
= {
4197 .sysfs_ops
= &update_sysfs_ops
,
4198 .release
= update_release
,
4199 .default_attrs
= update_attrs
,
4202 #ifdef KSPLICE_STANDALONE
4204 module_param(debug
, int, 0600);
4205 MODULE_PARM_DESC(debug
, "Debug level");
4207 extern struct ksplice_system_map ksplice_system_map
[], ksplice_system_map_end
[];
4209 static struct ksplice_mod_change bootstrap_mod_change
= {
4210 .name
= "ksplice_" __stringify(KSPLICE_KID
),
4211 .kid
= "init_" __stringify(KSPLICE_KID
),
4212 .target_name
= NULL
,
4214 .map_printk
= MAP_PRINTK
,
4215 .new_code_mod
= THIS_MODULE
,
4216 .new_code
.system_map
= ksplice_system_map
,
4217 .new_code
.system_map_end
= ksplice_system_map_end
,
4219 #endif /* KSPLICE_STANDALONE */
4221 static int init_ksplice(void)
4223 #ifdef KSPLICE_STANDALONE
4224 struct ksplice_mod_change
*change
= &bootstrap_mod_change
;
4225 change
->update
= init_ksplice_update(change
->kid
);
4226 sort(change
->new_code
.system_map
,
4227 change
->new_code
.system_map_end
- change
->new_code
.system_map
,
4228 sizeof(struct ksplice_system_map
), compare_system_map
, NULL
);
4229 if (change
->update
== NULL
)
4231 add_to_update(change
, change
->update
);
4232 change
->update
->debug
= debug
;
4233 change
->update
->abort_cause
=
4234 apply_relocs(change
, ksplice_init_relocs
, ksplice_init_relocs_end
);
4235 if (change
->update
->abort_cause
== OK
)
4236 bootstrapped
= true;
4237 cleanup_ksplice_update(bootstrap_mod_change
.update
);
4238 #else /* !KSPLICE_STANDALONE */
4239 ksplice_kobj
= kobject_create_and_add("ksplice", kernel_kobj
);
4240 if (ksplice_kobj
== NULL
)
4242 #endif /* KSPLICE_STANDALONE */
4246 static void cleanup_ksplice(void)
4248 #ifndef KSPLICE_STANDALONE
4249 kobject_put(ksplice_kobj
);
4250 #endif /* KSPLICE_STANDALONE */
4253 module_init(init_ksplice
);
4254 module_exit(cleanup_ksplice
);
4256 MODULE_AUTHOR("Ksplice, Inc.");
4257 MODULE_DESCRIPTION("Ksplice rebootless update system");
4258 #ifdef KSPLICE_VERSION
4259 MODULE_VERSION(KSPLICE_VERSION
);
4261 MODULE_LICENSE("GPL v2");