1 /* Copyright (C) 2007-2009 Ksplice, Inc.
2 * Authors: Jeff Arnold, Anders Kaseorg, Tim Abbott
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
18 #include <linux/module.h>
19 #include <linux/version.h>
20 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
21 #include <linux/bug.h>
22 #else /* LINUX_VERSION_CODE */
23 /* 7664c5a1da4711bb6383117f51b94c8dc8f3f1cd was after 2.6.19 */
24 #endif /* LINUX_VERSION_CODE */
25 #include <linux/ctype.h>
26 #if defined CONFIG_DEBUG_FS || LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
27 #include <linux/debugfs.h>
28 #else /* CONFIG_DEBUG_FS */
29 /* a7a76cefc4b12bb6508afa4c77f11c2752cc365d was after 2.6.11 */
30 #endif /* CONFIG_DEBUG_FS */
31 #include <linux/errno.h>
32 #include <linux/kallsyms.h>
33 #include <linux/kobject.h>
34 #include <linux/kthread.h>
35 #include <linux/pagemap.h>
36 #include <linux/sched.h>
37 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
38 #include <linux/sort.h>
39 #else /* LINUX_VERSION_CODE < */
40 /* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
41 #endif /* LINUX_VERSION_CODE */
42 #include <linux/stop_machine.h>
43 #include <linux/sysfs.h>
44 #include <linux/time.h>
45 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
46 #include <linux/uaccess.h>
47 #else /* LINUX_VERSION_CODE < */
48 /* linux/uaccess.h doesn't exist in kernels before 2.6.18 */
49 #include <asm/uaccess.h>
50 #endif /* LINUX_VERSION_CODE */
51 #include <linux/vmalloc.h>
52 #ifdef KSPLICE_STANDALONE
54 #else /* !KSPLICE_STANDALONE */
55 #include <linux/ksplice.h>
56 #endif /* KSPLICE_STANDALONE */
57 #ifdef KSPLICE_NEED_PARAINSTRUCTIONS
58 #include <asm/alternative.h>
59 #endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
#ifdef KSPLICE_STANDALONE
#if !defined(CONFIG_KSPLICE) && !defined(CONFIG_KSPLICE_MODULE)
#define KSPLICE_NO_KERNEL_SUPPORT 1
#endif /* !CONFIG_KSPLICE && !CONFIG_KSPLICE_MODULE */

/* Old kernels spell the "referenced even if apparently unused" attribute
 * __attribute_used__; provide __used on top of it when missing.
 * NOTE(review): guard restored from context — verify against upstream. */
#ifndef __used
#define __used __attribute_used__
#endif /* __used */

/* Emit a uniquely-named pointer to sym in the .ksplice_extract section so
 * the Ksplice tooling can locate kernel-private symbols at build time. */
#define EXTRACT_SYMBOL(sym) \
	static const typeof(&sym) PASTE(__ksplice_extract_, __LINE__) \
	    __used __attribute__((section(".ksplice_extract"))) = &sym
#endif /* KSPLICE_STANDALONE */
/* Lifecycle of an update as exposed through sysfs. */
enum stage {
	STAGE_PREPARING,	/* the update is not yet applied */
	STAGE_APPLIED,		/* the update is applied */
	STAGE_REVERSED,		/* the update has been applied and reversed */
};

/* parameter to modify run-pre matching */
enum run_pre_mode {
	RUN_PRE_INITIAL,	/* dry run (only change temp_labelvals) */
	RUN_PRE_DEBUG,		/* dry run with byte-by-byte debugging */
	RUN_PRE_FINAL,		/* finalizes the matching */
#ifndef CONFIG_FUNCTION_DATA_SECTIONS
	RUN_PRE_SILENT,
#endif /* !CONFIG_FUNCTION_DATA_SECTIONS */
};

/* Status of a symbol's labelval: no value, tentative value, final value. */
enum { NOVAL, TEMP, VAL };
93 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
94 /* 5d7b32de9935c65ca8285ac6ec2382afdbb5d479 was after 2.6.8 */
96 #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
97 /* af4ca457eaf2d6682059c18463eb106e2ce58198 was after 2.6.14 */
98 #define __bitwise__ __bitwise
101 typedef int __bitwise__ abort_t
;
103 #define OK ((__force abort_t) 0)
104 #define NO_MATCH ((__force abort_t) 1)
105 #define CODE_BUSY ((__force abort_t) 2)
106 #define MODULE_BUSY ((__force abort_t) 3)
107 #define OUT_OF_MEMORY ((__force abort_t) 4)
108 #define FAILED_TO_FIND ((__force abort_t) 5)
109 #define ALREADY_REVERSED ((__force abort_t) 6)
110 #define MISSING_EXPORT ((__force abort_t) 7)
111 #define UNEXPECTED_RUNNING_TASK ((__force abort_t) 8)
112 #define UNEXPECTED ((__force abort_t) 9)
113 #define TARGET_NOT_LOADED ((__force abort_t) 10)
114 #define CALL_FAILED ((__force abort_t) 11)
115 #define COLD_UPDATE_LOADED ((__force abort_t) 12)
116 #ifdef KSPLICE_STANDALONE
117 #define BAD_SYSTEM_MAP ((__force abort_t) 13)
118 #endif /* KSPLICE_STANDALONE */
127 #ifdef CONFIG_DEBUG_FS
128 struct debugfs_blob_wrapper debug_blob
;
129 struct dentry
*debugfs_dentry
;
130 #else /* !CONFIG_DEBUG_FS */
131 bool debug_continue_line
;
132 #endif /* CONFIG_DEBUG_FS */
133 bool partial
; /* is it OK if some target mods aren't loaded */
134 struct list_head changes
, /* changes for loaded target mods */
135 unused_changes
; /* changes for non-loaded target mods */
136 struct list_head conflicts
;
137 struct list_head list
;
138 struct list_head ksplice_module_list
;
141 /* a process conflicting with an update */
143 const char *process_name
;
145 struct list_head stack
;
146 struct list_head list
;
149 /* an address on the stack of a conflict */
150 struct conflict_addr
{
151 unsigned long addr
; /* the address on the stack */
152 bool has_conflict
; /* does this address in particular conflict? */
153 const char *label
; /* the label of the conflicting safety_record */
154 struct list_head list
;
157 #if defined(CONFIG_DEBUG_FS) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
158 /* Old kernels don't have debugfs_create_blob */
159 struct debugfs_blob_wrapper
{
163 #endif /* CONFIG_DEBUG_FS && LINUX_VERSION_CODE */
165 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
166 /* 930631edd4b1fe2781d9fe90edbe35d89dfc94cc was after 2.6.18 */
167 #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
171 struct list_head list
;
172 struct ksplice_symbol
*symbol
;
173 struct list_head
*saved_vals
;
176 /* region to be checked for conflicts in the stack check */
177 struct safety_record
{
178 struct list_head list
;
180 unsigned long addr
; /* the address to be checked for conflicts
181 * (e.g. an obsolete function's starting addr)
183 unsigned long size
; /* the size of the region to be checked */
186 /* possible value for a symbol */
187 struct candidate_val
{
188 struct list_head list
;
192 /* private struct used by init_symbol_array */
193 struct ksplice_lookup
{
195 struct ksplice_mod_change
*change
;
196 struct ksplice_symbol
**arr
;
202 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
203 /* c6b37801911d7f4663c99cad8aa230bc934cea82 was after 2.6.29 */
205 const struct kernel_symbol
*start
, *stop
;
206 const unsigned long *crcs
;
214 #endif /* LINUX_VERSION_CODE */
216 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
217 /* c33fa9f5609e918824446ef9a75319d4a802f1f4 was after 2.6.25 */
219 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
220 /* 2fff0a48416af891dce38fd425246e337831e0bb was after 2.6.19 */
221 static bool virtual_address_mapped(unsigned long addr
)
224 return probe_kernel_address(addr
, retval
) != -EFAULT
;
226 #else /* LINUX_VERSION_CODE < */
227 static bool virtual_address_mapped(unsigned long addr
);
228 #endif /* LINUX_VERSION_CODE */
230 static long probe_kernel_read(void *dst
, void *src
, size_t size
)
234 if (!virtual_address_mapped((unsigned long)src
) ||
235 !virtual_address_mapped((unsigned long)src
+ size
- 1))
238 memcpy(dst
, src
, size
);
241 #endif /* LINUX_VERSION_CODE */
243 static LIST_HEAD(updates
);
244 #ifdef KSPLICE_STANDALONE
245 #if defined(CONFIG_KSPLICE) || defined(CONFIG_KSPLICE_MODULE)
246 extern struct list_head ksplice_modules
;
247 #else /* !CONFIG_KSPLICE */
248 LIST_HEAD(ksplice_modules
);
249 #endif /* CONFIG_KSPLICE */
250 #else /* !KSPLICE_STANDALONE */
251 LIST_HEAD(ksplice_modules
);
252 EXPORT_SYMBOL_GPL(ksplice_modules
);
253 static struct kobject
*ksplice_kobj
;
254 #endif /* KSPLICE_STANDALONE */
256 static struct kobj_type update_ktype
;
258 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
259 /* Old kernels do not have kcalloc
260 * e629946abd0bb8266e9c3d0fd1bff2ef8dec5443 was after 2.6.8
262 static void *kcalloc(size_t n
, size_t size
, typeof(GFP_KERNEL
) flags
)
265 if (n
!= 0 && size
> ULONG_MAX
/ n
)
267 mem
= kmalloc(n
* size
, flags
);
269 memset(mem
, 0, n
* size
);
272 #endif /* LINUX_VERSION_CODE */
274 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
275 /* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
276 static void u32_swap(void *a
, void *b
, int size
)
279 *(u32
*)a
= *(u32
*)b
;
/* Generic byte-by-byte swap of two size-byte elements for sort(). */
static void generic_swap(void *a, void *b, int size)
{
	char t;

	do {
		t = *(char *)a;
		*(char *)a++ = *(char *)b;
		*(char *)b++ = t;
	} while (--size > 0);
}
295 * sort - sort an array of elements
296 * @base: pointer to data to sort
297 * @num: number of elements
298 * @size: size of each element
299 * @cmp: pointer to comparison function
300 * @swap: pointer to swap function or NULL
302 * This function does a heapsort on the given array. You may provide a
303 * swap function optimized to your element type.
305 * Sorting time is O(n log n) both on average and worst-case. While
306 * qsort is about 20% faster on average, it suffers from exploitable
307 * O(n*n) worst-case behavior and extra memory requirements that make
308 * it less suitable for kernel use.
311 void sort(void *base
, size_t num
, size_t size
,
312 int (*cmp
)(const void *, const void *),
313 void (*swap
)(void *, void *, int size
))
315 /* pre-scale counters for performance */
316 int i
= (num
/ 2 - 1) * size
, n
= num
* size
, c
, r
;
319 swap
= (size
== 4 ? u32_swap
: generic_swap
);
322 for (; i
>= 0; i
-= size
) {
323 for (r
= i
; r
* 2 + size
< n
; r
= c
) {
325 if (c
< n
- size
&& cmp(base
+ c
, base
+ c
+ size
) < 0)
327 if (cmp(base
+ r
, base
+ c
) >= 0)
329 swap(base
+ r
, base
+ c
, size
);
334 for (i
= n
- size
; i
> 0; i
-= size
) {
335 swap(base
, base
+ i
, size
);
336 for (r
= 0; r
* 2 + size
< i
; r
= c
) {
338 if (c
< i
- size
&& cmp(base
+ c
, base
+ c
+ size
) < 0)
340 if (cmp(base
+ r
, base
+ c
) >= 0)
342 swap(base
+ r
, base
+ c
, size
);
346 #endif /* LINUX_VERSION_CODE < */
348 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
349 /* Old kernels do not have kstrdup
350 * 543537bd922692bc978e2e356fcd8bfc9c2ee7d5 was after 2.6.12
352 #define kstrdup ksplice_kstrdup
353 static char *kstrdup(const char *s
, typeof(GFP_KERNEL
) gfp
)
362 buf
= kmalloc(len
, gfp
);
367 #endif /* LINUX_VERSION_CODE */
369 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
370 /* Old kernels use semaphore instead of mutex
371 * 97d1f15b7ef52c1e9c28dc48b454024bb53a5fd2 was after 2.6.16
373 #define mutex semaphore
374 #define mutex_lock down
375 #define mutex_unlock up
376 #endif /* LINUX_VERSION_CODE */
378 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
379 /* 11443ec7d9286dd25663516436a14edfb5f43857 was after 2.6.21 */
380 static char * __attribute_used__
381 kvasprintf(typeof(GFP_KERNEL
) gfp
, const char *fmt
, va_list ap
)
388 len
= vsnprintf(dummy
, 0, fmt
, aq
);
391 p
= kmalloc(len
+ 1, gfp
);
395 vsnprintf(p
, len
+ 1, fmt
, ap
);
401 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
402 /* e905914f96e11862b130dd229f73045dad9a34e8 was after 2.6.17 */
403 static char * __attribute__((format (printf
, 2, 3)))
404 kasprintf(typeof(GFP_KERNEL
) gfp
, const char *fmt
, ...)
410 p
= kvasprintf(gfp
, fmt
, ap
);
417 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
418 /* 06b2a76d25d3cfbd14680021c1d356c91be6904e was after 2.6.24 */
419 static int strict_strtoul(const char *cp
, unsigned int base
, unsigned long *res
)
430 val
= simple_strtoul(cp
, &tail
, base
);
431 if ((*tail
== '\0') ||
432 ((len
== (size_t)(tail
- cp
) + 1) && (*tail
== '\n'))) {
441 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
442 /* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
443 /* Assume cpus == NULL. */
444 #define stop_machine(fn, data, cpus) stop_machine_run(fn, data, NR_CPUS);
445 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
446 /* ee527cd3a20c2aeaac17d939e5d011f7a76d69f5 was after 2.6.21 */
447 EXTRACT_SYMBOL(stop_machine_run
);
448 #endif /* LINUX_VERSION_CODE */
449 #endif /* LINUX_VERSION_CODE */
451 #ifndef task_thread_info
452 #define task_thread_info(task) (task)->thread_info
453 #endif /* !task_thread_info */
#ifdef KSPLICE_STANDALONE

#ifdef do_each_thread_ve		/* OpenVZ kernels define this */
#define do_each_thread do_each_thread_all
#define while_each_thread while_each_thread_all
#endif /* do_each_thread_ve */

/* Set once the standalone bootstrap relocations have been applied. */
static bool bootstrapped = false;

/* defined by ksplice-create */
extern const struct ksplice_reloc ksplice_init_relocs[],
    ksplice_init_relocs_end[];

#endif /* KSPLICE_STANDALONE */
470 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
471 /* c6b37801911d7f4663c99cad8aa230bc934cea82 was after 2.6.29 */
472 extern struct list_head modules
;
473 EXTRACT_SYMBOL(modules
);
474 extern struct mutex module_mutex
;
475 EXTRACT_SYMBOL(module_mutex
);
476 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) && defined(CONFIG_UNUSED_SYMBOLS)
477 /* f71d20e961474dde77e6558396efb93d6ac80a4b was after 2.6.17 */
478 #define KSPLICE_KSYMTAB_UNUSED_SUPPORT 1
479 #endif /* LINUX_VERSION_CODE */
480 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
481 /* 9f28bb7e1d0188a993403ab39b774785892805e1 was after 2.6.16 */
482 #define KSPLICE_KSYMTAB_FUTURE_SUPPORT 1
483 #endif /* LINUX_VERSION_CODE */
484 extern const struct kernel_symbol __start___ksymtab
[];
485 EXTRACT_SYMBOL(__start___ksymtab
);
486 extern const struct kernel_symbol __stop___ksymtab
[];
487 EXTRACT_SYMBOL(__stop___ksymtab
);
488 extern const unsigned long __start___kcrctab
[];
489 EXTRACT_SYMBOL(__start___kcrctab
);
490 extern const struct kernel_symbol __start___ksymtab_gpl
[];
491 EXTRACT_SYMBOL(__start___ksymtab_gpl
);
492 extern const struct kernel_symbol __stop___ksymtab_gpl
[];
493 EXTRACT_SYMBOL(__stop___ksymtab_gpl
);
494 extern const unsigned long __start___kcrctab_gpl
[];
495 EXTRACT_SYMBOL(__start___kcrctab_gpl
);
496 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
497 extern const struct kernel_symbol __start___ksymtab_unused
[];
498 EXTRACT_SYMBOL(__start___ksymtab_unused
);
499 extern const struct kernel_symbol __stop___ksymtab_unused
[];
500 EXTRACT_SYMBOL(__stop___ksymtab_unused
);
501 extern const unsigned long __start___kcrctab_unused
[];
502 EXTRACT_SYMBOL(__start___kcrctab_unused
);
503 extern const struct kernel_symbol __start___ksymtab_unused_gpl
[];
504 EXTRACT_SYMBOL(__start___ksymtab_unused_gpl
);
505 extern const struct kernel_symbol __stop___ksymtab_unused_gpl
[];
506 EXTRACT_SYMBOL(__stop___ksymtab_unused_gpl
);
507 extern const unsigned long __start___kcrctab_unused_gpl
[];
508 EXTRACT_SYMBOL(__start___kcrctab_unused_gpl
);
509 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
510 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
511 extern const struct kernel_symbol __start___ksymtab_gpl_future
[];
512 EXTRACT_SYMBOL(__start___ksymtab_gpl_future
);
513 extern const struct kernel_symbol __stop___ksymtab_gpl_future
[];
514 EXTRACT_SYMBOL(__stop___ksymtab_gpl_future
);
515 extern const unsigned long __start___kcrctab_gpl_future
[];
516 EXTRACT_SYMBOL(__start___kcrctab_gpl_future
);
517 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
518 #endif /* LINUX_VERSION_CODE */
520 static struct update
*init_ksplice_update(const char *kid
);
521 static void cleanup_ksplice_update(struct update
*update
);
522 static void maybe_cleanup_ksplice_update(struct update
*update
);
523 static void add_to_update(struct ksplice_mod_change
*change
,
524 struct update
*update
);
525 static int ksplice_sysfs_init(struct update
*update
);
527 /* Preparing the relocations and patches for application */
528 static abort_t
apply_update(struct update
*update
);
529 static abort_t
reverse_update(struct update
*update
);
530 static abort_t
prepare_change(struct ksplice_mod_change
*change
);
531 static abort_t
finalize_change(struct ksplice_mod_change
*change
);
532 static abort_t
finalize_patches(struct ksplice_mod_change
*change
);
533 static abort_t
add_dependency_on_address(struct ksplice_mod_change
*change
,
535 static abort_t
map_trampoline_pages(struct update
*update
);
536 static void unmap_trampoline_pages(struct update
*update
);
537 static void *map_writable(void *addr
, size_t len
);
538 static abort_t
apply_relocs(struct ksplice_mod_change
*change
,
539 const struct ksplice_reloc
*relocs
,
540 const struct ksplice_reloc
*relocs_end
);
541 static abort_t
apply_reloc(struct ksplice_mod_change
*change
,
542 const struct ksplice_reloc
*r
);
543 static abort_t
apply_howto_reloc(struct ksplice_mod_change
*change
,
544 const struct ksplice_reloc
*r
);
545 static abort_t
apply_howto_date(struct ksplice_mod_change
*change
,
546 const struct ksplice_reloc
*r
);
547 static abort_t
read_reloc_value(struct ksplice_mod_change
*change
,
548 const struct ksplice_reloc
*r
,
549 unsigned long addr
, unsigned long *valp
);
550 static abort_t
write_reloc_value(struct ksplice_mod_change
*change
,
551 const struct ksplice_reloc
*r
,
552 unsigned long addr
, unsigned long sym_addr
);
553 static abort_t
create_module_list_entry(struct ksplice_mod_change
*change
,
555 static void cleanup_module_list_entries(struct update
*update
);
556 static void __attribute__((noreturn
)) ksplice_deleted(void);
558 /* run-pre matching */
559 static abort_t
match_change_sections(struct ksplice_mod_change
*change
,
560 bool consider_data_sections
);
561 static abort_t
find_section(struct ksplice_mod_change
*change
,
562 struct ksplice_section
*sect
);
563 static abort_t
try_addr(struct ksplice_mod_change
*change
,
564 struct ksplice_section
*sect
,
565 unsigned long run_addr
,
566 struct list_head
*safety_records
,
567 enum run_pre_mode mode
);
568 static abort_t
run_pre_cmp(struct ksplice_mod_change
*change
,
569 const struct ksplice_section
*sect
,
570 unsigned long run_addr
,
571 struct list_head
*safety_records
,
572 enum run_pre_mode mode
);
573 #ifndef CONFIG_FUNCTION_DATA_SECTIONS
574 /* defined in arch/ARCH/kernel/ksplice-arch.c */
575 static abort_t
arch_run_pre_cmp(struct ksplice_mod_change
*change
,
576 struct ksplice_section
*sect
,
577 unsigned long run_addr
,
578 struct list_head
*safety_records
,
579 enum run_pre_mode mode
);
580 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
581 static void print_bytes(struct ksplice_mod_change
*change
,
582 const unsigned char *run
, int runc
,
583 const unsigned char *pre
, int prec
);
584 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
585 static abort_t
brute_search(struct ksplice_mod_change
*change
,
586 struct ksplice_section
*sect
,
587 const void *start
, unsigned long len
,
588 struct list_head
*vals
);
589 static abort_t
brute_search_all(struct ksplice_mod_change
*change
,
590 struct ksplice_section
*sect
,
591 struct list_head
*vals
);
592 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
593 static const struct ksplice_reloc
*
594 init_reloc_search(struct ksplice_mod_change
*change
,
595 const struct ksplice_section
*sect
);
596 static const struct ksplice_reloc
*find_reloc(const struct ksplice_reloc
*start
,
597 const struct ksplice_reloc
*end
,
598 unsigned long address
,
600 static abort_t
lookup_reloc(struct ksplice_mod_change
*change
,
601 const struct ksplice_reloc
**fingerp
,
603 const struct ksplice_reloc
**relocp
);
604 static abort_t
handle_reloc(struct ksplice_mod_change
*change
,
605 const struct ksplice_section
*sect
,
606 const struct ksplice_reloc
*r
,
607 unsigned long run_addr
, enum run_pre_mode mode
);
608 static abort_t
handle_howto_date(struct ksplice_mod_change
*change
,
609 const struct ksplice_section
*sect
,
610 const struct ksplice_reloc
*r
,
611 unsigned long run_addr
,
612 enum run_pre_mode mode
);
613 static abort_t
handle_howto_reloc(struct ksplice_mod_change
*change
,
614 const struct ksplice_section
*sect
,
615 const struct ksplice_reloc
*r
,
616 unsigned long run_addr
,
617 enum run_pre_mode mode
);
618 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
620 static abort_t
handle_bug(struct ksplice_mod_change
*change
,
621 const struct ksplice_reloc
*r
,
622 unsigned long run_addr
);
623 #endif /* CONFIG_BUG */
624 #else /* LINUX_VERSION_CODE < */
625 /* 7664c5a1da4711bb6383117f51b94c8dc8f3f1cd was after 2.6.19 */
626 #endif /* LINUX_VERSION_CODE */
627 static abort_t
handle_extable(struct ksplice_mod_change
*change
,
628 const struct ksplice_reloc
*r
,
629 unsigned long run_addr
);
630 static struct ksplice_section
*symbol_section(struct ksplice_mod_change
*change
,
631 const struct ksplice_symbol
*sym
);
632 static int compare_section_labels(const void *va
, const void *vb
);
633 static int symbol_section_bsearch_compare(const void *a
, const void *b
);
634 static const struct ksplice_reloc
*
635 patch_reloc(struct ksplice_mod_change
*change
,
636 const struct ksplice_patch
*p
);
638 /* Computing possible addresses for symbols */
639 static abort_t
lookup_symbol(struct ksplice_mod_change
*change
,
640 const struct ksplice_symbol
*ksym
,
641 struct list_head
*vals
);
642 static void cleanup_symbol_arrays(struct ksplice_mod_change
*change
);
643 static abort_t
init_symbol_arrays(struct ksplice_mod_change
*change
);
644 static abort_t
init_symbol_array(struct ksplice_mod_change
*change
,
645 struct ksplice_symbol
*start
,
646 struct ksplice_symbol
*end
);
647 static abort_t
uniquify_symbols(struct ksplice_mod_change
*change
);
648 static abort_t
add_matching_values(struct ksplice_lookup
*lookup
,
649 const char *sym_name
, unsigned long sym_val
);
650 static bool add_export_values(const struct symsearch
*syms
,
651 struct module
*owner
,
652 unsigned int symnum
, void *data
);
653 static int symbolp_bsearch_compare(const void *key
, const void *elt
);
654 static int compare_symbolp_names(const void *a
, const void *b
);
655 static int compare_symbolp_labels(const void *a
, const void *b
);
656 #ifdef CONFIG_KALLSYMS
657 static int add_kallsyms_values(void *data
, const char *name
,
658 struct module
*owner
, unsigned long val
);
659 #endif /* CONFIG_KALLSYMS */
660 #ifdef KSPLICE_STANDALONE
662 add_system_map_candidates(struct ksplice_mod_change
*change
,
663 const struct ksplice_system_map
*start
,
664 const struct ksplice_system_map
*end
,
665 const char *label
, struct list_head
*vals
);
666 static int compare_system_map(const void *a
, const void *b
);
667 static int system_map_bsearch_compare(const void *key
, const void *elt
);
668 #endif /* KSPLICE_STANDALONE */
669 static abort_t
new_export_lookup(struct ksplice_mod_change
*ichange
,
670 const char *name
, struct list_head
*vals
);
672 /* Atomic update trampoline insertion and removal */
673 static abort_t
patch_action(struct update
*update
, enum ksplice_action action
);
674 static int __apply_patches(void *update
);
675 static int __reverse_patches(void *update
);
676 static abort_t
check_each_task(struct update
*update
);
677 static abort_t
check_task(struct update
*update
,
678 const struct task_struct
*t
, bool rerun
);
679 static abort_t
check_stack(struct update
*update
, struct conflict
*conf
,
680 const struct thread_info
*tinfo
,
681 const unsigned long *stack
);
682 static abort_t
check_address(struct update
*update
,
683 struct conflict
*conf
, unsigned long addr
);
684 static abort_t
check_record(struct conflict_addr
*ca
,
685 const struct safety_record
*rec
,
687 static bool is_stop_machine(const struct task_struct
*t
);
688 static void cleanup_conflicts(struct update
*update
);
689 static void print_conflicts(struct update
*update
);
690 static void insert_trampoline(struct ksplice_patch
*p
);
691 static abort_t
verify_trampoline(struct ksplice_mod_change
*change
,
692 const struct ksplice_patch
*p
);
693 static void remove_trampoline(const struct ksplice_patch
*p
);
695 static abort_t
create_labelval(struct ksplice_mod_change
*change
,
696 struct ksplice_symbol
*ksym
,
697 unsigned long val
, int status
);
698 static abort_t
create_safety_record(struct ksplice_mod_change
*change
,
699 const struct ksplice_section
*sect
,
700 struct list_head
*record_list
,
701 unsigned long run_addr
,
702 unsigned long run_size
);
703 static abort_t
add_candidate_val(struct ksplice_mod_change
*change
,
704 struct list_head
*vals
, unsigned long val
);
705 static void release_vals(struct list_head
*vals
);
706 static void set_temp_labelvals(struct ksplice_mod_change
*change
, int status
);
708 static int contains_canary(struct ksplice_mod_change
*change
,
709 unsigned long blank_addr
,
710 const struct ksplice_reloc_howto
*howto
);
711 static unsigned long follow_trampolines(struct ksplice_mod_change
*change
,
713 static bool patches_module(const struct module
*a
, const struct module
*b
);
714 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
715 /* 66f92cf9d415e96a5bdd6c64de8dd8418595d2fc was after 2.6.29 */
716 static bool strstarts(const char *str
, const char *prefix
);
717 #endif /* LINUX_VERSION_CODE */
718 static bool singular(struct list_head
*list
);
719 static void *bsearch(const void *key
, const void *base
, size_t n
,
720 size_t size
, int (*cmp
)(const void *key
, const void *elt
));
721 static int compare_relocs(const void *a
, const void *b
);
722 static int reloc_bsearch_compare(const void *key
, const void *elt
);
725 static abort_t
init_debug_buf(struct update
*update
);
726 static void clear_debug_buf(struct update
*update
);
727 static int __attribute__((format(printf
, 2, 3)))
728 _ksdebug(struct update
*update
, const char *fmt
, ...);
729 #define ksdebug(change, fmt, ...) \
730 _ksdebug(change->update, fmt, ## __VA_ARGS__)
732 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) && defined(CONFIG_KALLSYMS)
733 /* 75a66614db21007bcc8c37f9c5d5b922981387b9 was after 2.6.29 */
734 static int kallsyms_on_each_symbol(int (*fn
)(void *, const char *,
735 struct module
*, unsigned long),
737 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
738 static unsigned int kallsyms_expand_symbol(unsigned int off
, char *result
);
739 #endif /* LINUX_VERSION_CODE */
740 static int module_kallsyms_on_each_symbol(int (*fn
)(void *, const char *,
744 #endif /* LINUX_VERSION_CODE && CONFIG_KALLSYMS */
746 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
747 /* c6b37801911d7f4663c99cad8aa230bc934cea82 was after 2.6.29 */
748 static struct module
*find_module(const char *name
);
749 static int use_module(struct module
*a
, struct module
*b
);
750 static const struct kernel_symbol
*find_symbol(const char *name
,
751 struct module
**owner
,
752 const unsigned long **crc
,
753 bool gplok
, bool warn
);
754 static bool each_symbol(bool (*fn
)(const struct symsearch
*arr
,
755 struct module
*owner
,
756 unsigned int symnum
, void *data
),
758 static struct module
*__module_address(unsigned long addr
);
759 #endif /* LINUX_VERSION_CODE */
761 /* Architecture-specific functions defined in arch/ARCH/kernel/ksplice-arch.c */
763 /* Prepare a trampoline for the given patch */
764 static abort_t
prepare_trampoline(struct ksplice_mod_change
*change
,
765 struct ksplice_patch
*p
);
766 /* What address does the trampoline at addr jump to? */
767 static abort_t
trampoline_target(struct ksplice_mod_change
*change
,
768 unsigned long addr
, unsigned long *new_addr
);
769 /* Hook to handle pc-relative jumps inserted by parainstructions */
770 static abort_t
handle_paravirt(struct ksplice_mod_change
*change
,
771 unsigned long pre
, unsigned long run
,
773 /* Is address p on the stack of the given thread? */
774 static bool valid_stack_ptr(const struct thread_info
*tinfo
, const void *p
);
776 #ifndef KSPLICE_STANDALONE
777 #include "ksplice-arch.c"
778 #elif defined CONFIG_X86
779 #include "x86/ksplice-arch.c"
780 #elif defined CONFIG_ARM
781 #include "arm/ksplice-arch.c"
782 #endif /* KSPLICE_STANDALONE */
/* Free every entry of a list of heap-allocated structs: unlink each node
 * and kfree the containing object. */
#define clear_list(head, type, member)				\
	do {							\
		struct list_head *_pos, *_n;			\
		list_for_each_safe(_pos, _n, head) {		\
			list_del(_pos);				\
			kfree(list_entry(_pos, type, member));	\
		}						\
	} while (0)
794 * init_ksplice_mod_change() - Initializes a ksplice change
795 * @change: The change to be initialized. All of the public fields of the
796 * change and its associated data structures should be populated
797 * before this function is called. The values of the private
798 * fields will be ignored.
800 int init_ksplice_mod_change(struct ksplice_mod_change
*change
)
802 struct update
*update
;
803 struct ksplice_patch
*p
;
804 struct ksplice_section
*s
;
807 #ifdef KSPLICE_STANDALONE
810 #endif /* KSPLICE_STANDALONE */
812 INIT_LIST_HEAD(&change
->temp_labelvals
);
813 INIT_LIST_HEAD(&change
->safety_records
);
815 sort(change
->old_code
.relocs
,
816 change
->old_code
.relocs_end
- change
->old_code
.relocs
,
817 sizeof(*change
->old_code
.relocs
), compare_relocs
, NULL
);
818 sort(change
->new_code
.relocs
,
819 change
->new_code
.relocs_end
- change
->new_code
.relocs
,
820 sizeof(*change
->new_code
.relocs
), compare_relocs
, NULL
);
821 sort(change
->old_code
.sections
,
822 change
->old_code
.sections_end
- change
->old_code
.sections
,
823 sizeof(*change
->old_code
.sections
), compare_section_labels
, NULL
);
824 #ifdef KSPLICE_STANDALONE
825 sort(change
->new_code
.system_map
,
826 change
->new_code
.system_map_end
- change
->new_code
.system_map
,
827 sizeof(*change
->new_code
.system_map
), compare_system_map
, NULL
);
828 sort(change
->old_code
.system_map
,
829 change
->old_code
.system_map_end
- change
->old_code
.system_map
,
830 sizeof(*change
->old_code
.system_map
), compare_system_map
, NULL
);
831 #endif /* KSPLICE_STANDALONE */
833 for (p
= change
->patches
; p
< change
->patches_end
; p
++)
835 for (s
= change
->old_code
.sections
; s
< change
->old_code
.sections_end
;
838 for (p
= change
->patches
; p
< change
->patches_end
; p
++) {
839 const struct ksplice_reloc
*r
= patch_reloc(change
, p
);
842 if (p
->type
== KSPLICE_PATCH_DATA
) {
843 s
= symbol_section(change
, r
->symbol
);
846 /* Ksplice creates KSPLICE_PATCH_DATA patches in order
847 * to modify rodata sections that have been explicitly
848 * marked for patching using the ksplice-patch.h macro
849 * ksplice_assume_rodata. Here we modify the section
850 * flags appropriately.
852 if (s
->flags
& KSPLICE_SECTION_DATA
)
853 s
->flags
= (s
->flags
& ~KSPLICE_SECTION_DATA
) |
854 KSPLICE_SECTION_RODATA
;
858 mutex_lock(&module_mutex
);
859 list_for_each_entry(update
, &updates
, list
) {
860 if (strcmp(change
->kid
, update
->kid
) == 0) {
861 if (update
->stage
!= STAGE_PREPARING
) {
865 add_to_update(change
, update
);
870 update
= init_ksplice_update(change
->kid
);
871 if (update
== NULL
) {
875 ret
= ksplice_sysfs_init(update
);
877 cleanup_ksplice_update(update
);
880 add_to_update(change
, update
);
882 mutex_unlock(&module_mutex
);
885 EXPORT_SYMBOL_GPL(init_ksplice_mod_change
);
888 * cleanup_ksplice_mod_change() - Cleans up a change if appropriate
889 * @change: The change to be cleaned up
891 * cleanup_ksplice_mod_change is ordinarily called twice for each
892 * Ksplice update: once when the old_code module is unloaded, and once
893 * when the new_code module is unloaded. By freeing what can be freed
894 * on each unload, we avoid leaks even in unusual scenarios, e.g. if
895 * several alternative old_code modules are loaded and unloaded
898 void cleanup_ksplice_mod_change(struct ksplice_mod_change
*change
)
900 if (change
->update
== NULL
)
903 mutex_lock(&module_mutex
);
904 if (change
->update
->stage
== STAGE_APPLIED
) {
905 struct ksplice_mod_change
*c
;
908 list_for_each_entry(c
, &change
->update
->unused_changes
, list
) {
913 list_del(&change
->list
);
914 mutex_unlock(&module_mutex
);
917 list_del(&change
->list
);
918 if (change
->update
->stage
== STAGE_PREPARING
)
919 maybe_cleanup_ksplice_update(change
->update
);
920 change
->update
= NULL
;
921 mutex_unlock(&module_mutex
);
923 EXPORT_SYMBOL_GPL(cleanup_ksplice_mod_change
);
925 static struct update
*init_ksplice_update(const char *kid
)
927 struct update
*update
;
928 update
= kcalloc(1, sizeof(struct update
), GFP_KERNEL
);
931 update
->name
= kasprintf(GFP_KERNEL
, "ksplice_%s", kid
);
932 if (update
->name
== NULL
) {
936 update
->kid
= kstrdup(kid
, GFP_KERNEL
);
937 if (update
->kid
== NULL
) {
942 if (try_module_get(THIS_MODULE
) != 1) {
948 INIT_LIST_HEAD(&update
->changes
);
949 INIT_LIST_HEAD(&update
->unused_changes
);
950 INIT_LIST_HEAD(&update
->ksplice_module_list
);
951 if (init_debug_buf(update
) != OK
) {
952 module_put(THIS_MODULE
);
958 list_add(&update
->list
, &updates
);
959 update
->stage
= STAGE_PREPARING
;
960 update
->abort_cause
= OK
;
962 INIT_LIST_HEAD(&update
->conflicts
);
966 static void cleanup_ksplice_update(struct update
*update
)
968 list_del(&update
->list
);
969 cleanup_conflicts(update
);
970 clear_debug_buf(update
);
971 cleanup_module_list_entries(update
);
975 module_put(THIS_MODULE
);
978 /* Clean up the update if it no longer has any changes */
979 static void maybe_cleanup_ksplice_update(struct update
*update
)
981 if (list_empty(&update
->changes
) && list_empty(&update
->unused_changes
))
982 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
983 kobject_put(&update
->kobj
);
984 #else /* LINUX_VERSION_CODE < */
985 /* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
986 kobject_unregister(&update
->kobj
);
987 #endif /* LINUX_VERSION_CODE */
990 static void add_to_update(struct ksplice_mod_change
*change
,
991 struct update
*update
)
993 change
->update
= update
;
994 list_add(&change
->list
, &update
->unused_changes
);
997 static int ksplice_sysfs_init(struct update
*update
)
1000 memset(&update
->kobj
, 0, sizeof(update
->kobj
));
1001 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
1002 #ifndef KSPLICE_STANDALONE
1003 ret
= kobject_init_and_add(&update
->kobj
, &update_ktype
,
1004 ksplice_kobj
, "%s", update
->kid
);
1005 #else /* KSPLICE_STANDALONE */
1006 ret
= kobject_init_and_add(&update
->kobj
, &update_ktype
,
1007 &THIS_MODULE
->mkobj
.kobj
, "ksplice");
1008 #endif /* KSPLICE_STANDALONE */
1009 #else /* LINUX_VERSION_CODE < */
1010 /* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
1011 ret
= kobject_set_name(&update
->kobj
, "%s", "ksplice");
1014 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
1015 update
->kobj
.parent
= &THIS_MODULE
->mkobj
.kobj
;
1016 #else /* LINUX_VERSION_CODE < */
1017 /* b86ab02803095190d6b72bcc18dcf620bf378df9 was after 2.6.10 */
1018 update
->kobj
.parent
= &THIS_MODULE
->mkobj
->kobj
;
1019 #endif /* LINUX_VERSION_CODE */
1020 update
->kobj
.ktype
= &update_ktype
;
1021 ret
= kobject_register(&update
->kobj
);
1022 #endif /* LINUX_VERSION_CODE */
1025 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
1026 kobject_uevent(&update
->kobj
, KOBJ_ADD
);
1027 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
1028 /* 312c004d36ce6c739512bac83b452f4c20ab1f62 was after 2.6.14 */
1029 /* 12025235884570ba7f02a6f427f973ac6be7ec54 was after 2.6.9 */
1030 kobject_uevent(&update
->kobj
, KOBJ_ADD
, NULL
);
1031 #endif /* LINUX_VERSION_CODE */
1035 #ifdef KSPLICE_NEED_PARAINSTRUCTIONS
1036 EXTRACT_SYMBOL(apply_paravirt
);
1037 #endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
1039 static abort_t
apply_update(struct update
*update
)
1041 struct ksplice_mod_change
*change
, *n
;
1045 list_for_each_entry(change
, &update
->changes
, list
) {
1046 ret
= create_module_list_entry(change
, true);
1051 list_for_each_entry_safe(change
, n
, &update
->unused_changes
, list
) {
1052 if (strcmp(change
->target_name
, "vmlinux") == 0) {
1053 change
->target
= NULL
;
1054 } else if (change
->target
== NULL
) {
1055 change
->target
= find_module(change
->target_name
);
1056 if (change
->target
== NULL
||
1057 !module_is_live(change
->target
)) {
1058 if (!update
->partial
) {
1059 ret
= TARGET_NOT_LOADED
;
1062 ret
= create_module_list_entry(change
, false);
1067 retval
= use_module(change
->new_code_mod
,
1074 ret
= create_module_list_entry(change
, true);
1077 list_del(&change
->list
);
1078 list_add_tail(&change
->list
, &update
->changes
);
1080 #ifdef KSPLICE_NEED_PARAINSTRUCTIONS
1081 if (change
->target
== NULL
) {
1082 apply_paravirt(change
->new_code
.parainstructions
,
1083 change
->new_code
.parainstructions_end
);
1084 apply_paravirt(change
->old_code
.parainstructions
,
1085 change
->old_code
.parainstructions_end
);
1087 #endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
1090 list_for_each_entry(change
, &update
->changes
, list
) {
1091 const struct ksplice_section
*sect
;
1092 for (sect
= change
->new_code
.sections
;
1093 sect
< change
->new_code
.sections_end
; sect
++) {
1094 struct safety_record
*rec
= kmalloc(sizeof(*rec
),
1097 ret
= OUT_OF_MEMORY
;
1100 rec
->addr
= sect
->address
;
1101 rec
->size
= sect
->size
;
1102 rec
->label
= sect
->symbol
->label
;
1103 list_add(&rec
->list
, &change
->safety_records
);
1107 list_for_each_entry(change
, &update
->changes
, list
) {
1108 ret
= init_symbol_arrays(change
);
1110 cleanup_symbol_arrays(change
);
1113 ret
= prepare_change(change
);
1114 cleanup_symbol_arrays(change
);
1118 ret
= patch_action(update
, KS_APPLY
);
1120 list_for_each_entry(change
, &update
->changes
, list
) {
1121 struct ksplice_section
*s
;
1122 if (update
->stage
== STAGE_PREPARING
)
1123 clear_list(&change
->safety_records
,
1124 struct safety_record
, list
);
1125 for (s
= change
->old_code
.sections
;
1126 s
< change
->old_code
.sections_end
; s
++) {
1127 if (s
->match_map
!= NULL
) {
1128 vfree(s
->match_map
);
1129 s
->match_map
= NULL
;
1133 if (update
->stage
== STAGE_PREPARING
)
1134 cleanup_module_list_entries(update
);
1137 printk(KERN_INFO
"ksplice: Update %s applied successfully\n",
1142 static abort_t
reverse_update(struct update
*update
)
1145 struct ksplice_mod_change
*change
;
1147 clear_debug_buf(update
);
1148 ret
= init_debug_buf(update
);
1152 _ksdebug(update
, "Preparing to reverse %s\n", update
->kid
);
1154 ret
= patch_action(update
, KS_REVERSE
);
1158 list_for_each_entry(change
, &update
->changes
, list
)
1159 clear_list(&change
->safety_records
, struct safety_record
, list
);
1161 printk(KERN_INFO
"ksplice: Update %s reversed successfully\n",
1166 static int compare_symbolp_names(const void *a
, const void *b
)
1168 const struct ksplice_symbol
*const *sympa
= a
, *const *sympb
= b
;
1169 if ((*sympa
)->name
== NULL
&& (*sympb
)->name
== NULL
)
1171 if ((*sympa
)->name
== NULL
)
1173 if ((*sympb
)->name
== NULL
)
1175 return strcmp((*sympa
)->name
, (*sympb
)->name
);
1178 static int compare_symbolp_labels(const void *a
, const void *b
)
1180 const struct ksplice_symbol
*const *sympa
= a
, *const *sympb
= b
;
1181 return strcmp((*sympa
)->label
, (*sympb
)->label
);
1184 static int symbolp_bsearch_compare(const void *key
, const void *elt
)
1186 const char *name
= key
;
1187 const struct ksplice_symbol
*const *symp
= elt
;
1188 const struct ksplice_symbol
*sym
= *symp
;
1189 if (sym
->name
== NULL
)
1191 return strcmp(name
, sym
->name
);
1194 static abort_t
add_matching_values(struct ksplice_lookup
*lookup
,
1195 const char *sym_name
, unsigned long sym_val
)
1197 struct ksplice_symbol
**symp
;
1200 symp
= bsearch(sym_name
, lookup
->arr
, lookup
->size
,
1201 sizeof(*lookup
->arr
), symbolp_bsearch_compare
);
1205 while (symp
> lookup
->arr
&&
1206 symbolp_bsearch_compare(sym_name
, symp
- 1) == 0)
1209 for (; symp
< lookup
->arr
+ lookup
->size
; symp
++) {
1210 struct ksplice_symbol
*sym
= *symp
;
1211 if (sym
->name
== NULL
|| strcmp(sym_name
, sym
->name
) != 0)
1213 ret
= add_candidate_val(lookup
->change
,
1214 sym
->candidate_vals
, sym_val
);
#ifdef CONFIG_KALLSYMS
/*
 * kallsyms_on_each_symbol callback: feed each kernel symbol that could
 * belong to the change's target into add_matching_values.  Symbols
 * from the new_code module itself are skipped.
 */
static int add_kallsyms_values(void *data, const char *name,
			       struct module *owner, unsigned long val)
{
	struct ksplice_lookup *lookup = data;
	if (owner == lookup->change->new_code_mod ||
	    !patches_module(owner, lookup->change->target))
		return (__force int)OK;
	return (__force int)add_matching_values(lookup, name, val);
}
#endif /* CONFIG_KALLSYMS */
1233 static bool add_export_values(const struct symsearch
*syms
,
1234 struct module
*owner
,
1235 unsigned int symnum
, void *data
)
1237 struct ksplice_lookup
*lookup
= data
;
1240 ret
= add_matching_values(lookup
, syms
->start
[symnum
].name
,
1241 syms
->start
[symnum
].value
);
1249 static void cleanup_symbol_arrays(struct ksplice_mod_change
*change
)
1251 struct ksplice_symbol
*sym
;
1252 for (sym
= change
->new_code
.symbols
; sym
< change
->new_code
.symbols_end
;
1254 if (sym
->candidate_vals
!= NULL
) {
1255 clear_list(sym
->candidate_vals
, struct candidate_val
,
1257 kfree(sym
->candidate_vals
);
1258 sym
->candidate_vals
= NULL
;
1261 for (sym
= change
->old_code
.symbols
; sym
< change
->old_code
.symbols_end
;
1263 if (sym
->candidate_vals
!= NULL
) {
1264 clear_list(sym
->candidate_vals
, struct candidate_val
,
1266 kfree(sym
->candidate_vals
);
1267 sym
->candidate_vals
= NULL
;
1273 * The new_code and old_code modules each have their own independent
1274 * ksplice_symbol structures. uniquify_symbols unifies these separate
1275 * pieces of kernel symbol information by replacing all references to
1276 * the old_code copy of symbols with references to the new_code copy.
1278 static abort_t
uniquify_symbols(struct ksplice_mod_change
*change
)
1280 struct ksplice_reloc
*r
;
1281 struct ksplice_section
*s
;
1282 struct ksplice_symbol
*sym
, **sym_arr
, **symp
;
1283 size_t size
= change
->new_code
.symbols_end
- change
->new_code
.symbols
;
1288 sym_arr
= vmalloc(sizeof(*sym_arr
) * size
);
1289 if (sym_arr
== NULL
)
1290 return OUT_OF_MEMORY
;
1292 for (symp
= sym_arr
, sym
= change
->new_code
.symbols
;
1293 symp
< sym_arr
+ size
&& sym
< change
->new_code
.symbols_end
;
1297 sort(sym_arr
, size
, sizeof(*sym_arr
), compare_symbolp_labels
, NULL
);
1299 for (r
= change
->old_code
.relocs
; r
< change
->old_code
.relocs_end
;
1301 symp
= bsearch(&r
->symbol
, sym_arr
, size
, sizeof(*sym_arr
),
1302 compare_symbolp_labels
);
1304 if ((*symp
)->name
== NULL
)
1305 (*symp
)->name
= r
->symbol
->name
;
1310 for (s
= change
->old_code
.sections
; s
< change
->old_code
.sections_end
;
1312 symp
= bsearch(&s
->symbol
, sym_arr
, size
, sizeof(*sym_arr
),
1313 compare_symbolp_labels
);
1315 if ((*symp
)->name
== NULL
)
1316 (*symp
)->name
= s
->symbol
->name
;
1326 * Initialize the ksplice_symbol structures in the given array using
1327 * the kallsyms and exported symbol tables.
1329 static abort_t
init_symbol_array(struct ksplice_mod_change
*change
,
1330 struct ksplice_symbol
*start
,
1331 struct ksplice_symbol
*end
)
1333 struct ksplice_symbol
*sym
, **sym_arr
, **symp
;
1334 struct ksplice_lookup lookup
;
1335 size_t size
= end
- start
;
1341 for (sym
= start
; sym
< end
; sym
++) {
1342 if (strstarts(sym
->label
, "__ksymtab")) {
1343 const struct kernel_symbol
*ksym
;
1344 const char *colon
= strchr(sym
->label
, ':');
1345 const char *name
= colon
+ 1;
1348 ksym
= find_symbol(name
, NULL
, NULL
, true, false);
1350 ksdebug(change
, "Could not find kernel_symbol "
1351 "structure for %s\n", name
);
1354 sym
->value
= (unsigned long)ksym
;
1355 sym
->candidate_vals
= NULL
;
1359 sym
->candidate_vals
= kmalloc(sizeof(*sym
->candidate_vals
),
1361 if (sym
->candidate_vals
== NULL
)
1362 return OUT_OF_MEMORY
;
1363 INIT_LIST_HEAD(sym
->candidate_vals
);
1367 sym_arr
= vmalloc(sizeof(*sym_arr
) * size
);
1368 if (sym_arr
== NULL
)
1369 return OUT_OF_MEMORY
;
1371 for (symp
= sym_arr
, sym
= start
; symp
< sym_arr
+ size
&& sym
< end
;
1375 sort(sym_arr
, size
, sizeof(*sym_arr
), compare_symbolp_names
, NULL
);
1377 lookup
.change
= change
;
1378 lookup
.arr
= sym_arr
;
1382 each_symbol(add_export_values
, &lookup
);
1384 #ifdef CONFIG_KALLSYMS
1386 ret
= (__force abort_t
)
1387 kallsyms_on_each_symbol(add_kallsyms_values
, &lookup
);
1388 #endif /* CONFIG_KALLSYMS */
1394 * Prepare the change's ksplice_symbol structures for run-pre matching
1396 * noinline to prevent garbage on the stack from confusing check_stack
1398 static noinline abort_t
init_symbol_arrays(struct ksplice_mod_change
*change
)
1402 ret
= uniquify_symbols(change
);
1406 ret
= init_symbol_array(change
, change
->old_code
.symbols
,
1407 change
->old_code
.symbols_end
);
1411 ret
= init_symbol_array(change
, change
->new_code
.symbols
,
1412 change
->new_code
.symbols_end
);
1419 /* noinline to prevent garbage on the stack from confusing check_stack */
1420 static noinline abort_t
prepare_change(struct ksplice_mod_change
*change
)
1424 ksdebug(change
, "Preparing and checking %s\n", change
->name
);
1425 ret
= match_change_sections(change
, false);
1426 if (ret
== NO_MATCH
) {
1427 /* It is possible that by using relocations from .data sections
1428 * we can successfully run-pre match the rest of the sections.
1429 * To avoid using any symbols obtained from .data sections
1430 * (which may be unreliable) in the post code, we first prepare
1431 * the post code and then try to run-pre match the remaining
1432 * sections with the help of .data sections.
1434 ksdebug(change
, "Continuing without some sections; we might "
1435 "find them later.\n");
1436 ret
= finalize_change(change
);
1438 ksdebug(change
, "Aborted. Unable to continue without "
1439 "the unmatched sections.\n");
1443 ksdebug(change
, "run-pre: Considering .data sections to find "
1444 "the unmatched sections\n");
1445 ret
= match_change_sections(change
, true);
1449 ksdebug(change
, "run-pre: Found all previously unmatched "
1452 } else if (ret
!= OK
) {
1456 return finalize_change(change
);
1460 * Finish preparing the change for insertion into the kernel.
1461 * Afterwards, the replacement code should be ready to run and the
1462 * ksplice_patches should all be ready for trampoline insertion.
1464 static abort_t
finalize_change(struct ksplice_mod_change
*change
)
1467 ret
= apply_relocs(change
, change
->new_code
.relocs
,
1468 change
->new_code
.relocs_end
);
1472 ret
= finalize_patches(change
);
1479 static abort_t
finalize_patches(struct ksplice_mod_change
*change
)
1481 struct ksplice_patch
*p
;
1482 struct safety_record
*rec
;
1485 for (p
= change
->patches
; p
< change
->patches_end
; p
++) {
1487 list_for_each_entry(rec
, &change
->safety_records
, list
) {
1488 if (rec
->addr
<= p
->oldaddr
&&
1489 p
->oldaddr
< rec
->addr
+ rec
->size
) {
1494 if (!found
&& p
->type
!= KSPLICE_PATCH_EXPORT
) {
1495 const struct ksplice_reloc
*r
= patch_reloc(change
, p
);
1497 ksdebug(change
, "A patch with no reloc at its "
1498 "oldaddr has no safety record\n");
1501 ksdebug(change
, "No safety record for patch with "
1502 "oldaddr %s+%lx\n", r
->symbol
->label
,
1507 if (p
->type
== KSPLICE_PATCH_TEXT
) {
1508 ret
= prepare_trampoline(change
, p
);
1513 if (found
&& rec
->addr
+ rec
->size
< p
->oldaddr
+ p
->size
) {
1514 ksdebug(change
, "Safety record %s is too short for "
1515 "patch\n", rec
->label
);
1519 if (p
->type
== KSPLICE_PATCH_TEXT
) {
1520 if (p
->repladdr
== 0)
1521 p
->repladdr
= (unsigned long)ksplice_deleted
;
1525 for (p
= change
->patches
; p
< change
->patches_end
; p
++) {
1526 struct ksplice_patch
*q
;
1527 for (q
= change
->patches
; q
< change
->patches_end
; q
++) {
1528 if (p
!= q
&& p
->oldaddr
<= q
->oldaddr
&&
1529 p
->oldaddr
+ p
->size
> q
->oldaddr
) {
1530 ksdebug(change
, "Overlapping oldaddrs "
1540 /* noinline to prevent garbage on the stack from confusing check_stack */
1541 static noinline abort_t
map_trampoline_pages(struct update
*update
)
1543 struct ksplice_mod_change
*change
;
1544 list_for_each_entry(change
, &update
->changes
, list
) {
1545 struct ksplice_patch
*p
;
1546 for (p
= change
->patches
; p
< change
->patches_end
; p
++) {
1547 p
->vaddr
= map_writable((void *)p
->oldaddr
, p
->size
);
1548 if (p
->vaddr
== NULL
) {
1550 "Unable to map oldaddr read/write\n");
1551 unmap_trampoline_pages(update
);
1559 static void unmap_trampoline_pages(struct update
*update
)
1561 struct ksplice_mod_change
*change
;
1562 list_for_each_entry(change
, &update
->changes
, list
) {
1563 struct ksplice_patch
*p
;
1564 for (p
= change
->patches
; p
< change
->patches_end
; p
++) {
1565 vunmap((void *)((unsigned long)p
->vaddr
& PAGE_MASK
));
1571 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) && defined(CONFIG_X86_64)
1572 /* e3ebadd95cb621e2c7436f3d3646447ac9d5c16d was after 2.6.21 */
1573 #define phys_base ({EXTRACT_SYMBOL(phys_base); phys_base;})
1574 #endif /* LINUX_VERSION_CODE && CONFIG_X86_64 */
1577 * map_writable creates a shadow page mapping of the range
1578 * [addr, addr + len) so that we can write to code mapped read-only.
1580 * It is similar to a generalized version of x86's text_poke. But
1581 * because one cannot use vmalloc/vfree() inside stop_machine, we use
1582 * map_writable to map the pages before stop_machine, then use the
1583 * mapping inside stop_machine, and unmap the pages afterwards.
1585 static void *map_writable(void *addr
, size_t len
)
1588 int nr_pages
= DIV_ROUND_UP(offset_in_page(addr
) + len
, PAGE_SIZE
);
1589 struct page
**pages
= kmalloc(nr_pages
* sizeof(*pages
), GFP_KERNEL
);
1590 void *page_addr
= (void *)((unsigned long)addr
& PAGE_MASK
);
1596 for (i
= 0; i
< nr_pages
; i
++) {
1597 if (__module_address((unsigned long)page_addr
) == NULL
) {
1598 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) || !defined(CONFIG_X86_64)
1599 pages
[i
] = virt_to_page(page_addr
);
1600 #else /* LINUX_VERSION_CODE < && CONFIG_X86_64 */
1601 /* e3ebadd95cb621e2c7436f3d3646447ac9d5c16d was after 2.6.21
1602 * This works around a broken virt_to_page() from the RHEL 5 backport
1603 * of x86-64 relocatable kernel support.
1606 pfn_to_page(__pa_symbol(page_addr
) >> PAGE_SHIFT
);
1607 #endif /* LINUX_VERSION_CODE || !CONFIG_X86_64 */
1608 WARN_ON(!PageReserved(pages
[i
]));
1610 pages
[i
] = vmalloc_to_page(addr
);
1612 if (pages
[i
] == NULL
) {
1616 page_addr
+= PAGE_SIZE
;
1618 vaddr
= vmap(pages
, nr_pages
, VM_MAP
, PAGE_KERNEL
);
1622 return vaddr
+ offset_in_page(addr
);
1625 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
1626 /* c6b37801911d7f4663c99cad8aa230bc934cea82 was after 2.6.29 */
1627 EXTRACT_SYMBOL(__module_text_address
);
1628 #endif /* LINUX_VERSION_CODE */
1631 * Ksplice adds a dependency on any symbol address used to resolve
1632 * relocations in the new_code module.
1634 * Be careful to follow_trampolines so that we always depend on the
1635 * latest version of the target function, since that's the code that
1636 * will run if we call addr.
1638 static abort_t
add_dependency_on_address(struct ksplice_mod_change
*change
,
1641 struct ksplice_mod_change
*c
;
1643 __module_text_address(follow_trampolines(change
, addr
));
1646 list_for_each_entry(c
, &change
->update
->changes
, list
) {
1647 if (m
== c
->new_code_mod
)
1650 if (use_module(change
->new_code_mod
, m
) != 1)
1655 static abort_t
apply_relocs(struct ksplice_mod_change
*change
,
1656 const struct ksplice_reloc
*relocs
,
1657 const struct ksplice_reloc
*relocs_end
)
1659 const struct ksplice_reloc
*r
;
1660 for (r
= relocs
; r
< relocs_end
; r
++) {
1661 abort_t ret
= apply_reloc(change
, r
);
1668 static abort_t
apply_reloc(struct ksplice_mod_change
*change
,
1669 const struct ksplice_reloc
*r
)
1671 switch (r
->howto
->type
) {
1672 case KSPLICE_HOWTO_RELOC
:
1673 case KSPLICE_HOWTO_RELOC_PATCH
:
1674 return apply_howto_reloc(change
, r
);
1675 case KSPLICE_HOWTO_DATE
:
1676 case KSPLICE_HOWTO_TIME
:
1677 return apply_howto_date(change
, r
);
1679 ksdebug(change
, "Unexpected howto type %d\n", r
->howto
->type
);
1685 * Applies a relocation. Aborts if the symbol referenced in it has
1686 * not been uniquely resolved.
1688 static abort_t
apply_howto_reloc(struct ksplice_mod_change
*change
,
1689 const struct ksplice_reloc
*r
)
1693 unsigned long sym_addr
;
1696 canary_ret
= contains_canary(change
, r
->blank_addr
, r
->howto
);
1699 if (canary_ret
== 0) {
1700 ksdebug(change
, "reloc: skipped %lx to %s+%lx (altinstr)\n",
1701 r
->blank_addr
, r
->symbol
->label
, r
->target_addend
);
1705 #ifdef KSPLICE_STANDALONE
1706 if (!bootstrapped
) {
1707 ret
= add_system_map_candidates(change
,
1708 change
->new_code
.system_map
,
1709 change
->new_code
.system_map_end
,
1710 r
->symbol
->label
, &vals
);
1712 release_vals(&vals
);
1716 #endif /* KSPLICE_STANDALONE */
1717 ret
= lookup_symbol(change
, r
->symbol
, &vals
);
1719 release_vals(&vals
);
1723 * Relocations for the oldaddr fields of patches must have
1724 * been resolved via run-pre matching.
1726 if (!singular(&vals
) || (r
->symbol
->candidate_vals
!= NULL
&&
1727 r
->howto
->type
== KSPLICE_HOWTO_RELOC_PATCH
)) {
1728 release_vals(&vals
);
1729 ksdebug(change
, "Failed to find %s for reloc\n",
1731 return FAILED_TO_FIND
;
1733 sym_addr
= list_entry(vals
.next
, struct candidate_val
, list
)->val
;
1734 release_vals(&vals
);
1736 ret
= write_reloc_value(change
, r
, r
->blank_addr
,
1737 r
->howto
->pcrel
? sym_addr
- r
->blank_addr
:
1742 ksdebug(change
, "reloc: %lx to %s+%lx (S=%lx ", r
->blank_addr
,
1743 r
->symbol
->label
, r
->target_addend
, sym_addr
);
1744 switch (r
->howto
->size
) {
1746 ksdebug(change
, "aft=%02x)\n", *(uint8_t *)r
->blank_addr
);
1749 ksdebug(change
, "aft=%04x)\n", *(uint16_t *)r
->blank_addr
);
1752 ksdebug(change
, "aft=%08x)\n", *(uint32_t *)r
->blank_addr
);
1754 #if BITS_PER_LONG >= 64
1756 ksdebug(change
, "aft=%016llx)\n", *(uint64_t *)r
->blank_addr
);
1758 #endif /* BITS_PER_LONG */
1760 ksdebug(change
, "Aborted. Invalid relocation size.\n");
1763 #ifdef KSPLICE_STANDALONE
1766 #endif /* KSPLICE_STANDALONE */
1769 * Create labelvals so that we can verify our choices in the
1770 * second round of run-pre matching that considers data sections.
1772 ret
= create_labelval(change
, r
->symbol
, sym_addr
, VAL
);
1776 return add_dependency_on_address(change
, sym_addr
);
1780 * Date relocations are created wherever __DATE__ or __TIME__ is used
1781 * in the kernel; we resolve them by simply copying in the date/time
1782 * obtained from run-pre matching the relevant compilation unit.
1784 static abort_t
apply_howto_date(struct ksplice_mod_change
*change
,
1785 const struct ksplice_reloc
*r
)
1787 if (r
->symbol
->candidate_vals
!= NULL
) {
1788 ksdebug(change
, "Failed to find %s for date\n",
1790 return FAILED_TO_FIND
;
1792 memcpy((unsigned char *)r
->blank_addr
,
1793 (const unsigned char *)r
->symbol
->value
, r
->howto
->size
);
1798 * Given a relocation and its run address, compute the address of the
1799 * symbol the relocation referenced, and store it in *valp.
1801 static abort_t
read_reloc_value(struct ksplice_mod_change
*change
,
1802 const struct ksplice_reloc
*r
,
1803 unsigned long addr
, unsigned long *valp
)
1805 unsigned char bytes
[sizeof(long)];
1807 const struct ksplice_reloc_howto
*howto
= r
->howto
;
1809 if (howto
->size
<= 0 || howto
->size
> sizeof(long)) {
1810 ksdebug(change
, "Aborted. Invalid relocation size.\n");
1814 if (probe_kernel_read(bytes
, (void *)addr
, howto
->size
) == -EFAULT
)
1817 switch (howto
->size
) {
1819 val
= *(uint8_t *)bytes
;
1822 val
= *(uint16_t *)bytes
;
1825 val
= *(uint32_t *)bytes
;
1827 #if BITS_PER_LONG >= 64
1829 val
= *(uint64_t *)bytes
;
1831 #endif /* BITS_PER_LONG */
1833 ksdebug(change
, "Aborted. Invalid relocation size.\n");
1837 val
&= howto
->dst_mask
;
1838 if (howto
->signed_addend
)
1839 val
|= -(val
& (howto
->dst_mask
& ~(howto
->dst_mask
>> 1)));
1840 val
<<= howto
->rightshift
;
1841 val
-= r
->insn_addend
+ r
->target_addend
;
1847 * Given a relocation, the address of its storage unit, and the
1848 * address of the symbol the relocation references, write the
1849 * relocation's final value into the storage unit.
1851 static abort_t
write_reloc_value(struct ksplice_mod_change
*change
,
1852 const struct ksplice_reloc
*r
,
1853 unsigned long addr
, unsigned long sym_addr
)
1855 unsigned long val
= sym_addr
+ r
->target_addend
+ r
->insn_addend
;
1856 const struct ksplice_reloc_howto
*howto
= r
->howto
;
1857 val
>>= howto
->rightshift
;
1858 switch (howto
->size
) {
1860 *(uint8_t *)addr
= (*(uint8_t *)addr
& ~howto
->dst_mask
) |
1861 (val
& howto
->dst_mask
);
1864 *(uint16_t *)addr
= (*(uint16_t *)addr
& ~howto
->dst_mask
) |
1865 (val
& howto
->dst_mask
);
1868 *(uint32_t *)addr
= (*(uint32_t *)addr
& ~howto
->dst_mask
) |
1869 (val
& howto
->dst_mask
);
1871 #if BITS_PER_LONG >= 64
1873 *(uint64_t *)addr
= (*(uint64_t *)addr
& ~howto
->dst_mask
) |
1874 (val
& howto
->dst_mask
);
1876 #endif /* BITS_PER_LONG */
1878 ksdebug(change
, "Aborted. Invalid relocation size.\n");
1882 if (read_reloc_value(change
, r
, addr
, &val
) != OK
|| val
!= sym_addr
) {
1883 ksdebug(change
, "Aborted. Relocation overflow.\n");
1890 static abort_t
create_module_list_entry(struct ksplice_mod_change
*change
,
1893 struct ksplice_module_list_entry
*entry
=
1894 kmalloc(sizeof(*entry
), GFP_KERNEL
);
1896 return OUT_OF_MEMORY
;
1897 entry
->new_code_mod_name
=
1898 kstrdup(change
->new_code_mod
->name
, GFP_KERNEL
);
1899 if (entry
->new_code_mod_name
== NULL
) {
1901 return OUT_OF_MEMORY
;
1903 entry
->target_mod_name
= kstrdup(change
->target_name
, GFP_KERNEL
);
1904 if (entry
->target_mod_name
== NULL
) {
1905 kfree(entry
->new_code_mod_name
);
1907 return OUT_OF_MEMORY
;
1909 /* The update's kid is guaranteed to outlast the module_list_entry */
1910 entry
->kid
= change
->update
->kid
;
1911 entry
->applied
= to_be_applied
;
1912 list_add(&entry
->update_list
, &change
->update
->ksplice_module_list
);
1916 static void cleanup_module_list_entries(struct update
*update
)
1918 struct ksplice_module_list_entry
*entry
;
1919 list_for_each_entry(entry
, &update
->ksplice_module_list
, update_list
) {
1920 kfree(entry
->target_mod_name
);
1921 kfree(entry
->new_code_mod_name
);
1923 clear_list(&update
->ksplice_module_list
,
1924 struct ksplice_module_list_entry
, update_list
);
1927 /* Replacement address used for functions deleted by the patch */
1928 static void __attribute__((noreturn
)) ksplice_deleted(void)
1930 printk(KERN_CRIT
"Called a kernel function deleted by Ksplice!\n");
1932 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
1933 /* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
1938 /* Floodfill to run-pre match the sections within a change. */
1939 static abort_t
match_change_sections(struct ksplice_mod_change
*change
,
1940 bool consider_data_sections
)
1942 struct ksplice_section
*sect
;
1947 for (sect
= change
->old_code
.sections
;
1948 sect
< change
->old_code
.sections_end
; sect
++) {
1949 if ((sect
->flags
& KSPLICE_SECTION_DATA
) == 0 &&
1950 (sect
->flags
& KSPLICE_SECTION_STRING
) == 0 &&
1951 (sect
->flags
& KSPLICE_SECTION_MATCHED
) == 0)
1955 while (remaining
> 0) {
1957 for (sect
= change
->old_code
.sections
;
1958 sect
< change
->old_code
.sections_end
; sect
++) {
1959 if ((sect
->flags
& KSPLICE_SECTION_MATCHED
) != 0)
1961 if ((!consider_data_sections
&&
1962 (sect
->flags
& KSPLICE_SECTION_DATA
) != 0) ||
1963 (sect
->flags
& KSPLICE_SECTION_STRING
) != 0)
1965 ret
= find_section(change
, sect
);
1967 sect
->flags
|= KSPLICE_SECTION_MATCHED
;
1968 if ((sect
->flags
& KSPLICE_SECTION_DATA
) == 0)
1971 } else if (ret
!= NO_MATCH
) {
1979 for (sect
= change
->old_code
.sections
;
1980 sect
< change
->old_code
.sections_end
; sect
++) {
1981 if ((sect
->flags
& KSPLICE_SECTION_MATCHED
) != 0 ||
1982 (sect
->flags
& KSPLICE_SECTION_STRING
) != 0)
1984 ksdebug(change
, "run-pre: could not match %s "
1986 (sect
->flags
& KSPLICE_SECTION_DATA
) != 0 ?
1988 (sect
->flags
& KSPLICE_SECTION_RODATA
) != 0 ?
1989 "rodata" : "text", sect
->symbol
->label
);
1991 ksdebug(change
, "Aborted. run-pre: could not match some "
1999 * Search for the section in the running kernel. Returns OK if and
2000 * only if it finds precisely one address in the kernel matching the
2003 static abort_t
find_section(struct ksplice_mod_change
*change
,
2004 struct ksplice_section
*sect
)
2008 unsigned long run_addr
;
2010 struct candidate_val
*v
, *n
;
2012 #ifdef KSPLICE_STANDALONE
2013 ret
= add_system_map_candidates(change
, change
->old_code
.system_map
,
2014 change
->old_code
.system_map_end
,
2015 sect
->symbol
->label
, &vals
);
2017 release_vals(&vals
);
2020 #endif /* KSPLICE_STANDALONE */
2021 ret
= lookup_symbol(change
, sect
->symbol
, &vals
);
2023 release_vals(&vals
);
2027 ksdebug(change
, "run-pre: starting sect search for %s\n",
2028 sect
->symbol
->label
);
2030 list_for_each_entry_safe(v
, n
, &vals
, list
) {
2034 ret
= try_addr(change
, sect
, run_addr
, NULL
, RUN_PRE_INITIAL
);
2035 if (ret
== NO_MATCH
) {
2038 } else if (ret
!= OK
) {
2039 release_vals(&vals
);
2044 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
2045 if (list_empty(&vals
) && (sect
->flags
& KSPLICE_SECTION_DATA
) == 0) {
2046 ret
= brute_search_all(change
, sect
, &vals
);
2048 release_vals(&vals
);
2052 * Make sure run-pre matching output is displayed if
2053 * brute_search succeeds.
2055 if (singular(&vals
)) {
2056 run_addr
= list_entry(vals
.next
, struct candidate_val
,
2058 ret
= try_addr(change
, sect
, run_addr
, NULL
,
2061 ksdebug(change
, "run-pre: Debug run failed for "
2062 "sect %s:\n", sect
->symbol
->label
);
2063 release_vals(&vals
);
2068 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
2070 if (singular(&vals
)) {
2071 LIST_HEAD(safety_records
);
2072 run_addr
= list_entry(vals
.next
, struct candidate_val
,
2074 ret
= try_addr(change
, sect
, run_addr
, &safety_records
,
2076 release_vals(&vals
);
2078 clear_list(&safety_records
, struct safety_record
, list
);
2079 ksdebug(change
, "run-pre: Final run failed for sect "
2080 "%s:\n", sect
->symbol
->label
);
2082 list_splice(&safety_records
, &change
->safety_records
);
2085 } else if (!list_empty(&vals
)) {
2086 struct candidate_val
*val
;
2087 ksdebug(change
, "run-pre: multiple candidates for sect %s:\n",
2088 sect
->symbol
->label
);
2090 list_for_each_entry(val
, &vals
, list
) {
2092 ksdebug(change
, "%lx\n", val
->val
);
2094 ksdebug(change
, "...\n");
2098 release_vals(&vals
);
2101 release_vals(&vals
);
2106 * try_addr is the the interface to run-pre matching. Its primary
2107 * purpose is to manage debugging information for run-pre matching;
2108 * all the hard work is in run_pre_cmp.
2110 static abort_t
try_addr(struct ksplice_mod_change
*change
,
2111 struct ksplice_section
*sect
,
2112 unsigned long run_addr
,
2113 struct list_head
*safety_records
,
2114 enum run_pre_mode mode
)
2117 const struct module
*run_module
= __module_address(run_addr
);
2119 if (run_module
== change
->new_code_mod
) {
2120 ksdebug(change
, "run-pre: unexpected address %lx in new_code "
2121 "module %s for sect %s\n", run_addr
, run_module
->name
,
2122 sect
->symbol
->label
);
2125 if (!patches_module(run_module
, change
->target
)) {
2126 ksdebug(change
, "run-pre: ignoring address %lx in other module "
2127 "%s for sect %s\n", run_addr
, run_module
== NULL
?
2128 "vmlinux" : run_module
->name
, sect
->symbol
->label
);
2132 ret
= create_labelval(change
, sect
->symbol
, run_addr
, TEMP
);
2136 #ifdef CONFIG_FUNCTION_DATA_SECTIONS
2137 ret
= run_pre_cmp(change
, sect
, run_addr
, safety_records
, mode
);
2138 #else /* !CONFIG_FUNCTION_DATA_SECTIONS */
2139 if ((sect
->flags
& KSPLICE_SECTION_TEXT
) != 0)
2140 ret
= arch_run_pre_cmp(change
, sect
, run_addr
, safety_records
,
2143 ret
= run_pre_cmp(change
, sect
, run_addr
, safety_records
, mode
);
2144 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
2145 if (ret
== NO_MATCH
&& mode
!= RUN_PRE_FINAL
) {
2146 set_temp_labelvals(change
, NOVAL
);
2147 ksdebug(change
, "run-pre: %s sect %s does not match (r_a=%lx "
2149 (sect
->flags
& KSPLICE_SECTION_RODATA
) != 0 ? "rodata" :
2150 (sect
->flags
& KSPLICE_SECTION_DATA
) != 0 ? "data" :
2151 "text", sect
->symbol
->label
, run_addr
, sect
->address
,
2153 ksdebug(change
, "run-pre: ");
2154 if (change
->update
->debug
>= 1) {
2155 #ifdef CONFIG_FUNCTION_DATA_SECTIONS
2156 ret
= run_pre_cmp(change
, sect
, run_addr
,
2157 safety_records
, RUN_PRE_DEBUG
);
2158 #else /* !CONFIG_FUNCTION_DATA_SECTIONS */
2159 if ((sect
->flags
& KSPLICE_SECTION_TEXT
) != 0)
2160 ret
= arch_run_pre_cmp(change
, sect
, run_addr
,
2164 ret
= run_pre_cmp(change
, sect
, run_addr
,
2167 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
2168 set_temp_labelvals(change
, NOVAL
);
2170 ksdebug(change
, "\n");
2172 } else if (ret
!= OK
) {
2173 set_temp_labelvals(change
, NOVAL
);
2177 if (mode
!= RUN_PRE_FINAL
) {
2178 set_temp_labelvals(change
, NOVAL
);
2179 ksdebug(change
, "run-pre: candidate for sect %s=%lx\n",
2180 sect
->symbol
->label
, run_addr
);
2184 set_temp_labelvals(change
, VAL
);
2185 ksdebug(change
, "run-pre: found sect %s=%lx\n", sect
->symbol
->label
,
2191 * run_pre_cmp is the primary run-pre matching function; it determines
2192 * whether the given ksplice_section matches the code or data in the
2193 * running kernel starting at run_addr.
2195 * If run_pre_mode is RUN_PRE_FINAL, a safety record for the matched
2196 * section is created.
2198 * The run_pre_mode is also used to determine what debugging
2199 * information to display.
2201 static abort_t
run_pre_cmp(struct ksplice_mod_change
*change
,
2202 const struct ksplice_section
*sect
,
2203 unsigned long run_addr
,
2204 struct list_head
*safety_records
,
2205 enum run_pre_mode mode
)
2209 const struct ksplice_reloc
*r
, *finger
;
2210 const unsigned char *pre
, *run
, *pre_start
, *run_start
;
2211 unsigned char runval
;
2213 pre_start
= (const unsigned char *)sect
->address
;
2214 run_start
= (const unsigned char *)run_addr
;
2216 finger
= init_reloc_search(change
, sect
);
2220 while (pre
< pre_start
+ sect
->size
) {
2221 unsigned long offset
= pre
- pre_start
;
2222 ret
= lookup_reloc(change
, &finger
, (unsigned long)pre
, &r
);
2224 ret
= handle_reloc(change
, sect
, r
, (unsigned long)run
,
2227 if (mode
== RUN_PRE_INITIAL
)
2228 ksdebug(change
, "reloc in sect does "
2229 "not match after %lx/%lx "
2230 "bytes\n", offset
, sect
->size
);
2233 if (mode
== RUN_PRE_DEBUG
)
2234 print_bytes(change
, run
, r
->howto
->size
, pre
,
2236 pre
+= r
->howto
->size
;
2237 run
+= r
->howto
->size
;
2240 } else if (ret
!= NO_MATCH
) {
2244 if ((sect
->flags
& KSPLICE_SECTION_TEXT
) != 0) {
2245 ret
= handle_paravirt(change
, (unsigned long)pre
,
2246 (unsigned long)run
, &matched
);
2250 if (mode
== RUN_PRE_DEBUG
)
2251 print_bytes(change
, run
, matched
, pre
,
2259 if (probe_kernel_read(&runval
, (void *)run
, 1) == -EFAULT
) {
2260 if (mode
== RUN_PRE_INITIAL
)
2261 ksdebug(change
, "sect unmapped after %lx/%lx "
2262 "bytes\n", offset
, sect
->size
);
2266 if (runval
!= *pre
&&
2267 (sect
->flags
& KSPLICE_SECTION_DATA
) == 0) {
2268 if (mode
== RUN_PRE_INITIAL
)
2269 ksdebug(change
, "sect does not match after "
2270 "%lx/%lx bytes\n", offset
, sect
->size
);
2271 if (mode
== RUN_PRE_DEBUG
) {
2272 print_bytes(change
, run
, 1, pre
, 1);
2273 ksdebug(change
, "[p_o=%lx] ! ", offset
);
2274 print_bytes(change
, run
+ 1, 2, pre
+ 1, 2);
2278 if (mode
== RUN_PRE_DEBUG
)
2279 print_bytes(change
, run
, 1, pre
, 1);
2283 return create_safety_record(change
, sect
, safety_records
, run_addr
,
/*
 * Debug helper: print the first min(runc, prec) bytes of run and pre
 * side by side ("%02x " when equal, "%02x/%02x " when different), then
 * any unmatched tail of either buffer.  Output goes through ksdebug.
 */
static void print_bytes(struct ksplice_mod_change *change,
			const unsigned char *run, int runc,
			const unsigned char *pre, int prec)
{
	int o;
	int matched = min(runc, prec);
	for (o = 0; o < matched; o++) {
		if (run[o] == pre[o])
			ksdebug(change, "%02x ", run[o]);
		else
			ksdebug(change, "%02x/%02x ", run[o], pre[o]);
	}
	for (o = matched; o < runc; o++)
		ksdebug(change, "%02x/ ", run[o]);
	for (o = matched; o < prec; o++)
		ksdebug(change, "/%02x ", pre[o]);
}
2305 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
2306 static abort_t
brute_search(struct ksplice_mod_change
*change
,
2307 struct ksplice_section
*sect
,
2308 const void *start
, unsigned long len
,
2309 struct list_head
*vals
)
2315 for (addr
= (unsigned long)start
; addr
< (unsigned long)start
+ len
;
2317 if (addr
% 100000 == 0)
2320 if (probe_kernel_read(&run
, (void *)addr
, 1) == -EFAULT
)
2323 pre
= *(const unsigned char *)(sect
->address
);
2328 ret
= try_addr(change
, sect
, addr
, NULL
, RUN_PRE_INITIAL
);
2330 ret
= add_candidate_val(change
, vals
, addr
);
2333 } else if (ret
!= NO_MATCH
) {
2341 extern struct list_head modules
;
2342 EXTRACT_SYMBOL(modules
);
2343 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
2344 /* 3abf024d2abb79614d8c4cb25a70d5596f77d0ad was after 2.6.24 */
2345 EXTRACT_SYMBOL(init_mm
);
2346 #endif /* LINUX_VERSION_CODE */
2348 static abort_t
brute_search_all(struct ksplice_mod_change
*change
,
2349 struct ksplice_section
*sect
,
2350 struct list_head
*vals
)
2356 ksdebug(change
, "brute_search: searching for %s\n",
2357 sect
->symbol
->label
);
2358 saved_debug
= change
->update
->debug
;
2359 change
->update
->debug
= 0;
2361 list_for_each_entry(m
, &modules
, list
) {
2362 if (!patches_module(m
, change
->target
) ||
2363 m
== change
->new_code_mod
)
2365 ret
= brute_search(change
, sect
, m
->module_core
, m
->core_size
,
2369 ret
= brute_search(change
, sect
, m
->module_init
, m
->init_size
,
2375 ret
= brute_search(change
, sect
, (const void *)init_mm
.start_code
,
2376 init_mm
.end_code
- init_mm
.start_code
, vals
);
2379 change
->update
->debug
= saved_debug
;
2382 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
2385 unsigned long address
;
2389 static int reloc_bsearch_compare(const void *key
, const void *elt
)
2391 const struct range
*range
= key
;
2392 const struct ksplice_reloc
*r
= elt
;
2393 if (range
->address
+ range
->size
<= r
->blank_addr
)
2395 if (range
->address
> r
->blank_addr
)
2400 static const struct ksplice_reloc
*find_reloc(const struct ksplice_reloc
*start
,
2401 const struct ksplice_reloc
*end
,
2402 unsigned long address
,
2405 const struct ksplice_reloc
*r
;
2406 struct range range
= { address
, size
};
2407 r
= bsearch((void *)&range
, start
, end
- start
, sizeof(*r
),
2408 reloc_bsearch_compare
);
2411 while (r
> start
&& (r
- 1)->blank_addr
>= address
)
2416 static const struct ksplice_reloc
*
2417 init_reloc_search(struct ksplice_mod_change
*change
,
2418 const struct ksplice_section
*sect
)
2420 const struct ksplice_reloc
*r
;
2421 r
= find_reloc(change
->old_code
.relocs
, change
->old_code
.relocs_end
,
2422 sect
->address
, sect
->size
);
2424 return change
->old_code
.relocs_end
;
2429 * lookup_reloc implements an amortized O(1) lookup for the next
2430 * old_code relocation. It must be called with a strictly increasing
2431 * sequence of addresses.
2433 * The fingerp is private data for lookup_reloc, and needs to have
2434 * been initialized as a pointer to the result of find_reloc (or
2435 * init_reloc_search).
2437 static abort_t
lookup_reloc(struct ksplice_mod_change
*change
,
2438 const struct ksplice_reloc
**fingerp
,
2440 const struct ksplice_reloc
**relocp
)
2442 const struct ksplice_reloc
*r
= *fingerp
;
2445 while (r
< change
->old_code
.relocs_end
&&
2446 addr
>= r
->blank_addr
+ r
->howto
->size
&&
2447 !(addr
== r
->blank_addr
&& r
->howto
->size
== 0))
2450 if (r
== change
->old_code
.relocs_end
)
2452 if (addr
< r
->blank_addr
)
2455 if (r
->howto
->type
!= KSPLICE_HOWTO_RELOC
)
2458 canary_ret
= contains_canary(change
, r
->blank_addr
, r
->howto
);
2461 if (canary_ret
== 0) {
2462 ksdebug(change
, "run-pre: reloc skipped at p_a=%lx to %s+%lx "
2463 "(altinstr)\n", r
->blank_addr
, r
->symbol
->label
,
2467 if (addr
!= r
->blank_addr
) {
2468 ksdebug(change
, "Invalid nonzero relocation offset\n");
2474 static abort_t
handle_howto_symbol(struct ksplice_mod_change
*change
,
2475 const struct ksplice_reloc
*r
,
2476 unsigned long run_addr
,
2477 enum run_pre_mode mode
)
2479 if (mode
== RUN_PRE_INITIAL
)
2480 ksdebug(change
, "run-pre: symbol %s at %lx\n", r
->symbol
->label
,
2482 return create_labelval(change
, r
->symbol
, run_addr
, TEMP
);
2485 static abort_t
handle_reloc(struct ksplice_mod_change
*change
,
2486 const struct ksplice_section
*sect
,
2487 const struct ksplice_reloc
*r
,
2488 unsigned long run_addr
, enum run_pre_mode mode
)
2490 switch (r
->howto
->type
) {
2491 case KSPLICE_HOWTO_RELOC
:
2492 return handle_howto_reloc(change
, sect
, r
, run_addr
, mode
);
2493 case KSPLICE_HOWTO_DATE
:
2494 case KSPLICE_HOWTO_TIME
:
2495 return handle_howto_date(change
, sect
, r
, run_addr
, mode
);
2496 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
2498 case KSPLICE_HOWTO_BUG
:
2499 return handle_bug(change
, r
, run_addr
);
2500 #endif /* CONFIG_BUG */
2501 #else /* LINUX_VERSION_CODE < */
2502 /* 7664c5a1da4711bb6383117f51b94c8dc8f3f1cd was after 2.6.19 */
2503 #endif /* LINUX_VERSION_CODE */
2504 case KSPLICE_HOWTO_EXTABLE
:
2505 return handle_extable(change
, r
, run_addr
);
2506 case KSPLICE_HOWTO_SYMBOL
:
2507 return handle_howto_symbol(change
, r
, run_addr
, mode
);
2509 ksdebug(change
, "Unexpected howto type %d\n", r
->howto
->type
);
2515 * For date/time relocations, we check that the sequence of bytes
2516 * matches the format of a date or time.
2518 static abort_t
handle_howto_date(struct ksplice_mod_change
*change
,
2519 const struct ksplice_section
*sect
,
2520 const struct ksplice_reloc
*r
,
2521 unsigned long run_addr
, enum run_pre_mode mode
)
2524 char *buf
= kmalloc(r
->howto
->size
, GFP_KERNEL
);
2527 return OUT_OF_MEMORY
;
2528 if (probe_kernel_read(buf
, (void *)run_addr
, r
->howto
->size
) == -EFAULT
) {
2533 switch (r
->howto
->type
) {
2534 case KSPLICE_HOWTO_TIME
:
2535 if (isdigit(buf
[0]) && isdigit(buf
[1]) && buf
[2] == ':' &&
2536 isdigit(buf
[3]) && isdigit(buf
[4]) && buf
[5] == ':' &&
2537 isdigit(buf
[6]) && isdigit(buf
[7]))
2542 case KSPLICE_HOWTO_DATE
:
2543 if (isalpha(buf
[0]) && isalpha(buf
[1]) && isalpha(buf
[2]) &&
2544 buf
[3] == ' ' && (buf
[4] == ' ' || isdigit(buf
[4])) &&
2545 isdigit(buf
[5]) && buf
[6] == ' ' && isdigit(buf
[7]) &&
2546 isdigit(buf
[8]) && isdigit(buf
[9]) && isdigit(buf
[10]))
2554 if (ret
== NO_MATCH
&& mode
== RUN_PRE_INITIAL
)
2555 ksdebug(change
, "%s string: \"%.*s\" does not match format\n",
2556 r
->howto
->type
== KSPLICE_HOWTO_DATE
? "date" : "time",
2557 r
->howto
->size
, buf
);
2561 ret
= create_labelval(change
, r
->symbol
, run_addr
, TEMP
);
2568 * Extract the value of a symbol used in a relocation in the pre code
2569 * during run-pre matching, giving an error if it conflicts with a
2570 * previously found value of that symbol
2572 static abort_t
handle_howto_reloc(struct ksplice_mod_change
*change
,
2573 const struct ksplice_section
*sect
,
2574 const struct ksplice_reloc
*r
,
2575 unsigned long run_addr
,
2576 enum run_pre_mode mode
)
2578 struct ksplice_section
*sym_sect
= symbol_section(change
, r
->symbol
);
2579 unsigned long offset
= r
->target_addend
;
2583 ret
= read_reloc_value(change
, r
, run_addr
, &val
);
2586 if (r
->howto
->pcrel
)
2589 #ifndef CONFIG_FUNCTION_DATA_SECTIONS
2590 if (sym_sect
== NULL
|| sym_sect
->match_map
== NULL
|| offset
== 0) {
2592 } else if (offset
< 0 || offset
>= sym_sect
->size
) {
2593 ksdebug(change
, "Out of range relocation: %s+%lx -> %s+%lx",
2594 sect
->symbol
->label
, r
->blank_addr
- sect
->address
,
2595 r
->symbol
->label
, offset
);
2597 } else if (sect
== sym_sect
&& sect
->match_map
[offset
] == NULL
) {
2598 sym_sect
->match_map
[offset
] =
2599 (const unsigned char *)r
->symbol
->value
+ offset
;
2600 } else if (sect
== sym_sect
&& (unsigned long)sect
->match_map
[offset
] ==
2601 r
->symbol
->value
+ offset
) {
2603 } else if (sect
== sym_sect
) {
2604 ksdebug(change
, "Relocations to nonmatching locations within "
2605 "section %s: %lx does not match %lx\n",
2606 sect
->symbol
->label
, offset
,
2607 (unsigned long)sect
->match_map
[offset
] -
2610 } else if ((sym_sect
->flags
& KSPLICE_SECTION_MATCHED
) == 0) {
2611 if (mode
== RUN_PRE_INITIAL
)
2612 ksdebug(change
, "Delaying matching of %s due to reloc "
2613 "from to unmatching section: %s+%lx\n",
2614 sect
->symbol
->label
, r
->symbol
->label
, offset
);
2616 } else if (sym_sect
->match_map
[offset
] == NULL
) {
2617 if (mode
== RUN_PRE_INITIAL
)
2618 ksdebug(change
, "Relocation not to instruction "
2619 "boundary: %s+%lx -> %s+%lx",
2620 sect
->symbol
->label
, r
->blank_addr
-
2621 sect
->address
, r
->symbol
->label
, offset
);
2623 } else if ((unsigned long)sym_sect
->match_map
[offset
] !=
2624 r
->symbol
->value
+ offset
) {
2625 if (mode
== RUN_PRE_INITIAL
)
2626 ksdebug(change
, "Match map shift %s+%lx: %lx != %lx\n",
2627 r
->symbol
->label
, offset
,
2628 r
->symbol
->value
+ offset
,
2629 (unsigned long)sym_sect
->match_map
[offset
]);
2630 val
+= r
->symbol
->value
+ offset
-
2631 (unsigned long)sym_sect
->match_map
[offset
];
2633 #endif /* !CONFIG_FUNCTION_DATA_SECTIONS */
2635 if (mode
== RUN_PRE_INITIAL
)
2636 ksdebug(change
, "run-pre: reloc at r_a=%lx p_a=%lx to %s+%lx: "
2637 "found %s = %lx\n", run_addr
, r
->blank_addr
,
2638 r
->symbol
->label
, offset
, r
->symbol
->label
, val
);
2640 if (contains_canary(change
, run_addr
, r
->howto
) != 0) {
2641 ksdebug(change
, "Aborted. Unexpected canary in run code at %lx"
2646 if ((sect
->flags
& KSPLICE_SECTION_DATA
) != 0 &&
2647 sect
->symbol
== r
->symbol
)
2649 ret
= create_labelval(change
, r
->symbol
, val
, TEMP
);
2650 if (ret
== NO_MATCH
&& mode
== RUN_PRE_INITIAL
)
2651 ksdebug(change
, "run-pre: reloc at r_a=%lx p_a=%lx: labelval "
2652 "%s = %lx does not match expected %lx\n", run_addr
,
2653 r
->blank_addr
, r
->symbol
->label
, r
->symbol
->value
, val
);
2657 if (sym_sect
!= NULL
&& (sym_sect
->flags
& KSPLICE_SECTION_MATCHED
) == 0
2658 && (sym_sect
->flags
& KSPLICE_SECTION_STRING
) != 0) {
2659 if (mode
== RUN_PRE_INITIAL
)
2660 ksdebug(change
, "Recursively comparing string section "
2661 "%s\n", sym_sect
->symbol
->label
);
2662 else if (mode
== RUN_PRE_DEBUG
)
2663 ksdebug(change
, "[str start] ");
2664 ret
= run_pre_cmp(change
, sym_sect
, val
, NULL
, mode
);
2665 if (mode
== RUN_PRE_DEBUG
)
2666 ksdebug(change
, "[str end] ");
2667 if (ret
== OK
&& mode
== RUN_PRE_INITIAL
)
2668 ksdebug(change
, "Successfully matched string section %s"
2669 "\n", sym_sect
->symbol
->label
);
2670 else if (mode
== RUN_PRE_INITIAL
)
2671 ksdebug(change
, "Failed to match string section %s\n",
2672 sym_sect
->symbol
->label
);
2677 #ifdef CONFIG_GENERIC_BUG
2678 #ifdef KSPLICE_NO_KERNEL_SUPPORT
2679 EXTRACT_SYMBOL(find_bug
);
2680 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
2681 static abort_t
handle_bug(struct ksplice_mod_change
*change
,
2682 const struct ksplice_reloc
*r
, unsigned long run_addr
)
2684 const struct bug_entry
*run_bug
= find_bug(run_addr
);
2685 struct ksplice_section
*bug_sect
= symbol_section(change
, r
->symbol
);
2686 if (run_bug
== NULL
)
2688 if (bug_sect
== NULL
)
2690 return create_labelval(change
, bug_sect
->symbol
, (unsigned long)run_bug
,
2693 #endif /* CONFIG_GENERIC_BUG */
2695 #ifdef KSPLICE_NO_KERNEL_SUPPORT
2696 EXTRACT_SYMBOL(search_exception_tables
);
2697 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
2699 static abort_t
handle_extable(struct ksplice_mod_change
*change
,
2700 const struct ksplice_reloc
*r
,
2701 unsigned long run_addr
)
2703 const struct exception_table_entry
*run_ent
=
2704 search_exception_tables(run_addr
);
2705 struct ksplice_section
*ex_sect
= symbol_section(change
, r
->symbol
);
2706 if (run_ent
== NULL
)
2708 if (ex_sect
== NULL
)
2710 return create_labelval(change
, ex_sect
->symbol
, (unsigned long)run_ent
,
2714 static int symbol_section_bsearch_compare(const void *a
, const void *b
)
2716 const struct ksplice_symbol
*sym
= a
;
2717 const struct ksplice_section
*sect
= b
;
2718 return strcmp(sym
->label
, sect
->symbol
->label
);
2721 static int compare_section_labels(const void *va
, const void *vb
)
2723 const struct ksplice_section
*a
= va
, *b
= vb
;
2724 return strcmp(a
->symbol
->label
, b
->symbol
->label
);
2727 static struct ksplice_section
*symbol_section(struct ksplice_mod_change
*change
,
2728 const struct ksplice_symbol
*sym
)
2730 return bsearch(sym
, change
->old_code
.sections
,
2731 change
->old_code
.sections_end
-
2732 change
->old_code
.sections
,
2733 sizeof(struct ksplice_section
),
2734 symbol_section_bsearch_compare
);
2737 /* Find the relocation for the oldaddr of a ksplice_patch */
2738 static const struct ksplice_reloc
*
2739 patch_reloc(struct ksplice_mod_change
*change
,
2740 const struct ksplice_patch
*p
)
2742 unsigned long addr
= (unsigned long)&p
->oldaddr
;
2743 const struct ksplice_reloc
*r
=
2744 find_reloc(change
->new_code
.relocs
, change
->new_code
.relocs_end
,
2745 addr
, sizeof(addr
));
2746 if (r
== NULL
|| r
->blank_addr
< addr
||
2747 r
->blank_addr
>= addr
+ sizeof(addr
))
2753 * Populates vals with the possible values for ksym from the various
2754 * sources Ksplice uses to resolve symbols
2756 static abort_t
lookup_symbol(struct ksplice_mod_change
*change
,
2757 const struct ksplice_symbol
*ksym
,
2758 struct list_head
*vals
)
2762 #ifdef KSPLICE_STANDALONE
2765 #endif /* KSPLICE_STANDALONE */
2767 if (ksym
->candidate_vals
== NULL
) {
2769 ksdebug(change
, "using detected sym %s=%lx\n", ksym
->label
,
2771 return add_candidate_val(change
, vals
, ksym
->value
);
2774 #ifdef CONFIG_MODULE_UNLOAD
2775 if (strcmp(ksym
->label
, "cleanup_module") == 0 && change
->target
!= NULL
2776 && change
->target
->exit
!= NULL
) {
2777 ret
= add_candidate_val(change
, vals
,
2778 (unsigned long)change
->target
->exit
);
2784 if (ksym
->name
!= NULL
) {
2785 struct candidate_val
*val
;
2786 list_for_each_entry(val
, ksym
->candidate_vals
, list
) {
2787 ret
= add_candidate_val(change
, vals
, val
->val
);
2792 ret
= new_export_lookup(change
, ksym
->name
, vals
);
2800 #ifdef KSPLICE_STANDALONE
2802 add_system_map_candidates(struct ksplice_mod_change
*change
,
2803 const struct ksplice_system_map
*start
,
2804 const struct ksplice_system_map
*end
,
2805 const char *label
, struct list_head
*vals
)
2810 const struct ksplice_system_map
*smap
;
2812 /* Some Fedora kernel releases have System.map files whose symbol
2813 * addresses disagree with the running kernel by a constant address
2814 * offset because of the CONFIG_PHYSICAL_START and CONFIG_PHYSICAL_ALIGN
2815 * values used to compile these kernels. This constant address offset
2816 * is always a multiple of 0x100000.
2818 * If we observe an offset that is NOT a multiple of 0x100000, then the
2819 * user provided us with an incorrect System.map file, and we should
2821 * If we observe an offset that is a multiple of 0x100000, then we can
2822 * adjust the System.map address values accordingly and proceed.
2824 off
= (unsigned long)printk
- change
->map_printk
;
2825 if (off
& 0xfffff) {
2827 "Aborted. System.map does not match kernel.\n");
2828 return BAD_SYSTEM_MAP
;
2831 smap
= bsearch(label
, start
, end
- start
, sizeof(*smap
),
2832 system_map_bsearch_compare
);
2836 for (i
= 0; i
< smap
->nr_candidates
; i
++) {
2837 ret
= add_candidate_val(change
, vals
,
2838 smap
->candidates
[i
] + off
);
2845 static int system_map_bsearch_compare(const void *key
, const void *elt
)
2847 const struct ksplice_system_map
*map
= elt
;
2848 const char *label
= key
;
2849 return strcmp(label
, map
->label
);
2851 #endif /* !KSPLICE_STANDALONE */
2854 * An update could cause one module to export a symbol and at the same time
2855 * change another module to use that symbol. This violates the normal
2856 * situation where the changes can be handled independently.
2858 * new_export_lookup obtains symbol values from the changes to the
2859 * exported symbol table made by other changes.
2861 static abort_t
new_export_lookup(struct ksplice_mod_change
*ichange
,
2862 const char *name
, struct list_head
*vals
)
2864 struct ksplice_mod_change
*change
;
2865 struct ksplice_patch
*p
;
2866 list_for_each_entry(change
, &ichange
->update
->changes
, list
) {
2867 for (p
= change
->patches
; p
< change
->patches_end
; p
++) {
2868 const struct kernel_symbol
*sym
;
2869 const struct ksplice_reloc
*r
;
2870 if (p
->type
!= KSPLICE_PATCH_EXPORT
||
2871 strcmp(name
, *(const char **)p
->contents
) != 0)
2874 /* Check that the p->oldaddr reloc has been resolved. */
2875 r
= patch_reloc(change
, p
);
2877 contains_canary(change
, r
->blank_addr
,
2880 sym
= (const struct kernel_symbol
*)r
->symbol
->value
;
2883 * Check that the sym->value reloc has been resolved,
2884 * if there is a Ksplice relocation there.
2886 r
= find_reloc(change
->new_code
.relocs
,
2887 change
->new_code
.relocs_end
,
2888 (unsigned long)&sym
->value
,
2889 sizeof(&sym
->value
));
2891 r
->blank_addr
== (unsigned long)&sym
->value
&&
2892 contains_canary(change
, r
->blank_addr
,
2895 return add_candidate_val(ichange
, vals
, sym
->value
);
2901 #ifdef KSPLICE_STANDALONE
2902 EXTRACT_SYMBOL(bust_spinlocks
);
2903 #endif /* KSPLICE_STANDALONE */
2906 * When patch_action is called, the update should be fully prepared.
2907 * patch_action will try to actually insert or remove trampolines for
2910 static abort_t
patch_action(struct update
*update
, enum ksplice_action action
)
2912 static int (*const __patch_actions
[KS_ACTIONS
])(void *) = {
2913 [KS_APPLY
] = __apply_patches
,
2914 [KS_REVERSE
] = __reverse_patches
,
2918 struct ksplice_mod_change
*change
;
2920 ret
= map_trampoline_pages(update
);
2924 list_for_each_entry(change
, &update
->changes
, list
) {
2925 const typeof(int (*)(void)) *f
;
2926 for (f
= change
->hooks
[action
].pre
;
2927 f
< change
->hooks
[action
].pre_end
; f
++) {
2935 for (i
= 0; i
< 5; i
++) {
2936 cleanup_conflicts(update
);
2937 #ifdef KSPLICE_STANDALONE
2939 #endif /* KSPLICE_STANDALONE */
2940 ret
= (__force abort_t
)stop_machine(__patch_actions
[action
],
2942 #ifdef KSPLICE_STANDALONE
2944 #endif /* KSPLICE_STANDALONE */
2945 if (ret
!= CODE_BUSY
)
2947 set_current_state(TASK_INTERRUPTIBLE
);
2948 schedule_timeout(msecs_to_jiffies(1000));
2951 unmap_trampoline_pages(update
);
2953 if (ret
== CODE_BUSY
) {
2954 print_conflicts(update
);
2955 _ksdebug(update
, "Aborted %s. stack check: to-be-%s "
2956 "code is busy.\n", update
->kid
,
2957 action
== KS_APPLY
? "replaced" : "reversed");
2958 } else if (ret
== ALREADY_REVERSED
) {
2959 _ksdebug(update
, "Aborted %s. Ksplice update %s is already "
2960 "reversed.\n", update
->kid
, update
->kid
);
2961 } else if (ret
== MODULE_BUSY
) {
2962 _ksdebug(update
, "Update %s is in use by another module\n",
2967 list_for_each_entry(change
, &update
->changes
, list
) {
2968 const typeof(void (*)(void)) *f
;
2969 for (f
= change
->hooks
[action
].fail
;
2970 f
< change
->hooks
[action
].fail_end
; f
++)
2977 list_for_each_entry(change
, &update
->changes
, list
) {
2978 const typeof(void (*)(void)) *f
;
2979 for (f
= change
->hooks
[action
].post
;
2980 f
< change
->hooks
[action
].post_end
; f
++)
2984 _ksdebug(update
, "Atomic patch %s for %s complete\n",
2985 action
== KS_APPLY
? "insertion" : "removal", update
->kid
);
2989 /* Atomically insert the update; run from within stop_machine */
2990 static int __apply_patches(void *updateptr
)
2992 struct update
*update
= updateptr
;
2993 struct ksplice_mod_change
*change
;
2994 struct ksplice_module_list_entry
*entry
;
2995 struct ksplice_patch
*p
;
2998 if (update
->stage
== STAGE_APPLIED
)
2999 return (__force
int)OK
;
3001 if (update
->stage
!= STAGE_PREPARING
)
3002 return (__force
int)UNEXPECTED
;
3004 ret
= check_each_task(update
);
3006 return (__force
int)ret
;
3008 list_for_each_entry(change
, &update
->changes
, list
) {
3009 if (try_module_get(change
->new_code_mod
) != 1) {
3010 struct ksplice_mod_change
*change1
;
3011 list_for_each_entry(change1
, &update
->changes
, list
) {
3012 if (change1
== change
)
3014 module_put(change1
->new_code_mod
);
3016 module_put(THIS_MODULE
);
3017 return (__force
int)UNEXPECTED
;
3021 list_for_each_entry(change
, &update
->changes
, list
) {
3022 const typeof(int (*)(void)) *f
;
3023 for (f
= change
->hooks
[KS_APPLY
].check
;
3024 f
< change
->hooks
[KS_APPLY
].check_end
; f
++) {
3026 return (__force
int)CALL_FAILED
;
3030 /* Commit point: the update application will succeed. */
3032 update
->stage
= STAGE_APPLIED
;
3033 #ifdef TAINT_KSPLICE
3034 add_taint(TAINT_KSPLICE
);
3037 list_for_each_entry(entry
, &update
->ksplice_module_list
, update_list
)
3038 list_add(&entry
->list
, &ksplice_modules
);
3040 list_for_each_entry(change
, &update
->changes
, list
) {
3041 for (p
= change
->patches
; p
< change
->patches_end
; p
++)
3042 insert_trampoline(p
);
3045 list_for_each_entry(change
, &update
->changes
, list
) {
3046 const typeof(void (*)(void)) *f
;
3047 for (f
= change
->hooks
[KS_APPLY
].intra
;
3048 f
< change
->hooks
[KS_APPLY
].intra_end
; f
++)
3052 return (__force
int)OK
;
3055 /* Atomically remove the update; run from within stop_machine */
3056 static int __reverse_patches(void *updateptr
)
3058 struct update
*update
= updateptr
;
3059 struct ksplice_mod_change
*change
;
3060 struct ksplice_module_list_entry
*entry
;
3061 const struct ksplice_patch
*p
;
3064 if (update
->stage
!= STAGE_APPLIED
)
3065 return (__force
int)OK
;
3067 #ifdef CONFIG_MODULE_UNLOAD
3068 list_for_each_entry(change
, &update
->changes
, list
) {
3069 if (module_refcount(change
->new_code_mod
) != 1)
3070 return (__force
int)MODULE_BUSY
;
3072 #endif /* CONFIG_MODULE_UNLOAD */
3074 list_for_each_entry(entry
, &update
->ksplice_module_list
, update_list
) {
3075 if (!entry
->applied
&&
3076 find_module(entry
->target_mod_name
) != NULL
)
3077 return COLD_UPDATE_LOADED
;
3080 ret
= check_each_task(update
);
3082 return (__force
int)ret
;
3084 list_for_each_entry(change
, &update
->changes
, list
) {
3085 for (p
= change
->patches
; p
< change
->patches_end
; p
++) {
3086 ret
= verify_trampoline(change
, p
);
3088 return (__force
int)ret
;
3092 list_for_each_entry(change
, &update
->changes
, list
) {
3093 const typeof(int (*)(void)) *f
;
3094 for (f
= change
->hooks
[KS_REVERSE
].check
;
3095 f
< change
->hooks
[KS_REVERSE
].check_end
; f
++) {
3097 return (__force
int)CALL_FAILED
;
3101 /* Commit point: the update reversal will succeed. */
3103 update
->stage
= STAGE_REVERSED
;
3105 list_for_each_entry(change
, &update
->changes
, list
)
3106 module_put(change
->new_code_mod
);
3108 list_for_each_entry(entry
, &update
->ksplice_module_list
, update_list
)
3109 list_del(&entry
->list
);
3111 list_for_each_entry(change
, &update
->changes
, list
) {
3112 const typeof(void (*)(void)) *f
;
3113 for (f
= change
->hooks
[KS_REVERSE
].intra
;
3114 f
< change
->hooks
[KS_REVERSE
].intra_end
; f
++)
3118 list_for_each_entry(change
, &update
->changes
, list
) {
3119 for (p
= change
->patches
; p
< change
->patches_end
; p
++)
3120 remove_trampoline(p
);
3123 return (__force
int)OK
;
3126 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
3127 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
3128 EXTRACT_SYMBOL(tasklist_lock
);
3129 #endif /* LINUX_VERSION_CODE */
3132 * Check whether any thread's instruction pointer or any address of
3133 * its stack is contained in one of the safety_records associated with
3136 * check_each_task must be called from inside stop_machine, because it
3137 * does not take tasklist_lock (which cannot be held by anyone else
3138 * during stop_machine).
3140 static abort_t
check_each_task(struct update
*update
)
3142 const struct task_struct
*g
, *p
;
3143 abort_t status
= OK
, ret
;
3144 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
3145 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
3146 read_lock(&tasklist_lock
);
3147 #endif /* LINUX_VERSION_CODE */
3148 do_each_thread(g
, p
) {
3149 /* do_each_thread is a double loop! */
3150 ret
= check_task(update
, p
, false);
3152 check_task(update
, p
, true);
3155 if (ret
!= OK
&& ret
!= CODE_BUSY
)
3156 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
3157 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
3159 #else /* LINUX_VERSION_CODE < */
3161 #endif /* LINUX_VERSION_CODE */
3162 } while_each_thread(g
, p
);
3163 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
3164 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
3166 read_unlock(&tasklist_lock
);
3167 #endif /* LINUX_VERSION_CODE */
3171 #ifdef KSPLICE_NO_KERNEL_SUPPORT
3172 EXTRACT_SYMBOL(task_curr
);
3173 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
3175 static abort_t
check_task(struct update
*update
,
3176 const struct task_struct
*t
, bool rerun
)
3178 abort_t status
, ret
;
3179 struct conflict
*conf
= NULL
;
3182 conf
= kmalloc(sizeof(*conf
), GFP_ATOMIC
);
3184 return OUT_OF_MEMORY
;
3185 conf
->process_name
= kstrdup(t
->comm
, GFP_ATOMIC
);
3186 if (conf
->process_name
== NULL
) {
3188 return OUT_OF_MEMORY
;
3191 INIT_LIST_HEAD(&conf
->stack
);
3192 list_add(&conf
->list
, &update
->conflicts
);
3195 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
3196 if (t
->state
== TASK_DEAD
)
3197 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3198 /* c394cc9fbb367f87faa2228ec2eabacd2d4701c6 was after 2.6.18 */
3199 if ((t
->flags
& PF_DEAD
) != 0)
3203 status
= check_address(update
, conf
, KSPLICE_IP(t
));
3205 ret
= check_stack(update
, conf
, task_thread_info(t
),
3206 (unsigned long *)__builtin_frame_address(0));
3209 } else if (!task_curr(t
)) {
3210 ret
= check_stack(update
, conf
, task_thread_info(t
),
3211 (unsigned long *)KSPLICE_SP(t
));
3214 } else if (!is_stop_machine(t
)) {
3215 status
= UNEXPECTED_RUNNING_TASK
;
3220 static abort_t
check_stack(struct update
*update
, struct conflict
*conf
,
3221 const struct thread_info
*tinfo
,
3222 const unsigned long *stack
)
3224 abort_t status
= OK
, ret
;
3227 while (valid_stack_ptr(tinfo
, stack
)) {
3229 ret
= check_address(update
, conf
, addr
);
3236 static abort_t
check_address(struct update
*update
,
3237 struct conflict
*conf
, unsigned long addr
)
3239 abort_t status
= OK
, ret
;
3240 const struct safety_record
*rec
;
3241 struct ksplice_mod_change
*change
;
3242 struct conflict_addr
*ca
= NULL
;
3245 ca
= kmalloc(sizeof(*ca
), GFP_ATOMIC
);
3247 return OUT_OF_MEMORY
;
3249 ca
->has_conflict
= false;
3251 list_add(&ca
->list
, &conf
->stack
);
3254 list_for_each_entry(change
, &update
->changes
, list
) {
3255 unsigned long tramp_addr
= follow_trampolines(change
, addr
);
3256 list_for_each_entry(rec
, &change
->safety_records
, list
) {
3257 ret
= check_record(ca
, rec
, tramp_addr
);
3265 static abort_t
check_record(struct conflict_addr
*ca
,
3266 const struct safety_record
*rec
, unsigned long addr
)
3268 if (addr
>= rec
->addr
&& addr
< rec
->addr
+ rec
->size
) {
3270 ca
->label
= rec
->label
;
3271 ca
->has_conflict
= true;
3278 /* Is the task one of the stop_machine tasks? */
3279 static bool is_stop_machine(const struct task_struct
*t
)
3281 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
3282 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
3283 const char *kstop_prefix
= "kstop/";
3284 #else /* LINUX_VERSION_CODE < */
3285 /* c9583e55fa2b08a230c549bd1e3c0bde6c50d9cc was after 2.6.27 */
3286 const char *kstop_prefix
= "kstop";
3287 #endif /* LINUX_VERSION_CODE */
3289 if (!strstarts(t
->comm
, kstop_prefix
))
3291 num
= t
->comm
+ strlen(kstop_prefix
);
3292 return num
[strspn(num
, "0123456789")] == '\0';
3293 #else /* LINUX_VERSION_CODE < */
3294 /* ffdb5976c47609c862917d4c186ecbb5706d2dda was after 2.6.26 */
3295 return strcmp(t
->comm
, "kstopmachine") == 0;
3296 #endif /* LINUX_VERSION_CODE */
3299 static void cleanup_conflicts(struct update
*update
)
3301 struct conflict
*conf
;
3302 list_for_each_entry(conf
, &update
->conflicts
, list
) {
3303 clear_list(&conf
->stack
, struct conflict_addr
, list
);
3304 kfree(conf
->process_name
);
3306 clear_list(&update
->conflicts
, struct conflict
, list
);
3309 static void print_conflicts(struct update
*update
)
3311 const struct conflict
*conf
;
3312 const struct conflict_addr
*ca
;
3313 list_for_each_entry(conf
, &update
->conflicts
, list
) {
3314 _ksdebug(update
, "stack check: pid %d (%s):", conf
->pid
,
3315 conf
->process_name
);
3316 list_for_each_entry(ca
, &conf
->stack
, list
) {
3317 _ksdebug(update
, " %lx", ca
->addr
);
3318 if (ca
->has_conflict
)
3319 _ksdebug(update
, " [<-CONFLICT]");
3321 _ksdebug(update
, "\n");
3325 static void insert_trampoline(struct ksplice_patch
*p
)
3327 mm_segment_t old_fs
= get_fs();
3329 memcpy(p
->saved
, p
->vaddr
, p
->size
);
3330 memcpy(p
->vaddr
, p
->contents
, p
->size
);
3331 flush_icache_range(p
->oldaddr
, p
->oldaddr
+ p
->size
);
3335 static abort_t
verify_trampoline(struct ksplice_mod_change
*change
,
3336 const struct ksplice_patch
*p
)
3338 if (memcmp(p
->vaddr
, p
->contents
, p
->size
) != 0) {
3339 ksdebug(change
, "Aborted. Trampoline at %lx has been "
3340 "overwritten.\n", p
->oldaddr
);
3346 static void remove_trampoline(const struct ksplice_patch
*p
)
3348 mm_segment_t old_fs
= get_fs();
3350 memcpy(p
->vaddr
, p
->saved
, p
->size
);
3351 flush_icache_range(p
->oldaddr
, p
->oldaddr
+ p
->size
);
3355 /* Returns NO_MATCH if there's already a labelval with a different value */
3356 static abort_t
create_labelval(struct ksplice_mod_change
*change
,
3357 struct ksplice_symbol
*ksym
,
3358 unsigned long val
, int status
)
3360 val
= follow_trampolines(change
, val
);
3361 if (ksym
->candidate_vals
== NULL
)
3362 return ksym
->value
== val
? OK
: NO_MATCH
;
3365 if (status
== TEMP
) {
3366 struct labelval
*lv
= kmalloc(sizeof(*lv
), GFP_KERNEL
);
3368 return OUT_OF_MEMORY
;
3370 lv
->saved_vals
= ksym
->candidate_vals
;
3371 list_add(&lv
->list
, &change
->temp_labelvals
);
3373 ksym
->candidate_vals
= NULL
;
3378 * Creates a new safety_record for a old_code section based on its
3379 * ksplice_section and run-pre matching information.
3381 static abort_t
create_safety_record(struct ksplice_mod_change
*change
,
3382 const struct ksplice_section
*sect
,
3383 struct list_head
*record_list
,
3384 unsigned long run_addr
,
3385 unsigned long run_size
)
3387 struct safety_record
*rec
;
3388 struct ksplice_patch
*p
;
3390 if (record_list
== NULL
)
3393 for (p
= change
->patches
; p
< change
->patches_end
; p
++) {
3394 const struct ksplice_reloc
*r
= patch_reloc(change
, p
);
3395 if (strcmp(sect
->symbol
->label
, r
->symbol
->label
) == 0)
3398 if (p
>= change
->patches_end
)
3401 rec
= kmalloc(sizeof(*rec
), GFP_KERNEL
);
3403 return OUT_OF_MEMORY
;
3405 * The old_code might be unloaded when checking reversing
3406 * patches, so we need to kstrdup the label here.
3408 rec
->label
= kstrdup(sect
->symbol
->label
, GFP_KERNEL
);
3409 if (rec
->label
== NULL
) {
3411 return OUT_OF_MEMORY
;
3413 rec
->addr
= run_addr
;
3414 rec
->size
= run_size
;
3416 list_add(&rec
->list
, record_list
);
3420 static abort_t
add_candidate_val(struct ksplice_mod_change
*change
,
3421 struct list_head
*vals
, unsigned long val
)
3423 struct candidate_val
*tmp
, *new;
3426 * Careful: follow trampolines before comparing values so that we do
3427 * not mistake the obsolete function for another copy of the function.
3429 val
= follow_trampolines(change
, val
);
3431 list_for_each_entry(tmp
, vals
, list
) {
3432 if (tmp
->val
== val
)
3435 new = kmalloc(sizeof(*new), GFP_KERNEL
);
3437 return OUT_OF_MEMORY
;
3439 list_add(&new->list
, vals
);
3443 static void release_vals(struct list_head
*vals
)
3445 clear_list(vals
, struct candidate_val
, list
);
3449 * The temp_labelvals list is used to cache those temporary labelvals
3450 * that have been created to cross-check the symbol values obtained
3451 * from different relocations within a single section being matched.
3453 * If status is VAL, commit the temp_labelvals as final values.
3455 * If status is NOVAL, restore the list of possible values to the
3456 * ksplice_symbol, so that it no longer has a known value.
3458 static void set_temp_labelvals(struct ksplice_mod_change
*change
, int status
)
3460 struct labelval
*lv
, *n
;
3461 list_for_each_entry_safe(lv
, n
, &change
->temp_labelvals
, list
) {
3462 if (status
== NOVAL
) {
3463 lv
->symbol
->candidate_vals
= lv
->saved_vals
;
3465 release_vals(lv
->saved_vals
);
3466 kfree(lv
->saved_vals
);
3468 list_del(&lv
->list
);
3473 /* Is there a Ksplice canary with given howto at blank_addr? */
3474 static int contains_canary(struct ksplice_mod_change
*change
,
3475 unsigned long blank_addr
,
3476 const struct ksplice_reloc_howto
*howto
)
3478 switch (howto
->size
) {
3480 return (*(uint8_t *)blank_addr
& howto
->dst_mask
) ==
3481 (KSPLICE_CANARY
& howto
->dst_mask
);
3483 return (*(uint16_t *)blank_addr
& howto
->dst_mask
) ==
3484 (KSPLICE_CANARY
& howto
->dst_mask
);
3486 return (*(uint32_t *)blank_addr
& howto
->dst_mask
) ==
3487 (KSPLICE_CANARY
& howto
->dst_mask
);
3488 #if BITS_PER_LONG >= 64
3490 return (*(uint64_t *)blank_addr
& howto
->dst_mask
) ==
3491 (KSPLICE_CANARY
& howto
->dst_mask
);
3492 #endif /* BITS_PER_LONG */
3494 ksdebug(change
, "Aborted. Invalid relocation size.\n");
3499 #ifdef KSPLICE_NO_KERNEL_SUPPORT
3500 EXTRACT_SYMBOL(__kernel_text_address
);
3501 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
3504 * Compute the address of the code you would actually run if you were
3505 * to call the function at addr (i.e., follow the sequence of jumps
3508 static unsigned long follow_trampolines(struct ksplice_mod_change
*change
,
3511 unsigned long new_addr
;
3515 #ifdef KSPLICE_STANDALONE
3518 #endif /* KSPLICE_STANDALONE */
3519 if (!__kernel_text_address(addr
) ||
3520 trampoline_target(change
, addr
, &new_addr
) != OK
)
3522 m
= __module_text_address(new_addr
);
3523 if (m
== NULL
|| m
== change
->target
||
3524 !strstarts(m
->name
, "ksplice"))
3530 /* Does module a patch module b? */
3531 static bool patches_module(const struct module
*a
, const struct module
*b
)
3533 #ifdef KSPLICE_NO_KERNEL_SUPPORT
3535 const char *modname
= b
== NULL
? "vmlinux" : b
->name
;
3538 if (a
== NULL
|| !strstarts(a
->name
, "ksplice_"))
3540 name
= a
->name
+ strlen("ksplice_");
3541 name
+= strcspn(name
, "_");
3545 return strstarts(name
, modname
) &&
3546 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3547 strcmp(name
+ strlen(modname
), "_new") == 0;
3548 #else /* LINUX_VERSION_CODE < */
3549 /* 0e8a2de644a93132594f66222a9d48405674eacd was after 2.6.9 */
3550 (strcmp(name
+ strlen(modname
), "_n") == 0
3551 || strcmp(name
+ strlen(modname
), "_new") == 0);
3552 #endif /* LINUX_VERSION_CODE */
3553 #else /* !KSPLICE_NO_KERNEL_SUPPORT */
3554 struct ksplice_module_list_entry
*entry
;
3557 list_for_each_entry(entry
, &ksplice_modules
, list
) {
3558 if (strcmp(entry
->target_mod_name
, b
->name
) == 0 &&
3559 strcmp(entry
->new_code_mod_name
, a
->name
) == 0)
3563 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
3566 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
3567 /* 66f92cf9d415e96a5bdd6c64de8dd8418595d2fc was after 2.6.29 */
/* Does str begin with prefix? (backport of the 2.6.30 helper) */
static bool strstarts(const char *str, const char *prefix)
{
	return strncmp(str, prefix, strlen(prefix)) == 0;
}
3572 #endif /* LINUX_VERSION_CODE */
3574 static bool singular(struct list_head
*list
)
3576 return !list_empty(list
) && list
->next
->next
== list
;
/* Binary search over a sorted array; returns the matching element or NULL.
 * (Local backport; kernels of this era lack a generic bsearch.) */
static void *bsearch(const void *key, const void *base, size_t n,
		     size_t size, int (*cmp)(const void *key, const void *elt))
{
	int start = 0, end = n - 1, mid, result;
	if (n == 0)
		return NULL;
	while (start <= end) {
		mid = (start + end) / 2;
		result = cmp(key, base + mid * size);
		if (result < 0)
			end = mid - 1;
		else if (result > 0)
			start = mid + 1;
		else
			return (void *)base + mid * size;
	}
	return NULL;
}
3598 static int compare_relocs(const void *a
, const void *b
)
3600 const struct ksplice_reloc
*ra
= a
, *rb
= b
;
3601 if (ra
->blank_addr
> rb
->blank_addr
)
3603 else if (ra
->blank_addr
< rb
->blank_addr
)
3606 return ra
->howto
->size
- rb
->howto
->size
;
3609 #ifdef KSPLICE_STANDALONE
3610 static int compare_system_map(const void *a
, const void *b
)
3612 const struct ksplice_system_map
*sa
= a
, *sb
= b
;
3613 return strcmp(sa
->label
, sb
->label
);
3615 #endif /* KSPLICE_STANDALONE */
3617 #ifdef CONFIG_DEBUG_FS
3618 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
3619 /* Old kernels don't have debugfs_create_blob */
3620 static ssize_t
read_file_blob(struct file
*file
, char __user
*user_buf
,
3621 size_t count
, loff_t
*ppos
)
3623 struct debugfs_blob_wrapper
*blob
= file
->private_data
;
3624 return simple_read_from_buffer(user_buf
, count
, ppos
, blob
->data
,
3628 static int blob_open(struct inode
*inode
, struct file
*file
)
3630 if (inode
->i_private
)
3631 file
->private_data
= inode
->i_private
;
3635 static struct file_operations fops_blob
= {
3636 .read
= read_file_blob
,
3640 static struct dentry
*debugfs_create_blob(const char *name
, mode_t mode
,
3641 struct dentry
*parent
,
3642 struct debugfs_blob_wrapper
*blob
)
3644 return debugfs_create_file(name
, mode
, parent
, blob
, &fops_blob
);
3646 #endif /* LINUX_VERSION_CODE */
3648 static abort_t
init_debug_buf(struct update
*update
)
3650 update
->debug_blob
.size
= 0;
3651 update
->debug_blob
.data
= NULL
;
3652 update
->debugfs_dentry
=
3653 debugfs_create_blob(update
->name
, S_IFREG
| S_IRUSR
, NULL
,
3654 &update
->debug_blob
);
3655 if (update
->debugfs_dentry
== NULL
)
3656 return OUT_OF_MEMORY
;
3660 static void clear_debug_buf(struct update
*update
)
3662 if (update
->debugfs_dentry
== NULL
)
3664 debugfs_remove(update
->debugfs_dentry
);
3665 update
->debugfs_dentry
= NULL
;
3666 update
->debug_blob
.size
= 0;
3667 vfree(update
->debug_blob
.data
);
3668 update
->debug_blob
.data
= NULL
;
3671 static int _ksdebug(struct update
*update
, const char *fmt
, ...)
3674 unsigned long size
, old_size
, new_size
;
3676 if (update
->debug
== 0)
3679 /* size includes the trailing '\0' */
3680 va_start(args
, fmt
);
3681 size
= 1 + vsnprintf(update
->debug_blob
.data
, 0, fmt
, args
);
3683 old_size
= update
->debug_blob
.size
== 0 ? 0 :
3684 max(PAGE_SIZE
, roundup_pow_of_two(update
->debug_blob
.size
));
3685 new_size
= update
->debug_blob
.size
+ size
== 0 ? 0 :
3686 max(PAGE_SIZE
, roundup_pow_of_two(update
->debug_blob
.size
+ size
));
3687 if (new_size
> old_size
) {
3688 char *buf
= vmalloc(new_size
);
3691 memcpy(buf
, update
->debug_blob
.data
, update
->debug_blob
.size
);
3692 vfree(update
->debug_blob
.data
);
3693 update
->debug_blob
.data
= buf
;
3695 va_start(args
, fmt
);
3696 update
->debug_blob
.size
+= vsnprintf(update
->debug_blob
.data
+
3697 update
->debug_blob
.size
,
3702 #else /* CONFIG_DEBUG_FS */
3703 static abort_t
init_debug_buf(struct update
*update
)
/* No debugfs: nothing to tear down. */
static void clear_debug_buf(struct update *update)
{
	return;
}
3713 static int _ksdebug(struct update
*update
, const char *fmt
, ...)
3717 if (update
->debug
== 0)
3720 if (!update
->debug_continue_line
)
3721 printk(KERN_DEBUG
"ksplice: ");
3723 va_start(args
, fmt
);
3724 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
3726 #else /* LINUX_VERSION_CODE < */
3727 /* 683b229286b429244f35726b3c18caec429233bd was after 2.6.8 */
3729 char *buf
= kvasprintf(GFP_KERNEL
, fmt
, args
);
3733 #endif /* LINUX_VERSION_CODE */
3736 update
->debug_continue_line
=
3737 fmt
[0] == '\0' || fmt
[strlen(fmt
) - 1] != '\n';
3740 #endif /* CONFIG_DEBUG_FS */
3742 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) && defined(CONFIG_KALLSYMS)
3743 /* 75a66614db21007bcc8c37f9c5d5b922981387b9 was after 2.6.29 */
3744 extern unsigned long kallsyms_addresses
[];
3745 EXTRACT_SYMBOL(kallsyms_addresses
);
3746 extern unsigned long kallsyms_num_syms
;
3747 EXTRACT_SYMBOL(kallsyms_num_syms
);
3748 extern u8 kallsyms_names
[];
3749 EXTRACT_SYMBOL(kallsyms_names
);
3751 static int kallsyms_on_each_symbol(int (*fn
)(void *, const char *,
3752 struct module
*, unsigned long),
3755 char namebuf
[KSYM_NAME_LEN
];
3757 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3759 #endif /* LINUX_VERSION_CODE */
3762 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3763 for (i
= 0, off
= 0; i
< kallsyms_num_syms
; i
++) {
3764 off
= kallsyms_expand_symbol(off
, namebuf
);
3765 ret
= fn(data
, namebuf
, NULL
, kallsyms_addresses
[i
]);
3769 #else /* LINUX_VERSION_CODE < */
3770 /* 5648d78927ca65e74aadc88a2b1d6431e55e78ec was after 2.6.9 */
3773 for (i
= 0, knames
= kallsyms_names
; i
< kallsyms_num_syms
; i
++) {
3774 unsigned prefix
= *knames
++;
3776 strlcpy(namebuf
+ prefix
, knames
, KSYM_NAME_LEN
- prefix
);
3778 ret
= fn(data
, namebuf
, NULL
, kallsyms_addresses
[i
]);
3782 knames
+= strlen(knames
) + 1;
3784 #endif /* LINUX_VERSION_CODE */
3785 return module_kallsyms_on_each_symbol(fn
, data
);
3788 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3789 extern u8 kallsyms_token_table
[];
3790 EXTRACT_SYMBOL(kallsyms_token_table
);
3791 extern u16 kallsyms_token_index
[];
3792 EXTRACT_SYMBOL(kallsyms_token_index
);
3794 static unsigned int kallsyms_expand_symbol(unsigned int off
, char *result
)
3796 long len
, skipped_first
= 0;
3797 const u8
*tptr
, *data
;
3799 data
= &kallsyms_names
[off
];
3806 tptr
= &kallsyms_token_table
[kallsyms_token_index
[*data
]];
3811 if (skipped_first
) {
3824 #else /* LINUX_VERSION_CODE < */
3825 /* 5648d78927ca65e74aadc88a2b1d6431e55e78ec was after 2.6.9 */
3826 #endif /* LINUX_VERSION_CODE */
3828 static int module_kallsyms_on_each_symbol(int (*fn
)(void *, const char *,
3837 list_for_each_entry(mod
, &modules
, list
) {
3838 for (i
= 0; i
< mod
->num_symtab
; i
++) {
3839 ret
= fn(data
, mod
->strtab
+ mod
->symtab
[i
].st_name
,
3840 mod
, mod
->symtab
[i
].st_value
);
3847 #endif /* LINUX_VERSION_CODE && CONFIG_KALLSYMS */
3849 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
3850 /* c6b37801911d7f4663c99cad8aa230bc934cea82 was after 2.6.29 */
3851 static struct module
*find_module(const char *name
)
3855 list_for_each_entry(mod
, &modules
, list
) {
3856 if (strcmp(mod
->name
, name
) == 0)
3862 #ifdef CONFIG_MODULE_UNLOAD
3864 struct list_head list
;
3865 struct module
*module_which_uses
;
3868 /* I'm not yet certain whether we need the strong form of this. */
3869 static inline int strong_try_module_get(struct module
*mod
)
3871 if (mod
&& mod
->state
!= MODULE_STATE_LIVE
)
3873 if (try_module_get(mod
))
3878 /* Does a already use b? */
3879 static int already_uses(struct module
*a
, struct module
*b
)
3881 struct module_use
*use
;
3882 list_for_each_entry(use
, &b
->modules_which_use_me
, list
) {
3883 if (use
->module_which_uses
== a
)
3889 /* Make it so module a uses b. Must be holding module_mutex */
3890 static int use_module(struct module
*a
, struct module
*b
)
3892 struct module_use
*use
;
3893 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
3894 /* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
3896 #endif /* LINUX_VERSION_CODE */
3897 if (b
== NULL
|| already_uses(a
, b
))
3900 if (strong_try_module_get(b
) < 0)
3903 use
= kmalloc(sizeof(*use
), GFP_ATOMIC
);
3908 use
->module_which_uses
= a
;
3909 list_add(&use
->list
, &b
->modules_which_use_me
);
3910 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
3911 /* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
3912 no_warn
= sysfs_create_link(b
->holders_dir
, &a
->mkobj
.kobj
, a
->name
);
3913 #endif /* LINUX_VERSION_CODE */
3916 #else /* CONFIG_MODULE_UNLOAD */
/* Without CONFIG_MODULE_UNLOAD there is no usage tracking to record. */
static int use_module(struct module *a, struct module *b)
{
	return 1;
}
3921 #endif /* CONFIG_MODULE_UNLOAD */
3923 #ifndef CONFIG_MODVERSIONS
3924 #define symversion(base, idx) NULL
3926 #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
3929 static bool each_symbol_in_section(const struct symsearch
*arr
,
3930 unsigned int arrsize
,
3931 struct module
*owner
,
3932 bool (*fn
)(const struct symsearch
*syms
,
3933 struct module
*owner
,
3934 unsigned int symnum
, void *data
),
3939 for (j
= 0; j
< arrsize
; j
++) {
3940 for (i
= 0; i
< arr
[j
].stop
- arr
[j
].start
; i
++)
3941 if (fn(&arr
[j
], owner
, i
, data
))
3948 /* Returns true as soon as fn returns true, otherwise false. */
3949 static bool each_symbol(bool (*fn
)(const struct symsearch
*arr
,
3950 struct module
*owner
,
3951 unsigned int symnum
, void *data
),
3955 const struct symsearch arr
[] = {
3956 { __start___ksymtab
, __stop___ksymtab
, __start___kcrctab
,
3957 NOT_GPL_ONLY
, false },
3958 { __start___ksymtab_gpl
, __stop___ksymtab_gpl
,
3959 __start___kcrctab_gpl
,
3961 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
3962 { __start___ksymtab_gpl_future
, __stop___ksymtab_gpl_future
,
3963 __start___kcrctab_gpl_future
,
3964 WILL_BE_GPL_ONLY
, false },
3965 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
3966 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
3967 { __start___ksymtab_unused
, __stop___ksymtab_unused
,
3968 __start___kcrctab_unused
,
3969 NOT_GPL_ONLY
, true },
3970 { __start___ksymtab_unused_gpl
, __stop___ksymtab_unused_gpl
,
3971 __start___kcrctab_unused_gpl
,
3973 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
3976 if (each_symbol_in_section(arr
, ARRAY_SIZE(arr
), NULL
, fn
, data
))
3979 list_for_each_entry(mod
, &modules
, list
) {
3980 struct symsearch module_arr
[] = {
3981 { mod
->syms
, mod
->syms
+ mod
->num_syms
, mod
->crcs
,
3982 NOT_GPL_ONLY
, false },
3983 { mod
->gpl_syms
, mod
->gpl_syms
+ mod
->num_gpl_syms
,
3986 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
3987 { mod
->gpl_future_syms
,
3988 mod
->gpl_future_syms
+ mod
->num_gpl_future_syms
,
3989 mod
->gpl_future_crcs
,
3990 WILL_BE_GPL_ONLY
, false },
3991 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
3992 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
3994 mod
->unused_syms
+ mod
->num_unused_syms
,
3996 NOT_GPL_ONLY
, true },
3997 { mod
->unused_gpl_syms
,
3998 mod
->unused_gpl_syms
+ mod
->num_unused_gpl_syms
,
3999 mod
->unused_gpl_crcs
,
4001 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
4004 if (each_symbol_in_section(module_arr
, ARRAY_SIZE(module_arr
),
4011 struct find_symbol_arg
{
4018 struct module
*owner
;
4019 const unsigned long *crc
;
4020 const struct kernel_symbol
*sym
;
4023 static bool find_symbol_in_section(const struct symsearch
*syms
,
4024 struct module
*owner
,
4025 unsigned int symnum
, void *data
)
4027 struct find_symbol_arg
*fsa
= data
;
4029 if (strcmp(syms
->start
[symnum
].name
, fsa
->name
) != 0)
4033 if (syms
->licence
== GPL_ONLY
)
4035 if (syms
->licence
== WILL_BE_GPL_ONLY
&& fsa
->warn
) {
4036 printk(KERN_WARNING
"Symbol %s is being used "
4037 "by a non-GPL module, which will not "
4038 "be allowed in the future\n", fsa
->name
);
4039 printk(KERN_WARNING
"Please see the file "
4040 "Documentation/feature-removal-schedule.txt "
4041 "in the kernel source tree for more details.\n");
4045 #ifdef CONFIG_UNUSED_SYMBOLS
4046 if (syms
->unused
&& fsa
->warn
) {
4047 printk(KERN_WARNING
"Symbol %s is marked as UNUSED, "
4048 "however this module is using it.\n", fsa
->name
);
4050 "This symbol will go away in the future.\n");
4052 "Please evalute if this is the right api to use and if "
4053 "it really is, submit a report the linux kernel "
4054 "mailinglist together with submitting your code for "
4060 fsa
->crc
= symversion(syms
->crcs
, symnum
);
4061 fsa
->sym
= &syms
->start
[symnum
];
4065 /* Find a symbol and return it, along with, (optional) crc and
4066 * (optional) module which owns it */
4067 static const struct kernel_symbol
*find_symbol(const char *name
,
4068 struct module
**owner
,
4069 const unsigned long **crc
,
4070 bool gplok
, bool warn
)
4072 struct find_symbol_arg fsa
;
4078 if (each_symbol(find_symbol_in_section
, &fsa
)) {
4089 static inline int within_module_core(unsigned long addr
, struct module
*mod
)
4091 return (unsigned long)mod
->module_core
<= addr
&&
4092 addr
< (unsigned long)mod
->module_core
+ mod
->core_size
;
4095 static inline int within_module_init(unsigned long addr
, struct module
*mod
)
4097 return (unsigned long)mod
->module_init
<= addr
&&
4098 addr
< (unsigned long)mod
->module_init
+ mod
->init_size
;
4101 static struct module
*__module_address(unsigned long addr
)
4105 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
4106 list_for_each_entry_rcu(mod
, &modules
, list
)
4108 /* d72b37513cdfbd3f53f3d485a8c403cc96d2c95f was after 2.6.27 */
4109 list_for_each_entry(mod
, &modules
, list
)
4111 if (within_module_core(addr
, mod
) ||
4112 within_module_init(addr
, mod
))
4116 #endif /* LINUX_VERSION_CODE */
4118 struct update_attribute
{
4119 struct attribute attr
;
4120 ssize_t (*show
)(struct update
*update
, char *buf
);
4121 ssize_t (*store
)(struct update
*update
, const char *buf
, size_t len
);
4124 static ssize_t
update_attr_show(struct kobject
*kobj
, struct attribute
*attr
,
4127 struct update_attribute
*attribute
=
4128 container_of(attr
, struct update_attribute
, attr
);
4129 struct update
*update
= container_of(kobj
, struct update
, kobj
);
4130 if (attribute
->show
== NULL
)
4132 return attribute
->show(update
, buf
);
4135 static ssize_t
update_attr_store(struct kobject
*kobj
, struct attribute
*attr
,
4136 const char *buf
, size_t len
)
4138 struct update_attribute
*attribute
=
4139 container_of(attr
, struct update_attribute
, attr
);
4140 struct update
*update
= container_of(kobj
, struct update
, kobj
);
4141 if (attribute
->store
== NULL
)
4143 return attribute
->store(update
, buf
, len
);
4146 static struct sysfs_ops update_sysfs_ops
= {
4147 .show
= update_attr_show
,
4148 .store
= update_attr_store
,
4151 static void update_release(struct kobject
*kobj
)
4153 struct update
*update
;
4154 update
= container_of(kobj
, struct update
, kobj
);
4155 cleanup_ksplice_update(update
);
4158 static ssize_t
stage_show(struct update
*update
, char *buf
)
4160 switch (update
->stage
) {
4161 case STAGE_PREPARING
:
4162 return snprintf(buf
, PAGE_SIZE
, "preparing\n");
4164 return snprintf(buf
, PAGE_SIZE
, "applied\n");
4165 case STAGE_REVERSED
:
4166 return snprintf(buf
, PAGE_SIZE
, "reversed\n");
4171 static ssize_t
abort_cause_show(struct update
*update
, char *buf
)
4173 switch (update
->abort_cause
) {
4175 return snprintf(buf
, PAGE_SIZE
, "ok\n");
4177 return snprintf(buf
, PAGE_SIZE
, "no_match\n");
4178 #ifdef KSPLICE_STANDALONE
4179 case BAD_SYSTEM_MAP
:
4180 return snprintf(buf
, PAGE_SIZE
, "bad_system_map\n");
4181 #endif /* KSPLICE_STANDALONE */
4183 return snprintf(buf
, PAGE_SIZE
, "code_busy\n");
4185 return snprintf(buf
, PAGE_SIZE
, "module_busy\n");
4187 return snprintf(buf
, PAGE_SIZE
, "out_of_memory\n");
4188 case FAILED_TO_FIND
:
4189 return snprintf(buf
, PAGE_SIZE
, "failed_to_find\n");
4190 case ALREADY_REVERSED
:
4191 return snprintf(buf
, PAGE_SIZE
, "already_reversed\n");
4192 case MISSING_EXPORT
:
4193 return snprintf(buf
, PAGE_SIZE
, "missing_export\n");
4194 case UNEXPECTED_RUNNING_TASK
:
4195 return snprintf(buf
, PAGE_SIZE
, "unexpected_running_task\n");
4196 case TARGET_NOT_LOADED
:
4197 return snprintf(buf
, PAGE_SIZE
, "target_not_loaded\n");
4199 return snprintf(buf
, PAGE_SIZE
, "call_failed\n");
4200 case COLD_UPDATE_LOADED
:
4201 return snprintf(buf
, PAGE_SIZE
, "cold_update_loaded\n");
4203 return snprintf(buf
, PAGE_SIZE
, "unexpected\n");
4205 return snprintf(buf
, PAGE_SIZE
, "unknown\n");
4210 static ssize_t
conflict_show(struct update
*update
, char *buf
)
4212 const struct conflict
*conf
;
4213 const struct conflict_addr
*ca
;
4215 mutex_lock(&module_mutex
);
4216 list_for_each_entry(conf
, &update
->conflicts
, list
) {
4217 used
+= snprintf(buf
+ used
, PAGE_SIZE
- used
, "%s %d",
4218 conf
->process_name
, conf
->pid
);
4219 list_for_each_entry(ca
, &conf
->stack
, list
) {
4220 if (!ca
->has_conflict
)
4222 used
+= snprintf(buf
+ used
, PAGE_SIZE
- used
, " %s",
4225 used
+= snprintf(buf
+ used
, PAGE_SIZE
- used
, "\n");
4227 mutex_unlock(&module_mutex
);
4231 /* Used to pass maybe_cleanup_ksplice_update to kthread_run */
4232 static int maybe_cleanup_ksplice_update_wrapper(void *updateptr
)
4234 struct update
*update
= updateptr
;
4235 mutex_lock(&module_mutex
);
4236 maybe_cleanup_ksplice_update(update
);
4237 mutex_unlock(&module_mutex
);
4241 static ssize_t
stage_store(struct update
*update
, const char *buf
, size_t len
)
4243 enum stage old_stage
;
4244 mutex_lock(&module_mutex
);
4245 old_stage
= update
->stage
;
4246 if ((strncmp(buf
, "applied", len
) == 0 ||
4247 strncmp(buf
, "applied\n", len
) == 0) &&
4248 update
->stage
== STAGE_PREPARING
)
4249 update
->abort_cause
= apply_update(update
);
4250 else if ((strncmp(buf
, "reversed", len
) == 0 ||
4251 strncmp(buf
, "reversed\n", len
) == 0) &&
4252 update
->stage
== STAGE_APPLIED
)
4253 update
->abort_cause
= reverse_update(update
);
4254 else if ((strncmp(buf
, "cleanup", len
) == 0 ||
4255 strncmp(buf
, "cleanup\n", len
) == 0) &&
4256 update
->stage
== STAGE_REVERSED
)
4257 kthread_run(maybe_cleanup_ksplice_update_wrapper
, update
,
4258 "ksplice_cleanup_%s", update
->kid
);
4260 mutex_unlock(&module_mutex
);
4264 static ssize_t
debug_show(struct update
*update
, char *buf
)
4266 return snprintf(buf
, PAGE_SIZE
, "%d\n", update
->debug
);
4269 static ssize_t
debug_store(struct update
*update
, const char *buf
, size_t len
)
4272 int ret
= strict_strtoul(buf
, 10, &l
);
4279 static ssize_t
partial_show(struct update
*update
, char *buf
)
4281 return snprintf(buf
, PAGE_SIZE
, "%d\n", update
->partial
);
4284 static ssize_t
partial_store(struct update
*update
, const char *buf
, size_t len
)
4287 int ret
= strict_strtoul(buf
, 10, &l
);
4290 update
->partial
= l
;
4294 static struct update_attribute stage_attribute
=
4295 __ATTR(stage
, 0600, stage_show
, stage_store
);
4296 static struct update_attribute abort_cause_attribute
=
4297 __ATTR(abort_cause
, 0400, abort_cause_show
, NULL
);
4298 static struct update_attribute debug_attribute
=
4299 __ATTR(debug
, 0600, debug_show
, debug_store
);
4300 static struct update_attribute partial_attribute
=
4301 __ATTR(partial
, 0600, partial_show
, partial_store
);
4302 static struct update_attribute conflict_attribute
=
4303 __ATTR(conflicts
, 0400, conflict_show
, NULL
);
4305 static struct attribute
*update_attrs
[] = {
4306 &stage_attribute
.attr
,
4307 &abort_cause_attribute
.attr
,
4308 &debug_attribute
.attr
,
4309 &partial_attribute
.attr
,
4310 &conflict_attribute
.attr
,
4314 static struct kobj_type update_ktype
= {
4315 .sysfs_ops
= &update_sysfs_ops
,
4316 .release
= update_release
,
4317 .default_attrs
= update_attrs
,
4320 #ifdef KSPLICE_STANDALONE
4322 module_param(debug
, int, 0600);
4323 MODULE_PARM_DESC(debug
, "Debug level");
4325 extern struct ksplice_system_map ksplice_system_map
[], ksplice_system_map_end
[];
4327 static struct ksplice_mod_change bootstrap_mod_change
= {
4328 .name
= "ksplice_" __stringify(KSPLICE_KID
),
4329 .kid
= "init_" __stringify(KSPLICE_KID
),
4330 .target_name
= NULL
,
4332 .map_printk
= MAP_PRINTK
,
4333 .new_code_mod
= THIS_MODULE
,
4334 .new_code
.system_map
= ksplice_system_map
,
4335 .new_code
.system_map_end
= ksplice_system_map_end
,
4337 #endif /* KSPLICE_STANDALONE */
4339 static int init_ksplice(void)
4341 #ifdef KSPLICE_STANDALONE
4342 struct ksplice_mod_change
*change
= &bootstrap_mod_change
;
4343 change
->update
= init_ksplice_update(change
->kid
);
4344 sort(change
->new_code
.system_map
,
4345 change
->new_code
.system_map_end
- change
->new_code
.system_map
,
4346 sizeof(struct ksplice_system_map
), compare_system_map
, NULL
);
4347 if (change
->update
== NULL
)
4349 add_to_update(change
, change
->update
);
4350 change
->update
->debug
= debug
;
4351 change
->update
->abort_cause
=
4352 apply_relocs(change
, ksplice_init_relocs
, ksplice_init_relocs_end
);
4353 if (change
->update
->abort_cause
== OK
)
4354 bootstrapped
= true;
4355 cleanup_ksplice_update(bootstrap_mod_change
.update
);
4356 #else /* !KSPLICE_STANDALONE */
4357 ksplice_kobj
= kobject_create_and_add("ksplice", kernel_kobj
);
4358 if (ksplice_kobj
== NULL
)
4360 #endif /* KSPLICE_STANDALONE */
4364 static void cleanup_ksplice(void)
4366 #ifndef KSPLICE_STANDALONE
4367 kobject_put(ksplice_kobj
);
4368 #endif /* KSPLICE_STANDALONE */
4371 module_init(init_ksplice
);
4372 module_exit(cleanup_ksplice
);
4374 MODULE_AUTHOR("Ksplice, Inc.");
4375 MODULE_DESCRIPTION("Ksplice rebootless update system");
4376 #ifdef KSPLICE_VERSION
4377 MODULE_VERSION(KSPLICE_VERSION
);
4379 MODULE_LICENSE("GPL v2");