1 /* Copyright (C) 2007-2008 Jeffrey Brian Arnold <jbarnold@mit.edu>
2 * Copyright (C) 2008 Anders Kaseorg <andersk@mit.edu>,
3 * Tim Abbott <tabbott@mit.edu>
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
19 #include <linux/module.h>
20 #include <linux/version.h>
21 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
22 #include <linux/bug.h>
23 #else /* LINUX_VERSION_CODE */
24 /* 7664c5a1da4711bb6383117f51b94c8dc8f3f1cd was after 2.6.19 */
25 #endif /* LINUX_VERSION_CODE */
26 #include <linux/ctype.h>
27 #if defined CONFIG_DEBUG_FS || LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
28 #include <linux/debugfs.h>
29 #else /* CONFIG_DEBUG_FS */
30 /* a7a76cefc4b12bb6508afa4c77f11c2752cc365d was after 2.6.11 */
31 #endif /* CONFIG_DEBUG_FS */
32 #include <linux/errno.h>
33 #include <linux/kallsyms.h>
34 #include <linux/kobject.h>
35 #include <linux/kthread.h>
36 #include <linux/pagemap.h>
37 #include <linux/sched.h>
38 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
39 /* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
40 #include <linux/sort.h>
41 #endif /* LINUX_VERSION_CODE < */
42 #include <linux/stop_machine.h>
43 #include <linux/sysfs.h>
44 #include <linux/time.h>
45 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
46 #include <linux/uaccess.h>
47 #else /* LINUX_VERSION_CODE < */
48 /* linux/uaccess.h doesn't exist in kernels before 2.6.18 */
49 #include <asm/uaccess.h>
50 #endif /* LINUX_VERSION_CODE */
51 #include <linux/vmalloc.h>
52 #ifdef KSPLICE_STANDALONE
54 #else /* !KSPLICE_STANDALONE */
55 #include <linux/ksplice.h>
56 #endif /* KSPLICE_STANDALONE */
57 #ifdef KSPLICE_NEED_PARAINSTRUCTIONS
58 #include <asm/alternative.h>
59 #endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
61 #if defined(KSPLICE_STANDALONE) && \
62 !defined(CONFIG_KSPLICE) && !defined(CONFIG_KSPLICE_MODULE)
63 #define KSPLICE_NO_KERNEL_SUPPORT 1
64 #endif /* KSPLICE_STANDALONE && !CONFIG_KSPLICE && !CONFIG_KSPLICE_MODULE */
66 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
67 /* 6e21828743247270d09a86756a0c11702500dbfb was after 2.6.18 */
71 #endif /* LINUX_VERSION_CODE */
74 STAGE_PREPARING
, STAGE_APPLIED
, STAGE_REVERSED
78 RUN_PRE_INITIAL
, RUN_PRE_DEBUG
, RUN_PRE_FINAL
, RUN_PRE_SILENT
81 enum { NOVAL
, TEMP
, VAL
};
83 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
84 /* 5d7b32de9935c65ca8285ac6ec2382afdbb5d479 was after 2.6.8 */
86 #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
87 /* af4ca457eaf2d6682059c18463eb106e2ce58198 was after 2.6.14 */
88 #define __bitwise__ __bitwise
91 typedef int __bitwise__ abort_t
;
93 #define OK ((__force abort_t) 0)
94 #define NO_MATCH ((__force abort_t) 1)
95 #define CODE_BUSY ((__force abort_t) 2)
96 #define MODULE_BUSY ((__force abort_t) 3)
97 #define OUT_OF_MEMORY ((__force abort_t) 4)
98 #define FAILED_TO_FIND ((__force abort_t) 5)
99 #define ALREADY_REVERSED ((__force abort_t) 6)
100 #define MISSING_EXPORT ((__force abort_t) 7)
101 #define UNEXPECTED_RUNNING_TASK ((__force abort_t) 8)
102 #define UNEXPECTED ((__force abort_t) 9)
103 #define TARGET_NOT_LOADED ((__force abort_t) 10)
104 #ifdef KSPLICE_STANDALONE
105 #define BAD_SYSTEM_MAP ((__force abort_t) 11)
106 #endif /* KSPLICE_STANDALONE */
115 #ifdef CONFIG_DEBUG_FS
116 struct debugfs_blob_wrapper debug_blob
;
117 struct dentry
*debugfs_dentry
;
118 #else /* !CONFIG_DEBUG_FS */
119 bool debug_continue_line
;
120 #endif /* CONFIG_DEBUG_FS */
122 struct list_head packs
;
123 struct list_head unused_packs
;
124 struct list_head conflicts
;
125 struct list_head list
;
129 const char *process_name
;
131 struct list_head stack
;
132 struct list_head list
;
135 struct conflict_addr
{
139 struct list_head list
;
142 #if defined(CONFIG_DEBUG_FS) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
143 /* Old kernels don't have debugfs_create_blob */
144 struct debugfs_blob_wrapper
{
148 #endif /* CONFIG_DEBUG_FS && LINUX_VERSION_CODE */
151 struct list_head list
;
152 struct ksplice_symbol
*symbol
;
153 struct list_head
*saved_vals
;
156 struct safety_record
{
157 struct list_head list
;
161 bool first_byte_safe
;
164 struct candidate_val
{
165 struct list_head list
;
169 struct accumulate_struct
{
170 struct ksplice_pack
*pack
;
171 const char *desired_name
;
172 struct list_head
*vals
;
175 struct ksplice_lookup
{
177 struct ksplice_pack
*pack
;
178 struct ksplice_symbol
**arr
;
184 #ifdef KSPLICE_NO_KERNEL_SUPPORT
186 const struct kernel_symbol
*start
, *stop
;
187 const unsigned long *crcs
;
195 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
197 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
198 /* c33fa9f5609e918824446ef9a75319d4a802f1f4 was after 2.6.25 */
200 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
201 /* 2fff0a48416af891dce38fd425246e337831e0bb was after 2.6.19 */
202 static bool virtual_address_mapped(unsigned long addr
)
205 return probe_kernel_address(addr
, retval
) != -EFAULT
;
207 #else /* LINUX_VERSION_CODE < */
208 static bool virtual_address_mapped(unsigned long addr
);
209 #endif /* LINUX_VERSION_CODE */
211 static long probe_kernel_read(void *dst
, void *src
, size_t size
)
215 if (!virtual_address_mapped((unsigned long)src
) ||
216 !virtual_address_mapped((unsigned long)src
+ size
- 1))
219 memcpy(dst
, src
, size
);
222 #endif /* LINUX_VERSION_CODE */
224 static LIST_HEAD(updates
);
225 #ifdef KSPLICE_STANDALONE
226 #if defined(CONFIG_KSPLICE) || defined(CONFIG_KSPLICE_MODULE)
227 extern struct list_head ksplice_module_list
;
228 #else /* !CONFIG_KSPLICE */
229 LIST_HEAD(ksplice_module_list
);
230 #endif /* CONFIG_KSPLICE */
231 #else /* !KSPLICE_STANDALONE */
232 LIST_HEAD(ksplice_module_list
);
233 EXPORT_SYMBOL_GPL(ksplice_module_list
);
234 static struct kobject
*ksplice_kobj
;
235 #endif /* KSPLICE_STANDALONE */
237 static struct kobj_type ksplice_ktype
;
239 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
240 /* Old kernels do not have kcalloc
241 * e629946abd0bb8266e9c3d0fd1bff2ef8dec5443 was after 2.6.8
243 static void *kcalloc(size_t n
, size_t size
, typeof(GFP_KERNEL
) flags
)
246 if (n
!= 0 && size
> ULONG_MAX
/ n
)
248 mem
= kmalloc(n
* size
, flags
);
250 memset(mem
, 0, n
* size
);
253 #endif /* LINUX_VERSION_CODE */
255 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
256 /* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
257 static void u32_swap(void *a
, void *b
, int size
)
260 *(u32
*)a
= *(u32
*)b
;
264 static void generic_swap(void *a
, void *b
, int size
)
270 *(char *)a
++ = *(char *)b
;
272 } while (--size
> 0);
276 * sort - sort an array of elements
277 * @base: pointer to data to sort
278 * @num: number of elements
279 * @size: size of each element
280 * @cmp: pointer to comparison function
281 * @swap: pointer to swap function or NULL
283 * This function does a heapsort on the given array. You may provide a
284 * swap function optimized to your element type.
286 * Sorting time is O(n log n) both on average and worst-case. While
287 * qsort is about 20% faster on average, it suffers from exploitable
288 * O(n*n) worst-case behavior and extra memory requirements that make
289 * it less suitable for kernel use.
292 void sort(void *base
, size_t num
, size_t size
,
293 int (*cmp
)(const void *, const void *),
294 void (*swap
)(void *, void *, int size
))
296 /* pre-scale counters for performance */
297 int i
= (num
/ 2 - 1) * size
, n
= num
* size
, c
, r
;
300 swap
= (size
== 4 ? u32_swap
: generic_swap
);
303 for (; i
>= 0; i
-= size
) {
304 for (r
= i
; r
* 2 + size
< n
; r
= c
) {
306 if (c
< n
- size
&& cmp(base
+ c
, base
+ c
+ size
) < 0)
308 if (cmp(base
+ r
, base
+ c
) >= 0)
310 swap(base
+ r
, base
+ c
, size
);
315 for (i
= n
- size
; i
> 0; i
-= size
) {
316 swap(base
, base
+ i
, size
);
317 for (r
= 0; r
* 2 + size
< i
; r
= c
) {
319 if (c
< i
- size
&& cmp(base
+ c
, base
+ c
+ size
) < 0)
321 if (cmp(base
+ r
, base
+ c
) >= 0)
323 swap(base
+ r
, base
+ c
, size
);
327 #endif /* LINUX_VERSION_CODE < */
329 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
330 /* Old kernels do not have kstrdup
331 * 543537bd922692bc978e2e356fcd8bfc9c2ee7d5 was 2.6.13-rc4
333 static char *kstrdup(const char *s
, typeof(GFP_KERNEL
) gfp
)
342 buf
= kmalloc(len
, gfp
);
347 #endif /* LINUX_VERSION_CODE */
349 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
350 /* Old kernels use semaphore instead of mutex
351 * 97d1f15b7ef52c1e9c28dc48b454024bb53a5fd2 was after 2.6.16
353 #define mutex semaphore
354 #define mutex_lock down
355 #define mutex_unlock up
356 #endif /* LINUX_VERSION_CODE */
358 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
359 /* 11443ec7d9286dd25663516436a14edfb5f43857 was after 2.6.21 */
360 static char * __attribute_used__
361 kvasprintf(typeof(GFP_KERNEL
) gfp
, const char *fmt
, va_list ap
)
368 len
= vsnprintf(dummy
, 0, fmt
, aq
);
371 p
= kmalloc(len
+ 1, gfp
);
375 vsnprintf(p
, len
+ 1, fmt
, ap
);
381 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
382 /* e905914f96e11862b130dd229f73045dad9a34e8 was after 2.6.17 */
383 static char * __attribute__((format (printf
, 2, 3)))
384 kasprintf(typeof(GFP_KERNEL
) gfp
, const char *fmt
, ...)
390 p
= kvasprintf(gfp
, fmt
, ap
);
397 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
398 /* 06b2a76d25d3cfbd14680021c1d356c91be6904e was after 2.6.24 */
399 static int strict_strtoul(const char *cp
, unsigned int base
, unsigned long *res
)
410 val
= simple_strtoul(cp
, &tail
, base
);
411 if ((*tail
== '\0') ||
412 ((len
== (size_t)(tail
- cp
) + 1) && (*tail
== '\n'))) {
421 #ifndef task_thread_info
422 #define task_thread_info(task) (task)->thread_info
423 #endif /* !task_thread_info */
425 #ifdef KSPLICE_STANDALONE
427 static bool bootstrapped
= false;
429 #ifdef CONFIG_KALLSYMS
430 extern unsigned long kallsyms_addresses
[], kallsyms_num_syms
;
431 extern u8 kallsyms_names
[];
432 #endif /* CONFIG_KALLSYMS */
434 /* defined by ksplice-create */
435 extern const struct ksplice_reloc ksplice_init_relocs
[],
436 ksplice_init_relocs_end
[];
438 /* Obtained via System.map */
439 extern struct list_head modules
;
440 extern struct mutex module_mutex
;
441 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) && defined(CONFIG_UNUSED_SYMBOLS)
442 /* f71d20e961474dde77e6558396efb93d6ac80a4b was after 2.6.17 */
443 #define KSPLICE_KSYMTAB_UNUSED_SUPPORT 1
444 #endif /* LINUX_VERSION_CODE */
445 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
446 /* 9f28bb7e1d0188a993403ab39b774785892805e1 was after 2.6.16 */
447 #define KSPLICE_KSYMTAB_FUTURE_SUPPORT 1
448 #endif /* LINUX_VERSION_CODE */
449 extern const struct kernel_symbol __start___ksymtab
[];
450 extern const struct kernel_symbol __stop___ksymtab
[];
451 extern const unsigned long __start___kcrctab
[];
452 extern const struct kernel_symbol __start___ksymtab_gpl
[];
453 extern const struct kernel_symbol __stop___ksymtab_gpl
[];
454 extern const unsigned long __start___kcrctab_gpl
[];
455 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
456 extern const struct kernel_symbol __start___ksymtab_unused
[];
457 extern const struct kernel_symbol __stop___ksymtab_unused
[];
458 extern const unsigned long __start___kcrctab_unused
[];
459 extern const struct kernel_symbol __start___ksymtab_unused_gpl
[];
460 extern const struct kernel_symbol __stop___ksymtab_unused_gpl
[];
461 extern const unsigned long __start___kcrctab_unused_gpl
[];
462 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
463 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
464 extern const struct kernel_symbol __start___ksymtab_gpl_future
[];
465 extern const struct kernel_symbol __stop___ksymtab_gpl_future
[];
466 extern const unsigned long __start___kcrctab_gpl_future
[];
467 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
469 #endif /* KSPLICE_STANDALONE */
471 static struct update
*init_ksplice_update(const char *kid
);
472 static void cleanup_ksplice_update(struct update
*update
);
473 static void maybe_cleanup_ksplice_update(struct update
*update
);
474 static void add_to_update(struct ksplice_pack
*pack
, struct update
*update
);
475 static int ksplice_sysfs_init(struct update
*update
);
477 /* Preparing the relocations and patches for application */
478 static abort_t
apply_update(struct update
*update
);
479 static abort_t
prepare_pack(struct ksplice_pack
*pack
);
480 static abort_t
finalize_pack(struct ksplice_pack
*pack
);
481 static abort_t
finalize_exports(struct ksplice_pack
*pack
);
482 static abort_t
finalize_patches(struct ksplice_pack
*pack
);
483 static abort_t
add_dependency_on_address(struct ksplice_pack
*pack
,
485 static abort_t
map_trampoline_pages(struct update
*update
);
486 static void unmap_trampoline_pages(struct update
*update
);
487 static void *map_writable(void *addr
, size_t len
);
488 static abort_t
apply_relocs(struct ksplice_pack
*pack
,
489 const struct ksplice_reloc
*relocs
,
490 const struct ksplice_reloc
*relocs_end
);
491 static abort_t
apply_reloc(struct ksplice_pack
*pack
,
492 const struct ksplice_reloc
*r
);
493 static abort_t
apply_howto_reloc(struct ksplice_pack
*pack
,
494 const struct ksplice_reloc
*r
);
495 static abort_t
apply_howto_date(struct ksplice_pack
*pack
,
496 const struct ksplice_reloc
*r
);
497 static abort_t
read_reloc_value(struct ksplice_pack
*pack
,
498 const struct ksplice_reloc
*r
,
499 unsigned long addr
, unsigned long *valp
);
500 static abort_t
write_reloc_value(struct ksplice_pack
*pack
,
501 const struct ksplice_reloc
*r
,
502 unsigned long addr
, unsigned long sym_addr
);
503 static void __attribute__((noreturn
)) ksplice_deleted(void);
505 /* run-pre matching */
506 static abort_t
match_pack_sections(struct ksplice_pack
*pack
,
507 bool consider_data_sections
);
508 static abort_t
find_section(struct ksplice_pack
*pack
,
509 struct ksplice_section
*sect
);
510 static abort_t
try_addr(struct ksplice_pack
*pack
,
511 struct ksplice_section
*sect
,
512 unsigned long run_addr
,
513 struct list_head
*safety_records
,
514 enum run_pre_mode mode
);
515 static abort_t
run_pre_cmp(struct ksplice_pack
*pack
,
516 const struct ksplice_section
*sect
,
517 unsigned long run_addr
,
518 struct list_head
*safety_records
,
519 enum run_pre_mode mode
);
520 #ifndef CONFIG_FUNCTION_DATA_SECTIONS
521 /* defined in arch/ARCH/kernel/ksplice-arch.c */
522 static abort_t
arch_run_pre_cmp(struct ksplice_pack
*pack
,
523 struct ksplice_section
*sect
,
524 unsigned long run_addr
,
525 struct list_head
*safety_records
,
526 enum run_pre_mode mode
);
527 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
528 static void print_bytes(struct ksplice_pack
*pack
,
529 const unsigned char *run
, int runc
,
530 const unsigned char *pre
, int prec
);
531 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
532 static abort_t
brute_search(struct ksplice_pack
*pack
,
533 struct ksplice_section
*sect
,
534 const void *start
, unsigned long len
,
535 struct list_head
*vals
);
536 static abort_t
brute_search_all(struct ksplice_pack
*pack
,
537 struct ksplice_section
*sect
,
538 struct list_head
*vals
);
539 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
540 static const struct ksplice_reloc
*
541 init_reloc_search(struct ksplice_pack
*pack
,
542 const struct ksplice_section
*sect
);
543 static const struct ksplice_reloc
*find_reloc(const struct ksplice_reloc
*start
,
544 const struct ksplice_reloc
*end
,
545 unsigned long address
,
547 static abort_t
lookup_reloc(struct ksplice_pack
*pack
,
548 const struct ksplice_reloc
**fingerp
,
550 const struct ksplice_reloc
**relocp
);
551 static abort_t
handle_reloc(struct ksplice_pack
*pack
,
552 const struct ksplice_section
*sect
,
553 const struct ksplice_reloc
*r
,
554 unsigned long run_addr
, enum run_pre_mode mode
);
555 static abort_t
handle_howto_date(struct ksplice_pack
*pack
,
556 const struct ksplice_section
*sect
,
557 const struct ksplice_reloc
*r
,
558 unsigned long run_addr
,
559 enum run_pre_mode mode
);
560 static abort_t
handle_howto_reloc(struct ksplice_pack
*pack
,
561 const struct ksplice_section
*sect
,
562 const struct ksplice_reloc
*r
,
563 unsigned long run_addr
,
564 enum run_pre_mode mode
);
565 static struct ksplice_section
*symbol_section(struct ksplice_pack
*pack
,
566 const struct ksplice_symbol
*sym
);
567 static int compare_section_labels(const void *va
, const void *vb
);
568 static int symbol_section_bsearch_compare(const void *a
, const void *b
);
569 static const struct ksplice_reloc
*patch_reloc(struct ksplice_pack
*pack
,
570 const struct ksplice_patch
*p
);
572 /* Computing possible addresses for symbols */
573 static abort_t
lookup_symbol(struct ksplice_pack
*pack
,
574 const struct ksplice_symbol
*ksym
,
575 struct list_head
*vals
);
576 static void cleanup_symbol_arrays(struct ksplice_pack
*pack
);
577 static abort_t
init_symbol_arrays(struct ksplice_pack
*pack
);
578 static abort_t
init_symbol_array(struct ksplice_pack
*pack
,
579 struct ksplice_symbol
*start
,
580 struct ksplice_symbol
*end
);
581 static abort_t
uniquify_symbols(struct ksplice_pack
*pack
);
582 static abort_t
add_matching_values(struct ksplice_lookup
*lookup
,
583 const char *sym_name
, unsigned long sym_val
);
584 static bool add_export_values(const struct symsearch
*syms
,
585 struct module
*owner
,
586 unsigned int symnum
, void *data
);
587 static int symbolp_bsearch_compare(const void *key
, const void *elt
);
588 static int compare_symbolp_names(const void *a
, const void *b
);
589 static int compare_symbolp_labels(const void *a
, const void *b
);
590 #ifdef CONFIG_KALLSYMS
591 static int add_kallsyms_values(void *data
, const char *name
,
592 struct module
*owner
, unsigned long val
);
593 #endif /* CONFIG_KALLSYMS */
594 #ifdef KSPLICE_STANDALONE
596 add_system_map_candidates(struct ksplice_pack
*pack
,
597 const struct ksplice_system_map
*start
,
598 const struct ksplice_system_map
*end
,
599 const char *label
, struct list_head
*vals
);
600 static int compare_system_map(const void *a
, const void *b
);
601 static int system_map_bsearch_compare(const void *key
, const void *elt
);
602 #endif /* KSPLICE_STANDALONE */
603 static abort_t
new_export_lookup(struct ksplice_pack
*p
, struct update
*update
,
604 const char *name
, struct list_head
*vals
);
606 /* Atomic update insertion and removal */
607 static abort_t
apply_patches(struct update
*update
);
608 static abort_t
reverse_patches(struct update
*update
);
609 static int __apply_patches(void *update
);
610 static int __reverse_patches(void *update
);
611 static abort_t
check_each_task(struct update
*update
);
612 static abort_t
check_task(struct update
*update
,
613 const struct task_struct
*t
, bool rerun
);
614 static abort_t
check_stack(struct update
*update
, struct conflict
*conf
,
615 const struct thread_info
*tinfo
,
616 const unsigned long *stack
);
617 static abort_t
check_address(struct update
*update
,
618 struct conflict
*conf
, unsigned long addr
);
619 static abort_t
check_record(struct conflict_addr
*ca
,
620 const struct safety_record
*rec
,
622 static bool is_stop_machine(const struct task_struct
*t
);
623 static void cleanup_conflicts(struct update
*update
);
624 static void print_conflicts(struct update
*update
);
625 static void insert_trampoline(struct ksplice_patch
*p
);
626 static abort_t
verify_trampoline(struct ksplice_pack
*pack
,
627 const struct ksplice_patch
*p
);
628 static void remove_trampoline(const struct ksplice_patch
*p
);
630 static abort_t
create_labelval(struct ksplice_pack
*pack
,
631 struct ksplice_symbol
*ksym
,
632 unsigned long val
, int status
);
633 static abort_t
create_safety_record(struct ksplice_pack
*pack
,
634 const struct ksplice_section
*sect
,
635 struct list_head
*record_list
,
636 unsigned long run_addr
,
637 unsigned long run_size
);
638 static abort_t
add_candidate_val(struct ksplice_pack
*pack
,
639 struct list_head
*vals
, unsigned long val
);
640 static void release_vals(struct list_head
*vals
);
641 static void set_temp_labelvals(struct ksplice_pack
*pack
, int status_val
);
643 static int contains_canary(struct ksplice_pack
*pack
, unsigned long blank_addr
,
644 const struct ksplice_reloc_howto
*howto
);
645 static unsigned long follow_trampolines(struct ksplice_pack
*pack
,
647 static bool patches_module(const struct module
*a
, const struct module
*b
);
648 static bool starts_with(const char *str
, const char *prefix
);
649 static bool singular(struct list_head
*list
);
650 static void *bsearch(const void *key
, const void *base
, size_t n
,
651 size_t size
, int (*cmp
)(const void *key
, const void *elt
));
652 static int compare_relocs(const void *a
, const void *b
);
653 static int reloc_bsearch_compare(const void *key
, const void *elt
);
656 static abort_t
init_debug_buf(struct update
*update
);
657 static void clear_debug_buf(struct update
*update
);
658 static int __attribute__((format(printf
, 2, 3)))
659 _ksdebug(struct update
*update
, const char *fmt
, ...);
660 #define ksdebug(pack, fmt, ...) \
661 _ksdebug(pack->update, fmt, ## __VA_ARGS__)
663 #ifdef KSPLICE_NO_KERNEL_SUPPORT
664 /* Functions defined here that will be exported in later kernels */
665 #ifdef CONFIG_KALLSYMS
666 static int kallsyms_on_each_symbol(int (*fn
)(void *, const char *,
667 struct module
*, unsigned long),
669 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
670 static unsigned int kallsyms_expand_symbol(unsigned int off
, char *result
);
671 #endif /* LINUX_VERSION_CODE */
672 static int module_kallsyms_on_each_symbol(int (*fn
)(void *, const char *,
676 #endif /* CONFIG_KALLSYMS */
677 static struct module
*find_module(const char *name
);
678 static int use_module(struct module
*a
, struct module
*b
);
679 static const struct kernel_symbol
*find_symbol(const char *name
,
680 struct module
**owner
,
681 const unsigned long **crc
,
682 bool gplok
, bool warn
);
683 static bool each_symbol(bool (*fn
)(const struct symsearch
*arr
,
684 struct module
*owner
,
685 unsigned int symnum
, void *data
),
687 static struct module
*__module_data_address(unsigned long addr
);
688 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
690 /* Architecture-specific functions defined in arch/ARCH/kernel/ksplice-arch.c */
691 static abort_t
prepare_trampoline(struct ksplice_pack
*pack
,
692 struct ksplice_patch
*p
);
693 static abort_t
trampoline_target(struct ksplice_pack
*pack
, unsigned long addr
,
694 unsigned long *new_addr
);
695 static abort_t
handle_paravirt(struct ksplice_pack
*pack
, unsigned long pre
,
696 unsigned long run
, int *matched
);
697 static abort_t
handle_bug(struct ksplice_pack
*pack
,
698 const struct ksplice_reloc
*r
,
699 unsigned long run_addr
);
700 static abort_t
handle_extable(struct ksplice_pack
*pack
,
701 const struct ksplice_reloc
*r
,
702 unsigned long run_addr
);
703 static bool valid_stack_ptr(const struct thread_info
*tinfo
, const void *p
);
705 #ifndef KSPLICE_STANDALONE
706 #include "ksplice-arch.c"
707 #elif defined CONFIG_X86
708 #include "x86/ksplice-arch.c"
709 #elif defined CONFIG_ARM
710 #include "arm/ksplice-arch.c"
711 #endif /* KSPLICE_STANDALONE */
713 #define clear_list(head, type, member) \
715 struct list_head *_pos, *_n; \
716 list_for_each_safe(_pos, _n, head) { \
718 kfree(list_entry(_pos, type, member)); \
722 int init_ksplice_pack(struct ksplice_pack
*pack
)
724 struct update
*update
;
725 struct ksplice_patch
*p
;
726 struct ksplice_section
*s
;
729 #ifdef KSPLICE_STANDALONE
732 #endif /* KSPLICE_STANDALONE */
734 INIT_LIST_HEAD(&pack
->temp_labelvals
);
735 INIT_LIST_HEAD(&pack
->safety_records
);
737 sort(pack
->helper_relocs
,
738 (pack
->helper_relocs_end
- pack
->helper_relocs
),
739 sizeof(*pack
->helper_relocs
), compare_relocs
, NULL
);
740 sort(pack
->primary_relocs
,
741 (pack
->primary_relocs_end
- pack
->primary_relocs
),
742 sizeof(*pack
->primary_relocs
), compare_relocs
, NULL
);
743 sort(pack
->helper_sections
,
744 (pack
->helper_sections_end
- pack
->helper_sections
),
745 sizeof(*pack
->helper_sections
), compare_section_labels
, NULL
);
746 #ifdef KSPLICE_STANDALONE
747 sort(pack
->primary_system_map
,
748 (pack
->primary_system_map_end
- pack
->primary_system_map
),
749 sizeof(*pack
->primary_system_map
), compare_system_map
, NULL
);
750 sort(pack
->helper_system_map
,
751 (pack
->helper_system_map_end
- pack
->helper_system_map
),
752 sizeof(*pack
->helper_system_map
), compare_system_map
, NULL
);
753 #endif /* KSPLICE_STANDALONE */
755 mutex_lock(&module_mutex
);
756 for (p
= pack
->patches
; p
< pack
->patches_end
; p
++)
758 for (s
= pack
->helper_sections
; s
< pack
->helper_sections_end
; s
++)
761 list_for_each_entry(update
, &updates
, list
) {
762 if (strcmp(pack
->kid
, update
->kid
) == 0) {
763 if (update
->stage
!= STAGE_PREPARING
) {
767 add_to_update(pack
, update
);
772 update
= init_ksplice_update(pack
->kid
);
773 if (update
== NULL
) {
777 ret
= ksplice_sysfs_init(update
);
779 cleanup_ksplice_update(update
);
782 add_to_update(pack
, update
);
784 mutex_unlock(&module_mutex
);
787 EXPORT_SYMBOL_GPL(init_ksplice_pack
);
789 void cleanup_ksplice_pack(struct ksplice_pack
*pack
)
791 if (pack
->update
== NULL
)
793 if (pack
->update
->stage
== STAGE_APPLIED
) {
794 /* If the pack wasn't actually applied (because we
795 only applied this update to loaded modules and this
796 target wasn't loaded), then unregister the pack
797 from the list of unused packs */
798 struct ksplice_pack
*p
;
801 mutex_lock(&module_mutex
);
802 list_for_each_entry(p
, &pack
->update
->unused_packs
, list
) {
807 list_del(&pack
->list
);
808 mutex_unlock(&module_mutex
);
811 mutex_lock(&module_mutex
);
812 list_del(&pack
->list
);
813 mutex_unlock(&module_mutex
);
814 if (pack
->update
->stage
== STAGE_PREPARING
)
815 maybe_cleanup_ksplice_update(pack
->update
);
818 EXPORT_SYMBOL_GPL(cleanup_ksplice_pack
);
820 static struct update
*init_ksplice_update(const char *kid
)
822 struct update
*update
;
823 update
= kcalloc(1, sizeof(struct update
), GFP_KERNEL
);
826 update
->name
= kasprintf(GFP_KERNEL
, "ksplice_%s", kid
);
827 if (update
->name
== NULL
) {
831 update
->kid
= kstrdup(kid
, GFP_KERNEL
);
832 if (update
->kid
== NULL
) {
837 if (try_module_get(THIS_MODULE
) != 1) {
843 INIT_LIST_HEAD(&update
->packs
);
844 INIT_LIST_HEAD(&update
->unused_packs
);
845 if (init_debug_buf(update
) != OK
) {
846 module_put(THIS_MODULE
);
852 list_add(&update
->list
, &updates
);
853 update
->stage
= STAGE_PREPARING
;
854 update
->abort_cause
= OK
;
856 INIT_LIST_HEAD(&update
->conflicts
);
860 static void cleanup_ksplice_update(struct update
*update
)
862 #ifdef KSPLICE_STANDALONE
864 mutex_lock(&module_mutex
);
865 list_del(&update
->list
);
867 mutex_unlock(&module_mutex
);
868 #else /* !KSPLICE_STANDALONE */
869 mutex_lock(&module_mutex
);
870 list_del(&update
->list
);
871 mutex_unlock(&module_mutex
);
872 #endif /* KSPLICE_STANDALONE */
873 cleanup_conflicts(update
);
874 clear_debug_buf(update
);
878 module_put(THIS_MODULE
);
881 static void maybe_cleanup_ksplice_update(struct update
*update
)
883 if (list_empty(&update
->packs
) && list_empty(&update
->unused_packs
))
884 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
885 kobject_put(&update
->kobj
);
886 #else /* LINUX_VERSION_CODE < */
887 /* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
888 kobject_unregister(&update
->kobj
);
889 #endif /* LINUX_VERSION_CODE */
892 static void add_to_update(struct ksplice_pack
*pack
, struct update
*update
)
894 pack
->update
= update
;
895 list_add(&pack
->list
, &update
->unused_packs
);
896 pack
->module_list_entry
.primary
= pack
->primary
;
899 static int ksplice_sysfs_init(struct update
*update
)
902 memset(&update
->kobj
, 0, sizeof(update
->kobj
));
903 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
904 #ifndef KSPLICE_STANDALONE
905 ret
= kobject_init_and_add(&update
->kobj
, &ksplice_ktype
,
906 ksplice_kobj
, "%s", update
->kid
);
907 #else /* KSPLICE_STANDALONE */
908 /* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
909 ret
= kobject_init_and_add(&update
->kobj
, &ksplice_ktype
,
910 &THIS_MODULE
->mkobj
.kobj
, "ksplice");
911 #endif /* KSPLICE_STANDALONE */
912 #else /* LINUX_VERSION_CODE < */
913 ret
= kobject_set_name(&update
->kobj
, "%s", "ksplice");
916 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
917 update
->kobj
.parent
= &THIS_MODULE
->mkobj
.kobj
;
918 #else /* LINUX_VERSION_CODE < */
919 /* b86ab02803095190d6b72bcc18dcf620bf378df9 was after 2.6.10 */
920 update
->kobj
.parent
= &THIS_MODULE
->mkobj
->kobj
;
921 #endif /* LINUX_VERSION_CODE */
922 update
->kobj
.ktype
= &ksplice_ktype
;
923 ret
= kobject_register(&update
->kobj
);
924 #endif /* LINUX_VERSION_CODE */
927 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
928 kobject_uevent(&update
->kobj
, KOBJ_ADD
);
929 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
930 /* 312c004d36ce6c739512bac83b452f4c20ab1f62 was after 2.6.14 */
931 /* 12025235884570ba7f02a6f427f973ac6be7ec54 was after 2.6.9 */
932 kobject_uevent(&update
->kobj
, KOBJ_ADD
, NULL
);
933 #endif /* LINUX_VERSION_CODE */
937 static abort_t
apply_update(struct update
*update
)
939 struct ksplice_pack
*pack
, *n
;
943 mutex_lock(&module_mutex
);
944 list_for_each_entry_safe(pack
, n
, &update
->unused_packs
, list
) {
945 if (strcmp(pack
->target_name
, "vmlinux") == 0) {
947 } else if (pack
->target
== NULL
) {
948 pack
->target
= find_module(pack
->target_name
);
949 if (pack
->target
== NULL
||
950 !module_is_live(pack
->target
)) {
951 if (update
->partial
) {
954 ret
= TARGET_NOT_LOADED
;
958 retval
= use_module(pack
->primary
, pack
->target
);
964 list_del(&pack
->list
);
965 list_add_tail(&pack
->list
, &update
->packs
);
966 pack
->module_list_entry
.target
= pack
->target
;
968 #ifdef KSPLICE_NEED_PARAINSTRUCTIONS
969 if (pack
->target
== NULL
) {
970 apply_paravirt(pack
->primary_parainstructions
,
971 pack
->primary_parainstructions_end
);
972 apply_paravirt(pack
->helper_parainstructions
,
973 pack
->helper_parainstructions_end
);
975 #endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
978 list_for_each_entry(pack
, &update
->packs
, list
) {
979 ret
= init_symbol_arrays(pack
);
981 cleanup_symbol_arrays(pack
);
984 ret
= prepare_pack(pack
);
985 cleanup_symbol_arrays(pack
);
989 ret
= apply_patches(update
);
991 list_for_each_entry(pack
, &update
->packs
, list
) {
992 struct ksplice_section
*s
;
993 if (update
->stage
== STAGE_PREPARING
)
994 clear_list(&pack
->safety_records
, struct safety_record
,
996 for (s
= pack
->helper_sections
; s
< pack
->helper_sections_end
;
998 if (s
->match_map
!= NULL
) {
1000 s
->match_map
= NULL
;
1004 mutex_unlock(&module_mutex
);
1008 static int compare_symbolp_names(const void *a
, const void *b
)
1010 const struct ksplice_symbol
*const *sympa
= a
, *const *sympb
= b
;
1011 if ((*sympa
)->name
== NULL
&& (*sympb
)->name
== NULL
)
1013 if ((*sympa
)->name
== NULL
)
1015 if ((*sympb
)->name
== NULL
)
1017 return strcmp((*sympa
)->name
, (*sympb
)->name
);
1020 static int compare_symbolp_labels(const void *a
, const void *b
)
1022 const struct ksplice_symbol
*const *sympa
= a
, *const *sympb
= b
;
1023 return strcmp((*sympa
)->label
, (*sympb
)->label
);
1026 static int symbolp_bsearch_compare(const void *key
, const void *elt
)
1028 const char *name
= key
;
1029 const struct ksplice_symbol
*const *symp
= elt
;
1030 const struct ksplice_symbol
*sym
= *symp
;
1031 if (sym
->name
== NULL
)
1033 return strcmp(name
, sym
->name
);
1036 static abort_t
add_matching_values(struct ksplice_lookup
*lookup
,
1037 const char *sym_name
, unsigned long sym_val
)
1039 struct ksplice_symbol
**symp
;
1042 symp
= bsearch(sym_name
, lookup
->arr
, lookup
->size
,
1043 sizeof(*lookup
->arr
), symbolp_bsearch_compare
);
1047 while (symp
> lookup
->arr
&&
1048 symbolp_bsearch_compare(sym_name
, symp
- 1) == 0)
1051 for (; symp
< lookup
->arr
+ lookup
->size
; symp
++) {
1052 struct ksplice_symbol
*sym
= *symp
;
1053 if (sym
->name
== NULL
|| strcmp(sym_name
, sym
->name
) != 0)
1055 ret
= add_candidate_val(lookup
->pack
, sym
->vals
, sym_val
);
1062 #ifdef CONFIG_KALLSYMS
1063 static int add_kallsyms_values(void *data
, const char *name
,
1064 struct module
*owner
, unsigned long val
)
1066 struct ksplice_lookup
*lookup
= data
;
1067 if (owner
== lookup
->pack
->primary
||
1068 !patches_module(owner
, lookup
->pack
->target
))
1069 return (__force
int)OK
;
1070 return (__force
int)add_matching_values(lookup
, name
, val
);
1072 #endif /* CONFIG_KALLSYMS */
1074 static bool add_export_values(const struct symsearch
*syms
,
1075 struct module
*owner
,
1076 unsigned int symnum
, void *data
)
1078 struct ksplice_lookup
*lookup
= data
;
1081 ret
= add_matching_values(lookup
, syms
->start
[symnum
].name
,
1082 syms
->start
[symnum
].value
);
1090 static void cleanup_symbol_arrays(struct ksplice_pack
*pack
)
1092 struct ksplice_symbol
*sym
;
1093 for (sym
= pack
->primary_symbols
; sym
< pack
->primary_symbols_end
;
1095 if (sym
->vals
!= NULL
) {
1096 clear_list(sym
->vals
, struct candidate_val
, list
);
1101 for (sym
= pack
->helper_symbols
; sym
< pack
->helper_symbols_end
; sym
++) {
1102 if (sym
->vals
!= NULL
) {
1103 clear_list(sym
->vals
, struct candidate_val
, list
);
1110 static abort_t
uniquify_symbols(struct ksplice_pack
*pack
)
1112 struct ksplice_reloc
*r
;
1113 struct ksplice_section
*s
;
1114 struct ksplice_symbol
*sym
, **sym_arr
, **symp
;
1115 size_t size
= pack
->primary_symbols_end
- pack
->primary_symbols
;
1120 sym_arr
= vmalloc(sizeof(*sym_arr
) * size
);
1121 if (sym_arr
== NULL
)
1122 return OUT_OF_MEMORY
;
1124 for (symp
= sym_arr
, sym
= pack
->primary_symbols
;
1125 symp
< sym_arr
+ size
&& sym
< pack
->primary_symbols_end
;
1129 sort(sym_arr
, size
, sizeof(*sym_arr
), compare_symbolp_labels
, NULL
);
1131 for (r
= pack
->helper_relocs
; r
< pack
->helper_relocs_end
; r
++) {
1132 symp
= bsearch(&r
->symbol
, sym_arr
, size
, sizeof(*sym_arr
),
1133 compare_symbolp_labels
);
1135 if ((*symp
)->name
== NULL
)
1136 (*symp
)->name
= r
->symbol
->name
;
1141 for (s
= pack
->helper_sections
; s
< pack
->helper_sections_end
; s
++) {
1142 symp
= bsearch(&s
->symbol
, sym_arr
, size
, sizeof(*sym_arr
),
1143 compare_symbolp_labels
);
1145 if ((*symp
)->name
== NULL
)
1146 (*symp
)->name
= s
->symbol
->name
;
1155 static abort_t
init_symbol_array(struct ksplice_pack
*pack
,
1156 struct ksplice_symbol
*start
,
1157 struct ksplice_symbol
*end
)
1159 struct ksplice_symbol
*sym
, **sym_arr
, **symp
;
1160 struct ksplice_lookup lookup
;
1161 size_t size
= end
- start
;
1167 for (sym
= start
; sym
< end
; sym
++) {
1168 sym
->vals
= kmalloc(sizeof(*sym
->vals
), GFP_KERNEL
);
1169 if (sym
->vals
== NULL
)
1170 return OUT_OF_MEMORY
;
1171 INIT_LIST_HEAD(sym
->vals
);
1175 sym_arr
= vmalloc(sizeof(*sym_arr
) * size
);
1176 if (sym_arr
== NULL
)
1177 return OUT_OF_MEMORY
;
1179 for (symp
= sym_arr
, sym
= start
; symp
< sym_arr
+ size
&& sym
< end
;
1183 sort(sym_arr
, size
, sizeof(*sym_arr
), compare_symbolp_names
, NULL
);
1186 lookup
.arr
= sym_arr
;
1190 each_symbol(add_export_values
, &lookup
);
1192 #ifdef CONFIG_KALLSYMS
1194 ret
= (__force abort_t
)
1195 kallsyms_on_each_symbol(add_kallsyms_values
, &lookup
);
1196 #endif /* CONFIG_KALLSYMS */
1201 static abort_t
init_symbol_arrays(struct ksplice_pack
*pack
)
1205 ret
= uniquify_symbols(pack
);
1209 ret
= init_symbol_array(pack
, pack
->helper_symbols
,
1210 pack
->helper_symbols_end
);
1214 ret
= init_symbol_array(pack
, pack
->primary_symbols
,
1215 pack
->primary_symbols_end
);
1222 static abort_t
prepare_pack(struct ksplice_pack
*pack
)
1226 ksdebug(pack
, "Preparing and checking %s\n", pack
->name
);
1227 ret
= match_pack_sections(pack
, false);
1228 if (ret
== NO_MATCH
) {
1229 /* It is possible that by using relocations from .data sections
1230 we can successfully run-pre match the rest of the sections.
1231 To avoid using any symbols obtained from .data sections
1232 (which may be unreliable) in the post code, we first prepare
1233 the post code and then try to run-pre match the remaining
1234 sections with the help of .data sections.
1236 ksdebug(pack
, "Continuing without some sections; we might "
1237 "find them later.\n");
1238 ret
= finalize_pack(pack
);
1240 ksdebug(pack
, "Aborted. Unable to continue without "
1241 "the unmatched sections.\n");
1245 ksdebug(pack
, "run-pre: Considering .data sections to find the "
1246 "unmatched sections\n");
1247 ret
= match_pack_sections(pack
, true);
1251 ksdebug(pack
, "run-pre: Found all previously unmatched "
1254 } else if (ret
!= OK
) {
1258 return finalize_pack(pack
);
1261 static abort_t
finalize_pack(struct ksplice_pack
*pack
)
1264 ret
= apply_relocs(pack
, pack
->primary_relocs
,
1265 pack
->primary_relocs_end
);
1269 ret
= finalize_patches(pack
);
1273 ret
= finalize_exports(pack
);
1280 static abort_t
finalize_exports(struct ksplice_pack
*pack
)
1282 struct ksplice_export
*exp
;
1284 const struct kernel_symbol
*sym
;
1286 for (exp
= pack
->exports
; exp
< pack
->exports_end
; exp
++) {
1287 sym
= find_symbol(exp
->name
, &m
, NULL
, true, false);
1289 ksdebug(pack
, "Could not find kernel_symbol struct for "
1291 return MISSING_EXPORT
;
1294 /* Cast away const since we are planning to mutate the
1295 * kernel_symbol structure. */
1296 exp
->sym
= (struct kernel_symbol
*)sym
;
1297 exp
->saved_name
= exp
->sym
->name
;
1298 if (m
!= pack
->primary
&& use_module(pack
->primary
, m
) != 1) {
1299 ksdebug(pack
, "Aborted. Could not add dependency on "
1300 "symbol %s from module %s.\n", sym
->name
,
1308 static abort_t
finalize_patches(struct ksplice_pack
*pack
)
1310 struct ksplice_patch
*p
;
1311 struct safety_record
*rec
;
1314 for (p
= pack
->patches
; p
< pack
->patches_end
; p
++) {
1316 list_for_each_entry(rec
, &pack
->safety_records
, list
) {
1317 if (rec
->addr
<= p
->oldaddr
&&
1318 p
->oldaddr
< rec
->addr
+ rec
->size
) {
1324 const struct ksplice_reloc
*r
= patch_reloc(pack
, p
);
1326 ksdebug(pack
, "A patch with no ksplice_reloc at"
1327 " its oldaddr has no safety record\n");
1330 ksdebug(pack
, "No safety record for patch with oldaddr "
1331 "%s+%lx\n", r
->symbol
->label
, r
->target_addend
);
1335 if (p
->type
== KSPLICE_PATCH_TEXT
) {
1336 ret
= prepare_trampoline(pack
, p
);
1341 if (rec
->addr
+ rec
->size
< p
->oldaddr
+ p
->size
) {
1342 ksdebug(pack
, "Safety record %s is too short for "
1343 "patch\n", rec
->label
);
1347 if (p
->type
== KSPLICE_PATCH_TEXT
) {
1348 if (p
->repladdr
== 0)
1349 p
->repladdr
= (unsigned long)ksplice_deleted
;
1351 rec
->first_byte_safe
= true;
1354 ret
= add_dependency_on_address(pack
, p
->oldaddr
);
1361 static abort_t
map_trampoline_pages(struct update
*update
)
1363 struct ksplice_pack
*pack
;
1364 list_for_each_entry(pack
, &update
->packs
, list
) {
1365 struct ksplice_patch
*p
;
1366 for (p
= pack
->patches
; p
< pack
->patches_end
; p
++) {
1367 p
->vaddr
= map_writable((void *)p
->oldaddr
, p
->size
);
1368 if (p
->vaddr
== NULL
) {
1369 ksdebug(pack
, "Unable to map oldaddr read/write"
1371 unmap_trampoline_pages(update
);
1379 static void unmap_trampoline_pages(struct update
*update
)
1381 struct ksplice_pack
*pack
;
1382 list_for_each_entry(pack
, &update
->packs
, list
) {
1383 struct ksplice_patch
*p
;
1384 for (p
= pack
->patches
; p
< pack
->patches_end
; p
++) {
1385 vunmap((void *)((unsigned long)p
->vaddr
& PAGE_MASK
));
1391 /* Based off of linux's text_poke. */
1392 static void *map_writable(void *addr
, size_t len
)
1396 unsigned long laddr
= (unsigned long)addr
;
1397 struct page
*pages
[2];
1399 if ((laddr
>= init_mm
.start_code
&& laddr
< init_mm
.end_code
) ||
1400 (laddr
>= init_mm
.start_data
&& laddr
< init_mm
.end_data
)) {
1401 #if defined(CONFIG_X86_64) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
1402 /* e3ebadd95cb621e2c7436f3d3646447ac9d5c16d was after 2.6.21 */
1403 pages
[0] = pfn_to_page(__pa_symbol(addr
) >> PAGE_SHIFT
);
1404 WARN_ON(!PageReserved(pages
[0]));
1405 pages
[1] = pfn_to_page(__pa_symbol(addr
+ PAGE_SIZE
) >>
1407 #else /* !CONFIG_X86_64 || LINUX_VERSION_CODE >= */
1408 pages
[0] = virt_to_page(addr
);
1409 WARN_ON(!PageReserved(pages
[0]));
1410 pages
[1] = virt_to_page(addr
+ PAGE_SIZE
);
1411 #endif /* CONFIG_X86_64 && LINUX_VERSION_CODE */
1413 pages
[0] = vmalloc_to_page(addr
);
1414 pages
[1] = vmalloc_to_page(addr
+ PAGE_SIZE
);
1420 vaddr
= vmap(pages
, nr_pages
, VM_MAP
, PAGE_KERNEL
);
1423 return vaddr
+ offset_in_page(addr
);
1426 static abort_t
add_dependency_on_address(struct ksplice_pack
*pack
,
1429 struct ksplice_pack
*p
;
1431 __module_text_address(follow_trampolines(pack
, addr
));
1434 list_for_each_entry(p
, &pack
->update
->packs
, list
) {
1435 if (m
== p
->primary
)
1438 if (use_module(pack
->primary
, m
) != 1)
1443 static abort_t
apply_relocs(struct ksplice_pack
*pack
,
1444 const struct ksplice_reloc
*relocs
,
1445 const struct ksplice_reloc
*relocs_end
)
1447 const struct ksplice_reloc
*r
;
1448 for (r
= relocs
; r
< relocs_end
; r
++) {
1449 abort_t ret
= apply_reloc(pack
, r
);
1456 static abort_t
apply_reloc(struct ksplice_pack
*pack
,
1457 const struct ksplice_reloc
*r
)
1459 switch (r
->howto
->type
) {
1460 case KSPLICE_HOWTO_RELOC
:
1461 case KSPLICE_HOWTO_RELOC_PATCH
:
1462 return apply_howto_reloc(pack
, r
);
1463 case KSPLICE_HOWTO_DATE
:
1464 case KSPLICE_HOWTO_TIME
:
1465 return apply_howto_date(pack
, r
);
1467 ksdebug(pack
, "Unexpected howto type %d\n", r
->howto
->type
);
1472 static abort_t
apply_howto_reloc(struct ksplice_pack
*pack
,
1473 const struct ksplice_reloc
*r
)
1477 unsigned long sym_addr
;
1480 canary_ret
= contains_canary(pack
, r
->blank_addr
, r
->howto
);
1483 if (canary_ret
== 0) {
1484 ksdebug(pack
, "reloc: skipped %lx to %s+%lx (altinstr)\n",
1485 r
->blank_addr
, r
->symbol
->label
, r
->target_addend
);
1489 #ifdef KSPLICE_STANDALONE
1490 if (!bootstrapped
) {
1491 ret
= add_system_map_candidates(pack
,
1492 pack
->primary_system_map
,
1493 pack
->primary_system_map_end
,
1494 r
->symbol
->label
, &vals
);
1496 release_vals(&vals
);
1500 #endif /* KSPLICE_STANDALONE */
1501 ret
= lookup_symbol(pack
, r
->symbol
, &vals
);
1503 release_vals(&vals
);
1506 if (!singular(&vals
) || (r
->symbol
->vals
!= NULL
&&
1507 r
->howto
->type
== KSPLICE_HOWTO_RELOC_PATCH
)) {
1508 release_vals(&vals
);
1509 ksdebug(pack
, "Failed to find %s for reloc\n",
1511 return FAILED_TO_FIND
;
1513 sym_addr
= list_entry(vals
.next
, struct candidate_val
, list
)->val
;
1514 release_vals(&vals
);
1516 ret
= write_reloc_value(pack
, r
, r
->blank_addr
,
1517 r
->howto
->pcrel
? sym_addr
- r
->blank_addr
:
1522 ksdebug(pack
, "reloc: %lx to %s+%lx (S=%lx ", r
->blank_addr
,
1523 r
->symbol
->label
, r
->target_addend
, sym_addr
);
1524 switch (r
->howto
->size
) {
1526 ksdebug(pack
, "aft=%02x)\n", *(uint8_t *)r
->blank_addr
);
1529 ksdebug(pack
, "aft=%04x)\n", *(uint16_t *)r
->blank_addr
);
1532 ksdebug(pack
, "aft=%08x)\n", *(uint32_t *)r
->blank_addr
);
1534 #if BITS_PER_LONG >= 64
1536 ksdebug(pack
, "aft=%016llx)\n", *(uint64_t *)r
->blank_addr
);
1538 #endif /* BITS_PER_LONG */
1540 ksdebug(pack
, "Aborted. Invalid relocation size.\n");
1543 #ifdef KSPLICE_STANDALONE
1546 #endif /* KSPLICE_STANDALONE */
1548 /* Create labelvals so that we can verify our choices in the second
1549 round of run-pre matching that considers data sections. */
1550 ret
= create_labelval(pack
, r
->symbol
, sym_addr
, VAL
);
1554 return add_dependency_on_address(pack
, sym_addr
);
1557 static abort_t
apply_howto_date(struct ksplice_pack
*pack
,
1558 const struct ksplice_reloc
*r
)
1560 if (r
->symbol
->vals
!= NULL
) {
1561 ksdebug(pack
, "Failed to find %s for date\n", r
->symbol
->label
);
1562 return FAILED_TO_FIND
;
1564 memcpy((unsigned char *)r
->blank_addr
,
1565 (const unsigned char *)r
->symbol
->value
, r
->howto
->size
);
1569 static abort_t
read_reloc_value(struct ksplice_pack
*pack
,
1570 const struct ksplice_reloc
*r
,
1571 unsigned long addr
, unsigned long *valp
)
1573 unsigned char bytes
[sizeof(long)];
1575 const struct ksplice_reloc_howto
*howto
= r
->howto
;
1577 if (howto
->size
<= 0 || howto
->size
> sizeof(long)) {
1578 ksdebug(pack
, "Aborted. Invalid relocation size.\n");
1582 if (probe_kernel_read(bytes
, (void *)addr
, howto
->size
) == -EFAULT
)
1585 switch (howto
->size
) {
1587 val
= *(uint8_t *)bytes
;
1590 val
= *(uint16_t *)bytes
;
1593 val
= *(uint32_t *)bytes
;
1595 #if BITS_PER_LONG >= 64
1597 val
= *(uint64_t *)bytes
;
1599 #endif /* BITS_PER_LONG */
1601 ksdebug(pack
, "Aborted. Invalid relocation size.\n");
1605 val
&= howto
->dst_mask
;
1606 if (howto
->signed_addend
)
1607 val
|= -(val
& (howto
->dst_mask
& ~(howto
->dst_mask
>> 1)));
1608 val
<<= howto
->rightshift
;
1609 val
-= r
->insn_addend
+ r
->target_addend
;
1614 static abort_t
write_reloc_value(struct ksplice_pack
*pack
,
1615 const struct ksplice_reloc
*r
,
1616 unsigned long addr
, unsigned long sym_addr
)
1618 unsigned long val
= sym_addr
+ r
->target_addend
+ r
->insn_addend
;
1619 const struct ksplice_reloc_howto
*howto
= r
->howto
;
1620 val
>>= howto
->rightshift
;
1621 switch (howto
->size
) {
1623 *(uint8_t *)addr
= (*(uint8_t *)addr
& ~howto
->dst_mask
) |
1624 (val
& howto
->dst_mask
);
1627 *(uint16_t *)addr
= (*(uint16_t *)addr
& ~howto
->dst_mask
) |
1628 (val
& howto
->dst_mask
);
1631 *(uint32_t *)addr
= (*(uint32_t *)addr
& ~howto
->dst_mask
) |
1632 (val
& howto
->dst_mask
);
1634 #if BITS_PER_LONG >= 64
1636 *(uint64_t *)addr
= (*(uint64_t *)addr
& ~howto
->dst_mask
) |
1637 (val
& howto
->dst_mask
);
1639 #endif /* BITS_PER_LONG */
1641 ksdebug(pack
, "Aborted. Invalid relocation size.\n");
1645 if (read_reloc_value(pack
, r
, addr
, &val
) != OK
|| val
!= sym_addr
) {
1646 ksdebug(pack
, "Aborted. Relocation overflow.\n");
1653 static void __attribute__((noreturn
)) ksplice_deleted(void)
1655 printk(KERN_CRIT
"Called a kernel function deleted by Ksplice!\n");
1657 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
1658 /* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
1663 static abort_t
match_pack_sections(struct ksplice_pack
*pack
,
1664 bool consider_data_sections
)
1666 struct ksplice_section
*sect
;
1671 for (sect
= pack
->helper_sections
; sect
< pack
->helper_sections_end
;
1673 if ((sect
->flags
& KSPLICE_SECTION_DATA
) == 0 &&
1674 (sect
->flags
& KSPLICE_SECTION_STRING
) == 0 &&
1675 (sect
->flags
& KSPLICE_SECTION_MATCHED
) == 0)
1679 while (remaining
> 0) {
1681 for (sect
= pack
->helper_sections
;
1682 sect
< pack
->helper_sections_end
; sect
++) {
1683 if ((sect
->flags
& KSPLICE_SECTION_MATCHED
) != 0)
1685 if ((!consider_data_sections
&&
1686 (sect
->flags
& KSPLICE_SECTION_DATA
) != 0) ||
1687 (sect
->flags
& KSPLICE_SECTION_STRING
) != 0)
1689 ret
= find_section(pack
, sect
);
1691 sect
->flags
|= KSPLICE_SECTION_MATCHED
;
1692 if ((sect
->flags
& KSPLICE_SECTION_DATA
) == 0)
1695 } else if (ret
!= NO_MATCH
) {
1703 for (sect
= pack
->helper_sections
;
1704 sect
< pack
->helper_sections_end
; sect
++) {
1705 if ((sect
->flags
& KSPLICE_SECTION_MATCHED
) != 0 ||
1706 (sect
->flags
& KSPLICE_SECTION_STRING
) != 0)
1708 ksdebug(pack
, "run-pre: could not match %s "
1710 (sect
->flags
& KSPLICE_SECTION_DATA
) != 0 ?
1712 (sect
->flags
& KSPLICE_SECTION_RODATA
) != 0 ?
1713 "rodata" : "text", sect
->symbol
->label
);
1715 ksdebug(pack
, "Aborted. run-pre: could not match some "
1722 static abort_t
find_section(struct ksplice_pack
*pack
,
1723 struct ksplice_section
*sect
)
1727 unsigned long run_addr
;
1729 struct candidate_val
*v
, *n
;
1731 #ifdef KSPLICE_STANDALONE
1732 ret
= add_system_map_candidates(pack
, pack
->helper_system_map
,
1733 pack
->helper_system_map_end
,
1734 sect
->symbol
->label
, &vals
);
1736 release_vals(&vals
);
1739 #endif /* KSPLICE_STANDALONE */
1740 ret
= lookup_symbol(pack
, sect
->symbol
, &vals
);
1742 release_vals(&vals
);
1746 ksdebug(pack
, "run-pre: starting sect search for %s\n",
1747 sect
->symbol
->label
);
1749 list_for_each_entry_safe(v
, n
, &vals
, list
) {
1753 ret
= try_addr(pack
, sect
, run_addr
, NULL
, RUN_PRE_INITIAL
);
1754 if (ret
== NO_MATCH
) {
1757 } else if (ret
!= OK
) {
1758 release_vals(&vals
);
1763 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
1764 if (list_empty(&vals
) && (sect
->flags
& KSPLICE_SECTION_DATA
) == 0) {
1765 ret
= brute_search_all(pack
, sect
, &vals
);
1767 release_vals(&vals
);
1770 /* Make sure run-pre matching output is displayed if
1771 brute_search succeeds */
1772 if (singular(&vals
)) {
1773 run_addr
= list_entry(vals
.next
, struct candidate_val
,
1775 ret
= try_addr(pack
, sect
, run_addr
, NULL
,
1778 ksdebug(pack
, "run-pre: Debug run failed for "
1779 "sect %s:\n", sect
->symbol
->label
);
1780 release_vals(&vals
);
1785 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
1787 if (singular(&vals
)) {
1788 LIST_HEAD(safety_records
);
1789 run_addr
= list_entry(vals
.next
, struct candidate_val
,
1791 ret
= try_addr(pack
, sect
, run_addr
, &safety_records
,
1793 release_vals(&vals
);
1795 clear_list(&safety_records
, struct safety_record
, list
);
1796 ksdebug(pack
, "run-pre: Final run failed for sect "
1797 "%s:\n", sect
->symbol
->label
);
1799 list_splice(&safety_records
, &pack
->safety_records
);
1802 } else if (!list_empty(&vals
)) {
1803 struct candidate_val
*val
;
1804 ksdebug(pack
, "run-pre: multiple candidates for sect %s:\n",
1805 sect
->symbol
->label
);
1807 list_for_each_entry(val
, &vals
, list
) {
1809 ksdebug(pack
, "%lx\n", val
->val
);
1811 ksdebug(pack
, "...\n");
1815 release_vals(&vals
);
1818 release_vals(&vals
);
1822 static abort_t
try_addr(struct ksplice_pack
*pack
,
1823 struct ksplice_section
*sect
,
1824 unsigned long run_addr
,
1825 struct list_head
*safety_records
,
1826 enum run_pre_mode mode
)
1829 const struct module
*run_module
;
1831 if ((sect
->flags
& KSPLICE_SECTION_RODATA
) != 0 ||
1832 (sect
->flags
& KSPLICE_SECTION_DATA
) != 0)
1833 run_module
= __module_data_address(run_addr
);
1835 run_module
= __module_text_address(run_addr
);
1836 if (run_module
== pack
->primary
) {
1837 ksdebug(pack
, "run-pre: unexpected address %lx in primary "
1838 "module %s for sect %s\n", run_addr
, run_module
->name
,
1839 sect
->symbol
->label
);
1842 if (!patches_module(run_module
, pack
->target
)) {
1843 ksdebug(pack
, "run-pre: ignoring address %lx in other module "
1844 "%s for sect %s\n", run_addr
, run_module
== NULL
?
1845 "vmlinux" : run_module
->name
, sect
->symbol
->label
);
1849 ret
= create_labelval(pack
, sect
->symbol
, run_addr
, TEMP
);
1853 #ifdef CONFIG_FUNCTION_DATA_SECTIONS
1854 ret
= run_pre_cmp(pack
, sect
, run_addr
, safety_records
, mode
);
1855 #else /* !CONFIG_FUNCTION_DATA_SECTIONS */
1856 if ((sect
->flags
& KSPLICE_SECTION_TEXT
) != 0)
1857 ret
= arch_run_pre_cmp(pack
, sect
, run_addr
, safety_records
,
1860 ret
= run_pre_cmp(pack
, sect
, run_addr
, safety_records
, mode
);
1861 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
1862 if (ret
== NO_MATCH
&& mode
!= RUN_PRE_FINAL
) {
1863 set_temp_labelvals(pack
, NOVAL
);
1864 ksdebug(pack
, "run-pre: %s sect %s does not match (r_a=%lx "
1866 (sect
->flags
& KSPLICE_SECTION_RODATA
) != 0 ? "rodata" :
1867 (sect
->flags
& KSPLICE_SECTION_DATA
) != 0 ? "data" :
1868 "text", sect
->symbol
->label
, run_addr
, sect
->address
,
1870 ksdebug(pack
, "run-pre: ");
1871 if (pack
->update
->debug
>= 1) {
1872 #ifdef CONFIG_FUNCTION_DATA_SECTIONS
1873 ret
= run_pre_cmp(pack
, sect
, run_addr
, safety_records
,
1875 #else /* !CONFIG_FUNCTION_DATA_SECTIONS */
1876 if ((sect
->flags
& KSPLICE_SECTION_TEXT
) != 0)
1877 ret
= arch_run_pre_cmp(pack
, sect
, run_addr
,
1881 ret
= run_pre_cmp(pack
, sect
, run_addr
,
1884 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
1885 set_temp_labelvals(pack
, NOVAL
);
1887 ksdebug(pack
, "\n");
1889 } else if (ret
!= OK
) {
1890 set_temp_labelvals(pack
, NOVAL
);
1894 if (mode
!= RUN_PRE_FINAL
) {
1895 set_temp_labelvals(pack
, NOVAL
);
1896 ksdebug(pack
, "run-pre: candidate for sect %s=%lx\n",
1897 sect
->symbol
->label
, run_addr
);
1901 set_temp_labelvals(pack
, VAL
);
1902 ksdebug(pack
, "run-pre: found sect %s=%lx\n", sect
->symbol
->label
,
1907 static abort_t
run_pre_cmp(struct ksplice_pack
*pack
,
1908 const struct ksplice_section
*sect
,
1909 unsigned long run_addr
,
1910 struct list_head
*safety_records
,
1911 enum run_pre_mode mode
)
1915 const struct ksplice_reloc
*r
, *finger
;
1916 const unsigned char *pre
, *run
, *pre_start
, *run_start
;
1917 unsigned char runval
;
1919 pre_start
= (const unsigned char *)sect
->address
;
1920 run_start
= (const unsigned char *)run_addr
;
1922 finger
= init_reloc_search(pack
, sect
);
1926 while (pre
< pre_start
+ sect
->size
) {
1927 unsigned long offset
= pre
- pre_start
;
1928 ret
= lookup_reloc(pack
, &finger
, (unsigned long)pre
, &r
);
1930 ret
= handle_reloc(pack
, sect
, r
, (unsigned long)run
,
1933 if (mode
== RUN_PRE_INITIAL
)
1934 ksdebug(pack
, "reloc in sect does not "
1935 "match after %lx/%lx bytes\n",
1936 offset
, sect
->size
);
1939 if (mode
== RUN_PRE_DEBUG
)
1940 print_bytes(pack
, run
, r
->howto
->size
, pre
,
1942 pre
+= r
->howto
->size
;
1943 run
+= r
->howto
->size
;
1946 } else if (ret
!= NO_MATCH
) {
1950 if ((sect
->flags
& KSPLICE_SECTION_TEXT
) != 0) {
1951 ret
= handle_paravirt(pack
, (unsigned long)pre
,
1952 (unsigned long)run
, &matched
);
1956 if (mode
== RUN_PRE_DEBUG
)
1957 print_bytes(pack
, run
, matched
, pre
,
1965 if (probe_kernel_read(&runval
, (void *)run
, 1) == -EFAULT
) {
1966 if (mode
== RUN_PRE_INITIAL
)
1967 ksdebug(pack
, "sect unmapped after %lx/%lx "
1968 "bytes\n", offset
, sect
->size
);
1972 if (runval
!= *pre
&&
1973 (sect
->flags
& KSPLICE_SECTION_DATA
) == 0) {
1974 if (mode
== RUN_PRE_INITIAL
)
1975 ksdebug(pack
, "sect does not match after "
1976 "%lx/%lx bytes\n", offset
, sect
->size
);
1977 if (mode
== RUN_PRE_DEBUG
) {
1978 print_bytes(pack
, run
, 1, pre
, 1);
1979 ksdebug(pack
, "[p_o=%lx] ! ", offset
);
1980 print_bytes(pack
, run
+ 1, 2, pre
+ 1, 2);
1984 if (mode
== RUN_PRE_DEBUG
)
1985 print_bytes(pack
, run
, 1, pre
, 1);
1989 return create_safety_record(pack
, sect
, safety_records
, run_addr
,
/*
 * Debug helper for run-pre mismatches: bytes equal in both streams
 * print as "rr ", mismatches as "rr/pp ", and length overhang as
 * "rr/ " (run only) or "/pp " (pre only).
 */
static void print_bytes(struct ksplice_pack *pack,
			const unsigned char *run, int runc,
			const unsigned char *pre, int prec)
{
	int o;
	int matched = min(runc, prec);
	for (o = 0; o < matched; o++) {
		if (run[o] == pre[o])
			ksdebug(pack, "%02x ", run[o]);
		else
			ksdebug(pack, "%02x/%02x ", run[o], pre[o]);
	}
	for (o = matched; o < runc; o++)
		ksdebug(pack, "%02x/ ", run[o]);
	for (o = matched; o < prec; o++)
		ksdebug(pack, "/%02x ", pre[o]);
}
2011 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
2012 static abort_t
brute_search(struct ksplice_pack
*pack
,
2013 struct ksplice_section
*sect
,
2014 const void *start
, unsigned long len
,
2015 struct list_head
*vals
)
2021 for (addr
= (unsigned long)start
; addr
< (unsigned long)start
+ len
;
2023 if (addr
% 100000 == 0)
2026 if (probe_kernel_read(&run
, (void *)addr
, 1) == -EFAULT
)
2029 pre
= *(const unsigned char *)(sect
->address
);
2034 ret
= try_addr(pack
, sect
, addr
, NULL
, RUN_PRE_INITIAL
);
2036 ret
= add_candidate_val(pack
, vals
, addr
);
2039 } else if (ret
!= NO_MATCH
) {
2047 static abort_t
brute_search_all(struct ksplice_pack
*pack
,
2048 struct ksplice_section
*sect
,
2049 struct list_head
*vals
)
2055 ksdebug(pack
, "brute_search: searching for %s\n", sect
->symbol
->label
);
2056 saved_debug
= pack
->update
->debug
;
2057 pack
->update
->debug
= 0;
2059 list_for_each_entry(m
, &modules
, list
) {
2060 if (!patches_module(m
, pack
->target
) || m
== pack
->primary
)
2062 ret
= brute_search(pack
, sect
, m
->module_core
, m
->core_size
,
2066 ret
= brute_search(pack
, sect
, m
->module_init
, m
->init_size
,
2072 ret
= brute_search(pack
, sect
, (const void *)init_mm
.start_code
,
2073 init_mm
.end_code
- init_mm
.start_code
, vals
);
2076 pack
->update
->debug
= saved_debug
;
2079 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
2082 unsigned long address
;
2086 static int reloc_bsearch_compare(const void *key
, const void *elt
)
2088 const struct range
*range
= key
;
2089 const struct ksplice_reloc
*r
= elt
;
2090 if (range
->address
+ range
->size
<= r
->blank_addr
)
2092 if (range
->address
> r
->blank_addr
)
2097 static const struct ksplice_reloc
*find_reloc(const struct ksplice_reloc
*start
,
2098 const struct ksplice_reloc
*end
,
2099 unsigned long address
,
2102 const struct ksplice_reloc
*r
;
2103 struct range range
= { address
, size
};
2104 r
= bsearch((void *)&range
, start
, end
- start
, sizeof(*r
),
2105 reloc_bsearch_compare
);
2108 while (r
> start
&& (r
- 1)->blank_addr
>= address
)
2113 static const struct ksplice_reloc
*
2114 init_reloc_search(struct ksplice_pack
*pack
, const struct ksplice_section
*sect
)
2116 const struct ksplice_reloc
*r
;
2117 r
= find_reloc(pack
->helper_relocs
, pack
->helper_relocs_end
,
2118 sect
->address
, sect
->size
);
2120 return pack
->helper_relocs_end
;
2124 static abort_t
lookup_reloc(struct ksplice_pack
*pack
,
2125 const struct ksplice_reloc
**fingerp
,
2127 const struct ksplice_reloc
**relocp
)
2129 const struct ksplice_reloc
*r
= *fingerp
;
2132 while (r
< pack
->helper_relocs_end
&&
2133 addr
>= r
->blank_addr
+ r
->howto
->size
&&
2134 !(addr
== r
->blank_addr
&& r
->howto
->size
== 0))
2137 if (r
== pack
->helper_relocs_end
)
2139 if (addr
< r
->blank_addr
)
2142 if (r
->howto
->type
!= KSPLICE_HOWTO_RELOC
)
2145 canary_ret
= contains_canary(pack
, r
->blank_addr
, r
->howto
);
2148 if (canary_ret
== 0) {
2149 ksdebug(pack
, "run-pre: reloc skipped at p_a=%lx to %s+%lx "
2150 "(altinstr)\n", r
->blank_addr
, r
->symbol
->label
,
2154 if (addr
!= r
->blank_addr
) {
2155 ksdebug(pack
, "Invalid nonzero relocation offset\n");
2161 static abort_t
handle_reloc(struct ksplice_pack
*pack
,
2162 const struct ksplice_section
*sect
,
2163 const struct ksplice_reloc
*r
,
2164 unsigned long run_addr
, enum run_pre_mode mode
)
2166 switch (r
->howto
->type
) {
2167 case KSPLICE_HOWTO_RELOC
:
2168 return handle_howto_reloc(pack
, sect
, r
, run_addr
, mode
);
2169 case KSPLICE_HOWTO_DATE
:
2170 case KSPLICE_HOWTO_TIME
:
2171 return handle_howto_date(pack
, sect
, r
, run_addr
, mode
);
2172 case KSPLICE_HOWTO_BUG
:
2173 return handle_bug(pack
, r
, run_addr
);
2174 case KSPLICE_HOWTO_EXTABLE
:
2175 return handle_extable(pack
, r
, run_addr
);
2177 ksdebug(pack
, "Unexpected howto type %d\n", r
->howto
->type
);
2182 static abort_t
handle_howto_date(struct ksplice_pack
*pack
,
2183 const struct ksplice_section
*sect
,
2184 const struct ksplice_reloc
*r
,
2185 unsigned long run_addr
, enum run_pre_mode mode
)
2188 char *buf
= kmalloc(r
->howto
->size
, GFP_KERNEL
);
2191 return OUT_OF_MEMORY
;
2192 if (probe_kernel_read(buf
, (void *)run_addr
, r
->howto
->size
) == -EFAULT
) {
2197 switch (r
->howto
->type
) {
2198 case KSPLICE_HOWTO_TIME
:
2199 if (isdigit(buf
[0]) && isdigit(buf
[1]) && buf
[2] == ':' &&
2200 isdigit(buf
[3]) && isdigit(buf
[4]) && buf
[5] == ':' &&
2201 isdigit(buf
[6]) && isdigit(buf
[7]))
2206 case KSPLICE_HOWTO_DATE
:
2207 if (isalpha(buf
[0]) && isalpha(buf
[1]) && isalpha(buf
[2]) &&
2208 buf
[3] == ' ' && (buf
[4] == ' ' || isdigit(buf
[4])) &&
2209 isdigit(buf
[5]) && buf
[6] == ' ' && isdigit(buf
[7]) &&
2210 isdigit(buf
[8]) && isdigit(buf
[9]) && isdigit(buf
[10]))
2218 if (ret
== NO_MATCH
&& mode
== RUN_PRE_INITIAL
)
2219 ksdebug(pack
, "%s string: \"%.*s\" does not match format\n",
2220 r
->howto
->type
== KSPLICE_HOWTO_DATE
? "date" : "time",
2221 r
->howto
->size
, buf
);
2225 ret
= create_labelval(pack
, r
->symbol
, run_addr
, TEMP
);
2231 static abort_t
handle_howto_reloc(struct ksplice_pack
*pack
,
2232 const struct ksplice_section
*sect
,
2233 const struct ksplice_reloc
*r
,
2234 unsigned long run_addr
,
2235 enum run_pre_mode mode
)
2237 struct ksplice_section
*sym_sect
= symbol_section(pack
, r
->symbol
);
2238 unsigned long offset
= r
->target_addend
;
2242 ret
= read_reloc_value(pack
, r
, run_addr
, &val
);
2245 if (r
->howto
->pcrel
)
2248 #ifdef KSPLICE_STANDALONE
2249 /* The match_map is only used in KSPLICE_STANDALONE */
2250 if (sym_sect
== NULL
|| sym_sect
->match_map
== NULL
|| offset
== 0) {
2252 } else if (offset
< 0 || offset
>= sym_sect
->size
) {
2253 ksdebug(pack
, "Out of range relocation: %s+%lx -> %s+%lx",
2254 sect
->symbol
->label
, r
->blank_addr
- sect
->address
,
2255 r
->symbol
->label
, offset
);
2257 } else if (sect
== sym_sect
&& sect
->match_map
[offset
] == NULL
) {
2258 sym_sect
->match_map
[offset
] =
2259 (const unsigned char *)r
->symbol
->value
+ offset
;
2260 } else if (sect
== sym_sect
&& (unsigned long)sect
->match_map
[offset
] ==
2261 r
->symbol
->value
+ offset
) {
2263 } else if (sect
== sym_sect
) {
2264 ksdebug(pack
, "Relocations to nonmatching locations within "
2265 "section %s: %lx does not match %lx\n",
2266 sect
->symbol
->label
, offset
,
2267 (unsigned long)sect
->match_map
[offset
] -
2270 } else if ((sym_sect
->flags
& KSPLICE_SECTION_MATCHED
) == 0) {
2271 if (mode
== RUN_PRE_INITIAL
)
2272 ksdebug(pack
, "Delaying matching of %s due to reloc "
2273 "from to unmatching section: %s+%lx\n",
2274 sect
->symbol
->label
, r
->symbol
->label
, offset
);
2276 } else if (sym_sect
->match_map
[offset
] == NULL
) {
2277 if (mode
== RUN_PRE_INITIAL
)
2278 ksdebug(pack
, "Relocation not to instruction boundary: "
2279 "%s+%lx -> %s+%lx", sect
->symbol
->label
,
2280 r
->blank_addr
- sect
->address
, r
->symbol
->label
,
2283 } else if ((unsigned long)sym_sect
->match_map
[offset
] !=
2284 r
->symbol
->value
+ offset
) {
2285 if (mode
== RUN_PRE_INITIAL
)
2286 ksdebug(pack
, "Match map shift %s+%lx: %lx != %lx\n",
2287 r
->symbol
->label
, offset
,
2288 r
->symbol
->value
+ offset
,
2289 (unsigned long)sym_sect
->match_map
[offset
]);
2290 val
+= r
->symbol
->value
+ offset
-
2291 (unsigned long)sym_sect
->match_map
[offset
];
2293 #endif /* KSPLICE_STANDALONE */
2295 if (mode
== RUN_PRE_INITIAL
)
2296 ksdebug(pack
, "run-pre: reloc at r_a=%lx p_a=%lx to %s+%lx: "
2297 "found %s = %lx\n", run_addr
, r
->blank_addr
,
2298 r
->symbol
->label
, offset
, r
->symbol
->label
, val
);
2300 if (contains_canary(pack
, run_addr
, r
->howto
) != 0) {
2301 ksdebug(pack
, "Aborted. Unexpected canary in run code at %lx"
2306 if ((sect
->flags
& KSPLICE_SECTION_DATA
) != 0 &&
2307 sect
->symbol
== r
->symbol
)
2309 ret
= create_labelval(pack
, r
->symbol
, val
, TEMP
);
2310 if (ret
== NO_MATCH
&& mode
== RUN_PRE_INITIAL
)
2311 ksdebug(pack
, "run-pre: reloc at r_a=%lx p_a=%lx: labelval %s "
2312 "= %lx does not match expected %lx\n", run_addr
,
2313 r
->blank_addr
, r
->symbol
->label
, r
->symbol
->value
, val
);
2317 if (sym_sect
!= NULL
&& (sym_sect
->flags
& KSPLICE_SECTION_MATCHED
) == 0
2318 && (sym_sect
->flags
& KSPLICE_SECTION_STRING
) != 0) {
2319 if (mode
== RUN_PRE_INITIAL
)
2320 ksdebug(pack
, "Recursively comparing string section "
2321 "%s\n", sym_sect
->symbol
->label
);
2322 else if (mode
== RUN_PRE_DEBUG
)
2323 ksdebug(pack
, "[str start] ");
2324 ret
= run_pre_cmp(pack
, sym_sect
, val
, NULL
, mode
);
2325 if (mode
== RUN_PRE_DEBUG
)
2326 ksdebug(pack
, "[str end] ");
2327 if (ret
== OK
&& mode
== RUN_PRE_INITIAL
)
2328 ksdebug(pack
, "Successfully matched string section %s"
2329 "\n", sym_sect
->symbol
->label
);
2330 else if (mode
== RUN_PRE_INITIAL
)
2331 ksdebug(pack
, "Failed to match string section %s\n",
2332 sym_sect
->symbol
->label
);
2337 static int symbol_section_bsearch_compare(const void *a
, const void *b
)
2339 const struct ksplice_symbol
*sym
= a
;
2340 const struct ksplice_section
*sect
= b
;
2341 return strcmp(sym
->label
, sect
->symbol
->label
);
2344 static int compare_section_labels(const void *va
, const void *vb
)
2346 const struct ksplice_section
*a
= va
, *b
= vb
;
2347 return strcmp(a
->symbol
->label
, b
->symbol
->label
);
2350 static struct ksplice_section
*symbol_section(struct ksplice_pack
*pack
,
2351 const struct ksplice_symbol
*sym
)
2353 return bsearch(sym
, pack
->helper_sections
, pack
->helper_sections_end
-
2354 pack
->helper_sections
, sizeof(struct ksplice_section
),
2355 symbol_section_bsearch_compare
);
2358 static const struct ksplice_reloc
*patch_reloc(struct ksplice_pack
*pack
,
2359 const struct ksplice_patch
*p
)
2361 unsigned long addr
= (unsigned long)&p
->oldaddr
;
2362 const struct ksplice_reloc
*r
=
2363 find_reloc(pack
->primary_relocs
, pack
->primary_relocs_end
, addr
,
2365 if (r
== NULL
|| r
->blank_addr
< addr
||
2366 r
->blank_addr
>= addr
+ sizeof(addr
))
2371 static abort_t
lookup_symbol(struct ksplice_pack
*pack
,
2372 const struct ksplice_symbol
*ksym
,
2373 struct list_head
*vals
)
2377 #ifdef KSPLICE_STANDALONE
2380 #endif /* KSPLICE_STANDALONE */
2382 if (ksym
->vals
== NULL
) {
2384 ksdebug(pack
, "using detected sym %s=%lx\n", ksym
->label
,
2386 return add_candidate_val(pack
, vals
, ksym
->value
);
2389 #ifdef CONFIG_MODULE_UNLOAD
2390 if (strcmp(ksym
->label
, "cleanup_module") == 0 && pack
->target
!= NULL
2391 && pack
->target
->exit
!= NULL
) {
2392 ret
= add_candidate_val(pack
, vals
,
2393 (unsigned long)pack
->target
->exit
);
2399 if (ksym
->name
!= NULL
) {
2400 struct candidate_val
*val
;
2401 list_for_each_entry(val
, ksym
->vals
, list
) {
2402 ret
= add_candidate_val(pack
, vals
, val
->val
);
2407 ret
= new_export_lookup(pack
, pack
->update
, ksym
->name
, vals
);
2415 #ifdef KSPLICE_STANDALONE
2417 add_system_map_candidates(struct ksplice_pack
*pack
,
2418 const struct ksplice_system_map
*start
,
2419 const struct ksplice_system_map
*end
,
2420 const char *label
, struct list_head
*vals
)
2425 const struct ksplice_system_map
*smap
;
2427 /* Some Fedora kernel releases have System.map files whose symbol
2428 * addresses disagree with the running kernel by a constant address
2429 * offset because of the CONFIG_PHYSICAL_START and CONFIG_PHYSICAL_ALIGN
2430 * values used to compile these kernels. This constant address offset
2431 * is always a multiple of 0x100000.
2433 * If we observe an offset that is NOT a multiple of 0x100000, then the
2434 * user provided us with an incorrect System.map file, and we should
2436 * If we observe an offset that is a multiple of 0x100000, then we can
2437 * adjust the System.map address values accordingly and proceed.
2439 off
= (unsigned long)printk
- pack
->map_printk
;
2440 if (off
& 0xfffff) {
2441 ksdebug(pack
, "Aborted. System.map does not match kernel.\n");
2442 return BAD_SYSTEM_MAP
;
2445 smap
= bsearch(label
, start
, end
- start
, sizeof(*smap
),
2446 system_map_bsearch_compare
);
2450 for (i
= 0; i
< smap
->nr_candidates
; i
++) {
2451 ret
= add_candidate_val(pack
, vals
, smap
->candidates
[i
] + off
);
2458 static int system_map_bsearch_compare(const void *key
, const void *elt
)
2460 const struct ksplice_system_map
*map
= elt
;
2461 const char *label
= key
;
2462 return strcmp(label
, map
->label
);
2464 #endif /* !KSPLICE_STANDALONE */
2466 static abort_t
new_export_lookup(struct ksplice_pack
*p
, struct update
*update
,
2467 const char *name
, struct list_head
*vals
)
2469 struct ksplice_pack
*pack
;
2470 struct ksplice_export
*exp
;
2471 list_for_each_entry(pack
, &update
->packs
, list
) {
2472 for (exp
= pack
->exports
; exp
< pack
->exports_end
; exp
++) {
2473 struct ksplice_reloc_howto howto
;
2474 howto
.size
= sizeof(unsigned long);
2475 howto
.type
= KSPLICE_HOWTO_RELOC
;
2476 howto
.dst_mask
= -1;
2477 if (strcmp(exp
->new_name
, name
) == 0 &&
2479 contains_canary(pack
,
2480 (unsigned long)&exp
->sym
->value
,
2482 return add_candidate_val(p
, vals
,
2489 static abort_t
apply_patches(struct update
*update
)
2493 struct ksplice_pack
*pack
;
2494 const struct ksplice_section
*sect
;
2496 list_for_each_entry(pack
, &update
->packs
, list
) {
2497 for (sect
= pack
->primary_sections
;
2498 sect
< pack
->primary_sections_end
; sect
++) {
2499 struct safety_record
*rec
= kmalloc(sizeof(*rec
),
2502 return OUT_OF_MEMORY
;
2503 rec
->addr
= sect
->address
;
2504 rec
->size
= sect
->size
;
2505 rec
->label
= sect
->symbol
->label
;
2506 rec
->first_byte_safe
= false;
2507 list_add(&rec
->list
, &pack
->safety_records
);
2511 ret
= map_trampoline_pages(update
);
2514 for (i
= 0; i
< 5; i
++) {
2515 cleanup_conflicts(update
);
2516 #ifdef KSPLICE_STANDALONE
2518 #endif /* KSPLICE_STANDALONE */
2519 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
2520 ret
= (__force abort_t
)stop_machine(__apply_patches
, update
,
2522 #else /* LINUX_VERSION_CODE < */
2523 /* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
2524 ret
= (__force abort_t
)stop_machine_run(__apply_patches
, update
,
2526 #endif /* LINUX_VERSION_CODE */
2527 #ifdef KSPLICE_STANDALONE
2529 #endif /* KSPLICE_STANDALONE */
2530 if (ret
!= CODE_BUSY
)
2532 set_current_state(TASK_INTERRUPTIBLE
);
2533 schedule_timeout(msecs_to_jiffies(1000));
2535 unmap_trampoline_pages(update
);
2537 if (ret
== CODE_BUSY
) {
2538 print_conflicts(update
);
2539 _ksdebug(update
, "Aborted %s. stack check: to-be-replaced "
2540 "code is busy.\n", update
->kid
);
2541 } else if (ret
== ALREADY_REVERSED
) {
2542 _ksdebug(update
, "Aborted %s. Ksplice update %s is already "
2543 "reversed.\n", update
->kid
, update
->kid
);
2549 _ksdebug(update
, "Atomic patch insertion for %s complete\n",
2554 static abort_t
reverse_patches(struct update
*update
)
2558 struct ksplice_pack
*pack
;
2560 clear_debug_buf(update
);
2561 ret
= init_debug_buf(update
);
2565 _ksdebug(update
, "Preparing to reverse %s\n", update
->kid
);
2567 ret
= map_trampoline_pages(update
);
2570 for (i
= 0; i
< 5; i
++) {
2571 cleanup_conflicts(update
);
2572 clear_list(&update
->conflicts
, struct conflict
, list
);
2573 #ifdef KSPLICE_STANDALONE
2575 #endif /* KSPLICE_STANDALONE */
2576 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
2577 ret
= (__force abort_t
)stop_machine(__reverse_patches
, update
,
2579 #else /* LINUX_VERSION_CODE < */
2580 /* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
2581 ret
= (__force abort_t
)stop_machine_run(__reverse_patches
,
2583 #endif /* LINUX_VERSION_CODE */
2584 #ifdef KSPLICE_STANDALONE
2586 #endif /* KSPLICE_STANDALONE */
2587 if (ret
!= CODE_BUSY
)
2589 set_current_state(TASK_INTERRUPTIBLE
);
2590 schedule_timeout(msecs_to_jiffies(1000));
2592 unmap_trampoline_pages(update
);
2594 if (ret
== CODE_BUSY
) {
2595 print_conflicts(update
);
2596 _ksdebug(update
, "Aborted %s. stack check: to-be-reversed "
2597 "code is busy.\n", update
->kid
);
2598 } else if (ret
== MODULE_BUSY
) {
2599 _ksdebug(update
, "Update %s is in use by another module\n",
2606 list_for_each_entry(pack
, &update
->packs
, list
)
2607 clear_list(&pack
->safety_records
, struct safety_record
, list
);
2609 _ksdebug(update
, "Atomic patch removal for %s complete\n", update
->kid
);
2613 static int __apply_patches(void *updateptr
)
2615 struct update
*update
= updateptr
;
2616 struct ksplice_pack
*pack
;
2617 struct ksplice_patch
*p
;
2618 struct ksplice_export
*exp
;
2621 if (update
->stage
== STAGE_APPLIED
)
2622 return (__force
int)OK
;
2624 if (update
->stage
!= STAGE_PREPARING
)
2625 return (__force
int)UNEXPECTED
;
2627 ret
= check_each_task(update
);
2629 return (__force
int)ret
;
2631 list_for_each_entry(pack
, &update
->packs
, list
) {
2632 if (try_module_get(pack
->primary
) != 1) {
2633 struct ksplice_pack
*pack1
;
2634 list_for_each_entry(pack1
, &update
->packs
, list
) {
2637 module_put(pack1
->primary
);
2639 module_put(THIS_MODULE
);
2640 return (__force
int)UNEXPECTED
;
2644 update
->stage
= STAGE_APPLIED
;
2645 #ifdef TAINT_KSPLICE
2646 add_taint(TAINT_KSPLICE
);
2649 list_for_each_entry(pack
, &update
->packs
, list
)
2650 list_add(&pack
->module_list_entry
.list
, &ksplice_module_list
);
2652 list_for_each_entry(pack
, &update
->packs
, list
) {
2653 for (exp
= pack
->exports
; exp
< pack
->exports_end
; exp
++)
2654 exp
->sym
->name
= exp
->new_name
;
2657 list_for_each_entry(pack
, &update
->packs
, list
) {
2658 for (p
= pack
->patches
; p
< pack
->patches_end
; p
++)
2659 insert_trampoline(p
);
2661 return (__force
int)OK
;
2664 static int __reverse_patches(void *updateptr
)
2666 struct update
*update
= updateptr
;
2667 struct ksplice_pack
*pack
;
2668 const struct ksplice_patch
*p
;
2669 struct ksplice_export
*exp
;
2672 if (update
->stage
!= STAGE_APPLIED
)
2673 return (__force
int)OK
;
2675 #ifdef CONFIG_MODULE_UNLOAD
2676 list_for_each_entry(pack
, &update
->packs
, list
) {
2677 if (module_refcount(pack
->primary
) != 1)
2678 return (__force
int)MODULE_BUSY
;
2680 #endif /* CONFIG_MODULE_UNLOAD */
2682 ret
= check_each_task(update
);
2684 return (__force
int)ret
;
2686 list_for_each_entry(pack
, &update
->packs
, list
) {
2687 for (p
= pack
->patches
; p
< pack
->patches_end
; p
++) {
2688 ret
= verify_trampoline(pack
, p
);
2690 return (__force
int)ret
;
2694 update
->stage
= STAGE_REVERSED
;
2696 list_for_each_entry(pack
, &update
->packs
, list
)
2697 module_put(pack
->primary
);
2699 list_for_each_entry(pack
, &update
->packs
, list
)
2700 list_del(&pack
->module_list_entry
.list
);
2702 list_for_each_entry(pack
, &update
->packs
, list
) {
2703 for (exp
= pack
->exports
; exp
< pack
->exports_end
; exp
++)
2704 exp
->sym
->name
= exp
->saved_name
;
2707 list_for_each_entry(pack
, &update
->packs
, list
) {
2708 for (p
= pack
->patches
; p
< pack
->patches_end
; p
++)
2709 remove_trampoline(p
);
2711 return (__force
int)OK
;
2714 static abort_t
check_each_task(struct update
*update
)
2716 const struct task_struct
*g
, *p
;
2717 abort_t status
= OK
, ret
;
2718 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
2719 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
2720 read_lock(&tasklist_lock
);
2721 #endif /* LINUX_VERSION_CODE */
2722 do_each_thread(g
, p
) {
2723 /* do_each_thread is a double loop! */
2724 ret
= check_task(update
, p
, false);
2726 check_task(update
, p
, true);
2729 if (ret
!= OK
&& ret
!= CODE_BUSY
)
2730 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
2731 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
2733 #else /* LINUX_VERSION_CODE < */
2735 #endif /* LINUX_VERSION_CODE */
2736 } while_each_thread(g
, p
);
2737 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
2738 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
2740 read_unlock(&tasklist_lock
);
2741 #endif /* LINUX_VERSION_CODE */
2745 static abort_t
check_task(struct update
*update
,
2746 const struct task_struct
*t
, bool rerun
)
2748 abort_t status
, ret
;
2749 struct conflict
*conf
= NULL
;
2752 conf
= kmalloc(sizeof(*conf
), GFP_ATOMIC
);
2754 return OUT_OF_MEMORY
;
2755 conf
->process_name
= kstrdup(t
->comm
, GFP_ATOMIC
);
2756 if (conf
->process_name
== NULL
) {
2758 return OUT_OF_MEMORY
;
2761 INIT_LIST_HEAD(&conf
->stack
);
2762 list_add(&conf
->list
, &update
->conflicts
);
2765 status
= check_address(update
, conf
, KSPLICE_IP(t
));
2767 ret
= check_stack(update
, conf
, task_thread_info(t
),
2768 (unsigned long *)__builtin_frame_address(0));
2771 } else if (!task_curr(t
)) {
2772 ret
= check_stack(update
, conf
, task_thread_info(t
),
2773 (unsigned long *)KSPLICE_SP(t
));
2776 } else if (!is_stop_machine(t
)) {
2777 status
= UNEXPECTED_RUNNING_TASK
;
2782 static abort_t
check_stack(struct update
*update
, struct conflict
*conf
,
2783 const struct thread_info
*tinfo
,
2784 const unsigned long *stack
)
2786 abort_t status
= OK
, ret
;
2789 while (valid_stack_ptr(tinfo
, stack
)) {
2791 ret
= check_address(update
, conf
, addr
);
2798 static abort_t
check_address(struct update
*update
,
2799 struct conflict
*conf
, unsigned long addr
)
2801 abort_t status
= OK
, ret
;
2802 const struct safety_record
*rec
;
2803 struct ksplice_pack
*pack
;
2804 struct conflict_addr
*ca
= NULL
;
2807 ca
= kmalloc(sizeof(*ca
), GFP_ATOMIC
);
2809 return OUT_OF_MEMORY
;
2811 ca
->has_conflict
= false;
2813 list_add(&ca
->list
, &conf
->stack
);
2816 list_for_each_entry(pack
, &update
->packs
, list
) {
2817 list_for_each_entry(rec
, &pack
->safety_records
, list
) {
2818 ret
= check_record(ca
, rec
, addr
);
2826 static abort_t
check_record(struct conflict_addr
*ca
,
2827 const struct safety_record
*rec
, unsigned long addr
)
2829 if ((addr
> rec
->addr
&& addr
< rec
->addr
+ rec
->size
) ||
2830 (addr
== rec
->addr
&& !rec
->first_byte_safe
)) {
2832 ca
->label
= rec
->label
;
2833 ca
->has_conflict
= true;
2840 static bool is_stop_machine(const struct task_struct
*t
)
2842 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
2844 if (!starts_with(t
->comm
, "kstop"))
2846 num
= t
->comm
+ strlen("kstop");
2847 return num
[strspn(num
, "0123456789")] == '\0';
2848 #else /* LINUX_VERSION_CODE < */
2849 return strcmp(t
->comm
, "kstopmachine") == 0;
2850 #endif /* LINUX_VERSION_CODE */
2853 static void cleanup_conflicts(struct update
*update
)
2855 struct conflict
*conf
;
2856 list_for_each_entry(conf
, &update
->conflicts
, list
) {
2857 clear_list(&conf
->stack
, struct conflict_addr
, list
);
2858 kfree(conf
->process_name
);
2860 clear_list(&update
->conflicts
, struct conflict
, list
);
2863 static void print_conflicts(struct update
*update
)
2865 const struct conflict
*conf
;
2866 const struct conflict_addr
*ca
;
2867 list_for_each_entry(conf
, &update
->conflicts
, list
) {
2868 _ksdebug(update
, "stack check: pid %d (%s):", conf
->pid
,
2869 conf
->process_name
);
2870 list_for_each_entry(ca
, &conf
->stack
, list
) {
2871 _ksdebug(update
, " %lx", ca
->addr
);
2872 if (ca
->has_conflict
)
2873 _ksdebug(update
, " [<-CONFLICT]");
2875 _ksdebug(update
, "\n");
2879 static void insert_trampoline(struct ksplice_patch
*p
)
2881 mm_segment_t old_fs
= get_fs();
2883 memcpy(p
->saved
, p
->vaddr
, p
->size
);
2884 memcpy(p
->vaddr
, p
->contents
, p
->size
);
2885 flush_icache_range(p
->oldaddr
, p
->oldaddr
+ p
->size
);
2889 static abort_t
verify_trampoline(struct ksplice_pack
*pack
,
2890 const struct ksplice_patch
*p
)
2892 if (memcmp(p
->vaddr
, p
->contents
, p
->size
) != 0) {
2893 ksdebug(pack
, "Aborted. Trampoline at %lx has been "
2894 "overwritten.\n", p
->oldaddr
);
2900 static void remove_trampoline(const struct ksplice_patch
*p
)
2902 mm_segment_t old_fs
= get_fs();
2904 memcpy(p
->vaddr
, p
->saved
, p
->size
);
2905 flush_icache_range(p
->oldaddr
, p
->oldaddr
+ p
->size
);
2909 static abort_t
create_labelval(struct ksplice_pack
*pack
,
2910 struct ksplice_symbol
*ksym
,
2911 unsigned long val
, int status
)
2913 val
= follow_trampolines(pack
, val
);
2914 if (ksym
->vals
== NULL
)
2915 return ksym
->value
== val
? OK
: NO_MATCH
;
2918 if (status
== TEMP
) {
2919 struct labelval
*lv
= kmalloc(sizeof(*lv
), GFP_KERNEL
);
2921 return OUT_OF_MEMORY
;
2923 lv
->saved_vals
= ksym
->vals
;
2924 list_add(&lv
->list
, &pack
->temp_labelvals
);
2930 static abort_t
create_safety_record(struct ksplice_pack
*pack
,
2931 const struct ksplice_section
*sect
,
2932 struct list_head
*record_list
,
2933 unsigned long run_addr
,
2934 unsigned long run_size
)
2936 struct safety_record
*rec
;
2937 struct ksplice_patch
*p
;
2939 if (record_list
== NULL
)
2942 for (p
= pack
->patches
; p
< pack
->patches_end
; p
++) {
2943 const struct ksplice_reloc
*r
= patch_reloc(pack
, p
);
2944 if (strcmp(sect
->symbol
->label
, r
->symbol
->label
) == 0)
2947 if (p
>= pack
->patches_end
)
2950 rec
= kmalloc(sizeof(*rec
), GFP_KERNEL
);
2952 return OUT_OF_MEMORY
;
2953 rec
->label
= kstrdup(sect
->symbol
->label
, GFP_KERNEL
);
2954 if (rec
->label
== NULL
) {
2956 return OUT_OF_MEMORY
;
2958 rec
->addr
= run_addr
;
2959 rec
->size
= run_size
;
2960 rec
->first_byte_safe
= false;
2962 list_add(&rec
->list
, record_list
);
2966 static abort_t
add_candidate_val(struct ksplice_pack
*pack
,
2967 struct list_head
*vals
, unsigned long val
)
2969 struct candidate_val
*tmp
, *new;
2970 val
= follow_trampolines(pack
, val
);
2972 list_for_each_entry(tmp
, vals
, list
) {
2973 if (tmp
->val
== val
)
2976 new = kmalloc(sizeof(*new), GFP_KERNEL
);
2978 return OUT_OF_MEMORY
;
2980 list_add(&new->list
, vals
);
2984 static void release_vals(struct list_head
*vals
)
2986 clear_list(vals
, struct candidate_val
, list
);
2989 static void set_temp_labelvals(struct ksplice_pack
*pack
, int status
)
2991 struct labelval
*lv
, *n
;
2992 list_for_each_entry_safe(lv
, n
, &pack
->temp_labelvals
, list
) {
2993 if (status
== NOVAL
) {
2994 lv
->symbol
->vals
= lv
->saved_vals
;
2996 release_vals(lv
->saved_vals
);
2997 kfree(lv
->saved_vals
);
2999 list_del(&lv
->list
);
3004 static int contains_canary(struct ksplice_pack
*pack
, unsigned long blank_addr
,
3005 const struct ksplice_reloc_howto
*howto
)
3007 switch (howto
->size
) {
3009 return (*(uint8_t *)blank_addr
& howto
->dst_mask
) ==
3010 (KSPLICE_CANARY
& howto
->dst_mask
);
3012 return (*(uint16_t *)blank_addr
& howto
->dst_mask
) ==
3013 (KSPLICE_CANARY
& howto
->dst_mask
);
3015 return (*(uint32_t *)blank_addr
& howto
->dst_mask
) ==
3016 (KSPLICE_CANARY
& howto
->dst_mask
);
3017 #if BITS_PER_LONG >= 64
3019 return (*(uint64_t *)blank_addr
& howto
->dst_mask
) ==
3020 (KSPLICE_CANARY
& howto
->dst_mask
);
3021 #endif /* BITS_PER_LONG */
3023 ksdebug(pack
, "Aborted. Invalid relocation size.\n");
3028 static unsigned long follow_trampolines(struct ksplice_pack
*pack
,
3031 unsigned long new_addr
;
3035 if (trampoline_target(pack
, addr
, &new_addr
) != OK
)
3037 m
= __module_text_address(new_addr
);
3038 if (m
== NULL
|| m
== pack
->target
||
3039 !starts_with(m
->name
, "ksplice"))
3041 ksdebug(pack
, "Following trampoline %lx %lx(%s)\n", addr
,
3047 /* Does module a patch module b? */
3048 static bool patches_module(const struct module
*a
, const struct module
*b
)
3050 #ifdef KSPLICE_NO_KERNEL_SUPPORT
3054 if (a
== NULL
|| !starts_with(a
->name
, "ksplice_"))
3056 name
= a
->name
+ strlen("ksplice_");
3057 name
+= strcspn(name
, "_");
3061 return strcmp(name
, b
== NULL
? "vmlinux" : b
->name
) == 0;
3062 #else /* !KSPLICE_NO_KERNEL_SUPPORT */
3063 struct ksplice_module_list_entry
*entry
;
3066 list_for_each_entry(entry
, &ksplice_module_list
, list
) {
3067 if (entry
->target
== b
&& entry
->primary
== a
)
3071 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
/* Return true iff @str begins with @prefix (empty prefix matches all). */
static bool starts_with(const char *str, const char *prefix)
{
	size_t prefix_len = strlen(prefix);
	return strncmp(str, prefix, prefix_len) == 0;
}
3079 static bool singular(struct list_head
*list
)
3081 return !list_empty(list
) && list
->next
->next
== list
;
3084 static void *bsearch(const void *key
, const void *base
, size_t n
,
3085 size_t size
, int (*cmp
)(const void *key
, const void *elt
))
3087 int start
= 0, end
= n
- 1, mid
, result
;
3090 while (start
<= end
) {
3091 mid
= (start
+ end
) / 2;
3092 result
= cmp(key
, base
+ mid
* size
);
3095 else if (result
> 0)
3098 return (void *)base
+ mid
* size
;
3103 static int compare_relocs(const void *a
, const void *b
)
3105 const struct ksplice_reloc
*ra
= a
, *rb
= b
;
3106 if (ra
->blank_addr
> rb
->blank_addr
)
3108 else if (ra
->blank_addr
< rb
->blank_addr
)
3111 return ra
->howto
->size
- rb
->howto
->size
;
3114 #ifdef KSPLICE_STANDALONE
3115 static int compare_system_map(const void *a
, const void *b
)
3117 const struct ksplice_system_map
*sa
= a
, *sb
= b
;
3118 return strcmp(sa
->label
, sb
->label
);
3120 #endif /* KSPLICE_STANDALONE */
3122 #ifdef CONFIG_DEBUG_FS
3123 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
3124 /* Old kernels don't have debugfs_create_blob */
3125 static ssize_t
read_file_blob(struct file
*file
, char __user
*user_buf
,
3126 size_t count
, loff_t
*ppos
)
3128 struct debugfs_blob_wrapper
*blob
= file
->private_data
;
3129 return simple_read_from_buffer(user_buf
, count
, ppos
, blob
->data
,
3133 static int blob_open(struct inode
*inode
, struct file
*file
)
3135 if (inode
->i_private
)
3136 file
->private_data
= inode
->i_private
;
3140 static struct file_operations fops_blob
= {
3141 .read
= read_file_blob
,
3145 static struct dentry
*debugfs_create_blob(const char *name
, mode_t mode
,
3146 struct dentry
*parent
,
3147 struct debugfs_blob_wrapper
*blob
)
3149 return debugfs_create_file(name
, mode
, parent
, blob
, &fops_blob
);
3151 #endif /* LINUX_VERSION_CODE */
3153 static abort_t
init_debug_buf(struct update
*update
)
3155 update
->debug_blob
.size
= 0;
3156 update
->debug_blob
.data
= NULL
;
3157 update
->debugfs_dentry
=
3158 debugfs_create_blob(update
->name
, S_IFREG
| S_IRUSR
, NULL
,
3159 &update
->debug_blob
);
3160 if (update
->debugfs_dentry
== NULL
)
3161 return OUT_OF_MEMORY
;
3165 static void clear_debug_buf(struct update
*update
)
3167 if (update
->debugfs_dentry
== NULL
)
3169 debugfs_remove(update
->debugfs_dentry
);
3170 update
->debugfs_dentry
= NULL
;
3171 update
->debug_blob
.size
= 0;
3172 vfree(update
->debug_blob
.data
);
3173 update
->debug_blob
.data
= NULL
;
3176 static int _ksdebug(struct update
*update
, const char *fmt
, ...)
3179 unsigned long size
, old_size
, new_size
;
3181 if (update
->debug
== 0)
3184 /* size includes the trailing '\0' */
3185 va_start(args
, fmt
);
3186 size
= 1 + vsnprintf(update
->debug_blob
.data
, 0, fmt
, args
);
3188 old_size
= update
->debug_blob
.size
== 0 ? 0 :
3189 max(PAGE_SIZE
, roundup_pow_of_two(update
->debug_blob
.size
));
3190 new_size
= update
->debug_blob
.size
+ size
== 0 ? 0 :
3191 max(PAGE_SIZE
, roundup_pow_of_two(update
->debug_blob
.size
+ size
));
3192 if (new_size
> old_size
) {
3193 char *buf
= vmalloc(new_size
);
3196 memcpy(buf
, update
->debug_blob
.data
, update
->debug_blob
.size
);
3197 vfree(update
->debug_blob
.data
);
3198 update
->debug_blob
.data
= buf
;
3200 va_start(args
, fmt
);
3201 update
->debug_blob
.size
+= vsnprintf(update
->debug_blob
.data
+
3202 update
->debug_blob
.size
,
3207 #else /* CONFIG_DEBUG_FS */
3208 static abort_t
init_debug_buf(struct update
*update
)
3213 static void clear_debug_buf(struct update
*update
)
3218 static int _ksdebug(struct update
*update
, const char *fmt
, ...)
3222 if (update
->debug
== 0)
3225 if (!update
->debug_continue_line
)
3226 printk(KERN_DEBUG
"ksplice: ");
3228 va_start(args
, fmt
);
3229 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
3231 #else /* LINUX_VERSION_CODE < */
3232 /* 683b229286b429244f35726b3c18caec429233bd was after 2.6.8 */
3234 char *buf
= kvasprintf(GFP_KERNEL
, fmt
, args
);
3238 #endif /* LINUX_VERSION_CODE */
3241 update
->debug_continue_line
=
3242 fmt
[0] == '\0' || fmt
[strlen(fmt
) - 1] != '\n';
3245 #endif /* CONFIG_DEBUG_FS */
3247 #ifdef KSPLICE_NO_KERNEL_SUPPORT
3248 #ifdef CONFIG_KALLSYMS
3249 static int kallsyms_on_each_symbol(int (*fn
)(void *, const char *,
3250 struct module
*, unsigned long),
3253 char namebuf
[KSYM_NAME_LEN
];
3255 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3257 #endif /* LINUX_VERSION_CODE */
3260 /* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
3261 * 2.6.10 was the first release after this commit
3263 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3264 for (i
= 0, off
= 0; i
< kallsyms_num_syms
; i
++) {
3265 off
= kallsyms_expand_symbol(off
, namebuf
);
3266 ret
= fn(data
, namebuf
, NULL
, kallsyms_addresses
[i
]);
3270 #else /* LINUX_VERSION_CODE < */
3273 for (i
= 0, knames
= kallsyms_names
; i
< kallsyms_num_syms
; i
++) {
3274 unsigned prefix
= *knames
++;
3276 strlcpy(namebuf
+ prefix
, knames
, KSYM_NAME_LEN
- prefix
);
3278 ret
= fn(data
, namebuf
, NULL
, kallsyms_addresses
[i
]);
3282 knames
+= strlen(knames
) + 1;
3284 #endif /* LINUX_VERSION_CODE */
3285 return module_kallsyms_on_each_symbol(fn
, data
);
3288 /* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
3289 * 2.6.10 was the first release after this commit
3291 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3292 extern u8 kallsyms_token_table
[];
3293 extern u16 kallsyms_token_index
[];
3295 static unsigned int kallsyms_expand_symbol(unsigned int off
, char *result
)
3297 long len
, skipped_first
= 0;
3298 const u8
*tptr
, *data
;
3300 data
= &kallsyms_names
[off
];
3307 tptr
= &kallsyms_token_table
[kallsyms_token_index
[*data
]];
3312 if (skipped_first
) {
3325 #endif /* LINUX_VERSION_CODE */
3327 static int module_kallsyms_on_each_symbol(int (*fn
)(void *, const char *,
3336 list_for_each_entry(mod
, &modules
, list
) {
3337 for (i
= 0; i
< mod
->num_symtab
; i
++) {
3338 ret
= fn(data
, mod
->strtab
+ mod
->symtab
[i
].st_name
,
3339 mod
, mod
->symtab
[i
].st_value
);
3346 #endif /* CONFIG_KALLSYMS */
3348 static struct module
*find_module(const char *name
)
3352 list_for_each_entry(mod
, &modules
, list
) {
3353 if (strcmp(mod
->name
, name
) == 0)
3359 #ifdef CONFIG_MODULE_UNLOAD
3361 struct list_head list
;
3362 struct module
*module_which_uses
;
3365 /* I'm not yet certain whether we need the strong form of this. */
3366 static inline int strong_try_module_get(struct module
*mod
)
3368 if (mod
&& mod
->state
!= MODULE_STATE_LIVE
)
3370 if (try_module_get(mod
))
3375 /* Does a already use b? */
3376 static int already_uses(struct module
*a
, struct module
*b
)
3378 struct module_use
*use
;
3379 list_for_each_entry(use
, &b
->modules_which_use_me
, list
) {
3380 if (use
->module_which_uses
== a
)
3386 /* Make it so module a uses b. Must be holding module_mutex */
3387 static int use_module(struct module
*a
, struct module
*b
)
3389 struct module_use
*use
;
3390 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
3391 /* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
3393 #endif /* LINUX_VERSION_CODE */
3394 if (b
== NULL
|| already_uses(a
, b
))
3397 if (strong_try_module_get(b
) < 0)
3400 use
= kmalloc(sizeof(*use
), GFP_ATOMIC
);
3405 use
->module_which_uses
= a
;
3406 list_add(&use
->list
, &b
->modules_which_use_me
);
3407 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
3408 /* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
3409 no_warn
= sysfs_create_link(b
->holders_dir
, &a
->mkobj
.kobj
, a
->name
);
3410 #endif /* LINUX_VERSION_CODE */
3413 #else /* CONFIG_MODULE_UNLOAD */
3414 static int use_module(struct module
*a
, struct module
*b
)
3418 #endif /* CONFIG_MODULE_UNLOAD */
3420 #ifndef CONFIG_MODVERSIONS
3421 #define symversion(base, idx) NULL
3423 #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
3426 static bool each_symbol_in_section(const struct symsearch
*arr
,
3427 unsigned int arrsize
,
3428 struct module
*owner
,
3429 bool (*fn
)(const struct symsearch
*syms
,
3430 struct module
*owner
,
3431 unsigned int symnum
, void *data
),
3436 for (j
= 0; j
< arrsize
; j
++) {
3437 for (i
= 0; i
< arr
[j
].stop
- arr
[j
].start
; i
++)
3438 if (fn(&arr
[j
], owner
, i
, data
))
3445 /* Returns true as soon as fn returns true, otherwise false. */
3446 static bool each_symbol(bool (*fn
)(const struct symsearch
*arr
,
3447 struct module
*owner
,
3448 unsigned int symnum
, void *data
),
3452 const struct symsearch arr
[] = {
3453 { __start___ksymtab
, __stop___ksymtab
, __start___kcrctab
,
3454 NOT_GPL_ONLY
, false },
3455 { __start___ksymtab_gpl
, __stop___ksymtab_gpl
,
3456 __start___kcrctab_gpl
,
3458 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
3459 { __start___ksymtab_gpl_future
, __stop___ksymtab_gpl_future
,
3460 __start___kcrctab_gpl_future
,
3461 WILL_BE_GPL_ONLY
, false },
3462 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
3463 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
3464 { __start___ksymtab_unused
, __stop___ksymtab_unused
,
3465 __start___kcrctab_unused
,
3466 NOT_GPL_ONLY
, true },
3467 { __start___ksymtab_unused_gpl
, __stop___ksymtab_unused_gpl
,
3468 __start___kcrctab_unused_gpl
,
3470 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
3473 if (each_symbol_in_section(arr
, ARRAY_SIZE(arr
), NULL
, fn
, data
))
3476 list_for_each_entry(mod
, &modules
, list
) {
3477 struct symsearch module_arr
[] = {
3478 { mod
->syms
, mod
->syms
+ mod
->num_syms
, mod
->crcs
,
3479 NOT_GPL_ONLY
, false },
3480 { mod
->gpl_syms
, mod
->gpl_syms
+ mod
->num_gpl_syms
,
3483 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
3484 { mod
->gpl_future_syms
,
3485 mod
->gpl_future_syms
+ mod
->num_gpl_future_syms
,
3486 mod
->gpl_future_crcs
,
3487 WILL_BE_GPL_ONLY
, false },
3488 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
3489 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
3491 mod
->unused_syms
+ mod
->num_unused_syms
,
3493 NOT_GPL_ONLY
, true },
3494 { mod
->unused_gpl_syms
,
3495 mod
->unused_gpl_syms
+ mod
->num_unused_gpl_syms
,
3496 mod
->unused_gpl_crcs
,
3498 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
3501 if (each_symbol_in_section(module_arr
, ARRAY_SIZE(module_arr
),
3508 struct find_symbol_arg
{
3515 struct module
*owner
;
3516 const unsigned long *crc
;
3517 const struct kernel_symbol
*sym
;
3520 static bool find_symbol_in_section(const struct symsearch
*syms
,
3521 struct module
*owner
,
3522 unsigned int symnum
, void *data
)
3524 struct find_symbol_arg
*fsa
= data
;
3526 if (strcmp(syms
->start
[symnum
].name
, fsa
->name
) != 0)
3530 if (syms
->licence
== GPL_ONLY
)
3532 if (syms
->licence
== WILL_BE_GPL_ONLY
&& fsa
->warn
) {
3533 printk(KERN_WARNING
"Symbol %s is being used "
3534 "by a non-GPL module, which will not "
3535 "be allowed in the future\n", fsa
->name
);
3536 printk(KERN_WARNING
"Please see the file "
3537 "Documentation/feature-removal-schedule.txt "
3538 "in the kernel source tree for more details.\n");
3542 #ifdef CONFIG_UNUSED_SYMBOLS
3543 if (syms
->unused
&& fsa
->warn
) {
3544 printk(KERN_WARNING
"Symbol %s is marked as UNUSED, "
3545 "however this module is using it.\n", fsa
->name
);
3547 "This symbol will go away in the future.\n");
3549 "Please evalute if this is the right api to use and if "
3550 "it really is, submit a report the linux kernel "
3551 "mailinglist together with submitting your code for "
3557 fsa
->crc
= symversion(syms
->crcs
, symnum
);
3558 fsa
->sym
= &syms
->start
[symnum
];
3562 /* Find a symbol and return it, along with, (optional) crc and
3563 * (optional) module which owns it */
3564 static const struct kernel_symbol
*find_symbol(const char *name
,
3565 struct module
**owner
,
3566 const unsigned long **crc
,
3567 bool gplok
, bool warn
)
3569 struct find_symbol_arg fsa
;
3575 if (each_symbol(find_symbol_in_section
, &fsa
)) {
3586 static struct module
*__module_data_address(unsigned long addr
)
3590 list_for_each_entry(mod
, &modules
, list
) {
3591 if (addr
>= (unsigned long)mod
->module_core
+
3592 mod
->core_text_size
&&
3593 addr
< (unsigned long)mod
->module_core
+ mod
->core_size
)
3598 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
3600 struct ksplice_attribute
{
3601 struct attribute attr
;
3602 ssize_t (*show
)(struct update
*update
, char *buf
);
3603 ssize_t (*store
)(struct update
*update
, const char *buf
, size_t len
);
3606 static ssize_t
ksplice_attr_show(struct kobject
*kobj
, struct attribute
*attr
,
3609 struct ksplice_attribute
*attribute
=
3610 container_of(attr
, struct ksplice_attribute
, attr
);
3611 struct update
*update
= container_of(kobj
, struct update
, kobj
);
3612 if (attribute
->show
== NULL
)
3614 return attribute
->show(update
, buf
);
3617 static ssize_t
ksplice_attr_store(struct kobject
*kobj
, struct attribute
*attr
,
3618 const char *buf
, size_t len
)
3620 struct ksplice_attribute
*attribute
=
3621 container_of(attr
, struct ksplice_attribute
, attr
);
3622 struct update
*update
= container_of(kobj
, struct update
, kobj
);
3623 if (attribute
->store
== NULL
)
3625 return attribute
->store(update
, buf
, len
);
3628 static struct sysfs_ops ksplice_sysfs_ops
= {
3629 .show
= ksplice_attr_show
,
3630 .store
= ksplice_attr_store
,
/*
 * kobject release callback: invoked when the update's refcount drops to
 * zero; tears down the update via cleanup_ksplice_update().
 * NOTE(review): function braces (orig lines 3634/3638) are missing from
 * this extract.
 */
3633 static void ksplice_release(struct kobject
*kobj
)
3635 struct update
*update
;
3636 update
= container_of(kobj
, struct update
, kobj
);
3637 cleanup_ksplice_update(update
);
/*
 * sysfs `stage` show: renders the update's current lifecycle stage as one
 * of "preparing\n", "applied\n", "reversed\n".
 * NOTE(review): the `case STAGE_APPLIED:` label (orig line 3645) and the
 * function/switch braces and default return are missing from this extract;
 * the bare snprintf at 3646 clearly belongs to that missing label.
 */
3640 static ssize_t
stage_show(struct update
*update
, char *buf
)
3642 switch (update
->stage
) {
3643 case STAGE_PREPARING
:
3644 return snprintf(buf
, PAGE_SIZE
, "preparing\n");
3646 return snprintf(buf
, PAGE_SIZE
, "applied\n");
3647 case STAGE_REVERSED
:
3648 return snprintf(buf
, PAGE_SIZE
, "reversed\n");
/*
 * sysfs `abort_cause` show: maps the update's abort_cause enum to a
 * lowercase diagnostic string for userspace (ksplice-apply reads this to
 * report why an update failed).  BAD_SYSTEM_MAP only exists in the
 * standalone (non-integrated) build.
 * NOTE(review): several case labels are missing from this extract —
 * the snprintfs at orig 3657/3659/3665/3667/3669/3681 presumably sat
 * under `case OK:`, `case NO_MATCH:`, `case CODE_BUSY:`,
 * `case MODULE_BUSY:`, `case OUT_OF_MEMORY:`, and a final
 * `case UNEXPECTED:`/default respectively — TODO confirm upstream.
 */
3653 static ssize_t
abort_cause_show(struct update
*update
, char *buf
)
3655 switch (update
->abort_cause
) {
3657 return snprintf(buf
, PAGE_SIZE
, "ok\n");
3659 return snprintf(buf
, PAGE_SIZE
, "no_match\n");
3660 #ifdef KSPLICE_STANDALONE
3661 case BAD_SYSTEM_MAP
:
3662 return snprintf(buf
, PAGE_SIZE
, "bad_system_map\n");
3663 #endif /* KSPLICE_STANDALONE */
3665 return snprintf(buf
, PAGE_SIZE
, "code_busy\n");
3667 return snprintf(buf
, PAGE_SIZE
, "module_busy\n");
3669 return snprintf(buf
, PAGE_SIZE
, "out_of_memory\n");
3670 case FAILED_TO_FIND
:
3671 return snprintf(buf
, PAGE_SIZE
, "failed_to_find\n");
3672 case ALREADY_REVERSED
:
3673 return snprintf(buf
, PAGE_SIZE
, "already_reversed\n");
3674 case MISSING_EXPORT
:
3675 return snprintf(buf
, PAGE_SIZE
, "missing_export\n");
3676 case UNEXPECTED_RUNNING_TASK
:
3677 return snprintf(buf
, PAGE_SIZE
, "unexpected_running_task\n");
3678 case TARGET_NOT_LOADED
:
3679 return snprintf(buf
, PAGE_SIZE
, "target_not_loaded\n");
3681 return snprintf(buf
, PAGE_SIZE
, "unexpected\n");
/*
 * sysfs `conflicts` show: for each recorded conflict, emits one line of
 * the form "<process_name> <pid> <sym>..." listing the conflicting stack
 * addresses, accumulating into buf with snprintf(buf + used,
 * PAGE_SIZE - used, ...) so output is truncated at one page.
 * NOTE(review): the declaration/initialization of `used`, the symbol-name
 * argument of the " %s" snprintf (orig lines ~3698-3699), the `continue`
 * under !has_conflict (orig 3696), and the final `return used;` are
 * missing from this extract.
 */
3686 static ssize_t
conflict_show(struct update
*update
, char *buf
)
3688 const struct conflict
*conf
;
3689 const struct conflict_addr
*ca
;
3691 list_for_each_entry(conf
, &update
->conflicts
, list
) {
3692 used
+= snprintf(buf
+ used
, PAGE_SIZE
- used
, "%s %d",
3693 conf
->process_name
, conf
->pid
);
3694 list_for_each_entry(ca
, &conf
->stack
, list
) {
3695 if (!ca
->has_conflict
)
3697 used
+= snprintf(buf
+ used
, PAGE_SIZE
- used
, " %s",
3700 used
+= snprintf(buf
+ used
, PAGE_SIZE
- used
, "\n");
/*
 * kthread entry point (see kthread_run in stage_store): unpacks the
 * void * argument back into a struct update and runs the cleanup.  Done
 * on a kthread so that removing the update's own sysfs kobject does not
 * deadlock against the sysfs write that triggered it.
 * NOTE(review): the braces and trailing `return 0;` (orig lines
 * 3706/3709-3710) are missing from this extract.
 */
3705 static int maybe_cleanup_ksplice_update_wrapper(void *updateptr
)
3707 struct update
*update
= updateptr
;
3708 maybe_cleanup_ksplice_update(update
);
/*
 * sysfs `stage` store: the userspace control knob driving the update
 * state machine.  Accepts (with or without trailing newline):
 *   "applied"  — only from STAGE_PREPARING; runs apply_update()
 *   "reversed" — only from STAGE_APPLIED;  runs reverse_patches()
 *   "cleanup"  — only from STAGE_REVERSED; spawns a kthread to free the
 *                update asynchronously (avoids removing our own kobject
 *                from inside this sysfs write)
 * The abort_cause of the attempted transition is recorded on the update;
 * on success (and when not merely cleaning up) a KERN_INFO line is logged.
 * NOTE(review): strncmp with len deliberately matches both the exact
 * string and its newline-terminated form written by `echo`.  The final
 * `return len;` and the printk's update->name argument line (orig 3731)
 * are missing from this extract.
 */
3712 static ssize_t
stage_store(struct update
*update
, const char *buf
, size_t len
)
3714 enum stage old_stage
= update
->stage
;
3715 if ((strncmp(buf
, "applied", len
) == 0 ||
3716 strncmp(buf
, "applied\n", len
) == 0) &&
3717 update
->stage
== STAGE_PREPARING
)
3718 update
->abort_cause
= apply_update(update
);
3719 else if ((strncmp(buf
, "reversed", len
) == 0 ||
3720 strncmp(buf
, "reversed\n", len
) == 0) &&
3721 update
->stage
== STAGE_APPLIED
)
3722 update
->abort_cause
= reverse_patches(update
);
3723 else if ((strncmp(buf
, "cleanup", len
) == 0 ||
3724 strncmp(buf
, "cleanup\n", len
) == 0) &&
3725 update
->stage
== STAGE_REVERSED
)
3726 kthread_run(maybe_cleanup_ksplice_update_wrapper
, update
,
3727 "ksplice_cleanup_%s", update
->kid
);
3729 if (old_stage
!= STAGE_REVERSED
&& update
->abort_cause
== OK
)
3730 printk(KERN_INFO
"ksplice: Update %s %s successfully\n",
3732 update
->stage
== STAGE_APPLIED
? "applied" : "reversed");
/* sysfs `debug` show: prints the update's integer debug level.
 * Function braces (orig lines 3737/3739) missing from this extract. */
3736 static ssize_t
debug_show(struct update
*update
, char *buf
)
3738 return snprintf(buf
, PAGE_SIZE
, "%d\n", update
->debug
);
/*
 * sysfs `debug` store: parses buf as a base-10 unsigned long via
 * strict_strtoul (strict: rejects trailing garbage, unlike simple_strtoul).
 * NOTE(review): the declaration of `l`, the ret check, the assignment
 * `update->debug = l;`, and `return len;` (orig lines 3743, 3745-3748)
 * are missing from this extract — presumably mirrors partial_store below.
 */
3741 static ssize_t
debug_store(struct update
*update
, const char *buf
, size_t len
)
3744 int ret
= strict_strtoul(buf
, 10, &l
);
/* sysfs `partial` show: prints whether partial application (tolerating
 * unloaded target modules) is enabled for this update.
 * Function braces (orig lines 3752/3754) missing from this extract. */
3751 static ssize_t
partial_show(struct update
*update
, char *buf
)
3753 return snprintf(buf
, PAGE_SIZE
, "%d\n", update
->partial
);
/*
 * sysfs `partial` store: parses buf as a base-10 unsigned long and stores
 * it into update->partial.
 * NOTE(review): the declaration of `l`, the ret error check (orig
 * 3758/3760-3761), and the final `return len;` are missing from this
 * extract.
 */
3756 static ssize_t
partial_store(struct update
*update
, const char *buf
, size_t len
)
3759 int ret
= strict_strtoul(buf
, 10, &l
);
3762 update
->partial
= l
;
/*
 * The per-update sysfs attributes, built with the kernel's __ATTR()
 * initializer: read-write (0600) stage/debug/partial with paired
 * show+store, read-only (0400) abort_cause and conflicts with NULL store.
 */
3766 static struct ksplice_attribute stage_attribute
=
3767 __ATTR(stage
, 0600, stage_show
, stage_store
);
3768 static struct ksplice_attribute abort_cause_attribute
=
3769 __ATTR(abort_cause
, 0400, abort_cause_show
, NULL
);
3770 static struct ksplice_attribute debug_attribute
=
3771 __ATTR(debug
, 0600, debug_show
, debug_store
);
3772 static struct ksplice_attribute partial_attribute
=
3773 __ATTR(partial
, 0600, partial_show
, partial_store
);
3774 static struct ksplice_attribute conflict_attribute
=
3775 __ATTR(conflicts
, 0400, conflict_show
, NULL
);
/*
 * Default-attribute array handed to the kobj_type below; must be
 * NULL-terminated per the kobject API.
 * NOTE(review): the terminating `NULL,` entry and closing `};`
 * (orig lines 3783-3784) are missing from this extract.
 */
3777 static struct attribute
*ksplice_attrs
[] = {
3778 &stage_attribute
.attr
,
3779 &abort_cause_attribute
.attr
,
3780 &debug_attribute
.attr
,
3781 &partial_attribute
.attr
,
3782 &conflict_attribute
.attr
,
/*
 * kobj_type for update kobjects: ties together the show/store dispatch
 * ops, the release destructor, and the default attribute set, so each
 * registered update automatically exposes its sysfs files.
 * Closing `};` (orig line 3790) is missing from this extract.
 */
3786 static struct kobj_type ksplice_ktype
= {
3787 .sysfs_ops
= &ksplice_sysfs_ops
,
3788 .release
= ksplice_release
,
3789 .default_attrs
= ksplice_attrs
,
/*
 * Standalone-build module parameters and linkage: `debug` is exposed as a
 * writable (0600) module parameter; ksplice_system_map[] /
 * ksplice_system_map_end[] are emitted by the ksplice build tooling into
 * this module's own sections (hence extern with no definition here).
 * NOTE(review): the declaration of the `debug` variable itself (orig line
 * 3793) is missing from this extract.
 */
3792 #ifdef KSPLICE_STANDALONE
3794 module_param(debug
, int, 0600);
3795 MODULE_PARM_DESC(debug
, "Debug level");
3797 extern struct ksplice_system_map ksplice_system_map
[], ksplice_system_map_end
[];
/*
 * The bootstrap pack used only by the standalone build: a synthetic
 * ksplice_pack targeting the running kernel itself (target_name == NULL),
 * whose init relocations let standalone ksplice resolve kernel symbols
 * via the embedded system map before any real update pack loads.
 * NOTE(review): one designated-initializer line (orig 3803 — presumably
 * `.target = NULL,`) and the closing `};` (orig 3808) are missing from
 * this extract — TODO confirm upstream.
 */
3799 static struct ksplice_pack bootstrap_pack
= {
3800 .name
= "ksplice_" __stringify(KSPLICE_KID
),
3801 .kid
= "init_" __stringify(KSPLICE_KID
),
3802 .target_name
= NULL
,
3804 .map_printk
= MAP_PRINTK
,
3805 .primary
= THIS_MODULE
,
3806 .primary_system_map
= ksplice_system_map
,
3807 .primary_system_map_end
= ksplice_system_map_end
,
3809 #endif /* KSPLICE_STANDALONE */
/*
 * Module init.  Two very different paths:
 *
 * Standalone build: bootstrap via bootstrap_pack — create its update,
 * sort the embedded system map (so later lookups can bsearch it), apply
 * the pack's init relocations against the running kernel, and mark
 * `bootstrapped` on success; the bootstrap update is then torn down, its
 * only purpose being symbol resolution capability.
 *
 * Integrated build: just create the /sys/kernel/ksplice kobject that
 * update packs will register under.
 *
 * NOTE(review): the function braces, error-path returns (e.g. after the
 * `== NULL` checks at orig 3821/3832), and the final `return 0;` are
 * missing from this extract.  The inner `#ifdef KSPLICE_STANDALONE`
 * (orig 3816) nested inside the outer one looks redundant but is
 * reproduced as-is.
 */
3811 static int init_ksplice(void)
3813 #ifdef KSPLICE_STANDALONE
3814 struct ksplice_pack
*pack
= &bootstrap_pack
;
3815 pack
->update
= init_ksplice_update(pack
->kid
);
3816 #ifdef KSPLICE_STANDALONE
3817 sort(pack
->primary_system_map
,
3818 (pack
->primary_system_map_end
- pack
->primary_system_map
),
3819 sizeof(struct ksplice_system_map
), compare_system_map
, NULL
);
3820 #endif /* KSPLICE_STANDALONE */
3821 if (pack
->update
== NULL
)
3823 add_to_update(pack
, pack
->update
);
3824 pack
->update
->debug
= debug
;
3825 pack
->update
->abort_cause
=
3826 apply_relocs(pack
, ksplice_init_relocs
, ksplice_init_relocs_end
);
3827 if (pack
->update
->abort_cause
== OK
)
3828 bootstrapped
= true;
3829 cleanup_ksplice_update(bootstrap_pack
.update
);
3830 #else /* !KSPLICE_STANDALONE */
3831 ksplice_kobj
= kobject_create_and_add("ksplice", kernel_kobj
);
3832 if (ksplice_kobj
== NULL
)
3834 #endif /* KSPLICE_STANDALONE */
/*
 * Module exit: only the integrated build has anything to undo — drop the
 * /sys/kernel/ksplice kobject created in init_ksplice().  The standalone
 * build's bootstrap update was already cleaned up during init.
 * Function braces (orig lines 3839/3843) missing from this extract.
 */
3838 static void cleanup_ksplice(void)
3840 #ifndef KSPLICE_STANDALONE
3841 kobject_put(ksplice_kobj
);
3842 #endif /* KSPLICE_STANDALONE */
/* Standard module entry/exit registration and metadata.
 * NOTE(review): the `#endif` closing `#ifdef KSPLICE_VERSION` (orig line
 * 3852) is missing from this extract. */
3845 module_init(init_ksplice
);
3846 module_exit(cleanup_ksplice
);
3848 MODULE_AUTHOR("Jeffrey Brian Arnold <jbarnold@mit.edu>");
3849 MODULE_DESCRIPTION("Ksplice rebootless update system");
3850 #ifdef KSPLICE_VERSION
3851 MODULE_VERSION(KSPLICE_VERSION
);
3853 MODULE_LICENSE("GPL v2");