/* Copyright (C) 2007-2008 Jeffrey Brian Arnold <jbarnold@mit.edu>
 * Copyright (C) 2008 Anders Kaseorg <andersk@mit.edu>,
 *                    Tim Abbott <tabbott@mit.edu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
#include <linux/module.h>
#include <linux/version.h>
#if defined CONFIG_DEBUG_FS || LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
#include <linux/debugfs.h>
#else /* CONFIG_DEBUG_FS */
/* a7a76cefc4b12bb6508afa4c77f11c2752cc365d was after 2.6.11 */
#endif /* CONFIG_DEBUG_FS */
#include <linux/errno.h>
#include <linux/kallsyms.h>
#include <linux/kobject.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/sysfs.h>
#include <linux/time.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
#include <linux/uaccess.h>
#else /* LINUX_VERSION_CODE < */
/* linux/uaccess.h doesn't exist in kernels before 2.6.18 */
#include <asm/uaccess.h>
#endif /* LINUX_VERSION_CODE */
#include <linux/vmalloc.h>
#ifdef KSPLICE_STANDALONE
#else /* !KSPLICE_STANDALONE */
#include <linux/ksplice.h>
#endif /* KSPLICE_STANDALONE */
#ifdef KSPLICE_NEED_PARAINSTRUCTIONS
#include <asm/alternative.h>
#endif /* KSPLICE_NEED_PARAINSTRUCTIONS */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
/* 6e21828743247270d09a86756a0c11702500dbfb was after 2.6.18 */
#endif /* LINUX_VERSION_CODE */

#if BITS_PER_LONG == 32
#elif BITS_PER_LONG == 64
#endif /* BITS_PER_LONG */
enum stage {
	STAGE_PREPARING, STAGE_APPLIED, STAGE_REVERSED
};

enum run_pre_mode {
	RUN_PRE_INITIAL, RUN_PRE_DEBUG, RUN_PRE_FINAL
};
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
/* 5d7b32de9935c65ca8285ac6ec2382afdbb5d479 was after 2.6.8 */
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
/* af4ca457eaf2d6682059c18463eb106e2ce58198 was after 2.6.14 */
#define __bitwise__ __bitwise
#endif /* LINUX_VERSION_CODE */

typedef int __bitwise__ abort_t;
#define OK ((__force abort_t) 0)
#define NO_MATCH ((__force abort_t) 1)
#define BAD_SYSTEM_MAP ((__force abort_t) 2)
#define CODE_BUSY ((__force abort_t) 3)
#define MODULE_BUSY ((__force abort_t) 4)
#define OUT_OF_MEMORY ((__force abort_t) 5)
#define FAILED_TO_FIND ((__force abort_t) 6)
#define ALREADY_REVERSED ((__force abort_t) 7)
#define MISSING_EXPORT ((__force abort_t) 8)
#define UNEXPECTED_RUNNING_TASK ((__force abort_t) 9)
#define UNEXPECTED ((__force abort_t) 10)
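/* abort_t values are the status codes returned by every Ksplice operation;
 * the most recent cause of failure is reported to userspace as a lowercase
 * string through the update's read-only "abort_cause" sysfs attribute
 * (see abort_cause_show below). */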
struct update_bundle {
	const char *kid;
	const char *name;
	struct kobject kobj;
	enum stage stage;
	abort_t abort_cause;
	int debug;
#ifdef CONFIG_DEBUG_FS
	struct debugfs_blob_wrapper debug_blob;
	struct dentry *debugfs_dentry;
#else /* !CONFIG_DEBUG_FS */
	bool debug_continue_line;
#endif /* CONFIG_DEBUG_FS */
	struct list_head packs;
	struct list_head conflicts;
	struct list_head list;
};

struct conflict {
	const char *process_name;
	pid_t pid;
	struct list_head stack;
	struct list_head list;
};

struct conflict_frame {
	unsigned long addr;
	int has_conflict;
	const char *label;
	struct list_head list;
};

#if defined(CONFIG_DEBUG_FS) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
/* Old kernels don't have debugfs_create_blob */
struct debugfs_blob_wrapper {
	void *data;
	unsigned long size;
};
#endif /* CONFIG_DEBUG_FS && LINUX_VERSION_CODE */
struct reloc_nameval {
	struct list_head list;
	const char *label;
	unsigned long val;
	enum { NOVAL, TEMP, VAL } status;
};

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
/* c33fa9f5609e918824446ef9a75319d4a802f1f4 was after 2.6.25 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
/* 2fff0a48416af891dce38fd425246e337831e0bb was after 2.6.19 */
static int virtual_address_mapped(unsigned long addr)
{
	char retval;
	return probe_kernel_address(addr, retval) != -EFAULT;
}
#else /* LINUX_VERSION_CODE < */
static int virtual_address_mapped(unsigned long addr);
#endif /* LINUX_VERSION_CODE */

static long probe_kernel_read(void *dst, void *src, size_t size)
{
	if (!virtual_address_mapped((unsigned long)src) ||
	    !virtual_address_mapped((unsigned long)src + size))
		return -EFAULT;
	memcpy(dst, src, size);
	return 0;
}
#endif /* LINUX_VERSION_CODE */
static struct reloc_nameval *find_nameval(struct module_pack *pack,
					  const char *label);
static abort_t create_nameval(struct module_pack *pack, const char *label,
			      unsigned long val, int status);
static abort_t lookup_reloc(struct module_pack *pack, unsigned long addr,
			    const struct ksplice_reloc **relocp);
static abort_t handle_reloc(struct module_pack *pack,
			    const struct ksplice_reloc *r,
			    unsigned long run_addr, enum run_pre_mode mode);

struct safety_record {
	struct list_head list;
	const char *label;
	unsigned long addr;
	unsigned long size;
	bool first_byte_safe;
};

struct candidate_val {
	struct list_head list;
	unsigned long val;
};

static bool singular(struct list_head *list)
{
	return !list_empty(list) && list->next->next == list;
}

static int __attribute__((format(printf, 2, 3)))
_ksdebug(struct update_bundle *bundle, const char *fmt, ...);
#ifdef CONFIG_DEBUG_FS
static abort_t init_debug_buf(struct update_bundle *bundle);
static void clear_debug_buf(struct update_bundle *bundle);
#else /* !CONFIG_DEBUG_FS */
static inline abort_t init_debug_buf(struct update_bundle *bundle)
{
	return OK;
}
static inline void clear_debug_buf(struct update_bundle *bundle)
{
}
#endif /* CONFIG_DEBUG_FS */

#define ksdebug(pack, fmt, ...) \
	_ksdebug(pack->bundle, fmt, ## __VA_ARGS__)
#define failed_to_find(pack, sym_name) \
	ksdebug(pack, "Failed to find symbol %s at " \
		"%s:%d\n", sym_name, __FILE__, __LINE__)

static inline void print_abort(struct module_pack *pack, const char *str)
{
	ksdebug(pack, "Aborted. (%s)\n", str);
}
static LIST_HEAD(update_bundles);
#ifdef KSPLICE_STANDALONE
#if defined(CONFIG_KSPLICE) || defined(CONFIG_KSPLICE_MODULE)
extern struct list_head ksplice_module_list;
#else /* !CONFIG_KSPLICE */
LIST_HEAD(ksplice_module_list);
#endif /* CONFIG_KSPLICE */
#else /* !KSPLICE_STANDALONE */
LIST_HEAD(ksplice_module_list);
EXPORT_SYMBOL_GPL(ksplice_module_list);
#endif /* KSPLICE_STANDALONE */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
/* Old kernels do not have kcalloc
 * e629946abd0bb8266e9c3d0fd1bff2ef8dec5443 was after 2.6.8
 */
static inline void *kcalloc(size_t n, size_t size, typeof(GFP_KERNEL) flags)
{
	void *mem;
	if (n != 0 && size > ULONG_MAX / n)
		return NULL;
	mem = kmalloc(n * size, flags);
	if (mem != NULL)
		memset(mem, 0, n * size);
	return mem;
}
#endif /* LINUX_VERSION_CODE */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
/* Old kernels do not have kstrdup
 * 543537bd922692bc978e2e356fcd8bfc9c2ee7d5 was 2.6.13-rc4
 */
static char *kstrdup(const char *s, typeof(GFP_KERNEL) gfp)
{
	size_t len;
	char *buf;

	if (s == NULL)
		return NULL;
	len = strlen(s) + 1;
	buf = kmalloc(len, gfp);
	if (buf != NULL)
		memcpy(buf, s, len);
	return buf;
}
#endif /* LINUX_VERSION_CODE */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
/* Old kernels use semaphore instead of mutex
 * 97d1f15b7ef52c1e9c28dc48b454024bb53a5fd2 was after 2.6.16
 */
#define mutex semaphore
#define mutex_lock down
#define mutex_unlock up
#endif /* LINUX_VERSION_CODE */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
/* 11443ec7d9286dd25663516436a14edfb5f43857 was after 2.6.21 */
static char * __attribute_used__
kvasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, va_list ap)
{
	unsigned int len;
	char *p, dummy[1];
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(dummy, 0, fmt, aq);
	va_end(aq);

	p = kmalloc(len + 1, gfp);
	if (p == NULL)
		return NULL;

	vsnprintf(p, len + 1, fmt, ap);
	return p;
}
#endif /* LINUX_VERSION_CODE */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
/* e905914f96e11862b130dd229f73045dad9a34e8 was after 2.6.17 */
static char * __attribute__((format (printf, 2, 3)))
kasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = kvasprintf(gfp, fmt, ap);
	va_end(ap);

	return p;
}
#endif /* LINUX_VERSION_CODE */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
/* 06b2a76d25d3cfbd14680021c1d356c91be6904e was after 2.6.24 */
static int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
{
	char *tail;
	unsigned long val;
	size_t len;

	*res = 0;
	len = strlen(cp);
	if (len == 0)
		return -EINVAL;

	val = simple_strtoul(cp, &tail, base);
	if ((*tail == '\0') ||
	    ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
		*res = val;
		return 0;
	}
	return -EINVAL;
}
#endif /* LINUX_VERSION_CODE */

#ifndef task_thread_info
#define task_thread_info(task) (task)->thread_info
#endif /* !task_thread_info */

#ifdef KSPLICE_STANDALONE

static int bootstrapped = 0;
#ifdef CONFIG_KALLSYMS
extern unsigned long kallsyms_addresses[], kallsyms_num_syms;
extern u8 kallsyms_names[];
#endif /* CONFIG_KALLSYMS */

/* defined by ksplice-create */
extern const struct ksplice_reloc ksplice_init_relocs[],
	ksplice_init_relocs_end[];

/* Obtained via System.map */
extern struct list_head modules;
extern struct mutex module_mutex;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) && defined(CONFIG_UNUSED_SYMBOLS)
/* f71d20e961474dde77e6558396efb93d6ac80a4b was after 2.6.17 */
#define KSPLICE_KSYMTAB_UNUSED_SUPPORT 1
#endif /* LINUX_VERSION_CODE */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
/* 9f28bb7e1d0188a993403ab39b774785892805e1 was after 2.6.16 */
#define KSPLICE_KSYMTAB_FUTURE_SUPPORT 1
#endif /* LINUX_VERSION_CODE */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
extern const unsigned long __start___kcrctab[];
extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const unsigned long __start___kcrctab_gpl[];
#ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
extern const struct kernel_symbol __start___ksymtab_unused[];
extern const struct kernel_symbol __stop___ksymtab_unused[];
extern const unsigned long __start___kcrctab_unused[];
extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
extern const unsigned long __start___kcrctab_unused_gpl[];
#endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
#ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
extern const struct kernel_symbol __start___ksymtab_gpl_future[];
extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
extern const unsigned long __start___kcrctab_gpl_future[];
#endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */

#endif /* KSPLICE_STANDALONE */
static abort_t apply_relocs(struct module_pack *pack,
			    const struct ksplice_reloc *relocs,
			    const struct ksplice_reloc *relocs_end);
static abort_t apply_reloc(struct module_pack *pack,
			   const struct ksplice_reloc *r);
static abort_t read_reloc_value(struct module_pack *pack,
				const struct ksplice_reloc *r,
				unsigned long addr, unsigned long *valp);
static abort_t write_reloc_value(struct module_pack *pack,
				 const struct ksplice_reloc *r,
				 unsigned long sym_addr);
static abort_t add_system_map_candidates(struct module_pack *pack,
					 const struct ksplice_symbol *symbol,
					 struct list_head *vals);
static abort_t compute_address(struct module_pack *pack,
			       const struct ksplice_symbol *ksym,
			       struct list_head *vals);

struct accumulate_struct {
	const char *desired_name;
	struct list_head *vals;
};
#ifdef CONFIG_KALLSYMS
static int accumulate_matching_names(void *data, const char *sym_name,
				     unsigned long sym_val);
static abort_t kernel_lookup(const char *name, struct list_head *vals);
static abort_t other_module_lookup(struct module_pack *pack, const char *name,
				   struct list_head *vals);
#ifdef KSPLICE_STANDALONE
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
static unsigned long ksplice_kallsyms_expand_symbol(unsigned long off,
						    char *result);
#endif /* LINUX_VERSION_CODE */
#endif /* KSPLICE_STANDALONE */
#endif /* CONFIG_KALLSYMS */
static abort_t exported_symbol_lookup(const char *name, struct list_head *vals);
static abort_t new_export_lookup(struct update_bundle *bundle,
				 const char *name, struct list_head *vals);

#ifdef KSPLICE_STANDALONE
static abort_t brute_search_all(struct module_pack *pack,
				const struct ksplice_size *s,
				struct list_head *vals);
#endif /* KSPLICE_STANDALONE */

static abort_t add_candidate_val(struct list_head *vals, unsigned long val);
static void release_vals(struct list_head *vals);
static void set_temp_myst_relocs(struct module_pack *pack, int status_val);
static int contains_canary(struct module_pack *pack, unsigned long blank_addr,
			   int size, long dst_mask);
static int starts_with(const char *str, const char *prefix);
static int ends_with(const char *str, const char *suffix);

#define clear_list(head, type, member)				\
	do {							\
		struct list_head *_pos, *_n;			\
		list_for_each_safe(_pos, _n, head) {		\
			list_del(_pos);				\
			kfree(list_entry(_pos, type, member));	\
		}						\
	} while (0)
static abort_t activate_primary(struct module_pack *pack);
static abort_t process_exports(struct module_pack *pack);
static abort_t process_patches(struct module_pack *pack);
static int __apply_patches(void *bundle);
static int __reverse_patches(void *bundle);
static abort_t check_each_task(struct update_bundle *bundle);
static abort_t check_task(struct update_bundle *bundle,
			  const struct task_struct *t, int rerun);
static abort_t check_stack(struct update_bundle *bundle, struct conflict *conf,
			   const struct thread_info *tinfo,
			   const unsigned long *stack);
static abort_t check_address(struct update_bundle *bundle,
			     struct conflict *conf, unsigned long addr);
static abort_t check_record(struct conflict_frame *frame,
			    const struct safety_record *rec,
			    unsigned long addr);
static int valid_stack_ptr(const struct thread_info *tinfo, const void *p);
static int is_stop_machine(const struct task_struct *t);
static void cleanup_conflicts(struct update_bundle *bundle);
static void print_conflicts(struct update_bundle *bundle);
static void insert_trampoline(struct ksplice_patch *p);
static void remove_trampoline(const struct ksplice_patch *p);
/* Architecture-specific functions defined in ARCH/ksplice-arch.c */
static abort_t create_trampoline(struct ksplice_patch *p);
static unsigned long follow_trampolines(struct module_pack *pack,
					unsigned long addr);
static abort_t handle_paravirt(struct module_pack *pack, unsigned long pre,
			       unsigned long run, int *matched);

static abort_t add_dependency_on_address(struct module_pack *pack,
					 unsigned long addr);
static abort_t add_patch_dependencies(struct module_pack *pack);

#if defined(KSPLICE_STANDALONE) && \
    !defined(CONFIG_KSPLICE) && !defined(CONFIG_KSPLICE_MODULE)
#define KSPLICE_NO_KERNEL_SUPPORT 1
#endif /* KSPLICE_STANDALONE && !CONFIG_KSPLICE && !CONFIG_KSPLICE_MODULE */

#ifdef KSPLICE_NO_KERNEL_SUPPORT
/* Functions defined here that will be exported in later kernels */
#ifdef CONFIG_KALLSYMS
static int module_kallsyms_on_each_symbol(const struct module *mod,
					  int (*fn)(void *, const char *,
						    unsigned long),
					  void *data);
#endif /* CONFIG_KALLSYMS */
static struct module *find_module(const char *name);
static int use_module(struct module *a, struct module *b);
static const struct kernel_symbol *find_symbol(const char *name,
					       struct module **owner,
					       const unsigned long **crc,
					       bool gplok, bool warn);
static struct module *__module_data_address(unsigned long addr);
#endif /* KSPLICE_NO_KERNEL_SUPPORT */
static abort_t activate_helper(struct module_pack *pack,
			       bool consider_data_sections);
static abort_t search_for_match(struct module_pack *pack,
				const struct ksplice_size *s);
static abort_t try_addr(struct module_pack *pack, const struct ksplice_size *s,
			unsigned long run_addr,
			struct list_head *safety_records,
			enum run_pre_mode mode);
static abort_t run_pre_cmp(struct module_pack *pack,
			   const struct ksplice_size *s,
			   unsigned long run_addr,
			   struct list_head *safety_records,
			   enum run_pre_mode mode);
#ifndef CONFIG_FUNCTION_DATA_SECTIONS
/* defined in $ARCH/ksplice-arch.c */
static abort_t arch_run_pre_cmp(struct module_pack *pack,
				const struct ksplice_size *s,
				unsigned long run_addr,
				struct list_head *safety_records,
				enum run_pre_mode mode);
#endif /* CONFIG_FUNCTION_DATA_SECTIONS */
static void print_bytes(struct module_pack *pack,
			const unsigned char *run, int runc,
			const unsigned char *pre, int prec);
static abort_t create_safety_record(struct module_pack *pack,
				    const struct ksplice_size *s,
				    struct list_head *record_list,
				    unsigned long run_addr,
				    unsigned long run_size);

static abort_t reverse_patches(struct update_bundle *bundle);
static abort_t apply_patches(struct update_bundle *bundle);
static abort_t apply_update(struct update_bundle *bundle);
static int register_ksplice_module(struct module_pack *pack);
static struct update_bundle *init_ksplice_bundle(const char *kid);
static void cleanup_ksplice_bundle(struct update_bundle *bundle);
static void add_to_bundle(struct module_pack *pack,
			  struct update_bundle *bundle);
static int ksplice_sysfs_init(struct update_bundle *bundle);

#ifndef KSPLICE_STANDALONE
#include "ksplice-arch.c"
#elif defined CONFIG_X86
#include "x86/ksplice-arch.c"
#elif defined CONFIG_ARM
#include "arm/ksplice-arch.c"
#endif /* KSPLICE_STANDALONE */

#ifndef KSPLICE_STANDALONE
static struct kobject *ksplice_kobj;
#endif /* !KSPLICE_STANDALONE */
struct ksplice_attribute {
	struct attribute attr;
	ssize_t (*show)(struct update_bundle *bundle, char *buf);
	ssize_t (*store)(struct update_bundle *bundle, const char *buf,
			 size_t len);
};

static ssize_t ksplice_attr_show(struct kobject *kobj, struct attribute *attr,
				 char *buf)
{
	struct ksplice_attribute *attribute =
	    container_of(attr, struct ksplice_attribute, attr);
	struct update_bundle *bundle =
	    container_of(kobj, struct update_bundle, kobj);
	if (attribute->show == NULL)
		return -EIO;
	return attribute->show(bundle, buf);
}
static ssize_t ksplice_attr_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t len)
{
	struct ksplice_attribute *attribute =
	    container_of(attr, struct ksplice_attribute, attr);
	struct update_bundle *bundle =
	    container_of(kobj, struct update_bundle, kobj);
	if (attribute->store == NULL)
		return -EIO;
	return attribute->store(bundle, buf, len);
}

static struct sysfs_ops ksplice_sysfs_ops = {
	.show = ksplice_attr_show,
	.store = ksplice_attr_store,
};

static void ksplice_release(struct kobject *kobj)
{
	struct update_bundle *bundle;
	bundle = container_of(kobj, struct update_bundle, kobj);
	cleanup_ksplice_bundle(bundle);
}
static ssize_t stage_show(struct update_bundle *bundle, char *buf)
{
	switch (bundle->stage) {
	case STAGE_PREPARING:
		return snprintf(buf, PAGE_SIZE, "preparing\n");
	case STAGE_APPLIED:
		return snprintf(buf, PAGE_SIZE, "applied\n");
	case STAGE_REVERSED:
		return snprintf(buf, PAGE_SIZE, "reversed\n");
	}
	return 0;
}

static ssize_t abort_cause_show(struct update_bundle *bundle, char *buf)
{
	switch (bundle->abort_cause) {
	case OK:
		return snprintf(buf, PAGE_SIZE, "ok\n");
	case NO_MATCH:
		return snprintf(buf, PAGE_SIZE, "no_match\n");
	case BAD_SYSTEM_MAP:
		return snprintf(buf, PAGE_SIZE, "bad_system_map\n");
	case CODE_BUSY:
		return snprintf(buf, PAGE_SIZE, "code_busy\n");
	case MODULE_BUSY:
		return snprintf(buf, PAGE_SIZE, "module_busy\n");
	case OUT_OF_MEMORY:
		return snprintf(buf, PAGE_SIZE, "out_of_memory\n");
	case FAILED_TO_FIND:
		return snprintf(buf, PAGE_SIZE, "failed_to_find\n");
	case ALREADY_REVERSED:
		return snprintf(buf, PAGE_SIZE, "already_reversed\n");
	case MISSING_EXPORT:
		return snprintf(buf, PAGE_SIZE, "missing_export\n");
	case UNEXPECTED_RUNNING_TASK:
		return snprintf(buf, PAGE_SIZE, "unexpected_running_task\n");
	case UNEXPECTED:
		return snprintf(buf, PAGE_SIZE, "unexpected\n");
	}
	return 0;
}
static ssize_t conflict_show(struct update_bundle *bundle, char *buf)
{
	const struct conflict *conf;
	const struct conflict_frame *frame;
	int used = 0;
	list_for_each_entry(conf, &bundle->conflicts, list) {
		used += snprintf(buf + used, PAGE_SIZE - used, "%s %d",
				 conf->process_name, conf->pid);
		list_for_each_entry(frame, &conf->stack, list) {
			if (!frame->has_conflict)
				continue;
			used += snprintf(buf + used, PAGE_SIZE - used, " %s",
					 frame->label);
		}
		used += snprintf(buf + used, PAGE_SIZE - used, "\n");
	}
	return used;
}
static ssize_t stage_store(struct update_bundle *bundle,
			   const char *buf, size_t len)
{
	if (strncmp(buf, "applied\n", len) == 0 &&
	    bundle->stage == STAGE_PREPARING)
		bundle->abort_cause = apply_update(bundle);
	else if (strncmp(buf, "reversed\n", len) == 0 &&
		 bundle->stage == STAGE_APPLIED)
		bundle->abort_cause = reverse_patches(bundle);
	return len;
}

static ssize_t debug_show(struct update_bundle *bundle, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", bundle->debug);
}

static ssize_t debug_store(struct update_bundle *bundle, const char *buf,
			   size_t len)
{
	unsigned long l;
	int ret = strict_strtoul(buf, 10, &l);
	if (ret != 0)
		return ret;
	bundle->debug = l;
	return len;
}

static struct ksplice_attribute stage_attribute =
	__ATTR(stage, 0600, stage_show, stage_store);
static struct ksplice_attribute abort_cause_attribute =
	__ATTR(abort_cause, 0400, abort_cause_show, NULL);
static struct ksplice_attribute debug_attribute =
	__ATTR(debug, 0600, debug_show, debug_store);
static struct ksplice_attribute conflict_attribute =
	__ATTR(conflicts, 0400, conflict_show, NULL);

static struct attribute *ksplice_attrs[] = {
	&stage_attribute.attr,
	&abort_cause_attribute.attr,
	&debug_attribute.attr,
	&conflict_attribute.attr,
	NULL
};

static struct kobj_type ksplice_ktype = {
	.sysfs_ops = &ksplice_sysfs_ops,
	.release = ksplice_release,
	.default_attrs = ksplice_attrs,
};
static abort_t activate_primary(struct module_pack *pack)
{
	abort_t ret;
	ret = apply_relocs(pack, pack->primary_relocs,
			   pack->primary_relocs_end);
	if (ret != OK)
		return ret;

	ret = process_patches(pack);
	if (ret != OK)
		return ret;

	ret = process_exports(pack);
	if (ret != OK)
		return ret;

	ret = add_patch_dependencies(pack);
	if (ret != OK)
		return ret;
	return OK;
}
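/* ksplice_deleted is the landing point for calls into functions that the
 * update deletes outright: process_patches points any patch whose repladdr
 * is zero at this routine, so a stray call is loudly reported instead of
 * jumping into freed code. */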
static void __attribute__((noreturn)) ksplice_deleted(void)
{
	printk(KERN_CRIT "Attempted call of kernel function deleted by Ksplice "

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
/* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
#endif /* LINUX_VERSION_CODE */
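/* process_patches: resolve each patch's oldaddr from the nameval recorded
 * during run-pre matching, require a safety_record that covers the target
 * and is large enough for the trampoline, redirect deleted functions
 * (repladdr == 0) to ksplice_deleted, and build the trampoline itself. */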
static abort_t process_patches(struct module_pack *pack)
{
	struct ksplice_patch *p;
	struct safety_record *rec;
	abort_t ret;

	/* Check every patch has a safety_record */
	for (p = pack->patches; p < pack->patches_end; p++) {
		struct reloc_nameval *nv = find_nameval(pack, p->label);
		bool found = false;
		if (nv == NULL) {
			failed_to_find(pack, p->label);
			return FAILED_TO_FIND;
		}
		p->oldaddr = nv->val;

		list_for_each_entry(rec, &pack->safety_records, list) {
			if (strcmp(rec->label, p->label) == 0 &&
			    follow_trampolines(pack, p->oldaddr) == rec->addr) {
				found = true;
				break;
			}
		}
		if (!found) {
			ksdebug(pack, "No safety record for patch %s\n",
				p->label);
			return NO_MATCH;
		}
		if (rec->size < p->size) {
			ksdebug(pack, "Symbol %s is too short for trampoline\n",
				p->label);
			return UNEXPECTED;
		}

		if (p->repladdr == 0)
			p->repladdr = (unsigned long)ksplice_deleted;

		rec->first_byte_safe = true;

		ret = create_trampoline(p);
		if (ret != OK)
			return ret;
	}
	return OK;
}
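/* process_exports: for each ksplice_export, find the live kernel_symbol
 * entry being replaced, remember the pointer and its original name so that
 * __apply_patches/__reverse_patches can swap the exported name, and take a
 * module dependency on the exporting module. */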
static abort_t process_exports(struct module_pack *pack)
{
	struct ksplice_export *export;
	struct module *m;
	const struct kernel_symbol *sym;

	for (export = pack->exports; export < pack->exports_end; export++) {
		sym = find_symbol(export->name, &m, NULL, true, false);
		if (sym == NULL) {
			ksdebug(pack, "Could not find kernel_symbol struct for "
				"%s\n", export->name);
			return MISSING_EXPORT;
		}

		/* Cast away const since we are planning to mutate the
		 * kernel_symbol structure. */
		export->sym = (struct kernel_symbol *)sym;
		export->saved_name = export->sym->name;
		if (m != pack->primary && use_module(pack->primary, m) != 1)
			return UNEXPECTED;
	}
	return OK;
}
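/* insert_trampoline saves the first p->size bytes of the old function,
 * overwrites them with the trampoline that redirects to the replacement,
 * and flushes the icache; remove_trampoline restores the saved bytes. */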
static void insert_trampoline(struct ksplice_patch *p)
{
	mm_segment_t old_fs = get_fs();
	set_fs(KERNEL_DS);
	memcpy((void *)p->saved, (void *)p->oldaddr, p->size);
	memcpy((void *)p->oldaddr, (void *)p->trampoline, p->size);
	flush_icache_range(p->oldaddr, p->oldaddr + p->size);
	set_fs(old_fs);
}

static void remove_trampoline(const struct ksplice_patch *p)
{
	mm_segment_t old_fs = get_fs();
	set_fs(KERNEL_DS);
	memcpy((void *)p->oldaddr, (void *)p->saved, p->size);
	flush_icache_range(p->oldaddr, p->oldaddr + p->size);
	set_fs(old_fs);
}
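/* apply_patches: attempt the switch to the applied stage under stop_machine.
 * If the stack check finds to-be-replaced code in use (CODE_BUSY), sleep for
 * a second and retry, up to five attempts, then report the conflicts that
 * remain. */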
static abort_t apply_patches(struct update_bundle *bundle)
{
	int i;
	abort_t ret;

	for (i = 0; i < 5; i++) {
		cleanup_conflicts(bundle);
#ifdef KSPLICE_STANDALONE
#endif /* KSPLICE_STANDALONE */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
		ret = (__force abort_t)stop_machine(__apply_patches, bundle,
						    NULL);
#else /* LINUX_VERSION_CODE < */
/* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
		ret = (__force abort_t)stop_machine_run(__apply_patches, bundle,
							NR_CPUS);
#endif /* LINUX_VERSION_CODE */
#ifdef KSPLICE_STANDALONE
#endif /* KSPLICE_STANDALONE */
		if (ret != CODE_BUSY)
			break;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(1000));
	}

	if (ret == OK) {
		struct module_pack *pack;
		const struct ksplice_size *s;
		struct safety_record *rec;
		list_for_each_entry(pack, &bundle->packs, list) {
			for (s = pack->primary_sizes;
			     s < pack->primary_sizes_end; s++) {
				rec = kmalloc(sizeof(*rec), GFP_KERNEL);
				if (rec == NULL)
					return OUT_OF_MEMORY;
				rec->addr = s->thismod_addr;
				rec->size = s->size;
				rec->label = s->symbol->label;
				list_add(&rec->list, &pack->safety_records);
			}
		}
		_ksdebug(bundle, "Update %s applied successfully\n",
			 bundle->kid);
	} else if (ret == CODE_BUSY) {
		print_conflicts(bundle);
		_ksdebug(bundle, "Aborted %s. stack check: to-be-replaced "
			 "code is busy\n", bundle->kid);
	} else if (ret == ALREADY_REVERSED) {
		_ksdebug(bundle, "Aborted %s. Ksplice update %s is already "
			 "reversed.\n", bundle->kid, bundle->kid);
	}
	return ret;
}
static abort_t reverse_patches(struct update_bundle *bundle)
{
	int i;
	abort_t ret;
	struct module_pack *pack;

	clear_debug_buf(bundle);
	ret = init_debug_buf(bundle);
	if (ret != OK)
		return ret;

	_ksdebug(bundle, "Preparing to reverse %s\n", bundle->kid);

	for (i = 0; i < 5; i++) {
		cleanup_conflicts(bundle);
		clear_list(&bundle->conflicts, struct conflict, list);
#ifdef KSPLICE_STANDALONE
#endif /* KSPLICE_STANDALONE */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
		ret = (__force abort_t)stop_machine(__reverse_patches, bundle,
						    NULL);
#else /* LINUX_VERSION_CODE < */
/* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
		ret = (__force abort_t)stop_machine_run(__reverse_patches,
							bundle, NR_CPUS);
#endif /* LINUX_VERSION_CODE */
#ifdef KSPLICE_STANDALONE
#endif /* KSPLICE_STANDALONE */
		if (ret != CODE_BUSY)
			break;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(1000));
	}
	list_for_each_entry(pack, &bundle->packs, list)
		clear_list(&pack->safety_records, struct safety_record, list);
	if (ret == OK) {
		_ksdebug(bundle, "Update %s reversed successfully\n",
			 bundle->kid);
	} else if (ret == CODE_BUSY) {
		print_conflicts(bundle);
		_ksdebug(bundle, "Aborted %s. stack check: to-be-reversed "
			 "code is busy\n", bundle->kid);
	} else if (ret == MODULE_BUSY) {
		_ksdebug(bundle, "Update %s is in use by another module\n",
			 bundle->kid);
	}
	return ret;
}
static int __apply_patches(void *bundleptr)
{
	struct update_bundle *bundle = bundleptr;
	struct module_pack *pack;
	struct ksplice_patch *p;
	struct ksplice_export *export;
	abort_t ret;

	if (bundle->stage == STAGE_APPLIED)
		return (__force int)OK;

	if (bundle->stage != STAGE_PREPARING)
		return (__force int)UNEXPECTED;

	ret = check_each_task(bundle);
	if (ret != OK)
		return (__force int)ret;

	list_for_each_entry(pack, &bundle->packs, list) {
		if (try_module_get(pack->primary) != 1) {
			struct module_pack *pack1;
			list_for_each_entry(pack1, &bundle->packs, list) {
				if (pack1 == pack)
					break;
				module_put(pack1->primary);
			}
			return (__force int)UNEXPECTED;
		}
	}

	bundle->stage = STAGE_APPLIED;

	list_for_each_entry(pack, &bundle->packs, list) {
		for (export = pack->exports; export < pack->exports_end;
		     export++)
			export->sym->name = export->new_name;
	}

	list_for_each_entry(pack, &bundle->packs, list) {
		for (p = pack->patches; p < pack->patches_end; p++)
			insert_trampoline(p);
	}
	return (__force int)OK;
}
static int __reverse_patches(void *bundleptr)
{
	struct update_bundle *bundle = bundleptr;
	struct module_pack *pack;
	const struct ksplice_patch *p;
	struct ksplice_export *export;
	abort_t ret;

	if (bundle->stage != STAGE_APPLIED)
		return (__force int)OK;

#ifdef CONFIG_MODULE_UNLOAD
	/* primary's refcount isn't changed by accessing ksplice.ko's sysfs */
	list_for_each_entry(pack, &bundle->packs, list) {
		if (module_refcount(pack->primary) != 1)
			return (__force int)MODULE_BUSY;
	}
#endif /* CONFIG_MODULE_UNLOAD */

	ret = check_each_task(bundle);
	if (ret != OK)
		return (__force int)ret;

	bundle->stage = STAGE_REVERSED;

	list_for_each_entry(pack, &bundle->packs, list)
		module_put(pack->primary);

	list_for_each_entry(pack, &bundle->packs, list) {
		for (export = pack->exports; export < pack->exports_end;
		     export++)
			export->sym->name = export->saved_name;
	}

	list_for_each_entry(pack, &bundle->packs, list) {
		for (p = pack->patches; p < pack->patches_end; p++)
			remove_trampoline(p);
	}
	return (__force int)OK;
}
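/* check_each_task walks every thread in the system (do_each_thread is a
 * double loop) and checks its instruction pointer and kernel stack for
 * addresses inside code the update is about to replace or reverse; any hit
 * fails the current stop_machine attempt with CODE_BUSY. */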
static abort_t check_each_task(struct update_bundle *bundle)
{
	const struct task_struct *g, *p;
	abort_t status = OK, ret;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
/* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
	read_lock(&tasklist_lock);
#endif /* LINUX_VERSION_CODE */
	do_each_thread(g, p) {
		/* do_each_thread is a double loop! */
		ret = check_task(bundle, p, 0);
		if (ret != OK) {
			check_task(bundle, p, 1);
			status = ret;
		}
		if (ret != OK && ret != CODE_BUSY)
			goto out;
	} while_each_thread(g, p);
out:
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
/* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
	read_unlock(&tasklist_lock);
#endif /* LINUX_VERSION_CODE */
	return status;
}
static abort_t check_task(struct update_bundle *bundle,
			  const struct task_struct *t, int rerun)
{
	abort_t status, ret;
	struct conflict *conf = NULL;

	if (rerun) {
		conf = kmalloc(sizeof(*conf), GFP_ATOMIC);
		if (conf == NULL)
			return OUT_OF_MEMORY;
		conf->process_name = kstrdup(t->comm, GFP_ATOMIC);
		if (conf->process_name == NULL) {
			kfree(conf);
			return OUT_OF_MEMORY;
		}
		conf->pid = t->pid;
		INIT_LIST_HEAD(&conf->stack);
		list_add(&conf->list, &bundle->conflicts);
	}

	status = check_address(bundle, conf, KSPLICE_IP(t));
	if (t == current) {
		ret = check_stack(bundle, conf, task_thread_info(t),
				  (unsigned long *)__builtin_frame_address(0));
		if (status == OK)
			status = ret;
	} else if (!task_curr(t)) {
		ret = check_stack(bundle, conf, task_thread_info(t),
				  (unsigned long *)KSPLICE_SP(t));
		if (status == OK)
			status = ret;
	} else if (!is_stop_machine(t)) {
		status = UNEXPECTED_RUNNING_TASK;
	}
	return status;
}

/* Modified version of Linux's print_context_stack */
static abort_t check_stack(struct update_bundle *bundle, struct conflict *conf,
			   const struct thread_info *tinfo,
			   const unsigned long *stack)
{
	abort_t status = OK, ret;
	unsigned long addr;

	while (valid_stack_ptr(tinfo, stack)) {
		addr = *stack++;
		ret = check_address(bundle, conf, addr);
		if (ret != OK)
			status = ret;
	}
	return status;
}
static abort_t check_address(struct update_bundle *bundle,
			     struct conflict *conf, unsigned long addr)
{
	abort_t status = OK, ret;
	const struct safety_record *rec;
	struct module_pack *pack;
	struct conflict_frame *frame = NULL;

	if (conf != NULL) {
		frame = kmalloc(sizeof(*frame), GFP_ATOMIC);
		if (frame == NULL)
			return OUT_OF_MEMORY;
		frame->addr = addr;
		frame->has_conflict = 0;
		frame->label = NULL;
		list_add(&frame->list, &conf->stack);
	}

	list_for_each_entry(pack, &bundle->packs, list) {
		list_for_each_entry(rec, &pack->safety_records, list) {
			ret = check_record(frame, rec, addr);
			if (ret != OK)
				status = ret;
		}
	}
	return status;
}

static abort_t check_record(struct conflict_frame *frame,
			    const struct safety_record *rec, unsigned long addr)
{
	if ((addr > rec->addr && addr < rec->addr + rec->size) ||
	    (addr == rec->addr && !rec->first_byte_safe)) {
		if (frame != NULL) {
			frame->label = rec->label;
			frame->has_conflict = 1;
		}
		return CODE_BUSY;
	}
	return OK;
}
/* Modified version of Linux's valid_stack_ptr */
static int valid_stack_ptr(const struct thread_info *tinfo, const void *p)
{
	return p > (const void *)tinfo
	    && p <= (const void *)tinfo + THREAD_SIZE - sizeof(long);
}
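/* The stop_machine worker threads themselves are expected to be running
 * while the update is applied; they are recognized by name, which changed
 * from "kstopmachine" to "kstop" plus a CPU number in 2.6.27. */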
static int is_stop_machine(const struct task_struct *t)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
	const char *num;
	if (!starts_with(t->comm, "kstop"))
		return 0;
	num = t->comm + strlen("kstop");
	return num[strspn(num, "0123456789")] == '\0';
#else /* LINUX_VERSION_CODE < */
	return strcmp(t->comm, "kstopmachine") == 0;
#endif /* LINUX_VERSION_CODE */
}
static void cleanup_conflicts(struct update_bundle *bundle)
{
	struct conflict *conf;
	list_for_each_entry(conf, &bundle->conflicts, list) {
		clear_list(&conf->stack, struct conflict_frame, list);
		kfree(conf->process_name);
	}
	clear_list(&bundle->conflicts, struct conflict, list);
}

static void print_conflicts(struct update_bundle *bundle)
{
	const struct conflict *conf;
	const struct conflict_frame *frame;
	list_for_each_entry(conf, &bundle->conflicts, list) {
		_ksdebug(bundle, "stack check: pid %d (%s):", conf->pid,
			 conf->process_name);
		list_for_each_entry(frame, &conf->stack, list) {
			_ksdebug(bundle, " %" ADDR, frame->addr);
			if (frame->has_conflict)
				_ksdebug(bundle, " [<-CONFLICT]");
		}
		_ksdebug(bundle, "\n");
	}
}

#ifdef KSPLICE_NO_KERNEL_SUPPORT
static struct module *find_module(const char *name)
{
	struct module *mod;

	list_for_each_entry(mod, &modules, list) {
		if (strcmp(mod->name, name) == 0)
			return mod;
	}
	return NULL;
}
#endif /* KSPLICE_NO_KERNEL_SUPPORT */
static int register_ksplice_module(struct module_pack *pack)
{
	struct update_bundle *bundle;
	int ret = 0;

	INIT_LIST_HEAD(&pack->reloc_namevals);
	INIT_LIST_HEAD(&pack->safety_records);

	mutex_lock(&module_mutex);
	if (strcmp(pack->target_name, "vmlinux") == 0) {
		pack->target = NULL;
	} else {
		pack->target = find_module(pack->target_name);
		if (pack->target == NULL || !module_is_live(pack->target)) {
			ret = -ENODEV;
			goto out;
		}
	}
	list_for_each_entry(bundle, &update_bundles, list) {
		if (strcmp(pack->kid, bundle->kid) == 0) {
			if (bundle->stage != STAGE_PREPARING) {
				ret = -EPERM;
				goto out;
			}
			add_to_bundle(pack, bundle);
			list_add(&pack->module_list_entry.list,
				 &ksplice_module_list);
			goto out;
		}
	}
	bundle = init_ksplice_bundle(pack->kid);
	if (bundle == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	ret = ksplice_sysfs_init(bundle);
	if (ret != 0) {
		cleanup_ksplice_bundle(bundle);
		goto out;
	}
	add_to_bundle(pack, bundle);
	list_add(&pack->module_list_entry.list, &ksplice_module_list);
out:
	mutex_unlock(&module_mutex);
	return ret;
}

void cleanup_ksplice_module(struct module_pack *pack)
{
	if (pack->bundle == NULL || pack->bundle->stage == STAGE_APPLIED)
		return;
	mutex_lock(&module_mutex);
	list_del(&pack->list);
	list_del(&pack->module_list_entry.list);
	mutex_unlock(&module_mutex);
	if (list_empty(&pack->bundle->packs))
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
		kobject_put(&pack->bundle->kobj);
#else /* LINUX_VERSION_CODE < */
/* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
		kobject_unregister(&pack->bundle->kobj);
#endif /* LINUX_VERSION_CODE */
	pack->bundle = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_ksplice_module);
static void add_to_bundle(struct module_pack *pack,
			  struct update_bundle *bundle)
{
	pack->bundle = bundle;
	list_add(&pack->list, &bundle->packs);
	pack->module_list_entry.target = pack->target;
	pack->module_list_entry.primary = pack->primary;
}

static void cleanup_ksplice_bundle(struct update_bundle *bundle)
{
	mutex_lock(&module_mutex);
	list_del(&bundle->list);
	mutex_unlock(&module_mutex);
	cleanup_conflicts(bundle);
	clear_debug_buf(bundle);
	kfree(bundle->kid);
	kfree(bundle->name);
	kfree(bundle);
}

static struct update_bundle *init_ksplice_bundle(const char *kid)
{
	struct update_bundle *bundle;
	bundle = kcalloc(1, sizeof(struct update_bundle), GFP_KERNEL);
	if (bundle == NULL)
		return NULL;
	bundle->name = kasprintf(GFP_KERNEL, "ksplice_%s", kid);
	if (bundle->name == NULL) {
		kfree(bundle);
		return NULL;
	}
	bundle->kid = kstrdup(kid, GFP_KERNEL);
	if (bundle->kid == NULL) {
		kfree(bundle->name);
		kfree(bundle);
		return NULL;
	}
	INIT_LIST_HEAD(&bundle->packs);
	if (init_debug_buf(bundle) != OK) {
		kfree(bundle->kid);
		kfree(bundle->name);
		kfree(bundle);
		return NULL;
	}
	list_add(&bundle->list, &update_bundles);
	bundle->stage = STAGE_PREPARING;
	bundle->abort_cause = OK;
	INIT_LIST_HEAD(&bundle->conflicts);
	return bundle;
}
static int ksplice_sysfs_init(struct update_bundle *bundle)
{
	int ret = 0;
	memset(&bundle->kobj, 0, sizeof(bundle->kobj));
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
#ifndef KSPLICE_STANDALONE
	ret = kobject_init_and_add(&bundle->kobj, &ksplice_ktype,
				   ksplice_kobj, "%s", bundle->kid);
#else /* KSPLICE_STANDALONE */
/* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
	ret = kobject_init_and_add(&bundle->kobj, &ksplice_ktype,
				   &THIS_MODULE->mkobj.kobj, "ksplice");
#endif /* KSPLICE_STANDALONE */
#else /* LINUX_VERSION_CODE < */
	ret = kobject_set_name(&bundle->kobj, "%s", "ksplice");
	if (ret != 0)
		return ret;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
	bundle->kobj.parent = &THIS_MODULE->mkobj.kobj;
#else /* LINUX_VERSION_CODE < */
/* b86ab02803095190d6b72bcc18dcf620bf378df9 was after 2.6.10 */
	bundle->kobj.parent = &THIS_MODULE->mkobj->kobj;
#endif /* LINUX_VERSION_CODE */
	bundle->kobj.ktype = &ksplice_ktype;
	ret = kobject_register(&bundle->kobj);
#endif /* LINUX_VERSION_CODE */
	if (ret != 0)
		return ret;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
	kobject_uevent(&bundle->kobj, KOBJ_ADD);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
/* 312c004d36ce6c739512bac83b452f4c20ab1f62 was after 2.6.14 */
/* 12025235884570ba7f02a6f427f973ac6be7ec54 was after 2.6.9 */
	kobject_uevent(&bundle->kobj, KOBJ_ADD, NULL);
#endif /* LINUX_VERSION_CODE */
	return 0;
}
int init_ksplice_module(struct module_pack *pack)
{
#ifdef KSPLICE_STANDALONE
	if (bootstrapped == 0)
		return -1;
#endif /* KSPLICE_STANDALONE */
	return register_ksplice_module(pack);
}
EXPORT_SYMBOL(init_ksplice_module);
static abort_t apply_update(struct update_bundle *bundle)
{
	struct module_pack *pack;
	abort_t ret;

	mutex_lock(&module_mutex);
#ifdef KSPLICE_NEED_PARAINSTRUCTIONS
	list_for_each_entry(pack, &bundle->packs, list) {
		if (pack->target == NULL) {
			apply_paravirt(pack->primary_parainstructions,
				       pack->primary_parainstructions_end);
			apply_paravirt(pack->helper_parainstructions,
				       pack->helper_parainstructions_end);
		}
	}
#endif /* KSPLICE_NEED_PARAINSTRUCTIONS */

	list_for_each_entry(pack, &bundle->packs, list) {
		ksdebug(pack, "Preparing and checking %s\n", pack->name);
		ret = activate_helper(pack, false);
		if (ret == NO_MATCH) {
			ksdebug(pack, "Trying to continue without the "
				"unmatched sections; we will find them later."
				"\n");
			ret = activate_primary(pack);
			if (ret != OK) {
				ksdebug(pack, "Aborted. Unable to continue "
					"without the unmatched sections.\n");
				goto out;
			}
			ksdebug(pack, "run-pre: Considering .data sections to "
				"find the unmatched sections\n");
			ret = activate_helper(pack, true);
			if (ret != OK)
				goto out;
			ksdebug(pack, "run-pre: Found all previously unmatched "
				"sections\n");
		} else if (ret != OK) {
			goto out;
		}
		ret = activate_primary(pack);
		if (ret != OK)
			goto out;
	}
	ret = apply_patches(bundle);
out:
	list_for_each_entry(pack, &bundle->packs, list) {
		clear_list(&pack->reloc_namevals, struct reloc_nameval, list);
		if (bundle->stage == STAGE_PREPARING)
			clear_list(&pack->safety_records, struct safety_record,
				   list);
	}
	mutex_unlock(&module_mutex);
	return ret;
}
static abort_t activate_helper(struct module_pack *pack,
			       bool consider_data_sections)
{
	const struct ksplice_size *s;
	abort_t ret;
	char *finished;
	int i, remaining = 0;
	bool progress;

	finished = kcalloc(pack->helper_sizes_end - pack->helper_sizes,
			   sizeof(*finished), GFP_KERNEL);
	if (finished == NULL)
		return OUT_OF_MEMORY;
	for (s = pack->helper_sizes; s < pack->helper_sizes_end; s++) {
		if ((s->flags & KSPLICE_SIZE_DATA) == 0)
			remaining++;
	}

	while (remaining > 0) {
		progress = false;
		for (s = pack->helper_sizes; s < pack->helper_sizes_end; s++) {
			i = s - pack->helper_sizes;
			if (finished[i])
				continue;
			if (!consider_data_sections &&
			    (s->flags & KSPLICE_SIZE_DATA) != 0)
				continue;
			ret = search_for_match(pack, s);
			if (ret == OK) {
				finished[i] = 1;
				if ((s->flags & KSPLICE_SIZE_DATA) == 0)
					remaining--;
				progress = true;
			} else if (ret != NO_MATCH) {
				kfree(finished);
				return ret;
			}
		}

		if (progress)
			continue;

		for (s = pack->helper_sizes; s < pack->helper_sizes_end; s++) {
			i = s - pack->helper_sizes;
			if (finished[i] == 0)
				ksdebug(pack, "run-pre: could not match "
					"section %s\n", s->symbol->label);
		}
		print_abort(pack, "run-pre: could not match some sections");
		kfree(finished);
		return NO_MATCH;
	}
	kfree(finished);
	return OK;
}
static abort_t search_for_match(struct module_pack *pack,
				const struct ksplice_size *s)
{
	abort_t ret;
	int i;
	unsigned long run_addr;
	LIST_HEAD(vals);
	struct candidate_val *v, *n;

	ret = add_system_map_candidates(pack, s->symbol, &vals);
	if (ret != OK) {
		release_vals(&vals);
		return ret;
	}
	ret = compute_address(pack, s->symbol, &vals);
	if (ret != OK) {
		release_vals(&vals);
		return ret;
	}

	ksdebug(pack, "run-pre: starting sect search for %s\n",
		s->symbol->label);

	list_for_each_entry_safe(v, n, &vals, list) {
		run_addr = v->val;

		ret = try_addr(pack, s, run_addr, NULL, RUN_PRE_INITIAL);
		if (ret == NO_MATCH) {
			list_del(&v->list);
			kfree(v);
		} else if (ret != OK) {
			release_vals(&vals);
			return ret;
		}
	}

#ifdef KSPLICE_STANDALONE
	if (list_empty(&vals) && (s->flags & KSPLICE_SIZE_DATA) == 0) {
		ret = brute_search_all(pack, s, &vals);
		if (ret != OK) {
			release_vals(&vals);
			return ret;
		}
		/* Make sure run-pre matching output is displayed if
		   brute_search succeeds */
		if (singular(&vals)) {
			run_addr = list_entry(vals.next, struct candidate_val,
					      list)->val;
			ret = try_addr(pack, s, run_addr, NULL,
				       RUN_PRE_DEBUG);
			if (ret != OK) {
				ksdebug(pack, "run-pre: Debug run failed for "
					"sect %s:\n", s->symbol->label);
				release_vals(&vals);
				return ret;
			}
		}
	}
#endif /* KSPLICE_STANDALONE */

	if (singular(&vals)) {
		LIST_HEAD(safety_records);
		run_addr = list_entry(vals.next, struct candidate_val,
				      list)->val;
		ret = try_addr(pack, s, run_addr, &safety_records,
			       RUN_PRE_FINAL);
		release_vals(&vals);
		if (ret != OK) {
			clear_list(&safety_records, struct safety_record, list);
			ksdebug(pack, "run-pre: Final run failed for sect "
				"%s:\n", s->symbol->label);
		} else {
			list_splice(&safety_records, &pack->safety_records);
		}
		return ret;
	} else if (!list_empty(&vals)) {
		struct candidate_val *val;
		ksdebug(pack, "run-pre: multiple candidates for sect %s:\n",
			s->symbol->label);
		i = 0;
		list_for_each_entry(val, &vals, list) {
			i++;
			ksdebug(pack, "%lx\n", val->val);
			if (i >= 5) {
				ksdebug(pack, "...\n");
				break;
			}
		}
		release_vals(&vals);
		return NO_MATCH;
	}
	release_vals(&vals);
	return NO_MATCH;
}
static void print_bytes(struct module_pack *pack,
			const unsigned char *run, int runc,
			const unsigned char *pre, int prec)
{
	int o;
	int matched = min(runc, prec);
	for (o = 0; o < matched; o++) {
		if (run[o] == pre[o])
			ksdebug(pack, "%02x ", run[o]);
		else
			ksdebug(pack, "%02x/%02x ", run[o], pre[o]);
	}
	for (o = matched; o < runc; o++)
		ksdebug(pack, "%02x/ ", run[o]);
	for (o = matched; o < prec; o++)
		ksdebug(pack, "/%02x ", pre[o]);
}
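/* run_pre_cmp is the heart of run-pre matching: it walks the helper ("pre")
 * copy of a section byte by byte alongside the candidate address in the
 * running kernel, handling bytes covered by a ksplice_reloc and paravirt
 * rewrites specially, and records a safety_record for the matched range on
 * success. */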
static abort_t run_pre_cmp(struct module_pack *pack,
			   const struct ksplice_size *s,
			   unsigned long run_addr,
			   struct list_head *safety_records,
			   enum run_pre_mode mode)
{
	abort_t ret;
	int matched;
	unsigned long pre_addr = s->thismod_addr;
	const struct ksplice_reloc *r;
	const unsigned char *pre, *run;
	unsigned char runval;

	if ((s->flags & KSPLICE_SIZE_TEXT) != 0)
		run_addr = follow_trampolines(pack, run_addr);

	pre = (const unsigned char *)pre_addr;
	run = (const unsigned char *)run_addr;
	while (pre < (const unsigned char *)pre_addr + s->size) {
		ret = lookup_reloc(pack, (unsigned long)pre, &r);
		if (ret == OK) {
			ret = handle_reloc(pack, r, (unsigned long)run, mode);
			if (ret != OK) {
				if (mode == RUN_PRE_INITIAL)
					ksdebug(pack, "reloc in sect does not "
						"match after %lx/%lx bytes\n",
						(unsigned long)pre - pre_addr,
						s->size);
				return ret;
			}
			if (mode == RUN_PRE_DEBUG)
				print_bytes(pack, run, r->size, pre, r->size);
			pre += r->size;
			run += r->size;
			continue;
		} else if (ret != NO_MATCH) {
			return ret;
		}

		if ((s->flags & KSPLICE_SIZE_TEXT) != 0) {
			ret = handle_paravirt(pack, (unsigned long)pre,
					      (unsigned long)run, &matched);
			if (ret != OK)
				return ret;
			if (matched != 0) {
				if (mode == RUN_PRE_DEBUG)
					print_bytes(pack, run, matched, pre,
						    matched);
				pre += matched;
				run += matched;
				continue;
			}
		}

		if (probe_kernel_read(&runval, (void *)run, 1) == -EFAULT) {
			if (mode == RUN_PRE_INITIAL)
				ksdebug(pack, "sect unmapped after %lx/%lx "
					"bytes\n",
					(unsigned long)pre - pre_addr, s->size);
			return NO_MATCH;
		}

		if (runval != *pre && (s->flags & KSPLICE_SIZE_DATA) == 0) {
			if (mode == RUN_PRE_INITIAL)
				ksdebug(pack, "sect does not match after "
					"%lx/%lx bytes\n",
					(unsigned long)pre - pre_addr, s->size);
			if (mode == RUN_PRE_DEBUG) {
				print_bytes(pack, run, 1, pre, 1);
				ksdebug(pack, "[p_o=%lx] ! ",
					(unsigned long)pre - pre_addr);
				print_bytes(pack, run + 1, 2, pre + 1, 2);
			}
			return NO_MATCH;
		}
		if (mode == RUN_PRE_DEBUG)
			print_bytes(pack, run, 1, pre, 1);
		pre++;
		run++;
	}
	return create_safety_record(pack, s, safety_records, run_addr,
				    (unsigned long)run - run_addr);
}
#ifdef KSPLICE_NO_KERNEL_SUPPORT
static struct module *__module_data_address(unsigned long addr)
{
	struct module *mod;

	list_for_each_entry(mod, &modules, list) {
		if (addr >= (unsigned long)mod->module_core +
		    mod->core_text_size &&
		    addr < (unsigned long)mod->module_core + mod->core_size)
			return mod;
	}
	return NULL;
}
#endif /* KSPLICE_NO_KERNEL_SUPPORT */
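/* try_addr: run run-pre matching for section s against a single candidate
 * run_addr.  The candidate must belong to the module being targeted; a
 * temporary nameval records the choice so relocations resolved during the
 * comparison can be cross-checked, and it is only committed (VAL) by a
 * successful final run. */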
static abort_t try_addr(struct module_pack *pack, const struct ksplice_size *s,
			unsigned long run_addr,
			struct list_head *safety_records,
			enum run_pre_mode mode)
{
	abort_t ret;
	const struct module *run_module;

	if ((s->flags & KSPLICE_SIZE_RODATA) != 0 ||
	    (s->flags & KSPLICE_SIZE_DATA) != 0)
		run_module = __module_data_address(run_addr);
	else
		run_module = __module_text_address(run_addr);
	if (run_module != pack->target) {
		ksdebug(pack, "run-pre: ignoring address %" ADDR " in other "
			"module %s for sect %s\n", run_addr,
			run_module == NULL ? "vmlinux" : run_module->name,
			s->symbol->label);
		return NO_MATCH;
	}

	ret = create_nameval(pack, s->symbol->label, run_addr, TEMP);
	if (ret != OK)
		return ret;

#ifdef CONFIG_FUNCTION_DATA_SECTIONS
	ret = run_pre_cmp(pack, s, run_addr, safety_records, mode);
#else /* !CONFIG_FUNCTION_DATA_SECTIONS */
	if ((s->flags & KSPLICE_SIZE_TEXT) != 0)
		ret = arch_run_pre_cmp(pack, s, run_addr, safety_records, mode);
	else
		ret = run_pre_cmp(pack, s, run_addr, safety_records, mode);
#endif /* CONFIG_FUNCTION_DATA_SECTIONS */
	if (ret == NO_MATCH && mode != RUN_PRE_FINAL) {
		set_temp_myst_relocs(pack, NOVAL);
		ksdebug(pack, "run-pre: %s sect %s does not match ",
			(s->flags & KSPLICE_SIZE_RODATA) != 0 ? "data" : "text",
			s->symbol->label);
		ksdebug(pack, "(r_a=%" ADDR " p_a=%" ADDR " s=%lx)\n",
			run_addr, s->thismod_addr, s->size);
		ksdebug(pack, "run-pre: ");
		if (pack->bundle->debug >= 1) {
#ifdef CONFIG_FUNCTION_DATA_SECTIONS
			ret = run_pre_cmp(pack, s, run_addr, safety_records,
					  RUN_PRE_DEBUG);
#else /* !CONFIG_FUNCTION_DATA_SECTIONS */
			if ((s->flags & KSPLICE_SIZE_TEXT) != 0)
				ret = arch_run_pre_cmp(pack, s, run_addr,
						       safety_records,
						       RUN_PRE_DEBUG);
			else
				ret = run_pre_cmp(pack, s, run_addr,
						  safety_records,
						  RUN_PRE_DEBUG);
#endif /* CONFIG_FUNCTION_DATA_SECTIONS */
			set_temp_myst_relocs(pack, NOVAL);
		}
		ksdebug(pack, "\n");
		return ret;
	} else if (ret != OK) {
		set_temp_myst_relocs(pack, NOVAL);
		return ret;
	} else if (mode != RUN_PRE_FINAL) {
		set_temp_myst_relocs(pack, NOVAL);
		ksdebug(pack, "run-pre: candidate for sect %s=%" ADDR "\n",
			s->symbol->label, run_addr);
		return OK;
	}

	set_temp_myst_relocs(pack, VAL);
	ksdebug(pack, "run-pre: found sect %s=%" ADDR "\n", s->symbol->label,
		run_addr);
	return OK;
}
static abort_t create_safety_record(struct module_pack *pack,
				    const struct ksplice_size *s,
				    struct list_head *record_list,
				    unsigned long run_addr,
				    unsigned long run_size)
{
	struct safety_record *rec;
	struct ksplice_patch *p;

	if (record_list == NULL)
		return OK;

	for (p = pack->patches; p < pack->patches_end; p++) {
		if (strcmp(s->symbol->label, p->label) == 0)
			break;
	}
	if (p >= pack->patches_end)
		return OK;

	if ((s->flags & KSPLICE_SIZE_TEXT) == 0 && p->repladdr != 0) {
		ksdebug(pack, "Error: ksplice_patch %s is matched to a "
			"non-deleted non-text section!\n", s->symbol->label);
		return UNEXPECTED;
	}

	rec = kmalloc(sizeof(*rec), GFP_KERNEL);
	if (rec == NULL)
		return OUT_OF_MEMORY;
	rec->addr = run_addr;
	rec->size = run_size;
	rec->label = s->symbol->label;
	rec->first_byte_safe = false;

	list_add(&rec->list, record_list);
	return OK;
}
static abort_t handle_reloc(struct module_pack *pack,
			    const struct ksplice_reloc *r,
			    unsigned long run_addr, enum run_pre_mode mode)
{
	unsigned long val;
	abort_t ret;

	ret = read_reloc_value(pack, r, run_addr, &val);
	if (ret != OK)
		return ret;

	if (mode == RUN_PRE_INITIAL)
		ksdebug(pack, "run-pre: reloc at r_a=%" ADDR " p_a=%" ADDR
			" to %s+%lx: found %s = %" ADDR "\n",
			run_addr, r->blank_addr, r->symbol->label, r->addend,
			r->symbol->label, val);

	if (starts_with(r->symbol->label, ".rodata.str"))
		return OK;

	if (contains_canary(pack, run_addr, r->size, r->dst_mask) != 0)
		return UNEXPECTED;

	ret = create_nameval(pack, r->symbol->label, val, TEMP);
	if (ret == NO_MATCH && mode == RUN_PRE_INITIAL) {
		struct reloc_nameval *nv = find_nameval(pack, r->symbol->label);
		ksdebug(pack, "run-pre: reloc at r_a=%" ADDR " p_a=%" ADDR
			": nameval %s = %" ADDR "(%d) does not match expected "
			"%" ADDR "\n", run_addr, r->blank_addr,
			r->symbol->label, nv->val, nv->status, val);
	}
	return ret;
}
static abort_t read_reloc_value(struct module_pack *pack,
				const struct ksplice_reloc *r,
				unsigned long addr, unsigned long *valp)
{
	unsigned char bytes[sizeof(long)];
	unsigned long val;

	if (probe_kernel_read(bytes, (void *)addr, r->size) == -EFAULT)
		return NO_MATCH;

	switch (r->size) {
	case 1:
		val = *(uint8_t *)bytes;
		break;
	case 2:
		val = *(uint16_t *)bytes;
		break;
	case 4:
		val = *(uint32_t *)bytes;
		break;
#if BITS_PER_LONG >= 64
	case 8:
		val = *(uint64_t *)bytes;
		break;
#endif /* BITS_PER_LONG */
	default:
		print_abort(pack, "Invalid relocation size");
		return UNEXPECTED;
	}

	val &= r->dst_mask;
	if (r->signed_addend)
		val |= -(val & (r->dst_mask & ~(r->dst_mask >> 1)));
	val <<= r->rightshift;
	if (r->pcrel)
		val += (unsigned long)addr;

	*valp = val;
	return OK;
}
static abort_t write_reloc_value(struct module_pack *pack,
				 const struct ksplice_reloc *r,
				 unsigned long sym_addr)
{
	unsigned long val = sym_addr + r->addend;
	if (r->pcrel)
		val -= r->blank_addr;
	val >>= r->rightshift;

	switch (r->size) {
	case 1:
		*(uint8_t *)r->blank_addr =
		    (*(uint8_t *)r->blank_addr & ~r->dst_mask) |
		    (val & r->dst_mask);
		break;
	case 2:
		*(uint16_t *)r->blank_addr =
		    (*(uint16_t *)r->blank_addr & ~r->dst_mask) |
		    (val & r->dst_mask);
		break;
	case 4:
		*(uint32_t *)r->blank_addr =
		    (*(uint32_t *)r->blank_addr & ~r->dst_mask) |
		    (val & r->dst_mask);
		break;
#if BITS_PER_LONG >= 64
	case 8:
		*(uint64_t *)r->blank_addr =
		    (*(uint64_t *)r->blank_addr & ~r->dst_mask) |
		    (val & r->dst_mask);
		break;
#endif /* BITS_PER_LONG */
	default:
		print_abort(pack, "Invalid relocation size");
		return UNEXPECTED;
	}

	if (read_reloc_value(pack, r, r->blank_addr, &val) != OK ||
	    val != sym_addr + r->addend) {
		print_abort(pack, "relocation overflow");
		return UNEXPECTED;
	}
	return OK;
}
static abort_t apply_relocs(struct module_pack *pack,
			    const struct ksplice_reloc *relocs,
			    const struct ksplice_reloc *relocs_end)
{
	const struct ksplice_reloc *r;
	for (r = relocs; r < relocs_end; r++) {
		abort_t ret = apply_reloc(pack, r);
		if (ret != OK)
			return ret;
	}
	return OK;
}
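/* apply_reloc fills in one unresolved relocation in the primary module.  A
 * blank_addr that no longer holds the canary was already resolved by the
 * module loader (e.g. an altinstr copy) and is skipped; otherwise the symbol
 * must resolve to exactly one address, which is written, recorded as a
 * nameval, and used to take a dependency on the module that owns it. */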
static abort_t apply_reloc(struct module_pack *pack,
			   const struct ksplice_reloc *r)
{
	abort_t ret;
	int canary_ret;
	unsigned long sym_addr;
	LIST_HEAD(vals);

	canary_ret = contains_canary(pack, r->blank_addr, r->size, r->dst_mask);
	if (canary_ret < 0)
		return UNEXPECTED;
	if (canary_ret == 0) {
		ksdebug(pack, "reloc: skipped %s:%" ADDR "(altinstr)\n",
			r->symbol->label, r->blank_offset);
		return OK;
	}

#ifdef KSPLICE_STANDALONE
	if (!bootstrapped) {
		ret = add_system_map_candidates(pack, r->symbol, &vals);
		if (ret != OK) {
			release_vals(&vals);
			return ret;
		}
	}
#else /* !KSPLICE_STANDALONE */
#ifdef CONFIG_KALLSYMS
	ret = add_system_map_candidates(pack, r->symbol, &vals);
	if (ret != OK) {
		release_vals(&vals);
		return ret;
	}
#endif /* CONFIG_KALLSYMS */
#endif /* KSPLICE_STANDALONE */
	ret = compute_address(pack, r->symbol, &vals);
	if (ret != OK) {
		release_vals(&vals);
		return ret;
	}
	if (!singular(&vals)) {
		release_vals(&vals);
		failed_to_find(pack, r->symbol->label);
		return FAILED_TO_FIND;
	}
	sym_addr = list_entry(vals.next, struct candidate_val, list)->val;
	release_vals(&vals);

	ret = write_reloc_value(pack, r, sym_addr);
	if (ret != OK)
		return ret;

	ksdebug(pack, "reloc: %s:%" ADDR " ", r->symbol->label,
		r->blank_offset);
	ksdebug(pack, "(S=%" ADDR " A=%" ADDR " ", sym_addr, r->addend);
	switch (r->size) {
	case 1:
		ksdebug(pack, "aft=%02x)\n", *(uint8_t *)r->blank_addr);
		break;
	case 2:
		ksdebug(pack, "aft=%04x)\n", *(uint16_t *)r->blank_addr);
		break;
	case 4:
		ksdebug(pack, "aft=%08x)\n", *(uint32_t *)r->blank_addr);
		break;
#if BITS_PER_LONG >= 64
	case 8:
		ksdebug(pack, "aft=%016llx)\n", *(uint64_t *)r->blank_addr);
		break;
#endif /* BITS_PER_LONG */
	default:
		print_abort(pack, "Invalid relocation size");
		return UNEXPECTED;
	}

#ifdef KSPLICE_STANDALONE
#endif /* KSPLICE_STANDALONE */
	/* Create namevals so that we can verify our choices in the second
	   round of run-pre matching that considers data sections. */
	ret = create_nameval(pack, r->symbol->label, sym_addr, VAL);
	if (ret != OK)
		return ret;

	return add_dependency_on_address(pack, sym_addr);
}
static abort_t add_system_map_candidates(struct module_pack *pack,
					 const struct ksplice_symbol *symbol,
					 struct list_head *vals)
{
	abort_t ret;
	long off;
	int i;

	/* Some Fedora kernel releases have System.map files whose symbol
	 * addresses disagree with the running kernel by a constant address
	 * offset because of the CONFIG_PHYSICAL_START and CONFIG_PHYSICAL_ALIGN
	 * values used to compile these kernels.  This constant address offset
	 * is always a multiple of 0x100000.
	 *
	 * If we observe an offset that is NOT a multiple of 0x100000, then the
	 * user provided us with an incorrect System.map file, and we should
	 * abort.
	 * If we observe an offset that is a multiple of 0x100000, then we can
	 * adjust the System.map address values accordingly and proceed.
	 */
	off = (unsigned long)printk - pack->map_printk;
	if (off & 0xfffff) {
		print_abort(pack, "System.map does not match kernel");
		return BAD_SYSTEM_MAP;
	}
	for (i = 0; i < symbol->nr_candidates; i++) {
		ret = add_candidate_val(vals, symbol->candidates[i] + off);
		if (ret != OK)
			return ret;
	}
	return OK;
}
static abort_t add_dependency_on_address(struct module_pack *pack,
					 unsigned long addr)
{
	struct module *m =
	    __module_text_address(follow_trampolines(pack, addr));
	if (m == NULL || starts_with(m->name, pack->name) ||
	    ends_with(m->name, "_helper"))
		return OK;
	if (use_module(pack->primary, m) != 1)
		return MODULE_BUSY;
	return OK;
}

static abort_t add_patch_dependencies(struct module_pack *pack)
{
	abort_t ret;
	const struct ksplice_patch *p;
	for (p = pack->patches; p < pack->patches_end; p++) {
		ret = add_dependency_on_address(pack, p->oldaddr);
		if (ret != OK)
			return ret;
	}
	return OK;
}
#ifdef KSPLICE_NO_KERNEL_SUPPORT
#ifdef CONFIG_MODULE_UNLOAD
struct module_use {
	struct list_head list;
	struct module *module_which_uses;
};

/* I'm not yet certain whether we need the strong form of this. */
static inline int strong_try_module_get(struct module *mod)
{
	if (mod && mod->state != MODULE_STATE_LIVE)
		return -EBUSY;
	if (try_module_get(mod))
		return 0;
	return -ENOENT;
}

/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
	struct module_use *use;
	list_for_each_entry(use, &b->modules_which_use_me, list) {
		if (use->module_which_uses == a)
			return 1;
	}
	return 0;
}

/* Make it so module a uses b.  Must be holding module_mutex */
static int use_module(struct module *a, struct module *b)
{
	struct module_use *use;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
	/* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
	int no_warn;
#endif /* LINUX_VERSION_CODE */
	if (b == NULL || already_uses(a, b))
		return 1;

	if (strong_try_module_get(b) < 0)
		return 0;

	use = kmalloc(sizeof(*use), GFP_ATOMIC);
	if (use == NULL)
		return 0;

	use->module_which_uses = a;
	list_add(&use->list, &b->modules_which_use_me);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
	/* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
	no_warn = sysfs_create_link(b->holders_dir, &a->mkobj.kobj, a->name);
#endif /* LINUX_VERSION_CODE */
	return 1;
}
#else /* CONFIG_MODULE_UNLOAD */
static int use_module(struct module *a, struct module *b)
{
	return 1;
}
#endif /* CONFIG_MODULE_UNLOAD */
#endif /* KSPLICE_NO_KERNEL_SUPPORT */

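/* The helpers above mirror the module dependency bookkeeping in the kernel's
 * own kernel/module.c, for builds where the kernel itself does not provide
 * them (KSPLICE_NO_KERNEL_SUPPORT): use_module() takes a reference on b,
 * records the use on b's modules_which_use_me list, and on newer kernels adds
 * the sysfs holders link, so b refuses to unload while a still uses it. */
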
static abort_t compute_address(struct module_pack *pack,
			       const struct ksplice_symbol *ksym,
			       struct list_head *vals)
{
	abort_t ret;
	struct reloc_nameval *nv;

#ifdef KSPLICE_STANDALONE
#endif /* KSPLICE_STANDALONE */

	nv = find_nameval(pack, ksym->label);
	if (nv != NULL) {
		ksdebug(pack, "using detected sym %s=%" ADDR "\n", ksym->label,
			nv->val);
		return add_candidate_val(vals, nv->val);
	}

	if (starts_with(ksym->label, ".rodata"))
		return OK;

#ifdef CONFIG_MODULE_UNLOAD
	if (strcmp(ksym->label, "cleanup_module") == 0 && pack->target != NULL
	    && pack->target->exit != NULL) {
		ret = add_candidate_val(vals,
					(unsigned long)pack->target->exit);
		if (ret != OK)
			return ret;
	}
#endif /* CONFIG_MODULE_UNLOAD */

	ret = exported_symbol_lookup(ksym->name, vals);
	if (ret == OK)
		ret = new_export_lookup(pack->bundle, ksym->name, vals);
#ifdef CONFIG_KALLSYMS
	if (ret == OK)
		ret = kernel_lookup(ksym->name, vals);
	if (ret == OK)
		ret = other_module_lookup(pack, ksym->name, vals);
#endif /* CONFIG_KALLSYMS */
	return ret;
}

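/* Address resolution above tries the cheapest sources first: a nameval
 * already pinned down by earlier run-pre matching wins outright; otherwise
 * candidates are accumulated from the exported-symbol tables, from exports
 * added by other packs in this bundle, and (with CONFIG_KALLSYMS) from
 * kallsyms in the core kernel and in other modules.  The caller then insists
 * that the resulting candidate list be singular before trusting the address. */
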
static abort_t new_export_lookup(struct update_bundle *bundle,
				 const char *name, struct list_head *vals)
{
	struct module_pack *pack;
	struct ksplice_export *exp;
	list_for_each_entry(pack, &bundle->packs, list) {
		for (exp = pack->exports; exp < pack->exports_end; exp++) {
			if (strcmp(exp->new_name, name) == 0 &&
			    contains_canary(pack,
					    (unsigned long)&exp->sym->value,
					    sizeof(unsigned long), -1) == 0)
				return add_candidate_val(vals,
							 exp->sym->value);
		}
	}
	return OK;
}

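/* A ksplice_export whose sym->value still holds the 0x77... canary has not
 * been resolved yet, so it is ignored here; only exports whose values have
 * already been filled in are offered as candidates. */
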
static abort_t exported_symbol_lookup(const char *name, struct list_head *vals)
{
	const struct kernel_symbol *sym;
	sym = find_symbol(name, NULL, NULL, true, false);
	if (sym == NULL)
		return OK;
	return add_candidate_val(vals, sym->value);
}

#ifdef KSPLICE_NO_KERNEL_SUPPORT
#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif

struct symsearch {
	const struct kernel_symbol *start, *stop;
	const unsigned long *crcs;
	enum { NOT_GPL_ONLY, WILL_BE_GPL_ONLY, GPL_ONLY } licence;
	bool unused;
};

static bool each_symbol_in_section(const struct symsearch *arr,
				   unsigned int arrsize,
				   struct module *owner,
				   bool (*fn)(const struct symsearch *syms,
					      struct module *owner,
					      unsigned int symnum, void *data),
				   void *data)
{
	unsigned int i, j;

	for (j = 0; j < arrsize; j++) {
		for (i = 0; i < arr[j].stop - arr[j].start; i++)
			if (fn(&arr[j], owner, i, data))
				return true;
	}
	return false;
}

/* Returns true as soon as fn returns true, otherwise false. */
static bool each_symbol(bool (*fn)(const struct symsearch *arr,
				   struct module *owner,
				   unsigned int symnum, void *data),
			void *data)
{
	struct module *mod;
	const struct symsearch arr[] = {
		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
		  NOT_GPL_ONLY, false },
		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
		  __start___kcrctab_gpl,
		  GPL_ONLY, false },
#ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
		{ __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
		  __start___kcrctab_gpl_future,
		  WILL_BE_GPL_ONLY, false },
#endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
#ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
		{ __start___ksymtab_unused, __stop___ksymtab_unused,
		  __start___kcrctab_unused,
		  NOT_GPL_ONLY, true },
		{ __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
		  __start___kcrctab_unused_gpl,
		  GPL_ONLY, true },
#endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
	};

	if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
		return true;

	list_for_each_entry(mod, &modules, list) {
		struct symsearch module_arr[] = {
			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
			  NOT_GPL_ONLY, false },
			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
			  mod->gpl_crcs,
			  GPL_ONLY, false },
#ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
			{ mod->gpl_future_syms,
			  mod->gpl_future_syms + mod->num_gpl_future_syms,
			  mod->gpl_future_crcs,
			  WILL_BE_GPL_ONLY, false },
#endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
#ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
			{ mod->unused_syms,
			  mod->unused_syms + mod->num_unused_syms,
			  mod->unused_crcs,
			  NOT_GPL_ONLY, true },
			{ mod->unused_gpl_syms,
			  mod->unused_gpl_syms + mod->num_unused_gpl_syms,
			  mod->unused_gpl_crcs,
			  GPL_ONLY, true },
#endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
		};

		if (each_symbol_in_section(module_arr, ARRAY_SIZE(module_arr),
					   mod, fn, data))
			return true;
	}
	return false;
}

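/* each_symbol() covers every export table the kernel knows about: first the
 * core kernel's __ksymtab sections (normal, GPL-only, and, where the kernel
 * version provides them, the gpl_future and unused variants), then the
 * corresponding per-module tables of every loaded module.  The callback is
 * invoked once per symbol until it reports a match. */
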
struct find_symbol_arg {
	/* Input */
	const char *name;
	bool gplok;
	bool warn;

	/* Output */
	struct module *owner;
	const unsigned long *crc;
	const struct kernel_symbol *sym;
};

static bool find_symbol_in_section(const struct symsearch *syms,
				   struct module *owner,
				   unsigned int symnum, void *data)
{
	struct find_symbol_arg *fsa = data;

	if (strcmp(syms->start[symnum].name, fsa->name) != 0)
		return false;

	if (!fsa->gplok) {
		if (syms->licence == GPL_ONLY)
			return false;
		if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
			printk(KERN_WARNING "Symbol %s is being used "
			       "by a non-GPL module, which will not "
			       "be allowed in the future\n", fsa->name);
			printk(KERN_WARNING "Please see the file "
			       "Documentation/feature-removal-schedule.txt "
			       "in the kernel source tree for more details.\n");
		}
	}

#ifdef CONFIG_UNUSED_SYMBOLS
	if (syms->unused && fsa->warn) {
		printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
		       "however this module is using it.\n", fsa->name);
		printk(KERN_WARNING
		       "This symbol will go away in the future.\n");
		printk(KERN_WARNING
		       "Please evalute if this is the right api to use and if "
		       "it really is, submit a report the linux kernel "
		       "mailinglist together with submitting your code for "
		       "inclusion.\n");
	}
#endif /* CONFIG_UNUSED_SYMBOLS */

	fsa->owner = owner;
	fsa->crc = symversion(syms->crcs, symnum);
	fsa->sym = &syms->start[symnum];
	return true;
}

/* Find a symbol and return it, along with (optional) crc and
 * (optional) module which owns it */
static const struct kernel_symbol *find_symbol(const char *name,
					       struct module **owner,
					       const unsigned long **crc,
					       bool gplok, bool warn)
{
	struct find_symbol_arg fsa;

	fsa.name = name;
	fsa.gplok = gplok;
	fsa.warn = warn;

	if (each_symbol(find_symbol_in_section, &fsa)) {
		if (owner != NULL)
			*owner = fsa.owner;
		if (crc != NULL)
			*crc = fsa.crc;
		return fsa.sym;
	}
	return NULL;
}
#endif /* KSPLICE_NO_KERNEL_SUPPORT */

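/* find_symbol() above is the local stand-in for the kernel's own lookup: it
 * fills a find_symbol_arg with the requested name and flags, walks every
 * export table via each_symbol(), and on a hit optionally reports the owning
 * module and CRC to the caller along with the kernel_symbol entry itself. */
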
#ifdef CONFIG_KALLSYMS
#ifdef KSPLICE_NO_KERNEL_SUPPORT
static abort_t other_module_lookup(struct module_pack *pack, const char *name,
				   struct list_head *vals)
{
	abort_t ret = OK;
	struct accumulate_struct acc = { name, vals };
	const struct module *m;

	list_for_each_entry(m, &modules, list) {
		if (starts_with(m->name, pack->name) ||
		    !ends_with(m->name, pack->target_name))
			continue;
		ret = (__force abort_t)
		    module_kallsyms_on_each_symbol(m, accumulate_matching_names,
						   &acc);
		if (ret != OK)
			break;
	}
	return ret;
}
#else /* !KSPLICE_NO_KERNEL_SUPPORT */
static abort_t other_module_lookup(struct module_pack *pack, const char *name,
				   struct list_head *vals)
{
	abort_t ret = OK;
	struct accumulate_struct acc = { name, vals };
	struct ksplice_module_list_entry *entry;

	list_for_each_entry(entry, &ksplice_module_list, list) {
		if (entry->target != pack->target ||
		    entry->primary == pack->primary)
			continue;
		ret = (__force abort_t)
		    module_kallsyms_on_each_symbol(entry->primary,
						   accumulate_matching_names,
						   &acc);
		if (ret != OK)
			return ret;
	}
	if (pack->target == NULL)
		return OK;
	ret = (__force abort_t)
	    module_kallsyms_on_each_symbol(pack->target,
					   accumulate_matching_names, &acc);
	return ret;
}
#endif /* KSPLICE_NO_KERNEL_SUPPORT */

static int accumulate_matching_names(void *data, const char *sym_name,
				     unsigned long sym_val)
{
	abort_t ret = OK;
	struct accumulate_struct *acc = data;

	if (strcmp(sym_name, acc->desired_name) == 0)
		ret = add_candidate_val(acc->vals, sym_val);
	return (__force int)ret;
}
#endif /* CONFIG_KALLSYMS */

#ifdef KSPLICE_STANDALONE
static abort_t brute_search(struct module_pack *pack,
			    const struct ksplice_size *s,
			    const void *start, unsigned long len,
			    struct list_head *vals)
{
	unsigned long addr;
	char run, pre;
	abort_t ret;

	for (addr = (unsigned long)start; addr < (unsigned long)start + len;
	     addr++) {
		if (addr % 100000 == 0)
			yield();

		if (probe_kernel_read(&run, (void *)addr, 1) == -EFAULT)
			return OK;

		pre = *(const unsigned char *)(s->thismod_addr);

		if (run != pre)
			continue;

		ret = try_addr(pack, s, addr, NULL, RUN_PRE_INITIAL);
		if (ret == OK) {
			ret = add_candidate_val(vals, addr);
			if (ret != OK)
				return ret;
		} else if (ret != NO_MATCH) {
			return ret;
		}
	}
	return OK;
}

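/* brute_search() is the standalone fallback when no symbol source produced a
 * usable address: it walks the candidate region one byte at a time, does a
 * cheap first-byte comparison against the pre code, and only then pays for a
 * full run-pre comparison via try_addr().  Every address that survives the
 * full comparison is added as a candidate. */
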
static abort_t brute_search_all(struct module_pack *pack,
				const struct ksplice_size *s,
				struct list_head *vals)
{
	struct module *m;
	abort_t ret = OK;
	int saved_debug;

	ksdebug(pack, "brute_search: searching for %s\n", s->symbol->label);
	saved_debug = pack->bundle->debug;
	pack->bundle->debug = 0;

	list_for_each_entry(m, &modules, list) {
		if (starts_with(m->name, pack->name) ||
		    ends_with(m->name, "_helper"))
			continue;
		ret = brute_search(pack, s, m->module_core, m->core_size, vals);
		if (ret != OK)
			break;
		ret = brute_search(pack, s, m->module_init, m->init_size, vals);
		if (ret != OK)
			break;
	}

	if (ret == OK)
		ret = brute_search(pack, s, (const void *)init_mm.start_code,
				   init_mm.end_code - init_mm.start_code, vals);

	pack->bundle->debug = saved_debug;
	return ret;
}

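/* brute_search_all() scans the core and init text of every loaded module
 * (skipping this pack's own modules and the _helper module) and then the
 * kernel's own text between init_mm.start_code and init_mm.end_code.  Debug
 * output is suppressed for the duration, since logging every probed address
 * would flood the debug buffer. */
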
#ifdef CONFIG_KALLSYMS
/* Modified version of Linux's kallsyms_lookup_name */
static abort_t kernel_lookup(const char *name, struct list_head *vals)
{
	abort_t ret;
	unsigned long i;
	char namebuf[KSYM_NAME_LEN + 1];
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
	unsigned long off;
#endif /* LINUX_VERSION_CODE */

/* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
 * 2.6.10 was the first release after this commit
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
	for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
		off = ksplice_kallsyms_expand_symbol(off, namebuf);

		if (strcmp(namebuf, name) == 0) {
			ret = add_candidate_val(vals, kallsyms_addresses[i]);
			if (ret != OK)
				return ret;
		}
	}
#else /* LINUX_VERSION_CODE < */
	const char *knames;

	for (i = 0, knames = kallsyms_names; i < kallsyms_num_syms; i++) {
		unsigned prefix = *knames++;

		strlcpy(namebuf + prefix, knames, KSYM_NAME_LEN - prefix);

		if (strcmp(namebuf, name) == 0) {
			ret = add_candidate_val(vals, kallsyms_addresses[i]);
			if (ret != OK)
				return ret;
		}

		knames += strlen(knames) + 1;
	}
#endif /* LINUX_VERSION_CODE */
	return OK;
}

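/* On 2.6.10 and later the kallsyms name table is compressed, so each entry is
 * expanded through ksplice_kallsyms_expand_symbol() before comparison; on
 * older kernels the names are stored with simple prefix sharing, so the loop
 * rebuilds each name by keeping the first 'prefix' bytes of the previous one
 * and copying the remainder from the table. */
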
/* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
 * 2.6.10 was the first release after this commit
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
extern u8 kallsyms_token_table[];
extern u16 kallsyms_token_index[];
/* Modified version of Linux's kallsyms_expand_symbol */
static unsigned long ksplice_kallsyms_expand_symbol(unsigned long off,
						    char *result)
{
	long len, skipped_first = 0;
	const u8 *tptr, *data;

	data = &kallsyms_names[off];
	len = *data;
	data++;
	off += len + 1;

	while (len) {
		tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
		data++;
		len--;
		while (*tptr) {
			if (skipped_first) {
				*result = *tptr;
				result++;
			} else
				skipped_first = 1;
			tptr++;
		}
	}
	*result = '\0';
	return off;
}
#endif /* LINUX_VERSION_CODE */

#ifdef KSPLICE_NO_KERNEL_SUPPORT
static int module_kallsyms_on_each_symbol(const struct module *mod,
					  int (*fn)(void *, const char *,
						    unsigned long),
					  void *data)
{
	unsigned int i;
	int ret;

	for (i = 0; i < mod->num_symtab; i++) {
		if ((ret =
		     fn(data, mod->strtab + mod->symtab[i].st_name,
			mod->symtab[i].st_value)) != 0)
			return ret;
	}
	return 0;
}
#endif /* KSPLICE_NO_KERNEL_SUPPORT */
#endif /* CONFIG_KALLSYMS */

#else /* !KSPLICE_STANDALONE */

static abort_t kernel_lookup(const char *name, struct list_head *vals)
{
	struct accumulate_struct acc = { name, vals };
	return (__force abort_t)
	    kernel_kallsyms_on_each_symbol(accumulate_matching_names, &acc);
}
#endif /* KSPLICE_STANDALONE */

static abort_t add_candidate_val(struct list_head *vals, unsigned long val)
{
	struct candidate_val *tmp, *new;

	list_for_each_entry(tmp, vals, list) {
		if (tmp->val == val)
			return OK;
	}
	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (new == NULL)
		return OUT_OF_MEMORY;
	new->val = val;
	list_add(&new->list, vals);
	return OK;
}

static void release_vals(struct list_head *vals)
{
	clear_list(vals, struct candidate_val, list);
}

static struct reloc_nameval *find_nameval(struct module_pack *pack,
					  const char *label)
{
	struct reloc_nameval *nv;
	list_for_each_entry(nv, &pack->reloc_namevals, list) {
		if (strcmp(nv->label, label) == 0)
			return nv;
	}
	return NULL;
}

static abort_t create_nameval(struct module_pack *pack, const char *label,
			      unsigned long val, int status)
{
	struct reloc_nameval *nv = find_nameval(pack, label);
	if (nv != NULL)
		return nv->val == val ? OK : NO_MATCH;

	nv = kmalloc(sizeof(*nv), GFP_KERNEL);
	if (nv == NULL)
		return OUT_OF_MEMORY;
	nv->label = label;
	nv->val = val;
	nv->status = status;
	list_add(&nv->list, &pack->reloc_namevals);
	return OK;
}

static abort_t lookup_reloc(struct module_pack *pack, unsigned long addr,
			    const struct ksplice_reloc **relocp)
{
	const struct ksplice_reloc *r;
	int canary_ret;

	for (r = pack->helper_relocs; r < pack->helper_relocs_end; r++) {
		if (addr >= r->blank_addr && addr < r->blank_addr + r->size) {
			canary_ret = contains_canary(pack, r->blank_addr,
						     r->size, r->dst_mask);
			if (canary_ret < 0)
				return UNEXPECTED;
			if (canary_ret == 0) {
				ksdebug(pack, "reloc: skipped %s:%" ADDR
					" (altinstr)\n", r->symbol->label,
					r->blank_addr);
				return NO_MATCH;
			}
			if (addr != r->blank_addr) {
				ksdebug(pack, "Invalid nonzero relocation "
					"offset\n");
				return UNEXPECTED;
			}
			*relocp = r;
			return OK;
		}
	}
	return NO_MATCH;
}

static void set_temp_myst_relocs(struct module_pack *pack, int status_val)
{
	struct reloc_nameval *nv, *n;
	list_for_each_entry_safe(nv, n, &pack->reloc_namevals, list) {
		if (nv->status == TEMP) {
			if (status_val == NOVAL) {
				list_del(&nv->list);
				kfree(nv);
			} else {
				nv->status = status_val;
			}
		}
	}
}

static int contains_canary(struct module_pack *pack, unsigned long blank_addr,
			   int size, long dst_mask)
{
	switch (size) {
	case 1:
		return (*(uint8_t *)blank_addr & dst_mask) ==
		    (0x77 & dst_mask);
	case 2:
		return (*(uint16_t *)blank_addr & dst_mask) ==
		    (0x7777 & dst_mask);
	case 4:
		return (*(uint32_t *)blank_addr & dst_mask) ==
		    (0x77777777 & dst_mask);
#if BITS_PER_LONG >= 64
	case 8:
		return (*(uint64_t *)blank_addr & dst_mask) ==
		    (0x7777777777777777l & dst_mask);
#endif /* BITS_PER_LONG */
	default:
		print_abort(pack, "Invalid relocation size");
		return -1;
	}
}

static int starts_with(const char *str, const char *prefix)
{
	return strncmp(str, prefix, strlen(prefix)) == 0;
}

static int ends_with(const char *str, const char *suffix)
{
	return strlen(str) >= strlen(suffix) &&
	    strcmp(&str[strlen(str) - strlen(suffix)], suffix) == 0;
}

#ifdef CONFIG_DEBUG_FS
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
/* Old kernels don't have debugfs_create_blob */
static ssize_t read_file_blob(struct file *file, char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct debugfs_blob_wrapper *blob = file->private_data;
	return simple_read_from_buffer(user_buf, count, ppos, blob->data,
				       blob->size);
}

static int blob_open(struct inode *inode, struct file *file)
{
	if (inode->i_private)
		file->private_data = inode->i_private;
	return 0;
}

static struct file_operations fops_blob = {
	.read = read_file_blob,
	.open = blob_open,
};

static struct dentry *debugfs_create_blob(const char *name, mode_t mode,
					  struct dentry *parent,
					  struct debugfs_blob_wrapper *blob)
{
	return debugfs_create_file(name, mode, parent, blob, &fops_blob);
}
#endif /* LINUX_VERSION_CODE */

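/* For kernels older than 2.6.17, which lack debugfs_create_blob(), the
 * helpers above provide a minimal read-only replacement: the blob wrapper is
 * stashed in the file's private_data on open, and simple_read_from_buffer()
 * serves its contents on read. */
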
static void clear_debug_buf(struct update_bundle *bundle)
{
	if (bundle->debugfs_dentry == NULL)
		return;
	debugfs_remove(bundle->debugfs_dentry);
	bundle->debugfs_dentry = NULL;
	bundle->debug_blob.size = 0;
	vfree(bundle->debug_blob.data);
	bundle->debug_blob.data = NULL;
}

static abort_t init_debug_buf(struct update_bundle *bundle)
{
	bundle->debug_blob.size = 0;
	bundle->debug_blob.data = NULL;
	bundle->debugfs_dentry =
	    debugfs_create_blob(bundle->name, S_IFREG | S_IRUSR, NULL,
				&bundle->debug_blob);
	if (bundle->debugfs_dentry == NULL)
		return OUT_OF_MEMORY;
	return OK;
}

static int _ksdebug(struct update_bundle *bundle, const char *fmt, ...)
{
	va_list args;
	unsigned long size, old_size, new_size;

	if (bundle->debug == 0)
		return 0;

	/* size includes the trailing '\0' */
	va_start(args, fmt);
	size = 1 + vsnprintf(bundle->debug_blob.data, 0, fmt, args);
	va_end(args);
	old_size = bundle->debug_blob.size == 0 ? 0 :
	    max(PAGE_SIZE, roundup_pow_of_two(bundle->debug_blob.size));
	new_size = bundle->debug_blob.size + size == 0 ? 0 :
	    max(PAGE_SIZE, roundup_pow_of_two(bundle->debug_blob.size + size));
	if (new_size > old_size) {
		char *buf = vmalloc(new_size);
		if (buf == NULL)
			return -ENOMEM;
		memcpy(buf, bundle->debug_blob.data, bundle->debug_blob.size);
		vfree(bundle->debug_blob.data);
		bundle->debug_blob.data = buf;
	}
	va_start(args, fmt);
	bundle->debug_blob.size += vsnprintf(bundle->debug_blob.data +
					     bundle->debug_blob.size,
					     size, fmt, args);
	va_end(args);
	return 0;
}

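/* Debug output for a bundle accumulates in a vmalloc'd blob exposed through
 * debugfs.  The buffer grows in powers of two, never smaller than PAGE_SIZE:
 * e.g. once the existing text plus the new message would exceed 4096 bytes,
 * the blob is reallocated to 8192, the old contents copied over, and the
 * message appended with a second vsnprintf(). */
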
#else /* CONFIG_DEBUG_FS */
static int _ksdebug(struct update_bundle *bundle, const char *fmt, ...)
{
	va_list args;

	if (bundle->debug == 0)
		return 0;

	if (!bundle->debug_continue_line)
		printk(KERN_DEBUG "ksplice: ");

	va_start(args, fmt);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
	vprintk(fmt, args);
#else /* LINUX_VERSION_CODE < */
	/* 683b229286b429244f35726b3c18caec429233bd was after 2.6.8 */
	{
		char *buf = kvasprintf(GFP_KERNEL, fmt, args);
		printk("%s", buf);
		kfree(buf);
	}
#endif /* LINUX_VERSION_CODE */
	va_end(args);

	bundle->debug_continue_line =
	    fmt[0] == '\0' || fmt[strlen(fmt) - 1] != '\n';
	return 0;
}
#endif /* CONFIG_DEBUG_FS */

#ifdef KSPLICE_STANDALONE
static int debug;
module_param(debug, int, 0600);
MODULE_PARM_DESC(debug, "Debug level");

static struct module_pack ksplice_pack = {
	.name = "ksplice_" STR(KSPLICE_KID),
	.kid = "init_" STR(KSPLICE_KID),
	.target_name = NULL,
	.map_printk = MAP_PRINTK,
	.primary = THIS_MODULE,
	.reloc_namevals = LIST_HEAD_INIT(ksplice_pack.reloc_namevals),
};
#endif /* KSPLICE_STANDALONE */

static int init_ksplice(void)
{
#ifdef KSPLICE_STANDALONE
	struct module_pack *pack = &ksplice_pack;
	pack->bundle = init_ksplice_bundle(pack->kid);
	if (pack->bundle == NULL)
		return -ENOMEM;
	add_to_bundle(pack, pack->bundle);
	pack->bundle->debug = debug;
	pack->bundle->abort_cause =
	    apply_relocs(pack, ksplice_init_relocs, ksplice_init_relocs_end);
	if (pack->bundle->abort_cause == OK)
		bootstrapped = true;
#else /* !KSPLICE_STANDALONE */
	ksplice_kobj = kobject_create_and_add("ksplice", kernel_kobj);
	if (ksplice_kobj == NULL)
		return -ENOMEM;
#endif /* KSPLICE_STANDALONE */
	return 0;
}

static void cleanup_ksplice(void)
{
#ifdef KSPLICE_STANDALONE
	cleanup_ksplice_bundle(ksplice_pack.bundle);
#else /* !KSPLICE_STANDALONE */
	kobject_put(ksplice_kobj);
#endif /* KSPLICE_STANDALONE */
}

module_init(init_ksplice);
module_exit(cleanup_ksplice);

2948 MODULE_AUTHOR("Jeffrey Brian Arnold <jbarnold@mit.edu>");
2949 MODULE_DESCRIPTION("Ksplice rebootless update system");
2950 #ifdef KSPLICE_VERSION
2951 MODULE_VERSION(KSPLICE_VERSION
);
2953 MODULE_LICENSE("GPL v2");