/* Copyright (C) 2007-2008  Jeffrey Brian Arnold <jbarnold@mit.edu>
 * Copyright (C) 2008  Anders Kaseorg <andersk@mit.edu>,
 *                     Tim Abbott <tabbott@mit.edu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
#include <linux/module.h>
#include <linux/version.h>
#if defined CONFIG_DEBUG_FS || LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
#include <linux/debugfs.h>
#else /* CONFIG_DEBUG_FS */
/* a7a76cefc4b12bb6508afa4c77f11c2752cc365d was after 2.6.11 */
#endif /* CONFIG_DEBUG_FS */
#include <linux/errno.h>
#include <linux/kallsyms.h>
#include <linux/kobject.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/sysfs.h>
#include <linux/time.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
#include <linux/uaccess.h>
#else /* LINUX_VERSION_CODE < */
/* linux/uaccess.h doesn't exist in kernels before 2.6.18 */
#include <asm/uaccess.h>
#endif /* LINUX_VERSION_CODE */
#include <linux/vmalloc.h>
#ifdef KSPLICE_STANDALONE
#include "ksplice.h"
#else /* !KSPLICE_STANDALONE */
#include <linux/ksplice.h>
#endif /* KSPLICE_STANDALONE */
#ifdef KSPLICE_NEED_PARAINSTRUCTIONS
#include <asm/alternative.h>
#endif /* KSPLICE_NEED_PARAINSTRUCTIONS */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
/* 6e21828743247270d09a86756a0c11702500dbfb was after 2.6.18 */
#endif /* LINUX_VERSION_CODE */

enum stage {
	STAGE_PREPARING, STAGE_APPLIED, STAGE_REVERSED
};

enum run_pre_mode {
	RUN_PRE_INITIAL, RUN_PRE_DEBUG, RUN_PRE_FINAL
};

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
/* 5d7b32de9935c65ca8285ac6ec2382afdbb5d479 was after 2.6.8 */
#define __bitwise__
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
/* af4ca457eaf2d6682059c18463eb106e2ce58198 was after 2.6.14 */
#define __bitwise__ __bitwise
#endif /* LINUX_VERSION_CODE */

typedef int __bitwise__ abort_t;

#define OK ((__force abort_t) 0)
#define NO_MATCH ((__force abort_t) 1)
#define CODE_BUSY ((__force abort_t) 2)
#define MODULE_BUSY ((__force abort_t) 3)
#define OUT_OF_MEMORY ((__force abort_t) 4)
#define FAILED_TO_FIND ((__force abort_t) 5)
#define ALREADY_REVERSED ((__force abort_t) 6)
#define MISSING_EXPORT ((__force abort_t) 7)
#define UNEXPECTED_RUNNING_TASK ((__force abort_t) 8)
#define UNEXPECTED ((__force abort_t) 9)
#ifdef KSPLICE_STANDALONE
#define BAD_SYSTEM_MAP ((__force abort_t) 10)
#endif /* KSPLICE_STANDALONE */
struct update {
	const char *name;
	const char *kid;
	struct kobject kobj;
	enum stage stage;
	abort_t abort_cause;
	int debug;
#ifdef CONFIG_DEBUG_FS
	struct debugfs_blob_wrapper debug_blob;
	struct dentry *debugfs_dentry;
#else /* !CONFIG_DEBUG_FS */
	bool debug_continue_line;
#endif /* CONFIG_DEBUG_FS */
	struct list_head packs;
	struct list_head conflicts;
	struct list_head list;
};

struct conflict {
	const char *process_name;
	pid_t pid;
	struct list_head stack;
	struct list_head list;
};

struct conflict_addr {
	unsigned long addr;
	bool has_conflict;
	const char *label;
	struct list_head list;
};

#if defined(CONFIG_DEBUG_FS) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
/* Old kernels don't have debugfs_create_blob */
struct debugfs_blob_wrapper {
	void *data;
	unsigned long size;
};
#endif /* CONFIG_DEBUG_FS && LINUX_VERSION_CODE */

struct reloc_nameval {
	struct list_head list;
	const char *label;
	unsigned long val;
	enum { NOVAL, TEMP, VAL } status;
};

struct safety_record {
	struct list_head list;
	const char *label;
	unsigned long addr;
	unsigned long size;
	bool first_byte_safe;
};

struct candidate_val {
	struct list_head list;
	unsigned long val;
};

struct accumulate_struct {
	struct ksplice_pack *pack;
	const char *desired_name;
	struct list_head *vals;
};
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
/* c33fa9f5609e918824446ef9a75319d4a802f1f4 was after 2.6.25 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
/* 2fff0a48416af891dce38fd425246e337831e0bb was after 2.6.19 */
static bool virtual_address_mapped(unsigned long addr)
{
	char retval;
	return probe_kernel_address(addr, retval) != -EFAULT;
}
#else /* LINUX_VERSION_CODE < */
static bool virtual_address_mapped(unsigned long addr);
#endif /* LINUX_VERSION_CODE */

static long probe_kernel_read(void *dst, void *src, size_t size)
{
	if (!virtual_address_mapped((unsigned long)src) ||
	    !virtual_address_mapped((unsigned long)src + size))
		return -EFAULT;
	memcpy(dst, src, size);
	return 0;
}
#endif /* LINUX_VERSION_CODE */
static LIST_HEAD(updates);
#ifdef KSPLICE_STANDALONE
#if defined(CONFIG_KSPLICE) || defined(CONFIG_KSPLICE_MODULE)
extern struct list_head ksplice_module_list;
#else /* !CONFIG_KSPLICE */
LIST_HEAD(ksplice_module_list);
#endif /* CONFIG_KSPLICE */
#else /* !KSPLICE_STANDALONE */
LIST_HEAD(ksplice_module_list);
EXPORT_SYMBOL_GPL(ksplice_module_list);
static struct kobject *ksplice_kobj;
#endif /* KSPLICE_STANDALONE */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
/* Old kernels do not have kcalloc
 * e629946abd0bb8266e9c3d0fd1bff2ef8dec5443 was after 2.6.8
 */
static void *kcalloc(size_t n, size_t size, typeof(GFP_KERNEL) flags)
{
	void *mem;
	if (n != 0 && size > ULONG_MAX / n)
		return NULL;
	mem = kmalloc(n * size, flags);
	if (mem)
		memset(mem, 0, n * size);
	return mem;
}
#endif /* LINUX_VERSION_CODE */
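/*
 * Example of the overflow guard in the kcalloc backport above: on a 32-bit
 * kernel, kcalloc(0x20000000, 0x10, GFP_KERNEL) would need 0x200000000
 * bytes, which wraps around in "n * size"; since 0x10 > ULONG_MAX /
 * 0x20000000, the call returns NULL instead of allocating a short buffer.
 */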
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
/* Old kernels do not have kstrdup
 * 543537bd922692bc978e2e356fcd8bfc9c2ee7d5 was 2.6.13-rc4
 */
static char *kstrdup(const char *s, typeof(GFP_KERNEL) gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
#endif /* LINUX_VERSION_CODE */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
/* Old kernels use semaphore instead of mutex
 * 97d1f15b7ef52c1e9c28dc48b454024bb53a5fd2 was after 2.6.16
 */
#define mutex semaphore
#define mutex_lock down
#define mutex_unlock up
#endif /* LINUX_VERSION_CODE */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
/* 11443ec7d9286dd25663516436a14edfb5f43857 was after 2.6.21 */
static char * __attribute_used__
kvasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, va_list ap)
{
	unsigned int len;
	char *p, dummy[1];
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(dummy, 0, fmt, aq);
	va_end(aq);

	p = kmalloc(len + 1, gfp);
	if (!p)
		return NULL;

	vsnprintf(p, len + 1, fmt, ap);

	return p;
}
#endif /* LINUX_VERSION_CODE */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
/* e905914f96e11862b130dd229f73045dad9a34e8 was after 2.6.17 */
static char * __attribute__((format (printf, 2, 3)))
kasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = kvasprintf(gfp, fmt, ap);
	va_end(ap);

	return p;
}
#endif /* LINUX_VERSION_CODE */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
/* 06b2a76d25d3cfbd14680021c1d356c91be6904e was after 2.6.24 */
static int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
{
	char *tail;
	unsigned long val;
	size_t len;

	*res = 0;
	len = strlen(cp);
	if (len == 0)
		return -EINVAL;

	val = simple_strtoul(cp, &tail, base);
	if ((*tail == '\0') ||
	    ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
		*res = val;
		return 0;
	}

	return -EINVAL;
}
#endif /* LINUX_VERSION_CODE */
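/*
 * Example: with base 10, both "37" and "37\n" store 37 in *res and return 0,
 * while "37x" or an empty string leave *res at 0 and return -EINVAL.
 */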
#ifndef task_thread_info
#define task_thread_info(task) (task)->thread_info
#endif /* !task_thread_info */

#ifdef KSPLICE_STANDALONE

static bool bootstrapped = false;

#ifdef CONFIG_KALLSYMS
extern unsigned long kallsyms_addresses[], kallsyms_num_syms;
extern u8 kallsyms_names[];
#endif /* CONFIG_KALLSYMS */

/* defined by ksplice-create */
extern const struct ksplice_reloc ksplice_init_relocs[],
    ksplice_init_relocs_end[];

/* Obtained via System.map */
extern struct list_head modules;
extern struct mutex module_mutex;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) && defined(CONFIG_UNUSED_SYMBOLS)
/* f71d20e961474dde77e6558396efb93d6ac80a4b was after 2.6.17 */
#define KSPLICE_KSYMTAB_UNUSED_SUPPORT 1
#endif /* LINUX_VERSION_CODE */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
/* 9f28bb7e1d0188a993403ab39b774785892805e1 was after 2.6.16 */
#define KSPLICE_KSYMTAB_FUTURE_SUPPORT 1
#endif /* LINUX_VERSION_CODE */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
extern const unsigned long __start___kcrctab[];
extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const unsigned long __start___kcrctab_gpl[];
#ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
extern const struct kernel_symbol __start___ksymtab_unused[];
extern const struct kernel_symbol __stop___ksymtab_unused[];
extern const unsigned long __start___kcrctab_unused[];
extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
extern const unsigned long __start___kcrctab_unused_gpl[];
#endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
#ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
extern const struct kernel_symbol __start___ksymtab_gpl_future[];
extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
extern const unsigned long __start___kcrctab_gpl_future[];
#endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */

#endif /* KSPLICE_STANDALONE */
static struct update *init_ksplice_update(const char *kid);
static void cleanup_ksplice_update(struct update *update);
static void add_to_update(struct ksplice_pack *pack, struct update *update);
static int ksplice_sysfs_init(struct update *update);

/* Preparing the relocations and patches for application */
static abort_t apply_update(struct update *update);
static abort_t activate_pack(struct ksplice_pack *pack);
static abort_t finalize_pack(struct ksplice_pack *pack);
static abort_t finalize_exports(struct ksplice_pack *pack);
static abort_t finalize_patches(struct ksplice_pack *pack);
static abort_t add_dependency_on_address(struct ksplice_pack *pack,
					 unsigned long addr);
static abort_t apply_relocs(struct ksplice_pack *pack,
			    const struct ksplice_reloc *relocs,
			    const struct ksplice_reloc *relocs_end);
static abort_t apply_reloc(struct ksplice_pack *pack,
			   const struct ksplice_reloc *r);
static abort_t read_reloc_value(struct ksplice_pack *pack,
				const struct ksplice_reloc *r,
				unsigned long addr, unsigned long *valp);
static abort_t write_reloc_value(struct ksplice_pack *pack,
				 const struct ksplice_reloc *r,
				 unsigned long addr, unsigned long sym_addr);

/* run-pre matching */
static abort_t match_pack_sections(struct ksplice_pack *pack,
				   bool consider_data_sections);
static abort_t find_section(struct ksplice_pack *pack,
			    const struct ksplice_section *sect);
static abort_t try_addr(struct ksplice_pack *pack,
			const struct ksplice_section *sect,
			unsigned long run_addr,
			struct list_head *safety_records,
			enum run_pre_mode mode);
static abort_t run_pre_cmp(struct ksplice_pack *pack,
			   const struct ksplice_section *sect,
			   unsigned long run_addr,
			   struct list_head *safety_records,
			   enum run_pre_mode mode);
#ifndef CONFIG_FUNCTION_DATA_SECTIONS
/* defined in arch/ARCH/kernel/ksplice-arch.c */
static abort_t arch_run_pre_cmp(struct ksplice_pack *pack,
				const struct ksplice_section *sect,
				unsigned long run_addr,
				struct list_head *safety_records,
				enum run_pre_mode mode);
#endif /* CONFIG_FUNCTION_DATA_SECTIONS */
static void print_bytes(struct ksplice_pack *pack,
			const unsigned char *run, int runc,
			const unsigned char *pre, int prec);
#ifdef KSPLICE_STANDALONE
static abort_t brute_search(struct ksplice_pack *pack,
			    const struct ksplice_section *sect,
			    const void *start, unsigned long len,
			    struct list_head *vals);
static abort_t brute_search_all(struct ksplice_pack *pack,
				const struct ksplice_section *sect,
				struct list_head *vals);
#endif /* KSPLICE_STANDALONE */
static abort_t lookup_reloc(struct ksplice_pack *pack, unsigned long addr,
			    const struct ksplice_reloc **relocp);
static abort_t handle_reloc(struct ksplice_pack *pack,
			    const struct ksplice_reloc *r,
			    unsigned long run_addr, enum run_pre_mode mode);

/* Computing possible addresses for symbols */
static abort_t lookup_symbol(struct ksplice_pack *pack,
			     const struct ksplice_symbol *ksym,
			     struct list_head *vals);
#ifdef KSPLICE_STANDALONE
static abort_t
add_system_map_candidates(struct ksplice_pack *pack,
			  const struct ksplice_system_map *start,
			  const struct ksplice_system_map *end,
			  const char *label, struct list_head *vals);
#endif /* KSPLICE_STANDALONE */
#ifdef CONFIG_KALLSYMS
static abort_t lookup_symbol_kallsyms(struct ksplice_pack *pack,
				      const char *name, struct list_head *vals);
static int accumulate_matching_names(void *data, const char *sym_name,
				     struct module *sym_owner,
				     unsigned long sym_val);
#endif /* CONFIG_KALLSYMS */
static abort_t exported_symbol_lookup(const char *name, struct list_head *vals);
static abort_t new_export_lookup(struct update *update,
				 const char *name, struct list_head *vals);

/* Atomic update insertion and removal */
static abort_t apply_patches(struct update *update);
static abort_t reverse_patches(struct update *update);
static int __apply_patches(void *update);
static int __reverse_patches(void *update);
static abort_t check_each_task(struct update *update);
static abort_t check_task(struct update *update,
			  const struct task_struct *t, bool rerun);
static abort_t check_stack(struct update *update, struct conflict *conf,
			   const struct thread_info *tinfo,
			   const unsigned long *stack);
static abort_t check_address(struct update *update,
			     struct conflict *conf, unsigned long addr);
static abort_t check_record(struct conflict_addr *ca,
			    const struct safety_record *rec,
			    unsigned long addr);
static bool is_stop_machine(const struct task_struct *t);
static void cleanup_conflicts(struct update *update);
static void print_conflicts(struct update *update);
static void insert_trampoline(struct ksplice_trampoline *t);
static void remove_trampoline(const struct ksplice_trampoline *t);
static struct reloc_nameval *find_nameval(struct ksplice_pack *pack,
					  const char *label);
static abort_t create_nameval(struct ksplice_pack *pack, const char *label,
			      unsigned long val, int status);
static abort_t create_safety_record(struct ksplice_pack *pack,
				    const struct ksplice_section *sect,
				    struct list_head *record_list,
				    unsigned long run_addr,
				    unsigned long run_size);
static abort_t add_candidate_val(struct list_head *vals, unsigned long val);
static void prune_trampoline_vals(struct ksplice_pack *pack,
				  struct list_head *vals);
static void release_vals(struct list_head *vals);
static void set_temp_namevals(struct ksplice_pack *pack, int status_val);

static int contains_canary(struct ksplice_pack *pack, unsigned long blank_addr,
			   int size, long dst_mask);
static unsigned long follow_trampolines(struct ksplice_pack *pack,
					unsigned long addr);
static bool patches_module(const struct module *a, const struct module *b);
static bool starts_with(const char *str, const char *prefix);
static bool singular(struct list_head *list);

static abort_t init_debug_buf(struct update *update);
static void clear_debug_buf(struct update *update);
static int __attribute__((format(printf, 2, 3)))
_ksdebug(struct update *update, const char *fmt, ...);
#define ksdebug(pack, fmt, ...) \
	_ksdebug(pack->update, fmt, ## __VA_ARGS__)

#if defined(KSPLICE_STANDALONE) && \
    !defined(CONFIG_KSPLICE) && !defined(CONFIG_KSPLICE_MODULE)
#define KSPLICE_NO_KERNEL_SUPPORT 1
#endif /* KSPLICE_STANDALONE && !CONFIG_KSPLICE && !CONFIG_KSPLICE_MODULE */

#ifdef KSPLICE_NO_KERNEL_SUPPORT
/* Functions defined here that will be exported in later kernels */
#ifdef CONFIG_KALLSYMS
static int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
					     struct module *, unsigned long),
				   void *data);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
static unsigned long ksplice_kallsyms_expand_symbol(unsigned long off,
						    char *result);
#endif /* LINUX_VERSION_CODE */
static int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
						    struct module *,
						    unsigned long),
					  void *data);
#endif /* CONFIG_KALLSYMS */
static struct module *find_module(const char *name);
static int use_module(struct module *a, struct module *b);
static const struct kernel_symbol *find_symbol(const char *name,
					       struct module **owner,
					       const unsigned long **crc,
					       bool gplok, bool warn);
static struct module *__module_data_address(unsigned long addr);
#endif /* KSPLICE_NO_KERNEL_SUPPORT */

/* Architecture-specific functions defined in arch/ARCH/kernel/ksplice-arch.c */
static abort_t prepare_trampoline(struct ksplice_pack *pack,
				  struct ksplice_trampoline *t);
static abort_t trampoline_target(struct ksplice_pack *pack, unsigned long addr,
				 unsigned long *new_addr);
static abort_t handle_paravirt(struct ksplice_pack *pack, unsigned long pre,
			       unsigned long run, int *matched);
static bool valid_stack_ptr(const struct thread_info *tinfo, const void *p);

#ifndef KSPLICE_STANDALONE
#include "ksplice-arch.c"
#elif defined CONFIG_X86
#include "x86/ksplice-arch.c"
#elif defined CONFIG_ARM
#include "arm/ksplice-arch.c"
#endif /* KSPLICE_STANDALONE */
#define clear_list(head, type, member)				\
	do {							\
		struct list_head *_pos, *_n;			\
		list_for_each_safe(_pos, _n, head) {		\
			list_del(_pos);				\
			kfree(list_entry(_pos, type, member));	\
		}						\
	} while (0)
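/*
 * Typical use, as in apply_update() below:
 *   clear_list(&pack->reloc_namevals, struct reloc_nameval, list);
 * walks the list safely, unlinks each node, and kfree()s the containing
 * struct reloc_nameval.
 */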
struct ksplice_attribute {
	struct attribute attr;
	ssize_t (*show)(struct update *update, char *buf);
	ssize_t (*store)(struct update *update, const char *buf, size_t len);
};

static ssize_t ksplice_attr_show(struct kobject *kobj, struct attribute *attr,
				 char *buf)
{
	struct ksplice_attribute *attribute =
	    container_of(attr, struct ksplice_attribute, attr);
	struct update *update = container_of(kobj, struct update, kobj);
	if (attribute->show == NULL)
		return -EIO;
	return attribute->show(update, buf);
}

static ssize_t ksplice_attr_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t len)
{
	struct ksplice_attribute *attribute =
	    container_of(attr, struct ksplice_attribute, attr);
	struct update *update = container_of(kobj, struct update, kobj);
	if (attribute->store == NULL)
		return -EIO;
	return attribute->store(update, buf, len);
}

static struct sysfs_ops ksplice_sysfs_ops = {
	.show = ksplice_attr_show,
	.store = ksplice_attr_store,
};

static void ksplice_release(struct kobject *kobj)
{
	struct update *update;
	update = container_of(kobj, struct update, kobj);
	cleanup_ksplice_update(update);
}
static ssize_t stage_show(struct update *update, char *buf)
{
	switch (update->stage) {
	case STAGE_PREPARING:
		return snprintf(buf, PAGE_SIZE, "preparing\n");
	case STAGE_APPLIED:
		return snprintf(buf, PAGE_SIZE, "applied\n");
	case STAGE_REVERSED:
		return snprintf(buf, PAGE_SIZE, "reversed\n");
	}
	return 0;
}

static ssize_t abort_cause_show(struct update *update, char *buf)
{
	switch (update->abort_cause) {
	case OK:
		return snprintf(buf, PAGE_SIZE, "ok\n");
	case NO_MATCH:
		return snprintf(buf, PAGE_SIZE, "no_match\n");
#ifdef KSPLICE_STANDALONE
	case BAD_SYSTEM_MAP:
		return snprintf(buf, PAGE_SIZE, "bad_system_map\n");
#endif /* KSPLICE_STANDALONE */
	case CODE_BUSY:
		return snprintf(buf, PAGE_SIZE, "code_busy\n");
	case MODULE_BUSY:
		return snprintf(buf, PAGE_SIZE, "module_busy\n");
	case OUT_OF_MEMORY:
		return snprintf(buf, PAGE_SIZE, "out_of_memory\n");
	case FAILED_TO_FIND:
		return snprintf(buf, PAGE_SIZE, "failed_to_find\n");
	case ALREADY_REVERSED:
		return snprintf(buf, PAGE_SIZE, "already_reversed\n");
	case MISSING_EXPORT:
		return snprintf(buf, PAGE_SIZE, "missing_export\n");
	case UNEXPECTED_RUNNING_TASK:
		return snprintf(buf, PAGE_SIZE, "unexpected_running_task\n");
	case UNEXPECTED:
		return snprintf(buf, PAGE_SIZE, "unexpected\n");
	}
	return 0;
}

static ssize_t conflict_show(struct update *update, char *buf)
{
	const struct conflict *conf;
	const struct conflict_addr *ca;
	int used = 0;
	list_for_each_entry(conf, &update->conflicts, list) {
		used += snprintf(buf + used, PAGE_SIZE - used, "%s %d",
				 conf->process_name, conf->pid);
		list_for_each_entry(ca, &conf->stack, list) {
			if (!ca->has_conflict)
				continue;
			used += snprintf(buf + used, PAGE_SIZE - used, " %s",
					 ca->label);
		}
		used += snprintf(buf + used, PAGE_SIZE - used, "\n");
	}
	return used;
}
static ssize_t stage_store(struct update *update, const char *buf, size_t len)
{
	if ((strncmp(buf, "applied", len) == 0 ||
	     strncmp(buf, "applied\n", len) == 0) &&
	    update->stage == STAGE_PREPARING)
		update->abort_cause = apply_update(update);
	else if ((strncmp(buf, "reversed", len) == 0 ||
		  strncmp(buf, "reversed\n", len) == 0) &&
		 update->stage == STAGE_APPLIED)
		update->abort_cause = reverse_patches(update);
	return len;
}
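/*
 * The stage attribute is the user-space trigger: writing "applied" to it
 * while the update is still in STAGE_PREPARING runs apply_update(), and
 * writing "reversed" to an applied update runs reverse_patches().
 */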
static ssize_t debug_show(struct update *update, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", update->debug);
}

static ssize_t debug_store(struct update *update, const char *buf, size_t len)
{
	unsigned long l;
	int ret = strict_strtoul(buf, 10, &l);
	if (ret != 0)
		return ret;
	update->debug = l;
	return len;
}
static struct ksplice_attribute stage_attribute =
	__ATTR(stage, 0600, stage_show, stage_store);
static struct ksplice_attribute abort_cause_attribute =
	__ATTR(abort_cause, 0400, abort_cause_show, NULL);
static struct ksplice_attribute debug_attribute =
	__ATTR(debug, 0600, debug_show, debug_store);
static struct ksplice_attribute conflict_attribute =
	__ATTR(conflicts, 0400, conflict_show, NULL);

static struct attribute *ksplice_attrs[] = {
	&stage_attribute.attr,
	&abort_cause_attribute.attr,
	&debug_attribute.attr,
	&conflict_attribute.attr,
	NULL
};

static struct kobj_type ksplice_ktype = {
	.sysfs_ops = &ksplice_sysfs_ops,
	.release = ksplice_release,
	.default_attrs = ksplice_attrs,
};
static void __attribute__((noreturn)) ksplice_deleted(void)
{
	printk(KERN_CRIT "Called a kernel function deleted by Ksplice!\n");
	BUG();
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
/* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
	for (;;);
#endif /* LINUX_VERSION_CODE */
}
int init_ksplice_pack(struct ksplice_pack *pack)
{
	struct update *update;
	int ret;

#ifdef KSPLICE_STANDALONE
#endif /* KSPLICE_STANDALONE */

	INIT_LIST_HEAD(&pack->reloc_namevals);
	INIT_LIST_HEAD(&pack->safety_records);

	mutex_lock(&module_mutex);
	if (strcmp(pack->target_name, "vmlinux") == 0) {
		pack->target = NULL;
	} else {
		pack->target = find_module(pack->target_name);
		if (pack->target == NULL || !module_is_live(pack->target)) {
		}
		ret = use_module(pack->primary, pack->target);
	}

	list_for_each_entry(update, &updates, list) {
		if (strcmp(pack->kid, update->kid) == 0) {
			if (update->stage != STAGE_PREPARING) {
			}
			add_to_update(pack, update);
		}
	}

	update = init_ksplice_update(pack->kid);
	if (update == NULL) {
	}
	ret = ksplice_sysfs_init(update);
	if (ret != 0)
		cleanup_ksplice_update(update);
	add_to_update(pack, update);
	mutex_unlock(&module_mutex);
}
EXPORT_SYMBOL(init_ksplice_pack);
void cleanup_ksplice_pack(struct ksplice_pack *pack)
{
	if (pack->update == NULL || pack->update->stage == STAGE_APPLIED)
		return;
	mutex_lock(&module_mutex);
	list_del(&pack->list);
	mutex_unlock(&module_mutex);
	if (list_empty(&pack->update->packs))
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
		kobject_put(&pack->update->kobj);
#else /* LINUX_VERSION_CODE < */
/* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
		kobject_unregister(&pack->update->kobj);
#endif /* LINUX_VERSION_CODE */
}
EXPORT_SYMBOL_GPL(cleanup_ksplice_pack);

static struct update *init_ksplice_update(const char *kid)
{
	struct update *update;
	update = kcalloc(1, sizeof(struct update), GFP_KERNEL);
	if (update == NULL)
		return NULL;
	update->name = kasprintf(GFP_KERNEL, "ksplice_%s", kid);
	if (update->name == NULL) {
		kfree(update);
		return NULL;
	}
	update->kid = kstrdup(kid, GFP_KERNEL);
	if (update->kid == NULL) {
		kfree(update->name);
		kfree(update);
		return NULL;
	}
	INIT_LIST_HEAD(&update->packs);
	if (init_debug_buf(update) != OK) {
		kfree(update->kid);
		kfree(update->name);
		kfree(update);
		return NULL;
	}
	list_add(&update->list, &updates);
	update->stage = STAGE_PREPARING;
	update->abort_cause = OK;
	INIT_LIST_HEAD(&update->conflicts);
	return update;
}

static void cleanup_ksplice_update(struct update *update)
{
#ifdef KSPLICE_STANDALONE
	mutex_lock(&module_mutex);
	list_del(&update->list);
	mutex_unlock(&module_mutex);
#else /* !KSPLICE_STANDALONE */
	mutex_lock(&module_mutex);
	list_del(&update->list);
	mutex_unlock(&module_mutex);
#endif /* KSPLICE_STANDALONE */
	cleanup_conflicts(update);
	clear_debug_buf(update);
	kfree(update->kid);
	kfree(update->name);
	kfree(update);
}
static void add_to_update(struct ksplice_pack *pack, struct update *update)
{
	pack->update = update;
	list_add(&pack->list, &update->packs);
	pack->module_list_entry.target = pack->target;
	pack->module_list_entry.primary = pack->primary;
}

static int ksplice_sysfs_init(struct update *update)
{
	int ret = 0;
	memset(&update->kobj, 0, sizeof(update->kobj));
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
#ifndef KSPLICE_STANDALONE
	ret = kobject_init_and_add(&update->kobj, &ksplice_ktype,
				   ksplice_kobj, "%s", update->kid);
#else /* KSPLICE_STANDALONE */
/* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
	ret = kobject_init_and_add(&update->kobj, &ksplice_ktype,
				   &THIS_MODULE->mkobj.kobj, "ksplice");
#endif /* KSPLICE_STANDALONE */
#else /* LINUX_VERSION_CODE < */
	ret = kobject_set_name(&update->kobj, "%s", "ksplice");
	if (ret != 0)
		return ret;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
	update->kobj.parent = &THIS_MODULE->mkobj.kobj;
#else /* LINUX_VERSION_CODE < */
/* b86ab02803095190d6b72bcc18dcf620bf378df9 was after 2.6.10 */
	update->kobj.parent = &THIS_MODULE->mkobj->kobj;
#endif /* LINUX_VERSION_CODE */
	update->kobj.ktype = &ksplice_ktype;
	ret = kobject_register(&update->kobj);
#endif /* LINUX_VERSION_CODE */
	if (ret != 0)
		return ret;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
	kobject_uevent(&update->kobj, KOBJ_ADD);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
/* 312c004d36ce6c739512bac83b452f4c20ab1f62 was after 2.6.14 */
/* 12025235884570ba7f02a6f427f973ac6be7ec54 was after 2.6.9 */
	kobject_uevent(&update->kobj, KOBJ_ADD, NULL);
#endif /* LINUX_VERSION_CODE */
	return 0;
}
static abort_t apply_update(struct update *update)
{
	struct ksplice_pack *pack;
	abort_t ret;

	mutex_lock(&module_mutex);
#ifdef KSPLICE_NEED_PARAINSTRUCTIONS
	list_for_each_entry(pack, &update->packs, list) {
		if (pack->target == NULL) {
			apply_paravirt(pack->primary_parainstructions,
				       pack->primary_parainstructions_end);
			apply_paravirt(pack->helper_parainstructions,
				       pack->helper_parainstructions_end);
		}
	}
#endif /* KSPLICE_NEED_PARAINSTRUCTIONS */

	list_for_each_entry(pack, &update->packs, list) {
		ret = activate_pack(pack);
	}

	ret = apply_patches(update);

	list_for_each_entry(pack, &update->packs, list) {
		clear_list(&pack->reloc_namevals, struct reloc_nameval, list);
		if (update->stage == STAGE_PREPARING)
			clear_list(&pack->safety_records, struct safety_record,
				   list);
	}
	mutex_unlock(&module_mutex);
	return ret;
}
static abort_t activate_pack(struct ksplice_pack *pack)
{
	abort_t ret;

	ksdebug(pack, "Preparing and checking %s\n", pack->name);
	ret = match_pack_sections(pack, false);
	if (ret == NO_MATCH) {
		/* It is possible that by using relocations from .data sections
		   we can successfully run-pre match the rest of the sections.
		   To avoid using any symbols obtained from .data sections
		   (which may be unreliable) in the post code, we first prepare
		   the post code and then try to run-pre match the remaining
		   sections with the help of .data sections.
		 */
		ksdebug(pack, "Continuing without some sections; we might "
			"find them later.\n");
		ret = finalize_pack(pack);
		if (ret != OK) {
			ksdebug(pack, "Aborted. Unable to continue without "
				"the unmatched sections.\n");
			return ret;
		}

		ksdebug(pack, "run-pre: Considering .data sections to find the "
			"unmatched sections\n");
		ret = match_pack_sections(pack, true);
		if (ret != OK)
			return ret;

		ksdebug(pack, "run-pre: Found all previously unmatched "
			"sections\n");
		return OK;
	} else if (ret != OK) {
		return ret;
	}

	return finalize_pack(pack);
}
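/*
 * Overall flow of activate_pack():
 *   1. match_pack_sections(pack, false)  - run-pre match using only
 *      non-.data sections;
 *   2. on NO_MATCH, finalize_pack(pack)  - resolve the post code without
 *      any .data-derived symbols;
 *   3. match_pack_sections(pack, true)   - retry the leftover sections, now
 *      allowing .data relocations to supply candidate addresses.
 */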
static abort_t finalize_pack(struct ksplice_pack *pack)
{
	abort_t ret;
	ret = apply_relocs(pack, pack->primary_relocs,
			   pack->primary_relocs_end);
	if (ret != OK)
		return ret;

	ret = finalize_patches(pack);
	if (ret != OK)
		return ret;

	ret = finalize_exports(pack);
	if (ret != OK)
		return ret;

	return OK;
}

static abort_t finalize_exports(struct ksplice_pack *pack)
{
	struct ksplice_export *exp;
	struct module *m;
	const struct kernel_symbol *sym;

	for (exp = pack->exports; exp < pack->exports_end; exp++) {
		sym = find_symbol(exp->name, &m, NULL, true, false);
		if (sym == NULL) {
			ksdebug(pack, "Could not find kernel_symbol struct for "
				"%s\n", exp->name);
			return MISSING_EXPORT;
		}

		/* Cast away const since we are planning to mutate the
		 * kernel_symbol structure. */
		exp->sym = (struct kernel_symbol *)sym;
		exp->saved_name = exp->sym->name;
		if (m != pack->primary && use_module(pack->primary, m) != 1) {
			ksdebug(pack, "Aborted. Could not add dependency on "
				"symbol %s from module %s.\n", sym->name,
				m->name);
		}
	}
	return OK;
}
static abort_t finalize_patches(struct ksplice_pack *pack)
{
	struct ksplice_patch *p;
	struct safety_record *rec;
	abort_t ret;

	for (p = pack->patches; p < pack->patches_end; p++) {
		struct reloc_nameval *nv = find_nameval(pack, p->label);
		if (nv == NULL) {
			ksdebug(pack, "Failed to find %s for oldaddr\n",
				p->label);
			return FAILED_TO_FIND;
		}
		p->trampoline.oldaddr = nv->val;

		list_for_each_entry(rec, &pack->safety_records, list) {
			if (strcmp(rec->label, p->label) == 0 &&
			    follow_trampolines(pack, p->trampoline.oldaddr)
			    == rec->addr)
				break;
		}
		if (&rec->list == &pack->safety_records) {
			ksdebug(pack, "No safety record for patch %s\n",
				p->label);
			return FAILED_TO_FIND;
		}
		if (rec->size < p->trampoline.size) {
			ksdebug(pack, "Symbol %s is too short for trampoline\n",
				p->label);
			return UNEXPECTED;
		}

		if (p->trampoline.repladdr == 0)
			p->trampoline.repladdr = (unsigned long)ksplice_deleted;

		rec->first_byte_safe = true;

		ret = prepare_trampoline(pack, &p->trampoline);
		if (ret != OK)
			return ret;

		if (p->trampoline.oldaddr != rec->addr) {
			/* If there's already a trampoline at oldaddr, prepare
			   a reverse trampoline to install there */
			p->reverse_trampoline.oldaddr = rec->addr;
			p->reverse_trampoline.repladdr = p->trampoline.oldaddr;
			ret = prepare_trampoline(pack, &p->reverse_trampoline);
			if (ret != OK)
				return ret;
		} else {
			p->reverse_trampoline.size = 0;
		}

		ret = add_dependency_on_address(pack, p->trampoline.oldaddr);
		if (ret != OK)
			return ret;
	}
	return OK;
}
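/*
 * Example of the reverse-trampoline case above (addresses invented): if an
 * earlier update already redirected the function at 0xc0100000 to its own
 * replacement at 0xc0200000, follow_trampolines() makes rec->addr 0xc0200000
 * while p->trampoline.oldaddr stays 0xc0100000.  The new trampoline
 * overwrites 0xc0100000, and the reverse trampoline overwrites 0xc0200000
 * with a jump back to 0xc0100000, so callers holding either address end up
 * in the newest replacement code.
 */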
static abort_t add_dependency_on_address(struct ksplice_pack *pack,
					 unsigned long addr)
{
	struct module *m =
	    __module_text_address(follow_trampolines(pack, addr));
	if (m == NULL || m == pack->primary)
		return OK;
	if (use_module(pack->primary, m) != 1)
		return MODULE_BUSY;
	return OK;
}

static abort_t apply_relocs(struct ksplice_pack *pack,
			    const struct ksplice_reloc *relocs,
			    const struct ksplice_reloc *relocs_end)
{
	const struct ksplice_reloc *r;
	for (r = relocs; r < relocs_end; r++) {
		abort_t ret = apply_reloc(pack, r);
		if (ret != OK)
			return ret;
	}
	return OK;
}
static abort_t apply_reloc(struct ksplice_pack *pack,
			   const struct ksplice_reloc *r)
{
	abort_t ret;
	int canary_ret;
	unsigned long sym_addr;
	LIST_HEAD(vals);

	canary_ret = contains_canary(pack, r->blank_addr, r->size, r->dst_mask);
	if (canary_ret < 0)
		return UNEXPECTED;
	if (canary_ret == 0) {
		ksdebug(pack, "reloc: skipped %s:%lx (altinstr)\n",
			r->symbol->label, r->blank_offset);
		return OK;
	}

#ifdef KSPLICE_STANDALONE
	if (!bootstrapped) {
		ret = add_system_map_candidates(pack,
						pack->primary_system_map,
						pack->primary_system_map_end,
						r->symbol->label, &vals);
		if (ret != OK) {
			release_vals(&vals);
			return ret;
		}
	}
#endif /* KSPLICE_STANDALONE */
	ret = lookup_symbol(pack, r->symbol, &vals);
	if (ret != OK) {
		release_vals(&vals);
		return ret;
	}

	if (!singular(&vals)) {
		release_vals(&vals);
		ksdebug(pack, "Failed to find %s for reloc\n",
			r->symbol->label);
		return FAILED_TO_FIND;
	}
	sym_addr = list_entry(vals.next, struct candidate_val, list)->val;
	release_vals(&vals);

	ret = write_reloc_value(pack, r, r->blank_addr,
				r->pcrel ? sym_addr - r->blank_addr : sym_addr);
	if (ret != OK)
		return ret;

	ksdebug(pack, "reloc: %s:%lx", r->symbol->label, r->blank_offset);
	ksdebug(pack, "(S=%lx A=%lx ", sym_addr, r->addend);
	switch (r->size) {
	case 1:
		ksdebug(pack, "aft=%02x)\n", *(uint8_t *)r->blank_addr);
		break;
	case 2:
		ksdebug(pack, "aft=%04x)\n", *(uint16_t *)r->blank_addr);
		break;
	case 4:
		ksdebug(pack, "aft=%08x)\n", *(uint32_t *)r->blank_addr);
		break;
#if BITS_PER_LONG >= 64
	case 8:
		ksdebug(pack, "aft=%016llx)\n", *(uint64_t *)r->blank_addr);
		break;
#endif /* BITS_PER_LONG */
	default:
		ksdebug(pack, "Aborted. Invalid relocation size.\n");
		return UNEXPECTED;
	}

#ifdef KSPLICE_STANDALONE
#endif /* KSPLICE_STANDALONE */
	/* Create namevals so that we can verify our choices in the second
	   round of run-pre matching that considers data sections. */
	ret = create_nameval(pack, r->symbol->label, sym_addr, VAL);
	if (ret != OK)
		return ret;

	return add_dependency_on_address(pack, sym_addr);
}
static abort_t read_reloc_value(struct ksplice_pack *pack,
				const struct ksplice_reloc *r,
				unsigned long addr, unsigned long *valp)
{
	unsigned char bytes[sizeof(long)];
	unsigned long val;

	if (probe_kernel_read(bytes, (void *)addr, r->size) == -EFAULT)
		return NO_MATCH;

	switch (r->size) {
	case 1:
		val = *(uint8_t *)bytes;
		break;
	case 2:
		val = *(uint16_t *)bytes;
		break;
	case 4:
		val = *(uint32_t *)bytes;
		break;
#if BITS_PER_LONG >= 64
	case 8:
		val = *(uint64_t *)bytes;
		break;
#endif /* BITS_PER_LONG */
	default:
		ksdebug(pack, "Aborted. Invalid relocation size.\n");
		return UNEXPECTED;
	}

	val &= r->dst_mask;
	if (r->signed_addend)
		val |= -(val & (r->dst_mask & ~(r->dst_mask >> 1)));
	val <<= r->rightshift;
	*valp = val;
	return OK;
}
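/*
 * Worked example of the sign extension above: for a 2-byte field with
 * dst_mask 0xffff, "dst_mask & ~(dst_mask >> 1)" is 0x8000, the sign bit.
 * A stored value of 0x8004 therefore becomes 0x8004 | -(0x8000), i.e.
 * 0xffff8004 on a 32-bit kernel, before being shifted left by rightshift.
 */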
static abort_t write_reloc_value(struct ksplice_pack *pack,
				 const struct ksplice_reloc *r,
				 unsigned long addr, unsigned long sym_addr)
{
	unsigned long val = sym_addr + r->addend;
	val >>= r->rightshift;
	switch (r->size) {
	case 1:
		*(uint8_t *)addr =
		    (*(uint8_t *)addr & ~r->dst_mask) | (val & r->dst_mask);
		break;
	case 2:
		*(uint16_t *)addr =
		    (*(uint16_t *)addr & ~r->dst_mask) | (val & r->dst_mask);
		break;
	case 4:
		*(uint32_t *)addr =
		    (*(uint32_t *)addr & ~r->dst_mask) | (val & r->dst_mask);
		break;
#if BITS_PER_LONG >= 64
	case 8:
		*(uint64_t *)addr =
		    (*(uint64_t *)addr & ~r->dst_mask) | (val & r->dst_mask);
		break;
#endif /* BITS_PER_LONG */
	default:
		ksdebug(pack, "Aborted. Invalid relocation size.\n");
		return UNEXPECTED;
	}

	if (read_reloc_value(pack, r, addr, &val) != OK || val != sym_addr) {
		ksdebug(pack, "Aborted. Relocation overflow.\n");
		return UNEXPECTED;
	}
	return OK;
}
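/*
 * The read-back check above is an overflow guard: if the computed value does
 * not fit in dst_mask (say 0x12345 into a 16-bit field masked with 0xffff),
 * the bits written back do not reproduce sym_addr, so the function reports
 * "Relocation overflow" instead of silently truncating.
 */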
static abort_t match_pack_sections(struct ksplice_pack *pack,
				   bool consider_data_sections)
{
	const struct ksplice_section *sect;
	abort_t ret;
	char *finished;
	int i, remaining = 0;
	bool progress;

	finished = kcalloc(pack->helper_sections_end - pack->helper_sections,
			   sizeof(*finished), GFP_KERNEL);
	if (finished == NULL)
		return OUT_OF_MEMORY;
	for (sect = pack->helper_sections; sect < pack->helper_sections_end;
	     sect++) {
		if ((sect->flags & KSPLICE_SECTION_DATA) == 0)
			remaining++;
	}

	while (remaining > 0) {
		progress = false;
		for (sect = pack->helper_sections;
		     sect < pack->helper_sections_end; sect++) {
			i = sect - pack->helper_sections;
			if (finished[i])
				continue;
			if (!consider_data_sections &&
			    (sect->flags & KSPLICE_SECTION_DATA) != 0)
				continue;
			ret = find_section(pack, sect);
			if (ret == OK) {
				finished[i] = 1;
				if ((sect->flags & KSPLICE_SECTION_DATA) == 0)
					remaining--;
				progress = true;
			} else if (ret != NO_MATCH) {
				kfree(finished);
				return ret;
			}
		}

		if (progress)
			continue;

		for (sect = pack->helper_sections;
		     sect < pack->helper_sections_end; sect++) {
			i = sect - pack->helper_sections;
			if (finished[i] == 0)
				ksdebug(pack, "run-pre: could not match "
					"section %s\n", sect->symbol->label);
		}
		ksdebug(pack, "Aborted. run-pre: could not match some "
			"sections.\n");
		kfree(finished);
		return NO_MATCH;
	}
	kfree(finished);
	return OK;
}
static abort_t find_section(struct ksplice_pack *pack,
			    const struct ksplice_section *sect)
{
	abort_t ret;
	unsigned long run_addr;
	LIST_HEAD(vals);
	struct candidate_val *v, *n;

#ifdef KSPLICE_STANDALONE
	ret = add_system_map_candidates(pack, pack->helper_system_map,
					pack->helper_system_map_end,
					sect->symbol->label, &vals);
	if (ret != OK) {
		release_vals(&vals);
		return ret;
	}
#endif /* KSPLICE_STANDALONE */
	ret = lookup_symbol(pack, sect->symbol, &vals);
	if (ret != OK) {
		release_vals(&vals);
		return ret;
	}

	ksdebug(pack, "run-pre: starting sect search for %s\n",
		sect->symbol->label);

	list_for_each_entry_safe(v, n, &vals, list) {
		run_addr = v->val;
		ret = try_addr(pack, sect, run_addr, NULL, RUN_PRE_INITIAL);
		if (ret == NO_MATCH) {
			list_del(&v->list);
			kfree(v);
		} else if (ret != OK) {
			release_vals(&vals);
			return ret;
		}
	}

#ifdef KSPLICE_STANDALONE
	if (list_empty(&vals) && (sect->flags & KSPLICE_SECTION_DATA) == 0) {
		ret = brute_search_all(pack, sect, &vals);
		if (ret != OK) {
			release_vals(&vals);
			return ret;
		}
		/* Make sure run-pre matching output is displayed if
		   brute_search succeeds */
		prune_trampoline_vals(pack, &vals);
		if (singular(&vals)) {
			run_addr = list_entry(vals.next, struct candidate_val,
					      list)->val;
			ret = try_addr(pack, sect, run_addr, NULL,
				       RUN_PRE_DEBUG);
			if (ret != OK) {
				ksdebug(pack, "run-pre: Debug run failed for "
					"sect %s:\n", sect->symbol->label);
				release_vals(&vals);
				return ret;
			}
		}
	}
#endif /* KSPLICE_STANDALONE */

	prune_trampoline_vals(pack, &vals);
	if (singular(&vals)) {
		LIST_HEAD(safety_records);
		run_addr = list_entry(vals.next, struct candidate_val,
				      list)->val;
		ret = try_addr(pack, sect, run_addr, &safety_records,
			       RUN_PRE_FINAL);
		release_vals(&vals);
		if (ret != OK) {
			clear_list(&safety_records, struct safety_record, list);
			ksdebug(pack, "run-pre: Final run failed for sect "
				"%s:\n", sect->symbol->label);
		}
		list_splice(&safety_records, &pack->safety_records);
		return ret;
	} else if (!list_empty(&vals)) {
		struct candidate_val *val;
		ksdebug(pack, "run-pre: multiple candidates for sect %s:\n",
			sect->symbol->label);
		list_for_each_entry(val, &vals, list) {
			ksdebug(pack, "%lx\n", val->val);
		}
		ksdebug(pack, "...\n");
		release_vals(&vals);
		return NO_MATCH;
	}

	release_vals(&vals);
	return NO_MATCH;
}
static abort_t try_addr(struct ksplice_pack *pack,
			const struct ksplice_section *sect,
			unsigned long run_addr,
			struct list_head *safety_records,
			enum run_pre_mode mode)
{
	abort_t ret;
	const struct module *run_module;

	if ((sect->flags & KSPLICE_SECTION_RODATA) != 0 ||
	    (sect->flags & KSPLICE_SECTION_DATA) != 0)
		run_module = __module_data_address(run_addr);
	else
		run_module = __module_text_address(run_addr);
	if (!patches_module(run_module, pack->target)) {
		ksdebug(pack, "run-pre: ignoring address %lx in other module "
			"%s for sect %s\n", run_addr, run_module == NULL ?
			"vmlinux" : run_module->name, sect->symbol->label);
		return NO_MATCH;
	}

	ret = create_nameval(pack, sect->symbol->label, run_addr, TEMP);
	if (ret != OK)
		return ret;

#ifdef CONFIG_FUNCTION_DATA_SECTIONS
	ret = run_pre_cmp(pack, sect, run_addr, safety_records, mode);
#else /* !CONFIG_FUNCTION_DATA_SECTIONS */
	if ((sect->flags & KSPLICE_SECTION_TEXT) != 0)
		ret = arch_run_pre_cmp(pack, sect, run_addr, safety_records,
				       mode);
	else
		ret = run_pre_cmp(pack, sect, run_addr, safety_records, mode);
#endif /* CONFIG_FUNCTION_DATA_SECTIONS */
	if (ret == NO_MATCH && mode != RUN_PRE_FINAL) {
		set_temp_namevals(pack, NOVAL);
		ksdebug(pack, "run-pre: %s sect %s does not match (r_a=%lx "
			"p_a=%lx, s=%lx)\n",
			(sect->flags & KSPLICE_SECTION_RODATA) != 0 ? "data" :
			"text", sect->symbol->label, run_addr,
			sect->thismod_addr, sect->size);
		ksdebug(pack, "run-pre: ");
		if (pack->update->debug >= 1) {
#ifdef CONFIG_FUNCTION_DATA_SECTIONS
			ret = run_pre_cmp(pack, sect, run_addr, safety_records,
					  RUN_PRE_DEBUG);
#else /* !CONFIG_FUNCTION_DATA_SECTIONS */
			if ((sect->flags & KSPLICE_SECTION_TEXT) != 0)
				ret = arch_run_pre_cmp(pack, sect, run_addr,
						       safety_records,
						       RUN_PRE_DEBUG);
			else
				ret = run_pre_cmp(pack, sect, run_addr,
						  safety_records,
						  RUN_PRE_DEBUG);
#endif /* CONFIG_FUNCTION_DATA_SECTIONS */
			set_temp_namevals(pack, NOVAL);
		}
		ksdebug(pack, "\n");
		return NO_MATCH;
	} else if (ret != OK) {
		set_temp_namevals(pack, NOVAL);
		return ret;
	}

	if (mode != RUN_PRE_FINAL) {
		set_temp_namevals(pack, NOVAL);
		ksdebug(pack, "run-pre: candidate for sect %s=%lx\n",
			sect->symbol->label, run_addr);
		return OK;
	}

	set_temp_namevals(pack, VAL);
	ksdebug(pack, "run-pre: found sect %s=%lx\n", sect->symbol->label,
		run_addr);
	return OK;
}
static abort_t run_pre_cmp(struct ksplice_pack *pack,
			   const struct ksplice_section *sect,
			   unsigned long run_addr,
			   struct list_head *safety_records,
			   enum run_pre_mode mode)
{
	abort_t ret;
	int matched;
	const struct ksplice_reloc *r;
	const unsigned char *pre, *run, *pre_start, *run_start;
	unsigned char runval;

	if ((sect->flags & KSPLICE_SECTION_TEXT) != 0)
		run_addr = follow_trampolines(pack, run_addr);

	pre_start = (const unsigned char *)sect->thismod_addr;
	run_start = (const unsigned char *)run_addr;
	pre = pre_start;
	run = run_start;

	while (pre < pre_start + sect->size) {
		unsigned long offset = pre - pre_start;
		ret = lookup_reloc(pack, (unsigned long)pre, &r);
		if (ret == OK) {
			ret = handle_reloc(pack, r, (unsigned long)run, mode);
			if (ret == NO_MATCH) {
				if (mode == RUN_PRE_INITIAL)
					ksdebug(pack, "reloc in sect does not "
						"match after %lx/%lx bytes\n",
						offset, sect->size);
				if (mode == RUN_PRE_DEBUG)
					print_bytes(pack, run, r->size, pre, r->size);
				return ret;
			}
			if (ret != OK)
				return ret;
			pre += r->size;
			run += r->size;
			continue;
		} else if (ret != NO_MATCH) {
			return ret;
		}

		if ((sect->flags & KSPLICE_SECTION_TEXT) != 0) {
			ret = handle_paravirt(pack, (unsigned long)pre,
					      (unsigned long)run, &matched);
			if (ret != OK)
				return ret;
			if (matched != 0) {
				if (mode == RUN_PRE_DEBUG)
					print_bytes(pack, run, matched, pre,
						    matched);
				pre += matched;
				run += matched;
				continue;
			}
		}

		if (probe_kernel_read(&runval, (void *)run, 1) == -EFAULT) {
			if (mode == RUN_PRE_INITIAL)
				ksdebug(pack, "sect unmapped after %lx/%lx "
					"bytes\n", offset, sect->size);
			return NO_MATCH;
		}

		if (runval != *pre &&
		    (sect->flags & KSPLICE_SECTION_DATA) == 0) {
			if (mode == RUN_PRE_INITIAL)
				ksdebug(pack, "sect does not match after "
					"%lx/%lx bytes\n", offset, sect->size);
			if (mode == RUN_PRE_DEBUG) {
				print_bytes(pack, run, 1, pre, 1);
				ksdebug(pack, "[p_o=%lx] ! ", offset);
				print_bytes(pack, run + 1, 2, pre + 1, 2);
			}
			return NO_MATCH;
		}
		if (mode == RUN_PRE_DEBUG)
			print_bytes(pack, run, 1, pre, 1);
		pre++;
		run++;
	}

	return create_safety_record(pack, sect, safety_records, run_addr,
				    (unsigned long)(run - run_start));
}
static void print_bytes(struct ksplice_pack *pack,
			const unsigned char *run, int runc,
			const unsigned char *pre, int prec)
{
	int o;
	int matched = min(runc, prec);
	for (o = 0; o < matched; o++) {
		if (run[o] == pre[o])
			ksdebug(pack, "%02x ", run[o]);
		else
			ksdebug(pack, "%02x/%02x ", run[o], pre[o]);
	}
	for (o = matched; o < runc; o++)
		ksdebug(pack, "%02x/ ", run[o]);
	for (o = matched; o < prec; o++)
		ksdebug(pack, "/%02x ", pre[o]);
}
#ifdef KSPLICE_STANDALONE
static abort_t brute_search(struct ksplice_pack *pack,
			    const struct ksplice_section *sect,
			    const void *start, unsigned long len,
			    struct list_head *vals)
{
	unsigned long addr;
	unsigned char run, pre;
	abort_t ret;

	for (addr = (unsigned long)start; addr < (unsigned long)start + len;
	     addr++) {
		if (addr % 100000 == 0)
			yield();

		if (probe_kernel_read(&run, (void *)addr, 1) == -EFAULT)
			return OK;

		pre = *(const unsigned char *)(sect->thismod_addr);
		if (run != pre)
			continue;

		ret = try_addr(pack, sect, addr, NULL, RUN_PRE_INITIAL);
		if (ret == OK) {
			ret = add_candidate_val(vals, addr);
			if (ret != OK)
				return ret;
		} else if (ret != NO_MATCH) {
			return ret;
		}
	}

	return OK;
}

static abort_t brute_search_all(struct ksplice_pack *pack,
				const struct ksplice_section *sect,
				struct list_head *vals)
{
	struct module *m;
	abort_t ret = OK;
	int saved_debug;

	ksdebug(pack, "brute_search: searching for %s\n", sect->symbol->label);
	saved_debug = pack->update->debug;
	pack->update->debug = 0;

	list_for_each_entry(m, &modules, list) {
		if (!patches_module(m, pack->target))
			continue;
		ret = brute_search(pack, sect, m->module_core, m->core_size,
				   vals);
		if (ret != OK)
			break;
		ret = brute_search(pack, sect, m->module_init, m->init_size,
				   vals);
		if (ret != OK)
			break;
	}

	ret = brute_search(pack, sect, (const void *)init_mm.start_code,
			   init_mm.end_code - init_mm.start_code, vals);

	pack->update->debug = saved_debug;
	return ret;
}
#endif /* KSPLICE_STANDALONE */
static abort_t lookup_reloc(struct ksplice_pack *pack, unsigned long addr,
			    const struct ksplice_reloc **relocp)
{
	const struct ksplice_reloc *r;
	int canary_ret;

	for (r = pack->helper_relocs; r < pack->helper_relocs_end; r++) {
		if (addr >= r->blank_addr && addr < r->blank_addr + r->size) {
			canary_ret = contains_canary(pack, r->blank_addr,
						     r->size, r->dst_mask);
			if (canary_ret < 0)
				return UNEXPECTED;
			if (canary_ret == 0) {
				ksdebug(pack, "reloc: skipped %s:%lx "
					"(altinstr)\n", r->symbol->label,
					r->blank_offset);
				return NO_MATCH;
			}
			if (addr != r->blank_addr) {
				ksdebug(pack, "Invalid nonzero relocation "
					"offset\n");
				return UNEXPECTED;
			}
			*relocp = r;
			return OK;
		}
	}
	return NO_MATCH;
}

#ifdef KSPLICE_NO_KERNEL_SUPPORT
static struct module *__module_data_address(unsigned long addr)
{
	struct module *mod;

	list_for_each_entry(mod, &modules, list) {
		if (addr >= (unsigned long)mod->module_core +
		    mod->core_text_size &&
		    addr < (unsigned long)mod->module_core + mod->core_size)
			return mod;
	}
	return NULL;
}
#endif /* KSPLICE_NO_KERNEL_SUPPORT */
static abort_t handle_reloc(struct ksplice_pack *pack,
			    const struct ksplice_reloc *r,
			    unsigned long run_addr, enum run_pre_mode mode)
{
	unsigned long val;
	abort_t ret;

	ret = read_reloc_value(pack, r, run_addr, &val);
	if (ret != OK)
		return ret;

	if (mode == RUN_PRE_INITIAL)
		ksdebug(pack, "run-pre: reloc at r_a=%lx p_a=%lx to %s+%lx: "
			"found %s = %lx\n", run_addr, r->blank_addr,
			r->symbol->label, r->addend, r->symbol->label, val);

	if (starts_with(r->symbol->label, ".rodata.str"))
		return OK;

	if (contains_canary(pack, run_addr, r->size, r->dst_mask) != 0) {
		ksdebug(pack, "Aborted. Unexpected canary in run code at %lx"
			"\n", run_addr);
		return UNEXPECTED;
	}

	ret = create_nameval(pack, r->symbol->label, val, TEMP);
	if (ret == NO_MATCH && mode == RUN_PRE_INITIAL) {
		struct reloc_nameval *nv = find_nameval(pack, r->symbol->label);
		ksdebug(pack, "run-pre: reloc at r_a=%lx p_a=%lx: nameval %s = "
			"%lx(%d) does not match expected %lx\n", run_addr,
			r->blank_addr, r->symbol->label, nv->val, nv->status,
			val);
	}
	return ret;
}
static abort_t lookup_symbol(struct ksplice_pack *pack,
			     const struct ksplice_symbol *ksym,
			     struct list_head *vals)
{
	abort_t ret;
	struct reloc_nameval *nv;

#ifdef KSPLICE_STANDALONE
	if (!bootstrapped)
		return OK;
#endif /* KSPLICE_STANDALONE */

	nv = find_nameval(pack, ksym->label);
	if (nv != NULL) {
		ksdebug(pack, "using detected sym %s=%lx\n", ksym->label,
			nv->val);
		return add_candidate_val(vals, nv->val);
	}

	if (starts_with(ksym->label, ".rodata.str"))
		return OK;

#ifdef CONFIG_MODULE_UNLOAD
	if (strcmp(ksym->label, "cleanup_module") == 0 && pack->target != NULL
	    && pack->target->exit != NULL) {
		ret = add_candidate_val(vals,
					(unsigned long)pack->target->exit);
		if (ret != OK)
			return ret;
	}
#endif /* CONFIG_MODULE_UNLOAD */

	ret = exported_symbol_lookup(ksym->name, vals);
	if (ret != OK)
		return ret;

	ret = new_export_lookup(pack->update, ksym->name, vals);
	if (ret != OK)
		return ret;

#ifdef CONFIG_KALLSYMS
	ret = lookup_symbol_kallsyms(pack, ksym->name, vals);
	if (ret != OK)
		return ret;
#endif /* CONFIG_KALLSYMS */

	return OK;
}
#ifdef KSPLICE_STANDALONE
static abort_t
add_system_map_candidates(struct ksplice_pack *pack,
			  const struct ksplice_system_map *start,
			  const struct ksplice_system_map *end,
			  const char *label, struct list_head *vals)
{
	abort_t ret;
	long off;
	int i;
	const struct ksplice_system_map *smap;

	/* Some Fedora kernel releases have System.map files whose symbol
	 * addresses disagree with the running kernel by a constant address
	 * offset because of the CONFIG_PHYSICAL_START and CONFIG_PHYSICAL_ALIGN
	 * values used to compile these kernels.  This constant address offset
	 * is always a multiple of 0x100000.
	 *
	 * If we observe an offset that is NOT a multiple of 0x100000, then the
	 * user provided us with an incorrect System.map file, and we should
	 * abort.
	 * If we observe an offset that is a multiple of 0x100000, then we can
	 * adjust the System.map address values accordingly and proceed.
	 */
	off = (unsigned long)printk - pack->map_printk;
	if (off & 0xfffff) {
		ksdebug(pack, "Aborted. System.map does not match kernel.\n");
		return BAD_SYSTEM_MAP;
	}

	for (smap = start; smap < end; smap++) {
		if (strcmp(smap->label, label) == 0)
			break;
	}
	if (smap >= end)
		return OK;

	for (i = 0; i < smap->nr_candidates; i++) {
		ret = add_candidate_val(vals, smap->candidates[i] + off);
		if (ret != OK)
			return ret;
	}
	return OK;
}
#endif /* KSPLICE_STANDALONE */
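/*
 * Concrete case of the offset check above (example numbers): if the provided
 * System.map says printk is at 0xc0132abc but the running kernel has it at
 * 0xc0232abc, off is 0x100000, "off & 0xfffff" is zero, and every System.map
 * candidate is shifted up by 0x100000.  An off of, say, 0x2340 instead makes
 * the check fail and the update aborts with BAD_SYSTEM_MAP.
 */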
#ifdef CONFIG_KALLSYMS
static abort_t lookup_symbol_kallsyms(struct ksplice_pack *pack,
				      const char *name, struct list_head *vals)
{
	struct accumulate_struct acc = { pack, name, vals };
	return (__force abort_t)
	    kallsyms_on_each_symbol(accumulate_matching_names, &acc);
}

static int accumulate_matching_names(void *data, const char *sym_name,
				     struct module *sym_owner,
				     unsigned long sym_val)
{
	struct accumulate_struct *acc = data;
	if (strcmp(sym_name, acc->desired_name) == 0 &&
	    patches_module(sym_owner, acc->pack->target) &&
	    sym_owner != acc->pack->primary)
		return (__force int)add_candidate_val(acc->vals, sym_val);
	return (__force int)OK;
}
#endif /* CONFIG_KALLSYMS */

static abort_t exported_symbol_lookup(const char *name, struct list_head *vals)
{
	const struct kernel_symbol *sym;
	sym = find_symbol(name, NULL, NULL, true, false);
	if (sym == NULL)
		return OK;
	return add_candidate_val(vals, sym->value);
}

static abort_t new_export_lookup(struct update *update,
				 const char *name, struct list_head *vals)
{
	struct ksplice_pack *pack;
	struct ksplice_export *exp;
	list_for_each_entry(pack, &update->packs, list) {
		for (exp = pack->exports; exp < pack->exports_end; exp++) {
			if (strcmp(exp->new_name, name) == 0 &&
			    exp->sym != NULL &&
			    contains_canary(pack,
					    (unsigned long)&exp->sym->value,
					    sizeof(unsigned long), -1) == 0)
				return add_candidate_val(vals,
							 exp->sym->value);
		}
	}
	return OK;
}
static abort_t apply_patches(struct update *update)
{
	int i;
	abort_t ret;
	struct ksplice_pack *pack;
	const struct ksplice_section *sect;

	for (i = 0; i < 5; i++) {
		cleanup_conflicts(update);
#ifdef KSPLICE_STANDALONE
#endif /* KSPLICE_STANDALONE */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
		ret = (__force abort_t)stop_machine(__apply_patches, update,
						    NULL);
#else /* LINUX_VERSION_CODE < */
/* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
		ret = (__force abort_t)stop_machine_run(__apply_patches, update,
							NR_CPUS);
#endif /* LINUX_VERSION_CODE */
#ifdef KSPLICE_STANDALONE
#endif /* KSPLICE_STANDALONE */
		if (ret != CODE_BUSY)
			break;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(1000));
	}

	if (ret == CODE_BUSY) {
		print_conflicts(update);
		_ksdebug(update, "Aborted %s. stack check: to-be-replaced "
			 "code is busy.\n", update->kid);
	} else if (ret == ALREADY_REVERSED) {
		_ksdebug(update, "Aborted %s. Ksplice update %s is already "
			 "reversed.\n", update->kid, update->kid);
	}
	if (ret != OK)
		return ret;

	list_for_each_entry(pack, &update->packs, list) {
		for (sect = pack->primary_sections;
		     sect < pack->primary_sections_end; sect++) {
			ret = create_safety_record(pack, sect,
						   &pack->safety_records,
						   sect->thismod_addr,
						   sect->size);
			if (ret != OK)
				return ret;
		}
	}

	_ksdebug(update, "Update %s applied successfully\n", update->kid);
	return OK;
}
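/*
 * The loop above retries the atomic switch: stop_machine() freezes every
 * other CPU, check_each_task() walks all kernel stacks, and if any stack
 * still references to-be-replaced code the attempt returns CODE_BUSY; the
 * caller then sleeps for a second and tries again, up to five times.
 */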
static abort_t reverse_patches(struct update *update)
{
	int i;
	abort_t ret;
	struct ksplice_pack *pack;

	clear_debug_buf(update);
	ret = init_debug_buf(update);
	if (ret != OK)
		return ret;

	_ksdebug(update, "Preparing to reverse %s\n", update->kid);

	for (i = 0; i < 5; i++) {
		cleanup_conflicts(update);
		clear_list(&update->conflicts, struct conflict, list);
#ifdef KSPLICE_STANDALONE
#endif /* KSPLICE_STANDALONE */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
		ret = (__force abort_t)stop_machine(__reverse_patches, update,
						    NULL);
#else /* LINUX_VERSION_CODE < */
/* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
		ret = (__force abort_t)stop_machine_run(__reverse_patches,
							update, NR_CPUS);
#endif /* LINUX_VERSION_CODE */
#ifdef KSPLICE_STANDALONE
#endif /* KSPLICE_STANDALONE */
		if (ret != CODE_BUSY)
			break;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(1000));
	}
	list_for_each_entry(pack, &update->packs, list)
		clear_list(&pack->safety_records, struct safety_record, list);

	if (ret == CODE_BUSY) {
		print_conflicts(update);
		_ksdebug(update, "Aborted %s. stack check: to-be-reversed "
			 "code is busy.\n", update->kid);
	} else if (ret == MODULE_BUSY) {
		_ksdebug(update, "Update %s is in use by another module\n",
			 update->kid);
	}
	if (ret != OK)
		return ret;

	_ksdebug(update, "Update %s reversed successfully\n", update->kid);
	return OK;
}
static int __apply_patches(void *updateptr)
{
	struct update *update = updateptr;
	struct ksplice_pack *pack;
	struct ksplice_patch *p;
	struct ksplice_export *exp;
	abort_t ret;

	if (update->stage == STAGE_APPLIED)
		return (__force int)OK;

	if (update->stage != STAGE_PREPARING)
		return (__force int)UNEXPECTED;

	ret = check_each_task(update);
	if (ret != OK)
		return (__force int)ret;

	list_for_each_entry(pack, &update->packs, list) {
		if (try_module_get(pack->primary) != 1) {
			struct ksplice_pack *pack1;
			list_for_each_entry(pack1, &update->packs, list) {
				if (pack1 == pack)
					break;
				module_put(pack1->primary);
			}
			return (__force int)UNEXPECTED;
		}
	}

	update->stage = STAGE_APPLIED;

	list_for_each_entry(pack, &update->packs, list)
		list_add(&pack->module_list_entry.list, &ksplice_module_list);

	list_for_each_entry(pack, &update->packs, list) {
		for (exp = pack->exports; exp < pack->exports_end; exp++)
			exp->sym->name = exp->new_name;
	}

	list_for_each_entry(pack, &update->packs, list) {
		for (p = pack->patches; p < pack->patches_end; p++) {
			insert_trampoline(&p->trampoline);
			insert_trampoline(&p->reverse_trampoline);
		}
	}
	return (__force int)OK;
}
static int __reverse_patches(void *updateptr)
{
	struct update *update = updateptr;
	struct ksplice_pack *pack;
	const struct ksplice_patch *p;
	struct ksplice_export *exp;
	abort_t ret;

	if (update->stage != STAGE_APPLIED)
		return (__force int)OK;

#ifdef CONFIG_MODULE_UNLOAD
	list_for_each_entry(pack, &update->packs, list) {
		if (module_refcount(pack->primary) != 1)
			return (__force int)MODULE_BUSY;
	}
#endif /* CONFIG_MODULE_UNLOAD */

	ret = check_each_task(update);
	if (ret != OK)
		return (__force int)ret;

	update->stage = STAGE_REVERSED;

	list_for_each_entry(pack, &update->packs, list)
		module_put(pack->primary);

	list_for_each_entry(pack, &update->packs, list)
		list_del(&pack->module_list_entry.list);

	list_for_each_entry(pack, &update->packs, list) {
		for (exp = pack->exports; exp < pack->exports_end; exp++)
			exp->sym->name = exp->saved_name;
	}

	list_for_each_entry(pack, &update->packs, list) {
		for (p = pack->patches; p < pack->patches_end; p++) {
			remove_trampoline(&p->trampoline);
			remove_trampoline(&p->reverse_trampoline);
		}
	}
	return (__force int)OK;
}
static abort_t check_each_task(struct update *update)
{
	const struct task_struct *g, *p;
	abort_t status = OK, ret;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
/* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
	read_lock(&tasklist_lock);
#endif /* LINUX_VERSION_CODE */
	do_each_thread(g, p) {
		/* do_each_thread is a double loop! */
		ret = check_task(update, p, false);
		if (ret != OK) {
			check_task(update, p, true);
			status = ret;
		}
		if (ret != OK && ret != CODE_BUSY)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
/* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
			goto out;
#else /* LINUX_VERSION_CODE < */
			return ret;
#endif /* LINUX_VERSION_CODE */
	} while_each_thread(g, p);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
/* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
out:
	read_unlock(&tasklist_lock);
#endif /* LINUX_VERSION_CODE */
	return status;
}
static abort_t check_task(struct update *update,
			  const struct task_struct *t, bool rerun)
{
	abort_t status, ret;
	struct conflict *conf = NULL;

	if (rerun) {
		conf = kmalloc(sizeof(*conf), GFP_ATOMIC);
		if (conf == NULL)
			return OUT_OF_MEMORY;
		conf->process_name = kstrdup(t->comm, GFP_ATOMIC);
		if (conf->process_name == NULL) {
			kfree(conf);
			return OUT_OF_MEMORY;
		}
		conf->pid = t->pid;
		INIT_LIST_HEAD(&conf->stack);
		list_add(&conf->list, &update->conflicts);
	}

	status = check_address(update, conf, KSPLICE_IP(t));
	if (t == current) {
		ret = check_stack(update, conf, task_thread_info(t),
				  (unsigned long *)__builtin_frame_address(0));
		if (status == OK)
			status = ret;
	} else if (!task_curr(t)) {
		ret = check_stack(update, conf, task_thread_info(t),
				  (unsigned long *)KSPLICE_SP(t));
		if (status == OK)
			status = ret;
	} else if (!is_stop_machine(t)) {
		status = UNEXPECTED_RUNNING_TASK;
	}
	return status;
}

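/*
 * Conservatively scan a kernel stack word by word, treating every word as a
 * potential return address and checking it against the update's safety
 * records; any word that falls inside a to-be-patched range is reported as
 * a conflict.
 */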
static abort_t check_stack(struct update *update, struct conflict *conf,
			   const struct thread_info *tinfo,
			   const unsigned long *stack)
{
	abort_t status = OK, ret;
	unsigned long addr;

	while (valid_stack_ptr(tinfo, stack)) {
		addr = *stack++;
		ret = check_address(update, conf, addr);
		if (ret != OK)
			status = ret;
	}
	return status;
}

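/*
 * Check one address against the safety records of every pack in the update.
 * When a conflict is being recorded (conf != NULL), the address is also
 * appended to the conflict's stack list so the whole trace can be printed
 * later.
 */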
static abort_t check_address(struct update *update,
			     struct conflict *conf, unsigned long addr)
{
	abort_t status = OK, ret;
	const struct safety_record *rec;
	struct ksplice_pack *pack;
	struct conflict_addr *ca = NULL;

	if (conf != NULL) {
		ca = kmalloc(sizeof(*ca), GFP_ATOMIC);
		if (ca == NULL)
			return OUT_OF_MEMORY;
		ca->addr = addr;
		ca->has_conflict = false;
		list_add(&ca->list, &conf->stack);
	}

	list_for_each_entry(pack, &update->packs, list) {
		list_for_each_entry(rec, &pack->safety_records, list) {
			ret = check_record(ca, rec, addr);
			if (ret != OK)
				status = ret;
		}
	}
	return status;
}

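/*
 * A safety record describes a run of instructions that the update replaces.
 * An address conflicts with the record if it points strictly inside that
 * run, or at its first byte when first_byte_safe is not set (presumably the
 * first byte is safe once a trampoline there simply forwards callers to the
 * replacement code).
 */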
static abort_t check_record(struct conflict_addr *ca,
			    const struct safety_record *rec, unsigned long addr)
{
	if ((addr > rec->addr && addr < rec->addr + rec->size) ||
	    (addr == rec->addr && !rec->first_byte_safe)) {
		if (ca != NULL) {
			ca->label = rec->label;
			ca->has_conflict = true;
		}
		return CODE_BUSY;
	}
	return OK;
}

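/*
 * Recognize the stop_machine worker threads by name: "kstop<cpu>" on
 * 2.6.27 and later, "kstopmachine" on older kernels.  These threads are
 * expected to be running while the patch is applied, so check_task() does
 * not treat them as unexpected running tasks.
 */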
static bool is_stop_machine(const struct task_struct *t)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
	const char *num;
	if (!starts_with(t->comm, "kstop"))
		return false;
	num = t->comm + strlen("kstop");
	return num[strspn(num, "0123456789")] == '\0';
#else /* LINUX_VERSION_CODE < */
	return strcmp(t->comm, "kstopmachine") == 0;
#endif /* LINUX_VERSION_CODE */
}

static void cleanup_conflicts(struct update *update)
{
	struct conflict *conf;
	list_for_each_entry(conf, &update->conflicts, list) {
		clear_list(&conf->stack, struct conflict_addr, list);
		kfree(conf->process_name);
	}
	clear_list(&update->conflicts, struct conflict, list);
}

static void print_conflicts(struct update *update)
{
	const struct conflict *conf;
	const struct conflict_addr *ca;
	list_for_each_entry(conf, &update->conflicts, list) {
		_ksdebug(update, "stack check: pid %d (%s):", conf->pid,
			 conf->process_name);
		list_for_each_entry(ca, &conf->stack, list) {
			_ksdebug(update, " %lx", ca->addr);
			if (ca->has_conflict)
				_ksdebug(update, " [<-CONFLICT]");
		}
		_ksdebug(update, "\n");
	}
}

static void insert_trampoline(struct ksplice_trampoline *t)
{
	mm_segment_t old_fs = get_fs();
	set_fs(KERNEL_DS);
	memcpy((void *)t->saved, (void *)t->oldaddr, t->size);
	memcpy((void *)t->oldaddr, (void *)t->trampoline, t->size);
	flush_icache_range(t->oldaddr, t->oldaddr + t->size);
	set_fs(old_fs);
}

static void remove_trampoline(const struct ksplice_trampoline *t)
{
	mm_segment_t old_fs = get_fs();
	set_fs(KERNEL_DS);
	memcpy((void *)t->oldaddr, (void *)t->saved, t->size);
	flush_icache_range(t->oldaddr, t->oldaddr + t->size);
	set_fs(old_fs);
}

#ifdef KSPLICE_NO_KERNEL_SUPPORT
#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif

struct symsearch {
	const struct kernel_symbol *start, *stop;
	const unsigned long *crcs;
	enum {
		NOT_GPL_ONLY,
		GPL_ONLY,
		WILL_BE_GPL_ONLY,
	} licence;
	bool unused;
};

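/*
 * Without kernel-side Ksplice support, the kernel's symbol-table walkers
 * are reimplemented locally: each_symbol_in_section() and each_symbol()
 * iterate over the built-in export tables and over every loaded module's
 * tables, calling fn on each entry until it returns true, and find_symbol()
 * uses them to look up one symbol by name.
 */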
static bool each_symbol_in_section(const struct symsearch *arr,
				   unsigned int arrsize,
				   struct module *owner,
				   bool (*fn)(const struct symsearch *syms,
					      struct module *owner,
					      unsigned int symnum, void *data),
				   void *data)
{
	unsigned int i, j;

	for (j = 0; j < arrsize; j++) {
		for (i = 0; i < arr[j].stop - arr[j].start; i++)
			if (fn(&arr[j], owner, i, data))
				return true;
	}

	return false;
}

/* Returns true as soon as fn returns true, otherwise false. */
static bool each_symbol(bool (*fn)(const struct symsearch *arr,
				   struct module *owner,
				   unsigned int symnum, void *data),
			void *data)
{
	struct module *mod;
	const struct symsearch arr[] = {
		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
		  NOT_GPL_ONLY, false },
		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
		  __start___kcrctab_gpl,
		  GPL_ONLY, false },
#ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
		{ __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
		  __start___kcrctab_gpl_future,
		  WILL_BE_GPL_ONLY, false },
#endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
#ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
		{ __start___ksymtab_unused, __stop___ksymtab_unused,
		  __start___kcrctab_unused,
		  NOT_GPL_ONLY, true },
		{ __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
		  __start___kcrctab_unused_gpl,
		  GPL_ONLY, true },
#endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
	};

	if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
		return true;

	list_for_each_entry(mod, &modules, list) {
		struct symsearch module_arr[] = {
			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
			  NOT_GPL_ONLY, false },
			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
			  mod->gpl_crcs,
			  GPL_ONLY, false },
#ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
			{ mod->gpl_future_syms,
			  mod->gpl_future_syms + mod->num_gpl_future_syms,
			  mod->gpl_future_crcs,
			  WILL_BE_GPL_ONLY, false },
#endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
#ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
			{ mod->unused_syms,
			  mod->unused_syms + mod->num_unused_syms,
			  mod->unused_crcs,
			  NOT_GPL_ONLY, true },
			{ mod->unused_gpl_syms,
			  mod->unused_gpl_syms + mod->num_unused_gpl_syms,
			  mod->unused_gpl_crcs,
			  GPL_ONLY, true },
#endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
		};

		if (each_symbol_in_section(module_arr, ARRAY_SIZE(module_arr),
					   mod, fn, data))
			return true;
	}
	return false;
}

struct find_symbol_arg {
	/* Input */
	const char *name;
	bool gplok;
	bool warn;

	/* Output */
	struct module *owner;
	const unsigned long *crc;
	const struct kernel_symbol *sym;
};

static bool find_symbol_in_section(const struct symsearch *syms,
				   struct module *owner,
				   unsigned int symnum, void *data)
{
	struct find_symbol_arg *fsa = data;

	if (strcmp(syms->start[symnum].name, fsa->name) != 0)
		return false;

	if (!fsa->gplok) {
		if (syms->licence == GPL_ONLY)
			return false;
		if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
			printk(KERN_WARNING "Symbol %s is being used "
			       "by a non-GPL module, which will not "
			       "be allowed in the future\n", fsa->name);
			printk(KERN_WARNING "Please see the file "
			       "Documentation/feature-removal-schedule.txt "
			       "in the kernel source tree for more details.\n");
		}
	}

#ifdef CONFIG_UNUSED_SYMBOLS
	if (syms->unused && fsa->warn) {
		printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
		       "however this module is using it.\n", fsa->name);
		printk(KERN_WARNING
		       "This symbol will go away in the future.\n");
		printk(KERN_WARNING
		       "Please evaluate if this is the right API to use and "
		       "if it really is, submit a report to the linux kernel "
		       "mailing list together with submitting your code for "
		       "inclusion.\n");
	}
#endif /* CONFIG_UNUSED_SYMBOLS */

	fsa->owner = owner;
	fsa->crc = symversion(syms->crcs, symnum);
	fsa->sym = &syms->start[symnum];
	return true;
}

/* Find a symbol, returning it along with the (optional) crc and the
 * (optional) module which owns it */
static const struct kernel_symbol *find_symbol(const char *name,
					       struct module **owner,
					       const unsigned long **crc,
					       bool gplok, bool warn)
{
	struct find_symbol_arg fsa;

	fsa.name = name;
	fsa.gplok = gplok;
	fsa.warn = warn;

	if (each_symbol(find_symbol_in_section, &fsa)) {
		if (owner != NULL)
			*owner = fsa.owner;
		if (crc != NULL)
			*crc = fsa.crc;
		return fsa.sym;
	}
	return NULL;
}
#endif /* KSPLICE_NO_KERNEL_SUPPORT */

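/*
 * A reloc_nameval binds a symbol label to the run-time address chosen for
 * it.  find_nameval() looks a label up in the pack's list; create_nameval()
 * records a new binding, returning NO_MATCH if the label is already bound
 * to a different value.
 */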
static struct reloc_nameval *find_nameval(struct ksplice_pack *pack,
					  const char *label)
{
	struct reloc_nameval *nv;
	list_for_each_entry(nv, &pack->reloc_namevals, list) {
		if (strcmp(nv->label, label) == 0)
			return nv;
	}
	return NULL;
}

static abort_t create_nameval(struct ksplice_pack *pack, const char *label,
			      unsigned long val, int status)
{
	struct reloc_nameval *nv = find_nameval(pack, label);
	if (nv != NULL)
		return nv->val == val ? OK : NO_MATCH;

	nv = kmalloc(sizeof(*nv), GFP_KERNEL);
	if (nv == NULL)
		return OUT_OF_MEMORY;
	nv->label = label;
	nv->val = val;
	nv->status = status;
	list_add(&nv->list, &pack->reloc_namevals);
	return OK;
}

static abort_t create_safety_record(struct ksplice_pack *pack,
				    const struct ksplice_section *sect,
				    struct list_head *record_list,
				    unsigned long run_addr,
				    unsigned long run_size)
{
	struct safety_record *rec;
	struct ksplice_patch *p;

	if (record_list == NULL)
		return OK;

	for (p = pack->patches; p < pack->patches_end; p++) {
		if (strcmp(sect->symbol->label, p->label) == 0)
			break;
	}
	if (p >= pack->patches_end)
		return OK;

	if ((sect->flags & KSPLICE_SECTION_TEXT) == 0 &&
	    p->trampoline.repladdr != 0) {
		ksdebug(pack, "Error: ksplice_patch %s is matched to a "
			"non-deleted non-text section!\n", sect->symbol->label);
		return UNEXPECTED;
	}

	rec = kmalloc(sizeof(*rec), GFP_KERNEL);
	if (rec == NULL)
		return OUT_OF_MEMORY;
	rec->addr = run_addr;
	rec->size = run_size;
	rec->label = sect->symbol->label;
	rec->first_byte_safe = false;

	list_add(&rec->list, record_list);

	return OK;
}

static abort_t add_candidate_val(struct list_head *vals, unsigned long val)
{
	struct candidate_val *tmp, *new;

	list_for_each_entry(tmp, vals, list) {
		if (tmp->val == val)
			return OK;
	}
	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (new == NULL)
		return OUT_OF_MEMORY;
	new->val = val;
	list_add(&new->list, vals);
	return OK;
}

/* If there are only two candidates and their addresses are related by
   a trampoline, then we have successfully found a function patched by a
   previous update.  We remove the endpoint of the trampoline from the vals
   list, so that this update uses the patched function's original address. */
static void prune_trampoline_vals(struct ksplice_pack *pack,
				  struct list_head *vals)
{
	struct candidate_val *val1, *val2;

	if (list_empty(vals) || singular(vals))
		return;
	if (vals->next->next->next != vals)
		return;

	val1 = list_entry(vals->next, struct candidate_val, list);
	val2 = list_entry(vals->next->next, struct candidate_val, list);

	if (val1->val == follow_trampolines(pack, val2->val)) {
		list_del(&val1->list);
		kfree(val1);
		return;
	}
	if (val2->val == follow_trampolines(pack, val1->val)) {
		list_del(&val2->list);
		kfree(val2);
	}
}

static void release_vals(struct list_head *vals)
{
	clear_list(vals, struct candidate_val, list);
}

static void set_temp_namevals(struct ksplice_pack *pack, int status)
{
	struct reloc_nameval *nv, *n;
	list_for_each_entry_safe(nv, n, &pack->reloc_namevals, list) {
		if (nv->status == TEMP) {
			if (status == NOVAL) {
				list_del(&nv->list);
				kfree(nv);
			} else {
				nv->status = status;
			}
		}
	}
}

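/*
 * Relocation slots in the Ksplice objects are pre-filled with the
 * KSPLICE_CANARY pattern; contains_canary() reports whether a slot of the
 * given width (masked by dst_mask) still holds the canary, presumably
 * meaning the relocation has not been applied yet.  It returns -1 for an
 * unsupported relocation size.
 */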
static int contains_canary(struct ksplice_pack *pack, unsigned long blank_addr,
			   int size, long dst_mask)
{
	switch (size) {
	case 1:
		return (*(uint8_t *)blank_addr & dst_mask) ==
		    (KSPLICE_CANARY & dst_mask);
	case 2:
		return (*(uint16_t *)blank_addr & dst_mask) ==
		    (KSPLICE_CANARY & dst_mask);
	case 4:
		return (*(uint32_t *)blank_addr & dst_mask) ==
		    (KSPLICE_CANARY & dst_mask);
#if BITS_PER_LONG >= 64
	case 8:
		return (*(uint64_t *)blank_addr & dst_mask) ==
		    (KSPLICE_CANARY & dst_mask);
#endif /* BITS_PER_LONG */
	default:
		ksdebug(pack, "Aborted. Invalid relocation size.\n");
		return -1;
	}
}

static unsigned long follow_trampolines(struct ksplice_pack *pack,
					unsigned long addr)
{
	unsigned long new_addr;
	struct module *m;

	if (trampoline_target(pack, addr, &new_addr) != OK)
		return addr;

	/* Confirm that it is a jump into a ksplice module */
	m = __module_text_address(new_addr);
	if (m != NULL && m != pack->target && starts_with(m->name, "ksplice")) {
		ksdebug(pack, "Following trampoline %lx %lx(%s)\n", addr,
			new_addr, m->name);
		return new_addr;
	}
	return addr;
}

/* Does module a patch module b? */
static bool patches_module(const struct module *a, const struct module *b)
{
#ifdef KSPLICE_NO_KERNEL_SUPPORT
	const char *name;
	if (a == b)
		return true;
	if (a == NULL || !starts_with(a->name, "ksplice_"))
		return false;
	name = a->name + strlen("ksplice_");
	name += strcspn(name, "_");
	if (*name != '\0')
		name++;
	return strcmp(name, b == NULL ? "vmlinux" : b->name) == 0;
#else /* !KSPLICE_NO_KERNEL_SUPPORT */
	struct ksplice_module_list_entry *entry;
	if (a == b)
		return true;
	list_for_each_entry(entry, &ksplice_module_list, list) {
		if (entry->target == b && entry->primary == a)
			return true;
	}
	return false;
#endif /* KSPLICE_NO_KERNEL_SUPPORT */
}

static bool starts_with(const char *str, const char *prefix)
{
	return strncmp(str, prefix, strlen(prefix)) == 0;
}

static bool singular(struct list_head *list)
{
	return !list_empty(list) && list->next->next == list;
}

#ifdef CONFIG_DEBUG_FS
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
/* Old kernels don't have debugfs_create_blob */
static ssize_t read_file_blob(struct file *file, char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct debugfs_blob_wrapper *blob = file->private_data;
	return simple_read_from_buffer(user_buf, count, ppos, blob->data,
				       blob->size);
}

static int blob_open(struct inode *inode, struct file *file)
{
	if (inode->i_private)
		file->private_data = inode->i_private;
	return 0;
}

static struct file_operations fops_blob = {
	.read = read_file_blob,
	.open = blob_open,
};

static struct dentry *debugfs_create_blob(const char *name, mode_t mode,
					  struct dentry *parent,
					  struct debugfs_blob_wrapper *blob)
{
	return debugfs_create_file(name, mode, parent, blob, &fops_blob);
}
#endif /* LINUX_VERSION_CODE */

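/*
 * With CONFIG_DEBUG_FS, debug output accumulates in a debugfs blob named
 * after the update; _ksdebug() grows the vmalloc'd buffer in power-of-two
 * steps of at least PAGE_SIZE as messages are appended.  Without debugfs
 * (see the #else branch below), _ksdebug() falls back to printk with a
 * "ksplice: " prefix and tracks whether the previous message ended in a
 * newline.
 */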
static abort_t init_debug_buf(struct update *update)
{
	update->debug_blob.size = 0;
	update->debug_blob.data = NULL;
	update->debugfs_dentry =
	    debugfs_create_blob(update->name, S_IFREG | S_IRUSR, NULL,
				&update->debug_blob);
	if (update->debugfs_dentry == NULL)
		return OUT_OF_MEMORY;
	return OK;
}

static void clear_debug_buf(struct update *update)
{
	if (update->debugfs_dentry == NULL)
		return;
	debugfs_remove(update->debugfs_dentry);
	update->debugfs_dentry = NULL;
	update->debug_blob.size = 0;
	vfree(update->debug_blob.data);
	update->debug_blob.data = NULL;
}

static int _ksdebug(struct update *update, const char *fmt, ...)
{
	va_list args;
	unsigned long size, old_size, new_size;

	if (update->debug == 0)
		return 0;

	/* size includes the trailing '\0' */
	va_start(args, fmt);
	size = 1 + vsnprintf(update->debug_blob.data, 0, fmt, args);
	va_end(args);
	old_size = update->debug_blob.size == 0 ? 0 :
	    max(PAGE_SIZE, roundup_pow_of_two(update->debug_blob.size));
	new_size = update->debug_blob.size + size == 0 ? 0 :
	    max(PAGE_SIZE, roundup_pow_of_two(update->debug_blob.size + size));
	if (new_size > old_size) {
		char *buf = vmalloc(new_size);
		if (buf == NULL)
			return -ENOMEM;
		memcpy(buf, update->debug_blob.data, update->debug_blob.size);
		vfree(update->debug_blob.data);
		update->debug_blob.data = buf;
	}
	va_start(args, fmt);
	update->debug_blob.size += vsnprintf(update->debug_blob.data +
					     update->debug_blob.size,
					     size, fmt, args);
	va_end(args);
	return 0;
}

#else /* CONFIG_DEBUG_FS */
static abort_t init_debug_buf(struct update *update)
{
	return OK;
}

static void clear_debug_buf(struct update *update)
{
}

static int _ksdebug(struct update *update, const char *fmt, ...)
{
	va_list args;

	if (update->debug == 0)
		return 0;

	if (!update->debug_continue_line)
		printk(KERN_DEBUG "ksplice: ");

	va_start(args, fmt);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
	vprintk(fmt, args);
#else /* LINUX_VERSION_CODE < */
/* 683b229286b429244f35726b3c18caec429233bd was after 2.6.8 */
	{
		char *buf = kvasprintf(GFP_KERNEL, fmt, args);
		if (buf != NULL) {
			printk("%s", buf);
			kfree(buf);
		}
	}
#endif /* LINUX_VERSION_CODE */
	va_end(args);

	update->debug_continue_line =
	    fmt[0] == '\0' || fmt[strlen(fmt) - 1] != '\n';
	return 0;
}
#endif /* CONFIG_DEBUG_FS */

#ifdef KSPLICE_NO_KERNEL_SUPPORT
#ifdef CONFIG_KALLSYMS
static int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
					     struct module *, unsigned long),
				   void *data)
{
	char namebuf[KSYM_NAME_LEN];
	unsigned long i;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
	unsigned long off;
#endif /* LINUX_VERSION_CODE */
	int ret;

/* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
 * 2.6.10 was the first release after this commit
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
	for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
		off = ksplice_kallsyms_expand_symbol(off, namebuf);
		ret = fn(data, namebuf, NULL, kallsyms_addresses[i]);
		if (ret != 0)
			return ret;
	}
#else /* LINUX_VERSION_CODE < */
	char *knames;

	for (i = 0, knames = kallsyms_names; i < kallsyms_num_syms; i++) {
		unsigned prefix = *knames++;

		strlcpy(namebuf + prefix, knames, KSYM_NAME_LEN - prefix);

		ret = fn(data, namebuf, NULL, kallsyms_addresses[i]);
		if (ret != 0)
			return ret;

		knames += strlen(knames) + 1;
	}
#endif /* LINUX_VERSION_CODE */
	return module_kallsyms_on_each_symbol(fn, data);
}

/* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
 * 2.6.10 was the first release after this commit
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
extern u8 kallsyms_token_table[];
extern u16 kallsyms_token_index[];
/* Modified version of Linux's kallsyms_expand_symbol */
static unsigned long ksplice_kallsyms_expand_symbol(unsigned long off,
						    char *result)
{
	long len, skipped_first = 0;
	const u8 *tptr, *data;

	data = &kallsyms_names[off];
	len = *data;
	data++;

	off += len + 1;

	while (len) {
		tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
		data++;
		len--;

		while (*tptr) {
			if (skipped_first) {
				*result = *tptr;
				result++;
			} else
				skipped_first = 1;
			tptr++;
		}
	}

	*result = '\0';

	return off;
}
#endif /* LINUX_VERSION_CODE */

static int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
						    struct module *,
						    unsigned long),
					  void *data)
{
	struct module *mod;
	unsigned int i;
	int ret;

	list_for_each_entry(mod, &modules, list) {
		for (i = 0; i < mod->num_symtab; i++) {
			ret = fn(data, mod->strtab + mod->symtab[i].st_name,
				 mod, mod->symtab[i].st_value);
			if (ret != 0)
				return ret;
		}
	}
	return 0;
}
#endif /* CONFIG_KALLSYMS */

static struct module *find_module(const char *name)
{
	struct module *mod;

	list_for_each_entry(mod, &modules, list) {
		if (strcmp(mod->name, name) == 0)
			return mod;
	}
	return NULL;
}

#ifdef CONFIG_MODULE_UNLOAD
struct module_use {
	struct list_head list;
	struct module *module_which_uses;
};

/* I'm not yet certain whether we need the strong form of this. */
static inline int strong_try_module_get(struct module *mod)
{
	if (mod && mod->state != MODULE_STATE_LIVE)
		return -EBUSY;
	if (try_module_get(mod))
		return 0;
	return -ENOENT;
}

/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
	struct module_use *use;
	list_for_each_entry(use, &b->modules_which_use_me, list) {
		if (use->module_which_uses == a)
			return 1;
	}
	return 0;
}

/* Make it so module a uses b.  Must be holding module_mutex */
static int use_module(struct module *a, struct module *b)
{
	struct module_use *use;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
/* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
	int no_warn;
#endif /* LINUX_VERSION_CODE */
	if (b == NULL || already_uses(a, b))
		return 1;

	if (strong_try_module_get(b) < 0)
		return 0;

	use = kmalloc(sizeof(*use), GFP_ATOMIC);
	if (use == NULL) {
		module_put(b);
		return 0;
	}
	use->module_which_uses = a;
	list_add(&use->list, &b->modules_which_use_me);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
/* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
	no_warn = sysfs_create_link(b->holders_dir, &a->mkobj.kobj, a->name);
#endif /* LINUX_VERSION_CODE */
	return 1;
}
#else /* CONFIG_MODULE_UNLOAD */
static int use_module(struct module *a, struct module *b)
{
	return try_module_get(b);
}
#endif /* CONFIG_MODULE_UNLOAD */
#endif /* KSPLICE_NO_KERNEL_SUPPORT */

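/*
 * In the standalone build the core module carries its own System.map-derived
 * symbol table (ksplice_system_map) and bootstraps itself as a pack:
 * init_ksplice() applies the relocations in ksplice_init_relocs against the
 * running kernel and sets bootstrapped only if that succeeds.
 */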
#ifdef KSPLICE_STANDALONE
static int debug;
module_param(debug, int, 0600);
MODULE_PARM_DESC(debug, "Debug level");

extern struct ksplice_system_map ksplice_system_map[],
    ksplice_system_map_end[];

static struct ksplice_pack bootstrap_pack = {
	.name = "ksplice_" STR(KSPLICE_KID),
	.kid = "init_" STR(KSPLICE_KID),
	.target_name = NULL,
	.map_printk = MAP_PRINTK,
	.primary = THIS_MODULE,
	.reloc_namevals = LIST_HEAD_INIT(bootstrap_pack.reloc_namevals),
	.primary_system_map = ksplice_system_map,
	.primary_system_map_end = ksplice_system_map_end,
};
#endif /* KSPLICE_STANDALONE */

static int init_ksplice(void)
{
#ifdef KSPLICE_STANDALONE
	struct ksplice_pack *pack = &bootstrap_pack;
	pack->update = init_ksplice_update(pack->kid);
	if (pack->update == NULL)
		return -ENOMEM;
	add_to_update(pack, pack->update);
	pack->update->debug = debug;
	pack->update->abort_cause =
	    apply_relocs(pack, ksplice_init_relocs, ksplice_init_relocs_end);
	if (pack->update->abort_cause == OK)
		bootstrapped = true;
#else /* !KSPLICE_STANDALONE */
	ksplice_kobj = kobject_create_and_add("ksplice", kernel_kobj);
	if (ksplice_kobj == NULL)
		return -ENOMEM;
#endif /* KSPLICE_STANDALONE */
	return 0;
}

static void cleanup_ksplice(void)
{
#ifdef KSPLICE_STANDALONE
	cleanup_ksplice_update(bootstrap_pack.update);
#else /* !KSPLICE_STANDALONE */
	kobject_put(ksplice_kobj);
#endif /* KSPLICE_STANDALONE */
}

module_init(init_ksplice);
module_exit(cleanup_ksplice);

MODULE_AUTHOR("Jeffrey Brian Arnold <jbarnold@mit.edu>");
MODULE_DESCRIPTION("Ksplice rebootless update system");
#ifdef KSPLICE_VERSION
MODULE_VERSION(KSPLICE_VERSION);
#endif /* KSPLICE_VERSION */
MODULE_LICENSE("GPL v2");