/*
 * kmodsrc/ksplice.c — Ksplice rebootless kernel update core.
 * (Recovered from a gitweb export; commit subject: "Never write a
 * section symbol into the ksplice_symbol name.")
 */
/* Copyright (C) 2007-2008  Jeffrey Brian Arnold <jbarnold@mit.edu>
 * Copyright (C) 2008  Anders Kaseorg <andersk@mit.edu>,
 *                     Tim Abbott <tabbott@mit.edu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
#include <linux/module.h>
#include <linux/version.h>
#if defined CONFIG_DEBUG_FS || LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
#include <linux/debugfs.h>
#else /* CONFIG_DEBUG_FS */
/* a7a76cefc4b12bb6508afa4c77f11c2752cc365d was after 2.6.11 */
#endif /* CONFIG_DEBUG_FS */
#include <linux/errno.h>
#include <linux/kallsyms.h>
#include <linux/kobject.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/sysfs.h>
#include <linux/time.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
#include <linux/uaccess.h>
#else /* LINUX_VERSION_CODE < */
/* linux/uaccess.h doesn't exist in kernels before 2.6.18 */
#include <asm/uaccess.h>
#endif /* LINUX_VERSION_CODE */
#include <linux/vmalloc.h>
#ifdef KSPLICE_STANDALONE
#include "ksplice.h"
#else /* !KSPLICE_STANDALONE */
#include <linux/ksplice.h>
#endif /* KSPLICE_STANDALONE */
#ifdef KSPLICE_NEED_PARAINSTRUCTIONS
#include <asm/alternative.h>
#endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
51 #if defined(KSPLICE_STANDALONE) && \
52 LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) && defined(CONFIG_DEBUG_RODATA)
53 /* 6fb14755a676282a4e6caa05a08c92db8e45cfff was after 2.6.21 */
54 #if !defined(CONFIG_KPROBES) || LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
55 /* 4e4eee0e0139811b36a07854dcfa9746bc8b16d3 was after 2.6.25 */
56 #error "This version of Ksplice does not support your kernel."
57 #error "Future versions of Ksplice will fix this problem."
58 #endif /* !CONFIG_KPROBES || LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26) */
59 #endif /* KSPLICE_STANDALONE && LINUX_VERSION_CODE && CONFIG_DEBUG_RODATA */
61 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
62 /* 6e21828743247270d09a86756a0c11702500dbfb was after 2.6.18 */
63 #define bool _Bool
64 #define false 0
65 #define true 1
66 #endif /* LINUX_VERSION_CODE */
/* Lifecycle of an update as exposed through sysfs. */
enum stage {
	STAGE_PREPARING, STAGE_APPLIED, STAGE_REVERSED
};
/* Passes of run-pre matching: first attempt, debug dump, final check. */
enum run_pre_mode {
	RUN_PRE_INITIAL, RUN_PRE_DEBUG, RUN_PRE_FINAL
};
76 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
77 /* 5d7b32de9935c65ca8285ac6ec2382afdbb5d479 was after 2.6.8 */
78 #define __bitwise__
79 #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
80 /* af4ca457eaf2d6682059c18463eb106e2ce58198 was after 2.6.14 */
81 #define __bitwise__ __bitwise
82 #endif
84 typedef int __bitwise__ abort_t;
86 #define OK ((__force abort_t) 0)
87 #define NO_MATCH ((__force abort_t) 1)
88 #define CODE_BUSY ((__force abort_t) 2)
89 #define MODULE_BUSY ((__force abort_t) 3)
90 #define OUT_OF_MEMORY ((__force abort_t) 4)
91 #define FAILED_TO_FIND ((__force abort_t) 5)
92 #define ALREADY_REVERSED ((__force abort_t) 6)
93 #define MISSING_EXPORT ((__force abort_t) 7)
94 #define UNEXPECTED_RUNNING_TASK ((__force abort_t) 8)
95 #define UNEXPECTED ((__force abort_t) 9)
96 #ifdef KSPLICE_STANDALONE
97 #define BAD_SYSTEM_MAP ((__force abort_t) 10)
98 #endif /* KSPLICE_STANDALONE */
100 struct update {
101 const char *kid;
102 const char *name;
103 struct kobject kobj;
104 enum stage stage;
105 abort_t abort_cause;
106 int debug;
107 #ifdef CONFIG_DEBUG_FS
108 struct debugfs_blob_wrapper debug_blob;
109 struct dentry *debugfs_dentry;
110 #else /* !CONFIG_DEBUG_FS */
111 bool debug_continue_line;
112 #endif /* CONFIG_DEBUG_FS */
113 struct list_head packs;
114 struct list_head conflicts;
115 struct list_head list;
118 struct conflict {
119 const char *process_name;
120 pid_t pid;
121 struct list_head stack;
122 struct list_head list;
125 struct conflict_addr {
126 unsigned long addr;
127 bool has_conflict;
128 const char *label;
129 struct list_head list;
132 #if defined(CONFIG_DEBUG_FS) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
133 /* Old kernels don't have debugfs_create_blob */
134 struct debugfs_blob_wrapper {
135 void *data;
136 unsigned long size;
138 #endif /* CONFIG_DEBUG_FS && LINUX_VERSION_CODE */
140 struct labelval {
141 struct list_head list;
142 const char *label;
143 unsigned long val;
144 enum { NOVAL, TEMP, VAL } status;
147 struct safety_record {
148 struct list_head list;
149 const char *label;
150 unsigned long addr;
151 unsigned long size;
152 bool first_byte_safe;
155 struct candidate_val {
156 struct list_head list;
157 unsigned long val;
/* Context threaded through kallsyms iteration callbacks. */
struct accumulate_struct {
	struct ksplice_pack *pack;
	const char *desired_name;
	struct list_head *vals;
};
166 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
167 /* c33fa9f5609e918824446ef9a75319d4a802f1f4 was after 2.6.25 */
169 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
170 /* 2fff0a48416af891dce38fd425246e337831e0bb was after 2.6.19 */
171 static bool virtual_address_mapped(unsigned long addr)
173 char retval;
174 return probe_kernel_address(addr, retval) != -EFAULT;
176 #else /* LINUX_VERSION_CODE < */
177 static bool virtual_address_mapped(unsigned long addr);
178 #endif /* LINUX_VERSION_CODE */
180 static long probe_kernel_read(void *dst, void *src, size_t size)
182 if (!virtual_address_mapped((unsigned long)src) ||
183 !virtual_address_mapped((unsigned long)src + size))
184 return -EFAULT;
186 memcpy(dst, src, size);
187 return 0;
189 #endif /* LINUX_VERSION_CODE */
191 static LIST_HEAD(updates);
192 #ifdef KSPLICE_STANDALONE
193 #if defined(CONFIG_KSPLICE) || defined(CONFIG_KSPLICE_MODULE)
194 extern struct list_head ksplice_module_list;
195 #else /* !CONFIG_KSPLICE */
196 LIST_HEAD(ksplice_module_list);
197 #endif /* CONFIG_KSPLICE */
198 #else /* !KSPLICE_STANDALONE */
199 LIST_HEAD(ksplice_module_list);
200 EXPORT_SYMBOL_GPL(ksplice_module_list);
201 static struct kobject *ksplice_kobj;
202 #endif /* KSPLICE_STANDALONE */
204 static struct kobj_type ksplice_ktype;
206 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
207 /* Old kernels do not have kcalloc
208 * e629946abd0bb8266e9c3d0fd1bff2ef8dec5443 was after 2.6.8
210 static void *kcalloc(size_t n, size_t size, typeof(GFP_KERNEL) flags)
212 char *mem;
213 if (n != 0 && size > ULONG_MAX / n)
214 return NULL;
215 mem = kmalloc(n * size, flags);
216 if (mem)
217 memset(mem, 0, n * size);
218 return mem;
220 #endif /* LINUX_VERSION_CODE */
222 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
223 /* Old kernels do not have kstrdup
224 * 543537bd922692bc978e2e356fcd8bfc9c2ee7d5 was 2.6.13-rc4
226 static char *kstrdup(const char *s, typeof(GFP_KERNEL) gfp)
228 size_t len;
229 char *buf;
231 if (!s)
232 return NULL;
234 len = strlen(s) + 1;
235 buf = kmalloc(len, gfp);
236 if (buf)
237 memcpy(buf, s, len);
238 return buf;
240 #endif /* LINUX_VERSION_CODE */
242 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
243 /* Old kernels use semaphore instead of mutex
244 * 97d1f15b7ef52c1e9c28dc48b454024bb53a5fd2 was after 2.6.16
246 #define mutex semaphore
247 #define mutex_lock down
248 #define mutex_unlock up
249 #endif /* LINUX_VERSION_CODE */
251 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
252 /* 11443ec7d9286dd25663516436a14edfb5f43857 was after 2.6.21 */
253 static char * __attribute_used__
254 kvasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, va_list ap)
256 unsigned int len;
257 char *p, dummy[1];
258 va_list aq;
260 va_copy(aq, ap);
261 len = vsnprintf(dummy, 0, fmt, aq);
262 va_end(aq);
264 p = kmalloc(len + 1, gfp);
265 if (!p)
266 return NULL;
268 vsnprintf(p, len + 1, fmt, ap);
270 return p;
272 #endif
274 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
275 /* e905914f96e11862b130dd229f73045dad9a34e8 was after 2.6.17 */
276 static char * __attribute__((format (printf, 2, 3)))
277 kasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, ...)
279 va_list ap;
280 char *p;
282 va_start(ap, fmt);
283 p = kvasprintf(gfp, fmt, ap);
284 va_end(ap);
286 return p;
288 #endif
290 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
291 /* 06b2a76d25d3cfbd14680021c1d356c91be6904e was after 2.6.24 */
292 static int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
294 char *tail;
295 unsigned long val;
296 size_t len;
298 *res = 0;
299 len = strlen(cp);
300 if (len == 0)
301 return -EINVAL;
303 val = simple_strtoul(cp, &tail, base);
304 if ((*tail == '\0') ||
305 ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
306 *res = val;
307 return 0;
310 return -EINVAL;
312 #endif
#ifndef task_thread_info
#define task_thread_info(task) (task)->thread_info
#endif /* !task_thread_info */
318 #ifdef KSPLICE_STANDALONE
320 static bool bootstrapped = false;
322 #ifdef CONFIG_KALLSYMS
323 extern unsigned long kallsyms_addresses[], kallsyms_num_syms;
324 extern u8 kallsyms_names[];
325 #endif /* CONFIG_KALLSYMS */
327 /* defined by ksplice-create */
328 extern const struct ksplice_reloc ksplice_init_relocs[],
329 ksplice_init_relocs_end[];
331 /* Obtained via System.map */
332 extern struct list_head modules;
333 extern struct mutex module_mutex;
334 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) && defined(CONFIG_UNUSED_SYMBOLS)
335 /* f71d20e961474dde77e6558396efb93d6ac80a4b was after 2.6.17 */
336 #define KSPLICE_KSYMTAB_UNUSED_SUPPORT 1
337 #endif /* LINUX_VERSION_CODE */
338 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
339 /* 9f28bb7e1d0188a993403ab39b774785892805e1 was after 2.6.16 */
340 #define KSPLICE_KSYMTAB_FUTURE_SUPPORT 1
341 #endif /* LINUX_VERSION_CODE */
342 extern const struct kernel_symbol __start___ksymtab[];
343 extern const struct kernel_symbol __stop___ksymtab[];
344 extern const unsigned long __start___kcrctab[];
345 extern const struct kernel_symbol __start___ksymtab_gpl[];
346 extern const struct kernel_symbol __stop___ksymtab_gpl[];
347 extern const unsigned long __start___kcrctab_gpl[];
348 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
349 extern const struct kernel_symbol __start___ksymtab_unused[];
350 extern const struct kernel_symbol __stop___ksymtab_unused[];
351 extern const unsigned long __start___kcrctab_unused[];
352 extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
353 extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
354 extern const unsigned long __start___kcrctab_unused_gpl[];
355 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
356 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
357 extern const struct kernel_symbol __start___ksymtab_gpl_future[];
358 extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
359 extern const unsigned long __start___kcrctab_gpl_future[];
360 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
362 #endif /* KSPLICE_STANDALONE */
364 static struct update *init_ksplice_update(const char *kid);
365 static void cleanup_ksplice_update(struct update *update);
366 static void add_to_update(struct ksplice_pack *pack, struct update *update);
367 static int ksplice_sysfs_init(struct update *update);
369 /* Preparing the relocations and patches for application */
370 static abort_t apply_update(struct update *update);
371 static abort_t prepare_pack(struct ksplice_pack *pack);
372 static abort_t finalize_pack(struct ksplice_pack *pack);
373 static abort_t finalize_exports(struct ksplice_pack *pack);
374 static abort_t finalize_patches(struct ksplice_pack *pack);
375 static abort_t add_dependency_on_address(struct ksplice_pack *pack,
376 unsigned long addr);
377 static abort_t apply_relocs(struct ksplice_pack *pack,
378 const struct ksplice_reloc *relocs,
379 const struct ksplice_reloc *relocs_end);
380 static abort_t apply_reloc(struct ksplice_pack *pack,
381 const struct ksplice_reloc *r);
382 static abort_t read_reloc_value(struct ksplice_pack *pack,
383 const struct ksplice_reloc *r,
384 unsigned long addr, unsigned long *valp);
385 static abort_t write_reloc_value(struct ksplice_pack *pack,
386 const struct ksplice_reloc *r,
387 unsigned long addr, unsigned long sym_addr);
388 static void __attribute__((noreturn)) ksplice_deleted(void);
390 /* run-pre matching */
391 static abort_t match_pack_sections(struct ksplice_pack *pack,
392 bool consider_data_sections);
393 static abort_t find_section(struct ksplice_pack *pack,
394 const struct ksplice_section *sect);
395 static abort_t try_addr(struct ksplice_pack *pack,
396 const struct ksplice_section *sect,
397 unsigned long run_addr,
398 struct list_head *safety_records,
399 enum run_pre_mode mode);
400 static abort_t run_pre_cmp(struct ksplice_pack *pack,
401 const struct ksplice_section *sect,
402 unsigned long run_addr,
403 struct list_head *safety_records,
404 enum run_pre_mode mode);
405 #ifndef CONFIG_FUNCTION_DATA_SECTIONS
406 /* defined in arch/ARCH/kernel/ksplice-arch.c */
407 static abort_t arch_run_pre_cmp(struct ksplice_pack *pack,
408 const struct ksplice_section *sect,
409 unsigned long run_addr,
410 struct list_head *safety_records,
411 enum run_pre_mode mode);
412 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
413 static void print_bytes(struct ksplice_pack *pack,
414 const unsigned char *run, int runc,
415 const unsigned char *pre, int prec);
416 #ifdef KSPLICE_STANDALONE
417 static abort_t brute_search(struct ksplice_pack *pack,
418 const struct ksplice_section *sect,
419 const void *start, unsigned long len,
420 struct list_head *vals);
421 static abort_t brute_search_all(struct ksplice_pack *pack,
422 const struct ksplice_section *sect,
423 struct list_head *vals);
424 #endif /* KSPLICE_STANDALONE */
425 static abort_t lookup_reloc(struct ksplice_pack *pack, unsigned long addr,
426 const struct ksplice_reloc **relocp);
427 static abort_t handle_reloc(struct ksplice_pack *pack,
428 const struct ksplice_reloc *r,
429 unsigned long run_addr, enum run_pre_mode mode);
431 /* Computing possible addresses for symbols */
432 static abort_t lookup_symbol(struct ksplice_pack *pack,
433 const struct ksplice_symbol *ksym,
434 struct list_head *vals);
435 #ifdef KSPLICE_STANDALONE
436 static abort_t
437 add_system_map_candidates(struct ksplice_pack *pack,
438 const struct ksplice_system_map *start,
439 const struct ksplice_system_map *end,
440 const char *label, struct list_head *vals);
441 #endif /* KSPLICE_STANDALONE */
442 #ifdef CONFIG_KALLSYMS
443 static abort_t lookup_symbol_kallsyms(struct ksplice_pack *pack,
444 const char *name, struct list_head *vals);
445 static int accumulate_matching_names(void *data, const char *sym_name,
446 struct module *sym_owner,
447 unsigned long sym_val);
448 #endif /* CONFIG_KALLSYMS */
449 static abort_t exported_symbol_lookup(struct ksplice_pack *pack,
450 const char *name, struct list_head *vals);
451 static abort_t new_export_lookup(struct ksplice_pack *p, struct update *update,
452 const char *name, struct list_head *vals);
454 /* Atomic update insertion and removal */
455 static abort_t apply_patches(struct update *update);
456 static abort_t reverse_patches(struct update *update);
457 static int __apply_patches(void *update);
458 static int __reverse_patches(void *update);
459 static abort_t check_each_task(struct update *update);
460 static abort_t check_task(struct update *update,
461 const struct task_struct *t, bool rerun);
462 static abort_t check_stack(struct update *update, struct conflict *conf,
463 const struct thread_info *tinfo,
464 const unsigned long *stack);
465 static abort_t check_address(struct update *update,
466 struct conflict *conf, unsigned long addr);
467 static abort_t check_record(struct conflict_addr *ca,
468 const struct safety_record *rec,
469 unsigned long addr);
470 static bool is_stop_machine(const struct task_struct *t);
471 static void cleanup_conflicts(struct update *update);
472 static void print_conflicts(struct update *update);
473 static void insert_trampoline(struct ksplice_patch *p);
474 static abort_t verify_trampoline(struct ksplice_pack *pack,
475 const struct ksplice_patch *p);
476 static void remove_trampoline(const struct ksplice_patch *p);
478 static struct labelval *find_labelval(struct ksplice_pack *pack,
479 const char *label);
480 static abort_t create_labelval(struct ksplice_pack *pack, const char *label,
481 unsigned long val, int status);
482 static abort_t create_safety_record(struct ksplice_pack *pack,
483 const struct ksplice_section *sect,
484 struct list_head *record_list,
485 unsigned long run_addr,
486 unsigned long run_size);
487 static abort_t add_candidate_val(struct ksplice_pack *pack,
488 struct list_head *vals, unsigned long val);
489 static void release_vals(struct list_head *vals);
490 static void set_temp_labelvals(struct ksplice_pack *pack, int status_val);
492 static int contains_canary(struct ksplice_pack *pack, unsigned long blank_addr,
493 int size, long dst_mask);
494 static unsigned long follow_trampolines(struct ksplice_pack *pack,
495 unsigned long addr);
496 static bool patches_module(const struct module *a, const struct module *b);
497 static bool starts_with(const char *str, const char *prefix);
498 static bool singular(struct list_head *list);
500 /* Debugging */
501 static abort_t init_debug_buf(struct update *update);
502 static void clear_debug_buf(struct update *update);
503 static int __attribute__((format(printf, 2, 3)))
504 _ksdebug(struct update *update, const char *fmt, ...);
505 #define ksdebug(pack, fmt, ...) \
506 _ksdebug(pack->update, fmt, ## __VA_ARGS__)
508 #if defined(KSPLICE_STANDALONE) && \
509 !defined(CONFIG_KSPLICE) && !defined(CONFIG_KSPLICE_MODULE)
510 #define KSPLICE_NO_KERNEL_SUPPORT 1
511 #endif /* KSPLICE_STANDALONE && !CONFIG_KSPLICE && !CONFIG_KSPLICE_MODULE */
513 #ifdef KSPLICE_NO_KERNEL_SUPPORT
514 /* Functions defined here that will be exported in later kernels */
515 #ifdef CONFIG_KALLSYMS
516 static int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
517 struct module *, unsigned long),
518 void *data);
519 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
520 static unsigned int kallsyms_expand_symbol(unsigned int off, char *result);
521 #endif /* LINUX_VERSION_CODE */
522 static int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
523 struct module *,
524 unsigned long),
525 void *data);
526 #endif /* CONFIG_KALLSYMS */
527 static struct module *find_module(const char *name);
528 static int use_module(struct module *a, struct module *b);
529 static const struct kernel_symbol *find_symbol(const char *name,
530 struct module **owner,
531 const unsigned long **crc,
532 bool gplok, bool warn);
533 static struct module *__module_data_address(unsigned long addr);
534 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
536 /* Architecture-specific functions defined in arch/ARCH/kernel/ksplice-arch.c */
537 static abort_t prepare_trampoline(struct ksplice_pack *pack,
538 struct ksplice_patch *p);
539 static abort_t trampoline_target(struct ksplice_pack *pack, unsigned long addr,
540 unsigned long *new_addr);
541 static abort_t handle_paravirt(struct ksplice_pack *pack, unsigned long pre,
542 unsigned long run, int *matched);
543 static bool valid_stack_ptr(const struct thread_info *tinfo, const void *p);
545 #ifndef KSPLICE_STANDALONE
546 #include "ksplice-arch.c"
547 #elif defined CONFIG_X86
548 #include "x86/ksplice-arch.c"
549 #elif defined CONFIG_ARM
550 #include "arm/ksplice-arch.c"
551 #endif /* KSPLICE_STANDALONE */
/* Unlink and kfree every entry of a list of structs of the given type,
 * where member is the struct's list_head field. */
#define clear_list(head, type, member)				\
	do {							\
		struct list_head *_pos, *_n;			\
		list_for_each_safe(_pos, _n, head) {		\
			list_del(_pos);				\
			kfree(list_entry(_pos, type, member));	\
		}						\
	} while (0)
562 int init_ksplice_pack(struct ksplice_pack *pack)
564 struct update *update;
565 int ret = 0;
567 #ifdef KSPLICE_STANDALONE
568 if (!bootstrapped)
569 return -1;
570 #endif /* KSPLICE_STANDALONE */
572 INIT_LIST_HEAD(&pack->labelvals);
573 INIT_LIST_HEAD(&pack->safety_records);
575 mutex_lock(&module_mutex);
576 if (strcmp(pack->target_name, "vmlinux") == 0) {
577 pack->target = NULL;
578 } else {
579 pack->target = find_module(pack->target_name);
580 if (pack->target == NULL || !module_is_live(pack->target)) {
581 ret = -ENODEV;
582 goto out;
584 ret = use_module(pack->primary, pack->target);
585 if (ret != 1) {
586 ret = -ENODEV;
587 goto out;
590 list_for_each_entry(update, &updates, list) {
591 if (strcmp(pack->kid, update->kid) == 0) {
592 if (update->stage != STAGE_PREPARING) {
593 ret = -EPERM;
594 goto out;
596 add_to_update(pack, update);
597 goto out;
600 update = init_ksplice_update(pack->kid);
601 if (update == NULL) {
602 ret = -ENOMEM;
603 goto out;
605 ret = ksplice_sysfs_init(update);
606 if (ret != 0) {
607 cleanup_ksplice_update(update);
608 goto out;
610 add_to_update(pack, update);
611 out:
612 mutex_unlock(&module_mutex);
613 return ret;
615 EXPORT_SYMBOL_GPL(init_ksplice_pack);
617 void cleanup_ksplice_pack(struct ksplice_pack *pack)
619 if (pack->update == NULL || pack->update->stage == STAGE_APPLIED)
620 return;
621 mutex_lock(&module_mutex);
622 list_del(&pack->list);
623 mutex_unlock(&module_mutex);
624 if (list_empty(&pack->update->packs))
625 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
626 kobject_put(&pack->update->kobj);
627 #else /* LINUX_VERSION_CODE < */
628 /* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
629 kobject_unregister(&pack->update->kobj);
630 #endif /* LINUX_VERSION_CODE */
631 pack->update = NULL;
633 EXPORT_SYMBOL_GPL(cleanup_ksplice_pack);
635 static struct update *init_ksplice_update(const char *kid)
637 struct update *update;
638 update = kcalloc(1, sizeof(struct update), GFP_KERNEL);
639 if (update == NULL)
640 return NULL;
641 update->name = kasprintf(GFP_KERNEL, "ksplice_%s", kid);
642 if (update->name == NULL) {
643 kfree(update);
644 return NULL;
646 update->kid = kstrdup(kid, GFP_KERNEL);
647 if (update->kid == NULL) {
648 kfree(update->name);
649 kfree(update);
650 return NULL;
652 INIT_LIST_HEAD(&update->packs);
653 if (init_debug_buf(update) != OK) {
654 kfree(update->kid);
655 kfree(update->name);
656 kfree(update);
657 return NULL;
659 list_add(&update->list, &updates);
660 update->stage = STAGE_PREPARING;
661 update->abort_cause = OK;
662 INIT_LIST_HEAD(&update->conflicts);
663 return update;
666 static void cleanup_ksplice_update(struct update *update)
668 #ifdef KSPLICE_STANDALONE
669 if (bootstrapped)
670 mutex_lock(&module_mutex);
671 list_del(&update->list);
672 if (bootstrapped)
673 mutex_unlock(&module_mutex);
674 #else /* !KSPLICE_STANDALONE */
675 mutex_lock(&module_mutex);
676 list_del(&update->list);
677 mutex_unlock(&module_mutex);
678 #endif /* KSPLICE_STANDALONE */
679 cleanup_conflicts(update);
680 clear_debug_buf(update);
681 kfree(update->kid);
682 kfree(update->name);
683 kfree(update);
686 static void add_to_update(struct ksplice_pack *pack, struct update *update)
688 pack->update = update;
689 list_add(&pack->list, &update->packs);
690 pack->module_list_entry.target = pack->target;
691 pack->module_list_entry.primary = pack->primary;
694 static int ksplice_sysfs_init(struct update *update)
696 int ret = 0;
697 memset(&update->kobj, 0, sizeof(update->kobj));
698 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
699 #ifndef KSPLICE_STANDALONE
700 ret = kobject_init_and_add(&update->kobj, &ksplice_ktype,
701 ksplice_kobj, "%s", update->kid);
702 #else /* KSPLICE_STANDALONE */
703 /* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
704 ret = kobject_init_and_add(&update->kobj, &ksplice_ktype,
705 &THIS_MODULE->mkobj.kobj, "ksplice");
706 #endif /* KSPLICE_STANDALONE */
707 #else /* LINUX_VERSION_CODE < */
708 ret = kobject_set_name(&update->kobj, "%s", "ksplice");
709 if (ret != 0)
710 return ret;
711 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
712 update->kobj.parent = &THIS_MODULE->mkobj.kobj;
713 #else /* LINUX_VERSION_CODE < */
714 /* b86ab02803095190d6b72bcc18dcf620bf378df9 was after 2.6.10 */
715 update->kobj.parent = &THIS_MODULE->mkobj->kobj;
716 #endif /* LINUX_VERSION_CODE */
717 update->kobj.ktype = &ksplice_ktype;
718 ret = kobject_register(&update->kobj);
719 #endif /* LINUX_VERSION_CODE */
720 if (ret != 0)
721 return ret;
722 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
723 kobject_uevent(&update->kobj, KOBJ_ADD);
724 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
725 /* 312c004d36ce6c739512bac83b452f4c20ab1f62 was after 2.6.14 */
726 /* 12025235884570ba7f02a6f427f973ac6be7ec54 was after 2.6.9 */
727 kobject_uevent(&update->kobj, KOBJ_ADD, NULL);
728 #endif /* LINUX_VERSION_CODE */
729 return 0;
732 static abort_t apply_update(struct update *update)
734 struct ksplice_pack *pack;
735 abort_t ret;
737 mutex_lock(&module_mutex);
738 #ifdef KSPLICE_NEED_PARAINSTRUCTIONS
739 list_for_each_entry(pack, &update->packs, list) {
740 if (pack->target == NULL) {
741 apply_paravirt(pack->primary_parainstructions,
742 pack->primary_parainstructions_end);
743 apply_paravirt(pack->helper_parainstructions,
744 pack->helper_parainstructions_end);
747 #endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
749 list_for_each_entry(pack, &update->packs, list) {
750 ret = prepare_pack(pack);
751 if (ret != OK)
752 goto out;
754 ret = apply_patches(update);
755 out:
756 list_for_each_entry(pack, &update->packs, list) {
757 clear_list(&pack->labelvals, struct labelval, list);
758 if (update->stage == STAGE_PREPARING)
759 clear_list(&pack->safety_records, struct safety_record,
760 list);
762 mutex_unlock(&module_mutex);
763 return ret;
767 static abort_t prepare_pack(struct ksplice_pack *pack)
769 abort_t ret;
771 ksdebug(pack, "Preparing and checking %s\n", pack->name);
772 ret = match_pack_sections(pack, false);
773 if (ret == NO_MATCH) {
774 /* It is possible that by using relocations from .data sections
775 we can successfully run-pre match the rest of the sections.
776 To avoid using any symbols obtained from .data sections
777 (which may be unreliable) in the post code, we first prepare
778 the post code and then try to run-pre match the remaining
779 sections with the help of .data sections.
781 ksdebug(pack, "Continuing without some sections; we might "
782 "find them later.\n");
783 ret = finalize_pack(pack);
784 if (ret != OK) {
785 ksdebug(pack, "Aborted. Unable to continue without "
786 "the unmatched sections.\n");
787 return ret;
790 ksdebug(pack, "run-pre: Considering .data sections to find the "
791 "unmatched sections\n");
792 ret = match_pack_sections(pack, true);
793 if (ret != OK)
794 return ret;
796 ksdebug(pack, "run-pre: Found all previously unmatched "
797 "sections\n");
798 return OK;
799 } else if (ret != OK) {
800 return ret;
803 return finalize_pack(pack);
806 static abort_t finalize_pack(struct ksplice_pack *pack)
808 abort_t ret;
809 ret = apply_relocs(pack, pack->primary_relocs,
810 pack->primary_relocs_end);
811 if (ret != OK)
812 return ret;
814 ret = finalize_patches(pack);
815 if (ret != OK)
816 return ret;
818 ret = finalize_exports(pack);
819 if (ret != OK)
820 return ret;
822 return OK;
825 static abort_t finalize_exports(struct ksplice_pack *pack)
827 struct ksplice_export *exp;
828 struct module *m;
829 const struct kernel_symbol *sym;
831 for (exp = pack->exports; exp < pack->exports_end; exp++) {
832 sym = find_symbol(exp->name, &m, NULL, true, false);
833 if (sym == NULL) {
834 ksdebug(pack, "Could not find kernel_symbol struct for "
835 "%s\n", exp->name);
836 return MISSING_EXPORT;
839 /* Cast away const since we are planning to mutate the
840 * kernel_symbol structure. */
841 exp->sym = (struct kernel_symbol *)sym;
842 exp->saved_name = exp->sym->name;
843 if (m != pack->primary && use_module(pack->primary, m) != 1) {
844 ksdebug(pack, "Aborted. Could not add dependency on "
845 "symbol %s from module %s.\n", sym->name,
846 m->name);
847 return UNEXPECTED;
850 return OK;
853 static abort_t finalize_patches(struct ksplice_pack *pack)
855 struct ksplice_patch *p;
856 struct safety_record *rec;
857 abort_t ret;
859 for (p = pack->patches; p < pack->patches_end; p++) {
860 struct labelval *lv = find_labelval(pack, p->label);
861 bool found = false;
862 if (lv == NULL) {
863 ksdebug(pack, "Failed to find %s for oldaddr\n",
864 p->label);
865 return FAILED_TO_FIND;
867 p->oldaddr = lv->val;
869 list_for_each_entry(rec, &pack->safety_records, list) {
870 if (strcmp(rec->label, p->label) == 0 &&
871 follow_trampolines(pack, p->oldaddr)
872 == rec->addr) {
873 found = true;
874 break;
877 if (!found) {
878 ksdebug(pack, "No safety record for patch %s\n",
879 p->label);
880 return NO_MATCH;
882 if (rec->size < p->size) {
883 ksdebug(pack, "Symbol %s is too short for trampoline\n",
884 p->label);
885 return UNEXPECTED;
888 if (p->repladdr == 0)
889 p->repladdr = (unsigned long)ksplice_deleted;
890 else
891 rec->first_byte_safe = true;
893 ret = prepare_trampoline(pack, p);
894 if (ret != OK)
895 return ret;
897 ret = add_dependency_on_address(pack, p->oldaddr);
898 if (ret != OK)
899 return ret;
901 return OK;
904 static abort_t add_dependency_on_address(struct ksplice_pack *pack,
905 unsigned long addr)
907 struct module *m =
908 __module_text_address(follow_trampolines(pack, addr));
909 if (m == NULL || m == pack->primary)
910 return OK;
911 if (use_module(pack->primary, m) != 1)
912 return MODULE_BUSY;
913 return OK;
916 static abort_t apply_relocs(struct ksplice_pack *pack,
917 const struct ksplice_reloc *relocs,
918 const struct ksplice_reloc *relocs_end)
920 const struct ksplice_reloc *r;
921 for (r = relocs; r < relocs_end; r++) {
922 abort_t ret = apply_reloc(pack, r);
923 if (ret != OK)
924 return ret;
926 return OK;
/*
 * apply_reloc: resolve one relocation site in the primary (replacement) code.
 *
 * Checks that the site at r->blank_addr still holds the canary, looks up the
 * unique run-time address of r->symbol, writes the (possibly pc-relative)
 * value into the site, and records a labelval plus an address dependency so
 * the choice can be verified by the second round of run-pre matching.
 * Returns OK or an abort_t error code.
 */
929 static abort_t apply_reloc(struct ksplice_pack *pack,
930 const struct ksplice_reloc *r)
932 abort_t ret;
933 int canary_ret;
934 unsigned long sym_addr;
935 LIST_HEAD(vals);
/* canary_ret < 0: read failure; canary_ret == 0: the site was already
   overwritten (altinstr), so there is nothing to apply. */
937 canary_ret = contains_canary(pack, r->blank_addr, r->size, r->dst_mask);
938 if (canary_ret < 0)
939 return UNEXPECTED;
940 if (canary_ret == 0) {
941 ksdebug(pack, "reloc: skipped %s:%lx (altinstr)\n",
942 r->symbol->label, r->blank_offset);
943 return OK;
/* Before bootstrapping, kallsyms/export lookups are unavailable, so fall
   back to candidates from the packaged System.map. */
946 #ifdef KSPLICE_STANDALONE
947 if (!bootstrapped) {
948 ret = add_system_map_candidates(pack,
949 pack->primary_system_map,
950 pack->primary_system_map_end,
951 r->symbol->label, &vals);
952 if (ret != OK) {
953 release_vals(&vals);
954 return ret;
957 #endif /* KSPLICE_STANDALONE */
958 ret = lookup_symbol(pack, r->symbol, &vals);
959 if (ret != OK) {
960 release_vals(&vals);
961 return ret;
/* The symbol must resolve to exactly one candidate address. */
963 if (!singular(&vals)) {
964 release_vals(&vals);
965 ksdebug(pack, "Failed to find %s for reloc\n",
966 r->symbol->label);
967 return FAILED_TO_FIND;
969 sym_addr = list_entry(vals.next, struct candidate_val, list)->val;
970 release_vals(&vals);
972 ret = write_reloc_value(pack, r, r->blank_addr,
973 r->pcrel ? sym_addr - r->blank_addr : sym_addr);
974 if (ret != OK)
975 return ret;
/* Debug dump of the bytes now stored at the relocation site. */
977 ksdebug(pack, "reloc: %s:%lx", r->symbol->label, r->blank_offset);
978 ksdebug(pack, "(S=%lx A=%lx ", sym_addr, r->addend);
979 switch (r->size) {
980 case 1:
981 ksdebug(pack, "aft=%02x)\n", *(uint8_t *)r->blank_addr);
982 break;
983 case 2:
984 ksdebug(pack, "aft=%04x)\n", *(uint16_t *)r->blank_addr);
985 break;
986 case 4:
987 ksdebug(pack, "aft=%08x)\n", *(uint32_t *)r->blank_addr);
988 break;
989 #if BITS_PER_LONG >= 64
990 case 8:
991 ksdebug(pack, "aft=%016llx)\n", *(uint64_t *)r->blank_addr);
992 break;
993 #endif /* BITS_PER_LONG */
994 default:
995 ksdebug(pack, "Aborted. Invalid relocation size.\n");
996 return UNEXPECTED;
998 #ifdef KSPLICE_STANDALONE
999 if (!bootstrapped)
1000 return OK;
1001 #endif /* KSPLICE_STANDALONE */
1002 /* Create labelvals so that we can verify our choices in the second
1003 round of run-pre matching that considers data sections. */
1004 ret = create_labelval(pack, r->symbol->label, sym_addr, VAL);
1005 if (ret != OK)
1006 return ret;
1007 return add_dependency_on_address(pack, sym_addr);
/*
 * read_reloc_value: read the value stored at a relocation site and recover
 * the symbol address it encodes.
 *
 * Safely reads r->size bytes from addr, masks with dst_mask, sign-extends
 * when the relocation has a signed addend, undoes the right shift, and
 * subtracts the addend; the result is stored in *valp.  Returns NO_MATCH if
 * the address is unmapped, UNEXPECTED on a bad size, OK otherwise.
 */
1010 static abort_t read_reloc_value(struct ksplice_pack *pack,
1011 const struct ksplice_reloc *r,
1012 unsigned long addr, unsigned long *valp)
1014 unsigned char bytes[sizeof(long)];
1015 unsigned long val;
1017 if (probe_kernel_read(bytes, (void *)addr, r->size) == -EFAULT)
1018 return NO_MATCH;
1020 switch (r->size) {
1021 case 1:
1022 val = *(uint8_t *)bytes;
1023 break;
1024 case 2:
1025 val = *(uint16_t *)bytes;
1026 break;
1027 case 4:
1028 val = *(uint32_t *)bytes;
1029 break;
1030 #if BITS_PER_LONG >= 64
1031 case 8:
1032 val = *(uint64_t *)bytes;
1033 break;
1034 #endif /* BITS_PER_LONG */
1035 default:
1036 ksdebug(pack, "Aborted. Invalid relocation size.\n");
1037 return UNEXPECTED;
1040 val &= r->dst_mask;
/* Sign-extend: if the top bit of the masked field is set, OR in the
   negation of that bit so the high bits become all ones. */
1041 if (r->signed_addend)
1042 val |= -(val & (r->dst_mask & ~(r->dst_mask >> 1)));
1043 val <<= r->rightshift;
1044 val -= r->addend;
1045 *valp = val;
1046 return OK;
/*
 * write_reloc_value: store sym_addr (plus addend, shifted) into the
 * dst_mask bits of the r->size-byte field at addr, preserving the bits
 * outside the mask.  Afterwards the value is read back via
 * read_reloc_value to detect overflow of the relocation field.
 * Returns OK or UNEXPECTED.
 */
1049 static abort_t write_reloc_value(struct ksplice_pack *pack,
1050 const struct ksplice_reloc *r,
1051 unsigned long addr, unsigned long sym_addr)
1053 unsigned long val = sym_addr + r->addend;
1054 val >>= r->rightshift;
1055 switch (r->size) {
1056 case 1:
1057 *(uint8_t *)addr =
1058 (*(uint8_t *)addr & ~r->dst_mask) | (val & r->dst_mask);
1059 break;
1060 case 2:
1061 *(uint16_t *)addr =
1062 (*(uint16_t *)addr & ~r->dst_mask) | (val & r->dst_mask);
1063 break;
1064 case 4:
1065 *(uint32_t *)addr =
1066 (*(uint32_t *)addr & ~r->dst_mask) | (val & r->dst_mask);
1067 break;
1068 #if BITS_PER_LONG >= 64
1069 case 8:
1070 *(uint64_t *)addr =
1071 (*(uint64_t *)addr & ~r->dst_mask) | (val & r->dst_mask);
1072 break;
1073 #endif /* BITS_PER_LONG */
1074 default:
1075 ksdebug(pack, "Aborted. Invalid relocation size.\n");
1076 return UNEXPECTED;
/* Round-trip check: if decoding the stored field does not reproduce
   sym_addr, the value did not fit in the relocation field. */
1079 if (read_reloc_value(pack, r, addr, &val) != OK || val != sym_addr) {
1080 ksdebug(pack, "Aborted. Relocation overflow.\n");
1081 return UNEXPECTED;
1084 return OK;
/*
 * ksplice_deleted: trampoline target installed for functions that an update
 * deletes.  Any call into deleted code lands here and triggers BUG().
 * The infinite loop satisfies the noreturn attribute on pre-2.6.20 kernels
 * where BUG() was not itself marked noreturn.
 */
1087 static void __attribute__((noreturn)) ksplice_deleted(void)
1089 printk(KERN_CRIT "Called a kernel function deleted by Ksplice!\n");
1090 BUG();
1091 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
1092 /* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
1093 for (;;);
1094 #endif
/*
 * match_pack_sections: match every helper (pre) section of the pack to its
 * address in the running kernel.
 *
 * Iterates to a fixpoint: each pass tries find_section on every unfinished
 * section, and matching one section may make others matchable (their
 * relocations now resolve).  Only non-data sections count toward
 * "remaining"; data sections are attempted only when
 * consider_data_sections is true.  A pass with no progress is a failure.
 * Returns OK, NO_MATCH, OUT_OF_MEMORY, or another abort_t from
 * find_section.
 */
1097 static abort_t match_pack_sections(struct ksplice_pack *pack,
1098 bool consider_data_sections)
1100 const struct ksplice_section *sect;
1101 abort_t ret;
1102 char *finished;
1103 int i, remaining = 0;
1104 bool progress;
/* One "finished" flag per helper section. */
1106 finished = kcalloc(pack->helper_sections_end - pack->helper_sections,
1107 sizeof(*finished), GFP_KERNEL);
1108 if (finished == NULL)
1109 return OUT_OF_MEMORY;
1110 for (sect = pack->helper_sections; sect < pack->helper_sections_end;
1111 sect++) {
1112 if ((sect->flags & KSPLICE_SECTION_DATA) == 0)
1113 remaining++;
1116 while (remaining > 0) {
1117 progress = false;
1118 for (sect = pack->helper_sections;
1119 sect < pack->helper_sections_end; sect++) {
1120 i = sect - pack->helper_sections;
1121 if (finished[i])
1122 continue;
1123 if (!consider_data_sections &&
1124 (sect->flags & KSPLICE_SECTION_DATA) != 0)
1125 continue;
1126 ret = find_section(pack, sect);
1127 if (ret == OK) {
1128 finished[i] = 1;
1129 if ((sect->flags & KSPLICE_SECTION_DATA) == 0)
1130 remaining--;
1131 progress = true;
1132 } else if (ret != NO_MATCH) {
1133 kfree(finished);
1134 return ret;
1138 if (progress)
1139 continue;
/* No section matched this pass: report every unmatched section
   and give up. */
1141 for (sect = pack->helper_sections;
1142 sect < pack->helper_sections_end; sect++) {
1143 i = sect - pack->helper_sections;
1144 if (finished[i] == 0)
1145 ksdebug(pack, "run-pre: could not match "
1146 "section %s\n", sect->symbol->label);
1148 ksdebug(pack, "Aborted. run-pre: could not match some "
1149 "sections.\n");
1150 kfree(finished);
1151 return NO_MATCH;
1153 kfree(finished);
1154 return OK;
/*
 * find_section: locate the run-time address of one helper section.
 *
 * Collects candidate addresses for the section's symbol (System.map in
 * standalone mode, then lookup_symbol), winnows them with a trial run-pre
 * comparison (RUN_PRE_INITIAL), optionally falls back to a brute-force
 * memory scan, and if exactly one candidate survives performs the final
 * comparison (RUN_PRE_FINAL) which produces safety records.  On success
 * the safety records are spliced into pack->safety_records.
 * Returns OK, NO_MATCH, or another abort_t error.
 */
1157 static abort_t find_section(struct ksplice_pack *pack,
1158 const struct ksplice_section *sect)
1160 int i;
1161 abort_t ret;
1162 unsigned long run_addr;
1163 LIST_HEAD(vals);
1164 struct candidate_val *v, *n;
1166 #ifdef KSPLICE_STANDALONE
1167 ret = add_system_map_candidates(pack, pack->helper_system_map,
1168 pack->helper_system_map_end,
1169 sect->symbol->label, &vals);
1170 if (ret != OK) {
1171 release_vals(&vals);
1172 return ret;
1174 #endif /* KSPLICE_STANDALONE */
1175 ret = lookup_symbol(pack, sect->symbol, &vals);
1176 if (ret != OK) {
1177 release_vals(&vals);
1178 return ret;
1181 ksdebug(pack, "run-pre: starting sect search for %s\n",
1182 sect->symbol->label);
/* Drop every candidate that fails a trial comparison. */
1184 list_for_each_entry_safe(v, n, &vals, list) {
1185 run_addr = v->val;
1187 yield();
1188 ret = try_addr(pack, sect, run_addr, NULL, RUN_PRE_INITIAL);
1189 if (ret == NO_MATCH) {
1190 list_del(&v->list);
1191 kfree(v);
1192 } else if (ret != OK) {
1193 release_vals(&vals);
1194 return ret;
/* No candidate survived: brute-force scan text sections only. */
1198 #ifdef KSPLICE_STANDALONE
1199 if (list_empty(&vals) && (sect->flags & KSPLICE_SECTION_DATA) == 0) {
1200 ret = brute_search_all(pack, sect, &vals);
1201 if (ret != OK) {
1202 release_vals(&vals);
1203 return ret;
1205 /* Make sure run-pre matching output is displayed if
1206 brute_search succeeds */
1207 if (singular(&vals)) {
1208 run_addr = list_entry(vals.next, struct candidate_val,
1209 list)->val;
1210 ret = try_addr(pack, sect, run_addr, NULL,
1211 RUN_PRE_INITIAL);
1212 if (ret != OK) {
1213 ksdebug(pack, "run-pre: Debug run failed for "
1214 "sect %s:\n", sect->symbol->label);
1215 release_vals(&vals);
1216 return ret;
1220 #endif /* KSPLICE_STANDALONE */
/* Exactly one candidate: do the final, record-producing comparison. */
1222 if (singular(&vals)) {
1223 LIST_HEAD(safety_records);
1224 run_addr = list_entry(vals.next, struct candidate_val,
1225 list)->val;
1226 ret = try_addr(pack, sect, run_addr, &safety_records,
1227 RUN_PRE_FINAL);
1228 release_vals(&vals);
1229 if (ret != OK) {
1230 clear_list(&safety_records, struct safety_record, list);
1231 ksdebug(pack, "run-pre: Final run failed for sect "
1232 "%s:\n", sect->symbol->label);
1233 } else {
1234 list_splice(&safety_records, &pack->safety_records);
1236 return ret;
1237 } else if (!list_empty(&vals)) {
/* Ambiguous: log up to five candidates for diagnosis. */
1238 struct candidate_val *val;
1239 ksdebug(pack, "run-pre: multiple candidates for sect %s:\n",
1240 sect->symbol->label);
1241 i = 0;
1242 list_for_each_entry(val, &vals, list) {
1243 i++;
1244 ksdebug(pack, "%lx\n", val->val);
1245 if (i > 5) {
1246 ksdebug(pack, "...\n");
1247 break;
1250 release_vals(&vals);
1251 return NO_MATCH;
1253 release_vals(&vals);
1254 return NO_MATCH;
/*
 * try_addr: test whether the helper section sect matches the running
 * kernel at run_addr.
 *
 * Verifies that run_addr lives in a module this pack may patch (never in
 * the primary module itself), tentatively records the section's labelval,
 * and runs the run-pre byte comparison.  In non-final modes temp labelvals
 * are rolled back (NOVAL) on mismatch or kept only for the candidate
 * report; in RUN_PRE_FINAL mode a successful match commits them (VAL).
 * Returns OK, NO_MATCH, or another abort_t error.
 */
1257 static abort_t try_addr(struct ksplice_pack *pack,
1258 const struct ksplice_section *sect,
1259 unsigned long run_addr,
1260 struct list_head *safety_records,
1261 enum run_pre_mode mode)
1263 abort_t ret;
1264 const struct module *run_module;
/* Data/rodata lives outside module text, so use the data lookup. */
1266 if ((sect->flags & KSPLICE_SECTION_RODATA) != 0 ||
1267 (sect->flags & KSPLICE_SECTION_DATA) != 0)
1268 run_module = __module_data_address(run_addr);
1269 else
1270 run_module = __module_text_address(run_addr);
1271 if (run_module == pack->primary) {
1272 ksdebug(pack, "run-pre: unexpected address %lx in primary "
1273 "module %s for sect %s\n", run_addr, run_module->name,
1274 sect->symbol->label);
1275 return UNEXPECTED;
1277 if (!patches_module(run_module, pack->target)) {
1278 ksdebug(pack, "run-pre: ignoring address %lx in other module "
1279 "%s for sect %s\n", run_addr, run_module == NULL ?
1280 "vmlinux" : run_module->name, sect->symbol->label);
1281 return NO_MATCH;
1284 ret = create_labelval(pack, sect->symbol->label, run_addr, TEMP);
1285 if (ret != OK)
1286 return ret;
/* Without -ffunction-sections/-fdata-sections, text sections need the
   architecture-specific comparator. */
1288 #ifdef CONFIG_FUNCTION_DATA_SECTIONS
1289 ret = run_pre_cmp(pack, sect, run_addr, safety_records, mode);
1290 #else /* !CONFIG_FUNCTION_DATA_SECTIONS */
1291 if ((sect->flags & KSPLICE_SECTION_TEXT) != 0)
1292 ret = arch_run_pre_cmp(pack, sect, run_addr, safety_records,
1293 mode);
1294 else
1295 ret = run_pre_cmp(pack, sect, run_addr, safety_records, mode);
1296 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
1297 if (ret == NO_MATCH && mode != RUN_PRE_FINAL) {
1298 set_temp_labelvals(pack, NOVAL);
1299 ksdebug(pack, "run-pre: %s sect %s does not match (r_a=%lx "
1300 "p_a=%lx s=%lx)\n",
1301 (sect->flags & KSPLICE_SECTION_RODATA) != 0 ? "data" :
1302 "text", sect->symbol->label, run_addr, sect->address,
1303 sect->size);
1304 ksdebug(pack, "run-pre: ");
/* At debug level >= 1, rerun the comparison in debug mode so the
   mismatching bytes are printed. */
1305 if (pack->update->debug >= 1) {
1306 #ifdef CONFIG_FUNCTION_DATA_SECTIONS
1307 ret = run_pre_cmp(pack, sect, run_addr, safety_records,
1308 RUN_PRE_DEBUG);
1309 #else /* !CONFIG_FUNCTION_DATA_SECTIONS */
1310 if ((sect->flags & KSPLICE_SECTION_TEXT) != 0)
1311 ret = arch_run_pre_cmp(pack, sect, run_addr,
1312 safety_records,
1313 RUN_PRE_DEBUG);
1314 else
1315 ret = run_pre_cmp(pack, sect, run_addr,
1316 safety_records,
1317 RUN_PRE_DEBUG);
1318 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
1319 set_temp_labelvals(pack, NOVAL);
1321 ksdebug(pack, "\n");
1322 return ret;
1323 } else if (ret != OK) {
1324 set_temp_labelvals(pack, NOVAL);
1325 return ret;
1328 if (mode != RUN_PRE_FINAL) {
1329 set_temp_labelvals(pack, NOVAL);
1330 ksdebug(pack, "run-pre: candidate for sect %s=%lx\n",
1331 sect->symbol->label, run_addr);
1332 return OK;
/* Final mode and a clean match: commit the temp labelvals. */
1335 set_temp_labelvals(pack, VAL);
1336 ksdebug(pack, "run-pre: found sect %s=%lx\n", sect->symbol->label,
1337 run_addr);
1338 return OK;
/*
 * run_pre_cmp: byte-by-byte comparison of a helper (pre) section against
 * the running kernel at run_addr.
 *
 * Relocation sites are compared semantically via handle_reloc rather than
 * byte-wise; on text sections paravirt instruction sequences are matched
 * by handle_paravirt; remaining bytes must be identical (data sections are
 * exempt from the byte check here).  On a full match, a safety record
 * covering the matched run bytes is created.  In RUN_PRE_DEBUG mode every
 * compared byte is printed.  Returns OK, NO_MATCH, or another abort_t.
 */
1341 static abort_t run_pre_cmp(struct ksplice_pack *pack,
1342 const struct ksplice_section *sect,
1343 unsigned long run_addr,
1344 struct list_head *safety_records,
1345 enum run_pre_mode mode)
1347 int matched = 0;
1348 abort_t ret;
1349 const struct ksplice_reloc *r;
1350 const unsigned char *pre, *run, *pre_start, *run_start;
1351 unsigned char runval;
1353 pre_start = (const unsigned char *)sect->address;
1354 run_start = (const unsigned char *)run_addr;
1356 pre = pre_start;
1357 run = run_start;
1358 while (pre < pre_start + sect->size) {
1359 unsigned long offset = pre - pre_start;
/* A relocation at this pre address?  Compare the referenced
   value, not the raw bytes, and skip the whole field. */
1360 ret = lookup_reloc(pack, (unsigned long)pre, &r);
1361 if (ret == OK) {
1362 ret = handle_reloc(pack, r, (unsigned long)run, mode);
1363 if (ret != OK) {
1364 if (mode == RUN_PRE_INITIAL)
1365 ksdebug(pack, "reloc in sect does not "
1366 "match after %lx/%lx bytes\n",
1367 offset, sect->size);
1368 return ret;
1370 if (mode == RUN_PRE_DEBUG)
1371 print_bytes(pack, run, r->size, pre, r->size);
1372 pre += r->size;
1373 run += r->size;
1374 continue;
1375 } else if (ret != NO_MATCH) {
1376 return ret;
/* Paravirt patch sites may differ byte-wise yet be equivalent. */
1379 if ((sect->flags & KSPLICE_SECTION_TEXT) != 0) {
1380 ret = handle_paravirt(pack, (unsigned long)pre,
1381 (unsigned long)run, &matched);
1382 if (ret != OK)
1383 return ret;
1384 if (matched != 0) {
1385 if (mode == RUN_PRE_DEBUG)
1386 print_bytes(pack, run, matched, pre,
1387 matched);
1388 pre += matched;
1389 run += matched;
1390 continue;
1394 if (probe_kernel_read(&runval, (void *)run, 1) == -EFAULT) {
1395 if (mode == RUN_PRE_INITIAL)
1396 ksdebug(pack, "sect unmapped after %lx/%lx "
1397 "bytes\n", offset, sect->size);
1398 return NO_MATCH;
1401 if (runval != *pre &&
1402 (sect->flags & KSPLICE_SECTION_DATA) == 0) {
1403 if (mode == RUN_PRE_INITIAL)
1404 ksdebug(pack, "sect does not match after "
1405 "%lx/%lx bytes\n", offset, sect->size);
1406 if (mode == RUN_PRE_DEBUG) {
1407 print_bytes(pack, run, 1, pre, 1);
1408 ksdebug(pack, "[p_o=%lx] ! ", offset);
1409 print_bytes(pack, run + 1, 2, pre + 1, 2);
1411 return NO_MATCH;
1413 if (mode == RUN_PRE_DEBUG)
1414 print_bytes(pack, run, 1, pre, 1);
1415 pre++;
1416 run++;
1418 return create_safety_record(pack, sect, safety_records, run_addr,
1419 run - run_start);
/*
 * print_bytes: debug-dump up to runc run bytes against prec pre bytes.
 * Bytes that agree are printed once; bytes that differ are printed as
 * run/pre pairs; trailing bytes present on only one side keep the slash
 * on the side of the missing byte.
 */
static void print_bytes(struct ksplice_pack *pack,
			const unsigned char *run, int runc,
			const unsigned char *pre, int prec)
{
	int i;
	int common = min(runc, prec);

	/* Positions covered by both buffers. */
	for (i = 0; i < common; i++) {
		if (run[i] != pre[i])
			ksdebug(pack, "%02x/%02x ", run[i], pre[i]);
		else
			ksdebug(pack, "%02x ", run[i]);
	}
	/* Leftover bytes on the run side, then on the pre side. */
	for (i = common; i < runc; i++)
		ksdebug(pack, "%02x/ ", run[i]);
	for (i = common; i < prec; i++)
		ksdebug(pack, "/%02x ", pre[i]);
}
1440 #ifdef KSPLICE_STANDALONE
/*
 * brute_search: scan [start, start+len) one byte at a time for an address
 * where sect's run-pre comparison succeeds, appending each hit to vals.
 * The first-byte compare is a cheap filter before the full try_addr call;
 * yield() every 100000 addresses keeps the scan preemption-friendly.
 * An unmapped address ends the scan (returns OK).
 */
1441 static abort_t brute_search(struct ksplice_pack *pack,
1442 const struct ksplice_section *sect,
1443 const void *start, unsigned long len,
1444 struct list_head *vals)
1446 unsigned long addr;
1447 char run, pre;
1448 abort_t ret;
1450 for (addr = (unsigned long)start; addr < (unsigned long)start + len;
1451 addr++) {
1452 if (addr % 100000 == 0)
1453 yield();
1455 if (probe_kernel_read(&run, (void *)addr, 1) == -EFAULT)
1456 return OK;
1458 pre = *(const unsigned char *)(sect->address);
/* Quick reject: first bytes must match before the expensive check. */
1460 if (run != pre)
1461 continue;
1463 ret = try_addr(pack, sect, addr, NULL, RUN_PRE_INITIAL);
1464 if (ret == OK) {
1465 ret = add_candidate_val(pack, vals, addr);
1466 if (ret != OK)
1467 return ret;
1468 } else if (ret != NO_MATCH) {
1469 return ret;
1473 return OK;
/*
 * brute_search_all: brute_search over every region the section could live
 * in — the core and init areas of each loaded module this pack may patch
 * (excluding the primary module itself) and the kernel's own text.
 * Debug output is suppressed for the duration of the scan to avoid
 * flooding the log, and restored before returning.
 */
1476 static abort_t brute_search_all(struct ksplice_pack *pack,
1477 const struct ksplice_section *sect,
1478 struct list_head *vals)
1480 struct module *m;
1481 abort_t ret = OK;
1482 int saved_debug;
1484 ksdebug(pack, "brute_search: searching for %s\n", sect->symbol->label);
1485 saved_debug = pack->update->debug;
1486 pack->update->debug = 0;
1488 list_for_each_entry(m, &modules, list) {
1489 if (!patches_module(m, pack->target) || m == pack->primary)
1490 continue;
1491 ret = brute_search(pack, sect, m->module_core, m->core_size,
1492 vals);
1493 if (ret != OK)
1494 goto out;
1495 ret = brute_search(pack, sect, m->module_init, m->init_size,
1496 vals);
1497 if (ret != OK)
1498 goto out;
/* Finally scan the core kernel text (vmlinux). */
1501 ret = brute_search(pack, sect, (const void *)init_mm.start_code,
1502 init_mm.end_code - init_mm.start_code, vals);
1504 out:
1505 pack->update->debug = saved_debug;
1506 return ret;
1508 #endif /* KSPLICE_STANDALONE */
/*
 * lookup_reloc: find the helper relocation whose field covers addr.
 * On success *relocp is set and OK is returned; relocations whose canary
 * is already gone (altinstr) are reported as NO_MATCH so the caller falls
 * back to byte comparison.  addr landing strictly inside a relocation
 * field (not at its start) is an error.
 */
1510 static abort_t lookup_reloc(struct ksplice_pack *pack, unsigned long addr,
1511 const struct ksplice_reloc **relocp)
1513 const struct ksplice_reloc *r;
1514 int canary_ret;
1515 for (r = pack->helper_relocs; r < pack->helper_relocs_end; r++) {
1516 if (addr >= r->blank_addr && addr < r->blank_addr + r->size) {
1517 canary_ret = contains_canary(pack, r->blank_addr,
1518 r->size, r->dst_mask);
1519 if (canary_ret < 0)
1520 return UNEXPECTED;
1521 if (canary_ret == 0) {
1522 ksdebug(pack, "reloc: skipped %s:%lx "
1523 "(altinstr)\n", r->symbol->label,
1524 r->blank_offset);
1525 return NO_MATCH;
1527 if (addr != r->blank_addr) {
1528 ksdebug(pack, "Invalid nonzero relocation "
1529 "offset\n");
1530 return UNEXPECTED;
1532 *relocp = r;
1533 return OK;
1536 return NO_MATCH;
/*
 * handle_reloc: during run-pre matching, decode the value the running
 * kernel stores at run_addr for relocation r and bind it to r's symbol as
 * a TEMP labelval.  Returns create_labelval's result (NO_MATCH if the
 * symbol was previously bound to a different address), OK for read-only
 * string sections which need no binding, or UNEXPECTED if the run code
 * still contains a canary.
 */
1539 static abort_t handle_reloc(struct ksplice_pack *pack,
1540 const struct ksplice_reloc *r,
1541 unsigned long run_addr, enum run_pre_mode mode)
1543 unsigned long val;
1544 abort_t ret;
1546 ret = read_reloc_value(pack, r, run_addr, &val);
1547 if (ret != OK)
1548 return ret;
/* pc-relative fields store (target - site); add the site back. */
1549 if (r->pcrel)
1550 val += run_addr;
1552 if (mode == RUN_PRE_INITIAL)
1553 ksdebug(pack, "run-pre: reloc at r_a=%lx p_a=%lx to %s+%lx: "
1554 "found %s = %lx\n", run_addr, r->blank_addr,
1555 r->symbol->label, r->addend, r->symbol->label, val);
/* String-constant sections are not bound to a single address. */
1557 if (starts_with(r->symbol->label, ".rodata.str"))
1558 return OK;
1560 if (contains_canary(pack, run_addr, r->size, r->dst_mask) != 0) {
1561 ksdebug(pack, "Aborted. Unexpected canary in run code at %lx"
1562 "\n", run_addr);
1563 return UNEXPECTED;
1566 ret = create_labelval(pack, r->symbol->label, val, TEMP);
1567 if (ret == NO_MATCH && mode == RUN_PRE_INITIAL) {
1568 struct labelval *lv = find_labelval(pack, r->symbol->label);
1569 ksdebug(pack, "run-pre: reloc at r_a=%lx p_a=%lx: labelval %s "
1570 "= %lx(%d) does not match expected %lx\n", run_addr,
1571 r->blank_addr, r->symbol->label, lv->val, lv->status,
1572 val);
1574 return ret;
/*
 * lookup_symbol: collect candidate run-time addresses for ksym into vals.
 *
 * A previously recorded labelval is authoritative and replaces any other
 * candidates.  Otherwise candidates come from: the target module's exit
 * hook (for "cleanup_module"), the kernel's exported-symbol tables, new
 * exports added by this update's packs, and (if available) kallsyms.
 * Returns OK or an error from one of the lookups.
 */
1577 static abort_t lookup_symbol(struct ksplice_pack *pack,
1578 const struct ksplice_symbol *ksym,
1579 struct list_head *vals)
1581 abort_t ret;
1582 struct labelval *lv;
1584 #ifdef KSPLICE_STANDALONE
1585 if (!bootstrapped)
1586 return OK;
1587 #endif /* KSPLICE_STANDALONE */
/* A known labelval overrides any other candidate source. */
1589 lv = find_labelval(pack, ksym->label);
1590 if (lv != NULL) {
1591 release_vals(vals);
1592 ksdebug(pack, "using detected sym %s=%lx\n", ksym->label,
1593 lv->val);
1594 return add_candidate_val(pack, vals, lv->val);
/* cleanup_module has no symbol table entry; use the exit pointer. */
1597 #ifdef CONFIG_MODULE_UNLOAD
1598 if (strcmp(ksym->label, "cleanup_module") == 0 && pack->target != NULL
1599 && pack->target->exit != NULL) {
1600 ret = add_candidate_val(pack, vals,
1601 (unsigned long)pack->target->exit);
1602 if (ret != OK)
1603 return ret;
1605 #endif
1607 if (ksym->name != NULL) {
1608 ret = exported_symbol_lookup(pack, ksym->name, vals);
1609 if (ret != OK)
1610 return ret;
1612 ret = new_export_lookup(pack, pack->update, ksym->name, vals);
1613 if (ret != OK)
1614 return ret;
1616 #ifdef CONFIG_KALLSYMS
1617 ret = lookup_symbol_kallsyms(pack, ksym->name, vals);
1618 if (ret != OK)
1619 return ret;
1620 #endif /* CONFIG_KALLSYMS */
1623 return OK;
1626 #ifdef KSPLICE_STANDALONE
/*
 * add_system_map_candidates: append the System.map addresses recorded for
 * label (from the [start, end) table) to vals, after correcting for any
 * kernel relocation offset.  The offset is measured against printk and
 * must be a multiple of 0x100000, otherwise the supplied System.map does
 * not belong to the running kernel.  A label absent from the table is not
 * an error.
 */
1627 static abort_t
1628 add_system_map_candidates(struct ksplice_pack *pack,
1629 const struct ksplice_system_map *start,
1630 const struct ksplice_system_map *end,
1631 const char *label, struct list_head *vals)
1633 abort_t ret;
1634 long off;
1635 int i;
1636 const struct ksplice_system_map *smap;
1638 /* Some Fedora kernel releases have System.map files whose symbol
1639 * addresses disagree with the running kernel by a constant address
1640 * offset because of the CONFIG_PHYSICAL_START and CONFIG_PHYSICAL_ALIGN
1641 * values used to compile these kernels. This constant address offset
1642 * is always a multiple of 0x100000.
1644 * If we observe an offset that is NOT a multiple of 0x100000, then the
1645 * user provided us with an incorrect System.map file, and we should
1646 * abort.
1647 * If we observe an offset that is a multiple of 0x100000, then we can
1648 * adjust the System.map address values accordingly and proceed.
1650 off = (unsigned long)printk - pack->map_printk;
1651 if (off & 0xfffff) {
1652 ksdebug(pack, "Aborted. System.map does not match kernel.\n");
1653 return BAD_SYSTEM_MAP;
1655 for (smap = start; smap < end; smap++) {
1656 if (strcmp(smap->label, label) == 0)
1657 break;
1659 if (smap >= end)
1660 return OK;
1661 for (i = 0; i < smap->nr_candidates; i++) {
1662 ret = add_candidate_val(pack, vals, smap->candidates[i] + off);
1663 if (ret != OK)
1664 return ret;
1666 return OK;
1668 #endif /* KSPLICE_STANDALONE */
1670 #ifdef CONFIG_KALLSYMS
/*
 * lookup_symbol_kallsyms: walk the kernel's kallsyms table, adding every
 * symbol whose name matches to vals (via accumulate_matching_names).
 */
1671 static abort_t lookup_symbol_kallsyms(struct ksplice_pack *pack,
1672 const char *name, struct list_head *vals)
1674 struct accumulate_struct acc = { pack, name, vals };
1675 return (__force abort_t)
1676 kallsyms_on_each_symbol(accumulate_matching_names, &acc);
/*
 * accumulate_matching_names: kallsyms_on_each_symbol callback.  Records
 * sym_val when the name matches, the owning module is one this pack may
 * patch, and the symbol does not come from the primary module itself.
 * abort_t codes are smuggled through the int return (__force casts).
 */
1679 static int accumulate_matching_names(void *data, const char *sym_name,
1680 struct module *sym_owner,
1681 unsigned long sym_val)
1683 struct accumulate_struct *acc = data;
1684 if (strcmp(sym_name, acc->desired_name) == 0 &&
1685 patches_module(sym_owner, acc->pack->target) &&
1686 sym_owner != acc->pack->primary)
1687 return (__force int)add_candidate_val(acc->pack, acc->vals,
1688 sym_val);
1689 return (__force int)OK;
1691 #endif /* CONFIG_KALLSYMS */
/*
 * exported_symbol_lookup: add the address of an EXPORT_SYMBOL'd symbol, if
 * one exists, to vals.  A missing export is not an error.
 */
1693 static abort_t exported_symbol_lookup(struct ksplice_pack *pack,
1694 const char *name, struct list_head *vals)
1696 const struct kernel_symbol *sym;
1697 sym = find_symbol(name, NULL, NULL, true, false);
1698 if (sym == NULL)
1699 return OK;
1700 return add_candidate_val(pack, vals, sym->value);
/*
 * new_export_lookup: search every pack of this update for an export that
 * the update itself introduces under new_name, and add its value to vals.
 * An export whose value still contains the canary has not been resolved
 * yet and is skipped.
 */
1703 static abort_t new_export_lookup(struct ksplice_pack *p, struct update *update,
1704 const char *name, struct list_head *vals)
1706 struct ksplice_pack *pack;
1707 struct ksplice_export *exp;
1708 list_for_each_entry(pack, &update->packs, list) {
1709 for (exp = pack->exports; exp < pack->exports_end; exp++) {
1710 if (strcmp(exp->new_name, name) == 0 &&
1711 exp->sym != NULL &&
1712 contains_canary(pack,
1713 (unsigned long)&exp->sym->value,
1714 sizeof(unsigned long), -1) == 0)
1715 return add_candidate_val(p, vals,
1716 exp->sym->value);
1719 return OK;
/*
 * apply_patches: atomically insert every trampoline of the update.
 *
 * First creates safety records for the update's own primary sections, then
 * runs __apply_patches under stop_machine, retrying up to five times with
 * a one-second sleep whenever the stack check finds to-be-replaced code in
 * use (CODE_BUSY).  Returns OK on success or the abort_t from the last
 * attempt.
 */
1722 static abort_t apply_patches(struct update *update)
1724 int i;
1725 abort_t ret;
1726 struct ksplice_pack *pack;
1727 const struct ksplice_section *sect;
1729 list_for_each_entry(pack, &update->packs, list) {
1730 for (sect = pack->primary_sections;
1731 sect < pack->primary_sections_end; sect++) {
1732 ret = create_safety_record(pack, sect,
1733 &pack->safety_records,
1734 sect->address, sect->size);
1735 if (ret != OK)
1736 return ret;
/* Retry loop: each attempt freezes all CPUs via stop_machine. */
1740 for (i = 0; i < 5; i++) {
1741 cleanup_conflicts(update);
1742 #ifdef KSPLICE_STANDALONE
1743 bust_spinlocks(1);
1744 #endif /* KSPLICE_STANDALONE */
1745 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
1746 ret = (__force abort_t)stop_machine(__apply_patches, update,
1747 NULL);
1748 #else /* LINUX_VERSION_CODE < */
1749 /* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
1750 ret = (__force abort_t)stop_machine_run(__apply_patches, update,
1751 NR_CPUS);
1752 #endif /* LINUX_VERSION_CODE */
1753 #ifdef KSPLICE_STANDALONE
1754 bust_spinlocks(0);
1755 #endif /* KSPLICE_STANDALONE */
1756 if (ret != CODE_BUSY)
1757 break;
1758 set_current_state(TASK_INTERRUPTIBLE);
1759 schedule_timeout(msecs_to_jiffies(1000));
1762 if (ret == CODE_BUSY) {
1763 print_conflicts(update);
1764 _ksdebug(update, "Aborted %s. stack check: to-be-replaced "
1765 "code is busy.\n", update->kid);
1766 } else if (ret == ALREADY_REVERSED) {
1767 _ksdebug(update, "Aborted %s. Ksplice update %s is already "
1768 "reversed.\n", update->kid, update->kid);
1771 if (ret != OK)
1772 return ret;
1774 _ksdebug(update, "Atomic patch insertion for %s complete\n",
1775 update->kid);
1776 return OK;
/*
 * reverse_patches: atomically remove every trampoline of the update.
 *
 * Resets the debug buffer, then runs __reverse_patches under stop_machine
 * with the same five-attempt / one-second-sleep retry policy as
 * apply_patches.  All safety records are dropped afterwards regardless of
 * outcome.  Returns OK or the abort_t from the last attempt.
 */
1779 static abort_t reverse_patches(struct update *update)
1781 int i;
1782 abort_t ret;
1783 struct ksplice_pack *pack;
1785 clear_debug_buf(update);
1786 ret = init_debug_buf(update);
1787 if (ret != OK)
1788 return ret;
1790 _ksdebug(update, "Preparing to reverse %s\n", update->kid);
1792 for (i = 0; i < 5; i++) {
1793 cleanup_conflicts(update);
1794 clear_list(&update->conflicts, struct conflict, list);
1795 #ifdef KSPLICE_STANDALONE
1796 bust_spinlocks(1);
1797 #endif /* KSPLICE_STANDALONE */
1798 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
1799 ret = (__force abort_t)stop_machine(__reverse_patches, update,
1800 NULL);
1801 #else /* LINUX_VERSION_CODE < */
1802 /* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
1803 ret = (__force abort_t)stop_machine_run(__reverse_patches,
1804 update, NR_CPUS);
1805 #endif /* LINUX_VERSION_CODE */
1806 #ifdef KSPLICE_STANDALONE
1807 bust_spinlocks(0);
1808 #endif /* KSPLICE_STANDALONE */
1809 if (ret != CODE_BUSY)
1810 break;
1811 set_current_state(TASK_INTERRUPTIBLE);
1812 schedule_timeout(msecs_to_jiffies(1000));
/* Safety records are only meaningful while the update is applied. */
1814 list_for_each_entry(pack, &update->packs, list)
1815 clear_list(&pack->safety_records, struct safety_record, list);
1817 if (ret == CODE_BUSY) {
1818 print_conflicts(update);
1819 _ksdebug(update, "Aborted %s. stack check: to-be-reversed "
1820 "code is busy.\n", update->kid);
1821 } else if (ret == MODULE_BUSY) {
1822 _ksdebug(update, "Update %s is in use by another module\n",
1823 update->kid);
1826 if (ret != OK)
1827 return ret;
1829 _ksdebug(update, "Atomic patch removal for %s complete\n", update->kid);
1830 return OK;
/*
 * __apply_patches: the stop_machine callback that actually applies the
 * update.  Runs with all other CPUs halted.
 *
 * Verifies the update stage and that no task is executing to-be-replaced
 * code, pins every primary module, then flips the stage, registers the
 * packs, renames exports, and writes the trampolines.  The abort_t result
 * is returned through stop_machine's int (__force casts).
 */
1833 static int __apply_patches(void *updateptr)
1835 struct update *update = updateptr;
1836 struct ksplice_pack *pack;
1837 struct ksplice_patch *p;
1838 struct ksplice_export *exp;
1839 abort_t ret;
1841 if (update->stage == STAGE_APPLIED)
1842 return (__force int)OK;
1844 if (update->stage != STAGE_PREPARING)
1845 return (__force int)UNEXPECTED;
1847 ret = check_each_task(update);
1848 if (ret != OK)
1849 return (__force int)ret;
/* Pin all primary modules; on failure drop the references already
   taken (packs before the failing one) and bail out. */
1851 list_for_each_entry(pack, &update->packs, list) {
1852 if (try_module_get(pack->primary) != 1) {
1853 struct ksplice_pack *pack1;
1854 list_for_each_entry(pack1, &update->packs, list) {
1855 if (pack1 == pack)
1856 break;
1857 module_put(pack1->primary);
1859 return (__force int)UNEXPECTED;
1863 update->stage = STAGE_APPLIED;
1864 #ifdef TAINT_KSPLICE
1865 add_taint(TAINT_KSPLICE);
1866 #endif
1868 list_for_each_entry(pack, &update->packs, list)
1869 list_add(&pack->module_list_entry.list, &ksplice_module_list);
/* Activate the update's new exports under their new names. */
1871 list_for_each_entry(pack, &update->packs, list) {
1872 for (exp = pack->exports; exp < pack->exports_end; exp++)
1873 exp->sym->name = exp->new_name;
1876 list_for_each_entry(pack, &update->packs, list) {
1877 for (p = pack->patches; p < pack->patches_end; p++)
1878 insert_trampoline(p);
1880 return (__force int)OK;
/*
 * __reverse_patches: the stop_machine callback that removes the update.
 * Runs with all other CPUs halted.
 *
 * Refuses if any primary module has extra references or a trampoline has
 * been overwritten, and if any task is executing to-be-reversed code.
 * Otherwise flips the stage, releases module references, unregisters the
 * packs, restores export names, and removes the trampolines.
 */
1883 static int __reverse_patches(void *updateptr)
1885 struct update *update = updateptr;
1886 struct ksplice_pack *pack;
1887 const struct ksplice_patch *p;
1888 struct ksplice_export *exp;
1889 abort_t ret;
1891 if (update->stage != STAGE_APPLIED)
1892 return (__force int)OK;
/* Refcount 1 is our own reference from __apply_patches; anything
   more means some other module depends on this update. */
1894 #ifdef CONFIG_MODULE_UNLOAD
1895 list_for_each_entry(pack, &update->packs, list) {
1896 if (module_refcount(pack->primary) != 1)
1897 return (__force int)MODULE_BUSY;
1899 #endif /* CONFIG_MODULE_UNLOAD */
1901 ret = check_each_task(update);
1902 if (ret != OK)
1903 return (__force int)ret;
1905 list_for_each_entry(pack, &update->packs, list) {
1906 for (p = pack->patches; p < pack->patches_end; p++) {
1907 ret = verify_trampoline(pack, p);
1908 if (ret != OK)
1909 return (__force int)ret;
1913 update->stage = STAGE_REVERSED;
1915 list_for_each_entry(pack, &update->packs, list)
1916 module_put(pack->primary);
1918 list_for_each_entry(pack, &update->packs, list)
1919 list_del(&pack->module_list_entry.list);
1921 list_for_each_entry(pack, &update->packs, list) {
1922 for (exp = pack->exports; exp < pack->exports_end; exp++)
1923 exp->sym->name = exp->saved_name;
1926 list_for_each_entry(pack, &update->packs, list) {
1927 for (p = pack->patches; p < pack->patches_end; p++)
1928 remove_trampoline(p);
1930 return (__force int)OK;
/*
 * check_each_task: run the stack check on every thread in the system.
 * On a conflict, check_task is re-run with rerun=true so the offending
 * stack is recorded for later reporting.  CODE_BUSY conflicts are
 * accumulated (all tasks are still scanned); any other error aborts the
 * scan immediately.  Pre-2.6.11 kernels require tasklist_lock here.
 */
1933 static abort_t check_each_task(struct update *update)
1935 const struct task_struct *g, *p;
1936 abort_t status = OK, ret;
1937 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
1938 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
1939 read_lock(&tasklist_lock);
1940 #endif /* LINUX_VERSION_CODE */
1941 do_each_thread(g, p) {
1942 /* do_each_thread is a double loop! */
1943 ret = check_task(update, p, false);
1944 if (ret != OK) {
1945 check_task(update, p, true);
1946 status = ret;
1948 if (ret != OK && ret != CODE_BUSY)
1949 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
1950 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
1951 goto out;
1952 #else /* LINUX_VERSION_CODE < */
1953 return ret;
1954 #endif /* LINUX_VERSION_CODE */
1955 } while_each_thread(g, p);
1956 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
1957 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
1958 out:
1959 read_unlock(&tasklist_lock);
1960 #endif /* LINUX_VERSION_CODE */
1961 return status;
/*
 * check_task: check whether task t could be executing patched code.
 *
 * When rerun is true a conflict structure is allocated (GFP_ATOMIC — we
 * are inside stop_machine) so check_address can record the offending
 * stack addresses.  Checks the task's saved instruction pointer and its
 * stack: the current task's stack is walked from our own frame, a
 * non-running task's from its saved stack pointer; any other running task
 * (except the stop_machine threads) is UNEXPECTED_RUNNING_TASK.
 */
1964 static abort_t check_task(struct update *update,
1965 const struct task_struct *t, bool rerun)
1967 abort_t status, ret;
1968 struct conflict *conf = NULL;
1970 if (rerun) {
1971 conf = kmalloc(sizeof(*conf), GFP_ATOMIC);
1972 if (conf == NULL)
1973 return OUT_OF_MEMORY;
1974 conf->process_name = kstrdup(t->comm, GFP_ATOMIC);
1975 if (conf->process_name == NULL) {
1976 kfree(conf);
1977 return OUT_OF_MEMORY;
1979 conf->pid = t->pid;
1980 INIT_LIST_HEAD(&conf->stack);
1981 list_add(&conf->list, &update->conflicts);
1984 status = check_address(update, conf, KSPLICE_IP(t));
1985 if (t == current) {
1986 ret = check_stack(update, conf, task_thread_info(t),
1987 (unsigned long *)__builtin_frame_address(0));
1988 if (status == OK)
1989 status = ret;
1990 } else if (!task_curr(t)) {
1991 ret = check_stack(update, conf, task_thread_info(t),
1992 (unsigned long *)KSPLICE_SP(t));
1993 if (status == OK)
1994 status = ret;
1995 } else if (!is_stop_machine(t)) {
1996 status = UNEXPECTED_RUNNING_TASK;
1998 return status;
/*
 * check_stack: walk a kernel stack word by word, checking every value that
 * could be a return address against the update's safety records.  Keeps
 * scanning after a hit so every conflicting address gets recorded;
 * returns the last non-OK status seen, or OK.
 */
2001 static abort_t check_stack(struct update *update, struct conflict *conf,
2002 const struct thread_info *tinfo,
2003 const unsigned long *stack)
2005 abort_t status = OK, ret;
2006 unsigned long addr;
2008 while (valid_stack_ptr(tinfo, stack)) {
2009 addr = *stack++;
2010 ret = check_address(update, conf, addr);
2011 if (ret != OK)
2012 status = ret;
2014 return status;
/*
 * check_address: test one address against every safety record of every
 * pack in the update.  When conf is non-NULL (conflict-recording rerun),
 * the address is first appended to the conflict's stack trace so
 * check_record can mark it if it hits.  Returns CODE_BUSY on any hit,
 * OUT_OF_MEMORY on allocation failure, OK otherwise.
 */
2017 static abort_t check_address(struct update *update,
2018 struct conflict *conf, unsigned long addr)
2020 abort_t status = OK, ret;
2021 const struct safety_record *rec;
2022 struct ksplice_pack *pack;
2023 struct conflict_addr *ca = NULL;
2025 if (conf != NULL) {
2026 ca = kmalloc(sizeof(*ca), GFP_ATOMIC);
2027 if (ca == NULL)
2028 return OUT_OF_MEMORY;
2029 ca->addr = addr;
2030 ca->has_conflict = false;
2031 ca->label = NULL;
2032 list_add(&ca->list, &conf->stack);
2035 list_for_each_entry(pack, &update->packs, list) {
2036 list_for_each_entry(rec, &pack->safety_records, list) {
2037 ret = check_record(ca, rec, addr);
2038 if (ret != OK)
2039 status = ret;
2042 return status;
2045 static abort_t check_record(struct conflict_addr *ca,
2046 const struct safety_record *rec, unsigned long addr)
2048 if ((addr > rec->addr && addr < rec->addr + rec->size) ||
2049 (addr == rec->addr && !rec->first_byte_safe)) {
2050 if (ca != NULL) {
2051 ca->label = rec->label;
2052 ca->has_conflict = true;
2054 return CODE_BUSY;
2056 return OK;
/*
 * is_stop_machine: recognize the stop_machine worker threads by comm name
 * so check_task does not flag them as unexpectedly running.  2.6.27+
 * names them "kstop<N>"; older kernels use a single "kstopmachine".
 */
2059 static bool is_stop_machine(const struct task_struct *t)
2061 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
2062 const char *num;
2063 if (!starts_with(t->comm, "kstop"))
2064 return false;
2065 num = t->comm + strlen("kstop");
/* The suffix must be digits only, all the way to the terminator. */
2066 return num[strspn(num, "0123456789")] == '\0';
2067 #else /* LINUX_VERSION_CODE < */
2068 return strcmp(t->comm, "kstopmachine") == 0;
2069 #endif /* LINUX_VERSION_CODE */
/*
 * cleanup_conflicts: free every recorded conflict of the update — each
 * conflict's stack-address list and duplicated process name, then the
 * conflict list itself.
 */
2072 static void cleanup_conflicts(struct update *update)
2074 struct conflict *conf;
2075 list_for_each_entry(conf, &update->conflicts, list) {
2076 clear_list(&conf->stack, struct conflict_addr, list);
2077 kfree(conf->process_name);
2079 clear_list(&update->conflicts, struct conflict, list);
/*
 * print_conflicts: dump every recorded stack-check conflict to the debug
 * log: one line per task, listing the scanned stack addresses and tagging
 * those that hit a safety record.
 */
2082 static void print_conflicts(struct update *update)
2084 const struct conflict *conf;
2085 const struct conflict_addr *ca;
2086 list_for_each_entry(conf, &update->conflicts, list) {
2087 _ksdebug(update, "stack check: pid %d (%s):", conf->pid,
2088 conf->process_name);
2089 list_for_each_entry(ca, &conf->stack, list) {
2090 _ksdebug(update, " %lx", ca->addr);
2091 if (ca->has_conflict)
2092 _ksdebug(update, " [<-CONFLICT]");
2094 _ksdebug(update, "\n");
/*
 * insert_trampoline: save the original bytes at the patch site, overwrite
 * them with the trampoline, and flush the icache for the modified range.
 * set_fs(KERNEL_DS) lifts the usercopy address-limit check for the writes.
 */
2098 static void insert_trampoline(struct ksplice_patch *p)
2100 mm_segment_t old_fs = get_fs();
2101 set_fs(KERNEL_DS);
2102 memcpy((void *)p->saved, (void *)p->oldaddr, p->size);
2103 memcpy((void *)p->oldaddr, (void *)p->trampoline, p->size);
2104 flush_icache_range(p->oldaddr, p->oldaddr + p->size);
2105 set_fs(old_fs);
/*
 * verify_trampoline: confirm the bytes at the patch site still equal the
 * trampoline we installed; CODE_BUSY if something overwrote them (it
 * would be unsafe to restore the saved bytes on top).
 */
2108 static abort_t verify_trampoline(struct ksplice_pack *pack,
2109 const struct ksplice_patch *p)
2111 if (memcmp((void *)p->oldaddr, (void *)p->trampoline, p->size) != 0) {
2112 ksdebug(pack, "Aborted. Trampoline at %lx has been "
2113 "overwritten.\n", p->oldaddr);
2114 return CODE_BUSY;
2116 return OK;
/*
 * remove_trampoline: restore the original bytes saved by
 * insert_trampoline and flush the icache for the range.
 */
2119 static void remove_trampoline(const struct ksplice_patch *p)
2121 mm_segment_t old_fs = get_fs();
2122 set_fs(KERNEL_DS);
2123 memcpy((void *)p->oldaddr, (void *)p->saved, p->size);
2124 flush_icache_range(p->oldaddr, p->oldaddr + p->size);
2125 set_fs(old_fs);
2128 static struct labelval *find_labelval(struct ksplice_pack *pack,
2129 const char *label)
2131 struct labelval *lv;
2132 list_for_each_entry(lv, &pack->labelvals, list) {
2133 if (strcmp(lv->label, label) == 0)
2134 return lv;
2136 return NULL;
/*
 * create_labelval: bind label to val (after following any trampolines at
 * val) with the given status (e.g. VAL or TEMP).  If the label is already
 * bound, succeed only when the values agree (NO_MATCH otherwise); a
 * fresh binding is appended to pack->labelvals.
 */
2139 static abort_t create_labelval(struct ksplice_pack *pack, const char *label,
2140 unsigned long val, int status)
2142 struct labelval *lv = find_labelval(pack, label);
2143 val = follow_trampolines(pack, val);
2144 if (lv != NULL)
2145 return lv->val == val ? OK : NO_MATCH;
2147 lv = kmalloc(sizeof(*lv), GFP_KERNEL);
2148 if (lv == NULL)
2149 return OUT_OF_MEMORY;
2150 lv->label = label;
2151 lv->val = val;
2152 lv->status = status;
2153 list_add(&lv->list, &pack->labelvals);
2154 return OK;
/* Create a safety record covering [run_addr, run_addr + run_size) for a
 * section that is the target of one of this pack's patches; records on
 * record_list are consulted later (e.g. by the conflict/stack check).
 * No-op when record_list is NULL or no patch targets this section. */
static abort_t create_safety_record(struct ksplice_pack *pack,
				    const struct ksplice_section *sect,
				    struct list_head *record_list,
				    unsigned long run_addr,
				    unsigned long run_size)
{
	struct safety_record *rec;
	struct ksplice_patch *p;

	if (record_list == NULL)
		return OK;

	/* Only sections matched (by label) to a patch need records. */
	for (p = pack->patches; p < pack->patches_end; p++) {
		if (strcmp(sect->symbol->label, p->label) == 0)
			break;
	}
	if (p >= pack->patches_end)
		return OK;

	/* A non-text section may only be patched by deletion
	 * (repladdr == 0); anything else is a packaging error. */
	if ((sect->flags & KSPLICE_SECTION_TEXT) == 0 && p->repladdr != 0) {
		ksdebug(pack, "Error: ksplice_patch %s is matched to a "
			"non-deleted non-text section!\n", sect->symbol->label);
		return UNEXPECTED;
	}

	rec = kmalloc(sizeof(*rec), GFP_KERNEL);
	if (rec == NULL)
		return OUT_OF_MEMORY;
	rec->addr = run_addr;
	rec->size = run_size;
	rec->label = sect->symbol->label;
	rec->first_byte_safe = false;

	list_add(&rec->list, record_list);
	return OK;
}
2194 static abort_t add_candidate_val(struct ksplice_pack *pack,
2195 struct list_head *vals, unsigned long val)
2197 struct candidate_val *tmp, *new;
2198 val = follow_trampolines(pack, val);
2200 list_for_each_entry(tmp, vals, list) {
2201 if (tmp->val == val)
2202 return OK;
2204 new = kmalloc(sizeof(*new), GFP_KERNEL);
2205 if (new == NULL)
2206 return OUT_OF_MEMORY;
2207 new->val = val;
2208 list_add(&new->list, vals);
2209 return OK;
/* Free every candidate_val on the list and leave the list empty. */
static void release_vals(struct list_head *vals)
{
	clear_list(vals, struct candidate_val, list);
}
/* Finalize every TEMP labelval on the pack: promote it to the given
 * status, or delete it entirely when status is NOVAL. */
static void set_temp_labelvals(struct ksplice_pack *pack, int status)
{
	struct labelval *lv, *n;
	list_for_each_entry_safe(lv, n, &pack->labelvals, list) {
		if (lv->status == TEMP) {
			if (status == NOVAL) {
				list_del(&lv->list);
				kfree(lv);
			} else {
				lv->status = status;
			}
		}
	}
}
/* Return 1 if the size-byte relocation slot at blank_addr still holds
 * the KSPLICE_CANARY placeholder (compared under dst_mask), 0 if the
 * slot has been filled in, or -1 for an unsupported size. */
static int contains_canary(struct ksplice_pack *pack, unsigned long blank_addr,
			   int size, long dst_mask)
{
	switch (size) {
	case 1:
		return (*(uint8_t *)blank_addr & dst_mask) ==
		    (KSPLICE_CANARY & dst_mask);
	case 2:
		return (*(uint16_t *)blank_addr & dst_mask) ==
		    (KSPLICE_CANARY & dst_mask);
	case 4:
		return (*(uint32_t *)blank_addr & dst_mask) ==
		    (KSPLICE_CANARY & dst_mask);
#if BITS_PER_LONG >= 64
	case 8:
		return (*(uint64_t *)blank_addr & dst_mask) ==
		    (KSPLICE_CANARY & dst_mask);
#endif /* BITS_PER_LONG */
	default:
		ksdebug(pack, "Aborted. Invalid relocation size.\n");
		return -1;
	}
}
/* Starting at addr, chase ksplice trampolines for as long as each jump
 * target lands inside another ksplice module (not the target module,
 * not the core kernel).  Returns the final address, or addr unchanged
 * when it is not a recognizable trampoline. */
static unsigned long follow_trampolines(struct ksplice_pack *pack,
					unsigned long addr)
{
	unsigned long new_addr;
	struct module *m;

	while (1) {
		if (trampoline_target(pack, addr, &new_addr) != OK)
			return addr;	/* not a trampoline we know */
		m = __module_text_address(new_addr);
		/* Jumps into the core kernel, the target module, or any
		 * non-ksplice module are final: do not follow them. */
		if (m == NULL || m == pack->target ||
		    !starts_with(m->name, "ksplice"))
			return addr;
		ksdebug(pack, "Following trampoline %lx %lx(%s)\n", addr,
			new_addr, m->name);
		addr = new_addr;
	}
}
/* Does module a patch module b? */
static bool patches_module(const struct module *a, const struct module *b)
{
#ifdef KSPLICE_NO_KERNEL_SUPPORT
	const char *name;
	if (a == b)
		return true;
	if (a == NULL || !starts_with(a->name, "ksplice_"))
		return false;
	/* Primary names look like "ksplice_<kid>_<target>": skip the
	 * "ksplice_" prefix and the kid, then compare the target part. */
	name = a->name + strlen("ksplice_");
	name += strcspn(name, "_");
	if (name[0] != '_')
		return false;
	name++;
	/* b == NULL stands for the core kernel ("vmlinux"). */
	return strcmp(name, b == NULL ? "vmlinux" : b->name) == 0;
#else /* !KSPLICE_NO_KERNEL_SUPPORT */
	struct ksplice_module_list_entry *entry;
	if (a == b)
		return true;
	/* With kernel support the relationship is recorded explicitly. */
	list_for_each_entry(entry, &ksplice_module_list, list) {
		if (entry->target == b && entry->primary == a)
			return true;
	}
	return false;
#endif /* KSPLICE_NO_KERNEL_SUPPORT */
}
/* Return true iff str begins with the (possibly empty) prefix. */
static bool starts_with(const char *str, const char *prefix)
{
	size_t prefix_len = strlen(prefix);

	return strncmp(str, prefix, prefix_len) == 0;
}
2307 static bool singular(struct list_head *list)
2309 return !list_empty(list) && list->next->next == list;
2312 #ifdef CONFIG_DEBUG_FS
2313 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
2314 /* Old kernels don't have debugfs_create_blob */
/* .read handler for the fallback blob file: copy out of the wrapped
 * buffer starting at *ppos. */
static ssize_t read_file_blob(struct file *file, char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct debugfs_blob_wrapper *blob = file->private_data;
	return simple_read_from_buffer(user_buf, count, ppos, blob->data,
				       blob->size);
}
2323 static int blob_open(struct inode *inode, struct file *file)
2325 if (inode->i_private)
2326 file->private_data = inode->i_private;
2327 return 0;
/* Minimal read-only file_operations backing the fallback blob file. */
static struct file_operations fops_blob = {
	.read = read_file_blob,
	.open = blob_open,
};
/* Local stand-in for debugfs_create_blob() on pre-2.6.17 kernels:
 * expose the wrapper through a plain debugfs file. */
static struct dentry *debugfs_create_blob(const char *name, mode_t mode,
					  struct dentry *parent,
					  struct debugfs_blob_wrapper *blob)
{
	return debugfs_create_file(name, mode, parent, blob, &fops_blob);
}
2341 #endif /* LINUX_VERSION_CODE */
/* Create the per-update debugfs blob that exposes the debug log.
 * The blob starts empty; _ksdebug() grows it on demand. */
static abort_t init_debug_buf(struct update *update)
{
	update->debug_blob.size = 0;
	update->debug_blob.data = NULL;
	update->debugfs_dentry =
	    debugfs_create_blob(update->name, S_IFREG | S_IRUSR, NULL,
				&update->debug_blob);
	if (update->debugfs_dentry == NULL)
		return OUT_OF_MEMORY;
	return OK;
}
2355 static void clear_debug_buf(struct update *update)
2357 if (update->debugfs_dentry == NULL)
2358 return;
2359 debugfs_remove(update->debugfs_dentry);
2360 update->debugfs_dentry = NULL;
2361 update->debug_blob.size = 0;
2362 vfree(update->debug_blob.data);
2363 update->debug_blob.data = NULL;
/* Append a printf-style message to the update's debug blob, growing the
 * vmalloc'd buffer to the next power of two (minimum one page) when it
 * would overflow.  Returns 0, or -ENOMEM if the buffer can't grow. */
static int _ksdebug(struct update *update, const char *fmt, ...)
{
	va_list args;
	unsigned long size, old_size, new_size;

	if (update->debug == 0)
		return 0;

	/* size includes the trailing '\0' */
	va_start(args, fmt);
	size = 1 + vsnprintf(update->debug_blob.data, 0, fmt, args);
	va_end(args);
	/* Capacity is implicit: the current size rounded up to a power
	 * of two, at least PAGE_SIZE.  Compute before/after capacity. */
	old_size = update->debug_blob.size == 0 ? 0 :
	    max(PAGE_SIZE, roundup_pow_of_two(update->debug_blob.size));
	new_size = update->debug_blob.size + size == 0 ? 0 :
	    max(PAGE_SIZE, roundup_pow_of_two(update->debug_blob.size + size));
	if (new_size > old_size) {
		char *buf = vmalloc(new_size);
		if (buf == NULL)
			return -ENOMEM;
		memcpy(buf, update->debug_blob.data, update->debug_blob.size);
		vfree(update->debug_blob.data);
		update->debug_blob.data = buf;
	}
	/* The first vsnprintf consumed args; start over for the real
	 * formatting pass. */
	va_start(args, fmt);
	update->debug_blob.size += vsnprintf(update->debug_blob.data +
					     update->debug_blob.size,
					     size, fmt, args);
	va_end(args);
	return 0;
}
2397 #else /* CONFIG_DEBUG_FS */
/* Without CONFIG_DEBUG_FS there is no debug blob to set up. */
static abort_t init_debug_buf(struct update *update)
{
	return OK;
}
/* Without CONFIG_DEBUG_FS there is nothing to tear down. */
static void clear_debug_buf(struct update *update)
{
	return;
}
/* Fallback debug logging via printk when debugfs is unavailable.
 * Tracks whether the previous message ended in '\n' so that messages
 * built up from several calls get the "ksplice: " prefix only once. */
static int _ksdebug(struct update *update, const char *fmt, ...)
{
	va_list args;

	if (update->debug == 0)
		return 0;

	if (!update->debug_continue_line)
		printk(KERN_DEBUG "ksplice: ");

	va_start(args, fmt);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
	vprintk(fmt, args);
#else /* LINUX_VERSION_CODE < */
/* 683b229286b429244f35726b3c18caec429233bd was after 2.6.8 */
	{
		char *buf = kvasprintf(GFP_KERNEL, fmt, args);
		printk("%s", buf);
		kfree(buf);
	}
#endif /* LINUX_VERSION_CODE */
	va_end(args);

	/* Remember whether the next call continues this line. */
	update->debug_continue_line =
	    fmt[0] == '\0' || fmt[strlen(fmt) - 1] != '\n';
	return 0;
}
2435 #endif /* CONFIG_DEBUG_FS */
2437 #ifdef KSPLICE_NO_KERNEL_SUPPORT
2438 #ifdef CONFIG_KALLSYMS
2439 static int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
2440 struct module *, unsigned long),
2441 void *data)
2443 char namebuf[KSYM_NAME_LEN];
2444 unsigned long i;
2445 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
2446 unsigned int off;
2447 #endif /* LINUX_VERSION_CODE */
2448 int ret;
2450 /* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
2451 * 2.6.10 was the first release after this commit
2453 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
2454 for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
2455 off = kallsyms_expand_symbol(off, namebuf);
2456 ret = fn(data, namebuf, NULL, kallsyms_addresses[i]);
2457 if (ret != 0)
2458 return ret;
2460 #else /* LINUX_VERSION_CODE < */
2461 char *knames;
2463 for (i = 0, knames = kallsyms_names; i < kallsyms_num_syms; i++) {
2464 unsigned prefix = *knames++;
2466 strlcpy(namebuf + prefix, knames, KSYM_NAME_LEN - prefix);
2468 ret = fn(data, namebuf, NULL, kallsyms_addresses[i]);
2469 if (ret != OK)
2470 return ret;
2472 knames += strlen(knames) + 1;
2474 #endif /* LINUX_VERSION_CODE */
2475 return module_kallsyms_on_each_symbol(fn, data);
/* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
 * 2.6.10 was the first release after this commit
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
extern u8 kallsyms_token_table[];
extern u16 kallsyms_token_index[];

/* Decompress the kallsyms entry at offset off into result, skipping
 * the first decoded character (the symbol's type code), and return the
 * offset of the next entry.  Mirrors the kernel's private routine. */
static unsigned int kallsyms_expand_symbol(unsigned int off, char *result)
{
	long len, skipped_first = 0;
	const u8 *tptr, *data;

	/* Entry layout: one length byte, then len token indices. */
	data = &kallsyms_names[off];
	len = *data;
	data++;

	off += len + 1;

	while (len) {
		tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
		data++;
		len--;

		while (*tptr) {
			/* Drop the very first character: it is the
			 * symbol type, not part of the name. */
			if (skipped_first) {
				*result = *tptr;
				result++;
			} else
				skipped_first = 1;
			tptr++;
		}
	}

	*result = '\0';

	return off;
}
#endif /* LINUX_VERSION_CODE */
/* Walk every loaded module's full symbol table (not just exports),
 * invoking fn on each; stop and propagate the first nonzero return. */
static int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
						    struct module *,
						    unsigned long),
					  void *data)
{
	struct module *mod;
	unsigned int i;
	int ret;

	list_for_each_entry(mod, &modules, list) {
		for (i = 0; i < mod->num_symtab; i++) {
			ret = fn(data, mod->strtab + mod->symtab[i].st_name,
				 mod, mod->symtab[i].st_value);
			if (ret != 0)
				return ret;
		}
	}
	return 0;
}
2536 #endif /* CONFIG_KALLSYMS */
2538 static struct module *find_module(const char *name)
2540 struct module *mod;
2542 list_for_each_entry(mod, &modules, list) {
2543 if (strcmp(mod->name, name) == 0)
2544 return mod;
2546 return NULL;
2549 #ifdef CONFIG_MODULE_UNLOAD
/* Mirror of the kernel's private struct module_use: one edge in the
 * module dependency graph, linked on the used module's
 * modules_which_use_me list. */
struct module_use {
	struct list_head list;
	struct module *module_which_uses;
};
2555 /* I'm not yet certain whether we need the strong form of this. */
2556 static inline int strong_try_module_get(struct module *mod)
2558 if (mod && mod->state != MODULE_STATE_LIVE)
2559 return -EBUSY;
2560 if (try_module_get(mod))
2561 return 0;
2562 return -ENOENT;
2565 /* Does a already use b? */
2566 static int already_uses(struct module *a, struct module *b)
2568 struct module_use *use;
2569 list_for_each_entry(use, &b->modules_which_use_me, list) {
2570 if (use->module_which_uses == a)
2571 return 1;
2573 return 0;
/* Make it so module a uses b.  Must be holding module_mutex */
static int use_module(struct module *a, struct module *b)
{
	struct module_use *use;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
/* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
	int no_warn;
#endif /* LINUX_VERSION_CODE */
	/* b == NULL (the kernel) or an existing edge: nothing to do. */
	if (b == NULL || already_uses(a, b))
		return 1;

	/* Pin b; returns 0 (failure) if b is dying or unloadable. */
	if (strong_try_module_get(b) < 0)
		return 0;

	use = kmalloc(sizeof(*use), GFP_ATOMIC);
	if (!use) {
		module_put(b);
		return 0;
	}
	use->module_which_uses = a;
	list_add(&use->list, &b->modules_which_use_me);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
/* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
	/* The holders_dir symlink is best-effort; failure ignored. */
	no_warn = sysfs_create_link(b->holders_dir, &a->mkobj.kobj, a->name);
#endif /* LINUX_VERSION_CODE */
	return 1;
}
#else /* CONFIG_MODULE_UNLOAD */
/* Without module unloading there is no usage tracking: always succeed. */
static int use_module(struct module *a, struct module *b)
{
	return 1;
}
#endif /* CONFIG_MODULE_UNLOAD */
#ifndef CONFIG_MODVERSIONS
/* Without CONFIG_MODVERSIONS there are no symbol CRCs to look up. */
#define symversion(base, idx) NULL
#else
/* CRC entry for symbol idx, or NULL when the CRC table is absent. */
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif
/* One exported-symbol table (a __ksymtab section) plus its CRC table
 * and licence/unused classification; mirrors the kernel's own type. */
struct symsearch {
	const struct kernel_symbol *start, *stop;
	const unsigned long *crcs;
	enum {
		NOT_GPL_ONLY,
		GPL_ONLY,
		WILL_BE_GPL_ONLY,
	} licence;
	bool unused;
};
2627 static bool each_symbol_in_section(const struct symsearch *arr,
2628 unsigned int arrsize,
2629 struct module *owner,
2630 bool (*fn)(const struct symsearch *syms,
2631 struct module *owner,
2632 unsigned int symnum, void *data),
2633 void *data)
2635 unsigned int i, j;
2637 for (j = 0; j < arrsize; j++) {
2638 for (i = 0; i < arr[j].stop - arr[j].start; i++)
2639 if (fn(&arr[j], owner, i, data))
2640 return true;
2643 return false;
/* Returns true as soon as fn returns true, otherwise false. */
static bool each_symbol(bool (*fn)(const struct symsearch *arr,
				   struct module *owner,
				   unsigned int symnum, void *data),
			void *data)
{
	struct module *mod;
	/* The core kernel's export tables, in the same order the
	 * kernel's own resolver consults them. */
	const struct symsearch arr[] = {
		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
		  NOT_GPL_ONLY, false },
		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
		  __start___kcrctab_gpl,
		  GPL_ONLY, false },
#ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
		{ __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
		  __start___kcrctab_gpl_future,
		  WILL_BE_GPL_ONLY, false },
#endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
#ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
		{ __start___ksymtab_unused, __stop___ksymtab_unused,
		  __start___kcrctab_unused,
		  NOT_GPL_ONLY, true },
		{ __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
		  __start___kcrctab_unused_gpl,
		  GPL_ONLY, true },
#endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
	};

	if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
		return 1;

	/* Then every module's export tables, in the same order. */
	list_for_each_entry(mod, &modules, list) {
		struct symsearch module_arr[] = {
			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
			  NOT_GPL_ONLY, false },
			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
			  mod->gpl_crcs,
			  GPL_ONLY, false },
#ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
			{ mod->gpl_future_syms,
			  mod->gpl_future_syms + mod->num_gpl_future_syms,
			  mod->gpl_future_crcs,
			  WILL_BE_GPL_ONLY, false },
#endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
#ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
			{ mod->unused_syms,
			  mod->unused_syms + mod->num_unused_syms,
			  mod->unused_crcs,
			  NOT_GPL_ONLY, true },
			{ mod->unused_gpl_syms,
			  mod->unused_gpl_syms + mod->num_unused_gpl_syms,
			  mod->unused_gpl_crcs,
			  GPL_ONLY, true },
#endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
		};

		if (each_symbol_in_section(module_arr, ARRAY_SIZE(module_arr),
					   mod, fn, data))
			return true;
	}
	return false;
}
/* In/out argument bundle threaded through find_symbol_in_section(). */
struct find_symbol_arg {
	/* Input */
	const char *name;
	bool gplok;
	bool warn;

	/* Output */
	struct module *owner;
	const unsigned long *crc;
	const struct kernel_symbol *sym;
};
/* each_symbol() callback: match one exported symbol against the name
 * in *data, honouring GPL-only restrictions and emitting the same
 * warnings the kernel's resolver would.  On a match, fill the output
 * fields of the find_symbol_arg and return true to stop the walk. */
static bool find_symbol_in_section(const struct symsearch *syms,
				   struct module *owner,
				   unsigned int symnum, void *data)
{
	struct find_symbol_arg *fsa = data;

	if (strcmp(syms->start[symnum].name, fsa->name) != 0)
		return false;

	if (!fsa->gplok) {
		/* A non-GPL consumer may not bind GPL-only exports. */
		if (syms->licence == GPL_ONLY)
			return false;
		if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
			printk(KERN_WARNING "Symbol %s is being used "
			       "by a non-GPL module, which will not "
			       "be allowed in the future\n", fsa->name);
			printk(KERN_WARNING "Please see the file "
			       "Documentation/feature-removal-schedule.txt "
			       "in the kernel source tree for more details.\n");
		}
	}

#ifdef CONFIG_UNUSED_SYMBOLS
	if (syms->unused && fsa->warn) {
		printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
		       "however this module is using it.\n", fsa->name);
		printk(KERN_WARNING
		       "This symbol will go away in the future.\n");
		printk(KERN_WARNING
		       "Please evalute if this is the right api to use and if "
		       "it really is, submit a report the linux kernel "
		       "mailinglist together with submitting your code for "
		       "inclusion.\n");
	}
#endif

	fsa->owner = owner;
	fsa->crc = symversion(syms->crcs, symnum);
	fsa->sym = &syms->start[symnum];
	return true;
}
/* Find a symbol and return it, along with, (optional) crc and
 * (optional) module which owns it */
static const struct kernel_symbol *find_symbol(const char *name,
					       struct module **owner,
					       const unsigned long **crc,
					       bool gplok, bool warn)
{
	struct find_symbol_arg fsa;

	fsa.name = name;
	fsa.gplok = gplok;
	fsa.warn = warn;

	if (each_symbol(find_symbol_in_section, &fsa)) {
		/* owner and crc are optional out-parameters. */
		if (owner)
			*owner = fsa.owner;
		if (crc)
			*crc = fsa.crc;
		return fsa.sym;
	}

	return NULL;
}
/* Return the module whose core area *past the text section* (i.e. the
 * data portion of module_core) contains addr, or NULL.  Counterpart of
 * the kernel's __module_text_address(). */
static struct module *__module_data_address(unsigned long addr)
{
	struct module *mod;

	list_for_each_entry(mod, &modules, list) {
		if (addr >= (unsigned long)mod->module_core +
		    mod->core_text_size &&
		    addr < (unsigned long)mod->module_core + mod->core_size)
			return mod;
	}
	return NULL;
}
2799 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
/* A sysfs attribute whose handlers receive the owning struct update
 * instead of a raw kobject. */
struct ksplice_attribute {
	struct attribute attr;
	ssize_t (*show)(struct update *update, char *buf);
	ssize_t (*store)(struct update *update, const char *buf, size_t len);
};
2807 static ssize_t ksplice_attr_show(struct kobject *kobj, struct attribute *attr,
2808 char *buf)
2810 struct ksplice_attribute *attribute =
2811 container_of(attr, struct ksplice_attribute, attr);
2812 struct update *update = container_of(kobj, struct update, kobj);
2813 if (attribute->show == NULL)
2814 return -EIO;
2815 return attribute->show(update, buf);
2818 static ssize_t ksplice_attr_store(struct kobject *kobj, struct attribute *attr,
2819 const char *buf, size_t len)
2821 struct ksplice_attribute *attribute =
2822 container_of(attr, struct ksplice_attribute, attr);
2823 struct update *update = container_of(kobj, struct update, kobj);
2824 if (attribute->store == NULL)
2825 return -EIO;
2826 return attribute->store(update, buf, len);
/* Route all sysfs show/store calls through the typed dispatchers. */
static struct sysfs_ops ksplice_sysfs_ops = {
	.show = ksplice_attr_show,
	.store = ksplice_attr_store,
};
2834 static void ksplice_release(struct kobject *kobj)
2836 struct update *update;
2837 update = container_of(kobj, struct update, kobj);
2838 cleanup_ksplice_update(update);
/* sysfs "stage" reader: render the update's lifecycle stage.  Falls
 * through to 0 (empty file) on an unknown stage value. */
static ssize_t stage_show(struct update *update, char *buf)
{
	switch (update->stage) {
	case STAGE_PREPARING:
		return snprintf(buf, PAGE_SIZE, "preparing\n");
	case STAGE_APPLIED:
		return snprintf(buf, PAGE_SIZE, "applied\n");
	case STAGE_REVERSED:
		return snprintf(buf, PAGE_SIZE, "reversed\n");
	}
	return 0;
}
/* sysfs "abort_cause" reader: render the last apply/reverse outcome as
 * a lowercase keyword.  Unknown values yield an empty file. */
static ssize_t abort_cause_show(struct update *update, char *buf)
{
	switch (update->abort_cause) {
	case OK:
		return snprintf(buf, PAGE_SIZE, "ok\n");
	case NO_MATCH:
		return snprintf(buf, PAGE_SIZE, "no_match\n");
#ifdef KSPLICE_STANDALONE
	case BAD_SYSTEM_MAP:
		return snprintf(buf, PAGE_SIZE, "bad_system_map\n");
#endif /* KSPLICE_STANDALONE */
	case CODE_BUSY:
		return snprintf(buf, PAGE_SIZE, "code_busy\n");
	case MODULE_BUSY:
		return snprintf(buf, PAGE_SIZE, "module_busy\n");
	case OUT_OF_MEMORY:
		return snprintf(buf, PAGE_SIZE, "out_of_memory\n");
	case FAILED_TO_FIND:
		return snprintf(buf, PAGE_SIZE, "failed_to_find\n");
	case ALREADY_REVERSED:
		return snprintf(buf, PAGE_SIZE, "already_reversed\n");
	case MISSING_EXPORT:
		return snprintf(buf, PAGE_SIZE, "missing_export\n");
	case UNEXPECTED_RUNNING_TASK:
		return snprintf(buf, PAGE_SIZE, "unexpected_running_task\n");
	case UNEXPECTED:
		return snprintf(buf, PAGE_SIZE, "unexpected\n");
	}
	return 0;
}
/* sysfs "conflicts" reader: one line per conflicting task
 * ("<name> <pid> <label>..."), listing only the stack entries that
 * actually conflicted. */
static ssize_t conflict_show(struct update *update, char *buf)
{
	const struct conflict *conf;
	const struct conflict_addr *ca;
	int used = 0;
	list_for_each_entry(conf, &update->conflicts, list) {
		used += snprintf(buf + used, PAGE_SIZE - used, "%s %d",
				 conf->process_name, conf->pid);
		list_for_each_entry(ca, &conf->stack, list) {
			if (!ca->has_conflict)
				continue;
			used += snprintf(buf + used, PAGE_SIZE - used, " %s",
					 ca->label);
		}
		used += snprintf(buf + used, PAGE_SIZE - used, "\n");
	}
	return used;
}
/* sysfs "stage" writer: "applied" applies a prepared update and
 * "reversed" reverses an applied one; any other input (or a stage
 * mismatch) is silently ignored.  The whole write is consumed. */
static ssize_t stage_store(struct update *update, const char *buf, size_t len)
{
	/* Accept the keyword with or without a trailing newline. */
	if ((strncmp(buf, "applied", len) == 0 ||
	     strncmp(buf, "applied\n", len) == 0) &&
	    update->stage == STAGE_PREPARING)
		update->abort_cause = apply_update(update);
	else if ((strncmp(buf, "reversed", len) == 0 ||
		  strncmp(buf, "reversed\n", len) == 0) &&
		 update->stage == STAGE_APPLIED)
		update->abort_cause = reverse_patches(update);
	if (update->abort_cause == OK)
		printk(KERN_INFO "ksplice: Update %s %s successfully\n",
		       update->kid,
		       update->stage == STAGE_APPLIED ? "applied" : "reversed");
	return len;
}
/* sysfs "debug" reader: current debug level as a decimal. */
static ssize_t debug_show(struct update *update, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", update->debug);
}
/* sysfs "debug" writer: parse a base-10 debug level; propagate the
 * parse error unchanged on bad input. */
static ssize_t debug_store(struct update *update, const char *buf, size_t len)
{
	unsigned long l;
	int ret = strict_strtoul(buf, 10, &l);
	if (ret != 0)
		return ret;
	update->debug = l;
	return len;
}
/* Per-update sysfs files: stage (rw), abort_cause (ro), debug (rw),
 * conflicts (ro). */
static struct ksplice_attribute stage_attribute =
	__ATTR(stage, 0600, stage_show, stage_store);
static struct ksplice_attribute abort_cause_attribute =
	__ATTR(abort_cause, 0400, abort_cause_show, NULL);
static struct ksplice_attribute debug_attribute =
	__ATTR(debug, 0600, debug_show, debug_store);
static struct ksplice_attribute conflict_attribute =
	__ATTR(conflicts, 0400, conflict_show, NULL);

static struct attribute *ksplice_attrs[] = {
	&stage_attribute.attr,
	&abort_cause_attribute.attr,
	&debug_attribute.attr,
	&conflict_attribute.attr,
	NULL
};

/* kobject type for struct update: frees the update on the last put. */
static struct kobj_type ksplice_ktype = {
	.sysfs_ops = &ksplice_sysfs_ops,
	.release = ksplice_release,
	.default_attrs = ksplice_attrs,
};
#ifdef KSPLICE_STANDALONE
/* Module parameter: debug level inherited by the bootstrap update. */
static int debug;
module_param(debug, int, 0600);
MODULE_PARM_DESC(debug, "Debug level");

/* System.map data compiled into the standalone module. */
extern struct ksplice_system_map ksplice_system_map[], ksplice_system_map_end[];

/* Synthetic pack used at module init to resolve ksplice's own
 * relocations against the running kernel (standalone builds only;
 * target NULL means the core kernel). */
static struct ksplice_pack bootstrap_pack = {
	.name = "ksplice_" __stringify(KSPLICE_KID),
	.kid = "init_" __stringify(KSPLICE_KID),
	.target_name = NULL,
	.target = NULL,
	.map_printk = MAP_PRINTK,
	.primary = THIS_MODULE,
	.labelvals = LIST_HEAD_INIT(bootstrap_pack.labelvals),
	.primary_system_map = ksplice_system_map,
	.primary_system_map_end = ksplice_system_map_end,
};
#endif /* KSPLICE_STANDALONE */
/* Module init: in standalone builds, bootstrap ksplice by applying its
 * own init relocations; otherwise just create /sys/kernel/ksplice. */
static int init_ksplice(void)
{
#ifdef KSPLICE_STANDALONE
	struct ksplice_pack *pack = &bootstrap_pack;
	pack->update = init_ksplice_update(pack->kid);
	if (pack->update == NULL)
		return -ENOMEM;
	add_to_update(pack, pack->update);
	pack->update->debug = debug;
	pack->update->abort_cause =
	    apply_relocs(pack, ksplice_init_relocs, ksplice_init_relocs_end);
	/* Only a successfully bootstrapped ksplice may proceed to
	 * apply real updates. */
	if (pack->update->abort_cause == OK)
		bootstrapped = true;
#else /* !KSPLICE_STANDALONE */
	ksplice_kobj = kobject_create_and_add("ksplice", kernel_kobj);
	if (ksplice_kobj == NULL)
		return -ENOMEM;
#endif /* KSPLICE_STANDALONE */
	return 0;
}
/* Module exit: undo whatever init_ksplice() created. */
static void cleanup_ksplice(void)
{
#ifdef KSPLICE_STANDALONE
	cleanup_ksplice_update(bootstrap_pack.update);
#else /* !KSPLICE_STANDALONE */
	kobject_put(ksplice_kobj);
#endif /* KSPLICE_STANDALONE */
}
3009 module_init(init_ksplice);
3010 module_exit(cleanup_ksplice);
3012 MODULE_AUTHOR("Jeffrey Brian Arnold <jbarnold@mit.edu>");
3013 MODULE_DESCRIPTION("Ksplice rebootless update system");
3014 #ifdef KSPLICE_VERSION
3015 MODULE_VERSION(KSPLICE_VERSION);
3016 #endif
3017 MODULE_LICENSE("GPL v2");